Merge tag 'mmc-v4.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc

Pull MMC fixes from Ulf Hansson:
 "Here are some mmc fixes intended for v4.9 rc2.

  This time I have also included a few changes for a memstick driver
  which has a corresponding mmc driver. They use the same USB device as
  parent, hence both needs to play nice with runtime PM, which they
  didn't.

  MMC core:
   - Update MAINTAINERS as the mmc tree moved to kernel.org
   - A few fixes for HS400es mode
   - A few other minor fixes

  MMC host:
   - sdhci: Fix an issue when dealing with stop commands
   - sdhci-pci: Fix a bus power failure issue
   - sdhci-esdhc-imx: Correct two register accesses
   - sdhci-of-arasan: Fix the 1.8V I/O signal switch behaviour
   - rtsx_usb_sdmmc: Fix runtime PM issues

  Other: (Because of no maintainer)
   - memstick: rtsx_usb_ms: Fix runtime PM issues"

* tag 'mmc-v4.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc:
  MAINTAINERS: mmc: Move the mmc tree to kernel.org
  memstick: rtsx_usb_ms: Manage runtime PM when accessing the device
  memstick: rtsx_usb_ms: Runtime resume the device when polling for cards
  mmc: rtsx_usb_sdmmc: Handle runtime PM while changing the led
  mmc: rtsx_usb_sdmmc: Avoid keeping the device runtime resumed when unused
  mmc: sdhci: cast unsigned int to unsigned long long to avoid unexpeted error
  mmc: sdhci-esdhc-imx: Correct two register accesses
  mmc: sdhci-pci: Fix bus power failing to enable for some Intel controllers
  mmc: sdhci-pci: Let devices define their own sdhci_ops
  mmc: sdhci: Rename sdhci_set_power() to sdhci_set_power_noreg()
  mmc: sdhci: Fix SDHCI_QUIRK2_STOP_WITH_TC
  mmc: core: Annotate cmd_hdr as __le32
  mmc: sdhci-of-arasan: add sdhci_arasan_voltage_switch for arasan, 5.1
  mmc: core: changes frequency to hs_max_dtr when selecting hs400es
  mmc: core: switch to 1V8 or 1V2 for hs400es mode
  mmc: block: add missing header dependencies
  mmc: sdhci-of-arasan: Fix non static symbol warning
diff --git a/.mailmap b/.mailmap
index 2408e56..02d2614 100644
--- a/.mailmap
+++ b/.mailmap
@@ -127,6 +127,7 @@
 Peter Oruba <peter.oruba@amd.com>
 Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
 Praveen BP <praveenbp@ti.com>
+Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
 Rajesh Shah <rajesh.shah@intel.com>
 Ralf Baechle <ralf@linux-mips.org>
 Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index cb9a6c6..3acc4f1 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -46,7 +46,8 @@
 Intel-IOMMU.txt
 	- basic info on the Intel IOMMU virtualization support.
 Makefile
-	- some files in Documentation dir are actually sample code to build
+	- This file does nothing. Removing it breaks make htmldocs and
+	  make distclean.
 ManagementStyle
 	- how to (attempt to) manage kernel hackers.
 RCU/
diff --git a/Documentation/80211/cfg80211.rst b/Documentation/80211/cfg80211.rst
new file mode 100644
index 0000000..b1e149e
--- /dev/null
+++ b/Documentation/80211/cfg80211.rst
@@ -0,0 +1,345 @@
+==================
+cfg80211 subsystem
+==================
+
+Device registration
+===================
+
+.. kernel-doc:: include/net/cfg80211.h
+   :doc: Device registration
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_channel_flags
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_channel
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_rate_flags
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_rate
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_sta_ht_cap
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_supported_band
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_signal_type
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: wiphy_params_flags
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: wiphy_flags
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: wiphy
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: wireless_dev
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: wiphy_new
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: wiphy_register
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: wiphy_unregister
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: wiphy_free
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: wiphy_name
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: wiphy_dev
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: wiphy_priv
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: priv_to_wiphy
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: set_wiphy_dev
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: wdev_priv
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_iface_limit
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_iface_combination
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_check_combinations
+
+Actions and configuration
+=========================
+
+.. kernel-doc:: include/net/cfg80211.h
+   :doc: Actions and configuration
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_ops
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: vif_params
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: key_params
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: survey_info_flags
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: survey_info
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_beacon_data
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_ap_settings
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: station_parameters
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: rate_info_flags
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: rate_info
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: station_info
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: monitor_flags
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: mpath_info_flags
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: mpath_info
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: bss_parameters
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_txq_params
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_crypto_settings
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_auth_request
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_assoc_request
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_deauth_request
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_disassoc_request
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_ibss_params
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_connect_params
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_pmksa
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_rx_mlme_mgmt
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_auth_timeout
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_rx_assoc_resp
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_assoc_timeout
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_tx_mlme_mgmt
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_ibss_joined
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_connect_result
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_connect_bss
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_connect_timeout
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_roamed
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_disconnected
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_ready_on_channel
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_remain_on_channel_expired
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_new_sta
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_rx_mgmt
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_mgmt_tx_status
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_cqm_rssi_notify
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_cqm_pktloss_notify
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_michael_mic_failure
+
+Scanning and BSS list handling
+==============================
+
+.. kernel-doc:: include/net/cfg80211.h
+   :doc: Scanning and BSS list handling
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_ssid
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_scan_request
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_scan_done
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_bss
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_inform_bss
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_inform_bss_frame_data
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_inform_bss_data
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_unlink_bss
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_find_ie
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_bss_get_ie
+
+Utility functions
+=================
+
+.. kernel-doc:: include/net/cfg80211.h
+   :doc: Utility functions
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_channel_to_frequency
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_frequency_to_channel
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_get_channel
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_get_response_rate
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_hdrlen
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_get_hdrlen_from_skb
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_radiotap_iterator
+
+Data path helpers
+=================
+
+.. kernel-doc:: include/net/cfg80211.h
+   :doc: Data path helpers
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_data_to_8023
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_data_from_8023
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: ieee80211_amsdu_to_8023s
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_classify8021d
+
+Regulatory enforcement infrastructure
+=====================================
+
+.. kernel-doc:: include/net/cfg80211.h
+   :doc: Regulatory enforcement infrastructure
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: regulatory_hint
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: wiphy_apply_custom_regulatory
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: freq_reg_info
+
+RFkill integration
+==================
+
+.. kernel-doc:: include/net/cfg80211.h
+   :doc: RFkill integration
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: wiphy_rfkill_set_hw_state
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: wiphy_rfkill_start_polling
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: wiphy_rfkill_stop_polling
+
+Test mode
+=========
+
+.. kernel-doc:: include/net/cfg80211.h
+   :doc: Test mode
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_testmode_alloc_reply_skb
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_testmode_reply
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_testmode_alloc_event_skb
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_testmode_event
diff --git a/Documentation/80211/conf.py b/Documentation/80211/conf.py
new file mode 100644
index 0000000..20c7c27
--- /dev/null
+++ b/Documentation/80211/conf.py
@@ -0,0 +1,5 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = "Linux 802.11 Driver Developer's Guide"
+
+tags.add("subproject")
diff --git a/Documentation/80211/index.rst b/Documentation/80211/index.rst
new file mode 100644
index 0000000..90bba47
--- /dev/null
+++ b/Documentation/80211/index.rst
@@ -0,0 +1,17 @@
+=====================================
+Linux 802.11 Driver Developer's Guide
+=====================================
+
+.. toctree::
+
+   introduction
+   cfg80211
+   mac80211
+   mac80211-advanced
+
+.. only::  subproject
+
+   Indices
+   =======
+
+   * :ref:`genindex`
diff --git a/Documentation/80211/introduction.rst b/Documentation/80211/introduction.rst
new file mode 100644
index 0000000..4938fa8
--- /dev/null
+++ b/Documentation/80211/introduction.rst
@@ -0,0 +1,17 @@
+============
+Introduction
+============
+
+Explaining wireless 802.11 networking in the Linux kernel
+
+Copyright 2007-2009 Johannes Berg
+
+These books attempt to give a description of the various subsystems
+that play a role in 802.11 wireless networking in Linux. Since these
+books are for kernel developers they attempts to document the
+structures and functions used in the kernel as well as giving a
+higher-level overview.
+
+The reader is expected to be familiar with the 802.11 standard as
+published by the IEEE in 802.11-2007 (or possibly later versions).
+References to this standard will be given as "802.11-2007 8.1.5".
diff --git a/Documentation/80211/mac80211-advanced.rst b/Documentation/80211/mac80211-advanced.rst
new file mode 100644
index 0000000..70a89b2
--- /dev/null
+++ b/Documentation/80211/mac80211-advanced.rst
@@ -0,0 +1,295 @@
+=============================
+mac80211 subsystem (advanced)
+=============================
+
+Information contained within this part of the book is of interest only
+for advanced interaction of mac80211 with drivers to exploit more
+hardware capabilities and improve performance.
+
+LED support
+===========
+
+Mac80211 supports various ways of blinking LEDs. Wherever possible,
+device LEDs should be exposed as LED class devices and hooked up to the
+appropriate trigger, which will then be triggered appropriately by
+mac80211.
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_get_tx_led_name
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_get_rx_led_name
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_get_assoc_led_name
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_get_radio_led_name
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_tpt_blink
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_tpt_led_trigger_flags
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_create_tpt_led_trigger
+
+Hardware crypto acceleration
+============================
+
+.. kernel-doc:: include/net/mac80211.h
+   :doc: Hardware crypto acceleration
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: set_key_cmd
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_key_conf
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_key_flags
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_get_tkip_p1k
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_get_tkip_p1k_iv
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_get_tkip_p2k
+
+Powersave support
+=================
+
+.. kernel-doc:: include/net/mac80211.h
+   :doc: Powersave support
+
+Beacon filter support
+=====================
+
+.. kernel-doc:: include/net/mac80211.h
+   :doc: Beacon filter support
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_beacon_loss
+
+Multiple queues and QoS support
+===============================
+
+TBD
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_tx_queue_params
+
+Access point mode support
+=========================
+
+TBD
+
+Some parts of the if_conf should be discussed here instead
+
+Insert notes about VLAN interfaces with hw crypto here or in the hw
+crypto chapter.
+
+support for powersaving clients
+-------------------------------
+
+.. kernel-doc:: include/net/mac80211.h
+   :doc: AP support for powersaving clients
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_get_buffered_bc
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_beacon_get
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_sta_eosp
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_frame_release_type
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_sta_ps_transition
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_sta_ps_transition_ni
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_sta_set_buffered
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_sta_block_awake
+
+Supporting multiple virtual interfaces
+======================================
+
+TBD
+
+Note: WDS with identical MAC address should almost always be OK
+
+Insert notes about having multiple virtual interfaces with different MAC
+addresses here, note which configurations are supported by mac80211, add
+notes about supporting hw crypto with it.
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_iterate_active_interfaces
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_iterate_active_interfaces_atomic
+
+Station handling
+================
+
+TODO
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_sta
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: sta_notify_cmd
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_find_sta
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_find_sta_by_ifaddr
+
+Hardware scan offload
+=====================
+
+TBD
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_scan_completed
+
+Aggregation
+===========
+
+TX A-MPDU aggregation
+---------------------
+
+.. kernel-doc:: net/mac80211/agg-tx.c
+   :doc: TX A-MPDU aggregation
+
+.. WARNING: DOCPROC directive not supported: !Cnet/mac80211/agg-tx.c
+
+RX A-MPDU aggregation
+---------------------
+
+.. kernel-doc:: net/mac80211/agg-rx.c
+   :doc: RX A-MPDU aggregation
+
+.. WARNING: DOCPROC directive not supported: !Cnet/mac80211/agg-rx.c
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_ampdu_mlme_action
+
+Spatial Multiplexing Powersave (SMPS)
+=====================================
+
+.. kernel-doc:: include/net/mac80211.h
+   :doc: Spatial multiplexing power save
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_request_smps
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_smps_mode
+
+TBD
+
+This part of the book describes the rate control algorithm interface and
+how it relates to mac80211 and drivers.
+
+Rate Control API
+================
+
+TBD
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_start_tx_ba_session
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_start_tx_ba_cb_irqsafe
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_stop_tx_ba_session
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_stop_tx_ba_cb_irqsafe
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_rate_control_changed
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_tx_rate_control
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: rate_control_send_low
+
+TBD
+
+This part of the book describes mac80211 internals.
+
+Key handling
+============
+
+Key handling basics
+-------------------
+
+.. kernel-doc:: net/mac80211/key.c
+   :doc: Key handling basics
+
+MORE TBD
+--------
+
+TBD
+
+Receive processing
+==================
+
+TBD
+
+Transmit processing
+===================
+
+TBD
+
+Station info handling
+=====================
+
+Programming information
+-----------------------
+
+.. kernel-doc:: net/mac80211/sta_info.h
+   :functions: sta_info
+
+.. kernel-doc:: net/mac80211/sta_info.h
+   :functions: ieee80211_sta_info_flags
+
+STA information lifetime rules
+------------------------------
+
+.. kernel-doc:: net/mac80211/sta_info.c
+   :doc: STA information lifetime rules
+
+Aggregation
+===========
+
+.. kernel-doc:: net/mac80211/sta_info.h
+   :functions: sta_ampdu_mlme
+
+.. kernel-doc:: net/mac80211/sta_info.h
+   :functions: tid_ampdu_tx
+
+.. kernel-doc:: net/mac80211/sta_info.h
+   :functions: tid_ampdu_rx
+
+Synchronisation
+===============
+
+TBD
+
+Locking, lots of RCU
diff --git a/Documentation/80211/mac80211.rst b/Documentation/80211/mac80211.rst
new file mode 100644
index 0000000..85a8335
--- /dev/null
+++ b/Documentation/80211/mac80211.rst
@@ -0,0 +1,216 @@
+===========================
+mac80211 subsystem (basics)
+===========================
+
+You should read and understand the information contained within this
+part of the book while implementing a mac80211 driver. In some chapters,
+advanced usage is noted, those may be skipped if this isn't needed.
+
+This part of the book only covers station and monitor mode
+functionality, additional information required to implement the other
+modes is covered in the second part of the book.
+
+Basic hardware handling
+=======================
+
+TBD
+
+This chapter shall contain information on getting a hw struct allocated
+and registered with mac80211.
+
+Since it is required to allocate rates/modes before registering a hw
+struct, this chapter shall also contain information on setting up the
+rate/mode structs.
+
+Additionally, some discussion about the callbacks and the general
+programming model should be in here, including the definition of
+ieee80211_ops which will be referred to a lot.
+
+Finally, a discussion of hardware capabilities should be done with
+references to other parts of the book.
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_hw
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_hw_flags
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: SET_IEEE80211_DEV
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: SET_IEEE80211_PERM_ADDR
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_ops
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_alloc_hw
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_register_hw
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_unregister_hw
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_free_hw
+
+PHY configuration
+=================
+
+TBD
+
+This chapter should describe PHY handling including start/stop callbacks
+and the various structures used.
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_conf
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_conf_flags
+
+Virtual interfaces
+==================
+
+TBD
+
+This chapter should describe virtual interface basics that are relevant
+to the driver (VLANs, MGMT etc are not.) It should explain the use of
+the add_iface/remove_iface callbacks as well as the interface
+configuration callbacks.
+
+Things related to AP mode should be discussed there.
+
+Things related to supporting multiple interfaces should be in the
+appropriate chapter, a BIG FAT note should be here about this though and
+the recommendation to allow only a single interface in STA mode at
+first!
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_vif
+
+Receive and transmit processing
+===============================
+
+what should be here
+-------------------
+
+TBD
+
+This should describe the receive and transmit paths in mac80211/the
+drivers as well as transmit status handling.
+
+Frame format
+------------
+
+.. kernel-doc:: include/net/mac80211.h
+   :doc: Frame format
+
+Packet alignment
+----------------
+
+.. kernel-doc:: net/mac80211/rx.c
+   :doc: Packet alignment
+
+Calling into mac80211 from interrupts
+-------------------------------------
+
+.. kernel-doc:: include/net/mac80211.h
+   :doc: Calling mac80211 from interrupts
+
+functions/definitions
+---------------------
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_rx_status
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: mac80211_rx_flags
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: mac80211_tx_info_flags
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: mac80211_tx_control_flags
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: mac80211_rate_control_flags
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_tx_rate
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_tx_info
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_tx_info_clear_status
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_rx
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_rx_ni
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_rx_irqsafe
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_tx_status
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_tx_status_ni
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_tx_status_irqsafe
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_rts_get
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_rts_duration
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_ctstoself_get
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_ctstoself_duration
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_generic_frame_duration
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_wake_queue
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_stop_queue
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_wake_queues
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_stop_queues
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_queue_stopped
+
+Frame filtering
+===============
+
+.. kernel-doc:: include/net/mac80211.h
+   :doc: Frame filtering
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_filter_flags
+
+The mac80211 workqueue
+======================
+
+.. kernel-doc:: include/net/mac80211.h
+   :doc: mac80211 workqueue
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_queue_work
+
+.. kernel-doc:: include/net/mac80211.h
+   :functions: ieee80211_queue_delayed_work
diff --git a/Documentation/ABI/testing/sysfs-bus-rbd b/Documentation/ABI/testing/sysfs-bus-rbd
index 2ddd680..f208ac5 100644
--- a/Documentation/ABI/testing/sysfs-bus-rbd
+++ b/Documentation/ABI/testing/sysfs-bus-rbd
@@ -6,7 +6,7 @@
 
 Being used for adding and removing rbd block devices.
 
-Usage: <mon ip addr> <options> <pool name> <rbd image name> [snap name]
+Usage: <mon ip addr> <options> <pool name> <rbd image name> [<snap name>]
 
  $ echo "192.168.0.1 name=admin rbd foo" > /sys/bus/rbd/add
 
@@ -14,9 +14,13 @@
 will be assigned for any registered block device. If snapshot is used, it will
 be mapped read-only.
 
-Removal of a device:
+Usage: <dev-id> [force]
 
-  $ echo <dev-id> > /sys/bus/rbd/remove
+ $ echo 2 > /sys/bus/rbd/remove
+
+Optional "force" argument which when passed will wait for running requests and
+then unmap the image. Requests sent to the driver after initiating the removal
+will be failed.  (August 2016, since 4.9.)
 
 What:		/sys/bus/rbd/add_single_major
 Date:		December 2013
@@ -43,10 +47,25 @@
 Entries under /sys/bus/rbd/devices/<dev-id>/
 --------------------------------------------
 
+client_addr
+
+	The ceph unique client entity_addr_t (address + nonce).
+	The format is <address>:<port>/<nonce>: '1.2.3.4:1234/5678' or
+	'[1:2:3:4:5:6:7:8]:1234/5678'.  (August 2016, since 4.9.)
+
 client_id
 
 	The ceph unique client id that was assigned for this specific session.
 
+cluster_fsid
+
+	The ceph cluster UUID.  (August 2016, since 4.9.)
+
+config_info
+
+	The string written into /sys/bus/rbd/add{,_single_major}.  (August
+	2016, since 4.9.)
+
 features
 
 	A hexadecimal encoding of the feature bits for this image.
@@ -92,6 +111,10 @@
 
 	The current snapshot for which the device is mapped.
 
+snap_id
+
+	The current snapshot's id.  (August 2016, since 4.9.)
+
 parent
 
 	Information identifying the chain of parent images in a layered rbd
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index 2d455a5..98bf7ac 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -126,3 +126,20 @@
 
 NOTE: At the moment DMA_ATTR_ALLOC_SINGLE_PAGES is only implemented on ARM,
 though ARM64 patches will likely be posted soon.
+
+DMA_ATTR_NO_WARN
+----------------
+
+This tells the DMA-mapping subsystem to suppress allocation failure reports
+(similarly to __GFP_NOWARN).
+
+On some architectures allocation failures are reported with error messages
+to the system logs.  Although this can help to identify and debug problems,
+drivers which handle failures (eg, retry later) have no problems with them,
+and can actually flood the system logs with error messages that aren't any
+problem at all, depending on the implementation of the retry mechanism.
+
+So, this provides a way for drivers to avoid those error messages on calls
+where allocation failures are not a problem, and shouldn't bother the logs.
+
+NOTE: At the moment DMA_ATTR_NO_WARN is only implemented on PowerPC.
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
deleted file mode 100644
index 800fe7a..0000000
--- a/Documentation/DocBook/80211.tmpl
+++ /dev/null
@@ -1,584 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE set PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-<set>
-  <setinfo>
-    <title>The 802.11 subsystems &ndash; for kernel developers</title>
-    <subtitle>
-      Explaining wireless 802.11 networking in the Linux kernel
-    </subtitle>
-
-    <copyright>
-      <year>2007-2009</year>
-      <holder>Johannes Berg</holder>
-    </copyright>
-
-    <authorgroup>
-      <author>
-        <firstname>Johannes</firstname>
-        <surname>Berg</surname>
-        <affiliation>
-          <address><email>johannes@sipsolutions.net</email></address>
-        </affiliation>
-      </author>
-    </authorgroup>
-
-    <legalnotice>
-      <para>
-        This documentation is free software; you can redistribute
-        it and/or modify it under the terms of the GNU General Public
-        License version 2 as published by the Free Software Foundation.
-      </para>
-      <para>
-        This documentation is distributed in the hope that it will be
-        useful, but WITHOUT ANY WARRANTY; without even the implied
-        warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-        See the GNU General Public License for more details.
-      </para>
-      <para>
-        You should have received a copy of the GNU General Public
-        License along with this documentation; if not, write to the Free
-        Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-        MA 02111-1307 USA
-      </para>
-      <para>
-        For more details see the file COPYING in the source
-        distribution of Linux.
-      </para>
-    </legalnotice>
-
-    <abstract>
-      <para>
-        These books attempt to give a description of the
-        various subsystems that play a role in 802.11 wireless
-        networking in Linux. Since these books are for kernel
-        developers they attempts to document the structures
-        and functions used in the kernel as well as giving a
-        higher-level overview.
-      </para>
-      <para>
-	The reader is expected to be familiar with the 802.11
-	standard as published by the IEEE in 802.11-2007 (or
-	possibly later versions). References to this standard
-	will be given as "802.11-2007 8.1.5".
-      </para>
-    </abstract>
-  </setinfo>
-  <book id="cfg80211-developers-guide">
-    <bookinfo>
-      <title>The cfg80211 subsystem</title>
-
-      <abstract>
-!Pinclude/net/cfg80211.h Introduction
-      </abstract>
-    </bookinfo>
-      <chapter>
-      <title>Device registration</title>
-!Pinclude/net/cfg80211.h Device registration
-!Finclude/net/cfg80211.h ieee80211_channel_flags
-!Finclude/net/cfg80211.h ieee80211_channel
-!Finclude/net/cfg80211.h ieee80211_rate_flags
-!Finclude/net/cfg80211.h ieee80211_rate
-!Finclude/net/cfg80211.h ieee80211_sta_ht_cap
-!Finclude/net/cfg80211.h ieee80211_supported_band
-!Finclude/net/cfg80211.h cfg80211_signal_type
-!Finclude/net/cfg80211.h wiphy_params_flags
-!Finclude/net/cfg80211.h wiphy_flags
-!Finclude/net/cfg80211.h wiphy
-!Finclude/net/cfg80211.h wireless_dev
-!Finclude/net/cfg80211.h wiphy_new
-!Finclude/net/cfg80211.h wiphy_register
-!Finclude/net/cfg80211.h wiphy_unregister
-!Finclude/net/cfg80211.h wiphy_free
-
-!Finclude/net/cfg80211.h wiphy_name
-!Finclude/net/cfg80211.h wiphy_dev
-!Finclude/net/cfg80211.h wiphy_priv
-!Finclude/net/cfg80211.h priv_to_wiphy
-!Finclude/net/cfg80211.h set_wiphy_dev
-!Finclude/net/cfg80211.h wdev_priv
-!Finclude/net/cfg80211.h ieee80211_iface_limit
-!Finclude/net/cfg80211.h ieee80211_iface_combination
-!Finclude/net/cfg80211.h cfg80211_check_combinations
-      </chapter>
-      <chapter>
-      <title>Actions and configuration</title>
-!Pinclude/net/cfg80211.h Actions and configuration
-!Finclude/net/cfg80211.h cfg80211_ops
-!Finclude/net/cfg80211.h vif_params
-!Finclude/net/cfg80211.h key_params
-!Finclude/net/cfg80211.h survey_info_flags
-!Finclude/net/cfg80211.h survey_info
-!Finclude/net/cfg80211.h cfg80211_beacon_data
-!Finclude/net/cfg80211.h cfg80211_ap_settings
-!Finclude/net/cfg80211.h station_parameters
-!Finclude/net/cfg80211.h rate_info_flags
-!Finclude/net/cfg80211.h rate_info
-!Finclude/net/cfg80211.h station_info
-!Finclude/net/cfg80211.h monitor_flags
-!Finclude/net/cfg80211.h mpath_info_flags
-!Finclude/net/cfg80211.h mpath_info
-!Finclude/net/cfg80211.h bss_parameters
-!Finclude/net/cfg80211.h ieee80211_txq_params
-!Finclude/net/cfg80211.h cfg80211_crypto_settings
-!Finclude/net/cfg80211.h cfg80211_auth_request
-!Finclude/net/cfg80211.h cfg80211_assoc_request
-!Finclude/net/cfg80211.h cfg80211_deauth_request
-!Finclude/net/cfg80211.h cfg80211_disassoc_request
-!Finclude/net/cfg80211.h cfg80211_ibss_params
-!Finclude/net/cfg80211.h cfg80211_connect_params
-!Finclude/net/cfg80211.h cfg80211_pmksa
-!Finclude/net/cfg80211.h cfg80211_rx_mlme_mgmt
-!Finclude/net/cfg80211.h cfg80211_auth_timeout
-!Finclude/net/cfg80211.h cfg80211_rx_assoc_resp
-!Finclude/net/cfg80211.h cfg80211_assoc_timeout
-!Finclude/net/cfg80211.h cfg80211_tx_mlme_mgmt
-!Finclude/net/cfg80211.h cfg80211_ibss_joined
-!Finclude/net/cfg80211.h cfg80211_connect_result
-!Finclude/net/cfg80211.h cfg80211_connect_bss
-!Finclude/net/cfg80211.h cfg80211_connect_timeout
-!Finclude/net/cfg80211.h cfg80211_roamed
-!Finclude/net/cfg80211.h cfg80211_disconnected
-!Finclude/net/cfg80211.h cfg80211_ready_on_channel
-!Finclude/net/cfg80211.h cfg80211_remain_on_channel_expired
-!Finclude/net/cfg80211.h cfg80211_new_sta
-!Finclude/net/cfg80211.h cfg80211_rx_mgmt
-!Finclude/net/cfg80211.h cfg80211_mgmt_tx_status
-!Finclude/net/cfg80211.h cfg80211_cqm_rssi_notify
-!Finclude/net/cfg80211.h cfg80211_cqm_pktloss_notify
-!Finclude/net/cfg80211.h cfg80211_michael_mic_failure
-      </chapter>
-      <chapter>
-      <title>Scanning and BSS list handling</title>
-!Pinclude/net/cfg80211.h Scanning and BSS list handling
-!Finclude/net/cfg80211.h cfg80211_ssid
-!Finclude/net/cfg80211.h cfg80211_scan_request
-!Finclude/net/cfg80211.h cfg80211_scan_done
-!Finclude/net/cfg80211.h cfg80211_bss
-!Finclude/net/cfg80211.h cfg80211_inform_bss
-!Finclude/net/cfg80211.h cfg80211_inform_bss_frame_data
-!Finclude/net/cfg80211.h cfg80211_inform_bss_data
-!Finclude/net/cfg80211.h cfg80211_unlink_bss
-!Finclude/net/cfg80211.h cfg80211_find_ie
-!Finclude/net/cfg80211.h ieee80211_bss_get_ie
-      </chapter>
-      <chapter>
-      <title>Utility functions</title>
-!Pinclude/net/cfg80211.h Utility functions
-!Finclude/net/cfg80211.h ieee80211_channel_to_frequency
-!Finclude/net/cfg80211.h ieee80211_frequency_to_channel
-!Finclude/net/cfg80211.h ieee80211_get_channel
-!Finclude/net/cfg80211.h ieee80211_get_response_rate
-!Finclude/net/cfg80211.h ieee80211_hdrlen
-!Finclude/net/cfg80211.h ieee80211_get_hdrlen_from_skb
-!Finclude/net/cfg80211.h ieee80211_radiotap_iterator
-      </chapter>
-      <chapter>
-      <title>Data path helpers</title>
-!Pinclude/net/cfg80211.h Data path helpers
-!Finclude/net/cfg80211.h ieee80211_data_to_8023
-!Finclude/net/cfg80211.h ieee80211_data_from_8023
-!Finclude/net/cfg80211.h ieee80211_amsdu_to_8023s
-!Finclude/net/cfg80211.h cfg80211_classify8021d
-      </chapter>
-      <chapter>
-      <title>Regulatory enforcement infrastructure</title>
-!Pinclude/net/cfg80211.h Regulatory enforcement infrastructure
-!Finclude/net/cfg80211.h regulatory_hint
-!Finclude/net/cfg80211.h wiphy_apply_custom_regulatory
-!Finclude/net/cfg80211.h freq_reg_info
-      </chapter>
-      <chapter>
-      <title>RFkill integration</title>
-!Pinclude/net/cfg80211.h RFkill integration
-!Finclude/net/cfg80211.h wiphy_rfkill_set_hw_state
-!Finclude/net/cfg80211.h wiphy_rfkill_start_polling
-!Finclude/net/cfg80211.h wiphy_rfkill_stop_polling
-      </chapter>
-      <chapter>
-      <title>Test mode</title>
-!Pinclude/net/cfg80211.h Test mode
-!Finclude/net/cfg80211.h cfg80211_testmode_alloc_reply_skb
-!Finclude/net/cfg80211.h cfg80211_testmode_reply
-!Finclude/net/cfg80211.h cfg80211_testmode_alloc_event_skb
-!Finclude/net/cfg80211.h cfg80211_testmode_event
-      </chapter>
-  </book>
-  <book id="mac80211-developers-guide">
-    <bookinfo>
-      <title>The mac80211 subsystem</title>
-      <abstract>
-!Pinclude/net/mac80211.h Introduction
-!Pinclude/net/mac80211.h Warning
-      </abstract>
-    </bookinfo>
-
-    <toc></toc>
-
-  <!--
-  Generally, this document shall be ordered by increasing complexity.
-  It is important to note that readers should be able to read only
-  the first few sections to get a working driver and only advanced
-  usage should require reading the full document.
-  -->
-
-    <part>
-      <title>The basic mac80211 driver interface</title>
-      <partintro>
-        <para>
-          You should read and understand the information contained
-          within this part of the book while implementing a driver.
-          In some chapters, advanced usage is noted, that may be
-          skipped at first.
-        </para>
-        <para>
-          This part of the book only covers station and monitor mode
-          functionality, additional information required to implement
-          the other modes is covered in the second part of the book.
-        </para>
-      </partintro>
-
-      <chapter id="basics">
-        <title>Basic hardware handling</title>
-        <para>TBD</para>
-        <para>
-          This chapter shall contain information on getting a hw
-          struct allocated and registered with mac80211.
-        </para>
-        <para>
-          Since it is required to allocate rates/modes before registering
-          a hw struct, this chapter shall also contain information on setting
-          up the rate/mode structs.
-        </para>
-        <para>
-          Additionally, some discussion about the callbacks and
-          the general programming model should be in here, including
-          the definition of ieee80211_ops which will be referred to
-          a lot.
-        </para>
-        <para>
-          Finally, a discussion of hardware capabilities should be done
-          with references to other parts of the book.
-        </para>
-  <!-- intentionally multiple !F lines to get proper order -->
-!Finclude/net/mac80211.h ieee80211_hw
-!Finclude/net/mac80211.h ieee80211_hw_flags
-!Finclude/net/mac80211.h SET_IEEE80211_DEV
-!Finclude/net/mac80211.h SET_IEEE80211_PERM_ADDR
-!Finclude/net/mac80211.h ieee80211_ops
-!Finclude/net/mac80211.h ieee80211_alloc_hw
-!Finclude/net/mac80211.h ieee80211_register_hw
-!Finclude/net/mac80211.h ieee80211_unregister_hw
-!Finclude/net/mac80211.h ieee80211_free_hw
-      </chapter>
-
-      <chapter id="phy-handling">
-        <title>PHY configuration</title>
-        <para>TBD</para>
-        <para>
-          This chapter should describe PHY handling including
-          start/stop callbacks and the various structures used.
-        </para>
-!Finclude/net/mac80211.h ieee80211_conf
-!Finclude/net/mac80211.h ieee80211_conf_flags
-      </chapter>
-
-      <chapter id="iface-handling">
-        <title>Virtual interfaces</title>
-        <para>TBD</para>
-        <para>
-          This chapter should describe virtual interface basics
-          that are relevant to the driver (VLANs, MGMT etc are not.)
-          It should explain the use of the add_iface/remove_iface
-          callbacks as well as the interface configuration callbacks.
-        </para>
-        <para>Things related to AP mode should be discussed there.</para>
-        <para>
-          Things related to supporting multiple interfaces should be
-          in the appropriate chapter, a BIG FAT note should be here about
-          this though and the recommendation to allow only a single
-          interface in STA mode at first!
-        </para>
-!Finclude/net/mac80211.h ieee80211_vif
-      </chapter>
-
-      <chapter id="rx-tx">
-        <title>Receive and transmit processing</title>
-        <sect1>
-          <title>what should be here</title>
-          <para>TBD</para>
-          <para>
-            This should describe the receive and transmit
-            paths in mac80211/the drivers as well as
-            transmit status handling.
-          </para>
-        </sect1>
-        <sect1>
-          <title>Frame format</title>
-!Pinclude/net/mac80211.h Frame format
-        </sect1>
-        <sect1>
-          <title>Packet alignment</title>
-!Pnet/mac80211/rx.c Packet alignment
-        </sect1>
-        <sect1>
-          <title>Calling into mac80211 from interrupts</title>
-!Pinclude/net/mac80211.h Calling mac80211 from interrupts
-        </sect1>
-        <sect1>
-          <title>functions/definitions</title>
-!Finclude/net/mac80211.h ieee80211_rx_status
-!Finclude/net/mac80211.h mac80211_rx_flags
-!Finclude/net/mac80211.h mac80211_tx_info_flags
-!Finclude/net/mac80211.h mac80211_tx_control_flags
-!Finclude/net/mac80211.h mac80211_rate_control_flags
-!Finclude/net/mac80211.h ieee80211_tx_rate
-!Finclude/net/mac80211.h ieee80211_tx_info
-!Finclude/net/mac80211.h ieee80211_tx_info_clear_status
-!Finclude/net/mac80211.h ieee80211_rx
-!Finclude/net/mac80211.h ieee80211_rx_ni
-!Finclude/net/mac80211.h ieee80211_rx_irqsafe
-!Finclude/net/mac80211.h ieee80211_tx_status
-!Finclude/net/mac80211.h ieee80211_tx_status_ni
-!Finclude/net/mac80211.h ieee80211_tx_status_irqsafe
-!Finclude/net/mac80211.h ieee80211_rts_get
-!Finclude/net/mac80211.h ieee80211_rts_duration
-!Finclude/net/mac80211.h ieee80211_ctstoself_get
-!Finclude/net/mac80211.h ieee80211_ctstoself_duration
-!Finclude/net/mac80211.h ieee80211_generic_frame_duration
-!Finclude/net/mac80211.h ieee80211_wake_queue
-!Finclude/net/mac80211.h ieee80211_stop_queue
-!Finclude/net/mac80211.h ieee80211_wake_queues
-!Finclude/net/mac80211.h ieee80211_stop_queues
-!Finclude/net/mac80211.h ieee80211_queue_stopped
-        </sect1>
-      </chapter>
-
-      <chapter id="filters">
-        <title>Frame filtering</title>
-!Pinclude/net/mac80211.h Frame filtering
-!Finclude/net/mac80211.h ieee80211_filter_flags
-      </chapter>
-
-      <chapter id="workqueue">
-        <title>The mac80211 workqueue</title>
-!Pinclude/net/mac80211.h mac80211 workqueue
-!Finclude/net/mac80211.h ieee80211_queue_work
-!Finclude/net/mac80211.h ieee80211_queue_delayed_work
-      </chapter>
-    </part>
-
-    <part id="advanced">
-      <title>Advanced driver interface</title>
-      <partintro>
-        <para>
-         Information contained within this part of the book is
-         of interest only for advanced interaction of mac80211
-         with drivers to exploit more hardware capabilities and
-         improve performance.
-        </para>
-      </partintro>
-
-      <chapter id="led-support">
-        <title>LED support</title>
-        <para>
-         Mac80211 supports various ways of blinking LEDs. Wherever possible,
-         device LEDs should be exposed as LED class devices and hooked up to
-         the appropriate trigger, which will then be triggered appropriately
-         by mac80211.
-        </para>
-!Finclude/net/mac80211.h ieee80211_get_tx_led_name
-!Finclude/net/mac80211.h ieee80211_get_rx_led_name
-!Finclude/net/mac80211.h ieee80211_get_assoc_led_name
-!Finclude/net/mac80211.h ieee80211_get_radio_led_name
-!Finclude/net/mac80211.h ieee80211_tpt_blink
-!Finclude/net/mac80211.h ieee80211_tpt_led_trigger_flags
-!Finclude/net/mac80211.h ieee80211_create_tpt_led_trigger
-      </chapter>
-
-      <chapter id="hardware-crypto-offload">
-        <title>Hardware crypto acceleration</title>
-!Pinclude/net/mac80211.h Hardware crypto acceleration
-  <!-- intentionally multiple !F lines to get proper order -->
-!Finclude/net/mac80211.h set_key_cmd
-!Finclude/net/mac80211.h ieee80211_key_conf
-!Finclude/net/mac80211.h ieee80211_key_flags
-!Finclude/net/mac80211.h ieee80211_get_tkip_p1k
-!Finclude/net/mac80211.h ieee80211_get_tkip_p1k_iv
-!Finclude/net/mac80211.h ieee80211_get_tkip_p2k
-      </chapter>
-
-      <chapter id="powersave">
-        <title>Powersave support</title>
-!Pinclude/net/mac80211.h Powersave support
-      </chapter>
-
-      <chapter id="beacon-filter">
-        <title>Beacon filter support</title>
-!Pinclude/net/mac80211.h Beacon filter support
-!Finclude/net/mac80211.h ieee80211_beacon_loss
-      </chapter>
-
-      <chapter id="qos">
-        <title>Multiple queues and QoS support</title>
-        <para>TBD</para>
-!Finclude/net/mac80211.h ieee80211_tx_queue_params
-      </chapter>
-
-      <chapter id="AP">
-        <title>Access point mode support</title>
-        <para>TBD</para>
-        <para>Some parts of the if_conf should be discussed here instead</para>
-        <para>
-          Insert notes about VLAN interfaces with hw crypto here or
-          in the hw crypto chapter.
-        </para>
-      <section id="ps-client">
-        <title>support for powersaving clients</title>
-!Pinclude/net/mac80211.h AP support for powersaving clients
-!Finclude/net/mac80211.h ieee80211_get_buffered_bc
-!Finclude/net/mac80211.h ieee80211_beacon_get
-!Finclude/net/mac80211.h ieee80211_sta_eosp
-!Finclude/net/mac80211.h ieee80211_frame_release_type
-!Finclude/net/mac80211.h ieee80211_sta_ps_transition
-!Finclude/net/mac80211.h ieee80211_sta_ps_transition_ni
-!Finclude/net/mac80211.h ieee80211_sta_set_buffered
-!Finclude/net/mac80211.h ieee80211_sta_block_awake
-      </section>
-      </chapter>
-
-      <chapter id="multi-iface">
-        <title>Supporting multiple virtual interfaces</title>
-        <para>TBD</para>
-        <para>
-          Note: WDS with identical MAC address should almost always be OK
-        </para>
-        <para>
-          Insert notes about having multiple virtual interfaces with
-          different MAC addresses here, note which configurations are
-          supported by mac80211, add notes about supporting hw crypto
-          with it.
-        </para>
-!Finclude/net/mac80211.h ieee80211_iterate_active_interfaces
-!Finclude/net/mac80211.h ieee80211_iterate_active_interfaces_atomic
-      </chapter>
-
-      <chapter id="station-handling">
-        <title>Station handling</title>
-        <para>TODO</para>
-!Finclude/net/mac80211.h ieee80211_sta
-!Finclude/net/mac80211.h sta_notify_cmd
-!Finclude/net/mac80211.h ieee80211_find_sta
-!Finclude/net/mac80211.h ieee80211_find_sta_by_ifaddr
-      </chapter>
-
-      <chapter id="hardware-scan-offload">
-        <title>Hardware scan offload</title>
-        <para>TBD</para>
-!Finclude/net/mac80211.h ieee80211_scan_completed
-      </chapter>
-
-      <chapter id="aggregation">
-        <title>Aggregation</title>
-        <sect1>
-          <title>TX A-MPDU aggregation</title>
-!Pnet/mac80211/agg-tx.c TX A-MPDU aggregation
-!Cnet/mac80211/agg-tx.c
-        </sect1>
-        <sect1>
-          <title>RX A-MPDU aggregation</title>
-!Pnet/mac80211/agg-rx.c RX A-MPDU aggregation
-!Cnet/mac80211/agg-rx.c
-!Finclude/net/mac80211.h ieee80211_ampdu_mlme_action
-        </sect1>
-      </chapter>
-
-      <chapter id="smps">
-        <title>Spatial Multiplexing Powersave (SMPS)</title>
-!Pinclude/net/mac80211.h Spatial multiplexing power save
-!Finclude/net/mac80211.h ieee80211_request_smps
-!Finclude/net/mac80211.h ieee80211_smps_mode
-      </chapter>
-    </part>
-
-    <part id="rate-control">
-      <title>Rate control interface</title>
-      <partintro>
-        <para>TBD</para>
-        <para>
-         This part of the book describes the rate control algorithm
-         interface and how it relates to mac80211 and drivers.
-        </para>
-      </partintro>
-      <chapter id="ratecontrol-api">
-        <title>Rate Control API</title>
-        <para>TBD</para>
-!Finclude/net/mac80211.h ieee80211_start_tx_ba_session
-!Finclude/net/mac80211.h ieee80211_start_tx_ba_cb_irqsafe
-!Finclude/net/mac80211.h ieee80211_stop_tx_ba_session
-!Finclude/net/mac80211.h ieee80211_stop_tx_ba_cb_irqsafe
-!Finclude/net/mac80211.h ieee80211_rate_control_changed
-!Finclude/net/mac80211.h ieee80211_tx_rate_control
-!Finclude/net/mac80211.h rate_control_send_low
-      </chapter>
-    </part>
-
-    <part id="internal">
-      <title>Internals</title>
-      <partintro>
-        <para>TBD</para>
-        <para>
-         This part of the book describes mac80211 internals.
-        </para>
-      </partintro>
-
-      <chapter id="key-handling">
-        <title>Key handling</title>
-        <sect1>
-          <title>Key handling basics</title>
-!Pnet/mac80211/key.c Key handling basics
-        </sect1>
-        <sect1>
-          <title>MORE TBD</title>
-          <para>TBD</para>
-        </sect1>
-      </chapter>
-
-      <chapter id="rx-processing">
-        <title>Receive processing</title>
-        <para>TBD</para>
-      </chapter>
-
-      <chapter id="tx-processing">
-        <title>Transmit processing</title>
-        <para>TBD</para>
-      </chapter>
-
-      <chapter id="sta-info">
-        <title>Station info handling</title>
-        <sect1>
-          <title>Programming information</title>
-!Fnet/mac80211/sta_info.h sta_info
-!Fnet/mac80211/sta_info.h ieee80211_sta_info_flags
-        </sect1>
-        <sect1>
-          <title>STA information lifetime rules</title>
-!Pnet/mac80211/sta_info.c STA information lifetime rules
-        </sect1>
-      </chapter>
-
-      <chapter id="aggregation-internals">
-        <title>Aggregation</title>
-!Fnet/mac80211/sta_info.h sta_ampdu_mlme
-!Fnet/mac80211/sta_info.h tid_ampdu_tx
-!Fnet/mac80211/sta_info.h tid_ampdu_rx
-      </chapter>
-
-      <chapter id="synchronisation">
-        <title>Synchronisation</title>
-        <para>TBD</para>
-        <para>Locking, lots of RCU</para>
-      </chapter>
-    </part>
-  </book>
-</set>
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 736f591..fdf8232 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -12,7 +12,7 @@
 	    kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
 	    gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
 	    genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
-	    80211.xml debugobjects.xml sh.xml regulator.xml \
+	    debugobjects.xml sh.xml regulator.xml \
 	    alsa-driver-api.xml writing-an-alsa-driver.xml \
 	    tracepoint.xml w1.xml \
 	    writing_musb_glue_layer.xml crypto-API.xml iio.xml
diff --git a/Documentation/DocBook/crypto-API.tmpl b/Documentation/DocBook/crypto-API.tmpl
index fb2a152..088b79c 100644
--- a/Documentation/DocBook/crypto-API.tmpl
+++ b/Documentation/DocBook/crypto-API.tmpl
@@ -797,7 +797,8 @@
      include/linux/crypto.h and their definition can be seen below.
      The former function registers a single transformation, while
      the latter works on an array of transformation descriptions.
-     The latter is useful when registering transformations in bulk.
+     The latter is useful when registering transformations in bulk,
+     for example when a driver implements multiple transformations.
     </para>
 
     <programlisting>
@@ -822,18 +823,31 @@
     </para>
 
     <para>
-     The bulk registration / unregistration functions require
-     that struct crypto_alg is an array of count size. These
-     functions simply loop over that array and register /
-     unregister each individual algorithm. If an error occurs,
-     the loop is terminated at the offending algorithm definition.
-     That means, the algorithms prior to the offending algorithm
-     are successfully registered. Note, the caller has no way of
-     knowing which cipher implementations have successfully
-     registered. If this is important to know, the caller should
-     loop through the different implementations using the single
-     instance *_alg functions for each individual implementation.
+     The bulk registration/unregistration functions
+     register/unregister each transformation in the given array of
+     length count.  They handle errors as follows:
     </para>
+    <itemizedlist>
+     <listitem>
+      <para>
+       crypto_register_algs() succeeds if and only if it
+       successfully registers all the given transformations. If an
+       error occurs partway through, then it rolls back successful
+       registrations before returning the error code. Note that if
+       a driver needs to handle registration errors for individual
+       transformations, then it will need to use the non-bulk
+       function crypto_register_alg() instead.
+      </para>
+     </listitem>
+     <listitem>
+      <para>
+       crypto_unregister_algs() tries to unregister all the given
+       transformations, continuing on error. It logs errors and
+       always returns zero.
+      </para>
+     </listitem>
+    </itemizedlist>
+
    </sect1>
 
    <sect1><title>Single-Block Symmetric Ciphers [CIPHER]</title>
diff --git a/Documentation/DocBook/kernel-hacking.tmpl b/Documentation/DocBook/kernel-hacking.tmpl
index 589b40c..2a27227 100644
--- a/Documentation/DocBook/kernel-hacking.tmpl
+++ b/Documentation/DocBook/kernel-hacking.tmpl
@@ -483,7 +483,7 @@
     <function>get_user()</function>
     /
     <function>put_user()</function>
-    <filename class="headerfile">include/asm/uaccess.h</filename>
+    <filename class="headerfile">include/linux/uaccess.h</filename>
    </title>  
 
    <para>
diff --git a/Documentation/Makefile b/Documentation/Makefile
index de955e1..c2a4691 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -1,3 +1 @@
-subdir-y := accounting auxdisplay blackfin \
-	filesystems filesystems ia64 laptops mic misc-devices \
-	networking pcmcia prctl ptp timers vDSO watchdog
+subdir-y :=
diff --git a/Documentation/RCU/lockdep-splat.txt b/Documentation/RCU/lockdep-splat.txt
index bf90611..238e9f6 100644
--- a/Documentation/RCU/lockdep-splat.txt
+++ b/Documentation/RCU/lockdep-splat.txt
@@ -57,7 +57,7 @@
  [<ffffffff817db154>] kernel_thread_helper+0x4/0x10
  [<ffffffff81066430>] ? finish_task_switch+0x80/0x110
  [<ffffffff817d9c04>] ? retint_restore_args+0xe/0xe
- [<ffffffff81097510>] ? __init_kthread_worker+0x70/0x70
+ [<ffffffff81097510>] ? __kthread_init_worker+0x70/0x70
  [<ffffffff817db150>] ? gs_change+0xb/0xb
 
 Line 2776 of block/cfq-iosched.c in v3.0-rc5 is as follows:
diff --git a/Documentation/accounting/Makefile b/Documentation/accounting/Makefile
deleted file mode 100644
index 7e232cb..0000000
--- a/Documentation/accounting/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# List of programs to build
-hostprogs-y := getdelays
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-HOSTCFLAGS_getdelays.o += -I$(objtree)/usr/include
diff --git a/Documentation/accounting/delay-accounting.txt b/Documentation/accounting/delay-accounting.txt
index 8a12f07..042ea59 100644
--- a/Documentation/accounting/delay-accounting.txt
+++ b/Documentation/accounting/delay-accounting.txt
@@ -54,9 +54,9 @@
 task of a thread group, the per-tgid statistics are also sent. More details
 are given in the taskstats interface description.
 
-The getdelays.c userspace utility in this directory allows simple commands to
-be run and the corresponding delay statistics to be displayed. It also serves
-as an example of using the taskstats interface.
+The getdelays.c userspace utility in tools/accounting directory allows simple
+commands to be run and the corresponding delay statistics to be displayed. It
+also serves as an example of using the taskstats interface.
 
 Usage
 -----
diff --git a/Documentation/arm/00-INDEX b/Documentation/arm/00-INDEX
index dea011c..b6e69fd 100644
--- a/Documentation/arm/00-INDEX
+++ b/Documentation/arm/00-INDEX
@@ -8,8 +8,6 @@
 	- ARM Interrupt subsystem documentation
 IXP4xx
 	- Intel IXP4xx Network processor.
-Makefile
-	- Build sourcefiles as part of the Documentation-build for arm
 Netwinder
 	- Netwinder specific documentation
 Porting
diff --git a/Documentation/auxdisplay/Makefile b/Documentation/auxdisplay/Makefile
deleted file mode 100644
index ada4dac..0000000
--- a/Documentation/auxdisplay/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# List of programs to build
-hostprogs-y := cfag12864b-example
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-HOSTCFLAGS_cfag12864b-example.o += -I$(objtree)/usr/include
diff --git a/Documentation/auxdisplay/cfag12864b b/Documentation/auxdisplay/cfag12864b
index eb7be39..12fd51b 100644
--- a/Documentation/auxdisplay/cfag12864b
+++ b/Documentation/auxdisplay/cfag12864b
@@ -101,5 +101,5 @@
 Also, you can mmap the framebuffer: open & mmap, munmap & close...
 which is the best option for most uses.
 
-Check Documentation/auxdisplay/cfag12864b-example.c
+Check samples/auxdisplay/cfag12864b-example.c
 for a real working userspace complete program with usage examples.
diff --git a/Documentation/blackfin/00-INDEX b/Documentation/blackfin/00-INDEX
index c54fcdd..265a1ef 100644
--- a/Documentation/blackfin/00-INDEX
+++ b/Documentation/blackfin/00-INDEX
@@ -1,10 +1,6 @@
 00-INDEX
 	- This file
-Makefile
-	- Makefile for gptimers example file.
 bfin-gpio-notes.txt
 	- Notes in developing/using bfin-gpio driver.
 bfin-spi-notes.txt
 	- Notes for using bfin spi bus driver.
-gptimers-example.c
-	- gptimers example
diff --git a/Documentation/blackfin/Makefile b/Documentation/blackfin/Makefile
deleted file mode 100644
index 6782c58..0000000
--- a/Documentation/blackfin/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-ifneq ($(CONFIG_BLACKFIN),)
-ifneq ($(CONFIG_BFIN_GPTIMERS),)
-obj-m := gptimers-example.o
-endif
-endif
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 0cc8765..bf6f310 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -342,6 +342,8 @@
      'The kernel development community', 'manual'),
     ('gpu/index', 'gpu.tex', 'Linux GPU Driver Developer\'s Guide',
      'The kernel development community', 'manual'),
+    ('media/index', 'media.tex', 'Linux Media Subsystem Documentation',
+     'The kernel development community', 'manual'),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
diff --git a/Documentation/dev-tools/kmemleak.rst b/Documentation/dev-tools/kmemleak.rst
index 1788722..b2391b8 100644
--- a/Documentation/dev-tools/kmemleak.rst
+++ b/Documentation/dev-tools/kmemleak.rst
@@ -162,6 +162,15 @@
 - ``kmemleak_alloc_recursive`` - as kmemleak_alloc but checks the recursiveness
 - ``kmemleak_free_recursive``	 - as kmemleak_free but checks the recursiveness
 
+The following functions take a physical address as the object pointer
+and only perform the corresponding action if the address has a lowmem
+mapping:
+
+- ``kmemleak_alloc_phys``
+- ``kmemleak_free_part_phys``
+- ``kmemleak_not_leak_phys``
+- ``kmemleak_ignore_phys``
+
 Dealing with false positives/negatives
 --------------------------------------
 
diff --git a/Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt b/Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt
new file mode 100644
index 0000000..b69bb68
--- /dev/null
+++ b/Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt
@@ -0,0 +1,17 @@
+Binding for ASCII LCD displays on Imagination Technologies boards
+
+Required properties:
+- compatible : should be one of:
+    "img,boston-lcd"
+    "mti,malta-lcd"
+    "mti,sead3-lcd"
+
+Required properties for "img,boston-lcd":
+- reg : memory region locating the device registers
+
+Required properties for "mti,malta-lcd" or "mti,sead3-lcd":
+- regmap: phandle of the system controller containing the LCD registers
+- offset: offset in bytes to the LCD registers within the system controller
+
+The layout of the registers & properties of the display are determined
+from the compatible string, making this binding somewhat trivial.
diff --git a/Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt b/Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt
new file mode 100644
index 0000000..003bc24
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/dumb-vga-dac.txt
@@ -0,0 +1,48 @@
+Dumb RGB to VGA DAC bridge
+---------------------------
+
+This binding is aimed for dumb RGB to VGA DAC based bridges that do not require
+any configuration.
+
+Required properties:
+
+- compatible: Must be "dumb-vga-dac"
+
+Required nodes:
+
+This device has two video ports. Their connections are modelled using the OF
+graph bindings specified in Documentation/devicetree/bindings/graph.txt.
+
+- Video port 0 for RGB input
+- Video port 1 for VGA output
+
+
+Example
+-------
+
+bridge {
+	compatible = "dumb-vga-dac";
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	ports {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		port@0 {
+			reg = <0>;
+
+			vga_bridge_in: endpoint {
+				remote-endpoint = <&tcon0_out_vga>;
+			};
+		};
+
+		port@1 {
+			reg = <1>;
+
+			vga_bridge_out: endpoint {
+				remote-endpoint = <&vga_con_in>;
+			};
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/display/bridge/tda998x.txt b/Documentation/devicetree/bindings/display/bridge/tda998x.txt
index e178e6b..24cc246 100644
--- a/Documentation/devicetree/bindings/display/bridge/tda998x.txt
+++ b/Documentation/devicetree/bindings/display/bridge/tda998x.txt
@@ -21,8 +21,19 @@
   - video-ports: 24 bits value which defines how the video controller
 	output is wired to the TDA998x input - default: <0x230145>
 
+  - audio-ports: array of 8-bit values, 2 values per one DAI[1].
+	The first value defines the DAI type: TDA998x_SPDIF or TDA998x_I2S[2].
+	The second value defines the tda998x AP_ENA reg content when the DAI
+	in question is used. The implementation allows one or two DAIs. If two
+	DAIs are defined, they must be of different type.
+
+[1] Documentation/sound/alsa/soc/DAI.txt
+[2] include/dt-bindings/display/tda998x.h
+
 Example:
 
+#include <dt-bindings/display/tda998x.h>
+
 	tda998x: hdmi-encoder {
 		compatible = "nxp,tda998x";
 		reg = <0x70>;
@@ -30,4 +41,11 @@
 		interrupts = <27 2>;		/* falling edge */
 		pinctrl-0 = <&pmx_camera>;
 		pinctrl-names = "default";
+		video-ports = <0x230145>;
+
+		#sound-dai-cells = <2>;
+			     /*	DAI-format	AP_ENA reg value */
+		audio-ports = <	TDA998x_SPDIF	0x04
+				TDA998x_I2S	0x03>;
+
 	};
diff --git a/Documentation/devicetree/bindings/display/msm/hdmi.txt b/Documentation/devicetree/bindings/display/msm/hdmi.txt
index b63f614..2ad5789 100644
--- a/Documentation/devicetree/bindings/display/msm/hdmi.txt
+++ b/Documentation/devicetree/bindings/display/msm/hdmi.txt
@@ -14,17 +14,16 @@
 - power-domains: Should be <&mmcc MDSS_GDSC>.
 - clocks: device clocks
   See ../clocks/clock-bindings.txt for details.
-- qcom,hdmi-tx-ddc-clk-gpio: ddc clk pin
-- qcom,hdmi-tx-ddc-data-gpio: ddc data pin
-- qcom,hdmi-tx-hpd-gpio: hpd pin
 - core-vdda-supply: phandle to supply regulator
 - hdmi-mux-supply: phandle to mux regulator
 - phys: the phandle for the HDMI PHY device
 - phy-names: the name of the corresponding PHY device
 
 Optional properties:
-- qcom,hdmi-tx-mux-en-gpio: hdmi mux enable pin
-- qcom,hdmi-tx-mux-sel-gpio: hdmi mux select pin
+- hpd-gpios: hpd pin
+- qcom,hdmi-tx-mux-en-gpios: hdmi mux enable pin
+- qcom,hdmi-tx-mux-sel-gpios: hdmi mux select pin
+- qcom,hdmi-tx-mux-lpm-gpios: hdmi mux lpm pin
 - power-domains: reference to the power domain(s), if available.
 - pinctrl-names: the pin control state names; should contain "default"
 - pinctrl-0: the default pinctrl state (active)
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g101ice-l01.txt b/Documentation/devicetree/bindings/display/panel/innolux,g101ice-l01.txt
new file mode 100644
index 0000000..9e75904
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/innolux,g101ice-l01.txt
@@ -0,0 +1,7 @@
+Innolux Corporation 10.1" G101ICE-L01 WXGA (1280x800) LVDS panel
+
+Required properties:
+- compatible: should be "innolux,g101ice-l01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.txt b/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.txt
new file mode 100644
index 0000000..4989c91d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.txt
@@ -0,0 +1,31 @@
+JDI model LT070ME05000 1200x1920 7" DSI Panel
+
+Required properties:
+- compatible: should be "jdi,lt070me05000"
+- vddp-supply: phandle of the regulator that provides the supply voltage
+  Power IC supply (3-5V)
+- iovcc-supply: phandle of the regulator that provides the supply voltage
+  IOVCC , power supply for LCM (1.8V)
+- enable-gpios: phandle of gpio for enable line
+  LED_EN, LED backlight enable, High active
+- reset-gpios: phandle of gpio for reset line
+  This should be 8mA, gpio can be configured using mux, pinctrl, pinctrl-names
+  XRES, Reset, Low active
+- dcdc-en-gpios: phandle of the gpio for power ic line
+  Power IC supply enable, High active
+
+Example:
+
+	dsi0: qcom,mdss_dsi@4700000 {
+		panel@0 {
+			compatible = "jdi,lt070me05000";
+			reg = <0>;
+
+			vddp-supply = <&pm8921_l17>;
+			iovcc-supply = <&pm8921_lvs7>;
+
+			enable-gpios = <&pm8921_gpio 36 GPIO_ACTIVE_HIGH>;
+			reset-gpios = <&tlmm_pinmux 54 GPIO_ACTIVE_LOW>;
+			dcdc-en-gpios = <&pm8921_gpio 23 GPIO_ACTIVE_HIGH>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
index 5489b59..9eb3f0a 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
@@ -6,8 +6,10 @@
 
 Required properties:
 - compatible: value should be one of the following
-		"rockchip,rk3288-vop";
 		"rockchip,rk3036-vop";
+		"rockchip,rk3288-vop";
+		"rockchip,rk3399-vop-big";
+		"rockchip,rk3399-vop-lit";
 
 - interrupts: should contain a list of all VOP IP block interrupts in the
 		 order: VSYNC, LCD_SYSTEM. The interrupt specifier
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index df8f4ae..b95696d 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -26,13 +26,14 @@
 The TCON acts as a timing controller for RGB, LVDS and TV interfaces.
 
 Required properties:
- - compatible: value should be "allwinner,sun5i-a13-tcon".
+ - compatible: value must be either:
+   * allwinner,sun5i-a13-tcon
+   * allwinner,sun8i-a33-tcon
  - reg: base address and size of memory-mapped region
  - interrupts: interrupt associated to this IP
  - clocks: phandles to the clocks feeding the TCON. Three are needed:
    - 'ahb': the interface clocks
    - 'tcon-ch0': The clock driving the TCON channel 0
-   - 'tcon-ch1': The clock driving the TCON channel 1
  - resets: phandles to the reset controllers driving the encoder
    - "lcd": the reset line for the TCON channel 0
 
@@ -49,6 +50,33 @@
   second the block connected to the TCON channel 1 (usually the TV
   encoder)
 
+On the A13, there is one more clock required:
+   - 'tcon-ch1': The clock driving the TCON channel 1
+
+DRC
+---
+
+The DRC (Dynamic Range Controller), found in the latest Allwinner SoCs
+(A31, A23, A33), allows to dynamically adjust pixel
+brightness/contrast based on histogram measurements for LCD content
+adaptive backlight control.
+
+
+Required properties:
+  - compatible: value must be one of:
+    * allwinner,sun8i-a33-drc
+  - reg: base address and size of the memory-mapped region.
+  - interrupts: interrupt associated to this IP
+  - clocks: phandles to the clocks feeding the DRC
+    * ahb: the DRC interface clock
+    * mod: the DRC module clock
+    * ram: the DRC DRAM clock
+  - clock-names: the clock names mentioned above
+  - resets: phandles to the reset line driving the DRC
+
+- ports: A ports node with endpoint definitions as defined in
+  Documentation/devicetree/bindings/media/video-interfaces.txt. The
+  first port should be the input endpoints, the second one the outputs
 
 Display Engine Backend
 ----------------------
@@ -59,6 +87,7 @@
 Required properties:
   - compatible: value must be one of:
     * allwinner,sun5i-a13-display-backend
+    * allwinner,sun8i-a33-display-backend
   - reg: base address and size of the memory-mapped region.
   - clocks: phandles to the clocks feeding the frontend and backend
     * ahb: the backend interface clock
@@ -71,6 +100,14 @@
   Documentation/devicetree/bindings/media/video-interfaces.txt. The
   first port should be the input endpoints, the second one the output
 
+On the A33, some additional properties are required:
+  - reg needs to have an additional region corresponding to the SAT
+  - reg-names need to be set, with "be" and "sat"
+  - clocks and clock-names need to have a phandle to the SAT bus
+    clocks, whose name will be "sat"
+  - resets and reset-names need to have a phandle to the SAT bus
+    resets, whose name will be "sat"
+
 Display Engine Frontend
 -----------------------
 
@@ -80,6 +117,7 @@
 Required properties:
   - compatible: value must be one of:
     * allwinner,sun5i-a13-display-frontend
+    * allwinner,sun8i-a33-display-frontend
   - reg: base address and size of the memory-mapped region.
   - interrupts: interrupt associated to this IP
   - clocks: phandles to the clocks feeding the frontend and backend
@@ -104,6 +142,7 @@
 Required properties:
   - compatible: value must be one of:
     * allwinner,sun5i-a13-display-engine
+    * allwinner,sun8i-a33-display-engine
 
   - allwinner,pipelines: list of phandle to the display engine
     frontends available.
diff --git a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
index 2136ee8..a83abd7 100644
--- a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
+++ b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
@@ -17,6 +17,18 @@
    the lcd controller.
  - max-pixelclock: The maximum pixel clock that can be supported
    by the lcd controller in KHz.
+ - blue-and-red-wiring: Recognized values "straight" or "crossed".
+   This property deals with the LCDC revision 2 (found on AM335x)
+   color errata [1].
+    - "straight" indicates normal wiring that supports RGB565,
+      BGR888, and XBGR8888 color formats.
+    - "crossed" indicates wiring that has blue and red wires
+      crossed. This setup supports BGR565, RGB888 and XRGB8888
+      formats.
+    - If the property is not present or its value is not recognized
+      the legacy mode is assumed. This configuration supports RGB565,
+      RGB888 and XRGB8888 formats. However, depending on wiring, the red
+      and blue colors are swapped in either 16 or 24-bit color modes.
 
 Optional nodes:
 
@@ -24,6 +36,18 @@
    binding follows Documentation/devicetree/bindings/graph.txt and
    suppors a single port with a single endpoint.
 
+ - See also Documentation/devicetree/bindings/display/tilcdc/panel.txt and
+   Documentation/devicetree/bindings/display/tilcdc/tfp410.txt for connecting
+   tfp410 DVI encoder or lcd panel to lcdc
+
+[1] There is an errata about AM335x color wiring. For 16-bit color mode
+    the wires work as they should (LCD_DATA[0:4] is for Blue[3:7]),
+    but for 24 bit color modes the wiring of blue and red components is
+    crossed and LCD_DATA[0:4] is for Red[3:7] and LCD_DATA[11:15] is
+    for Blue[3-7]. For more details see section 3.1.1 in AM335x
+    Silicon Errata:
+    http://www.ti.com/general/docs/lit/getliterature.tsp?baseLiteratureNumber=sprz360
+
 Example:
 
 	fb: fb@4830e000 {
@@ -33,6 +57,8 @@
 		interrupts = <36>;
 		ti,hwmods = "lcdc";
 
+		blue-and-red-wiring = "crossed";
+
 		port {
 			lcdc_0: endpoint@0 {
 				remote-endpoint = <&hdmi_0>;
diff --git a/Documentation/devicetree/bindings/i2c/i2c.txt b/Documentation/devicetree/bindings/i2c/i2c.txt
index f31b2ad..5fa691e 100644
--- a/Documentation/devicetree/bindings/i2c/i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c.txt
@@ -32,6 +32,14 @@
 - clock-frequency
 	frequency of bus clock in Hz.
 
+- i2c-bus
+	For I2C adapters that have child nodes that are a mixture of both I2C
+	devices and non-I2C devices, the 'i2c-bus' subnode can be used for
+	populating I2C devices. If the 'i2c-bus' subnode is present, only
+	subnodes of this will be considered as I2C slaves. The properties,
+	'#address-cells' and '#size-cells' must be defined under this subnode
+	if present.
+
 - i2c-scl-falling-time-ns
 	Number of nanoseconds the SCL signal takes to fall; t(f) in the I2C
 	specification.
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index 1416c6a..fbbad64 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -51,7 +51,6 @@
 gmt,g751		G751: Digital Temperature Sensor and Thermal Watchdog with Two-Wire Interface
 infineon,slb9635tt	Infineon SLB9635 (Soft-) I2C TPM (old protocol, max 100khz)
 infineon,slb9645tt	Infineon SLB9645 I2C TPM (new protocol, max 400khz)
-isil,isl12057		Intersil ISL12057 I2C RTC Chip
 isil,isl29028		Intersil ISL29028 Ambient Light and Proximity Sensor
 maxim,ds1050		5 Bit Programmable, Pulse-Width Modulator
 maxim,max1237		Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
diff --git a/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt b/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
index f97993b..d3b273e 100644
--- a/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
+++ b/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
@@ -14,6 +14,7 @@
 representing a ethernet device.
 - dsaf-handle: phandle, specifies a reference to a node
 representing a dsaf device.
+- node_guid: a number that uniquely identifies a device or component
 - #address-cells: must be 2
 - #size-cells: must be 2
 Optional properties:
@@ -32,6 +33,7 @@
 			dma-coherent;
 			eth-handle = <&eth2 &eth3 &eth4 &eth5 &eth6 &eth7>;
 			dsaf-handle = <&soc0_dsa>;
+			node-guid = [00 9A CD 00 00 01 02 03];
 			#address-cells = <2>;
 			#size-cells = <2>;
 			interrupt-parent = <&mbigen_dsa>;
diff --git a/Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt b/Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt
new file mode 100644
index 0000000..7b8944c
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt
@@ -0,0 +1,21 @@
+* MELFAS MIP4 Touchscreen
+
+Required properties:
+- compatible: must be "melfas,mip4_ts"
+- reg: I2C slave address of the chip (0x48 or 0x34)
+- interrupt-parent: interrupt controller to which the chip is connected
+- interrupts: interrupt to which the chip is connected
+
+Optional properties:
+- ce-gpios: GPIO connected to the CE (chip enable) pin of the chip
+
+Example:
+	i2c@00000000 {
+		touchscreen: melfas_mip4@48 {
+			compatible = "melfas,mip4_ts";
+			reg = <0x48>;
+			interrupt-parent = <&gpio>;
+			interrupts = <0 IRQ_TYPE_EDGE_FALLING>;
+			ce-gpios = <&gpio 0 GPIO_ACTIVE_HIGH>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
index 7b94c88..be57550 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
@@ -27,6 +27,12 @@
                       * "cmdq-sync" - CMD_SYNC complete
                       * "gerror"    - Global Error activated
 
+- #iommu-cells      : See the generic IOMMU binding described in
+                        devicetree/bindings/pci/pci-iommu.txt
+                      for details. For SMMUv3, must be 1, with each cell
+                      describing a single stream ID. All possible stream
+                      IDs which a device may emit must be described.
+
 ** SMMUv3 optional properties:
 
 - dma-coherent      : Present if DMA operations made by the SMMU (page
@@ -54,6 +60,6 @@
                              <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>;
                 interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
                 dma-coherent;
-                #iommu-cells = <0>;
+                #iommu-cells = <1>;
                 msi-parent = <&its 0xff0000>;
         };
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 19fe6f2..e862d148 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -35,12 +35,16 @@
                   interrupt per context bank. In the case of a single,
                   combined interrupt, it must be listed multiple times.
 
-- mmu-masters   : A list of phandles to device nodes representing bus
-                  masters for which the SMMU can provide a translation
-                  and their corresponding StreamIDs (see example below).
-                  Each device node linked from this list must have a
-                  "#stream-id-cells" property, indicating the number of
-                  StreamIDs associated with it.
+- #iommu-cells  : See Documentation/devicetree/bindings/iommu/iommu.txt
+                  for details. With a value of 1, each "iommus" entry
+                  represents a distinct stream ID emitted by that device
+                  into the relevant SMMU.
+
+                  SMMUs with stream matching support and complex masters
+                  may use a value of 2, where the second cell represents
+                  an SMR mask to combine with the ID in the first cell.
+                  Care must be taken to ensure the set of matched IDs
+                  does not result in conflicts.
 
 ** System MMU optional properties:
 
@@ -56,9 +60,20 @@
                   aliases of secure registers have to be used during
                   SMMU configuration.
 
-Example:
+** Deprecated properties:
 
-        smmu {
+- mmu-masters (deprecated in favour of the generic "iommus" binding) :
+                  A list of phandles to device nodes representing bus
+                  masters for which the SMMU can provide a translation
+                  and their corresponding Stream IDs. Each device node
+                  linked from this list must have a "#stream-id-cells"
+                  property, indicating the number of Stream ID
+                  arguments associated with its phandle.
+
+** Examples:
+
+        /* SMMU with stream matching or stream indexing */
+        smmu1: iommu {
                 compatible = "arm,smmu-v1";
                 reg = <0xba5e0000 0x10000>;
                 #global-interrupts = <2>;
@@ -68,11 +83,29 @@
                              <0 35 4>,
                              <0 36 4>,
                              <0 37 4>;
+                #iommu-cells = <1>;
+        };
 
-                /*
-                 * Two DMA controllers, the first with two StreamIDs (0xd01d
-                 * and 0xd01e) and the second with only one (0xd11c).
-                 */
-                mmu-masters = <&dma0 0xd01d 0xd01e>,
-                              <&dma1 0xd11c>;
+        /* device with two stream IDs, 0 and 7 */
+        master1 {
+                iommus = <&smmu1 0>,
+                         <&smmu1 7>;
+        };
+
+
+        /* SMMU with stream matching */
+        smmu2: iommu {
+                ...
+                #iommu-cells = <2>;
+        };
+
+        /* device with stream IDs 0 and 7 */
+        master2 {
+                iommus = <&smmu2 0 0>,
+                         <&smmu2 7 0>;
+        };
+
+        /* device with stream IDs 1, 17, 33 and 49 */
+        master3 {
+                iommus = <&smmu2 1 0x30>;
         };
diff --git a/Documentation/devicetree/bindings/media/atmel-isc.txt b/Documentation/devicetree/bindings/media/atmel-isc.txt
new file mode 100644
index 0000000..bbe0e87c
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/atmel-isc.txt
@@ -0,0 +1,65 @@
+Atmel Image Sensor Controller (ISC)
+----------------------------------------------
+
+Required properties for ISC:
+- compatible
+	Must be "atmel,sama5d2-isc".
+- reg
+	Physical base address and length of the registers set for the device.
+- interrupts
+	Should contain IRQ line for the ISC.
+- clocks
+	List of clock specifiers, corresponding to entries in
+	the clock-names property;
+	Please refer to clock-bindings.txt.
+- clock-names
+	Required elements: "hclock", "iscck", "gck".
+- #clock-cells
+	Should be 0.
+- clock-output-names
+	Should be "isc-mck".
+- pinctrl-names, pinctrl-0
+	Please refer to pinctrl-bindings.txt.
+
+ISC supports a single port node with parallel bus. It should contain one
+'port' child node with child 'endpoint' node. Please refer to the bindings
+defined in Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+isc: isc@f0008000 {
+	compatible = "atmel,sama5d2-isc";
+	reg = <0xf0008000 0x4000>;
+	interrupts = <46 IRQ_TYPE_LEVEL_HIGH 5>;
+	clocks = <&isc_clk>, <&iscck>, <&isc_gclk>;
+	clock-names = "hclock", "iscck", "gck";
+	#clock-cells = <0>;
+	clock-output-names = "isc-mck";
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_isc_base &pinctrl_isc_data_8bit &pinctrl_isc_data_9_10 &pinctrl_isc_data_11_12>;
+
+	port {
+		isc_0: endpoint {
+			remote-endpoint = <&ov7740_0>;
+			hsync-active = <1>;
+			vsync-active = <0>;
+			pclk-sample = <1>;
+		};
+	};
+};
+
+i2c1: i2c@fc028000 {
+	ov7740: camera@21 {
+		compatible = "ovti,ov7740";
+		reg = <0x21>;
+		clocks = <&isc>;
+		clock-names = "xvclk";
+		assigned-clocks = <&isc>;
+		assigned-clock-rates = <24000000>;
+
+		port {
+			ov7740_0: endpoint {
+				remote-endpoint = <&isc_0>;
+			};
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/media/exynos4-fimc-is.txt b/Documentation/devicetree/bindings/media/exynos4-fimc-is.txt
index 55c9ad6..32ced99 100644
--- a/Documentation/devicetree/bindings/media/exynos4-fimc-is.txt
+++ b/Documentation/devicetree/bindings/media/exynos4-fimc-is.txt
@@ -16,9 +16,10 @@
 - clocks	: list of clock specifiers, corresponding to entries in
 		  clock-names property;
 - clock-names	: must contain "ppmuispx", "ppmuispx", "lite0", "lite1"
-		  "mpll", "sysreg", "isp", "drc", "fd", "mcuisp", "uart",
-		  "ispdiv0", "ispdiv1", "mcuispdiv0", "mcuispdiv1", "aclk200",
-		  "div_aclk200", "aclk400mcuisp", "div_aclk400mcuisp" entries,
+		  "mpll", "sysreg", "isp", "drc", "fd", "mcuisp", "gicisp",
+		  "pwm_isp", "mcuctl_isp", "uart", "ispdiv0", "ispdiv1",
+		  "mcuispdiv0", "mcuispdiv1", "aclk200", "div_aclk200",
+		  "aclk400mcuisp", "div_aclk400mcuisp" entries,
 		  matching entries in the clocks property.
 pmu subnode
 -----------
diff --git a/Documentation/devicetree/bindings/media/i2c/ad5820.txt b/Documentation/devicetree/bindings/media/i2c/ad5820.txt
new file mode 100644
index 0000000..5940ca1
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ad5820.txt
@@ -0,0 +1,19 @@
+* Analog Devices AD5820 autofocus coil
+
+Required Properties:
+
+  - compatible: Must contain "adi,ad5820"
+
+  - reg: I2C slave address
+
+  - VANA-supply: supply of voltage for VANA pin
+
+Example:
+
+       ad5820: coil@c {
+               compatible = "adi,ad5820";
+               reg = <0x0c>;
+
+               VANA-supply = <&vaux4>;
+       };
+
diff --git a/Documentation/devicetree/bindings/media/i2c/adv7180.txt b/Documentation/devicetree/bindings/media/i2c/adv7180.txt
index 0d50115..4da486f 100644
--- a/Documentation/devicetree/bindings/media/i2c/adv7180.txt
+++ b/Documentation/devicetree/bindings/media/i2c/adv7180.txt
@@ -15,6 +15,11 @@
 		"adi,adv7282"
 		"adi,adv7282-m"
 
+Optional Properties :
+- powerdown-gpios: reference to the GPIO connected to the powerdown pin,
+  if any.
+
+
 Example:
 
 	i2c0@1c22000 {
diff --git a/Documentation/devicetree/bindings/media/renesas,fcp.txt b/Documentation/devicetree/bindings/media/renesas,fcp.txt
index 6a12960..27f9b8e 100644
--- a/Documentation/devicetree/bindings/media/renesas,fcp.txt
+++ b/Documentation/devicetree/bindings/media/renesas,fcp.txt
@@ -7,12 +7,14 @@
 
 There are three types of FCP: FCP for Codec (FCPC), FCP for VSP (FCPV) and FCP
 for FDP (FCPF). Their configuration and behaviour depend on the module they
-are paired with. These DT bindings currently support the FCPV only.
+are paired with. These DT bindings currently support the FCPV and FCPF.
 
  - compatible: Must be one or more of the following
 
    - "renesas,r8a7795-fcpv" for R8A7795 (R-Car H3) compatible 'FCP for VSP'
+   - "renesas,r8a7795-fcpf" for R8A7795 (R-Car H3) compatible 'FCP for FDP'
    - "renesas,fcpv" for generic compatible 'FCP for VSP'
+   - "renesas,fcpf" for generic compatible 'FCP for FDP'
 
    When compatible with the generic version, nodes must list the
    SoC-specific version corresponding to the platform first, followed by the
@@ -21,6 +23,10 @@
  - reg: the register base and size for the device registers
  - clocks: Reference to the functional clock
 
+Optional properties:
+ - power-domains : power-domain property defined with a power domain specifier
+		   to respective power domain.
+
 
 Device node example
 -------------------
@@ -29,4 +35,5 @@
 		compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
 		reg = <0 0xfea2f000 0 0x200>;
 		clocks = <&cpg CPG_MOD 602>;
+		power-domains = <&sysc R8A7795_PD_A3VP>;
 	};
diff --git a/Documentation/devicetree/bindings/media/st,st-hva.txt b/Documentation/devicetree/bindings/media/st,st-hva.txt
new file mode 100644
index 0000000..0d76174
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/st,st-hva.txt
@@ -0,0 +1,24 @@
+st-hva: multi-format video encoder for STMicroelectronics SoC.
+
+Required properties:
+- compatible: should be "st,st-hva".
+- reg: HVA physical address location and length, esram address location and
+  length.
+- reg-names: names of the registers listed in registers property in the same
+  order.
+- interrupts: HVA interrupt number.
+- clocks: from common clock binding: handle hardware IP needed clocks, the
+  number of clocks may depend on the SoC type.
+  See ../clock/clock-bindings.txt for details.
+- clock-names: names of the clocks listed in clocks property in the same order.
+
+Example:
+	hva@8c85000{
+		compatible = "st,st-hva";
+		reg = <0x8c85000 0x400>, <0x6000000 0x40000>;
+		reg-names = "hva_registers", "hva_esram";
+		interrupts = <GIC_SPI 58 IRQ_TYPE_NONE>,
+			     <GIC_SPI 59 IRQ_TYPE_NONE>;
+		clock-names = "clk_hva";
+		clocks = <&clk_s_c0_flexgen CLK_HVA>;
+	};
diff --git a/Documentation/devicetree/bindings/media/stih-cec.txt b/Documentation/devicetree/bindings/media/stih-cec.txt
new file mode 100644
index 0000000..71c4b2f
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/stih-cec.txt
@@ -0,0 +1,25 @@
+STMicroelectronics STIH4xx HDMI CEC driver
+
+Required properties:
+ - compatible : value should be "st,stih-cec"
+ - reg : Physical base address of the IP registers and length of memory
+	 mapped region.
+ - clocks : from common clock binding: handle to HDMI CEC clock
+ - interrupts : HDMI CEC interrupt number to the CPU.
+ - pinctrl-names: Contains only one value - "default"
+ - pinctrl-0: Specifies the pin control groups used for CEC hardware.
+ - resets: Reference to a reset controller
+
+Example for STIH407:
+
+sti-cec@094a087c {
+	compatible = "st,stih-cec";
+	reg = <0x94a087c 0x64>;
+	clocks = <&clk_sysin>;
+	clock-names = "cec-clk";
+	interrupts = <GIC_SPI 140 IRQ_TYPE_NONE>;
+	interrupt-names = "cec-irq";
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_cec0_default>;
+	resets = <&softreset STIH407_LPM_SOFTRESET>;
+};
diff --git a/Documentation/devicetree/bindings/mips/brcm/soc.txt b/Documentation/devicetree/bindings/mips/brcm/soc.txt
index 4a7e030..e4e1cd9 100644
--- a/Documentation/devicetree/bindings/mips/brcm/soc.txt
+++ b/Documentation/devicetree/bindings/mips/brcm/soc.txt
@@ -2,9 +2,9 @@
 
 Required properties:
 
-- compatible: "brcm,bcm3384", "brcm,bcm33843"
+- compatible: "brcm,bcm3368", "brcm,bcm3384", "brcm,bcm33843"
               "brcm,bcm3384-viper", "brcm,bcm33843-viper"
-              "brcm,bcm6328", "brcm,bcm6358", "brcm,bcm6368",
+              "brcm,bcm6328", "brcm,bcm6358", "brcm,bcm6362", "brcm,bcm6368",
               "brcm,bcm63168", "brcm,bcm63268",
               "brcm,bcm7125", "brcm,bcm7346", "brcm,bcm7358", "brcm,bcm7360",
               "brcm,bcm7362", "brcm,bcm7420", "brcm,bcm7425"
diff --git a/Documentation/devicetree/bindings/mtd/nand.txt b/Documentation/devicetree/bindings/mtd/nand.txt
index 3733300..b056016 100644
--- a/Documentation/devicetree/bindings/mtd/nand.txt
+++ b/Documentation/devicetree/bindings/mtd/nand.txt
@@ -35,6 +35,15 @@
 - nand-ecc-step-size: integer representing the number of data bytes
 		      that are covered by a single ECC step.
 
+- nand-ecc-maximize: boolean used to specify that you want to maximize ECC
+		     strength. The maximum ECC strength is both controller and
+		     chip dependent. The controller side has to select the ECC
+		     config providing the best strength and taking the OOB area
+		     size constraint into account.
+		     This is particularly useful when only the in-band area is
+		     used by the upper layers, and you want to make your NAND
+		     as reliable as possible.
+
 The ECC strength and ECC step size properties define the correction capability
 of a controller. Together, they say a controller can correct "{strength} bit
 errors per {size} bytes".
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index f095257..c010faf 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -24,7 +24,6 @@
 Optional properties:
 - interrupt-parent: Should be the phandle for the interrupt controller
   that services interrupts for this device
-- mediatek,hwlro: the capability if the hardware supports LRO functions
 
 * Ethernet MAC node
 
@@ -54,7 +53,6 @@
 	reset-names = "eth";
 	mediatek,ethsys = <&ethsys>;
 	mediatek,pctl = <&syscfg_pctl_a>;
-	mediatek,hwlro;
 	#address-cells = <1>;
 	#size-cells = <0>;
 
diff --git a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
index f9c32ad..c35b5b4 100644
--- a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
+++ b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
@@ -34,16 +34,17 @@
 
   All skew control options are specified in picoseconds. The minimum
   value is 0, and the maximum is property-dependent. The increment
-  step is 60ps.
+  step is 60ps. The default value is the neutral setting, so setting
+  rxc-skew-ps=<0> actually results in -900 picoseconds adjustment.
 
   Optional properties:
 
-    Maximum value of 1860:
+    Maximum value of 1860, default value 900:
 
       - rxc-skew-ps : Skew control of RX clock pad
       - txc-skew-ps : Skew control of TX clock pad
 
-    Maximum value of 900:
+    Maximum value of 900, default value 420:
 
       - rxdv-skew-ps : Skew control of RX CTL pad
       - txen-skew-ps : Skew control of TX CTL pad
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index c8ac222..b519503 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -10,6 +10,7 @@
 	      "renesas,etheravb-r8a7793" if the device is a part of R8A7793 SoC.
 	      "renesas,etheravb-r8a7794" if the device is a part of R8A7794 SoC.
 	      "renesas,etheravb-r8a7795" if the device is a part of R8A7795 SoC.
+	      "renesas,etheravb-r8a7796" if the device is a part of R8A7796 SoC.
 	      "renesas,etheravb-rcar-gen2" for generic R-Car Gen 2 compatible interface.
 	      "renesas,etheravb-rcar-gen3" for generic R-Car Gen 3 compatible interface.
 
@@ -33,7 +34,7 @@
 - interrupt-parent: the phandle for the interrupt controller that services
 		    interrupts for this device.
 - interrupt-names: A list of interrupt names.
-		   For the R8A7795 SoC this property is mandatory;
+		   For the R8A779[56] SoCs this property is mandatory;
 		   it should include one entry per channel, named "ch%u",
 		   where %u is the channel number ranging from 0 to 24.
 		   For other SoCs this property is optional; if present
diff --git a/Documentation/devicetree/bindings/pci/pci-iommu.txt b/Documentation/devicetree/bindings/pci/pci-iommu.txt
new file mode 100644
index 0000000..56c8296
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/pci-iommu.txt
@@ -0,0 +1,171 @@
+This document describes the generic device tree binding for describing the
+relationship between PCI(e) devices and IOMMU(s).
+
+Each PCI(e) device under a root complex is uniquely identified by its Requester
+ID (AKA RID). A Requester ID is a triplet of a Bus number, Device number, and
+Function number.
+
+For the purpose of this document, when treated as a numeric value, a RID is
+formatted such that:
+
+* Bits [15:8] are the Bus number.
+* Bits [7:3] are the Device number.
+* Bits [2:0] are the Function number.
+* Any other bits required for padding must be zero.
+
+IOMMUs may distinguish PCI devices through sideband data derived from the
+Requester ID. While a given PCI device can only master through one IOMMU, a
+root complex may split masters across a set of IOMMUs (e.g. with one IOMMU per
+bus).
+
+The generic 'iommus' property is insufficient to describe this relationship,
+and a mechanism is required to map from a PCI device to its IOMMU and sideband
+data.
+
+For generic IOMMU bindings, see
+Documentation/devicetree/bindings/iommu/iommu.txt.
+
+
+PCI root complex
+================
+
+Optional properties
+-------------------
+
+- iommu-map: Maps a Requester ID to an IOMMU and associated iommu-specifier
+  data.
+
+  The property is an arbitrary number of tuples of
+  (rid-base,iommu,iommu-base,length).
+
+  Any RID r in the interval [rid-base, rid-base + length) is associated with
+  the listed IOMMU, with the iommu-specifier (r - rid-base + iommu-base).
+
+- iommu-map-mask: A mask to be applied to each Requester ID prior to being
+  mapped to an iommu-specifier per the iommu-map property.
+
+
+Example (1)
+===========
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	iommu: iommu@a {
+		reg = <0xa 0x1>;
+		compatible = "vendor,some-iommu";
+		#iommu-cells = <1>;
+	};
+
+	pci: pci@f {
+		reg = <0xf 0x1>;
+		compatible = "vendor,pcie-root-complex";
+		device_type = "pci";
+
+		/*
+		 * The sideband data provided to the IOMMU is the RID,
+		 * identity-mapped.
+		 */
+		iommu-map = <0x0 &iommu 0x0 0x10000>;
+	};
+};
+
+
+Example (2)
+===========
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	iommu: iommu@a {
+		reg = <0xa 0x1>;
+		compatible = "vendor,some-iommu";
+		#iommu-cells = <1>;
+	};
+
+	pci: pci@f {
+		reg = <0xf 0x1>;
+		compatible = "vendor,pcie-root-complex";
+		device_type = "pci";
+
+		/*
+		 * The sideband data provided to the IOMMU is the RID with the
+		 * function bits masked out.
+		 */
+		iommu-map = <0x0 &iommu 0x0 0x10000>;
+		iommu-map-mask = <0xfff8>;
+	};
+};
+
+
+Example (3)
+===========
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	iommu: iommu@a {
+		reg = <0xa 0x1>;
+		compatible = "vendor,some-iommu";
+		#iommu-cells = <1>;
+	};
+
+	pci: pci@f {
+		reg = <0xf 0x1>;
+		compatible = "vendor,pcie-root-complex";
+		device_type = "pci";
+
+		/*
+		 * The sideband data provided to the IOMMU is the RID,
+		 * but the high bits of the bus number are flipped.
+		 */
+		iommu-map = <0x0000 &iommu 0x8000 0x8000>,
+			    <0x8000 &iommu 0x0000 0x8000>;
+	};
+};
+
+
+Example (4)
+===========
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	iommu_a: iommu@a {
+		reg = <0xa 0x1>;
+		compatible = "vendor,some-iommu";
+		#iommu-cells = <1>;
+	};
+
+	iommu_b: iommu@b {
+		reg = <0xb 0x1>;
+		compatible = "vendor,some-iommu";
+		#iommu-cells = <1>;
+	};
+
+	iommu_c: iommu@c {
+		reg = <0xc 0x1>;
+		compatible = "vendor,some-iommu";
+		#iommu-cells = <1>;
+	};
+
+	pci: pci@f {
+		reg = <0xf 0x1>;
+		compatible = "vendor,pcie-root-complex";
+		device_type = "pci";
+
+		/*
+		 * Devices with bus number 0-127 are mastered via IOMMU
+		 * a, with sideband data being RID[14:0].
+		 * Devices with bus number 128-255 are mastered via
+		 * IOMMU b, with sideband data being RID[14:0].
+		 * No devices master via IOMMU c.
+		 */
+		iommu-map = <0x0000 &iommu_a 0x0000 0x8000>,
+			    <0x8000 &iommu_b 0x0000 0x8000>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
index 5e60ad1..2ad18c4 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
@@ -43,7 +43,9 @@
 
 GPID0 GPID2 GPIE0 I2C10 I2C11 I2C12 I2C13 I2C14 I2C3 I2C4 I2C5 I2C6 I2C7 I2C8
 I2C9 MAC1LINK MDIO1 MDIO2 OSCCLK PEWAKE PWM0 PWM1 PWM2 PWM3 PWM4 PWM5 PWM6 PWM7
-RGMII1 RGMII2 RMII1 RMII2 SD1 SPI1 TIMER4 TIMER5 TIMER6 TIMER7 TIMER8
+RGMII1 RGMII2 RMII1 RMII2 SD1 SPI1 SPI1DEBUG SPI1PASSTHRU TIMER4 TIMER5 TIMER6
+TIMER7 TIMER8 VGABIOSROM
+
 
 Examples:
 
diff --git a/Documentation/devicetree/bindings/pwm/pwm-meson.txt b/Documentation/devicetree/bindings/pwm/pwm-meson.txt
new file mode 100644
index 0000000..5376a44
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-meson.txt
@@ -0,0 +1,23 @@
+Amlogic Meson PWM Controller
+============================
+
+Required properties:
+- compatible: Shall contain "amlogic,meson8b-pwm" or "amlogic,meson-gxbb-pwm".
+- #pwm-cells: Should be 3. See pwm.txt in this directory for a description of
+  the cells format.
+
+Optional properties:
+- clocks: Could contain one or two parents clocks phandle for each of the two
+  PWM channels.
+- clock-names: Could contain at least the "clkin0" and/or "clkin1" names.
+
+Example:
+
+	pwm_ab: pwm@8550 {
+		compatible = "amlogic,meson-gxbb-pwm";
+		reg = <0x0 0x08550 0x0 0x10>;
+		#pwm-cells = <3>;
+		status = "disabled";
+		clocks = <&xtal>, <&xtal>;
+		clock-names = "clkin0", "clkin1";
+	}
diff --git a/Documentation/devicetree/bindings/pwm/pwm-mtk-disp.txt b/Documentation/devicetree/bindings/pwm/pwm-mtk-disp.txt
index f8f59ba..6f8af2b 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-mtk-disp.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-mtk-disp.txt
@@ -2,8 +2,9 @@
 
 Required properties:
  - compatible: should be "mediatek,<name>-disp-pwm":
-   - "mediatek,mt8173-disp-pwm": found on mt8173 SoC.
+   - "mediatek,mt2701-disp-pwm": found on mt2701 SoC.
    - "mediatek,mt6595-disp-pwm": found on mt6595 SoC.
+   - "mediatek,mt8173-disp-pwm": found on mt8173 SoC.
  - reg: physical base address and length of the controller's registers.
  - #pwm-cells: must be 2. See pwm.txt in this directory for a description of
    the cell format.
diff --git a/Documentation/devicetree/bindings/pwm/pwm-st.txt b/Documentation/devicetree/bindings/pwm/pwm-st.txt
index 84d2fb8..19fce77 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-st.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-st.txt
@@ -13,13 +13,14 @@
 - pinctrl-0: 		List of phandles pointing to pin configuration nodes
 			for PWM module.
 			For Pinctrl properties, please refer to [1].
-- clock-names: 		Set to "pwm".
+- clock-names: 		Valid entries are "pwm" and/or "capture".
 - clocks: 		phandle of the clock used by the PWM module.
 			For Clk properties, please refer to [2].
+- interrupts:		IRQ for the Capture device
 
 Optional properties:
-- st,pwm-num-chan:	Number of available channels. If not passed, the driver
-			will consider single channel by default.
+- st,pwm-num-chan:	Number of available PWM channels.  Default is 0.
+- st,capture-num-chan:	Number of available Capture channels.  Default is 0.
 
 [1] Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
 [2] Documentation/devicetree/bindings/clock/clock-bindings.txt
@@ -38,4 +39,5 @@
 	clocks = <&clk_sysin>;
 	clock-names = "pwm";
 	st,pwm-num-chan = <4>;
+	st,capture-num-chan = <2>;
 };
diff --git a/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt b/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt
index cf6068b..f1cbeef 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt
@@ -6,6 +6,7 @@
     - "allwinner,sun5i-a10s-pwm"
     - "allwinner,sun5i-a13-pwm"
     - "allwinner,sun7i-a20-pwm"
+    - "allwinner,sun8i-h3-pwm"
   - reg: physical base address and length of the controller's registers
   - #pwm-cells: should be 3. See pwm.txt in this directory for a description of
     the cells format.
diff --git a/Documentation/devicetree/bindings/rtc/dallas,ds1390.txt b/Documentation/devicetree/bindings/rtc/dallas,ds1390.txt
index 8e76f26..9882b81 100644
--- a/Documentation/devicetree/bindings/rtc/dallas,ds1390.txt
+++ b/Documentation/devicetree/bindings/rtc/dallas,ds1390.txt
@@ -11,7 +11,7 @@
 - trickle-diode-disable : Do not use internal trickle charger diode
 	Should be given if internal trickle charger diode should be disabled
 Example:
-	ds1390: rtc@68 {
+	ds1390: rtc@0 {
 		compatible = "dallas,ds1390";
 		trickle-resistor-ohms = <250>;
 		reg = <0>;
diff --git a/Documentation/devicetree/bindings/rtc/epson,rx8900.txt b/Documentation/devicetree/bindings/rtc/epson,rx8900.txt
new file mode 100644
index 0000000..3f61e51
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/epson,rx8900.txt
@@ -0,0 +1,22 @@
+Real Time Clock driver for:
+  - Epson RX8900
+  - Micro Crystal rv8803
+
+Required properties:
+- compatible: should be: "microcrystal,rv8803" or "epson,rx8900"
+- reg : the I2C address of the device for I2C
+
+Optional properties:
+- epson,vdet-disable : boolean, if present will disable voltage detector.
+  Should be set if no backup battery is used.
+- trickle-diode-disable : boolean, if present will disable internal trickle
+  charger diode
+
+Example:
+
+	rtc: rtc@32 {
+		compatible = "epson,rx8900"
+		reg = <0x32>;
+		epson,vdet-disable;
+		trickle-diode-disable;
+	};
diff --git a/Documentation/devicetree/bindings/rtc/rtc-omap.txt b/Documentation/devicetree/bindings/rtc/rtc-omap.txt
index bf7d11a..bee41f9 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-omap.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-omap.txt
@@ -18,6 +18,18 @@
   through pmic_power_en
 - clocks: Any internal or external clocks feeding in to rtc
 - clock-names: Corresponding names of the clocks
+- pinctrl-0: a phandle pointing to the pin settings for the device
+- pinctrl-names: should be "default"
+
+Optional subnodes:
+- generic pinctrl node
+
+Required pinctrl subnodes properties:
+- pins - Names of ext_wakeup pins to configure
+
+Optional pinctrl subnodes properties:
+- input-enable - Enables ext_wakeup
+- ti,active-high - Set input active high (by default active low)
 
 Example:
 
@@ -30,4 +42,13 @@
 	system-power-controller;
 	clocks = <&clk_32k_rtc>, <&clk_32768_ck>;
 	clock-names = "ext-clk", "int-clk";
+
+	pinctrl-0 = <&ext_wakeup>;
+	pinctrl-names = "default";
+
+	ext_wakeup: ext-wakeup {
+		pins = "ext_wakeup0";
+		input-enable;
+		ti,active-high;
+	};
 };
diff --git a/Documentation/devicetree/bindings/thermal/max77620_thermal.txt b/Documentation/devicetree/bindings/thermal/max77620_thermal.txt
new file mode 100644
index 0000000..323a3b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/max77620_thermal.txt
@@ -0,0 +1,70 @@
+Thermal driver for MAX77620 Power management IC from Maxim Semiconductor.
+
+Maxim Semiconductor MAX77620 supports alarm interrupts when its
+die temperature crosses 120C and 140C. These threshold temperatures
+are not configurable. Device does not provide the real temperature
+of die other than just indicating whether temperature is above or
+below threshold level.
+
+Required properties:
+-------------------
+#thermal-sensor-cells:	Please refer <devicetree/bindings/thermal/thermal.txt>
+			for more details.
+			The value must be 0.
+
+For more details, please refer generic thermal DT binding document
+<devicetree/bindings/thermal/thermal.txt>.
+
+Please refer <devicetree/bindings/mfd/max77620.txt> for mfd DT binding
+document for the MAX77620.
+
+Example:
+--------
+#include <dt-bindings/mfd/max77620.h>
+#include <dt-bindings/thermal/thermal.h>
+...
+
+i2c@7000d000 {
+	spmic: max77620@3c {
+		compatible = "maxim,max77620";
+		:::::
+		#thermal-sensor-cells = <0>;
+		:::
+	};
+};
+
+cool_dev: cool-dev {
+	compatible = "cooling-dev";
+	#cooling-cells = <2>;
+};
+
+thermal-zones {
+	PMIC-Die {
+		polling-delay = <0>;
+		polling-delay-passive = <0>;
+		thermal-sensors = <&spmic>;
+
+		trips {
+			pmic_die_warn_temp_thresh: hot-die {
+				temperature = <120000>;
+				type = "hot";
+				hysteresis = <0>;
+			};
+
+			pmic_die_cirt_temp_thresh: cirtical-die {
+				temperature = <140000>;
+				type = "critical";
+				hysteresis = <0>;
+			};
+		};
+
+		cooling-maps {
+			map0 {
+				trip = <&pmic_die_warn_temp_thresh>;
+				cooling-device = <&cool_dev THERMAL_NO_LIMIT
+						  THERMAL_NO_LIMIT>;
+				contribution = <100>;
+			};
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt b/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt
index 81f9a51..e2f494d 100644
--- a/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt
@@ -8,7 +8,9 @@
 is also needed.
 
 Required properties:
-- compatible: "mediatek,mt8173-thermal"
+- compatible:
+  - "mediatek,mt8173-thermal" : For MT8173 family of SoCs
+  - "mediatek,mt2701-thermal" : For MT2701 family of SoCs
 - reg: Address range of the thermal controller
 - interrupts: IRQ for the thermal controller
 - clocks, clock-names: Clocks needed for the thermal controller. required
diff --git a/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt b/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt
index edebfa0..b6c0ae5 100644
--- a/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt
+++ b/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt
@@ -10,8 +10,14 @@
 - compatible : For Tegra124, must contain "nvidia,tegra124-soctherm".
   For Tegra132, must contain "nvidia,tegra132-soctherm".
   For Tegra210, must contain "nvidia,tegra210-soctherm".
-- reg : Should contain 1 entry:
+- reg : Should contain at least 2 entries for each entry in reg-names:
   - SOCTHERM register set
+  - Tegra CAR register set: Required for Tegra124 and Tegra210.
+  - CCROC register set: Required for Tegra132.
+- reg-names :  Should contain at least 2 entries:
+  - soctherm-reg
+  - car-reg
+  - ccroc-reg
 - interrupts : Defines the interrupt used by SOCTHERM
 - clocks : Must contain an entry for each entry in clock-names.
   See ../clocks/clock-bindings.txt for details.
@@ -25,17 +31,45 @@
 - #thermal-sensor-cells : Should be 1. See ./thermal.txt for a description
     of this property. See <dt-bindings/thermal/tegra124-soctherm.h> for a
     list of valid values when referring to thermal sensors.
+- throttle-cfgs: A sub-node which is a container of configuration for each
+    hardware throttle events. These events can be set as cooling devices.
+  * throttle events: Sub-nodes must be named as "light" or "heavy".
+      Properties:
+      - nvidia,priority: Each throttles has its own throttle settings, so the
+        SW need to set priorities for various throttle, the HW arbiter can select
+        the final throttle settings.
+        Bigger value indicates higher priority, In general, higher priority
+        translates to lower target frequency. SW needs to ensure that critical
+        thermal alarms are given higher priority, and ensure that there is
+        no race if priority of two vectors is set to the same value.
+        The range of this value is 1~100.
+      - nvidia,cpu-throt-percent: This property is for Tegra124 and Tegra210.
+        It is the throttling depth of pulse skippers, it's the percentage
+        throttling.
+      - nvidia,cpu-throt-level: This property is only for Tegra132, it is the
+        level of pulse skippers, which used to throttle clock frequencies. It
+        indicates cpu clock throttling depth, and the depth can be programmed.
+        Must set as following values:
+        TEGRA_SOCTHERM_THROT_LEVEL_LOW, TEGRA_SOCTHERM_THROT_LEVEL_MED
+        TEGRA_SOCTHERM_THROT_LEVEL_HIGH, TEGRA_SOCTHERM_THROT_LEVEL_NONE
+      - #cooling-cells: Should be 1. This cooling device only support on/off state.
+        See ./thermal.txt for a description of this property.
 
 Note:
 - the "critical" type trip points will be set to SOC_THERM hardware as the
 shut down temperature. Once the temperature of this thermal zone is higher
 than it, the system will be shutdown or reset by hardware.
+- the "hot" type trip points will be set to SOC_THERM hardware as the throttle
+temperature. Once the the temperature of this thermal zone is higher
+than it, it will trigger the HW throttle event.
 
 Example :
 
 	soctherm@700e2000 {
 		compatible = "nvidia,tegra124-soctherm";
-		reg = <0x0 0x700e2000 0x0 0x1000>;
+		reg = <0x0 0x700e2000 0x0 0x600  /* SOC_THERM reg_base */
+			0x0 0x60006000 0x0 0x400 /* CAR reg_base */
+		reg-names = "soctherm-reg", "car-reg";
 		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&tegra_car TEGRA124_CLK_TSENSOR>,
 			<&tegra_car TEGRA124_CLK_SOC_THERM>;
@@ -44,6 +78,76 @@
 		reset-names = "soctherm";
 
 		#thermal-sensor-cells = <1>;
+
+		throttle-cfgs {
+			/*
+			 * When the "heavy" cooling device triggered,
+			 * the HW will skip cpu clock's pulse in 85% depth
+			 */
+			throttle_heavy: heavy {
+				nvidia,priority = <100>;
+				nvidia,cpu-throt-percent = <85>;
+
+				#cooling-cells = <1>;
+			};
+
+			/*
+			 * When the "light" cooling device triggered,
+			 * the HW will skip cpu clock's pulse in 50% depth
+			 */
+			throttle_light: light {
+				nvidia,priority = <80>;
+				nvidia,cpu-throt-percent = <50>;
+
+				#cooling-cells = <1>;
+			};
+
+			/*
+			 * If these two devices are triggered in same time, the HW throttle
+			 * arbiter will select the highest priority as the final throttle
+			 * settings to skip cpu pulse.
+			 */
+		};
+	};
+
+Example: referring to Tegra132's "reg", "reg-names" and "throttle-cfgs" :
+
+	soctherm@700e2000 {
+		compatible = "nvidia,tegra132-soctherm";
+		reg = <0x0 0x700e2000 0x0 0x600  /* SOC_THERM reg_base */
+			0x0 0x70040000 0x0 0x200>; /* CCROC reg_base */;
+		reg-names = "soctherm-reg", "ccroc-reg";
+
+		throttle-cfgs {
+			/*
+			 * When the "heavy" cooling device triggered,
+			 * the HW will skip cpu clock's pulse in HIGH level
+			 */
+			throttle_heavy: heavy {
+				nvidia,priority = <100>;
+				nvidia,cpu-throt-level = <TEGRA_SOCTHERM_THROT_LEVEL_HIGH>;
+
+				#cooling-cells = <1>;
+			};
+
+			/*
+			 * When the "light" cooling device triggered,
+			 * the HW will skip cpu clock's pulse in MED level
+			 */
+			throttle_light: light {
+				nvidia,priority = <80>;
+				nvidia,cpu-throt-level = <TEGRA_SOCTHERM_THROT_LEVEL_MED>;
+
+				#cooling-cells = <1>;
+			};
+
+			/*
+			 * If these two devices are triggered in same time, the HW throttle
+			 * arbiter will select the highest priority as the final throttle
+			 * settings to skip cpu pulse.
+			 */
+
+		};
 	};
 
 Example: referring to thermal sensors :
@@ -62,6 +166,19 @@
 					hysteresis = <1000>;
 					type = "critical";
 				};
+
+				cpu_throttle_trip: throttle-trip {
+					temperature = <100000>;
+					hysteresis = <1000>;
+					type = "hot";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu_throttle_trip>;
+					cooling-device = <&throttle_heavy 1 1>;
+				};
 			};
                 };
 	};
diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.txt b/Documentation/devicetree/bindings/thermal/qcom-tsens.txt
new file mode 100644
index 0000000..292ed89
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.txt
@@ -0,0 +1,21 @@
+* QCOM SoC Temperature Sensor (TSENS)
+
+Required properties:
+- compatible :
+ - "qcom,msm8916-tsens" : For 8916 Family of SoCs
+ - "qcom,msm8974-tsens" : For 8974 Family of SoCs
+ - "qcom,msm8996-tsens" : For 8996 Family of SoCs
+
+- reg: Address range of the thermal registers
+- #thermal-sensor-cells : Should be 1. See ./thermal.txt for a description.
+- Refer to Documentation/devicetree/bindings/nvmem/nvmem.txt to know how to specify
+nvmem cells
+
+Example:
+tsens: thermal-sensor@900000 {
+		compatible = "qcom,msm8916-tsens";
+		reg = <0x4a8000 0x2000>;
+		nvmem-cells = <&tsens_caldata>, <&tsens_calsel>;
+		nvmem-cell-names = "caldata", "calsel";
+		#thermal-sensor-cells = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 24c6f65..f0a48ea 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -163,9 +163,11 @@
 meas	Measurement Specialties
 mediatek	MediaTek Inc.
 melexis	Melexis N.V.
+melfas	MELFAS Inc.
 merrii	Merrii Technology Co., Ltd.
 micrel	Micrel Inc.
 microchip	Microchip Technology Inc.
+microcrystal	Micro Crystal AG
 micron	Micron Technology Inc.
 minix	MINIX Technology Ltd.
 mitsubishi	Mitsubishi Electric Corporation
diff --git a/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt b/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt
index 6d63782..c6ae9c9d 100644
--- a/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt
@@ -7,6 +7,8 @@
 - reg			: Physical base address and size
 
 Optional properties:
+- clocks		: Input clock specifier. Refer to common clock
+			  bindings.
 - clock-frequency	: Frequency of clock in Hz
 - xlnx,wdt-enable-once	: 0 - Watchdog can be restarted
 			  1 - Watchdog can be enabled just once
@@ -17,6 +19,7 @@
 axi-timebase-wdt@40100000 {
 	clock-frequency = <50000000>;
 	compatible = "xlnx,xps-timebase-wdt-1.00.a";
+	clocks = <&clkc 15>;
 	reg = <0x40100000 0x10000>;
 	xlnx,wdt-enable-once = <0x0>;
 	xlnx,wdt-interval = <0x1b>;
diff --git a/Documentation/devicetree/bindings/watchdog/st_lpc_wdt.txt b/Documentation/devicetree/bindings/watchdog/st_lpc_wdt.txt
index 039c5ca..b949039 100644
--- a/Documentation/devicetree/bindings/watchdog/st_lpc_wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/st_lpc_wdt.txt
@@ -9,8 +9,7 @@
 
 Required properties
 
-- compatible 	: Must be one of: "st,stih407-lpc" "st,stih416-lpc"
-				  "st,stih415-lpc" "st,stid127-lpc"
+- compatible 	: Should be: "st,stih407-lpc"
 - reg		: LPC registers base address + size
 - interrupts    : LPC interrupt line number and associated flags
 - clocks	: Clock used by LPC device (See: ../clock/clock-bindings.txt)
diff --git a/Documentation/features/perf/kprobes-event/arch-support.txt b/Documentation/features/perf/kprobes-event/arch-support.txt
index 9855ad0..4660bf2 100644
--- a/Documentation/features/perf/kprobes-event/arch-support.txt
+++ b/Documentation/features/perf/kprobes-event/arch-support.txt
@@ -22,7 +22,7 @@
     |        m68k: | TODO |
     |       metag: | TODO |
     |  microblaze: | TODO |
-    |        mips: | TODO |
+    |        mips: |  ok  |
     |     mn10300: | TODO |
     |       nios2: | TODO |
     |    openrisc: | TODO |
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 9922939..f66e748 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -2,8 +2,6 @@
 	- this file (info on some of the filesystems supported by linux).
 Locking
 	- info on locking rules as they pertain to Linux VFS.
-Makefile
-	- Makefile for building the filsystems-part of DocBook.
 9p.txt
 	- 9p (v9fs) is an implementation of the Plan 9 remote fs protocol.
 adfs.txt
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index d30fb2c..14cdc10 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -51,8 +51,6 @@
 	int (*rmdir) (struct inode *,struct dentry *);
 	int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
 	int (*rename) (struct inode *, struct dentry *,
-			struct inode *, struct dentry *);
-	int (*rename2) (struct inode *, struct dentry *,
 			struct inode *, struct dentry *, unsigned int);
 	int (*readlink) (struct dentry *, char __user *,int);
 	const char *(*get_link) (struct dentry *, struct inode *, void **);
@@ -61,10 +59,7 @@
 	int (*get_acl)(struct inode *, int);
 	int (*setattr) (struct dentry *, struct iattr *);
 	int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *);
-	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
-	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
-	int (*removexattr) (struct dentry *, const char *);
 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
 	void (*update_time)(struct inode *, struct timespec *, int);
 	int (*atomic_open)(struct inode *, struct dentry *,
@@ -83,31 +78,44 @@
 mkdir:		yes
 unlink:		yes (both)
 rmdir:		yes (both)	(see below)
-rename:		yes (all)	(see below)
-rename2:	yes (all)	(see below)
+rename:	yes (all)	(see below)
 readlink:	no
 get_link:	no
 setattr:	yes
 permission:	no (may not block if called in rcu-walk mode)
 get_acl:	no
 getattr:	no
-setxattr:	yes
-getxattr:	no
 listxattr:	no
-removexattr:	yes
 fiemap:		no
 update_time:	no
 atomic_open:	yes
 tmpfile:	no
 
+
 	Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
 victim.
-	cross-directory ->rename() and rename2() has (per-superblock)
-->s_vfs_rename_sem.
+	cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
 
 See Documentation/filesystems/directory-locking for more detailed discussion
 of the locking scheme for directory operations.
 
+----------------------- xattr_handler operations -----------------------
+prototypes:
+	bool (*list)(struct dentry *dentry);
+	int (*get)(const struct xattr_handler *handler, struct dentry *dentry,
+		   struct inode *inode, const char *name, void *buffer,
+		   size_t size);
+	int (*set)(const struct xattr_handler *handler, struct dentry *dentry,
+		   struct inode *inode, const char *name, const void *buffer,
+		   size_t size, int flags);
+
+locking rules:
+	all may block
+		i_mutex(inode)
+list:		no
+get:		no
+set:		yes
+
 --------------------------- super_operations ---------------------------
 prototypes:
 	struct inode *(*alloc_inode)(struct super_block *sb);
diff --git a/Documentation/filesystems/Makefile b/Documentation/filesystems/Makefile
deleted file mode 100644
index 883010c..0000000
--- a/Documentation/filesystems/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# List of programs to build
-hostprogs-y := dnotify_test
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
diff --git a/Documentation/filesystems/autofs4-mount-control.txt b/Documentation/filesystems/autofs4-mount-control.txt
index aff2211..50a3e01 100644
--- a/Documentation/filesystems/autofs4-mount-control.txt
+++ b/Documentation/filesystems/autofs4-mount-control.txt
@@ -179,8 +179,19 @@
 				 * including this struct */
 	__s32 ioctlfd;          /* automount command fd */
 
-	__u32 arg1;             /* Command parameters */
-	__u32 arg2;
+	union {
+		struct args_protover		protover;
+		struct args_protosubver		protosubver;
+		struct args_openmount		openmount;
+		struct args_ready		ready;
+		struct args_fail		fail;
+		struct args_setpipefd		setpipefd;
+		struct args_timeout		timeout;
+		struct args_requester		requester;
+		struct args_expire		expire;
+		struct args_askumount		askumount;
+		struct args_ismountpoint	ismountpoint;
+	};
 
 	char path[0];
 };
@@ -192,8 +203,8 @@
 mount point file descriptor, and when requesting the uid and gid of the
 last successful mount on a directory within the autofs file system.
 
-The fields arg1 and arg2 are used to communicate parameters and results of
-calls made as described below.
+The union is used to communicate parameters and results of calls made
+as described below.
 
 The path field is used to pass a path where it is needed and the size field
 is used account for the increased structure length when translating the
@@ -245,9 +256,9 @@
 Get the major and minor version of the autofs4 protocol version understood
 by loaded module. This call requires an initialized struct autofs_dev_ioctl
 with the ioctlfd field set to a valid autofs mount point descriptor
-and sets the requested version number in structure field arg1. These
-commands return 0 on success or one of the negative error codes if
-validation fails.
+and sets the requested version number in version field of struct args_protover
+or sub_version field of struct args_protosubver. These commands return
+0 on success or one of the negative error codes if validation fails.
 
 
 AUTOFS_DEV_IOCTL_OPENMOUNT and AUTOFS_DEV_IOCTL_CLOSEMOUNT
@@ -256,9 +267,9 @@
 Obtain and release a file descriptor for an autofs managed mount point
 path. The open call requires an initialized struct autofs_dev_ioctl with
 the path field set and the size field adjusted appropriately as well
-as the arg1 field set to the device number of the autofs mount. The
-device number can be obtained from the mount options shown in
-/proc/mounts. The close call requires an initialized struct
+as the devid field of struct args_openmount set to the device number of
+the autofs mount. The device number can be obtained from the mount options
+shown in /proc/mounts. The close call requires an initialized struct
 autofs_dev_ioct with the ioctlfd field set to the descriptor obtained
 from the open call. The release of the file descriptor can also be done
 with close(2) so any open descriptors will also be closed at process exit.
@@ -272,10 +283,10 @@
 Return mount and expire result status from user space to the kernel.
 Both of these calls require an initialized struct autofs_dev_ioctl
 with the ioctlfd field set to the descriptor obtained from the open
-call and the arg1 field set to the wait queue token number, received
-by user space in the foregoing mount or expire request. The arg2 field
-is set to the status to be returned. For the ready call this is always
-0 and for the fail call it is set to the errno of the operation.
+call and the token field of struct args_ready or struct args_fail set
+to the wait queue token number, received by user space in the foregoing
+mount or expire request. The status field of struct args_fail is set to
+the errno of the operation. It is set to 0 on success.
 
 
 AUTOFS_DEV_IOCTL_SETPIPEFD_CMD
@@ -290,9 +301,10 @@
 
 The call requires an initialized struct autofs_dev_ioctl with the
 ioctlfd field set to the descriptor obtained from the open call and
-the arg1 field set to descriptor of the pipe. On success the call
-also sets the process group id used to identify the controlling process
-(eg. the owning automount(8) daemon) to the process group of the caller.
+the pipefd field of struct args_setpipefd set to descriptor of the pipe.
+On success the call also sets the process group id used to identify the
+controlling process (eg. the owning automount(8) daemon) to the process
+group of the caller.
 
 
 AUTOFS_DEV_IOCTL_CATATONIC_CMD
@@ -323,9 +335,8 @@
 
 The call requires an initialized struct autofs_dev_ioctl with the path
 field set to the mount point in question and the size field adjusted
-appropriately as well as the arg1 field set to the device number of the
-containing autofs mount. Upon return the struct field arg1 contains the
-uid and arg2 the gid.
+appropriately. Upon return the uid field of struct args_requester contains
+the uid and gid field the gid.
 
 When reconstructing an autofs mount tree with active mounts we need to
 re-connect to mounts that may have used the original process uid and
@@ -343,8 +354,9 @@
 The call requires an initialized struct autofs_dev_ioctl with the
 ioctlfd field set to the descriptor obtained from the open call. In
 addition an immediate expire, independent of the mount timeout, can be
-requested by setting the arg1 field to 1. If no expire candidates can
-be found the ioctl returns -1 with errno set to EAGAIN.
+requested by setting the how field of struct args_expire to 1. If no
+expire candidates can be found the ioctl returns -1 with errno set to
+EAGAIN.
 
 This call causes the kernel module to check the mount corresponding
 to the given ioctlfd for mounts that can be expired, issues an expire
@@ -357,7 +369,8 @@
 
 The call requires an initialized struct autofs_dev_ioctl with the
 ioctlfd field set to the descriptor obtained from the open call and
-it returns the result in the arg1 field, 1 for busy and 0 otherwise.
+it returns the result in the may_umount field of struct args_askumount,
+1 for busy and 0 otherwise.
 
 
 AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD
@@ -369,12 +382,12 @@
 possible variations. Both use the path field set to the path of the mount
 point to check and the size field adjusted appropriately. One uses the
 ioctlfd field to identify a specific mount point to check while the other
-variation uses the path and optionally arg1 set to an autofs mount type.
-The call returns 1 if this is a mount point and sets arg1 to the device
-number of the mount and field arg2 to the relevant super block magic
-number (described below) or 0 if it isn't a mountpoint. In both cases
-the the device number (as returned by new_encode_dev()) is returned
-in field arg1.
+variation uses the path and optionally in.type field of struct args_ismountpoint
+set to an autofs mount type. The call returns 1 if this is a mount point
+and sets out.devid field to the device number of the mount and out.magic
+field to the relevant super block magic number (described below) or 0 if
+it isn't a mountpoint. In both cases the the device number (as returned
+by new_encode_dev()) is returned in out.devid field.
 
 If supplied with a file descriptor we're looking for a specific mount,
 not necessarily at the top of the mounted stack. In this case the path
diff --git a/Documentation/filesystems/autofs4.txt b/Documentation/filesystems/autofs4.txt
index 39d02e1..8fac3fe 100644
--- a/Documentation/filesystems/autofs4.txt
+++ b/Documentation/filesystems/autofs4.txt
@@ -203,9 +203,9 @@
 Mountpoint expiry
 -----------------
 
-The VFS has a mechansim for automatically expiring unused mounts,
+The VFS has a mechanism for automatically expiring unused mounts,
 much as it can expire any unused dentry information from the dcache.
-This is guided by the MNT_SHRINKABLE flag.  This  only applies to
+This is guided by the MNT_SHRINKABLE flag.  This only applies to
 mounts that were created by `d_automount()` returning a filesystem to be
 mounted.  As autofs doesn't return such a filesystem but leaves the
 mounting to the automount daemon, it must involve the automount daemon
@@ -298,7 +298,7 @@
 autofs knows whether a process requesting some operation is the daemon
 or not based on its process-group id number (see getpgid(1)).
 
-When an autofs filesystem it mounted the pgid of the mounting
+When an autofs filesystem is mounted the pgid of the mounting
 processes is recorded unless the "pgrp=" option is given, in which
 case that number is recorded instead.  Any request arriving from a
 process in that process group is considered to come from the daemon.
@@ -450,7 +450,7 @@
     numbers for existing filesystems can be found in
     `/proc/self/mountinfo`.
 - **AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD**: same as `close(ioctlfd)`.
-- **AUTOFS_DEV_IOCTL_SETPIPEFD_CMD**: if the  filesystem is in
+- **AUTOFS_DEV_IOCTL_SETPIPEFD_CMD**: if the filesystem is in
     catatonic mode, this can provide the write end of a new pipe
     in `arg1` to re-establish communication with a daemon.  The
     process group of the calling process is used to identify the
diff --git a/Documentation/filesystems/ceph.txt b/Documentation/filesystems/ceph.txt
index d6030aa..f5306ee 100644
--- a/Documentation/filesystems/ceph.txt
+++ b/Documentation/filesystems/ceph.txt
@@ -98,6 +98,10 @@
 	size.
 
   rsize=X
+	Specify the maximum read size in bytes.  By default there is no
+	maximum.
+
+  rasize=X
 	Specify the maximum readahead.
 
   mount_timeout=X
diff --git a/Documentation/filesystems/directory-locking b/Documentation/filesystems/directory-locking
index c314bad..4e32cb9 100644
--- a/Documentation/filesystems/directory-locking
+++ b/Documentation/filesystems/directory-locking
@@ -19,7 +19,7 @@
 
 4) rename() that is _not_ cross-directory.  Locking rules: caller locks
 the parent and finds source and target.  In case of exchange (with
-RENAME_EXCHANGE in rename2() flags argument) lock both.  In any case,
+RENAME_EXCHANGE in flags argument) lock both.  In any case,
 if the target already exists, lock it.  If the source is a non-directory,
 lock it.  If we need to lock both, lock them in inode pointer order.
 Then call the method.  All locks are exclusive.
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index b1bd05e..bdd025c 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -287,8 +287,8 @@
 and vmtruncate, and the reorder the vmtruncate + foofs_vmtruncate sequence to
 be in order of zeroing blocks using block_truncate_page or similar helpers,
 size update and on finally on-disk truncation which should not fail.
-inode_change_ok now includes the size checks for ATTR_SIZE and must be called
-in the beginning of ->setattr unconditionally.
+setattr_prepare (which used to be inode_change_ok) now includes the size checks
+for ATTR_SIZE and must be called in the beginning of ->setattr unconditionally.
 
 [mandatory]
 
@@ -592,3 +592,7 @@
 	work just as well; if it's something more complicated, use dentry->d_parent.
 	Just be careful not to assume that fetching it more than once will yield
 	the same value - in RCU mode it could change under you.
+--
+[mandatory]
+	->rename() has an added flags argument.  Any flags not handled by the
+        filesystem should result in EINVAL being returned.
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index cbec006..d619c8d 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -323,6 +323,35 @@
 is a pointer to a "struct inode_operations" which describes the methods that
 can be performed on individual inodes.
 
+struct xattr_handlers
+---------------------
+
+On filesystems that support extended attributes (xattrs), the s_xattr
+superblock field points to a NULL-terminated array of xattr handlers.  Extended
+attributes are name:value pairs.
+
+  name: Indicates that the handler matches attributes with the specified name
+	(such as "system.posix_acl_access"); the prefix field must be NULL.
+
+  prefix: Indicates that the handler matches all attributes with the specified
+	name prefix (such as "user."); the name field must be NULL.
+
+  list: Determine if attributes matching this xattr handler should be listed
+	for a particular dentry.  Used by some listxattr implementations like
+	generic_listxattr.
+
+  get: Called by the VFS to get the value of a particular extended attribute.
+	This method is called by the getxattr(2) system call.
+
+  set: Called by the VFS to set the value of a particular extended attribute.
+	When the new value is NULL, called to remove a particular extended
+	attribute.  This method is called by the the setxattr(2) and
+	removexattr(2) system calls.
+
+When none of the xattr handlers of a filesystem match the specified attribute
+name or when a filesystem doesn't support extended attributes, the various
+*xattr(2) system calls return -EOPNOTSUPP.
+
 
 The Inode Object
 ================
@@ -346,8 +375,6 @@
 	int (*rmdir) (struct inode *,struct dentry *);
 	int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
 	int (*rename) (struct inode *, struct dentry *,
-			struct inode *, struct dentry *);
-	int (*rename2) (struct inode *, struct dentry *,
 			struct inode *, struct dentry *, unsigned int);
 	int (*readlink) (struct dentry *, char __user *,int);
 	const char *(*get_link) (struct dentry *, struct inode *,
@@ -356,10 +383,7 @@
 	int (*get_acl)(struct inode *, int);
 	int (*setattr) (struct dentry *, struct iattr *);
 	int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
-	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
-	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
-	int (*removexattr) (struct dentry *, const char *);
 	void (*update_time)(struct inode *, struct timespec *, int);
 	int (*atomic_open)(struct inode *, struct dentry *, struct file *,
 			unsigned open_flag, umode_t create_mode, int *opened);
@@ -416,11 +440,8 @@
   rename: called by the rename(2) system call to rename the object to
 	have the parent and name given by the second inode and dentry.
 
-  rename2: this has an additional flags argument compared to rename.
-	If no flags are supported by the filesystem then this method
-	need not be implemented.  If some flags are supported then the
-	filesystem must return -EINVAL for any unsupported or unknown
-	flags.  Currently the following flags are implemented:
+	The filesystem must return -EINVAL for any unsupported or
+	unknown	flags.  Currently the following flags are implemented:
 	(1) RENAME_NOREPLACE: this flag indicates that if the target
 	of the rename exists the rename should fail with -EEXIST
 	instead of replacing the target.  The VFS already checks for
@@ -463,19 +484,8 @@
   getattr: called by the VFS to get attributes of a file. This method
   	is called by stat(2) and related system calls.
 
-  setxattr: called by the VFS to set an extended attribute for a file.
-  	Extended attribute is a name:value pair associated with an
-  	inode. This method is called by setxattr(2) system call.
-
-  getxattr: called by the VFS to retrieve the value of an extended
-  	attribute name. This method is called by getxattr(2) function
-  	call.
-
   listxattr: called by the VFS to list all extended attributes for a
-  	given file. This method is called by listxattr(2) system call.
-
-  removexattr: called by the VFS to remove an extended attribute from
-  	a file. This method is called by removexattr(2) system call.
+	given file. This method is called by the listxattr(2) system call.
 
   update_time: called by the VFS to update a specific time or the i_version of
   	an inode.  If this is not defined the VFS will update the inode itself
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst
index 3bb2613..37284bc 100644
--- a/Documentation/gpu/drm-internals.rst
+++ b/Documentation/gpu/drm-internals.rst
@@ -53,9 +53,12 @@
 DRIVER_USE_AGP
     Driver uses AGP interface, the DRM core will manage AGP resources.
 
-DRIVER_REQUIRE_AGP
-    Driver needs AGP interface to function. AGP initialization failure
-    will become a fatal error.
+DRIVER_LEGACY
+    Denote a legacy driver using shadow attach. Don't use.
+
+DRIVER_KMS_LEGACY_CONTEXT
+    Used only by nouveau for backwards compatibility with existing userspace.
+    Don't use.
 
 DRIVER_PCI_DMA
     Driver is capable of PCI DMA, mapping of PCI DMA buffers to
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 0b302fe..bb4254d 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -2,38 +2,45 @@
 Mode Setting Helper Functions
 =============================
 
-The plane, CRTC, encoder and connector functions provided by the drivers
-implement the DRM API. They're called by the DRM core and ioctl handlers
-to handle device state changes and configuration request. As
-implementing those functions often requires logic not specific to
-drivers, mid-layer helper functions are available to avoid duplicating
-boilerplate code.
+The DRM subsystem aims for a strong separation between core code and helper
+libraries. Core code takes care of general setup and teardown and decoding
+userspace requests to kernel internal objects. Everything else is handled by a
+large set of helper libraries, which can be combined freely to pick and choose
+for each driver what fits, and avoid shared code where special behaviour is
+needed.
 
-The DRM core contains one mid-layer implementation. The mid-layer
-provides implementations of several plane, CRTC, encoder and connector
-functions (called from the top of the mid-layer) that pre-process
-requests and call lower-level functions provided by the driver (at the
-bottom of the mid-layer). For instance, the
-:c:func:`drm_crtc_helper_set_config()` function can be used to
-fill the :c:type:`struct drm_crtc_funcs <drm_crtc_funcs>`
-set_config field. When called, it will split the set_config operation
-in smaller, simpler operations and call the driver to handle them.
+This distinction between core code and helpers is especially strong in the
+modesetting code, where there's a shared userspace ABI for all drivers. This is
+in contrast to the render side, where pretty much everything (with very few
+exceptions) can be considered optional helper code.
 
-To use the mid-layer, drivers call
-:c:func:`drm_crtc_helper_add()`,
-:c:func:`drm_encoder_helper_add()` and
-:c:func:`drm_connector_helper_add()` functions to install their
-mid-layer bottom operations handlers, and fill the :c:type:`struct
-drm_crtc_funcs <drm_crtc_funcs>`, :c:type:`struct
-drm_encoder_funcs <drm_encoder_funcs>` and :c:type:`struct
-drm_connector_funcs <drm_connector_funcs>` structures with
-pointers to the mid-layer top API functions. Installing the mid-layer
-bottom operation handlers is best done right after registering the
-corresponding KMS object.
+There are a few areas these helpers can grouped into:
 
-The mid-layer is not split between CRTC, encoder and connector
-operations. To use it, a driver must provide bottom functions for all of
-the three KMS entities.
+* Helpers to implement modesetting. The important ones here are the atomic
+  helpers. Old drivers still often use the legacy CRTC helpers. They both share
+  the same set of common helper vtables. For really simple drivers (anything
+  that would have been a great fit in the deprecated fbdev subsystem) there's
+  also the simple display pipe helpers.
+
+* There's a big pile of helpers for handling outputs. First the generic bridge
+  helpers for handling encoder and transcoder IP blocks. Second the panel helpers
+  for handling panel-related information and logic. Plus then a big set of
+  helpers for the various sink standards (DisplayPort, HDMI, MIPI DSI). Finally
+  there's also generic helpers for handling output probing, and for dealing with
+  EDIDs.
+
+* The last group of helpers concerns itself with the frontend side of a display
+  pipeline: Planes, handling rectangles for visibility checking and scissoring,
+  flip queues and assorted bits.
+
+Modeset Helper Reference for Common Vtables
+===========================================
+
+.. kernel-doc:: include/drm/drm_modeset_helper_vtables.h
+   :internal:
+
+.. kernel-doc:: include/drm/drm_modeset_helper_vtables.h
+   :doc: overview
 
 Atomic Modeset Helper Functions Reference
 =========================================
@@ -62,33 +69,27 @@
 .. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
    :export:
 
-Modeset Helper Reference for Common Vtables
-===========================================
-
-.. kernel-doc:: include/drm/drm_modeset_helper_vtables.h
-   :internal:
-
-.. kernel-doc:: include/drm/drm_modeset_helper_vtables.h
-   :doc: overview
-
 Legacy CRTC/Modeset Helper Functions Reference
 ==============================================
 
 .. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
-   :export:
-
-.. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
    :doc: overview
 
-Output Probing Helper Functions Reference
-=========================================
-
-.. kernel-doc:: drivers/gpu/drm/drm_probe_helper.c
-   :doc: output probing helper overview
-
-.. kernel-doc:: drivers/gpu/drm/drm_probe_helper.c
+.. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
    :export:
 
+Simple KMS Helper Reference
+===========================
+
+.. kernel-doc:: include/drm/drm_simple_kms_helper.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
+   :export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
+   :doc: overview
+
 fbdev Helper Functions Reference
 ================================
 
@@ -110,6 +111,43 @@
 .. kernel-doc:: drivers/gpu/drm/drm_fb_cma_helper.c
    :export:
 
+Bridges
+=======
+
+Overview
+--------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+   :doc: overview
+
+Default bridge callback sequence
+--------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+   :doc: bridge callbacks
+
+
+Bridge Helper Reference
+-------------------------
+
+.. kernel-doc:: include/drm/drm_bridge.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+   :export:
+
+Panel Helper Reference
+======================
+
+.. kernel-doc:: include/drm/drm_panel.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_panel.c
+   :export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_panel.c
+   :doc: drm panel
+
 Display Port Helper Functions Reference
 =======================================
 
@@ -158,9 +196,21 @@
 .. kernel-doc:: drivers/gpu/drm/drm_mipi_dsi.c
    :export:
 
+Output Probing Helper Functions Reference
+=========================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_probe_helper.c
+   :doc: output probing helper overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_probe_helper.c
+   :export:
+
 EDID Helper Functions Reference
 ===============================
 
+.. kernel-doc:: include/drm/drm_edid.h
+   :internal:
+
 .. kernel-doc:: drivers/gpu/drm/drm_edid.c
    :export:
 
@@ -176,18 +226,6 @@
 .. kernel-doc:: drivers/gpu/drm/drm_rect.c
    :export:
 
-Flip-work Helper Reference
-==========================
-
-.. kernel-doc:: include/drm/drm_flip_work.h
-   :doc: flip utils
-
-.. kernel-doc:: include/drm/drm_flip_work.h
-   :internal:
-
-.. kernel-doc:: drivers/gpu/drm/drm_flip_work.c
-   :export:
-
 HDMI Infoframes Helper Reference
 ================================
 
@@ -202,59 +240,40 @@
 .. kernel-doc:: drivers/video/hdmi.c
    :export:
 
+Flip-work Helper Reference
+==========================
+
+.. kernel-doc:: include/drm/drm_flip_work.h
+   :doc: flip utils
+
+.. kernel-doc:: include/drm/drm_flip_work.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_flip_work.c
+   :export:
+
 Plane Helper Reference
 ======================
 
 .. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
-   :export:
-
-.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
    :doc: overview
 
+.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
+   :export:
+
 Tile group
-----------
+==========
+
+# FIXME: This should probably be moved into a property documentation section
 
 .. kernel-doc:: drivers/gpu/drm/drm_crtc.c
    :doc: Tile group
 
-Bridges
-=======
+Auxiliary Modeset Helpers
+=========================
 
-Overview
---------
+.. kernel-doc:: drivers/gpu/drm/drm_modeset_helper.c
+   :doc: aux kms helpers
 
-.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
-   :doc: overview
-
-Default bridge callback sequence
---------------------------------
-
-.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
-   :doc: bridge callbacks
-
-.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+.. kernel-doc:: drivers/gpu/drm/drm_modeset_helper.c
    :export:
-
-Panel Helper Reference
-======================
-
-.. kernel-doc:: include/drm/drm_panel.h
-   :internal:
-
-.. kernel-doc:: drivers/gpu/drm/drm_panel.c
-   :export:
-
-.. kernel-doc:: drivers/gpu/drm/drm_panel.c
-   :doc: drm panel
-
-Simple KMS Helper Reference
-===========================
-
-.. kernel-doc:: include/drm/drm_simple_kms_helper.h
-   :internal:
-
-.. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
-   :export:
-
-.. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
-   :doc: overview
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 8dfa4b2..53b872c 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -2,9 +2,6 @@
 Kernel Mode Setting (KMS)
 =========================
 
-Mode Setting
-============
-
 Drivers must initialize the mode setting core by calling
 :c:func:`drm_mode_config_init()` on the DRM device. The function
 initializes the :c:type:`struct drm_device <drm_device>`
@@ -18,60 +15,59 @@
 -  struct drm_mode_config_funcs \*funcs;
    Mode setting functions.
 
-Display Modes Function Reference
---------------------------------
+Modeset Base Object Abstraction
+===============================
 
-.. kernel-doc:: include/drm/drm_modes.h
+.. kernel-doc:: include/drm/drm_mode_object.h
    :internal:
 
-.. kernel-doc:: drivers/gpu/drm/drm_modes.c
+.. kernel-doc:: drivers/gpu/drm/drm_mode_object.c
+   :export:
+
+KMS Data Structures
+===================
+
+.. kernel-doc:: include/drm/drm_crtc.h
+   :internal:
+
+KMS API Functions
+=================
+
+.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
    :export:
 
 Atomic Mode Setting Function Reference
---------------------------------------
+======================================
 
 .. kernel-doc:: drivers/gpu/drm/drm_atomic.c
    :export:
 
-.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
+.. kernel-doc:: include/drm/drm_atomic.h
    :internal:
 
 Frame Buffer Abstraction
-------------------------
+========================
 
-Frame buffers are abstract memory objects that provide a source of
-pixels to scanout to a CRTC. Applications explicitly request the
-creation of frame buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls
-and receive an opaque handle that can be passed to the KMS CRTC control,
-plane configuration and page flip functions.
+.. kernel-doc:: drivers/gpu/drm/drm_framebuffer.c
+   :doc: overview
 
-Frame buffers rely on the underneath memory manager for low-level memory
-operations. When creating a frame buffer applications pass a memory
-handle (or a list of memory handles for multi-planar formats) through
-the ``drm_mode_fb_cmd2`` argument. For drivers using GEM as their
-userspace buffer management interface this would be a GEM handle.
-Drivers are however free to use their own backing storage object
-handles, e.g. vmwgfx directly exposes special TTM handles to userspace
-and so expects TTM handles in the create ioctl and not GEM handles.
+Frame Buffer Functions Reference
+--------------------------------
 
-The lifetime of a drm framebuffer is controlled with a reference count,
-drivers can grab additional references with
-:c:func:`drm_framebuffer_reference()`and drop them again with
-:c:func:`drm_framebuffer_unreference()`. For driver-private
-framebuffers for which the last reference is never dropped (e.g. for the
-fbdev framebuffer when the struct :c:type:`struct drm_framebuffer
-<drm_framebuffer>` is embedded into the fbdev helper struct)
-drivers can manually clean up a framebuffer at module unload time with
-:c:func:`drm_framebuffer_unregister_private()`.
+.. kernel-doc:: drivers/gpu/drm/drm_framebuffer.c
+   :export:
+
+.. kernel-doc:: include/drm/drm_framebuffer.h
+   :internal:
 
 DRM Format Handling
--------------------
+===================
 
 .. kernel-doc:: drivers/gpu/drm/drm_fourcc.c
    :export:
 
 Dumb Buffer Objects
--------------------
+===================
 
 The KMS API doesn't standardize backing storage object creation and
 leaves it to driver-specific ioctls. Furthermore actually creating a
@@ -114,14 +110,59 @@
 attempted on some ARM embedded platforms. Such drivers really must have
 a hardware-specific ioctl to allocate suitable buffer objects.
 
-Output Polling
---------------
+Plane Abstraction
+=================
 
-void (\*output_poll_changed)(struct drm_device \*dev);
-This operation notifies the driver that the status of one or more
-connectors has changed. Drivers that use the fb helper can just call the
-:c:func:`drm_fb_helper_hotplug_event()` function to handle this
-operation.
+.. kernel-doc:: drivers/gpu/drm/drm_plane.c
+   :doc: overview
+
+Plane Functions Reference
+-------------------------
+
+.. kernel-doc:: include/drm/drm_plane.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_plane.c
+   :export:
+
+Display Modes Function Reference
+================================
+
+.. kernel-doc:: include/drm/drm_modes.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_modes.c
+   :export:
+
+Connector Abstraction
+=====================
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+   :doc: overview
+
+Connector Functions Reference
+-----------------------------
+
+.. kernel-doc:: include/drm/drm_connector.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+   :export:
+
+Encoder Abstraction
+===================
+
+.. kernel-doc:: drivers/gpu/drm/drm_encoder.c
+   :doc: overview
+
+Encoder Functions Reference
+---------------------------
+
+.. kernel-doc:: include/drm/drm_encoder.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_encoder.c
+   :export:
 
 KMS Initialization and Cleanup
 ==============================
@@ -151,250 +192,6 @@
 structure, and registered with a call to :c:func:`drm_crtc_init()`
 with a pointer to CRTC functions.
 
-Planes (:c:type:`struct drm_plane <drm_plane>`)
------------------------------------------------
-
-A plane represents an image source that can be blended with or overlayed
-on top of a CRTC during the scanout process. Planes are associated with
-a frame buffer to crop a portion of the image memory (source) and
-optionally scale it to a destination size. The result is then blended
-with or overlayed on top of a CRTC.
-
-The DRM core recognizes three types of planes:
-
--  DRM_PLANE_TYPE_PRIMARY represents a "main" plane for a CRTC.
-   Primary planes are the planes operated upon by CRTC modesetting and
-   flipping operations described in the page_flip hook in
-   :c:type:`struct drm_crtc_funcs <drm_crtc_funcs>`.
--  DRM_PLANE_TYPE_CURSOR represents a "cursor" plane for a CRTC.
-   Cursor planes are the planes operated upon by the
-   DRM_IOCTL_MODE_CURSOR and DRM_IOCTL_MODE_CURSOR2 ioctls.
--  DRM_PLANE_TYPE_OVERLAY represents all non-primary, non-cursor
-   planes. Some drivers refer to these types of planes as "sprites"
-   internally.
-
-For compatibility with legacy userspace, only overlay planes are made
-available to userspace by default. Userspace clients may set the
-DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate
-that they wish to receive a universal plane list containing all plane
-types.
-
-Plane Initialization
-~~~~~~~~~~~~~~~~~~~~
-
-To create a plane, a KMS drivers allocates and zeroes an instances of
-:c:type:`struct drm_plane <drm_plane>` (possibly as part of a
-larger structure) and registers it with a call to
-:c:func:`drm_universal_plane_init()`. The function takes a
-bitmask of the CRTCs that can be associated with the plane, a pointer to
-the plane functions, a list of format supported formats, and the type of
-plane (primary, cursor, or overlay) being initialized.
-
-Cursor and overlay planes are optional. All drivers should provide one
-primary plane per CRTC (although this requirement may change in the
-future); drivers that do not wish to provide special handling for
-primary planes may make use of the helper functions described in ? to
-create and register a primary plane with standard capabilities.
-
-Encoders (:c:type:`struct drm_encoder <drm_encoder>`)
------------------------------------------------------
-
-An encoder takes pixel data from a CRTC and converts it to a format
-suitable for any attached connectors. On some devices, it may be
-possible to have a CRTC send data to more than one encoder. In that
-case, both encoders would receive data from the same scanout buffer,
-resulting in a "cloned" display configuration across the connectors
-attached to each encoder.
-
-Encoder Initialization
-~~~~~~~~~~~~~~~~~~~~~~
-
-As for CRTCs, a KMS driver must create, initialize and register at least
-one :c:type:`struct drm_encoder <drm_encoder>` instance. The
-instance is allocated and zeroed by the driver, possibly as part of a
-larger structure.
-
-Drivers must initialize the :c:type:`struct drm_encoder
-<drm_encoder>` possible_crtcs and possible_clones fields before
-registering the encoder. Both fields are bitmasks of respectively the
-CRTCs that the encoder can be connected to, and sibling encoders
-candidate for cloning.
-
-After being initialized, the encoder must be registered with a call to
-:c:func:`drm_encoder_init()`. The function takes a pointer to the
-encoder functions and an encoder type. Supported types are
-
--  DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A
--  DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort
--  DRM_MODE_ENCODER_LVDS for display panels
--  DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video,
-   Component, SCART)
--  DRM_MODE_ENCODER_VIRTUAL for virtual machine displays
-
-Encoders must be attached to a CRTC to be used. DRM drivers leave
-encoders unattached at initialization time. Applications (or the fbdev
-compatibility layer when implemented) are responsible for attaching the
-encoders they want to use to a CRTC.
-
-Connectors (:c:type:`struct drm_connector <drm_connector>`)
------------------------------------------------------------
-
-A connector is the final destination for pixel data on a device, and
-usually connects directly to an external display device like a monitor
-or laptop panel. A connector can only be attached to one encoder at a
-time. The connector is also the structure where information about the
-attached display is kept, so it contains fields for display data, EDID
-data, DPMS & connection status, and information about modes supported on
-the attached displays.
-
-Connector Initialization
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-Finally a KMS driver must create, initialize, register and attach at
-least one :c:type:`struct drm_connector <drm_connector>`
-instance. The instance is created as other KMS objects and initialized
-by setting the following fields.
-
-interlace_allowed
-    Whether the connector can handle interlaced modes.
-
-doublescan_allowed
-    Whether the connector can handle doublescan.
-
-display_info
-    Display information is filled from EDID information when a display
-    is detected. For non hot-pluggable displays such as flat panels in
-    embedded systems, the driver should initialize the
-    display_info.width_mm and display_info.height_mm fields with the
-    physical size of the display.
-
-polled
-    Connector polling mode, a combination of
-
-    DRM_CONNECTOR_POLL_HPD
-        The connector generates hotplug events and doesn't need to be
-        periodically polled. The CONNECT and DISCONNECT flags must not
-        be set together with the HPD flag.
-
-    DRM_CONNECTOR_POLL_CONNECT
-        Periodically poll the connector for connection.
-
-    DRM_CONNECTOR_POLL_DISCONNECT
-        Periodically poll the connector for disconnection.
-
-    Set to 0 for connectors that don't support connection status
-    discovery.
-
-The connector is then registered with a call to
-:c:func:`drm_connector_init()` with a pointer to the connector
-functions and a connector type, and exposed through sysfs with a call to
-:c:func:`drm_connector_register()`.
-
-Supported connector types are
-
--  DRM_MODE_CONNECTOR_VGA
--  DRM_MODE_CONNECTOR_DVII
--  DRM_MODE_CONNECTOR_DVID
--  DRM_MODE_CONNECTOR_DVIA
--  DRM_MODE_CONNECTOR_Composite
--  DRM_MODE_CONNECTOR_SVIDEO
--  DRM_MODE_CONNECTOR_LVDS
--  DRM_MODE_CONNECTOR_Component
--  DRM_MODE_CONNECTOR_9PinDIN
--  DRM_MODE_CONNECTOR_DisplayPort
--  DRM_MODE_CONNECTOR_HDMIA
--  DRM_MODE_CONNECTOR_HDMIB
--  DRM_MODE_CONNECTOR_TV
--  DRM_MODE_CONNECTOR_eDP
--  DRM_MODE_CONNECTOR_VIRTUAL
-
-Connectors must be attached to an encoder to be used. For devices that
-map connectors to encoders 1:1, the connector should be attached at
-initialization time with a call to
-:c:func:`drm_mode_connector_attach_encoder()`. The driver must
-also set the :c:type:`struct drm_connector <drm_connector>`
-encoder field to point to the attached encoder.
-
-Finally, drivers must initialize the connectors state change detection
-with a call to :c:func:`drm_kms_helper_poll_init()`. If at least
-one connector is pollable but can't generate hotplug interrupts
-(indicated by the DRM_CONNECTOR_POLL_CONNECT and
-DRM_CONNECTOR_POLL_DISCONNECT connector flags), a delayed work will
-automatically be queued to periodically poll for changes. Connectors
-that can generate hotplug interrupts must be marked with the
-DRM_CONNECTOR_POLL_HPD flag instead, and their interrupt handler must
-call :c:func:`drm_helper_hpd_irq_event()`. The function will
-queue a delayed work to check the state of all connectors, but no
-periodic polling will be done.
-
-Connector Operations
-~~~~~~~~~~~~~~~~~~~~
-
-    **Note**
-
-    Unless otherwise state, all operations are mandatory.
-
-DPMS
-''''
-
-void (\*dpms)(struct drm_connector \*connector, int mode);
-The DPMS operation sets the power state of a connector. The mode
-argument is one of
-
--  DRM_MODE_DPMS_ON
-
--  DRM_MODE_DPMS_STANDBY
-
--  DRM_MODE_DPMS_SUSPEND
-
--  DRM_MODE_DPMS_OFF
-
-In all but DPMS_ON mode the encoder to which the connector is attached
-should put the display in low-power mode by driving its signals
-appropriately. If more than one connector is attached to the encoder
-care should be taken not to change the power state of other displays as
-a side effect. Low-power mode should be propagated to the encoders and
-CRTCs when all related connectors are put in low-power mode.
-
-Modes
-'''''
-
-int (\*fill_modes)(struct drm_connector \*connector, uint32_t
-max_width, uint32_t max_height);
-Fill the mode list with all supported modes for the connector. If the
-``max_width`` and ``max_height`` arguments are non-zero, the
-implementation must ignore all modes wider than ``max_width`` or higher
-than ``max_height``.
-
-The connector must also fill in this operation its display_info
-width_mm and height_mm fields with the connected display physical size
-in millimeters. The fields should be set to 0 if the value isn't known
-or is not applicable (for instance for projector devices).
-
-Connection Status
-'''''''''''''''''
-
-The connection status is updated through polling or hotplug events when
-supported (see ?). The status value is reported to userspace through
-ioctls and must not be used inside the driver, as it only gets
-initialized by a call to :c:func:`drm_mode_getconnector()` from
-userspace.
-
-enum drm_connector_status (\*detect)(struct drm_connector
-\*connector, bool force);
-Check to see if anything is attached to the connector. The ``force``
-parameter is set to false whilst polling or to true when checking the
-connector due to user request. ``force`` can be used by the driver to
-avoid expensive, destructive operations during automated probing.
-
-Return connector_status_connected if something is connected to the
-connector, connector_status_disconnected if nothing is connected and
-connector_status_unknown if the connection state isn't known.
-
-Drivers should only return connector_status_connected if the
-connection status has really been probed as connected. Connectors that
-can't detect the connection status, or failed connection status probes,
-should return connector_status_unknown.
 
 Cleanup
 -------
@@ -463,20 +260,8 @@
 the process is complete, the new connector is registered with sysfs to
 make its properties available to applications.
 
-KMS API Functions
------------------
-
-.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
-   :export:
-
-KMS Data Structures
--------------------
-
-.. kernel-doc:: include/drm/drm_crtc.h
-   :internal:
-
 KMS Locking
------------
+===========
 
 .. kernel-doc:: drivers/gpu/drm/drm_modeset_lock.c
    :doc: kms locking
@@ -490,90 +275,38 @@
 KMS Properties
 ==============
 
-Drivers may need to expose additional parameters to applications than
-those described in the previous sections. KMS supports attaching
-properties to CRTCs, connectors and planes and offers a userspace API to
-list, get and set the property values.
+Property Types and Blob Property Support
+----------------------------------------
 
-Properties are identified by a name that uniquely defines the property
-purpose, and store an associated value. For all property types except
-blob properties the value is a 64-bit unsigned integer.
+.. kernel-doc:: drivers/gpu/drm/drm_property.c
+   :doc: overview
 
-KMS differentiates between properties and property instances. Drivers
-first create properties and then create and associate individual
-instances of those properties to objects. A property can be instantiated
-multiple times and associated with different objects. Values are stored
-in property instances, and all other property information are stored in
-the property and shared between all instances of the property.
+.. kernel-doc:: include/drm/drm_property.h
+   :internal:
 
-Every property is created with a type that influences how the KMS core
-handles the property. Supported property types are
+.. kernel-doc:: drivers/gpu/drm/drm_property.c
+   :export:
 
-DRM_MODE_PROP_RANGE
-    Range properties report their minimum and maximum admissible values.
-    The KMS core verifies that values set by application fit in that
-    range.
+Plane Composition Properties
+----------------------------
 
-DRM_MODE_PROP_ENUM
-    Enumerated properties take a numerical value that ranges from 0 to
-    the number of enumerated values defined by the property minus one,
-    and associate a free-formed string name to each value. Applications
-    can retrieve the list of defined value-name pairs and use the
-    numerical value to get and set property instance values.
+.. kernel-doc:: drivers/gpu/drm/drm_blend.c
+   :doc: overview
 
-DRM_MODE_PROP_BITMASK
-    Bitmask properties are enumeration properties that additionally
-    restrict all enumerated values to the 0..63 range. Bitmask property
-    instance values combine one or more of the enumerated bits defined
-    by the property.
+.. kernel-doc:: drivers/gpu/drm/drm_blend.c
+   :export:
 
-DRM_MODE_PROP_BLOB
-    Blob properties store a binary blob without any format restriction.
-    The binary blobs are created as KMS standalone objects, and blob
-    property instance values store the ID of their associated blob
-    object.
+Color Management Properties
+---------------------------
 
-    Blob properties are only used for the connector EDID property and
-    cannot be created by drivers.
+.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
+   :doc: overview
 
-To create a property drivers call one of the following functions
-depending on the property type. All property creation functions take
-property flags and name, as well as type-specific arguments.
+.. kernel-doc:: include/drm/drm_color_mgmt.h
+   :internal:
 
--  struct drm_property \*drm_property_create_range(struct
-   drm_device \*dev, int flags, const char \*name, uint64_t min,
-   uint64_t max);
-   Create a range property with the given minimum and maximum values.
-
--  struct drm_property \*drm_property_create_enum(struct drm_device
-   \*dev, int flags, const char \*name, const struct
-   drm_prop_enum_list \*props, int num_values);
-   Create an enumerated property. The ``props`` argument points to an
-   array of ``num_values`` value-name pairs.
-
--  struct drm_property \*drm_property_create_bitmask(struct
-   drm_device \*dev, int flags, const char \*name, const struct
-   drm_prop_enum_list \*props, int num_values);
-   Create a bitmask property. The ``props`` argument points to an array
-   of ``num_values`` value-name pairs.
-
-Properties can additionally be created as immutable, in which case they
-will be read-only for applications but can be modified by the driver. To
-create an immutable property drivers must set the
-DRM_MODE_PROP_IMMUTABLE flag at property creation time.
-
-When no array of value-name pairs is readily available at property
-creation time for enumerated or range properties, drivers can create the
-property using the :c:func:`drm_property_create()` function and
-manually add enumeration value-name pairs by calling the
-:c:func:`drm_property_add_enum()` function. Care must be taken to
-properly specify the property type through the ``flags`` argument.
-
-After creating properties drivers can attach property instances to CRTC,
-connector and plane objects by calling the
-:c:func:`drm_object_attach_property()`. The function takes a
-pointer to the target object, a pointer to the previously created
-property and an initial instance value.
+.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
+   :export:
 
 Existing KMS Properties
 -----------------------
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index 59f9822..bca8085 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -26,12 +26,12 @@
 UMA devices.
 
 The Translation Table Manager (TTM)
------------------------------------
+===================================
 
 TTM design background and information belongs here.
 
 TTM initialization
-~~~~~~~~~~~~~~~~~~
+------------------
 
     **Warning**
 
@@ -77,7 +77,7 @@
 count for the TTM, which will call your initialization function.
 
 The Graphics Execution Manager (GEM)
-------------------------------------
+====================================
 
 The GEM design approach has resulted in a memory manager that doesn't
 provide full coverage of all (or even all common) use cases in its
@@ -114,7 +114,7 @@
 driver-specific ioctls.
 
 GEM Initialization
-~~~~~~~~~~~~~~~~~~
+------------------
 
 Drivers that use GEM must set the DRIVER_GEM bit in the struct
 :c:type:`struct drm_driver <drm_driver>` driver_features
@@ -132,7 +132,7 @@
 its own DRM MM object.
 
 GEM Objects Creation
-~~~~~~~~~~~~~~~~~~~~
+--------------------
 
 GEM splits creation of GEM objects and allocation of the memory that
 backs them in two distinct operations.
@@ -173,7 +173,7 @@
 must be managed by drivers.
 
 GEM Objects Lifetime
-~~~~~~~~~~~~~~~~~~~~
+--------------------
 
 All GEM objects are reference-counted by the GEM core. References can be
 acquired and release by :c:func:`calling
@@ -196,7 +196,7 @@
 :c:func:`drm_gem_object_release()`.
 
 GEM Objects Naming
-~~~~~~~~~~~~~~~~~~
+------------------
 
 Communication between userspace and the kernel refers to GEM objects
 using local handles, global names or, more recently, file descriptors.
@@ -245,7 +245,7 @@
 based on dma-bufs.
 
 GEM Objects Mapping
-~~~~~~~~~~~~~~~~~~~
+-------------------
 
 Because mapping operations are fairly heavyweight GEM favours
 read/write-like access to buffers, implemented through driver-specific
@@ -304,7 +304,7 @@
 faults can implement their own mmap file operation handler.
 
 Memory Coherency
-~~~~~~~~~~~~~~~~
+----------------
 
 When mapped to the device or used in a command buffer, backing pages for
 an object are flushed to memory and marked write combined so as to be
@@ -320,7 +320,7 @@
 any necessary flushing operations).
 
 Command Execution
-~~~~~~~~~~~~~~~~~
+-----------------
 
 Perhaps the most important GEM function for GPU devices is providing a
 command execution interface to clients. Client programs construct
@@ -348,8 +348,20 @@
 .. kernel-doc:: include/drm/drm_gem.h
    :internal:
 
+GEM CMA Helper Functions Reference
+----------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
+   :doc: cma helpers
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
+   :export:
+
+.. kernel-doc:: include/drm/drm_gem_cma_helper.h
+   :internal:
+
 VMA Offset Manager
-------------------
+==================
 
 .. kernel-doc:: drivers/gpu/drm/drm_vma_manager.c
    :doc: vma offset manager
@@ -361,14 +373,14 @@
    :internal:
 
 PRIME Buffer Sharing
---------------------
+====================
 
 PRIME is the cross device buffer sharing framework in drm, originally
 created for the OPTIMUS range of multi-gpu platforms. To userspace PRIME
 buffers are dma-buf based file descriptors.
 
 Overview and Driver Interface
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------
 
 Similar to GEM global names, PRIME file descriptors are also used to
 share buffer objects across processes. They offer additional security:
@@ -406,7 +418,7 @@
 support PRIME.
 
 PRIME Helper Functions
-~~~~~~~~~~~~~~~~~~~~~~
+----------------------
 
 .. kernel-doc:: drivers/gpu/drm/drm_prime.c
    :doc: PRIME Helpers
@@ -418,16 +430,16 @@
    :export:
 
 DRM MM Range Allocator
-----------------------
+======================
 
 Overview
-~~~~~~~~
+--------
 
 .. kernel-doc:: drivers/gpu/drm/drm_mm.c
    :doc: Overview
 
 LRU Scan/Eviction Support
-~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------
 
 .. kernel-doc:: drivers/gpu/drm/drm_mm.c
    :doc: lru scan roaster
@@ -440,15 +452,3 @@
 
 .. kernel-doc:: include/drm/drm_mm.h
    :internal:
-
-CMA Helper Functions Reference
-------------------------------
-
-.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
-   :doc: cma helpers
-
-.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
-   :export:
-
-.. kernel-doc:: include/drm/drm_gem_cma_helper.h
-   :internal:
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index 536bf3e..1ba301c 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -33,6 +33,76 @@
 .. kernel-doc:: include/drm/drm_auth.h
    :internal:
 
+Open-Source Userspace Requirements
+==================================
+
+The DRM subsystem has stricter requirements than most other kernel subsystems on
+what the userspace side for new uAPI needs to look like. This section here
+explains what exactly those requirements are, and why they exist.
+
+The short summary is that any addition of DRM uAPI requires corresponding
+open-sourced userspace patches, and those patches must be reviewed and ready for
+merging into a suitable and canonical upstream project.
+
+GFX devices (both display and render/GPU side) are really complex bits of
+hardware, with userspace and kernel by necessity having to work together really
+closely.  The interfaces, for rendering and modesetting, must be extremely wide
+and flexible, and therefore it is almost always impossible to precisely define
+them for every possible corner case. This in turn makes it really practically
+infeasible to differentiate between behaviour that's required by userspace, and
+which must not be changed to avoid regressions, and behaviour which is only an
+accidental artifact of the current implementation.
+
+Without access to the full source code of all userspace users that means it
+becomes impossible to change the implementation details, since userspace could
+depend upon the accidental behaviour of the current implementation in minute
+details. And debugging such regressions without access to source code is pretty
+much impossible. As a consequence this means:
+
+- The Linux kernel's "no regression" policy holds in practice only for
+  open-source userspace of the DRM subsystem. DRM developers are perfectly fine
+  if closed-source blob drivers in userspace use the same uAPI as the open
+  drivers, but they must do so in the exact same way as the open drivers.
+  Creative (ab)use of the interfaces will, and in the past routinely has, lead
+  to breakage.
+
+- Any new userspace interface must have an open-source implementation as
+  demonstration vehicle.
+
+The other reason for requiring open-source userspace is uAPI review. Since the
+kernel and userspace parts of a GFX stack must work together so closely, code
+review can only assess whether a new interface achieves its goals by looking at
+both sides. Making sure that the interface indeed covers the use-case fully
+leads to a few additional requirements:
+
+- The open-source userspace must not be a toy/test application, but the real
+  thing. Specifically it needs to handle all the usual error and corner cases.
+  These are often the places where new uAPI falls apart and hence essential to
+  assess the fitness of a proposed interface.
+
+- The userspace side must be fully reviewed and tested to the standards of that
+  userspace project. For e.g. mesa this means piglit testcases and review on the
+  mailing list. This is again to ensure that the new interface actually gets the
+  job done.
+
+- The userspace patches must be against the canonical upstream, not some vendor
+  fork. This is to make sure that no one cheats on the review and testing
+  requirements by doing a quick fork.
+
+- The kernel patch can only be merged after all the above requirements are met,
+  but it **must** be merged **before** the userspace patches land. uAPI always flows
+  from the kernel, doing things the other way round risks divergence of the uAPI
+  definitions and header files.
+
+These are fairly steep requirements, but have grown out from years of shared
+pain and experience with uAPI added hastily, and almost always regretted about
+just as fast. GFX devices change really fast, requiring a paradigm shift and
+entire new set of uAPI interfaces every few years at least. Together with the
+Linux kernel's guarantee to keep existing userspace running for 10+ years this
+is already rather painful for the DRM subsystem, with multiple different uAPIs
+for the same thing co-existing. If we add a few more complete mistakes into the
+mix every year it would be entirely unmanageable.
+
 Render nodes
 ============
 
@@ -86,6 +156,43 @@
 visible to user-space and accessible beyond open-file boundaries, they
 cannot support render nodes.
 
+Validating changes with IGT
+===========================
+
+There's a collection of tests that aims to cover the whole functionality of
+DRM drivers and that can be used to check that changes to DRM drivers or the
+core don't regress existing functionality. This test suite is called IGT and
+its code can be found in https://cgit.freedesktop.org/drm/igt-gpu-tools/.
+
+To build IGT, start by installing its build dependencies. In Debian-based
+systems::
+
+	# apt-get build-dep intel-gpu-tools
+
+And in Fedora-based systems::
+
+	# dnf builddep intel-gpu-tools
+
+Then clone the repository::
+
+	$ git clone git://anongit.freedesktop.org/drm/igt-gpu-tools
+
+Configure the build system and start the build::
+
+	$ cd igt-gpu-tools && ./autogen.sh && make -j6
+
+Download the piglit dependency::
+
+	$ ./scripts/run-tests.sh -d
+
+And run the tests::
+
+	$ ./scripts/run-tests.sh -t kms -t core -s
+
+run-tests.sh is a wrapper around piglit that will execute the tests matching
+the -t options. A report in HTML format will be available in
+./results/html/index.html. Results can be compared with piglit.
+
 VBlank event handling
 =====================
 
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 2fe5952..87aaffc 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -70,6 +70,9 @@
 .. kernel-doc:: drivers/gpu/drm/i915/intel_frontbuffer.c
    :doc: frontbuffer tracking
 
+.. kernel-doc:: drivers/gpu/drm/i915/intel_frontbuffer.h
+   :internal:
+
 .. kernel-doc:: drivers/gpu/drm/i915/intel_frontbuffer.c
    :internal:
 
diff --git a/Documentation/gpu/index.rst b/Documentation/gpu/index.rst
index 5ff3d2b..be0dafc 100644
--- a/Documentation/gpu/index.rst
+++ b/Documentation/gpu/index.rst
@@ -12,6 +12,7 @@
    drm-uapi
    i915
    vga-switcheroo
+   vgaarbiter
 
 .. only::  subproject
 
diff --git a/Documentation/gpu/kms-properties.csv b/Documentation/gpu/kms-properties.csv
index 4c5ce3e..981873a 100644
--- a/Documentation/gpu/kms-properties.csv
+++ b/Documentation/gpu/kms-properties.csv
@@ -1,23 +1,10 @@
 Owner Module/Drivers,Group,Property Name,Type,Property Values,Object attached,Description/Restrictions
-DRM,Generic,“rotation”,BITMASK,"{ 0, ""rotate-0"" }, { 1, ""rotate-90"" }, { 2, ""rotate-180"" }, { 3, ""rotate-270"" }, { 4, ""reflect-x"" }, { 5, ""reflect-y"" }","CRTC, Plane",rotate-(degrees) rotates the image by the specified amount in degrees in counter clockwise direction. reflect-x and reflect-y reflects the image along the specified axis prior to rotation
 ,,“scaling mode”,ENUM,"{ ""None"", ""Full"", ""Center"", ""Full aspect"" }",Connector,"Supported by: amdgpu, gma500, i915, nouveau and radeon."
 ,Connector,“EDID”,BLOB | IMMUTABLE,0,Connector,Contains id of edid blob ptr object.
 ,,“DPMS”,ENUM,"{ “On”, “Standby”, “Suspend”, “Off” }",Connector,Contains DPMS operation mode value.
 ,,“PATH”,BLOB | IMMUTABLE,0,Connector,Contains topology path to a connector.
 ,,“TILE”,BLOB | IMMUTABLE,0,Connector,Contains tiling information for a connector.
 ,,“CRTC_ID”,OBJECT,DRM_MODE_OBJECT_CRTC,Connector,CRTC that connector is attached to (atomic)
-,Plane,“type”,ENUM | IMMUTABLE,"{ ""Overlay"", ""Primary"", ""Cursor"" }",Plane,Plane type
-,,“SRC_X”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source x coordinate in 16.16 fixed point (atomic)
-,,“SRC_Y”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source y coordinate in 16.16 fixed point (atomic)
-,,“SRC_W”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source width in 16.16 fixed point (atomic)
-,,“SRC_H”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source height in 16.16 fixed point (atomic)
-,,“CRTC_X”,SIGNED_RANGE,"Min=INT_MIN, Max=INT_MAX",Plane,Scanout CRTC (destination) x coordinate (atomic)
-,,“CRTC_Y”,SIGNED_RANGE,"Min=INT_MIN, Max=INT_MAX",Plane,Scanout CRTC (destination) y coordinate (atomic)
-,,“CRTC_W”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout CRTC (destination) width (atomic)
-,,“CRTC_H”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout CRTC (destination) height (atomic)
-,,“FB_ID”,OBJECT,DRM_MODE_OBJECT_FB,Plane,Scanout framebuffer (atomic)
-,,“CRTC_ID”,OBJECT,DRM_MODE_OBJECT_CRTC,Plane,CRTC that plane is attached to (atomic)
-,,“zpos”,RANGE,"Min=0, Max=UINT_MAX","Plane,Z-order of the plane.Planes with higher Z-order values are displayed on top, planes with identical Z-order values are display in an undefined order"
 ,DVI-I,“subconnector”,ENUM,"{ “Unknown”, “DVI-D”, “DVI-A” }",Connector,TBD
 ,,“select subconnector”,ENUM,"{ “Automatic”, “DVI-D”, “DVI-A” }",Connector,TBD
 ,TV,“subconnector”,ENUM,"{ ""Unknown"", ""Composite"", ""SVIDEO"", ""Component"", ""SCART"" }",Connector,TBD
@@ -36,12 +23,6 @@
 ,Virtual GPU,“suggested X”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an X offset for a connector
 ,,“suggested Y”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an Y offset for a connector
 ,Optional,"""aspect ratio""",ENUM,"{ ""None"", ""4:3"", ""16:9"" }",Connector,TDB
-,,“dirty”,ENUM | IMMUTABLE,"{ ""Off"", ""On"", ""Annotate"" }",Connector,TBD
-,,“DEGAMMA_LUT”,BLOB,0,CRTC,DRM property to set the degamma lookup table (LUT) mapping pixel data from the framebuffer before it is given to the transformation matrix. The data is an interpreted as an array of struct drm_color_lut elements. Hardware might choose not to use the full precision of the LUT elements nor use all the elements of the LUT (for example the hardware might choose to interpolate between LUT[0] and LUT[4]).
-,,“DEGAMMA_LUT_SIZE”,RANGE | IMMUTABLE,"Min=0, Max=UINT_MAX",CRTC,DRM property to gives the size of the lookup table to be set on the DEGAMMA_LUT property (the size depends on the underlying hardware).
-,,“CTM”,BLOB,0,CRTC,DRM property to set the current transformation matrix (CTM) apply to pixel data after the lookup through the degamma LUT and before the lookup through the gamma LUT. The data is an interpreted as a struct drm_color_ctm.
-,,“GAMMA_LUT”,BLOB,0,CRTC,DRM property to set the gamma lookup table (LUT) mapping pixel data after to the transformation matrix to data sent to the connector. The data is an interpreted as an array of struct drm_color_lut elements. Hardware might choose not to use the full precision of the LUT elements nor use all the elements of the LUT (for example the hardware might choose to interpolate between LUT[0] and LUT[4]).
-,,“GAMMA_LUT_SIZE”,RANGE | IMMUTABLE,"Min=0, Max=UINT_MAX",CRTC,DRM property to gives the size of the lookup table to be set on the GAMMA_LUT property (the size depends on the underlying hardware).
 i915,Generic,"""Broadcast RGB""",ENUM,"{ ""Automatic"", ""Full"", ""Limited 16:235"" }",Connector,"When this property is set to Limited 16:235 and CTM is set, the hardware will be programmed with the result of the multiplication of CTM by the limited range matrix to ensure the pixels normaly in the range 0..1.0 are remapped to the range 16/255..235/255."
 ,,“audio”,ENUM,"{ ""force-dvi"", ""off"", ""auto"", ""on"" }",Connector,TBD
 ,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
@@ -95,7 +76,6 @@
 ,,"""contrast""",RANGE,"Min=0, Max=0x7fff",Plane,TBD
 ,,"""saturation""",RANGE,"Min=0, Max=0x7fff",Plane,TBD
 exynos,CRTC,“mode”,ENUM,"{ ""normal"", ""blank"" }",CRTC,TBD
-,Overlay,“zpos”,RANGE,"Min=0, Max=MAX_PLANE-1",Plane,TBD
 i2c/ch7006_drv,Generic,“scale”,RANGE,"Min=0, Max=2",Connector,TBD
 ,TV,“mode”,ENUM,"{ ""PAL"", ""PAL-M"",""PAL-N""}, ”PAL-Nc"" , ""PAL-60"", ""NTSC-M"", ""NTSC-J"" }",Connector,TBD
 nouveau,NV10 Overlay,"""colorkey""",RANGE,"Min=0, Max=0x01ffffff",Plane,TBD
@@ -126,4 +106,3 @@
 ,FMT Dithering,“dither”,ENUM,"{ ""off"", ""on"" }",Connector,TBD
 rcar-du,Generic,"""alpha""",RANGE,"Min=0, Max=255",Plane,TBD
 ,,"""colorkey""",RANGE,"Min=0, Max=0x01ffffff",Plane,TBD
-,,"""zpos""",RANGE,"Min=1, Max=7",Plane,TBD
diff --git a/Documentation/vgaarbiter.txt b/Documentation/gpu/vgaarbiter.rst
similarity index 64%
rename from Documentation/vgaarbiter.txt
rename to Documentation/gpu/vgaarbiter.rst
index 014423e..0b41b05 100644
--- a/Documentation/vgaarbiter.txt
+++ b/Documentation/gpu/vgaarbiter.rst
@@ -1,4 +1,4 @@
-
+===========
 VGA Arbiter
 ===========
 
@@ -19,21 +19,8 @@
 is needed to control the sharing of these resources. This document introduces
 the operation of the VGA arbiter implemented for the Linux kernel.
 
-----------------------------------------------------------------------------
-
-I.  Details and Theory of Operation
-        I.1 vgaarb
-        I.2 libpciaccess
-        I.3 xf86VGAArbiter (X server implementation)
-II. Credits
-III.References
-
-
-I. Details and Theory of Operation
-==================================
-
-I.1 vgaarb
-----------
+vgaarb kernel/userspace ABI
+---------------------------
 
 The vgaarb is a module of the Linux Kernel. When it is initially loaded, it
 scans all PCI devices and adds the VGA ones inside the arbitration. The
@@ -44,42 +31,52 @@
 The kernel exports a char device interface (/dev/vga_arbiter) to the clients,
 which has the following semantics:
 
- open       : open user instance of the arbiter. By default, it's attached to
-              the default VGA device of the system.
+open
+        Opens a user instance of the arbiter. By default, it's attached to the
+        default VGA device of the system.
 
- close      : close user instance. Release locks made by the user
+close
+        Close a user instance. Release locks made by the user
 
- read       : return a string indicating the status of the target like:
+read
+        Return a string indicating the status of the target like:
 
-              "<card_ID>,decodes=<io_state>,owns=<io_state>,locks=<io_state> (ic,mc)"
+        "<card_ID>,decodes=<io_state>,owns=<io_state>,locks=<io_state> (ic,mc)"
 
-              An IO state string is of the form {io,mem,io+mem,none}, mc and
-              ic are respectively mem and io lock counts (for debugging/
-              diagnostic only). "decodes" indicate what the card currently
-              decodes, "owns" indicates what is currently enabled on it, and
-              "locks" indicates what is locked by this card. If the card is
-              unplugged, we get "invalid" then for card_ID and an -ENODEV
-              error is returned for any command until a new card is targeted.
+        An IO state string is of the form {io,mem,io+mem,none}, mc and
+        ic are respectively mem and io lock counts (for debugging/
+        diagnostic only). "decodes" indicate what the card currently
+        decodes, "owns" indicates what is currently enabled on it, and
+        "locks" indicates what is locked by this card. If the card is
+        unplugged, we get "invalid" then for card_ID and an -ENODEV
+        error is returned for any command until a new card is targeted.
 
 
- write       : write a command to the arbiter. List of commands:
+write
+        Write a command to the arbiter. List of commands:
 
-  target <card_ID>   : switch target to card <card_ID> (see below)
-  lock <io_state>    : acquires locks on target ("none" is an invalid io_state)
-  trylock <io_state> : non-blocking acquire locks on target (returns EBUSY if
-                       unsuccessful)
-  unlock <io_state>  : release locks on target
-  unlock all         : release all locks on target held by this user (not
-                       implemented yet)
-  decodes <io_state> : set the legacy decoding attributes for the card
+        target <card_ID>
+                switch target to card <card_ID> (see below)
+        lock <io_state>
+                acquires locks on target ("none" is an invalid io_state)
+        trylock <io_state>
+                non-blocking acquire locks on target (returns EBUSY if
+                unsuccessful)
+        unlock <io_state>
+                release locks on target
+        unlock all
+                release all locks on target held by this user (not implemented
+                yet)
+        decodes <io_state>
+                set the legacy decoding attributes for the card
 
-  poll               : event if something changes on any card (not just the
-                       target)
+        poll
+                event if something changes on any card (not just the target)
 
-  card_ID is of the form "PCI:domain:bus:dev.fn". It can be set to "default"
-  to go back to the system default card (TODO: not implemented yet). Currently,
-  only PCI is supported as a prefix, but the userland API may support other bus
-  types in the future, even if the current kernel implementation doesn't.
+        card_ID is of the form "PCI:domain:bus:dev.fn". It can be set to "default"
+        to go back to the system default card (TODO: not implemented yet). Currently,
+        only PCI is supported as a prefix, but the userland API may support other bus
+        types in the future, even if the current kernel implementation doesn't.
 
 Note about locks:
 
@@ -97,29 +94,35 @@
 There is also an in-kernel API of the arbiter in case DRM, vgacon, or other
 drivers want to use it.
 
+In-kernel interface
+-------------------
 
-I.2 libpciaccess
-----------------
+.. kernel-doc:: include/linux/vgaarb.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/vga/vgaarb.c
+   :export:
+
+libpciaccess
+------------
 
 To use the vga arbiter char device it was implemented an API inside the
 libpciaccess library. One field was added to struct pci_device (each device
-on the system):
+on the system)::
 
     /* the type of resource decoded by the device */
     int vgaarb_rsrc;
 
-Besides it, in pci_system were added:
+Besides it, in pci_system were added::
 
     int vgaarb_fd;
     int vga_count;
     struct pci_device *vga_target;
     struct pci_device *vga_default_dev;
 
-
 The vga_count is used to track how many cards are being arbitrated, so for
 instance, if there is only one card, then it can completely escape arbitration.
 
-
 These functions below acquire VGA resources for the given card and mark those
 resources as locked. If the resources requested are "normal" (and not legacy)
 resources, the arbiter will first check whether the card is doing legacy
@@ -136,44 +139,44 @@
 succeeds.  vga_arb_trylock() will return (-EBUSY) instead of blocking. Nested
 calls are supported (a per-resource counter is maintained).
 
+Set the target device of this client. ::
 
-Set the target device of this client.
     int  pci_device_vgaarb_set_target   (struct pci_device *dev);
 
-
 For instance, in x86 if two devices on the same bus want to lock different
 resources, both will succeed (lock). If devices are in different buses and
-trying to lock different resources, only the first who tried succeeds.
+trying to lock different resources, only the first who tried succeeds. ::
+
     int  pci_device_vgaarb_lock         (void);
     int  pci_device_vgaarb_trylock      (void);
 
-Unlock resources of device.
+Unlock resources of device. ::
+
     int  pci_device_vgaarb_unlock       (void);
 
 Indicates to the arbiter if the card decodes legacy VGA IOs, legacy VGA
 Memory, both, or none. All cards default to both, the card driver (fbdev for
 example) should tell the arbiter if it has disabled legacy decoding, so the
 card can be left out of the arbitration process (and can be safe to take
-interrupts at any time.
+interrupts at any time. ::
+
     int  pci_device_vgaarb_decodes      (int new_vgaarb_rsrc);
 
-Connects to the arbiter device, allocates the struct
+Connects to the arbiter device, allocates the struct ::
+
     int  pci_device_vgaarb_init         (void);
 
-Close the connection
+Close the connection ::
+
     void pci_device_vgaarb_fini         (void);
 
-
-I.3 xf86VGAArbiter (X server implementation)
---------------------------------------------
-
-(TODO)
+xf86VGAArbiter (X server implementation)
+----------------------------------------
 
 X server basically wraps all the functions that touch VGA registers somehow.
 
-
-II. Credits
-===========
+References
+----------
 
 Benjamin Herrenschmidt (IBM?) started this work when he discussed such design
 with the Xorg community in 2005 [1, 2]. In the end of 2007, Paulo Zanoni and
@@ -182,11 +185,7 @@
 implementation of the user space side [3]. Now (2009) Tiago Vignatti and Dave
 Airlie finally put this work in shape and queued to Jesse Barnes' PCI tree.
 
-
-III. References
-==============
-
-[0] http://cgit.freedesktop.org/xorg/xserver/commit/?id=4b42448a2388d40f257774fbffdccaea87bd0347
-[1] http://lists.freedesktop.org/archives/xorg/2005-March/006663.html
-[2] http://lists.freedesktop.org/archives/xorg/2005-March/006745.html
-[3] http://lists.freedesktop.org/archives/xorg/2007-October/029507.html
+0) http://cgit.freedesktop.org/xorg/xserver/commit/?id=4b42448a2388d40f257774fbffdccaea87bd0347
+1) http://lists.freedesktop.org/archives/xorg/2005-March/006663.html
+2) http://lists.freedesktop.org/archives/xorg/2005-March/006745.html
+3) http://lists.freedesktop.org/archives/xorg/2007-October/029507.html
diff --git a/Documentation/ia64/Makefile b/Documentation/ia64/Makefile
deleted file mode 100644
index d493163..0000000
--- a/Documentation/ia64/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# List of programs to build
-hostprogs-y := aliasing-test
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
diff --git a/Documentation/index.rst b/Documentation/index.rst
index d9ccb94..c53d089 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -17,6 +17,7 @@
    driver-api/index
    media/index
    gpu/index
+   80211/index
 
 Indices and tables
 ==================
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index 1fec113..8d1341c 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -319,3 +319,60 @@
                otherwise byte 0 bit 4 must be set and byte 0/4/5 are
                in NEW fmt
  F:         Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
+
+
+ALPS Absolute Mode - Protocol Version 8
+---------------------------------------
+
+Spoken by SS4 (73 03 14) and SS5 (73 03 28) hardware.
+
+The packet type is given by the APD field, bits 4-5 of byte 3.
+
+Touchpad packet (APD = 0x2):
+
+           b7   b6   b5   b4   b3   b2   b1   b0
+ byte 0:  SWM  SWR  SWL    1    1    0    0   X7
+ byte 1:    0   X6   X5   X4   X3   X2   X1   X0
+ byte 2:    0   Y6   Y5   Y4   Y3   Y2   Y1   Y0
+ byte 3:    0  T&P    1    0    1    0    0   Y7
+ byte 4:    0   Z6   Z5   Z4   Z3   Z2   Z1   Z0
+ byte 5:    0    0    0    0    0    0    0    0
+
+SWM, SWR, SWL: Middle, Right, and Left button states
+
+Touchpad 1 Finger packet (APD = 0x0):
+
+           b7   b6   b5   b4   b3   b2   b1   b0
+ byte 0:  SWM  SWR  SWL    1    1   X2   X1   X0
+ byte 1:   X9   X8   X7    1   X6   X5   X4   X3
+ byte 2:    0  X11  X10  LFB   Y3   Y2   Y1   Y0
+ byte 3:   Y5   Y4    0    0    1 TAPF2 TAPF1 TAPF0
+ byte 4:  Zv7  Y11  Y10    1   Y9   Y8   Y7   Y6
+ byte 5:  Zv6  Zv5  Zv4    0  Zv3  Zv2  Zv1  Zv0
+
+TAPF: ???
+LFB:  ???
+
+Touchpad 2 Finger packet (APD = 0x1):
+
+           b7   b6   b5   b4   b3   b2   b1   b0
+ byte 0:  SWM  SWR  SWL    1    1  AX6  AX5  AX4
+ byte 1: AX11 AX10  AX9  AX8  AX7  AZ1  AY4  AZ0
+ byte 2: AY11 AY10  AY9  CONT AY8  AY7  AY6  AY5
+ byte 3:    0    0    0    1    1  BX6  BX5  BX4
+ byte 4: BX11 BX10  BX9  BX8  BX7  BZ1  BY4  BZ0
+ byte 5: BY11 BY10  BY9    0  BY8  BY7  BY5  BY5
+
+CONT: A 3-or-4 Finger packet is to follow
+
+Touchpad 3-or-4 Finger packet (APD = 0x3):
+
+           b7   b6   b5   b4   b3   b2   b1   b0
+ byte 0:  SWM  SWR  SWL    1    1  AX6  AX5  AX4
+ byte 1: AX11 AX10  AX9  AX8  AX7  AZ1  AY4  AZ0
+ byte 2: AY11 AY10  AY9  OVF  AY8  AY7  AY6  AY5
+ byte 3:    0    0    1    1    1  BX6  BX5  BX4
+ byte 4: BX11 BX10  BX9  BX8  BX7  BZ1  BY4  BZ0
+ byte 5: BY11 BY10  BY9    0  BY8  BY7  BY5  BY5
+
+OVF: 5th finger detected
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 385a5ef..9b9c479 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -41,6 +41,7 @@
 	   --- 6.8 Custom kbuild commands
 	   --- 6.9 Preprocessing linker scripts
 	   --- 6.10 Generic header files
+	   --- 6.11 Post-link pass
 
 	=== 7 Kbuild syntax for exported headers
 		--- 7.1 header-y
@@ -1237,6 +1238,21 @@
 	to list the file in the Kbuild file.
 	See "7.4 generic-y" for further info on syntax etc.
 
+--- 6.11 Post-link pass
+
+	If the file arch/xxx/Makefile.postlink exists, this makefile
+	will be invoked for post-link objects (vmlinux and modules.ko)
+	for architectures to run post-link passes on. Must also handle
+	the clean target.
+
+	This pass runs after kallsyms generation. If the architecture
+	needs to modify symbol locations, rather than manipulate the
+	kallsyms, it may be easier to add another postlink target for
+	.tmp_vmlinux? targets to be called from link-vmlinux.sh.
+
+	For example, powerpc uses this to check relocation sanity of
+	the linked vmlinux file.
+
 === 7 Kbuild syntax for exported headers
 
 The kernel includes a set of headers that is exported to userspace.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index ec8d814..37babf9 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -33,6 +33,37 @@
 Double-quotes can be used to protect spaces in values, e.g.:
 	param="spaces in here"
 
+cpu lists:
+----------
+
+Some kernel parameters take a list of CPUs as a value, e.g.  isolcpus,
+nohz_full, irqaffinity, rcu_nocbs.  The format of this list is:
+
+	<cpu number>,...,<cpu number>
+
+or
+
+	<cpu number>-<cpu number>
+	(must be a positive range in ascending order)
+
+or a mixture
+
+<cpu number>,...,<cpu number>-<cpu number>
+
+Note that for the special case of a range one can split the range into equal
+sized groups and for each group use some amount from the beginning of that
+group:
+
+	<cpu number>-cpu number>:<used size>/<group size>
+
+For example one can add to the command line following parameter:
+
+	isolcpus=1,2,10-20,100-2000:2/25
+
+where the final item represents CPUs 100,101,125,126,150,151,...
+
+
+
 This document may not be entirely up to date and comprehensive. The command
 "modinfo -p ${modulename}" shows a current list of all parameters of a loadable
 module. Loadable modules, after being loaded into the running kernel, also
@@ -1480,7 +1511,14 @@
 	i8042.nopnp	[HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX
 			     controllers
 	i8042.notimeout	[HW] Ignore timeout condition signalled by controller
-	i8042.reset	[HW] Reset the controller during init and cleanup
+	i8042.reset	[HW] Reset the controller during init, cleanup and
+			     suspend-to-ram transitions, only during s2r
+			     transitions, or never reset
+			Format: { 1 | Y | y | 0 | N | n }
+			1, Y, y: always reset controller
+			0, N, n: don't ever reset controller
+			Default: only on s2r transitions on x86; most other
+			architectures force reset to be always executed
 	i8042.unlock	[HW] Unlock (ignore) the keylock
 	i8042.kbdreset  [HW] Reset device connected to KBD port
 
@@ -1666,6 +1704,11 @@
 
 	initrd=		[BOOT] Specify the location of the initial ramdisk
 
+	init_pkru=	[x86] Specify the default memory protection keys rights
+			register contents for all processes.  0x55555554 by
+			default (disallow access to all but pkey 0).  Can
+			override in debugfs after boot.
+
 	inport.irq=	[HW] Inport (ATI XL and Microsoft) busmouse driver
 			Format: <irq>
 
@@ -1784,13 +1827,7 @@
 			See Documentation/filesystems/nfs/nfsroot.txt.
 
 	irqaffinity=	[SMP] Set the default irq affinity mask
-			Format:
-			<cpu number>,...,<cpu number>
-			or
-			<cpu number>-<cpu number>
-			(must be a positive range in ascending order)
-			or a mixture
-			<cpu number>,...,<cpu number>-<cpu number>
+			The argument is a cpu list, as described above.
 
 	irqfixup	[HW]
 			When an interrupt is not handled search all handlers
@@ -1807,13 +1844,7 @@
 			Format: <RDP>,<reset>,<pci_scan>,<verbosity>
 
 	isolcpus=	[KNL,SMP] Isolate CPUs from the general scheduler.
-			Format:
-			<cpu number>,...,<cpu number>
-			or
-			<cpu number>-<cpu number>
-			(must be a positive range in ascending order)
-			or a mixture
-			<cpu number>,...,<cpu number>-<cpu number>
+			The argument is a cpu list, as described above.
 
 			This option can be used to specify one or more CPUs
 			to isolate from the general SMP balancing and scheduling
@@ -2446,6 +2477,11 @@
 	nfsrootdebug	[NFS] enable nfsroot debugging messages.
 			See Documentation/filesystems/nfs/nfsroot.txt.
 
+	nfs.callback_nr_threads=
+			[NFSv4] set the total number of threads that the
+			NFS client will assign to service NFSv4 callback
+			requests.
+
 	nfs.callback_tcpport=
 			[NFS] set the TCP port on which the NFSv4 callback
 			channel should listen.
@@ -2469,6 +2505,13 @@
 			of returning the full 64-bit number.
 			The default is to return 64-bit inode numbers.
 
+	nfs.max_session_cb_slots=
+			[NFSv4.1] Sets the maximum number of session
+			slots the client will assign to the callback
+			channel. This determines the maximum number of
+			callbacks the client will process in parallel for
+			a particular server.
+
 	nfs.max_session_slots=
 			[NFSv4.1] Sets the maximum number of session slots
 			the client will attempt to negotiate with the server.
@@ -2675,6 +2718,7 @@
 			Default: on
 
 	nohz_full=	[KNL,BOOT]
+			The argument is a cpu list, as described above.
 			In kernels built with CONFIG_NO_HZ_FULL=y, set
 			the specified list of CPUs whose tick will be stopped
 			whenever possible. The boot CPU will be forced outside
@@ -3280,6 +3324,8 @@
 			See Documentation/blockdev/ramdisk.txt.
 
 	rcu_nocbs=	[KNL]
+			The argument is a cpu list, as described above.
+
 			In kernels built with CONFIG_RCU_NOCB_CPU=y, set
 			the specified list of CPUs to be no-callback CPUs.
 			Invocation of these CPUs' RCU callbacks will
diff --git a/Documentation/kselftest.txt b/Documentation/kselftest.txt
index 979eaca..54bee77 100644
--- a/Documentation/kselftest.txt
+++ b/Documentation/kselftest.txt
@@ -1,8 +1,9 @@
 Linux Kernel Selftests
 
 The kernel contains a set of "self tests" under the tools/testing/selftests/
-directory. These are intended to be small unit tests to exercise individual
-code paths in the kernel.
+directory. These are intended to be small tests to exercise individual code
+paths in the kernel. Tests are intended to be run after building, installing
+and booting a kernel.
 
 On some systems, hot-plug tests could hang forever waiting for cpu and
 memory to be ready to be offlined. A special hot-plug target is created
diff --git a/Documentation/laptops/00-INDEX b/Documentation/laptops/00-INDEX
index 7c0ac2a..86169dc 100644
--- a/Documentation/laptops/00-INDEX
+++ b/Documentation/laptops/00-INDEX
@@ -1,13 +1,9 @@
 00-INDEX
 	- This file
-Makefile
-	- Makefile for building dslm example program.
 asus-laptop.txt
 	- information on the Asus Laptop Extras driver.
 disk-shock-protection.txt
 	- information on hard disk shock protection.
-dslm.c
-	- Simple Disk Sleep Monitor program
 laptop-mode.txt
 	- how to conserve battery power using laptop-mode.
 sony-laptop.txt
diff --git a/Documentation/laptops/Makefile b/Documentation/laptops/Makefile
deleted file mode 100644
index 0abe44f..0000000
--- a/Documentation/laptops/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# List of programs to build
-hostprogs-y := dslm
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
diff --git a/Documentation/laptops/laptop-mode.txt b/Documentation/laptops/laptop-mode.txt
index 4ebbfc3..19276f5 100644
--- a/Documentation/laptops/laptop-mode.txt
+++ b/Documentation/laptops/laptop-mode.txt
@@ -779,4 +779,4 @@
 ---------------
 
 Bartek Kania submitted this, it can be used to measure how much time your disk
-spends spun up/down.  See Documentation/laptops/dslm.c
+spends spun up/down.  See tools/laptop/dslm/dslm.c
diff --git a/Documentation/media/audio.h.rst.exceptions b/Documentation/media/audio.h.rst.exceptions
index 8330edc..f40f3cb 100644
--- a/Documentation/media/audio.h.rst.exceptions
+++ b/Documentation/media/audio.h.rst.exceptions
@@ -2,7 +2,7 @@
 ignore define _DVBAUDIO_H_
 
 # Typedef pointing to structs
-replace typedef audio_karaoke_t audio-karaoke
+replace typedef audio_karaoke_t :c:type:`audio_karaoke`
 
 # Undocumented audio caps, as this is a deprecated API anyway
 ignore define AUDIO_CAP_DTS
@@ -16,5 +16,5 @@
 ignore define AUDIO_CAP_AC3
 
 # some typedefs should point to struct/enums
-replace typedef audio_mixer_t audio-mixer
-replace typedef audio_status_t audio-status
+replace typedef audio_mixer_t :c:type:`audio_mixer`
+replace typedef audio_status_t :c:type:`audio_status`
diff --git a/Documentation/media/ca.h.rst.exceptions b/Documentation/media/ca.h.rst.exceptions
index 09c13be..d7c9fed 100644
--- a/Documentation/media/ca.h.rst.exceptions
+++ b/Documentation/media/ca.h.rst.exceptions
@@ -2,23 +2,23 @@
 ignore define _DVBCA_H_
 
 # struct ca_slot_info defines
-replace define CA_CI ca-slot-info
-replace define CA_CI_LINK ca-slot-info
-replace define CA_CI_PHYS ca-slot-info
-replace define CA_DESCR ca-slot-info
-replace define CA_SC ca-slot-info
-replace define CA_CI_MODULE_PRESENT ca-slot-info
-replace define CA_CI_MODULE_READY ca-slot-info
+replace define CA_CI :c:type:`ca_slot_info`
+replace define CA_CI_LINK :c:type:`ca_slot_info`
+replace define CA_CI_PHYS :c:type:`ca_slot_info`
+replace define CA_DESCR :c:type:`ca_slot_info`
+replace define CA_SC :c:type:`ca_slot_info`
+replace define CA_CI_MODULE_PRESENT :c:type:`ca_slot_info`
+replace define CA_CI_MODULE_READY :c:type:`ca_slot_info`
 
 # struct ca_descr_info defines
-replace define CA_ECD ca-descr-info
-replace define CA_NDS ca-descr-info
-replace define CA_DSS ca-descr-info
+replace define CA_ECD :c:type:`ca_descr_info`
+replace define CA_NDS :c:type:`ca_descr_info`
+replace define CA_DSS :c:type:`ca_descr_info`
 
 # some typedefs should point to struct/enums
-replace typedef ca_pid_t ca-pid
-replace typedef ca_slot_info_t ca-slot-info
-replace typedef ca_descr_info_t ca-descr-info
-replace typedef ca_caps_t ca-caps
-replace typedef ca_msg_t ca-msg
-replace typedef ca_descr_t ca-descr
+replace typedef ca_pid_t :c:type:`ca_pid`
+replace typedef ca_slot_info_t :c:type:`ca_slot_info`
+replace typedef ca_descr_info_t :c:type:`ca_descr_info`
+replace typedef ca_caps_t :c:type:`ca_caps`
+replace typedef ca_msg_t :c:type:`ca_msg`
+replace typedef ca_descr_t :c:type:`ca_descr`
diff --git a/Documentation/media/cec.h.rst.exceptions b/Documentation/media/cec.h.rst.exceptions
index b793394..b168753 100644
--- a/Documentation/media/cec.h.rst.exceptions
+++ b/Documentation/media/cec.h.rst.exceptions
@@ -1,12 +1,6 @@
 # Ignore header name
 ignore define _CEC_UAPI_H
 
-# Rename some symbols, to avoid namespace conflicts
-replace struct cec_event_state_change cec-event-state-change_s
-replace struct cec_event_lost_msgs cec-event-lost-msgs_s
-replace enum cec_mode_initiator cec-mode-initiator_e
-replace enum cec_mode_follower cec-mode-follower_e
-
 # define macros to ignore
 
 ignore define CEC_MAX_MSG_SIZE
diff --git a/Documentation/media/conf_nitpick.py b/Documentation/media/conf_nitpick.py
index 11beac2..480d548 100644
--- a/Documentation/media/conf_nitpick.py
+++ b/Documentation/media/conf_nitpick.py
@@ -21,10 +21,14 @@
     ("c:func", "clock_gettime"),
     ("c:func", "close"),
     ("c:func", "container_of"),
+    ("c:func", "copy_from_user"),
+    ("c:func", "copy_to_user"),
     ("c:func", "determine_valid_ioctls"),
     ("c:func", "ERR_PTR"),
+    ("c:func", "i2c_new_device"),
     ("c:func", "ioctl"),
     ("c:func", "IS_ERR"),
+    ("c:func", "KERNEL_VERSION"),
     ("c:func", "mmap"),
     ("c:func", "open"),
     ("c:func", "pci_name"),
@@ -36,58 +40,70 @@
     ("c:func", "struct fd_set"),
     ("c:func", "struct pollfd"),
     ("c:func", "usb_make_path"),
+    ("c:func", "wait_finish"),
+    ("c:func", "wait_prepare"),
     ("c:func", "write"),
+
     ("c:type", "atomic_t"),
     ("c:type", "bool"),
+    ("c:type", "boolean"),
     ("c:type", "buf_queue"),
     ("c:type", "device"),
     ("c:type", "device_driver"),
     ("c:type", "device_node"),
     ("c:type", "enum"),
+    ("c:type", "fd"),
+    ("c:type", "fd_set"),
     ("c:type", "file"),
     ("c:type", "i2c_adapter"),
     ("c:type", "i2c_board_info"),
     ("c:type", "i2c_client"),
+    ("c:type", "int16_t"),
     ("c:type", "ktime_t"),
     ("c:type", "led_classdev_flash"),
     ("c:type", "list_head"),
     ("c:type", "lock_class_key"),
     ("c:type", "module"),
     ("c:type", "mutex"),
+    ("c:type", "NULL"),
+    ("c:type", "off_t"),
     ("c:type", "pci_dev"),
     ("c:type", "pdvbdev"),
+    ("c:type", "poll_table"),
+    ("c:type", "platform_device"),
+    ("c:type", "pollfd"),
     ("c:type", "poll_table_struct"),
     ("c:type", "s32"),
     ("c:type", "s64"),
     ("c:type", "sd"),
+    ("c:type", "size_t"),
     ("c:type", "spi_board_info"),
     ("c:type", "spi_device"),
     ("c:type", "spi_master"),
-    ("c:type", "struct fb_fix_screeninfo"),
-    ("c:type", "struct pollfd"),
-    ("c:type", "struct timeval"),
-    ("c:type", "struct video_capability"),
+    ("c:type", "ssize_t"),
+    ("c:type", "fb_fix_screeninfo"),
+    ("c:type", "pollfd"),
+    ("c:type", "timeval"),
+    ("c:type", "video_capability"),
+    ("c:type", "timeval"),
+    ("c:type", "__u16"),
     ("c:type", "u16"),
+    ("c:type", "__u32"),
     ("c:type", "u32"),
+    ("c:type", "__u64"),
     ("c:type", "u64"),
     ("c:type", "u8"),
+    ("c:type", "uint16_t"),
+    ("c:type", "uint32_t"),
     ("c:type", "union"),
+    ("c:type", "__user"),
     ("c:type", "usb_device"),
+    ("c:type", "usb_interface"),
+    ("c:type", "v4l2_std_id"),
+    ("c:type", "video_system_t"),
+    ("c:type", "vm_area_struct"),
 
-    ("cpp:type", "boolean"),
-    ("cpp:type", "fd"),
-    ("cpp:type", "fd_set"),
-    ("cpp:type", "int16_t"),
-    ("cpp:type", "NULL"),
-    ("cpp:type", "off_t"),
-    ("cpp:type", "pollfd"),
-    ("cpp:type", "size_t"),
-    ("cpp:type", "ssize_t"),
-    ("cpp:type", "timeval"),
-    ("cpp:type", "__u16"),
-    ("cpp:type", "__u32"),
-    ("cpp:type", "__u64"),
-    ("cpp:type", "uint16_t"),
-    ("cpp:type", "uint32_t"),
-    ("cpp:type", "video_system_t"),
+    # Opaque structures
+
+    ("c:type", "v4l2_m2m_dev"),
 ]
diff --git a/Documentation/media/dmx.h.rst.exceptions b/Documentation/media/dmx.h.rst.exceptions
index 8200653..2fdb458 100644
--- a/Documentation/media/dmx.h.rst.exceptions
+++ b/Documentation/media/dmx.h.rst.exceptions
@@ -4,29 +4,29 @@
 # Ignore limit constants
 ignore define DMX_FILTER_SIZE
 
-# dmx-pes-type-t enum symbols
-replace enum dmx_ts_pes dmx-pes-type-t
-replace symbol DMX_PES_AUDIO0 dmx-pes-type-t
-replace symbol DMX_PES_VIDEO0 dmx-pes-type-t
-replace symbol DMX_PES_TELETEXT0 dmx-pes-type-t
-replace symbol DMX_PES_SUBTITLE0 dmx-pes-type-t
-replace symbol DMX_PES_PCR0 dmx-pes-type-t
-replace symbol DMX_PES_AUDIO1 dmx-pes-type-t
-replace symbol DMX_PES_VIDEO1 dmx-pes-type-t
-replace symbol DMX_PES_TELETEXT1 dmx-pes-type-t
-replace symbol DMX_PES_SUBTITLE1 dmx-pes-type-t
-replace symbol DMX_PES_PCR1 dmx-pes-type-t
-replace symbol DMX_PES_AUDIO2 dmx-pes-type-t
-replace symbol DMX_PES_VIDEO2 dmx-pes-type-t
-replace symbol DMX_PES_TELETEXT2 dmx-pes-type-t
-replace symbol DMX_PES_SUBTITLE2 dmx-pes-type-t
-replace symbol DMX_PES_PCR2 dmx-pes-type-t
-replace symbol DMX_PES_AUDIO3 dmx-pes-type-t
-replace symbol DMX_PES_VIDEO3 dmx-pes-type-t
-replace symbol DMX_PES_TELETEXT3 dmx-pes-type-t
-replace symbol DMX_PES_SUBTITLE3 dmx-pes-type-t
-replace symbol DMX_PES_PCR3 dmx-pes-type-t
-replace symbol DMX_PES_OTHER dmx-pes-type-t
+# dmx_pes_type_t enum symbols
+replace enum dmx_ts_pes :c:type:`dmx_pes_type`
+replace symbol DMX_PES_AUDIO0 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_VIDEO0 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_TELETEXT0 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_SUBTITLE0 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_PCR0 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_AUDIO1 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_VIDEO1 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_TELETEXT1 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_SUBTITLE1 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_PCR1 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_AUDIO2 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_VIDEO2 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_TELETEXT2 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_SUBTITLE2 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_PCR2 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_AUDIO3 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_VIDEO3 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_TELETEXT3 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_SUBTITLE3 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_PCR3 :c:type:`dmx_pes_type`
+replace symbol DMX_PES_OTHER :c:type:`dmx_pes_type`
 
 # Ignore obsolete symbols
 ignore define DMX_PES_AUDIO
@@ -36,28 +36,31 @@
 ignore define DMX_PES_PCR
 
 # dmx_input_t symbols
-replace enum dmx_input dmx-input-t
-replace symbol DMX_IN_FRONTEND dmx-input-t
-replace symbol DMX_IN_DVR dmx-input-t
+replace enum dmx_input :c:type:`dmx_input`
+replace symbol DMX_IN_FRONTEND :c:type:`dmx_input`
+replace symbol DMX_IN_DVR :c:type:`dmx_input`
 
 # dmx_source_t symbols
-replace enum dmx_source dmx-source-t
-replace symbol DMX_SOURCE_FRONT0 dmx-source-t
-replace symbol DMX_SOURCE_FRONT1 dmx-source-t
-replace symbol DMX_SOURCE_FRONT2 dmx-source-t
-replace symbol DMX_SOURCE_FRONT3 dmx-source-t
-replace symbol DMX_SOURCE_DVR0 dmx-source-t
-replace symbol DMX_SOURCE_DVR1 dmx-source-t
-replace symbol DMX_SOURCE_DVR2 dmx-source-t
-replace symbol DMX_SOURCE_DVR3 dmx-source-t
+replace enum dmx_source :c:type:`dmx_source`
+replace symbol DMX_SOURCE_FRONT0 :c:type:`dmx_source`
+replace symbol DMX_SOURCE_FRONT1 :c:type:`dmx_source`
+replace symbol DMX_SOURCE_FRONT2 :c:type:`dmx_source`
+replace symbol DMX_SOURCE_FRONT3 :c:type:`dmx_source`
+replace symbol DMX_SOURCE_DVR0 :c:type:`dmx_source`
+replace symbol DMX_SOURCE_DVR1 :c:type:`dmx_source`
+replace symbol DMX_SOURCE_DVR2 :c:type:`dmx_source`
+replace symbol DMX_SOURCE_DVR3 :c:type:`dmx_source`
 
 
 # Flags for struct dmx_sct_filter_params
-replace define DMX_CHECK_CRC dmx-sct-filter-params
-replace define DMX_ONESHOT dmx-sct-filter-params
-replace define DMX_IMMEDIATE_START dmx-sct-filter-params
-replace define DMX_KERNEL_CLIENT dmx-sct-filter-params
+replace define DMX_CHECK_CRC :c:type:`dmx_sct_filter_params`
+replace define DMX_ONESHOT :c:type:`dmx_sct_filter_params`
+replace define DMX_IMMEDIATE_START :c:type:`dmx_sct_filter_params`
+replace define DMX_KERNEL_CLIENT :c:type:`dmx_sct_filter_params`
 
 # some typedefs should point to struct/enums
-replace typedef dmx_caps_t dmx-caps
-replace typedef dmx_filter_t dmx-filter
+replace typedef dmx_caps_t :c:type:`dmx_caps`
+replace typedef dmx_filter_t :c:type:`dmx_filter`
+replace typedef dmx_pes_type_t :c:type:`dmx_pes_type`
+replace typedef dmx_input_t :c:type:`dmx_input`
+replace typedef dmx_source_t :c:type:`dmx_source`
diff --git a/Documentation/media/frontend.h.rst.exceptions b/Documentation/media/frontend.h.rst.exceptions
index 60f2cbb..7656770 100644
--- a/Documentation/media/frontend.h.rst.exceptions
+++ b/Documentation/media/frontend.h.rst.exceptions
@@ -26,22 +26,22 @@
 ignore define DTV_IOCTL_MAX_MSGS
 
 # Stats enum is documented altogether
-replace enum fecap_scale_params frontend-stat-properties
+replace enum fecap_scale_params :ref:`frontend-stat-properties`
 replace symbol FE_SCALE_COUNTER frontend-stat-properties
 replace symbol FE_SCALE_DECIBEL frontend-stat-properties
 replace symbol FE_SCALE_NOT_AVAILABLE frontend-stat-properties
 replace symbol FE_SCALE_RELATIVE frontend-stat-properties
 
 # the same reference is used for both get and set ioctls
-replace ioctl FE_SET_PROPERTY FE_GET_PROPERTY
+replace ioctl FE_SET_PROPERTY :c:type:`FE_GET_PROPERTY`
 
 # Ignore struct used only internally at Kernel
 ignore struct dtv_cmds_h
 
 # Typedefs that use the enum reference
-replace typedef fe_sec_voltage_t fe-sec-voltage
+replace typedef fe_sec_voltage_t :c:type:`fe_sec_voltage`
 
 # Replaces for flag constants
-replace define FE_TUNE_MODE_ONESHOT fe_set_frontend_tune_mode
+replace define FE_TUNE_MODE_ONESHOT :c:func:`FE_SET_FRONTEND_TUNE_MODE`
 replace define LNA_AUTO dtv-lna
 replace define NO_STREAM_ID_FILTER dtv-stream-id
diff --git a/Documentation/media/index.rst b/Documentation/media/index.rst
index 7f8f0af..e347a3e 100644
--- a/Documentation/media/index.rst
+++ b/Documentation/media/index.rst
@@ -1,6 +1,11 @@
 Linux Media Subsystem Documentation
 ===================================
 
+.. Sphinx 1.4.x has a definition for DUrole that doesn't work on alltt blocks
+.. raw:: latex
+
+	\renewcommand*{\DUrole}[2]{ #2 }
+
 Contents:
 
 .. toctree::
diff --git a/Documentation/media/intro.rst b/Documentation/media/intro.rst
index be90bda..f6086c1 100644
--- a/Documentation/media/intro.rst
+++ b/Documentation/media/intro.rst
@@ -30,7 +30,7 @@
    called as DVB API, in fact it covers several different video standards
    including DVB-T/T2, DVB-S/S2, DVB-C, ATSC, ISDB-T, ISDB-S, DTMB, etc. The
    complete list of supported standards can be found at
-   :ref:`fe-delivery-system-t`.
+   :c:type:`fe_delivery_system`.
 
 3. The :ref:`third part <remote_controllers>` covers the Remote Controller API.
 
diff --git a/Documentation/cec.txt b/Documentation/media/kapi/cec-core.rst
similarity index 71%
rename from Documentation/cec.txt
rename to Documentation/media/kapi/cec-core.rst
index 75155fe..88c33b5 100644
--- a/Documentation/cec.txt
+++ b/Documentation/media/kapi/cec-core.rst
@@ -36,39 +36,50 @@
 The struct cec_adapter represents the CEC adapter hardware. It is created by
 calling cec_allocate_adapter() and deleted by calling cec_delete_adapter():
 
-struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
+.. c:function::
+   struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
 	       void *priv, const char *name, u32 caps, u8 available_las,
 	       struct device *parent);
-void cec_delete_adapter(struct cec_adapter *adap);
+
+.. c:function::
+   void cec_delete_adapter(struct cec_adapter *adap);
 
 To create an adapter you need to pass the following information:
 
-ops: adapter operations which are called by the CEC framework and that you
-have to implement.
+ops:
+	adapter operations which are called by the CEC framework and that you
+	have to implement.
 
-priv: will be stored in adap->priv and can be used by the adapter ops.
+priv:
+	will be stored in adap->priv and can be used by the adapter ops.
 
-name: the name of the CEC adapter. Note: this name will be copied.
+name:
+	the name of the CEC adapter. Note: this name will be copied.
 
-caps: capabilities of the CEC adapter. These capabilities determine the
+caps:
+	capabilities of the CEC adapter. These capabilities determine the
 	capabilities of the hardware and which parts are to be handled
 	by userspace and which parts are handled by kernelspace. The
 	capabilities are returned by CEC_ADAP_G_CAPS.
 
-available_las: the number of simultaneous logical addresses that this
+available_las:
+	the number of simultaneous logical addresses that this
 	adapter can handle. Must be 1 <= available_las <= CEC_MAX_LOG_ADDRS.
 
-parent: the parent device.
+parent:
+	the parent device.
 
 
 To register the /dev/cecX device node and the remote control device (if
 CEC_CAP_RC is set) you call:
 
-int cec_register_adapter(struct cec_adapter *adap);
+.. c:function::
+	int cec_register_adapter(struct cec_adapter \*adap);
 
 To unregister the devices call:
 
-void cec_unregister_adapter(struct cec_adapter *adap);
+.. c:function::
+	void cec_unregister_adapter(struct cec_adapter \*adap);
 
 Note: if cec_register_adapter() fails, then call cec_delete_adapter() to
 clean up. But if cec_register_adapter() succeeded, then only call
@@ -83,18 +94,23 @@
 The following low-level adapter operations have to be implemented in
 your driver:
 
-struct cec_adap_ops {
-	/* Low-level callbacks */
-	int (*adap_enable)(struct cec_adapter *adap, bool enable);
-	int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable);
-	int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr);
-	int (*adap_transmit)(struct cec_adapter *adap, u8 attempts,
-			     u32 signal_free_time, struct cec_msg *msg);
-	void (*adap_log_status)(struct cec_adapter *adap);
+.. c:type:: struct cec_adap_ops
 
-	/* High-level callbacks */
-	...
-};
+.. code-block:: none
+
+	struct cec_adap_ops
+	{
+		/* Low-level callbacks */
+		int (*adap_enable)(struct cec_adapter *adap, bool enable);
+		int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable);
+		int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr);
+		int (*adap_transmit)(struct cec_adapter *adap, u8 attempts,
+				      u32 signal_free_time, struct cec_msg *msg);
+		void (\*adap_log_status)(struct cec_adapter *adap);
+
+		/* High-level callbacks */
+		...
+	};
 
 The three low-level ops deal with various aspects of controlling the CEC adapter
 hardware:
@@ -102,6 +118,7 @@
 
 To enable/disable the hardware:
 
+.. c:function::
 	int (*adap_enable)(struct cec_adapter *adap, bool enable);
 
 This callback enables or disables the CEC hardware. Enabling the CEC hardware
@@ -115,6 +132,7 @@
 
 To enable/disable the 'monitor all' mode:
 
+.. c:function::
 	int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable);
 
 If enabled, then the adapter should be put in a mode to also monitor messages
@@ -127,6 +145,7 @@
 
 To program a new logical address:
 
+.. c:function::
 	int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr);
 
 If logical_addr == CEC_LOG_ADDR_INVALID then all programmed logical addresses
@@ -140,6 +159,7 @@
 
 To transmit a new message:
 
+.. c:function::
 	int (*adap_transmit)(struct cec_adapter *adap, u8 attempts,
 			     u32 signal_free_time, struct cec_msg *msg);
 
@@ -158,6 +178,7 @@
 
 To log the current CEC hardware status:
 
+.. c:function::
 	void (*adap_status)(struct cec_adapter *adap, struct seq_file *file);
 
 This optional callback can be used to show the status of the CEC hardware.
@@ -169,29 +190,41 @@
 
 When a transmit finished (successfully or otherwise):
 
-void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt,
+.. c:function::
+	void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt,
 		       u8 nack_cnt, u8 low_drive_cnt, u8 error_cnt);
 
 The status can be one of:
 
-CEC_TX_STATUS_OK: the transmit was successful.
-CEC_TX_STATUS_ARB_LOST: arbitration was lost: another CEC initiator
-took control of the CEC line and you lost the arbitration.
-CEC_TX_STATUS_NACK: the message was nacked (for a directed message) or
-acked (for a broadcast message). A retransmission is needed.
-CEC_TX_STATUS_LOW_DRIVE: low drive was detected on the CEC bus. This
-indicates that a follower detected an error on the bus and requested a
-retransmission.
-CEC_TX_STATUS_ERROR: some unspecified error occurred: this can be one of
-the previous two if the hardware cannot differentiate or something else
-entirely.
-CEC_TX_STATUS_MAX_RETRIES: could not transmit the message after
-trying multiple times. Should only be set by the driver if it has hardware
-support for retrying messages. If set, then the framework assumes that it
-doesn't have to make another attempt to transmit the message since the
-hardware did that already.
+CEC_TX_STATUS_OK:
+	the transmit was successful.
 
-The *_cnt arguments are the number of error conditions that were seen.
+CEC_TX_STATUS_ARB_LOST:
+	arbitration was lost: another CEC initiator
+	took control of the CEC line and you lost the arbitration.
+
+CEC_TX_STATUS_NACK:
+	the message was nacked (for a directed message) or
+	acked (for a broadcast message). A retransmission is needed.
+
+CEC_TX_STATUS_LOW_DRIVE:
+	low drive was detected on the CEC bus. This indicates that
+	a follower detected an error on the bus and requested a
+	retransmission.
+
+CEC_TX_STATUS_ERROR:
+	some unspecified error occurred: this can be one of
+	the previous two if the hardware cannot differentiate or something
+	else entirely.
+
+CEC_TX_STATUS_MAX_RETRIES:
+	could not transmit the message after trying multiple times.
+	Should only be set by the driver if it has hardware support for
+	retrying messages. If set, then the framework assumes that it
+	doesn't have to make another attempt to transmit the message
+	since the hardware did that already.
+
+The \*_cnt arguments are the number of error conditions that were seen.
 This may be 0 if no information is available. Drivers that do not support
 hardware retry can just set the counter corresponding to the transmit error
 to 1, if the hardware does support retry then either set these counters to
@@ -200,7 +233,8 @@
 
 When a CEC message was received:
 
-void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg);
+.. c:function::
+	void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg);
 
 Speaks for itself.
 
@@ -210,17 +244,20 @@
 The low-level operations drive the hardware, the high-level operations are
 CEC protocol driven. The following high-level callbacks are available:
 
-struct cec_adap_ops {
-	/* Low-level callbacks */
-	...
+.. code-block:: none
 
-	/* High-level CEC message callback */
-	int (*received)(struct cec_adapter *adap, struct cec_msg *msg);
-};
+	struct cec_adap_ops {
+		/\* Low-level callbacks \*/
+		...
+
+		/\* High-level CEC message callback \*/
+		int (\*received)(struct cec_adapter \*adap, struct cec_msg \*msg);
+	};
 
 The received() callback allows the driver to optionally handle a newly
 received CEC message
 
+.. c:function::
 	int (*received)(struct cec_adapter *adap, struct cec_msg *msg);
 
 If the driver wants to process a CEC message, then it can implement this
@@ -234,13 +271,16 @@
 
 CEC Adapter drivers can call the following CEC framework functions:
 
-int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg,
-		     bool block);
+.. c:function::
+	int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg,
+			     bool block);
 
 Transmit a CEC message. If block is true, then wait until the message has been
 transmitted, otherwise just queue it and return.
 
-void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block);
+.. c:function::
+	void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr,
+			     bool block);
 
 Change the physical address. This function will set adap->phys_addr and
 send an event if it has changed. If cec_s_log_addrs() has been called and
@@ -254,8 +294,9 @@
 to another valid physical address, then this function will first set the
 address to CEC_PHYS_ADDR_INVALID before enabling the new physical address.
 
-int cec_s_log_addrs(struct cec_adapter *adap,
-		    struct cec_log_addrs *log_addrs, bool block);
+.. c:function::
+	int cec_s_log_addrs(struct cec_adapter *adap,
+			    struct cec_log_addrs *log_addrs, bool block);
 
 Claim the CEC logical addresses. Should never be called if CEC_CAP_LOG_ADDRS
 is set. If block is true, then wait until the logical addresses have been
diff --git a/Documentation/media/kapi/dtv-core.rst b/Documentation/media/kapi/dtv-core.rst
index dd96e84..a3c4642 100644
--- a/Documentation/media/kapi/dtv-core.rst
+++ b/Documentation/media/kapi/dtv-core.rst
@@ -6,8 +6,6 @@
 
 .. kernel-doc:: drivers/media/dvb-core/dvb_math.h
 
-.. kernel-doc:: drivers/media/dvb-core/dvb_ringbuffer.h
-
 .. kernel-doc:: drivers/media/dvb-core/dvbdev.h
 
 
@@ -18,6 +16,42 @@
 .. kernel-doc:: drivers/media/dvb-core/dvbdev.h
    :export: drivers/media/dvb-core/dvbdev.c
 
+Digital TV Ring buffer
+----------------------
+
+Those routines implement ring buffers used to handle digital TV data and
+copy it from/to userspace.
+
+.. note::
+
+  1) For performance reasons read and write routines don't check buffer sizes
+     and/or number of bytes free/available. This has to be done before these
+     routines are called. For example:
+
+   .. code-block:: c
+
+        /* write @buflen: bytes */
+        free = dvb_ringbuffer_free(rbuf);
+        if (free >= buflen)
+                count = dvb_ringbuffer_write(rbuf, buffer, buflen);
+        else
+                /* do something */
+
+        /* read min. 1000, max. @bufsize: bytes */
+        avail = dvb_ringbuffer_avail(rbuf);
+        if (avail >= 1000)
+                count = dvb_ringbuffer_read(rbuf, buffer, min(avail, bufsize));
+        else
+                /* do something */
+
+  2) If there is exactly one reader and one writer, there is no need
+     to lock read or write operations.
+     Two or more readers must be locked against each other.
+     Flushing the buffer counts as a read operation.
+     Resetting the buffer counts as a read and write operation.
+     Two or more writers must be locked against each other.
+
+.. kernel-doc:: drivers/media/dvb-core/dvb_ringbuffer.h
 
 
 Digital TV Frontend kABI
@@ -121,7 +155,7 @@
 bottom half mechanism or start a tasklet instead of making the callback
 function call directly from a hardware interrupt.
 
-This mechanism is implemented by :c:func:`dmx_ts_cb()` and :cpp:func:`dmx_section_cb()`
+This mechanism is implemented by :c:func:`dmx_ts_cb()` and :c:func:`dmx_section_cb()`
 callbacks.
 
 .. kernel-doc:: drivers/media/dvb-core/demux.h
diff --git a/Documentation/media/kapi/mc-core.rst b/Documentation/media/kapi/mc-core.rst
index 569cfc4..1a738e5 100644
--- a/Documentation/media/kapi/mc-core.rst
+++ b/Documentation/media/kapi/mc-core.rst
@@ -34,7 +34,7 @@
 Media device
 ^^^^^^^^^^^^
 
-A media device is represented by a :c:type:`struct media_device <media_device>`
+A media device is represented by a struct :c:type:`media_device`
 instance, defined in ``include/media/media-device.h``.
 Allocation of the structure is handled by the media device driver, usually by
 embedding the :c:type:`media_device` instance in a larger driver-specific
@@ -47,7 +47,7 @@
 Entities
 ^^^^^^^^
 
-Entities are represented by a :c:type:`struct media_entity <media_entity>`
+Entities are represented by a struct :c:type:`media_entity`
 instance, defined in ``include/media/media-entity.h``. The structure is usually
 embedded into a higher-level structure, such as
 :c:type:`v4l2_subdev` or :c:type:`video_device`
@@ -65,10 +65,10 @@
 ^^^^^^^^^^
 
 Interfaces are represented by a
-:c:type:`struct media_interface <media_interface>` instance, defined in
+struct :c:type:`media_interface` instance, defined in
 ``include/media/media-entity.h``. Currently, only one type of interface is
 defined: a device node. Such interfaces are represented by a
-:c:type:`struct media_intf_devnode <media_intf_devnode>`.
+struct :c:type:`media_intf_devnode`.
 
 Drivers initialize and create device node interfaces by calling
 :c:func:`media_devnode_create()`
@@ -77,7 +77,7 @@
 
 Pads
 ^^^^
-Pads are represented by a :c:type:`struct media_pad <media_pad>` instance,
+Pads are represented by a struct :c:type:`media_pad` instance,
 defined in ``include/media/media-entity.h``. Each entity stores its pads in
 a pads array managed by the entity driver. Drivers usually embed the array in
 a driver-specific structure.
@@ -85,8 +85,9 @@
 Pads are identified by their entity and their 0-based index in the pads
 array.
 
-Both information are stored in the :c:type:`struct media_pad`, making the
-:c:type:`media_pad` pointer the canonical way to store and pass link references.
+Both information are stored in the struct :c:type:`media_pad`,
+making the struct :c:type:`media_pad` pointer the canonical way
+to store and pass link references.
 
 Pads have flags that describe the pad capabilities and state.
 
@@ -101,7 +102,7 @@
 Links
 ^^^^^
 
-Links are represented by a :c:type:`struct media_link <media_link>` instance,
+Links are represented by a struct :c:type:`media_link` instance,
 defined in ``include/media/media-entity.h``. There are two types of links:
 
 **1. pad to pad links**:
@@ -184,7 +185,7 @@
 
 Due to the wide differences between drivers regarding power management
 needs, the media controller does not implement power management. However,
-the :c:type:`struct media_entity <media_entity>` includes a ``use_count``
+the struct :c:type:`media_entity` includes a ``use_count``
 field that media drivers
 can use to track the number of users of every entity for power management
 needs.
@@ -210,11 +211,11 @@
 The function will mark all entities connected to the given entity through
 enabled links, either directly or indirectly, as streaming.
 
-The :c:type:`struct media_pipeline <media_pipeline>` instance pointed to by
+The struct :c:type:`media_pipeline` instance pointed to by
 the pipe argument will be stored in every entity in the pipeline.
-Drivers should embed the :c:type:`struct media_pipeline <media_pipeline>`
+Drivers should embed the struct :c:type:`media_pipeline`
 in higher-level pipeline structures and can then access the
-pipeline through the :c:type:`struct media_entity <media_entity>`
+pipeline through the struct :c:type:`media_entity`
 pipe field.
 
 Calls to :c:func:`media_entity_pipeline_start()` can be nested.
diff --git a/Documentation/media/kapi/v4l2-dev.rst b/Documentation/media/kapi/v4l2-dev.rst
index cdfcf0b..b29aa61 100644
--- a/Documentation/media/kapi/v4l2-dev.rst
+++ b/Documentation/media/kapi/v4l2-dev.rst
@@ -56,7 +56,7 @@
   :c:type:`video_device`->vfl_dir fields are used to disable ops that do not
   match the type/dir combination. E.g. VBI ops are disabled for non-VBI nodes,
   and output ops  are disabled for a capture device. This makes it possible to
-  provide just one :c:type:`v4l2_ioctl_ops struct` for both vbi and
+  provide just one :c:type:`v4l2_ioctl_ops` struct for both vbi and
   video nodes.
 
 - :c:type:`video_device`->lock: leave to ``NULL`` if you want to do all the
@@ -166,14 +166,14 @@
 In the case of :ref:`videobuf2 <vb2_framework>` you will need to implement the
 ``wait_prepare()`` and ``wait_finish()`` callbacks to unlock/lock if applicable.
 If you use the ``queue->lock`` pointer, then you can use the helper functions
-:c:func:`vb2_ops_wait_prepare` and :cpp:func:`vb2_ops_wait_finish`.
+:c:func:`vb2_ops_wait_prepare` and :c:func:`vb2_ops_wait_finish`.
 
 The implementation of a hotplug disconnect should also take the lock from
 :c:type:`video_device` before calling v4l2_device_disconnect. If you are also
 using :c:type:`video_device`->queue->lock, then you have to first lock
 :c:type:`video_device`->queue->lock followed by :c:type:`video_device`->lock.
 That way you can be sure no ioctl is running when you call
-:c:type:`v4l2_device_disconnect`.
+:c:func:`v4l2_device_disconnect`.
 
 Video device registration
 -------------------------
@@ -200,6 +200,7 @@
 - ``VFL_TYPE_VBI``: ``/dev/vbiX`` for vertical blank data (i.e. closed captions, teletext)
 - ``VFL_TYPE_RADIO``: ``/dev/radioX`` for radio tuners
 - ``VFL_TYPE_SDR``: ``/dev/swradioX`` for Software Defined Radio tuners
+- ``VFL_TYPE_TOUCH``: ``/dev/v4l-touchX`` for touch sensors
 
 The last argument gives you a certain amount of control over the device
 device node number used (i.e. the X in ``videoX``). Normally you will pass -1
@@ -262,6 +263,7 @@
 
 It is a bitmask and the following bits can be set:
 
+.. tabularcolumns:: |p{5ex}|L|
 
 ===== ================================================================
 Mask  Description
@@ -334,7 +336,7 @@
 
 returns the video_device belonging to the file struct.
 
-The :c:func:`video_devdata` function combines :cpp:func:`video_get_drvdata`
+The :c:func:`video_devdata` function combines :c:func:`video_get_drvdata`
 with :c:func:`video_devdata`:
 
 	:c:func:`video_drvdata <video_drvdata>`
diff --git a/Documentation/media/kapi/v4l2-event.rst b/Documentation/media/kapi/v4l2-event.rst
index f962686..9a5e315 100644
--- a/Documentation/media/kapi/v4l2-event.rst
+++ b/Documentation/media/kapi/v4l2-event.rst
@@ -40,7 +40,7 @@
 In order to queue events to video device, drivers should call:
 
 	:c:func:`v4l2_event_queue <v4l2_event_queue>`
-	(:c:type:`vdev <video_device>`, :ref:`ev <v4l2-event>`)
+	(:c:type:`vdev <video_device>`, :c:type:`ev <v4l2_event>`)
 
 The driver's only responsibility is to fill in the type and the data fields.
 The other fields will be filled in by V4L2.
@@ -51,7 +51,7 @@
 Subscribing to an event is via:
 
 	:c:func:`v4l2_event_subscribe <v4l2_event_subscribe>`
-	(:c:type:`fh <v4l2_fh>`, :ref:`sub <v4l2-event-subscription>` ,
+	(:c:type:`fh <v4l2_fh>`, :c:type:`sub <v4l2_event_subscription>` ,
 	elems, :c:type:`ops <v4l2_subscribed_event_ops>`)
 
 
@@ -86,7 +86,7 @@
 Unsubscribing to an event is via:
 
 	:c:func:`v4l2_event_unsubscribe <v4l2_event_unsubscribe>`
-	(:c:type:`fh <v4l2_fh>`, :ref:`sub <v4l2-event-subscription>`)
+	(:c:type:`fh <v4l2_fh>`, :c:type:`sub <v4l2_event_subscription>`)
 
 This function is used to implement :c:type:`video_device`->
 :c:type:`ioctl_ops <v4l2_ioctl_ops>`-> ``vidioc_unsubscribe_event``.
diff --git a/Documentation/media/kapi/v4l2-fh.rst b/Documentation/media/kapi/v4l2-fh.rst
index 9e87d5c..3ee64ad 100644
--- a/Documentation/media/kapi/v4l2-fh.rst
+++ b/Documentation/media/kapi/v4l2-fh.rst
@@ -21,8 +21,8 @@
 In many cases the struct :c:type:`v4l2_fh` will be embedded in a larger
 structure. In that case you should call:
 
-#) :c:func:`v4l2_fh_init` and :cpp:func:`v4l2_fh_add` in ``open()``
-#) :c:func:`v4l2_fh_del` and :cpp:func:`v4l2_fh_exit` in ``release()``
+#) :c:func:`v4l2_fh_init` and :c:func:`v4l2_fh_add` in ``open()``
+#) :c:func:`v4l2_fh_del` and :c:func:`v4l2_fh_exit` in ``release()``
 
 Drivers can extract their own file handle structure by using the container_of
 macro.
diff --git a/Documentation/media/kapi/v4l2-subdev.rst b/Documentation/media/kapi/v4l2-subdev.rst
index d767b61..e1f0b72 100644
--- a/Documentation/media/kapi/v4l2-subdev.rst
+++ b/Documentation/media/kapi/v4l2-subdev.rst
@@ -27,7 +27,7 @@
 Bridges might also need to store per-subdev private data, such as a pointer to
 bridge-specific per-subdev private data. The :c:type:`v4l2_subdev` structure
 provides host private data for that purpose that can be accessed with
-:c:func:`v4l2_get_subdev_hostdata` and :cpp:func:`v4l2_set_subdev_hostdata`.
+:c:func:`v4l2_get_subdev_hostdata` and :c:func:`v4l2_set_subdev_hostdata`.
 
 From the bridge driver perspective, you load the sub-device module and somehow
 obtain the :c:type:`v4l2_subdev` pointer. For i2c devices this is easy: you call
@@ -412,19 +412,7 @@
 To see which chip variants are supported you can look in the i2c driver code
 for the i2c_device_id table. This lists all the possibilities.
 
-There are two more helper functions:
-
-:c:func:`v4l2_i2c_new_subdev_cfg`: this function adds new irq and
-platform_data arguments and has both 'addr' and 'probed_addrs' arguments:
-if addr is not 0 then that will be used (non-probing variant), otherwise the
-probed_addrs are probed.
-
-For example: this will probe for address 0x10:
-
-.. code-block:: c
-
-	struct v4l2_subdev *sd = v4l2_i2c_new_subdev_cfg(v4l2_dev, adapter,
-			  "module_foo", "chipid", 0, NULL, 0, I2C_ADDRS(0x10));
+There are one more helper function:
 
 :c:func:`v4l2_i2c_new_subdev_board` uses an :c:type:`i2c_board_info` struct
 which is passed to the i2c driver and replaces the irq, platform_data and addr
@@ -433,9 +421,10 @@
 If the subdev supports the s_config core ops, then that op is called with
 the irq and platform_data arguments after the subdev was setup.
 
-The older :c:func:`v4l2_i2c_new_subdev` and
-:c:func:`v4l2_i2c_new_probed_subdev` functions will call ``s_config`` as
-well, but with irq set to 0 and platform_data set to ``NULL``.
+The :c:func:`v4l2_i2c_new_subdev` function will call
+:c:func:`v4l2_i2c_new_subdev_board`, internally filling a
+:c:type:`i2c_board_info` structure using the ``client_type`` and the
+``addr`` to fill it.
 
 V4L2 sub-device functions and data structures
 ---------------------------------------------
diff --git a/Documentation/media/media_api_files/typical_media_device.pdf b/Documentation/media/media_api_files/typical_media_device.pdf
index eb30458..d000d80 100644
--- a/Documentation/media/media_api_files/typical_media_device.pdf
+++ b/Documentation/media/media_api_files/typical_media_device.pdf
Binary files differ
diff --git a/Documentation/media/media_kapi.rst b/Documentation/media/media_kapi.rst
index b71e8e8..f282ca2 100644
--- a/Documentation/media/media_kapi.rst
+++ b/Documentation/media/media_kapi.rst
@@ -32,3 +32,4 @@
     kapi/dtv-core
     kapi/rc-core
     kapi/mc-core
+    kapi/cec-core
diff --git a/Documentation/media/net.h.rst.exceptions b/Documentation/media/net.h.rst.exceptions
index 30a2674..afe6bef 100644
--- a/Documentation/media/net.h.rst.exceptions
+++ b/Documentation/media/net.h.rst.exceptions
@@ -7,5 +7,5 @@
 ignore struct __dvb_net_if_old
 
 # Macros used at struct dvb_net_if
-replace define DVB_NET_FEEDTYPE_MPE dvb-net-if
-replace define DVB_NET_FEEDTYPE_ULE dvb-net-if
+replace define DVB_NET_FEEDTYPE_MPE :c:type:`dvb_net_if`
+replace define DVB_NET_FEEDTYPE_ULE :c:type:`dvb_net_if`
diff --git a/Documentation/media/uapi/cec/cec-func-close.rst b/Documentation/media/uapi/cec/cec-func-close.rst
index bb94e43..8267c31 100644
--- a/Documentation/media/uapi/cec/cec-func-close.rst
+++ b/Documentation/media/uapi/cec/cec-func-close.rst
@@ -20,19 +20,22 @@
     #include <unistd.h>
 
 
-.. cpp:function:: int close( int fd )
+.. c:function:: int close( int fd )
+    :name: cec-close
 
 Arguments
 =========
 
 ``fd``
-    File descriptor returned by :ref:`open() <func-open>`.
+    File descriptor returned by :c:func:`open() <cec-open>`.
 
 
 Description
 ===========
 
-.. note:: This documents the proposed CEC API. This API is not yet finalized
+.. note::
+
+   This documents the proposed CEC API. This API is not yet finalized
    and is currently only available as a staging kernel module.
 
 Closes the cec device. Resources associated with the file descriptor are
diff --git a/Documentation/media/uapi/cec/cec-func-ioctl.rst b/Documentation/media/uapi/cec/cec-func-ioctl.rst
index d0279e6d..9e8dbb1 100644
--- a/Documentation/media/uapi/cec/cec-func-ioctl.rst
+++ b/Documentation/media/uapi/cec/cec-func-ioctl.rst
@@ -19,17 +19,18 @@
     #include <sys/ioctl.h>
 
 
-.. cpp:function:: int ioctl( int fd, int request, void *argp )
+.. c:function:: int ioctl( int fd, int request, void *argp )
+   :name: cec-ioctl
 
 Arguments
 =========
 
 ``fd``
-    File descriptor returned by :ref:`open() <func-open>`.
+    File descriptor returned by :c:func:`open() <cec-open>`.
 
 ``request``
     CEC ioctl request code as defined in the cec.h header file, for
-    example :ref:`CEC_ADAP_G_CAPS`.
+    example :c:func:`CEC_ADAP_G_CAPS`.
 
 ``argp``
     Pointer to a request-specific structure.
@@ -38,7 +39,9 @@
 Description
 ===========
 
-.. note:: This documents the proposed CEC API. This API is not yet finalized
+.. note::
+
+   This documents the proposed CEC API. This API is not yet finalized
    and is currently only available as a staging kernel module.
 
 The :c:func:`ioctl()` function manipulates cec device parameters. The
diff --git a/Documentation/media/uapi/cec/cec-func-open.rst b/Documentation/media/uapi/cec/cec-func-open.rst
index 7c0f981..af3f5b5 100644
--- a/Documentation/media/uapi/cec/cec-func-open.rst
+++ b/Documentation/media/uapi/cec/cec-func-open.rst
@@ -19,7 +19,8 @@
     #include <fcntl.h>
 
 
-.. cpp:function:: int open( const char *device_name, int flags )
+.. c:function:: int open( const char *device_name, int flags )
+   :name: cec-open
 
 
 Arguments
@@ -45,7 +46,9 @@
 Description
 ===========
 
-.. note:: This documents the proposed CEC API. This API is not yet finalized
+.. note::
+
+   This documents the proposed CEC API. This API is not yet finalized
    and is currently only available as a staging kernel module.
 
 To open a cec device applications call :c:func:`open()` with the
diff --git a/Documentation/media/uapi/cec/cec-func-poll.rst b/Documentation/media/uapi/cec/cec-func-poll.rst
index fcab65f..cfb73e6 100644
--- a/Documentation/media/uapi/cec/cec-func-poll.rst
+++ b/Documentation/media/uapi/cec/cec-func-poll.rst
@@ -20,16 +20,28 @@
     #include <sys/poll.h>
 
 
-.. cpp:function:: int poll( struct pollfd *ufds, unsigned int nfds, int timeout )
+.. c:function:: int poll( struct pollfd *ufds, unsigned int nfds, int timeout )
+   :name: cec-poll
 
 Arguments
 =========
 
+``ufds``
+   List of FD events to be watched
+
+``nfds``
+   Number of FD efents at the \*ufds array
+
+``timeout``
+   Timeout to wait for events
+
 
 Description
 ===========
 
-.. note:: This documents the proposed CEC API. This API is not yet finalized
+.. note::
+
+   This documents the proposed CEC API. This API is not yet finalized
    and is currently only available as a staging kernel module.
 
 With the :c:func:`poll()` function applications can wait for CEC
@@ -37,7 +49,7 @@
 
 On success :c:func:`poll()` returns the number of file descriptors
 that have been selected (that is, file descriptors for which the
-``revents`` field of the respective :c:type:`struct pollfd` structure
+``revents`` field of the respective struct :c:type:`pollfd`
 is non-zero). CEC devices set the ``POLLIN`` and ``POLLRDNORM`` flags in
 the ``revents`` field if there are messages in the receive queue. If the
 transmit queue has room for new messages, the ``POLLOUT`` and
diff --git a/Documentation/media/uapi/cec/cec-intro.rst b/Documentation/media/uapi/cec/cec-intro.rst
index afa76f2..4a19ea5 100644
--- a/Documentation/media/uapi/cec/cec-intro.rst
+++ b/Documentation/media/uapi/cec/cec-intro.rst
@@ -3,7 +3,9 @@
 Introduction
 ============
 
-.. note:: This documents the proposed CEC API. This API is not yet finalized
+.. note::
+
+   This documents the proposed CEC API. This API is not yet finalized
    and is currently only available as a staging kernel module.
 
 HDMI connectors provide a single pin for use by the Consumer Electronics
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
index eaedc63..a35dca2 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
@@ -14,7 +14,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct cec_caps *argp )
+.. c:function:: int ioctl( int fd, CEC_ADAP_G_CAPS, struct cec_caps *argp )
+    :name: CEC_ADAP_G_CAPS
 
 Arguments
 =========
@@ -22,25 +23,25 @@
 ``fd``
     File descriptor returned by :ref:`open() <cec-func-open>`.
 
-``request``
-    CEC_ADAP_G_CAPS
-
 ``argp``
 
 
 Description
 ===========
 
-.. note:: This documents the proposed CEC API. This API is not yet finalized
+.. note::
+
+   This documents the proposed CEC API. This API is not yet finalized
    and is currently only available as a staging kernel module.
 
 All cec devices must support :ref:`ioctl CEC_ADAP_G_CAPS <CEC_ADAP_G_CAPS>`. To query
 device information, applications call the ioctl with a pointer to a
-struct :ref:`cec_caps <cec-caps>`. The driver fills the structure and
+struct :c:type:`cec_caps`. The driver fills the structure and
 returns the information to the application. The ioctl never fails.
 
+.. tabularcolumns:: |p{1.2cm}|p{2.5cm}|p{13.8cm}|
 
-.. _cec-caps:
+.. c:type:: cec_caps
 
 .. flat-table:: struct cec_caps
     :header-rows:  0
@@ -84,6 +85,7 @@
 	  macro.
 
 
+.. tabularcolumns:: |p{4.4cm}|p{2.5cm}|p{10.6cm}|
 
 .. _cec-capabilities:
 
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
index 201d483..940a16d 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
@@ -17,33 +17,35 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct cec_log_addrs *argp )
+.. c:function:: int ioctl( int fd, CEC_ADAP_G_LOG_ADDRS, struct cec_log_addrs *argp )
+   :name: CEC_ADAP_G_LOG_ADDRS
 
+.. c:function:: int ioctl( int fd, CEC_ADAP_S_LOG_ADDRS, struct cec_log_addrs *argp )
+   :name: CEC_ADAP_S_LOG_ADDRS
 
 Arguments
 =========
 
 ``fd``
-    File descriptor returned by :ref:`open() <cec-func-open>`.
-
-``request``
-    CEC_ADAP_G_LOG_ADDRS, CEC_ADAP_S_LOG_ADDRS
+    File descriptor returned by :c:func:`open() <cec-open>`.
 
 ``argp``
-
+    Pointer to struct :c:type:`cec_log_addrs`.
 
 Description
 ===========
 
-.. note:: This documents the proposed CEC API. This API is not yet finalized
+.. note::
+
+   This documents the proposed CEC API. This API is not yet finalized
    and is currently only available as a staging kernel module.
 
 To query the current CEC logical addresses, applications call
 :ref:`ioctl CEC_ADAP_G_LOG_ADDRS <CEC_ADAP_G_LOG_ADDRS>` with a pointer to a
-:c:type:`struct cec_log_addrs` where the driver stores the logical addresses.
+struct :c:type:`cec_log_addrs` where the driver stores the logical addresses.
 
 To set new logical addresses, applications fill in
-:c:type:`struct cec_log_addrs` and call :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`
+struct :c:type:`cec_log_addrs` and call :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`
 with a pointer to this struct. The :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`
 is only available if ``CEC_CAP_LOG_ADDRS`` is set (the ``ENOTTY`` error code is
 returned otherwise). The :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`
@@ -64,8 +66,11 @@
 Attempting to call :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>` when
 logical address types are already defined will return with error ``EBUSY``.
 
+.. c:type:: cec_log_addrs
 
-.. _cec-log-addrs:
+.. tabularcolumns:: |p{1.0cm}|p{7.5cm}|p{8.0cm}|
+
+.. cssclass:: longtable
 
 .. flat-table:: struct cec_log_addrs
     :header-rows:  0
@@ -220,6 +225,8 @@
 	  fallback to the Unregistered logical address. Note that if the Unregistered
 	  logical address was explicitly requested, then this flag has no effect.
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _cec-versions:
 
 .. flat-table:: CEC Versions
@@ -253,6 +260,7 @@
        -  CEC version according to the HDMI 2.0 standard.
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
 
 .. _cec-prim-dev-types:
 
@@ -319,6 +327,7 @@
        -  Use for a video processor device.
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
 
 .. _cec-log-addr-types:
 
@@ -388,6 +397,8 @@
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _cec-all-dev-types-flags:
 
 .. flat-table:: CEC All Device Types Flags
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
index b955d04..3357deb 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
@@ -17,24 +17,27 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, __u16 *argp )
+.. c:function:: int ioctl( int fd, CEC_ADAP_G_PHYS_ADDR, __u16 *argp )
+    :name: CEC_ADAP_G_PHYS_ADDR
+
+.. c:function:: int ioctl( int fd, CEC_ADAP_S_PHYS_ADDR, __u16 *argp )
+    :name: CEC_ADAP_S_PHYS_ADDR
 
 Arguments
 =========
 
 ``fd``
-    File descriptor returned by :ref:`open() <cec-func-open>`.
-
-``request``
-    CEC_ADAP_G_PHYS_ADDR, CEC_ADAP_S_PHYS_ADDR
+    File descriptor returned by :c:func:`open() <cec-open>`.
 
 ``argp``
-
+    Pointer to the CEC address.
 
 Description
 ===========
 
-.. note:: This documents the proposed CEC API. This API is not yet finalized
+.. note::
+
+   This documents the proposed CEC API. This API is not yet finalized
    and is currently only available as a staging kernel module.
 
 To query the current physical address applications call
diff --git a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
index f8caa28..e283588 100644
--- a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
@@ -15,8 +15,8 @@
 Synopsis
 ========
 
-.. c:function:: int ioctl( int fd, int request, struct cec_event *argp )
-   :name: CEC_DQEVENT
+.. c:function:: int ioctl( int fd, CEC_DQEVENT, struct cec_event *argp )
+    :name: CEC_DQEVENT
 
 Arguments
 =========
@@ -24,16 +24,15 @@
 ``fd``
     File descriptor returned by :ref:`open() <cec-func-open>`.
 
-``request``
-    CEC_DQEVENT
-
 ``argp``
 
 
 Description
 ===========
 
-.. note:: This documents the proposed CEC API. This API is not yet finalized
+.. note::
+
+   This documents the proposed CEC API. This API is not yet finalized
    and is currently only available as a staging kernel module.
 
 CEC devices can send asynchronous events. These can be retrieved by
@@ -50,8 +49,9 @@
 the same state). In that case the intermediate state changes were lost but
 it is guaranteed that the state did change in between the two events.
 
+.. tabularcolumns:: |p{1.2cm}|p{2.9cm}|p{13.4cm}|
 
-.. _cec-event-state-change_s:
+.. c:type:: cec_event_state_change
 
 .. flat-table:: struct cec_event_state_change
     :header-rows:  0
@@ -80,8 +80,9 @@
 	  has the unregistered logical address. In that case all other bits are 0.
 
 
+.. c:type:: cec_event_lost_msgs
 
-.. _cec-event-lost-msgs_s:
+.. tabularcolumns:: |p{1.0cm}|p{2.0cm}|p{14.5cm}|
 
 .. flat-table:: struct cec_event_lost_msgs
     :header-rows:  0
@@ -106,8 +107,9 @@
 	  this is more than enough.
 
 
+.. tabularcolumns:: |p{1.0cm}|p{4.2cm}|p{2.5cm}|p{8.8cm}|
 
-.. _cec-event:
+.. c:type:: cec_event
 
 .. flat-table:: struct cec_event
     :header-rows:  0
@@ -121,11 +123,10 @@
 
        -  ``ts``
 
-       -  Timestamp of the event in ns.
-	  The timestamp has been taken from the ``CLOCK_MONOTONIC`` clock. To access
-	  the same clock from userspace use :c:func:`clock_gettime(2)`.
+       -  :cspan:`1` Timestamp of the event in ns.
 
-       -
+	  The timestamp has been taken from the ``CLOCK_MONOTONIC`` clock. To access
+	  the same clock from userspace use :c:func:`clock_gettime`.
 
     -  .. row 2
 
@@ -133,9 +134,7 @@
 
        -  ``event``
 
-       -  The CEC event type, see :ref:`cec-events`.
-
-       -
+       -  :cspan:`1` The CEC event type, see :ref:`cec-events`.
 
     -  .. row 3
 
@@ -143,9 +142,7 @@
 
        -  ``flags``
 
-       -  Event flags, see :ref:`cec-event-flags`.
-
-       -
+       -  :cspan:`1` Event flags, see :ref:`cec-event-flags`.
 
     -  .. row 4
 
@@ -177,6 +174,7 @@
 	  event.
 
 
+.. tabularcolumns:: |p{5.6cm}|p{0.9cm}|p{11.0cm}|
 
 .. _cec-events:
 
@@ -206,6 +204,7 @@
 	  application didn't dequeue CEC messages fast enough.
 
 
+.. tabularcolumns:: |p{6.0cm}|p{0.6cm}|p{10.9cm}|
 
 .. _cec-event-flags:
 
diff --git a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
index f0084d8..70a4190 100644
--- a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
@@ -13,24 +13,27 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, __u32 *argp )
+.. c:function:: int ioctl( int fd, CEC_G_MODE, __u32 *argp )
+   :name: CEC_G_MODE
+
+.. c:function:: int ioctl( int fd, CEC_S_MODE, __u32 *argp )
+   :name: CEC_S_MODE
 
 Arguments
 =========
 
 ``fd``
-    File descriptor returned by :ref:`open() <cec-func-open>`.
-
-``request``
-    CEC_G_MODE, CEC_S_MODE
+    File descriptor returned by :c:func:`open() <cec-open>`.
 
 ``argp``
-
+    Pointer to CEC mode.
 
 Description
 ===========
 
-.. note:: This documents the proposed CEC API. This API is not yet finalized
+.. note::
+
+   This documents the proposed CEC API. This API is not yet finalized
    and is currently only available as a staging kernel module.
 
 By default any filehandle can use :ref:`CEC_TRANSMIT`, but in order to prevent
@@ -71,6 +74,7 @@
 
 Available initiator modes are:
 
+.. tabularcolumns:: |p{5.6cm}|p{0.9cm}|p{11.0cm}|
 
 .. _cec-mode-initiator_e:
 
@@ -114,6 +118,7 @@
 
 Available follower modes are:
 
+.. tabularcolumns:: |p{6.6cm}|p{0.9cm}|p{10.0cm}|
 
 .. _cec-mode-follower_e:
 
@@ -206,6 +211,7 @@
 
 Core message processing details:
 
+.. tabularcolumns:: |p{6.6cm}|p{10.9cm}|
 
 .. _cec-core-processing:
 
diff --git a/Documentation/media/uapi/cec/cec-ioc-receive.rst b/Documentation/media/uapi/cec/cec-ioc-receive.rst
index ae5a39a..d585b1b 100644
--- a/Documentation/media/uapi/cec/cec-ioc-receive.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-receive.rst
@@ -16,28 +16,32 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct cec_msg *argp )
+.. c:function:: int ioctl( int fd, CEC_RECEIVE, struct cec_msg *argp )
+    :name: CEC_RECEIVE
+
+.. c:function:: int ioctl( int fd, CEC_TRANSMIT, struct cec_msg *argp )
+    :name: CEC_TRANSMIT
 
 Arguments
 =========
 
 ``fd``
-    File descriptor returned by :ref:`open() <cec-func-open>`.
-
-``request``
-    CEC_RECEIVE, CEC_TRANSMIT
+    File descriptor returned by :c:func:`open() <cec-open>`.
 
 ``argp``
-
+    Pointer to struct cec_msg.
 
 Description
 ===========
 
-.. note:: This documents the proposed CEC API. This API is not yet finalized
+.. note::
+
+   This documents the proposed CEC API. This API is not yet finalized
    and is currently only available as a staging kernel module.
 
 To receive a CEC message the application has to fill in the
-``timeout`` field of :c:type:`struct cec_msg` and pass it to :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.
+``timeout`` field of struct :c:type:`cec_msg` and pass it to
+:ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.
 If the file descriptor is in non-blocking mode and there are no received
 messages pending, then it will return -1 and set errno to the ``EAGAIN``
 error code. If the file descriptor is in blocking mode and ``timeout``
@@ -51,9 +55,9 @@
 2. the result of an earlier non-blocking transmit (the ``sequence`` field will
    be non-zero).
 
-To send a CEC message the application has to fill in the
-:c:type:`struct cec_msg` and pass it to
-:ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>`. The :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>` is only available if
+To send a CEC message the application has to fill in the struct
+:c:type:` cec_msg` and pass it to :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>`.
+The :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>` is only available if
 ``CEC_CAP_TRANSMIT`` is set. If there is no more room in the transmit
 queue, then it will return -1 and set errno to the ``EBUSY`` error code.
 The transmit queue has enough room for 18 messages (about 1 second worth
@@ -71,7 +75,11 @@
 result.
 
 
-.. _cec-msg:
+.. tabularcolumns:: |p{1.0cm}|p{3.5cm}|p{13.0cm}|
+
+.. c:type:: cec_msg
+
+.. cssclass:: longtable
 
 .. flat-table:: struct cec_msg
     :header-rows:  0
@@ -87,7 +95,7 @@
 
        -  Timestamp in ns of when the last byte of the message was transmitted.
 	  The timestamp has been taken from the ``CLOCK_MONOTONIC`` clock. To access
-	  the same clock from userspace use :c:func:`clock_gettime(2)`.
+	  the same clock from userspace use :c:func:`clock_gettime`.
 
     -  .. row 2
 
@@ -97,7 +105,7 @@
 
        -  Timestamp in ns of when the last byte of the message was received.
 	  The timestamp has been taken from the ``CLOCK_MONOTONIC`` clock. To access
-	  the same clock from userspace use :c:func:`clock_gettime(2)`.
+	  the same clock from userspace use :c:func:`clock_gettime`.
 
     -  .. row 3
 
@@ -247,6 +255,7 @@
 	  valid if the :ref:`CEC_TX_STATUS_ERROR <CEC-TX-STATUS-ERROR>` status bit is set.
 
 
+.. tabularcolumns:: |p{5.6cm}|p{0.9cm}|p{11.0cm}|
 
 .. _cec-tx-status:
 
@@ -315,6 +324,7 @@
 	  be set to explain which failures were seen.
 
 
+.. tabularcolumns:: |p{5.6cm}|p{0.9cm}|p{11.0cm}|
 
 .. _cec-rx-status:
 
diff --git a/Documentation/media/uapi/dvb/audio-bilingual-channel-select.rst b/Documentation/media/uapi/dvb/audio-bilingual-channel-select.rst
index dbe20ff..1279bd2 100644
--- a/Documentation/media/uapi/dvb/audio-bilingual-channel-select.rst
+++ b/Documentation/media/uapi/dvb/audio-bilingual-channel-select.rst
@@ -11,11 +11,13 @@
 
 AUDIO_BILINGUAL_CHANNEL_SELECT
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(int fd, int request = AUDIO_BILINGUAL_CHANNEL_SELECT, audio_channel_select_t)
+.. c:function:: int ioctl(int fd, AUDIO_BILINGUAL_CHANNEL_SELECT, struct *audio_channel_select)
+    :name: AUDIO_BILINGUAL_CHANNEL_SELECT
 
 
 Arguments
@@ -25,20 +27,13 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
+    -
 
        -  int fd
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_BILINGUAL_CHANNEL_SELECT for this command.
-
-    -  .. row 3
+    -
 
        -  audio_channel_select_t ch
 
diff --git a/Documentation/media/uapi/dvb/audio-channel-select.rst b/Documentation/media/uapi/dvb/audio-channel-select.rst
index 69df4c0..2ceb4ef 100644
--- a/Documentation/media/uapi/dvb/audio-channel-select.rst
+++ b/Documentation/media/uapi/dvb/audio-channel-select.rst
@@ -11,11 +11,13 @@
 
 AUDIO_CHANNEL_SELECT
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(int fd, int request = AUDIO_CHANNEL_SELECT, audio_channel_select_t)
+.. c:function:: int ioctl(int fd, AUDIO_CHANNEL_SELECT, struct *audio_channel_select)
+    :name: AUDIO_CHANNEL_SELECT
 
 
 Arguments
@@ -26,19 +28,13 @@
     :stub-columns: 0
 
 
-    -  .. row 1
+    -
 
        -  int fd
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_CHANNEL_SELECT for this command.
-
-    -  .. row 3
+    -
 
        -  audio_channel_select_t ch
 
diff --git a/Documentation/media/uapi/dvb/audio-clear-buffer.rst b/Documentation/media/uapi/dvb/audio-clear-buffer.rst
index a3dec29..f6bed67 100644
--- a/Documentation/media/uapi/dvb/audio-clear-buffer.rst
+++ b/Documentation/media/uapi/dvb/audio-clear-buffer.rst
@@ -11,12 +11,13 @@
 
 AUDIO_CLEAR_BUFFER
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int  ioctl(int fd, int request = AUDIO_CLEAR_BUFFER)
-
+.. c:function:: int  ioctl(int fd, AUDIO_CLEAR_BUFFER)
+    :name: AUDIO_CLEAR_BUFFER
 
 Arguments
 ---------
@@ -32,13 +33,6 @@
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_CLEAR_BUFFER for this command.
-
-
 Description
 -----------
 
diff --git a/Documentation/media/uapi/dvb/audio-continue.rst b/Documentation/media/uapi/dvb/audio-continue.rst
index 053627d..ca58786 100644
--- a/Documentation/media/uapi/dvb/audio-continue.rst
+++ b/Documentation/media/uapi/dvb/audio-continue.rst
@@ -11,11 +11,13 @@
 
 AUDIO_CONTINUE
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int  ioctl(int fd, int request = AUDIO_CONTINUE)
+.. c:function:: int  ioctl(int fd, AUDIO_CONTINUE)
+    :name: AUDIO_CONTINUE
 
 
 Arguments
@@ -32,13 +34,6 @@
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_CONTINUE for this command.
-
-
 Description
 -----------
 
diff --git a/Documentation/media/uapi/dvb/audio-fclose.rst b/Documentation/media/uapi/dvb/audio-fclose.rst
index e5d4225c..4df24c8 100644
--- a/Documentation/media/uapi/dvb/audio-fclose.rst
+++ b/Documentation/media/uapi/dvb/audio-fclose.rst
@@ -11,11 +11,13 @@
 
 DVB audio close()
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int  close(int fd)
+.. c:function:: int close(int fd)
+    :name: dvb-audio-close
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/audio-fopen.rst b/Documentation/media/uapi/dvb/audio-fopen.rst
index ec3b23a..a802c2e 100644
--- a/Documentation/media/uapi/dvb/audio-fopen.rst
+++ b/Documentation/media/uapi/dvb/audio-fopen.rst
@@ -11,11 +11,13 @@
 
 DVB audio open()
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int  open(const char *deviceName, int flags)
+.. c:function:: int open(const char *deviceName, int flags)
+    :name: dvb-audio-open
 
 
 Arguments
@@ -80,6 +82,8 @@
 Return Value
 ------------
 
+.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
diff --git a/Documentation/media/uapi/dvb/audio-fwrite.rst b/Documentation/media/uapi/dvb/audio-fwrite.rst
index ca95b9b..8882cad 100644
--- a/Documentation/media/uapi/dvb/audio-fwrite.rst
+++ b/Documentation/media/uapi/dvb/audio-fwrite.rst
@@ -11,11 +11,13 @@
 
 DVB audio write()
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: size_t write(int fd, const void *buf, size_t count)
+.. c:function:: size_t write(int fd, const void *buf, size_t count)
+    :name: dvb-audio-write
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/audio-get-capabilities.rst b/Documentation/media/uapi/dvb/audio-get-capabilities.rst
index e274a8d..0d867f1 100644
--- a/Documentation/media/uapi/dvb/audio-get-capabilities.rst
+++ b/Documentation/media/uapi/dvb/audio-get-capabilities.rst
@@ -11,11 +11,13 @@
 
 AUDIO_GET_CAPABILITIES
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(int fd, int request = AUDIO_GET_CAPABILITIES, unsigned int *cap)
+.. c:function:: int ioctl(int fd, AUDIO_GET_CAPABILITIES, unsigned int *cap)
+    :name: AUDIO_GET_CAPABILITIES
 
 
 Arguments
@@ -26,19 +28,13 @@
     :stub-columns: 0
 
 
-    -  .. row 1
+    -
 
        -  int fd
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_GET_CAPABILITIES for this command.
-
-    -  .. row 3
+    -
 
        -  unsigned int \*cap
 
diff --git a/Documentation/media/uapi/dvb/audio-get-pts.rst b/Documentation/media/uapi/dvb/audio-get-pts.rst
index 5f87550..2d1396b 100644
--- a/Documentation/media/uapi/dvb/audio-get-pts.rst
+++ b/Documentation/media/uapi/dvb/audio-get-pts.rst
@@ -11,11 +11,13 @@
 
 AUDIO_GET_PTS
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(int fd, int request = AUDIO_GET_PTS, __u64 *pts)
+.. c:function:: int ioctl(int fd, AUDIO_GET_PTS, __u64 *pts)
+    :name: AUDIO_GET_PTS
 
 
 Arguments
@@ -26,19 +28,13 @@
     :stub-columns: 0
 
 
-    -  .. row 1
+    -
 
        -  int fd
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_GET_PTS for this command.
-
-    -  .. row 3
+    -
 
        -  __u64 \*pts
 
diff --git a/Documentation/media/uapi/dvb/audio-get-status.rst b/Documentation/media/uapi/dvb/audio-get-status.rst
index cbd8227..857b058 100644
--- a/Documentation/media/uapi/dvb/audio-get-status.rst
+++ b/Documentation/media/uapi/dvb/audio-get-status.rst
@@ -11,11 +11,13 @@
 
 AUDIO_GET_STATUS
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(int fd, int request = AUDIO_GET_STATUS, struct audio_status *status)
+.. c:function:: int ioctl(int fd, AUDIO_GET_STATUS, struct audio_status *status)
+    :name: AUDIO_GET_STATUS
 
 
 Arguments
@@ -26,19 +28,13 @@
     :stub-columns: 0
 
 
-    -  .. row 1
+    -
 
        -  int fd
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_GET_STATUS for this command.
-
-    -  .. row 3
+    -
 
        -  struct audio_status \*status
 
diff --git a/Documentation/media/uapi/dvb/audio-pause.rst b/Documentation/media/uapi/dvb/audio-pause.rst
index 9ca263e..c7310df 100644
--- a/Documentation/media/uapi/dvb/audio-pause.rst
+++ b/Documentation/media/uapi/dvb/audio-pause.rst
@@ -11,12 +11,13 @@
 
 AUDIO_PAUSE
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int  ioctl(int fd, int request = AUDIO_PAUSE)
-
+.. c:function:: int  ioctl(int fd, AUDIO_PAUSE)
+    :name: AUDIO_PAUSE
 
 Arguments
 ---------
@@ -32,12 +33,6 @@
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_PAUSE for this command.
-
 
 Description
 -----------
diff --git a/Documentation/media/uapi/dvb/audio-play.rst b/Documentation/media/uapi/dvb/audio-play.rst
index db4d720..943b5ee 100644
--- a/Documentation/media/uapi/dvb/audio-play.rst
+++ b/Documentation/media/uapi/dvb/audio-play.rst
@@ -11,11 +11,13 @@
 
 AUDIO_PLAY
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int  ioctl(int fd, int request = AUDIO_PLAY)
+.. c:function:: int  ioctl(int fd, AUDIO_PLAY)
+    :name: AUDIO_PLAY
 
 
 Arguments
@@ -32,13 +34,6 @@
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_PLAY for this command.
-
-
 Description
 -----------
 
diff --git a/Documentation/media/uapi/dvb/audio-select-source.rst b/Documentation/media/uapi/dvb/audio-select-source.rst
index b806d80..c0434a0 100644
--- a/Documentation/media/uapi/dvb/audio-select-source.rst
+++ b/Documentation/media/uapi/dvb/audio-select-source.rst
@@ -11,11 +11,13 @@
 
 AUDIO_SELECT_SOURCE
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(int fd, int request = AUDIO_SELECT_SOURCE, audio_stream_source_t source)
+.. c:function:: int ioctl(int fd, AUDIO_SELECT_SOURCE, struct audio_stream_source *source)
+    :name: AUDIO_SELECT_SOURCE
 
 
 Arguments
@@ -26,19 +28,13 @@
     :stub-columns: 0
 
 
-    -  .. row 1
+    -
 
        -  int fd
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_SELECT_SOURCE for this command.
-
-    -  .. row 3
+    -
 
        -  audio_stream_source_t source
 
diff --git a/Documentation/media/uapi/dvb/audio-set-attributes.rst b/Documentation/media/uapi/dvb/audio-set-attributes.rst
index 18667ce..f0c6153 100644
--- a/Documentation/media/uapi/dvb/audio-set-attributes.rst
+++ b/Documentation/media/uapi/dvb/audio-set-attributes.rst
@@ -11,12 +11,14 @@
 
 AUDIO_SET_ATTRIBUTES
 
+.. attention:: This ioctl is deprecated
+
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = AUDIO_SET_ATTRIBUTES, audio_attributes_t attr )
-
+.. c:function:: int ioctl(fd, AUDIO_SET_ATTRIBUTES, struct audio_attributes *attr )
+    :name: AUDIO_SET_ATTRIBUTES
 
 Arguments
 ---------
@@ -26,19 +28,13 @@
     :stub-columns: 0
 
 
-    -  .. row 1
+    -
 
        -  int fd
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_SET_ATTRIBUTES for this command.
-
-    -  .. row 3
+    -
 
        -  audio_attributes_t attr
 
diff --git a/Documentation/media/uapi/dvb/audio-set-av-sync.rst b/Documentation/media/uapi/dvb/audio-set-av-sync.rst
index 6f7e26f..0cef491 100644
--- a/Documentation/media/uapi/dvb/audio-set-av-sync.rst
+++ b/Documentation/media/uapi/dvb/audio-set-av-sync.rst
@@ -11,11 +11,13 @@
 
 AUDIO_SET_AV_SYNC
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int  ioctl(int fd, int request = AUDIO_SET_AV_SYNC, boolean state)
+.. c:function:: int  ioctl(int fd, AUDIO_SET_AV_SYNC, boolean state)
+    :name: AUDIO_SET_AV_SYNC
 
 
 Arguments
@@ -26,33 +28,21 @@
     :stub-columns: 0
 
 
-    -  .. row 1
+    -
 
        -  int fd
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_AV_SYNC for this command.
-
-    -  .. row 3
+    -
 
        -  boolean state
 
        -  Tells the DVB subsystem if A/V synchronization shall be ON or OFF.
 
-    -  .. row 4
+          TRUE: AV-sync ON
 
-       -
-       -  TRUE AV-sync ON
-
-    -  .. row 5
-
-       -
-       -  FALSE AV-sync OFF
+          FALSE: AV-sync OFF
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst b/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst
index 30bcaca..b063c49 100644
--- a/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst
+++ b/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst
@@ -11,12 +11,13 @@
 
 AUDIO_SET_BYPASS_MODE
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(int fd, int request = AUDIO_SET_BYPASS_MODE, boolean mode)
-
+.. c:function:: int ioctl(int fd, AUDIO_SET_BYPASS_MODE, boolean mode)
+    :name: AUDIO_SET_BYPASS_MODE
 
 Arguments
 ---------
@@ -26,34 +27,22 @@
     :stub-columns: 0
 
 
-    -  .. row 1
+    -
 
        -  int fd
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_SET_BYPASS_MODE for this command.
-
-    -  .. row 3
+    -
 
        -  boolean mode
 
        -  Enables or disables the decoding of the current Audio stream in
 	  the DVB subsystem.
 
-    -  .. row 4
+          TRUE: Bypass is disabled
 
-       -
-       -  TRUE Bypass is disabled
-
-    -  .. row 5
-
-       -
-       -  FALSE Bypass is enabled
+          FALSE: Bypass is enabled
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/audio-set-ext-id.rst b/Documentation/media/uapi/dvb/audio-set-ext-id.rst
index 049414d..8503c47 100644
--- a/Documentation/media/uapi/dvb/audio-set-ext-id.rst
+++ b/Documentation/media/uapi/dvb/audio-set-ext-id.rst
@@ -11,12 +11,13 @@
 
 AUDIO_SET_EXT_ID
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int  ioctl(fd, int request = AUDIO_SET_EXT_ID, int id)
-
+.. c:function:: int  ioctl(fd, AUDIO_SET_EXT_ID, int id)
+    :name: AUDIO_SET_EXT_ID
 
 Arguments
 ---------
@@ -26,19 +27,13 @@
     :stub-columns: 0
 
 
-    -  .. row 1
+    -
 
        -  int fd
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_SET_EXT_ID for this command.
-
-    -  .. row 3
+    -
 
        -  int id
 
diff --git a/Documentation/media/uapi/dvb/audio-set-id.rst b/Documentation/media/uapi/dvb/audio-set-id.rst
index a664dc1..8b1081d 100644
--- a/Documentation/media/uapi/dvb/audio-set-id.rst
+++ b/Documentation/media/uapi/dvb/audio-set-id.rst
@@ -11,12 +11,13 @@
 
 AUDIO_SET_ID
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int  ioctl(int fd, int request = AUDIO_SET_ID, int id)
-
+.. c:function:: int  ioctl(int fd, AUDIO_SET_ID, int id)
+    :name: AUDIO_SET_ID
 
 Arguments
 ---------
@@ -26,19 +27,13 @@
     :stub-columns: 0
 
 
-    -  .. row 1
+    -
 
        -  int fd
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_SET_ID for this command.
-
-    -  .. row 3
+    -
 
        -  int id
 
diff --git a/Documentation/media/uapi/dvb/audio-set-karaoke.rst b/Documentation/media/uapi/dvb/audio-set-karaoke.rst
index b55f838..c759952 100644
--- a/Documentation/media/uapi/dvb/audio-set-karaoke.rst
+++ b/Documentation/media/uapi/dvb/audio-set-karaoke.rst
@@ -11,11 +11,13 @@
 
 AUDIO_SET_KARAOKE
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = AUDIO_SET_KARAOKE, audio_karaoke_t *karaoke)
+.. c:function:: int ioctl(fd, AUDIO_SET_KARAOKE, struct audio_karaoke *karaoke)
+    :name: AUDIO_SET_KARAOKE
 
 
 Arguments
@@ -26,19 +28,13 @@
     :stub-columns: 0
 
 
-    -  .. row 1
+    -
 
        -  int fd
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_SET_KARAOKE for this command.
-
-    -  .. row 3
+    -
 
        -  audio_karaoke_t \*karaoke
 
diff --git a/Documentation/media/uapi/dvb/audio-set-mixer.rst b/Documentation/media/uapi/dvb/audio-set-mixer.rst
index 6782172..248aab8 100644
--- a/Documentation/media/uapi/dvb/audio-set-mixer.rst
+++ b/Documentation/media/uapi/dvb/audio-set-mixer.rst
@@ -11,12 +11,13 @@
 
 AUDIO_SET_MIXER
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(int fd, int request = AUDIO_SET_MIXER, audio_mixer_t *mix)
-
+.. c:function:: int ioctl(int fd, AUDIO_SET_MIXER, struct audio_mixer *mix)
+    :name: AUDIO_SET_MIXER
 
 Arguments
 ---------
@@ -26,19 +27,13 @@
     :stub-columns: 0
 
 
-    -  .. row 1
+    -
 
        -  int fd
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_SET_ID for this command.
-
-    -  .. row 3
+    -
 
        -  audio_mixer_t \*mix
 
diff --git a/Documentation/media/uapi/dvb/audio-set-mute.rst b/Documentation/media/uapi/dvb/audio-set-mute.rst
index ebaba95..897e722 100644
--- a/Documentation/media/uapi/dvb/audio-set-mute.rst
+++ b/Documentation/media/uapi/dvb/audio-set-mute.rst
@@ -11,11 +11,13 @@
 
 AUDIO_SET_MUTE
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int  ioctl(int fd, int request = AUDIO_SET_MUTE, boolean state)
+.. c:function:: int  ioctl(int fd, AUDIO_SET_MUTE, boolean state)
+    :name: AUDIO_SET_MUTE
 
 
 Arguments
@@ -26,33 +28,21 @@
     :stub-columns: 0
 
 
-    -  .. row 1
+    -
 
        -  int fd
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_SET_MUTE for this command.
-
-    -  .. row 3
+    -
 
        -  boolean state
 
        -  Indicates if audio device shall mute or not.
 
-    -  .. row 4
+          TRUE: Audio Mute
 
-       -
-       -  TRUE Audio Mute
-
-    -  .. row 5
-
-       -
-       -  FALSE Audio Un-mute
+          FALSE: Audio Un-mute
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/audio-set-streamtype.rst b/Documentation/media/uapi/dvb/audio-set-streamtype.rst
index dfb9a6c..46c0362 100644
--- a/Documentation/media/uapi/dvb/audio-set-streamtype.rst
+++ b/Documentation/media/uapi/dvb/audio-set-streamtype.rst
@@ -11,11 +11,13 @@
 
 AUDIO_SET_STREAMTYPE
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int  ioctl(fd, int request = AUDIO_SET_STREAMTYPE, int type)
+.. c:function:: int  ioctl(fd, AUDIO_SET_STREAMTYPE, int type)
+    :name: AUDIO_SET_STREAMTYPE
 
 
 Arguments
@@ -26,19 +28,13 @@
     :stub-columns: 0
 
 
-    -  .. row 1
+    -
 
        -  int fd
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_SET_STREAMTYPE for this command.
-
-    -  .. row 3
+    -
 
        -  int type
 
diff --git a/Documentation/media/uapi/dvb/audio-stop.rst b/Documentation/media/uapi/dvb/audio-stop.rst
index 449127e..dd6c3b6 100644
--- a/Documentation/media/uapi/dvb/audio-stop.rst
+++ b/Documentation/media/uapi/dvb/audio-stop.rst
@@ -11,12 +11,13 @@
 
 AUDIO_STOP
 
+.. attention:: This ioctl is deprecated
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(int fd, int request = AUDIO_STOP)
-
+.. c:function:: int ioctl(int fd, AUDIO_STOP)
+    :name: AUDIO_STOP
 
 Arguments
 ---------
@@ -32,12 +33,6 @@
 
        -  File descriptor returned by a previous call to open().
 
-    -  .. row 2
-
-       -  int request
-
-       -  Equals AUDIO_STOP for this command.
-
 
 Description
 -----------
diff --git a/Documentation/media/uapi/dvb/audio_data_types.rst b/Documentation/media/uapi/dvb/audio_data_types.rst
index 4a53127..6b93359 100644
--- a/Documentation/media/uapi/dvb/audio_data_types.rst
+++ b/Documentation/media/uapi/dvb/audio_data_types.rst
@@ -9,11 +9,7 @@
 This section describes the structures, data types and defines used when
 talking to the audio device.
 
-
-.. _audio-stream-source-t:
-
-audio_stream_source_t
-=====================
+.. c:type:: audio_stream_source
 
 The audio stream source is set through the AUDIO_SELECT_SOURCE call
 and can take the following values, depending on whether we are replaying
@@ -33,10 +29,7 @@
 through the ``write()`` system call.
 
 
-.. _audio-play-state-t:
-
-audio_play_state_t
-==================
+.. c:type:: audio_play_state
 
 The following values can be returned by the AUDIO_GET_STATUS call
 representing the state of audio playback.
@@ -51,10 +44,7 @@
     } audio_play_state_t;
 
 
-.. _audio-channel-select-t:
-
-audio_channel_select_t
-======================
+.. c:type:: audio_channel_select
 
 The audio channel selected via AUDIO_CHANNEL_SELECT is determined by
 the following values.
@@ -71,10 +61,7 @@
     } audio_channel_select_t;
 
 
-.. _audio-status:
-
-struct audio_status
-===================
+.. c:type:: audio_status
 
 The AUDIO_GET_STATUS call returns the following structure informing
 about various states of the playback operation.
@@ -93,10 +80,7 @@
     } audio_status_t;
 
 
-.. _audio-mixer:
-
-struct audio_mixer
-==================
+.. c:type:: audio_mixer
 
 The following structure is used by the AUDIO_SET_MIXER call to set the
 audio volume.
@@ -131,11 +115,7 @@
      #define AUDIO_CAP_SDDS 128
      #define AUDIO_CAP_AC3  256
 
-
-.. _audio-karaoke:
-
-struct audio_karaoke
-====================
+.. c:type:: audio_karaoke
 
 The ioctl AUDIO_SET_KARAOKE uses the following format:
 
@@ -155,10 +135,7 @@
 Melody is non-zero, the melody channel gets mixed into left and right.
 
 
-.. _audio-attributes-t:
-
-audio attributes
-================
+.. c:type:: audio_attributes
 
 The following attributes can be set by a call to AUDIO_SET_ATTRIBUTES:
 
diff --git a/Documentation/media/uapi/dvb/ca-fclose.rst b/Documentation/media/uapi/dvb/ca-fclose.rst
index 16d7a1e..5ecefa4 100644
--- a/Documentation/media/uapi/dvb/ca-fclose.rst
+++ b/Documentation/media/uapi/dvb/ca-fclose.rst
@@ -15,28 +15,20 @@
 Synopsis
 --------
 
-.. cpp:function:: int  close(int fd)
+.. c:function:: int close(int fd)
+    :name: dvb-ca-close
 
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
-
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
+``fd``
+  File descriptor returned by a previous call to :c:func:`open() <dvb-ca-open>`.
 
 Description
 -----------
 
-This system call closes a previously opened audio device.
+This system call closes a previously opened CA device.
 
 
 Return Value
diff --git a/Documentation/media/uapi/dvb/ca-fopen.rst b/Documentation/media/uapi/dvb/ca-fopen.rst
index f284461..3d28197 100644
--- a/Documentation/media/uapi/dvb/ca-fopen.rst
+++ b/Documentation/media/uapi/dvb/ca-fopen.rst
@@ -15,48 +15,35 @@
 Synopsis
 --------
 
-.. cpp:function:: int  open(const char *deviceName, int flags)
+.. c:function:: int open(const char *name, int flags)
+    :name: dvb-ca-open
 
 
 Arguments
 ---------
 
+``name``
+  Name of specific DVB CA device.
+
+``flags``
+  A bit-wise OR of the following flags:
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
+    -
+       - O_RDONLY
+       - read-only access
 
-    -  .. row 1
+    -
+       - O_RDWR
+       - read/write access
 
-       -  const char \*deviceName
-
-       -  Name of specific video device.
-
-    -  .. row 2
-
-       -  int flags
-
-       -  A bit-wise OR of the following flags:
-
-    -  .. row 3
-
-       -
-       -  O_RDONLY read-only access
-
-    -  .. row 4
-
-       -
-       -  O_RDWR read/write access
-
-    -  .. row 5
-
-       -
-       -  O_NONBLOCK open in non-blocking mode
-
-    -  .. row 6
-
-       -
-       -  (blocking mode is the default)
+    -
+       - O_NONBLOCK
+       - open in non-blocking mode
+         (blocking mode is the default)
 
 
 Description
@@ -79,6 +66,8 @@
 Return Value
 ------------
 
+.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
diff --git a/Documentation/media/uapi/dvb/ca-get-cap.rst b/Documentation/media/uapi/dvb/ca-get-cap.rst
index 891fbf2..fbf7e35 100644
--- a/Documentation/media/uapi/dvb/ca-get-cap.rst
+++ b/Documentation/media/uapi/dvb/ca-get-cap.rst
@@ -15,40 +15,51 @@
 Synopsis
 --------
 
-.. cpp:function:: int  ioctl(fd, int request = CA_GET_CAP, ca_caps_t *)
+.. c:function:: int ioctl(fd, CA_GET_CAP, struct ca_caps *caps)
+    :name: CA_GET_CAP
 
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
+``fd``
+  File descriptor returned by a previous call to :c:func:`open() <dvb-ca-open>`.
+
+``caps``
+  Pointer to struct :c:type:`ca_caps`.
+
+.. c:type:: struct ca_caps
+
+.. flat-table:: struct ca_caps
+    :header-rows:  1
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals CA_GET_CAP for this command.
-
-    -  .. row 3
-
-       -  ca_caps_t *
-
-       -  Undocumented.
+    -
+      - type
+      - name
+      - description
+    -
+      -	unsigned int
+      - slot_num
+      - total number of CA card and module slots
+    -
+      - unsigned int
+      - slot_type
+      - bitmask with all supported slot types
+    -
+      - unsigned int
+      - descr_num
+      - total number of descrambler slots (keys)
+    -
+      - unsigned int
+      - descr_type
+      - bit mask with all supported descr types
 
 
 Description
 -----------
 
-This ioctl is undocumented. Documentation is welcome.
+.. note:: This ioctl is undocumented. Documentation is welcome.
 
 
 Return Value
diff --git a/Documentation/media/uapi/dvb/ca-get-descr-info.rst b/Documentation/media/uapi/dvb/ca-get-descr-info.rst
index cf8e824..7bf327a 100644
--- a/Documentation/media/uapi/dvb/ca-get-descr-info.rst
+++ b/Documentation/media/uapi/dvb/ca-get-descr-info.rst
@@ -15,40 +15,44 @@
 Synopsis
 --------
 
-.. cpp:function:: int  ioctl(fd, int request = CA_GET_DESCR_INFO, ca_descr_info_t *)
-
+.. c:function:: int  ioctl(fd, CA_GET_DESCR_INFO, struct ca_descr_info *desc)
+    :name: CA_GET_DESCR_INFO
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
+``fd``
+  File descriptor returned by a previous call to :c:func:`open() <dvb-ca-open>`.
+
+``desc``
+  Pointer to struct :c:type:`ca_descr_info`.
+
+.. c:type:: struct ca_descr_info
+
+.. flat-table:: struct ca_descr_info
+    :header-rows:  1
     :stub-columns: 0
 
+    -
+      - type
+      - name
+      - description
 
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals CA_GET_DESCR_INFO for this command.
-
-    -  .. row 3
-
-       -  ca_descr_info_t \*
-
-       -  Undocumented.
+    -
+      - unsigned int
+      - num
+      - number of available descramblers (keys)
+    -
+      - unsigned int
+      - type
+      - type of supported scrambling system. Valid values are:
+	``CA_ECD``, ``CA_NDS`` and ``CA_DSS``.
 
 
 Description
 -----------
 
-This ioctl is undocumented. Documentation is welcome.
+.. note:: This ioctl is undocumented. Documentation is welcome.
 
 
 Return Value
diff --git a/Documentation/media/uapi/dvb/ca-get-msg.rst b/Documentation/media/uapi/dvb/ca-get-msg.rst
index 56004d5..121588d 100644
--- a/Documentation/media/uapi/dvb/ca-get-msg.rst
+++ b/Documentation/media/uapi/dvb/ca-get-msg.rst
@@ -15,40 +15,55 @@
 Synopsis
 --------
 
-.. cpp:function:: int  ioctl(fd, int request = CA_GET_MSG, ca_msg_t *)
+.. c:function:: int ioctl(fd, CA_GET_MSG, struct ca_msg *msg)
+    :name: CA_GET_MSG
 
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
+``fd``
+  File descriptor returned by a previous call to :c:func:`open() <dvb-ca-open>`.
+
+``msg``
+  Pointer to struct :c:type:`ca_msg`.
+
+
+.. c:type:: struct ca_msg
+
+.. flat-table:: struct ca_msg
+    :header-rows:  1
     :stub-columns: 0
 
+    -
+      - type
+      - name
+      - description
+    -
+       - unsigned int
+       - index
+       -
 
-    -  .. row 1
+    -
+       - unsigned int
+       - type
+       -
 
-       -  int fd
+    -
+       - unsigned int
+       - length
+       -
 
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals CA_GET_MSG for this command.
-
-    -  .. row 3
-
-       -  ca_msg_t \*
-
-       -  Undocumented.
+    -
+       - unsigned char
+       - msg[256]
+       -
 
 
 Description
 -----------
 
-This ioctl is undocumented. Documentation is welcome.
+.. note:: This ioctl is undocumented. Documentation is welcome.
 
 
 Return Value
diff --git a/Documentation/media/uapi/dvb/ca-get-slot-info.rst b/Documentation/media/uapi/dvb/ca-get-slot-info.rst
index 9fea28c..54e5dc7 100644
--- a/Documentation/media/uapi/dvb/ca-get-slot-info.rst
+++ b/Documentation/media/uapi/dvb/ca-get-slot-info.rst
@@ -15,40 +15,106 @@
 Synopsis
 --------
 
-.. cpp:function:: int  ioctl(fd, int request = CA_GET_SLOT_INFO, ca_slot_info_t *)
+.. c:function:: int ioctl(fd, CA_GET_SLOT_INFO, struct ca_slot_info *info)
+    :name: CA_GET_SLOT_INFO
 
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
+``fd``
+  File descriptor returned by a previous call to :c:func:`open() <cec-open>`.
+
+``info``
+  Pointer to struct c:type:`ca_slot_info`.
+
+.. _ca_slot_info_type:
+
+.. flat-table:: ca_slot_info types
+    :header-rows:  1
     :stub-columns: 0
 
+    -
+      - type
+      - name
+      - description
+    -
+       - CA_CI
+       - 1
+       - CI high level interface
 
-    -  .. row 1
+    -
+       - CA_CI_LINK
+       - 2
+       - CI link layer level interface
 
-       -  int fd
+    -
+       - CA_CI_PHYS
+       - 4
+       - CI physical layer level interface
 
-       -  File descriptor returned by a previous call to open().
+    -
+       - CA_DESCR
+       - 8
+       - built-in descrambler
 
-    -  .. row 2
+    -
+       - CA_SC
+       - 128
+       - simple smart card interface
 
-       -  int request
+.. _ca_slot_info_flag:
 
-       -  Equals CA_GET_SLOT_INFO for this command.
+.. flat-table:: ca_slot_info flags
+    :header-rows:  1
+    :stub-columns: 0
 
-    -  .. row 3
+    -
+      - type
+      - name
+      - description
 
-       -  ca_slot_info_t \*
+    -
+       - CA_CI_MODULE_PRESENT
+       - 1
+       - module (or card) inserted
 
-       -  Undocumented.
+    -
+       - CA_CI_MODULE_READY
+       - 2
+       -
+
+.. c:type:: ca_slot_info
+
+.. flat-table:: struct ca_slot_info
+    :header-rows:  1
+    :stub-columns: 0
+
+    -
+      - type
+      - name
+      - description
+
+    -
+       - int
+       - num
+       - slot number
+
+    -
+       - int
+       - type
+       - CA interface this slot supports, as defined at :ref:`ca_slot_info_type`.
+
+    -
+       - unsigned int
+       - flags
+       - flags as defined at :ref:`ca_slot_info_flag`.
 
 
 Description
 -----------
 
-This ioctl is undocumented. Documentation is welcome.
+.. note:: This ioctl is undocumented. Documentation is welcome.
 
 
 Return Value
diff --git a/Documentation/media/uapi/dvb/ca-reset.rst b/Documentation/media/uapi/dvb/ca-reset.rst
index d5a5008..4773131 100644
--- a/Documentation/media/uapi/dvb/ca-reset.rst
+++ b/Documentation/media/uapi/dvb/ca-reset.rst
@@ -15,34 +15,20 @@
 Synopsis
 --------
 
-.. cpp:function:: int  ioctl(fd, int request = CA_RESET)
+.. c:function:: int ioctl(fd, CA_RESET)
+    :name: CA_RESET
 
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
-
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals CA_RESET for this command.
-
+``fd``
+  File descriptor returned by a previous call to :c:func:`open() <cec-open>`.
 
 Description
 -----------
 
-This ioctl is undocumented. Documentation is welcome.
+.. note:: This ioctl is undocumented. Documentation is welcome.
 
 
 Return Value
diff --git a/Documentation/media/uapi/dvb/ca-send-msg.rst b/Documentation/media/uapi/dvb/ca-send-msg.rst
index 18974e6..532ef5f 100644
--- a/Documentation/media/uapi/dvb/ca-send-msg.rst
+++ b/Documentation/media/uapi/dvb/ca-send-msg.rst
@@ -15,40 +15,24 @@
 Synopsis
 --------
 
-.. cpp:function:: int  ioctl(fd, int request = CA_SEND_MSG, ca_msg_t *)
+.. c:function:: int ioctl(fd, CA_SEND_MSG, struct ca_msg *msg)
+    :name: CA_SEND_MSG
 
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+  File descriptor returned by a previous call to :c:func:`open() <cec-open>`.
 
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals CA_SEND_MSG for this command.
-
-    -  .. row 3
-
-       -  ca_msg_t \*
-
-       -  Undocumented.
+``msg``
+  Pointer to struct :c:type:`ca_msg`.
 
 
 Description
 -----------
 
-This ioctl is undocumented. Documentation is welcome.
+.. note:: This ioctl is undocumented. Documentation is welcome.
 
 
 Return Value
diff --git a/Documentation/media/uapi/dvb/ca-set-descr.rst b/Documentation/media/uapi/dvb/ca-set-descr.rst
index 293e6da..70f7b3c 100644
--- a/Documentation/media/uapi/dvb/ca-set-descr.rst
+++ b/Documentation/media/uapi/dvb/ca-set-descr.rst
@@ -15,40 +15,24 @@
 Synopsis
 --------
 
-.. cpp:function:: int  ioctl(fd, int request = CA_SET_DESCR, ca_descr_t *)
+.. c:function:: int ioctl(fd, CA_SET_DESCR, struct ca_descr *desc)
+    :name: CA_SET_DESCR
 
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+  File descriptor returned by a previous call to :c:func:`open() <cec-open>`.
 
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals CA_SET_DESCR for this command.
-
-    -  .. row 3
-
-       -  ca_descr_t \*
-
-       -  Undocumented.
+``msg``
+  Pointer to struct :c:type:`ca_descr`.
 
 
 Description
 -----------
 
-This ioctl is undocumented. Documentation is welcome.
+.. note:: This ioctl is undocumented. Documentation is welcome.
 
 
 Return Value
diff --git a/Documentation/media/uapi/dvb/ca-set-pid.rst b/Documentation/media/uapi/dvb/ca-set-pid.rst
index 5afa2fa..891c1c7 100644
--- a/Documentation/media/uapi/dvb/ca-set-pid.rst
+++ b/Documentation/media/uapi/dvb/ca-set-pid.rst
@@ -15,40 +15,41 @@
 Synopsis
 --------
 
-.. cpp:function:: int  ioctl(fd, int request = CA_SET_PID, ca_pid_t *)
+.. c:function:: int ioctl(fd, CA_SET_PID, struct ca_pid *pid)
+    :name: CA_SET_PID
 
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
+``fd``
+  File descriptor returned by a previous call to :c:func:`open() <dvb-ca-open>`.
+
+``pid``
+  Pointer to struct :c:type:`ca_pid`.
+
+.. c:type:: ca_pid
+
+.. flat-table:: struct ca_pid
+    :header-rows:  1
     :stub-columns: 0
 
+    -
+       - unsigned int
+       - pid
+       - Program ID
 
-    -  .. row 1
+    -
+       - int
+       - index
+       - PID index. Use -1 to disable.
 
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals CA_SET_PID for this command.
-
-    -  .. row 3
-
-       -  ca_pid_t \*
-
-       -  Undocumented.
 
 
 Description
 -----------
 
-This ioctl is undocumented. Documentation is welcome.
+.. note:: This ioctl is undocumented. Documentation is welcome.
 
 
 Return Value
diff --git a/Documentation/media/uapi/dvb/ca_data_types.rst b/Documentation/media/uapi/dvb/ca_data_types.rst
index 025f910..d9e27c7 100644
--- a/Documentation/media/uapi/dvb/ca_data_types.rst
+++ b/Documentation/media/uapi/dvb/ca_data_types.rst
@@ -7,7 +7,7 @@
 *************
 
 
-.. _ca-slot-info:
+.. c:type:: ca_slot_info
 
 ca_slot_info_t
 ==============
@@ -31,7 +31,7 @@
     } ca_slot_info_t;
 
 
-.. _ca-descr-info:
+.. c:type:: ca_descr_info
 
 ca_descr_info_t
 ===============
@@ -48,7 +48,7 @@
     } ca_descr_info_t;
 
 
-.. _ca-caps:
+.. c:type:: ca_caps
 
 ca_caps_t
 =========
@@ -64,7 +64,7 @@
      } ca_cap_t;
 
 
-.. _ca-msg:
+.. c:type:: ca_msg
 
 ca_msg_t
 ========
@@ -81,7 +81,7 @@
     } ca_msg_t;
 
 
-.. _ca-descr:
+.. c:type:: ca_descr
 
 ca_descr_t
 ==========
@@ -96,7 +96,7 @@
     } ca_descr_t;
 
 
-.. _ca-pid:
+.. c:type:: ca_pid
 
 ca-pid
 ======
diff --git a/Documentation/media/uapi/dvb/dmx-add-pid.rst b/Documentation/media/uapi/dvb/dmx-add-pid.rst
index 6343035..689cd1f 100644
--- a/Documentation/media/uapi/dvb/dmx-add-pid.rst
+++ b/Documentation/media/uapi/dvb/dmx-add-pid.rst
@@ -15,34 +15,18 @@
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = DMX_ADD_PID, __u16 *)
+.. c:function:: int ioctl(fd, DMX_ADD_PID, __u16 *pid)
+    :name: DMX_ADD_PID
 
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-dmx-open>`.
 
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals DMX_ADD_PID for this command.
-
-    -  .. row 3
-
-       -  __u16 *
-
-       -  PID number to be filtered.
+``pid``
+   PID number to be filtered.
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/dmx-fclose.rst b/Documentation/media/uapi/dvb/dmx-fclose.rst
index f54c2a1..ca93c23 100644
--- a/Documentation/media/uapi/dvb/dmx-fclose.rst
+++ b/Documentation/media/uapi/dvb/dmx-fclose.rst
@@ -15,23 +15,15 @@
 Synopsis
 --------
 
-.. cpp:function:: int close(int fd)
+.. c:function:: int close(int fd)
+    :name: dvb-dmx-close
 
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
-
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
+``fd``
+  File descriptor returned by a previous call to :c:func:`open() <dvb-ca-open>`.
 
 Description
 -----------
diff --git a/Documentation/media/uapi/dvb/dmx-fopen.rst b/Documentation/media/uapi/dvb/dmx-fopen.rst
index 76dbb42..a697e33 100644
--- a/Documentation/media/uapi/dvb/dmx-fopen.rst
+++ b/Documentation/media/uapi/dvb/dmx-fopen.rst
@@ -15,43 +15,34 @@
 Synopsis
 --------
 
-.. cpp:function:: int open(const char *deviceName, int flags)
-
+.. c:function:: int open(const char *deviceName, int flags)
+    :name: dvb-dmx-open
 
 Arguments
 ---------
 
+``name``
+  Name of specific DVB demux device.
+
+``flags``
+  A bit-wise OR of the following flags:
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
+    -
+       - O_RDONLY
+       - read-only access
 
-    -  .. row 1
+    -
+       - O_RDWR
+       - read/write access
 
-       -  const char \*deviceName
-
-       -  Name of demux device.
-
-    -  .. row 2
-
-       -  int flags
-
-       -  A bit-wise OR of the following flags:
-
-    -  .. row 3
-
-       -
-       -  O_RDWR read/write access
-
-    -  .. row 4
-
-       -
-       -  O_NONBLOCK open in non-blocking mode
-
-    -  .. row 5
-
-       -
-       -  (blocking mode is the default)
+    -
+       - O_NONBLOCK
+       - open in non-blocking mode
+         (blocking mode is the default)
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/dmx-fread.rst b/Documentation/media/uapi/dvb/dmx-fread.rst
index d25b19e..e8c7f4d 100644
--- a/Documentation/media/uapi/dvb/dmx-fread.rst
+++ b/Documentation/media/uapi/dvb/dmx-fread.rst
@@ -15,35 +15,20 @@
 Synopsis
 --------
 
-.. cpp:function:: size_t read(int fd, void *buf, size_t count)
-
+.. c:function:: size_t read(int fd, void *buf, size_t count)
+    :name: dvb-dmx-read
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+  File descriptor returned by a previous call to :c:func:`open() <dvb-ca-open>`.
 
+ ``buf``
+   Buffer to be filled
 
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  void \*buf
-
-       -  Pointer to the buffer to be used for returned filtered data.
-
-    -  .. row 3
-
-       -  size_t count
-
-       -  Size of buf.
-
+``count``
+   Max number of bytes to read
 
 Description
 -----------
@@ -53,10 +38,11 @@
 circular buffer to buf. The maximum amount of data to be transferred is
 implied by count.
 
-
 Return Value
 ------------
 
+.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
diff --git a/Documentation/media/uapi/dvb/dmx-fwrite.rst b/Documentation/media/uapi/dvb/dmx-fwrite.rst
index 9efd81a..8a90dfe 100644
--- a/Documentation/media/uapi/dvb/dmx-fwrite.rst
+++ b/Documentation/media/uapi/dvb/dmx-fwrite.rst
@@ -15,35 +15,20 @@
 Synopsis
 --------
 
-.. cpp:function:: ssize_t write(int fd, const void *buf, size_t count)
-
+.. c:function:: ssize_t write(int fd, const void *buf, size_t count)
+    :name: dvb-dmx-write
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+  File descriptor returned by a previous call to :c:func:`open() <dvb-ca-open>`.
 
+``buf``
+     Buffer with data to be written
 
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  void \*buf
-
-       -  Pointer to the buffer containing the Transport Stream.
-
-    -  .. row 3
-
-       -  size_t count
-
-       -  Size of buf.
-
+``count``
+    Number of bytes at the buffer
 
 Description
 -----------
@@ -59,11 +44,12 @@
 Return Value
 ------------
 
+.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
     -  .. row 1
 
        -  ``EWOULDBLOCK``
diff --git a/Documentation/media/uapi/dvb/dmx-get-caps.rst b/Documentation/media/uapi/dvb/dmx-get-caps.rst
index d0549eb..145fb52 100644
--- a/Documentation/media/uapi/dvb/dmx-get-caps.rst
+++ b/Documentation/media/uapi/dvb/dmx-get-caps.rst
@@ -15,41 +15,23 @@
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = DMX_GET_CAPS, dmx_caps_t *)
-
+.. c:function:: int ioctl(fd, DMX_GET_CAPS, struct dmx_caps *caps)
+    :name: DMX_GET_CAPS
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-dmx-open>`.
 
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals DMX_GET_CAPS for this command.
-
-    -  .. row 3
-
-       -  dmx_caps_t *
-
-       -  Undocumented.
+``caps``
+    Pointer to struct :c:type:`dmx_caps`
 
 
 Description
 -----------
 
-This ioctl is undocumented. Documentation is welcome.
-
+.. note:: This ioctl is undocumented. Documentation is welcome.
 
 Return Value
 ------------
diff --git a/Documentation/media/uapi/dvb/dmx-get-event.rst b/Documentation/media/uapi/dvb/dmx-get-event.rst
index 6a7550c..8be626c 100644
--- a/Documentation/media/uapi/dvb/dmx-get-event.rst
+++ b/Documentation/media/uapi/dvb/dmx-get-event.rst
@@ -15,34 +15,18 @@
 Synopsis
 --------
 
-.. cpp:function:: int ioctl( int fd, int request = DMX_GET_EVENT, struct dmx_event *ev)
+.. c:function:: int ioctl( int fd, DMX_GET_EVENT, struct dmx_event *ev)
+    :name: DMX_GET_EVENT
 
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-dmx-open>`.
 
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals DMX_GET_EVENT for this command.
-
-    -  .. row 3
-
-       -  struct dmx_event \*ev
-
-       -  Pointer to the location where the event is to be stored.
+``ev``
+    Pointer to the location where the event is to be stored.
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/dmx-get-pes-pids.rst b/Documentation/media/uapi/dvb/dmx-get-pes-pids.rst
index ba5d30c..b31634a 100644
--- a/Documentation/media/uapi/dvb/dmx-get-pes-pids.rst
+++ b/Documentation/media/uapi/dvb/dmx-get-pes-pids.rst
@@ -15,40 +15,23 @@
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = DMX_GET_PES_PIDS, __u16[5])
-
+.. c:function:: int ioctl(fd, DMX_GET_PES_PIDS, __u16 pids[5])
+    :name: DMX_GET_PES_PIDS
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-dmx-open>`.
 
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals DMX_GET_PES_PIDS for this command.
-
-    -  .. row 3
-
-       -  __u16[5]
-
-       -  Undocumented.
+``pids``
+    Undocumented.
 
 
 Description
 -----------
 
-This ioctl is undocumented. Documentation is welcome.
+.. note:: This ioctl is undocumented. Documentation is welcome.
 
 
 Return Value
diff --git a/Documentation/media/uapi/dvb/dmx-get-stc.rst b/Documentation/media/uapi/dvb/dmx-get-stc.rst
index bd477bb..9fc501e 100644
--- a/Documentation/media/uapi/dvb/dmx-get-stc.rst
+++ b/Documentation/media/uapi/dvb/dmx-get-stc.rst
@@ -15,34 +15,17 @@
 Synopsis
 --------
 
-.. cpp:function:: int ioctl( int fd, int request = DMX_GET_STC, struct dmx_stc *stc)
-
+.. c:function:: int ioctl( int fd, DMX_GET_STC, struct dmx_stc *stc)
+    :name: DMX_GET_STC
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-dmx-open>`.
 
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals DMX_GET_STC for this command.
-
-    -  .. row 3
-
-       -  struct dmx_stc \*stc
-
-       -  Pointer to the location where the stc is to be stored.
+``stc``
+    Pointer to the location where the stc is to be stored.
 
 
 Description
@@ -63,8 +46,6 @@
 appropriately. The generic error codes are described at the
 :ref:`Generic Error Codes <gen-errors>` chapter.
 
-
-
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
diff --git a/Documentation/media/uapi/dvb/dmx-remove-pid.rst b/Documentation/media/uapi/dvb/dmx-remove-pid.rst
index c8f038b..e411495 100644
--- a/Documentation/media/uapi/dvb/dmx-remove-pid.rst
+++ b/Documentation/media/uapi/dvb/dmx-remove-pid.rst
@@ -15,34 +15,18 @@
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = DMX_REMOVE_PID, __u16 *)
+.. c:function:: int ioctl(fd, DMX_REMOVE_PID, __u16 *pid)
+    :name: DMX_REMOVE_PID
 
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-dmx-open>`.
 
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals DMX_REMOVE_PID for this command.
-
-    -  .. row 3
-
-       -  __u16 *
-
-       -  PID of the PES filter to be removed.
+``pid``
+    PID of the PES filter to be removed.
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/dmx-set-buffer-size.rst b/Documentation/media/uapi/dvb/dmx-set-buffer-size.rst
index 8ae48cf..f2f7379 100644
--- a/Documentation/media/uapi/dvb/dmx-set-buffer-size.rst
+++ b/Documentation/media/uapi/dvb/dmx-set-buffer-size.rst
@@ -15,35 +15,18 @@
 Synopsis
 --------
 
-.. cpp:function:: int ioctl( int fd, int request = DMX_SET_BUFFER_SIZE, unsigned long size)
+.. c:function:: int ioctl( int fd, DMX_SET_BUFFER_SIZE, unsigned long size)
+    :name: DMX_SET_BUFFER_SIZE
 
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-dmx-open>`.
 
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals DMX_SET_BUFFER_SIZE for this command.
-
-    -  .. row 3
-
-       -  unsigned long size
-
-       -  Size of circular buffer.
-
+``size``
+    Unsigned long size
 
 Description
 -----------
diff --git a/Documentation/media/uapi/dvb/dmx-set-filter.rst b/Documentation/media/uapi/dvb/dmx-set-filter.rst
index 8c929fa..1d50c80 100644
--- a/Documentation/media/uapi/dvb/dmx-set-filter.rst
+++ b/Documentation/media/uapi/dvb/dmx-set-filter.rst
@@ -15,34 +15,18 @@
 Synopsis
 --------
 
-.. cpp:function:: int ioctl( int fd, int request = DMX_SET_FILTER, struct dmx_sct_filter_params *params)
-
+.. c:function:: int ioctl( int fd, DMX_SET_FILTER, struct dmx_sct_filter_params *params)
+    :name: DMX_SET_FILTER
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-dmx-open>`.
 
+``params``
 
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals DMX_SET_FILTER for this command.
-
-    -  .. row 3
-
-       -  struct dmx_sct_filter_params \*params
-
-       -  Pointer to structure containing filter parameters.
+    Pointer to structure containing filter parameters.
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/dmx-set-pes-filter.rst b/Documentation/media/uapi/dvb/dmx-set-pes-filter.rst
index addc321..145451d 100644
--- a/Documentation/media/uapi/dvb/dmx-set-pes-filter.rst
+++ b/Documentation/media/uapi/dvb/dmx-set-pes-filter.rst
@@ -15,34 +15,19 @@
 Synopsis
 --------
 
-.. cpp:function:: int ioctl( int fd, int request = DMX_SET_PES_FILTER, struct dmx_pes_filter_params *params)
+.. c:function:: int ioctl( int fd, DMX_SET_PES_FILTER, struct dmx_pes_filter_params *params)
+    :name: DMX_SET_PES_FILTER
 
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
 
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-dmx-open>`.
 
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals DMX_SET_PES_FILTER for this command.
-
-    -  .. row 3
-
-       -  struct dmx_pes_filter_params \*params
-
-       -  Pointer to structure containing filter parameters.
+``params``
+    Pointer to structure containing filter parameters.
 
 
 Description
@@ -61,7 +46,7 @@
 appropriately. The generic error codes are described at the
 :ref:`Generic Error Codes <gen-errors>` chapter.
 
-
+.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
 
 .. flat-table::
     :header-rows:  0
diff --git a/Documentation/media/uapi/dvb/dmx-set-source.rst b/Documentation/media/uapi/dvb/dmx-set-source.rst
index 99a8d5c..ac7f77b 100644
--- a/Documentation/media/uapi/dvb/dmx-set-source.rst
+++ b/Documentation/media/uapi/dvb/dmx-set-source.rst
@@ -15,40 +15,25 @@
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = DMX_SET_SOURCE, dmx_source_t *)
+.. c:function:: int ioctl(fd, DMX_SET_SOURCE, struct dmx_source *src)
+    :name: DMX_SET_SOURCE
 
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
 
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-dmx-open>`.
 
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals DMX_SET_SOURCE for this command.
-
-    -  .. row 3
-
-       -  dmx_source_t *
-
-       -  Undocumented.
+``src``
+   Undocumented.
 
 
 Description
 -----------
 
-This ioctl is undocumented. Documentation is welcome.
+.. note:: This ioctl is undocumented. Documentation is welcome.
 
 
 Return Value
diff --git a/Documentation/media/uapi/dvb/dmx-start.rst b/Documentation/media/uapi/dvb/dmx-start.rst
index 9835d1e..641f3e0 100644
--- a/Documentation/media/uapi/dvb/dmx-start.rst
+++ b/Documentation/media/uapi/dvb/dmx-start.rst
@@ -15,29 +15,15 @@
 Synopsis
 --------
 
-.. cpp:function:: int ioctl( int fd, int request = DMX_START)
+.. c:function:: int ioctl( int fd, DMX_START)
+    :name: DMX_START
 
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
-
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals DMX_START for this command.
-
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-dmx-open>`.
 
 Description
 -----------
@@ -53,7 +39,7 @@
 appropriately. The generic error codes are described at the
 :ref:`Generic Error Codes <gen-errors>` chapter.
 
-
+.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
 
 .. flat-table::
     :header-rows:  0
diff --git a/Documentation/media/uapi/dvb/dmx-stop.rst b/Documentation/media/uapi/dvb/dmx-stop.rst
index 7e4bf09..569a3df 100644
--- a/Documentation/media/uapi/dvb/dmx-stop.rst
+++ b/Documentation/media/uapi/dvb/dmx-stop.rst
@@ -15,29 +15,15 @@
 Synopsis
 --------
 
-.. cpp:function:: int ioctl( int fd, int request = DMX_STOP)
+.. c:function:: int ioctl( int fd, DMX_STOP)
+    :name: DMX_STOP
 
 
 Arguments
 ---------
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
-
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals DMX_STOP for this command.
-
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-dmx-open>`.
 
 Description
 -----------
diff --git a/Documentation/media/uapi/dvb/dmx_types.rst b/Documentation/media/uapi/dvb/dmx_types.rst
index 7a8900a..80dd659 100644
--- a/Documentation/media/uapi/dvb/dmx_types.rst
+++ b/Documentation/media/uapi/dvb/dmx_types.rst
@@ -6,14 +6,12 @@
 Demux Data Types
 ****************
 
-
-.. _dmx-output-t:
-
 Output for the demux
 ====================
 
+.. c:type:: dmx_output
 
-.. _dmx-output:
+.. tabularcolumns:: |p{5.0cm}|p{12.5cm}|
 
 .. flat-table:: enum dmx_output
     :header-rows:  1
@@ -65,12 +63,10 @@
 	  from the DMX device.
 
 
-
-.. _dmx-input-t:
-
 dmx_input_t
 ===========
 
+.. c:type:: dmx_input
 
 .. code-block:: c
 
@@ -81,11 +77,11 @@
     } dmx_input_t;
 
 
-.. _dmx-pes-type-t:
-
 dmx_pes_type_t
 ==============
 
+.. c:type:: dmx_pes_type
+
 
 .. code-block:: c
 
@@ -119,11 +115,10 @@
     } dmx_pes_type_t;
 
 
-.. _dmx-filter:
-
 struct dmx_filter
 =================
 
+.. c:type:: dmx_filter
 
 .. code-block:: c
 
@@ -135,7 +130,7 @@
     } dmx_filter_t;
 
 
-.. _dmx-sct-filter-params:
+.. c:type:: dmx_sct_filter_params
 
 struct dmx_sct_filter_params
 ============================
@@ -156,11 +151,10 @@
     };
 
 
-.. _dmx-pes-filter-params:
-
 struct dmx_pes_filter_params
 ============================
 
+.. c:type:: dmx_pes_filter_params
 
 .. code-block:: c
 
@@ -174,11 +168,10 @@
     };
 
 
-.. _dmx-event:
-
 struct dmx_event
 ================
 
+.. c:type:: dmx_event
 
 .. code-block:: c
 
@@ -193,11 +186,10 @@
      };
 
 
-.. _dmx-stc:
-
 struct dmx_stc
 ==============
 
+.. c:type:: dmx_stc
 
 .. code-block:: c
 
@@ -208,11 +200,10 @@
     };
 
 
-.. _dmx-caps:
-
 struct dmx_caps
 ===============
 
+.. c:type:: dmx_caps
 
 .. code-block:: c
 
@@ -222,15 +213,14 @@
     } dmx_caps_t;
 
 
-.. _dmx-source-t:
+enum dmx_source
+===============
 
-enum dmx_source_t
-=================
-
+.. c:type:: dmx_source
 
 .. code-block:: c
 
-    typedef enum {
+    typedef enum dmx_source {
 	DMX_SOURCE_FRONT0 = 0,
 	DMX_SOURCE_FRONT1,
 	DMX_SOURCE_FRONT2,
diff --git a/Documentation/media/uapi/dvb/dtv-fe-stats.rst b/Documentation/media/uapi/dvb/dtv-fe-stats.rst
index 7c105e2..e8a02a1 100644
--- a/Documentation/media/uapi/dvb/dtv-fe-stats.rst
+++ b/Documentation/media/uapi/dvb/dtv-fe-stats.rst
@@ -1,6 +1,6 @@
 .. -*- coding: utf-8; mode: rst -*-
 
-.. _dtv-fe-stats:
+.. c:type:: dtv_fe_stats
 
 *******************
 struct dtv_fe_stats
diff --git a/Documentation/media/uapi/dvb/dtv-properties.rst b/Documentation/media/uapi/dvb/dtv-properties.rst
index c13be5d..48c4e83 100644
--- a/Documentation/media/uapi/dvb/dtv-properties.rst
+++ b/Documentation/media/uapi/dvb/dtv-properties.rst
@@ -1,6 +1,6 @@
 .. -*- coding: utf-8; mode: rst -*-
 
-.. _dtv-properties:
+.. c:type:: dtv_properties
 
 *********************
 struct dtv_properties
diff --git a/Documentation/media/uapi/dvb/dtv-property.rst b/Documentation/media/uapi/dvb/dtv-property.rst
index 5073a49..3ddc347 100644
--- a/Documentation/media/uapi/dvb/dtv-property.rst
+++ b/Documentation/media/uapi/dvb/dtv-property.rst
@@ -1,6 +1,6 @@
 .. -*- coding: utf-8; mode: rst -*-
 
-.. _dtv-property:
+.. c:type:: dtv_property
 
 *******************
 struct dtv_property
diff --git a/Documentation/media/uapi/dvb/dtv-stats.rst b/Documentation/media/uapi/dvb/dtv-stats.rst
index 2cfdca0..35239e7 100644
--- a/Documentation/media/uapi/dvb/dtv-stats.rst
+++ b/Documentation/media/uapi/dvb/dtv-stats.rst
@@ -1,6 +1,6 @@
 .. -*- coding: utf-8; mode: rst -*-
 
-.. _dtv-stats:
+.. c:type:: dtv_stats
 
 ****************
 struct dtv_stats
diff --git a/Documentation/media/uapi/dvb/dvb-fe-read-status.rst b/Documentation/media/uapi/dvb/dvb-fe-read-status.rst
index fcffaa7..76c2061 100644
--- a/Documentation/media/uapi/dvb/dvb-fe-read-status.rst
+++ b/Documentation/media/uapi/dvb/dvb-fe-read-status.rst
@@ -17,7 +17,9 @@
 Signal statistics are provided via
 :ref:`FE_GET_PROPERTY`.
 
-.. note:: Most statistics require the demodulator to be fully locked
+.. note::
+
+   Most statistics require the demodulator to be fully locked
    (e. g. with FE_HAS_LOCK bit set). See
    :ref:`Frontend statistics indicators <frontend-stat-properties>` for
    more details.
diff --git a/Documentation/media/uapi/dvb/dvb-frontend-event.rst b/Documentation/media/uapi/dvb/dvb-frontend-event.rst
index 78e72fe..2088bc6 100644
--- a/Documentation/media/uapi/dvb/dvb-frontend-event.rst
+++ b/Documentation/media/uapi/dvb/dvb-frontend-event.rst
@@ -1,6 +1,6 @@
 .. -*- coding: utf-8; mode: rst -*-
 
-.. _dvb-frontend-event:
+.. c:type:: dvb_frontend_event
 
 ***************
 frontend events
diff --git a/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst b/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst
index 16cb581..bf31411 100644
--- a/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst
+++ b/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst
@@ -1,6 +1,6 @@
 .. -*- coding: utf-8; mode: rst -*-
 
-.. _dvb-frontend-parameters:
+.. c:type:: dvb_frontend_parameters
 
 *******************
 frontend parameters
@@ -49,7 +49,7 @@
 given in Hz.
 
 
-.. _dvb-qpsk-parameters:
+.. c:type:: dvb_qpsk_parameters
 
 QPSK parameters
 ===============
@@ -66,7 +66,7 @@
      };
 
 
-.. _dvb-qam-parameters:
+.. c:type:: dvb_qam_parameters
 
 QAM parameters
 ==============
@@ -83,7 +83,7 @@
      };
 
 
-.. _dvb-vsb-parameters:
+.. c:type:: dvb_vsb_parameters
 
 VSB parameters
 ==============
@@ -98,7 +98,7 @@
     };
 
 
-.. _dvb-ofdm-parameters:
+.. c:type:: dvb_ofdm_parameters
 
 OFDM parameters
 ===============
diff --git a/Documentation/media/uapi/dvb/dvbapi.rst b/Documentation/media/uapi/dvb/dvbapi.rst
index 48e61ab..3768013 100644
--- a/Documentation/media/uapi/dvb/dvbapi.rst
+++ b/Documentation/media/uapi/dvb/dvbapi.rst
@@ -8,7 +8,9 @@
 Part II - Digital TV API
 ########################
 
-.. note:: This API is also known as **DVB API**, although it is generic
+.. note::
+
+   This API is also known as **DVB API**, although it is generic
    enough to support all digital TV standards.
 
 **Version 5.10**
diff --git a/Documentation/media/uapi/dvb/dvbproperty.rst b/Documentation/media/uapi/dvb/dvbproperty.rst
index cd0511b..dd2d71c 100644
--- a/Documentation/media/uapi/dvb/dvbproperty.rst
+++ b/Documentation/media/uapi/dvb/dvbproperty.rst
@@ -20,8 +20,10 @@
 breaking userspace. So, the decision was to deprecate the legacy
 union/struct based approach, in favor of a properties set approach.
 
-.. note:: On Linux DVB API version 3, setting a frontend were done via
-   :ref:`struct dvb_frontend_parameters <dvb-frontend-parameters>`.
+.. note::
+
+   On Linux DVB API version 3, setting a frontend were done via
+   struct :c:type:`dvb_frontend_parameters`.
    This got replaced on version 5 (also called "S2API", as this API were
    added originally_enabled to provide support for DVB-S2), because the
    old API has a very limited support to new standards and new hardware.
diff --git a/Documentation/media/uapi/dvb/examples.rst b/Documentation/media/uapi/dvb/examples.rst
index bf0a861..1a94966 100644
--- a/Documentation/media/uapi/dvb/examples.rst
+++ b/Documentation/media/uapi/dvb/examples.rst
@@ -9,7 +9,9 @@
 In this section we would like to present some examples for using the DVB
 API.
 
-..note:: This section is out of date, and the code below won't even
+.. note::
+
+   This section is out of date, and the code below won't even
    compile. Please refer to the
    `libdvbv5 <https://linuxtv.org/docs/libdvbv5/index.html>`__ for
    updated/recommended examples.
diff --git a/Documentation/media/uapi/dvb/fe-bandwidth-t.rst b/Documentation/media/uapi/dvb/fe-bandwidth-t.rst
index 8edaf1a..7025618 100644
--- a/Documentation/media/uapi/dvb/fe-bandwidth-t.rst
+++ b/Documentation/media/uapi/dvb/fe-bandwidth-t.rst
@@ -1,13 +1,10 @@
 .. -*- coding: utf-8; mode: rst -*-
 
-.. _fe-bandwidth-t:
-
 ******************
 Frontend bandwidth
 ******************
 
-
-.. _fe-bandwidth:
+.. c:type:: fe_bandwidth
 
 .. flat-table:: enum fe_bandwidth
     :header-rows:  1
diff --git a/Documentation/media/uapi/dvb/fe-diseqc-recv-slave-reply.rst b/Documentation/media/uapi/dvb/fe-diseqc-recv-slave-reply.rst
index 7bd02ac..302db28 100644
--- a/Documentation/media/uapi/dvb/fe-diseqc-recv-slave-reply.rst
+++ b/Documentation/media/uapi/dvb/fe-diseqc-recv-slave-reply.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct dvb_diseqc_slave_reply *argp )
+.. c:function:: int ioctl( int fd, FE_DISEQC_RECV_SLAVE_REPLY, struct dvb_diseqc_slave_reply *argp )
+    :name: FE_DISEQC_RECV_SLAVE_REPLY
 
 
 Arguments
@@ -24,12 +25,9 @@
 ``fd``
     File descriptor returned by :ref:`open() <frontend_f_open>`.
 
-``request``
-    FE_DISEQC_RECV_SLAVE_REPLY
-
 ``argp``
     pointer to struct
-    :ref:`dvb_diseqc_slave_reply <dvb-diseqc-slave-reply>`
+    :c:type:`dvb_diseqc_slave_reply`
 
 
 Description
@@ -37,10 +35,9 @@
 
 Receives reply from a DiSEqC 2.0 command.
 
-.. _dvb-diseqc-slave-reply:
+.. c:type:: dvb_diseqc_slave_reply
 
-struct dvb_diseqc_slave_reply
------------------------------
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct dvb_diseqc_slave_reply
     :header-rows:  0
diff --git a/Documentation/media/uapi/dvb/fe-diseqc-reset-overload.rst b/Documentation/media/uapi/dvb/fe-diseqc-reset-overload.rst
index cab1570..75116f2 100644
--- a/Documentation/media/uapi/dvb/fe-diseqc-reset-overload.rst
+++ b/Documentation/media/uapi/dvb/fe-diseqc-reset-overload.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, NULL )
+.. c:function:: int ioctl( int fd, FE_DISEQC_RESET_OVERLOAD, NULL )
+    :name: FE_DISEQC_RESET_OVERLOAD
 
 
 Arguments
@@ -24,10 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <frontend_f_open>`.
 
-``request``
-    FE_DISEQC_RESET_OVERLOAD
-
-
 Description
 ===========
 
diff --git a/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst b/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst
index 9b47654..26272f2 100644
--- a/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst
+++ b/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, enum fe_sec_mini_cmd *tone )
+.. c:function:: int ioctl( int fd, FE_DISEQC_SEND_BURST, enum fe_sec_mini_cmd *tone )
+    :name: FE_DISEQC_SEND_BURST
 
 
 Arguments
@@ -24,11 +25,8 @@
 ``fd``
     File descriptor returned by :ref:`open() <frontend_f_open>`.
 
-``request``
-    FE_DISEQC_SEND_BURST
-
 ``tone``
-    pointer to enum :ref:`fe_sec_mini_cmd <fe-sec-mini-cmd>`
+    pointer to enum :c:type:`fe_sec_mini_cmd`
 
 
 Description
@@ -41,12 +39,7 @@
 It provides support for what's specified at
 `Digital Satellite Equipment Control (DiSEqC) - Simple "ToneBurst" Detection Circuit specification. <http://www.eutelsat.com/files/contributed/satellites/pdf/Diseqc/associated%20docs/simple_tone_burst_detec.pdf>`__
 
-.. _fe-sec-mini-cmd-t:
-
-enum fe_sec_mini_cmd
-====================
-
-.. _fe-sec-mini-cmd:
+.. c:type:: fe_sec_mini_cmd
 
 .. flat-table:: enum fe_sec_mini_cmd
     :header-rows:  1
diff --git a/Documentation/media/uapi/dvb/fe-diseqc-send-master-cmd.rst b/Documentation/media/uapi/dvb/fe-diseqc-send-master-cmd.rst
index 58a5e6a..bbcab3d 100644
--- a/Documentation/media/uapi/dvb/fe-diseqc-send-master-cmd.rst
+++ b/Documentation/media/uapi/dvb/fe-diseqc-send-master-cmd.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct dvb_diseqc_master_cmd *argp )
+.. c:function:: int ioctl( int fd, FE_DISEQC_SEND_MASTER_CMD, struct dvb_diseqc_master_cmd *argp )
+    :name: FE_DISEQC_SEND_MASTER_CMD
 
 
 Arguments
@@ -24,12 +25,9 @@
 ``fd``
     File descriptor returned by :ref:`open() <frontend_f_open>`.
 
-``request``
-    FE_DISEQC_SEND_MASTER_CMD
-
 ``argp``
     pointer to struct
-    :ref:`dvb_diseqc_master_cmd <dvb-diseqc-master-cmd>`
+    :c:type:`dvb_diseqc_master_cmd`
 
 
 Description
@@ -37,10 +35,10 @@
 
 Sends a DiSEqC command to the antenna subsystem.
 
-.. _dvb-diseqc-master-cmd:
 
-struct dvb_diseqc_master_cmd
-============================
+.. c:type:: dvb_diseqc_master_cmd
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct dvb_diseqc_master_cmd
     :header-rows:  0
diff --git a/Documentation/media/uapi/dvb/fe-dishnetwork-send-legacy-cmd.rst b/Documentation/media/uapi/dvb/fe-dishnetwork-send-legacy-cmd.rst
index d47e9db..f41371f 100644
--- a/Documentation/media/uapi/dvb/fe-dishnetwork-send-legacy-cmd.rst
+++ b/Documentation/media/uapi/dvb/fe-dishnetwork-send-legacy-cmd.rst
@@ -15,22 +15,18 @@
 Synopsis
 ========
 
-.. cpp:function:: int  ioctl(int fd, int request = FE_DISHNETWORK_SEND_LEGACY_CMD, unsigned long cmd)
+.. c:function:: int  ioctl(int fd, FE_DISHNETWORK_SEND_LEGACY_CMD, unsigned long cmd)
+    :name: FE_DISHNETWORK_SEND_LEGACY_CMD
 
 
 Arguments
 =========
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-fe-open>`.
 
-
-    -  .. row 1
-
-       -  unsigned long cmd
-
-       -  sends the specified raw cmd to the dish via DISEqC.
+``cmd``
+    Sends the specified raw cmd to the dish via DISEqC.
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/fe-enable-high-lnb-voltage.rst b/Documentation/media/uapi/dvb/fe-enable-high-lnb-voltage.rst
index de99bf5..bacafbc 100644
--- a/Documentation/media/uapi/dvb/fe-enable-high-lnb-voltage.rst
+++ b/Documentation/media/uapi/dvb/fe-enable-high-lnb-voltage.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, unsigned int high )
+.. c:function:: int ioctl( int fd, FE_ENABLE_HIGH_LNB_VOLTAGE, unsigned int high )
+    :name: FE_ENABLE_HIGH_LNB_VOLTAGE
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <frontend_f_open>`.
 
-``request``
-    FE_ENABLE_HIGH_LNB_VOLTAGE
-
 ``high``
     Valid flags:
 
diff --git a/Documentation/media/uapi/dvb/fe-get-event.rst b/Documentation/media/uapi/dvb/fe-get-event.rst
index ffa3d04..8a719c3 100644
--- a/Documentation/media/uapi/dvb/fe-get-event.rst
+++ b/Documentation/media/uapi/dvb/fe-get-event.rst
@@ -11,43 +11,24 @@
 
 FE_GET_EVENT
 
+.. attention:: This ioctl is deprecated.
+
 
 Synopsis
 ========
 
-.. cpp:function:: int  ioctl(int fd, int request = QPSK_GET_EVENT, struct dvb_frontend_event *ev)
+.. c:function:: int  ioctl(int fd, FE_GET_EVENT, struct dvb_frontend_event *ev)
+    :name: FE_GET_EVENT
 
 
 Arguments
 =========
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-fe-open>`.
 
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals :ref:`FE_GET_EVENT` for this command.
-
-    -  .. row 3
-
-       -  struct dvb_frontend_event \*ev
-
-       -  Points to the location where the event,
-
-    -  .. row 4
-
-       -
-       -  if any, is to be stored.
+``ev``
+    Points to the location where the event, if any, is to be stored.
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/fe-get-frontend.rst b/Documentation/media/uapi/dvb/fe-get-frontend.rst
index 5d2df80..d53a3f8 100644
--- a/Documentation/media/uapi/dvb/fe-get-frontend.rst
+++ b/Documentation/media/uapi/dvb/fe-get-frontend.rst
@@ -11,39 +11,25 @@
 
 FE_GET_FRONTEND
 
+.. attention:: This ioctl is deprecated.
+
 
 Synopsis
 ========
 
-.. cpp:function:: int ioctl(int fd, int request = FE_GET_FRONTEND, struct dvb_frontend_parameters *p)
+.. c:function:: int ioctl(int fd, FE_GET_FRONTEND, struct dvb_frontend_parameters *p)
+    :name: FE_GET_FRONTEND
 
 
 Arguments
 =========
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-fe-open>`.
 
 
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals :ref:`FE_SET_FRONTEND` for this
-	  command.
-
-    -  .. row 3
-
-       -  struct dvb_frontend_parameters \*p
-
-       -  Points to parameters for tuning operation.
+``p``
+    Points to parameters for tuning operation.
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/fe-get-info.rst b/Documentation/media/uapi/dvb/fe-get-info.rst
index bb6c32e..e3d64b2 100644
--- a/Documentation/media/uapi/dvb/fe-get-info.rst
+++ b/Documentation/media/uapi/dvb/fe-get-info.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct dvb_frontend_info *argp )
+.. c:function:: int ioctl( int fd, FE_GET_INFO, struct dvb_frontend_info *argp )
+    :name: FE_GET_INFO
 
 
 Arguments
@@ -24,12 +25,9 @@
 ``fd``
     File descriptor returned by :ref:`open() <frontend_f_open>`.
 
-``request``
-    FE_GET_INFO
-
 ``argp``
     pointer to struct struct
-    :ref:`dvb_frontend_info <dvb-frontend-info>`
+    :c:type:`dvb_frontend_info`
 
 
 Description
@@ -42,10 +40,9 @@
 When the driver is not compatible with this specification the ioctl
 returns an error.
 
-.. _dvb-frontend-info:
+.. c:type:: dvb_frontend_info
 
-struct dvb_frontend_info
-========================
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct dvb_frontend_info
     :header-rows:  0
@@ -137,27 +134,28 @@
 
     -  .. row 11
 
-       -  enum :ref:`fe_caps <fe-caps>`
+       -  enum :c:type:`fe_caps`
 
        -  caps
 
        -  Capabilities supported by the frontend
 
 
-.. note:: The frequencies are specified in Hz for Terrestrial and Cable
+.. note::
+
+   The frequencies are specified in Hz for Terrestrial and Cable
    systems. They're specified in kHz for Satellite systems
 
 
-.. _fe-caps-t:
-
 frontend capabilities
 =====================
 
 Capabilities describe what a frontend can do. Some capabilities are
 supported only on some specific frontend types.
 
+.. c:type:: fe_caps
 
-.. _fe-caps:
+.. tabularcolumns:: |p{6.5cm}|p{11.0cm}|
 
 .. flat-table:: enum fe_caps
     :header-rows:  1
diff --git a/Documentation/media/uapi/dvb/fe-get-property.rst b/Documentation/media/uapi/dvb/fe-get-property.rst
index 749daaf..015d4db 100644
--- a/Documentation/media/uapi/dvb/fe-get-property.rst
+++ b/Documentation/media/uapi/dvb/fe-get-property.rst
@@ -15,7 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct dtv_properties *argp )
+.. c:function:: int ioctl( int fd, FE_GET_PROPERTY, struct dtv_properties *argp )
+    :name: FE_GET_PROPERTY
+
+.. c:function:: int ioctl( int fd, FE_SET_PROPERTY, struct dtv_properties *argp )
+    :name: FE_SET_PROPERTY
 
 
 Arguments
@@ -24,11 +28,8 @@
 ``fd``
     File descriptor returned by :ref:`open() <frontend_f_open>`.
 
-``request``
-    FE_SET_PROPERTY, FE_GET_PROPERTY
-
 ``argp``
-    pointer to struct :ref:`dtv_properties <dtv-properties>`
+    pointer to struct :c:type:`dtv_properties`
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/fe-read-ber.rst b/Documentation/media/uapi/dvb/fe-read-ber.rst
index c2b5b41..e54972a 100644
--- a/Documentation/media/uapi/dvb/fe-read-ber.rst
+++ b/Documentation/media/uapi/dvb/fe-read-ber.rst
@@ -11,37 +11,23 @@
 
 FE_READ_BER
 
+.. attention:: This ioctl is deprecated.
+
 Synopsis
 ========
 
-.. cpp:function:: int  ioctl(int fd, int request = FE_READ_BER, uint32_t *ber)
+.. c:function:: int  ioctl(int fd, FE_READ_BER, uint32_t *ber)
+    :name: FE_READ_BER
 
 
 Arguments
 =========
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-fe-open>`.
 
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals :ref:`FE_READ_BER` for this command.
-
-    -  .. row 3
-
-       -  uint32_t \*ber
-
-       -  The bit error rate is stored into \*ber.
+``ber``
+    The bit error rate is stored into \*ber.
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/fe-read-signal-strength.rst b/Documentation/media/uapi/dvb/fe-read-signal-strength.rst
index 0cdee2e..4b13c47 100644
--- a/Documentation/media/uapi/dvb/fe-read-signal-strength.rst
+++ b/Documentation/media/uapi/dvb/fe-read-signal-strength.rst
@@ -11,40 +11,23 @@
 
 FE_READ_SIGNAL_STRENGTH
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request = FE_READ_SIGNAL_STRENGTH, uint16_t *strength)
+.. c:function:: int ioctl( int fd, FE_READ_SIGNAL_STRENGTH, uint16_t *strength)
+    :name: FE_READ_SIGNAL_STRENGTH
 
 
 Arguments
 =========
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-fe-open>`.
 
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals
-	  :ref:`FE_READ_SIGNAL_STRENGTH`
-	  for this command.
-
-    -  .. row 3
-
-       -  uint16_t \*strength
-
-       -  The signal strength value is stored into \*strength.
+``strength``
+    The signal strength value is stored into \*strength.
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/fe-read-snr.rst b/Documentation/media/uapi/dvb/fe-read-snr.rst
index 5394f9a..2aed487 100644
--- a/Documentation/media/uapi/dvb/fe-read-snr.rst
+++ b/Documentation/media/uapi/dvb/fe-read-snr.rst
@@ -11,38 +11,23 @@
 
 FE_READ_SNR
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 ========
 
-.. cpp:function:: int  ioctl(int fd, int request = FE_READ_SNR, int16_t *snr)
+.. c:function:: int  ioctl(int fd, FE_READ_SNR, int16_t *snr)
+    :name: FE_READ_SNR
 
 
 Arguments
 =========
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-fe-open>`.
 
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals :ref:`FE_READ_SNR` for this command.
-
-    -  .. row 3
-
-       -  uint16_t \*snr
-
-       -  The signal-to-noise ratio is stored into \*snr.
+``snr``
+    The signal-to-noise ratio is stored into \*snr.
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/fe-read-status.rst b/Documentation/media/uapi/dvb/fe-read-status.rst
index 624ed9d..812f086 100644
--- a/Documentation/media/uapi/dvb/fe-read-status.rst
+++ b/Documentation/media/uapi/dvb/fe-read-status.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, unsigned int *status )
+.. c:function:: int ioctl( int fd, FE_READ_STATUS, unsigned int *status )
+    :name: FE_READ_STATUS
 
 
 Arguments
@@ -24,12 +25,9 @@
 ``fd``
     File descriptor returned by :ref:`open() <frontend_f_open>`.
 
-``request``
-    FE_READ_STATUS
-
 ``status``
     pointer to a bitmask integer filled with the values defined by enum
-    :ref:`fe_status <fe-status>`.
+    :c:type:`fe_status`.
 
 
 Description
@@ -40,20 +38,23 @@
 tuned. The ioctl takes a pointer to an integer where the status will be
 written.
 
-.. note:: The size of status is actually sizeof(enum fe_status), with
+.. note::
+
+   The size of status is actually sizeof(enum fe_status), with
    varies according with the architecture. This needs to be fixed in the
    future.
 
 
-.. _fe-status-t:
-
 int fe_status
 =============
 
 The fe_status parameter is used to indicate the current state and/or
 state changes of the frontend hardware. It is produced using the enum
-:ref:`fe_status <fe-status>` values on a bitmask
+:c:type:`fe_status` values on a bitmask
 
+.. c:type:: fe_status
+
+.. tabularcolumns:: |p{3.5cm}|p{14.0cm}|
 
 .. _fe-status:
 
diff --git a/Documentation/media/uapi/dvb/fe-read-uncorrected-blocks.rst b/Documentation/media/uapi/dvb/fe-read-uncorrected-blocks.rst
index 5c29c058..46687c1 100644
--- a/Documentation/media/uapi/dvb/fe-read-uncorrected-blocks.rst
+++ b/Documentation/media/uapi/dvb/fe-read-uncorrected-blocks.rst
@@ -11,40 +11,23 @@
 
 FE_READ_UNCORRECTED_BLOCKS
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request =FE_READ_UNCORRECTED_BLOCKS, uint32_t *ublocks)
+.. c:function:: int ioctl( int fd, FE_READ_UNCORRECTED_BLOCKS, uint32_t *ublocks)
+    :name: FE_READ_UNCORRECTED_BLOCKS
 
 
 Arguments
 =========
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-fe-open>`.
 
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals
-	  :ref:`FE_READ_UNCORRECTED_BLOCKS`
-	  for this command.
-
-    -  .. row 3
-
-       -  uint32_t \*ublocks
-
-       -  The total number of uncorrected blocks seen by the driver so far.
+``ublocks``
+    The total number of uncorrected blocks seen by the driver so far.
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/fe-set-frontend-tune-mode.rst b/Documentation/media/uapi/dvb/fe-set-frontend-tune-mode.rst
index 411abcf..1d5878d 100644
--- a/Documentation/media/uapi/dvb/fe-set-frontend-tune-mode.rst
+++ b/Documentation/media/uapi/dvb/fe-set-frontend-tune-mode.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, unsigned int flags )
+.. c:function:: int ioctl( int fd, FE_SET_FRONTEND_TUNE_MODE, unsigned int flags )
+    :name: FE_SET_FRONTEND_TUNE_MODE
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <frontend_f_open>`.
 
-``request``
-    FE_SET_FRONTEND_TUNE_MODE
-
 ``flags``
     Valid flags:
 
diff --git a/Documentation/media/uapi/dvb/fe-set-frontend.rst b/Documentation/media/uapi/dvb/fe-set-frontend.rst
index 7cb70c3..7f97dce 100644
--- a/Documentation/media/uapi/dvb/fe-set-frontend.rst
+++ b/Documentation/media/uapi/dvb/fe-set-frontend.rst
@@ -6,6 +6,8 @@
 FE_SET_FRONTEND
 ***************
 
+.. attention:: This ioctl is deprecated.
+
 Name
 ====
 
@@ -15,35 +17,18 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl(int fd, int request = FE_SET_FRONTEND, struct dvb_frontend_parameters *p)
+.. c:function:: int ioctl(int fd, FE_SET_FRONTEND, struct dvb_frontend_parameters *p)
+    :name: FE_SET_FRONTEND
 
 
 Arguments
 =========
 
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
+``fd``
+    File descriptor returned by :c:func:`open() <dvb-fe-open>`.
 
-
-    -  .. row 1
-
-       -  int fd
-
-       -  File descriptor returned by a previous call to open().
-
-    -  .. row 2
-
-       -  int request
-
-       -  Equals :ref:`FE_SET_FRONTEND` for this
-	  command.
-
-    -  .. row 3
-
-       -  struct dvb_frontend_parameters \*p
-
-       -  Points to parameters for tuning operation.
+``p``
+    Points to parameters for tuning operation.
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/fe-set-tone.rst b/Documentation/media/uapi/dvb/fe-set-tone.rst
index 545e2af..bea1932 100644
--- a/Documentation/media/uapi/dvb/fe-set-tone.rst
+++ b/Documentation/media/uapi/dvb/fe-set-tone.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, enum fe_sec_tone_mode *tone )
+.. c:function:: int ioctl( int fd, FE_SET_TONE, enum fe_sec_tone_mode *tone )
+    :name: FE_SET_TONE
 
 
 Arguments
@@ -24,11 +25,8 @@
 ``fd``
     File descriptor returned by :ref:`open() <frontend_f_open>`.
 
-``request``
-    FE_SET_TONE
-
 ``tone``
-    pointer to enum :ref:`fe_sec_tone_mode <fe-sec-tone-mode>`
+    pointer to enum :c:type:`fe_sec_tone_mode`
 
 
 Description
@@ -47,12 +45,7 @@
    capability of selecting the band. So, it is recommended that applications
    would change to SEC_TONE_OFF when the device is not used.
 
-.. _fe-sec-tone-mode-t:
-
-enum fe_sec_tone_mode
-=====================
-
-.. _fe-sec-tone-mode:
+.. c:type:: fe_sec_tone_mode
 
 .. flat-table:: enum fe_sec_tone_mode
     :header-rows:  1
diff --git a/Documentation/media/uapi/dvb/fe-set-voltage.rst b/Documentation/media/uapi/dvb/fe-set-voltage.rst
index 2b19086..fcf6f38 100644
--- a/Documentation/media/uapi/dvb/fe-set-voltage.rst
+++ b/Documentation/media/uapi/dvb/fe-set-voltage.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, enum fe_sec_voltage *voltage )
+.. c:function:: int ioctl( int fd, FE_SET_VOLTAGE, enum fe_sec_voltage *voltage )
+    :name: FE_SET_VOLTAGE
 
 
 Arguments
@@ -24,14 +25,11 @@
 ``fd``
     File descriptor returned by :ref:`open() <frontend_f_open>`.
 
-``request``
-    FE_SET_VOLTAGE
-
 ``voltage``
-    pointer to enum :ref:`fe_sec_voltage <fe-sec-voltage>`
+    pointer to enum :c:type:`fe_sec_voltage`
 
     Valid values are described at enum
-    :ref:`fe_sec_voltage <fe-sec-voltage>`.
+    :c:type:`fe_sec_voltage`.
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/fe-type-t.rst b/Documentation/media/uapi/dvb/fe-type-t.rst
index 8ca762b..548b965 100644
--- a/Documentation/media/uapi/dvb/fe-type-t.rst
+++ b/Documentation/media/uapi/dvb/fe-type-t.rst
@@ -1,7 +1,5 @@
 .. -*- coding: utf-8; mode: rst -*-
 
-.. _fe-type-t:
-
 *************
 Frontend type
 *************
@@ -11,7 +9,9 @@
 fe_type_t type, defined as:
 
 
-.. _fe-type:
+.. c:type:: fe_type
+
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
 
 .. flat-table:: Frontend types
     :header-rows:  1
@@ -76,7 +76,7 @@
 ioctl's, using the :ref:`DTV_DELIVERY_SYSTEM <DTV-DELIVERY-SYSTEM>`
 parameter.
 
-In the old days, struct :ref:`dvb_frontend_info <dvb-frontend-info>`
+In the old days, struct :c:type:`dvb_frontend_info`
 used to contain ``fe_type_t`` field to indicate the delivery systems,
 filled with either FE_QPSK, FE_QAM, FE_OFDM or FE_ATSC. While this
 is still filled to keep backward compatibility, the usage of this field
@@ -85,7 +85,7 @@
 :ref:`DTV_ENUM_DELSYS <DTV-ENUM-DELSYS>` instead.
 
 On devices that support multiple delivery systems, struct
-:ref:`dvb_frontend_info <dvb-frontend-info>`::``fe_type_t`` is
+:c:type:`dvb_frontend_info`::``fe_type_t`` is
 filled with the currently standard, as selected by the last call to
 :ref:`FE_SET_PROPERTY <FE_GET_PROPERTY>` using the
 :ref:`DTV_DELIVERY_SYSTEM <DTV-DELIVERY-SYSTEM>` property.
diff --git a/Documentation/media/uapi/dvb/fe_property_parameters.rst b/Documentation/media/uapi/dvb/fe_property_parameters.rst
index f776d62..7bb7559c 100644
--- a/Documentation/media/uapi/dvb/fe_property_parameters.rst
+++ b/Documentation/media/uapi/dvb/fe_property_parameters.rst
@@ -68,10 +68,10 @@
 
 Specifies the frontend modulation type for delivery systems that
 supports more than one modulation type. The modulation can be one of the
-types defined by enum :ref:`fe_modulation <fe-modulation>`.
+types defined by enum :c:type:`fe_modulation`.
 
 
-.. _fe-modulation-t:
+.. c:type:: fe_modulation
 
 Modulation property
 -------------------
@@ -82,8 +82,6 @@
 modulations are supported by a given standard.
 
 
-.. _fe-modulation:
-
 .. flat-table:: enum fe_modulation
     :header-rows:  1
     :stub-columns: 0
@@ -251,8 +249,7 @@
 
 Specifies if the frontend should do spectral inversion or not.
 
-
-.. _fe-spectral-inversion-t:
+.. c:type:: fe_spectral_inversion
 
 enum fe_modulation: Frontend spectral inversion
 -----------------------------------------------
@@ -264,8 +261,6 @@
 inversion off. If it fails, it will try to enable inversion.
 
 
-.. _fe-spectral-inversion:
-
 .. flat-table:: enum fe_modulation
     :header-rows:  1
     :stub-columns: 0
@@ -327,15 +322,11 @@
 
 Used cable/satellite transmissions. The acceptable values are:
 
-
-.. _fe-code-rate-t:
+.. c:type:: fe_code_rate
 
 enum fe_code_rate: type of the Forward Error Correction.
 --------------------------------------------------------
 
-
-.. _fe-code-rate:
-
 .. flat-table:: enum fe_code_rate
     :header-rows:  1
     :stub-columns: 0
@@ -464,7 +455,7 @@
 described in the DiSEqC spec.
 
 
-.. _fe-sec-voltage:
+.. c:type:: fe_sec_voltage
 
 .. flat-table:: enum fe_sec_voltage
     :header-rows:  1
@@ -519,14 +510,12 @@
 Sets DVB-S2 pilot
 
 
-.. _fe-pilot-t:
+.. c:type:: fe_pilot
 
 fe_pilot type
 -------------
 
 
-.. _fe-pilot:
-
 .. flat-table:: enum fe_pilot
     :header-rows:  1
     :stub-columns: 0
@@ -572,14 +561,12 @@
 Sets DVB-S2 rolloff
 
 
-.. _fe-rolloff-t:
+.. c:type:: fe_rolloff
 
 fe_rolloff type
 ---------------
 
 
-.. _fe-rolloff:
-
 .. flat-table:: enum fe_rolloff
     :header-rows:  1
     :stub-columns: 0
@@ -657,7 +644,7 @@
 Specifies the type of Delivery system
 
 
-.. _fe-delivery-system-t:
+.. c:type:: fe_delivery_system
 
 fe_delivery_system type
 -----------------------
@@ -665,8 +652,6 @@
 Possible values:
 
 
-.. _fe-delivery-system:
-
 .. flat-table:: enum fe_delivery_system
     :header-rows:  1
     :stub-columns: 0
@@ -1005,10 +990,9 @@
 Note: Truth table for ``DTV_ISDBT_SOUND_BROADCASTING`` and
 ``DTV_ISDBT_PARTIAL_RECEPTION`` and ``LAYER[A-C]_SEGMENT_COUNT``
 
-
 .. _isdbt-layer_seg-cnt-table:
 
-.. flat-table::
+.. flat-table:: Truth table for ISDB-T Sound Broadcasting
     :header-rows:  0
     :stub-columns: 0
 
@@ -1099,9 +1083,9 @@
 TMCC-structure, as shown in the table below.
 
 
-.. _isdbt-layer-interleaving-table:
+.. c:type:: isdbt_layer_interleaving_table
 
-.. flat-table::
+.. flat-table:: ISDB-T time interleaving modes
     :header-rows:  0
     :stub-columns: 0
 
@@ -1234,8 +1218,9 @@
 
 Possible values are:
 
+.. tabularcolumns:: |p{5.0cm}|p{12.5cm}|
 
-.. _atscmh-rs-frame-mode:
+.. c:type:: atscmh_rs_frame_mode
 
 .. flat-table:: enum atscmh_rs_frame_mode
     :header-rows:  1
@@ -1279,7 +1264,7 @@
 Possible values are:
 
 
-.. _atscmh-rs-frame-ensemble:
+.. c:type:: atscmh_rs_frame_ensemble
 
 .. flat-table:: enum atscmh_rs_frame_ensemble
     :header-rows:  1
@@ -1328,7 +1313,7 @@
 Possible values are:
 
 
-.. _atscmh-rs-code-mode:
+.. c:type:: atscmh_rs_code_mode
 
 .. flat-table:: enum atscmh_rs_code_mode
     :header-rows:  1
@@ -1383,7 +1368,7 @@
 Reed Solomon (RS) code mode (secondary).
 
 Possible values are the same as documented on enum
-:ref:`atscmh_rs_code_mode <atscmh-rs-code-mode>`:
+:c:type:`atscmh_rs_code_mode`:
 
 
 .. _DTV-ATSCMH-SCCC-BLOCK-MODE:
@@ -1395,8 +1380,9 @@
 
 Possible values are:
 
+.. tabularcolumns:: |p{4.5cm}|p{13.0cm}|
 
-.. _atscmh-sccc-block-mode:
+.. c:type:: atscmh_sccc_block_mode
 
 .. flat-table:: enum atscmh_scc_block_mode
     :header-rows:  1
@@ -1447,7 +1433,7 @@
 Possible values are:
 
 
-.. _atscmh-sccc-code-mode:
+.. c:type:: atscmh_sccc_code_mode
 
 .. flat-table:: enum atscmh_sccc_code_mode
     :header-rows:  1
@@ -1494,7 +1480,7 @@
 Series Concatenated Convolutional Code Rate.
 
 Possible values are the same as documented on enum
-:ref:`atscmh_sccc_code_mode <atscmh-sccc-code-mode>`.
+:c:type:`atscmh_sccc_code_mode`.
 
 
 .. _DTV-ATSCMH-SCCC-CODE-MODE-C:
@@ -1505,7 +1491,7 @@
 Series Concatenated Convolutional Code Rate.
 
 Possible values are the same as documented on enum
-:ref:`atscmh_sccc_code_mode <atscmh-sccc-code-mode>`.
+:c:type:`atscmh_sccc_code_mode`.
 
 
 .. _DTV-ATSCMH-SCCC-CODE-MODE-D:
@@ -1516,7 +1502,7 @@
 Series Concatenated Convolutional Code Rate.
 
 Possible values are the same as documented on enum
-:ref:`atscmh_sccc_code_mode <atscmh-sccc-code-mode>`.
+:c:type:`atscmh_sccc_code_mode`.
 
 
 .. _DTV-API-VERSION:
@@ -1533,7 +1519,7 @@
 ================
 
 Used on terrestrial transmissions. The acceptable values are the ones
-described at :ref:`fe_transmit_mode_t <fe-transmit-mode-t>`.
+described at :c:type:`fe_transmit_mode`.
 
 
 .. _DTV-CODE-RATE-LP:
@@ -1542,7 +1528,7 @@
 ================
 
 Used on terrestrial transmissions. The acceptable values are the ones
-described at :ref:`fe_transmit_mode_t <fe-transmit-mode-t>`.
+described at :c:type:`fe_transmit_mode`.
 
 
 .. _DTV-GUARD-INTERVAL:
@@ -1553,14 +1539,12 @@
 Possible values are:
 
 
-.. _fe-guard-interval-t:
+.. c:type:: fe_guard_interval
 
 Modulation guard interval
 -------------------------
 
 
-.. _fe-guard-interval:
-
 .. flat-table:: enum fe_guard_interval
     :header-rows:  1
     :stub-columns: 0
@@ -1682,13 +1666,12 @@
 on OFTM-based standards, e. g. DVB-T/T2, ISDB-T, DTMB
 
 
-.. _fe-transmit-mode-t:
+.. c:type:: fe_transmit_mode
 
 enum fe_transmit_mode: Number of carriers per channel
 -----------------------------------------------------
 
-
-.. _fe-transmit-mode:
+.. tabularcolumns:: |p{5.0cm}|p{12.5cm}|
 
 .. flat-table:: enum fe_transmit_mode
     :header-rows:  1
@@ -1799,14 +1782,12 @@
 Frontend hierarchy
 
 
-.. _fe-hierarchy-t:
+.. c:type:: fe_hierarchy
 
 Frontend hierarchy
 ------------------
 
 
-.. _fe-hierarchy:
-
 .. flat-table:: enum fe_hierarchy
     :header-rows:  1
     :stub-columns: 0
@@ -1912,7 +1893,7 @@
 Time interleaving to be used. Currently, used only on DTMB.
 
 
-.. _fe-interleaving:
+.. c:type:: fe_interleaving
 
 .. flat-table:: enum fe_interleaving
     :header-rows:  1
diff --git a/Documentation/media/uapi/dvb/frontend-stat-properties.rst b/Documentation/media/uapi/dvb/frontend-stat-properties.rst
index 0fc4aaa..e73754f 100644
--- a/Documentation/media/uapi/dvb/frontend-stat-properties.rst
+++ b/Documentation/media/uapi/dvb/frontend-stat-properties.rst
@@ -20,7 +20,7 @@
 plus one metric per each carrier group (called "layer" on ISDB).
 
 So, in order to be consistent with other delivery systems, the first
-value at :ref:`dtv_property.stat.dtv_stats <dtv-stats>` array refers
+value at :c:type:`dtv_property.stat.dtv_stats <dtv_stats>` array refers
 to the global metric. The other elements of the array represent each
 layer, starting from layer A(index 1), layer B (index 2) and so on.
 
diff --git a/Documentation/media/uapi/dvb/frontend.rst b/Documentation/media/uapi/dvb/frontend.rst
index 48c5cd4..e051a90 100644
--- a/Documentation/media/uapi/dvb/frontend.rst
+++ b/Documentation/media/uapi/dvb/frontend.rst
@@ -29,7 +29,9 @@
 Data types and ioctl definitions can be accessed by including
 ``linux/dvb/frontend.h`` in your application.
 
-.. note:: Transmission via the internet (DVB-IP) is not yet handled by this
+.. note::
+
+   Transmission via the internet (DVB-IP) is not yet handled by this
    API but a future extension is possible.
 
 On Satellite systems, the API support for the Satellite Equipment
diff --git a/Documentation/media/uapi/dvb/frontend_f_close.rst b/Documentation/media/uapi/dvb/frontend_f_close.rst
index 5cce9262..f3b04b6 100644
--- a/Documentation/media/uapi/dvb/frontend_f_close.rst
+++ b/Documentation/media/uapi/dvb/frontend_f_close.rst
@@ -20,14 +20,14 @@
     #include <unistd.h>
 
 
-.. cpp:function:: int close( int fd )
-
+.. c:function:: int close( int fd )
+    :name: dvb-fe-close
 
 Arguments
 =========
 
 ``fd``
-    File descriptor returned by :ref:`open() <func-open>`.
+    File descriptor returned by :c:func:`open() <dvb-fe-open>`.
 
 
 Description
diff --git a/Documentation/media/uapi/dvb/frontend_f_open.rst b/Documentation/media/uapi/dvb/frontend_f_open.rst
index e0c5534..690eb37 100644
--- a/Documentation/media/uapi/dvb/frontend_f_open.rst
+++ b/Documentation/media/uapi/dvb/frontend_f_open.rst
@@ -20,8 +20,8 @@
     #include <fcntl.h>
 
 
-.. cpp:function:: int open( const char *device_name, int flags )
-
+.. c:function:: int open( const char *device_name, int flags )
+    :name: dvb-fe-open
 
 Arguments
 =========
diff --git a/Documentation/media/uapi/dvb/net-add-if.rst b/Documentation/media/uapi/dvb/net-add-if.rst
index 2b990d0..82ce243 100644
--- a/Documentation/media/uapi/dvb/net-add-if.rst
+++ b/Documentation/media/uapi/dvb/net-add-if.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct dvb_net_if *net_if )
+.. c:function:: int ioctl( int fd, NET_ADD_IF, struct dvb_net_if *net_if )
+    :name: NET_ADD_IF
 
 
 Arguments
@@ -24,11 +25,8 @@
 ``fd``
     File descriptor returned by :ref:`open() <frontend_f_open>`.
 
-``request``
-    FE_SET_TONE
-
 ``net_if``
-    pointer to struct :ref:`dvb_net_if <dvb-net-if>`
+    pointer to struct :c:type:`dvb_net_if`
 
 
 Description
@@ -40,16 +38,10 @@
 the system call successfully returns, a new virtual network interface is
 created.
 
-The struct :ref:`dvb_net_if <dvb-net-if>`::ifnum field will be
+The struct :c:type:`dvb_net_if`::ifnum field will be
 filled with the number of the created interface.
 
-
-.. _dvb-net-if-t:
-
-struct dvb_net_if description
-=============================
-
-.. _dvb-net-if:
+.. c:type:: dvb_net_if
 
 .. flat-table:: struct dvb_net_if
     :header-rows:  1
diff --git a/Documentation/media/uapi/dvb/net-get-if.rst b/Documentation/media/uapi/dvb/net-get-if.rst
index 92b8841..1bb8ee0 100644
--- a/Documentation/media/uapi/dvb/net-get-if.rst
+++ b/Documentation/media/uapi/dvb/net-get-if.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct dvb_net_if *net_if )
+.. c:function:: int ioctl( int fd, NET_GET_IF, struct dvb_net_if *net_if )
+    :name: NET_GET_IF
 
 
 Arguments
@@ -24,19 +25,16 @@
 ``fd``
     File descriptor returned by :ref:`open() <frontend_f_open>`.
 
-``request``
-    FE_SET_TONE
-
 ``net_if``
-    pointer to struct :ref:`dvb_net_if <dvb-net-if>`
+    pointer to struct :c:type:`dvb_net_if`
 
 
 Description
 ===========
 
 The NET_GET_IF ioctl uses the interface number given by the struct
-:ref:`dvb_net_if <dvb-net-if>`::ifnum field and fills the content of
-struct :ref:`dvb_net_if <dvb-net-if>` with the packet ID and
+:c:type:`dvb_net_if`::ifnum field and fills the content of
+struct :c:type:`dvb_net_if` with the packet ID and
 encapsulation type used on such interface. If the interface was not
 created yet with :ref:`NET_ADD_IF <net>`, it will return -1 and fill
 the ``errno`` with ``EINVAL`` error code.
diff --git a/Documentation/media/uapi/dvb/net-remove-if.rst b/Documentation/media/uapi/dvb/net-remove-if.rst
index d374c1d6..646af23 100644
--- a/Documentation/media/uapi/dvb/net-remove-if.rst
+++ b/Documentation/media/uapi/dvb/net-remove-if.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, int ifnum )
+.. c:function:: int ioctl( int fd, NET_REMOVE_IF, int ifnum )
+    :name: NET_REMOVE_IF
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <frontend_f_open>`.
 
-``request``
-    FE_SET_TONE
-
 ``net_if``
     number of the interface to be removed
 
diff --git a/Documentation/media/uapi/dvb/video-clear-buffer.rst b/Documentation/media/uapi/dvb/video-clear-buffer.rst
index 7c85aa0..2e51a78 100644
--- a/Documentation/media/uapi/dvb/video-clear-buffer.rst
+++ b/Documentation/media/uapi/dvb/video-clear-buffer.rst
@@ -11,11 +11,13 @@
 
 VIDEO_CLEAR_BUFFER
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_CLEAR_BUFFER)
+.. c:function:: int ioctl(fd, VIDEO_CLEAR_BUFFER)
+    :name: VIDEO_CLEAR_BUFFER
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-command.rst b/Documentation/media/uapi/dvb/video-command.rst
index b1634f7..536d0fd 100644
--- a/Documentation/media/uapi/dvb/video-command.rst
+++ b/Documentation/media/uapi/dvb/video-command.rst
@@ -11,11 +11,13 @@
 
 VIDEO_COMMAND
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(int fd, int request = VIDEO_COMMAND, struct video_command *cmd)
+.. c:function:: int ioctl(int fd, VIDEO_COMMAND, struct video_command *cmd)
+    :name: VIDEO_COMMAND
 
 
 Arguments
@@ -57,6 +59,36 @@
 :ref:`VIDIOC_DECODER_CMD` documentation for
 more information.
 
+.. c:type:: struct video_command
+
+.. code-block:: c
+
+	/* The structure must be zeroed before use by the application
+	This ensures it can be extended safely in the future. */
+	struct video_command {
+		__u32 cmd;
+		__u32 flags;
+		union {
+			struct {
+				__u64 pts;
+			} stop;
+
+			struct {
+				/* 0 or 1000 specifies normal speed,
+				1 specifies forward single stepping,
+				-1 specifies backward single stepping,
+				>1: playback at speed/1000 of the normal speed,
+				<-1: reverse playback at (-speed/1000) of the normal speed. */
+				__s32 speed;
+				__u32 format;
+			} play;
+
+			struct {
+				__u32 data[16];
+			} raw;
+		};
+	};
+
 
 Return Value
 ------------
diff --git a/Documentation/media/uapi/dvb/video-continue.rst b/Documentation/media/uapi/dvb/video-continue.rst
index c5acc09..030c2ec 100644
--- a/Documentation/media/uapi/dvb/video-continue.rst
+++ b/Documentation/media/uapi/dvb/video-continue.rst
@@ -11,11 +11,13 @@
 
 VIDEO_CONTINUE
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_CONTINUE)
+.. c:function:: int ioctl(fd, VIDEO_CONTINUE)
+    :name: VIDEO_CONTINUE
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-fast-forward.rst b/Documentation/media/uapi/dvb/video-fast-forward.rst
index db338e9..70a53e1 100644
--- a/Documentation/media/uapi/dvb/video-fast-forward.rst
+++ b/Documentation/media/uapi/dvb/video-fast-forward.rst
@@ -11,11 +11,13 @@
 
 VIDEO_FAST_FORWARD
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_FAST_FORWARD, int nFrames)
+.. c:function:: int ioctl(fd, VIDEO_FAST_FORWARD, int nFrames)
+    :name: VIDEO_FAST_FORWARD
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-fclose.rst b/Documentation/media/uapi/dvb/video-fclose.rst
index ebeaade..8a997ae 100644
--- a/Documentation/media/uapi/dvb/video-fclose.rst
+++ b/Documentation/media/uapi/dvb/video-fclose.rst
@@ -11,11 +11,12 @@
 
 dvb video close()
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int close(int fd)
+.. c:function:: int close(int fd)
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-fopen.rst b/Documentation/media/uapi/dvb/video-fopen.rst
index 9e54715..203a2c5 100644
--- a/Documentation/media/uapi/dvb/video-fopen.rst
+++ b/Documentation/media/uapi/dvb/video-fopen.rst
@@ -11,11 +11,12 @@
 
 dvb video open()
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int open(const char *deviceName, int flags)
+.. c:function:: int open(const char *deviceName, int flags)
 
 
 Arguments
@@ -82,6 +83,8 @@
 Return Value
 ------------
 
+.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
diff --git a/Documentation/media/uapi/dvb/video-freeze.rst b/Documentation/media/uapi/dvb/video-freeze.rst
index d3d0dc3..9cef65a 100644
--- a/Documentation/media/uapi/dvb/video-freeze.rst
+++ b/Documentation/media/uapi/dvb/video-freeze.rst
@@ -11,11 +11,13 @@
 
 VIDEO_FREEZE
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_FREEZE)
+.. c:function:: int ioctl(fd, VIDEO_FREEZE)
+    :name: VIDEO_FREEZE
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-fwrite.rst b/Documentation/media/uapi/dvb/video-fwrite.rst
index 045038f..cfe7c57 100644
--- a/Documentation/media/uapi/dvb/video-fwrite.rst
+++ b/Documentation/media/uapi/dvb/video-fwrite.rst
@@ -11,11 +11,12 @@
 
 dvb video write()
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: size_t write(int fd, const void *buf, size_t count)
+.. c:function:: size_t write(int fd, const void *buf, size_t count)
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-get-capabilities.rst b/Documentation/media/uapi/dvb/video-get-capabilities.rst
index 94cbbba..6987f65 100644
--- a/Documentation/media/uapi/dvb/video-get-capabilities.rst
+++ b/Documentation/media/uapi/dvb/video-get-capabilities.rst
@@ -11,11 +11,13 @@
 
 VIDEO_GET_CAPABILITIES
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_GET_CAPABILITIES, unsigned int *cap)
+.. c:function:: int ioctl(fd, VIDEO_GET_CAPABILITIES, unsigned int *cap)
+    :name: VIDEO_GET_CAPABILITIES
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-get-event.rst b/Documentation/media/uapi/dvb/video-get-event.rst
index a1484a2..6ad14cd 100644
--- a/Documentation/media/uapi/dvb/video-get-event.rst
+++ b/Documentation/media/uapi/dvb/video-get-event.rst
@@ -11,11 +11,13 @@
 
 VIDEO_GET_EVENT
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_GET_EVENT, struct video_event *ev)
+.. c:function:: int ioctl(fd, VIDEO_GET_EVENT, struct video_event *ev)
+    :name: VIDEO_GET_EVENT
 
 
 Arguments
@@ -62,6 +64,23 @@
 specified as the wake-up condition. Read-only permissions are sufficient
 for this ioctl call.
 
+.. c:type:: video_event
+
+.. code-block:: c
+
+	struct video_event {
+		__s32 type;
+	#define VIDEO_EVENT_SIZE_CHANGED	1
+	#define VIDEO_EVENT_FRAME_RATE_CHANGED	2
+	#define VIDEO_EVENT_DECODER_STOPPED 	3
+	#define VIDEO_EVENT_VSYNC 		4
+		__kernel_time_t timestamp;
+		union {
+			video_size_t size;
+			unsigned int frame_rate;	/* in frames per 1000sec */
+			unsigned char vsync_field;	/* unknown/odd/even/progressive */
+		} u;
+	};
 
 Return Value
 ------------
diff --git a/Documentation/media/uapi/dvb/video-get-frame-count.rst b/Documentation/media/uapi/dvb/video-get-frame-count.rst
index 4ff100c..0ffe22c 100644
--- a/Documentation/media/uapi/dvb/video-get-frame-count.rst
+++ b/Documentation/media/uapi/dvb/video-get-frame-count.rst
@@ -11,11 +11,13 @@
 
 VIDEO_GET_FRAME_COUNT
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(int fd, int request = VIDEO_GET_FRAME_COUNT, __u64 *pts)
+.. c:function:: int ioctl(int fd, VIDEO_GET_FRAME_COUNT, __u64 *pts)
+    :name: VIDEO_GET_FRAME_COUNT
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-get-frame-rate.rst b/Documentation/media/uapi/dvb/video-get-frame-rate.rst
index 131def9..400042a 100644
--- a/Documentation/media/uapi/dvb/video-get-frame-rate.rst
+++ b/Documentation/media/uapi/dvb/video-get-frame-rate.rst
@@ -11,11 +11,13 @@
 
 VIDEO_GET_FRAME_RATE
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(int fd, int request = VIDEO_GET_FRAME_RATE, unsigned int *rate)
+.. c:function:: int ioctl(int fd, VIDEO_GET_FRAME_RATE, unsigned int *rate)
+    :name: VIDEO_GET_FRAME_RATE
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-get-navi.rst b/Documentation/media/uapi/dvb/video-get-navi.rst
index 6c3034f..114a9ac 100644
--- a/Documentation/media/uapi/dvb/video-get-navi.rst
+++ b/Documentation/media/uapi/dvb/video-get-navi.rst
@@ -11,11 +11,13 @@
 
 VIDEO_GET_NAVI
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_GET_NAVI , video_navi_pack_t *navipack)
+.. c:function:: int ioctl(fd, VIDEO_GET_NAVI , struct video_navi_pack *navipack)
+    :name: VIDEO_GET_NAVI
 
 
 Arguments
@@ -52,6 +54,14 @@
 especially needed if an encoded stream has to be decoded by the
 hardware.
 
+.. c:type:: video_navi_pack
+
+.. code-block::c
+
+	typedef struct video_navi_pack {
+		int length;          /* 0 ... 1024 */
+		__u8 data[1024];
+	} video_navi_pack_t;
 
 Return Value
 ------------
diff --git a/Documentation/media/uapi/dvb/video-get-pts.rst b/Documentation/media/uapi/dvb/video-get-pts.rst
index 0826122..c73f86f 100644
--- a/Documentation/media/uapi/dvb/video-get-pts.rst
+++ b/Documentation/media/uapi/dvb/video-get-pts.rst
@@ -11,11 +11,13 @@
 
 VIDEO_GET_PTS
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(int fd, int request = VIDEO_GET_PTS, __u64 *pts)
+.. c:function:: int ioctl(int fd, VIDEO_GET_PTS, __u64 *pts)
+    :name: VIDEO_GET_PTS
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-get-size.rst b/Documentation/media/uapi/dvb/video-get-size.rst
index c75e3c4..d077fe2 100644
--- a/Documentation/media/uapi/dvb/video-get-size.rst
+++ b/Documentation/media/uapi/dvb/video-get-size.rst
@@ -11,11 +11,13 @@
 
 VIDEO_GET_SIZE
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(int fd, int request = VIDEO_GET_SIZE, video_size_t *size)
+.. c:function:: int ioctl(int fd, VIDEO_GET_SIZE, video_size_t *size)
+    :name: VIDEO_GET_SIZE
 
 
 Arguments
@@ -50,6 +52,16 @@
 
 This ioctl returns the size and aspect ratio.
 
+.. c:type:: video_size_t
+
+.. code-block::c
+
+	typedef struct {
+		int w;
+		int h;
+		video_format_t aspect_ratio;
+	} video_size_t;
+
 
 Return Value
 ------------
diff --git a/Documentation/media/uapi/dvb/video-get-status.rst b/Documentation/media/uapi/dvb/video-get-status.rst
index ab9c223..ed6ea19 100644
--- a/Documentation/media/uapi/dvb/video-get-status.rst
+++ b/Documentation/media/uapi/dvb/video-get-status.rst
@@ -11,11 +11,13 @@
 
 VIDEO_GET_STATUS
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_GET_STATUS, struct video_status *status)
+.. c:function:: int ioctl(fd, VIDEO_GET_STATUS, struct video_status *status)
+    :name: VIDEO_GET_STATUS
 
 
 Arguments
@@ -51,6 +53,17 @@
 This ioctl call asks the Video Device to return the current status of
 the device.
 
+.. c:type:: video_status
+
+.. code-block:: c
+
+	struct video_status {
+		int                   video_blank;   /* blank video on freeze? */
+		video_play_state_t    play_state;    /* current state of playback */
+		video_stream_source_t stream_source; /* current source (demux/memory) */
+		video_format_t        video_format;  /* current aspect ratio of stream*/
+		video_displayformat_t display_format;/* selected cropping mode */
+	};
 
 Return Value
 ------------
diff --git a/Documentation/media/uapi/dvb/video-play.rst b/Documentation/media/uapi/dvb/video-play.rst
index 943c4b7..3f66ae3 100644
--- a/Documentation/media/uapi/dvb/video-play.rst
+++ b/Documentation/media/uapi/dvb/video-play.rst
@@ -11,11 +11,13 @@
 
 VIDEO_PLAY
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_PLAY)
+.. c:function:: int ioctl(fd, VIDEO_PLAY)
+    :name: VIDEO_PLAY
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-select-source.rst b/Documentation/media/uapi/dvb/video-select-source.rst
index 0ee0d03..2f4fbf4 100644
--- a/Documentation/media/uapi/dvb/video-select-source.rst
+++ b/Documentation/media/uapi/dvb/video-select-source.rst
@@ -11,11 +11,13 @@
 
 VIDEO_SELECT_SOURCE
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_SELECT_SOURCE, video_stream_source_t source)
+.. c:function:: int ioctl(fd, VIDEO_SELECT_SOURCE, video_stream_source_t source)
+    :name: VIDEO_SELECT_SOURCE
 
 
 Arguments
@@ -56,6 +58,16 @@
 the input data. The possible sources are demux or memory. If memory is
 selected, the data is fed to the video device through the write command.
 
+.. c:type:: video_stream_source_t
+
+.. code-block:: c
+
+	typedef enum {
+		VIDEO_SOURCE_DEMUX, /* Select the demux as the main source */
+		VIDEO_SOURCE_MEMORY /* If this source is selected, the stream
+				comes from the user through the write
+				system call */
+	} video_stream_source_t;
 
 Return Value
 ------------
diff --git a/Documentation/media/uapi/dvb/video-set-attributes.rst b/Documentation/media/uapi/dvb/video-set-attributes.rst
index 326c5c8..b2f11a6 100644
--- a/Documentation/media/uapi/dvb/video-set-attributes.rst
+++ b/Documentation/media/uapi/dvb/video-set-attributes.rst
@@ -11,11 +11,13 @@
 
 VIDEO_SET_ATTRIBUTES
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_SET_ATTRIBUTE ,video_attributes_t vattr)
+.. c:function:: int ioctl(fd, VIDEO_SET_ATTRIBUTE ,video_attributes_t vattr)
+    :name: VIDEO_SET_ATTRIBUTE
 
 
 Arguments
@@ -53,6 +55,22 @@
 information, but the call also tells the hardware to prepare for DVD
 playback.
 
+.. c:type:: video_attributes_t
+
+.. code-block::c
+
+	typedef __u16 video_attributes_t;
+	/*   bits: descr. */
+	/*   15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */
+	/*   13-12 TV system (0=525/60, 1=625/50) */
+	/*   11-10 Aspect ratio (0=4:3, 3=16:9) */
+	/*    9- 8 permitted display mode on 4:3 monitor (0=both, 1=only pan-sca */
+	/*    7    line 21-1 data present in GOP (1=yes, 0=no) */
+	/*    6    line 21-2 data present in GOP (1=yes, 0=no) */
+	/*    5- 3 source resolution (0=720x480/576, 1=704x480/576, 2=352x480/57 */
+	/*    2    source letterboxed (1=yes, 0=no) */
+	/*    0    film/camera mode (0=camera, 1=film (625/50 only)) */
+
 
 Return Value
 ------------
diff --git a/Documentation/media/uapi/dvb/video-set-blank.rst b/Documentation/media/uapi/dvb/video-set-blank.rst
index 142ea88..3858c69 100644
--- a/Documentation/media/uapi/dvb/video-set-blank.rst
+++ b/Documentation/media/uapi/dvb/video-set-blank.rst
@@ -11,11 +11,13 @@
 
 VIDEO_SET_BLANK
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_SET_BLANK, boolean mode)
+.. c:function:: int ioctl(fd, VIDEO_SET_BLANK, boolean mode)
+    :name: VIDEO_SET_BLANK
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-set-display-format.rst b/Documentation/media/uapi/dvb/video-set-display-format.rst
index 2061ab0..2ef7401 100644
--- a/Documentation/media/uapi/dvb/video-set-display-format.rst
+++ b/Documentation/media/uapi/dvb/video-set-display-format.rst
@@ -11,11 +11,13 @@
 
 VIDEO_SET_DISPLAY_FORMAT
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_SET_DISPLAY_FORMAT, video_display_format_t format)
+.. c:function:: int ioctl(fd, VIDEO_SET_DISPLAY_FORMAT)
+    :name: VIDEO_SET_DISPLAY_FORMAT
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-set-format.rst b/Documentation/media/uapi/dvb/video-set-format.rst
index 53d66ec..4239a4e 100644
--- a/Documentation/media/uapi/dvb/video-set-format.rst
+++ b/Documentation/media/uapi/dvb/video-set-format.rst
@@ -11,11 +11,13 @@
 
 VIDEO_SET_FORMAT
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_SET_FORMAT, video_format_t format)
+.. c:function:: int ioctl(fd, VIDEO_SET_FORMAT, video_format_t format)
+    :name: VIDEO_SET_FORMAT
 
 
 Arguments
@@ -52,6 +54,15 @@
 device (TV) so that the output of the decoder can be adjusted
 accordingly.
 
+.. c:type:: video_format_t
+
+.. code-block:: c
+
+	typedef enum {
+		VIDEO_FORMAT_4_3,     /* Select 4:3 format */
+		VIDEO_FORMAT_16_9,    /* Select 16:9 format. */
+		VIDEO_FORMAT_221_1    /* 2.21:1 */
+	} video_format_t;
 
 Return Value
 ------------
diff --git a/Documentation/media/uapi/dvb/video-set-highlight.rst b/Documentation/media/uapi/dvb/video-set-highlight.rst
index 374f5d8..90aeafd 100644
--- a/Documentation/media/uapi/dvb/video-set-highlight.rst
+++ b/Documentation/media/uapi/dvb/video-set-highlight.rst
@@ -11,11 +11,13 @@
 
 VIDEO_SET_HIGHLIGHT
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_SET_HIGHLIGHT ,video_highlight_t *vhilite)
+.. c:function:: int ioctl(fd, VIDEO_SET_HIGHLIGHT, struct video_highlight *vhilite)
+    :name: VIDEO_SET_HIGHLIGHT
 
 
 Arguments
@@ -51,6 +53,30 @@
 This ioctl sets the SPU highlight information for the menu access of a
 DVD.
 
+.. c:type:: video_highlight
+
+.. code-block:: c
+
+	typedef
+	struct video_highlight {
+		int     active;      /*    1=show highlight, 0=hide highlight */
+		__u8    contrast1;   /*    7- 4  Pattern pixel contrast */
+				/*    3- 0  Background pixel contrast */
+		__u8    contrast2;   /*    7- 4  Emphasis pixel-2 contrast */
+				/*    3- 0  Emphasis pixel-1 contrast */
+		__u8    color1;      /*    7- 4  Pattern pixel color */
+				/*    3- 0  Background pixel color */
+		__u8    color2;      /*    7- 4  Emphasis pixel-2 color */
+				/*    3- 0  Emphasis pixel-1 color */
+		__u32    ypos;       /*   23-22  auto action mode */
+				/*   21-12  start y */
+				/*    9- 0  end y */
+		__u32    xpos;       /*   23-22  button color number */
+				/*   21-12  start x */
+				/*    9- 0  end x */
+	} video_highlight_t;
+
+
 
 Return Value
 ------------
diff --git a/Documentation/media/uapi/dvb/video-set-id.rst b/Documentation/media/uapi/dvb/video-set-id.rst
index 9c002d5..18f6687 100644
--- a/Documentation/media/uapi/dvb/video-set-id.rst
+++ b/Documentation/media/uapi/dvb/video-set-id.rst
@@ -11,11 +11,13 @@
 
 VIDEO_SET_ID
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(int fd, int request = VIDEO_SET_ID, int id)
+.. c:function:: int ioctl(int fd, VIDEO_SET_ID, int id)
+    :name: VIDEO_SET_ID
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-set-spu-palette.rst b/Documentation/media/uapi/dvb/video-set-spu-palette.rst
index 4b80b6f..51a1913 100644
--- a/Documentation/media/uapi/dvb/video-set-spu-palette.rst
+++ b/Documentation/media/uapi/dvb/video-set-spu-palette.rst
@@ -11,11 +11,13 @@
 
 VIDEO_SET_SPU_PALETTE
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_SET_SPU_PALETTE, video_spu_palette_t *palette )
+.. c:function:: int ioctl(fd, VIDEO_SET_SPU_PALETTE, struct video_spu_palette *palette )
+    :name: VIDEO_SET_SPU_PALETTE
 
 
 Arguments
@@ -50,6 +52,14 @@
 
 This ioctl sets the SPU color palette.
 
+.. c:type:: video_spu_palette
+
+.. code-block::c
+
+	typedef struct video_spu_palette {      /* SPU Palette information */
+		int length;
+		__u8 __user *palette;
+	} video_spu_palette_t;
 
 Return Value
 ------------
diff --git a/Documentation/media/uapi/dvb/video-set-spu.rst b/Documentation/media/uapi/dvb/video-set-spu.rst
index a6f6924..739e5e7 100644
--- a/Documentation/media/uapi/dvb/video-set-spu.rst
+++ b/Documentation/media/uapi/dvb/video-set-spu.rst
@@ -11,11 +11,13 @@
 
 VIDEO_SET_SPU
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_SET_SPU , video_spu_t *spu)
+.. c:function:: int ioctl(fd, VIDEO_SET_SPU , struct video_spu *spu)
+    :name: VIDEO_SET_SPU
 
 
 Arguments
@@ -52,6 +54,15 @@
 This ioctl activates or deactivates SPU decoding in a DVD input stream.
 It can only be used, if the driver is able to handle a DVD stream.
 
+.. c:type:: struct video_spu
+
+.. code-block:: c
+
+	typedef struct video_spu {
+		int active;
+		int stream_id;
+	} video_spu_t;
+
 
 Return Value
 ------------
diff --git a/Documentation/media/uapi/dvb/video-set-streamtype.rst b/Documentation/media/uapi/dvb/video-set-streamtype.rst
index 75b2e7a..02a3c2e 100644
--- a/Documentation/media/uapi/dvb/video-set-streamtype.rst
+++ b/Documentation/media/uapi/dvb/video-set-streamtype.rst
@@ -11,11 +11,13 @@
 
 VIDEO_SET_STREAMTYPE
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_SET_STREAMTYPE, int type)
+.. c:function:: int ioctl(fd, VIDEO_SET_STREAMTYPE, int type)
+    :name: VIDEO_SET_STREAMTYPE
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-set-system.rst b/Documentation/media/uapi/dvb/video-set-system.rst
index 9ae0df1..e39cbe0 100644
--- a/Documentation/media/uapi/dvb/video-set-system.rst
+++ b/Documentation/media/uapi/dvb/video-set-system.rst
@@ -11,11 +11,13 @@
 
 VIDEO_SET_SYSTEM
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_SET_SYSTEM , video_system_t system)
+.. c:function:: int ioctl(fd, VIDEO_SET_SYSTEM , video_system_t system)
+    :name: VIDEO_SET_SYSTEM
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-slowmotion.rst b/Documentation/media/uapi/dvb/video-slowmotion.rst
index 9057128..bd3d1a4 100644
--- a/Documentation/media/uapi/dvb/video-slowmotion.rst
+++ b/Documentation/media/uapi/dvb/video-slowmotion.rst
@@ -11,11 +11,13 @@
 
 VIDEO_SLOWMOTION
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_SLOWMOTION, int nFrames)
+.. c:function:: int ioctl(fd, VIDEO_SLOWMOTION, int nFrames)
+    :name: VIDEO_SLOWMOTION
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-stillpicture.rst b/Documentation/media/uapi/dvb/video-stillpicture.rst
index ed3a2f5..6f943f5 100644
--- a/Documentation/media/uapi/dvb/video-stillpicture.rst
+++ b/Documentation/media/uapi/dvb/video-stillpicture.rst
@@ -11,11 +11,13 @@
 
 VIDEO_STILLPICTURE
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_STILLPICTURE, struct video_still_picture *sp)
+.. c:function:: int ioctl(fd, VIDEO_STILLPICTURE, struct video_still_picture *sp)
+    :name: VIDEO_STILLPICTURE
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-stop.rst b/Documentation/media/uapi/dvb/video-stop.rst
index ad8d59e..fb827ef 100644
--- a/Documentation/media/uapi/dvb/video-stop.rst
+++ b/Documentation/media/uapi/dvb/video-stop.rst
@@ -11,11 +11,13 @@
 
 VIDEO_STOP
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(fd, int request = VIDEO_STOP, boolean mode)
+.. c:function:: int ioctl(fd, VIDEO_STOP, boolean mode)
+    :name: VIDEO_STOP
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video-try-command.rst b/Documentation/media/uapi/dvb/video-try-command.rst
index df96c2d..008e6a9 100644
--- a/Documentation/media/uapi/dvb/video-try-command.rst
+++ b/Documentation/media/uapi/dvb/video-try-command.rst
@@ -11,11 +11,13 @@
 
 VIDEO_TRY_COMMAND
 
+.. attention:: This ioctl is deprecated.
 
 Synopsis
 --------
 
-.. cpp:function:: int ioctl(int fd, int request = VIDEO_TRY_COMMAND, struct video_command *cmd)
+.. c:function:: int ioctl(int fd, VIDEO_TRY_COMMAND, struct video_command *cmd)
+    :name: VIDEO_TRY_COMMAND
 
 
 Arguments
diff --git a/Documentation/media/uapi/dvb/video_types.rst b/Documentation/media/uapi/dvb/video_types.rst
index 671f365..640a21d 100644
--- a/Documentation/media/uapi/dvb/video_types.rst
+++ b/Documentation/media/uapi/dvb/video_types.rst
@@ -95,7 +95,7 @@
     } video_play_state_t;
 
 
-.. _video-command:
+.. c:type:: video_command
 
 struct video_command
 ====================
@@ -146,7 +146,7 @@
     } video_size_t;
 
 
-.. _video-event:
+.. c:type:: video_event
 
 struct video_event
 ==================
@@ -172,7 +172,7 @@
     };
 
 
-.. _video-status:
+.. c:type:: video_status
 
 struct video_status
 ===================
@@ -203,7 +203,7 @@
 device.
 
 
-.. _video-still-picture:
+.. c:type:: video_still_picture
 
 struct video_still_picture
 ==========================
@@ -271,7 +271,7 @@
     } video_system_t;
 
 
-.. _video-highlight:
+.. c:type:: video_highlight
 
 struct video_highlight
 ======================
@@ -302,7 +302,7 @@
      } video_highlight_t;
 
 
-.. _video-spu:
+.. c:type:: video_spu
 
 struct video_spu
 ================
@@ -320,7 +320,7 @@
      } video_spu_t;
 
 
-.. _video-spu-palette:
+.. c:type:: video_spu_palette
 
 struct video_spu_palette
 ========================
@@ -338,7 +338,7 @@
      } video_spu_palette_t;
 
 
-.. _video-navi-pack:
+.. c:type:: video_navi_pack
 
 struct video_navi_pack
 ======================
diff --git a/Documentation/media/uapi/gen-errors.rst b/Documentation/media/uapi/gen-errors.rst
index d6b0cfd..6e983b9 100644
--- a/Documentation/media/uapi/gen-errors.rst
+++ b/Documentation/media/uapi/gen-errors.rst
@@ -9,6 +9,8 @@
 
 .. _gen-errors:
 
+.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
+
 .. flat-table:: Generic error codes
     :header-rows:  0
     :stub-columns: 0
diff --git a/Documentation/media/uapi/mediactl/media-func-close.rst b/Documentation/media/uapi/mediactl/media-func-close.rst
index 39ef70a..a8f5203 100644
--- a/Documentation/media/uapi/mediactl/media-func-close.rst
+++ b/Documentation/media/uapi/mediactl/media-func-close.rst
@@ -20,14 +20,14 @@
     #include <unistd.h>
 
 
-.. cpp:function:: int close( int fd )
-
+.. c:function:: int close( int fd )
+    :name: mc-close
 
 Arguments
 =========
 
 ``fd``
-    File descriptor returned by :ref:`open() <func-open>`.
+    File descriptor returned by :c:func:`open() <mc-open>`.
 
 
 Description
diff --git a/Documentation/media/uapi/mediactl/media-func-ioctl.rst b/Documentation/media/uapi/mediactl/media-func-ioctl.rst
index 9d1b231..fe072b7 100644
--- a/Documentation/media/uapi/mediactl/media-func-ioctl.rst
+++ b/Documentation/media/uapi/mediactl/media-func-ioctl.rst
@@ -20,14 +20,14 @@
     #include <sys/ioctl.h>
 
 
-.. cpp:function:: int ioctl( int fd, int request, void *argp )
-
+.. c:function:: int ioctl( int fd, int request, void *argp )
+    :name: mc-ioctl
 
 Arguments
 =========
 
 ``fd``
-    File descriptor returned by :ref:`open() <func-open>`.
+    File descriptor returned by :c:func:`open() <mc-open>`.
 
 ``request``
     Media ioctl request code as defined in the media.h header file, for
diff --git a/Documentation/media/uapi/mediactl/media-func-open.rst b/Documentation/media/uapi/mediactl/media-func-open.rst
index 2b2ecd8..32f5301 100644
--- a/Documentation/media/uapi/mediactl/media-func-open.rst
+++ b/Documentation/media/uapi/mediactl/media-func-open.rst
@@ -20,8 +20,8 @@
     #include <fcntl.h>
 
 
-.. cpp:function:: int open( const char *device_name, int flags )
-
+.. c:function:: int open( const char *device_name, int flags )
+    :name: mc-open
 
 Arguments
 =========
diff --git a/Documentation/media/uapi/mediactl/media-ioc-device-info.rst b/Documentation/media/uapi/mediactl/media-ioc-device-info.rst
index 467d82c..f690f9a 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-device-info.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-device-info.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct media_device_info *argp )
+.. c:function:: int ioctl( int fd, MEDIA_IOC_DEVICE_INFO, struct media_device_info *argp )
+    :name: MEDIA_IOC_DEVICE_INFO
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <media-func-open>`.
 
-``request``
-    MEDIA_IOC_DEVICE_INFO
-
 ``argp``
 
 
@@ -35,12 +33,14 @@
 
 All media devices must support the ``MEDIA_IOC_DEVICE_INFO`` ioctl. To
 query device information, applications call the ioctl with a pointer to
-a struct :ref:`media_device_info <media-device-info>`. The driver
+a struct :c:type:`media_device_info`. The driver
 fills the structure and returns the information to the application. The
 ioctl never fails.
 
 
-.. _media-device-info:
+.. c:type:: media_device_info
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct media_device_info
     :header-rows:  0
diff --git a/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst b/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst
index 12d4b25..0fd3292 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct media_entity_desc *argp )
+.. c:function:: int ioctl( int fd, MEDIA_IOC_ENUM_ENTITIES, struct media_entity_desc *argp )
+    :name: MEDIA_IOC_ENUM_ENTITIES
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <media-func-open>`.
 
-``request``
-    MEDIA_IOC_ENUM_ENTITIES
-
 ``argp``
 
 
@@ -34,7 +32,7 @@
 ===========
 
 To query the attributes of an entity, applications set the id field of a
-struct :ref:`media_entity_desc <media-entity-desc>` structure and
+struct :c:type:`media_entity_desc` structure and
 call the MEDIA_IOC_ENUM_ENTITIES ioctl with a pointer to this
 structure. The driver fills the rest of the structure or returns an
 EINVAL error code when the id is invalid.
@@ -51,7 +49,9 @@
 id's until they get an error.
 
 
-.. _media-entity-desc:
+.. c:type:: media_entity_desc
+
+.. tabularcolumns:: |p{1.5cm}|p{1.5cm}|p{1.5cm}|p{1.5cm}|p{11.5cm}|
 
 .. flat-table:: struct media_entity_desc
     :header-rows:  0
@@ -195,5 +195,5 @@
 :ref:`Generic Error Codes <gen-errors>` chapter.
 
 EINVAL
-    The struct :ref:`media_entity_desc <media-entity-desc>` ``id``
+    The struct :c:type:`media_entity_desc` ``id``
     references a non-existing entity.
diff --git a/Documentation/media/uapi/mediactl/media-ioc-enum-links.rst b/Documentation/media/uapi/mediactl/media-ioc-enum-links.rst
index 87443b1..d05be16 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-enum-links.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-enum-links.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct media_links_enum *argp )
+.. c:function:: int ioctl( int fd, MEDIA_IOC_ENUM_LINKS, struct media_links_enum *argp )
+    :name: MEDIA_IOC_ENUM_LINKS
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <media-func-open>`.
 
-``request``
-    MEDIA_IOC_ENUM_LINKS
-
 ``argp``
 
 
@@ -34,10 +32,10 @@
 ===========
 
 To enumerate pads and/or links for a given entity, applications set the
-entity field of a struct :ref:`media_links_enum <media-links-enum>`
+entity field of a struct :c:type:`media_links_enum`
 structure and initialize the struct
-:ref:`media_pad_desc <media-pad-desc>` and struct
-:ref:`media_link_desc <media-link-desc>` structure arrays pointed by
+:c:type:`media_pad_desc` and struct
+:c:type:`media_link_desc` structure arrays pointed by
 the ``pads`` and ``links`` fields. They then call the
 MEDIA_IOC_ENUM_LINKS ioctl with a pointer to this structure.
 
@@ -55,7 +53,9 @@
 returned during the enumeration process.
 
 
-.. _media-links-enum:
+.. c:type:: media_links_enum
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct media_links_enum
     :header-rows:  0
@@ -73,7 +73,7 @@
 
     -  .. row 2
 
-       -  struct :ref:`media_pad_desc <media-pad-desc>`
+       -  struct :c:type:`media_pad_desc`
 
        -  \*\ ``pads``
 
@@ -82,7 +82,7 @@
 
     -  .. row 3
 
-       -  struct :ref:`media_link_desc <media-link-desc>`
+       -  struct :c:type:`media_link_desc`
 
        -  \*\ ``links``
 
@@ -91,7 +91,9 @@
 
 
 
-.. _media-pad-desc:
+.. c:type:: media_pad_desc
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct media_pad_desc
     :header-rows:  0
@@ -125,7 +127,9 @@
 
 
 
-.. _media-link-desc:
+.. c:type:: media_link_desc
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct media_link_desc
     :header-rows:  0
@@ -135,7 +139,7 @@
 
     -  .. row 1
 
-       -  struct :ref:`media_pad_desc <media-pad-desc>`
+       -  struct :c:type:`media_pad_desc`
 
        -  ``source``
 
@@ -143,7 +147,7 @@
 
     -  .. row 2
 
-       -  struct :ref:`media_pad_desc <media-pad-desc>`
+       -  struct :c:type:`media_pad_desc`
 
        -  ``sink``
 
@@ -166,5 +170,5 @@
 :ref:`Generic Error Codes <gen-errors>` chapter.
 
 EINVAL
-    The struct :ref:`media_links_enum <media-links-enum>` ``id``
+    The struct :c:type:`media_links_enum` ``id``
     references a non-existing entity.
diff --git a/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst b/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst
index 2e382cc..48c9531 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct media_v2_topology *argp )
+.. c:function:: int ioctl( int fd, MEDIA_IOC_G_TOPOLOGY, struct media_v2_topology *argp )
+    :name: MEDIA_IOC_G_TOPOLOGY
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <media-func-open>`.
 
-``request``
-    MEDIA_IOC_G_TOPOLOGY
-
 ``argp``
 
 
@@ -35,7 +33,7 @@
 
 The typical usage of this ioctl is to call it twice. On the first call,
 the structure defined at struct
-:ref:`media_v2_topology <media-v2-topology>` should be zeroed. At
+:c:type:`media_v2_topology` should be zeroed. At
 return, if no errors happen, this ioctl will return the
 ``topology_version`` and the total number of entities, interfaces, pads
 and links.
@@ -48,8 +46,9 @@
 If the ``topology_version`` remains the same, the ioctl should fill the
 desired arrays with the media graph elements.
 
+.. tabularcolumns:: |p{1.6cm}|p{3.2cm}|p{12.7cm}|
 
-.. _media-v2-topology:
+.. c:type:: media_v2_topology
 
 .. flat-table:: struct media_v2_topology
     :header-rows:  0
@@ -142,8 +141,9 @@
 	  won't store the links. It will just update ``num_links``
 
 
+.. tabularcolumns:: |p{1.6cm}|p{3.2cm}|p{12.7cm}|
 
-.. _media-v2-entity:
+.. c:type:: media_v2_entity
 
 .. flat-table:: struct media_v2_entity
     :header-rows:  0
@@ -185,8 +185,9 @@
 	  this array to zero.
 
 
+.. tabularcolumns:: |p{1.6cm}|p{3.2cm}|p{12.7cm}|
 
-.. _media-v2-interface:
+.. c:type:: media_v2_interface
 
 .. flat-table:: struct media_v2_interface
     :header-rows:  0
@@ -233,11 +234,12 @@
        -  ``devnode``
 
        -  Used only for device node interfaces. See
-	  :ref:`media-v2-intf-devnode` for details..
+	  :c:type:`media_v2_intf_devnode` for details..
 
 
+.. tabularcolumns:: |p{1.6cm}|p{3.2cm}|p{12.7cm}|
 
-.. _media-v2-intf-devnode:
+.. c:type:: media_v2_intf_devnode
 
 .. flat-table:: struct media_v2_interface
     :header-rows:  0
@@ -262,8 +264,9 @@
        -  Device node minor number.
 
 
+.. tabularcolumns:: |p{1.6cm}|p{3.2cm}|p{12.7cm}|
 
-.. _media-v2-pad:
+.. c:type:: media_v2_pad
 
 .. flat-table:: struct media_v2_pad
     :header-rows:  0
@@ -305,8 +308,9 @@
 	  this array to zero.
 
 
+.. tabularcolumns:: |p{1.6cm}|p{3.2cm}|p{12.7cm}|
 
-.. _media-v2-link:
+.. c:type:: media_v2_link
 
 .. flat-table:: struct media_v2_pad
     :header-rows:  0
diff --git a/Documentation/media/uapi/mediactl/media-ioc-setup-link.rst b/Documentation/media/uapi/mediactl/media-ioc-setup-link.rst
index e02fe23..ae51949 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-setup-link.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-setup-link.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct media_link_desc *argp )
+.. c:function:: int ioctl( int fd, MEDIA_IOC_SETUP_LINK, struct media_link_desc *argp )
+    :name: MEDIA_IOC_SETUP_LINK
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <media-func-open>`.
 
-``request``
-    MEDIA_IOC_SETUP_LINK
-
 ``argp``
 
 
@@ -34,7 +32,7 @@
 ===========
 
 To change link properties applications fill a struct
-:ref:`media_link_desc <media-link-desc>` with link identification
+:c:type:`media_link_desc` with link identification
 information (source and sink pad) and the new requested link flags. They
 then call the MEDIA_IOC_SETUP_LINK ioctl with a pointer to that
 structure.
@@ -63,6 +61,6 @@
 :ref:`Generic Error Codes <gen-errors>` chapter.
 
 EINVAL
-    The struct :ref:`media_link_desc <media-link-desc>` references a
+    The struct :c:type:`media_link_desc` references a
     non-existing link, or the link is immutable and an attempt to modify
     its configuration was made.
diff --git a/Documentation/media/uapi/mediactl/media-types.rst b/Documentation/media/uapi/mediactl/media-types.rst
index c77717b..3e03dc2 100644
--- a/Documentation/media/uapi/mediactl/media-types.rst
+++ b/Documentation/media/uapi/mediactl/media-types.rst
@@ -5,9 +5,12 @@
 Types and flags used to represent the media graph elements
 ==========================================================
 
+..  tabularcolumns:: |p{8.0cm}|p{10.5cm}|
 
 .. _media-entity-type:
 
+.. cssclass:: longtable
+
 .. flat-table:: Media entity types
     :header-rows:  0
     :stub-columns: 0
@@ -15,10 +18,12 @@
 
     -  .. row 1
 
-       ..  _MEDIA-ENT-F-UNKNOWN:
+       .. _MEDIA-ENT-F-UNKNOWN:
        .. _MEDIA-ENT-F-V4L2-SUBDEV-UNKNOWN:
 
-       -  ``MEDIA_ENT_F_UNKNOWN`` and ``MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN``
+       -  ``MEDIA_ENT_F_UNKNOWN`` and
+
+	  ``MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN``
 
        -  Unknown entity. That generally indicates that a driver didn't
 	  initialize properly the entity, with is a Kernel bug
@@ -294,6 +299,8 @@
 	  its source pad.
 
 
+..  tabularcolumns:: |p{5.5cm}|p{12.0cm}|
+
 .. _media-entity-flag:
 
 .. flat-table:: Media entity flags
@@ -319,6 +326,7 @@
        -  The entity represents a data conector
 
 
+..  tabularcolumns:: |p{6.5cm}|p{6.0cm}|p{5.0cm}|
 
 .. _media-intf-type:
 
@@ -405,7 +413,7 @@
 
        -  Device node interface for radio (V4L)
 
-       -  typically, /dev/vbi?
+       -  typically, /dev/radio?
 
     -  .. row 9
 
@@ -429,6 +437,16 @@
 
     -  .. row 11
 
+       ..  _MEDIA-INTF-T-V4L-TOUCH:
+
+       -  ``MEDIA_INTF_T_V4L_TOUCH``
+
+       -  Device node interface for Touch device (V4L)
+
+       -  typically, /dev/v4l-touch?
+
+    -  .. row 12
+
        ..  _MEDIA-INTF-T-ALSA-PCM-CAPTURE:
 
        -  ``MEDIA_INTF_T_ALSA_PCM_CAPTURE``
@@ -437,7 +455,7 @@
 
        -  typically, /dev/snd/pcmC?D?c
 
-    -  .. row 12
+    -  .. row 13
 
        ..  _MEDIA-INTF-T-ALSA-PCM-PLAYBACK:
 
@@ -447,7 +465,7 @@
 
        -  typically, /dev/snd/pcmC?D?p
 
-    -  .. row 13
+    -  .. row 14
 
        ..  _MEDIA-INTF-T-ALSA-CONTROL:
 
@@ -457,7 +475,7 @@
 
        -  typically, /dev/snd/controlC?
 
-    -  .. row 14
+    -  .. row 15
 
        ..  _MEDIA-INTF-T-ALSA-COMPRESS:
 
@@ -467,7 +485,7 @@
 
        -  typically, /dev/snd/compr?
 
-    -  .. row 15
+    -  .. row 16
 
        ..  _MEDIA-INTF-T-ALSA-RAWMIDI:
 
@@ -477,7 +495,7 @@
 
        -  typically, /dev/snd/midi?
 
-    -  .. row 16
+    -  .. row 17
 
        ..  _MEDIA-INTF-T-ALSA-HWDEP:
 
@@ -487,7 +505,7 @@
 
        -  typically, /dev/snd/hwC?D?
 
-    -  .. row 17
+    -  .. row 18
 
        ..  _MEDIA-INTF-T-ALSA-SEQUENCER:
 
@@ -497,7 +515,7 @@
 
        -  typically, /dev/snd/seq
 
-    -  .. row 18
+    -  .. row 19
 
        ..  _MEDIA-INTF-T-ALSA-TIMER:
 
@@ -508,6 +526,7 @@
        -  typically, /dev/snd/timer
 
 
+.. tabularcolumns:: |p{5.5cm}|p{12.0cm}|
 
 .. _media-pad-flag:
 
@@ -551,6 +570,7 @@
 One and only one of ``MEDIA_PAD_FL_SINK`` and ``MEDIA_PAD_FL_SOURCE``
 must be set for every pad.
 
+.. tabularcolumns:: |p{5.5cm}|p{12.0cm}|
 
 .. _media-link-flag:
 
diff --git a/Documentation/media/uapi/rc/lirc-get-features.rst b/Documentation/media/uapi/rc/lirc-get-features.rst
index e763ebf..79e07b4 100644
--- a/Documentation/media/uapi/rc/lirc-get-features.rst
+++ b/Documentation/media/uapi/rc/lirc-get-features.rst
@@ -14,7 +14,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, __u32 *features)
+.. c:function:: int ioctl( int fd, LIRC_GET_FEATURES, __u32 *features)
+    :name: LIRC_GET_FEATURES
 
 Arguments
 =========
@@ -22,9 +23,6 @@
 ``fd``
     File descriptor returned by open().
 
-``request``
-    LIRC_GET_FEATURES
-
 ``features``
     Bitmask with the LIRC features.
 
diff --git a/Documentation/media/uapi/rc/lirc-get-length.rst b/Documentation/media/uapi/rc/lirc-get-length.rst
index d11c3d3..8c2747c 100644
--- a/Documentation/media/uapi/rc/lirc-get-length.rst
+++ b/Documentation/media/uapi/rc/lirc-get-length.rst
@@ -14,7 +14,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, __u32 *length )
+.. c:function:: int ioctl( int fd, LIRC_GET_LENGTH, __u32 *length )
+    :name: LIRC_GET_LENGTH
 
 Arguments
 =========
@@ -22,9 +23,6 @@
 ``fd``
     File descriptor returned by open().
 
-``request``
-    LIRC_GET_LENGTH
-
 ``length``
     length, in bits
 
diff --git a/Documentation/media/uapi/rc/lirc-get-rec-mode.rst b/Documentation/media/uapi/rc/lirc-get-rec-mode.rst
index 586860c..a5023e0 100644
--- a/Documentation/media/uapi/rc/lirc-get-rec-mode.rst
+++ b/Documentation/media/uapi/rc/lirc-get-rec-mode.rst
@@ -10,12 +10,16 @@
 Name
 ====
 
-LIRC_GET_REC_MODE/LIRC_GET_REC_MODE - Get/set supported receive modes.
+LIRC_GET_REC_MODE/LIRC_SET_REC_MODE - Get/set supported receive modes.
 
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, __u32 rx_modes)
+.. c:function:: int ioctl( int fd, LIRC_GET_REC_MODE, __u32 rx_modes)
+	:name: LIRC_GET_REC_MODE
+
+.. c:function:: int ioctl( int fd, LIRC_SET_REC_MODE, __u32 rx_modes)
+	:name: LIRC_SET_REC_MODE
 
 Arguments
 =========
@@ -23,9 +27,6 @@
 ``fd``
     File descriptor returned by open().
 
-``request``
-    LIRC_GET_REC_MODE or LIRC_GET_REC_MODE
-
 ``rx_modes``
     Bitmask with the supported transmit modes.
 
diff --git a/Documentation/media/uapi/rc/lirc-get-rec-resolution.rst b/Documentation/media/uapi/rc/lirc-get-rec-resolution.rst
index 6ef1723..6e016ed 100644
--- a/Documentation/media/uapi/rc/lirc-get-rec-resolution.rst
+++ b/Documentation/media/uapi/rc/lirc-get-rec-resolution.rst
@@ -14,7 +14,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, __u32 *microseconds)
+.. c:function:: int ioctl( int fd, LIRC_GET_REC_RESOLUTION, __u32 *microseconds)
+    :name: LIRC_GET_REC_RESOLUTION
 
 Arguments
 =========
@@ -22,9 +23,6 @@
 ``fd``
     File descriptor returned by open().
 
-``request``
-    LIRC_GET_REC_RESOLUTION
-
 ``microseconds``
     Resolution, in microseconds.
 
diff --git a/Documentation/media/uapi/rc/lirc-get-send-mode.rst b/Documentation/media/uapi/rc/lirc-get-send-mode.rst
index 3e1d961..51ac134 100644
--- a/Documentation/media/uapi/rc/lirc-get-send-mode.rst
+++ b/Documentation/media/uapi/rc/lirc-get-send-mode.rst
@@ -15,7 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, __u32 *tx_modes )
+.. c:function:: int ioctl( int fd, LIRC_GET_SEND_MODE, __u32 *tx_modes )
+    :name: LIRC_GET_SEND_MODE
+
+.. c:function:: int ioctl( int fd, LIRC_SET_SEND_MODE, __u32 *tx_modes )
+    :name: LIRC_SET_SEND_MODE
 
 Arguments
 =========
@@ -23,9 +27,6 @@
 ``fd``
     File descriptor returned by open().
 
-``request``
-    LIRC_GET_SEND_MODE
-
 ``tx_modes``
     Bitmask with the supported transmit modes.
 
diff --git a/Documentation/media/uapi/rc/lirc-get-timeout.rst b/Documentation/media/uapi/rc/lirc-get-timeout.rst
index 6b8238f..c94bc5d 100644
--- a/Documentation/media/uapi/rc/lirc-get-timeout.rst
+++ b/Documentation/media/uapi/rc/lirc-get-timeout.rst
@@ -16,7 +16,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, __u32 *timeout)
+.. c:function:: int ioctl( int fd, LIRC_GET_MIN_TIMEOUT, __u32 *timeout)
+    :name: LIRC_GET_MIN_TIMEOUT
+
+.. c:function:: int ioctl( int fd, LIRC_GET_MAX_TIMEOUT, __u32 *timeout)
+    :name: LIRC_GET_MAX_TIMEOUT
 
 Arguments
 =========
@@ -24,9 +28,6 @@
 ``fd``
     File descriptor returned by open().
 
-``request``
-    LIRC_GET_MIN_TIMEOUT or LIRC_GET_MAX_TIMEOUT
-
 ``timeout``
     Timeout, in microseconds.
 
diff --git a/Documentation/media/uapi/rc/lirc-read.rst b/Documentation/media/uapi/rc/lirc-read.rst
index 8d4e9b6..4c678f6 100644
--- a/Documentation/media/uapi/rc/lirc-read.rst
+++ b/Documentation/media/uapi/rc/lirc-read.rst
@@ -20,7 +20,8 @@
     #include <unistd.h>
 
 
-.. cpp:function:: ssize_t read( int fd, void *buf, size_t count )
+.. c:function:: ssize_t read( int fd, void *buf, size_t count )
+    :name: lirc-read
 
 
 Arguments
@@ -30,8 +31,10 @@
     File descriptor returned by ``open()``.
 
 ``buf``
-``count``
+   Buffer to be filled
 
+``count``
+   Max number of bytes to read
 
 Description
 ===========
diff --git a/Documentation/media/uapi/rc/lirc-set-measure-carrier-mode.rst b/Documentation/media/uapi/rc/lirc-set-measure-carrier-mode.rst
index e145d9d..6307b57 100644
--- a/Documentation/media/uapi/rc/lirc-set-measure-carrier-mode.rst
+++ b/Documentation/media/uapi/rc/lirc-set-measure-carrier-mode.rst
@@ -14,7 +14,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, __u32 *enable )
+.. c:function:: int ioctl( int fd, LIRC_SET_MEASURE_CARRIER_MODE, __u32 *enable )
+    :name: LIRC_SET_MEASURE_CARRIER_MODE
 
 Arguments
 =========
@@ -22,9 +23,6 @@
 ``fd``
     File descriptor returned by open().
 
-``request``
-    LIRC_SET_MEASURE_CARRIER_MODE
-
 ``enable``
     enable = 1 means enable measure mode, enable = 0 means disable measure
     mode.
diff --git a/Documentation/media/uapi/rc/lirc-set-rec-carrier-range.rst b/Documentation/media/uapi/rc/lirc-set-rec-carrier-range.rst
index 7cce9c8..a83fbbf 100644
--- a/Documentation/media/uapi/rc/lirc-set-rec-carrier-range.rst
+++ b/Documentation/media/uapi/rc/lirc-set-rec-carrier-range.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, __u32 *frequency )
+.. c:function:: int ioctl( int fd, LIRC_SET_REC_CARRIER_RANGE, __u32 *frequency )
+    :name: LIRC_SET_REC_CARRIER_RANGE
 
 Arguments
 =========
@@ -23,9 +24,6 @@
 ``fd``
     File descriptor returned by open().
 
-``request``
-    LIRC_SET_REC_CARRIER_RANGE
-
 ``frequency``
     Frequency of the carrier that modulates PWM data, in Hz.
 
diff --git a/Documentation/media/uapi/rc/lirc-set-rec-carrier.rst b/Documentation/media/uapi/rc/lirc-set-rec-carrier.rst
index 17ddb47..a411c033 100644
--- a/Documentation/media/uapi/rc/lirc-set-rec-carrier.rst
+++ b/Documentation/media/uapi/rc/lirc-set-rec-carrier.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, __u32 *frequency )
+.. c:function:: int ioctl( int fd, LIRC_SET_REC_CARRIER, __u32 *frequency )
+    :name: LIRC_SET_REC_CARRIER
 
 Arguments
 =========
@@ -23,9 +24,6 @@
 ``fd``
     File descriptor returned by open().
 
-``request``
-    LIRC_SET_REC_CARRIER
-
 ``frequency``
     Frequency of the carrier that modulates PWM data, in Hz.
 
diff --git a/Documentation/media/uapi/rc/lirc-set-rec-timeout-reports.rst b/Documentation/media/uapi/rc/lirc-set-rec-timeout-reports.rst
index 0c7f85d..9c501bb 100644
--- a/Documentation/media/uapi/rc/lirc-set-rec-timeout-reports.rst
+++ b/Documentation/media/uapi/rc/lirc-set-rec-timeout-reports.rst
@@ -14,7 +14,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, __u32 *enable )
+.. c:function:: int ioctl( int fd, LIRC_SET_REC_TIMEOUT_REPORTS, __u32 *enable )
+    :name: LIRC_SET_REC_TIMEOUT_REPORTS
 
 Arguments
 =========
@@ -22,9 +23,6 @@
 ``fd``
     File descriptor returned by open().
 
-``request``
-    LIRC_SET_REC_TIMEOUT_REPORTS
-
 ``enable``
     enable = 1 means enable timeout report, enable = 0 means disable timeout
     reports.
diff --git a/Documentation/media/uapi/rc/lirc-set-rec-timeout.rst b/Documentation/media/uapi/rc/lirc-set-rec-timeout.rst
index ffc88f9..b3e16bb 100644
--- a/Documentation/media/uapi/rc/lirc-set-rec-timeout.rst
+++ b/Documentation/media/uapi/rc/lirc-set-rec-timeout.rst
@@ -14,7 +14,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, __u32 *timeout )
+.. c:function:: int ioctl( int fd, LIRC_SET_REC_TIMEOUT, __u32 *timeout )
+    :name: LIRC_SET_REC_TIMEOUT
 
 Arguments
 =========
@@ -22,9 +23,6 @@
 ``fd``
     File descriptor returned by open().
 
-``request``
-    LIRC_SET_REC_TIMEOUT
-
 ``timeout``
     Timeout, in microseconds.
 
diff --git a/Documentation/media/uapi/rc/lirc-set-send-carrier.rst b/Documentation/media/uapi/rc/lirc-set-send-carrier.rst
index 4314d4c..42c8cfb 100644
--- a/Documentation/media/uapi/rc/lirc-set-send-carrier.rst
+++ b/Documentation/media/uapi/rc/lirc-set-send-carrier.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, __u32 *frequency )
+.. c:function:: int ioctl( int fd, LIRC_SET_SEND_CARRIER, __u32 *frequency )
+    :name: LIRC_SET_SEND_CARRIER
 
 Arguments
 =========
@@ -23,9 +24,6 @@
 ``fd``
     File descriptor returned by open().
 
-``request``
-    LIRC_SET_SEND_CARRIER
-
 ``frequency``
     Frequency of the carrier to be modulated, in Hz.
 
diff --git a/Documentation/media/uapi/rc/lirc-set-send-duty-cycle.rst b/Documentation/media/uapi/rc/lirc-set-send-duty-cycle.rst
index 48e7bb1..20d07c2 100644
--- a/Documentation/media/uapi/rc/lirc-set-send-duty-cycle.rst
+++ b/Documentation/media/uapi/rc/lirc-set-send-duty-cycle.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, __u32 *duty_cycle)
+.. c:function:: int ioctl( int fd, LIRC_SET_SEND_DUTY_CYCLE, __u32 *duty_cycle)
+    :name: LIRC_SET_SEND_DUTY_CYCLE
 
 Arguments
 =========
@@ -23,9 +24,6 @@
 ``fd``
     File descriptor returned by open().
 
-``request``
-    LIRC_SET_SEND_DUTY_CYCLE
-
 ``duty_cycle``
     Duty cicle, describing the pulse width in percent (from 1 to 99) of
     the total cycle. Values 0 and 100 are reserved.
diff --git a/Documentation/media/uapi/rc/lirc-set-transmitter-mask.rst b/Documentation/media/uapi/rc/lirc-set-transmitter-mask.rst
index 2b35e21..69b7ad8 100644
--- a/Documentation/media/uapi/rc/lirc-set-transmitter-mask.rst
+++ b/Documentation/media/uapi/rc/lirc-set-transmitter-mask.rst
@@ -14,7 +14,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, __u32 *mask )
+.. c:function:: int ioctl( int fd, LIRC_SET_TRANSMITTER_MASK, __u32 *mask )
+    :name: LIRC_SET_TRANSMITTER_MASK
 
 Arguments
 =========
@@ -22,9 +23,6 @@
 ``fd``
     File descriptor returned by open().
 
-``request``
-    LIRC_SET_TRANSMITTER_MASK
-
 ``mask``
     Mask with channels to enable tx. Channel 0 is the least significant bit.
 
diff --git a/Documentation/media/uapi/rc/lirc-set-wideband-receiver.rst b/Documentation/media/uapi/rc/lirc-set-wideband-receiver.rst
index cffb01f..0415c6a 100644
--- a/Documentation/media/uapi/rc/lirc-set-wideband-receiver.rst
+++ b/Documentation/media/uapi/rc/lirc-set-wideband-receiver.rst
@@ -14,7 +14,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, __u32 *enable )
+.. c:function:: int ioctl( int fd, LIRC_SET_WIDEBAND_RECEIVER, __u32 *enable )
+    :name: LIRC_SET_WIDEBAND_RECEIVER
 
 Arguments
 =========
@@ -22,9 +23,6 @@
 ``fd``
     File descriptor returned by open().
 
-``request``
-    LIRC_SET_WIDEBAND_RECEIVER
-
 ``enable``
     enable = 1 means enable wideband receiver, enable = 0 means disable
     wideband receiver.
@@ -42,7 +40,9 @@
 also be more precise. On the other hand its disadvantage it usually
 reduced range of reception.
 
-.. note:: Wide band receiver might be implictly enabled if you enable
+.. note::
+
+    Wide band receiver might be implictly enabled if you enable
     carrier reports. In that case it will be disabled as soon as you disable
     carrier reports. Trying to disable wide band receiver while carrier
     reports are active will do nothing.
diff --git a/Documentation/media/uapi/rc/lirc-write.rst b/Documentation/media/uapi/rc/lirc-write.rst
index dcba3b1..3b035c6 100644
--- a/Documentation/media/uapi/rc/lirc-write.rst
+++ b/Documentation/media/uapi/rc/lirc-write.rst
@@ -20,8 +20,8 @@
     #include <unistd.h>
 
 
-.. cpp:function:: ssize_t write( int fd, void *buf, size_t count )
-
+.. c:function:: ssize_t write( int fd, void *buf, size_t count )
+    :name: lirc-write
 
 Arguments
 =========
@@ -30,8 +30,10 @@
     File descriptor returned by ``open()``.
 
 ``buf``
-``count``
+    Buffer with data to be written
 
+``count``
+    Number of bytes at the buffer
 
 Description
 ===========
diff --git a/Documentation/media/uapi/rc/rc-tables.rst b/Documentation/media/uapi/rc/rc-tables.rst
index 0bb16c4a..c8ae947 100644
--- a/Documentation/media/uapi/rc/rc-tables.rst
+++ b/Documentation/media/uapi/rc/rc-tables.rst
@@ -25,6 +25,8 @@
 
 .. _rc_standard_keymap:
 
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
 .. flat-table:: IR default keymapping
     :header-rows:  0
     :stub-columns: 0
diff --git a/Documentation/media/uapi/v4l/audio.rst b/Documentation/media/uapi/v4l/audio.rst
index 4dd1134..5ec99a2 100644
--- a/Documentation/media/uapi/v4l/audio.rst
+++ b/Documentation/media/uapi/v4l/audio.rst
@@ -21,15 +21,15 @@
 and two audio inputs exist, there may be up to four valid combinations.
 The relation of video and audio connectors is defined in the
 ``audioset`` field of the respective struct
-:ref:`v4l2_input <v4l2-input>` or struct
-:ref:`v4l2_output <v4l2-output>`, where each bit represents the index
+:c:type:`v4l2_input` or struct
+:c:type:`v4l2_output`, where each bit represents the index
 number, starting at zero, of one audio input or output.
 
 To learn about the number and attributes of the available inputs and
 outputs applications can enumerate them with the
 :ref:`VIDIOC_ENUMAUDIO` and
 :ref:`VIDIOC_ENUMAUDOUT <VIDIOC_ENUMAUDOUT>` ioctl, respectively.
-The struct :ref:`v4l2_audio <v4l2-audio>` returned by the
+The struct :c:type:`v4l2_audio` returned by the
 :ref:`VIDIOC_ENUMAUDIO` ioctl also contains signal
 :status information applicable when the current audio input is queried.
 
@@ -37,7 +37,9 @@
 :ref:`VIDIOC_G_AUDOUT <VIDIOC_G_AUDOUT>` ioctls report the current
 audio input and output, respectively.
 
-.. note:: Note that, unlike :ref:`VIDIOC_G_INPUT <VIDIOC_G_INPUT>` and
+.. note::
+
+   Note that, unlike :ref:`VIDIOC_G_INPUT <VIDIOC_G_INPUT>` and
    :ref:`VIDIOC_G_OUTPUT <VIDIOC_G_OUTPUT>` these ioctls return a
    structure as :ref:`VIDIOC_ENUMAUDIO` and
    :ref:`VIDIOC_ENUMAUDOUT <VIDIOC_ENUMAUDOUT>` do, not just an index.
@@ -51,7 +53,7 @@
 multiple selectable audio inputs, all audio output ioctls when the
 device has multiple selectable audio outputs. When the device has any
 audio inputs or outputs the driver must set the ``V4L2_CAP_AUDIO`` flag
-in the struct :ref:`v4l2_capability <v4l2-capability>` returned by
+in the struct :c:type:`v4l2_capability` returned by
 the :ref:`VIDIOC_QUERYCAP` ioctl.
 
 
@@ -89,7 +91,7 @@
     }
 
 .. [#f1]
-   Actually struct :ref:`v4l2_audio <v4l2-audio>` ought to have a
-   ``tuner`` field like struct :ref:`v4l2_input <v4l2-input>`, not
+   Actually struct :c:type:`v4l2_audio` ought to have a
+   ``tuner`` field like struct :c:type:`v4l2_input`, not
    only making the API more consistent but also permitting radio devices
    with multiple tuners.
diff --git a/Documentation/media/uapi/v4l/buffer.rst b/Documentation/media/uapi/v4l/buffer.rst
index 5deb4a4..ac58966 100644
--- a/Documentation/media/uapi/v4l/buffer.rst
+++ b/Documentation/media/uapi/v4l/buffer.rst
@@ -11,14 +11,14 @@
 planes, while the buffer structure acts as a container for the planes.
 Only pointers to buffers (planes) are exchanged, the data itself is not
 copied. These pointers, together with meta-information like timestamps
-or field parity, are stored in a struct :ref:`struct v4l2_buffer <v4l2-buffer>`,
+or field parity, are stored in a struct :c:type:`v4l2_buffer`,
 argument to the :ref:`VIDIOC_QUERYBUF`,
 :ref:`VIDIOC_QBUF` and
 :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl. In the multi-planar API,
-some plane-specific members of struct :ref:`struct v4l2_buffer <v4l2-buffer>`,
+some plane-specific members of struct :c:type:`v4l2_buffer`,
 such as pointers and sizes for each plane, are stored in struct
-:ref:`struct v4l2_plane <v4l2-plane>` instead. In that case, struct
-:ref:`struct v4l2_buffer <v4l2-buffer>` contains an array of plane structures.
+struct :c:type:`v4l2_plane` instead. In that case, struct
+struct :c:type:`v4l2_buffer` contains an array of plane structures.
 
 Dequeued video buffers come with timestamps. The driver decides at which
 part of the frame and with which clock the timestamp is taken. Please
@@ -34,470 +34,302 @@
 buffer.
 
 
-.. _v4l2-buffer:
+.. c:type:: v4l2_buffer
 
 struct v4l2_buffer
 ==================
 
+.. tabularcolumns:: |p{2.8cm}|p{2.5cm}|p{1.3cm}|p{10.5cm}|
+
+.. cssclass:: longtable
+
 .. flat-table:: struct v4l2_buffer
     :header-rows:  0
     :stub-columns: 0
-    :widths:       1 1 1 2
+    :widths:       1 2 1 10
 
+    * - __u32
+      - ``index``
+      -
+      - Number of the buffer, set by the application except when calling
+	:ref:`VIDIOC_DQBUF <VIDIOC_QBUF>`, then it is set by the
+	driver. This field can range from zero to the number of buffers
+	allocated with the :ref:`VIDIOC_REQBUFS` ioctl
+	(struct :c:type:`v4l2_requestbuffers`
+	``count``), plus any buffers allocated with
+	:ref:`VIDIOC_CREATE_BUFS` minus one.
+    * - __u32
+      - ``type``
+      -
+      - Type of the buffer, same as struct
+	:c:type:`v4l2_format` ``type`` or struct
+	:c:type:`v4l2_requestbuffers` ``type``, set
+	by the application. See :c:type:`v4l2_buf_type`
+    * - __u32
+      - ``bytesused``
+      -
+      - The number of bytes occupied by the data in the buffer. It depends
+	on the negotiated data format and may change with each buffer for
+	compressed variable size data like JPEG images. Drivers must set
+	this field when ``type`` refers to a capture stream, applications
+	when it refers to an output stream. If the application sets this
+	to 0 for an output stream, then ``bytesused`` will be set to the
+	size of the buffer (see the ``length`` field of this struct) by
+	the driver. For multiplanar formats this field is ignored and the
+	``planes`` pointer is used instead.
+    * - __u32
+      - ``flags``
+      -
+      - Flags set by the application or driver, see :ref:`buffer-flags`.
+    * - __u32
+      - ``field``
+      -
+      - Indicates the field order of the image in the buffer, see
+	:c:type:`v4l2_field`. This field is not used when the buffer
+	contains VBI data. Drivers must set it when ``type`` refers to a
+	capture stream, applications when it refers to an output stream.
+    * - struct timeval
+      - ``timestamp``
+      -
+      - For capture streams this is time when the first data byte was
+	captured, as returned by the :c:func:`clock_gettime()` function
+	for the relevant clock id; see ``V4L2_BUF_FLAG_TIMESTAMP_*`` in
+	:ref:`buffer-flags`. For output streams the driver stores the
+	time at which the last data byte was actually sent out in the
+	``timestamp`` field. This permits applications to monitor the
+	drift between the video and system clock. For output streams that
+	use ``V4L2_BUF_FLAG_TIMESTAMP_COPY`` the application has to fill
+	in the timestamp which will be copied by the driver to the capture
+	stream.
+    * - struct :c:type:`v4l2_timecode`
+      - ``timecode``
+      -
+      - When ``type`` is ``V4L2_BUF_TYPE_VIDEO_CAPTURE`` and the
+	``V4L2_BUF_FLAG_TIMECODE`` flag is set in ``flags``, this
+	structure contains a frame timecode. In
+	:c:type:`V4L2_FIELD_ALTERNATE <v4l2_field>` mode the top and
+	bottom field contain the same timecode. Timecodes are intended to
+	help video editing and are typically recorded on video tapes, but
+	also embedded in compressed formats like MPEG. This field is
+	independent of the ``timestamp`` and ``sequence`` fields.
+    * - __u32
+      - ``sequence``
+      -
+      - Set by the driver, counting the frames (not fields!) in sequence.
+	This field is set for both input and output devices.
+    * - :cspan:`3`
 
-    -  .. row 1
+	In :c:type:`V4L2_FIELD_ALTERNATE <v4l2_field>` mode the top and
+	bottom field have the same sequence number. The count starts at
+	zero and includes dropped or repeated frames. A dropped frame was
+	received by an input device but could not be stored due to lack of
+	free buffer space. A repeated frame was displayed again by an
+	output device because the application did not pass new data in
+	time.
 
-       -  __u32
+	.. note::
 
-       -  ``index``
+	   This may count the frames received e.g. over USB, without
+	   taking into account the frames dropped by the remote hardware due
+	   to limited compression throughput or bus bandwidth. These devices
+	   identify by not enumerating any video standards, see
+	   :ref:`standard`.
 
-       -
-       -  Number of the buffer, set by the application except when calling
-	  :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>`, then it is set by the
-	  driver. This field can range from zero to the number of buffers
-	  allocated with the :ref:`VIDIOC_REQBUFS` ioctl
-	  (struct :ref:`v4l2_requestbuffers <v4l2-requestbuffers>`
-	  ``count``), plus any buffers allocated with
-	  :ref:`VIDIOC_CREATE_BUFS` minus one.
+    * - __u32
+      - ``memory``
+      -
+      - This field must be set by applications and/or drivers in
+	accordance with the selected I/O method. See :c:type:`v4l2_memory`
+    * - union
+      - ``m``
+    * -
+      - __u32
+      - ``offset``
+      - For the single-planar API and when ``memory`` is
+	``V4L2_MEMORY_MMAP`` this is the offset of the buffer from the
+	start of the device memory. The value is returned by the driver
+	and apart of serving as parameter to the
+	:ref:`mmap() <func-mmap>` function not useful for applications.
+	See :ref:`mmap` for details
+    * -
+      - unsigned long
+      - ``userptr``
+      - For the single-planar API and when ``memory`` is
+	``V4L2_MEMORY_USERPTR`` this is a pointer to the buffer (casted to
+	unsigned long type) in virtual memory, set by the application. See
+	:ref:`userp` for details.
+    * -
+      - struct v4l2_plane
+      - ``*planes``
+      - When using the multi-planar API, contains a userspace pointer to
+	an array of struct :c:type:`v4l2_plane`. The size of
+	the array should be put in the ``length`` field of this
+	struct :c:type:`v4l2_buffer` structure.
+    * -
+      - int
+      - ``fd``
+      - For the single-plane API and when ``memory`` is
+	``V4L2_MEMORY_DMABUF`` this is the file descriptor associated with
+	a DMABUF buffer.
+    * - __u32
+      - ``length``
+      -
+      - Size of the buffer (not the payload) in bytes for the
+	single-planar API. This is set by the driver based on the calls to
+	:ref:`VIDIOC_REQBUFS` and/or
+	:ref:`VIDIOC_CREATE_BUFS`. For the
+	multi-planar API the application sets this to the number of
+	elements in the ``planes`` array. The driver will fill in the
+	actual number of valid elements in that array.
+    * - __u32
+      - ``reserved2``
+      -
+      - A place holder for future extensions. Drivers and applications
+	must set this to 0.
+    * - __u32
+      - ``reserved``
+      -
+      - A place holder for future extensions. Drivers and applications
+	must set this to 0.
 
-    -  .. row 2
 
-       -  __u32
 
-       -  ``type``
-
-       -
-       -  Type of the buffer, same as struct
-	  :ref:`v4l2_format <v4l2-format>` ``type`` or struct
-	  :ref:`v4l2_requestbuffers <v4l2-requestbuffers>` ``type``, set
-	  by the application. See :ref:`v4l2-buf-type`
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``bytesused``
-
-       -
-       -  The number of bytes occupied by the data in the buffer. It depends
-	  on the negotiated data format and may change with each buffer for
-	  compressed variable size data like JPEG images. Drivers must set
-	  this field when ``type`` refers to a capture stream, applications
-	  when it refers to an output stream. If the application sets this
-	  to 0 for an output stream, then ``bytesused`` will be set to the
-	  size of the buffer (see the ``length`` field of this struct) by
-	  the driver. For multiplanar formats this field is ignored and the
-	  ``planes`` pointer is used instead.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``flags``
-
-       -
-       -  Flags set by the application or driver, see :ref:`buffer-flags`.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``field``
-
-       -
-       -  Indicates the field order of the image in the buffer, see
-	  :ref:`v4l2-field`. This field is not used when the buffer
-	  contains VBI data. Drivers must set it when ``type`` refers to a
-	  capture stream, applications when it refers to an output stream.
-
-    -  .. row 6
-
-       -  struct timeval
-
-       -  ``timestamp``
-
-       -
-       -  For capture streams this is time when the first data byte was
-	  captured, as returned by the :c:func:`clock_gettime()` function
-	  for the relevant clock id; see ``V4L2_BUF_FLAG_TIMESTAMP_*`` in
-	  :ref:`buffer-flags`. For output streams the driver stores the
-	  time at which the last data byte was actually sent out in the
-	  ``timestamp`` field. This permits applications to monitor the
-	  drift between the video and system clock. For output streams that
-	  use ``V4L2_BUF_FLAG_TIMESTAMP_COPY`` the application has to fill
-	  in the timestamp which will be copied by the driver to the capture
-	  stream.
-
-    -  .. row 7
-
-       -  struct :ref:`v4l2_timecode <v4l2-timecode>`
-
-       -  ``timecode``
-
-       -
-       -  When ``type`` is ``V4L2_BUF_TYPE_VIDEO_CAPTURE`` and the
-	  ``V4L2_BUF_FLAG_TIMECODE`` flag is set in ``flags``, this
-	  structure contains a frame timecode. In
-	  :ref:`V4L2_FIELD_ALTERNATE <v4l2-field>` mode the top and
-	  bottom field contain the same timecode. Timecodes are intended to
-	  help video editing and are typically recorded on video tapes, but
-	  also embedded in compressed formats like MPEG. This field is
-	  independent of the ``timestamp`` and ``sequence`` fields.
-
-    -  .. row 8
-
-       -  __u32
-
-       -  ``sequence``
-
-       -
-       -  Set by the driver, counting the frames (not fields!) in sequence.
-	  This field is set for both input and output devices.
-
-    -  .. row 9
-
-       -  :cspan:`3`
-
-	  In :ref:`V4L2_FIELD_ALTERNATE <v4l2-field>` mode the top and
-	  bottom field have the same sequence number. The count starts at
-	  zero and includes dropped or repeated frames. A dropped frame was
-	  received by an input device but could not be stored due to lack of
-	  free buffer space. A repeated frame was displayed again by an
-	  output device because the application did not pass new data in
-	  time.
-
-	  .. note:: This may count the frames received e.g. over USB, without
-	     taking into account the frames dropped by the remote hardware due
-	     to limited compression throughput or bus bandwidth. These devices
-	     identify by not enumerating any video standards, see
-	     :ref:`standard`.
-
-    -  .. row 10
-
-       -  __u32
-
-       -  ``memory``
-
-       -
-       -  This field must be set by applications and/or drivers in
-	  accordance with the selected I/O method. See :ref:`v4l2-memory`
-
-    -  .. row 11
-
-       -  union
-
-       -  ``m``
-
-    -  .. row 12
-
-       -
-       -  __u32
-
-       -  ``offset``
-
-       -  For the single-planar API and when ``memory`` is
-	  ``V4L2_MEMORY_MMAP`` this is the offset of the buffer from the
-	  start of the device memory. The value is returned by the driver
-	  and apart of serving as parameter to the
-	  :ref:`mmap() <func-mmap>` function not useful for applications.
-	  See :ref:`mmap` for details
-
-    -  .. row 13
-
-       -
-       -  unsigned long
-
-       -  ``userptr``
-
-       -  For the single-planar API and when ``memory`` is
-	  ``V4L2_MEMORY_USERPTR`` this is a pointer to the buffer (casted to
-	  unsigned long type) in virtual memory, set by the application. See
-	  :ref:`userp` for details.
-
-    -  .. row 14
-
-       -
-       -  struct v4l2_plane
-
-       -  ``*planes``
-
-       -  When using the multi-planar API, contains a userspace pointer to
-	  an array of struct :ref:`v4l2_plane <v4l2-plane>`. The size of
-	  the array should be put in the ``length`` field of this
-	  :ref:`struct v4l2_buffer <v4l2-buffer>` structure.
-
-    -  .. row 15
-
-       -
-       -  int
-
-       -  ``fd``
-
-       -  For the single-plane API and when ``memory`` is
-	  ``V4L2_MEMORY_DMABUF`` this is the file descriptor associated with
-	  a DMABUF buffer.
-
-    -  .. row 16
-
-       -  __u32
-
-       -  ``length``
-
-       -
-       -  Size of the buffer (not the payload) in bytes for the
-	  single-planar API. This is set by the driver based on the calls to
-	  :ref:`VIDIOC_REQBUFS` and/or
-	  :ref:`VIDIOC_CREATE_BUFS`. For the
-	  multi-planar API the application sets this to the number of
-	  elements in the ``planes`` array. The driver will fill in the
-	  actual number of valid elements in that array.
-
-    -  .. row 17
-
-       -  __u32
-
-       -  ``reserved2``
-
-       -
-       -  A place holder for future extensions. Drivers and applications
-	  must set this to 0.
-
-    -  .. row 18
-
-       -  __u32
-
-       -  ``reserved``
-
-       -
-       -  A place holder for future extensions. Drivers and applications
-	  must set this to 0.
-
-
-
-.. _v4l2-plane:
+.. c:type:: v4l2_plane
 
 struct v4l2_plane
 =================
 
+.. tabularcolumns:: |p{3.5cm}|p{3.5cm}|p{3.5cm}|p{7.0cm}|
+
+.. cssclass:: longtable
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 1 2
 
+    * - __u32
+      - ``bytesused``
+      -
+      - The number of bytes occupied by data in the plane (its payload).
+	Drivers must set this field when ``type`` refers to a capture
+	stream, applications when it refers to an output stream. If the
+	application sets this to 0 for an output stream, then
+	``bytesused`` will be set to the size of the plane (see the
+	``length`` field of this struct) by the driver.
 
-    -  .. row 1
+	.. note::
 
-       -  __u32
+	   Note that the actual image data starts at ``data_offset``
+	   which may not be 0.
+    * - __u32
+      - ``length``
+      -
+      - Size in bytes of the plane (not its payload). This is set by the
+	driver based on the calls to
+	:ref:`VIDIOC_REQBUFS` and/or
+	:ref:`VIDIOC_CREATE_BUFS`.
+    * - union
+      - ``m``
+      -
+      -
+    * -
+      - __u32
+      - ``mem_offset``
+      - When the memory type in the containing struct
+	:c:type:`v4l2_buffer` is ``V4L2_MEMORY_MMAP``, this
+	is the value that should be passed to :ref:`mmap() <func-mmap>`,
+	similar to the ``offset`` field in struct
+	:c:type:`v4l2_buffer`.
+    * -
+      - unsigned long
+      - ``userptr``
+      - When the memory type in the containing struct
+	:c:type:`v4l2_buffer` is ``V4L2_MEMORY_USERPTR``,
+	this is a userspace pointer to the memory allocated for this plane
+	by an application.
+    * -
+      - int
+      - ``fd``
+      - When the memory type in the containing struct
+	:c:type:`v4l2_buffer` is ``V4L2_MEMORY_DMABUF``,
+	this is a file descriptor associated with a DMABUF buffer, similar
+	to the ``fd`` field in struct :c:type:`v4l2_buffer`.
+    * - __u32
+      - ``data_offset``
+      -
+      - Offset in bytes to video data in the plane. Drivers must set this
+	field when ``type`` refers to a capture stream, applications when
+	it refers to an output stream.
 
-       -  ``bytesused``
+	.. note::
 
-       -
-       -  The number of bytes occupied by data in the plane (its payload).
-	  Drivers must set this field when ``type`` refers to a capture
-	  stream, applications when it refers to an output stream. If the
-	  application sets this to 0 for an output stream, then
-	  ``bytesused`` will be set to the size of the plane (see the
-	  ``length`` field of this struct) by the driver.
-
-	  .. note:: Note that the actual image data starts at ``data_offset``
-	     which may not be 0.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``length``
-
-       -
-       -  Size in bytes of the plane (not its payload). This is set by the
-	  driver based on the calls to
-	  :ref:`VIDIOC_REQBUFS` and/or
-	  :ref:`VIDIOC_CREATE_BUFS`.
-
-    -  .. row 3
-
-       -  union
-
-       -  ``m``
-
-       -
-       -
-
-    -  .. row 4
-
-       -
-       -  __u32
-
-       -  ``mem_offset``
-
-       -  When the memory type in the containing struct
-	  :ref:`v4l2_buffer <v4l2-buffer>` is ``V4L2_MEMORY_MMAP``, this
-	  is the value that should be passed to :ref:`mmap() <func-mmap>`,
-	  similar to the ``offset`` field in struct
-	  :ref:`v4l2_buffer <v4l2-buffer>`.
-
-    -  .. row 5
-
-       -
-       -  unsigned long
-
-       -  ``userptr``
-
-       -  When the memory type in the containing struct
-	  :ref:`v4l2_buffer <v4l2-buffer>` is ``V4L2_MEMORY_USERPTR``,
-	  this is a userspace pointer to the memory allocated for this plane
-	  by an application.
-
-    -  .. row 6
-
-       -
-       -  int
-
-       -  ``fd``
-
-       -  When the memory type in the containing struct
-	  :ref:`v4l2_buffer <v4l2-buffer>` is ``V4L2_MEMORY_DMABUF``,
-	  this is a file descriptor associated with a DMABUF buffer, similar
-	  to the ``fd`` field in struct :ref:`v4l2_buffer <v4l2-buffer>`.
-
-    -  .. row 7
-
-       -  __u32
-
-       -  ``data_offset``
-
-       -
-       -  Offset in bytes to video data in the plane. Drivers must set this
-	  field when ``type`` refers to a capture stream, applications when
-	  it refers to an output stream.
-
-	  .. note:: That data_offset is included  in ``bytesused``. So the
-	     size of the image in the plane is ``bytesused``-``data_offset``
-	     at offset ``data_offset`` from the start of the plane.
-
-    -  .. row 8
-
-       -  __u32
-
-       -  ``reserved[11]``
-
-       -
-       -  Reserved for future use. Should be zeroed by drivers and
-	  applications.
+	   That data_offset is included  in ``bytesused``. So the
+	   size of the image in the plane is ``bytesused``-``data_offset``
+	   at offset ``data_offset`` from the start of the plane.
+    * - __u32
+      - ``reserved[11]``
+      -
+      - Reserved for future use. Should be zeroed by drivers and
+	applications.
 
 
 
-.. _v4l2-buf-type:
+.. c:type:: v4l2_buf_type
 
 enum v4l2_buf_type
 ==================
 
+.. cssclass:: longtable
+
+.. tabularcolumns:: |p{7.2cm}|p{0.6cm}|p{9.7cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       3 1 4
+    :widths:       4 1 9
 
-
-    -  .. row 1
-
-       -  ``V4L2_BUF_TYPE_VIDEO_CAPTURE``
-
-       -  1
-
-       -  Buffer of a single-planar video capture stream, see
-	  :ref:`capture`.
-
-    -  .. row 2
-
-       -  ``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE``
-
-       -  9
-
-       -  Buffer of a multi-planar video capture stream, see
-	  :ref:`capture`.
-
-    -  .. row 3
-
-       -  ``V4L2_BUF_TYPE_VIDEO_OUTPUT``
-
-       -  2
-
-       -  Buffer of a single-planar video output stream, see
-	  :ref:`output`.
-
-    -  .. row 4
-
-       -  ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``
-
-       -  10
-
-       -  Buffer of a multi-planar video output stream, see :ref:`output`.
-
-    -  .. row 5
-
-       -  ``V4L2_BUF_TYPE_VIDEO_OVERLAY``
-
-       -  3
-
-       -  Buffer for video overlay, see :ref:`overlay`.
-
-    -  .. row 6
-
-       -  ``V4L2_BUF_TYPE_VBI_CAPTURE``
-
-       -  4
-
-       -  Buffer of a raw VBI capture stream, see :ref:`raw-vbi`.
-
-    -  .. row 7
-
-       -  ``V4L2_BUF_TYPE_VBI_OUTPUT``
-
-       -  5
-
-       -  Buffer of a raw VBI output stream, see :ref:`raw-vbi`.
-
-    -  .. row 8
-
-       -  ``V4L2_BUF_TYPE_SLICED_VBI_CAPTURE``
-
-       -  6
-
-       -  Buffer of a sliced VBI capture stream, see :ref:`sliced`.
-
-    -  .. row 9
-
-       -  ``V4L2_BUF_TYPE_SLICED_VBI_OUTPUT``
-
-       -  7
-
-       -  Buffer of a sliced VBI output stream, see :ref:`sliced`.
-
-    -  .. row 10
-
-       -  ``V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY``
-
-       -  8
-
-       -  Buffer for video output overlay (OSD), see :ref:`osd`.
-
-    -  .. row 11
-
-       -  ``V4L2_BUF_TYPE_SDR_CAPTURE``
-
-       -  11
-
-       -  Buffer for Software Defined Radio (SDR) capture stream, see
-	  :ref:`sdr`.
-
-    -  .. row 12
-
-       -  ``V4L2_BUF_TYPE_SDR_OUTPUT``
-
-       -  12
-
-       -  Buffer for Software Defined Radio (SDR) output stream, see
-	  :ref:`sdr`.
+    * - ``V4L2_BUF_TYPE_VIDEO_CAPTURE``
+      - 1
+      - Buffer of a single-planar video capture stream, see
+	:ref:`capture`.
+    * - ``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE``
+      - 9
+      - Buffer of a multi-planar video capture stream, see
+	:ref:`capture`.
+    * - ``V4L2_BUF_TYPE_VIDEO_OUTPUT``
+      - 2
+      - Buffer of a single-planar video output stream, see
+	:ref:`output`.
+    * - ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``
+      - 10
+      - Buffer of a multi-planar video output stream, see :ref:`output`.
+    * - ``V4L2_BUF_TYPE_VIDEO_OVERLAY``
+      - 3
+      - Buffer for video overlay, see :ref:`overlay`.
+    * - ``V4L2_BUF_TYPE_VBI_CAPTURE``
+      - 4
+      - Buffer of a raw VBI capture stream, see :ref:`raw-vbi`.
+    * - ``V4L2_BUF_TYPE_VBI_OUTPUT``
+      - 5
+      - Buffer of a raw VBI output stream, see :ref:`raw-vbi`.
+    * - ``V4L2_BUF_TYPE_SLICED_VBI_CAPTURE``
+      - 6
+      - Buffer of a sliced VBI capture stream, see :ref:`sliced`.
+    * - ``V4L2_BUF_TYPE_SLICED_VBI_OUTPUT``
+      - 7
+      - Buffer of a sliced VBI output stream, see :ref:`sliced`.
+    * - ``V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY``
+      - 8
+      - Buffer for video output overlay (OSD), see :ref:`osd`.
+    * - ``V4L2_BUF_TYPE_SDR_CAPTURE``
+      - 11
+      - Buffer for Software Defined Radio (SDR) capture stream, see
+	:ref:`sdr`.
+    * - ``V4L2_BUF_TYPE_SDR_OUTPUT``
+      - 12
+      - Buffer for Software Defined Radio (SDR) output stream, see
+	:ref:`sdr`.
 
 
 
@@ -506,371 +338,267 @@
 Buffer Flags
 ============
 
+.. tabularcolumns:: |p{7.0cm}|p{2.2cm}|p{8.3cm}|
+
+.. cssclass:: longtable
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
     :widths:       3 1 4
 
+    * .. _`V4L2-BUF-FLAG-MAPPED`:
 
-    -  .. _`V4L2-BUF-FLAG-MAPPED`:
+      - ``V4L2_BUF_FLAG_MAPPED``
+      - 0x00000001
+      - The buffer resides in device memory and has been mapped into the
+	application's address space, see :ref:`mmap` for details.
+	Drivers set or clear this flag when the
+	:ref:`VIDIOC_QUERYBUF`,
+	:ref:`VIDIOC_QBUF` or
+	:ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl is called. Set by the
+	driver.
+    * .. _`V4L2-BUF-FLAG-QUEUED`:
 
-       -  ``V4L2_BUF_FLAG_MAPPED``
+      - ``V4L2_BUF_FLAG_QUEUED``
+      - 0x00000002
+      - Internally drivers maintain two buffer queues, an incoming and
+	outgoing queue. When this flag is set, the buffer is currently on
+	the incoming queue. It automatically moves to the outgoing queue
+	after the buffer has been filled (capture devices) or displayed
+	(output devices). Drivers set or clear this flag when the
+	``VIDIOC_QUERYBUF`` ioctl is called. After (successful) calling
+	the ``VIDIOC_QBUF``\ ioctl it is always set and after
+	``VIDIOC_DQBUF`` always cleared.
+    * .. _`V4L2-BUF-FLAG-DONE`:
 
-       -  0x00000001
+      - ``V4L2_BUF_FLAG_DONE``
+      - 0x00000004
+      - When this flag is set, the buffer is currently on the outgoing
+	queue, ready to be dequeued from the driver. Drivers set or clear
+	this flag when the ``VIDIOC_QUERYBUF`` ioctl is called. After
+	calling the ``VIDIOC_QBUF`` or ``VIDIOC_DQBUF`` it is always
+	cleared. Of course a buffer cannot be on both queues at the same
+	time, the ``V4L2_BUF_FLAG_QUEUED`` and ``V4L2_BUF_FLAG_DONE`` flag
+	are mutually exclusive. They can be both cleared however, then the
+	buffer is in "dequeued" state, in the application domain so to
+	say.
+    * .. _`V4L2-BUF-FLAG-ERROR`:
 
-       -  The buffer resides in device memory and has been mapped into the
-	  application's address space, see :ref:`mmap` for details.
-	  Drivers set or clear this flag when the
-	  :ref:`VIDIOC_QUERYBUF`,
-	  :ref:`VIDIOC_QBUF` or
-	  :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl is called. Set by the
-	  driver.
+      - ``V4L2_BUF_FLAG_ERROR``
+      - 0x00000040
+      - When this flag is set, the buffer has been dequeued successfully,
+	although the data might have been corrupted. This is recoverable,
+	streaming may continue as normal and the buffer may be reused
+	normally. Drivers set this flag when the ``VIDIOC_DQBUF`` ioctl is
+	called.
+    * .. _`V4L2-BUF-FLAG-KEYFRAME`:
 
-    -  .. _`V4L2-BUF-FLAG-QUEUED`:
+      - ``V4L2_BUF_FLAG_KEYFRAME``
+      - 0x00000008
+      - Drivers set or clear this flag when calling the ``VIDIOC_DQBUF``
+	ioctl. It may be set by video capture devices when the buffer
+	contains a compressed image which is a key frame (or field), i. e.
+	can be decompressed on its own. Also known as an I-frame.
+	Applications can set this bit when ``type`` refers to an output
+	stream.
+    * .. _`V4L2-BUF-FLAG-PFRAME`:
 
-       -  ``V4L2_BUF_FLAG_QUEUED``
+      - ``V4L2_BUF_FLAG_PFRAME``
+      - 0x00000010
+      - Similar to ``V4L2_BUF_FLAG_KEYFRAME`` this flags predicted frames
+	or fields which contain only differences to a previous key frame.
+	Applications can set this bit when ``type`` refers to an output
+	stream.
+    * .. _`V4L2-BUF-FLAG-BFRAME`:
 
-       -  0x00000002
+      - ``V4L2_BUF_FLAG_BFRAME``
+      - 0x00000020
+      - Similar to ``V4L2_BUF_FLAG_KEYFRAME`` this flags a bi-directional
+	predicted frame or field which contains only the differences
+	between the current frame and both the preceding and following key
+	frames to specify its content. Applications can set this bit when
+	``type`` refers to an output stream.
+    * .. _`V4L2-BUF-FLAG-TIMECODE`:
 
-       -  Internally drivers maintain two buffer queues, an incoming and
-	  outgoing queue. When this flag is set, the buffer is currently on
-	  the incoming queue. It automatically moves to the outgoing queue
-	  after the buffer has been filled (capture devices) or displayed
-	  (output devices). Drivers set or clear this flag when the
-	  ``VIDIOC_QUERYBUF`` ioctl is called. After (successful) calling
-	  the ``VIDIOC_QBUF``\ ioctl it is always set and after
-	  ``VIDIOC_DQBUF`` always cleared.
+      - ``V4L2_BUF_FLAG_TIMECODE``
+      - 0x00000100
+      - The ``timecode`` field is valid. Drivers set or clear this flag
+	when the ``VIDIOC_DQBUF`` ioctl is called. Applications can set
+	this bit and the corresponding ``timecode`` structure when
+	``type`` refers to an output stream.
+    * .. _`V4L2-BUF-FLAG-PREPARED`:
 
-    -  .. _`V4L2-BUF-FLAG-DONE`:
+      - ``V4L2_BUF_FLAG_PREPARED``
+      - 0x00000400
+      - The buffer has been prepared for I/O and can be queued by the
+	application. Drivers set or clear this flag when the
+	:ref:`VIDIOC_QUERYBUF`,
+	:ref:`VIDIOC_PREPARE_BUF <VIDIOC_QBUF>`,
+	:ref:`VIDIOC_QBUF` or
+	:ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl is called.
+    * .. _`V4L2-BUF-FLAG-NO-CACHE-INVALIDATE`:
 
-       -  ``V4L2_BUF_FLAG_DONE``
+      - ``V4L2_BUF_FLAG_NO_CACHE_INVALIDATE``
+      - 0x00000800
+      - Caches do not have to be invalidated for this buffer. Typically
+	applications shall use this flag if the data captured in the
+	buffer is not going to be touched by the CPU, instead the buffer
+	will, probably, be passed on to a DMA-capable hardware unit for
+	further processing or output.
+    * .. _`V4L2-BUF-FLAG-NO-CACHE-CLEAN`:
 
-       -  0x00000004
+      - ``V4L2_BUF_FLAG_NO_CACHE_CLEAN``
+      - 0x00001000
+      - Caches do not have to be cleaned for this buffer. Typically
+	applications shall use this flag for output buffers if the data in
+	this buffer has not been created by the CPU but by some
+	DMA-capable unit, in which case caches have not been used.
+    * .. _`V4L2-BUF-FLAG-LAST`:
 
-       -  When this flag is set, the buffer is currently on the outgoing
-	  queue, ready to be dequeued from the driver. Drivers set or clear
-	  this flag when the ``VIDIOC_QUERYBUF`` ioctl is called. After
-	  calling the ``VIDIOC_QBUF`` or ``VIDIOC_DQBUF`` it is always
-	  cleared. Of course a buffer cannot be on both queues at the same
-	  time, the ``V4L2_BUF_FLAG_QUEUED`` and ``V4L2_BUF_FLAG_DONE`` flag
-	  are mutually exclusive. They can be both cleared however, then the
-	  buffer is in "dequeued" state, in the application domain so to
-	  say.
+      - ``V4L2_BUF_FLAG_LAST``
+      - 0x00100000
+      - Last buffer produced by the hardware. mem2mem codec drivers set
+	this flag on the capture queue for the last buffer when the
+	:ref:`VIDIOC_QUERYBUF` or
+	:ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl is called. Due to
+	hardware limitations, the last buffer may be empty. In this case
+	the driver will set the ``bytesused`` field to 0, regardless of
+	the format. Any Any subsequent call to the
+	:ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl will not block anymore,
+	but return an ``EPIPE`` error code.
+    * .. _`V4L2-BUF-FLAG-TIMESTAMP-MASK`:
 
-    -  .. _`V4L2-BUF-FLAG-ERROR`:
+      - ``V4L2_BUF_FLAG_TIMESTAMP_MASK``
+      - 0x0000e000
+      - Mask for timestamp types below. To test the timestamp type, mask
+	out bits not belonging to timestamp type by performing a logical
+	and operation with buffer flags and timestamp mask.
+    * .. _`V4L2-BUF-FLAG-TIMESTAMP-UNKNOWN`:
 
-       -  ``V4L2_BUF_FLAG_ERROR``
+      - ``V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN``
+      - 0x00000000
+      - Unknown timestamp type. This type is used by drivers before Linux
+	3.9 and may be either monotonic (see below) or realtime (wall
+	clock). Monotonic clock has been favoured in embedded systems
+	whereas most of the drivers use the realtime clock. Either kinds
+	of timestamps are available in user space via
+	:c:func:`clock_gettime` using clock IDs ``CLOCK_MONOTONIC``
+	and ``CLOCK_REALTIME``, respectively.
+    * .. _`V4L2-BUF-FLAG-TIMESTAMP-MONOTONIC`:
 
-       -  0x00000040
+      - ``V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC``
+      - 0x00002000
+      - The buffer timestamp has been taken from the ``CLOCK_MONOTONIC``
+	clock. To access the same clock outside V4L2, use
+	:c:func:`clock_gettime`.
+    * .. _`V4L2-BUF-FLAG-TIMESTAMP-COPY`:
 
-       -  When this flag is set, the buffer has been dequeued successfully,
-	  although the data might have been corrupted. This is recoverable,
-	  streaming may continue as normal and the buffer may be reused
-	  normally. Drivers set this flag when the ``VIDIOC_DQBUF`` ioctl is
-	  called.
+      - ``V4L2_BUF_FLAG_TIMESTAMP_COPY``
+      - 0x00004000
+      - The CAPTURE buffer timestamp has been taken from the corresponding
+	OUTPUT buffer. This flag applies only to mem2mem devices.
+    * .. _`V4L2-BUF-FLAG-TSTAMP-SRC-MASK`:
 
-    -  .. _`V4L2-BUF-FLAG-KEYFRAME`:
+      - ``V4L2_BUF_FLAG_TSTAMP_SRC_MASK``
+      - 0x00070000
+      - Mask for timestamp sources below. The timestamp source defines the
+	point of time the timestamp is taken in relation to the frame.
+	Logical 'and' operation between the ``flags`` field and
+	``V4L2_BUF_FLAG_TSTAMP_SRC_MASK`` produces the value of the
+	timestamp source. Applications must set the timestamp source when
+	``type`` refers to an output stream and
+	``V4L2_BUF_FLAG_TIMESTAMP_COPY`` is set.
+    * .. _`V4L2-BUF-FLAG-TSTAMP-SRC-EOF`:
 
-       -  ``V4L2_BUF_FLAG_KEYFRAME``
-
-       -  0x00000008
-
-       -  Drivers set or clear this flag when calling the ``VIDIOC_DQBUF``
-	  ioctl. It may be set by video capture devices when the buffer
-	  contains a compressed image which is a key frame (or field), i. e.
-	  can be decompressed on its own. Also known as an I-frame.
-	  Applications can set this bit when ``type`` refers to an output
-	  stream.
-
-    -  .. _`V4L2-BUF-FLAG-PFRAME`:
-
-       -  ``V4L2_BUF_FLAG_PFRAME``
-
-       -  0x00000010
-
-       -  Similar to ``V4L2_BUF_FLAG_KEYFRAME`` this flags predicted frames
-	  or fields which contain only differences to a previous key frame.
-	  Applications can set this bit when ``type`` refers to an output
-	  stream.
-
-    -  .. _`V4L2-BUF-FLAG-BFRAME`:
-
-       -  ``V4L2_BUF_FLAG_BFRAME``
-
-       -  0x00000020
-
-       -  Similar to ``V4L2_BUF_FLAG_KEYFRAME`` this flags a bi-directional
-	  predicted frame or field which contains only the differences
-	  between the current frame and both the preceding and following key
-	  frames to specify its content. Applications can set this bit when
-	  ``type`` refers to an output stream.
-
-    -  .. _`V4L2-BUF-FLAG-TIMECODE`:
-
-       -  ``V4L2_BUF_FLAG_TIMECODE``
-
-       -  0x00000100
-
-       -  The ``timecode`` field is valid. Drivers set or clear this flag
-	  when the ``VIDIOC_DQBUF`` ioctl is called. Applications can set
-	  this bit and the corresponding ``timecode`` structure when
-	  ``type`` refers to an output stream.
-
-    -  .. _`V4L2-BUF-FLAG-PREPARED`:
-
-       -  ``V4L2_BUF_FLAG_PREPARED``
-
-       -  0x00000400
-
-       -  The buffer has been prepared for I/O and can be queued by the
-	  application. Drivers set or clear this flag when the
-	  :ref:`VIDIOC_QUERYBUF`,
-	  :ref:`VIDIOC_PREPARE_BUF <VIDIOC_QBUF>`,
-	  :ref:`VIDIOC_QBUF` or
-	  :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl is called.
-
-    -  .. _`V4L2-BUF-FLAG-NO-CACHE-INVALIDATE`:
-
-       -  ``V4L2_BUF_FLAG_NO_CACHE_INVALIDATE``
-
-       -  0x00000800
-
-       -  Caches do not have to be invalidated for this buffer. Typically
-	  applications shall use this flag if the data captured in the
-	  buffer is not going to be touched by the CPU, instead the buffer
-	  will, probably, be passed on to a DMA-capable hardware unit for
-	  further processing or output.
-
-    -  .. _`V4L2-BUF-FLAG-NO-CACHE-CLEAN`:
-
-       -  ``V4L2_BUF_FLAG_NO_CACHE_CLEAN``
-
-       -  0x00001000
-
-       -  Caches do not have to be cleaned for this buffer. Typically
-	  applications shall use this flag for output buffers if the data in
-	  this buffer has not been created by the CPU but by some
-	  DMA-capable unit, in which case caches have not been used.
-
-    -  .. _`V4L2-BUF-FLAG-LAST`:
-
-       -  ``V4L2_BUF_FLAG_LAST``
-
-       -  0x00100000
-
-       -  Last buffer produced by the hardware. mem2mem codec drivers set
-	  this flag on the capture queue for the last buffer when the
-	  :ref:`VIDIOC_QUERYBUF` or
-	  :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl is called. Due to
-	  hardware limitations, the last buffer may be empty. In this case
-	  the driver will set the ``bytesused`` field to 0, regardless of
-	  the format. Any Any subsequent call to the
-	  :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl will not block anymore,
-	  but return an ``EPIPE`` error code.
-
-    -  .. _`V4L2-BUF-FLAG-TIMESTAMP-MASK`:
-
-       -  ``V4L2_BUF_FLAG_TIMESTAMP_MASK``
-
-       -  0x0000e000
-
-       -  Mask for timestamp types below. To test the timestamp type, mask
-	  out bits not belonging to timestamp type by performing a logical
-	  and operation with buffer flags and timestamp mask.
-
-    -  .. _`V4L2-BUF-FLAG-TIMESTAMP-UNKNOWN`:
-
-       -  ``V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN``
-
-       -  0x00000000
-
-       -  Unknown timestamp type. This type is used by drivers before Linux
-	  3.9 and may be either monotonic (see below) or realtime (wall
-	  clock). Monotonic clock has been favoured in embedded systems
-	  whereas most of the drivers use the realtime clock. Either kinds
-	  of timestamps are available in user space via
-	  :c:func:`clock_gettime(2)` using clock IDs ``CLOCK_MONOTONIC``
-	  and ``CLOCK_REALTIME``, respectively.
-
-    -  .. _`V4L2-BUF-FLAG-TIMESTAMP-MONOTONIC`:
-
-       -  ``V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC``
-
-       -  0x00002000
-
-       -  The buffer timestamp has been taken from the ``CLOCK_MONOTONIC``
-	  clock. To access the same clock outside V4L2, use
-	  :c:func:`clock_gettime(2)`.
-
-    -  .. _`V4L2-BUF-FLAG-TIMESTAMP-COPY`:
-
-       -  ``V4L2_BUF_FLAG_TIMESTAMP_COPY``
-
-       -  0x00004000
-
-       -  The CAPTURE buffer timestamp has been taken from the corresponding
-	  OUTPUT buffer. This flag applies only to mem2mem devices.
-
-    -  .. _`V4L2-BUF-FLAG-TSTAMP-SRC-MASK`:
-
-       -  ``V4L2_BUF_FLAG_TSTAMP_SRC_MASK``
-
-       -  0x00070000
-
-       -  Mask for timestamp sources below. The timestamp source defines the
-	  point of time the timestamp is taken in relation to the frame.
-	  Logical 'and' operation between the ``flags`` field and
-	  ``V4L2_BUF_FLAG_TSTAMP_SRC_MASK`` produces the value of the
-	  timestamp source. Applications must set the timestamp source when
-	  ``type`` refers to an output stream and
-	  ``V4L2_BUF_FLAG_TIMESTAMP_COPY`` is set.
-
-    -  .. _`V4L2-BUF-FLAG-TSTAMP-SRC-EOF`:
-
-       -  ``V4L2_BUF_FLAG_TSTAMP_SRC_EOF``
-
-       -  0x00000000
-
-       -  End Of Frame. The buffer timestamp has been taken when the last
-	  pixel of the frame has been received or the last pixel of the
-	  frame has been transmitted. In practice, software generated
-	  timestamps will typically be read from the clock a small amount of
-	  time after the last pixel has been received or transmitten,
-	  depending on the system and other activity in it.
-
-    -  .. _`V4L2-BUF-FLAG-TSTAMP-SRC-SOE`:
-
-       -  ``V4L2_BUF_FLAG_TSTAMP_SRC_SOE``
-
-       -  0x00010000
+      - ``V4L2_BUF_FLAG_TSTAMP_SRC_EOF``
+      - 0x00000000
+      - End Of Frame. The buffer timestamp has been taken when the last
+	pixel of the frame has been received or the last pixel of the
+	frame has been transmitted. In practice, software generated
+	timestamps will typically be read from the clock a small amount of
+	time after the last pixel has been received or transmitten,
+	depending on the system and other activity in it.
+    * .. _`V4L2-BUF-FLAG-TSTAMP-SRC-SOE`:
 
-       -  Start Of Exposure. The buffer timestamp has been taken when the
-	  exposure of the frame has begun. This is only valid for the
-	  ``V4L2_BUF_TYPE_VIDEO_CAPTURE`` buffer type.
+      - ``V4L2_BUF_FLAG_TSTAMP_SRC_SOE``
+      - 0x00010000
+      - Start Of Exposure. The buffer timestamp has been taken when the
+	exposure of the frame has begun. This is only valid for the
+	``V4L2_BUF_TYPE_VIDEO_CAPTURE`` buffer type.
 
 
 
-.. _v4l2-memory:
+.. c:type:: v4l2_memory
 
 enum v4l2_memory
 ================
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_MEMORY_MMAP``
-
-       -  1
-
-       -  The buffer is used for :ref:`memory mapping <mmap>` I/O.
-
-    -  .. row 2
-
-       -  ``V4L2_MEMORY_USERPTR``
-
-       -  2
-
-       -  The buffer is used for :ref:`user pointer <userp>` I/O.
-
-    -  .. row 3
-
-       -  ``V4L2_MEMORY_OVERLAY``
-
-       -  3
-
-       -  [to do]
-
-    -  .. row 4
-
-       -  ``V4L2_MEMORY_DMABUF``
-
-       -  4
-
-       -  The buffer is used for :ref:`DMA shared buffer <dmabuf>` I/O.
+    * - ``V4L2_MEMORY_MMAP``
+      - 1
+      - The buffer is used for :ref:`memory mapping <mmap>` I/O.
+    * - ``V4L2_MEMORY_USERPTR``
+      - 2
+      - The buffer is used for :ref:`user pointer <userp>` I/O.
+    * - ``V4L2_MEMORY_OVERLAY``
+      - 3
+      - [to do]
+    * - ``V4L2_MEMORY_DMABUF``
+      - 4
+      - The buffer is used for :ref:`DMA shared buffer <dmabuf>` I/O.
 
 
 
 Timecodes
 =========
 
-The :ref:`struct v4l2_timecode <v4l2-timecode>` structure is designed to hold a
+The struct :c:type:`v4l2_timecode` structure is designed to hold a
 :ref:`smpte12m` or similar timecode. (struct
-:c:type:`struct timeval` timestamps are stored in struct
-:ref:`v4l2_buffer <v4l2-buffer>` field ``timestamp``.)
+struct :c:type:`timeval` timestamps are stored in struct
+:c:type:`v4l2_buffer` field ``timestamp``.)
 
 
-.. _v4l2-timecode:
+.. c:type:: v4l2_timecode
 
 struct v4l2_timecode
 --------------------
 
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``type``
-
-       -  Frame rate the timecodes are based on, see :ref:`timecode-type`.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``flags``
-
-       -  Timecode flags, see :ref:`timecode-flags`.
-
-    -  .. row 3
-
-       -  __u8
-
-       -  ``frames``
-
-       -  Frame count, 0 ... 23/24/29/49/59, depending on the type of
-	  timecode.
-
-    -  .. row 4
-
-       -  __u8
-
-       -  ``seconds``
-
-       -  Seconds count, 0 ... 59. This is a binary, not BCD number.
-
-    -  .. row 5
-
-       -  __u8
-
-       -  ``minutes``
-
-       -  Minutes count, 0 ... 59. This is a binary, not BCD number.
-
-    -  .. row 6
-
-       -  __u8
-
-       -  ``hours``
-
-       -  Hours count, 0 ... 29. This is a binary, not BCD number.
-
-    -  .. row 7
-
-       -  __u8
-
-       -  ``userbits``\ [4]
-
-       -  The "user group" bits from the timecode.
+    * - __u32
+      - ``type``
+      - Frame rate the timecodes are based on, see :ref:`timecode-type`.
+    * - __u32
+      - ``flags``
+      - Timecode flags, see :ref:`timecode-flags`.
+    * - __u8
+      - ``frames``
+      - Frame count, 0 ... 23/24/29/49/59, depending on the type of
+	timecode.
+    * - __u8
+      - ``seconds``
+      - Seconds count, 0 ... 59. This is a binary, not BCD number.
+    * - __u8
+      - ``minutes``
+      - Minutes count, 0 ... 59. This is a binary, not BCD number.
+    * - __u8
+      - ``hours``
+      - Hours count, 0 ... 29. This is a binary, not BCD number.
+    * - __u8
+      - ``userbits``\ [4]
+      - The "user group" bits from the timecode.
 
 
 
@@ -879,51 +607,28 @@
 Timecode Types
 --------------
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_TC_TYPE_24FPS``
-
-       -  1
-
-       -  24 frames per second, i. e. film.
-
-    -  .. row 2
-
-       -  ``V4L2_TC_TYPE_25FPS``
-
-       -  2
-
-       -  25 frames per second, i. e. PAL or SECAM video.
-
-    -  .. row 3
-
-       -  ``V4L2_TC_TYPE_30FPS``
-
-       -  3
-
-       -  30 frames per second, i. e. NTSC video.
-
-    -  .. row 4
-
-       -  ``V4L2_TC_TYPE_50FPS``
-
-       -  4
-
-       -
-
-    -  .. row 5
-
-       -  ``V4L2_TC_TYPE_60FPS``
-
-       -  5
-
-       -
+    * - ``V4L2_TC_TYPE_24FPS``
+      - 1
+      - 24 frames per second, i. e. film.
+    * - ``V4L2_TC_TYPE_25FPS``
+      - 2
+      - 25 frames per second, i. e. PAL or SECAM video.
+    * - ``V4L2_TC_TYPE_30FPS``
+      - 3
+      - 30 frames per second, i. e. NTSC video.
+    * - ``V4L2_TC_TYPE_50FPS``
+      - 4
+      -
+    * - ``V4L2_TC_TYPE_60FPS``
+      - 5
+      -
 
 
 
@@ -932,51 +637,28 @@
 Timecode Flags
 --------------
 
+.. tabularcolumns:: |p{6.6cm}|p{1.4cm}|p{9.5cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_TC_FLAG_DROPFRAME``
-
-       -  0x0001
-
-       -  Indicates "drop frame" semantics for counting frames in 29.97 fps
-	  material. When set, frame numbers 0 and 1 at the start of each
-	  minute, except minutes 0, 10, 20, 30, 40, 50 are omitted from the
-	  count.
-
-    -  .. row 2
-
-       -  ``V4L2_TC_FLAG_COLORFRAME``
-
-       -  0x0002
-
-       -  The "color frame" flag.
-
-    -  .. row 3
-
-       -  ``V4L2_TC_USERBITS_field``
-
-       -  0x000C
-
-       -  Field mask for the "binary group flags".
-
-    -  .. row 4
-
-       -  ``V4L2_TC_USERBITS_USERDEFINED``
-
-       -  0x0000
-
-       -  Unspecified format.
-
-    -  .. row 5
-
-       -  ``V4L2_TC_USERBITS_8BITCHARS``
-
-       -  0x0008
-
-       -  8-bit ISO characters.
+    * - ``V4L2_TC_FLAG_DROPFRAME``
+      - 0x0001
+      - Indicates "drop frame" semantics for counting frames in 29.97 fps
+	material. When set, frame numbers 0 and 1 at the start of each
+	minute, except minutes 0, 10, 20, 30, 40, 50 are omitted from the
+	count.
+    * - ``V4L2_TC_FLAG_COLORFRAME``
+      - 0x0002
+      - The "color frame" flag.
+    * - ``V4L2_TC_USERBITS_field``
+      - 0x000C
+      - Field mask for the "binary group flags".
+    * - ``V4L2_TC_USERBITS_USERDEFINED``
+      - 0x0000
+      - Unspecified format.
+    * - ``V4L2_TC_USERBITS_8BITCHARS``
+      - 0x0008
+      - 8-bit ISO characters.
diff --git a/Documentation/media/uapi/v4l/control.rst b/Documentation/media/uapi/v4l/control.rst
index 10ab53d..d3f1450 100644
--- a/Documentation/media/uapi/v4l/control.rst
+++ b/Documentation/media/uapi/v4l/control.rst
@@ -191,109 +191,48 @@
 
 
 
+.. tabularcolumns:: |p{5.5cm}|p{12cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
+    :widths: 11 24
 
-
-    -  .. row 1
-
-       -  ``V4L2_COLORFX_NONE``
-
-       -  Color effect is disabled.
-
-    -  .. row 2
-
-       -  ``V4L2_COLORFX_ANTIQUE``
-
-       -  An aging (old photo) effect.
-
-    -  .. row 3
-
-       -  ``V4L2_COLORFX_ART_FREEZE``
-
-       -  Frost color effect.
-
-    -  .. row 4
-
-       -  ``V4L2_COLORFX_AQUA``
-
-       -  Water color, cool tone.
-
-    -  .. row 5
-
-       -  ``V4L2_COLORFX_BW``
-
-       -  Black and white.
-
-    -  .. row 6
-
-       -  ``V4L2_COLORFX_EMBOSS``
-
-       -  Emboss, the highlights and shadows replace light/dark boundaries
-	  and low contrast areas are set to a gray background.
-
-    -  .. row 7
-
-       -  ``V4L2_COLORFX_GRASS_GREEN``
-
-       -  Grass green.
-
-    -  .. row 8
-
-       -  ``V4L2_COLORFX_NEGATIVE``
-
-       -  Negative.
-
-    -  .. row 9
-
-       -  ``V4L2_COLORFX_SEPIA``
-
-       -  Sepia tone.
-
-    -  .. row 10
-
-       -  ``V4L2_COLORFX_SKETCH``
-
-       -  Sketch.
-
-    -  .. row 11
-
-       -  ``V4L2_COLORFX_SKIN_WHITEN``
-
-       -  Skin whiten.
-
-    -  .. row 12
-
-       -  ``V4L2_COLORFX_SKY_BLUE``
-
-       -  Sky blue.
-
-    -  .. row 13
-
-       -  ``V4L2_COLORFX_SOLARIZATION``
-
-       -  Solarization, the image is partially reversed in tone, only color
-	  values above or below a certain threshold are inverted.
-
-    -  .. row 14
-
-       -  ``V4L2_COLORFX_SILHOUETTE``
-
-       -  Silhouette (outline).
-
-    -  .. row 15
-
-       -  ``V4L2_COLORFX_VIVID``
-
-       -  Vivid colors.
-
-    -  .. row 16
-
-       -  ``V4L2_COLORFX_SET_CBCR``
-
-       -  The Cb and Cr chroma components are replaced by fixed coefficients
-	  determined by ``V4L2_CID_COLORFX_CBCR`` control.
+    * - ``V4L2_COLORFX_NONE``
+      - Color effect is disabled.
+    * - ``V4L2_COLORFX_ANTIQUE``
+      - An aging (old photo) effect.
+    * - ``V4L2_COLORFX_ART_FREEZE``
+      - Frost color effect.
+    * - ``V4L2_COLORFX_AQUA``
+      - Water color, cool tone.
+    * - ``V4L2_COLORFX_BW``
+      - Black and white.
+    * - ``V4L2_COLORFX_EMBOSS``
+      - Emboss, the highlights and shadows replace light/dark boundaries
+	and low contrast areas are set to a gray background.
+    * - ``V4L2_COLORFX_GRASS_GREEN``
+      - Grass green.
+    * - ``V4L2_COLORFX_NEGATIVE``
+      - Negative.
+    * - ``V4L2_COLORFX_SEPIA``
+      - Sepia tone.
+    * - ``V4L2_COLORFX_SKETCH``
+      - Sketch.
+    * - ``V4L2_COLORFX_SKIN_WHITEN``
+      - Skin whiten.
+    * - ``V4L2_COLORFX_SKY_BLUE``
+      - Sky blue.
+    * - ``V4L2_COLORFX_SOLARIZATION``
+      - Solarization, the image is partially reversed in tone, only color
+	values above or below a certain threshold are inverted.
+    * - ``V4L2_COLORFX_SILHOUETTE``
+      - Silhouette (outline).
+    * - ``V4L2_COLORFX_VIVID``
+      - Vivid colors.
+    * - ``V4L2_COLORFX_SET_CBCR``
+      - The Cb and Cr chroma components are replaced by fixed coefficients
+	determined by ``V4L2_CID_COLORFX_CBCR`` control.
 
 
 
diff --git a/Documentation/media/uapi/v4l/crop.rst b/Documentation/media/uapi/v4l/crop.rst
index 0913822..3ea733a 100644
--- a/Documentation/media/uapi/v4l/crop.rst
+++ b/Documentation/media/uapi/v4l/crop.rst
@@ -15,7 +15,9 @@
 Applications can use the following API to select an area in the video
 signal, query the default area and the hardware limits.
 
-.. note:: Despite their name, the :ref:`VIDIOC_CROPCAP <VIDIOC_CROPCAP>`,
+.. note::
+
+   Despite their name, the :ref:`VIDIOC_CROPCAP <VIDIOC_CROPCAP>`,
    :ref:`VIDIOC_G_CROP <VIDIOC_G_CROP>` and :ref:`VIDIOC_S_CROP
    <VIDIOC_G_CROP>` ioctls apply to input as well as output devices.
 
@@ -38,7 +40,9 @@
 :ref:`VIDIOC_S_CROP <VIDIOC_G_CROP>` ioctls. Their size (and position
 where applicable) will be fixed in this case.
 
-.. note:: All capture and output devices must support the
+.. note::
+
+   All capture and output devices must support the
    :ref:`VIDIOC_CROPCAP <VIDIOC_CROPCAP>` ioctl such that applications
    can determine if scaling takes place.
 
@@ -61,7 +65,7 @@
 
 For capture devices the coordinates of the top left corner, width and
 height of the area which can be sampled is given by the ``bounds``
-substructure of the struct :ref:`v4l2_cropcap <v4l2-cropcap>` returned
+substructure of the struct :c:type:`v4l2_cropcap` returned
 by the :ref:`VIDIOC_CROPCAP <VIDIOC_CROPCAP>` ioctl. To support a wide
 range of hardware this specification does not define an origin or units.
 However by convention drivers should horizontally count unscaled samples
@@ -73,8 +77,8 @@
 
 The top left corner, width and height of the source rectangle, that is
 the area actually sampled, is given by struct
-:ref:`v4l2_crop <v4l2-crop>` using the same coordinate system as
-struct :ref:`v4l2_cropcap <v4l2-cropcap>`. Applications can use the
+:c:type:`v4l2_crop` using the same coordinate system as
+struct :c:type:`v4l2_cropcap`. Applications can use the
 :ref:`VIDIOC_G_CROP <VIDIOC_G_CROP>` and :ref:`VIDIOC_S_CROP <VIDIOC_G_CROP>`
 ioctls to get and set this rectangle. It must lie completely within the
 capture boundaries and the driver may further adjust the requested size
@@ -82,7 +86,7 @@
 
 Each capture device has a default source rectangle, given by the
 ``defrect`` substructure of struct
-:ref:`v4l2_cropcap <v4l2-cropcap>`. The center of this rectangle
+:c:type:`v4l2_cropcap`. The center of this rectangle
 shall align with the center of the active picture area of the video
 signal, and cover what the driver writer considers the complete picture.
 Drivers shall reset the source rectangle to the default when the driver
@@ -100,11 +104,11 @@
 limitations. It may only scale up or down, support only discrete scaling
 factors, or have different scaling abilities in horizontal and vertical
 direction. Also it may not support scaling at all. At the same time the
-struct :ref:`v4l2_crop <v4l2-crop>` rectangle may have to be aligned,
+struct :c:type:`v4l2_crop` rectangle may have to be aligned,
 and both the source and target rectangles may have arbitrary upper and
 lower size limits. In particular the maximum ``width`` and ``height`` in
-struct :ref:`v4l2_crop <v4l2-crop>` may be smaller than the struct
-:ref:`v4l2_cropcap <v4l2-cropcap>`. ``bounds`` area. Therefore, as
+struct :c:type:`v4l2_crop` may be smaller than the struct
+:c:type:`v4l2_cropcap`. ``bounds`` area. Therefore, as
 usual, drivers are expected to adjust the requested parameters and
 return the actual values selected.
 
@@ -144,7 +148,9 @@
 work without special preparations. More advanced applications should
 ensure the parameters are suitable before starting I/O.
 
-.. note:: On the next two examples, a video capture device is assumed;
+.. note::
+
+   On the next two examples, a video capture device is assumed;
    change ``V4L2_BUF_TYPE_VIDEO_CAPTURE`` for other types of device.
 
 Example: Resetting the cropping parameters
diff --git a/Documentation/media/uapi/v4l/dev-capture.rst b/Documentation/media/uapi/v4l/dev-capture.rst
index 8d04947..32b3205 100644
--- a/Documentation/media/uapi/v4l/dev-capture.rst
+++ b/Documentation/media/uapi/v4l/dev-capture.rst
@@ -26,7 +26,7 @@
 Devices supporting the video capture interface set the
 ``V4L2_CAP_VIDEO_CAPTURE`` or ``V4L2_CAP_VIDEO_CAPTURE_MPLANE`` flag in
 the ``capabilities`` field of struct
-:ref:`v4l2_capability <v4l2-capability>` returned by the
+:c:type:`v4l2_capability` returned by the
 :ref:`VIDIOC_QUERYCAP` ioctl. As secondary device
 functions they may also support the :ref:`video overlay <overlay>`
 (``V4L2_CAP_VIDEO_OVERLAY``) and the :ref:`raw VBI capture <raw-vbi>`
@@ -64,18 +64,18 @@
 defaults. An example is given in :ref:`crop`.
 
 To query the current image format applications set the ``type`` field of
-a struct :ref:`v4l2_format <v4l2-format>` to
+a struct :c:type:`v4l2_format` to
 ``V4L2_BUF_TYPE_VIDEO_CAPTURE`` or
 ``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE`` and call the
 :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` ioctl with a pointer to this
 structure. Drivers fill the struct
-:ref:`v4l2_pix_format <v4l2-pix-format>` ``pix`` or the struct
-:ref:`v4l2_pix_format_mplane <v4l2-pix-format-mplane>` ``pix_mp``
+:c:type:`v4l2_pix_format` ``pix`` or the struct
+:c:type:`v4l2_pix_format_mplane` ``pix_mp``
 member of the ``fmt`` union.
 
 To request different parameters applications set the ``type`` field of a
-struct :ref:`v4l2_format <v4l2-format>` as above and initialize all
-fields of the struct :ref:`v4l2_pix_format <v4l2-pix-format>`
+struct :c:type:`v4l2_format` as above and initialize all
+fields of the struct :c:type:`v4l2_pix_format`
 ``vbi`` member of the ``fmt`` union, or better just modify the results
 of :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>`, and call the :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>`
 ioctl with a pointer to this structure. Drivers may adjust the
@@ -86,8 +86,8 @@
 can be used to learn about hardware limitations without disabling I/O or
 possibly time consuming hardware preparations.
 
-The contents of struct :ref:`v4l2_pix_format <v4l2-pix-format>` and
-struct :ref:`v4l2_pix_format_mplane <v4l2-pix-format-mplane>` are
+The contents of struct :c:type:`v4l2_pix_format` and
+struct :c:type:`v4l2_pix_format_mplane` are
 discussed in :ref:`pixfmt`. See also the specification of the
 :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>`, :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` and :ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>` ioctls for
 details. Video capture devices must implement both the :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>`
diff --git a/Documentation/media/uapi/v4l/dev-codec.rst b/Documentation/media/uapi/v4l/dev-codec.rst
index dfb2032..d9f2184 100644
--- a/Documentation/media/uapi/v4l/dev-codec.rst
+++ b/Documentation/media/uapi/v4l/dev-codec.rst
@@ -21,7 +21,9 @@
 Video compression codecs use the MPEG controls to setup their codec
 parameters
 
-.. note:: The MPEG controls actually support many more codecs than
+.. note::
+
+   The MPEG controls actually support many more codecs than
    just MPEG. See :ref:`mpeg-controls`.
 
 Memory-to-memory devices can often be used as a shared resource: you can
diff --git a/Documentation/media/uapi/v4l/dev-osd.rst b/Documentation/media/uapi/v4l/dev-osd.rst
index fadda13..71da85e 100644
--- a/Documentation/media/uapi/v4l/dev-osd.rst
+++ b/Documentation/media/uapi/v4l/dev-osd.rst
@@ -16,7 +16,9 @@
 The OSD function is accessible through the same character special file
 as the :ref:`Video Output <capture>` function.
 
-.. note:: The default function of such a ``/dev/video`` device is video
+.. note::
+
+   The default function of such a ``/dev/video`` device is video
    capturing or output. The OSD function is only available after calling
    the :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl.
 
@@ -26,7 +28,7 @@
 
 Devices supporting the *Video Output Overlay* interface set the
 ``V4L2_CAP_VIDEO_OUTPUT_OVERLAY`` flag in the ``capabilities`` field of
-struct :ref:`v4l2_capability <v4l2-capability>` returned by the
+struct :c:type:`v4l2_capability` returned by the
 :ref:`VIDIOC_QUERYCAP` ioctl.
 
 
@@ -39,11 +41,11 @@
 applications can find the corresponding framebuffer device by calling
 the :ref:`VIDIOC_G_FBUF <VIDIOC_G_FBUF>` ioctl. It returns, amongst
 other information, the physical address of the framebuffer in the
-``base`` field of struct :ref:`v4l2_framebuffer <v4l2-framebuffer>`.
+``base`` field of struct :c:type:`v4l2_framebuffer`.
 The framebuffer device ioctl ``FBIOGET_FSCREENINFO`` returns the same
 address in the ``smem_start`` field of struct
-:c:type:`struct fb_fix_screeninfo`. The ``FBIOGET_FSCREENINFO``
-ioctl and struct :c:type:`struct fb_fix_screeninfo` are defined in
+struct :c:type:`fb_fix_screeninfo`. The ``FBIOGET_FSCREENINFO``
+ioctl and struct :c:type:`fb_fix_screeninfo` are defined in
 the ``linux/fb.h`` header file.
 
 The width and height of the framebuffer depends on the current video
@@ -112,18 +114,18 @@
 (or none) of the clipping/blending methods defined for the
 :ref:`Video Overlay <overlay>` interface.
 
-A struct :ref:`v4l2_window <v4l2-window>` defines the size of the
+A struct :c:type:`v4l2_window` defines the size of the
 source rectangle, its position in the framebuffer and the
 clipping/blending method to be used for the overlay. To get the current
 parameters applications set the ``type`` field of a struct
-:ref:`v4l2_format <v4l2-format>` to
+:c:type:`v4l2_format` to
 ``V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY`` and call the
 :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` ioctl. The driver fills the
-:ref:`struct v4l2_window <v4l2-window>` substructure named ``win``. It is not
+struct :c:type:`v4l2_window` substructure named ``win``. It is not
 possible to retrieve a previously programmed clipping list or bitmap.
 
 To program the source rectangle applications set the ``type`` field of a
-struct :ref:`v4l2_format <v4l2-format>` to
+struct :c:type:`v4l2_format` to
 ``V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY``, initialize the ``win``
 substructure and call the :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl.
 The driver adjusts the parameters against hardware limits and returns
@@ -132,10 +134,10 @@
 about driver capabilities without actually changing driver state. Unlike
 :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` this also works after the overlay has been enabled.
 
-A struct :ref:`v4l2_crop <v4l2-crop>` defines the size and position
+A struct :c:type:`v4l2_crop` defines the size and position
 of the target rectangle. The scaling factor of the overlay is implied by
-the width and height given in struct :ref:`v4l2_window <v4l2-window>`
-and struct :ref:`v4l2_crop <v4l2-crop>`. The cropping API applies to
+the width and height given in struct :c:type:`v4l2_window`
+and struct :c:type:`v4l2_crop`. The cropping API applies to
 *Video Output* and *Video Output Overlay* devices in the same way as to
 *Video Capture* and *Video Overlay* devices, merely reversing the
 direction of the data flow. For more information see :ref:`crop`.
diff --git a/Documentation/media/uapi/v4l/dev-output.rst b/Documentation/media/uapi/v4l/dev-output.rst
index 4f1123a..25ae8ec 100644
--- a/Documentation/media/uapi/v4l/dev-output.rst
+++ b/Documentation/media/uapi/v4l/dev-output.rst
@@ -16,7 +16,7 @@
 ``/dev/video`` is typically a symbolic link to the preferred video
 device.
 
-..note:: The same device file names are used also for video capture devices.
+.. note:: The same device file names are used also for video capture devices.
 
 
 Querying Capabilities
@@ -25,7 +25,7 @@
 Devices supporting the video output interface set the
 ``V4L2_CAP_VIDEO_OUTPUT`` or ``V4L2_CAP_VIDEO_OUTPUT_MPLANE`` flag in
 the ``capabilities`` field of struct
-:ref:`v4l2_capability <v4l2-capability>` returned by the
+:c:type:`v4l2_capability` returned by the
 :ref:`VIDIOC_QUERYCAP` ioctl. As secondary device
 functions they may also support the :ref:`raw VBI output <raw-vbi>`
 (``V4L2_CAP_VBI_OUTPUT``) interface. At least one of the read/write or
@@ -62,17 +62,17 @@
 defaults. An example is given in :ref:`crop`.
 
 To query the current image format applications set the ``type`` field of
-a struct :ref:`v4l2_format <v4l2-format>` to
+a struct :c:type:`v4l2_format` to
 ``V4L2_BUF_TYPE_VIDEO_OUTPUT`` or ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``
 and call the :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` ioctl with a pointer
 to this structure. Drivers fill the struct
-:ref:`v4l2_pix_format <v4l2-pix-format>` ``pix`` or the struct
-:ref:`v4l2_pix_format_mplane <v4l2-pix-format-mplane>` ``pix_mp``
+:c:type:`v4l2_pix_format` ``pix`` or the struct
+:c:type:`v4l2_pix_format_mplane` ``pix_mp``
 member of the ``fmt`` union.
 
 To request different parameters applications set the ``type`` field of a
-struct :ref:`v4l2_format <v4l2-format>` as above and initialize all
-fields of the struct :ref:`v4l2_pix_format <v4l2-pix-format>`
+struct :c:type:`v4l2_format` as above and initialize all
+fields of the struct :c:type:`v4l2_pix_format`
 ``vbi`` member of the ``fmt`` union, or better just modify the results
 of :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>`, and call the :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>`
 ioctl with a pointer to this structure. Drivers may adjust the
@@ -83,8 +83,8 @@
 can be used to learn about hardware limitations without disabling I/O or
 possibly time consuming hardware preparations.
 
-The contents of struct :ref:`v4l2_pix_format <v4l2-pix-format>` and
-struct :ref:`v4l2_pix_format_mplane <v4l2-pix-format-mplane>` are
+The contents of struct :c:type:`v4l2_pix_format` and
+struct :c:type:`v4l2_pix_format_mplane` are
 discussed in :ref:`pixfmt`. See also the specification of the
 :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>`, :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` and :ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>` ioctls for
 details. Video output devices must implement both the :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>`
diff --git a/Documentation/media/uapi/v4l/dev-overlay.rst b/Documentation/media/uapi/v4l/dev-overlay.rst
index 92b4471..9be14b5 100644
--- a/Documentation/media/uapi/v4l/dev-overlay.rst
+++ b/Documentation/media/uapi/v4l/dev-overlay.rst
@@ -19,7 +19,9 @@
 Video overlay devices are accessed through the same character special
 files as :ref:`video capture <capture>` devices.
 
-.. note:: The default function of a ``/dev/video`` device is video
+.. note::
+
+   The default function of a ``/dev/video`` device is video
    capturing. The overlay function is only available after calling
    the :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl.
 
@@ -41,7 +43,7 @@
 
 Devices supporting the video overlay interface set the
 ``V4L2_CAP_VIDEO_OVERLAY`` flag in the ``capabilities`` field of struct
-:ref:`v4l2_capability <v4l2-capability>` returned by the
+:c:type:`v4l2_capability` returned by the
 :ref:`VIDIOC_QUERYCAP` ioctl. The overlay I/O
 method specified below must be supported. Tuners and audio inputs are
 optional.
@@ -117,17 +119,17 @@
 given in :ref:`crop`.
 
 The overlay window is described by a struct
-:ref:`v4l2_window <v4l2-window>`. It defines the size of the image,
+:c:type:`v4l2_window`. It defines the size of the image,
 its position over the graphics surface and the clipping to be applied.
 To get the current parameters applications set the ``type`` field of a
-struct :ref:`v4l2_format <v4l2-format>` to
+struct :c:type:`v4l2_format` to
 ``V4L2_BUF_TYPE_VIDEO_OVERLAY`` and call the
 :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` ioctl. The driver fills the
-:ref:`struct v4l2_window <v4l2-window>` substructure named ``win``. It is not
+struct :c:type:`v4l2_window` substructure named ``win``. It is not
 possible to retrieve a previously programmed clipping list or bitmap.
 
 To program the overlay window applications set the ``type`` field of a
-struct :ref:`v4l2_format <v4l2-format>` to
+struct :c:type:`v4l2_format` to
 ``V4L2_BUF_TYPE_VIDEO_OVERLAY``, initialize the ``win`` substructure and
 call the :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl. The driver
 adjusts the parameters against hardware limits and returns the actual
@@ -137,7 +139,7 @@
 :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` this also works after the overlay has been enabled.
 
 The scaling factor of the overlaid image is implied by the width and
-height given in struct :ref:`v4l2_window <v4l2-window>` and the size
+height given in struct :c:type:`v4l2_window` and the size
 of the cropping rectangle. For more information see :ref:`crop`.
 
 When simultaneous capturing and overlay is supported and the hardware
@@ -147,7 +149,7 @@
 code or return accordingly modified parameters.
 
 
-.. _v4l2-window:
+.. c:type:: v4l2_window
 
 struct v4l2_window
 ------------------
@@ -173,7 +175,7 @@
     :ref:`VIDIOC_S_FBUF <VIDIOC_G_FBUF>` applications set this field
     to the desired pixel value for the chroma key. The format is the
     same as the pixel format of the framebuffer (struct
-    :ref:`v4l2_framebuffer <v4l2-framebuffer>` ``fmt.pixelformat``
+    :c:type:`v4l2_framebuffer` ``fmt.pixelformat``
     field), with bytes in host order. E. g. for
     :ref:`V4L2_PIX_FMT_BGR24 <V4L2-PIX-FMT-BGR32>` the value should
     be 0xRRGGBB on a little endian, 0xBBGGRR on a big endian host.
@@ -236,13 +238,15 @@
     :ref:`VIDIOC_S_FBUF <VIDIOC_G_FBUF>`,
     :ref:`framebuffer-flags`).
 
-    .. note:: This field was added in Linux 2.6.23, extending the
-       structure. However the :ref:`VIDIOC_[G|S|TRY]_FMT <VIDIOC_G_FMT>`
-       ioctls, which take a pointer to a :ref:`v4l2_format <v4l2-format>`
-       parent structure with padding bytes at the end, are not affected.
+.. note::
+
+   This field was added in Linux 2.6.23, extending the
+   structure. However the :ref:`VIDIOC_[G|S|TRY]_FMT <VIDIOC_G_FMT>`
+   ioctls, which take a pointer to a :c:type:`v4l2_format`
+   parent structure with padding bytes at the end, are not affected.
 
 
-.. _v4l2-clip:
+.. c:type:: v4l2_clip
 
 struct v4l2_clip [#f4]_
 -----------------------
@@ -258,7 +262,7 @@
     linked list of clipping rectangles.
 
 
-.. _v4l2-rect:
+.. c:type:: v4l2_rect
 
 struct v4l2_rect
 ----------------
diff --git a/Documentation/media/uapi/v4l/dev-radio.rst b/Documentation/media/uapi/v4l/dev-radio.rst
index 5ff7cde..2b5b836 100644
--- a/Documentation/media/uapi/v4l/dev-radio.rst
+++ b/Documentation/media/uapi/v4l/dev-radio.rst
@@ -20,7 +20,7 @@
 Devices supporting the radio interface set the ``V4L2_CAP_RADIO`` and
 ``V4L2_CAP_TUNER`` or ``V4L2_CAP_MODULATOR`` flag in the
 ``capabilities`` field of struct
-:ref:`v4l2_capability <v4l2-capability>` returned by the
+:c:type:`v4l2_capability` returned by the
 :ref:`VIDIOC_QUERYCAP` ioctl. Other combinations of
 capability flags are reserved for future extensions.
 
diff --git a/Documentation/media/uapi/v4l/dev-raw-vbi.rst b/Documentation/media/uapi/v4l/dev-raw-vbi.rst
index d5a4b35..b82d837 100644
--- a/Documentation/media/uapi/v4l/dev-raw-vbi.rst
+++ b/Documentation/media/uapi/v4l/dev-raw-vbi.rst
@@ -39,7 +39,7 @@
 Devices supporting the raw VBI capturing or output API set the
 ``V4L2_CAP_VBI_CAPTURE`` or ``V4L2_CAP_VBI_OUTPUT`` flags, respectively,
 in the ``capabilities`` field of struct
-:ref:`v4l2_capability <v4l2-capability>` returned by the
+:c:type:`v4l2_capability` returned by the
 :ref:`VIDIOC_QUERYCAP` ioctl. At least one of the
 read/write, streaming or asynchronous I/O methods must be supported. VBI
 devices may or may not have a tuner or modulator.
@@ -69,16 +69,16 @@
 parameters and then checking if the actual parameters are suitable.
 
 To query the current raw VBI capture parameters applications set the
-``type`` field of a struct :ref:`v4l2_format <v4l2-format>` to
+``type`` field of a struct :c:type:`v4l2_format` to
 ``V4L2_BUF_TYPE_VBI_CAPTURE`` or ``V4L2_BUF_TYPE_VBI_OUTPUT``, and call
 the :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` ioctl with a pointer to this
 structure. Drivers fill the struct
-:ref:`v4l2_vbi_format <v4l2-vbi-format>` ``vbi`` member of the
+:c:type:`v4l2_vbi_format` ``vbi`` member of the
 ``fmt`` union.
 
 To request different parameters applications set the ``type`` field of a
-struct :ref:`v4l2_format <v4l2-format>` as above and initialize all
-fields of the struct :ref:`v4l2_vbi_format <v4l2-vbi-format>`
+struct :c:type:`v4l2_format` as above and initialize all
+fields of the struct :c:type:`v4l2_vbi_format`
 ``vbi`` member of the ``fmt`` union, or better just modify the results
 of :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>`, and call the :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>`
 ioctl with a pointer to this structure. Drivers return an ``EINVAL`` error
@@ -99,131 +99,91 @@
 and always returns default parameters as :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` does.
 :ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>` is optional.
 
+.. tabularcolumns:: |p{2.4cm}|p{4.4cm}|p{10.7cm}|
 
-.. _v4l2-vbi-format:
+.. c:type:: v4l2_vbi_format
+
+.. cssclass:: longtable
 
 .. flat-table:: struct v4l2_vbi_format
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
+    * - __u32
+      - ``sampling_rate``
+      - Samples per second, i. e. unit 1 Hz.
+    * - __u32
+      - ``offset``
+      - Horizontal offset of the VBI image, relative to the leading edge
+	of the line synchronization pulse and counted in samples: The
+	first sample in the VBI image will be located ``offset`` /
+	``sampling_rate`` seconds following the leading edge. See also
+	:ref:`vbi-hsync`.
+    * - __u32
+      - ``samples_per_line``
+      -
+    * - __u32
+      - ``sample_format``
+      - Defines the sample format as in :ref:`pixfmt`, a
+	four-character-code. [#f2]_ Usually this is ``V4L2_PIX_FMT_GREY``,
+	i. e. each sample consists of 8 bits with lower values oriented
+	towards the black level. Do not assume any other correlation of
+	values with the signal level. For example, the MSB does not
+	necessarily indicate if the signal is 'high' or 'low' because 128
+	may not be the mean value of the signal. Drivers shall not convert
+	the sample format by software.
+    * - __u32
+      - ``start``\ [#f2]_
+      - This is the scanning system line number associated with the first
+	line of the VBI image, of the first and the second field
+	respectively. See :ref:`vbi-525` and :ref:`vbi-625` for valid
+	values. The ``V4L2_VBI_ITU_525_F1_START``,
+	``V4L2_VBI_ITU_525_F2_START``, ``V4L2_VBI_ITU_625_F1_START`` and
+	``V4L2_VBI_ITU_625_F2_START`` defines give the start line numbers
+	for each field for each 525 or 625 line format as a convenience.
+	Don't forget that ITU line numbering starts at 1, not 0. VBI input
+	drivers can return start values 0 if the hardware cannot reliable
+	identify scanning lines, VBI acquisition may not require this
+	information.
+    * - __u32
+      - ``count``\ [#f2]_
+      - The number of lines in the first and second field image,
+	respectively.
+    * - :cspan:`2`
 
-    -  .. row 1
+	Drivers should be as flexibility as possible. For example, it may
+	be possible to extend or move the VBI capture window down to the
+	picture area, implementing a 'full field mode' to capture data
+	service transmissions embedded in the picture.
 
-       -  __u32
+	An application can set the first or second ``count`` value to zero
+	if no data is required from the respective field; ``count``\ [1]
+	if the scanning system is progressive, i. e. not interlaced. The
+	corresponding start value shall be ignored by the application and
+	driver. Anyway, drivers may not support single field capturing and
+	return both count values non-zero.
 
-       -  ``sampling_rate``
+	Both ``count`` values set to zero, or line numbers are outside the
+	bounds depicted\ [#f4]_, or a field image covering lines of two
+	fields, are invalid and shall not be returned by the driver.
 
-       -  Samples per second, i. e. unit 1 Hz.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``offset``
-
-       -  Horizontal offset of the VBI image, relative to the leading edge
-	  of the line synchronization pulse and counted in samples: The
-	  first sample in the VBI image will be located ``offset`` /
-	  ``sampling_rate`` seconds following the leading edge. See also
-	  :ref:`vbi-hsync`.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``samples_per_line``
-
-       -
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``sample_format``
-
-       -  Defines the sample format as in :ref:`pixfmt`, a
-	  four-character-code. [#f2]_ Usually this is ``V4L2_PIX_FMT_GREY``,
-	  i. e. each sample consists of 8 bits with lower values oriented
-	  towards the black level. Do not assume any other correlation of
-	  values with the signal level. For example, the MSB does not
-	  necessarily indicate if the signal is 'high' or 'low' because 128
-	  may not be the mean value of the signal. Drivers shall not convert
-	  the sample format by software.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``start``\ [#f2]_
-
-       -  This is the scanning system line number associated with the first
-	  line of the VBI image, of the first and the second field
-	  respectively. See :ref:`vbi-525` and :ref:`vbi-625` for valid
-	  values. The ``V4L2_VBI_ITU_525_F1_START``,
-	  ``V4L2_VBI_ITU_525_F2_START``, ``V4L2_VBI_ITU_625_F1_START`` and
-	  ``V4L2_VBI_ITU_625_F2_START`` defines give the start line numbers
-	  for each field for each 525 or 625 line format as a convenience.
-	  Don't forget that ITU line numbering starts at 1, not 0. VBI input
-	  drivers can return start values 0 if the hardware cannot reliable
-	  identify scanning lines, VBI acquisition may not require this
-	  information.
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``count``\ [#f2]_
-
-       -  The number of lines in the first and second field image,
-	  respectively.
-
-    -  .. row 7
-
-       -  :cspan:`2`
-
-	  Drivers should be as flexibility as possible. For example, it may
-	  be possible to extend or move the VBI capture window down to the
-	  picture area, implementing a 'full field mode' to capture data
-	  service transmissions embedded in the picture.
-
-	  An application can set the first or second ``count`` value to zero
-	  if no data is required from the respective field; ``count``\ [1]
-	  if the scanning system is progressive, i. e. not interlaced. The
-	  corresponding start value shall be ignored by the application and
-	  driver. Anyway, drivers may not support single field capturing and
-	  return both count values non-zero.
-
-	  Both ``count`` values set to zero, or line numbers outside the
-	  bounds depicted in :ref:`vbi-525` and :ref:`vbi-625`, or a
-	  field image covering lines of two fields, are invalid and shall
-	  not be returned by the driver.
-
-	  To initialize the ``start`` and ``count`` fields, applications
-	  must first determine the current video standard selection. The
-	  :ref:`v4l2_std_id <v4l2-std-id>` or the ``framelines`` field
-	  of struct :ref:`v4l2_standard <v4l2-standard>` can be evaluated
-	  for this purpose.
-
-    -  .. row 8
-
-       -  __u32
-
-       -  ``flags``
-
-       -  See :ref:`vbifmt-flags` below. Currently only drivers set flags,
-	  applications must set this field to zero.
-
-    -  .. row 9
-
-       -  __u32
-
-       -  ``reserved``\ [#f2]_
-
-       -  This array is reserved for future extensions. Drivers and
-	  applications must set it to zero.
+	To initialize the ``start`` and ``count`` fields, applications
+	must first determine the current video standard selection. The
+	:ref:`v4l2_std_id <v4l2-std-id>` or the ``framelines`` field
+	of struct :c:type:`v4l2_standard` can be evaluated
+	for this purpose.
+    * - __u32
+      - ``flags``
+      - See :ref:`vbifmt-flags` below. Currently only drivers set flags,
+	applications must set this field to zero.
+    * - __u32
+      - ``reserved``\ [#f2]_
+      - This array is reserved for future extensions. Drivers and
+	applications must set it to zero.
 
 
+.. tabularcolumns:: |p{4.0cm}|p{1.5cm}|p{12.0cm}|
 
 .. _vbifmt-flags:
 
@@ -232,40 +192,30 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_VBI_UNSYNC``
-
-       -  0x0001
-
-       -  This flag indicates hardware which does not properly distinguish
-	  between fields. Normally the VBI image stores the first field
-	  (lower scanning line numbers) first in memory. This may be a top
-	  or bottom field depending on the video standard. When this flag is
-	  set the first or second field may be stored first, however the
-	  fields are still in correct temporal order with the older field
-	  first in memory. [#f3]_
-
-    -  .. row 2
-
-       -  ``V4L2_VBI_INTERLACED``
-
-       -  0x0002
-
-       -  By default the two field images will be passed sequentially; all
-	  lines of the first field followed by all lines of the second field
-	  (compare :ref:`field-order` ``V4L2_FIELD_SEQ_TB`` and
-	  ``V4L2_FIELD_SEQ_BT``, whether the top or bottom field is first in
-	  memory depends on the video standard). When this flag is set, the
-	  two fields are interlaced (cf. ``V4L2_FIELD_INTERLACED``). The
-	  first line of the first field followed by the first line of the
-	  second field, then the two second lines, and so on. Such a layout
-	  may be necessary when the hardware has been programmed to capture
-	  or output interlaced video images and is unable to separate the
-	  fields for VBI capturing at the same time. For simplicity setting
-	  this flag implies that both ``count`` values are equal and
-	  non-zero.
+    * - ``V4L2_VBI_UNSYNC``
+      - 0x0001
+      - This flag indicates hardware which does not properly distinguish
+	between fields. Normally the VBI image stores the first field
+	(lower scanning line numbers) first in memory. This may be a top
+	or bottom field depending on the video standard. When this flag is
+	set the first or second field may be stored first, however the
+	fields are still in correct temporal order with the older field
+	first in memory. [#f3]_
+    * - ``V4L2_VBI_INTERLACED``
+      - 0x0002
+      - By default the two field images will be passed sequentially; all
+	lines of the first field followed by all lines of the second field
+	(compare :ref:`field-order` ``V4L2_FIELD_SEQ_TB`` and
+	``V4L2_FIELD_SEQ_BT``, whether the top or bottom field is first in
+	memory depends on the video standard). When this flag is set, the
+	two fields are interlaced (cf. ``V4L2_FIELD_INTERLACED``). The
+	first line of the first field followed by the first line of the
+	second field, then the two second lines, and so on. Such a layout
+	may be necessary when the hardware has been programmed to capture
+	or output interlaced video images and is unable to separate the
+	fields for VBI capturing at the same time. For simplicity setting
+	this flag implies that both ``count`` values are equal and
+	non-zero.
 
 
 
@@ -348,3 +298,6 @@
    Most VBI services transmit on both fields, but some have different
    semantics depending on the field number. These cannot be reliable
    decoded or encoded when ``V4L2_VBI_UNSYNC`` is set.
+
+.. [#f4]
+   The valid values ar shown at :ref:`vbi-525` and :ref:`vbi-625`.
diff --git a/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_525.pdf b/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_525.pdf
index 9e72c25..0bae283 100644
--- a/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_525.pdf
+++ b/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_525.pdf
Binary files differ
diff --git a/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_625.pdf b/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_625.pdf
index 765235e..bf29b95 100644
--- a/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_625.pdf
+++ b/Documentation/media/uapi/v4l/dev-raw-vbi_files/vbi_625.pdf
Binary files differ
diff --git a/Documentation/media/uapi/v4l/dev-rds.rst b/Documentation/media/uapi/v4l/dev-rds.rst
index cd6ad63..9c4e39d 100644
--- a/Documentation/media/uapi/v4l/dev-rds.rst
+++ b/Documentation/media/uapi/v4l/dev-rds.rst
@@ -14,7 +14,9 @@
 For more information see the core RDS standard :ref:`iec62106` and the
 RBDS standard :ref:`nrsc4`.
 
-.. note:: Note that the RBDS standard as is used in the USA is almost
+.. note::
+
+   Note that the RBDS standard as is used in the USA is almost
    identical to the RDS standard. Any RDS decoder/encoder can also handle
    RBDS. Only some of the fields have slightly different meanings. See the
    RBDS standard for more information.
@@ -32,10 +34,10 @@
 
 Devices supporting the RDS capturing API set the
 ``V4L2_CAP_RDS_CAPTURE`` flag in the ``capabilities`` field of struct
-:ref:`v4l2_capability <v4l2-capability>` returned by the
+:c:type:`v4l2_capability` returned by the
 :ref:`VIDIOC_QUERYCAP` ioctl. Any tuner that
 supports RDS will set the ``V4L2_TUNER_CAP_RDS`` flag in the
-``capability`` field of struct :ref:`v4l2_tuner <v4l2-tuner>`. If the
+``capability`` field of struct :c:type:`v4l2_tuner`. If the
 driver only passes RDS blocks without interpreting the data the
 ``V4L2_TUNER_CAP_RDS_BLOCK_IO`` flag has to be set, see
 :ref:`Reading RDS data <reading-rds-data>`. For future use the flag
@@ -46,19 +48,19 @@
 `https://linuxtv.org/lists.php <https://linuxtv.org/lists.php>`__.
 
 Whether an RDS signal is present can be detected by looking at the
-``rxsubchans`` field of struct :ref:`v4l2_tuner <v4l2-tuner>`: the
+``rxsubchans`` field of struct :c:type:`v4l2_tuner`: the
 ``V4L2_TUNER_SUB_RDS`` will be set if RDS data was detected.
 
 Devices supporting the RDS output API set the ``V4L2_CAP_RDS_OUTPUT``
 flag in the ``capabilities`` field of struct
-:ref:`v4l2_capability <v4l2-capability>` returned by the
+:c:type:`v4l2_capability` returned by the
 :ref:`VIDIOC_QUERYCAP` ioctl. Any modulator that
 supports RDS will set the ``V4L2_TUNER_CAP_RDS`` flag in the
 ``capability`` field of struct
-:ref:`v4l2_modulator <v4l2-modulator>`. In order to enable the RDS
+:c:type:`v4l2_modulator`. In order to enable the RDS
 transmission one must set the ``V4L2_TUNER_SUB_RDS`` bit in the
 ``txsubchans`` field of struct
-:ref:`v4l2_modulator <v4l2-modulator>`. If the driver only passes RDS
+:c:type:`v4l2_modulator`. If the driver only passes RDS
 blocks without interpreting the data the ``V4L2_TUNER_CAP_RDS_BLOCK_IO``
 flag has to be set. If the tuner is capable of handling RDS entities
 like program identification codes and radio text, the flag
@@ -91,165 +93,92 @@
 ==================
 
 
-.. _v4l2-rds-data:
+.. c:type:: v4l2_rds_data
+
+.. tabularcolumns:: |p{2.5cm}|p{2.5cm}|p{12.5cm}|
 
 .. flat-table:: struct v4l2_rds_data
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 5
 
-
-    -  .. row 1
-
-       -  __u8
-
-       -  ``lsb``
-
-       -  Least Significant Byte of RDS Block
-
-    -  .. row 2
-
-       -  __u8
-
-       -  ``msb``
-
-       -  Most Significant Byte of RDS Block
-
-    -  .. row 3
-
-       -  __u8
-
-       -  ``block``
-
-       -  Block description
+    * - __u8
+      - ``lsb``
+      - Least Significant Byte of RDS Block
+    * - __u8
+      - ``msb``
+      - Most Significant Byte of RDS Block
+    * - __u8
+      - ``block``
+      - Block description
 
 
 
 .. _v4l2-rds-block:
 
+.. tabularcolumns:: |p{2.9cm}|p{14.6cm}|
+
 .. flat-table:: Block description
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 5
 
-
-    -  .. row 1
-
-       -  Bits 0-2
-
-       -  Block (aka offset) of the received data.
-
-    -  .. row 2
-
-       -  Bits 3-5
-
-       -  Deprecated. Currently identical to bits 0-2. Do not use these
-	  bits.
-
-    -  .. row 3
-
-       -  Bit 6
-
-       -  Corrected bit. Indicates that an error was corrected for this data
-	  block.
-
-    -  .. row 4
-
-       -  Bit 7
-
-       -  Error bit. Indicates that an uncorrectable error occurred during
-	  reception of this block.
+    * - Bits 0-2
+      - Block (aka offset) of the received data.
+    * - Bits 3-5
+      - Deprecated. Currently identical to bits 0-2. Do not use these
+	bits.
+    * - Bit 6
+      - Corrected bit. Indicates that an error was corrected for this data
+	block.
+    * - Bit 7
+      - Error bit. Indicates that an uncorrectable error occurred during
+	reception of this block.
 
 
 
 .. _v4l2-rds-block-codes:
 
+.. tabularcolumns:: |p{5.6cm}|p{2.0cm}|p{1.5cm}|p{7.0cm}|
+
 .. flat-table:: Block defines
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 1 5
 
-
-    -  .. row 1
-
-       -  V4L2_RDS_BLOCK_MSK
-
-       -
-       -  7
-
-       -  Mask for bits 0-2 to get the block ID.
-
-    -  .. row 2
-
-       -  V4L2_RDS_BLOCK_A
-
-       -
-       -  0
-
-       -  Block A.
-
-    -  .. row 3
-
-       -  V4L2_RDS_BLOCK_B
-
-       -
-       -  1
-
-       -  Block B.
-
-    -  .. row 4
-
-       -  V4L2_RDS_BLOCK_C
-
-       -
-       -  2
-
-       -  Block C.
-
-    -  .. row 5
-
-       -  V4L2_RDS_BLOCK_D
-
-       -
-       -  3
-
-       -  Block D.
-
-    -  .. row 6
-
-       -  V4L2_RDS_BLOCK_C_ALT
-
-       -
-       -  4
-
-       -  Block C'.
-
-    -  .. row 7
-
-       -  V4L2_RDS_BLOCK_INVALID
-
-       -  read-only
-
-       -  7
-
-       -  An invalid block.
-
-    -  .. row 8
-
-       -  V4L2_RDS_BLOCK_CORRECTED
-
-       -  read-only
-
-       -  0x40
-
-       -  A bit error was detected but corrected.
-
-    -  .. row 9
-
-       -  V4L2_RDS_BLOCK_ERROR
-
-       -  read-only
-
-       -  0x80
-
-       -  An uncorrectable error occurred.
+    * - V4L2_RDS_BLOCK_MSK
+      -
+      - 7
+      - Mask for bits 0-2 to get the block ID.
+    * - V4L2_RDS_BLOCK_A
+      -
+      - 0
+      - Block A.
+    * - V4L2_RDS_BLOCK_B
+      -
+      - 1
+      - Block B.
+    * - V4L2_RDS_BLOCK_C
+      -
+      - 2
+      - Block C.
+    * - V4L2_RDS_BLOCK_D
+      -
+      - 3
+      - Block D.
+    * - V4L2_RDS_BLOCK_C_ALT
+      -
+      - 4
+      - Block C'.
+    * - V4L2_RDS_BLOCK_INVALID
+      - read-only
+      - 7
+      - An invalid block.
+    * - V4L2_RDS_BLOCK_CORRECTED
+      - read-only
+      - 0x40
+      - A bit error was detected but corrected.
+    * - V4L2_RDS_BLOCK_ERROR
+      - read-only
+      - 0x80
+      - An uncorrectable error occurred.
diff --git a/Documentation/media/uapi/v4l/dev-sdr.rst b/Documentation/media/uapi/v4l/dev-sdr.rst
index fc4053f..b3e828d 100644
--- a/Documentation/media/uapi/v4l/dev-sdr.rst
+++ b/Documentation/media/uapi/v4l/dev-sdr.rst
@@ -21,7 +21,7 @@
 Devices supporting the SDR receiver interface set the
 ``V4L2_CAP_SDR_CAPTURE`` and ``V4L2_CAP_TUNER`` flag in the
 ``capabilities`` field of struct
-:ref:`v4l2_capability <v4l2-capability>` returned by the
+:c:type:`v4l2_capability` returned by the
 :ref:`VIDIOC_QUERYCAP` ioctl. That flag means the
 device has an Analog to Digital Converter (ADC), which is a mandatory
 element for the SDR receiver.
@@ -29,7 +29,7 @@
 Devices supporting the SDR transmitter interface set the
 ``V4L2_CAP_SDR_OUTPUT`` and ``V4L2_CAP_MODULATOR`` flag in the
 ``capabilities`` field of struct
-:ref:`v4l2_capability <v4l2-capability>` returned by the
+:c:type:`v4l2_capability` returned by the
 :ref:`VIDIOC_QUERYCAP` ioctl. That flag means the
 device has an Digital to Analog Converter (DAC), which is a mandatory
 element for the SDR transmitter.
@@ -67,53 +67,40 @@
 well.
 
 To use the :ref:`format` ioctls applications set the ``type``
-field of a struct :ref:`v4l2_format <v4l2-format>` to
+field of a struct :c:type:`v4l2_format` to
 ``V4L2_BUF_TYPE_SDR_CAPTURE`` or ``V4L2_BUF_TYPE_SDR_OUTPUT`` and use
-the struct :ref:`v4l2_sdr_format <v4l2-sdr-format>` ``sdr`` member
+the struct :c:type:`v4l2_sdr_format` ``sdr`` member
 of the ``fmt`` union as needed per the desired operation. Currently
 there is two fields, ``pixelformat`` and ``buffersize``, of struct
-struct :ref:`v4l2_sdr_format <v4l2-sdr-format>` which are used.
+struct :c:type:`v4l2_sdr_format` which are used.
 Content of the ``pixelformat`` is V4L2 FourCC code of the data format.
 The ``buffersize`` field is maximum buffer size in bytes required for
 data transfer, set by the driver in order to inform application.
 
 
-.. _v4l2-sdr-format:
+.. c:type:: v4l2_sdr_format
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_sdr_format
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``pixelformat``
-
-       -  The data format or type of compression, set by the application.
-	  This is a little endian
-	  :ref:`four character code <v4l2-fourcc>`. V4L2 defines SDR
-	  formats in :ref:`sdr-formats`.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``buffersize``
-
-       -  Maximum size in bytes required for data. Value is set by the
-	  driver.
-
-    -  .. row 3
-
-       -  __u8
-
-       -  ``reserved[24]``
-
-       -  This array is reserved for future extensions. Drivers and
-	  applications must set it to zero.
+    * - __u32
+      - ``pixelformat``
+      - The data format or type of compression, set by the application.
+	This is a little endian
+	:ref:`four character code <v4l2-fourcc>`. V4L2 defines SDR
+	formats in :ref:`sdr-formats`.
+    * - __u32
+      - ``buffersize``
+      - Maximum size in bytes required for data. Value is set by the
+	driver.
+    * - __u8
+      - ``reserved[24]``
+      - This array is reserved for future extensions. Drivers and
+	applications must set it to zero.
 
 
 An SDR device may support :ref:`read/write <rw>` and/or streaming
diff --git a/Documentation/media/uapi/v4l/dev-sliced-vbi.rst b/Documentation/media/uapi/v4l/dev-sliced-vbi.rst
index ec52a82..5f6d534 100644
--- a/Documentation/media/uapi/v4l/dev-sliced-vbi.rst
+++ b/Documentation/media/uapi/v4l/dev-sliced-vbi.rst
@@ -34,7 +34,7 @@
 Devices supporting the sliced VBI capturing or output API set the
 ``V4L2_CAP_SLICED_VBI_CAPTURE`` or ``V4L2_CAP_SLICED_VBI_OUTPUT`` flag
 respectively, in the ``capabilities`` field of struct
-:ref:`v4l2_capability <v4l2-capability>` returned by the
+:c:type:`v4l2_capability` returned by the
 :ref:`VIDIOC_QUERYCAP` ioctl. At least one of the
 read/write, streaming or asynchronous :ref:`I/O methods <io>` must be
 supported. Sliced VBI devices may have a tuner or modulator.
@@ -67,17 +67,17 @@
 but not both at the same time.
 
 To determine the currently selected services applications set the
-``type`` field of struct :ref:`v4l2_format <v4l2-format>` to
+``type`` field of struct :c:type:`v4l2_format` to
 ``V4L2_BUF_TYPE_SLICED_VBI_CAPTURE`` or
 ``V4L2_BUF_TYPE_SLICED_VBI_OUTPUT``, and the
 :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` ioctl fills the ``fmt.sliced``
 member, a struct
-:ref:`v4l2_sliced_vbi_format <v4l2-sliced-vbi-format>`.
+:c:type:`v4l2_sliced_vbi_format`.
 
 Applications can request different parameters by initializing or
 modifying the ``fmt.sliced`` member and calling the
 :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl with a pointer to the
-:ref:`struct v4l2_format <v4l2-format>` structure.
+struct :c:type:`v4l2_format` structure.
 
 The sliced VBI API is more complicated than the raw VBI API because the
 hardware must be told which VBI service to expect on each scan line. Not
@@ -100,149 +100,104 @@
 :ref:`select() <func-select>` call.
 
 
-.. _v4l2-sliced-vbi-format:
+.. c:type:: v4l2_sliced_vbi_format
 
 struct v4l2_sliced_vbi_format
 -----------------------------
 
+.. tabularcolumns:: |p{1.0cm}|p{4.5cm}|p{4.0cm}|p{4.0cm}|p{4.0cm}|
+
+.. cssclass:: longtable
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
     :widths:       3 3 2 2 2
 
+    * - __u32
+      - ``service_set``
+      - :cspan:`2`
 
-    -  .. row 1
+	If ``service_set`` is non-zero when passed with
+	:ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` or
+	:ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>`, the ``service_lines``
+	array will be filled by the driver according to the services
+	specified in this field. For example, if ``service_set`` is
+	initialized with ``V4L2_SLICED_TELETEXT_B | V4L2_SLICED_WSS_625``,
+	a driver for the cx25840 video decoder sets lines 7-22 of both
+	fields [#f1]_ to ``V4L2_SLICED_TELETEXT_B`` and line 23 of the first
+	field to ``V4L2_SLICED_WSS_625``. If ``service_set`` is set to
+	zero, then the values of ``service_lines`` will be used instead.
 
-       -  __u32
+	On return the driver sets this field to the union of all elements
+	of the returned ``service_lines`` array. It may contain less
+	services than requested, perhaps just one, if the hardware cannot
+	handle more services simultaneously. It may be empty (zero) if
+	none of the requested services are supported by the hardware.
+    * - __u16
+      - ``service_lines``\ [2][24]
+      - :cspan:`2`
 
-       -  ``service_set``
+	Applications initialize this array with sets of data services the
+	driver shall look for or insert on the respective scan line.
+	Subject to hardware capabilities drivers return the requested set,
+	a subset, which may be just a single service, or an empty set.
+	When the hardware cannot handle multiple services on the same line
+	the driver shall choose one. No assumptions can be made on which
+	service the driver chooses.
 
-       -  :cspan:`2`
+	Data services are defined in :ref:`vbi-services2`. Array indices
+	map to ITU-R line numbers\ [#f2]_ as follows:
+    * -
+      -
+      - Element
+      - 525 line systems
+      - 625 line systems
+    * -
+      -
+      - ``service_lines``\ [0][1]
+      - 1
+      - 1
+    * -
+      -
+      - ``service_lines``\ [0][23]
+      - 23
+      - 23
+    * -
+      -
+      - ``service_lines``\ [1][1]
+      - 264
+      - 314
+    * -
+      -
+      - ``service_lines``\ [1][23]
+      - 286
+      - 336
+    * -
+      -
+      - :cspan:`2` Drivers must set ``service_lines`` [0][0] and
+	``service_lines``\ [1][0] to zero. The
+	``V4L2_VBI_ITU_525_F1_START``, ``V4L2_VBI_ITU_525_F2_START``,
+	``V4L2_VBI_ITU_625_F1_START`` and ``V4L2_VBI_ITU_625_F2_START``
+	defines give the start line numbers for each field for each 525 or
+	625 line format as a convenience. Don't forget that ITU line
+	numbering starts at 1, not 0.
+    * - __u32
+      - ``io_size``
+      - :cspan:`2` Maximum number of bytes passed by one
+	:ref:`read() <func-read>` or :ref:`write() <func-write>` call,
+	and the buffer size in bytes for the
+	:ref:`VIDIOC_QBUF` and
+	:ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl. Drivers set this field
+	to the size of struct
+	:c:type:`v4l2_sliced_vbi_data` times the
+	number of non-zero elements in the returned ``service_lines``
+	array (that is the number of lines potentially carrying data).
+    * - __u32
+      - ``reserved``\ [2]
+      - :cspan:`2` This array is reserved for future extensions.
 
-	  If ``service_set`` is non-zero when passed with
-	  :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` or
-	  :ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>`, the ``service_lines``
-	  array will be filled by the driver according to the services
-	  specified in this field. For example, if ``service_set`` is
-	  initialized with ``V4L2_SLICED_TELETEXT_B | V4L2_SLICED_WSS_625``,
-	  a driver for the cx25840 video decoder sets lines 7-22 of both
-	  fields [#f1]_ to ``V4L2_SLICED_TELETEXT_B`` and line 23 of the first
-	  field to ``V4L2_SLICED_WSS_625``. If ``service_set`` is set to
-	  zero, then the values of ``service_lines`` will be used instead.
-
-	  On return the driver sets this field to the union of all elements
-	  of the returned ``service_lines`` array. It may contain less
-	  services than requested, perhaps just one, if the hardware cannot
-	  handle more services simultaneously. It may be empty (zero) if
-	  none of the requested services are supported by the hardware.
-
-    -  .. row 2
-
-       -  __u16
-
-       -  ``service_lines``\ [2][24]
-
-       -  :cspan:`2`
-
-	  Applications initialize this array with sets of data services the
-	  driver shall look for or insert on the respective scan line.
-	  Subject to hardware capabilities drivers return the requested set,
-	  a subset, which may be just a single service, or an empty set.
-	  When the hardware cannot handle multiple services on the same line
-	  the driver shall choose one. No assumptions can be made on which
-	  service the driver chooses.
-
-	  Data services are defined in :ref:`vbi-services2`. Array indices
-	  map to ITU-R line numbers (see also :ref:`vbi-525` and
-	  :ref:`vbi-625`) as follows:
-
-    -  .. row 3
-
-       -
-       -
-       -  Element
-
-       -  525 line systems
-
-       -  625 line systems
-
-    -  .. row 4
-
-       -
-       -
-       -  ``service_lines``\ [0][1]
-
-       -  1
-
-       -  1
-
-    -  .. row 5
-
-       -
-       -
-       -  ``service_lines``\ [0][23]
-
-       -  23
-
-       -  23
-
-    -  .. row 6
-
-       -
-       -
-       -  ``service_lines``\ [1][1]
-
-       -  264
-
-       -  314
-
-    -  .. row 7
-
-       -
-       -
-       -  ``service_lines``\ [1][23]
-
-       -  286
-
-       -  336
-
-    -  .. row 8
-
-       -
-       -
-       -  :cspan:`2` Drivers must set ``service_lines`` [0][0] and
-	  ``service_lines``\ [1][0] to zero. The
-	  ``V4L2_VBI_ITU_525_F1_START``, ``V4L2_VBI_ITU_525_F2_START``,
-	  ``V4L2_VBI_ITU_625_F1_START`` and ``V4L2_VBI_ITU_625_F2_START``
-	  defines give the start line numbers for each field for each 525 or
-	  625 line format as a convenience. Don't forget that ITU line
-	  numbering starts at 1, not 0.
-
-    -  .. row 9
-
-       -  __u32
-
-       -  ``io_size``
-
-       -  :cspan:`2` Maximum number of bytes passed by one
-	  :ref:`read() <func-read>` or :ref:`write() <func-write>` call,
-	  and the buffer size in bytes for the
-	  :ref:`VIDIOC_QBUF` and
-	  :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl. Drivers set this field
-	  to the size of struct
-	  :ref:`v4l2_sliced_vbi_data <v4l2-sliced-vbi-data>` times the
-	  number of non-zero elements in the returned ``service_lines``
-	  array (that is the number of lines potentially carrying data).
-
-    -  .. row 10
-
-       -  __u32
-
-       -  ``reserved``\ [2]
-
-       -  :cspan:`2` This array is reserved for future extensions.
-	  Applications and drivers must set it to zero.
-
+	Applications and drivers must set it to zero.
 
 
 .. _vbi-services2:
@@ -250,96 +205,65 @@
 Sliced VBI services
 -------------------
 
+.. raw:: latex
+
+    \begin{adjustbox}{width=\columnwidth}
+
+.. tabularcolumns:: |p{5.0cm}|p{1.4cm}|p{3.0cm}|p{2.5cm}|p{9.0cm}|
+
 .. flat-table::
     :header-rows:  1
     :stub-columns: 0
     :widths:       2 1 1 2 2
 
+    * - Symbol
+      - Value
+      - Reference
+      - Lines, usually
+      - Payload
+    * - ``V4L2_SLICED_TELETEXT_B`` (Teletext System B)
+      - 0x0001
+      - :ref:`ets300706`,
 
-    -  .. row 1
+	:ref:`itu653`
+      - PAL/SECAM line 7-22, 320-335 (second field 7-22)
+      - Last 42 of the 45 byte Teletext packet, that is without clock
+	run-in and framing code, lsb first transmitted.
+    * - ``V4L2_SLICED_VPS``
+      - 0x0400
+      - :ref:`ets300231`
+      - PAL line 16
+      - Byte number 3 to 15 according to Figure 9 of ETS 300 231, lsb
+	first transmitted.
+    * - ``V4L2_SLICED_CAPTION_525``
+      - 0x1000
+      - :ref:`cea608`
+      - NTSC line 21, 284 (second field 21)
+      - Two bytes in transmission order, including parity bit, lsb first
+	transmitted.
+    * - ``V4L2_SLICED_WSS_625``
+      - 0x4000
+      - :ref:`itu1119`,
 
-       -  Symbol
+	:ref:`en300294`
+      - PAL/SECAM line 23
+      -
 
-       -  Value
+	::
 
-       -  Reference
+	    Byte         0                 1
+		  msb         lsb  msb           lsb
+	     Bit  7 6 5 4 3 2 1 0  x x 13 12 11 10 9
+    * - ``V4L2_SLICED_VBI_525``
+      - 0x1000
+      - :cspan:`2` Set of services applicable to 525 line systems.
+    * - ``V4L2_SLICED_VBI_625``
+      - 0x4401
+      - :cspan:`2` Set of services applicable to 625 line systems.
 
-       -  Lines, usually
+.. raw:: latex
 
-       -  Payload
-
-    -  .. row 2
-
-       -  ``V4L2_SLICED_TELETEXT_B`` (Teletext System B)
-
-       -  0x0001
-
-       -  :ref:`ets300706`, :ref:`itu653`
-
-       -  PAL/SECAM line 7-22, 320-335 (second field 7-22)
-
-       -  Last 42 of the 45 byte Teletext packet, that is without clock
-	  run-in and framing code, lsb first transmitted.
-
-    -  .. row 3
-
-       -  ``V4L2_SLICED_VPS``
-
-       -  0x0400
-
-       -  :ref:`ets300231`
-
-       -  PAL line 16
-
-       -  Byte number 3 to 15 according to Figure 9 of ETS 300 231, lsb
-	  first transmitted.
-
-    -  .. row 4
-
-       -  ``V4L2_SLICED_CAPTION_525``
-
-       -  0x1000
-
-       -  :ref:`cea608`
-
-       -  NTSC line 21, 284 (second field 21)
-
-       -  Two bytes in transmission order, including parity bit, lsb first
-	  transmitted.
-
-    -  .. row 5
-
-       -  ``V4L2_SLICED_WSS_625``
-
-       -  0x4000
-
-       -  :ref:`itu1119`, :ref:`en300294`
-
-       -  PAL/SECAM line 23
-
-       -
-
-	  ::
-
-	      Byte         0                 1
-		    msb         lsb  msb           lsb
-	       Bit  7 6 5 4 3 2 1 0  x x 13 12 11 10 9
-
-    -  .. row 6
-
-       -  ``V4L2_SLICED_VBI_525``
-
-       -  0x1000
-
-       -  :cspan:`2` Set of services applicable to 525 line systems.
-
-    -  .. row 7
-
-       -  ``V4L2_SLICED_VBI_625``
-
-       -  0x4401
-
-       -  :cspan:`2` Set of services applicable to 625 line systems.
+    \end{adjustbox}\newline\newline
 
 
 Drivers may return an ``EINVAL`` error code when applications attempt to
@@ -359,80 +283,57 @@
 
 A single :ref:`read() <func-read>` or :ref:`write() <func-write>`
 call must pass all data belonging to one video frame. That is an array
-of :ref:`struct v4l2_sliced_vbi_data <v4l2-sliced-vbi-data>` structures with one or
+of struct :c:type:`v4l2_sliced_vbi_data` structures with one or
 more elements and a total size not exceeding ``io_size`` bytes. Likewise
 in streaming I/O mode one buffer of ``io_size`` bytes must contain data
 of one video frame. The ``id`` of unused
-:ref:`struct v4l2_sliced_vbi_data <v4l2-sliced-vbi-data>` elements must be zero.
+struct :c:type:`v4l2_sliced_vbi_data` elements must be zero.
 
 
-.. _v4l2-sliced-vbi-data:
+.. c:type:: v4l2_sliced_vbi_data
 
 struct v4l2_sliced_vbi_data
 ---------------------------
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``id``
-
-       -  A flag from :ref:`vbi-services` identifying the type of data in
-	  this packet. Only a single bit must be set. When the ``id`` of a
-	  captured packet is zero, the packet is empty and the contents of
-	  other fields are undefined. Applications shall ignore empty
-	  packets. When the ``id`` of a packet for output is zero the
-	  contents of the ``data`` field are undefined and the driver must
-	  no longer insert data on the requested ``field`` and ``line``.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``field``
-
-       -  The video field number this data has been captured from, or shall
-	  be inserted at. ``0`` for the first field, ``1`` for the second
-	  field.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``line``
-
-       -  The field (as opposed to frame) line number this data has been
-	  captured from, or shall be inserted at. See :ref:`vbi-525` and
-	  :ref:`vbi-625` for valid values. Sliced VBI capture devices can
-	  set the line number of all packets to ``0`` if the hardware cannot
-	  reliably identify scan lines. The field number must always be
-	  valid.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``reserved``
-
-       -  This field is reserved for future extensions. Applications and
-	  drivers must set it to zero.
-
-    -  .. row 5
-
-       -  __u8
-
-       -  ``data``\ [48]
-
-       -  The packet payload. See :ref:`vbi-services` for the contents and
-	  number of bytes passed for each data type. The contents of padding
-	  bytes at the end of this array are undefined, drivers and
-	  applications shall ignore them.
+    * - __u32
+      - ``id``
+      - A flag from :ref:`vbi-services` identifying the type of data in
+	this packet. Only a single bit must be set. When the ``id`` of a
+	captured packet is zero, the packet is empty and the contents of
+	other fields are undefined. Applications shall ignore empty
+	packets. When the ``id`` of a packet for output is zero the
+	contents of the ``data`` field are undefined and the driver must
+	no longer insert data on the requested ``field`` and ``line``.
+    * - __u32
+      - ``field``
+      - The video field number this data has been captured from, or shall
+	be inserted at. ``0`` for the first field, ``1`` for the second
+	field.
+    * - __u32
+      - ``line``
+      - The field (as opposed to frame) line number this data has been
+	captured from, or shall be inserted at. See :ref:`vbi-525` and
+	:ref:`vbi-625` for valid values. Sliced VBI capture devices can
+	set the line number of all packets to ``0`` if the hardware cannot
+	reliably identify scan lines. The field number must always be
+	valid.
+    * - __u32
+      - ``reserved``
+      - This field is reserved for future extensions. Applications and
+	drivers must set it to zero.
+    * - __u8
+      - ``data``\ [48]
+      - The packet payload. See :ref:`vbi-services` for the contents and
+	number of bytes passed for each data type. The contents of padding
+	bytes at the end of this array are undefined, drivers and
+	applications shall ignore them.
 
 
 Packets are always passed in ascending line number order, without
@@ -542,7 +443,7 @@
 
 The payload of the MPEG-2 *Private Stream 1 PES* packets that contain
 sliced VBI data is specified by struct
-:ref:`v4l2_mpeg_vbi_fmt_ivtv <v4l2-mpeg-vbi-fmt-ivtv>`. The
+:c:type:`v4l2_mpeg_vbi_fmt_ivtv`. The
 payload is variable length, depending on the actual number of lines of
 sliced VBI data present in a video frame. The payload may be padded at
 the end with unspecified fill bytes to align the end of the payload to a
@@ -551,58 +452,41 @@
 number).
 
 
-.. _v4l2-mpeg-vbi-fmt-ivtv:
+.. c:type:: v4l2_mpeg_vbi_fmt_ivtv
 
 struct v4l2_mpeg_vbi_fmt_ivtv
 -----------------------------
 
+.. tabularcolumns:: |p{1.0cm}|p{3.5cm}|p{1.0cm}|p{11.5cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 1 2
 
-
-    -  .. row 1
-
-       -  __u8
-
-       -  ``magic``\ [4]
-
-       -
-       -  A "magic" constant from :ref:`v4l2-mpeg-vbi-fmt-ivtv-magic` that
-	  indicates this is a valid sliced VBI data payload and also
-	  indicates which member of the anonymous union, ``itv0`` or
-	  ``ITV0``, to use for the payload data.
-
-    -  .. row 2
-
-       -  union
-
-       -  (anonymous)
-
-    -  .. row 3
-
-       -
-       -  struct :ref:`v4l2_mpeg_vbi_itv0 <v4l2-mpeg-vbi-itv0>`
-
-       -  ``itv0``
-
-       -  The primary form of the sliced VBI data payload that contains
-	  anywhere from 1 to 35 lines of sliced VBI data. Line masks are
-	  provided in this form of the payload indicating which VBI lines
-	  are provided.
-
-    -  .. row 4
-
-       -
-       -  struct :ref:`v4l2_mpeg_vbi_ITV0 <v4l2-mpeg-vbi-itv0-1>`
-
-       -  ``ITV0``
-
-       -  An alternate form of the sliced VBI data payload used when 36
-	  lines of sliced VBI data are present. No line masks are provided
-	  in this form of the payload; all valid line mask bits are
-	  implcitly set.
+    * - __u8
+      - ``magic``\ [4]
+      -
+      - A "magic" constant from :ref:`v4l2-mpeg-vbi-fmt-ivtv-magic` that
+	indicates this is a valid sliced VBI data payload and also
+	indicates which member of the anonymous union, ``itv0`` or
+	``ITV0``, to use for the payload data.
+    * - union
+      - (anonymous)
+    * -
+      - struct :c:type:`v4l2_mpeg_vbi_itv0`
+      - ``itv0``
+      - The primary form of the sliced VBI data payload that contains
+	anywhere from 1 to 35 lines of sliced VBI data. Line masks are
+	provided in this form of the payload indicating which VBI lines
+	are provided.
+    * -
+      - struct :ref:`v4l2_mpeg_vbi_ITV0 <v4l2-mpeg-vbi-itv0-1>`
+      - ``ITV0``
+      - An alternate form of the sliced VBI data payload used when 36
+	lines of sliced VBI data are present. No line masks are provided
+	in this form of the payload; all valid line mask bits are
+	implcitly set.
 
 
 
@@ -611,96 +495,77 @@
 Magic Constants for struct v4l2_mpeg_vbi_fmt_ivtv magic field
 -------------------------------------------------------------
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. flat-table::
     :header-rows:  1
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  Defined Symbol
-
-       -  Value
-
-       -  Description
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_VBI_IVTV_MAGIC0``
-
-       -  "itv0"
-
-       -  Indicates the ``itv0`` member of the union in struct
-	  :ref:`v4l2_mpeg_vbi_fmt_ivtv <v4l2-mpeg-vbi-fmt-ivtv>` is
-	  valid.
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_VBI_IVTV_MAGIC1``
-
-       -  "ITV0"
-
-       -  Indicates the ``ITV0`` member of the union in struct
-	  :ref:`v4l2_mpeg_vbi_fmt_ivtv <v4l2-mpeg-vbi-fmt-ivtv>` is
-	  valid and that 36 lines of sliced VBI data are present.
+    * - Defined Symbol
+      - Value
+      - Description
+    * - ``V4L2_MPEG_VBI_IVTV_MAGIC0``
+      - "itv0"
+      - Indicates the ``itv0`` member of the union in struct
+	:c:type:`v4l2_mpeg_vbi_fmt_ivtv` is
+	valid.
+    * - ``V4L2_MPEG_VBI_IVTV_MAGIC1``
+      - "ITV0"
+      - Indicates the ``ITV0`` member of the union in struct
+	:c:type:`v4l2_mpeg_vbi_fmt_ivtv` is
+	valid and that 36 lines of sliced VBI data are present.
 
 
 
-.. _v4l2-mpeg-vbi-itv0:
+.. c:type:: v4l2_mpeg_vbi_itv0
 
-struct v4l2_mpeg_vbi_itv0
--------------------------
+.. c:type:: v4l2_mpeg_vbi_ITV0
+
+structs v4l2_mpeg_vbi_itv0 and v4l2_mpeg_vbi_ITV0
+-------------------------------------------------
+
+.. tabularcolumns:: |p{4.4cm}|p{2.4cm}|p{10.7cm}|
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __le32
-
-       -  ``linemask``\ [2]
-
-       -  Bitmasks indicating the VBI service lines present. These
-	  ``linemask`` values are stored in little endian byte order in the
-	  MPEG stream. Some reference ``linemask`` bit positions with their
-	  corresponding VBI line number and video field are given below.
-	  b\ :sub:`0` indicates the least significant bit of a ``linemask``
-	  value:
+    * - __le32
+      - ``linemask``\ [2]
+      - Bitmasks indicating the VBI service lines present. These
+	``linemask`` values are stored in little endian byte order in the
+	MPEG stream. Some reference ``linemask`` bit positions with their
+	corresponding VBI line number and video field are given below.
+	b\ :sub:`0` indicates the least significant bit of a ``linemask``
+	value:
 
 
 
-	  ::
+	::
 
-	      linemask[0] b0:     line  6     first field
-	      linemask[0] b17:        line 23     first field
-	      linemask[0] b18:        line  6     second field
-	      linemask[0] b31:        line 19     second field
-	      linemask[1] b0:     line 20     second field
-	      linemask[1] b3:     line 23     second field
-	      linemask[1] b4-b31: unused and set to 0
-
-    -  .. row 2
-
-       -  struct
-	  :ref:`v4l2_mpeg_vbi_itv0_line <v4l2-mpeg-vbi-itv0-line>`
-
-       -  ``line``\ [35]
-
-       -  This is a variable length array that holds from 1 to 35 lines of
-	  sliced VBI data. The sliced VBI data lines present correspond to
-	  the bits set in the ``linemask`` array, starting from b\ :sub:`0`
-	  of ``linemask``\ [0] up through b\ :sub:`31` of ``linemask``\ [0],
-	  and from b\ :sub:`0` of ``linemask``\ [1] up through b\ :sub:`3` of
-	  ``linemask``\ [1]. ``line``\ [0] corresponds to the first bit
-	  found set in the ``linemask`` array, ``line``\ [1] corresponds to
-	  the second bit found set in the ``linemask`` array, etc. If no
-	  ``linemask`` array bits are set, then ``line``\ [0] may contain
-	  one line of unspecified data that should be ignored by
-	  applications.
+	    linemask[0] b0:     line  6     first field
+	    linemask[0] b17:    line 23     first field
+	    linemask[0] b18:    line  6     second field
+	    linemask[0] b31:    line 19     second field
+	    linemask[1] b0:     line 20     second field
+	    linemask[1] b3:     line 23     second field
+	    linemask[1] b4-b31: unused and set to 0
+    * - struct
+	:c:type:`v4l2_mpeg_vbi_itv0_line`
+      - ``line``\ [35]
+      - This is a variable length array that holds from 1 to 35 lines of
+	sliced VBI data. The sliced VBI data lines present correspond to
+	the bits set in the ``linemask`` array, starting from b\ :sub:`0`
+	of ``linemask``\ [0] up through b\ :sub:`31` of ``linemask``\ [0],
+	and from b\ :sub:`0` of ``linemask``\ [1] up through b\ :sub:`3` of
+	``linemask``\ [1]. ``line``\ [0] corresponds to the first bit
+	found set in the ``linemask`` array, ``line``\ [1] corresponds to
+	the second bit found set in the ``linemask`` array, etc. If no
+	``linemask`` array bits are set, then ``line``\ [0] may contain
+	one line of unspecified data that should be ignored by
+	applications.
 
 
 
@@ -709,54 +574,43 @@
 struct v4l2_mpeg_vbi_ITV0
 -------------------------
 
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  struct
-	  :ref:`v4l2_mpeg_vbi_itv0_line <v4l2-mpeg-vbi-itv0-line>`
-
-       -  ``line``\ [36]
-
-       -  A fixed length array of 36 lines of sliced VBI data. ``line``\ [0]
-	  through ``line``\ [17] correspond to lines 6 through 23 of the
-	  first field. ``line``\ [18] through ``line``\ [35] corresponds to
-	  lines 6 through 23 of the second field.
+    * - struct
+	:c:type:`v4l2_mpeg_vbi_itv0_line`
+      - ``line``\ [36]
+      - A fixed length array of 36 lines of sliced VBI data. ``line``\ [0]
+	through ``line``\ [17] correspond to lines 6 through 23 of the
+	first field. ``line``\ [18] through ``line``\ [35] corresponds to
+	lines 6 through 23 of the second field.
 
 
 
-.. _v4l2-mpeg-vbi-itv0-line:
+.. c:type:: v4l2_mpeg_vbi_itv0_line
 
 struct v4l2_mpeg_vbi_itv0_line
 ------------------------------
 
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u8
-
-       -  ``id``
-
-       -  A line identifier value from
-	  :ref:`ITV0-Line-Identifier-Constants` that indicates the type of
-	  sliced VBI data stored on this line.
-
-    -  .. row 2
-
-       -  __u8
-
-       -  ``data``\ [42]
-
-       -  The sliced VBI data for the line.
+    * - __u8
+      - ``id``
+      - A line identifier value from
+	:ref:`ITV0-Line-Identifier-Constants` that indicates the type of
+	sliced VBI data stored on this line.
+    * - __u8
+      - ``data``\ [42]
+      - The sliced VBI data for the line.
 
 
 
@@ -765,58 +619,38 @@
 Line Identifiers for struct v4l2_mpeg_vbi_itv0_line id field
 ------------------------------------------------------------
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. flat-table::
     :header-rows:  1
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  Defined Symbol
-
-       -  Value
-
-       -  Description
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_VBI_IVTV_TELETEXT_B``
-
-       -  1
-
-       -  Refer to :ref:`Sliced VBI services <vbi-services2>` for a
-	  description of the line payload.
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_VBI_IVTV_CAPTION_525``
-
-       -  4
-
-       -  Refer to :ref:`Sliced VBI services <vbi-services2>` for a
-	  description of the line payload.
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_VBI_IVTV_WSS_625``
-
-       -  5
-
-       -  Refer to :ref:`Sliced VBI services <vbi-services2>` for a
-	  description of the line payload.
-
-    -  .. row 5
-
-       -  ``V4L2_MPEG_VBI_IVTV_VPS``
-
-       -  7
-
-       -  Refer to :ref:`Sliced VBI services <vbi-services2>` for a
-	  description of the line payload.
+    * - Defined Symbol
+      - Value
+      - Description
+    * - ``V4L2_MPEG_VBI_IVTV_TELETEXT_B``
+      - 1
+      - Refer to :ref:`Sliced VBI services <vbi-services2>` for a
+	description of the line payload.
+    * - ``V4L2_MPEG_VBI_IVTV_CAPTION_525``
+      - 4
+      - Refer to :ref:`Sliced VBI services <vbi-services2>` for a
+	description of the line payload.
+    * - ``V4L2_MPEG_VBI_IVTV_WSS_625``
+      - 5
+      - Refer to :ref:`Sliced VBI services <vbi-services2>` for a
+	description of the line payload.
+    * - ``V4L2_MPEG_VBI_IVTV_VPS``
+      - 7
+      - Refer to :ref:`Sliced VBI services <vbi-services2>` for a
+	description of the line payload.
 
 
 
 .. [#f1]
    According to :ref:`ETS 300 706 <ets300706>` lines 6-22 of the first
    field and lines 5-22 of the second field may carry Teletext data.
+
+.. [#f2]
+   See also :ref:`vbi-525` and :ref:`vbi-625`.
diff --git a/Documentation/media/uapi/v4l/dev-subdev.rst b/Documentation/media/uapi/v4l/dev-subdev.rst
index 5a112eb..fb4d0d4 100644
--- a/Documentation/media/uapi/v4l/dev-subdev.rst
+++ b/Documentation/media/uapi/v4l/dev-subdev.rst
@@ -202,93 +202,58 @@
 list entity names and pad numbers).
 
 
+.. raw:: latex
+
+    \begin{adjustbox}{width=\columnwidth}
+
+.. tabularcolumns:: |p{4.5cm}|p{4.5cm}|p{4.5cm}|p{4.5cm}|p{4.5cm}|p{4.5cm}|p{4.5cm}|
+
 .. _sample-pipeline-config:
 
 .. flat-table:: Sample Pipeline Configuration
     :header-rows:  1
     :stub-columns: 0
+    :widths: 5 5 5 5 5 5 5
 
+    * -
+      - Sensor/0 format
+      - Frontend/0 format
+      - Frontend/1 format
+      - Scaler/0 format
+      - Scaler/0 compose selection rectangle
+      - Scaler/1 format
+    * - Initial state
+      - 2048x1536/SGRBG8_1X8
+      - (default)
+      - (default)
+      - (default)
+      - (default)
+      - (default)
+    * - Configure frontend sink format
+      - 2048x1536/SGRBG8_1X8
+      - *2048x1536/SGRBG8_1X8*
+      - *2046x1534/SGRBG8_1X8*
+      - (default)
+      - (default)
+      - (default)
+    * - Configure scaler sink format
+      - 2048x1536/SGRBG8_1X8
+      - 2048x1536/SGRBG8_1X8
+      - 2046x1534/SGRBG8_1X8
+      - *2046x1534/SGRBG8_1X8*
+      - *0,0/2046x1534*
+      - *2046x1534/SGRBG8_1X8*
+    * - Configure scaler sink compose selection
+      - 2048x1536/SGRBG8_1X8
+      - 2048x1536/SGRBG8_1X8
+      - 2046x1534/SGRBG8_1X8
+      - 2046x1534/SGRBG8_1X8
+      - *0,0/1280x960*
+      - *1280x960/SGRBG8_1X8*
 
-    -  .. row 1
+.. raw:: latex
 
-       -
-       -  Sensor/0 format
-
-       -  Frontend/0 format
-
-       -  Frontend/1 format
-
-       -  Scaler/0 format
-
-       -  Scaler/0 compose selection rectangle
-
-       -  Scaler/1 format
-
-    -  .. row 2
-
-       -  Initial state
-
-       -  2048x1536/SGRBG8_1X8
-
-       -  (default)
-
-       -  (default)
-
-       -  (default)
-
-       -  (default)
-
-       -  (default)
-
-    -  .. row 3
-
-       -  Configure frontend sink format
-
-       -  2048x1536/SGRBG8_1X8
-
-       -  *2048x1536/SGRBG8_1X8*
-
-       -  *2046x1534/SGRBG8_1X8*
-
-       -  (default)
-
-       -  (default)
-
-       -  (default)
-
-    -  .. row 4
-
-       -  Configure scaler sink format
-
-       -  2048x1536/SGRBG8_1X8
-
-       -  2048x1536/SGRBG8_1X8
-
-       -  2046x1534/SGRBG8_1X8
-
-       -  *2046x1534/SGRBG8_1X8*
-
-       -  *0,0/2046x1534*
-
-       -  *2046x1534/SGRBG8_1X8*
-
-    -  .. row 5
-
-       -  Configure scaler sink compose selection
-
-       -  2048x1536/SGRBG8_1X8
-
-       -  2048x1536/SGRBG8_1X8
-
-       -  2046x1534/SGRBG8_1X8
-
-       -  2046x1534/SGRBG8_1X8
-
-       -  *0,0/1280x960*
-
-       -  *1280x960/SGRBG8_1X8*
-
-
+    \end{adjustbox}\newline\newline
 
 1. Initial state. The sensor source pad format is set to its native 3MP
    size and V4L2_MBUS_FMT_SGRBG8_1X8 media bus code. Formats on the
@@ -332,7 +297,7 @@
 the area of the image that will be scaled up.
 
 Crop settings are defined by a crop rectangle and represented in a
-struct :ref:`v4l2_rect <v4l2-rect>` by the coordinates of the top
+struct :c:type:`v4l2_rect` by the coordinates of the top
 left corner and the rectangle size. Both the coordinates and sizes are
 expressed in pixels.
 
@@ -348,7 +313,7 @@
 The scaling operation changes the size of the image by scaling it to new
 dimensions. The scaling ratio isn't specified explicitly, but is implied
 from the original and scaled image sizes. Both sizes are represented by
-struct :ref:`v4l2_rect <v4l2-rect>`.
+struct :c:type:`v4l2_rect`.
 
 Scaling support is optional. When supported by a subdev, the crop
 rectangle on the subdev's sink pad is scaled to the size configured
diff --git a/Documentation/media/uapi/v4l/dev-touch.rst b/Documentation/media/uapi/v4l/dev-touch.rst
new file mode 100644
index 0000000..98797f2
--- /dev/null
+++ b/Documentation/media/uapi/v4l/dev-touch.rst
@@ -0,0 +1,56 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _touch:
+
+*************
+Touch Devices
+*************
+
+Touch devices are accessed through character device special files named
+``/dev/v4l-touch0`` to ``/dev/v4l-touch255`` with major number 81 and
+dynamically allocated minor numbers 0 to 255.
+
+Overview
+========
+
+Sensors may be Optical, or Projected Capacitive touch (PCT).
+
+Processing is required to analyse the raw data and produce input events. In
+some systems, this may be performed on the ASIC and the raw data is purely a
+side-channel for diagnostics or tuning. In other systems, the ASIC is a simple
+analogue front end device which delivers touch data at high rate, and any touch
+processing must be done on the host.
+
+For capacitive touch sensing, the touchscreen is composed of an array of
+horizontal and vertical conductors (alternatively called rows/columns, X/Y
+lines, or tx/rx). Mutual Capacitance measured is at the nodes where the
+conductors cross. Alternatively, Self Capacitance measures the signal from each
+column and row independently.
+
+A touch input may be determined by comparing the raw capacitance measurement to
+a no-touch reference (or "baseline") measurement:
+
+Delta = Raw - Reference
+
+The reference measurement takes account of variations in the capacitance across
+the touch sensor matrix, for example manufacturing irregularities,
+environmental or edge effects.
+
+Querying Capabilities
+=====================
+
+Devices supporting the touch interface set the ``V4L2_CAP_VIDEO_CAPTURE`` flag
+and the ``V4L2_CAP_TOUCH`` flag in the ``capabilities`` field of
+:c:type:`v4l2_capability` returned by the
+:ref:`VIDIOC_QUERYCAP` ioctl.
+
+At least one of the read/write or streaming I/O methods must be
+supported.
+
+The formats supported by touch devices are documented in
+:ref:`Touch Formats <tch-formats>`.
+
+Data Format Negotiation
+=======================
+
+A touch device may support any I/O method.
diff --git a/Documentation/media/uapi/v4l/devices.rst b/Documentation/media/uapi/v4l/devices.rst
index aed0ce1..5c3d6c2 100644
--- a/Documentation/media/uapi/v4l/devices.rst
+++ b/Documentation/media/uapi/v4l/devices.rst
@@ -22,5 +22,6 @@
     dev-radio
     dev-rds
     dev-sdr
+    dev-touch
     dev-event
     dev-subdev
diff --git a/Documentation/media/uapi/v4l/diff-v4l.rst b/Documentation/media/uapi/v4l/diff-v4l.rst
index e1e034d..76b2eca 100644
--- a/Documentation/media/uapi/v4l/diff-v4l.rst
+++ b/Documentation/media/uapi/v4l/diff-v4l.rst
@@ -39,39 +39,19 @@
     :header-rows:  1
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  Device Type
-
-       -  File Name
-
-       -  Minor Numbers
-
-    -  .. row 2
-
-       -  Video capture and overlay
-
-       -  ``/dev/video`` and ``/dev/bttv0``\  [#f1]_, ``/dev/video0`` to
-	  ``/dev/video63``
-
-       -  0-63
-
-    -  .. row 3
-
-       -  Radio receiver
-
-       -  ``/dev/radio``\  [#f2]_, ``/dev/radio0`` to ``/dev/radio63``
-
-       -  64-127
-
-    -  .. row 4
-
-       -  Raw VBI capture
-
-       -  ``/dev/vbi``, ``/dev/vbi0`` to ``/dev/vbi31``
-
-       -  224-255
+    * - Device Type
+      - File Name
+      - Minor Numbers
+    * - Video capture and overlay
+      - ``/dev/video`` and ``/dev/bttv0``\  [#f1]_, ``/dev/video0`` to
+	``/dev/video63``
+      - 0-63
+    * - Radio receiver
+      - ``/dev/radio``\  [#f2]_, ``/dev/radio0`` to ``/dev/radio63``
+      - 64-127
+    * - Raw VBI capture
+      - ``/dev/vbi``, ``/dev/vbi0`` to ``/dev/vbi31``
+      - 224-255
 
 
 V4L prohibits (or used to prohibit) multiple opens of a device file.
@@ -87,162 +67,89 @@
 The V4L ``VIDIOCGCAP`` ioctl is equivalent to V4L2's
 :ref:`VIDIOC_QUERYCAP`.
 
-The ``name`` field in struct :c:type:`struct video_capability` became
-``card`` in struct :ref:`v4l2_capability <v4l2-capability>`, ``type``
+The ``name`` field in struct ``video_capability`` became
+``card`` in struct :c:type:`v4l2_capability`, ``type``
 was replaced by ``capabilities``. Note V4L2 does not distinguish between
 device types like this, better think of basic video input, video output
 and radio devices supporting a set of related functions like video
 capturing, video overlay and VBI capturing. See :ref:`open` for an
 introduction.
 
+.. tabularcolumns:: |p{5.5cm}|p{6.5cm}|p{5.5cm}
 
+.. cssclass:: longtable
 
 .. flat-table::
     :header-rows:  1
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  struct :c:type:`struct video_capability` ``type``
-
-       -  struct :ref:`v4l2_capability <v4l2-capability>`
-	  ``capabilities`` flags
-
-       -  Purpose
-
-    -  .. row 2
-
-       -  ``VID_TYPE_CAPTURE``
-
-       -  ``V4L2_CAP_VIDEO_CAPTURE``
-
-       -  The :ref:`video capture <capture>` interface is supported.
-
-    -  .. row 3
-
-       -  ``VID_TYPE_TUNER``
-
-       -  ``V4L2_CAP_TUNER``
-
-       -  The device has a :ref:`tuner or modulator <tuner>`.
-
-    -  .. row 4
-
-       -  ``VID_TYPE_TELETEXT``
-
-       -  ``V4L2_CAP_VBI_CAPTURE``
-
-       -  The :ref:`raw VBI capture <raw-vbi>` interface is supported.
-
-    -  .. row 5
-
-       -  ``VID_TYPE_OVERLAY``
-
-       -  ``V4L2_CAP_VIDEO_OVERLAY``
-
-       -  The :ref:`video overlay <overlay>` interface is supported.
-
-    -  .. row 6
-
-       -  ``VID_TYPE_CHROMAKEY``
-
-       -  ``V4L2_FBUF_CAP_CHROMAKEY`` in field ``capability`` of struct
-	  :ref:`v4l2_framebuffer <v4l2-framebuffer>`
-
-       -  Whether chromakey overlay is supported. For more information on
-	  overlay see :ref:`overlay`.
-
-    -  .. row 7
-
-       -  ``VID_TYPE_CLIPPING``
-
-       -  ``V4L2_FBUF_CAP_LIST_CLIPPING`` and
-	  ``V4L2_FBUF_CAP_BITMAP_CLIPPING`` in field ``capability`` of
-	  struct :ref:`v4l2_framebuffer <v4l2-framebuffer>`
-
-       -  Whether clipping the overlaid image is supported, see
-	  :ref:`overlay`.
-
-    -  .. row 8
-
-       -  ``VID_TYPE_FRAMERAM``
-
-       -  ``V4L2_FBUF_CAP_EXTERNOVERLAY`` *not set* in field ``capability``
-	  of struct :ref:`v4l2_framebuffer <v4l2-framebuffer>`
-
-       -  Whether overlay overwrites frame buffer memory, see
-	  :ref:`overlay`.
-
-    -  .. row 9
-
-       -  ``VID_TYPE_SCALES``
-
-       -  ``-``
-
-       -  This flag indicates if the hardware can scale images. The V4L2 API
-	  implies the scale factor by setting the cropping dimensions and
-	  image size with the :ref:`VIDIOC_S_CROP <VIDIOC_G_CROP>` and
-	  :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl, respectively. The
-	  driver returns the closest sizes possible. For more information on
-	  cropping and scaling see :ref:`crop`.
-
-    -  .. row 10
-
-       -  ``VID_TYPE_MONOCHROME``
-
-       -  ``-``
-
-       -  Applications can enumerate the supported image formats with the
-	  :ref:`VIDIOC_ENUM_FMT` ioctl to determine if
-	  the device supports grey scale capturing only. For more
-	  information on image formats see :ref:`pixfmt`.
-
-    -  .. row 11
-
-       -  ``VID_TYPE_SUBCAPTURE``
-
-       -  ``-``
-
-       -  Applications can call the :ref:`VIDIOC_G_CROP <VIDIOC_G_CROP>`
-	  ioctl to determine if the device supports capturing a subsection
-	  of the full picture ("cropping" in V4L2). If not, the ioctl
-	  returns the ``EINVAL`` error code. For more information on cropping
-	  and scaling see :ref:`crop`.
-
-    -  .. row 12
-
-       -  ``VID_TYPE_MPEG_DECODER``
-
-       -  ``-``
-
-       -  Applications can enumerate the supported image formats with the
-	  :ref:`VIDIOC_ENUM_FMT` ioctl to determine if
-	  the device supports MPEG streams.
-
-    -  .. row 13
-
-       -  ``VID_TYPE_MPEG_ENCODER``
-
-       -  ``-``
-
-       -  See above.
-
-    -  .. row 14
-
-       -  ``VID_TYPE_MJPEG_DECODER``
-
-       -  ``-``
-
-       -  See above.
-
-    -  .. row 15
-
-       -  ``VID_TYPE_MJPEG_ENCODER``
-
-       -  ``-``
-
-       -  See above.
+    * - ``struct video_capability`` ``type``
+      - struct :c:type:`v4l2_capability`
+	``capabilities`` flags
+      - Purpose
+    * - ``VID_TYPE_CAPTURE``
+      - ``V4L2_CAP_VIDEO_CAPTURE``
+      - The :ref:`video capture <capture>` interface is supported.
+    * - ``VID_TYPE_TUNER``
+      - ``V4L2_CAP_TUNER``
+      - The device has a :ref:`tuner or modulator <tuner>`.
+    * - ``VID_TYPE_TELETEXT``
+      - ``V4L2_CAP_VBI_CAPTURE``
+      - The :ref:`raw VBI capture <raw-vbi>` interface is supported.
+    * - ``VID_TYPE_OVERLAY``
+      - ``V4L2_CAP_VIDEO_OVERLAY``
+      - The :ref:`video overlay <overlay>` interface is supported.
+    * - ``VID_TYPE_CHROMAKEY``
+      - ``V4L2_FBUF_CAP_CHROMAKEY`` in field ``capability`` of struct
+	:c:type:`v4l2_framebuffer`
+      - Whether chromakey overlay is supported. For more information on
+	overlay see :ref:`overlay`.
+    * - ``VID_TYPE_CLIPPING``
+      - ``V4L2_FBUF_CAP_LIST_CLIPPING`` and
+	``V4L2_FBUF_CAP_BITMAP_CLIPPING`` in field ``capability`` of
+	struct :c:type:`v4l2_framebuffer`
+      - Whether clipping the overlaid image is supported, see
+	:ref:`overlay`.
+    * - ``VID_TYPE_FRAMERAM``
+      - ``V4L2_FBUF_CAP_EXTERNOVERLAY`` *not set* in field ``capability``
+	of struct :c:type:`v4l2_framebuffer`
+      - Whether overlay overwrites frame buffer memory, see
+	:ref:`overlay`.
+    * - ``VID_TYPE_SCALES``
+      - ``-``
+      - This flag indicates if the hardware can scale images. The V4L2 API
+	implies the scale factor by setting the cropping dimensions and
+	image size with the :ref:`VIDIOC_S_CROP <VIDIOC_G_CROP>` and
+	:ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl, respectively. The
+	driver returns the closest sizes possible. For more information on
+	cropping and scaling see :ref:`crop`.
+    * - ``VID_TYPE_MONOCHROME``
+      - ``-``
+      - Applications can enumerate the supported image formats with the
+	:ref:`VIDIOC_ENUM_FMT` ioctl to determine if
+	the device supports grey scale capturing only. For more
+	information on image formats see :ref:`pixfmt`.
+    * - ``VID_TYPE_SUBCAPTURE``
+      - ``-``
+      - Applications can call the :ref:`VIDIOC_G_CROP <VIDIOC_G_CROP>`
+	ioctl to determine if the device supports capturing a subsection
+	of the full picture ("cropping" in V4L2). If not, the ioctl
+	returns the ``EINVAL`` error code. For more information on cropping
+	and scaling see :ref:`crop`.
+    * - ``VID_TYPE_MPEG_DECODER``
+      - ``-``
+      - Applications can enumerate the supported image formats with the
+	:ref:`VIDIOC_ENUM_FMT` ioctl to determine if
+	the device supports MPEG streams.
+    * - ``VID_TYPE_MPEG_ENCODER``
+      - ``-``
+      - See above.
+    * - ``VID_TYPE_MJPEG_DECODER``
+      - ``-``
+      - See above.
+    * - ``VID_TYPE_MJPEG_ENCODER``
+      - ``-``
+      - See above.
 
 
 The ``audios`` field was replaced by ``capabilities`` flag
@@ -262,12 +169,12 @@
 =============
 
 V4L provides the ``VIDIOCGCHAN`` and ``VIDIOCSCHAN`` ioctl using struct
-:c:type:`struct video_channel` to enumerate the video inputs of a V4L
+``video_channel`` to enumerate the video inputs of a V4L
 device. The equivalent V4L2 ioctls are
 :ref:`VIDIOC_ENUMINPUT`,
 :ref:`VIDIOC_G_INPUT <VIDIOC_G_INPUT>` and
 :ref:`VIDIOC_S_INPUT <VIDIOC_G_INPUT>` using struct
-:ref:`v4l2_input <v4l2-input>` as discussed in :ref:`video`.
+:c:type:`v4l2_input` as discussed in :ref:`video`.
 
 The ``channel`` field counting inputs was renamed to ``index``, the
 video input types were renamed as follows:
@@ -278,24 +185,12 @@
     :header-rows:  1
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  struct :c:type:`struct video_channel` ``type``
-
-       -  struct :ref:`v4l2_input <v4l2-input>` ``type``
-
-    -  .. row 2
-
-       -  ``VIDEO_TYPE_TV``
-
-       -  ``V4L2_INPUT_TYPE_TUNER``
-
-    -  .. row 3
-
-       -  ``VIDEO_TYPE_CAMERA``
-
-       -  ``V4L2_INPUT_TYPE_CAMERA``
+    * - struct ``video_channel`` ``type``
+      - struct :c:type:`v4l2_input` ``type``
+    * - ``VIDEO_TYPE_TV``
+      - ``V4L2_INPUT_TYPE_TUNER``
+    * - ``VIDEO_TYPE_CAMERA``
+      - ``V4L2_INPUT_TYPE_CAMERA``
 
 
 Unlike the ``tuners`` field expressing the number of tuners of this
@@ -303,7 +198,7 @@
 However a tuner can have more than one input, i. e. RF connectors, and a
 device can have multiple tuners. The index number of the tuner
 associated with the input, if any, is stored in field ``tuner`` of
-struct :ref:`v4l2_input <v4l2-input>`. Enumeration of tuners is
+struct :c:type:`v4l2_input`. Enumeration of tuners is
 discussed in :ref:`tuner`.
 
 The redundant ``VIDEO_VC_TUNER`` flag was dropped. Video inputs
@@ -326,11 +221,11 @@
 ======
 
 The V4L ``VIDIOCGTUNER`` and ``VIDIOCSTUNER`` ioctl and struct
-:c:type:`struct video_tuner` can be used to enumerate the tuners of a
+``video_tuner`` can be used to enumerate the tuners of a
 V4L TV or radio device. The equivalent V4L2 ioctls are
 :ref:`VIDIOC_G_TUNER <VIDIOC_G_TUNER>` and
 :ref:`VIDIOC_S_TUNER <VIDIOC_G_TUNER>` using struct
-:ref:`v4l2_tuner <v4l2-tuner>`. Tuners are covered in :ref:`tuner`.
+:c:type:`v4l2_tuner`. Tuners are covered in :ref:`tuner`.
 
 The ``tuner`` field counting tuners was renamed to ``index``. The fields
 ``name``, ``rangelow`` and ``rangehigh`` remained unchanged.
@@ -338,7 +233,7 @@
 The ``VIDEO_TUNER_PAL``, ``VIDEO_TUNER_NTSC`` and ``VIDEO_TUNER_SECAM``
 flags indicating the supported video standards were dropped. This
 information is now contained in the associated struct
-:ref:`v4l2_input <v4l2-input>`. No replacement exists for the
+:c:type:`v4l2_input`. No replacement exists for the
 ``VIDEO_TUNER_NORM`` flag indicating whether the video standard can be
 switched. The ``mode`` field to select a different video standard was
 replaced by a whole new set of ioctls and structures described in
@@ -351,18 +246,18 @@
 The ``VIDEO_TUNER_STEREO_ON`` flag indicating stereo reception became
 ``V4L2_TUNER_SUB_STEREO`` in field ``rxsubchans``. This field also
 permits the detection of monaural and bilingual audio, see the
-definition of struct :ref:`v4l2_tuner <v4l2-tuner>` for details.
+definition of struct :c:type:`v4l2_tuner` for details.
 Presently no replacement exists for the ``VIDEO_TUNER_RDS_ON`` and
 ``VIDEO_TUNER_MBS_ON`` flags.
 
 The ``VIDEO_TUNER_LOW`` flag was renamed to ``V4L2_TUNER_CAP_LOW`` in
-the struct :ref:`v4l2_tuner <v4l2-tuner>` ``capability`` field.
+the struct :c:type:`v4l2_tuner` ``capability`` field.
 
 The ``VIDIOCGFREQ`` and ``VIDIOCSFREQ`` ioctl to change the tuner
 frequency where renamed to
 :ref:`VIDIOC_G_FREQUENCY <VIDIOC_G_FREQUENCY>` and
 :ref:`VIDIOC_S_FREQUENCY <VIDIOC_G_FREQUENCY>`. They take a pointer
-to a struct :ref:`v4l2_frequency <v4l2-frequency>` instead of an
+to a struct :c:type:`v4l2_frequency` instead of an
 unsigned long integer.
 
 
@@ -372,7 +267,7 @@
 ================
 
 V4L2 has no equivalent of the ``VIDIOCGPICT`` and ``VIDIOCSPICT`` ioctl
-and struct :c:type:`struct video_picture`. The following fields where
+and struct ``video_picture``. The following fields where
 replaced by V4L2 controls accessible with the
 :ref:`VIDIOC_QUERYCTRL`,
 :ref:`VIDIOC_G_CTRL <VIDIOC_G_CTRL>` and
@@ -384,42 +279,18 @@
     :header-rows:  1
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  struct :c:type:`struct video_picture`
-
-       -  V4L2 Control ID
-
-    -  .. row 2
-
-       -  ``brightness``
-
-       -  ``V4L2_CID_BRIGHTNESS``
-
-    -  .. row 3
-
-       -  ``hue``
-
-       -  ``V4L2_CID_HUE``
-
-    -  .. row 4
-
-       -  ``colour``
-
-       -  ``V4L2_CID_SATURATION``
-
-    -  .. row 5
-
-       -  ``contrast``
-
-       -  ``V4L2_CID_CONTRAST``
-
-    -  .. row 6
-
-       -  ``whiteness``
-
-       -  ``V4L2_CID_WHITENESS``
+    * - struct ``video_picture``
+      - V4L2 Control ID
+    * - ``brightness``
+      - ``V4L2_CID_BRIGHTNESS``
+    * - ``hue``
+      - ``V4L2_CID_HUE``
+    * - ``colour``
+      - ``V4L2_CID_SATURATION``
+    * - ``contrast``
+      - ``V4L2_CID_CONTRAST``
+    * - ``whiteness``
+      - ``V4L2_CID_WHITENESS``
 
 
 The V4L picture controls are assumed to range from 0 to 65535 with no
@@ -432,7 +303,7 @@
 implied by the selected image format. V4L2 does not explicitly provide
 such information assuming applications recognizing the format are aware
 of the image depth and others need not know. The ``palette`` field moved
-into the struct :ref:`v4l2_pix_format <v4l2-pix-format>`:
+into the struct :c:type:`v4l2_pix_format`:
 
 
 
@@ -440,108 +311,40 @@
     :header-rows:  1
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  struct :c:type:`struct video_picture` ``palette``
-
-       -  struct :ref:`v4l2_pix_format <v4l2-pix-format>` ``pixfmt``
-
-    -  .. row 2
-
-       -  ``VIDEO_PALETTE_GREY``
-
-       -  :ref:`V4L2_PIX_FMT_GREY <V4L2-PIX-FMT-GREY>`
-
-    -  .. row 3
-
-       -  ``VIDEO_PALETTE_HI240``
-
-       -  :ref:`V4L2_PIX_FMT_HI240 <pixfmt-reserved>` [#f3]_
-
-    -  .. row 4
-
-       -  ``VIDEO_PALETTE_RGB565``
-
-       -  :ref:`V4L2_PIX_FMT_RGB565 <pixfmt-rgb>`
-
-    -  .. row 5
-
-       -  ``VIDEO_PALETTE_RGB555``
-
-       -  :ref:`V4L2_PIX_FMT_RGB555 <pixfmt-rgb>`
-
-    -  .. row 6
-
-       -  ``VIDEO_PALETTE_RGB24``
-
-       -  :ref:`V4L2_PIX_FMT_BGR24 <pixfmt-rgb>`
-
-    -  .. row 7
-
-       -  ``VIDEO_PALETTE_RGB32``
-
-       -  :ref:`V4L2_PIX_FMT_BGR32 <pixfmt-rgb>` [#f4]_
-
-    -  .. row 8
-
-       -  ``VIDEO_PALETTE_YUV422``
-
-       -  :ref:`V4L2_PIX_FMT_YUYV <V4L2-PIX-FMT-YUYV>`
-
-    -  .. row 9
-
-       -  ``VIDEO_PALETTE_YUYV``\  [#f5]_
-
-       -  :ref:`V4L2_PIX_FMT_YUYV <V4L2-PIX-FMT-YUYV>`
-
-    -  .. row 10
-
-       -  ``VIDEO_PALETTE_UYVY``
-
-       -  :ref:`V4L2_PIX_FMT_UYVY <V4L2-PIX-FMT-UYVY>`
-
-    -  .. row 11
-
-       -  ``VIDEO_PALETTE_YUV420``
-
-       -  None
-
-    -  .. row 12
-
-       -  ``VIDEO_PALETTE_YUV411``
-
-       -  :ref:`V4L2_PIX_FMT_Y41P <V4L2-PIX-FMT-Y41P>` [#f6]_
-
-    -  .. row 13
-
-       -  ``VIDEO_PALETTE_RAW``
-
-       -  None [#f7]_
-
-    -  .. row 14
-
-       -  ``VIDEO_PALETTE_YUV422P``
-
-       -  :ref:`V4L2_PIX_FMT_YUV422P <V4L2-PIX-FMT-YUV422P>`
-
-    -  .. row 15
-
-       -  ``VIDEO_PALETTE_YUV411P``
-
-       -  :ref:`V4L2_PIX_FMT_YUV411P <V4L2-PIX-FMT-YUV411P>` [#f8]_
-
-    -  .. row 16
-
-       -  ``VIDEO_PALETTE_YUV420P``
-
-       -  :ref:`V4L2_PIX_FMT_YVU420 <V4L2-PIX-FMT-YVU420>`
-
-    -  .. row 17
-
-       -  ``VIDEO_PALETTE_YUV410P``
-
-       -  :ref:`V4L2_PIX_FMT_YVU410 <V4L2-PIX-FMT-YVU410>`
+    * - struct ``video_picture`` ``palette``
+      - struct :c:type:`v4l2_pix_format` ``pixfmt``
+    * - ``VIDEO_PALETTE_GREY``
+      - :ref:`V4L2_PIX_FMT_GREY <V4L2-PIX-FMT-GREY>`
+    * - ``VIDEO_PALETTE_HI240``
+      - :ref:`V4L2_PIX_FMT_HI240 <pixfmt-reserved>` [#f3]_
+    * - ``VIDEO_PALETTE_RGB565``
+      - :ref:`V4L2_PIX_FMT_RGB565 <pixfmt-rgb>`
+    * - ``VIDEO_PALETTE_RGB555``
+      - :ref:`V4L2_PIX_FMT_RGB555 <pixfmt-rgb>`
+    * - ``VIDEO_PALETTE_RGB24``
+      - :ref:`V4L2_PIX_FMT_BGR24 <pixfmt-rgb>`
+    * - ``VIDEO_PALETTE_RGB32``
+      - :ref:`V4L2_PIX_FMT_BGR32 <pixfmt-rgb>` [#f4]_
+    * - ``VIDEO_PALETTE_YUV422``
+      - :ref:`V4L2_PIX_FMT_YUYV <V4L2-PIX-FMT-YUYV>`
+    * - ``VIDEO_PALETTE_YUYV``\  [#f5]_
+      - :ref:`V4L2_PIX_FMT_YUYV <V4L2-PIX-FMT-YUYV>`
+    * - ``VIDEO_PALETTE_UYVY``
+      - :ref:`V4L2_PIX_FMT_UYVY <V4L2-PIX-FMT-UYVY>`
+    * - ``VIDEO_PALETTE_YUV420``
+      - None
+    * - ``VIDEO_PALETTE_YUV411``
+      - :ref:`V4L2_PIX_FMT_Y41P <V4L2-PIX-FMT-Y41P>` [#f6]_
+    * - ``VIDEO_PALETTE_RAW``
+      - None [#f7]_
+    * - ``VIDEO_PALETTE_YUV422P``
+      - :ref:`V4L2_PIX_FMT_YUV422P <V4L2-PIX-FMT-YUV422P>`
+    * - ``VIDEO_PALETTE_YUV411P``
+      - :ref:`V4L2_PIX_FMT_YUV411P <V4L2-PIX-FMT-YUV411P>` [#f8]_
+    * - ``VIDEO_PALETTE_YUV420P``
+      - :ref:`V4L2_PIX_FMT_YVU420 <V4L2-PIX-FMT-YVU420>`
+    * - ``VIDEO_PALETTE_YUV410P``
+      - :ref:`V4L2_PIX_FMT_YVU410 <V4L2-PIX-FMT-YVU410>`
 
 
 V4L2 image formats are defined in :ref:`pixfmt`. The image format can
@@ -552,11 +355,11 @@
 =====
 
 The ``VIDIOCGAUDIO`` and ``VIDIOCSAUDIO`` ioctl and struct
-:c:type:`struct video_audio` are used to enumerate the audio inputs
+``video_audio`` are used to enumerate the audio inputs
 of a V4L device. The equivalent V4L2 ioctls are
 :ref:`VIDIOC_G_AUDIO <VIDIOC_G_AUDIO>` and
 :ref:`VIDIOC_S_AUDIO <VIDIOC_G_AUDIO>` using struct
-:ref:`v4l2_audio <v4l2-audio>` as discussed in :ref:`audio`.
+:c:type:`v4l2_audio` as discussed in :ref:`audio`.
 
 The ``audio`` "channel number" field counting audio inputs was renamed
 to ``index``.
@@ -569,10 +372,10 @@
 specification, there is no way to query the selected mode. On
 ``VIDIOCGAUDIO`` the driver returns the *actually received* audio
 programmes in this field. In the V4L2 API this information is stored in
-the struct :ref:`v4l2_tuner <v4l2-tuner>` ``rxsubchans`` and
+the struct :c:type:`v4l2_tuner` ``rxsubchans`` and
 ``audmode`` fields, respectively. See :ref:`tuner` for more
 information on tuners. Related to audio modes struct
-:ref:`v4l2_audio <v4l2-audio>` also reports if this is a mono or
+:c:type:`v4l2_audio` also reports if this is a mono or
 stereo input, regardless if the source is a tuner.
 
 The following fields where replaced by V4L2 controls accessible with the
@@ -586,36 +389,16 @@
     :header-rows:  1
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  struct :c:type:`struct video_audio`
-
-       -  V4L2 Control ID
-
-    -  .. row 2
-
-       -  ``volume``
-
-       -  ``V4L2_CID_AUDIO_VOLUME``
-
-    -  .. row 3
-
-       -  ``bass``
-
-       -  ``V4L2_CID_AUDIO_BASS``
-
-    -  .. row 4
-
-       -  ``treble``
-
-       -  ``V4L2_CID_AUDIO_TREBLE``
-
-    -  .. row 5
-
-       -  ``balance``
-
-       -  ``V4L2_CID_AUDIO_BALANCE``
+    * - struct ``video_audio``
+      - V4L2 Control ID
+    * - ``volume``
+      - ``V4L2_CID_AUDIO_VOLUME``
+    * - ``bass``
+      - ``V4L2_CID_AUDIO_BASS``
+    * - ``treble``
+      - ``V4L2_CID_AUDIO_TREBLE``
+    * - ``balance``
+      - ``V4L2_CID_AUDIO_BALANCE``
 
 
 To determine which of these controls are supported by a driver V4L
@@ -627,7 +410,7 @@
 ``V4L2_CID_AUDIO_MUTE`` control.
 
 All V4L2 controls have a ``step`` attribute replacing the struct
-:c:type:`struct video_audio` ``step`` field. The V4L audio controls
+``video_audio`` ``step`` field. The V4L audio controls
 are assumed to range from 0 to 65535 with no particular reset value. The
 V4L2 API permits arbitrary limits and defaults which can be queried with
 the :ref:`VIDIOC_QUERYCTRL` ioctl. For general
@@ -640,11 +423,11 @@
 The V4L2 ioctls equivalent to ``VIDIOCGFBUF`` and ``VIDIOCSFBUF`` are
 :ref:`VIDIOC_G_FBUF <VIDIOC_G_FBUF>` and
 :ref:`VIDIOC_S_FBUF <VIDIOC_G_FBUF>`. The ``base`` field of struct
-:c:type:`struct video_buffer` remained unchanged, except V4L2 defines
+``video_buffer`` remained unchanged, except V4L2 defines
 a flag to indicate non-destructive overlays instead of a ``NULL``
 pointer. All other fields moved into the struct
-:ref:`v4l2_pix_format <v4l2-pix-format>` ``fmt`` substructure of
-struct :ref:`v4l2_framebuffer <v4l2-framebuffer>`. The ``depth``
+:c:type:`v4l2_pix_format` ``fmt`` substructure of
+struct :c:type:`v4l2_framebuffer`. The ``depth``
 field was replaced by ``pixelformat``. See :ref:`pixfmt-rgb` for a
 list of RGB formats and their respective color depths.
 
@@ -652,28 +435,28 @@
 uses the general-purpose data format negotiation ioctls
 :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` and
 :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>`. They take a pointer to a struct
-:ref:`v4l2_format <v4l2-format>` as argument. Here the ``win`` member
+:c:type:`v4l2_format` as argument. Here the ``win`` member
 of the ``fmt`` union is used, a struct
-:ref:`v4l2_window <v4l2-window>`.
+:c:type:`v4l2_window`.
 
 The ``x``, ``y``, ``width`` and ``height`` fields of struct
-:c:type:`struct video_window` moved into struct
-:ref:`v4l2_rect <v4l2-rect>` substructure ``w`` of struct
-:c:type:`struct v4l2_window`. The ``chromakey``, ``clips``, and
+``video_window`` moved into struct
+:c:type:`v4l2_rect` substructure ``w`` of struct
+:c:type:`v4l2_window`. The ``chromakey``, ``clips``, and
 ``clipcount`` fields remained unchanged. Struct
-:c:type:`struct video_clip` was renamed to struct
-:ref:`v4l2_clip <v4l2-clip>`, also containing a struct
-:c:type:`struct v4l2_rect`, but the semantics are still the same.
+``video_clip`` was renamed to struct
+:c:type:`v4l2_clip`, also containing a struct
+:c:type:`v4l2_rect`, but the semantics are still the same.
 
 The ``VIDEO_WINDOW_INTERLACE`` flag was dropped. Instead applications
 must set the ``field`` field to ``V4L2_FIELD_ANY`` or
 ``V4L2_FIELD_INTERLACED``. The ``VIDEO_WINDOW_CHROMAKEY`` flag moved
-into struct :ref:`v4l2_framebuffer <v4l2-framebuffer>`, under the new
+into struct :c:type:`v4l2_framebuffer`, under the new
 name ``V4L2_FBUF_FLAG_CHROMAKEY``.
 
 In V4L, storing a bitmap pointer in ``clips`` and setting ``clipcount``
 to ``VIDEO_CLIP_BITMAP`` (-1) requests bitmap clipping, using a fixed
-size bitmap of 1024 × 625 bits. Struct :c:type:`struct v4l2_window`
+size bitmap of 1024 × 625 bits. Struct :c:type:`v4l2_window`
 has a separate ``bitmap`` pointer field for this purpose and the bitmap
 size is determined by ``w.width`` and ``w.height``.
 
@@ -686,24 +469,24 @@
 
 To capture only a subsection of the full picture V4L defines the
 ``VIDIOCGCAPTURE`` and ``VIDIOCSCAPTURE`` ioctls using struct
-:c:type:`struct video_capture`. The equivalent V4L2 ioctls are
+``video_capture``. The equivalent V4L2 ioctls are
 :ref:`VIDIOC_G_CROP <VIDIOC_G_CROP>` and
 :ref:`VIDIOC_S_CROP <VIDIOC_G_CROP>` using struct
-:ref:`v4l2_crop <v4l2-crop>`, and the related
+:c:type:`v4l2_crop`, and the related
 :ref:`VIDIOC_CROPCAP` ioctl. This is a rather
 complex matter, see :ref:`crop` for details.
 
 The ``x``, ``y``, ``width`` and ``height`` fields moved into struct
-:ref:`v4l2_rect <v4l2-rect>` substructure ``c`` of struct
-:c:type:`struct v4l2_crop`. The ``decimation`` field was dropped. In
+:c:type:`v4l2_rect` substructure ``c`` of struct
+:c:type:`v4l2_crop`. The ``decimation`` field was dropped. In
 the V4L2 API the scaling factor is implied by the size of the cropping
 rectangle and the size of the captured or overlaid image.
 
 The ``VIDEO_CAPTURE_ODD`` and ``VIDEO_CAPTURE_EVEN`` flags to capture
 only the odd or even field, respectively, were replaced by
 ``V4L2_FIELD_TOP`` and ``V4L2_FIELD_BOTTOM`` in the field named
-``field`` of struct :ref:`v4l2_pix_format <v4l2-pix-format>` and
-struct :ref:`v4l2_window <v4l2-window>`. These structures are used to
+``field`` of struct :c:type:`v4l2_pix_format` and
+struct :c:type:`v4l2_window`. These structures are used to
 select a capture or overlay format with the
 :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl.
 
@@ -728,8 +511,8 @@
 ``VIDIOCSWIN`` ioctls. V4L2 uses the general-purpose data format
 negotiation ioctls :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` and
 :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>`. They take a pointer to a struct
-:ref:`v4l2_format <v4l2-format>` as argument, here the struct
-:ref:`v4l2_pix_format <v4l2-pix-format>` named ``pix`` of its
+:c:type:`v4l2_format` as argument, here the struct
+:c:type:`v4l2_pix_format` named ``pix`` of its
 ``fmt`` union is used.
 
 For more information about the V4L2 read interface see :ref:`rw`.
@@ -750,68 +533,49 @@
     :header-rows:  1
     :stub-columns: 0
 
+    * - V4L
+      - V4L2
+    * -
+      - The image format must be selected before buffers are allocated,
+	with the :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl. When no
+	format is selected the driver may use the last, possibly by
+	another application requested format.
+    * - Applications cannot change the number of buffers. The it is built
+	into the driver, unless it has a module option to change the
+	number when the driver module is loaded.
+      - The :ref:`VIDIOC_REQBUFS` ioctl allocates the
+	desired number of buffers, this is a required step in the
+	initialization sequence.
+    * - Drivers map all buffers as one contiguous range of memory. The
+	``VIDIOCGMBUF`` ioctl is available to query the number of buffers,
+	the offset of each buffer from the start of the virtual file, and
+	the overall amount of memory used, which can be used as arguments
+	for the :ref:`mmap() <func-mmap>` function.
+      - Buffers are individually mapped. The offset and size of each
+	buffer can be determined with the
+	:ref:`VIDIOC_QUERYBUF` ioctl.
+    * - The ``VIDIOCMCAPTURE`` ioctl prepares a buffer for capturing. It
+	also determines the image format for this buffer. The ioctl
+	returns immediately, eventually with an ``EAGAIN`` error code if no
+	video signal had been detected. When the driver supports more than
+	one buffer applications can call the ioctl multiple times and thus
+	have multiple outstanding capture requests.
 
-    -  .. row 1
-
-       -  V4L
-
-       -  V4L2
-
-    -  .. row 2
-
-       -
-       -  The image format must be selected before buffers are allocated,
-	  with the :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl. When no
-	  format is selected the driver may use the last, possibly by
-	  another application requested format.
-
-    -  .. row 3
-
-       -  Applications cannot change the number of buffers. The it is built
-	  into the driver, unless it has a module option to change the
-	  number when the driver module is loaded.
-
-       -  The :ref:`VIDIOC_REQBUFS` ioctl allocates the
-	  desired number of buffers, this is a required step in the
-	  initialization sequence.
-
-    -  .. row 4
-
-       -  Drivers map all buffers as one contiguous range of memory. The
-	  ``VIDIOCGMBUF`` ioctl is available to query the number of buffers,
-	  the offset of each buffer from the start of the virtual file, and
-	  the overall amount of memory used, which can be used as arguments
-	  for the :ref:`mmap() <func-mmap>` function.
-
-       -  Buffers are individually mapped. The offset and size of each
-	  buffer can be determined with the
-	  :ref:`VIDIOC_QUERYBUF` ioctl.
-
-    -  .. row 5
-
-       -  The ``VIDIOCMCAPTURE`` ioctl prepares a buffer for capturing. It
-	  also determines the image format for this buffer. The ioctl
-	  returns immediately, eventually with an ``EAGAIN`` error code if no
-	  video signal had been detected. When the driver supports more than
-	  one buffer applications can call the ioctl multiple times and thus
-	  have multiple outstanding capture requests.
-
-	  The ``VIDIOCSYNC`` ioctl suspends execution until a particular
-	  buffer has been filled.
-
-       -  Drivers maintain an incoming and outgoing queue.
-	  :ref:`VIDIOC_QBUF` enqueues any empty buffer into
-	  the incoming queue. Filled buffers are dequeued from the outgoing
-	  queue with the :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl. To wait
-	  until filled buffers become available this function,
-	  :ref:`select() <func-select>` or :ref:`poll() <func-poll>` can
-	  be used. The :ref:`VIDIOC_STREAMON` ioctl
-	  must be called once after enqueuing one or more buffers to start
-	  capturing. Its counterpart
-	  :ref:`VIDIOC_STREAMOFF <VIDIOC_STREAMON>` stops capturing and
-	  dequeues all buffers from both queues. Applications can query the
-	  signal status, if known, with the
-	  :ref:`VIDIOC_ENUMINPUT` ioctl.
+	The ``VIDIOCSYNC`` ioctl suspends execution until a particular
+	buffer has been filled.
+      - Drivers maintain an incoming and outgoing queue.
+	:ref:`VIDIOC_QBUF` enqueues any empty buffer into
+	the incoming queue. Filled buffers are dequeued from the outgoing
+	queue with the :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl. To wait
+	until filled buffers become available this function,
+	:ref:`select() <func-select>` or :ref:`poll() <func-poll>` can
+	be used. The :ref:`VIDIOC_STREAMON` ioctl
+	must be called once after enqueuing one or more buffers to start
+	capturing. Its counterpart
+	:ref:`VIDIOC_STREAMOFF <VIDIOC_STREAMON>` stops capturing and
+	dequeues all buffers from both queues. Applications can query the
+	signal status, if known, with the
+	:ref:`VIDIOC_ENUMINPUT` ioctl.
 
 
 For a more in-depth discussion of memory mapping and examples, see
@@ -833,68 +597,36 @@
     :header-rows:  1
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  struct :ref:`v4l2_vbi_format <v4l2-vbi-format>`
-
-       -  V4L, BTTV driver
-
-    -  .. row 2
-
-       -  sampling_rate
-
-       -  28636363 Hz NTSC (or any other 525-line standard); 35468950 Hz PAL
-	  and SECAM (625-line standards)
-
-    -  .. row 3
-
-       -  offset
-
-       -  ?
-
-    -  .. row 4
-
-       -  samples_per_line
-
-       -  2048
-
-    -  .. row 5
-
-       -  sample_format
-
-       -  V4L2_PIX_FMT_GREY. The last four bytes (a machine endianness
-	  integer) contain a frame counter.
-
-    -  .. row 6
-
-       -  start[]
-
-       -  10, 273 NTSC; 22, 335 PAL and SECAM
-
-    -  .. row 7
-
-       -  count[]
-
-       -  16, 16 [#f9]_
-
-    -  .. row 8
-
-       -  flags
-
-       -  0
+    * - struct :c:type:`v4l2_vbi_format`
+      - V4L, BTTV driver
+    * - sampling_rate
+      - 28636363 Hz NTSC (or any other 525-line standard); 35468950 Hz PAL
+	and SECAM (625-line standards)
+    * - offset
+      - ?
+    * - samples_per_line
+      - 2048
+    * - sample_format
+      - V4L2_PIX_FMT_GREY. The last four bytes (a machine endianness
+	integer) contain a frame counter.
+    * - start[]
+      - 10, 273 NTSC; 22, 335 PAL and SECAM
+    * - count[]
+      - 16, 16 [#f9]_
+    * - flags
+      - 0
 
 
 Undocumented in the V4L specification, in Linux 2.3 the
 ``VIDIOCGVBIFMT`` and ``VIDIOCSVBIFMT`` ioctls using struct
-:c:type:`struct vbi_format` were added to determine the VBI image
+``vbi_format`` were added to determine the VBI image
 parameters. These ioctls are only partially compatible with the V4L2 VBI
 interface specified in :ref:`raw-vbi`.
 
 An ``offset`` field does not exist, ``sample_format`` is supposed to be
 ``VIDEO_PALETTE_RAW``, equivalent to ``V4L2_PIX_FMT_GREY``. The
 remaining fields are probably equivalent to struct
-:ref:`v4l2_vbi_format <v4l2-vbi-format>`.
+:c:type:`v4l2_vbi_format`.
 
 Apparently only the Zoran (ZR 36120) driver implements these ioctls. The
 semantics differ from those specified for V4L2 in two ways. The
diff --git a/Documentation/media/uapi/v4l/dmabuf.rst b/Documentation/media/uapi/v4l/dmabuf.rst
index 675768f..4e980a7e 100644
--- a/Documentation/media/uapi/v4l/dmabuf.rst
+++ b/Documentation/media/uapi/v4l/dmabuf.rst
@@ -19,7 +19,7 @@
 
 Input and output devices support the streaming I/O method when the
 ``V4L2_CAP_STREAMING`` flag in the ``capabilities`` field of struct
-:ref:`v4l2_capability <v4l2-capability>` returned by the
+:c:type:`v4l2_capability` returned by the
 :ref:`VIDIOC_QUERYCAP <VIDIOC_QUERYCAP>` ioctl is set. Whether
 importing DMA buffers through DMABUF file descriptors is supported is
 determined by calling the :ref:`VIDIOC_REQBUFS <VIDIOC_REQBUFS>`
@@ -31,8 +31,8 @@
 application. Next, these buffers are exported to the application as file
 descriptors using an API which is specific for an allocator driver. Only
 such file descriptor are exchanged. The descriptors and meta-information
-are passed in struct :ref:`v4l2_buffer <v4l2-buffer>` (or in struct
-:ref:`v4l2_plane <v4l2-plane>` in the multi-planar API case). The
+are passed in struct :c:type:`v4l2_buffer` (or in struct
+:c:type:`v4l2_plane` in the multi-planar API case). The
 driver must be switched into DMABUF I/O mode by calling the
 :ref:`VIDIOC_REQBUFS <VIDIOC_REQBUFS>` with the desired buffer type.
 
@@ -151,7 +151,7 @@
    both queues and unlocks all buffers as a side effect. Since there is no
    notion of doing anything "now" on a multitasking system, if an
    application needs to synchronize with another event it should examine
-   the struct :ref:`v4l2_buffer <v4l2-buffer>` ``timestamp`` of captured or
+   the struct :c:type:`v4l2_buffer` ``timestamp`` of captured or
    outputted buffers.
 
 Drivers implementing DMABUF importing I/O must support the
diff --git a/Documentation/media/uapi/v4l/extended-controls.rst b/Documentation/media/uapi/v4l/extended-controls.rst
index 71071d7..7725c33 100644
--- a/Documentation/media/uapi/v4l/extended-controls.rst
+++ b/Documentation/media/uapi/v4l/extended-controls.rst
@@ -49,7 +49,7 @@
 several controls at once.
 
 Each of the new ioctls expects a pointer to a struct
-:ref:`v4l2_ext_controls <v4l2-ext-controls>`. This structure
+:c:type:`v4l2_ext_controls`. This structure
 contains a pointer to the control array, a count of the number of
 controls in that array and a control class. Control classes are used to
 group similar controls into a single class. For example, control class
@@ -65,12 +65,12 @@
 whether the specified control class is supported.
 
 The control array is a struct
-:ref:`v4l2_ext_control <v4l2-ext-control>` array. The
-:ref:`struct v4l2_ext_control <v4l2-ext-control>` structure is very similar to
-struct :ref:`v4l2_control <v4l2-control>`, except for the fact that
+:c:type:`v4l2_ext_control` array. The
+struct :c:type:`v4l2_ext_control` is very similar to
+struct :c:type:`v4l2_control`, except for the fact that
 it also allows for 64-bit values and pointers to be passed.
 
-Since the struct :ref:`v4l2_ext_control <v4l2-ext-control>` supports
+Since the struct :c:type:`v4l2_ext_control` supports
 pointers it is now also possible to have controls with compound types
 such as N-dimensional arrays and/or structures. You need to specify the
 ``V4L2_CTRL_FLAG_NEXT_COMPOUND`` when enumerating controls to actually
@@ -184,7 +184,9 @@
 Below all controls within the Codec control class are described. First
 the generic controls, then controls specific for certain hardware.
 
-.. note:: These controls are applicable to all codecs and not just MPEG. The
+.. note::
+
+   These controls are applicable to all codecs and not just MPEG. The
    defines are prefixed with V4L2_CID_MPEG/V4L2_MPEG as the controls
    were originally made for MPEG codecs and later extended to cover all
    encoding formats.
@@ -207,7 +209,10 @@
 
 .. _v4l2-mpeg-stream-type:
 
-``V4L2_CID_MPEG_STREAM_TYPE (enum v4l2_mpeg_stream_type)``
+``V4L2_CID_MPEG_STREAM_TYPE``
+    (enum)
+
+enum v4l2_mpeg_stream_type -
     The MPEG-1, -2 or -4 output stream type. One cannot assume anything
     here. Each hardware MPEG encoder tends to support different subsets
     of the available MPEG stream types. This control is specific to
@@ -219,42 +224,18 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_STREAM_TYPE_MPEG2_PS``
-
-       -  MPEG-2 program stream
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_STREAM_TYPE_MPEG2_TS``
-
-       -  MPEG-2 transport stream
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_STREAM_TYPE_MPEG1_SS``
-
-       -  MPEG-1 system stream
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_STREAM_TYPE_MPEG2_DVD``
-
-       -  MPEG-2 DVD-compatible stream
-
-    -  .. row 5
-
-       -  ``V4L2_MPEG_STREAM_TYPE_MPEG1_VCD``
-
-       -  MPEG-1 VCD-compatible stream
-
-    -  .. row 6
-
-       -  ``V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD``
-
-       -  MPEG-2 SVCD-compatible stream
+    * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_PS``
+      - MPEG-2 program stream
+    * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_TS``
+      - MPEG-2 transport stream
+    * - ``V4L2_MPEG_STREAM_TYPE_MPEG1_SS``
+      - MPEG-1 system stream
+    * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_DVD``
+      - MPEG-2 DVD-compatible stream
+    * - ``V4L2_MPEG_STREAM_TYPE_MPEG1_VCD``
+      - MPEG-1 VCD-compatible stream
+    * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD``
+      - MPEG-2 SVCD-compatible stream
 
 
 
@@ -280,7 +261,10 @@
 
 .. _v4l2-mpeg-stream-vbi-fmt:
 
-``V4L2_CID_MPEG_STREAM_VBI_FMT (enum v4l2_mpeg_stream_vbi_fmt)``
+``V4L2_CID_MPEG_STREAM_VBI_FMT``
+    (enum)
+
+enum v4l2_mpeg_stream_vbi_fmt -
     Some cards can embed VBI data (e. g. Closed Caption, Teletext) into
     the MPEG stream. This control selects whether VBI data should be
     embedded, and if so, what embedding method should be used. The list
@@ -289,30 +273,27 @@
 
 
 
+.. tabularcolumns:: |p{6 cm}|p{11.5cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_STREAM_VBI_FMT_NONE``
-
-       -  No VBI in the MPEG stream
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_STREAM_VBI_FMT_IVTV``
-
-       -  VBI in private packets, IVTV format (documented in the kernel
-	  sources in the file
-	  ``Documentation/video4linux/cx2341x/README.vbi``)
+    * - ``V4L2_MPEG_STREAM_VBI_FMT_NONE``
+      - No VBI in the MPEG stream
+    * - ``V4L2_MPEG_STREAM_VBI_FMT_IVTV``
+      - VBI in private packets, IVTV format (documented in the kernel
+	sources in the file
+	``Documentation/video4linux/cx2341x/README.vbi``)
 
 
 
 .. _v4l2-mpeg-audio-sampling-freq:
 
-``V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ (enum v4l2_mpeg_audio_sampling_freq)``
+``V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ``
+    (enum)
+
+enum v4l2_mpeg_audio_sampling_freq -
     MPEG Audio sampling frequency. Possible values are:
 
 
@@ -321,30 +302,21 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100``
-
-       -  44.1 kHz
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000``
-
-       -  48 kHz
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000``
-
-       -  32 kHz
+    * - ``V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100``
+      - 44.1 kHz
+    * - ``V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000``
+      - 48 kHz
+    * - ``V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000``
+      - 32 kHz
 
 
 
 .. _v4l2-mpeg-audio-encoding:
 
-``V4L2_CID_MPEG_AUDIO_ENCODING (enum v4l2_mpeg_audio_encoding)``
+``V4L2_CID_MPEG_AUDIO_ENCODING``
+    (enum)
+
+enum v4l2_mpeg_audio_encoding -
     MPEG Audio encoding. This control is specific to multiplexed MPEG
     streams. Possible values are:
 
@@ -354,42 +326,25 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_AUDIO_ENCODING_LAYER_1``
-
-       -  MPEG-1/2 Layer I encoding
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_AUDIO_ENCODING_LAYER_2``
-
-       -  MPEG-1/2 Layer II encoding
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_AUDIO_ENCODING_LAYER_3``
-
-       -  MPEG-1/2 Layer III encoding
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_AUDIO_ENCODING_AAC``
-
-       -  MPEG-2/4 AAC (Advanced Audio Coding)
-
-    -  .. row 5
-
-       -  ``V4L2_MPEG_AUDIO_ENCODING_AC3``
-
-       -  AC-3 aka ATSC A/52 encoding
+    * - ``V4L2_MPEG_AUDIO_ENCODING_LAYER_1``
+      - MPEG-1/2 Layer I encoding
+    * - ``V4L2_MPEG_AUDIO_ENCODING_LAYER_2``
+      - MPEG-1/2 Layer II encoding
+    * - ``V4L2_MPEG_AUDIO_ENCODING_LAYER_3``
+      - MPEG-1/2 Layer III encoding
+    * - ``V4L2_MPEG_AUDIO_ENCODING_AAC``
+      - MPEG-2/4 AAC (Advanced Audio Coding)
+    * - ``V4L2_MPEG_AUDIO_ENCODING_AC3``
+      - AC-3 aka ATSC A/52 encoding
 
 
 
 .. _v4l2-mpeg-audio-l1-bitrate:
 
-``V4L2_CID_MPEG_AUDIO_L1_BITRATE (enum v4l2_mpeg_audio_l1_bitrate)``
+``V4L2_CID_MPEG_AUDIO_L1_BITRATE``
+    (enum)
+
+enum v4l2_mpeg_audio_l1_bitrate -
     MPEG-1/2 Layer I bitrate. Possible values are:
 
 
@@ -398,96 +353,43 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_AUDIO_L1_BITRATE_32K``
-
-       -  32 kbit/s
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_AUDIO_L1_BITRATE_64K``
-
-       -  64 kbit/s
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_AUDIO_L1_BITRATE_96K``
-
-       -  96 kbit/s
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_AUDIO_L1_BITRATE_128K``
-
-       -  128 kbit/s
-
-    -  .. row 5
-
-       -  ``V4L2_MPEG_AUDIO_L1_BITRATE_160K``
-
-       -  160 kbit/s
-
-    -  .. row 6
-
-       -  ``V4L2_MPEG_AUDIO_L1_BITRATE_192K``
-
-       -  192 kbit/s
-
-    -  .. row 7
-
-       -  ``V4L2_MPEG_AUDIO_L1_BITRATE_224K``
-
-       -  224 kbit/s
-
-    -  .. row 8
-
-       -  ``V4L2_MPEG_AUDIO_L1_BITRATE_256K``
-
-       -  256 kbit/s
-
-    -  .. row 9
-
-       -  ``V4L2_MPEG_AUDIO_L1_BITRATE_288K``
-
-       -  288 kbit/s
-
-    -  .. row 10
-
-       -  ``V4L2_MPEG_AUDIO_L1_BITRATE_320K``
-
-       -  320 kbit/s
-
-    -  .. row 11
-
-       -  ``V4L2_MPEG_AUDIO_L1_BITRATE_352K``
-
-       -  352 kbit/s
-
-    -  .. row 12
-
-       -  ``V4L2_MPEG_AUDIO_L1_BITRATE_384K``
-
-       -  384 kbit/s
-
-    -  .. row 13
-
-       -  ``V4L2_MPEG_AUDIO_L1_BITRATE_416K``
-
-       -  416 kbit/s
-
-    -  .. row 14
-
-       -  ``V4L2_MPEG_AUDIO_L1_BITRATE_448K``
-
-       -  448 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L1_BITRATE_32K``
+      - 32 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L1_BITRATE_64K``
+      - 64 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L1_BITRATE_96K``
+      - 96 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L1_BITRATE_128K``
+      - 128 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L1_BITRATE_160K``
+      - 160 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L1_BITRATE_192K``
+      - 192 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L1_BITRATE_224K``
+      - 224 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L1_BITRATE_256K``
+      - 256 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L1_BITRATE_288K``
+      - 288 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L1_BITRATE_320K``
+      - 320 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L1_BITRATE_352K``
+      - 352 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L1_BITRATE_384K``
+      - 384 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L1_BITRATE_416K``
+      - 416 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L1_BITRATE_448K``
+      - 448 kbit/s
 
 
 
 .. _v4l2-mpeg-audio-l2-bitrate:
 
-``V4L2_CID_MPEG_AUDIO_L2_BITRATE (enum v4l2_mpeg_audio_l2_bitrate)``
+``V4L2_CID_MPEG_AUDIO_L2_BITRATE``
+    (enum)
+
+enum v4l2_mpeg_audio_l2_bitrate -
     MPEG-1/2 Layer II bitrate. Possible values are:
 
 
@@ -496,96 +398,43 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_AUDIO_L2_BITRATE_32K``
-
-       -  32 kbit/s
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_AUDIO_L2_BITRATE_48K``
-
-       -  48 kbit/s
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_AUDIO_L2_BITRATE_56K``
-
-       -  56 kbit/s
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_AUDIO_L2_BITRATE_64K``
-
-       -  64 kbit/s
-
-    -  .. row 5
-
-       -  ``V4L2_MPEG_AUDIO_L2_BITRATE_80K``
-
-       -  80 kbit/s
-
-    -  .. row 6
-
-       -  ``V4L2_MPEG_AUDIO_L2_BITRATE_96K``
-
-       -  96 kbit/s
-
-    -  .. row 7
-
-       -  ``V4L2_MPEG_AUDIO_L2_BITRATE_112K``
-
-       -  112 kbit/s
-
-    -  .. row 8
-
-       -  ``V4L2_MPEG_AUDIO_L2_BITRATE_128K``
-
-       -  128 kbit/s
-
-    -  .. row 9
-
-       -  ``V4L2_MPEG_AUDIO_L2_BITRATE_160K``
-
-       -  160 kbit/s
-
-    -  .. row 10
-
-       -  ``V4L2_MPEG_AUDIO_L2_BITRATE_192K``
-
-       -  192 kbit/s
-
-    -  .. row 11
-
-       -  ``V4L2_MPEG_AUDIO_L2_BITRATE_224K``
-
-       -  224 kbit/s
-
-    -  .. row 12
-
-       -  ``V4L2_MPEG_AUDIO_L2_BITRATE_256K``
-
-       -  256 kbit/s
-
-    -  .. row 13
-
-       -  ``V4L2_MPEG_AUDIO_L2_BITRATE_320K``
-
-       -  320 kbit/s
-
-    -  .. row 14
-
-       -  ``V4L2_MPEG_AUDIO_L2_BITRATE_384K``
-
-       -  384 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L2_BITRATE_32K``
+      - 32 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L2_BITRATE_48K``
+      - 48 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L2_BITRATE_56K``
+      - 56 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L2_BITRATE_64K``
+      - 64 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L2_BITRATE_80K``
+      - 80 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L2_BITRATE_96K``
+      - 96 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L2_BITRATE_112K``
+      - 112 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L2_BITRATE_128K``
+      - 128 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L2_BITRATE_160K``
+      - 160 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L2_BITRATE_192K``
+      - 192 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L2_BITRATE_224K``
+      - 224 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L2_BITRATE_256K``
+      - 256 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L2_BITRATE_320K``
+      - 320 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L2_BITRATE_384K``
+      - 384 kbit/s
 
 
 
 .. _v4l2-mpeg-audio-l3-bitrate:
 
-``V4L2_CID_MPEG_AUDIO_L3_BITRATE (enum v4l2_mpeg_audio_l3_bitrate)``
+``V4L2_CID_MPEG_AUDIO_L3_BITRATE``
+    (enum)
+
+enum v4l2_mpeg_audio_l3_bitrate -
     MPEG-1/2 Layer III bitrate. Possible values are:
 
 
@@ -594,90 +443,34 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_AUDIO_L3_BITRATE_32K``
-
-       -  32 kbit/s
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_AUDIO_L3_BITRATE_40K``
-
-       -  40 kbit/s
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_AUDIO_L3_BITRATE_48K``
-
-       -  48 kbit/s
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_AUDIO_L3_BITRATE_56K``
-
-       -  56 kbit/s
-
-    -  .. row 5
-
-       -  ``V4L2_MPEG_AUDIO_L3_BITRATE_64K``
-
-       -  64 kbit/s
-
-    -  .. row 6
-
-       -  ``V4L2_MPEG_AUDIO_L3_BITRATE_80K``
-
-       -  80 kbit/s
-
-    -  .. row 7
-
-       -  ``V4L2_MPEG_AUDIO_L3_BITRATE_96K``
-
-       -  96 kbit/s
-
-    -  .. row 8
-
-       -  ``V4L2_MPEG_AUDIO_L3_BITRATE_112K``
-
-       -  112 kbit/s
-
-    -  .. row 9
-
-       -  ``V4L2_MPEG_AUDIO_L3_BITRATE_128K``
-
-       -  128 kbit/s
-
-    -  .. row 10
-
-       -  ``V4L2_MPEG_AUDIO_L3_BITRATE_160K``
-
-       -  160 kbit/s
-
-    -  .. row 11
-
-       -  ``V4L2_MPEG_AUDIO_L3_BITRATE_192K``
-
-       -  192 kbit/s
-
-    -  .. row 12
-
-       -  ``V4L2_MPEG_AUDIO_L3_BITRATE_224K``
-
-       -  224 kbit/s
-
-    -  .. row 13
-
-       -  ``V4L2_MPEG_AUDIO_L3_BITRATE_256K``
-
-       -  256 kbit/s
-
-    -  .. row 14
-
-       -  ``V4L2_MPEG_AUDIO_L3_BITRATE_320K``
-
-       -  320 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L3_BITRATE_32K``
+      - 32 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L3_BITRATE_40K``
+      - 40 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L3_BITRATE_48K``
+      - 48 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L3_BITRATE_56K``
+      - 56 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L3_BITRATE_64K``
+      - 64 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L3_BITRATE_80K``
+      - 80 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L3_BITRATE_96K``
+      - 96 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L3_BITRATE_112K``
+      - 112 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L3_BITRATE_128K``
+      - 128 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L3_BITRATE_160K``
+      - 160 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L3_BITRATE_192K``
+      - 192 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L3_BITRATE_224K``
+      - 224 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L3_BITRATE_256K``
+      - 256 kbit/s
+    * - ``V4L2_MPEG_AUDIO_L3_BITRATE_320K``
+      - 320 kbit/s
 
 
 
@@ -686,7 +479,10 @@
 
 .. _v4l2-mpeg-audio-ac3-bitrate:
 
-``V4L2_CID_MPEG_AUDIO_AC3_BITRATE (enum v4l2_mpeg_audio_ac3_bitrate)``
+``V4L2_CID_MPEG_AUDIO_AC3_BITRATE``
+    (enum)
+
+enum v4l2_mpeg_audio_ac3_bitrate -
     AC-3 bitrate. Possible values are:
 
 
@@ -695,126 +491,53 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_32K``
-
-       -  32 kbit/s
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_40K``
-
-       -  40 kbit/s
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_48K``
-
-       -  48 kbit/s
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_56K``
-
-       -  56 kbit/s
-
-    -  .. row 5
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_64K``
-
-       -  64 kbit/s
-
-    -  .. row 6
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_80K``
-
-       -  80 kbit/s
-
-    -  .. row 7
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_96K``
-
-       -  96 kbit/s
-
-    -  .. row 8
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_112K``
-
-       -  112 kbit/s
-
-    -  .. row 9
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_128K``
-
-       -  128 kbit/s
-
-    -  .. row 10
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_160K``
-
-       -  160 kbit/s
-
-    -  .. row 11
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_192K``
-
-       -  192 kbit/s
-
-    -  .. row 12
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_224K``
-
-       -  224 kbit/s
-
-    -  .. row 13
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_256K``
-
-       -  256 kbit/s
-
-    -  .. row 14
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_320K``
-
-       -  320 kbit/s
-
-    -  .. row 15
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_384K``
-
-       -  384 kbit/s
-
-    -  .. row 16
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_448K``
-
-       -  448 kbit/s
-
-    -  .. row 17
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_512K``
-
-       -  512 kbit/s
-
-    -  .. row 18
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_576K``
-
-       -  576 kbit/s
-
-    -  .. row 19
-
-       -  ``V4L2_MPEG_AUDIO_AC3_BITRATE_640K``
-
-       -  640 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_32K``
+      - 32 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_40K``
+      - 40 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_48K``
+      - 48 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_56K``
+      - 56 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_64K``
+      - 64 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_80K``
+      - 80 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_96K``
+      - 96 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_112K``
+      - 112 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_128K``
+      - 128 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_160K``
+      - 160 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_192K``
+      - 192 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_224K``
+      - 224 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_256K``
+      - 256 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_320K``
+      - 320 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_384K``
+      - 384 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_448K``
+      - 448 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_512K``
+      - 512 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_576K``
+      - 576 kbit/s
+    * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_640K``
+      - 640 kbit/s
 
 
 
 .. _v4l2-mpeg-audio-mode:
 
-``V4L2_CID_MPEG_AUDIO_MODE (enum v4l2_mpeg_audio_mode)``
+``V4L2_CID_MPEG_AUDIO_MODE``
+    (enum)
+
+enum v4l2_mpeg_audio_mode -
     MPEG Audio mode. Possible values are:
 
 
@@ -823,36 +546,23 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_AUDIO_MODE_STEREO``
-
-       -  Stereo
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_AUDIO_MODE_JOINT_STEREO``
-
-       -  Joint Stereo
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_AUDIO_MODE_DUAL``
-
-       -  Bilingual
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_AUDIO_MODE_MONO``
-
-       -  Mono
+    * - ``V4L2_MPEG_AUDIO_MODE_STEREO``
+      - Stereo
+    * - ``V4L2_MPEG_AUDIO_MODE_JOINT_STEREO``
+      - Joint Stereo
+    * - ``V4L2_MPEG_AUDIO_MODE_DUAL``
+      - Bilingual
+    * - ``V4L2_MPEG_AUDIO_MODE_MONO``
+      - Mono
 
 
 
 .. _v4l2-mpeg-audio-mode-extension:
 
-``V4L2_CID_MPEG_AUDIO_MODE_EXTENSION (enum v4l2_mpeg_audio_mode_extension)``
+``V4L2_CID_MPEG_AUDIO_MODE_EXTENSION``
+    (enum)
+
+enum v4l2_mpeg_audio_mode_extension -
     Joint Stereo audio mode extension. In Layer I and II they indicate
     which subbands are in intensity stereo. All other subbands are coded
     in stereo. Layer III is not (yet) supported. Possible values are:
@@ -863,36 +573,23 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4``
-
-       -  Subbands 4-31 in intensity stereo
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8``
-
-       -  Subbands 8-31 in intensity stereo
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12``
-
-       -  Subbands 12-31 in intensity stereo
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16``
-
-       -  Subbands 16-31 in intensity stereo
+    * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4``
+      - Subbands 4-31 in intensity stereo
+    * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8``
+      - Subbands 8-31 in intensity stereo
+    * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12``
+      - Subbands 12-31 in intensity stereo
+    * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16``
+      - Subbands 16-31 in intensity stereo
 
 
 
 .. _v4l2-mpeg-audio-emphasis:
 
-``V4L2_CID_MPEG_AUDIO_EMPHASIS (enum v4l2_mpeg_audio_emphasis)``
+``V4L2_CID_MPEG_AUDIO_EMPHASIS``
+    (enum)
+
+enum v4l2_mpeg_audio_emphasis -
     Audio Emphasis. Possible values are:
 
 
@@ -901,30 +598,21 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_AUDIO_EMPHASIS_NONE``
-
-       -  None
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS``
-
-       -  50/15 microsecond emphasis
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17``
-
-       -  CCITT J.17
+    * - ``V4L2_MPEG_AUDIO_EMPHASIS_NONE``
+      - None
+    * - ``V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS``
+      - 50/15 microsecond emphasis
+    * - ``V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17``
+      - CCITT J.17
 
 
 
 .. _v4l2-mpeg-audio-crc:
 
-``V4L2_CID_MPEG_AUDIO_CRC (enum v4l2_mpeg_audio_crc)``
+``V4L2_CID_MPEG_AUDIO_CRC``
+    (enum)
+
+enum v4l2_mpeg_audio_crc -
     CRC method. Possible values are:
 
 
@@ -933,18 +621,10 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_AUDIO_CRC_NONE``
-
-       -  None
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_AUDIO_CRC_CRC16``
-
-       -  16 bit parity check
+    * - ``V4L2_MPEG_AUDIO_CRC_NONE``
+      - None
+    * - ``V4L2_MPEG_AUDIO_CRC_CRC16``
+      - 16 bit parity check
 
 
 
@@ -956,63 +636,50 @@
 
 .. _v4l2-mpeg-audio-dec-playback:
 
-``V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK (enum v4l2_mpeg_audio_dec_playback)``
+``V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK``
+    (enum)
+
+enum v4l2_mpeg_audio_dec_playback -
     Determines how monolingual audio should be played back. Possible
     values are:
 
 
 
+.. tabularcolumns:: |p{9.0cm}|p{8.5cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO``
-
-       -  Automatically determines the best playback mode.
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_STEREO``
-
-       -  Stereo playback.
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_LEFT``
-
-       -  Left channel playback.
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_RIGHT``
-
-       -  Right channel playback.
-
-    -  .. row 5
-
-       -  ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_MONO``
-
-       -  Mono playback.
-
-    -  .. row 6
-
-       -  ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO``
-
-       -  Stereo playback with swapped left and right channels.
+    * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO``
+      - Automatically determines the best playback mode.
+    * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_STEREO``
+      - Stereo playback.
+    * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_LEFT``
+      - Left channel playback.
+    * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_RIGHT``
+      - Right channel playback.
+    * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_MONO``
+      - Mono playback.
+    * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO``
+      - Stereo playback with swapped left and right channels.
 
 
 
 .. _v4l2-mpeg-audio-dec-multilingual-playback:
 
-``V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK (enum v4l2_mpeg_audio_dec_playback)``
+``V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK``
+    (enum)
+
+enum v4l2_mpeg_audio_dec_playback -
     Determines how multilingual audio should be played back.
 
 .. _v4l2-mpeg-video-encoding:
 
-``V4L2_CID_MPEG_VIDEO_ENCODING (enum v4l2_mpeg_video_encoding)``
+``V4L2_CID_MPEG_VIDEO_ENCODING``
+    (enum)
+
+enum v4l2_mpeg_video_encoding -
     MPEG Video encoding method. This control is specific to multiplexed
     MPEG streams. Possible values are:
 
@@ -1022,30 +689,21 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_VIDEO_ENCODING_MPEG_1``
-
-       -  MPEG-1 Video encoding
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_VIDEO_ENCODING_MPEG_2``
-
-       -  MPEG-2 Video encoding
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC``
-
-       -  MPEG-4 AVC (H.264) Video encoding
+    * - ``V4L2_MPEG_VIDEO_ENCODING_MPEG_1``
+      - MPEG-1 Video encoding
+    * - ``V4L2_MPEG_VIDEO_ENCODING_MPEG_2``
+      - MPEG-2 Video encoding
+    * - ``V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC``
+      - MPEG-4 AVC (H.264) Video encoding
 
 
 
 .. _v4l2-mpeg-video-aspect:
 
-``V4L2_CID_MPEG_VIDEO_ASPECT (enum v4l2_mpeg_video_aspect)``
+``V4L2_CID_MPEG_VIDEO_ASPECT``
+    (enum)
+
+enum v4l2_mpeg_video_aspect -
     Video aspect. Possible values are:
 
 
@@ -1054,22 +712,10 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_VIDEO_ASPECT_1x1``
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_VIDEO_ASPECT_4x3``
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_VIDEO_ASPECT_16x9``
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_VIDEO_ASPECT_221x100``
+    * - ``V4L2_MPEG_VIDEO_ASPECT_1x1``
+    * - ``V4L2_MPEG_VIDEO_ASPECT_4x3``
+    * - ``V4L2_MPEG_VIDEO_ASPECT_16x9``
+    * - ``V4L2_MPEG_VIDEO_ASPECT_221x100``
 
 
 
@@ -1087,7 +733,10 @@
 
 .. _v4l2-mpeg-video-bitrate-mode:
 
-``V4L2_CID_MPEG_VIDEO_BITRATE_MODE (enum v4l2_mpeg_video_bitrate_mode)``
+``V4L2_CID_MPEG_VIDEO_BITRATE_MODE``
+    (enum)
+
+enum v4l2_mpeg_video_bitrate_mode -
     Video bitrate mode. Possible values are:
 
 
@@ -1096,18 +745,10 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_VIDEO_BITRATE_MODE_VBR``
-
-       -  Variable bitrate
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_VIDEO_BITRATE_MODE_CBR``
-
-       -  Constant bitrate
+    * - ``V4L2_MPEG_VIDEO_BITRATE_MODE_VBR``
+      - Variable bitrate
+    * - ``V4L2_MPEG_VIDEO_BITRATE_MODE_CBR``
+      - Constant bitrate
 
 
 
@@ -1138,30 +779,14 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  Bit 0:7
-
-       -  V chrominance information
-
-    -  .. row 2
-
-       -  Bit 8:15
-
-       -  U chrominance information
-
-    -  .. row 3
-
-       -  Bit 16:23
-
-       -  Y luminance information
-
-    -  .. row 4
-
-       -  Bit 24:31
-
-       -  Must be zero.
+    * - Bit 0:7
+      - V chrominance information
+    * - Bit 8:15
+      - U chrominance information
+    * - Bit 16:23
+      - Y luminance information
+    * - Bit 24:31
+      - Must be zero.
 
 
 
@@ -1191,7 +816,10 @@
 
 .. _v4l2-mpeg-video-h264-vui-sar-idc:
 
-``V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC (enum v4l2_mpeg_video_h264_vui_sar_idc)``
+``V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC``
+    (enum)
+
+enum v4l2_mpeg_video_h264_vui_sar_idc -
     VUI sample aspect ratio indicator for H.264 encoding. The value is
     defined in the table E-1 in the standard. Applicable to the H264
     encoder.
@@ -1202,114 +830,42 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED``
-
-       -  Unspecified
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1``
-
-       -  1x1
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11``
-
-       -  12x11
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11``
-
-       -  10x11
-
-    -  .. row 5
-
-       -  ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11``
-
-       -  16x11
-
-    -  .. row 6
-
-       -  ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33``
-
-       -  40x33
-
-    -  .. row 7
-
-       -  ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11``
-
-       -  24x11
-
-    -  .. row 8
-
-       -  ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11``
-
-       -  20x11
-
-    -  .. row 9
-
-       -  ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11``
-
-       -  32x11
-
-    -  .. row 10
-
-       -  ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33``
-
-       -  80x33
-
-    -  .. row 11
-
-       -  ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11``
-
-       -  18x11
-
-    -  .. row 12
-
-       -  ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11``
-
-       -  15x11
-
-    -  .. row 13
-
-       -  ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33``
-
-       -  64x33
-
-    -  .. row 14
-
-       -  ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99``
-
-       -  160x99
-
-    -  .. row 15
-
-       -  ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3``
-
-       -  4x3
-
-    -  .. row 16
-
-       -  ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2``
-
-       -  3x2
-
-    -  .. row 17
-
-       -  ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1``
-
-       -  2x1
-
-    -  .. row 18
-
-       -  ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED``
-
-       -  Extended SAR
+    * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED``
+      - Unspecified
+    * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1``
+      - 1x1
+    * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11``
+      - 12x11
+    * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11``
+      - 10x11
+    * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11``
+      - 16x11
+    * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33``
+      - 40x33
+    * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11``
+      - 24x11
+    * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11``
+      - 20x11
+    * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11``
+      - 32x11
+    * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33``
+      - 80x33
+    * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11``
+      - 18x11
+    * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11``
+      - 15x11
+    * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33``
+      - 64x33
+    * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99``
+      - 160x99
+    * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3``
+      - 4x3
+    * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2``
+      - 3x2
+    * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1``
+      - 2x1
+    * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED``
+      - Extended SAR
 
 
 
@@ -1323,7 +879,10 @@
 
 .. _v4l2-mpeg-video-h264-level:
 
-``V4L2_CID_MPEG_VIDEO_H264_LEVEL (enum v4l2_mpeg_video_h264_level)``
+``V4L2_CID_MPEG_VIDEO_H264_LEVEL``
+    (enum)
+
+enum v4l2_mpeg_video_h264_level -
     The level information for the H264 video elementary stream.
     Applicable to the H264 encoder. Possible values are:
 
@@ -1333,108 +892,47 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_VIDEO_H264_LEVEL_1_0``
-
-       -  Level 1.0
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_VIDEO_H264_LEVEL_1B``
-
-       -  Level 1B
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_VIDEO_H264_LEVEL_1_1``
-
-       -  Level 1.1
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_VIDEO_H264_LEVEL_1_2``
-
-       -  Level 1.2
-
-    -  .. row 5
-
-       -  ``V4L2_MPEG_VIDEO_H264_LEVEL_1_3``
-
-       -  Level 1.3
-
-    -  .. row 6
-
-       -  ``V4L2_MPEG_VIDEO_H264_LEVEL_2_0``
-
-       -  Level 2.0
-
-    -  .. row 7
-
-       -  ``V4L2_MPEG_VIDEO_H264_LEVEL_2_1``
-
-       -  Level 2.1
-
-    -  .. row 8
-
-       -  ``V4L2_MPEG_VIDEO_H264_LEVEL_2_2``
-
-       -  Level 2.2
-
-    -  .. row 9
-
-       -  ``V4L2_MPEG_VIDEO_H264_LEVEL_3_0``
-
-       -  Level 3.0
-
-    -  .. row 10
-
-       -  ``V4L2_MPEG_VIDEO_H264_LEVEL_3_1``
-
-       -  Level 3.1
-
-    -  .. row 11
-
-       -  ``V4L2_MPEG_VIDEO_H264_LEVEL_3_2``
-
-       -  Level 3.2
-
-    -  .. row 12
-
-       -  ``V4L2_MPEG_VIDEO_H264_LEVEL_4_0``
-
-       -  Level 4.0
-
-    -  .. row 13
-
-       -  ``V4L2_MPEG_VIDEO_H264_LEVEL_4_1``
-
-       -  Level 4.1
-
-    -  .. row 14
-
-       -  ``V4L2_MPEG_VIDEO_H264_LEVEL_4_2``
-
-       -  Level 4.2
-
-    -  .. row 15
-
-       -  ``V4L2_MPEG_VIDEO_H264_LEVEL_5_0``
-
-       -  Level 5.0
-
-    -  .. row 16
-
-       -  ``V4L2_MPEG_VIDEO_H264_LEVEL_5_1``
-
-       -  Level 5.1
+    * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_0``
+      - Level 1.0
+    * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1B``
+      - Level 1B
+    * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_1``
+      - Level 1.1
+    * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_2``
+      - Level 1.2
+    * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_3``
+      - Level 1.3
+    * - ``V4L2_MPEG_VIDEO_H264_LEVEL_2_0``
+      - Level 2.0
+    * - ``V4L2_MPEG_VIDEO_H264_LEVEL_2_1``
+      - Level 2.1
+    * - ``V4L2_MPEG_VIDEO_H264_LEVEL_2_2``
+      - Level 2.2
+    * - ``V4L2_MPEG_VIDEO_H264_LEVEL_3_0``
+      - Level 3.0
+    * - ``V4L2_MPEG_VIDEO_H264_LEVEL_3_1``
+      - Level 3.1
+    * - ``V4L2_MPEG_VIDEO_H264_LEVEL_3_2``
+      - Level 3.2
+    * - ``V4L2_MPEG_VIDEO_H264_LEVEL_4_0``
+      - Level 4.0
+    * - ``V4L2_MPEG_VIDEO_H264_LEVEL_4_1``
+      - Level 4.1
+    * - ``V4L2_MPEG_VIDEO_H264_LEVEL_4_2``
+      - Level 4.2
+    * - ``V4L2_MPEG_VIDEO_H264_LEVEL_5_0``
+      - Level 5.0
+    * - ``V4L2_MPEG_VIDEO_H264_LEVEL_5_1``
+      - Level 5.1
 
 
 
 .. _v4l2-mpeg-video-mpeg4-level:
 
-``V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL (enum v4l2_mpeg_video_mpeg4_level)``
+``V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL``
+    (enum)
+
+enum v4l2_mpeg_video_mpeg4_level -
     The level information for the MPEG4 elementary stream. Applicable to
     the MPEG4 encoder. Possible values are:
 
@@ -1444,60 +942,31 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_VIDEO_LEVEL_0``
-
-       -  Level 0
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_VIDEO_LEVEL_0B``
-
-       -  Level 0b
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_VIDEO_LEVEL_1``
-
-       -  Level 1
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_VIDEO_LEVEL_2``
-
-       -  Level 2
-
-    -  .. row 5
-
-       -  ``V4L2_MPEG_VIDEO_LEVEL_3``
-
-       -  Level 3
-
-    -  .. row 6
-
-       -  ``V4L2_MPEG_VIDEO_LEVEL_3B``
-
-       -  Level 3b
-
-    -  .. row 7
-
-       -  ``V4L2_MPEG_VIDEO_LEVEL_4``
-
-       -  Level 4
-
-    -  .. row 8
-
-       -  ``V4L2_MPEG_VIDEO_LEVEL_5``
-
-       -  Level 5
+    * - ``V4L2_MPEG_VIDEO_LEVEL_0``
+      - Level 0
+    * - ``V4L2_MPEG_VIDEO_LEVEL_0B``
+      - Level 0b
+    * - ``V4L2_MPEG_VIDEO_LEVEL_1``
+      - Level 1
+    * - ``V4L2_MPEG_VIDEO_LEVEL_2``
+      - Level 2
+    * - ``V4L2_MPEG_VIDEO_LEVEL_3``
+      - Level 3
+    * - ``V4L2_MPEG_VIDEO_LEVEL_3B``
+      - Level 3b
+    * - ``V4L2_MPEG_VIDEO_LEVEL_4``
+      - Level 4
+    * - ``V4L2_MPEG_VIDEO_LEVEL_5``
+      - Level 5
 
 
 
 .. _v4l2-mpeg-video-h264-profile:
 
-``V4L2_CID_MPEG_VIDEO_H264_PROFILE (enum v4l2_mpeg_video_h264_profile)``
+``V4L2_CID_MPEG_VIDEO_H264_PROFILE``
+    (enum)
+
+enum v4l2_mpeg_video_h264_profile -
     The profile information for H264. Applicable to the H264 encoder.
     Possible values are:
 
@@ -1507,114 +976,49 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE``
-
-       -  Baseline profile
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE``
-
-       -  Constrained Baseline profile
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_VIDEO_H264_PROFILE_MAIN``
-
-       -  Main profile
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED``
-
-       -  Extended profile
-
-    -  .. row 5
-
-       -  ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH``
-
-       -  High profile
-
-    -  .. row 6
-
-       -  ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10``
-
-       -  High 10 profile
-
-    -  .. row 7
-
-       -  ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422``
-
-       -  High 422 profile
-
-    -  .. row 8
-
-       -  ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE``
-
-       -  High 444 Predictive profile
-
-    -  .. row 9
-
-       -  ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10_INTRA``
-
-       -  High 10 Intra profile
-
-    -  .. row 10
-
-       -  ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA``
-
-       -  High 422 Intra profile
-
-    -  .. row 11
-
-       -  ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_INTRA``
-
-       -  High 444 Intra profile
-
-    -  .. row 12
-
-       -  ``V4L2_MPEG_VIDEO_H264_PROFILE_CAVLC_444_INTRA``
-
-       -  CAVLC 444 Intra profile
-
-    -  .. row 13
-
-       -  ``V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE``
-
-       -  Scalable Baseline profile
-
-    -  .. row 14
-
-       -  ``V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH``
-
-       -  Scalable High profile
-
-    -  .. row 15
-
-       -  ``V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA``
-
-       -  Scalable High Intra profile
-
-    -  .. row 16
-
-       -  ``V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH``
-
-       -  Stereo High profile
-
-    -  .. row 17
-
-       -  ``V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH``
-
-       -  Multiview High profile
+    * - ``V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE``
+      - Baseline profile
+    * - ``V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE``
+      - Constrained Baseline profile
+    * - ``V4L2_MPEG_VIDEO_H264_PROFILE_MAIN``
+      - Main profile
+    * - ``V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED``
+      - Extended profile
+    * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH``
+      - High profile
+    * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10``
+      - High 10 profile
+    * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422``
+      - High 422 profile
+    * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE``
+      - High 444 Predictive profile
+    * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10_INTRA``
+      - High 10 Intra profile
+    * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA``
+      - High 422 Intra profile
+    * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_INTRA``
+      - High 444 Intra profile
+    * - ``V4L2_MPEG_VIDEO_H264_PROFILE_CAVLC_444_INTRA``
+      - CAVLC 444 Intra profile
+    * - ``V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE``
+      - Scalable Baseline profile
+    * - ``V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH``
+      - Scalable High profile
+    * - ``V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA``
+      - Scalable High Intra profile
+    * - ``V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH``
+      - Stereo High profile
+    * - ``V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH``
+      - Multiview High profile
 
 
 
 .. _v4l2-mpeg-video-mpeg4-profile:
 
-``V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE (enum v4l2_mpeg_video_mpeg4_profile)``
+``V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE``
+    (enum)
+
+enum v4l2_mpeg_video_mpeg4_profile -
     The profile information for MPEG4. Applicable to the MPEG4 encoder.
     Possible values are:
 
@@ -1624,36 +1028,16 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_VIDEO_PROFILE_SIMPLE``
-
-       -  Simple profile
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_VIDEO_PROFILE_ADVANCED_SIMPLE``
-
-       -  Advanced Simple profile
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_VIDEO_PROFILE_CORE``
-
-       -  Core profile
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_VIDEO_PROFILE_SIMPLE_SCALABLE``
-
-       -  Simple Scalable profile
-
-    -  .. row 5
-
-       -  ``V4L2_MPEG_VIDEO_PROFILE_ADVANCED_CODING_EFFICIENCY``
-
-       -
+    * - ``V4L2_MPEG_VIDEO_PROFILE_SIMPLE``
+      - Simple profile
+    * - ``V4L2_MPEG_VIDEO_PROFILE_ADVANCED_SIMPLE``
+      - Advanced Simple profile
+    * - ``V4L2_MPEG_VIDEO_PROFILE_CORE``
+      - Core profile
+    * - ``V4L2_MPEG_VIDEO_PROFILE_SIMPLE_SCALABLE``
+      - Simple Scalable profile
+    * - ``V4L2_MPEG_VIDEO_PROFILE_ADVANCED_CODING_EFFICIENCY``
+      -
 
 
 
@@ -1663,34 +1047,27 @@
 
 .. _v4l2-mpeg-video-multi-slice-mode:
 
-``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE (enum v4l2_mpeg_video_multi_slice_mode)``
+``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE``
+    (enum)
+
+enum v4l2_mpeg_video_multi_slice_mode -
     Determines how the encoder should handle division of frame into
     slices. Applicable to the encoder. Possible values are:
 
 
 
+.. tabularcolumns:: |p{8.7cm}|p{8.8cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE``
-
-       -  Single slice per frame.
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB``
-
-       -  Multiple slices with set maximum number of macroblocks per slice.
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES``
-
-       -  Multiple slice with set maximum size in bytes per slice.
+    * - ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE``
+      - Single slice per frame.
+    * - ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB``
+      - Multiple slices with set maximum number of macroblocks per slice.
+    * - ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES``
+      - Multiple slice with set maximum size in bytes per slice.
 
 
 
@@ -1708,33 +1085,26 @@
 
 .. _v4l2-mpeg-video-h264-loop-filter-mode:
 
-``V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE (enum v4l2_mpeg_video_h264_loop_filter_mode)``
+``V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE``
+    (enum)
+
+enum v4l2_mpeg_video_h264_loop_filter_mode -
     Loop filter mode for H264 encoder. Possible values are:
 
 
 
+.. tabularcolumns:: |p{14.0cm}|p{3.5cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED``
-
-       -  Loop filter is enabled.
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED``
-
-       -  Loop filter is disabled.
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY``
-
-       -  Loop filter is disabled at the slice boundary.
+    * - ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED``
+      - Loop filter is enabled.
+    * - ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED``
+      - Loop filter is disabled.
+    * - ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY``
+      - Loop filter is disabled at the slice boundary.
 
 
 
@@ -1748,7 +1118,10 @@
 
 .. _v4l2-mpeg-video-h264-entropy-mode:
 
-``V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE (enum v4l2_mpeg_video_h264_entropy_mode)``
+``V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE``
+    (enum)
+
+enum v4l2_mpeg_video_h264_entropy_mode -
     Entropy coding mode for H264 - CABAC/CAVALC. Applicable to the H264
     encoder. Possible values are:
 
@@ -1758,18 +1131,10 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC``
-
-       -  Use CAVLC entropy coding.
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC``
-
-       -  Use CABAC entropy coding.
+    * - ``V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC``
+      - Use CAVLC entropy coding.
+    * - ``V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC``
+      - Use CABAC entropy coding.
 
 
 
@@ -1909,30 +1274,27 @@
 
 .. _v4l2-mpeg-video-header-mode:
 
-``V4L2_CID_MPEG_VIDEO_HEADER_MODE (enum v4l2_mpeg_video_header_mode)``
+``V4L2_CID_MPEG_VIDEO_HEADER_MODE``
+    (enum)
+
+enum v4l2_mpeg_video_header_mode -
     Determines whether the header is returned as the first buffer or is
     it returned together with the first frame. Applicable to encoders.
     Possible values are:
 
 
 
+.. tabularcolumns:: |p{10.3cm}|p{7.2cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE``
-
-       -  The stream header is returned separately in the first buffer.
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME``
-
-       -  The stream header is returned together with the first encoded
-	  frame.
+    * - ``V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE``
+      - The stream header is returned separately in the first buffer.
+    * - ``V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME``
+      - The stream header is returned together with the first encoded
+	frame.
 
 
 
@@ -1965,52 +1327,31 @@
 
 .. _v4l2-mpeg-video-h264-sei-fp-arrangement-type:
 
-``V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE (enum v4l2_mpeg_video_h264_sei_fp_arrangement_type)``
+``V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE``
+    (enum)
+
+enum v4l2_mpeg_video_h264_sei_fp_arrangement_type -
     Frame packing arrangement type for H264 SEI. Applicable to the H264
     encoder. Possible values are:
 
-
+.. tabularcolumns:: |p{12cm}|p{5.5cm}|
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_CHEKERBOARD``
-
-       -  Pixels are alternatively from L and R.
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_COLUMN``
-
-       -  L and R are interlaced by column.
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_ROW``
-
-       -  L and R are interlaced by row.
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_SIDE_BY_SIDE``
-
-       -  L is on the left, R on the right.
-
-    -  .. row 5
-
-       -  ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM``
-
-       -  L is on top, R on bottom.
-
-    -  .. row 6
-
-       -  ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TEMPORAL``
-
-       -  One view per frame.
+    * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_CHEKERBOARD``
+      - Pixels are alternatively from L and R.
+    * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_COLUMN``
+      - L and R are interlaced by column.
+    * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_ROW``
+      - L and R are interlaced by row.
+    * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_SIDE_BY_SIDE``
+      - L is on the left, R on the right.
+    * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM``
+      - L is on top, R on bottom.
+    * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TEMPORAL``
+      - One view per frame.
 
 
 
@@ -2021,61 +1362,36 @@
 
 .. _v4l2-mpeg-video-h264-fmo-map-type:
 
-``V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE (enum v4l2_mpeg_video_h264_fmo_map_type)``
+``V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE``
+   (enum)
+
+enum v4l2_mpeg_video_h264_fmo_map_type -
     When using FMO, the map type divides the image in different scan
     patterns of macroblocks. Applicable to the H264 encoder. Possible
     values are:
 
-
+.. tabularcolumns:: |p{12.5cm}|p{5.0cm}|
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_INTERLEAVED_SLICES``
-
-       -  Slices are interleaved one after other with macroblocks in run
-	  length order.
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_SCATTERED_SLICES``
-
-       -  Scatters the macroblocks based on a mathematical function known to
-	  both encoder and decoder.
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_FOREGROUND_WITH_LEFT_OVER``
-
-       -  Macroblocks arranged in rectangular areas or regions of interest.
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_BOX_OUT``
-
-       -  Slice groups grow in a cyclic way from centre to outwards.
-
-    -  .. row 5
-
-       -  ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_RASTER_SCAN``
-
-       -  Slice groups grow in raster scan pattern from left to right.
-
-    -  .. row 6
-
-       -  ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_WIPE_SCAN``
-
-       -  Slice groups grow in wipe scan pattern from top to bottom.
-
-    -  .. row 7
-
-       -  ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_EXPLICIT``
-
-       -  User defined map type.
+    * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_INTERLEAVED_SLICES``
+      - Slices are interleaved one after other with macroblocks in run
+	length order.
+    * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_SCATTERED_SLICES``
+      - Scatters the macroblocks based on a mathematical function known to
+	both encoder and decoder.
+    * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_FOREGROUND_WITH_LEFT_OVER``
+      - Macroblocks arranged in rectangular areas or regions of interest.
+    * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_BOX_OUT``
+      - Slice groups grow in a cyclic way from centre to outwards.
+    * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_RASTER_SCAN``
+      - Slice groups grow in raster scan pattern from left to right.
+    * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_WIPE_SCAN``
+      - Slice groups grow in wipe scan pattern from top to bottom.
+    * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_EXPLICIT``
+      - User defined map type.
 
 
 
@@ -2084,7 +1400,10 @@
 
 .. _v4l2-mpeg-video-h264-fmo-change-direction:
 
-``V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION (enum v4l2_mpeg_video_h264_fmo_change_dir)``
+``V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION``
+    (enum)
+
+enum v4l2_mpeg_video_h264_fmo_change_dir -
     Specifies a direction of the slice group change for raster and wipe
     maps. Applicable to the H264 encoder. Possible values are:
 
@@ -2094,18 +1413,10 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_RIGHT``
-
-       -  Raster scan or wipe right.
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_LEFT``
-
-       -  Reverse raster scan or wipe left.
+    * - ``V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_RIGHT``
+      - Raster scan or wipe right.
+    * - ``V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_LEFT``
+      - Reverse raster scan or wipe left.
 
 
 
@@ -2132,18 +1443,10 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  Bit 0:15
-
-       -  Slice ID
-
-    -  .. row 2
-
-       -  Bit 16:32
-
-       -  Slice position or order
+    * - Bit 0:15
+      - Slice ID
+    * - Bit 16:32
+      - Slice position or order
 
 
 
@@ -2152,7 +1455,10 @@
 
 .. _v4l2-mpeg-video-h264-hierarchical-coding-type:
 
-``V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE (enum v4l2_mpeg_video_h264_hierarchical_coding_type)``
+``V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE``
+    (enum)
+
+enum v4l2_mpeg_video_h264_hierarchical_coding_type -
     Specifies the hierarchical coding type. Applicable to the H264
     encoder. Possible values are:
 
@@ -2162,18 +1468,10 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_B``
-
-       -  Hierarchical B coding.
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_P``
-
-       -  Hierarchical P coding.
+    * - ``V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_B``
+      - Hierarchical B coding.
+    * - ``V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_P``
+      - Hierarchical P coding.
 
 
 
@@ -2192,18 +1490,10 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  Bit 0:15
-
-       -  QP value
-
-    -  .. row 2
-
-       -  Bit 16:32
-
-       -  Layer number
+    * - Bit 0:15
+      - QP value
+    * - Bit 16:32
+      - Layer number
 
 
 
@@ -2255,30 +1545,14 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  Bit 0:7
-
-       -  V chrominance information
-
-    -  .. row 2
-
-       -  Bit 8:15
-
-       -  U chrominance information
-
-    -  .. row 3
-
-       -  Bit 16:23
-
-       -  Y luminance information
-
-    -  .. row 4
-
-       -  Bit 24:31
-
-       -  Must be zero.
+    * - Bit 0:7
+      - V chrominance information
+    * - Bit 8:15
+      - U chrominance information
+    * - Bit 16:23
+      - Y luminance information
+    * - Bit 24:31
+      - Must be zero.
 
 
 
@@ -2321,38 +1595,30 @@
 
 .. _v4l2-mpeg-mfc51-video-frame-skip-mode:
 
-``V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE (enum v4l2_mpeg_mfc51_video_frame_skip_mode)``
+``V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE``
+    (enum)
+
+enum v4l2_mpeg_mfc51_video_frame_skip_mode -
     Indicates in what conditions the encoder should skip frames. If
     encoding a frame would cause the encoded stream to be larger then a
     chosen data limit then the frame will be skipped. Possible values
     are:
 
 
+.. tabularcolumns:: |p{9.0cm}|p{8.5cm}|
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_MFC51_FRAME_SKIP_MODE_DISABLED``
-
-       -  Frame skip mode is disabled.
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_MFC51_FRAME_SKIP_MODE_LEVEL_LIMIT``
-
-       -  Frame skip mode enabled and buffer limit is set by the chosen
-	  level and is defined by the standard.
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_MFC51_FRAME_SKIP_MODE_BUF_LIMIT``
-
-       -  Frame skip mode enabled and buffer limit is set by the VBV
-	  (MPEG1/2/4) or CPB (H264) buffer size control.
+    * - ``V4L2_MPEG_MFC51_FRAME_SKIP_MODE_DISABLED``
+      - Frame skip mode is disabled.
+    * - ``V4L2_MPEG_MFC51_FRAME_SKIP_MODE_LEVEL_LIMIT``
+      - Frame skip mode enabled and buffer limit is set by the chosen
+	level and is defined by the standard.
+    * - ``V4L2_MPEG_MFC51_FRAME_SKIP_MODE_BUF_LIMIT``
+      - Frame skip mode enabled and buffer limit is set by the VBV
+	(MPEG1/2/4) or CPB (H264) buffer size control.
 
 
 
@@ -2370,7 +1636,10 @@
 
 .. _v4l2-mpeg-mfc51-video-force-frame-type:
 
-``V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE (enum v4l2_mpeg_mfc51_video_force_frame_type)``
+``V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE``
+    (enum)
+
+enum v4l2_mpeg_mfc51_video_force_frame_type -
     Force a frame type for the next queued buffer. Applicable to
     encoders. Possible values are:
 
@@ -2380,24 +1649,12 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_DISABLED``
-
-       -  Forcing a specific frame type disabled.
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_I_FRAME``
-
-       -  Force an I-frame.
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_NOT_CODED``
-
-       -  Force a non-coded frame.
+    * - ``V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_DISABLED``
+      - Forcing a specific frame type disabled.
+    * - ``V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_I_FRAME``
+      - Force an I-frame.
+    * - ``V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_NOT_CODED``
+      - Force a non-coded frame.
 
 
 
@@ -2416,7 +1673,10 @@
 
 .. _v4l2-mpeg-cx2341x-video-spatial-filter-mode:
 
-``V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (enum v4l2_mpeg_cx2341x_video_spatial_filter_mode)``
+``V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE``
+    (enum)
+
+enum v4l2_mpeg_cx2341x_video_spatial_filter_mode -
     Sets the Spatial Filter mode (default ``MANUAL``). Possible values
     are:
 
@@ -2426,18 +1686,10 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL``
-
-       -  Choose the filter manually
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO``
-
-       -  Choose the filter automatically
+    * - ``V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL``
+      - Choose the filter manually
+    * - ``V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO``
+      - Choose the filter automatically
 
 
 
@@ -2447,52 +1699,40 @@
 
 .. _luma-spatial-filter-type:
 
-``V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE (enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type)``
+``V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE``
+    (enum)
+
+enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type -
     Select the algorithm to use for the Luma Spatial Filter (default
     ``1D_HOR``). Possible values:
 
 
 
+.. tabularcolumns:: |p{14.5cm}|p{3.0cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF``
-
-       -  No filter
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR``
-
-       -  One-dimensional horizontal
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_VERT``
-
-       -  One-dimensional vertical
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE``
-
-       -  Two-dimensional separable
-
-    -  .. row 5
-
-       -  ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE``
-
-       -  Two-dimensional symmetrical non-separable
+    * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF``
+      - No filter
+    * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR``
+      - One-dimensional horizontal
+    * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_VERT``
+      - One-dimensional vertical
+    * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE``
+      - Two-dimensional separable
+    * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE``
+      - Two-dimensional symmetrical non-separable
 
 
 
 .. _chroma-spatial-filter-type:
 
-``V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE (enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type)``
+``V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE``
+    (enum)
+
+enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type -
     Select the algorithm for the Chroma Spatial Filter (default
     ``1D_HOR``). Possible values are:
 
@@ -2502,24 +1742,19 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF``
-
-       -  No filter
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR``
-
-       -  One-dimensional horizontal
+    * - ``V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF``
+      - No filter
+    * - ``V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR``
+      - One-dimensional horizontal
 
 
 
 .. _v4l2-mpeg-cx2341x-video-temporal-filter-mode:
 
-``V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE (enum v4l2_mpeg_cx2341x_video_temporal_filter_mode)``
+``V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE``
+    (enum)
+
+enum v4l2_mpeg_cx2341x_video_temporal_filter_mode -
     Sets the Temporal Filter mode (default ``MANUAL``). Possible values
     are:
 
@@ -2529,18 +1764,10 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL``
-
-       -  Choose the filter manually
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO``
-
-       -  Choose the filter automatically
+    * - ``V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL``
+      - Choose the filter manually
+    * - ``V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO``
+      - Choose the filter automatically
 
 
 
@@ -2550,7 +1777,10 @@
 
 .. _v4l2-mpeg-cx2341x-video-median-filter-type:
 
-``V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE (enum v4l2_mpeg_cx2341x_video_median_filter_type)``
+``V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE``
+    (enum)
+
+enum v4l2_mpeg_cx2341x_video_median_filter_type -
     Median Filter Type (default ``OFF``). Possible values are:
 
 
@@ -2559,36 +1789,16 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF``
-
-       -  No filter
-
-    -  .. row 2
-
-       -  ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR``
-
-       -  Horizontal filter
-
-    -  .. row 3
-
-       -  ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_VERT``
-
-       -  Vertical filter
-
-    -  .. row 4
-
-       -  ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT``
-
-       -  Horizontal and vertical filter
-
-    -  .. row 5
-
-       -  ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG``
-
-       -  Diagonal filter
+    * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF``
+      - No filter
+    * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR``
+      - Horizontal filter
+    * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_VERT``
+      - Vertical filter
+    * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT``
+      - Horizontal and vertical filter
+    * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG``
+      - Diagonal filter
 
 
 
@@ -2631,7 +1841,10 @@
 
 .. _v4l2-vpx-num-partitions:
 
-``V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS (enum v4l2_vp8_num_partitions)``
+``V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS``
+    (enum)
+
+enum v4l2_vp8_num_partitions -
     The number of token partitions to use in VP8 encoder. Possible
     values are:
 
@@ -2641,30 +1854,14 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION``
-
-       -  1 coefficient partition
-
-    -  .. row 2
-
-       -  ``V4L2_CID_MPEG_VIDEO_VPX_2_PARTITIONS``
-
-       -  2 coefficient partitions
-
-    -  .. row 3
-
-       -  ``V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS``
-
-       -  4 coefficient partitions
-
-    -  .. row 4
-
-       -  ``V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS``
-
-       -  8 coefficient partitions
+    * - ``V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION``
+      - 1 coefficient partition
+    * - ``V4L2_CID_MPEG_VIDEO_VPX_2_PARTITIONS``
+      - 2 coefficient partitions
+    * - ``V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS``
+      - 4 coefficient partitions
+    * - ``V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS``
+      - 8 coefficient partitions
 
 
 
@@ -2673,37 +1870,28 @@
 
 .. _v4l2-vpx-num-ref-frames:
 
-``V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES (enum v4l2_vp8_num_ref_frames)``
+``V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES``
+    (enum)
+
+enum v4l2_vp8_num_ref_frames -
     The number of reference pictures for encoding P frames. Possible
     values are:
 
-
+.. tabularcolumns:: |p{7.9cm}|p{9.6cm}|
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME``
-
-       -  Last encoded frame will be searched
-
-    -  .. row 2
-
-       -  ``V4L2_CID_MPEG_VIDEO_VPX_2_REF_FRAME``
-
-       -  Two frames will be searched among the last encoded frame, the
-	  golden frame and the alternate reference (altref) frame. The
-	  encoder implementation will decide which two are chosen.
-
-    -  .. row 3
-
-       -  ``V4L2_CID_MPEG_VIDEO_VPX_3_REF_FRAME``
-
-       -  The last encoded frame, the golden frame and the altref frame will
-	  be searched.
+    * - ``V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME``
+      - Last encoded frame will be searched
+    * - ``V4L2_CID_MPEG_VIDEO_VPX_2_REF_FRAME``
+      - Two frames will be searched among the last encoded frame, the
+	golden frame and the alternate reference (altref) frame. The
+	encoder implementation will decide which two are chosen.
+    * - ``V4L2_CID_MPEG_VIDEO_VPX_3_REF_FRAME``
+      - The last encoded frame, the golden frame and the altref frame will
+	be searched.
 
 
 
@@ -2726,31 +1914,33 @@
 
 .. _v4l2-vpx-golden-frame-sel:
 
-``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL (enum v4l2_vp8_golden_frame_sel)``
+``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL``
+    (enum)
+
+enum v4l2_vp8_golden_frame_sel -
     Selects the golden frame for encoding. Possible values are:
 
+.. raw:: latex
 
+    \begin{adjustbox}{width=\columnwidth}
+
+.. tabularcolumns:: |p{11.0cm}|p{10.0cm}|
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
+    * - ``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV``
+      - Use the (n-2)th frame as a golden frame, current frame index being
+	'n'.
+    * - ``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD``
+      - Use the previous specific frame indicated by
+	``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD`` as a
+	golden frame.
 
-    -  .. row 1
+.. raw:: latex
 
-       -  ``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV``
-
-       -  Use the (n-2)th frame as a golden frame, current frame index being
-	  'n'.
-
-    -  .. row 2
-
-       -  ``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD``
-
-       -  Use the previous specific frame indicated by
-	  V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD as a
-	  golden frame.
-
+    \end{adjustbox}
 
 
 ``V4L2_CID_MPEG_VIDEO_VPX_MIN_QP (integer)``
@@ -2791,7 +1981,10 @@
 
 .. _v4l2-exposure-auto-type:
 
-``V4L2_CID_EXPOSURE_AUTO (enum v4l2_exposure_auto_type)``
+``V4L2_CID_EXPOSURE_AUTO``
+    (enum)
+
+enum v4l2_exposure_auto_type -
     Enables automatic adjustments of the exposure time and/or iris
     aperture. The effect of manual changes of the exposure time or iris
     aperture while these features are enabled is undefined, drivers
@@ -2803,30 +1996,14 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_EXPOSURE_AUTO``
-
-       -  Automatic exposure time, automatic iris aperture.
-
-    -  .. row 2
-
-       -  ``V4L2_EXPOSURE_MANUAL``
-
-       -  Manual exposure time, manual iris.
-
-    -  .. row 3
-
-       -  ``V4L2_EXPOSURE_SHUTTER_PRIORITY``
-
-       -  Manual exposure time, auto iris.
-
-    -  .. row 4
-
-       -  ``V4L2_EXPOSURE_APERTURE_PRIORITY``
-
-       -  Auto exposure time, manual iris.
+    * - ``V4L2_EXPOSURE_AUTO``
+      - Automatic exposure time, automatic iris aperture.
+    * - ``V4L2_EXPOSURE_MANUAL``
+      - Manual exposure time, manual iris.
+    * - ``V4L2_EXPOSURE_SHUTTER_PRIORITY``
+      - Manual exposure time, auto iris.
+    * - ``V4L2_EXPOSURE_APERTURE_PRIORITY``
+      - Auto exposure time, manual iris.
 
 
 
@@ -2856,45 +2033,32 @@
 
 .. _v4l2-exposure-metering:
 
-``V4L2_CID_EXPOSURE_METERING (enum v4l2_exposure_metering)``
+``V4L2_CID_EXPOSURE_METERING``
+    (enum)
+
+enum v4l2_exposure_metering -
     Determines how the camera measures the amount of light available for
     the frame exposure. Possible values are:
 
-
+.. tabularcolumns:: |p{8.5cm}|p{9.0cm}|
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_EXPOSURE_METERING_AVERAGE``
-
-       -  Use the light information coming from the entire frame and average
-	  giving no weighting to any particular portion of the metered area.
-
-    -  .. row 2
-
-       -  ``V4L2_EXPOSURE_METERING_CENTER_WEIGHTED``
-
-       -  Average the light information coming from the entire frame giving
-	  priority to the center of the metered area.
-
-    -  .. row 3
-
-       -  ``V4L2_EXPOSURE_METERING_SPOT``
-
-       -  Measure only very small area at the center of the frame.
-
-    -  .. row 4
-
-       -  ``V4L2_EXPOSURE_METERING_MATRIX``
-
-       -  A multi-zone metering. The light intensity is measured in several
-	  points of the frame and the results are combined. The algorithm of
-	  the zones selection and their significance in calculating the
-	  final value is device dependent.
+    * - ``V4L2_EXPOSURE_METERING_AVERAGE``
+      - Use the light information coming from the entire frame and average
+	giving no weighting to any particular portion of the metered area.
+    * - ``V4L2_EXPOSURE_METERING_CENTER_WEIGHTED``
+      - Average the light information coming from the entire frame giving
+	priority to the center of the metered area.
+    * - ``V4L2_EXPOSURE_METERING_SPOT``
+      - Measure only very small area at the center of the frame.
+    * - ``V4L2_EXPOSURE_METERING_MATRIX``
+      - A multi-zone metering. The light intensity is measured in several
+	points of the frame and the results are combined. The algorithm of
+	the zones selection and their significance in calculating the
+	final value is device dependent.
 
 
 
@@ -2968,77 +2132,48 @@
     control may stop updates of the ``V4L2_CID_AUTO_FOCUS_STATUS``
     control value.
 
-
+.. tabularcolumns:: |p{6.5cm}|p{11.0cm}|
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_AUTO_FOCUS_STATUS_IDLE``
-
-       -  Automatic focus is not active.
-
-    -  .. row 2
-
-       -  ``V4L2_AUTO_FOCUS_STATUS_BUSY``
-
-       -  Automatic focusing is in progress.
-
-    -  .. row 3
-
-       -  ``V4L2_AUTO_FOCUS_STATUS_REACHED``
-
-       -  Focus has been reached.
-
-    -  .. row 4
-
-       -  ``V4L2_AUTO_FOCUS_STATUS_FAILED``
-
-       -  Automatic focus has failed, the driver will not transition from
-	  this state until another action is performed by an application.
+    * - ``V4L2_AUTO_FOCUS_STATUS_IDLE``
+      - Automatic focus is not active.
+    * - ``V4L2_AUTO_FOCUS_STATUS_BUSY``
+      - Automatic focusing is in progress.
+    * - ``V4L2_AUTO_FOCUS_STATUS_REACHED``
+      - Focus has been reached.
+    * - ``V4L2_AUTO_FOCUS_STATUS_FAILED``
+      - Automatic focus has failed, the driver will not transition from
+	this state until another action is performed by an application.
 
 
 
 .. _v4l2-auto-focus-range:
 
-``V4L2_CID_AUTO_FOCUS_RANGE (enum v4l2_auto_focus_range)``
+``V4L2_CID_AUTO_FOCUS_RANGE``
+    (enum)
+
+enum v4l2_auto_focus_range -
     Determines auto focus distance range for which lens may be adjusted.
 
-
+.. tabularcolumns:: |p{6.5cm}|p{11.0cm}|
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_AUTO_FOCUS_RANGE_AUTO``
-
-       -  The camera automatically selects the focus range.
-
-    -  .. row 2
-
-       -  ``V4L2_AUTO_FOCUS_RANGE_NORMAL``
-
-       -  Normal distance range, limited for best automatic focus
-	  performance.
-
-    -  .. row 3
-
-       -  ``V4L2_AUTO_FOCUS_RANGE_MACRO``
-
-       -  Macro (close-up) auto focus. The camera will use its minimum
-	  possible distance for auto focus.
-
-    -  .. row 4
-
-       -  ``V4L2_AUTO_FOCUS_RANGE_INFINITY``
-
-       -  The lens is set to focus on an object at infinite distance.
+    * - ``V4L2_AUTO_FOCUS_RANGE_AUTO``
+      - The camera automatically selects the focus range.
+    * - ``V4L2_AUTO_FOCUS_RANGE_NORMAL``
+      - Normal distance range, limited for best automatic focus
+	performance.
+    * - ``V4L2_AUTO_FOCUS_RANGE_MACRO``
+      - Macro (close-up) auto focus. The camera will use its minimum
+	possible distance for auto focus.
+    * - ``V4L2_AUTO_FOCUS_RANGE_INFINITY``
+      - The lens is set to focus on an object at infinite distance.
 
 
 
@@ -3088,90 +2223,53 @@
 
 .. _v4l2-auto-n-preset-white-balance:
 
-``V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE (enum v4l2_auto_n_preset_white_balance)``
+``V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE``
+    (enum)
+
+enum v4l2_auto_n_preset_white_balance -
     Sets white balance to automatic, manual or a preset. The presets
     determine color temperature of the light as a hint to the camera for
     white balance adjustments resulting in most accurate color
     representation. The following white balance presets are listed in
     order of increasing color temperature.
 
-
+.. tabularcolumns:: |p{7.0 cm}|p{10.5cm}|
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_WHITE_BALANCE_MANUAL``
-
-       -  Manual white balance.
-
-    -  .. row 2
-
-       -  ``V4L2_WHITE_BALANCE_AUTO``
-
-       -  Automatic white balance adjustments.
-
-    -  .. row 3
-
-       -  ``V4L2_WHITE_BALANCE_INCANDESCENT``
-
-       -  White balance setting for incandescent (tungsten) lighting. It
-	  generally cools down the colors and corresponds approximately to
-	  2500...3500 K color temperature range.
-
-    -  .. row 4
-
-       -  ``V4L2_WHITE_BALANCE_FLUORESCENT``
-
-       -  White balance preset for fluorescent lighting. It corresponds
-	  approximately to 4000...5000 K color temperature.
-
-    -  .. row 5
-
-       -  ``V4L2_WHITE_BALANCE_FLUORESCENT_H``
-
-       -  With this setting the camera will compensate for fluorescent H
-	  lighting.
-
-    -  .. row 6
-
-       -  ``V4L2_WHITE_BALANCE_HORIZON``
-
-       -  White balance setting for horizon daylight. It corresponds
-	  approximately to 5000 K color temperature.
-
-    -  .. row 7
-
-       -  ``V4L2_WHITE_BALANCE_DAYLIGHT``
-
-       -  White balance preset for daylight (with clear sky). It corresponds
-	  approximately to 5000...6500 K color temperature.
-
-    -  .. row 8
-
-       -  ``V4L2_WHITE_BALANCE_FLASH``
-
-       -  With this setting the camera will compensate for the flash light.
-	  It slightly warms up the colors and corresponds roughly to
-	  5000...5500 K color temperature.
-
-    -  .. row 9
-
-       -  ``V4L2_WHITE_BALANCE_CLOUDY``
-
-       -  White balance preset for moderately overcast sky. This option
-	  corresponds approximately to 6500...8000 K color temperature
-	  range.
-
-    -  .. row 10
-
-       -  ``V4L2_WHITE_BALANCE_SHADE``
-
-       -  White balance preset for shade or heavily overcast sky. It
-	  corresponds approximately to 9000...10000 K color temperature.
+    * - ``V4L2_WHITE_BALANCE_MANUAL``
+      - Manual white balance.
+    * - ``V4L2_WHITE_BALANCE_AUTO``
+      - Automatic white balance adjustments.
+    * - ``V4L2_WHITE_BALANCE_INCANDESCENT``
+      - White balance setting for incandescent (tungsten) lighting. It
+	generally cools down the colors and corresponds approximately to
+	2500...3500 K color temperature range.
+    * - ``V4L2_WHITE_BALANCE_FLUORESCENT``
+      - White balance preset for fluorescent lighting. It corresponds
+	approximately to 4000...5000 K color temperature.
+    * - ``V4L2_WHITE_BALANCE_FLUORESCENT_H``
+      - With this setting the camera will compensate for fluorescent H
+	lighting.
+    * - ``V4L2_WHITE_BALANCE_HORIZON``
+      - White balance setting for horizon daylight. It corresponds
+	approximately to 5000 K color temperature.
+    * - ``V4L2_WHITE_BALANCE_DAYLIGHT``
+      - White balance preset for daylight (with clear sky). It corresponds
+	approximately to 5000...6500 K color temperature.
+    * - ``V4L2_WHITE_BALANCE_FLASH``
+      - With this setting the camera will compensate for the flash light.
+	It slightly warms up the colors and corresponds roughly to
+	5000...5500 K color temperature.
+    * - ``V4L2_WHITE_BALANCE_CLOUDY``
+      - White balance preset for moderately overcast sky. This option
+	corresponds approximately to 6500...8000 K color temperature
+	range.
+    * - ``V4L2_WHITE_BALANCE_SHADE``
+      - White balance preset for shade or heavily overcast sky. It
+	corresponds approximately to 9000...10000 K color temperature.
 
 
 
@@ -3205,7 +2303,10 @@
 
 .. _v4l2-iso-sensitivity-auto-type:
 
-``V4L2_CID_ISO_SENSITIVITY_AUTO (enum v4l2_iso_sensitivity_type)``
+``V4L2_CID_ISO_SENSITIVITY_AUTO``
+    (enum)
+
+enum v4l2_iso_sensitivity_type -
     Enables or disables automatic ISO sensitivity adjustments.
 
 
@@ -3214,24 +2315,19 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_CID_ISO_SENSITIVITY_MANUAL``
-
-       -  Manual ISO sensitivity.
-
-    -  .. row 2
-
-       -  ``V4L2_CID_ISO_SENSITIVITY_AUTO``
-
-       -  Automatic ISO sensitivity adjustments.
+    * - ``V4L2_CID_ISO_SENSITIVITY_MANUAL``
+      - Manual ISO sensitivity.
+    * - ``V4L2_CID_ISO_SENSITIVITY_AUTO``
+      - Automatic ISO sensitivity adjustments.
 
 
 
 .. _v4l2-scene-mode:
 
-``V4L2_CID_SCENE_MODE (enum v4l2_scene_mode)``
+``V4L2_CID_SCENE_MODE``
+    (enum)
+
+enum v4l2_scene_mode -
     This control allows to select scene programs as the camera automatic
     modes optimized for common shooting scenes. Within these modes the
     camera determines best exposure, aperture, focusing, light metering,
@@ -3243,133 +2339,77 @@
     to ``V4L2_SCENE_MODE_NONE`` to make sure the other possibly related
     controls are accessible. The following scene programs are defined:
 
-
+.. tabularcolumns:: |p{6.0cm}|p{11.5cm}|
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_SCENE_MODE_NONE``
-
-       -  The scene mode feature is disabled.
-
-    -  .. row 2
-
-       -  ``V4L2_SCENE_MODE_BACKLIGHT``
-
-       -  Backlight. Compensates for dark shadows when light is coming from
-	  behind a subject, also by automatically turning on the flash.
-
-    -  .. row 3
-
-       -  ``V4L2_SCENE_MODE_BEACH_SNOW``
-
-       -  Beach and snow. This mode compensates for all-white or bright
-	  scenes, which tend to look gray and low contrast, when camera's
-	  automatic exposure is based on an average scene brightness. To
-	  compensate, this mode automatically slightly overexposes the
-	  frames. The white balance may also be adjusted to compensate for
-	  the fact that reflected snow looks bluish rather than white.
-
-    -  .. row 4
-
-       -  ``V4L2_SCENE_MODE_CANDLELIGHT``
-
-       -  Candle light. The camera generally raises the ISO sensitivity and
-	  lowers the shutter speed. This mode compensates for relatively
-	  close subject in the scene. The flash is disabled in order to
-	  preserve the ambiance of the light.
-
-    -  .. row 5
-
-       -  ``V4L2_SCENE_MODE_DAWN_DUSK``
-
-       -  Dawn and dusk. Preserves the colors seen in low natural light
-	  before dusk and after down. The camera may turn off the flash, and
-	  automatically focus at infinity. It will usually boost saturation
-	  and lower the shutter speed.
-
-    -  .. row 6
-
-       -  ``V4L2_SCENE_MODE_FALL_COLORS``
-
-       -  Fall colors. Increases saturation and adjusts white balance for
-	  color enhancement. Pictures of autumn leaves get saturated reds
-	  and yellows.
-
-    -  .. row 7
-
-       -  ``V4L2_SCENE_MODE_FIREWORKS``
-
-       -  Fireworks. Long exposure times are used to capture the expanding
-	  burst of light from a firework. The camera may invoke image
-	  stabilization.
-
-    -  .. row 8
-
-       -  ``V4L2_SCENE_MODE_LANDSCAPE``
-
-       -  Landscape. The camera may choose a small aperture to provide deep
-	  depth of field and long exposure duration to help capture detail
-	  in dim light conditions. The focus is fixed at infinity. Suitable
-	  for distant and wide scenery.
-
-    -  .. row 9
-
-       -  ``V4L2_SCENE_MODE_NIGHT``
-
-       -  Night, also known as Night Landscape. Designed for low light
-	  conditions, it preserves detail in the dark areas without blowing
-	  out bright objects. The camera generally sets itself to a
-	  medium-to-high ISO sensitivity, with a relatively long exposure
-	  time, and turns flash off. As such, there will be increased image
-	  noise and the possibility of blurred image.
-
-    -  .. row 10
-
-       -  ``V4L2_SCENE_MODE_PARTY_INDOOR``
-
-       -  Party and indoor. Designed to capture indoor scenes that are lit
-	  by indoor background lighting as well as the flash. The camera
-	  usually increases ISO sensitivity, and adjusts exposure for the
-	  low light conditions.
-
-    -  .. row 11
-
-       -  ``V4L2_SCENE_MODE_PORTRAIT``
-
-       -  Portrait. The camera adjusts the aperture so that the depth of
-	  field is reduced, which helps to isolate the subject against a
-	  smooth background. Most cameras recognize the presence of faces in
-	  the scene and focus on them. The color hue is adjusted to enhance
-	  skin tones. The intensity of the flash is often reduced.
-
-    -  .. row 12
-
-       -  ``V4L2_SCENE_MODE_SPORTS``
-
-       -  Sports. Significantly increases ISO and uses a fast shutter speed
-	  to freeze motion of rapidly-moving subjects. Increased image noise
-	  may be seen in this mode.
-
-    -  .. row 13
-
-       -  ``V4L2_SCENE_MODE_SUNSET``
-
-       -  Sunset. Preserves deep hues seen in sunsets and sunrises. It bumps
-	  up the saturation.
-
-    -  .. row 14
-
-       -  ``V4L2_SCENE_MODE_TEXT``
-
-       -  Text. It applies extra contrast and sharpness, it is typically a
-	  black-and-white mode optimized for readability. Automatic focus
-	  may be switched to close-up mode and this setting may also involve
-	  some lens-distortion correction.
+    * - ``V4L2_SCENE_MODE_NONE``
+      - The scene mode feature is disabled.
+    * - ``V4L2_SCENE_MODE_BACKLIGHT``
+      - Backlight. Compensates for dark shadows when light is coming from
+	behind a subject, also by automatically turning on the flash.
+    * - ``V4L2_SCENE_MODE_BEACH_SNOW``
+      - Beach and snow. This mode compensates for all-white or bright
+	scenes, which tend to look gray and low contrast, when camera's
+	automatic exposure is based on an average scene brightness. To
+	compensate, this mode automatically slightly overexposes the
+	frames. The white balance may also be adjusted to compensate for
+	the fact that reflected snow looks bluish rather than white.
+    * - ``V4L2_SCENE_MODE_CANDLELIGHT``
+      - Candle light. The camera generally raises the ISO sensitivity and
+	lowers the shutter speed. This mode compensates for relatively
+	close subject in the scene. The flash is disabled in order to
+	preserve the ambiance of the light.
+    * - ``V4L2_SCENE_MODE_DAWN_DUSK``
+      - Dawn and dusk. Preserves the colors seen in low natural light
+	before dusk and after down. The camera may turn off the flash, and
+	automatically focus at infinity. It will usually boost saturation
+	and lower the shutter speed.
+    * - ``V4L2_SCENE_MODE_FALL_COLORS``
+      - Fall colors. Increases saturation and adjusts white balance for
+	color enhancement. Pictures of autumn leaves get saturated reds
+	and yellows.
+    * - ``V4L2_SCENE_MODE_FIREWORKS``
+      - Fireworks. Long exposure times are used to capture the expanding
+	burst of light from a firework. The camera may invoke image
+	stabilization.
+    * - ``V4L2_SCENE_MODE_LANDSCAPE``
+      - Landscape. The camera may choose a small aperture to provide deep
+	depth of field and long exposure duration to help capture detail
+	in dim light conditions. The focus is fixed at infinity. Suitable
+	for distant and wide scenery.
+    * - ``V4L2_SCENE_MODE_NIGHT``
+      - Night, also known as Night Landscape. Designed for low light
+	conditions, it preserves detail in the dark areas without blowing
+	out bright objects. The camera generally sets itself to a
+	medium-to-high ISO sensitivity, with a relatively long exposure
+	time, and turns flash off. As such, there will be increased image
+	noise and the possibility of blurred image.
+    * - ``V4L2_SCENE_MODE_PARTY_INDOOR``
+      - Party and indoor. Designed to capture indoor scenes that are lit
+	by indoor background lighting as well as the flash. The camera
+	usually increases ISO sensitivity, and adjusts exposure for the
+	low light conditions.
+    * - ``V4L2_SCENE_MODE_PORTRAIT``
+      - Portrait. The camera adjusts the aperture so that the depth of
+	field is reduced, which helps to isolate the subject against a
+	smooth background. Most cameras recognize the presence of faces in
+	the scene and focus on them. The color hue is adjusted to enhance
+	skin tones. The intensity of the flash is often reduced.
+    * - ``V4L2_SCENE_MODE_SPORTS``
+      - Sports. Significantly increases ISO and uses a fast shutter speed
+	to freeze motion of rapidly-moving subjects. Increased image noise
+	may be seen in this mode.
+    * - ``V4L2_SCENE_MODE_SUNSET``
+      - Sunset. Preserves deep hues seen in sunsets and sunrises. It bumps
+	up the saturation.
+    * - ``V4L2_SCENE_MODE_TEXT``
+      - Text. It applies extra contrast and sharpness, it is typically a
+	black-and-white mode optimized for readability. Automatic focus
+	may be switched to close-up mode and this setting may also involve
+	some lens-distortion correction.
 
 
 
@@ -3393,24 +2433,12 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_LOCK_EXPOSURE``
-
-       -  Automatic exposure adjustments lock.
-
-    -  .. row 2
-
-       -  ``V4L2_LOCK_WHITE_BALANCE``
-
-       -  Automatic white balance adjustments lock.
-
-    -  .. row 3
-
-       -  ``V4L2_LOCK_FOCUS``
-
-       -  Automatic focus lock.
+    * - ``V4L2_LOCK_EXPOSURE``
+      - Automatic exposure adjustments lock.
+    * - ``V4L2_LOCK_WHITE_BALANCE``
+      - Automatic white balance adjustments lock.
+    * - ``V4L2_LOCK_FOCUS``
+      - Automatic focus lock.
 
 
 
@@ -3570,7 +2598,10 @@
     Configures pilot tone frequency value. Unit is in Hz. The range and
     step are driver-specific.
 
-``V4L2_CID_TUNE_PREEMPHASIS (enum v4l2_preemphasis)``
+``V4L2_CID_TUNE_PREEMPHASIS``
+    (enum)
+
+enum v4l2_preemphasis -
     Configures the pre-emphasis value for broadcasting. A pre-emphasis
     filter is applied to the broadcast to accentuate the high audio
     frequencies. Depending on the region, a time constant of either 50
@@ -3583,24 +2614,12 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_PREEMPHASIS_DISABLED``
-
-       -  No pre-emphasis is applied.
-
-    -  .. row 2
-
-       -  ``V4L2_PREEMPHASIS_50_uS``
-
-       -  A pre-emphasis of 50 uS is used.
-
-    -  .. row 3
-
-       -  ``V4L2_PREEMPHASIS_75_uS``
-
-       -  A pre-emphasis of 75 uS is used.
+    * - ``V4L2_PREEMPHASIS_DISABLED``
+      - No pre-emphasis is applied.
+    * - ``V4L2_PREEMPHASIS_50_uS``
+      - A pre-emphasis of 50 uS is used.
+    * - ``V4L2_PREEMPHASIS_75_uS``
+      - A pre-emphasis of 75 uS is used.
 
 
 
@@ -3684,51 +2703,31 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_FLASH_LED_MODE_NONE``
-
-       -  Off.
-
-    -  .. row 2
-
-       -  ``V4L2_FLASH_LED_MODE_FLASH``
-
-       -  Flash mode.
-
-    -  .. row 3
-
-       -  ``V4L2_FLASH_LED_MODE_TORCH``
-
-       -  Torch mode. See V4L2_CID_FLASH_TORCH_INTENSITY.
+    * - ``V4L2_FLASH_LED_MODE_NONE``
+      - Off.
+    * - ``V4L2_FLASH_LED_MODE_FLASH``
+      - Flash mode.
+    * - ``V4L2_FLASH_LED_MODE_TORCH``
+      - Torch mode. See V4L2_CID_FLASH_TORCH_INTENSITY.
 
 
 
 ``V4L2_CID_FLASH_STROBE_SOURCE (menu)``
     Defines the source of the flash LED strobe.
 
-
+.. tabularcolumns:: |p{7.0cm}|p{10.5cm}|
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_FLASH_STROBE_SOURCE_SOFTWARE``
-
-       -  The flash strobe is triggered by using the
-	  V4L2_CID_FLASH_STROBE control.
-
-    -  .. row 2
-
-       -  ``V4L2_FLASH_STROBE_SOURCE_EXTERNAL``
-
-       -  The flash strobe is triggered by an external source. Typically
-	  this is a sensor, which makes it possible to synchronises the
-	  flash strobe start to exposure start.
+    * - ``V4L2_FLASH_STROBE_SOURCE_SOFTWARE``
+      - The flash strobe is triggered by using the
+	V4L2_CID_FLASH_STROBE control.
+    * - ``V4L2_FLASH_STROBE_SOURCE_EXTERNAL``
+      - The flash strobe is triggered by an external source. Typically
+	this is a sensor, which makes it possible to synchronises the
+	flash strobe start to exposure start.
 
 
 
@@ -3775,75 +2774,39 @@
     an effect is chip dependent. Reading the faults resets the control
     and returns the chip to a usable state if possible.
 
-
+.. tabularcolumns:: |p{8.0cm}|p{9.5cm}|
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_FLASH_FAULT_OVER_VOLTAGE``
-
-       -  Flash controller voltage to the flash LED has exceeded the limit
-	  specific to the flash controller.
-
-    -  .. row 2
-
-       -  ``V4L2_FLASH_FAULT_TIMEOUT``
-
-       -  The flash strobe was still on when the timeout set by the user ---
-	  V4L2_CID_FLASH_TIMEOUT control --- has expired. Not all flash
-	  controllers may set this in all such conditions.
-
-    -  .. row 3
-
-       -  ``V4L2_FLASH_FAULT_OVER_TEMPERATURE``
-
-       -  The flash controller has overheated.
-
-    -  .. row 4
-
-       -  ``V4L2_FLASH_FAULT_SHORT_CIRCUIT``
-
-       -  The short circuit protection of the flash controller has been
-	  triggered.
-
-    -  .. row 5
-
-       -  ``V4L2_FLASH_FAULT_OVER_CURRENT``
-
-       -  Current in the LED power supply has exceeded the limit specific to
-	  the flash controller.
-
-    -  .. row 6
-
-       -  ``V4L2_FLASH_FAULT_INDICATOR``
-
-       -  The flash controller has detected a short or open circuit
-	  condition on the indicator LED.
-
-    -  .. row 7
-
-       -  ``V4L2_FLASH_FAULT_UNDER_VOLTAGE``
-
-       -  Flash controller voltage to the flash LED has been below the
-	  minimum limit specific to the flash controller.
-
-    -  .. row 8
-
-       -  ``V4L2_FLASH_FAULT_INPUT_VOLTAGE``
-
-       -  The input voltage of the flash controller is below the limit under
-	  which strobing the flash at full current will not be possible.The
-	  condition persists until this flag is no longer set.
-
-    -  .. row 9
-
-       -  ``V4L2_FLASH_FAULT_LED_OVER_TEMPERATURE``
-
-       -  The temperature of the LED has exceeded its allowed upper limit.
+    * - ``V4L2_FLASH_FAULT_OVER_VOLTAGE``
+      - Flash controller voltage to the flash LED has exceeded the limit
+	specific to the flash controller.
+    * - ``V4L2_FLASH_FAULT_TIMEOUT``
+      - The flash strobe was still on when the timeout set by the user ---
+	V4L2_CID_FLASH_TIMEOUT control --- has expired. Not all flash
+	controllers may set this in all such conditions.
+    * - ``V4L2_FLASH_FAULT_OVER_TEMPERATURE``
+      - The flash controller has overheated.
+    * - ``V4L2_FLASH_FAULT_SHORT_CIRCUIT``
+      - The short circuit protection of the flash controller has been
+	triggered.
+    * - ``V4L2_FLASH_FAULT_OVER_CURRENT``
+      - Current in the LED power supply has exceeded the limit specific to
+	the flash controller.
+    * - ``V4L2_FLASH_FAULT_INDICATOR``
+      - The flash controller has detected a short or open circuit
+	condition on the indicator LED.
+    * - ``V4L2_FLASH_FAULT_UNDER_VOLTAGE``
+      - Flash controller voltage to the flash LED has been below the
+	minimum limit specific to the flash controller.
+    * - ``V4L2_FLASH_FAULT_INPUT_VOLTAGE``
+      - The input voltage of the flash controller is below the limit under
+	which strobing the flash at full current will not be possible.The
+	condition persists until this flag is no longer set.
+    * - ``V4L2_FLASH_FAULT_LED_OVER_TEMPERATURE``
+      - The temperature of the LED has exceeded its allowed upper limit.
 
 
 
@@ -3886,48 +2849,24 @@
     how Cb and Cr components are downsampled after coverting an input
     image from RGB to Y'CbCr color space.
 
-
+.. tabularcolumns:: |p{7.0cm}|p{10.5cm}|
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_JPEG_CHROMA_SUBSAMPLING_444``
-
-       -  No chroma subsampling, each pixel has Y, Cr and Cb values.
-
-    -  .. row 2
-
-       -  ``V4L2_JPEG_CHROMA_SUBSAMPLING_422``
-
-       -  Horizontally subsample Cr, Cb components by a factor of 2.
-
-    -  .. row 3
-
-       -  ``V4L2_JPEG_CHROMA_SUBSAMPLING_420``
-
-       -  Subsample Cr, Cb components horizontally and vertically by 2.
-
-    -  .. row 4
-
-       -  ``V4L2_JPEG_CHROMA_SUBSAMPLING_411``
-
-       -  Horizontally subsample Cr, Cb components by a factor of 4.
-
-    -  .. row 5
-
-       -  ``V4L2_JPEG_CHROMA_SUBSAMPLING_410``
-
-       -  Subsample Cr, Cb components horizontally by 4 and vertically by 2.
-
-    -  .. row 6
-
-       -  ``V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY``
-
-       -  Use only luminance component.
+    * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_444``
+      - No chroma subsampling, each pixel has Y, Cr and Cb values.
+    * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_422``
+      - Horizontally subsample Cr, Cb components by a factor of 2.
+    * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_420``
+      - Subsample Cr, Cb components horizontally and vertically by 2.
+    * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_411``
+      - Horizontally subsample Cr, Cb components by a factor of 4.
+    * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_410``
+      - Subsample Cr, Cb components horizontally by 4 and vertically by 2.
+    * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY``
+      - Use only luminance component.
 
 
 
@@ -3969,36 +2908,16 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_JPEG_ACTIVE_MARKER_APP0``
-
-       -  Application data segment APP\ :sub:`0`.
-
-    -  .. row 2
-
-       -  ``V4L2_JPEG_ACTIVE_MARKER_APP1``
-
-       -  Application data segment APP\ :sub:`1`.
-
-    -  .. row 3
-
-       -  ``V4L2_JPEG_ACTIVE_MARKER_COM``
-
-       -  Comment segment.
-
-    -  .. row 4
-
-       -  ``V4L2_JPEG_ACTIVE_MARKER_DQT``
-
-       -  Quantization tables segment.
-
-    -  .. row 5
-
-       -  ``V4L2_JPEG_ACTIVE_MARKER_DHT``
-
-       -  Huffman tables segment.
+    * - ``V4L2_JPEG_ACTIVE_MARKER_APP0``
+      - Application data segment APP\ :sub:`0`.
+    * - ``V4L2_JPEG_ACTIVE_MARKER_APP1``
+      - Application data segment APP\ :sub:`1`.
+    * - ``V4L2_JPEG_ACTIVE_MARKER_COM``
+      - Comment segment.
+    * - ``V4L2_JPEG_ACTIVE_MARKER_DQT``
+      - Quantization tables segment.
+    * - ``V4L2_JPEG_ACTIVE_MARKER_DHT``
+      - Huffman tables segment.
 
 
 
@@ -4162,13 +3081,19 @@
     EDIDs, then the bit for that pad will be 0. This read-only control
     is applicable to VGA, DVI-A/D, HDMI and DisplayPort connectors.
 
-``V4L2_CID_DV_TX_MODE (enum v4l2_dv_tx_mode)``
+``V4L2_CID_DV_TX_MODE``
+    (enum)
+
+enum v4l2_dv_tx_mode -
     HDMI transmitters can transmit in DVI-D mode (just video) or in HDMI
     mode (video + audio + auxiliary data). This control selects which
     mode to use: V4L2_DV_TX_MODE_DVI_D or V4L2_DV_TX_MODE_HDMI.
     This control is applicable to HDMI connectors.
 
-``V4L2_CID_DV_TX_RGB_RANGE (enum v4l2_dv_rgb_range)``
+``V4L2_CID_DV_TX_RGB_RANGE``
+    (enum)
+
+enum v4l2_dv_rgb_range -
     Select the quantization range for RGB output. V4L2_DV_RANGE_AUTO
     follows the RGB quantization range specified in the standard for the
     video interface (ie. :ref:`cea861` for HDMI).
@@ -4180,7 +3105,10 @@
     the number of bits per component. This control is applicable to VGA,
     DVI-A/D, HDMI and DisplayPort connectors.
 
-``V4L2_CID_DV_TX_IT_CONTENT_TYPE (enum v4l2_dv_it_content_type)``
+``V4L2_CID_DV_TX_IT_CONTENT_TYPE``
+    (enum)
+
+enum v4l2_dv_it_content_type -
     Configures the IT Content Type of the transmitted video. This
     information is sent over HDMI and DisplayPort connectors as part of
     the AVI InfoFrame. The term 'IT Content' is used for content that
@@ -4188,46 +3116,26 @@
     or an analog source. The enum v4l2_dv_it_content_type defines
     the possible content types:
 
-
+.. tabularcolumns:: |p{7.0cm}|p{10.5cm}|
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_DV_IT_CONTENT_TYPE_GRAPHICS``
-
-       -  Graphics content. Pixel data should be passed unfiltered and
-	  without analog reconstruction.
-
-    -  .. row 2
-
-       -  ``V4L2_DV_IT_CONTENT_TYPE_PHOTO``
-
-       -  Photo content. The content is derived from digital still pictures.
-	  The content should be passed through with minimal scaling and
-	  picture enhancements.
-
-    -  .. row 3
-
-       -  ``V4L2_DV_IT_CONTENT_TYPE_CINEMA``
-
-       -  Cinema content.
-
-    -  .. row 4
-
-       -  ``V4L2_DV_IT_CONTENT_TYPE_GAME``
-
-       -  Game content. Audio and video latency should be minimized.
-
-    -  .. row 5
-
-       -  ``V4L2_DV_IT_CONTENT_TYPE_NO_ITC``
-
-       -  No IT Content information is available and the ITC bit in the AVI
-	  InfoFrame is set to 0.
+    * - ``V4L2_DV_IT_CONTENT_TYPE_GRAPHICS``
+      - Graphics content. Pixel data should be passed unfiltered and
+	without analog reconstruction.
+    * - ``V4L2_DV_IT_CONTENT_TYPE_PHOTO``
+      - Photo content. The content is derived from digital still pictures.
+	The content should be passed through with minimal scaling and
+	picture enhancements.
+    * - ``V4L2_DV_IT_CONTENT_TYPE_CINEMA``
+      - Cinema content.
+    * - ``V4L2_DV_IT_CONTENT_TYPE_GAME``
+      - Game content. Audio and video latency should be minimized.
+    * - ``V4L2_DV_IT_CONTENT_TYPE_NO_ITC``
+      - No IT Content information is available and the ITC bit in the AVI
+	InfoFrame is set to 0.
 
 
 
@@ -4241,7 +3149,10 @@
     will be 0. This read-only control is applicable to DVI-D, HDMI and
     DisplayPort connectors.
 
-``V4L2_CID_DV_RX_RGB_RANGE (enum v4l2_dv_rgb_range)``
+``V4L2_CID_DV_RX_RGB_RANGE``
+    (enum)
+
+enum v4l2_dv_rgb_range -
     Select the quantization range for RGB input. V4L2_DV_RANGE_AUTO
     follows the RGB quantization range specified in the standard for the
     video interface (ie. :ref:`cea861` for HDMI).
@@ -4253,7 +3164,10 @@
     the number of bits per component. This control is applicable to VGA,
     DVI-A/D, HDMI and DisplayPort connectors.
 
-``V4L2_CID_DV_RX_IT_CONTENT_TYPE (enum v4l2_dv_it_content_type)``
+``V4L2_CID_DV_RX_IT_CONTENT_TYPE``
+    (enum)
+
+enum v4l2_dv_it_content_type -
     Reads the IT Content Type of the received video. This information is
     sent over HDMI and DisplayPort connectors as part of the AVI
     InfoFrame. The term 'IT Content' is used for content that originates
@@ -4325,7 +3239,10 @@
     broadcasts speech. If the transmitter doesn't make this distinction,
     then it will be set.
 
-``V4L2_CID_TUNE_DEEMPHASIS (enum v4l2_deemphasis)``
+``V4L2_CID_TUNE_DEEMPHASIS``
+    (enum)
+
+enum v4l2_deemphasis -
     Configures the de-emphasis value for reception. A de-emphasis filter
     is applied to the broadcast to accentuate the high audio
     frequencies. Depending on the region, a time constant of either 50
@@ -4338,24 +3255,12 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_DEEMPHASIS_DISABLED``
-
-       -  No de-emphasis is applied.
-
-    -  .. row 2
-
-       -  ``V4L2_DEEMPHASIS_50_uS``
-
-       -  A de-emphasis of 50 uS is used.
-
-    -  .. row 3
-
-       -  ``V4L2_DEEMPHASIS_75_uS``
-
-       -  A de-emphasis of 75 uS is used.
+    * - ``V4L2_DEEMPHASIS_DISABLED``
+      - No de-emphasis is applied.
+    * - ``V4L2_DEEMPHASIS_50_uS``
+      - A de-emphasis of 50 uS is used.
+    * - ``V4L2_DEEMPHASIS_75_uS``
+      - A de-emphasis of 75 uS is used.
 
 
 
@@ -4382,43 +3287,27 @@
 ``V4L2_CID_DETECT_MD_MODE (menu)``
     Sets the motion detection mode.
 
-
+.. tabularcolumns:: |p{7.5cm}|p{10.0cm}|
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  ``V4L2_DETECT_MD_MODE_DISABLED``
-
-       -  Disable motion detection.
-
-    -  .. row 2
-
-       -  ``V4L2_DETECT_MD_MODE_GLOBAL``
-
-       -  Use a single motion detection threshold.
-
-    -  .. row 3
-
-       -  ``V4L2_DETECT_MD_MODE_THRESHOLD_GRID``
-
-       -  The image is divided into a grid, each cell with its own motion
-	  detection threshold. These thresholds are set through the
-	  ``V4L2_CID_DETECT_MD_THRESHOLD_GRID`` matrix control.
-
-    -  .. row 4
-
-       -  ``V4L2_DETECT_MD_MODE_REGION_GRID``
-
-       -  The image is divided into a grid, each cell with its own region
-	  value that specifies which per-region motion detection thresholds
-	  should be used. Each region has its own thresholds. How these
-	  per-region thresholds are set up is driver-specific. The region
-	  values for the grid are set through the
-	  ``V4L2_CID_DETECT_MD_REGION_GRID`` matrix control.
+    * - ``V4L2_DETECT_MD_MODE_DISABLED``
+      - Disable motion detection.
+    * - ``V4L2_DETECT_MD_MODE_GLOBAL``
+      - Use a single motion detection threshold.
+    * - ``V4L2_DETECT_MD_MODE_THRESHOLD_GRID``
+      - The image is divided into a grid, each cell with its own motion
+	detection threshold. These thresholds are set through the
+	``V4L2_CID_DETECT_MD_THRESHOLD_GRID`` matrix control.
+    * - ``V4L2_DETECT_MD_MODE_REGION_GRID``
+      - The image is divided into a grid, each cell with its own region
+	value that specifies which per-region motion detection thresholds
+	should be used. Each region has its own thresholds. How these
+	per-region thresholds are set up is driver-specific. The region
+	values for the grid are set through the
+	``V4L2_CID_DETECT_MD_REGION_GRID`` matrix control.
 
 
 
diff --git a/Documentation/media/uapi/v4l/field-order.rst b/Documentation/media/uapi/v4l/field-order.rst
index 979fedb..50779a6 100644
--- a/Documentation/media/uapi/v4l/field-order.rst
+++ b/Documentation/media/uapi/v4l/field-order.rst
@@ -47,140 +47,92 @@
 All video capture and output devices must report the current field
 order. Some drivers may permit the selection of a different order, to
 this end applications initialize the ``field`` field of struct
-:ref:`v4l2_pix_format <v4l2-pix-format>` before calling the
+:c:type:`v4l2_pix_format` before calling the
 :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl. If this is not desired it
 should have the value ``V4L2_FIELD_ANY`` (0).
 
 
-.. _v4l2-field:
-
 enum v4l2_field
 ===============
 
+.. c:type:: v4l2_field
+
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_FIELD_ANY``
-
-       -  0
-
-       -  Applications request this field order when any one of the
-	  ``V4L2_FIELD_NONE``, ``V4L2_FIELD_TOP``, ``V4L2_FIELD_BOTTOM``, or
-	  ``V4L2_FIELD_INTERLACED`` formats is acceptable. Drivers choose
-	  depending on hardware capabilities or e. g. the requested image
-	  size, and return the actual field order. Drivers must never return
-	  ``V4L2_FIELD_ANY``. If multiple field orders are possible the
-	  driver must choose one of the possible field orders during
-	  :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` or
-	  :ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>`. struct
-	  :ref:`v4l2_buffer <v4l2-buffer>` ``field`` can never be
-	  ``V4L2_FIELD_ANY``.
-
-    -  .. row 2
-
-       -  ``V4L2_FIELD_NONE``
-
-       -  1
-
-       -  Images are in progressive format, not interlaced. The driver may
-	  also indicate this order when it cannot distinguish between
-	  ``V4L2_FIELD_TOP`` and ``V4L2_FIELD_BOTTOM``.
-
-    -  .. row 3
-
-       -  ``V4L2_FIELD_TOP``
-
-       -  2
-
-       -  Images consist of the top (aka odd) field only.
-
-    -  .. row 4
-
-       -  ``V4L2_FIELD_BOTTOM``
-
-       -  3
-
-       -  Images consist of the bottom (aka even) field only. Applications
-	  may wish to prevent a device from capturing interlaced images
-	  because they will have "comb" or "feathering" artefacts around
-	  moving objects.
-
-    -  .. row 5
-
-       -  ``V4L2_FIELD_INTERLACED``
-
-       -  4
-
-       -  Images contain both fields, interleaved line by line. The temporal
-	  order of the fields (whether the top or bottom field is first
-	  transmitted) depends on the current video standard. M/NTSC
-	  transmits the bottom field first, all other standards the top
-	  field first.
-
-    -  .. row 6
-
-       -  ``V4L2_FIELD_SEQ_TB``
-
-       -  5
-
-       -  Images contain both fields, the top field lines are stored first
-	  in memory, immediately followed by the bottom field lines. Fields
-	  are always stored in temporal order, the older one first in
-	  memory. Image sizes refer to the frame, not fields.
-
-    -  .. row 7
-
-       -  ``V4L2_FIELD_SEQ_BT``
-
-       -  6
-
-       -  Images contain both fields, the bottom field lines are stored
-	  first in memory, immediately followed by the top field lines.
-	  Fields are always stored in temporal order, the older one first in
-	  memory. Image sizes refer to the frame, not fields.
-
-    -  .. row 8
-
-       -  ``V4L2_FIELD_ALTERNATE``
-
-       -  7
-
-       -  The two fields of a frame are passed in separate buffers, in
-	  temporal order, i. e. the older one first. To indicate the field
-	  parity (whether the current field is a top or bottom field) the
-	  driver or application, depending on data direction, must set
-	  struct :ref:`v4l2_buffer <v4l2-buffer>` ``field`` to
-	  ``V4L2_FIELD_TOP`` or ``V4L2_FIELD_BOTTOM``. Any two successive
-	  fields pair to build a frame. If fields are successive, without
-	  any dropped fields between them (fields can drop individually),
-	  can be determined from the struct
-	  :ref:`v4l2_buffer <v4l2-buffer>` ``sequence`` field. This
-	  format cannot be selected when using the read/write I/O method
-	  since there is no way to communicate if a field was a top or
-	  bottom field.
-
-    -  .. row 9
-
-       -  ``V4L2_FIELD_INTERLACED_TB``
-
-       -  8
-
-       -  Images contain both fields, interleaved line by line, top field
-	  first. The top field is transmitted first.
-
-    -  .. row 10
-
-       -  ``V4L2_FIELD_INTERLACED_BT``
-
-       -  9
-
-       -  Images contain both fields, interleaved line by line, top field
-	  first. The bottom field is transmitted first.
+    * - ``V4L2_FIELD_ANY``
+      - 0
+      - Applications request this field order when any one of the
+	``V4L2_FIELD_NONE``, ``V4L2_FIELD_TOP``, ``V4L2_FIELD_BOTTOM``, or
+	``V4L2_FIELD_INTERLACED`` formats is acceptable. Drivers choose
+	depending on hardware capabilities or e. g. the requested image
+	size, and return the actual field order. Drivers must never return
+	``V4L2_FIELD_ANY``. If multiple field orders are possible the
+	driver must choose one of the possible field orders during
+	:ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` or
+	:ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>`. struct
+	:c:type:`v4l2_buffer` ``field`` can never be
+	``V4L2_FIELD_ANY``.
+    * - ``V4L2_FIELD_NONE``
+      - 1
+      - Images are in progressive format, not interlaced. The driver may
+	also indicate this order when it cannot distinguish between
+	``V4L2_FIELD_TOP`` and ``V4L2_FIELD_BOTTOM``.
+    * - ``V4L2_FIELD_TOP``
+      - 2
+      - Images consist of the top (aka odd) field only.
+    * - ``V4L2_FIELD_BOTTOM``
+      - 3
+      - Images consist of the bottom (aka even) field only. Applications
+	may wish to prevent a device from capturing interlaced images
+	because they will have "comb" or "feathering" artefacts around
+	moving objects.
+    * - ``V4L2_FIELD_INTERLACED``
+      - 4
+      - Images contain both fields, interleaved line by line. The temporal
+	order of the fields (whether the top or bottom field is first
+	transmitted) depends on the current video standard. M/NTSC
+	transmits the bottom field first, all other standards the top
+	field first.
+    * - ``V4L2_FIELD_SEQ_TB``
+      - 5
+      - Images contain both fields, the top field lines are stored first
+	in memory, immediately followed by the bottom field lines. Fields
+	are always stored in temporal order, the older one first in
+	memory. Image sizes refer to the frame, not fields.
+    * - ``V4L2_FIELD_SEQ_BT``
+      - 6
+      - Images contain both fields, the bottom field lines are stored
+	first in memory, immediately followed by the top field lines.
+	Fields are always stored in temporal order, the older one first in
+	memory. Image sizes refer to the frame, not fields.
+    * - ``V4L2_FIELD_ALTERNATE``
+      - 7
+      - The two fields of a frame are passed in separate buffers, in
+	temporal order, i. e. the older one first. To indicate the field
+	parity (whether the current field is a top or bottom field) the
+	driver or application, depending on data direction, must set
+	struct :c:type:`v4l2_buffer` ``field`` to
+	``V4L2_FIELD_TOP`` or ``V4L2_FIELD_BOTTOM``. Any two successive
+	fields pair to build a frame. If fields are successive, without
+	any dropped fields between them (fields can drop individually),
+	can be determined from the struct
+	:c:type:`v4l2_buffer` ``sequence`` field. This
+	format cannot be selected when using the read/write I/O method
+	since there is no way to communicate if a field was a top or
+	bottom field.
+    * - ``V4L2_FIELD_INTERLACED_TB``
+      - 8
+      - Images contain both fields, interleaved line by line, top field
+	first. The top field is transmitted first.
+    * - ``V4L2_FIELD_INTERLACED_BT``
+      - 9
+      - Images contain both fields, interleaved line by line, top field
+	first. The bottom field is transmitted first.
 
 
 
diff --git a/Documentation/media/uapi/v4l/format.rst b/Documentation/media/uapi/v4l/format.rst
index 7c73278..452c6d5 100644
--- a/Documentation/media/uapi/v4l/format.rst
+++ b/Documentation/media/uapi/v4l/format.rst
@@ -22,7 +22,7 @@
 current selection.
 
 A single mechanism exists to negotiate all data formats using the
-aggregate struct :ref:`v4l2_format <v4l2-format>` and the
+aggregate struct :c:type:`v4l2_format` and the
 :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` and
 :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctls. Additionally the
 :ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>` ioctl can be used to examine
diff --git a/Documentation/media/uapi/v4l/func-close.rst b/Documentation/media/uapi/v4l/func-close.rst
index 926a2cc..e85a674 100644
--- a/Documentation/media/uapi/v4l/func-close.rst
+++ b/Documentation/media/uapi/v4l/func-close.rst
@@ -20,8 +20,8 @@
     #include <unistd.h>
 
 
-.. cpp:function:: int close( int fd )
-
+.. c:function:: int close( int fd )
+    :name: v4l2-close
 
 Arguments
 =========
diff --git a/Documentation/media/uapi/v4l/func-ioctl.rst b/Documentation/media/uapi/v4l/func-ioctl.rst
index 5632f48..ebfbe92 100644
--- a/Documentation/media/uapi/v4l/func-ioctl.rst
+++ b/Documentation/media/uapi/v4l/func-ioctl.rst
@@ -20,8 +20,8 @@
     #include <sys/ioctl.h>
 
 
-.. cpp:function:: int ioctl( int fd, int request, void *argp )
-
+.. c:function:: int ioctl( int fd, int request, void *argp )
+    :name: v4l2-ioctl
 
 Arguments
 =========
diff --git a/Documentation/media/uapi/v4l/func-mmap.rst b/Documentation/media/uapi/v4l/func-mmap.rst
index c156fb7..6d2ce53 100644
--- a/Documentation/media/uapi/v4l/func-mmap.rst
+++ b/Documentation/media/uapi/v4l/func-mmap.rst
@@ -21,8 +21,8 @@
     #include <sys/mman.h>
 
 
-.. cpp:function:: void *mmap( void *start, size_t length, int prot, int flags, int fd, off_t offset )
-
+.. c:function:: void *mmap( void *start, size_t length, int prot, int flags, int fd, off_t offset )
+    :name: v4l2-mmap
 
 Arguments
 =========
@@ -37,9 +37,9 @@
 ``length``
     Length of the memory area to map. This must be the same value as
     returned by the driver in the struct
-    :ref:`v4l2_buffer <v4l2-buffer>` ``length`` field for the
+    :c:type:`v4l2_buffer` ``length`` field for the
     single-planar API, and the same value as returned by the driver in
-    the struct :ref:`v4l2_plane <v4l2-plane>` ``length`` field for
+    the struct :c:type:`v4l2_plane` ``length`` field for
     the multi-planar API.
 
 ``prot``
@@ -78,7 +78,9 @@
     ``MAP_SHARED`` allows applications to share the mapped memory with
     other (e. g. child-) processes.
 
-    .. note:: The Linux ``videobuf`` module  which is used by some
+    .. note::
+
+       The Linux ``videobuf`` module  which is used by some
        drivers supports only ``MAP_SHARED``. ``MAP_PRIVATE`` requests
        copy-on-write semantics. V4L2 applications should not set the
        ``MAP_PRIVATE``, ``MAP_DENYWRITE``, ``MAP_EXECUTABLE`` or ``MAP_ANON``
@@ -90,9 +92,9 @@
 ``offset``
     Offset of the buffer in device memory. This must be the same value
     as returned by the driver in the struct
-    :ref:`v4l2_buffer <v4l2-buffer>` ``m`` union ``offset`` field for
+    :c:type:`v4l2_buffer` ``m`` union ``offset`` field for
     the single-planar API, and the same value as returned by the driver
-    in the struct :ref:`v4l2_plane <v4l2-plane>` ``m`` union
+    in the struct :c:type:`v4l2_plane` ``m`` union
     ``mem_offset`` field for the multi-planar API.
 
 
diff --git a/Documentation/media/uapi/v4l/func-munmap.rst b/Documentation/media/uapi/v4l/func-munmap.rst
index c29c03f..c2f4043 100644
--- a/Documentation/media/uapi/v4l/func-munmap.rst
+++ b/Documentation/media/uapi/v4l/func-munmap.rst
@@ -21,8 +21,8 @@
     #include <sys/mman.h>
 
 
-.. cpp:function:: int munmap( void *start, size_t length )
-
+.. c:function:: int munmap( void *start, size_t length )
+    :name: v4l2-munmap
 
 Arguments
 =========
@@ -34,9 +34,9 @@
 ``length``
     Length of the mapped buffer. This must be the same value as given to
     :ref:`mmap() <func-mmap>` and returned by the driver in the struct
-    :ref:`v4l2_buffer <v4l2-buffer>` ``length`` field for the
+    :c:type:`v4l2_buffer` ``length`` field for the
     single-planar API and in the struct
-    :ref:`v4l2_plane <v4l2-plane>` ``length`` field for the
+    :c:type:`v4l2_plane` ``length`` field for the
     multi-planar API.
 
 
diff --git a/Documentation/media/uapi/v4l/func-open.rst b/Documentation/media/uapi/v4l/func-open.rst
index 06bcadc..deea34c 100644
--- a/Documentation/media/uapi/v4l/func-open.rst
+++ b/Documentation/media/uapi/v4l/func-open.rst
@@ -20,8 +20,8 @@
     #include <fcntl.h>
 
 
-.. cpp:function:: int open( const char *device_name, int flags )
-
+.. c:function:: int open( const char *device_name, int flags )
+    :name: v4l2-open
 
 Arguments
 =========
diff --git a/Documentation/media/uapi/v4l/func-poll.rst b/Documentation/media/uapi/v4l/func-poll.rst
index e6ceb71..d0432dc 100644
--- a/Documentation/media/uapi/v4l/func-poll.rst
+++ b/Documentation/media/uapi/v4l/func-poll.rst
@@ -20,8 +20,8 @@
     #include <sys/poll.h>
 
 
-.. cpp:function:: int poll( struct pollfd *ufds, unsigned int nfds, int timeout )
-
+.. c:function:: int poll( struct pollfd *ufds, unsigned int nfds, int timeout )
+    :name: v4l2-poll
 
 Arguments
 =========
diff --git a/Documentation/media/uapi/v4l/func-read.rst b/Documentation/media/uapi/v4l/func-read.rst
index 9a2aa521..ae38c2d 100644
--- a/Documentation/media/uapi/v4l/func-read.rst
+++ b/Documentation/media/uapi/v4l/func-read.rst
@@ -20,8 +20,8 @@
     #include <unistd.h>
 
 
-.. cpp:function:: ssize_t read( int fd, void *buf, size_t count )
-
+.. c:function:: ssize_t read( int fd, void *buf, size_t count )
+    :name: v4l2-read
 
 Arguments
 =========
@@ -30,8 +30,10 @@
     File descriptor returned by :ref:`open() <func-open>`.
 
 ``buf``
-``count``
+   Buffer to be filled
 
+``count``
+  Max number of bytes to read
 
 Description
 ===========
diff --git a/Documentation/media/uapi/v4l/func-select.rst b/Documentation/media/uapi/v4l/func-select.rst
index 7798384..002dedb 100644
--- a/Documentation/media/uapi/v4l/func-select.rst
+++ b/Documentation/media/uapi/v4l/func-select.rst
@@ -22,12 +22,26 @@
     #include <unistd.h>
 
 
-.. cpp:function:: int select( int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct timeval *timeout )
-
+.. c:function:: int select( int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct timeval *timeout )
+    :name: v4l2-select
 
 Arguments
 =========
 
+``nfds``
+  The highest-numbered file descriptor in any of the three sets, plus 1.
+
+``readfds``
+  File descriptions to be watched if a read() call won't block.
+
+``writefds``
+  File descriptions to be watched if a write() won't block.
+
+``exceptfds``
+  File descriptions to be watched for V4L2 events.
+
+``timeout``
+  Maximum time to wait.
 
 
 Description
diff --git a/Documentation/media/uapi/v4l/func-write.rst b/Documentation/media/uapi/v4l/func-write.rst
index a3bc9b2..938f33f 100644
--- a/Documentation/media/uapi/v4l/func-write.rst
+++ b/Documentation/media/uapi/v4l/func-write.rst
@@ -20,8 +20,8 @@
     #include <unistd.h>
 
 
-.. cpp:function:: ssize_t write( int fd, void *buf, size_t count )
-
+.. c:function:: ssize_t write( int fd, void *buf, size_t count )
+    :name: v4l2-write
 
 Arguments
 =========
@@ -30,8 +30,10 @@
     File descriptor returned by :ref:`open() <func-open>`.
 
 ``buf``
-``count``
+     Buffer with data to be written
 
+``count``
+    Number of bytes at the buffer
 
 Description
 ===========
diff --git a/Documentation/media/uapi/v4l/hist-v4l2.rst b/Documentation/media/uapi/v4l/hist-v4l2.rst
index 3ba1c0c..058b5db 100644
--- a/Documentation/media/uapi/v4l/hist-v4l2.rst
+++ b/Documentation/media/uapi/v4l/hist-v4l2.rst
@@ -30,14 +30,14 @@
 this flag if they intend to access controls only, as opposed to capture
 applications which need exclusive access. The ``VIDEO_STD_XXX``
 identifiers are now ordinals instead of flags, and the
-:c:func:`video_std_construct()` helper function takes id and
+``video_std_construct()`` helper function takes id and
 transmission arguments.
 
 1998-09-28: Revamped video standard. Made video controls individually
 enumerable.
 
 1998-10-02: The ``id`` field was removed from struct
-:c:type:`struct video_standard` and the color subcarrier fields were
+struct ``video_standard`` and the color subcarrier fields were
 renamed. The :ref:`VIDIOC_QUERYSTD` ioctl was
 renamed to :ref:`VIDIOC_ENUMSTD`,
 :ref:`VIDIOC_G_INPUT <VIDIOC_G_INPUT>` to
@@ -45,7 +45,7 @@
 Codec API was released.
 
 1998-11-08: Many minor changes. Most symbols have been renamed. Some
-material changes to struct :ref:`v4l2_capability <v4l2-capability>`.
+material changes to struct :c:type:`v4l2_capability`.
 
 1998-11-12: The read/write directon of some ioctls was misdefined.
 
@@ -117,7 +117,7 @@
 common Linux driver API conventions.
 
 1. Some typos in ``V4L2_FMT_FLAG`` symbols were fixed. struct
-   :ref:`v4l2_clip <v4l2-clip>` was changed for compatibility with
+   :c:type:`v4l2_clip` was changed for compatibility with
    v4l. (1999-08-30)
 
 2. ``V4L2_TUNER_SUB_LANG1`` was added. (1999-09-05)
@@ -151,15 +151,15 @@
    This change obsoletes the following ioctls: ``VIDIOC_S_INFMT``,
    ``VIDIOC_G_INFMT``, ``VIDIOC_S_OUTFMT``, ``VIDIOC_G_OUTFMT``,
    ``VIDIOC_S_VBIFMT`` and ``VIDIOC_G_VBIFMT``. The image format
-   structure :c:type:`struct v4l2_format` was renamed to struct
-   :ref:`v4l2_pix_format <v4l2-pix-format>`, while struct
-   :ref:`v4l2_format <v4l2-format>` is now the envelopping structure
+   structure struct :c:type:`v4l2_format` was renamed to struct
+   :c:type:`v4l2_pix_format`, while struct
+   :c:type:`v4l2_format` is now the envelopping structure
    for all format negotiations.
 
 5. Similar to the changes above, the ``VIDIOC_G_PARM`` and
    ``VIDIOC_S_PARM`` ioctls were merged with ``VIDIOC_G_OUTPARM`` and
    ``VIDIOC_S_OUTPARM``. A ``type`` field in the new struct
-   :ref:`v4l2_streamparm <v4l2-streamparm>` selects the respective
+   :c:type:`v4l2_streamparm` selects the respective
    union member.
 
    This change obsoletes the ``VIDIOC_G_OUTPARM`` and
@@ -178,7 +178,7 @@
    categories might have a greater separation, or may even appear in
    separate windows.
 
-7. The struct :ref:`v4l2_buffer <v4l2-buffer>` ``timestamp`` was
+7. The struct :c:type:`v4l2_buffer` ``timestamp`` was
    changed to a 64 bit integer, containing the sampling or output time
    of the frame in nanoseconds. Additionally timestamps will be in
    absolute system time, not starting from zero at the beginning of a
@@ -202,7 +202,7 @@
    return a 64-bit integer.
 
 8. A ``sequence`` field was added to struct
-   :ref:`v4l2_buffer <v4l2-buffer>`. The ``sequence`` field counts
+   :c:type:`v4l2_buffer`. The ``sequence`` field counts
    captured frames, it is ignored by output devices. When a capture
    driver drops a frame, the sequence number of that frame is skipped.
 
@@ -210,7 +210,7 @@
 V4L2 Version 0.20 incremental changes
 =====================================
 
-1999-12-23: In struct :ref:`v4l2_vbi_format <v4l2-vbi-format>` the
+1999-12-23: In struct :c:type:`v4l2_vbi_format` the
 ``reserved1`` field became ``offset``. Previously drivers were required
 to clear the ``reserved1`` field.
 
@@ -254,9 +254,9 @@
 2000-09-18: ``V4L2_BUF_TYPE_VBI`` was added. This may *break
 compatibility* as the :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` and
 :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctls may fail now if the struct
-:c:type:`struct v4l2_fmt` ``type`` field does not contain
+struct ``v4l2_fmt`` ``type`` field does not contain
 ``V4L2_BUF_TYPE_VBI``. In the documentation of the struct
-:ref:`v4l2_vbi_format <v4l2-vbi-format>` ``offset`` field the
+:c:type:`v4l2_vbi_format` ``offset`` field the
 ambiguous phrase "rising edge" was changed to "leading edge".
 
 
@@ -321,7 +321,7 @@
     until the application attempts to initiate a data exchange, see
     :ref:`open`.
 
-3.  The struct :ref:`v4l2_capability <v4l2-capability>` changed
+3.  The struct :c:type:`v4l2_capability` changed
     dramatically. Note that also the size of the structure changed,
     which is encoded in the ioctl request code, thus older V4L2 devices
     will respond with an ``EINVAL`` error code to the new
@@ -354,7 +354,7 @@
     ``V4L2_FLAG_MONOCHROME`` flag was removed, this information is
     available as described in :ref:`format`.
 
-4.  In struct :ref:`v4l2_input <v4l2-input>` the ``assoc_audio``
+4.  In struct :c:type:`v4l2_input` the ``assoc_audio``
     field and the ``capability`` field and its only flag
     ``V4L2_INPUT_CAP_AUDIO`` was replaced by the new ``audioset`` field.
     Instead of linking one video input to one audio input this field
@@ -363,11 +363,11 @@
     New fields are ``tuner`` (reversing the former link from tuners to
     video inputs), ``std`` and ``status``.
 
-    Accordingly struct :ref:`v4l2_output <v4l2-output>` lost its
+    Accordingly struct :c:type:`v4l2_output` lost its
     ``capability`` and ``assoc_audio`` fields. ``audioset``,
     ``modulator`` and ``std`` where added instead.
 
-5.  The struct :ref:`v4l2_audio <v4l2-audio>` field ``audio`` was
+5.  The struct :c:type:`v4l2_audio` field ``audio`` was
     renamed to ``index``, for consistency with other structures. A new
     capability flag ``V4L2_AUDCAP_STEREO`` was added to indicated if the
     audio input in question supports stereo sound.
@@ -376,20 +376,20 @@
     (However the same applies to AVL which is still there.)
 
     Again for consistency the struct
-    :ref:`v4l2_audioout <v4l2-audioout>` field ``audio`` was renamed
+    :c:type:`v4l2_audioout` field ``audio`` was renamed
     to ``index``.
 
-6.  The struct :ref:`v4l2_tuner <v4l2-tuner>` ``input`` field was
+6.  The struct :c:type:`v4l2_tuner` ``input`` field was
     replaced by an ``index`` field, permitting devices with multiple
     tuners. The link between video inputs and tuners is now reversed,
     inputs point to their tuner. The ``std`` substructure became a
     simple set (more about this below) and moved into struct
-    :ref:`v4l2_input <v4l2-input>`. A ``type`` field was added.
+    :c:type:`v4l2_input`. A ``type`` field was added.
 
-    Accordingly in struct :ref:`v4l2_modulator <v4l2-modulator>` the
+    Accordingly in struct :c:type:`v4l2_modulator` the
     ``output`` was replaced by an ``index`` field.
 
-    In struct :ref:`v4l2_frequency <v4l2-frequency>` the ``port``
+    In struct :c:type:`v4l2_frequency` the ``port``
     field was replaced by a ``tuner`` field containing the respective
     tuner or modulator index number. A tuner ``type`` field was added
     and the ``reserved`` field became larger for future extensions
@@ -405,7 +405,7 @@
     :ref:`VIDIOC_S_STD <VIDIOC_G_STD>` now take a pointer to this
     type as argument. :ref:`VIDIOC_QUERYSTD` was
     added to autodetect the received standard, if the hardware has this
-    capability. In struct :ref:`v4l2_standard <v4l2-standard>` an
+    capability. In struct :c:type:`v4l2_standard` an
     ``index`` field was added for
     :ref:`VIDIOC_ENUMSTD`. A
     :ref:`v4l2_std_id <v4l2-std-id>` field named ``id`` was added as
@@ -415,12 +415,12 @@
     originally needed to distguish between variations of standards, were
     removed.
 
-    Struct :c:type:`struct v4l2_enumstd` ceased to be.
+    Struct ``v4l2_enumstd`` ceased to be.
     :ref:`VIDIOC_ENUMSTD` now takes a pointer to a
-    struct :ref:`v4l2_standard <v4l2-standard>` directly. The
+    struct :c:type:`v4l2_standard` directly. The
     information which standards are supported by a particular video
-    input or output moved into struct :ref:`v4l2_input <v4l2-input>`
-    and struct :ref:`v4l2_output <v4l2-output>` fields named ``std``,
+    input or output moved into struct :c:type:`v4l2_input`
+    and struct :c:type:`v4l2_output` fields named ``std``,
     respectively.
 
 8.  The struct :ref:`v4l2_queryctrl <v4l2-queryctrl>` fields
@@ -432,13 +432,13 @@
     :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>`, but without the overhead of
     programming the hardware and regardless of I/O in progress.
 
-    In struct :ref:`v4l2_format <v4l2-format>` the ``fmt`` union was
-    extended to contain struct :ref:`v4l2_window <v4l2-window>`. All
+    In struct :c:type:`v4l2_format` the ``fmt`` union was
+    extended to contain struct :c:type:`v4l2_window`. All
     image format negotiations are now possible with ``VIDIOC_G_FMT``,
     ``VIDIOC_S_FMT`` and ``VIDIOC_TRY_FMT``; ioctl. The ``VIDIOC_G_WIN``
     and ``VIDIOC_S_WIN`` ioctls to prepare for a video overlay were
     removed. The ``type`` field changed to type enum
-    :ref:`v4l2_buf_type <v4l2-buf-type>` and the buffer type names
+    :c:type:`v4l2_buf_type` and the buffer type names
     changed as follows.
 
 
@@ -447,101 +447,45 @@
 	:header-rows:  1
 	:stub-columns: 0
 
-
-	-  .. row 1
-
-	   -  Old defines
-
-	   -  enum :ref:`v4l2_buf_type <v4l2-buf-type>`
-
-	-  .. row 2
-
-	   -  ``V4L2_BUF_TYPE_CAPTURE``
-
-	   -  ``V4L2_BUF_TYPE_VIDEO_CAPTURE``
-
-	-  .. row 3
-
-	   -  ``V4L2_BUF_TYPE_CODECIN``
-
-	   -  Omitted for now
-
-	-  .. row 4
-
-	   -  ``V4L2_BUF_TYPE_CODECOUT``
-
-	   -  Omitted for now
-
-	-  .. row 5
-
-	   -  ``V4L2_BUF_TYPE_EFFECTSIN``
-
-	   -  Omitted for now
-
-	-  .. row 6
-
-	   -  ``V4L2_BUF_TYPE_EFFECTSIN2``
-
-	   -  Omitted for now
-
-	-  .. row 7
-
-	   -  ``V4L2_BUF_TYPE_EFFECTSOUT``
-
-	   -  Omitted for now
-
-	-  .. row 8
-
-	   -  ``V4L2_BUF_TYPE_VIDEOOUT``
-
-	   -  ``V4L2_BUF_TYPE_VIDEO_OUTPUT``
-
-	-  .. row 9
-
-	   -  ``-``
-
-	   -  ``V4L2_BUF_TYPE_VIDEO_OVERLAY``
-
-	-  .. row 10
-
-	   -  ``-``
-
-	   -  ``V4L2_BUF_TYPE_VBI_CAPTURE``
-
-	-  .. row 11
-
-	   -  ``-``
-
-	   -  ``V4L2_BUF_TYPE_VBI_OUTPUT``
-
-	-  .. row 12
-
-	   -  ``-``
-
-	   -  ``V4L2_BUF_TYPE_SLICED_VBI_CAPTURE``
-
-	-  .. row 13
-
-	   -  ``-``
-
-	   -  ``V4L2_BUF_TYPE_SLICED_VBI_OUTPUT``
-
-	-  .. row 14
-
-	   -  ``V4L2_BUF_TYPE_PRIVATE_BASE``
-
-	   -  ``V4L2_BUF_TYPE_PRIVATE`` (but this is deprecated)
+	* - Old defines
+	  - enum :c:type:`v4l2_buf_type`
+	* - ``V4L2_BUF_TYPE_CAPTURE``
+	  - ``V4L2_BUF_TYPE_VIDEO_CAPTURE``
+	* - ``V4L2_BUF_TYPE_CODECIN``
+	  - Omitted for now
+	* - ``V4L2_BUF_TYPE_CODECOUT``
+	  - Omitted for now
+	* - ``V4L2_BUF_TYPE_EFFECTSIN``
+	  - Omitted for now
+	* - ``V4L2_BUF_TYPE_EFFECTSIN2``
+	  - Omitted for now
+	* - ``V4L2_BUF_TYPE_EFFECTSOUT``
+	  - Omitted for now
+	* - ``V4L2_BUF_TYPE_VIDEOOUT``
+	  - ``V4L2_BUF_TYPE_VIDEO_OUTPUT``
+	* - ``-``
+	  - ``V4L2_BUF_TYPE_VIDEO_OVERLAY``
+	* - ``-``
+	  - ``V4L2_BUF_TYPE_VBI_CAPTURE``
+	* - ``-``
+	  - ``V4L2_BUF_TYPE_VBI_OUTPUT``
+	* - ``-``
+	  - ``V4L2_BUF_TYPE_SLICED_VBI_CAPTURE``
+	* - ``-``
+	  - ``V4L2_BUF_TYPE_SLICED_VBI_OUTPUT``
+	* - ``V4L2_BUF_TYPE_PRIVATE_BASE``
+	  - ``V4L2_BUF_TYPE_PRIVATE`` (but this is deprecated)
 
 
-10. In struct :ref:`v4l2_fmtdesc <v4l2-fmtdesc>` a enum
-    :ref:`v4l2_buf_type <v4l2-buf-type>` field named ``type`` was
-    added as in struct :ref:`v4l2_format <v4l2-format>`. The
+10. In struct :c:type:`v4l2_fmtdesc` a enum
+    :c:type:`v4l2_buf_type` field named ``type`` was
+    added as in struct :c:type:`v4l2_format`. The
     ``VIDIOC_ENUM_FBUFFMT`` ioctl is no longer needed and was removed.
     These calls can be replaced by
     :ref:`VIDIOC_ENUM_FMT` with type
     ``V4L2_BUF_TYPE_VIDEO_OVERLAY``.
 
-11. In struct :ref:`v4l2_pix_format <v4l2-pix-format>` the ``depth``
+11. In struct :c:type:`v4l2_pix_format` the ``depth``
     field was removed, assuming applications which recognize the format
     by its four-character-code already know the color depth, and others
     do not care about it. The same rationale lead to the removal of the
@@ -555,7 +499,7 @@
     itself was removed.
 
     The interlace flags were replaced by a enum
-    :ref:`v4l2_field <v4l2-field>` value in a newly added ``field``
+    :c:type:`v4l2_field` value in a newly added ``field``
     field.
 
 
@@ -564,82 +508,50 @@
 	:header-rows:  1
 	:stub-columns: 0
 
-
-	-  .. row 1
-
-	   -  Old flag
-
-	   -  enum :ref:`v4l2_field <v4l2-field>`
-
-	-  .. row 2
-
-	   -  ``V4L2_FMT_FLAG_NOT_INTERLACED``
-
-	   -  ?
-
-	-  .. row 3
-
-	   -  ``V4L2_FMT_FLAG_INTERLACED`` = ``V4L2_FMT_FLAG_COMBINED``
-
-	   -  ``V4L2_FIELD_INTERLACED``
-
-	-  .. row 4
-
-	   -  ``V4L2_FMT_FLAG_TOPFIELD`` = ``V4L2_FMT_FLAG_ODDFIELD``
-
-	   -  ``V4L2_FIELD_TOP``
-
-	-  .. row 5
-
-	   -  ``V4L2_FMT_FLAG_BOTFIELD`` = ``V4L2_FMT_FLAG_EVENFIELD``
-
-	   -  ``V4L2_FIELD_BOTTOM``
-
-	-  .. row 6
-
-	   -  ``-``
-
-	   -  ``V4L2_FIELD_SEQ_TB``
-
-	-  .. row 7
-
-	   -  ``-``
-
-	   -  ``V4L2_FIELD_SEQ_BT``
-
-	-  .. row 8
-
-	   -  ``-``
-
-	   -  ``V4L2_FIELD_ALTERNATE``
+	* - Old flag
+	  - enum :c:type:`v4l2_field`
+	* - ``V4L2_FMT_FLAG_NOT_INTERLACED``
+	  - ?
+	* - ``V4L2_FMT_FLAG_INTERLACED`` = ``V4L2_FMT_FLAG_COMBINED``
+	  - ``V4L2_FIELD_INTERLACED``
+	* - ``V4L2_FMT_FLAG_TOPFIELD`` = ``V4L2_FMT_FLAG_ODDFIELD``
+	  - ``V4L2_FIELD_TOP``
+	* - ``V4L2_FMT_FLAG_BOTFIELD`` = ``V4L2_FMT_FLAG_EVENFIELD``
+	  - ``V4L2_FIELD_BOTTOM``
+	* - ``-``
+	  - ``V4L2_FIELD_SEQ_TB``
+	* - ``-``
+	  - ``V4L2_FIELD_SEQ_BT``
+	* - ``-``
+	  - ``V4L2_FIELD_ALTERNATE``
 
 
     The color space flags were replaced by a enum
-    :ref:`v4l2_colorspace <v4l2-colorspace>` value in a newly added
+    :c:type:`v4l2_colorspace` value in a newly added
     ``colorspace`` field, where one of ``V4L2_COLORSPACE_SMPTE170M``,
     ``V4L2_COLORSPACE_BT878``, ``V4L2_COLORSPACE_470_SYSTEM_M`` or
     ``V4L2_COLORSPACE_470_SYSTEM_BG`` replaces ``V4L2_FMT_CS_601YUV``.
 
-12. In struct :ref:`v4l2_requestbuffers <v4l2-requestbuffers>` the
+12. In struct :c:type:`v4l2_requestbuffers` the
     ``type`` field was properly defined as enum
-    :ref:`v4l2_buf_type <v4l2-buf-type>`. Buffer types changed as
+    :c:type:`v4l2_buf_type`. Buffer types changed as
     mentioned above. A new ``memory`` field of type enum
-    :ref:`v4l2_memory <v4l2-memory>` was added to distinguish between
+    :c:type:`v4l2_memory` was added to distinguish between
     I/O methods using buffers allocated by the driver or the
     application. See :ref:`io` for details.
 
-13. In struct :ref:`v4l2_buffer <v4l2-buffer>` the ``type`` field was
-    properly defined as enum :ref:`v4l2_buf_type <v4l2-buf-type>`.
+13. In struct :c:type:`v4l2_buffer` the ``type`` field was
+    properly defined as enum :c:type:`v4l2_buf_type`.
     Buffer types changed as mentioned above. A ``field`` field of type
-    enum :ref:`v4l2_field <v4l2-field>` was added to indicate if a
+    enum :c:type:`v4l2_field` was added to indicate if a
     buffer contains a top or bottom field. The old field flags were
     removed. Since no unadjusted system time clock was added to the
     kernel as planned, the ``timestamp`` field changed back from type
     stamp_t, an unsigned 64 bit integer expressing the sample time in
-    nanoseconds, to struct :c:type:`struct timeval`. With the addition
+    nanoseconds, to struct :c:type:`timeval`. With the addition
     of a second memory mapping method the ``offset`` field moved into
     union ``m``, and a new ``memory`` field of type enum
-    :ref:`v4l2_memory <v4l2-memory>` was added to distinguish between
+    :c:type:`v4l2_memory` was added to distinguish between
     I/O methods. See :ref:`io` for details.
 
     The ``V4L2_BUF_REQ_CONTIG`` flag was used by the V4L compatibility
@@ -648,7 +560,7 @@
     indeed allocated in device memory rather than DMA-able system
     memory. It was barely useful and so was removed.
 
-14. In struct :ref:`v4l2_framebuffer <v4l2-framebuffer>` the
+14. In struct :c:type:`v4l2_framebuffer` the
     ``base[3]`` array anticipating double- and triple-buffering in
     off-screen video memory, however without defining a synchronization
     mechanism, was replaced by a single pointer. The
@@ -659,42 +571,42 @@
     ``V4L2_FBUF_CAP_LIST_CLIPPING`` and
     ``V4L2_FBUF_CAP_BITMAP_CLIPPING``.
 
-15. In struct :ref:`v4l2_clip <v4l2-clip>` the ``x``, ``y``,
+15. In struct :c:type:`v4l2_clip` the ``x``, ``y``,
     ``width`` and ``height`` field moved into a ``c`` substructure of
-    type struct :ref:`v4l2_rect <v4l2-rect>`. The ``x`` and ``y``
+    type struct :c:type:`v4l2_rect`. The ``x`` and ``y``
     fields were renamed to ``left`` and ``top``, i. e. offsets to a
     context dependent origin.
 
-16. In struct :ref:`v4l2_window <v4l2-window>` the ``x``, ``y``,
+16. In struct :c:type:`v4l2_window` the ``x``, ``y``,
     ``width`` and ``height`` field moved into a ``w`` substructure as
-    above. A ``field`` field of type %v4l2-field; was added to
+    above. A ``field`` field of type :c:type:`v4l2_field` was added to
     distinguish between field and frame (interlaced) overlay.
 
 17. The digital zoom interface, including struct
-    :c:type:`struct v4l2_zoomcap`, struct
-    :c:type:`struct v4l2_zoom`, ``V4L2_ZOOM_NONCAP`` and
+    struct ``v4l2_zoomcap``, struct
+    struct ``v4l2_zoom``, ``V4L2_ZOOM_NONCAP`` and
     ``V4L2_ZOOM_WHILESTREAMING`` was replaced by a new cropping and
     scaling interface. The previously unused struct
-    :c:type:`struct v4l2_cropcap` and :c:type:`struct v4l2_crop`
+    struct :c:type:`v4l2_cropcap` and struct :c:type:`v4l2_crop`
     where redefined for this purpose. See :ref:`crop` for details.
 
-18. In struct :ref:`v4l2_vbi_format <v4l2-vbi-format>` the
+18. In struct :c:type:`v4l2_vbi_format` the
     ``SAMPLE_FORMAT`` field now contains a four-character-code as used
     to identify video image formats and ``V4L2_PIX_FMT_GREY`` replaces
     the ``V4L2_VBI_SF_UBYTE`` define. The ``reserved`` field was
     extended.
 
-19. In struct :ref:`v4l2_captureparm <v4l2-captureparm>` the type of
+19. In struct :c:type:`v4l2_captureparm` the type of
     the ``timeperframe`` field changed from unsigned long to struct
-    :ref:`v4l2_fract <v4l2-fract>`. This allows the accurate
+    :c:type:`v4l2_fract`. This allows the accurate
     expression of multiples of the NTSC-M frame rate 30000 / 1001. A new
     field ``readbuffers`` was added to control the driver behaviour in
     read I/O mode.
 
     Similar changes were made to struct
-    :ref:`v4l2_outputparm <v4l2-outputparm>`.
+    :c:type:`v4l2_outputparm`.
 
-20. The struct :c:type:`struct v4l2_performance` and
+20. The struct ``v4l2_performance`` and
     ``VIDIOC_G_PERF`` ioctl were dropped. Except when using the
     :ref:`read/write I/O method <rw>`, which is limited anyway, this
     information is already available to applications.
@@ -768,46 +680,21 @@
        :header-rows:  1
        :stub-columns: 0
 
-
-       -  .. row 1
-
-	  -  Symbol
-
-	  -  In this document prior to revision 0.5
-
-	  -  Corrected
-
-       -  .. row 2
-
-	  -  ``V4L2_PIX_FMT_RGB24``
-
-	  -  B, G, R
-
-	  -  R, G, B
-
-       -  .. row 3
-
-	  -  ``V4L2_PIX_FMT_BGR24``
-
-	  -  R, G, B
-
-	  -  B, G, R
-
-       -  .. row 4
-
-	  -  ``V4L2_PIX_FMT_RGB32``
-
-	  -  B, G, R, X
-
-	  -  R, G, B, X
-
-       -  .. row 5
-
-	  -  ``V4L2_PIX_FMT_BGR32``
-
-	  -  R, G, B, X
-
-	  -  B, G, R, X
+       * - Symbol
+	 - In this document prior to revision 0.5
+	 - Corrected
+       * - ``V4L2_PIX_FMT_RGB24``
+	 - B, G, R
+	 - R, G, B
+       * - ``V4L2_PIX_FMT_BGR24``
+	 - R, G, B
+	 - B, G, R
+       * - ``V4L2_PIX_FMT_RGB32``
+	 - B, G, R, X
+	 - R, G, B, X
+       * - ``V4L2_PIX_FMT_BGR32``
+	 - R, G, B, X
+	 - B, G, R, X
 
 
    The ``V4L2_PIX_FMT_BGR24`` example was always correct.
@@ -834,7 +721,7 @@
 ===================
 
 1. A new field ``input`` (former ``reserved[0]``) was added to the
-   struct :ref:`v4l2_buffer <v4l2-buffer>` structure. Purpose of this
+   struct :c:type:`v4l2_buffer` structure. Purpose of this
    field is to alternate between video inputs (e. g. cameras) in step
    with the video capturing process. This function must be enabled with
    the new ``V4L2_BUF_FLAG_INPUT`` flag. The ``flags`` field is no
@@ -854,7 +741,7 @@
 
 4. The documentation of the :ref:`VIDIOC_QBUF` and
    :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctls did not mention the
-   struct :ref:`v4l2_buffer <v4l2-buffer>` ``memory`` field. It was
+   struct :c:type:`v4l2_buffer` ``memory`` field. It was
    also missing from examples. Also on the ``VIDIOC_DQBUF`` page the ``EIO``
    error code was not documented.
 
@@ -882,7 +769,7 @@
 3. The ``VIDIOC_G_COMP`` and ``VIDIOC_S_COMP`` ioctl were renamed to
    ``VIDIOC_G_MPEGCOMP`` and ``VIDIOC_S_MPEGCOMP`` respectively. Their
    argument was replaced by a struct
-   :c:type:`struct v4l2_mpeg_compression` pointer. (The
+   ``v4l2_mpeg_compression`` pointer. (The
    ``VIDIOC_G_MPEGCOMP`` and ``VIDIOC_S_MPEGCOMP`` ioctls where removed
    in Linux 2.6.25.)
 
@@ -901,7 +788,7 @@
 ============================
 
 1. The ``V4L2_IN_ST_COLOR_KILL`` flag in struct
-   :ref:`v4l2_input <v4l2-input>` not only indicates if the color
+   :c:type:`v4l2_input` not only indicates if the color
    killer is enabled, but also if it is active. (The color killer
    disables color decoding when it detects no color in the video signal
    to improve the image quality.)
@@ -914,18 +801,18 @@
 V4L2 spec erratum 2006-02-03
 ============================
 
-1. In struct :ref:`v4l2_captureparm <v4l2-captureparm>` and struct
-   :ref:`v4l2_outputparm <v4l2-outputparm>` the ``timeperframe``
+1. In struct :c:type:`v4l2_captureparm` and struct
+   :c:type:`v4l2_outputparm` the ``timeperframe``
    field gives the time in seconds, not microseconds.
 
 
 V4L2 spec erratum 2006-02-04
 ============================
 
-1. The ``clips`` field in struct :ref:`v4l2_window <v4l2-window>`
-   must point to an array of struct :ref:`v4l2_clip <v4l2-clip>`, not
+1. The ``clips`` field in struct :c:type:`v4l2_window`
+   must point to an array of struct :c:type:`v4l2_clip`, not
    a linked list, because drivers ignore the struct
-   :c:type:`struct v4l2_clip`. ``next`` pointer.
+   struct :c:type:`v4l2_clip`. ``next`` pointer.
 
 
 V4L2 in Linux 2.6.17
@@ -951,18 +838,18 @@
    not mentioned along with other buffer types.
 
 2. In :ref:`VIDIOC_G_AUDIO <VIDIOC_G_AUDIO>` it was clarified that the struct
-   :ref:`v4l2_audio <v4l2-audio>` ``mode`` field is a flags field.
+   :c:type:`v4l2_audio` ``mode`` field is a flags field.
 
 3. :ref:`VIDIOC_QUERYCAP` did not mention the sliced VBI and radio
    capability flags.
 
 4. In :ref:`VIDIOC_G_FREQUENCY <VIDIOC_G_FREQUENCY>` it was clarified that applications
    must initialize the tuner ``type`` field of struct
-   :ref:`v4l2_frequency <v4l2-frequency>` before calling
+   :c:type:`v4l2_frequency` before calling
    :ref:`VIDIOC_S_FREQUENCY <VIDIOC_G_FREQUENCY>`.
 
 5. The ``reserved`` array in struct
-   :ref:`v4l2_requestbuffers <v4l2-requestbuffers>` has 2 elements,
+   :c:type:`v4l2_requestbuffers` has 2 elements,
    not 32.
 
 6. In :ref:`output` and :ref:`raw-vbi` the device file names
@@ -982,7 +869,7 @@
    flag to skip unsupported controls with
    :ref:`VIDIOC_QUERYCTRL`, new control types
    ``V4L2_CTRL_TYPE_INTEGER64`` and ``V4L2_CTRL_TYPE_CTRL_CLASS``
-   (:ref:`v4l2-ctrl-type`), and new control flags
+   (:c:type:`v4l2_ctrl_type`), and new control flags
    ``V4L2_CTRL_FLAG_READ_ONLY``, ``V4L2_CTRL_FLAG_UPDATE``,
    ``V4L2_CTRL_FLAG_INACTIVE`` and ``V4L2_CTRL_FLAG_SLIDER``
    (:ref:`control-flags`). See :ref:`extended-controls` for details.
@@ -991,7 +878,7 @@
 V4L2 in Linux 2.6.19
 ====================
 
-1. In struct :ref:`v4l2_sliced_vbi_cap <v4l2-sliced-vbi-cap>` a
+1. In struct :c:type:`v4l2_sliced_vbi_cap` a
    buffer type field was added replacing a reserved field. Note on
    architectures where the size of enum types differs from int types the
    size of the structure changed. The
@@ -1029,7 +916,7 @@
 ====================
 
 1. Two new field orders ``V4L2_FIELD_INTERLACED_TB`` and
-   ``V4L2_FIELD_INTERLACED_BT`` were added. See :ref:`v4l2-field` for
+   ``V4L2_FIELD_INTERLACED_BT`` were added. See :c:type:`v4l2_field` for
    details.
 
 2. Three new clipping/blending methods with a global or straight or
@@ -1038,15 +925,15 @@
    and :ref:`VIDIOC_S_FBUF <VIDIOC_G_FBUF>` ioctls for details.
 
    A new ``global_alpha`` field was added to
-   :ref:`v4l2_window <v4l2-window>`, extending the structure. This
+   :c:type:`v4l2_window`, extending the structure. This
    may *break compatibility* with applications using a struct
-   :c:type:`struct v4l2_window` directly. However the
+   struct :c:type:`v4l2_window` directly. However the
    :ref:`VIDIOC_G/S/TRY_FMT <VIDIOC_G_FMT>` ioctls, which take a
-   pointer to a :ref:`v4l2_format <v4l2-format>` parent structure
+   pointer to a :c:type:`v4l2_format` parent structure
    with padding bytes at the end, are not affected.
 
 3. The format of the ``chromakey`` field in struct
-   :ref:`v4l2_window <v4l2-window>` changed from "host order RGB32"
+   :c:type:`v4l2_window` changed from "host order RGB32"
    to a pixel value in the same format as the framebuffer. This may
    *break compatibility* with existing applications. Drivers supporting
    the "host order RGB32" format are not known.
@@ -1127,8 +1014,8 @@
 1. The ``VIDIOC_G_CHIP_IDENT`` ioctl was renamed to
    ``VIDIOC_G_CHIP_IDENT_OLD`` and ``VIDIOC_DBG_G_CHIP_IDENT`` was
    introduced in its place. The old struct
-   :c:type:`struct v4l2_chip_ident` was renamed to
-   :c:type:`struct v4l2_chip_ident_old`.
+   struct ``v4l2_chip_ident`` was renamed to
+   struct ``v4l2_chip_ident_old``.
 
 2. The pixel formats ``V4L2_PIX_FMT_VYUY``, ``V4L2_PIX_FMT_NV16`` and
    ``V4L2_PIX_FMT_NV61`` were added.
@@ -1279,7 +1166,7 @@
 V4L2 in Linux 3.6
 =================
 
-1. Replaced ``input`` in :c:type:`struct v4l2_buffer` by
+1. Replaced ``input`` in struct :c:type:`v4l2_buffer` by
    ``reserved2`` and removed ``V4L2_BUF_FLAG_INPUT``.
 
 2. Added V4L2_CAP_VIDEO_M2M and V4L2_CAP_VIDEO_M2M_MPLANE
@@ -1293,7 +1180,7 @@
 =================
 
 1. Added timestamp types to ``flags`` field in
-   :c:type:`struct v4l2_buffer`. See :ref:`buffer-flags`.
+   struct :c:type:`v4l2_buffer`. See :ref:`buffer-flags`.
 
 2. Added ``V4L2_EVENT_CTRL_CH_RANGE`` control event changes flag. See
    :ref:`ctrl-changes-flags`.
@@ -1320,7 +1207,7 @@
 V4L2 in Linux 3.14
 ==================
 
-1. In struct :c:type:`struct v4l2_rect`, the type of ``width`` and
+1. In struct :c:type:`v4l2_rect`, the type of ``width`` and
    ``height`` fields changed from _s32 to _u32.
 
 
@@ -1339,7 +1226,7 @@
 V4L2 in Linux 3.17
 ==================
 
-1. Extended struct :ref:`v4l2_pix_format <v4l2-pix-format>`. Added
+1. Extended struct :c:type:`v4l2_pix_format`. Added
    format flags.
 
 2. Added compound control types and
@@ -1357,11 +1244,11 @@
 ==================
 
 1. Rewrote Colorspace chapter, added new enum
-   :ref:`v4l2_ycbcr_encoding <v4l2-ycbcr-encoding>` and enum
-   :ref:`v4l2_quantization <v4l2-quantization>` fields to struct
-   :ref:`v4l2_pix_format <v4l2-pix-format>`, struct
-   :ref:`v4l2_pix_format_mplane <v4l2-pix-format-mplane>` and
-   struct :ref:`v4l2_mbus_framefmt <v4l2-mbus-framefmt>`.
+   :c:type:`v4l2_ycbcr_encoding` and enum
+   :c:type:`v4l2_quantization` fields to struct
+   :c:type:`v4l2_pix_format`, struct
+   :c:type:`v4l2_pix_format_mplane` and
+   struct :c:type:`v4l2_mbus_framefmt`.
 
 
 V4L2 in Linux 4.4
diff --git a/Documentation/media/uapi/v4l/libv4l-introduction.rst b/Documentation/media/uapi/v4l/libv4l-introduction.rst
index 61d085f..ccc3c4d 100644
--- a/Documentation/media/uapi/v4l/libv4l-introduction.rst
+++ b/Documentation/media/uapi/v4l/libv4l-introduction.rst
@@ -113,56 +113,71 @@
 
 The common file operation methods are provided by libv4l.
 
-Those functions operate just like glibc
-open/close/dup/ioctl/read/mmap/munmap:
+Those functions operate just like the gcc function ``dup()`` and
+V4L2 functions
+:c:func:`open() <v4l2-open>`, :c:func:`close() <v4l2-close>`,
+:c:func:`ioctl() <v4l2-ioctl>`, :c:func:`read() <v4l2-read>`,
+:c:func:`mmap() <v4l2-mmap>` and :c:func:`munmap() <v4l2-munmap>`:
 
--  :c:type:`int v4l2_open(const char *file, int oflag, ...)` - operates like the
-   standard :ref:`open() <func-open>` function.
+.. c:function:: int v4l2_open(const char *file, int oflag, ...)
 
--  :c:type:`int v4l2_close(int fd)` - operates like the standard
-   :ref:`close() <func-close>` function.
+   operates like the :c:func:`open() <v4l2-open>` function.
 
--  :c:type:`int v4l2_dup(int fd)` - operates like the standard dup() function,
-   duplicating a file handler.
+.. c:function:: int v4l2_close(int fd)
 
--  :c:type:`int v4l2_ioctl (int fd, unsigned long int request, ...)` - operates
-   like the standard :ref:`ioctl() <func-ioctl>` function.
+   operates like the :c:func:`close() <v4l2-close>` function.
 
--  :c:type:`int v4l2_read (int fd, void* buffer, size_t n)` - operates like the
-   standard :ref:`read() <func-read>` function.
+.. c:function:: int v4l2_dup(int fd)
 
--  :c:type:`void v4l2_mmap(void *start, size_t length, int prot, int flags, int
-   fd, int64_t offset);` - operates like the standard
-   :ref:`mmap() <func-mmap>` function.
+   operates like the libc ``dup()`` function, duplicating a file handler.
 
--  :c:type:`int v4l2_munmap(void *_start, size_t length);` - operates like the
-   standard :ref:`munmap() <func-munmap>` function.
+.. c:function:: int v4l2_ioctl (int fd, unsigned long int request, ...)
+
+   operates like the :c:func:`ioctl() <v4l2-ioctl>` function.
+
+.. c:function:: int v4l2_read (int fd, void* buffer, size_t n)
+
+   operates like the :c:func:`read() <v4l2-read>` function.
+
+.. c:function:: void v4l2_mmap(void *start, size_t length, int prot, int flags, int fd, int64_t offset);
+
+   operates like the :c:func:`munmap() <v4l2-munmap>` function.
+
+.. c:function:: int v4l2_munmap(void *_start, size_t length);
+
+   operates like the :c:func:`munmap() <v4l2-munmap>` function.
 
 Those functions provide additional control:
 
--  :c:type:`int v4l2_fd_open(int fd, int v4l2_flags)` - opens an already opened
-   fd for further use through v4l2lib and possibly modify libv4l2's
-   default behavior through the v4l2_flags argument. Currently,
-   v4l2_flags can be ``V4L2_DISABLE_CONVERSION``, to disable format
-   conversion.
+.. c:function:: int v4l2_fd_open(int fd, int v4l2_flags)
 
--  :c:type:`int v4l2_set_control(int fd, int cid, int value)` - This function
-   takes a value of 0 - 65535, and then scales that range to the actual
-   range of the given v4l control id, and then if the cid exists and is
+   opens an already opened fd for further use through v4l2lib and possibly
+   modify libv4l2's default behavior through the ``v4l2_flags`` argument.
+   Currently, ``v4l2_flags`` can be ``V4L2_DISABLE_CONVERSION``, to disable
+   format conversion.
+
+.. c:function:: int v4l2_set_control(int fd, int cid, int value)
+
+   This function takes a value of 0 - 65535, and then scales that range to the
+   actual range of the given v4l control id, and then if the cid exists and is
    not locked sets the cid to the scaled value.
 
--  :c:type:`int v4l2_get_control(int fd, int cid)` - This function returns a
-   value of 0 - 65535, scaled to from the actual range of the given v4l
-   control id. when the cid does not exist, could not be accessed for
-   some reason, or some error occurred 0 is returned.
+.. c:function:: int v4l2_get_control(int fd, int cid)
+
+   This function returns a value of 0 - 65535, scaled to from the actual range
+   of the given v4l control id. when the cid does not exist, could not be
+   accessed for some reason, or some error occurred 0 is returned.
 
 
 v4l1compat.so wrapper library
 =============================
 
-This library intercepts calls to open/close/ioctl/mmap/mmunmap
+This library intercepts calls to
+:c:func:`open() <v4l2-open>`, :c:func:`close() <v4l2-close>`,
+:c:func:`ioctl() <v4l2-ioctl>`, :c:func:`mmap() <v4l2-mmap>` and
+:c:func:`munmap() <v4l2-munmap>`
 operations and redirects them to the libv4l counterparts, by using
-LD_PRELOAD=/usr/lib/v4l1compat.so. It also emulates V4L1 calls via V4L2
+``LD_PRELOAD=/usr/lib/v4l1compat.so``. It also emulates V4L1 calls via V4L2
 API.
 
 It allows usage of binary legacy applications that still don't use
diff --git a/Documentation/media/uapi/v4l/mmap.rst b/Documentation/media/uapi/v4l/mmap.rst
index 7ad5d5e..670596c 100644
--- a/Documentation/media/uapi/v4l/mmap.rst
+++ b/Documentation/media/uapi/v4l/mmap.rst
@@ -8,7 +8,7 @@
 
 Input and output devices support this I/O method when the
 ``V4L2_CAP_STREAMING`` flag in the ``capabilities`` field of struct
-:ref:`v4l2_capability <v4l2-capability>` returned by the
+:c:type:`v4l2_capability` returned by the
 :ref:`VIDIOC_QUERYCAP` ioctl is set. There are two
 streaming methods, to determine if the memory mapping flavor is
 supported applications must call the :ref:`VIDIOC_REQBUFS` ioctl
@@ -39,10 +39,10 @@
 location of the buffers in device memory can be determined with the
 :ref:`VIDIOC_QUERYBUF` ioctl. In the single-planar
 API case, the ``m.offset`` and ``length`` returned in a struct
-:ref:`v4l2_buffer <v4l2-buffer>` are passed as sixth and second
+:c:type:`v4l2_buffer` are passed as sixth and second
 parameter to the :ref:`mmap() <func-mmap>` function. When using the
-multi-planar API, struct :ref:`v4l2_buffer <v4l2-buffer>` contains an
-array of struct :ref:`v4l2_plane <v4l2-plane>` structures, each
+multi-planar API, struct :c:type:`v4l2_buffer` contains an
+array of struct :c:type:`v4l2_plane` structures, each
 containing its own ``m.offset`` and ``length``. When using the
 multi-planar API, every plane of every buffer has to be mapped
 separately, so the number of calls to :ref:`mmap() <func-mmap>` should
@@ -218,7 +218,7 @@
 applications can enqueue in advance, or dequeue and process. They can
 also enqueue in a different order than buffers have been dequeued, and
 the driver can *fill* enqueued *empty* buffers in any order.  [#f2]_ The
-index number of a buffer (struct :ref:`v4l2_buffer <v4l2-buffer>`
+index number of a buffer (struct :c:type:`v4l2_buffer`
 ``index``) plays no role here, it only identifies the buffer.
 
 Initially all mapped buffers are in dequeued state, inaccessible by the
@@ -251,7 +251,7 @@
    removes all buffers from both queues as a side effect. Since there is
    no notion of doing anything "now" on a multitasking system, if an
    application needs to synchronize with another event it should examine
-   the struct ::ref:`v4l2_buffer <v4l2-buffer>` ``timestamp`` of captured
+   the struct ::c:type:`v4l2_buffer` ``timestamp`` of captured
    or outputted buffers.
 
 Drivers implementing memory mapping I/O must support the
diff --git a/Documentation/media/uapi/v4l/pixfmt-002.rst b/Documentation/media/uapi/v4l/pixfmt-002.rst
index fae9b2d..0d9e697 100644
--- a/Documentation/media/uapi/v4l/pixfmt-002.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-002.rst
@@ -4,193 +4,130 @@
 Single-planar format structure
 ******************************
 
+.. tabularcolumns:: |p{4.0cm}|p{2.5cm}|p{11.0cm}|
 
-.. _v4l2-pix-format:
+.. c:type:: v4l2_pix_format
+
+.. cssclass:: longtable
 
 .. flat-table:: struct v4l2_pix_format
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
+    * - __u32
+      - ``width``
+      - Image width in pixels.
+    * - __u32
+      - ``height``
+      - Image height in pixels. If ``field`` is one of ``V4L2_FIELD_TOP``,
+	``V4L2_FIELD_BOTTOM`` or ``V4L2_FIELD_ALTERNATE`` then height
+	refers to the number of lines in the field, otherwise it refers to
+	the number of lines in the frame (which is twice the field height
+	for interlaced formats).
+    * - :cspan:`2` Applications set these fields to request an image
+	size, drivers return the closest possible values. In case of
+	planar formats the ``width`` and ``height`` applies to the largest
+	plane. To avoid ambiguities drivers must return values rounded up
+	to a multiple of the scale factor of any smaller planes. For
+	example when the image format is YUV 4:2:0, ``width`` and
+	``height`` must be multiples of two.
+    * - __u32
+      - ``pixelformat``
+      - The pixel format or type of compression, set by the application.
+	This is a little endian
+	:ref:`four character code <v4l2-fourcc>`. V4L2 defines standard
+	RGB formats in :ref:`rgb-formats`, YUV formats in
+	:ref:`yuv-formats`, and reserved codes in
+	:ref:`reserved-formats`
+    * - enum :c:type::`v4l2_field`
+      - ``field``
+      - Video images are typically interlaced. Applications can request to
+	capture or output only the top or bottom field, or both fields
+	interlaced or sequentially stored in one buffer or alternating in
+	separate buffers. Drivers return the actual field order selected.
+	For more details on fields see :ref:`field-order`.
+    * - __u32
+      - ``bytesperline``
+      - Distance in bytes between the leftmost pixels in two adjacent
+	lines.
+    * - :cspan:`2`
 
-    -  .. row 1
+	Both applications and drivers can set this field to request
+	padding bytes at the end of each line. Drivers however may ignore
+	the value requested by the application, returning ``width`` times
+	bytes per pixel or a larger value required by the hardware. That
+	implies applications can just set this field to zero to get a
+	reasonable default.
 
-       -  __u32
+	Video hardware may access padding bytes, therefore they must
+	reside in accessible memory. Consider cases where padding bytes
+	after the last line of an image cross a system page boundary.
+	Input devices may write padding bytes, the value is undefined.
+	Output devices ignore the contents of padding bytes.
 
-       -  ``width``
+	When the image format is planar the ``bytesperline`` value applies
+	to the first plane and is divided by the same factor as the
+	``width`` field for the other planes. For example the Cb and Cr
+	planes of a YUV 4:2:0 image have half as many padding bytes
+	following each line as the Y plane. To avoid ambiguities drivers
+	must return a ``bytesperline`` value rounded up to a multiple of
+	the scale factor.
 
-       -  Image width in pixels.
+	For compressed formats the ``bytesperline`` value makes no sense.
+	Applications and drivers must set this to 0 in that case.
+    * - __u32
+      - ``sizeimage``
+      - Size in bytes of the buffer to hold a complete image, set by the
+	driver. Usually this is ``bytesperline`` times ``height``. When
+	the image consists of variable length compressed data this is the
+	maximum number of bytes required to hold an image.
+    * - enum :c:type:`v4l2_colorspace`
+      - ``colorspace``
+      - This information supplements the ``pixelformat`` and must be set
+	by the driver for capture streams and by the application for
+	output streams, see :ref:`colorspaces`.
+    * - __u32
+      - ``priv``
+      - This field indicates whether the remaining fields of the
+	struct :c:type:`v4l2_pix_format`, also called the
+	extended fields, are valid. When set to
+	``V4L2_PIX_FMT_PRIV_MAGIC``, it indicates that the extended fields
+	have been correctly initialized. When set to any other value it
+	indicates that the extended fields contain undefined values.
 
-    -  .. row 2
+	Applications that wish to use the pixel format extended fields
+	must first ensure that the feature is supported by querying the
+	device for the :ref:`V4L2_CAP_EXT_PIX_FORMAT <querycap>`
+	capability. If the capability isn't set the pixel format extended
+	fields are not supported and using the extended fields will lead
+	to undefined results.
 
-       -  __u32
+	To use the extended fields, applications must set the ``priv``
+	field to ``V4L2_PIX_FMT_PRIV_MAGIC``, initialize all the extended
+	fields and zero the unused bytes of the
+	struct :c:type:`v4l2_format` ``raw_data`` field.
 
-       -  ``height``
-
-       -  Image height in pixels. If ``field`` is one of ``V4L2_FIELD_TOP``,
-	  ``V4L2_FIELD_BOTTOM`` or ``V4L2_FIELD_ALTERNATE`` then height
-	  refers to the number of lines in the field, otherwise it refers to
-	  the number of lines in the frame (which is twice the field height
-	  for interlaced formats).
-
-    -  .. row 3
-
-       -  :cspan:`2` Applications set these fields to request an image
-	  size, drivers return the closest possible values. In case of
-	  planar formats the ``width`` and ``height`` applies to the largest
-	  plane. To avoid ambiguities drivers must return values rounded up
-	  to a multiple of the scale factor of any smaller planes. For
-	  example when the image format is YUV 4:2:0, ``width`` and
-	  ``height`` must be multiples of two.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``pixelformat``
-
-       -  The pixel format or type of compression, set by the application.
-	  This is a little endian
-	  :ref:`four character code <v4l2-fourcc>`. V4L2 defines standard
-	  RGB formats in :ref:`rgb-formats`, YUV formats in
-	  :ref:`yuv-formats`, and reserved codes in
-	  :ref:`reserved-formats`
-
-    -  .. row 5
-
-       -  enum :ref:`v4l2_field <v4l2-field>`
-
-       -  ``field``
-
-       -  Video images are typically interlaced. Applications can request to
-	  capture or output only the top or bottom field, or both fields
-	  interlaced or sequentially stored in one buffer or alternating in
-	  separate buffers. Drivers return the actual field order selected.
-	  For more details on fields see :ref:`field-order`.
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``bytesperline``
-
-       -  Distance in bytes between the leftmost pixels in two adjacent
-	  lines.
-
-    -  .. row 7
-
-       -  :cspan:`2`
-
-	  Both applications and drivers can set this field to request
-	  padding bytes at the end of each line. Drivers however may ignore
-	  the value requested by the application, returning ``width`` times
-	  bytes per pixel or a larger value required by the hardware. That
-	  implies applications can just set this field to zero to get a
-	  reasonable default.
-
-	  Video hardware may access padding bytes, therefore they must
-	  reside in accessible memory. Consider cases where padding bytes
-	  after the last line of an image cross a system page boundary.
-	  Input devices may write padding bytes, the value is undefined.
-	  Output devices ignore the contents of padding bytes.
-
-	  When the image format is planar the ``bytesperline`` value applies
-	  to the first plane and is divided by the same factor as the
-	  ``width`` field for the other planes. For example the Cb and Cr
-	  planes of a YUV 4:2:0 image have half as many padding bytes
-	  following each line as the Y plane. To avoid ambiguities drivers
-	  must return a ``bytesperline`` value rounded up to a multiple of
-	  the scale factor.
-
-	  For compressed formats the ``bytesperline`` value makes no sense.
-	  Applications and drivers must set this to 0 in that case.
-
-    -  .. row 8
-
-       -  __u32
-
-       -  ``sizeimage``
-
-       -  Size in bytes of the buffer to hold a complete image, set by the
-	  driver. Usually this is ``bytesperline`` times ``height``. When
-	  the image consists of variable length compressed data this is the
-	  maximum number of bytes required to hold an image.
-
-    -  .. row 9
-
-       -  enum :ref:`v4l2_colorspace <v4l2-colorspace>`
-
-       -  ``colorspace``
-
-       -  This information supplements the ``pixelformat`` and must be set
-	  by the driver for capture streams and by the application for
-	  output streams, see :ref:`colorspaces`.
-
-    -  .. row 10
-
-       -  __u32
-
-       -  ``priv``
-
-       -  This field indicates whether the remaining fields of the
-	  :ref:`struct v4l2_pix_format <v4l2-pix-format>` structure, also called the
-	  extended fields, are valid. When set to
-	  ``V4L2_PIX_FMT_PRIV_MAGIC``, it indicates that the extended fields
-	  have been correctly initialized. When set to any other value it
-	  indicates that the extended fields contain undefined values.
-
-	  Applications that wish to use the pixel format extended fields
-	  must first ensure that the feature is supported by querying the
-	  device for the :ref:`V4L2_CAP_EXT_PIX_FORMAT <querycap>`
-	  capability. If the capability isn't set the pixel format extended
-	  fields are not supported and using the extended fields will lead
-	  to undefined results.
-
-	  To use the extended fields, applications must set the ``priv``
-	  field to ``V4L2_PIX_FMT_PRIV_MAGIC``, initialize all the extended
-	  fields and zero the unused bytes of the
-	  :ref:`struct v4l2_format <v4l2-format>` ``raw_data`` field.
-
-	  When the ``priv`` field isn't set to ``V4L2_PIX_FMT_PRIV_MAGIC``
-	  drivers must act as if all the extended fields were set to zero.
-	  On return drivers must set the ``priv`` field to
-	  ``V4L2_PIX_FMT_PRIV_MAGIC`` and all the extended fields to
-	  applicable values.
-
-    -  .. row 11
-
-       -  __u32
-
-       -  ``flags``
-
-       -  Flags set by the application or driver, see :ref:`format-flags`.
-
-    -  .. row 12
-
-       -  enum :ref:`v4l2_ycbcr_encoding <v4l2-ycbcr-encoding>`
-
-       -  ``ycbcr_enc``
-
-       -  This information supplements the ``colorspace`` and must be set by
-	  the driver for capture streams and by the application for output
-	  streams, see :ref:`colorspaces`.
-
-    -  .. row 13
-
-       -  enum :ref:`v4l2_quantization <v4l2-quantization>`
-
-       -  ``quantization``
-
-       -  This information supplements the ``colorspace`` and must be set by
-	  the driver for capture streams and by the application for output
-	  streams, see :ref:`colorspaces`.
-
-    -  .. row 14
-
-       -  enum :ref:`v4l2_xfer_func <v4l2-xfer-func>`
-
-       -  ``xfer_func``
-
-       -  This information supplements the ``colorspace`` and must be set by
-	  the driver for capture streams and by the application for output
-	  streams, see :ref:`colorspaces`.
+	When the ``priv`` field isn't set to ``V4L2_PIX_FMT_PRIV_MAGIC``
+	drivers must act as if all the extended fields were set to zero.
+	On return drivers must set the ``priv`` field to
+	``V4L2_PIX_FMT_PRIV_MAGIC`` and all the extended fields to
+	applicable values.
+    * - __u32
+      - ``flags``
+      - Flags set by the application or driver, see :ref:`format-flags`.
+    * - enum :c:type:`v4l2_ycbcr_encoding`
+      - ``ycbcr_enc``
+      - This information supplements the ``colorspace`` and must be set by
+	the driver for capture streams and by the application for output
+	streams, see :ref:`colorspaces`.
+    * - enum :c:type:`v4l2_quantization`
+      - ``quantization``
+      - This information supplements the ``colorspace`` and must be set by
+	the driver for capture streams and by the application for output
+	streams, see :ref:`colorspaces`.
+    * - enum :c:type:`v4l2_xfer_func`
+      - ``xfer_func``
+      - This information supplements the ``colorspace`` and must be set by
+	the driver for capture streams and by the application for output
+	streams, see :ref:`colorspaces`.
diff --git a/Documentation/media/uapi/v4l/pixfmt-003.rst b/Documentation/media/uapi/v4l/pixfmt-003.rst
index 25c5487..ae9ea7a 100644
--- a/Documentation/media/uapi/v4l/pixfmt-003.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-003.rst
@@ -4,163 +4,91 @@
 Multi-planar format structures
 ******************************
 
-The :ref:`struct v4l2_plane_pix_format <v4l2-plane-pix-format>` structures define size
+The struct :c:type:`v4l2_plane_pix_format` structures define size
 and layout for each of the planes in a multi-planar format. The
-:ref:`struct v4l2_pix_format_mplane <v4l2-pix-format-mplane>` structure contains
+struct :c:type:`v4l2_pix_format_mplane` structure contains
 information common to all planes (such as image width and height) and an
-array of :ref:`struct v4l2_plane_pix_format <v4l2-plane-pix-format>` structures,
+array of struct :c:type:`v4l2_plane_pix_format` structures,
 describing all planes of that format.
 
 
-.. _v4l2-plane-pix-format:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_plane_pix_format
 
 .. flat-table:: struct v4l2_plane_pix_format
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``sizeimage``
-
-       -  Maximum size in bytes required for image data in this plane.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``bytesperline``
-
-       -  Distance in bytes between the leftmost pixels in two adjacent
-	  lines. See struct :ref:`v4l2_pix_format <v4l2-pix-format>`.
-
-    -  .. row 3
-
-       -  __u16
-
-       -  ``reserved[6]``
-
-       -  Reserved for future extensions. Should be zeroed by drivers and
-	  applications.
+    * - __u32
+      - ``sizeimage``
+      - Maximum size in bytes required for image data in this plane.
+    * - __u32
+      - ``bytesperline``
+      - Distance in bytes between the leftmost pixels in two adjacent
+	lines. See struct :c:type:`v4l2_pix_format`.
+    * - __u16
+      - ``reserved[6]``
+      - Reserved for future extensions. Should be zeroed by drivers and
+	applications.
 
 
+.. tabularcolumns:: |p{4.4cm}|p{5.6cm}|p{7.5cm}|
 
-.. _v4l2-pix-format-mplane:
+.. c:type:: v4l2_pix_format_mplane
 
 .. flat-table:: struct v4l2_pix_format_mplane
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``width``
-
-       -  Image width in pixels. See struct
-	  :ref:`v4l2_pix_format <v4l2-pix-format>`.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``height``
-
-       -  Image height in pixels. See struct
-	  :ref:`v4l2_pix_format <v4l2-pix-format>`.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``pixelformat``
-
-       -  The pixel format. Both single- and multi-planar four character
-	  codes can be used.
-
-    -  .. row 4
-
-       -  enum :ref:`v4l2_field <v4l2-field>`
-
-       -  ``field``
-
-       -  See struct :ref:`v4l2_pix_format <v4l2-pix-format>`.
-
-    -  .. row 5
-
-       -  enum :ref:`v4l2_colorspace <v4l2-colorspace>`
-
-       -  ``colorspace``
-
-       -  See struct :ref:`v4l2_pix_format <v4l2-pix-format>`.
-
-    -  .. row 6
-
-       -  struct :ref:`v4l2_plane_pix_format <v4l2-plane-pix-format>`
-
-       -  ``plane_fmt[VIDEO_MAX_PLANES]``
-
-       -  An array of structures describing format of each plane this pixel
-	  format consists of. The number of valid entries in this array has
-	  to be put in the ``num_planes`` field.
-
-    -  .. row 7
-
-       -  __u8
-
-       -  ``num_planes``
-
-       -  Number of planes (i.e. separate memory buffers) for this format
-	  and the number of valid entries in the ``plane_fmt`` array.
-
-    -  .. row 8
-
-       -  __u8
-
-       -  ``flags``
-
-       -  Flags set by the application or driver, see :ref:`format-flags`.
-
-    -  .. row 9
-
-       -  enum :ref:`v4l2_ycbcr_encoding <v4l2-ycbcr-encoding>`
-
-       -  ``ycbcr_enc``
-
-       -  This information supplements the ``colorspace`` and must be set by
-	  the driver for capture streams and by the application for output
-	  streams, see :ref:`colorspaces`.
-
-    -  .. row 10
-
-       -  enum :ref:`v4l2_quantization <v4l2-quantization>`
-
-       -  ``quantization``
-
-       -  This information supplements the ``colorspace`` and must be set by
-	  the driver for capture streams and by the application for output
-	  streams, see :ref:`colorspaces`.
-
-    -  .. row 11
-
-       -  enum :ref:`v4l2_xfer_func <v4l2-xfer-func>`
-
-       -  ``xfer_func``
-
-       -  This information supplements the ``colorspace`` and must be set by
-	  the driver for capture streams and by the application for output
-	  streams, see :ref:`colorspaces`.
-
-    -  .. row 12
-
-       -  __u8
-
-       -  ``reserved[7]``
-
-       -  Reserved for future extensions. Should be zeroed by drivers and
-	  applications.
+    * - __u32
+      - ``width``
+      - Image width in pixels. See struct
+	:c:type:`v4l2_pix_format`.
+    * - __u32
+      - ``height``
+      - Image height in pixels. See struct
+	:c:type:`v4l2_pix_format`.
+    * - __u32
+      - ``pixelformat``
+      - The pixel format. Both single- and multi-planar four character
+	codes can be used.
+    * - enum :c:type:`v4l2_field`
+      - ``field``
+      - See struct :c:type:`v4l2_pix_format`.
+    * - enum :c:type:`v4l2_colorspace`
+      - ``colorspace``
+      - See struct :c:type:`v4l2_pix_format`.
+    * - struct :c:type:`v4l2_plane_pix_format`
+      - ``plane_fmt[VIDEO_MAX_PLANES]``
+      - An array of structures describing format of each plane this pixel
+	format consists of. The number of valid entries in this array has
+	to be put in the ``num_planes`` field.
+    * - __u8
+      - ``num_planes``
+      - Number of planes (i.e. separate memory buffers) for this format
+	and the number of valid entries in the ``plane_fmt`` array.
+    * - __u8
+      - ``flags``
+      - Flags set by the application or driver, see :ref:`format-flags`.
+    * - enum :c:type:`v4l2_ycbcr_encoding`
+      - ``ycbcr_enc``
+      - This information supplements the ``colorspace`` and must be set by
+	the driver for capture streams and by the application for output
+	streams, see :ref:`colorspaces`.
+    * - enum :c:type:`v4l2_quantization`
+      - ``quantization``
+      - This information supplements the ``colorspace`` and must be set by
+	the driver for capture streams and by the application for output
+	streams, see :ref:`colorspaces`.
+    * - enum :c:type:`v4l2_xfer_func`
+      - ``xfer_func``
+      - This information supplements the ``colorspace`` and must be set by
+	the driver for capture streams and by the application for output
+	streams, see :ref:`colorspaces`.
+    * - __u8
+      - ``reserved[7]``
+      - Reserved for future extensions. Should be zeroed by drivers and
+	applications.
diff --git a/Documentation/media/uapi/v4l/pixfmt-006.rst b/Documentation/media/uapi/v4l/pixfmt-006.rst
index 987b9a8..a9890ff 100644
--- a/Documentation/media/uapi/v4l/pixfmt-006.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-006.rst
@@ -5,284 +5,145 @@
 ****************************
 
 In V4L2 colorspaces are defined by four values. The first is the
-colorspace identifier (enum :ref:`v4l2_colorspace <v4l2-colorspace>`)
+colorspace identifier (enum :c:type:`v4l2_colorspace`)
 which defines the chromaticities, the default transfer function, the
 default Y'CbCr encoding and the default quantization method. The second
 is the transfer function identifier (enum
-:ref:`v4l2_xfer_func <v4l2-xfer-func>`) to specify non-standard
+:c:type:`v4l2_xfer_func`) to specify non-standard
 transfer functions. The third is the Y'CbCr encoding identifier (enum
-:ref:`v4l2_ycbcr_encoding <v4l2-ycbcr-encoding>`) to specify
+:c:type:`v4l2_ycbcr_encoding`) to specify
 non-standard Y'CbCr encodings and the fourth is the quantization
-identifier (enum :ref:`v4l2_quantization <v4l2-quantization>`) to
+identifier (enum :c:type:`v4l2_quantization`) to
 specify non-standard quantization methods. Most of the time only the
-colorspace field of struct :ref:`v4l2_pix_format <v4l2-pix-format>`
-or struct :ref:`v4l2_pix_format_mplane <v4l2-pix-format-mplane>`
+colorspace field of struct :c:type:`v4l2_pix_format`
+or struct :c:type:`v4l2_pix_format_mplane`
 needs to be filled in.
 
-.. note:: The default R'G'B' quantization is full range for all
+.. note::
+
+   The default R'G'B' quantization is full range for all
    colorspaces except for BT.2020 which uses limited range R'G'B'
    quantization.
 
+.. tabularcolumns:: |p{6.0cm}|p{11.5cm}|
 
-.. _v4l2-colorspace:
+.. c:type:: v4l2_colorspace
 
 .. flat-table:: V4L2 Colorspaces
     :header-rows:  1
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  Identifier
-
-       -  Details
-
-    -  .. row 2
-
-       -  ``V4L2_COLORSPACE_DEFAULT``
-
-       -  The default colorspace. This can be used by applications to let
-	  the driver fill in the colorspace.
-
-    -  .. row 3
-
-       -  ``V4L2_COLORSPACE_SMPTE170M``
-
-       -  See :ref:`col-smpte-170m`.
-
-    -  .. row 4
-
-       -  ``V4L2_COLORSPACE_REC709``
-
-       -  See :ref:`col-rec709`.
-
-    -  .. row 5
-
-       -  ``V4L2_COLORSPACE_SRGB``
-
-       -  See :ref:`col-srgb`.
-
-    -  .. row 6
-
-       -  ``V4L2_COLORSPACE_ADOBERGB``
-
-       -  See :ref:`col-adobergb`.
-
-    -  .. row 7
-
-       -  ``V4L2_COLORSPACE_BT2020``
-
-       -  See :ref:`col-bt2020`.
-
-    -  .. row 8
-
-       -  ``V4L2_COLORSPACE_DCI_P3``
-
-       -  See :ref:`col-dcip3`.
-
-    -  .. row 9
-
-       -  ``V4L2_COLORSPACE_SMPTE240M``
-
-       -  See :ref:`col-smpte-240m`.
-
-    -  .. row 10
-
-       -  ``V4L2_COLORSPACE_470_SYSTEM_M``
-
-       -  See :ref:`col-sysm`.
-
-    -  .. row 11
-
-       -  ``V4L2_COLORSPACE_470_SYSTEM_BG``
-
-       -  See :ref:`col-sysbg`.
-
-    -  .. row 12
-
-       -  ``V4L2_COLORSPACE_JPEG``
-
-       -  See :ref:`col-jpeg`.
-
-    -  .. row 13
-
-       -  ``V4L2_COLORSPACE_RAW``
-
-       -  The raw colorspace. This is used for raw image capture where the
-	  image is minimally processed and is using the internal colorspace
-	  of the device. The software that processes an image using this
-	  'colorspace' will have to know the internals of the capture
-	  device.
+    * - Identifier
+      - Details
+    * - ``V4L2_COLORSPACE_DEFAULT``
+      - The default colorspace. This can be used by applications to let
+	the driver fill in the colorspace.
+    * - ``V4L2_COLORSPACE_SMPTE170M``
+      - See :ref:`col-smpte-170m`.
+    * - ``V4L2_COLORSPACE_REC709``
+      - See :ref:`col-rec709`.
+    * - ``V4L2_COLORSPACE_SRGB``
+      - See :ref:`col-srgb`.
+    * - ``V4L2_COLORSPACE_ADOBERGB``
+      - See :ref:`col-adobergb`.
+    * - ``V4L2_COLORSPACE_BT2020``
+      - See :ref:`col-bt2020`.
+    * - ``V4L2_COLORSPACE_DCI_P3``
+      - See :ref:`col-dcip3`.
+    * - ``V4L2_COLORSPACE_SMPTE240M``
+      - See :ref:`col-smpte-240m`.
+    * - ``V4L2_COLORSPACE_470_SYSTEM_M``
+      - See :ref:`col-sysm`.
+    * - ``V4L2_COLORSPACE_470_SYSTEM_BG``
+      - See :ref:`col-sysbg`.
+    * - ``V4L2_COLORSPACE_JPEG``
+      - See :ref:`col-jpeg`.
+    * - ``V4L2_COLORSPACE_RAW``
+      - The raw colorspace. This is used for raw image capture where the
+	image is minimally processed and is using the internal colorspace
+	of the device. The software that processes an image using this
+	'colorspace' will have to know the internals of the capture
+	device.
 
 
 
-.. _v4l2-xfer-func:
+.. c:type:: v4l2_xfer_func
 
 .. flat-table:: V4L2 Transfer Function
     :header-rows:  1
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  Identifier
-
-       -  Details
-
-    -  .. row 2
-
-       -  ``V4L2_XFER_FUNC_DEFAULT``
-
-       -  Use the default transfer function as defined by the colorspace.
-
-    -  .. row 3
-
-       -  ``V4L2_XFER_FUNC_709``
-
-       -  Use the Rec. 709 transfer function.
-
-    -  .. row 4
-
-       -  ``V4L2_XFER_FUNC_SRGB``
-
-       -  Use the sRGB transfer function.
-
-    -  .. row 5
-
-       -  ``V4L2_XFER_FUNC_ADOBERGB``
-
-       -  Use the AdobeRGB transfer function.
-
-    -  .. row 6
-
-       -  ``V4L2_XFER_FUNC_SMPTE240M``
-
-       -  Use the SMPTE 240M transfer function.
-
-    -  .. row 7
-
-       -  ``V4L2_XFER_FUNC_NONE``
-
-       -  Do not use a transfer function (i.e. use linear RGB values).
-
-    -  .. row 8
-
-       -  ``V4L2_XFER_FUNC_DCI_P3``
-
-       -  Use the DCI-P3 transfer function.
-
-    -  .. row 9
-
-       -  ``V4L2_XFER_FUNC_SMPTE2084``
-
-       -  Use the SMPTE 2084 transfer function.
+    * - Identifier
+      - Details
+    * - ``V4L2_XFER_FUNC_DEFAULT``
+      - Use the default transfer function as defined by the colorspace.
+    * - ``V4L2_XFER_FUNC_709``
+      - Use the Rec. 709 transfer function.
+    * - ``V4L2_XFER_FUNC_SRGB``
+      - Use the sRGB transfer function.
+    * - ``V4L2_XFER_FUNC_ADOBERGB``
+      - Use the AdobeRGB transfer function.
+    * - ``V4L2_XFER_FUNC_SMPTE240M``
+      - Use the SMPTE 240M transfer function.
+    * - ``V4L2_XFER_FUNC_NONE``
+      - Do not use a transfer function (i.e. use linear RGB values).
+    * - ``V4L2_XFER_FUNC_DCI_P3``
+      - Use the DCI-P3 transfer function.
+    * - ``V4L2_XFER_FUNC_SMPTE2084``
+      - Use the SMPTE 2084 transfer function.
 
 
 
-.. _v4l2-ycbcr-encoding:
+.. c:type:: v4l2_ycbcr_encoding
+
+.. tabularcolumns:: |p{6.5cm}|p{11.0cm}|
 
 .. flat-table:: V4L2 Y'CbCr Encodings
     :header-rows:  1
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  Identifier
-
-       -  Details
-
-    -  .. row 2
-
-       -  ``V4L2_YCBCR_ENC_DEFAULT``
-
-       -  Use the default Y'CbCr encoding as defined by the colorspace.
-
-    -  .. row 3
-
-       -  ``V4L2_YCBCR_ENC_601``
-
-       -  Use the BT.601 Y'CbCr encoding.
-
-    -  .. row 4
-
-       -  ``V4L2_YCBCR_ENC_709``
-
-       -  Use the Rec. 709 Y'CbCr encoding.
-
-    -  .. row 5
-
-       -  ``V4L2_YCBCR_ENC_XV601``
-
-       -  Use the extended gamut xvYCC BT.601 encoding.
-
-    -  .. row 6
-
-       -  ``V4L2_YCBCR_ENC_XV709``
-
-       -  Use the extended gamut xvYCC Rec. 709 encoding.
-
-    -  .. row 7
-
-       -  ``V4L2_YCBCR_ENC_SYCC``
-
-       -  Use the extended gamut sYCC encoding.
-
-    -  .. row 8
-
-       -  ``V4L2_YCBCR_ENC_BT2020``
-
-       -  Use the default non-constant luminance BT.2020 Y'CbCr encoding.
-
-    -  .. row 9
-
-       -  ``V4L2_YCBCR_ENC_BT2020_CONST_LUM``
-
-       -  Use the constant luminance BT.2020 Yc'CbcCrc encoding.
-
-    -  .. row 10
-
-       -  ``V4L2_YCBCR_ENC_SMPTE_240M``
-
-       -  Use the SMPTE 240M Y'CbCr encoding.
+    * - Identifier
+      - Details
+    * - ``V4L2_YCBCR_ENC_DEFAULT``
+      - Use the default Y'CbCr encoding as defined by the colorspace.
+    * - ``V4L2_YCBCR_ENC_601``
+      - Use the BT.601 Y'CbCr encoding.
+    * - ``V4L2_YCBCR_ENC_709``
+      - Use the Rec. 709 Y'CbCr encoding.
+    * - ``V4L2_YCBCR_ENC_XV601``
+      - Use the extended gamut xvYCC BT.601 encoding.
+    * - ``V4L2_YCBCR_ENC_XV709``
+      - Use the extended gamut xvYCC Rec. 709 encoding.
+    * - ``V4L2_YCBCR_ENC_BT2020``
+      - Use the default non-constant luminance BT.2020 Y'CbCr encoding.
+    * - ``V4L2_YCBCR_ENC_BT2020_CONST_LUM``
+      - Use the constant luminance BT.2020 Yc'CbcCrc encoding.
+    * - ``V4L2_YCBCR_ENC_SMPTE_240M``
+      - Use the SMPTE 240M Y'CbCr encoding.
 
 
 
-.. _v4l2-quantization:
+.. c:type:: v4l2_quantization
+
+.. tabularcolumns:: |p{6.5cm}|p{11.0cm}|
 
 .. flat-table:: V4L2 Quantization Methods
     :header-rows:  1
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  Identifier
-
-       -  Details
-
-    -  .. row 2
-
-       -  ``V4L2_QUANTIZATION_DEFAULT``
-
-       -  Use the default quantization encoding as defined by the
-	  colorspace. This is always full range for R'G'B' (except for the
-	  BT.2020 colorspace) and usually limited range for Y'CbCr.
-
-    -  .. row 3
-
-       -  ``V4L2_QUANTIZATION_FULL_RANGE``
-
-       -  Use the full range quantization encoding. I.e. the range [0…1] is
-	  mapped to [0…255] (with possible clipping to [1…254] to avoid the
-	  0x00 and 0xff values). Cb and Cr are mapped from [-0.5…0.5] to
-	  [0…255] (with possible clipping to [1…254] to avoid the 0x00 and
-	  0xff values).
-
-    -  .. row 4
-
-       -  ``V4L2_QUANTIZATION_LIM_RANGE``
-
-       -  Use the limited range quantization encoding. I.e. the range [0…1]
-	  is mapped to [16…235]. Cb and Cr are mapped from [-0.5…0.5] to
-	  [16…240].
+    * - Identifier
+      - Details
+    * - ``V4L2_QUANTIZATION_DEFAULT``
+      - Use the default quantization encoding as defined by the
+	colorspace. This is always full range for R'G'B' (except for the
+	BT.2020 colorspace) and usually limited range for Y'CbCr.
+    * - ``V4L2_QUANTIZATION_FULL_RANGE``
+      - Use the full range quantization encoding. I.e. the range [0…1] is
+	mapped to [0…255] (with possible clipping to [1…254] to avoid the
+	0x00 and 0xff values). Cb and Cr are mapped from [-0.5…0.5] to
+	[0…255] (with possible clipping to [1…254] to avoid the 0x00 and
+	0xff values).
+    * - ``V4L2_QUANTIZATION_LIM_RANGE``
+      - Use the limited range quantization encoding. I.e. the range [0…1]
+	is mapped to [16…235]. Cb and Cr are mapped from [-0.5…0.5] to
+	[16…240].
diff --git a/Documentation/media/uapi/v4l/pixfmt-007.rst b/Documentation/media/uapi/v4l/pixfmt-007.rst
index 8c946b0..44bb5a7 100644
--- a/Documentation/media/uapi/v4l/pixfmt-007.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-007.rst
@@ -19,51 +19,28 @@
 
 
 
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
 .. flat-table:: SMPTE 170M Chromaticities
     :header-rows:  1
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  Color
-
-       -  x
-
-       -  y
-
-    -  .. row 2
-
-       -  Red
-
-       -  0.630
-
-       -  0.340
-
-    -  .. row 3
-
-       -  Green
-
-       -  0.310
-
-       -  0.595
-
-    -  .. row 4
-
-       -  Blue
-
-       -  0.155
-
-       -  0.070
-
-    -  .. row 5
-
-       -  White Reference (D65)
-
-       -  0.3127
-
-       -  0.3290
+    * - Color
+      - x
+      - y
+    * - Red
+      - 0.630
+      - 0.340
+    * - Green
+      - 0.310
+      - 0.595
+    * - Blue
+      - 0.155
+      - 0.070
+    * - White Reference (D65)
+      - 0.3127
+      - 0.3290
 
 
 The red, green and blue chromaticities are also often referred to as the
@@ -72,28 +49,34 @@
 The transfer function defined for SMPTE 170M is the same as the one
 defined in Rec. 709.
 
-    L' = -1.099(-L) :sup:`0.45` + 0.099 for L ≤ -0.018
+.. math::
 
-    L' = 4.5L for -0.018 < L < 0.018
+    L' = -1.099(-L)^{0.45} + 0.099 \text{, for } L \le-0.018
 
-    L' = 1.099L :sup:`0.45` - 0.099 for L ≥ 0.018
+    L' = 4.5L \text{, for } -0.018 < L < 0.018
+
+    L' = 1.099L^{0.45} - 0.099 \text{, for } L \ge 0.018
 
 Inverse Transfer function:
 
-    L = -((L' - 0.099) / -1.099) :sup:`1/0.45` for L' ≤ -0.081
+.. math::
 
-    L = L' / 4.5 for -0.081 < L' < 0.081
+    L = -\left( \frac{L' - 0.099}{-1.099} \right) ^{\frac{1}{0.45}} \text{, for } L' \le -0.081
 
-    L = ((L' + 0.099) / 1.099) :sup:`1/0.45` for L' ≥ 0.081
+    L = \frac{L'}{4.5} \text{, for } -0.081 < L' < 0.081
+
+    L = \left(\frac{L' + 0.099}{1.099}\right)^{\frac{1}{0.45} } \text{, for } L' \ge 0.081
 
 The luminance (Y') and color difference (Cb and Cr) are obtained with
 the following ``V4L2_YCBCR_ENC_601`` encoding:
 
-    Y' = 0.299R' + 0.587G' + 0.114B'
+.. math::
 
-    Cb = -0.169R' - 0.331G' + 0.5B'
+    Y' = 0.2990R' + 0.5870G' + 0.1140B'
 
-    Cr = 0.5R' - 0.419G' - 0.081B'
+    Cb = -0.1687R' - 0.3313G' + 0.5B'
+
+    Cr = 0.5R' - 0.4187G' - 0.0813B'
 
 Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
 [-0.5…0.5]. This conversion to Y'CbCr is identical to the one defined in
@@ -117,51 +100,28 @@
 
 
 
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
 .. flat-table:: Rec. 709 Chromaticities
     :header-rows:  1
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  Color
-
-       -  x
-
-       -  y
-
-    -  .. row 2
-
-       -  Red
-
-       -  0.640
-
-       -  0.330
-
-    -  .. row 3
-
-       -  Green
-
-       -  0.300
-
-       -  0.600
-
-    -  .. row 4
-
-       -  Blue
-
-       -  0.150
-
-       -  0.060
-
-    -  .. row 5
-
-       -  White Reference (D65)
-
-       -  0.3127
-
-       -  0.3290
+    * - Color
+      - x
+      - y
+    * - Red
+      - 0.640
+      - 0.330
+    * - Green
+      - 0.300
+      - 0.600
+    * - Blue
+      - 0.150
+      - 0.060
+    * - White Reference (D65)
+      - 0.3127
+      - 0.3290
 
 
 The full name of this standard is Rec. ITU-R BT.709-5.
@@ -169,23 +129,29 @@
 Transfer function. Normally L is in the range [0…1], but for the
 extended gamut xvYCC encoding values outside that range are allowed.
 
-    L' = -1.099(-L) :sup:`0.45` + 0.099 for L ≤ -0.018
+.. math::
 
-    L' = 4.5L for -0.018 < L < 0.018
+    L' = -1.099(-L)^{0.45} + 0.099 \text{, for } L \le -0.018
 
-    L' = 1.099L :sup:`0.45` - 0.099 for L ≥ 0.018
+    L' = 4.5L \text{, for } -0.018 < L < 0.018
+
+    L' = 1.099L^{0.45} - 0.099 \text{, for } L \ge 0.018
 
 Inverse Transfer function:
 
-    L = -((L' - 0.099) / -1.099) :sup:`1/0.45` for L' ≤ -0.081
+.. math::
 
-    L = L' / 4.5 for -0.081 < L' < 0.081
+    L = -\left( \frac{L' - 0.099}{-1.099} \right)^\frac{1}{0.45} \text{, for } L' \le -0.081
 
-    L = ((L' + 0.099) / 1.099) :sup:`1/0.45` for L' ≥ 0.081
+    L = \frac{L'}{4.5}\text{, for } -0.081 < L' < 0.081
+
+    L = \left(\frac{L' + 0.099}{1.099}\right)^{\frac{1}{0.45} } \text{, for } L' \ge 0.081
 
 The luminance (Y') and color difference (Cb and Cr) are obtained with
 the following ``V4L2_YCBCR_ENC_709`` encoding:
 
+.. math::
+
     Y' = 0.2126R' + 0.7152G' + 0.0722B'
 
     Cb = -0.1146R' - 0.3854G' + 0.5B'
@@ -210,22 +176,26 @@
 that are outside the range [0…1]. The resulting Y', Cb and Cr values are
 scaled and offset:
 
-    Y' = (219 / 256) * (0.2126R' + 0.7152G' + 0.0722B') + (16 / 256)
+.. math::
 
-    Cb = (224 / 256) * (-0.1146R' - 0.3854G' + 0.5B')
+    Y' = \frac{219}{256} * (0.2126R' + 0.7152G' + 0.0722B') + \frac{16}{256}
 
-    Cr = (224 / 256) * (0.5R' - 0.4542G' - 0.0458B')
+    Cb = \frac{224}{256} * (-0.1146R' - 0.3854G' + 0.5B')
+
+    Cr = \frac{224}{256} * (0.5R' - 0.4542G' - 0.0458B')
 
 The xvYCC 601 encoding (``V4L2_YCBCR_ENC_XV601``, :ref:`xvycc`) is
 similar to the BT.601 encoding, but it allows for R', G' and B' values
 that are outside the range [0…1]. The resulting Y', Cb and Cr values are
 scaled and offset:
 
-    Y' = (219 / 256) * (0.299R' + 0.587G' + 0.114B') + (16 / 256)
+.. math::
 
-    Cb = (224 / 256) * (-0.169R' - 0.331G' + 0.5B')
+    Y' = \frac{219}{256} * (0.2990R' + 0.5870G' + 0.1140B') + \frac{16}{256}
 
-    Cr = (224 / 256) * (0.5R' - 0.419G' - 0.081B')
+    Cb = \frac{224}{256} * (-0.1687R' - 0.3313G' + 0.5B')
+
+    Cr = \frac{224}{256} * (0.5R' - 0.4187G' - 0.0813B')
 
 Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
 [-0.5…0.5]. The non-standard xvYCC 709 or xvYCC 601 encodings can be
@@ -241,56 +211,33 @@
 The :ref:`srgb` standard defines the colorspace used by most webcams
 and computer graphics. The default transfer function is
 ``V4L2_XFER_FUNC_SRGB``. The default Y'CbCr encoding is
-``V4L2_YCBCR_ENC_SYCC``. The default Y'CbCr quantization is full range.
+``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is full range.
 The chromaticities of the primary colors and the white reference are:
 
 
 
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
 .. flat-table:: sRGB Chromaticities
     :header-rows:  1
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  Color
-
-       -  x
-
-       -  y
-
-    -  .. row 2
-
-       -  Red
-
-       -  0.640
-
-       -  0.330
-
-    -  .. row 3
-
-       -  Green
-
-       -  0.300
-
-       -  0.600
-
-    -  .. row 4
-
-       -  Blue
-
-       -  0.150
-
-       -  0.060
-
-    -  .. row 5
-
-       -  White Reference (D65)
-
-       -  0.3127
-
-       -  0.3290
+    * - Color
+      - x
+      - y
+    * - Red
+      - 0.640
+      - 0.330
+    * - Green
+      - 0.300
+      - 0.600
+    * - Blue
+      - 0.150
+      - 0.060
+    * - White Reference (D65)
+      - 0.3127
+      - 0.3290
 
 
 These chromaticities are identical to the Rec. 709 colorspace.
@@ -298,23 +245,28 @@
 Transfer function. Note that negative values for L are only used by the
 Y'CbCr conversion.
 
-    L' = -1.055(-L) :sup:`1/2.4` + 0.055 for L < -0.0031308
+.. math::
 
-    L' = 12.92L for -0.0031308 ≤ L ≤ 0.0031308
+    L' = -1.055(-L)^{\frac{1}{2.4} } + 0.055\text{, for }L < -0.0031308
 
-    L' = 1.055L :sup:`1/2.4` - 0.055 for 0.0031308 < L ≤ 1
+    L' = 12.92L\text{, for }-0.0031308 \le L \le 0.0031308
+
+    L' = 1.055L ^{\frac{1}{2.4} } - 0.055\text{, for }0.0031308 < L \le 1
 
 Inverse Transfer function:
 
-    L = -((-L' + 0.055) / 1.055) :sup:`2.4` for L' < -0.04045
+.. math::
 
-    L = L' / 12.92 for -0.04045 ≤ L' ≤ 0.04045
+    L = -((-L' + 0.055) / 1.055) ^{2.4}\text{, for }L' < -0.04045
 
-    L = ((L' + 0.055) / 1.055) :sup:`2.4` for L' > 0.04045
+    L = L' / 12.92\text{, for }-0.04045 \le L' \le 0.04045
+
+    L = ((L' + 0.055) / 1.055) ^{2.4}\text{, for }L' > 0.04045
 
 The luminance (Y') and color difference (Cb and Cr) are obtained with
-the following ``V4L2_YCBCR_ENC_SYCC`` encoding as defined by
-:ref:`sycc`:
+the following ``V4L2_YCBCR_ENC_601`` encoding as defined by :ref:`sycc`:
+
+.. math::
 
     Y' = 0.2990R' + 0.5870G' + 0.1140B'
 
@@ -323,11 +275,8 @@
     Cr = 0.5R' - 0.4187G' - 0.0813B'
 
 Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
-[-0.5…0.5]. The ``V4L2_YCBCR_ENC_SYCC`` quantization is always full
-range. Although this Y'CbCr encoding looks very similar to the
-``V4L2_YCBCR_ENC_XV601`` encoding, it is not. The
-``V4L2_YCBCR_ENC_XV601`` scales and offsets the Y'CbCr values before
-quantization, but this encoding does not do that.
+[-0.5…0.5]. This transform is identical to one defined in SMPTE
+170M/BT.601. The Y'CbCr quantization is full range.
 
 
 .. _col-adobergb:
@@ -339,80 +288,63 @@
 graphics that use the AdobeRGB colorspace. This is also known as the
 :ref:`oprgb` standard. The default transfer function is
 ``V4L2_XFER_FUNC_ADOBERGB``. The default Y'CbCr encoding is
-``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited
+``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is full
 range. The chromaticities of the primary colors and the white reference
 are:
 
 
 
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
 .. flat-table:: Adobe RGB Chromaticities
     :header-rows:  1
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  Color
-
-       -  x
-
-       -  y
-
-    -  .. row 2
-
-       -  Red
-
-       -  0.6400
-
-       -  0.3300
-
-    -  .. row 3
-
-       -  Green
-
-       -  0.2100
-
-       -  0.7100
-
-    -  .. row 4
-
-       -  Blue
-
-       -  0.1500
-
-       -  0.0600
-
-    -  .. row 5
-
-       -  White Reference (D65)
-
-       -  0.3127
-
-       -  0.3290
+    * - Color
+      - x
+      - y
+    * - Red
+      - 0.6400
+      - 0.3300
+    * - Green
+      - 0.2100
+      - 0.7100
+    * - Blue
+      - 0.1500
+      - 0.0600
+    * - White Reference (D65)
+      - 0.3127
+      - 0.3290
 
 
 
 Transfer function:
 
-    L' = L :sup:`1/2.19921875`
+.. math::
+
+    L' = L ^{\frac{1}{2.19921875}}
 
 Inverse Transfer function:
 
-    L = L' :sup:`2.19921875`
+.. math::
+
+    L = L'^{(2.19921875)}
 
 The luminance (Y') and color difference (Cb and Cr) are obtained with
 the following ``V4L2_YCBCR_ENC_601`` encoding:
 
-    Y' = 0.299R' + 0.587G' + 0.114B'
+.. math::
 
-    Cb = -0.169R' - 0.331G' + 0.5B'
+    Y' = 0.2990R' + 0.5870G' + 0.1140B'
 
-    Cr = 0.5R' - 0.419G' - 0.081B'
+    Cb = -0.1687R' - 0.3313G' + 0.5B'
+
+    Cr = 0.5R' - 0.4187G' - 0.0813B'
 
 Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
 [-0.5…0.5]. This transform is identical to one defined in SMPTE
-170M/BT.601. The Y'CbCr quantization is limited range.
+170M/BT.601. The Y'CbCr quantization is full range.
 
 
 .. _col-bt2020:
@@ -429,69 +361,52 @@
 
 
 
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
 .. flat-table:: BT.2020 Chromaticities
     :header-rows:  1
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  Color
-
-       -  x
-
-       -  y
-
-    -  .. row 2
-
-       -  Red
-
-       -  0.708
-
-       -  0.292
-
-    -  .. row 3
-
-       -  Green
-
-       -  0.170
-
-       -  0.797
-
-    -  .. row 4
-
-       -  Blue
-
-       -  0.131
-
-       -  0.046
-
-    -  .. row 5
-
-       -  White Reference (D65)
-
-       -  0.3127
-
-       -  0.3290
+    * - Color
+      - x
+      - y
+    * - Red
+      - 0.708
+      - 0.292
+    * - Green
+      - 0.170
+      - 0.797
+    * - Blue
+      - 0.131
+      - 0.046
+    * - White Reference (D65)
+      - 0.3127
+      - 0.3290
 
 
 
 Transfer function (same as Rec. 709):
 
-    L' = 4.5L for 0 ≤ L < 0.018
+.. math::
 
-    L' = 1.099L :sup:`0.45` - 0.099 for 0.018 ≤ L ≤ 1
+    L' = 4.5L\text{, for }0 \le L < 0.018
+
+    L' = 1.099L ^{0.45} - 0.099\text{, for } 0.018 \le L \le 1
 
 Inverse Transfer function:
 
-    L = L' / 4.5 for L' < 0.081
+.. math::
 
-    L = ((L' + 0.099) / 1.099) :sup:`1/0.45` for L' ≥ 0.081
+    L = L' / 4.5\text{, for } L' < 0.081
+
+    L = \left( \frac{L' + 0.099}{1.099}\right) ^{\frac{1}{0.45} }\text{, for } L' \ge 0.081
 
 The luminance (Y') and color difference (Cb and Cr) are obtained with
 the following ``V4L2_YCBCR_ENC_BT2020`` encoding:
 
+.. math::
+
     Y' = 0.2627R' + 0.6780G' + 0.0593B'
 
     Cb = -0.1396R' - 0.3604G' + 0.5B'
@@ -506,23 +421,20 @@
 
 Luma:
 
-    Yc' = (0.2627R + 0.6780G + 0.0593B)'
+.. math::
+    :nowrap:
 
-B' - Yc' ≤ 0:
-
-    Cbc = (B' - Yc') / 1.9404
-
-B' - Yc' > 0:
-
-    Cbc = (B' - Yc') / 1.5816
-
-R' - Yc' ≤ 0:
-
-    Crc = (R' - Y') / 1.7184
-
-R' - Yc' > 0:
-
-    Crc = (R' - Y') / 0.9936
+    \begin{align*}
+    Yc' = (0.2627R + 0.6780G + 0.0593B)'& \\
+    B' - Yc' \le 0:& \\
+        &Cbc = (B' - Yc') / 1.9404 \\
+    B' - Yc' > 0: & \\
+        &Cbc = (B' - Yc') / 1.5816 \\
+    R' - Yc' \le 0:& \\
+        &Crc = (R' - Y') / 1.7184 \\
+    R' - Yc' > 0:& \\
+        &Crc = (R' - Y') / 0.9936
+    \end{align*}
 
 Yc' is clamped to the range [0…1] and Cbc and Crc are clamped to the
 range [-0.5…0.5]. The Yc'CbcCrc quantization is limited range.
@@ -536,71 +448,54 @@
 The :ref:`smpte431` standard defines the colorspace used by cinema
 projectors that use the DCI-P3 colorspace. The default transfer function
 is ``V4L2_XFER_FUNC_DCI_P3``. The default Y'CbCr encoding is
-``V4L2_YCBCR_ENC_709``.
+``V4L2_YCBCR_ENC_709``. The default Y'CbCr quantization is limited range.
 
-.. note:: Note that this colorspace does not specify a
+.. note::
+
+   Note that this colorspace standard does not specify a
    Y'CbCr encoding since it is not meant to be encoded to Y'CbCr. So this
-   default Y'CbCr encoding was picked because it is the HDTV encoding. The
-   default Y'CbCr quantization is limited range. The chromaticities of the
-   primary colors and the white reference are:
+   default Y'CbCr encoding was picked because it is the HDTV encoding.
+
+The chromaticities of the primary colors and the white reference are:
 
 
 
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
 .. flat-table:: DCI-P3 Chromaticities
     :header-rows:  1
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  Color
-
-       -  x
-
-       -  y
-
-    -  .. row 2
-
-       -  Red
-
-       -  0.6800
-
-       -  0.3200
-
-    -  .. row 3
-
-       -  Green
-
-       -  0.2650
-
-       -  0.6900
-
-    -  .. row 4
-
-       -  Blue
-
-       -  0.1500
-
-       -  0.0600
-
-    -  .. row 5
-
-       -  White Reference
-
-       -  0.3140
-
-       -  0.3510
+    * - Color
+      - x
+      - y
+    * - Red
+      - 0.6800
+      - 0.3200
+    * - Green
+      - 0.2650
+      - 0.6900
+    * - Blue
+      - 0.1500
+      - 0.0600
+    * - White Reference
+      - 0.3140
+      - 0.3510
 
 
 
 Transfer function:
 
-    L' = L :sup:`1/2.6`
+.. math::
+
+    L' = L^{\frac{1}{2.6}}
 
 Inverse Transfer function:
 
-    L = L' :sup:`2.6`
+.. math::
+
+    L = L'^{(2.6)}
 
 Y'CbCr encoding is not specified. V4L2 defaults to Rec. 709.
 
@@ -619,77 +514,60 @@
 
 
 
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
 .. flat-table:: SMPTE 240M Chromaticities
     :header-rows:  1
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  Color
-
-       -  x
-
-       -  y
-
-    -  .. row 2
-
-       -  Red
-
-       -  0.630
-
-       -  0.340
-
-    -  .. row 3
-
-       -  Green
-
-       -  0.310
-
-       -  0.595
-
-    -  .. row 4
-
-       -  Blue
-
-       -  0.155
-
-       -  0.070
-
-    -  .. row 5
-
-       -  White Reference (D65)
-
-       -  0.3127
-
-       -  0.3290
+    * - Color
+      - x
+      - y
+    * - Red
+      - 0.630
+      - 0.340
+    * - Green
+      - 0.310
+      - 0.595
+    * - Blue
+      - 0.155
+      - 0.070
+    * - White Reference (D65)
+      - 0.3127
+      - 0.3290
 
 
 These chromaticities are identical to the SMPTE 170M colorspace.
 
 Transfer function:
 
-    L' = 4L for 0 ≤ L < 0.0228
+.. math::
 
-    L' = 1.1115L :sup:`0.45` - 0.1115 for 0.0228 ≤ L ≤ 1
+    L' = 4L\text{, for } 0 \le L < 0.0228
+
+    L' = 1.1115L ^{0.45} - 0.1115\text{, for } 0.0228 \le L \le 1
 
 Inverse Transfer function:
 
-    L = L' / 4 for 0 ≤ L' < 0.0913
+.. math::
 
-    L = ((L' + 0.1115) / 1.1115) :sup:`1/0.45` for L' ≥ 0.0913
+    L = \frac{L'}{4}\text{, for } 0 \le L' < 0.0913
+
+    L = \left( \frac{L' + 0.1115}{1.1115}\right) ^{\frac{1}{0.45} }\text{, for } L' \ge 0.0913
 
 The luminance (Y') and color difference (Cb and Cr) are obtained with
 the following ``V4L2_YCBCR_ENC_SMPTE240M`` encoding:
 
+.. math::
+
     Y' = 0.2122R' + 0.7013G' + 0.0865B'
 
     Cb = -0.1161R' - 0.3839G' + 0.5B'
 
     Cr = 0.5R' - 0.4451G' - 0.0549B'
 
-Yc' is clamped to the range [0…1] and Cbc and Crc are clamped to the
+Y' is clamped to the range [0…1] and Cb and Cr are clamped to the
 range [-0.5…0.5]. The Y'CbCr quantization is limited range.
 
 
@@ -707,54 +585,33 @@
 
 
 
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
 .. flat-table:: NTSC 1953 Chromaticities
     :header-rows:  1
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  Color
-
-       -  x
-
-       -  y
-
-    -  .. row 2
-
-       -  Red
-
-       -  0.67
-
-       -  0.33
-
-    -  .. row 3
-
-       -  Green
-
-       -  0.21
-
-       -  0.71
-
-    -  .. row 4
-
-       -  Blue
-
-       -  0.14
-
-       -  0.08
-
-    -  .. row 5
-
-       -  White Reference (C)
-
-       -  0.310
-
-       -  0.316
+    * - Color
+      - x
+      - y
+    * - Red
+      - 0.67
+      - 0.33
+    * - Green
+      - 0.21
+      - 0.71
+    * - Blue
+      - 0.14
+      - 0.08
+    * - White Reference (C)
+      - 0.310
+      - 0.316
 
 
-.. note:: This colorspace uses Illuminant C instead of D65 as the white
+.. note::
+
+   This colorspace uses Illuminant C instead of D65 as the white
    reference. To correctly convert an image in this colorspace to another
    that uses D65 you need to apply a chromatic adaptation algorithm such as
    the Bradford method.
@@ -762,24 +619,30 @@
 The transfer function was never properly defined for NTSC 1953. The Rec.
 709 transfer function is recommended in the literature:
 
-    L' = 4.5L for 0 ≤ L < 0.018
+.. math::
 
-    L' = 1.099L :sup:`0.45` - 0.099 for 0.018 ≤ L ≤ 1
+    L' = 4.5L\text{, for } 0 \le L < 0.018
+
+    L' = 1.099L ^{0.45} - 0.099\text{, for } 0.018 \le L \le 1
 
 Inverse Transfer function:
 
-    L = L' / 4.5 for L' < 0.081
+.. math::
 
-    L = ((L' + 0.099) / 1.099) :sup:`1/0.45` for L' ≥ 0.081
+    L = \frac{L'}{4.5} \text{, for } L' < 0.081
+
+    L = \left( \frac{L' + 0.099}{1.099}\right) ^{\frac{1}{0.45} }\text{, for } L' \ge 0.081
 
 The luminance (Y') and color difference (Cb and Cr) are obtained with
 the following ``V4L2_YCBCR_ENC_601`` encoding:
 
-    Y' = 0.299R' + 0.587G' + 0.114B'
+.. math::
 
-    Cb = -0.169R' - 0.331G' + 0.5B'
+    Y' = 0.2990R' + 0.5870G' + 0.1140B'
 
-    Cr = 0.5R' - 0.419G' - 0.081B'
+    Cb = -0.1687R' - 0.3313G' + 0.5B'
+
+    Cr = 0.5R' - 0.4187G' - 0.0813B'
 
 Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
 [-0.5…0.5]. The Y'CbCr quantization is limited range. This transform is
@@ -801,75 +664,58 @@
 
 
 
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
 .. flat-table:: EBU Tech. 3213 Chromaticities
     :header-rows:  1
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  Color
-
-       -  x
-
-       -  y
-
-    -  .. row 2
-
-       -  Red
-
-       -  0.64
-
-       -  0.33
-
-    -  .. row 3
-
-       -  Green
-
-       -  0.29
-
-       -  0.60
-
-    -  .. row 4
-
-       -  Blue
-
-       -  0.15
-
-       -  0.06
-
-    -  .. row 5
-
-       -  White Reference (D65)
-
-       -  0.3127
-
-       -  0.3290
+    * - Color
+      - x
+      - y
+    * - Red
+      - 0.64
+      - 0.33
+    * - Green
+      - 0.29
+      - 0.60
+    * - Blue
+      - 0.15
+      - 0.06
+    * - White Reference (D65)
+      - 0.3127
+      - 0.3290
 
 
 
 The transfer function was never properly defined for this colorspace.
 The Rec. 709 transfer function is recommended in the literature:
 
-    L' = 4.5L for 0 ≤ L < 0.018
+.. math::
 
-    L' = 1.099L :sup:`0.45` - 0.099 for 0.018 ≤ L ≤ 1
+    L' = 4.5L\text{, for } 0 \le L < 0.018
+
+    L' = 1.099L ^{0.45} - 0.099\text{, for } 0.018 \le L \le 1
 
 Inverse Transfer function:
 
-    L = L' / 4.5 for L' < 0.081
+.. math::
 
-    L = ((L' + 0.099) / 1.099) :sup:`1/0.45` for L' ≥ 0.081
+    L = \frac{L'}{4.5} \text{, for } L' < 0.081
+
+    L = \left(\frac{L' + 0.099}{1.099} \right) ^{\frac{1}{0.45} }\text{, for } L' \ge 0.081
 
 The luminance (Y') and color difference (Cb and Cr) are obtained with
 the following ``V4L2_YCBCR_ENC_601`` encoding:
 
-    Y' = 0.299R' + 0.587G' + 0.114B'
+.. math::
 
-    Cb = -0.169R' - 0.331G' + 0.5B'
+    Y' = 0.2990R' + 0.5870G' + 0.1140B'
 
-    Cr = 0.5R' - 0.419G' - 0.081B'
+    Cb = -0.1687R' - 0.3313G' + 0.5B'
+
+    Cr = 0.5R' - 0.4187G' - 0.0813B'
 
 Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
 [-0.5…0.5]. The Y'CbCr quantization is limited range. This transform is
@@ -888,7 +734,9 @@
 with full range quantization where Y' is scaled to [0…255] and Cb/Cr are
 scaled to [-128…128] and then clipped to [-128…127].
 
-.. note:: The JPEG standard does not actually store colorspace
+.. note::
+
+   The JPEG standard does not actually store colorspace
    information. So if something other than sRGB is used, then the driver
    will have to set that information explicitly. Effectively
    ``V4L2_COLORSPACE_JPEG`` can be considered to be an abbreviation for
diff --git a/Documentation/media/uapi/v4l/pixfmt-013.rst b/Documentation/media/uapi/v4l/pixfmt-013.rst
index 475f6e6..542c087 100644
--- a/Documentation/media/uapi/v4l/pixfmt-013.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-013.rst
@@ -7,123 +7,81 @@
 
 .. _compressed-formats:
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. flat-table:: Compressed Image Formats
     :header-rows:  1
     :stub-columns: 0
     :widths:       3 1 4
 
+    * - Identifier
+      - Code
+      - Details
+    * .. _V4L2-PIX-FMT-JPEG:
 
-    -  .. row 1
+      - ``V4L2_PIX_FMT_JPEG``
+      - 'JPEG'
+      - TBD. See also :ref:`VIDIOC_G_JPEGCOMP <VIDIOC_G_JPEGCOMP>`,
+	:ref:`VIDIOC_S_JPEGCOMP <VIDIOC_G_JPEGCOMP>`.
+    * .. _V4L2-PIX-FMT-MPEG:
 
-       -  Identifier
+      - ``V4L2_PIX_FMT_MPEG``
+      - 'MPEG'
+      - MPEG multiplexed stream. The actual format is determined by
+	extended control ``V4L2_CID_MPEG_STREAM_TYPE``, see
+	:ref:`mpeg-control-id`.
+    * .. _V4L2-PIX-FMT-H264:
 
-       -  Code
+      - ``V4L2_PIX_FMT_H264``
+      - 'H264'
+      - H264 video elementary stream with start codes.
+    * .. _V4L2-PIX-FMT-H264-NO-SC:
 
-       -  Details
+      - ``V4L2_PIX_FMT_H264_NO_SC``
+      - 'AVC1'
+      - H264 video elementary stream without start codes.
+    * .. _V4L2-PIX-FMT-H264-MVC:
 
-    -  .. _V4L2-PIX-FMT-JPEG:
+      - ``V4L2_PIX_FMT_H264_MVC``
+      - 'M264'
+      - H264 MVC video elementary stream.
+    * .. _V4L2-PIX-FMT-H263:
 
-       -  ``V4L2_PIX_FMT_JPEG``
+      - ``V4L2_PIX_FMT_H263``
+      - 'H263'
+      - H263 video elementary stream.
+    * .. _V4L2-PIX-FMT-MPEG1:
 
-       -  'JPEG'
+      - ``V4L2_PIX_FMT_MPEG1``
+      - 'MPG1'
+      - MPEG1 video elementary stream.
+    * .. _V4L2-PIX-FMT-MPEG2:
 
-       -  TBD. See also :ref:`VIDIOC_G_JPEGCOMP <VIDIOC_G_JPEGCOMP>`,
-	  :ref:`VIDIOC_S_JPEGCOMP <VIDIOC_G_JPEGCOMP>`.
+      - ``V4L2_PIX_FMT_MPEG2``
+      - 'MPG2'
+      - MPEG2 video elementary stream.
+    * .. _V4L2-PIX-FMT-MPEG4:
 
-    -  .. _V4L2-PIX-FMT-MPEG:
+      - ``V4L2_PIX_FMT_MPEG4``
+      - 'MPG4'
+      - MPEG4 video elementary stream.
+    * .. _V4L2-PIX-FMT-XVID:
 
-       -  ``V4L2_PIX_FMT_MPEG``
+      - ``V4L2_PIX_FMT_XVID``
+      - 'XVID'
+      - Xvid video elementary stream.
+    * .. _V4L2-PIX-FMT-VC1-ANNEX-G:
 
-       -  'MPEG'
+      - ``V4L2_PIX_FMT_VC1_ANNEX_G``
+      - 'VC1G'
+      - VC1, SMPTE 421M Annex G compliant stream.
+    * .. _V4L2-PIX-FMT-VC1-ANNEX-L:
 
-       -  MPEG multiplexed stream. The actual format is determined by
-	  extended control ``V4L2_CID_MPEG_STREAM_TYPE``, see
-	  :ref:`mpeg-control-id`.
+      - ``V4L2_PIX_FMT_VC1_ANNEX_L``
+      - 'VC1L'
+      - VC1, SMPTE 421M Annex L compliant stream.
+    * .. _V4L2-PIX-FMT-VP8:
 
-    -  .. _V4L2-PIX-FMT-H264:
-
-       -  ``V4L2_PIX_FMT_H264``
-
-       -  'H264'
-
-       -  H264 video elementary stream with start codes.
-
-    -  .. _V4L2-PIX-FMT-H264-NO-SC:
-
-       -  ``V4L2_PIX_FMT_H264_NO_SC``
-
-       -  'AVC1'
-
-       -  H264 video elementary stream without start codes.
-
-    -  .. _V4L2-PIX-FMT-H264-MVC:
-
-       -  ``V4L2_PIX_FMT_H264_MVC``
-
-       -  'M264'
-
-       -  H264 MVC video elementary stream.
-
-    -  .. _V4L2-PIX-FMT-H263:
-
-       -  ``V4L2_PIX_FMT_H263``
-
-       -  'H263'
-
-       -  H263 video elementary stream.
-
-    -  .. _V4L2-PIX-FMT-MPEG1:
-
-       -  ``V4L2_PIX_FMT_MPEG1``
-
-       -  'MPG1'
-
-       -  MPEG1 video elementary stream.
-
-    -  .. _V4L2-PIX-FMT-MPEG2:
-
-       -  ``V4L2_PIX_FMT_MPEG2``
-
-       -  'MPG2'
-
-       -  MPEG2 video elementary stream.
-
-    -  .. _V4L2-PIX-FMT-MPEG4:
-
-       -  ``V4L2_PIX_FMT_MPEG4``
-
-       -  'MPG4'
-
-       -  MPEG4 video elementary stream.
-
-    -  .. _V4L2-PIX-FMT-XVID:
-
-       -  ``V4L2_PIX_FMT_XVID``
-
-       -  'XVID'
-
-       -  Xvid video elementary stream.
-
-    -  .. _V4L2-PIX-FMT-VC1-ANNEX-G:
-
-       -  ``V4L2_PIX_FMT_VC1_ANNEX_G``
-
-       -  'VC1G'
-
-       -  VC1, SMPTE 421M Annex G compliant stream.
-
-    -  .. _V4L2-PIX-FMT-VC1-ANNEX-L:
-
-       -  ``V4L2_PIX_FMT_VC1_ANNEX_L``
-
-       -  'VC1L'
-
-       -  VC1, SMPTE 421M Annex L compliant stream.
-
-    -  .. _V4L2-PIX-FMT-VP8:
-
-       -  ``V4L2_PIX_FMT_VP8``
-
-       -  'VP80'
-
-       -  VP8 video elementary stream.
+      - ``V4L2_PIX_FMT_VP8``
+      - 'VP80'
+      - VP8 video elementary stream.
diff --git a/Documentation/media/uapi/v4l/pixfmt-grey.rst b/Documentation/media/uapi/v4l/pixfmt-grey.rst
index 761d783..dad8138 100644
--- a/Documentation/media/uapi/v4l/pixfmt-grey.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-grey.rst
@@ -6,8 +6,6 @@
 V4L2_PIX_FMT_GREY ('GREY')
 **************************
 
-*man V4L2_PIX_FMT_GREY(2)*
-
 Grey-scale image
 
 
@@ -20,58 +18,27 @@
 **Byte Order.**
 Each cell is one byte.
 
-
-
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Y'\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Y'\ :sub:`02`
-
-       -  Y'\ :sub:`03`
-
-    -  .. row 2
-
-       -  start + 4:
-
-       -  Y'\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Y'\ :sub:`12`
-
-       -  Y'\ :sub:`13`
-
-    -  .. row 3
-
-       -  start + 8:
-
-       -  Y'\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Y'\ :sub:`22`
-
-       -  Y'\ :sub:`23`
-
-    -  .. row 4
-
-       -  start + 12:
-
-       -  Y'\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Y'\ :sub:`32`
-
-       -  Y'\ :sub:`33`
+    * - start + 0:
+      - Y'\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Y'\ :sub:`02`
+      - Y'\ :sub:`03`
+    * - start + 4:
+      - Y'\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Y'\ :sub:`12`
+      - Y'\ :sub:`13`
+    * - start + 8:
+      - Y'\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Y'\ :sub:`22`
+      - Y'\ :sub:`23`
+    * - start + 12:
+      - Y'\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Y'\ :sub:`32`
+      - Y'\ :sub:`33`
diff --git a/Documentation/media/uapi/v4l/pixfmt-indexed.rst b/Documentation/media/uapi/v4l/pixfmt-indexed.rst
index 99a780f..6edac54 100644
--- a/Documentation/media/uapi/v4l/pixfmt-indexed.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-indexed.rst
@@ -17,57 +17,31 @@
     :header-rows:  2
     :stub-columns: 0
 
+    * - Identifier
+      - Code
+      -
+      - :cspan:`7` Byte 0
+    * -
+      -
+      - Bit
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+    * .. _V4L2-PIX-FMT-PAL8:
 
-    -  .. row 1
-
-       -  Identifier
-
-       -  Code
-
-       -
-       -  :cspan:`7` Byte 0
-
-    -  .. row 2
-
-       -
-       -
-       -  Bit
-
-       -  7
-
-       -  6
-
-       -  5
-
-       -  4
-
-       -  3
-
-       -  2
-
-       -  1
-
-       -  0
-
-    -  .. _V4L2-PIX-FMT-PAL8:
-
-       -  ``V4L2_PIX_FMT_PAL8``
-
-       -  'PAL8'
-
-       -
-       -  i\ :sub:`7`
-
-       -  i\ :sub:`6`
-
-       -  i\ :sub:`5`
-
-       -  i\ :sub:`4`
-
-       -  i\ :sub:`3`
-
-       -  i\ :sub:`2`
-
-       -  i\ :sub:`1`
-
-       -  i\ :sub:`0`
+      - ``V4L2_PIX_FMT_PAL8``
+      - 'PAL8'
+      -
+      - i\ :sub:`7`
+      - i\ :sub:`6`
+      - i\ :sub:`5`
+      - i\ :sub:`4`
+      - i\ :sub:`3`
+      - i\ :sub:`2`
+      - i\ :sub:`1`
+      - i\ :sub:`0`
diff --git a/Documentation/media/uapi/v4l/pixfmt-m420.rst b/Documentation/media/uapi/v4l/pixfmt-m420.rst
index 4c5b296..7dd47c0 100644
--- a/Documentation/media/uapi/v4l/pixfmt-m420.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-m420.rst
@@ -6,8 +6,6 @@
 V4L2_PIX_FMT_M420 ('M420')
 **************************
 
-*man V4L2_PIX_FMT_M420(2)*
-
 Format with ½ horizontal and vertical chroma resolution, also known as
 YUV 4:2:0. Hybrid plane line-interleaved layout.
 
@@ -32,84 +30,40 @@
 Each cell is one byte.
 
 
-
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Y'\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Y'\ :sub:`02`
-
-       -  Y'\ :sub:`03`
-
-    -  .. row 2
-
-       -  start + 4:
-
-       -  Y'\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Y'\ :sub:`12`
-
-       -  Y'\ :sub:`13`
-
-    -  .. row 3
-
-       -  start + 8:
-
-       -  Cb\ :sub:`00`
-
-       -  Cr\ :sub:`00`
-
-       -  Cb\ :sub:`01`
-
-       -  Cr\ :sub:`01`
-
-    -  .. row 4
-
-       -  start + 16:
-
-       -  Y'\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Y'\ :sub:`22`
-
-       -  Y'\ :sub:`23`
-
-    -  .. row 5
-
-       -  start + 20:
-
-       -  Y'\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Y'\ :sub:`32`
-
-       -  Y'\ :sub:`33`
-
-    -  .. row 6
-
-       -  start + 24:
-
-       -  Cb\ :sub:`10`
-
-       -  Cr\ :sub:`10`
-
-       -  Cb\ :sub:`11`
-
-       -  Cr\ :sub:`11`
+    * - start + 0:
+      - Y'\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Y'\ :sub:`02`
+      - Y'\ :sub:`03`
+    * - start + 4:
+      - Y'\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Y'\ :sub:`12`
+      - Y'\ :sub:`13`
+    * - start + 8:
+      - Cb\ :sub:`00`
+      - Cr\ :sub:`00`
+      - Cb\ :sub:`01`
+      - Cr\ :sub:`01`
+    * - start + 16:
+      - Y'\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Y'\ :sub:`22`
+      - Y'\ :sub:`23`
+    * - start + 20:
+      - Y'\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Y'\ :sub:`32`
+      - Y'\ :sub:`33`
+    * - start + 24:
+      - Cb\ :sub:`10`
+      - Cr\ :sub:`10`
+      - Cb\ :sub:`11`
+      - Cr\ :sub:`11`
 
 
 **Color Sample Location..**
@@ -120,100 +74,53 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -
-       -  0
-
-       -
-       -  1
-
-       -  2
-
-       -
-       -  3
-
-    -  .. row 2
-
-       -  0
-
-       -  Y
-
-       -
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 3
-
-       -
-       -
-       -  C
-
-       -
-       -
-       -  C
-
-       -
-
-    -  .. row 4
-
-       -  1
-
-       -  Y
-
-       -
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 5
-
-       -
-
-    -  .. row 6
-
-       -  2
-
-       -  Y
-
-       -
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 7
-
-       -
-       -
-       -  C
-
-       -
-       -
-       -  C
-
-       -
-
-    -  .. row 8
-
-       -  3
-
-       -  Y
-
-       -
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
+    * -
+      - 0
+      -
+      - 1
+      - 2
+      -
+      - 3
+    * - 0
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
+    * -
+      -
+      - C
+      -
+      -
+      - C
+      -
+    * - 1
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
+    * -
+    * - 2
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
+    * -
+      -
+      - C
+      -
+      -
+      - C
+      -
+    * - 3
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv12.rst b/Documentation/media/uapi/v4l/pixfmt-nv12.rst
index cf59b28..5b45a6d 100644
--- a/Documentation/media/uapi/v4l/pixfmt-nv12.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-nv12.rst
@@ -7,7 +7,6 @@
 V4L2_PIX_FMT_NV12 ('NV12'), V4L2_PIX_FMT_NV21 ('NV21')
 ******************************************************
 
-*man V4L2_PIX_FMT_NV12(2)*
 
 V4L2_PIX_FMT_NV21
 Formats with ½ horizontal and vertical chroma resolution, also known as
@@ -36,84 +35,40 @@
 Each cell is one byte.
 
 
-
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Y'\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Y'\ :sub:`02`
-
-       -  Y'\ :sub:`03`
-
-    -  .. row 2
-
-       -  start + 4:
-
-       -  Y'\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Y'\ :sub:`12`
-
-       -  Y'\ :sub:`13`
-
-    -  .. row 3
-
-       -  start + 8:
-
-       -  Y'\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Y'\ :sub:`22`
-
-       -  Y'\ :sub:`23`
-
-    -  .. row 4
-
-       -  start + 12:
-
-       -  Y'\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Y'\ :sub:`32`
-
-       -  Y'\ :sub:`33`
-
-    -  .. row 5
-
-       -  start + 16:
-
-       -  Cb\ :sub:`00`
-
-       -  Cr\ :sub:`00`
-
-       -  Cb\ :sub:`01`
-
-       -  Cr\ :sub:`01`
-
-    -  .. row 6
-
-       -  start + 20:
-
-       -  Cb\ :sub:`10`
-
-       -  Cr\ :sub:`10`
-
-       -  Cb\ :sub:`11`
-
-       -  Cr\ :sub:`11`
+    * - start + 0:
+      - Y'\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Y'\ :sub:`02`
+      - Y'\ :sub:`03`
+    * - start + 4:
+      - Y'\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Y'\ :sub:`12`
+      - Y'\ :sub:`13`
+    * - start + 8:
+      - Y'\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Y'\ :sub:`22`
+      - Y'\ :sub:`23`
+    * - start + 12:
+      - Y'\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Y'\ :sub:`32`
+      - Y'\ :sub:`33`
+    * - start + 16:
+      - Cb\ :sub:`00`
+      - Cr\ :sub:`00`
+      - Cb\ :sub:`01`
+      - Cr\ :sub:`01`
+    * - start + 20:
+      - Cb\ :sub:`10`
+      - Cr\ :sub:`10`
+      - Cb\ :sub:`11`
+      - Cr\ :sub:`11`
 
 
 **Color Sample Location..**
@@ -122,100 +77,53 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -
-       -  0
-
-       -
-       -  1
-
-       -  2
-
-       -
-       -  3
-
-    -  .. row 2
-
-       -  0
-
-       -  Y
-
-       -
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 3
-
-       -
-       -
-       -  C
-
-       -
-       -
-       -  C
-
-       -
-
-    -  .. row 4
-
-       -  1
-
-       -  Y
-       -
-
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 5
-
-       -
-
-    -  .. row 6
-
-       -  2
-
-       -  Y
-       -
-
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 7
-
-       -
-       -
-       -  C
-
-       -
-       -
-       -  C
-
-       -
-
-    -  .. row 8
-
-       -  3
-
-       -  Y
-
-       -
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
+    * -
+      - 0
+      -
+      - 1
+      - 2
+      -
+      - 3
+    * - 0
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
+    * -
+      -
+      - C
+      -
+      -
+      - C
+      -
+    * - 1
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
+    * -
+    * - 2
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
+    * -
+      -
+      - C
+      -
+      -
+      - C
+      -
+    * - 3
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv12m.rst b/Documentation/media/uapi/v4l/pixfmt-nv12m.rst
index a4e7eae..de3051f 100644
--- a/Documentation/media/uapi/v4l/pixfmt-nv12m.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-nv12m.rst
@@ -8,7 +8,6 @@
 V4L2_PIX_FMT_NV12M ('NM12'), V4L2_PIX_FMT_NV21M ('NM21'), V4L2_PIX_FMT_NV12MT_16X16
 ***********************************************************************************
 
-*man V4L2_PIX_FMT_NV12M(2)*
 
 V4L2_PIX_FMT_NV21M
 V4L2_PIX_FMT_NV12MT_16X16
@@ -50,84 +49,38 @@
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start0 + 0:
-
-       -  Y'\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Y'\ :sub:`02`
-
-       -  Y'\ :sub:`03`
-
-    -  .. row 2
-
-       -  start0 + 4:
-
-       -  Y'\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Y'\ :sub:`12`
-
-       -  Y'\ :sub:`13`
-
-    -  .. row 3
-
-       -  start0 + 8:
-
-       -  Y'\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Y'\ :sub:`22`
-
-       -  Y'\ :sub:`23`
-
-    -  .. row 4
-
-       -  start0 + 12:
-
-       -  Y'\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Y'\ :sub:`32`
-
-       -  Y'\ :sub:`33`
-
-    -  .. row 5
-
-       -
-
-    -  .. row 6
-
-       -  start1 + 0:
-
-       -  Cb\ :sub:`00`
-
-       -  Cr\ :sub:`00`
-
-       -  Cb\ :sub:`01`
-
-       -  Cr\ :sub:`01`
-
-    -  .. row 7
-
-       -  start1 + 4:
-
-       -  Cb\ :sub:`10`
-
-       -  Cr\ :sub:`10`
-
-       -  Cb\ :sub:`11`
-
-       -  Cr\ :sub:`11`
+    * - start0 + 0:
+      - Y'\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Y'\ :sub:`02`
+      - Y'\ :sub:`03`
+    * - start0 + 4:
+      - Y'\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Y'\ :sub:`12`
+      - Y'\ :sub:`13`
+    * - start0 + 8:
+      - Y'\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Y'\ :sub:`22`
+      - Y'\ :sub:`23`
+    * - start0 + 12:
+      - Y'\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Y'\ :sub:`32`
+      - Y'\ :sub:`33`
+    * -
+    * - start1 + 0:
+      - Cb\ :sub:`00`
+      - Cr\ :sub:`00`
+      - Cb\ :sub:`01`
+      - Cr\ :sub:`01`
+    * - start1 + 4:
+      - Cb\ :sub:`10`
+      - Cr\ :sub:`10`
+      - Cb\ :sub:`11`
+      - Cr\ :sub:`11`
 
 
 **Color Sample Location..**
@@ -138,101 +91,54 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -
-       -  0
-
-       -
-       -  1
-
-       -  2
-
-       -
-       -  3
-
-    -  .. row 2
-
-       -  0
-
-       -  Y
-
-       -
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 3
-
-       -
-       -
-       -  C
-
-       -
-       -
-       -  C
-
-       -
-
-    -  .. row 4
-
-       -  1
-
-       -  Y
-
-       -
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 5
-
-       -
-
-    -  .. row 6
-
-       -  2
-
-       -  Y
-
-       -
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 7
-
-       -
-       -
-       -  C
-
-       -
-       -
-       -
-       -  C
-
-       -
-
-    -  .. row 8
-
-       -  3
-
-       -  Y
-
-       -
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
+    * -
+      - 0
+      -
+      - 1
+      - 2
+      -
+      - 3
+    * - 0
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
+    * -
+      -
+      - C
+      -
+      -
+      - C
+      -
+    * - 1
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
+    * -
+    * - 2
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
+    * -
+      -
+      - C
+      -
+      -
+      -
+      - C
+      -
+    * - 3
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv12mt.rst b/Documentation/media/uapi/v4l/pixfmt-nv12mt.rst
index 6198941..9f250a1 100644
--- a/Documentation/media/uapi/v4l/pixfmt-nv12mt.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-nv12mt.rst
@@ -6,8 +6,6 @@
 V4L2_PIX_FMT_NV12MT ('TM12')
 ****************************
 
-*man V4L2_PIX_FMT_NV12MT(2)*
-
 Formats with ½ horizontal and vertical chroma resolution. This format
 has two planes - one for luminance and one for chrominance. Chroma
 samples are interleaved. The difference to ``V4L2_PIX_FMT_NV12`` is the
@@ -36,7 +34,7 @@
 .. _nv12mt:
 
 .. figure::  pixfmt-nv12mt_files/nv12mt.*
-    :alt:    nv12mt.gif
+    :alt:    nv12mt.png
     :align:  center
 
     V4L2_PIX_FMT_NV12MT macroblock Z shape memory layout
@@ -53,7 +51,7 @@
 .. _nv12mt_ex:
 
 .. figure::  pixfmt-nv12mt_files/nv12mt_example.*
-    :alt:    nv12mt_example.gif
+    :alt:    nv12mt_example.png
     :align:  center
 
     Example V4L2_PIX_FMT_NV12MT memory layout of macroblocks
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt.gif b/Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt.gif
deleted file mode 100644
index ef2d4cf..0000000
--- a/Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt.gif
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt.png b/Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt.png
new file mode 100644
index 0000000..4140186
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt.png
Binary files differ
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt_example.gif b/Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt_example.gif
deleted file mode 100644
index df81d68..0000000
--- a/Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt_example.gif
+++ /dev/null
Binary files differ
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt_example.png b/Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt_example.png
new file mode 100644
index 0000000..7775f5d
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-nv12mt_files/nv12mt_example.png
Binary files differ
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv16.rst b/Documentation/media/uapi/v4l/pixfmt-nv16.rst
index 88aa761..8ceba79 100644
--- a/Documentation/media/uapi/v4l/pixfmt-nv16.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-nv16.rst
@@ -7,8 +7,6 @@
 V4L2_PIX_FMT_NV16 ('NV16'), V4L2_PIX_FMT_NV61 ('NV61')
 ******************************************************
 
-*man V4L2_PIX_FMT_NV16(2)*
-
 V4L2_PIX_FMT_NV61
 Formats with ½ horizontal chroma resolution, also known as YUV 4:2:2.
 One luminance and one chrominance plane with alternating chroma samples
@@ -35,108 +33,50 @@
 Each cell is one byte.
 
 
-
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Y'\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Y'\ :sub:`02`
-
-       -  Y'\ :sub:`03`
-
-    -  .. row 2
-
-       -  start + 4:
-
-       -  Y'\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Y'\ :sub:`12`
-
-       -  Y'\ :sub:`13`
-
-    -  .. row 3
-
-       -  start + 8:
-
-       -  Y'\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Y'\ :sub:`22`
-
-       -  Y'\ :sub:`23`
-
-    -  .. row 4
-
-       -  start + 12:
-
-       -  Y'\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Y'\ :sub:`32`
-
-       -  Y'\ :sub:`33`
-
-    -  .. row 5
-
-       -  start + 16:
-
-       -  Cb\ :sub:`00`
-
-       -  Cr\ :sub:`00`
-
-       -  Cb\ :sub:`01`
-
-       -  Cr\ :sub:`01`
-
-    -  .. row 6
-
-       -  start + 20:
-
-       -  Cb\ :sub:`10`
-
-       -  Cr\ :sub:`10`
-
-       -  Cb\ :sub:`11`
-
-       -  Cr\ :sub:`11`
-
-    -  .. row 7
-
-       -  start + 24:
-
-       -  Cb\ :sub:`20`
-
-       -  Cr\ :sub:`20`
-
-       -  Cb\ :sub:`21`
-
-       -  Cr\ :sub:`21`
-
-    -  .. row 8
-
-       -  start + 28:
-
-       -  Cb\ :sub:`30`
-
-       -  Cr\ :sub:`30`
-
-       -  Cb\ :sub:`31`
-
-       -  Cr\ :sub:`31`
+    * - start + 0:
+      - Y'\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Y'\ :sub:`02`
+      - Y'\ :sub:`03`
+    * - start + 4:
+      - Y'\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Y'\ :sub:`12`
+      - Y'\ :sub:`13`
+    * - start + 8:
+      - Y'\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Y'\ :sub:`22`
+      - Y'\ :sub:`23`
+    * - start + 12:
+      - Y'\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Y'\ :sub:`32`
+      - Y'\ :sub:`33`
+    * - start + 16:
+      - Cb\ :sub:`00`
+      - Cr\ :sub:`00`
+      - Cb\ :sub:`01`
+      - Cr\ :sub:`01`
+    * - start + 20:
+      - Cb\ :sub:`10`
+      - Cr\ :sub:`10`
+      - Cb\ :sub:`11`
+      - Cr\ :sub:`11`
+    * - start + 24:
+      - Cb\ :sub:`20`
+      - Cr\ :sub:`20`
+      - Cb\ :sub:`21`
+      - Cr\ :sub:`21`
+    * - start + 28:
+      - Cb\ :sub:`30`
+      - Cr\ :sub:`30`
+      - Cb\ :sub:`31`
+      - Cr\ :sub:`31`
 
 
 **Color Sample Location..**
@@ -147,124 +87,67 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -
-       -  0
-
-       -
-       -  1
-
-       -  2
-
-       -
-       -  3
-
-    -  .. row 2
-
-       -  0
-
-       -  Y
-
-       -
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 3
-
-       -
-       -
-       -  C
-
-       -
-       -
-       -  C
-
-       -
-
-    -  .. row 4
-
-       -  1
-
-       -  Y
-
-       -
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 5
-
-       -
-       -
-       -  C
-
-       -
-       -
-       -  C
-
-       -
-
-    -  .. row 6
-
-       -
-
-    -  .. row 7
-
-       -  2
-
-       -  Y
-
-       -
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 8
-
-       -
-       -
-       -  C
-
-       -
-       -
-       -  C
-
-       -
-
-    -  .. row 9
-
-       -  3
-
-       -  Y
-
-       -
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 10
-
-       -
-       -
-       -  C
-
-       -
-       -
-       -  C
-
-       -
+    * -
+      - 0
+      -
+      - 1
+      - 2
+      -
+      - 3
+    * - 0
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
+    * -
+      -
+      - C
+      -
+      -
+      - C
+      -
+    * - 1
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
+    * -
+      -
+      - C
+      -
+      -
+      - C
+      -
+    * -
+    * - 2
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
+    * -
+      -
+      - C
+      -
+      -
+      - C
+      -
+    * - 3
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
+    * -
+      -
+      - C
+      -
+      -
+      - C
+      -
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv16m.rst b/Documentation/media/uapi/v4l/pixfmt-nv16m.rst
index b7ee068..4d46ab3 100644
--- a/Documentation/media/uapi/v4l/pixfmt-nv16m.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-nv16m.rst
@@ -7,8 +7,6 @@
 V4L2_PIX_FMT_NV16M ('NM16'), V4L2_PIX_FMT_NV61M ('NM61')
 ********************************************************
 
-*man V4L2_PIX_FMT_NV16M(2)*
-
 V4L2_PIX_FMT_NV61M
 Variation of ``V4L2_PIX_FMT_NV16`` and ``V4L2_PIX_FMT_NV61`` with planes
 non contiguous in memory.
@@ -38,112 +36,51 @@
 Each cell is one byte.
 
 
-
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start0 + 0:
-
-       -  Y'\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Y'\ :sub:`02`
-
-       -  Y'\ :sub:`03`
-
-    -  .. row 2
-
-       -  start0 + 4:
-
-       -  Y'\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Y'\ :sub:`12`
-
-       -  Y'\ :sub:`13`
-
-    -  .. row 3
-
-       -  start0 + 8:
-
-       -  Y'\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Y'\ :sub:`22`
-
-       -  Y'\ :sub:`23`
-
-    -  .. row 4
-
-       -  start0 + 12:
-
-       -  Y'\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Y'\ :sub:`32`
-
-       -  Y'\ :sub:`33`
-
-    -  .. row 5
-
-       -
-
-    -  .. row 6
-
-       -  start1 + 0:
-
-       -  Cb\ :sub:`00`
-
-       -  Cr\ :sub:`00`
-
-       -  Cb\ :sub:`02`
-
-       -  Cr\ :sub:`02`
-
-    -  .. row 7
-
-       -  start1 + 4:
-
-       -  Cb\ :sub:`10`
-
-       -  Cr\ :sub:`10`
-
-       -  Cb\ :sub:`12`
-
-       -  Cr\ :sub:`12`
-
-    -  .. row 8
-
-       -  start1 + 8:
-
-       -  Cb\ :sub:`20`
-
-       -  Cr\ :sub:`20`
-
-       -  Cb\ :sub:`22`
-
-       -  Cr\ :sub:`22`
-
-    -  .. row 9
-
-       -  start1 + 12:
-
-       -  Cb\ :sub:`30`
-
-       -  Cr\ :sub:`30`
-
-       -  Cb\ :sub:`32`
-
-       -  Cr\ :sub:`32`
+    * - start0 + 0:
+      - Y'\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Y'\ :sub:`02`
+      - Y'\ :sub:`03`
+    * - start0 + 4:
+      - Y'\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Y'\ :sub:`12`
+      - Y'\ :sub:`13`
+    * - start0 + 8:
+      - Y'\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Y'\ :sub:`22`
+      - Y'\ :sub:`23`
+    * - start0 + 12:
+      - Y'\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Y'\ :sub:`32`
+      - Y'\ :sub:`33`
+    * -
+    * - start1 + 0:
+      - Cb\ :sub:`00`
+      - Cr\ :sub:`00`
+      - Cb\ :sub:`02`
+      - Cr\ :sub:`02`
+    * - start1 + 4:
+      - Cb\ :sub:`10`
+      - Cr\ :sub:`10`
+      - Cb\ :sub:`12`
+      - Cr\ :sub:`12`
+    * - start1 + 8:
+      - Cb\ :sub:`20`
+      - Cr\ :sub:`20`
+      - Cb\ :sub:`22`
+      - Cr\ :sub:`22`
+    * - start1 + 12:
+      - Cb\ :sub:`30`
+      - Cr\ :sub:`30`
+      - Cb\ :sub:`32`
+      - Cr\ :sub:`32`
 
 
 **Color Sample Location..**
@@ -154,124 +91,67 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -
-       -  0
-
-       -
-       -  1
-
-       -  2
-
-       -
-       -  3
-
-    -  .. row 2
-
-       -  0
-
-       -  Y
-
-       -
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 3
-
-       -
-       -
-       -  C
-
-       -
-       -
-       -  C
-
-       -
-
-    -  .. row 4
-
-       -  1
-
-       -  Y
-
-       -
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 5
-
-       -
-       -
-       -  C
-
-       -
-       -
-       -  C
-
-       -
-
-    -  .. row 6
-
-       -
-
-    -  .. row 7
-
-       -  2
-
-       -  Y
-
-       -
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 8
-
-       -
-       -
-       -  C
-
-       -
-       -
-       -  C
-
-       -
-
-    -  .. row 9
-
-       -  3
-
-       -  Y
-
-       -
-       -  Y
-
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 10
-
-       -
-       -
-       -  C
-
-       -
-       -
-       -  C
-
-       -
+    * -
+      - 0
+      -
+      - 1
+      - 2
+      -
+      - 3
+    * - 0
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
+    * -
+      -
+      - C
+      -
+      -
+      - C
+      -
+    * - 1
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
+    * -
+      -
+      - C
+      -
+      -
+      - C
+      -
+    * -
+    * - 2
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
+    * -
+      -
+      - C
+      -
+      -
+      - C
+      -
+    * - 3
+      - Y
+      -
+      - Y
+      - Y
+      -
+      - Y
+    * -
+      -
+      - C
+      -
+      -
+      - C
+      -
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv24.rst b/Documentation/media/uapi/v4l/pixfmt-nv24.rst
index db98f47..bda973e 100644
--- a/Documentation/media/uapi/v4l/pixfmt-nv24.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-nv24.rst
@@ -7,8 +7,6 @@
 V4L2_PIX_FMT_NV24 ('NV24'), V4L2_PIX_FMT_NV42 ('NV42')
 ******************************************************
 
-*man V4L2_PIX_FMT_NV24(2)*
-
 V4L2_PIX_FMT_NV42
 Formats with full horizontal and vertical chroma resolutions, also known
 as YUV 4:4:4. One luminance and one chrominance plane with alternating
@@ -35,137 +33,63 @@
 Each cell is one byte.
 
 
-
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Y'\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Y'\ :sub:`02`
-
-       -  Y'\ :sub:`03`
-
-    -  .. row 2
-
-       -  start + 4:
-
-       -  Y'\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Y'\ :sub:`12`
-
-       -  Y'\ :sub:`13`
-
-    -  .. row 3
-
-       -  start + 8:
-
-       -  Y'\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Y'\ :sub:`22`
-
-       -  Y'\ :sub:`23`
-
-    -  .. row 4
-
-       -  start + 12:
-
-       -  Y'\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Y'\ :sub:`32`
-
-       -  Y'\ :sub:`33`
-
-    -  .. row 5
-
-       -  start + 16:
-
-       -  Cb\ :sub:`00`
-
-       -  Cr\ :sub:`00`
-
-       -  Cb\ :sub:`01`
-
-       -  Cr\ :sub:`01`
-
-       -  Cb\ :sub:`02`
-
-       -  Cr\ :sub:`02`
-
-       -  Cb\ :sub:`03`
-
-       -  Cr\ :sub:`03`
-
-    -  .. row 6
-
-       -  start + 24:
-
-       -  Cb\ :sub:`10`
-
-       -  Cr\ :sub:`10`
-
-       -  Cb\ :sub:`11`
-
-       -  Cr\ :sub:`11`
-
-       -  Cb\ :sub:`12`
-
-       -  Cr\ :sub:`12`
-
-       -  Cb\ :sub:`13`
-
-       -  Cr\ :sub:`13`
-
-    -  .. row 7
-
-       -  start + 32:
-
-       -  Cb\ :sub:`20`
-
-       -  Cr\ :sub:`20`
-
-       -  Cb\ :sub:`21`
-
-       -  Cr\ :sub:`21`
-
-       -  Cb\ :sub:`22`
-
-       -  Cr\ :sub:`22`
-
-       -  Cb\ :sub:`23`
-
-       -  Cr\ :sub:`23`
-
-    -  .. row 8
-
-       -  start + 40:
-
-       -  Cb\ :sub:`30`
-
-       -  Cr\ :sub:`30`
-
-       -  Cb\ :sub:`31`
-
-       -  Cr\ :sub:`31`
-
-       -  Cb\ :sub:`32`
-
-       -  Cr\ :sub:`32`
-
-       -  Cb\ :sub:`33`
-
-       -  Cr\ :sub:`33`
+    * - start + 0:
+      - Y'\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Y'\ :sub:`02`
+      - Y'\ :sub:`03`
+    * - start + 4:
+      - Y'\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Y'\ :sub:`12`
+      - Y'\ :sub:`13`
+    * - start + 8:
+      - Y'\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Y'\ :sub:`22`
+      - Y'\ :sub:`23`
+    * - start + 12:
+      - Y'\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Y'\ :sub:`32`
+      - Y'\ :sub:`33`
+    * - start + 16:
+      - Cb\ :sub:`00`
+      - Cr\ :sub:`00`
+      - Cb\ :sub:`01`
+      - Cr\ :sub:`01`
+      - Cb\ :sub:`02`
+      - Cr\ :sub:`02`
+      - Cb\ :sub:`03`
+      - Cr\ :sub:`03`
+    * - start + 24:
+      - Cb\ :sub:`10`
+      - Cr\ :sub:`10`
+      - Cb\ :sub:`11`
+      - Cr\ :sub:`11`
+      - Cb\ :sub:`12`
+      - Cr\ :sub:`12`
+      - Cb\ :sub:`13`
+      - Cr\ :sub:`13`
+    * - start + 32:
+      - Cb\ :sub:`20`
+      - Cr\ :sub:`20`
+      - Cb\ :sub:`21`
+      - Cr\ :sub:`21`
+      - Cb\ :sub:`22`
+      - Cr\ :sub:`22`
+      - Cb\ :sub:`23`
+      - Cr\ :sub:`23`
+    * - start + 40:
+      - Cb\ :sub:`30`
+      - Cr\ :sub:`30`
+      - Cb\ :sub:`31`
+      - Cr\ :sub:`31`
+      - Cb\ :sub:`32`
+      - Cr\ :sub:`32`
+      - Cb\ :sub:`33`
+      - Cr\ :sub:`33`
diff --git a/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst b/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst
index c7aa2e9..84fcbcb 100644
--- a/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst
@@ -6,11 +6,6 @@
 Packed RGB formats
 ******************
 
-*man Packed RGB formats(2)*
-
-Packed RGB formats
-
-
 Description
 ===========
 
@@ -19,6 +14,11 @@
 These are all packed-pixel formats, meaning all the data for a pixel lie
 next to each other in memory.
 
+.. raw:: latex
+
+    \begin{adjustbox}{width=\columnwidth}
+
+.. tabularcolumns:: |p{4.5cm}|p{3.3cm}|p{0.7cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{1.7cm}|
 
 .. _rgb-formats:
 
@@ -26,924 +26,511 @@
     :header-rows:  2
     :stub-columns: 0
 
+    * - Identifier
+      - Code
+      -
+      - :cspan:`7` Byte 0 in memory
+      -
+      - :cspan:`7` Byte 1
+      -
+      - :cspan:`7` Byte 2
+      -
+      - :cspan:`7` Byte 3
+    * -
+      -
+      - Bit
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+      -
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+      -
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+      -
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+    * .. _V4L2-PIX-FMT-RGB332:
 
-    -  .. row 1
+      - ``V4L2_PIX_FMT_RGB332``
+      - 'RGB1'
+      -
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _V4L2-PIX-FMT-ARGB444:
 
-       -  Identifier
+      - ``V4L2_PIX_FMT_ARGB444``
+      - 'AR12'
+      -
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      -
+      - a\ :sub:`3`
+      - a\ :sub:`2`
+      - a\ :sub:`1`
+      - a\ :sub:`0`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+    * .. _V4L2-PIX-FMT-XRGB444:
 
-       -  Code
+      - ``V4L2_PIX_FMT_XRGB444``
+      - 'XR12'
+      -
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      -
+      -
+      -
+      -
+      -
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+    * .. _V4L2-PIX-FMT-ARGB555:
 
-       -
-       -  :cspan:`7` Byte 0 in memory
+      - ``V4L2_PIX_FMT_ARGB555``
+      - 'AR15'
+      -
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      -
+      - a
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+    * .. _V4L2-PIX-FMT-XRGB555:
 
-       -  :cspan:`7` Byte 1
+      - ``V4L2_PIX_FMT_XRGB555``
+      - 'XR15'
+      -
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      -
+      -
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+    * .. _V4L2-PIX-FMT-RGB565:
 
-       -  :cspan:`7` Byte 2
+      - ``V4L2_PIX_FMT_RGB565``
+      - 'RGBP'
+      -
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      -
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+    * .. _V4L2-PIX-FMT-ARGB555X:
 
-       -  :cspan:`7` Byte 3
+      - ``V4L2_PIX_FMT_ARGB555X``
+      - 'AR15' | (1 << 31)
+      -
+      - a
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      -
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _V4L2-PIX-FMT-XRGB555X:
 
-    -  .. row 2
+      - ``V4L2_PIX_FMT_XRGB555X``
+      - 'XR15' | (1 << 31)
+      -
+      -
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      -
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _V4L2-PIX-FMT-RGB565X:
 
-       -
-       -
-       -  Bit
+      - ``V4L2_PIX_FMT_RGB565X``
+      - 'RGBR'
+      -
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      -
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _V4L2-PIX-FMT-BGR24:
 
-       -  7
+      - ``V4L2_PIX_FMT_BGR24``
+      - 'BGR3'
+      -
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      -
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      -
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+    * .. _V4L2-PIX-FMT-RGB24:
 
-       -  6
+      - ``V4L2_PIX_FMT_RGB24``
+      - 'RGB3'
+      -
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      -
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      -
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _V4L2-PIX-FMT-BGR666:
 
-       -  5
+      - ``V4L2_PIX_FMT_BGR666``
+      - 'BGRH'
+      -
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      -
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      -
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+    * .. _V4L2-PIX-FMT-ABGR32:
 
-       -  4
+      - ``V4L2_PIX_FMT_ABGR32``
+      - 'AR24'
+      -
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      -
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      -
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      -
+      - a\ :sub:`7`
+      - a\ :sub:`6`
+      - a\ :sub:`5`
+      - a\ :sub:`4`
+      - a\ :sub:`3`
+      - a\ :sub:`2`
+      - a\ :sub:`1`
+      - a\ :sub:`0`
+    * .. _V4L2-PIX-FMT-XBGR32:
 
-       -  3
+      - ``V4L2_PIX_FMT_XBGR32``
+      - 'XR24'
+      -
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      -
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      -
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+    * .. _V4L2-PIX-FMT-ARGB32:
 
-       -  2
+      - ``V4L2_PIX_FMT_ARGB32``
+      - 'BA24'
+      -
+      - a\ :sub:`7`
+      - a\ :sub:`6`
+      - a\ :sub:`5`
+      - a\ :sub:`4`
+      - a\ :sub:`3`
+      - a\ :sub:`2`
+      - a\ :sub:`1`
+      - a\ :sub:`0`
+      -
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      -
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      -
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _V4L2-PIX-FMT-XRGB32:
 
-       -  1
+      - ``V4L2_PIX_FMT_XRGB32``
+      - 'BX24'
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      -
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      -
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
 
-       -  0
+.. raw:: latex
 
-       -
-       -  7
+    \end{adjustbox}\newline\newline
 
-       -  6
-
-       -  5
-
-       -  4
-
-       -  3
-
-       -  2
-
-       -  1
-
-       -  0
-
-       -
-       -  7
-
-       -  6
-
-       -  5
-
-       -  4
-
-       -  3
-
-       -  2
-
-       -  1
-
-       -  0
-
-       -
-       -  7
-
-       -  6
-
-       -  5
-
-       -  4
-
-       -  3
-
-       -  2
-
-       -  1
-
-       -  0
-
-    -  .. _V4L2-PIX-FMT-RGB332:
-
-       -  ``V4L2_PIX_FMT_RGB332``
-
-       -  'RGB1'
-
-       -
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _V4L2-PIX-FMT-ARGB444:
-
-       -  ``V4L2_PIX_FMT_ARGB444``
-
-       -  'AR12'
-
-       -
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -
-       -  a\ :sub:`3`
-
-       -  a\ :sub:`2`
-
-       -  a\ :sub:`1`
-
-       -  a\ :sub:`0`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-    -  .. _V4L2-PIX-FMT-XRGB444:
-
-       -  ``V4L2_PIX_FMT_XRGB444``
-
-       -  'XR12'
-
-       -
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-    -  .. _V4L2-PIX-FMT-ARGB555:
-
-       -  ``V4L2_PIX_FMT_ARGB555``
-
-       -  'AR15'
-
-       -
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -
-       -  a
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-    -  .. _V4L2-PIX-FMT-XRGB555:
-
-       -  ``V4L2_PIX_FMT_XRGB555``
-
-       -  'XR15'
-
-       -
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -
-       -  -
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-    -  .. _V4L2-PIX-FMT-RGB565:
-
-       -  ``V4L2_PIX_FMT_RGB565``
-
-       -  'RGBP'
-
-       -
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-    -  .. _V4L2-PIX-FMT-ARGB555X:
-
-       -  ``V4L2_PIX_FMT_ARGB555X``
-
-       -  'AR15' | (1 << 31)
-
-       -
-       -  a
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _V4L2-PIX-FMT-XRGB555X:
-
-       -  ``V4L2_PIX_FMT_XRGB555X``
-
-       -  'XR15' | (1 << 31)
-
-       -
-       -  -
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _V4L2-PIX-FMT-RGB565X:
-
-       -  ``V4L2_PIX_FMT_RGB565X``
-
-       -  'RGBR'
-
-       -
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _V4L2-PIX-FMT-BGR24:
-
-       -  ``V4L2_PIX_FMT_BGR24``
-
-       -  'BGR3'
-
-       -
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-    -  .. _V4L2-PIX-FMT-RGB24:
-
-       -  ``V4L2_PIX_FMT_RGB24``
-
-       -  'RGB3'
-
-       -
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _V4L2-PIX-FMT-BGR666:
-
-       -  ``V4L2_PIX_FMT_BGR666``
-
-       -  'BGRH'
-
-       -
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-    -  .. _V4L2-PIX-FMT-ABGR32:
-
-       -  ``V4L2_PIX_FMT_ABGR32``
-
-       -  'AR24'
-
-       -
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -
-       -  a\ :sub:`7`
-
-       -  a\ :sub:`6`
-
-       -  a\ :sub:`5`
-
-       -  a\ :sub:`4`
-
-       -  a\ :sub:`3`
-
-       -  a\ :sub:`2`
-
-       -  a\ :sub:`1`
-
-       -  a\ :sub:`0`
-
-    -  .. _V4L2-PIX-FMT-XBGR32:
-
-       -  ``V4L2_PIX_FMT_XBGR32``
-
-       -  'XR24'
-
-       -
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-    -  .. _V4L2-PIX-FMT-ARGB32:
-
-       -  ``V4L2_PIX_FMT_ARGB32``
-
-       -  'BA24'
-
-       -
-       -  a\ :sub:`7`
-
-       -  a\ :sub:`6`
-
-       -  a\ :sub:`5`
-
-       -  a\ :sub:`4`
-
-       -  a\ :sub:`3`
-
-       -  a\ :sub:`2`
-
-       -  a\ :sub:`1`
-
-       -  a\ :sub:`0`
-
-       -
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _V4L2-PIX-FMT-XRGB32:
-
-       -  ``V4L2_PIX_FMT_XRGB32``
-
-       -  'BX24'
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-
-Bit 7 is the most significant bit.
+.. note:: Bit 7 is the most significant bit.
 
 The usage and value of the alpha bits (a) in the ARGB and ABGR formats
 (collectively referred to as alpha formats) depend on the device type
@@ -973,125 +560,73 @@
 Each cell is one byte.
 
 
+.. raw:: latex
 
-.. flat-table::
+    \newline\newline\begin{adjustbox}{width=\columnwidth}
+
+.. tabularcolumns:: |p{4.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.3cm}|
+
+.. flat-table:: RGB byte order
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1 1 1 1 1 1 1 1 1
+    :widths:       11 3 3 3 3 3 3 3 3 3 3 3 3
 
+    * - start + 0:
+      - B\ :sub:`00`
+      - G\ :sub:`00`
+      - R\ :sub:`00`
+      - B\ :sub:`01`
+      - G\ :sub:`01`
+      - R\ :sub:`01`
+      - B\ :sub:`02`
+      - G\ :sub:`02`
+      - R\ :sub:`02`
+      - B\ :sub:`03`
+      - G\ :sub:`03`
+      - R\ :sub:`03`
+    * - start + 12:
+      - B\ :sub:`10`
+      - G\ :sub:`10`
+      - R\ :sub:`10`
+      - B\ :sub:`11`
+      - G\ :sub:`11`
+      - R\ :sub:`11`
+      - B\ :sub:`12`
+      - G\ :sub:`12`
+      - R\ :sub:`12`
+      - B\ :sub:`13`
+      - G\ :sub:`13`
+      - R\ :sub:`13`
+    * - start + 24:
+      - B\ :sub:`20`
+      - G\ :sub:`20`
+      - R\ :sub:`20`
+      - B\ :sub:`21`
+      - G\ :sub:`21`
+      - R\ :sub:`21`
+      - B\ :sub:`22`
+      - G\ :sub:`22`
+      - R\ :sub:`22`
+      - B\ :sub:`23`
+      - G\ :sub:`23`
+      - R\ :sub:`23`
+    * - start + 36:
+      - B\ :sub:`30`
+      - G\ :sub:`30`
+      - R\ :sub:`30`
+      - B\ :sub:`31`
+      - G\ :sub:`31`
+      - R\ :sub:`31`
+      - B\ :sub:`32`
+      - G\ :sub:`32`
+      - R\ :sub:`32`
+      - B\ :sub:`33`
+      - G\ :sub:`33`
+      - R\ :sub:`33`
 
-    -  .. row 1
+.. raw:: latex
 
-       -  start + 0:
-
-       -  B\ :sub:`00`
-
-       -  G\ :sub:`00`
-
-       -  R\ :sub:`00`
-
-       -  B\ :sub:`01`
-
-       -  G\ :sub:`01`
-
-       -  R\ :sub:`01`
-
-       -  B\ :sub:`02`
-
-       -  G\ :sub:`02`
-
-       -  R\ :sub:`02`
-
-       -  B\ :sub:`03`
-
-       -  G\ :sub:`03`
-
-       -  R\ :sub:`03`
-
-    -  .. row 2
-
-       -  start + 12:
-
-       -  B\ :sub:`10`
-
-       -  G\ :sub:`10`
-
-       -  R\ :sub:`10`
-
-       -  B\ :sub:`11`
-
-       -  G\ :sub:`11`
-
-       -  R\ :sub:`11`
-
-       -  B\ :sub:`12`
-
-       -  G\ :sub:`12`
-
-       -  R\ :sub:`12`
-
-       -  B\ :sub:`13`
-
-       -  G\ :sub:`13`
-
-       -  R\ :sub:`13`
-
-    -  .. row 3
-
-       -  start + 24:
-
-       -  B\ :sub:`20`
-
-       -  G\ :sub:`20`
-
-       -  R\ :sub:`20`
-
-       -  B\ :sub:`21`
-
-       -  G\ :sub:`21`
-
-       -  R\ :sub:`21`
-
-       -  B\ :sub:`22`
-
-       -  G\ :sub:`22`
-
-       -  R\ :sub:`22`
-
-       -  B\ :sub:`23`
-
-       -  G\ :sub:`23`
-
-       -  R\ :sub:`23`
-
-    -  .. row 4
-
-       -  start + 36:
-
-       -  B\ :sub:`30`
-
-       -  G\ :sub:`30`
-
-       -  R\ :sub:`30`
-
-       -  B\ :sub:`31`
-
-       -  G\ :sub:`31`
-
-       -  R\ :sub:`31`
-
-       -  B\ :sub:`32`
-
-       -  G\ :sub:`32`
-
-       -  R\ :sub:`32`
-
-       -  B\ :sub:`33`
-
-       -  G\ :sub:`33`
-
-       -  R\ :sub:`33`
-
+    \end{adjustbox}\newline\newline
 
 Formats defined in :ref:`rgb-formats-deprecated` are deprecated and
 must not be used by new drivers. They are documented here for reference.
@@ -1099,369 +634,216 @@
 either the corresponding ARGB or XRGB format, depending on the driver.
 
 
+.. raw:: latex
+
+    \begin{adjustbox}{width=\columnwidth}
+
+.. tabularcolumns:: |p{4.2cm}|p{1.0cm}|p{0.7cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{1.7cm}|
+
 .. _rgb-formats-deprecated:
 
 .. flat-table:: Deprecated Packed RGB Image Formats
     :header-rows:  2
     :stub-columns: 0
 
+    * - Identifier
+      - Code
+      -
+      - :cspan:`7` Byte 0 in memory
+      -
+      - :cspan:`7` Byte 1
+      -
+      - :cspan:`7` Byte 2
+      -
+      - :cspan:`7` Byte 3
+    * -
+      -
+      - Bit
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+      -
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+      -
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+      -
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+    * .. _V4L2-PIX-FMT-RGB444:
 
-    -  .. row 1
+      - ``V4L2_PIX_FMT_RGB444``
+      - 'R444'
+      -
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      -
+      - a\ :sub:`3`
+      - a\ :sub:`2`
+      - a\ :sub:`1`
+      - a\ :sub:`0`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+    * .. _V4L2-PIX-FMT-RGB555:
 
-       -  Identifier
+      - ``V4L2_PIX_FMT_RGB555``
+      - 'RGBO'
+      -
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      -
+      - a
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+    * .. _V4L2-PIX-FMT-RGB555X:
 
-       -  Code
+      - ``V4L2_PIX_FMT_RGB555X``
+      - 'RGBQ'
+      -
+      - a
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      -
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _V4L2-PIX-FMT-BGR32:
 
-       -
-       -  :cspan:`7` Byte 0 in memory
+      - ``V4L2_PIX_FMT_BGR32``
+      - 'BGR4'
+      -
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      -
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      -
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      -
+      - a\ :sub:`7`
+      - a\ :sub:`6`
+      - a\ :sub:`5`
+      - a\ :sub:`4`
+      - a\ :sub:`3`
+      - a\ :sub:`2`
+      - a\ :sub:`1`
+      - a\ :sub:`0`
+    * .. _V4L2-PIX-FMT-RGB32:
 
-       -  :cspan:`7` Byte 1
+      - ``V4L2_PIX_FMT_RGB32``
+      - 'RGB4'
+      -
+      - a\ :sub:`7`
+      - a\ :sub:`6`
+      - a\ :sub:`5`
+      - a\ :sub:`4`
+      - a\ :sub:`3`
+      - a\ :sub:`2`
+      - a\ :sub:`1`
+      - a\ :sub:`0`
+      -
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      -
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      -
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
 
-       -  :cspan:`7` Byte 2
+.. raw:: latex
 
-       -  :cspan:`7` Byte 3
-
-    -  .. row 2
-
-       -
-       -
-       -  Bit
-
-       -  7
-
-       -  6
-
-       -  5
-
-       -  4
-
-       -  3
-
-       -  2
-
-       -  1
-
-       -  0
-
-       -
-       -  7
-
-       -  6
-
-       -  5
-
-       -  4
-
-       -  3
-
-       -  2
-
-       -  1
-
-       -  0
-
-       -
-       -  7
-
-       -  6
-
-       -  5
-
-       -  4
-
-       -  3
-
-       -  2
-
-       -  1
-
-       -  0
-
-       -
-       -  7
-
-       -  6
-
-       -  5
-
-       -  4
-
-       -  3
-
-       -  2
-
-       -  1
-
-       -  0
-
-    -  .. _V4L2-PIX-FMT-RGB444:
-
-       -  ``V4L2_PIX_FMT_RGB444``
-
-       -  'R444'
-
-       -
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -
-       -  a\ :sub:`3`
-
-       -  a\ :sub:`2`
-
-       -  a\ :sub:`1`
-
-       -  a\ :sub:`0`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-    -  .. _V4L2-PIX-FMT-RGB555:
-
-       -  ``V4L2_PIX_FMT_RGB555``
-
-       -  'RGBO'
-
-       -
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -
-       -  a
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-    -  .. _V4L2-PIX-FMT-RGB555X:
-
-       -  ``V4L2_PIX_FMT_RGB555X``
-
-       -  'RGBQ'
-
-       -
-       -  a
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _V4L2-PIX-FMT-BGR32:
-
-       -  ``V4L2_PIX_FMT_BGR32``
-
-       -  'BGR4'
-
-       -
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -
-       -  a\ :sub:`7`
-
-       -  a\ :sub:`6`
-
-       -  a\ :sub:`5`
-
-       -  a\ :sub:`4`
-
-       -  a\ :sub:`3`
-
-       -  a\ :sub:`2`
-
-       -  a\ :sub:`1`
-
-       -  a\ :sub:`0`
-
-    -  .. _V4L2-PIX-FMT-RGB32:
-
-       -  ``V4L2_PIX_FMT_RGB32``
-
-       -  'RGB4'
-
-       -
-       -  a\ :sub:`7`
-
-       -  a\ :sub:`6`
-
-       -  a\ :sub:`5`
-
-       -  a\ :sub:`4`
-
-       -  a\ :sub:`3`
-
-       -  a\ :sub:`2`
-
-       -  a\ :sub:`1`
-
-       -  a\ :sub:`0`
-
-       -
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
+    \end{adjustbox}\newline\newline
 
 A test utility to determine which RGB formats a driver actually supports
 is available from the LinuxTV v4l-dvb repository. See
diff --git a/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst b/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst
index 5471645..ebc8fcc 100644
--- a/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst
@@ -6,311 +6,188 @@
 Packed YUV formats
 ******************
 
-*man Packed YUV formats(2)*
-
-Packed YUV formats
-
-
 Description
 ===========
 
 Similar to the packed RGB formats these formats store the Y, Cb and Cr
 component of each pixel in one 16 or 32 bit word.
 
+.. raw:: latex
 
+    \newline\newline\begin{adjustbox}{width=\columnwidth}
+
+.. _packed-yuv-formats:
+
+.. tabularcolumns:: |p{4.5cm}|p{3.3cm}|p{0.7cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{1.7cm}|
 
 .. flat-table:: Packed YUV Image Formats
     :header-rows:  2
     :stub-columns: 0
 
+    * - Identifier
+      - Code
+      -
+      - :cspan:`7` Byte 0 in memory
+      -
+      - :cspan:`7` Byte 1
+      -
+      - :cspan:`7` Byte 2
+      -
+      - :cspan:`7` Byte 3
+    * -
+      -
+      - Bit
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+      -
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+      -
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+      -
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+    * .. _V4L2-PIX-FMT-YUV444:
 
-    -  .. row 1
+      - ``V4L2_PIX_FMT_YUV444``
+      - 'Y444'
+      -
+      - Cb\ :sub:`3`
+      - Cb\ :sub:`2`
+      - Cb\ :sub:`1`
+      - Cb\ :sub:`0`
+      - Cr\ :sub:`3`
+      - Cr\ :sub:`2`
+      - Cr\ :sub:`1`
+      - Cr\ :sub:`0`
+      -
+      - a\ :sub:`3`
+      - a\ :sub:`2`
+      - a\ :sub:`1`
+      - a\ :sub:`0`
+      - Y'\ :sub:`3`
+      - Y'\ :sub:`2`
+      - Y'\ :sub:`1`
+      - Y'\ :sub:`0`
+    * .. _V4L2-PIX-FMT-YUV555:
 
-       -  Identifier
+      - ``V4L2_PIX_FMT_YUV555``
+      - 'YUVO'
+      -
+      - Cb\ :sub:`2`
+      - Cb\ :sub:`1`
+      - Cb\ :sub:`0`
+      - Cr\ :sub:`4`
+      - Cr\ :sub:`3`
+      - Cr\ :sub:`2`
+      - Cr\ :sub:`1`
+      - Cr\ :sub:`0`
+      -
+      - a
+      - Y'\ :sub:`4`
+      - Y'\ :sub:`3`
+      - Y'\ :sub:`2`
+      - Y'\ :sub:`1`
+      - Y'\ :sub:`0`
+      - Cb\ :sub:`4`
+      - Cb\ :sub:`3`
+    * .. _V4L2-PIX-FMT-YUV565:
 
-       -  Code
+      - ``V4L2_PIX_FMT_YUV565``
+      - 'YUVP'
+      -
+      - Cb\ :sub:`2`
+      - Cb\ :sub:`1`
+      - Cb\ :sub:`0`
+      - Cr\ :sub:`4`
+      - Cr\ :sub:`3`
+      - Cr\ :sub:`2`
+      - Cr\ :sub:`1`
+      - Cr\ :sub:`0`
+      -
+      - Y'\ :sub:`4`
+      - Y'\ :sub:`3`
+      - Y'\ :sub:`2`
+      - Y'\ :sub:`1`
+      - Y'\ :sub:`0`
+      - Cb\ :sub:`5`
+      - Cb\ :sub:`4`
+      - Cb\ :sub:`3`
+    * .. _V4L2-PIX-FMT-YUV32:
 
-       -
-       -  :cspan:`7` Byte 0 in memory
+      - ``V4L2_PIX_FMT_YUV32``
+      - 'YUV4'
+      -
+      - a\ :sub:`7`
+      - a\ :sub:`6`
+      - a\ :sub:`5`
+      - a\ :sub:`4`
+      - a\ :sub:`3`
+      - a\ :sub:`2`
+      - a\ :sub:`1`
+      - a\ :sub:`0`
+      -
+      - Y'\ :sub:`7`
+      - Y'\ :sub:`6`
+      - Y'\ :sub:`5`
+      - Y'\ :sub:`4`
+      - Y'\ :sub:`3`
+      - Y'\ :sub:`2`
+      - Y'\ :sub:`1`
+      - Y'\ :sub:`0`
+      -
+      - Cb\ :sub:`7`
+      - Cb\ :sub:`6`
+      - Cb\ :sub:`5`
+      - Cb\ :sub:`4`
+      - Cb\ :sub:`3`
+      - Cb\ :sub:`2`
+      - Cb\ :sub:`1`
+      - Cb\ :sub:`0`
+      -
+      - Cr\ :sub:`7`
+      - Cr\ :sub:`6`
+      - Cr\ :sub:`5`
+      - Cr\ :sub:`4`
+      - Cr\ :sub:`3`
+      - Cr\ :sub:`2`
+      - Cr\ :sub:`1`
+      - Cr\ :sub:`0`
 
-       -
-       -  :cspan:`7` Byte 1
+.. raw:: latex
 
-       -
-       -  :cspan:`7` Byte 2
+    \end{adjustbox}\newline\newline
 
-       -
-       -  :cspan:`7` Byte 3
+.. note::
 
-    -  .. row 2
+    #) Bit 7 is the most significant bit;
 
-       -
-       -
-       -  Bit
-
-       -  7
-
-       -  6
-
-       -  5
-
-       -  4
-
-       -  3
-
-       -  2
-
-       -  1
-
-       -  0
-
-       -
-       -  7
-
-       -  6
-
-       -  5
-
-       -  4
-
-       -  3
-
-       -  2
-
-       -  1
-
-       -  0
-
-       -
-       -  7
-
-       -  6
-
-       -  5
-
-       -  4
-
-       -  3
-
-       -  2
-
-       -  1
-
-       -  0
-
-       -
-       -  7
-
-       -  6
-
-       -  5
-
-       -  4
-
-       -  3
-
-       -  2
-
-       -  1
-
-       -  0
-
-    -  .. _V4L2-PIX-FMT-YUV444:
-
-       -  ``V4L2_PIX_FMT_YUV444``
-
-       -  'Y444'
-
-       -
-       -  Cb\ :sub:`3`
-
-       -  Cb\ :sub:`2`
-
-       -  Cb\ :sub:`1`
-
-       -  Cb\ :sub:`0`
-
-       -  Cr\ :sub:`3`
-
-       -  Cr\ :sub:`2`
-
-       -  Cr\ :sub:`1`
-
-       -  Cr\ :sub:`0`
-
-       -
-       -  a\ :sub:`3`
-
-       -  a\ :sub:`2`
-
-       -  a\ :sub:`1`
-
-       -  a\ :sub:`0`
-
-       -  Y'\ :sub:`3`
-
-       -  Y'\ :sub:`2`
-
-       -  Y'\ :sub:`1`
-
-       -  Y'\ :sub:`0`
-
-    -  .. _V4L2-PIX-FMT-YUV555:
-
-       -  ``V4L2_PIX_FMT_YUV555``
-
-       -  'YUVO'
-
-       -
-       -  Cb\ :sub:`2`
-
-       -  Cb\ :sub:`1`
-
-       -  Cb\ :sub:`0`
-
-       -  Cr\ :sub:`4`
-
-       -  Cr\ :sub:`3`
-
-       -  Cr\ :sub:`2`
-
-       -  Cr\ :sub:`1`
-
-       -  Cr\ :sub:`0`
-
-       -
-       -  a
-
-       -  Y'\ :sub:`4`
-
-       -  Y'\ :sub:`3`
-
-       -  Y'\ :sub:`2`
-
-       -  Y'\ :sub:`1`
-
-       -  Y'\ :sub:`0`
-
-       -  Cb\ :sub:`4`
-
-       -  Cb\ :sub:`3`
-
-    -  .. _V4L2-PIX-FMT-YUV565:
-
-       -  ``V4L2_PIX_FMT_YUV565``
-
-       -  'YUVP'
-
-       -
-       -  Cb\ :sub:`2`
-
-       -  Cb\ :sub:`1`
-
-       -  Cb\ :sub:`0`
-
-       -  Cr\ :sub:`4`
-
-       -  Cr\ :sub:`3`
-
-       -  Cr\ :sub:`2`
-
-       -  Cr\ :sub:`1`
-
-       -  Cr\ :sub:`0`
-
-       -
-       -  Y'\ :sub:`4`
-
-       -  Y'\ :sub:`3`
-
-       -  Y'\ :sub:`2`
-
-       -  Y'\ :sub:`1`
-
-       -  Y'\ :sub:`0`
-
-       -  Cb\ :sub:`5`
-
-       -  Cb\ :sub:`4`
-
-       -  Cb\ :sub:`3`
-
-    -  .. _V4L2-PIX-FMT-YUV32:
-
-       -  ``V4L2_PIX_FMT_YUV32``
-
-       -  'YUV4'
-
-       -
-       -  a\ :sub:`7`
-
-       -  a\ :sub:`6`
-
-       -  a\ :sub:`5`
-
-       -  a\ :sub:`4`
-
-       -  a\ :sub:`3`
-
-       -  a\ :sub:`2`
-
-       -  a\ :sub:`1`
-
-       -  a\ :sub:`0`
-
-       -
-       -  Y'\ :sub:`7`
-
-       -  Y'\ :sub:`6`
-
-       -  Y'\ :sub:`5`
-
-       -  Y'\ :sub:`4`
-
-       -  Y'\ :sub:`3`
-
-       -  Y'\ :sub:`2`
-
-       -  Y'\ :sub:`1`
-
-       -  Y'\ :sub:`0`
-
-       -
-       -  Cb\ :sub:`7`
-
-       -  Cb\ :sub:`6`
-
-       -  Cb\ :sub:`5`
-
-       -  Cb\ :sub:`4`
-
-       -  Cb\ :sub:`3`
-
-       -  Cb\ :sub:`2`
-
-       -  Cb\ :sub:`1`
-
-       -  Cb\ :sub:`0`
-
-       -
-       -  Cr\ :sub:`7`
-
-       -  Cr\ :sub:`6`
-
-       -  Cr\ :sub:`5`
-
-       -  Cr\ :sub:`4`
-
-       -  Cr\ :sub:`3`
-
-       -  Cr\ :sub:`2`
-
-       -  Cr\ :sub:`1`
-
-       -  Cr\ :sub:`0`
-
-
-Bit 7 is the most significant bit. The value of a = alpha bits is
-undefined when reading from the driver, ignored when writing to the
-driver, except when alpha blending has been negotiated for a
-:ref:`Video Overlay <overlay>` or :ref:`Video Output Overlay <osd>`.
+    #) The value of a = alpha bits is undefined when reading from the driver,
+       ignored when writing to the driver, except when alpha blending has
+       been negotiated for a :ref:`Video Overlay <overlay>` or
+       :ref:`Video Output Overlay <osd>`.
diff --git a/Documentation/media/uapi/v4l/pixfmt-reserved.rst b/Documentation/media/uapi/v4l/pixfmt-reserved.rst
index 9a5704b..bd7bf3d 100644
--- a/Documentation/media/uapi/v4l/pixfmt-reserved.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-reserved.rst
@@ -17,6 +17,8 @@
 please make a proposal on the linux-media mailing list.
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _reserved-formats:
 
 .. flat-table:: Reserved Image Formats
@@ -24,320 +26,218 @@
     :stub-columns: 0
     :widths:       3 1 4
 
+    * - Identifier
+      - Code
+      - Details
+    * .. _V4L2-PIX-FMT-DV:
 
-    -  .. row 1
+      - ``V4L2_PIX_FMT_DV``
+      - 'dvsd'
+      - unknown
+    * .. _V4L2-PIX-FMT-ET61X251:
 
-       -  Identifier
+      - ``V4L2_PIX_FMT_ET61X251``
+      - 'E625'
+      - Compressed format of the ET61X251 driver.
+    * .. _V4L2-PIX-FMT-HI240:
 
-       -  Code
+      - ``V4L2_PIX_FMT_HI240``
+      - 'HI24'
+      - 8 bit RGB format used by the BTTV driver.
+    * .. _V4L2-PIX-FMT-HM12:
 
-       -  Details
+      - ``V4L2_PIX_FMT_HM12``
+      - 'HM12'
+      - YUV 4:2:0 format used by the IVTV driver,
+	`http://www.ivtvdriver.org/ <http://www.ivtvdriver.org/>`__
 
-    -  .. _V4L2-PIX-FMT-DV:
+	The format is documented in the kernel sources in the file
+	``Documentation/video4linux/cx2341x/README.hm12``
+    * .. _V4L2-PIX-FMT-CPIA1:
 
-       -  ``V4L2_PIX_FMT_DV``
+      - ``V4L2_PIX_FMT_CPIA1``
+      - 'CPIA'
+      - YUV format used by the gspca cpia1 driver.
+    * .. _V4L2-PIX-FMT-JPGL:
 
-       -  'dvsd'
+      - ``V4L2_PIX_FMT_JPGL``
+      - 'JPGL'
+      - JPEG-Light format (Pegasus Lossless JPEG) used in Divio webcams NW
+	80x.
+    * .. _V4L2-PIX-FMT-SPCA501:
 
-       -  unknown
+      - ``V4L2_PIX_FMT_SPCA501``
+      - 'S501'
+      - YUYV per line used by the gspca driver.
+    * .. _V4L2-PIX-FMT-SPCA505:
 
-    -  .. _V4L2-PIX-FMT-ET61X251:
+      - ``V4L2_PIX_FMT_SPCA505``
+      - 'S505'
+      - YYUV per line used by the gspca driver.
+    * .. _V4L2-PIX-FMT-SPCA508:
 
-       -  ``V4L2_PIX_FMT_ET61X251``
+      - ``V4L2_PIX_FMT_SPCA508``
+      - 'S508'
+      - YUVY per line used by the gspca driver.
+    * .. _V4L2-PIX-FMT-SPCA561:
 
-       -  'E625'
+      - ``V4L2_PIX_FMT_SPCA561``
+      - 'S561'
+      - Compressed GBRG Bayer format used by the gspca driver.
+    * .. _V4L2-PIX-FMT-PAC207:
 
-       -  Compressed format of the ET61X251 driver.
+      - ``V4L2_PIX_FMT_PAC207``
+      - 'P207'
+      - Compressed BGGR Bayer format used by the gspca driver.
+    * .. _V4L2-PIX-FMT-MR97310A:
 
-    -  .. _V4L2-PIX-FMT-HI240:
+      - ``V4L2_PIX_FMT_MR97310A``
+      - 'M310'
+      - Compressed BGGR Bayer format used by the gspca driver.
+    * .. _V4L2-PIX-FMT-JL2005BCD:
 
-       -  ``V4L2_PIX_FMT_HI240``
+      - ``V4L2_PIX_FMT_JL2005BCD``
+      - 'JL20'
+      - JPEG compressed RGGB Bayer format used by the gspca driver.
+    * .. _V4L2-PIX-FMT-OV511:
 
-       -  'HI24'
+      - ``V4L2_PIX_FMT_OV511``
+      - 'O511'
+      - OV511 JPEG format used by the gspca driver.
+    * .. _V4L2-PIX-FMT-OV518:
 
-       -  8 bit RGB format used by the BTTV driver.
+      - ``V4L2_PIX_FMT_OV518``
+      - 'O518'
+      - OV518 JPEG format used by the gspca driver.
+    * .. _V4L2-PIX-FMT-PJPG:
 
-    -  .. _V4L2-PIX-FMT-HM12:
+      - ``V4L2_PIX_FMT_PJPG``
+      - 'PJPG'
+      - Pixart 73xx JPEG format used by the gspca driver.
+    * .. _V4L2-PIX-FMT-SE401:
 
-       -  ``V4L2_PIX_FMT_HM12``
+      - ``V4L2_PIX_FMT_SE401``
+      - 'S401'
+      - Compressed RGB format used by the gspca se401 driver
+    * .. _V4L2-PIX-FMT-SQ905C:
 
-       -  'HM12'
+      - ``V4L2_PIX_FMT_SQ905C``
+      - '905C'
+      - Compressed RGGB bayer format used by the gspca driver.
+    * .. _V4L2-PIX-FMT-MJPEG:
 
-       -  YUV 4:2:0 format used by the IVTV driver,
-	  `http://www.ivtvdriver.org/ <http://www.ivtvdriver.org/>`__
+      - ``V4L2_PIX_FMT_MJPEG``
+      - 'MJPG'
+      - Compressed format used by the Zoran driver
+    * .. _V4L2-PIX-FMT-PWC1:
 
-	  The format is documented in the kernel sources in the file
-	  ``Documentation/video4linux/cx2341x/README.hm12``
+      - ``V4L2_PIX_FMT_PWC1``
+      - 'PWC1'
+      - Compressed format of the PWC driver.
+    * .. _V4L2-PIX-FMT-PWC2:
 
-    -  .. _V4L2-PIX-FMT-CPIA1:
+      - ``V4L2_PIX_FMT_PWC2``
+      - 'PWC2'
+      - Compressed format of the PWC driver.
+    * .. _V4L2-PIX-FMT-SN9C10X:
 
-       -  ``V4L2_PIX_FMT_CPIA1``
+      - ``V4L2_PIX_FMT_SN9C10X``
+      - 'S910'
+      - Compressed format of the SN9C102 driver.
+    * .. _V4L2-PIX-FMT-SN9C20X-I420:
 
-       -  'CPIA'
+      - ``V4L2_PIX_FMT_SN9C20X_I420``
+      - 'S920'
+      - YUV 4:2:0 format of the gspca sn9c20x driver.
+    * .. _V4L2-PIX-FMT-SN9C2028:
 
-       -  YUV format used by the gspca cpia1 driver.
+      - ``V4L2_PIX_FMT_SN9C2028``
+      - 'SONX'
+      - Compressed GBRG bayer format of the gspca sn9c2028 driver.
+    * .. _V4L2-PIX-FMT-STV0680:
 
-    -  .. _V4L2-PIX-FMT-JPGL:
+      - ``V4L2_PIX_FMT_STV0680``
+      - 'S680'
+      - Bayer format of the gspca stv0680 driver.
+    * .. _V4L2-PIX-FMT-WNVA:
 
-       -  ``V4L2_PIX_FMT_JPGL``
+      - ``V4L2_PIX_FMT_WNVA``
+      - 'WNVA'
+      - Used by the Winnov Videum driver,
+	`http://www.thedirks.org/winnov/ <http://www.thedirks.org/winnov/>`__
+    * .. _V4L2-PIX-FMT-TM6000:
 
-       -  'JPGL'
+      - ``V4L2_PIX_FMT_TM6000``
+      - 'TM60'
+      - Used by Trident tm6000
+    * .. _V4L2-PIX-FMT-CIT-YYVYUY:
 
-       -  JPEG-Light format (Pegasus Lossless JPEG) used in Divio webcams NW
-	  80x.
+      - ``V4L2_PIX_FMT_CIT_YYVYUY``
+      - 'CITV'
+      - Used by xirlink CIT, found at IBM webcams.
 
-    -  .. _V4L2-PIX-FMT-SPCA501:
+	Uses one line of Y then 1 line of VYUY
+    * .. _V4L2-PIX-FMT-KONICA420:
 
-       -  ``V4L2_PIX_FMT_SPCA501``
+      - ``V4L2_PIX_FMT_KONICA420``
+      - 'KONI'
+      - Used by Konica webcams.
 
-       -  'S501'
+	YUV420 planar in blocks of 256 pixels.
+    * .. _V4L2-PIX-FMT-YYUV:
 
-       -  YUYV per line used by the gspca driver.
+      - ``V4L2_PIX_FMT_YYUV``
+      - 'YYUV'
+      - unknown
+    * .. _V4L2-PIX-FMT-Y4:
 
-    -  .. _V4L2-PIX-FMT-SPCA505:
+      - ``V4L2_PIX_FMT_Y4``
+      - 'Y04 '
+      - Old 4-bit greyscale format. Only the most significant 4 bits of
+	each byte are used, the other bits are set to 0.
+    * .. _V4L2-PIX-FMT-Y6:
 
-       -  ``V4L2_PIX_FMT_SPCA505``
+      - ``V4L2_PIX_FMT_Y6``
+      - 'Y06 '
+      - Old 6-bit greyscale format. Only the most significant 6 bits of
+	each byte are used, the other bits are set to 0.
+    * .. _V4L2-PIX-FMT-S5C-UYVY-JPG:
 
-       -  'S505'
+      - ``V4L2_PIX_FMT_S5C_UYVY_JPG``
+      - 'S5CI'
+      - Two-planar format used by Samsung S5C73MX cameras. The first plane
+	contains interleaved JPEG and UYVY image data, followed by meta
+	data in form of an array of offsets to the UYVY data blocks. The
+	actual pointer array follows immediately the interleaved JPEG/UYVY
+	data, the number of entries in this array equals the height of the
+	UYVY image. Each entry is a 4-byte unsigned integer in big endian
+	order and it's an offset to a single pixel line of the UYVY image.
+	The first plane can start either with JPEG or UYVY data chunk. The
+	size of a single UYVY block equals the UYVY image's width
+	multiplied by 2. The size of a JPEG chunk depends on the image and
+	can vary with each line.
 
-       -  YYUV per line used by the gspca driver.
+	The second plane, at an offset of 4084 bytes, contains a 4-byte
+	offset to the pointer array in the first plane. This offset is
+	followed by a 4-byte value indicating size of the pointer array.
+	All numbers in the second plane are also in big endian order.
+	Remaining data in the second plane is undefined. The information
+	in the second plane allows to easily find location of the pointer
+	array, which can be different for each frame. The size of the
+	pointer array is constant for given UYVY image height.
 
-    -  .. _V4L2-PIX-FMT-SPCA508:
+	In order to extract UYVY and JPEG frames an application can
+	initially set a data pointer to the start of first plane and then
+	add an offset from the first entry of the pointers table. Such a
+	pointer indicates start of an UYVY image pixel line. Whole UYVY
+	line can be copied to a separate buffer. These steps should be
+	repeated for each line, i.e. the number of entries in the pointer
+	array. Anything what's in between the UYVY lines is JPEG data and
+	should be concatenated to form the JPEG stream.
 
-       -  ``V4L2_PIX_FMT_SPCA508``
 
-       -  'S508'
 
-       -  YUVY per line used by the gspca driver.
-
-    -  .. _V4L2-PIX-FMT-SPCA561:
-
-       -  ``V4L2_PIX_FMT_SPCA561``
-
-       -  'S561'
-
-       -  Compressed GBRG Bayer format used by the gspca driver.
-
-    -  .. _V4L2-PIX-FMT-PAC207:
-
-       -  ``V4L2_PIX_FMT_PAC207``
-
-       -  'P207'
-
-       -  Compressed BGGR Bayer format used by the gspca driver.
-
-    -  .. _V4L2-PIX-FMT-MR97310A:
-
-       -  ``V4L2_PIX_FMT_MR97310A``
-
-       -  'M310'
-
-       -  Compressed BGGR Bayer format used by the gspca driver.
-
-    -  .. _V4L2-PIX-FMT-JL2005BCD:
-
-       -  ``V4L2_PIX_FMT_JL2005BCD``
-
-       -  'JL20'
-
-       -  JPEG compressed RGGB Bayer format used by the gspca driver.
-
-    -  .. _V4L2-PIX-FMT-OV511:
-
-       -  ``V4L2_PIX_FMT_OV511``
-
-       -  'O511'
-
-       -  OV511 JPEG format used by the gspca driver.
-
-    -  .. _V4L2-PIX-FMT-OV518:
-
-       -  ``V4L2_PIX_FMT_OV518``
-
-       -  'O518'
-
-       -  OV518 JPEG format used by the gspca driver.
-
-    -  .. _V4L2-PIX-FMT-PJPG:
-
-       -  ``V4L2_PIX_FMT_PJPG``
-
-       -  'PJPG'
-
-       -  Pixart 73xx JPEG format used by the gspca driver.
-
-    -  .. _V4L2-PIX-FMT-SE401:
-
-       -  ``V4L2_PIX_FMT_SE401``
-
-       -  'S401'
-
-       -  Compressed RGB format used by the gspca se401 driver
-
-    -  .. _V4L2-PIX-FMT-SQ905C:
-
-       -  ``V4L2_PIX_FMT_SQ905C``
-
-       -  '905C'
-
-       -  Compressed RGGB bayer format used by the gspca driver.
-
-    -  .. _V4L2-PIX-FMT-MJPEG:
-
-       -  ``V4L2_PIX_FMT_MJPEG``
-
-       -  'MJPG'
-
-       -  Compressed format used by the Zoran driver
-
-    -  .. _V4L2-PIX-FMT-PWC1:
-
-       -  ``V4L2_PIX_FMT_PWC1``
-
-       -  'PWC1'
-
-       -  Compressed format of the PWC driver.
-
-    -  .. _V4L2-PIX-FMT-PWC2:
-
-       -  ``V4L2_PIX_FMT_PWC2``
-
-       -  'PWC2'
-
-       -  Compressed format of the PWC driver.
-
-    -  .. _V4L2-PIX-FMT-SN9C10X:
-
-       -  ``V4L2_PIX_FMT_SN9C10X``
-
-       -  'S910'
-
-       -  Compressed format of the SN9C102 driver.
-
-    -  .. _V4L2-PIX-FMT-SN9C20X-I420:
-
-       -  ``V4L2_PIX_FMT_SN9C20X_I420``
-
-       -  'S920'
-
-       -  YUV 4:2:0 format of the gspca sn9c20x driver.
-
-    -  .. _V4L2-PIX-FMT-SN9C2028:
-
-       -  ``V4L2_PIX_FMT_SN9C2028``
-
-       -  'SONX'
-
-       -  Compressed GBRG bayer format of the gspca sn9c2028 driver.
-
-    -  .. _V4L2-PIX-FMT-STV0680:
-
-       -  ``V4L2_PIX_FMT_STV0680``
-
-       -  'S680'
-
-       -  Bayer format of the gspca stv0680 driver.
-
-    -  .. _V4L2-PIX-FMT-WNVA:
-
-       -  ``V4L2_PIX_FMT_WNVA``
-
-       -  'WNVA'
-
-       -  Used by the Winnov Videum driver,
-	  `http://www.thedirks.org/winnov/ <http://www.thedirks.org/winnov/>`__
-
-    -  .. _V4L2-PIX-FMT-TM6000:
-
-       -  ``V4L2_PIX_FMT_TM6000``
-
-       -  'TM60'
-
-       -  Used by Trident tm6000
-
-    -  .. _V4L2-PIX-FMT-CIT-YYVYUY:
-
-       -  ``V4L2_PIX_FMT_CIT_YYVYUY``
-
-       -  'CITV'
-
-       -  Used by xirlink CIT, found at IBM webcams.
-
-	  Uses one line of Y then 1 line of VYUY
-
-    -  .. _V4L2-PIX-FMT-KONICA420:
-
-       -  ``V4L2_PIX_FMT_KONICA420``
-
-       -  'KONI'
-
-       -  Used by Konica webcams.
-
-	  YUV420 planar in blocks of 256 pixels.
-
-    -  .. _V4L2-PIX-FMT-YYUV:
-
-       -  ``V4L2_PIX_FMT_YYUV``
-
-       -  'YYUV'
-
-       -  unknown
-
-    -  .. _V4L2-PIX-FMT-Y4:
-
-       -  ``V4L2_PIX_FMT_Y4``
-
-       -  'Y04 '
-
-       -  Old 4-bit greyscale format. Only the most significant 4 bits of
-	  each byte are used, the other bits are set to 0.
-
-    -  .. _V4L2-PIX-FMT-Y6:
-
-       -  ``V4L2_PIX_FMT_Y6``
-
-       -  'Y06 '
-
-       -  Old 6-bit greyscale format. Only the most significant 6 bits of
-	  each byte are used, the other bits are set to 0.
-
-    -  .. _V4L2-PIX-FMT-S5C-UYVY-JPG:
-
-       -  ``V4L2_PIX_FMT_S5C_UYVY_JPG``
-
-       -  'S5CI'
-
-       -  Two-planar format used by Samsung S5C73MX cameras. The first plane
-	  contains interleaved JPEG and UYVY image data, followed by meta
-	  data in form of an array of offsets to the UYVY data blocks. The
-	  actual pointer array follows immediately the interleaved JPEG/UYVY
-	  data, the number of entries in this array equals the height of the
-	  UYVY image. Each entry is a 4-byte unsigned integer in big endian
-	  order and it's an offset to a single pixel line of the UYVY image.
-	  The first plane can start either with JPEG or UYVY data chunk. The
-	  size of a single UYVY block equals the UYVY image's width
-	  multiplied by 2. The size of a JPEG chunk depends on the image and
-	  can vary with each line.
-
-	  The second plane, at an offset of 4084 bytes, contains a 4-byte
-	  offset to the pointer array in the first plane. This offset is
-	  followed by a 4-byte value indicating size of the pointer array.
-	  All numbers in the second plane are also in big endian order.
-	  Remaining data in the second plane is undefined. The information
-	  in the second plane allows to easily find location of the pointer
-	  array, which can be different for each frame. The size of the
-	  pointer array is constant for given UYVY image height.
-
-	  In order to extract UYVY and JPEG frames an application can
-	  initially set a data pointer to the start of first plane and then
-	  add an offset from the first entry of the pointers table. Such a
-	  pointer indicates start of an UYVY image pixel line. Whole UYVY
-	  line can be copied to a separate buffer. These steps should be
-	  repeated for each line, i.e. the number of entries in the pointer
-	  array. Anything what's in between the UYVY lines is JPEG data and
-	  should be concatenated to form the JPEG stream.
-
-
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
 
 .. _format-flags:
 
@@ -346,15 +246,10 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_PIX_FMT_FLAG_PREMUL_ALPHA``
-
-       -  0x00000001
-
-       -  The color values are premultiplied by the alpha channel value. For
-	  example, if a light blue pixel with 50% transparency was described
-	  by RGBA values (128, 192, 255, 128), the same pixel described with
-	  premultiplied colors would be described by RGBA values (64, 96,
-	  128, 128)
+    * - ``V4L2_PIX_FMT_FLAG_PREMUL_ALPHA``
+      - 0x00000001
+      - The color values are premultiplied by the alpha channel value. For
+	example, if a light blue pixel with 50% transparency was described
+	by RGBA values (128, 192, 255, 128), the same pixel described with
+	premultiplied colors would be described by RGBA values (64, 96,
+	128, 128)
diff --git a/Documentation/media/uapi/v4l/pixfmt-rgb.rst b/Documentation/media/uapi/v4l/pixfmt-rgb.rst
index 4b3651c..9cc9808 100644
--- a/Documentation/media/uapi/v4l/pixfmt-rgb.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-rgb.rst
@@ -11,9 +11,6 @@
     :maxdepth: 1
 
     pixfmt-packed-rgb
-    pixfmt-sbggr8
-    pixfmt-sgbrg8
-    pixfmt-sgrbg8
     pixfmt-srggb8
     pixfmt-sbggr16
     pixfmt-srggb10
diff --git a/Documentation/media/uapi/v4l/pixfmt-sbggr16.rst b/Documentation/media/uapi/v4l/pixfmt-sbggr16.rst
index 14446ed..6f7f327 100644
--- a/Documentation/media/uapi/v4l/pixfmt-sbggr16.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-sbggr16.rst
@@ -6,8 +6,6 @@
 V4L2_PIX_FMT_SBGGR16 ('BYR2')
 *****************************
 
-*man V4L2_PIX_FMT_SBGGR16(2)*
-
 Bayer RGB format
 
 
@@ -19,96 +17,46 @@
 has a depth of 16 bits. The least significant byte is stored at lower
 memory addresses (little-endian).
 
-..note:: The actual sampling precision may be lower than 16 bits,
-    for example 10 bits per pixel with values in tange 0 to 1023.
-
 **Byte Order.**
 Each cell is one byte.
 
-
-
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  B\ :sub:`00low`
-
-       -  B\ :sub:`00high`
-
-       -  G\ :sub:`01low`
-
-       -  G\ :sub:`01high`
-
-       -  B\ :sub:`02low`
-
-       -  B\ :sub:`02high`
-
-       -  G\ :sub:`03low`
-
-       -  G\ :sub:`03high`
-
-    -  .. row 2
-
-       -  start + 8:
-
-       -  G\ :sub:`10low`
-
-       -  G\ :sub:`10high`
-
-       -  R\ :sub:`11low`
-
-       -  R\ :sub:`11high`
-
-       -  G\ :sub:`12low`
-
-       -  G\ :sub:`12high`
-
-       -  R\ :sub:`13low`
-
-       -  R\ :sub:`13high`
-
-    -  .. row 3
-
-       -  start + 16:
-
-       -  B\ :sub:`20low`
-
-       -  B\ :sub:`20high`
-
-       -  G\ :sub:`21low`
-
-       -  G\ :sub:`21high`
-
-       -  B\ :sub:`22low`
-
-       -  B\ :sub:`22high`
-
-       -  G\ :sub:`23low`
-
-       -  G\ :sub:`23high`
-
-    -  .. row 4
-
-       -  start + 24:
-
-       -  G\ :sub:`30low`
-
-       -  G\ :sub:`30high`
-
-       -  R\ :sub:`31low`
-
-       -  R\ :sub:`31high`
-
-       -  G\ :sub:`32low`
-
-       -  G\ :sub:`32high`
-
-       -  R\ :sub:`33low`
-
-       -  R\ :sub:`33high`
+    * - start + 0:
+      - B\ :sub:`00low`
+      - B\ :sub:`00high`
+      - G\ :sub:`01low`
+      - G\ :sub:`01high`
+      - B\ :sub:`02low`
+      - B\ :sub:`02high`
+      - G\ :sub:`03low`
+      - G\ :sub:`03high`
+    * - start + 8:
+      - G\ :sub:`10low`
+      - G\ :sub:`10high`
+      - R\ :sub:`11low`
+      - R\ :sub:`11high`
+      - G\ :sub:`12low`
+      - G\ :sub:`12high`
+      - R\ :sub:`13low`
+      - R\ :sub:`13high`
+    * - start + 16:
+      - B\ :sub:`20low`
+      - B\ :sub:`20high`
+      - G\ :sub:`21low`
+      - G\ :sub:`21high`
+      - B\ :sub:`22low`
+      - B\ :sub:`22high`
+      - G\ :sub:`23low`
+      - G\ :sub:`23high`
+    * - start + 24:
+      - G\ :sub:`30low`
+      - G\ :sub:`30high`
+      - R\ :sub:`31low`
+      - R\ :sub:`31high`
+      - G\ :sub:`32low`
+      - G\ :sub:`32high`
+      - R\ :sub:`33low`
+      - R\ :sub:`33high`
diff --git a/Documentation/media/uapi/v4l/pixfmt-sbggr8.rst b/Documentation/media/uapi/v4l/pixfmt-sbggr8.rst
deleted file mode 100644
index db4c523..0000000
--- a/Documentation/media/uapi/v4l/pixfmt-sbggr8.rst
+++ /dev/null
@@ -1,81 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _V4L2-PIX-FMT-SBGGR8:
-
-****************************
-V4L2_PIX_FMT_SBGGR8 ('BA81')
-****************************
-
-*man V4L2_PIX_FMT_SBGGR8(2)*
-
-Bayer RGB format
-
-
-Description
-===========
-
-This is commonly the native format of digital cameras, reflecting the
-arrangement of sensors on the CCD device. Only one red, green or blue
-value is given for each pixel. Missing components must be interpolated
-from neighbouring pixels. From left to right the first row consists of a
-blue and green value, the second row of a green and red value. This
-scheme repeats to the right and down for every two columns and rows.
-
-**Byte Order.**
-Each cell is one byte.
-
-
-
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
-    :widths:       2 1 1 1 1
-
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  B\ :sub:`00`
-
-       -  G\ :sub:`01`
-
-       -  B\ :sub:`02`
-
-       -  G\ :sub:`03`
-
-    -  .. row 2
-
-       -  start + 4:
-
-       -  G\ :sub:`10`
-
-       -  R\ :sub:`11`
-
-       -  G\ :sub:`12`
-
-       -  R\ :sub:`13`
-
-    -  .. row 3
-
-       -  start + 8:
-
-       -  B\ :sub:`20`
-
-       -  G\ :sub:`21`
-
-       -  B\ :sub:`22`
-
-       -  G\ :sub:`23`
-
-    -  .. row 4
-
-       -  start + 12:
-
-       -  G\ :sub:`30`
-
-       -  R\ :sub:`31`
-
-       -  G\ :sub:`32`
-
-       -  R\ :sub:`33`
diff --git a/Documentation/media/uapi/v4l/pixfmt-sdr-cs08.rst b/Documentation/media/uapi/v4l/pixfmt-sdr-cs08.rst
index 2736275..179894f 100644
--- a/Documentation/media/uapi/v4l/pixfmt-sdr-cs08.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-sdr-cs08.rst
@@ -6,8 +6,6 @@
 V4L2_SDR_FMT_CS8 ('CS08')
 *************************
 
-*man V4L2_SDR_FMT_CS8(2)*
-
 Complex signed 8-bit IQ sample
 
 
@@ -22,22 +20,11 @@
 **Byte Order.**
 Each cell is one byte.
 
-
-
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  I'\ :sub:`0`
-
-    -  .. row 2
-
-       -  start + 1:
-
-       -  Q'\ :sub:`0`
+    * - start + 0:
+      - I'\ :sub:`0`
+    * - start + 1:
+      - Q'\ :sub:`0`
diff --git a/Documentation/media/uapi/v4l/pixfmt-sdr-cs14le.rst b/Documentation/media/uapi/v4l/pixfmt-sdr-cs14le.rst
index bfe5804..5cf7d38 100644
--- a/Documentation/media/uapi/v4l/pixfmt-sdr-cs14le.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-sdr-cs14le.rst
@@ -6,8 +6,6 @@
 V4L2_SDR_FMT_CS14LE ('CS14')
 ****************************
 
-*man V4L2_SDR_FMT_CS14LE(2)*
-
 Complex signed 14-bit little endian IQ sample
 
 
@@ -24,25 +22,13 @@
 Each cell is one byte.
 
 
-
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  I'\ :sub:`0[7:0]`
-
-       -  I'\ :sub:`0[13:8]`
-
-    -  .. row 2
-
-       -  start + 2:
-
-       -  Q'\ :sub:`0[7:0]`
-
-       -  Q'\ :sub:`0[13:8]`
+    * - start + 0:
+      - I'\ :sub:`0[7:0]`
+      - I'\ :sub:`0[13:8]`
+    * - start + 2:
+      - Q'\ :sub:`0[7:0]`
+      - Q'\ :sub:`0[13:8]`
diff --git a/Documentation/media/uapi/v4l/pixfmt-sdr-cu08.rst b/Documentation/media/uapi/v4l/pixfmt-sdr-cu08.rst
index 68ad171..fd915b7 100644
--- a/Documentation/media/uapi/v4l/pixfmt-sdr-cu08.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-sdr-cu08.rst
@@ -6,8 +6,6 @@
 V4L2_SDR_FMT_CU8 ('CU08')
 *************************
 
-*man V4L2_SDR_FMT_CU8(2)*
-
 Complex unsigned 8-bit IQ sample
 
 
@@ -22,22 +20,11 @@
 **Byte Order.**
 Each cell is one byte.
 
-
-
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  I'\ :sub:`0`
-
-    -  .. row 2
-
-       -  start + 1:
-
-       -  Q'\ :sub:`0`
+    * - start + 0:
+      - I'\ :sub:`0`
+    * - start + 1:
+      - Q'\ :sub:`0`
diff --git a/Documentation/media/uapi/v4l/pixfmt-sdr-cu16le.rst b/Documentation/media/uapi/v4l/pixfmt-sdr-cu16le.rst
index 2a1c0d4..8922f5b 100644
--- a/Documentation/media/uapi/v4l/pixfmt-sdr-cu16le.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-sdr-cu16le.rst
@@ -6,7 +6,6 @@
 V4L2_SDR_FMT_CU16LE ('CU16')
 ****************************
 
-*man V4L2_SDR_FMT_CU16LE(2)*
 
 Complex unsigned 16-bit little endian IQ sample
 
@@ -23,25 +22,13 @@
 Each cell is one byte.
 
 
-
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  I'\ :sub:`0[7:0]`
-
-       -  I'\ :sub:`0[15:8]`
-
-    -  .. row 2
-
-       -  start + 2:
-
-       -  Q'\ :sub:`0[7:0]`
-
-       -  Q'\ :sub:`0[15:8]`
+    * - start + 0:
+      - I'\ :sub:`0[7:0]`
+      - I'\ :sub:`0[15:8]`
+    * - start + 2:
+      - Q'\ :sub:`0[7:0]`
+      - Q'\ :sub:`0[15:8]`
diff --git a/Documentation/media/uapi/v4l/pixfmt-sdr-ru12le.rst b/Documentation/media/uapi/v4l/pixfmt-sdr-ru12le.rst
index 378581b..5e38338 100644
--- a/Documentation/media/uapi/v4l/pixfmt-sdr-ru12le.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-sdr-ru12le.rst
@@ -6,7 +6,6 @@
 V4L2_SDR_FMT_RU12LE ('RU12')
 ****************************
 
-*man V4L2_SDR_FMT_RU12LE(2)*
 
 Real unsigned 12-bit little endian sample
 
@@ -23,16 +22,11 @@
 
 
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  I'\ :sub:`0[7:0]`
-
-       -  I'\ :sub:`0[11:8]`
+    * - start + 0:
+      - I'\ :sub:`0[7:0]`
+      - I'\ :sub:`0[11:8]`
diff --git a/Documentation/media/uapi/v4l/pixfmt-sgbrg8.rst b/Documentation/media/uapi/v4l/pixfmt-sgbrg8.rst
deleted file mode 100644
index 6345c24..0000000
--- a/Documentation/media/uapi/v4l/pixfmt-sgbrg8.rst
+++ /dev/null
@@ -1,81 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _V4L2-PIX-FMT-SGBRG8:
-
-****************************
-V4L2_PIX_FMT_SGBRG8 ('GBRG')
-****************************
-
-*man V4L2_PIX_FMT_SGBRG8(2)*
-
-Bayer RGB format
-
-
-Description
-===========
-
-This is commonly the native format of digital cameras, reflecting the
-arrangement of sensors on the CCD device. Only one red, green or blue
-value is given for each pixel. Missing components must be interpolated
-from neighbouring pixels. From left to right the first row consists of a
-green and blue value, the second row of a red and green value. This
-scheme repeats to the right and down for every two columns and rows.
-
-**Byte Order.**
-Each cell is one byte.
-
-
-
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
-    :widths:       2 1 1 1 1
-
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  G\ :sub:`00`
-
-       -  B\ :sub:`01`
-
-       -  G\ :sub:`02`
-
-       -  B\ :sub:`03`
-
-    -  .. row 2
-
-       -  start + 4:
-
-       -  R\ :sub:`10`
-
-       -  G\ :sub:`11`
-
-       -  R\ :sub:`12`
-
-       -  G\ :sub:`13`
-
-    -  .. row 3
-
-       -  start + 8:
-
-       -  G\ :sub:`20`
-
-       -  B\ :sub:`21`
-
-       -  G\ :sub:`22`
-
-       -  B\ :sub:`23`
-
-    -  .. row 4
-
-       -  start + 12:
-
-       -  R\ :sub:`30`
-
-       -  G\ :sub:`31`
-
-       -  R\ :sub:`32`
-
-       -  G\ :sub:`33`
diff --git a/Documentation/media/uapi/v4l/pixfmt-sgrbg8.rst b/Documentation/media/uapi/v4l/pixfmt-sgrbg8.rst
deleted file mode 100644
index 51b7b8e..0000000
--- a/Documentation/media/uapi/v4l/pixfmt-sgrbg8.rst
+++ /dev/null
@@ -1,81 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _V4L2-PIX-FMT-SGRBG8:
-
-****************************
-V4L2_PIX_FMT_SGRBG8 ('GRBG')
-****************************
-
-*man V4L2_PIX_FMT_SGRBG8(2)*
-
-Bayer RGB format
-
-
-Description
-===========
-
-This is commonly the native format of digital cameras, reflecting the
-arrangement of sensors on the CCD device. Only one red, green or blue
-value is given for each pixel. Missing components must be interpolated
-from neighbouring pixels. From left to right the first row consists of a
-green and blue value, the second row of a red and green value. This
-scheme repeats to the right and down for every two columns and rows.
-
-**Byte Order.**
-Each cell is one byte.
-
-
-
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
-    :widths:       2 1 1 1 1
-
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  G\ :sub:`00`
-
-       -  R\ :sub:`01`
-
-       -  G\ :sub:`02`
-
-       -  R\ :sub:`03`
-
-    -  .. row 2
-
-       -  start + 4:
-
-       -  B\ :sub:`10`
-
-       -  G\ :sub:`11`
-
-       -  B\ :sub:`12`
-
-       -  G\ :sub:`13`
-
-    -  .. row 3
-
-       -  start + 8:
-
-       -  G\ :sub:`20`
-
-       -  R\ :sub:`21`
-
-       -  G\ :sub:`22`
-
-       -  R\ :sub:`23`
-
-    -  .. row 4
-
-       -  start + 12:
-
-       -  B\ :sub:`30`
-
-       -  G\ :sub:`31`
-
-       -  B\ :sub:`32`
-
-       -  G\ :sub:`33`
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb10.rst b/Documentation/media/uapi/v4l/pixfmt-srggb10.rst
index 44a4956..af2538c 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb10.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb10.rst
@@ -9,7 +9,6 @@
 V4L2_PIX_FMT_SRGGB10 ('RG10'), V4L2_PIX_FMT_SGRBG10 ('BA10'), V4L2_PIX_FMT_SGBRG10 ('GB10'), V4L2_PIX_FMT_SBGGR10 ('BG10'),
 ***************************************************************************************************************************
 
-*man V4L2_PIX_FMT_SRGGB10(2)*
 
 V4L2_PIX_FMT_SGRBG10
 V4L2_PIX_FMT_SGBRG10
@@ -21,100 +20,57 @@
 ===========
 
 These four pixel formats are raw sRGB / Bayer formats with 10 bits per
-colour. Each colour component is stored in a 16-bit word, with 6 unused
-high bits filled with zeros. Each n-pixel row contains n/2 green samples
-and n/2 blue or red samples, with alternating red and blue rows. Bytes
-are stored in memory in little endian order. They are conventionally
-described as GRGR... BGBG..., RGRG... GBGB..., etc. Below is an example
-of one of these formats
+sample. Each sample is stored in a 16-bit word, with 6 unused
+high bits filled with zeros. Each n-pixel row contains n/2 green samples and
+n/2 blue or red samples, with alternating red and blue rows. Bytes are
+stored in memory in little endian order. They are conventionally described
+as GRGR... BGBG..., RGRG... GBGB..., etc. Below is an example of one of
+these formats:
 
 **Byte Order.**
-Each cell is one byte, high 6 bits in high bytes are 0.
+Each cell is one byte, the 6 most significant bits in the high bytes
+are 0.
+
 
 
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  B\ :sub:`00low`
-
-       -  B\ :sub:`00high`
-
-       -  G\ :sub:`01low`
-
-       -  G\ :sub:`01high`
-
-       -  B\ :sub:`02low`
-
-       -  B\ :sub:`02high`
-
-       -  G\ :sub:`03low`
-
-       -  G\ :sub:`03high`
-
-    -  .. row 2
-
-       -  start + 8:
-
-       -  G\ :sub:`10low`
-
-       -  G\ :sub:`10high`
-
-       -  R\ :sub:`11low`
-
-       -  R\ :sub:`11high`
-
-       -  G\ :sub:`12low`
-
-       -  G\ :sub:`12high`
-
-       -  R\ :sub:`13low`
-
-       -  R\ :sub:`13high`
-
-    -  .. row 3
-
-       -  start + 16:
-
-       -  B\ :sub:`20low`
-
-       -  B\ :sub:`20high`
-
-       -  G\ :sub:`21low`
-
-       -  G\ :sub:`21high`
-
-       -  B\ :sub:`22low`
-
-       -  B\ :sub:`22high`
-
-       -  G\ :sub:`23low`
-
-       -  G\ :sub:`23high`
-
-    -  .. row 4
-
-       -  start + 24:
-
-       -  G\ :sub:`30low`
-
-       -  G\ :sub:`30high`
-
-       -  R\ :sub:`31low`
-
-       -  R\ :sub:`31high`
-
-       -  G\ :sub:`32low`
-
-       -  G\ :sub:`32high`
-
-       -  R\ :sub:`33low`
-
-       -  R\ :sub:`33high`
+    * - start + 0:
+      - B\ :sub:`00low`
+      - B\ :sub:`00high`
+      - G\ :sub:`01low`
+      - G\ :sub:`01high`
+      - B\ :sub:`02low`
+      - B\ :sub:`02high`
+      - G\ :sub:`03low`
+      - G\ :sub:`03high`
+    * - start + 8:
+      - G\ :sub:`10low`
+      - G\ :sub:`10high`
+      - R\ :sub:`11low`
+      - R\ :sub:`11high`
+      - G\ :sub:`12low`
+      - G\ :sub:`12high`
+      - R\ :sub:`13low`
+      - R\ :sub:`13high`
+    * - start + 16:
+      - B\ :sub:`20low`
+      - B\ :sub:`20high`
+      - G\ :sub:`21low`
+      - G\ :sub:`21high`
+      - B\ :sub:`22low`
+      - B\ :sub:`22high`
+      - G\ :sub:`23low`
+      - G\ :sub:`23high`
+    * - start + 24:
+      - G\ :sub:`30low`
+      - G\ :sub:`30high`
+      - R\ :sub:`31low`
+      - R\ :sub:`31high`
+      - G\ :sub:`32low`
+      - G\ :sub:`32high`
+      - R\ :sub:`33low`
+      - R\ :sub:`33high`
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb10alaw8.rst b/Documentation/media/uapi/v4l/pixfmt-srggb10alaw8.rst
index 68bae0c..c44e093 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb10alaw8.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb10alaw8.rst
@@ -9,8 +9,6 @@
 V4L2_PIX_FMT_SBGGR10ALAW8 ('aBA8'), V4L2_PIX_FMT_SGBRG10ALAW8 ('aGA8'), V4L2_PIX_FMT_SGRBG10ALAW8 ('agA8'), V4L2_PIX_FMT_SRGGB10ALAW8 ('aRA8'),
 ***********************************************************************************************************************************************
 
-*man V4L2_PIX_FMT_SBGGR10ALAW8(2)*
-
 V4L2_PIX_FMT_SGBRG10ALAW8
 V4L2_PIX_FMT_SGRBG10ALAW8
 V4L2_PIX_FMT_SRGGB10ALAW8
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst b/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst
index d71368f..9a41c8d 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst
@@ -9,7 +9,6 @@
 V4L2_PIX_FMT_SRGGB10P ('pRAA'), V4L2_PIX_FMT_SGRBG10P ('pgAA'), V4L2_PIX_FMT_SGBRG10P ('pGAA'), V4L2_PIX_FMT_SBGGR10P ('pBAA'),
 *******************************************************************************************************************************
 
-*man V4L2_PIX_FMT_SRGGB10P(2)*
 
 V4L2_PIX_FMT_SGRBG10P
 V4L2_PIX_FMT_SGBRG10P
@@ -21,10 +20,10 @@
 ===========
 
 These four pixel formats are packed raw sRGB / Bayer formats with 10
-bits per colour. Every four consecutive colour components are packed
-into 5 bytes. Each of the first 4 bytes contain the 8 high order bits of
-the pixels, and the fifth byte contains the two least significants bits
-of each pixel, in the same order.
+bits per sample. Every four consecutive samples are packed into 5
+bytes. Each of the first 4 bytes contain the 8 high order bits
+of the pixels, and the 5th byte contains the 2 least significants
+bits of each pixel, in the same order.
 
 Each n-pixel row contains n/2 green samples and n/2 blue or red samples,
 with alternating green-red and green-blue rows. They are conventionally
@@ -34,70 +33,46 @@
 **Byte Order.**
 Each cell is one byte.
 
+.. raw:: latex
 
+    \newline\newline\begin{adjustbox}{width=\columnwidth}
+
+.. tabularcolumns:: |p{2.0cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{10.9cm}|
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1 1
+    :widths: 12 8 8 8 8 68
 
+    * - start + 0:
+      - B\ :sub:`00high`
+      - G\ :sub:`01high`
+      - B\ :sub:`02high`
+      - G\ :sub:`03high`
+      - G\ :sub:`03low`\ (bits 7--6) B\ :sub:`02low`\ (bits 5--4)
+	G\ :sub:`01low`\ (bits 3--2) B\ :sub:`00low`\ (bits 1--0)
+    * - start + 5:
+      - G\ :sub:`10high`
+      - R\ :sub:`11high`
+      - G\ :sub:`12high`
+      - R\ :sub:`13high`
+      - R\ :sub:`13low`\ (bits 7--6) G\ :sub:`12low`\ (bits 5--4)
+	R\ :sub:`11low`\ (bits 3--2) G\ :sub:`10low`\ (bits 1--0)
+    * - start + 10:
+      - B\ :sub:`20high`
+      - G\ :sub:`21high`
+      - B\ :sub:`22high`
+      - G\ :sub:`23high`
+      - G\ :sub:`23low`\ (bits 7--6) B\ :sub:`22low`\ (bits 5--4)
+	G\ :sub:`21low`\ (bits 3--2) B\ :sub:`20low`\ (bits 1--0)
+    * - start + 15:
+      - G\ :sub:`30high`
+      - R\ :sub:`31high`
+      - G\ :sub:`32high`
+      - R\ :sub:`33high`
+      - R\ :sub:`33low`\ (bits 7--6) G\ :sub:`32low`\ (bits 5--4)
+	R\ :sub:`31low`\ (bits 3--2) G\ :sub:`30low`\ (bits 1--0)
 
-    -  .. row 1
+.. raw:: latex
 
-       -  start + 0:
-
-       -  B\ :sub:`00high`
-
-       -  G\ :sub:`01high`
-
-       -  B\ :sub:`02high`
-
-       -  G\ :sub:`03high`
-
-       -  B\ :sub:`00low`\ (bits 7--6) G\ :sub:`01low`\ (bits 5--4)
-	  B\ :sub:`02low`\ (bits 3--2) G\ :sub:`03low`\ (bits 1--0)
-
-    -  .. row 2
-
-       -  start + 5:
-
-       -  G\ :sub:`10high`
-
-       -  R\ :sub:`11high`
-
-       -  G\ :sub:`12high`
-
-       -  R\ :sub:`13high`
-
-       -  G\ :sub:`10low`\ (bits 7--6) R\ :sub:`11low`\ (bits 5--4)
-	  G\ :sub:`12low`\ (bits 3--2) R\ :sub:`13low`\ (bits 1--0)
-
-    -  .. row 3
-
-       -  start + 10:
-
-       -  B\ :sub:`20high`
-
-       -  G\ :sub:`21high`
-
-       -  B\ :sub:`22high`
-
-       -  G\ :sub:`23high`
-
-       -  B\ :sub:`20low`\ (bits 7--6) G\ :sub:`21low`\ (bits 5--4)
-	  B\ :sub:`22low`\ (bits 3--2) G\ :sub:`23low`\ (bits 1--0)
-
-    -  .. row 4
-
-       -  start + 15:
-
-       -  G\ :sub:`30high`
-
-       -  R\ :sub:`31high`
-
-       -  G\ :sub:`32high`
-
-       -  R\ :sub:`33high`
-
-       -  G\ :sub:`30low`\ (bits 7--6) R\ :sub:`31low`\ (bits 5--4)
-	  G\ :sub:`32low`\ (bits 3--2) R\ :sub:`33low`\ (bits 1--0)
+    \end{adjustbox}\newline\newline
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb12.rst b/Documentation/media/uapi/v4l/pixfmt-srggb12.rst
index f5303ab..a50ee14 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb12.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb12.rst
@@ -10,7 +10,6 @@
 V4L2_PIX_FMT_SRGGB12 ('RG12'), V4L2_PIX_FMT_SGRBG12 ('BA12'), V4L2_PIX_FMT_SGBRG12 ('GB12'), V4L2_PIX_FMT_SBGGR12 ('BG12'),
 ***************************************************************************************************************************
 
-*man V4L2_PIX_FMT_SRGGB12(2)*
 
 V4L2_PIX_FMT_SGRBG12
 V4L2_PIX_FMT_SGBRG12
@@ -27,95 +26,52 @@
 and n/2 blue or red samples, with alternating red and blue rows. Bytes
 are stored in memory in little endian order. They are conventionally
 described as GRGR... BGBG..., RGRG... GBGB..., etc. Below is an example
-of one of these formats
+of one of these formats:
 
 **Byte Order.**
-Each cell is one byte, high 6 bits in high bytes are 0.
+Each cell is one byte, the 4 most significant bits in the high bytes are
+0.
+
 
 
 
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  B\ :sub:`00low`
-
-       -  B\ :sub:`00high`
-
-       -  G\ :sub:`01low`
-
-       -  G\ :sub:`01high`
-
-       -  B\ :sub:`02low`
-
-       -  B\ :sub:`02high`
-
-       -  G\ :sub:`03low`
-
-       -  G\ :sub:`03high`
-
-    -  .. row 2
-
-       -  start + 8:
-
-       -  G\ :sub:`10low`
-
-       -  G\ :sub:`10high`
-
-       -  R\ :sub:`11low`
-
-       -  R\ :sub:`11high`
-
-       -  G\ :sub:`12low`
-
-       -  G\ :sub:`12high`
-
-       -  R\ :sub:`13low`
-
-       -  R\ :sub:`13high`
-
-    -  .. row 3
-
-       -  start + 16:
-
-       -  B\ :sub:`20low`
-
-       -  B\ :sub:`20high`
-
-       -  G\ :sub:`21low`
-
-       -  G\ :sub:`21high`
-
-       -  B\ :sub:`22low`
-
-       -  B\ :sub:`22high`
-
-       -  G\ :sub:`23low`
-
-       -  G\ :sub:`23high`
-
-    -  .. row 4
-
-       -  start + 24:
-
-       -  G\ :sub:`30low`
-
-       -  G\ :sub:`30high`
-
-       -  R\ :sub:`31low`
-
-       -  R\ :sub:`31high`
-
-       -  G\ :sub:`32low`
-
-       -  G\ :sub:`32high`
-
-       -  R\ :sub:`33low`
-
-       -  R\ :sub:`33high`
+    * - start + 0:
+      - B\ :sub:`00low`
+      - B\ :sub:`00high`
+      - G\ :sub:`01low`
+      - G\ :sub:`01high`
+      - B\ :sub:`02low`
+      - B\ :sub:`02high`
+      - G\ :sub:`03low`
+      - G\ :sub:`03high`
+    * - start + 8:
+      - G\ :sub:`10low`
+      - G\ :sub:`10high`
+      - R\ :sub:`11low`
+      - R\ :sub:`11high`
+      - G\ :sub:`12low`
+      - G\ :sub:`12high`
+      - R\ :sub:`13low`
+      - R\ :sub:`13high`
+    * - start + 16:
+      - B\ :sub:`20low`
+      - B\ :sub:`20high`
+      - G\ :sub:`21low`
+      - G\ :sub:`21high`
+      - B\ :sub:`22low`
+      - B\ :sub:`22high`
+      - G\ :sub:`23low`
+      - G\ :sub:`23high`
+    * - start + 24:
+      - G\ :sub:`30low`
+      - G\ :sub:`30high`
+      - R\ :sub:`31low`
+      - R\ :sub:`31high`
+      - G\ :sub:`32low`
+      - G\ :sub:`32high`
+      - R\ :sub:`33low`
+      - R\ :sub:`33high`
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb8.rst b/Documentation/media/uapi/v4l/pixfmt-srggb8.rst
index e88de4c..a3987d2 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb8.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb8.rst
@@ -1,81 +1,54 @@
 .. -*- coding: utf-8; mode: rst -*-
 
 .. _V4L2-PIX-FMT-SRGGB8:
+.. _v4l2-pix-fmt-sbggr8:
+.. _v4l2-pix-fmt-sgbrg8:
+.. _v4l2-pix-fmt-sgrbg8:
 
-****************************
-V4L2_PIX_FMT_SRGGB8 ('RGGB')
-****************************
+***************************************************************************************************************************
+V4L2_PIX_FMT_SRGGB8 ('RGGB'), V4L2_PIX_FMT_SGRBG8 ('GRBG'), V4L2_PIX_FMT_SGBRG8 ('GBRG'), V4L2_PIX_FMT_SBGGR8 ('BA81'),
+***************************************************************************************************************************
 
-*man V4L2_PIX_FMT_SRGGB8(2)*
 
-Bayer RGB format
+8-bit Bayer formats
 
 
 Description
 ===========
 
-This is commonly the native format of digital cameras, reflecting the
-arrangement of sensors on the CCD device. Only one red, green or blue
-value is given for each pixel. Missing components must be interpolated
-from neighbouring pixels. From left to right the first row consists of a
-red and green value, the second row of a green and blue value. This
-scheme repeats to the right and down for every two columns and rows.
+These four pixel formats are raw sRGB / Bayer formats with 8 bits per
+sample. Each sample is stored in a byte. Each n-pixel row contains n/2
+green samples and n/2 blue or red samples, with alternating red and
+blue rows. They are conventionally described as GRGR... BGBG...,
+RGRG... GBGB..., etc. Below is an example of one of these formats:
 
 **Byte Order.**
 Each cell is one byte.
 
 
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  R\ :sub:`00`
-
-       -  G\ :sub:`01`
-
-       -  R\ :sub:`02`
-
-       -  G\ :sub:`03`
-
-    -  .. row 2
-
-       -  start + 4:
-
-       -  G\ :sub:`10`
-
-       -  B\ :sub:`11`
-
-       -  G\ :sub:`12`
-
-       -  B\ :sub:`13`
-
-    -  .. row 3
-
-       -  start + 8:
-
-       -  R\ :sub:`20`
-
-       -  G\ :sub:`21`
-
-       -  R\ :sub:`22`
-
-       -  G\ :sub:`23`
-
-    -  .. row 4
-
-       -  start + 12:
-
-       -  G\ :sub:`30`
-
-       -  B\ :sub:`31`
-
-       -  G\ :sub:`32`
-
-       -  B\ :sub:`33`
+    * - start + 0:
+      - B\ :sub:`00`
+      - G\ :sub:`01`
+      - B\ :sub:`02`
+      - G\ :sub:`03`
+    * - start + 4:
+      - G\ :sub:`10`
+      - R\ :sub:`11`
+      - G\ :sub:`12`
+      - R\ :sub:`13`
+    * - start + 8:
+      - B\ :sub:`20`
+      - G\ :sub:`21`
+      - B\ :sub:`22`
+      - G\ :sub:`23`
+    * - start + 12:
+      - G\ :sub:`30`
+      - R\ :sub:`31`
+      - G\ :sub:`32`
+      - R\ :sub:`33`
diff --git a/Documentation/media/uapi/v4l/pixfmt-tch-td08.rst b/Documentation/media/uapi/v4l/pixfmt-tch-td08.rst
new file mode 100644
index 0000000..07834cd
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-tch-td08.rst
@@ -0,0 +1,52 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _V4L2-TCH-FMT-DELTA-TD08:
+
+********************************
+V4L2_TCH_FMT_DELTA_TD08 ('TD08')
+********************************
+
+*man V4L2_TCH_FMT_DELTA_TD08(2)*
+
+8-bit signed Touch Delta
+
+Description
+===========
+
+This format represents delta data from a touch controller.
+
+Delta values may range from -128 to 127. Typically the values will vary through
+a small range depending on whether the sensor is touched or not. The full value
+may be seen if one of the touchscreen nodes has a fault or the line is not
+connected.
+
+**Byte Order.**
+Each cell is one byte.
+
+
+
+.. flat-table::
+    :header-rows:  0
+    :stub-columns: 0
+    :widths:       2 1 1 1 1
+
+    * - start + 0:
+      - D'\ :sub:`00`
+      - D'\ :sub:`01`
+      - D'\ :sub:`02`
+      - D'\ :sub:`03`
+    * - start + 4:
+      - D'\ :sub:`10`
+      - D'\ :sub:`11`
+      - D'\ :sub:`12`
+      - D'\ :sub:`13`
+    * - start + 8:
+      - D'\ :sub:`20`
+      - D'\ :sub:`21`
+      - D'\ :sub:`22`
+      - D'\ :sub:`23`
+    * - start + 12:
+      - D'\ :sub:`30`
+      - D'\ :sub:`31`
+      - D'\ :sub:`32`
+      - D'\ :sub:`33`
diff --git a/Documentation/media/uapi/v4l/pixfmt-tch-td16.rst b/Documentation/media/uapi/v4l/pixfmt-tch-td16.rst
new file mode 100644
index 0000000..29ebcf4
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-tch-td16.rst
@@ -0,0 +1,67 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _V4L2-TCH-FMT-DELTA-TD16:
+
+********************************
+V4L2_TCH_FMT_DELTA_TD16 ('TD16')
+********************************
+
+*man V4L2_TCH_FMT_DELTA_TD16(2)*
+
+16-bit signed Touch Delta
+
+
+Description
+===========
+
+This format represents delta data from a touch controller.
+
+Delta values may range from -32768 to 32767. Typically the values will vary
+through a small range depending on whether the sensor is touched or not. The
+full value may be seen if one of the touchscreen nodes has a fault or the line
+is not connected.
+
+**Byte Order.**
+Each cell is one byte.
+
+.. flat-table::
+    :header-rows:  0
+    :stub-columns: 0
+    :widths:       2 1 1 1 1 1 1 1 1
+
+    * - start + 0:
+      - D'\ :sub:`00high`
+      - D'\ :sub:`00low`
+      - D'\ :sub:`01high`
+      - D'\ :sub:`01low`
+      - D'\ :sub:`02high`
+      - D'\ :sub:`02low`
+      - D'\ :sub:`03high`
+      - D'\ :sub:`03low`
+    * - start + 8:
+      - D'\ :sub:`10high`
+      - D'\ :sub:`10low`
+      - D'\ :sub:`11high`
+      - D'\ :sub:`11low`
+      - D'\ :sub:`12high`
+      - D'\ :sub:`12low`
+      - D'\ :sub:`13high`
+      - D'\ :sub:`13low`
+    * - start + 16:
+      - D'\ :sub:`20high`
+      - D'\ :sub:`20low`
+      - D'\ :sub:`21high`
+      - D'\ :sub:`21low`
+      - D'\ :sub:`22high`
+      - D'\ :sub:`22low`
+      - D'\ :sub:`23high`
+      - D'\ :sub:`23low`
+    * - start + 24:
+      - D'\ :sub:`30high`
+      - D'\ :sub:`30low`
+      - D'\ :sub:`31high`
+      - D'\ :sub:`31low`
+      - D'\ :sub:`32high`
+      - D'\ :sub:`32low`
+      - D'\ :sub:`33high`
+      - D'\ :sub:`33low`
diff --git a/Documentation/media/uapi/v4l/pixfmt-tch-tu08.rst b/Documentation/media/uapi/v4l/pixfmt-tch-tu08.rst
new file mode 100644
index 0000000..e7fb7dd
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-tch-tu08.rst
@@ -0,0 +1,50 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _V4L2-TCH-FMT-TU08:
+
+**************************
+V4L2_TCH_FMT_TU08 ('TU08')
+**************************
+
+*man V4L2_TCH_FMT_TU08(2)*
+
+8-bit unsigned raw touch data
+
+Description
+===========
+
+This format represents unsigned 8-bit data from a touch controller.
+
+This may be used for output for raw and reference data. Values may range from
+0 to 255.
+
+**Byte Order.**
+Each cell is one byte.
+
+
+
+.. flat-table::
+    :header-rows:  0
+    :stub-columns: 0
+    :widths:       2 1 1 1 1
+
+    * - start + 0:
+      - R'\ :sub:`00`
+      - R'\ :sub:`01`
+      - R'\ :sub:`02`
+      - R'\ :sub:`03`
+    * - start + 4:
+      - R'\ :sub:`10`
+      - R'\ :sub:`11`
+      - R'\ :sub:`12`
+      - R'\ :sub:`13`
+    * - start + 8:
+      - R'\ :sub:`20`
+      - R'\ :sub:`21`
+      - R'\ :sub:`22`
+      - R'\ :sub:`23`
+    * - start + 12:
+      - R'\ :sub:`30`
+      - R'\ :sub:`31`
+      - R'\ :sub:`32`
+      - R'\ :sub:`33`
diff --git a/Documentation/media/uapi/v4l/pixfmt-tch-tu16.rst b/Documentation/media/uapi/v4l/pixfmt-tch-tu16.rst
new file mode 100644
index 0000000..1588fcc
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-tch-tu16.rst
@@ -0,0 +1,66 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _V4L2-TCH-FMT-TU16:
+
+********************************
+V4L2_TCH_FMT_TU16 ('TU16')
+********************************
+
+*man V4L2_TCH_FMT_TU16(2)*
+
+16-bit unsigned raw touch data
+
+
+Description
+===========
+
+This format represents unsigned 16-bit data from a touch controller.
+
+This may be used for output for raw and reference data. Values may range from
+0 to 65535.
+
+**Byte Order.**
+Each cell is one byte.
+
+
+.. flat-table::
+    :header-rows:  0
+    :stub-columns: 0
+    :widths:       2 1 1 1 1 1 1 1 1
+
+    * - start + 0:
+      - R'\ :sub:`00high`
+      - R'\ :sub:`00low`
+      - R'\ :sub:`01high`
+      - R'\ :sub:`01low`
+      - R'\ :sub:`02high`
+      - R'\ :sub:`02low`
+      - R'\ :sub:`03high`
+      - R'\ :sub:`03low`
+    * - start + 8:
+      - R'\ :sub:`10high`
+      - R'\ :sub:`10low`
+      - R'\ :sub:`11high`
+      - R'\ :sub:`11low`
+      - R'\ :sub:`12high`
+      - R'\ :sub:`12low`
+      - R'\ :sub:`13high`
+      - R'\ :sub:`13low`
+    * - start + 16:
+      - R'\ :sub:`20high`
+      - R'\ :sub:`20low`
+      - R'\ :sub:`21high`
+      - R'\ :sub:`21low`
+      - R'\ :sub:`22high`
+      - R'\ :sub:`22low`
+      - R'\ :sub:`23high`
+      - R'\ :sub:`23low`
+    * - start + 24:
+      - R'\ :sub:`30high`
+      - R'\ :sub:`30low`
+      - R'\ :sub:`31high`
+      - R'\ :sub:`31low`
+      - R'\ :sub:`32high`
+      - R'\ :sub:`32low`
+      - R'\ :sub:`33high`
+      - R'\ :sub:`33low`
diff --git a/Documentation/media/uapi/v4l/pixfmt-uv8.rst b/Documentation/media/uapi/v4l/pixfmt-uv8.rst
index fa8f7ee..c449231 100644
--- a/Documentation/media/uapi/v4l/pixfmt-uv8.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-uv8.rst
@@ -6,7 +6,6 @@
 V4L2_PIX_FMT_UV8 ('UV8')
 ************************
 
-*man V4L2_PIX_FMT_UV8(2)*
 
 UV plane interleaved
 
@@ -21,56 +20,28 @@
 
 
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Cb\ :sub:`00`
-
-       -  Cr\ :sub:`00`
-
-       -  Cb\ :sub:`01`
-
-       -  Cr\ :sub:`01`
-
-    -  .. row 2
-
-       -  start + 4:
-
-       -  Cb\ :sub:`10`
-
-       -  Cr\ :sub:`10`
-
-       -  Cb\ :sub:`11`
-
-       -  Cr\ :sub:`11`
-
-    -  .. row 3
-
-       -  start + 8:
-
-       -  Cb\ :sub:`20`
-
-       -  Cr\ :sub:`20`
-
-       -  Cb\ :sub:`21`
-
-       -  Cr\ :sub:`21`
-
-    -  .. row 4
-
-       -  start + 12:
-
-       -  Cb\ :sub:`30`
-
-       -  Cr\ :sub:`30`
-
-       -  Cb\ :sub:`31`
-
-       -  Cr\ :sub:`31`
+    * - start + 0:
+      - Cb\ :sub:`00`
+      - Cr\ :sub:`00`
+      - Cb\ :sub:`01`
+      - Cr\ :sub:`01`
+    * - start + 4:
+      - Cb\ :sub:`10`
+      - Cr\ :sub:`10`
+      - Cb\ :sub:`11`
+      - Cr\ :sub:`11`
+    * - start + 8:
+      - Cb\ :sub:`20`
+      - Cr\ :sub:`20`
+      - Cb\ :sub:`21`
+      - Cr\ :sub:`21`
+    * - start + 12:
+      - Cb\ :sub:`30`
+      - Cr\ :sub:`30`
+      - Cb\ :sub:`31`
+      - Cr\ :sub:`31`
diff --git a/Documentation/media/uapi/v4l/pixfmt-uyvy.rst b/Documentation/media/uapi/v4l/pixfmt-uyvy.rst
index 87b0081..30660e04d 100644
--- a/Documentation/media/uapi/v4l/pixfmt-uyvy.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-uyvy.rst
@@ -6,7 +6,6 @@
 V4L2_PIX_FMT_UYVY ('UYVY')
 **************************
 
-*man V4L2_PIX_FMT_UYVY(2)*
 
 Variation of ``V4L2_PIX_FMT_YUYV`` with different order of samples in
 memory
@@ -23,91 +22,47 @@
 **Byte Order.**
 Each cell is one byte.
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Cb\ :sub:`00`
-
-       -  Y'\ :sub:`00`
-
-       -  Cr\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Cb\ :sub:`01`
-
-       -  Y'\ :sub:`02`
-
-       -  Cr\ :sub:`01`
-
-       -  Y'\ :sub:`03`
-
-    -  .. row 2
-
-       -  start + 8:
-
-       -  Cb\ :sub:`10`
-
-       -  Y'\ :sub:`10`
-
-       -  Cr\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Cb\ :sub:`11`
-
-       -  Y'\ :sub:`12`
-
-       -  Cr\ :sub:`11`
-
-       -  Y'\ :sub:`13`
-
-    -  .. row 3
-
-       -  start + 16:
-
-       -  Cb\ :sub:`20`
-
-       -  Y'\ :sub:`20`
-
-       -  Cr\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Cb\ :sub:`21`
-
-       -  Y'\ :sub:`22`
-
-       -  Cr\ :sub:`21`
-
-       -  Y'\ :sub:`23`
-
-    -  .. row 4
-
-       -  start + 24:
-
-       -  Cb\ :sub:`30`
-
-       -  Y'\ :sub:`30`
-
-       -  Cr\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Cb\ :sub:`31`
-
-       -  Y'\ :sub:`32`
-
-       -  Cr\ :sub:`31`
-
-       -  Y'\ :sub:`33`
+    * - start + 0:
+      - Cb\ :sub:`00`
+      - Y'\ :sub:`00`
+      - Cr\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Cb\ :sub:`01`
+      - Y'\ :sub:`02`
+      - Cr\ :sub:`01`
+      - Y'\ :sub:`03`
+    * - start + 8:
+      - Cb\ :sub:`10`
+      - Y'\ :sub:`10`
+      - Cr\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Cb\ :sub:`11`
+      - Y'\ :sub:`12`
+      - Cr\ :sub:`11`
+      - Y'\ :sub:`13`
+    * - start + 16:
+      - Cb\ :sub:`20`
+      - Y'\ :sub:`20`
+      - Cr\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Cb\ :sub:`21`
+      - Y'\ :sub:`22`
+      - Cr\ :sub:`21`
+      - Y'\ :sub:`23`
+    * - start + 24:
+      - Cb\ :sub:`30`
+      - Y'\ :sub:`30`
+      - Cr\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Cb\ :sub:`31`
+      - Y'\ :sub:`32`
+      - Cr\ :sub:`31`
+      - Y'\ :sub:`33`
 
 
 **Color Sample Location..**
@@ -118,80 +73,38 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -
-       -  0
-
-       -
-       -  1
-
-       -  2
-
-       -
-       -  3
-
-    -  .. row 2
-
-       -  0
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-    -  .. row 3
-
-       -  1
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-    -  .. row 4
-
-       -  2
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-    -  .. row 5
-
-       -  3
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
+    * -
+      - 0
+      -
+      - 1
+      - 2
+      -
+      - 3
+    * - 0
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
+    * - 1
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
+    * - 2
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
+    * - 3
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
diff --git a/Documentation/media/uapi/v4l/pixfmt-vyuy.rst b/Documentation/media/uapi/v4l/pixfmt-vyuy.rst
index 5d8f99f..a3f61f2 100644
--- a/Documentation/media/uapi/v4l/pixfmt-vyuy.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-vyuy.rst
@@ -6,7 +6,6 @@
 V4L2_PIX_FMT_VYUY ('VYUY')
 **************************
 
-*man V4L2_PIX_FMT_VYUY(2)*
 
 Variation of ``V4L2_PIX_FMT_YUYV`` with different order of samples in
 memory
@@ -23,91 +22,47 @@
 **Byte Order.**
 Each cell is one byte.
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Cr\ :sub:`00`
-
-       -  Y'\ :sub:`00`
-
-       -  Cb\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Cr\ :sub:`01`
-
-       -  Y'\ :sub:`02`
-
-       -  Cb\ :sub:`01`
-
-       -  Y'\ :sub:`03`
-
-    -  .. row 2
-
-       -  start + 8:
-
-       -  Cr\ :sub:`10`
-
-       -  Y'\ :sub:`10`
-
-       -  Cb\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Cr\ :sub:`11`
-
-       -  Y'\ :sub:`12`
-
-       -  Cb\ :sub:`11`
-
-       -  Y'\ :sub:`13`
-
-    -  .. row 3
-
-       -  start + 16:
-
-       -  Cr\ :sub:`20`
-
-       -  Y'\ :sub:`20`
-
-       -  Cb\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Cr\ :sub:`21`
-
-       -  Y'\ :sub:`22`
-
-       -  Cb\ :sub:`21`
-
-       -  Y'\ :sub:`23`
-
-    -  .. row 4
-
-       -  start + 24:
-
-       -  Cr\ :sub:`30`
-
-       -  Y'\ :sub:`30`
-
-       -  Cb\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Cr\ :sub:`31`
-
-       -  Y'\ :sub:`32`
-
-       -  Cb\ :sub:`31`
-
-       -  Y'\ :sub:`33`
+    * - start + 0:
+      - Cr\ :sub:`00`
+      - Y'\ :sub:`00`
+      - Cb\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Cr\ :sub:`01`
+      - Y'\ :sub:`02`
+      - Cb\ :sub:`01`
+      - Y'\ :sub:`03`
+    * - start + 8:
+      - Cr\ :sub:`10`
+      - Y'\ :sub:`10`
+      - Cb\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Cr\ :sub:`11`
+      - Y'\ :sub:`12`
+      - Cb\ :sub:`11`
+      - Y'\ :sub:`13`
+    * - start + 16:
+      - Cr\ :sub:`20`
+      - Y'\ :sub:`20`
+      - Cb\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Cr\ :sub:`21`
+      - Y'\ :sub:`22`
+      - Cb\ :sub:`21`
+      - Y'\ :sub:`23`
+    * - start + 24:
+      - Cr\ :sub:`30`
+      - Y'\ :sub:`30`
+      - Cb\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Cr\ :sub:`31`
+      - Y'\ :sub:`32`
+      - Cb\ :sub:`31`
+      - Y'\ :sub:`33`
 
 
 **Color Sample Location..**
@@ -116,80 +71,38 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -
-       -  0
-
-       -
-       -  1
-
-       -
-       -  2
-
-       -  3
-
-    -  .. row 2
-
-       -  0
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-    -  .. row 3
-
-       -  1
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-    -  .. row 4
-
-       -  2
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-    -  .. row 5
-
-       -  3
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
+    * -
+      - 0
+      -
+      - 1
+      -
+      - 2
+      - 3
+    * - 0
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
+    * - 1
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
+    * - 2
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
+    * - 3
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
diff --git a/Documentation/media/uapi/v4l/pixfmt-y10.rst b/Documentation/media/uapi/v4l/pixfmt-y10.rst
index d22f771..89e2289 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y10.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y10.rst
@@ -6,7 +6,6 @@
 V4L2_PIX_FMT_Y10 ('Y10 ')
 *************************
 
-*man V4L2_PIX_FMT_Y10(2)*
 
 Grey-scale image
 
@@ -23,88 +22,44 @@
 
 
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Y'\ :sub:`00low`
-
-       -  Y'\ :sub:`00high`
-
-       -  Y'\ :sub:`01low`
-
-       -  Y'\ :sub:`01high`
-
-       -  Y'\ :sub:`02low`
-
-       -  Y'\ :sub:`02high`
-
-       -  Y'\ :sub:`03low`
-
-       -  Y'\ :sub:`03high`
-
-    -  .. row 2
-
-       -  start + 8:
-
-       -  Y'\ :sub:`10low`
-
-       -  Y'\ :sub:`10high`
-
-       -  Y'\ :sub:`11low`
-
-       -  Y'\ :sub:`11high`
-
-       -  Y'\ :sub:`12low`
-
-       -  Y'\ :sub:`12high`
-
-       -  Y'\ :sub:`13low`
-
-       -  Y'\ :sub:`13high`
-
-    -  .. row 3
-
-       -  start + 16:
-
-       -  Y'\ :sub:`20low`
-
-       -  Y'\ :sub:`20high`
-
-       -  Y'\ :sub:`21low`
-
-       -  Y'\ :sub:`21high`
-
-       -  Y'\ :sub:`22low`
-
-       -  Y'\ :sub:`22high`
-
-       -  Y'\ :sub:`23low`
-
-       -  Y'\ :sub:`23high`
-
-    -  .. row 4
-
-       -  start + 24:
-
-       -  Y'\ :sub:`30low`
-
-       -  Y'\ :sub:`30high`
-
-       -  Y'\ :sub:`31low`
-
-       -  Y'\ :sub:`31high`
-
-       -  Y'\ :sub:`32low`
-
-       -  Y'\ :sub:`32high`
-
-       -  Y'\ :sub:`33low`
-
-       -  Y'\ :sub:`33high`
+    * - start + 0:
+      - Y'\ :sub:`00low`
+      - Y'\ :sub:`00high`
+      - Y'\ :sub:`01low`
+      - Y'\ :sub:`01high`
+      - Y'\ :sub:`02low`
+      - Y'\ :sub:`02high`
+      - Y'\ :sub:`03low`
+      - Y'\ :sub:`03high`
+    * - start + 8:
+      - Y'\ :sub:`10low`
+      - Y'\ :sub:`10high`
+      - Y'\ :sub:`11low`
+      - Y'\ :sub:`11high`
+      - Y'\ :sub:`12low`
+      - Y'\ :sub:`12high`
+      - Y'\ :sub:`13low`
+      - Y'\ :sub:`13high`
+    * - start + 16:
+      - Y'\ :sub:`20low`
+      - Y'\ :sub:`20high`
+      - Y'\ :sub:`21low`
+      - Y'\ :sub:`21high`
+      - Y'\ :sub:`22low`
+      - Y'\ :sub:`22high`
+      - Y'\ :sub:`23low`
+      - Y'\ :sub:`23high`
+    * - start + 24:
+      - Y'\ :sub:`30low`
+      - Y'\ :sub:`30high`
+      - Y'\ :sub:`31low`
+      - Y'\ :sub:`31high`
+      - Y'\ :sub:`32low`
+      - Y'\ :sub:`32high`
+      - Y'\ :sub:`33low`
+      - Y'\ :sub:`33high`
diff --git a/Documentation/media/uapi/v4l/pixfmt-y10b.rst b/Documentation/media/uapi/v4l/pixfmt-y10b.rst
index 5b50cd6..9feddf3 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y10b.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y10b.rst
@@ -6,8 +6,6 @@
 V4L2_PIX_FMT_Y10BPACK ('Y10B')
 ******************************
 
-*man V4L2_PIX_FMT_Y10BPACK(2)*
-
 Grey-scale image as a bit-packed array
 
 
@@ -24,22 +22,12 @@
 pixels cross the byte boundary and have a ratio of 5 bytes for each 4
 pixels.
 
-
-
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1
 
-
-    -  .. row 1
-
-       -  Y'\ :sub:`00[9:2]`
-
-       -  Y'\ :sub:`00[1:0]`\ Y'\ :sub:`01[9:4]`
-
-       -  Y'\ :sub:`01[3:0]`\ Y'\ :sub:`02[9:6]`
-
-       -  Y'\ :sub:`02[5:0]`\ Y'\ :sub:`03[9:8]`
-
-       -  Y'\ :sub:`03[7:0]`
+    * - Y'\ :sub:`00[9:2]`
+      - Y'\ :sub:`00[1:0]`\ Y'\ :sub:`01[9:4]`
+      - Y'\ :sub:`01[3:0]`\ Y'\ :sub:`02[9:6]`
+      - Y'\ :sub:`02[5:0]`\ Y'\ :sub:`03[9:8]`
+      - Y'\ :sub:`03[7:0]`
diff --git a/Documentation/media/uapi/v4l/pixfmt-y12.rst b/Documentation/media/uapi/v4l/pixfmt-y12.rst
index 7729bcb..0f23071 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y12.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y12.rst
@@ -6,7 +6,6 @@
 V4L2_PIX_FMT_Y12 ('Y12 ')
 *************************
 
-*man V4L2_PIX_FMT_Y12(2)*
 
 Grey-scale image
 
@@ -23,88 +22,44 @@
 
 
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Y'\ :sub:`00low`
-
-       -  Y'\ :sub:`00high`
-
-       -  Y'\ :sub:`01low`
-
-       -  Y'\ :sub:`01high`
-
-       -  Y'\ :sub:`02low`
-
-       -  Y'\ :sub:`02high`
-
-       -  Y'\ :sub:`03low`
-
-       -  Y'\ :sub:`03high`
-
-    -  .. row 2
-
-       -  start + 8:
-
-       -  Y'\ :sub:`10low`
-
-       -  Y'\ :sub:`10high`
-
-       -  Y'\ :sub:`11low`
-
-       -  Y'\ :sub:`11high`
-
-       -  Y'\ :sub:`12low`
-
-       -  Y'\ :sub:`12high`
-
-       -  Y'\ :sub:`13low`
-
-       -  Y'\ :sub:`13high`
-
-    -  .. row 3
-
-       -  start + 16:
-
-       -  Y'\ :sub:`20low`
-
-       -  Y'\ :sub:`20high`
-
-       -  Y'\ :sub:`21low`
-
-       -  Y'\ :sub:`21high`
-
-       -  Y'\ :sub:`22low`
-
-       -  Y'\ :sub:`22high`
-
-       -  Y'\ :sub:`23low`
-
-       -  Y'\ :sub:`23high`
-
-    -  .. row 4
-
-       -  start + 24:
-
-       -  Y'\ :sub:`30low`
-
-       -  Y'\ :sub:`30high`
-
-       -  Y'\ :sub:`31low`
-
-       -  Y'\ :sub:`31high`
-
-       -  Y'\ :sub:`32low`
-
-       -  Y'\ :sub:`32high`
-
-       -  Y'\ :sub:`33low`
-
-       -  Y'\ :sub:`33high`
+    * - start + 0:
+      - Y'\ :sub:`00low`
+      - Y'\ :sub:`00high`
+      - Y'\ :sub:`01low`
+      - Y'\ :sub:`01high`
+      - Y'\ :sub:`02low`
+      - Y'\ :sub:`02high`
+      - Y'\ :sub:`03low`
+      - Y'\ :sub:`03high`
+    * - start + 8:
+      - Y'\ :sub:`10low`
+      - Y'\ :sub:`10high`
+      - Y'\ :sub:`11low`
+      - Y'\ :sub:`11high`
+      - Y'\ :sub:`12low`
+      - Y'\ :sub:`12high`
+      - Y'\ :sub:`13low`
+      - Y'\ :sub:`13high`
+    * - start + 16:
+      - Y'\ :sub:`20low`
+      - Y'\ :sub:`20high`
+      - Y'\ :sub:`21low`
+      - Y'\ :sub:`21high`
+      - Y'\ :sub:`22low`
+      - Y'\ :sub:`22high`
+      - Y'\ :sub:`23low`
+      - Y'\ :sub:`23high`
+    * - start + 24:
+      - Y'\ :sub:`30low`
+      - Y'\ :sub:`30high`
+      - Y'\ :sub:`31low`
+      - Y'\ :sub:`31high`
+      - Y'\ :sub:`32low`
+      - Y'\ :sub:`32high`
+      - Y'\ :sub:`33low`
+      - Y'\ :sub:`33high`
diff --git a/Documentation/media/uapi/v4l/pixfmt-y12i.rst b/Documentation/media/uapi/v4l/pixfmt-y12i.rst
index 8967e8c..bb39a24 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y12i.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y12i.rst
@@ -6,8 +6,6 @@
 V4L2_PIX_FMT_Y12I ('Y12I')
 **************************
 
-*man V4L2_PIX_FMT_Y12I(2)*
-
 Interleaved grey-scale image, e.g. from a stereo-pair
 
 
@@ -32,13 +30,7 @@
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1
 
-
-    -  .. row 1
-
-       -  Y'\ :sub:`0left[7:0]`
-
-       -  Y'\ :sub:`0right[3:0]`\ Y'\ :sub:`0left[11:8]`
-
-       -  Y'\ :sub:`0right[11:4]`
+    * - Y'\ :sub:`0left[7:0]`
+      - Y'\ :sub:`0right[3:0]`\ Y'\ :sub:`0left[11:8]`
+      - Y'\ :sub:`0right[11:4]`
diff --git a/Documentation/media/uapi/v4l/pixfmt-y16-be.rst b/Documentation/media/uapi/v4l/pixfmt-y16-be.rst
index 37fa099..54ce35e 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y16-be.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y16-be.rst
@@ -6,7 +6,6 @@
 V4L2_PIX_FMT_Y16_BE ('Y16 ' | (1 << 31))
 ****************************************
 
-*man V4L2_PIX_FMT_Y16_BE(2)*
 
 Grey-scale image
 
@@ -17,7 +16,9 @@
 This is a grey-scale image with a depth of 16 bits per pixel. The most
 significant byte is stored at lower memory addresses (big-endian).
 
-.. note:: Tthe actual sampling precision may be lower than 16 bits, for
+.. note::
+
+   The actual sampling precision may be lower than 16 bits, for
    example 10 bits per pixel with values in range 0 to 1023.
 
 **Byte Order.**
@@ -25,88 +26,44 @@
 
 
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Y'\ :sub:`00high`
-
-       -  Y'\ :sub:`00low`
-
-       -  Y'\ :sub:`01high`
-
-       -  Y'\ :sub:`01low`
-
-       -  Y'\ :sub:`02high`
-
-       -  Y'\ :sub:`02low`
-
-       -  Y'\ :sub:`03high`
-
-       -  Y'\ :sub:`03low`
-
-    -  .. row 2
-
-       -  start + 8:
-
-       -  Y'\ :sub:`10high`
-
-       -  Y'\ :sub:`10low`
-
-       -  Y'\ :sub:`11high`
-
-       -  Y'\ :sub:`11low`
-
-       -  Y'\ :sub:`12high`
-
-       -  Y'\ :sub:`12low`
-
-       -  Y'\ :sub:`13high`
-
-       -  Y'\ :sub:`13low`
-
-    -  .. row 3
-
-       -  start + 16:
-
-       -  Y'\ :sub:`20high`
-
-       -  Y'\ :sub:`20low`
-
-       -  Y'\ :sub:`21high`
-
-       -  Y'\ :sub:`21low`
-
-       -  Y'\ :sub:`22high`
-
-       -  Y'\ :sub:`22low`
-
-       -  Y'\ :sub:`23high`
-
-       -  Y'\ :sub:`23low`
-
-    -  .. row 4
-
-       -  start + 24:
-
-       -  Y'\ :sub:`30high`
-
-       -  Y'\ :sub:`30low`
-
-       -  Y'\ :sub:`31high`
-
-       -  Y'\ :sub:`31low`
-
-       -  Y'\ :sub:`32high`
-
-       -  Y'\ :sub:`32low`
-
-       -  Y'\ :sub:`33high`
-
-       -  Y'\ :sub:`33low`
+    * - start + 0:
+      - Y'\ :sub:`00high`
+      - Y'\ :sub:`00low`
+      - Y'\ :sub:`01high`
+      - Y'\ :sub:`01low`
+      - Y'\ :sub:`02high`
+      - Y'\ :sub:`02low`
+      - Y'\ :sub:`03high`
+      - Y'\ :sub:`03low`
+    * - start + 8:
+      - Y'\ :sub:`10high`
+      - Y'\ :sub:`10low`
+      - Y'\ :sub:`11high`
+      - Y'\ :sub:`11low`
+      - Y'\ :sub:`12high`
+      - Y'\ :sub:`12low`
+      - Y'\ :sub:`13high`
+      - Y'\ :sub:`13low`
+    * - start + 16:
+      - Y'\ :sub:`20high`
+      - Y'\ :sub:`20low`
+      - Y'\ :sub:`21high`
+      - Y'\ :sub:`21low`
+      - Y'\ :sub:`22high`
+      - Y'\ :sub:`22low`
+      - Y'\ :sub:`23high`
+      - Y'\ :sub:`23low`
+    * - start + 24:
+      - Y'\ :sub:`30high`
+      - Y'\ :sub:`30low`
+      - Y'\ :sub:`31high`
+      - Y'\ :sub:`31low`
+      - Y'\ :sub:`32high`
+      - Y'\ :sub:`32low`
+      - Y'\ :sub:`33high`
+      - Y'\ :sub:`33low`
diff --git a/Documentation/media/uapi/v4l/pixfmt-y16.rst b/Documentation/media/uapi/v4l/pixfmt-y16.rst
index 4c41c04..bcbd52d 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y16.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y16.rst
@@ -6,7 +6,6 @@
 V4L2_PIX_FMT_Y16 ('Y16 ')
 *************************
 
-*man V4L2_PIX_FMT_Y16(2)*
 
 Grey-scale image
 
@@ -17,7 +16,9 @@
 This is a grey-scale image with a depth of 16 bits per pixel. The least
 significant byte is stored at lower memory addresses (little-endian).
 
-.. note:: The actual sampling precision may be lower than 16 bits, for
+.. note::
+
+   The actual sampling precision may be lower than 16 bits, for
    example 10 bits per pixel with values in range 0 to 1023.
 
 **Byte Order.**
@@ -25,88 +26,44 @@
 
 
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Y'\ :sub:`00low`
-
-       -  Y'\ :sub:`00high`
-
-       -  Y'\ :sub:`01low`
-
-       -  Y'\ :sub:`01high`
-
-       -  Y'\ :sub:`02low`
-
-       -  Y'\ :sub:`02high`
-
-       -  Y'\ :sub:`03low`
-
-       -  Y'\ :sub:`03high`
-
-    -  .. row 2
-
-       -  start + 8:
-
-       -  Y'\ :sub:`10low`
-
-       -  Y'\ :sub:`10high`
-
-       -  Y'\ :sub:`11low`
-
-       -  Y'\ :sub:`11high`
-
-       -  Y'\ :sub:`12low`
-
-       -  Y'\ :sub:`12high`
-
-       -  Y'\ :sub:`13low`
-
-       -  Y'\ :sub:`13high`
-
-    -  .. row 3
-
-       -  start + 16:
-
-       -  Y'\ :sub:`20low`
-
-       -  Y'\ :sub:`20high`
-
-       -  Y'\ :sub:`21low`
-
-       -  Y'\ :sub:`21high`
-
-       -  Y'\ :sub:`22low`
-
-       -  Y'\ :sub:`22high`
-
-       -  Y'\ :sub:`23low`
-
-       -  Y'\ :sub:`23high`
-
-    -  .. row 4
-
-       -  start + 24:
-
-       -  Y'\ :sub:`30low`
-
-       -  Y'\ :sub:`30high`
-
-       -  Y'\ :sub:`31low`
-
-       -  Y'\ :sub:`31high`
-
-       -  Y'\ :sub:`32low`
-
-       -  Y'\ :sub:`32high`
-
-       -  Y'\ :sub:`33low`
-
-       -  Y'\ :sub:`33high`
+    * - start + 0:
+      - Y'\ :sub:`00low`
+      - Y'\ :sub:`00high`
+      - Y'\ :sub:`01low`
+      - Y'\ :sub:`01high`
+      - Y'\ :sub:`02low`
+      - Y'\ :sub:`02high`
+      - Y'\ :sub:`03low`
+      - Y'\ :sub:`03high`
+    * - start + 8:
+      - Y'\ :sub:`10low`
+      - Y'\ :sub:`10high`
+      - Y'\ :sub:`11low`
+      - Y'\ :sub:`11high`
+      - Y'\ :sub:`12low`
+      - Y'\ :sub:`12high`
+      - Y'\ :sub:`13low`
+      - Y'\ :sub:`13high`
+    * - start + 16:
+      - Y'\ :sub:`20low`
+      - Y'\ :sub:`20high`
+      - Y'\ :sub:`21low`
+      - Y'\ :sub:`21high`
+      - Y'\ :sub:`22low`
+      - Y'\ :sub:`22high`
+      - Y'\ :sub:`23low`
+      - Y'\ :sub:`23high`
+    * - start + 24:
+      - Y'\ :sub:`30low`
+      - Y'\ :sub:`30high`
+      - Y'\ :sub:`31low`
+      - Y'\ :sub:`31high`
+      - Y'\ :sub:`32low`
+      - Y'\ :sub:`32high`
+      - Y'\ :sub:`33low`
+      - Y'\ :sub:`33high`
diff --git a/Documentation/media/uapi/v4l/pixfmt-y41p.rst b/Documentation/media/uapi/v4l/pixfmt-y41p.rst
index 4760174..05d040c 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y41p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y41p.rst
@@ -6,7 +6,6 @@
 V4L2_PIX_FMT_Y41P ('Y41P')
 **************************
 
-*man V4L2_PIX_FMT_Y41P(2)*
 
 Format with ¼ horizontal chroma resolution, also known as YUV 4:1:1
 
@@ -30,123 +29,63 @@
 
 
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1 1 1 1 1 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Cb\ :sub:`00`
-
-       -  Y'\ :sub:`00`
-
-       -  Cr\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Cb\ :sub:`01`
-
-       -  Y'\ :sub:`02`
-
-       -  Cr\ :sub:`01`
-
-       -  Y'\ :sub:`03`
-
-       -  Y'\ :sub:`04`
-
-       -  Y'\ :sub:`05`
-
-       -  Y'\ :sub:`06`
-
-       -  Y'\ :sub:`07`
-
-    -  .. row 2
-
-       -  start + 12:
-
-       -  Cb\ :sub:`10`
-
-       -  Y'\ :sub:`10`
-
-       -  Cr\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Cb\ :sub:`11`
-
-       -  Y'\ :sub:`12`
-
-       -  Cr\ :sub:`11`
-
-       -  Y'\ :sub:`13`
-
-       -  Y'\ :sub:`14`
-
-       -  Y'\ :sub:`15`
-
-       -  Y'\ :sub:`16`
-
-       -  Y'\ :sub:`17`
-
-    -  .. row 3
-
-       -  start + 24:
-
-       -  Cb\ :sub:`20`
-
-       -  Y'\ :sub:`20`
-
-       -  Cr\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Cb\ :sub:`21`
-
-       -  Y'\ :sub:`22`
-
-       -  Cr\ :sub:`21`
-
-       -  Y'\ :sub:`23`
-
-       -  Y'\ :sub:`24`
-
-       -  Y'\ :sub:`25`
-
-       -  Y'\ :sub:`26`
-
-       -  Y'\ :sub:`27`
-
-    -  .. row 4
-
-       -  start + 36:
-
-       -  Cb\ :sub:`30`
-
-       -  Y'\ :sub:`30`
-
-       -  Cr\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Cb\ :sub:`31`
-
-       -  Y'\ :sub:`32`
-
-       -  Cr\ :sub:`31`
-
-       -  Y'\ :sub:`33`
-
-       -  Y'\ :sub:`34`
-
-       -  Y'\ :sub:`35`
-
-       -  Y'\ :sub:`36`
-
-       -  Y'\ :sub:`37`
+    * - start + 0:
+      - Cb\ :sub:`00`
+      - Y'\ :sub:`00`
+      - Cr\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Cb\ :sub:`01`
+      - Y'\ :sub:`02`
+      - Cr\ :sub:`01`
+      - Y'\ :sub:`03`
+      - Y'\ :sub:`04`
+      - Y'\ :sub:`05`
+      - Y'\ :sub:`06`
+      - Y'\ :sub:`07`
+    * - start + 12:
+      - Cb\ :sub:`10`
+      - Y'\ :sub:`10`
+      - Cr\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Cb\ :sub:`11`
+      - Y'\ :sub:`12`
+      - Cr\ :sub:`11`
+      - Y'\ :sub:`13`
+      - Y'\ :sub:`14`
+      - Y'\ :sub:`15`
+      - Y'\ :sub:`16`
+      - Y'\ :sub:`17`
+    * - start + 24:
+      - Cb\ :sub:`20`
+      - Y'\ :sub:`20`
+      - Cr\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Cb\ :sub:`21`
+      - Y'\ :sub:`22`
+      - Cr\ :sub:`21`
+      - Y'\ :sub:`23`
+      - Y'\ :sub:`24`
+      - Y'\ :sub:`25`
+      - Y'\ :sub:`26`
+      - Y'\ :sub:`27`
+    * - start + 36:
+      - Cb\ :sub:`30`
+      - Y'\ :sub:`30`
+      - Cr\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Cb\ :sub:`31`
+      - Y'\ :sub:`32`
+      - Cr\ :sub:`31`
+      - Y'\ :sub:`33`
+      - Y'\ :sub:`34`
+      - Y'\ :sub:`35`
+      - Y'\ :sub:`36`
+      - Y'\ :sub:`37`
 
 
 **Color Sample Location..**
@@ -155,120 +94,58 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -
-       -  0
-
-       -  1
-
-       -
-       -  2
-
-       -  3
-
-       -  4
-
-       -  5
-
-       -
-       -  6
-
-       -  7
-
-    -  .. row 2
-
-       -  0
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-    -  .. row 3
-
-       -  1
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-    -  .. row 4
-
-       -  2
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-    -  .. row 5
-
-       -  3
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
+    * -
+      - 0
+      - 1
+      -
+      - 2
+      - 3
+      - 4
+      - 5
+      -
+      - 6
+      - 7
+    * - 0
+      - Y
+      - Y
+      - C
+      - Y
+      - Y
+      - Y
+      - Y
+      - C
+      - Y
+      - Y
+    * - 1
+      - Y
+      - Y
+      - C
+      - Y
+      - Y
+      - Y
+      - Y
+      - C
+      - Y
+      - Y
+    * - 2
+      - Y
+      - Y
+      - C
+      - Y
+      - Y
+      - Y
+      - Y
+      - C
+      - Y
+      - Y
+    * - 3
+      - Y
+      - Y
+      - C
+      - Y
+      - Y
+      - Y
+      - Y
+      - C
+      - Y
+      - Y
diff --git a/Documentation/media/uapi/v4l/pixfmt-y8i.rst b/Documentation/media/uapi/v4l/pixfmt-y8i.rst
index 7fa16ee..fd8ed23 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y8i.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y8i.rst
@@ -6,7 +6,6 @@
 V4L2_PIX_FMT_Y8I ('Y8I ')
 *************************
 
-*man V4L2_PIX_FMT_Y8I(2)*
 
 Interleaved grey-scale image, e.g. from a stereo-pair
 
@@ -24,88 +23,44 @@
 
 
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Y'\ :sub:`00left`
-
-       -  Y'\ :sub:`00right`
-
-       -  Y'\ :sub:`01left`
-
-       -  Y'\ :sub:`01right`
-
-       -  Y'\ :sub:`02left`
-
-       -  Y'\ :sub:`02right`
-
-       -  Y'\ :sub:`03left`
-
-       -  Y'\ :sub:`03right`
-
-    -  .. row 2
-
-       -  start + 8:
-
-       -  Y'\ :sub:`10left`
-
-       -  Y'\ :sub:`10right`
-
-       -  Y'\ :sub:`11left`
-
-       -  Y'\ :sub:`11right`
-
-       -  Y'\ :sub:`12left`
-
-       -  Y'\ :sub:`12right`
-
-       -  Y'\ :sub:`13left`
-
-       -  Y'\ :sub:`13right`
-
-    -  .. row 3
-
-       -  start + 16:
-
-       -  Y'\ :sub:`20left`
-
-       -  Y'\ :sub:`20right`
-
-       -  Y'\ :sub:`21left`
-
-       -  Y'\ :sub:`21right`
-
-       -  Y'\ :sub:`22left`
-
-       -  Y'\ :sub:`22right`
-
-       -  Y'\ :sub:`23left`
-
-       -  Y'\ :sub:`23right`
-
-    -  .. row 4
-
-       -  start + 24:
-
-       -  Y'\ :sub:`30left`
-
-       -  Y'\ :sub:`30right`
-
-       -  Y'\ :sub:`31left`
-
-       -  Y'\ :sub:`31right`
-
-       -  Y'\ :sub:`32left`
-
-       -  Y'\ :sub:`32right`
-
-       -  Y'\ :sub:`33left`
-
-       -  Y'\ :sub:`33right`
+    * - start + 0:
+      - Y'\ :sub:`00left`
+      - Y'\ :sub:`00right`
+      - Y'\ :sub:`01left`
+      - Y'\ :sub:`01right`
+      - Y'\ :sub:`02left`
+      - Y'\ :sub:`02right`
+      - Y'\ :sub:`03left`
+      - Y'\ :sub:`03right`
+    * - start + 8:
+      - Y'\ :sub:`10left`
+      - Y'\ :sub:`10right`
+      - Y'\ :sub:`11left`
+      - Y'\ :sub:`11right`
+      - Y'\ :sub:`12left`
+      - Y'\ :sub:`12right`
+      - Y'\ :sub:`13left`
+      - Y'\ :sub:`13right`
+    * - start + 16:
+      - Y'\ :sub:`20left`
+      - Y'\ :sub:`20right`
+      - Y'\ :sub:`21left`
+      - Y'\ :sub:`21right`
+      - Y'\ :sub:`22left`
+      - Y'\ :sub:`22right`
+      - Y'\ :sub:`23left`
+      - Y'\ :sub:`23right`
+    * - start + 24:
+      - Y'\ :sub:`30left`
+      - Y'\ :sub:`30right`
+      - Y'\ :sub:`31left`
+      - Y'\ :sub:`31right`
+      - Y'\ :sub:`32left`
+      - Y'\ :sub:`32right`
+      - Y'\ :sub:`33left`
+      - Y'\ :sub:`33right`
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv410.rst b/Documentation/media/uapi/v4l/pixfmt-yuv410.rst
index 8a5d1a2..0c49915 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv410.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv410.rst
@@ -7,7 +7,6 @@
 V4L2_PIX_FMT_YVU410 ('YVU9'), V4L2_PIX_FMT_YUV410 ('YUV9')
 **********************************************************
 
-*man V4L2_PIX_FMT_YVU410(2)*
 
 V4L2_PIX_FMT_YUV410
 Planar formats with ¼ horizontal and vertical chroma resolution, also
@@ -37,71 +36,35 @@
 
 
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Y'\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Y'\ :sub:`02`
-
-       -  Y'\ :sub:`03`
-
-    -  .. row 2
-
-       -  start + 4:
-
-       -  Y'\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Y'\ :sub:`12`
-
-       -  Y'\ :sub:`13`
-
-    -  .. row 3
-
-       -  start + 8:
-
-       -  Y'\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Y'\ :sub:`22`
-
-       -  Y'\ :sub:`23`
-
-    -  .. row 4
-
-       -  start + 12:
-
-       -  Y'\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Y'\ :sub:`32`
-
-       -  Y'\ :sub:`33`
-
-    -  .. row 5
-
-       -  start + 16:
-
-       -  Cr\ :sub:`00`
-
-    -  .. row 6
-
-       -  start + 17:
-
-       -  Cb\ :sub:`00`
+    * - start + 0:
+      - Y'\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Y'\ :sub:`02`
+      - Y'\ :sub:`03`
+    * - start + 4:
+      - Y'\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Y'\ :sub:`12`
+      - Y'\ :sub:`13`
+    * - start + 8:
+      - Y'\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Y'\ :sub:`22`
+      - Y'\ :sub:`23`
+    * - start + 12:
+      - Y'\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Y'\ :sub:`32`
+      - Y'\ :sub:`33`
+    * - start + 16:
+      - Cr\ :sub:`00`
+    * - start + 17:
+      - Cb\ :sub:`00`
 
 
 **Color Sample Location..**
@@ -112,97 +75,53 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -
-       -  0
-
-       -
-       -  1
-
-       -
-       -  2
-
-       -
-       -  3
-
-    -  .. row 2
-
-       -  0
-
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 3
-
-       -
-
-    -  .. row 4
-
-       -  1
-
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 5
-
-       -
-       -
-       -
-       -
-       -  C
-
-       -
-       -
-       -
-
-    -  .. row 6
-
-       -  2
-
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 7
-
-       -
-
-    -  .. row 8
-
-       -  3
-
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
+    * -
+      - 0
+      -
+      - 1
+      -
+      - 2
+      -
+      - 3
+    * - 0
+      - Y
+      -
+      - Y
+      -
+      - Y
+      -
+      - Y
+    * -
+    * - 1
+      - Y
+      -
+      - Y
+      -
+      - Y
+      -
+      - Y
+    * -
+      -
+      -
+      -
+      - C
+      -
+      -
+      -
+    * - 2
+      - Y
+      -
+      - Y
+      -
+      - Y
+      -
+      - Y
+    * -
+    * - 3
+      - Y
+      -
+      - Y
+      -
+      - Y
+      -
+      - Y
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv411p.rst b/Documentation/media/uapi/v4l/pixfmt-yuv411p.rst
index f85e3f3..2cf33fa 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv411p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv411p.rst
@@ -6,7 +6,6 @@
 V4L2_PIX_FMT_YUV411P ('411P')
 *****************************
 
-*man V4L2_PIX_FMT_YUV411P(2)*
 
 Format with ¼ horizontal chroma resolution, also known as YUV 4:1:1.
 Planar layout as opposed to ``V4L2_PIX_FMT_Y41P``
@@ -33,107 +32,47 @@
 Each cell is one byte.
 
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Y'\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Y'\ :sub:`02`
-
-       -  Y'\ :sub:`03`
-
-    -  .. row 2
-
-       -  start + 4:
-
-       -  Y'\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Y'\ :sub:`12`
-
-       -  Y'\ :sub:`13`
-
-    -  .. row 3
-
-       -  start + 8:
-
-       -  Y'\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Y'\ :sub:`22`
-
-       -  Y'\ :sub:`23`
-
-    -  .. row 4
-
-       -  start + 12:
-
-       -  Y'\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Y'\ :sub:`32`
-
-       -  Y'\ :sub:`33`
-
-    -  .. row 5
-
-       -  start + 16:
-
-       -  Cb\ :sub:`00`
-
-    -  .. row 6
-
-       -  start + 17:
-
-       -  Cb\ :sub:`10`
-
-    -  .. row 7
-
-       -  start + 18:
-
-       -  Cb\ :sub:`20`
-
-    -  .. row 8
-
-       -  start + 19:
-
-       -  Cb\ :sub:`30`
-
-    -  .. row 9
-
-       -  start + 20:
-
-       -  Cr\ :sub:`00`
-
-    -  .. row 10
-
-       -  start + 21:
-
-       -  Cr\ :sub:`10`
-
-    -  .. row 11
-
-       -  start + 22:
-
-       -  Cr\ :sub:`20`
-
-    -  .. row 12
-
-       -  start + 23:
-
-       -  Cr\ :sub:`30`
+    * - start + 0:
+      - Y'\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Y'\ :sub:`02`
+      - Y'\ :sub:`03`
+    * - start + 4:
+      - Y'\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Y'\ :sub:`12`
+      - Y'\ :sub:`13`
+    * - start + 8:
+      - Y'\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Y'\ :sub:`22`
+      - Y'\ :sub:`23`
+    * - start + 12:
+      - Y'\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Y'\ :sub:`32`
+      - Y'\ :sub:`33`
+    * - start + 16:
+      - Cb\ :sub:`00`
+    * - start + 17:
+      - Cb\ :sub:`10`
+    * - start + 18:
+      - Cb\ :sub:`20`
+    * - start + 19:
+      - Cb\ :sub:`30`
+    * - start + 20:
+      - Cr\ :sub:`00`
+    * - start + 21:
+      - Cr\ :sub:`10`
+    * - start + 22:
+      - Cr\ :sub:`20`
+    * - start + 23:
+      - Cr\ :sub:`30`
 
 
 **Color Sample Location..**
@@ -144,71 +83,33 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -
-       -  0
-
-       -  1
-
-       -
-       -  2
-
-       -  3
-
-    -  .. row 2
-
-       -  0
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-    -  .. row 3
-
-       -  1
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-    -  .. row 4
-
-       -  2
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-    -  .. row 5
-
-       -  3
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
+    * -
+      - 0
+      - 1
+      -
+      - 2
+      - 3
+    * - 0
+      - Y
+      - Y
+      - C
+      - Y
+      - Y
+    * - 1
+      - Y
+      - Y
+      - C
+      - Y
+      - Y
+    * - 2
+      - Y
+      - Y
+      - C
+      - Y
+      - Y
+    * - 3
+      - Y
+      - Y
+      - C
+      - Y
+      - Y
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv420.rst b/Documentation/media/uapi/v4l/pixfmt-yuv420.rst
index b22e64c..fd98904 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv420.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv420.rst
@@ -7,7 +7,6 @@
 V4L2_PIX_FMT_YVU420 ('YV12'), V4L2_PIX_FMT_YUV420 ('YU12')
 **********************************************************
 
-*man V4L2_PIX_FMT_YVU420(2)*
 
 V4L2_PIX_FMT_YUV420
 Planar formats with ½ horizontal and vertical chroma resolution, also
@@ -38,91 +37,43 @@
 
 
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Y'\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Y'\ :sub:`02`
-
-       -  Y'\ :sub:`03`
-
-    -  .. row 2
-
-       -  start + 4:
-
-       -  Y'\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Y'\ :sub:`12`
-
-       -  Y'\ :sub:`13`
-
-    -  .. row 3
-
-       -  start + 8:
-
-       -  Y'\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Y'\ :sub:`22`
-
-       -  Y'\ :sub:`23`
-
-    -  .. row 4
-
-       -  start + 12:
-
-       -  Y'\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Y'\ :sub:`32`
-
-       -  Y'\ :sub:`33`
-
-    -  .. row 5
-
-       -  start + 16:
-
-       -  Cr\ :sub:`00`
-
-       -  Cr\ :sub:`01`
-
-    -  .. row 6
-
-       -  start + 18:
-
-       -  Cr\ :sub:`10`
-
-       -  Cr\ :sub:`11`
-
-    -  .. row 7
-
-       -  start + 20:
-
-       -  Cb\ :sub:`00`
-
-       -  Cb\ :sub:`01`
-
-    -  .. row 8
-
-       -  start + 22:
-
-       -  Cb\ :sub:`10`
-
-       -  Cb\ :sub:`11`
+    * - start + 0:
+      - Y'\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Y'\ :sub:`02`
+      - Y'\ :sub:`03`
+    * - start + 4:
+      - Y'\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Y'\ :sub:`12`
+      - Y'\ :sub:`13`
+    * - start + 8:
+      - Y'\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Y'\ :sub:`22`
+      - Y'\ :sub:`23`
+    * - start + 12:
+      - Y'\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Y'\ :sub:`32`
+      - Y'\ :sub:`33`
+    * - start + 16:
+      - Cr\ :sub:`00`
+      - Cr\ :sub:`01`
+    * - start + 18:
+      - Cr\ :sub:`10`
+      - Cr\ :sub:`11`
+    * - start + 20:
+      - Cb\ :sub:`00`
+      - Cb\ :sub:`01`
+    * - start + 22:
+      - Cb\ :sub:`10`
+      - Cb\ :sub:`11`
 
 
 **Color Sample Location..**
@@ -133,107 +84,60 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -
-       -  0
-
-       -
-       -  1
-
-       -
-       -  2
-
-       -
-       -  3
-
-    -  .. row 2
-
-       -  0
-
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 3
-
-       -
-       -
-       -  C
-
-       -
-       -
-       -
-       -  C
-
-       -
-
-    -  .. row 4
-
-       -  1
-
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 5
-
-       -
-
-    -  .. row 6
-
-       -  2
-
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 7
-
-       -
-       -
-       -  C
-
-       -
-       -
-       -
-       -  C
-
-       -
-
-    -  .. row 8
-
-       -  3
-
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
+    * -
+      - 0
+      -
+      - 1
+      -
+      - 2
+      -
+      - 3
+    * - 0
+      - Y
+      -
+      - Y
+      -
+      - Y
+      -
+      - Y
+    * -
+      -
+      - C
+      -
+      -
+      -
+      - C
+      -
+    * - 1
+      - Y
+      -
+      - Y
+      -
+      - Y
+      -
+      - Y
+    * -
+    * - 2
+      - Y
+      -
+      - Y
+      -
+      - Y
+      -
+      - Y
+    * -
+      -
+      - C
+      -
+      -
+      -
+      - C
+      -
+    * - 3
+      - Y
+      -
+      - Y
+      -
+      - Y
+      -
+      - Y
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv420m.rst b/Documentation/media/uapi/v4l/pixfmt-yuv420m.rst
index 4dab850..cce8c47 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv420m.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv420m.rst
@@ -7,7 +7,6 @@
 V4L2_PIX_FMT_YUV420M ('YM12'), V4L2_PIX_FMT_YVU420M ('YM21')
 ************************************************************
 
-*man V4L2_PIX_FMT_YUV420M(2)*
 
 V4L2_PIX_FMT_YVU420M
 Variation of ``V4L2_PIX_FMT_YUV420`` and ``V4L2_PIX_FMT_YVU420`` with
@@ -45,99 +44,45 @@
 
 
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start0 + 0:
-
-       -  Y'\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Y'\ :sub:`02`
-
-       -  Y'\ :sub:`03`
-
-    -  .. row 2
-
-       -  start0 + 4:
-
-       -  Y'\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Y'\ :sub:`12`
-
-       -  Y'\ :sub:`13`
-
-    -  .. row 3
-
-       -  start0 + 8:
-
-       -  Y'\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Y'\ :sub:`22`
-
-       -  Y'\ :sub:`23`
-
-    -  .. row 4
-
-       -  start0 + 12:
-
-       -  Y'\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Y'\ :sub:`32`
-
-       -  Y'\ :sub:`33`
-
-    -  .. row 5
-
-       -
-
-    -  .. row 6
-
-       -  start1 + 0:
-
-       -  Cb\ :sub:`00`
-
-       -  Cb\ :sub:`01`
-
-    -  .. row 7
-
-       -  start1 + 2:
-
-       -  Cb\ :sub:`10`
-
-       -  Cb\ :sub:`11`
-
-    -  .. row 8
-
-       -
-
-    -  .. row 9
-
-       -  start2 + 0:
-
-       -  Cr\ :sub:`00`
-
-       -  Cr\ :sub:`01`
-
-    -  .. row 10
-
-       -  start2 + 2:
-
-       -  Cr\ :sub:`10`
-
-       -  Cr\ :sub:`11`
+    * - start0 + 0:
+      - Y'\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Y'\ :sub:`02`
+      - Y'\ :sub:`03`
+    * - start0 + 4:
+      - Y'\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Y'\ :sub:`12`
+      - Y'\ :sub:`13`
+    * - start0 + 8:
+      - Y'\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Y'\ :sub:`22`
+      - Y'\ :sub:`23`
+    * - start0 + 12:
+      - Y'\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Y'\ :sub:`32`
+      - Y'\ :sub:`33`
+    * -
+    * - start1 + 0:
+      - Cb\ :sub:`00`
+      - Cb\ :sub:`01`
+    * - start1 + 2:
+      - Cb\ :sub:`10`
+      - Cb\ :sub:`11`
+    * -
+    * - start2 + 0:
+      - Cr\ :sub:`00`
+      - Cr\ :sub:`01`
+    * - start2 + 2:
+      - Cr\ :sub:`10`
+      - Cr\ :sub:`11`
 
 
 **Color Sample Location..**
@@ -148,107 +93,60 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -
-       -  0
-
-       -
-       -  1
-
-       -
-       -  2
-
-       -
-       -  3
-
-    -  .. row 2
-
-       -  0
-
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 3
-
-       -
-       -
-       -  C
-
-       -
-       -
-       -
-       -  C
-
-       -
-
-    -  .. row 4
-
-       -  1
-
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 5
-
-       -
-
-    -  .. row 6
-
-       -  2
-
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-    -  .. row 7
-
-       -
-       -
-       -  C
-
-       -
-       -
-       -
-       -  C
-
-       -
-
-    -  .. row 8
-
-       -  3
-
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
-
-       -
-       -  Y
+    * -
+      - 0
+      -
+      - 1
+      -
+      - 2
+      -
+      - 3
+    * - 0
+      - Y
+      -
+      - Y
+      -
+      - Y
+      -
+      - Y
+    * -
+      -
+      - C
+      -
+      -
+      -
+      - C
+      -
+    * - 1
+      - Y
+      -
+      - Y
+      -
+      - Y
+      -
+      - Y
+    * -
+    * - 2
+      - Y
+      -
+      - Y
+      -
+      - Y
+      -
+      - Y
+    * -
+      -
+      - C
+      -
+      -
+      -
+      - C
+      -
+    * - 3
+      - Y
+      -
+      - Y
+      -
+      - Y
+      -
+      - Y
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv422m.rst b/Documentation/media/uapi/v4l/pixfmt-yuv422m.rst
index ccb6728..d986393 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv422m.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv422m.rst
@@ -7,7 +7,6 @@
 V4L2_PIX_FMT_YUV422M ('YM16'), V4L2_PIX_FMT_YVU422M ('YM61')
 ************************************************************
 
-*man V4L2_PIX_FMT_YUV422M(2)*
 
 V4L2_PIX_FMT_YVU422M
 Planar formats with ½ horizontal resolution, also known as YUV and YVU
@@ -44,131 +43,57 @@
 
 
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start0 + 0:
-
-       -  Y'\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Y'\ :sub:`02`
-
-       -  Y'\ :sub:`03`
-
-    -  .. row 2
-
-       -  start0 + 4:
-
-       -  Y'\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Y'\ :sub:`12`
-
-       -  Y'\ :sub:`13`
-
-    -  .. row 3
-
-       -  start0 + 8:
-
-       -  Y'\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Y'\ :sub:`22`
-
-       -  Y'\ :sub:`23`
-
-    -  .. row 4
-
-       -  start0 + 12:
-
-       -  Y'\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Y'\ :sub:`32`
-
-       -  Y'\ :sub:`33`
-
-    -  .. row 5
-
-       -
-
-    -  .. row 6
-
-       -  start1 + 0:
-
-       -  Cb\ :sub:`00`
-
-       -  Cb\ :sub:`01`
-
-    -  .. row 7
-
-       -  start1 + 2:
-
-       -  Cb\ :sub:`10`
-
-       -  Cb\ :sub:`11`
-
-    -  .. row 8
-
-       -  start1 + 4:
-
-       -  Cb\ :sub:`20`
-
-       -  Cb\ :sub:`21`
-
-    -  .. row 9
-
-       -  start1 + 6:
-
-       -  Cb\ :sub:`30`
-
-       -  Cb\ :sub:`31`
-
-    -  .. row 10
-
-       -
-
-    -  .. row 11
-
-       -  start2 + 0:
-
-       -  Cr\ :sub:`00`
-
-       -  Cr\ :sub:`01`
-
-    -  .. row 12
-
-       -  start2 + 2:
-
-       -  Cr\ :sub:`10`
-
-       -  Cr\ :sub:`11`
-
-    -  .. row 13
-
-       -  start2 + 4:
-
-       -  Cr\ :sub:`20`
-
-       -  Cr\ :sub:`21`
-
-    -  .. row 14
-
-       -  start2 + 6:
-
-       -  Cr\ :sub:`30`
-
-       -  Cr\ :sub:`31`
+    * - start0 + 0:
+      - Y'\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Y'\ :sub:`02`
+      - Y'\ :sub:`03`
+    * - start0 + 4:
+      - Y'\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Y'\ :sub:`12`
+      - Y'\ :sub:`13`
+    * - start0 + 8:
+      - Y'\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Y'\ :sub:`22`
+      - Y'\ :sub:`23`
+    * - start0 + 12:
+      - Y'\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Y'\ :sub:`32`
+      - Y'\ :sub:`33`
+    * -
+    * - start1 + 0:
+      - Cb\ :sub:`00`
+      - Cb\ :sub:`01`
+    * - start1 + 2:
+      - Cb\ :sub:`10`
+      - Cb\ :sub:`11`
+    * - start1 + 4:
+      - Cb\ :sub:`20`
+      - Cb\ :sub:`21`
+    * - start1 + 6:
+      - Cb\ :sub:`30`
+      - Cb\ :sub:`31`
+    * -
+    * - start2 + 0:
+      - Cr\ :sub:`00`
+      - Cr\ :sub:`01`
+    * - start2 + 2:
+      - Cr\ :sub:`10`
+      - Cr\ :sub:`11`
+    * - start2 + 4:
+      - Cr\ :sub:`20`
+      - Cr\ :sub:`21`
+    * - start2 + 6:
+      - Cr\ :sub:`30`
+      - Cr\ :sub:`31`
 
 
 **Color Sample Location..**
@@ -179,80 +104,38 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -
-       -  0
-
-       -
-       -  1
-
-       -  2
-
-       -
-       -  3
-
-    -  .. row 2
-
-       -  0
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-    -  .. row 3
-
-       -  1
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-    -  .. row 4
-
-       -  2
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-    -  .. row 5
-
-       -  3
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
+    * -
+      - 0
+      -
+      - 1
+      - 2
+      -
+      - 3
+    * - 0
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
+    * - 1
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
+    * - 2
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
+    * - 3
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv422p.rst b/Documentation/media/uapi/v4l/pixfmt-yuv422p.rst
index 9f34762..e6f5de5 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv422p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv422p.rst
@@ -6,7 +6,6 @@
 V4L2_PIX_FMT_YUV422P ('422P')
 *****************************
 
-*man V4L2_PIX_FMT_YUV422P(2)*
 
 Format with ½ horizontal chroma resolution, also known as YUV 4:2:2.
 Planar layout as opposed to ``V4L2_PIX_FMT_YUYV``
@@ -34,123 +33,55 @@
 
 
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Y'\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Y'\ :sub:`02`
-
-       -  Y'\ :sub:`03`
-
-    -  .. row 2
-
-       -  start + 4:
-
-       -  Y'\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Y'\ :sub:`12`
-
-       -  Y'\ :sub:`13`
-
-    -  .. row 3
-
-       -  start + 8:
-
-       -  Y'\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Y'\ :sub:`22`
-
-       -  Y'\ :sub:`23`
-
-    -  .. row 4
-
-       -  start + 12:
-
-       -  Y'\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Y'\ :sub:`32`
-
-       -  Y'\ :sub:`33`
-
-    -  .. row 5
-
-       -  start + 16:
-
-       -  Cb\ :sub:`00`
-
-       -  Cb\ :sub:`01`
-
-    -  .. row 6
-
-       -  start + 18:
-
-       -  Cb\ :sub:`10`
-
-       -  Cb\ :sub:`11`
-
-    -  .. row 7
-
-       -  start + 20:
-
-       -  Cb\ :sub:`20`
-
-       -  Cb\ :sub:`21`
-
-    -  .. row 8
-
-       -  start + 22:
-
-       -  Cb\ :sub:`30`
-
-       -  Cb\ :sub:`31`
-
-    -  .. row 9
-
-       -  start + 24:
-
-       -  Cr\ :sub:`00`
-
-       -  Cr\ :sub:`01`
-
-    -  .. row 10
-
-       -  start + 26:
-
-       -  Cr\ :sub:`10`
-
-       -  Cr\ :sub:`11`
-
-    -  .. row 11
-
-       -  start + 28:
-
-       -  Cr\ :sub:`20`
-
-       -  Cr\ :sub:`21`
-
-    -  .. row 12
-
-       -  start + 30:
-
-       -  Cr\ :sub:`30`
-
-       -  Cr\ :sub:`31`
+    * - start + 0:
+      - Y'\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Y'\ :sub:`02`
+      - Y'\ :sub:`03`
+    * - start + 4:
+      - Y'\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Y'\ :sub:`12`
+      - Y'\ :sub:`13`
+    * - start + 8:
+      - Y'\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Y'\ :sub:`22`
+      - Y'\ :sub:`23`
+    * - start + 12:
+      - Y'\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Y'\ :sub:`32`
+      - Y'\ :sub:`33`
+    * - start + 16:
+      - Cb\ :sub:`00`
+      - Cb\ :sub:`01`
+    * - start + 18:
+      - Cb\ :sub:`10`
+      - Cb\ :sub:`11`
+    * - start + 20:
+      - Cb\ :sub:`20`
+      - Cb\ :sub:`21`
+    * - start + 22:
+      - Cb\ :sub:`30`
+      - Cb\ :sub:`31`
+    * - start + 24:
+      - Cr\ :sub:`00`
+      - Cr\ :sub:`01`
+    * - start + 26:
+      - Cr\ :sub:`10`
+      - Cr\ :sub:`11`
+    * - start + 28:
+      - Cr\ :sub:`20`
+      - Cr\ :sub:`21`
+    * - start + 30:
+      - Cr\ :sub:`30`
+      - Cr\ :sub:`31`
 
 
 **Color Sample Location..**
@@ -161,80 +92,38 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -
-       -  0
-
-       -
-       -  1
-
-       -  2
-
-       -
-       -  3
-
-    -  .. row 2
-
-       -  0
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-    -  .. row 3
-
-       -  1
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-    -  .. row 4
-
-       -  2
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-    -  .. row 5
-
-       -  3
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
+    * -
+      - 0
+      -
+      - 1
+      - 2
+      -
+      - 3
+    * - 0
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
+    * - 1
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
+    * - 2
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
+    * - 3
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv444m.rst b/Documentation/media/uapi/v4l/pixfmt-yuv444m.rst
index 04f3450..830fbf6 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv444m.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv444m.rst
@@ -7,7 +7,6 @@
 V4L2_PIX_FMT_YUV444M ('YM24'), V4L2_PIX_FMT_YVU444M ('YM42')
 ************************************************************
 
-*man V4L2_PIX_FMT_YUV444M(2)*
 
 V4L2_PIX_FMT_YVU444M
 Planar formats with full horizontal resolution, also known as YUV and
@@ -38,163 +37,73 @@
 **Byte Order.**
 Each cell is one byte.
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start0 + 0:
-
-       -  Y'\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Y'\ :sub:`02`
-
-       -  Y'\ :sub:`03`
-
-    -  .. row 2
-
-       -  start0 + 4:
-
-       -  Y'\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Y'\ :sub:`12`
-
-       -  Y'\ :sub:`13`
-
-    -  .. row 3
-
-       -  start0 + 8:
-
-       -  Y'\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Y'\ :sub:`22`
-
-       -  Y'\ :sub:`23`
-
-    -  .. row 4
-
-       -  start0 + 12:
-
-       -  Y'\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Y'\ :sub:`32`
-
-       -  Y'\ :sub:`33`
-
-    -  .. row 5
-
-       -
-
-    -  .. row 6
-
-       -  start1 + 0:
-
-       -  Cb\ :sub:`00`
-
-       -  Cb\ :sub:`01`
-
-       -  Cb\ :sub:`02`
-
-       -  Cb\ :sub:`03`
-
-    -  .. row 7
-
-       -  start1 + 4:
-
-       -  Cb\ :sub:`10`
-
-       -  Cb\ :sub:`11`
-
-       -  Cb\ :sub:`12`
-
-       -  Cb\ :sub:`13`
-
-    -  .. row 8
-
-       -  start1 + 8:
-
-       -  Cb\ :sub:`20`
-
-       -  Cb\ :sub:`21`
-
-       -  Cb\ :sub:`22`
-
-       -  Cb\ :sub:`23`
-
-    -  .. row 9
-
-       -  start1 + 12:
-
-       -  Cb\ :sub:`20`
-
-       -  Cb\ :sub:`21`
-
-       -  Cb\ :sub:`32`
-
-       -  Cb\ :sub:`33`
-
-    -  .. row 10
-
-       -
-
-    -  .. row 11
-
-       -  start2 + 0:
-
-       -  Cr\ :sub:`00`
-
-       -  Cr\ :sub:`01`
-
-       -  Cr\ :sub:`02`
-
-       -  Cr\ :sub:`03`
-
-    -  .. row 12
-
-       -  start2 + 4:
-
-       -  Cr\ :sub:`10`
-
-       -  Cr\ :sub:`11`
-
-       -  Cr\ :sub:`12`
-
-       -  Cr\ :sub:`13`
-
-    -  .. row 13
-
-       -  start2 + 8:
-
-       -  Cr\ :sub:`20`
-
-       -  Cr\ :sub:`21`
-
-       -  Cr\ :sub:`22`
-
-       -  Cr\ :sub:`23`
-
-    -  .. row 14
-
-       -  start2 + 12:
-
-       -  Cr\ :sub:`30`
-
-       -  Cr\ :sub:`31`
-
-       -  Cr\ :sub:`32`
-
-       -  Cr\ :sub:`33`
+    * - start0 + 0:
+      - Y'\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Y'\ :sub:`02`
+      - Y'\ :sub:`03`
+    * - start0 + 4:
+      - Y'\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Y'\ :sub:`12`
+      - Y'\ :sub:`13`
+    * - start0 + 8:
+      - Y'\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Y'\ :sub:`22`
+      - Y'\ :sub:`23`
+    * - start0 + 12:
+      - Y'\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Y'\ :sub:`32`
+      - Y'\ :sub:`33`
+    * -
+    * - start1 + 0:
+      - Cb\ :sub:`00`
+      - Cb\ :sub:`01`
+      - Cb\ :sub:`02`
+      - Cb\ :sub:`03`
+    * - start1 + 4:
+      - Cb\ :sub:`10`
+      - Cb\ :sub:`11`
+      - Cb\ :sub:`12`
+      - Cb\ :sub:`13`
+    * - start1 + 8:
+      - Cb\ :sub:`20`
+      - Cb\ :sub:`21`
+      - Cb\ :sub:`22`
+      - Cb\ :sub:`23`
+    * - start1 + 12:
+      - Cb\ :sub:`20`
+      - Cb\ :sub:`21`
+      - Cb\ :sub:`32`
+      - Cb\ :sub:`33`
+    * -
+    * - start2 + 0:
+      - Cr\ :sub:`00`
+      - Cr\ :sub:`01`
+      - Cr\ :sub:`02`
+      - Cr\ :sub:`03`
+    * - start2 + 4:
+      - Cr\ :sub:`10`
+      - Cr\ :sub:`11`
+      - Cr\ :sub:`12`
+      - Cr\ :sub:`13`
+    * - start2 + 8:
+      - Cr\ :sub:`20`
+      - Cr\ :sub:`21`
+      - Cr\ :sub:`22`
+      - Cr\ :sub:`23`
+    * - start2 + 12:
+      - Cr\ :sub:`30`
+      - Cr\ :sub:`31`
+      - Cr\ :sub:`32`
+      - Cr\ :sub:`33`
 
 
 **Color Sample Location..**
@@ -205,62 +114,28 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -
-       -  0
-
-       -  1
-
-       -  2
-
-       -  3
-
-    -  .. row 2
-
-       -  0
-
-       -  YC
-
-       -  YC
-
-       -  YC
-
-       -  YC
-
-    -  .. row 3
-
-       -  1
-
-       -  YC
-
-       -  YC
-
-       -  YC
-
-       -  YC
-
-    -  .. row 4
-
-       -  2
-
-       -  YC
-
-       -  YC
-
-       -  YC
-
-       -  YC
-
-    -  .. row 5
-
-       -  3
-
-       -  YC
-
-       -  YC
-
-       -  YC
-
-       -  YC
+    * -
+      - 0
+      - 1
+      - 2
+      - 3
+    * - 0
+      - YC
+      - YC
+      - YC
+      - YC
+    * - 1
+      - YC
+      - YC
+      - YC
+      - YC
+    * - 2
+      - YC
+      - YC
+      - YC
+      - YC
+    * - 3
+      - YC
+      - YC
+      - YC
+      - YC
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuyv.rst b/Documentation/media/uapi/v4l/pixfmt-yuyv.rst
index 52917df..e1bdd6b 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuyv.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuyv.rst
@@ -6,7 +6,6 @@
 V4L2_PIX_FMT_YUYV ('YUYV')
 **************************
 
-*man V4L2_PIX_FMT_YUYV(2)*
 
 Packed format with ½ horizontal chroma resolution, also known as YUV
 4:2:2
@@ -26,91 +25,47 @@
 
 
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Y'\ :sub:`00`
-
-       -  Cb\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Cr\ :sub:`00`
-
-       -  Y'\ :sub:`02`
-
-       -  Cb\ :sub:`01`
-
-       -  Y'\ :sub:`03`
-
-       -  Cr\ :sub:`01`
-
-    -  .. row 2
-
-       -  start + 8:
-
-       -  Y'\ :sub:`10`
-
-       -  Cb\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Cr\ :sub:`10`
-
-       -  Y'\ :sub:`12`
-
-       -  Cb\ :sub:`11`
-
-       -  Y'\ :sub:`13`
-
-       -  Cr\ :sub:`11`
-
-    -  .. row 3
-
-       -  start + 16:
-
-       -  Y'\ :sub:`20`
-
-       -  Cb\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Cr\ :sub:`20`
-
-       -  Y'\ :sub:`22`
-
-       -  Cb\ :sub:`21`
-
-       -  Y'\ :sub:`23`
-
-       -  Cr\ :sub:`21`
-
-    -  .. row 4
-
-       -  start + 24:
-
-       -  Y'\ :sub:`30`
-
-       -  Cb\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Cr\ :sub:`30`
-
-       -  Y'\ :sub:`32`
-
-       -  Cb\ :sub:`31`
-
-       -  Y'\ :sub:`33`
-
-       -  Cr\ :sub:`31`
+    * - start + 0:
+      - Y'\ :sub:`00`
+      - Cb\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Cr\ :sub:`00`
+      - Y'\ :sub:`02`
+      - Cb\ :sub:`01`
+      - Y'\ :sub:`03`
+      - Cr\ :sub:`01`
+    * - start + 8:
+      - Y'\ :sub:`10`
+      - Cb\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Cr\ :sub:`10`
+      - Y'\ :sub:`12`
+      - Cb\ :sub:`11`
+      - Y'\ :sub:`13`
+      - Cr\ :sub:`11`
+    * - start + 16:
+      - Y'\ :sub:`20`
+      - Cb\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Cr\ :sub:`20`
+      - Y'\ :sub:`22`
+      - Cb\ :sub:`21`
+      - Y'\ :sub:`23`
+      - Cr\ :sub:`21`
+    * - start + 24:
+      - Y'\ :sub:`30`
+      - Cb\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Cr\ :sub:`30`
+      - Y'\ :sub:`32`
+      - Cb\ :sub:`31`
+      - Y'\ :sub:`33`
+      - Cr\ :sub:`31`
 
 
 **Color Sample Location..**
@@ -121,85 +76,43 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -
-       -  0
-
-       -
-       -  1
-
-       -
-       -  2
-
-       -
-       -  3
-
-    -  .. row 2
-
-       -  0
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -
-       -  Y
-
-       -  C
-
-       -  Y
-
-    -  .. row 3
-
-       -  1
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -
-       -  Y
-
-       -  C
-
-       -  Y
-
-    -  .. row 4
-
-       -  2
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -
-       -  Y
-
-       -  C
-
-       -  Y
-
-    -  .. row 5
-
-       -  3
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -
-       -  Y
-
-       -  C
-
-       -  Y
+    * -
+      - 0
+      -
+      - 1
+      -
+      - 2
+      -
+      - 3
+    * - 0
+      - Y
+      - C
+      - Y
+      -
+      - Y
+      - C
+      - Y
+    * - 1
+      - Y
+      - C
+      - Y
+      -
+      - Y
+      - C
+      - Y
+    * - 2
+      - Y
+      - C
+      - Y
+      -
+      - Y
+      - C
+      - Y
+    * - 3
+      - Y
+      - C
+      - Y
+      -
+      - Y
+      - C
+      - Y
diff --git a/Documentation/media/uapi/v4l/pixfmt-yvyu.rst b/Documentation/media/uapi/v4l/pixfmt-yvyu.rst
index e466052..0244ce6 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yvyu.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yvyu.rst
@@ -6,7 +6,6 @@
 V4L2_PIX_FMT_YVYU ('YVYU')
 **************************
 
-*man V4L2_PIX_FMT_YVYU(2)*
 
 Variation of ``V4L2_PIX_FMT_YUYV`` with different order of samples in
 memory
@@ -23,91 +22,47 @@
 **Byte Order.**
 Each cell is one byte.
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Y'\ :sub:`00`
-
-       -  Cr\ :sub:`00`
-
-       -  Y'\ :sub:`01`
-
-       -  Cb\ :sub:`00`
-
-       -  Y'\ :sub:`02`
-
-       -  Cr\ :sub:`01`
-
-       -  Y'\ :sub:`03`
-
-       -  Cb\ :sub:`01`
-
-    -  .. row 2
-
-       -  start + 8:
-
-       -  Y'\ :sub:`10`
-
-       -  Cr\ :sub:`10`
-
-       -  Y'\ :sub:`11`
-
-       -  Cb\ :sub:`10`
-
-       -  Y'\ :sub:`12`
-
-       -  Cr\ :sub:`11`
-
-       -  Y'\ :sub:`13`
-
-       -  Cb\ :sub:`11`
-
-    -  .. row 3
-
-       -  start + 16:
-
-       -  Y'\ :sub:`20`
-
-       -  Cr\ :sub:`20`
-
-       -  Y'\ :sub:`21`
-
-       -  Cb\ :sub:`20`
-
-       -  Y'\ :sub:`22`
-
-       -  Cr\ :sub:`21`
-
-       -  Y'\ :sub:`23`
-
-       -  Cb\ :sub:`21`
-
-    -  .. row 4
-
-       -  start + 24:
-
-       -  Y'\ :sub:`30`
-
-       -  Cr\ :sub:`30`
-
-       -  Y'\ :sub:`31`
-
-       -  Cb\ :sub:`30`
-
-       -  Y'\ :sub:`32`
-
-       -  Cr\ :sub:`31`
-
-       -  Y'\ :sub:`33`
-
-       -  Cb\ :sub:`31`
+    * - start + 0:
+      - Y'\ :sub:`00`
+      - Cr\ :sub:`00`
+      - Y'\ :sub:`01`
+      - Cb\ :sub:`00`
+      - Y'\ :sub:`02`
+      - Cr\ :sub:`01`
+      - Y'\ :sub:`03`
+      - Cb\ :sub:`01`
+    * - start + 8:
+      - Y'\ :sub:`10`
+      - Cr\ :sub:`10`
+      - Y'\ :sub:`11`
+      - Cb\ :sub:`10`
+      - Y'\ :sub:`12`
+      - Cr\ :sub:`11`
+      - Y'\ :sub:`13`
+      - Cb\ :sub:`11`
+    * - start + 16:
+      - Y'\ :sub:`20`
+      - Cr\ :sub:`20`
+      - Y'\ :sub:`21`
+      - Cb\ :sub:`20`
+      - Y'\ :sub:`22`
+      - Cr\ :sub:`21`
+      - Y'\ :sub:`23`
+      - Cb\ :sub:`21`
+    * - start + 24:
+      - Y'\ :sub:`30`
+      - Cr\ :sub:`30`
+      - Y'\ :sub:`31`
+      - Cb\ :sub:`30`
+      - Y'\ :sub:`32`
+      - Cr\ :sub:`31`
+      - Y'\ :sub:`33`
+      - Cb\ :sub:`31`
 
 
 **Color Sample Location..**
@@ -116,80 +71,38 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -
-       -  0
-
-       -
-       -  1
-
-       -  2
-
-       -
-       -  3
-
-    -  .. row 2
-
-       -  0
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-    -  .. row 3
-
-       -  1
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-    -  .. row 4
-
-       -  2
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-    -  .. row 5
-
-       -  3
-
-       -  Y
-
-       -  C
-
-       -  Y
-
-       -  Y
-
-       -  C
-
-       -  Y
+    * -
+      - 0
+      -
+      - 1
+      - 2
+      -
+      - 3
+    * - 0
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
+    * - 1
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
+    * - 2
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
+    * - 3
+      - Y
+      - C
+      - Y
+      - Y
+      - C
+      - Y
diff --git a/Documentation/media/uapi/v4l/pixfmt-z16.rst b/Documentation/media/uapi/v4l/pixfmt-z16.rst
index 4ebc561..eb713a9 100644
--- a/Documentation/media/uapi/v4l/pixfmt-z16.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-z16.rst
@@ -6,7 +6,6 @@
 V4L2_PIX_FMT_Z16 ('Z16 ')
 *************************
 
-*man V4L2_PIX_FMT_Z16(2)*
 
 16-bit depth data with distance values at each pixel
 
@@ -24,88 +23,44 @@
 
 
 
+
 .. flat-table::
     :header-rows:  0
     :stub-columns: 0
-    :widths:       2 1 1 1 1 1 1 1 1
 
-
-    -  .. row 1
-
-       -  start + 0:
-
-       -  Z\ :sub:`00low`
-
-       -  Z\ :sub:`00high`
-
-       -  Z\ :sub:`01low`
-
-       -  Z\ :sub:`01high`
-
-       -  Z\ :sub:`02low`
-
-       -  Z\ :sub:`02high`
-
-       -  Z\ :sub:`03low`
-
-       -  Z\ :sub:`03high`
-
-    -  .. row 2
-
-       -  start + 8:
-
-       -  Z\ :sub:`10low`
-
-       -  Z\ :sub:`10high`
-
-       -  Z\ :sub:`11low`
-
-       -  Z\ :sub:`11high`
-
-       -  Z\ :sub:`12low`
-
-       -  Z\ :sub:`12high`
-
-       -  Z\ :sub:`13low`
-
-       -  Z\ :sub:`13high`
-
-    -  .. row 3
-
-       -  start + 16:
-
-       -  Z\ :sub:`20low`
-
-       -  Z\ :sub:`20high`
-
-       -  Z\ :sub:`21low`
-
-       -  Z\ :sub:`21high`
-
-       -  Z\ :sub:`22low`
-
-       -  Z\ :sub:`22high`
-
-       -  Z\ :sub:`23low`
-
-       -  Z\ :sub:`23high`
-
-    -  .. row 4
-
-       -  start + 24:
-
-       -  Z\ :sub:`30low`
-
-       -  Z\ :sub:`30high`
-
-       -  Z\ :sub:`31low`
-
-       -  Z\ :sub:`31high`
-
-       -  Z\ :sub:`32low`
-
-       -  Z\ :sub:`32high`
-
-       -  Z\ :sub:`33low`
-
-       -  Z\ :sub:`33high`
+    * - start + 0:
+      - Z\ :sub:`00low`
+      - Z\ :sub:`00high`
+      - Z\ :sub:`01low`
+      - Z\ :sub:`01high`
+      - Z\ :sub:`02low`
+      - Z\ :sub:`02high`
+      - Z\ :sub:`03low`
+      - Z\ :sub:`03high`
+    * - start + 8:
+      - Z\ :sub:`10low`
+      - Z\ :sub:`10high`
+      - Z\ :sub:`11low`
+      - Z\ :sub:`11high`
+      - Z\ :sub:`12low`
+      - Z\ :sub:`12high`
+      - Z\ :sub:`13low`
+      - Z\ :sub:`13high`
+    * - start + 16:
+      - Z\ :sub:`20low`
+      - Z\ :sub:`20high`
+      - Z\ :sub:`21low`
+      - Z\ :sub:`21high`
+      - Z\ :sub:`22low`
+      - Z\ :sub:`22high`
+      - Z\ :sub:`23low`
+      - Z\ :sub:`23high`
+    * - start + 24:
+      - Z\ :sub:`30low`
+      - Z\ :sub:`30high`
+      - Z\ :sub:`31low`
+      - Z\ :sub:`31high`
+      - Z\ :sub:`32low`
+      - Z\ :sub:`32high`
+      - Z\ :sub:`33low`
+      - Z\ :sub:`33high`
diff --git a/Documentation/media/uapi/v4l/pixfmt.rst b/Documentation/media/uapi/v4l/pixfmt.rst
index 81222a9..4d297f6 100644
--- a/Documentation/media/uapi/v4l/pixfmt.rst
+++ b/Documentation/media/uapi/v4l/pixfmt.rst
@@ -6,8 +6,8 @@
 Image Formats
 #############
 The V4L2 API was primarily designed for devices exchanging image data
-with applications. The :ref:`struct v4l2_pix_format <v4l2-pix-format>` and
-:ref:`struct v4l2_pix_format_mplane <v4l2-pix-format-mplane>` structures define the
+with applications. The struct :c:type:`v4l2_pix_format` and
+struct :c:type:`v4l2_pix_format_mplane` structures define the
 format and layout of an image in memory. The former is used with the
 single-planar API, while the latter is used with the multi-planar
 version (see :ref:`planar-apis`). Image formats are negotiated with
@@ -32,4 +32,5 @@
     depth-formats
     pixfmt-013
     sdr-formats
+    tch-formats
     pixfmt-reserved
diff --git a/Documentation/media/uapi/v4l/planar-apis.rst b/Documentation/media/uapi/v4l/planar-apis.rst
index 5fe2e11..4e059fb 100644
--- a/Documentation/media/uapi/v4l/planar-apis.rst
+++ b/Documentation/media/uapi/v4l/planar-apis.rst
@@ -22,7 +22,7 @@
 corresponding buffer type to its ioctl calls. Multi-planar versions of
 buffer types are suffixed with an ``_MPLANE`` string. For a list of
 available multi-planar buffer types see enum
-:ref:`v4l2_buf_type <v4l2-buf-type>`.
+:c:type:`v4l2_buf_type`.
 
 
 Multi-planar formats
@@ -46,16 +46,16 @@
 
 :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>`, :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>`, :ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>`
     New structures for describing multi-planar formats are added: struct
-    :ref:`v4l2_pix_format_mplane <v4l2-pix-format-mplane>` and
-    struct :ref:`v4l2_plane_pix_format <v4l2-plane-pix-format>`.
+    :c:type:`v4l2_pix_format_mplane` and
+    struct :c:type:`v4l2_plane_pix_format`.
     Drivers may define new multi-planar formats, which have distinct
     FourCC codes from the existing single-planar ones.
 
 :ref:`VIDIOC_QBUF <VIDIOC_QBUF>`, :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>`, :ref:`VIDIOC_QUERYBUF <VIDIOC_QUERYBUF>`
-    A new struct :ref:`v4l2_plane <v4l2-plane>` structure for
+    A new struct :c:type:`v4l2_plane` structure for
     describing planes is added. Arrays of this structure are passed in
     the new ``m.planes`` field of struct
-    :ref:`v4l2_buffer <v4l2-buffer>`.
+    :c:type:`v4l2_buffer`.
 
 :ref:`VIDIOC_REQBUFS <VIDIOC_REQBUFS>`
     Will allocate multi-planar buffers as requested.
diff --git a/Documentation/media/uapi/v4l/rw.rst b/Documentation/media/uapi/v4l/rw.rst
index dcac379..91596c0 100644
--- a/Documentation/media/uapi/v4l/rw.rst
+++ b/Documentation/media/uapi/v4l/rw.rst
@@ -9,7 +9,7 @@
 Input and output devices support the :ref:`read() <func-read>` and
 :ref:`write() <func-write>` function, respectively, when the
 ``V4L2_CAP_READWRITE`` flag in the ``capabilities`` field of struct
-:ref:`v4l2_capability <v4l2-capability>` returned by the
+:c:type:`v4l2_capability` returned by the
 :ref:`VIDIOC_QUERYCAP` ioctl is set.
 
 Drivers may need the CPU to copy the data, but they may also support DMA
diff --git a/Documentation/media/uapi/v4l/selection-api-005.rst b/Documentation/media/uapi/v4l/selection-api-005.rst
index 94731a1..5b47a28 100644
--- a/Documentation/media/uapi/v4l/selection-api-005.rst
+++ b/Documentation/media/uapi/v4l/selection-api-005.rst
@@ -16,19 +16,19 @@
 configure a capture device to fill only a part of an image by abusing
 V4L2 API. Cropping a smaller image from a larger one is achieved by
 setting the field ``bytesperline`` at struct
-:ref:`v4l2_pix_format <v4l2-pix-format>`.
+:c:type:`v4l2_pix_format`.
 Introducing an image offsets could be done by modifying field ``m_userptr``
 at struct
-:ref:`v4l2_buffer <v4l2-buffer>` before calling
+:c:type:`v4l2_buffer` before calling
 :ref:`VIDIOC_QBUF`. Those operations should be avoided because they are not
 portable (endianness), and do not work for macroblock and Bayer formats
 and mmap buffers. The selection API deals with configuration of buffer
 cropping/composing in a clear, intuitive and portable way. Next, with
 the selection API the concepts of the padded target and constraints
-flags are introduced. Finally, struct :ref:`v4l2_crop <v4l2-crop>`
-and struct :ref:`v4l2_cropcap <v4l2-cropcap>` have no reserved
+flags are introduced. Finally, struct :c:type:`v4l2_crop`
+and struct :c:type:`v4l2_cropcap` have no reserved
 fields. Therefore there is no way to extend their functionality. The new
-struct :ref:`v4l2_selection <v4l2-selection>` provides a lot of place
+struct :c:type:`v4l2_selection` provides a lot of place
 for future extensions. Driver developers are encouraged to implement
 only selection API. The former cropping API would be simulated using the
 new one.
diff --git a/Documentation/media/uapi/v4l/standard.rst b/Documentation/media/uapi/v4l/standard.rst
index c4f678f..75a1489 100644
--- a/Documentation/media/uapi/v4l/standard.rst
+++ b/Documentation/media/uapi/v4l/standard.rst
@@ -9,8 +9,8 @@
 Video devices typically support one or more different video standards or
 variations of standards. Each video input and output may support another
 set of standards. This set is reported by the ``std`` field of struct
-:ref:`v4l2_input <v4l2-input>` and struct
-:ref:`v4l2_output <v4l2-output>` returned by the
+:c:type:`v4l2_input` and struct
+:c:type:`v4l2_output` returned by the
 :ref:`VIDIOC_ENUMINPUT` and
 :ref:`VIDIOC_ENUMOUTPUT` ioctls, respectively.
 
@@ -41,7 +41,9 @@
 *received* standard can be sensed with the
 :ref:`VIDIOC_QUERYSTD` ioctl.
 
-..note:: The parameter of all these ioctls is a pointer to a
+.. note::
+
+   The parameter of all these ioctls is a pointer to a
    :ref:`v4l2_std_id <v4l2-std-id>` type (a standard set), *not* an
    index into the standard enumeration. Drivers must implement all video
    standard ioctls when the device has one or more video inputs or outputs.
@@ -56,8 +58,8 @@
 -  that does not support the video standard formats at all.
 
 Here the driver shall set the ``std`` field of struct
-:ref:`v4l2_input <v4l2-input>` and struct
-:ref:`v4l2_output <v4l2-output>` to zero and the :ref:`VIDIOC_G_STD <VIDIOC_G_STD>`,
+:c:type:`v4l2_input` and struct
+:c:type:`v4l2_output` to zero and the :ref:`VIDIOC_G_STD <VIDIOC_G_STD>`,
 :ref:`VIDIOC_S_STD <VIDIOC_G_STD>`, :ref:`VIDIOC_QUERYSTD` and :ref:`VIDIOC_ENUMSTD` ioctls
 shall return the ``ENOTTY`` error code or the ``EINVAL`` error code.
 
diff --git a/Documentation/media/uapi/v4l/streaming-par.rst b/Documentation/media/uapi/v4l/streaming-par.rst
index b07b0f0..f9b93c5 100644
--- a/Documentation/media/uapi/v4l/streaming-par.rst
+++ b/Documentation/media/uapi/v4l/streaming-par.rst
@@ -25,7 +25,7 @@
 To get and set the streaming parameters applications call the
 :ref:`VIDIOC_G_PARM <VIDIOC_G_PARM>` and
 :ref:`VIDIOC_S_PARM <VIDIOC_G_PARM>` ioctl, respectively. They take
-a pointer to a struct :ref:`v4l2_streamparm <v4l2-streamparm>`, which
+a pointer to a struct :c:type:`v4l2_streamparm`, which
 contains a union holding separate parameters for input and output
 devices.
 
diff --git a/Documentation/media/uapi/v4l/subdev-formats.rst b/Documentation/media/uapi/v4l/subdev-formats.rst
index 6dbb27b..e144370 100644
--- a/Documentation/media/uapi/v4l/subdev-formats.rst
+++ b/Documentation/media/uapi/v4l/subdev-formats.rst
@@ -5,97 +5,53 @@
 Media Bus Formats
 =================
 
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
-.. _v4l2-mbus-framefmt:
+.. c:type:: v4l2_mbus_framefmt
 
 .. flat-table:: struct v4l2_mbus_framefmt
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``width``
-
-       -  Image width, in pixels.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``height``
-
-       -  Image height, in pixels.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``code``
-
-       -  Format code, from enum
-	  :ref:`v4l2_mbus_pixelcode <v4l2-mbus-pixelcode>`.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``field``
-
-       -  Field order, from enum :ref:`v4l2_field <v4l2-field>`. See
-	  :ref:`field-order` for details.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``colorspace``
-
-       -  Image colorspace, from enum
-	  :ref:`v4l2_colorspace <v4l2-colorspace>`. See
-	  :ref:`colorspaces` for details.
-
-    -  .. row 6
-
-       -  enum :ref:`v4l2_ycbcr_encoding <v4l2-ycbcr-encoding>`
-
-       -  ``ycbcr_enc``
-
-       -  This information supplements the ``colorspace`` and must be set by
-	  the driver for capture streams and by the application for output
-	  streams, see :ref:`colorspaces`.
-
-    -  .. row 7
-
-       -  enum :ref:`v4l2_quantization <v4l2-quantization>`
-
-       -  ``quantization``
-
-       -  This information supplements the ``colorspace`` and must be set by
-	  the driver for capture streams and by the application for output
-	  streams, see :ref:`colorspaces`.
-
-    -  .. row 8
-
-       -  enum :ref:`v4l2_xfer_func <v4l2-xfer-func>`
-
-       -  ``xfer_func``
-
-       -  This information supplements the ``colorspace`` and must be set by
-	  the driver for capture streams and by the application for output
-	  streams, see :ref:`colorspaces`.
-
-    -  .. row 9
-
-       -  __u16
-
-       -  ``reserved``\ [11]
-
-       -  Reserved for future extensions. Applications and drivers must set
-	  the array to zero.
+    * - __u32
+      - ``width``
+      - Image width, in pixels.
+    * - __u32
+      - ``height``
+      - Image height, in pixels.
+    * - __u32
+      - ``code``
+      - Format code, from enum
+	:ref:`v4l2_mbus_pixelcode <v4l2-mbus-pixelcode>`.
+    * - __u32
+      - ``field``
+      - Field order, from enum :c:type:`v4l2_field`. See
+	:ref:`field-order` for details.
+    * - __u32
+      - ``colorspace``
+      - Image colorspace, from enum
+	:c:type:`v4l2_colorspace`. See
+	:ref:`colorspaces` for details.
+    * - enum :c:type:`v4l2_ycbcr_encoding`
+      - ``ycbcr_enc``
+      - This information supplements the ``colorspace`` and must be set by
+	the driver for capture streams and by the application for output
+	streams, see :ref:`colorspaces`.
+    * - enum :c:type:`v4l2_quantization`
+      - ``quantization``
+      - This information supplements the ``colorspace`` and must be set by
+	the driver for capture streams and by the application for output
+	streams, see :ref:`colorspaces`.
+    * - enum :c:type:`v4l2_xfer_func`
+      - ``xfer_func``
+      - This information supplements the ``colorspace`` and must be set by
+	the driver for capture streams and by the application for output
+	streams, see :ref:`colorspaces`.
+    * - __u16
+      - ``reserved``\ [11]
+      - Reserved for future extensions. Applications and drivers must set
+	the array to zero.
 
 
 
@@ -153,2203 +109,1159 @@
 
 The following tables list existing packed RGB formats.
 
+.. HACK: ideally, we would be using adjustbox here. However, Sphinx
+.. is a very bad behaviored guy: if the table has more than 30 cols,
+.. it switches to long table, and there's no way to override it.
+
+
+.. tabularcolumns:: |p{4.0cm}|p{0.7cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
 
 .. _v4l2-mbus-pixelcode-rgb:
 
+.. raw:: latex
+
+    \begingroup
+    \tiny
+    \setlength{\tabcolsep}{2pt}
+
 .. flat-table:: RGB formats
     :header-rows:  2
     :stub-columns: 0
+    :widths: 36 7 3 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
 
+    * - Identifier
+      - Code
+      -
+      - :cspan:`31` Data organization
+    * -
+      -
+      - Bit
+      - 31
+      - 30
+      - 29
+      - 28
+      - 27
+      - 26
+      - 25
+      - 24
+      - 23
+      - 22
+      - 21
+      - 20
+      - 19
+      - 18
+      - 17
+      - 16
+      - 15
+      - 14
+      - 13
+      - 12
+      - 11
+      - 10
+      - 9
+      - 8
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+    * .. _MEDIA-BUS-FMT-RGB444-1X12:
 
-    -  .. row 1
+      - MEDIA_BUS_FMT_RGB444_1X12
+      - 0x1016
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-RGB444-2X8-PADHI-BE:
 
-       -  Identifier
+      - MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE
+      - 0x1001
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - 0
+      - 0
+      - 0
+      - 0
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-RGB444-2X8-PADHI-LE:
 
-       -  Code
+      - MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE
+      - 0x1002
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - 0
+      - 0
+      - 0
+      - 0
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-RGB555-2X8-PADHI-BE:
 
-       -
-       -  :cspan:`31` Data organization
+      - MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE
+      - 0x1003
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - 0
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-RGB555-2X8-PADHI-LE:
 
-    -  .. row 2
+      - MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE
+      - 0x1004
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - 0
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+    * .. _MEDIA-BUS-FMT-RGB565-1X16:
 
-       -
-       -
-       -  Bit
+      - MEDIA_BUS_FMT_RGB565_1X16
+      - 0x1017
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-BGR565-2X8-BE:
 
-       -  31
+      - MEDIA_BUS_FMT_BGR565_2X8_BE
+      - 0x1005
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-BGR565-2X8-LE:
 
-       -  30
+      - MEDIA_BUS_FMT_BGR565_2X8_LE
+      - 0x1006
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+    * .. _MEDIA-BUS-FMT-RGB565-2X8-BE:
 
-       -  29
+      - MEDIA_BUS_FMT_RGB565_2X8_BE
+      - 0x1007
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-RGB565-2X8-LE:
 
-       -  28
+      - MEDIA_BUS_FMT_RGB565_2X8_LE
+      - 0x1008
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+    * .. _MEDIA-BUS-FMT-RGB666-1X18:
 
-       -  27
+      - MEDIA_BUS_FMT_RGB666_1X18
+      - 0x1009
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-RBG888-1X24:
 
-       -  26
+      - MEDIA_BUS_FMT_RBG888_1X24
+      - 0x100e
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-RGB666-1X24_CPADHI:
 
-       -  25
+      - MEDIA_BUS_FMT_RGB666_1X24_CPADHI
+      - 0x1015
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - 0
+      - 0
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - 0
+      - 0
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - 0
+      - 0
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-BGR888-1X24:
 
-       -  24
+      - MEDIA_BUS_FMT_BGR888_1X24
+      - 0x1013
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-GBR888-1X24:
 
-       -  23
+      - MEDIA_BUS_FMT_GBR888_1X24
+      - 0x1014
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-RGB888-1X24:
 
-       -  22
+      - MEDIA_BUS_FMT_RGB888_1X24
+      - 0x100a
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-RGB888-2X12-BE:
 
-       -  21
+      - MEDIA_BUS_FMT_RGB888_2X12_BE
+      - 0x100b
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-RGB888-2X12-LE:
 
-       -  20
+      - MEDIA_BUS_FMT_RGB888_2X12_LE
+      - 0x100c
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+    * .. _MEDIA-BUS-FMT-ARGB888-1X32:
 
-       -  19
+      - MEDIA_BUS_FMT_ARGB888_1X32
+      - 0x100d
+      -
+      - a\ :sub:`7`
+      - a\ :sub:`6`
+      - a\ :sub:`5`
+      - a\ :sub:`4`
+      - a\ :sub:`3`
+      - a\ :sub:`2`
+      - a\ :sub:`1`
+      - a\ :sub:`0`
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-RGB888-1X32-PADHI:
 
-       -  18
+      - MEDIA_BUS_FMT_RGB888_1X32_PADHI
+      - 0x100f
+      -
+      - 0
+      - 0
+      - 0
+      - 0
+      - 0
+      - 0
+      - 0
+      - 0
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
 
-       -  17
+.. raw:: latex
 
-       -  16
-
-       -  15
-
-       -  14
-
-       -  13
-
-       -  12
-
-       -  11
-
-       -  10
-
-       -  9
-
-       -  8
-
-       -  7
-
-       -  6
-
-       -  5
-
-       -  4
-
-       -  3
-
-       -  2
-
-       -  1
-
-       -  0
-
-    -  .. _MEDIA-BUS-FMT-RGB444-1X12:
-
-       -  MEDIA_BUS_FMT_RGB444_1X12
-
-       -  0x1016
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-RGB444-2X8-PADHI-BE:
-
-       -  MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE
-
-       -  0x1001
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-    -  .. row 5
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-RGB444-2X8-PADHI-LE:
-
-       -  MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE
-
-       -  0x1002
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. row 7
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-RGB555-2X8-PADHI-BE:
-
-       -  MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE
-
-       -  0x1003
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  0
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-    -  .. row 9
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-RGB555-2X8-PADHI-LE:
-
-       -  MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE
-
-       -  0x1004
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. row 11
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  0
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-    -  .. _MEDIA-BUS-FMT-RGB565-1X16:
-
-       -  MEDIA_BUS_FMT_RGB565_1X16
-
-       -  0x1017
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-BGR565-2X8-BE:
-
-       -  MEDIA_BUS_FMT_BGR565_2X8_BE
-
-       -  0x1005
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-    -  .. row 14
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-BGR565-2X8-LE:
-
-       -  MEDIA_BUS_FMT_BGR565_2X8_LE
-
-       -  0x1006
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-    -  .. row 16
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-    -  .. _MEDIA-BUS-FMT-RGB565-2X8-BE:
-
-       -  MEDIA_BUS_FMT_RGB565_2X8_BE
-
-       -  0x1007
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-    -  .. row 18
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-RGB565-2X8-LE:
-
-       -  MEDIA_BUS_FMT_RGB565_2X8_LE
-
-       -  0x1008
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. row 20
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-    -  .. _MEDIA-BUS-FMT-RGB666-1X18:
-
-       -  MEDIA_BUS_FMT_RGB666_1X18
-
-       -  0x1009
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-RBG888-1X24:
-
-       -  MEDIA_BUS_FMT_RBG888_1X24
-
-       -  0x100e
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-RGB666-1X24_CPADHI:
-
-       -  MEDIA_BUS_FMT_RGB666_1X24_CPADHI
-
-       -  0x1015
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  0
-
-       -  0
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  0
-
-       -  0
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  0
-
-       -  0
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-BGR888-1X24:
-
-       -  MEDIA_BUS_FMT_BGR888_1X24
-
-       -  0x1013
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-GBR888-1X24:
-
-       -  MEDIA_BUS_FMT_GBR888_1X24
-
-       -  0x1014
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-RGB888-1X24:
-
-       -  MEDIA_BUS_FMT_RGB888_1X24
-
-       -  0x100a
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-RGB888-2X12-BE:
-
-       -  MEDIA_BUS_FMT_RGB888_2X12_BE
-
-       -  0x100b
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-    -  .. row 28
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-RGB888-2X12-LE:
-
-       -  MEDIA_BUS_FMT_RGB888_2X12_LE
-
-       -  0x100c
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. row 30
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-    -  .. _MEDIA-BUS-FMT-ARGB888-1X32:
-
-       -  MEDIA_BUS_FMT_ARGB888_1X32
-
-       -  0x100d
-
-       -
-       -  a\ :sub:`7`
-
-       -  a\ :sub:`6`
-
-       -  a\ :sub:`5`
-
-       -  a\ :sub:`4`
-
-       -  a\ :sub:`3`
-
-       -  a\ :sub:`2`
-
-       -  a\ :sub:`1`
-
-       -  a\ :sub:`0`
-
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-RGB888-1X32-PADHI:
-
-       -  MEDIA_BUS_FMT_RGB888_1X32_PADHI
-
-       -  0x100f
-
-       -
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
+    \endgroup
 
 On LVDS buses, usually each sample is transferred serialized in seven
 time slots per pixel clock, on three (18-bit) or four (24-bit)
@@ -2359,6 +1271,9 @@
 JEIDA defined bit mapping will be named
 ``MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA``, for example.
 
+.. raw:: latex
+
+    \begin{adjustbox}{width=\columnwidth}
 
 .. _v4l2-mbus-pixelcode-rgb-lvds:
 
@@ -2366,354 +1281,197 @@
     :header-rows:  2
     :stub-columns: 0
 
+    * - Identifier
+      - Code
+      -
+      -
+      - :cspan:`3` Data organization
+    * -
+      -
+      - Timeslot
+      - Lane
+      - 3
+      - 2
+      - 1
+      - 0
+    * .. _MEDIA-BUS-FMT-RGB666-1X7X3-SPWG:
 
-    -  .. row 1
+      - MEDIA_BUS_FMT_RGB666_1X7X3_SPWG
+      - 0x1010
+      - 0
+      -
+      -
+      - d
+      - b\ :sub:`1`
+      - g\ :sub:`0`
+    * -
+      -
+      - 1
+      -
+      -
+      - d
+      - b\ :sub:`0`
+      - r\ :sub:`5`
+    * -
+      -
+      - 2
+      -
+      -
+      - d
+      - g\ :sub:`5`
+      - r\ :sub:`4`
+    * -
+      -
+      - 3
+      -
+      -
+      - b\ :sub:`5`
+      - g\ :sub:`4`
+      - r\ :sub:`3`
+    * -
+      -
+      - 4
+      -
+      -
+      - b\ :sub:`4`
+      - g\ :sub:`3`
+      - r\ :sub:`2`
+    * -
+      -
+      - 5
+      -
+      -
+      - b\ :sub:`3`
+      - g\ :sub:`2`
+      - r\ :sub:`1`
+    * -
+      -
+      - 6
+      -
+      -
+      - b\ :sub:`2`
+      - g\ :sub:`1`
+      - r\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-RGB888-1X7X4-SPWG:
 
-       -  Identifier
+      - MEDIA_BUS_FMT_RGB888_1X7X4_SPWG
+      - 0x1011
+      - 0
+      -
+      - d
+      - d
+      - b\ :sub:`1`
+      - g\ :sub:`0`
+    * -
+      -
+      - 1
+      -
+      - b\ :sub:`7`
+      - d
+      - b\ :sub:`0`
+      - r\ :sub:`5`
+    * -
+      -
+      - 2
+      -
+      - b\ :sub:`6`
+      - d
+      - g\ :sub:`5`
+      - r\ :sub:`4`
+    * -
+      -
+      - 3
+      -
+      - g\ :sub:`7`
+      - b\ :sub:`5`
+      - g\ :sub:`4`
+      - r\ :sub:`3`
+    * -
+      -
+      - 4
+      -
+      - g\ :sub:`6`
+      - b\ :sub:`4`
+      - g\ :sub:`3`
+      - r\ :sub:`2`
+    * -
+      -
+      - 5
+      -
+      - r\ :sub:`7`
+      - b\ :sub:`3`
+      - g\ :sub:`2`
+      - r\ :sub:`1`
+    * -
+      -
+      - 6
+      -
+      - r\ :sub:`6`
+      - b\ :sub:`2`
+      - g\ :sub:`1`
+      - r\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-RGB888-1X7X4-JEIDA:
 
-       -  Code
+      - MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA
+      - 0x1012
+      - 0
+      -
+      - d
+      - d
+      - b\ :sub:`3`
+      - g\ :sub:`2`
+    * -
+      -
+      - 1
+      -
+      - b\ :sub:`1`
+      - d
+      - b\ :sub:`2`
+      - r\ :sub:`7`
+    * -
+      -
+      - 2
+      -
+      - b\ :sub:`0`
+      - d
+      - g\ :sub:`7`
+      - r\ :sub:`6`
+    * -
+      -
+      - 3
+      -
+      - g\ :sub:`1`
+      - b\ :sub:`7`
+      - g\ :sub:`6`
+      - r\ :sub:`5`
+    * -
+      -
+      - 4
+      -
+      - g\ :sub:`0`
+      - b\ :sub:`6`
+      - g\ :sub:`5`
+      - r\ :sub:`4`
+    * -
+      -
+      - 5
+      -
+      - r\ :sub:`1`
+      - b\ :sub:`5`
+      - g\ :sub:`4`
+      - r\ :sub:`3`
+    * -
+      -
+      - 6
+      -
+      - r\ :sub:`0`
+      - b\ :sub:`4`
+      - g\ :sub:`3`
+      - r\ :sub:`2`
 
-       -
-       -
-       -  :cspan:`3` Data organization
+.. raw:: latex
 
-    -  .. row 2
-
-       -
-       -
-       -  Timeslot
-
-       -  Lane
-
-       -  3
-
-       -  2
-
-       -  1
-
-       -  0
-
-    -  .. _MEDIA-BUS-FMT-RGB666-1X7X3-SPWG:
-
-       -  MEDIA_BUS_FMT_RGB666_1X7X3_SPWG
-
-       -  0x1010
-
-       -  0
-
-       -
-       -  -
-
-       -  d
-
-       -  b\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-    -  .. row 4
-
-       -
-       -
-       -  1
-
-       -
-       -  -
-
-       -  d
-
-       -  b\ :sub:`0`
-
-       -  r\ :sub:`5`
-
-    -  .. row 5
-
-       -
-       -
-       -  2
-
-       -
-       -  -
-
-       -  d
-
-       -  g\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-    -  .. row 6
-
-       -
-       -
-       -  3
-
-       -
-       -  -
-
-       -  b\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-    -  .. row 7
-
-       -
-       -
-       -  4
-
-       -
-       -  -
-
-       -  b\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-    -  .. row 8
-
-       -
-       -
-       -  5
-
-       -
-       -  -
-
-       -  b\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-    -  .. row 9
-
-       -
-       -
-       -  6
-
-       -
-       -  -
-
-       -  b\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-RGB888-1X7X4-SPWG:
-
-       -  MEDIA_BUS_FMT_RGB888_1X7X4_SPWG
-
-       -  0x1011
-
-       -  0
-
-       -
-       -  d
-
-       -  d
-
-       -  b\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-    -  .. row 11
-
-       -
-       -
-       -  1
-
-       -
-       -  b\ :sub:`7`
-
-       -  d
-
-       -  b\ :sub:`0`
-
-       -  r\ :sub:`5`
-
-    -  .. row 12
-
-       -
-       -
-       -  2
-
-       -
-       -  b\ :sub:`6`
-
-       -  d
-
-       -  g\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-    -  .. row 13
-
-       -
-       -
-       -  3
-
-       -
-       -  g\ :sub:`7`
-
-       -  b\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-    -  .. row 14
-
-       -
-       -
-       -  4
-
-       -
-       -  g\ :sub:`6`
-
-       -  b\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-    -  .. row 15
-
-       -
-       -
-       -  5
-
-       -
-       -  r\ :sub:`7`
-
-       -  b\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-    -  .. row 16
-
-       -
-       -
-       -  6
-
-       -
-       -  r\ :sub:`6`
-
-       -  b\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-RGB888-1X7X4-JEIDA:
-
-       -  MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA
-
-       -  0x1012
-
-       -  0
-
-       -
-       -  d
-
-       -  d
-
-       -  b\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-    -  .. row 18
-
-       -
-       -
-       -  1
-
-       -
-       -  b\ :sub:`1`
-
-       -  d
-
-       -  b\ :sub:`2`
-
-       -  r\ :sub:`7`
-
-    -  .. row 19
-
-       -
-       -
-       -  2
-
-       -
-       -  b\ :sub:`0`
-
-       -  d
-
-       -  g\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-    -  .. row 20
-
-       -
-       -
-       -  3
-
-       -
-       -  g\ :sub:`1`
-
-       -  b\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-    -  .. row 21
-
-       -
-       -
-       -  4
-
-       -
-       -  g\ :sub:`0`
-
-       -  b\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-    -  .. row 22
-
-       -
-       -
-       -  5
-
-       -
-       -  r\ :sub:`1`
-
-       -  b\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-    -  .. row 23
-
-       -
-       -
-       -  6
-
-       -
-       -  r\ :sub:`0`
-
-       -  b\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  r\ :sub:`2`
-
+    \end{adjustbox}\newline\newline
 
 
 Bayer Formats
@@ -2768,912 +1526,795 @@
 organization is given as an example for the first pixel only.
 
 
+.. raw:: latex
+
+    \newline\newline\begin{adjustbox}{width=\columnwidth}
+
+.. tabularcolumns:: |p{7.6cm}|p{1.6cm}|p{0.7cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|
+
 .. _v4l2-mbus-pixelcode-bayer:
 
+.. cssclass: longtable
+
 .. flat-table:: Bayer Formats
     :header-rows:  2
     :stub-columns: 0
 
+    * - Identifier
+      - Code
+      -
+      - :cspan:`15` Data organization
+    * -
+      -
+      - Bit
+      - 15
+      - 14
+      - 13
+      - 12
+      - 11
+      - 10
+      - 9
+      - 8
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+    * .. _MEDIA-BUS-FMT-SBGGR8-1X8:
 
-    -  .. row 1
+      - MEDIA_BUS_FMT_SBGGR8_1X8
+      - 0x3001
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SGBRG8-1X8:
 
-       -  Identifier
+      - MEDIA_BUS_FMT_SGBRG8_1X8
+      - 0x3013
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SGRBG8-1X8:
 
-       -  Code
+      - MEDIA_BUS_FMT_SGRBG8_1X8
+      - 0x3002
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SRGGB8-1X8:
 
-       -
-       -  :cspan:`11` Data organization
+      - MEDIA_BUS_FMT_SRGGB8_1X8
+      - 0x3014
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SBGGR10-ALAW8-1X8:
 
-    -  .. row 2
+      - MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8
+      - 0x3015
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SGBRG10-ALAW8-1X8:
 
-       -
-       -
-       -  Bit
+      - MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8
+      - 0x3016
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SGRBG10-ALAW8-1X8:
 
-       -  11
+      - MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8
+      - 0x3017
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SRGGB10-ALAW8-1X8:
 
-       -  10
+      - MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8
+      - 0x3018
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SBGGR10-DPCM8-1X8:
 
-       -  9
+      - MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8
+      - 0x300b
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SGBRG10-DPCM8-1X8:
 
-       -  8
+      - MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8
+      - 0x300c
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SGRBG10-DPCM8-1X8:
 
-       -  7
+      - MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8
+      - 0x3009
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SRGGB10-DPCM8-1X8:
 
-       -  6
+      - MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8
+      - 0x300d
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SBGGR10-2X8-PADHI-BE:
 
-       -  5
+      - MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE
+      - 0x3003
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - 0
+      - 0
+      - 0
+      - 0
+      - 0
+      - 0
+      - b\ :sub:`9`
+      - b\ :sub:`8`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SBGGR10-2X8-PADHI-LE:
 
-       -  4
+      - MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE
+      - 0x3004
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - 0
+      - 0
+      - 0
+      - 0
+      - 0
+      - 0
+      - b\ :sub:`9`
+      - b\ :sub:`8`
+    * .. _MEDIA-BUS-FMT-SBGGR10-2X8-PADLO-BE:
 
-       -  3
+      - MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE
+      - 0x3005
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - b\ :sub:`9`
+      - b\ :sub:`8`
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      - 0
+      - 0
+      - 0
+      - 0
+      - 0
+      - 0
+    * .. _MEDIA-BUS-FMT-SBGGR10-2X8-PADLO-LE:
 
-       -  2
+      - MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE
+      - 0x3006
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+      - 0
+      - 0
+      - 0
+      - 0
+      - 0
+      - 0
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - b\ :sub:`9`
+      - b\ :sub:`8`
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+    * .. _MEDIA-BUS-FMT-SBGGR10-1X10:
 
-       -  1
+      - MEDIA_BUS_FMT_SBGGR10_1X10
+      - 0x3007
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - b\ :sub:`9`
+      - b\ :sub:`8`
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SGBRG10-1X10:
 
-       -  0
+      - MEDIA_BUS_FMT_SGBRG10_1X10
+      - 0x300e
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - g\ :sub:`9`
+      - g\ :sub:`8`
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SGRBG10-1X10:
 
-    -  .. _MEDIA-BUS-FMT-SBGGR8-1X8:
+      - MEDIA_BUS_FMT_SGRBG10_1X10
+      - 0x300a
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - g\ :sub:`9`
+      - g\ :sub:`8`
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SRGGB10-1X10:
 
-       -  MEDIA_BUS_FMT_SBGGR8_1X8
+      - MEDIA_BUS_FMT_SRGGB10_1X10
+      - 0x300f
+      -
+      -
+      -
+      - -
+      - -
+      - -
+      - -
+      - r\ :sub:`9`
+      - r\ :sub:`8`
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SBGGR12-1X12:
 
-       -  0x3001
+      - MEDIA_BUS_FMT_SBGGR12_1X12
+      - 0x3008
+      -
+      - -
+      - -
+      - -
+      - -
+      - b\ :sub:`11`
+      - b\ :sub:`10`
+      - b\ :sub:`9`
+      - b\ :sub:`8`
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SGBRG12-1X12:
 
-       -
-       -  -
+      - MEDIA_BUS_FMT_SGBRG12_1X12
+      - 0x3010
+      -
+      - -
+      - -
+      - -
+      - -
+      - g\ :sub:`11`
+      - g\ :sub:`10`
+      - g\ :sub:`9`
+      - g\ :sub:`8`
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SGRBG12-1X12:
 
-       -  -
+      - MEDIA_BUS_FMT_SGRBG12_1X12
+      - 0x3011
+      -
+      - -
+      - -
+      - -
+      - -
+      - g\ :sub:`11`
+      - g\ :sub:`10`
+      - g\ :sub:`9`
+      - g\ :sub:`8`
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SRGGB12-1X12:
 
-       -  -
+      - MEDIA_BUS_FMT_SRGGB12_1X12
+      - 0x3012
+      -
+      - -
+      - -
+      - -
+      - -
+      - r\ :sub:`11`
+      - r\ :sub:`10`
+      - r\ :sub:`9`
+      - r\ :sub:`8`
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SBGGR14-1X14:
 
-       -  -
+      - MEDIA_BUS_FMT_SBGGR14_1X14
+      - 0x3019
+      -
+      - -
+      - -
+      - b\ :sub:`13`
+      - b\ :sub:`12`
+      - b\ :sub:`11`
+      - b\ :sub:`10`
+      - b\ :sub:`9`
+      - b\ :sub:`8`
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SGBRG14-1X14:
 
-       -  b\ :sub:`7`
+      - MEDIA_BUS_FMT_SGBRG14_1X14
+      - 0x301a
+      -
+      - -
+      - -
+      - g\ :sub:`13`
+      - g\ :sub:`12`
+      - g\ :sub:`11`
+      - g\ :sub:`10`
+      - g\ :sub:`9`
+      - g\ :sub:`8`
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SGRBG14-1X14:
 
-       -  b\ :sub:`6`
+      - MEDIA_BUS_FMT_SGRBG14_1X14
+      - 0x301b
+      -
+      - -
+      - -
+      - g\ :sub:`13`
+      - g\ :sub:`12`
+      - g\ :sub:`11`
+      - g\ :sub:`10`
+      - g\ :sub:`9`
+      - g\ :sub:`8`
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SRGGB14-1X14:
 
-       -  b\ :sub:`5`
+      - MEDIA_BUS_FMT_SRGGB14_1X14
+      - 0x301c
+      -
+      - -
+      - -
+      - r\ :sub:`13`
+      - r\ :sub:`12`
+      - r\ :sub:`11`
+      - r\ :sub:`10`
+      - r\ :sub:`9`
+      - r\ :sub:`8`
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SBGGR16-1X16:
 
-       -  b\ :sub:`4`
+      - MEDIA_BUS_FMT_SBGGR16_1X16
+      - 0x301d
+      -
+      - b\ :sub:`15`
+      - b\ :sub:`14`
+      - b\ :sub:`13`
+      - b\ :sub:`12`
+      - b\ :sub:`11`
+      - b\ :sub:`10`
+      - b\ :sub:`9`
+      - b\ :sub:`8`
+      - b\ :sub:`7`
+      - b\ :sub:`6`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SGBRG16-1X16:
 
-       -  b\ :sub:`3`
+      - MEDIA_BUS_FMT_SGBRG16_1X16
+      - 0x301e
+      -
+      - g\ :sub:`15`
+      - g\ :sub:`14`
+      - g\ :sub:`13`
+      - g\ :sub:`12`
+      - g\ :sub:`11`
+      - g\ :sub:`10`
+      - g\ :sub:`9`
+      - g\ :sub:`8`
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SGRBG16-1X16:
 
-       -  b\ :sub:`2`
+      - MEDIA_BUS_FMT_SGRBG16_1X16
+      - 0x301f
+      -
+      - g\ :sub:`15`
+      - g\ :sub:`14`
+      - g\ :sub:`13`
+      - g\ :sub:`12`
+      - g\ :sub:`11`
+      - g\ :sub:`10`
+      - g\ :sub:`9`
+      - g\ :sub:`8`
+      - g\ :sub:`7`
+      - g\ :sub:`6`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-SRGGB16-1X16:
 
-       -  b\ :sub:`1`
+      - MEDIA_BUS_FMT_SRGGB16_1X16
+      - 0x3020
+      -
+      - r\ :sub:`15`
+      - r\ :sub:`14`
+      - r\ :sub:`13`
+      - r\ :sub:`12`
+      - r\ :sub:`11`
+      - r\ :sub:`10`
+      - r\ :sub:`9`
+      - r\ :sub:`8`
+      - r\ :sub:`7`
+      - r\ :sub:`6`
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
 
-       -  b\ :sub:`0`
+.. raw:: latex
 
-    -  .. _MEDIA-BUS-FMT-SGBRG8-1X8:
-
-       -  MEDIA_BUS_FMT_SGBRG8_1X8
-
-       -  0x3013
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SGRBG8-1X8:
-
-       -  MEDIA_BUS_FMT_SGRBG8_1X8
-
-       -  0x3002
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SRGGB8-1X8:
-
-       -  MEDIA_BUS_FMT_SRGGB8_1X8
-
-       -  0x3014
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SBGGR10-ALAW8-1X8:
-
-       -  MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8
-
-       -  0x3015
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SGBRG10-ALAW8-1X8:
-
-       -  MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8
-
-       -  0x3016
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SGRBG10-ALAW8-1X8:
-
-       -  MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8
-
-       -  0x3017
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SRGGB10-ALAW8-1X8:
-
-       -  MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8
-
-       -  0x3018
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SBGGR10-DPCM8-1X8:
-
-       -  MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8
-
-       -  0x300b
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SGBRG10-DPCM8-1X8:
-
-       -  MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8
-
-       -  0x300c
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SGRBG10-DPCM8-1X8:
-
-       -  MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8
-
-       -  0x3009
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SRGGB10-DPCM8-1X8:
-
-       -  MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8
-
-       -  0x300d
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SBGGR10-2X8-PADHI-BE:
-
-       -  MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE
-
-       -  0x3003
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  b\ :sub:`9`
-
-       -  b\ :sub:`8`
-
-    -  .. row 16
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SBGGR10-2X8-PADHI-LE:
-
-       -  MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE
-
-       -  0x3004
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. row 18
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  b\ :sub:`9`
-
-       -  b\ :sub:`8`
-
-    -  .. _MEDIA-BUS-FMT-SBGGR10-2X8-PADLO-BE:
-
-       -  MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE
-
-       -  0x3005
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  b\ :sub:`9`
-
-       -  b\ :sub:`8`
-
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-    -  .. row 20
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-    -  .. _MEDIA-BUS-FMT-SBGGR10-2X8-PADLO-LE:
-
-       -  MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE
-
-       -  0x3006
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  0
-
-    -  .. row 22
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  b\ :sub:`9`
-
-       -  b\ :sub:`8`
-
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-    -  .. _MEDIA-BUS-FMT-SBGGR10-1X10:
-
-       -  MEDIA_BUS_FMT_SBGGR10_1X10
-
-       -  0x3007
-
-       -
-       -  -
-
-       -  -
-
-       -  b\ :sub:`9`
-
-       -  b\ :sub:`8`
-
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SGBRG10-1X10:
-
-       -  MEDIA_BUS_FMT_SGBRG10_1X10
-
-       -  0x300e
-
-       -
-       -  -
-
-       -  -
-
-       -  g\ :sub:`9`
-
-       -  g\ :sub:`8`
-
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SGRBG10-1X10:
-
-       -  MEDIA_BUS_FMT_SGRBG10_1X10
-
-       -  0x300a
-
-       -
-       -  -
-
-       -  -
-
-       -  g\ :sub:`9`
-
-       -  g\ :sub:`8`
-
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SRGGB10-1X10:
-
-       -  MEDIA_BUS_FMT_SRGGB10_1X10
-
-       -  0x300f
-
-       -
-       -  -
-
-       -  -
-
-       -  r\ :sub:`9`
-
-       -  r\ :sub:`8`
-
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SBGGR12-1X12:
-
-       -  MEDIA_BUS_FMT_SBGGR12_1X12
-
-       -  0x3008
-
-       -
-       -  b\ :sub:`11`
-
-       -  b\ :sub:`10`
-
-       -  b\ :sub:`9`
-
-       -  b\ :sub:`8`
-
-       -  b\ :sub:`7`
-
-       -  b\ :sub:`6`
-
-       -  b\ :sub:`5`
-
-       -  b\ :sub:`4`
-
-       -  b\ :sub:`3`
-
-       -  b\ :sub:`2`
-
-       -  b\ :sub:`1`
-
-       -  b\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SGBRG12-1X12:
-
-       -  MEDIA_BUS_FMT_SGBRG12_1X12
-
-       -  0x3010
-
-       -
-       -  g\ :sub:`11`
-
-       -  g\ :sub:`10`
-
-       -  g\ :sub:`9`
-
-       -  g\ :sub:`8`
-
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SGRBG12-1X12:
-
-       -  MEDIA_BUS_FMT_SGRBG12_1X12
-
-       -  0x3011
-
-       -
-       -  g\ :sub:`11`
-
-       -  g\ :sub:`10`
-
-       -  g\ :sub:`9`
-
-       -  g\ :sub:`8`
-
-       -  g\ :sub:`7`
-
-       -  g\ :sub:`6`
-
-       -  g\ :sub:`5`
-
-       -  g\ :sub:`4`
-
-       -  g\ :sub:`3`
-
-       -  g\ :sub:`2`
-
-       -  g\ :sub:`1`
-
-       -  g\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-SRGGB12-1X12:
-
-       -  MEDIA_BUS_FMT_SRGGB12_1X12
-
-       -  0x3012
-
-       -
-       -  r\ :sub:`11`
-
-       -  r\ :sub:`10`
-
-       -  r\ :sub:`9`
-
-       -  r\ :sub:`8`
-
-       -  r\ :sub:`7`
-
-       -  r\ :sub:`6`
-
-       -  r\ :sub:`5`
-
-       -  r\ :sub:`4`
-
-       -  r\ :sub:`3`
-
-       -  r\ :sub:`2`
-
-       -  r\ :sub:`1`
-
-       -  r\ :sub:`0`
-
+    \end{adjustbox}\newline\newline
 
 
 Packed YUV Formats
@@ -3726,7693 +2367,3963 @@
 
 -  a\ :sub:`x` for alpha component bit number x
 
--  - for non-available bits (for positions higher than the bus width)
+- for non-available bits (for positions higher than the bus width)
 
 -  d for dummy bits
 
+.. HACK: ideally, we would be using adjustbox here. However, this
+.. will never work for this table, as, even with tiny font, it is
+.. to big for a single page. So, we need to manually adjust the
+.. size.
+
+.. raw:: latex
+
+    \begingroup
+    \tiny
+    \setlength{\tabcolsep}{2pt}
+
+.. tabularcolumns:: |p{4.0cm}|p{0.7cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
 
 .. _v4l2-mbus-pixelcode-yuv8:
 
 .. flat-table:: YUV Formats
     :header-rows:  2
     :stub-columns: 0
+    :widths: 36 7 3 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
 
+    * - Identifier
+      - Code
+      -
+      - :cspan:`31` Data organization
+    * -
+      -
+      - Bit
+      - 31
+      - 30
+      - 29
+      - 28
+      - 27
+      - 26
+      - 25
+      - 24
+      - 23
+      - 22
+      - 21
+      - 10
+      - 19
+      - 18
+      - 17
+      - 16
+      - 15
+      - 14
+      - 13
+      - 12
+      - 11
+      - 10
+      - 9
+      - 8
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+    * .. _MEDIA-BUS-FMT-Y8-1X8:
 
-    -  .. row 1
+      - MEDIA_BUS_FMT_Y8_1X8
+      - 0x2001
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-UV8-1X8:
 
-       -  Identifier
+      - MEDIA_BUS_FMT_UV8_1X8
+      - 0x2015
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-UYVY8-1_5X8:
 
-       -  Code
+      - MEDIA_BUS_FMT_UYVY8_1_5X8
+      - 0x2002
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-VYUY8-1_5X8:
 
-       -
-       -  :cspan:`31` Data organization
+      - MEDIA_BUS_FMT_VYUY8_1_5X8
+      - 0x2003
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-YUYV8-1_5X8:
 
-    -  .. row 2
+      - MEDIA_BUS_FMT_YUYV8_1_5X8
+      - 0x2004
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-YVYU8-1_5X8:
 
-       -
-       -
-       -  Bit
+      - MEDIA_BUS_FMT_YVYU8_1_5X8
+      - 0x2005
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-UYVY8-2X8:
 
-       -  31
+      - MEDIA_BUS_FMT_UYVY8_2X8
+      - 0x2006
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-VYUY8-2X8:
 
-       -  30
+      - MEDIA_BUS_FMT_VYUY8_2X8
+      - 0x2007
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-YUYV8-2X8:
 
-       -  29
+      - MEDIA_BUS_FMT_YUYV8_2X8
+      - 0x2008
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-YVYU8-2X8:
 
-       -  28
+      - MEDIA_BUS_FMT_YVYU8_2X8
+      - 0x2009
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-Y10-1X10:
 
-       -  27
+      - MEDIA_BUS_FMT_Y10_1X10
+      - 0x200a
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-UYVY10-2X10:
 
-       -  26
+      - MEDIA_BUS_FMT_UYVY10_2X10
+      - 0x2018
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`9`
+      - u\ :sub:`8`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`9`
+      - v\ :sub:`8`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-VYUY10-2X10:
 
-       -  25
+      - MEDIA_BUS_FMT_VYUY10_2X10
+      - 0x2019
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`9`
+      - v\ :sub:`8`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`9`
+      - u\ :sub:`8`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-YUYV10-2X10:
 
-       -  24
+      - MEDIA_BUS_FMT_YUYV10_2X10
+      - 0x200b
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`9`
+      - u\ :sub:`8`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`9`
+      - v\ :sub:`8`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-YVYU10-2X10:
 
-       -  23
+      - MEDIA_BUS_FMT_YVYU10_2X10
+      - 0x200c
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`9`
+      - v\ :sub:`8`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`9`
+      - u\ :sub:`8`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-Y12-1X12:
 
-       -  22
+      - MEDIA_BUS_FMT_Y12_1X12
+      - 0x2013
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`11`
+      - y\ :sub:`10`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-UYVY12-2X12:
 
-       -  21
+      - MEDIA_BUS_FMT_UYVY12_2X12
+      - 0x201c
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`11`
+      - u\ :sub:`10`
+      - u\ :sub:`9`
+      - u\ :sub:`8`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`11`
+      - y\ :sub:`10`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`11`
+      - v\ :sub:`10`
+      - v\ :sub:`9`
+      - v\ :sub:`8`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`11`
+      - y\ :sub:`10`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-VYUY12-2X12:
 
-       -  10
+      - MEDIA_BUS_FMT_VYUY12_2X12
+      - 0x201d
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`11`
+      - v\ :sub:`10`
+      - v\ :sub:`9`
+      - v\ :sub:`8`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`11`
+      - y\ :sub:`10`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`11`
+      - u\ :sub:`10`
+      - u\ :sub:`9`
+      - u\ :sub:`8`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`11`
+      - y\ :sub:`10`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-YUYV12-2X12:
 
-       -  19
+      - MEDIA_BUS_FMT_YUYV12_2X12
+      - 0x201e
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`11`
+      - y\ :sub:`10`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`11`
+      - u\ :sub:`10`
+      - u\ :sub:`9`
+      - u\ :sub:`8`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`11`
+      - y\ :sub:`10`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`11`
+      - v\ :sub:`10`
+      - v\ :sub:`9`
+      - v\ :sub:`8`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-YVYU12-2X12:
 
-       -  18
+      - MEDIA_BUS_FMT_YVYU12_2X12
+      - 0x201f
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`11`
+      - y\ :sub:`10`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`11`
+      - v\ :sub:`10`
+      - v\ :sub:`9`
+      - v\ :sub:`8`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`11`
+      - y\ :sub:`10`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`11`
+      - u\ :sub:`10`
+      - u\ :sub:`9`
+      - u\ :sub:`8`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-UYVY8-1X16:
 
-       -  17
+      - MEDIA_BUS_FMT_UYVY8_1X16
+      - 0x200f
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-VYUY8-1X16:
 
-       -  16
+      - MEDIA_BUS_FMT_VYUY8_1X16
+      - 0x2010
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-YUYV8-1X16:
 
-       -  15
+      - MEDIA_BUS_FMT_YUYV8_1X16
+      - 0x2011
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-YVYU8-1X16:
 
-       -  14
+      - MEDIA_BUS_FMT_YVYU8_1X16
+      - 0x2012
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-YDYUYDYV8-1X16:
 
-       -  13
+      - MEDIA_BUS_FMT_YDYUYDYV8_1X16
+      - 0x2014
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - d
+      - d
+      - d
+      - d
+      - d
+      - d
+      - d
+      - d
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - d
+      - d
+      - d
+      - d
+      - d
+      - d
+      - d
+      - d
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-UYVY10-1X20:
 
-       -  12
+      - MEDIA_BUS_FMT_UYVY10_1X20
+      - 0x201a
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`9`
+      - u\ :sub:`8`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`9`
+      - v\ :sub:`8`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-VYUY10-1X20:
 
-       -  11
+      - MEDIA_BUS_FMT_VYUY10_1X20
+      - 0x201b
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`9`
+      - v\ :sub:`8`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`9`
+      - u\ :sub:`8`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-YUYV10-1X20:
 
-       -  10
+      - MEDIA_BUS_FMT_YUYV10_1X20
+      - 0x200d
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - u\ :sub:`9`
+      - u\ :sub:`8`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - v\ :sub:`9`
+      - v\ :sub:`8`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-YVYU10-1X20:
 
-       -  9
+      - MEDIA_BUS_FMT_YVYU10_1X20
+      - 0x200e
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - v\ :sub:`9`
+      - v\ :sub:`8`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - u\ :sub:`9`
+      - u\ :sub:`8`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-VUY8-1X24:
 
-       -  8
+      - MEDIA_BUS_FMT_VUY8_1X24
+      - 0x201a
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-YUV8-1X24:
 
-       -  7
+      - MEDIA_BUS_FMT_YUV8_1X24
+      - 0x2025
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-UYVY12-1X24:
 
-       -  6
+      - MEDIA_BUS_FMT_UYVY12_1X24
+      - 0x2020
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`11`
+      - u\ :sub:`10`
+      - u\ :sub:`9`
+      - u\ :sub:`8`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+      - y\ :sub:`11`
+      - y\ :sub:`10`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`11`
+      - v\ :sub:`10`
+      - v\ :sub:`9`
+      - v\ :sub:`8`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+      - y\ :sub:`11`
+      - y\ :sub:`10`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-VYUY12-1X24:
 
-       -  5
+      - MEDIA_BUS_FMT_VYUY12_1X24
+      - 0x2021
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - v\ :sub:`11`
+      - v\ :sub:`10`
+      - v\ :sub:`9`
+      - v\ :sub:`8`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+      - y\ :sub:`11`
+      - y\ :sub:`10`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - u\ :sub:`11`
+      - u\ :sub:`10`
+      - u\ :sub:`9`
+      - u\ :sub:`8`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+      - y\ :sub:`11`
+      - y\ :sub:`10`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-YUYV12-1X24:
 
-       -  4
+      - MEDIA_BUS_FMT_YUYV12_1X24
+      - 0x2022
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`11`
+      - y\ :sub:`10`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - u\ :sub:`11`
+      - u\ :sub:`10`
+      - u\ :sub:`9`
+      - u\ :sub:`8`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`11`
+      - y\ :sub:`10`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - v\ :sub:`11`
+      - v\ :sub:`10`
+      - v\ :sub:`9`
+      - v\ :sub:`8`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-YVYU12-1X24:
 
-       -  3
+      - MEDIA_BUS_FMT_YVYU12_1X24
+      - 0x2023
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`11`
+      - y\ :sub:`10`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - v\ :sub:`11`
+      - v\ :sub:`10`
+      - v\ :sub:`9`
+      - v\ :sub:`8`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - y\ :sub:`11`
+      - y\ :sub:`10`
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - u\ :sub:`11`
+      - u\ :sub:`10`
+      - u\ :sub:`9`
+      - u\ :sub:`8`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-YUV10-1X30:
 
-       -  2
+      - MEDIA_BUS_FMT_YUV10_1X30
+      - 0x2016
+      -
+      -
+      -
+      - y\ :sub:`9`
+      - y\ :sub:`8`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - u\ :sub:`9`
+      - u\ :sub:`8`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+      - v\ :sub:`9`
+      - v\ :sub:`8`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-AYUV8-1X32:
 
-       -  1
+      - MEDIA_BUS_FMT_AYUV8_1X32
+      - 0x2017
+      -
+      - a\ :sub:`7`
+      - a\ :sub:`6`
+      - a\ :sub:`5`
+      - a\ :sub:`4`
+      - a\ :sub:`3`
+      - a\ :sub:`2`
+      - a\ :sub:`1`
+      - a\ :sub:`0`
+      - y\ :sub:`7`
+      - y\ :sub:`6`
+      - y\ :sub:`5`
+      - y\ :sub:`4`
+      - y\ :sub:`3`
+      - y\ :sub:`2`
+      - y\ :sub:`1`
+      - y\ :sub:`0`
+      - u\ :sub:`7`
+      - u\ :sub:`6`
+      - u\ :sub:`5`
+      - u\ :sub:`4`
+      - u\ :sub:`3`
+      - u\ :sub:`2`
+      - u\ :sub:`1`
+      - u\ :sub:`0`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
 
-       -  0
 
-    -  .. _MEDIA-BUS-FMT-Y8-1X8:
+.. raw:: latex
 
-       -  MEDIA_BUS_FMT_Y8_1X8
-
-       -  0x2001
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-UV8-1X8:
-
-       -  MEDIA_BUS_FMT_UV8_1X8
-
-       -  0x2015
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. row 5
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-UYVY8-1_5X8:
-
-       -  MEDIA_BUS_FMT_UYVY8_1_5X8
-
-       -  0x2002
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. row 7
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 8
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 9
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. row 10
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 11
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-VYUY8-1_5X8:
-
-       -  MEDIA_BUS_FMT_VYUY8_1_5X8
-
-       -  0x2003
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. row 13
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 14
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 15
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. row 16
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 17
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-YUYV8-1_5X8:
-
-       -  MEDIA_BUS_FMT_YUYV8_1_5X8
-
-       -  0x2004
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 19
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 20
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. row 21
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 22
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 23
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-YVYU8-1_5X8:
-
-       -  MEDIA_BUS_FMT_YVYU8_1_5X8
-
-       -  0x2005
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 25
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 26
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. row 27
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 28
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 29
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-UYVY8-2X8:
-
-       -  MEDIA_BUS_FMT_UYVY8_2X8
-
-       -  0x2006
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. row 31
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 32
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. row 33
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-VYUY8-2X8:
-
-       -  MEDIA_BUS_FMT_VYUY8_2X8
-
-       -  0x2007
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. row 35
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 36
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. row 37
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-YUYV8-2X8:
-
-       -  MEDIA_BUS_FMT_YUYV8_2X8
-
-       -  0x2008
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 39
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. row 40
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 41
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-YVYU8-2X8:
-
-       -  MEDIA_BUS_FMT_YVYU8_2X8
-
-       -  0x2009
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 43
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. row 44
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 45
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-Y10-1X10:
-
-       -  MEDIA_BUS_FMT_Y10_1X10
-
-       -  0x200a
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-UYVY10-2X10:
-
-       -  MEDIA_BUS_FMT_UYVY10_2X10
-
-       -  0x2018
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`9`
-
-       -  u\ :sub:`8`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. row 48
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 49
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`9`
-
-       -  v\ :sub:`8`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. row 50
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-VYUY10-2X10:
-
-       -  MEDIA_BUS_FMT_VYUY10_2X10
-
-       -  0x2019
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`9`
-
-       -  v\ :sub:`8`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. row 52
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 53
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`9`
-
-       -  u\ :sub:`8`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. row 54
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-YUYV10-2X10:
-
-       -  MEDIA_BUS_FMT_YUYV10_2X10
-
-       -  0x200b
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 56
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`9`
-
-       -  u\ :sub:`8`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. row 57
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 58
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`9`
-
-       -  v\ :sub:`8`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-YVYU10-2X10:
-
-       -  MEDIA_BUS_FMT_YVYU10_2X10
-
-       -  0x200c
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 60
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`9`
-
-       -  v\ :sub:`8`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. row 61
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 62
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`9`
-
-       -  u\ :sub:`8`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-Y12-1X12:
-
-       -  MEDIA_BUS_FMT_Y12_1X12
-
-       -  0x2013
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`11`
-
-       -  y\ :sub:`10`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-UYVY12-2X12:
-
-       -  MEDIA_BUS_FMT_UYVY12_2X12
-
-       -  0x201c
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`11`
-
-       -  u\ :sub:`10`
-
-       -  u\ :sub:`9`
-
-       -  u\ :sub:`8`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. row 65
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`11`
-
-       -  y\ :sub:`10`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 66
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`11`
-
-       -  v\ :sub:`10`
-
-       -  v\ :sub:`9`
-
-       -  v\ :sub:`8`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. row 67
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`11`
-
-       -  y\ :sub:`10`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-VYUY12-2X12:
-
-       -  MEDIA_BUS_FMT_VYUY12_2X12
-
-       -  0x201d
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`11`
-
-       -  v\ :sub:`10`
-
-       -  v\ :sub:`9`
-
-       -  v\ :sub:`8`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. row 69
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`11`
-
-       -  y\ :sub:`10`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 70
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`11`
-
-       -  u\ :sub:`10`
-
-       -  u\ :sub:`9`
-
-       -  u\ :sub:`8`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. row 71
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`11`
-
-       -  y\ :sub:`10`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-YUYV12-2X12:
-
-       -  MEDIA_BUS_FMT_YUYV12_2X12
-
-       -  0x201e
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`11`
-
-       -  y\ :sub:`10`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 73
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`11`
-
-       -  u\ :sub:`10`
-
-       -  u\ :sub:`9`
-
-       -  u\ :sub:`8`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. row 74
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`11`
-
-       -  y\ :sub:`10`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 75
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`11`
-
-       -  v\ :sub:`10`
-
-       -  v\ :sub:`9`
-
-       -  v\ :sub:`8`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-YVYU12-2X12:
-
-       -  MEDIA_BUS_FMT_YVYU12_2X12
-
-       -  0x201f
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`11`
-
-       -  y\ :sub:`10`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 77
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`11`
-
-       -  v\ :sub:`10`
-
-       -  v\ :sub:`9`
-
-       -  v\ :sub:`8`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. row 78
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`11`
-
-       -  y\ :sub:`10`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 79
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`11`
-
-       -  u\ :sub:`10`
-
-       -  u\ :sub:`9`
-
-       -  u\ :sub:`8`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-UYVY8-1X16:
-
-       -  MEDIA_BUS_FMT_UYVY8_1X16
-
-       -  0x200f
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 81
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-VYUY8-1X16:
-
-       -  MEDIA_BUS_FMT_VYUY8_1X16
-
-       -  0x2010
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 83
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-YUYV8-1X16:
-
-       -  MEDIA_BUS_FMT_YUYV8_1X16
-
-       -  0x2011
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. row 85
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-YVYU8-1X16:
-
-       -  MEDIA_BUS_FMT_YVYU8_1X16
-
-       -  0x2012
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. row 87
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-YDYUYDYV8-1X16:
-
-       -  MEDIA_BUS_FMT_YDYUYDYV8_1X16
-
-       -  0x2014
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  d
-
-       -  d
-
-       -  d
-
-       -  d
-
-       -  d
-
-       -  d
-
-       -  d
-
-       -  d
-
-    -  .. row 89
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. row 90
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  d
-
-       -  d
-
-       -  d
-
-       -  d
-
-       -  d
-
-       -  d
-
-       -  d
-
-       -  d
-
-    -  .. row 91
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-UYVY10-1X20:
-
-       -  MEDIA_BUS_FMT_UYVY10_1X20
-
-       -  0x201a
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`9`
-
-       -  u\ :sub:`8`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 93
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`9`
-
-       -  v\ :sub:`8`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-VYUY10-1X20:
-
-       -  MEDIA_BUS_FMT_VYUY10_1X20
-
-       -  0x201b
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`9`
-
-       -  v\ :sub:`8`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 95
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`9`
-
-       -  u\ :sub:`8`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-YUYV10-1X20:
-
-       -  MEDIA_BUS_FMT_YUYV10_1X20
-
-       -  0x200d
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  u\ :sub:`9`
-
-       -  u\ :sub:`8`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. row 97
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  v\ :sub:`9`
-
-       -  v\ :sub:`8`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-YVYU10-1X20:
-
-       -  MEDIA_BUS_FMT_YVYU10_1X20
-
-       -  0x200e
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  v\ :sub:`9`
-
-       -  v\ :sub:`8`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. row 99
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  u\ :sub:`9`
-
-       -  u\ :sub:`8`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-VUY8-1X24:
-
-       -  MEDIA_BUS_FMT_VUY8_1X24
-
-       -  0x201a
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-YUV8-1X24:
-
-       -  MEDIA_BUS_FMT_YUV8_1X24
-
-       -  0x2025
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-UYVY12-1X24:
-
-       -  MEDIA_BUS_FMT_UYVY12_1X24
-
-       -  0x2020
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`11`
-
-       -  u\ :sub:`10`
-
-       -  u\ :sub:`9`
-
-       -  u\ :sub:`8`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-       -  y\ :sub:`11`
-
-       -  y\ :sub:`10`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 103
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`11`
-
-       -  v\ :sub:`10`
-
-       -  v\ :sub:`9`
-
-       -  v\ :sub:`8`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-       -  y\ :sub:`11`
-
-       -  y\ :sub:`10`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-VYUY12-1X24:
-
-       -  MEDIA_BUS_FMT_VYUY12_1X24
-
-       -  0x2021
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  v\ :sub:`11`
-
-       -  v\ :sub:`10`
-
-       -  v\ :sub:`9`
-
-       -  v\ :sub:`8`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-       -  y\ :sub:`11`
-
-       -  y\ :sub:`10`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. row 105
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  u\ :sub:`11`
-
-       -  u\ :sub:`10`
-
-       -  u\ :sub:`9`
-
-       -  u\ :sub:`8`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-       -  y\ :sub:`11`
-
-       -  y\ :sub:`10`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-YUYV12-1X24:
-
-       -  MEDIA_BUS_FMT_YUYV12_1X24
-
-       -  0x2022
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`11`
-
-       -  y\ :sub:`10`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  u\ :sub:`11`
-
-       -  u\ :sub:`10`
-
-       -  u\ :sub:`9`
-
-       -  u\ :sub:`8`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. row 107
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`11`
-
-       -  y\ :sub:`10`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  v\ :sub:`11`
-
-       -  v\ :sub:`10`
-
-       -  v\ :sub:`9`
-
-       -  v\ :sub:`8`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-YVYU12-1X24:
-
-       -  MEDIA_BUS_FMT_YVYU12_1X24
-
-       -  0x2023
-
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`11`
-
-       -  y\ :sub:`10`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  v\ :sub:`11`
-
-       -  v\ :sub:`10`
-
-       -  v\ :sub:`9`
-
-       -  v\ :sub:`8`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. row 109
-
-       -
-       -
-       -
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  -
-
-       -  y\ :sub:`11`
-
-       -  y\ :sub:`10`
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  u\ :sub:`11`
-
-       -  u\ :sub:`10`
-
-       -  u\ :sub:`9`
-
-       -  u\ :sub:`8`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-YUV10-1X30:
-
-       -  MEDIA_BUS_FMT_YUV10_1X30
-
-       -  0x2016
-
-       -
-       -  -
-
-       -  -
-
-       -  y\ :sub:`9`
-
-       -  y\ :sub:`8`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  u\ :sub:`9`
-
-       -  u\ :sub:`8`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-       -  v\ :sub:`9`
-
-       -  v\ :sub:`8`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-    -  .. _MEDIA-BUS-FMT-AYUV8-1X32:
-
-       -  MEDIA_BUS_FMT_AYUV8_1X32
-
-       -  0x2017
-
-       -
-       -  a\ :sub:`7`
-
-       -  a\ :sub:`6`
-
-       -  a\ :sub:`5`
-
-       -  a\ :sub:`4`
-
-       -  a\ :sub:`3`
-
-       -  a\ :sub:`2`
-
-       -  a\ :sub:`1`
-
-       -  a\ :sub:`0`
-
-       -  y\ :sub:`7`
-
-       -  y\ :sub:`6`
-
-       -  y\ :sub:`5`
-
-       -  y\ :sub:`4`
-
-       -  y\ :sub:`3`
-
-       -  y\ :sub:`2`
-
-       -  y\ :sub:`1`
-
-       -  y\ :sub:`0`
-
-       -  u\ :sub:`7`
-
-       -  u\ :sub:`6`
-
-       -  u\ :sub:`5`
-
-       -  u\ :sub:`4`
-
-       -  u\ :sub:`3`
-
-       -  u\ :sub:`2`
-
-       -  u\ :sub:`1`
-
-       -  u\ :sub:`0`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
-
+	\endgroup
 
 HSV/HSL Formats
 ^^^^^^^^^^^^^^^
@@ -11447,164 +6358,99 @@
 
 The following table lists existing HSV/HSL formats.
 
+.. raw:: latex
+
+    \newline\newline\begin{adjustbox}{width=\columnwidth}
+
+.. tabularcolumns:: |p{6.2cm}|p{1.6cm}|p{0.7cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|
 
 .. _v4l2-mbus-pixelcode-hsv:
 
 .. flat-table:: HSV/HSL formats
     :header-rows:  2
     :stub-columns: 0
+    :widths: 28 7 3 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
 
+    * - Identifier
+      - Code
+      -
+      - :cspan:`31` Data organization
+    * -
+      -
+      - Bit
+      - 31
+      - 30
+      - 29
+      - 28
+      - 27
+      - 26
+      - 25
+      - 24
+      - 23
+      - 22
+      - 21
+      - 20
+      - 19
+      - 18
+      - 17
+      - 16
+      - 15
+      - 14
+      - 13
+      - 12
+      - 11
+      - 10
+      - 9
+      - 8
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+    * .. _MEDIA-BUS-FMT-AHSV8888-1X32:
 
-    -  .. row 1
+      - MEDIA_BUS_FMT_AHSV8888_1X32
+      - 0x6001
+      -
+      - a\ :sub:`7`
+      - a\ :sub:`6`
+      - a\ :sub:`5`
+      - a\ :sub:`4`
+      - a\ :sub:`3`
+      - a\ :sub:`2`
+      - a\ :sub:`1`
+      - a\ :sub:`0`
+      - h\ :sub:`7`
+      - h\ :sub:`6`
+      - h\ :sub:`5`
+      - h\ :sub:`4`
+      - h\ :sub:`3`
+      - h\ :sub:`2`
+      - h\ :sub:`1`
+      - h\ :sub:`0`
+      - s\ :sub:`7`
+      - s\ :sub:`6`
+      - s\ :sub:`5`
+      - s\ :sub:`4`
+      - s\ :sub:`3`
+      - s\ :sub:`2`
+      - s\ :sub:`1`
+      - s\ :sub:`0`
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
 
-       -  Identifier
+.. raw:: latex
 
-       -  Code
-
-       -
-       -  :cspan:`31` Data organization
-
-    -  .. row 2
-
-       -
-       -
-       -  Bit
-
-       -  31
-
-       -  30
-
-       -  29
-
-       -  28
-
-       -  27
-
-       -  26
-
-       -  25
-
-       -  24
-
-       -  23
-
-       -  22
-
-       -  21
-
-       -  20
-
-       -  19
-
-       -  18
-
-       -  17
-
-       -  16
-
-       -  15
-
-       -  14
-
-       -  13
-
-       -  12
-
-       -  11
-
-       -  10
-
-       -  9
-
-       -  8
-
-       -  7
-
-       -  6
-
-       -  5
-
-       -  4
-
-       -  3
-
-       -  2
-
-       -  1
-
-       -  0
-
-    -  .. _MEDIA-BUS-FMT-AHSV8888-1X32:
-
-       -  MEDIA_BUS_FMT_AHSV8888_1X32
-
-       -  0x6001
-
-       -
-       -  a\ :sub:`7`
-
-       -  a\ :sub:`6`
-
-       -  a\ :sub:`5`
-
-       -  a\ :sub:`4`
-
-       -  a\ :sub:`3`
-
-       -  a\ :sub:`2`
-
-       -  a\ :sub:`1`
-
-       -  a\ :sub:`0`
-
-       -  h\ :sub:`7`
-
-       -  h\ :sub:`6`
-
-       -  h\ :sub:`5`
-
-       -  h\ :sub:`4`
-
-       -  h\ :sub:`3`
-
-       -  h\ :sub:`2`
-
-       -  h\ :sub:`1`
-
-       -  h\ :sub:`0`
-
-       -  s\ :sub:`7`
-
-       -  s\ :sub:`6`
-
-       -  s\ :sub:`5`
-
-       -  s\ :sub:`4`
-
-       -  s\ :sub:`3`
-
-       -  s\ :sub:`2`
-
-       -  s\ :sub:`1`
-
-       -  s\ :sub:`0`
-
-       -  v\ :sub:`7`
-
-       -  v\ :sub:`6`
-
-       -  v\ :sub:`5`
-
-       -  v\ :sub:`4`
-
-       -  v\ :sub:`3`
-
-       -  v\ :sub:`2`
-
-       -  v\ :sub:`1`
-
-       -  v\ :sub:`0`
-
+    \end{adjustbox}\newline\newline
 
 
 JPEG Compressed Formats
@@ -11626,28 +6472,22 @@
 
 .. _v4l2-mbus-pixelcode-jpeg:
 
+.. tabularcolumns:: |p{5.6cm}|p{1.2cm}|p{10.7cm}|
+
 .. flat-table:: JPEG Formats
     :header-rows:  1
     :stub-columns: 0
 
+    * - Identifier
+      - Code
+      - Remarks
+    * .. _MEDIA-BUS-FMT-JPEG-1X8:
 
-    -  .. row 1
-
-       -  Identifier
-
-       -  Code
-
-       -  Remarks
-
-    -  .. _MEDIA-BUS-FMT-JPEG-1X8:
-
-       -  MEDIA_BUS_FMT_JPEG_1X8
-
-       -  0x4001
-
-       -  Besides of its usage for the parallel bus this format is
-	  recommended for transmission of JPEG data over MIPI CSI bus using
-	  the User Defined 8-bit Data types.
+      - MEDIA_BUS_FMT_JPEG_1X8
+      - 0x4001
+      - Besides of its usage for the parallel bus this format is
+	recommended for transmission of JPEG data over MIPI CSI bus using
+	the User Defined 8-bit Data types.
 
 
 
@@ -11665,24 +6505,18 @@
 
 .. _v4l2-mbus-pixelcode-vendor-specific:
 
+.. tabularcolumns:: |p{6.6cm}|p{1.2cm}|p{9.7cm}|
+
 .. flat-table:: Vendor and device specific formats
     :header-rows:  1
     :stub-columns: 0
 
+    * - Identifier
+      - Code
+      - Comments
+    * .. _MEDIA-BUS-FMT-S5C-UYVY-JPEG-1X8:
 
-    -  .. row 1
-
-       -  Identifier
-
-       -  Code
-
-       -  Comments
-
-    -  .. _MEDIA-BUS-FMT-S5C-UYVY-JPEG-1X8:
-
-       -  MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8
-
-       -  0x5001
-
-       -  Interleaved raw UYVY and JPEG image format with embedded meta-data
-	  used by Samsung S3C73MX camera sensors.
+      - MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8
+      - 0x5001
+      - Interleaved raw UYVY and JPEG image format with embedded meta-data
+	used by Samsung S3C73MX camera sensors.
diff --git a/Documentation/media/uapi/v4l/tch-formats.rst b/Documentation/media/uapi/v4l/tch-formats.rst
new file mode 100644
index 0000000..dbaabf3
--- /dev/null
+++ b/Documentation/media/uapi/v4l/tch-formats.rst
@@ -0,0 +1,18 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _tch-formats:
+
+*************
+Touch Formats
+*************
+
+These formats are used for :ref:`touch` interface only.
+
+
+.. toctree::
+    :maxdepth: 1
+
+    pixfmt-tch-td16
+    pixfmt-tch-td08
+    pixfmt-tch-tu16
+    pixfmt-tch-tu08
diff --git a/Documentation/media/uapi/v4l/tuner.rst b/Documentation/media/uapi/v4l/tuner.rst
index 37eb4b9..ad117b0 100644
--- a/Documentation/media/uapi/v4l/tuner.rst
+++ b/Documentation/media/uapi/v4l/tuner.rst
@@ -13,7 +13,7 @@
 Video input devices can have one or more tuners demodulating a RF
 signal. Each tuner is associated with one or more video inputs,
 depending on the number of RF connectors on the tuner. The ``type``
-field of the respective struct :ref:`v4l2_input <v4l2-input>`
+field of the respective struct :c:type:`v4l2_input`
 returned by the :ref:`VIDIOC_ENUMINPUT` ioctl is
 set to ``V4L2_INPUT_TYPE_TUNER`` and its ``tuner`` field contains the
 index number of the tuner.
@@ -24,15 +24,17 @@
 To query and change tuner properties applications use the
 :ref:`VIDIOC_G_TUNER <VIDIOC_G_TUNER>` and
 :ref:`VIDIOC_S_TUNER <VIDIOC_G_TUNER>` ioctls, respectively. The
-struct :ref:`v4l2_tuner <v4l2-tuner>` returned by :ref:`VIDIOC_G_TUNER <VIDIOC_G_TUNER>`
+struct :c:type:`v4l2_tuner` returned by :ref:`VIDIOC_G_TUNER <VIDIOC_G_TUNER>`
 also contains signal status information applicable when the tuner of the
 current video or radio input is queried.
 
-.. note:: :ref:`VIDIOC_S_TUNER <VIDIOC_G_TUNER>` does not switch the
+.. note::
+
+   :ref:`VIDIOC_S_TUNER <VIDIOC_G_TUNER>` does not switch the
    current tuner, when there is more than one at all. The tuner is solely
    determined by the current video input. Drivers must support both ioctls
-   and set the ``V4L2_CAP_TUNER`` flag in the struct :ref:`v4l2_capability
-   <v4l2-capability>` returned by the :ref:`VIDIOC_QUERYCAP` ioctl when the
+   and set the ``V4L2_CAP_TUNER`` flag in the struct :c:type:`v4l2_capability`
+   returned by the :ref:`VIDIOC_QUERYCAP` ioctl when the
    device has one or more tuners.
 
 
@@ -44,7 +46,7 @@
 set or video recorder. Each modulator is associated with one or more
 video outputs, depending on the number of RF connectors on the
 modulator. The ``type`` field of the respective struct
-:ref:`v4l2_output <v4l2-output>` returned by the
+:c:type:`v4l2_output` returned by the
 :ref:`VIDIOC_ENUMOUTPUT` ioctl is set to
 ``V4L2_OUTPUT_TYPE_MODULATOR`` and its ``modulator`` field contains the
 index number of the modulator.
@@ -66,7 +68,7 @@
 is more than one at all. The modulator is solely determined by the
 current video output. Drivers must support both ioctls and set the
 ``V4L2_CAP_MODULATOR`` flag in the struct
-:ref:`v4l2_capability <v4l2-capability>` returned by the
+:c:type:`v4l2_capability` returned by the
 :ref:`VIDIOC_QUERYCAP` ioctl when the device has
 one or more modulators.
 
@@ -77,7 +79,7 @@
 To get and set the tuner or modulator radio frequency applications use
 the :ref:`VIDIOC_G_FREQUENCY <VIDIOC_G_FREQUENCY>` and
 :ref:`VIDIOC_S_FREQUENCY <VIDIOC_G_FREQUENCY>` ioctl which both take
-a pointer to a struct :ref:`v4l2_frequency <v4l2-frequency>`. These
+a pointer to a struct :c:type:`v4l2_frequency`. These
 ioctls are used for TV and radio devices alike. Drivers must support
 both ioctls when the tuner or modulator ioctls are supported, or when
 the device is a radio device.
diff --git a/Documentation/media/uapi/v4l/userp.rst b/Documentation/media/uapi/v4l/userp.rst
index 1d8b14b..dc2893a 100644
--- a/Documentation/media/uapi/v4l/userp.rst
+++ b/Documentation/media/uapi/v4l/userp.rst
@@ -8,7 +8,7 @@
 
 Input and output devices support this I/O method when the
 ``V4L2_CAP_STREAMING`` flag in the ``capabilities`` field of struct
-:ref:`v4l2_capability <v4l2-capability>` returned by the
+:c:type:`v4l2_capability` returned by the
 :ref:`VIDIOC_QUERYCAP` ioctl is set. If the
 particular user pointer method (not only memory mapping) is supported
 must be determined by calling the :ref:`VIDIOC_REQBUFS` ioctl
@@ -18,8 +18,8 @@
 methods. Buffers (planes) are allocated by the application itself, and
 can reside for example in virtual or shared memory. Only pointers to
 data are exchanged, these pointers and meta-information are passed in
-struct :ref:`v4l2_buffer <v4l2-buffer>` (or in struct
-:ref:`v4l2_plane <v4l2-plane>` in the multi-planar API case). The
+struct :c:type:`v4l2_buffer` (or in struct
+:c:type:`v4l2_plane` in the multi-planar API case). The
 driver must be switched into user pointer I/O mode by calling the
 :ref:`VIDIOC_REQBUFS` with the desired buffer type.
 No buffers (planes) are allocated beforehand, consequently they are not
@@ -88,11 +88,13 @@
 :ref:`VIDIOC_STREAMON <VIDIOC_STREAMON>` and
 :ref:`VIDIOC_STREAMOFF <VIDIOC_STREAMON>` ioctl.
 
-.. note:: ref:`VIDIOC_STREAMOFF <VIDIOC_STREAMON>` removes all buffers from
+.. note::
+
+   ref:`VIDIOC_STREAMOFF <VIDIOC_STREAMON>` removes all buffers from
    both queues and unlocks all buffers as a side effect. Since there is no
    notion of doing anything "now" on a multitasking system, if an
    application needs to synchronize with another event it should examine
-   the struct :ref:`v4l2_buffer <v4l2-buffer>` ``timestamp`` of captured or
+   the struct :c:type:`v4l2_buffer` ``timestamp`` of captured or
    outputted buffers.
 
 Drivers implementing user pointer I/O must support the
diff --git a/Documentation/media/uapi/v4l/v4l2-selection-flags.rst b/Documentation/media/uapi/v4l/v4l2-selection-flags.rst
index 3ce3731..1f9a038 100644
--- a/Documentation/media/uapi/v4l/v4l2-selection-flags.rst
+++ b/Documentation/media/uapi/v4l/v4l2-selection-flags.rst
@@ -6,6 +6,7 @@
 Selection flags
 ***************
 
+.. tabularcolumns:: |p{5.2cm}|p{2.0cm}|p{6.5cm}|p{1.2cm}|p{1.6cm}|
 
 .. _v4l2-selection-flags-table:
 
@@ -13,59 +14,31 @@
     :header-rows:  1
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  Flag name
-
-       -  id
-
-       -  Definition
-
-       -  Valid for V4L2
-
-       -  Valid for V4L2 subdev
-
-    -  .. row 2
-
-       -  ``V4L2_SEL_FLAG_GE``
-
-       -  (1 << 0)
-
-       -  Suggest the driver it should choose greater or equal rectangle (in
-	  size) than was requested. Albeit the driver may choose a lesser
-	  size, it will only do so due to hardware limitations. Without this
-	  flag (and ``V4L2_SEL_FLAG_LE``) the behaviour is to choose the
-	  closest possible rectangle.
-
-       -  Yes
-
-       -  Yes
-
-    -  .. row 3
-
-       -  ``V4L2_SEL_FLAG_LE``
-
-       -  (1 << 1)
-
-       -  Suggest the driver it should choose lesser or equal rectangle (in
-	  size) than was requested. Albeit the driver may choose a greater
-	  size, it will only do so due to hardware limitations.
-
-       -  Yes
-
-       -  Yes
-
-    -  .. row 4
-
-       -  ``V4L2_SEL_FLAG_KEEP_CONFIG``
-
-       -  (1 << 2)
-
-       -  The configuration must not be propagated to any further processing
-	  steps. If this flag is not given, the configuration is propagated
-	  inside the subdevice to all further processing steps.
-
-       -  No
-
-       -  Yes
+    * - Flag name
+      - id
+      - Definition
+      - Valid for V4L2
+      - Valid for V4L2 subdev
+    * - ``V4L2_SEL_FLAG_GE``
+      - (1 << 0)
+      - Suggest the driver it should choose greater or equal rectangle (in
+	size) than was requested. Albeit the driver may choose a lesser
+	size, it will only do so due to hardware limitations. Without this
+	flag (and ``V4L2_SEL_FLAG_LE``) the behaviour is to choose the
+	closest possible rectangle.
+      - Yes
+      - Yes
+    * - ``V4L2_SEL_FLAG_LE``
+      - (1 << 1)
+      - Suggest the driver it should choose lesser or equal rectangle (in
+	size) than was requested. Albeit the driver may choose a greater
+	size, it will only do so due to hardware limitations.
+      - Yes
+      - Yes
+    * - ``V4L2_SEL_FLAG_KEEP_CONFIG``
+      - (1 << 2)
+      - The configuration must not be propagated to any further processing
+	steps. If this flag is not given, the configuration is propagated
+	inside the subdevice to all further processing steps.
+      - No
+      - Yes
diff --git a/Documentation/media/uapi/v4l/v4l2-selection-targets.rst b/Documentation/media/uapi/v4l/v4l2-selection-targets.rst
index 7519099..cab07de 100644
--- a/Documentation/media/uapi/v4l/v4l2-selection-targets.rst
+++ b/Documentation/media/uapi/v4l/v4l2-selection-targets.rst
@@ -12,124 +12,63 @@
 
 .. _v4l2-selection-targets-table:
 
+.. tabularcolumns:: |p{5.8cm}|p{1.4cm}|p{6.5cm}|p{1.2cm}|p{1.6cm}|
+
 .. flat-table:: Selection target definitions
     :header-rows:  1
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  Target name
-
-       -  id
-
-       -  Definition
-
-       -  Valid for V4L2
-
-       -  Valid for V4L2 subdev
-
-    -  .. row 2
-
-       -  ``V4L2_SEL_TGT_CROP``
-
-       -  0x0000
-
-       -  Crop rectangle. Defines the cropped area.
-
-       -  Yes
-
-       -  Yes
-
-    -  .. row 3
-
-       -  ``V4L2_SEL_TGT_CROP_DEFAULT``
-
-       -  0x0001
-
-       -  Suggested cropping rectangle that covers the "whole picture".
-
-       -  Yes
-
-       -  No
-
-    -  .. row 4
-
-       -  ``V4L2_SEL_TGT_CROP_BOUNDS``
-
-       -  0x0002
-
-       -  Bounds of the crop rectangle. All valid crop rectangles fit inside
-	  the crop bounds rectangle.
-
-       -  Yes
-
-       -  Yes
-
-    -  .. row 5
-
-       -  ``V4L2_SEL_TGT_NATIVE_SIZE``
-
-       -  0x0003
-
-       -  The native size of the device, e.g. a sensor's pixel array.
-	  ``left`` and ``top`` fields are zero for this target. Setting the
-	  native size will generally only make sense for memory to memory
-	  devices where the software can create a canvas of a given size in
-	  which for example a video frame can be composed. In that case
-	  V4L2_SEL_TGT_NATIVE_SIZE can be used to configure the size of
-	  that canvas.
-
-       -  Yes
-
-       -  Yes
-
-    -  .. row 6
-
-       -  ``V4L2_SEL_TGT_COMPOSE``
-
-       -  0x0100
-
-       -  Compose rectangle. Used to configure scaling and composition.
-
-       -  Yes
-
-       -  Yes
-
-    -  .. row 7
-
-       -  ``V4L2_SEL_TGT_COMPOSE_DEFAULT``
-
-       -  0x0101
-
-       -  Suggested composition rectangle that covers the "whole picture".
-
-       -  Yes
-
-       -  No
-
-    -  .. row 8
-
-       -  ``V4L2_SEL_TGT_COMPOSE_BOUNDS``
-
-       -  0x0102
-
-       -  Bounds of the compose rectangle. All valid compose rectangles fit
-	  inside the compose bounds rectangle.
-
-       -  Yes
-
-       -  Yes
-
-    -  .. row 9
-
-       -  ``V4L2_SEL_TGT_COMPOSE_PADDED``
-
-       -  0x0103
-
-       -  The active area and all padding pixels that are inserted or
-	  modified by hardware.
-
-       -  Yes
-
-       -  No
+    * - Target name
+      - id
+      - Definition
+      - Valid for V4L2
+      - Valid for V4L2 subdev
+    * - ``V4L2_SEL_TGT_CROP``
+      - 0x0000
+      - Crop rectangle. Defines the cropped area.
+      - Yes
+      - Yes
+    * - ``V4L2_SEL_TGT_CROP_DEFAULT``
+      - 0x0001
+      - Suggested cropping rectangle that covers the "whole picture".
+      - Yes
+      - No
+    * - ``V4L2_SEL_TGT_CROP_BOUNDS``
+      - 0x0002
+      - Bounds of the crop rectangle. All valid crop rectangles fit inside
+	the crop bounds rectangle.
+      - Yes
+      - Yes
+    * - ``V4L2_SEL_TGT_NATIVE_SIZE``
+      - 0x0003
+      - The native size of the device, e.g. a sensor's pixel array.
+	``left`` and ``top`` fields are zero for this target. Setting the
+	native size will generally only make sense for memory to memory
+	devices where the software can create a canvas of a given size in
+	which for example a video frame can be composed. In that case
+	V4L2_SEL_TGT_NATIVE_SIZE can be used to configure the size of
+	that canvas.
+      - Yes
+      - Yes
+    * - ``V4L2_SEL_TGT_COMPOSE``
+      - 0x0100
+      - Compose rectangle. Used to configure scaling and composition.
+      - Yes
+      - Yes
+    * - ``V4L2_SEL_TGT_COMPOSE_DEFAULT``
+      - 0x0101
+      - Suggested composition rectangle that covers the "whole picture".
+      - Yes
+      - No
+    * - ``V4L2_SEL_TGT_COMPOSE_BOUNDS``
+      - 0x0102
+      - Bounds of the compose rectangle. All valid compose rectangles fit
+	inside the compose bounds rectangle.
+      - Yes
+      - Yes
+    * - ``V4L2_SEL_TGT_COMPOSE_PADDED``
+      - 0x0103
+      - The active area and all padding pixels that are inserted or
+	modified by hardware.
+      - Yes
+      - No
diff --git a/Documentation/media/uapi/v4l/v4l2.rst b/Documentation/media/uapi/v4l/v4l2.rst
index 5e41a85..55b959d 100644
--- a/Documentation/media/uapi/v4l/v4l2.rst
+++ b/Documentation/media/uapi/v4l/v4l2.rst
@@ -112,16 +112,16 @@
 :revision: 3.19 / 2014-12-05 (*hv*)
 
 Rewrote Colorspace chapter, added new enum
-:ref:`v4l2_ycbcr_encoding <v4l2-ycbcr-encoding>` and enum
-:ref:`v4l2_quantization <v4l2-quantization>` fields to struct
-:ref:`v4l2_pix_format <v4l2-pix-format>`, struct
-:ref:`v4l2_pix_format_mplane <v4l2-pix-format-mplane>` and struct
-:ref:`v4l2_mbus_framefmt <v4l2-mbus-framefmt>`.
+:c:type:`v4l2_ycbcr_encoding` and enum
+:c:type:`v4l2_quantization` fields to struct
+:c:type:`v4l2_pix_format`, struct
+:c:type:`v4l2_pix_format_mplane` and struct
+:c:type:`v4l2_mbus_framefmt`.
 
 
 :revision: 3.17 / 2014-08-04 (*lp, hv*)
 
-Extended struct :ref:`v4l2_pix_format <v4l2-pix-format>`. Added
+Extended struct :c:type:`v4l2_pix_format`. Added
 format flags. Added compound control types and VIDIOC_QUERY_EXT_CTRL.
 
 
diff --git a/Documentation/media/uapi/v4l/video.rst b/Documentation/media/uapi/v4l/video.rst
index d3f0071..a205fb8 100644
--- a/Documentation/media/uapi/v4l/video.rst
+++ b/Documentation/media/uapi/v4l/video.rst
@@ -16,7 +16,7 @@
 outputs applications can enumerate them with the
 :ref:`VIDIOC_ENUMINPUT` and
 :ref:`VIDIOC_ENUMOUTPUT` ioctl, respectively. The
-struct :ref:`v4l2_input <v4l2-input>` returned by the
+struct :c:type:`v4l2_input` returned by the
 :ref:`VIDIOC_ENUMINPUT` ioctl also contains signal
 :status information applicable when the current video input is queried.
 
diff --git a/Documentation/media/uapi/v4l/vidioc-create-bufs.rst b/Documentation/media/uapi/v4l/vidioc-create-bufs.rst
index abdc0b4..aaca12f 100644
--- a/Documentation/media/uapi/v4l/vidioc-create-bufs.rst
+++ b/Documentation/media/uapi/v4l/vidioc-create-bufs.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_create_buffers *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_CREATE_BUFS, struct v4l2_create_buffers *argp )
+    :name: VIDIOC_CREATE_BUFS
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_CREATE_BUFS
-
 ``argp``
 
 
@@ -41,14 +39,14 @@
 create buffers of different sizes.
 
 To allocate the device buffers applications must initialize the relevant
-fields of the :ref:`struct v4l2_create_buffers <v4l2-create-buffers>` structure. The
+fields of the struct :c:type:`v4l2_create_buffers` structure. The
 ``count`` field must be set to the number of requested buffers, the
 ``memory`` field specifies the requested I/O method and the ``reserved``
 array must be zeroed.
 
 The ``format`` field specifies the image format that the buffers must be
 able to handle. The application has to fill in this struct
-:ref:`v4l2_format <v4l2-format>`. Usually this will be done using the
+:c:type:`v4l2_format`. Usually this will be done using the
 :ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>` or
 :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` ioctls to ensure that the
 requested format is supported by the driver. Based on the format's
@@ -73,62 +71,39 @@
 than the number requested.
 
 
-.. _v4l2-create-buffers:
+.. c:type:: v4l2_create_buffers
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_create_buffers
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``index``
-
-       -  The starting buffer index, returned by the driver.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``count``
-
-       -  The number of buffers requested or granted. If count == 0, then
-	  :ref:`VIDIOC_CREATE_BUFS` will set ``index`` to the current number of
-	  created buffers, and it will check the validity of ``memory`` and
-	  ``format.type``. If those are invalid -1 is returned and errno is
-	  set to ``EINVAL`` error code, otherwise :ref:`VIDIOC_CREATE_BUFS` returns
-	  0. It will never set errno to ``EBUSY`` error code in this particular
-	  case.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``memory``
-
-       -  Applications set this field to ``V4L2_MEMORY_MMAP``,
-	  ``V4L2_MEMORY_DMABUF`` or ``V4L2_MEMORY_USERPTR``. See
-	  :ref:`v4l2-memory`
-
-    -  .. row 4
-
-       -  struct :ref:`v4l2_format <v4l2-format>`
-
-       -  ``format``
-
-       -  Filled in by the application, preserved by the driver.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``reserved``\ [8]
-
-       -  A place holder for future extensions. Drivers and applications
-	  must set the array to zero.
+    * - __u32
+      - ``index``
+      - The starting buffer index, returned by the driver.
+    * - __u32
+      - ``count``
+      - The number of buffers requested or granted. If count == 0, then
+	:ref:`VIDIOC_CREATE_BUFS` will set ``index`` to the current number of
+	created buffers, and it will check the validity of ``memory`` and
+	``format.type``. If those are invalid -1 is returned and errno is
+	set to ``EINVAL`` error code, otherwise :ref:`VIDIOC_CREATE_BUFS` returns
+	0. It will never set errno to ``EBUSY`` error code in this particular
+	case.
+    * - __u32
+      - ``memory``
+      - Applications set this field to ``V4L2_MEMORY_MMAP``,
+	``V4L2_MEMORY_DMABUF`` or ``V4L2_MEMORY_USERPTR``. See
+	:c:type:`v4l2_memory`
+    * - struct :c:type:`v4l2_format`
+      - ``format``
+      - Filled in by the application, preserved by the driver.
+    * - __u32
+      - ``reserved``\ [8]
+      - A place holder for future extensions. Drivers and applications
+	must set the array to zero.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-cropcap.rst b/Documentation/media/uapi/v4l/vidioc-cropcap.rst
index 8dcbe6d..f21a69b 100644
--- a/Documentation/media/uapi/v4l/vidioc-cropcap.rst
+++ b/Documentation/media/uapi/v4l/vidioc-cropcap.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_cropcap *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_CROPCAP, struct v4l2_cropcap *argp )
+    :name: VIDIOC_CROPCAP
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_CROPCAP
-
 ``argp``
 
 
@@ -52,107 +50,71 @@
 overlay devices.
 
 
-.. _v4l2-cropcap:
+.. c:type:: v4l2_cropcap
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_cropcap
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
+    * - __u32
+      - ``type``
+      - Type of the data stream, set by the application. Only these types
+	are valid here: ``V4L2_BUF_TYPE_VIDEO_CAPTURE``,
+	``V4L2_BUF_TYPE_VIDEO_OUTPUT`` and
+	``V4L2_BUF_TYPE_VIDEO_OVERLAY``. See :c:type:`v4l2_buf_type`.
+    * - struct :ref:`v4l2_rect <v4l2-rect-crop>`
+      - ``bounds``
+      - Defines the window within capturing or output is possible, this
+	may exclude for example the horizontal and vertical blanking
+	areas. The cropping rectangle cannot exceed these limits. Width
+	and height are defined in pixels, the driver writer is free to
+	choose origin and units of the coordinate system in the analog
+	domain.
+    * - struct :ref:`v4l2_rect <v4l2-rect-crop>`
+      - ``defrect``
+      - Default cropping rectangle, it shall cover the "whole picture".
+	Assuming pixel aspect 1/1 this could be for example a 640 × 480
+	rectangle for NTSC, a 768 × 576 rectangle for PAL and SECAM
+	centered over the active picture area. The same co-ordinate system
+	as for ``bounds`` is used.
+    * - struct :c:type:`v4l2_fract`
+      - ``pixelaspect``
+      - This is the pixel aspect (y / x) when no scaling is applied, the
+	ratio of the actual sampling frequency and the frequency required
+	to get square pixels.
 
-    -  .. row 1
-
-       -  __u32
-
-       -  ``type``
-
-       -  Type of the data stream, set by the application. Only these types
-	  are valid here: ``V4L2_BUF_TYPE_VIDEO_CAPTURE``,
-	  ``V4L2_BUF_TYPE_VIDEO_OUTPUT`` and
-	  ``V4L2_BUF_TYPE_VIDEO_OVERLAY``. See :ref:`v4l2-buf-type`.
-
-    -  .. row 2
-
-       -  struct :ref:`v4l2_rect <v4l2-rect-crop>`
-
-       -  ``bounds``
-
-       -  Defines the window within capturing or output is possible, this
-	  may exclude for example the horizontal and vertical blanking
-	  areas. The cropping rectangle cannot exceed these limits. Width
-	  and height are defined in pixels, the driver writer is free to
-	  choose origin and units of the coordinate system in the analog
-	  domain.
-
-    -  .. row 3
-
-       -  struct :ref:`v4l2_rect <v4l2-rect-crop>`
-
-       -  ``defrect``
-
-       -  Default cropping rectangle, it shall cover the "whole picture".
-	  Assuming pixel aspect 1/1 this could be for example a 640 × 480
-	  rectangle for NTSC, a 768 × 576 rectangle for PAL and SECAM
-	  centered over the active picture area. The same co-ordinate system
-	  as for ``bounds`` is used.
-
-    -  .. row 4
-
-       -  struct :ref:`v4l2_fract <v4l2-fract>`
-
-       -  ``pixelaspect``
-
-       -  This is the pixel aspect (y / x) when no scaling is applied, the
-	  ratio of the actual sampling frequency and the frequency required
-	  to get square pixels.
-
-	  When cropping coordinates refer to square pixels, the driver sets
-	  ``pixelaspect`` to 1/1. Other common values are 54/59 for PAL and
-	  SECAM, 11/10 for NTSC sampled according to [:ref:`itu601`].
+	When cropping coordinates refer to square pixels, the driver sets
+	``pixelaspect`` to 1/1. Other common values are 54/59 for PAL and
+	SECAM, 11/10 for NTSC sampled according to [:ref:`itu601`].
 
 
 
 .. _v4l2-rect-crop:
 
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
 .. flat-table:: struct v4l2_rect
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __s32
-
-       -  ``left``
-
-       -  Horizontal offset of the top, left corner of the rectangle, in
-	  pixels.
-
-    -  .. row 2
-
-       -  __s32
-
-       -  ``top``
-
-       -  Vertical offset of the top, left corner of the rectangle, in
-	  pixels.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``width``
-
-       -  Width of the rectangle, in pixels.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``height``
-
-       -  Height of the rectangle, in pixels.
+    * - __s32
+      - ``left``
+      - Horizontal offset of the top, left corner of the rectangle, in
+	pixels.
+    * - __s32
+      - ``top``
+      - Vertical offset of the top, left corner of the rectangle, in
+	pixels.
+    * - __u32
+      - ``width``
+      - Width of the rectangle, in pixels.
+    * - __u32
+      - ``height``
+      - Height of the rectangle, in pixels.
 
 
 Return Value
@@ -163,5 +125,8 @@
 :ref:`Generic Error Codes <gen-errors>` chapter.
 
 EINVAL
-    The struct :ref:`v4l2_cropcap <v4l2-cropcap>` ``type`` is
+    The struct :c:type:`v4l2_cropcap` ``type`` is
     invalid.
+
+ENODATA
+    Cropping is not supported for this input or output.
diff --git a/Documentation/media/uapi/v4l/vidioc-dbg-g-chip-info.rst b/Documentation/media/uapi/v4l/vidioc-dbg-g-chip-info.rst
index f7e1b80..e1e5507 100644
--- a/Documentation/media/uapi/v4l/vidioc-dbg-g-chip-info.rst
+++ b/Documentation/media/uapi/v4l/vidioc-dbg-g-chip-info.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_dbg_chip_info *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_DBG_G_CHIP_INFO, struct v4l2_dbg_chip_info *argp )
+    :name: VIDIOC_DBG_G_CHIP_INFO
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_DBG_G_CHIP_INFO
-
 ``argp``
 
 
@@ -50,7 +48,7 @@
 
 To query the driver applications must initialize the ``match.type`` and
 ``match.addr`` or ``match.name`` fields of a struct
-:ref:`v4l2_dbg_chip_info <v4l2-dbg-chip-info>` and call
+:c:type:`v4l2_dbg_chip_info` and call
 :ref:`VIDIOC_DBG_G_CHIP_INFO` with a pointer to this structure. On success
 the driver stores information about the selected chip in the ``name``
 and ``flags`` fields.
@@ -78,6 +76,8 @@
 instructions.
 
 
+.. tabularcolumns:: |p{3.5cm}|p{3.5cm}|p{3.5cm}|p{7.0cm}|
+
 .. _name-v4l2-dbg-match:
 
 .. flat-table:: struct v4l2_dbg_match
@@ -85,88 +85,53 @@
     :stub-columns: 0
     :widths:       1 1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``type``
-
-       -  See :ref:`name-chip-match-types` for a list of possible types.
-
-    -  .. row 2
-
-       -  union
-
-       -  (anonymous)
-
-    -  .. row 3
-
-       -
-       -  __u32
-
-       -  ``addr``
-
-       -  Match a chip by this number, interpreted according to the ``type``
-	  field.
-
-    -  .. row 4
-
-       -
-       -  char
-
-       -  ``name[32]``
-
-       -  Match a chip by this name, interpreted according to the ``type``
-	  field. Currently unused.
+    * - __u32
+      - ``type``
+      - See :ref:`name-chip-match-types` for a list of possible types.
+    * - union
+      - (anonymous)
+    * -
+      - __u32
+      - ``addr``
+      - Match a chip by this number, interpreted according to the ``type``
+	field.
+    * -
+      - char
+      - ``name[32]``
+      - Match a chip by this name, interpreted according to the ``type``
+	field. Currently unused.
 
 
 
-.. _v4l2-dbg-chip-info:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_dbg_chip_info
 
 .. flat-table:: struct v4l2_dbg_chip_info
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  struct v4l2_dbg_match
-
-       -  ``match``
-
-       -  How to match the chip, see :ref:`name-v4l2-dbg-match`.
-
-    -  .. row 2
-
-       -  char
-
-       -  ``name[32]``
-
-       -  The name of the chip.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``flags``
-
-       -  Set by the driver. If ``V4L2_CHIP_FL_READABLE`` is set, then the
-	  driver supports reading registers from the device. If
-	  ``V4L2_CHIP_FL_WRITABLE`` is set, then it supports writing
-	  registers.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``reserved[8]``
-
-       -  Reserved fields, both application and driver must set these to 0.
+    * - struct v4l2_dbg_match
+      - ``match``
+      - How to match the chip, see :ref:`name-v4l2-dbg-match`.
+    * - char
+      - ``name[32]``
+      - The name of the chip.
+    * - __u32
+      - ``flags``
+      - Set by the driver. If ``V4L2_CHIP_FL_READABLE`` is set, then the
+	driver supports reading registers from the device. If
+	``V4L2_CHIP_FL_WRITABLE`` is set, then it supports writing
+	registers.
+    * - __u32
+      - ``reserved[8]``
+      - Reserved fields, both application and driver must set these to 0.
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _name-chip-match-types:
 
 .. flat-table:: Chip Match Types
@@ -174,23 +139,13 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_CHIP_MATCH_BRIDGE``
-
-       -  0
-
-       -  Match the nth chip on the card, zero for the bridge chip. Does not
-	  match sub-devices.
-
-    -  .. row 2
-
-       -  ``V4L2_CHIP_MATCH_SUBDEV``
-
-       -  4
-
-       -  Match the nth sub-device.
+    * - ``V4L2_CHIP_MATCH_BRIDGE``
+      - 0
+      - Match the nth chip on the card, zero for the bridge chip. Does not
+	match sub-devices.
+    * - ``V4L2_CHIP_MATCH_SUBDEV``
+      - 4
+      - Match the nth sub-device.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-dbg-g-register.rst b/Documentation/media/uapi/v4l/vidioc-dbg-g-register.rst
index 09d2880..5960a65 100644
--- a/Documentation/media/uapi/v4l/vidioc-dbg-g-register.rst
+++ b/Documentation/media/uapi/v4l/vidioc-dbg-g-register.rst
@@ -15,9 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_dbg_register *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_DBG_G_REGISTER, struct v4l2_dbg_register *argp )
+    :name: VIDIOC_DBG_G_REGISTER
 
-.. cpp:function:: int ioctl( int fd, int request, const struct v4l2_dbg_register *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_DBG_S_REGISTER, const struct v4l2_dbg_register *argp )
+    :name: VIDIOC_DBG_S_REGISTER
 
 
 Arguments
@@ -26,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_DBG_G_REGISTER, VIDIOC_DBG_S_REGISTER
-
 ``argp``
 
 
@@ -50,7 +49,7 @@
 with the ``CONFIG_VIDEO_ADV_DEBUG`` option to enable these ioctls.
 
 To write a register applications must initialize all fields of a struct
-:ref:`v4l2_dbg_register <v4l2-dbg-register>` except for ``size`` and
+:c:type:`v4l2_dbg_register` except for ``size`` and
 call ``VIDIOC_DBG_S_REGISTER`` with a pointer to this structure. The
 ``match.type`` and ``match.addr`` or ``match.name`` fields select a chip
 on the TV card, the ``reg`` field specifies a register number and the
@@ -86,91 +85,56 @@
 instructions.
 
 
-.. _v4l2-dbg-match:
+.. tabularcolumns:: |p{3.5cm}|p{3.5cm}|p{3.5cm}|p{7.0cm}|
+
+.. c:type:: v4l2_dbg_match
 
 .. flat-table:: struct v4l2_dbg_match
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``type``
-
-       -  See :ref:`chip-match-types` for a list of possible types.
-
-    -  .. row 2
-
-       -  union
-
-       -  (anonymous)
-
-    -  .. row 3
-
-       -
-       -  __u32
-
-       -  ``addr``
-
-       -  Match a chip by this number, interpreted according to the ``type``
-	  field.
-
-    -  .. row 4
-
-       -
-       -  char
-
-       -  ``name[32]``
-
-       -  Match a chip by this name, interpreted according to the ``type``
-	  field. Currently unused.
+    * - __u32
+      - ``type``
+      - See :ref:`chip-match-types` for a list of possible types.
+    * - union
+      - (anonymous)
+    * -
+      - __u32
+      - ``addr``
+      - Match a chip by this number, interpreted according to the ``type``
+	field.
+    * -
+      - char
+      - ``name[32]``
+      - Match a chip by this name, interpreted according to the ``type``
+	field. Currently unused.
 
 
 
-.. _v4l2-dbg-register:
+.. c:type:: v4l2_dbg_register
 
 .. flat-table:: struct v4l2_dbg_register
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  struct v4l2_dbg_match
-
-       -  ``match``
-
-       -  How to match the chip, see :ref:`v4l2-dbg-match`.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``size``
-
-       -  The register size in bytes.
-
-    -  .. row 3
-
-       -  __u64
-
-       -  ``reg``
-
-       -  A register number.
-
-    -  .. row 4
-
-       -  __u64
-
-       -  ``val``
-
-       -  The value read from, or to be written into the register.
+    * - struct v4l2_dbg_match
+      - ``match``
+      - How to match the chip, see :c:type:`v4l2_dbg_match`.
+    * - __u32
+      - ``size``
+      - The register size in bytes.
+    * - __u64
+      - ``reg``
+      - A register number.
+    * - __u64
+      - ``val``
+      - The value read from, or to be written into the register.
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _chip-match-types:
 
 .. flat-table:: Chip Match Types
@@ -178,23 +142,13 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_CHIP_MATCH_BRIDGE``
-
-       -  0
-
-       -  Match the nth chip on the card, zero for the bridge chip. Does not
-	  match sub-devices.
-
-    -  .. row 2
-
-       -  ``V4L2_CHIP_MATCH_SUBDEV``
-
-       -  4
-
-       -  Match the nth sub-device.
+    * - ``V4L2_CHIP_MATCH_BRIDGE``
+      - 0
+      - Match the nth chip on the card, zero for the bridge chip. Does not
+	match sub-devices.
+    * - ``V4L2_CHIP_MATCH_SUBDEV``
+      - 4
+      - Match the nth sub-device.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst b/Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst
index 2a36e91..85c916b 100644
--- a/Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst
+++ b/Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst
@@ -15,7 +15,12 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_decoder_cmd *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_DECODER_CMD, struct v4l2_decoder_cmd *argp )
+    :name: VIDIOC_DECODER_CMD
+
+
+.. c:function:: int ioctl( int fd, VIDIOC_TRY_DECODER_CMD, struct v4l2_decoder_cmd *argp )
+    :name: VIDIOC_TRY_DECODER_CMD
 
 
 Arguments
@@ -24,10 +29,8 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_DECODER_CMD, VIDIOC_TRY_DECODER_CMD
-
 ``argp``
+    pointer to struct :c:type:`v4l2_decoder_cmd`.
 
 
 Description
@@ -37,7 +40,7 @@
 ``VIDIOC_DECODER_CMD`` sends a command to the decoder,
 ``VIDIOC_TRY_DECODER_CMD`` can be used to try a command without actually
 executing it. To send a command applications must initialize all fields
-of a struct :ref:`v4l2_decoder_cmd <v4l2-decoder-cmd>` and call
+of a struct :c:type:`v4l2_decoder_cmd` and call
 ``VIDIOC_DECODER_CMD`` or ``VIDIOC_TRY_DECODER_CMD`` with a pointer to
 this structure.
 
@@ -56,204 +59,140 @@
 introduced in Linux 3.3.
 
 
-.. _v4l2-decoder-cmd:
+.. tabularcolumns:: |p{1.1cm}|p{2.4cm}|p{1.2cm}|p{1.6cm}|p{10.6cm}|
+
+.. c:type:: v4l2_decoder_cmd
+
+.. cssclass:: longtable
 
 .. flat-table:: struct v4l2_decoder_cmd
     :header-rows:  0
     :stub-columns: 0
-    :widths:       1 1 2 1 1
+    :widths: 11 24 12 16 106
+
+    * - __u32
+      - ``cmd``
+      -
+      -
+      - The decoder command, see :ref:`decoder-cmds`.
+    * - __u32
+      - ``flags``
+      -
+      -
+      - Flags to go with the command. If no flags are defined for this
+	command, drivers and applications must set this field to zero.
+    * - union
+      - (anonymous)
+      -
+      -
+      -
+    * -
+      - struct
+      - ``start``
+      -
+      - Structure containing additional data for the
+	``V4L2_DEC_CMD_START`` command.
+    * -
+      -
+      - __s32
+      - ``speed``
+      - Playback speed and direction. The playback speed is defined as
+	``speed``/1000 of the normal speed. So 1000 is normal playback.
+	Negative numbers denote reverse playback, so -1000 does reverse
+	playback at normal speed. Speeds -1, 0 and 1 have special
+	meanings: speed 0 is shorthand for 1000 (normal playback). A speed
+	of 1 steps just one frame forward, a speed of -1 steps just one
+	frame back.
+    * -
+      -
+      - __u32
+      - ``format``
+      - Format restrictions. This field is set by the driver, not the
+	application. Possible values are ``V4L2_DEC_START_FMT_NONE`` if
+	there are no format restrictions or ``V4L2_DEC_START_FMT_GOP`` if
+	the decoder operates on full GOPs (*Group Of Pictures*). This is
+	usually the case for reverse playback: the decoder needs full
+	GOPs, which it can then play in reverse order. So to implement
+	reverse playback the application must feed the decoder the last
+	GOP in the video file, then the GOP before that, etc. etc.
+    * -
+      - struct
+      - ``stop``
+      -
+      - Structure containing additional data for the ``V4L2_DEC_CMD_STOP``
+	command.
+    * -
+      -
+      - __u64
+      - ``pts``
+      - Stop playback at this ``pts`` or immediately if the playback is
+	already past that timestamp. Leave to 0 if you want to stop after
+	the last frame was decoded.
+    * -
+      - struct
+      - ``raw``
+      -
+      -
+    * -
+      -
+      - __u32
+      - ``data``\ [16]
+      - Reserved for future extensions. Drivers and applications must set
+	the array to zero.
 
 
-    -  .. row 1
 
-       -  __u32
-
-       -  ``cmd``
-
-       -
-       -
-       -  The decoder command, see :ref:`decoder-cmds`.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``flags``
-
-       -
-       -
-       -  Flags to go with the command. If no flags are defined for this
-	  command, drivers and applications must set this field to zero.
-
-    -  .. row 3
-
-       -  union
-
-       -  (anonymous)
-
-       -
-       -
-       -
-
-    -  .. row 4
-
-       -
-       -  struct
-
-       -  ``start``
-
-       -
-       -  Structure containing additional data for the
-	  ``V4L2_DEC_CMD_START`` command.
-
-    -  .. row 5
-
-       -
-       -
-       -  __s32
-
-       -  ``speed``
-
-       -  Playback speed and direction. The playback speed is defined as
-	  ``speed``/1000 of the normal speed. So 1000 is normal playback.
-	  Negative numbers denote reverse playback, so -1000 does reverse
-	  playback at normal speed. Speeds -1, 0 and 1 have special
-	  meanings: speed 0 is shorthand for 1000 (normal playback). A speed
-	  of 1 steps just one frame forward, a speed of -1 steps just one
-	  frame back.
-
-    -  .. row 6
-
-       -
-       -
-       -  __u32
-
-       -  ``format``
-
-       -  Format restrictions. This field is set by the driver, not the
-	  application. Possible values are ``V4L2_DEC_START_FMT_NONE`` if
-	  there are no format restrictions or ``V4L2_DEC_START_FMT_GOP`` if
-	  the decoder operates on full GOPs (*Group Of Pictures*). This is
-	  usually the case for reverse playback: the decoder needs full
-	  GOPs, which it can then play in reverse order. So to implement
-	  reverse playback the application must feed the decoder the last
-	  GOP in the video file, then the GOP before that, etc. etc.
-
-    -  .. row 7
-
-       -
-       -  struct
-
-       -  ``stop``
-
-       -
-       -  Structure containing additional data for the ``V4L2_DEC_CMD_STOP``
-	  command.
-
-    -  .. row 8
-
-       -
-       -
-       -  __u64
-
-       -  ``pts``
-
-       -  Stop playback at this ``pts`` or immediately if the playback is
-	  already past that timestamp. Leave to 0 if you want to stop after
-	  the last frame was decoded.
-
-    -  .. row 9
-
-       -
-       -  struct
-
-       -  ``raw``
-
-       -
-       -
-
-    -  .. row 10
-
-       -
-       -
-       -  __u32
-
-       -  ``data``\ [16]
-
-       -  Reserved for future extensions. Drivers and applications must set
-	  the array to zero.
-
-
+.. tabularcolumns:: |p{5.6cm}|p{0.6cm}|p{11.3cm}|
 
 .. _decoder-cmds:
 
 .. flat-table:: Decoder Commands
     :header-rows:  0
     :stub-columns: 0
-    :widths:       3 1 4
+    :widths: 56 6 113
 
-
-    -  .. row 1
-
-       -  ``V4L2_DEC_CMD_START``
-
-       -  0
-
-       -  Start the decoder. When the decoder is already running or paused,
-	  this command will just change the playback speed. That means that
-	  calling ``V4L2_DEC_CMD_START`` when the decoder was paused will
-	  *not* resume the decoder. You have to explicitly call
-	  ``V4L2_DEC_CMD_RESUME`` for that. This command has one flag:
-	  ``V4L2_DEC_CMD_START_MUTE_AUDIO``. If set, then audio will be
-	  muted when playing back at a non-standard speed.
-
-    -  .. row 2
-
-       -  ``V4L2_DEC_CMD_STOP``
-
-       -  1
-
-       -  Stop the decoder. When the decoder is already stopped, this
-	  command does nothing. This command has two flags: if
-	  ``V4L2_DEC_CMD_STOP_TO_BLACK`` is set, then the decoder will set
-	  the picture to black after it stopped decoding. Otherwise the last
-	  image will repeat. mem2mem decoders will stop producing new frames
-	  altogether. They will send a ``V4L2_EVENT_EOS`` event when the
-	  last frame has been decoded and all frames are ready to be
-	  dequeued and will set the ``V4L2_BUF_FLAG_LAST`` buffer flag on
-	  the last buffer of the capture queue to indicate there will be no
-	  new buffers produced to dequeue. This buffer may be empty,
-	  indicated by the driver setting the ``bytesused`` field to 0. Once
-	  the ``V4L2_BUF_FLAG_LAST`` flag was set, the
-	  :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl will not block anymore,
-	  but return an ``EPIPE`` error code. If
-	  ``V4L2_DEC_CMD_STOP_IMMEDIATELY`` is set, then the decoder stops
-	  immediately (ignoring the ``pts`` value), otherwise it will keep
-	  decoding until timestamp >= pts or until the last of the pending
-	  data from its internal buffers was decoded.
-
-    -  .. row 3
-
-       -  ``V4L2_DEC_CMD_PAUSE``
-
-       -  2
-
-       -  Pause the decoder. When the decoder has not been started yet, the
-	  driver will return an ``EPERM`` error code. When the decoder is
-	  already paused, this command does nothing. This command has one
-	  flag: if ``V4L2_DEC_CMD_PAUSE_TO_BLACK`` is set, then set the
-	  decoder output to black when paused.
-
-    -  .. row 4
-
-       -  ``V4L2_DEC_CMD_RESUME``
-
-       -  3
-
-       -  Resume decoding after a PAUSE command. When the decoder has not
-	  been started yet, the driver will return an ``EPERM`` error code. When
-	  the decoder is already running, this command does nothing. No
-	  flags are defined for this command.
+    * - ``V4L2_DEC_CMD_START``
+      - 0
+      - Start the decoder. When the decoder is already running or paused,
+	this command will just change the playback speed. That means that
+	calling ``V4L2_DEC_CMD_START`` when the decoder was paused will
+	*not* resume the decoder. You have to explicitly call
+	``V4L2_DEC_CMD_RESUME`` for that. This command has one flag:
+	``V4L2_DEC_CMD_START_MUTE_AUDIO``. If set, then audio will be
+	muted when playing back at a non-standard speed.
+    * - ``V4L2_DEC_CMD_STOP``
+      - 1
+      - Stop the decoder. When the decoder is already stopped, this
+	command does nothing. This command has two flags: if
+	``V4L2_DEC_CMD_STOP_TO_BLACK`` is set, then the decoder will set
+	the picture to black after it stopped decoding. Otherwise the last
+	image will repeat. mem2mem decoders will stop producing new frames
+	altogether. They will send a ``V4L2_EVENT_EOS`` event when the
+	last frame has been decoded and all frames are ready to be
+	dequeued and will set the ``V4L2_BUF_FLAG_LAST`` buffer flag on
+	the last buffer of the capture queue to indicate there will be no
+	new buffers produced to dequeue. This buffer may be empty,
+	indicated by the driver setting the ``bytesused`` field to 0. Once
+	the ``V4L2_BUF_FLAG_LAST`` flag was set, the
+	:ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl will not block anymore,
+	but return an ``EPIPE`` error code. If
+	``V4L2_DEC_CMD_STOP_IMMEDIATELY`` is set, then the decoder stops
+	immediately (ignoring the ``pts`` value), otherwise it will keep
+	decoding until timestamp >= pts or until the last of the pending
+	data from its internal buffers was decoded.
+    * - ``V4L2_DEC_CMD_PAUSE``
+      - 2
+      - Pause the decoder. When the decoder has not been started yet, the
+	driver will return an ``EPERM`` error code. When the decoder is
+	already paused, this command does nothing. This command has one
+	flag: if ``V4L2_DEC_CMD_PAUSE_TO_BLACK`` is set, then set the
+	decoder output to black when paused.
+    * - ``V4L2_DEC_CMD_RESUME``
+      - 3
+      - Resume decoding after a PAUSE command. When the decoder has not
+	been started yet, the driver will return an ``EPERM`` error code. When
+	the decoder is already running, this command does nothing. No
+	flags are defined for this command.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-dqevent.rst b/Documentation/media/uapi/v4l/vidioc-dqevent.rst
index 73c0d5b..8d663a7 100644
--- a/Documentation/media/uapi/v4l/vidioc-dqevent.rst
+++ b/Documentation/media/uapi/v4l/vidioc-dqevent.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_event *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_DQEVENT, struct v4l2_event *argp )
+    :name: VIDIOC_DQEVENT
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_DQEVENT
-
 ``argp``
 
 
@@ -34,147 +32,90 @@
 ===========
 
 Dequeue an event from a video device. No input is required for this
-ioctl. All the fields of the struct :ref:`v4l2_event <v4l2-event>`
+ioctl. All the fields of the struct :c:type:`v4l2_event`
 structure are filled by the driver. The file handle will also receive
 exceptions which the application may get by e.g. using the select system
 call.
 
 
-.. _v4l2-event:
+.. tabularcolumns:: |p{3.0cm}|p{4.3cm}|p{2.5cm}|p{7.7cm}|
+
+.. c:type:: v4l2_event
+
+.. cssclass: longtable
 
 .. flat-table:: struct v4l2_event
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2 1
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``type``
-
-       -
-       -  Type of the event, see :ref:`event-type`.
-
-    -  .. row 2
-
-       -  union
-
-       -  ``u``
-
-       -
-       -
-
-    -  .. row 3
-
-       -
-       -  struct :ref:`v4l2_event_vsync <v4l2-event-vsync>`
-
-       -  ``vsync``
-
-       -  Event data for event ``V4L2_EVENT_VSYNC``.
-
-    -  .. row 4
-
-       -
-       -  struct :ref:`v4l2_event_ctrl <v4l2-event-ctrl>`
-
-       -  ``ctrl``
-
-       -  Event data for event ``V4L2_EVENT_CTRL``.
-
-    -  .. row 5
-
-       -
-       -  struct :ref:`v4l2_event_frame_sync <v4l2-event-frame-sync>`
-
-       -  ``frame_sync``
-
-       -  Event data for event ``V4L2_EVENT_FRAME_SYNC``.
-
-    -  .. row 6
-
-       -
-       -  struct :ref:`v4l2_event_motion_det <v4l2-event-motion-det>`
-
-       -  ``motion_det``
-
-       -  Event data for event V4L2_EVENT_MOTION_DET.
-
-    -  .. row 7
-
-       -
-       -  struct :ref:`v4l2_event_src_change <v4l2-event-src-change>`
-
-       -  ``src_change``
-
-       -  Event data for event V4L2_EVENT_SOURCE_CHANGE.
-
-    -  .. row 8
-
-       -
-       -  __u8
-
-       -  ``data``\ [64]
-
-       -  Event data. Defined by the event type. The union should be used to
-	  define easily accessible type for events.
-
-    -  .. row 9
-
-       -  __u32
-
-       -  ``pending``
-
-       -
-       -  Number of pending events excluding this one.
-
-    -  .. row 10
-
-       -  __u32
-
-       -  ``sequence``
-
-       -
-       -  Event sequence number. The sequence number is incremented for
-	  every subscribed event that takes place. If sequence numbers are
-	  not contiguous it means that events have been lost.
-
-    -  .. row 11
-
-       -  struct timespec
-
-       -  ``timestamp``
-
-       -
-       -  Event timestamp. The timestamp has been taken from the
-	  ``CLOCK_MONOTONIC`` clock. To access the same clock outside V4L2,
-	  use :c:func:`clock_gettime(2)`.
-
-    -  .. row 12
-
-       -  u32
-
-       -  ``id``
-
-       -
-       -  The ID associated with the event source. If the event does not
-	  have an associated ID (this depends on the event type), then this
-	  is 0.
-
-    -  .. row 13
-
-       -  __u32
-
-       -  ``reserved``\ [8]
-
-       -
-       -  Reserved for future extensions. Drivers must set the array to
-	  zero.
+    * - __u32
+      - ``type``
+      -
+      - Type of the event, see :ref:`event-type`.
+    * - union
+      - ``u``
+      -
+      -
+    * -
+      - struct :c:type:`v4l2_event_vsync`
+      - ``vsync``
+      - Event data for event ``V4L2_EVENT_VSYNC``.
+    * -
+      - struct :c:type:`v4l2_event_ctrl`
+      - ``ctrl``
+      - Event data for event ``V4L2_EVENT_CTRL``.
+    * -
+      - struct :c:type:`v4l2_event_frame_sync`
+      - ``frame_sync``
+      - Event data for event ``V4L2_EVENT_FRAME_SYNC``.
+    * -
+      - struct :c:type:`v4l2_event_motion_det`
+      - ``motion_det``
+      - Event data for event V4L2_EVENT_MOTION_DET.
+    * -
+      - struct :c:type:`v4l2_event_src_change`
+      - ``src_change``
+      - Event data for event V4L2_EVENT_SOURCE_CHANGE.
+    * -
+      - __u8
+      - ``data``\ [64]
+      - Event data. Defined by the event type. The union should be used to
+	define easily accessible type for events.
+    * - __u32
+      - ``pending``
+      -
+      - Number of pending events excluding this one.
+    * - __u32
+      - ``sequence``
+      -
+      - Event sequence number. The sequence number is incremented for
+	every subscribed event that takes place. If sequence numbers are
+	not contiguous it means that events have been lost.
+    * - struct timespec
+      - ``timestamp``
+      -
+      - Event timestamp. The timestamp has been taken from the
+	``CLOCK_MONOTONIC`` clock. To access the same clock outside V4L2,
+	use :c:func:`clock_gettime`.
+    * - u32
+      - ``id``
+      -
+      - The ID associated with the event source. If the event does not
+	have an associated ID (this depends on the event type), then this
+	is 0.
+    * - __u32
+      - ``reserved``\ [8]
+      -
+      - Reserved for future extensions. Drivers must set the array to
+	zero.
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
+.. cssclass:: longtable
+
 .. _event-type:
 
 .. flat-table:: Event Types
@@ -182,331 +123,224 @@
     :stub-columns: 0
     :widths:       3 1 4
 
+    * - ``V4L2_EVENT_ALL``
+      - 0
+      - All events. V4L2_EVENT_ALL is valid only for
+	VIDIOC_UNSUBSCRIBE_EVENT for unsubscribing all events at once.
+    * - ``V4L2_EVENT_VSYNC``
+      - 1
+      - This event is triggered on the vertical sync. This event has a
+	struct :c:type:`v4l2_event_vsync` associated
+	with it.
+    * - ``V4L2_EVENT_EOS``
+      - 2
+      - This event is triggered when the end of a stream is reached. This
+	is typically used with MPEG decoders to report to the application
+	when the last of the MPEG stream has been decoded.
+    * - ``V4L2_EVENT_CTRL``
+      - 3
+      - This event requires that the ``id`` matches the control ID from
+	which you want to receive events. This event is triggered if the
+	control's value changes, if a button control is pressed or if the
+	control's flags change. This event has a struct
+	:c:type:`v4l2_event_ctrl` associated with it.
+	This struct contains much of the same information as struct
+	:ref:`v4l2_queryctrl <v4l2-queryctrl>` and struct
+	:c:type:`v4l2_control`.
 
-    -  .. row 1
+	If the event is generated due to a call to
+	:ref:`VIDIOC_S_CTRL <VIDIOC_G_CTRL>` or
+	:ref:`VIDIOC_S_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>`, then the
+	event will *not* be sent to the file handle that called the ioctl
+	function. This prevents nasty feedback loops. If you *do* want to
+	get the event, then set the ``V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK``
+	flag.
 
-       -  ``V4L2_EVENT_ALL``
+	This event type will ensure that no information is lost when more
+	events are raised than there is room internally. In that case the
+	struct :c:type:`v4l2_event_ctrl` of the
+	second-oldest event is kept, but the ``changes`` field of the
+	second-oldest event is ORed with the ``changes`` field of the
+	oldest event.
+    * - ``V4L2_EVENT_FRAME_SYNC``
+      - 4
+      - Triggered immediately when the reception of a frame has begun.
+	This event has a struct
+	:c:type:`v4l2_event_frame_sync`
+	associated with it.
 
-       -  0
+	If the hardware needs to be stopped in the case of a buffer
+	underrun it might not be able to generate this event. In such
+	cases the ``frame_sequence`` field in struct
+	:c:type:`v4l2_event_frame_sync` will not
+	be incremented. This causes two consecutive frame sequence numbers
+	to have n times frame interval in between them.
+    * - ``V4L2_EVENT_SOURCE_CHANGE``
+      - 5
+      - This event is triggered when a source parameter change is detected
+	during runtime by the video device. It can be a runtime resolution
+	change triggered by a video decoder or the format change happening
+	on an input connector. This event requires that the ``id`` matches
+	the input index (when used with a video device node) or the pad
+	index (when used with a subdevice node) from which you want to
+	receive events.
 
-       -  All events. V4L2_EVENT_ALL is valid only for
-	  VIDIOC_UNSUBSCRIBE_EVENT for unsubscribing all events at once.
-
-    -  .. row 2
-
-       -  ``V4L2_EVENT_VSYNC``
-
-       -  1
-
-       -  This event is triggered on the vertical sync. This event has a
-	  struct :ref:`v4l2_event_vsync <v4l2-event-vsync>` associated
-	  with it.
-
-    -  .. row 3
-
-       -  ``V4L2_EVENT_EOS``
-
-       -  2
-
-       -  This event is triggered when the end of a stream is reached. This
-	  is typically used with MPEG decoders to report to the application
-	  when the last of the MPEG stream has been decoded.
-
-    -  .. row 4
-
-       -  ``V4L2_EVENT_CTRL``
-
-       -  3
-
-       -  This event requires that the ``id`` matches the control ID from
-	  which you want to receive events. This event is triggered if the
-	  control's value changes, if a button control is pressed or if the
-	  control's flags change. This event has a struct
-	  :ref:`v4l2_event_ctrl <v4l2-event-ctrl>` associated with it.
-	  This struct contains much of the same information as struct
-	  :ref:`v4l2_queryctrl <v4l2-queryctrl>` and struct
-	  :ref:`v4l2_control <v4l2-control>`.
-
-	  If the event is generated due to a call to
-	  :ref:`VIDIOC_S_CTRL <VIDIOC_G_CTRL>` or
-	  :ref:`VIDIOC_S_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>`, then the
-	  event will *not* be sent to the file handle that called the ioctl
-	  function. This prevents nasty feedback loops. If you *do* want to
-	  get the event, then set the ``V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK``
-	  flag.
-
-	  This event type will ensure that no information is lost when more
-	  events are raised than there is room internally. In that case the
-	  struct :ref:`v4l2_event_ctrl <v4l2-event-ctrl>` of the
-	  second-oldest event is kept, but the ``changes`` field of the
-	  second-oldest event is ORed with the ``changes`` field of the
-	  oldest event.
-
-    -  .. row 5
-
-       -  ``V4L2_EVENT_FRAME_SYNC``
-
-       -  4
-
-       -  Triggered immediately when the reception of a frame has begun.
-	  This event has a struct
-	  :ref:`v4l2_event_frame_sync <v4l2-event-frame-sync>`
-	  associated with it.
-
-	  If the hardware needs to be stopped in the case of a buffer
-	  underrun it might not be able to generate this event. In such
-	  cases the ``frame_sequence`` field in struct
-	  :ref:`v4l2_event_frame_sync <v4l2-event-frame-sync>` will not
-	  be incremented. This causes two consecutive frame sequence numbers
-	  to have n times frame interval in between them.
-
-    -  .. row 6
-
-       -  ``V4L2_EVENT_SOURCE_CHANGE``
-
-       -  5
-
-       -  This event is triggered when a source parameter change is detected
-	  during runtime by the video device. It can be a runtime resolution
-	  change triggered by a video decoder or the format change happening
-	  on an input connector. This event requires that the ``id`` matches
-	  the input index (when used with a video device node) or the pad
-	  index (when used with a subdevice node) from which you want to
-	  receive events.
-
-	  This event has a struct
-	  :ref:`v4l2_event_src_change <v4l2-event-src-change>`
-	  associated with it. The ``changes`` bitfield denotes what has
-	  changed for the subscribed pad. If multiple events occurred before
-	  application could dequeue them, then the changes will have the
-	  ORed value of all the events generated.
-
-    -  .. row 7
-
-       -  ``V4L2_EVENT_MOTION_DET``
-
-       -  6
-
-       -  Triggered whenever the motion detection state for one or more of
-	  the regions changes. This event has a struct
-	  :ref:`v4l2_event_motion_det <v4l2-event-motion-det>`
-	  associated with it.
-
-    -  .. row 8
-
-       -  ``V4L2_EVENT_PRIVATE_START``
-
-       -  0x08000000
-
-       -  Base event number for driver-private events.
+	This event has a struct
+	:c:type:`v4l2_event_src_change`
+	associated with it. The ``changes`` bitfield denotes what has
+	changed for the subscribed pad. If multiple events occurred before
+	application could dequeue them, then the changes will have the
+	ORed value of all the events generated.
+    * - ``V4L2_EVENT_MOTION_DET``
+      - 6
+      - Triggered whenever the motion detection state for one or more of
+	the regions changes. This event has a struct
+	:c:type:`v4l2_event_motion_det`
+	associated with it.
+    * - ``V4L2_EVENT_PRIVATE_START``
+      - 0x08000000
+      - Base event number for driver-private events.
 
 
 
-.. _v4l2-event-vsync:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_event_vsync
 
 .. flat-table:: struct v4l2_event_vsync
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u8
-
-       -  ``field``
-
-       -  The upcoming field. See enum :ref:`v4l2_field <v4l2-field>`.
+    * - __u8
+      - ``field``
+      - The upcoming field. See enum :c:type:`v4l2_field`.
 
 
 
-.. _v4l2-event-ctrl:
+.. tabularcolumns:: |p{3.5cm}|p{3.0cm}|p{1.8cm}|p{8.5cm}|
+
+.. c:type:: v4l2_event_ctrl
 
 .. flat-table:: struct v4l2_event_ctrl
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2 1
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``changes``
-
-       -
-       -  A bitmask that tells what has changed. See
-	  :ref:`ctrl-changes-flags`.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``type``
-
-       -
-       -  The type of the control. See enum
-	  :ref:`v4l2_ctrl_type <v4l2-ctrl-type>`.
-
-    -  .. row 3
-
-       -  union (anonymous)
-
-       -
-       -
-       -
-
-    -  .. row 4
-
-       -
-       -  __s32
-
-       -  ``value``
-
-       -  The 32-bit value of the control for 32-bit control types. This is
-	  0 for string controls since the value of a string cannot be passed
-	  using :ref:`VIDIOC_DQEVENT`.
-
-    -  .. row 5
-
-       -
-       -  __s64
-
-       -  ``value64``
-
-       -  The 64-bit value of the control for 64-bit control types.
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``flags``
-
-       -
-       -  The control flags. See :ref:`control-flags`.
-
-    -  .. row 7
-
-       -  __s32
-
-       -  ``minimum``
-
-       -
-       -  The minimum value of the control. See struct
-	  :ref:`v4l2_queryctrl <v4l2-queryctrl>`.
-
-    -  .. row 8
-
-       -  __s32
-
-       -  ``maximum``
-
-       -
-       -  The maximum value of the control. See struct
-	  :ref:`v4l2_queryctrl <v4l2-queryctrl>`.
-
-    -  .. row 9
-
-       -  __s32
-
-       -  ``step``
-
-       -
-       -  The step value of the control. See struct
-	  :ref:`v4l2_queryctrl <v4l2-queryctrl>`.
-
-    -  .. row 10
-
-       -  __s32
-
-       -  ``default_value``
-
-       -
-       -  The default value value of the control. See struct
-	  :ref:`v4l2_queryctrl <v4l2-queryctrl>`.
+    * - __u32
+      - ``changes``
+      -
+      - A bitmask that tells what has changed. See
+	:ref:`ctrl-changes-flags`.
+    * - __u32
+      - ``type``
+      -
+      - The type of the control. See enum
+	:c:type:`v4l2_ctrl_type`.
+    * - union (anonymous)
+      -
+      -
+      -
+    * -
+      - __s32
+      - ``value``
+      - The 32-bit value of the control for 32-bit control types. This is
+	0 for string controls since the value of a string cannot be passed
+	using :ref:`VIDIOC_DQEVENT`.
+    * -
+      - __s64
+      - ``value64``
+      - The 64-bit value of the control for 64-bit control types.
+    * - __u32
+      - ``flags``
+      -
+      - The control flags. See :ref:`control-flags`.
+    * - __s32
+      - ``minimum``
+      -
+      - The minimum value of the control. See struct
+	:ref:`v4l2_queryctrl <v4l2-queryctrl>`.
+    * - __s32
+      - ``maximum``
+      -
+      - The maximum value of the control. See struct
+	:ref:`v4l2_queryctrl <v4l2-queryctrl>`.
+    * - __s32
+      - ``step``
+      -
+      - The step value of the control. See struct
+	:ref:`v4l2_queryctrl <v4l2-queryctrl>`.
+    * - __s32
+      - ``default_value``
+      -
+      - The default value value of the control. See struct
+	:ref:`v4l2_queryctrl <v4l2-queryctrl>`.
 
 
 
-.. _v4l2-event-frame-sync:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_event_frame_sync
 
 .. flat-table:: struct v4l2_event_frame_sync
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``frame_sequence``
-
-       -  The sequence number of the frame being received.
+    * - __u32
+      - ``frame_sequence``
+      - The sequence number of the frame being received.
 
 
 
-.. _v4l2-event-src-change:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_event_src_change
 
 .. flat-table:: struct v4l2_event_src_change
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``changes``
-
-       -  A bitmask that tells what has changed. See
-	  :ref:`src-changes-flags`.
+    * - __u32
+      - ``changes``
+      - A bitmask that tells what has changed. See
+	:ref:`src-changes-flags`.
 
 
 
-.. _v4l2-event-motion-det:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_event_motion_det
 
 .. flat-table:: struct v4l2_event_motion_det
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``flags``
-
-       -  Currently only one flag is available: if
-	  ``V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ`` is set, then the
-	  ``frame_sequence`` field is valid, otherwise that field should be
-	  ignored.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``frame_sequence``
-
-       -  The sequence number of the frame being received. Only valid if the
-	  ``V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ`` flag was set.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``region_mask``
-
-       -  The bitmask of the regions that reported motion. There is at least
-	  one region. If this field is 0, then no motion was detected at
-	  all. If there is no ``V4L2_CID_DETECT_MD_REGION_GRID`` control
-	  (see :ref:`detect-controls`) to assign a different region to
-	  each cell in the motion detection grid, then that all cells are
-	  automatically assigned to the default region 0.
+    * - __u32
+      - ``flags``
+      - Currently only one flag is available: if
+	``V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ`` is set, then the
+	``frame_sequence`` field is valid, otherwise that field should be
+	ignored.
+    * - __u32
+      - ``frame_sequence``
+      - The sequence number of the frame being received. Only valid if the
+	``V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ`` flag was set.
+    * - __u32
+      - ``region_mask``
+      - The bitmask of the regions that reported motion. There is at least
+	one region. If this field is 0, then no motion was detected at
+	all. If there is no ``V4L2_CID_DETECT_MD_REGION_GRID`` control
+	(see :ref:`detect-controls`) to assign a different region to
+	each cell in the motion detection grid, then that all cells are
+	automatically assigned to the default region 0.
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _ctrl-changes-flags:
 
 .. flat-table:: Control Changes
@@ -514,38 +348,25 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_EVENT_CTRL_CH_VALUE``
-
-       -  0x0001
-
-       -  This control event was triggered because the value of the control
-	  changed. Special cases: Volatile controls do no generate this
-	  event; If a control has the ``V4L2_CTRL_FLAG_EXECUTE_ON_WRITE``
-	  flag set, then this event is sent as well, regardless its value.
-
-    -  .. row 2
-
-       -  ``V4L2_EVENT_CTRL_CH_FLAGS``
-
-       -  0x0002
-
-       -  This control event was triggered because the control flags
-	  changed.
-
-    -  .. row 3
-
-       -  ``V4L2_EVENT_CTRL_CH_RANGE``
-
-       -  0x0004
-
-       -  This control event was triggered because the minimum, maximum,
-	  step or the default value of the control changed.
+    * - ``V4L2_EVENT_CTRL_CH_VALUE``
+      - 0x0001
+      - This control event was triggered because the value of the control
+	changed. Special cases: Volatile controls do no generate this
+	event; If a control has the ``V4L2_CTRL_FLAG_EXECUTE_ON_WRITE``
+	flag set, then this event is sent as well, regardless its value.
+    * - ``V4L2_EVENT_CTRL_CH_FLAGS``
+      - 0x0002
+      - This control event was triggered because the control flags
+	changed.
+    * - ``V4L2_EVENT_CTRL_CH_RANGE``
+      - 0x0004
+      - This control event was triggered because the minimum, maximum,
+	step or the default value of the control changed.
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _src-changes-flags:
 
 .. flat-table:: Source Changes
@@ -553,16 +374,11 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_EVENT_SRC_CH_RESOLUTION``
-
-       -  0x0001
-
-       -  This event gets triggered when a resolution change is detected at
-	  an input. This can come from an input connector or from a video
-	  decoder.
+    * - ``V4L2_EVENT_SRC_CH_RESOLUTION``
+      - 0x0001
+      - This event gets triggered when a resolution change is detected at
+	an input. This can come from an input connector or from a video
+	decoder.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-dv-timings-cap.rst b/Documentation/media/uapi/v4l/vidioc-dv-timings-cap.rst
index 6e05957..424f3a1 100644
--- a/Documentation/media/uapi/v4l/vidioc-dv-timings-cap.rst
+++ b/Documentation/media/uapi/v4l/vidioc-dv-timings-cap.rst
@@ -15,7 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_dv_timings_cap *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_DV_TIMINGS_CAP, struct v4l2_dv_timings_cap *argp )
+    :name: VIDIOC_DV_TIMINGS_CAP
+
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_DV_TIMINGS_CAP, struct v4l2_dv_timings_cap *argp )
+    :name: VIDIOC_SUBDEV_DV_TIMINGS_CAP
 
 
 Arguments
@@ -24,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_DV_TIMINGS_CAP, VIDIOC_SUBDEV_DV_TIMINGS_CAP
-
 ``argp``
 
 
@@ -35,11 +36,13 @@
 
 To query the capabilities of the DV receiver/transmitter applications
 initialize the ``pad`` field to 0, zero the reserved array of struct
-:ref:`v4l2_dv_timings_cap <v4l2-dv-timings-cap>` and call the
+:c:type:`v4l2_dv_timings_cap` and call the
 ``VIDIOC_DV_TIMINGS_CAP`` ioctl on a video node and the driver will fill
 in the structure.
 
-.. note:: Drivers may return different values after
+.. note::
+
+   Drivers may return different values after
    switching the video input or output.
 
 When implemented by the driver DV capabilities of subdevices can be
@@ -47,157 +50,88 @@
 on a subdevice node. The capabilities are specific to inputs (for DV
 receivers) or outputs (for DV transmitters), applications must specify
 the desired pad number in the struct
-:ref:`v4l2_dv_timings_cap <v4l2-dv-timings-cap>` ``pad`` field and
+:c:type:`v4l2_dv_timings_cap` ``pad`` field and
 zero the ``reserved`` array. Attempts to query capabilities on a pad
 that doesn't support them will return an ``EINVAL`` error code.
 
 
-.. _v4l2-bt-timings-cap:
+.. tabularcolumns:: |p{1.2cm}|p{3.0cm}|p{13.3cm}|
+
+.. c:type:: v4l2_bt_timings_cap
 
 .. flat-table:: struct v4l2_bt_timings_cap
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``min_width``
-
-       -  Minimum width of the active video in pixels.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``max_width``
-
-       -  Maximum width of the active video in pixels.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``min_height``
-
-       -  Minimum height of the active video in lines.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``max_height``
-
-       -  Maximum height of the active video in lines.
-
-    -  .. row 5
-
-       -  __u64
-
-       -  ``min_pixelclock``
-
-       -  Minimum pixelclock frequency in Hz.
-
-    -  .. row 6
-
-       -  __u64
-
-       -  ``max_pixelclock``
-
-       -  Maximum pixelclock frequency in Hz.
-
-    -  .. row 7
-
-       -  __u32
-
-       -  ``standards``
-
-       -  The video standard(s) supported by the hardware. See
-	  :ref:`dv-bt-standards` for a list of standards.
-
-    -  .. row 8
-
-       -  __u32
-
-       -  ``capabilities``
-
-       -  Several flags giving more information about the capabilities. See
-	  :ref:`dv-bt-cap-capabilities` for a description of the flags.
-
-    -  .. row 9
-
-       -  __u32
-
-       -  ``reserved``\ [16]
-
-       -  Reserved for future extensions. Drivers must set the array to
-	  zero.
+    * - __u32
+      - ``min_width``
+      - Minimum width of the active video in pixels.
+    * - __u32
+      - ``max_width``
+      - Maximum width of the active video in pixels.
+    * - __u32
+      - ``min_height``
+      - Minimum height of the active video in lines.
+    * - __u32
+      - ``max_height``
+      - Maximum height of the active video in lines.
+    * - __u64
+      - ``min_pixelclock``
+      - Minimum pixelclock frequency in Hz.
+    * - __u64
+      - ``max_pixelclock``
+      - Maximum pixelclock frequency in Hz.
+    * - __u32
+      - ``standards``
+      - The video standard(s) supported by the hardware. See
+	:ref:`dv-bt-standards` for a list of standards.
+    * - __u32
+      - ``capabilities``
+      - Several flags giving more information about the capabilities. See
+	:ref:`dv-bt-cap-capabilities` for a description of the flags.
+    * - __u32
+      - ``reserved``\ [16]
+      - Reserved for future extensions.
+	Drivers must set the array to zero.
 
 
 
-.. _v4l2-dv-timings-cap:
+.. tabularcolumns:: |p{1.0cm}|p{3.5cm}|p{3.5cm}|p{9.5cm}|
+
+.. c:type:: v4l2_dv_timings_cap
 
 .. flat-table:: struct v4l2_dv_timings_cap
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2 1
 
+    * - __u32
+      - ``type``
+      - Type of DV timings as listed in :ref:`dv-timing-types`.
+    * - __u32
+      - ``pad``
+      - Pad number as reported by the media controller API. This field is
+	only used when operating on a subdevice node. When operating on a
+	video node applications must set this field to zero.
+    * - __u32
+      - ``reserved``\ [2]
+      - Reserved for future extensions.
 
-    -  .. row 1
+	Drivers and applications must set the array to zero.
+    * - union
+      -
+      -
+    * -
+      - struct :c:type:`v4l2_bt_timings_cap`
+      - ``bt``
+      - BT.656/1120 timings capabilities of the hardware.
+    * -
+      - __u32
+      - ``raw_data``\ [32]
+      -
 
-       -  __u32
-
-       -  ``type``
-
-       -  Type of DV timings as listed in :ref:`dv-timing-types`.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``pad``
-
-       -  Pad number as reported by the media controller API. This field is
-	  only used when operating on a subdevice node. When operating on a
-	  video node applications must set this field to zero.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``reserved``\ [2]
-
-       -  Reserved for future extensions. Drivers and applications must set
-	  the array to zero.
-
-    -  .. row 4
-
-       -  union
-
-       -
-       -
-
-    -  .. row 5
-
-       -
-       -  struct :ref:`v4l2_bt_timings_cap <v4l2-bt-timings-cap>`
-
-       -  ``bt``
-
-       -  BT.656/1120 timings capabilities of the hardware.
-
-    -  .. row 6
-
-       -
-       -  __u32
-
-       -  ``raw_data``\ [32]
-
-       -
-
-
+.. tabularcolumns:: |p{7.0cm}|p{10.5cm}|
 
 .. _dv-bt-cap-capabilities:
 
@@ -205,43 +139,20 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  Flag
-
-       -  Description
-
-    -  .. row 2
-
-       -
-       -
-
-    -  .. row 3
-
-       -  ``V4L2_DV_BT_CAP_INTERLACED``
-
-       -  Interlaced formats are supported.
-
-    -  .. row 4
-
-       -  ``V4L2_DV_BT_CAP_PROGRESSIVE``
-
-       -  Progressive formats are supported.
-
-    -  .. row 5
-
-       -  ``V4L2_DV_BT_CAP_REDUCED_BLANKING``
-
-       -  CVT/GTF specific: the timings can make use of reduced blanking
-	  (CVT) or the 'Secondary GTF' curve (GTF).
-
-    -  .. row 6
-
-       -  ``V4L2_DV_BT_CAP_CUSTOM``
-
-       -  Can support non-standard timings, i.e. timings not belonging to
-	  the standards set in the ``standards`` field.
+    * - Flag
+      - Description
+    * -
+      -
+    * - ``V4L2_DV_BT_CAP_INTERLACED``
+      - Interlaced formats are supported.
+    * - ``V4L2_DV_BT_CAP_PROGRESSIVE``
+      - Progressive formats are supported.
+    * - ``V4L2_DV_BT_CAP_REDUCED_BLANKING``
+      - CVT/GTF specific: the timings can make use of reduced blanking
+	(CVT) or the 'Secondary GTF' curve (GTF).
+    * - ``V4L2_DV_BT_CAP_CUSTOM``
+      - Can support non-standard timings, i.e. timings not belonging to
+	the standards set in the ``standards`` field.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-encoder-cmd.rst b/Documentation/media/uapi/v4l/vidioc-encoder-cmd.rst
index 69bd9b4..ae20ee5 100644
--- a/Documentation/media/uapi/v4l/vidioc-encoder-cmd.rst
+++ b/Documentation/media/uapi/v4l/vidioc-encoder-cmd.rst
@@ -15,7 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_encoder_cmd *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_ENCODER_CMD, struct v4l2_encoder_cmd *argp )
+    :name: VIDIOC_ENCODER_CMD
+
+.. c:function:: int ioctl( int fd, VIDIOC_TRY_ENCODER_CMD, struct v4l2_encoder_cmd *argp )
+    :name: VIDIOC_TRY_ENCODER_CMD
 
 
 Arguments
@@ -24,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_ENCODER_CMD, VIDIOC_TRY_ENCODER_CMD
-
 ``argp``
 
 
@@ -39,7 +40,7 @@
 executing it.
 
 To send a command applications must initialize all fields of a struct
-:ref:`v4l2_encoder_cmd <v4l2-encoder-cmd>` and call
+:c:type:`v4l2_encoder_cmd` and call
 ``VIDIOC_ENCODER_CMD`` or ``VIDIOC_TRY_ENCODER_CMD`` with a pointer to
 this structure.
 
@@ -64,43 +65,32 @@
 introduced in Linux 2.6.21.
 
 
-.. _v4l2-encoder-cmd:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_encoder_cmd
 
 .. flat-table:: struct v4l2_encoder_cmd
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``cmd``
-
-       -  The encoder command, see :ref:`encoder-cmds`.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``flags``
-
-       -  Flags to go with the command, see :ref:`encoder-flags`. If no
-	  flags are defined for this command, drivers and applications must
-	  set this field to zero.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``data``\ [8]
-
-       -  Reserved for future extensions. Drivers and applications must set
-	  the array to zero.
+    * - __u32
+      - ``cmd``
+      - The encoder command, see :ref:`encoder-cmds`.
+    * - __u32
+      - ``flags``
+      - Flags to go with the command, see :ref:`encoder-flags`. If no
+	flags are defined for this command, drivers and applications must
+	set this field to zero.
+    * - __u32
+      - ``data``\ [8]
+      - Reserved for future extensions. Drivers and applications must set
+	the array to zero.
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _encoder-cmds:
 
 .. flat-table:: Encoder Commands
@@ -108,59 +98,40 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_ENC_CMD_START``
-
-       -  0
-
-       -  Start the encoder. When the encoder is already running or paused,
-	  this command does nothing. No flags are defined for this command.
-
-    -  .. row 2
-
-       -  ``V4L2_ENC_CMD_STOP``
-
-       -  1
-
-       -  Stop the encoder. When the ``V4L2_ENC_CMD_STOP_AT_GOP_END`` flag
-	  is set, encoding will continue until the end of the current *Group
-	  Of Pictures*, otherwise encoding will stop immediately. When the
-	  encoder is already stopped, this command does nothing. mem2mem
-	  encoders will send a ``V4L2_EVENT_EOS`` event when the last frame
-	  has been encoded and all frames are ready to be dequeued and will
-	  set the ``V4L2_BUF_FLAG_LAST`` buffer flag on the last buffer of
-	  the capture queue to indicate there will be no new buffers
-	  produced to dequeue. This buffer may be empty, indicated by the
-	  driver setting the ``bytesused`` field to 0. Once the
-	  ``V4L2_BUF_FLAG_LAST`` flag was set, the
-	  :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl will not block anymore,
-	  but return an ``EPIPE`` error code.
-
-    -  .. row 3
-
-       -  ``V4L2_ENC_CMD_PAUSE``
-
-       -  2
-
-       -  Pause the encoder. When the encoder has not been started yet, the
-	  driver will return an ``EPERM`` error code. When the encoder is
-	  already paused, this command does nothing. No flags are defined
-	  for this command.
-
-    -  .. row 4
-
-       -  ``V4L2_ENC_CMD_RESUME``
-
-       -  3
-
-       -  Resume encoding after a PAUSE command. When the encoder has not
-	  been started yet, the driver will return an ``EPERM`` error code. When
-	  the encoder is already running, this command does nothing. No
-	  flags are defined for this command.
+    * - ``V4L2_ENC_CMD_START``
+      - 0
+      - Start the encoder. When the encoder is already running or paused,
+	this command does nothing. No flags are defined for this command.
+    * - ``V4L2_ENC_CMD_STOP``
+      - 1
+      - Stop the encoder. When the ``V4L2_ENC_CMD_STOP_AT_GOP_END`` flag
+	is set, encoding will continue until the end of the current *Group
+	Of Pictures*, otherwise encoding will stop immediately. When the
+	encoder is already stopped, this command does nothing. mem2mem
+	encoders will send a ``V4L2_EVENT_EOS`` event when the last frame
+	has been encoded and all frames are ready to be dequeued and will
+	set the ``V4L2_BUF_FLAG_LAST`` buffer flag on the last buffer of
+	the capture queue to indicate there will be no new buffers
+	produced to dequeue. This buffer may be empty, indicated by the
+	driver setting the ``bytesused`` field to 0. Once the
+	``V4L2_BUF_FLAG_LAST`` flag was set, the
+	:ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl will not block anymore,
+	but return an ``EPIPE`` error code.
+    * - ``V4L2_ENC_CMD_PAUSE``
+      - 2
+      - Pause the encoder. When the encoder has not been started yet, the
+	driver will return an ``EPERM`` error code. When the encoder is
+	already paused, this command does nothing. No flags are defined
+	for this command.
+    * - ``V4L2_ENC_CMD_RESUME``
+      - 3
+      - Resume encoding after a PAUSE command. When the encoder has not
+	been started yet, the driver will return an ``EPERM`` error code. When
+	the encoder is already running, this command does nothing. No
+	flags are defined for this command.
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
 
 .. _encoder-flags:
 
@@ -169,15 +140,10 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_ENC_CMD_STOP_AT_GOP_END``
-
-       -  0x0001
-
-       -  Stop encoding at the end of the current *Group Of Pictures*,
-	  rather than immediately.
+    * - ``V4L2_ENC_CMD_STOP_AT_GOP_END``
+      - 0x0001
+      - Stop encoding at the end of the current *Group Of Pictures*,
+	rather than immediately.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-enum-dv-timings.rst b/Documentation/media/uapi/v4l/vidioc-enum-dv-timings.rst
index 3ba75d3..3e9d0f6 100644
--- a/Documentation/media/uapi/v4l/vidioc-enum-dv-timings.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enum-dv-timings.rst
@@ -15,7 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_enum_dv_timings *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_ENUM_DV_TIMINGS, struct v4l2_enum_dv_timings *argp )
+    :name: VIDIOC_ENUM_DV_TIMINGS
+
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_ENUM_DV_TIMINGS, struct v4l2_enum_dv_timings *argp )
+    :name: VIDIOC_SUBDEV_ENUM_DV_TIMINGS
 
 
 Arguments
@@ -24,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_ENUM_DV_TIMINGS, VIDIOC_SUBDEV_ENUM_DV_TIMINGS
-
 ``argp``
 
 
@@ -42,14 +43,16 @@
 
 To query the available timings, applications initialize the ``index``
 field, set the ``pad`` field to 0, zero the reserved array of struct
-:ref:`v4l2_enum_dv_timings <v4l2-enum-dv-timings>` and call the
+:c:type:`v4l2_enum_dv_timings` and call the
 ``VIDIOC_ENUM_DV_TIMINGS`` ioctl on a video node with a pointer to this
 structure. Drivers fill the rest of the structure or return an ``EINVAL``
 error code when the index is out of bounds. To enumerate all supported
 DV timings, applications shall begin at index zero, incrementing by one
 until the driver returns ``EINVAL``.
 
-.. note:: Drivers may enumerate a different set of DV timings after
+.. note::
+
+   Drivers may enumerate a different set of DV timings after
    switching the video input or output.
 
 When implemented by the driver DV timings of subdevices can be queried
@@ -57,53 +60,35 @@
 subdevice node. The DV timings are specific to inputs (for DV receivers)
 or outputs (for DV transmitters), applications must specify the desired
 pad number in the struct
-:ref:`v4l2_enum_dv_timings <v4l2-enum-dv-timings>` ``pad`` field.
+:c:type:`v4l2_enum_dv_timings` ``pad`` field.
 Attempts to enumerate timings on a pad that doesn't support them will
 return an ``EINVAL`` error code.
 
 
-.. _v4l2-enum-dv-timings:
+.. c:type:: v4l2_enum_dv_timings
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_enum_dv_timings
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``index``
-
-       -  Number of the DV timings, set by the application.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``pad``
-
-       -  Pad number as reported by the media controller API. This field is
-	  only used when operating on a subdevice node. When operating on a
-	  video node applications must set this field to zero.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``reserved``\ [2]
-
-       -  Reserved for future extensions. Drivers and applications must set
-	  the array to zero.
-
-    -  .. row 4
-
-       -  struct :ref:`v4l2_dv_timings <v4l2-dv-timings>`
-
-       -  ``timings``
-
-       -  The timings.
+    * - __u32
+      - ``index``
+      - Number of the DV timings, set by the application.
+    * - __u32
+      - ``pad``
+      - Pad number as reported by the media controller API. This field is
+	only used when operating on a subdevice node. When operating on a
+	video node applications must set this field to zero.
+    * - __u32
+      - ``reserved``\ [2]
+      - Reserved for future extensions. Drivers and applications must set
+	the array to zero.
+    * - struct :c:type:`v4l2_dv_timings`
+      - ``timings``
+      - The timings.
 
 
 Return Value
@@ -114,7 +99,7 @@
 :ref:`Generic Error Codes <gen-errors>` chapter.
 
 EINVAL
-    The struct :ref:`v4l2_enum_dv_timings <v4l2-enum-dv-timings>`
+    The struct :c:type:`v4l2_enum_dv_timings`
     ``index`` is out of bounds or the ``pad`` number is invalid.
 
 ENODATA
diff --git a/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst b/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst
index 90996f6..a2adaa4 100644
--- a/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_fmtdesc *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_ENUM_FMT, struct v4l2_fmtdesc *argp )
+    :name: VIDIOC_ENUM_FMT
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_ENUM_FMT
-
 ``argp``
 
 
@@ -34,98 +32,73 @@
 ===========
 
 To enumerate image formats applications initialize the ``type`` and
-``index`` field of struct :ref:`v4l2_fmtdesc <v4l2-fmtdesc>` and call
+``index`` field of struct :c:type:`v4l2_fmtdesc` and call
 the :ref:`VIDIOC_ENUM_FMT` ioctl with a pointer to this structure. Drivers
 fill the rest of the structure or return an ``EINVAL`` error code. All
 formats are enumerable by beginning at index zero and incrementing by
 one until ``EINVAL`` is returned.
 
-.. note:: After switching input or output the list of enumerated image
+.. note::
+
+   After switching input or output the list of enumerated image
    formats may be different.
 
 
-.. _v4l2-fmtdesc:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_fmtdesc
 
 .. flat-table:: struct v4l2_fmtdesc
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
+    * - __u32
+      - ``index``
+      - Number of the format in the enumeration, set by the application.
+	This is in no way related to the ``pixelformat`` field.
+    * - __u32
+      - ``type``
+      - Type of the data stream, set by the application. Only these types
+	are valid here: ``V4L2_BUF_TYPE_VIDEO_CAPTURE``,
+	``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE``,
+	``V4L2_BUF_TYPE_VIDEO_OUTPUT``,
+	``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE`` and
+	``V4L2_BUF_TYPE_VIDEO_OVERLAY``. See :c:type:`v4l2_buf_type`.
+    * - __u32
+      - ``flags``
+      - See :ref:`fmtdesc-flags`
+    * - __u8
+      - ``description``\ [32]
+      - Description of the format, a NUL-terminated ASCII string. This
+	information is intended for the user, for example: "YUV 4:2:2".
+    * - __u32
+      - ``pixelformat``
+      - The image format identifier. This is a four character code as
+	computed by the v4l2_fourcc() macro:
+    * - :cspan:`2`
 
-    -  .. row 1
+	.. _v4l2-fourcc:
 
-       -  __u32
+	``#define v4l2_fourcc(a,b,c,d)``
 
-       -  ``index``
+	``(((__u32)(a)<<0)|((__u32)(b)<<8)|((__u32)(c)<<16)|((__u32)(d)<<24))``
 
-       -  Number of the format in the enumeration, set by the application.
-	  This is in no way related to the ``pixelformat`` field.
+	Several image formats are already defined by this specification in
+	:ref:`pixfmt`.
 
-    -  .. row 2
+	.. attention::
 
-       -  __u32
-
-       -  ``type``
-
-       -  Type of the data stream, set by the application. Only these types
-	  are valid here: ``V4L2_BUF_TYPE_VIDEO_CAPTURE``,
-	  ``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE``,
-	  ``V4L2_BUF_TYPE_VIDEO_OUTPUT``,
-	  ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE`` and
-	  ``V4L2_BUF_TYPE_VIDEO_OVERLAY``. See :ref:`v4l2-buf-type`.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``flags``
-
-       -  See :ref:`fmtdesc-flags`
-
-    -  .. row 4
-
-       -  __u8
-
-       -  ``description``\ [32]
-
-       -  Description of the format, a NUL-terminated ASCII string. This
-	  information is intended for the user, for example: "YUV 4:2:2".
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``pixelformat``
-
-       -  The image format identifier. This is a four character code as
-	  computed by the v4l2_fourcc() macro:
-
-    -  .. row 6
-
-       -  :cspan:`2`
+	   These codes are not the same as those used
+	   in the Windows world.
+    * - __u32
+      - ``reserved``\ [4]
+      - Reserved for future extensions. Drivers must set the array to
+	zero.
 
 
-	  .. _v4l2-fourcc:
-	  .. code-block:: c
 
-	      #define v4l2_fourcc(a,b,c,d) (((__u32)(a)<<0)|((__u32)(b)<<8)|((__u32)(c)<<16)|((__u32)(d)<<24))
-
-	  Several image formats are already defined by this specification in
-	  :ref:`pixfmt`.
-
-	  .. attention:: These codes are not the same as those used
-	     in the Windows world.
-
-    -  .. row 7
-
-       -  __u32
-
-       -  ``reserved``\ [4]
-
-       -  Reserved for future extensions. Drivers must set the array to
-	  zero.
-
-
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
 
 .. _fmtdesc-flags:
 
@@ -134,24 +107,14 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_FMT_FLAG_COMPRESSED``
-
-       -  0x0001
-
-       -  This is a compressed format.
-
-    -  .. row 2
-
-       -  ``V4L2_FMT_FLAG_EMULATED``
-
-       -  0x0002
-
-       -  This format is not native to the device but emulated through
-	  software (usually libv4l2), where possible try to use a native
-	  format instead for better performance.
+    * - ``V4L2_FMT_FLAG_COMPRESSED``
+      - 0x0001
+      - This is a compressed format.
+    * - ``V4L2_FMT_FLAG_EMULATED``
+      - 0x0002
+      - This format is not native to the device but emulated through
+	software (usually libv4l2), where possible try to use a native
+	format instead for better performance.
 
 
 Return Value
@@ -162,5 +125,5 @@
 :ref:`Generic Error Codes <gen-errors>` chapter.
 
 EINVAL
-    The struct :ref:`v4l2_fmtdesc <v4l2-fmtdesc>` ``type`` is not
+    The struct :c:type:`v4l2_fmtdesc` ``type`` is not
     supported or the ``index`` is out of bounds.
diff --git a/Documentation/media/uapi/v4l/vidioc-enum-frameintervals.rst b/Documentation/media/uapi/v4l/vidioc-enum-frameintervals.rst
index ceae600..3949245 100644
--- a/Documentation/media/uapi/v4l/vidioc-enum-frameintervals.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enum-frameintervals.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_frmivalenum *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_ENUM_FRAMEINTERVALS, struct v4l2_frmivalenum *argp )
+    :name: VIDIOC_ENUM_FRAMEINTERVALS
 
 
 Arguments
@@ -24,11 +25,8 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_ENUM_FRAMEINTERVALS
-
 ``argp``
-    Pointer to a struct :ref:`v4l2_frmivalenum <v4l2-frmivalenum>`
+    Pointer to a struct :c:type:`v4l2_frmivalenum`
     structure that contains a pixel format and size and receives a frame
     interval.
 
@@ -73,7 +71,9 @@
 does it make sense to increase the index value to receive more frame
 intervals.
 
-.. note:: The order in which the frame intervals are returned has no
+.. note::
+
+   The order in which the frame intervals are returned has no
    special meaning. In particular does it not say anything about potential
    default frame intervals.
 
@@ -101,127 +101,70 @@
 application should zero out all members except for the *IN* fields.
 
 
-.. _v4l2-frmival-stepwise:
+.. c:type:: v4l2_frmival_stepwise
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_frmival_stepwise
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  struct :ref:`v4l2_fract <v4l2-fract>`
-
-       -  ``min``
-
-       -  Minimum frame interval [s].
-
-    -  .. row 2
-
-       -  struct :ref:`v4l2_fract <v4l2-fract>`
-
-       -  ``max``
-
-       -  Maximum frame interval [s].
-
-    -  .. row 3
-
-       -  struct :ref:`v4l2_fract <v4l2-fract>`
-
-       -  ``step``
-
-       -  Frame interval step size [s].
+    * - struct :c:type:`v4l2_fract`
+      - ``min``
+      - Minimum frame interval [s].
+    * - struct :c:type:`v4l2_fract`
+      - ``max``
+      - Maximum frame interval [s].
+    * - struct :c:type:`v4l2_fract`
+      - ``step``
+      - Frame interval step size [s].
 
 
 
-.. _v4l2-frmivalenum:
+.. c:type:: v4l2_frmivalenum
 
 .. flat-table:: struct v4l2_frmivalenum
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``index``
-
-       -
-       -  IN: Index of the given frame interval in the enumeration.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``pixel_format``
-
-       -
-       -  IN: Pixel format for which the frame intervals are enumerated.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``width``
-
-       -
-       -  IN: Frame width for which the frame intervals are enumerated.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``height``
-
-       -
-       -  IN: Frame height for which the frame intervals are enumerated.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``type``
-
-       -
-       -  OUT: Frame interval type the device supports.
-
-    -  .. row 6
-
-       -  union
-
-       -
-       -
-       -  OUT: Frame interval with the given index.
-
-    -  .. row 7
-
-       -
-       -  struct :ref:`v4l2_fract <v4l2-fract>`
-
-       -  ``discrete``
-
-       -  Frame interval [s].
-
-    -  .. row 8
-
-       -
-       -  struct :ref:`v4l2_frmival_stepwise <v4l2-frmival-stepwise>`
-
-       -  ``stepwise``
-
-       -
-
-    -  .. row 9
-
-       -  __u32
-
-       -  ``reserved[2]``
-
-       -
-       -  Reserved space for future use. Must be zeroed by drivers and
-	  applications.
+    * - __u32
+      - ``index``
+      -
+      - IN: Index of the given frame interval in the enumeration.
+    * - __u32
+      - ``pixel_format``
+      -
+      - IN: Pixel format for which the frame intervals are enumerated.
+    * - __u32
+      - ``width``
+      -
+      - IN: Frame width for which the frame intervals are enumerated.
+    * - __u32
+      - ``height``
+      -
+      - IN: Frame height for which the frame intervals are enumerated.
+    * - __u32
+      - ``type``
+      -
+      - OUT: Frame interval type the device supports.
+    * - union
+      -
+      -
+      - OUT: Frame interval with the given index.
+    * -
+      - struct :c:type:`v4l2_fract`
+      - ``discrete``
+      - Frame interval [s].
+    * -
+      - struct :c:type:`v4l2_frmival_stepwise`
+      - ``stepwise``
+      -
+    * - __u32
+      - ``reserved[2]``
+      -
+      - Reserved space for future use. Must be zeroed by drivers and
+	applications.
 
 
 
@@ -229,37 +172,24 @@
 =====
 
 
-.. _v4l2-frmivaltypes:
+.. c:type:: v4l2_frmivaltypes
+
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
 
 .. flat-table:: enum v4l2_frmivaltypes
     :header-rows:  0
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_FRMIVAL_TYPE_DISCRETE``
-
-       -  1
-
-       -  Discrete frame interval.
-
-    -  .. row 2
-
-       -  ``V4L2_FRMIVAL_TYPE_CONTINUOUS``
-
-       -  2
-
-       -  Continuous frame interval.
-
-    -  .. row 3
-
-       -  ``V4L2_FRMIVAL_TYPE_STEPWISE``
-
-       -  3
-
-       -  Step-wise defined frame interval.
+    * - ``V4L2_FRMIVAL_TYPE_DISCRETE``
+      - 1
+      - Discrete frame interval.
+    * - ``V4L2_FRMIVAL_TYPE_CONTINUOUS``
+      - 2
+      - Continuous frame interval.
+    * - ``V4L2_FRMIVAL_TYPE_STEPWISE``
+      - 3
+      - Step-wise defined frame interval.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-enum-framesizes.rst b/Documentation/media/uapi/v4l/vidioc-enum-framesizes.rst
index 8b26835..628f1aa 100644
--- a/Documentation/media/uapi/v4l/vidioc-enum-framesizes.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enum-framesizes.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_frmsizeenum *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_ENUM_FRAMESIZES, struct v4l2_frmsizeenum *argp )
+    :name: VIDIOC_ENUM_FRAMESIZES
 
 
 Arguments
@@ -24,11 +25,8 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_ENUM_FRAMESIZES
-
 ``argp``
-    Pointer to a struct :ref:`v4l2_frmsizeenum <v4l2-frmsizeenum>`
+    Pointer to a struct :c:type:`v4l2_frmsizeenum`
     that contains an index and pixel format and receives a frame width
     and height.
 
@@ -72,7 +70,9 @@
 device supports. Only for the ``V4L2_FRMSIZE_TYPE_DISCRETE`` type does
 it make sense to increase the index value to receive more frame sizes.
 
-.. note:: The order in which the frame sizes are returned has no special
+.. note::
+
+   The order in which the frame sizes are returned has no special
    meaning. In particular does it not say anything about potential default
    format sizes.
 
@@ -90,159 +90,89 @@
 application should zero out all members except for the *IN* fields.
 
 
-.. _v4l2-frmsize-discrete:
+.. c:type:: v4l2_frmsize_discrete
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_frmsize_discrete
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``width``
-
-       -  Width of the frame [pixel].
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``height``
-
-       -  Height of the frame [pixel].
+    * - __u32
+      - ``width``
+      - Width of the frame [pixel].
+    * - __u32
+      - ``height``
+      - Height of the frame [pixel].
 
 
 
-.. _v4l2-frmsize-stepwise:
+.. c:type:: v4l2_frmsize_stepwise
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_frmsize_stepwise
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``min_width``
-
-       -  Minimum frame width [pixel].
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``max_width``
-
-       -  Maximum frame width [pixel].
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``step_width``
-
-       -  Frame width step size [pixel].
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``min_height``
-
-       -  Minimum frame height [pixel].
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``max_height``
-
-       -  Maximum frame height [pixel].
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``step_height``
-
-       -  Frame height step size [pixel].
+    * - __u32
+      - ``min_width``
+      - Minimum frame width [pixel].
+    * - __u32
+      - ``max_width``
+      - Maximum frame width [pixel].
+    * - __u32
+      - ``step_width``
+      - Frame width step size [pixel].
+    * - __u32
+      - ``min_height``
+      - Minimum frame height [pixel].
+    * - __u32
+      - ``max_height``
+      - Maximum frame height [pixel].
+    * - __u32
+      - ``step_height``
+      - Frame height step size [pixel].
 
 
 
-.. _v4l2-frmsizeenum:
+.. c:type:: v4l2_frmsizeenum
 
 .. flat-table:: struct v4l2_frmsizeenum
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``index``
-
-       -
-       -  IN: Index of the given frame size in the enumeration.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``pixel_format``
-
-       -
-       -  IN: Pixel format for which the frame sizes are enumerated.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``type``
-
-       -
-       -  OUT: Frame size type the device supports.
-
-    -  .. row 4
-
-       -  union
-
-       -
-       -
-       -  OUT: Frame size with the given index.
-
-    -  .. row 5
-
-       -
-       -  struct :ref:`v4l2_frmsize_discrete <v4l2-frmsize-discrete>`
-
-       -  ``discrete``
-
-       -
-
-    -  .. row 6
-
-       -
-       -  struct :ref:`v4l2_frmsize_stepwise <v4l2-frmsize-stepwise>`
-
-       -  ``stepwise``
-
-       -
-
-    -  .. row 7
-
-       -  __u32
-
-       -  ``reserved[2]``
-
-       -
-       -  Reserved space for future use. Must be zeroed by drivers and
-	  applications.
+    * - __u32
+      - ``index``
+      -
+      - IN: Index of the given frame size in the enumeration.
+    * - __u32
+      - ``pixel_format``
+      -
+      - IN: Pixel format for which the frame sizes are enumerated.
+    * - __u32
+      - ``type``
+      -
+      - OUT: Frame size type the device supports.
+    * - union
+      -
+      -
+      - OUT: Frame size with the given index.
+    * -
+      - struct :c:type:`v4l2_frmsize_discrete`
+      - ``discrete``
+      -
+    * -
+      - struct :c:type:`v4l2_frmsize_stepwise`
+      - ``stepwise``
+      -
+    * - __u32
+      - ``reserved[2]``
+      -
+      - Reserved space for future use. Must be zeroed by drivers and
+	applications.
 
 
 
@@ -250,37 +180,24 @@
 =====
 
 
-.. _v4l2-frmsizetypes:
+.. c:type:: v4l2_frmsizetypes
+
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
 
 .. flat-table:: enum v4l2_frmsizetypes
     :header-rows:  0
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_FRMSIZE_TYPE_DISCRETE``
-
-       -  1
-
-       -  Discrete frame size.
-
-    -  .. row 2
-
-       -  ``V4L2_FRMSIZE_TYPE_CONTINUOUS``
-
-       -  2
-
-       -  Continuous frame size.
-
-    -  .. row 3
-
-       -  ``V4L2_FRMSIZE_TYPE_STEPWISE``
-
-       -  3
-
-       -  Step-wise defined frame size.
+    * - ``V4L2_FRMSIZE_TYPE_DISCRETE``
+      - 1
+      - Discrete frame size.
+    * - ``V4L2_FRMSIZE_TYPE_CONTINUOUS``
+      - 2
+      - Continuous frame size.
+    * - ``V4L2_FRMSIZE_TYPE_STEPWISE``
+      - 3
+      - Step-wise defined frame size.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-enum-freq-bands.rst b/Documentation/media/uapi/v4l/vidioc-enum-freq-bands.rst
index 00ab5e1..4e5f5e5 100644
--- a/Documentation/media/uapi/v4l/vidioc-enum-freq-bands.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enum-freq-bands.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_frequency_band *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_ENUM_FREQ_BANDS, struct v4l2_frequency_band *argp )
+    :name: VIDIOC_ENUM_FREQ_BANDS
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_ENUM_FREQ_BANDS
-
 ``argp``
 
 
@@ -36,117 +34,84 @@
 Enumerates the frequency bands that a tuner or modulator supports. To do
 this applications initialize the ``tuner``, ``type`` and ``index``
 fields, and zero out the ``reserved`` array of a struct
-:ref:`v4l2_frequency_band <v4l2-frequency-band>` and call the
+:c:type:`v4l2_frequency_band` and call the
 :ref:`VIDIOC_ENUM_FREQ_BANDS` ioctl with a pointer to this structure.
 
 This ioctl is supported if the ``V4L2_TUNER_CAP_FREQ_BANDS`` capability
 of the corresponding tuner/modulator is set.
 
 
-.. _v4l2-frequency-band:
+.. tabularcolumns:: |p{2.9cm}|p{2.9cm}|p{5.8cm}|p{2.9cm}|p{3.0cm}|
+
+.. c:type:: v4l2_frequency_band
 
 .. flat-table:: struct v4l2_frequency_band
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2 1 1
 
+    * - __u32
+      - ``tuner``
+      - The tuner or modulator index number. This is the same value as in
+	the struct :c:type:`v4l2_input` ``tuner`` field and
+	the struct :c:type:`v4l2_tuner` ``index`` field, or
+	the struct :c:type:`v4l2_output` ``modulator`` field
+	and the struct :c:type:`v4l2_modulator` ``index``
+	field.
+    * - __u32
+      - ``type``
+      - The tuner type. This is the same value as in the struct
+	:c:type:`v4l2_tuner` ``type`` field. The type must be
+	set to ``V4L2_TUNER_RADIO`` for ``/dev/radioX`` device nodes, and
+	to ``V4L2_TUNER_ANALOG_TV`` for all others. Set this field to
+	``V4L2_TUNER_RADIO`` for modulators (currently only radio
+	modulators are supported). See :c:type:`v4l2_tuner_type`
+    * - __u32
+      - ``index``
+      - Identifies the frequency band, set by the application.
+    * - __u32
+      - ``capability``
+      - :cspan:`2` The tuner/modulator capability flags for this
+	frequency band, see :ref:`tuner-capability`. The
+	``V4L2_TUNER_CAP_LOW`` or ``V4L2_TUNER_CAP_1HZ`` capability must
+	be the same for all frequency bands of the selected
+	tuner/modulator. So either all bands have that capability set, or
+	none of them have that capability.
+    * - __u32
+      - ``rangelow``
+      - :cspan:`2` The lowest tunable frequency in units of 62.5 kHz, or
+	if the ``capability`` flag ``V4L2_TUNER_CAP_LOW`` is set, in units
+	of 62.5 Hz, for this frequency band. A 1 Hz unit is used when the
+	``capability`` flag ``V4L2_TUNER_CAP_1HZ`` is set.
+    * - __u32
+      - ``rangehigh``
+      - :cspan:`2` The highest tunable frequency in units of 62.5 kHz,
+	or if the ``capability`` flag ``V4L2_TUNER_CAP_LOW`` is set, in
+	units of 62.5 Hz, for this frequency band. A 1 Hz unit is used
+	when the ``capability`` flag ``V4L2_TUNER_CAP_1HZ`` is set.
+    * - __u32
+      - ``modulation``
+      - :cspan:`2` The supported modulation systems of this frequency
+	band. See :ref:`band-modulation`.
 
-    -  .. row 1
+	.. note::
 
-       -  __u32
+	   Currently only one modulation system per frequency band
+	   is supported. More work will need to be done if multiple
+	   modulation systems are possible. Contact the linux-media
+	   mailing list
+	   (`https://linuxtv.org/lists.php <https://linuxtv.org/lists.php>`__)
+	   if you need such functionality.
+    * - __u32
+      - ``reserved``\ [9]
+      - Reserved for future extensions.
 
-       -  ``tuner``
-
-       -  The tuner or modulator index number. This is the same value as in
-	  the struct :ref:`v4l2_input <v4l2-input>` ``tuner`` field and
-	  the struct :ref:`v4l2_tuner <v4l2-tuner>` ``index`` field, or
-	  the struct :ref:`v4l2_output <v4l2-output>` ``modulator`` field
-	  and the struct :ref:`v4l2_modulator <v4l2-modulator>` ``index``
-	  field.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``type``
-
-       -  The tuner type. This is the same value as in the struct
-	  :ref:`v4l2_tuner <v4l2-tuner>` ``type`` field. The type must be
-	  set to ``V4L2_TUNER_RADIO`` for ``/dev/radioX`` device nodes, and
-	  to ``V4L2_TUNER_ANALOG_TV`` for all others. Set this field to
-	  ``V4L2_TUNER_RADIO`` for modulators (currently only radio
-	  modulators are supported). See :ref:`v4l2-tuner-type`
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``index``
-
-       -  Identifies the frequency band, set by the application.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``capability``
-
-       -  :cspan:`2` The tuner/modulator capability flags for this
-	  frequency band, see :ref:`tuner-capability`. The
-	  ``V4L2_TUNER_CAP_LOW`` or ``V4L2_TUNER_CAP_1HZ`` capability must
-	  be the same for all frequency bands of the selected
-	  tuner/modulator. So either all bands have that capability set, or
-	  none of them have that capability.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``rangelow``
-
-       -  :cspan:`2` The lowest tunable frequency in units of 62.5 kHz, or
-	  if the ``capability`` flag ``V4L2_TUNER_CAP_LOW`` is set, in units
-	  of 62.5 Hz, for this frequency band. A 1 Hz unit is used when the
-	  ``capability`` flag ``V4L2_TUNER_CAP_1HZ`` is set.
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``rangehigh``
-
-       -  :cspan:`2` The highest tunable frequency in units of 62.5 kHz,
-	  or if the ``capability`` flag ``V4L2_TUNER_CAP_LOW`` is set, in
-	  units of 62.5 Hz, for this frequency band. A 1 Hz unit is used
-	  when the ``capability`` flag ``V4L2_TUNER_CAP_1HZ`` is set.
-
-    -  .. row 7
-
-       -  __u32
-
-       -  ``modulation``
-
-       -  :cspan:`2` The supported modulation systems of this frequency
-	  band. See :ref:`band-modulation`.
-
-	  .. note:: Currently only one modulation system per frequency band
-	     is supported. More work will need to be done if multiple
-	     modulation systems are possible. Contact the linux-media
-	     mailing list
-	     (`https://linuxtv.org/lists.php <https://linuxtv.org/lists.php>`__)
-	     if you need such functionality.
-
-    -  .. row 8
-
-       -  __u32
-
-       -  ``reserved``\ [9]
-
-       -  Reserved for future extensions. Applications and drivers must set
-	  the array to zero.
+	Applications and drivers must set the array to zero.
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _band-modulation:
 
 .. flat-table:: Band Modulation Systems
@@ -154,30 +119,15 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_BAND_MODULATION_VSB``
-
-       -  0x02
-
-       -  Vestigial Sideband modulation, used for analog TV.
-
-    -  .. row 2
-
-       -  ``V4L2_BAND_MODULATION_FM``
-
-       -  0x04
-
-       -  Frequency Modulation, commonly used for analog radio.
-
-    -  .. row 3
-
-       -  ``V4L2_BAND_MODULATION_AM``
-
-       -  0x08
-
-       -  Amplitude Modulation, commonly used for analog radio.
+    * - ``V4L2_BAND_MODULATION_VSB``
+      - 0x02
+      - Vestigial Sideband modulation, used for analog TV.
+    * - ``V4L2_BAND_MODULATION_FM``
+      - 0x04
+      - Frequency Modulation, commonly used for analog radio.
+    * - ``V4L2_BAND_MODULATION_AM``
+      - 0x08
+      - Amplitude Modulation, commonly used for analog radio.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-enumaudio.rst b/Documentation/media/uapi/v4l/vidioc-enumaudio.rst
index bfdc353..74bc3ed 100644
--- a/Documentation/media/uapi/v4l/vidioc-enumaudio.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enumaudio.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_audio *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_ENUMAUDIO, struct v4l2_audio *argp )
+    :name: VIDIOC_ENUMAUDIO
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_ENUMAUDIO
-
 ``argp``
 
 
@@ -35,14 +33,14 @@
 
 To query the attributes of an audio input applications initialize the
 ``index`` field and zero out the ``reserved`` array of a struct
-:ref:`v4l2_audio <v4l2-audio>` and call the :ref:`VIDIOC_ENUMAUDIO`
+:c:type:`v4l2_audio` and call the :ref:`VIDIOC_ENUMAUDIO`
 ioctl with a pointer to this structure. Drivers fill the rest of the
 structure or return an ``EINVAL`` error code when the index is out of
 bounds. To enumerate all audio inputs applications shall begin at index
 zero, incrementing by one until the driver returns ``EINVAL``.
 
 See :ref:`VIDIOC_G_AUDIO <VIDIOC_G_AUDIO>` for a description of struct
-:ref:`v4l2_audio <v4l2-audio>`.
+:c:type:`v4l2_audio`.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-enumaudioout.rst b/Documentation/media/uapi/v4l/vidioc-enumaudioout.rst
index cde1db5..4470a1e 100644
--- a/Documentation/media/uapi/v4l/vidioc-enumaudioout.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enumaudioout.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_audioout *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_ENUMAUDOUT, struct v4l2_audioout *argp )
+    :name: VIDIOC_ENUMAUDOUT
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_ENUMAUDOUT
-
 ``argp``
 
 
@@ -35,17 +33,19 @@
 
 To query the attributes of an audio output applications initialize the
 ``index`` field and zero out the ``reserved`` array of a struct
-:ref:`v4l2_audioout <v4l2-audioout>` and call the ``VIDIOC_G_AUDOUT``
+:c:type:`v4l2_audioout` and call the ``VIDIOC_G_AUDOUT``
 ioctl with a pointer to this structure. Drivers fill the rest of the
 structure or return an ``EINVAL`` error code when the index is out of
 bounds. To enumerate all audio outputs applications shall begin at index
 zero, incrementing by one until the driver returns ``EINVAL``.
 
-.. note:: Connectors on a TV card to loop back the received audio signal
+.. note::
+
+    Connectors on a TV card to loop back the received audio signal
     to a sound card are not audio outputs in this sense.
 
 See :ref:`VIDIOC_G_AUDIOout <VIDIOC_G_AUDOUT>` for a description of struct
-:ref:`v4l2_audioout <v4l2-audioout>`.
+:c:type:`v4l2_audioout`.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-enuminput.rst b/Documentation/media/uapi/v4l/vidioc-enuminput.rst
index 5060f54..17aaaf9 100644
--- a/Documentation/media/uapi/v4l/vidioc-enuminput.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enuminput.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_input *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_ENUMINPUT, struct v4l2_input *argp )
+    :name: VIDIOC_ENUMINPUT
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_ENUMINPUT
-
 ``argp``
 
 
@@ -34,120 +32,79 @@
 ===========
 
 To query the attributes of a video input applications initialize the
-``index`` field of struct :ref:`v4l2_input <v4l2-input>` and call the
+``index`` field of struct :c:type:`v4l2_input` and call the
 :ref:`VIDIOC_ENUMINPUT` ioctl with a pointer to this structure. Drivers
 fill the rest of the structure or return an ``EINVAL`` error code when the
 index is out of bounds. To enumerate all inputs applications shall begin
 at index zero, incrementing by one until the driver returns ``EINVAL``.
 
 
-.. _v4l2-input:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_input
 
 .. flat-table:: struct v4l2_input
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
+    * - __u32
+      - ``index``
+      - Identifies the input, set by the application.
+    * - __u8
+      - ``name``\ [32]
+      - Name of the video input, a NUL-terminated ASCII string, for
+	example: "Vin (Composite 2)". This information is intended for the
+	user, preferably the connector label on the device itself.
+    * - __u32
+      - ``type``
+      - Type of the input, see :ref:`input-type`.
+    * - __u32
+      - ``audioset``
+      - Drivers can enumerate up to 32 video and audio inputs. This field
+	shows which audio inputs were selectable as audio source if this
+	was the currently selected video input. It is a bit mask. The LSB
+	corresponds to audio input 0, the MSB to input 31. Any number of
+	bits can be set, or none.
 
-    -  .. row 1
+	When the driver does not enumerate audio inputs no bits must be
+	set. Applications shall not interpret this as lack of audio
+	support. Some drivers automatically select audio sources and do
+	not enumerate them since there is no choice anyway.
 
-       -  __u32
-
-       -  ``index``
-
-       -  Identifies the input, set by the application.
-
-    -  .. row 2
-
-       -  __u8
-
-       -  ``name``\ [32]
-
-       -  Name of the video input, a NUL-terminated ASCII string, for
-	  example: "Vin (Composite 2)". This information is intended for the
-	  user, preferably the connector label on the device itself.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``type``
-
-       -  Type of the input, see :ref:`input-type`.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``audioset``
-
-       -  Drivers can enumerate up to 32 video and audio inputs. This field
-	  shows which audio inputs were selectable as audio source if this
-	  was the currently selected video input. It is a bit mask. The LSB
-	  corresponds to audio input 0, the MSB to input 31. Any number of
-	  bits can be set, or none.
-
-	  When the driver does not enumerate audio inputs no bits must be
-	  set. Applications shall not interpret this as lack of audio
-	  support. Some drivers automatically select audio sources and do
-	  not enumerate them since there is no choice anyway.
-
-	  For details on audio inputs and how to select the current input
-	  see :ref:`audio`.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``tuner``
-
-       -  Capture devices can have zero or more tuners (RF demodulators).
-	  When the ``type`` is set to ``V4L2_INPUT_TYPE_TUNER`` this is an
-	  RF connector and this field identifies the tuner. It corresponds
-	  to struct :ref:`v4l2_tuner <v4l2-tuner>` field ``index``. For
-	  details on tuners see :ref:`tuner`.
-
-    -  .. row 6
-
-       -  :ref:`v4l2_std_id <v4l2-std-id>`
-
-       -  ``std``
-
-       -  Every video input supports one or more different video standards.
-	  This field is a set of all supported standards. For details on
-	  video standards and how to switch see :ref:`standard`.
-
-    -  .. row 7
-
-       -  __u32
-
-       -  ``status``
-
-       -  This field provides status information about the input. See
-	  :ref:`input-status` for flags. With the exception of the sensor
-	  orientation bits ``status`` is only valid when this is the current
-	  input.
-
-    -  .. row 8
-
-       -  __u32
-
-       -  ``capabilities``
-
-       -  This field provides capabilities for the input. See
-	  :ref:`input-capabilities` for flags.
-
-    -  .. row 9
-
-       -  __u32
-
-       -  ``reserved``\ [3]
-
-       -  Reserved for future extensions. Drivers must set the array to
-	  zero.
+	For details on audio inputs and how to select the current input
+	see :ref:`audio`.
+    * - __u32
+      - ``tuner``
+      - Capture devices can have zero or more tuners (RF demodulators).
+	When the ``type`` is set to ``V4L2_INPUT_TYPE_TUNER`` this is an
+	RF connector and this field identifies the tuner. It corresponds
+	to struct :c:type:`v4l2_tuner` field ``index``. For
+	details on tuners see :ref:`tuner`.
+    * - :ref:`v4l2_std_id <v4l2-std-id>`
+      - ``std``
+      - Every video input supports one or more different video standards.
+	This field is a set of all supported standards. For details on
+	video standards and how to switch see :ref:`standard`.
+    * - __u32
+      - ``status``
+      - This field provides status information about the input. See
+	:ref:`input-status` for flags. With the exception of the sensor
+	orientation bits ``status`` is only valid when this is the current
+	input.
+    * - __u32
+      - ``capabilities``
+      - This field provides capabilities for the input. See
+	:ref:`input-capabilities` for flags.
+    * - __u32
+      - ``reserved``\ [3]
+      - Reserved for future extensions. Drivers must set the array to
+	zero.
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _input-type:
 
 .. flat-table:: Input Types
@@ -155,169 +112,93 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_INPUT_TYPE_TUNER``
-
-       -  1
-
-       -  This input uses a tuner (RF demodulator).
-
-    -  .. row 2
-
-       -  ``V4L2_INPUT_TYPE_CAMERA``
-
-       -  2
-
-       -  Analog baseband input, for example CVBS / Composite Video,
-	  S-Video, RGB.
+    * - ``V4L2_INPUT_TYPE_TUNER``
+      - 1
+      - This input uses a tuner (RF demodulator).
+    * - ``V4L2_INPUT_TYPE_CAMERA``
+      - 2
+      - Analog baseband input, for example CVBS / Composite Video,
+	S-Video, RGB.
+    * - ``V4L2_INPUT_TYPE_TOUCH``
+      - 3
+      - This input is a touch device for capturing raw touch data.
 
 
 
+.. tabularcolumns:: |p{4.8cm}|p{2.6cm}|p{10.1cm}|
+
 .. _input-status:
 
 .. flat-table:: Input Status Flags
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  :cspan:`2` General
-
-    -  .. row 2
-
-       -  ``V4L2_IN_ST_NO_POWER``
-
-       -  0x00000001
-
-       -  Attached device is off.
-
-    -  .. row 3
-
-       -  ``V4L2_IN_ST_NO_SIGNAL``
-
-       -  0x00000002
-
-       -
-
-    -  .. row 4
-
-       -  ``V4L2_IN_ST_NO_COLOR``
-
-       -  0x00000004
-
-       -  The hardware supports color decoding, but does not detect color
-	  modulation in the signal.
-
-    -  .. row 5
-
-       -  :cspan:`2` Sensor Orientation
-
-    -  .. row 6
-
-       -  ``V4L2_IN_ST_HFLIP``
-
-       -  0x00000010
-
-       -  The input is connected to a device that produces a signal that is
-	  flipped horizontally and does not correct this before passing the
-	  signal to userspace.
-
-    -  .. row 7
-
-       -  ``V4L2_IN_ST_VFLIP``
-
-       -  0x00000020
-
-       -  The input is connected to a device that produces a signal that is
-	  flipped vertically and does not correct this before passing the
-	  signal to userspace.
-	  .. note:: A 180 degree rotation is the same as HFLIP | VFLIP
-
-    -  .. row 8
-
-       -  :cspan:`2` Analog Video
-
-    -  .. row 9
-
-       -  ``V4L2_IN_ST_NO_H_LOCK``
-
-       -  0x00000100
-
-       -  No horizontal sync lock.
-
-    -  .. row 10
-
-       -  ``V4L2_IN_ST_COLOR_KILL``
-
-       -  0x00000200
-
-       -  A color killer circuit automatically disables color decoding when
-	  it detects no color modulation. When this flag is set the color
-	  killer is enabled *and* has shut off color decoding.
-
-    -  .. row 11
-
-       -  :cspan:`2` Digital Video
-
-    -  .. row 12
-
-       -  ``V4L2_IN_ST_NO_SYNC``
-
-       -  0x00010000
-
-       -  No synchronization lock.
-
-    -  .. row 13
-
-       -  ``V4L2_IN_ST_NO_EQU``
-
-       -  0x00020000
-
-       -  No equalizer lock.
-
-    -  .. row 14
-
-       -  ``V4L2_IN_ST_NO_CARRIER``
-
-       -  0x00040000
-
-       -  Carrier recovery failed.
-
-    -  .. row 15
-
-       -  :cspan:`2` VCR and Set-Top Box
-
-    -  .. row 16
-
-       -  ``V4L2_IN_ST_MACROVISION``
-
-       -  0x01000000
-
-       -  Macrovision is an analog copy prevention system mangling the video
-	  signal to confuse video recorders. When this flag is set
-	  Macrovision has been detected.
-
-    -  .. row 17
-
-       -  ``V4L2_IN_ST_NO_ACCESS``
-
-       -  0x02000000
-
-       -  Conditional access denied.
-
-    -  .. row 18
-
-       -  ``V4L2_IN_ST_VTR``
-
-       -  0x04000000
-
-       -  VTR time constant. [?]
+    * - :cspan:`2` General
+    * - ``V4L2_IN_ST_NO_POWER``
+      - 0x00000001
+      - Attached device is off.
+    * - ``V4L2_IN_ST_NO_SIGNAL``
+      - 0x00000002
+      -
+    * - ``V4L2_IN_ST_NO_COLOR``
+      - 0x00000004
+      - The hardware supports color decoding, but does not detect color
+	modulation in the signal.
+    * - :cspan:`2` Sensor Orientation
+    * - ``V4L2_IN_ST_HFLIP``
+      - 0x00000010
+      - The input is connected to a device that produces a signal that is
+	flipped horizontally and does not correct this before passing the
+	signal to userspace.
+    * - ``V4L2_IN_ST_VFLIP``
+      - 0x00000020
+      - The input is connected to a device that produces a signal that is
+	flipped vertically and does not correct this before passing the
+	signal to userspace.
+	.. note:: A 180 degree rotation is the same as HFLIP | VFLIP
+    * - :cspan:`2` Analog Video
+    * - ``V4L2_IN_ST_NO_H_LOCK``
+      - 0x00000100
+      - No horizontal sync lock.
+    * - ``V4L2_IN_ST_COLOR_KILL``
+      - 0x00000200
+      - A color killer circuit automatically disables color decoding when
+	it detects no color modulation. When this flag is set the color
+	killer is enabled *and* has shut off color decoding.
+    * - ``V4L2_IN_ST_NO_V_LOCK``
+      - 0x00000400
+      - No vertical sync lock.
+    * - ``V4L2_IN_ST_NO_STD_LOCK``
+      - 0x00000800
+      - No standard format lock in case of auto-detection format
+	by the component.
+    * - :cspan:`2` Digital Video
+    * - ``V4L2_IN_ST_NO_SYNC``
+      - 0x00010000
+      - No synchronization lock.
+    * - ``V4L2_IN_ST_NO_EQU``
+      - 0x00020000
+      - No equalizer lock.
+    * - ``V4L2_IN_ST_NO_CARRIER``
+      - 0x00040000
+      - Carrier recovery failed.
+    * - :cspan:`2` VCR and Set-Top Box
+    * - ``V4L2_IN_ST_MACROVISION``
+      - 0x01000000
+      - Macrovision is an analog copy prevention system mangling the video
+	signal to confuse video recorders. When this flag is set
+	Macrovision has been detected.
+    * - ``V4L2_IN_ST_NO_ACCESS``
+      - 0x02000000
+      - Conditional access denied.
+    * - ``V4L2_IN_ST_VTR``
+      - 0x04000000
+      - VTR time constant. [?]
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _input-capabilities:
 
 .. flat-table:: Input capabilities
@@ -325,34 +206,19 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_IN_CAP_DV_TIMINGS``
-
-       -  0x00000002
-
-       -  This input supports setting video timings by using
-	  VIDIOC_S_DV_TIMINGS.
-
-    -  .. row 2
-
-       -  ``V4L2_IN_CAP_STD``
-
-       -  0x00000004
-
-       -  This input supports setting the TV standard by using
-	  VIDIOC_S_STD.
-
-    -  .. row 3
-
-       -  ``V4L2_IN_CAP_NATIVE_SIZE``
-
-       -  0x00000008
-
-       -  This input supports setting the native size using the
-	  ``V4L2_SEL_TGT_NATIVE_SIZE`` selection target, see
-	  :ref:`v4l2-selections-common`.
+    * - ``V4L2_IN_CAP_DV_TIMINGS``
+      - 0x00000002
+      - This input supports setting video timings by using
+	VIDIOC_S_DV_TIMINGS.
+    * - ``V4L2_IN_CAP_STD``
+      - 0x00000004
+      - This input supports setting the TV standard by using
+	VIDIOC_S_STD.
+    * - ``V4L2_IN_CAP_NATIVE_SIZE``
+      - 0x00000008
+      - This input supports setting the native size using the
+	``V4L2_SEL_TGT_NATIVE_SIZE`` selection target, see
+	:ref:`v4l2-selections-common`.
 
 
 Return Value
@@ -363,5 +229,5 @@
 :ref:`Generic Error Codes <gen-errors>` chapter.
 
 EINVAL
-    The struct :ref:`v4l2_input <v4l2-input>` ``index`` is out of
+    The struct :c:type:`v4l2_input` ``index`` is out of
     bounds.
diff --git a/Documentation/media/uapi/v4l/vidioc-enumoutput.rst b/Documentation/media/uapi/v4l/vidioc-enumoutput.rst
index 82fc9d3..d7dd274 100644
--- a/Documentation/media/uapi/v4l/vidioc-enumoutput.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enumoutput.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_output *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_ENUMOUTPUT, struct v4l2_output *argp )
+    :name: VIDIOC_ENUMOUTPUT
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_ENUMOUTPUT
-
 ``argp``
 
 
@@ -34,7 +32,7 @@
 ===========
 
 To query the attributes of a video outputs applications initialize the
-``index`` field of struct :ref:`v4l2_output <v4l2-output>` and call
+``index`` field of struct :c:type:`v4l2_output` and call
 the :ref:`VIDIOC_ENUMOUTPUT` ioctl with a pointer to this structure.
 Drivers fill the rest of the structure or return an ``EINVAL`` error code
 when the index is out of bounds. To enumerate all outputs applications
@@ -42,102 +40,66 @@
 EINVAL.
 
 
-.. _v4l2-output:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_output
 
 .. flat-table:: struct v4l2_output
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
+    * - __u32
+      - ``index``
+      - Identifies the output, set by the application.
+    * - __u8
+      - ``name``\ [32]
+      - Name of the video output, a NUL-terminated ASCII string, for
+	example: "Vout". This information is intended for the user,
+	preferably the connector label on the device itself.
+    * - __u32
+      - ``type``
+      - Type of the output, see :ref:`output-type`.
+    * - __u32
+      - ``audioset``
+      - Drivers can enumerate up to 32 video and audio outputs. This field
+	shows which audio outputs were selectable as the current output if
+	this was the currently selected video output. It is a bit mask.
+	The LSB corresponds to audio output 0, the MSB to output 31. Any
+	number of bits can be set, or none.
 
-    -  .. row 1
+	When the driver does not enumerate audio outputs no bits must be
+	set. Applications shall not interpret this as lack of audio
+	support. Drivers may automatically select audio outputs without
+	enumerating them.
 
-       -  __u32
-
-       -  ``index``
-
-       -  Identifies the output, set by the application.
-
-    -  .. row 2
-
-       -  __u8
-
-       -  ``name``\ [32]
-
-       -  Name of the video output, a NUL-terminated ASCII string, for
-	  example: "Vout". This information is intended for the user,
-	  preferably the connector label on the device itself.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``type``
-
-       -  Type of the output, see :ref:`output-type`.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``audioset``
-
-       -  Drivers can enumerate up to 32 video and audio outputs. This field
-	  shows which audio outputs were selectable as the current output if
-	  this was the currently selected video output. It is a bit mask.
-	  The LSB corresponds to audio output 0, the MSB to output 31. Any
-	  number of bits can be set, or none.
-
-	  When the driver does not enumerate audio outputs no bits must be
-	  set. Applications shall not interpret this as lack of audio
-	  support. Drivers may automatically select audio outputs without
-	  enumerating them.
-
-	  For details on audio outputs and how to select the current output
-	  see :ref:`audio`.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``modulator``
-
-       -  Output devices can have zero or more RF modulators. When the
-	  ``type`` is ``V4L2_OUTPUT_TYPE_MODULATOR`` this is an RF connector
-	  and this field identifies the modulator. It corresponds to struct
-	  :ref:`v4l2_modulator <v4l2-modulator>` field ``index``. For
-	  details on modulators see :ref:`tuner`.
-
-    -  .. row 6
-
-       -  :ref:`v4l2_std_id <v4l2-std-id>`
-
-       -  ``std``
-
-       -  Every video output supports one or more different video standards.
-	  This field is a set of all supported standards. For details on
-	  video standards and how to switch see :ref:`standard`.
-
-    -  .. row 7
-
-       -  __u32
-
-       -  ``capabilities``
-
-       -  This field provides capabilities for the output. See
-	  :ref:`output-capabilities` for flags.
-
-    -  .. row 8
-
-       -  __u32
-
-       -  ``reserved``\ [3]
-
-       -  Reserved for future extensions. Drivers must set the array to
-	  zero.
+	For details on audio outputs and how to select the current output
+	see :ref:`audio`.
+    * - __u32
+      - ``modulator``
+      - Output devices can have zero or more RF modulators. When the
+	``type`` is ``V4L2_OUTPUT_TYPE_MODULATOR`` this is an RF connector
+	and this field identifies the modulator. It corresponds to struct
+	:c:type:`v4l2_modulator` field ``index``. For
+	details on modulators see :ref:`tuner`.
+    * - :ref:`v4l2_std_id <v4l2-std-id>`
+      - ``std``
+      - Every video output supports one or more different video standards.
+	This field is a set of all supported standards. For details on
+	video standards and how to switch see :ref:`standard`.
+    * - __u32
+      - ``capabilities``
+      - This field provides capabilities for the output. See
+	:ref:`output-capabilities` for flags.
+    * - __u32
+      - ``reserved``\ [3]
+      - Reserved for future extensions. Drivers must set the array to
+	zero.
 
 
 
+.. tabularcolumns:: |p{7.0cm}|p{1.8cm}|p{8.7cm}|
+
 .. _output-type:
 
 .. flat-table:: Output Type
@@ -145,34 +107,21 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_OUTPUT_TYPE_MODULATOR``
-
-       -  1
-
-       -  This output is an analog TV modulator.
-
-    -  .. row 2
-
-       -  ``V4L2_OUTPUT_TYPE_ANALOG``
-
-       -  2
-
-       -  Analog baseband output, for example Composite / CVBS, S-Video,
-	  RGB.
-
-    -  .. row 3
-
-       -  ``V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY``
-
-       -  3
-
-       -  [?]
+    * - ``V4L2_OUTPUT_TYPE_MODULATOR``
+      - 1
+      - This output is an analog TV modulator.
+    * - ``V4L2_OUTPUT_TYPE_ANALOG``
+      - 2
+      - Analog baseband output, for example Composite / CVBS, S-Video,
+	RGB.
+    * - ``V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY``
+      - 3
+      - [?]
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _output-capabilities:
 
 .. flat-table:: Output capabilities
@@ -180,34 +129,19 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_OUT_CAP_DV_TIMINGS``
-
-       -  0x00000002
-
-       -  This output supports setting video timings by using
-	  VIDIOC_S_DV_TIMINGS.
-
-    -  .. row 2
-
-       -  ``V4L2_OUT_CAP_STD``
-
-       -  0x00000004
-
-       -  This output supports setting the TV standard by using
-	  VIDIOC_S_STD.
-
-    -  .. row 3
-
-       -  ``V4L2_OUT_CAP_NATIVE_SIZE``
-
-       -  0x00000008
-
-       -  This output supports setting the native size using the
-	  ``V4L2_SEL_TGT_NATIVE_SIZE`` selection target, see
-	  :ref:`v4l2-selections-common`.
+    * - ``V4L2_OUT_CAP_DV_TIMINGS``
+      - 0x00000002
+      - This output supports setting video timings by using
+	VIDIOC_S_DV_TIMINGS.
+    * - ``V4L2_OUT_CAP_STD``
+      - 0x00000004
+      - This output supports setting the TV standard by using
+	VIDIOC_S_STD.
+    * - ``V4L2_OUT_CAP_NATIVE_SIZE``
+      - 0x00000008
+      - This output supports setting the native size using the
+	``V4L2_SEL_TGT_NATIVE_SIZE`` selection target, see
+	:ref:`v4l2-selections-common`.
 
 
 Return Value
@@ -218,5 +152,5 @@
 :ref:`Generic Error Codes <gen-errors>` chapter.
 
 EINVAL
-    The struct :ref:`v4l2_output <v4l2-output>` ``index`` is out of
+    The struct :c:type:`v4l2_output` ``index`` is out of
     bounds.
diff --git a/Documentation/media/uapi/v4l/vidioc-enumstd.rst b/Documentation/media/uapi/v4l/vidioc-enumstd.rst
index 6699b26..f2bdd45 100644
--- a/Documentation/media/uapi/v4l/vidioc-enumstd.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enumstd.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_standard *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_ENUMSTD, struct v4l2_standard *argp )
+    :name: VIDIOC_ENUMSTD
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_ENUMSTD
-
 ``argp``
 
 
@@ -35,7 +33,7 @@
 
 To query the attributes of a video standard, especially a custom (driver
 defined) one, applications initialize the ``index`` field of struct
-:ref:`v4l2_standard <v4l2-standard>` and call the :ref:`VIDIOC_ENUMSTD`
+:c:type:`v4l2_standard` and call the :ref:`VIDIOC_ENUMSTD`
 ioctl with a pointer to this structure. Drivers fill the rest of the
 structure or return an ``EINVAL`` error code when the index is out of
 bounds. To enumerate all standards applications shall begin at index
@@ -44,99 +42,64 @@
 or output. [#f1]_
 
 
-.. _v4l2-standard:
+.. c:type:: v4l2_standard
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_standard
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``index``
-
-       -  Number of the video standard, set by the application.
-
-    -  .. row 2
-
-       -  :ref:`v4l2_std_id <v4l2-std-id>`
-
-       -  ``id``
-
-       -  The bits in this field identify the standard as one of the common
-	  standards listed in :ref:`v4l2-std-id`, or if bits 32 to 63 are
-	  set as custom standards. Multiple bits can be set if the hardware
-	  does not distinguish between these standards, however separate
-	  indices do not indicate the opposite. The ``id`` must be unique.
-	  No other enumerated :ref:`struct v4l2_standard <v4l2-standard>` structure,
-	  for this input or output anyway, can contain the same set of bits.
-
-    -  .. row 3
-
-       -  __u8
-
-       -  ``name``\ [24]
-
-       -  Name of the standard, a NUL-terminated ASCII string, for example:
-	  "PAL-B/G", "NTSC Japan". This information is intended for the
-	  user.
-
-    -  .. row 4
-
-       -  struct :ref:`v4l2_fract <v4l2-fract>`
-
-       -  ``frameperiod``
-
-       -  The frame period (not field period) is numerator / denominator.
-	  For example M/NTSC has a frame period of 1001 / 30000 seconds.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``framelines``
-
-       -  Total lines per frame including blanking, e. g. 625 for B/PAL.
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``reserved``\ [4]
-
-       -  Reserved for future extensions. Drivers must set the array to
-	  zero.
+    * - __u32
+      - ``index``
+      - Number of the video standard, set by the application.
+    * - :ref:`v4l2_std_id <v4l2-std-id>`
+      - ``id``
+      - The bits in this field identify the standard as one of the common
+	standards listed in :ref:`v4l2-std-id`, or if bits 32 to 63 are
+	set as custom standards. Multiple bits can be set if the hardware
+	does not distinguish between these standards, however separate
+	indices do not indicate the opposite. The ``id`` must be unique.
+	No other enumerated struct :c:type:`v4l2_standard` structure,
+	for this input or output anyway, can contain the same set of bits.
+    * - __u8
+      - ``name``\ [24]
+      - Name of the standard, a NUL-terminated ASCII string, for example:
+	"PAL-B/G", "NTSC Japan". This information is intended for the
+	user.
+    * - struct :c:type:`v4l2_fract`
+      - ``frameperiod``
+      - The frame period (not field period) is numerator / denominator.
+	For example M/NTSC has a frame period of 1001 / 30000 seconds.
+    * - __u32
+      - ``framelines``
+      - Total lines per frame including blanking, e. g. 625 for B/PAL.
+    * - __u32
+      - ``reserved``\ [4]
+      - Reserved for future extensions. Drivers must set the array to
+	zero.
 
 
 
-.. _v4l2-fract:
+.. c:type:: v4l2_fract
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_fract
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``numerator``
-
-       -
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``denominator``
-
-       -
+    * - __u32
+      - ``numerator``
+      -
+    * - __u32
+      - ``denominator``
+      -
 
 
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. _v4l2-std-id:
 
@@ -145,17 +108,12 @@
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u64
-
-       -  ``v4l2_std_id``
-
-       -  This type is a set, each bit representing another video standard
-	  as listed below and in :ref:`video-standards`. The 32 most
-	  significant bits are reserved for custom (driver defined) video
-	  standards.
+    * - __u64
+      - ``v4l2_std_id``
+      - This type is a set, each bit representing another video standard
+	as listed below and in :ref:`video-standards`. The 32 most
+	significant bits are reserved for custom (driver defined) video
+	standards.
 
 
 
@@ -266,124 +224,77 @@
     #define V4L2_STD_ALL            (V4L2_STD_525_60        |
 		     V4L2_STD_625_50)
 
+.. raw:: latex
+
+    \begin{adjustbox}{width=\columnwidth}
+
+..                            NTSC/M   PAL/M    /N       /B       /D       /H       /I        SECAM/B    /D       /K1     /L
+.. tabularcolumns:: |p{2.7cm}|p{2.6cm}|p{3.0cm}|p{3.2cm}|p{3.2cm}|p{2.2cm}|p{1.2cm}|p{3.2cm}|p{3.0cm}|p{2.0cm}|p{2.0cm}|p{2.0cm}|
 
 .. _video-standards:
 
-.. flat-table:: Video Standards (based on [])
+.. flat-table:: Video Standards (based on :ref:`itu470`)
     :header-rows:  1
     :stub-columns: 0
 
+    * - Characteristics
+      - M/NTSC [#f2]_
+      - M/PAL
+      - N/PAL [#f3]_
+      - B, B1, G/PAL
+      - D, D1, K/PAL
+      - H/PAL
+      - I/PAL
+      - B, G/SECAM
+      - D, K/SECAM
+      - K1/SECAM
+      - L/SECAM
+    * - Frame lines
+      - :cspan:`1` 525
+      - :cspan:`8` 625
+    * - Frame period (s)
+      - :cspan:`1` 1001/30000
+      - :cspan:`8` 1/25
+    * - Chrominance sub-carrier frequency (Hz)
+      - 3579545 ± 10
+      - 3579611.49 ± 10
+      - 4433618.75 ± 5
 
-    -  .. row 1
+	(3582056.25 ± 5)
+      - :cspan:`3` 4433618.75 ± 5
+      - 4433618.75 ± 1
+      - :cspan:`2` f\ :sub:`OR` = 4406250 ± 2000,
 
-       -  Characteristics
+	f\ :sub:`OB` = 4250000 ± 2000
+    * - Nominal radio-frequency channel bandwidth (MHz)
+      - 6
+      - 6
+      - 6
+      - B: 7; B1, G: 8
+      - 8
+      - 8
+      - 8
+      - 8
+      - 8
+      - 8
+      - 8
+    * - Sound carrier relative to vision carrier (MHz)
+      - 4.5
+      - 4.5
+      - 4.5
+      - 5.5 ± 0.001  [#f4]_  [#f5]_  [#f6]_  [#f7]_
+      - 6.5 ± 0.001
+      - 5.5
+      - 5.9996 ± 0.0005
+      - 5.5 ± 0.001
+      - 6.5 ± 0.001
+      - 6.5
+      - 6.5 [#f8]_
 
-       -  M/NTSC [#f2]_
+.. raw:: latex
 
-       -  M/PAL
+    \end{adjustbox}\newline\newline
 
-       -  N/PAL [#f3]_
-
-       -  B, B1, G/PAL
-
-       -  D, D1, K/PAL
-
-       -  H/PAL
-
-       -  I/PAL
-
-       -  B, G/SECAM
-
-       -  D, K/SECAM
-
-       -  K1/SECAM
-
-       -  L/SECAM
-
-    -  .. row 2
-
-       -  Frame lines
-
-       -  :cspan:`1` 525
-
-       -  :cspan:`9` 625
-
-    -  .. row 3
-
-       -  Frame period (s)
-
-       -  :cspan:`1` 1001/30000
-
-       -  :cspan:`9` 1/25
-
-    -  .. row 4
-
-       -  Chrominance sub-carrier frequency (Hz)
-
-       -  3579545 ± 10
-
-       -  3579611.49 ± 10
-
-       -  4433618.75 ± 5 (3582056.25 ± 5)
-
-       -  :cspan:`3` 4433618.75 ± 5
-
-       -  4433618.75 ± 1
-
-       -  :cspan:`3` f\ :sub:`OR` = 4406250 ± 2000, f\ :sub:`OB` = 4250000
-	  ± 2000
-
-    -  .. row 5
-
-       -  Nominal radio-frequency channel bandwidth (MHz)
-
-       -  6
-
-       -  6
-
-       -  6
-
-       -  B: 7; B1, G: 8
-
-       -  8
-
-       -  8
-
-       -  8
-
-       -  8
-
-       -  8
-
-       -  8
-
-       -  8
-
-    -  .. row 6
-
-       -  Sound carrier relative to vision carrier (MHz)
-
-       -  + 4.5
-
-       -  + 4.5
-
-       -  + 4.5
-
-       -  + 5.5 ± 0.001  [#f4]_  [#f5]_  [#f6]_  [#f7]_
-
-       -  + 6.5 ± 0.001
-
-       -  + 5.5
-
-       -  + 5.9996 ± 0.0005
-
-       -  + 5.5 ± 0.001
-
-       -  + 6.5 ± 0.001
-
-       -  + 6.5
-
-       -  + 6.5  [#f8]_
 
 
 Return Value
@@ -394,7 +305,7 @@
 :ref:`Generic Error Codes <gen-errors>` chapter.
 
 EINVAL
-    The struct :ref:`v4l2_standard <v4l2-standard>` ``index`` is out
+    The struct :c:type:`v4l2_standard` ``index`` is out
     of bounds.
 
 ENODATA
diff --git a/Documentation/media/uapi/v4l/vidioc-expbuf.rst b/Documentation/media/uapi/v4l/vidioc-expbuf.rst
index ded708e..246e480 100644
--- a/Documentation/media/uapi/v4l/vidioc-expbuf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-expbuf.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_exportbuffer *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_EXPBUF, struct v4l2_exportbuffer *argp )
+    :name: VIDIOC_EXPBUF
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_EXPBUF
-
 ``argp``
 
 
@@ -40,13 +38,13 @@
 :ref:`VIDIOC_REQBUFS` ioctl.
 
 To export a buffer, applications fill struct
-:ref:`v4l2_exportbuffer <v4l2-exportbuffer>`. The ``type`` field is
+:c:type:`v4l2_exportbuffer`. The ``type`` field is
 set to the same buffer type as was previously used with struct
-:ref:`v4l2_requestbuffers <v4l2-requestbuffers>` ``type``.
+:c:type:`v4l2_requestbuffers` ``type``.
 Applications must also set the ``index`` field. Valid index numbers
 range from zero to the number of buffers allocated with
 :ref:`VIDIOC_REQBUFS` (struct
-:ref:`v4l2_requestbuffers <v4l2-requestbuffers>` ``count``) minus
+:c:type:`v4l2_requestbuffers` ``count``) minus
 one. For the multi-planar API, applications set the ``plane`` field to
 the index of the plane to be exported. Valid planes range from zero to
 the maximal number of valid planes for the currently active format. For
@@ -116,73 +114,45 @@
     }
 
 
-.. _v4l2-exportbuffer:
+.. c:type:: v4l2_exportbuffer
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_exportbuffer
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``type``
-
-       -  Type of the buffer, same as struct
-	  :ref:`v4l2_format <v4l2-format>` ``type`` or struct
-	  :ref:`v4l2_requestbuffers <v4l2-requestbuffers>` ``type``, set
-	  by the application. See :ref:`v4l2-buf-type`
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``index``
-
-       -  Number of the buffer, set by the application. This field is only
-	  used for :ref:`memory mapping <mmap>` I/O and can range from
-	  zero to the number of buffers allocated with the
-	  :ref:`VIDIOC_REQBUFS` and/or
-	  :ref:`VIDIOC_CREATE_BUFS` ioctls.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``plane``
-
-       -  Index of the plane to be exported when using the multi-planar API.
-	  Otherwise this value must be set to zero.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``flags``
-
-       -  Flags for the newly created file, currently only ``O_CLOEXEC``,
-	  ``O_RDONLY``, ``O_WRONLY``, and ``O_RDWR`` are supported, refer to
-	  the manual of open() for more details.
-
-    -  .. row 5
-
-       -  __s32
-
-       -  ``fd``
-
-       -  The DMABUF file descriptor associated with a buffer. Set by the
-	  driver.
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``reserved[11]``
-
-       -  Reserved field for future use. Drivers and applications must set
-	  the array to zero.
+    * - __u32
+      - ``type``
+      - Type of the buffer, same as struct
+	:c:type:`v4l2_format` ``type`` or struct
+	:c:type:`v4l2_requestbuffers` ``type``, set
+	by the application. See :c:type:`v4l2_buf_type`
+    * - __u32
+      - ``index``
+      - Number of the buffer, set by the application. This field is only
+	used for :ref:`memory mapping <mmap>` I/O and can range from
+	zero to the number of buffers allocated with the
+	:ref:`VIDIOC_REQBUFS` and/or
+	:ref:`VIDIOC_CREATE_BUFS` ioctls.
+    * - __u32
+      - ``plane``
+      - Index of the plane to be exported when using the multi-planar API.
+	Otherwise this value must be set to zero.
+    * - __u32
+      - ``flags``
+      - Flags for the newly created file, currently only ``O_CLOEXEC``,
+	``O_RDONLY``, ``O_WRONLY``, and ``O_RDWR`` are supported, refer to
+	the manual of open() for more details.
+    * - __s32
+      - ``fd``
+      - The DMABUF file descriptor associated with a buffer. Set by the
+	driver.
+    * - __u32
+      - ``reserved[11]``
+      - Reserved field for future use. Drivers and applications must set
+	the array to zero.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-g-audio.rst b/Documentation/media/uapi/v4l/vidioc-g-audio.rst
index cccbcdb..5b67e81 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-audio.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-audio.rst
@@ -15,9 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_audio *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_AUDIO, struct v4l2_audio *argp )
+    :name: VIDIOC_G_AUDIO
 
-.. cpp:function:: int ioctl( int fd, int request, const struct v4l2_audio *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_S_AUDIO, const struct v4l2_audio *argp )
+    :name: VIDIOC_S_AUDIO
 
 
 Arguments
@@ -26,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_AUDIO, VIDIOC_S_AUDIO
-
 ``argp``
 
 
@@ -36,7 +35,7 @@
 ===========
 
 To query the current audio input applications zero out the ``reserved``
-array of a struct :ref:`v4l2_audio <v4l2-audio>` and call the
+array of a struct :c:type:`v4l2_audio` and call the
 :ref:`VIDIOC_G_AUDIO <VIDIOC_G_AUDIO>` ioctl with a pointer to this structure. Drivers fill
 the rest of the structure or return an ``EINVAL`` error code when the device
 has no audio inputs, or none which combine with the current video input.
@@ -44,66 +43,45 @@
 Audio inputs have one writable property, the audio mode. To select the
 current audio input *and* change the audio mode, applications initialize
 the ``index`` and ``mode`` fields, and the ``reserved`` array of a
-:ref:`struct v4l2_audio <v4l2-audio>` structure and call the :ref:`VIDIOC_S_AUDIO <VIDIOC_G_AUDIO>`
+struct :c:type:`v4l2_audio` structure and call the :ref:`VIDIOC_S_AUDIO <VIDIOC_G_AUDIO>`
 ioctl. Drivers may switch to a different audio mode if the request
 cannot be satisfied. However, this is a write-only ioctl, it does not
 return the actual new audio mode.
 
 
-.. _v4l2-audio:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_audio
 
 .. flat-table:: struct v4l2_audio
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``index``
-
-       -  Identifies the audio input, set by the driver or application.
-
-    -  .. row 2
-
-       -  __u8
-
-       -  ``name``\ [32]
-
-       -  Name of the audio input, a NUL-terminated ASCII string, for
-	  example: "Line In". This information is intended for the user,
-	  preferably the connector label on the device itself.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``capability``
-
-       -  Audio capability flags, see :ref:`audio-capability`.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``mode``
-
-       -  Audio mode flags set by drivers and applications (on
-	  :ref:`VIDIOC_S_AUDIO <VIDIOC_G_AUDIO>` ioctl), see :ref:`audio-mode`.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``reserved``\ [2]
-
-       -  Reserved for future extensions. Drivers and applications must set
-	  the array to zero.
+    * - __u32
+      - ``index``
+      - Identifies the audio input, set by the driver or application.
+    * - __u8
+      - ``name``\ [32]
+      - Name of the audio input, a NUL-terminated ASCII string, for
+	example: "Line In". This information is intended for the user,
+	preferably the connector label on the device itself.
+    * - __u32
+      - ``capability``
+      - Audio capability flags, see :ref:`audio-capability`.
+    * - __u32
+      - ``mode``
+      - Audio mode flags set by drivers and applications (on
+	:ref:`VIDIOC_S_AUDIO <VIDIOC_G_AUDIO>` ioctl), see :ref:`audio-mode`.
+    * - __u32
+      - ``reserved``\ [2]
+      - Reserved for future extensions. Drivers and applications must set
+	the array to zero.
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _audio-capability:
 
 .. flat-table:: Audio Capability Flags
@@ -111,28 +89,20 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_AUDCAP_STEREO``
-
-       -  0x00001
-
-       -  This is a stereo input. The flag is intended to automatically
-	  disable stereo recording etc. when the signal is always monaural.
-	  The API provides no means to detect if stereo is *received*,
-	  unless the audio input belongs to a tuner.
-
-    -  .. row 2
-
-       -  ``V4L2_AUDCAP_AVL``
-
-       -  0x00002
-
-       -  Automatic Volume Level mode is supported.
+    * - ``V4L2_AUDCAP_STEREO``
+      - 0x00001
+      - This is a stereo input. The flag is intended to automatically
+	disable stereo recording etc. when the signal is always monaural.
+	The API provides no means to detect if stereo is *received*,
+	unless the audio input belongs to a tuner.
+    * - ``V4L2_AUDCAP_AVL``
+      - 0x00002
+      - Automatic Volume Level mode is supported.
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _audio-mode:
 
 .. flat-table:: Audio Mode Flags
@@ -140,14 +110,9 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_AUDMODE_AVL``
-
-       -  0x00001
-
-       -  AVL mode is on.
+    * - ``V4L2_AUDMODE_AVL``
+      - 0x00001
+      - AVL mode is on.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-g-audioout.rst b/Documentation/media/uapi/v4l/vidioc-g-audioout.rst
index b1c1bfe..d16ecba 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-audioout.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-audioout.rst
@@ -15,9 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_audioout *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_AUDOUT, struct v4l2_audioout *argp )
+    :name: VIDIOC_G_AUDOUT
 
-.. cpp:function:: int ioctl( int fd, int request, const struct v4l2_audioout *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_S_AUDOUT, const struct v4l2_audioout *argp )
+    :name: VIDIOC_S_AUDOUT
 
 
 Arguments
@@ -26,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_AUDOUT, VIDIOC_S_AUDOUT
-
 ``argp``
 
 
@@ -36,7 +35,7 @@
 ===========
 
 To query the current audio output applications zero out the ``reserved``
-array of a struct :ref:`v4l2_audioout <v4l2-audioout>` and call the
+array of a struct :c:type:`v4l2_audioout` and call the
 ``VIDIOC_G_AUDOUT`` ioctl with a pointer to this structure. Drivers fill
 the rest of the structure or return an ``EINVAL`` error code when the device
 has no audio inputs, or none which combine with the current video
@@ -45,68 +44,47 @@
 Audio outputs have no writable properties. Nevertheless, to select the
 current audio output applications can initialize the ``index`` field and
 ``reserved`` array (which in the future may contain writable properties)
-of a :ref:`struct v4l2_audioout <v4l2-audioout>` structure and call the
+of a struct :c:type:`v4l2_audioout` structure and call the
 ``VIDIOC_S_AUDOUT`` ioctl. Drivers switch to the requested output or
 return the ``EINVAL`` error code when the index is out of bounds. This is a
 write-only ioctl, it does not return the current audio output attributes
 as ``VIDIOC_G_AUDOUT`` does.
 
-.. note:: Connectors on a TV card to loop back the received audio signal
+.. note::
+
+   Connectors on a TV card to loop back the received audio signal
    to a sound card are not audio outputs in this sense.
 
 
-.. _v4l2-audioout:
+.. c:type:: v4l2_audioout
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_audioout
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``index``
-
-       -  Identifies the audio output, set by the driver or application.
-
-    -  .. row 2
-
-       -  __u8
-
-       -  ``name``\ [32]
-
-       -  Name of the audio output, a NUL-terminated ASCII string, for
-	  example: "Line Out". This information is intended for the user,
-	  preferably the connector label on the device itself.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``capability``
-
-       -  Audio capability flags, none defined yet. Drivers must set this
-	  field to zero.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``mode``
-
-       -  Audio mode, none defined yet. Drivers and applications (on
-	  ``VIDIOC_S_AUDOUT``) must set this field to zero.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``reserved``\ [2]
-
-       -  Reserved for future extensions. Drivers and applications must set
-	  the array to zero.
+    * - __u32
+      - ``index``
+      - Identifies the audio output, set by the driver or application.
+    * - __u8
+      - ``name``\ [32]
+      - Name of the audio output, a NUL-terminated ASCII string, for
+	example: "Line Out". This information is intended for the user,
+	preferably the connector label on the device itself.
+    * - __u32
+      - ``capability``
+      - Audio capability flags, none defined yet. Drivers must set this
+	field to zero.
+    * - __u32
+      - ``mode``
+      - Audio mode, none defined yet. Drivers and applications (on
+	``VIDIOC_S_AUDOUT``) must set this field to zero.
+    * - __u32
+      - ``reserved``\ [2]
+      - Reserved for future extensions. Drivers and applications must set
+	the array to zero.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-g-crop.rst b/Documentation/media/uapi/v4l/vidioc-g-crop.rst
index 6cf7649..56a3634 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-crop.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-crop.rst
@@ -15,9 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_crop *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_CROP, struct v4l2_crop *argp )
+    :name: VIDIOC_G_CROP
 
-.. cpp:function:: int ioctl( int fd, int request, const struct v4l2_crop *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_S_CROP, const struct v4l2_crop *argp )
+    :name: VIDIOC_S_CROP
 
 
 Arguments
@@ -26,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_CROP, VIDIOC_S_CROP
-
 ``argp``
 
 
@@ -36,13 +35,13 @@
 ===========
 
 To query the cropping rectangle size and position applications set the
-``type`` field of a :ref:`struct v4l2_crop <v4l2-crop>` structure to the
+``type`` field of a struct :c:type:`v4l2_crop` structure to the
 respective buffer (stream) type and call the :ref:`VIDIOC_G_CROP <VIDIOC_G_CROP>` ioctl
 with a pointer to this structure. The driver fills the rest of the
 structure or returns the ``EINVAL`` error code if cropping is not supported.
 
 To change the cropping rectangle applications initialize the ``type``
-and struct :ref:`v4l2_rect <v4l2-rect>` substructure named ``c`` of a
+and struct :c:type:`v4l2_rect` substructure named ``c`` of a
 v4l2_crop structure and call the :ref:`VIDIOC_S_CROP <VIDIOC_G_CROP>` ioctl with a pointer
 to this structure.
 
@@ -76,33 +75,25 @@
 :ref:`VIDIOC_S_CROP <VIDIOC_G_CROP>` returns the ``EINVAL`` error code.
 
 
-.. _v4l2-crop:
+.. c:type:: v4l2_crop
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_crop
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``type``
-
-       -  Type of the data stream, set by the application. Only these types
-	  are valid here: ``V4L2_BUF_TYPE_VIDEO_CAPTURE``,
-	  ``V4L2_BUF_TYPE_VIDEO_OUTPUT`` and
-	  ``V4L2_BUF_TYPE_VIDEO_OVERLAY``. See :ref:`v4l2-buf-type`.
-
-    -  .. row 2
-
-       -  struct :ref:`v4l2_rect <v4l2-rect>`
-
-       -  ``c``
-
-       -  Cropping rectangle. The same co-ordinate system as for struct
-	  :ref:`v4l2_cropcap <v4l2-cropcap>` ``bounds`` is used.
+    * - __u32
+      - ``type``
+      - Type of the data stream, set by the application. Only these types
+	are valid here: ``V4L2_BUF_TYPE_VIDEO_CAPTURE``,
+	``V4L2_BUF_TYPE_VIDEO_OUTPUT`` and
+	``V4L2_BUF_TYPE_VIDEO_OVERLAY``. See :c:type:`v4l2_buf_type`.
+    * - struct :c:type:`v4l2_rect`
+      - ``c``
+      - Cropping rectangle. The same co-ordinate system as for struct
+	:c:type:`v4l2_cropcap` ``bounds`` is used.
 
 
 Return Value
@@ -111,3 +102,6 @@
 On success 0 is returned, on error -1 and the ``errno`` variable is set
 appropriately. The generic error codes are described at the
 :ref:`Generic Error Codes <gen-errors>` chapter.
+
+ENODATA
+    Cropping is not supported for this input or output.
diff --git a/Documentation/media/uapi/v4l/vidioc-g-ctrl.rst b/Documentation/media/uapi/v4l/vidioc-g-ctrl.rst
index ee929f6..d8a3791 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-ctrl.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-ctrl.rst
@@ -15,7 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_control *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_CTRL, struct v4l2_control *argp )
+    :name: VIDIOC_G_CTRL
+
+.. c:function:: int ioctl( int fd, VIDIOC_S_CTRL, struct v4l2_control *argp )
+    :name: VIDIOC_S_CTRL
 
 
 Arguments
@@ -24,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_CTRL, VIDIOC_S_CTRL
-
 ``argp``
 
 
@@ -34,10 +35,10 @@
 ===========
 
 To get the current value of a control applications initialize the ``id``
-field of a struct :ref:`struct v4l2_control <v4l2-control>` and call the
+field of a struct :c:type:`v4l2_control` and call the
 :ref:`VIDIOC_G_CTRL <VIDIOC_G_CTRL>` ioctl with a pointer to this structure. To change the
 value of a control applications initialize the ``id`` and ``value``
-fields of a struct :ref:`struct v4l2_control <v4l2-control>` and call the
+fields of a struct :c:type:`v4l2_control` and call the
 :ref:`VIDIOC_S_CTRL <VIDIOC_G_CTRL>` ioctl.
 
 When the ``id`` is invalid drivers return an ``EINVAL`` error code. When the
@@ -54,29 +55,21 @@
 :ref:`VIDIOC_TRY_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` must be used.
 
 
-.. _v4l2-control:
+.. c:type:: v4l2_control
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_control
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``id``
-
-       -  Identifies the control, set by the application.
-
-    -  .. row 2
-
-       -  __s32
-
-       -  ``value``
-
-       -  New value or current value.
+    * - __u32
+      - ``id``
+      - Identifies the control, set by the application.
+    * - __s32
+      - ``value``
+      - New value or current value.
 
 
 Return Value
@@ -87,13 +80,13 @@
 :ref:`Generic Error Codes <gen-errors>` chapter.
 
 EINVAL
-    The struct :ref:`v4l2_control <v4l2-control>` ``id`` is invalid
+    The struct :c:type:`v4l2_control` ``id`` is invalid
     or the ``value`` is inappropriate for the given control (i.e. if a
     menu item is selected that is not supported by the driver according
     to :ref:`VIDIOC_QUERYMENU <VIDIOC_QUERYCTRL>`).
 
 ERANGE
-    The struct :ref:`v4l2_control <v4l2-control>` ``value`` is out of
+    The struct :c:type:`v4l2_control` ``value`` is out of
     bounds.
 
 EBUSY
diff --git a/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst b/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst
index f7bf21f..7dd943ff 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst
@@ -15,7 +15,17 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_dv_timings *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_DV_TIMINGS, struct v4l2_dv_timings *argp )
+    :name: VIDIOC_G_DV_TIMINGS
+
+.. c:function:: int ioctl( int fd, VIDIOC_S_DV_TIMINGS, struct v4l2_dv_timings *argp )
+    :name: VIDIOC_S_DV_TIMINGS
+
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_G_DV_TIMINGS, struct v4l2_dv_timings *argp )
+    :name: VIDIOC_SUBDEV_G_DV_TIMINGS
+
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_S_DV_TIMINGS, struct v4l2_dv_timings *argp )
+    :name: VIDIOC_SUBDEV_S_DV_TIMINGS
 
 
 Arguments
@@ -24,10 +34,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_DV_TIMINGS, VIDIOC_S_DV_TIMINGS,
-    VIDIOC_SUBDEV_G_DV_TIMINGS, VIDIOC_SUBDEV_S_DV_TIMINGS
-
 ``argp``
 
 
@@ -38,8 +44,8 @@
 :ref:`VIDIOC_S_DV_TIMINGS <VIDIOC_G_DV_TIMINGS>` ioctl and to get the current timings,
 applications use the :ref:`VIDIOC_G_DV_TIMINGS <VIDIOC_G_DV_TIMINGS>` ioctl. The detailed timing
 information is filled in using the structure struct
-:ref:`v4l2_dv_timings <v4l2-dv-timings>`. These ioctls take a
-pointer to the struct :ref:`v4l2_dv_timings <v4l2-dv-timings>`
+:c:type:`v4l2_dv_timings`. These ioctls take a
+pointer to the struct :c:type:`v4l2_dv_timings`
 structure as argument. If the ioctl is not supported or the timing
 values are not correct, the driver returns ``EINVAL`` error code.
 
@@ -68,202 +74,110 @@
     The device is busy and therefore can not change the timings.
 
 
-.. _v4l2-bt-timings:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_bt_timings
 
 .. flat-table:: struct v4l2_bt_timings
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
+    * - __u32
+      - ``width``
+      - Width of the active video in pixels.
+    * - __u32
+      - ``height``
+      - Height of the active video frame in lines. So for interlaced
+	formats the height of the active video in each field is
+	``height``/2.
+    * - __u32
+      - ``interlaced``
+      - Progressive (``V4L2_DV_PROGRESSIVE``) or interlaced (``V4L2_DV_INTERLACED``).
+    * - __u32
+      - ``polarities``
+      - This is a bit mask that defines polarities of sync signals. bit 0
+	(``V4L2_DV_VSYNC_POS_POL``) is for vertical sync polarity and bit
+	1 (``V4L2_DV_HSYNC_POS_POL``) is for horizontal sync polarity. If
+	the bit is set (1) it is positive polarity and if is cleared (0),
+	it is negative polarity.
+    * - __u64
+      - ``pixelclock``
+      - Pixel clock in Hz. Ex. 74.25MHz->74250000
+    * - __u32
+      - ``hfrontporch``
+      - Horizontal front porch in pixels
+    * - __u32
+      - ``hsync``
+      - Horizontal sync length in pixels
+    * - __u32
+      - ``hbackporch``
+      - Horizontal back porch in pixels
+    * - __u32
+      - ``vfrontporch``
+      - Vertical front porch in lines. For interlaced formats this refers
+	to the odd field (aka field 1).
+    * - __u32
+      - ``vsync``
+      - Vertical sync length in lines. For interlaced formats this refers
+	to the odd field (aka field 1).
+    * - __u32
+      - ``vbackporch``
+      - Vertical back porch in lines. For interlaced formats this refers
+	to the odd field (aka field 1).
+    * - __u32
+      - ``il_vfrontporch``
+      - Vertical front porch in lines for the even field (aka field 2) of
+	interlaced field formats. Must be 0 for progressive formats.
+    * - __u32
+      - ``il_vsync``
+      - Vertical sync length in lines for the even field (aka field 2) of
+	interlaced field formats. Must be 0 for progressive formats.
+    * - __u32
+      - ``il_vbackporch``
+      - Vertical back porch in lines for the even field (aka field 2) of
+	interlaced field formats. Must be 0 for progressive formats.
+    * - __u32
+      - ``standards``
+      - The video standard(s) this format belongs to. This will be filled
+	in by the driver. Applications must set this to 0. See
+	:ref:`dv-bt-standards` for a list of standards.
+    * - __u32
+      - ``flags``
+      - Several flags giving more information about the format. See
+	:ref:`dv-bt-flags` for a description of the flags.
+    * - __u32
+      - ``reserved[14]``
+      - Reserved for future extensions. Drivers and applications must set
+	the array to zero.
 
-    -  .. row 1
 
-       -  __u32
+.. tabularcolumns:: |p{3.5cm}|p{3.5cm}|p{7.0cm}|p{3.5cm}|
 
-       -  ``width``
-
-       -  Width of the active video in pixels.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``height``
-
-       -  Height of the active video frame in lines. So for interlaced
-	  formats the height of the active video in each field is
-	  ``height``/2.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``interlaced``
-
-       -  Progressive (``V4L2_DV_PROGRESSIVE``) or interlaced (``V4L2_DV_INTERLACED``).
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``polarities``
-
-       -  This is a bit mask that defines polarities of sync signals. bit 0
-	  (``V4L2_DV_VSYNC_POS_POL``) is for vertical sync polarity and bit
-	  1 (``V4L2_DV_HSYNC_POS_POL``) is for horizontal sync polarity. If
-	  the bit is set (1) it is positive polarity and if is cleared (0),
-	  it is negative polarity.
-
-    -  .. row 5
-
-       -  __u64
-
-       -  ``pixelclock``
-
-       -  Pixel clock in Hz. Ex. 74.25MHz->74250000
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``hfrontporch``
-
-       -  Horizontal front porch in pixels
-
-    -  .. row 7
-
-       -  __u32
-
-       -  ``hsync``
-
-       -  Horizontal sync length in pixels
-
-    -  .. row 8
-
-       -  __u32
-
-       -  ``hbackporch``
-
-       -  Horizontal back porch in pixels
-
-    -  .. row 9
-
-       -  __u32
-
-       -  ``vfrontporch``
-
-       -  Vertical front porch in lines. For interlaced formats this refers
-	  to the odd field (aka field 1).
-
-    -  .. row 10
-
-       -  __u32
-
-       -  ``vsync``
-
-       -  Vertical sync length in lines. For interlaced formats this refers
-	  to the odd field (aka field 1).
-
-    -  .. row 11
-
-       -  __u32
-
-       -  ``vbackporch``
-
-       -  Vertical back porch in lines. For interlaced formats this refers
-	  to the odd field (aka field 1).
-
-    -  .. row 12
-
-       -  __u32
-
-       -  ``il_vfrontporch``
-
-       -  Vertical front porch in lines for the even field (aka field 2) of
-	  interlaced field formats. Must be 0 for progressive formats.
-
-    -  .. row 13
-
-       -  __u32
-
-       -  ``il_vsync``
-
-       -  Vertical sync length in lines for the even field (aka field 2) of
-	  interlaced field formats. Must be 0 for progressive formats.
-
-    -  .. row 14
-
-       -  __u32
-
-       -  ``il_vbackporch``
-
-       -  Vertical back porch in lines for the even field (aka field 2) of
-	  interlaced field formats. Must be 0 for progressive formats.
-
-    -  .. row 15
-
-       -  __u32
-
-       -  ``standards``
-
-       -  The video standard(s) this format belongs to. This will be filled
-	  in by the driver. Applications must set this to 0. See
-	  :ref:`dv-bt-standards` for a list of standards.
-
-    -  .. row 16
-
-       -  __u32
-
-       -  ``flags``
-
-       -  Several flags giving more information about the format. See
-	  :ref:`dv-bt-flags` for a description of the flags.
-
-
-
-.. _v4l2-dv-timings:
+.. c:type:: v4l2_dv_timings
 
 .. flat-table:: struct v4l2_dv_timings
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2 1
 
+    * - __u32
+      - ``type``
+      -
+      - Type of DV timings as listed in :ref:`dv-timing-types`.
+    * - union
+      -
+      -
+    * -
+      - struct :c:type:`v4l2_bt_timings`
+      - ``bt``
+      - Timings defined by BT.656/1120 specifications
+    * -
+      - __u32
+      - ``reserved``\ [32]
+      -
 
-    -  .. row 1
-
-       -  __u32
-
-       -  ``type``
-
-       -
-       -  Type of DV timings as listed in :ref:`dv-timing-types`.
-
-    -  .. row 2
-
-       -  union
-
-       -
-       -
-
-    -  .. row 3
-
-       -
-       -  struct :ref:`v4l2_bt_timings <v4l2-bt-timings>`
-
-       -  ``bt``
-
-       -  Timings defined by BT.656/1120 specifications
-
-    -  .. row 4
-
-       -
-       -  __u32
-
-       -  ``reserved``\ [32]
-
-       -
-
-
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. _dv-timing-types:
 
@@ -272,28 +186,15 @@
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  Timing type
-
-       -  value
-
-       -  Description
-
-    -  .. row 2
-
-       -
-       -
-       -
-
-    -  .. row 3
-
-       -  ``V4L2_DV_BT_656_1120``
-
-       -  0
-
-       -  BT.656/1120 timings
+    * - Timing type
+      - value
+      - Description
+    * -
+      -
+      -
+    * - ``V4L2_DV_BT_656_1120``
+      - 0
+      - BT.656/1120 timings
 
 
 
@@ -303,43 +204,22 @@
     :header-rows:  0
     :stub-columns: 0
 
+    * - Timing standard
+      - Description
+    * - ``V4L2_DV_BT_STD_CEA861``
+      - The timings follow the CEA-861 Digital TV Profile standard
+    * - ``V4L2_DV_BT_STD_DMT``
+      - The timings follow the VESA Discrete Monitor Timings standard
+    * - ``V4L2_DV_BT_STD_CVT``
+      - The timings follow the VESA Coordinated Video Timings standard
+    * - ``V4L2_DV_BT_STD_GTF``
+      - The timings follow the VESA Generalized Timings Formula standard
+    * - ``V4L2_DV_BT_STD_SDI``
+      - The timings follow the SDI Timings standard.
+	There are no horizontal syncs/porches at all in this format.
+	Total blanking timings must be set in hsync or vsync fields only.
 
-    -  .. row 1
-
-       -  Timing standard
-
-       -  Description
-
-    -  .. row 2
-
-       -
-       -
-
-    -  .. row 3
-
-       -  ``V4L2_DV_BT_STD_CEA861``
-
-       -  The timings follow the CEA-861 Digital TV Profile standard
-
-    -  .. row 4
-
-       -  ``V4L2_DV_BT_STD_DMT``
-
-       -  The timings follow the VESA Discrete Monitor Timings standard
-
-    -  .. row 5
-
-       -  ``V4L2_DV_BT_STD_CVT``
-
-       -  The timings follow the VESA Coordinated Video Timings standard
-
-    -  .. row 6
-
-       -  ``V4L2_DV_BT_STD_GTF``
-
-       -  The timings follow the VESA Generalized Timings Formula standard
-
-
+.. tabularcolumns:: |p{6.0cm}|p{11.5cm}|
 
 .. _dv-bt-flags:
 
@@ -347,71 +227,46 @@
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  Flag
-
-       -  Description
-
-    -  .. row 2
-
-       -
-       -
-
-    -  .. row 3
-
-       -  ``V4L2_DV_FL_REDUCED_BLANKING``
-
-       -  CVT/GTF specific: the timings use reduced blanking (CVT) or the
-	  'Secondary GTF' curve (GTF). In both cases the horizontal and/or
-	  vertical blanking intervals are reduced, allowing a higher
-	  resolution over the same bandwidth. This is a read-only flag,
-	  applications must not set this.
-
-    -  .. row 4
-
-       -  ``V4L2_DV_FL_CAN_REDUCE_FPS``
-
-       -  CEA-861 specific: set for CEA-861 formats with a framerate that is
-	  a multiple of six. These formats can be optionally played at 1 /
-	  1.001 speed to be compatible with 60 Hz based standards such as
-	  NTSC and PAL-M that use a framerate of 29.97 frames per second. If
-	  the transmitter can't generate such frequencies, then the flag
-	  will also be cleared. This is a read-only flag, applications must
-	  not set this.
-
-    -  .. row 5
-
-       -  ``V4L2_DV_FL_REDUCED_FPS``
-
-       -  CEA-861 specific: only valid for video transmitters, the flag is
-	  cleared by receivers. It is also only valid for formats with the
-	  ``V4L2_DV_FL_CAN_REDUCE_FPS`` flag set, for other formats the
-	  flag will be cleared by the driver. If the application sets this
-	  flag, then the pixelclock used to set up the transmitter is
-	  divided by 1.001 to make it compatible with NTSC framerates. If
-	  the transmitter can't generate such frequencies, then the flag
-	  will also be cleared.
-
-    -  .. row 6
-
-       -  ``V4L2_DV_FL_HALF_LINE``
-
-       -  Specific to interlaced formats: if set, then the vertical
-	  frontporch of field 1 (aka the odd field) is really one half-line
-	  longer and the vertical backporch of field 2 (aka the even field)
-	  is really one half-line shorter, so each field has exactly the
-	  same number of half-lines. Whether half-lines can be detected or
-	  used depends on the hardware.
-
-    -  .. row 7
-
-       -  ``V4L2_DV_FL_IS_CE_VIDEO``
-
-       -  If set, then this is a Consumer Electronics (CE) video format.
-	  Such formats differ from other formats (commonly called IT
-	  formats) in that if R'G'B' encoding is used then by default the
-	  R'G'B' values use limited range (i.e. 16-235) as opposed to full
-	  range (i.e. 0-255). All formats defined in CEA-861 except for the
-	  640x480p59.94 format are CE formats.
+    * - Flag
+      - Description
+    * - ``V4L2_DV_FL_REDUCED_BLANKING``
+      - CVT/GTF specific: the timings use reduced blanking (CVT) or the
+	'Secondary GTF' curve (GTF). In both cases the horizontal and/or
+	vertical blanking intervals are reduced, allowing a higher
+	resolution over the same bandwidth. This is a read-only flag,
+	applications must not set this.
+    * - ``V4L2_DV_FL_CAN_REDUCE_FPS``
+      - CEA-861 specific: set for CEA-861 formats with a framerate that is
+	a multiple of six. These formats can be optionally played at 1 /
+	1.001 speed to be compatible with 60 Hz based standards such as
+	NTSC and PAL-M that use a framerate of 29.97 frames per second. If
+	the transmitter can't generate such frequencies, then the flag
+	will also be cleared. This is a read-only flag, applications must
+	not set this.
+    * - ``V4L2_DV_FL_REDUCED_FPS``
+      - CEA-861 specific: only valid for video transmitters, the flag is
+	cleared by receivers. It is also only valid for formats with the
+	``V4L2_DV_FL_CAN_REDUCE_FPS`` flag set, for other formats the
+	flag will be cleared by the driver. If the application sets this
+	flag, then the pixelclock used to set up the transmitter is
+	divided by 1.001 to make it compatible with NTSC framerates. If
+	the transmitter can't generate such frequencies, then the flag
+	will also be cleared.
+    * - ``V4L2_DV_FL_HALF_LINE``
+      - Specific to interlaced formats: if set, then the vertical
+	frontporch of field 1 (aka the odd field) is really one half-line
+	longer and the vertical backporch of field 2 (aka the even field)
+	is really one half-line shorter, so each field has exactly the
+	same number of half-lines. Whether half-lines can be detected or
+	used depends on the hardware.
+    * - ``V4L2_DV_FL_IS_CE_VIDEO``
+      - If set, then this is a Consumer Electronics (CE) video format.
+	Such formats differ from other formats (commonly called IT
+	formats) in that if R'G'B' encoding is used then by default the
+	R'G'B' values use limited range (i.e. 16-235) as opposed to full
+	range (i.e. 0-255). All formats defined in CEA-861 except for the
+	640x480p59.94 format are CE formats.
+    * - ``V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE``
+      - Some formats like SMPTE-125M have an interlaced signal with a odd
+	total height. For these formats, if this flag is set, the first
+	field has the extra line. Else, it is the second field.
diff --git a/Documentation/media/uapi/v4l/vidioc-g-edid.rst b/Documentation/media/uapi/v4l/vidioc-g-edid.rst
index 1a982b6..a16a193 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-edid.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-edid.rst
@@ -15,7 +15,18 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_edid *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_EDID, struct v4l2_edid *argp )
+    :name: VIDIOC_G_EDID
+
+.. c:function:: int ioctl( int fd, VIDIOC_S_EDID, struct v4l2_edid *argp )
+    :name: VIDIOC_S_EDID
+
+
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_G_EDID, struct v4l2_edid *argp )
+    :name: VIDIOC_SUBDEV_G_EDID
+
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_S_EDID, struct v4l2_edid *argp )
+    :name: VIDIOC_SUBDEV_S_EDID
 
 
 Arguments
@@ -24,10 +35,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_EDID, VIDIOC_S_EDID, VIDIOC_SUBDEV_G_EDID,
-    VIDIOC_SUBDEV_S_EDID
-
 ``argp``
 
 
@@ -67,7 +74,9 @@
 copying any data. This is an easy way to discover how many EDID blocks
 there are.
 
-.. note:: If there are no EDID blocks available at all, then
+.. note::
+
+   If there are no EDID blocks available at all, then
    the driver will set ``blocks`` to 0 and it returns 0.
 
 To set the EDID blocks of a receiver the application has to fill in the
@@ -88,62 +97,39 @@
 EDID is no longer available.
 
 
-.. _v4l2-edid:
+.. c:type:: v4l2_edid
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_edid
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``pad``
-
-       -  Pad for which to get/set the EDID blocks. When used with a video
-	  device node the pad represents the input or output index as
-	  returned by :ref:`VIDIOC_ENUMINPUT` and
-	  :ref:`VIDIOC_ENUMOUTPUT` respectively.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``start_block``
-
-       -  Read the EDID from starting with this block. Must be 0 when
-	  setting the EDID.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``blocks``
-
-       -  The number of blocks to get or set. Must be less or equal to 256
-	  (the maximum number of blocks as defined by the standard). When
-	  you set the EDID and ``blocks`` is 0, then the EDID is disabled or
-	  erased.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``reserved``\ [5]
-
-       -  Reserved for future extensions. Applications and drivers must set
-	  the array to zero.
-
-    -  .. row 5
-
-       -  __u8 *
-
-       -  ``edid``
-
-       -  Pointer to memory that contains the EDID. The minimum size is
-	  ``blocks`` * 128.
+    * - __u32
+      - ``pad``
+      - Pad for which to get/set the EDID blocks. When used with a video
+	device node the pad represents the input or output index as
+	returned by :ref:`VIDIOC_ENUMINPUT` and
+	:ref:`VIDIOC_ENUMOUTPUT` respectively.
+    * - __u32
+      - ``start_block``
+      - Read the EDID from starting with this block. Must be 0 when
+	setting the EDID.
+    * - __u32
+      - ``blocks``
+      - The number of blocks to get or set. Must be less or equal to 256
+	(the maximum number of blocks as defined by the standard). When
+	you set the EDID and ``blocks`` is 0, then the EDID is disabled or
+	erased.
+    * - __u32
+      - ``reserved``\ [5]
+      - Reserved for future extensions. Applications and drivers must set
+	the array to zero.
+    * - __u8 *
+      - ``edid``
+      - Pointer to memory that contains the EDID. The minimum size is
+	``blocks`` * 128.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-g-enc-index.rst b/Documentation/media/uapi/v4l/vidioc-g-enc-index.rst
index f0f41ac..418e886 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-enc-index.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-enc-index.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_enc_idx *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_ENC_INDEX, struct v4l2_enc_idx *argp )
+    :name: VIDIOC_G_ENC_INDEX
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_ENC_INDEX
-
 ``argp``
 
 
@@ -39,7 +37,7 @@
 decoding it.
 
 To read the data applications must call :ref:`VIDIOC_G_ENC_INDEX <VIDIOC_G_ENC_INDEX>` with a
-pointer to a struct :ref:`v4l2_enc_idx <v4l2-enc-idx>`. On success
+pointer to a struct :c:type:`v4l2_enc_idx`. On success
 the driver fills the ``entry`` array, stores the number of elements
 written in the ``entries`` field, and initializes the ``entries_cap``
 field.
@@ -57,108 +55,68 @@
 video elementary streams.
 
 
-.. _v4l2-enc-idx:
+.. tabularcolumns:: |p{3.5cm}|p{5.6cm}|p{8.4cm}|
+
+.. c:type:: v4l2_enc_idx
 
 .. flat-table:: struct v4l2_enc_idx
     :header-rows:  0
     :stub-columns: 0
-    :widths:       1 1 2 1 1
+    :widths:       1 3 8
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``entries``
-
-       -  The number of entries the driver stored in the ``entry`` array.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``entries_cap``
-
-       -  The number of entries the driver can buffer. Must be greater than
-	  zero.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``reserved``\ [4]
-
-       -  :cspan:`2` Reserved for future extensions. Drivers must set the
-	  array to zero.
-
-    -  .. row 4
-
-       -  struct :ref:`v4l2_enc_idx_entry <v4l2-enc-idx-entry>`
-
-       -  ``entry``\ [``V4L2_ENC_IDX_ENTRIES``]
-
-       -  Meta data about a compressed video stream. Each element of the
-	  array corresponds to one picture, sorted in ascending order by
-	  their ``offset``.
+    * - __u32
+      - ``entries``
+      - The number of entries the driver stored in the ``entry`` array.
+    * - __u32
+      - ``entries_cap``
+      - The number of entries the driver can buffer. Must be greater than
+	zero.
+    * - __u32
+      - ``reserved``\ [4]
+      - Reserved for future extensions. Drivers must set the
+	array to zero.
+    * - struct :c:type:`v4l2_enc_idx_entry`
+      - ``entry``\ [``V4L2_ENC_IDX_ENTRIES``]
+      - Meta data about a compressed video stream. Each element of the
+	array corresponds to one picture, sorted in ascending order by
+	their ``offset``.
 
 
 
-.. _v4l2-enc-idx-entry:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_enc_idx_entry
 
 .. flat-table:: struct v4l2_enc_idx_entry
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u64
-
-       -  ``offset``
-
-       -  The offset in bytes from the beginning of the compressed video
-	  stream to the beginning of this picture, that is a *PES packet
-	  header* as defined in :ref:`mpeg2part1` or a *picture header* as
-	  defined in :ref:`mpeg2part2`. When the encoder is stopped, the
-	  driver resets the offset to zero.
-
-    -  .. row 2
-
-       -  __u64
-
-       -  ``pts``
-
-       -  The 33 bit *Presentation Time Stamp* of this picture as defined in
-	  :ref:`mpeg2part1`.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``length``
-
-       -  The length of this picture in bytes.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``flags``
-
-       -  Flags containing the coding type of this picture, see
-	  :ref:`enc-idx-flags`.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``reserved``\ [2]
-
-       -  Reserved for future extensions. Drivers must set the array to
-	  zero.
+    * - __u64
+      - ``offset``
+      - The offset in bytes from the beginning of the compressed video
+	stream to the beginning of this picture, that is a *PES packet
+	header* as defined in :ref:`mpeg2part1` or a *picture header* as
+	defined in :ref:`mpeg2part2`. When the encoder is stopped, the
+	driver resets the offset to zero.
+    * - __u64
+      - ``pts``
+      - The 33 bit *Presentation Time Stamp* of this picture as defined in
+	:ref:`mpeg2part1`.
+    * - __u32
+      - ``length``
+      - The length of this picture in bytes.
+    * - __u32
+      - ``flags``
+      - Flags containing the coding type of this picture, see
+	:ref:`enc-idx-flags`.
+    * - __u32
+      - ``reserved``\ [2]
+      - Reserved for future extensions. Drivers must set the array to
+	zero.
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
 
 .. _enc-idx-flags:
 
@@ -167,39 +125,19 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_ENC_IDX_FRAME_I``
-
-       -  0x00
-
-       -  This is an Intra-coded picture.
-
-    -  .. row 2
-
-       -  ``V4L2_ENC_IDX_FRAME_P``
-
-       -  0x01
-
-       -  This is a Predictive-coded picture.
-
-    -  .. row 3
-
-       -  ``V4L2_ENC_IDX_FRAME_B``
-
-       -  0x02
-
-       -  This is a Bidirectionally predictive-coded picture.
-
-    -  .. row 4
-
-       -  ``V4L2_ENC_IDX_FRAME_MASK``
-
-       -  0x0F
-
-       -  *AND* the flags field with this mask to obtain the picture coding
-	  type.
+    * - ``V4L2_ENC_IDX_FRAME_I``
+      - 0x00
+      - This is an Intra-coded picture.
+    * - ``V4L2_ENC_IDX_FRAME_P``
+      - 0x01
+      - This is a Predictive-coded picture.
+    * - ``V4L2_ENC_IDX_FRAME_B``
+      - 0x02
+      - This is a Bidirectionally predictive-coded picture.
+    * - ``V4L2_ENC_IDX_FRAME_MASK``
+      - 0x0F
+      - *AND* the flags field with this mask to obtain the picture coding
+	type.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst b/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
index 39e24ad..5ab8d2a 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
@@ -15,7 +15,16 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_ext_controls *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_EXT_CTRLS, struct v4l2_ext_controls *argp )
+    :name: VIDIOC_G_EXT_CTRLS
+
+
+.. c:function:: int ioctl( int fd, VIDIOC_S_EXT_CTRLS, struct v4l2_ext_controls *argp )
+    :name: VIDIOC_S_EXT_CTRLS
+
+
+.. c:function:: int ioctl( int fd, VIDIOC_TRY_EXT_CTRLS, struct v4l2_ext_controls *argp )
+    :name: VIDIOC_TRY_EXT_CTRLS
 
 
 Arguments
@@ -24,10 +33,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_EXT_CTRLS, VIDIOC_S_EXT_CTRLS,
-    VIDIOC_TRY_EXT_CTRLS
-
 ``argp``
 
 
@@ -41,13 +46,13 @@
 
 Applications must always fill in the ``count``, ``which``, ``controls``
 and ``reserved`` fields of struct
-:ref:`v4l2_ext_controls <v4l2-ext-controls>`, and initialize the
-struct :ref:`v4l2_ext_control <v4l2-ext-control>` array pointed to
+:c:type:`v4l2_ext_controls`, and initialize the
+struct :c:type:`v4l2_ext_control` array pointed to
 by the ``controls`` fields.
 
 To get the current value of a set of controls applications initialize
 the ``id``, ``size`` and ``reserved2`` fields of each struct
-:ref:`v4l2_ext_control <v4l2-ext-control>` and call the
+:c:type:`v4l2_ext_control` and call the
 :ref:`VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` ioctl. String controls controls must also set the
 ``string`` field. Controls of compound types
 (``V4L2_CTRL_FLAG_HAS_PAYLOAD`` is set) must set the ``ptr`` field.
@@ -69,14 +74,14 @@
 
 To change the value of a set of controls applications initialize the
 ``id``, ``size``, ``reserved2`` and ``value/value64/string/ptr`` fields
-of each struct :ref:`v4l2_ext_control <v4l2-ext-control>` and call
+of each struct :c:type:`v4l2_ext_control` and call
 the :ref:`VIDIOC_S_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` ioctl. The controls will only be set if *all*
 control values are valid.
 
 To check if a set of controls have correct values applications
 initialize the ``id``, ``size``, ``reserved2`` and
 ``value/value64/string/ptr`` fields of each struct
-:ref:`v4l2_ext_control <v4l2-ext-control>` and call the
+:c:type:`v4l2_ext_control` and call the
 :ref:`VIDIOC_TRY_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` ioctl. It is up to the driver whether wrong
 values are automatically adjusted to a valid value or if an error is
 returned.
@@ -85,7 +90,7 @@
 code. When the value is out of bounds drivers can choose to take the
 closest valid value or return an ``ERANGE`` error code, whatever seems more
 appropriate. In the first case the new value is set in struct
-:ref:`v4l2_ext_control <v4l2-ext-control>`. If the new control value
+:c:type:`v4l2_ext_control`. If the new control value
 is inappropriate (e.g. the given menu index is not supported by the menu
 control), then this will also result in an ``EINVAL`` error code error.
 
@@ -95,264 +100,190 @@
 still cause this situation.
 
 
-.. _v4l2-ext-control:
+.. tabularcolumns:: |p{1.2cm}|p{3.0cm}|p{1.5cm}|p{11.8cm}|
+
+.. c:type:: v4l2_ext_control
+
+.. cssclass: longtable
 
 .. flat-table:: struct v4l2_ext_control
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 1 2
 
+    * - __u32
+      - ``id``
+      -
+      - Identifies the control, set by the application.
+    * - __u32
+      - ``size``
+      -
+      - The total size in bytes of the payload of this control. This is
+	normally 0, but for pointer controls this should be set to the
+	size of the memory containing the payload, or that will receive
+	the payload. If :ref:`VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` finds that this value is
+	less than is required to store the payload result, then it is set
+	to a value large enough to store the payload result and ``ENOSPC`` is
+	returned.
 
-    -  .. row 1
+	.. note::
 
-       -  __u32
-
-       -  ``id``
-
-       -
-       -  Identifies the control, set by the application.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``size``
-
-       -
-       -  The total size in bytes of the payload of this control. This is
-	  normally 0, but for pointer controls this should be set to the
-	  size of the memory containing the payload, or that will receive
-	  the payload. If :ref:`VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` finds that this value is
-	  less than is required to store the payload result, then it is set
-	  to a value large enough to store the payload result and ``ENOSPC`` is
-	  returned.
-
-	  .. note:: For string controls, this ``size`` field should
-	     not be confused with the length of the string. This field refers
-	     to the size of the memory that contains the string. The actual
-	     *length* of the string may well be much smaller.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``reserved2``\ [1]
-
-       -
-       -  Reserved for future extensions. Drivers and applications must set
-	  the array to zero.
-
-    -  .. row 4
-
-       -  union
-
-       -  (anonymous)
-
-    -  .. row 5
-
-       -
-       -  __s32
-
-       -  ``value``
-
-       -  New value or current value. Valid if this control is not of type
-	  ``V4L2_CTRL_TYPE_INTEGER64`` and ``V4L2_CTRL_FLAG_HAS_PAYLOAD`` is
-	  not set.
-
-    -  .. row 6
-
-       -
-       -  __s64
-
-       -  ``value64``
-
-       -  New value or current value. Valid if this control is of type
-	  ``V4L2_CTRL_TYPE_INTEGER64`` and ``V4L2_CTRL_FLAG_HAS_PAYLOAD`` is
-	  not set.
-
-    -  .. row 7
-
-       -
-       -  char *
-
-       -  ``string``
-
-       -  A pointer to a string. Valid if this control is of type
-	  ``V4L2_CTRL_TYPE_STRING``.
-
-    -  .. row 8
-
-       -
-       -  __u8 *
-
-       -  ``p_u8``
-
-       -  A pointer to a matrix control of unsigned 8-bit values. Valid if
-	  this control is of type ``V4L2_CTRL_TYPE_U8``.
-
-    -  .. row 9
-
-       -
-       -  __u16 *
-
-       -  ``p_u16``
-
-       -  A pointer to a matrix control of unsigned 16-bit values. Valid if
-	  this control is of type ``V4L2_CTRL_TYPE_U16``.
-
-    -  .. row 10
-
-       -
-       -  __u32 *
-
-       -  ``p_u32``
-
-       -  A pointer to a matrix control of unsigned 32-bit values. Valid if
-	  this control is of type ``V4L2_CTRL_TYPE_U32``.
-
-    -  .. row 11
-
-       -
-       -  void *
-
-       -  ``ptr``
-
-       -  A pointer to a compound type which can be an N-dimensional array
-	  and/or a compound type (the control's type is >=
-	  ``V4L2_CTRL_COMPOUND_TYPES``). Valid if
-	  ``V4L2_CTRL_FLAG_HAS_PAYLOAD`` is set for this control.
+	   For string controls, this ``size`` field should
+	   not be confused with the length of the string. This field refers
+	   to the size of the memory that contains the string. The actual
+	   *length* of the string may well be much smaller.
+    * - __u32
+      - ``reserved2``\ [1]
+      -
+      - Reserved for future extensions. Drivers and applications must set
+	the array to zero.
+    * - union
+      - (anonymous)
+    * -
+      - __s32
+      - ``value``
+      - New value or current value. Valid if this control is not of type
+	``V4L2_CTRL_TYPE_INTEGER64`` and ``V4L2_CTRL_FLAG_HAS_PAYLOAD`` is
+	not set.
+    * -
+      - __s64
+      - ``value64``
+      - New value or current value. Valid if this control is of type
+	``V4L2_CTRL_TYPE_INTEGER64`` and ``V4L2_CTRL_FLAG_HAS_PAYLOAD`` is
+	not set.
+    * -
+      - char *
+      - ``string``
+      - A pointer to a string. Valid if this control is of type
+	``V4L2_CTRL_TYPE_STRING``.
+    * -
+      - __u8 *
+      - ``p_u8``
+      - A pointer to a matrix control of unsigned 8-bit values. Valid if
+	this control is of type ``V4L2_CTRL_TYPE_U8``.
+    * -
+      - __u16 *
+      - ``p_u16``
+      - A pointer to a matrix control of unsigned 16-bit values. Valid if
+	this control is of type ``V4L2_CTRL_TYPE_U16``.
+    * -
+      - __u32 *
+      - ``p_u32``
+      - A pointer to a matrix control of unsigned 32-bit values. Valid if
+	this control is of type ``V4L2_CTRL_TYPE_U32``.
+    * -
+      - void *
+      - ``ptr``
+      - A pointer to a compound type which can be an N-dimensional array
+	and/or a compound type (the control's type is >=
+	``V4L2_CTRL_COMPOUND_TYPES``). Valid if
+	``V4L2_CTRL_FLAG_HAS_PAYLOAD`` is set for this control.
 
 
+.. tabularcolumns:: |p{4.0cm}|p{2.0cm}|p{2.0cm}|p{8.5cm}|
 
-.. _v4l2-ext-controls:
+.. c:type:: v4l2_ext_controls
+
+.. cssclass:: longtable
 
 .. flat-table:: struct v4l2_ext_controls
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2 1
 
+    * - union
+      - (anonymous)
+    * -
+      - __u32
+      - ``ctrl_class``
+      - The control class to which all controls belong, see
+	:ref:`ctrl-class`. Drivers that use a kernel framework for
+	handling controls will also accept a value of 0 here, meaning that
+	the controls can belong to any control class. Whether drivers
+	support this can be tested by setting ``ctrl_class`` to 0 and
+	calling :ref:`VIDIOC_TRY_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` with a ``count`` of 0. If that
+	succeeds, then the driver supports this feature.
+    * -
+      - __u32
+      - ``which``
+      - Which value of the control to get/set/try.
+	``V4L2_CTRL_WHICH_CUR_VAL`` will return the current value of the
+	control and ``V4L2_CTRL_WHICH_DEF_VAL`` will return the default
+	value of the control.
 
-    -  .. row 1
+	.. note::
 
-       -  union
+	   You can only get the default value of the control,
+	   you cannot set or try it.
 
-       -  (anonymous)
+	For backwards compatibility you can also use a control class here
+	(see :ref:`ctrl-class`). In that case all controls have to
+	belong to that control class. This usage is deprecated, instead
+	just use ``V4L2_CTRL_WHICH_CUR_VAL``. There are some very old
+	drivers that do not yet support ``V4L2_CTRL_WHICH_CUR_VAL`` and
+	that require a control class here. You can test for such drivers
+	by setting ctrl_class to ``V4L2_CTRL_WHICH_CUR_VAL`` and calling
+	VIDIOC_TRY_EXT_CTRLS with a count of 0. If that fails, then the
+	driver does not support ``V4L2_CTRL_WHICH_CUR_VAL``.
+    * - __u32
+      - ``count``
+      - The number of controls in the controls array. May also be zero.
+    * - __u32
+      - ``error_idx``
+      - Set by the driver in case of an error. If the error is associated
+	with a particular control, then ``error_idx`` is set to the index
+	of that control. If the error is not related to a specific
+	control, or the validation step failed (see below), then
+	``error_idx`` is set to ``count``. The value is undefined if the
+	ioctl returned 0 (success).
 
-    -  .. row 2
+	Before controls are read from/written to hardware a validation
+	step takes place: this checks if all controls in the list are
+	valid controls, if no attempt is made to write to a read-only
+	control or read from a write-only control, and any other up-front
+	checks that can be done without accessing the hardware. The exact
+	validations done during this step are driver dependent since some
+	checks might require hardware access for some devices, thus making
+	it impossible to do those checks up-front. However, drivers should
+	make a best-effort to do as many up-front checks as possible.
 
-       -
-       -  __u32
+	This check is done to avoid leaving the hardware in an
+	inconsistent state due to easy-to-avoid problems. But it leads to
+	another problem: the application needs to know whether an error
+	came from the validation step (meaning that the hardware was not
+	touched) or from an error during the actual reading from/writing
+	to hardware.
 
-       -  ``ctrl_class``
+	The, in hindsight quite poor, solution for that is to set
+	``error_idx`` to ``count`` if the validation failed. This has the
+	unfortunate side-effect that it is not possible to see which
+	control failed the validation. If the validation was successful
+	and the error happened while accessing the hardware, then
+	``error_idx`` is less than ``count`` and only the controls up to
+	``error_idx-1`` were read or written correctly, and the state of
+	the remaining controls is undefined.
 
-       -  The control class to which all controls belong, see
-	  :ref:`ctrl-class`. Drivers that use a kernel framework for
-	  handling controls will also accept a value of 0 here, meaning that
-	  the controls can belong to any control class. Whether drivers
-	  support this can be tested by setting ``ctrl_class`` to 0 and
-	  calling :ref:`VIDIOC_TRY_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` with a ``count`` of 0. If that
-	  succeeds, then the driver supports this feature.
+	Since :ref:`VIDIOC_TRY_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` does not access hardware there is
+	also no need to handle the validation step in this special way, so
+	``error_idx`` will just be set to the control that failed the
+	validation step instead of to ``count``. This means that if
+	:ref:`VIDIOC_S_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` fails with ``error_idx`` set to ``count``,
+	then you can call :ref:`VIDIOC_TRY_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` to try to discover the
+	actual control that failed the validation step. Unfortunately,
+	there is no ``TRY`` equivalent for :ref:`VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>`.
+    * - __u32
+      - ``reserved``\ [2]
+      - Reserved for future extensions.
 
-    -  .. row 3
+	Drivers and applications must set the array to zero.
+    * - struct :c:type:`v4l2_ext_control` *
+      - ``controls``
+      - Pointer to an array of ``count`` v4l2_ext_control structures.
 
-       -
-       -  __u32
-
-       -  ``which``
-
-       -  Which value of the control to get/set/try.
-	  ``V4L2_CTRL_WHICH_CUR_VAL`` will return the current value of the
-	  control and ``V4L2_CTRL_WHICH_DEF_VAL`` will return the default
-	  value of the control.
-
-	  .. note:: You can only get the default value of the control,
-	     you cannot set or try it.
-
-	  For backwards compatibility you can also use a control class here
-	  (see :ref:`ctrl-class`). In that case all controls have to
-	  belong to that control class. This usage is deprecated, instead
-	  just use ``V4L2_CTRL_WHICH_CUR_VAL``. There are some very old
-	  drivers that do not yet support ``V4L2_CTRL_WHICH_CUR_VAL`` and
-	  that require a control class here. You can test for such drivers
-	  by setting ctrl_class to ``V4L2_CTRL_WHICH_CUR_VAL`` and calling
-	  VIDIOC_TRY_EXT_CTRLS with a count of 0. If that fails, then the
-	  driver does not support ``V4L2_CTRL_WHICH_CUR_VAL``.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``count``
-
-       -  The number of controls in the controls array. May also be zero.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``error_idx``
-
-       -  Set by the driver in case of an error. If the error is associated
-	  with a particular control, then ``error_idx`` is set to the index
-	  of that control. If the error is not related to a specific
-	  control, or the validation step failed (see below), then
-	  ``error_idx`` is set to ``count``. The value is undefined if the
-	  ioctl returned 0 (success).
-
-	  Before controls are read from/written to hardware a validation
-	  step takes place: this checks if all controls in the list are
-	  valid controls, if no attempt is made to write to a read-only
-	  control or read from a write-only control, and any other up-front
-	  checks that can be done without accessing the hardware. The exact
-	  validations done during this step are driver dependent since some
-	  checks might require hardware access for some devices, thus making
-	  it impossible to do those checks up-front. However, drivers should
-	  make a best-effort to do as many up-front checks as possible.
-
-	  This check is done to avoid leaving the hardware in an
-	  inconsistent state due to easy-to-avoid problems. But it leads to
-	  another problem: the application needs to know whether an error
-	  came from the validation step (meaning that the hardware was not
-	  touched) or from an error during the actual reading from/writing
-	  to hardware.
-
-	  The, in hindsight quite poor, solution for that is to set
-	  ``error_idx`` to ``count`` if the validation failed. This has the
-	  unfortunate side-effect that it is not possible to see which
-	  control failed the validation. If the validation was successful
-	  and the error happened while accessing the hardware, then
-	  ``error_idx`` is less than ``count`` and only the controls up to
-	  ``error_idx-1`` were read or written correctly, and the state of
-	  the remaining controls is undefined.
-
-	  Since :ref:`VIDIOC_TRY_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` does not access hardware there is
-	  also no need to handle the validation step in this special way, so
-	  ``error_idx`` will just be set to the control that failed the
-	  validation step instead of to ``count``. This means that if
-	  :ref:`VIDIOC_S_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` fails with ``error_idx`` set to ``count``,
-	  then you can call :ref:`VIDIOC_TRY_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` to try to discover the
-	  actual control that failed the validation step. Unfortunately,
-	  there is no ``TRY`` equivalent for :ref:`VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>`.
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``reserved``\ [2]
-
-       -  Reserved for future extensions. Drivers and applications must set
-	  the array to zero.
-
-    -  .. row 7
-
-       -  struct :ref:`v4l2_ext_control <v4l2-ext-control>` *
-
-       -  ``controls``
-
-       -  Pointer to an array of ``count`` v4l2_ext_control structures.
-	  Ignored if ``count`` equals zero.
+	Ignored if ``count`` equals zero.
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
 
 .. _ctrl-class:
 
@@ -361,99 +292,49 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_CTRL_CLASS_USER``
-
-       -  0x980000
-
-       -  The class containing user controls. These controls are described
-	  in :ref:`control`. All controls that can be set using the
-	  :ref:`VIDIOC_S_CTRL <VIDIOC_G_CTRL>` and
-	  :ref:`VIDIOC_G_CTRL <VIDIOC_G_CTRL>` ioctl belong to this
-	  class.
-
-    -  .. row 2
-
-       -  ``V4L2_CTRL_CLASS_MPEG``
-
-       -  0x990000
-
-       -  The class containing MPEG compression controls. These controls are
-	  described in :ref:`mpeg-controls`.
-
-    -  .. row 3
-
-       -  ``V4L2_CTRL_CLASS_CAMERA``
-
-       -  0x9a0000
-
-       -  The class containing camera controls. These controls are described
-	  in :ref:`camera-controls`.
-
-    -  .. row 4
-
-       -  ``V4L2_CTRL_CLASS_FM_TX``
-
-       -  0x9b0000
-
-       -  The class containing FM Transmitter (FM TX) controls. These
-	  controls are described in :ref:`fm-tx-controls`.
-
-    -  .. row 5
-
-       -  ``V4L2_CTRL_CLASS_FLASH``
-
-       -  0x9c0000
-
-       -  The class containing flash device controls. These controls are
-	  described in :ref:`flash-controls`.
-
-    -  .. row 6
-
-       -  ``V4L2_CTRL_CLASS_JPEG``
-
-       -  0x9d0000
-
-       -  The class containing JPEG compression controls. These controls are
-	  described in :ref:`jpeg-controls`.
-
-    -  .. row 7
-
-       -  ``V4L2_CTRL_CLASS_IMAGE_SOURCE``
-
-       -  0x9e0000
-
-       -  The class containing image source controls. These controls are
-	  described in :ref:`image-source-controls`.
-
-    -  .. row 8
-
-       -  ``V4L2_CTRL_CLASS_IMAGE_PROC``
-
-       -  0x9f0000
-
-       -  The class containing image processing controls. These controls are
-	  described in :ref:`image-process-controls`.
-
-    -  .. row 9
-
-       -  ``V4L2_CTRL_CLASS_FM_RX``
-
-       -  0xa10000
-
-       -  The class containing FM Receiver (FM RX) controls. These controls
-	  are described in :ref:`fm-rx-controls`.
-
-    -  .. row 10
-
-       -  ``V4L2_CTRL_CLASS_RF_TUNER``
-
-       -  0xa20000
-
-       -  The class containing RF tuner controls. These controls are
-	  described in :ref:`rf-tuner-controls`.
+    * - ``V4L2_CTRL_CLASS_USER``
+      - 0x980000
+      - The class containing user controls. These controls are described
+	in :ref:`control`. All controls that can be set using the
+	:ref:`VIDIOC_S_CTRL <VIDIOC_G_CTRL>` and
+	:ref:`VIDIOC_G_CTRL <VIDIOC_G_CTRL>` ioctl belong to this
+	class.
+    * - ``V4L2_CTRL_CLASS_MPEG``
+      - 0x990000
+      - The class containing MPEG compression controls. These controls are
+	described in :ref:`mpeg-controls`.
+    * - ``V4L2_CTRL_CLASS_CAMERA``
+      - 0x9a0000
+      - The class containing camera controls. These controls are described
+	in :ref:`camera-controls`.
+    * - ``V4L2_CTRL_CLASS_FM_TX``
+      - 0x9b0000
+      - The class containing FM Transmitter (FM TX) controls. These
+	controls are described in :ref:`fm-tx-controls`.
+    * - ``V4L2_CTRL_CLASS_FLASH``
+      - 0x9c0000
+      - The class containing flash device controls. These controls are
+	described in :ref:`flash-controls`.
+    * - ``V4L2_CTRL_CLASS_JPEG``
+      - 0x9d0000
+      - The class containing JPEG compression controls. These controls are
+	described in :ref:`jpeg-controls`.
+    * - ``V4L2_CTRL_CLASS_IMAGE_SOURCE``
+      - 0x9e0000
+      - The class containing image source controls. These controls are
+	described in :ref:`image-source-controls`.
+    * - ``V4L2_CTRL_CLASS_IMAGE_PROC``
+      - 0x9f0000
+      - The class containing image processing controls. These controls are
+	described in :ref:`image-process-controls`.
+    * - ``V4L2_CTRL_CLASS_FM_RX``
+      - 0xa10000
+      - The class containing FM Receiver (FM RX) controls. These controls
+	are described in :ref:`fm-rx-controls`.
+    * - ``V4L2_CTRL_CLASS_RF_TUNER``
+      - 0xa20000
+      - The class containing RF tuner controls. These controls are
+	described in :ref:`rf-tuner-controls`.
 
 
 Return Value
@@ -464,17 +345,17 @@
 :ref:`Generic Error Codes <gen-errors>` chapter.
 
 EINVAL
-    The struct :ref:`v4l2_ext_control <v4l2-ext-control>` ``id`` is
-    invalid, the struct :ref:`v4l2_ext_controls <v4l2-ext-controls>`
+    The struct :c:type:`v4l2_ext_control` ``id`` is
+    invalid, the struct :c:type:`v4l2_ext_controls`
     ``which`` is invalid, or the struct
-    :ref:`v4l2_ext_control <v4l2-ext-control>` ``value`` was
+    :c:type:`v4l2_ext_control` ``value`` was
     inappropriate (e.g. the given menu index is not supported by the
     driver). This error code is also returned by the
     :ref:`VIDIOC_S_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` and :ref:`VIDIOC_TRY_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` ioctls if two or
     more control values are in conflict.
 
 ERANGE
-    The struct :ref:`v4l2_ext_control <v4l2-ext-control>` ``value``
+    The struct :c:type:`v4l2_ext_control` ``value``
     is out of bounds.
 
 EBUSY
diff --git a/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst b/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst
index d182d9f..4a6a03d 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst
@@ -15,9 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_framebuffer *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_FBUF, struct v4l2_framebuffer *argp )
+    :name: VIDIOC_G_FBUF
 
-.. cpp:function:: int ioctl( int fd, int request, const struct v4l2_framebuffer *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_S_FBUF, const struct v4l2_framebuffer *argp )
+    :name: VIDIOC_S_FBUF
 
 
 Arguments
@@ -26,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_FBUF, VIDIOC_S_FBUF
-
 ``argp``
 
 
@@ -50,13 +49,13 @@
 always non-destructive.
 
 To get the current parameters applications call the :ref:`VIDIOC_G_FBUF <VIDIOC_G_FBUF>`
-ioctl with a pointer to a :ref:`struct v4l2_framebuffer <v4l2-framebuffer>`
+ioctl with a pointer to a struct :c:type:`v4l2_framebuffer`
 structure. The driver fills all fields of the structure or returns an
 EINVAL error code when overlays are not supported.
 
 To set the parameters for a *Video Output Overlay*, applications must
 initialize the ``flags`` field of a struct
-:ref:`struct v4l2_framebuffer <v4l2-framebuffer>`. Since the framebuffer is
+struct :c:type:`v4l2_framebuffer`. Since the framebuffer is
 implemented on the TV card all other parameters are determined by the
 driver. When an application calls :ref:`VIDIOC_S_FBUF <VIDIOC_G_FBUF>` with a pointer to
 this structure, the driver prepares for the overlay and returns the
@@ -76,210 +75,140 @@
 destructive video overlay.
 
 
-.. _v4l2-framebuffer:
+.. tabularcolumns:: |p{3.5cm}|p{3.5cm}|p{3.5cm}|p{7.0cm}|
+
+.. c:type:: v4l2_framebuffer
+
+.. cssclass:: longtable
 
 .. flat-table:: struct v4l2_framebuffer
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 1 2
 
+    * - __u32
+      - ``capability``
+      -
+      - Overlay capability flags set by the driver, see
+	:ref:`framebuffer-cap`.
+    * - __u32
+      - ``flags``
+      -
+      - Overlay control flags set by application and driver, see
+	:ref:`framebuffer-flags`
+    * - void *
+      - ``base``
+      -
+      - Physical base address of the framebuffer, that is the address of
+	the pixel in the top left corner of the framebuffer. [#f1]_
+    * -
+      -
+      -
+      - This field is irrelevant to *non-destructive Video Overlays*. For
+	*destructive Video Overlays* applications must provide a base
+	address. The driver may accept only base addresses which are a
+	multiple of two, four or eight bytes. For *Video Output Overlays*
+	the driver must return a valid base address, so applications can
+	find the corresponding Linux framebuffer device (see
+	:ref:`osd`).
+    * - struct
+      - ``fmt``
+      -
+      - Layout of the frame buffer.
+    * -
+      - __u32
+      - ``width``
+      - Width of the frame buffer in pixels.
+    * -
+      - __u32
+      - ``height``
+      - Height of the frame buffer in pixels.
+    * -
+      - __u32
+      - ``pixelformat``
+      - The pixel format of the framebuffer.
+    * -
+      -
+      -
+      - For *non-destructive Video Overlays* this field only defines a
+	format for the struct :c:type:`v4l2_window`
+	``chromakey`` field.
+    * -
+      -
+      -
+      - For *destructive Video Overlays* applications must initialize this
+	field. For *Video Output Overlays* the driver must return a valid
+	format.
+    * -
+      -
+      -
+      - Usually this is an RGB format (for example
+	:ref:`V4L2_PIX_FMT_RGB565 <V4L2-PIX-FMT-RGB565>`) but YUV
+	formats (only packed YUV formats when chroma keying is used, not
+	including ``V4L2_PIX_FMT_YUYV`` and ``V4L2_PIX_FMT_UYVY``) and the
+	``V4L2_PIX_FMT_PAL8`` format are also permitted. The behavior of
+	the driver when an application requests a compressed format is
+	undefined. See :ref:`pixfmt` for information on pixel formats.
+    * -
+      - enum :c:type:`v4l2_field`
+      - ``field``
+      - Drivers and applications shall ignore this field. If applicable,
+	the field order is selected with the
+	:ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl, using the ``field``
+	field of struct :c:type:`v4l2_window`.
+    * -
+      - __u32
+      - ``bytesperline``
+      - Distance in bytes between the leftmost pixels in two adjacent
+	lines.
+    * - :cspan:`3`
 
-    -  .. row 1
+	This field is irrelevant to *non-destructive Video Overlays*.
 
-       -  __u32
+	For *destructive Video Overlays* both applications and drivers can
+	set this field to request padding bytes at the end of each line.
+	Drivers however may ignore the requested value, returning
+	``width`` times bytes-per-pixel or a larger value required by the
+	hardware. That implies applications can just set this field to
+	zero to get a reasonable default.
 
-       -  ``capability``
+	For *Video Output Overlays* the driver must return a valid value.
 
-       -
-       -  Overlay capability flags set by the driver, see
-	  :ref:`framebuffer-cap`.
+	Video hardware may access padding bytes, therefore they must
+	reside in accessible memory. Consider for example the case where
+	padding bytes after the last line of an image cross a system page
+	boundary. Capture devices may write padding bytes, the value is
+	undefined. Output devices ignore the contents of padding bytes.
 
-    -  .. row 2
+	When the image format is planar the ``bytesperline`` value applies
+	to the first plane and is divided by the same factor as the
+	``width`` field for the other planes. For example the Cb and Cr
+	planes of a YUV 4:2:0 image have half as many padding bytes
+	following each line as the Y plane. To avoid ambiguities drivers
+	must return a ``bytesperline`` value rounded up to a multiple of
+	the scale factor.
+    * -
+      - __u32
+      - ``sizeimage``
+      - This field is irrelevant to *non-destructive Video Overlays*. For
+	*destructive Video Overlays* applications must initialize this
+	field. For *Video Output Overlays* the driver must return a valid
+	format.
 
-       -  __u32
+	Together with ``base`` it defines the framebuffer memory
+	accessible by the driver.
+    * -
+      - enum :c:type:`v4l2_colorspace`
+      - ``colorspace``
+      - This information supplements the ``pixelformat`` and must be set
+	by the driver, see :ref:`colorspaces`.
+    * -
+      - __u32
+      - ``priv``
+      - Reserved. Drivers and applications must set this field to zero.
 
-       -  ``flags``
 
-       -
-       -  Overlay control flags set by application and driver, see
-	  :ref:`framebuffer-flags`
-
-    -  .. row 3
-
-       -  void *
-
-       -  ``base``
-
-       -
-       -  Physical base address of the framebuffer, that is the address of
-	  the pixel in the top left corner of the framebuffer. [#f1]_
-
-    -  .. row 4
-
-       -
-       -
-       -
-       -  This field is irrelevant to *non-destructive Video Overlays*. For
-	  *destructive Video Overlays* applications must provide a base
-	  address. The driver may accept only base addresses which are a
-	  multiple of two, four or eight bytes. For *Video Output Overlays*
-	  the driver must return a valid base address, so applications can
-	  find the corresponding Linux framebuffer device (see
-	  :ref:`osd`).
-
-    -  .. row 5
-
-       -  struct
-
-       -  ``fmt``
-
-       -
-       -  Layout of the frame buffer.
-
-    -  .. row 6
-
-       -
-       -  __u32
-
-       -  ``width``
-
-       -  Width of the frame buffer in pixels.
-
-    -  .. row 7
-
-       -
-       -  __u32
-
-       -  ``height``
-
-       -  Height of the frame buffer in pixels.
-
-    -  .. row 8
-
-       -
-       -  __u32
-
-       -  ``pixelformat``
-
-       -  The pixel format of the framebuffer.
-
-    -  .. row 9
-
-       -
-       -
-       -
-       -  For *non-destructive Video Overlays* this field only defines a
-	  format for the struct :ref:`v4l2_window <v4l2-window>`
-	  ``chromakey`` field.
-
-    -  .. row 10
-
-       -
-       -
-       -
-       -  For *destructive Video Overlays* applications must initialize this
-	  field. For *Video Output Overlays* the driver must return a valid
-	  format.
-
-    -  .. row 11
-
-       -
-       -
-       -
-       -  Usually this is an RGB format (for example
-	  :ref:`V4L2_PIX_FMT_RGB565 <V4L2-PIX-FMT-RGB565>`) but YUV
-	  formats (only packed YUV formats when chroma keying is used, not
-	  including ``V4L2_PIX_FMT_YUYV`` and ``V4L2_PIX_FMT_UYVY``) and the
-	  ``V4L2_PIX_FMT_PAL8`` format are also permitted. The behavior of
-	  the driver when an application requests a compressed format is
-	  undefined. See :ref:`pixfmt` for information on pixel formats.
-
-    -  .. row 12
-
-       -
-       -  enum :ref:`v4l2_field <v4l2-field>`
-
-       -  ``field``
-
-       -  Drivers and applications shall ignore this field. If applicable,
-	  the field order is selected with the
-	  :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl, using the ``field``
-	  field of struct :ref:`v4l2_window <v4l2-window>`.
-
-    -  .. row 13
-
-       -
-       -  __u32
-
-       -  ``bytesperline``
-
-       -  Distance in bytes between the leftmost pixels in two adjacent
-	  lines.
-
-    -  .. row 14
-
-       -  :cspan:`3`
-
-	  This field is irrelevant to *non-destructive Video Overlays*.
-
-	  For *destructive Video Overlays* both applications and drivers can
-	  set this field to request padding bytes at the end of each line.
-	  Drivers however may ignore the requested value, returning
-	  ``width`` times bytes-per-pixel or a larger value required by the
-	  hardware. That implies applications can just set this field to
-	  zero to get a reasonable default.
-
-	  For *Video Output Overlays* the driver must return a valid value.
-
-	  Video hardware may access padding bytes, therefore they must
-	  reside in accessible memory. Consider for example the case where
-	  padding bytes after the last line of an image cross a system page
-	  boundary. Capture devices may write padding bytes, the value is
-	  undefined. Output devices ignore the contents of padding bytes.
-
-	  When the image format is planar the ``bytesperline`` value applies
-	  to the first plane and is divided by the same factor as the
-	  ``width`` field for the other planes. For example the Cb and Cr
-	  planes of a YUV 4:2:0 image have half as many padding bytes
-	  following each line as the Y plane. To avoid ambiguities drivers
-	  must return a ``bytesperline`` value rounded up to a multiple of
-	  the scale factor.
-
-    -  .. row 15
-
-       -
-       -  __u32
-
-       -  ``sizeimage``
-
-       -  This field is irrelevant to *non-destructive Video Overlays*. For
-	  *destructive Video Overlays* applications must initialize this
-	  field. For *Video Output Overlays* the driver must return a valid
-	  format.
-
-	  Together with ``base`` it defines the framebuffer memory
-	  accessible by the driver.
-
-    -  .. row 16
-
-       -
-       -  enum :ref:`v4l2_colorspace <v4l2-colorspace>`
-
-       -  ``colorspace``
-
-       -  This information supplements the ``pixelformat`` and must be set
-	  by the driver, see :ref:`colorspaces`.
-
-    -  .. row 17
-
-       -
-       -  __u32
-
-       -  ``priv``
-
-       -  Reserved. Drivers and applications must set this field to zero.
-
-
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
 
 .. _framebuffer-cap:
 
@@ -288,194 +217,119 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_FBUF_CAP_EXTERNOVERLAY``
-
-       -  0x0001
-
-       -  The device is capable of non-destructive overlays. When the driver
-	  clears this flag, only destructive overlays are supported. There
-	  are no drivers yet which support both destructive and
-	  non-destructive overlays. Video Output Overlays are in practice
-	  always non-destructive.
-
-    -  .. row 2
-
-       -  ``V4L2_FBUF_CAP_CHROMAKEY``
-
-       -  0x0002
-
-       -  The device supports clipping by chroma-keying the images. That is,
-	  image pixels replace pixels in the VGA or video signal only where
-	  the latter assume a certain color. Chroma-keying makes no sense
-	  for destructive overlays.
-
-    -  .. row 3
-
-       -  ``V4L2_FBUF_CAP_LIST_CLIPPING``
-
-       -  0x0004
-
-       -  The device supports clipping using a list of clip rectangles.
-
-    -  .. row 4
-
-       -  ``V4L2_FBUF_CAP_BITMAP_CLIPPING``
-
-       -  0x0008
-
-       -  The device supports clipping using a bit mask.
-
-    -  .. row 5
-
-       -  ``V4L2_FBUF_CAP_LOCAL_ALPHA``
-
-       -  0x0010
-
-       -  The device supports clipping/blending using the alpha channel of
-	  the framebuffer or VGA signal. Alpha blending makes no sense for
-	  destructive overlays.
-
-    -  .. row 6
-
-       -  ``V4L2_FBUF_CAP_GLOBAL_ALPHA``
-
-       -  0x0020
-
-       -  The device supports alpha blending using a global alpha value.
-	  Alpha blending makes no sense for destructive overlays.
-
-    -  .. row 7
-
-       -  ``V4L2_FBUF_CAP_LOCAL_INV_ALPHA``
-
-       -  0x0040
-
-       -  The device supports clipping/blending using the inverted alpha
-	  channel of the framebuffer or VGA signal. Alpha blending makes no
-	  sense for destructive overlays.
-
-    -  .. row 8
-
-       -  ``V4L2_FBUF_CAP_SRC_CHROMAKEY``
-
-       -  0x0080
-
-       -  The device supports Source Chroma-keying. Video pixels with the
-	  chroma-key colors are replaced by framebuffer pixels, which is
-	  exactly opposite of ``V4L2_FBUF_CAP_CHROMAKEY``
+    * - ``V4L2_FBUF_CAP_EXTERNOVERLAY``
+      - 0x0001
+      - The device is capable of non-destructive overlays. When the driver
+	clears this flag, only destructive overlays are supported. There
+	are no drivers yet which support both destructive and
+	non-destructive overlays. Video Output Overlays are in practice
+	always non-destructive.
+    * - ``V4L2_FBUF_CAP_CHROMAKEY``
+      - 0x0002
+      - The device supports clipping by chroma-keying the images. That is,
+	image pixels replace pixels in the VGA or video signal only where
+	the latter assume a certain color. Chroma-keying makes no sense
+	for destructive overlays.
+    * - ``V4L2_FBUF_CAP_LIST_CLIPPING``
+      - 0x0004
+      - The device supports clipping using a list of clip rectangles.
+    * - ``V4L2_FBUF_CAP_BITMAP_CLIPPING``
+      - 0x0008
+      - The device supports clipping using a bit mask.
+    * - ``V4L2_FBUF_CAP_LOCAL_ALPHA``
+      - 0x0010
+      - The device supports clipping/blending using the alpha channel of
+	the framebuffer or VGA signal. Alpha blending makes no sense for
+	destructive overlays.
+    * - ``V4L2_FBUF_CAP_GLOBAL_ALPHA``
+      - 0x0020
+      - The device supports alpha blending using a global alpha value.
+	Alpha blending makes no sense for destructive overlays.
+    * - ``V4L2_FBUF_CAP_LOCAL_INV_ALPHA``
+      - 0x0040
+      - The device supports clipping/blending using the inverted alpha
+	channel of the framebuffer or VGA signal. Alpha blending makes no
+	sense for destructive overlays.
+    * - ``V4L2_FBUF_CAP_SRC_CHROMAKEY``
+      - 0x0080
+      - The device supports Source Chroma-keying. Video pixels with the
+	chroma-key colors are replaced by framebuffer pixels, which is
+	exactly opposite of ``V4L2_FBUF_CAP_CHROMAKEY``
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
 
 .. _framebuffer-flags:
 
+.. cssclass:: longtable
+
 .. flat-table:: Frame Buffer Flags
     :header-rows:  0
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_FBUF_FLAG_PRIMARY``
-
-       -  0x0001
-
-       -  The framebuffer is the primary graphics surface. In other words,
-	  the overlay is destructive. This flag is typically set by any
-	  driver that doesn't have the ``V4L2_FBUF_CAP_EXTERNOVERLAY``
-	  capability and it is cleared otherwise.
-
-    -  .. row 2
-
-       -  ``V4L2_FBUF_FLAG_OVERLAY``
-
-       -  0x0002
-
-       -  If this flag is set for a video capture device, then the driver
-	  will set the initial overlay size to cover the full framebuffer
-	  size, otherwise the existing overlay size (as set by
-	  :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>`) will be used. Only one
-	  video capture driver (bttv) supports this flag. The use of this
-	  flag for capture devices is deprecated. There is no way to detect
-	  which drivers support this flag, so the only reliable method of
-	  setting the overlay size is through
-	  :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>`. If this flag is set for a
-	  video output device, then the video output overlay window is
-	  relative to the top-left corner of the framebuffer and restricted
-	  to the size of the framebuffer. If it is cleared, then the video
-	  output overlay window is relative to the video output display.
-
-    -  .. row 3
-
-       -  ``V4L2_FBUF_FLAG_CHROMAKEY``
-
-       -  0x0004
-
-       -  Use chroma-keying. The chroma-key color is determined by the
-	  ``chromakey`` field of struct :ref:`v4l2_window <v4l2-window>`
-	  and negotiated with the :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>`
-	  ioctl, see :ref:`overlay` and :ref:`osd`.
-
-    -  .. row 4
-
-       -  :cspan:`2` There are no flags to enable clipping using a list of
-	  clip rectangles or a bitmap. These methods are negotiated with the
-	  :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl, see :ref:`overlay`
-	  and :ref:`osd`.
-
-    -  .. row 5
-
-       -  ``V4L2_FBUF_FLAG_LOCAL_ALPHA``
-
-       -  0x0008
-
-       -  Use the alpha channel of the framebuffer to clip or blend
-	  framebuffer pixels with video images. The blend function is:
-	  output = framebuffer pixel * alpha + video pixel * (1 - alpha).
-	  The actual alpha depth depends on the framebuffer pixel format.
-
-    -  .. row 6
-
-       -  ``V4L2_FBUF_FLAG_GLOBAL_ALPHA``
-
-       -  0x0010
-
-       -  Use a global alpha value to blend the framebuffer with video
-	  images. The blend function is: output = (framebuffer pixel * alpha
-	  + video pixel * (255 - alpha)) / 255. The alpha value is
-	  determined by the ``global_alpha`` field of struct
-	  :ref:`v4l2_window <v4l2-window>` and negotiated with the
-	  :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl, see :ref:`overlay`
-	  and :ref:`osd`.
-
-    -  .. row 7
-
-       -  ``V4L2_FBUF_FLAG_LOCAL_INV_ALPHA``
-
-       -  0x0020
-
-       -  Like ``V4L2_FBUF_FLAG_LOCAL_ALPHA``, use the alpha channel of the
-	  framebuffer to clip or blend framebuffer pixels with video images,
-	  but with an inverted alpha value. The blend function is: output =
-	  framebuffer pixel * (1 - alpha) + video pixel * alpha. The actual
-	  alpha depth depends on the framebuffer pixel format.
-
-    -  .. row 8
-
-       -  ``V4L2_FBUF_FLAG_SRC_CHROMAKEY``
-
-       -  0x0040
-
-       -  Use source chroma-keying. The source chroma-key color is
-	  determined by the ``chromakey`` field of struct
-	  :ref:`v4l2_window <v4l2-window>` and negotiated with the
-	  :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl, see :ref:`overlay`
-	  and :ref:`osd`. Both chroma-keying are mutual exclusive to each
-	  other, so same ``chromakey`` field of struct
-	  :ref:`v4l2_window <v4l2-window>` is being used.
+    * - ``V4L2_FBUF_FLAG_PRIMARY``
+      - 0x0001
+      - The framebuffer is the primary graphics surface. In other words,
+	the overlay is destructive. This flag is typically set by any
+	driver that doesn't have the ``V4L2_FBUF_CAP_EXTERNOVERLAY``
+	capability and it is cleared otherwise.
+    * - ``V4L2_FBUF_FLAG_OVERLAY``
+      - 0x0002
+      - If this flag is set for a video capture device, then the driver
+	will set the initial overlay size to cover the full framebuffer
+	size, otherwise the existing overlay size (as set by
+	:ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>`) will be used. Only one
+	video capture driver (bttv) supports this flag. The use of this
+	flag for capture devices is deprecated. There is no way to detect
+	which drivers support this flag, so the only reliable method of
+	setting the overlay size is through
+	:ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>`. If this flag is set for a
+	video output device, then the video output overlay window is
+	relative to the top-left corner of the framebuffer and restricted
+	to the size of the framebuffer. If it is cleared, then the video
+	output overlay window is relative to the video output display.
+    * - ``V4L2_FBUF_FLAG_CHROMAKEY``
+      - 0x0004
+      - Use chroma-keying. The chroma-key color is determined by the
+	``chromakey`` field of struct :c:type:`v4l2_window`
+	and negotiated with the :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>`
+	ioctl, see :ref:`overlay` and :ref:`osd`.
+    * - :cspan:`2` There are no flags to enable clipping using a list of
+	clip rectangles or a bitmap. These methods are negotiated with the
+	:ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl, see :ref:`overlay`
+	and :ref:`osd`.
+    * - ``V4L2_FBUF_FLAG_LOCAL_ALPHA``
+      - 0x0008
+      - Use the alpha channel of the framebuffer to clip or blend
+	framebuffer pixels with video images. The blend function is:
+	output = framebuffer pixel * alpha + video pixel * (1 - alpha).
+	The actual alpha depth depends on the framebuffer pixel format.
+    * - ``V4L2_FBUF_FLAG_GLOBAL_ALPHA``
+      - 0x0010
+      - Use a global alpha value to blend the framebuffer with video
+	images. The blend function is: output = (framebuffer pixel * alpha
+	+ video pixel * (255 - alpha)) / 255. The alpha value is
+	determined by the ``global_alpha`` field of struct
+	:c:type:`v4l2_window` and negotiated with the
+	:ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl, see :ref:`overlay`
+	and :ref:`osd`.
+    * - ``V4L2_FBUF_FLAG_LOCAL_INV_ALPHA``
+      - 0x0020
+      - Like ``V4L2_FBUF_FLAG_LOCAL_ALPHA``, use the alpha channel of the
+	framebuffer to clip or blend framebuffer pixels with video images,
+	but with an inverted alpha value. The blend function is: output =
+	framebuffer pixel * (1 - alpha) + video pixel * alpha. The actual
+	alpha depth depends on the framebuffer pixel format.
+    * - ``V4L2_FBUF_FLAG_SRC_CHROMAKEY``
+      - 0x0040
+      - Use source chroma-keying. The source chroma-key color is
+	determined by the ``chromakey`` field of struct
+	:c:type:`v4l2_window` and negotiated with the
+	:ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl, see :ref:`overlay`
+	and :ref:`osd`. Both chroma-keying are mutual exclusive to each
+	other, so same ``chromakey`` field of struct
+	:c:type:`v4l2_window` is being used.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-g-fmt.rst b/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
index ee6f119..b853e48 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
@@ -15,8 +15,14 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_format *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_FMT, struct v4l2_format *argp )
+    :name: VIDIOC_G_FMT
 
+.. c:function:: int ioctl( int fd, VIDIOC_S_FMT, struct v4l2_format *argp )
+    :name: VIDIOC_S_FMT
+
+.. c:function:: int ioctl( int fd, VIDIOC_TRY_FMT, struct v4l2_format *argp )
+    :name: VIDIOC_TRY_FMT
 
 Arguments
 =========
@@ -24,9 +30,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_FMT, VIDIOC_S_FMT, VIDIOC_TRY_FMT
-
 ``argp``
 
 
@@ -37,15 +40,15 @@
 format) exchanged between driver and application.
 
 To query the current parameters applications set the ``type`` field of a
-struct :ref:`struct v4l2_format <v4l2-format>` to the respective buffer (stream)
+struct :c:type:`v4l2_format` to the respective buffer (stream)
 type. For example video capture devices use
 ``V4L2_BUF_TYPE_VIDEO_CAPTURE`` or
 ``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE``. When the application calls the
 :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` ioctl with a pointer to this structure the driver fills
 the respective member of the ``fmt`` union. In case of video capture
 devices that is either the struct
-:ref:`v4l2_pix_format <v4l2-pix-format>` ``pix`` or the struct
-:ref:`v4l2_pix_format_mplane <v4l2-pix-format-mplane>` ``pix_mp``
+:c:type:`v4l2_pix_format` ``pix`` or the struct
+:c:type:`v4l2_pix_format_mplane` ``pix_mp``
 member. When the requested buffer type is not supported drivers return
 an ``EINVAL`` error code.
 
@@ -55,7 +58,7 @@
 :ref:`devices`. Good practice is to query the current parameters
 first, and to modify only those parameters not suitable for the
 application. When the application calls the :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl with
-a pointer to a :ref:`struct v4l2_format <v4l2-format>` structure the driver
+a pointer to a struct :c:type:`v4l2_format` structure the driver
 checks and adjusts the parameters against hardware abilities. Drivers
 should not return an error code unless the ``type`` field is invalid,
 this is a mechanism to fathom device capabilities and to approach
@@ -82,98 +85,56 @@
 :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` returns for the same input or output.
 
 
-.. _v4l2-format:
+.. c:type:: v4l2_format
+
+.. tabularcolumns::  |p{1.2cm}|p{4.3cm}|p{3.0cm}|p{9.0cm}|
 
 .. flat-table:: struct v4l2_format
     :header-rows:  0
     :stub-columns: 0
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``type``
-
-       -
-       -  Type of the data stream, see :ref:`v4l2-buf-type`.
-
-    -  .. row 2
-
-       -  union
-
-       -  ``fmt``
-
-    -  .. row 3
-
-       -
-       -  struct :ref:`v4l2_pix_format <v4l2-pix-format>`
-
-       -  ``pix``
-
-       -  Definition of an image format, see :ref:`pixfmt`, used by video
-	  capture and output devices.
-
-    -  .. row 4
-
-       -
-       -  struct :ref:`v4l2_pix_format_mplane <v4l2-pix-format-mplane>`
-
-       -  ``pix_mp``
-
-       -  Definition of an image format, see :ref:`pixfmt`, used by video
-	  capture and output devices that support the
-	  :ref:`multi-planar version of the API <planar-apis>`.
-
-    -  .. row 5
-
-       -
-       -  struct :ref:`v4l2_window <v4l2-window>`
-
-       -  ``win``
-
-       -  Definition of an overlaid image, see :ref:`overlay`, used by
-	  video overlay devices.
-
-    -  .. row 6
-
-       -
-       -  struct :ref:`v4l2_vbi_format <v4l2-vbi-format>`
-
-       -  ``vbi``
-
-       -  Raw VBI capture or output parameters. This is discussed in more
-	  detail in :ref:`raw-vbi`. Used by raw VBI capture and output
-	  devices.
-
-    -  .. row 7
-
-       -
-       -  struct :ref:`v4l2_sliced_vbi_format <v4l2-sliced-vbi-format>`
-
-       -  ``sliced``
-
-       -  Sliced VBI capture or output parameters. See :ref:`sliced` for
-	  details. Used by sliced VBI capture and output devices.
-
-    -  .. row 8
-
-       -
-       -  struct :ref:`v4l2_sdr_format <v4l2-sdr-format>`
-
-       -  ``sdr``
-
-       -  Definition of a data format, see :ref:`pixfmt`, used by SDR
-	  capture and output devices.
-
-    -  .. row 9
-
-       -
-       -  __u8
-
-       -  ``raw_data``\ [200]
-
-       -  Place holder for future extensions.
+    * - __u32
+      - ``type``
+      -
+      - Type of the data stream, see :c:type:`v4l2_buf_type`.
+    * - union
+      - ``fmt``
+    * -
+      - struct :c:type:`v4l2_pix_format`
+      - ``pix``
+      - Definition of an image format, see :ref:`pixfmt`, used by video
+	capture and output devices.
+    * -
+      - struct :c:type:`v4l2_pix_format_mplane`
+      - ``pix_mp``
+      - Definition of an image format, see :ref:`pixfmt`, used by video
+	capture and output devices that support the
+	:ref:`multi-planar version of the API <planar-apis>`.
+    * -
+      - struct :c:type:`v4l2_window`
+      - ``win``
+      - Definition of an overlaid image, see :ref:`overlay`, used by
+	video overlay devices.
+    * -
+      - struct :c:type:`v4l2_vbi_format`
+      - ``vbi``
+      - Raw VBI capture or output parameters. This is discussed in more
+	detail in :ref:`raw-vbi`. Used by raw VBI capture and output
+	devices.
+    * -
+      - struct :c:type:`v4l2_sliced_vbi_format`
+      - ``sliced``
+      - Sliced VBI capture or output parameters. See :ref:`sliced` for
+	details. Used by sliced VBI capture and output devices.
+    * -
+      - struct :c:type:`v4l2_sdr_format`
+      - ``sdr``
+      - Definition of a data format, see :ref:`pixfmt`, used by SDR
+	capture and output devices.
+    * -
+      - __u8
+      - ``raw_data``\ [200]
+      - Place holder for future extensions.
 
 
 Return Value
@@ -184,5 +145,5 @@
 :ref:`Generic Error Codes <gen-errors>` chapter.
 
 EINVAL
-    The struct :ref:`v4l2_format <v4l2-format>` ``type`` field is
+    The struct :c:type:`v4l2_format` ``type`` field is
     invalid or the requested buffer type not supported.
diff --git a/Documentation/media/uapi/v4l/vidioc-g-frequency.rst b/Documentation/media/uapi/v4l/vidioc-g-frequency.rst
index a1fd2a8..46ab276 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-frequency.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-frequency.rst
@@ -15,9 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_frequency *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_FREQUENCY, struct v4l2_frequency *argp )
+    :name: VIDIOC_G_FREQUENCY
 
-.. cpp:function:: int ioctl( int fd, int request, const struct v4l2_frequency *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_S_FREQUENCY, const struct v4l2_frequency *argp )
+    :name: VIDIOC_S_FREQUENCY
 
 
 Arguments
@@ -26,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_FREQUENCY, VIDIOC_S_FREQUENCY
-
 ``argp``
 
 
@@ -37,7 +36,7 @@
 
 To get the current tuner or modulator radio frequency applications set
 the ``tuner`` field of a struct
-:ref:`v4l2_frequency <v4l2-frequency>` to the respective tuner or
+:c:type:`v4l2_frequency` to the respective tuner or
 modulator number (only input devices have tuners, only output devices
 have modulators), zero out the ``reserved`` array and call the
 :ref:`VIDIOC_G_FREQUENCY <VIDIOC_G_FREQUENCY>` ioctl with a pointer to this structure. The
@@ -45,67 +44,49 @@
 
 To change the current tuner or modulator radio frequency applications
 initialize the ``tuner``, ``type`` and ``frequency`` fields, and the
-``reserved`` array of a struct :ref:`v4l2_frequency <v4l2-frequency>`
+``reserved`` array of a struct :c:type:`v4l2_frequency`
 and call the :ref:`VIDIOC_S_FREQUENCY <VIDIOC_G_FREQUENCY>` ioctl with a pointer to this
 structure. When the requested frequency is not possible the driver
 assumes the closest possible value. However :ref:`VIDIOC_S_FREQUENCY <VIDIOC_G_FREQUENCY>` is a
 write-only ioctl, it does not return the actual new frequency.
 
 
-.. _v4l2-frequency:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_frequency
 
 .. flat-table:: struct v4l2_frequency
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``tuner``
-
-       -  The tuner or modulator index number. This is the same value as in
-	  the struct :ref:`v4l2_input <v4l2-input>` ``tuner`` field and
-	  the struct :ref:`v4l2_tuner <v4l2-tuner>` ``index`` field, or
-	  the struct :ref:`v4l2_output <v4l2-output>` ``modulator`` field
-	  and the struct :ref:`v4l2_modulator <v4l2-modulator>` ``index``
-	  field.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``type``
-
-       -  The tuner type. This is the same value as in the struct
-	  :ref:`v4l2_tuner <v4l2-tuner>` ``type`` field. The type must be
-	  set to ``V4L2_TUNER_RADIO`` for ``/dev/radioX`` device nodes, and
-	  to ``V4L2_TUNER_ANALOG_TV`` for all others. Set this field to
-	  ``V4L2_TUNER_RADIO`` for modulators (currently only radio
-	  modulators are supported). See :ref:`v4l2-tuner-type`
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``frequency``
-
-       -  Tuning frequency in units of 62.5 kHz, or if the struct
-	  :ref:`v4l2_tuner <v4l2-tuner>` or struct
-	  :ref:`v4l2_modulator <v4l2-modulator>` ``capability`` flag
-	  ``V4L2_TUNER_CAP_LOW`` is set, in units of 62.5 Hz. A 1 Hz unit is
-	  used when the ``capability`` flag ``V4L2_TUNER_CAP_1HZ`` is set.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``reserved``\ [8]
-
-       -  Reserved for future extensions. Drivers and applications must set
-	  the array to zero.
+    * - __u32
+      - ``tuner``
+      - The tuner or modulator index number. This is the same value as in
+	the struct :c:type:`v4l2_input` ``tuner`` field and
+	the struct :c:type:`v4l2_tuner` ``index`` field, or
+	the struct :c:type:`v4l2_output` ``modulator`` field
+	and the struct :c:type:`v4l2_modulator` ``index``
+	field.
+    * - __u32
+      - ``type``
+      - The tuner type. This is the same value as in the struct
+	:c:type:`v4l2_tuner` ``type`` field. The type must be
+	set to ``V4L2_TUNER_RADIO`` for ``/dev/radioX`` device nodes, and
+	to ``V4L2_TUNER_ANALOG_TV`` for all others. Set this field to
+	``V4L2_TUNER_RADIO`` for modulators (currently only radio
+	modulators are supported). See :c:type:`v4l2_tuner_type`
+    * - __u32
+      - ``frequency``
+      - Tuning frequency in units of 62.5 kHz, or if the struct
+	:c:type:`v4l2_tuner` or struct
+	:c:type:`v4l2_modulator` ``capability`` flag
+	``V4L2_TUNER_CAP_LOW`` is set, in units of 62.5 Hz. A 1 Hz unit is
+	used when the ``capability`` flag ``V4L2_TUNER_CAP_1HZ`` is set.
+    * - __u32
+      - ``reserved``\ [8]
+      - Reserved for future extensions. Drivers and applications must set
+	the array to zero.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-g-input.rst b/Documentation/media/uapi/v4l/vidioc-g-input.rst
index 29e22f6..1364a91 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-input.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-input.rst
@@ -15,7 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, int *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_INPUT, int *argp )
+    :name: VIDIOC_G_INPUT
+
+.. c:function:: int ioctl( int fd, VIDIOC_S_INPUT, int *argp )
+    :name: VIDIOC_S_INPUT
 
 
 Arguments
@@ -24,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_INPUT, VIDIOC_S_INPUT
-
 ``argp``
 
 
@@ -36,7 +37,7 @@
 To query the current video input applications call the
 :ref:`VIDIOC_G_INPUT <VIDIOC_G_INPUT>` ioctl with a pointer to an integer where the driver
 stores the number of the input, as in the struct
-:ref:`v4l2_input <v4l2-input>` ``index`` field. This ioctl will fail
+:c:type:`v4l2_input` ``index`` field. This ioctl will fail
 only when there are no video inputs, returning ``EINVAL``.
 
 To select a video input applications store the number of the desired
diff --git a/Documentation/media/uapi/v4l/vidioc-g-jpegcomp.rst b/Documentation/media/uapi/v4l/vidioc-g-jpegcomp.rst
index f5bf8b7..8ba3530 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-jpegcomp.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-jpegcomp.rst
@@ -15,9 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, v4l2_jpegcompression *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_JPEGCOMP, v4l2_jpegcompression *argp )
+    :name: VIDIOC_G_JPEGCOMP
 
-.. cpp:function:: int ioctl( int fd, int request, const v4l2_jpegcompression *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_S_JPEGCOMP, const v4l2_jpegcompression *argp )
+    :name: VIDIOC_S_JPEGCOMP
 
 
 Arguments
@@ -26,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_JPEGCOMP, VIDIOC_S_JPEGCOMP
-
 ``argp``
 
 
@@ -55,77 +54,45 @@
 encoding. You usually do want to add them.
 
 
-.. _v4l2-jpegcompression:
+.. tabularcolumns:: |p{1.2cm}|p{3.0cm}|p{13.3cm}|
+
+.. c:type:: v4l2_jpegcompression
 
 .. flat-table:: struct v4l2_jpegcompression
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  int
-
-       -  ``quality``
-
-       -  Deprecated. If
-	  :ref:`V4L2_CID_JPEG_COMPRESSION_QUALITY <jpeg-quality-control>`
-	  control is exposed by a driver applications should use it instead
-	  and ignore this field.
-
-    -  .. row 2
-
-       -  int
-
-       -  ``APPn``
-
-       -
-
-    -  .. row 3
-
-       -  int
-
-       -  ``APP_len``
-
-       -
-
-    -  .. row 4
-
-       -  char
-
-       -  ``APP_data``\ [60]
-
-       -
-
-    -  .. row 5
-
-       -  int
-
-       -  ``COM_len``
-
-       -
-
-    -  .. row 6
-
-       -  char
-
-       -  ``COM_data``\ [60]
-
-       -
-
-    -  .. row 7
-
-       -  __u32
-
-       -  ``jpeg_markers``
-
-       -  See :ref:`jpeg-markers`. Deprecated. If
-	  :ref:`V4L2_CID_JPEG_ACTIVE_MARKER <jpeg-active-marker-control>`
-	  control is exposed by a driver applications should use it instead
-	  and ignore this field.
+    * - int
+      - ``quality``
+      - Deprecated. If
+	:ref:`V4L2_CID_JPEG_COMPRESSION_QUALITY <jpeg-quality-control>`
+	control is exposed by a driver applications should use it instead
+	and ignore this field.
+    * - int
+      - ``APPn``
+      -
+    * - int
+      - ``APP_len``
+      -
+    * - char
+      - ``APP_data``\ [60]
+      -
+    * - int
+      - ``COM_len``
+      -
+    * - char
+      - ``COM_data``\ [60]
+      -
+    * - __u32
+      - ``jpeg_markers``
+      - See :ref:`jpeg-markers`. Deprecated. If
+	:ref:`V4L2_CID_JPEG_ACTIVE_MARKER <jpeg-active-marker-control>`
+	control is exposed by a driver applications should use it instead
+	and ignore this field.
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
 
 .. _jpeg-markers:
 
@@ -134,46 +101,21 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_JPEG_MARKER_DHT``
-
-       -  (1<<3)
-
-       -  Define Huffman Tables
-
-    -  .. row 2
-
-       -  ``V4L2_JPEG_MARKER_DQT``
-
-       -  (1<<4)
-
-       -  Define Quantization Tables
-
-    -  .. row 3
-
-       -  ``V4L2_JPEG_MARKER_DRI``
-
-       -  (1<<5)
-
-       -  Define Restart Interval
-
-    -  .. row 4
-
-       -  ``V4L2_JPEG_MARKER_COM``
-
-       -  (1<<6)
-
-       -  Comment segment
-
-    -  .. row 5
-
-       -  ``V4L2_JPEG_MARKER_APP``
-
-       -  (1<<7)
-
-       -  App segment, driver will always use APP0
+    * - ``V4L2_JPEG_MARKER_DHT``
+      - (1<<3)
+      - Define Huffman Tables
+    * - ``V4L2_JPEG_MARKER_DQT``
+      - (1<<4)
+      - Define Quantization Tables
+    * - ``V4L2_JPEG_MARKER_DRI``
+      - (1<<5)
+      - Define Restart Interval
+    * - ``V4L2_JPEG_MARKER_COM``
+      - (1<<6)
+      - Comment segment
+    * - ``V4L2_JPEG_MARKER_APP``
+      - (1<<7)
+      - App segment, driver will always use APP0
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-g-modulator.rst b/Documentation/media/uapi/v4l/vidioc-g-modulator.rst
index a2e8c73..77d017e 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-modulator.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-modulator.rst
@@ -15,9 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_modulator *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_MODULATOR, struct v4l2_modulator *argp )
+    :name: VIDIOC_G_MODULATOR
 
-.. cpp:function:: int ioctl( int fd, int request, const struct v4l2_modulator *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_S_MODULATOR, const struct v4l2_modulator *argp )
+    :name: VIDIOC_S_MODULATOR
 
 
 Arguments
@@ -26,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_MODULATOR, VIDIOC_S_MODULATOR
-
 ``argp``
 
 
@@ -37,7 +36,7 @@
 
 To query the attributes of a modulator applications initialize the
 ``index`` field and zero out the ``reserved`` array of a struct
-:ref:`v4l2_modulator <v4l2-modulator>` and call the
+:c:type:`v4l2_modulator` and call the
 :ref:`VIDIOC_G_MODULATOR <VIDIOC_G_MODULATOR>` ioctl with a pointer to this structure. Drivers
 fill the rest of the structure or return an ``EINVAL`` error code when the
 index is out of bounds. To enumerate all modulators applications shall
@@ -61,101 +60,69 @@
 :ref:`VIDIOC_S_FREQUENCY <VIDIOC_G_FREQUENCY>` ioctl is available.
 
 
-.. _v4l2-modulator:
+.. tabularcolumns:: |p{2.9cm}|p{2.9cm}|p{5.8cm}|p{2.9cm}|p{3.0cm}|
+
+.. c:type:: v4l2_modulator
 
 .. flat-table:: struct v4l2_modulator
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2 1 1
 
+    * - __u32
+      - ``index``
+      - Identifies the modulator, set by the application.
+    * - __u8
+      - ``name``\ [32]
+      - Name of the modulator, a NUL-terminated ASCII string.
 
-    -  .. row 1
+	This information is intended for the user.
+    * - __u32
+      - ``capability``
+      - Modulator capability flags. No flags are defined for this field,
+	the tuner flags in struct :c:type:`v4l2_tuner` are
+	used accordingly. The audio flags indicate the ability to encode
+	audio subprograms. They will *not* change for example with the
+	current video standard.
+    * - __u32
+      - ``rangelow``
+      - The lowest tunable frequency in units of 62.5 KHz, or if the
+	``capability`` flag ``V4L2_TUNER_CAP_LOW`` is set, in units of
+	62.5 Hz, or if the ``capability`` flag ``V4L2_TUNER_CAP_1HZ`` is
+	set, in units of 1 Hz.
+    * - __u32
+      - ``rangehigh``
+      - The highest tunable frequency in units of 62.5 KHz, or if the
+	``capability`` flag ``V4L2_TUNER_CAP_LOW`` is set, in units of
+	62.5 Hz, or if the ``capability`` flag ``V4L2_TUNER_CAP_1HZ`` is
+	set, in units of 1 Hz.
+    * - __u32
+      - ``txsubchans``
+      - With this field applications can determine how audio sub-carriers
+	shall be modulated. It contains a set of flags as defined in
+	:ref:`modulator-txsubchans`.
 
-       -  __u32
+	.. note::
 
-       -  ``index``
+	   The tuner ``rxsubchans`` flags  are reused, but the
+	   semantics are different. Video output devices
+	   are assumed to have an analog or PCM audio input with 1-3
+	   channels. The ``txsubchans`` flags select one or more channels
+	   for modulation, together with some audio subprogram indicator,
+	   for example, a stereo pilot tone.
+    * - __u32
+      - ``type``
+      - :cspan:`2` Type of the modulator, see :c:type:`v4l2_tuner_type`.
+    * - __u32
+      - ``reserved``\ [3]
+      - Reserved for future extensions.
 
-       -  Identifies the modulator, set by the application.
-
-    -  .. row 2
-
-       -  __u8
-
-       -  ``name``\ [32]
-
-       -  Name of the modulator, a NUL-terminated ASCII string. This
-	  information is intended for the user.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``capability``
-
-       -  Modulator capability flags. No flags are defined for this field,
-	  the tuner flags in struct :ref:`v4l2_tuner <v4l2-tuner>` are
-	  used accordingly. The audio flags indicate the ability to encode
-	  audio subprograms. They will *not* change for example with the
-	  current video standard.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``rangelow``
-
-       -  The lowest tunable frequency in units of 62.5 KHz, or if the
-	  ``capability`` flag ``V4L2_TUNER_CAP_LOW`` is set, in units of
-	  62.5 Hz, or if the ``capability`` flag ``V4L2_TUNER_CAP_1HZ`` is
-	  set, in units of 1 Hz.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``rangehigh``
-
-       -  The highest tunable frequency in units of 62.5 KHz, or if the
-	  ``capability`` flag ``V4L2_TUNER_CAP_LOW`` is set, in units of
-	  62.5 Hz, or if the ``capability`` flag ``V4L2_TUNER_CAP_1HZ`` is
-	  set, in units of 1 Hz.
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``txsubchans``
-
-       -  With this field applications can determine how audio sub-carriers
-	  shall be modulated. It contains a set of flags as defined in
-	  :ref:`modulator-txsubchans`.
-
-	  .. note:: The tuner ``rxsubchans`` flags  are reused, but the
-	     semantics are different. Video output devices
-	     are assumed to have an analog or PCM audio input with 1-3
-	     channels. The ``txsubchans`` flags select one or more channels
-	     for modulation, together with some audio subprogram indicator,
-	     for example, a stereo pilot tone.
-
-    -  .. row 7
-
-       -  __u32
-
-       -  ``type``
-
-       -  :cspan:`2` Type of the modulator, see :ref:`v4l2-tuner-type`.
-
-    -  .. row 8
-
-       -  __u32
-
-       -  ``reserved``\ [3]
-
-       -  Reserved for future extensions. Drivers and applications must set
-	  the array to zero.
+	Drivers and applications must set the array to zero.
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _modulator-txsubchans:
 
 .. flat-table:: Modulator Audio Transmission Flags
@@ -163,86 +130,56 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_TUNER_SUB_MONO``
-
-       -  0x0001
-
-       -  Modulate channel 1 as mono audio, when the input has more
-	  channels, a down-mix of channel 1 and 2. This flag does not
-	  combine with ``V4L2_TUNER_SUB_STEREO`` or
-	  ``V4L2_TUNER_SUB_LANG1``.
-
-    -  .. row 2
-
-       -  ``V4L2_TUNER_SUB_STEREO``
-
-       -  0x0002
-
-       -  Modulate channel 1 and 2 as left and right channel of a stereo
-	  audio signal. When the input has only one channel or two channels
-	  and ``V4L2_TUNER_SUB_SAP`` is also set, channel 1 is encoded as
-	  left and right channel. This flag does not combine with
-	  ``V4L2_TUNER_SUB_MONO`` or ``V4L2_TUNER_SUB_LANG1``. When the
-	  driver does not support stereo audio it shall fall back to mono.
-
-    -  .. row 3
-
-       -  ``V4L2_TUNER_SUB_LANG1``
-
-       -  0x0008
-
-       -  Modulate channel 1 and 2 as primary and secondary language of a
-	  bilingual audio signal. When the input has only one channel it is
-	  used for both languages. It is not possible to encode the primary
-	  or secondary language only. This flag does not combine with
-	  ``V4L2_TUNER_SUB_MONO``, ``V4L2_TUNER_SUB_STEREO`` or
-	  ``V4L2_TUNER_SUB_SAP``. If the hardware does not support the
-	  respective audio matrix, or the current video standard does not
-	  permit bilingual audio the :ref:`VIDIOC_S_MODULATOR <VIDIOC_G_MODULATOR>` ioctl shall
-	  return an ``EINVAL`` error code and the driver shall fall back to mono
-	  or stereo mode.
-
-    -  .. row 4
-
-       -  ``V4L2_TUNER_SUB_LANG2``
-
-       -  0x0004
-
-       -  Same effect as ``V4L2_TUNER_SUB_SAP``.
-
-    -  .. row 5
-
-       -  ``V4L2_TUNER_SUB_SAP``
-
-       -  0x0004
-
-       -  When combined with ``V4L2_TUNER_SUB_MONO`` the first channel is
-	  encoded as mono audio, the last channel as Second Audio Program.
-	  When the input has only one channel it is used for both audio
-	  tracks. When the input has three channels the mono track is a
-	  down-mix of channel 1 and 2. When combined with
-	  ``V4L2_TUNER_SUB_STEREO`` channel 1 and 2 are encoded as left and
-	  right stereo audio, channel 3 as Second Audio Program. When the
-	  input has only two channels, the first is encoded as left and
-	  right channel and the second as SAP. When the input has only one
-	  channel it is used for all audio tracks. It is not possible to
-	  encode a Second Audio Program only. This flag must combine with
-	  ``V4L2_TUNER_SUB_MONO`` or ``V4L2_TUNER_SUB_STEREO``. If the
-	  hardware does not support the respective audio matrix, or the
-	  current video standard does not permit SAP the
-	  :ref:`VIDIOC_S_MODULATOR <VIDIOC_G_MODULATOR>` ioctl shall return an ``EINVAL`` error code and
-	  driver shall fall back to mono or stereo mode.
-
-    -  .. row 6
-
-       -  ``V4L2_TUNER_SUB_RDS``
-
-       -  0x0010
-
-       -  Enable the RDS encoder for a radio FM transmitter.
+    * - ``V4L2_TUNER_SUB_MONO``
+      - 0x0001
+      - Modulate channel 1 as mono audio, when the input has more
+	channels, a down-mix of channel 1 and 2. This flag does not
+	combine with ``V4L2_TUNER_SUB_STEREO`` or
+	``V4L2_TUNER_SUB_LANG1``.
+    * - ``V4L2_TUNER_SUB_STEREO``
+      - 0x0002
+      - Modulate channel 1 and 2 as left and right channel of a stereo
+	audio signal. When the input has only one channel or two channels
+	and ``V4L2_TUNER_SUB_SAP`` is also set, channel 1 is encoded as
+	left and right channel. This flag does not combine with
+	``V4L2_TUNER_SUB_MONO`` or ``V4L2_TUNER_SUB_LANG1``. When the
+	driver does not support stereo audio it shall fall back to mono.
+    * - ``V4L2_TUNER_SUB_LANG1``
+      - 0x0008
+      - Modulate channel 1 and 2 as primary and secondary language of a
+	bilingual audio signal. When the input has only one channel it is
+	used for both languages. It is not possible to encode the primary
+	or secondary language only. This flag does not combine with
+	``V4L2_TUNER_SUB_MONO``, ``V4L2_TUNER_SUB_STEREO`` or
+	``V4L2_TUNER_SUB_SAP``. If the hardware does not support the
+	respective audio matrix, or the current video standard does not
+	permit bilingual audio the :ref:`VIDIOC_S_MODULATOR <VIDIOC_G_MODULATOR>` ioctl shall
+	return an ``EINVAL`` error code and the driver shall fall back to mono
+	or stereo mode.
+    * - ``V4L2_TUNER_SUB_LANG2``
+      - 0x0004
+      - Same effect as ``V4L2_TUNER_SUB_SAP``.
+    * - ``V4L2_TUNER_SUB_SAP``
+      - 0x0004
+      - When combined with ``V4L2_TUNER_SUB_MONO`` the first channel is
+	encoded as mono audio, the last channel as Second Audio Program.
+	When the input has only one channel it is used for both audio
+	tracks. When the input has three channels the mono track is a
+	down-mix of channel 1 and 2. When combined with
+	``V4L2_TUNER_SUB_STEREO`` channel 1 and 2 are encoded as left and
+	right stereo audio, channel 3 as Second Audio Program. When the
+	input has only two channels, the first is encoded as left and
+	right channel and the second as SAP. When the input has only one
+	channel it is used for all audio tracks. It is not possible to
+	encode a Second Audio Program only. This flag must combine with
+	``V4L2_TUNER_SUB_MONO`` or ``V4L2_TUNER_SUB_STEREO``. If the
+	hardware does not support the respective audio matrix, or the
+	current video standard does not permit SAP the
+	:ref:`VIDIOC_S_MODULATOR <VIDIOC_G_MODULATOR>` ioctl shall return an ``EINVAL`` error code and
+	driver shall fall back to mono or stereo mode.
+    * - ``V4L2_TUNER_SUB_RDS``
+      - 0x0010
+      - Enable the RDS encoder for a radio FM transmitter.
 
 
 Return Value
@@ -253,5 +190,5 @@
 :ref:`Generic Error Codes <gen-errors>` chapter.
 
 EINVAL
-    The struct :ref:`v4l2_modulator <v4l2-modulator>` ``index`` is
+    The struct :c:type:`v4l2_modulator` ``index`` is
     out of bounds.
diff --git a/Documentation/media/uapi/v4l/vidioc-g-output.rst b/Documentation/media/uapi/v4l/vidioc-g-output.rst
index ae0ad57..7750948 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-output.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-output.rst
@@ -15,7 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, int *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_OUTPUT, int *argp )
+    :name: VIDIOC_G_OUTPUT
+
+.. c:function:: int ioctl( int fd, VIDIOC_S_OUTPUT, int *argp )
+    :name: VIDIOC_S_OUTPUT
 
 
 Arguments
@@ -24,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_OUTPUT, VIDIOC_S_OUTPUT
-
 ``argp``
 
 
@@ -36,7 +37,7 @@
 To query the current video output applications call the
 :ref:`VIDIOC_G_OUTPUT <VIDIOC_G_OUTPUT>` ioctl with a pointer to an integer where the driver
 stores the number of the output, as in the struct
-:ref:`v4l2_output <v4l2-output>` ``index`` field. This ioctl will
+:c:type:`v4l2_output` ``index`` field. This ioctl will
 fail only when there are no video outputs, returning the ``EINVAL`` error
 code.
 
diff --git a/Documentation/media/uapi/v4l/vidioc-g-parm.rst b/Documentation/media/uapi/v4l/vidioc-g-parm.rst
index 7116e0d..3b2e6e5 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-parm.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-parm.rst
@@ -15,7 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, v4l2_streamparm *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_PARM, v4l2_streamparm *argp )
+    :name: VIDIOC_G_PARM
+
+.. c:function:: int ioctl( int fd, VIDIOC_S_PARM, v4l2_streamparm *argp )
+    :name: VIDIOC_S_PARM
 
 
 Arguments
@@ -24,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_PARM, VIDIOC_S_PARM
-
 ``argp``
 
 
@@ -46,238 +47,158 @@
 
 To get and set the streaming parameters applications call the
 :ref:`VIDIOC_G_PARM <VIDIOC_G_PARM>` and :ref:`VIDIOC_S_PARM <VIDIOC_G_PARM>` ioctl, respectively. They take a
-pointer to a struct :ref:`struct v4l2_streamparm <v4l2-streamparm>` which contains a
+pointer to a struct :c:type:`v4l2_streamparm` which contains a
 union holding separate parameters for input and output devices.
 
 
-.. _v4l2-streamparm:
+.. tabularcolumns:: |p{3.5cm}|p{3.5cm}|p{3.5cm}|p{7.0cm}|
+
+.. c:type:: v4l2_streamparm
 
 .. flat-table:: struct v4l2_streamparm
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``type``
-
-       -
-       -  The buffer (stream) type, same as struct
-	  :ref:`v4l2_format <v4l2-format>` ``type``, set by the
-	  application. See :ref:`v4l2-buf-type`
-
-    -  .. row 2
-
-       -  union
-
-       -  ``parm``
-
-       -
-       -
-
-    -  .. row 3
-
-       -
-       -  struct :ref:`v4l2_captureparm <v4l2-captureparm>`
-
-       -  ``capture``
-
-       -  Parameters for capture devices, used when ``type`` is
-	  ``V4L2_BUF_TYPE_VIDEO_CAPTURE``.
-
-    -  .. row 4
-
-       -
-       -  struct :ref:`v4l2_outputparm <v4l2-outputparm>`
-
-       -  ``output``
-
-       -  Parameters for output devices, used when ``type`` is
-	  ``V4L2_BUF_TYPE_VIDEO_OUTPUT``.
-
-    -  .. row 5
-
-       -
-       -  __u8
-
-       -  ``raw_data``\ [200]
-
-       -  A place holder for future extensions.
+    * - __u32
+      - ``type``
+      -
+      - The buffer (stream) type, same as struct
+	:c:type:`v4l2_format` ``type``, set by the
+	application. See :c:type:`v4l2_buf_type`
+    * - union
+      - ``parm``
+      -
+      -
+    * -
+      - struct :c:type:`v4l2_captureparm`
+      - ``capture``
+      - Parameters for capture devices, used when ``type`` is
+	``V4L2_BUF_TYPE_VIDEO_CAPTURE``.
+    * -
+      - struct :c:type:`v4l2_outputparm`
+      - ``output``
+      - Parameters for output devices, used when ``type`` is
+	``V4L2_BUF_TYPE_VIDEO_OUTPUT``.
+    * -
+      - __u8
+      - ``raw_data``\ [200]
+      - A place holder for future extensions.
 
 
 
-.. _v4l2-captureparm:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_captureparm
 
 .. flat-table:: struct v4l2_captureparm
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
+    * - __u32
+      - ``capability``
+      - See :ref:`parm-caps`.
+    * - __u32
+      - ``capturemode``
+      - Set by drivers and applications, see :ref:`parm-flags`.
+    * - struct :c:type:`v4l2_fract`
+      - ``timeperframe``
+      - This is the desired period between successive frames captured by
+	the driver, in seconds. The field is intended to skip frames on
+	the driver side, saving I/O bandwidth.
 
-    -  .. row 1
+	Applications store here the desired frame period, drivers return
+	the actual frame period, which must be greater or equal to the
+	nominal frame period determined by the current video standard
+	(struct :c:type:`v4l2_standard` ``frameperiod``
+	field). Changing the video standard (also implicitly by switching
+	the video input) may reset this parameter to the nominal frame
+	period. To reset manually applications can just set this field to
+	zero.
 
-       -  __u32
-
-       -  ``capability``
-
-       -  See :ref:`parm-caps`.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``capturemode``
-
-       -  Set by drivers and applications, see :ref:`parm-flags`.
-
-    -  .. row 3
-
-       -  struct :ref:`v4l2_fract <v4l2-fract>`
-
-       -  ``timeperframe``
-
-       -  This is the desired period between successive frames captured by
-	  the driver, in seconds. The field is intended to skip frames on
-	  the driver side, saving I/O bandwidth.
-
-	  Applications store here the desired frame period, drivers return
-	  the actual frame period, which must be greater or equal to the
-	  nominal frame period determined by the current video standard
-	  (struct :ref:`v4l2_standard <v4l2-standard>` ``frameperiod``
-	  field). Changing the video standard (also implicitly by switching
-	  the video input) may reset this parameter to the nominal frame
-	  period. To reset manually applications can just set this field to
-	  zero.
-
-	  Drivers support this function only when they set the
-	  ``V4L2_CAP_TIMEPERFRAME`` flag in the ``capability`` field.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``extendedmode``
-
-       -  Custom (driver specific) streaming parameters. When unused,
-	  applications and drivers must set this field to zero. Applications
-	  using this field should check the driver name and version, see
-	  :ref:`querycap`.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``readbuffers``
-
-       -  Applications set this field to the desired number of buffers used
-	  internally by the driver in :ref:`read() <func-read>` mode.
-	  Drivers return the actual number of buffers. When an application
-	  requests zero buffers, drivers should just return the current
-	  setting rather than the minimum or an error code. For details see
-	  :ref:`rw`.
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``reserved``\ [4]
-
-       -  Reserved for future extensions. Drivers and applications must set
-	  the array to zero.
+	Drivers support this function only when they set the
+	``V4L2_CAP_TIMEPERFRAME`` flag in the ``capability`` field.
+    * - __u32
+      - ``extendedmode``
+      - Custom (driver specific) streaming parameters. When unused,
+	applications and drivers must set this field to zero. Applications
+	using this field should check the driver name and version, see
+	:ref:`querycap`.
+    * - __u32
+      - ``readbuffers``
+      - Applications set this field to the desired number of buffers used
+	internally by the driver in :ref:`read() <func-read>` mode.
+	Drivers return the actual number of buffers. When an application
+	requests zero buffers, drivers should just return the current
+	setting rather than the minimum or an error code. For details see
+	:ref:`rw`.
+    * - __u32
+      - ``reserved``\ [4]
+      - Reserved for future extensions. Drivers and applications must set
+	the array to zero.
 
 
 
-.. _v4l2-outputparm:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_outputparm
 
 .. flat-table:: struct v4l2_outputparm
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
+    * - __u32
+      - ``capability``
+      - See :ref:`parm-caps`.
+    * - __u32
+      - ``outputmode``
+      - Set by drivers and applications, see :ref:`parm-flags`.
+    * - struct :c:type:`v4l2_fract`
+      - ``timeperframe``
+      - This is the desired period between successive frames output by the
+	driver, in seconds.
+    * - :cspan:`2`
 
-    -  .. row 1
+	The field is intended to repeat frames on the driver side in
+	:ref:`write() <func-write>` mode (in streaming mode timestamps
+	can be used to throttle the output), saving I/O bandwidth.
 
-       -  __u32
+	Applications store here the desired frame period, drivers return
+	the actual frame period, which must be greater or equal to the
+	nominal frame period determined by the current video standard
+	(struct :c:type:`v4l2_standard` ``frameperiod``
+	field). Changing the video standard (also implicitly by switching
+	the video output) may reset this parameter to the nominal frame
+	period. To reset manually applications can just set this field to
+	zero.
 
-       -  ``capability``
-
-       -  See :ref:`parm-caps`.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``outputmode``
-
-       -  Set by drivers and applications, see :ref:`parm-flags`.
-
-    -  .. row 3
-
-       -  struct :ref:`v4l2_fract <v4l2-fract>`
-
-       -  ``timeperframe``
-
-       -  This is the desired period between successive frames output by the
-	  driver, in seconds.
-
-    -  .. row 4
-
-       -  :cspan:`2`
-
-	  The field is intended to repeat frames on the driver side in
-	  :ref:`write() <func-write>` mode (in streaming mode timestamps
-	  can be used to throttle the output), saving I/O bandwidth.
-
-	  Applications store here the desired frame period, drivers return
-	  the actual frame period, which must be greater or equal to the
-	  nominal frame period determined by the current video standard
-	  (struct :ref:`v4l2_standard <v4l2-standard>` ``frameperiod``
-	  field). Changing the video standard (also implicitly by switching
-	  the video output) may reset this parameter to the nominal frame
-	  period. To reset manually applications can just set this field to
-	  zero.
-
-	  Drivers support this function only when they set the
-	  ``V4L2_CAP_TIMEPERFRAME`` flag in the ``capability`` field.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``extendedmode``
-
-       -  Custom (driver specific) streaming parameters. When unused,
-	  applications and drivers must set this field to zero. Applications
-	  using this field should check the driver name and version, see
-	  :ref:`querycap`.
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``writebuffers``
-
-       -  Applications set this field to the desired number of buffers used
-	  internally by the driver in :ref:`write() <func-write>` mode. Drivers
-	  return the actual number of buffers. When an application requests
-	  zero buffers, drivers should just return the current setting
-	  rather than the minimum or an error code. For details see
-	  :ref:`rw`.
-
-    -  .. row 7
-
-       -  __u32
-
-       -  ``reserved``\ [4]
-
-       -  Reserved for future extensions. Drivers and applications must set
-	  the array to zero.
+	Drivers support this function only when they set the
+	``V4L2_CAP_TIMEPERFRAME`` flag in the ``capability`` field.
+    * - __u32
+      - ``extendedmode``
+      - Custom (driver specific) streaming parameters. When unused,
+	applications and drivers must set this field to zero. Applications
+	using this field should check the driver name and version, see
+	:ref:`querycap`.
+    * - __u32
+      - ``writebuffers``
+      - Applications set this field to the desired number of buffers used
+	internally by the driver in :ref:`write() <func-write>` mode. Drivers
+	return the actual number of buffers. When an application requests
+	zero buffers, drivers should just return the current setting
+	rather than the minimum or an error code. For details see
+	:ref:`rw`.
+    * - __u32
+      - ``reserved``\ [4]
+      - Reserved for future extensions. Drivers and applications must set
+	the array to zero.
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _parm-caps:
 
 .. flat-table:: Streaming Parameters Capabilites
@@ -285,18 +206,15 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_CAP_TIMEPERFRAME``
-
-       -  0x1000
-
-       -  The frame skipping/repeating controlled by the ``timeperframe``
-	  field is supported.
+    * - ``V4L2_CAP_TIMEPERFRAME``
+      - 0x1000
+      - The frame skipping/repeating controlled by the ``timeperframe``
+	field is supported.
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _parm-flags:
 
 .. flat-table:: Capture Parameters Flags
@@ -304,41 +222,36 @@
     :stub-columns: 0
     :widths:       3 1 4
 
+    * - ``V4L2_MODE_HIGHQUALITY``
+      - 0x0001
+      - High quality imaging mode. High quality mode is intended for still
+	imaging applications. The idea is to get the best possible image
+	quality that the hardware can deliver. It is not defined how the
+	driver writer may achieve that; it will depend on the hardware and
+	the ingenuity of the driver writer. High quality mode is a
+	different mode from the regular motion video capture modes. In
+	high quality mode:
 
-    -  .. row 1
+	-  The driver may be able to capture higher resolutions than for
+	   motion capture.
 
-       -  ``V4L2_MODE_HIGHQUALITY``
+	-  The driver may support fewer pixel formats than motion capture
+	   (eg; true color).
 
-       -  0x0001
+	-  The driver may capture and arithmetically combine multiple
+	   successive fields or frames to remove color edge artifacts and
+	   reduce the noise in the video data.
 
-       -  High quality imaging mode. High quality mode is intended for still
-	  imaging applications. The idea is to get the best possible image
-	  quality that the hardware can deliver. It is not defined how the
-	  driver writer may achieve that; it will depend on the hardware and
-	  the ingenuity of the driver writer. High quality mode is a
-	  different mode from the regular motion video capture modes. In
-	  high quality mode:
+	-  The driver may capture images in slices like a scanner in order
+	   to handle larger format images than would otherwise be
+	   possible.
 
-	  -  The driver may be able to capture higher resolutions than for
-	     motion capture.
+	-  An image capture operation may be significantly slower than
+	   motion capture.
 
-	  -  The driver may support fewer pixel formats than motion capture
-	     (eg; true color).
+	-  Moving objects in the image might have excessive motion blur.
 
-	  -  The driver may capture and arithmetically combine multiple
-	     successive fields or frames to remove color edge artifacts and
-	     reduce the noise in the video data.
-
-	  -  The driver may capture images in slices like a scanner in order
-	     to handle larger format images than would otherwise be
-	     possible.
-
-	  -  An image capture operation may be significantly slower than
-	     motion capture.
-
-	  -  Moving objects in the image might have excessive motion blur.
-
-	  -  Capture might only work through the :ref:`read() <func-read>` call.
+	-  Capture might only work through the :ref:`read() <func-read>` call.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-g-priority.rst b/Documentation/media/uapi/v4l/vidioc-g-priority.rst
index 9f774ce..a763988 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-priority.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-priority.rst
@@ -15,9 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, enum v4l2_priority *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_PRIORITY, enum v4l2_priority *argp )
+    :name: VIDIOC_G_PRIORITY
 
-.. cpp:function:: int ioctl( int fd, int request, const enum v4l2_priority *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_S_PRIORITY, const enum v4l2_priority *argp )
+    :name: VIDIOC_S_PRIORITY
 
 
 Arguments
@@ -26,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_PRIORITY, VIDIOC_S_PRIORITY
-
 ``argp``
     Pointer to an enum v4l2_priority type.
 
@@ -45,62 +44,39 @@
 with a pointer to this variable.
 
 
-.. _v4l2-priority:
+.. c:type:: v4l2_priority
+
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
 
 .. flat-table:: enum v4l2_priority
     :header-rows:  0
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_PRIORITY_UNSET``
-
-       -  0
-
-       -
-
-    -  .. row 2
-
-       -  ``V4L2_PRIORITY_BACKGROUND``
-
-       -  1
-
-       -  Lowest priority, usually applications running in background, for
-	  example monitoring VBI transmissions. A proxy application running
-	  in user space will be necessary if multiple applications want to
-	  read from a device at this priority.
-
-    -  .. row 3
-
-       -  ``V4L2_PRIORITY_INTERACTIVE``
-
-       -  2
-
-       -
-
-    -  .. row 4
-
-       -  ``V4L2_PRIORITY_DEFAULT``
-
-       -  2
-
-       -  Medium priority, usually applications started and interactively
-	  controlled by the user. For example TV viewers, Teletext browsers,
-	  or just "panel" applications to change the channel or video
-	  controls. This is the default priority unless an application
-	  requests another.
-
-    -  .. row 5
-
-       -  ``V4L2_PRIORITY_RECORD``
-
-       -  3
-
-       -  Highest priority. Only one file descriptor can have this priority,
-	  it blocks any other fd from changing device properties. Usually
-	  applications which must not be interrupted, like video recording.
+    * - ``V4L2_PRIORITY_UNSET``
+      - 0
+      -
+    * - ``V4L2_PRIORITY_BACKGROUND``
+      - 1
+      - Lowest priority, usually applications running in background, for
+	example monitoring VBI transmissions. A proxy application running
+	in user space will be necessary if multiple applications want to
+	read from a device at this priority.
+    * - ``V4L2_PRIORITY_INTERACTIVE``
+      - 2
+      -
+    * - ``V4L2_PRIORITY_DEFAULT``
+      - 2
+      - Medium priority, usually applications started and interactively
+	controlled by the user. For example TV viewers, Teletext browsers,
+	or just "panel" applications to change the channel or video
+	controls. This is the default priority unless an application
+	requests another.
+    * - ``V4L2_PRIORITY_RECORD``
+      - 3
+      - Highest priority. Only one file descriptor can have this priority,
+	it blocks any other fd from changing device properties. Usually
+	applications which must not be interrupted, like video recording.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-g-selection.rst b/Documentation/media/uapi/v4l/vidioc-g-selection.rst
index 953931f..3145a91 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-selection.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-selection.rst
@@ -15,7 +15,12 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_selection *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_SELECTION, struct v4l2_selection *argp )
+    :name: VIDIOC_G_SELECTION
+
+
+.. c:function:: int ioctl( int fd, VIDIOC_S_SELECTION, struct v4l2_selection *argp )
+    :name: VIDIOC_S_SELECTION
 
 
 Arguments
@@ -36,43 +41,43 @@
 The ioctls are used to query and configure selection rectangles.
 
 To query the cropping (composing) rectangle set struct
-:ref:`v4l2_selection <v4l2-selection>` ``type`` field to the
+:c:type:`v4l2_selection` ``type`` field to the
 respective buffer type. Do not use the multiplanar buffer types. Use
 ``V4L2_BUF_TYPE_VIDEO_CAPTURE`` instead of
 ``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE`` and use
 ``V4L2_BUF_TYPE_VIDEO_OUTPUT`` instead of
 ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``. The next step is setting the
-value of struct :ref:`v4l2_selection <v4l2-selection>` ``target``
+value of struct :c:type:`v4l2_selection` ``target``
 field to ``V4L2_SEL_TGT_CROP`` (``V4L2_SEL_TGT_COMPOSE``). Please refer
 to table :ref:`v4l2-selections-common` or :ref:`selection-api` for
 additional targets. The ``flags`` and ``reserved`` fields of struct
-:ref:`v4l2_selection <v4l2-selection>` are ignored and they must be
+:c:type:`v4l2_selection` are ignored and they must be
 filled with zeros. The driver fills the rest of the structure or returns
 EINVAL error code if incorrect buffer type or target was used. If
 cropping (composing) is not supported then the active rectangle is not
 mutable and it is always equal to the bounds rectangle. Finally, the
-struct :ref:`v4l2_rect <v4l2-rect>` ``r`` rectangle is filled with
+struct :c:type:`v4l2_rect` ``r`` rectangle is filled with
 the current cropping (composing) coordinates. The coordinates are
 expressed in driver-dependent units. The only exception are rectangles
 for images in raw formats, whose coordinates are always expressed in
 pixels.
 
 To change the cropping (composing) rectangle set the struct
-:ref:`v4l2_selection <v4l2-selection>` ``type`` field to the
+:c:type:`v4l2_selection` ``type`` field to the
 respective buffer type. Do not use multiplanar buffers. Use
 ``V4L2_BUF_TYPE_VIDEO_CAPTURE`` instead of
 ``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE``. Use
 ``V4L2_BUF_TYPE_VIDEO_OUTPUT`` instead of
 ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``. The next step is setting the
-value of struct :ref:`v4l2_selection <v4l2-selection>` ``target`` to
+value of struct :c:type:`v4l2_selection` ``target`` to
 ``V4L2_SEL_TGT_CROP`` (``V4L2_SEL_TGT_COMPOSE``). Please refer to table
 :ref:`v4l2-selections-common` or :ref:`selection-api` for additional
-targets. The struct :ref:`v4l2_rect <v4l2-rect>` ``r`` rectangle need
+targets. The struct :c:type:`v4l2_rect` ``r`` rectangle need
 to be set to the desired active area. Field struct
-:ref:`v4l2_selection <v4l2-selection>` ``reserved`` is ignored and
+:c:type:`v4l2_selection` ``reserved`` is ignored and
 must be filled with zeros. The driver may adjust coordinates of the
 requested rectangle. An application may introduce constraints to control
-rounding behaviour. The struct :ref:`v4l2_selection <v4l2-selection>`
+rounding behaviour. The struct :c:type:`v4l2_selection`
 ``flags`` field must be set to one of the following:
 
 -  ``0`` - The driver can adjust the rectangle size freely and shall
@@ -97,7 +102,7 @@
 following priority:
 
 1. Satisfy constraints from struct
-   :ref:`v4l2_selection <v4l2-selection>` ``flags``.
+   :c:type:`v4l2_selection` ``flags``.
 
 2. Adjust width, height, left, and top to hardware limits and
    alignments.
@@ -110,7 +115,7 @@
 5. Keep horizontal and vertical offset as close as possible to original
    ones.
 
-On success the struct :ref:`v4l2_rect <v4l2-rect>` ``r`` field
+On success the struct :c:type:`v4l2_rect` ``r`` field
 contains the adjusted rectangle. When the parameters are unsuitable the
 application may modify the cropping (composing) or image parameters and
 repeat the cycle until satisfactory parameters have been negotiated. If
@@ -135,57 +140,34 @@
 
 
 
-.. _v4l2-selection:
+.. c:type:: v4l2_selection
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_selection
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``type``
-
-       -  Type of the buffer (from enum
-	  :ref:`v4l2_buf_type <v4l2-buf-type>`).
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``target``
-
-       -  Used to select between
-	  :ref:`cropping and composing rectangles <v4l2-selections-common>`.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``flags``
-
-       -  Flags controlling the selection rectangle adjustments, refer to
-	  :ref:`selection flags <v4l2-selection-flags>`.
-
-    -  .. row 4
-
-       -  struct :ref:`v4l2_rect <v4l2-rect>`
-
-       -  ``r``
-
-       -  The selection rectangle.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``reserved[9]``
-
-       -  Reserved fields for future use. Drivers and applications must zero
-	  this array.
+    * - __u32
+      - ``type``
+      - Type of the buffer (from enum
+	:c:type:`v4l2_buf_type`).
+    * - __u32
+      - ``target``
+      - Used to select between
+	:ref:`cropping and composing rectangles <v4l2-selections-common>`.
+    * - __u32
+      - ``flags``
+      - Flags controlling the selection rectangle adjustments, refer to
+	:ref:`selection flags <v4l2-selection-flags>`.
+    * - struct :c:type:`v4l2_rect`
+      - ``r``
+      - The selection rectangle.
+    * - __u32
+      - ``reserved[9]``
+      - Reserved fields for future use. Drivers and applications must zero
+	this array.
 
 
 Return Value
@@ -200,10 +182,13 @@
     supported, or the ``flags`` argument is not valid.
 
 ERANGE
-    It is not possible to adjust struct :ref:`v4l2_rect <v4l2-rect>`
+    It is not possible to adjust struct :c:type:`v4l2_rect`
     ``r`` rectangle to satisfy all constraints given in the ``flags``
     argument.
 
+ENODATA
+    Selection is not supported for this input or output.
+
 EBUSY
     It is not possible to apply change of the selection rectangle at the
     moment. Usually because streaming is in progress.
diff --git a/Documentation/media/uapi/v4l/vidioc-g-sliced-vbi-cap.rst b/Documentation/media/uapi/v4l/vidioc-g-sliced-vbi-cap.rst
index f1f661d..d7e2b2f 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-sliced-vbi-cap.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-sliced-vbi-cap.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_sliced_vbi_cap *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_SLICED_VBI_CAP, struct v4l2_sliced_vbi_cap *argp )
+    :name: VIDIOC_G_SLICED_VBI_CAP
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_SLICED_VBI_CAP
-
 ``argp``
 
 
@@ -35,141 +33,98 @@
 
 To find out which data services are supported by a sliced VBI capture or
 output device, applications initialize the ``type`` field of a struct
-:ref:`v4l2_sliced_vbi_cap <v4l2-sliced-vbi-cap>`, clear the
+:c:type:`v4l2_sliced_vbi_cap`, clear the
 ``reserved`` array and call the :ref:`VIDIOC_G_SLICED_VBI_CAP <VIDIOC_G_SLICED_VBI_CAP>` ioctl. The
 driver fills in the remaining fields or returns an ``EINVAL`` error code if
 the sliced VBI API is unsupported or ``type`` is invalid.
 
-.. note:: The ``type`` field was added, and the ioctl changed from read-only
+.. note::
+
+   The ``type`` field was added, and the ioctl changed from read-only
    to write-read, in Linux 2.6.19.
 
 
-.. _v4l2-sliced-vbi-cap:
+.. c:type:: v4l2_sliced_vbi_cap
+
+.. tabularcolumns:: |p{1.2cm}|p{4.2cm}|p{4.1cm}|p{4.0cm}|p{4.0cm}|
 
 .. flat-table:: struct v4l2_sliced_vbi_cap
     :header-rows:  0
     :stub-columns: 0
     :widths:       3 3 2 2 2
 
+    * - __u16
+      - ``service_set``
+      - :cspan:`2` A set of all data services supported by the driver.
 
-    -  .. row 1
+	Equal to the union of all elements of the ``service_lines`` array.
+    * - __u16
+      - ``service_lines``\ [2][24]
+      - :cspan:`2` Each element of this array contains a set of data
+	services the hardware can look for or insert into a particular
+	scan line. Data services are defined in :ref:`vbi-services`.
+	Array indices map to ITU-R line numbers\ [#f1]_ as follows:
+    * -
+      -
+      - Element
+      - 525 line systems
+      - 625 line systems
+    * -
+      -
+      - ``service_lines``\ [0][1]
+      - 1
+      - 1
+    * -
+      -
+      - ``service_lines``\ [0][23]
+      - 23
+      - 23
+    * -
+      -
+      - ``service_lines``\ [1][1]
+      - 264
+      - 314
+    * -
+      -
+      - ``service_lines``\ [1][23]
+      - 286
+      - 336
+    * -
+    * -
+      -
+      - :cspan:`2` The number of VBI lines the hardware can capture or
+	output per frame, or the number of services it can identify on a
+	given line may be limited. For example on PAL line 16 the hardware
+	may be able to look for a VPS or Teletext signal, but not both at
+	the same time. Applications can learn about these limits using the
+	:ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl as described in
+	:ref:`sliced`.
+    * -
+    * -
+      -
+      - :cspan:`2` Drivers must set ``service_lines`` [0][0] and
+	``service_lines``\ [1][0] to zero.
+    * - __u32
+      - ``type``
+      - Type of the data stream, see :c:type:`v4l2_buf_type`. Should be
+	``V4L2_BUF_TYPE_SLICED_VBI_CAPTURE`` or
+	``V4L2_BUF_TYPE_SLICED_VBI_OUTPUT``.
+    * - __u32
+      - ``reserved``\ [3]
+      - :cspan:`2` This array is reserved for future extensions.
 
-       -  __u16
+	Applications and drivers must set it to zero.
 
-       -  ``service_set``
+.. [#f1]
 
-       -  :cspan:`2` A set of all data services supported by the driver.
-	  Equal to the union of all elements of the ``service_lines`` array.
-
-    -  .. row 2
-
-       -  __u16
-
-       -  ``service_lines``\ [2][24]
-
-       -  :cspan:`2` Each element of this array contains a set of data
-	  services the hardware can look for or insert into a particular
-	  scan line. Data services are defined in :ref:`vbi-services`.
-	  Array indices map to ITU-R line numbers (see also :ref:`vbi-525`
-	  and :ref:`vbi-625`) as follows:
-
-    -  .. row 3
-
-       -
-       -
-       -  Element
-
-       -  525 line systems
-
-       -  625 line systems
-
-    -  .. row 4
-
-       -
-       -
-       -  ``service_lines``\ [0][1]
-
-       -  1
-
-       -  1
-
-    -  .. row 5
-
-       -
-       -
-       -  ``service_lines``\ [0][23]
-
-       -  23
-
-       -  23
-
-    -  .. row 6
-
-       -
-       -
-       -  ``service_lines``\ [1][1]
-
-       -  264
-
-       -  314
-
-    -  .. row 7
-
-       -
-       -
-       -  ``service_lines``\ [1][23]
-
-       -  286
-
-       -  336
-
-    -  .. row 8
-
-       -
-
-    -  .. row 9
-
-       -
-       -
-       -  :cspan:`2` The number of VBI lines the hardware can capture or
-	  output per frame, or the number of services it can identify on a
-	  given line may be limited. For example on PAL line 16 the hardware
-	  may be able to look for a VPS or Teletext signal, but not both at
-	  the same time. Applications can learn about these limits using the
-	  :ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` ioctl as described in
-	  :ref:`sliced`.
-
-    -  .. row 10
-
-       -
-
-    -  .. row 11
-
-       -
-       -
-       -  :cspan:`2` Drivers must set ``service_lines`` [0][0] and
-	  ``service_lines``\ [1][0] to zero.
-
-    -  .. row 12
-
-       -  __u32
-
-       -  ``type``
-
-       -  Type of the data stream, see :ref:`v4l2-buf-type`. Should be
-	  ``V4L2_BUF_TYPE_SLICED_VBI_CAPTURE`` or
-	  ``V4L2_BUF_TYPE_SLICED_VBI_OUTPUT``.
-
-    -  .. row 13
-
-       -  __u32
-
-       -  ``reserved``\ [3]
-
-       -  :cspan:`2` This array is reserved for future extensions.
-	  Applications and drivers must set it to zero.
+   See also :ref:`vbi-525` and :ref:`vbi-625`.
 
 
+.. raw:: latex
+
+    \begin{adjustbox}{width=\columnwidth}
+
+.. tabularcolumns:: |p{5.0cm}|p{1.4cm}|p{3.0cm}|p{2.5cm}|p{9.0cm}|
 
 .. _vbi-services:
 
@@ -178,91 +133,54 @@
     :stub-columns: 0
     :widths:       2 1 1 2 2
 
+    * - Symbol
+      - Value
+      - Reference
+      - Lines, usually
+      - Payload
+    * - ``V4L2_SLICED_TELETEXT_B`` (Teletext System B)
+      - 0x0001
+      - :ref:`ets300706`,
 
-    -  .. row 1
+	:ref:`itu653`
+      - PAL/SECAM line 7-22, 320-335 (second field 7-22)
+      - Last 42 of the 45 byte Teletext packet, that is without clock
+	run-in and framing code, lsb first transmitted.
+    * - ``V4L2_SLICED_VPS``
+      - 0x0400
+      - :ref:`ets300231`
+      - PAL line 16
+      - Byte number 3 to 15 according to Figure 9 of ETS 300 231, lsb
+	first transmitted.
+    * - ``V4L2_SLICED_CAPTION_525``
+      - 0x1000
+      - :ref:`cea608`
+      - NTSC line 21, 284 (second field 21)
+      - Two bytes in transmission order, including parity bit, lsb first
+	transmitted.
+    * - ``V4L2_SLICED_WSS_625``
+      - 0x4000
+      - :ref:`en300294`,
 
-       -  Symbol
+	:ref:`itu1119`
+      - PAL/SECAM line 23
+      -
 
-       -  Value
+	::
 
-       -  Reference
+	    Byte        0                 1
+		 msb         lsb  msb           lsb
+	    Bit  7 6 5 4 3 2 1 0  x x 13 12 11 10 9
+    * - ``V4L2_SLICED_VBI_525``
+      - 0x1000
+      - :cspan:`2` Set of services applicable to 525 line systems.
+    * - ``V4L2_SLICED_VBI_625``
+      - 0x4401
+      - :cspan:`2` Set of services applicable to 625 line systems.
 
-       -  Lines, usually
+.. raw:: latex
 
-       -  Payload
-
-    -  .. row 2
-
-       -  ``V4L2_SLICED_TELETEXT_B`` (Teletext System B)
-
-       -  0x0001
-
-       -  :ref:`ets300706`, :ref:`itu653`
-
-       -  PAL/SECAM line 7-22, 320-335 (second field 7-22)
-
-       -  Last 42 of the 45 byte Teletext packet, that is without clock
-	  run-in and framing code, lsb first transmitted.
-
-    -  .. row 3
-
-       -  ``V4L2_SLICED_VPS``
-
-       -  0x0400
-
-       -  :ref:`ets300231`
-
-       -  PAL line 16
-
-       -  Byte number 3 to 15 according to Figure 9 of ETS 300 231, lsb
-	  first transmitted.
-
-    -  .. row 4
-
-       -  ``V4L2_SLICED_CAPTION_525``
-
-       -  0x1000
-
-       -  :ref:`cea608`
-
-       -  NTSC line 21, 284 (second field 21)
-
-       -  Two bytes in transmission order, including parity bit, lsb first
-	  transmitted.
-
-    -  .. row 5
-
-       -  ``V4L2_SLICED_WSS_625``
-
-       -  0x4000
-
-       -  :ref:`en300294`, :ref:`itu1119`
-
-       -  PAL/SECAM line 23
-
-       -
-
-	  ::
-
-	      Byte        0                 1
-		   msb         lsb  msb           lsb
-	      Bit  7 6 5 4 3 2 1 0  x x 13 12 11 10 9
-
-    -  .. row 6
-
-       -  ``V4L2_SLICED_VBI_525``
-
-       -  0x1000
-
-       -  :cspan:`2` Set of services applicable to 525 line systems.
-
-    -  .. row 7
-
-       -  ``V4L2_SLICED_VBI_625``
-
-       -  0x4401
-
-       -  :cspan:`2` Set of services applicable to 625 line systems.
+    \end{adjustbox}\newline\newline
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-g-std.rst b/Documentation/media/uapi/v4l/vidioc-g-std.rst
index 5c2b861..cd856ad 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-std.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-std.rst
@@ -15,9 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, v4l2_std_id *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_STD, v4l2_std_id *argp )
+    :name: VIDIOC_G_STD
 
-.. cpp:function:: int ioctl( int fd, int request, const v4l2_std_id *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_S_STD, const v4l2_std_id *argp )
+    :name: VIDIOC_S_STD
 
 
 Arguments
@@ -26,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_STD, VIDIOC_S_STD
-
 ``argp``
 
 
@@ -39,9 +38,9 @@
 :ref:`VIDIOC_G_STD <VIDIOC_G_STD>` and :ref:`VIDIOC_S_STD <VIDIOC_G_STD>` ioctls which take a pointer to a
 :ref:`v4l2_std_id <v4l2-std-id>` type as argument. :ref:`VIDIOC_G_STD <VIDIOC_G_STD>`
 can return a single flag or a set of flags as in struct
-:ref:`v4l2_standard <v4l2-standard>` field ``id``. The flags must be
+:c:type:`v4l2_standard` field ``id``. The flags must be
 unambiguous such that they appear in only one enumerated
-:ref:`struct v4l2_standard <v4l2-standard>` structure.
+struct :c:type:`v4l2_standard` structure.
 
 :ref:`VIDIOC_S_STD <VIDIOC_G_STD>` accepts one or more flags, being a write-only ioctl it
 does not return the actual new standard as :ref:`VIDIOC_G_STD <VIDIOC_G_STD>` does. When
diff --git a/Documentation/media/uapi/v4l/vidioc-g-tuner.rst b/Documentation/media/uapi/v4l/vidioc-g-tuner.rst
index 614db06..e8aa8cd 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-tuner.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-tuner.rst
@@ -15,9 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_tuner *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_G_TUNER, struct v4l2_tuner *argp )
+    :name: VIDIOC_G_TUNER
 
-.. cpp:function:: int ioctl( int fd, int request, const struct v4l2_tuner *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_S_TUNER, const struct v4l2_tuner *argp )
+    :name: VIDIOC_S_TUNER
 
 
 Arguments
@@ -26,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_G_TUNER, VIDIOC_S_TUNER
-
 ``argp``
 
 
@@ -37,7 +36,7 @@
 
 To query the attributes of a tuner applications initialize the ``index``
 field and zero out the ``reserved`` array of a struct
-:ref:`v4l2_tuner <v4l2-tuner>` and call the ``VIDIOC_G_TUNER`` ioctl
+:c:type:`v4l2_tuner` and call the ``VIDIOC_G_TUNER`` ioctl
 with a pointer to this structure. Drivers fill the rest of the structure
 or return an ``EINVAL`` error code when the index is out of bounds. To
 enumerate all tuners applications shall begin at index zero,
@@ -60,397 +59,248 @@
 :ref:`VIDIOC_S_FREQUENCY <VIDIOC_G_FREQUENCY>` ioctl is available.
 
 
-.. _v4l2-tuner:
+ .. tabularcolumns:: |p{1.3cm}|p{3.0cm}|p{6.6cm}|p{6.6cm}|
+
+.. c:type:: v4l2_tuner
+
+.. cssclass:: longtable
 
 .. flat-table:: struct v4l2_tuner
     :header-rows:  0
     :stub-columns: 0
 
+    * - __u32
+      - ``index``
+      - :cspan:`1` Identifies the tuner, set by the application.
+    * - __u8
+      - ``name``\ [32]
+      - :cspan:`1`
 
-    -  .. row 1
+	Name of the tuner, a NUL-terminated ASCII string.
 
-       -  __u32
+	This information is intended for the user.
+    * - __u32
+      - ``type``
+      - :cspan:`1` Type of the tuner, see :c:type:`v4l2_tuner_type`.
+    * - __u32
+      - ``capability``
+      - :cspan:`1`
 
-       -  ``index``
+	Tuner capability flags, see :ref:`tuner-capability`. Audio flags
+	indicate the ability to decode audio subprograms. They will *not*
+	change, for example with the current video standard.
 
-       -  :cspan:`1` Identifies the tuner, set by the application.
+	When the structure refers to a radio tuner the
+	``V4L2_TUNER_CAP_LANG1``, ``V4L2_TUNER_CAP_LANG2`` and
+	``V4L2_TUNER_CAP_NORM`` flags can't be used.
 
-    -  .. row 2
+	If multiple frequency bands are supported, then ``capability`` is
+	the union of all ``capability`` fields of each struct
+	:c:type:`v4l2_frequency_band`.
+    * - __u32
+      - ``rangelow``
+      - :cspan:`1` The lowest tunable frequency in units of 62.5 kHz, or
+	if the ``capability`` flag ``V4L2_TUNER_CAP_LOW`` is set, in units
+	of 62.5 Hz, or if the ``capability`` flag ``V4L2_TUNER_CAP_1HZ``
+	is set, in units of 1 Hz. If multiple frequency bands are
+	supported, then ``rangelow`` is the lowest frequency of all the
+	frequency bands.
+    * - __u32
+      - ``rangehigh``
+      - :cspan:`1` The highest tunable frequency in units of 62.5 kHz,
+	or if the ``capability`` flag ``V4L2_TUNER_CAP_LOW`` is set, in
+	units of 62.5 Hz, or if the ``capability`` flag
+	``V4L2_TUNER_CAP_1HZ`` is set, in units of 1 Hz. If multiple
+	frequency bands are supported, then ``rangehigh`` is the highest
+	frequency of all the frequency bands.
+    * - __u32
+      - ``rxsubchans``
+      - :cspan:`1`
 
-       -  __u8
+	Some tuners or audio decoders can determine the received audio
+	subprograms by analyzing audio carriers, pilot tones or other
+	indicators. To pass this information drivers set flags defined in
+	:ref:`tuner-rxsubchans` in this field. For example:
+    * -
+      -
+      - ``V4L2_TUNER_SUB_MONO``
+      - receiving mono audio
+    * -
+      -
+      - ``STEREO | SAP``
+      - receiving stereo audio and a secondary audio program
+    * -
+      -
+      - ``MONO | STEREO``
+      - receiving mono or stereo audio, the hardware cannot distinguish
+    * -
+      -
+      - ``LANG1 | LANG2``
+      - receiving bilingual audio
+    * -
+      -
+      - ``MONO | STEREO | LANG1 | LANG2``
+      - receiving mono, stereo or bilingual audio
+    * -
+      -
+      - :cspan:`1`
 
-       -  ``name``\ [32]
+	When the ``V4L2_TUNER_CAP_STEREO``, ``_LANG1``, ``_LANG2`` or
+	``_SAP`` flag is cleared in the ``capability`` field, the
+	corresponding ``V4L2_TUNER_SUB_`` flag must not be set here.
 
-       -  :cspan:`1`
+	This field is valid only if this is the tuner of the current video
+	input, or when the structure refers to a radio tuner.
+    * - __u32
+      - ``audmode``
+      - :cspan:`1`
 
-	  Name of the tuner, a NUL-terminated ASCII string. This information
-	  is intended for the user.
+	The selected audio mode, see :ref:`tuner-audmode` for valid
+	values. The audio mode does not affect audio subprogram detection,
+	and like a :ref:`control` it does not automatically
+	change unless the requested mode is invalid or unsupported. See
+	:ref:`tuner-matrix` for possible results when the selected and
+	received audio programs do not match.
 
-    -  .. row 3
+	Currently this is the only field of struct
+	struct :c:type:`v4l2_tuner` applications can change.
+    * - __u32
+      - ``signal``
+      - :cspan:`1` The signal strength if known.
 
-       -  __u32
+	Ranging from 0 to 65535. Higher values indicate a better signal.
+    * - __s32
+      - ``afc``
+      - :cspan:`1` Automatic frequency control.
 
-       -  ``type``
+	When the ``afc`` value is negative, the frequency is too
+	low, when positive too high.
+    * - __u32
+      - ``reserved``\ [4]
+      - :cspan:`1` Reserved for future extensions.
 
-       -  :cspan:`1` Type of the tuner, see :ref:`v4l2-tuner-type`.
+	Drivers and applications must set the array to zero.
 
-    -  .. row 4
 
-       -  __u32
 
-       -  ``capability``
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
 
-       -  :cspan:`1`
-
-	  Tuner capability flags, see :ref:`tuner-capability`. Audio flags
-	  indicate the ability to decode audio subprograms. They will *not*
-	  change, for example with the current video standard.
-
-	  When the structure refers to a radio tuner the
-	  ``V4L2_TUNER_CAP_LANG1``, ``V4L2_TUNER_CAP_LANG2`` and
-	  ``V4L2_TUNER_CAP_NORM`` flags can't be used.
-
-	  If multiple frequency bands are supported, then ``capability`` is
-	  the union of all ``capability`` fields of each struct
-	  :ref:`v4l2_frequency_band <v4l2-frequency-band>`.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``rangelow``
-
-       -  :cspan:`1` The lowest tunable frequency in units of 62.5 kHz, or
-	  if the ``capability`` flag ``V4L2_TUNER_CAP_LOW`` is set, in units
-	  of 62.5 Hz, or if the ``capability`` flag ``V4L2_TUNER_CAP_1HZ``
-	  is set, in units of 1 Hz. If multiple frequency bands are
-	  supported, then ``rangelow`` is the lowest frequency of all the
-	  frequency bands.
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``rangehigh``
-
-       -  :cspan:`1` The highest tunable frequency in units of 62.5 kHz,
-	  or if the ``capability`` flag ``V4L2_TUNER_CAP_LOW`` is set, in
-	  units of 62.5 Hz, or if the ``capability`` flag
-	  ``V4L2_TUNER_CAP_1HZ`` is set, in units of 1 Hz. If multiple
-	  frequency bands are supported, then ``rangehigh`` is the highest
-	  frequency of all the frequency bands.
-
-    -  .. row 7
-
-       -  __u32
-
-       -  ``rxsubchans``
-
-       -  :cspan:`1`
-
-	  Some tuners or audio decoders can determine the received audio
-	  subprograms by analyzing audio carriers, pilot tones or other
-	  indicators. To pass this information drivers set flags defined in
-	  :ref:`tuner-rxsubchans` in this field. For example:
-
-    -  .. row 8
-
-       -
-       -
-       -  ``V4L2_TUNER_SUB_MONO``
-
-       -  receiving mono audio
-
-    -  .. row 9
-
-       -
-       -
-       -  ``STEREO | SAP``
-
-       -  receiving stereo audio and a secondary audio program
-
-    -  .. row 10
-
-       -
-       -
-       -  ``MONO | STEREO``
-
-       -  receiving mono or stereo audio, the hardware cannot distinguish
-
-    -  .. row 11
-
-       -
-       -
-       -  ``LANG1 | LANG2``
-
-       -  receiving bilingual audio
-
-    -  .. row 12
-
-       -
-       -
-       -  ``MONO | STEREO | LANG1 | LANG2``
-
-       -  receiving mono, stereo or bilingual audio
-
-    -  .. row 13
-
-       -
-       -
-       -  :cspan:`1`
-
-	  When the ``V4L2_TUNER_CAP_STEREO``, ``_LANG1``, ``_LANG2`` or
-	  ``_SAP`` flag is cleared in the ``capability`` field, the
-	  corresponding ``V4L2_TUNER_SUB_`` flag must not be set here.
-
-	  This field is valid only if this is the tuner of the current video
-	  input, or when the structure refers to a radio tuner.
-
-    -  .. row 14
-
-       -  __u32
-
-       -  ``audmode``
-
-       -  :cspan:`1`
-
-	  The selected audio mode, see :ref:`tuner-audmode` for valid
-	  values. The audio mode does not affect audio subprogram detection,
-	  and like a :ref:`control` it does not automatically
-	  change unless the requested mode is invalid or unsupported. See
-	  :ref:`tuner-matrix` for possible results when the selected and
-	  received audio programs do not match.
-
-	  Currently this is the only field of struct
-	  :ref:`struct v4l2_tuner <v4l2-tuner>` applications can change.
-
-    -  .. row 15
-
-       -  __u32
-
-       -  ``signal``
-
-       -  :cspan:`1` The signal strength if known, ranging from 0 to
-	  65535. Higher values indicate a better signal.
-
-    -  .. row 16
-
-       -  __s32
-
-       -  ``afc``
-
-       -  :cspan:`1` Automatic frequency control: When the ``afc`` value
-	  is negative, the frequency is too low, when positive too high.
-
-    -  .. row 17
-
-       -  __u32
-
-       -  ``reserved``\ [4]
-
-       -  :cspan:`1` Reserved for future extensions. Drivers and
-	  applications must set the array to zero.
-
-
-
-.. _v4l2-tuner-type:
+.. c:type:: v4l2_tuner_type
 
 .. flat-table:: enum v4l2_tuner_type
     :header-rows:  0
     :stub-columns: 0
-    :widths:       3 1 4
+    :widths:       3 1 6
+
+    * - ``V4L2_TUNER_RADIO``
+      - 1
+      - Tuner supports radio
+    * - ``V4L2_TUNER_ANALOG_TV``
+      - 2
+      - Tuner supports analog TV
+    * - ``V4L2_TUNER_SDR``
+      - 4
+      - Tuner controls the A/D and/or D/A block of a
+	Sofware Digital Radio (SDR)
+    * - ``V4L2_TUNER_RF``
+      - 5
+      - Tuner controls the RF part of a Sofware Digital Radio (SDR)
 
 
-    -  .. row 1
-
-       -  ``V4L2_TUNER_RADIO``
-
-       -  1
-
-       -
-
-    -  .. row 2
-
-       -  ``V4L2_TUNER_ANALOG_TV``
-
-       -  2
-
-       -
-
-    -  .. row 3
-
-       -  ``V4L2_TUNER_SDR``
-
-       -  4
-
-       -
-
-    -  .. row 4
-
-       -  ``V4L2_TUNER_RF``
-
-       -  5
-
-       -
-
-
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
 
 .. _tuner-capability:
 
+.. cssclass:: longtable
+
 .. flat-table:: Tuner and Modulator Capability Flags
     :header-rows:  0
     :stub-columns: 0
     :widths:       3 1 4
 
+    * - ``V4L2_TUNER_CAP_LOW``
+      - 0x0001
+      - When set, tuning frequencies are expressed in units of 62.5 Hz
+	instead of 62.5 kHz.
+    * - ``V4L2_TUNER_CAP_NORM``
+      - 0x0002
+      - This is a multi-standard tuner; the video standard can or must be
+	switched. (B/G PAL tuners for example are typically not considered
+	multi-standard because the video standard is automatically
+	determined from the frequency band.) The set of supported video
+	standards is available from the struct
+	:c:type:`v4l2_input` pointing to this tuner, see the
+	description of ioctl :ref:`VIDIOC_ENUMINPUT`
+	for details. Only ``V4L2_TUNER_ANALOG_TV`` tuners can have this
+	capability.
+    * - ``V4L2_TUNER_CAP_HWSEEK_BOUNDED``
+      - 0x0004
+      - If set, then this tuner supports the hardware seek functionality
+	where the seek stops when it reaches the end of the frequency
+	range.
+    * - ``V4L2_TUNER_CAP_HWSEEK_WRAP``
+      - 0x0008
+      - If set, then this tuner supports the hardware seek functionality
+	where the seek wraps around when it reaches the end of the
+	frequency range.
+    * - ``V4L2_TUNER_CAP_STEREO``
+      - 0x0010
+      - Stereo audio reception is supported.
+    * - ``V4L2_TUNER_CAP_LANG1``
+      - 0x0040
+      - Reception of the primary language of a bilingual audio program is
+	supported. Bilingual audio is a feature of two-channel systems,
+	transmitting the primary language monaural on the main audio
+	carrier and a secondary language monaural on a second carrier.
+	Only ``V4L2_TUNER_ANALOG_TV`` tuners can have this capability.
+    * - ``V4L2_TUNER_CAP_LANG2``
+      - 0x0020
+      - Reception of the secondary language of a bilingual audio program
+	is supported. Only ``V4L2_TUNER_ANALOG_TV`` tuners can have this
+	capability.
+    * - ``V4L2_TUNER_CAP_SAP``
+      - 0x0020
+      - Reception of a secondary audio program is supported. This is a
+	feature of the BTSC system which accompanies the NTSC video
+	standard. Two audio carriers are available for mono or stereo
+	transmissions of a primary language, and an independent third
+	carrier for a monaural secondary language. Only
+	``V4L2_TUNER_ANALOG_TV`` tuners can have this capability.
 
-    -  .. row 1
+	.. note::
 
-       -  ``V4L2_TUNER_CAP_LOW``
-
-       -  0x0001
-
-       -  When set, tuning frequencies are expressed in units of 62.5 Hz
-	  instead of 62.5 kHz.
-
-    -  .. row 2
-
-       -  ``V4L2_TUNER_CAP_NORM``
-
-       -  0x0002
-
-       -  This is a multi-standard tuner; the video standard can or must be
-	  switched. (B/G PAL tuners for example are typically not considered
-	  multi-standard because the video standard is automatically
-	  determined from the frequency band.) The set of supported video
-	  standards is available from the struct
-	  :ref:`v4l2_input <v4l2-input>` pointing to this tuner, see the
-	  description of ioctl :ref:`VIDIOC_ENUMINPUT`
-	  for details. Only ``V4L2_TUNER_ANALOG_TV`` tuners can have this
-	  capability.
-
-    -  .. row 3
-
-       -  ``V4L2_TUNER_CAP_HWSEEK_BOUNDED``
-
-       -  0x0004
-
-       -  If set, then this tuner supports the hardware seek functionality
-	  where the seek stops when it reaches the end of the frequency
-	  range.
-
-    -  .. row 4
-
-       -  ``V4L2_TUNER_CAP_HWSEEK_WRAP``
-
-       -  0x0008
-
-       -  If set, then this tuner supports the hardware seek functionality
-	  where the seek wraps around when it reaches the end of the
-	  frequency range.
-
-    -  .. row 5
-
-       -  ``V4L2_TUNER_CAP_STEREO``
-
-       -  0x0010
-
-       -  Stereo audio reception is supported.
-
-    -  .. row 6
-
-       -  ``V4L2_TUNER_CAP_LANG1``
-
-       -  0x0040
-
-       -  Reception of the primary language of a bilingual audio program is
-	  supported. Bilingual audio is a feature of two-channel systems,
-	  transmitting the primary language monaural on the main audio
-	  carrier and a secondary language monaural on a second carrier.
-	  Only ``V4L2_TUNER_ANALOG_TV`` tuners can have this capability.
-
-    -  .. row 7
-
-       -  ``V4L2_TUNER_CAP_LANG2``
-
-       -  0x0020
-
-       -  Reception of the secondary language of a bilingual audio program
-	  is supported. Only ``V4L2_TUNER_ANALOG_TV`` tuners can have this
-	  capability.
-
-    -  .. row 8
-
-       -  ``V4L2_TUNER_CAP_SAP``
-
-       -  0x0020
-
-       -  Reception of a secondary audio program is supported. This is a
-	  feature of the BTSC system which accompanies the NTSC video
-	  standard. Two audio carriers are available for mono or stereo
-	  transmissions of a primary language, and an independent third
-	  carrier for a monaural secondary language. Only
-	  ``V4L2_TUNER_ANALOG_TV`` tuners can have this capability.
-
-	  .. note:: The ``V4L2_TUNER_CAP_LANG2`` and ``V4L2_TUNER_CAP_SAP``
-	     flags are synonyms. ``V4L2_TUNER_CAP_SAP`` applies when the tuner
-	     supports the ``V4L2_STD_NTSC_M`` video standard.
-
-    -  .. row 9
-
-       -  ``V4L2_TUNER_CAP_RDS``
-
-       -  0x0080
-
-       -  RDS capture is supported. This capability is only valid for radio
-	  tuners.
-
-    -  .. row 10
-
-       -  ``V4L2_TUNER_CAP_RDS_BLOCK_IO``
-
-       -  0x0100
-
-       -  The RDS data is passed as unparsed RDS blocks.
-
-    -  .. row 11
-
-       -  ``V4L2_TUNER_CAP_RDS_CONTROLS``
-
-       -  0x0200
-
-       -  The RDS data is parsed by the hardware and set via controls.
-
-    -  .. row 12
-
-       -  ``V4L2_TUNER_CAP_FREQ_BANDS``
-
-       -  0x0400
-
-       -  The :ref:`VIDIOC_ENUM_FREQ_BANDS`
-	  ioctl can be used to enumerate the available frequency bands.
-
-    -  .. row 13
-
-       -  ``V4L2_TUNER_CAP_HWSEEK_PROG_LIM``
-
-       -  0x0800
-
-       -  The range to search when using the hardware seek functionality is
-	  programmable, see
-	  :ref:`VIDIOC_S_HW_FREQ_SEEK` for
-	  details.
-
-    -  .. row 14
-
-       -  ``V4L2_TUNER_CAP_1HZ``
-
-       -  0x1000
-
-       -  When set, tuning frequencies are expressed in units of 1 Hz
-	  instead of 62.5 kHz.
+	   The ``V4L2_TUNER_CAP_LANG2`` and ``V4L2_TUNER_CAP_SAP``
+	   flags are synonyms. ``V4L2_TUNER_CAP_SAP`` applies when the tuner
+	   supports the ``V4L2_STD_NTSC_M`` video standard.
+    * - ``V4L2_TUNER_CAP_RDS``
+      - 0x0080
+      - RDS capture is supported. This capability is only valid for radio
+	tuners.
+    * - ``V4L2_TUNER_CAP_RDS_BLOCK_IO``
+      - 0x0100
+      - The RDS data is passed as unparsed RDS blocks.
+    * - ``V4L2_TUNER_CAP_RDS_CONTROLS``
+      - 0x0200
+      - The RDS data is parsed by the hardware and set via controls.
+    * - ``V4L2_TUNER_CAP_FREQ_BANDS``
+      - 0x0400
+      - The :ref:`VIDIOC_ENUM_FREQ_BANDS`
+	ioctl can be used to enumerate the available frequency bands.
+    * - ``V4L2_TUNER_CAP_HWSEEK_PROG_LIM``
+      - 0x0800
+      - The range to search when using the hardware seek functionality is
+	programmable, see
+	:ref:`VIDIOC_S_HW_FREQ_SEEK` for
+	details.
+    * - ``V4L2_TUNER_CAP_1HZ``
+      - 0x1000
+      - When set, tuning frequencies are expressed in units of 1 Hz
+	instead of 62.5 kHz.
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _tuner-rxsubchans:
 
 .. flat-table:: Tuner Audio Reception Flags
@@ -458,64 +308,38 @@
     :stub-columns: 0
     :widths:       3 1 4
 
+    * - ``V4L2_TUNER_SUB_MONO``
+      - 0x0001
+      - The tuner receives a mono audio signal.
+    * - ``V4L2_TUNER_SUB_STEREO``
+      - 0x0002
+      - The tuner receives a stereo audio signal.
+    * - ``V4L2_TUNER_SUB_LANG1``
+      - 0x0008
+      - The tuner receives the primary language of a bilingual audio
+	signal. Drivers must clear this flag when the current video
+	standard is ``V4L2_STD_NTSC_M``.
+    * - ``V4L2_TUNER_SUB_LANG2``
+      - 0x0004
+      - The tuner receives the secondary language of a bilingual audio
+	signal (or a second audio program).
+    * - ``V4L2_TUNER_SUB_SAP``
+      - 0x0004
+      - The tuner receives a Second Audio Program.
 
-    -  .. row 1
+	.. note::
 
-       -  ``V4L2_TUNER_SUB_MONO``
-
-       -  0x0001
-
-       -  The tuner receives a mono audio signal.
-
-    -  .. row 2
-
-       -  ``V4L2_TUNER_SUB_STEREO``
-
-       -  0x0002
-
-       -  The tuner receives a stereo audio signal.
-
-    -  .. row 3
-
-       -  ``V4L2_TUNER_SUB_LANG1``
-
-       -  0x0008
-
-       -  The tuner receives the primary language of a bilingual audio
-	  signal. Drivers must clear this flag when the current video
-	  standard is ``V4L2_STD_NTSC_M``.
-
-    -  .. row 4
-
-       -  ``V4L2_TUNER_SUB_LANG2``
-
-       -  0x0004
-
-       -  The tuner receives the secondary language of a bilingual audio
-	  signal (or a second audio program).
-
-    -  .. row 5
-
-       -  ``V4L2_TUNER_SUB_SAP``
-
-       -  0x0004
-
-       -  The tuner receives a Second Audio Program.
-
-	  .. note:: The ``V4L2_TUNER_SUB_LANG2`` and ``V4L2_TUNER_SUB_SAP``
-	     flags are synonyms. The ``V4L2_TUNER_SUB_SAP`` flag applies
-	     when the current video standard is ``V4L2_STD_NTSC_M``.
-
-    -  .. row 6
-
-       -  ``V4L2_TUNER_SUB_RDS``
-
-       -  0x0010
-
-       -  The tuner receives an RDS channel.
+	   The ``V4L2_TUNER_SUB_LANG2`` and ``V4L2_TUNER_SUB_SAP``
+	   flags are synonyms. The ``V4L2_TUNER_SUB_SAP`` flag applies
+	   when the current video standard is ``V4L2_STD_NTSC_M``.
+    * - ``V4L2_TUNER_SUB_RDS``
+      - 0x0010
+      - The tuner receives an RDS channel.
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _tuner-audmode:
 
 .. flat-table:: Tuner Audio Modes
@@ -523,80 +347,52 @@
     :stub-columns: 0
     :widths:       3 1 4
 
+    * - ``V4L2_TUNER_MODE_MONO``
+      - 0
+      - Play mono audio. When the tuner receives a stereo signal this a
+	down-mix of the left and right channel. When the tuner receives a
+	bilingual or SAP signal this mode selects the primary language.
+    * - ``V4L2_TUNER_MODE_STEREO``
+      - 1
+      - Play stereo audio. When the tuner receives bilingual audio it may
+	play different languages on the left and right channel or the
+	primary language is played on both channels.
 
-    -  .. row 1
+	Playing different languages in this mode is deprecated. New
+	drivers should do this only in ``MODE_LANG1_LANG2``.
 
-       -  ``V4L2_TUNER_MODE_MONO``
+	When the tuner receives no stereo signal or does not support
+	stereo reception the driver shall fall back to ``MODE_MONO``.
+    * - ``V4L2_TUNER_MODE_LANG1``
+      - 3
+      - Play the primary language, mono or stereo. Only
+	``V4L2_TUNER_ANALOG_TV`` tuners support this mode.
+    * - ``V4L2_TUNER_MODE_LANG2``
+      - 2
+      - Play the secondary language, mono. When the tuner receives no
+	bilingual audio or SAP, or their reception is not supported the
+	driver shall fall back to mono or stereo mode. Only
+	``V4L2_TUNER_ANALOG_TV`` tuners support this mode.
+    * - ``V4L2_TUNER_MODE_SAP``
+      - 2
+      - Play the Second Audio Program. When the tuner receives no
+	bilingual audio or SAP, or their reception is not supported the
+	driver shall fall back to mono or stereo mode. Only
+	``V4L2_TUNER_ANALOG_TV`` tuners support this mode.
 
-       -  0
+	.. note:: The ``V4L2_TUNER_MODE_LANG2`` and ``V4L2_TUNER_MODE_SAP``
+	   are synonyms.
+    * - ``V4L2_TUNER_MODE_LANG1_LANG2``
+      - 4
+      - Play the primary language on the left channel, the secondary
+	language on the right channel. When the tuner receives no
+	bilingual audio or SAP, it shall fall back to ``MODE_LANG1`` or
+	``MODE_MONO``. Only ``V4L2_TUNER_ANALOG_TV`` tuners support this
+	mode.
 
-       -  Play mono audio. When the tuner receives a stereo signal this a
-	  down-mix of the left and right channel. When the tuner receives a
-	  bilingual or SAP signal this mode selects the primary language.
+.. raw:: latex
 
-    -  .. row 2
-
-       -  ``V4L2_TUNER_MODE_STEREO``
-
-       -  1
-
-       -  Play stereo audio. When the tuner receives bilingual audio it may
-	  play different languages on the left and right channel or the
-	  primary language is played on both channels.
-
-	  Playing different languages in this mode is deprecated. New
-	  drivers should do this only in ``MODE_LANG1_LANG2``.
-
-	  When the tuner receives no stereo signal or does not support
-	  stereo reception the driver shall fall back to ``MODE_MONO``.
-
-    -  .. row 3
-
-       -  ``V4L2_TUNER_MODE_LANG1``
-
-       -  3
-
-       -  Play the primary language, mono or stereo. Only
-	  ``V4L2_TUNER_ANALOG_TV`` tuners support this mode.
-
-    -  .. row 4
-
-       -  ``V4L2_TUNER_MODE_LANG2``
-
-       -  2
-
-       -  Play the secondary language, mono. When the tuner receives no
-	  bilingual audio or SAP, or their reception is not supported the
-	  driver shall fall back to mono or stereo mode. Only
-	  ``V4L2_TUNER_ANALOG_TV`` tuners support this mode.
-
-    -  .. row 5
-
-       -  ``V4L2_TUNER_MODE_SAP``
-
-       -  2
-
-       -  Play the Second Audio Program. When the tuner receives no
-	  bilingual audio or SAP, or their reception is not supported the
-	  driver shall fall back to mono or stereo mode. Only
-	  ``V4L2_TUNER_ANALOG_TV`` tuners support this mode.
-
-	  .. note:: The ``V4L2_TUNER_MODE_LANG2`` and ``V4L2_TUNER_MODE_SAP``
-	     are synonyms.
-
-    -  .. row 6
-
-       -  ``V4L2_TUNER_MODE_LANG1_LANG2``
-
-       -  4
-
-       -  Play the primary language on the left channel, the secondary
-	  language on the right channel. When the tuner receives no
-	  bilingual audio or SAP, it shall fall back to ``MODE_LANG1`` or
-	  ``MODE_MONO``. Only ``V4L2_TUNER_ANALOG_TV`` tuners support this
-	  mode.
-
-
+    \begin{adjustbox}{width=\columnwidth}
 
 .. _tuner-matrix:
 
@@ -604,96 +400,48 @@
     :header-rows:  2
     :stub-columns: 0
 
+    * -
+      - :cspan:`5` Selected ``V4L2_TUNER_MODE_``
+    * - Received ``V4L2_TUNER_SUB_``
+      - ``MONO``
+      - ``STEREO``
+      - ``LANG1``
+      - ``LANG2 = SAP``
+      - ``LANG1_LANG2``\  [#f1]_
+    * - ``MONO``
+      - Mono
+      - Mono/Mono
+      - Mono
+      - Mono
+      - Mono/Mono
+    * - ``MONO | SAP``
+      - Mono
+      - Mono/Mono
+      - Mono
+      - SAP
+      - Mono/SAP (preferred) or Mono/Mono
+    * - ``STEREO``
+      - L+R
+      - L/R
+      - Stereo L/R (preferred) or Mono L+R
+      - Stereo L/R (preferred) or Mono L+R
+      - L/R (preferred) or L+R/L+R
+    * - ``STEREO | SAP``
+      - L+R
+      - L/R
+      - Stereo L/R (preferred) or Mono L+R
+      - SAP
+      - L+R/SAP (preferred) or L/R or L+R/L+R
+    * - ``LANG1 | LANG2``
+      - Language 1
+      - Lang1/Lang2 (deprecated [#f2]_) or Lang1/Lang1
+      - Language 1
+      - Language 2
+      - Lang1/Lang2 (preferred) or Lang1/Lang1
 
-    -  .. row 1
+.. raw:: latex
 
-       -
-       -  :cspan:`5` Selected ``V4L2_TUNER_MODE_``
-
-    -  .. row 2
-
-       -  Received ``V4L2_TUNER_SUB_``
-
-       -  ``MONO``
-
-       -  ``STEREO``
-
-       -  ``LANG1``
-
-       -  ``LANG2 = SAP``
-
-       -  ``LANG1_LANG2``\  [#f1]_
-
-    -  .. row 3
-
-       -  ``MONO``
-
-       -  Mono
-
-       -  Mono/Mono
-
-       -  Mono
-
-       -  Mono
-
-       -  Mono/Mono
-
-    -  .. row 4
-
-       -  ``MONO | SAP``
-
-       -  Mono
-
-       -  Mono/Mono
-
-       -  Mono
-
-       -  SAP
-
-       -  Mono/SAP (preferred) or Mono/Mono
-
-    -  .. row 5
-
-       -  ``STEREO``
-
-       -  L+R
-
-       -  L/R
-
-       -  Stereo L/R (preferred) or Mono L+R
-
-       -  Stereo L/R (preferred) or Mono L+R
-
-       -  L/R (preferred) or L+R/L+R
-
-    -  .. row 6
-
-       -  ``STEREO | SAP``
-
-       -  L+R
-
-       -  L/R
-
-       -  Stereo L/R (preferred) or Mono L+R
-
-       -  SAP
-
-       -  L+R/SAP (preferred) or L/R or L+R/L+R
-
-    -  .. row 7
-
-       -  ``LANG1 | LANG2``
-
-       -  Language 1
-
-       -  Lang1/Lang2 (deprecated [#f2]_) or Lang1/Lang1
-
-       -  Language 1
-
-       -  Language 2
-
-       -  Lang1/Lang2 (preferred) or Lang1/Lang1
-
+    \end{adjustbox}\newline\newline
 
 Return Value
 ============
@@ -703,7 +451,7 @@
 :ref:`Generic Error Codes <gen-errors>` chapter.
 
 EINVAL
-    The struct :ref:`v4l2_tuner <v4l2-tuner>` ``index`` is out of
+    The struct :c:type:`v4l2_tuner` ``index`` is out of
     bounds.
 
 .. [#f1]
diff --git a/Documentation/media/uapi/v4l/vidioc-log-status.rst b/Documentation/media/uapi/v4l/vidioc-log-status.rst
index 66fc352..bbeb7b5 100644
--- a/Documentation/media/uapi/v4l/vidioc-log-status.rst
+++ b/Documentation/media/uapi/v4l/vidioc-log-status.rst
@@ -15,12 +15,15 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request )
+.. c:function:: int ioctl( int fd, VIDIOC_LOG_STATUS)
+    :name: VIDIOC_LOG_STATUS
 
 
 Arguments
 =========
 
+``fd``
+    File descriptor returned by :ref:`open() <func-open>`.
 
 
 Description
diff --git a/Documentation/media/uapi/v4l/vidioc-overlay.rst b/Documentation/media/uapi/v4l/vidioc-overlay.rst
index 191dbc1..cd7b62e 100644
--- a/Documentation/media/uapi/v4l/vidioc-overlay.rst
+++ b/Documentation/media/uapi/v4l/vidioc-overlay.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, const int *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_OVERLAY, const int *argp )
+    :name: VIDIOC_OVERLAY
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_OVERLAY
-
 ``argp``
 
 
diff --git a/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst b/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst
index 79076df..bdcfd9f 100644
--- a/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_buffer *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_PREPARE_BUF, struct v4l2_buffer *argp )
+    :name: VIDIOC_PREPARE_BUF
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_PREPARE_BUF
-
 ``argp``
 
 
@@ -42,7 +40,7 @@
 ``V4L2_BUF_FLAG_NO_CACHE_INVALIDATE`` and
 ``V4L2_BUF_FLAG_NO_CACHE_CLEAN`` flags to skip the respective step.
 
-The :ref:`struct v4l2_buffer <v4l2-buffer>` structure is specified in
+The struct :c:type:`v4l2_buffer` structure is specified in
 :ref:`buffer`.
 
 
diff --git a/Documentation/media/uapi/v4l/vidioc-qbuf.rst b/Documentation/media/uapi/v4l/vidioc-qbuf.rst
index 3b927f3..1f36126 100644
--- a/Documentation/media/uapi/v4l/vidioc-qbuf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-qbuf.rst
@@ -15,7 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_buffer *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_QBUF, struct v4l2_buffer *argp )
+    :name: VIDIOC_QBUF
+
+.. c:function:: int ioctl( int fd, VIDIOC_DQBUF, struct v4l2_buffer *argp )
+    :name: VIDIOC_DQBUF
 
 
 Arguments
@@ -24,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_QBUF, VIDIOC_DQBUF
-
 ``argp``
 
 
@@ -38,14 +39,14 @@
 The semantics depend on the selected I/O method.
 
 To enqueue a buffer applications set the ``type`` field of a struct
-:ref:`v4l2_buffer <v4l2-buffer>` to the same buffer type as was
-previously used with struct :ref:`v4l2_format <v4l2-format>` ``type``
-and struct :ref:`v4l2_requestbuffers <v4l2-requestbuffers>` ``type``.
+:c:type:`v4l2_buffer` to the same buffer type as was
+previously used with struct :c:type:`v4l2_format` ``type``
+and struct :c:type:`v4l2_requestbuffers` ``type``.
 Applications must also set the ``index`` field. Valid index numbers
 range from zero to the number of buffers allocated with
 :ref:`VIDIOC_REQBUFS` (struct
-:ref:`v4l2_requestbuffers <v4l2-requestbuffers>` ``count``) minus
-one. The contents of the struct :ref:`struct v4l2_buffer <v4l2-buffer>` returned
+:c:type:`v4l2_requestbuffers` ``count``) minus
+one. The contents of the struct :c:type:`v4l2_buffer` returned
 by a :ref:`VIDIOC_QUERYBUF` ioctl will do as well.
 When the buffer is intended for output (``type`` is
 ``V4L2_BUF_TYPE_VIDEO_OUTPUT``, ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``,
@@ -55,7 +56,7 @@
 ``reserved2`` and ``reserved`` fields must be set to 0. When using the
 :ref:`multi-planar API <planar-apis>`, the ``m.planes`` field must
 contain a userspace pointer to a filled-in array of struct
-:ref:`v4l2_plane <v4l2-plane>` and the ``length`` field must be set
+:c:type:`v4l2_plane` and the ``length`` field must be set
 to the number of elements in that array.
 
 To enqueue a :ref:`memory mapped <mmap>` buffer applications set the
@@ -69,7 +70,7 @@
 ``memory`` field to ``V4L2_MEMORY_USERPTR``, the ``m.userptr`` field to
 the address of the buffer and ``length`` to its size. When the
 multi-planar API is used, ``m.userptr`` and ``length`` members of the
-passed array of struct :ref:`v4l2_plane <v4l2-plane>` have to be used
+passed array of struct :c:type:`v4l2_plane` have to be used
 instead. When ``VIDIOC_QBUF`` is called with a pointer to this structure
 the driver sets the ``V4L2_BUF_FLAG_QUEUED`` flag and clears the
 ``V4L2_BUF_FLAG_MAPPED`` and ``V4L2_BUF_FLAG_DONE`` flags in the
@@ -84,7 +85,7 @@
 ``memory`` field to ``V4L2_MEMORY_DMABUF`` and the ``m.fd`` field to a
 file descriptor associated with a DMABUF buffer. When the multi-planar
 API is used the ``m.fd`` fields of the passed array of struct
-:ref:`v4l2_plane <v4l2-plane>` have to be used instead. When
+:c:type:`v4l2_plane` have to be used instead. When
 ``VIDIOC_QBUF`` is called with a pointer to this structure the driver
 sets the ``V4L2_BUF_FLAG_QUEUED`` flag and clears the
 ``V4L2_BUF_FLAG_MAPPED`` and ``V4L2_BUF_FLAG_DONE`` flags in the
@@ -99,7 +100,7 @@
 Applications call the ``VIDIOC_DQBUF`` ioctl to dequeue a filled
 (capturing) or displayed (output) buffer from the driver's outgoing
 queue. They just set the ``type``, ``memory`` and ``reserved`` fields of
-a struct :ref:`v4l2_buffer <v4l2-buffer>` as above, when
+a struct :c:type:`v4l2_buffer` as above, when
 ``VIDIOC_DQBUF`` is called with a pointer to this structure the driver
 fills the remaining fields or returns an error code. The driver may also
 set ``V4L2_BUF_FLAG_ERROR`` in the ``flags`` field. It indicates a
@@ -113,7 +114,7 @@
 :ref:`open() <func-open>` function, ``VIDIOC_DQBUF`` returns
 immediately with an ``EAGAIN`` error code when no buffer is available.
 
-The :ref:`struct v4l2_buffer <v4l2-buffer>` structure is specified in
+The struct :c:type:`v4l2_buffer` structure is specified in
 :ref:`buffer`.
 
 
@@ -137,7 +138,9 @@
     ``VIDIOC_DQBUF`` failed due to an internal error. Can also indicate
     temporary problems like signal loss.
 
-    .. note:: The driver might dequeue an (empty) buffer despite returning
+    .. note::
+
+       The driver might dequeue an (empty) buffer despite returning
        an error, or even stop capturing. Reusing such buffer may be unsafe
        though and its details (e.g. ``index``) may not be returned either.
        It is recommended that drivers indicate recoverable errors by setting
diff --git a/Documentation/media/uapi/v4l/vidioc-query-dv-timings.rst b/Documentation/media/uapi/v4l/vidioc-query-dv-timings.rst
index 416d8d6..0d16853 100644
--- a/Documentation/media/uapi/v4l/vidioc-query-dv-timings.rst
+++ b/Documentation/media/uapi/v4l/vidioc-query-dv-timings.rst
@@ -15,7 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_dv_timings *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_QUERY_DV_TIMINGS, struct v4l2_dv_timings *argp )
+    :name: VIDIOC_QUERY_DV_TIMINGS
+
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_QUERY_DV_TIMINGS, struct v4l2_dv_timings *argp )
+    :name: VIDIOC_SUBDEV_QUERY_DV_TIMINGS
 
 
 Arguments
@@ -24,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_QUERY_DV_TIMINGS, VIDIOC_SUBDEV_QUERY_DV_TIMINGS
-
 ``argp``
 
 
@@ -36,10 +37,12 @@
 The hardware may be able to detect the current DV timings automatically,
 similar to sensing the video standard. To do so, applications call
 :ref:`VIDIOC_QUERY_DV_TIMINGS` with a pointer to a struct
-:ref:`v4l2_dv_timings <v4l2-dv-timings>`. Once the hardware detects
+:c:type:`v4l2_dv_timings`. Once the hardware detects
 the timings, it will fill in the timings structure.
 
-.. note:: Drivers shall *not* switch timings automatically if new
+.. note::
+
+   Drivers shall *not* switch timings automatically if new
    timings are detected. Instead, drivers should send the
    ``V4L2_EVENT_SOURCE_CHANGE`` event (if they support this) and expect
    that userspace will take action by calling :ref:`VIDIOC_QUERY_DV_TIMINGS`.
diff --git a/Documentation/media/uapi/v4l/vidioc-querybuf.rst b/Documentation/media/uapi/v4l/vidioc-querybuf.rst
index 32af6f7..0bdc8e0 100644
--- a/Documentation/media/uapi/v4l/vidioc-querybuf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-querybuf.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_buffer *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_QUERYBUF, struct v4l2_buffer *argp )
+    :name: VIDIOC_QUERYBUF
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_QUERYBUF
-
 ``argp``
 
 
@@ -38,17 +36,17 @@
 been allocated with the :ref:`VIDIOC_REQBUFS` ioctl.
 
 Applications set the ``type`` field of a struct
-:ref:`v4l2_buffer <v4l2-buffer>` to the same buffer type as was
-previously used with struct :ref:`v4l2_format <v4l2-format>` ``type``
-and struct :ref:`v4l2_requestbuffers <v4l2-requestbuffers>` ``type``,
+:c:type:`v4l2_buffer` to the same buffer type as was
+previously used with struct :c:type:`v4l2_format` ``type``
+and struct :c:type:`v4l2_requestbuffers` ``type``,
 and the ``index`` field. Valid index numbers range from zero to the
 number of buffers allocated with
 :ref:`VIDIOC_REQBUFS` (struct
-:ref:`v4l2_requestbuffers <v4l2-requestbuffers>` ``count``) minus
+:c:type:`v4l2_requestbuffers` ``count``) minus
 one. The ``reserved`` and ``reserved2`` fields must be set to 0. When
 using the :ref:`multi-planar API <planar-apis>`, the ``m.planes``
 field must contain a userspace pointer to an array of struct
-:ref:`v4l2_plane <v4l2-plane>` and the ``length`` field has to be set
+:c:type:`v4l2_plane` and the ``length`` field has to be set
 to the number of elements in that array. After calling
 :ref:`VIDIOC_QUERYBUF` with a pointer to this structure drivers return an
 error code or fill the rest of the structure.
@@ -61,11 +59,11 @@
 device memory, the ``length`` field its size. For the multi-planar API,
 fields ``m.mem_offset`` and ``length`` in the ``m.planes`` array
 elements will be used instead and the ``length`` field of struct
-:ref:`v4l2_buffer <v4l2-buffer>` is set to the number of filled-in
+:c:type:`v4l2_buffer` is set to the number of filled-in
 array elements. The driver may or may not set the remaining fields and
 flags, they are meaningless in this context.
 
-The :ref:`struct v4l2_buffer <v4l2-buffer>` structure is specified in
+The struct :c:type:`v4l2_buffer` structure is specified in
 :ref:`buffer`.
 
 
diff --git a/Documentation/media/uapi/v4l/vidioc-querycap.rst b/Documentation/media/uapi/v4l/vidioc-querycap.rst
index b10fed3..165d831 100644
--- a/Documentation/media/uapi/v4l/vidioc-querycap.rst
+++ b/Documentation/media/uapi/v4l/vidioc-querycap.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_capability *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_QUERYCAP, struct v4l2_capability *argp )
+    :name: VIDIOC_QUERYCAP
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_QUERYCAP
-
 ``argp``
 
 
@@ -36,389 +34,226 @@
 All V4L2 devices support the ``VIDIOC_QUERYCAP`` ioctl. It is used to
 identify kernel devices compatible with this specification and to obtain
 information about driver and hardware capabilities. The ioctl takes a
-pointer to a struct :ref:`v4l2_capability <v4l2-capability>` which is
+pointer to a struct :c:type:`v4l2_capability` which is
 filled by the driver. When the driver is not compatible with this
 specification the ioctl returns an ``EINVAL`` error code.
 
 
-.. _v4l2-capability:
+.. tabularcolumns:: |p{1.5cm}|p{2.5cm}|p{13cm}|
+
+.. c:type:: v4l2_capability
 
 .. flat-table:: struct v4l2_capability
     :header-rows:  0
     :stub-columns: 0
-    :widths:       1 1 2
+    :widths:       3 4 20
+
+    * - __u8
+      - ``driver``\ [16]
+      - Name of the driver, a unique NUL-terminated ASCII string. For
+	example: "bttv". Driver specific applications can use this
+	information to verify the driver identity. It is also useful to
+	work around known bugs, or to identify drivers in error reports.
+
+	Storing strings in fixed sized arrays is bad practice but
+	unavoidable here. Drivers and applications should take precautions
+	to never read or write beyond the end of the array and to make
+	sure the strings are properly NUL-terminated.
+    * - __u8
+      - ``card``\ [32]
+      - Name of the device, a NUL-terminated UTF-8 string. For example:
+	"Yoyodyne TV/FM". One driver may support different brands or
+	models of video hardware. This information is intended for users,
+	for example in a menu of available devices. Since multiple TV
+	cards of the same brand may be installed which are supported by
+	the same driver, this name should be combined with the character
+	device file name (e. g. ``/dev/video2``) or the ``bus_info``
+	string to avoid ambiguities.
+    * - __u8
+      - ``bus_info``\ [32]
+      - Location of the device in the system, a NUL-terminated ASCII
+	string. For example: "PCI:0000:05:06.0". This information is
+	intended for users, to distinguish multiple identical devices. If
+	no such information is available the field must simply count the
+	devices controlled by the driver ("platform:vivi-000"). The
+	bus_info must start with "PCI:" for PCI boards, "PCIe:" for PCI
+	Express boards, "usb-" for USB devices, "I2C:" for i2c devices,
+	"ISA:" for ISA devices, "parport" for parallel port devices and
+	"platform:" for platform devices.
+    * - __u32
+      - ``version``
+      - Version number of the driver.
+
+	Starting with kernel 3.1, the version reported is provided by the
+	V4L2 subsystem following the kernel numbering scheme. However, it
+	may not always return the same version as the kernel if, for
+	example, a stable or distribution-modified kernel uses the V4L2
+	stack from a newer kernel.
+
+	The version number is formatted using the ``KERNEL_VERSION()``
+	macro:
+    * - :cspan:`2`
+
+	``#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))``
+
+	``__u32 version = KERNEL_VERSION(0, 8, 1);``
+
+	``printf ("Version: %u.%u.%u\\n",``
+
+	``(version >> 16) & 0xFF, (version >> 8) & 0xFF, version & 0xFF);``
+    * - __u32
+      - ``capabilities``
+      - Available capabilities of the physical device as a whole, see
+	:ref:`device-capabilities`. The same physical device can export
+	multiple devices in /dev (e.g. /dev/videoX, /dev/vbiY and
+	/dev/radioZ). The ``capabilities`` field should contain a union of
+	all capabilities available around the several V4L2 devices
+	exported to userspace. For all those devices the ``capabilities``
+	field returns the same set of capabilities. This allows
+	applications to open just one of the devices (typically the video
+	device) and discover whether video, vbi and/or radio are also
+	supported.
+    * - __u32
+      - ``device_caps``
+      - Device capabilities of the opened device, see
+	:ref:`device-capabilities`. Should contain the available
+	capabilities of that specific device node. So, for example,
+	``device_caps`` of a radio device will only contain radio related
+	capabilities and no video or vbi capabilities. This field is only
+	set if the ``capabilities`` field contains the
+	``V4L2_CAP_DEVICE_CAPS`` capability. Only the ``capabilities``
+	field can have the ``V4L2_CAP_DEVICE_CAPS`` capability,
+	``device_caps`` will never set ``V4L2_CAP_DEVICE_CAPS``.
+    * - __u32
+      - ``reserved``\ [3]
+      - Reserved for future extensions. Drivers must set this array to
+	zero.
 
 
-    -  .. row 1
 
-       -  __u8
-
-       -  ``driver``\ [16]
-
-       -  Name of the driver, a unique NUL-terminated ASCII string. For
-	  example: "bttv". Driver specific applications can use this
-	  information to verify the driver identity. It is also useful to
-	  work around known bugs, or to identify drivers in error reports.
-
-	  Storing strings in fixed sized arrays is bad practice but
-	  unavoidable here. Drivers and applications should take precautions
-	  to never read or write beyond the end of the array and to make
-	  sure the strings are properly NUL-terminated.
-
-    -  .. row 2
-
-       -  __u8
-
-       -  ``card``\ [32]
-
-       -  Name of the device, a NUL-terminated UTF-8 string. For example:
-	  "Yoyodyne TV/FM". One driver may support different brands or
-	  models of video hardware. This information is intended for users,
-	  for example in a menu of available devices. Since multiple TV
-	  cards of the same brand may be installed which are supported by
-	  the same driver, this name should be combined with the character
-	  device file name (e. g. ``/dev/video2``) or the ``bus_info``
-	  string to avoid ambiguities.
-
-    -  .. row 3
-
-       -  __u8
-
-       -  ``bus_info``\ [32]
-
-       -  Location of the device in the system, a NUL-terminated ASCII
-	  string. For example: "PCI:0000:05:06.0". This information is
-	  intended for users, to distinguish multiple identical devices. If
-	  no such information is available the field must simply count the
-	  devices controlled by the driver ("platform:vivi-000"). The
-	  bus_info must start with "PCI:" for PCI boards, "PCIe:" for PCI
-	  Express boards, "usb-" for USB devices, "I2C:" for i2c devices,
-	  "ISA:" for ISA devices, "parport" for parallel port devices and
-	  "platform:" for platform devices.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``version``
-
-       -  Version number of the driver.
-
-	  Starting with kernel 3.1, the version reported is provided by the
-	  V4L2 subsystem following the kernel numbering scheme. However, it
-	  may not always return the same version as the kernel if, for
-	  example, a stable or distribution-modified kernel uses the V4L2
-	  stack from a newer kernel.
-
-	  The version number is formatted using the ``KERNEL_VERSION()``
-	  macro:
-
-    -  .. row 5
-
-       -  :cspan:`2`
-
-
-	  .. code-block:: c
-
-	      #define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
-
-	      __u32 version = KERNEL_VERSION(0, 8, 1);
-
-	      printf ("Version: %u.%u.%u\\n",
-		  (version >> 16) & 0xFF,
-		  (version >> 8) & 0xFF,
-		   version & 0xFF);
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``capabilities``
-
-       -  Available capabilities of the physical device as a whole, see
-	  :ref:`device-capabilities`. The same physical device can export
-	  multiple devices in /dev (e.g. /dev/videoX, /dev/vbiY and
-	  /dev/radioZ). The ``capabilities`` field should contain a union of
-	  all capabilities available around the several V4L2 devices
-	  exported to userspace. For all those devices the ``capabilities``
-	  field returns the same set of capabilities. This allows
-	  applications to open just one of the devices (typically the video
-	  device) and discover whether video, vbi and/or radio are also
-	  supported.
-
-    -  .. row 7
-
-       -  __u32
-
-       -  ``device_caps``
-
-       -  Device capabilities of the opened device, see
-	  :ref:`device-capabilities`. Should contain the available
-	  capabilities of that specific device node. So, for example,
-	  ``device_caps`` of a radio device will only contain radio related
-	  capabilities and no video or vbi capabilities. This field is only
-	  set if the ``capabilities`` field contains the
-	  ``V4L2_CAP_DEVICE_CAPS`` capability. Only the ``capabilities``
-	  field can have the ``V4L2_CAP_DEVICE_CAPS`` capability,
-	  ``device_caps`` will never set ``V4L2_CAP_DEVICE_CAPS``.
-
-    -  .. row 8
-
-       -  __u32
-
-       -  ``reserved``\ [3]
-
-       -  Reserved for future extensions. Drivers must set this array to
-	  zero.
-
-
+.. tabularcolumns:: |p{6cm}|p{2.2cm}|p{8.8cm}|
 
 .. _device-capabilities:
 
+.. cssclass:: longtable
+
 .. flat-table:: Device Capabilities Flags
     :header-rows:  0
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  ``V4L2_CAP_VIDEO_CAPTURE``
-
-       -  0x00000001
-
-       -  The device supports the single-planar API through the
-	  :ref:`Video Capture <capture>` interface.
-
-    -  .. row 2
-
-       -  ``V4L2_CAP_VIDEO_CAPTURE_MPLANE``
-
-       -  0x00001000
-
-       -  The device supports the :ref:`multi-planar API <planar-apis>`
-	  through the :ref:`Video Capture <capture>` interface.
-
-    -  .. row 3
-
-       -  ``V4L2_CAP_VIDEO_OUTPUT``
-
-       -  0x00000002
-
-       -  The device supports the single-planar API through the
-	  :ref:`Video Output <output>` interface.
-
-    -  .. row 4
-
-       -  ``V4L2_CAP_VIDEO_OUTPUT_MPLANE``
-
-       -  0x00002000
-
-       -  The device supports the :ref:`multi-planar API <planar-apis>`
-	  through the :ref:`Video Output <output>` interface.
-
-    -  .. row 5
-
-       -  ``V4L2_CAP_VIDEO_M2M``
-
-       -  0x00004000
-
-       -  The device supports the single-planar API through the Video
-	  Memory-To-Memory interface.
-
-    -  .. row 6
-
-       -  ``V4L2_CAP_VIDEO_M2M_MPLANE``
-
-       -  0x00008000
-
-       -  The device supports the :ref:`multi-planar API <planar-apis>`
-	  through the Video Memory-To-Memory interface.
-
-    -  .. row 7
-
-       -  ``V4L2_CAP_VIDEO_OVERLAY``
-
-       -  0x00000004
-
-       -  The device supports the :ref:`Video Overlay <overlay>`
-	  interface. A video overlay device typically stores captured images
-	  directly in the video memory of a graphics card, with hardware
-	  clipping and scaling.
-
-    -  .. row 8
-
-       -  ``V4L2_CAP_VBI_CAPTURE``
-
-       -  0x00000010
-
-       -  The device supports the :ref:`Raw VBI Capture <raw-vbi>`
-	  interface, providing Teletext and Closed Caption data.
-
-    -  .. row 9
-
-       -  ``V4L2_CAP_VBI_OUTPUT``
-
-       -  0x00000020
-
-       -  The device supports the :ref:`Raw VBI Output <raw-vbi>`
-	  interface.
-
-    -  .. row 10
-
-       -  ``V4L2_CAP_SLICED_VBI_CAPTURE``
-
-       -  0x00000040
-
-       -  The device supports the :ref:`Sliced VBI Capture <sliced>`
-	  interface.
-
-    -  .. row 11
-
-       -  ``V4L2_CAP_SLICED_VBI_OUTPUT``
-
-       -  0x00000080
-
-       -  The device supports the :ref:`Sliced VBI Output <sliced>`
-	  interface.
-
-    -  .. row 12
-
-       -  ``V4L2_CAP_RDS_CAPTURE``
-
-       -  0x00000100
-
-       -  The device supports the :ref:`RDS <rds>` capture interface.
-
-    -  .. row 13
-
-       -  ``V4L2_CAP_VIDEO_OUTPUT_OVERLAY``
-
-       -  0x00000200
-
-       -  The device supports the :ref:`Video Output Overlay <osd>` (OSD)
-	  interface. Unlike the *Video Overlay* interface, this is a
-	  secondary function of video output devices and overlays an image
-	  onto an outgoing video signal. When the driver sets this flag, it
-	  must clear the ``V4L2_CAP_VIDEO_OVERLAY`` flag and vice
-	  versa. [#f1]_
-
-    -  .. row 14
-
-       -  ``V4L2_CAP_HW_FREQ_SEEK``
-
-       -  0x00000400
-
-       -  The device supports the
-	  :ref:`VIDIOC_S_HW_FREQ_SEEK` ioctl
-	  for hardware frequency seeking.
-
-    -  .. row 15
-
-       -  ``V4L2_CAP_RDS_OUTPUT``
-
-       -  0x00000800
-
-       -  The device supports the :ref:`RDS <rds>` output interface.
-
-    -  .. row 16
-
-       -  ``V4L2_CAP_TUNER``
-
-       -  0x00010000
-
-       -  The device has some sort of tuner to receive RF-modulated video
-	  signals. For more information about tuner programming see
-	  :ref:`tuner`.
-
-    -  .. row 17
-
-       -  ``V4L2_CAP_AUDIO``
-
-       -  0x00020000
-
-       -  The device has audio inputs or outputs. It may or may not support
-	  audio recording or playback, in PCM or compressed formats. PCM
-	  audio support must be implemented as ALSA or OSS interface. For
-	  more information on audio inputs and outputs see :ref:`audio`.
-
-    -  .. row 18
-
-       -  ``V4L2_CAP_RADIO``
-
-       -  0x00040000
-
-       -  This is a radio receiver.
-
-    -  .. row 19
-
-       -  ``V4L2_CAP_MODULATOR``
-
-       -  0x00080000
-
-       -  The device has some sort of modulator to emit RF-modulated
-	  video/audio signals. For more information about modulator
-	  programming see :ref:`tuner`.
-
-    -  .. row 20
-
-       -  ``V4L2_CAP_SDR_CAPTURE``
-
-       -  0x00100000
-
-       -  The device supports the :ref:`SDR Capture <sdr>` interface.
-
-    -  .. row 21
-
-       -  ``V4L2_CAP_EXT_PIX_FORMAT``
-
-       -  0x00200000
-
-       -  The device supports the struct
-	  :ref:`v4l2_pix_format <v4l2-pix-format>` extended fields.
-
-    -  .. row 22
-
-       -  ``V4L2_CAP_SDR_OUTPUT``
-
-       -  0x00400000
-
-       -  The device supports the :ref:`SDR Output <sdr>` interface.
-
-    -  .. row 23
-
-       -  ``V4L2_CAP_READWRITE``
-
-       -  0x01000000
-
-       -  The device supports the :ref:`read() <rw>` and/or
-	  :ref:`write() <rw>` I/O methods.
-
-    -  .. row 24
-
-       -  ``V4L2_CAP_ASYNCIO``
-
-       -  0x02000000
-
-       -  The device supports the :ref:`asynchronous <async>` I/O methods.
-
-    -  .. row 25
-
-       -  ``V4L2_CAP_STREAMING``
-
-       -  0x04000000
-
-       -  The device supports the :ref:`streaming <mmap>` I/O method.
-
-    -  .. row 26
-
-       -  ``V4L2_CAP_DEVICE_CAPS``
-
-       -  0x80000000
-
-       -  The driver fills the ``device_caps`` field. This capability can
-	  only appear in the ``capabilities`` field and never in the
-	  ``device_caps`` field.
+    * - ``V4L2_CAP_VIDEO_CAPTURE``
+      - 0x00000001
+      - The device supports the single-planar API through the
+	:ref:`Video Capture <capture>` interface.
+    * - ``V4L2_CAP_VIDEO_CAPTURE_MPLANE``
+      - 0x00001000
+      - The device supports the :ref:`multi-planar API <planar-apis>`
+	through the :ref:`Video Capture <capture>` interface.
+    * - ``V4L2_CAP_VIDEO_OUTPUT``
+      - 0x00000002
+      - The device supports the single-planar API through the
+	:ref:`Video Output <output>` interface.
+    * - ``V4L2_CAP_VIDEO_OUTPUT_MPLANE``
+      - 0x00002000
+      - The device supports the :ref:`multi-planar API <planar-apis>`
+	through the :ref:`Video Output <output>` interface.
+    * - ``V4L2_CAP_VIDEO_M2M``
+      - 0x00004000
+      - The device supports the single-planar API through the Video
+	Memory-To-Memory interface.
+    * - ``V4L2_CAP_VIDEO_M2M_MPLANE``
+      - 0x00008000
+      - The device supports the :ref:`multi-planar API <planar-apis>`
+	through the Video Memory-To-Memory interface.
+    * - ``V4L2_CAP_VIDEO_OVERLAY``
+      - 0x00000004
+      - The device supports the :ref:`Video Overlay <overlay>`
+	interface. A video overlay device typically stores captured images
+	directly in the video memory of a graphics card, with hardware
+	clipping and scaling.
+    * - ``V4L2_CAP_VBI_CAPTURE``
+      - 0x00000010
+      - The device supports the :ref:`Raw VBI Capture <raw-vbi>`
+	interface, providing Teletext and Closed Caption data.
+    * - ``V4L2_CAP_VBI_OUTPUT``
+      - 0x00000020
+      - The device supports the :ref:`Raw VBI Output <raw-vbi>`
+	interface.
+    * - ``V4L2_CAP_SLICED_VBI_CAPTURE``
+      - 0x00000040
+      - The device supports the :ref:`Sliced VBI Capture <sliced>`
+	interface.
+    * - ``V4L2_CAP_SLICED_VBI_OUTPUT``
+      - 0x00000080
+      - The device supports the :ref:`Sliced VBI Output <sliced>`
+	interface.
+    * - ``V4L2_CAP_RDS_CAPTURE``
+      - 0x00000100
+      - The device supports the :ref:`RDS <rds>` capture interface.
+    * - ``V4L2_CAP_VIDEO_OUTPUT_OVERLAY``
+      - 0x00000200
+      - The device supports the :ref:`Video Output Overlay <osd>` (OSD)
+	interface. Unlike the *Video Overlay* interface, this is a
+	secondary function of video output devices and overlays an image
+	onto an outgoing video signal. When the driver sets this flag, it
+	must clear the ``V4L2_CAP_VIDEO_OVERLAY`` flag and vice
+	versa. [#f1]_
+    * - ``V4L2_CAP_HW_FREQ_SEEK``
+      - 0x00000400
+      - The device supports the
+	:ref:`VIDIOC_S_HW_FREQ_SEEK` ioctl
+	for hardware frequency seeking.
+    * - ``V4L2_CAP_RDS_OUTPUT``
+      - 0x00000800
+      - The device supports the :ref:`RDS <rds>` output interface.
+    * - ``V4L2_CAP_TUNER``
+      - 0x00010000
+      - The device has some sort of tuner to receive RF-modulated video
+	signals. For more information about tuner programming see
+	:ref:`tuner`.
+    * - ``V4L2_CAP_AUDIO``
+      - 0x00020000
+      - The device has audio inputs or outputs. It may or may not support
+	audio recording or playback, in PCM or compressed formats. PCM
+	audio support must be implemented as ALSA or OSS interface. For
+	more information on audio inputs and outputs see :ref:`audio`.
+    * - ``V4L2_CAP_RADIO``
+      - 0x00040000
+      - This is a radio receiver.
+    * - ``V4L2_CAP_MODULATOR``
+      - 0x00080000
+      - The device has some sort of modulator to emit RF-modulated
+	video/audio signals. For more information about modulator
+	programming see :ref:`tuner`.
+    * - ``V4L2_CAP_SDR_CAPTURE``
+      - 0x00100000
+      - The device supports the :ref:`SDR Capture <sdr>` interface.
+    * - ``V4L2_CAP_EXT_PIX_FORMAT``
+      - 0x00200000
+      - The device supports the struct
+	:c:type:`v4l2_pix_format` extended fields.
+    * - ``V4L2_CAP_SDR_OUTPUT``
+      - 0x00400000
+      - The device supports the :ref:`SDR Output <sdr>` interface.
+    * - ``V4L2_CAP_READWRITE``
+      - 0x01000000
+      - The device supports the :ref:`read() <rw>` and/or
+	:ref:`write() <rw>` I/O methods.
+    * - ``V4L2_CAP_ASYNCIO``
+      - 0x02000000
+      - The device supports the :ref:`asynchronous <async>` I/O methods.
+    * - ``V4L2_CAP_STREAMING``
+      - 0x04000000
+      - The device supports the :ref:`streaming <mmap>` I/O method.
+    * - ``V4L2_CAP_TOUCH``
+      - 0x10000000
+      - This is a touch device.
+    * - ``V4L2_CAP_DEVICE_CAPS``
+      - 0x80000000
+      - The driver fills the ``device_caps`` field. This capability can
+	only appear in the ``capabilities`` field and never in the
+	``device_caps`` field.
 
 
 Return Value
@@ -429,6 +264,6 @@
 :ref:`Generic Error Codes <gen-errors>` chapter.
 
 .. [#f1]
-   The struct :ref:`v4l2_framebuffer <v4l2-framebuffer>` lacks an
-   enum :ref:`v4l2_buf_type <v4l2-buf-type>` field, therefore the
+   The struct :c:type:`v4l2_framebuffer` lacks an
+   enum :c:type:`v4l2_buf_type` field, therefore the
    type of overlay is implied by the driver capabilities.
diff --git a/Documentation/media/uapi/v4l/vidioc-queryctrl.rst b/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
index 8d6e61a..82769de 100644
--- a/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
+++ b/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
@@ -15,11 +15,14 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_queryctrl *argp )
+.. c:function:: int ioctl( int fd, int VIDIOC_QUERYCTRL, struct v4l2_queryctrl *argp )
+    :name: VIDIOC_QUERYCTRL
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_query_ext_ctrl *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_QUERY_EXT_CTRL, struct v4l2_query_ext_ctrl *argp )
+    :name: VIDIOC_QUERY_EXT_CTRL
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_querymenu *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_QUERYMENU, struct v4l2_querymenu *argp )
+    :name: VIDIOC_QUERYMENU
 
 
 Arguments
@@ -28,9 +31,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_QUERYCTRL, VIDIOC_QUERY_EXT_CTRL, VIDIOC_QUERYMENU
-
 ``argp``
 
 
@@ -84,7 +84,9 @@
 :ref:`v4l2_queryctrl <v4l2-queryctrl>` ``minimum`` to ``maximum``,
 inclusive.
 
-.. note:: It is possible for ``VIDIOC_QUERYMENU`` to return
+.. note::
+
+   It is possible for ``VIDIOC_QUERYMENU`` to return
    an ``EINVAL`` error code for some indices between ``minimum`` and
    ``maximum``. In that case that particular menu item is not supported by
    this driver. Also note that the ``minimum`` value is not necessarily 0.
@@ -92,284 +94,188 @@
 See also the examples in :ref:`control`.
 
 
+.. tabularcolumns:: |p{1.2cm}|p{3.6cm}|p{12.7cm}|
+
 .. _v4l2-queryctrl:
 
+.. cssclass:: longtable
+
 .. flat-table:: struct v4l2_queryctrl
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
+    * - __u32
+      - ``id``
+      - Identifies the control, set by the application. See
+	:ref:`control-id` for predefined IDs. When the ID is ORed with
+	V4L2_CTRL_FLAG_NEXT_CTRL the driver clears the flag and
+	returns the first control with a higher ID. Drivers which do not
+	support this flag yet always return an ``EINVAL`` error code.
+    * - __u32
+      - ``type``
+      - Type of control, see :c:type:`v4l2_ctrl_type`.
+    * - __u8
+      - ``name``\ [32]
+      - Name of the control, a NUL-terminated ASCII string. This
+	information is intended for the user.
+    * - __s32
+      - ``minimum``
+      - Minimum value, inclusive. This field gives a lower bound for the
+	control. See enum :c:type:`v4l2_ctrl_type` how
+	the minimum value is to be used for each possible control type.
+	Note that this a signed 32-bit value.
+    * - __s32
+      - ``maximum``
+      - Maximum value, inclusive. This field gives an upper bound for the
+	control. See enum :c:type:`v4l2_ctrl_type` how
+	the maximum value is to be used for each possible control type.
+	Note that this a signed 32-bit value.
+    * - __s32
+      - ``step``
+      - This field gives a step size for the control. See enum
+	:c:type:`v4l2_ctrl_type` how the step value is
+	to be used for each possible control type. Note that this an
+	unsigned 32-bit value.
 
-    -  .. row 1
+	Generally drivers should not scale hardware control values. It may
+	be necessary for example when the ``name`` or ``id`` imply a
+	particular unit and the hardware actually accepts only multiples
+	of said unit. If so, drivers must take care values are properly
+	rounded when scaling, such that errors will not accumulate on
+	repeated read-write cycles.
 
-       -  __u32
+	This field gives the smallest change of an integer control
+	actually affecting hardware. Often the information is needed when
+	the user can change controls by keyboard or GUI buttons, rather
+	than a slider. When for example a hardware register accepts values
+	0-511 and the driver reports 0-65535, step should be 128.
 
-       -  ``id``
+	Note that although signed, the step value is supposed to be always
+	positive.
+    * - __s32
+      - ``default_value``
+      - The default value of a ``V4L2_CTRL_TYPE_INTEGER``, ``_BOOLEAN``,
+	``_BITMASK``, ``_MENU`` or ``_INTEGER_MENU`` control. Not valid
+	for other types of controls.
 
-       -  Identifies the control, set by the application. See
-	  :ref:`control-id` for predefined IDs. When the ID is ORed with
-	  V4L2_CTRL_FLAG_NEXT_CTRL the driver clears the flag and
-	  returns the first control with a higher ID. Drivers which do not
-	  support this flag yet always return an ``EINVAL`` error code.
+	.. note::
 
-    -  .. row 2
-
-       -  __u32
-
-       -  ``type``
-
-       -  Type of control, see :ref:`v4l2-ctrl-type`.
-
-    -  .. row 3
-
-       -  __u8
-
-       -  ``name``\ [32]
-
-       -  Name of the control, a NUL-terminated ASCII string. This
-	  information is intended for the user.
-
-    -  .. row 4
-
-       -  __s32
-
-       -  ``minimum``
-
-       -  Minimum value, inclusive. This field gives a lower bound for the
-	  control. See enum :ref:`v4l2_ctrl_type <v4l2-ctrl-type>` how
-	  the minimum value is to be used for each possible control type.
-	  Note that this a signed 32-bit value.
-
-    -  .. row 5
-
-       -  __s32
-
-       -  ``maximum``
-
-       -  Maximum value, inclusive. This field gives an upper bound for the
-	  control. See enum :ref:`v4l2_ctrl_type <v4l2-ctrl-type>` how
-	  the maximum value is to be used for each possible control type.
-	  Note that this a signed 32-bit value.
-
-    -  .. row 6
-
-       -  __s32
-
-       -  ``step``
-
-       -  This field gives a step size for the control. See enum
-	  :ref:`v4l2_ctrl_type <v4l2-ctrl-type>` how the step value is
-	  to be used for each possible control type. Note that this an
-	  unsigned 32-bit value.
-
-	  Generally drivers should not scale hardware control values. It may
-	  be necessary for example when the ``name`` or ``id`` imply a
-	  particular unit and the hardware actually accepts only multiples
-	  of said unit. If so, drivers must take care values are properly
-	  rounded when scaling, such that errors will not accumulate on
-	  repeated read-write cycles.
-
-	  This field gives the smallest change of an integer control
-	  actually affecting hardware. Often the information is needed when
-	  the user can change controls by keyboard or GUI buttons, rather
-	  than a slider. When for example a hardware register accepts values
-	  0-511 and the driver reports 0-65535, step should be 128.
-
-	  Note that although signed, the step value is supposed to be always
-	  positive.
-
-    -  .. row 7
-
-       -  __s32
-
-       -  ``default_value``
-
-       -  The default value of a ``V4L2_CTRL_TYPE_INTEGER``, ``_BOOLEAN``,
-	  ``_BITMASK``, ``_MENU`` or ``_INTEGER_MENU`` control. Not valid
-	  for other types of controls.
-
-	  .. note:: Drivers reset controls to their default value only when
-	     the driver is first loaded, never afterwards.
-
-    -  .. row 8
-
-       -  __u32
-
-       -  ``flags``
-
-       -  Control flags, see :ref:`control-flags`.
-
-    -  .. row 9
-
-       -  __u32
-
-       -  ``reserved``\ [2]
-
-       -  Reserved for future extensions. Drivers must set the array to
-	  zero.
+	   Drivers reset controls to their default value only when
+	   the driver is first loaded, never afterwards.
+    * - __u32
+      - ``flags``
+      - Control flags, see :ref:`control-flags`.
+    * - __u32
+      - ``reserved``\ [2]
+      - Reserved for future extensions. Drivers must set the array to
+	zero.
 
 
 
+.. tabularcolumns:: |p{1.2cm}|p{5.0cm}|p{11.3cm}|
+
 .. _v4l2-query-ext-ctrl:
 
+.. cssclass:: longtable
+
 .. flat-table:: struct v4l2_query_ext_ctrl
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
+    * - __u32
+      - ``id``
+      - Identifies the control, set by the application. See
+	:ref:`control-id` for predefined IDs. When the ID is ORed with
+	``V4L2_CTRL_FLAG_NEXT_CTRL`` the driver clears the flag and
+	returns the first non-compound control with a higher ID. When the
+	ID is ORed with ``V4L2_CTRL_FLAG_NEXT_COMPOUND`` the driver clears
+	the flag and returns the first compound control with a higher ID.
+	Set both to get the first control (compound or not) with a higher
+	ID.
+    * - __u32
+      - ``type``
+      - Type of control, see :c:type:`v4l2_ctrl_type`.
+    * - char
+      - ``name``\ [32]
+      - Name of the control, a NUL-terminated ASCII string. This
+	information is intended for the user.
+    * - __s64
+      - ``minimum``
+      - Minimum value, inclusive. This field gives a lower bound for the
+	control. See enum :c:type:`v4l2_ctrl_type` how
+	the minimum value is to be used for each possible control type.
+	Note that this a signed 64-bit value.
+    * - __s64
+      - ``maximum``
+      - Maximum value, inclusive. This field gives an upper bound for the
+	control. See enum :c:type:`v4l2_ctrl_type` how
+	the maximum value is to be used for each possible control type.
+	Note that this a signed 64-bit value.
+    * - __u64
+      - ``step``
+      - This field gives a step size for the control. See enum
+	:c:type:`v4l2_ctrl_type` how the step value is
+	to be used for each possible control type. Note that this an
+	unsigned 64-bit value.
 
-    -  .. row 1
+	Generally drivers should not scale hardware control values. It may
+	be necessary for example when the ``name`` or ``id`` imply a
+	particular unit and the hardware actually accepts only multiples
+	of said unit. If so, drivers must take care values are properly
+	rounded when scaling, such that errors will not accumulate on
+	repeated read-write cycles.
 
-       -  __u32
+	This field gives the smallest change of an integer control
+	actually affecting hardware. Often the information is needed when
+	the user can change controls by keyboard or GUI buttons, rather
+	than a slider. When for example a hardware register accepts values
+	0-511 and the driver reports 0-65535, step should be 128.
+    * - __s64
+      - ``default_value``
+      - The default value of a ``V4L2_CTRL_TYPE_INTEGER``, ``_INTEGER64``,
+	``_BOOLEAN``, ``_BITMASK``, ``_MENU``, ``_INTEGER_MENU``, ``_U8``
+	or ``_U16`` control. Not valid for other types of controls.
 
-       -  ``id``
+	.. note::
 
-       -  Identifies the control, set by the application. See
-	  :ref:`control-id` for predefined IDs. When the ID is ORed with
-	  ``V4L2_CTRL_FLAG_NEXT_CTRL`` the driver clears the flag and
-	  returns the first non-compound control with a higher ID. When the
-	  ID is ORed with ``V4L2_CTRL_FLAG_NEXT_COMPOUND`` the driver clears
-	  the flag and returns the first compound control with a higher ID.
-	  Set both to get the first control (compound or not) with a higher
-	  ID.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``type``
-
-       -  Type of control, see :ref:`v4l2-ctrl-type`.
-
-    -  .. row 3
-
-       -  char
-
-       -  ``name``\ [32]
-
-       -  Name of the control, a NUL-terminated ASCII string. This
-	  information is intended for the user.
-
-    -  .. row 4
-
-       -  __s64
-
-       -  ``minimum``
-
-       -  Minimum value, inclusive. This field gives a lower bound for the
-	  control. See enum :ref:`v4l2_ctrl_type <v4l2-ctrl-type>` how
-	  the minimum value is to be used for each possible control type.
-	  Note that this a signed 64-bit value.
-
-    -  .. row 5
-
-       -  __s64
-
-       -  ``maximum``
-
-       -  Maximum value, inclusive. This field gives an upper bound for the
-	  control. See enum :ref:`v4l2_ctrl_type <v4l2-ctrl-type>` how
-	  the maximum value is to be used for each possible control type.
-	  Note that this a signed 64-bit value.
-
-    -  .. row 6
-
-       -  __u64
-
-       -  ``step``
-
-       -  This field gives a step size for the control. See enum
-	  :ref:`v4l2_ctrl_type <v4l2-ctrl-type>` how the step value is
-	  to be used for each possible control type. Note that this an
-	  unsigned 64-bit value.
-
-	  Generally drivers should not scale hardware control values. It may
-	  be necessary for example when the ``name`` or ``id`` imply a
-	  particular unit and the hardware actually accepts only multiples
-	  of said unit. If so, drivers must take care values are properly
-	  rounded when scaling, such that errors will not accumulate on
-	  repeated read-write cycles.
-
-	  This field gives the smallest change of an integer control
-	  actually affecting hardware. Often the information is needed when
-	  the user can change controls by keyboard or GUI buttons, rather
-	  than a slider. When for example a hardware register accepts values
-	  0-511 and the driver reports 0-65535, step should be 128.
-
-    -  .. row 7
-
-       -  __s64
-
-       -  ``default_value``
-
-       -  The default value of a ``V4L2_CTRL_TYPE_INTEGER``, ``_INTEGER64``,
-	  ``_BOOLEAN``, ``_BITMASK``, ``_MENU``, ``_INTEGER_MENU``, ``_U8``
-	  or ``_U16`` control. Not valid for other types of controls.
-
-	  .. note:: Drivers reset controls to their default value only when
-	     the driver is first loaded, never afterwards.
-
-    -  .. row 8
-
-       -  __u32
-
-       -  ``flags``
-
-       -  Control flags, see :ref:`control-flags`.
-
-    -  .. row 9
-
-       -  __u32
-
-       -  ``elem_size``
-
-       -  The size in bytes of a single element of the array. Given a char
-	  pointer ``p`` to a 3-dimensional array you can find the position
-	  of cell ``(z, y, x)`` as follows:
-	  ``p + ((z * dims[1] + y) * dims[0] + x) * elem_size``.
-	  ``elem_size`` is always valid, also when the control isn't an
-	  array. For string controls ``elem_size`` is equal to
-	  ``maximum + 1``.
-
-    -  .. row 10
-
-       -  __u32
-
-       -  ``elems``
-
-       -  The number of elements in the N-dimensional array. If this control
-	  is not an array, then ``elems`` is 1. The ``elems`` field can
-	  never be 0.
-
-    -  .. row 11
-
-       -  __u32
-
-       -  ``nr_of_dims``
-
-       -  The number of dimension in the N-dimensional array. If this
-	  control is not an array, then this field is 0.
-
-    -  .. row 12
-
-       -  __u32
-
-       -  ``dims[V4L2_CTRL_MAX_DIMS]``
-
-       -  The size of each dimension. The first ``nr_of_dims`` elements of
-	  this array must be non-zero, all remaining elements must be zero.
-
-    -  .. row 13
-
-       -  __u32
-
-       -  ``reserved``\ [32]
-
-       -  Reserved for future extensions. Applications and drivers must set
-	  the array to zero.
+	   Drivers reset controls to their default value only when
+	   the driver is first loaded, never afterwards.
+    * - __u32
+      - ``flags``
+      - Control flags, see :ref:`control-flags`.
+    * - __u32
+      - ``elem_size``
+      - The size in bytes of a single element of the array. Given a char
+	pointer ``p`` to a 3-dimensional array you can find the position
+	of cell ``(z, y, x)`` as follows:
+	``p + ((z * dims[1] + y) * dims[0] + x) * elem_size``.
+	``elem_size`` is always valid, also when the control isn't an
+	array. For string controls ``elem_size`` is equal to
+	``maximum + 1``.
+    * - __u32
+      - ``elems``
+      - The number of elements in the N-dimensional array. If this control
+	is not an array, then ``elems`` is 1. The ``elems`` field can
+	never be 0.
+    * - __u32
+      - ``nr_of_dims``
+      - The number of dimension in the N-dimensional array. If this
+	control is not an array, then this field is 0.
+    * - __u32
+      - ``dims[V4L2_CTRL_MAX_DIMS]``
+      - The size of each dimension. The first ``nr_of_dims`` elements of
+	this array must be non-zero, all remaining elements must be zero.
+    * - __u32
+      - ``reserved``\ [32]
+      - Reserved for future extensions. Applications and drivers must set
+	the array to zero.
 
 
 
+.. tabularcolumns:: |p{1.2cm}|p{0.6cm}|p{1.6cm}|p{13.5cm}|
+
 .. _v4l2-querymenu:
 
 .. flat-table:: struct v4l2_querymenu
@@ -377,386 +283,230 @@
     :stub-columns: 0
     :widths:       1 1 2 1
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -
-       -  ``id``
-
-       -  Identifies the control, set by the application from the respective
-	  struct :ref:`v4l2_queryctrl <v4l2-queryctrl>` ``id``.
-
-    -  .. row 2
-
-       -  __u32
-
-       -
-       -  ``index``
-
-       -  Index of the menu item, starting at zero, set by the application.
-
-    -  .. row 3
-
-       -  union
-
-       -
-       -
-       -
-
-    -  .. row 4
-
-       -
-       -  __u8
-
-       -  ``name``\ [32]
-
-       -  Name of the menu item, a NUL-terminated ASCII string. This
-	  information is intended for the user. This field is valid for
-	  ``V4L2_CTRL_FLAG_MENU`` type controls.
-
-    -  .. row 5
-
-       -
-       -  __s64
-
-       -  ``value``
-
-       -  Value of the integer menu item. This field is valid for
-	  ``V4L2_CTRL_FLAG_INTEGER_MENU`` type controls.
-
-    -  .. row 6
-
-       -  __u32
-
-       -
-       -  ``reserved``
-
-       -  Reserved for future extensions. Drivers must set the array to
-	  zero.
+    * - __u32
+      -
+      - ``id``
+      - Identifies the control, set by the application from the respective
+	struct :ref:`v4l2_queryctrl <v4l2-queryctrl>` ``id``.
+    * - __u32
+      -
+      - ``index``
+      - Index of the menu item, starting at zero, set by the application.
+    * - union
+      -
+      -
+      -
+    * -
+      - __u8
+      - ``name``\ [32]
+      - Name of the menu item, a NUL-terminated ASCII string. This
+	information is intended for the user. This field is valid for
+	``V4L2_CTRL_FLAG_MENU`` type controls.
+    * -
+      - __s64
+      - ``value``
+      - Value of the integer menu item. This field is valid for
+	``V4L2_CTRL_FLAG_INTEGER_MENU`` type controls.
+    * - __u32
+      -
+      - ``reserved``
+      - Reserved for future extensions. Drivers must set the array to
+	zero.
 
 
 
-.. _v4l2-ctrl-type:
+.. tabularcolumns:: |p{5.8cm}|p{1.4cm}|p{1.0cm}|p{1.4cm}|p{6.9cm}|
+
+.. c:type:: v4l2_ctrl_type
+
+.. cssclass:: longtable
 
 .. flat-table:: enum v4l2_ctrl_type
     :header-rows:  1
     :stub-columns: 0
     :widths:       30 5 5 5 55
 
+    * - Type
+      - ``minimum``
+      - ``step``
+      - ``maximum``
+      - Description
+    * - ``V4L2_CTRL_TYPE_INTEGER``
+      - any
+      - any
+      - any
+      - An integer-valued control ranging from minimum to maximum
+	inclusive. The step value indicates the increment between values.
+    * - ``V4L2_CTRL_TYPE_BOOLEAN``
+      - 0
+      - 1
+      - 1
+      - A boolean-valued control. Zero corresponds to "disabled", and one
+	means "enabled".
+    * - ``V4L2_CTRL_TYPE_MENU``
+      - ≥ 0
+      - 1
+      - N-1
+      - The control has a menu of N choices. The names of the menu items
+	can be enumerated with the ``VIDIOC_QUERYMENU`` ioctl.
+    * - ``V4L2_CTRL_TYPE_INTEGER_MENU``
+      - ≥ 0
+      - 1
+      - N-1
+      - The control has a menu of N choices. The values of the menu items
+	can be enumerated with the ``VIDIOC_QUERYMENU`` ioctl. This is
+	similar to ``V4L2_CTRL_TYPE_MENU`` except that instead of strings,
+	the menu items are signed 64-bit integers.
+    * - ``V4L2_CTRL_TYPE_BITMASK``
+      - 0
+      - n/a
+      - any
+      - A bitmask field. The maximum value is the set of bits that can be
+	used, all other bits are to be 0. The maximum value is interpreted
+	as a __u32, allowing the use of bit 31 in the bitmask.
+    * - ``V4L2_CTRL_TYPE_BUTTON``
+      - 0
+      - 0
+      - 0
+      - A control which performs an action when set. Drivers must ignore
+	the value passed with ``VIDIOC_S_CTRL`` and return an ``EINVAL`` error
+	code on a ``VIDIOC_G_CTRL`` attempt.
+    * - ``V4L2_CTRL_TYPE_INTEGER64``
+      - any
+      - any
+      - any
+      - A 64-bit integer valued control. Minimum, maximum and step size
+	cannot be queried using ``VIDIOC_QUERYCTRL``. Only
+	``VIDIOC_QUERY_EXT_CTRL`` can retrieve the 64-bit min/max/step
+	values, they should be interpreted as n/a when using
+	``VIDIOC_QUERYCTRL``.
+    * - ``V4L2_CTRL_TYPE_STRING``
+      - ≥ 0
+      - ≥ 1
+      - ≥ 0
+      - The minimum and maximum string lengths. The step size means that
+	the string must be (minimum + N * step) characters long for N ≥ 0.
+	These lengths do not include the terminating zero, so in order to
+	pass a string of length 8 to
+	:ref:`VIDIOC_S_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` you need to
+	set the ``size`` field of struct
+	:c:type:`v4l2_ext_control` to 9. For
+	:ref:`VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` you can set
+	the ``size`` field to ``maximum`` + 1. Which character encoding is
+	used will depend on the string control itself and should be part
+	of the control documentation.
+    * - ``V4L2_CTRL_TYPE_CTRL_CLASS``
+      - n/a
+      - n/a
+      - n/a
+      - This is not a control. When ``VIDIOC_QUERYCTRL`` is called with a
+	control ID equal to a control class code (see :ref:`ctrl-class`)
+	+ 1, the ioctl returns the name of the control class and this
+	control type. Older drivers which do not support this feature
+	return an ``EINVAL`` error code.
+    * - ``V4L2_CTRL_TYPE_U8``
+      - any
+      - any
+      - any
+      - An unsigned 8-bit valued control ranging from minimum to maximum
+	inclusive. The step value indicates the increment between values.
+    * - ``V4L2_CTRL_TYPE_U16``
+      - any
+      - any
+      - any
+      - An unsigned 16-bit valued control ranging from minimum to maximum
+	inclusive. The step value indicates the increment between values.
+    * - ``V4L2_CTRL_TYPE_U32``
+      - any
+      - any
+      - any
+      - An unsigned 32-bit valued control ranging from minimum to maximum
+	inclusive. The step value indicates the increment between values.
 
-    -  .. row 1
 
-       -  Type
 
-       -  ``minimum``
-
-       -  ``step``
-
-       -  ``maximum``
-
-       -  Description
-
-    -  .. row 2
-
-       -  ``V4L2_CTRL_TYPE_INTEGER``
-
-       -  any
-
-       -  any
-
-       -  any
-
-       -  An integer-valued control ranging from minimum to maximum
-	  inclusive. The step value indicates the increment between values.
-
-    -  .. row 3
-
-       -  ``V4L2_CTRL_TYPE_BOOLEAN``
-
-       -  0
-
-       -  1
-
-       -  1
-
-       -  A boolean-valued control. Zero corresponds to "disabled", and one
-	  means "enabled".
-
-    -  .. row 4
-
-       -  ``V4L2_CTRL_TYPE_MENU``
-
-       -  ≥ 0
-
-       -  1
-
-       -  N-1
-
-       -  The control has a menu of N choices. The names of the menu items
-	  can be enumerated with the ``VIDIOC_QUERYMENU`` ioctl.
-
-    -  .. row 5
-
-       -  ``V4L2_CTRL_TYPE_INTEGER_MENU``
-
-       -  ≥ 0
-
-       -  1
-
-       -  N-1
-
-       -  The control has a menu of N choices. The values of the menu items
-	  can be enumerated with the ``VIDIOC_QUERYMENU`` ioctl. This is
-	  similar to ``V4L2_CTRL_TYPE_MENU`` except that instead of strings,
-	  the menu items are signed 64-bit integers.
-
-    -  .. row 6
-
-       -  ``V4L2_CTRL_TYPE_BITMASK``
-
-       -  0
-
-       -  n/a
-
-       -  any
-
-       -  A bitmask field. The maximum value is the set of bits that can be
-	  used, all other bits are to be 0. The maximum value is interpreted
-	  as a __u32, allowing the use of bit 31 in the bitmask.
-
-    -  .. row 7
-
-       -  ``V4L2_CTRL_TYPE_BUTTON``
-
-       -  0
-
-       -  0
-
-       -  0
-
-       -  A control which performs an action when set. Drivers must ignore
-	  the value passed with ``VIDIOC_S_CTRL`` and return an ``EINVAL`` error
-	  code on a ``VIDIOC_G_CTRL`` attempt.
-
-    -  .. row 8
-
-       -  ``V4L2_CTRL_TYPE_INTEGER64``
-
-       -  any
-
-       -  any
-
-       -  any
-
-       -  A 64-bit integer valued control. Minimum, maximum and step size
-	  cannot be queried using ``VIDIOC_QUERYCTRL``. Only
-	  ``VIDIOC_QUERY_EXT_CTRL`` can retrieve the 64-bit min/max/step
-	  values, they should be interpreted as n/a when using
-	  ``VIDIOC_QUERYCTRL``.
-
-    -  .. row 9
-
-       -  ``V4L2_CTRL_TYPE_STRING``
-
-       -  ≥ 0
-
-       -  ≥ 1
-
-       -  ≥ 0
-
-       -  The minimum and maximum string lengths. The step size means that
-	  the string must be (minimum + N * step) characters long for N ≥ 0.
-	  These lengths do not include the terminating zero, so in order to
-	  pass a string of length 8 to
-	  :ref:`VIDIOC_S_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` you need to
-	  set the ``size`` field of struct
-	  :ref:`v4l2_ext_control <v4l2-ext-control>` to 9. For
-	  :ref:`VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` you can set
-	  the ``size`` field to ``maximum`` + 1. Which character encoding is
-	  used will depend on the string control itself and should be part
-	  of the control documentation.
-
-    -  .. row 10
-
-       -  ``V4L2_CTRL_TYPE_CTRL_CLASS``
-
-       -  n/a
-
-       -  n/a
-
-       -  n/a
-
-       -  This is not a control. When ``VIDIOC_QUERYCTRL`` is called with a
-	  control ID equal to a control class code (see :ref:`ctrl-class`)
-	  + 1, the ioctl returns the name of the control class and this
-	  control type. Older drivers which do not support this feature
-	  return an ``EINVAL`` error code.
-
-    -  .. row 11
-
-       -  ``V4L2_CTRL_TYPE_U8``
-
-       -  any
-
-       -  any
-
-       -  any
-
-       -  An unsigned 8-bit valued control ranging from minimum to maximum
-	  inclusive. The step value indicates the increment between values.
-
-    -  .. row 12
-
-       -  ``V4L2_CTRL_TYPE_U16``
-
-       -  any
-
-       -  any
-
-       -  any
-
-       -  An unsigned 16-bit valued control ranging from minimum to maximum
-	  inclusive. The step value indicates the increment between values.
-
-    -  .. row 13
-
-       -  ``V4L2_CTRL_TYPE_U32``
-
-       -  any
-
-       -  any
-
-       -  any
-
-       -  An unsigned 32-bit valued control ranging from minimum to maximum
-	  inclusive. The step value indicates the increment between values.
-
-
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
 
 .. _control-flags:
 
+.. cssclass:: longtable
+
 .. flat-table:: Control Flags
     :header-rows:  0
     :stub-columns: 0
     :widths:       3 1 4
 
+    * - ``V4L2_CTRL_FLAG_DISABLED``
+      - 0x0001
+      - This control is permanently disabled and should be ignored by the
+	application. Any attempt to change the control will result in an
+	``EINVAL`` error code.
+    * - ``V4L2_CTRL_FLAG_GRABBED``
+      - 0x0002
+      - This control is temporarily unchangeable, for example because
+	another application took over control of the respective resource.
+	Such controls may be displayed specially in a user interface.
+	Attempts to change the control may result in an ``EBUSY`` error code.
+    * - ``V4L2_CTRL_FLAG_READ_ONLY``
+      - 0x0004
+      - This control is permanently readable only. Any attempt to change
+	the control will result in an ``EINVAL`` error code.
+    * - ``V4L2_CTRL_FLAG_UPDATE``
+      - 0x0008
+      - A hint that changing this control may affect the value of other
+	controls within the same control class. Applications should update
+	their user interface accordingly.
+    * - ``V4L2_CTRL_FLAG_INACTIVE``
+      - 0x0010
+      - This control is not applicable to the current configuration and
+	should be displayed accordingly in a user interface. For example
+	the flag may be set on a MPEG audio level 2 bitrate control when
+	MPEG audio encoding level 1 was selected with another control.
+    * - ``V4L2_CTRL_FLAG_SLIDER``
+      - 0x0020
+      - A hint that this control is best represented as a slider-like
+	element in a user interface.
+    * - ``V4L2_CTRL_FLAG_WRITE_ONLY``
+      - 0x0040
+      - This control is permanently writable only. Any attempt to read the
+	control will result in an ``EACCES`` error code error code. This flag
+	is typically present for relative controls or action controls
+	where writing a value will cause the device to carry out a given
+	action (e. g. motor control) but no meaningful value can be
+	returned.
+    * - ``V4L2_CTRL_FLAG_VOLATILE``
+      - 0x0080
+      - This control is volatile, which means that the value of the
+	control changes continuously. A typical example would be the
+	current gain value if the device is in auto-gain mode. In such a
+	case the hardware calculates the gain value based on the lighting
+	conditions which can change over time.
 
-    -  .. row 1
+	.. note::
 
-       -  ``V4L2_CTRL_FLAG_DISABLED``
+	   Setting a new value for a volatile control will be ignored
+	   unless
+	   :ref:`V4L2_CTRL_FLAG_EXECUTE_ON_WRITE <FLAG_EXECUTE_ON_WRITE>`
+	   is also set.
+	   Setting a new value for a volatile control will *never* trigger a
+	   :ref:`V4L2_EVENT_CTRL_CH_VALUE <ctrl-changes-flags>` event.
+    * - ``V4L2_CTRL_FLAG_HAS_PAYLOAD``
+      - 0x0100
+      - This control has a pointer type, so its value has to be accessed
+	using one of the pointer fields of struct
+	:c:type:`v4l2_ext_control`. This flag is set
+	for controls that are an array, string, or have a compound type.
+	In all cases you have to set a pointer to memory containing the
+	payload of the control.
+    * .. _FLAG_EXECUTE_ON_WRITE:
 
-       -  0x0001
-
-       -  This control is permanently disabled and should be ignored by the
-	  application. Any attempt to change the control will result in an
-	  ``EINVAL`` error code.
-
-    -  .. row 2
-
-       -  ``V4L2_CTRL_FLAG_GRABBED``
-
-       -  0x0002
-
-       -  This control is temporarily unchangeable, for example because
-	  another application took over control of the respective resource.
-	  Such controls may be displayed specially in a user interface.
-	  Attempts to change the control may result in an ``EBUSY`` error code.
-
-    -  .. row 3
-
-       -  ``V4L2_CTRL_FLAG_READ_ONLY``
-
-       -  0x0004
-
-       -  This control is permanently readable only. Any attempt to change
-	  the control will result in an ``EINVAL`` error code.
-
-    -  .. row 4
-
-       -  ``V4L2_CTRL_FLAG_UPDATE``
-
-       -  0x0008
-
-       -  A hint that changing this control may affect the value of other
-	  controls within the same control class. Applications should update
-	  their user interface accordingly.
-
-    -  .. row 5
-
-       -  ``V4L2_CTRL_FLAG_INACTIVE``
-
-       -  0x0010
-
-       -  This control is not applicable to the current configuration and
-	  should be displayed accordingly in a user interface. For example
-	  the flag may be set on a MPEG audio level 2 bitrate control when
-	  MPEG audio encoding level 1 was selected with another control.
-
-    -  .. row 6
-
-       -  ``V4L2_CTRL_FLAG_SLIDER``
-
-       -  0x0020
-
-       -  A hint that this control is best represented as a slider-like
-	  element in a user interface.
-
-    -  .. row 7
-
-       -  ``V4L2_CTRL_FLAG_WRITE_ONLY``
-
-       -  0x0040
-
-       -  This control is permanently writable only. Any attempt to read the
-	  control will result in an ``EACCES`` error code error code. This flag
-	  is typically present for relative controls or action controls
-	  where writing a value will cause the device to carry out a given
-	  action (e. g. motor control) but no meaningful value can be
-	  returned.
-
-    -  .. row 8
-
-       -  ``V4L2_CTRL_FLAG_VOLATILE``
-
-       -  0x0080
-
-       -  This control is volatile, which means that the value of the
-	  control changes continuously. A typical example would be the
-	  current gain value if the device is in auto-gain mode. In such a
-	  case the hardware calculates the gain value based on the lighting
-	  conditions which can change over time.
-
-	  .. note:: Setting a new value for a volatile control will have no
-	     effect and no ``V4L2_EVENT_CTRL_CH_VALUE`` will be sent, unless
-	     the ``V4L2_CTRL_FLAG_EXECUTE_ON_WRITE`` flag (see below) is
-	     also set. Otherwise the new value will just be ignored.
-
-    -  .. row 9
-
-       -  ``V4L2_CTRL_FLAG_HAS_PAYLOAD``
-
-       -  0x0100
-
-       -  This control has a pointer type, so its value has to be accessed
-	  using one of the pointer fields of struct
-	  :ref:`v4l2_ext_control <v4l2-ext-control>`. This flag is set
-	  for controls that are an array, string, or have a compound type.
-	  In all cases you have to set a pointer to memory containing the
-	  payload of the control.
-
-    -  .. row 10
-
-       -  ``V4L2_CTRL_FLAG_EXECUTE_ON_WRITE``
-
-       -  0x0200
-
-       -  The value provided to the control will be propagated to the driver
-	  even if it remains constant. This is required when the control
-	  represents an action on the hardware. For example: clearing an
-	  error flag or triggering the flash. All the controls of the type
-	  ``V4L2_CTRL_TYPE_BUTTON`` have this flag set.
+      - ``V4L2_CTRL_FLAG_EXECUTE_ON_WRITE``
+      - 0x0200
+      - The value provided to the control will be propagated to the driver
+	even if it remains constant. This is required when the control
+	represents an action on the hardware. For example: clearing an
+	error flag or triggering the flash. All the controls of the type
+	``V4L2_CTRL_TYPE_BUTTON`` have this flag set.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-querystd.rst b/Documentation/media/uapi/v4l/vidioc-querystd.rst
index b4a4e22..3ef9ab3 100644
--- a/Documentation/media/uapi/v4l/vidioc-querystd.rst
+++ b/Documentation/media/uapi/v4l/vidioc-querystd.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, v4l2_std_id *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_QUERYSTD, v4l2_std_id *argp )
+    :name: VIDIOC_QUERYSTD
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_QUERYSTD
-
 ``argp``
 
 
@@ -43,7 +41,9 @@
 the set must contain all standards supported by the current video input
 or output.
 
-.. note:: Drivers shall *not* switch the video standard
+.. note::
+
+   Drivers shall *not* switch the video standard
    automatically if a new video standard is detected. Instead, drivers
    should send the ``V4L2_EVENT_SOURCE_CHANGE`` event (if they support
    this) and expect that userspace will take action by calling
diff --git a/Documentation/media/uapi/v4l/vidioc-reqbufs.rst b/Documentation/media/uapi/v4l/vidioc-reqbufs.rst
index 5d0bc6d..a4180d5 100644
--- a/Documentation/media/uapi/v4l/vidioc-reqbufs.rst
+++ b/Documentation/media/uapi/v4l/vidioc-reqbufs.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_requestbuffers *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_REQBUFS, struct v4l2_requestbuffers *argp )
+    :name: VIDIOC_REQBUFS
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_REQBUFS
-
 ``argp``
 
 
@@ -45,7 +43,7 @@
 allocation.
 
 To allocate device buffers applications initialize all fields of the
-:ref:`struct v4l2_requestbuffers <v4l2-requestbuffers>` structure. They set the ``type``
+struct :c:type:`v4l2_requestbuffers` structure. They set the ``type``
 field to the respective stream or buffer type, the ``count`` field to
 the desired number of buffers, ``memory`` must be set to the requested
 I/O method and the ``reserved`` array must be zeroed. When the ioctl is
@@ -67,50 +65,32 @@
 :ref:`VIDIOC_STREAMOFF <VIDIOC_STREAMON>`.
 
 
-.. _v4l2-requestbuffers:
+.. c:type:: v4l2_requestbuffers
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_requestbuffers
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``count``
-
-       -  The number of buffers requested or granted.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``type``
-
-       -  Type of the stream or buffers, this is the same as the struct
-	  :ref:`v4l2_format <v4l2-format>` ``type`` field. See
-	  :ref:`v4l2-buf-type` for valid values.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``memory``
-
-       -  Applications set this field to ``V4L2_MEMORY_MMAP``,
-	  ``V4L2_MEMORY_DMABUF`` or ``V4L2_MEMORY_USERPTR``. See
-	  :ref:`v4l2-memory`.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``reserved``\ [2]
-
-       -  A place holder for future extensions. Drivers and applications
-	  must set the array to zero.
+    * - __u32
+      - ``count``
+      - The number of buffers requested or granted.
+    * - __u32
+      - ``type``
+      - Type of the stream or buffers, this is the same as the struct
+	:c:type:`v4l2_format` ``type`` field. See
+	:c:type:`v4l2_buf_type` for valid values.
+    * - __u32
+      - ``memory``
+      - Applications set this field to ``V4L2_MEMORY_MMAP``,
+	``V4L2_MEMORY_DMABUF`` or ``V4L2_MEMORY_USERPTR``. See
+	:c:type:`v4l2_memory`.
+    * - __u32
+      - ``reserved``\ [2]
+      - A place holder for future extensions. Drivers and applications
+	must set the array to zero.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-s-hw-freq-seek.rst b/Documentation/media/uapi/v4l/vidioc-s-hw-freq-seek.rst
index 5fd332a..5672ca4 100644
--- a/Documentation/media/uapi/v4l/vidioc-s-hw-freq-seek.rst
+++ b/Documentation/media/uapi/v4l/vidioc-s-hw-freq-seek.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_hw_freq_seek *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_S_HW_FREQ_SEEK, struct v4l2_hw_freq_seek *argp )
+    :name: VIDIOC_S_HW_FREQ_SEEK
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_S_HW_FREQ_SEEK
-
 ``argp``
 
 
@@ -37,12 +35,12 @@
 applications initialize the ``tuner``, ``type``, ``seek_upward``,
 ``wrap_around``, ``spacing``, ``rangelow`` and ``rangehigh`` fields, and
 zero out the ``reserved`` array of a struct
-:ref:`v4l2_hw_freq_seek <v4l2-hw-freq-seek>` and call the
+:c:type:`v4l2_hw_freq_seek` and call the
 ``VIDIOC_S_HW_FREQ_SEEK`` ioctl with a pointer to this structure.
 
 The ``rangelow`` and ``rangehigh`` fields can be set to a non-zero value
 to tell the driver to search a specific band. If the struct
-:ref:`v4l2_tuner <v4l2-tuner>` ``capability`` field has the
+:c:type:`v4l2_tuner` ``capability`` field has the
 ``V4L2_TUNER_CAP_HWSEEK_PROG_LIM`` flag set, these values must fall
 within one of the bands returned by
 :ref:`VIDIOC_ENUM_FREQ_BANDS`. If the
@@ -61,99 +59,61 @@
 error code is returned and no seek takes place.
 
 
-.. _v4l2-hw-freq-seek:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_hw_freq_seek
 
 .. flat-table:: struct v4l2_hw_freq_seek
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``tuner``
-
-       -  The tuner index number. This is the same value as in the struct
-	  :ref:`v4l2_input <v4l2-input>` ``tuner`` field and the struct
-	  :ref:`v4l2_tuner <v4l2-tuner>` ``index`` field.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``type``
-
-       -  The tuner type. This is the same value as in the struct
-	  :ref:`v4l2_tuner <v4l2-tuner>` ``type`` field. See
-	  :ref:`v4l2-tuner-type`
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``seek_upward``
-
-       -  If non-zero, seek upward from the current frequency, else seek
-	  downward.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``wrap_around``
-
-       -  If non-zero, wrap around when at the end of the frequency range,
-	  else stop seeking. The struct :ref:`v4l2_tuner <v4l2-tuner>`
-	  ``capability`` field will tell you what the hardware supports.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``spacing``
-
-       -  If non-zero, defines the hardware seek resolution in Hz. The
-	  driver selects the nearest value that is supported by the device.
-	  If spacing is zero a reasonable default value is used.
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``rangelow``
-
-       -  If non-zero, the lowest tunable frequency of the band to search in
-	  units of 62.5 kHz, or if the struct
-	  :ref:`v4l2_tuner <v4l2-tuner>` ``capability`` field has the
-	  ``V4L2_TUNER_CAP_LOW`` flag set, in units of 62.5 Hz or if the
-	  struct :ref:`v4l2_tuner <v4l2-tuner>` ``capability`` field has
-	  the ``V4L2_TUNER_CAP_1HZ`` flag set, in units of 1 Hz. If
-	  ``rangelow`` is zero a reasonable default value is used.
-
-    -  .. row 7
-
-       -  __u32
-
-       -  ``rangehigh``
-
-       -  If non-zero, the highest tunable frequency of the band to search
-	  in units of 62.5 kHz, or if the struct
-	  :ref:`v4l2_tuner <v4l2-tuner>` ``capability`` field has the
-	  ``V4L2_TUNER_CAP_LOW`` flag set, in units of 62.5 Hz or if the
-	  struct :ref:`v4l2_tuner <v4l2-tuner>` ``capability`` field has
-	  the ``V4L2_TUNER_CAP_1HZ`` flag set, in units of 1 Hz. If
-	  ``rangehigh`` is zero a reasonable default value is used.
-
-    -  .. row 8
-
-       -  __u32
-
-       -  ``reserved``\ [5]
-
-       -  Reserved for future extensions. Applications must set the array to
-	  zero.
+    * - __u32
+      - ``tuner``
+      - The tuner index number. This is the same value as in the struct
+	:c:type:`v4l2_input` ``tuner`` field and the struct
+	:c:type:`v4l2_tuner` ``index`` field.
+    * - __u32
+      - ``type``
+      - The tuner type. This is the same value as in the struct
+	:c:type:`v4l2_tuner` ``type`` field. See
+	:c:type:`v4l2_tuner_type`
+    * - __u32
+      - ``seek_upward``
+      - If non-zero, seek upward from the current frequency, else seek
+	downward.
+    * - __u32
+      - ``wrap_around``
+      - If non-zero, wrap around when at the end of the frequency range,
+	else stop seeking. The struct :c:type:`v4l2_tuner`
+	``capability`` field will tell you what the hardware supports.
+    * - __u32
+      - ``spacing``
+      - If non-zero, defines the hardware seek resolution in Hz. The
+	driver selects the nearest value that is supported by the device.
+	If spacing is zero a reasonable default value is used.
+    * - __u32
+      - ``rangelow``
+      - If non-zero, the lowest tunable frequency of the band to search in
+	units of 62.5 kHz, or if the struct
+	:c:type:`v4l2_tuner` ``capability`` field has the
+	``V4L2_TUNER_CAP_LOW`` flag set, in units of 62.5 Hz or if the
+	struct :c:type:`v4l2_tuner` ``capability`` field has
+	the ``V4L2_TUNER_CAP_1HZ`` flag set, in units of 1 Hz. If
+	``rangelow`` is zero a reasonable default value is used.
+    * - __u32
+      - ``rangehigh``
+      - If non-zero, the highest tunable frequency of the band to search
+	in units of 62.5 kHz, or if the struct
+	:c:type:`v4l2_tuner` ``capability`` field has the
+	``V4L2_TUNER_CAP_LOW`` flag set, in units of 62.5 Hz or if the
+	struct :c:type:`v4l2_tuner` ``capability`` field has
+	the ``V4L2_TUNER_CAP_1HZ`` flag set, in units of 1 Hz. If
+	``rangehigh`` is zero a reasonable default value is used.
+    * - __u32
+      - ``reserved``\ [5]
+      - Reserved for future extensions. Applications must set the array to
+	zero.
 
 
 Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-streamon.rst b/Documentation/media/uapi/v4l/vidioc-streamon.rst
index bb23745e..972d5b3c 100644
--- a/Documentation/media/uapi/v4l/vidioc-streamon.rst
+++ b/Documentation/media/uapi/v4l/vidioc-streamon.rst
@@ -15,7 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, const int *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_STREAMON, const int *argp )
+    :name: VIDIOC_STREAMON
+
+.. c:function:: int ioctl( int fd, VIDIOC_STREAMOFF, const int *argp )
+    :name: VIDIOC_STREAMOFF
 
 
 Arguments
@@ -24,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_STREAMON, VIDIOC_STREAMOFF
-
 ``argp``
 
 
@@ -68,7 +69,7 @@
 
 Both ioctls take a pointer to an integer, the desired buffer or stream
 type. This is the same as struct
-:ref:`v4l2_requestbuffers <v4l2-requestbuffers>` ``type``.
+:c:type:`v4l2_requestbuffers` ``type``.
 
 If ``VIDIOC_STREAMON`` is called when streaming is already in progress,
 or if ``VIDIOC_STREAMOFF`` is called when streaming is already stopped,
@@ -76,7 +77,9 @@
 but ``VIDIOC_STREAMOFF`` will return queued buffers to their starting
 state as mentioned above.
 
-.. note:: Applications can be preempted for unknown periods right before
+.. note::
+
+   Applications can be preempted for unknown periods right before
    or after the ``VIDIOC_STREAMON`` or ``VIDIOC_STREAMOFF`` calls, there is
    no notion of starting or stopping "now". Buffer timestamps can be used
    to synchronize with other events.
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-interval.rst b/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-interval.rst
index 0aa6482..1a02c93 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-interval.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-interval.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_subdev_frame_interval_enum * argp )
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL, struct v4l2_subdev_frame_interval_enum * argp )
+    :name: VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL
-
 ``argp``
 
 
@@ -45,7 +43,7 @@
 
 To enumerate frame intervals applications initialize the ``index``,
 ``pad``, ``which``, ``code``, ``width`` and ``height`` fields of struct
-:ref:`v4l2_subdev_frame_interval_enum <v4l2-subdev-frame-interval-enum>`
+:c:type:`v4l2_subdev_frame_interval_enum`
 and call the :ref:`VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL` ioctl with a pointer
 to this structure. Drivers fill the rest of the structure or return an
 EINVAL error code if one of the input fields is invalid. All frame
@@ -61,81 +59,42 @@
 implemented it on a single pad only. Its behaviour when supported on
 multiple pads of the same sub-device is not defined.
 
+.. c:type:: v4l2_subdev_frame_interval_enum
 
-.. _v4l2-subdev-frame-interval-enum:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_subdev_frame_interval_enum
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``index``
-
-       -  Number of the format in the enumeration, set by the application.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``pad``
-
-       -  Pad number as reported by the media controller API.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``code``
-
-       -  The media bus format code, as defined in
-	  :ref:`v4l2-mbus-format`.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``width``
-
-       -  Frame width, in pixels.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``height``
-
-       -  Frame height, in pixels.
-
-    -  .. row 6
-
-       -  struct :ref:`v4l2_fract <v4l2-fract>`
-
-       -  ``interval``
-
-       -  Period, in seconds, between consecutive video frames.
-
-    -  .. row 7
-
-       -  __u32
-
-       -  ``which``
-
-       -  Frame intervals to be enumerated, from enum
-	  :ref:`v4l2_subdev_format_whence <v4l2-subdev-format-whence>`.
-
-    -  .. row 8
-
-       -  __u32
-
-       -  ``reserved``\ [8]
-
-       -  Reserved for future extensions. Applications and drivers must set
-	  the array to zero.
+    * - __u32
+      - ``index``
+      - Number of the format in the enumeration, set by the application.
+    * - __u32
+      - ``pad``
+      - Pad number as reported by the media controller API.
+    * - __u32
+      - ``code``
+      - The media bus format code, as defined in
+	:ref:`v4l2-mbus-format`.
+    * - __u32
+      - ``width``
+      - Frame width, in pixels.
+    * - __u32
+      - ``height``
+      - Frame height, in pixels.
+    * - struct :c:type:`v4l2_fract`
+      - ``interval``
+      - Period, in seconds, between consecutive video frames.
+    * - __u32
+      - ``which``
+      - Frame intervals to be enumerated, from enum
+	:ref:`v4l2_subdev_format_whence <v4l2-subdev-format-whence>`.
+    * - __u32
+      - ``reserved``\ [8]
+      - Reserved for future extensions. Applications and drivers must set
+	the array to zero.
 
 
 Return Value
@@ -147,7 +106,7 @@
 
 EINVAL
     The struct
-    :ref:`v4l2_subdev_frame_interval_enum <v4l2-subdev-frame-interval-enum>`
+    :c:type:`v4l2_subdev_frame_interval_enum`
     ``pad`` references a non-existing pad, one of the ``code``,
     ``width`` or ``height`` fields are invalid for the given pad or the
     ``index`` field is out of bounds.
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-size.rst b/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-size.rst
index 7a5811b..746c24e 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-size.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-size.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_subdev_frame_size_enum * argp )
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_ENUM_FRAME_SIZE, struct v4l2_subdev_frame_size_enum * argp )
+    :name: VIDIOC_SUBDEV_ENUM_FRAME_SIZE
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_SUBDEV_ENUM_FRAME_SIZE
-
 ``argp``
 
 
@@ -41,7 +39,7 @@
 
 To enumerate frame sizes applications initialize the ``pad``, ``which``
 , ``code`` and ``index`` fields of the struct
-:ref:`v4l2_subdev_mbus_code_enum <v4l2-subdev-mbus-code-enum>` and
+:c:type:`v4l2_subdev_mbus_code_enum` and
 call the :ref:`VIDIOC_SUBDEV_ENUM_FRAME_SIZE` ioctl with a pointer to the
 structure. Drivers fill the minimum and maximum frame sizes or return an
 EINVAL error code if one of the input parameters is invalid.
@@ -64,88 +62,45 @@
 information about try formats.
 
 
-.. _v4l2-subdev-frame-size-enum:
+.. c:type:: v4l2_subdev_frame_size_enum
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_subdev_frame_size_enum
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``index``
-
-       -  Number of the format in the enumeration, set by the application.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``pad``
-
-       -  Pad number as reported by the media controller API.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``code``
-
-       -  The media bus format code, as defined in
-	  :ref:`v4l2-mbus-format`.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``min_width``
-
-       -  Minimum frame width, in pixels.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``max_width``
-
-       -  Maximum frame width, in pixels.
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``min_height``
-
-       -  Minimum frame height, in pixels.
-
-    -  .. row 7
-
-       -  __u32
-
-       -  ``max_height``
-
-       -  Maximum frame height, in pixels.
-
-    -  .. row 8
-
-       -  __u32
-
-       -  ``which``
-
-       -  Frame sizes to be enumerated, from enum
-	  :ref:`v4l2_subdev_format_whence <v4l2-subdev-format-whence>`.
-
-    -  .. row 9
-
-       -  __u32
-
-       -  ``reserved``\ [8]
-
-       -  Reserved for future extensions. Applications and drivers must set
-	  the array to zero.
+    * - __u32
+      - ``index``
+      - Number of the format in the enumeration, set by the application.
+    * - __u32
+      - ``pad``
+      - Pad number as reported by the media controller API.
+    * - __u32
+      - ``code``
+      - The media bus format code, as defined in
+	:ref:`v4l2-mbus-format`.
+    * - __u32
+      - ``min_width``
+      - Minimum frame width, in pixels.
+    * - __u32
+      - ``max_width``
+      - Maximum frame width, in pixels.
+    * - __u32
+      - ``min_height``
+      - Minimum frame height, in pixels.
+    * - __u32
+      - ``max_height``
+      - Maximum frame height, in pixels.
+    * - __u32
+      - ``which``
+      - Frame sizes to be enumerated, from enum
+	:ref:`v4l2_subdev_format_whence <v4l2-subdev-format-whence>`.
+    * - __u32
+      - ``reserved``\ [8]
+      - Reserved for future extensions. Applications and drivers must set
+	the array to zero.
 
 
 Return Value
@@ -157,6 +112,6 @@
 
 EINVAL
     The struct
-    :ref:`v4l2_subdev_frame_size_enum <v4l2-subdev-frame-size-enum>`
+    :c:type:`v4l2_subdev_frame_size_enum`
     ``pad`` references a non-existing pad, the ``code`` is invalid for
     the given pad or the ``index`` field is out of bounds.
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-enum-mbus-code.rst b/Documentation/media/uapi/v4l/vidioc-subdev-enum-mbus-code.rst
index bc0531e..0dfee38 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-enum-mbus-code.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-enum-mbus-code.rst
@@ -15,7 +15,8 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_subdev_mbus_code_enum * argp )
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_ENUM_MBUS_CODE, struct v4l2_subdev_mbus_code_enum * argp )
+    :name: VIDIOC_SUBDEV_ENUM_MBUS_CODE
 
 
 Arguments
@@ -24,9 +25,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_SUBDEV_ENUM_MBUS_CODE
-
 ``argp``
 
 
@@ -36,7 +34,7 @@
 To enumerate media bus formats available at a given sub-device pad
 applications initialize the ``pad``, ``which`` and ``index`` fields of
 struct
-:ref:`v4l2_subdev_mbus_code_enum <v4l2-subdev-mbus-code-enum>` and
+:c:type:`v4l2_subdev_mbus_code_enum` and
 call the :ref:`VIDIOC_SUBDEV_ENUM_MBUS_CODE` ioctl with a pointer to this
 structure. Drivers fill the rest of the structure or return an ``EINVAL``
 error code if either the ``pad`` or ``index`` are invalid. All media bus
@@ -49,56 +47,33 @@
 information about the try formats.
 
 
-.. _v4l2-subdev-mbus-code-enum:
+.. c:type:: v4l2_subdev_mbus_code_enum
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_subdev_mbus_code_enum
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``pad``
-
-       -  Pad number as reported by the media controller API.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``index``
-
-       -  Number of the format in the enumeration, set by the application.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``code``
-
-       -  The media bus format code, as defined in
-	  :ref:`v4l2-mbus-format`.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``which``
-
-       -  Media bus format codes to be enumerated, from enum
-	  :ref:`v4l2_subdev_format_whence <v4l2-subdev-format-whence>`.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``reserved``\ [8]
-
-       -  Reserved for future extensions. Applications and drivers must set
-	  the array to zero.
+    * - __u32
+      - ``pad``
+      - Pad number as reported by the media controller API.
+    * - __u32
+      - ``index``
+      - Number of the format in the enumeration, set by the application.
+    * - __u32
+      - ``code``
+      - The media bus format code, as defined in
+	:ref:`v4l2-mbus-format`.
+    * - __u32
+      - ``which``
+      - Media bus format codes to be enumerated, from enum
+	:ref:`v4l2_subdev_format_whence <v4l2-subdev-format-whence>`.
+    * - __u32
+      - ``reserved``\ [8]
+      - Reserved for future extensions. Applications and drivers must set
+	the array to zero.
 
 
 Return Value
@@ -110,6 +85,6 @@
 
 EINVAL
     The struct
-    :ref:`v4l2_subdev_mbus_code_enum <v4l2-subdev-mbus-code-enum>`
+    :c:type:`v4l2_subdev_mbus_code_enum`
     ``pad`` references a non-existing pad, or the ``index`` field is out
     of bounds.
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-g-crop.rst b/Documentation/media/uapi/v4l/vidioc-subdev-g-crop.rst
index ae802f1..000e8fc 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-g-crop.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-g-crop.rst
@@ -15,9 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_subdev_crop *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_G_CROP, struct v4l2_subdev_crop *argp )
+    :name: VIDIOC_SUBDEV_G_CROP
 
-.. cpp:function:: int ioctl( int fd, int request, const struct v4l2_subdev_crop *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_S_CROP, const struct v4l2_subdev_crop *argp )
+    :name: VIDIOC_SUBDEV_S_CROP
 
 
 Arguments
@@ -26,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_SUBDEV_G_CROP, VIDIOC_SUBDEV_S_CROP
-
 ``argp``
 
 
@@ -42,7 +41,7 @@
     :ref:`the selection API <VIDIOC_SUBDEV_G_SELECTION>`.
 
 To retrieve the current crop rectangle applications set the ``pad``
-field of a struct :ref:`v4l2_subdev_crop <v4l2-subdev-crop>` to the
+field of a struct :c:type:`v4l2_subdev_crop` to the
 desired pad number as reported by the media API and the ``which`` field
 to ``V4L2_SUBDEV_FORMAT_ACTIVE``. They then call the
 ``VIDIOC_SUBDEV_G_CROP`` ioctl with a pointer to this structure. The
@@ -55,7 +54,7 @@
 call the ``VIDIOC_SUBDEV_S_CROP`` ioctl with a pointer to this
 structure. The driver verifies the requested crop rectangle, adjusts it
 based on the hardware capabilities and configures the device. Upon
-return the struct :ref:`v4l2_subdev_crop <v4l2-subdev-crop>`
+return the struct :c:type:`v4l2_subdev_crop`
 contains the current format as would be returned by a
 ``VIDIOC_SUBDEV_G_CROP`` call.
 
@@ -72,47 +71,29 @@
 modified format should be as close as possible to the original request.
 
 
-.. _v4l2-subdev-crop:
+.. c:type:: v4l2_subdev_crop
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_subdev_crop
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``pad``
-
-       -  Pad number as reported by the media framework.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``which``
-
-       -  Crop rectangle to get or set, from enum
-	  :ref:`v4l2_subdev_format_whence <v4l2-subdev-format-whence>`.
-
-    -  .. row 3
-
-       -  struct :ref:`v4l2_rect <v4l2-rect>`
-
-       -  ``rect``
-
-       -  Crop rectangle boundaries, in pixels.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``reserved``\ [8]
-
-       -  Reserved for future extensions. Applications and drivers must set
-	  the array to zero.
+    * - __u32
+      - ``pad``
+      - Pad number as reported by the media framework.
+    * - __u32
+      - ``which``
+      - Crop rectangle to get or set, from enum
+	:ref:`v4l2_subdev_format_whence <v4l2-subdev-format-whence>`.
+    * - struct :c:type:`v4l2_rect`
+      - ``rect``
+      - Crop rectangle boundaries, in pixels.
+    * - __u32
+      - ``reserved``\ [8]
+      - Reserved for future extensions. Applications and drivers must set
+	the array to zero.
 
 
 Return Value
@@ -130,7 +111,7 @@
     ``VIDIOC_SUBDEV_S_CROP``
 
 EINVAL
-    The struct :ref:`v4l2_subdev_crop <v4l2-subdev-crop>` ``pad``
+    The struct :c:type:`v4l2_subdev_crop` ``pad``
     references a non-existing pad, the ``which`` field references a
     non-existing format, or cropping is not supported on the given
     subdev pad.
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-g-fmt.rst b/Documentation/media/uapi/v4l/vidioc-subdev-g-fmt.rst
index 90e2a66..b352456 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-g-fmt.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-g-fmt.rst
@@ -15,7 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_subdev_format *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_G_FMT, struct v4l2_subdev_format *argp )
+    :name: VIDIOC_SUBDEV_G_FMT
+
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_S_FMT, struct v4l2_subdev_format *argp )
+    :name: VIDIOC_SUBDEV_S_FMT
 
 
 Arguments
@@ -24,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_SUBDEV_G_FMT, VIDIOC_SUBDEV_S_FMT
-
 ``argp``
 
 
@@ -37,7 +38,7 @@
 pads in the image pipeline.
 
 To retrieve the current format applications set the ``pad`` field of a
-struct :ref:`v4l2_subdev_format <v4l2-subdev-format>` to the desired
+struct :c:type:`v4l2_subdev_format` to the desired
 pad number as reported by the media API and the ``which`` field to
 ``V4L2_SUBDEV_FORMAT_ACTIVE``. When they call the
 ``VIDIOC_SUBDEV_G_FMT`` ioctl with a pointer to this structure the
@@ -48,7 +49,7 @@
 the ``VIDIOC_SUBDEV_S_FMT`` ioctl with a pointer to this structure the
 driver verifies the requested format, adjusts it based on the hardware
 capabilities and configures the device. Upon return the struct
-:ref:`v4l2_subdev_format <v4l2-subdev-format>` contains the current
+:c:type:`v4l2_subdev_format` contains the current
 format as would be returned by a ``VIDIOC_SUBDEV_G_FMT`` call.
 
 Applications can query the device capabilities by setting the ``which``
@@ -75,51 +76,35 @@
 should be as close as possible to the original request.
 
 
-.. _v4l2-subdev-format:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_subdev_format
 
 .. flat-table:: struct v4l2_subdev_format
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``pad``
-
-       -  Pad number as reported by the media controller API.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``which``
-
-       -  Format to modified, from enum
-	  :ref:`v4l2_subdev_format_whence <v4l2-subdev-format-whence>`.
-
-    -  .. row 3
-
-       -  struct :ref:`v4l2_mbus_framefmt <v4l2-mbus-framefmt>`
-
-       -  ``format``
-
-       -  Definition of an image format, see :ref:`v4l2-mbus-framefmt` for
-	  details.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``reserved``\ [8]
-
-       -  Reserved for future extensions. Applications and drivers must set
-	  the array to zero.
+    * - __u32
+      - ``pad``
+      - Pad number as reported by the media controller API.
+    * - __u32
+      - ``which``
+      - Format to modified, from enum
+	:ref:`v4l2_subdev_format_whence <v4l2-subdev-format-whence>`.
+    * - struct :c:type:`v4l2_mbus_framefmt`
+      - ``format``
+      - Definition of an image format, see :c:type:`v4l2_mbus_framefmt` for
+	details.
+    * - __u32
+      - ``reserved``\ [8]
+      - Reserved for future extensions. Applications and drivers must set
+	the array to zero.
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _v4l2-subdev-format-whence:
 
 .. flat-table:: enum v4l2_subdev_format_whence
@@ -127,22 +112,12 @@
     :stub-columns: 0
     :widths:       3 1 4
 
-
-    -  .. row 1
-
-       -  V4L2_SUBDEV_FORMAT_TRY
-
-       -  0
-
-       -  Try formats, used for querying device capabilities.
-
-    -  .. row 2
-
-       -  V4L2_SUBDEV_FORMAT_ACTIVE
-
-       -  1
-
-       -  Active formats, applied to the hardware.
+    * - V4L2_SUBDEV_FORMAT_TRY
+      - 0
+      - Try formats, used for querying device capabilities.
+    * - V4L2_SUBDEV_FORMAT_ACTIVE
+      - 1
+      - Active formats, applied to the hardware.
 
 
 Return Value
@@ -159,7 +134,7 @@
     fix the problem first. Only returned by ``VIDIOC_SUBDEV_S_FMT``
 
 EINVAL
-    The struct :ref:`v4l2_subdev_format <v4l2-subdev-format>`
+    The struct :c:type:`v4l2_subdev_format`
     ``pad`` references a non-existing pad, or the ``which`` field
     references a non-existing format.
 
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-g-frame-interval.rst b/Documentation/media/uapi/v4l/vidioc-subdev-g-frame-interval.rst
index d8a1cab..46159dc 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-g-frame-interval.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-g-frame-interval.rst
@@ -15,7 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_subdev_frame_interval *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_G_FRAME_INTERVAL, struct v4l2_subdev_frame_interval *argp )
+    :name: VIDIOC_SUBDEV_G_FRAME_INTERVAL
+
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_S_FRAME_INTERVAL, struct v4l2_subdev_frame_interval *argp )
+    :name: VIDIOC_SUBDEV_S_FRAME_INTERVAL
 
 
 Arguments
@@ -24,10 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_SUBDEV_G_FRAME_INTERVAL,
-    VIDIOC_SUBDEV_S_FRAME_INTERVAL
-
 ``argp``
 
 
@@ -42,7 +42,7 @@
 
 To retrieve the current frame interval applications set the ``pad``
 field of a struct
-:ref:`v4l2_subdev_frame_interval <v4l2-subdev-frame-interval>` to
+:c:type:`v4l2_subdev_frame_interval` to
 the desired pad number as reported by the media controller API. When
 they call the ``VIDIOC_SUBDEV_G_FRAME_INTERVAL`` ioctl with a pointer to
 this structure the driver fills the members of the ``interval`` field.
@@ -53,7 +53,7 @@
 structure the driver verifies the requested interval, adjusts it based
 on the hardware capabilities and configures the device. Upon return the
 struct
-:ref:`v4l2_subdev_frame_interval <v4l2-subdev-frame-interval>`
+:c:type:`v4l2_subdev_frame_interval`
 contains the current frame interval as would be returned by a
 ``VIDIOC_SUBDEV_G_FRAME_INTERVAL`` call.
 
@@ -67,38 +67,25 @@
 the same sub-device is not defined.
 
 
-.. _v4l2-subdev-frame-interval:
+.. c:type:: v4l2_subdev_frame_interval
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_subdev_frame_interval
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``pad``
-
-       -  Pad number as reported by the media controller API.
-
-    -  .. row 2
-
-       -  struct :ref:`v4l2_fract <v4l2-fract>`
-
-       -  ``interval``
-
-       -  Period, in seconds, between consecutive video frames.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``reserved``\ [9]
-
-       -  Reserved for future extensions. Applications and drivers must set
-	  the array to zero.
+    * - __u32
+      - ``pad``
+      - Pad number as reported by the media controller API.
+    * - struct :c:type:`v4l2_fract`
+      - ``interval``
+      - Period, in seconds, between consecutive video frames.
+    * - __u32
+      - ``reserved``\ [9]
+      - Reserved for future extensions. Applications and drivers must set
+	the array to zero.
 
 
 Return Value
@@ -117,6 +104,6 @@
 
 EINVAL
     The struct
-    :ref:`v4l2_subdev_frame_interval <v4l2-subdev-frame-interval>`
+    :c:type:`v4l2_subdev_frame_interval`
     ``pad`` references a non-existing pad, or the pad doesn't support
     frame intervals.
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-g-selection.rst b/Documentation/media/uapi/v4l/vidioc-subdev-g-selection.rst
index 50838a4..071d9c0 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-g-selection.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-g-selection.rst
@@ -15,7 +15,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_subdev_selection *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_G_SELECTION, struct v4l2_subdev_selection *argp )
+    :name: VIDIOC_SUBDEV_G_SELECTION
+
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_S_SELECTION, struct v4l2_subdev_selection *argp )
+    :name: VIDIOC_SUBDEV_S_SELECTION
 
 
 Arguments
@@ -24,9 +28,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_SUBDEV_G_SELECTION, VIDIOC_SUBDEV_S_SELECTION
-
 ``argp``
 
 
@@ -64,63 +65,35 @@
 :ref:`v4l2-selections-common`.
 
 
-.. _v4l2-subdev-selection:
+.. c:type:: v4l2_subdev_selection
+
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
 
 .. flat-table:: struct v4l2_subdev_selection
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``which``
-
-       -  Active or try selection, from enum
-	  :ref:`v4l2_subdev_format_whence <v4l2-subdev-format-whence>`.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``pad``
-
-       -  Pad number as reported by the media framework.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``target``
-
-       -  Target selection rectangle. See :ref:`v4l2-selections-common`.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``flags``
-
-       -  Flags. See :ref:`v4l2-selection-flags`.
-
-    -  .. row 5
-
-       -  struct :ref:`v4l2_rect <v4l2-rect>`
-
-       -  ``r``
-
-       -  Selection rectangle, in pixels.
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``reserved``\ [8]
-
-       -  Reserved for future extensions. Applications and drivers must set
-	  the array to zero.
+    * - __u32
+      - ``which``
+      - Active or try selection, from enum
+	:ref:`v4l2_subdev_format_whence <v4l2-subdev-format-whence>`.
+    * - __u32
+      - ``pad``
+      - Pad number as reported by the media framework.
+    * - __u32
+      - ``target``
+      - Target selection rectangle. See :ref:`v4l2-selections-common`.
+    * - __u32
+      - ``flags``
+      - Flags. See :ref:`v4l2-selection-flags`.
+    * - struct :c:type:`v4l2_rect`
+      - ``r``
+      - Selection rectangle, in pixels.
+    * - __u32
+      - ``reserved``\ [8]
+      - Reserved for future extensions. Applications and drivers must set
+	the array to zero.
 
 
 Return Value
@@ -138,7 +111,7 @@
     ``VIDIOC_SUBDEV_S_SELECTION``
 
 EINVAL
-    The struct :ref:`v4l2_subdev_selection <v4l2-subdev-selection>`
+    The struct :c:type:`v4l2_subdev_selection`
     ``pad`` references a non-existing pad, the ``which`` field
     references a non-existing format, or the selection target is not
     supported on the given subdev pad.
diff --git a/Documentation/media/uapi/v4l/vidioc-subscribe-event.rst b/Documentation/media/uapi/v4l/vidioc-subscribe-event.rst
index 3f28e8c..e4a5143 100644
--- a/Documentation/media/uapi/v4l/vidioc-subscribe-event.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subscribe-event.rst
@@ -16,7 +16,11 @@
 Synopsis
 ========
 
-.. cpp:function:: int ioctl( int fd, int request, struct v4l2_event_subscription *argp )
+.. c:function:: int ioctl( int fd, VIDIOC_SUBSCRIBE_EVENT, struct v4l2_event_subscription *argp )
+    :name: VIDIOC_SUBSCRIBE_EVENT
+
+.. c:function:: int ioctl( int fd, VIDIOC_UNSUBSCRIBE_EVENT, struct v4l2_event_subscription *argp )
+    :name: VIDIOC_UNSUBSCRIBE_EVENT
 
 
 Arguments
@@ -25,9 +29,6 @@
 ``fd``
     File descriptor returned by :ref:`open() <func-open>`.
 
-``request``
-    VIDIOC_SUBSCRIBE_EVENT, VIDIOC_UNSUBSCRIBE_EVENT
-
 ``argp``
 
 
@@ -38,55 +39,41 @@
 using the :ref:`VIDIOC_DQEVENT` ioctl.
 
 
-.. _v4l2-event-subscription:
+.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+
+.. c:type:: v4l2_event_subscription
 
 .. flat-table:: struct v4l2_event_subscription
     :header-rows:  0
     :stub-columns: 0
     :widths:       1 1 2
 
+    * - __u32
+      - ``type``
+      - Type of the event, see :ref:`event-type`.
 
-    -  .. row 1
+	.. note::
 
-       -  __u32
-
-       -  ``type``
-
-       -  Type of the event, see :ref:`event-type`.
-
-	  .. note:: ``V4L2_EVENT_ALL`` can be used with
-	     :ref:`VIDIOC_UNSUBSCRIBE_EVENT <VIDIOC_SUBSCRIBE_EVENT>` for
-	     unsubscribing all events at once.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``id``
-
-       -  ID of the event source. If there is no ID associated with the
-	  event source, then set this to 0. Whether or not an event needs an
-	  ID depends on the event type.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``flags``
-
-       -  Event flags, see :ref:`event-flags`.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``reserved``\ [5]
-
-       -  Reserved for future extensions. Drivers and applications must set
-	  the array to zero.
+	   ``V4L2_EVENT_ALL`` can be used with
+	   :ref:`VIDIOC_UNSUBSCRIBE_EVENT <VIDIOC_SUBSCRIBE_EVENT>` for
+	   unsubscribing all events at once.
+    * - __u32
+      - ``id``
+      - ID of the event source. If there is no ID associated with the
+	event source, then set this to 0. Whether or not an event needs an
+	ID depends on the event type.
+    * - __u32
+      - ``flags``
+      - Event flags, see :ref:`event-flags`.
+    * - __u32
+      - ``reserved``\ [5]
+      - Reserved for future extensions. Drivers and applications must set
+	the array to zero.
 
 
 
+.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+
 .. _event-flags:
 
 .. flat-table:: Event Flags
@@ -94,40 +81,30 @@
     :stub-columns: 0
     :widths:       3 1 4
 
+    * - ``V4L2_EVENT_SUB_FL_SEND_INITIAL``
+      - 0x0001
+      - When this event is subscribed an initial event will be sent
+	containing the current status. This only makes sense for events
+	that are triggered by a status change such as ``V4L2_EVENT_CTRL``.
+	Other events will ignore this flag.
+    * - ``V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK``
+      - 0x0002
+      - If set, then events directly caused by an ioctl will also be sent
+	to the filehandle that called that ioctl. For example, changing a
+	control using :ref:`VIDIOC_S_CTRL <VIDIOC_G_CTRL>` will cause
+	a V4L2_EVENT_CTRL to be sent back to that same filehandle.
+	Normally such events are suppressed to prevent feedback loops
+	where an application changes a control to a one value and then
+	another, and then receives an event telling it that that control
+	has changed to the first value.
 
-    -  .. row 1
+	Since it can't tell whether that event was caused by another
+	application or by the :ref:`VIDIOC_S_CTRL <VIDIOC_G_CTRL>`
+	call it is hard to decide whether to set the control to the value
+	in the event, or ignore it.
 
-       -  ``V4L2_EVENT_SUB_FL_SEND_INITIAL``
-
-       -  0x0001
-
-       -  When this event is subscribed an initial event will be sent
-	  containing the current status. This only makes sense for events
-	  that are triggered by a status change such as ``V4L2_EVENT_CTRL``.
-	  Other events will ignore this flag.
-
-    -  .. row 2
-
-       -  ``V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK``
-
-       -  0x0002
-
-       -  If set, then events directly caused by an ioctl will also be sent
-	  to the filehandle that called that ioctl. For example, changing a
-	  control using :ref:`VIDIOC_S_CTRL <VIDIOC_G_CTRL>` will cause
-	  a V4L2_EVENT_CTRL to be sent back to that same filehandle.
-	  Normally such events are suppressed to prevent feedback loops
-	  where an application changes a control to a one value and then
-	  another, and then receives an event telling it that that control
-	  has changed to the first value.
-
-	  Since it can't tell whether that event was caused by another
-	  application or by the :ref:`VIDIOC_S_CTRL <VIDIOC_G_CTRL>`
-	  call it is hard to decide whether to set the control to the value
-	  in the event, or ignore it.
-
-	  Think carefully when you set this flag so you won't get into
-	  situations like that.
+	Think carefully when you set this flag so you won't get into
+	situations like that.
 
 
 Return Value
diff --git a/Documentation/media/v4l-drivers/bttv.rst b/Documentation/media/v4l-drivers/bttv.rst
index f78c135..7abc1c9 100644
--- a/Documentation/media/v4l-drivers/bttv.rst
+++ b/Documentation/media/v4l-drivers/bttv.rst
@@ -586,6 +586,7 @@
 -----
 
 .. note::
+
    For a more updated list, please check
    https://linuxtv.org/wiki/index.php/Hardware_Device_Information
 
diff --git a/Documentation/media/v4l-drivers/cpia2.rst b/Documentation/media/v4l-drivers/cpia2.rst
index 763705c..b512501 100644
--- a/Documentation/media/v4l-drivers/cpia2.rst
+++ b/Documentation/media/v4l-drivers/cpia2.rst
@@ -55,6 +55,9 @@
 Driver options
 ~~~~~~~~~~~~~~
 
+.. tabularcolumns:: |p{13ex}|L|
+
+
 ==============  ========================================================
 Option		Description
 ==============  ========================================================
diff --git a/Documentation/media/v4l-drivers/cx23885-cardlist.rst b/Documentation/media/v4l-drivers/cx23885-cardlist.rst
index ded3b91..f380032 100644
--- a/Documentation/media/v4l-drivers/cx23885-cardlist.rst
+++ b/Documentation/media/v4l-drivers/cx23885-cardlist.rst
@@ -60,3 +60,4 @@
 	 54 -> ViewCast 260e                                       [1576:0260]
 	 55 -> ViewCast 460e                                       [1576:0460]
 	 56 -> Hauppauge WinTV-QuadHD-DVB                          [0070:6a28,0070:6b28]
+	 57 -> Hauppauge WinTV-QuadHD-ATSC                         [0070:6a18,0070:6b18]
diff --git a/Documentation/media/v4l-drivers/fourcc.rst b/Documentation/media/v4l-drivers/fourcc.rst
index f7c8cef..9c82106 100644
--- a/Documentation/media/v4l-drivers/fourcc.rst
+++ b/Documentation/media/v4l-drivers/fourcc.rst
@@ -1,4 +1,4 @@
-Guidelines for Linux4Linux pixel format 4CCs
+Guidelines for Video4Linux pixel format 4CCs
 ============================================
 
 Guidelines for Video4Linux 4CC codes defined using v4l2_fourcc() are
diff --git a/Documentation/media/v4l-drivers/si476x.rst b/Documentation/media/v4l-drivers/si476x.rst
index d5c07bb..6775125 100644
--- a/Documentation/media/v4l-drivers/si476x.rst
+++ b/Documentation/media/v4l-drivers/si476x.rst
@@ -31,31 +31,33 @@
   information. The contents of the file is binary data of the
   following layout:
 
+  .. tabularcolumns:: |p{7ex}|p{12ex}|L|
+
   =============  ==============   ====================================
-  Offset	  Name		  Description
+  Offset	 Name		  Description
   =============  ==============   ====================================
-  0x00		  blend_int	  Flag, set when stereo separation has
+  0x00		 blend_int	  Flag, set when stereo separation has
 				  crossed below the blend threshold
-  0x01		  hblend_int	  Flag, set when HiBlend cutoff
+  0x01		 hblend_int	  Flag, set when HiBlend cutoff
 				  frequency is lower than threshold
-  0x02		  hicut_int	  Flag, set when HiCut cutoff
+  0x02		 hicut_int	  Flag, set when HiCut cutoff
 				  frequency is lower than threshold
-  0x03		  chbw_int	  Flag, set when channel filter
+  0x03		 chbw_int	  Flag, set when channel filter
 				  bandwidth is less than threshold
-  0x04		  softmute_int	  Flag indicating that softmute
+  0x04		 softmute_int	  Flag indicating that softmute
 				  attenuation has increased above
 				  softmute threshold
   0x05		 smute		  0 - Audio is not soft muted
 				  1 - Audio is soft muted
-  0x06		  smattn	  Soft mute attenuation level in dB
-  0x07		  chbw		  Channel filter bandwidth in kHz
-  0x08		  hicut		  HiCut cutoff frequency in units of
+  0x06		 smattn		  Soft mute attenuation level in dB
+  0x07		 chbw		  Channel filter bandwidth in kHz
+  0x08		 hicut		  HiCut cutoff frequency in units of
 				  100Hz
-  0x09		  hiblend	  HiBlend cutoff frequency in units
+  0x09		 hiblend	  HiBlend cutoff frequency in units
 				  of 100 Hz
-  0x10		  pilot		  0 - Stereo pilot is not present
+  0x10		 pilot		  0 - Stereo pilot is not present
 				  1 - Stereo pilot is present
-  0x11		  stblend	  Stereo blend in %
+  0x11		 stblend	  Stereo blend in %
   =============  ==============   ====================================
 
 
@@ -63,12 +65,14 @@
   This file contains statistics about RDS receptions. It's binary data
   has the following layout:
 
+  .. tabularcolumns:: |p{7ex}|p{12ex}|L|
+
   =============  ==============   ====================================
-  Offset	  Name		  Description
+  Offset	 Name		  Description
   =============  ==============   ====================================
-  0x00		  expected	  Number of expected RDS blocks
-  0x02		  received	  Number of received RDS blocks
-  0x04		  uncorrectable	  Number of uncorrectable RDS blocks
+  0x00		 expected	  Number of expected RDS blocks
+  0x02		 received	  Number of received RDS blocks
+  0x04		 uncorrectable	  Number of uncorrectable RDS blocks
   =============  ==============   ====================================
 
 * /sys/kernel/debug/<device-name>/agc
@@ -77,21 +81,23 @@
 
   The layout is:
 
+  .. tabularcolumns:: |p{7ex}|p{12ex}|L|
+
   =============  ==============   ====================================
-  Offset	  Name		  Description
+  Offset	 Name		  Description
   =============  ==============   ====================================
-  0x00		  mxhi		  0 - FM Mixer PD high threshold is
+  0x00		 mxhi		  0 - FM Mixer PD high threshold is
 				  not tripped
 				  1 - FM Mixer PD high threshold is
 				  tripped
-  0x01		  mxlo		  ditto for FM Mixer PD low
-  0x02		  lnahi		  ditto for FM LNA PD high
-  0x03		  lnalo		  ditto for FM LNA PD low
-  0x04		  fmagc1	  FMAGC1 attenuator resistance
+  0x01		 mxlo		  ditto for FM Mixer PD low
+  0x02		 lnahi		  ditto for FM LNA PD high
+  0x03		 lnalo		  ditto for FM LNA PD low
+  0x04		 fmagc1		  FMAGC1 attenuator resistance
 				  (see datasheet for more detail)
-  0x05		  fmagc2	  ditto for FMAGC2
-  0x06		  pgagain	  PGA gain in dB
-  0x07		  fmwblang	  FM/WB LNA Gain in dB
+  0x05		 fmagc2		  ditto for FMAGC2
+  0x06		 pgagain	  PGA gain in dB
+  0x07		 fmwblang	  FM/WB LNA Gain in dB
   =============  ==============   ====================================
 
 * /sys/kernel/debug/<device-name>/rsq
@@ -100,48 +106,50 @@
 
   The layout is:
 
+  .. tabularcolumns:: |p{7ex}|p{12ex}|p{60ex}|
+
   =============  ==============   ====================================
-  Offset	  Name		  Description
+  Offset	 Name		  Description
   =============  ==============   ====================================
-  0x00		  multhint	  0 - multipath value has not crossed
+  0x00		 multhint	  0 - multipath value has not crossed
 				  the Multipath high threshold
 				  1 - multipath value has crossed
 				  the Multipath high threshold
-  0x01		  multlint	  ditto for Multipath low threshold
-  0x02		  snrhint	  0 - received signal's SNR has not
+  0x01		 multlint	  ditto for Multipath low threshold
+  0x02		 snrhint	  0 - received signal's SNR has not
 				  crossed high threshold
 				  1 - received signal's SNR has
 				  crossed high threshold
-  0x03		  snrlint	  ditto for low threshold
-  0x04		  rssihint	  ditto for RSSI high threshold
-  0x05		  rssilint	  ditto for RSSI low threshold
-  0x06		  bltf		  Flag indicating if seek command
+  0x03		 snrlint	  ditto for low threshold
+  0x04		 rssihint	  ditto for RSSI high threshold
+  0x05		 rssilint	  ditto for RSSI low threshold
+  0x06		 bltf		  Flag indicating if seek command
 				  reached/wrapped seek band limit
-  0x07		  snr_ready	  Indicates that SNR metrics is ready
-  0x08		  rssiready	  ditto for RSSI metrics
-  0x09		  injside	  0 - Low-side injection is being used
+  0x07		 snr_ready	  Indicates that SNR metrics is ready
+  0x08		 rssiready	  ditto for RSSI metrics
+  0x09		 injside	  0 - Low-side injection is being used
 				  1 - High-side injection is used
-  0x10		  afcrl		  Flag indicating if AFC rails
-  0x11		  valid		  Flag indicating if channel is valid
-  0x12		  readfreq	  Current tuned frequency
-  0x14		  freqoff	  Signed frequency offset in units of
+  0x10		 afcrl		  Flag indicating if AFC rails
+  0x11		 valid		  Flag indicating if channel is valid
+  0x12		 readfreq	  Current tuned frequency
+  0x14		 freqoff	  Signed frequency offset in units of
 				  2ppm
-  0x15		  rssi		  Signed value of RSSI in dBuV
-  0x16		  snr		  Signed RF SNR in dB
-  0x17		  issi		  Signed Image Strength Signal
+  0x15		 rssi		  Signed value of RSSI in dBuV
+  0x16		 snr		  Signed RF SNR in dB
+  0x17		 issi		  Signed Image Strength Signal
 				  indicator
-  0x18		  lassi		  Signed Low side adjacent Channel
+  0x18		 lassi		  Signed Low side adjacent Channel
 				  Strength indicator
-  0x19		  hassi		  ditto fpr High side
-  0x20		  mult		  Multipath indicator
-  0x21		  dev		  Frequency deviation
-  0x24		  assi		  Adjacent channel SSI
-  0x25		  usn		  Ultrasonic noise indicator
-  0x26		  pilotdev	  Pilot deviation in units of 100 Hz
-  0x27		  rdsdev	  ditto for RDS
-  0x28		  assidev	  ditto for ASSI
-  0x29		  strongdev	  Frequency deviation
-  0x30		  rdspi		  RDS PI code
+  0x19		 hassi		  ditto fpr High side
+  0x20		 mult		  Multipath indicator
+  0x21		 dev		  Frequency deviation
+  0x24		 assi		  Adjacent channel SSI
+  0x25		 usn		  Ultrasonic noise indicator
+  0x26		 pilotdev	  Pilot deviation in units of 100 Hz
+  0x27		 rdsdev		  ditto for RDS
+  0x28		 assidev	  ditto for ASSI
+  0x29		 strongdev	  Frequency deviation
+  0x30		 rdspi		  RDS PI code
   =============  ==============   ====================================
 
 * /sys/kernel/debug/<device-name>/rsq_primary
diff --git a/Documentation/media/v4l-drivers/zr364xx.rst b/Documentation/media/v4l-drivers/zr364xx.rst
index d8d1171..f5280e3 100644
--- a/Documentation/media/v4l-drivers/zr364xx.rst
+++ b/Documentation/media/v4l-drivers/zr364xx.rst
@@ -5,7 +5,7 @@
 
 mail: royale@zerezo.com
 
-.. note:
+.. note::
 
    This documentation is outdated
 
diff --git a/Documentation/media/video.h.rst.exceptions b/Documentation/media/video.h.rst.exceptions
index 8866145..a91aa88 100644
--- a/Documentation/media/video.h.rst.exceptions
+++ b/Documentation/media/video.h.rst.exceptions
@@ -28,13 +28,13 @@
 ignore define VIDEO_CAP_CSS
 
 # some typedefs should point to struct/enums
-replace typedef video_format_t video-format
-replace typedef video_system_t video-system
-replace typedef video_displayformat_t video-displayformat
-replace typedef video_size_t video-size
-replace typedef video_stream_source_t video-stream-source
-replace typedef video_play_state_t video-play-state
-replace typedef video_highlight_t video-highlight
-replace typedef video_spu_t video-spu
-replace typedef video_spu_palette_t video-spu-palette
-replace typedef video_navi_pack_t video-navi-pack
+replace typedef video_format_t :c:type:`video_format`
+replace typedef video_system_t :c:type:`video_system`
+replace typedef video_displayformat_t :c:type:`video_displayformat`
+replace typedef video_size_t :c:type:`video_size`
+replace typedef video_stream_source_t :c:type:`video_stream_source`
+replace typedef video_play_state_t :c:type:`video_play_state`
+replace typedef video_highlight_t :c:type:`video_highlight`
+replace typedef video_spu_t :c:type:`video_spu`
+replace typedef video_spu_palette_t :c:type:`video_spu_palette`
+replace typedef video_navi_pack_t :c:type:`video_navi_pack`
diff --git a/Documentation/media/videodev2.h.rst.exceptions b/Documentation/media/videodev2.h.rst.exceptions
index 9bb9a6c..1d3f27d 100644
--- a/Documentation/media/videodev2.h.rst.exceptions
+++ b/Documentation/media/videodev2.h.rst.exceptions
@@ -15,115 +15,115 @@
 ignore symbol V4L2_COLORSPACE_BT878
 
 # Documented enum v4l2_field
-replace symbol V4L2_FIELD_ALTERNATE v4l2-field
-replace symbol V4L2_FIELD_ANY v4l2-field
-replace symbol V4L2_FIELD_BOTTOM v4l2-field
-replace symbol V4L2_FIELD_INTERLACED v4l2-field
-replace symbol V4L2_FIELD_INTERLACED_BT v4l2-field
-replace symbol V4L2_FIELD_INTERLACED_TB v4l2-field
-replace symbol V4L2_FIELD_NONE v4l2-field
-replace symbol V4L2_FIELD_SEQ_BT v4l2-field
-replace symbol V4L2_FIELD_SEQ_TB v4l2-field
-replace symbol V4L2_FIELD_TOP v4l2-field
+replace symbol V4L2_FIELD_ALTERNATE :c:type:`v4l2_field`
+replace symbol V4L2_FIELD_ANY :c:type:`v4l2_field`
+replace symbol V4L2_FIELD_BOTTOM :c:type:`v4l2_field`
+replace symbol V4L2_FIELD_INTERLACED :c:type:`v4l2_field`
+replace symbol V4L2_FIELD_INTERLACED_BT :c:type:`v4l2_field`
+replace symbol V4L2_FIELD_INTERLACED_TB :c:type:`v4l2_field`
+replace symbol V4L2_FIELD_NONE :c:type:`v4l2_field`
+replace symbol V4L2_FIELD_SEQ_BT :c:type:`v4l2_field`
+replace symbol V4L2_FIELD_SEQ_TB :c:type:`v4l2_field`
+replace symbol V4L2_FIELD_TOP :c:type:`v4l2_field`
 
 # Documented enum v4l2_buf_type
-replace symbol V4L2_BUF_TYPE_SDR_CAPTURE v4l2-buf-type
-replace symbol V4L2_BUF_TYPE_SDR_OUTPUT v4l2-buf-type
-replace symbol V4L2_BUF_TYPE_SLICED_VBI_CAPTURE v4l2-buf-type
-replace symbol V4L2_BUF_TYPE_SLICED_VBI_OUTPUT v4l2-buf-type
-replace symbol V4L2_BUF_TYPE_VBI_CAPTURE v4l2-buf-type
-replace symbol V4L2_BUF_TYPE_VBI_OUTPUT v4l2-buf-type
-replace symbol V4L2_BUF_TYPE_VIDEO_CAPTURE v4l2-buf-type
-replace symbol V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE v4l2-buf-type
-replace symbol V4L2_BUF_TYPE_VIDEO_OUTPUT v4l2-buf-type
-replace symbol V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE v4l2-buf-type
-replace symbol V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY v4l2-buf-type
-replace symbol V4L2_BUF_TYPE_VIDEO_OVERLAY v4l2-buf-type
+replace symbol V4L2_BUF_TYPE_SDR_CAPTURE :c:type:`v4l2_buf_type`
+replace symbol V4L2_BUF_TYPE_SDR_OUTPUT :c:type:`v4l2_buf_type`
+replace symbol V4L2_BUF_TYPE_SLICED_VBI_CAPTURE :c:type:`v4l2_buf_type`
+replace symbol V4L2_BUF_TYPE_SLICED_VBI_OUTPUT :c:type:`v4l2_buf_type`
+replace symbol V4L2_BUF_TYPE_VBI_CAPTURE :c:type:`v4l2_buf_type`
+replace symbol V4L2_BUF_TYPE_VBI_OUTPUT :c:type:`v4l2_buf_type`
+replace symbol V4L2_BUF_TYPE_VIDEO_CAPTURE :c:type:`v4l2_buf_type`
+replace symbol V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :c:type:`v4l2_buf_type`
+replace symbol V4L2_BUF_TYPE_VIDEO_OUTPUT :c:type:`v4l2_buf_type`
+replace symbol V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :c:type:`v4l2_buf_type`
+replace symbol V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY :c:type:`v4l2_buf_type`
+replace symbol V4L2_BUF_TYPE_VIDEO_OVERLAY :c:type:`v4l2_buf_type`
 
 # Documented enum v4l2_tuner_type
-replace symbol V4L2_TUNER_ANALOG_TV v4l2-tuner-type
-replace symbol V4L2_TUNER_RADIO v4l2-tuner-type
-replace symbol V4L2_TUNER_RF v4l2-tuner-type
-replace symbol V4L2_TUNER_SDR v4l2-tuner-type
+replace symbol V4L2_TUNER_ANALOG_TV :c:type:`v4l2_tuner_type`
+replace symbol V4L2_TUNER_RADIO :c:type:`v4l2_tuner_type`
+replace symbol V4L2_TUNER_RF :c:type:`v4l2_tuner_type`
+replace symbol V4L2_TUNER_SDR :c:type:`v4l2_tuner_type`
 
 # Documented enum v4l2_memory
-replace symbol V4L2_MEMORY_DMABUF v4l2-memory
-replace symbol V4L2_MEMORY_MMAP v4l2-memory
-replace symbol V4L2_MEMORY_OVERLAY v4l2-memory
-replace symbol V4L2_MEMORY_USERPTR v4l2-memory
+replace symbol V4L2_MEMORY_DMABUF :c:type:`v4l2_memory`
+replace symbol V4L2_MEMORY_MMAP :c:type:`v4l2_memory`
+replace symbol V4L2_MEMORY_OVERLAY :c:type:`v4l2_memory`
+replace symbol V4L2_MEMORY_USERPTR :c:type:`v4l2_memory`
 
 # Documented enum v4l2_colorspace
-replace symbol V4L2_COLORSPACE_470_SYSTEM_BG v4l2-colorspace
-replace symbol V4L2_COLORSPACE_470_SYSTEM_M v4l2-colorspace
-replace symbol V4L2_COLORSPACE_ADOBERGB v4l2-colorspace
-replace symbol V4L2_COLORSPACE_BT2020 v4l2-colorspace
-replace symbol V4L2_COLORSPACE_DCI_P3 v4l2-colorspace
-replace symbol V4L2_COLORSPACE_DEFAULT v4l2-colorspace
-replace symbol V4L2_COLORSPACE_JPEG v4l2-colorspace
-replace symbol V4L2_COLORSPACE_RAW v4l2-colorspace
-replace symbol V4L2_COLORSPACE_REC709 v4l2-colorspace
-replace symbol V4L2_COLORSPACE_SMPTE170M v4l2-colorspace
-replace symbol V4L2_COLORSPACE_SMPTE240M v4l2-colorspace
-replace symbol V4L2_COLORSPACE_SRGB v4l2-colorspace
+replace symbol V4L2_COLORSPACE_470_SYSTEM_BG :c:type:`v4l2_colorspace`
+replace symbol V4L2_COLORSPACE_470_SYSTEM_M :c:type:`v4l2_colorspace`
+replace symbol V4L2_COLORSPACE_ADOBERGB :c:type:`v4l2_colorspace`
+replace symbol V4L2_COLORSPACE_BT2020 :c:type:`v4l2_colorspace`
+replace symbol V4L2_COLORSPACE_DCI_P3 :c:type:`v4l2_colorspace`
+replace symbol V4L2_COLORSPACE_DEFAULT :c:type:`v4l2_colorspace`
+replace symbol V4L2_COLORSPACE_JPEG :c:type:`v4l2_colorspace`
+replace symbol V4L2_COLORSPACE_RAW :c:type:`v4l2_colorspace`
+replace symbol V4L2_COLORSPACE_REC709 :c:type:`v4l2_colorspace`
+replace symbol V4L2_COLORSPACE_SMPTE170M :c:type:`v4l2_colorspace`
+replace symbol V4L2_COLORSPACE_SMPTE240M :c:type:`v4l2_colorspace`
+replace symbol V4L2_COLORSPACE_SRGB :c:type:`v4l2_colorspace`
 
 # Documented enum v4l2_xfer_func
-replace symbol V4L2_XFER_FUNC_709 v4l2-xfer-func
-replace symbol V4L2_XFER_FUNC_ADOBERGB v4l2-xfer-func
-replace symbol V4L2_XFER_FUNC_DCI_P3 v4l2-xfer-func
-replace symbol V4L2_XFER_FUNC_DEFAULT v4l2-xfer-func
-replace symbol V4L2_XFER_FUNC_NONE v4l2-xfer-func
-replace symbol V4L2_XFER_FUNC_SMPTE2084 v4l2-xfer-func
-replace symbol V4L2_XFER_FUNC_SMPTE240M v4l2-xfer-func
-replace symbol V4L2_XFER_FUNC_SRGB v4l2-xfer-func
+replace symbol V4L2_XFER_FUNC_709 :c:type:`v4l2_xfer_func`
+replace symbol V4L2_XFER_FUNC_ADOBERGB :c:type:`v4l2_xfer_func`
+replace symbol V4L2_XFER_FUNC_DCI_P3 :c:type:`v4l2_xfer_func`
+replace symbol V4L2_XFER_FUNC_DEFAULT :c:type:`v4l2_xfer_func`
+replace symbol V4L2_XFER_FUNC_NONE :c:type:`v4l2_xfer_func`
+replace symbol V4L2_XFER_FUNC_SMPTE2084 :c:type:`v4l2_xfer_func`
+replace symbol V4L2_XFER_FUNC_SMPTE240M :c:type:`v4l2_xfer_func`
+replace symbol V4L2_XFER_FUNC_SRGB :c:type:`v4l2_xfer_func`
 
 # Documented enum v4l2_ycbcr_encoding
-replace symbol V4L2_YCBCR_ENC_601 v4l2-ycbcr-encoding
-replace symbol V4L2_YCBCR_ENC_709 v4l2-ycbcr-encoding
-replace symbol V4L2_YCBCR_ENC_BT2020 v4l2-ycbcr-encoding
-replace symbol V4L2_YCBCR_ENC_BT2020_CONST_LUM v4l2-ycbcr-encoding
-replace symbol V4L2_YCBCR_ENC_DEFAULT v4l2-ycbcr-encoding
-replace symbol V4L2_YCBCR_ENC_SYCC v4l2-ycbcr-encoding
-replace symbol V4L2_YCBCR_ENC_XV601 v4l2-ycbcr-encoding
-replace symbol V4L2_YCBCR_ENC_XV709 v4l2-ycbcr-encoding
-replace symbol V4L2_YCBCR_ENC_SMPTE240M v4l2-ycbcr-encoding
+replace symbol V4L2_YCBCR_ENC_601 :c:type:`v4l2_ycbcr_encoding`
+replace symbol V4L2_YCBCR_ENC_709 :c:type:`v4l2_ycbcr_encoding`
+replace symbol V4L2_YCBCR_ENC_BT2020 :c:type:`v4l2_ycbcr_encoding`
+replace symbol V4L2_YCBCR_ENC_BT2020_CONST_LUM :c:type:`v4l2_ycbcr_encoding`
+replace symbol V4L2_YCBCR_ENC_DEFAULT :c:type:`v4l2_ycbcr_encoding`
+replace symbol V4L2_YCBCR_ENC_SYCC :c:type:`v4l2_ycbcr_encoding`
+replace symbol V4L2_YCBCR_ENC_XV601 :c:type:`v4l2_ycbcr_encoding`
+replace symbol V4L2_YCBCR_ENC_XV709 :c:type:`v4l2_ycbcr_encoding`
+replace symbol V4L2_YCBCR_ENC_SMPTE240M :c:type:`v4l2_ycbcr_encoding`
 
 # Documented enum v4l2_quantization
-replace symbol V4L2_QUANTIZATION_DEFAULT v4l2-quantization
-replace symbol V4L2_QUANTIZATION_FULL_RANGE v4l2-quantization
-replace symbol V4L2_QUANTIZATION_LIM_RANGE v4l2-quantization
+replace symbol V4L2_QUANTIZATION_DEFAULT :c:type:`v4l2_quantization`
+replace symbol V4L2_QUANTIZATION_FULL_RANGE :c:type:`v4l2_quantization`
+replace symbol V4L2_QUANTIZATION_LIM_RANGE :c:type:`v4l2_quantization`
 
 # Documented enum v4l2_priority
-replace symbol V4L2_PRIORITY_BACKGROUND v4l2-priority
-replace symbol V4L2_PRIORITY_DEFAULT v4l2-priority
-replace symbol V4L2_PRIORITY_INTERACTIVE v4l2-priority
-replace symbol V4L2_PRIORITY_RECORD v4l2-priority
-replace symbol V4L2_PRIORITY_UNSET v4l2-priority
+replace symbol V4L2_PRIORITY_BACKGROUND :c:type:`v4l2_priority`
+replace symbol V4L2_PRIORITY_DEFAULT :c:type:`v4l2_priority`
+replace symbol V4L2_PRIORITY_INTERACTIVE :c:type:`v4l2_priority`
+replace symbol V4L2_PRIORITY_RECORD :c:type:`v4l2_priority`
+replace symbol V4L2_PRIORITY_UNSET :c:type:`v4l2_priority`
 
 # Documented enum v4l2_frmsizetypes
-replace symbol V4L2_FRMSIZE_TYPE_CONTINUOUS v4l2-frmsizetypes
-replace symbol V4L2_FRMSIZE_TYPE_DISCRETE v4l2-frmsizetypes
-replace symbol V4L2_FRMSIZE_TYPE_STEPWISE v4l2-frmsizetypes
+replace symbol V4L2_FRMSIZE_TYPE_CONTINUOUS :c:type:`v4l2_frmsizetypes`
+replace symbol V4L2_FRMSIZE_TYPE_DISCRETE :c:type:`v4l2_frmsizetypes`
+replace symbol V4L2_FRMSIZE_TYPE_STEPWISE :c:type:`v4l2_frmsizetypes`
 
 # Documented enum frmivaltypes
-replace symbol V4L2_FRMIVAL_TYPE_CONTINUOUS v4l2-frmivaltypes
-replace symbol V4L2_FRMIVAL_TYPE_DISCRETE v4l2-frmivaltypes
-replace symbol V4L2_FRMIVAL_TYPE_STEPWISE v4l2-frmivaltypes
+replace symbol V4L2_FRMIVAL_TYPE_CONTINUOUS :c:type:`v4l2_frmivaltypes`
+replace symbol V4L2_FRMIVAL_TYPE_DISCRETE :c:type:`v4l2_frmivaltypes`
+replace symbol V4L2_FRMIVAL_TYPE_STEPWISE :c:type:`v4l2_frmivaltypes`
 
-# Documented enum v4l2-ctrl-type
+# Documented enum :c:type:`v4l2_ctrl_type`
 replace symbol V4L2_CTRL_COMPOUND_TYPES vidioc_queryctrl
 
-replace symbol V4L2_CTRL_TYPE_BITMASK v4l2-ctrl-type
-replace symbol V4L2_CTRL_TYPE_BOOLEAN v4l2-ctrl-type
-replace symbol V4L2_CTRL_TYPE_BUTTON v4l2-ctrl-type
-replace symbol V4L2_CTRL_TYPE_CTRL_CLASS v4l2-ctrl-type
-replace symbol V4L2_CTRL_TYPE_INTEGER v4l2-ctrl-type
-replace symbol V4L2_CTRL_TYPE_INTEGER64 v4l2-ctrl-type
-replace symbol V4L2_CTRL_TYPE_INTEGER_MENU v4l2-ctrl-type
-replace symbol V4L2_CTRL_TYPE_MENU v4l2-ctrl-type
-replace symbol V4L2_CTRL_TYPE_STRING v4l2-ctrl-type
-replace symbol V4L2_CTRL_TYPE_U16 v4l2-ctrl-type
-replace symbol V4L2_CTRL_TYPE_U32 v4l2-ctrl-type
-replace symbol V4L2_CTRL_TYPE_U8 v4l2-ctrl-type
+replace symbol V4L2_CTRL_TYPE_BITMASK :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_BOOLEAN :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_BUTTON :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_CTRL_CLASS :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_INTEGER :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_INTEGER64 :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_INTEGER_MENU :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_MENU :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_STRING :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_U16 :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_U32 :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_U8 :c:type:`v4l2_ctrl_type`
 
 # V4L2 capability defines
 replace define V4L2_CAP_VIDEO_CAPTURE device-capabilities
@@ -152,9 +152,10 @@
 replace define V4L2_CAP_ASYNCIO device-capabilities
 replace define V4L2_CAP_STREAMING device-capabilities
 replace define V4L2_CAP_DEVICE_CAPS device-capabilities
+replace define V4L2_CAP_TOUCH device-capabilities
 
 # V4L2 pix flags
-replace define V4L2_PIX_FMT_PRIV_MAGIC v4l2-pix-format
+replace define V4L2_PIX_FMT_PRIV_MAGIC :c:type:`v4l2_pix_format`
 replace define V4L2_PIX_FMT_FLAG_PREMUL_ALPHA reserved-formats
 
 # V4L2 format flags
@@ -204,7 +205,7 @@
 # Used on VIDIOC_G_PARM
 
 replace define V4L2_MODE_HIGHQUALITY parm-flags
-replace define V4L2_CAP_TIMEPERFRAME v4l2-captureparm
+replace define V4L2_CAP_TIMEPERFRAME :c:type:`v4l2_captureparm`
 
 # The V4L2_STD_foo are all defined at v4l2_std_id table
 
@@ -257,22 +258,24 @@
 
 # V4L2 DT BT timings definitions
 
-replace define V4L2_DV_PROGRESSIVE v4l2-bt-timings
-replace define V4L2_DV_INTERLACED v4l2-bt-timings
+replace define V4L2_DV_PROGRESSIVE :c:type:`v4l2_bt_timings`
+replace define V4L2_DV_INTERLACED :c:type:`v4l2_bt_timings`
 
-replace define V4L2_DV_VSYNC_POS_POL v4l2-bt-timings
-replace define V4L2_DV_HSYNC_POS_POL v4l2-bt-timings
+replace define V4L2_DV_VSYNC_POS_POL :c:type:`v4l2_bt_timings`
+replace define V4L2_DV_HSYNC_POS_POL :c:type:`v4l2_bt_timings`
 
 replace define V4L2_DV_BT_STD_CEA861 dv-bt-standards
 replace define V4L2_DV_BT_STD_DMT dv-bt-standards
 replace define V4L2_DV_BT_STD_CVT dv-bt-standards
 replace define V4L2_DV_BT_STD_GTF dv-bt-standards
+replace define V4L2_DV_BT_STD_SDI dv-bt-standards
 
 replace define V4L2_DV_FL_REDUCED_BLANKING dv-bt-standards
 replace define V4L2_DV_FL_CAN_REDUCE_FPS dv-bt-standards
 replace define V4L2_DV_FL_REDUCED_FPS dv-bt-standards
 replace define V4L2_DV_FL_HALF_LINE dv-bt-standards
 replace define V4L2_DV_FL_IS_CE_VIDEO dv-bt-standards
+replace define V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE dv-bt-standards
 
 replace define V4L2_DV_BT_656_1120 dv-timing-types
 
@@ -285,6 +288,7 @@
 
 replace define V4L2_INPUT_TYPE_TUNER input-type
 replace define V4L2_INPUT_TYPE_CAMERA input-type
+replace define V4L2_INPUT_TYPE_TOUCH input-type
 
 replace define V4L2_IN_ST_NO_POWER input-status
 replace define V4L2_IN_ST_NO_SIGNAL input-status
@@ -299,6 +303,8 @@
 replace define V4L2_IN_ST_MACROVISION input-status
 replace define V4L2_IN_ST_NO_ACCESS input-status
 replace define V4L2_IN_ST_VTR input-status
+replace define V4L2_IN_ST_NO_V_LOCK input-status
+replace define V4L2_IN_ST_NO_STD_LOCK input-status
 
 replace define V4L2_IN_CAP_DV_TIMINGS input-capabilities
 replace define V4L2_IN_CAP_STD input-capabilities
@@ -385,11 +391,11 @@
 
 # MPEG
 
-replace define V4L2_ENC_IDX_FRAME_I v4l2-enc-idx
-replace define V4L2_ENC_IDX_FRAME_P v4l2-enc-idx
-replace define V4L2_ENC_IDX_FRAME_B v4l2-enc-idx
-replace define V4L2_ENC_IDX_FRAME_MASK v4l2-enc-idx
-replace define V4L2_ENC_IDX_ENTRIES v4l2-enc-idx
+replace define V4L2_ENC_IDX_FRAME_I :c:type:`v4l2_enc_idx`
+replace define V4L2_ENC_IDX_FRAME_P :c:type:`v4l2_enc_idx`
+replace define V4L2_ENC_IDX_FRAME_B :c:type:`v4l2_enc_idx`
+replace define V4L2_ENC_IDX_FRAME_MASK :c:type:`v4l2_enc_idx`
+replace define V4L2_ENC_IDX_ENTRIES :c:type:`v4l2_enc_idx`
 
 replace define V4L2_ENC_CMD_START encoder-cmds
 replace define V4L2_ENC_CMD_STOP encoder-cmds
@@ -416,10 +422,10 @@
 replace define V4L2_VBI_UNSYNC vbifmt-flags
 replace define V4L2_VBI_INTERLACED vbifmt-flags
 
-replace define V4L2_VBI_ITU_525_F1_START v4l2-vbi-format
-replace define V4L2_VBI_ITU_525_F2_START v4l2-vbi-format
-replace define V4L2_VBI_ITU_625_F1_START v4l2-vbi-format
-replace define V4L2_VBI_ITU_625_F2_START v4l2-vbi-format
+replace define V4L2_VBI_ITU_525_F1_START :c:type:`v4l2_vbi_format`
+replace define V4L2_VBI_ITU_525_F2_START :c:type:`v4l2_vbi_format`
+replace define V4L2_VBI_ITU_625_F1_START :c:type:`v4l2_vbi_format`
+replace define V4L2_VBI_ITU_625_F2_START :c:type:`v4l2_vbi_format`
 
 
 replace define V4L2_SLICED_TELETEXT_B vbi-services
@@ -454,7 +460,7 @@
 
 replace define V4L2_EVENT_SRC_CH_RESOLUTION src-changes-flags
 
-replace define V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ v4l2-event-motion-det
+replace define V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ :c:type:`v4l2_event_motion_det`
 
 replace define V4L2_EVENT_SUB_FL_SEND_INITIAL event-flags
 replace define V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK event-flags
diff --git a/Documentation/mic/Makefile b/Documentation/mic/Makefile
deleted file mode 100644
index a191d45..0000000
--- a/Documentation/mic/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-subdir-y := mpssd
diff --git a/Documentation/mic/mpssd/Makefile b/Documentation/mic/mpssd/Makefile
deleted file mode 100644
index 06871b0..0000000
--- a/Documentation/mic/mpssd/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-ifndef CROSS_COMPILE
-# List of programs to build
-hostprogs-$(CONFIG_X86_64) := mpssd
-
-mpssd-objs := mpssd.o sysfs.o
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-HOSTCFLAGS += -I$(objtree)/usr/include -I$(srctree)/tools/include
-
-ifdef DEBUG
-HOSTCFLAGS += -DDEBUG=$(DEBUG)
-endif
-
-HOSTLOADLIBES_mpssd := -lpthread
-
-install:
-	install mpssd /usr/sbin/mpssd
-	install micctrl /usr/sbin/micctrl
-endif
diff --git a/Documentation/misc-devices/Makefile b/Documentation/misc-devices/Makefile
deleted file mode 100644
index e2b7aa4..0000000
--- a/Documentation/misc-devices/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-subdir-y := mei
diff --git a/Documentation/misc-devices/mei/Makefile b/Documentation/misc-devices/mei/Makefile
deleted file mode 100644
index d758047..0000000
--- a/Documentation/misc-devices/mei/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# List of programs to build
-hostprogs-y := mei-amt-version
-HOSTCFLAGS_mei-amt-version.o += -I$(objtree)/usr/include
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index a7697783..c6beb5f 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -10,8 +10,6 @@
 	- GPLv2 for QLogic Linux qlge NIC Driver
 LICENSE.qlcnic
 	- GPLv2 for QLogic Linux qlcnic NIC Driver
-Makefile
-	- Makefile for docsrc.
 PLIP.txt
 	- PLIP: The Parallel Line Internet Protocol device driver
 README.ipw2100
diff --git a/Documentation/networking/Makefile b/Documentation/networking/Makefile
deleted file mode 100644
index 4c5d7c48..0000000
--- a/Documentation/networking/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-subdir-y := timestamping
diff --git a/Documentation/networking/timestamping/Makefile b/Documentation/networking/timestamping/Makefile
deleted file mode 100644
index 8c20dfa..0000000
--- a/Documentation/networking/timestamping/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# To compile, from the source root
-#
-#    make headers_install
-#    make M=documentation
-
-# List of programs to build
-hostprogs-y := hwtstamp_config timestamping txtimestamp
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-HOSTCFLAGS_timestamping.o += -I$(objtree)/usr/include
-HOSTCFLAGS_txtimestamp.o += -I$(objtree)/usr/include
-HOSTCFLAGS_hwtstamp_config.o += -I$(objtree)/usr/include
diff --git a/Documentation/pcmcia/Makefile b/Documentation/pcmcia/Makefile
deleted file mode 100644
index 47a8fa1..0000000
--- a/Documentation/pcmcia/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# List of programs to build
-hostprogs-y := crc32hash
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-HOSTCFLAGS_crc32hash.o += -I$(objtree)/usr/include
diff --git a/Documentation/pcmcia/devicetable.txt b/Documentation/pcmcia/devicetable.txt
index 199afd1..5f3e00a 100644
--- a/Documentation/pcmcia/devicetable.txt
+++ b/Documentation/pcmcia/devicetable.txt
@@ -27,7 +27,7 @@
 The hex value after "pa" is the hash of product ID string 1, after "pb" for
 string 2 and so on.
 
-Alternatively, you can use crc32hash (see Documentation/pcmcia/crc32hash.c)
+Alternatively, you can use crc32hash (see tools/pcmcia/crc32hash.c)
 to determine the crc32 hash.  Simply pass the string you want to evaluate
 as argument to this program, e.g.:
-$ ./crc32hash "Dual Speed"
+$ tools/pcmcia/crc32hash "Dual Speed"
diff --git a/Documentation/prctl/Makefile b/Documentation/prctl/Makefile
deleted file mode 100644
index 44de308..0000000
--- a/Documentation/prctl/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-ifndef CROSS_COMPILE
-# List of programs to build
-hostprogs-$(CONFIG_X86) := disable-tsc-ctxt-sw-stress-test disable-tsc-on-off-stress-test disable-tsc-test
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-HOSTCFLAGS_disable-tsc-ctxt-sw-stress-test.o += -I$(objtree)/usr/include
-HOSTCFLAGS_disable-tsc-on-off-stress-test.o += -I$(objtree)/usr/include
-HOSTCFLAGS_disable-tsc-test.o += -I$(objtree)/usr/include
-endif
diff --git a/Documentation/ptp/Makefile b/Documentation/ptp/Makefile
deleted file mode 100644
index 293d6c0..0000000
--- a/Documentation/ptp/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# List of programs to build
-hostprogs-y := testptp
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-HOSTCFLAGS_testptp.o += -I$(objtree)/usr/include
-HOSTLOADLIBES_testptp := -lrt
diff --git a/Documentation/scsi/g_NCR5380.txt b/Documentation/scsi/g_NCR5380.txt
index fd88015..e2c1879 100644
--- a/Documentation/scsi/g_NCR5380.txt
+++ b/Documentation/scsi/g_NCR5380.txt
@@ -21,16 +21,6 @@
 The NCR53c400 does not support DMA but it does have Pseudo-DMA which is
 supported by the driver.
 
-If the default configuration does not work for you, you can use the kernel
-command lines (eg using the lilo append command):
-	ncr5380=addr,irq
-	ncr53c400=addr,irq
-	ncr53c400a=addr,irq
-	dtc3181e=addr,irq
-
-The driver does not probe for any addresses or ports other than those in
-the OVERRIDE or given to the kernel as above.
-
 This driver provides some information on what it has detected in
 /proc/scsi/g_NCR5380/x where x is the scsi card number as detected at boot
 time. More info to come in the future.
@@ -38,6 +28,16 @@
 This driver works as a module.
 When included as a module, parameters can be passed on the insmod/modprobe
 command line:
+  irq=xx[,...]	the interrupt(s)
+  base=xx[,...]	the port or base address(es) (for port or memory mapped, resp.)
+  card=xx[,...]	card type(s):
+		0 = NCR5380,
+		1 = NCR53C400,
+		2 = NCR53C400A,
+		3 = Domex Technology Corp 3181E (DTC3181E)
+		4 = Hewlett Packard C2502
+
+These old-style parameters can support only one card:
   ncr_irq=xx   the interrupt
   ncr_addr=xx  the port or base address (for port or memory
                mapped, resp.)
@@ -46,11 +46,19 @@
   ncr_53c400a=1 to set up for a NCR53C400A board
   dtc_3181e=1  to set up for a Domex Technology Corp 3181E board
   hp_c2502=1   to set up for a Hewlett Packard C2502 board
+
 e.g.
-modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1
+OLD: modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1
+NEW: modprobe g_NCR5380 irq=5 base=0x350 card=0
   for a port mapped NCR5380 board or
-modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
-  for a memory mapped NCR53C400 board with interrupts disabled.
+
+OLD: modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
+NEW: modprobe g_NCR5380 irq=255 base=0xc8000 card=1
+  for a memory mapped NCR53C400 board with interrupts disabled or
+
+NEW: modprobe g_NCR5380 irq=0,7 base=0x240,0x300 card=3,4
+  for two cards: DTC3181 (in non-PnP mode) at 0x240 with no IRQ
+             and HP C2502 at 0x300 with IRQ 7
 
 (255 should be specified for no or DMA interrupt, 254 to autoprobe for an 
      IRQ line if overridden on the command line.)
diff --git a/Documentation/sphinx/parse-headers.pl b/Documentation/sphinx/parse-headers.pl
index 74089b0..db0186a 100755
--- a/Documentation/sphinx/parse-headers.pl
+++ b/Documentation/sphinx/parse-headers.pl
@@ -2,12 +2,18 @@
 use strict;
 use Text::Tabs;
 
-# Uncomment if debug is needed
-#use Data::Dumper;
-
-# change to 1 to generate some debug prints
 my $debug = 0;
 
+while ($ARGV[0] =~ m/^-(.*)/) {
+	my $cmd = shift @ARGV;
+	if ($cmd eq "--debug") {
+		require Data::Dumper;
+		$debug = 1;
+		next;
+	}
+	die "argument $cmd unknown";
+}
+
 if (scalar @ARGV < 2 || scalar @ARGV > 3) {
 	die "Usage:\n\t$0 <file in> <file out> [<exceptions file>]\n";
 }
@@ -51,7 +57,7 @@
 		$n =~ tr/A-Z/a-z/;
 		$n =~ tr/_/-/;
 
-		$enum_symbols{$s} = $n;
+		$enum_symbols{$s} =  "\\ :ref:`$s <$n>`\\ ";
 
 		$is_enum = 0 if ($is_enum && m/\}/);
 		next;
@@ -63,7 +69,7 @@
 		my $n = $1;
 		$n =~ tr/A-Z/a-z/;
 
-		$ioctls{$s} = $n;
+		$ioctls{$s} = "\\ :ref:`$s <$n>`\\ ";
 		next;
 	}
 
@@ -73,17 +79,15 @@
 		$n =~ tr/A-Z/a-z/;
 		$n =~ tr/_/-/;
 
-		$defines{$s} = $n;
+		$defines{$s} = "\\ :ref:`$s <$n>`\\ ";
 		next;
 	}
 
-	if ($ln =~ m/^\s*typedef\s+.*\s+([_\w][\w\d_]+);/) {
-		my $s = $1;
-		my $n = $1;
-		$n =~ tr/A-Z/a-z/;
-		$n =~ tr/_/-/;
+	if ($ln =~ m/^\s*typedef\s+([_\w][\w\d_]+)\s+(.*)\s+([_\w][\w\d_]+);/) {
+		my $s = $2;
+		my $n = $3;
 
-		$typedefs{$s} = $n;
+		$typedefs{$n} = "\\ :c:type:`$n <$s>`\\ ";
 		next;
 	}
 	if ($ln =~ m/^\s*enum\s+([_\w][\w\d_]+)\s+\{/
@@ -91,11 +95,8 @@
 	    || $ln =~ m/^\s*typedef\s*enum\s+([_\w][\w\d_]+)\s+\{/
 	    || $ln =~ m/^\s*typedef\s*enum\s+([_\w][\w\d_]+)$/) {
 		my $s = $1;
-		my $n = $1;
-		$n =~ tr/A-Z/a-z/;
-		$n =~ tr/_/-/;
 
-		$enums{$s} = $n;
+		$enums{$s} =  "enum :c:type:`$s`\\ ";
 
 		$is_enum = $1;
 		next;
@@ -106,11 +107,8 @@
 	    || $ln =~ m/^\s*typedef\s*struct\s+([[_\w][\w\d_]+)$/
 	    ) {
 		my $s = $1;
-		my $n = $1;
-		$n =~ tr/A-Z/a-z/;
-		$n =~ tr/_/-/;
 
-		$structs{$s} = $n;
+		$structs{$s} = "struct :c:type:`$s`\\ ";
 		next;
 	}
 }
@@ -123,12 +121,9 @@
 my @matches = ($data =~ m/typedef\s+struct\s+\S+?\s*\{[^\}]+\}\s*(\S+)\s*\;/g,
 	       $data =~ m/typedef\s+enum\s+\S+?\s*\{[^\}]+\}\s*(\S+)\s*\;/g,);
 foreach my $m (@matches) {
-		my $s = $m;
-		my $n = $m;
-		$n =~ tr/A-Z/a-z/;
-		$n =~ tr/_/-/;
+	my $s = $m;
 
-		$typedefs{$s} = $n;
+	$typedefs{$s} = "\\ :c:type:`$s`\\ ";
 	next;
 }
 
@@ -136,6 +131,15 @@
 # Handle exceptions, if any
 #
 
+my %def_reftype = (
+	"ioctl"   => ":ref",
+	"define"  => ":ref",
+	"symbol"  => ":ref",
+	"typedef" => ":c:type",
+	"enum"    => ":c:type",
+	"struct"  => ":c:type",
+);
+
 if ($file_exceptions) {
 	open IN, $file_exceptions or die "Can't read $file_exceptions";
 	while (<IN>) {
@@ -169,29 +173,49 @@
 		}
 
 		# Parsers to replace a symbol
+		my ($type, $old, $new, $reftype);
 
-		if (m/^replace\s+ioctl\s+(\S+)\s+(\S+)/) {
-			$ioctls{$1} = $2 if (exists($ioctls{$1}));
+		if (m/^replace\s+(\S+)\s+(\S+)\s+(\S+)/) {
+			$type = $1;
+			$old = $2;
+			$new = $3;
+		} else {
+			die "Can't parse $file_exceptions: $_";
+		}
+
+		if ($new =~ m/^\:c\:(data|func|macro|type)\:\`(.+)\`/) {
+			$reftype = ":c:$1";
+			$new = $2;
+		} elsif ($new =~ m/\:ref\:\`(.+)\`/) {
+			$reftype = ":ref";
+			$new = $1;
+		} else {
+			$reftype = $def_reftype{$type};
+		}
+		$new = "$reftype:`$old <$new>`";
+
+		if ($type eq "ioctl") {
+			$ioctls{$old} = $new if (exists($ioctls{$old}));
 			next;
 		}
-		if (m/^replace\s+define\s+(\S+)\s+(\S+)/) {
-			$defines{$1} = $2 if (exists($defines{$1}));
+		if ($type eq "define") {
+			$defines{$old} = $new if (exists($defines{$old}));
 			next;
 		}
-		if (m/^replace\s+typedef\s+(\S+)\s+(\S+)/) {
-			$typedefs{$1} = $2 if (exists($typedefs{$1}));
+		if ($type eq "symbol") {
+			$enum_symbols{$old} = $new if (exists($enum_symbols{$old}));
 			next;
 		}
-		if (m/^replace\s+enum\s+(\S+)\s+(\S+)/) {
-			$enums{$1} = $2 if (exists($enums{$1}));
+		if ($type eq "typedef") {
+			$typedefs{$old} = $new if (exists($typedefs{$old}));
 			next;
 		}
-		if (m/^replace\s+symbol\s+(\S+)\s+(\S+)/) {
-			$enum_symbols{$1} = $2 if (exists($enum_symbols{$1}));
+		if ($type eq "enum") {
+			$enums{$old} = $new if (exists($enums{$old}));
 			next;
 		}
-		if (m/^replace\s+struct\s+(\S+)\s+(\S+)/) {
-			$structs{$1} = $2 if (exists($structs{$1}));
+		if ($type eq "struct") {
+			$structs{$old} = $new if (exists($structs{$old}));
 			next;
 		}
 
@@ -232,9 +256,7 @@
 my $end_delim = "(\\s|,|\\\\=|\\\\:|\\;|\\\)|\\}|\\{)";
 
 foreach my $r (keys %ioctls) {
-	my $n = $ioctls{$r};
-
-	my $s = "\\ :ref:`$r <$n>`\\ ";
+	my $s = $ioctls{$r};
 
 	$r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
 
@@ -244,9 +266,7 @@
 }
 
 foreach my $r (keys %defines) {
-	my $n = $defines{$r};
-
-	my $s = "\\ :ref:`$r <$n>`\\ ";
+	my $s = $defines{$r};
 
 	$r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
 
@@ -256,9 +276,7 @@
 }
 
 foreach my $r (keys %enum_symbols) {
-	my $n = $enum_symbols{$r};
-
-	my $s = "\\ :ref:`$r <$n>`\\ ";
+	my $s = $enum_symbols{$r};
 
 	$r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
 
@@ -268,9 +286,7 @@
 }
 
 foreach my $r (keys %enums) {
-	my $n = $enums{$r};
-
-	my $s = "\\ :ref:`enum $r <$n>`\\ ";
+	my $s = $enums{$r};
 
 	$r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
 
@@ -280,9 +296,7 @@
 }
 
 foreach my $r (keys %structs) {
-	my $n = $structs{$r};
-
-	my $s = "\\ :ref:`struct $r <$n>`\\ ";
+	my $s = $structs{$r};
 
 	$r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
 
@@ -292,18 +306,15 @@
 }
 
 foreach my $r (keys %typedefs) {
-	my $n = $typedefs{$r};
-
-	my $s = "\\ :ref:`$r <$n>`\\ ";
+	my $s = $typedefs{$r};
 
 	$r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
 
 	print "$r -> $s\n" if ($debug);
-
 	$data =~ s/($start_delim)($r)$end_delim/$1$s$3/g;
 }
 
-$data =~ s/\\ \n/\n/g;
+$data =~ s/\\ ([\n\s])/\1/g;
 
 #
 # Generate output file
diff --git a/Documentation/spi/00-INDEX b/Documentation/spi/00-INDEX
index 4644bf0..8e4bb17 100644
--- a/Documentation/spi/00-INDEX
+++ b/Documentation/spi/00-INDEX
@@ -1,7 +1,5 @@
 00-INDEX
 	- this file.
-Makefile
-	- Makefile for the example sourcefiles.
 butterfly
 	- AVR Butterfly SPI driver overview and pin configuration.
 ep93xx_spi
diff --git a/Documentation/sync_file.txt b/Documentation/sync_file.txt
index e8e2eba..b63a685 100644
--- a/Documentation/sync_file.txt
+++ b/Documentation/sync_file.txt
@@ -64,6 +64,20 @@
 If the creation process fail, or the sync_file needs to be released by any
 other reason fput(sync_file->file) should be used.
 
+Receiving Sync Files from Userspace
+-----------------------------------
+
+When userspace needs to send an in-fence to the driver it passes file descriptor
+of the Sync File to the kernel. The kernel can then retrieve the fences
+from it.
+
+Interface:
+	struct fence *sync_file_get_fence(int fd);
+
+
+The returned reference is owned by the caller and must be disposed of
+afterwards using fence_put(). In case of error, a NULL is returned instead.
+
 References:
 [1] struct sync_file in include/linux/sync_file.h
 [2] All interfaces mentioned above defined in include/linux/sync_file.h
diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt
index efc3f3d..ef473dc 100644
--- a/Documentation/thermal/sysfs-api.txt
+++ b/Documentation/thermal/sysfs-api.txt
@@ -49,6 +49,9 @@
 	.bind: bind the thermal zone device with a thermal cooling device.
 	.unbind: unbind the thermal zone device with a thermal cooling device.
 	.get_temp: get the current temperature of the thermal zone.
+	.set_trips: set the trip points window. Whenever the current temperature
+		    is updated, the trip points immediately below and above the
+		    current temperature are found.
 	.get_mode: get the current mode (enabled/disabled) of the thermal zone.
 	    - "enabled" means the kernel thermal management is enabled.
 	    - "disabled" will prevent kernel thermal driver action upon trip points
@@ -95,6 +98,10 @@
 			get_temp:	a pointer to a function that reads the
 					sensor temperature. This is mandatory
 					callback provided by sensor driver.
+			set_trips:      a pointer to a function that sets a
+					temperature window. When this window is
+					left the driver must inform the thermal
+					core via thermal_zone_device_update.
 			get_trend: 	a pointer to a function that reads the
 					sensor temperature trend.
 			set_emul_temp:	a pointer to a function that sets
@@ -140,6 +147,18 @@
 	Normally this function will not need to be called and the resource
 	management code will ensure that the resource is freed.
 
+1.1.7 int thermal_zone_get_slope(struct thermal_zone_device *tz)
+
+	This interface is used to read the slope attribute value
+	for the thermal zone device, which might be useful for platform
+	drivers for temperature calculations.
+
+1.1.8 int thermal_zone_get_offset(struct thermal_zone_device *tz)
+
+	This interface is used to read the offset attribute value
+	for the thermal zone device, which might be useful for platform
+	drivers for temperature calculations.
+
 1.2 thermal cooling device interface
 1.2.1 struct thermal_cooling_device *thermal_cooling_device_register(char *name,
 		void *devdata, struct thermal_cooling_device_ops *)
diff --git a/Documentation/timers/00-INDEX b/Documentation/timers/00-INDEX
index ee212a2..3be05fe 100644
--- a/Documentation/timers/00-INDEX
+++ b/Documentation/timers/00-INDEX
@@ -4,12 +4,8 @@
 	- High resolution timers and dynamic ticks design notes
 hpet.txt
 	- High Precision Event Timer Driver for Linux
-hpet_example.c
-	- sample hpet timer test program
 hrtimers.txt
 	- subsystem for high-resolution kernel timers
-Makefile
-	- Build and link hpet_example
 NO_HZ.txt
 	- Summary of the different methods for the scheduler clock-interrupts management.
 timekeeping.txt
diff --git a/Documentation/timers/Makefile b/Documentation/timers/Makefile
deleted file mode 100644
index 6c09ee6..0000000
--- a/Documentation/timers/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# List of programs to build
-hostprogs-$(CONFIG_X86) := hpet_example
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
diff --git a/Documentation/timers/hpet.txt b/Documentation/timers/hpet.txt
index a484d2c..895345e 100644
--- a/Documentation/timers/hpet.txt
+++ b/Documentation/timers/hpet.txt
@@ -25,4 +25,4 @@
 
 The driver provides a userspace API which resembles the API found in the
 RTC driver framework.  An example user space program is provided in
-file:Documentation/timers/hpet_example.c
+file:samples/timers/hpet_example.c
diff --git a/Documentation/vDSO/Makefile b/Documentation/vDSO/Makefile
deleted file mode 100644
index b12e987..0000000
--- a/Documentation/vDSO/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-ifndef CROSS_COMPILE
-# vdso_test won't build for glibc < 2.16, so disable it
-# hostprogs-y := vdso_test
-hostprogs-$(CONFIG_X86) := vdso_standalone_test_x86
-vdso_standalone_test_x86-objs := vdso_standalone_test_x86.o parse_vdso.o
-vdso_test-objs := parse_vdso.o vdso_test.o
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-HOSTCFLAGS := -I$(objtree)/usr/include -std=gnu99
-HOSTCFLAGS_vdso_standalone_test_x86.o := -fno-asynchronous-unwind-tables -fno-stack-protector
-HOSTLOADLIBES_vdso_standalone_test_x86 := -nostdlib
-ifeq ($(CONFIG_X86_32),y)
-HOSTLOADLIBES_vdso_standalone_test_x86 += -lgcc_s
-endif
-endif
diff --git a/Documentation/watchdog/Makefile b/Documentation/watchdog/Makefile
deleted file mode 100644
index 6018f45..0000000
--- a/Documentation/watchdog/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-subdir-y := src
diff --git a/Documentation/watchdog/src/.gitignore b/Documentation/watchdog/src/.gitignore
deleted file mode 100644
index ac90997..0000000
--- a/Documentation/watchdog/src/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-watchdog-simple
-watchdog-test
diff --git a/Documentation/watchdog/src/Makefile b/Documentation/watchdog/src/Makefile
deleted file mode 100644
index 4a892c3..0000000
--- a/Documentation/watchdog/src/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# List of programs to build
-hostprogs-y := watchdog-simple watchdog-test
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
diff --git a/Documentation/watchdog/watchdog-api.txt b/Documentation/watchdog/watchdog-api.txt
index b3a701f..0e62ba3 100644
--- a/Documentation/watchdog/watchdog-api.txt
+++ b/Documentation/watchdog/watchdog-api.txt
@@ -37,7 +37,7 @@
 the watchdog is pinged within a certain time, this time is called the
 timeout or margin.  The simplest way to ping the watchdog is to write
 some data to the device.  So a very simple watchdog daemon would look
-like this source file:  see Documentation/watchdog/src/watchdog-simple.c
+like this source file:  see samples/watchdog/watchdog-simple.c
 
 A more advanced driver could for example check that a HTTP server is
 still responding before doing the write call to ping the watchdog.
diff --git a/Documentation/watchdog/watchdog-kernel-api.txt b/Documentation/watchdog/watchdog-kernel-api.txt
index 7f31125..ea27747 100644
--- a/Documentation/watchdog/watchdog-kernel-api.txt
+++ b/Documentation/watchdog/watchdog-kernel-api.txt
@@ -48,8 +48,10 @@
 	const struct attribute_group **groups;
 	const struct watchdog_info *info;
 	const struct watchdog_ops *ops;
+	const struct watchdog_governor *gov;
 	unsigned int bootstatus;
 	unsigned int timeout;
+	unsigned int pretimeout;
 	unsigned int min_timeout;
 	unsigned int max_timeout;
 	unsigned int min_hw_heartbeat_ms;
@@ -74,9 +76,11 @@
 * info: a pointer to a watchdog_info structure. This structure gives some
   additional information about the watchdog timer itself. (Like it's unique name)
 * ops: a pointer to the list of watchdog operations that the watchdog supports.
+* gov: a pointer to the assigned watchdog device pretimeout governor or NULL.
 * timeout: the watchdog timer's timeout value (in seconds).
   This is the time after which the system will reboot if user space does
   not send a heartbeat request if WDOG_ACTIVE is set.
+* pretimeout: the watchdog timer's pretimeout value (in seconds).
 * min_timeout: the watchdog timer's minimum timeout value (in seconds).
   If set, the minimum configurable value for 'timeout'.
 * max_timeout: the watchdog timer's maximum timeout value (in seconds),
@@ -121,6 +125,7 @@
 	int (*ping)(struct watchdog_device *);
 	unsigned int (*status)(struct watchdog_device *);
 	int (*set_timeout)(struct watchdog_device *, unsigned int);
+	int (*set_pretimeout)(struct watchdog_device *, unsigned int);
 	unsigned int (*get_timeleft)(struct watchdog_device *);
 	int (*restart)(struct watchdog_device *);
 	void (*ref)(struct watchdog_device *) __deprecated;
@@ -188,6 +193,23 @@
   If set_timeout is not provided but, WDIOF_SETTIMEOUT is set, the watchdog
   infrastructure updates the timeout value of the watchdog_device internally
   to the requested value.
+  If the pretimeout feature is used (WDIOF_PRETIMEOUT), then set_timeout must
+  also take care of checking if pretimeout is still valid and set up the timer
+  accordingly. This can't be done in the core without races, so it is the
+  duty of the driver.
+* set_pretimeout: this routine checks and changes the pretimeout value of
+  the watchdog. It is optional because not all watchdogs support pretimeout
+  notification. The timeout value is not an absolute time, but the number of
+  seconds before the actual timeout would happen. It returns 0 on success,
+  -EINVAL for "parameter out of range" and -EIO for "could not write value to
+  the watchdog". A value of 0 disables pretimeout notification.
+  (Note: the WDIOF_PRETIMEOUT needs to be set in the options field of the
+  watchdog's info structure).
+  If the watchdog driver does not have to perform any action but setting the
+  watchdog_device.pretimeout, this callback can be omitted. That means if
+  set_pretimeout is not provided but WDIOF_PRETIMEOUT is set, the watchdog
+  infrastructure updates the pretimeout value of the watchdog_device internally
+  to the requested value.
 * get_timeleft: this routines returns the time that's left before a reset.
 * restart: this routine restarts the machine. It returns 0 on success or a
   negative errno code for failure.
@@ -268,3 +290,14 @@
 * 128: default restart handler, use if no other handler is expected to be
   available, and/or if restart is sufficient to restart the entire system
 * 255: highest priority, will preempt all other restart handlers
+
+To raise a pretimeout notification, the following function should be used:
+
+void watchdog_notify_pretimeout(struct watchdog_device *wdd)
+
+The function can be called in the interrupt context. If watchdog pretimeout
+governor framework (kbuild CONFIG_WATCHDOG_PRETIMEOUT_GOV symbol) is enabled,
+an action is taken by a preconfigured pretimeout governor preassigned to
+the watchdog device. If watchdog pretimeout governor framework is not
+enabled, watchdog_notify_pretimeout() prints a notification message to
+the kernel log buffer.
diff --git a/Documentation/watchdog/wdt.txt b/Documentation/watchdog/wdt.txt
index 061c2e3..ed2f0b8 100644
--- a/Documentation/watchdog/wdt.txt
+++ b/Documentation/watchdog/wdt.txt
@@ -47,4 +47,4 @@
 Minor numbers are however allocated for it.
 
 
-Example Watchdog Driver:  see Documentation/watchdog/src/watchdog-simple.c
+Example Watchdog Driver:  see samples/watchdog/watchdog-simple.c
diff --git a/Documentation/x86/protection-keys.txt b/Documentation/x86/protection-keys.txt
index c281ded..b643045 100644
--- a/Documentation/x86/protection-keys.txt
+++ b/Documentation/x86/protection-keys.txt
@@ -18,10 +18,68 @@
 permissions are enforced on data access only and have no effect on
 instruction fetches.
 
-=========================== Config Option ===========================
+=========================== Syscalls ===========================
 
-This config option adds approximately 1.5kb of text. and 50 bytes of
-data to the executable.  A workload which does large O_DIRECT reads
-of holes in XFS files was run to exercise get_user_pages_fast().  No
-performance delta was observed with the config option
-enabled or disabled.
+There are 3 system calls which directly interact with pkeys:
+
+	int pkey_alloc(unsigned long flags, unsigned long init_access_rights)
+	int pkey_free(int pkey);
+	int pkey_mprotect(unsigned long start, size_t len,
+			  unsigned long prot, int pkey);
+
+Before a pkey can be used, it must first be allocated with
+pkey_alloc().  An application calls the WRPKRU instruction
+directly in order to change access permissions to memory covered
+with a key.  In this example WRPKRU is wrapped by a C function
+called pkey_set().
+
+	int real_prot = PROT_READ|PROT_WRITE;
+	pkey = pkey_alloc(0, PKEY_DENY_WRITE);
+	ptr = mmap(NULL, PAGE_SIZE, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+	ret = pkey_mprotect(ptr, PAGE_SIZE, real_prot, pkey);
+	... application runs here
+
+Now, if the application needs to update the data at 'ptr', it can
+gain access, do the update, then remove its write access:
+
+	pkey_set(pkey, 0); // clear PKEY_DENY_WRITE
+	*ptr = foo; // assign something
+	pkey_set(pkey, PKEY_DENY_WRITE); // set PKEY_DENY_WRITE again
+
+Now when it frees the memory, it will also free the pkey since it
+is no longer in use:
+
+	munmap(ptr, PAGE_SIZE);
+	pkey_free(pkey);
+
+(Note: pkey_set() is a wrapper for the RDPKRU and WRPKRU instructions.
+ An example implementation can be found in
+ tools/testing/selftests/x86/protection_keys.c)
+
+=========================== Behavior ===========================
+
+The kernel attempts to make protection keys consistent with the
+behavior of a plain mprotect().  For instance if you do this:
+
+	mprotect(ptr, size, PROT_NONE);
+	something(ptr);
+
+you can expect the same effects with protection keys when doing this:
+
+	pkey = pkey_alloc(0, PKEY_DISABLE_WRITE | PKEY_DISABLE_READ);
+	pkey_mprotect(ptr, size, PROT_READ|PROT_WRITE, pkey);
+	something(ptr);
+
+That should be true whether something() is a direct access to 'ptr'
+like:
+
+	*ptr = foo;
+
+or when the kernel does the access on the application's behalf like
+with a read():
+
+	read(fd, ptr, 1);
+
+The kernel will send a SIGSEGV in both cases, but si_code will be set
+to SEGV_PKERR when violating protection keys versus SEGV_ACCERR when
+the plain mprotect() permissions are violated.
diff --git a/MAINTAINERS b/MAINTAINERS
index e685724..c72fa18 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -316,6 +316,14 @@
 S:	Supported
 F:	drivers/acpi/fan.c
 
+ACPI FOR ARM64 (ACPI/arm64)
+M:	Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M:	Hanjun Guo <hanjun.guo@linaro.org>
+M:	Sudeep Holla <sudeep.holla@arm.com>
+L:	linux-acpi@vger.kernel.org
+S:	Maintained
+F:	drivers/acpi/arm64
+
 ACPI THERMAL DRIVER
 M:	Zhang Rui <rui.zhang@intel.com>
 L:	linux-acpi@vger.kernel.org
@@ -1702,14 +1710,6 @@
 F:	arch/arm/plat-samsung/s5p-dev-mfc.c
 F:	drivers/media/platform/s5p-mfc/
 
-ARM/SAMSUNG S5P SERIES TV SUBSYSTEM SUPPORT
-M:	Kyungmin Park <kyungmin.park@samsung.com>
-M:	Tomasz Stanislawski <t.stanislaws@samsung.com>
-L:	linux-arm-kernel@lists.infradead.org
-L:	linux-media@vger.kernel.org
-S:	Maintained
-F:	drivers/media/platform/s5p-tv/
-
 ARM/SAMSUNG S5P SERIES HDMI CEC SUBSYSTEM SUPPORT
 M:	Kyungmin Park <kyungmin.park@samsung.com>
 L:	linux-arm-kernel@lists.infradead.org
@@ -2252,9 +2252,9 @@
 F:	drivers/net/wireless/atmel/atmel*
 
 ATMEL MAXTOUCH DRIVER
-M:	Nick Dyer <nick.dyer@itdev.co.uk>
-T:	git git://github.com/atmel-maxtouch/linux.git
-S:	Supported
+M:	Nick Dyer <nick@shmanahar.org>
+T:	git git://github.com/ndyer/linux.git
+S:	Maintained
 F:	Documentation/devicetree/bindings/input/atmel,maxtouch.txt
 F:	drivers/input/touchscreen/atmel_mxt_ts.c
 F:	include/linux/platform_data/atmel_mxt_ts.h
@@ -2812,7 +2812,7 @@
 W:	https://linuxtv.org
 T:	git git://linuxtv.org/media_tree.git
 S:	Odd fixes
-F:	Documentation/video4linux/bttv/
+F:	Documentation/media/v4l-drivers/bttv*
 F:	drivers/media/pci/bt8xx/bttv*
 
 BUSLOGIC SCSI DRIVER
@@ -2857,7 +2857,7 @@
 L:	linux-media@vger.kernel.org
 T:	git git://linuxtv.org/media_tree.git
 S:	Maintained
-F:	Documentation/video4linux/cafe_ccic
+F:	Documentation/media/v4l-drivers/cafe_ccic*
 F:	drivers/media/platform/marvell-ccic/
 
 CAIF NETWORK LAYER
@@ -2959,7 +2959,7 @@
 W:	http://linuxtv.org
 S:	Supported
 F:	Documentation/cec.txt
-F:	Documentation/DocBook/media/v4l/cec*
+F:	Documentation/media/uapi/cec
 F:	drivers/staging/media/cec/
 F:	drivers/media/cec-edid.c
 F:	drivers/media/rc/keymaps/rc-cec.c
@@ -3447,7 +3447,7 @@
 W:	https://linuxtv.org
 W:	http://www.ivtvdriver.org/index.php/Cx18
 S:	Maintained
-F:	Documentation/video4linux/cx18.txt
+F:	Documentation/media/v4l-drivers/cx18*
 F:	drivers/media/pci/cx18/
 F:	include/uapi/linux/ivtv*
 
@@ -3476,7 +3476,7 @@
 W:	https://linuxtv.org
 T:	git git://linuxtv.org/media_tree.git
 S:	Odd fixes
-F:	Documentation/video4linux/cx88/
+F:	Documentation/media/v4l-drivers/cx88*
 F:	drivers/media/pci/cx88/
 
 CXD2820R MEDIA DRIVER
@@ -3957,7 +3957,7 @@
 X:	Documentation/acpi
 X:	Documentation/power
 X:	Documentation/spi
-X:	Documentation/DocBook/media
+X:	Documentation/media
 T:	git git://git.lwn.net/linux.git docs-next
 
 DOUBLETALK DRIVER
@@ -4130,6 +4130,14 @@
 F:	drivers/gpu/drm/i810/
 F:	include/uapi/drm/i810_drm.h
 
+DRM DRIVERS FOR MEDIATEK
+M:	CK Hu <ck.hu@mediatek.com>
+M:	Philipp Zabel <p.zabel@pengutronix.de>
+L:	dri-devel@lists.freedesktop.org
+S:	Supported
+F:	drivers/gpu/drm/mediatek/
+F:	Documentation/devicetree/bindings/display/mediatek/
+
 DRM DRIVER FOR MSM ADRENO GPU
 M:	Rob Clark <robdclark@gmail.com>
 L:	linux-arm-msm@vger.kernel.org
@@ -4664,6 +4672,7 @@
 T:	git git://linuxtv.org/media_tree.git
 S:	Maintained
 F:	drivers/media/usb/em28xx/
+F:	Documentation/media/v4l-drivers/em28xx*
 
 EMBEDDED LINUX
 M:	Paul Gortmaker <paul.gortmaker@windriver.com>
@@ -4774,15 +4783,6 @@
 S:	Maintained
 F:	drivers/iommu/exynos-iommu.c
 
-EXYNOS MIPI DISPLAY DRIVERS
-M:	Inki Dae <inki.dae@samsung.com>
-M:	Donghwa Lee <dh09.lee@samsung.com>
-M:	Kyungmin Park <kyungmin.park@samsung.com>
-L:	linux-fbdev@vger.kernel.org
-S:	Maintained
-F:	drivers/video/fbdev/exynos/exynos_mipi*
-F:	include/video/exynos_mipi*
-
 EZchip NPS platform support
 M:	Noam Camus <noamc@ezchip.com>
 S:	Supported
@@ -4961,12 +4961,9 @@
 F:	drivers/net/wan/sdla.c
 
 FRAMEBUFFER LAYER
-M:	Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
 M:	Tomi Valkeinen <tomi.valkeinen@ti.com>
 L:	linux-fbdev@vger.kernel.org
-W:	http://linux-fbdev.sourceforge.net/
 Q:	http://patchwork.kernel.org/project/linux-fbdev/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/plagnioj/linux-fbdev.git
 S:	Maintained
 F:	Documentation/fb/
 F:	drivers/video/
@@ -5033,6 +5030,13 @@
 F:	drivers/net/ethernet/freescale/fec.h
 F:	Documentation/devicetree/bindings/net/fsl-fec.txt
 
+FREESCALE QORIQ DPAA FMAN DRIVER
+M:	Madalin Bucur <madalin.bucur@nxp.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/ethernet/freescale/fman
+F:	Documentation/devicetree/bindings/powerpc/fsl/fman.txt
+
 FREESCALE QUICC ENGINE LIBRARY
 L:	linuxppc-dev@lists.ozlabs.org
 S:	Orphan
@@ -5423,6 +5427,13 @@
 F:	drivers/staging/greybus/arche-apb-ctrl.c
 F:	drivers/staging/greybus/arche_platform.h
 
+GS1662 VIDEO SERIALIZER
+M:	Charles-Antoine Couret <charles-antoine.couret@nexvision.fr>
+L:	linux-media@vger.kernel.org
+T:	git git://linuxtv.org/media_tree.git
+S:	Maintained
+F:	drivers/media/spi/gs1662.c
+
 GSPCA FINEPIX SUBDRIVER
 M:	Frank Zago <frank@zago.net>
 L:	linux-media@vger.kernel.org
@@ -5798,6 +5809,14 @@
 S:	Maintained
 F:	fs/hugetlbfs/
 
+HVA ST MEDIA DRIVER
+M:	Jean-Christophe Trotin <jean-christophe.trotin@st.com>
+L:	linux-media@vger.kernel.org
+T:	git git://linuxtv.org/media_tree.git
+W:	https://linuxtv.org
+S:	Supported
+F:	drivers/media/platform/sti/hva
+
 Hyper-V CORE AND DRIVERS
 M:	"K. Y. Srinivasan" <kys@microsoft.com>
 M:	Haiyang Zhang <haiyangz@microsoft.com>
@@ -6112,6 +6131,12 @@
 S:	Maintained
 F:	drivers/usb/atm/ueagle-atm.c
 
+IMGTEC ASCII LCD DRIVER
+M:	Paul Burton <paul.burton@imgtec.com>
+S:	Maintained
+F:	Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt
+F:	drivers/auxdisplay/img-ascii-lcd.c
+
 INA209 HARDWARE MONITOR DRIVER
 M:	Guenter Roeck <linux@roeck-us.net>
 L:	linux-hwmon@vger.kernel.org
@@ -6142,6 +6167,12 @@
 S:	Maintained
 F:	drivers/dma/dma-jz4780.c
 
+INGENIC JZ4780 NAND DRIVER
+M:	Harvey Hunt <harveyhuntnexus@gmail.com>
+L:	linux-mtd@lists.infradead.org
+S:	Maintained
+F:	drivers/mtd/nand/jz4780_*
+
 INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
 M:	Mimi Zohar <zohar@linux.vnet.ibm.com>
 M:	Dmitry Kasatkin <dmitry.kasatkin@gmail.com>
@@ -6417,6 +6448,7 @@
 F:	drivers/misc/mei/*
 F:	drivers/watchdog/mei_wdt.c
 F:	Documentation/misc-devices/mei/*
+F:	samples/mei/*
 
 INTEL MIC DRIVERS (mic)
 M:	Sudeep Dutt <sudeep.dutt@intel.com>
@@ -6603,10 +6635,10 @@
 F:	drivers/firmware/iscsi_ibft*
 
 ISCSI
-M:	Mike Christie <michaelc@cs.wisc.edu>
+M:	Lee Duncan <lduncan@suse.com>
+M:	Chris Leech <cleech@redhat.com>
 L:	open-iscsi@googlegroups.com
-W:	www.open-iscsi.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mnc/linux-2.6-iscsi.git
+W:	www.open-iscsi.com
 S:	Maintained
 F:	drivers/scsi/*iscsi*
 F:	include/scsi/*iscsi*
@@ -6676,7 +6708,7 @@
 T:	git git://linuxtv.org/media_tree.git
 W:	http://www.ivtvdriver.org
 S:	Maintained
-F:	Documentation/video4linux/*.ivtv
+F:	Documentation/media/v4l-drivers/ivtv*
 F:	drivers/media/pci/ivtv/
 F:	include/uapi/linux/ivtv*
 
@@ -7184,17 +7216,11 @@
 F:	include/linux/lightnvm.h
 F:	include/uapi/linux/lightnvm.h
 
-LINUX FOR IBM pSERIES (RS/6000)
-M:	Paul Mackerras <paulus@au.ibm.com>
-W:	http://www.ibm.com/linux/ltc/projects/ppc
-S:	Supported
-F:	arch/powerpc/boot/rs6000.h
-
 LINUX FOR POWERPC (32-BIT AND 64-BIT)
 M:	Benjamin Herrenschmidt <benh@kernel.crashing.org>
 M:	Paul Mackerras <paulus@samba.org>
 M:	Michael Ellerman <mpe@ellerman.id.au>
-W:	http://www.penguinppc.org/
+W:	https://github.com/linuxppc/linux/wiki
 L:	linuxppc-dev@lists.ozlabs.org
 Q:	http://patchwork.ozlabs.org/project/linuxppc-dev/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
@@ -7209,6 +7235,7 @@
 F:	drivers/pci/hotplug/pnv_php.c
 F:	drivers/pci/hotplug/rpa*
 F:	drivers/scsi/ibmvscsi/
+F:	tools/testing/selftests/powerpc
 N:	opal
 N:	/pmac
 N:	powermac
@@ -7265,9 +7292,8 @@
 F:	arch/powerpc/platforms/85xx/
 
 LINUX FOR POWERPC PA SEMI PWRFICIENT
-M:	Olof Johansson <olof@lixom.net>
 L:	linuxppc-dev@lists.ozlabs.org
-S:	Maintained
+S:	Orphan
 F:	arch/powerpc/platforms/pasemi/
 F:	drivers/*/*pasemi*
 F:	drivers/*/*/*pasemi*
@@ -7686,6 +7712,15 @@
 F:	drivers/media/platform/rcar-fcp.c
 F:	include/media/rcar-fcp.h
 
+MEDIA DRIVERS FOR RENESAS - VIN
+M:	Niklas Söderlund <niklas.soderlund@ragnatech.se>
+L:	linux-media@vger.kernel.org
+L:	linux-renesas-soc@vger.kernel.org
+T:	git git://linuxtv.org/media_tree.git
+S:	Supported
+F:	Documentation/devicetree/bindings/media/rcar_vin.txt
+F:	drivers/media/platform/rcar-vin/
+
 MEDIA DRIVERS FOR RENESAS - VSP1
 M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 L:	linux-media@vger.kernel.org
@@ -7763,9 +7798,7 @@
 Q:	http://patchwork.kernel.org/project/linux-media/list/
 T:	git git://linuxtv.org/media_tree.git
 S:	Maintained
-F:	Documentation/dvb/
-F:	Documentation/video4linux/
-F:	Documentation/DocBook/media/
+F:	Documentation/media/
 F:	drivers/media/
 F:	drivers/staging/media/
 F:	include/linux/platform_data/media/
@@ -7803,6 +7836,13 @@
 F:	drivers/scsi/megaraid.*
 F:	drivers/scsi/megaraid/
 
+MELFAS MIP4 TOUCHSCREEN DRIVER
+M:	Sangwon Jee <jeesw@melfas.com>
+W:	http://www.melfas.com
+S:	Supported
+F:	drivers/input/touchscreen/melfas_mip4.c
+F:	Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt
+
 MELLANOX ETHERNET DRIVER (mlx4_en)
 M:	Tariq Toukan <tariqt@mellanox.com>
 L:	netdev@vger.kernel.org
@@ -7932,6 +7972,14 @@
 F:	drivers/tty/serial/atmel_serial.c
 F:	include/linux/atmel_serial.h
 
+MICROCHIP / ATMEL ISC DRIVER
+M:	Songjun Wu <songjun.wu@microchip.com>
+L:	linux-media@vger.kernel.org
+S:	Supported
+F:	drivers/media/platform/atmel/atmel-isc.c
+F:	drivers/media/platform/atmel/atmel-isc-regs.h
+F:	devicetree/bindings/media/atmel-isc.txt
+
 MICROSOFT SURFACE PRO 3 BUTTON DRIVER
 M:	Chen Yu <yu.c.chen@intel.com>
 L:	platform-driver-x86@vger.kernel.org
@@ -8059,7 +8107,7 @@
 MOTION EYE VAIO PICTUREBOOK CAMERA DRIVER
 W:	http://popies.net/meye/
 S:	Orphan
-F:	Documentation/video4linux/meye.txt
+F:	Documentation/media/v4l-drivers/meye*
 F:	drivers/media/pci/meye/
 F:	include/uapi/linux/meye.h
 
@@ -8987,15 +9035,13 @@
 F:	drivers/net/wireless/intersil/p54/
 
 PA SEMI ETHERNET DRIVER
-M:	Olof Johansson <olof@lixom.net>
 L:	netdev@vger.kernel.org
-S:	Maintained
+S:	Orphan
 F:	drivers/net/ethernet/pasemi/*
 
 PA SEMI SMBUS DRIVER
-M:	Olof Johansson <olof@lixom.net>
 L:	linux-i2c@vger.kernel.org
-S:	Maintained
+S:	Orphan
 F:	drivers/i2c/busses/i2c-pasemi.c
 
 PADATA PARALLEL EXECUTION MECHANISM
@@ -9157,6 +9203,14 @@
 F:	Documentation/devicetree/bindings/pci/versatile.txt
 F:	drivers/pci/host/pci-versatile.c
 
+PCI DRIVER FOR ARMADA 8K
+M:	Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+L:	linux-pci@vger.kernel.org
+L:	linux-arm-kernel@lists.infradead.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/pci/pci-armada8k.txt
+F:	drivers/pci/host/pcie-armada8k.c
+
 PCI DRIVER FOR APPLIEDMICRO XGENE
 M:	Tanmay Inamdar <tinamdar@apm.com>
 L:	linux-pci@vger.kernel.org
@@ -9203,6 +9257,7 @@
 L:	linux-pci@vger.kernel.org
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
+F:	Documentation/devicetree/bindings/pci/aardvark-pci.txt
 F:	drivers/pci/host/pci-aardvark.c
 
 PCI DRIVER FOR NVIDIA TEGRA
@@ -9335,6 +9390,7 @@
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/brodo/pcmcia.git
 S:	Maintained
 F:	Documentation/pcmcia/
+F:	tools/pcmcia/
 F:	drivers/pcmcia/
 F:	include/pcmcia/
 
@@ -9718,7 +9774,7 @@
 W:	http://www.isely.net/pvrusb2/
 T:	git git://linuxtv.org/media_tree.git
 S:	Maintained
-F:	Documentation/video4linux/README.pvrusb2
+F:	Documentation/media/v4l-drivers/pvrusb2*
 F:	drivers/media/usb/pvrusb2/
 
 PWC WEBCAM DRIVER
@@ -10385,7 +10441,7 @@
 W:	https://linuxtv.org
 T:	git git://linuxtv.org/media_tree.git
 S:	Odd fixes
-F:	Documentation/video4linux/*.saa7134
+F:	Documentation/media/v4l-drivers/saa7134*
 F:	drivers/media/pci/saa7134/
 
 SAA7146 VIDEO4LINUX-2 DRIVER
@@ -10530,6 +10586,13 @@
 F:	Documentation/devicetree/bindings/serial/
 F:	drivers/tty/serial/
 
+STI CEC DRIVER
+M:	Benjamin Gaignard <benjamin.gaignard@linaro.org>
+L:	kernel@stlinux.com
+S:	Maintained
+F:	drivers/staging/media/st-cec/
+F:	Documentation/devicetree/bindings/media/stih-cec.txt
+
 SYNOPSYS DESIGNWARE DMAC DRIVER
 M:	Viresh Kumar <vireshk@kernel.org>
 M:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
@@ -12094,6 +12157,15 @@
 T:	git git://linuxtv.org/media_tree.git
 S:	Odd fixes
 F:	drivers/media/usb/tm6000/
+F:	Documentation/media/v4l-drivers/tm6000*
+
+TW5864 VIDEO4LINUX DRIVER
+M:	Bluecherry Maintainers <maintainers@bluecherrydvr.com>
+M:	Andrey Utkin <andrey.utkin@corp.bluecherry.net>
+M:	Andrey Utkin <andrey_utkin@fastmail.com>
+L:	linux-media@vger.kernel.org
+S:	Supported
+F:	drivers/media/pci/tw5864/
 
 TW68 VIDEO4LINUX DRIVER
 M:	Hans Verkuil <hverkuil@xs4all.nl>
@@ -12586,7 +12658,7 @@
 T:	git git://linuxtv.org/media_tree.git
 W:	http://royale.zerezo.com/zr364xx/
 S:	Maintained
-F:	Documentation/video4linux/zr364xx.txt
+F:	Documentation/media/v4l-drivers/zr364xx*
 F:	drivers/media/usb/zr364xx/
 
 ULPI BUS
@@ -13122,6 +13194,7 @@
 
 XEN NETWORK BACKEND DRIVER
 M:	Wei Liu <wei.liu2@citrix.com>
+M:	Paul Durrant <paul.durrant@citrix.com>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
 L:	netdev@vger.kernel.org
 S:	Supported
diff --git a/Makefile b/Makefile
index addb235..512e47a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
-PATCHLEVEL = 8
+PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
@@ -621,6 +621,12 @@
 
 KBUILD_CFLAGS	+= $(call cc-option,-fno-delete-null-pointer-checks,)
 KBUILD_CFLAGS	+= $(call cc-disable-warning,maybe-uninitialized,)
+KBUILD_CFLAGS	+= $(call cc-disable-warning,frame-address,)
+
+ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+KBUILD_CFLAGS	+= $(call cc-option,-ffunction-sections,)
+KBUILD_CFLAGS	+= $(call cc-option,-fdata-sections,)
+endif
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 KBUILD_CFLAGS	+= -Os
@@ -802,6 +808,10 @@
 KBUILD_LDFLAGS_MODULE += $(LDFLAGS_BUILD_ID)
 LDFLAGS_vmlinux += $(LDFLAGS_BUILD_ID)
 
+ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+LDFLAGS_vmlinux	+= $(call ld-option, --gc-sections,)
+endif
+
 ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
 LDFLAGS_vmlinux	+= $(call ld-option, -X,)
 endif
@@ -926,9 +936,6 @@
 ifdef CONFIG_HEADERS_CHECK
 	$(Q)$(MAKE) -f $(srctree)/Makefile headers_check
 endif
-ifdef CONFIG_BUILD_DOCSRC
-	$(Q)$(MAKE) $(build)=Documentation
-endif
 ifdef CONFIG_GDB_SCRIPTS
 	$(Q)ln -fsn `cd $(srctree) && /bin/pwd`/scripts/gdb/vmlinux-gdb.py
 endif
@@ -941,9 +948,12 @@
 include/generated/autoksyms.h: FORCE
 	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh true
 
-# Final link of vmlinux
-      cmd_link-vmlinux = $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux)
-quiet_cmd_link-vmlinux = LINK    $@
+ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink)
+
+# Final link of vmlinux with optional arch pass after final link
+    cmd_link-vmlinux =                                                 \
+	$(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ;       \
+	$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
 
 vmlinux: scripts/link-vmlinux.sh vmlinux_prereq $(vmlinux-deps) FORCE
 	+$(call if_changed,link-vmlinux)
@@ -1270,6 +1280,7 @@
 
 vmlinuxclean:
 	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean
+	$(Q)$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) clean)
 
 clean: archclean vmlinuxclean
 
diff --git a/arch/Kconfig b/arch/Kconfig
index 180ea33..659bdd0 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -383,6 +383,24 @@
 	  gcc-4.5 on). It is based on the commit "Add fuzzing coverage support"
 	  by Dmitry Vyukov <dvyukov@google.com>.
 
+config GCC_PLUGIN_LATENT_ENTROPY
+	bool "Generate some entropy during boot and runtime"
+	depends on GCC_PLUGINS
+	help
+	  By saying Y here the kernel will instrument some kernel code to
+	  extract some entropy from both original and artificially created
+	  program state.  This will help especially embedded systems where
+	  there is little 'natural' source of entropy normally.  The cost
+	  is some slowdown of the boot process (about 0.5%) and fork and
+	  irq processing.
+
+	  Note that entropy extracted this way is not cryptographically
+	  secure!
+
+	  This plugin was ported from grsecurity/PaX. More information at:
+	   * https://grsecurity.net/
+	   * https://pax.grsecurity.net/
+
 config HAVE_CC_STACKPROTECTOR
 	bool
 	help
@@ -450,6 +468,27 @@
 
 endchoice
 
+config THIN_ARCHIVES
+	bool
+	help
+	  Select this if the architecture wants to use thin archives
+	  instead of ld -r to create the built-in.o files.
+
+config LD_DEAD_CODE_DATA_ELIMINATION
+	bool
+	help
+	  Select this if the architecture wants to do dead code and
+	  data elimination with the linker by compiling with
+	  -ffunction-sections -fdata-sections and linking with
+	  --gc-sections.
+
+	  This requires that the arch annotates or otherwise protects
+	  its external entry points from being discarded. Linker scripts
+	  must also merge .text.*, .data.*, and .bss.* correctly into
+	  output sections. Care must be taken not to pull in unrelated
+	  sections (e.g., '.text.init'). Typically '.' in section names
+	  is used to distinguish them from label names / C identifiers.
+
 config HAVE_ARCH_WITHIN_STACK_FRAMES
 	bool
 	help
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index ffd9cf5..bf8475c 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -3,6 +3,7 @@
 generic-y += clkdev.h
 generic-y += cputime.h
 generic-y += exec.h
+generic-y += export.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index 466e42e..94f5875 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -396,11 +396,12 @@
 extern inline long
 copy_from_user(void *to, const void __user *from, long n)
 {
+	long res = n;
 	if (likely(__access_ok((unsigned long)from, n, get_fs())))
-		n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
-	else
-		memset(to, 0, n);
-	return n;
+		res = __copy_from_user_inatomic(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 extern void __do_clear_user(void);
diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h
index fec1947..02760f6 100644
--- a/arch/alpha/include/uapi/asm/mman.h
+++ b/arch/alpha/include/uapi/asm/mman.h
@@ -78,4 +78,9 @@
 #define MAP_HUGE_SHIFT	26
 #define MAP_HUGE_MASK	0x3f
 
+#define PKEY_DISABLE_ACCESS	0x1
+#define PKEY_DISABLE_WRITE	0x2
+#define PKEY_ACCESS_MASK	(PKEY_DISABLE_ACCESS |\
+				 PKEY_DISABLE_WRITE)
+
 #endif /* __ALPHA_MMAN_H__ */
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index 3ecac01..8ce13d7 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -8,7 +8,7 @@
 
 obj-y    := entry.o traps.o process.o osf_sys.o irq.o \
 	    irq_alpha.o signal.o setup.o ptrace.o time.o \
-	    alpha_ksyms.o systbls.o err_common.o io.o
+	    systbls.o err_common.o io.o
 
 obj-$(CONFIG_VGA_HOSE)	+= console.o
 obj-$(CONFIG_SMP)	+= smp.o
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
deleted file mode 100644
index f4c7ab6..0000000
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * linux/arch/alpha/kernel/alpha_ksyms.c
- *
- * Export the alpha-specific functions that are needed for loadable
- * modules.
- */
-
-#include <linux/module.h>
-#include <asm/console.h>
-#include <asm/uaccess.h>
-#include <asm/checksum.h>
-#include <asm/fpu.h>
-#include <asm/machvec.h>
-
-#include <linux/syscalls.h>
-
-/* these are C runtime functions with special calling conventions: */
-extern void __divl (void);
-extern void __reml (void);
-extern void __divq (void);
-extern void __remq (void);
-extern void __divlu (void);
-extern void __remlu (void);
-extern void __divqu (void);
-extern void __remqu (void);
-
-EXPORT_SYMBOL(alpha_mv);
-EXPORT_SYMBOL(callback_getenv);
-EXPORT_SYMBOL(callback_setenv);
-EXPORT_SYMBOL(callback_save_env);
-
-/* platform dependent support */
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strcpy);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(__memcpy);
-EXPORT_SYMBOL(__memset);
-EXPORT_SYMBOL(___memset);
-EXPORT_SYMBOL(__memsetw);
-EXPORT_SYMBOL(__constant_c_memset);
-EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(clear_page);
-
-EXPORT_SYMBOL(alpha_read_fp_reg);
-EXPORT_SYMBOL(alpha_read_fp_reg_s);
-EXPORT_SYMBOL(alpha_write_fp_reg);
-EXPORT_SYMBOL(alpha_write_fp_reg_s);
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_tcpudp_magic);
-EXPORT_SYMBOL(ip_compute_csum);
-EXPORT_SYMBOL(ip_fast_csum);
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-EXPORT_SYMBOL(csum_ipv6_magic);
-
-#ifdef CONFIG_MATHEMU_MODULE
-extern long (*alpha_fp_emul_imprecise)(struct pt_regs *, unsigned long);
-extern long (*alpha_fp_emul) (unsigned long pc);
-EXPORT_SYMBOL(alpha_fp_emul_imprecise);
-EXPORT_SYMBOL(alpha_fp_emul);
-#endif
-
-/*
- * The following are specially called from the uaccess assembly stubs.
- */
-EXPORT_SYMBOL(__copy_user);
-EXPORT_SYMBOL(__do_clear_user);
-
-/* 
- * SMP-specific symbols.
- */
-
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif /* CONFIG_SMP */
-
-/*
- * The following are special because they're not called
- * explicitly (the C compiler or assembler generates them in
- * response to division operations).  Fortunately, their
- * interface isn't gonna change any time soon now, so it's OK
- * to leave it out of version control.
- */
-# undef memcpy
-# undef memset
-EXPORT_SYMBOL(__divl);
-EXPORT_SYMBOL(__divlu);
-EXPORT_SYMBOL(__divq);
-EXPORT_SYMBOL(__divqu);
-EXPORT_SYMBOL(__reml);
-EXPORT_SYMBOL(__remlu);
-EXPORT_SYMBOL(__remq);
-EXPORT_SYMBOL(__remqu);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memchr);
diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h
index d3398f6a..b7d6960 100644
--- a/arch/alpha/kernel/machvec_impl.h
+++ b/arch/alpha/kernel/machvec_impl.h
@@ -144,9 +144,11 @@
    else beforehand.  Fine.  We'll do it ourselves.  */
 #if 0
 #define ALIAS_MV(system) \
-  struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv")));
+  struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv"))); \
+  EXPORT_SYMBOL(alpha_mv);
 #else
 #define ALIAS_MV(system) \
-  asm(".global alpha_mv\nalpha_mv = " #system "_mv");
+  asm(".global alpha_mv\nalpha_mv = " #system "_mv"); \
+  EXPORT_SYMBOL(alpha_mv);
 #endif
 #endif /* GENERIC */
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index d9ee817..940dfb4 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -157,14 +157,16 @@
 static inline int
 read_int(struct task_struct *task, unsigned long addr, int * data)
 {
-	int copied = access_process_vm(task, addr, data, sizeof(int), 0);
+	int copied = access_process_vm(task, addr, data, sizeof(int),
+			FOLL_FORCE);
 	return (copied == sizeof(int)) ? 0 : -EIO;
 }
 
 static inline int
 write_int(struct task_struct *task, unsigned long addr, int data)
 {
-	int copied = access_process_vm(task, addr, &data, sizeof(int), 1);
+	int copied = access_process_vm(task, addr, &data, sizeof(int),
+			FOLL_FORCE | FOLL_WRITE);
 	return (copied == sizeof(int)) ? 0 : -EIO;
 }
 
@@ -281,7 +283,8 @@
 	/* When I and D space are separate, these will need to be fixed.  */
 	case PTRACE_PEEKTEXT: /* read word at location addr. */
 	case PTRACE_PEEKDATA:
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+		copied = access_process_vm(child, addr, &tmp, sizeof(tmp),
+				FOLL_FORCE);
 		ret = -EIO;
 		if (copied != sizeof(tmp))
 			break;
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index b20af76..4811e54 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -115,6 +115,7 @@
 
 #ifdef CONFIG_ALPHA_GENERIC
 struct alpha_machine_vector alpha_mv;
+EXPORT_SYMBOL(alpha_mv);
 #endif
 
 #ifndef alpha_using_srm
diff --git a/arch/alpha/lib/callback_srm.S b/arch/alpha/lib/callback_srm.S
index 8804bec..6093add 100644
--- a/arch/alpha/lib/callback_srm.S
+++ b/arch/alpha/lib/callback_srm.S
@@ -3,6 +3,7 @@
  */
 
 #include <asm/console.h>
+#include <asm/export.h>
 
 .text
 #define HWRPB_CRB_OFFSET 0xc0
@@ -92,6 +93,10 @@
 CALLBACK(save_env, CCB_SAVE_ENV, 1)
 CALLBACK(pswitch, CCB_PSWITCH, 3)
 CALLBACK(bios_emul, CCB_BIOS_EMUL, 5)
+
+EXPORT_SYMBOL(callback_getenv)
+EXPORT_SYMBOL(callback_setenv)
+EXPORT_SYMBOL(callback_save_env)
 	
 .data
 __alpha_using_srm:		# For use by bootpheader
diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c
index 377f9e3..b57f800 100644
--- a/arch/alpha/lib/checksum.c
+++ b/arch/alpha/lib/checksum.c
@@ -48,6 +48,7 @@
 		(__force u64)saddr + (__force u64)daddr +
 		(__force u64)sum + ((len + proto) << 8));
 }
+EXPORT_SYMBOL(csum_tcpudp_magic);
 
 __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 			  __u32 len, __u8 proto, __wsum sum)
@@ -144,6 +145,7 @@
 {
 	return (__force __sum16)~do_csum(iph,ihl*4);
 }
+EXPORT_SYMBOL(ip_fast_csum);
 
 /*
  * computes the checksum of a memory block at buff, length len,
@@ -178,3 +180,4 @@
 {
 	return (__force __sum16)~from64to16(do_csum(buff,len));
 }
+EXPORT_SYMBOL(ip_compute_csum);
diff --git a/arch/alpha/lib/clear_page.S b/arch/alpha/lib/clear_page.S
index a221ae2..263d739 100644
--- a/arch/alpha/lib/clear_page.S
+++ b/arch/alpha/lib/clear_page.S
@@ -3,7 +3,7 @@
  *
  * Zero an entire page.
  */
-
+#include <asm/export.h>
 	.text
 	.align 4
 	.global clear_page
@@ -37,3 +37,4 @@
 	nop
 
 	.end clear_page
+	EXPORT_SYMBOL(clear_page)
diff --git a/arch/alpha/lib/clear_user.S b/arch/alpha/lib/clear_user.S
index 8860316..bf5b931 100644
--- a/arch/alpha/lib/clear_user.S
+++ b/arch/alpha/lib/clear_user.S
@@ -24,6 +24,7 @@
  * Clobbers:
  *	$1,$2,$3,$4,$5,$6
  */
+#include <asm/export.h>
 
 /* Allow an exception for an insn; exit if we get one.  */
 #define EX(x,y...)			\
@@ -111,3 +112,4 @@
 	ret	$31, ($28), 1	# .. e1 :
 
 	.end __do_clear_user
+	EXPORT_SYMBOL(__do_clear_user)
diff --git a/arch/alpha/lib/copy_page.S b/arch/alpha/lib/copy_page.S
index 9f3b974..2ee0bd0 100644
--- a/arch/alpha/lib/copy_page.S
+++ b/arch/alpha/lib/copy_page.S
@@ -3,7 +3,7 @@
  *
  * Copy an entire page.
  */
-
+#include <asm/export.h>
 	.text
 	.align 4
 	.global copy_page
@@ -47,3 +47,4 @@
 	nop
 
 	.end copy_page
+	EXPORT_SYMBOL(copy_page)
diff --git a/arch/alpha/lib/copy_user.S b/arch/alpha/lib/copy_user.S
index 6f3fab9..509f62b 100644
--- a/arch/alpha/lib/copy_user.S
+++ b/arch/alpha/lib/copy_user.S
@@ -26,6 +26,8 @@
  *	$1,$2,$3,$4,$5,$6,$7
  */
 
+#include <asm/export.h>
+
 /* Allow an exception for an insn; exit if we get one.  */
 #define EXI(x,y...)			\
 	99: x,##y;			\
@@ -124,22 +126,9 @@
 	bis $31,$31,$0
 $41:
 $35:
+$exitin:
 $exitout:
 	ret $31,($28),1
 
-$exitin:
-	/* A stupid byte-by-byte zeroing of the rest of the output
-	   buffer.  This cures security holes by never leaving 
-	   random kernel data around to be copied elsewhere.  */
-
-	mov $0,$1
-$101:
-	EXO ( ldq_u $2,0($6) )
-	subq $1,1,$1
-	mskbl $2,$6,$2
-	EXO ( stq_u $2,0($6) )
-	addq $6,1,$6
-	bgt $1,$101
-	ret $31,($28),1
-
 	.end __copy_user
+EXPORT_SYMBOL(__copy_user)
diff --git a/arch/alpha/lib/csum_ipv6_magic.S b/arch/alpha/lib/csum_ipv6_magic.S
index 2c2acb9..e74b4544 100644
--- a/arch/alpha/lib/csum_ipv6_magic.S
+++ b/arch/alpha/lib/csum_ipv6_magic.S
@@ -12,6 +12,7 @@
  * added by Ivan Kokshaysky <ink@jurassic.park.msu.ru>
  */
 
+#include <asm/export.h>
 	.globl csum_ipv6_magic
 	.align 4
 	.ent csum_ipv6_magic
@@ -113,3 +114,4 @@
 	ret			# .. e1 :
 
 	.end csum_ipv6_magic
+	EXPORT_SYMBOL(csum_ipv6_magic)
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index 5675dca..b4ff3b6 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -374,6 +374,7 @@
 	}
 	return (__force __wsum)checksum;
 }
+EXPORT_SYMBOL(csum_partial_copy_from_user);
 
 __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
@@ -386,3 +387,4 @@
 	set_fs(oldfs);
 	return checksum;
 }
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/alpha/lib/dec_and_lock.c b/arch/alpha/lib/dec_and_lock.c
index f9f5fe8..4221b40 100644
--- a/arch/alpha/lib/dec_and_lock.c
+++ b/arch/alpha/lib/dec_and_lock.c
@@ -7,6 +7,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/atomic.h>
+#include <linux/export.h>
 
   asm (".text					\n\
 	.global _atomic_dec_and_lock		\n\
@@ -39,3 +40,4 @@
 	spin_unlock(lock);
 	return 0;
 }
+EXPORT_SYMBOL(_atomic_dec_and_lock);
diff --git a/arch/alpha/lib/divide.S b/arch/alpha/lib/divide.S
index 2d1a048..1e33bd1 100644
--- a/arch/alpha/lib/divide.S
+++ b/arch/alpha/lib/divide.S
@@ -45,6 +45,7 @@
  *	$28 - compare status
  */
 
+#include <asm/export.h>
 #define halt .long 0
 
 /*
@@ -151,6 +152,7 @@
 	addq	$30,STACK,$30
 	ret	$31,($23),1
 	.end	ufunction
+EXPORT_SYMBOL(ufunction)
 
 /*
  * Uhh.. Ugly signed division. I'd rather not have it at all, but
@@ -193,3 +195,4 @@
 	addq	$30,STACK,$30
 	ret	$31,($23),1
 	.end	sfunction
+EXPORT_SYMBOL(sfunction)
diff --git a/arch/alpha/lib/ev6-clear_page.S b/arch/alpha/lib/ev6-clear_page.S
index adf4f7b..abe99e6 100644
--- a/arch/alpha/lib/ev6-clear_page.S
+++ b/arch/alpha/lib/ev6-clear_page.S
@@ -3,7 +3,7 @@
  *
  * Zero an entire page.
  */
-
+#include <asm/export.h>
         .text
         .align 4
         .global clear_page
@@ -52,3 +52,4 @@
 	nop
 
 	.end clear_page
+	EXPORT_SYMBOL(clear_page)
diff --git a/arch/alpha/lib/ev6-clear_user.S b/arch/alpha/lib/ev6-clear_user.S
index 4f42a16..05bef6b 100644
--- a/arch/alpha/lib/ev6-clear_user.S
+++ b/arch/alpha/lib/ev6-clear_user.S
@@ -43,6 +43,7 @@
  *	want to leave a hole (and we also want to avoid repeating lots of work)
  */
 
+#include <asm/export.h>
 /* Allow an exception for an insn; exit if we get one.  */
 #define EX(x,y...)			\
 	99: x,##y;			\
@@ -222,4 +223,4 @@
 	nop			# .. E  .. ..	:
 	ret	$31, ($28), 1	# L0 .. .. ..	: L U L U
 	.end __do_clear_user
-
+	EXPORT_SYMBOL(__do_clear_user)
diff --git a/arch/alpha/lib/ev6-copy_page.S b/arch/alpha/lib/ev6-copy_page.S
index b789db1..7793506 100644
--- a/arch/alpha/lib/ev6-copy_page.S
+++ b/arch/alpha/lib/ev6-copy_page.S
@@ -56,7 +56,7 @@
    destination pages are in the dcache, but it is my guess that this is
    less important than the dcache miss case.  */
 
-
+#include <asm/export.h>
 	.text
 	.align 4
 	.global copy_page
@@ -201,3 +201,4 @@
 	nop
 
 	.end copy_page
+	EXPORT_SYMBOL(copy_page)
diff --git a/arch/alpha/lib/ev6-copy_user.S b/arch/alpha/lib/ev6-copy_user.S
index db42ffe..be720b5 100644
--- a/arch/alpha/lib/ev6-copy_user.S
+++ b/arch/alpha/lib/ev6-copy_user.S
@@ -37,6 +37,7 @@
  *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
  */
 
+#include <asm/export.h>
 /* Allow an exception for an insn; exit if we get one.  */
 #define EXI(x,y...)			\
 	99: x,##y;			\
@@ -227,33 +228,12 @@
 	bgt $0,$onebyteloop	# U  .. .. ..	: U L U L
 
 $zerolength:
+$exitin:
 $exitout:			# Destination for exception recovery(?)
 	nop			# .. .. .. E
 	nop			# .. .. E  ..
 	nop			# .. E  .. ..
 	ret $31,($28),1		# L0 .. .. ..	: L U L U
 
-$exitin:
-
-	/* A stupid byte-by-byte zeroing of the rest of the output
-	   buffer.  This cures security holes by never leaving 
-	   random kernel data around to be copied elsewhere.  */
-
-	nop
-	nop
-	nop
-	mov	$0,$1
-
-$101:
-	EXO ( stb $31,0($6) )	# L
-	subq $1,1,$1		# E
-	addq $6,1,$6		# E
-	bgt $1,$101		# U
-
-	nop
-	nop
-	nop
-	ret $31,($28),1		# L0
-
 	.end __copy_user
-
+	EXPORT_SYMBOL(__copy_user)
diff --git a/arch/alpha/lib/ev6-csum_ipv6_magic.S b/arch/alpha/lib/ev6-csum_ipv6_magic.S
index fc0bc39..de62627 100644
--- a/arch/alpha/lib/ev6-csum_ipv6_magic.S
+++ b/arch/alpha/lib/ev6-csum_ipv6_magic.S
@@ -52,6 +52,7 @@
  * may cause additional delay in rare cases (load-load replay traps).
  */
 
+#include <asm/export.h>
 	.globl csum_ipv6_magic
 	.align 4
 	.ent csum_ipv6_magic
@@ -148,3 +149,4 @@
 	ret			# L0 : L U L U
 
 	.end csum_ipv6_magic
+	EXPORT_SYMBOL(csum_ipv6_magic)
diff --git a/arch/alpha/lib/ev6-divide.S b/arch/alpha/lib/ev6-divide.S
index 2a82b9b..d18dc0e 100644
--- a/arch/alpha/lib/ev6-divide.S
+++ b/arch/alpha/lib/ev6-divide.S
@@ -55,6 +55,7 @@
  * Try not to change the actual algorithm if possible for consistency.
  */
 
+#include <asm/export.h>
 #define halt .long 0
 
 /*
@@ -205,6 +206,7 @@
 	addq	$30,STACK,$30		# E :
 	ret	$31,($23),1		# L0 : L U U L
 	.end	ufunction
+EXPORT_SYMBOL(ufunction)
 
 /*
  * Uhh.. Ugly signed division. I'd rather not have it at all, but
@@ -257,3 +259,4 @@
 	addq	$30,STACK,$30		# E :
 	ret	$31,($23),1		# L0 : L U U L
 	.end	sfunction
+EXPORT_SYMBOL(sfunction)
diff --git a/arch/alpha/lib/ev6-memchr.S b/arch/alpha/lib/ev6-memchr.S
index 1a5f71b..419adc5 100644
--- a/arch/alpha/lib/ev6-memchr.S
+++ b/arch/alpha/lib/ev6-memchr.S
@@ -27,7 +27,7 @@
  *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
  * Try not to change the actual algorithm if possible for consistency.
  */
-
+#include <asm/export.h>
         .set noreorder
         .set noat
 
@@ -189,3 +189,4 @@
 	ret			# L0 :
 
         .end memchr
+	EXPORT_SYMBOL(memchr)
diff --git a/arch/alpha/lib/ev6-memcpy.S b/arch/alpha/lib/ev6-memcpy.S
index 52b37b0..b19798b 100644
--- a/arch/alpha/lib/ev6-memcpy.S
+++ b/arch/alpha/lib/ev6-memcpy.S
@@ -19,7 +19,7 @@
  * Temp usage notes:
  *	$1,$2,		- scratch
  */
-
+#include <asm/export.h>
 	.set noreorder
 	.set noat
 
@@ -242,6 +242,7 @@
 	nop				# E :
 
 	.end memcpy
+	EXPORT_SYMBOL(memcpy)
 
 /* For backwards module compatibility.  */
 __memcpy = memcpy
diff --git a/arch/alpha/lib/ev6-memset.S b/arch/alpha/lib/ev6-memset.S
index 356bb2f..fed21c6 100644
--- a/arch/alpha/lib/ev6-memset.S
+++ b/arch/alpha/lib/ev6-memset.S
@@ -26,7 +26,7 @@
  * as fixes will need to be made in multiple places.  The performance gain
  * is worth it.
  */
-
+#include <asm/export.h>
 	.set noat
 	.set noreorder
 .text
@@ -229,6 +229,7 @@
 	nop
 	ret $31,($26),1		# L0 :
 	.end ___memset
+	EXPORT_SYMBOL(___memset)
 
 	/*
 	 * This is the original body of code, prior to replication and
@@ -406,6 +407,7 @@
 	nop
 	ret $31,($26),1		# L0 :
 	.end __constant_c_memset
+	EXPORT_SYMBOL(__constant_c_memset)
 
 	/*
 	 * This is a replicant of the __constant_c_memset code, rescheduled
@@ -594,6 +596,9 @@
 	ret $31,($26),1		# L0 :
 
 	.end __memsetw
+	EXPORT_SYMBOL(__memsetw)
 
 memset = ___memset
 __memset = ___memset
+	EXPORT_SYMBOL(memset)
+	EXPORT_SYMBOL(__memset)
diff --git a/arch/alpha/lib/ev67-strcat.S b/arch/alpha/lib/ev67-strcat.S
index c426fe3..b69f604 100644
--- a/arch/alpha/lib/ev67-strcat.S
+++ b/arch/alpha/lib/ev67-strcat.S
@@ -19,7 +19,7 @@
  * string once.
  */
 
-
+#include <asm/export.h>
 	.text
 
 	.align 4
@@ -52,3 +52,4 @@
 	br	__stxcpy	# L0 :
 
 	.end strcat
+	EXPORT_SYMBOL(strcat)
diff --git a/arch/alpha/lib/ev67-strchr.S b/arch/alpha/lib/ev67-strchr.S
index fbb7b4f..ea8f2f3 100644
--- a/arch/alpha/lib/ev67-strchr.S
+++ b/arch/alpha/lib/ev67-strchr.S
@@ -15,7 +15,7 @@
  *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
  * Try not to change the actual algorithm if possible for consistency.
  */
-
+#include <asm/export.h>
 #include <asm/regdef.h>
 
 	.set noreorder
@@ -86,3 +86,4 @@
 	ret			# L0 :
 
 	.end strchr
+	EXPORT_SYMBOL(strchr)
diff --git a/arch/alpha/lib/ev67-strlen.S b/arch/alpha/lib/ev67-strlen.S
index 5039280..736fd41 100644
--- a/arch/alpha/lib/ev67-strlen.S
+++ b/arch/alpha/lib/ev67-strlen.S
@@ -17,7 +17,7 @@
  *	U	- upper subcluster; U0 - subcluster U0; U1 - subcluster U1
  *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
  */
-
+#include <asm/export.h>
 	.set noreorder
 	.set noat
 
@@ -47,3 +47,4 @@
 	ret	$31, ($26)	# L0 :
 
 	.end	strlen
+	EXPORT_SYMBOL(strlen)
diff --git a/arch/alpha/lib/ev67-strncat.S b/arch/alpha/lib/ev67-strncat.S
index 4ae716c..cd35cba 100644
--- a/arch/alpha/lib/ev67-strncat.S
+++ b/arch/alpha/lib/ev67-strncat.S
@@ -20,7 +20,7 @@
  * Try not to change the actual algorithm if possible for consistency.
  */
 
-
+#include <asm/export.h>
 	.text
 
 	.align 4
@@ -92,3 +92,4 @@
 	ret			# L0 :
 
 	.end strncat
+	EXPORT_SYMBOL(strncat)
diff --git a/arch/alpha/lib/ev67-strrchr.S b/arch/alpha/lib/ev67-strrchr.S
index dd0d8c6..747455f 100644
--- a/arch/alpha/lib/ev67-strrchr.S
+++ b/arch/alpha/lib/ev67-strrchr.S
@@ -18,7 +18,7 @@
  *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
  */
 
-
+#include <asm/export.h>
 #include <asm/regdef.h>
 
 	.set noreorder
@@ -107,3 +107,4 @@
 	nop
 
 	.end strrchr
+	EXPORT_SYMBOL(strrchr)
diff --git a/arch/alpha/lib/fpreg.c b/arch/alpha/lib/fpreg.c
index 05017ba..4aa6dbf 100644
--- a/arch/alpha/lib/fpreg.c
+++ b/arch/alpha/lib/fpreg.c
@@ -4,6 +4,9 @@
  * (C) Copyright 1998 Linus Torvalds
  */
 
+#include <linux/compiler.h>
+#include <linux/export.h>
+
 #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
 #define STT(reg,val)  asm volatile ("ftoit $f"#reg",%0" : "=r"(val));
 #else
@@ -52,6 +55,7 @@
 	}
 	return val;
 }
+EXPORT_SYMBOL(alpha_read_fp_reg);
 
 #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
 #define LDT(reg,val)  asm volatile ("itoft %0,$f"#reg : : "r"(val));
@@ -97,6 +101,7 @@
 	      case 31: LDT(31, val); break;
 	}
 }
+EXPORT_SYMBOL(alpha_write_fp_reg);
 
 #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
 #define STS(reg,val)  asm volatile ("ftois $f"#reg",%0" : "=r"(val));
@@ -146,6 +151,7 @@
 	}
 	return val;
 }
+EXPORT_SYMBOL(alpha_read_fp_reg_s);
 
 #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
 #define LDS(reg,val)  asm volatile ("itofs %0,$f"#reg : : "r"(val));
@@ -191,3 +197,4 @@
 	      case 31: LDS(31, val); break;
 	}
 }
+EXPORT_SYMBOL(alpha_write_fp_reg_s);
diff --git a/arch/alpha/lib/memchr.S b/arch/alpha/lib/memchr.S
index 14427ee..c13d3ec 100644
--- a/arch/alpha/lib/memchr.S
+++ b/arch/alpha/lib/memchr.S
@@ -31,7 +31,7 @@
       - only minimum number of quadwords may be accessed
       - the third argument is an unsigned long
 */
-
+#include <asm/export.h>
         .set noreorder
         .set noat
 
@@ -162,3 +162,4 @@
 	ret			# .. e1 :
 
         .end memchr
+	EXPORT_SYMBOL(memchr)
diff --git a/arch/alpha/lib/memcpy.c b/arch/alpha/lib/memcpy.c
index 64083fc..57d9291 100644
--- a/arch/alpha/lib/memcpy.c
+++ b/arch/alpha/lib/memcpy.c
@@ -16,6 +16,7 @@
  */
 
 #include <linux/types.h>
+#include <linux/export.h>
 
 /*
  * This should be done in one go with ldq_u*2/mask/stq_u. Do it
@@ -158,6 +159,4 @@
 	__memcpy_unaligned_up ((unsigned long) dest, (unsigned long) src, n);
 	return dest;
 }
-
-/* For backward modules compatibility, define __memcpy.  */
-asm("__memcpy = memcpy; .globl __memcpy");
+EXPORT_SYMBOL(memcpy);
diff --git a/arch/alpha/lib/memmove.S b/arch/alpha/lib/memmove.S
index eb3b6e0..6872c85 100644
--- a/arch/alpha/lib/memmove.S
+++ b/arch/alpha/lib/memmove.S
@@ -6,7 +6,7 @@
  * This is hand-massaged output from the original memcpy.c.  We defer to
  * memcpy whenever possible; the backwards copy loops are not unrolled.
  */
-        
+#include <asm/export.h>        
 	.set noat
 	.set noreorder
 	.text
@@ -179,3 +179,4 @@
 	nop
 
 	.end memmove
+	EXPORT_SYMBOL(memmove)
diff --git a/arch/alpha/lib/memset.S b/arch/alpha/lib/memset.S
index 76ccc6d..89a26f5 100644
--- a/arch/alpha/lib/memset.S
+++ b/arch/alpha/lib/memset.S
@@ -13,7 +13,7 @@
  * The scheduling comments are according to the EV5 documentation (and done by 
  * hand, so they might well be incorrect, please do tell me about it..)
  */
-
+#include <asm/export.h>
 	.set noat
 	.set noreorder
 .text
@@ -106,6 +106,8 @@
 end:
 	ret $31,($26),1		/* E1 */
 	.end ___memset
+EXPORT_SYMBOL(___memset)
+EXPORT_SYMBOL(__constant_c_memset)
 
 	.align 5
 	.ent __memsetw
@@ -122,6 +124,9 @@
 	br __constant_c_memset	/* .. E1 */
 
 	.end __memsetw
+EXPORT_SYMBOL(__memsetw)
 
 memset = ___memset
 __memset = ___memset
+	EXPORT_SYMBOL(memset)
+	EXPORT_SYMBOL(__memset)
diff --git a/arch/alpha/lib/strcat.S b/arch/alpha/lib/strcat.S
index 393f503..249837b 100644
--- a/arch/alpha/lib/strcat.S
+++ b/arch/alpha/lib/strcat.S
@@ -4,6 +4,7 @@
  *
  * Append a null-terminated string from SRC to DST.
  */
+#include <asm/export.h>
 
 	.text
 
@@ -50,3 +51,4 @@
 	br	__stxcpy
 
 	.end strcat
+EXPORT_SYMBOL(strcat);
diff --git a/arch/alpha/lib/strchr.S b/arch/alpha/lib/strchr.S
index 011a175..7412a17 100644
--- a/arch/alpha/lib/strchr.S
+++ b/arch/alpha/lib/strchr.S
@@ -5,7 +5,7 @@
  * Return the address of a given character within a null-terminated
  * string, or null if it is not found.
  */
-
+#include <asm/export.h>
 #include <asm/regdef.h>
 
 	.set noreorder
@@ -68,3 +68,4 @@
 	ret			# .. e1 :
 
 	.end strchr
+	EXPORT_SYMBOL(strchr)
diff --git a/arch/alpha/lib/strcpy.S b/arch/alpha/lib/strcpy.S
index e0728e4..98deae1 100644
--- a/arch/alpha/lib/strcpy.S
+++ b/arch/alpha/lib/strcpy.S
@@ -5,7 +5,7 @@
  * Copy a null-terminated string from SRC to DST.  Return a pointer
  * to the null-terminator in the source.
  */
-
+#include <asm/export.h>
 	.text
 
 	.align 3
@@ -21,3 +21,4 @@
 	br	__stxcpy	# do the copy
 
 	.end strcpy
+	EXPORT_SYMBOL(strcpy)
diff --git a/arch/alpha/lib/strlen.S b/arch/alpha/lib/strlen.S
index fe63353..79c416f 100644
--- a/arch/alpha/lib/strlen.S
+++ b/arch/alpha/lib/strlen.S
@@ -11,7 +11,7 @@
  *	  do this instead of the 9 instructions that
  *	  binary search needs).
  */
-
+#include <asm/export.h>
 	.set noreorder
 	.set noat
 
@@ -55,3 +55,4 @@
 	ret	$31, ($26)
 
 	.end	strlen
+	EXPORT_SYMBOL(strlen)
diff --git a/arch/alpha/lib/strncat.S b/arch/alpha/lib/strncat.S
index a827816..6c29ea6 100644
--- a/arch/alpha/lib/strncat.S
+++ b/arch/alpha/lib/strncat.S
@@ -9,7 +9,7 @@
  * past count, whereas libc may write to count+1.  This follows the generic
  * implementation in lib/string.c and is, IMHO, more sensible.
  */
-
+#include <asm/export.h>
 	.text
 
 	.align 3
@@ -82,3 +82,4 @@
 	ret
 
 	.end strncat
+	EXPORT_SYMBOL(strncat)
diff --git a/arch/alpha/lib/strncpy.S b/arch/alpha/lib/strncpy.S
index a46f7f3..e102cf1 100644
--- a/arch/alpha/lib/strncpy.S
+++ b/arch/alpha/lib/strncpy.S
@@ -10,7 +10,7 @@
  * version has cropped that bit o' nastiness as well as assuming that
  * __stxncpy is in range of a branch.
  */
-
+#include <asm/export.h>
 	.set noat
 	.set noreorder
 
@@ -79,3 +79,4 @@
 	ret
 
 	.end	strncpy
+	EXPORT_SYMBOL(strncpy)
diff --git a/arch/alpha/lib/strrchr.S b/arch/alpha/lib/strrchr.S
index 1970dc0..4bc6cb4 100644
--- a/arch/alpha/lib/strrchr.S
+++ b/arch/alpha/lib/strrchr.S
@@ -5,7 +5,7 @@
  * Return the address of the last occurrence of a given character
  * within a null-terminated string, or null if it is not found.
  */
-
+#include <asm/export.h>
 #include <asm/regdef.h>
 
 	.set noreorder
@@ -85,3 +85,4 @@
 	ret			# .. e1 :
 
 	.end strrchr
+	EXPORT_SYMBOL(strrchr)
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 6cb3736..d347bbc 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -107,13 +107,13 @@
 	struct user_regs_struct uregs;
 
 	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
-	if (!err)
-		set_current_blocked(&set);
-
 	err |= __copy_from_user(&uregs.scratch,
 				&(sf->uc.uc_mcontext.regs.scratch),
 				sizeof(sf->uc.uc_mcontext.regs.scratch));
+	if (err)
+		return err;
 
+	set_current_blocked(&set);
 	regs->bta	= uregs.scratch.bta;
 	regs->lp_start	= uregs.scratch.lp_start;
 	regs->lp_end	= uregs.scratch.lp_end;
@@ -138,7 +138,7 @@
 	regs->r0	= uregs.scratch.r0;
 	regs->sp	= uregs.scratch.sp;
 
-	return err;
+	return 0;
 }
 
 static inline int is_do_ss_needed(unsigned int magic)
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
index 55c0e95..6bbb1fe 100644
--- a/arch/arm/boot/dts/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -9,6 +9,7 @@
 
 #include "am33xx.dtsi"
 #include "am335x-bone-common.dtsi"
+#include <dt-bindings/display/tda998x.h>
 
 / {
 	model = "TI AM335x BeagleBone Black";
@@ -64,6 +65,16 @@
 			AM33XX_IOPAD(0x9b0, PIN_OUTPUT_PULLDOWN | MUX_MODE3)	/* xdma_event_intr0 */
 		>;
 	};
+
+	mcasp0_pins: mcasp0_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x9ac, PIN_INPUT_PULLUP | MUX_MODE0) /* mcasp0_ahcklx.mcasp0_ahclkx */
+			AM33XX_IOPAD(0x99c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mcasp0_ahclkr.mcasp0_axr2*/
+			AM33XX_IOPAD(0x994, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mcasp0_fsx.mcasp0_fsx */
+			AM33XX_IOPAD(0x990, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mcasp0_aclkx.mcasp0_aclkx */
+			AM33XX_IOPAD(0x86c, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a11.GPIO1_27 */
+		>;
+	};
 };
 
 &lcdc {
@@ -76,16 +87,22 @@
 };
 
 &i2c0 {
-	tda19988 {
+	tda19988: tda19988 {
 		compatible = "nxp,tda998x";
 		reg = <0x70>;
+
 		pinctrl-names = "default", "off";
 		pinctrl-0 = <&nxp_hdmi_bonelt_pins>;
 		pinctrl-1 = <&nxp_hdmi_bonelt_off_pins>;
 
-		port {
-			hdmi_0: endpoint@0 {
-				remote-endpoint = <&lcdc_0>;
+		#sound-dai-cells = <0>;
+		audio-ports = <	TDA998x_I2S	0x03>;
+
+		ports {
+			port@0 {
+				hdmi_0: endpoint@0 {
+					remote-endpoint = <&lcdc_0>;
+				};
 			};
 		};
 	};
@@ -94,3 +111,49 @@
 &rtc {
 	system-power-controller;
 };
+
+&mcasp0	{
+	#sound-dai-cells = <0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&mcasp0_pins>;
+	status = "okay";
+	op-mode = <0>;	/* MCASP_IIS_MODE */
+	tdm-slots = <2>;
+	serial-dir = <	/* 0: INACTIVE, 1: TX, 2: RX */
+			0 0 1 0
+		>;
+	tx-num-evt = <32>;
+	rx-num-evt = <32>;
+};
+
+/ {
+	clk_mcasp0_fixed: clk_mcasp0_fixed {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <24576000>;
+	};
+
+	clk_mcasp0: clk_mcasp0 {
+		#clock-cells = <0>;
+		compatible = "gpio-gate-clock";
+		clocks = <&clk_mcasp0_fixed>;
+		enable-gpios = <&gpio1 27 0>; /* BeagleBone Black Clk enable on GPIO1_27 */
+	};
+
+	sound {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "TI BeagleBone Black";
+		simple-audio-card,format = "i2s";
+		simple-audio-card,bitclock-master = <&dailink0_master>;
+		simple-audio-card,frame-master = <&dailink0_master>;
+
+		dailink0_master: simple-audio-card,cpu {
+			sound-dai = <&mcasp0>;
+			clocks = <&clk_mcasp0>;
+		};
+
+		simple-audio-card,codec {
+			sound-dai = <&tda19988>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index 5430747..91096a4 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -740,6 +740,18 @@
 				 <&clk_s_c0_flexgen CLK_ETH_PHY>;
 		};
 
+		cec: sti-cec@094a087c {
+			compatible = "st,stih-cec";
+			reg = <0x94a087c 0x64>;
+			clocks = <&clk_sysin>;
+			clock-names = "cec-clk";
+			interrupts = <GIC_SPI 140 IRQ_TYPE_NONE>;
+			interrupt-names = "cec-irq";
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_cec0_default>;
+			resets = <&softreset STIH407_LPM_SOFTRESET>;
+		};
+
 		rng10: rng@08a89000 {
 			compatible      = "st,rng";
 			reg		= <0x08a89000 0x1000>;
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index e52b824..53994f9 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
@@ -2045,44 +2045,32 @@
 	thermal-zones {
 		cpu {
 			trips {
-				trip {
+				cpu-shutdown-trip {
 					temperature = <101000>;
 					hysteresis = <0>;
 					type = "critical";
 				};
 			};
-
-			cooling-maps {
-				/* There are currently no cooling maps because there are no cooling devices */
-			};
 		};
 
 		mem {
 			trips {
-				trip {
+				mem-shutdown-trip {
 					temperature = <101000>;
 					hysteresis = <0>;
 					type = "critical";
 				};
 			};
-
-			cooling-maps {
-				/* There are currently no cooling maps because there are no cooling devices */
-			};
 		};
 
 		gpu {
 			trips {
-				trip {
+				gpu-shutdown-trip {
 					temperature = <101000>;
 					hysteresis = <0>;
 					type = "critical";
 				};
 			};
-
-			cooling-maps {
-				/* There are currently no cooling maps because there are no cooling devices */
-			};
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index ea340f9..187a36c 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -851,7 +851,9 @@
 
 	soctherm: thermal-sensor@700e2000 {
 		compatible = "nvidia,tegra124-soctherm";
-		reg = <0x0 0x700e2000 0x0 0x1000>;
+		reg = <0x0 0x700e2000 0x0 0x600 /* SOC_THERM reg_base */
+			0x0 0x60006000 0x0 0x400>; /* CAR reg_base */
+		reg-names = "soctherm-reg", "car-reg";
 		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&tegra_car TEGRA124_CLK_TSENSOR>,
 			<&tegra_car TEGRA124_CLK_SOC_THERM>;
@@ -859,6 +861,15 @@
 		resets = <&tegra_car 78>;
 		reset-names = "soctherm";
 		#thermal-sensor-cells = <1>;
+
+		throttle-cfgs {
+			throttle_heavy: heavy {
+				nvidia,priority = <100>;
+				nvidia,cpu-throt-percent = <85>;
+
+				#cooling-cells = <2>;
+			};
+		};
 	};
 
 	dfll: clock@70110000 {
@@ -1154,6 +1165,26 @@
 
 			thermal-sensors =
 				<&soctherm TEGRA124_SOCTHERM_SENSOR_CPU>;
+
+			trips {
+				cpu-shutdown-trip {
+					temperature = <103000>;
+					hysteresis = <0>;
+					type = "critical";
+				};
+				cpu_throttle_trip: throttle-trip {
+					temperature = <100000>;
+					hysteresis = <1000>;
+					type = "hot";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu_throttle_trip>;
+					cooling-device = <&throttle_heavy 1 1>;
+				};
+			};
 		};
 
 		mem {
@@ -1162,6 +1193,21 @@
 
 			thermal-sensors =
 				<&soctherm TEGRA124_SOCTHERM_SENSOR_MEM>;
+
+			trips {
+				mem-shutdown-trip {
+					temperature = <103000>;
+					hysteresis = <0>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				/*
+				 * There are currently no cooling maps,
+				 * because there are no cooling devices.
+				 */
+			};
 		};
 
 		gpu {
@@ -1170,6 +1216,26 @@
 
 			thermal-sensors =
 				<&soctherm TEGRA124_SOCTHERM_SENSOR_GPU>;
+
+			trips {
+				gpu-shutdown-trip {
+					temperature = <101000>;
+					hysteresis = <0>;
+					type = "critical";
+				};
+				gpu_throttle_trip: throttle-trip {
+					temperature = <99000>;
+					hysteresis = <1000>;
+					type = "hot";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&gpu_throttle_trip>;
+					cooling-device = <&throttle_heavy 1 1>;
+				};
+			};
 		};
 
 		pllx {
@@ -1178,6 +1244,21 @@
 
 			thermal-sensors =
 				<&soctherm TEGRA124_SOCTHERM_SENSOR_PLLX>;
+
+			trips {
+				pllx-shutdown-trip {
+					temperature = <103000>;
+					hysteresis = <0>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				/*
+				 * There are currently no cooling maps,
+				 * because there are no cooling devices.
+				 */
+			};
 		};
 	};
 
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 4e484f4..c58f684 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -168,8 +168,6 @@
 CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=y
 CONFIG_DRM_NXP_PTN3460=y
 CONFIG_DRM_PARADE_PS8622=y
-CONFIG_EXYNOS_VIDEO=y
-CONFIG_EXYNOS_MIPI_DSI=y
 CONFIG_LCD_CLASS_DEVICE=y
 CONFIG_LCD_PLATFORM=y
 CONFIG_BACKLIGHT_PWM=y
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index 1568cb5..7546b3c 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -138,7 +138,7 @@
 	.setkey			= ghash_setkey,
 	.descsize		= sizeof(struct ghash_desc_ctx),
 	.base			= {
-		.cra_name	= "ghash",
+		.cra_name	= "__ghash",
 		.cra_driver_name = "__driver-ghash-ce",
 		.cra_priority	= 0,
 		.cra_flags	= CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_INTERNAL,
@@ -220,6 +220,27 @@
 	}
 }
 
+static int ghash_async_import(struct ahash_request *req, const void *in)
+{
+	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+
+	desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm);
+	desc->flags = req->base.flags;
+
+	return crypto_shash_import(desc, in);
+}
+
+static int ghash_async_export(struct ahash_request *req, void *out)
+{
+	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+
+	return crypto_shash_export(desc, out);
+}
+
 static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
 			      unsigned int keylen)
 {
@@ -268,7 +289,10 @@
 	.final			= ghash_async_final,
 	.setkey			= ghash_async_setkey,
 	.digest			= ghash_async_digest,
+	.import			= ghash_async_import,
+	.export			= ghash_async_export,
 	.halg.digestsize	= GHASH_DIGEST_SIZE,
+	.halg.statesize		= sizeof(struct ghash_desc_ctx),
 	.halg.base		= {
 		.cra_name	= "ghash",
 		.cra_driver_name = "ghash-ce",
diff --git a/arch/arm/crypto/sha1-armv7-neon.S b/arch/arm/crypto/sha1-armv7-neon.S
index dcd01f3..2468fad 100644
--- a/arch/arm/crypto/sha1-armv7-neon.S
+++ b/arch/arm/crypto/sha1-armv7-neon.S
@@ -12,7 +12,6 @@
 #include <asm/assembler.h>
 
 .syntax unified
-.code   32
 .fpu neon
 
 .text
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 55e0e3e..0745538 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -8,6 +8,7 @@
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += export.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
 generic-y += irq_regs.h
diff --git a/arch/arm/include/asm/trusted_foundations.h b/arch/arm/include/asm/trusted_foundations.h
index 624e1d4..0074835 100644
--- a/arch/arm/include/asm/trusted_foundations.h
+++ b/arch/arm/include/asm/trusted_foundations.h
@@ -26,7 +26,6 @@
 #ifndef __ASM_ARM_TRUSTED_FOUNDATIONS_H
 #define __ASM_ARM_TRUSTED_FOUNDATIONS_H
 
-#include <linux/kconfig.h>
 #include <linux/printk.h>
 #include <linux/bug.h>
 #include <linux/of.h>
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index a93c0f9..1f59ea05 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -533,11 +533,12 @@
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (access_ok(VERIFY_READ, from, n))
-		n = __copy_from_user(to, from, n);
-	else /* security hole - plug it */
-		memset(to, 0, n);
-	return n;
+	unsigned long res = n;
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		res = __copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index ad325a8..68c2c09 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -33,7 +33,7 @@
 obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
 obj-$(CONFIG_FIQ)		+= fiq.o fiqasm.o
-obj-$(CONFIG_MODULES)		+= armksyms.o module.o
+obj-$(CONFIG_MODULES)		+= module.o
 obj-$(CONFIG_ARM_MODULE_PLTS)	+= module-plts.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
deleted file mode 100644
index 7e45f69..0000000
--- a/arch/arm/kernel/armksyms.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- *  linux/arch/arm/kernel/armksyms.c
- *
- *  Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/cryptohash.h>
-#include <linux/delay.h>
-#include <linux/in6.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <linux/arm-smccc.h>
-
-#include <asm/checksum.h>
-#include <asm/ftrace.h>
-
-/*
- * libgcc functions - functions that are used internally by the
- * compiler...  (prototypes are not correct though, but that
- * doesn't really matter since they're not versioned).
- */
-extern void __ashldi3(void);
-extern void __ashrdi3(void);
-extern void __divsi3(void);
-extern void __lshrdi3(void);
-extern void __modsi3(void);
-extern void __muldi3(void);
-extern void __ucmpdi2(void);
-extern void __udivsi3(void);
-extern void __umodsi3(void);
-extern void __do_div64(void);
-extern void __bswapsi2(void);
-extern void __bswapdi2(void);
-
-extern void __aeabi_idiv(void);
-extern void __aeabi_idivmod(void);
-extern void __aeabi_lasr(void);
-extern void __aeabi_llsl(void);
-extern void __aeabi_llsr(void);
-extern void __aeabi_lmul(void);
-extern void __aeabi_uidiv(void);
-extern void __aeabi_uidivmod(void);
-extern void __aeabi_ulcmp(void);
-
-extern void fpundefinstr(void);
-
-void mmioset(void *, unsigned int, size_t);
-void mmiocpy(void *, const void *, size_t);
-
-	/* platform dependent support */
-EXPORT_SYMBOL(arm_delay_ops);
-
-	/* networking */
-EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(__csum_ipv6_magic);
-
-	/* io */
-#ifndef __raw_readsb
-EXPORT_SYMBOL(__raw_readsb);
-#endif
-#ifndef __raw_readsw
-EXPORT_SYMBOL(__raw_readsw);
-#endif
-#ifndef __raw_readsl
-EXPORT_SYMBOL(__raw_readsl);
-#endif
-#ifndef __raw_writesb
-EXPORT_SYMBOL(__raw_writesb);
-#endif
-#ifndef __raw_writesw
-EXPORT_SYMBOL(__raw_writesw);
-#endif
-#ifndef __raw_writesl
-EXPORT_SYMBOL(__raw_writesl);
-#endif
-
-	/* string / mem functions */
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(__memzero);
-
-EXPORT_SYMBOL(mmioset);
-EXPORT_SYMBOL(mmiocpy);
-
-#ifdef CONFIG_MMU
-EXPORT_SYMBOL(copy_page);
-
-EXPORT_SYMBOL(arm_copy_from_user);
-EXPORT_SYMBOL(arm_copy_to_user);
-EXPORT_SYMBOL(arm_clear_user);
-
-EXPORT_SYMBOL(__get_user_1);
-EXPORT_SYMBOL(__get_user_2);
-EXPORT_SYMBOL(__get_user_4);
-EXPORT_SYMBOL(__get_user_8);
-
-#ifdef __ARMEB__
-EXPORT_SYMBOL(__get_user_64t_1);
-EXPORT_SYMBOL(__get_user_64t_2);
-EXPORT_SYMBOL(__get_user_64t_4);
-EXPORT_SYMBOL(__get_user_32t_8);
-#endif
-
-EXPORT_SYMBOL(__put_user_1);
-EXPORT_SYMBOL(__put_user_2);
-EXPORT_SYMBOL(__put_user_4);
-EXPORT_SYMBOL(__put_user_8);
-#endif
-
-	/* gcc lib functions */
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__divsi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__modsi3);
-EXPORT_SYMBOL(__muldi3);
-EXPORT_SYMBOL(__ucmpdi2);
-EXPORT_SYMBOL(__udivsi3);
-EXPORT_SYMBOL(__umodsi3);
-EXPORT_SYMBOL(__do_div64);
-EXPORT_SYMBOL(__bswapsi2);
-EXPORT_SYMBOL(__bswapdi2);
-
-#ifdef CONFIG_AEABI
-EXPORT_SYMBOL(__aeabi_idiv);
-EXPORT_SYMBOL(__aeabi_idivmod);
-EXPORT_SYMBOL(__aeabi_lasr);
-EXPORT_SYMBOL(__aeabi_llsl);
-EXPORT_SYMBOL(__aeabi_llsr);
-EXPORT_SYMBOL(__aeabi_lmul);
-EXPORT_SYMBOL(__aeabi_uidiv);
-EXPORT_SYMBOL(__aeabi_uidivmod);
-EXPORT_SYMBOL(__aeabi_ulcmp);
-#endif
-
-	/* bitops */
-EXPORT_SYMBOL(_set_bit);
-EXPORT_SYMBOL(_test_and_set_bit);
-EXPORT_SYMBOL(_clear_bit);
-EXPORT_SYMBOL(_test_and_clear_bit);
-EXPORT_SYMBOL(_change_bit);
-EXPORT_SYMBOL(_test_and_change_bit);
-EXPORT_SYMBOL(_find_first_zero_bit_le);
-EXPORT_SYMBOL(_find_next_zero_bit_le);
-EXPORT_SYMBOL(_find_first_bit_le);
-EXPORT_SYMBOL(_find_next_bit_le);
-
-#ifdef __ARMEB__
-EXPORT_SYMBOL(_find_first_zero_bit_be);
-EXPORT_SYMBOL(_find_next_zero_bit_be);
-EXPORT_SYMBOL(_find_first_bit_be);
-EXPORT_SYMBOL(_find_next_bit_be);
-#endif
-
-#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_OLD_MCOUNT
-EXPORT_SYMBOL(mcount);
-#endif
-EXPORT_SYMBOL(__gnu_mcount_nc);
-#endif
-
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
-EXPORT_SYMBOL(__pv_phys_pfn_offset);
-EXPORT_SYMBOL(__pv_offset);
-#endif
-
-#ifdef CONFIG_HAVE_ARM_SMCCC
-EXPORT_SYMBOL(arm_smccc_smc);
-EXPORT_SYMBOL(arm_smccc_hvc);
-#endif
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
index c73c403..b629d3f 100644
--- a/arch/arm/kernel/entry-ftrace.S
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -7,6 +7,7 @@
 #include <asm/assembler.h>
 #include <asm/ftrace.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 #include "entry-header.S"
 
@@ -153,6 +154,7 @@
 	__mcount _old
 #endif
 ENDPROC(mcount)
+EXPORT_SYMBOL(mcount)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(ftrace_caller_old)
@@ -205,6 +207,7 @@
 #endif
 UNWIND(.fnend)
 ENDPROC(__gnu_mcount_nc)
+EXPORT_SYMBOL(__gnu_mcount_nc)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(ftrace_caller)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 04286fd..f41cee4 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -22,6 +22,7 @@
 #include <asm/memory.h>
 #include <asm/thread_info.h>
 #include <asm/pgtable.h>
+#include <asm/export.h>
 
 #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
 #include CONFIG_DEBUG_LL_INCLUDE
@@ -727,6 +728,8 @@
 __pv_offset:
 	.quad	0
 	.size	__pv_offset, . -__pv_offset
+EXPORT_SYMBOL(__pv_phys_pfn_offset)
+EXPORT_SYMBOL(__pv_offset)
 #endif
 
 #include "head-common.S"
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 612eb53..91d2d5b 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -318,8 +318,7 @@
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	unsigned long range_end = mm->brk + 0x02000000;
-	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+	return randomize_page(mm->brk, 0x02000000);
 }
 
 #ifdef CONFIG_MMU
diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S
index 2e48b67..37669e7 100644
--- a/arch/arm/kernel/smccc-call.S
+++ b/arch/arm/kernel/smccc-call.S
@@ -16,6 +16,7 @@
 #include <asm/opcodes-sec.h>
 #include <asm/opcodes-virt.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 	/*
 	 * Wrap c macros in asm macros to delay expansion until after the
@@ -51,6 +52,7 @@
 ENTRY(arm_smccc_smc)
 	SMCCC SMCCC_SMC
 ENDPROC(arm_smccc_smc)
+EXPORT_SYMBOL(arm_smccc_smc)
 
 /*
  * void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
@@ -60,3 +62,4 @@
 ENTRY(arm_smccc_hvc)
 	SMCCC SMCCC_HVC
 ENDPROC(arm_smccc_hvc)
+EXPORT_SYMBOL(arm_smccc_hvc)
diff --git a/arch/arm/lib/ashldi3.S b/arch/arm/lib/ashldi3.S
index b05e958..a7e7de8 100644
--- a/arch/arm/lib/ashldi3.S
+++ b/arch/arm/lib/ashldi3.S
@@ -28,6 +28,7 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 #ifdef __ARMEB__
 #define al r1
@@ -52,3 +53,5 @@
 
 ENDPROC(__ashldi3)
 ENDPROC(__aeabi_llsl)
+EXPORT_SYMBOL(__ashldi3)
+EXPORT_SYMBOL(__aeabi_llsl)
diff --git a/arch/arm/lib/ashrdi3.S b/arch/arm/lib/ashrdi3.S
index 275d7d2..490336e 100644
--- a/arch/arm/lib/ashrdi3.S
+++ b/arch/arm/lib/ashrdi3.S
@@ -28,6 +28,7 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 #ifdef __ARMEB__
 #define al r1
@@ -52,3 +53,5 @@
 
 ENDPROC(__ashrdi3)
 ENDPROC(__aeabi_lasr)
+EXPORT_SYMBOL(__ashrdi3)
+EXPORT_SYMBOL(__aeabi_lasr)
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 7d807cf..df06638 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,5 +1,6 @@
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 #if __LINUX_ARM_ARCH__ >= 6
 	.macro	bitop, name, instr
@@ -25,6 +26,7 @@
 	bx	lr
 UNWIND(	.fnend		)
 ENDPROC(\name		)
+EXPORT_SYMBOL(\name	)
 	.endm
 
 	.macro	testop, name, instr, store
@@ -55,6 +57,7 @@
 2:	bx	lr
 UNWIND(	.fnend		)
 ENDPROC(\name		)
+EXPORT_SYMBOL(\name	)
 	.endm
 #else
 	.macro	bitop, name, instr
@@ -74,6 +77,7 @@
 	ret	lr
 UNWIND(	.fnend		)
 ENDPROC(\name		)
+EXPORT_SYMBOL(\name	)
 	.endm
 
 /**
@@ -102,5 +106,6 @@
 	ret	lr
 UNWIND(	.fnend		)
 ENDPROC(\name		)
+EXPORT_SYMBOL(\name	)
 	.endm
 #endif
diff --git a/arch/arm/lib/bswapsdi2.S b/arch/arm/lib/bswapsdi2.S
index 07cda73..f05f782 100644
--- a/arch/arm/lib/bswapsdi2.S
+++ b/arch/arm/lib/bswapsdi2.S
@@ -1,5 +1,6 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 #if __LINUX_ARM_ARCH__ >= 6
 ENTRY(__bswapsi2)
@@ -35,3 +36,5 @@
 	ret lr
 ENDPROC(__bswapdi2)
 #endif
+EXPORT_SYMBOL(__bswapsi2)
+EXPORT_SYMBOL(__bswapdi2)
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index e936352..b566154 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -10,6 +10,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 		.text
 
@@ -50,6 +51,9 @@
 UNWIND(.fnend)
 ENDPROC(arm_clear_user)
 ENDPROC(__clear_user_std)
+#ifndef CONFIG_UACCESS_WITH_MEMCPY
+EXPORT_SYMBOL(arm_clear_user)
+#endif
 
 		.pushsection .text.fixup,"ax"
 		.align	0
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 1512beb..63e4c1e 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -13,6 +13,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 /*
  * Prototype:
@@ -94,16 +95,14 @@
 #include "copy_template.S"
 
 ENDPROC(arm_copy_from_user)
+EXPORT_SYMBOL(arm_copy_from_user)
 
 	.pushsection .fixup,"ax"
 	.align 0
 	copy_abort_preamble
-	ldmfd	sp!, {r1, r2}
-	sub	r3, r0, r1
-	rsb	r1, r3, r2
-	str	r1, [sp]
-	bl	__memzero
-	ldr	r0, [sp], #4
+	ldmfd	sp!, {r1, r2, r3}
+	sub	r0, r0, r1
+	rsb	r0, r0, r2
 	copy_abort_end
 	.popsection
 
diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
index 6ee2f67..d97851d 100644
--- a/arch/arm/lib/copy_page.S
+++ b/arch/arm/lib/copy_page.S
@@ -13,6 +13,7 @@
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
+#include <asm/export.h>
 
 #define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 ))
 
@@ -45,3 +46,4 @@
 	PLD(	beq	2b			)
 		ldmfd	sp!, {r4, pc}			@	3
 ENDPROC(copy_page)
+EXPORT_SYMBOL(copy_page)
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index caf5019..592c179 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -13,6 +13,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 /*
  * Prototype:
@@ -99,6 +100,9 @@
 
 ENDPROC(arm_copy_to_user)
 ENDPROC(__copy_to_user_std)
+#ifndef CONFIG_UACCESS_WITH_MEMCPY
+EXPORT_SYMBOL(arm_copy_to_user)
+#endif
 
 	.pushsection .text.fixup,"ax"
 	.align 0
diff --git a/arch/arm/lib/csumipv6.S b/arch/arm/lib/csumipv6.S
index 3ac6ef0..68603b5 100644
--- a/arch/arm/lib/csumipv6.S
+++ b/arch/arm/lib/csumipv6.S
@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 		.text
 
@@ -30,4 +31,4 @@
 		adcs	r0, r0, #0
 		ldmfd	sp!, {pc}
 ENDPROC(__csum_ipv6_magic)
-
+EXPORT_SYMBOL(__csum_ipv6_magic)
diff --git a/arch/arm/lib/csumpartial.S b/arch/arm/lib/csumpartial.S
index 984e0f2..830b20e 100644
--- a/arch/arm/lib/csumpartial.S
+++ b/arch/arm/lib/csumpartial.S
@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 		.text
 
@@ -140,3 +141,4 @@
 		bne	4b
 		b	.Lless4
 ENDPROC(csum_partial)
+EXPORT_SYMBOL(csum_partial)
diff --git a/arch/arm/lib/csumpartialcopy.S b/arch/arm/lib/csumpartialcopy.S
index d03fc71..9c3383f 100644
--- a/arch/arm/lib/csumpartialcopy.S
+++ b/arch/arm/lib/csumpartialcopy.S
@@ -49,5 +49,6 @@
 
 #define FN_ENTRY	ENTRY(csum_partial_copy_nocheck)
 #define FN_EXIT		ENDPROC(csum_partial_copy_nocheck)
+#define FN_EXPORT	EXPORT_SYMBOL(csum_partial_copy_nocheck)
 
 #include "csumpartialcopygeneric.S"
diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S
index 10b4590..8b94d20 100644
--- a/arch/arm/lib/csumpartialcopygeneric.S
+++ b/arch/arm/lib/csumpartialcopygeneric.S
@@ -8,6 +8,7 @@
  * published by the Free Software Foundation.
  */
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 /*
  * unsigned int
@@ -331,3 +332,4 @@
 		mov	r5, r4, get_byte_1
 		b	.Lexit
 FN_EXIT
+FN_EXPORT
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 1712f13..5d495ed 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -73,6 +73,7 @@
 
 #define FN_ENTRY	ENTRY(csum_partial_copy_from_user)
 #define FN_EXIT		ENDPROC(csum_partial_copy_from_user)
+#define FN_EXPORT	EXPORT_SYMBOL(csum_partial_copy_from_user)
 
 #include "csumpartialcopygeneric.S"
 
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index 2cef118..69aad80 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -24,6 +24,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/export.h>
 #include <linux/timex.h>
 
 /*
@@ -34,6 +35,7 @@
 	.const_udelay	= __loop_const_udelay,
 	.udelay		= __loop_udelay,
 };
+EXPORT_SYMBOL(arm_delay_ops);
 
 static const struct delay_timer *delay_timer;
 static bool delay_calibrated;
diff --git a/arch/arm/lib/div64.S b/arch/arm/lib/div64.S
index a9eafe4..0c9e1c1 100644
--- a/arch/arm/lib/div64.S
+++ b/arch/arm/lib/div64.S
@@ -15,6 +15,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 #ifdef __ARMEB__
 #define xh r0
@@ -210,3 +211,4 @@
 
 UNWIND(.fnend)
 ENDPROC(__do_div64)
+EXPORT_SYMBOL(__do_div64)
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index 7848780..26302b8 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -15,6 +15,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
                 .text
 
 /*
@@ -37,6 +38,7 @@
 3:		mov	r0, r1			@ no free bits
 		ret	lr
 ENDPROC(_find_first_zero_bit_le)
+EXPORT_SYMBOL(_find_first_zero_bit_le)
 
 /*
  * Purpose  : Find next 'zero' bit
@@ -57,6 +59,7 @@
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
 ENDPROC(_find_next_zero_bit_le)
+EXPORT_SYMBOL(_find_next_zero_bit_le)
 
 /*
  * Purpose  : Find a 'one' bit
@@ -78,6 +81,7 @@
 3:		mov	r0, r1			@ no free bits
 		ret	lr
 ENDPROC(_find_first_bit_le)
+EXPORT_SYMBOL(_find_first_bit_le)
 
 /*
  * Purpose  : Find next 'one' bit
@@ -97,6 +101,7 @@
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
 ENDPROC(_find_next_bit_le)
+EXPORT_SYMBOL(_find_next_bit_le)
 
 #ifdef __ARMEB__
 
@@ -116,6 +121,7 @@
 3:		mov	r0, r1			@ no free bits
 		ret	lr
 ENDPROC(_find_first_zero_bit_be)
+EXPORT_SYMBOL(_find_first_zero_bit_be)
 
 ENTRY(_find_next_zero_bit_be)
 		teq	r1, #0
@@ -133,6 +139,7 @@
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
 ENDPROC(_find_next_zero_bit_be)
+EXPORT_SYMBOL(_find_next_zero_bit_be)
 
 ENTRY(_find_first_bit_be)
 		teq	r1, #0
@@ -150,6 +157,7 @@
 3:		mov	r0, r1			@ no free bits
 		ret	lr
 ENDPROC(_find_first_bit_be)
+EXPORT_SYMBOL(_find_first_bit_be)
 
 ENTRY(_find_next_bit_be)
 		teq	r1, #0
@@ -166,6 +174,7 @@
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
 ENDPROC(_find_next_bit_be)
+EXPORT_SYMBOL(_find_next_bit_be)
 
 #endif
 
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 8ecfd15..9d09a38 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -31,6 +31,7 @@
 #include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
+#include <asm/export.h>
 
 ENTRY(__get_user_1)
 	check_uaccess r0, 1, r1, r2, __get_user_bad
@@ -38,6 +39,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_1)
+EXPORT_SYMBOL(__get_user_1)
 
 ENTRY(__get_user_2)
 	check_uaccess r0, 2, r1, r2, __get_user_bad
@@ -58,6 +60,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_2)
+EXPORT_SYMBOL(__get_user_2)
 
 ENTRY(__get_user_4)
 	check_uaccess r0, 4, r1, r2, __get_user_bad
@@ -65,6 +68,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_4)
+EXPORT_SYMBOL(__get_user_4)
 
 ENTRY(__get_user_8)
 	check_uaccess r0, 8, r1, r2, __get_user_bad
@@ -78,6 +82,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_8)
+EXPORT_SYMBOL(__get_user_8)
 
 #ifdef __ARMEB__
 ENTRY(__get_user_32t_8)
@@ -91,6 +96,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_32t_8)
+EXPORT_SYMBOL(__get_user_32t_8)
 
 ENTRY(__get_user_64t_1)
 	check_uaccess r0, 1, r1, r2, __get_user_bad8
@@ -98,6 +104,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_1)
+EXPORT_SYMBOL(__get_user_64t_1)
 
 ENTRY(__get_user_64t_2)
 	check_uaccess r0, 2, r1, r2, __get_user_bad8
@@ -114,6 +121,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_2)
+EXPORT_SYMBOL(__get_user_64t_2)
 
 ENTRY(__get_user_64t_4)
 	check_uaccess r0, 4, r1, r2, __get_user_bad8
@@ -121,6 +129,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_4)
+EXPORT_SYMBOL(__get_user_64t_4)
 #endif
 
 __get_user_bad8:
diff --git a/arch/arm/lib/io-readsb.S b/arch/arm/lib/io-readsb.S
index c31b2f3..3dff7a3 100644
--- a/arch/arm/lib/io-readsb.S
+++ b/arch/arm/lib/io-readsb.S
@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 .Linsb_align:	rsb	ip, ip, #4
 		cmp	ip, r2
@@ -121,3 +122,4 @@
 
 		ldmfd	sp!, {r4 - r6, pc}
 ENDPROC(__raw_readsb)
+EXPORT_SYMBOL(__raw_readsb)
diff --git a/arch/arm/lib/io-readsl.S b/arch/arm/lib/io-readsl.S
index 2ed86fa..bfd3968 100644
--- a/arch/arm/lib/io-readsl.S
+++ b/arch/arm/lib/io-readsl.S
@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 ENTRY(__raw_readsl)
 		teq	r2, #0		@ do we have to check for the zero len?
@@ -77,3 +78,4 @@
 		strb	r3, [r1, #0]
 		ret	lr
 ENDPROC(__raw_readsl)
+EXPORT_SYMBOL(__raw_readsl)
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S
index 413da99..b3af3db 100644
--- a/arch/arm/lib/io-readsw-armv3.S
+++ b/arch/arm/lib/io-readsw-armv3.S
@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 .Linsw_bad_alignment:
 		adr	r0, .Linsw_bad_align_msg
@@ -103,4 +104,4 @@
 
 		ldmfd	sp!, {r4, r5, r6, pc}
 
-
+EXPORT_SYMBOL(__raw_readsw)
diff --git a/arch/arm/lib/io-readsw-armv4.S b/arch/arm/lib/io-readsw-armv4.S
index d9a45e9..3c7a7a4 100644
--- a/arch/arm/lib/io-readsw-armv4.S
+++ b/arch/arm/lib/io-readsw-armv4.S
@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 		.macro	pack, rd, hw1, hw2
 #ifndef __ARMEB__
@@ -129,3 +130,4 @@
 		strneb	ip, [r1]
 		ldmfd	sp!, {r4, pc}
 ENDPROC(__raw_readsw)
+EXPORT_SYMBOL(__raw_readsw)
diff --git a/arch/arm/lib/io-writesb.S b/arch/arm/lib/io-writesb.S
index a46bbc9..fa36335 100644
--- a/arch/arm/lib/io-writesb.S
+++ b/arch/arm/lib/io-writesb.S
@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 		.macro	outword, rd
 #ifndef __ARMEB__
@@ -92,3 +93,4 @@
 
 		ldmfd	sp!, {r4, r5, pc}
 ENDPROC(__raw_writesb)
+EXPORT_SYMBOL(__raw_writesb)
diff --git a/arch/arm/lib/io-writesl.S b/arch/arm/lib/io-writesl.S
index 4ea2435..98ed6ae 100644
--- a/arch/arm/lib/io-writesl.S
+++ b/arch/arm/lib/io-writesl.S
@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 ENTRY(__raw_writesl)
 		teq	r2, #0		@ do we have to check for the zero len?
@@ -65,3 +66,4 @@
 		bne	6b
 		ret	lr
 ENDPROC(__raw_writesl)
+EXPORT_SYMBOL(__raw_writesl)
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S
index 121789e..577184c 100644
--- a/arch/arm/lib/io-writesw-armv3.S
+++ b/arch/arm/lib/io-writesw-armv3.S
@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 .Loutsw_bad_alignment:
 		adr	r0, .Loutsw_bad_align_msg
@@ -124,3 +125,4 @@
 		strne	ip, [r0]
 
 		ldmfd	sp!, {r4, r5, r6, pc}
+EXPORT_SYMBOL(__raw_writesw)
diff --git a/arch/arm/lib/io-writesw-armv4.S b/arch/arm/lib/io-writesw-armv4.S
index 269f90c..e335f48 100644
--- a/arch/arm/lib/io-writesw-armv4.S
+++ b/arch/arm/lib/io-writesw-armv4.S
@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 		.macro	outword, rd
 #ifndef __ARMEB__
@@ -98,3 +99,4 @@
 		strneh	ip, [r0]
 		ret	lr
 ENDPROC(__raw_writesw)
+EXPORT_SYMBOL(__raw_writesw)
diff --git a/arch/arm/lib/lib1funcs.S b/arch/arm/lib/lib1funcs.S
index 9397b2e..f541bc0 100644
--- a/arch/arm/lib/lib1funcs.S
+++ b/arch/arm/lib/lib1funcs.S
@@ -36,6 +36,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 .macro ARM_DIV_BODY dividend, divisor, result, curbit
 
@@ -238,6 +239,8 @@
 UNWIND(.fnend)
 ENDPROC(__udivsi3)
 ENDPROC(__aeabi_uidiv)
+EXPORT_SYMBOL(__udivsi3)
+EXPORT_SYMBOL(__aeabi_uidiv)
 
 ENTRY(__umodsi3)
 UNWIND(.fnstart)
@@ -256,6 +259,7 @@
 
 UNWIND(.fnend)
 ENDPROC(__umodsi3)
+EXPORT_SYMBOL(__umodsi3)
 
 #ifdef CONFIG_ARM_PATCH_IDIV
 	.align 3
@@ -303,6 +307,8 @@
 UNWIND(.fnend)
 ENDPROC(__divsi3)
 ENDPROC(__aeabi_idiv)
+EXPORT_SYMBOL(__divsi3)
+EXPORT_SYMBOL(__aeabi_idiv)
 
 ENTRY(__modsi3)
 UNWIND(.fnstart)
@@ -327,6 +333,7 @@
 
 UNWIND(.fnend)
 ENDPROC(__modsi3)
+EXPORT_SYMBOL(__modsi3)
 
 #ifdef CONFIG_AEABI
 
@@ -343,6 +350,7 @@
 
 UNWIND(.fnend)
 ENDPROC(__aeabi_uidivmod)
+EXPORT_SYMBOL(__aeabi_uidivmod)
 
 ENTRY(__aeabi_idivmod)
 UNWIND(.fnstart)
@@ -356,6 +364,7 @@
 
 UNWIND(.fnend)
 ENDPROC(__aeabi_idivmod)
+EXPORT_SYMBOL(__aeabi_idivmod)
 
 #endif
 
diff --git a/arch/arm/lib/lshrdi3.S b/arch/arm/lib/lshrdi3.S
index 922dcd8..e408339 100644
--- a/arch/arm/lib/lshrdi3.S
+++ b/arch/arm/lib/lshrdi3.S
@@ -28,6 +28,7 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 #ifdef __ARMEB__
 #define al r1
@@ -52,3 +53,5 @@
 
 ENDPROC(__lshrdi3)
 ENDPROC(__aeabi_llsr)
+EXPORT_SYMBOL(__lshrdi3)
+EXPORT_SYMBOL(__aeabi_llsr)
diff --git a/arch/arm/lib/memchr.S b/arch/arm/lib/memchr.S
index 74a5bed..44182bf 100644
--- a/arch/arm/lib/memchr.S
+++ b/arch/arm/lib/memchr.S
@@ -11,6 +11,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 	.text
 	.align	5
@@ -24,3 +25,4 @@
 2:	movne	r0, #0
 	ret	lr
 ENDPROC(memchr)
+EXPORT_SYMBOL(memchr)
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index 64111bd..1be5b6d 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -13,6 +13,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 #define LDR1W_SHIFT	0
 #define STR1W_SHIFT	0
@@ -68,3 +69,5 @@
 
 ENDPROC(memcpy)
 ENDPROC(mmiocpy)
+EXPORT_SYMBOL(memcpy)
+EXPORT_SYMBOL(mmiocpy)
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index 69a9d47..71dcc54 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -13,6 +13,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 		.text
 
@@ -225,3 +226,4 @@
 18:		backward_copy_shift	push=24	pull=8
 
 ENDPROC(memmove)
+EXPORT_SYMBOL(memmove)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 3c65e3b..7b72044 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -12,6 +12,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 	.text
 	.align	5
@@ -135,3 +136,5 @@
 UNWIND( .fnend   )
 ENDPROC(memset)
 ENDPROC(mmioset)
+EXPORT_SYMBOL(memset)
+EXPORT_SYMBOL(mmioset)
diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S
index 0eded95..6dec26e 100644
--- a/arch/arm/lib/memzero.S
+++ b/arch/arm/lib/memzero.S
@@ -10,6 +10,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 	.text
 	.align	5
@@ -135,3 +136,4 @@
 	ret	lr			@ 1
 UNWIND(	.fnend				)
 ENDPROC(__memzero)
+EXPORT_SYMBOL(__memzero)
diff --git a/arch/arm/lib/muldi3.S b/arch/arm/lib/muldi3.S
index 2043059..b8f1238 100644
--- a/arch/arm/lib/muldi3.S
+++ b/arch/arm/lib/muldi3.S
@@ -12,6 +12,7 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 #ifdef __ARMEB__
 #define xh r0
@@ -46,3 +47,5 @@
 
 ENDPROC(__muldi3)
 ENDPROC(__aeabi_lmul)
+EXPORT_SYMBOL(__muldi3)
+EXPORT_SYMBOL(__aeabi_lmul)
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 38d660d..11de126 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -31,6 +31,7 @@
 #include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
+#include <asm/export.h>
 
 ENTRY(__put_user_1)
 	check_uaccess r0, 1, r1, ip, __put_user_bad
@@ -38,6 +39,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__put_user_1)
+EXPORT_SYMBOL(__put_user_1)
 
 ENTRY(__put_user_2)
 	check_uaccess r0, 2, r1, ip, __put_user_bad
@@ -62,6 +64,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__put_user_2)
+EXPORT_SYMBOL(__put_user_2)
 
 ENTRY(__put_user_4)
 	check_uaccess r0, 4, r1, ip, __put_user_bad
@@ -69,6 +72,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__put_user_4)
+EXPORT_SYMBOL(__put_user_4)
 
 ENTRY(__put_user_8)
 	check_uaccess r0, 8, r1, ip, __put_user_bad
@@ -82,6 +86,7 @@
 	mov	r0, #0
 	ret	lr
 ENDPROC(__put_user_8)
+EXPORT_SYMBOL(__put_user_8)
 
 __put_user_bad:
 	mov	r0, #-EFAULT
diff --git a/arch/arm/lib/strchr.S b/arch/arm/lib/strchr.S
index 013d64c..7301f6e6 100644
--- a/arch/arm/lib/strchr.S
+++ b/arch/arm/lib/strchr.S
@@ -11,6 +11,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 		.text
 		.align	5
@@ -25,3 +26,4 @@
 		subeq	r0, r0, #1
 		ret	lr
 ENDPROC(strchr)
+EXPORT_SYMBOL(strchr)
diff --git a/arch/arm/lib/strrchr.S b/arch/arm/lib/strrchr.S
index 3cec1c7..aaf9fd9 100644
--- a/arch/arm/lib/strrchr.S
+++ b/arch/arm/lib/strrchr.S
@@ -11,6 +11,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 		.text
 		.align	5
@@ -24,3 +25,4 @@
 		mov	r0, r3
 		ret	lr
 ENDPROC(strrchr)
+EXPORT_SYMBOL(strrchr)
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 6bd1089..1626e3a 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -19,6 +19,7 @@
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/hugetlb.h>
+#include <linux/export.h>
 #include <asm/current.h>
 #include <asm/page.h>
 
@@ -156,6 +157,7 @@
 	}
 	return n;
 }
+EXPORT_SYMBOL(arm_copy_to_user);
 	
 static unsigned long noinline
 __clear_user_memset(void __user *addr, unsigned long n)
@@ -213,6 +215,7 @@
 	}
 	return n;
 }
+EXPORT_SYMBOL(arm_clear_user);
 
 #if 0
 
diff --git a/arch/arm/lib/ucmpdi2.S b/arch/arm/lib/ucmpdi2.S
index ad4a630..127a91a 100644
--- a/arch/arm/lib/ucmpdi2.S
+++ b/arch/arm/lib/ucmpdi2.S
@@ -12,6 +12,7 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 #ifdef __ARMEB__
 #define xh r0
@@ -35,6 +36,7 @@
 	ret	lr
 
 ENDPROC(__ucmpdi2)
+EXPORT_SYMBOL(__ucmpdi2)
 
 #ifdef CONFIG_AEABI
 
@@ -48,6 +50,7 @@
 	ret	lr
 
 ENDPROC(__aeabi_ulcmp)
+EXPORT_SYMBOL(__aeabi_ulcmp)
 
 #endif
 
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index cab1289..737450f 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -32,7 +32,6 @@
 
 ifdef CONFIG_SND_IMX_SOC
 obj-y += ssi-fiq.o
-obj-y += ssi-fiq-ksym.o
 endif
 
 # i.MX21 based machines
diff --git a/arch/arm/mach-imx/ssi-fiq-ksym.c b/arch/arm/mach-imx/ssi-fiq-ksym.c
deleted file mode 100644
index 792090f..0000000
--- a/arch/arm/mach-imx/ssi-fiq-ksym.c
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Exported ksyms for the SSI FIQ handler
- *
- * Copyright (C) 2009, Sascha Hauer <s.hauer@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-
-#include <linux/platform_data/asoc-imx-ssi.h>
-
-EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer);
-EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer);
-EXPORT_SYMBOL(imx_ssi_fiq_start);
-EXPORT_SYMBOL(imx_ssi_fiq_end);
-EXPORT_SYMBOL(imx_ssi_fiq_base);
-
diff --git a/arch/arm/mach-imx/ssi-fiq.S b/arch/arm/mach-imx/ssi-fiq.S
index a8b93c5..fd7917f 100644
--- a/arch/arm/mach-imx/ssi-fiq.S
+++ b/arch/arm/mach-imx/ssi-fiq.S
@@ -8,6 +8,7 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 /*
  * r8  = bit 0-15: tx offset, bit 16-31: tx buffer size
@@ -144,4 +145,8 @@
 		.word 0x0
 .L_imx_ssi_fiq_end:
 imx_ssi_fiq_end:
-
+EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer)
+EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer)
+EXPORT_SYMBOL(imx_ssi_fiq_start)
+EXPORT_SYMBOL(imx_ssi_fiq_end)
+EXPORT_SYMBOL(imx_ssi_fiq_base)
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index 05ec5e0..67532f2 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -23,7 +23,6 @@
 #endif
 
 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
-unsigned long search_exception_table(unsigned long addr);
 void early_abt_enable(void);
 
 #endif	/* __ARCH_ARM_FAULT_H */
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 58635f7..220ac70 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -502,10 +502,11 @@
 		};
 
 		sata: sata@3200000 {
-			compatible = "fsl,ls1043a-ahci", "fsl,ls1021a-ahci";
+			compatible = "fsl,ls1043a-ahci";
 			reg = <0x0 0x3200000 0x0 0x10000>;
 			interrupts = <0 69 0x4>;
 			clocks = <&clockgen 4 0>;
+			dma-coherent;
 		};
 
 		msi1: msi-controller1@1571000 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index d105976..337da90 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -683,6 +683,7 @@
 			reg = <0x0 0x3200000 0x0 0x10000>;
 			interrupts = <0 133 0x4>; /* Level high type */
 			clocks = <&clockgen 4 3>;
+			dma-coherent;
 		};
 
 		sata1: sata@3210000 {
@@ -691,6 +692,7 @@
 			reg = <0x0 0x3210000 0x0 0x10000>;
 			interrupts = <0 136 0x4>; /* Level high type */
 			clocks = <&clockgen 4 3>;
+			dma-coherent;
 		};
 
 		usb0: usb3@3100000 {
diff --git a/arch/arm64/boot/dts/nvidia/tegra132.dtsi b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
index 2013f89..3f3a46a 100644
--- a/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
@@ -4,6 +4,7 @@
 #include <dt-bindings/pinctrl/pinctrl-tegra.h>
 #include <dt-bindings/pinctrl/pinctrl-tegra-xusb.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/thermal/tegra124-soctherm.h>
 
 / {
 	compatible = "nvidia,tegra132", "nvidia,tegra124";
@@ -727,8 +728,10 @@
 	};
 
 	soctherm: thermal-sensor@700e2000 {
-		compatible = "nvidia,tegra124-soctherm";
-		reg = <0x0 0x700e2000 0x0 0x1000>;
+		compatible = "nvidia,tegra132-soctherm";
+		reg = <0x0 0x700e2000 0x0 0x600 /* 0: SOC_THERM reg_base */
+			0x0 0x70040000 0x0 0x200>; /* 2: CCROC reg_base */
+		reg-names = "soctherm-reg", "ccroc-reg";
 		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&tegra_car TEGRA124_CLK_TSENSOR>,
 			<&tegra_car TEGRA124_CLK_SOC_THERM>;
@@ -736,6 +739,118 @@
 		resets = <&tegra_car 78>;
 		reset-names = "soctherm";
 		#thermal-sensor-cells = <1>;
+
+		throttle-cfgs {
+			throttle_heavy: heavy {
+				nvidia,priority = <100>;
+				nvidia,cpu-throt-level = <TEGRA_SOCTHERM_THROT_LEVEL_HIGH>;
+
+				#cooling-cells = <2>;
+			};
+		};
+	};
+
+	thermal-zones {
+		cpu {
+			polling-delay-passive = <1000>;
+			polling-delay = <0>;
+
+			thermal-sensors =
+				<&soctherm TEGRA124_SOCTHERM_SENSOR_CPU>;
+
+			trips {
+				cpu_shutdown_trip {
+					temperature = <105000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+
+				cpu_throttle_trip: throttle-trip {
+					temperature = <102000>;
+					hysteresis = <1000>;
+					type = "hot";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu_throttle_trip>;
+					cooling-device = <&throttle_heavy 1 1>;
+				};
+			};
+		};
+		mem {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+
+			thermal-sensors =
+				<&soctherm TEGRA124_SOCTHERM_SENSOR_MEM>;
+
+			trips {
+				mem_shutdown_trip {
+					temperature = <101000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				/*
+				 * There are currently no cooling maps,
+				 * because there are no cooling devices.
+				 */
+			};
+		};
+		gpu {
+			polling-delay-passive = <1000>;
+			polling-delay = <0>;
+
+			thermal-sensors =
+				<&soctherm TEGRA124_SOCTHERM_SENSOR_GPU>;
+
+			trips {
+				gpu_shutdown_trip {
+					temperature = <101000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+
+				gpu_throttle_trip: throttle-trip {
+					temperature = <99000>;
+					hysteresis = <1000>;
+					type = "hot";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&gpu_throttle_trip>;
+					cooling-device = <&throttle_heavy 1 1>;
+				};
+			};
+		};
+		pllx {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+
+			thermal-sensors =
+				<&soctherm TEGRA124_SOCTHERM_SENSOR_PLLX>;
+
+			trips {
+				pllx_shutdown_trip {
+					temperature = <105000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				/*
+				 * There are currently no cooling maps,
+				 * because there are no cooling devices.
+				 */
+			};
+		};
 	};
 
 	ahub@70300000 {
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index f673979..46045fe 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -3,6 +3,7 @@
 #include <dt-bindings/memory/tegra210-mc.h>
 #include <dt-bindings/pinctrl/pinctrl-tegra.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/thermal/tegra124-soctherm.h>
 
 / {
 	compatible = "nvidia,tegra210";
@@ -1159,4 +1160,130 @@
 				(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
 		interrupt-parent = <&gic>;
 	};
+
+	soctherm: thermal-sensor@700e2000 {
+		compatible = "nvidia,tegra210-soctherm";
+		reg = <0x0 0x700e2000 0x0 0x600 /* SOC_THERM reg_base */
+			0x0 0x60006000 0x0 0x400>; /* CAR reg_base */
+		reg-names = "soctherm-reg", "car-reg";
+		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&tegra_car TEGRA210_CLK_TSENSOR>,
+			<&tegra_car TEGRA210_CLK_SOC_THERM>;
+		clock-names = "tsensor", "soctherm";
+		resets = <&tegra_car 78>;
+		reset-names = "soctherm";
+		#thermal-sensor-cells = <1>;
+
+		throttle-cfgs {
+			throttle_heavy: heavy {
+				nvidia,priority = <100>;
+				nvidia,cpu-throt-percent = <85>;
+
+				#cooling-cells = <2>;
+			};
+		};
+	};
+
+	thermal-zones {
+		cpu {
+			polling-delay-passive = <1000>;
+			polling-delay = <0>;
+
+			thermal-sensors =
+				<&soctherm TEGRA124_SOCTHERM_SENSOR_CPU>;
+
+			trips {
+				cpu-shutdown-trip {
+					temperature = <102500>;
+					hysteresis = <0>;
+					type = "critical";
+				};
+
+				cpu_throttle_trip: throttle-trip {
+					temperature = <98500>;
+					hysteresis = <1000>;
+					type = "hot";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu_throttle_trip>;
+					cooling-device = <&throttle_heavy 1 1>;
+				};
+			};
+		};
+		mem {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+
+			thermal-sensors =
+				<&soctherm TEGRA124_SOCTHERM_SENSOR_MEM>;
+
+			trips {
+				mem-shutdown-trip {
+					temperature = <103000>;
+					hysteresis = <0>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				/*
+				 * There are currently no cooling maps,
+				 * because there are no cooling devices.
+				 */
+			};
+		};
+		gpu {
+			polling-delay-passive = <1000>;
+			polling-delay = <0>;
+
+			thermal-sensors =
+				<&soctherm TEGRA124_SOCTHERM_SENSOR_GPU>;
+
+			trips {
+				gpu-shutdown-trip {
+					temperature = <103000>;
+					hysteresis = <0>;
+					type = "critical";
+				};
+
+				gpu_throttle_trip: throttle-trip {
+					temperature = <100000>;
+					hysteresis = <1000>;
+					type = "hot";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&gpu_throttle_trip>;
+					cooling-device = <&throttle_heavy 1 1>;
+				};
+			};
+		};
+		pllx {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+
+			thermal-sensors =
+				<&soctherm TEGRA124_SOCTHERM_SENSOR_PLLX>;
+
+			trips {
+				pllx-shutdown-trip {
+					temperature = <103000>;
+					hysteresis = <0>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				/*
+				 * There are currently no cooling maps,
+				 * because there are no cooling devices.
+				 */
+			};
+		};
+	};
 };
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 55101bd..39feb85 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -7,7 +7,6 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/init.h>
-#include <linux/kconfig.h>
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/stringify.h>
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index c47257c..bcaf6fb 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -278,14 +278,16 @@
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	unsigned long res = n;
 	kasan_check_write(to, n);
 
 	if (access_ok(VERIFY_READ, from, n)) {
 		check_object_size(to, n, false);
-		n = __arch_copy_from_user(to, from, n);
-	} else /* security hole - plug it */
-		memset(to, 0, n);
-	return n;
+		res = __arch_copy_from_user(to, from, n);
+	}
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index a4f5f76..27b2f138 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -372,12 +372,8 @@
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	unsigned long range_end = mm->brk;
-
 	if (is_compat_task())
-		range_end += 0x02000000;
+		return randomize_page(mm->brk, 0x02000000);
 	else
-		range_end += 0x40000000;
-
-	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+		return randomize_page(mm->brk, 0x40000000);
 }
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index b8799e7..1bec41b 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -135,7 +135,7 @@
 
 #ifdef CONFIG_KASAN
 	mov	x0, sp
-	bl	kasan_unpoison_remaining_stack
+	bl	kasan_unpoison_task_stack_below
 #endif
 
 	ldp	x19, x20, [x29, #16]
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 0b90497..4fd67ea 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -79,11 +79,6 @@
 
 	.section .fixup,"ax"
 	.align	2
-9998:
-	sub	x0, end, dst
-9999:
-	strb	wzr, [dst], #1			// zero remaining buffer space
-	cmp	dst, end
-	b.lo	9999b
+9998:	sub	x0, end, dst			// bytes not copied
 	ret
 	.previous
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index bdacead..3f74d0d 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -828,7 +828,7 @@
 	 * then the IOMMU core will have already configured a group for this
 	 * device, and allocated the default domain for that group.
 	 */
-	if (!domain || iommu_dma_init_domain(domain, dma_base, size)) {
+	if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) {
 		pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
 			dev_name(dev));
 		return false;
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index 0a2a700..0eff88a 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -163,18 +163,29 @@
 		: "a" (__ptr(ptr)));		\
 })
 
-#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
-#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
 static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	memcpy(to, (const void __force *)from, n);
+	return 0;
+}
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	memcpy((void __force *)to, from, n);
+	SSYNC();
+	return 0;
+}
+
+static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (likely(access_ok(VERIFY_READ, from, n))) {
-		memcpy(to, (const void __force *)from, n);
-		return 0;
-	}
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		return __copy_from_user(to, from, n);
 	memset(to, 0, n);
 	return n;
 }
@@ -182,12 +193,9 @@
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (access_ok(VERIFY_WRITE, to, n))
-		memcpy((void __force *)to, from, n);
-	else
-		return n;
-	SSYNC();
-	return 0;
+	if (likely(access_ok(VERIFY_WRITE, to, n)))
+		return __copy_to_user(to, from, n);
+	return n;
 }
 
 /*
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 8b8fe67..8d79286 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -271,7 +271,7 @@
 			case BFIN_MEM_ACCESS_CORE:
 			case BFIN_MEM_ACCESS_CORE_ONLY:
 				copied = access_process_vm(child, addr, &tmp,
-				                           to_copy, 0);
+							   to_copy, FOLL_FORCE);
 				if (copied)
 					break;
 
@@ -324,7 +324,8 @@
 			case BFIN_MEM_ACCESS_CORE:
 			case BFIN_MEM_ACCESS_CORE_ONLY:
 				copied = access_process_vm(child, addr, &data,
-				                           to_copy, 1);
+				                           to_copy,
+							   FOLL_FORCE | FOLL_WRITE);
 				break;
 			case BFIN_MEM_ACCESS_DMA:
 				if (safe_dma_memcpy(paddr, &data, to_copy))
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index deba266..71b758d 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -30,7 +30,7 @@
 	default y
 
 config NO_IOPORT_MAP
-	def_bool y
+	def_bool y if !PCI
 
 config FORCE_MAX_ZONEORDER
 	int
diff --git a/arch/cris/arch-v10/drivers/axisflashmap.c b/arch/cris/arch-v10/drivers/axisflashmap.c
index bdc25aa..28292da 100644
--- a/arch/cris/arch-v10/drivers/axisflashmap.c
+++ b/arch/cris/arch-v10/drivers/axisflashmap.c
@@ -177,15 +177,6 @@
 	},
 };
 
-#ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE
-/* Main flash device */
-static struct mtd_partition main_partition = {
-	.name = "main",
-	.size = 0,
-	.offset = 0
-};
-#endif
-
 /*
  * Probe a chip select for AMD-compatible (JEDEC) or CFI-compatible flash
  * chips in that order (because the amd_flash-driver is faster).
@@ -369,16 +360,6 @@
 		pidx++;
 	}
 
-#ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE
-	if (mymtd) {
-		main_partition.size = mymtd->size;
-		err = mtd_device_register(mymtd, &main_partition, 1);
-		if (err)
-			panic("axisflashmap: Could not initialize "
-			      "partition for whole main mtd device!\n");
-	}
-#endif
-
         if (mymtd) {
 		if (use_default_ptable) {
 			printk(KERN_INFO " Using default partition table.\n");
diff --git a/arch/cris/arch-v10/drivers/eeprom.c b/arch/cris/arch-v10/drivers/eeprom.c
index f679a19..c903a9e 100644
--- a/arch/cris/arch-v10/drivers/eeprom.c
+++ b/arch/cris/arch-v10/drivers/eeprom.c
@@ -395,7 +395,7 @@
 static loff_t eeprom_lseek(struct file * file, loff_t offset, int orig)
 {
 /*
- *  orig 0: position from begning of eeprom
+ *  orig 0: position from beginning of eeprom
  *  orig 1: relative from current position
  *  orig 2: position from last eeprom address
  */
diff --git a/arch/cris/arch-v10/lib/dram_init.S b/arch/cris/arch-v10/lib/dram_init.S
index e541d3d..9331012 100644
--- a/arch/cris/arch-v10/lib/dram_init.S
+++ b/arch/cris/arch-v10/lib/dram_init.S
@@ -3,7 +3,7 @@
  * This file is intended to be included from other assembler files
  *
  * Note: This file may not modify r9 because r9 is used to carry
- *       information from the decompresser to the kernel
+ *       information from the decompressor to the kernel
  *
  * Copyright (C) 2000-2012 Axis Communications AB
  *
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index 2081d8b..099e170 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -1210,7 +1210,7 @@
 		assert(active_count >= eop_needed_count);
 		assert((eop_needed_count == 0) || (eop_needed_count == 1));
 		if (eop_needed_count) {
-			/* This means that the bulk operation (cipeher/m2m) is terminated. */
+			/* This means that the bulk operation (cipher/m2m) is terminated. */
 			if (active_count > 1) {
 				/* Use zero length EOP descriptor. */
 				struct cryptocop_dma_desc *ed = alloc_cdesc(alloc_flag);
@@ -2722,7 +2722,6 @@
 	err = get_user_pages((unsigned long int)(oper.indata + prev_ix),
 			     noinpages,
 			     0,  /* read access only for in data */
-			     0, /* no force */
 			     inpages,
 			     NULL);
 
@@ -2736,8 +2735,7 @@
 	if (oper.do_cipher){
 		err = get_user_pages((unsigned long int)oper.cipher_outdata,
 				     nooutpages,
-				     1, /* write access for out data */
-				     0, /* no force */
+				     FOLL_WRITE, /* write access for out data */
 				     outpages,
 				     NULL);
 		up_read(&current->mm->mmap_sem);
diff --git a/arch/cris/arch-v32/drivers/pci/bios.c b/arch/cris/arch-v32/drivers/pci/bios.c
index 64a5fb9..212266a 100644
--- a/arch/cris/arch-v32/drivers/pci/bios.c
+++ b/arch/cris/arch-v32/drivers/pci/bios.c
@@ -1,6 +1,6 @@
 #include <linux/pci.h>
 #include <linux/kernel.h>
-#include <arch/hwregs/intr_vect.h>
+#include <hwregs/intr_vect.h>
 
 void pcibios_fixup_bus(struct pci_bus *b)
 {
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index e989cee..ef515af 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -1627,6 +1627,12 @@
 
 	/* Create a sysfs class for syncser */
 	syncser_class = class_create(THIS_MODULE, "syncser_class");
+	if (IS_ERR(syncser_class)) {
+		pr_err("Failed to create a sysfs class for syncser\n");
+		unregister_chrdev_region(syncser_first, minor_count);
+		cdev_del(syncser_cdev);
+		return -1;
+	}
 
 	/* Initialize Ports */
 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
diff --git a/arch/cris/arch-v32/kernel/fasttimer.c b/arch/cris/arch-v32/kernel/fasttimer.c
index 5c84dbb..eb4e0ff 100644
--- a/arch/cris/arch-v32/kernel/fasttimer.c
+++ b/arch/cris/arch-v32/kernel/fasttimer.c
@@ -318,11 +318,13 @@
 
 static void timer_trig_handler(struct work_struct *work)
 {
-  reg_timer_rw_ack_intr ack_intr = { 0 };
-  reg_timer_rw_intr_mask intr_mask;
-  reg_timer_rw_trig_cfg trig_cfg = { 0 };
-  struct fast_timer *t;
-  unsigned long flags;
+	reg_timer_rw_ack_intr ack_intr = { 0 };
+	reg_timer_rw_intr_mask intr_mask;
+	reg_timer_rw_trig_cfg trig_cfg = { 0 };
+	struct fast_timer *t;
+	fast_timer_function_type *f;
+	unsigned long d;
+	unsigned long flags;
 
 	/* We keep interrupts disabled not only when we modify the
 	 * fast timer list, but any time we hold a reference to a
@@ -350,9 +352,6 @@
   fast_timer_running = 0;
   fast_timer_ints++;
 
-	fast_timer_function_type *f;
-	unsigned long d;
-
   t = fast_timer_list;
 	while (t) {
 		struct fasttime_t tv;
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index 6de8db6..b07da4b 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -471,9 +471,8 @@
 	irq_set_default_host(domain);
 	of_node_put(np);
 
-	for (i = FIRST_IRQ, j = 0; j < NBR_INTR_VECT; i++, j++) {
+	for (i = FIRST_IRQ, j = 0; j < NBR_INTR_VECT && j < MACH_IRQS; i++, j++)
 		set_exception_vector(i, interrupt[j]);
-	}
 
 	/* Mark Timer and IPI IRQs as CPU local */
 	irq_allocations[TIMER0_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED;
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index f085229..f0df654 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -147,7 +147,7 @@
 				/* The trampoline page is globally mapped, no page table to traverse.*/
 				tmp = *(unsigned long*)addr;
 			} else {
-				copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+				copied = access_process_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE);
 
 				if (copied != sizeof(tmp))
 					break;
@@ -279,7 +279,7 @@
   int opsize = 0;
 
   /* Read the opcode at pc (do what PTRACE_PEEKTEXT would do). */
-  copied = access_process_vm(child, pc, &opcode, sizeof(opcode), 0);
+  copied = access_process_vm(child, pc, &opcode, sizeof(opcode), FOLL_FORCE);
   if (copied != sizeof(opcode))
     return 0;
 
diff --git a/arch/cris/arch-v32/mach-a3/dma.c b/arch/cris/arch-v32/mach-a3/dma.c
index 47c64bf..11f417f 100644
--- a/arch/cris/arch-v32/mach-a3/dma.c
+++ b/arch/cris/arch-v32/mach-a3/dma.c
@@ -41,7 +41,6 @@
 
 		if (options & DMA_PANIC_ON_ERROR)
 			panic("request_dma error!");
-		spin_unlock_irqrestore(&dma_lock, flags);
 		return -EBUSY;
 	}
 	clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
diff --git a/arch/cris/arch-v32/mach-a3/dram_init.S b/arch/cris/arch-v32/mach-a3/dram_init.S
index 5c4f24d..7dc26bd 100644
--- a/arch/cris/arch-v32/mach-a3/dram_init.S
+++ b/arch/cris/arch-v32/mach-a3/dram_init.S
@@ -3,7 +3,7 @@
  * This file is intended to be included from other assembler files
  *
  * Note: This file may not modify r8 or r9 because they are used to
- * carry information from the decompresser to the kernel
+ * carry information from the decompressor to the kernel
  *
  * Copyright (C) 2005-2007 Axis Communications AB
  *
diff --git a/arch/cris/arch-v32/mach-fs/dma.c b/arch/cris/arch-v32/mach-fs/dma.c
index fc6416a..7c93679 100644
--- a/arch/cris/arch-v32/mach-fs/dma.c
+++ b/arch/cris/arch-v32/mach-fs/dma.c
@@ -43,7 +43,6 @@
 		}
 		if (options & DMA_PANIC_ON_ERROR)
 			panic("request_dma error!");
-		spin_unlock_irqrestore(&dma_lock, flags);
 		return -EBUSY;
 	}
 	clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
diff --git a/arch/cris/arch-v32/mach-fs/dram_init.S b/arch/cris/arch-v32/mach-fs/dram_init.S
index d3ce2eb..2ed51e2 100644
--- a/arch/cris/arch-v32/mach-fs/dram_init.S
+++ b/arch/cris/arch-v32/mach-fs/dram_init.S
@@ -3,7 +3,7 @@
  * This file is intended to be included from other assembler files
  *
  * Note: This file may not modify r8 or r9 because they are used to
- * carry information from the decompresser to the kernel
+ * carry information from the decompressor to the kernel
  *
  * Copyright (C) 2000-2007 Axis Communications AB
  *
diff --git a/arch/cris/arch-v32/mm/intmem.c b/arch/cris/arch-v32/mm/intmem.c
index 9ef5609..c807284 100644
--- a/arch/cris/arch-v32/mm/intmem.c
+++ b/arch/cris/arch-v32/mm/intmem.c
@@ -113,14 +113,14 @@
 
 			allocation->status = STATUS_FREE;
 			/* Join with prev and/or next if also free */
-			if ((prev != &intmem_allocations) &&
+			if ((&prev->entry != &intmem_allocations) &&
 					(prev->status == STATUS_FREE)) {
 				prev->size += allocation->size;
 				list_del(&allocation->entry);
 				kfree(allocation);
 				allocation = prev;
 			}
-			if ((next != &intmem_allocations) &&
+			if ((&next->entry != &intmem_allocations) &&
 					(next->status == STATUS_FREE)) {
 				allocation->size += next->size;
 				list_del(&next->entry);
@@ -145,5 +145,12 @@
 		(unsigned long)intmem_virtual + MEM_INTMEM_START +
 		RESERVED_SIZE);
 }
-device_initcall(crisv32_intmem_init);
+
+static int __init crisv32_intmem_setup(void)
+{
+	crisv32_intmem_init();
+
+	return 0;
+}
+device_initcall(crisv32_intmem_setup);
 
diff --git a/arch/cris/configs/artpec_3_defconfig b/arch/cris/configs/artpec_3_defconfig
index 70e497e..d31851f 100644
--- a/arch/cris/configs/artpec_3_defconfig
+++ b/arch/cris/configs/artpec_3_defconfig
@@ -25,7 +25,6 @@
 CONFIG_MTD_MTDRAM=y
 CONFIG_MTDRAM_TOTAL_SIZE=0
 CONFIG_MTDRAM_ERASE_SIZE=64
-CONFIG_MTDRAM_ABS_POS=0x0
 CONFIG_BLK_DEV_RAM=y
 CONFIG_NETDEVICES=y
 # CONFIG_INPUT is not set
diff --git a/arch/cris/configs/dev88_defconfig b/arch/cris/configs/dev88_defconfig
new file mode 100644
index 0000000..beff4ee
--- /dev/null
+++ b/arch/cris/configs/dev88_defconfig
@@ -0,0 +1,48 @@
+CONFIG_BUILTIN_DTB="dev88"
+# CONFIG_SWAP is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ETRAX_FAST_TIMER=y
+CONFIG_ETRAXFS=y
+CONFIG_ETRAX_DRAM_SIZE=32
+CONFIG_ETRAX_FLASH1_SIZE=4
+CONFIG_ETRAX_MEM_GRP1_CONFIG=0x40688
+CONFIG_ETRAX_MEM_GRP3_CONFIG=0x3
+CONFIG_ETRAX_MEM_GRP4_CONFIG=0x10040
+CONFIG_ETRAX_SDRAM_GRP0_CONFIG=0x958
+CONFIG_ETRAX_SDRAM_TIMING=0x824a
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+CONFIG_ETRAX_ETHERNET=y
+CONFIG_ETRAX_AXISFLASHMAP=y
+CONFIG_DEVTMPFS=y
+CONFIG_MTD_RAM=y
+CONFIG_MTDRAM_TOTAL_SIZE=0
+CONFIG_MTDRAM_ERASE_SIZE=64
+CONFIG_BLK_DEV_RAM=y
+CONFIG_NETDEVICES=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_ETRAXFS=y
+CONFIG_SERIAL_ETRAXFS_CONSOLE=y
+CONFIG_GPIO_ETRAXFS=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
diff --git a/arch/cris/configs/etrax-100lx_v2_defconfig b/arch/cris/configs/etrax-100lx_v2_defconfig
index a85aabf..d90ac95 100644
--- a/arch/cris/configs/etrax-100lx_v2_defconfig
+++ b/arch/cris/configs/etrax-100lx_v2_defconfig
@@ -28,7 +28,6 @@
 CONFIG_MTD_MTDRAM=y
 CONFIG_MTDRAM_TOTAL_SIZE=0
 CONFIG_MTDRAM_ERASE_SIZE=64
-CONFIG_MTDRAM_ABS_POS=0x0
 CONFIG_BLK_DEV_RAM=y
 CONFIG_NETDEVICES=y
 # CONFIG_INPUT is not set
diff --git a/arch/cris/configs/etraxfs_defconfig b/arch/cris/configs/etraxfs_defconfig
index 9123268..f714e9d 100644
--- a/arch/cris/configs/etraxfs_defconfig
+++ b/arch/cris/configs/etraxfs_defconfig
@@ -25,7 +25,6 @@
 CONFIG_MTD_MTDRAM=y
 CONFIG_MTDRAM_TOTAL_SIZE=0
 CONFIG_MTDRAM_ERASE_SIZE=64
-CONFIG_MTDRAM_ABS_POS=0x0
 CONFIG_BLK_DEV_RAM=y
 CONFIG_NETDEVICES=y
 # CONFIG_INPUT is not set
diff --git a/arch/cris/include/arch-v32/arch/cryptocop.h b/arch/cris/include/arch-v32/arch/cryptocop.h
index 716e434..a56ac61 100644
--- a/arch/cris/include/arch-v32/arch/cryptocop.h
+++ b/arch/cris/include/arch-v32/arch/cryptocop.h
@@ -81,7 +81,7 @@
 
 	unsigned int flags; /* DECRYPT, ENCRYPT, EXPLICIT_IV */
 
-	/* CBC initialisation vector for cihers. */
+	/* CBC initialisation vector for ciphers. */
 	u8 iv[CRYPTOCOP_MAX_IV_LENGTH];
 
 	/* The position in output where to write the transform output.  The order
diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h
index cce8664..fe0b2a0 100644
--- a/arch/cris/include/asm/io.h
+++ b/arch/cris/include/asm/io.h
@@ -8,34 +8,6 @@
 #include <asm-generic/iomap.h>
 #include <linux/kernel.h>
 
-struct cris_io_operations
-{
-	u32 (*read_mem)(void *addr, int size);
-	void (*write_mem)(u32 val, int size, void *addr);
-	u32 (*read_io)(u32 port, void *addr, int size, int count);
-	void (*write_io)(u32 port, void *addr, int size, int count);
-};
-
-#ifdef CONFIG_PCI
-extern struct cris_io_operations *cris_iops;
-#else
-#define cris_iops ((struct cris_io_operations*)NULL)
-#endif
-
-/*
- * Change virtual addresses to physical addresses and vv.
- */
-
-static inline unsigned long virt_to_phys(volatile void * address)
-{
-	return __pa(address);
-}
-
-static inline void * phys_to_virt(unsigned long address)
-{
-	return __va(address);
-}
-
 extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
 extern void __iomem * __ioremap_prot(unsigned long phys_addr, unsigned long size, pgprot_t prot);
 
@@ -48,147 +20,6 @@
 
 extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);
 
-/*
- * IO bus memory addresses are also 1:1 with the physical address
- */
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
-/*
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the CRIS architecture, we just read/write the
- * memory location directly.
- */
-#ifdef CONFIG_PCI
-#define PCI_SPACE(x) ((((unsigned)(x)) & 0x10000000) == 0x10000000)
-#else
-#define PCI_SPACE(x) 0
-#endif
-static inline unsigned char readb(const volatile void __iomem *addr)
-{
-	if (PCI_SPACE(addr) && cris_iops)
-		return cris_iops->read_mem((void*)addr, 1);
-	else
-		return *(volatile unsigned char __force *) addr;
-}
-static inline unsigned short readw(const volatile void __iomem *addr)
-{
-	if (PCI_SPACE(addr) && cris_iops)
-		return cris_iops->read_mem((void*)addr, 2);
-	else
-		return *(volatile unsigned short __force *) addr;
-}
-static inline unsigned int readl(const volatile void __iomem *addr)
-{
-	if (PCI_SPACE(addr) && cris_iops)
-		return cris_iops->read_mem((void*)addr, 4);
-	else
-		return *(volatile unsigned int __force *) addr;
-}
-#define readb_relaxed(addr) readb(addr)
-#define readw_relaxed(addr) readw(addr)
-#define readl_relaxed(addr) readl(addr)
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-
-static inline void writeb(unsigned char b, volatile void __iomem *addr)
-{
-	if (PCI_SPACE(addr) && cris_iops)
-		cris_iops->write_mem(b, 1, (void*)addr);
-	else
-		*(volatile unsigned char __force *) addr = b;
-}
-static inline void writew(unsigned short b, volatile void __iomem *addr)
-{
-	if (PCI_SPACE(addr) && cris_iops)
-		cris_iops->write_mem(b, 2, (void*)addr);
-	else
-		*(volatile unsigned short __force *) addr = b;
-}
-static inline void writel(unsigned int b, volatile void __iomem *addr)
-{
-	if (PCI_SPACE(addr) && cris_iops)
-		cris_iops->write_mem(b, 4, (void*)addr);
-	else
-		*(volatile unsigned int __force *) addr = b;
-}
-#define writeb_relaxed(b, addr) writeb(b, addr)
-#define writew_relaxed(b, addr) writew(b, addr)
-#define writel_relaxed(b, addr) writel(b, addr)
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-
-#define mmiowb()
-
-#define memset_io(a,b,c)	memset((void *)(a),(b),(c))
-#define memcpy_fromio(a,b,c)	memcpy((a),(void *)(b),(c))
-#define memcpy_toio(a,b,c)	memcpy((void *)(a),(b),(c))
-
-
-/* I/O port access. Normally there is no I/O space on CRIS but when
- * Cardbus/PCI is enabled the request is passed through the bridge.
- */
-
-#define IO_SPACE_LIMIT 0xffff
-#define inb(port) (cris_iops ? cris_iops->read_io(port,NULL,1,1) : 0)
-#define inw(port) (cris_iops ? cris_iops->read_io(port,NULL,2,1) : 0)
-#define inl(port) (cris_iops ? cris_iops->read_io(port,NULL,4,1) : 0)
-#define insb(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,1,count) : 0)
-#define insw(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,2,count) : 0)
-#define insl(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,4,count) : 0)
-static inline void outb(unsigned char data, unsigned int port)
-{
-	if (cris_iops)
-		cris_iops->write_io(port, (void *) &data, 1, 1);
-}
-static inline void outw(unsigned short data, unsigned int port)
-{
-	if (cris_iops)
-		cris_iops->write_io(port, (void *) &data, 2, 1);
-}
-static inline void outl(unsigned int data, unsigned int port)
-{
-	if (cris_iops)
-		cris_iops->write_io(port, (void *) &data, 4, 1);
-}
-static inline void outsb(unsigned int port, const void *addr,
-			 unsigned long count)
-{
-	if (cris_iops)
-		cris_iops->write_io(port, (void *)addr, 1, count);
-}
-static inline void outsw(unsigned int port, const void *addr,
-			 unsigned long count)
-{
-	if (cris_iops)
-		cris_iops->write_io(port, (void *)addr, 2, count);
-}
-static inline void outsl(unsigned int port, const void *addr,
-			 unsigned long count)
-{
-	if (cris_iops)
-		cris_iops->write_io(port, (void *)addr, 4, count);
-}
-
-#define inb_p(port)             inb(port)
-#define inw_p(port)             inw(port)
-#define inl_p(port)             inl(port)
-#define outb_p(val, port)       outb((val), (port))
-#define outw_p(val, port)       outw((val), (port))
-#define outl_p(val, port)       outl((val), (port))
-
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p)	__va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p)	p
+#include <asm-generic/io.h>
 
 #endif
diff --git a/arch/cris/include/uapi/arch-v10/arch/sv_addr_ag.h b/arch/cris/include/uapi/arch-v10/arch/sv_addr_ag.h
index 5517f04..c4b6b0e 100644
--- a/arch/cris/include/uapi/arch-v10/arch/sv_addr_ag.h
+++ b/arch/cris/include/uapi/arch-v10/arch/sv_addr_ag.h
@@ -61,7 +61,7 @@
 #define IO_WIDTH(reg, field) IO_WIDTH_ (reg##_, field##_)
 #define IO_WIDTH_(reg_, field_) (reg_##_##field_##_WIDTH)
 
-/*--- Obsolete. Kept for backw compatibility. ---*/
+/*--- Obsolete. Kept for backward compatibility. ---*/
 /* Reads (or writes) a byte/uword/udword from the specified mode
    register. */
 #define IO_RD(reg) (*(volatile u32*)(reg))
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 112ef26..94183d3 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -6,7 +6,7 @@
 
 #include <linux/mm.h>
 #include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/wait.h>
 #include <linux/uaccess.h>
 #include <arch/system.h>
diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h
index 07d7a7e..a0513d4 100644
--- a/arch/frv/include/asm/pgtable.h
+++ b/arch/frv/include/asm/pgtable.h
@@ -522,5 +522,6 @@
 #ifndef __ASSEMBLY__
 extern void __init paging_init(void);
 #endif /* !__ASSEMBLY__ */
+#define HAVE_ARCH_UNMAPPED_AREA
 
 #endif /* _ASM_PGTABLE_H */
diff --git a/arch/frv/include/asm/segment.h b/arch/frv/include/asm/segment.h
index 4377c89..2305142 100644
--- a/arch/frv/include/asm/segment.h
+++ b/arch/frv/include/asm/segment.h
@@ -32,7 +32,6 @@
 #define get_ds()		(KERNEL_DS)
 #define get_fs()		(__current_thread_info->addr_limit)
 #define segment_eq(a, b)	((a).seg == (b).seg)
-#define __kernel_ds_p()		segment_eq(get_fs(), KERNEL_DS)
 #define get_addr_limit()	(get_fs().seg)
 
 #define set_fs(_x)					\
diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h
index 87d9e34..c0f4057e 100644
--- a/arch/frv/include/asm/uaccess.h
+++ b/arch/frv/include/asm/uaccess.h
@@ -20,8 +20,6 @@
 #include <asm/segment.h>
 #include <asm/sections.h>
 
-#define HAVE_ARCH_UNMAPPED_AREA	/* we decide where to put mmaps */
-
 #define __ptr(x) ((unsigned long __force *)(x))
 
 #define VERIFY_READ	0
diff --git a/arch/ia64/hp/sim/boot/Makefile b/arch/ia64/hp/sim/boot/Makefile
index 2e805e0..df6e996 100644
--- a/arch/ia64/hp/sim/boot/Makefile
+++ b/arch/ia64/hp/sim/boot/Makefile
@@ -33,5 +33,5 @@
 LDFLAGS_bootloader = -static -T
 
 $(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o $(obj)/boot_head.o $(obj)/fw-emu.o \
-                   lib/lib.a arch/ia64/lib/built-in.o arch/ia64/lib/lib.a FORCE
+                   lib/lib.a arch/ia64/lib/lib.a FORCE
 	$(call if_changed,ld)
diff --git a/arch/ia64/include/asm/export.h b/arch/ia64/include/asm/export.h
new file mode 100644
index 0000000..ad18c65
--- /dev/null
+++ b/arch/ia64/include/asm/export.h
@@ -0,0 +1,3 @@
+/* EXPORT_DATA_SYMBOL != EXPORT_SYMBOL here */
+#define KSYM_FUNC(name) @fptr(name)
+#include <asm-generic/export.h>
diff --git a/arch/ia64/include/asm/libata-portmap.h b/arch/ia64/include/asm/libata-portmap.h
index 0e00c9a..7a1f831 100644
--- a/arch/ia64/include/asm/libata-portmap.h
+++ b/arch/ia64/include/asm/libata-portmap.h
@@ -1,12 +1,8 @@
 #ifndef __ASM_IA64_LIBATA_PORTMAP_H
 #define __ASM_IA64_LIBATA_PORTMAP_H
 
-#define ATA_PRIMARY_CMD		0x1F0
-#define ATA_PRIMARY_CTL		0x3F6
 #define ATA_PRIMARY_IRQ(dev)	isa_irq_to_vector(14)
 
-#define ATA_SECONDARY_CMD	0x170
-#define ATA_SECONDARY_CTL	0x376
 #define ATA_SECONDARY_IRQ(dev)	isa_irq_to_vector(15)
 
 #endif
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index cfaa7b2..6f27a66 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -48,6 +48,7 @@
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
+#include <asm/export.h>
 
 #include "minstate.h"
 
@@ -1345,12 +1346,14 @@
 	mov rp=loc0
 	br.ret.sptk.many rp
 END(unw_init_running)
+EXPORT_SYMBOL(unw_init_running)
 
 #ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 GLOBAL_ENTRY(_mcount)
 	br ftrace_stub
 END(_mcount)
+EXPORT_SYMBOL(_mcount)
 
 .here:
 	br.ret.sptk.many b0
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 09f8457..5ed0ea9 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -142,7 +142,7 @@
 	u64 virt_addr=simple_strtoull(buf, NULL, 16);
 	int ret;
 
-	ret = get_user_pages(virt_addr, 1, VM_READ, 0, NULL, NULL);
+	ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL);
 	if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
 		printk("Virtual address %lx is not existing.\n",virt_addr);
diff --git a/arch/ia64/kernel/esi_stub.S b/arch/ia64/kernel/esi_stub.S
index 6b3d6c1..2c369bf 100644
--- a/arch/ia64/kernel/esi_stub.S
+++ b/arch/ia64/kernel/esi_stub.S
@@ -35,6 +35,7 @@
 
 #include <asm/processor.h>
 #include <asm/asmmacro.h>
+#include <asm/export.h>
 
 /*
  * Inputs:
@@ -94,3 +95,4 @@
 	mov gp=loc2
 	br.ret.sptk.many rp
 END(esi_call_phys)
+EXPORT_SYMBOL_GPL(esi_call_phys)
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index bb748c5..c9b5e94 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -32,6 +32,7 @@
 #include <asm/mca_asm.h>
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <asm/export.h>
 
 #ifdef CONFIG_HOTPLUG_CPU
 #define SAL_PSR_BITS_TO_SET				\
@@ -168,6 +169,7 @@
 	__PAGE_ALIGNED_DATA
 
 	.global empty_zero_page
+EXPORT_DATA_SYMBOL_GPL(empty_zero_page)
 empty_zero_page:
 	.skip PAGE_SIZE
 
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 0967310..d111248 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -1,101 +1,11 @@
 /*
  * Architecture-specific kernel symbols
- *
- * Don't put any exports here unless it's defined in an assembler file.
- * All other exports should be put directly after the definition.
  */
 
-#include <linux/module.h>
-
-#include <linux/string.h>
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(strlen);
-
-#include <asm/pgtable.h>
-EXPORT_SYMBOL_GPL(empty_zero_page);
-
-#include <asm/checksum.h>
-EXPORT_SYMBOL(ip_fast_csum);		/* hand-coded assembly */
-EXPORT_SYMBOL(csum_ipv6_magic);
-
-#include <asm/page.h>
-EXPORT_SYMBOL(clear_page);
-EXPORT_SYMBOL(copy_page);
-
 #ifdef CONFIG_VIRTUAL_MEM_MAP
+#include <linux/compiler.h>
+#include <linux/export.h>
 #include <linux/bootmem.h>
 EXPORT_SYMBOL(min_low_pfn);	/* defined by bootmem.c, but not exported by generic code */
 EXPORT_SYMBOL(max_low_pfn);	/* defined by bootmem.c, but not exported by generic code */
 #endif
-
-#include <asm/processor.h>
-EXPORT_SYMBOL(ia64_cpu_info);
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(local_per_cpu_offset);
-#endif
-
-#include <asm/uaccess.h>
-EXPORT_SYMBOL(__copy_user);
-EXPORT_SYMBOL(__do_clear_user);
-EXPORT_SYMBOL(__strlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(__strnlen_user);
-
-/* from arch/ia64/lib */
-extern void __divsi3(void);
-extern void __udivsi3(void);
-extern void __modsi3(void);
-extern void __umodsi3(void);
-extern void __divdi3(void);
-extern void __udivdi3(void);
-extern void __moddi3(void);
-extern void __umoddi3(void);
-
-EXPORT_SYMBOL(__divsi3);
-EXPORT_SYMBOL(__udivsi3);
-EXPORT_SYMBOL(__modsi3);
-EXPORT_SYMBOL(__umodsi3);
-EXPORT_SYMBOL(__divdi3);
-EXPORT_SYMBOL(__udivdi3);
-EXPORT_SYMBOL(__moddi3);
-EXPORT_SYMBOL(__umoddi3);
-
-#if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
-extern void xor_ia64_2(void);
-extern void xor_ia64_3(void);
-extern void xor_ia64_4(void);
-extern void xor_ia64_5(void);
-
-EXPORT_SYMBOL(xor_ia64_2);
-EXPORT_SYMBOL(xor_ia64_3);
-EXPORT_SYMBOL(xor_ia64_4);
-EXPORT_SYMBOL(xor_ia64_5);
-#endif
-
-#include <asm/pal.h>
-EXPORT_SYMBOL(ia64_pal_call_phys_stacked);
-EXPORT_SYMBOL(ia64_pal_call_phys_static);
-EXPORT_SYMBOL(ia64_pal_call_stacked);
-EXPORT_SYMBOL(ia64_pal_call_static);
-EXPORT_SYMBOL(ia64_load_scratch_fpregs);
-EXPORT_SYMBOL(ia64_save_scratch_fpregs);
-
-#include <asm/unwind.h>
-EXPORT_SYMBOL(unw_init_running);
-
-#if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE)
-extern void esi_call_phys (void);
-EXPORT_SYMBOL_GPL(esi_call_phys);
-#endif
-extern char ia64_ivt[];
-EXPORT_SYMBOL(ia64_ivt);
-
-#include <asm/ftrace.h>
-#ifdef CONFIG_FUNCTION_TRACER
-/* mcount is defined in assembly */
-EXPORT_SYMBOL(_mcount);
-#endif
-
-#include <asm/cacheflush.h>
-EXPORT_SYMBOL_GPL(flush_icache_range);
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index b1c3cfc..44a103a 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -57,6 +57,7 @@
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 #include <asm/errno.h>
+#include <asm/export.h>
 
 #if 0
 # define PSR_DEFAULT_BITS	psr.ac
@@ -85,6 +86,7 @@
 
 	.align 32768	// align on 32KB boundary
 	.global ia64_ivt
+	EXPORT_DATA_SYMBOL(ia64_ivt)
 ia64_ivt:
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
index 0b53344..94fb2e3 100644
--- a/arch/ia64/kernel/pal.S
+++ b/arch/ia64/kernel/pal.S
@@ -14,6 +14,7 @@
 
 #include <asm/asmmacro.h>
 #include <asm/processor.h>
+#include <asm/export.h>
 
 	.data
 pal_entry_point:
@@ -87,6 +88,7 @@
 	srlz.d				// seralize restoration of psr.l
 	br.ret.sptk.many b0
 END(ia64_pal_call_static)
+EXPORT_SYMBOL(ia64_pal_call_static)
 
 /*
  * Make a PAL call using the stacked registers calling convention.
@@ -122,6 +124,7 @@
 	srlz.d				// serialize restoration of psr.l
 	br.ret.sptk.many b0
 END(ia64_pal_call_stacked)
+EXPORT_SYMBOL(ia64_pal_call_stacked)
 
 /*
  * Make a physical mode PAL call using the static registers calling convention.
@@ -193,6 +196,7 @@
 	srlz.d				// seralize restoration of psr.l
 	br.ret.sptk.many b0
 END(ia64_pal_call_phys_static)
+EXPORT_SYMBOL(ia64_pal_call_phys_static)
 
 /*
  * Make a PAL call using the stacked registers in physical mode.
@@ -250,6 +254,7 @@
 	srlz.d				// seralize restoration of psr.l
 	br.ret.sptk.many b0
 END(ia64_pal_call_phys_stacked)
+EXPORT_SYMBOL(ia64_pal_call_phys_stacked)
 
 /*
  * Save scratch fp scratch regs which aren't saved in pt_regs already
@@ -275,6 +280,7 @@
 	stf.spill [r2]  = f15,32
 	br.ret.sptk.many rp
 END(ia64_save_scratch_fpregs)
+EXPORT_SYMBOL(ia64_save_scratch_fpregs)
 
 /*
  * Load scratch fp scratch regs (fp10-fp15)
@@ -296,3 +302,4 @@
 	ldf.fill  f15 = [r2],32
 	br.ret.sptk.many rp
 END(ia64_load_scratch_fpregs)
+EXPORT_SYMBOL(ia64_load_scratch_fpregs)
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 6f54d51..31aa8c0 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -453,7 +453,7 @@
 			return 0;
 		}
 	}
-	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
+	copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
 	if (copied != sizeof(ret))
 		return -EIO;
 	*val = ret;
@@ -489,7 +489,8 @@
 				*ia64_rse_skip_regs(krbs, regnum) = val;
 			}
 		}
-	} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
+	} else if (access_process_vm(child, addr, &val, sizeof(val),
+				FOLL_FORCE | FOLL_WRITE)
 		   != sizeof(val))
 		return -EIO;
 	return 0;
@@ -543,7 +544,8 @@
 		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
 		if (ret < 0)
 			return ret;
-		if (access_process_vm(child, addr, &val, sizeof(val), 1)
+		if (access_process_vm(child, addr, &val, sizeof(val),
+				FOLL_FORCE | FOLL_WRITE)
 		    != sizeof(val))
 			return -EIO;
 	}
@@ -559,7 +561,8 @@
 
 	/* now copy word for word from user rbs to kernel rbs: */
 	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
-		if (access_process_vm(child, addr, &val, sizeof(val), 0)
+		if (access_process_vm(child, addr, &val, sizeof(val),
+				FOLL_FORCE)
 				!= sizeof(val))
 			return -EIO;
 
@@ -1156,7 +1159,8 @@
 	case PTRACE_PEEKTEXT:
 	case PTRACE_PEEKDATA:
 		/* read word at location addr */
-		if (access_process_vm(child, addr, &data, sizeof(data), 0)
+		if (access_process_vm(child, addr, &data, sizeof(data),
+				FOLL_FORCE)
 		    != sizeof(data))
 			return -EIO;
 		/* ensure return value is not mistaken for error code */
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index afddb3e..7ec7acc 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -71,7 +71,11 @@
 #endif
 
 DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
+EXPORT_SYMBOL(ia64_cpu_info);
 DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(local_per_cpu_offset);
+#endif
 unsigned long ia64_cycles_per_usec;
 struct ia64_boot_param *ia64_boot_param;
 struct screen_info screen_info;
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index 98771e2..1f3d387 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -2,17 +2,15 @@
 # Makefile for ia64-specific library routines..
 #
 
-obj-y := io.o
-
-lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o			\
+lib-y := io.o __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o		\
 	__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o			\
 	checksum.o clear_page.o csum_partial_copy.o			\
 	clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o	\
 	flush.o ip_fast_csum.o do_csum.o				\
 	memset.o strlen.o xor.o
 
-obj-$(CONFIG_ITANIUM)	+= copy_page.o copy_user.o memcpy.o
-obj-$(CONFIG_MCKINLEY)	+= copy_page_mck.o memcpy_mck.o
+lib-$(CONFIG_ITANIUM)	+= copy_page.o copy_user.o memcpy.o
+lib-$(CONFIG_MCKINLEY)	+= copy_page_mck.o memcpy_mck.o
 lib-$(CONFIG_PERFMON)	+= carta_random.o
 
 AFLAGS___divdi3.o	=
diff --git a/arch/ia64/lib/clear_page.S b/arch/ia64/lib/clear_page.S
index 2d814e7..3cf5b76 100644
--- a/arch/ia64/lib/clear_page.S
+++ b/arch/ia64/lib/clear_page.S
@@ -11,6 +11,7 @@
 
 #include <asm/asmmacro.h>
 #include <asm/page.h>
+#include <asm/export.h>
 
 #ifdef CONFIG_ITANIUM
 # define L3_LINE_SIZE	64	// Itanium L3 line size
@@ -74,3 +75,4 @@
 	mov ar.lc = saved_lc		// restore lc
 	br.ret.sptk.many rp
 END(clear_page)
+EXPORT_SYMBOL(clear_page)
diff --git a/arch/ia64/lib/clear_user.S b/arch/ia64/lib/clear_user.S
index eecd857..7b40731 100644
--- a/arch/ia64/lib/clear_user.S
+++ b/arch/ia64/lib/clear_user.S
@@ -12,6 +12,7 @@
  */
 
 #include <asm/asmmacro.h>
+#include <asm/export.h>
 
 //
 // arguments
@@ -207,3 +208,4 @@
 	mov ar.lc=saved_lc
 	br.ret.sptk.many rp
 END(__do_clear_user)
+EXPORT_SYMBOL(__do_clear_user)
diff --git a/arch/ia64/lib/copy_page.S b/arch/ia64/lib/copy_page.S
index 127d1d0..cbdb9e3 100644
--- a/arch/ia64/lib/copy_page.S
+++ b/arch/ia64/lib/copy_page.S
@@ -16,6 +16,7 @@
  */
 #include <asm/asmmacro.h>
 #include <asm/page.h>
+#include <asm/export.h>
 
 #define PIPE_DEPTH	3
 #define EPI		p[PIPE_DEPTH-1]
@@ -96,3 +97,4 @@
 	mov ar.lc=saved_lc
 	br.ret.sptk.many rp
 END(copy_page)
+EXPORT_SYMBOL(copy_page)
diff --git a/arch/ia64/lib/copy_page_mck.S b/arch/ia64/lib/copy_page_mck.S
index 3c45d60..c13f690 100644
--- a/arch/ia64/lib/copy_page_mck.S
+++ b/arch/ia64/lib/copy_page_mck.S
@@ -61,6 +61,7 @@
  */
 #include <asm/asmmacro.h>
 #include <asm/page.h>
+#include <asm/export.h>
 
 #define PREFETCH_DIST	8		// McKinley sustains 16 outstanding L2 misses (8 ld, 8 st)
 
@@ -183,3 +184,4 @@
 	mov pr = saved_pr, -1
 	br.ret.sptk.many rp
 END(copy_page)
+EXPORT_SYMBOL(copy_page)
diff --git a/arch/ia64/lib/copy_user.S b/arch/ia64/lib/copy_user.S
index c952bdc..66facd5 100644
--- a/arch/ia64/lib/copy_user.S
+++ b/arch/ia64/lib/copy_user.S
@@ -30,6 +30,7 @@
  */
 
 #include <asm/asmmacro.h>
+#include <asm/export.h>
 
 //
 // Tuneable parameters
@@ -608,3 +609,4 @@
 	mov ar.pfs=saved_pfs
 	br.ret.sptk.many rp
 END(__copy_user)
+EXPORT_SYMBOL(__copy_user)
diff --git a/arch/ia64/lib/flush.S b/arch/ia64/lib/flush.S
index 1d8c888..9a5a2f9 100644
--- a/arch/ia64/lib/flush.S
+++ b/arch/ia64/lib/flush.S
@@ -8,6 +8,7 @@
  */
 
 #include <asm/asmmacro.h>
+#include <asm/export.h>
 
 
 	/*
@@ -60,6 +61,7 @@
 	mov	ar.lc=r3		// restore ar.lc
 	br.ret.sptk.many rp
 END(flush_icache_range)
+EXPORT_SYMBOL_GPL(flush_icache_range)
 
 	/*
 	 * clflush_cache_range(start,size)
diff --git a/arch/ia64/lib/idiv32.S b/arch/ia64/lib/idiv32.S
index c91b5b0..715aed7 100644
--- a/arch/ia64/lib/idiv32.S
+++ b/arch/ia64/lib/idiv32.S
@@ -15,6 +15,7 @@
  */
 
 #include <asm/asmmacro.h>
+#include <asm/export.h>
 
 #ifdef MODULO
 # define OP	mod
@@ -81,3 +82,4 @@
 	getf.sig r8 = f6		// transfer result to result register
 	br.ret.sptk.many rp
 END(NAME)
+EXPORT_SYMBOL(NAME)
diff --git a/arch/ia64/lib/idiv64.S b/arch/ia64/lib/idiv64.S
index 627573c..25840f6 100644
--- a/arch/ia64/lib/idiv64.S
+++ b/arch/ia64/lib/idiv64.S
@@ -15,6 +15,7 @@
  */
 
 #include <asm/asmmacro.h>
+#include <asm/export.h>
 
 #ifdef MODULO
 # define OP	mod
@@ -78,3 +79,4 @@
 	getf.sig r8 = f11		// transfer result to result register
 	br.ret.sptk.many rp
 END(NAME)
+EXPORT_SYMBOL(NAME)
diff --git a/arch/ia64/lib/ip_fast_csum.S b/arch/ia64/lib/ip_fast_csum.S
index 620d9dc..648e0d4 100644
--- a/arch/ia64/lib/ip_fast_csum.S
+++ b/arch/ia64/lib/ip_fast_csum.S
@@ -13,6 +13,7 @@
  */
 
 #include <asm/asmmacro.h>
+#include <asm/export.h>
 
 /*
  * Since we know that most likely this function is called with buf aligned
@@ -92,6 +93,7 @@
 	mov	b0=r34
 	br.ret.sptk.many b0
 END(ip_fast_csum)
+EXPORT_SYMBOL(ip_fast_csum)
 
 GLOBAL_ENTRY(csum_ipv6_magic)
 	ld4	r20=[in0],4
@@ -142,3 +144,4 @@
 	andcm	r8=r9,r8
 	br.ret.sptk.many b0
 END(csum_ipv6_magic)
+EXPORT_SYMBOL(csum_ipv6_magic)
diff --git a/arch/ia64/lib/memcpy.S b/arch/ia64/lib/memcpy.S
index 448908d..ba172fd 100644
--- a/arch/ia64/lib/memcpy.S
+++ b/arch/ia64/lib/memcpy.S
@@ -14,6 +14,7 @@
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 #include <asm/asmmacro.h>
+#include <asm/export.h>
 
 GLOBAL_ENTRY(memcpy)
 
@@ -299,3 +300,4 @@
 	COPY(56, 0)
 
 END(memcpy)
+EXPORT_SYMBOL(memcpy)
diff --git a/arch/ia64/lib/memcpy_mck.S b/arch/ia64/lib/memcpy_mck.S
index ab0f876..b264b6a 100644
--- a/arch/ia64/lib/memcpy_mck.S
+++ b/arch/ia64/lib/memcpy_mck.S
@@ -15,6 +15,7 @@
  */
 #include <asm/asmmacro.h>
 #include <asm/page.h>
+#include <asm/export.h>
 
 #define EK(y...) EX(y)
 
@@ -78,6 +79,7 @@
 	br.cond.sptk .common_code
 	;;
 END(memcpy)
+EXPORT_SYMBOL(memcpy)
 GLOBAL_ENTRY(__copy_user)
 	.prologue
 // check dest alignment
@@ -664,3 +666,4 @@
 
 /* end of McKinley specific optimization */
 END(__copy_user)
+EXPORT_SYMBOL(__copy_user)
diff --git a/arch/ia64/lib/memset.S b/arch/ia64/lib/memset.S
index f26c16a..87b9747 100644
--- a/arch/ia64/lib/memset.S
+++ b/arch/ia64/lib/memset.S
@@ -18,6 +18,7 @@
    to get peak speed when value = 0.  */
 
 #include <asm/asmmacro.h>
+#include <asm/export.h>
 #undef ret
 
 #define dest		in0
@@ -360,3 +361,4 @@
 	br.ret.sptk.many rp
 }
 END(memset)
+EXPORT_SYMBOL(memset)
diff --git a/arch/ia64/lib/strlen.S b/arch/ia64/lib/strlen.S
index e0cdac0..1a6e17c 100644
--- a/arch/ia64/lib/strlen.S
+++ b/arch/ia64/lib/strlen.S
@@ -17,6 +17,7 @@
  */
 
 #include <asm/asmmacro.h>
+#include <asm/export.h>
 
 //
 //
@@ -190,3 +191,4 @@
 	mov ar.pfs=saved_pfs	// because of ar.ec, restore no matter what
 	br.ret.sptk.many rp	// end of successful recovery code
 END(strlen)
+EXPORT_SYMBOL(strlen)
diff --git a/arch/ia64/lib/strlen_user.S b/arch/ia64/lib/strlen_user.S
index c71eded..9d25768 100644
--- a/arch/ia64/lib/strlen_user.S
+++ b/arch/ia64/lib/strlen_user.S
@@ -16,6 +16,7 @@
  */
 
 #include <asm/asmmacro.h>
+#include <asm/export.h>
 
 //
 // int strlen_user(char *)
@@ -196,3 +197,4 @@
 	mov ar.pfs=saved_pfs	// because of ar.ec, restore no matter what
 	br.ret.sptk.many rp
 END(__strlen_user)
+EXPORT_SYMBOL(__strlen_user)
diff --git a/arch/ia64/lib/strncpy_from_user.S b/arch/ia64/lib/strncpy_from_user.S
index a504381..ca9ccf2 100644
--- a/arch/ia64/lib/strncpy_from_user.S
+++ b/arch/ia64/lib/strncpy_from_user.S
@@ -17,6 +17,7 @@
  */
 
 #include <asm/asmmacro.h>
+#include <asm/export.h>
 
 GLOBAL_ENTRY(__strncpy_from_user)
 	alloc r2=ar.pfs,3,0,0,0
@@ -42,3 +43,4 @@
 [.Lexit:]
 	br.ret.sptk.many rp
 END(__strncpy_from_user)
+EXPORT_SYMBOL(__strncpy_from_user)
diff --git a/arch/ia64/lib/strnlen_user.S b/arch/ia64/lib/strnlen_user.S
index d09066b1..80a5dfd 100644
--- a/arch/ia64/lib/strnlen_user.S
+++ b/arch/ia64/lib/strnlen_user.S
@@ -13,6 +13,7 @@
  */
 
 #include <asm/asmmacro.h>
+#include <asm/export.h>
 
 GLOBAL_ENTRY(__strnlen_user)
 	.prologue
@@ -43,3 +44,4 @@
 	mov ar.lc=r16			// restore ar.lc
 	br.ret.sptk.many rp
 END(__strnlen_user)
+EXPORT_SYMBOL(__strnlen_user)
diff --git a/arch/ia64/lib/xor.S b/arch/ia64/lib/xor.S
index 54e3f7e..c83f1c4 100644
--- a/arch/ia64/lib/xor.S
+++ b/arch/ia64/lib/xor.S
@@ -14,6 +14,7 @@
  */
 
 #include <asm/asmmacro.h>
+#include <asm/export.h>
 
 GLOBAL_ENTRY(xor_ia64_2)
 	.prologue
@@ -51,6 +52,7 @@
 	mov pr = r29, -1
 	br.ret.sptk.few rp
 END(xor_ia64_2)
+EXPORT_SYMBOL(xor_ia64_2)
 
 GLOBAL_ENTRY(xor_ia64_3)
 	.prologue
@@ -91,6 +93,7 @@
 	mov pr = r29, -1
 	br.ret.sptk.few rp
 END(xor_ia64_3)
+EXPORT_SYMBOL(xor_ia64_3)
 
 GLOBAL_ENTRY(xor_ia64_4)
 	.prologue
@@ -134,6 +137,7 @@
 	mov pr = r29, -1
 	br.ret.sptk.few rp
 END(xor_ia64_4)
+EXPORT_SYMBOL(xor_ia64_4)
 
 GLOBAL_ENTRY(xor_ia64_5)
 	.prologue
@@ -182,3 +186,4 @@
 	mov pr = r29, -1
 	br.ret.sptk.few rp
 END(xor_ia64_5)
+EXPORT_SYMBOL(xor_ia64_5)
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 51f5e9a..c145605 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -493,7 +493,8 @@
 	int i;
 
 	for (i = 0; i < p->nr_trap; i++)
-		access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]), 1);
+		access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]),
+				FOLL_FORCE | FOLL_WRITE);
 	p->nr_trap = 0;
 }
 
@@ -537,7 +538,8 @@
 	unsigned long next_insn, code;
 	unsigned long addr = next_pc & ~3;
 
-	if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), 0)
+	if (access_process_vm(child, addr, &next_insn, sizeof(next_insn),
+			FOLL_FORCE)
 	    != sizeof(next_insn)) {
 		return -1; /* error */
 	}
@@ -546,7 +548,8 @@
 	if (register_debug_trap(child, next_pc, next_insn, &code)) {
 		return -1; /* error */
 	}
-	if (access_process_vm(child, addr, &code, sizeof(code), 1)
+	if (access_process_vm(child, addr, &code, sizeof(code),
+			FOLL_FORCE | FOLL_WRITE)
 	    != sizeof(code)) {
 		return -1; /* error */
 	}
@@ -562,7 +565,8 @@
  	addr = (regs->bpc - 2) & ~3;
 	regs->bpc -= 2;
 	if (unregister_debug_trap(current, addr, &code)) {
-	    access_process_vm(current, addr, &code, sizeof(code), 1);
+	    access_process_vm(current, addr, &code, sizeof(code),
+		    FOLL_FORCE | FOLL_WRITE);
 	    invalidate_cache();
 	}
 }
@@ -589,7 +593,8 @@
 	/* Compute next pc.  */
 	pc = get_stack_long(child, PT_BPC);
 
-	if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
+	if (access_process_vm(child, pc&~3, &insn, sizeof(insn),
+			FOLL_FORCE)
 	    != sizeof(insn))
 		return;
 
diff --git a/arch/m68k/include/asm/export.h b/arch/m68k/include/asm/export.h
new file mode 100644
index 0000000..0af20f4
--- /dev/null
+++ b/arch/m68k/include/asm/export.h
@@ -0,0 +1,3 @@
+#define KSYM_ALIGN 2
+#define KCRC_ALIGN 2
+#include <asm-generic/export.h>
diff --git a/arch/m68k/include/asm/uaccess_no.h b/arch/m68k/include/asm/uaccess_no.h
index 1bdf152..36deeb3 100644
--- a/arch/m68k/include/asm/uaccess_no.h
+++ b/arch/m68k/include/asm/uaccess_no.h
@@ -44,9 +44,6 @@
 	unsigned long insn, fixup;
 };
 
-/* Returns 0 if exception not found and fixup otherwise.  */
-extern unsigned long search_exception_table(unsigned long);
-
 
 /*
  * These are the main single-value transfer routines.  They automatically
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index 8a1c4d3..74c898c 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -13,7 +13,7 @@
 extra-$(CONFIG_SUN3)	:= sun3-head.o
 extra-y			+= vmlinux.lds
 
-obj-y	:= entry.o irq.o m68k_ksyms.o module.o process.o ptrace.o
+obj-y	:= entry.o irq.o module.o process.o ptrace.o
 obj-y	+= setup.o signal.o sys_m68k.o syscalltable.o time.o traps.o
 
 obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
deleted file mode 100644
index 774c1bd..0000000
--- a/arch/m68k/kernel/m68k_ksyms.c
+++ /dev/null
@@ -1,32 +0,0 @@
-#include <linux/module.h>
-
-asmlinkage long long __ashldi3 (long long, int);
-asmlinkage long long __ashrdi3 (long long, int);
-asmlinkage long long __lshrdi3 (long long, int);
-asmlinkage long long __muldi3 (long long, long long);
-
-/* The following are special because they're not called
-   explicitly (the C compiler generates them).  Fortunately,
-   their interface isn't gonna change any time soon now, so
-   it's OK to leave it out of version control.  */
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__muldi3);
-
-#if defined(CONFIG_CPU_HAS_NO_MULDIV64)
-/*
- * Simpler 68k and ColdFire parts also need a few other gcc functions.
- */
-extern long long __divsi3(long long, long long);
-extern long long __modsi3(long long, long long);
-extern long long __mulsi3(long long, long long);
-extern long long __udivsi3(long long, long long);
-extern long long __umodsi3(long long, long long);
-
-EXPORT_SYMBOL(__divsi3);
-EXPORT_SYMBOL(__modsi3);
-EXPORT_SYMBOL(__mulsi3);
-EXPORT_SYMBOL(__udivsi3);
-EXPORT_SYMBOL(__umodsi3);
-#endif
diff --git a/arch/m68k/lib/ashldi3.c b/arch/m68k/lib/ashldi3.c
index 37234c2..8dffd36 100644
--- a/arch/m68k/lib/ashldi3.c
+++ b/arch/m68k/lib/ashldi3.c
@@ -13,6 +13,9 @@
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 GNU General Public License for more details. */
 
+#include <linux/compiler.h>
+#include <linux/export.h>
+
 #define BITS_PER_UNIT 8
 
 typedef		 int SItype	__attribute__ ((mode (SI)));
@@ -55,3 +58,4 @@
 
   return w.ll;
 }
+EXPORT_SYMBOL(__ashldi3);
diff --git a/arch/m68k/lib/ashrdi3.c b/arch/m68k/lib/ashrdi3.c
index 1d59345..e6565a3 100644
--- a/arch/m68k/lib/ashrdi3.c
+++ b/arch/m68k/lib/ashrdi3.c
@@ -13,6 +13,9 @@
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 GNU General Public License for more details. */
 
+#include <linux/compiler.h>
+#include <linux/export.h>
+
 #define BITS_PER_UNIT 8
 
 typedef		 int SItype	__attribute__ ((mode (SI)));
@@ -56,3 +59,4 @@
 
   return w.ll;
 }
+EXPORT_SYMBOL(__ashrdi3);
diff --git a/arch/m68k/lib/divsi3.S b/arch/m68k/lib/divsi3.S
index 2c0ec85..3a2143f 100644
--- a/arch/m68k/lib/divsi3.S
+++ b/arch/m68k/lib/divsi3.S
@@ -33,6 +33,8 @@
    D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
 */
 
+#include <asm/export.h>
+
 /* These are predefined by new versions of GNU cpp.  */
 
 #ifndef __USER_LABEL_PREFIX__
@@ -118,3 +120,4 @@
 L3:	movel	sp@+, d2
 	rts
 
+	EXPORT_SYMBOL(__divsi3)
diff --git a/arch/m68k/lib/lshrdi3.c b/arch/m68k/lib/lshrdi3.c
index 49e1ec8..0397797 100644
--- a/arch/m68k/lib/lshrdi3.c
+++ b/arch/m68k/lib/lshrdi3.c
@@ -13,6 +13,9 @@
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 GNU General Public License for more details. */
 
+#include <linux/compiler.h>
+#include <linux/export.h>
+
 #define BITS_PER_UNIT 8
 
 typedef		 int SItype	__attribute__ ((mode (SI)));
@@ -55,3 +58,4 @@
 
   return w.ll;
 }
+EXPORT_SYMBOL(__lshrdi3);
diff --git a/arch/m68k/lib/modsi3.S b/arch/m68k/lib/modsi3.S
index 1d9e0ef..1c96764 100644
--- a/arch/m68k/lib/modsi3.S
+++ b/arch/m68k/lib/modsi3.S
@@ -33,6 +33,8 @@
    D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
 */
 
+#include <asm/export.h>
+
 /* These are predefined by new versions of GNU cpp.  */
 
 #ifndef __USER_LABEL_PREFIX__
@@ -106,3 +108,4 @@
 	movel	d1, d0
 	rts
 
+	EXPORT_SYMBOL(__modsi3)
diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c
index 9006d15..6459af5 100644
--- a/arch/m68k/lib/muldi3.c
+++ b/arch/m68k/lib/muldi3.c
@@ -14,6 +14,9 @@
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 GNU General Public License for more details. */
 
+#include <linux/compiler.h>
+#include <linux/export.h>
+
 #ifdef CONFIG_CPU_HAS_NO_MULDIV64
 
 #define SI_TYPE_SIZE 32
@@ -90,3 +93,4 @@
 
   return w.ll;
 }
+EXPORT_SYMBOL(__muldi3);
diff --git a/arch/m68k/lib/mulsi3.S b/arch/m68k/lib/mulsi3.S
index c39ad4e..855675e 100644
--- a/arch/m68k/lib/mulsi3.S
+++ b/arch/m68k/lib/mulsi3.S
@@ -32,7 +32,7 @@
    Some of this code comes from MINIX, via the folks at ericsson.
    D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
 */
-
+#include <asm/export.h>
 /* These are predefined by new versions of GNU cpp.  */
 
 #ifndef __USER_LABEL_PREFIX__
@@ -102,4 +102,4 @@
 	addl	d1, d0
 
 	rts
-
+	EXPORT_SYMBOL(__mulsi3)
diff --git a/arch/m68k/lib/udivsi3.S b/arch/m68k/lib/udivsi3.S
index 35a5446..78440ae 100644
--- a/arch/m68k/lib/udivsi3.S
+++ b/arch/m68k/lib/udivsi3.S
@@ -32,7 +32,7 @@
    Some of this code comes from MINIX, via the folks at ericsson.
    D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
 */
-
+#include <asm/export.h>
 /* These are predefined by new versions of GNU cpp.  */
 
 #ifndef __USER_LABEL_PREFIX__
@@ -154,4 +154,4 @@
 	unlk	a6		| and return
 	rts
 #endif /* __mcf5200__ || __mcoldfire__ */
-
+	EXPORT_SYMBOL(__udivsi3)
diff --git a/arch/m68k/lib/umodsi3.S b/arch/m68k/lib/umodsi3.S
index 099da51..b6fd11f 100644
--- a/arch/m68k/lib/umodsi3.S
+++ b/arch/m68k/lib/umodsi3.S
@@ -32,7 +32,7 @@
    Some of this code comes from MINIX, via the folks at ericsson.
    D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
 */
-
+#include <asm/export.h>
 /* These are predefined by new versions of GNU cpp.  */
 
 #ifndef __USER_LABEL_PREFIX__
@@ -105,4 +105,4 @@
 	subl	d0, d1		/* d1 = a - (a/b)*b */
 	movel	d1, d0
 	rts
-
+	EXPORT_SYMBOL(__umodsi3)
diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h
index 470e365..8ff0a70 100644
--- a/arch/metag/include/asm/atomic.h
+++ b/arch/metag/include/asm/atomic.h
@@ -39,11 +39,10 @@
 #define atomic_dec(v) atomic_sub(1, (v))
 
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+#define atomic_dec_if_positive(v)       atomic_sub_if_positive(1, v)
 
 #endif
 
-#define atomic_dec_if_positive(v)       atomic_sub_if_positive(1, v)
-
 #include <asm-generic/atomic64.h>
 
 #endif /* __ASM_METAG_ATOMIC_H */
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 8266767..253a67e 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -71,9 +71,6 @@
 	unsigned long insn, fixup;
 };
 
-/* Returns 0 if exception not found and fixup otherwise.  */
-extern unsigned long search_exception_table(unsigned long);
-
 #ifndef CONFIG_MMU
 
 /* Check against bounds of physical memory */
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index c5cd63a..f5f1bdb 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -11,6 +11,7 @@
 platforms += cobalt
 platforms += dec
 platforms += emma
+platforms += generic
 platforms += jazz
 platforms += jz4740
 platforms += lantiq
@@ -18,7 +19,6 @@
 platforms += loongson32
 platforms += loongson64
 platforms += mti-malta
-platforms += mti-sead3
 platforms += netlogic
 platforms += paravirt
 platforms += pic32
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 1a322c8..b3c5bde 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -65,6 +65,7 @@
 	select HANDLE_DOMAIN_IRQ
 	select HAVE_EXIT_THREAD
 	select HAVE_REGS_AND_STACK_ACCESS_API
+	select HAVE_ARCH_HARDENED_USERCOPY
 
 menu "Machine selection"
 
@@ -72,6 +73,57 @@
 	prompt "System type"
 	default SGI_IP22
 
+config MIPS_GENERIC
+	bool "Generic board-agnostic MIPS kernel"
+	select BOOT_RAW
+	select BUILTIN_DTB
+	select CEVT_R4K
+	select CLKSRC_MIPS_GIC
+	select COMMON_CLK
+	select CPU_MIPSR2_IRQ_VI
+	select CPU_MIPSR2_IRQ_EI
+	select CSRC_R4K
+	select DMA_PERDEV_COHERENT
+	select HW_HAS_PCI
+	select IRQ_MIPS_CPU
+	select LIBFDT
+	select MIPS_CPU_SCACHE
+	select MIPS_GIC
+	select MIPS_L1_CACHE_SHIFT_7
+	select NO_EXCEPT_FILL
+	select PCI_DRIVERS_GENERIC
+	select PINCTRL
+	select SMP_UP if SMP
+	select SYS_HAS_CPU_MIPS32_R1
+	select SYS_HAS_CPU_MIPS32_R2
+	select SYS_HAS_CPU_MIPS32_R6
+	select SYS_HAS_CPU_MIPS64_R1
+	select SYS_HAS_CPU_MIPS64_R2
+	select SYS_HAS_CPU_MIPS64_R6
+	select SYS_SUPPORTS_32BIT_KERNEL
+	select SYS_SUPPORTS_64BIT_KERNEL
+	select SYS_SUPPORTS_BIG_ENDIAN
+	select SYS_SUPPORTS_HIGHMEM
+	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select SYS_SUPPORTS_MICROMIPS
+	select SYS_SUPPORTS_MIPS_CPS
+	select SYS_SUPPORTS_MIPS16
+	select SYS_SUPPORTS_MULTITHREADING
+	select SYS_SUPPORTS_RELOCATABLE
+	select SYS_SUPPORTS_SMARTMIPS
+	select USB_EHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
+	select USB_EHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
+	select USB_OHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
+	select USB_OHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
+	select USB_UHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
+	select USB_UHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
+	select USE_OF
+	help
+	  Select this to build a kernel which aims to support multiple boards,
+	  generally using a flattened device tree passed from the bootloader
+	  using the boot protocol defined in the UHI (Unified Hosting
+	  Interface) specification.
+
 config MIPS_ALCHEMY
 	bool "Alchemy processor based machines"
 	select ARCH_PHYS_ADDR_T_64BIT
@@ -478,6 +530,7 @@
 	select SYS_SUPPORTS_ZBOOT
 	select SYS_SUPPORTS_RELOCATABLE
 	select USE_OF
+	select LIBFDT
 	select ZONE_DMA32 if 64BIT
 	select BUILTIN_DTB
 	select LIBFDT
@@ -493,42 +546,6 @@
 	  Microchip PIC32 is a family of general-purpose 32 bit MIPS core
 	  microcontrollers.
 
-config MIPS_SEAD3
-	bool "MIPS SEAD3 board"
-	select BOOT_ELF32
-	select BOOT_RAW
-	select BUILTIN_DTB
-	select CEVT_R4K
-	select CSRC_R4K
-	select CLKSRC_MIPS_GIC
-	select COMMON_CLK
-	select CPU_MIPSR2_IRQ_VI
-	select CPU_MIPSR2_IRQ_EI
-	select DMA_NONCOHERENT
-	select IRQ_MIPS_CPU
-	select MIPS_GIC
-	select LIBFDT
-	select MIPS_MSC
-	select SYS_HAS_CPU_MIPS32_R1
-	select SYS_HAS_CPU_MIPS32_R2
-	select SYS_HAS_CPU_MIPS32_R6
-	select SYS_HAS_CPU_MIPS64_R1
-	select SYS_HAS_EARLY_PRINTK
-	select SYS_SUPPORTS_32BIT_KERNEL
-	select SYS_SUPPORTS_64BIT_KERNEL
-	select SYS_SUPPORTS_BIG_ENDIAN
-	select SYS_SUPPORTS_LITTLE_ENDIAN
-	select SYS_SUPPORTS_SMARTMIPS
-	select SYS_SUPPORTS_MICROMIPS
-	select SYS_SUPPORTS_MIPS16
-	select SYS_SUPPORTS_RELOCATABLE
-	select USB_EHCI_BIG_ENDIAN_DESC
-	select USB_EHCI_BIG_ENDIAN_MMIO
-	select USE_OF
-	help
-	  This enables support for the MIPS Technologies SEAD3 evaluation
-	  board.
-
 config NEC_MARKEINS
 	bool "NEC EMMA2RH Mark-eins board"
 	select SOC_EMMA2RH
@@ -988,6 +1005,7 @@
 source "arch/mips/bcm47xx/Kconfig"
 source "arch/mips/bcm63xx/Kconfig"
 source "arch/mips/bmips/Kconfig"
+source "arch/mips/generic/Kconfig"
 source "arch/mips/jazz/Kconfig"
 source "arch/mips/jz4740/Kconfig"
 source "arch/mips/lantiq/Kconfig"
@@ -1098,6 +1116,10 @@
 	select DMA_NONCOHERENT
 	bool
 
+config DMA_PERDEV_COHERENT
+	bool
+	select DMA_MAYBE_COHERENT
+
 config DMA_COHERENT
 	bool
 
@@ -1401,6 +1423,16 @@
 	  The Loongson 1B is a 32-bit SoC, which implements the MIPS32
 	  release 2 instruction set.
 
+config CPU_LOONGSON1C
+	bool "Loongson 1C"
+	depends on SYS_HAS_CPU_LOONGSON1C
+	select CPU_LOONGSON1
+	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select LEDS_GPIO_REGISTER
+	help
+	  The Loongson 1C is a 32-bit SoC, which implements the MIPS32
+	  release 2 instruction set.
+
 config CPU_MIPS32_R1
 	bool "MIPS32 Release 1"
 	depends on SYS_HAS_CPU_MIPS32_R1
@@ -1850,6 +1882,9 @@
 config SYS_HAS_CPU_LOONGSON1B
 	bool
 
+config SYS_HAS_CPU_LOONGSON1C
+	bool
+
 config SYS_HAS_CPU_MIPS32_R1
 	bool
 
@@ -2906,7 +2941,7 @@
 choice
 	prompt "Kernel command line type" if !CMDLINE_OVERRIDE
 	default MIPS_CMDLINE_FROM_DTB if USE_OF && !ATH79 && !MACH_INGENIC && \
-					 !MIPS_MALTA && !MIPS_SEAD3 && \
+					 !MIPS_MALTA && \
 					 !CAVIUM_OCTEON_SOC
 	default MIPS_CMDLINE_FROM_BOOTLOADER
 
@@ -2960,7 +2995,6 @@
 	bool "Support for PCI controller"
 	depends on HW_HAS_PCI
 	select PCI_DOMAINS
-	select NO_GENERIC_PCI_IOPORT_MAP
 	help
 	  Find out whether you have a PCI motherboard. PCI is the name of a
 	  bus system, i.e. the way the CPU talks to the other stuff inside
@@ -2981,6 +3015,17 @@
 config PCI_DOMAINS
 	bool
 
+config PCI_DOMAINS_GENERIC
+	bool
+
+config PCI_DRIVERS_GENERIC
+	select PCI_DOMAINS_GENERIC if PCI_DOMAINS
+	bool
+
+config PCI_DRIVERS_LEGACY
+	def_bool !PCI_DRIVERS_GENERIC
+	select NO_GENERIC_PCI_IOPORT_MAP
+
 source "drivers/pci/Kconfig"
 
 #
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 598ab29..fbf40d3 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -262,7 +262,14 @@
 KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
 
 bootvars-y	= VMLINUX_LOAD_ADDRESS=$(load-y) \
-		  VMLINUX_ENTRY_ADDRESS=$(entry-y)
+		  VMLINUX_ENTRY_ADDRESS=$(entry-y) \
+		  PLATFORM=$(platform-y)
+ifdef CONFIG_32BIT
+bootvars-y	+= ADDR_BITS=32
+endif
+ifdef CONFIG_64BIT
+bootvars-y	+= ADDR_BITS=64
+endif
 
 LDFLAGS			+= -m $(ld-emul)
 
@@ -302,6 +309,11 @@
 boot-y			+= uImage.lzma
 boot-y			+= uImage.lzo
 endif
+boot-y			+= vmlinux.itb
+boot-y			+= vmlinux.gz.itb
+boot-y			+= vmlinux.bz2.itb
+boot-y			+= vmlinux.lzma.itb
+boot-y			+= vmlinux.lzo.itb
 
 # compressed boot image targets (arch/mips/boot/compressed/)
 bootz-y			:= vmlinuz
@@ -425,4 +437,67 @@
 	echo '  dtbs_install         - Install dtbs to $(INSTALL_DTBS_PATH)'
 	echo
 	echo '  These will be default as appropriate for a configured platform.'
+	echo
+	echo '  If you are targeting a system supported by generic kernels you may'
+	echo '  configure the kernel for a given architecture target like so:'
+	echo
+	echo '  {micro32,32,64}{r1,r2,r6}{el,}_defconfig <BOARDS="list of boards">'
+	echo
+	echo '  Otherwise, the following default configurations are available:'
 endef
+
+generic_config_dir = $(srctree)/arch/$(ARCH)/configs/generic
+generic_defconfigs :=
+
+#
+# If the user generates a generic kernel configuration without specifying a
+# list of boards to include the config fragments for, default to including all
+# available board config fragments.
+#
+ifeq ($(BOARDS),)
+BOARDS = $(patsubst board-%.config,%,$(notdir $(wildcard $(generic_config_dir)/board-*.config)))
+endif
+
+#
+# Generic kernel configurations which merge generic_defconfig with the
+# appropriate config fragments from arch/mips/configs/generic/, resulting in
+# the ability to easily configure the kernel for a given architecture,
+# endianness & set of boards without duplicating the needed configuration in
+# hundreds of defconfig files.
+#
+define gen_generic_defconfigs
+$(foreach bits,$(1),$(foreach rev,$(2),$(foreach endian,$(3),
+target := $(bits)$(rev)$(filter el,$(endian))_defconfig
+generic_defconfigs += $$(target)
+$$(target): $(generic_config_dir)/$(bits)$(rev).config
+$$(target): $(generic_config_dir)/$(endian).config
+)))
+endef
+
+$(eval $(call gen_generic_defconfigs,32 64,r1 r2 r6,eb el))
+$(eval $(call gen_generic_defconfigs,micro32,r2,eb el))
+
+.PHONY: $(generic_defconfigs)
+$(generic_defconfigs):
+	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \
+		-m -O $(objtree) $(srctree)/arch/$(ARCH)/configs/generic_defconfig $^ \
+		$(foreach board,$(BOARDS),$(generic_config_dir)/board-$(board).config)
+	$(Q)$(MAKE) olddefconfig
+
+#
+# Prevent generic merge_config rules attempting to merge single fragments
+#
+$(generic_config_dir)/%.config: ;
+
+#
+# Legacy defconfig compatibility - these targets used to be real defconfigs but
+# now that the boards have been converted to use the generic kernel they are
+# wrappers around the generic rules above.
+#
+.PHONY: sead3_defconfig
+sead3_defconfig:
+	$(Q)$(MAKE) 32r2el_defconfig BOARDS=sead-3
+
+.PHONY: sead3micro_defconfig
+sead3micro_defconfig:
+	$(Q)$(MAKE) micro32r2el_defconfig BOARDS=sead-3
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
index 2902138..7faaa6d 100644
--- a/arch/mips/alchemy/common/setup.c
+++ b/arch/mips/alchemy/common/setup.c
@@ -48,17 +48,17 @@
 		clear_c0_config(1 << 19); /* Clear Config[OD] */
 
 	hw_coherentio = 0;
-	coherentio = 1;
+	coherentio = IO_COHERENCE_ENABLED;
 	switch (alchemy_get_cputype()) {
 	case ALCHEMY_CPU_AU1000:
 	case ALCHEMY_CPU_AU1500:
 	case ALCHEMY_CPU_AU1100:
-		coherentio = 0;
+		coherentio = IO_COHERENCE_DISABLED;
 		break;
 	case ALCHEMY_CPU_AU1200:
 		/* Au1200 AB USB does not support coherent memory */
 		if (0 == (read_c0_prid() & PRID_REV_MASK))
-			coherentio = 0;
+			coherentio = IO_COHERENCE_DISABLED;
 		break;
 	}
 
diff --git a/arch/mips/bcm47xx/serial.c b/arch/mips/bcm47xx/serial.c
index df761d3..e3c9872 100644
--- a/arch/mips/bcm47xx/serial.c
+++ b/arch/mips/bcm47xx/serial.c
@@ -1,4 +1,7 @@
 /*
+ * 8250 UART probe driver for the BCM47XX platforms
+ * Author: Aurelien Jarno
+ *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -6,7 +9,6 @@
  * Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net>
  */
 
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/serial.h>
 #include <linux/serial_8250.h>
@@ -88,9 +90,4 @@
 	}
 	return -EINVAL;
 }
-
-module_init(uart8250_init);
-
-MODULE_AUTHOR("Aurelien Jarno <aurelien@aurel32.net>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("8250 UART probe driver for the BCM47XX platforms");
+device_initcall(uart8250_init);
diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c
index 6375652..b49fc9c 100644
--- a/arch/mips/bcm63xx/clk.c
+++ b/arch/mips/bcm63xx/clk.c
@@ -326,6 +326,9 @@
 
 void clk_disable(struct clk *clk)
 {
+	if (!clk)
+		return;
+
 	mutex_lock(&clocks_mutex);
 	clk_disable_unlocked(clk);
 	mutex_unlock(&clocks_mutex);
diff --git a/arch/mips/bmips/Kconfig b/arch/mips/bmips/Kconfig
index 264328d..2d60f25 100644
--- a/arch/mips/bmips/Kconfig
+++ b/arch/mips/bmips/Kconfig
@@ -21,10 +21,6 @@
 	bool "BCM93384WVG Viper CPU (EXPERIMENTAL)"
 	select BUILTIN_DTB
 
-config DT_BCM96358NB4SER
-	bool "BCM96358NB4SER"
-	select BUILTIN_DTB
-
 config DT_BCM96368MVWG
 	bool "BCM96368MVWG"
 	select BUILTIN_DTB
@@ -65,6 +61,22 @@
 	bool "BCM97435SVMB"
 	select BUILTIN_DTB
 
+config DT_COMTREND_VR3032U
+	bool "Comtrend VR-3032u"
+	select BUILTIN_DTB
+
+config DT_NETGEAR_CVG834G
+	bool "NETGEAR CVG834G"
+	select BUILTIN_DTB
+
+config DT_SFR_NEUFBOX4_SERCOMM
+	bool "SFR Neufbox 4 (Sercomm)"
+	select BUILTIN_DTB
+
+config DT_SFR_NEUFBOX6_SERCOMM
+	bool "SFR Neufbox 6 (Sercomm)"
+	select BUILTIN_DTB
+
 endchoice
 
 endif
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index 6776042..3b6f687 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -17,6 +17,7 @@
 #include <linux/of.h>
 #include <linux/of_fdt.h>
 #include <linux/of_platform.h>
+#include <linux/libfdt.h>
 #include <linux/smp.h>
 #include <asm/addrspace.h>
 #include <asm/bmips.h>
@@ -98,7 +99,7 @@
 static void bcm6358_quirks(void)
 {
 	/*
-	 * BCM6358 needs special handling for its shared TLB, so
+	 * BCM3368/BCM6358 need special handling for their shared TLB, so
 	 * disable SMP for now
 	 */
 	bmips_smp_enabled = 0;
@@ -110,10 +111,12 @@
 }
 
 static const struct bmips_quirk bmips_quirk_list[] = {
+	{ "brcm,bcm3368",		&bcm6358_quirks			},
 	{ "brcm,bcm3384-viper",		&bcm3384_viper_quirks		},
 	{ "brcm,bcm33843-viper",	&bcm3384_viper_quirks		},
 	{ "brcm,bcm6328",		&bcm6328_quirks			},
 	{ "brcm,bcm6358",		&bcm6358_quirks			},
+	{ "brcm,bcm6362",		&bcm6368_quirks			},
 	{ "brcm,bcm6368",		&bcm6368_quirks			},
 	{ "brcm,bcm63168",		&bcm6368_quirks			},
 	{ "brcm,bcm63268",		&bcm6368_quirks			},
@@ -150,6 +153,8 @@
 	mips_hpt_frequency = freq;
 }
 
+extern const char __appended_dtb;
+
 void __init plat_mem_setup(void)
 {
 	void *dtb;
@@ -159,6 +164,11 @@
 	ioport_resource.start = 0;
 	ioport_resource.end = ~0;
 
+#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
+	if (!fdt_check_header(&__appended_dtb))
+		dtb = (void *)&__appended_dtb;
+	else
+#endif
 	/* intended to somewhat resemble ARM; see Documentation/arm/Booting */
 	if (fw_arg0 == 0 && fw_arg1 == 0xffffffff)
 		dtb = phys_to_virt(fw_arg2);
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index acb1988..2728a9a 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -100,3 +100,69 @@
 $(obj)/uImage: $(obj)/uImage.$(suffix-y)
 	@ln -sf $(notdir $<) $@
 	@echo '  Image $@ is ready'
+
+#
+# Flattened Image Tree (.itb) images
+#
+
+targets += vmlinux.itb
+targets += vmlinux.gz.itb
+targets += vmlinux.bz2.itb
+targets += vmlinux.lzma.itb
+targets += vmlinux.lzo.itb
+
+ifeq ($(ADDR_BITS),32)
+	itb_addr_cells = 1
+endif
+ifeq ($(ADDR_BITS),64)
+	itb_addr_cells = 2
+endif
+
+quiet_cmd_cpp_its_S = ITS     $@
+      cmd_cpp_its_S = $(CPP) $(cpp_flags) -P -C -o $@ $< \
+		        -DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \
+			-DVMLINUX_BINARY="\"$(3)\"" \
+			-DVMLINUX_COMPRESSION="\"$(2)\"" \
+			-DVMLINUX_LOAD_ADDRESS=$(VMLINUX_LOAD_ADDRESS) \
+			-DVMLINUX_ENTRY_ADDRESS=$(VMLINUX_ENTRY_ADDRESS) \
+			-DADDR_BITS=$(ADDR_BITS) \
+			-DADDR_CELLS=$(itb_addr_cells)
+
+$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+	$(call if_changed_dep,cpp_its_S,none,vmlinux.bin)
+
+$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+	$(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz)
+
+$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+	$(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2)
+
+$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+	$(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma)
+
+$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+	$(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo)
+
+quiet_cmd_itb-image = ITB     $@
+      cmd_itb-image = \
+		env PATH="$(objtree)/scripts/dtc:$(PATH)" \
+		$(CONFIG_SHELL) $(MKIMAGE) \
+		-D "-I dts -O dtb -p 500 \
+			--include $(objtree)/arch/mips \
+			--warning no-unit_address_vs_reg" \
+		-f $(2) $@
+
+$(obj)/vmlinux.itb: $(obj)/vmlinux.its $(obj)/vmlinux.bin FORCE
+	$(call if_changed,itb-image,$<)
+
+$(obj)/vmlinux.gz.itb: $(obj)/vmlinux.gz.its $(obj)/vmlinux.bin.gz FORCE
+	$(call if_changed,itb-image,$<)
+
+$(obj)/vmlinux.bz2.itb: $(obj)/vmlinux.bz2.its $(obj)/vmlinux.bin.bz2 FORCE
+	$(call if_changed,itb-image,$<)
+
+$(obj)/vmlinux.lzma.itb: $(obj)/vmlinux.lzma.its $(obj)/vmlinux.bin.lzma FORCE
+	$(call if_changed,itb-image,$<)
+
+$(obj)/vmlinux.lzo.itb: $(obj)/vmlinux.lzo.its $(obj)/vmlinux.bin.lzo FORCE
+	$(call if_changed,itb-image,$<)
diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile
index fda9d38..d61bc2a 100644
--- a/arch/mips/boot/dts/brcm/Makefile
+++ b/arch/mips/boot/dts/brcm/Makefile
@@ -1,6 +1,5 @@
 dtb-$(CONFIG_DT_BCM93384WVG)		+= bcm93384wvg.dtb
 dtb-$(CONFIG_DT_BCM93384WVG_VIPER)	+= bcm93384wvg_viper.dtb
-dtb-$(CONFIG_DT_BCM96358NB4SER)		+= bcm96358nb4ser.dtb
 dtb-$(CONFIG_DT_BCM96368MVWG)		+= bcm96368mvwg.dtb
 dtb-$(CONFIG_DT_BCM9EJTAGPRB)		+= bcm9ejtagprb.dtb
 dtb-$(CONFIG_DT_BCM97125CBMB)		+= bcm97125cbmb.dtb
@@ -11,20 +10,29 @@
 dtb-$(CONFIG_DT_BCM97420C)		+= bcm97420c.dtb
 dtb-$(CONFIG_DT_BCM97425SVMB)		+= bcm97425svmb.dtb
 dtb-$(CONFIG_DT_BCM97435SVMB)		+= bcm97435svmb.dtb
+dtb-$(CONFIG_DT_COMTREND_VR3032U)	+= bcm63268-comtrend-vr-3032u.dtb
+dtb-$(CONFIG_DT_NETGEAR_CVG834G)	+= bcm3368-netgear-cvg834g.dtb
+dtb-$(CONFIG_DT_SFR_NEUFBOX4_SERCOMM)	+= bcm6358-neufbox4-sercomm.dtb
+dtb-$(CONFIG_DT_SFR_NEUFBOX6_SERCOMM)	+= bcm6362-neufbox6-sercomm.dtb
 
-dtb-$(CONFIG_DT_NONE)			+= \
-						bcm93384wvg.dtb		\
-						bcm93384wvg_viper.dtb	\
-						bcm96358nb4ser.dtb	\
-						bcm96368mvwg.dtb	\
-						bcm9ejtagprb.dtb	\
-						bcm97125cbmb.dtb	\
-						bcm97346dbsmb.dtb	\
-						bcm97358svmb.dtb	\
-						bcm97360svmb.dtb	\
-						bcm97362svmb.dtb	\
-						bcm97420c.dtb		\
-						bcm97425svmb.dtb
+dtb-$(CONFIG_DT_NONE) += \
+	bcm3368-netgear-cvg834g.dtb \
+	bcm6358-neufbox4-sercomm.dtb \
+	bcm6362-neufbox6-sercomm.dtb \
+	bcm63268-comtrend-vr-3032u.dtb \
+	bcm93384wvg.dtb \
+	bcm93384wvg_viper.dtb \
+	bcm96358nb4ser.dtb \
+	bcm96368mvwg.dtb \
+	bcm9ejtagprb.dtb \
+	bcm97125cbmb.dtb \
+	bcm97346dbsmb.dtb \
+	bcm97358svmb.dtb \
+	bcm97360svmb.dtb \
+	bcm97362svmb.dtb \
+	bcm97420c.dtb \
+	bcm97425svmb.dtb \
+	bcm97435svmb.dtb
 
 obj-y				+= $(patsubst %.dtb, %.dtb.o, $(dtb-y))
 
diff --git a/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts b/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts
new file mode 100644
index 0000000..2f2e80f
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts
@@ -0,0 +1,22 @@
+/dts-v1/;
+
+/include/ "bcm3368.dtsi"
+
+/ {
+	compatible = "netgear,cvg834g", "brcm,bcm3368";
+	model = "NETGEAR CVG834G";
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x00000000 0x02000000>;
+	};
+
+	chosen {
+		bootargs = "console=ttyS0,115200";
+		stdout-path = &uart0;
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm3368.dtsi b/arch/mips/boot/dts/brcm/bcm3368.dtsi
new file mode 100644
index 0000000..bee855c
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm3368.dtsi
@@ -0,0 +1,101 @@
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "brcm,bcm3368";
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		mips-hpt-frequency = <150000000>;
+
+		cpu@0 {
+			compatible = "brcm,bmips4350";
+			device_type = "cpu";
+			reg = <0>;
+		};
+
+		cpu@1 {
+			compatible = "brcm,bmips4350";
+			device_type = "cpu";
+			reg = <1>;
+		};
+	};
+
+	clocks {
+		periph_clk: periph-clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <50000000>;
+		};
+	};
+
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart1;
+	};
+
+	cpu_intc: interrupt-controller {
+		#address-cells = <0>;
+		compatible = "mti,cpu-interrupt-controller";
+
+		interrupt-controller;
+		#interrupt-cells = <1>;
+	};
+
+	ubus {
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		compatible = "simple-bus";
+		ranges;
+
+		periph_cntl: syscon@fff8c000 {
+			compatible = "syscon";
+			reg = <0xfff8c000 0xc>;
+			native-endian;
+		};
+
+		reboot: syscon-reboot@fff8c008 {
+			compatible = "syscon-reboot";
+			regmap = <&periph_cntl>;
+			offset = <0x8>;
+			mask = <0x1>;
+		};
+
+		periph_intc: interrupt-controller@fff8c00c {
+			compatible = "brcm,bcm6345-l1-intc";
+			reg = <0xfff8c00c 0x8>;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-parent = <&cpu_intc>;
+			interrupts = <2>;
+		};
+
+		uart0: serial@fff8c100 {
+			compatible = "brcm,bcm6345-uart";
+			reg = <0xfff8c100 0x18>;
+
+			interrupt-parent = <&periph_intc>;
+			interrupts = <2>;
+
+			clocks = <&periph_clk>;
+
+			status = "disabled";
+		};
+
+		uart1: serial@fff8c120 {
+			compatible = "brcm,bcm6345-uart";
+			reg = <0xfff8c120 0x18>;
+
+			interrupt-parent = <&periph_intc>;
+			interrupts = <3>;
+
+			clocks = <&periph_clk>;
+
+			status = "disabled";
+		};
+	};
+};
diff --git a/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts b/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts
new file mode 100644
index 0000000..430d35c
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts
@@ -0,0 +1,108 @@
+/dts-v1/;
+
+/include/ "bcm63268.dtsi"
+
+/ {
+	compatible = "comtrend,vr-3032u", "brcm,bcm63268";
+	model = "Comtrend VR-3032u";
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x00000000 0x04000000>;
+	};
+
+	chosen {
+		bootargs = "console=ttyS0,115200";
+		stdout-path = &uart0;
+	};
+};
+
+&leds0 {
+	status = "ok";
+	brcm,serial-leds;
+	brcm,serial-dat-low;
+	brcm,serial-shift-inv;
+
+	led@0 {
+		reg = <0>;
+		brcm,hardware-controlled;
+		brcm,link-signal-sources = <0>;
+		/* GPHY0 Speed 0 */
+	};
+	led@1 {
+		reg = <1>;
+		brcm,hardware-controlled;
+		brcm,link-signal-sources = <1>;
+		/* GPHY0 Speed 1 */
+	};
+	led@2 {
+		reg = <2>;
+		active-low;
+		label = "vr-3032u:red:inet";
+	};
+	led@3 {
+		reg = <3>;
+		active-low;
+		label = "vr-3032u:green:dsl";
+	};
+	led@4 {
+		reg = <4>;
+		active-low;
+		label = "vr-3032u:green:usb";
+	};
+	led@7 {
+		reg = <7>;
+		active-low;
+		label = "vr-3032u:green:wps";
+	};
+	led@8 {
+		reg = <8>;
+		active-low;
+		label = "vr-3032u:green:inet";
+	};
+	led@9 {
+		reg = <9>;
+		brcm,hardware-controlled;
+		/* EPHY0 Activity */
+	};
+	led@10 {
+		reg = <10>;
+		brcm,hardware-controlled;
+		/* EPHY1 Activity */
+	};
+	led@11 {
+		reg = <11>;
+		brcm,hardware-controlled;
+		/* EPHY2 Activity */
+	};
+	led@12 {
+		reg = <12>;
+		brcm,hardware-controlled;
+		/* GPHY0 Activity */
+	};
+	led@13 {
+		reg = <13>;
+		brcm,hardware-controlled;
+		/* EPHY0 Speed */
+	};
+	led@14 {
+		reg = <14>;
+		brcm,hardware-controlled;
+		/* EPHY1 Speed */
+	};
+	led@15 {
+		reg = <15>;
+		brcm,hardware-controlled;
+		/* EPHY2 Speed */
+	};
+	led@20 {
+		reg = <20>;
+		active-low;
+		label = "vr-3032u:green:power";
+		default-state = "on";
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm63268.dtsi b/arch/mips/boot/dts/brcm/bcm63268.dtsi
new file mode 100644
index 0000000..7e6bf2c
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm63268.dtsi
@@ -0,0 +1,134 @@
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "brcm,bcm63268";
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		mips-hpt-frequency = <200000000>;
+
+		cpu@0 {
+			compatible = "brcm,bmips4350";
+			device_type = "cpu";
+			reg = <0>;
+		};
+
+		cpu@1 {
+			compatible = "brcm,bmips4350";
+			device_type = "cpu";
+			reg = <1>;
+		};
+	};
+
+	clocks {
+		periph_clk: periph-clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <50000000>;
+		};
+	};
+
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart1;
+	};
+
+	cpu_intc: interrupt-controller {
+		#address-cells = <0>;
+		compatible = "mti,cpu-interrupt-controller";
+
+		interrupt-controller;
+		#interrupt-cells = <1>;
+	};
+
+	ubus {
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		compatible = "simple-bus";
+		ranges;
+
+		periph_cntl: syscon@10000000 {
+			compatible = "syscon";
+			reg = <0x10000000 0x14>;
+			native-endian;
+		};
+
+		reboot: syscon-reboot@10000008 {
+			compatible = "syscon-reboot";
+			regmap = <&periph_cntl>;
+			offset = <0x8>;
+			mask = <0x1>;
+		};
+
+		periph_intc: interrupt-controller@10000020 {
+			compatible = "brcm,bcm6345-l1-intc";
+			reg = <0x10000020 0x20>,
+			      <0x10000040 0x20>;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-parent = <&cpu_intc>;
+			interrupts = <2>, <3>;
+		};
+
+		uart0: serial@10000180 {
+			compatible = "brcm,bcm6345-uart";
+			reg = <0x10000180 0x18>;
+
+			interrupt-parent = <&periph_intc>;
+			interrupts = <5>;
+
+			clocks = <&periph_clk>;
+
+			status = "disabled";
+		};
+
+		uart1: serial@100001a0 {
+			compatible = "brcm,bcm6345-uart";
+			reg = <0x100001a0 0x18>;
+
+			interrupt-parent = <&periph_intc>;
+			interrupts = <34>;
+
+			clocks = <&periph_clk>;
+
+			status = "disabled";
+		};
+
+		leds0: led-controller@10001900 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "brcm,bcm6328-leds";
+			reg = <0x10001900 0x24>;
+
+			status = "disabled";
+		};
+
+		ehci: usb@10002500 {
+			compatible = "brcm,bcm63268-ehci", "generic-ehci";
+			reg = <0x10002500 0x100>;
+			big-endian;
+
+			interrupt-parent = <&periph_intc>;
+			interrupts = <10>;
+
+			status = "disabled";
+		};
+
+		ohci: usb@10002600 {
+			compatible = "brcm,bcm63268-ohci", "generic-ohci";
+			reg = <0x10002600 0x100>;
+			big-endian;
+			no-big-frame-no;
+
+			interrupt-parent = <&periph_intc>;
+			interrupts = <9>;
+
+			status = "disabled";
+		};
+	};
+};
diff --git a/arch/mips/boot/dts/brcm/bcm96358nb4ser.dts b/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts
similarity index 93%
rename from arch/mips/boot/dts/brcm/bcm96358nb4ser.dts
rename to arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts
index f412117..702eae2 100644
--- a/arch/mips/boot/dts/brcm/bcm96358nb4ser.dts
+++ b/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts
@@ -12,6 +12,7 @@
 	};
 
 	chosen {
+		bootargs = "console=ttyS0,115200";
 		stdout-path = &uart0;
 	};
 };
diff --git a/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts b/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts
new file mode 100644
index 0000000..480f2a5
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts
@@ -0,0 +1,22 @@
+/dts-v1/;
+
+/include/ "bcm6362.dtsi"
+
+/ {
+	compatible = "sfr,nb6-ser", "brcm,bcm6362";
+	model = "SFR NeufBox 6 (Sercomm)";
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x00000000 0x08000000>;
+	};
+
+	chosen {
+		bootargs = "console=ttyS0,115200";
+		stdout-path = &uart0;
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm6362.dtsi b/arch/mips/boot/dts/brcm/bcm6362.dtsi
new file mode 100644
index 0000000..c507da5
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm6362.dtsi
@@ -0,0 +1,134 @@
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "brcm,bcm6362";
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		mips-hpt-frequency = <200000000>;
+
+		cpu@0 {
+			compatible = "brcm,bmips4350";
+			device_type = "cpu";
+			reg = <0>;
+		};
+
+		cpu@1 {
+			compatible = "brcm,bmips4350";
+			device_type = "cpu";
+			reg = <1>;
+		};
+	};
+
+	clocks {
+		periph_clk: periph-clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <50000000>;
+		};
+	};
+
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart1;
+	};
+
+	cpu_intc: interrupt-controller {
+		#address-cells = <0>;
+		compatible = "mti,cpu-interrupt-controller";
+
+		interrupt-controller;
+		#interrupt-cells = <1>;
+	};
+
+	ubus {
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		compatible = "simple-bus";
+		ranges;
+
+		periph_cntl: syscon@10000000 {
+			compatible = "syscon";
+			reg = <0x10000000 0x14>;
+			native-endian;
+		};
+
+		reboot: syscon-reboot@10000008 {
+			compatible = "syscon-reboot";
+			regmap = <&periph_cntl>;
+			offset = <0x8>;
+			mask = <0x1>;
+		};
+
+		periph_intc: interrupt-controller@10000020 {
+			compatible = "brcm,bcm6345-l1-intc";
+			reg = <0x10000020 0x10>,
+			      <0x10000030 0x10>;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-parent = <&cpu_intc>;
+			interrupts = <2>, <3>;
+		};
+
+		uart0: serial@10000100 {
+			compatible = "brcm,bcm6345-uart";
+			reg = <0x10000100 0x18>;
+
+			interrupt-parent = <&periph_intc>;
+			interrupts = <3>;
+
+			clocks = <&periph_clk>;
+
+			status = "disabled";
+		};
+
+		uart1: serial@10000120 {
+			compatible = "brcm,bcm6345-uart";
+			reg = <0x10000120 0x18>;
+
+			interrupt-parent = <&periph_intc>;
+			interrupts = <4>;
+
+			clocks = <&periph_clk>;
+
+			status = "disabled";
+		};
+
+		leds0: led-controller@10001900 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "brcm,bcm6328-leds";
+			reg = <0x10001900 0x24>;
+
+			status = "disabled";
+		};
+
+		ehci: usb@10002500 {
+			compatible = "brcm,bcm6362-ehci", "generic-ehci";
+			reg = <0x10002500 0x100>;
+			big-endian;
+
+			interrupt-parent = <&periph_intc>;
+			interrupts = <10>;
+
+			status = "disabled";
+		};
+
+		ohci: usb@10002600 {
+			compatible = "brcm,bcm6362-ohci", "generic-ohci";
+			reg = <0x10002600 0x100>;
+			big-endian;
+			no-big-frame-no;
+
+			interrupt-parent = <&periph_intc>;
+			interrupts = <9>;
+
+			status = "disabled";
+		};
+	};
+};
diff --git a/arch/mips/boot/dts/brcm/bcm7125.dtsi b/arch/mips/boot/dts/brcm/bcm7125.dtsi
index 550e1d9..bbd00f6 100644
--- a/arch/mips/boot/dts/brcm/bcm7125.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7125.dtsi
@@ -26,7 +26,7 @@
 		uart0 = &uart0;
 	};
 
-	cpu_intc: cpu_intc {
+	cpu_intc: interrupt-controller {
 		#address-cells = <0>;
 		compatible = "mti,cpu-interrupt-controller";
 
@@ -40,6 +40,12 @@
 			#clock-cells = <0>;
 			clock-frequency = <81000000>;
 		};
+
+		upg_clk: upg_clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <27000000>;
+		};
 	};
 
 	rdb {
@@ -49,7 +55,7 @@
 		compatible = "simple-bus";
 		ranges = <0 0x10000000 0x01000000>;
 
-		periph_intc: periph_intc@441400 {
+		periph_intc: interrupt-controller@441400 {
 			compatible = "brcm,bcm7038-l1-intc";
 			reg = <0x441400 0x30>, <0x441600 0x30>;
 
@@ -60,7 +66,7 @@
 			interrupts = <2>, <3>;
 		};
 
-		sun_l2_intc: sun_l2_intc@401800 {
+		sun_l2_intc: interrupt-controller@401800 {
 			compatible = "brcm,l2-intc";
 			reg = <0x401800 0x30>;
 			interrupt-controller;
@@ -81,7 +87,7 @@
 						     "avd_0", "jtag_0";
 		};
 
-		upg_irq0_intc: upg_irq0_intc@406780 {
+		upg_irq0_intc: interrupt-controller@406780 {
 			compatible = "brcm,bcm7120-l2-intc";
 			reg = <0x406780 0x8>;
 
@@ -183,6 +189,26 @@
 		      status = "disabled";
 		};
 
+		pwma: pwm@406580 {
+			compatible = "brcm,bcm7038-pwm";
+			reg = <0x406580 0x28>;
+			#pwm-cells = <2>;
+			clocks = <&upg_clk>;
+			status = "disabled";
+		};
+
+		upg_gio: gpio@406700 {
+			compatible = "brcm,brcmstb-gpio";
+			reg = <0x406700 0x80>;
+			#gpio-cells = <2>;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			interrupt-controller;
+			interrupt-parent = <&upg_irq0_intc>;
+			interrupts = <6>;
+			brcm,gpio-bank-widths = <32 32 32 18>;
+		};
+
 		ehci0: usb@488300 {
 			compatible = "brcm,bcm7125-ehci", "generic-ehci";
 			reg = <0x488300 0x100>;
diff --git a/arch/mips/boot/dts/brcm/bcm7346.dtsi b/arch/mips/boot/dts/brcm/bcm7346.dtsi
index ec95906..4bbcc95 100644
--- a/arch/mips/boot/dts/brcm/bcm7346.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7346.dtsi
@@ -26,7 +26,7 @@
 		uart0 = &uart0;
 	};
 
-	cpu_intc: cpu_intc {
+	cpu_intc: interrupt-controller {
 		#address-cells = <0>;
 		compatible = "mti,cpu-interrupt-controller";
 
@@ -40,6 +40,12 @@
 			#clock-cells = <0>;
 			clock-frequency = <81000000>;
 		};
+
+		upg_clk: upg_clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <27000000>;
+		};
 	};
 
 	rdb {
@@ -49,7 +55,7 @@
 		compatible = "simple-bus";
 		ranges = <0 0x10000000 0x01000000>;
 
-		periph_intc: periph_intc@411400 {
+		periph_intc: interrupt-controller@411400 {
 			compatible = "brcm,bcm7038-l1-intc";
 			reg = <0x411400 0x30>, <0x411600 0x30>;
 
@@ -60,7 +66,7 @@
 			interrupts = <2>, <3>;
 		};
 
-		sun_l2_intc: sun_l2_intc@403000 {
+		sun_l2_intc: interrupt-controller@403000 {
 			compatible = "brcm,l2-intc";
 			reg = <0x403000 0x30>;
 			interrupt-controller;
@@ -81,7 +87,7 @@
 						     "jtag_0", "svd_0";
 		};
 
-		upg_irq0_intc: upg_irq0_intc@406780 {
+		upg_irq0_intc: interrupt-controller@406780 {
 			compatible = "brcm,bcm7120-l2-intc";
 			reg = <0x406780 0x8>;
 
@@ -96,7 +102,7 @@
 			interrupt-names = "upg_main", "upg_bsc";
 		};
 
-		upg_aon_irq0_intc: upg_aon_irq0_intc@408b80 {
+		upg_aon_irq0_intc: interrupt-controller@408b80 {
 			compatible = "brcm,bcm7120-l2-intc";
 			reg = <0x408b80 0x8>;
 
@@ -210,6 +216,59 @@
 		      status = "disabled";
 		};
 
+		pwma: pwm@406580 {
+			compatible = "brcm,bcm7038-pwm";
+			reg = <0x406580 0x28>;
+			#pwm-cells = <2>;
+			clocks = <&upg_clk>;
+			status = "disabled";
+		};
+
+		pwmb: pwm@406800 {
+			compatible = "brcm,bcm7038-pwm";
+			reg = <0x406800 0x28>;
+			#pwm-cells = <2>;
+			clocks = <&upg_clk>;
+			status = "disabled";
+		};
+
+		aon_pm_l2_intc: interrupt-controller@408440 {
+			compatible = "brcm,l2-intc";
+			reg = <0x408440 0x30>;
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <53>;
+			brcm,irq-can-wake;
+		};
+
+		upg_gio: gpio@406700 {
+			compatible = "brcm,brcmstb-gpio";
+			reg = <0x406700 0x60>;
+			#gpio-cells = <2>;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			interrupt-controller;
+			interrupt-parent = <&upg_irq0_intc>;
+			interrupts = <6>;
+			brcm,gpio-bank-widths = <32 32 16>;
+		};
+
+		upg_gio_aon: gpio@408c00 {
+			compatible = "brcm,brcmstb-gpio";
+			reg = <0x408c00 0x60>;
+			#gpio-cells = <2>;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			interrupt-controller;
+			interrupt-parent = <&upg_aon_irq0_intc>;
+			interrupts = <6>;
+			interrupts-extended = <&upg_aon_irq0_intc 6>,
+					      <&aon_pm_l2_intc 5>;
+			wakeup-source;
+			brcm,gpio-bank-widths = <27 32 2>;
+		};
+
 		enet0: ethernet@430000 {
 			phy-mode = "internal";
 			phy-handle = <&phy1>;
@@ -313,6 +372,26 @@
 			status = "disabled";
 		};
 
+		hif_l2_intc: interrupt-controller@411000 {
+			compatible = "brcm,l2-intc";
+			reg = <0x411000 0x30>;
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <30>;
+		};
+
+		nand: nand@412800 {
+			compatible = "brcm,brcmnand-v5.0", "brcm,brcmnand";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg-names = "nand";
+			reg = <0x412800 0x400>;
+			interrupt-parent = <&hif_l2_intc>;
+			interrupts = <24>;
+			status = "disabled";
+		};
+
 		sata: sata@181000 {
 			compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
 			reg-names = "ahci", "top-ctrl";
@@ -352,5 +431,13 @@
 				#phy-cells = <0>;
 			};
 		};
+
+		sdhci0: sdhci@413500 {
+			compatible = "brcm,bcm7425-sdhci";
+			reg = <0x413500 0x100>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <85>;
+			status = "disabled";
+		};
 	};
 };
diff --git a/arch/mips/boot/dts/brcm/bcm7358.dtsi b/arch/mips/boot/dts/brcm/bcm7358.dtsi
index ca57fb5..3e42535 100644
--- a/arch/mips/boot/dts/brcm/bcm7358.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7358.dtsi
@@ -20,7 +20,7 @@
 		uart0 = &uart0;
 	};
 
-	cpu_intc: cpu_intc {
+	cpu_intc: interrupt-controller {
 		#address-cells = <0>;
 		compatible = "mti,cpu-interrupt-controller";
 
@@ -34,6 +34,12 @@
 			#clock-cells = <0>;
 			clock-frequency = <81000000>;
 		};
+
+		upg_clk: upg_clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <27000000>;
+		};
 	};
 
 	rdb {
@@ -43,7 +49,7 @@
 		compatible = "simple-bus";
 		ranges = <0 0x10000000 0x01000000>;
 
-		periph_intc: periph_intc@411400 {
+		periph_intc: interrupt-controller@411400 {
 			compatible = "brcm,bcm7038-l1-intc";
 			reg = <0x411400 0x30>;
 
@@ -54,7 +60,7 @@
 			interrupts = <2>;
 		};
 
-		sun_l2_intc: sun_l2_intc@403000 {
+		sun_l2_intc: interrupt-controller@403000 {
 			compatible = "brcm,l2-intc";
 			reg = <0x403000 0x30>;
 			interrupt-controller;
@@ -75,7 +81,7 @@
 						     "avd_0", "jtag_0";
 		};
 
-		upg_irq0_intc: upg_irq0_intc@406600 {
+		upg_irq0_intc: interrupt-controller@406600 {
 			compatible = "brcm,bcm7120-l2-intc";
 			reg = <0x406600 0x8>;
 
@@ -90,7 +96,7 @@
 			interrupt-names = "upg_main", "upg_bsc";
 		};
 
-		upg_aon_irq0_intc: upg_aon_irq0_intc@408b80 {
+		upg_aon_irq0_intc: interrupt-controller@408b80 {
 			compatible = "brcm,bcm7120-l2-intc";
 			reg = <0x408b80 0x8>;
 
@@ -194,6 +200,59 @@
 		      status = "disabled";
 		};
 
+		pwma: pwm@406400 {
+			compatible = "brcm,bcm7038-pwm";
+			reg = <0x406400 0x28>;
+			#pwm-cells = <2>;
+			clocks = <&upg_clk>;
+			status = "disabled";
+		};
+
+		pwmb: pwm@406700 {
+			compatible = "brcm,bcm7038-pwm";
+			reg = <0x406700 0x28>;
+			#pwm-cells = <2>;
+			clocks = <&upg_clk>;
+			status = "disabled";
+		};
+
+		aon_pm_l2_intc: interrupt-controller@408240 {
+			compatible = "brcm,l2-intc";
+			reg = <0x408240 0x30>;
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <50>;
+			brcm,irq-can-wake;
+		};
+
+		upg_gio: gpio@406500 {
+			compatible = "brcm,brcmstb-gpio";
+			reg = <0x406500 0xa0>;
+			#gpio-cells = <2>;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			interrupt-controller;
+			interrupt-parent = <&upg_irq0_intc>;
+			interrupts = <6>;
+			brcm,gpio-bank-widths = <32 32 32 29 4>;
+		};
+
+		upg_gio_aon: gpio@408c00 {
+			compatible = "brcm,brcmstb-gpio";
+			reg = <0x408c00 0x60>;
+			#gpio-cells = <2>;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			interrupt-controller;
+			interrupt-parent = <&upg_aon_irq0_intc>;
+			interrupts = <6>;
+			interrupts-extended = <&upg_aon_irq0_intc 6>,
+					      <&aon_pm_l2_intc 5>;
+			wakeup-source;
+			brcm,gpio-bank-widths = <21 32 2>;
+		};
+
 		enet0: ethernet@430000 {
 			phy-mode = "internal";
 			phy-handle = <&phy1>;
@@ -239,5 +298,25 @@
 			interrupts = <66>;
 			status = "disabled";
 		};
+
+		hif_l2_intc: interrupt-controller@411000 {
+			compatible = "brcm,l2-intc";
+			reg = <0x411000 0x30>;
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <30>;
+		};
+
+		nand: nand@412800 {
+			compatible = "brcm,brcmnand-v5.0", "brcm,brcmnand";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg-names = "nand";
+			reg = <0x412800 0x400>;
+			interrupt-parent = <&hif_l2_intc>;
+			interrupts = <24>;
+			status = "disabled";
+		};
 	};
 };
diff --git a/arch/mips/boot/dts/brcm/bcm7360.dtsi b/arch/mips/boot/dts/brcm/bcm7360.dtsi
index 1c0c3d4..112a557 100644
--- a/arch/mips/boot/dts/brcm/bcm7360.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7360.dtsi
@@ -20,7 +20,7 @@
 		uart0 = &uart0;
 	};
 
-	cpu_intc: cpu_intc {
+	cpu_intc: interrupt-controller {
 		#address-cells = <0>;
 		compatible = "mti,cpu-interrupt-controller";
 
@@ -34,6 +34,12 @@
 			#clock-cells = <0>;
 			clock-frequency = <81000000>;
 		};
+
+		upg_clk: upg_clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <27000000>;
+		};
 	};
 
 	rdb {
@@ -43,7 +49,7 @@
 		compatible = "simple-bus";
 		ranges = <0 0x10000000 0x01000000>;
 
-		periph_intc: periph_intc@411400 {
+		periph_intc: interrupt-controller@411400 {
 			compatible = "brcm,bcm7038-l1-intc";
 			reg = <0x411400 0x30>;
 
@@ -54,7 +60,7 @@
 			interrupts = <2>;
 		};
 
-		sun_l2_intc: sun_l2_intc@403000 {
+		sun_l2_intc: interrupt-controller@403000 {
 			compatible = "brcm,l2-intc";
 			reg = <0x403000 0x30>;
 			interrupt-controller;
@@ -75,7 +81,7 @@
 						     "avd_0", "jtag_0";
 		};
 
-		upg_irq0_intc: upg_irq0_intc@406600 {
+		upg_irq0_intc: interrupt-controller@406600 {
 			compatible = "brcm,bcm7120-l2-intc";
 			reg = <0x406600 0x8>;
 
@@ -90,7 +96,7 @@
 			interrupt-names = "upg_main", "upg_bsc";
 		};
 
-		upg_aon_irq0_intc: upg_aon_irq0_intc@408b80 {
+		upg_aon_irq0_intc: interrupt-controller@408b80 {
 			compatible = "brcm,bcm7120-l2-intc";
 			reg = <0x408b80 0x8>;
 
@@ -194,6 +200,51 @@
 		      status = "disabled";
 		};
 
+		pwma: pwm@406400 {
+			compatible = "brcm,bcm7038-pwm";
+			reg = <0x406400 0x28>;
+			#pwm-cells = <2>;
+			clocks = <&upg_clk>;
+			status = "disabled";
+		};
+
+		aon_pm_l2_intc: interrupt-controller@408440 {
+			compatible = "brcm,l2-intc";
+			reg = <0x408440 0x30>;
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <50>;
+			brcm,irq-can-wake;
+		};
+
+		upg_gio: gpio@406500 {
+			compatible = "brcm,brcmstb-gpio";
+			reg = <0x406500 0xa0>;
+			#gpio-cells = <2>;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			interrupt-controller;
+			interrupt-parent = <&upg_irq0_intc>;
+			interrupts = <6>;
+			brcm,gpio-bank-widths = <32 32 32 29 4>;
+		};
+
+		upg_gio_aon: gpio@408c00 {
+			compatible = "brcm,brcmstb-gpio";
+			reg = <0x408c00 0x60>;
+			#gpio-cells = <2>;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			interrupt-controller;
+			interrupt-parent = <&upg_aon_irq0_intc>;
+			interrupts = <6>;
+			interrupts-extended = <&upg_aon_irq0_intc 6>,
+					      <&aon_pm_l2_intc 5>;
+			wakeup-source;
+			brcm,gpio-bank-widths = <21 32 2>;
+		};
+
 		enet0: ethernet@430000 {
 			phy-mode = "internal";
 			phy-handle = <&phy1>;
@@ -240,6 +291,26 @@
 			status = "disabled";
 		};
 
+		hif_l2_intc: interrupt-controller@411000 {
+			compatible = "brcm,l2-intc";
+			reg = <0x411000 0x30>;
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <30>;
+		};
+
+		nand: nand@412800 {
+			compatible = "brcm,brcmnand-v5.0", "brcm,brcmnand";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg-names = "nand";
+			reg = <0x412800 0x400>;
+			interrupt-parent = <&hif_l2_intc>;
+			interrupts = <24>;
+			status = "disabled";
+		};
+
 		sata: sata@181000 {
 			compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
 			reg-names = "ahci", "top-ctrl";
@@ -279,5 +350,13 @@
 				#phy-cells = <0>;
 			};
 		};
+
+		sdhci0: sdhci@410000 {
+			compatible = "brcm,bcm7425-sdhci";
+			reg = <0x410000 0x100>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <82>;
+			status = "disabled";
+		};
 	};
 };
diff --git a/arch/mips/boot/dts/brcm/bcm7362.dtsi b/arch/mips/boot/dts/brcm/bcm7362.dtsi
index 6b4713a..34abfb0 100644
--- a/arch/mips/boot/dts/brcm/bcm7362.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7362.dtsi
@@ -26,7 +26,7 @@
 		uart0 = &uart0;
 	};
 
-	cpu_intc: cpu_intc {
+	cpu_intc: interrupt-controller {
 		#address-cells = <0>;
 		compatible = "mti,cpu-interrupt-controller";
 
@@ -40,6 +40,12 @@
 			#clock-cells = <0>;
 			clock-frequency = <81000000>;
 		};
+
+		upg_clk: upg_clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <27000000>;
+		};
 	};
 
 	rdb {
@@ -49,7 +55,7 @@
 		compatible = "simple-bus";
 		ranges = <0 0x10000000 0x01000000>;
 
-		periph_intc: periph_intc@411400 {
+		periph_intc: interrupt-controller@411400 {
 			compatible = "brcm,bcm7038-l1-intc";
 			reg = <0x411400 0x30>, <0x411600 0x30>;
 
@@ -60,7 +66,7 @@
 			interrupts = <2>, <3>;
 		};
 
-		sun_l2_intc: sun_l2_intc@403000 {
+		sun_l2_intc: interrupt-controller@403000 {
 			compatible = "brcm,l2-intc";
 			reg = <0x403000 0x30>;
 			interrupt-controller;
@@ -81,7 +87,7 @@
 						     "avd_0", "jtag_0";
 		};
 
-		upg_irq0_intc: upg_irq0_intc@406600 {
+		upg_irq0_intc: interrupt-controller@406600 {
 			compatible = "brcm,bcm7120-l2-intc";
 			reg = <0x406600 0x8>;
 
@@ -96,7 +102,7 @@
 			interrupt-names = "upg_main", "upg_bsc";
 		};
 
-		upg_aon_irq0_intc: upg_aon_irq0_intc@408b80 {
+		upg_aon_irq0_intc: interrupt-controller@408b80 {
 			compatible = "brcm,bcm7120-l2-intc";
 			reg = <0x408b80 0x8>;
 
@@ -190,6 +196,51 @@
 		      status = "disabled";
 		};
 
+		pwma: pwm@406400 {
+			compatible = "brcm,bcm7038-pwm";
+			reg = <0x406400 0x28>;
+			#pwm-cells = <2>;
+			clocks = <&upg_clk>;
+			status = "disabled";
+		};
+
+		aon_pm_l2_intc: interrupt-controller@408440 {
+			compatible = "brcm,l2-intc";
+			reg = <0x408440 0x30>;
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <50>;
+			brcm,irq-can-wake;
+		};
+
+		upg_gio: gpio@406500 {
+			compatible = "brcm,brcmstb-gpio";
+			reg = <0x406500 0xa0>;
+			#gpio-cells = <2>;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			interrupt-controller;
+			interrupt-parent = <&upg_irq0_intc>;
+			interrupts = <6>;
+			brcm,gpio-bank-widths = <32 32 32 29 4>;
+		};
+
+		upg_gio_aon: gpio@408c00 {
+			compatible = "brcm,brcmstb-gpio";
+			reg = <0x408c00 0x60>;
+			#gpio-cells = <2>;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			interrupt-controller;
+			interrupt-parent = <&upg_aon_irq0_intc>;
+			interrupts = <6>;
+			interrupts-extended = <&upg_aon_irq0_intc 6>,
+					      <&aon_pm_l2_intc 5>;
+			wakeup-source;
+			brcm,gpio-bank-widths = <21 32 2>;
+		};
+
 		enet0: ethernet@430000 {
 			phy-mode = "internal";
 			phy-handle = <&phy1>;
@@ -236,6 +287,26 @@
 			status = "disabled";
 		};
 
+		hif_l2_intc: interrupt-controller@411000 {
+			compatible = "brcm,l2-intc";
+			reg = <0x411000 0x30>;
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <30>;
+		};
+
+		nand: nand@412800 {
+			compatible = "brcm,brcmnand-v5.0", "brcm,brcmnand";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg-names = "nand";
+			reg = <0x412800 0x400>;
+			interrupt-parent = <&hif_l2_intc>;
+			interrupts = <24>;
+			status = "disabled";
+		};
+
 		sata: sata@181000 {
 			compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
 			reg-names = "ahci", "top-ctrl";
@@ -275,5 +346,13 @@
 				#phy-cells = <0>;
 			};
 		};
+
+		sdhci0: sdhci@410000 {
+			compatible = "brcm,bcm7425-sdhci";
+			reg = <0x410000 0x100>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <82>;
+			status = "disabled";
+		};
 	};
 };
diff --git a/arch/mips/boot/dts/brcm/bcm7420.dtsi b/arch/mips/boot/dts/brcm/bcm7420.dtsi
index 0586bf6..b143723 100644
--- a/arch/mips/boot/dts/brcm/bcm7420.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7420.dtsi
@@ -26,7 +26,7 @@
 		uart0 = &uart0;
 	};
 
-	cpu_intc: cpu_intc {
+	cpu_intc: interrupt-controller {
 		#address-cells = <0>;
 		compatible = "mti,cpu-interrupt-controller";
 
@@ -40,6 +40,12 @@
 			#clock-cells = <0>;
 			clock-frequency = <81000000>;
 		};
+
+		upg_clk: upg_clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <27000000>;
+		};
 	};
 
 	rdb {
@@ -49,7 +55,7 @@
 		compatible = "simple-bus";
 		ranges = <0 0x10000000 0x01000000>;
 
-		periph_intc: periph_intc@441400 {
+		periph_intc: interrupt-controller@441400 {
 			compatible = "brcm,bcm7038-l1-intc";
 			reg = <0x441400 0x30>, <0x441600 0x30>;
 
@@ -60,7 +66,7 @@
 			interrupts = <2>, <3>;
 		};
 
-		sun_l2_intc: sun_l2_intc@401800 {
+		sun_l2_intc: interrupt-controller@401800 {
 			compatible = "brcm,l2-intc";
 			reg = <0x401800 0x30>;
 			interrupt-controller;
@@ -82,7 +88,7 @@
 						     "jtag_0";
 		};
 
-		upg_irq0_intc: upg_irq0_intc@406780 {
+		upg_irq0_intc: interrupt-controller@406780 {
 			compatible = "brcm,bcm7120-l2-intc";
 			reg = <0x406780 0x8>;
 
@@ -191,6 +197,34 @@
 		      status = "disabled";
 		};
 
+		pwma: pwm@406580 {
+			compatible = "brcm,bcm7038-pwm";
+			reg = <0x406580 0x28>;
+			#pwm-cells = <2>;
+			clocks = <&upg_clk>;
+			status = "disabled";
+		};
+
+		pwmb: pwm@406880 {
+			compatible = "brcm,bcm7038-pwm";
+			reg = <0x406880 0x28>;
+			#pwm-cells = <2>;
+			clocks = <&upg_clk>;
+			status = "disabled";
+		};
+
+		upg_gio: gpio@406700 {
+			compatible = "brcm,brcmstb-gpio";
+			reg = <0x406700 0x80>;
+			#gpio-cells = <2>;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			interrupt-controller;
+			interrupt-parent = <&upg_irq0_intc>;
+			interrupts = <6>;
+			brcm,gpio-bank-widths = <32 32 32 27>;
+		};
+
 		enet0: ethernet@468000 {
 			phy-mode = "internal";
 			phy-handle = <&phy1>;
diff --git a/arch/mips/boot/dts/brcm/bcm7425.dtsi b/arch/mips/boot/dts/brcm/bcm7425.dtsi
index c1c15ed..2488d2f 100644
--- a/arch/mips/boot/dts/brcm/bcm7425.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7425.dtsi
@@ -26,7 +26,7 @@
 		uart0 = &uart0;
 	};
 
-	cpu_intc: cpu_intc {
+	cpu_intc: interrupt-controller {
 		#address-cells = <0>;
 		compatible = "mti,cpu-interrupt-controller";
 
@@ -40,6 +40,12 @@
 			#clock-cells = <0>;
 			clock-frequency = <81000000>;
 		};
+
+		upg_clk: upg_clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <27000000>;
+		};
 	};
 
 	rdb {
@@ -49,7 +55,7 @@
 		compatible = "simple-bus";
 		ranges = <0 0x10000000 0x01000000>;
 
-		periph_intc: periph_intc@41a400 {
+		periph_intc: interrupt-controller@41a400 {
 			compatible = "brcm,bcm7038-l1-intc";
 			reg = <0x41a400 0x30>, <0x41a600 0x30>;
 
@@ -60,7 +66,7 @@
 			interrupts = <2>, <3>;
 		};
 
-		sun_l2_intc: sun_l2_intc@403000 {
+		sun_l2_intc: interrupt-controller@403000 {
 			compatible = "brcm,l2-intc";
 			reg = <0x403000 0x30>;
 			interrupt-controller;
@@ -83,7 +89,7 @@
 						     "vice_0";
 		};
 
-		upg_irq0_intc: upg_irq0_intc@406780 {
+		upg_irq0_intc: interrupt-controller@406780 {
 			compatible = "brcm,bcm7120-l2-intc";
 			reg = <0x406780 0x8>;
 
@@ -98,7 +104,7 @@
 			interrupt-names = "upg_main", "upg_bsc";
 		};
 
-		upg_aon_irq0_intc: upg_aon_irq0_intc@409480 {
+		upg_aon_irq0_intc: interrupt-controller@409480 {
 			compatible = "brcm,bcm7120-l2-intc";
 			reg = <0x409480 0x8>;
 
@@ -209,6 +215,59 @@
 		      status = "disabled";
 		};
 
+		pwma: pwm@406580 {
+			compatible = "brcm,bcm7038-pwm";
+			reg = <0x406580 0x28>;
+			#pwm-cells = <2>;
+			clocks = <&upg_clk>;
+			status = "disabled";
+		};
+
+		pwmb: pwm@406800 {
+			compatible = "brcm,bcm7038-pwm";
+			reg = <0x406800 0x28>;
+			#pwm-cells = <2>;
+			clocks = <&upg_clk>;
+			status = "disabled";
+		};
+
+		aon_pm_l2_intc: interrupt-controller@408440 {
+			compatible = "brcm,l2-intc";
+			reg = <0x408440 0x30>;
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <49>;
+			brcm,irq-can-wake;
+		};
+
+		upg_gio: gpio@406700 {
+			compatible = "brcm,brcmstb-gpio";
+			reg = <0x406700 0x80>;
+			#gpio-cells = <2>;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			interrupt-controller;
+			interrupt-parent = <&upg_irq0_intc>;
+			interrupts = <6>;
+			brcm,gpio-bank-widths = <32 32 32 21>;
+		};
+
+		upg_gio_aon: gpio@4094c0 {
+			compatible = "brcm,brcmstb-gpio";
+			reg = <0x4094c0 0x40>;
+			#gpio-cells = <2>;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			interrupt-controller;
+			interrupt-parent = <&upg_aon_irq0_intc>;
+			interrupts = <6>;
+			interrupts-extended = <&upg_aon_irq0_intc 6>,
+					      <&aon_pm_l2_intc 5>;
+			wakeup-source;
+			brcm,gpio-bank-widths = <18 4>;
+		};
+
 		enet0: ethernet@b80000 {
 			phy-mode = "internal";
 			phy-handle = <&phy1>;
@@ -312,6 +371,26 @@
 			status = "disabled";
 		};
 
+		hif_l2_intc: interrupt-controller@41a000 {
+			compatible = "brcm,l2-intc";
+			reg = <0x41a000 0x30>;
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <24>;
+		};
+
+		nand: nand@41b800 {
+			compatible = "brcm,brcmnand-v5.0", "brcm,brcmnand";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg-names = "nand";
+			reg = <0x41b800 0x400>;
+			interrupt-parent = <&hif_l2_intc>;
+			interrupts = <24>;
+			status = "disabled";
+		};
+
 		sata: sata@181000 {
 			compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
 			reg-names = "ahci", "top-ctrl";
@@ -351,5 +430,25 @@
 				#phy-cells = <0>;
 			};
 		};
+
+		sdhci0: sdhci@419000 {
+			compatible = "brcm,bcm7425-sdhci";
+			reg = <0x419000 0x100>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <43>;
+			sd-uhs-sdr50;
+			mmc-hs200-1_8v;
+			status = "disabled";
+		};
+
+		sdhci1: sdhci@419200 {
+			compatible = "brcm,bcm7425-sdhci";
+			reg = <0x419200 0x100>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <44>;
+			sd-uhs-sdr50;
+			mmc-hs200-1_8v;
+			status = "disabled";
+		};
 	};
 };
diff --git a/arch/mips/boot/dts/brcm/bcm7435.dtsi b/arch/mips/boot/dts/brcm/bcm7435.dtsi
index a874d3a..19fa259 100644
--- a/arch/mips/boot/dts/brcm/bcm7435.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7435.dtsi
@@ -38,7 +38,7 @@
 		uart0 = &uart0;
 	};
 
-	cpu_intc: cpu_intc {
+	cpu_intc: interrupt-controller {
 		#address-cells = <0>;
 		compatible = "mti,cpu-interrupt-controller";
 
@@ -52,6 +52,12 @@
 			#clock-cells = <0>;
 			clock-frequency = <81000000>;
 		};
+
+		upg_clk: upg_clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <27000000>;
+		};
 	};
 
 	rdb {
@@ -61,7 +67,7 @@
 		compatible = "simple-bus";
 		ranges = <0 0x10000000 0x01000000>;
 
-		periph_intc: periph_intc@41b500 {
+		periph_intc: interrupt-controller@41b500 {
 			compatible = "brcm,bcm7038-l1-intc";
 			reg = <0x41b500 0x40>, <0x41b600 0x40>,
 				<0x41b700 0x40>, <0x41b800 0x40>;
@@ -73,7 +79,7 @@
 			interrupts = <2>, <3>, <2>, <3>;
 		};
 
-		sun_l2_intc: sun_l2_intc@403000 {
+		sun_l2_intc: interrupt-controller@403000 {
 			compatible = "brcm,l2-intc";
 			reg = <0x403000 0x30>;
 			interrupt-controller;
@@ -98,7 +104,7 @@
 						     "scpu";
 		};
 
-		upg_irq0_intc: upg_irq0_intc@406780 {
+		upg_irq0_intc: interrupt-controller@406780 {
 			compatible = "brcm,bcm7120-l2-intc";
 			reg = <0x406780 0x8>;
 
@@ -113,7 +119,7 @@
 			interrupt-names = "upg_main", "upg_bsc";
 		};
 
-		upg_aon_irq0_intc: upg_aon_irq0_intc@409480 {
+		upg_aon_irq0_intc: interrupt-controller@409480 {
 			compatible = "brcm,bcm7120-l2-intc";
 			reg = <0x409480 0x8>;
 
@@ -224,6 +230,59 @@
 		      status = "disabled";
 		};
 
+		pwma: pwm@406580 {
+			compatible = "brcm,bcm7038-pwm";
+			reg = <0x406580 0x28>;
+			#pwm-cells = <2>;
+			clocks = <&upg_clk>;
+			status = "disabled";
+		};
+
+		pwmb: pwm@406800 {
+			compatible = "brcm,bcm7038-pwm";
+			reg = <0x406800 0x28>;
+			#pwm-cells = <2>;
+			clocks = <&upg_clk>;
+			status = "disabled";
+		};
+
+		aon_pm_l2_intc: interrupt-controller@408440 {
+			compatible = "brcm,l2-intc";
+			reg = <0x408440 0x30>;
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <54>;
+			brcm,irq-can-wake;
+		};
+
+		upg_gio: gpio@406700 {
+			compatible = "brcm,brcmstb-gpio";
+			reg = <0x406700 0x80>;
+			#gpio-cells = <2>;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			interrupt-controller;
+			interrupt-parent = <&upg_irq0_intc>;
+			interrupts = <6>;
+			brcm,gpio-bank-widths = <32 32 32 21>;
+		};
+
+		upg_gio_aon: gpio@4094c0 {
+			compatible = "brcm,brcmstb-gpio";
+			reg = <0x4094c0 0x40>;
+			#gpio-cells = <2>;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			interrupt-controller;
+			interrupt-parent = <&upg_aon_irq0_intc>;
+			interrupts = <6>;
+			interrupts-extended = <&upg_aon_irq0_intc 6>,
+					      <&aon_pm_l2_intc 5>;
+			wakeup-source;
+			brcm,gpio-bank-widths = <18 4>;
+		};
+
 		enet0: ethernet@b80000 {
 			phy-mode = "internal";
 			phy-handle = <&phy1>;
@@ -327,6 +386,26 @@
 			status = "disabled";
 		};
 
+		hif_l2_intc: interrupt-controller@41b000 {
+			compatible = "brcm,l2-intc";
+			reg = <0x41b000 0x30>;
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <24>;
+		};
+
+		nand: nand@41c800 {
+			compatible = "brcm,brcmnand-v6.2", "brcm,brcmnand";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg-names = "nand", "flash-dma";
+			reg = <0x41c800 0x600>, <0x41d000 0x100>;
+			interrupt-parent = <&hif_l2_intc>;
+			interrupts = <24>, <4>;
+			status = "disabled";
+		};
+
 		sata: sata@181000 {
 			compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
 			reg-names = "ahci", "top-ctrl";
@@ -366,5 +445,25 @@
 				#phy-cells = <0>;
 			};
 		};
+
+		sdhci0: sdhci@41a000 {
+			compatible = "brcm,bcm7425-sdhci";
+			reg = <0x41a000 0x100>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <47>;
+			sd-uhs-sdr50;
+			mmc-hs200-1_8v;
+			status = "disabled";
+		};
+
+		sdhci1: sdhci@41a200 {
+			compatible = "brcm,bcm7425-sdhci";
+			reg = <0x41a200 0x100>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <48>;
+			sd-uhs-sdr50;
+			mmc-hs200-1_8v;
+			status = "disabled";
+		};
 	};
 };
diff --git a/arch/mips/boot/dts/brcm/bcm97125cbmb.dts b/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
index f2449d1..5c24eac 100644
--- a/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
@@ -45,6 +45,10 @@
 	status = "okay";
 };
 
+&pwma {
+	status = "okay";
+};
+
 /* FIXME: USB is wonky; disable it for now */
 &ehci0 {
 	status = "disabled";
diff --git a/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts b/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts
index d3d2881..e67eaf3 100644
--- a/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts
@@ -1,6 +1,7 @@
 /dts-v1/;
 
 /include/ "bcm7346.dtsi"
+/include/ "bcm97xxx-nand-cs1-bch24.dtsi"
 
 / {
 	compatible = "brcm,bcm97346dbsmb", "brcm,bcm7346";
@@ -49,6 +50,14 @@
 	status = "okay";
 };
 
+&pwma {
+	status = "okay";
+};
+
+&pwmb {
+	status = "okay";
+};
+
 &enet0 {
 	status = "okay";
 };
@@ -85,6 +94,10 @@
 	status = "okay";
 };
 
+&nand {
+	status = "okay";
+};
+
 &sata {
 	status = "okay";
 };
@@ -92,3 +105,7 @@
 &sata_phy {
 	status = "okay";
 };
+
+&sdhci0 {
+	status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97358svmb.dts b/arch/mips/boot/dts/brcm/bcm97358svmb.dts
index 02ce6b4..ee4607f 100644
--- a/arch/mips/boot/dts/brcm/bcm97358svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97358svmb.dts
@@ -1,6 +1,7 @@
 /dts-v1/;
 
 /include/ "bcm7358.dtsi"
+/include/ "bcm97xxx-nand-cs1-bch4.dtsi"
 
 / {
 	compatible = "brcm,bcm97358svmb", "brcm,bcm7358";
@@ -45,6 +46,14 @@
 	status = "okay";
 };
 
+&pwma {
+	status = "okay";
+};
+
+&pwmb {
+	status = "okay";
+};
+
 &enet0 {
 	status = "okay";
 };
@@ -56,3 +65,7 @@
 &ohci0 {
 	status = "okay";
 };
+
+&nand {
+	status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97360svmb.dts b/arch/mips/boot/dts/brcm/bcm97360svmb.dts
index 73124be..bed821b 100644
--- a/arch/mips/boot/dts/brcm/bcm97360svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97360svmb.dts
@@ -45,6 +45,10 @@
 	status = "okay";
 };
 
+&pwma {
+	status = "okay";
+};
+
 &enet0 {
 	status = "okay";
 };
@@ -64,3 +68,7 @@
 &sata_phy {
 	status = "okay";
 };
+
+&sdhci0 {
+	status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97362svmb.dts b/arch/mips/boot/dts/brcm/bcm97362svmb.dts
index 3cfcaeb..68fd823 100644
--- a/arch/mips/boot/dts/brcm/bcm97362svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97362svmb.dts
@@ -1,6 +1,7 @@
 /dts-v1/;
 
 /include/ "bcm7362.dtsi"
+/include/ "bcm97xxx-nand-cs1-bch4.dtsi"
 
 / {
 	compatible = "brcm,bcm97362svmb", "brcm,bcm7362";
@@ -41,6 +42,10 @@
 	status = "okay";
 };
 
+&pwma {
+	status = "okay";
+};
+
 &enet0 {
 	status = "okay";
 };
@@ -53,6 +58,10 @@
 	status = "okay";
 };
 
+&nand {
+	status = "okay";
+};
+
 &sata {
 	status = "okay";
 };
@@ -60,3 +69,7 @@
 &sata_phy {
 	status = "okay";
 };
+
+&sdhci0 {
+	status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97420c.dts b/arch/mips/boot/dts/brcm/bcm97420c.dts
index 600d57a..e66271a 100644
--- a/arch/mips/boot/dts/brcm/bcm97420c.dts
+++ b/arch/mips/boot/dts/brcm/bcm97420c.dts
@@ -51,6 +51,14 @@
 	status = "okay";
 };
 
+&pwma {
+	status = "okay";
+};
+
+&pwmb {
+	status = "okay";
+};
+
 /* FIXME: MAC driver comes up but cannot attach to PHY */
 &enet0 {
 	status = "disabled";
diff --git a/arch/mips/boot/dts/brcm/bcm97425svmb.dts b/arch/mips/boot/dts/brcm/bcm97425svmb.dts
index 119c714..f95ba1b 100644
--- a/arch/mips/boot/dts/brcm/bcm97425svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97425svmb.dts
@@ -1,6 +1,7 @@
 /dts-v1/;
 
 /include/ "bcm7425.dtsi"
+/include/ "bcm97xxx-nand-cs1-bch24.dtsi"
 
 / {
 	compatible = "brcm,bcm97425svmb", "brcm,bcm7425";
@@ -51,6 +52,14 @@
 	status = "okay";
 };
 
+&pwma {
+	status = "okay";
+};
+
+&pwmb {
+	status = "okay";
+};
+
 &enet0 {
 	status = "okay";
 };
@@ -86,3 +95,15 @@
 &ohci3 {
 	status = "okay";
 };
+
+&nand {
+	status = "okay";
+};
+
+&sdhci0 {
+	status = "okay";
+};
+
+&sdhci1 {
+	status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97435svmb.dts b/arch/mips/boot/dts/brcm/bcm97435svmb.dts
index 43e3ba2..fb37b71 100644
--- a/arch/mips/boot/dts/brcm/bcm97435svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97435svmb.dts
@@ -1,6 +1,7 @@
 /dts-v1/;
 
 /include/ "bcm7435.dtsi"
+/include/ "bcm97xxx-nand-cs1-bch24.dtsi"
 
 / {
 	compatible = "brcm,bcm97435svmb", "brcm,bcm7435";
@@ -51,6 +52,14 @@
 	status = "okay";
 };
 
+&pwma {
+	status = "okay";
+};
+
+&pwmb {
+	status = "okay";
+};
+
 &enet0 {
 	status = "okay";
 };
@@ -87,6 +96,10 @@
 	status = "okay";
 };
 
+&nand {
+	status = "okay";
+};
+
 &sata {
 	status = "okay";
 };
@@ -94,3 +107,11 @@
 &sata_phy {
 	status = "okay";
 };
+
+&sdhci0 {
+	status = "okay";
+};
+
+&sdhci1 {
+	status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch24.dtsi b/arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch24.dtsi
new file mode 100644
index 0000000..3c24f97
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch24.dtsi
@@ -0,0 +1,25 @@
+&nand {
+	nandcs@1 {
+		compatible = "brcm,nandcs";
+		reg = <1>;
+		nand-on-flash-bbt;
+
+		nand-ecc-strength = <24>;
+		nand-ecc-step-size = <1024>;
+		brcm,nand-oob-sector-size = <27>;
+
+		partitions {
+			compatible = "fixed-partitions";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			flash1.rootfs@0 {
+				reg = <0x0 0x10000000>;
+			};
+
+			flash1.kernel@10000000 {
+				reg = <0x10000000 0x400000>;
+			};
+		};
+	};
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch4.dtsi b/arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch4.dtsi
new file mode 100644
index 0000000..cb53181
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch4.dtsi
@@ -0,0 +1,25 @@
+&nand {
+	nandcs@1 {
+		compatible = "brcm,nandcs";
+		reg = <1>;
+		nand-on-flash-bbt;
+
+		nand-ecc-strength = <4>;
+		nand-ecc-step-size = <512>;
+		brcm,nand-oob-sector-size = <16>;
+
+		partitions {
+			compatible = "fixed-partitions";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			flash1.rootfs@0 {
+				reg = <0x0 0x10000000>;
+			};
+
+			flash1.kernel@10000000 {
+				reg = <0x10000000 0x400000>;
+			};
+		};
+	};
+};
diff --git a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts
index b134798..cfa2915 100644
--- a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts
+++ b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts
@@ -8,55 +8,16 @@
  * published by the Free Software Foundation.
  */
 
-/include/ "octeon_3xxx.dtsi"
+/include/ "dlink_dsr-500n-1000n.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 
 / {
 	model = "dlink,dsr-1000n";
 
 	soc@0 {
-		smi0: mdio@1180000001800 {
-			phy8: ethernet-phy@8 {
-				reg = <8>;
-				compatible = "ethernet-phy-ieee802.3-c22";
-			};
-		};
-
-		pip: pip@11800a0000000 {
-			interface@0 {
-				ethernet@0 {
-					fixed-link {
-						speed = <1000>;
-						full-duplex;
-					};
-				};
-				ethernet@1 {
-					fixed-link {
-						speed = <1000>;
-						full-duplex;
-					};
-				};
-				ethernet@2 {
-					phy-handle = <&phy8>;
-				};
-			};
-		};
-
-		twsi0: i2c@1180000001000 {
-			rtc@68 {
-				compatible = "dallas,ds1337";
-				reg = <0x68>;
-			};
-		};
-
 		uart0: serial@1180000000800 {
 			clock-frequency = <500000000>;
 		};
-
-		usbn: usbn@1180068000000 {
-			refclk-frequency = <12000000>;
-			refclk-type = "crystal";
-		};
 	};
 
 	leds {
@@ -87,8 +48,4 @@
 			gpios = <&gpio 18 GPIO_ACTIVE_LOW>;
 		};
 	};
-
-	aliases {
-		pip = &pip;
-	};
 };
diff --git a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n-1000n.dtsi b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n-1000n.dtsi
new file mode 100644
index 0000000..246b598
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n-1000n.dtsi
@@ -0,0 +1,58 @@
+/*
+ * Device tree source for D-Link DSR-500N/1000N (common parts).
+ *
+ * Written by: Aaro Koskinen <aaro.koskinen@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/include/ "octeon_3xxx.dtsi"
+
+/ {
+	soc@0 {
+		smi0: mdio@1180000001800 {
+			phy8: ethernet-phy@8 {
+				reg = <8>;
+				compatible = "ethernet-phy-ieee802.3-c22";
+			};
+		};
+
+		pip: pip@11800a0000000 {
+			interface@0 {
+				ethernet@0 {
+					fixed-link {
+						speed = <1000>;
+						full-duplex;
+					};
+				};
+				ethernet@1 {
+					fixed-link {
+						speed = <1000>;
+						full-duplex;
+					};
+				};
+				ethernet@2 {
+					phy-handle = <&phy8>;
+				};
+			};
+		};
+
+		twsi0: i2c@1180000001000 {
+			rtc@68 {
+				compatible = "dallas,ds1337";
+				reg = <0x68>;
+			};
+		};
+
+		usbn: usbn@1180068000000 {
+			refclk-frequency = <12000000>;
+			refclk-type = "crystal";
+		};
+	};
+
+	aliases {
+		pip = &pip;
+	};
+};
diff --git a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n.dts b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n.dts
new file mode 100644
index 0000000..78886e1
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n.dts
@@ -0,0 +1,40 @@
+/*
+ * Device tree source for D-Link DSR-500N.
+ *
+ * Written by: Aaro Koskinen <aaro.koskinen@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/include/ "dlink_dsr-500n-1000n.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+	model = "dlink,dsr-500n";
+	compatible = "dlink,dsr-500n", "cavium,octeon-3860";
+
+	soc@0 {
+		uart0: serial@1180000000800 {
+			clock-frequency = <300000000>;
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		usb {
+			gpios = <&gpio 9 GPIO_ACTIVE_LOW>;
+		};
+
+		wps {
+			gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
+		};
+
+		wireless {
+			label = "2.4g";
+			gpios = <&gpio 18 GPIO_ACTIVE_LOW>;
+		};
+	};
+};
diff --git a/arch/mips/boot/dts/mti/Makefile b/arch/mips/boot/dts/mti/Makefile
index 144d776..fcabd69 100644
--- a/arch/mips/boot/dts/mti/Makefile
+++ b/arch/mips/boot/dts/mti/Makefile
@@ -1,5 +1,5 @@
 dtb-$(CONFIG_MIPS_MALTA)	+= malta.dtb
-dtb-$(CONFIG_MIPS_SEAD3)	+= sead3.dtb
+dtb-$(CONFIG_LEGACY_BOARD_SEAD3)	+= sead3.dtb
 
 obj-y				+= $(patsubst %.dtb, %.dtb.o, $(dtb-y))
 
diff --git a/arch/mips/boot/dts/mti/malta.dts b/arch/mips/boot/dts/mti/malta.dts
index b18c466..f604a27 100644
--- a/arch/mips/boot/dts/mti/malta.dts
+++ b/arch/mips/boot/dts/mti/malta.dts
@@ -1,5 +1,8 @@
 /dts-v1/;
 
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/mips-gic.h>
+
 /memreserve/ 0x00000000 0x00001000;	/* YAMON exception vectors */
 /memreserve/ 0x00001000 0x000ef000;	/* YAMON */
 /memreserve/ 0x000f0000 0x00010000;	/* PIIX4 ISA memory */
@@ -8,4 +11,100 @@
 	#address-cells = <1>;
 	#size-cells = <1>;
 	compatible = "mti,malta";
+
+	cpu_intc: interrupt-controller {
+		compatible = "mti,cpu-interrupt-controller";
+
+		interrupt-controller;
+		#interrupt-cells = <1>;
+	};
+
+	gic: interrupt-controller@1bdc0000 {
+		compatible = "mti,gic";
+		reg = <0x1bdc0000 0x20000>;
+
+		interrupt-controller;
+		#interrupt-cells = <3>;
+
+		/*
+		 * Declare the interrupt-parent even though the mti,gic
+		 * binding doesn't require it, such that the kernel can
+		 * figure out that cpu_intc is the root interrupt
+		 * controller & should be probed first.
+		 */
+		interrupt-parent = <&cpu_intc>;
+
+		timer {
+			compatible = "mti,gic-timer";
+			interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>;
+		};
+	};
+
+	i8259: interrupt-controller@20 {
+		compatible = "intel,i8259";
+
+		interrupt-controller;
+		#interrupt-cells = <1>;
+
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_SHARED 3 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	flash@1e000000 {
+		compatible = "intel,dt28f160", "cfi-flash";
+		reg = <0x1e000000 0x400000>;
+		bank-width = <4>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		partitions {
+			compatible = "fixed-partitions";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			yamon@0 {
+				label = "YAMON";
+				reg = <0x0 0x100000>;
+				read-only;
+			};
+
+			user-fs@100000 {
+				label = "User FS";
+				reg = <0x100000 0x2e0000>;
+			};
+
+			board-config@3e0000 {
+				label = "Board Config";
+				reg = <0x3e0000 0x20000>;
+				read-only;
+			};
+		};
+	};
+
+	fpga_regs: system-controller@1f000000 {
+		compatible = "mti,malta-fpga", "syscon", "simple-mfd";
+		reg = <0x1f000000 0x1000>;
+
+		reboot {
+			compatible = "syscon-reboot";
+			regmap = <&fpga_regs>;
+			offset = <0x500>;
+			mask = <0x4d>;
+		};
+	};
+
+	isa {
+		compatible = "isa";
+		#address-cells = <2>;
+		#size-cells = <1>;
+		ranges = <1 0 0 0x1000>;
+
+		rtc@70 {
+			compatible = "motorola,mc146818";
+			reg = <1 0x70 0x8>;
+
+			interrupt-parent = <&i8259>;
+			interrupts = <8>;
+		};
+	};
 };
diff --git a/arch/mips/boot/dts/mti/sead3.dts b/arch/mips/boot/dts/mti/sead3.dts
index e4b317d..b112879 100644
--- a/arch/mips/boot/dts/mti/sead3.dts
+++ b/arch/mips/boot/dts/mti/sead3.dts
@@ -4,10 +4,23 @@
 /memreserve/ 0x00001000 0x000ef000;	// ROM data
 /memreserve/ 0x000f0000 0x004cc000;	// reserved
 
+#include <dt-bindings/interrupt-controller/mips-gic.h>
+
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
 	compatible = "mti,sead-3";
+	model = "MIPS SEAD-3";
+	interrupt-parent = <&gic>;
+
+	chosen {
+		stdout-path = "uart1:115200";
+	};
+
+	aliases {
+		uart0 = &uart0;
+		uart1 = &uart1;
+	};
 
 	cpus {
 		cpu@0 {
@@ -19,4 +32,229 @@
 		device_type = "memory";
 		reg = <0x0 0x08000000>;
 	};
+
+	cpu_intc: interrupt-controller {
+		compatible = "mti,cpu-interrupt-controller";
+
+		interrupt-controller;
+		#interrupt-cells = <1>;
+	};
+
+	gic: interrupt-controller@1b1c0000 {
+		compatible = "mti,gic";
+		reg = <0x1b1c0000 0x20000>;
+
+		interrupt-controller;
+		#interrupt-cells = <3>;
+
+		/*
+		 * Declare the interrupt-parent even though the mti,gic
+		 * binding doesn't require it, such that the kernel can
+		 * figure out that cpu_intc is the root interrupt
+		 * controller & should be probed first.
+		 */
+		interrupt-parent = <&cpu_intc>;
+
+		timer {
+			compatible = "mti,gic-timer";
+			interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>;
+		};
+	};
+
+	ehci@1b200000 {
+		compatible = "generic-ehci";
+		reg = <0x1b200000 0x1000>;
+
+		interrupts = <0>; /* GIC 0 or CPU 6 */
+
+		has-transaction-translator;
+	};
+
+	flash@1c000000 {
+		compatible = "intel,28f128j3", "cfi-flash";
+		reg = <0x1c000000 0x2000000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		bank-width = <4>;
+
+		partitions {
+			compatible = "fixed-partitions";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			user-fs@0 {
+				label = "User FS";
+				reg = <0x0 0x1fc0000>;
+			};
+
+			board-config@3e0000 {
+				label = "Board Config";
+				reg = <0x1fc0000 0x40000>;
+			};
+		};
+	};
+
+	fpga_regs: system-controller@1f000000 {
+		compatible = "mti,sead3-fpga", "syscon", "simple-mfd";
+		reg = <0x1f000000 0x200>;
+
+		reboot {
+			compatible = "syscon-reboot";
+			regmap = <&fpga_regs>;
+			offset = <0x50>;
+			mask = <0x4d>;
+		};
+
+		poweroff {
+			compatible = "restart-poweroff";
+		};
+	};
+
+	system-controller@1f000200 {
+		compatible = "mti,sead3-cpld", "syscon", "simple-mfd";
+		reg = <0x1f000200 0x300>;
+
+		led@10.0 {
+			compatible = "register-bit-led";
+			offset = <0x10>;
+			mask = <0x1>;
+			label = "pled0";
+		};
+		led@10.1 {
+			compatible = "register-bit-led";
+			offset = <0x10>;
+			mask = <0x2>;
+			label = "pled1";
+		};
+		led@10.2 {
+			compatible = "register-bit-led";
+			offset = <0x10>;
+			mask = <0x4>;
+			label = "pled2";
+		};
+		led@10.3 {
+			compatible = "register-bit-led";
+			offset = <0x10>;
+			mask = <0x8>;
+			label = "pled3";
+		};
+		led@10.4 {
+			compatible = "register-bit-led";
+			offset = <0x10>;
+			mask = <0x10>;
+			label = "pled4";
+		};
+		led@10.5 {
+			compatible = "register-bit-led";
+			offset = <0x10>;
+			mask = <0x20>;
+			label = "pled5";
+		};
+		led@10.6 {
+			compatible = "register-bit-led";
+			offset = <0x10>;
+			mask = <0x40>;
+			label = "pled6";
+		};
+		led@10.7 {
+			compatible = "register-bit-led";
+			offset = <0x10>;
+			mask = <0x80>;
+			label = "pled7";
+		};
+
+		led@18.0 {
+			compatible = "register-bit-led";
+			offset = <0x18>;
+			mask = <0x1>;
+			label = "fled0";
+		};
+		led@18.1 {
+			compatible = "register-bit-led";
+			offset = <0x18>;
+			mask = <0x2>;
+			label = "fled1";
+		};
+		led@18.2 {
+			compatible = "register-bit-led";
+			offset = <0x18>;
+			mask = <0x4>;
+			label = "fled2";
+		};
+		led@18.3 {
+			compatible = "register-bit-led";
+			offset = <0x18>;
+			mask = <0x8>;
+			label = "fled3";
+		};
+		led@18.4 {
+			compatible = "register-bit-led";
+			offset = <0x18>;
+			mask = <0x10>;
+			label = "fled4";
+		};
+		led@18.5 {
+			compatible = "register-bit-led";
+			offset = <0x18>;
+			mask = <0x20>;
+			label = "fled5";
+		};
+		led@18.6 {
+			compatible = "register-bit-led";
+			offset = <0x18>;
+			mask = <0x40>;
+			label = "fled6";
+		};
+		led@18.7 {
+			compatible = "register-bit-led";
+			offset = <0x18>;
+			mask = <0x80>;
+			label = "fled7";
+		};
+
+		lcd@200 {
+			compatible = "mti,sead3-lcd";
+			offset = <0x200>;
+		};
+	};
+
+	/* UART connected to FTDI & miniUSB socket */
+	uart0: uart@1f000900 {
+		compatible = "ns16550a";
+		reg = <0x1f000900 0x20>;
+		reg-io-width = <4>;
+		reg-shift = <2>;
+
+		clock-frequency = <14745600>;
+
+		interrupts = <3>; /* GIC 3 or CPU 4 */
+
+		no-loopback-test;
+	};
+
+	/* UART connected to RS232 socket */
+	uart1: uart@1f000800 {
+		compatible = "ns16550a";
+		reg = <0x1f000800 0x20>;
+		reg-io-width = <4>;
+		reg-shift = <2>;
+
+		clock-frequency = <14745600>;
+
+		interrupts = <2>; /* GIC 2 or CPU 4 */
+
+		no-loopback-test;
+	};
+
+	eth@1f010000 {
+		compatible = "smsc,lan9115";
+		reg = <0x1f010000 0x10000>;
+		reg-io-width = <4>;
+
+		interrupts = <0>; /* GIC 0 or CPU 6 */
+
+		phy-mode = "mii";
+		smsc,irq-push-pull;
+		smsc,save-mac-address;
+	};
 };
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
index ff49fc0..ab8362e 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -36,8 +36,6 @@
 
 #include <asm/octeon/cvmx-config.h>
 
-#include <asm/octeon/cvmx-mdio.h>
-
 #include <asm/octeon/cvmx-helper.h>
 #include <asm/octeon/cvmx-helper-util.h>
 #include <asm/octeon/cvmx-helper-board.h>
@@ -46,17 +44,6 @@
 #include <asm/octeon/cvmx-asxx-defs.h>
 
 /**
- * cvmx_override_board_link_get(int ipd_port) is a function
- * pointer. It is meant to allow customization of the process of
- * talking to a PHY to determine link speed. It is called every
- * time a PHY must be polled for link status. Users should set
- * this pointer to a function before calling any cvmx-helper
- * operations.
- */
-cvmx_helper_link_info_t(*cvmx_override_board_link_get) (int ipd_port) =
-    NULL;
-
-/**
  * Return the MII PHY address associated with the given IPD
  * port. A result of -1 means there isn't a MII capable PHY
  * connected to this port. On chips supporting multiple MII
@@ -222,12 +209,6 @@
 cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
 {
 	cvmx_helper_link_info_t result;
-	int phy_addr;
-	int is_broadcom_phy = 0;
-
-	/* Give the user a chance to override the processing of this function */
-	if (cvmx_override_board_link_get)
-		return cvmx_override_board_link_get(ipd_port);
 
 	/* Unless we fix it later, all links are defaulted to down */
 	result.u64 = 0;
@@ -263,8 +244,7 @@
 			result.s.full_duplex = 1;
 			result.s.speed = 1000;
 			return result;
-		} else		/* The other port uses a broadcom PHY */
-			is_broadcom_phy = 1;
+		}
 		break;
 	case CVMX_BOARD_TYPE_BBGW_REF:
 		/* Port 1 on these boards is always Gigabit */
@@ -282,108 +262,7 @@
 		break;
 	}
 
-	phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
-	if (phy_addr != -1) {
-		if (is_broadcom_phy) {
-			/*
-			 * Below we are going to read SMI/MDIO
-			 * register 0x19 which works on Broadcom
-			 * parts
-			 */
-			int phy_status =
-			    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
-					   0x19);
-			switch ((phy_status >> 8) & 0x7) {
-			case 0:
-				result.u64 = 0;
-				break;
-			case 1:
-				result.s.link_up = 1;
-				result.s.full_duplex = 0;
-				result.s.speed = 10;
-				break;
-			case 2:
-				result.s.link_up = 1;
-				result.s.full_duplex = 1;
-				result.s.speed = 10;
-				break;
-			case 3:
-				result.s.link_up = 1;
-				result.s.full_duplex = 0;
-				result.s.speed = 100;
-				break;
-			case 4:
-				result.s.link_up = 1;
-				result.s.full_duplex = 1;
-				result.s.speed = 100;
-				break;
-			case 5:
-				result.s.link_up = 1;
-				result.s.full_duplex = 1;
-				result.s.speed = 100;
-				break;
-			case 6:
-				result.s.link_up = 1;
-				result.s.full_duplex = 0;
-				result.s.speed = 1000;
-				break;
-			case 7:
-				result.s.link_up = 1;
-				result.s.full_duplex = 1;
-				result.s.speed = 1000;
-				break;
-			}
-		} else {
-			/*
-			 * This code assumes we are using a Marvell
-			 * Gigabit PHY. All the speed information can
-			 * be read from register 17 in one
-			 * go. Somebody using a different PHY will
-			 * need to handle it above in the board
-			 * specific area.
-			 */
-			int phy_status =
-			    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 17);
-
-			/*
-			 * If the resolve bit 11 isn't set, see if
-			 * autoneg is turned off (bit 12, reg 0). The
-			 * resolve bit doesn't get set properly when
-			 * autoneg is off, so force it.
-			 */
-			if ((phy_status & (1 << 11)) == 0) {
-				int auto_status =
-				    cvmx_mdio_read(phy_addr >> 8,
-						   phy_addr & 0xff, 0);
-				if ((auto_status & (1 << 12)) == 0)
-					phy_status |= 1 << 11;
-			}
-
-			/*
-			 * Only return a link if the PHY has finished
-			 * auto negotiation and set the resolved bit
-			 * (bit 11)
-			 */
-			if (phy_status & (1 << 11)) {
-				result.s.link_up = 1;
-				result.s.full_duplex = ((phy_status >> 13) & 1);
-				switch ((phy_status >> 14) & 3) {
-				case 0: /* 10 Mbps */
-					result.s.speed = 10;
-					break;
-				case 1: /* 100 Mbps */
-					result.s.speed = 100;
-					break;
-				case 2: /* 1 Gbps */
-					result.s.speed = 1000;
-					break;
-				case 3: /* Illegal */
-					result.u64 = 0;
-					break;
-				}
-			}
-		}
-	} else if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
+	if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
 		   || OCTEON_IS_MODEL(OCTEON_CN58XX)
 		   || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
 		/*
@@ -433,176 +312,6 @@
 }
 
 /**
- * This function as a board specific method of changing the PHY
- * speed, duplex, and auto-negotiation. This programs the PHY and
- * not Octeon. This can be used to force Octeon's links to
- * specific settings.
- *
- * @phy_addr:  The address of the PHY to program
- * @enable_autoneg:
- *		    Non zero if you want to enable auto-negotiation.
- * @link_info: Link speed to program. If the speed is zero and auto-negotiation
- *		    is enabled, all possible negotiation speeds are advertised.
- *
- * Returns Zero on success, negative on failure
- */
-int cvmx_helper_board_link_set_phy(int phy_addr,
-				   cvmx_helper_board_set_phy_link_flags_types_t
-				   link_flags,
-				   cvmx_helper_link_info_t link_info)
-{
-
-	/* Set the flow control settings based on link_flags */
-	if ((link_flags & set_phy_link_flags_flow_control_mask) !=
-	    set_phy_link_flags_flow_control_dont_touch) {
-		cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
-		reg_autoneg_adver.u16 =
-		    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
-				   CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
-		reg_autoneg_adver.s.asymmetric_pause =
-		    (link_flags & set_phy_link_flags_flow_control_mask) ==
-		    set_phy_link_flags_flow_control_enable;
-		reg_autoneg_adver.s.pause =
-		    (link_flags & set_phy_link_flags_flow_control_mask) ==
-		    set_phy_link_flags_flow_control_enable;
-		cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
-				CVMX_MDIO_PHY_REG_AUTONEG_ADVER,
-				reg_autoneg_adver.u16);
-	}
-
-	/* If speed isn't set and autoneg is on advertise all supported modes */
-	if ((link_flags & set_phy_link_flags_autoneg)
-	    && (link_info.s.speed == 0)) {
-		cvmx_mdio_phy_reg_control_t reg_control;
-		cvmx_mdio_phy_reg_status_t reg_status;
-		cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
-		cvmx_mdio_phy_reg_extended_status_t reg_extended_status;
-		cvmx_mdio_phy_reg_control_1000_t reg_control_1000;
-
-		reg_status.u16 =
-		    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
-				   CVMX_MDIO_PHY_REG_STATUS);
-		reg_autoneg_adver.u16 =
-		    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
-				   CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
-		reg_autoneg_adver.s.advert_100base_t4 =
-		    reg_status.s.capable_100base_t4;
-		reg_autoneg_adver.s.advert_10base_tx_full =
-		    reg_status.s.capable_10_full;
-		reg_autoneg_adver.s.advert_10base_tx_half =
-		    reg_status.s.capable_10_half;
-		reg_autoneg_adver.s.advert_100base_tx_full =
-		    reg_status.s.capable_100base_x_full;
-		reg_autoneg_adver.s.advert_100base_tx_half =
-		    reg_status.s.capable_100base_x_half;
-		cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
-				CVMX_MDIO_PHY_REG_AUTONEG_ADVER,
-				reg_autoneg_adver.u16);
-		if (reg_status.s.capable_extended_status) {
-			reg_extended_status.u16 =
-			    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
-					   CVMX_MDIO_PHY_REG_EXTENDED_STATUS);
-			reg_control_1000.u16 =
-			    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
-					   CVMX_MDIO_PHY_REG_CONTROL_1000);
-			reg_control_1000.s.advert_1000base_t_full =
-			    reg_extended_status.s.capable_1000base_t_full;
-			reg_control_1000.s.advert_1000base_t_half =
-			    reg_extended_status.s.capable_1000base_t_half;
-			cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
-					CVMX_MDIO_PHY_REG_CONTROL_1000,
-					reg_control_1000.u16);
-		}
-		reg_control.u16 =
-		    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
-				   CVMX_MDIO_PHY_REG_CONTROL);
-		reg_control.s.autoneg_enable = 1;
-		reg_control.s.restart_autoneg = 1;
-		cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
-				CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
-	} else if ((link_flags & set_phy_link_flags_autoneg)) {
-		cvmx_mdio_phy_reg_control_t reg_control;
-		cvmx_mdio_phy_reg_status_t reg_status;
-		cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
-		cvmx_mdio_phy_reg_control_1000_t reg_control_1000;
-
-		reg_status.u16 =
-		    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
-				   CVMX_MDIO_PHY_REG_STATUS);
-		reg_autoneg_adver.u16 =
-		    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
-				   CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
-		reg_autoneg_adver.s.advert_100base_t4 = 0;
-		reg_autoneg_adver.s.advert_10base_tx_full = 0;
-		reg_autoneg_adver.s.advert_10base_tx_half = 0;
-		reg_autoneg_adver.s.advert_100base_tx_full = 0;
-		reg_autoneg_adver.s.advert_100base_tx_half = 0;
-		if (reg_status.s.capable_extended_status) {
-			reg_control_1000.u16 =
-			    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
-					   CVMX_MDIO_PHY_REG_CONTROL_1000);
-			reg_control_1000.s.advert_1000base_t_full = 0;
-			reg_control_1000.s.advert_1000base_t_half = 0;
-		}
-		switch (link_info.s.speed) {
-		case 10:
-			reg_autoneg_adver.s.advert_10base_tx_full =
-			    link_info.s.full_duplex;
-			reg_autoneg_adver.s.advert_10base_tx_half =
-			    !link_info.s.full_duplex;
-			break;
-		case 100:
-			reg_autoneg_adver.s.advert_100base_tx_full =
-			    link_info.s.full_duplex;
-			reg_autoneg_adver.s.advert_100base_tx_half =
-			    !link_info.s.full_duplex;
-			break;
-		case 1000:
-			reg_control_1000.s.advert_1000base_t_full =
-			    link_info.s.full_duplex;
-			reg_control_1000.s.advert_1000base_t_half =
-			    !link_info.s.full_duplex;
-			break;
-		}
-		cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
-				CVMX_MDIO_PHY_REG_AUTONEG_ADVER,
-				reg_autoneg_adver.u16);
-		if (reg_status.s.capable_extended_status)
-			cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
-					CVMX_MDIO_PHY_REG_CONTROL_1000,
-					reg_control_1000.u16);
-		reg_control.u16 =
-		    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
-				   CVMX_MDIO_PHY_REG_CONTROL);
-		reg_control.s.autoneg_enable = 1;
-		reg_control.s.restart_autoneg = 1;
-		cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
-				CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
-	} else {
-		cvmx_mdio_phy_reg_control_t reg_control;
-		reg_control.u16 =
-		    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
-				   CVMX_MDIO_PHY_REG_CONTROL);
-		reg_control.s.autoneg_enable = 0;
-		reg_control.s.restart_autoneg = 1;
-		reg_control.s.duplex = link_info.s.full_duplex;
-		if (link_info.s.speed == 1000) {
-			reg_control.s.speed_msb = 1;
-			reg_control.s.speed_lsb = 0;
-		} else if (link_info.s.speed == 100) {
-			reg_control.s.speed_msb = 0;
-			reg_control.s.speed_lsb = 1;
-		} else if (link_info.s.speed == 10) {
-			reg_control.s.speed_msb = 0;
-			reg_control.s.speed_lsb = 0;
-		}
-		cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
-				CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
-	}
-	return 0;
-}
-
-/**
  * This function is called by cvmx_helper_interface_probe() after it
  * determines the number of ports Octeon can support on a specific
  * interface. This function is the per board location to override
@@ -676,48 +385,6 @@
 				       0xc);
 		}
 	} else if (cvmx_sysinfo_get()->board_type ==
-		   CVMX_BOARD_TYPE_CN3010_EVB_HS5) {
-		/*
-		 * Broadcom PHYs require differnet ASX
-		 * clocks. Unfortunately many boards don't define a
-		 * new board Id and simply mangle the
-		 * CN3010_EVB_HS5
-		 */
-		if (interface == 0) {
-			/*
-			 * Some boards use a hacked up bootloader that
-			 * identifies them as CN3010_EVB_HS5
-			 * evaluation boards.  This leads to all kinds
-			 * of configuration problems.  Detect one
-			 * case, and print warning, while trying to do
-			 * the right thing.
-			 */
-			int phy_addr = cvmx_helper_board_get_mii_address(0);
-			if (phy_addr != -1) {
-				int phy_identifier =
-				    cvmx_mdio_read(phy_addr >> 8,
-						   phy_addr & 0xff, 0x2);
-				/* Is it a Broadcom PHY? */
-				if (phy_identifier == 0x0143) {
-					cvmx_dprintf("\n");
-					cvmx_dprintf("ERROR:\n");
-					cvmx_dprintf
-					    ("ERROR: Board type is CVMX_BOARD_TYPE_CN3010_EVB_HS5, but Broadcom PHY found.\n");
-					cvmx_dprintf
-					    ("ERROR: The board type is mis-configured, and software malfunctions are likely.\n");
-					cvmx_dprintf
-					    ("ERROR: All boards require a unique board type to identify them.\n");
-					cvmx_dprintf("ERROR:\n");
-					cvmx_dprintf("\n");
-					cvmx_wait(1000000000);
-					cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX
-						       (0, interface), 5);
-					cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX
-						       (0, interface), 5);
-				}
-			}
-		}
-	} else if (cvmx_sysinfo_get()->board_type ==
 			CVMX_BOARD_TYPE_UBNT_E100) {
 		cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface), 0);
 		cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface), 0x10);
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
index f59c88e..671ab1d 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
@@ -33,8 +33,6 @@
 
 #include <asm/octeon/cvmx-config.h>
 
-
-#include <asm/octeon/cvmx-mdio.h>
 #include <asm/octeon/cvmx-pko.h>
 #include <asm/octeon/cvmx-helper.h>
 #include <asm/octeon/cvmx-helper-board.h>
@@ -243,8 +241,7 @@
 	/* enable the ports now */
 	for (port = 0; port < num_ports; port++) {
 		union cvmx_gmxx_prtx_cfg gmx_cfg;
-		cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port
-					  (interface, port));
+
 		gmx_cfg.u64 =
 		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
 		gmx_cfg.s.en = 1;
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
index 6f9609e..5437534 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
@@ -34,7 +34,6 @@
 
 #include <asm/octeon/cvmx-config.h>
 
-#include <asm/octeon/cvmx-mdio.h>
 #include <asm/octeon/cvmx-helper.h>
 #include <asm/octeon/cvmx-helper-board.h>
 
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
index a56ee59..d347fe1 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
@@ -234,8 +234,6 @@
 	cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), gmx_tx_int_en.u64);
 	cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), pcsx_int_en_reg.u64);
 
-	cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port(interface, 0));
-
 	/* (8) Enable packet reception */
 	xauiMiscCtl.s.gmxeno = 0;
 	cvmx_write_csr(CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index ff26d02..6456af6 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -841,7 +841,6 @@
 	int retry_cnt;
 	int retry_loop_cnt;
 	int i;
-	cvmx_helper_link_info_t link_info;
 
 	/* Save values for restore at end */
 	uint64_t prtx_cfg =
@@ -1002,15 +1001,6 @@
 		       (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
 		       frame_max);
 	cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)), 0);
-	/* Set link to down so autonegotiation will set it up again */
-	link_info.u64 = 0;
-	cvmx_helper_link_set(FIX_IPD_OUTPORT, link_info);
-
-	/*
-	 * Bring the link back up as autonegotiation is not done in
-	 * user applications.
-	 */
-	cvmx_helper_link_autoconf(FIX_IPD_OUTPORT);
 
 	CVMX_SYNC;
 	if (num_segs)
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index cb16fcc..9a2db1c 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -65,7 +65,8 @@
 extern void pci_console_init(const char *arg);
 #endif
 
-static unsigned long long MAX_MEMORY = 512ull << 20;
+static unsigned long long max_memory = ULLONG_MAX;
+static unsigned long long reserve_low_mem;
 
 DEFINE_SEMAPHORE(octeon_bootbus_sem);
 EXPORT_SYMBOL(octeon_bootbus_sem);
@@ -75,7 +76,6 @@
 struct cvmx_bootinfo *octeon_bootinfo;
 EXPORT_SYMBOL(octeon_bootinfo);
 
-static unsigned long long RESERVE_LOW_MEM = 0ull;
 #ifdef CONFIG_KEXEC
 #ifdef CONFIG_SMP
 /*
@@ -125,18 +125,18 @@
 	bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER;
 	bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER;
 
-	addr = (OCTEON_DDR0_BASE + RESERVE_LOW_MEM + low_reserved_bytes);
+	addr = (OCTEON_DDR0_BASE + reserve_low_mem + low_reserved_bytes);
 	bootmem_desc->head_addr = 0;
 
 	if (mem_size <= OCTEON_DDR0_SIZE) {
 		__cvmx_bootmem_phy_free(addr,
-				mem_size - RESERVE_LOW_MEM -
+				mem_size - reserve_low_mem -
 				low_reserved_bytes, 0);
 		return;
 	}
 
 	__cvmx_bootmem_phy_free(addr,
-			OCTEON_DDR0_SIZE - RESERVE_LOW_MEM -
+			OCTEON_DDR0_SIZE - reserve_low_mem -
 			low_reserved_bytes, 0);
 
 	mem_size -= OCTEON_DDR0_SIZE;
@@ -267,6 +267,17 @@
 	default_machine_crash_shutdown(regs);
 }
 
+#ifdef CONFIG_SMP
+void octeon_crash_smp_send_stop(void)
+{
+	int cpu;
+
+	/* disable watchdogs */
+	for_each_online_cpu(cpu)
+		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
+}
+#endif
+
 #endif /* CONFIG_KEXEC */
 
 #ifdef CONFIG_CAVIUM_RESERVE32
@@ -846,15 +857,15 @@
 
 	/* Default to 64MB in the simulator to speed things up */
 	if (octeon_is_simulation())
-		MAX_MEMORY = 64ull << 20;
+		max_memory = 64ull << 20;
 
 	arg = strstr(arcs_cmdline, "mem=");
 	if (arg) {
-		MAX_MEMORY = memparse(arg + 4, &p);
-		if (MAX_MEMORY == 0)
-			MAX_MEMORY = 32ull << 30;
+		max_memory = memparse(arg + 4, &p);
+		if (max_memory == 0)
+			max_memory = 32ull << 30;
 		if (*p == '@')
-			RESERVE_LOW_MEM = memparse(p + 1, &p);
+			reserve_low_mem = memparse(p + 1, &p);
 	}
 
 	arcs_cmdline[0] = 0;
@@ -864,11 +875,11 @@
 			cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
 		if ((strncmp(arg, "MEM=", 4) == 0) ||
 		    (strncmp(arg, "mem=", 4) == 0)) {
-			MAX_MEMORY = memparse(arg + 4, &p);
-			if (MAX_MEMORY == 0)
-				MAX_MEMORY = 32ull << 30;
+			max_memory = memparse(arg + 4, &p);
+			if (max_memory == 0)
+				max_memory = 32ull << 30;
 			if (*p == '@')
-				RESERVE_LOW_MEM = memparse(p + 1, &p);
+				reserve_low_mem = memparse(p + 1, &p);
 #ifdef CONFIG_KEXEC
 		} else if (strncmp(arg, "crashkernel=", 12) == 0) {
 			crashk_size = memparse(arg+12, &p);
@@ -911,6 +922,9 @@
 	_machine_kexec_shutdown = octeon_shutdown;
 	_machine_crash_shutdown = octeon_crash_shutdown;
 	_machine_kexec_prepare = octeon_kexec_prepare;
+#ifdef CONFIG_SMP
+	_crash_smp_send_stop = octeon_crash_smp_send_stop;
+#endif
 #endif
 
 	octeon_user_io_init();
@@ -957,13 +971,13 @@
 	 * to consistently work.
 	 */
 	mem_alloc_size = 4 << 20;
-	if (mem_alloc_size > MAX_MEMORY)
-		mem_alloc_size = MAX_MEMORY;
+	if (mem_alloc_size > max_memory)
+		mem_alloc_size = max_memory;
 
 /* Crashkernel ignores bootmem list. It relies on mem=X@Y option */
 #ifdef CONFIG_CRASH_DUMP
-	add_memory_region(RESERVE_LOW_MEM, MAX_MEMORY, BOOT_MEM_RAM);
-	total += MAX_MEMORY;
+	add_memory_region(reserve_low_mem, max_memory, BOOT_MEM_RAM);
+	total += max_memory;
 #else
 #ifdef CONFIG_KEXEC
 	if (crashk_size > 0) {
@@ -978,7 +992,7 @@
 	 */
 	cvmx_bootmem_lock();
 	while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
-		&& (total < MAX_MEMORY)) {
+		&& (total < max_memory)) {
 		memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
 						__pa_symbol(&_end), -1,
 						0x100000,
diff --git a/arch/mips/configs/generic/32r1.config b/arch/mips/configs/generic/32r1.config
new file mode 100644
index 0000000..a11cd87
--- /dev/null
+++ b/arch/mips/configs/generic/32r1.config
@@ -0,0 +1,2 @@
+CONFIG_CPU_MIPS32_R1=y
+CONFIG_HIGHMEM=y
diff --git a/arch/mips/configs/generic/32r2.config b/arch/mips/configs/generic/32r2.config
new file mode 100644
index 0000000..9570672
--- /dev/null
+++ b/arch/mips/configs/generic/32r2.config
@@ -0,0 +1,3 @@
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_O32_FP64_SUPPORT=y
+CONFIG_HIGHMEM=y
diff --git a/arch/mips/configs/generic/32r6.config b/arch/mips/configs/generic/32r6.config
new file mode 100644
index 0000000..ca606e7
--- /dev/null
+++ b/arch/mips/configs/generic/32r6.config
@@ -0,0 +1,2 @@
+CONFIG_CPU_MIPS32_R6=y
+CONFIG_HIGHMEM=y
diff --git a/arch/mips/configs/generic/64r1.config b/arch/mips/configs/generic/64r1.config
new file mode 100644
index 0000000..7c1ea7e
--- /dev/null
+++ b/arch/mips/configs/generic/64r1.config
@@ -0,0 +1,4 @@
+CONFIG_CPU_MIPS64_R1=y
+CONFIG_64BIT=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
diff --git a/arch/mips/configs/generic/64r2.config b/arch/mips/configs/generic/64r2.config
new file mode 100644
index 0000000..b4d31ae
--- /dev/null
+++ b/arch/mips/configs/generic/64r2.config
@@ -0,0 +1,5 @@
+CONFIG_CPU_MIPS64_R2=y
+CONFIG_MIPS_O32_FP64_SUPPORT=y
+CONFIG_64BIT=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
diff --git a/arch/mips/configs/generic/64r6.config b/arch/mips/configs/generic/64r6.config
new file mode 100644
index 0000000..7cac033
--- /dev/null
+++ b/arch/mips/configs/generic/64r6.config
@@ -0,0 +1,4 @@
+CONFIG_CPU_MIPS64_R6=y
+CONFIG_64BIT=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
diff --git a/arch/mips/configs/generic/board-sead-3.config b/arch/mips/configs/generic/board-sead-3.config
new file mode 100644
index 0000000..3b5e1ac
--- /dev/null
+++ b/arch/mips/configs/generic/board-sead-3.config
@@ -0,0 +1,32 @@
+CONFIG_LEGACY_BOARD_SEAD3=y
+
+CONFIG_AUXDISPLAY=y
+CONFIG_IMG_ASCII_LCD=y
+
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_SYSCON=y
+
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
+
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_GLUEBI=y
+
+CONFIG_NETDEVICES=y
+CONFIG_SMSC911X=y
+CONFIG_SMSC_PHY=y
+
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
diff --git a/arch/mips/configs/generic/eb.config b/arch/mips/configs/generic/eb.config
new file mode 100644
index 0000000..c5cdc99
--- /dev/null
+++ b/arch/mips/configs/generic/eb.config
@@ -0,0 +1 @@
+CONFIG_CPU_BIG_ENDIAN=y
diff --git a/arch/mips/configs/generic/el.config b/arch/mips/configs/generic/el.config
new file mode 100644
index 0000000..ee43fdb
--- /dev/null
+++ b/arch/mips/configs/generic/el.config
@@ -0,0 +1 @@
+CONFIG_CPU_LITTLE_ENDIAN=y
diff --git a/arch/mips/configs/generic/micro32r2.config b/arch/mips/configs/generic/micro32r2.config
new file mode 100644
index 0000000..b701fe7
--- /dev/null
+++ b/arch/mips/configs/generic/micro32r2.config
@@ -0,0 +1,4 @@
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MICROMIPS=y
+CONFIG_MIPS_O32_FP64_SUPPORT=y
+CONFIG_HIGHMEM=y
diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig
new file mode 100644
index 0000000..c95d94c
--- /dev/null
+++ b/arch/mips/configs/generic_defconfig
@@ -0,0 +1,96 @@
+CONFIG_MIPS_GENERIC=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_MIPS_CPS=y
+CONFIG_CPU_HAS_MSA=y
+CONFIG_HIGHMEM=y
+CONFIG_NR_CPUS=2
+CONFIG_MIPS_O32_FP64_SUPPORT=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_USERFAULTFD=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_TRIM_UNUSED_KSYMS=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_NETFILTER=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_SCSI=y
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_MFD_SYSCON=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MIPS_PLATFORM_DEVICES is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_FANOTIFY=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_REDUCED=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon"
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
diff --git a/arch/mips/configs/loongson1c_defconfig b/arch/mips/configs/loongson1c_defconfig
new file mode 100644
index 0000000..2304d41
--- /dev/null
+++ b/arch/mips/configs/loongson1c_defconfig
@@ -0,0 +1,126 @@
+CONFIG_MACH_LOONGSON32=y
+CONFIG_LOONGSON1_LS1C=y
+CONFIG_PREEMPT=y
+# CONFIG_SECCOMP is not set
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_XZ=y
+CONFIG_SYSVIPC=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_NAMESPACES=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_LOONGSON1=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_SCSI=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=m
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=8
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_LOONGSON1=y
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_HID_GENERIC=m
+CONFIG_USB_HID=m
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_LOONGSON1=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_UBIFS_ATIME_SUPPORT=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_DYNAMIC_DEBUG=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+# CONFIG_EARLY_PRINTK is not set
+# CONFIG_CRYPTO_ECHAINIV is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 5afb484..58d43f3 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -230,7 +230,7 @@
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
 CONFIG_MTD_UBI=m
 CONFIG_MTD_UBI_GLUEBI=m
 CONFIG_BLK_DEV_FD=m
@@ -318,6 +318,8 @@
 # CONFIG_SERIO_I8042 is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FB_CIRRUS=y
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index 98f1387..c8f7e28 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -235,7 +235,7 @@
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
 CONFIG_MTD_UBI=m
 CONFIG_MTD_UBI_GLUEBI=m
 CONFIG_BLK_DEV_FD=m
@@ -331,6 +331,8 @@
 # CONFIG_SERIO_I8042 is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FB_CIRRUS=y
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
index 3b5d591..d2f54e5 100644
--- a/arch/mips/configs/malta_kvm_guest_defconfig
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -234,7 +234,7 @@
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
 CONFIG_MTD_UBI=m
 CONFIG_MTD_UBI_GLUEBI=m
 CONFIG_BLK_DEV_FD=m
@@ -331,6 +331,8 @@
 # CONFIG_SERIO_I8042 is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FB_CIRRUS=y
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
index 65f140e..cbf37dd 100644
--- a/arch/mips/configs/malta_qemu_32r6_defconfig
+++ b/arch/mips/configs/malta_qemu_32r6_defconfig
@@ -132,6 +132,8 @@
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_HW_RANDOM=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
index 799c433..35f6ba2 100644
--- a/arch/mips/configs/maltaaprp_defconfig
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -132,6 +132,8 @@
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_HW_RANDOM=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_VIDEO_OUTPUT_CONTROL=m
 CONFIG_FB=y
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
index ac0eb4d..900f145 100644
--- a/arch/mips/configs/maltasmvp_defconfig
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -134,6 +134,8 @@
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_HW_RANDOM=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
index 3184600..8e2738b 100644
--- a/arch/mips/configs/maltasmvp_eva_defconfig
+++ b/arch/mips/configs/maltasmvp_eva_defconfig
@@ -137,6 +137,8 @@
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_HW_RANDOM=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_VIDEO_OUTPUT_CONTROL=m
 CONFIG_FB=y
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
index a79107d..6dc4e30 100644
--- a/arch/mips/configs/maltaup_defconfig
+++ b/arch/mips/configs/maltaup_defconfig
@@ -131,6 +131,8 @@
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_HW_RANDOM=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_VIDEO_OUTPUT_CONTROL=m
 CONFIG_FB=y
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 7322157..3d0d9cb 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -231,7 +231,7 @@
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
 CONFIG_MTD_UBI=m
 CONFIG_MTD_UBI_GLUEBI=m
 CONFIG_BLK_DEV_FD=m
@@ -326,6 +326,8 @@
 # CONFIG_SERIO_I8042 is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FB_CIRRUS=y
diff --git a/arch/mips/configs/pistachio_defconfig b/arch/mips/configs/pistachio_defconfig
index 8b74291..7d32fbb 100644
--- a/arch/mips/configs/pistachio_defconfig
+++ b/arch/mips/configs/pistachio_defconfig
@@ -29,7 +29,6 @@
 CONFIG_EMBEDDED=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
-CONFIG_CC_STACKPROTECTOR_STRONG=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
@@ -264,7 +263,6 @@
 CONFIG_IMG_MDC_DMA=y
 CONFIG_STAGING=y
 CONFIG_ASHMEM=y
-# CONFIG_ANDROID_TIMED_OUTPUT is not set
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_MEMORY=y
 CONFIG_IIO=y
diff --git a/arch/mips/configs/sead3_defconfig b/arch/mips/configs/sead3_defconfig
deleted file mode 100644
index dae9354..0000000
--- a/arch/mips/configs/sead3_defconfig
+++ /dev/null
@@ -1,121 +0,0 @@
-CONFIG_MIPS_SEAD3=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_HZ_100=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=15
-CONFIG_EMBEDDED=y
-CONFIG_SLAB=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
-CONFIG_MODULES=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_MTD=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_UBI=y
-CONFIG_MTD_UBI_GLUEBI=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_SCSI=y
-# CONFIG_SCSI_PROC_FS is not set
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_NETDEVICES=y
-CONFIG_SMSC911X=y
-# CONFIG_NET_VENDOR_WIZNET is not set
-CONFIG_MARVELL_PHY=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_QSEMI_PHY=y
-CONFIG_LXT_PHY=y
-CONFIG_CICADA_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_SMSC_PHY=y
-CONFIG_BROADCOM_PHY=y
-CONFIG_ICPLUS_PHY=y
-# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_CONSOLE_TRANSLATIONS is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_LEGACY_PTY_COUNT=32
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-# CONFIG_I2C_COMPAT is not set
-CONFIG_I2C_CHARDEV=y
-# CONFIG_I2C_HELPER_AUTO is not set
-CONFIG_SPI=y
-CONFIG_SENSORS_ADT7475=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_LCD_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_MMC_DEBUG=y
-CONFIG_MMC_SPI=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_M41T80=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_XFS_FS=y
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_QUOTA=y
-# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ASCII=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_15=y
-CONFIG_NLS_UTF8=y
-# CONFIG_FTRACE is not set
-CONFIG_CRYPTO_CBC=y
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_ARC4=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/sead3micro_defconfig b/arch/mips/configs/sead3micro_defconfig
deleted file mode 100644
index cd91a77..0000000
--- a/arch/mips/configs/sead3micro_defconfig
+++ /dev/null
@@ -1,122 +0,0 @@
-CONFIG_MIPS_SEAD3=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_CPU_MICROMIPS=y
-CONFIG_HZ_100=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=15
-CONFIG_EMBEDDED=y
-CONFIG_SLAB=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
-CONFIG_MODULES=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_MTD=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_UBI=y
-CONFIG_MTD_UBI_GLUEBI=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_SCSI=y
-# CONFIG_SCSI_PROC_FS is not set
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_NETDEVICES=y
-CONFIG_SMSC911X=y
-# CONFIG_NET_VENDOR_WIZNET is not set
-CONFIG_MARVELL_PHY=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_QSEMI_PHY=y
-CONFIG_LXT_PHY=y
-CONFIG_CICADA_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_SMSC_PHY=y
-CONFIG_BROADCOM_PHY=y
-CONFIG_ICPLUS_PHY=y
-# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_CONSOLE_TRANSLATIONS is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_LEGACY_PTY_COUNT=32
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-# CONFIG_I2C_COMPAT is not set
-CONFIG_I2C_CHARDEV=y
-# CONFIG_I2C_HELPER_AUTO is not set
-CONFIG_SPI=y
-CONFIG_SENSORS_ADT7475=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_LCD_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_MMC_DEBUG=y
-CONFIG_MMC_SPI=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_M41T80=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_XFS_FS=y
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_QUOTA=y
-# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ASCII=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_15=y
-CONFIG_NLS_UTF8=y
-# CONFIG_FTRACE is not set
-CONFIG_CRYPTO_CBC=y
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_ARC4=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/generic/Kconfig b/arch/mips/generic/Kconfig
new file mode 100644
index 0000000..a606b3f
--- /dev/null
+++ b/arch/mips/generic/Kconfig
@@ -0,0 +1,19 @@
+if MIPS_GENERIC
+
+config LEGACY_BOARDS
+	bool
+	help
+	  Select this from your board if the board must use a legacy, non-UHI,
+	  boot protocol. This will cause the kernel to scan through the list of
+	  supported machines calling their detect functions in turn if the
+	  kernel is booted without being provided with an FDT via the UHI
+	  boot protocol.
+
+config LEGACY_BOARD_SEAD3
+	bool "Support MIPS SEAD-3 boards"
+	select LEGACY_BOARDS
+	help
+	  Enable this to include support for booting on MIPS SEAD-3 FPGA-based
+	  development boards, which boot using a legacy boot protocol.
+
+endif
diff --git a/arch/mips/generic/Makefile b/arch/mips/generic/Makefile
new file mode 100644
index 0000000..7c66494
--- /dev/null
+++ b/arch/mips/generic/Makefile
@@ -0,0 +1,15 @@
+#
+# Copyright (C) 2016 Imagination Technologies
+# Author: Paul Burton <paul.burton@imgtec.com>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation;  either version 2 of the  License, or (at your
+# option) any later version.
+#
+
+obj-y += init.o
+obj-y += irq.o
+obj-y += proc.o
+
+obj-$(CONFIG_LEGACY_BOARD_SEAD3)	+= board-sead3.o
diff --git a/arch/mips/generic/Platform b/arch/mips/generic/Platform
new file mode 100644
index 0000000..9a30d69
--- /dev/null
+++ b/arch/mips/generic/Platform
@@ -0,0 +1,14 @@
+#
+# Copyright (C) 2016 Imagination Technologies
+# Author: Paul Burton <paul.burton@imgtec.com>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation;  either version 2 of the  License, or (at your
+# option) any later version.
+#
+
+platform-$(CONFIG_MIPS_GENERIC)	+= generic/
+cflags-$(CONFIG_MIPS_GENERIC)	+= -I$(srctree)/arch/mips/include/asm/mach-generic
+load-$(CONFIG_MIPS_GENERIC)	+= 0xffffffff80100000
+all-$(CONFIG_MIPS_GENERIC)	:= vmlinux.gz.itb
diff --git a/arch/mips/generic/board-sead3.c b/arch/mips/generic/board-sead3.c
new file mode 100644
index 0000000..f4ae058
--- /dev/null
+++ b/arch/mips/generic/board-sead3.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#define pr_fmt(fmt) "sead3: " fmt
+
+#include <linux/errno.h>
+#include <linux/libfdt.h>
+#include <linux/printk.h>
+
+#include <asm/fw/fw.h>
+#include <asm/io.h>
+#include <asm/machine.h>
+
+#define SEAD_CONFIG			CKSEG1ADDR(0x1b100110)
+#define SEAD_CONFIG_GIC_PRESENT		BIT(1)
+
+#define MIPS_REVISION			CKSEG1ADDR(0x1fc00010)
+#define MIPS_REVISION_MACHINE		(0xf << 4)
+#define MIPS_REVISION_MACHINE_SEAD3	(0x4 << 4)
+
+static __init bool sead3_detect(void)
+{
+	uint32_t rev;
+
+	rev = __raw_readl((void *)MIPS_REVISION);
+	return (rev & MIPS_REVISION_MACHINE) == MIPS_REVISION_MACHINE_SEAD3;
+}
+
+static __init int append_cmdline(void *fdt)
+{
+	int err, chosen_off;
+
+	/* find or add chosen node */
+	chosen_off = fdt_path_offset(fdt, "/chosen");
+	if (chosen_off == -FDT_ERR_NOTFOUND)
+		chosen_off = fdt_path_offset(fdt, "/chosen@0");
+	if (chosen_off == -FDT_ERR_NOTFOUND)
+		chosen_off = fdt_add_subnode(fdt, 0, "chosen");
+	if (chosen_off < 0) {
+		pr_err("Unable to find or add DT chosen node: %d\n",
+		       chosen_off);
+		return chosen_off;
+	}
+
+	err = fdt_setprop_string(fdt, chosen_off, "bootargs", fw_getcmdline());
+	if (err) {
+		pr_err("Unable to set bootargs property: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static __init int append_memory(void *fdt)
+{
+	unsigned long phys_memsize, memsize;
+	__be32 mem_array[2];
+	int err, mem_off;
+	char *var;
+
+	/* find memory size from the bootloader environment */
+	var = fw_getenv("memsize");
+	if (var) {
+		err = kstrtoul(var, 0, &phys_memsize);
+		if (err) {
+			pr_err("Failed to read memsize env variable '%s'\n",
+			       var);
+			return -EINVAL;
+		}
+	} else {
+		pr_warn("The bootloader didn't provide memsize: defaulting to 32MB\n");
+		phys_memsize = 32 << 20;
+	}
+
+	/* default to using all available RAM */
+	memsize = phys_memsize;
+
+	/* allow the user to override the usable memory */
+	var = strstr(arcs_cmdline, "memsize=");
+	if (var)
+		memsize = memparse(var + strlen("memsize="), NULL);
+
+	/* if the user says there's more RAM than we thought, believe them */
+	phys_memsize = max_t(unsigned long, phys_memsize, memsize);
+
+	/* find or add a memory node */
+	mem_off = fdt_path_offset(fdt, "/memory");
+	if (mem_off == -FDT_ERR_NOTFOUND)
+		mem_off = fdt_add_subnode(fdt, 0, "memory");
+	if (mem_off < 0) {
+		pr_err("Unable to find or add memory DT node: %d\n", mem_off);
+		return mem_off;
+	}
+
+	err = fdt_setprop_string(fdt, mem_off, "device_type", "memory");
+	if (err) {
+		pr_err("Unable to set memory node device_type: %d\n", err);
+		return err;
+	}
+
+	mem_array[0] = 0;
+	mem_array[1] = cpu_to_be32(phys_memsize);
+	err = fdt_setprop(fdt, mem_off, "reg", mem_array, sizeof(mem_array));
+	if (err) {
+		pr_err("Unable to set memory regs property: %d\n", err);
+		return err;
+	}
+
+	mem_array[0] = 0;
+	mem_array[1] = cpu_to_be32(memsize);
+	err = fdt_setprop(fdt, mem_off, "linux,usable-memory",
+			  mem_array, sizeof(mem_array));
+	if (err) {
+		pr_err("Unable to set linux,usable-memory property: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static __init int remove_gic(void *fdt)
+{
+	const unsigned int cpu_ehci_int = 2;
+	const unsigned int cpu_uart_int = 4;
+	const unsigned int cpu_eth_int = 6;
+	int gic_off, cpu_off, uart_off, eth_off, ehci_off, err;
+	uint32_t cfg, cpu_phandle;
+
+	/* leave the GIC node intact if a GIC is present */
+	cfg = __raw_readl((uint32_t *)SEAD_CONFIG);
+	if (cfg & SEAD_CONFIG_GIC_PRESENT)
+		return 0;
+
+	gic_off = fdt_node_offset_by_compatible(fdt, -1, "mti,gic");
+	if (gic_off < 0) {
+		pr_err("unable to find DT GIC node: %d\n", gic_off);
+		return gic_off;
+	}
+
+	err = fdt_nop_node(fdt, gic_off);
+	if (err) {
+		pr_err("unable to nop GIC node\n");
+		return err;
+	}
+
+	cpu_off = fdt_node_offset_by_compatible(fdt, -1,
+			"mti,cpu-interrupt-controller");
+	if (cpu_off < 0) {
+		pr_err("unable to find CPU intc node: %d\n", cpu_off);
+		return cpu_off;
+	}
+
+	cpu_phandle = fdt_get_phandle(fdt, cpu_off);
+	if (!cpu_phandle) {
+		pr_err("unable to get CPU intc phandle\n");
+		return -EINVAL;
+	}
+
+	err = fdt_setprop_u32(fdt, 0, "interrupt-parent", cpu_phandle);
+	if (err) {
+		pr_err("unable to set root interrupt-parent: %d\n", err);
+		return err;
+	}
+
+	uart_off = fdt_node_offset_by_compatible(fdt, -1, "ns16550a");
+	while (uart_off >= 0) {
+		err = fdt_setprop_u32(fdt, uart_off, "interrupts",
+				      cpu_uart_int);
+		if (err) {
+			pr_err("unable to set UART interrupts property: %d\n",
+			       err);
+			return err;
+		}
+
+		uart_off = fdt_node_offset_by_compatible(fdt, uart_off,
+							 "ns16550a");
+	}
+	if (uart_off != -FDT_ERR_NOTFOUND) {
+		pr_err("error searching for UART DT node: %d\n", uart_off);
+		return uart_off;
+	}
+
+	eth_off = fdt_node_offset_by_compatible(fdt, -1, "smsc,lan9115");
+	if (eth_off < 0) {
+		pr_err("unable to find ethernet DT node: %d\n", eth_off);
+		return eth_off;
+	}
+
+	err = fdt_setprop_u32(fdt, eth_off, "interrupts", cpu_eth_int);
+	if (err) {
+		pr_err("unable to set ethernet interrupts property: %d\n", err);
+		return err;
+	}
+
+	ehci_off = fdt_node_offset_by_compatible(fdt, -1, "generic-ehci");
+	if (ehci_off < 0) {
+		pr_err("unable to find EHCI DT node: %d\n", ehci_off);
+		return ehci_off;
+	}
+
+	err = fdt_setprop_u32(fdt, ehci_off, "interrupts", cpu_ehci_int);
+	if (err) {
+		pr_err("unable to set EHCI interrupts property: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static __init int serial_config(void *fdt)
+{
+	const char *yamontty, *mode_var;
+	char mode_var_name[9], path[18], parity;
+	unsigned int uart, baud, stop_bits;
+	bool hw_flow;
+	int chosen_off, err;
+
+	yamontty = fw_getenv("yamontty");
+	if (!yamontty || !strcmp(yamontty, "tty0")) {
+		uart = 0;
+	} else if (!strcmp(yamontty, "tty1")) {
+		uart = 1;
+	} else {
+		pr_warn("yamontty environment variable '%s' invalid\n",
+			yamontty);
+		uart = 0;
+	}
+
+	baud = stop_bits = 0;
+	parity = 0;
+	hw_flow = false;
+
+	snprintf(mode_var_name, sizeof(mode_var_name), "modetty%u", uart);
+	mode_var = fw_getenv(mode_var_name);
+	if (mode_var) {
+		while (mode_var[0] >= '0' && mode_var[0] <= '9') {
+			baud *= 10;
+			baud += mode_var[0] - '0';
+			mode_var++;
+		}
+		if (mode_var[0] == ',')
+			mode_var++;
+		if (mode_var[0])
+			parity = mode_var[0];
+		if (mode_var[0] == ',')
+			mode_var++;
+		if (mode_var[0])
+			stop_bits = mode_var[0] - '0';
+		if (mode_var[0] == ',')
+			mode_var++;
+		if (!strcmp(mode_var, "hw"))
+			hw_flow = true;
+	}
+
+	if (!baud)
+		baud = 38400;
+
+	if (parity != 'e' && parity != 'n' && parity != 'o')
+		parity = 'n';
+
+	if (stop_bits != 7 && stop_bits != 8)
+		stop_bits = 8;
+
+	WARN_ON(snprintf(path, sizeof(path), "uart%u:%u%c%u%s",
+			 uart, baud, parity, stop_bits,
+			 hw_flow ? "r" : "") >= sizeof(path));
+
+	/* find or add chosen node */
+	chosen_off = fdt_path_offset(fdt, "/chosen");
+	if (chosen_off == -FDT_ERR_NOTFOUND)
+		chosen_off = fdt_path_offset(fdt, "/chosen@0");
+	if (chosen_off == -FDT_ERR_NOTFOUND)
+		chosen_off = fdt_add_subnode(fdt, 0, "chosen");
+	if (chosen_off < 0) {
+		pr_err("Unable to find or add DT chosen node: %d\n",
+		       chosen_off);
+		return chosen_off;
+	}
+
+	err = fdt_setprop_string(fdt, chosen_off, "stdout-path", path);
+	if (err) {
+		pr_err("Unable to set stdout-path property: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static __init const void *sead3_fixup_fdt(const void *fdt,
+					  const void *match_data)
+{
+	static unsigned char fdt_buf[16 << 10] __initdata;
+	int err;
+
+	if (fdt_check_header(fdt))
+		panic("Corrupt DT");
+
+	/* if this isn't SEAD3, something went wrong */
+	BUG_ON(fdt_node_check_compatible(fdt, 0, "mti,sead-3"));
+
+	fw_init_cmdline();
+
+	err = fdt_open_into(fdt, fdt_buf, sizeof(fdt_buf));
+	if (err)
+		panic("Unable to open FDT: %d", err);
+
+	err = append_cmdline(fdt_buf);
+	if (err)
+		panic("Unable to patch FDT: %d", err);
+
+	err = append_memory(fdt_buf);
+	if (err)
+		panic("Unable to patch FDT: %d", err);
+
+	err = remove_gic(fdt_buf);
+	if (err)
+		panic("Unable to patch FDT: %d", err);
+
+	err = serial_config(fdt_buf);
+	if (err)
+		panic("Unable to patch FDT: %d", err);
+
+	err = fdt_pack(fdt_buf);
+	if (err)
+		panic("Unable to pack FDT: %d\n", err);
+
+	return fdt_buf;
+}
+
+static __init unsigned int sead3_measure_hpt_freq(void)
+{
+	void __iomem *status_reg = (void __iomem *)0xbf000410;
+	unsigned int freq, orig, tick = 0;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	orig = readl(status_reg) & 0x2;		      /* get original sample */
+	/* wait for transition */
+	while ((readl(status_reg) & 0x2) == orig)
+		;
+	orig = orig ^ 0x2;			      /* flip the bit */
+
+	write_c0_count(0);
+
+	/* wait 1 second (the sampling clock transitions every 10ms) */
+	while (tick < 100) {
+		/* wait for transition */
+		while ((readl(status_reg) & 0x2) == orig)
+			;
+		orig = orig ^ 0x2;			      /* flip the bit */
+		tick++;
+	}
+
+	freq = read_c0_count();
+
+	local_irq_restore(flags);
+
+	return freq;
+}
+
+extern char __dtb_sead3_begin[];
+
+MIPS_MACHINE(sead3) = {
+	.fdt = __dtb_sead3_begin,
+	.detect = sead3_detect,
+	.fixup_fdt = sead3_fixup_fdt,
+	.measure_hpt_freq = sead3_measure_hpt_freq,
+};
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
new file mode 100644
index 0000000..0ea73e8
--- /dev/null
+++ b/arch/mips/generic/init.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/irqchip.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+
+#include <asm/fw/fw.h>
+#include <asm/irq_cpu.h>
+#include <asm/machine.h>
+#include <asm/mips-cpc.h>
+#include <asm/prom.h>
+#include <asm/smp-ops.h>
+#include <asm/time.h>
+
+static __initdata const void *fdt;
+static __initdata const struct mips_machine *mach;
+static __initdata const void *mach_match_data;
+
+void __init prom_init(void)
+{
+	const struct mips_machine *check_mach;
+	const struct of_device_id *match;
+
+	if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_arg1)) {
+		/*
+		 * We booted using the UHI boot protocol, so we have been
+		 * provided with the appropriate device tree for the board.
+		 * Make use of it & search for any machine struct based upon
+		 * the root compatible string.
+		 */
+		fdt = (void *)fw_arg1;
+
+		for_each_mips_machine(check_mach) {
+			match = mips_machine_is_compatible(check_mach, fdt);
+			if (match) {
+				mach = check_mach;
+				mach_match_data = match->data;
+				break;
+			}
+		}
+	} else if (IS_ENABLED(CONFIG_LEGACY_BOARDS)) {
+		/*
+		 * We weren't booted using the UHI boot protocol, but do
+		 * support some number of boards with legacy boot protocols.
+		 * Attempt to find the right one.
+		 */
+		for_each_mips_machine(check_mach) {
+			if (!check_mach->detect)
+				continue;
+
+			if (!check_mach->detect())
+				continue;
+
+			mach = check_mach;
+		}
+
+		/*
+		 * If we don't recognise the machine then we can't continue, so
+		 * die here.
+		 */
+		BUG_ON(!mach);
+
+		/* Retrieve the machine's FDT */
+		fdt = mach->fdt;
+	}
+
+	BUG_ON(!fdt);
+}
+
+void __init *plat_get_fdt(void)
+{
+	return (void *)fdt;
+}
+
+void __init plat_mem_setup(void)
+{
+	if (mach && mach->fixup_fdt)
+		fdt = mach->fixup_fdt(fdt, mach_match_data);
+
+	strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+	__dt_setup_arch((void *)fdt);
+}
+
+void __init device_tree_init(void)
+{
+	int err;
+
+	unflatten_and_copy_device_tree();
+	mips_cpc_probe();
+
+	err = register_cps_smp_ops();
+	if (err)
+		err = register_up_smp_ops();
+}
+
+void __init plat_time_init(void)
+{
+	struct device_node *np;
+	struct clk *clk;
+
+	of_clk_init(NULL);
+
+	if (!cpu_has_counter) {
+		mips_hpt_frequency = 0;
+	} else if (mach && mach->measure_hpt_freq) {
+		mips_hpt_frequency = mach->measure_hpt_freq();
+	} else {
+		np = of_get_cpu_node(0, NULL);
+		if (!np) {
+			pr_err("Failed to get CPU node\n");
+			return;
+		}
+
+		clk = of_clk_get(np, 0);
+		if (IS_ERR(clk)) {
+			pr_err("Failed to get CPU clock: %ld\n", PTR_ERR(clk));
+			return;
+		}
+
+		mips_hpt_frequency = clk_get_rate(clk);
+		clk_put(clk);
+
+		switch (boot_cpu_type()) {
+		case CPU_20KC:
+		case CPU_25KF:
+			/* The counter runs at the CPU clock rate */
+			break;
+		default:
+			/* The counter runs at half the CPU clock rate */
+			mips_hpt_frequency /= 2;
+			break;
+		}
+	}
+
+	clocksource_probe();
+}
+
+void __init arch_init_irq(void)
+{
+	struct device_node *intc_node;
+
+	intc_node = of_find_compatible_node(NULL, NULL,
+					    "mti,cpu-interrupt-controller");
+	if (!cpu_has_veic && !intc_node)
+		mips_cpu_irq_init();
+
+	irqchip_init();
+}
+
+static int __init publish_devices(void)
+{
+	if (!of_have_populated_dt())
+		panic("Device-tree not present");
+
+	if (of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL))
+		panic("Failed to populate DT");
+
+	return 0;
+}
+arch_initcall(publish_devices);
+
+void __init prom_free_prom_memory(void)
+{
+}
diff --git a/arch/mips/generic/irq.c b/arch/mips/generic/irq.c
new file mode 100644
index 0000000..14064bd
--- /dev/null
+++ b/arch/mips/generic/irq.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/irqchip/mips-gic.h>
+#include <linux/types.h>
+
+#include <asm/irq.h>
+
+int get_c0_fdc_int(void)
+{
+	int mips_cpu_fdc_irq;
+
+	if (cpu_has_veic)
+		panic("Unimplemented!");
+	else if (gic_present)
+		mips_cpu_fdc_irq = gic_get_c0_fdc_int();
+	else if (cp0_fdc_irq >= 0)
+		mips_cpu_fdc_irq = MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
+	else
+		mips_cpu_fdc_irq = -1;
+
+	return mips_cpu_fdc_irq;
+}
+
+int get_c0_perfcount_int(void)
+{
+	int mips_cpu_perf_irq;
+
+	if (cpu_has_veic)
+		panic("Unimplemented!");
+	else if (gic_present)
+		mips_cpu_perf_irq = gic_get_c0_perfcount_int();
+	else if (cp0_perfcount_irq >= 0)
+		mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+	else
+		mips_cpu_perf_irq = -1;
+
+	return mips_cpu_perf_irq;
+}
+
+unsigned int get_c0_compare_int(void)
+{
+	int mips_cpu_timer_irq;
+
+	if (cpu_has_veic)
+		panic("Unimplemented!");
+	else if (gic_present)
+		mips_cpu_timer_irq = gic_get_c0_compare_int();
+	else
+		mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+
+	return mips_cpu_timer_irq;
+}
diff --git a/arch/mips/generic/proc.c b/arch/mips/generic/proc.c
new file mode 100644
index 0000000..42b3325
--- /dev/null
+++ b/arch/mips/generic/proc.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/of.h>
+
+#include <asm/bootinfo.h>
+
+const char *get_system_type(void)
+{
+	const char *str;
+	int err;
+
+	err = of_property_read_string(of_root, "model", &str);
+	if (!err)
+		return str;
+
+	err = of_property_read_string_index(of_root, "compatible", 0, &str);
+	if (!err)
+		return str;
+
+	return "Unknown";
+}
diff --git a/arch/mips/generic/vmlinux.its.S b/arch/mips/generic/vmlinux.its.S
new file mode 100644
index 0000000..f67fbf1
--- /dev/null
+++ b/arch/mips/generic/vmlinux.its.S
@@ -0,0 +1,31 @@
+/dts-v1/;
+
+/ {
+	description = KERNEL_NAME;
+	#address-cells = <ADDR_CELLS>;
+
+	images {
+		kernel@0 {
+			description = KERNEL_NAME;
+			data = /incbin/(VMLINUX_BINARY);
+			type = "kernel";
+			arch = "mips";
+			os = "linux";
+			compression = VMLINUX_COMPRESSION;
+			load = /bits/ ADDR_BITS <VMLINUX_LOAD_ADDRESS>;
+			entry = /bits/ ADDR_BITS <VMLINUX_ENTRY_ADDRESS>;
+			hash@0 {
+				algo = "sha1";
+			};
+		};
+	};
+
+	configurations {
+		default = "conf@default";
+
+		conf@default {
+			description = "Generic Linux kernel";
+			kernel = "kernel@0";
+		};
+	};
+};
diff --git a/arch/mips/include/asm/addrspace.h b/arch/mips/include/asm/addrspace.h
index c5b04e7..4856adc 100644
--- a/arch/mips/include/asm/addrspace.h
+++ b/arch/mips/include/asm/addrspace.h
@@ -126,8 +126,7 @@
 #define PHYS_TO_XKSEG_UNCACHED(p)	PHYS_TO_XKPHYS(K_CALG_UNCACHED, (p))
 #define PHYS_TO_XKSEG_CACHED(p)		PHYS_TO_XKPHYS(K_CALG_COH_SHAREABLE, (p))
 #define XKPHYS_TO_PHYS(p)		((p) & TO_PHYS_MASK)
-#define PHYS_TO_XKPHYS(cm, a)		(_CONST64_(0x8000000000000000) | \
-					 (_CONST64_(cm) << 59) | (a))
+#define PHYS_TO_XKPHYS(cm, a)		(XKPHYS | (_ACAST64_(cm) << 59) | (a))
 
 /*
  * The ultimate limited of the 64-bit MIPS architecture:  2 bits for selecting
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index d296633..a5eb1bb 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -10,6 +10,102 @@
 
 #include <asm/addrspace.h>
 
+/*
+ * Sync types defined by the MIPS architecture (document MD00087 table 6.5)
+ * These values are used with the sync instruction to perform memory barriers.
+ * Types of ordering guarantees available through the SYNC instruction:
+ * - Completion Barriers
+ * - Ordering Barriers
+ * As compared to the completion barrier, the ordering barrier is a
+ * lighter-weight operation as it does not require the specified instructions
+ * before the SYNC to be already completed. Instead it only requires that those
+ * specified instructions which are subsequent to the SYNC in the instruction
+ * stream are never re-ordered for processing ahead of the specified
+ * instructions which are before the SYNC in the instruction stream.
+ * This potentially reduces how many cycles the barrier instruction must stall
+ * before it completes.
+ * Implementations that do not use any of the non-zero values of stype to define
+ * different barriers, such as ordering barriers, must make those stype values
+ * act the same as stype zero.
+ */
+
+/*
+ * Completion barriers:
+ * - Every synchronizable specified memory instruction (loads or stores or both)
+ *   that occurs in the instruction stream before the SYNC instruction must be
+ *   already globally performed before any synchronizable specified memory
+ *   instructions that occur after the SYNC are allowed to be performed, with
+ *   respect to any other processor or coherent I/O module.
+ *
+ * - The barrier does not guarantee the order in which instruction fetches are
+ *   performed.
+ *
+ * - A stype value of zero will always be defined such that it performs the most
+ *   complete set of synchronization operations that are defined.This means
+ *   stype zero always does a completion barrier that affects both loads and
+ *   stores preceding the SYNC instruction and both loads and stores that are
+ *   subsequent to the SYNC instruction. Non-zero values of stype may be defined
+ *   by the architecture or specific implementations to perform synchronization
+ *   behaviors that are less complete than that of stype zero. If an
+ *   implementation does not use one of these non-zero values to define a
+ *   different synchronization behavior, then that non-zero value of stype must
+ *   act the same as stype zero completion barrier. This allows software written
+ *   for an implementation with a lighter-weight barrier to work on another
+ *   implementation which only implements the stype zero completion barrier.
+ *
+ * - A completion barrier is required, potentially in conjunction with SSNOP (in
+ *   Release 1 of the Architecture) or EHB (in Release 2 of the Architecture),
+ *   to guarantee that memory reference results are visible across operating
+ *   mode changes. For example, a completion barrier is required on some
+ *   implementations on entry to and exit from Debug Mode to guarantee that
+ *   memory effects are handled correctly.
+ */
+
+/*
+ * stype 0 - A completion barrier that affects preceding loads and stores and
+ * subsequent loads and stores.
+ * Older instructions which must reach the load/store ordering point before the
+ * SYNC instruction completes: Loads, Stores
+ * Younger instructions which must reach the load/store ordering point only
+ * after the SYNC instruction completes: Loads, Stores
+ * Older instructions which must be globally performed when the SYNC instruction
+ * completes: Loads, Stores
+ */
+#define STYPE_SYNC 0x0
+
+/*
+ * Ordering barriers:
+ * - Every synchronizable specified memory instruction (loads or stores or both)
+ *   that occurs in the instruction stream before the SYNC instruction must
+ *   reach a stage in the load/store datapath after which no instruction
+ *   re-ordering is possible before any synchronizable specified memory
+ *   instruction which occurs after the SYNC instruction in the instruction
+ *   stream reaches the same stage in the load/store datapath.
+ *
+ * - If any memory instruction before the SYNC instruction in program order,
+ *   generates a memory request to the external memory and any memory
+ *   instruction after the SYNC instruction in program order also generates a
+ *   memory request to external memory, the memory request belonging to the
+ *   older instruction must be globally performed before the time the memory
+ *   request belonging to the younger instruction is globally performed.
+ *
+ * - The barrier does not guarantee the order in which instruction fetches are
+ *   performed.
+ */
+
+/*
+ * stype 0x10 - An ordering barrier that affects preceding loads and stores and
+ * subsequent loads and stores.
+ * Older instructions which must reach the load/store ordering point before the
+ * SYNC instruction completes: Loads, Stores
+ * Younger instructions which must reach the load/store ordering point only
+ * after the SYNC instruction completes: Loads, Stores
+ * Older instructions which must be globally performed when the SYNC instruction
+ * completes: N/A
+ */
+#define STYPE_SYNC_MB 0x10
+
+
 #ifdef CONFIG_CPU_HAS_SYNC
 #define __sync()				\
 	__asm__ __volatile__(			\
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index 34ed22e..4812d1f 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -28,6 +28,7 @@
  *  - flush_cache_sigtramp() flush signal trampoline
  *  - flush_icache_all() flush the entire instruction cache
  *  - flush_data_cache_page() flushes a page from the data cache
+ *  - __flush_icache_user_range(start, end) flushes range of user instructions
  */
 
  /*
@@ -80,6 +81,10 @@
 
 extern void (*flush_icache_range)(unsigned long start, unsigned long end);
 extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
+extern void (*__flush_icache_user_range)(unsigned long start,
+					 unsigned long end);
+extern void (*__local_flush_icache_user_range)(unsigned long start,
+					       unsigned long end);
 
 extern void (*__flush_cache_vmap)(void);
 
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index fbe1881..bdd6dc1 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -24,7 +24,8 @@
 	case CPU_LOONGSON3:
 #endif
 
-#ifdef CONFIG_SYS_HAS_CPU_LOONGSON1B
+#if defined(CONFIG_SYS_HAS_CPU_LOONGSON1B) || \
+    defined(CONFIG_SYS_HAS_CPU_LOONGSON1C)
 	case CPU_LOONGSON1:
 #endif
 
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index f672df8..9a83724 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -240,6 +240,7 @@
 #define PRID_REV_VR4130		0x0080
 #define PRID_REV_34K_V1_0_2	0x0022
 #define PRID_REV_LOONGSON1B	0x0020
+#define PRID_REV_LOONGSON1C	0x0020	/* Same as Loongson-1B */
 #define PRID_REV_LOONGSON2E	0x0002
 #define PRID_REV_LOONGSON2F	0x0003
 #define PRID_REV_LOONGSON3A_R1	0x0005
diff --git a/arch/mips/include/asm/device.h b/arch/mips/include/asm/device.h
index c94fafb..21c2082 100644
--- a/arch/mips/include/asm/device.h
+++ b/arch/mips/include/asm/device.h
@@ -11,6 +11,11 @@
 struct dev_archdata {
 	/* DMA operations on that device */
 	struct dma_map_ops *dma_ops;
+
+#ifdef CONFIG_DMA_PERDEV_COHERENT
+	/* Non-zero if DMA is coherent with CPU caches */
+	bool dma_coherent;
+#endif
 };
 
 struct pdev_archdata {
diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h
index bc5e85d..72d0eab 100644
--- a/arch/mips/include/asm/dma-coherence.h
+++ b/arch/mips/include/asm/dma-coherence.h
@@ -9,14 +9,22 @@
 #ifndef __ASM_DMA_COHERENCE_H
 #define __ASM_DMA_COHERENCE_H
 
-#ifdef CONFIG_DMA_MAYBE_COHERENT
-extern int coherentio;
+enum coherent_io_user_state {
+	IO_COHERENCE_DEFAULT,
+	IO_COHERENCE_ENABLED,
+	IO_COHERENCE_DISABLED,
+};
+
+#if defined(CONFIG_DMA_PERDEV_COHERENT)
+/* Don't provide (hw_)coherentio to avoid misuse */
+#elif defined(CONFIG_DMA_MAYBE_COHERENT)
+extern enum coherent_io_user_state coherentio;
 extern int hw_coherentio;
 #else
 #ifdef CONFIG_DMA_COHERENT
-#define coherentio	1
+#define coherentio	IO_COHERENCE_ENABLED
 #else
-#define coherentio	0
+#define coherentio	IO_COHERENCE_DISABLED
 #endif
 #define hw_coherentio	0
 #endif /* CONFIG_DMA_MAYBE_COHERENT */
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 12fa79e..7aa71b9 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -32,4 +32,14 @@
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction);
 
+#define arch_setup_dma_ops arch_setup_dma_ops
+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
+				      u64 size, const struct iommu_ops *iommu,
+				      bool coherent)
+{
+#ifdef CONFIG_DMA_PERDEV_COHERENT
+	dev->archdata.dma_coherent = coherent;
+#endif
+}
+
 #endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/mips/include/asm/extable.h b/arch/mips/include/asm/extable.h
new file mode 100644
index 0000000..dce7a62
--- /dev/null
+++ b/arch/mips/include/asm/extable.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_EXTABLE_H
+#define _ASM_EXTABLE_H
+
+struct exception_table_entry
+{
+	unsigned long insn;
+	unsigned long nextinsn;
+};
+
+struct pt_regs;
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif
diff --git a/arch/mips/include/asm/i8259.h b/arch/mips/include/asm/i8259.h
index a7fbcd6..32229c7 100644
--- a/arch/mips/include/asm/i8259.h
+++ b/arch/mips/include/asm/i8259.h
@@ -37,12 +37,22 @@
 
 extern raw_spinlock_t i8259A_lock;
 
-extern int i8259A_irq_pending(unsigned int irq);
 extern void make_8259A_irq(unsigned int irq);
 
 extern void init_i8259_irqs(void);
 extern int i8259_of_init(struct device_node *node, struct device_node *parent);
 
+/**
+ * i8159_set_poll() - Override the i8259 polling function
+ * @poll: pointer to platform-specific polling function
+ *
+ * Call this to override the generic i8259 polling function, which directly
+ * accesses i8259 registers, with a platform specific one which may be faster
+ * in cases where hardware provides a more optimal means of polling for an
+ * interrupt.
+ */
+extern void i8259_set_poll(int (*poll)(void));
+
 /*
  * Do the traditional i8259 interrupt polling thing.  This is for the few
  * cases where no better interrupt acknowledge method is available and we
diff --git a/arch/mips/include/asm/kexec.h b/arch/mips/include/asm/kexec.h
index ee25ebb..493a3cc 100644
--- a/arch/mips/include/asm/kexec.h
+++ b/arch/mips/include/asm/kexec.h
@@ -45,6 +45,7 @@
 extern unsigned long secondary_kexec_args[4];
 extern void (*relocated_kexec_smp_wait) (void *);
 extern atomic_t kexec_ready_to_reboot;
+extern void (*_crash_smp_send_stop)(void);
 #endif
 #endif
 
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
index 0f8a354..61addb1 100644
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -49,7 +49,19 @@
 
 static inline int plat_device_is_coherent(struct device *dev)
 {
-	return coherentio;
+#ifdef CONFIG_DMA_PERDEV_COHERENT
+	return dev->archdata.dma_coherent;
+#else
+	switch (coherentio) {
+	default:
+	case IO_COHERENCE_DEFAULT:
+		return hw_coherentio;
+	case IO_COHERENCE_ENABLED:
+		return 1;
+	case IO_COHERENCE_DISABLED:
+		return 0;
+	}
+#endif
 }
 
 #ifndef plat_post_dma_flush
diff --git a/arch/mips/include/asm/mach-generic/floppy.h b/arch/mips/include/asm/mach-generic/floppy.h
index e2561d9..9ec2f6a 100644
--- a/arch/mips/include/asm/mach-generic/floppy.h
+++ b/arch/mips/include/asm/mach-generic/floppy.h
@@ -115,11 +115,7 @@
 
 static inline unsigned long fd_dma_mem_alloc(unsigned long size)
 {
-	unsigned long mem;
-
-	mem = __get_dma_pages(GFP_KERNEL, get_order(size));
-
-	return mem;
+	return __get_dma_pages(GFP_KERNEL, get_order(size));
 }
 
 static inline void fd_dma_mem_free(unsigned long addr, unsigned long size)
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index afc96ec..952b0fd 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -12,6 +12,8 @@
 
 #include <linux/const.h>
 
+#include <asm/mipsregs.h>
+
 /*
  * This gives the physical RAM offset.
  */
@@ -52,11 +54,7 @@
 #ifdef CONFIG_64BIT
 
 #ifndef CAC_BASE
-#ifdef CONFIG_DMA_NONCOHERENT
-#define CAC_BASE		_AC(0x9800000000000000, UL)
-#else
-#define CAC_BASE		_AC(0xa800000000000000, UL)
-#endif
+#define CAC_BASE	PHYS_TO_XKPHYS(read_c0_config() & CONF_CM_CMASK, 0)
 #endif
 
 #ifndef IO_BASE
diff --git a/arch/mips/include/asm/mach-ip27/spaces.h b/arch/mips/include/asm/mach-ip27/spaces.h
index b18802a..4775a11 100644
--- a/arch/mips/include/asm/mach-ip27/spaces.h
+++ b/arch/mips/include/asm/mach-ip27/spaces.h
@@ -19,6 +19,7 @@
 #define IO_BASE			0x9200000000000000
 #define MSPEC_BASE		0x9400000000000000
 #define UNCAC_BASE		0x9600000000000000
+#define CAC_BASE		0xa800000000000000
 
 #define TO_MSPEC(x)		(MSPEC_BASE | ((x) & TO_PHYS_MASK))
 #define TO_HSPEC(x)		(HSPEC_BASE | ((x) & TO_PHYS_MASK))
diff --git a/arch/mips/include/asm/mach-loongson32/irq.h b/arch/mips/include/asm/mach-loongson32/irq.h
index c1c7441..8c01b30 100644
--- a/arch/mips/include/asm/mach-loongson32/irq.h
+++ b/arch/mips/include/asm/mach-loongson32/irq.h
@@ -36,9 +36,14 @@
 #define LS1X_IRQ(n, x)			(LS1X_IRQ_BASE + (n << 5) + (x))
 
 #define LS1X_UART0_IRQ			LS1X_IRQ(0, 2)
+#if defined(CONFIG_LOONGSON1_LS1B)
 #define LS1X_UART1_IRQ			LS1X_IRQ(0, 3)
 #define LS1X_UART2_IRQ			LS1X_IRQ(0, 4)
 #define LS1X_UART3_IRQ			LS1X_IRQ(0, 5)
+#elif defined(CONFIG_LOONGSON1_LS1C)
+#define LS1X_UART1_IRQ			LS1X_IRQ(0, 4)
+#define LS1X_UART2_IRQ			LS1X_IRQ(0, 5)
+#endif
 #define LS1X_CAN0_IRQ			LS1X_IRQ(0, 6)
 #define LS1X_CAN1_IRQ			LS1X_IRQ(0, 7)
 #define LS1X_SPI0_IRQ			LS1X_IRQ(0, 8)
@@ -47,6 +52,9 @@
 #define LS1X_DMA0_IRQ			LS1X_IRQ(0, 13)
 #define LS1X_DMA1_IRQ			LS1X_IRQ(0, 14)
 #define LS1X_DMA2_IRQ			LS1X_IRQ(0, 15)
+#if defined(CONFIG_LOONGSON1_LS1C)
+#define LS1X_NAND_IRQ			LS1X_IRQ(0, 16)
+#endif
 #define LS1X_PWM0_IRQ			LS1X_IRQ(0, 17)
 #define LS1X_PWM1_IRQ			LS1X_IRQ(0, 18)
 #define LS1X_PWM2_IRQ			LS1X_IRQ(0, 19)
@@ -54,18 +62,49 @@
 #define LS1X_RTC_INT0_IRQ		LS1X_IRQ(0, 21)
 #define LS1X_RTC_INT1_IRQ		LS1X_IRQ(0, 22)
 #define LS1X_RTC_INT2_IRQ		LS1X_IRQ(0, 23)
+#if defined(CONFIG_LOONGSON1_LS1B)
 #define LS1X_TOY_INT0_IRQ		LS1X_IRQ(0, 24)
 #define LS1X_TOY_INT1_IRQ		LS1X_IRQ(0, 25)
 #define LS1X_TOY_INT2_IRQ		LS1X_IRQ(0, 26)
 #define LS1X_RTC_TICK_IRQ		LS1X_IRQ(0, 27)
 #define LS1X_TOY_TICK_IRQ		LS1X_IRQ(0, 28)
+#define LS1X_UART4_IRQ			LS1X_IRQ(0, 29)
+#define LS1X_UART5_IRQ			LS1X_IRQ(0, 30)
+#elif defined(CONFIG_LOONGSON1_LS1C)
+#define LS1X_UART3_IRQ			LS1X_IRQ(0, 29)
+#define LS1X_ADC_IRQ			LS1X_IRQ(0, 30)
+#define LS1X_SDIO_IRQ			LS1X_IRQ(0, 31)
+#endif
 
 #define LS1X_EHCI_IRQ			LS1X_IRQ(1, 0)
 #define LS1X_OHCI_IRQ			LS1X_IRQ(1, 1)
+#if defined(CONFIG_LOONGSON1_LS1B)
 #define LS1X_GMAC0_IRQ			LS1X_IRQ(1, 2)
 #define LS1X_GMAC1_IRQ			LS1X_IRQ(1, 3)
+#elif defined(CONFIG_LOONGSON1_LS1C)
+#define LS1X_OTG_IRQ			LS1X_IRQ(1, 2)
+#define LS1X_GMAC0_IRQ			LS1X_IRQ(1, 3)
+#define LS1X_CAM_IRQ			LS1X_IRQ(1, 4)
+#define LS1X_UART4_IRQ			LS1X_IRQ(1, 5)
+#define LS1X_UART5_IRQ			LS1X_IRQ(1, 6)
+#define LS1X_UART6_IRQ			LS1X_IRQ(1, 7)
+#define LS1X_UART7_IRQ			LS1X_IRQ(1, 8)
+#define LS1X_UART8_IRQ			LS1X_IRQ(1, 9)
+#define LS1X_UART9_IRQ			LS1X_IRQ(1, 13)
+#define LS1X_UART10_IRQ			LS1X_IRQ(1, 14)
+#define LS1X_UART11_IRQ			LS1X_IRQ(1, 15)
+#define LS1X_I2C0_IRQ			LS1X_IRQ(1, 17)
+#define LS1X_I2C1_IRQ			LS1X_IRQ(1, 18)
+#define LS1X_I2C2_IRQ			LS1X_IRQ(1, 19)
+#endif
 
-#define LS1X_IRQS		(LS1X_IRQ(4, 31) + 1 - LS1X_IRQ_BASE)
+#if defined(CONFIG_LOONGSON1_LS1B)
+#define INTN	4
+#elif defined(CONFIG_LOONGSON1_LS1C)
+#define INTN	5
+#endif
+
+#define LS1X_IRQS		(LS1X_IRQ(INTN, 31) + 1 - LS1X_IRQ_BASE)
 
 #define NR_IRQS			(MIPS_CPU_IRQS + LS1X_IRQS)
 
diff --git a/arch/mips/include/asm/mach-loongson32/loongson1.h b/arch/mips/include/asm/mach-loongson32/loongson1.h
index 978f6df..3584c40 100644
--- a/arch/mips/include/asm/mach-loongson32/loongson1.h
+++ b/arch/mips/include/asm/mach-loongson32/loongson1.h
@@ -12,7 +12,11 @@
 #ifndef __ASM_MACH_LOONGSON32_LOONGSON1_H
 #define __ASM_MACH_LOONGSON32_LOONGSON1_H
 
+#if defined(CONFIG_LOONGSON1_LS1B)
 #define DEFAULT_MEMSIZE			256	/* If no memsize provided */
+#elif defined(CONFIG_LOONGSON1_LS1C)
+#define DEFAULT_MEMSIZE			32
+#endif
 
 /* Loongson 1 Register Bases */
 #define LS1X_MUX_BASE			0x1fd00420
@@ -20,6 +24,7 @@
 #define LS1X_GPIO0_BASE			0x1fd010c0
 #define LS1X_GPIO1_BASE			0x1fd010c4
 #define LS1X_DMAC_BASE			0x1fd01160
+#define LS1X_CBUS_BASE			0x1fd011c0
 #define LS1X_EHCI_BASE			0x1fe00000
 #define LS1X_OHCI_BASE			0x1fe08000
 #define LS1X_GMAC0_BASE			0x1fe10000
diff --git a/arch/mips/include/asm/mach-loongson32/platform.h b/arch/mips/include/asm/mach-loongson32/platform.h
index 672531a..7adc313 100644
--- a/arch/mips/include/asm/mach-loongson32/platform.h
+++ b/arch/mips/include/asm/mach-loongson32/platform.h
@@ -30,5 +30,6 @@
 void __init ls1x_dma_set_platdata(struct plat_ls1x_dma *pdata);
 void __init ls1x_nand_set_platdata(struct plat_ls1x_nand *pdata);
 void __init ls1x_serial_set_uartclk(struct platform_device *pdev);
+void __init ls1x_rtc_set_extclk(struct platform_device *pdev);
 
 #endif /* __ASM_MACH_LOONGSON32_PLATFORM_H */
diff --git a/arch/mips/include/asm/mach-loongson32/regs-clk.h b/arch/mips/include/asm/mach-loongson32/regs-clk.h
index 4d56fc3..e5e8f11 100644
--- a/arch/mips/include/asm/mach-loongson32/regs-clk.h
+++ b/arch/mips/include/asm/mach-loongson32/regs-clk.h
@@ -18,6 +18,7 @@
 #define LS1X_CLK_PLL_FREQ		LS1X_CLK_REG(0x0)
 #define LS1X_CLK_PLL_DIV		LS1X_CLK_REG(0x4)
 
+#if defined(CONFIG_LOONGSON1_LS1B)
 /* Clock PLL Divisor Register Bits */
 #define DIV_DC_EN			BIT(31)
 #define DIV_DC_RST			BIT(30)
@@ -48,4 +49,37 @@
 #define BYPASS_DDR_WIDTH		1
 #define BYPASS_CPU_WIDTH		1
 
+#elif defined(CONFIG_LOONGSON1_LS1C)
+/* PLL/SDRAM Frequency configuration register Bits */
+#define PLL_VALID			BIT(31)
+#define FRAC_N				GENMASK(23, 16)
+#define RST_TIME			GENMASK(3, 2)
+#define SDRAM_DIV			GENMASK(1, 0)
+
+/* CPU/CAMERA/DC Frequency configuration register Bits */
+#define DIV_DC_EN			BIT(31)
+#define DIV_DC				GENMASK(30, 24)
+#define DIV_CAM_EN			BIT(23)
+#define DIV_CAM				GENMASK(22, 16)
+#define DIV_CPU_EN			BIT(15)
+#define DIV_CPU				GENMASK(14, 8)
+#define DIV_DC_SEL_EN			BIT(5)
+#define DIV_DC_SEL			BIT(4)
+#define DIV_CAM_SEL_EN			BIT(3)
+#define DIV_CAM_SEL			BIT(2)
+#define DIV_CPU_SEL_EN			BIT(1)
+#define DIV_CPU_SEL			BIT(0)
+
+#define DIV_DC_SHIFT			24
+#define DIV_CAM_SHIFT			16
+#define DIV_CPU_SHIFT			8
+#define DIV_DDR_SHIFT			0
+
+#define DIV_DC_WIDTH			7
+#define DIV_CAM_WIDTH			7
+#define DIV_CPU_WIDTH			7
+#define DIV_DDR_WIDTH			2
+
+#endif
+
 #endif /* __ASM_MACH_LOONGSON32_REGS_CLK_H */
diff --git a/arch/mips/include/asm/mach-loongson32/regs-mux.h b/arch/mips/include/asm/mach-loongson32/regs-mux.h
index 7c394f9..4a0bdeb 100644
--- a/arch/mips/include/asm/mach-loongson32/regs-mux.h
+++ b/arch/mips/include/asm/mach-loongson32/regs-mux.h
@@ -18,6 +18,7 @@
 #define LS1X_MUX_CTRL0			LS1X_MUX_REG(0x0)
 #define LS1X_MUX_CTRL1			LS1X_MUX_REG(0x4)
 
+#if defined(CONFIG_LOONGSON1_LS1B)
 /* MUX CTRL0 Register Bits */
 #define UART0_USE_PWM23			BIT(28)
 #define UART0_USE_PWM01			BIT(27)
@@ -64,4 +65,64 @@
 #define GMAC1_USE_PWM23			BIT(1)
 #define GMAC0_USE_PWM01			BIT(0)
 
+#elif defined(CONFIG_LOONGSON1_LS1C)
+
+/* SHUT_CTRL Register Bits */
+#define UART_SPLIT			GENMASK(31, 30)
+#define OUTPUT_CLK			GENMASK(29, 26)
+#define ADC_SHUT			BIT(25)
+#define SDIO_SHUT			BIT(24)
+#define DMA2_SHUT			BIT(23)
+#define DMA1_SHUT			BIT(22)
+#define DMA0_SHUT			BIT(21)
+#define SPI1_SHUT			BIT(20)
+#define SPI0_SHUT			BIT(19)
+#define I2C2_SHUT			BIT(18)
+#define I2C1_SHUT			BIT(17)
+#define I2C0_SHUT			BIT(16)
+#define AC97_SHUT			BIT(15)
+#define I2S_SHUT			BIT(14)
+#define UART3_SHUT			BIT(13)
+#define UART2_SHUT			BIT(12)
+#define UART1_SHUT			BIT(11)
+#define UART0_SHUT			BIT(10)
+#define CAN1_SHUT			BIT(9)
+#define CAN0_SHUT			BIT(8)
+#define ECC_SHUT			BIT(7)
+#define GMAC_SHUT			BIT(6)
+#define USBHOST_SHUT			BIT(5)
+#define USBOTG_SHUT			BIT(4)
+#define SDRAM_SHUT			BIT(3)
+#define SRAM_SHUT			BIT(2)
+#define CAM_SHUT			BIT(1)
+#define LCD_SHUT			BIT(0)
+
+#define UART_SPLIT_SHIFT                        30
+#define OUTPUT_CLK_SHIFT                        26
+
+/* MISC_CTRL Register Bits */
+#define USBHOST_RSTN			BIT(31)
+#define PHY_INTF_SELI			GENMASK(30, 28)
+#define AC97_EN				BIT(25)
+#define SDIO_DMA_EN			GENMASK(24, 23)
+#define ADC_DMA_EN			BIT(22)
+#define SDIO_USE_SPI1			BIT(17)
+#define SDIO_USE_SPI0			BIT(16)
+#define SRAM_CTRL			GENMASK(15, 0)
+
+#define PHY_INTF_SELI_SHIFT                     28
+#define SDIO_DMA_EN_SHIFT                       23
+#define SRAM_CTRL_SHIFT				0
+
+#define LS1X_CBUS_REG(n, x) \
+		((void __iomem *)KSEG1ADDR(LS1X_CBUS_BASE + (n * 0x04) + (x)))
+
+#define LS1X_CBUS_FIRST(n)		LS1X_CBUS_REG(n, 0x00)
+#define LS1X_CBUS_SECOND(n)		LS1X_CBUS_REG(n, 0x10)
+#define LS1X_CBUS_THIRD(n)		LS1X_CBUS_REG(n, 0x20)
+#define LS1X_CBUS_FOURTHT(n)		LS1X_CBUS_REG(n, 0x30)
+#define LS1X_CBUS_FIFTHT(n)		LS1X_CBUS_REG(n, 0x40)
+
+#endif
+
 #endif /* __ASM_MACH_LOONGSON32_REGS_MUX_H */
diff --git a/arch/mips/include/asm/mach-loongson64/loongson.h b/arch/mips/include/asm/mach-loongson64/loongson.h
index d1ff774..c68c0cc 100644
--- a/arch/mips/include/asm/mach-loongson64/loongson.h
+++ b/arch/mips/include/asm/mach-loongson64/loongson.h
@@ -14,7 +14,6 @@
 #include <linux/io.h>
 #include <linux/init.h>
 #include <linux/irq.h>
-#include <linux/kconfig.h>
 #include <boot_param.h>
 
 /* loongson internal northbridge initialization */
diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
deleted file mode 100644
index bfbd703..0000000
--- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2003, 2004 Chris Dearman
- * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
- */
-#ifndef __ASM_MACH_MIPS_CPU_FEATURE_OVERRIDES_H
-#define __ASM_MACH_MIPS_CPU_FEATURE_OVERRIDES_H
-
-
-/*
- * CPU feature overrides for MIPS boards
- */
-#ifdef CONFIG_CPU_MIPS32
-#define cpu_has_tlb		1
-#define cpu_has_4kex		1
-#define cpu_has_4k_cache	1
-/* #define cpu_has_fpu		? */
-/* #define cpu_has_32fpr	? */
-#define cpu_has_counter		1
-/* #define cpu_has_watch	? */
-#define cpu_has_divec		1
-#define cpu_has_vce		0
-/* #define cpu_has_cache_cdex_p ? */
-/* #define cpu_has_cache_cdex_s ? */
-/* #define cpu_has_prefetch	? */
-#define cpu_has_mcheck		1
-/* #define cpu_has_ejtag	? */
-#ifdef CONFIG_CPU_MICROMIPS
-#define cpu_has_llsc		0
-#else
-#define cpu_has_llsc		1
-#endif
-/* #define cpu_has_vtag_icache	? */
-/* #define cpu_has_dc_aliases	? */
-/* #define cpu_has_ic_fills_f_dc ? */
-#define cpu_has_nofpuex		0
-/* #define cpu_has_64bits	? */
-/* #define cpu_has_64bit_zero_reg ? */
-/* #define cpu_has_inclusive_pcaches ? */
-#define cpu_icache_snoops_remote_store 1
-#endif
-
-#ifdef CONFIG_CPU_MIPS64
-#define cpu_has_tlb		1
-#define cpu_has_4kex		1
-#define cpu_has_4k_cache	1
-/* #define cpu_has_fpu		? */
-/* #define cpu_has_32fpr	? */
-#define cpu_has_counter		1
-/* #define cpu_has_watch	? */
-#define cpu_has_divec		1
-#define cpu_has_vce		0
-/* #define cpu_has_cache_cdex_p ? */
-/* #define cpu_has_cache_cdex_s ? */
-/* #define cpu_has_prefetch	? */
-#define cpu_has_mcheck		1
-/* #define cpu_has_ejtag	? */
-#define cpu_has_llsc		1
-/* #define cpu_has_vtag_icache	? */
-/* #define cpu_has_dc_aliases	? */
-/* #define cpu_has_ic_fills_f_dc ? */
-#define cpu_has_nofpuex		0
-/* #define cpu_has_64bits	? */
-/* #define cpu_has_64bit_zero_reg ? */
-/* #define cpu_has_inclusive_pcaches ? */
-#define cpu_icache_snoops_remote_store 1
-#endif
-
-#endif /* __ASM_MACH_MIPS_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-sead3/irq.h b/arch/mips/include/asm/mach-sead3/irq.h
deleted file mode 100644
index 5d154cf..0000000
--- a/arch/mips/include/asm/mach-sead3/irq.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ASM_MACH_MIPS_IRQ_H
-#define __ASM_MACH_MIPS_IRQ_H
-
-#define NR_IRQS 256
-
-
-#include_next <irq.h>
-
-#endif /* __ASM_MACH_MIPS_IRQ_H */
diff --git a/arch/mips/include/asm/mach-sead3/kernel-entry-init.h b/arch/mips/include/asm/mach-sead3/kernel-entry-init.h
deleted file mode 100644
index 6cccd4d..0000000
--- a/arch/mips/include/asm/mach-sead3/kernel-entry-init.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Chris Dearman (chris@mips.com)
- * Copyright (C) 2007 Mips Technologies, Inc.
- */
-#ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
-#define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
-
-	.macro	kernel_entry_setup
-	.endm
-
-/*
- * Do SMP slave processor setup necessary before we can safely execute C code.
- */
-	.macro	smp_slave_setup
-	.endm
-
-#endif /* __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H */
diff --git a/arch/mips/include/asm/mach-sead3/war.h b/arch/mips/include/asm/mach-sead3/war.h
deleted file mode 100644
index d068fc4..0000000
--- a/arch/mips/include/asm/mach-sead3/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_MIPS_WAR_H
-#define __ASM_MIPS_MACH_MIPS_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR	0
-#define R4600_V1_HIT_CACHEOP_WAR	0
-#define R4600_V2_HIT_CACHEOP_WAR	0
-#define R5432_CP0_INTERRUPT_WAR		0
-#define BCM1250_M3_WAR			0
-#define SIBYTE_1956_WAR			0
-#define MIPS4K_ICACHE_REFILL_WAR	1
-#define MIPS_CACHE_SYNC_WAR		1
-#define TX49XX_ICACHE_INDEX_INV_WAR	0
-#define ICACHE_REFILLS_WORKAROUND_WAR	1
-#define R10000_LLSC_WAR			0
-#define MIPS34K_MISSED_ITLB_WAR		0
-
-#endif /* __ASM_MIPS_MACH_MIPS_WAR_H */
diff --git a/arch/mips/include/asm/machine.h b/arch/mips/include/asm/machine.h
new file mode 100644
index 0000000..6b444cd
--- /dev/null
+++ b/arch/mips/include/asm/machine.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MIPS_ASM_MACHINE_H__
+#define __MIPS_ASM_MACHINE_H__
+
+#include <linux/libfdt.h>
+#include <linux/of.h>
+
+struct mips_machine {
+	const struct of_device_id *matches;
+	const void *fdt;
+	bool (*detect)(void);
+	const void *(*fixup_fdt)(const void *fdt, const void *match_data);
+	unsigned int (*measure_hpt_freq)(void);
+};
+
+extern long __mips_machines_start;
+extern long __mips_machines_end;
+
+#define MIPS_MACHINE(name)						\
+	static const struct mips_machine __mips_mach_##name		\
+		__used __section(.mips.machines.init)
+
+#define for_each_mips_machine(mach)					\
+	for ((mach) = (struct mips_machine *)&__mips_machines_start;	\
+	     (mach) < (struct mips_machine *)&__mips_machines_end;	\
+	     (mach)++)
+
+/**
+ * mips_machine_is_compatible() - check if a machine is compatible with an FDT
+ * @mach: the machine struct to check
+ * @fdt: the FDT to check for compatibility with
+ *
+ * Check whether the given machine @mach is compatible with the given flattened
+ * device tree @fdt, based upon the compatibility property of the root node.
+ *
+ * Return: the device id matched if any, else NULL
+ */
+static inline const struct of_device_id *
+mips_machine_is_compatible(const struct mips_machine *mach, const void *fdt)
+{
+	const struct of_device_id *match;
+
+	if (!mach->matches)
+		return NULL;
+
+	for (match = mach->matches; match->compatible; match++) {
+		if (fdt_node_check_compatible(fdt, 0, match->compatible) == 0)
+			return match;
+	}
+
+	return NULL;
+}
+
+#endif /* __MIPS_ASM_MACHINE_H__ */
diff --git a/arch/mips/include/asm/mips-boards/sead3int.h b/arch/mips/include/asm/mips-boards/sead3int.h
deleted file mode 100644
index 8932c7d..0000000
--- a/arch/mips/include/asm/mips-boards/sead3int.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000,2012 MIPS Technologies, Inc.  All rights reserved.
- *	Douglas Leung <douglas@mips.com>
- *	Steven J. Hill <sjhill@mips.com>
- */
-#ifndef _MIPS_SEAD3INT_H
-#define _MIPS_SEAD3INT_H
-
-#include <linux/irqchip/mips-gic.h>
-
-/* SEAD-3 GIC address space definitions. */
-#define GIC_BASE_ADDR		0x1b1c0000
-#define GIC_ADDRSPACE_SZ	(128 * 1024)
-
-/* CPU interrupt offsets */
-#define CPU_INT_GIC		2
-#define CPU_INT_EHCI		2
-#define CPU_INT_UART0		4
-#define CPU_INT_UART1		4
-#define CPU_INT_NET		6
-
-/* GIC interrupt offsets */
-#define GIC_INT_NET		GIC_SHARED_TO_HWIRQ(0)
-#define GIC_INT_UART1		GIC_SHARED_TO_HWIRQ(2)
-#define GIC_INT_UART0		GIC_SHARED_TO_HWIRQ(3)
-#define GIC_INT_EHCI		GIC_SHARED_TO_HWIRQ(5)
-
-#endif /* !(_MIPS_SEAD3INT_H) */
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index 4fafeef..2e41807 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -359,6 +359,7 @@
 /* GCR_Cx_COHERENCE register fields */
 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF	0
 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK	(_ULCAST_(0xff) << 0)
+#define CM3_GCR_Cx_COHERENCE_COHEN_MSK		(_ULCAST_(0x1) << 0)
 
 /* GCR_Cx_CONFIG register fields */
 #define CM_GCR_Cx_CONFIG_IOCUTYPE_SHF		10
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 0aaf9a0..702c273 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -3,7 +3,7 @@
 
 #include <linux/list.h>
 #include <linux/elf.h>
-#include <asm/uaccess.h>
+#include <asm/extable.h>
 
 struct mod_arch_specific {
 	/* Data Bus Error exception tables */
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-board.h b/arch/mips/include/asm/octeon/cvmx-helper-board.h
index cda93ae..b4d19c21 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-board.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-board.h
@@ -58,16 +58,6 @@
 #define CVMX_HELPER_BOARD_MGMT_IPD_PORT	    -10
 
 /**
- * cvmx_override_board_link_get(int ipd_port) is a function
- * pointer. It is meant to allow customization of the process of
- * talking to a PHY to determine link speed. It is called every
- * time a PHY must be polled for link status. Users should set
- * this pointer to a function before calling any cvmx-helper
- * operations.
- */
-extern cvmx_helper_link_info_t(*cvmx_override_board_link_get) (int ipd_port);
-
-/**
  * Return the MII PHY address associated with the given IPD
  * port. A result of -1 means there isn't a MII capable PHY
  * connected to this port. On chips supporting multiple MII
@@ -86,26 +76,6 @@
 extern int cvmx_helper_board_get_mii_address(int ipd_port);
 
 /**
- * This function as a board specific method of changing the PHY
- * speed, duplex, and autonegotiation. This programs the PHY and
- * not Octeon. This can be used to force Octeon's links to
- * specific settings.
- *
- * @phy_addr:  The address of the PHY to program
- * @link_flags:
- *		    Flags to control autonegotiation.  Bit 0 is autonegotiation
- *		    enable/disable to maintain backward compatibility.
- * @link_info: Link speed to program. If the speed is zero and autonegotiation
- *		    is enabled, all possible negotiation speeds are advertised.
- *
- * Returns Zero on success, negative on failure
- */
-int cvmx_helper_board_link_set_phy(int phy_addr,
-				   cvmx_helper_board_set_phy_link_flags_types_t
-				   link_flags,
-				   cvmx_helper_link_info_t link_info);
-
-/**
  * This function is the board specific method of determining an
  * ethernet ports link speed. Most Octeon boards have Marvell PHYs
  * and are handled by the fall through case. This function must be
diff --git a/arch/mips/include/asm/octeon/cvmx-mdio.h b/arch/mips/include/asm/octeon/cvmx-mdio.h
deleted file mode 100644
index 9f6a4f3..0000000
--- a/arch/mips/include/asm/octeon/cvmx-mdio.h
+++ /dev/null
@@ -1,506 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT.  See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * Interface to the SMI/MDIO hardware, including support for both IEEE 802.3
- * clause 22 and clause 45 operations.
- *
- */
-
-#ifndef __CVMX_MIO_H__
-#define __CVMX_MIO_H__
-
-#include <asm/octeon/cvmx-smix-defs.h>
-
-/**
- * PHY register 0 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_CONTROL 0
-typedef union {
-	uint16_t u16;
-	struct {
-		uint16_t reset:1;
-		uint16_t loopback:1;
-		uint16_t speed_lsb:1;
-		uint16_t autoneg_enable:1;
-		uint16_t power_down:1;
-		uint16_t isolate:1;
-		uint16_t restart_autoneg:1;
-		uint16_t duplex:1;
-		uint16_t collision_test:1;
-		uint16_t speed_msb:1;
-		uint16_t unidirectional_enable:1;
-		uint16_t reserved_0_4:5;
-	} s;
-} cvmx_mdio_phy_reg_control_t;
-
-/**
- * PHY register 1 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_STATUS 1
-typedef union {
-	uint16_t u16;
-	struct {
-		uint16_t capable_100base_t4:1;
-		uint16_t capable_100base_x_full:1;
-		uint16_t capable_100base_x_half:1;
-		uint16_t capable_10_full:1;
-		uint16_t capable_10_half:1;
-		uint16_t capable_100base_t2_full:1;
-		uint16_t capable_100base_t2_half:1;
-		uint16_t capable_extended_status:1;
-		uint16_t capable_unidirectional:1;
-		uint16_t capable_mf_preamble_suppression:1;
-		uint16_t autoneg_complete:1;
-		uint16_t remote_fault:1;
-		uint16_t capable_autoneg:1;
-		uint16_t link_status:1;
-		uint16_t jabber_detect:1;
-		uint16_t capable_extended_registers:1;
-
-	} s;
-} cvmx_mdio_phy_reg_status_t;
-
-/**
- * PHY register 2 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_ID1 2
-typedef union {
-	uint16_t u16;
-	struct {
-		uint16_t oui_bits_3_18;
-	} s;
-} cvmx_mdio_phy_reg_id1_t;
-
-/**
- * PHY register 3 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_ID2 3
-typedef union {
-	uint16_t u16;
-	struct {
-		uint16_t oui_bits_19_24:6;
-		uint16_t model:6;
-		uint16_t revision:4;
-	} s;
-} cvmx_mdio_phy_reg_id2_t;
-
-/**
- * PHY register 4 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_AUTONEG_ADVER 4
-typedef union {
-	uint16_t u16;
-	struct {
-		uint16_t next_page:1;
-		uint16_t reserved_14:1;
-		uint16_t remote_fault:1;
-		uint16_t reserved_12:1;
-		uint16_t asymmetric_pause:1;
-		uint16_t pause:1;
-		uint16_t advert_100base_t4:1;
-		uint16_t advert_100base_tx_full:1;
-		uint16_t advert_100base_tx_half:1;
-		uint16_t advert_10base_tx_full:1;
-		uint16_t advert_10base_tx_half:1;
-		uint16_t selector:5;
-	} s;
-} cvmx_mdio_phy_reg_autoneg_adver_t;
-
-/**
- * PHY register 5 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_LINK_PARTNER_ABILITY 5
-typedef union {
-	uint16_t u16;
-	struct {
-		uint16_t next_page:1;
-		uint16_t ack:1;
-		uint16_t remote_fault:1;
-		uint16_t reserved_12:1;
-		uint16_t asymmetric_pause:1;
-		uint16_t pause:1;
-		uint16_t advert_100base_t4:1;
-		uint16_t advert_100base_tx_full:1;
-		uint16_t advert_100base_tx_half:1;
-		uint16_t advert_10base_tx_full:1;
-		uint16_t advert_10base_tx_half:1;
-		uint16_t selector:5;
-	} s;
-} cvmx_mdio_phy_reg_link_partner_ability_t;
-
-/**
- * PHY register 6 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_AUTONEG_EXPANSION 6
-typedef union {
-	uint16_t u16;
-	struct {
-		uint16_t reserved_5_15:11;
-		uint16_t parallel_detection_fault:1;
-		uint16_t link_partner_next_page_capable:1;
-		uint16_t local_next_page_capable:1;
-		uint16_t page_received:1;
-		uint16_t link_partner_autoneg_capable:1;
-
-	} s;
-} cvmx_mdio_phy_reg_autoneg_expansion_t;
-
-/**
- * PHY register 9 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_CONTROL_1000 9
-typedef union {
-	uint16_t u16;
-	struct {
-		uint16_t test_mode:3;
-		uint16_t manual_master_slave:1;
-		uint16_t master:1;
-		uint16_t port_type:1;
-		uint16_t advert_1000base_t_full:1;
-		uint16_t advert_1000base_t_half:1;
-		uint16_t reserved_0_7:8;
-	} s;
-} cvmx_mdio_phy_reg_control_1000_t;
-
-/**
- * PHY register 10 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_STATUS_1000 10
-typedef union {
-	uint16_t u16;
-	struct {
-		uint16_t master_slave_fault:1;
-		uint16_t is_master:1;
-		uint16_t local_receiver_ok:1;
-		uint16_t remote_receiver_ok:1;
-		uint16_t remote_capable_1000base_t_full:1;
-		uint16_t remote_capable_1000base_t_half:1;
-		uint16_t reserved_8_9:2;
-		uint16_t idle_error_count:8;
-	} s;
-} cvmx_mdio_phy_reg_status_1000_t;
-
-/**
- * PHY register 15 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_EXTENDED_STATUS 15
-typedef union {
-	uint16_t u16;
-	struct {
-		uint16_t capable_1000base_x_full:1;
-		uint16_t capable_1000base_x_half:1;
-		uint16_t capable_1000base_t_full:1;
-		uint16_t capable_1000base_t_half:1;
-		uint16_t reserved_0_11:12;
-	} s;
-} cvmx_mdio_phy_reg_extended_status_t;
-
-/**
- * PHY register 13 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_MMD_CONTROL 13
-typedef union {
-	uint16_t u16;
-	struct {
-		uint16_t function:2;
-		uint16_t reserved_5_13:9;
-		uint16_t devad:5;
-	} s;
-} cvmx_mdio_phy_reg_mmd_control_t;
-
-/**
- * PHY register 14 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_MMD_ADDRESS_DATA 14
-typedef union {
-	uint16_t u16;
-	struct {
-		uint16_t address_data:16;
-	} s;
-} cvmx_mdio_phy_reg_mmd_address_data_t;
-
-/* Operating request encodings. */
-#define MDIO_CLAUSE_22_WRITE	0
-#define MDIO_CLAUSE_22_READ	1
-
-#define MDIO_CLAUSE_45_ADDRESS	0
-#define MDIO_CLAUSE_45_WRITE	1
-#define MDIO_CLAUSE_45_READ_INC 2
-#define MDIO_CLAUSE_45_READ	3
-
-/* MMD identifiers, mostly for accessing devices within XENPAK modules. */
-#define CVMX_MMD_DEVICE_PMA_PMD	     1
-#define CVMX_MMD_DEVICE_WIS	     2
-#define CVMX_MMD_DEVICE_PCS	     3
-#define CVMX_MMD_DEVICE_PHY_XS	     4
-#define CVMX_MMD_DEVICE_DTS_XS	     5
-#define CVMX_MMD_DEVICE_TC	     6
-#define CVMX_MMD_DEVICE_CL22_EXT     29
-#define CVMX_MMD_DEVICE_VENDOR_1     30
-#define CVMX_MMD_DEVICE_VENDOR_2     31
-
-/* Helper function to put MDIO interface into clause 45 mode */
-static inline void __cvmx_mdio_set_clause45_mode(int bus_id)
-{
-	union cvmx_smix_clk smi_clk;
-	/* Put bus into clause 45 mode */
-	smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(bus_id));
-	smi_clk.s.mode = 1;
-	smi_clk.s.preamble = 1;
-	cvmx_write_csr(CVMX_SMIX_CLK(bus_id), smi_clk.u64);
-}
-
-/* Helper function to put MDIO interface into clause 22 mode */
-static inline void __cvmx_mdio_set_clause22_mode(int bus_id)
-{
-	union cvmx_smix_clk smi_clk;
-	/* Put bus into clause 22 mode */
-	smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(bus_id));
-	smi_clk.s.mode = 0;
-	cvmx_write_csr(CVMX_SMIX_CLK(bus_id), smi_clk.u64);
-}
-
-/**
- * Perform an MII read. This function is used to read PHY
- * registers controlling auto negotiation.
- *
- * @bus_id:   MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- *		   support multiple busses.
- * @phy_id:   The MII phy id
- * @location: Register location to read
- *
- * Returns Result from the read or -1 on failure
- */
-static inline int cvmx_mdio_read(int bus_id, int phy_id, int location)
-{
-	union cvmx_smix_cmd smi_cmd;
-	union cvmx_smix_rd_dat smi_rd;
-	int timeout = 1000;
-
-	if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
-		__cvmx_mdio_set_clause22_mode(bus_id);
-
-	smi_cmd.u64 = 0;
-	smi_cmd.s.phy_op = MDIO_CLAUSE_22_READ;
-	smi_cmd.s.phy_adr = phy_id;
-	smi_cmd.s.reg_adr = location;
-	cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
-	do {
-		cvmx_wait(1000);
-		smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id));
-	} while (smi_rd.s.pending && timeout--);
-
-	if (smi_rd.s.val)
-		return smi_rd.s.dat;
-	else
-		return -1;
-}
-
-/**
- * Perform an MII write. This function is used to write PHY
- * registers controlling auto negotiation.
- *
- * @bus_id:   MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- *		   support multiple busses.
- * @phy_id:   The MII phy id
- * @location: Register location to write
- * @val:      Value to write
- *
- * Returns -1 on error
- *	   0 on success
- */
-static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
-{
-	union cvmx_smix_cmd smi_cmd;
-	union cvmx_smix_wr_dat smi_wr;
-	int timeout = 1000;
-
-	if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
-		__cvmx_mdio_set_clause22_mode(bus_id);
-
-	smi_wr.u64 = 0;
-	smi_wr.s.dat = val;
-	cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
-
-	smi_cmd.u64 = 0;
-	smi_cmd.s.phy_op = MDIO_CLAUSE_22_WRITE;
-	smi_cmd.s.phy_adr = phy_id;
-	smi_cmd.s.reg_adr = location;
-	cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
-	do {
-		cvmx_wait(1000);
-		smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
-	} while (smi_wr.s.pending && --timeout);
-	if (timeout <= 0)
-		return -1;
-
-	return 0;
-}
-
-/**
- * Perform an IEEE 802.3 clause 45 MII read. This function is used to
- * read PHY registers controlling auto negotiation.
- *
- * @bus_id:   MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- *		   support multiple busses.
- * @phy_id:   The MII phy id
- * @device:   MDIO Managable Device (MMD) id
- * @location: Register location to read
- *
- * Returns Result from the read or -1 on failure
- */
-
-static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
-				    int location)
-{
-	union cvmx_smix_cmd smi_cmd;
-	union cvmx_smix_rd_dat smi_rd;
-	union cvmx_smix_wr_dat smi_wr;
-	int timeout = 1000;
-
-	if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
-		return -1;
-
-	__cvmx_mdio_set_clause45_mode(bus_id);
-
-	smi_wr.u64 = 0;
-	smi_wr.s.dat = location;
-	cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
-
-	smi_cmd.u64 = 0;
-	smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
-	smi_cmd.s.phy_adr = phy_id;
-	smi_cmd.s.reg_adr = device;
-	cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
-	do {
-		cvmx_wait(1000);
-		smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
-	} while (smi_wr.s.pending && --timeout);
-	if (timeout <= 0) {
-		cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
-			     "device %2d register %2d	TIME OUT(address)\n",
-		     bus_id, phy_id, device, location);
-		return -1;
-	}
-
-	smi_cmd.u64 = 0;
-	smi_cmd.s.phy_op = MDIO_CLAUSE_45_READ;
-	smi_cmd.s.phy_adr = phy_id;
-	smi_cmd.s.reg_adr = device;
-	cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
-	do {
-		cvmx_wait(1000);
-		smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id));
-	} while (smi_rd.s.pending && --timeout);
-
-	if (timeout <= 0) {
-		cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
-			     "device %2d register %2d	TIME OUT(data)\n",
-		     bus_id, phy_id, device, location);
-		return -1;
-	}
-
-	if (smi_rd.s.val)
-		return smi_rd.s.dat;
-	else {
-		cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
-			     "device %2d register %2d	INVALID READ\n",
-		     bus_id, phy_id, device, location);
-		return -1;
-	}
-}
-
-/**
- * Perform an IEEE 802.3 clause 45 MII write. This function is used to
- * write PHY registers controlling auto negotiation.
- *
- * @bus_id:   MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- *		   support multiple busses.
- * @phy_id:   The MII phy id
- * @device:   MDIO Managable Device (MMD) id
- * @location: Register location to write
- * @val:      Value to write
- *
- * Returns -1 on error
- *	   0 on success
- */
-static inline int cvmx_mdio_45_write(int bus_id, int phy_id, int device,
-				     int location, int val)
-{
-	union cvmx_smix_cmd smi_cmd;
-	union cvmx_smix_wr_dat smi_wr;
-	int timeout = 1000;
-
-	if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
-		return -1;
-
-	__cvmx_mdio_set_clause45_mode(bus_id);
-
-	smi_wr.u64 = 0;
-	smi_wr.s.dat = location;
-	cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
-
-	smi_cmd.u64 = 0;
-	smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
-	smi_cmd.s.phy_adr = phy_id;
-	smi_cmd.s.reg_adr = device;
-	cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
-	do {
-		cvmx_wait(1000);
-		smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
-	} while (smi_wr.s.pending && --timeout);
-	if (timeout <= 0)
-		return -1;
-
-	smi_wr.u64 = 0;
-	smi_wr.s.dat = val;
-	cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
-
-	smi_cmd.u64 = 0;
-	smi_cmd.s.phy_op = MDIO_CLAUSE_45_WRITE;
-	smi_cmd.s.phy_adr = phy_id;
-	smi_cmd.s.reg_adr = device;
-	cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
-	do {
-		cvmx_wait(1000);
-		smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
-	} while (smi_wr.s.pending && --timeout);
-	if (timeout <= 0)
-		return -1;
-
-	return 0;
-}
-
-#endif
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 9b63cd4..30d1129 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -17,15 +17,18 @@
  */
 
 #include <linux/ioport.h>
+#include <linux/list.h>
 #include <linux/of.h>
 
+#ifdef CONFIG_PCI_DRIVERS_LEGACY
+
 /*
  * Each pci channel is a top-level PCI bus seem by CPU.	 A machine  with
  * multiple PCI channels may have multiple PCI host controllers or a
  * single controller supporting multiple channels.
  */
 struct pci_controller {
-	struct pci_controller *next;
+	struct list_head list;
 	struct pci_bus *bus;
 	struct device_node *of_node;
 
@@ -38,10 +41,12 @@
 	struct resource *busn_resource;
 	unsigned long busn_offset;
 
+#ifndef CONFIG_PCI_DOMAINS_GENERIC
 	unsigned int index;
 	/* For compatibility with current (as of July 2003) pciutils
 	   and XFree86. Eventually will be removed. */
 	unsigned int need_domain_info;
+#endif
 
 	/* Optional access methods for reading/writing the bus number
 	   of the PCI controller */
@@ -59,12 +64,43 @@
  */
 extern int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
 
+/* Do platform specific device initialization at pci_enable_device() time */
+extern int pcibios_plat_dev_init(struct pci_dev *dev);
+
+extern char * (*pcibios_plat_setup)(char *str);
+
+#ifdef CONFIG_OF
+/* this function parses memory ranges from a device node */
+extern void pci_load_of_ranges(struct pci_controller *hose,
+			       struct device_node *node);
+#else
+static inline void pci_load_of_ranges(struct pci_controller *hose,
+				      struct device_node *node) {}
+#endif
+
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+static inline void set_pci_need_domain_info(struct pci_controller *hose,
+					    int need_domain_info)
+{
+	/* nothing to do */
+}
+#elif defined(CONFIG_PCI_DOMAINS)
+static inline void set_pci_need_domain_info(struct pci_controller *hose,
+					    int need_domain_info)
+{
+	hose->need_domain_info = need_domain_info;
+}
+#endif /* CONFIG_PCI_DOMAINS */
+
+#endif
 
 /* Can be used to override the logic in pci_scan_bus for skipping
    already-configured bus numbers - to be used for buggy BIOSes
    or architectures with incomplete PCI setup by the loader */
-
-extern unsigned int pcibios_assign_all_busses(void);
+static inline unsigned int pcibios_assign_all_busses(void)
+{
+	return 1;
+}
 
 extern unsigned long PCIBIOS_MIN_IO;
 extern unsigned long PCIBIOS_MIN_MEM;
@@ -100,7 +136,12 @@
  */
 #define PCI_DMA_BUS_IS_PHYS     (1)
 
-#ifdef CONFIG_PCI_DOMAINS
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+	return pci_domain_nr(bus);
+}
+#elif defined(CONFIG_PCI_DOMAINS)
 #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
 
 static inline int pci_proc_domain(struct pci_bus *bus)
@@ -121,15 +162,4 @@
 	return channel ? 15 : 14;
 }
 
-extern char * (*pcibios_plat_setup)(char *str);
-
-#ifdef CONFIG_OF
-/* this function parses memory ranges from a device node */
-extern void pci_load_of_ranges(struct pci_controller *hose,
-			       struct device_node *node);
-#else
-static inline void pci_load_of_ranges(struct pci_controller *hose,
-				      struct device_node *node) {}
-#endif
-
 #endif /* _ASM_PCI_H */
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 93c079a..a03e869 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -67,11 +67,7 @@
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 	unsigned long address)
 {
-	pte_t *pte;
-
-	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
-
-	return pte;
+	return (pte_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, PTE_ORDER);
 }
 
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
diff --git a/arch/mips/include/asm/pm-cps.h b/arch/mips/include/asm/pm-cps.h
index 625eda5..89d58d8 100644
--- a/arch/mips/include/asm/pm-cps.h
+++ b/arch/mips/include/asm/pm-cps.h
@@ -13,10 +13,12 @@
 
 /*
  * The CM & CPC can only handle coherence & power control on a per-core basis,
- * thus in an MT system the VPEs within each core are coupled and can only
+ * thus in an MT system the VP(E)s within each core are coupled and can only
  * enter or exit states requiring CM or CPC assistance in unison.
  */
-#ifdef CONFIG_MIPS_MT
+#if defined(CONFIG_CPU_MIPSR6)
+# define coupled_coherence cpu_has_vp
+#elif defined(CONFIG_MIPS_MT)
 # define coupled_coherence cpu_has_mipsmt
 #else
 # define coupled_coherence 0
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index f6fc6aa..b657861 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -152,7 +152,7 @@
 
 static inline long regs_return_value(struct pt_regs *regs)
 {
-	if (is_syscall_success(regs))
+	if (is_syscall_success(regs) || !user_mode(regs))
 		return regs->regs[2];
 	else
 		return -regs->regs[2];
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 8bc6c70..060f23f 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -85,6 +85,20 @@
 extern void play_dead(void);
 #endif
 
+/*
+ * This function will set up the necessary IPIs for Linux to communicate
+ * with the CPUs in mask.
+ * Return 0 on success.
+ */
+int mips_smp_ipi_allocate(const struct cpumask *mask);
+
+/*
+ * This function will free up IPIs allocated with mips_smp_ipi_allocate to the
+ * CPUs in mask, which must be a subset of the IPIs that have been configured.
+ * Return 0 on success.
+ */
+int mips_smp_ipi_free(const struct cpumask *mask);
+
 static inline void arch_send_call_function_single_ipi(int cpu)
 {
 	extern struct plat_smp_ops *mp_ops;	/* private */
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 21a2aab..89fa5c0b 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -16,6 +16,7 @@
 #include <linux/thread_info.h>
 #include <linux/string.h>
 #include <asm/asm-eva.h>
+#include <asm/extable.h>
 
 /*
  * The fs value determines whether argument validity checking should be
@@ -858,7 +859,10 @@
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
+									\
+	check_object_size(__cu_from, __cu_len, true);			\
 	might_fault();							\
+									\
 	if (eva_kernel_access())					\
 		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
 						   __cu_len);		\
@@ -879,6 +883,9 @@
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
+									\
+	check_object_size(__cu_from, __cu_len, true);			\
+									\
 	if (eva_kernel_access())					\
 		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
 						   __cu_len);		\
@@ -897,6 +904,9 @@
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
+									\
+	check_object_size(__cu_to, __cu_len, false);			\
+									\
 	if (eva_kernel_access())					\
 		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,	\
 							      __cu_from,\
@@ -931,6 +941,9 @@
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
+									\
+	check_object_size(__cu_from, __cu_len, true);			\
+									\
 	if (eva_kernel_access()) {					\
 		__cu_len = __invoke_copy_to_kernel(__cu_to,		\
 						   __cu_from,		\
@@ -1123,6 +1136,9 @@
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
+									\
+	check_object_size(__cu_to, __cu_len, false);			\
+									\
 	if (eva_kernel_access()) {					\
 		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
 						     __cu_from,		\
@@ -1161,6 +1177,9 @@
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
+									\
+	check_object_size(__cu_to, __cu_len, false);			\
+									\
 	if (eva_kernel_access()) {					\
 		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
 						     __cu_from,		\
@@ -1485,12 +1504,4 @@
 	return res;
 }
 
-struct exception_table_entry
-{
-	unsigned long insn;
-	unsigned long nextinsn;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
-
 #endif /* _ASM_UACCESS_H */
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index ccdcfcb..655e2fb 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -105,4 +105,9 @@
 #define MAP_HUGE_SHIFT	26
 #define MAP_HUGE_MASK	0x3f
 
+#define PKEY_DISABLE_ACCESS	0x1
+#define PKEY_DISABLE_WRITE	0x2
+#define PKEY_ACCESS_MASK	(PKEY_DISABLE_ACCESS |\
+				 PKEY_DISABLE_WRITE)
+
 #endif /* _ASM_MMAN_H */
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 24ad815..3e940db 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -383,16 +383,20 @@
 #define __NR_copy_file_range		(__NR_Linux + 360)
 #define __NR_preadv2			(__NR_Linux + 361)
 #define __NR_pwritev2			(__NR_Linux + 362)
+#define __NR_pkey_mprotect		(__NR_Linux + 363)
+#define __NR_pkey_alloc			(__NR_Linux + 364)
+#define __NR_pkey_free			(__NR_Linux + 365)
+
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls		362
+#define __NR_Linux_syscalls		365
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux			4000
-#define __NR_O32_Linux_syscalls		362
+#define __NR_O32_Linux_syscalls		365
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
@@ -723,16 +727,19 @@
 #define __NR_copy_file_range		(__NR_Linux + 320)
 #define __NR_preadv2			(__NR_Linux + 321)
 #define __NR_pwritev2			(__NR_Linux + 322)
+#define __NR_pkey_mprotect		(__NR_Linux + 323)
+#define __NR_pkey_alloc			(__NR_Linux + 324)
+#define __NR_pkey_free			(__NR_Linux + 325)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls		322
+#define __NR_Linux_syscalls		325
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux			5000
-#define __NR_64_Linux_syscalls		322
+#define __NR_64_Linux_syscalls		325
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
@@ -1067,15 +1074,18 @@
 #define __NR_copy_file_range		(__NR_Linux + 324)
 #define __NR_preadv2			(__NR_Linux + 325)
 #define __NR_pwritev2			(__NR_Linux + 326)
+#define __NR_pkey_mprotect		(__NR_Linux + 327)
+#define __NR_pkey_alloc			(__NR_Linux + 328)
+#define __NR_pkey_free			(__NR_Linux + 329)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls		326
+#define __NR_Linux_syscalls		329
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux			6000
-#define __NR_N32_Linux_syscalls		326
+#define __NR_N32_Linux_syscalls		329
 
 #endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 58ad63d..9c7f3e1 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -1,5 +1,6 @@
 /*
  * Support for n32 Linux/MIPS ELF binaries.
+ * Author: Ralf Baechle (ralf@linux-mips.org)
  *
  * Copyright (C) 1999, 2001 Ralf Baechle
  * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
@@ -37,7 +38,6 @@
 #define ELF_ET_DYN_BASE		(TASK32_SIZE / 3 * 2)
 
 #include <asm/processor.h>
-#include <linux/module.h>
 #include <linux/elfcore.h>
 #include <linux/compat.h>
 #include <linux/math64.h>
@@ -96,12 +96,6 @@
 
 #define ELF_CORE_EFLAGS EF_MIPS_ABI2
 
-MODULE_DESCRIPTION("Binary format loader for compatibility with n32 Linux/MIPS binaries");
-MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
-
-#undef MODULE_DESCRIPTION
-#undef MODULE_AUTHOR
-
 #undef TASK_SIZE
 #define TASK_SIZE TASK_SIZE32
 
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 49fb881..1ab3432 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -1,5 +1,6 @@
 /*
  * Support for o32 Linux/MIPS ELF binaries.
+ * Author: Ralf Baechle (ralf@linux-mips.org)
  *
  * Copyright (C) 1999, 2001 Ralf Baechle
  * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
@@ -42,7 +43,6 @@
 
 #include <asm/processor.h>
 
-#include <linux/module.h>
 #include <linux/elfcore.h>
 #include <linux/compat.h>
 #include <linux/math64.h>
@@ -99,12 +99,6 @@
 	value->tv_usec = rem / NSEC_PER_USEC;
 }
 
-MODULE_DESCRIPTION("Binary format loader for compatibility with o32 Linux/MIPS binaries");
-MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
-
-#undef MODULE_DESCRIPTION
-#undef MODULE_AUTHOR
-
 #undef TASK_SIZE
 #define TASK_SIZE TASK_SIZE32
 
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 46c227f..12c7181 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -9,7 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/signal.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <asm/branch.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
@@ -866,3 +866,37 @@
 	force_sig(SIGBUS, current);
 	return -EFAULT;
 }
+
+#if (defined CONFIG_KPROBES) || (defined CONFIG_UPROBES)
+
+int __insn_is_compact_branch(union mips_instruction insn)
+{
+	if (!cpu_has_mips_r6)
+		return 0;
+
+	switch (insn.i_format.opcode) {
+	case blezl_op:
+	case bgtzl_op:
+	case blez_op:
+	case bgtz_op:
+		/*
+		 * blez[l] and bgtz[l] opcodes with non-zero rt
+		 * are MIPS R6 compact branches
+		 */
+		if (insn.i_format.rt)
+			return 1;
+		break;
+	case bc6_op:
+	case balc6_op:
+	case pop10_op:
+	case pop30_op:
+	case pop66_op:
+	case pop76_op:
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__insn_is_compact_branch);
+
+#endif  /* CONFIG_KPROBES || CONFIG_UPROBES */
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index 610f0f3..1723b17 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -47,9 +47,14 @@
 
 static void crash_kexec_prepare_cpus(void)
 {
+	static int cpus_stopped;
 	unsigned int msecs;
+	unsigned int ncpus;
 
-	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
+	if (cpus_stopped)
+		return;
+
+	ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
 
 	dump_send_ipi(crash_shutdown_secondary);
 	smp_wmb();
@@ -64,6 +69,17 @@
 		cpu_relax();
 		mdelay(1);
 	}
+
+	cpus_stopped = 1;
+}
+
+/* Override the weak function in kernel/panic.c */
+void crash_smp_send_stop(void)
+{
+	if (_crash_smp_send_stop)
+		_crash_smp_send_stop();
+
+	crash_kexec_prepare_cpus();
 }
 
 #else /* !defined(CONFIG_SMP)  */
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 212f46f..f5c8bce7 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -32,7 +32,8 @@
 #include <asm/ptrace.h>
 #include <asm/branch.h>
 #include <asm/break.h>
-#include <asm/inst.h>
+
+#include "probes-common.h"
 
 static const union mips_instruction breakpoint_insn = {
 	.b_format = {
@@ -55,63 +56,7 @@
 
 static int __kprobes insn_has_delayslot(union mips_instruction insn)
 {
-	switch (insn.i_format.opcode) {
-
-		/*
-		 * This group contains:
-		 * jr and jalr are in r_format format.
-		 */
-	case spec_op:
-		switch (insn.r_format.func) {
-		case jr_op:
-		case jalr_op:
-			break;
-		default:
-			goto insn_ok;
-		}
-
-		/*
-		 * This group contains:
-		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
-		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
-		 */
-	case bcond_op:
-
-		/*
-		 * These are unconditional and in j_format.
-		 */
-	case jal_op:
-	case j_op:
-
-		/*
-		 * These are conditional and in i_format.
-		 */
-	case beq_op:
-	case beql_op:
-	case bne_op:
-	case bnel_op:
-	case blez_op:
-	case blezl_op:
-	case bgtz_op:
-	case bgtzl_op:
-
-		/*
-		 * These are the FPA/cp1 branch instructions.
-		 */
-	case cop1_op:
-
-#ifdef CONFIG_CPU_CAVIUM_OCTEON
-	case lwc2_op: /* This is bbit0 on Octeon */
-	case ldc2_op: /* This is bbit032 on Octeon */
-	case swc2_op: /* This is bbit1 on Octeon */
-	case sdc2_op: /* This is bbit132 on Octeon */
-#endif
-		return 1;
-	default:
-		break;
-	}
-insn_ok:
-	return 0;
+	return __insn_has_delay_slot(insn);
 }
 
 /*
@@ -161,6 +106,12 @@
 		goto out;
 	}
 
+	if (__insn_is_compact_branch(insn)) {
+		pr_notice("Kprobes for compact branches are not supported\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
 	/* insn: must be on special executable page on mips. */
 	p->ainsn.insn = get_insn_slot();
 	if (!p->ainsn.insn) {
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 0b29646..50fb625 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -26,7 +26,6 @@
 #include <linux/utsname.h>
 #include <linux/personality.h>
 #include <linux/dnotify.h>
-#include <linux/module.h>
 #include <linux/binfmts.h>
 #include <linux/security.h>
 #include <linux/compat.h>
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
index 50980bf3..5972520 100644
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -25,6 +25,7 @@
 #ifdef CONFIG_SMP
 void (*relocated_kexec_smp_wait) (void *);
 atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+void (*_crash_smp_send_stop)(void) = NULL;
 #endif
 
 int
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 566b8d2..2a45867 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -52,7 +52,7 @@
 int mips_cpc_probe(void)
 {
 	phys_addr_t addr;
-	unsigned cpu;
+	unsigned int cpu;
 
 	for_each_possible_cpu(cpu)
 		spin_lock_init(&per_cpu(cpc_core_lock, cpu));
@@ -70,7 +70,12 @@
 
 void mips_cpc_lock_other(unsigned int core)
 {
-	unsigned curr_core;
+	unsigned int curr_core;
+
+	if (mips_cm_revision() >= CM_REV_CM3)
+		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
+		return;
+
 	preempt_disable();
 	curr_core = current_cpu_data.core;
 	spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
@@ -86,7 +91,13 @@
 
 void mips_cpc_unlock_other(void)
 {
-	unsigned curr_core = current_cpu_data.core;
+	unsigned int curr_core;
+
+	if (mips_cm_revision() >= CM_REV_CM3)
+		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
+		return;
+
+	curr_core = current_cpu_data.core;
 	spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
 			       per_cpu(cpc_core_lock_flags, curr_core));
 	preempt_enable();
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 0a7e10b..22dedd6 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -15,7 +15,6 @@
 #include <linux/debugfs.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/ptrace.h>
 #include <linux/seq_file.h>
 
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 79850e3..94627a3 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -20,6 +20,7 @@
 
 #undef DEBUG
 
+#include <linux/extable.h>
 #include <linux/moduleloader.h>
 #include <linux/elf.h>
 #include <linux/mm.h>
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 5b31a94..7cf653e 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -8,6 +8,7 @@
  * option) any later version.
  */
 
+#include <linux/cpuhotplug.h>
 #include <linux/init.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
@@ -70,13 +71,8 @@
 DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);
 
 /* A somewhat arbitrary number of labels & relocs for uasm */
-static struct uasm_label labels[32] __initdata;
-static struct uasm_reloc relocs[32] __initdata;
-
-/* CPU dependant sync types */
-static unsigned stype_intervention;
-static unsigned stype_memory;
-static unsigned stype_ordering;
+static struct uasm_label labels[32];
+static struct uasm_reloc relocs[32];
 
 enum mips_reg {
 	zero, at, v0, v1, a0, a1, a2, a3,
@@ -134,7 +130,7 @@
 		return -EINVAL;
 
 	/* Calculate which coupled CPUs (VPEs) are online */
-#ifdef CONFIG_MIPS_MT
+#if defined(CONFIG_MIPS_MT) || defined(CONFIG_CPU_MIPSR6)
 	if (cpu_online(cpu)) {
 		cpumask_and(coupled_mask, cpu_online_mask,
 			    &cpu_sibling_map[cpu]);
@@ -198,10 +194,10 @@
 	return 0;
 }
 
-static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
-					 struct uasm_reloc **pr,
-					 const struct cache_desc *cache,
-					 unsigned op, int lbl)
+static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
+				  struct uasm_reloc **pr,
+				  const struct cache_desc *cache,
+				  unsigned op, int lbl)
 {
 	unsigned cache_size = cache->ways << cache->waybit;
 	unsigned i;
@@ -242,10 +238,10 @@
 	uasm_i_nop(pp);
 }
 
-static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
-				    struct uasm_reloc **pr,
-				    const struct cpuinfo_mips *cpu_info,
-				    int lbl)
+static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
+			     struct uasm_reloc **pr,
+			     const struct cpuinfo_mips *cpu_info,
+			     int lbl)
 {
 	unsigned i, fsb_size = 8;
 	unsigned num_loads = (fsb_size * 3) / 2;
@@ -272,14 +268,9 @@
 		/* On older ones it's unavailable */
 		return -1;
 
-	/* CPUs which do not require the workaround */
-	case CPU_P5600:
-	case CPU_I6400:
-		return 0;
-
 	default:
-		WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n");
-		return -1;
+		/* Assume that the CPU does not need this workaround */
+		return 0;
 	}
 
 	/*
@@ -320,8 +311,8 @@
 			     i * line_size * line_stride, t0);
 	}
 
-	/* Completion barrier */
-	uasm_i_sync(pp, stype_memory);
+	/* Barrier ensuring previous cache invalidates are complete */
+	uasm_i_sync(pp, STYPE_SYNC);
 	uasm_i_ehb(pp);
 
 	/* Check whether the pipeline stalled due to the FSB being full */
@@ -340,9 +331,9 @@
 	return 0;
 }
 
-static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
-				       struct uasm_reloc **pr,
-				       unsigned r_addr, int lbl)
+static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
+				struct uasm_reloc **pr,
+				unsigned r_addr, int lbl)
 {
 	uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
 	uasm_build_label(pl, *pp, lbl);
@@ -353,7 +344,7 @@
 	uasm_i_nop(pp);
 }
 
-static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
+static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
 {
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
@@ -411,7 +402,7 @@
 
 	if (coupled_coherence) {
 		/* Increment ready_count */
-		uasm_i_sync(&p, stype_ordering);
+		uasm_i_sync(&p, STYPE_SYNC_MB);
 		uasm_build_label(&l, p, lbl_incready);
 		uasm_i_ll(&p, t1, 0, r_nc_count);
 		uasm_i_addiu(&p, t2, t1, 1);
@@ -419,8 +410,8 @@
 		uasm_il_beqz(&p, &r, t2, lbl_incready);
 		uasm_i_addiu(&p, t1, t1, 1);
 
-		/* Ordering barrier */
-		uasm_i_sync(&p, stype_ordering);
+		/* Barrier ensuring all CPUs see the updated r_nc_count value */
+		uasm_i_sync(&p, STYPE_SYNC_MB);
 
 		/*
 		 * If this is the last VPE to become ready for non-coherence
@@ -441,7 +432,8 @@
 			uasm_i_lw(&p, t0, 0, r_nc_count);
 			uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
 			uasm_i_ehb(&p);
-			uasm_i_yield(&p, zero, t1);
+			if (cpu_has_mipsmt)
+				uasm_i_yield(&p, zero, t1);
 			uasm_il_b(&p, &r, lbl_poll_cont);
 			uasm_i_nop(&p);
 		} else {
@@ -449,8 +441,21 @@
 			 * The core will lose power & this VPE will not continue
 			 * so it can simply halt here.
 			 */
-			uasm_i_addiu(&p, t0, zero, TCHALT_H);
-			uasm_i_mtc0(&p, t0, 2, 4);
+			if (cpu_has_mipsmt) {
+				/* Halt the VPE via C0 tchalt register */
+				uasm_i_addiu(&p, t0, zero, TCHALT_H);
+				uasm_i_mtc0(&p, t0, 2, 4);
+			} else if (cpu_has_vp) {
+				/* Halt the VP via the CPC VP_STOP register */
+				unsigned int vpe_id;
+
+				vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+				uasm_i_addiu(&p, t0, zero, 1 << vpe_id);
+				UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop());
+				uasm_i_sw(&p, t0, 0, t1);
+			} else {
+				BUG();
+			}
 			uasm_build_label(&l, p, lbl_secondary_hang);
 			uasm_il_b(&p, &r, lbl_secondary_hang);
 			uasm_i_nop(&p);
@@ -472,22 +477,24 @@
 	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
 			      Index_Writeback_Inv_D, lbl_flushdcache);
 
-	/* Completion barrier */
-	uasm_i_sync(&p, stype_memory);
+	/* Barrier ensuring previous cache invalidates are complete */
+	uasm_i_sync(&p, STYPE_SYNC);
 	uasm_i_ehb(&p);
 
-	/*
-	 * Disable all but self interventions. The load from COHCTL is defined
-	 * by the interAptiv & proAptiv SUMs as ensuring that the operation
-	 * resulting from the preceding store is complete.
-	 */
-	uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
-	uasm_i_sw(&p, t0, 0, r_pcohctl);
-	uasm_i_lw(&p, t0, 0, r_pcohctl);
+	if (mips_cm_revision() < CM_REV_CM3) {
+		/*
+		* Disable all but self interventions. The load from COHCTL is
+		* defined by the interAptiv & proAptiv SUMs as ensuring that the
+		*  operation resulting from the preceding store is complete.
+		*/
+		uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
+		uasm_i_sw(&p, t0, 0, r_pcohctl);
+		uasm_i_lw(&p, t0, 0, r_pcohctl);
 
-	/* Sync to ensure previous interventions are complete */
-	uasm_i_sync(&p, stype_intervention);
-	uasm_i_ehb(&p);
+		/* Barrier to ensure write to coherence control is complete */
+		uasm_i_sync(&p, STYPE_SYNC);
+		uasm_i_ehb(&p);
+	}
 
 	/* Disable coherence */
 	uasm_i_sw(&p, zero, 0, r_pcohctl);
@@ -531,8 +538,8 @@
 			goto gen_done;
 		}
 
-		/* Completion barrier */
-		uasm_i_sync(&p, stype_memory);
+		/* Barrier to ensure write to CPC command is complete */
+		uasm_i_sync(&p, STYPE_SYNC);
 		uasm_i_ehb(&p);
 	}
 
@@ -562,26 +569,29 @@
 	 * will run this. The first will actually re-enable coherence & the
 	 * rest will just be performing a rather unusual nop.
 	 */
-	uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK);
+	uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3
+				? CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK
+				: CM3_GCR_Cx_COHERENCE_COHEN_MSK);
+
 	uasm_i_sw(&p, t0, 0, r_pcohctl);
 	uasm_i_lw(&p, t0, 0, r_pcohctl);
 
-	/* Completion barrier */
-	uasm_i_sync(&p, stype_memory);
+	/* Barrier to ensure write to coherence control is complete */
+	uasm_i_sync(&p, STYPE_SYNC);
 	uasm_i_ehb(&p);
 
 	if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
 		/* Decrement ready_count */
 		uasm_build_label(&l, p, lbl_decready);
-		uasm_i_sync(&p, stype_ordering);
+		uasm_i_sync(&p, STYPE_SYNC_MB);
 		uasm_i_ll(&p, t1, 0, r_nc_count);
 		uasm_i_addiu(&p, t2, t1, -1);
 		uasm_i_sc(&p, t2, 0, r_nc_count);
 		uasm_il_beqz(&p, &r, t2, lbl_decready);
 		uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
 
-		/* Ordering barrier */
-		uasm_i_sync(&p, stype_ordering);
+		/* Barrier ensuring all CPUs see the updated r_nc_count value */
+		uasm_i_sync(&p, STYPE_SYNC_MB);
 	}
 
 	if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
@@ -602,8 +612,8 @@
 		 */
 		uasm_build_label(&l, p, lbl_secondary_cont);
 
-		/* Ordering barrier */
-		uasm_i_sync(&p, stype_ordering);
+		/* Barrier ensuring all CPUs see the updated r_nc_count value */
+		uasm_i_sync(&p, STYPE_SYNC_MB);
 	}
 
 	/* The core is coherent, time to return to C code */
@@ -628,7 +638,7 @@
 	return NULL;
 }
 
-static int __init cps_gen_core_entries(unsigned cpu)
+static int cps_pm_online_cpu(unsigned int cpu)
 {
 	enum cps_pm_state state;
 	unsigned core = cpu_data[cpu].core;
@@ -670,29 +680,10 @@
 
 static int __init cps_pm_init(void)
 {
-	unsigned cpu;
-	int err;
-
-	/* Detect appropriate sync types for the system */
-	switch (current_cpu_data.cputype) {
-	case CPU_INTERAPTIV:
-	case CPU_PROAPTIV:
-	case CPU_M5150:
-	case CPU_P5600:
-	case CPU_I6400:
-		stype_intervention = 0x2;
-		stype_memory = 0x3;
-		stype_ordering = 0x10;
-		break;
-
-	default:
-		pr_warn("Power management is using heavyweight sync 0\n");
-	}
-
 	/* A CM is required for all non-coherent states */
 	if (!mips_cm_present()) {
 		pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
-		goto out;
+		return 0;
 	}
 
 	/*
@@ -722,12 +713,7 @@
 		pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
 	}
 
-	for_each_present_cpu(cpu) {
-		err = cps_gen_core_entries(cpu);
-		if (err)
-			return err;
-	}
-out:
-	return 0;
+	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PM_CPS_CPU_ONLINE",
+				 cps_pm_online_cpu, NULL);
 }
 arch_initcall(cps_pm_init);
diff --git a/arch/mips/kernel/probes-common.h b/arch/mips/kernel/probes-common.h
new file mode 100644
index 0000000..dd08e41
--- /dev/null
+++ b/arch/mips/kernel/probes-common.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Marcin Nowakowski <marcin.nowakowski@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __PROBES_COMMON_H
+#define __PROBES_COMMON_H
+
+#include <asm/inst.h>
+
+int __insn_is_compact_branch(union mips_instruction insn);
+
+static inline int __insn_has_delay_slot(const union mips_instruction insn)
+{
+	switch (insn.i_format.opcode) {
+	/*
+	 * jr and jalr are in r_format format.
+	 */
+	case spec_op:
+		switch (insn.r_format.func) {
+		case jalr_op:
+		case jr_op:
+			return 1;
+		}
+		break;
+
+	/*
+	 * This group contains:
+	 * bltz_op, bgez_op, bltzl_op, bgezl_op,
+	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+	 */
+	case bcond_op:
+		switch (insn.i_format.rt) {
+		case bltz_op:
+		case bltzl_op:
+		case bgez_op:
+		case bgezl_op:
+		case bltzal_op:
+		case bltzall_op:
+		case bgezal_op:
+		case bgezall_op:
+		case bposge32_op:
+			return 1;
+		}
+		break;
+
+	/*
+	 * These are unconditional and in j_format.
+	 */
+	case jal_op:
+	case j_op:
+	case beq_op:
+	case beql_op:
+	case bne_op:
+	case bnel_op:
+	case blez_op: /* not really i_format */
+	case blezl_op:
+	case bgtz_op:
+	case bgtzl_op:
+		return 1;
+
+	/*
+	 * And now the FPA/cp1 branch instructions.
+	 */
+	case cop1_op:
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	case lwc2_op: /* This is bbit0 on Octeon */
+	case ldc2_op: /* This is bbit032 on Octeon */
+	case swc2_op: /* This is bbit1 on Octeon */
+	case sdc2_op: /* This is bbit132 on Octeon */
+#endif
+		return 1;
+	}
+
+	return 0;
+}
+
+#endif  /* __PROBES_COMMON_H */
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 97dc01b..4eff2ae 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -135,6 +135,13 @@
 	seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package);
 	seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
 
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
+	if (cpu_has_mipsmt)
+		seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
+	else if (cpu_has_vp)
+		seq_printf(m, "VP\t\t\t: %d\n", cpu_data[n].vpe_id);
+#endif
+
 	sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
 		      cpu_has_vce ? "%u" : "not available");
 	seq_printf(m, fmt, 'D', vced_count);
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 283b5a1..7e71a4e 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -70,7 +70,7 @@
 			break;
 
 		copied = access_process_vm(child, (u64)addrOthers, &tmp,
-				sizeof(tmp), 0);
+				sizeof(tmp), FOLL_FORCE);
 		if (copied != sizeof(tmp))
 			break;
 		ret = put_user(tmp, (u32 __user *) (unsigned long) data);
@@ -179,7 +179,8 @@
 			break;
 		ret = 0;
 		if (access_process_vm(child, (u64)addrOthers, &data,
-					sizeof(data), 1) == sizeof(data))
+					sizeof(data),
+					FOLL_FORCE | FOLL_WRITE) == sizeof(data))
 			break;
 		ret = -EIO;
 		break;
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index c8e43e0..c29d397 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -597,3 +597,6 @@
 	PTR	sys_copy_file_range		/* 4360 */
 	PTR	sys_preadv2
 	PTR	sys_pwritev2
+	PTR	sys_pkey_mprotect
+	PTR	sys_pkey_alloc
+	PTR	sys_pkey_free			/* 4365 */
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index e6ede12..0687f96 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -435,4 +435,7 @@
 	PTR	sys_copy_file_range		/* 5320 */
 	PTR	sys_preadv2
 	PTR	sys_pwritev2
+	PTR	sys_pkey_mprotect
+	PTR	sys_pkey_alloc
+	PTR	sys_pkey_free			/* 5325 */
 	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 51d3988..0331ba3 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -430,4 +430,7 @@
 	PTR	sys_copy_file_range
 	PTR	compat_sys_preadv2		/* 6325 */
 	PTR	compat_sys_pwritev2
+	PTR	sys_pkey_mprotect
+	PTR	sys_pkey_alloc
+	PTR	sys_pkey_free
 	.size	sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 6efa713..5a47042 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -585,4 +585,7 @@
 	PTR	sys_copy_file_range		/* 4360 */
 	PTR	compat_sys_preadv2
 	PTR	compat_sys_pwritev2
+	PTR	sys_pkey_mprotect
+	PTR	sys_pkey_alloc
+	PTR	sys_pkey_free			/* 4365 */
 	.size	sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/smp-gic.c b/arch/mips/kernel/smp-gic.c
deleted file mode 100644
index 9b63829..0000000
--- a/arch/mips/kernel/smp-gic.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2013 Imagination Technologies
- * Author: Paul Burton <paul.burton@imgtec.com>
- *
- * Based on smp-cmp.c:
- *  Copyright (C) 2007 MIPS Technologies, Inc.
- *  Author: Chris Dearman (chris@mips.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/irqchip/mips-gic.h>
-#include <linux/printk.h>
-
-#include <asm/mips-cpc.h>
-#include <asm/smp-ops.h>
-
-void gic_send_ipi_single(int cpu, unsigned int action)
-{
-	unsigned long flags;
-	unsigned int intr;
-	unsigned int core = cpu_data[cpu].core;
-
-	pr_debug("CPU%d: %s cpu %d action %u status %08x\n",
-		 smp_processor_id(), __func__, cpu, action, read_c0_status());
-
-	local_irq_save(flags);
-
-	switch (action) {
-	case SMP_CALL_FUNCTION:
-		intr = plat_ipi_call_int_xlate(cpu);
-		break;
-
-	case SMP_RESCHEDULE_YOURSELF:
-		intr = plat_ipi_resched_int_xlate(cpu);
-		break;
-
-	default:
-		BUG();
-	}
-
-	gic_send_ipi(intr);
-
-	if (mips_cpc_present() && (core != current_cpu_data.core)) {
-		while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
-			mips_cm_lock_other(core, 0);
-			mips_cpc_lock_other(core);
-			write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
-			mips_cpc_unlock_other();
-			mips_cm_unlock_other();
-		}
-	}
-
-	local_irq_restore(flags);
-}
-
-void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action)
-{
-	unsigned int i;
-
-	for_each_cpu(i, mask)
-		gic_send_ipi_single(i, action);
-}
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 4f9570a..e077ea3 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -289,26 +289,3 @@
 	.prepare_cpus		= vsmp_prepare_cpus,
 };
 
-#ifdef CONFIG_PROC_FS
-static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
-	unsigned long action_unused, void *data)
-{
-	struct proc_cpuinfo_notifier_args *pcn = data;
-	struct seq_file *m = pcn->m;
-	unsigned long n = pcn->n;
-
-	if (!cpu_has_mipsmt)
-		return NOTIFY_OK;
-
-	seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
-
-	return NOTIFY_OK;
-}
-
-static int __init proc_cpuinfo_notifier_init(void)
-{
-	return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
-}
-
-subsys_initcall(proc_cpuinfo_notifier_init);
-#endif
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index b0baf48..7ebb191 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -25,7 +25,7 @@
 #include <linux/smp.h>
 #include <linux/spinlock.h>
 #include <linux/threads.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/time.h>
 #include <linux/timex.h>
 #include <linux/sched.h>
@@ -192,9 +192,11 @@
 				continue;
 
 			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
+				mips_cm_lock_other(core, 0);
 				mips_cpc_lock_other(core);
 				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
 				mips_cpc_unlock_other();
+				mips_cm_unlock_other();
 			}
 		}
 	}
@@ -229,7 +231,7 @@
 	.name		= "IPI call"
 };
 
-static __init void smp_ipi_init_one(unsigned int virq,
+static void smp_ipi_init_one(unsigned int virq,
 				    struct irqaction *action)
 {
 	int ret;
@@ -239,9 +241,11 @@
 	BUG_ON(ret);
 }
 
-static int __init mips_smp_ipi_init(void)
+static unsigned int call_virq, sched_virq;
+
+int mips_smp_ipi_allocate(const struct cpumask *mask)
 {
-	unsigned int call_virq, sched_virq;
+	int virq;
 	struct irq_domain *ipidomain;
 	struct device_node *node;
 
@@ -268,16 +272,20 @@
 	if (!ipidomain)
 		return 0;
 
-	call_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
-	BUG_ON(!call_virq);
+	virq = irq_reserve_ipi(ipidomain, mask);
+	BUG_ON(!virq);
+	if (!call_virq)
+		call_virq = virq;
 
-	sched_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
-	BUG_ON(!sched_virq);
+	virq = irq_reserve_ipi(ipidomain, mask);
+	BUG_ON(!virq);
+	if (!sched_virq)
+		sched_virq = virq;
 
 	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
 		int cpu;
 
-		for_each_cpu(cpu, cpu_possible_mask) {
+		for_each_cpu(cpu, mask) {
 			smp_ipi_init_one(call_virq + cpu, &irq_call);
 			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
 		}
@@ -286,6 +294,45 @@
 		smp_ipi_init_one(sched_virq, &irq_resched);
 	}
 
+	return 0;
+}
+
+int mips_smp_ipi_free(const struct cpumask *mask)
+{
+	struct irq_domain *ipidomain;
+	struct device_node *node;
+
+	node = of_irq_find_parent(of_root);
+	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
+
+	/*
+	 * Some platforms have half DT setup. So if we found irq node but
+	 * didn't find an ipidomain, try to search for one that is not in the
+	 * DT.
+	 */
+	if (node && !ipidomain)
+		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
+
+	BUG_ON(!ipidomain);
+
+	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
+		int cpu;
+
+		for_each_cpu(cpu, mask) {
+			remove_irq(call_virq + cpu, &irq_call);
+			remove_irq(sched_virq + cpu, &irq_resched);
+		}
+	}
+	irq_destroy_ipi(call_virq, mask);
+	irq_destroy_ipi(sched_virq, mask);
+	return 0;
+}
+
+
+static int __init mips_smp_ipi_init(void)
+{
+	mips_smp_ipi_allocate(cpu_possible_mask);
+
 	call_desc = irq_to_desc(call_virq);
 	sched_desc = irq_to_desc(sched_virq);
 
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 3de85be..1f5fdee 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
@@ -48,6 +49,7 @@
 #include <asm/fpu.h>
 #include <asm/fpu_emulator.h>
 #include <asm/idle.h>
+#include <asm/mips-cm.h>
 #include <asm/mips-r2-to-r6-emul.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
@@ -444,6 +446,8 @@
 
 	if (board_be_handler)
 		action = board_be_handler(regs, fixup != NULL);
+	else
+		mips_cm_error_report();
 
 	switch (action) {
 	case MIPS_BE_DISCARD:
@@ -2091,6 +2095,14 @@
 {
 	if (cpu_has_veic || cpu_has_vint) {
 		unsigned long sr = set_c0_status(ST0_BEV);
+		/* If available, use WG to set top bits of EBASE */
+		if (cpu_has_ebase_wg) {
+#ifdef CONFIG_64BIT
+			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
+#else
+			write_c0_ebase(ebase | MIPS_EBASE_WG);
+#endif
+		}
 		write_c0_ebase(ebase);
 		write_c0_status(sr);
 		/* Setting vector spacing enables EI/VI mode  */
@@ -2127,8 +2139,17 @@
 		 * We shouldn't trust a secondary core has a sane EBASE register
 		 * so use the one calculated by the boot CPU.
 		 */
-		if (!is_boot_cpu)
+		if (!is_boot_cpu) {
+			/* If available, use WG to set top bits of EBASE */
+			if (cpu_has_ebase_wg) {
+#ifdef CONFIG_64BIT
+				write_c0_ebase_64(ebase | MIPS_EBASE_WG);
+#else
+				write_c0_ebase(ebase | MIPS_EBASE_WG);
+#endif
+			}
 			write_c0_ebase(ebase);
+		}
 
 		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
 		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
@@ -2209,13 +2230,39 @@
 
 	if (cpu_has_veic || cpu_has_vint) {
 		unsigned long size = 0x200 + VECTORSPACING*64;
+		phys_addr_t ebase_pa;
+
 		ebase = (unsigned long)
 			__alloc_bootmem(size, 1 << fls(size), 0);
+
+		/*
+		 * Try to ensure ebase resides in KSeg0 if possible.
+		 *
+		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
+		 * hitting a poorly defined exception base for Cache Errors.
+		 * The allocation is likely to be in the low 512MB of physical,
+		 * in which case we should be able to convert to KSeg0.
+		 *
+		 * EVA is special though as it allows segments to be rearranged
+		 * and to become uncached during cache error handling.
+		 */
+		ebase_pa = __pa(ebase);
+		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
+			ebase = CKSEG0ADDR(ebase_pa);
 	} else {
 		ebase = CAC_BASE;
 
-		if (cpu_has_mips_r2_r6)
-			ebase += (read_c0_ebase() & 0x3ffff000);
+		if (cpu_has_mips_r2_r6) {
+			if (cpu_has_ebase_wg) {
+#ifdef CONFIG_64BIT
+				ebase = (read_c0_ebase_64() & ~0xfff);
+#else
+				ebase = (read_c0_ebase() & ~0xfff);
+#endif
+			} else {
+				ebase += (read_c0_ebase() & 0x3ffff000);
+			}
+		}
 	}
 
 	if (cpu_has_mmips) {
diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c
index 4c7c155..dbb9174 100644
--- a/arch/mips/kernel/uprobes.c
+++ b/arch/mips/kernel/uprobes.c
@@ -8,71 +8,12 @@
 #include <asm/branch.h>
 #include <asm/cpu-features.h>
 #include <asm/ptrace.h>
-#include <asm/inst.h>
+
+#include "probes-common.h"
 
 static inline int insn_has_delay_slot(const union mips_instruction insn)
 {
-	switch (insn.i_format.opcode) {
-	/*
-	 * jr and jalr are in r_format format.
-	 */
-	case spec_op:
-		switch (insn.r_format.func) {
-		case jalr_op:
-		case jr_op:
-			return 1;
-		}
-		break;
-
-	/*
-	 * This group contains:
-	 * bltz_op, bgez_op, bltzl_op, bgezl_op,
-	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
-	 */
-	case bcond_op:
-		switch (insn.i_format.rt) {
-		case bltz_op:
-		case bltzl_op:
-		case bgez_op:
-		case bgezl_op:
-		case bltzal_op:
-		case bltzall_op:
-		case bgezal_op:
-		case bgezall_op:
-		case bposge32_op:
-			return 1;
-		}
-		break;
-
-	/*
-	 * These are unconditional and in j_format.
-	 */
-	case jal_op:
-	case j_op:
-	case beq_op:
-	case beql_op:
-	case bne_op:
-	case bnel_op:
-	case blez_op: /* not really i_format */
-	case blezl_op:
-	case bgtz_op:
-	case bgtzl_op:
-		return 1;
-
-	/*
-	 * And now the FPA/cp1 branch instructions.
-	 */
-	case cop1_op:
-#ifdef CONFIG_CPU_CAVIUM_OCTEON
-	case lwc2_op: /* This is bbit0 on Octeon */
-	case ldc2_op: /* This is bbit032 on Octeon */
-	case swc2_op: /* This is bbit1 on Octeon */
-	case sdc2_op: /* This is bbit132 on Octeon */
-#endif
-		return 1;
-	}
-
-	return 0;
+	return __insn_has_delay_slot(insn);
 }
 
 /**
@@ -95,6 +36,12 @@
 		return -EINVAL;
 
 	inst.word = aup->insn[0];
+
+	if (__insn_is_compact_branch(inst)) {
+		pr_notice("Uprobes for compact branches are not supported\n");
+		return -EINVAL;
+	}
+
 	aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)];
 	aup->ixol[1] = UPROBE_BRK_UPROBE_XOL;		/* NOP  */
 
@@ -282,19 +229,14 @@
 void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
 				  void *src, unsigned long len)
 {
-	void *kaddr;
+	unsigned long kaddr, kstart;
 
 	/* Initialize the slot */
-	kaddr = kmap_atomic(page);
-	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
-	kunmap_atomic(kaddr);
-
-	/*
-	 * The MIPS version of flush_icache_range will operate safely on
-	 * user space addresses and more importantly, it doesn't require a
-	 * VMA argument.
-	 */
-	flush_icache_range(vaddr, vaddr + len);
+	kaddr = (unsigned long)kmap_atomic(page);
+	kstart = kaddr + (vaddr & ~PAGE_MASK);
+	memcpy((void *)kstart, src, len);
+	flush_icache_range(kstart, kstart + len);
+	kunmap_atomic((void *)kaddr);
 }
 
 /**
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c
index a36b77e..f436299 100644
--- a/arch/mips/kvm/commpage.c
+++ b/arch/mips/kvm/commpage.c
@@ -12,7 +12,6 @@
 
 #include <linux/errno.h>
 #include <linux/err.h>
-#include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
 #include <linux/bootmem.h>
diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c
index d280894..010cef2 100644
--- a/arch/mips/kvm/dyntrans.c
+++ b/arch/mips/kvm/dyntrans.c
@@ -13,7 +13,6 @@
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/kvm_host.h>
-#include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
 #include <linux/bootmem.h>
@@ -45,8 +44,8 @@
 	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
 		local_irq_save(flags);
 		memcpy((void *)opc, (void *)&replace, sizeof(u32));
-		local_flush_icache_range((unsigned long)opc,
-					 (unsigned long)opc + 32);
+		__local_flush_icache_user_range((unsigned long)opc,
+						(unsigned long)opc + 32);
 		local_irq_restore(flags);
 	} else {
 		kvm_err("%s: Invalid address: %p\n", __func__, opc);
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 4db4c03..8770f32 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -13,7 +13,6 @@
 #include <linux/err.h>
 #include <linux/ktime.h>
 #include <linux/kvm_host.h>
-#include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
 #include <linux/bootmem.h>
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c
index ad28dac..e88403b 100644
--- a/arch/mips/kvm/interrupt.c
+++ b/arch/mips/kvm/interrupt.c
@@ -11,7 +11,6 @@
 
 #include <linux/errno.h>
 #include <linux/err.h>
-#include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
 #include <linux/bootmem.h>
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 3a5484f..3b20441 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -11,7 +11,6 @@
 
 #include <linux/errno.h>
 #include <linux/err.h>
-#include <linux/module.h>
 #include <linux/vmalloc.h>
 
 #include <linux/kvm_host.h>
diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c
index 4625495..577ec81 100644
--- a/arch/mips/lantiq/xway/vmmc.c
+++ b/arch/mips/lantiq/xway/vmmc.c
@@ -6,7 +6,7 @@
  *  Copyright (C) 2012 John Crispin <john@phrozen.org>
  */
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/of_platform.h>
 #include <linux/of_gpio.h>
 #include <linux/dma-mapping.h>
@@ -55,7 +55,6 @@
 	{ .compatible = "lantiq,vmmc-xway" },
 	{},
 };
-MODULE_DEVICE_TABLE(of, vmmc_match);
 
 static struct platform_driver vmmc_driver = {
 	.probe = vmmc_probe,
@@ -64,5 +63,4 @@
 		.of_match_table = vmmc_match,
 	},
 };
-
-module_platform_driver(vmmc_driver);
+builtin_platform_driver(vmmc_driver);
diff --git a/arch/mips/lantiq/xway/xrx200_phy_fw.c b/arch/mips/lantiq/xway/xrx200_phy_fw.c
index 71e518c..f0a0f2d 100644
--- a/arch/mips/lantiq/xway/xrx200_phy_fw.c
+++ b/arch/mips/lantiq/xway/xrx200_phy_fw.c
@@ -1,4 +1,7 @@
 /*
+ * Lantiq XRX200 PHY Firmware Loader
+ * Author: John Crispin
+ *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
@@ -8,7 +11,6 @@
 
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
-#include <linux/module.h>
 #include <linux/firmware.h>
 #include <linux/of_platform.h>
 
@@ -100,7 +102,6 @@
 	{ .compatible = "lantiq,phy-xrx200" },
 	{},
 };
-MODULE_DEVICE_TABLE(of, xway_phy_match);
 
 static struct platform_driver xway_phy_driver = {
 	.probe = xway_phy_fw_probe,
@@ -109,9 +110,4 @@
 		.of_match_table = xway_phy_match,
 	},
 };
-
-module_platform_driver(xway_phy_driver);
-
-MODULE_AUTHOR("John Crispin <john@phrozen.org>");
-MODULE_DESCRIPTION("Lantiq XRX200 PHY Firmware Loader");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(xway_phy_driver);
diff --git a/arch/mips/lasat/picvue_proc.c b/arch/mips/lasat/picvue_proc.c
index 27533c1..dd292dc 100644
--- a/arch/mips/lasat/picvue_proc.c
+++ b/arch/mips/lasat/picvue_proc.c
@@ -16,6 +16,7 @@
 
 #include <linux/timer.h>
 #include <linux/mutex.h>
+#include <linux/uaccess.h>
 
 #include "picvue.h"
 
diff --git a/arch/mips/lib/ashldi3.c b/arch/mips/lib/ashldi3.c
index 927dc94..c3e2205 100644
--- a/arch/mips/lib/ashldi3.c
+++ b/arch/mips/lib/ashldi3.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
 
 #include "libgcc.h"
 
diff --git a/arch/mips/lib/ashrdi3.c b/arch/mips/lib/ashrdi3.c
index 9fdf1a5..1745602 100644
--- a/arch/mips/lib/ashrdi3.c
+++ b/arch/mips/lib/ashrdi3.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
 
 #include "libgcc.h"
 
diff --git a/arch/mips/lib/bswapdi.c b/arch/mips/lib/bswapdi.c
index e3e77aa..a811414 100644
--- a/arch/mips/lib/bswapdi.c
+++ b/arch/mips/lib/bswapdi.c
@@ -1,4 +1,5 @@
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/compiler.h>
 
 unsigned long long notrace __bswapdi2(unsigned long long u)
 {
diff --git a/arch/mips/lib/bswapsi.c b/arch/mips/lib/bswapsi.c
index 530a8af..106fd97 100644
--- a/arch/mips/lib/bswapsi.c
+++ b/arch/mips/lib/bswapsi.c
@@ -1,4 +1,5 @@
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/compiler.h>
 
 unsigned int notrace __bswapsi2(unsigned int u)
 {
diff --git a/arch/mips/lib/cmpdi2.c b/arch/mips/lib/cmpdi2.c
index 06857da..9d849d8 100644
--- a/arch/mips/lib/cmpdi2.c
+++ b/arch/mips/lib/cmpdi2.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
 
 #include "libgcc.h"
 
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index 21d27c6..2307a3c 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -8,7 +8,7 @@
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  * Copyright (C) 2007, 2014 Maciej W. Rozycki
  */
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/param.h>
 #include <linux/smp.h>
 #include <linux/stringify.h>
diff --git a/arch/mips/lib/iomap-pci.c b/arch/mips/lib/iomap-pci.c
index fd35daa..8ed3f25 100644
--- a/arch/mips/lib/iomap-pci.c
+++ b/arch/mips/lib/iomap-pci.c
@@ -7,9 +7,11 @@
  *     written by Ralf Baechle <ralf@linux-mips.org>
  */
 #include <linux/pci.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <asm/io.h>
 
+#ifdef CONFIG_PCI_DRIVERS_LEGACY
+
 void __iomem *__pci_ioport_map(struct pci_dev *dev,
 			       unsigned long port, unsigned int nr)
 {
@@ -40,6 +42,8 @@
 	return (void __iomem *) (ctrl->io_map_base + port);
 }
 
+#endif /* CONFIG_PCI_DRIVERS_LEGACY */
+
 void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 {
 	iounmap(addr);
diff --git a/arch/mips/lib/iomap.c b/arch/mips/lib/iomap.c
index 8e7e378..9daa924 100644
--- a/arch/mips/lib/iomap.c
+++ b/arch/mips/lib/iomap.c
@@ -6,7 +6,7 @@
  * (C) Copyright 2007 MIPS Technologies, Inc.
  *     written by Ralf Baechle <ralf@linux-mips.org>
  */
-#include <linux/module.h>
+#include <linux/export.h>
 #include <asm/io.h>
 
 /*
diff --git a/arch/mips/lib/lshrdi3.c b/arch/mips/lib/lshrdi3.c
index 3645474..221167c 100644
--- a/arch/mips/lib/lshrdi3.c
+++ b/arch/mips/lib/lshrdi3.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
 
 #include "libgcc.h"
 
diff --git a/arch/mips/lib/ucmpdi2.c b/arch/mips/lib/ucmpdi2.c
index bd599f5..08067fa 100644
--- a/arch/mips/lib/ucmpdi2.c
+++ b/arch/mips/lib/ucmpdi2.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
 
 #include "libgcc.h"
 
diff --git a/arch/mips/loongson32/Kconfig b/arch/mips/loongson32/Kconfig
index 7704f20..3c0c2f2 100644
--- a/arch/mips/loongson32/Kconfig
+++ b/arch/mips/loongson32/Kconfig
@@ -19,6 +19,21 @@
 	select USE_GENERIC_EARLY_PRINTK_8250
 	select COMMON_CLK
 
+config LOONGSON1_LS1C
+	bool "Loongson LS1C board"
+	select CEVT_R4K if !MIPS_EXTERNAL_TIMER
+	select CSRC_R4K if !MIPS_EXTERNAL_TIMER
+	select SYS_HAS_CPU_LOONGSON1C
+	select DMA_NONCOHERENT
+	select BOOT_ELF32
+	select IRQ_MIPS_CPU
+	select SYS_SUPPORTS_32BIT_KERNEL
+	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select SYS_SUPPORTS_HIGHMEM
+	select SYS_SUPPORTS_MIPS16
+	select SYS_HAS_EARLY_PRINTK
+	select USE_GENERIC_EARLY_PRINTK_8250
+	select COMMON_CLK
 endchoice
 
 menuconfig CEVT_CSRC_LS1X
diff --git a/arch/mips/loongson32/Makefile b/arch/mips/loongson32/Makefile
index 5f4bd6e..1ab2c5b 100644
--- a/arch/mips/loongson32/Makefile
+++ b/arch/mips/loongson32/Makefile
@@ -9,3 +9,9 @@
 #
 
 obj-$(CONFIG_LOONGSON1_LS1B)  += ls1b/
+
+#
+# Loongson LS1C board
+#
+
+obj-$(CONFIG_LOONGSON1_LS1C)  += ls1c/
diff --git a/arch/mips/loongson32/Platform b/arch/mips/loongson32/Platform
index ebb6dc2..ffe01c6 100644
--- a/arch/mips/loongson32/Platform
+++ b/arch/mips/loongson32/Platform
@@ -5,3 +5,4 @@
 platform-$(CONFIG_MACH_LOONGSON32)	+= loongson32/
 cflags-$(CONFIG_MACH_LOONGSON32)	+= -I$(srctree)/arch/mips/include/asm/mach-loongson32
 load-$(CONFIG_LOONGSON1_LS1B)		+= 0xffffffff80100000
+load-$(CONFIG_LOONGSON1_LS1C)		+= 0xffffffff80100000
diff --git a/arch/mips/loongson32/common/irq.c b/arch/mips/loongson32/common/irq.c
index 455a770..635a4ab 100644
--- a/arch/mips/loongson32/common/irq.c
+++ b/arch/mips/loongson32/common/irq.c
@@ -62,12 +62,58 @@
 			| (1 << bit), LS1X_INTC_INTIEN(n));
 }
 
+static int ls1x_irq_settype(struct irq_data *d, unsigned int type)
+{
+	unsigned int bit = (d->irq - LS1X_IRQ_BASE) & 0x1f;
+	unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5;
+
+	switch (type) {
+	case IRQ_TYPE_LEVEL_HIGH:
+		__raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
+			| (1 << bit), LS1X_INTC_INTPOL(n));
+		__raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
+			& ~(1 << bit), LS1X_INTC_INTEDGE(n));
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		__raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
+			& ~(1 << bit), LS1X_INTC_INTPOL(n));
+		__raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
+			& ~(1 << bit), LS1X_INTC_INTEDGE(n));
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		__raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
+			| (1 << bit), LS1X_INTC_INTPOL(n));
+		__raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
+			| (1 << bit), LS1X_INTC_INTEDGE(n));
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		__raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
+			& ~(1 << bit), LS1X_INTC_INTPOL(n));
+		__raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
+			| (1 << bit), LS1X_INTC_INTEDGE(n));
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		__raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
+			& ~(1 << bit), LS1X_INTC_INTPOL(n));
+		__raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
+			| (1 << bit), LS1X_INTC_INTEDGE(n));
+		break;
+	case IRQ_TYPE_NONE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static struct irq_chip ls1x_irq_chip = {
 	.name		= "LS1X-INTC",
 	.irq_ack	= ls1x_irq_ack,
 	.irq_mask	= ls1x_irq_mask,
 	.irq_mask_ack	= ls1x_irq_mask_ack,
 	.irq_unmask	= ls1x_irq_unmask,
+	.irq_set_type   = ls1x_irq_settype,
 };
 
 static void ls1x_irq_dispatch(int n)
@@ -107,7 +153,7 @@
 
 }
 
-struct irqaction cascade_irqaction = {
+static struct irqaction cascade_irqaction = {
 	.handler = no_action,
 	.name = "cascade",
 	.flags = IRQF_NO_THREAD,
@@ -120,7 +166,7 @@
 	/* Disable interrupts and clear pending,
 	 * setup all IRQs as high level triggered
 	 */
-	for (n = 0; n < 4; n++) {
+	for (n = 0; n < INTN; n++) {
 		__raw_writel(0x0, LS1X_INTC_INTIEN(n));
 		__raw_writel(0xffffffff, LS1X_INTC_INTCLR(n));
 		__raw_writel(0xffffffff, LS1X_INTC_INTPOL(n));
@@ -129,7 +175,7 @@
 	}
 
 
-	for (n = base; n < LS1X_IRQS; n++) {
+	for (n = base; n < NR_IRQS; n++) {
 		irq_set_chip_and_handler(n, &ls1x_irq_chip,
 					 handle_level_irq);
 	}
@@ -138,6 +184,9 @@
 	setup_irq(INT1_IRQ, &cascade_irqaction);
 	setup_irq(INT2_IRQ, &cascade_irqaction);
 	setup_irq(INT3_IRQ, &cascade_irqaction);
+#if defined(CONFIG_LOONGSON1_LS1C)
+	setup_irq(INT4_IRQ, &cascade_irqaction);
+#endif
 }
 
 void __init arch_init_irq(void)
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
index f2c714d..beff085 100644
--- a/arch/mips/loongson32/common/platform.c
+++ b/arch/mips/loongson32/common/platform.c
@@ -17,11 +17,16 @@
 #include <linux/stmmac.h>
 #include <linux/usb/ehci_pdriver.h>
 
+#include <platform.h>
 #include <loongson1.h>
 #include <cpufreq.h>
 #include <dma.h>
 #include <nand.h>
 
+#define LS1X_RTC_CTRL	((void __iomem *)KSEG1ADDR(LS1X_RTC_BASE + 0x40))
+#define RTC_EXTCLK_OK	(BIT(5) | BIT(8))
+#define RTC_EXTCLK_EN	BIT(8)
+
 /* 8250/16550 compatible UART */
 #define LS1X_UART(_id)						\
 	{							\
@@ -65,6 +70,15 @@
 		p->uartclk = clk_get_rate(clk);
 }
 
+void __init ls1x_rtc_set_extclk(struct platform_device *pdev)
+{
+	u32 val;
+
+	val = __raw_readl(LS1X_RTC_CTRL);
+	if (!(val & RTC_EXTCLK_OK))
+		__raw_writel(val | RTC_EXTCLK_EN, LS1X_RTC_CTRL);
+}
+
 /* CPUFreq */
 static struct plat_ls1x_cpufreq ls1x_cpufreq_pdata = {
 	.clk_name	= "cpu_clk",
@@ -132,6 +146,7 @@
 
 	val = __raw_readl(LS1X_MUX_CTRL1);
 
+#if defined(CONFIG_LOONGSON1_LS1B)
 	plat_dat = dev_get_platdata(&pdev->dev);
 	if (plat_dat->bus_id) {
 		__raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 |
@@ -165,6 +180,17 @@
 		val &= ~GMAC0_SHUT;
 	}
 	__raw_writel(val, LS1X_MUX_CTRL1);
+#elif defined(CONFIG_LOONGSON1_LS1C)
+	plat_dat = dev_get_platdata(&pdev->dev);
+
+	val &= ~PHY_INTF_SELI;
+	if (plat_dat->interface == PHY_INTERFACE_MODE_RMII)
+		val |= 0x4 << PHY_INTF_SELI_SHIFT;
+	__raw_writel(val, LS1X_MUX_CTRL1);
+
+	val = __raw_readl(LS1X_MUX_CTRL0);
+	__raw_writel(val & (~GMAC_SHUT), LS1X_MUX_CTRL0);
+#endif
 
 	return 0;
 }
@@ -172,7 +198,11 @@
 static struct plat_stmmacenet_data ls1x_eth0_pdata = {
 	.bus_id		= 0,
 	.phy_addr	= -1,
+#if defined(CONFIG_LOONGSON1_LS1B)
 	.interface	= PHY_INTERFACE_MODE_MII,
+#elif defined(CONFIG_LOONGSON1_LS1C)
+	.interface	= PHY_INTERFACE_MODE_RMII,
+#endif
 	.mdio_bus_data	= &ls1x_mdio_bus_data,
 	.dma_cfg	= &ls1x_eth_dma_cfg,
 	.has_gmac	= 1,
@@ -203,6 +233,7 @@
 	},
 };
 
+#ifdef CONFIG_LOONGSON1_LS1B
 static struct plat_stmmacenet_data ls1x_eth1_pdata = {
 	.bus_id		= 1,
 	.phy_addr	= -1,
@@ -236,6 +267,7 @@
 		.platform_data = &ls1x_eth1_pdata,
 	},
 };
+#endif	/* CONFIG_LOONGSON1_LS1B */
 
 /* GPIO */
 static struct resource ls1x_gpio0_resources[] = {
diff --git a/arch/mips/loongson32/common/setup.c b/arch/mips/loongson32/common/setup.c
index 62f41af..1640744 100644
--- a/arch/mips/loongson32/common/setup.c
+++ b/arch/mips/loongson32/common/setup.c
@@ -22,7 +22,11 @@
 
 	switch (processor_id & PRID_REV_MASK) {
 	case PRID_REV_LOONGSON1B:
+#if defined(CONFIG_LOONGSON1_LS1B)
 		return "LOONGSON LS1B";
+#elif defined(CONFIG_LOONGSON1_LS1C)
+		return "LOONGSON LS1C";
+#endif
 	default:
 		return "LOONGSON (unknown)";
 	}
diff --git a/arch/mips/loongson32/ls1c/Makefile b/arch/mips/loongson32/ls1c/Makefile
new file mode 100644
index 0000000..a92c6cd
--- /dev/null
+++ b/arch/mips/loongson32/ls1c/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for loongson1C based machines.
+#
+
+obj-y += board.o
diff --git a/arch/mips/loongson32/ls1c/board.c b/arch/mips/loongson32/ls1c/board.c
new file mode 100644
index 0000000..a96bed5
--- /dev/null
+++ b/arch/mips/loongson32/ls1c/board.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016 Yang Ling <gnaygnil@gmail.com>
+ *
+ * This program is free software; you can redistribute	it and/or modify it
+ * under  the terms of	the GNU General	 Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <platform.h>
+
+static struct platform_device *ls1c_platform_devices[] __initdata = {
+	&ls1x_uart_pdev,
+	&ls1x_eth0_pdev,
+	&ls1x_rtc_pdev,
+};
+
+static int __init ls1c_platform_init(void)
+{
+	ls1x_serial_set_uartclk(&ls1x_uart_pdev);
+	ls1x_rtc_set_extclk(&ls1x_rtc_pdev);
+
+	return platform_add_devices(ls1c_platform_devices,
+				   ARRAY_SIZE(ls1c_platform_devices));
+}
+
+arch_initcall(ls1c_platform_init);
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 36775d2..f8b7bf8 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -35,7 +35,6 @@
  */
 #include <linux/sched.h>
 #include <linux/debugfs.h>
-#include <linux/kconfig.h>
 #include <linux/percpu-defs.h>
 #include <linux/perf_event.h>
 
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 05b1d7c..0e45b06 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -294,6 +294,8 @@
 	flush_data_cache_page		= octeon_flush_data_cache_page;
 	flush_icache_range		= octeon_flush_icache_range;
 	local_flush_icache_range	= local_octeon_flush_icache_range;
+	__flush_icache_user_range	= octeon_flush_icache_range;
+	__local_flush_icache_user_range	= local_octeon_flush_icache_range;
 
 	__flush_kernel_vmap_range	= octeon_flush_kernel_vmap_range;
 
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 135ec31..21e4e66 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -325,6 +325,8 @@
 	flush_cache_page = r3k_flush_cache_page;
 	flush_icache_range = r3k_flush_icache_range;
 	local_flush_icache_range = r3k_flush_icache_range;
+	__flush_icache_user_range = r3k_flush_icache_range;
+	__local_flush_icache_user_range = r3k_flush_icache_range;
 
 	__flush_kernel_vmap_range = r3k_flush_kernel_vmap_range;
 
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index fa7d8d3..88cfaf8 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -17,7 +17,7 @@
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/bitops.h>
 
 #include <asm/bcache.h>
@@ -722,11 +722,13 @@
 	unsigned long start;
 	unsigned long end;
 	unsigned int type;
+	bool user;
 };
 
 static inline void __local_r4k_flush_icache_range(unsigned long start,
 						  unsigned long end,
-						  unsigned int type)
+						  unsigned int type,
+						  bool user)
 {
 	if (!cpu_has_ic_fills_f_dc) {
 		if (type == R4K_INDEX ||
@@ -734,7 +736,10 @@
 			r4k_blast_dcache();
 		} else {
 			R4600_HIT_CACHEOP_WAR_IMPL;
-			protected_blast_dcache_range(start, end);
+			if (user)
+				protected_blast_dcache_range(start, end);
+			else
+				blast_dcache_range(start, end);
 		}
 	}
 
@@ -748,27 +753,25 @@
 			break;
 
 		default:
-			protected_blast_icache_range(start, end);
+			if (user)
+				protected_blast_icache_range(start, end);
+			else
+				blast_icache_range(start, end);
 			break;
 		}
 	}
-#ifdef CONFIG_EVA
-	/*
-	 * Due to all possible segment mappings, there might cache aliases
-	 * caused by the bootloader being in non-EVA mode, and the CPU switching
-	 * to EVA during early kernel init. It's best to flush the scache
-	 * to avoid having secondary cores fetching stale data and lead to
-	 * kernel crashes.
-	 */
-	bc_wback_inv(start, (end - start));
-	__sync();
-#endif
 }
 
 static inline void local_r4k_flush_icache_range(unsigned long start,
 						unsigned long end)
 {
-	__local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX);
+	__local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, false);
+}
+
+static inline void local_r4k_flush_icache_user_range(unsigned long start,
+						     unsigned long end)
+{
+	__local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, true);
 }
 
 static inline void local_r4k_flush_icache_range_ipi(void *args)
@@ -777,11 +780,13 @@
 	unsigned long start = fir_args->start;
 	unsigned long end = fir_args->end;
 	unsigned int type = fir_args->type;
+	bool user = fir_args->user;
 
-	__local_r4k_flush_icache_range(start, end, type);
+	__local_r4k_flush_icache_range(start, end, type, user);
 }
 
-static void r4k_flush_icache_range(unsigned long start, unsigned long end)
+static void __r4k_flush_icache_range(unsigned long start, unsigned long end,
+				     bool user)
 {
 	struct flush_icache_range_args args;
 	unsigned long size, cache_size;
@@ -789,6 +794,7 @@
 	args.start = start;
 	args.end = end;
 	args.type = R4K_HIT | R4K_INDEX;
+	args.user = user;
 
 	/*
 	 * Indexed cache ops require an SMP call.
@@ -814,6 +820,16 @@
 	instruction_hazard();
 }
 
+static void r4k_flush_icache_range(unsigned long start, unsigned long end)
+{
+	return __r4k_flush_icache_range(start, end, false);
+}
+
+static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
+{
+	return __r4k_flush_icache_range(start, end, true);
+}
+
 #if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
 
 static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
@@ -1915,9 +1931,16 @@
 	flush_data_cache_page	= r4k_flush_data_cache_page;
 	flush_icache_range	= r4k_flush_icache_range;
 	local_flush_icache_range	= local_r4k_flush_icache_range;
+	__flush_icache_user_range	= r4k_flush_icache_user_range;
+	__local_flush_icache_user_range	= local_r4k_flush_icache_user_range;
 
 #if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
-	if (coherentio) {
+# if defined(CONFIG_DMA_PERDEV_COHERENT)
+	if (0) {
+# else
+	if ((coherentio == IO_COHERENCE_ENABLED) ||
+	    ((coherentio == IO_COHERENCE_DEFAULT) && hw_coherentio)) {
+# endif
 		_dma_cache_wback_inv	= (void *)cache_noop;
 		_dma_cache_wback	= (void *)cache_noop;
 		_dma_cache_inv		= (void *)cache_noop;
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index 596e184..5c28258 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -411,6 +411,9 @@
 		break;
 	}
 
+	__flush_icache_user_range = flush_icache_range;
+	__local_flush_icache_user_range = local_flush_icache_range;
+
 	current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways;
 	current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways;
 
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index bf04c6c..6db3413 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -10,7 +10,7 @@
 #include <linux/fcntl.h>
 #include <linux/kernel.h>
 #include <linux/linkage.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/syscalls.h>
 #include <linux/mm.h>
@@ -33,6 +33,10 @@
 EXPORT_SYMBOL_GPL(flush_icache_range);
 void (*local_flush_icache_range)(unsigned long start, unsigned long end);
 EXPORT_SYMBOL_GPL(local_flush_icache_range);
+void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
+EXPORT_SYMBOL_GPL(__flush_icache_user_range);
+void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
+EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);
 
 void (*__flush_cache_vmap)(void);
 void (*__flush_cache_vunmap)(void);
@@ -74,7 +78,7 @@
 	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
 		return -EFAULT;
 
-	flush_icache_range(addr, addr + bytes);
+	__flush_icache_user_range(addr, addr + bytes);
 
 	return 0;
 }
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index b2eadd6..46d5696 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -11,7 +11,7 @@
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/scatterlist.h>
 #include <linux/string.h>
 #include <linux/gfp.h>
@@ -24,14 +24,15 @@
 
 #include <dma-coherence.h>
 
-#ifdef CONFIG_DMA_MAYBE_COHERENT
-int coherentio = 0;	/* User defined DMA coherency from command line. */
+#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
+/* User defined DMA coherency from command line. */
+enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
 EXPORT_SYMBOL_GPL(coherentio);
 int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */
 
 static int __init setcoherentio(char *str)
 {
-	coherentio = 1;
+	coherentio = IO_COHERENCE_ENABLED;
 	pr_info("Hardware DMA cache coherency (command line)\n");
 	return 0;
 }
@@ -39,7 +40,7 @@
 
 static int __init setnocoherentio(char *str)
 {
-	coherentio = 0;
+	coherentio = IO_COHERENCE_DISABLED;
 	pr_info("Software DMA cache coherency (command line)\n");
 	return 0;
 }
@@ -160,8 +161,7 @@
 	*dma_handle = plat_map_dma_mem(dev, ret, size);
 	if (!plat_device_is_coherent(dev)) {
 		dma_cache_wback_inv((unsigned long) ret, size);
-		if (!hw_coherentio)
-			ret = UNCAC_ADDR(ret);
+		ret = UNCAC_ADDR(ret);
 	}
 
 	return ret;
@@ -189,7 +189,7 @@
 
 	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 
-	if (!plat_device_is_coherent(dev) && !hw_coherentio)
+	if (!plat_device_is_coherent(dev))
 		addr = CAC_ADDR(addr);
 
 	page = virt_to_page((void *) addr);
@@ -209,7 +209,7 @@
 	unsigned long pfn;
 	int ret = -ENXIO;
 
-	if (!plat_device_is_coherent(dev) && !hw_coherentio)
+	if (!plat_device_is_coherent(dev))
 		addr = CAC_ADDR(addr);
 
 	pfn = page_to_pfn(virt_to_page((void *)addr));
diff --git a/arch/mips/mm/extable.c b/arch/mips/mm/extable.c
index 9d25d2b..e474fa2 100644
--- a/arch/mips/mm/extable.c
+++ b/arch/mips/mm/extable.c
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 1997, 99, 2001 - 2004 Ralf Baechle <ralf@linux-mips.org>
  */
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/spinlock.h>
 #include <asm/branch.h>
 #include <asm/uaccess.h>
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 9560ad7..d56a855 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -18,7 +18,6 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
-#include <linux/module.h>
 #include <linux/kprobes.h>
 #include <linux/perf_event.h>
 #include <linux/uaccess.h>
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 42d124f..d8c3c15 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -287,7 +287,7 @@
 	pages += nr;
 
 	ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT,
-				      write, 0, pages);
+				      pages, write ? FOLL_WRITE : 0);
 
 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index d7258a1..f13f510 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -1,5 +1,6 @@
 #include <linux/compiler.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
 #include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 72f7478..3a6edec 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -10,7 +10,7 @@
  */
 #include <linux/bug.h>
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 8d5008c..1f18962 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -6,7 +6,7 @@
  * (C) Copyright 1995 1996 Linus Torvalds
  * (C) Copyright 2001, 2002 Ralf Baechle
  */
-#include <linux/module.h>
+#include <linux/export.h>
 #include <asm/addrspace.h>
 #include <asm/byteorder.h>
 #include <linux/sched.h>
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 3530376..d08ea3f 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -10,7 +10,7 @@
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/personality.h>
 #include <linux/random.h>
 #include <linux/sched.h>
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index c41953c..6f804f5 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -12,7 +12,6 @@
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
-#include <linux/module.h>
 #include <linux/proc_fs.h>
 
 #include <asm/bugs.h>
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index e8b335c..bba9c14 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -14,7 +14,7 @@
 #include <linux/smp.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
-#include <linux/module.h>
+#include <linux/export.h>
 
 #include <asm/cpu.h>
 #include <asm/cpu-type.h>
@@ -67,8 +67,11 @@
 
 	entry = read_c0_wired();
 
-	/* Blast 'em all away. */
-	if (cpu_has_tlbinv) {
+	/*
+	 * Blast 'em all away.
+	 * If there are any wired entries, fall back to iterating
+	 */
+	if (cpu_has_tlbinv && !entry) {
 		if (current_cpu_data.tlbsizevtlb) {
 			write_c0_index(0);
 			mtc0_tlbw_hazard();
diff --git a/arch/mips/mti-malta/malta-dt.c b/arch/mips/mti-malta/malta-dt.c
index 47a2288..4822943 100644
--- a/arch/mips/mti-malta/malta-dt.c
+++ b/arch/mips/mti-malta/malta-dt.c
@@ -17,18 +17,3 @@
 {
 	unflatten_and_copy_device_tree();
 }
-
-static const struct of_device_id bus_ids[] __initconst = {
-	{ .compatible = "simple-bus", },
-	{ .compatible = "isa", },
-	{},
-};
-
-static int __init publish_devices(void)
-{
-	if (!of_have_populated_dt())
-		return 0;
-
-	return of_platform_bus_probe(NULL, bus_ids, NULL);
-}
-device_initcall(publish_devices);
diff --git a/arch/mips/mti-malta/malta-dtshim.c b/arch/mips/mti-malta/malta-dtshim.c
index 151f488..c398582 100644
--- a/arch/mips/mti-malta/malta-dtshim.c
+++ b/arch/mips/mti-malta/malta-dtshim.c
@@ -13,18 +13,66 @@
 #include <linux/libfdt.h>
 #include <linux/of_fdt.h>
 #include <linux/sizes.h>
+#include <asm/addrspace.h>
 #include <asm/bootinfo.h>
 #include <asm/fw/fw.h>
+#include <asm/mips-boards/generic.h>
+#include <asm/mips-boards/malta.h>
+#include <asm/mips-cm.h>
 #include <asm/page.h>
 
+#define ROCIT_REG_BASE			0x1f403000
+#define ROCIT_CONFIG_GEN1		(ROCIT_REG_BASE + 0x04)
+#define  ROCIT_CONFIG_GEN1_MEMMAP_SHIFT	8
+#define  ROCIT_CONFIG_GEN1_MEMMAP_MASK	(0xf << 8)
+
 static unsigned char fdt_buf[16 << 10] __initdata;
 
 /* determined physical memory size, not overridden by command line args	 */
 extern unsigned long physical_memsize;
 
-#define MAX_MEM_ARRAY_ENTRIES 1
+enum mem_map {
+	MEM_MAP_V1 = 0,
+	MEM_MAP_V2,
+};
 
-static unsigned __init gen_fdt_mem_array(__be32 *mem_array, unsigned long size)
+#define MAX_MEM_ARRAY_ENTRIES 2
+
+static __init int malta_scon(void)
+{
+	int scon = MIPS_REVISION_SCONID;
+
+	if (scon != MIPS_REVISION_SCON_OTHER)
+		return scon;
+
+	switch (MIPS_REVISION_CORID) {
+	case MIPS_REVISION_CORID_QED_RM5261:
+	case MIPS_REVISION_CORID_CORE_LV:
+	case MIPS_REVISION_CORID_CORE_FPGA:
+	case MIPS_REVISION_CORID_CORE_FPGAR2:
+		return MIPS_REVISION_SCON_GT64120;
+
+	case MIPS_REVISION_CORID_CORE_EMUL_BON:
+	case MIPS_REVISION_CORID_BONITO64:
+	case MIPS_REVISION_CORID_CORE_20K:
+		return MIPS_REVISION_SCON_BONITO;
+
+	case MIPS_REVISION_CORID_CORE_MSC:
+	case MIPS_REVISION_CORID_CORE_FPGA2:
+	case MIPS_REVISION_CORID_CORE_24K:
+		return MIPS_REVISION_SCON_SOCIT;
+
+	case MIPS_REVISION_CORID_CORE_FPGA3:
+	case MIPS_REVISION_CORID_CORE_FPGA4:
+	case MIPS_REVISION_CORID_CORE_FPGA5:
+	case MIPS_REVISION_CORID_CORE_EMUL_MSC:
+	default:
+		return MIPS_REVISION_SCON_ROCIT;
+	}
+}
+
+static unsigned __init gen_fdt_mem_array(__be32 *mem_array, unsigned long size,
+					 enum mem_map map)
 {
 	unsigned long size_preio;
 	unsigned entries;
@@ -39,11 +87,47 @@
 		 * DDR but limits it to 2GB.
 		 */
 		mem_array[1] = cpu_to_be32(size);
-	} else {
-		size_preio = min_t(unsigned long, size, SZ_256M);
-		mem_array[1] = cpu_to_be32(size_preio);
+		goto done;
 	}
 
+	size_preio = min_t(unsigned long, size, SZ_256M);
+	mem_array[1] = cpu_to_be32(size_preio);
+	size -= size_preio;
+	if (!size)
+		goto done;
+
+	if (map == MEM_MAP_V2) {
+		/*
+		 * We have a flat 32 bit physical memory map with DDR filling
+		 * all 4GB of the memory map, apart from the I/O region which
+		 * obscures 256MB from 0x10000000-0x1fffffff.
+		 *
+		 * Therefore we discard the 256MB behind the I/O region.
+		 */
+		if (size <= SZ_256M)
+			goto done;
+		size -= SZ_256M;
+
+		/* Make use of the memory following the I/O region */
+		entries++;
+		mem_array[2] = cpu_to_be32(PHYS_OFFSET + SZ_512M);
+		mem_array[3] = cpu_to_be32(size);
+	} else {
+		/*
+		 * We have a 32 bit physical memory map with a 2GB DDR region
+		 * aliased in the upper & lower halves of it. The I/O region
+		 * obscures 256MB from 0x10000000-0x1fffffff in the low alias
+		 * but the DDR it obscures is accessible via the high alias.
+		 *
+		 * Simply access everything beyond the lowest 256MB of DDR using
+		 * the high alias.
+		 */
+		entries++;
+		mem_array[2] = cpu_to_be32(PHYS_OFFSET + SZ_2G + SZ_256M);
+		mem_array[3] = cpu_to_be32(size);
+	}
+
+done:
 	BUG_ON(entries > MAX_MEM_ARRAY_ENTRIES);
 	return entries;
 }
@@ -54,6 +138,8 @@
 	unsigned long memsize;
 	unsigned mem_entries;
 	int i, err, mem_off;
+	enum mem_map mem_map;
+	u32 config;
 	char *var, param_name[10], *var_names[] = {
 		"ememsize", "memsize",
 	};
@@ -106,6 +192,20 @@
 	/* if the user says there's more RAM than we thought, believe them */
 	physical_memsize = max_t(unsigned long, physical_memsize, memsize);
 
+	/* detect the memory map in use */
+	if (malta_scon() == MIPS_REVISION_SCON_ROCIT) {
+		/* ROCit has a register indicating the memory map in use */
+		config = readl((void __iomem *)CKSEG1ADDR(ROCIT_CONFIG_GEN1));
+		mem_map = config & ROCIT_CONFIG_GEN1_MEMMAP_MASK;
+		mem_map >>= ROCIT_CONFIG_GEN1_MEMMAP_SHIFT;
+	} else {
+		/* if not using ROCit, presume the v1 memory map */
+		mem_map = MEM_MAP_V1;
+	}
+	if (mem_map > MEM_MAP_V2)
+		panic("Unsupported physical memory map v%u detected",
+		      (unsigned int)mem_map);
+
 	/* append memory to the DT */
 	mem_off = fdt_add_subnode(fdt, root_off, "memory");
 	if (mem_off < 0)
@@ -115,19 +215,93 @@
 	if (err)
 		panic("Unable to set memory node device_type: %d", err);
 
-	mem_entries = gen_fdt_mem_array(mem_array, physical_memsize);
+	mem_entries = gen_fdt_mem_array(mem_array, physical_memsize, mem_map);
 	err = fdt_setprop(fdt, mem_off, "reg", mem_array,
 			  mem_entries * 2 * sizeof(mem_array[0]));
 	if (err)
 		panic("Unable to set memory regs property: %d", err);
 
-	mem_entries = gen_fdt_mem_array(mem_array, memsize);
+	mem_entries = gen_fdt_mem_array(mem_array, memsize, mem_map);
 	err = fdt_setprop(fdt, mem_off, "linux,usable-memory", mem_array,
 			  mem_entries * 2 * sizeof(mem_array[0]));
 	if (err)
 		panic("Unable to set linux,usable-memory property: %d", err);
 }
 
+static void __init remove_gic(void *fdt)
+{
+	int err, gic_off, i8259_off, cpu_off;
+	void __iomem *biu_base;
+	uint32_t cpu_phandle, sc_cfg;
+
+	/* if we have a CM which reports a GIC is present, leave the DT alone */
+	err = mips_cm_probe();
+	if (!err && (read_gcr_gic_status() & CM_GCR_GIC_STATUS_GICEX_MSK))
+		return;
+
+	if (malta_scon() == MIPS_REVISION_SCON_ROCIT) {
+		/*
+		 * On systems using the RocIT system controller a GIC may be
+		 * present without a CM. Detect whether that is the case.
+		 */
+		biu_base = ioremap_nocache(MSC01_BIU_REG_BASE,
+				MSC01_BIU_ADDRSPACE_SZ);
+		sc_cfg = __raw_readl(biu_base + MSC01_SC_CFG_OFS);
+		if (sc_cfg & MSC01_SC_CFG_GICPRES_MSK) {
+			/* enable the GIC at the system controller level */
+			sc_cfg |= BIT(MSC01_SC_CFG_GICENA_SHF);
+			__raw_writel(sc_cfg, biu_base + MSC01_SC_CFG_OFS);
+			return;
+		}
+	}
+
+	gic_off = fdt_node_offset_by_compatible(fdt, -1, "mti,gic");
+	if (gic_off < 0) {
+		pr_warn("malta-dtshim: unable to find DT GIC node: %d\n",
+			gic_off);
+		return;
+	}
+
+	err = fdt_nop_node(fdt, gic_off);
+	if (err)
+		pr_warn("malta-dtshim: unable to nop GIC node\n");
+
+	i8259_off = fdt_node_offset_by_compatible(fdt, -1, "intel,i8259");
+	if (i8259_off < 0) {
+		pr_warn("malta-dtshim: unable to find DT i8259 node: %d\n",
+			i8259_off);
+		return;
+	}
+
+	cpu_off = fdt_node_offset_by_compatible(fdt, -1,
+			"mti,cpu-interrupt-controller");
+	if (cpu_off < 0) {
+		pr_warn("malta-dtshim: unable to find CPU intc node: %d\n",
+			cpu_off);
+		return;
+	}
+
+	cpu_phandle = fdt_get_phandle(fdt, cpu_off);
+	if (!cpu_phandle) {
+		pr_warn("malta-dtshim: unable to get CPU intc phandle\n");
+		return;
+	}
+
+	err = fdt_setprop_u32(fdt, i8259_off, "interrupt-parent", cpu_phandle);
+	if (err) {
+		pr_warn("malta-dtshim: unable to set i8259 interrupt-parent: %d\n",
+			err);
+		return;
+	}
+
+	err = fdt_setprop_u32(fdt, i8259_off, "interrupts", 2);
+	if (err) {
+		pr_warn("malta-dtshim: unable to set i8259 interrupts: %d\n",
+			err);
+		return;
+	}
+}
+
 void __init *malta_dt_shim(void *fdt)
 {
 	int root_off, len, err;
@@ -153,6 +327,7 @@
 		return fdt;
 
 	append_memory(fdt_buf, root_off);
+	remove_gic(fdt_buf);
 
 	err = fdt_pack(fdt_buf);
 	if (err)
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index dc2c521..0f3b881 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/pci_regs.h>
 #include <linux/serial_core.h>
 
 #include <asm/cacheflush.h>
@@ -242,23 +243,19 @@
 			  MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_MEM_SHF |
 			  MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_BAR0_SHF);
 #endif
-#ifndef CONFIG_EVA
-		/* Fix up target memory mapping.  */
-		MSC_READ(MSC01_PCI_BAR0, mask);
-		MSC_WRITE(MSC01_PCI_P2SCMSKL, mask & MSC01_PCI_BAR0_SIZE_MSK);
-#else
+
 		/*
 		 * Setup the Malta max (2GB) memory for PCI DMA in host bridge
-		 * in transparent addressing mode, starting from 0x80000000.
+		 * in transparent addressing mode.
 		 */
-		mask = PHYS_OFFSET | (1<<3);
+		mask = PHYS_OFFSET | PCI_BASE_ADDRESS_MEM_PREFETCH;
 		MSC_WRITE(MSC01_PCI_BAR0, mask);
-
-		mask = PHYS_OFFSET;
 		MSC_WRITE(MSC01_PCI_HEAD4, mask);
+
+		mask &= MSC01_PCI_BAR0_SIZE_MSK;
 		MSC_WRITE(MSC01_PCI_P2SCMSKL, mask);
 		MSC_WRITE(MSC01_PCI_P2SCMAPL, mask);
-#endif
+
 		/* Don't handle target retries indefinitely.  */
 		if ((data & MSC01_PCI_CFG_MAXRTRY_MSK) ==
 		    MSC01_PCI_CFG_MAXRTRY_MSK)
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index c6a6c7a..cb675ec 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -14,11 +14,13 @@
  */
 #include <linux/init.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/irqchip/mips-gic.h>
+#include <linux/of_irq.h>
 #include <linux/kernel_stat.h>
 #include <linux/kernel.h>
 #include <linux/random.h>
@@ -37,10 +39,6 @@
 #include <asm/setup.h>
 #include <asm/rtlx.h>
 
-static void __iomem *_msc01_biu_base;
-
-static DEFINE_RAW_SPINLOCK(mips_irq_lock);
-
 static inline int mips_pcibios_iack(void)
 {
 	int irq;
@@ -85,49 +83,6 @@
 	return irq;
 }
 
-static inline int get_int(void)
-{
-	unsigned long flags;
-	int irq;
-	raw_spin_lock_irqsave(&mips_irq_lock, flags);
-
-	irq = mips_pcibios_iack();
-
-	/*
-	 * The only way we can decide if an interrupt is spurious
-	 * is by checking the 8259 registers.  This needs a spinlock
-	 * on an SMP system,  so leave it up to the generic code...
-	 */
-
-	raw_spin_unlock_irqrestore(&mips_irq_lock, flags);
-
-	return irq;
-}
-
-static void malta_hw0_irqdispatch(void)
-{
-	int irq;
-
-	irq = get_int();
-	if (irq < 0) {
-		/* interrupt has already been cleared */
-		return;
-	}
-
-	do_IRQ(MALTA_INT_BASE + irq);
-
-#ifdef CONFIG_MIPS_VPE_APSP_API_MT
-	if (aprp_hook)
-		aprp_hook();
-#endif
-}
-
-static irqreturn_t i8259_handler(int irq, void *dev_id)
-{
-	malta_hw0_irqdispatch();
-	return IRQ_HANDLED;
-}
-
 static void corehi_irqdispatch(void)
 {
 	unsigned int intedge, intsteer, pcicmd, pcibadaddr;
@@ -240,12 +195,6 @@
 };
 #endif /* CONFIG_MIPS_MT_SMP */
 
-static struct irqaction i8259irq = {
-	.handler = i8259_handler,
-	.name = "XT-PIC cascade",
-	.flags = IRQF_NO_THREAD,
-};
-
 static struct irqaction corehi_irqaction = {
 	.handler = corehi_handler,
 	.name = "CoreHi",
@@ -281,28 +230,10 @@
 
 void __init arch_init_irq(void)
 {
-	int corehi_irq, i8259_irq;
+	int corehi_irq;
 
-	init_i8259_irqs();
-
-	if (!cpu_has_veic)
-		mips_cpu_irq_init();
-
-	if (mips_cm_present()) {
-		write_gcr_gic_base(GIC_BASE_ADDR | CM_GCR_GIC_BASE_GICEN_MSK);
-		gic_present = 1;
-	} else {
-		if (mips_revision_sconid == MIPS_REVISION_SCON_ROCIT) {
-			_msc01_biu_base = ioremap_nocache(MSC01_BIU_REG_BASE,
-						MSC01_BIU_ADDRSPACE_SZ);
-			gic_present =
-			  (__raw_readl(_msc01_biu_base + MSC01_SC_CFG_OFS) &
-			   MSC01_SC_CFG_GICPRES_MSK) >>
-			  MSC01_SC_CFG_GICPRES_SHF;
-		}
-	}
-	if (gic_present)
-		pr_debug("GIC present\n");
+	i8259_set_poll(mips_pcibios_iack);
+	irqchip_init();
 
 	switch (mips_revision_sconid) {
 	case MIPS_REVISION_SCON_SOCIT:
@@ -330,18 +261,6 @@
 	}
 
 	if (gic_present) {
-		int i;
-
-		gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, MIPSCPU_INT_GIC,
-			 MIPS_GIC_IRQ_BASE);
-		if (!mips_cm_present()) {
-			/* Enable the GIC */
-			i = __raw_readl(_msc01_biu_base + MSC01_SC_CFG_OFS);
-			__raw_writel(i | (0x1 << MSC01_SC_CFG_GICENA_SHF),
-				 _msc01_biu_base + MSC01_SC_CFG_OFS);
-			pr_debug("GIC Enabled\n");
-		}
-		i8259_irq = MIPS_GIC_IRQ_BASE + GIC_INT_I8259A;
 		corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
 	} else {
 #if defined(CONFIG_MIPS_MT_SMP)
@@ -361,33 +280,13 @@
 		arch_init_ipiirq(cpu_ipi_call_irq, &irq_call);
 #endif
 		if (cpu_has_veic) {
-			set_vi_handler(MSC01E_INT_I8259A,
-				       malta_hw0_irqdispatch);
 			set_vi_handler(MSC01E_INT_COREHI,
 				       corehi_irqdispatch);
-			i8259_irq = MSC01E_INT_BASE + MSC01E_INT_I8259A;
 			corehi_irq = MSC01E_INT_BASE + MSC01E_INT_COREHI;
 		} else {
-			i8259_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_I8259A;
 			corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
 		}
 	}
 
-	setup_irq(i8259_irq, &i8259irq);
 	setup_irq(corehi_irq, &corehi_irqaction);
 }
-
-void malta_be_init(void)
-{
-	/* Could change CM error mask register. */
-}
-
-int malta_be_handler(struct pt_regs *regs, int is_fixup)
-{
-	/* This duplicates the handling in do_be which seems wrong */
-	int retval = is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
-
-	mips_cm_error_report();
-
-	return retval;
-}
diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c
index e1dd1c1..516e123 100644
--- a/arch/mips/mti-malta/malta-platform.c
+++ b/arch/mips/mti-malta/malta-platform.c
@@ -23,14 +23,10 @@
  */
 #include <linux/init.h>
 #include <linux/serial_8250.h>
-#include <linux/mc146818rtc.h>
 #include <linux/module.h>
 #include <linux/irq.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/physmap.h>
 #include <linux/platform_device.h>
 #include <asm/mips-boards/maltaint.h>
-#include <mtd/mtd-abi.h>
 
 #define SMC_PORT(base, int)						\
 {									\
@@ -68,80 +64,13 @@
 	},
 };
 
-struct resource malta_rtc_resources[] = {
-	{
-		.start	= RTC_PORT(0),
-		.end	= RTC_PORT(7),
-		.flags	= IORESOURCE_IO,
-	}, {
-		.start	= RTC_IRQ,
-		.end	= RTC_IRQ,
-		.flags	= IORESOURCE_IRQ,
-	}
-};
-
-static struct platform_device malta_rtc_device = {
-	.name		= "rtc_cmos",
-	.id		= -1,
-	.resource	= malta_rtc_resources,
-	.num_resources	= ARRAY_SIZE(malta_rtc_resources),
-};
-
-static struct mtd_partition malta_mtd_partitions[] = {
-	{
-		.name =		"YAMON",
-		.offset =	0x0,
-		.size =		0x100000,
-		.mask_flags =	MTD_WRITEABLE
-	}, {
-		.name =		"User FS",
-		.offset =	0x100000,
-		.size =		0x2e0000
-	}, {
-		.name =		"Board Config",
-		.offset =	0x3e0000,
-		.size =		0x020000,
-		.mask_flags =	MTD_WRITEABLE
-	}
-};
-
-static struct physmap_flash_data malta_flash_data = {
-	.width		= 4,
-	.nr_parts	= ARRAY_SIZE(malta_mtd_partitions),
-	.parts		= malta_mtd_partitions
-};
-
-static struct resource malta_flash_resource = {
-	.start		= 0x1e000000,
-	.end		= 0x1e3fffff,
-	.flags		= IORESOURCE_MEM
-};
-
-static struct platform_device malta_flash_device = {
-	.name		= "physmap-flash",
-	.id		= 0,
-	.dev		= {
-		.platform_data	= &malta_flash_data,
-	},
-	.num_resources	= 1,
-	.resource	= &malta_flash_resource,
-};
-
 static struct platform_device *malta_devices[] __initdata = {
 	&malta_uart8250_device,
-	&malta_rtc_device,
-	&malta_flash_device,
 };
 
 static int __init malta_add_devices(void)
 {
-	int err;
-
-	err = platform_add_devices(malta_devices, ARRAY_SIZE(malta_devices));
-	if (err)
-		return err;
-
-	return 0;
+	return platform_add_devices(malta_devices, ARRAY_SIZE(malta_devices));
 }
 
 device_initcall(malta_add_devices);
diff --git a/arch/mips/mti-malta/malta-reset.c b/arch/mips/mti-malta/malta-reset.c
index 2fd2cc2..dd6f62a 100644
--- a/arch/mips/mti-malta/malta-reset.c
+++ b/arch/mips/mti-malta/malta-reset.c
@@ -8,38 +8,21 @@
  */
 #include <linux/io.h>
 #include <linux/pm.h>
+#include <linux/reboot.h>
 
 #include <asm/reboot.h>
 #include <asm/mach-malta/malta-pm.h>
 
-#define SOFTRES_REG	0x1f000500
-#define GORESET		0x42
-
-static void mips_machine_restart(char *command)
-{
-	unsigned int __iomem *softres_reg =
-		ioremap(SOFTRES_REG, sizeof(unsigned int));
-
-	__raw_writel(GORESET, softres_reg);
-}
-
-static void mips_machine_halt(void)
-{
-	while (true);
-}
-
 static void mips_machine_power_off(void)
 {
 	mips_pm_suspend(PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF);
 
 	pr_info("Failed to power down, resetting\n");
-	mips_machine_restart(NULL);
+	machine_restart(NULL);
 }
 
 static int __init mips_reboot_setup(void)
 {
-	_machine_restart = mips_machine_restart;
-	_machine_halt = mips_machine_halt;
 	pm_power_off = mips_machine_power_off;
 
 	return 0;
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 7e7364b..a01d5de 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -42,9 +42,6 @@
 #define ROCIT_CONFIG_GEN0		0x1f403000
 #define  ROCIT_CONFIG_GEN0_PCI_IOCU	BIT(7)
 
-extern void malta_be_init(void);
-extern int malta_be_handler(struct pt_regs *regs, int is_fixup);
-
 static struct resource standard_io_resources[] = {
 	{
 		.name = "dma1",
@@ -154,12 +151,12 @@
 	 * coherency instead.
 	 */
 	if (plat_enable_iocoherency()) {
-		if (coherentio == 0)
+		if (coherentio == IO_COHERENCE_DISABLED)
 			pr_info("Hardware DMA cache coherency disabled\n");
 		else
 			pr_info("Hardware DMA cache coherency enabled\n");
 	} else {
-		if (coherentio == 1)
+		if (coherentio == IO_COHERENCE_ENABLED)
 			pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n");
 		else
 			pr_info("Software DMA cache coherency enabled\n");
@@ -301,7 +298,4 @@
 #if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
 	screen_info_setup();
 #endif
-
-	board_be_init = malta_be_init;
-	board_be_handler = malta_be_handler;
 }
diff --git a/arch/mips/mti-sead3/Makefile b/arch/mips/mti-sead3/Makefile
deleted file mode 100644
index 7a584e0..0000000
--- a/arch/mips/mti-sead3/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# Carsten Langgaard, carstenl@mips.com
-# Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
-#
-# Copyright (C) 2008 Wind River Systems, Inc.
-#   written by Ralf Baechle <ralf@linux-mips.org>
-#
-# Copyright (C) 2012 MIPS Technoligies, Inc.  All rights reserved.
-# Steven J. Hill <sjhill@mips.com>
-#
-obj-y				:= sead3-lcd.o sead3-display.o sead3-init.o \
-				   sead3-int.o sead3-platform.o sead3-reset.o \
-				   sead3-setup.o sead3-time.o
-
-obj-$(CONFIG_EARLY_PRINTK)	+= sead3-console.o
diff --git a/arch/mips/mti-sead3/Platform b/arch/mips/mti-sead3/Platform
deleted file mode 100644
index 3870924..0000000
--- a/arch/mips/mti-sead3/Platform
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# MIPS SEAD-3 board
-#
-platform-$(CONFIG_MIPS_SEAD3)	+= mti-sead3/
-cflags-$(CONFIG_MIPS_SEAD3)	+= -I$(srctree)/arch/mips/include/asm/mach-sead3
-load-$(CONFIG_MIPS_SEAD3)	+= 0xffffffff80100000
-all-$(CONFIG_MIPS_SEAD3)	:= $(COMPRESSION_FNAME).srec
diff --git a/arch/mips/mti-sead3/sead3-console.c b/arch/mips/mti-sead3/sead3-console.c
deleted file mode 100644
index 031f47d..0000000
--- a/arch/mips/mti-sead3/sead3-console.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/serial_reg.h>
-#include <linux/io.h>
-
-#define SEAD_UART1_REGS_BASE	0xbf000800   /* ttyS1 = DB9 port */
-#define SEAD_UART0_REGS_BASE	0xbf000900   /* ttyS0 = USB port   */
-#define PORT(base_addr, offset) ((unsigned int __iomem *)(base_addr+(offset)*4))
-
-static char console_port = 1;
-
-static inline unsigned int serial_in(int offset, unsigned int base_addr)
-{
-	return __raw_readl(PORT(base_addr, offset)) & 0xff;
-}
-
-static inline void serial_out(int offset, int value, unsigned int base_addr)
-{
-	__raw_writel(value, PORT(base_addr, offset));
-}
-
-void __init fw_init_early_console(char port)
-{
-	console_port = port;
-}
-
-int prom_putchar(char c)
-{
-	unsigned int base_addr;
-
-	base_addr = console_port ? SEAD_UART1_REGS_BASE : SEAD_UART0_REGS_BASE;
-
-	while ((serial_in(UART_LSR, base_addr) & UART_LSR_THRE) == 0)
-		;
-
-	serial_out(UART_TX, c, base_addr);
-
-	return 1;
-}
diff --git a/arch/mips/mti-sead3/sead3-display.c b/arch/mips/mti-sead3/sead3-display.c
deleted file mode 100644
index 9487599..0000000
--- a/arch/mips/mti-sead3/sead3-display.c
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
- */
-#include <linux/timer.h>
-#include <linux/io.h>
-#include <asm/mips-boards/generic.h>
-
-static unsigned int display_count;
-static unsigned int max_display_count;
-
-#define LCD_DISPLAY_POS_BASE		0x1f000400
-#define DISPLAY_LCDINSTRUCTION		(0*2)
-#define DISPLAY_LCDDATA			(1*2)
-#define DISPLAY_CPLDSTATUS		(2*2)
-#define DISPLAY_CPLDDATA		(3*2)
-#define LCD_SETDDRAM			0x80
-#define LCD_IR_BF			0x80
-
-const char display_string[] = "		      LINUX ON SEAD3		   ";
-
-static void scroll_display_message(unsigned long data);
-static DEFINE_TIMER(mips_scroll_timer, scroll_display_message, HZ, 0);
-
-static void lcd_wait(unsigned int __iomem *display)
-{
-	/* Wait for CPLD state machine to become idle. */
-	do { } while (__raw_readl(display + DISPLAY_CPLDSTATUS) & 1);
-
-	do {
-		__raw_readl(display + DISPLAY_LCDINSTRUCTION);
-
-		/* Wait for CPLD state machine to become idle. */
-		do { } while (__raw_readl(display + DISPLAY_CPLDSTATUS) & 1);
-	} while (__raw_readl(display + DISPLAY_CPLDDATA) & LCD_IR_BF);
-}
-
-void mips_display_message(const char *str)
-{
-	static unsigned int __iomem *display;
-	char ch;
-	int i;
-
-	if (unlikely(display == NULL))
-		display = ioremap_nocache(LCD_DISPLAY_POS_BASE,
-			(8 * sizeof(int)));
-
-	for (i = 0; i < 16; i++) {
-		if (*str)
-			ch = *str++;
-		else
-			ch = ' ';
-		lcd_wait(display);
-		__raw_writel((LCD_SETDDRAM | i),
-			(display + DISPLAY_LCDINSTRUCTION));
-		lcd_wait(display);
-		__raw_writel(ch, display + DISPLAY_LCDDATA);
-	}
-}
-
-static void scroll_display_message(unsigned long data)
-{
-	mips_display_message(&display_string[display_count++]);
-	if (display_count == max_display_count)
-		display_count = 0;
-	mod_timer(&mips_scroll_timer, jiffies + HZ);
-}
-
-void mips_scroll_message(void)
-{
-	del_timer_sync(&mips_scroll_timer);
-	max_display_count = strlen(display_string) + 1 - 16;
-	mod_timer(&mips_scroll_timer, jiffies + 1);
-}
diff --git a/arch/mips/mti-sead3/sead3-init.c b/arch/mips/mti-sead3/sead3-init.c
deleted file mode 100644
index 3572ea3..0000000
--- a/arch/mips/mti-sead3/sead3-init.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/io.h>
-
-#include <asm/bootinfo.h>
-#include <asm/cacheflush.h>
-#include <asm/traps.h>
-#include <asm/mips-boards/generic.h>
-#include <asm/fw/fw.h>
-
-extern char except_vec_nmi;
-extern char except_vec_ejtag_debug;
-
-#ifdef CONFIG_SERIAL_8250_CONSOLE
-static void __init console_config(void)
-{
-	char console_string[40];
-	int baud = 0;
-	char parity = '\0', bits = '\0', flow = '\0';
-	char *s;
-
-	if ((strstr(fw_getcmdline(), "console=")) == NULL) {
-		s = fw_getenv("modetty0");
-		if (s) {
-			while (*s >= '0' && *s <= '9')
-				baud = baud*10 + *s++ - '0';
-			if (*s == ',')
-				s++;
-			if (*s)
-				parity = *s++;
-			if (*s == ',')
-				s++;
-			if (*s)
-				bits = *s++;
-			if (*s == ',')
-				s++;
-			if (*s == 'h')
-				flow = 'r';
-		}
-		if (baud == 0)
-			baud = 38400;
-		if (parity != 'n' && parity != 'o' && parity != 'e')
-			parity = 'n';
-		if (bits != '7' && bits != '8')
-			bits = '8';
-		if (flow == '\0')
-			flow = 'r';
-		sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
-			parity, bits, flow);
-		strcat(fw_getcmdline(), console_string);
-	}
-}
-#endif
-
-static void __init mips_nmi_setup(void)
-{
-	void *base;
-
-	base = cpu_has_veic ?
-		(void *)(CAC_BASE + 0xa80) :
-		(void *)(CAC_BASE + 0x380);
-#ifdef CONFIG_CPU_MICROMIPS
-	/*
-	 * Decrement the exception vector address by one for microMIPS.
-	 */
-	memcpy(base, (&except_vec_nmi - 1), 0x80);
-
-	/*
-	 * This is a hack. We do not know if the boot loader was built with
-	 * microMIPS instructions or not. If it was not, the NMI exception
-	 * code at 0x80000a80 will be taken in MIPS32 mode. The hand coded
-	 * assembly below forces us into microMIPS mode if we are a pure
-	 * microMIPS kernel. The assembly instructions are:
-	 *
-	 *  3C1A8000   lui       k0,0x8000
-	 *  375A0381   ori       k0,k0,0x381
-	 *  03400008   jr        k0
-	 *  00000000   nop
-	 *
-	 * The mode switch occurs by jumping to the unaligned exception
-	 * vector address at 0x80000381 which would have been 0x80000380
-	 * in MIPS32 mode. The jump to the unaligned address transitions
-	 * us into microMIPS mode.
-	 */
-	if (!cpu_has_veic) {
-		void *base2 = (void *)(CAC_BASE + 0xa80);
-		*((unsigned int *)base2) = 0x3c1a8000;
-		*((unsigned int *)base2 + 1) = 0x375a0381;
-		*((unsigned int *)base2 + 2) = 0x03400008;
-		*((unsigned int *)base2 + 3) = 0x00000000;
-		flush_icache_range((unsigned long)base2,
-			(unsigned long)base2 + 0x10);
-	}
-#else
-	memcpy(base, &except_vec_nmi, 0x80);
-#endif
-	flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
-}
-
-static void __init mips_ejtag_setup(void)
-{
-	void *base;
-
-	base = cpu_has_veic ?
-		(void *)(CAC_BASE + 0xa00) :
-		(void *)(CAC_BASE + 0x300);
-#ifdef CONFIG_CPU_MICROMIPS
-	/* Deja vu... */
-	memcpy(base, (&except_vec_ejtag_debug - 1), 0x80);
-	if (!cpu_has_veic) {
-		void *base2 = (void *)(CAC_BASE + 0xa00);
-		*((unsigned int *)base2) = 0x3c1a8000;
-		*((unsigned int *)base2 + 1) = 0x375a0301;
-		*((unsigned int *)base2 + 2) = 0x03400008;
-		*((unsigned int *)base2 + 3) = 0x00000000;
-		flush_icache_range((unsigned long)base2,
-			(unsigned long)base2 + 0x10);
-	}
-#else
-	memcpy(base, &except_vec_ejtag_debug, 0x80);
-#endif
-	flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
-}
-
-void __init prom_init(void)
-{
-	board_nmi_handler_setup = mips_nmi_setup;
-	board_ejtag_handler_setup = mips_ejtag_setup;
-
-	fw_init_cmdline();
-#ifdef CONFIG_EARLY_PRINTK
-	if ((strstr(fw_getcmdline(), "console=ttyS0")) != NULL)
-		fw_init_early_console(0);
-	else if ((strstr(fw_getcmdline(), "console=ttyS1")) != NULL)
-		fw_init_early_console(1);
-#endif
-#ifdef CONFIG_SERIAL_8250_CONSOLE
-	if ((strstr(fw_getcmdline(), "console=")) == NULL)
-		strcat(fw_getcmdline(), " console=ttyS0,38400n8r");
-	console_config();
-#endif
-}
-
-void __init prom_free_prom_memory(void)
-{
-}
diff --git a/arch/mips/mti-sead3/sead3-int.c b/arch/mips/mti-sead3/sead3-int.c
deleted file mode 100644
index e31e17f..0000000
--- a/arch/mips/mti-sead3/sead3-int.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/irqchip/mips-gic.h>
-#include <linux/io.h>
-
-#include <asm/irq_cpu.h>
-#include <asm/setup.h>
-
-#include <asm/mips-boards/sead3int.h>
-
-#define SEAD_CONFIG_GIC_PRESENT_SHF	1
-#define SEAD_CONFIG_GIC_PRESENT_MSK	(1 << SEAD_CONFIG_GIC_PRESENT_SHF)
-#define SEAD_CONFIG_BASE		0x1b100110
-#define SEAD_CONFIG_SIZE		4
-
-static void __iomem *sead3_config_reg;
-
-void __init arch_init_irq(void)
-{
-	if (!cpu_has_veic)
-		mips_cpu_irq_init();
-
-	sead3_config_reg = ioremap_nocache(SEAD_CONFIG_BASE, SEAD_CONFIG_SIZE);
-	gic_present = (__raw_readl(sead3_config_reg) &
-		       SEAD_CONFIG_GIC_PRESENT_MSK) >>
-		SEAD_CONFIG_GIC_PRESENT_SHF;
-	pr_info("GIC: %spresent\n", (gic_present) ? "" : "not ");
-	pr_info("EIC: %s\n",
-		(current_cpu_data.options & MIPS_CPU_VEIC) ?  "on" : "off");
-
-	if (gic_present)
-		gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, CPU_INT_GIC,
-			 MIPS_GIC_IRQ_BASE);
-}
-
diff --git a/arch/mips/mti-sead3/sead3-lcd.c b/arch/mips/mti-sead3/sead3-lcd.c
deleted file mode 100644
index 10b10ed2..0000000
--- a/arch/mips/mti-sead3/sead3-lcd.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/platform_device.h>
-
-static struct resource __initdata sead3_lcd_resource = {
-		.start	= 0x1f000400,
-		.end	= 0x1f00041f,
-		.flags	= IORESOURCE_MEM,
-};
-
-static __init int sead3_lcd_add(void)
-{
-	struct platform_device *pdev;
-	int retval;
-
-	/* SEAD-3 and Cobalt platforms use same display type. */
-	pdev = platform_device_alloc("cobalt-lcd", -1);
-	if (!pdev)
-		return -ENOMEM;
-
-	retval = platform_device_add_resources(pdev, &sead3_lcd_resource, 1);
-	if (retval)
-		goto err_free_device;
-
-	retval = platform_device_add(pdev);
-	if (retval)
-		goto err_free_device;
-
-	return 0;
-
-err_free_device:
-	platform_device_put(pdev);
-
-	return retval;
-}
-
-device_initcall(sead3_lcd_add);
diff --git a/arch/mips/mti-sead3/sead3-platform.c b/arch/mips/mti-sead3/sead3-platform.c
deleted file mode 100644
index 73b73ef..0000000
--- a/arch/mips/mti-sead3/sead3-platform.c
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
- */
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/irqchip/mips-gic.h>
-#include <linux/leds.h>
-#include <linux/mtd/physmap.h>
-#include <linux/platform_device.h>
-#include <linux/serial_8250.h>
-#include <linux/smsc911x.h>
-
-#include <asm/mips-boards/sead3int.h>
-
-#define UART(base)							\
-{									\
-	.mapbase	= base,						\
-	.irq		= -1,						\
-	.uartclk	= 14745600,					\
-	.iotype		= UPIO_MEM32,					\
-	.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, \
-	.regshift	= 2,						\
-}
-
-static struct plat_serial8250_port uart8250_data[] = {
-	UART(0x1f000900),   /* ttyS0 = USB   */
-	UART(0x1f000800),   /* ttyS1 = RS232 */
-	{ },
-};
-
-static struct platform_device uart8250_device = {
-	.name			= "serial8250",
-	.id			= PLAT8250_DEV_PLATFORM2,
-	.dev			= {
-		.platform_data	= uart8250_data,
-	},
-};
-
-static struct smsc911x_platform_config sead3_smsc911x_data = {
-	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
-	.irq_type	= SMSC911X_IRQ_TYPE_PUSH_PULL,
-	.flags		= SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
-	.phy_interface	= PHY_INTERFACE_MODE_MII,
-};
-
-static struct resource sead3_net_resources[] = {
-	{
-		.start			= 0x1f010000,
-		.end			= 0x1f01ffff,
-		.flags			= IORESOURCE_MEM
-	}, {
-		.flags			= IORESOURCE_IRQ
-	}
-};
-
-static struct platform_device sead3_net_device = {
-	.name			= "smsc911x",
-	.id			= 0,
-	.dev			= {
-		.platform_data	= &sead3_smsc911x_data,
-	},
-	.num_resources		= ARRAY_SIZE(sead3_net_resources),
-	.resource		= sead3_net_resources
-};
-
-static struct mtd_partition sead3_mtd_partitions[] = {
-	{
-		.name =		"User FS",
-		.offset =	0x00000000,
-		.size =		0x01fc0000,
-	}, {
-		.name =		"Board Config",
-		.offset =	0x01fc0000,
-		.size =		0x00040000,
-		.mask_flags =	MTD_WRITEABLE
-	},
-};
-
-static struct physmap_flash_data sead3_flash_data = {
-	.width		= 4,
-	.nr_parts	= ARRAY_SIZE(sead3_mtd_partitions),
-	.parts		= sead3_mtd_partitions
-};
-
-static struct resource sead3_flash_resource = {
-	.start		= 0x1c000000,
-	.end		= 0x1dffffff,
-	.flags		= IORESOURCE_MEM
-};
-
-static struct platform_device sead3_flash = {
-	.name		= "physmap-flash",
-	.id		= 0,
-	.dev		= {
-		.platform_data	= &sead3_flash_data,
-	},
-	.num_resources	= 1,
-	.resource	= &sead3_flash_resource,
-};
-
-#define LEDFLAGS(bits, shift)		\
-	((bits << 8) | (shift << 8))
-
-#define LEDBITS(id, shift, bits)	\
-	.name = id #shift,		\
-	.flags = LEDFLAGS(bits, shift)
-
-static struct led_info led_data_info[] = {
-	{ LEDBITS("bit", 0, 1) },
-	{ LEDBITS("bit", 1, 1) },
-	{ LEDBITS("bit", 2, 1) },
-	{ LEDBITS("bit", 3, 1) },
-	{ LEDBITS("bit", 4, 1) },
-	{ LEDBITS("bit", 5, 1) },
-	{ LEDBITS("bit", 6, 1) },
-	{ LEDBITS("bit", 7, 1) },
-	{ LEDBITS("all", 0, 8) },
-};
-
-static struct led_platform_data led_data = {
-	.num_leds	= ARRAY_SIZE(led_data_info),
-	.leds		= led_data_info
-};
-
-static struct resource pled_resources[] = {
-	{
-		.start	= 0x1f000210,
-		.end	= 0x1f000217,
-		.flags	= IORESOURCE_MEM
-	}
-};
-
-static struct platform_device pled_device = {
-	.name			= "sead3::pled",
-	.id			= 0,
-	.dev			= {
-		.platform_data	= &led_data,
-	},
-	.num_resources		= ARRAY_SIZE(pled_resources),
-	.resource		= pled_resources
-};
-
-
-static struct resource fled_resources[] = {
-	{
-		.start			= 0x1f000218,
-		.end			= 0x1f00021f,
-		.flags			= IORESOURCE_MEM
-	}
-};
-
-static struct platform_device fled_device = {
-	.name			= "sead3::fled",
-	.id			= 0,
-	.dev			= {
-		.platform_data	= &led_data,
-	},
-	.num_resources		= ARRAY_SIZE(fled_resources),
-	.resource		= fled_resources
-};
-
-static struct platform_device sead3_led_device = {
-        .name   = "sead3-led",
-        .id     = -1,
-};
-
-static struct resource ehci_resources[] = {
-	{
-		.start			= 0x1b200000,
-		.end			= 0x1b200fff,
-		.flags			= IORESOURCE_MEM
-	}, {
-		.flags			= IORESOURCE_IRQ
-	}
-};
-
-static u64 sead3_usbdev_dma_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ehci_device = {
-	.name		= "sead3-ehci",
-	.id		= 0,
-	.dev		= {
-		.dma_mask		= &sead3_usbdev_dma_mask,
-		.coherent_dma_mask	= DMA_BIT_MASK(32)
-	},
-	.num_resources	= ARRAY_SIZE(ehci_resources),
-	.resource	= ehci_resources
-};
-
-static struct platform_device *sead3_platform_devices[] __initdata = {
-	&uart8250_device,
-	&sead3_flash,
-	&pled_device,
-	&fled_device,
-	&sead3_led_device,
-	&ehci_device,
-	&sead3_net_device,
-};
-
-static int __init sead3_platforms_device_init(void)
-{
-	if (gic_present) {
-		uart8250_data[0].irq = MIPS_GIC_IRQ_BASE + GIC_INT_UART0;
-		uart8250_data[1].irq = MIPS_GIC_IRQ_BASE + GIC_INT_UART1;
-		ehci_resources[1].start = MIPS_GIC_IRQ_BASE + GIC_INT_EHCI;
-		sead3_net_resources[1].start = MIPS_GIC_IRQ_BASE + GIC_INT_NET;
-	} else {
-		uart8250_data[0].irq = MIPS_CPU_IRQ_BASE + CPU_INT_UART0;
-		uart8250_data[1].irq = MIPS_CPU_IRQ_BASE + CPU_INT_UART1;
-		ehci_resources[1].start = MIPS_CPU_IRQ_BASE + CPU_INT_EHCI;
-		sead3_net_resources[1].start = MIPS_CPU_IRQ_BASE + CPU_INT_NET;
-	}
-
-	return platform_add_devices(sead3_platform_devices,
-				    ARRAY_SIZE(sead3_platform_devices));
-}
-
-device_initcall(sead3_platforms_device_init);
diff --git a/arch/mips/mti-sead3/sead3-reset.c b/arch/mips/mti-sead3/sead3-reset.c
deleted file mode 100644
index e6fb244..0000000
--- a/arch/mips/mti-sead3/sead3-reset.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
- */
-#include <linux/io.h>
-#include <linux/pm.h>
-
-#include <asm/reboot.h>
-
-#define SOFTRES_REG	0x1f000050
-#define GORESET		0x4d
-
-static void mips_machine_restart(char *command)
-{
-	unsigned int __iomem *softres_reg =
-		ioremap(SOFTRES_REG, sizeof(unsigned int));
-
-	__raw_writel(GORESET, softres_reg);
-}
-
-static void mips_machine_halt(void)
-{
-	unsigned int __iomem *softres_reg =
-		ioremap(SOFTRES_REG, sizeof(unsigned int));
-
-	__raw_writel(GORESET, softres_reg);
-}
-
-static int __init mips_reboot_setup(void)
-{
-	_machine_restart = mips_machine_restart;
-	_machine_halt = mips_machine_halt;
-	pm_power_off = mips_machine_halt;
-
-	return 0;
-}
-arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/mti-sead3/sead3-setup.c b/arch/mips/mti-sead3/sead3-setup.c
deleted file mode 100644
index edfcaf0..0000000
--- a/arch/mips/mti-sead3/sead3-setup.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
- * Copyright (C) 2013 Imagination Technologies Ltd.
- */
-#include <linux/init.h>
-#include <linux/libfdt.h>
-#include <linux/of_fdt.h>
-
-#include <asm/prom.h>
-#include <asm/fw/fw.h>
-
-#include <asm/mips-boards/generic.h>
-
-const char *get_system_type(void)
-{
-	return "MIPS SEAD3";
-}
-
-static uint32_t get_memsize_from_cmdline(void)
-{
-	int memsize = 0;
-	char *p = arcs_cmdline;
-	char *s = "memsize=";
-
-	p = strstr(p, s);
-	if (p) {
-		p += strlen(s);
-		memsize = memparse(p, NULL);
-	}
-
-	return memsize;
-}
-
-static uint32_t get_memsize_from_env(void)
-{
-	int memsize = 0;
-	char *p;
-
-	p = fw_getenv("memsize");
-	if (p)
-		memsize = memparse(p, NULL);
-
-	return memsize;
-}
-
-static uint32_t get_memsize(void)
-{
-	uint32_t memsize;
-
-	memsize = get_memsize_from_cmdline();
-	if (memsize)
-		return memsize;
-
-	return get_memsize_from_env();
-}
-
-static void __init parse_memsize_param(void)
-{
-	int offset;
-	const uint64_t *prop_value;
-	int prop_len;
-	uint32_t memsize = get_memsize();
-
-	if (!memsize)
-		return;
-
-	offset = fdt_path_offset(__dtb_start, "/memory");
-	if (offset > 0) {
-		uint64_t new_value;
-		/*
-		 * reg contains 2 32-bits BE values, offset and size. We just
-		 * want to replace the size value without affecting the offset
-		 */
-		prop_value = fdt_getprop(__dtb_start, offset, "reg", &prop_len);
-		new_value = be64_to_cpu(*prop_value);
-		new_value =  (new_value & ~0xffffffffllu) | memsize;
-		fdt_setprop_inplace_u64(__dtb_start, offset, "reg", new_value);
-	}
-}
-
-void __init *plat_get_fdt(void)
-{
-	return (void *)__dtb_start;
-}
-
-void __init plat_mem_setup(void)
-{
-	/* allow command line/bootloader env to override memory size in DT */
-	parse_memsize_param();
-
-	/*
-	 * Load the builtin devicetree. This causes the chosen node to be
-	 * parsed resulting in our memory appearing
-	 */
-	__dt_setup_arch(__dtb_start);
-}
-
-void __init device_tree_init(void)
-{
-	if (!initial_boot_params)
-		return;
-
-	unflatten_and_copy_device_tree();
-}
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
deleted file mode 100644
index a120b7a..0000000
--- a/arch/mips/mti-sead3/sead3-time.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/irqchip/mips-gic.h>
-
-#include <asm/cpu.h>
-#include <asm/setup.h>
-#include <asm/time.h>
-#include <asm/irq.h>
-#include <asm/mips-boards/generic.h>
-
-static void __iomem *status_reg = (void __iomem *)0xbf000410;
-
-/*
- * Estimate CPU frequency.  Sets mips_hpt_frequency as a side-effect.
- */
-static unsigned int __init estimate_cpu_frequency(void)
-{
-	unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK);
-	unsigned int tick = 0;
-	unsigned int freq;
-	unsigned int orig;
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	orig = readl(status_reg) & 0x2;		      /* get original sample */
-	/* wait for transition */
-	while ((readl(status_reg) & 0x2) == orig)
-		;
-	orig = orig ^ 0x2;			      /* flip the bit */
-
-	write_c0_count(0);
-
-	/* wait 1 second (the sampling clock transitions every 10ms) */
-	while (tick < 100) {
-		/* wait for transition */
-		while ((readl(status_reg) & 0x2) == orig)
-			;
-		orig = orig ^ 0x2;			      /* flip the bit */
-		tick++;
-	}
-
-	freq = read_c0_count();
-
-	local_irq_restore(flags);
-
-	mips_hpt_frequency = freq;
-
-	/* Adjust for processor */
-	if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
-		(prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
-		freq *= 2;
-
-	freq += 5000;	     /* rounding */
-	freq -= freq%10000;
-
-	return freq ;
-}
-
-void read_persistent_clock(struct timespec *ts)
-{
-	ts->tv_sec = 0;
-	ts->tv_nsec = 0;
-}
-
-int get_c0_perfcount_int(void)
-{
-	if (gic_present)
-		return gic_get_c0_perfcount_int();
-	if (cp0_perfcount_irq >= 0)
-		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
-	return -1;
-}
-EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
-
-unsigned int get_c0_compare_int(void)
-{
-	if (gic_present)
-		return gic_get_c0_compare_int();
-	return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
-}
-
-void __init plat_time_init(void)
-{
-	unsigned int est_freq;
-
-	est_freq = estimate_cpu_frequency();
-
-	pr_debug("CPU frequency %d.%02d MHz\n", (est_freq / 1000000),
-		(est_freq % 1000000) * 100 / 1000000);
-
-	mips_scroll_message();
-}
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 39e7b47..49a2e22 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -14,7 +14,6 @@
 #include <linux/errno.h>
 #include <linux/filter.h>
 #include <linux/if_vlan.h>
-#include <linux/kconfig.h>
 #include <linux/moduleloader.h>
 #include <linux/netdevice.h>
 #include <linux/string.h>
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index 139ad1d..4b82148 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -3,6 +3,8 @@
 #
 
 obj-y				+= pci.o
+obj-$(CONFIG_PCI_DRIVERS_LEGACY)+= pci-legacy.o
+obj-$(CONFIG_PCI_DRIVERS_GENERIC)+= pci-generic.o
 
 #
 # PCI bus host bridge specific code
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c
index c8994c1..e99ca77 100644
--- a/arch/mips/pci/pci-alchemy.c
+++ b/arch/mips/pci/pci-alchemy.c
@@ -429,7 +429,8 @@
 
 	/* Au1500 revisions older than AD have borked coherent PCI */
 	if ((alchemy_get_cputype() == ALCHEMY_CPU_AU1500) &&
-	    (read_c0_prid() < 0x01030202) && !coherentio) {
+	    (read_c0_prid() < 0x01030202) &&
+	    (coherentio == IO_COHERENCE_DISABLED)) {
 		val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
 		val |= PCI_CONFIG_NC;
 		__raw_writel(val, ctx->regs + PCI_REG_CONFIG);
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
index 7db963d..bdf87b4 100644
--- a/arch/mips/pci/pci-ar71xx.c
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -18,7 +18,7 @@
 #include <linux/pci.h>
 #include <linux/pci_regs.h>
 #include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/platform_device.h>
 
 #include <asm/mach-ath79/ar71xx_regs.h>
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 2013dad..1e23c8d 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -11,7 +11,7 @@
 
 #include <linux/irq.h>
 #include <linux/pci.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <asm/mach-ath79/ath79.h>
 #include <asm/mach-ath79/ar71xx_regs.h>
diff --git a/arch/mips/pci/pci-generic.c b/arch/mips/pci/pci-generic.c
new file mode 100644
index 0000000..dce304d
--- /dev/null
+++ b/arch/mips/pci/pci-generic.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * pcibios_align_resource taken from arch/arm/kernel/bios32.c.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/pci.h>
+
+/*
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might have be mirrored at 0x0100-0x03ff..
+ */
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+				resource_size_t size, resource_size_t align)
+{
+	struct pci_dev *dev = data;
+	resource_size_t start = res->start;
+	struct pci_host_bridge *host_bridge;
+
+	if (res->flags & IORESOURCE_IO && start & 0x300)
+		start = (start + 0x3ff) & ~0x3ff;
+
+	start = (start + align - 1) & ~(align - 1);
+
+	host_bridge = pci_find_host_bridge(dev->bus);
+
+	if (host_bridge->align_resource)
+		return host_bridge->align_resource(dev, res,
+				start, size, align);
+
+	return start;
+}
+
+void pcibios_fixup_bus(struct pci_bus *bus)
+{
+	pci_read_bridge_bases(bus);
+}
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index b9deab1..f18f887 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -13,7 +13,6 @@
 #include <linux/delay.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
-#include <linux/module.h>
 #include <linux/clk.h>
 #include <linux/of_platform.h>
 #include <linux/of_gpio.h>
@@ -234,7 +233,6 @@
 	{ .compatible = "lantiq,pci-xway" },
 	{},
 };
-MODULE_DEVICE_TABLE(of, ltq_pci_match);
 
 static struct platform_driver ltq_pci_driver = {
 	.probe = ltq_pci_probe,
diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
new file mode 100644
index 0000000..014649b
--- /dev/null
+++ b/arch/mips/pci/pci-legacy.c
@@ -0,0 +1,302 @@
+/*
+ * This program is free software; you can redistribute	it and/or modify it
+ * under  the terms of	the GNU General	 Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * Copyright (C) 2003, 04, 11 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2011 Wind River Systems,
+ *   written by Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/of_address.h>
+
+#include <asm/cpu-info.h>
+
+/*
+ * If PCI_PROBE_ONLY in pci_flags is set, we don't change any PCI resource
+ * assignments.
+ */
+
+/*
+ * The PCI controller list.
+ */
+static LIST_HEAD(controllers);
+
+static int pci_initialized;
+
+/*
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might have be mirrored at 0x0100-0x03ff..
+ */
+resource_size_t
+pcibios_align_resource(void *data, const struct resource *res,
+		       resource_size_t size, resource_size_t align)
+{
+	struct pci_dev *dev = data;
+	struct pci_controller *hose = dev->sysdata;
+	resource_size_t start = res->start;
+
+	if (res->flags & IORESOURCE_IO) {
+		/* Make sure we start at our min on all hoses */
+		if (start < PCIBIOS_MIN_IO + hose->io_resource->start)
+			start = PCIBIOS_MIN_IO + hose->io_resource->start;
+
+		/*
+		 * Put everything into 0x00-0xff region modulo 0x400
+		 */
+		if (start & 0x300)
+			start = (start + 0x3ff) & ~0x3ff;
+	} else if (res->flags & IORESOURCE_MEM) {
+		/* Make sure we start at our min on all hoses */
+		if (start < PCIBIOS_MIN_MEM + hose->mem_resource->start)
+			start = PCIBIOS_MIN_MEM + hose->mem_resource->start;
+	}
+
+	return start;
+}
+
+static void pcibios_scanbus(struct pci_controller *hose)
+{
+	static int next_busno;
+	static int need_domain_info;
+	LIST_HEAD(resources);
+	struct pci_bus *bus;
+
+	if (hose->get_busno && pci_has_flag(PCI_PROBE_ONLY))
+		next_busno = (*hose->get_busno)();
+
+	pci_add_resource_offset(&resources,
+				hose->mem_resource, hose->mem_offset);
+	pci_add_resource_offset(&resources,
+				hose->io_resource, hose->io_offset);
+	pci_add_resource_offset(&resources,
+				hose->busn_resource, hose->busn_offset);
+	bus = pci_scan_root_bus(NULL, next_busno, hose->pci_ops, hose,
+				&resources);
+	hose->bus = bus;
+
+	need_domain_info = need_domain_info || pci_domain_nr(bus);
+	set_pci_need_domain_info(hose, need_domain_info);
+
+	if (!bus) {
+		pci_free_resource_list(&resources);
+		return;
+	}
+
+	next_busno = bus->busn_res.end + 1;
+	/* Don't allow 8-bit bus number overflow inside the hose -
+	   reserve some space for bridges. */
+	if (next_busno > 224) {
+		next_busno = 0;
+		need_domain_info = 1;
+	}
+
+	/*
+	 * We insert PCI resources into the iomem_resource and
+	 * ioport_resource trees in either pci_bus_claim_resources()
+	 * or pci_bus_assign_resources().
+	 */
+	if (pci_has_flag(PCI_PROBE_ONLY)) {
+		pci_bus_claim_resources(bus);
+	} else {
+		pci_bus_size_bridges(bus);
+		pci_bus_assign_resources(bus);
+	}
+	pci_bus_add_devices(bus);
+}
+
+#ifdef CONFIG_OF
+void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
+{
+	struct of_pci_range range;
+	struct of_pci_range_parser parser;
+
+	pr_info("PCI host bridge %s ranges:\n", node->full_name);
+	hose->of_node = node;
+
+	if (of_pci_range_parser_init(&parser, node))
+		return;
+
+	for_each_of_pci_range(&parser, &range) {
+		struct resource *res = NULL;
+
+		switch (range.flags & IORESOURCE_TYPE_BITS) {
+		case IORESOURCE_IO:
+			pr_info("  IO 0x%016llx..0x%016llx\n",
+				range.cpu_addr,
+				range.cpu_addr + range.size - 1);
+			hose->io_map_base =
+				(unsigned long)ioremap(range.cpu_addr,
+						       range.size);
+			res = hose->io_resource;
+			break;
+		case IORESOURCE_MEM:
+			pr_info(" MEM 0x%016llx..0x%016llx\n",
+				range.cpu_addr,
+				range.cpu_addr + range.size - 1);
+			res = hose->mem_resource;
+			break;
+		}
+		if (res != NULL)
+			of_pci_range_to_resource(&range, node, res);
+	}
+}
+
+struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
+{
+	struct pci_controller *hose = bus->sysdata;
+
+	return of_node_get(hose->of_node);
+}
+#endif
+
+static DEFINE_MUTEX(pci_scan_mutex);
+
+void register_pci_controller(struct pci_controller *hose)
+{
+	struct resource *parent;
+
+	parent = hose->mem_resource->parent;
+	if (!parent)
+		parent = &iomem_resource;
+
+	if (request_resource(parent, hose->mem_resource) < 0)
+		goto out;
+
+	parent = hose->io_resource->parent;
+	if (!parent)
+		parent = &ioport_resource;
+
+	if (request_resource(parent, hose->io_resource) < 0) {
+		release_resource(hose->mem_resource);
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&hose->list);
+	list_add(&hose->list, &controllers);
+
+	/*
+	 * Do not panic here but later - this might happen before console init.
+	 */
+	if (!hose->io_map_base) {
+		printk(KERN_WARNING
+		       "registering PCI controller with io_map_base unset\n");
+	}
+
+	/*
+	 * Scan the bus if it is register after the PCI subsystem
+	 * initialization.
+	 */
+	if (pci_initialized) {
+		mutex_lock(&pci_scan_mutex);
+		pcibios_scanbus(hose);
+		mutex_unlock(&pci_scan_mutex);
+	}
+
+	return;
+
+out:
+	printk(KERN_WARNING
+	       "Skipping PCI bus scan due to resource conflict\n");
+}
+
+static int __init pcibios_init(void)
+{
+	struct pci_controller *hose;
+
+	/* Scan all of the recorded PCI controllers.  */
+	list_for_each_entry(hose, &controllers, list)
+		pcibios_scanbus(hose);
+
+	pci_fixup_irqs(pci_common_swizzle, pcibios_map_irq);
+
+	pci_initialized = 1;
+
+	return 0;
+}
+
+subsys_initcall(pcibios_init);
+
+static int pcibios_enable_resources(struct pci_dev *dev, int mask)
+{
+	u16 cmd, old_cmd;
+	int idx;
+	struct resource *r;
+
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	old_cmd = cmd;
+	for (idx=0; idx < PCI_NUM_RESOURCES; idx++) {
+		/* Only set up the requested stuff */
+		if (!(mask & (1<<idx)))
+			continue;
+
+		r = &dev->resource[idx];
+		if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
+			continue;
+		if ((idx == PCI_ROM_RESOURCE) &&
+				(!(r->flags & IORESOURCE_ROM_ENABLE)))
+			continue;
+		if (!r->start && r->end) {
+			printk(KERN_ERR "PCI: Device %s not available "
+			       "because of resource collisions\n",
+			       pci_name(dev));
+			return -EINVAL;
+		}
+		if (r->flags & IORESOURCE_IO)
+			cmd |= PCI_COMMAND_IO;
+		if (r->flags & IORESOURCE_MEM)
+			cmd |= PCI_COMMAND_MEMORY;
+	}
+	if (cmd != old_cmd) {
+		printk("PCI: Enabling device %s (%04x -> %04x)\n",
+		       pci_name(dev), old_cmd, cmd);
+		pci_write_config_word(dev, PCI_COMMAND, cmd);
+	}
+	return 0;
+}
+
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+	int err;
+
+	if ((err = pcibios_enable_resources(dev, mask)) < 0)
+		return err;
+
+	return pcibios_plat_dev_init(dev);
+}
+
+void pcibios_fixup_bus(struct pci_bus *bus)
+{
+	struct pci_dev *dev = bus->self;
+
+	if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
+	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+		pci_read_bridge_bases(bus);
+	}
+}
+
+char * (*pcibios_plat_setup)(char *str) __initdata;
+
+char *__init pcibios_setup(char *str)
+{
+	if (pcibios_plat_setup)
+		return pcibios_plat_setup(str);
+	return str;
+}
diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
index 6ce8162..628c513 100644
--- a/arch/mips/pci/pci-mt7620.c
+++ b/arch/mips/pci/pci-mt7620.c
@@ -15,7 +15,6 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_pci.h>
@@ -407,13 +406,11 @@
 	{ .compatible = "mediatek,mt7620-pci" },
 	{},
 };
-MODULE_DEVICE_TABLE(of, mt7620_pci_ids);
 
 static struct platform_driver mt7620_pci_driver = {
 	.probe = mt7620_pci_probe,
 	.driver = {
 		.name = "mt7620-pci",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(mt7620_pci_ids),
 	},
 };
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index c258cd4..308d051 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -204,6 +204,8 @@
 	 * Interrupt Number (INTA# = 0, INTB# = 1, INTC# = 2, and
 	 * INTD# = 3)
 	 */
+	if (of_machine_is_compatible("dlink,dsr-500n"))
+		return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC";
 	switch (octeon_bootinfo->board_type) {
 	case CVMX_BOARD_TYPE_NAO38:
 		/* This is really the NAC38 */
diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
index f2a1050..d6360fe 100644
--- a/arch/mips/pci/pci-rt2880.c
+++ b/arch/mips/pci/pci-rt2880.c
@@ -16,7 +16,6 @@
 #include <linux/pci.h>
 #include <linux/io.h>
 #include <linux/init.h>
-#include <linux/module.h>
 #include <linux/of_platform.h>
 #include <linux/of_irq.h>
 #include <linux/of_pci.h>
@@ -260,7 +259,6 @@
 	{ .compatible = "ralink,rt288x-pci" },
 	{},
 };
-MODULE_DEVICE_TABLE(of, rt288x_pci_match);
 
 static struct platform_driver rt288x_pci_driver = {
 	.probe = rt288x_pci_probe,
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
index 53a42b0..3520e9b 100644
--- a/arch/mips/pci/pci-rt3883.c
+++ b/arch/mips/pci/pci-rt3883.c
@@ -16,7 +16,6 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_pci.h>
@@ -580,7 +579,6 @@
 	{ .compatible = "ralink,rt3883-pci" },
 	{},
 };
-MODULE_DEVICE_TABLE(of, rt3883_pci_ids);
 
 static struct platform_driver rt3883_pci_driver = {
 	.probe = rt3883_pci_probe,
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index b4c02f2..f6325fa 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -20,208 +20,13 @@
 
 #include <asm/cpu-info.h>
 
-/*
- * If PCI_PROBE_ONLY in pci_flags is set, we don't change any PCI resource
- * assignments.
- */
-
-/*
- * The PCI controller list.
- */
-
-static struct pci_controller *hose_head, **hose_tail = &hose_head;
-
 unsigned long PCIBIOS_MIN_IO;
+EXPORT_SYMBOL(PCIBIOS_MIN_IO);
+
 unsigned long PCIBIOS_MIN_MEM;
+EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
 
-static int pci_initialized;
-
-/*
- * We need to avoid collisions with `mirrored' VGA ports
- * and other strange ISA hardware, so we always want the
- * addresses to be allocated in the 0x000-0x0ff region
- * modulo 0x400.
- *
- * Why? Because some silly external IO cards only decode
- * the low 10 bits of the IO address. The 0x00-0xff region
- * is reserved for motherboard devices that decode all 16
- * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
- * but we want to try to avoid allocating at 0x2900-0x2bff
- * which might have be mirrored at 0x0100-0x03ff..
- */
-resource_size_t
-pcibios_align_resource(void *data, const struct resource *res,
-		       resource_size_t size, resource_size_t align)
-{
-	struct pci_dev *dev = data;
-	struct pci_controller *hose = dev->sysdata;
-	resource_size_t start = res->start;
-
-	if (res->flags & IORESOURCE_IO) {
-		/* Make sure we start at our min on all hoses */
-		if (start < PCIBIOS_MIN_IO + hose->io_resource->start)
-			start = PCIBIOS_MIN_IO + hose->io_resource->start;
-
-		/*
-		 * Put everything into 0x00-0xff region modulo 0x400
-		 */
-		if (start & 0x300)
-			start = (start + 0x3ff) & ~0x3ff;
-	} else if (res->flags & IORESOURCE_MEM) {
-		/* Make sure we start at our min on all hoses */
-		if (start < PCIBIOS_MIN_MEM + hose->mem_resource->start)
-			start = PCIBIOS_MIN_MEM + hose->mem_resource->start;
-	}
-
-	return start;
-}
-
-static void pcibios_scanbus(struct pci_controller *hose)
-{
-	static int next_busno;
-	static int need_domain_info;
-	LIST_HEAD(resources);
-	struct pci_bus *bus;
-
-	if (hose->get_busno && pci_has_flag(PCI_PROBE_ONLY))
-		next_busno = (*hose->get_busno)();
-
-	pci_add_resource_offset(&resources,
-				hose->mem_resource, hose->mem_offset);
-	pci_add_resource_offset(&resources,
-				hose->io_resource, hose->io_offset);
-	pci_add_resource_offset(&resources,
-				hose->busn_resource, hose->busn_offset);
-	bus = pci_scan_root_bus(NULL, next_busno, hose->pci_ops, hose,
-				&resources);
-	hose->bus = bus;
-
-	need_domain_info = need_domain_info || hose->index;
-	hose->need_domain_info = need_domain_info;
-
-	if (!bus) {
-		pci_free_resource_list(&resources);
-		return;
-	}
-
-	next_busno = bus->busn_res.end + 1;
-	/* Don't allow 8-bit bus number overflow inside the hose -
-	   reserve some space for bridges. */
-	if (next_busno > 224) {
-		next_busno = 0;
-		need_domain_info = 1;
-	}
-
-	/*
-	 * We insert PCI resources into the iomem_resource and
-	 * ioport_resource trees in either pci_bus_claim_resources()
-	 * or pci_bus_assign_resources().
-	 */
-	if (pci_has_flag(PCI_PROBE_ONLY)) {
-		pci_bus_claim_resources(bus);
-	} else {
-		pci_bus_size_bridges(bus);
-		pci_bus_assign_resources(bus);
-	}
-	pci_bus_add_devices(bus);
-}
-
-#ifdef CONFIG_OF
-void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
-{
-	struct of_pci_range range;
-	struct of_pci_range_parser parser;
-
-	pr_info("PCI host bridge %s ranges:\n", node->full_name);
-	hose->of_node = node;
-
-	if (of_pci_range_parser_init(&parser, node))
-		return;
-
-	for_each_of_pci_range(&parser, &range) {
-		struct resource *res = NULL;
-
-		switch (range.flags & IORESOURCE_TYPE_BITS) {
-		case IORESOURCE_IO:
-			pr_info("  IO 0x%016llx..0x%016llx\n",
-				range.cpu_addr,
-				range.cpu_addr + range.size - 1);
-			hose->io_map_base =
-				(unsigned long)ioremap(range.cpu_addr,
-						       range.size);
-			res = hose->io_resource;
-			break;
-		case IORESOURCE_MEM:
-			pr_info(" MEM 0x%016llx..0x%016llx\n",
-				range.cpu_addr,
-				range.cpu_addr + range.size - 1);
-			res = hose->mem_resource;
-			break;
-		}
-		if (res != NULL)
-			of_pci_range_to_resource(&range, node, res);
-	}
-}
-
-struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
-{
-	struct pci_controller *hose = bus->sysdata;
-
-	return of_node_get(hose->of_node);
-}
-#endif
-
-static DEFINE_MUTEX(pci_scan_mutex);
-
-void register_pci_controller(struct pci_controller *hose)
-{
-	struct resource *parent;
-
-	parent = hose->mem_resource->parent;
-	if (!parent)
-		parent = &iomem_resource;
-
-	if (request_resource(parent, hose->mem_resource) < 0)
-		goto out;
-
-	parent = hose->io_resource->parent;
-	if (!parent)
-		parent = &ioport_resource;
-
-	if (request_resource(parent, hose->io_resource) < 0) {
-		release_resource(hose->mem_resource);
-		goto out;
-	}
-
-	*hose_tail = hose;
-	hose_tail = &hose->next;
-
-	/*
-	 * Do not panic here but later - this might happen before console init.
-	 */
-	if (!hose->io_map_base) {
-		printk(KERN_WARNING
-		       "registering PCI controller with io_map_base unset\n");
-	}
-
-	/*
-	 * Scan the bus if it is register after the PCI subsystem
-	 * initialization.
-	 */
-	if (pci_initialized) {
-		mutex_lock(&pci_scan_mutex);
-		pcibios_scanbus(hose);
-		mutex_unlock(&pci_scan_mutex);
-	}
-
-	return;
-
-out:
-	printk(KERN_WARNING
-	       "Skipping PCI bus scan due to resource conflict\n");
-}
-
-static void __init pcibios_set_cache_line_size(void)
+static int __init pcibios_set_cache_line_size(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int lsize;
@@ -239,92 +44,9 @@
 	pci_dfl_cache_line_size = lsize >> 2;
 
 	pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize);
-}
-
-static int __init pcibios_init(void)
-{
-	struct pci_controller *hose;
-
-	pcibios_set_cache_line_size();
-
-	/* Scan all of the recorded PCI controllers.  */
-	for (hose = hose_head; hose; hose = hose->next)
-		pcibios_scanbus(hose);
-
-	pci_fixup_irqs(pci_common_swizzle, pcibios_map_irq);
-
-	pci_initialized = 1;
-
 	return 0;
 }
-
-subsys_initcall(pcibios_init);
-
-static int pcibios_enable_resources(struct pci_dev *dev, int mask)
-{
-	u16 cmd, old_cmd;
-	int idx;
-	struct resource *r;
-
-	pci_read_config_word(dev, PCI_COMMAND, &cmd);
-	old_cmd = cmd;
-	for (idx=0; idx < PCI_NUM_RESOURCES; idx++) {
-		/* Only set up the requested stuff */
-		if (!(mask & (1<<idx)))
-			continue;
-
-		r = &dev->resource[idx];
-		if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
-			continue;
-		if ((idx == PCI_ROM_RESOURCE) &&
-				(!(r->flags & IORESOURCE_ROM_ENABLE)))
-			continue;
-		if (!r->start && r->end) {
-			printk(KERN_ERR "PCI: Device %s not available "
-			       "because of resource collisions\n",
-			       pci_name(dev));
-			return -EINVAL;
-		}
-		if (r->flags & IORESOURCE_IO)
-			cmd |= PCI_COMMAND_IO;
-		if (r->flags & IORESOURCE_MEM)
-			cmd |= PCI_COMMAND_MEMORY;
-	}
-	if (cmd != old_cmd) {
-		printk("PCI: Enabling device %s (%04x -> %04x)\n",
-		       pci_name(dev), old_cmd, cmd);
-		pci_write_config_word(dev, PCI_COMMAND, cmd);
-	}
-	return 0;
-}
-
-unsigned int pcibios_assign_all_busses(void)
-{
-	return 1;
-}
-
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
-	int err;
-
-	if ((err = pcibios_enable_resources(dev, mask)) < 0)
-		return err;
-
-	return pcibios_plat_dev_init(dev);
-}
-
-void pcibios_fixup_bus(struct pci_bus *bus)
-{
-	struct pci_dev *dev = bus->self;
-
-	if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
-	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
-		pci_read_bridge_bases(bus);
-	}
-}
-
-EXPORT_SYMBOL(PCIBIOS_MIN_IO);
-EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
+arch_initcall(pcibios_set_cache_line_size);
 
 void pci_resource_to_user(const struct pci_dev *dev, int bar,
 			  const struct resource *rsrc, resource_size_t *start,
@@ -359,12 +81,3 @@
 	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 		vma->vm_end - vma->vm_start, vma->vm_page_prot);
 }
-
-char * (*pcibios_plat_setup)(char *str) __initdata;
-
-char *__init pcibios_setup(char *str)
-{
-	if (pcibios_plat_setup)
-		return pcibios_plat_setup(str);
-	return str;
-}
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 99f3db4..9f672ce 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -11,7 +11,7 @@
 #include <linux/interrupt.h>
 #include <linux/time.h>
 #include <linux/delay.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
 
 #include <asm/octeon/octeon.h>
 #include <asm/octeon/cvmx-npei-defs.h>
diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c
index 3cd3577..7cf4eb5 100644
--- a/arch/mips/pnx833x/common/platform.c
+++ b/arch/mips/pnx833x/common/platform.c
@@ -232,12 +232,8 @@
 
 static int __init pnx833x_platform_init(void)
 {
-	int res;
-
-	res = platform_add_devices(pnx833x_platform_devices,
-				   ARRAY_SIZE(pnx833x_platform_devices));
-
-	return res;
+	return platform_add_devices(pnx833x_platform_devices,
+				    ARRAY_SIZE(pnx833x_platform_devices));
 }
 
 arch_initcall(pnx833x_platform_init);
diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c
index b0343ff..8077ff3 100644
--- a/arch/mips/ralink/timer.c
+++ b/arch/mips/ralink/timer.c
@@ -1,4 +1,7 @@
 /*
+ * Ralink RT2880 timer
+ * Author: John Crispin
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
  * by the Free Software Foundation.
@@ -6,7 +9,6 @@
  * Copyright (C) 2013 John Crispin <john@phrozen.org>
 */
 
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/timer.h>
@@ -152,33 +154,17 @@
 	return 0;
 }
 
-static int rt_timer_remove(struct platform_device *pdev)
-{
-	struct rt_timer *rt = platform_get_drvdata(pdev);
-
-	rt_timer_disable(rt);
-	rt_timer_free(rt);
-
-	return 0;
-}
-
 static const struct of_device_id rt_timer_match[] = {
 	{ .compatible = "ralink,rt2880-timer" },
 	{},
 };
-MODULE_DEVICE_TABLE(of, rt_timer_match);
 
 static struct platform_driver rt_timer_driver = {
 	.probe = rt_timer_probe,
-	.remove = rt_timer_remove,
 	.driver = {
-		.name		= "rt-timer",
-		.of_match_table	= rt_timer_match
+		.name			= "rt-timer",
+		.of_match_table		= rt_timer_match,
+		.suppress_bind_attrs	= true,
 	},
 };
-
-module_platform_driver(rt_timer_driver);
-
-MODULE_DESCRIPTION("Ralink RT2880 timer");
-MODULE_AUTHOR("John Crispin <john@phrozen.org");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(rt_timer_driver);
diff --git a/arch/mips/txx9/Kconfig b/arch/mips/txx9/Kconfig
index 8c337d6..42923478 100644
--- a/arch/mips/txx9/Kconfig
+++ b/arch/mips/txx9/Kconfig
@@ -20,7 +20,7 @@
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_SUPPORTS_BIG_ENDIAN
-	select HAVE_CLK
+	select COMMON_CLK
 
 config TOSHIBA_JMR3927
 	bool "Toshiba JMR-TX3927 board"
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c
index 1f6bc9a..285d84e 100644
--- a/arch/mips/txx9/generic/pci.c
+++ b/arch/mips/txx9/generic/pci.c
@@ -29,12 +29,8 @@
 early_read_config_word(struct pci_controller *hose,
 		       int top_bus, int bus, int devfn, int offset, u16 *value)
 {
-	struct pci_dev fake_dev;
 	struct pci_bus fake_bus;
 
-	fake_dev.bus = &fake_bus;
-	fake_dev.sysdata = hose;
-	fake_dev.devfn = devfn;
 	fake_bus.number = bus;
 	fake_bus.sysdata = hose;
 	fake_bus.ops = hose->pci_ops;
@@ -45,7 +41,7 @@
 	else
 		fake_bus.parent = NULL;
 
-	return pci_read_config_word(&fake_dev, offset, value);
+	return pci_bus_read_config_word(&fake_bus, devfn, offset, value);
 }
 
 int __init txx9_pci66_check(struct pci_controller *hose, int top_bus,
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index ada92db..a1d98b5 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -15,7 +15,8 @@
 #include <linux/interrupt.h>
 #include <linux/string.h>
 #include <linux/module.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
 #include <linux/err.h>
 #include <linux/gpio/driver.h>
 #include <linux/platform_device.h>
@@ -83,40 +84,6 @@
 int txx9_ccfg_toeon __initdata = 1;
 #endif
 
-/* Minimum CLK support */
-
-struct clk *clk_get(struct device *dev, const char *id)
-{
-	if (!strcmp(id, "spi-baseclk"))
-		return (struct clk *)((unsigned long)txx9_gbus_clock / 2 / 2);
-	if (!strcmp(id, "imbus_clk"))
-		return (struct clk *)((unsigned long)txx9_gbus_clock / 2);
-	return ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(clk_get);
-
-int clk_enable(struct clk *clk)
-{
-	return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
-	return (unsigned long)clk;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-void clk_put(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_put);
-
 #define BOARD_VEC(board)	extern struct txx9_board_vec board;
 #include <asm/txx9/boards.h>
 #undef BOARD_VEC
@@ -560,8 +527,41 @@
 	txx9_board_vec->time_init();
 }
 
+static void txx9_clk_init(void)
+{
+	struct clk_hw *hw;
+	int error;
+
+	hw = clk_hw_register_fixed_rate(NULL, "gbus", NULL, 0, txx9_gbus_clock);
+	if (IS_ERR(hw)) {
+		error = PTR_ERR(hw);
+		goto fail;
+	}
+
+	hw = clk_hw_register_fixed_factor(NULL, "imbus", "gbus", 0, 1, 2);
+	error = clk_hw_register_clkdev(hw, "imbus_clk", NULL);
+	if (error)
+		goto fail;
+
+#ifdef CONFIG_CPU_TX49XX
+	if (TX4938_REV_PCODE() == 0x4938) {
+		hw = clk_hw_register_fixed_factor(NULL, "spi", "gbus", 0, 1, 4);
+		error = clk_hw_register_clkdev(hw, "spi-baseclk", NULL);
+		if (error)
+			goto fail;
+	}
+#endif
+
+	return;
+
+fail:
+	pr_err("Failed to register clocks: %d\n", error);
+}
+
 static int __init _txx9_arch_init(void)
 {
+	txx9_clk_init();
+
 	if (txx9_board_vec->arch_init)
 		txx9_board_vec->arch_init();
 	return 0;
diff --git a/arch/mips/txx9/generic/setup_tx3927.c b/arch/mips/txx9/generic/setup_tx3927.c
index 110e05c..d3b83a9 100644
--- a/arch/mips/txx9/generic/setup_tx3927.c
+++ b/arch/mips/txx9/generic/setup_tx3927.c
@@ -92,7 +92,6 @@
 	/* PIO */
 	__raw_writel(0, &tx3927_pioptr->maskcpu);
 	__raw_writel(0, &tx3927_pioptr->maskext);
-	txx9_gpio_init(TX3927_PIO_REG, 0, 16);
 
 	conf = read_c0_conf();
 	if (conf & TX39_CONF_DCE) {
diff --git a/arch/mips/txx9/generic/setup_tx4927.c b/arch/mips/txx9/generic/setup_tx4927.c
index a4664cb..8d80115 100644
--- a/arch/mips/txx9/generic/setup_tx4927.c
+++ b/arch/mips/txx9/generic/setup_tx4927.c
@@ -215,7 +215,6 @@
 		txx9_tmr_init(TX4927_TMR_REG(i) & 0xfffffffffULL);
 
 	/* PIO */
-	txx9_gpio_init(TX4927_PIO_REG & 0xfffffffffULL, 0, TX4927_NUM_PIO);
 	__raw_writel(0, &tx4927_pioptr->maskcpu);
 	__raw_writel(0, &tx4927_pioptr->maskext);
 
diff --git a/arch/mips/txx9/generic/setup_tx4938.c b/arch/mips/txx9/generic/setup_tx4938.c
index 58cdb2a..ba265bf 100644
--- a/arch/mips/txx9/generic/setup_tx4938.c
+++ b/arch/mips/txx9/generic/setup_tx4938.c
@@ -241,7 +241,6 @@
 		txx9_tmr_init(TX4938_TMR_REG(i) & 0xfffffffffULL);
 
 	/* PIO */
-	txx9_gpio_init(TX4938_PIO_REG & 0xfffffffffULL, 0, TX4938_NUM_PIO);
 	__raw_writel(0, &tx4938_pioptr->maskcpu);
 	__raw_writel(0, &tx4938_pioptr->maskext);
 
diff --git a/arch/mips/txx9/jmr3927/setup.c b/arch/mips/txx9/jmr3927/setup.c
index 3206f76..a455166 100644
--- a/arch/mips/txx9/jmr3927/setup.c
+++ b/arch/mips/txx9/jmr3927/setup.c
@@ -142,8 +142,6 @@
 
 	/* PIO[15:12] connected to LEDs */
 	__raw_writel(0x0000f000, &tx3927_pioptr->dir);
-	gpio_request(11, "dipsw1");
-	gpio_request(10, "dipsw2");
 
 	jmr3927_pci_setup();
 
@@ -204,6 +202,14 @@
 	txx9_iocled_init(iocled_base, -1, 8, 1, "green", NULL);
 }
 
+static void __init jmr3927_arch_init(void)
+{
+	txx9_gpio_init(TX3927_PIO_REG, 0, 16);
+
+	gpio_request(11, "dipsw1");
+	gpio_request(10, "dipsw2");
+}
+
 struct txx9_board_vec jmr3927_vec __initdata = {
 	.system = "Toshiba JMR_TX3927",
 	.prom_init = jmr3927_prom_init,
@@ -211,6 +217,7 @@
 	.irq_setup = jmr3927_irq_setup,
 	.time_init = jmr3927_time_init,
 	.device_init = jmr3927_device_init,
+	.arch_init = jmr3927_arch_init,
 #ifdef CONFIG_PCI
 	.pci_map_irq = jmr3927_pci_map_irq,
 #endif
diff --git a/arch/mips/txx9/rbtx4927/setup.c b/arch/mips/txx9/rbtx4927/setup.c
index 3c516ef..f5b367e 100644
--- a/arch/mips/txx9/rbtx4927/setup.c
+++ b/arch/mips/txx9/rbtx4927/setup.c
@@ -52,6 +52,7 @@
 #include <linux/leds.h>
 #include <asm/io.h>
 #include <asm/reboot.h>
+#include <asm/txx9pio.h>
 #include <asm/txx9/generic.h>
 #include <asm/txx9/pci.h>
 #include <asm/txx9/rbtx4927.h>
@@ -151,20 +152,37 @@
 	}
 	tx4938_setup_pcierr_irq();
 }
+#else
+static inline void tx4927_pci_setup(void) {}
+static inline void tx4937_pci_setup(void) {}
+#endif /* CONFIG_PCI */
+
+static void __init rbtx4927_gpio_init(void)
+{
+	/* TX4927-SIO DTR on (PIO[15]) */
+	gpio_request(15, "sio-dtr");
+	gpio_direction_output(15, 1);
+
+	tx4927_sio_init(0, 0);
+}
 
 static void __init rbtx4927_arch_init(void)
 {
+	txx9_gpio_init(TX4927_PIO_REG & 0xfffffffffULL, 0, TX4927_NUM_PIO);
+
+	rbtx4927_gpio_init();
+
 	tx4927_pci_setup();
 }
 
 static void __init rbtx4937_arch_init(void)
 {
+	txx9_gpio_init(TX4938_PIO_REG & 0xfffffffffULL, 0, TX4938_NUM_PIO);
+
+	rbtx4927_gpio_init();
+
 	tx4937_pci_setup();
 }
-#else
-#define rbtx4927_arch_init NULL
-#define rbtx4937_arch_init NULL
-#endif /* CONFIG_PCI */
 
 static void toshiba_rbtx4927_restart(char *command)
 {
@@ -205,12 +223,6 @@
 #else
 	set_io_port_base(KSEG1 + RBTX4927_ISA_IO_OFFSET);
 #endif
-
-	/* TX4927-SIO DTR on (PIO[15]) */
-	gpio_request(15, "sio-dtr");
-	gpio_direction_output(15, 1);
-
-	tx4927_sio_init(0, 0);
 }
 
 static void __init rbtx4927_clock_init(void)
diff --git a/arch/mips/txx9/rbtx4938/setup.c b/arch/mips/txx9/rbtx4938/setup.c
index 54de668..07939ed 100644
--- a/arch/mips/txx9/rbtx4938/setup.c
+++ b/arch/mips/txx9/rbtx4938/setup.c
@@ -336,6 +336,7 @@
 
 static void __init rbtx4938_arch_init(void)
 {
+	txx9_gpio_init(TX4938_PIO_REG & 0xfffffffffULL, 0, TX4938_NUM_PIO);
 	gpiochip_add_data(&rbtx4938_spi_gpio_chip, NULL);
 	rbtx4938_pci_setup();
 	rbtx4938_spi_init();
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 3b4538e..c3dc12a 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -13,8 +13,6 @@
 	-DDISABLE_BRANCH_PROFILING \
 	$(call cc-option, -fno-stack-protector)
 aflags-vdso := $(ccflags-vdso) \
-	$(filter -I%,$(KBUILD_CFLAGS)) \
-	$(filter -E%,$(KBUILD_CFLAGS)) \
 	-D__ASSEMBLY__ -Wa,-gdwarf-2
 
 #
@@ -82,7 +80,7 @@
 $(obj-vdso): KBUILD_CFLAGS := $(cflags-vdso) $(native-abi)
 $(obj-vdso): KBUILD_AFLAGS := $(aflags-vdso) $(native-abi)
 
-$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(native-abi)
+$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) $(native-abi)
 
 $(obj)/vdso.so.dbg.raw: $(obj)/vdso.lds $(obj-vdso) FORCE
 	$(call if_changed,vdsold)
diff --git a/arch/mn10300/include/asm/processor.h b/arch/mn10300/include/asm/processor.h
index 769d5ed..b10ba12 100644
--- a/arch/mn10300/include/asm/processor.h
+++ b/arch/mn10300/include/asm/processor.h
@@ -18,7 +18,6 @@
 #include <asm/page.h>
 #include <asm/ptrace.h>
 #include <asm/cpu-regs.h>
-#include <asm/uaccess.h>
 #include <asm/current.h>
 
 /* Forward declaration, a strange C thing */
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index d012e87..2eedf6f 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -38,7 +38,6 @@
 #define get_ds()	(KERNEL_DS)
 #define get_fs()	(current_thread_info()->addr_limit)
 #define set_fs(x)	(current_thread_info()->addr_limit = (x))
-#define __kernel_ds_p() (current_thread_info()->addr_limit.seg == 0x9FFFFFFF)
 
 #define segment_eq(a, b) ((a).seg == (b).seg)
 
@@ -72,12 +71,6 @@
 #define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
 #define __access_ok(addr, size)     (__range_ok((addr), (size)) == 0)
 
-static inline int verify_area(int type, const void *addr, unsigned long size)
-{
-	return access_ok(type, addr, size) ? 0 : -EFAULT;
-}
-
-
 /*
  * The exception table consists of pairs of addresses: the first is the
  * address of an instruction that is allowed to fault, and the second is
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c
index dfd0301..cd8cb1d 100644
--- a/arch/mn10300/kernel/signal.c
+++ b/arch/mn10300/kernel/signal.c
@@ -75,7 +75,7 @@
 		struct fpucontext *buf;
 		err |= __get_user(buf, &sc->fpucontext);
 		if (buf) {
-			if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
+			if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
 				goto badframe;
 			err |= fpu_restore_sigcontext(buf);
 		}
@@ -98,7 +98,7 @@
 	long d0;
 
 	frame = (struct sigframe __user *) current_frame()->sp;
-	if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
 	if (__get_user(set.sig[0], &frame->sc.oldmask))
 		goto badframe;
@@ -130,7 +130,7 @@
 	long d0;
 
 	frame = (struct rt_sigframe __user *) current_frame()->sp;
-	if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
diff --git a/arch/nios2/include/asm/cpuinfo.h b/arch/nios2/include/asm/cpuinfo.h
index e88fcae..348bb22 100644
--- a/arch/nios2/include/asm/cpuinfo.h
+++ b/arch/nios2/include/asm/cpuinfo.h
@@ -25,10 +25,10 @@
 	/* Core CPU configuration */
 	char cpu_impl[12];
 	u32 cpu_clock_freq;
-	u32 mmu;
-	u32 has_div;
-	u32 has_mul;
-	u32 has_mulx;
+	bool mmu;
+	bool has_div;
+	bool has_mul;
+	bool has_mulx;
 
 	/* CPU caches */
 	u32 icache_line_size;
diff --git a/arch/nios2/kernel/cpuinfo.c b/arch/nios2/kernel/cpuinfo.c
index 1d96de0..1cccc36 100644
--- a/arch/nios2/kernel/cpuinfo.c
+++ b/arch/nios2/kernel/cpuinfo.c
@@ -41,11 +41,6 @@
 	return val;
 }
 
-static inline u32 fcpu_has(struct device_node *cpu, const char *n)
-{
-	return of_get_property(cpu, n, NULL) ? 1 : 0;
-}
-
 void __init setup_cpuinfo(void)
 {
 	struct device_node *cpu;
@@ -56,7 +51,7 @@
 	if (!cpu)
 		panic("%s: No CPU found in devicetree!\n", __func__);
 
-	if (!fcpu_has(cpu, "altr,has-initda"))
+	if (!of_property_read_bool(cpu, "altr,has-initda"))
 		panic("initda instruction is unimplemented. Please update your "
 			"hardware system to have more than 4-byte line data "
 			"cache\n");
@@ -69,10 +64,10 @@
 	else
 		strcpy(cpuinfo.cpu_impl, "<unknown>");
 
-	cpuinfo.has_div = fcpu_has(cpu, "altr,has-div");
-	cpuinfo.has_mul = fcpu_has(cpu, "altr,has-mul");
-	cpuinfo.has_mulx = fcpu_has(cpu, "altr,has-mulx");
-	cpuinfo.mmu = fcpu_has(cpu, "altr,has-mmu");
+	cpuinfo.has_div = of_property_read_bool(cpu, "altr,has-div");
+	cpuinfo.has_mul = of_property_read_bool(cpu, "altr,has-mul");
+	cpuinfo.has_mulx = of_property_read_bool(cpu, "altr,has-mulx");
+	cpuinfo.mmu = of_property_read_bool(cpu, "altr,has-mmu");
 
 	if (IS_ENABLED(CONFIG_NIOS2_HW_DIV_SUPPORT) && !cpuinfo.has_div)
 		err_cpu("DIV");
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index 5cc6b4f..140faa1 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -82,10 +82,6 @@
 	unsigned long insn, fixup;
 };
 
-/* Returns 0 if exception not found and fixup otherwise.  */
-extern unsigned long search_exception_table(unsigned long);
-extern void sort_exception_table(void);
-
 /*
  * These are the main single-value transfer routines.  They automatically
  * use the right size if we just have the right pointer type.
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index e44bdb9..c2c43f7 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -83,10 +83,10 @@
 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
 
 /* This is the size of the initially mapped kernel memory */
-#if defined(CONFIG_64BIT) || defined(CONFIG_SMP)
-#define KERNEL_INITIAL_ORDER	25	/* 1<<25 = 32MB */
+#if defined(CONFIG_64BIT)
+#define KERNEL_INITIAL_ORDER	26	/* 1<<26 = 64MB */
 #else
-#define KERNEL_INITIAL_ORDER	24	/* 1<<24 = 16MB */
+#define KERNEL_INITIAL_ORDER	25	/* 1<<25 = 32MB */
 #endif
 #define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)
 
diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h
index 5e953ab..6367023 100644
--- a/arch/parisc/include/asm/traps.h
+++ b/arch/parisc/include/asm/traps.h
@@ -11,6 +11,7 @@
 void die_if_kernel(char *str, struct pt_regs *regs, long err);
 
 /* mm/fault.c */
+const char *trap_name(unsigned long code);
 void do_page_fault(struct pt_regs *regs, unsigned long code,
 		unsigned long address);
 #endif
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index f3db7d8..5979745 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -75,4 +75,9 @@
 #define MAP_HUGE_SHIFT	26
 #define MAP_HUGE_MASK	0x3f
 
+#define PKEY_DISABLE_ACCESS	0x1
+#define PKEY_DISABLE_WRITE	0x2
+#define PKEY_ACCESS_MASK	(PKEY_DISABLE_ACCESS |\
+				 PKEY_DISABLE_WRITE)
+
 #endif /* __PARISC_MMAN_H__ */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 6700127..629eb46 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -345,7 +345,7 @@
 				      != (addr & (SHM_COLOUR - 1))) {
 			__flush_cache_page(mpnt, addr, page_to_phys(page));
 			if (old_addr)
-				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
+				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
 			old_addr = addr;
 		}
 	}
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 97d6b20..378df92 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -458,8 +458,8 @@
 	}
 
 	printk("\n");
-	printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
-			msg, code, regs, offset);
+	pr_crit("%s: Code=%d (%s) regs=%p (Addr=" RFMT ")\n",
+		msg, code, trap_name(code), regs, offset);
 	show_regs(regs);
 
 	spin_unlock(&terminate_lock);
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index b37787d..3d6ef1b 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -90,8 +90,9 @@
 	/* Start of data section */
 	_sdata = .;
 
-	RO_DATA_SECTION(8)
-
+	/* Architecturally we need to keep __gp below 0x1000000 and thus
+	 * in front of RO_DATA_SECTION() which stores lots of tracepoint
+	 * and ftrace symbols. */
 #ifdef CONFIG_64BIT
 	. = ALIGN(16);
 	/* Linkage tables */
@@ -106,6 +107,12 @@
 	}
 #endif
 
+	RO_DATA_SECTION(8)
+
+	/* RO because of BUILDTIME_EXTABLE_SORT */
+	EXCEPTION_TABLE(8)
+	NOTES
+
 	/* unwind info */
 	.PARISC.unwind : {
 		__start___unwind = .;
@@ -121,9 +128,6 @@
 	. = ALIGN(HUGEPAGE_SIZE);
 	data_start = .;
 
-	EXCEPTION_TABLE(8)
-	NOTES
-
 	/* Data */
 	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE)
 
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 47a6ca4..8ff9253 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -14,7 +14,7 @@
 #include <linux/ptrace.h>
 #include <linux/sched.h>
 #include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/uaccess.h>
 
 #include <asm/traps.h>
@@ -204,6 +204,16 @@
 	[28] "Unaligned data reference trap",
 };
 
+const char *trap_name(unsigned long code)
+{
+	const char *t = NULL;
+
+	if (code < ARRAY_SIZE(trap_description))
+		t = trap_description[code];
+
+	return t ? t : "Unknown trap";
+}
+
 /*
  * Print out info about fatal segfaults, if the show_unhandled_signals
  * sysctl is set:
@@ -213,8 +223,6 @@
 		unsigned long address, struct task_struct *tsk,
 		struct vm_area_struct *vma)
 {
-	const char *trap_name = NULL;
-
 	if (!unhandled_signal(tsk, SIGSEGV))
 		return;
 
@@ -226,10 +234,7 @@
 	    tsk->comm, code, address);
 	print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);
 
-	if (code < ARRAY_SIZE(trap_description))
-		trap_name = trap_description[code];
-	pr_warn(KERN_CONT " trap #%lu: %s%c", code,
-		trap_name ? trap_name : "unknown",
+	pr_cont(" trap #%lu: %s%c", code, trap_name(code),
 		vma ? ',':'\n');
 
 	if (vma)
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 356f384..e02ada3 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -105,6 +105,8 @@
 	else
 		panic("get_memblock() failed.\n");
 
+	memset(__va(phys), 0, size);
+
 	return __va(phys);
 }
 
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 50d020a..617dece 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -318,12 +318,12 @@
 PHONY += corenet32_smp_defconfig
 corenet32_smp_defconfig:
 	$(call merge_into_defconfig,corenet_basic_defconfig,\
-		85xx-32bit 85xx-smp 85xx-hw fsl-emb-nonhw)
+		85xx-32bit 85xx-smp 85xx-hw fsl-emb-nonhw dpaa)
 
 PHONY += corenet64_smp_defconfig
 corenet64_smp_defconfig:
 	$(call merge_into_defconfig,corenet_basic_defconfig,\
-		85xx-64bit 85xx-smp altivec 85xx-hw fsl-emb-nonhw)
+		85xx-64bit 85xx-smp altivec 85xx-hw fsl-emb-nonhw dpaa)
 
 PHONY += mpc86xx_defconfig
 mpc86xx_defconfig:
diff --git a/arch/powerpc/configs/dpaa.config b/arch/powerpc/configs/dpaa.config
new file mode 100644
index 0000000..efa99c0
--- /dev/null
+++ b/arch/powerpc/configs/dpaa.config
@@ -0,0 +1 @@
+CONFIG_FSL_DPAA=y
diff --git a/arch/powerpc/crypto/sha1-powerpc-asm.S b/arch/powerpc/crypto/sha1-powerpc-asm.S
index 125e165..82ddc9b 100644
--- a/arch/powerpc/crypto/sha1-powerpc-asm.S
+++ b/arch/powerpc/crypto/sha1-powerpc-asm.S
@@ -7,6 +7,15 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 
+#ifdef __BIG_ENDIAN__
+#define LWZ(rt, d, ra)	\
+	lwz	rt,d(ra)
+#else
+#define LWZ(rt, d, ra)	\
+	li	rt,d;	\
+	lwbrx	rt,rt,ra
+#endif
+
 /*
  * We roll the registers for T, A, B, C, D, E around on each
  * iteration; T on iteration t is A on iteration t+1, and so on.
@@ -23,7 +32,7 @@
 #define W(t)	(((t)%16)+16)
 
 #define LOADW(t)				\
-	lwz	W(t),(t)*4(r4)
+	LWZ(W(t),(t)*4,r4)
 
 #define STEPD0_LOAD(t)				\
 	andc	r0,RD(t),RB(t);		\
@@ -33,7 +42,7 @@
 	add	r0,RE(t),r15;			\
 	add	RT(t),RT(t),r6;		\
 	add	r14,r0,W(t);			\
-	lwz	W((t)+4),((t)+4)*4(r4);	\
+	LWZ(W((t)+4),((t)+4)*4,r4);	\
 	rotlwi	RB(t),RB(t),30;			\
 	add	RT(t),RT(t),r14
 
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index ab9f4e0..5c4fbc8 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -1,5 +1,6 @@
 generic-y += clkdev.h
 generic-y += div64.h
+generic-y += export.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
 generic-y += local64.h
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index f752e6f..ab68d0e 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -43,6 +43,7 @@
 extern int machine_check_e500(struct pt_regs *regs);
 extern int machine_check_e200(struct pt_regs *regs);
 extern int machine_check_47x(struct pt_regs *regs);
+int machine_check_8xx(struct pt_regs *regs);
 
 extern void cpu_down_flush_e500v2(void);
 extern void cpu_down_flush_e500mc(void);
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index c7d82ff..eba6041 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -155,6 +155,8 @@
 	unsigned long flags = arch_local_save_flags();
 #ifdef CONFIG_BOOKE
 	asm volatile("wrteei 0" : : : "memory");
+#elif defined(CONFIG_PPC_8xx)
+	wrtspr(SPRN_EID);
 #else
 	SET_MSR_EE(flags & ~MSR_EE);
 #endif
@@ -165,6 +167,8 @@
 {
 #ifdef CONFIG_BOOKE
 	asm volatile("wrteei 0" : : : "memory");
+#elif defined(CONFIG_PPC_8xx)
+	wrtspr(SPRN_EID);
 #else
 	arch_local_irq_save();
 #endif
@@ -174,6 +178,8 @@
 {
 #ifdef CONFIG_BOOKE
 	asm volatile("wrteei 1" : : : "memory");
+#elif defined(CONFIG_PPC_8xx)
+	wrtspr(SPRN_EIE);
 #else
 	unsigned long msr = mfmsr();
 	SET_MSR_EE(msr | MSR_EE);
diff --git a/arch/powerpc/include/asm/libata-portmap.h b/arch/powerpc/include/asm/libata-portmap.h
index 4d85180..4396db5 100644
--- a/arch/powerpc/include/asm/libata-portmap.h
+++ b/arch/powerpc/include/asm/libata-portmap.h
@@ -1,12 +1,8 @@
 #ifndef __ASM_POWERPC_LIBATA_PORTMAP_H
 #define __ASM_POWERPC_LIBATA_PORTMAP_H
 
-#define ATA_PRIMARY_CMD	0x1F0
-#define ATA_PRIMARY_CTL	0x3F6
 #define ATA_PRIMARY_IRQ(dev)	pci_get_legacy_ide_irq(dev, 0)
 
-#define ATA_SECONDARY_CMD	0x170
-#define ATA_SECONDARY_CTL	0x376
 #define ATA_SECONDARY_IRQ(dev)	pci_get_legacy_ide_irq(dev, 1)
 
 #endif
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 54ff8ce..0132831 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -152,6 +152,7 @@
 #define PPC_INST_LWSYNC			0x7c2004ac
 #define PPC_INST_SYNC			0x7c0004ac
 #define PPC_INST_SYNC_MASK		0xfc0007fe
+#define PPC_INST_ISYNC			0x4c00012c
 #define PPC_INST_LXVD2X			0x7c000698
 #define PPC_INST_MCRXR			0x7c000400
 #define PPC_INST_MCRXR_MASK		0xfc0007fe
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 2a62078..9cd4e8c 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1250,6 +1250,8 @@
 				     : "r" ((unsigned long)(v)) \
 				     : "memory")
 #endif
+#define wrtspr(rn)	asm volatile("mtspr " __stringify(rn) ",0" : \
+				     : : "memory")
 
 extern unsigned long msr_check_and_set(unsigned long bits);
 extern bool strict_msr_control;
diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h
index 94d01f8..0197e12 100644
--- a/arch/powerpc/include/asm/reg_8xx.h
+++ b/arch/powerpc/include/asm/reg_8xx.h
@@ -25,6 +25,10 @@
 #define SPRN_MD_RAM0	825
 #define SPRN_MD_RAM1	826
 
+/* Special MSR manipulation registers */
+#define SPRN_EIE	80	/* External interrupt enable (EE=1, RI=1) */
+#define SPRN_EID	81	/* External interrupt disable (EE=0, RI=1) */
+
 /* Commands.  Only the first few are available to the instruction cache.
 */
 #define	IDC_ENABLE	0x02000000	/* Cache enable */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index aded29a..1925341 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -14,6 +14,11 @@
 CFLAGS_btext.o		+= -fPIC
 endif
 
+CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
@@ -90,10 +95,6 @@
 obj-$(CONFIG_PPC32)		+= entry_32.o setup_32.o
 obj-$(CONFIG_PPC64)		+= dma-iommu.o iommu.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
-obj-$(CONFIG_MODULES)		+= ppc_ksyms.o
-ifeq ($(CONFIG_PPC32),y)
-obj-$(CONFIG_MODULES)		+= ppc_ksyms_32.o
-endif
 obj-$(CONFIG_BOOTX_TEXT)	+= btext.o
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 6c4646a..6a82ef0 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1248,6 +1248,7 @@
 		.mmu_features		= MMU_FTR_TYPE_8xx,
 		.icache_bsize		= 16,
 		.dcache_bsize		= 16,
+		.machine_check		= machine_check_8xx,
 		.platform		= "ppc823",
 	},
 #endif /* CONFIG_8xx */
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 83428a2..3841d74 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -33,6 +33,7 @@
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
 #include <asm/ptrace.h>
+#include <asm/export.h>
 
 /*
  * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
@@ -1358,6 +1359,7 @@
 	MCOUNT_RESTORE_FRAME
 	bctr
 #endif
+EXPORT_SYMBOL(_mcount)
 
 _GLOBAL(ftrace_stub)
 	blr
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 51df82b..6432d4b 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -38,6 +38,7 @@
 #include <asm/context_tracking.h>
 #include <asm/tm.h>
 #include <asm/ppc-opcode.h>
+#include <asm/export.h>
 
 /*
  * System calls.
@@ -1177,6 +1178,7 @@
 #ifdef CONFIG_DYNAMIC_FTRACE
 _GLOBAL(mcount)
 _GLOBAL(_mcount)
+EXPORT_SYMBOL(_mcount)
 	mflr	r12
 	mtctr	r12
 	mtlr	r0
@@ -1413,6 +1415,7 @@
 
 #else
 _GLOBAL_TOC(_mcount)
+EXPORT_SYMBOL(_mcount)
 	/* Taken from output of objdump from lib64/glibc */
 	mflr	r3
 	ld	r11, 0(r1)
diff --git a/arch/powerpc/kernel/epapr_hcalls.S b/arch/powerpc/kernel/epapr_hcalls.S
index 9f1ebf7..52ca247 100644
--- a/arch/powerpc/kernel/epapr_hcalls.S
+++ b/arch/powerpc/kernel/epapr_hcalls.S
@@ -16,6 +16,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-compat.h>
 #include <asm/asm-offsets.h>
+#include <asm/export.h>
 
 #ifndef CONFIG_PPC64
 /* epapr_ev_idle() was derived from e500_idle() */
@@ -53,3 +54,4 @@
 	nop
 	nop
 	blr
+EXPORT_SYMBOL(epapr_hypercall_start)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 08992f8..f129408 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1377,7 +1377,7 @@
 DEFINE_FIXED_SYMBOL(__end_interrupts)
 
 #ifdef CONFIG_PPC_970_NAP
-TRAMP_REAL_BEGIN(power4_fixup_nap)
+EXC_COMMON_BEGIN(power4_fixup_nap)
 	andc	r9,r9,r10
 	std	r9,TI_LOCAL_FLAGS(r11)
 	ld	r10,_LINK(r1)		/* make idle task do the */
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 08d14b0..6c509f3 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -24,6 +24,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
+#include <asm/export.h>
 
 #ifdef CONFIG_VSX
 #define __REST_32FPVSRS(n,c,base)					\
@@ -59,6 +60,7 @@
 	MTFSF_L(fr0)
 	REST_32FPVSRS(0, R4, R3)
 	blr
+EXPORT_SYMBOL(load_fp_state)
 
 /*
  * Store FP state into memory, including FPSCR
@@ -69,6 +71,7 @@
 	mffs	fr0
 	stfd	fr0,FPSTATE_FPSCR(r3)
 	blr
+EXPORT_SYMBOL(store_fp_state)
 
 /*
  * This task wants to use the FPU now.
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index a3f821e..9d96354 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -34,6 +34,7 @@
 #include <asm/ptrace.h>
 #include <asm/bug.h>
 #include <asm/kvm_book3s_asm.h>
+#include <asm/export.h>
 
 /* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
 #define LOAD_BAT(n, reg, RA, RB)	\
@@ -738,6 +739,7 @@
 
 	.globl mol_trampoline
 	.set mol_trampoline, i0x2f00
+	EXPORT_SYMBOL(mol_trampoline)
 
 	. = 0x3000
 
@@ -1045,6 +1047,7 @@
 4:	trap
 	EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
 	blr
+EXPORT_SYMBOL(switch_mmu_context)
 
 /*
  * An undocumented "feature" of 604e requires that the v bit
@@ -1272,6 +1275,7 @@
 	.globl	empty_zero_page
 empty_zero_page:
 	.space	4096
+EXPORT_SYMBOL(empty_zero_page)
 
 	.globl	swapper_pg_dir
 swapper_pg_dir:
@@ -1285,6 +1289,7 @@
 	.long 0, 0, 0, 0, 0, 0, 0, 0
 	.long 0, 0, 0, 0, 0, 0, 0, 0
 	.long 0, 0, 0, 0, 0, 0, 0, 0
+EXPORT_SYMBOL(intercept_table)
 
 /* Room for two PTE pointers, usually the kernel and current user pointers
  * to their respective root page table.
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 7d7d863..41374a4 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -41,6 +41,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
+#include <asm/export.h>
 
 /* As with the other PowerPC ports, it is expected that when code
  * execution begins here, the following registers contain valid, yet
@@ -971,6 +972,7 @@
 	.globl	empty_zero_page
 empty_zero_page:
 	.space	4096
+EXPORT_SYMBOL(empty_zero_page)
 	.globl	swapper_pg_dir
 swapper_pg_dir:
 	.space	PGD_TABLE_SIZE
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 9cdf5c7..37e4a7c 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -39,6 +39,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
 #include <asm/synch.h>
+#include <asm/export.h>
 #include "head_booke.h"
 
 
@@ -1254,6 +1255,7 @@
 	.globl	empty_zero_page
 empty_zero_page:
 	.space	PAGE_SIZE
+EXPORT_SYMBOL(empty_zero_page)
 
 /*
  * To support >32-bit physical addresses, we use an 8KB pgdir.
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 79da0641..04c546e 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -43,6 +43,7 @@
 #include <asm/hw_irq.h>
 #include <asm/cputhreads.h>
 #include <asm/ppc-opcode.h>
+#include <asm/export.h>
 
 /* The physical memory is laid out such that the secondary processor
  * spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -1002,3 +1003,4 @@
 	.globl	empty_zero_page
 empty_zero_page:
 	.space	PAGE_SIZE
+EXPORT_SYMBOL(empty_zero_page)
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 3a185c5..fb133a1 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -31,6 +31,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
 #include <asm/fixmap.h>
+#include <asm/export.h>
 
 /* Macro to make the code more readable. */
 #ifdef CONFIG_8xx_CPU6
@@ -226,7 +227,7 @@
 			  ret_from_except)
 
 /* System reset */
-	EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
+	EXCEPTION(0x100, Reset, system_reset_exception, EXC_XFER_STD)
 
 /* Machine check */
 	. = 0x200
@@ -321,7 +322,7 @@
 #endif
 
 InstructionTLBMiss:
-#ifdef CONFIG_8xx_CPU6
+#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
 	mtspr	SPRN_SPRG_SCRATCH2, r3
 #endif
 	EXCEPTION_PROLOG_0
@@ -329,23 +330,20 @@
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
 	 */
+	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
+	INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10)
 #if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
 	/* Only modules will cause ITLB Misses as we always
 	 * pin the first 8MB of kernel memory */
-	mfspr	r11, SPRN_SRR0	/* Get effective address of fault */
-	INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11)
-	mfcr	r10
-	IS_KERNEL(r11, r11)
+	mfcr	r3
+	IS_KERNEL(r11, r10)
+#endif
 	mfspr	r11, SPRN_M_TW	/* Get level 1 table */
+#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
 	BRANCH_UNLESS_KERNEL(3f)
 	lis	r11, (swapper_pg_dir-PAGE_OFFSET)@ha
 3:
-	mtcr	r10
-	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
-#else
-	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
-	INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10)
-	mfspr	r11, SPRN_M_TW	/* Get level 1 table base address */
+	mtcr	r3
 #endif
 	/* Insert level 1 index */
 	rlwimi	r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
@@ -377,58 +375,39 @@
 	MTSPR_CPU6(SPRN_MI_RPN, r10, r3)	/* Update TLB entry */
 
 	/* Restore registers */
-#ifdef CONFIG_8xx_CPU6
+#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
 	mfspr	r3, SPRN_SPRG_SCRATCH2
 #endif
 	EXCEPTION_EPILOG_0
 	rfi
 
-/*
- * Bottom part of DataStoreTLBMiss handler for IMMR area
- * not enough space in the DataStoreTLBMiss area
- */
-DTLBMissIMMR:
-	mtcr	r10
-	/* Set 512k byte guarded page and mark it valid */
-	li	r10, MD_PS512K | MD_GUARDED | MD_SVALID
-	MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
-	mfspr	r10, SPRN_IMMR			/* Get current IMMR */
-	rlwinm	r10, r10, 0, 0xfff80000		/* Get 512 kbytes boundary */
-	ori	r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY	| \
-			  _PAGE_PRESENT | _PAGE_NO_CACHE
-	MTSPR_CPU6(SPRN_MD_RPN, r10, r11)	/* Update TLB entry */
-
-	li	r11, RPN_PATTERN
-	mtspr	SPRN_DAR, r11	/* Tag DAR */
-	EXCEPTION_EPILOG_0
-	rfi
-
 	. = 0x1200
 DataStoreTLBMiss:
+	mtspr	SPRN_SPRG_SCRATCH2, r3
 	EXCEPTION_PROLOG_0
-	mfcr	r10
+	mfcr	r3
 
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
 	 */
-	mfspr	r11, SPRN_MD_EPN
-	rlwinm	r11, r11, 16, 0xfff8
+	mfspr	r10, SPRN_MD_EPN
+	rlwinm	r10, r10, 16, 0xfff8
+	cmpli	cr0, r10, PAGE_OFFSET@h
+	mfspr	r11, SPRN_M_TW	/* Get level 1 table */
+	blt+	3f
 #ifndef CONFIG_PIN_TLB_IMMR
-	cmpli	cr0, r11, VIRT_IMMR_BASE@h
+	cmpli	cr0, r10, VIRT_IMMR_BASE@h
 #endif
-	cmpli	cr7, r11, PAGE_OFFSET@h
+_ENTRY(DTLBMiss_cmp)
+	cmpli	cr7, r10, (PAGE_OFFSET + 0x1800000)@h
+	lis	r11, (swapper_pg_dir-PAGE_OFFSET)@ha
 #ifndef CONFIG_PIN_TLB_IMMR
 _ENTRY(DTLBMiss_jmp)
 	beq-	DTLBMissIMMR
 #endif
-	bge-	cr7, 4f
-
-	mfspr	r11, SPRN_M_TW	/* Get level 1 table */
+	blt	cr7, DTLBMissLinear
 3:
-	mtcr	r10
-#ifdef CONFIG_8xx_CPU6
-	mtspr	SPRN_SPRG_SCRATCH2, r3
-#endif
+	mtcr	r3
 	mfspr	r10, SPRN_MD_EPN
 
 	/* Insert level 1 index */
@@ -481,30 +460,7 @@
 	MTSPR_CPU6(SPRN_MD_RPN, r10, r3)	/* Update TLB entry */
 
 	/* Restore registers */
-#ifdef CONFIG_8xx_CPU6
 	mfspr	r3, SPRN_SPRG_SCRATCH2
-#endif
-	mtspr	SPRN_DAR, r11	/* Tag DAR */
-	EXCEPTION_EPILOG_0
-	rfi
-
-4:
-_ENTRY(DTLBMiss_cmp)
-	cmpli	cr0, r11, (PAGE_OFFSET + 0x1800000)@h
-	lis	r11, (swapper_pg_dir-PAGE_OFFSET)@ha
-	bge-	3b
-
-	mtcr	r10
-	/* Set 8M byte page and mark it valid */
-	li	r10, MD_PS8MEG | MD_SVALID
-	MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
-	mfspr	r10, SPRN_MD_EPN
-	rlwinm	r10, r10, 0, 0x0f800000		/* 8xx supports max 256Mb RAM */
-	ori	r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY	| \
-			  _PAGE_PRESENT
-	MTSPR_CPU6(SPRN_MD_RPN, r10, r11)	/* Update TLB entry */
-
-	li	r11, RPN_PATTERN
 	mtspr	SPRN_DAR, r11	/* Tag DAR */
 	EXCEPTION_EPILOG_0
 	rfi
@@ -570,6 +526,43 @@
 
 	. = 0x2000
 
+/*
+ * Bottom part of DataStoreTLBMiss handlers for IMMR area and linear RAM.
+ * not enough space in the DataStoreTLBMiss area.
+ */
+DTLBMissIMMR:
+	mtcr	r3
+	/* Set 512k byte guarded page and mark it valid */
+	li	r10, MD_PS512K | MD_GUARDED | MD_SVALID
+	MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
+	mfspr	r10, SPRN_IMMR			/* Get current IMMR */
+	rlwinm	r10, r10, 0, 0xfff80000		/* Get 512 kbytes boundary */
+	ori	r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY	| \
+			  _PAGE_PRESENT | _PAGE_NO_CACHE
+	MTSPR_CPU6(SPRN_MD_RPN, r10, r11)	/* Update TLB entry */
+
+	li	r11, RPN_PATTERN
+	mtspr	SPRN_DAR, r11	/* Tag DAR */
+	mfspr	r3, SPRN_SPRG_SCRATCH2
+	EXCEPTION_EPILOG_0
+	rfi
+
+DTLBMissLinear:
+	mtcr	r3
+	/* Set 8M byte page and mark it valid */
+	li	r11, MD_PS8MEG | MD_SVALID
+	MTSPR_CPU6(SPRN_MD_TWC, r11, r3)
+	rlwinm	r10, r10, 16, 0x0f800000	/* 8xx supports max 256Mb RAM */
+	ori	r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY	| \
+			  _PAGE_PRESENT
+	MTSPR_CPU6(SPRN_MD_RPN, r10, r11)	/* Update TLB entry */
+
+	li	r11, RPN_PATTERN
+	mtspr	SPRN_DAR, r11	/* Tag DAR */
+	mfspr	r3, SPRN_SPRG_SCRATCH2
+	EXCEPTION_EPILOG_0
+	rfi
+
 /* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
  * by decoding the registers used by the dcbx instruction and adding them.
  * DAR is set to the calculated address.
@@ -586,7 +579,9 @@
 	rlwinm	r11, r10, 16, 0xfff8
 _ENTRY(FixupDAR_cmp)
 	cmpli	cr7, r11, (PAGE_OFFSET + 0x1800000)@h
-	blt-	cr7, 200f
+	/* create physical page address from effective address */
+	tophys(r11, r10)
+	blt-	cr7, 201f
 	lis	r11, (swapper_pg_dir-PAGE_OFFSET)@ha
 	/* Insert level 1 index */
 3:	rlwimi	r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
@@ -616,10 +611,6 @@
 141:	mfspr	r10,SPRN_SPRG_SCRATCH2
 	b	DARFixed	/* Nope, go back to normal TLB processing */
 
-	/* create physical page address from effective address */
-200:	tophys(r11, r10)
-	b	201b
-
 144:	mfspr	r10, SPRN_DSISR
 	rlwinm	r10, r10,0,7,5	/* Clear store bit for buggy dcbst insn */
 	mtspr	SPRN_DSISR, r10
@@ -894,6 +885,7 @@
 	.align	PAGE_SHIFT
 empty_zero_page:
 	.space	PAGE_SIZE
+EXPORT_SYMBOL(empty_zero_page)
 
 	.globl	swapper_pg_dir
 swapper_pg_dir:
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 3bfa315..bf4c602 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -42,6 +42,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
 #include <asm/ptrace.h>
+#include <asm/export.h>
 #include "head_booke.h"
 
 /* As with the other PowerPC ports, it is expected that when code
@@ -1223,6 +1224,7 @@
 	.globl	empty_zero_page
 empty_zero_page:
 	.space	4096
+EXPORT_SYMBOL(empty_zero_page)
 	.globl	swapper_pg_dir
 swapper_pg_dir:
 	.space	PGD_TABLE_SIZE
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 37d6e74..5f202a5 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -479,7 +479,8 @@
 
 		/* Handle failure */
 		if (unlikely(entry == DMA_ERROR_CODE)) {
-			if (printk_ratelimit())
+			if (!(attrs & DMA_ATTR_NO_WARN) &&
+			    printk_ratelimit())
 				dev_info(dev, "iommu_alloc failed, tbl %p "
 					 "vaddr %lx npages %lu\n", tbl, vaddr,
 					 npages);
@@ -776,7 +777,8 @@
 					 mask >> tbl->it_page_shift, align,
 					 attrs);
 		if (dma_handle == DMA_ERROR_CODE) {
-			if (printk_ratelimit())  {
+			if (!(attrs & DMA_ATTR_NO_WARN) &&
+			    printk_ratelimit())  {
 				dev_info(dev, "iommu_alloc failed, tbl %p "
 					 "vaddr %p npages %d\n", tbl, vaddr,
 					 npages);
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 0d43219..384357c 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -18,6 +18,7 @@
 #include <asm/unistd.h>
 #include <asm/asm-compat.h>
 #include <asm/asm-offsets.h>
+#include <asm/export.h>
 
 	.text
 
@@ -118,3 +119,4 @@
 _GLOBAL(current_stack_pointer)
 	PPC_LL	r3,0(r1)
 	blr
+EXPORT_SYMBOL(current_stack_pointer)
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 03756ff..93cf7a5 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -33,6 +33,7 @@
 #include <asm/kexec.h>
 #include <asm/bug.h>
 #include <asm/ptrace.h>
+#include <asm/export.h>
 
 	.text
 
@@ -319,6 +320,7 @@
 #endif /* CONFIG_4xx */
 	isync
 	blr
+EXPORT_SYMBOL(flush_instruction_cache)
 #endif /* CONFIG_PPC_8xx */
 
 /*
@@ -359,6 +361,7 @@
 	isync
 	blr
 _ASM_NOKPROBE_SYMBOL(flush_icache_range)
+EXPORT_SYMBOL(flush_icache_range)
 
 /*
  * Flush a particular page from the data cache to RAM.
@@ -497,6 +500,7 @@
 	li	r0,MAX_COPY_PREFETCH
 	li	r11,4
 	b	2b
+EXPORT_SYMBOL(copy_page)
 
 /*
  * Extended precision shifts.
@@ -524,6 +528,7 @@
 	sraw	r3,r3,r5	# MSW = MSW >> count
 	or	r4,r4,r7	# LSW |= t2
 	blr
+EXPORT_SYMBOL(__ashrdi3)
 
 _GLOBAL(__ashldi3)
 	subfic	r6,r5,32
@@ -535,6 +540,7 @@
 	slw	r4,r4,r5	# LSW = LSW << count
 	or	r3,r3,r7	# MSW |= t2
 	blr
+EXPORT_SYMBOL(__ashldi3)
 
 _GLOBAL(__lshrdi3)
 	subfic	r6,r5,32
@@ -546,6 +552,7 @@
 	srw	r3,r3,r5	# MSW = MSW >> count
 	or	r4,r4,r7	# LSW |= t2
 	blr
+EXPORT_SYMBOL(__lshrdi3)
 
 /*
  * 64-bit comparison: __cmpdi2(s64 a, s64 b)
@@ -561,6 +568,7 @@
 	bltlr
 	li	r3,2
 	blr
+EXPORT_SYMBOL(__cmpdi2)
 /*
  * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
  * Returns 0 if a < b, 1 if a == b, 2 if a > b.
@@ -575,6 +583,7 @@
 	bltlr
 	li	r3,2
 	blr
+EXPORT_SYMBOL(__ucmpdi2)
 
 _GLOBAL(__bswapdi2)
 	rotlwi  r9,r4,8
@@ -586,6 +595,7 @@
 	mr      r3,r9
 	mr      r4,r10
 	blr
+EXPORT_SYMBOL(__bswapdi2)
 
 #ifdef CONFIG_SMP
 _GLOBAL(start_secondary_resume)
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 9f0bed2..4f17867 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -27,6 +27,7 @@
 #include <asm/kexec.h>
 #include <asm/ptrace.h>
 #include <asm/mmu.h>
+#include <asm/export.h>
 
 	.text
 
@@ -110,6 +111,7 @@
 	isync
 	blr
 _ASM_NOKPROBE_SYMBOL(flush_icache_range)
+EXPORT_SYMBOL(flush_icache_range)
 
 /*
  * Like above, but only do the D-cache.
@@ -140,6 +142,7 @@
 	bdnz	0b
 	sync
 	blr
+EXPORT_SYMBOL(flush_dcache_range)
 
 /*
  * Like above, but works on non-mapped physical addresses.
@@ -243,6 +246,7 @@
 	blr
 
 _GLOBAL(__bswapdi2)
+EXPORT_SYMBOL(__bswapdi2)
 	srdi	r8,r3,32
 	rlwinm	r7,r3,8,0xffffffff
 	rlwimi	r7,r3,24,0,7
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 95d3769..74bec54 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -56,6 +56,7 @@
 
 /* ISA Memory physical address */
 resource_size_t isa_mem_base;
+EXPORT_SYMBOL(isa_mem_base);
 
 
 static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 1f793003..678f87a 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -32,6 +32,8 @@
 unsigned long isa_io_base     = 0;
 unsigned long pci_dram_offset = 0;
 int pcibios_assign_bus_offset = 1;
+EXPORT_SYMBOL(isa_io_base);
+EXPORT_SYMBOL(pci_dram_offset);
 
 void pcibios_make_OF_bus_map(void);
 
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
deleted file mode 100644
index 9f01e28..0000000
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ /dev/null
@@ -1,37 +0,0 @@
-#include <linux/ftrace.h>
-#include <linux/mm.h>
-
-#include <asm/processor.h>
-#include <asm/switch_to.h>
-#include <asm/cacheflush.h>
-#include <asm/epapr_hcalls.h>
-
-#ifdef CONFIG_PPC64
-EXPORT_SYMBOL(flush_dcache_range);
-#endif
-EXPORT_SYMBOL(flush_icache_range);
-
-EXPORT_SYMBOL(empty_zero_page);
-
-long long __bswapdi2(long long);
-EXPORT_SYMBOL(__bswapdi2);
-
-#ifdef CONFIG_FUNCTION_TRACER
-EXPORT_SYMBOL(_mcount);
-#endif
-
-#ifdef CONFIG_PPC_FPU
-EXPORT_SYMBOL(load_fp_state);
-EXPORT_SYMBOL(store_fp_state);
-#endif
-
-#ifdef CONFIG_ALTIVEC
-EXPORT_SYMBOL(load_vr_state);
-EXPORT_SYMBOL(store_vr_state);
-#endif
-
-#ifdef CONFIG_EPAPR_PARAVIRT
-EXPORT_SYMBOL(epapr_hypercall_start);
-#endif
-
-EXPORT_SYMBOL(current_stack_pointer);
diff --git a/arch/powerpc/kernel/ppc_ksyms_32.c b/arch/powerpc/kernel/ppc_ksyms_32.c
deleted file mode 100644
index 2bfaafe..0000000
--- a/arch/powerpc/kernel/ppc_ksyms_32.c
+++ /dev/null
@@ -1,60 +0,0 @@
-#include <linux/export.h>
-#include <linux/smp.h>
-
-#include <asm/page.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/hw_irq.h>
-#include <asm/time.h>
-#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-#include <asm/dcr.h>
-
-EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
-EXPORT_SYMBOL(DMA_MODE_READ);
-EXPORT_SYMBOL(DMA_MODE_WRITE);
-
-#if defined(CONFIG_PCI)
-EXPORT_SYMBOL(isa_io_base);
-EXPORT_SYMBOL(isa_mem_base);
-EXPORT_SYMBOL(pci_dram_offset);
-#endif
-
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(smp_hw_index);
-#endif
-
-long long __ashrdi3(long long, int);
-long long __ashldi3(long long, int);
-long long __lshrdi3(long long, int);
-int __ucmpdi2(unsigned long long, unsigned long long);
-int __cmpdi2(long long, long long);
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__ucmpdi2);
-EXPORT_SYMBOL(__cmpdi2);
-
-EXPORT_SYMBOL(timer_interrupt);
-EXPORT_SYMBOL(tb_ticks_per_jiffy);
-
-EXPORT_SYMBOL(switch_mmu_context);
-
-#ifdef CONFIG_PPC_STD_MMU_32
-extern long mol_trampoline;
-EXPORT_SYMBOL(mol_trampoline); /* For MOL */
-EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
-#ifdef CONFIG_SMP
-extern int mmu_hash_lock;
-EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
-#endif /* CONFIG_SMP */
-extern long *intercept_table;
-EXPORT_SYMBOL(intercept_table);
-#endif /* CONFIG_PPC_STD_MMU_32 */
-
-#ifdef CONFIG_PPC_DCR_NATIVE
-EXPORT_SYMBOL(__mtdcr);
-EXPORT_SYMBOL(__mfdcr);
-#endif
-
-EXPORT_SYMBOL(flush_instruction_cache);
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index f52b7db3..010b7b3 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -74,7 +74,7 @@
 			break;
 
 		copied = access_process_vm(child, (u64)addrOthers, &tmp,
-				sizeof(tmp), 0);
+				sizeof(tmp), FOLL_FORCE);
 		if (copied != sizeof(tmp))
 			break;
 		ret = put_user(tmp, (u32 __user *)data);
@@ -179,7 +179,8 @@
 			break;
 		ret = 0;
 		if (access_process_vm(child, (u64)addrOthers, &tmp,
-					sizeof(tmp), 1) == sizeof(tmp))
+					sizeof(tmp),
+					FOLL_FORCE | FOLL_WRITE) == sizeof(tmp))
 			break;
 		ret = -EIO;
 		break;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index dba265c..270ee30 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -131,15 +131,26 @@
 		ppc_md.machine_shutdown();
 }
 
+static void machine_hang(void)
+{
+	pr_emerg("System Halted, OK to turn off power\n");
+	local_irq_disable();
+	while (1)
+		;
+}
+
 void machine_restart(char *cmd)
 {
 	machine_shutdown();
 	if (ppc_md.restart)
 		ppc_md.restart(cmd);
+
 	smp_send_stop();
-	printk(KERN_EMERG "System Halted, OK to turn off power\n");
-	local_irq_disable();
-	while (1) ;
+
+	do_kernel_restart(cmd);
+	mdelay(1000);
+
+	machine_hang();
 }
 
 void machine_power_off(void)
@@ -147,10 +158,9 @@
 	machine_shutdown();
 	if (pm_power_off)
 		pm_power_off();
+
 	smp_send_stop();
-	printk(KERN_EMERG "System Halted, OK to turn off power\n");
-	local_irq_disable();
-	while (1) ;
+	machine_hang();
 }
 /* Used by the G5 thermal driver */
 EXPORT_SYMBOL_GPL(machine_power_off);
@@ -163,10 +173,9 @@
 	machine_shutdown();
 	if (ppc_md.halt)
 		ppc_md.halt();
+
 	smp_send_stop();
-	printk(KERN_EMERG "System Halted, OK to turn off power\n");
-	local_irq_disable();
-	while (1) ;
+	machine_hang();
 }
 
 
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 24ec3ea..5fe7918 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -16,6 +16,7 @@
 #include <linux/cpu.h>
 #include <linux/console.h>
 #include <linux/memblock.h>
+#include <linux/export.h>
 
 #include <asm/io.h>
 #include <asm/prom.h>
@@ -47,11 +48,16 @@
 EXPORT_SYMBOL_GPL(boot_cpuid_phys);
 
 int smp_hw_index[NR_CPUS];
+EXPORT_SYMBOL(smp_hw_index);
 
 unsigned long ISA_DMA_THRESHOLD;
 unsigned int DMA_MODE_READ;
 unsigned int DMA_MODE_WRITE;
 
+EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
+EXPORT_SYMBOL(DMA_MODE_READ);
+EXPORT_SYMBOL(DMA_MODE_WRITE);
+
 /*
  * These are used in binfmt_elf.c to put aux entries on the stack
  * for each elf executable being started.
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 67859b7..bc3f7d0 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -596,6 +596,7 @@
 	irq_exit();
 	set_irq_regs(old_regs);
 }
+EXPORT_SYMBOL(timer_interrupt);
 
 /*
  * Hypervisor decrementer interrupts shouldn't occur but are sometimes
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index a1f8f56..023a462 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -273,7 +273,6 @@
 	force_sig_info(signr, &info, current);
 }
 
-#ifdef CONFIG_PPC64
 void system_reset_exception(struct pt_regs *regs)
 {
 	/* See if any machine dependent calls */
@@ -291,6 +290,7 @@
 	/* What should we do here? We could issue a shutdown or hard reset. */
 }
 
+#ifdef CONFIG_PPC64
 /*
  * This function is called in real mode. Strictly no printk's please.
  *
@@ -352,12 +352,11 @@
 		 * For the debug message, we look at the preceding
 		 * load or store.
 		 */
-		if (*nip == 0x60000000)		/* nop */
+		if (*nip == PPC_INST_NOP)
 			nip -= 2;
-		else if (*nip == 0x4c00012c)	/* isync */
+		else if (*nip == PPC_INST_ISYNC)
 			--nip;
-		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
-			/* sync or twi */
+		if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
 			unsigned int rb;
 
 			--nip;
@@ -668,6 +667,31 @@
 
 	return 0;
 }
+#elif defined(CONFIG_PPC_8xx)
+int machine_check_8xx(struct pt_regs *regs)
+{
+	unsigned long reason = get_mc_reason(regs);
+
+	pr_err("Machine check in kernel mode.\n");
+	pr_err("Caused by (from SRR1=%lx): ", reason);
+	if (reason & 0x40000000)
+		pr_err("Fetch error at address %lx\n", regs->nip);
+	else
+		pr_err("Data access error at address %lx\n", regs->dar);
+
+#ifdef CONFIG_PCI
+	/* the qspan pci read routines can cause machine checks -- Cort
+	 *
+	 * yuck !!! that totally needs to go away ! There are better ways
+	 * to deal with that than having a wart in the mcheck handler.
+	 * -- BenH
+	 */
+	bad_page_fault(regs, regs->dar, SIGBUS);
+	return 1;
+#else
+	return 0;
+#endif
+}
 #else
 int machine_check_generic(struct pt_regs *regs)
 {
@@ -727,17 +751,6 @@
 	if (recover > 0)
 		goto bail;
 
-#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
-	/* the qspan pci read routines can cause machine checks -- Cort
-	 *
-	 * yuck !!! that totally needs to go away ! There are better ways
-	 * to deal with that than having a wart in the mcheck handler.
-	 * -- BenH
-	 */
-	bad_page_fault(regs, regs->dar, SIGBUS);
-	goto bail;
-#endif
-
 	if (debugger_fault_handler(regs))
 		goto bail;
 
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index bc85bdf..0c123f3 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -6,6 +6,7 @@
 #include <asm/thread_info.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
+#include <asm/export.h>
 
 /*
  * Load state from memory into VMX registers including VSCR.
@@ -17,6 +18,7 @@
 	mtvscr	v0
 	REST_32VRS(0,r4,r3)
 	blr
+EXPORT_SYMBOL(load_vr_state)
 
 /*
  * Store VMX state into memory, including VSCR.
@@ -28,6 +30,7 @@
 	li	r4, VRSTATE_VSCR
 	stvx	v0, r4, r3
 	blr
+EXPORT_SYMBOL(store_vr_state)
 
 /*
  * Disable VMX for the task which had it previously,
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index ad52900..309361e8 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -9,7 +9,7 @@
 CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
 
-obj-y += string.o alloc.o crtsavres.o ppc_ksyms.o code-patching.o \
+obj-y += string.o alloc.o crtsavres.o code-patching.o \
 	 feature-fixups.o
 
 obj-$(CONFIG_PPC32)	+= div64.o copy_32.o
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
index aa8214f..ea29a5d 100644
--- a/arch/powerpc/lib/checksum_32.S
+++ b/arch/powerpc/lib/checksum_32.S
@@ -17,6 +17,7 @@
 #include <asm/cache.h>
 #include <asm/errno.h>
 #include <asm/ppc_asm.h>
+#include <asm/export.h>
 
 	.text
 
@@ -68,6 +69,7 @@
 	adde	r5,r5,r0
 5:	addze	r3,r5		/* add in final carry */
 	blr
+EXPORT_SYMBOL(__csum_partial)
 
 /*
  * Computes the checksum of a memory block at src, length len,
@@ -297,3 +299,4 @@
 	.long	41b,dst_error
 	.long	50b,src_error
 	.long	51b,dst_error
+EXPORT_SYMBOL(csum_partial_copy_generic)
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index fdec6e6..fd91766 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -16,6 +16,7 @@
 #include <asm/processor.h>
 #include <asm/errno.h>
 #include <asm/ppc_asm.h>
+#include <asm/export.h>
 
 /*
  * Computes the checksum of a memory block at buff, length len,
@@ -176,6 +177,7 @@
 	add	r3,r4,r0
 	srdi	r3,r3,32
 	blr
+EXPORT_SYMBOL(__csum_partial)
 
 
 	.macro srcnr
@@ -430,3 +432,4 @@
 	li	r6,-EFAULT
 	stw	r6,0(r8)
 	blr
+EXPORT_SYMBOL(csum_partial_copy_generic)
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index 99f37f2..40cce33 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -12,6 +12,7 @@
 #include <asm/cache.h>
 #include <asm/errno.h>
 #include <asm/ppc_asm.h>
+#include <asm/export.h>
 
 #define COPY_16_BYTES		\
 	lwz	r7,4(r4);	\
@@ -92,6 +93,7 @@
 	subf	r6,r0,r6
 	cmplwi	0,r4,0
 	bne	2f	/* Use normal procedure if r4 is not zero */
+EXPORT_SYMBOL(memset)
 _GLOBAL(memset_nocache_branch)
 	b	2f	/* Skip optimised bloc until cache is enabled */
 
@@ -216,6 +218,8 @@
 	stbu	r0,1(r6)
 	bdnz	40b
 65:	blr
+EXPORT_SYMBOL(memcpy)
+EXPORT_SYMBOL(memmove)
 
 generic_memcpy:
 	srwi.	r7,r5,3
@@ -507,3 +511,4 @@
 	.long	112b,120b
 	.long	114b,120b
 	.text
+EXPORT_SYMBOL(__copy_tofrom_user)
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S
index a3c4dc4..21367b3 100644
--- a/arch/powerpc/lib/copypage_64.S
+++ b/arch/powerpc/lib/copypage_64.S
@@ -10,6 +10,7 @@
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/export.h>
 
         .section        ".toc","aw"
 PPC64_CACHES:
@@ -110,3 +111,4 @@
 	std	r11,120(r3)
 	std	r12,128(r3)
 	blr
+EXPORT_SYMBOL(copy_page)
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index f09899e..60386b2 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -8,6 +8,7 @@
  */
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
+#include <asm/export.h>
 
 #ifdef __BIG_ENDIAN__
 #define sLd sld		/* Shift towards low-numbered address. */
@@ -359,6 +360,7 @@
 	addi	r3,r3,8
 171:
 177:
+179:
 	addi	r3,r3,8
 370:
 372:
@@ -373,7 +375,6 @@
 173:
 174:
 175:
-179:
 181:
 184:
 186:
@@ -671,3 +672,4 @@
 	.llong	89b,100b
 	.llong	90b,100b
 	.llong	91b,100b
+EXPORT_SYMBOL(__copy_tofrom_user)
diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S
index 19e6600..3de7ac1 100644
--- a/arch/powerpc/lib/hweight_64.S
+++ b/arch/powerpc/lib/hweight_64.S
@@ -19,6 +19,7 @@
  */
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
+#include <asm/export.h>
 
 /* Note: This code relies on -mminimal-toc */
 
@@ -32,6 +33,7 @@
 	clrldi	r3,r3,64-8
 	blr
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+EXPORT_SYMBOL(__arch_hweight8)
 
 _GLOBAL(__arch_hweight16)
 BEGIN_FTR_SECTION
@@ -54,6 +56,7 @@
 	blr
   ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50)
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+EXPORT_SYMBOL(__arch_hweight16)
 
 _GLOBAL(__arch_hweight32)
 BEGIN_FTR_SECTION
@@ -79,6 +82,7 @@
 	blr
   ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51)
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+EXPORT_SYMBOL(__arch_hweight32)
 
 _GLOBAL(__arch_hweight64)
 BEGIN_FTR_SECTION
@@ -108,3 +112,4 @@
 	blr
   ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52)
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+EXPORT_SYMBOL(__arch_hweight64)
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index eda7a96..85fa986 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -11,6 +11,7 @@
 #include <asm/processor.h>
 #include <asm/errno.h>
 #include <asm/ppc_asm.h>
+#include <asm/export.h>
 
 _GLOBAL(memset)
 	neg	r0,r3
@@ -77,6 +78,7 @@
 10:	bflr	31
 	stb	r4,0(r6)
 	blr
+EXPORT_SYMBOL(memset)
 
 _GLOBAL_TOC(memmove)
 	cmplw	0,r3,r4
@@ -119,3 +121,4 @@
 	beq	2b
 	mtctr	r7
 	b	1b
+EXPORT_SYMBOL(memmove)
diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
index 8953d23..d75d18b 100644
--- a/arch/powerpc/lib/memcmp_64.S
+++ b/arch/powerpc/lib/memcmp_64.S
@@ -8,6 +8,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <asm/ppc_asm.h>
+#include <asm/export.h>
 
 #define off8	r6
 #define off16	r7
@@ -231,3 +232,4 @@
 	ld	r28,-32(r1)
 	ld	r27,-40(r1)
 	blr
+EXPORT_SYMBOL(memcmp)
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index 32a06ec..f4d6088 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -8,6 +8,7 @@
  */
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
+#include <asm/export.h>
 
 	.align	7
 _GLOBAL_TOC(memcpy)
@@ -219,3 +220,4 @@
 4:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* return dest pointer */
 	blr
 #endif
+EXPORT_SYMBOL(memcpy)
diff --git a/arch/powerpc/lib/ppc_ksyms.c b/arch/powerpc/lib/ppc_ksyms.c
deleted file mode 100644
index ae69d84..0000000
--- a/arch/powerpc/lib/ppc_ksyms.c
+++ /dev/null
@@ -1,29 +0,0 @@
-#include <linux/string.h>
-#include <linux/uaccess.h>
-#include <linux/bitops.h>
-#include <net/checksum.h>
-
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memchr);
-
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strncmp);
-
-#ifndef CONFIG_GENERIC_CSUM
-EXPORT_SYMBOL(__csum_partial);
-EXPORT_SYMBOL(csum_partial_copy_generic);
-#endif
-
-EXPORT_SYMBOL(__copy_tofrom_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(copy_page);
-
-#ifdef CONFIG_PPC64
-EXPORT_SYMBOL(__arch_hweight8);
-EXPORT_SYMBOL(__arch_hweight16);
-EXPORT_SYMBOL(__arch_hweight32);
-EXPORT_SYMBOL(__arch_hweight64);
-#endif
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index beabc68..d13e076 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -11,6 +11,7 @@
 #include <asm/processor.h>
 #include <asm/errno.h>
 #include <asm/ppc_asm.h>
+#include <asm/export.h>
 
 	.section __ex_table,"a"
 	PPC_LONG_ALIGN
@@ -36,6 +37,7 @@
 2:	stbu	r0,1(r6)	/* clear it out if so */
 	bdnz	2b
 	blr
+EXPORT_SYMBOL(strncpy)
 
 _GLOBAL(strncmp)
 	PPC_LCMPI 0,r5,0
@@ -53,6 +55,7 @@
 	blr
 2:	li	r3,0
 	blr
+EXPORT_SYMBOL(strncmp)
 
 #ifdef CONFIG_PPC32
 _GLOBAL(memcmp)
@@ -68,6 +71,7 @@
 	blr
 2:	li	r3,0
 	blr
+EXPORT_SYMBOL(memcmp)
 #endif
 
 _GLOBAL(memchr)
@@ -82,6 +86,7 @@
 	beqlr
 2:	li	r3,0
 	blr
+EXPORT_SYMBOL(memchr)
 
 #ifdef CONFIG_PPC32
 _GLOBAL(__clear_user)
@@ -125,4 +130,5 @@
 	PPC_LONG	1b,91b
 	PPC_LONG	8b,92b
 	.text
+EXPORT_SYMBOL(__clear_user)
 #endif
diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S
index 7bd9549..57ace35 100644
--- a/arch/powerpc/lib/string_64.S
+++ b/arch/powerpc/lib/string_64.S
@@ -20,6 +20,7 @@
 
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/export.h>
 
 	.section	".toc","aw"
 PPC64_CACHES:
@@ -200,3 +201,4 @@
 	cmpdi	r4,32
 	blt	.Lshort_clear
 	b	.Lmedium_clear
+EXPORT_SYMBOL(__clear_user)
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 115347f..09cc50c 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -26,6 +26,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
+#include <asm/export.h>
 
 #ifdef CONFIG_SMP
 	.section .bss
@@ -33,6 +34,7 @@
 	.globl mmu_hash_lock
 mmu_hash_lock:
 	.space	4
+EXPORT_SYMBOL(mmu_hash_lock)
 #endif /* CONFIG_SMP */
 
 /*
@@ -575,6 +577,7 @@
 	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
 	stwcx.	r8,0,r5			/* update the pte */
 	bne-	33b
+EXPORT_SYMBOL(flush_hash_pages)
 
 	/* Get the address of the primary PTE group in the hash table (r3) */
 _GLOBAL(flush_hash_patch_A)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 90480e2..44d3c3a 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -529,7 +529,7 @@
 	 */
 #ifdef CONFIG_IBMEBUS
 	return !cpu_has_feature(CPU_FTR_ARCH_207S) &&
-		!firmware_has_feature(FW_FEATURE_SPLPAR);
+		firmware_has_feature(FW_FEATURE_SPLPAR);
 #else
 	return false;
 #endif
diff --git a/arch/powerpc/platforms/82xx/Kconfig b/arch/powerpc/platforms/82xx/Kconfig
index 7c7df400..994d1a9 100644
--- a/arch/powerpc/platforms/82xx/Kconfig
+++ b/arch/powerpc/platforms/82xx/Kconfig
@@ -30,8 +30,8 @@
 	select 8272
 	select 8260
 	select FSL_SOC
-	select PHYLIB
-	select MDIO_BITBANG
+	select PHYLIB if NETDEVICES
+	select MDIO_BITBANG if PHYLIB
 	help
 	  This enables support for the Embedded Planet EP8248E board.
 
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c
index cdab847..8fec050 100644
--- a/arch/powerpc/platforms/82xx/ep8248e.c
+++ b/arch/powerpc/platforms/82xx/ep8248e.c
@@ -298,7 +298,9 @@
 static int __init declare_of_platform_devices(void)
 {
 	of_platform_bus_probe(NULL, of_bus_ids, NULL);
-	platform_driver_register(&ep8248e_mdio_driver);
+
+	if (IS_ENABLED(CONFIG_MDIO_BITBANG))
+		platform_driver_register(&ep8248e_mdio_driver);
 
 	return 0;
 }
diff --git a/arch/powerpc/platforms/83xx/asp834x.c b/arch/powerpc/platforms/83xx/asp834x.c
index 17e5433..575afd6 100644
--- a/arch/powerpc/platforms/83xx/asp834x.c
+++ b/arch/powerpc/platforms/83xx/asp834x.c
@@ -30,9 +30,7 @@
  */
 static void __init asp834x_setup_arch(void)
 {
-	if (ppc_md.progress)
-		ppc_md.progress("asp834x_setup_arch()", 0);
-
+	mpc83xx_setup_arch();
 	mpc834x_usb_cfg();
 }
 
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index e7fbd63..d8642a4 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -130,10 +130,7 @@
 	struct device_node *np;
 #endif
 
-	if (ppc_md.progress)
-		ppc_md.progress("kmpbec83xx_setup_arch()", 0);
-
-	mpc83xx_setup_pci();
+	mpc83xx_setup_arch();
 
 #ifdef CONFIG_QUICC_ENGINE
 	np = of_find_node_by_name(NULL, "par_io");
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c
index 8899aa9..d75c981 100644
--- a/arch/powerpc/platforms/83xx/misc.c
+++ b/arch/powerpc/platforms/83xx/misc.c
@@ -142,3 +142,11 @@
 		mpc83xx_add_bridge(np);
 }
 #endif
+
+void __init mpc83xx_setup_arch(void)
+{
+	if (ppc_md.progress)
+		ppc_md.progress("mpc83xx_setup_arch()", 0);
+
+	mpc83xx_setup_pci();
+}
diff --git a/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
index 040d5d0..272c41c 100644
--- a/arch/powerpc/platforms/83xx/mpc830x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
@@ -27,10 +27,7 @@
  */
 static void __init mpc830x_rdb_setup_arch(void)
 {
-	if (ppc_md.progress)
-		ppc_md.progress("mpc830x_rdb_setup_arch()", 0);
-
-	mpc83xx_setup_pci();
+	mpc83xx_setup_arch();
 	mpc831x_usb_cfg();
 }
 
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index 40e0d83..fd80fd5 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -28,10 +28,7 @@
  */
 static void __init mpc831x_rdb_setup_arch(void)
 {
-	if (ppc_md.progress)
-		ppc_md.progress("mpc831x_rdb_setup_arch()", 0);
-
-	mpc83xx_setup_pci();
+	mpc83xx_setup_arch();
 	mpc831x_usb_cfg();
 }
 
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index cdfa47c..bb7b25a 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -58,8 +58,7 @@
 	struct device_node *np;
 	u8 __iomem *bcsr_regs = NULL;
 
-	if (ppc_md.progress)
-		ppc_md.progress("mpc832x_sys_setup_arch()", 0);
+	mpc83xx_setup_arch();
 
 	/* Map BCSR area */
 	np = of_find_node_by_name(NULL, "bcsr");
@@ -71,8 +70,6 @@
 		of_node_put(np);
 	}
 
-	mpc83xx_setup_pci();
-
 #ifdef CONFIG_QUICC_ENGINE
 	if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
 		par_io_init(np);
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index 0d6a62f..d7c9b18 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -197,10 +197,7 @@
 	struct device_node *np;
 #endif
 
-	if (ppc_md.progress)
-		ppc_md.progress("mpc832x_rdb_setup_arch()", 0);
-
-	mpc83xx_setup_pci();
+	mpc83xx_setup_arch();
 
 #ifdef CONFIG_QUICC_ENGINE
 	if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index 8fd0c1e..73a5267d 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -57,10 +57,7 @@
  */
 static void __init mpc834x_itx_setup_arch(void)
 {
-	if (ppc_md.progress)
-		ppc_md.progress("mpc834x_itx_setup_arch()", 0);
-
-	mpc83xx_setup_pci();
+	mpc83xx_setup_arch();
 
 	mpc834x_usb_cfg();
 }
diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c
index eeaee61..009cfc1 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c
@@ -76,10 +76,7 @@
  */
 static void __init mpc834x_mds_setup_arch(void)
 {
-	if (ppc_md.progress)
-		ppc_md.progress("mpc834x_mds_setup_arch()", 0);
-
-	mpc83xx_setup_pci();
+	mpc83xx_setup_arch();
 
 	mpc834xemds_usb_cfg();
 }
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index dacf4c2..4fc3051 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -66,8 +66,7 @@
 	struct device_node *np;
 	u8 __iomem *bcsr_regs = NULL;
 
-	if (ppc_md.progress)
-		ppc_md.progress("mpc836x_mds_setup_arch()", 0);
+	mpc83xx_setup_arch();
 
 	/* Map BCSR area */
 	np = of_find_node_by_name(NULL, "bcsr");
@@ -79,8 +78,6 @@
 		of_node_put(np);
 	}
 
-	mpc83xx_setup_pci();
-
 #ifdef CONFIG_QUICC_ENGINE
 	if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
 		par_io_init(np);
diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
index cf67ac9..93f024f 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
@@ -31,10 +31,7 @@
 
 static void __init mpc836x_rdk_setup_arch(void)
 {
-	if (ppc_md.progress)
-		ppc_md.progress("mpc836x_rdk_setup_arch()", 0);
-
-	mpc83xx_setup_pci();
+	mpc83xx_setup_arch();
 }
 
 /*
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
index 652b97d..3b34cc1 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -79,10 +79,7 @@
  */
 static void __init mpc837x_mds_setup_arch(void)
 {
-	if (ppc_md.progress)
-		ppc_md.progress("mpc837x_mds_setup_arch()", 0);
-
-	mpc83xx_setup_pci();
+	mpc83xx_setup_arch();
 	mpc837xmds_usb_cfg();
 }
 
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index 667731d..0c55fa6 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -50,10 +50,7 @@
  */
 static void __init mpc837x_rdb_setup_arch(void)
 {
-	if (ppc_md.progress)
-		ppc_md.progress("mpc837x_rdb_setup_arch()", 0);
-
-	mpc83xx_setup_pci();
+	mpc83xx_setup_arch();
 	mpc837x_usb_cfg();
 	mpc837x_rdb_sd_cfg();
 }
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index ad48419..636eb9d 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -86,5 +86,6 @@
 #endif
 
 extern int mpc83xx_declare_of_platform_devices(void);
+extern void mpc83xx_setup_arch(void);
 
 #endif				/* __MPC83XX_H__ */
diff --git a/arch/powerpc/platforms/83xx/sbc834x.c b/arch/powerpc/platforms/83xx/sbc834x.c
index b867e88..cb4bdab 100644
--- a/arch/powerpc/platforms/83xx/sbc834x.c
+++ b/arch/powerpc/platforms/83xx/sbc834x.c
@@ -47,10 +47,7 @@
  */
 static void __init sbc834x_setup_arch(void)
 {
-	if (ppc_md.progress)
-		ppc_md.progress("sbc834x_setup_arch()", 0);
-
-	mpc83xx_setup_pci();
+	mpc83xx_setup_arch();
 }
 
 machine_device_initcall(sbc834x, mpc83xx_declare_of_platform_devices);
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index df25a3e..9dc1d28 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -72,7 +72,7 @@
 config MPC85xx_MDS
 	bool "Freescale MPC85xx MDS"
 	select DEFAULT_UIMAGE
-	select PHYLIB
+	select PHYLIB if NETDEVICES
 	select HAS_RAPIDIO
 	select SWIOTLB
 	help
diff --git a/arch/powerpc/platforms/85xx/bsc913x_qds.c b/arch/powerpc/platforms/85xx/bsc913x_qds.c
index 07dd6ae..d2f4556 100644
--- a/arch/powerpc/platforms/85xx/bsc913x_qds.c
+++ b/arch/powerpc/platforms/85xx/bsc913x_qds.c
@@ -72,7 +72,6 @@
 	.pcibios_fixup_bus	= fsl_pcibios_fixup_bus,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/85xx/bsc913x_rdb.c b/arch/powerpc/platforms/85xx/bsc913x_rdb.c
index e48f671..0ffdb4a 100644
--- a/arch/powerpc/platforms/85xx/bsc913x_rdb.c
+++ b/arch/powerpc/platforms/85xx/bsc913x_rdb.c
@@ -59,7 +59,6 @@
 	.setup_arch		= bsc913x_rdb_setup_arch,
 	.init_IRQ		= bsc913x_rdb_pic_init,
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/85xx/c293pcie.c b/arch/powerpc/platforms/85xx/c293pcie.c
index 3b9e3f0..4df1b40 100644
--- a/arch/powerpc/platforms/85xx/c293pcie.c
+++ b/arch/powerpc/platforms/85xx/c293pcie.c
@@ -65,7 +65,6 @@
 	.setup_arch		= c293_pcie_setup_arch,
 	.init_IRQ		= c293_pcie_pic_init,
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index 3a6a84f..1179115 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -225,7 +225,6 @@
 #else
 	.get_irq		= mpic_get_coreint_irq,
 #endif
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c
index 14af36a..f29c6f0 100644
--- a/arch/powerpc/platforms/85xx/ge_imp3a.c
+++ b/arch/powerpc/platforms/85xx/ge_imp3a.c
@@ -215,7 +215,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index 6ba687f..94a7f92 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -77,7 +77,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index 8756715..f3e055f 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -170,7 +170,6 @@
 	.init_IRQ		= mpc85xx_ads_pic_init,
 	.show_cpuinfo		= mpc85xx_ads_show_cpuinfo,
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 86f2015..224db30 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -83,7 +83,8 @@
 		return PCIBIOS_SUCCESSFUL;
 }
 
-static void __noreturn mpc85xx_cds_restart(char *cmd)
+static int mpc85xx_cds_restart(struct notifier_block *this,
+			       unsigned long mode, void *cmd)
 {
 	struct pci_dev *dev;
 	u_char tmp;
@@ -108,12 +109,25 @@
 	}
 
 	/*
-	 *  If we can't find the VIA chip (maybe the P2P bridge is disabled)
-	 *  or the VIA chip reset didn't work, just use the default reset.
+	 *  If we can't find the VIA chip (maybe the P2P bridge is
+	 *  disabled) or the VIA chip reset didn't work, just return
+	 *  and let default reset sequence happen.
 	 */
-	fsl_rstcr_restart(NULL);
+	return NOTIFY_DONE;
 }
 
+static int mpc85xx_cds_restart_register(void)
+{
+	static struct notifier_block restart_handler;
+
+	restart_handler.notifier_call = mpc85xx_cds_restart;
+	restart_handler.priority = 192;
+
+	return register_restart_handler(&restart_handler);
+}
+machine_arch_initcall(mpc85xx_cds, mpc85xx_cds_restart_register);
+
+
 static void __init mpc85xx_cds_pci_irq_fixup(struct pci_dev *dev)
 {
 	u_char c;
@@ -380,11 +394,8 @@
 	.show_cpuinfo	= mpc85xx_cds_show_cpuinfo,
 	.get_irq	= mpic_get_irq,
 #ifdef CONFIG_PCI
-	.restart	= mpc85xx_cds_restart,
 	.pcibios_fixup_bus	= mpc85xx_cds_fixup_bus,
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
-#else
-	.restart	= fsl_rstcr_restart,
 #endif
 	.calibrate_decr = generic_calibrate_decr,
 	.progress	= udbg_progress,
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index ed69c7e..dc9e035 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -204,7 +204,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
@@ -219,7 +218,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
@@ -234,7 +232,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index fa9cd71..d7e440e 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -63,6 +63,8 @@
 #define DBG(fmt...)
 #endif
 
+#if IS_BUILTIN(CONFIG_PHYLIB)
+
 #define MV88E1111_SCR	0x10
 #define MV88E1111_SCR_125CLK	0x0010
 static int mpc8568_fixup_125_clock(struct phy_device *phydev)
@@ -152,6 +154,8 @@
 	return err;
 }
 
+#endif
+
 /* ************************************************************************
  *
  * Setup the architecture
@@ -313,6 +317,7 @@
 	swiotlb_detect_4g();
 }
 
+#if IS_BUILTIN(CONFIG_PHYLIB)
 
 static int __init board_fixups(void)
 {
@@ -342,9 +347,12 @@
 
 	return 0;
 }
+
 machine_arch_initcall(mpc8568_mds, board_fixups);
 machine_arch_initcall(mpc8569_mds, board_fixups);
 
+#endif
+
 static int __init mpc85xx_publish_devices(void)
 {
 	if (machine_is(mpc8568_mds))
@@ -385,7 +393,6 @@
 	.setup_arch	= mpc85xx_mds_setup_arch,
 	.init_IRQ	= mpc85xx_mds_pic_init,
 	.get_irq	= mpic_get_irq,
-	.restart	= fsl_rstcr_restart,
 	.calibrate_decr	= generic_calibrate_decr,
 	.progress	= udbg_progress,
 #ifdef CONFIG_PCI
@@ -405,7 +412,6 @@
 	.setup_arch	= mpc85xx_mds_setup_arch,
 	.init_IRQ	= mpc85xx_mds_pic_init,
 	.get_irq	= mpic_get_irq,
-	.restart	= fsl_rstcr_restart,
 	.calibrate_decr	= generic_calibrate_decr,
 	.progress	= udbg_progress,
 #ifdef CONFIG_PCI
@@ -426,7 +432,6 @@
 	.setup_arch	= mpc85xx_mds_setup_arch,
 	.init_IRQ	= mpc85xx_mds_pic_init,
 	.get_irq	= mpic_get_irq,
-	.restart	= fsl_rstcr_restart,
 	.calibrate_decr	= generic_calibrate_decr,
 	.progress	= udbg_progress,
 #ifdef CONFIG_PCI
@@ -434,4 +439,3 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 };
-
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index c1499cb..1006950 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -213,7 +213,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
@@ -228,7 +227,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
@@ -243,7 +241,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
@@ -258,7 +255,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
@@ -273,7 +269,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
@@ -288,7 +283,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
@@ -303,7 +297,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
@@ -318,7 +311,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
@@ -333,7 +325,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
@@ -348,7 +339,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/85xx/mvme2500.c b/arch/powerpc/platforms/85xx/mvme2500.c
index acc3d0d..d5af072 100644
--- a/arch/powerpc/platforms/85xx/mvme2500.c
+++ b/arch/powerpc/platforms/85xx/mvme2500.c
@@ -66,7 +66,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
index 661d7b5..78d13b3 100644
--- a/arch/powerpc/platforms/85xx/p1010rdb.c
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -79,7 +79,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 63568d6..0908abd 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -568,7 +568,6 @@
 	.pcibios_fixup_phb	= fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c
index 2f29436..276e00a 100644
--- a/arch/powerpc/platforms/85xx/p1022_rdk.c
+++ b/arch/powerpc/platforms/85xx/p1022_rdk.c
@@ -148,7 +148,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/85xx/p1023_rdb.c b/arch/powerpc/platforms/85xx/p1023_rdb.c
index 40d8de5..3e8cd032 100644
--- a/arch/powerpc/platforms/85xx/p1023_rdb.c
+++ b/arch/powerpc/platforms/85xx/p1023_rdb.c
@@ -110,7 +110,6 @@
 	.setup_arch		= mpc85xx_rdb_setup_arch,
 	.init_IRQ		= mpc85xx_rdb_pic_init,
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 #ifdef CONFIG_PCI
diff --git a/arch/powerpc/platforms/85xx/ppa8548.c b/arch/powerpc/platforms/85xx/ppa8548.c
index 2410167..33c5ba6 100644
--- a/arch/powerpc/platforms/85xx/ppa8548.c
+++ b/arch/powerpc/platforms/85xx/ppa8548.c
@@ -91,7 +91,6 @@
 	.init_IRQ	= ppa8548_pic_init,
 	.show_cpuinfo	= ppa8548_show_cpuinfo,
 	.get_irq	= mpic_get_irq,
-	.restart	= fsl_rstcr_restart,
 	.calibrate_decr = generic_calibrate_decr,
 	.progress	= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c
index 50d7458..b63a854 100644
--- a/arch/powerpc/platforms/85xx/qemu_e500.c
+++ b/arch/powerpc/platforms/85xx/qemu_e500.c
@@ -77,7 +77,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_coreint_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index 62b6c45..2c67084 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -130,7 +130,6 @@
 	.init_IRQ	= sbc8548_pic_init,
 	.show_cpuinfo	= sbc8548_show_cpuinfo,
 	.get_irq	= mpic_get_irq,
-	.restart	= fsl_rstcr_restart,
 #ifdef CONFIG_PCI
 	.pcibios_fixup_bus	= fsl_pcibios_fixup_bus,
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index 79fd0df..21d6aaa 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -38,18 +38,18 @@
 }
 static DECLARE_WORK(gpio_halt_wq, gpio_halt_wfn);
 
-static void gpio_halt_cb(void)
+static void __noreturn gpio_halt_cb(void)
 {
 	enum of_gpio_flags flags;
 	int trigger, gpio;
 
 	if (!halt_node)
-		return;
+		panic("No reset GPIO information was provided in DT\n");
 
 	gpio = of_get_gpio_flags(halt_node, 0, &flags);
 
 	if (!gpio_is_valid(gpio))
-		return;
+		panic("Provided GPIO is invalid\n");
 
 	trigger = (flags == OF_GPIO_ACTIVE_LOW);
 
@@ -57,6 +57,8 @@
 
 	/* Probably wont return */
 	gpio_set_value(gpio, trigger);
+
+	panic("Halt failed\n");
 }
 
 /* This IRQ means someone pressed the power button and it is waiting for us
diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c
index cd255ac..8da4ed9 100644
--- a/arch/powerpc/platforms/85xx/socrates.c
+++ b/arch/powerpc/platforms/85xx/socrates.c
@@ -91,7 +91,6 @@
 	.setup_arch		= socrates_setup_arch,
 	.init_IRQ		= socrates_pic_init,
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index 91b824c..1a1d44e 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -103,7 +103,6 @@
 	.init_IRQ		= stx_gp3_pic_init,
 	.show_cpuinfo		= stx_gp3_show_cpuinfo,
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index b7c5445..9fc20a3 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -132,7 +132,6 @@
 	.init_IRQ		= tqm85xx_pic_init,
 	.show_cpuinfo		= tqm85xx_show_cpuinfo,
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c
index 1bc02a8..360f625 100644
--- a/arch/powerpc/platforms/85xx/twr_p102x.c
+++ b/arch/powerpc/platforms/85xx/twr_p102x.c
@@ -140,7 +140,6 @@
 	.pcibios_fixup_bus	= fsl_pcibios_fixup_bus,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index cf0c70f..cd6ce84 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -167,7 +167,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
@@ -182,7 +181,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
@@ -197,7 +195,6 @@
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 };
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c
index ef684af..6b99300 100644
--- a/arch/powerpc/platforms/86xx/gef_ppc9a.c
+++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c
@@ -204,7 +204,6 @@
 	.init_IRQ		= gef_ppc9a_init_irq,
 	.show_cpuinfo		= gef_ppc9a_show_cpuinfo,
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.time_init		= mpc86xx_time_init,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c
index 67dd0c2..8cdeca0 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc310.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc310.c
@@ -191,7 +191,6 @@
 	.init_IRQ		= gef_sbc310_init_irq,
 	.show_cpuinfo		= gef_sbc310_show_cpuinfo,
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.time_init		= mpc86xx_time_init,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index 8050269..da8723a 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -181,7 +181,6 @@
 	.init_IRQ		= gef_sbc610_init_irq,
 	.show_cpuinfo		= gef_sbc610_show_cpuinfo,
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.time_init		= mpc86xx_time_init,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index fef0582..a5d73fa 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -331,7 +331,6 @@
 	.setup_arch		= mpc86xx_hpcd_setup_arch,
 	.init_IRQ		= mpc86xx_init_irq,
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.time_init		= mpc86xx_time_init,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index 5ae42a0..a0e989e 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -130,7 +130,6 @@
 	.init_IRQ		= mpc86xx_init_irq,
 	.show_cpuinfo		= mpc86xx_hpcn_show_cpuinfo,
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.time_init		= mpc86xx_time_init,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
diff --git a/arch/powerpc/platforms/86xx/mvme7100.c b/arch/powerpc/platforms/86xx/mvme7100.c
index addb41e..835352e 100644
--- a/arch/powerpc/platforms/86xx/mvme7100.c
+++ b/arch/powerpc/platforms/86xx/mvme7100.c
@@ -111,7 +111,6 @@
 	.setup_arch		= mvme7100_setup_arch,
 	.init_IRQ		= mpc86xx_init_irq,
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.time_init		= mpc86xx_time_init,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
diff --git a/arch/powerpc/platforms/86xx/sbc8641d.c b/arch/powerpc/platforms/86xx/sbc8641d.c
index 52af573..93db35d 100644
--- a/arch/powerpc/platforms/86xx/sbc8641d.c
+++ b/arch/powerpc/platforms/86xx/sbc8641d.c
@@ -82,7 +82,6 @@
 	.init_IRQ		= mpc86xx_init_irq,
 	.show_cpuinfo		= sbc8641_show_cpuinfo,
 	.get_irq		= mpic_get_irq,
-	.restart		= fsl_rstcr_restart,
 	.time_init		= mpc86xx_time_init,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 2975754..5364d4a 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -103,7 +103,7 @@
 	inode->i_mode = mode;
 	inode->i_uid = current_fsuid();
 	inode->i_gid = current_fsgid();
-	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 out:
 	return inode;
 }
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 86707e6..aa35245 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -393,7 +393,7 @@
 					     unsigned long *vpn, int count,
 					     int psize, int ssize)
 {
-	unsigned long param[8];
+	unsigned long param[PLPAR_HCALL9_BUFSIZE];
 	int i = 0, pix = 0, rc;
 	unsigned long flags = 0;
 	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
@@ -522,7 +522,7 @@
 	unsigned long flags = 0;
 	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
-	unsigned long param[9];
+	unsigned long param[PLPAR_HCALL9_BUFSIZE];
 	unsigned long hash, index, shift, hidx, slot;
 	real_pte_t pte;
 	int psize, ssize;
diff --git a/arch/powerpc/relocs_check.sh b/arch/powerpc/relocs_check.sh
index 2e4ebd0..ec2d5c8 100755
--- a/arch/powerpc/relocs_check.sh
+++ b/arch/powerpc/relocs_check.sh
@@ -30,6 +30,7 @@
 	# On PPC64:
 	#	R_PPC64_RELATIVE, R_PPC64_NONE
 	#	R_PPC64_ADDR64 mach_<name>
+	#	R_PPC64_ADDR64 __crc_<name>
 	# On PPC:
 	#	R_PPC_RELATIVE, R_PPC_ADDR16_HI,
 	#	R_PPC_ADDR16_HA,R_PPC_ADDR16_LO,
@@ -41,7 +42,8 @@
 R_PPC_ADDR16_HA
 R_PPC_RELATIVE
 R_PPC_NONE' |
-	grep -E -v '\<R_PPC64_ADDR64[[:space:]]+mach_'
+	grep -E -v '\<R_PPC64_ADDR64[[:space:]]+mach_' |
+	grep -E -v '\<R_PPC64_ADDR64[[:space:]]+__crc_'
 )
 
 if [ -z "$bad_relocs" ]; then
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 3c0eb9b..986cd11 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -233,8 +233,6 @@
 	else
 		out_be32(&siu_conf->sc_sdcr, 1);
 	immr_unmap(siu_conf);
-
-	cpm_muram_init();
 }
 
 static DEFINE_SPINLOCK(cmd_lock);
diff --git a/arch/powerpc/sysdev/cpm2.c b/arch/powerpc/sysdev/cpm2.c
index 8dc1e24..f78ff84 100644
--- a/arch/powerpc/sysdev/cpm2.c
+++ b/arch/powerpc/sysdev/cpm2.c
@@ -66,10 +66,6 @@
 	cpm2_immr = ioremap(get_immrbase(), CPM_MAP_SIZE);
 #endif
 
-	/* Reclaim the DP memory for our use.
-	 */
-	cpm_muram_init();
-
 	/* Tell everyone where the comm processor resides.
 	 */
 	cpmp = &cpm2_immr->im_cpm;
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 947f420..51bf749 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -37,6 +37,21 @@
 #include <linux/of_gpio.h>
 #endif
 
+static int __init cpm_init(void)
+{
+	struct device_node *np;
+
+	np = of_find_compatible_node(NULL, NULL, "fsl,cpm1");
+	if (!np)
+		np = of_find_compatible_node(NULL, NULL, "fsl,cpm2");
+	if (!np)
+		return -ENODEV;
+	cpm_muram_init();
+	of_node_put(np);
+	return 0;
+}
+subsys_initcall(cpm_init);
+
 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
 static u32 __iomem *cpm_udbg_txdesc;
 static u8 __iomem *cpm_udbg_txbuf;
diff --git a/arch/powerpc/sysdev/dcr-low.S b/arch/powerpc/sysdev/dcr-low.S
index d3098ef..e687bb2 100644
--- a/arch/powerpc/sysdev/dcr-low.S
+++ b/arch/powerpc/sysdev/dcr-low.S
@@ -12,6 +12,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/processor.h>
 #include <asm/bug.h>
+#include <asm/export.h>
 
 #define DCR_ACCESS_PROLOG(table) \
 	cmpli	cr0,r3,1024;	 \
@@ -28,9 +29,11 @@
 
 _GLOBAL(__mfdcr)
 	DCR_ACCESS_PROLOG(__mfdcr_table)
+EXPORT_SYMBOL(__mfdcr)
 
 _GLOBAL(__mtdcr)
 	DCR_ACCESS_PROLOG(__mtdcr_table)
+EXPORT_SYMBOL(__mtdcr)
 
 __mfdcr_table:
 	mfdcr  r3,0; blr
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 0ef9df4..d3a5974 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -111,8 +111,7 @@
 	.write = indirect_write_config,
 };
 
-#define MAX_PHYS_ADDR_BITS	40
-static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
+static u64 pci64_dma_offset;
 
 #ifdef CONFIG_SWIOTLB
 static void setup_swiotlb_ops(struct pci_controller *hose)
@@ -132,12 +131,10 @@
 		return -EIO;
 
 	/*
-	 * Fixup PCI devices that are able to DMA to above the physical
-	 * address width of the SoC such that we can address any internal
-	 * SoC address from across PCI if needed
+	 * Fix up PCI devices that are able to DMA to the large inbound
+	 * mapping that allows addressing any RAM address from across PCI.
 	 */
-	if ((dev_is_pci(dev)) &&
-	    dma_mask >= DMA_BIT_MASK(MAX_PHYS_ADDR_BITS)) {
+	if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
 		set_dma_ops(dev, &dma_direct_ops);
 		set_dma_offset(dev, pci64_dma_offset);
 	}
@@ -387,6 +384,7 @@
 				mem_log++;
 
 			piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
+			pci64_dma_offset = 1ULL << mem_log;
 
 			if (setup_inbound) {
 				/* Setup inbound memory window */
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index a09ca70..d93056e 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -29,6 +29,7 @@
 #include <linux/fsl_devices.h>
 #include <linux/fs_enet_pd.h>
 #include <linux/fs_uart_pd.h>
+#include <linux/reboot.h>
 
 #include <linux/atomic.h>
 #include <asm/io.h>
@@ -180,23 +181,38 @@
 #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 static __be32 __iomem *rstcr;
 
+static int fsl_rstcr_restart(struct notifier_block *this,
+			     unsigned long mode, void *cmd)
+{
+	local_irq_disable();
+	/* set reset control register */
+	out_be32(rstcr, 0x2);	/* HRESET_REQ */
+
+	return NOTIFY_DONE;
+}
+
 static int __init setup_rstcr(void)
 {
 	struct device_node *np;
 
+	static struct notifier_block restart_handler = {
+		.notifier_call = fsl_rstcr_restart,
+		.priority = 128,
+	};
+
 	for_each_node_by_name(np, "global-utilities") {
 		if ((of_get_property(np, "fsl,has-rstcr", NULL))) {
 			rstcr = of_iomap(np, 0) + 0xb0;
-			if (!rstcr)
+			if (!rstcr) {
 				printk (KERN_ERR "Error: reset control "
 						"register not mapped!\n");
+			} else {
+				register_restart_handler(&restart_handler);
+			}
 			break;
 		}
 	}
 
-	if (!rstcr && ppc_md.restart == fsl_rstcr_restart)
-		printk(KERN_ERR "No RSTCR register, warm reboot won't work\n");
-
 	of_node_put(np);
 
 	return 0;
@@ -204,15 +220,6 @@
 
 arch_initcall(setup_rstcr);
 
-void __noreturn fsl_rstcr_restart(char *cmd)
-{
-	local_irq_disable();
-	if (rstcr)
-		/* set reset control register */
-		out_be32(rstcr, 0x2);	/* HRESET_REQ */
-
-	while (1) ;
-}
 #endif
 
 #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index 433566a..d73daa4 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -19,8 +19,6 @@
 struct spi_board_info;
 struct device_node;
 
-extern void __noreturn fsl_rstcr_restart(char *cmd);
-
 /* The different ports that the DIU can be connected to */
 enum fsl_diu_monitor_port {
 	FSL_DIU_PORT_DVI,	/* DVI */
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 4d48cec..b9aac95 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1249,7 +1249,7 @@
 	/* Pick the physical address from the device tree if unspecified */
 	if (!phys_addr) {
 		/* Check if it is DCR-based */
-		if (of_get_property(node, "dcr-reg", NULL)) {
+		if (of_property_read_bool(node, "dcr-reg")) {
 			flags |= MPIC_USES_DCR;
 		} else {
 			struct resource r;
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 255c7ee..09bccb2 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -51,7 +51,7 @@
 	struct inode *inode = d_inode(sb_info->update_file);
 
 	sb_info->last_update = get_seconds();
-	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 }
 
 /* directory tree removal functions */
@@ -99,7 +99,7 @@
 		ret->i_mode = mode;
 		ret->i_uid = hypfs_info->uid;
 		ret->i_gid = hypfs_info->gid;
-		ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
+		ret->i_atime = ret->i_mtime = ret->i_ctime = current_time(ret);
 		if (S_ISDIR(mode))
 			set_nlink(ret, 2);
 	}
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 9043d2e..20f196b 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,6 +1,7 @@
 
 
 generic-y += clkdev.h
+generic-y += export.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 72ccc41..1f0fe98 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -61,7 +61,7 @@
 
 extra-y				+= head.o head64.o vmlinux.lds
 
-obj-$(CONFIG_MODULES)		+= s390_ksyms.o module.o
+obj-$(CONFIG_MODULES)		+= module.o
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_SCHED_TOPOLOGY)	+= topology.o
 obj-$(CONFIG_HIBERNATION)	+= suspend.o swsusp.o
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index c51650a..49a3073 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -23,6 +23,7 @@
 #include <asm/vx-insn.h>
 #include <asm/setup.h>
 #include <asm/nmi.h>
+#include <asm/export.h>
 
 __PT_R0      =	__PT_GPRS
 __PT_R1      =	__PT_GPRS + 8
@@ -259,6 +260,8 @@
 
 	EX_TABLE(.Lrewind_pad,.Lsie_fault)
 	EX_TABLE(sie_exit,.Lsie_fault)
+EXPORT_SYMBOL(sie64a)
+EXPORT_SYMBOL(sie_exit)
 #endif
 
 /*
@@ -825,6 +828,9 @@
 	oi	__LC_CPU_FLAGS+7,_CIF_FPU
 	br	%r14
 .Lsave_fpu_regs_end:
+#if IS_ENABLED(CONFIG_KVM)
+EXPORT_SYMBOL(save_fpu_regs)
+#endif
 
 /*
  * Load floating-point controls and floating-point or vector registers.
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index e499370..9a17e44 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -9,6 +9,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/ftrace.h>
 #include <asm/ptrace.h>
+#include <asm/export.h>
 
 	.section .kprobes.text, "ax"
 
@@ -23,6 +24,8 @@
 ENTRY(_mcount)
 	br	%r14
 
+EXPORT_SYMBOL(_mcount)
+
 ENTRY(ftrace_caller)
 	.globl	ftrace_regs_caller
 	.set	ftrace_regs_caller,ftrace_caller
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
deleted file mode 100644
index e67453b..0000000
--- a/arch/s390/kernel/s390_ksyms.c
+++ /dev/null
@@ -1,15 +0,0 @@
-#include <linux/module.h>
-#include <linux/kvm_host.h>
-#include <asm/fpu/api.h>
-#include <asm/ftrace.h>
-
-#ifdef CONFIG_FUNCTION_TRACER
-EXPORT_SYMBOL(_mcount);
-#endif
-#if IS_ENABLED(CONFIG_KVM)
-EXPORT_SYMBOL(sie64a);
-EXPORT_SYMBOL(sie_exit);
-EXPORT_SYMBOL(save_fpu_regs);
-#endif
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index c6d553e..be9fa65 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -5,6 +5,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/export.h>
 
 /*
  * memset implementation
@@ -60,6 +61,7 @@
 	xc	0(1,%r1),0(%r1)
 .Lmemset_mvc:
 	mvc	1(1,%r1),0(%r1)
+EXPORT_SYMBOL(memset)
 
 /*
  * memcpy implementation
@@ -86,3 +88,4 @@
 	j	.Lmemcpy_rest
 .Lmemcpy_mvc:
 	mvc	0(1,%r1),0(%r3)
+EXPORT_SYMBOL(memcpy)
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index adb0c34..18d4107e 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -266,7 +266,8 @@
 	/* Try to get the remaining pages with get_user_pages */
 	start += nr << PAGE_SHIFT;
 	pages += nr;
-	ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
+	ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
+				      write ? FOLL_WRITE : 0);
 	/* Have to be a bit careful with return values */
 	if (nr > 0)
 		ret = (ret < 0) ? nr : ret + nr;
diff --git a/arch/score/include/asm/extable.h b/arch/score/include/asm/extable.h
new file mode 100644
index 0000000..c4423ccf
--- /dev/null
+++ b/arch/score/include/asm/extable.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_SCORE_EXTABLE_H
+#define _ASM_SCORE_EXTABLE_H
+
+struct exception_table_entry {
+	unsigned long insn;
+	unsigned long fixup;
+};
+
+struct pt_regs;
+extern int fixup_exception(struct pt_regs *regs);
+#endif
diff --git a/arch/score/include/asm/module.h b/arch/score/include/asm/module.h
index abf395b..6dc1f29 100644
--- a/arch/score/include/asm/module.h
+++ b/arch/score/include/asm/module.h
@@ -2,7 +2,7 @@
 #define _ASM_SCORE_MODULE_H
 
 #include <linux/list.h>
-#include <asm/uaccess.h>
+#include <asm/extable.h>
 #include <asm-generic/module.h>
 
 struct mod_arch_specific {
diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h
index 01aec8c..db58ab9 100644
--- a/arch/score/include/asm/uaccess.h
+++ b/arch/score/include/asm/uaccess.h
@@ -4,6 +4,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/thread_info.h>
+#include <asm/extable.h>
 
 #define VERIFY_READ		0
 #define VERIFY_WRITE		1
@@ -420,12 +421,5 @@
 		return __strnlen_user(str, len);
 }
 
-struct exception_table_entry {
-	unsigned long insn;
-	unsigned long fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
-
 #endif /* __SCORE_UACCESS_H */
 
diff --git a/arch/score/kernel/ptrace.c b/arch/score/kernel/ptrace.c
index 5583618..4f7314d 100644
--- a/arch/score/kernel/ptrace.c
+++ b/arch/score/kernel/ptrace.c
@@ -131,7 +131,7 @@
 {
 	int copied;
 
-	copied = access_process_vm(child, addr, res, sizeof(*res), 0);
+	copied = access_process_vm(child, addr, res, sizeof(*res), FOLL_FORCE);
 
 	return copied != sizeof(*res) ? -EIO : 0;
 }
@@ -142,7 +142,7 @@
 {
 	int copied;
 
-	copied = access_process_vm(child, addr, res, sizeof(*res), 0);
+	copied = access_process_vm(child, addr, res, sizeof(*res), FOLL_FORCE);
 
 	return copied != sizeof(*res) ? -EIO : 0;
 }
@@ -153,7 +153,8 @@
 {
 	int copied;
 
-	copied = access_process_vm(child, addr, &val, sizeof(val), 1);
+	copied = access_process_vm(child, addr, &val, sizeof(val),
+			FOLL_FORCE | FOLL_WRITE);
 
 	return copied != sizeof(val) ? -EIO : 0;
 }
@@ -164,7 +165,8 @@
 {
 	int copied;
 
-	copied = access_process_vm(child, addr, &val, sizeof(val), 1);
+	copied = access_process_vm(child, addr, &val, sizeof(val),
+			FOLL_FORCE | FOLL_WRITE);
 
 	return copied != sizeof(val) ? -EIO : 0;
 }
diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c
index 1517a7d..5cea1e7 100644
--- a/arch/score/kernel/traps.c
+++ b/arch/score/kernel/traps.c
@@ -29,6 +29,7 @@
 #include <asm/cacheflush.h>
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
+#include <asm/uaccess.h>
 
 unsigned long exception_handlers[32];
 
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 0047666..336f33a 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -31,7 +31,7 @@
 endif
 
 cflags-$(CONFIG_CPU_SH2)		:= $(call cc-option,-m2,)
-cflags-$(CONFIG_CPU_J2)			:= $(call cc-option,-mj2,)
+cflags-$(CONFIG_CPU_J2)			+= $(call cc-option,-mj2,)
 cflags-$(CONFIG_CPU_SH2A)		+= $(call cc-option,-m2a,) \
 					   $(call cc-option,-m2a-nofpu,) \
 					   $(call cc-option,-m4-nofpu,)
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index e9c2c42..4e21949 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -22,6 +22,16 @@
 	  have sufficient driver coverage to use this option; do not
 	  select it if you are using original SuperH hardware.
 
+config SH_JCORE_SOC
+	bool "J-Core SoC"
+	depends on SH_DEVICE_TREE && (CPU_SH2 || CPU_J2)
+	select CLKSRC_JCORE_PIT
+	select JCORE_AIC
+	default y if CPU_J2
+	help
+	  Select this option to include drivers core components of the
+	  J-Core SoC, including interrupt controllers and timers.
+
 config SH_SOLUTION_ENGINE
 	bool "SolutionEngine"
 	select SOLUTION_ENGINE
diff --git a/arch/sh/configs/j2_defconfig b/arch/sh/configs/j2_defconfig
index 94d1eca..2eb81ebe 100644
--- a/arch/sh/configs/j2_defconfig
+++ b/arch/sh/configs/j2_defconfig
@@ -8,6 +8,7 @@
 CONFIG_MEMORY_SIZE=0x04000000
 CONFIG_CPU_BIG_ENDIAN=y
 CONFIG_SH_DEVICE_TREE=y
+CONFIG_SH_JCORE_SOC=y
 CONFIG_HZ_100=y
 CONFIG_CMDLINE_OVERWRITE=y
 CONFIG_CMDLINE="console=ttyUL0 earlycon"
@@ -20,6 +21,7 @@
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_NETDEVICES=y
+CONFIG_SERIAL_EARLYCON=y
 CONFIG_SERIAL_UARTLITE=y
 CONFIG_SERIAL_UARTLITE_CONSOLE=y
 CONFIG_I2C=y
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
index 92ade79..a38d0c7 100644
--- a/arch/sh/include/asm/uaccess.h
+++ b/arch/sh/include/asm/uaccess.h
@@ -192,8 +192,6 @@
 #endif
 
 int fixup_exception(struct pt_regs *regs);
-/* Returns 0 if exception not found and fixup.unit otherwise.  */
-unsigned long search_exception_table(unsigned long addr);
 const struct exception_table_entry *search_exception_tables(unsigned long addr);
 
 extern void *set_exception_table_vec(unsigned int vec, void *handler);
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
index 40fa6c8..063c298 100644
--- a/arch/sh/mm/gup.c
+++ b/arch/sh/mm/gup.c
@@ -258,7 +258,8 @@
 		pages += nr;
 
 		ret = get_user_pages_unlocked(start,
-			(end - start) >> PAGE_SHIFT, write, 0, pages);
+			(end - start) >> PAGE_SHIFT, pages,
+			write ? FOLL_WRITE : 0);
 
 		/* Have to be a bit careful with return values */
 		if (nr > 0) {
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 6024c26..cfc9180 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -6,6 +6,7 @@
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += exec.h
+generic-y += export.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
 generic-y += linkage.h
diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
index 9331083..3f2d403 100644
--- a/arch/sparc/include/asm/elf_64.h
+++ b/arch/sparc/include/asm/elf_64.h
@@ -7,7 +7,7 @@
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
-#include <asm/uaccess.h>
+#include <asm/extable_64.h>
 #include <asm/spitfire.h>
 
 /*
diff --git a/arch/sparc/include/asm/extable_64.h b/arch/sparc/include/asm/extable_64.h
new file mode 100644
index 0000000..1121cb0
--- /dev/null
+++ b/arch/sparc/include/asm/extable_64.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_EXTABLE64_H
+#define __ASM_EXTABLE64_H
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue.  No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path.  This means when everything is well,
+ * we don't even have to jump over them.  Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+        unsigned int insn, fixup;
+};
+
+#endif
diff --git a/arch/sparc/include/asm/string.h b/arch/sparc/include/asm/string.h
index 98b72a0..86f34be 100644
--- a/arch/sparc/include/asm/string.h
+++ b/arch/sparc/include/asm/string.h
@@ -5,4 +5,38 @@
 #else
 #include <asm/string_32.h>
 #endif
+
+/* First the mem*() things. */
+#define __HAVE_ARCH_MEMMOVE
+void *memmove(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMCPY
+#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
+
+#define __HAVE_ARCH_MEMSET
+#define memset(s, c, count) __builtin_memset(s, c, count)
+
+#define __HAVE_ARCH_MEMSCAN
+
+#define memscan(__arg0, __char, __arg2)						\
+({										\
+	void *__memscan_zero(void *, size_t);					\
+	void *__memscan_generic(void *, int, size_t);				\
+	void *__retval, *__addr = (__arg0);					\
+	size_t __size = (__arg2);						\
+										\
+	if(__builtin_constant_p(__char) && !(__char))				\
+		__retval = __memscan_zero(__addr, __size);			\
+	else									\
+		__retval = __memscan_generic(__addr, (__char), __size);		\
+										\
+	__retval;								\
+})
+
+#define __HAVE_ARCH_MEMCMP
+int memcmp(const void *,const void *,__kernel_size_t);
+
+#define __HAVE_ARCH_STRNCMP
+int strncmp(const char *, const char *, __kernel_size_t);
+
 #endif
diff --git a/arch/sparc/include/asm/string_32.h b/arch/sparc/include/asm/string_32.h
index 69974e9..6494124 100644
--- a/arch/sparc/include/asm/string_32.h
+++ b/arch/sparc/include/asm/string_32.h
@@ -11,60 +11,4 @@
 
 #include <asm/page.h>
 
-/* Really, userland/ksyms should not see any of this stuff. */
-
-#ifdef __KERNEL__
-
-void __memmove(void *,const void *,__kernel_size_t);
-
-#ifndef EXPORT_SYMTAB_STROPS
-
-/* First the mem*() things. */
-#define __HAVE_ARCH_MEMMOVE
-#undef memmove
-#define memmove(_to, _from, _n) \
-({ \
-	void *_t = (_to); \
-	__memmove(_t, (_from), (_n)); \
-	_t; \
-})
-
-#define __HAVE_ARCH_MEMCPY
-#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
-
-#define __HAVE_ARCH_MEMSET
-#define memset(s, c, count) __builtin_memset(s, c, count)
-
-#define __HAVE_ARCH_MEMSCAN
-
-#undef memscan
-#define memscan(__arg0, __char, __arg2)						\
-({										\
-	void *__memscan_zero(void *, size_t);					\
-	void *__memscan_generic(void *, int, size_t);				\
-	void *__retval, *__addr = (__arg0);					\
-	size_t __size = (__arg2);						\
-										\
-	if(__builtin_constant_p(__char) && !(__char))				\
-		__retval = __memscan_zero(__addr, __size);			\
-	else									\
-		__retval = __memscan_generic(__addr, (__char), __size);		\
-										\
-	__retval;								\
-})
-
-#define __HAVE_ARCH_MEMCMP
-int memcmp(const void *,const void *,__kernel_size_t);
-
-/* Now the str*() stuff... */
-#define __HAVE_ARCH_STRLEN
-__kernel_size_t strlen(const char *);
-
-#define __HAVE_ARCH_STRNCMP
-int strncmp(const char *, const char *, __kernel_size_t);
-
-#endif /* !EXPORT_SYMTAB_STROPS */
-
-#endif /* __KERNEL__ */
-
 #endif /* !(__SPARC_STRING_H__) */
diff --git a/arch/sparc/include/asm/string_64.h b/arch/sparc/include/asm/string_64.h
index 5936b8f..6b9ccb3 100644
--- a/arch/sparc/include/asm/string_64.h
+++ b/arch/sparc/include/asm/string_64.h
@@ -9,54 +9,10 @@
 #ifndef __SPARC64_STRING_H__
 #define __SPARC64_STRING_H__
 
-/* Really, userland/ksyms should not see any of this stuff. */
-
-#ifdef __KERNEL__
-
 #include <asm/asi.h>
 
-#ifndef EXPORT_SYMTAB_STROPS
-
-/* First the mem*() things. */
-#define __HAVE_ARCH_MEMMOVE
-void *memmove(void *, const void *, __kernel_size_t);
-
-#define __HAVE_ARCH_MEMCPY
-#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
-
-#define __HAVE_ARCH_MEMSET
-#define memset(s, c, count) __builtin_memset(s, c, count)
-
-#define __HAVE_ARCH_MEMSCAN
-
-#undef memscan
-#define memscan(__arg0, __char, __arg2)					\
-({									\
-	void *__memscan_zero(void *, size_t);				\
-	void *__memscan_generic(void *, int, size_t);			\
-	void *__retval, *__addr = (__arg0);				\
-	size_t __size = (__arg2);					\
-									\
-	if(__builtin_constant_p(__char) && !(__char))			\
-		__retval = __memscan_zero(__addr, __size);		\
-	else								\
-		__retval = __memscan_generic(__addr, (__char), __size);	\
-									\
-	__retval;							\
-})
-
-#define __HAVE_ARCH_MEMCMP
-int memcmp(const void *,const void *,__kernel_size_t);
-
 /* Now the str*() stuff... */
 #define __HAVE_ARCH_STRLEN
 __kernel_size_t strlen(const char *);
 
-#define __HAVE_ARCH_STRNCMP
-int strncmp(const char *, const char *, __kernel_size_t);
-
-#endif /* !EXPORT_SYMTAB_STROPS */
-
-#endif /* __KERNEL__ */
-
 #endif /* !(__SPARC64_STRING_H__) */
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 37a315d..b68acc5 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -13,6 +13,7 @@
 #include <asm/asi.h>
 #include <asm/spitfire.h>
 #include <asm-generic/uaccess-unaligned.h>
+#include <asm/extable_64.h>
 #endif
 
 #ifndef __ASSEMBLY__
@@ -81,23 +82,6 @@
 	return 1;
 }
 
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
-        unsigned int insn, fixup;
-};
-
 void __ret_efault(void);
 void __retl_efault(void);
 
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index fdb1332..fa3c02d 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -86,7 +86,7 @@
 obj-$(CONFIG_SUN_PM)      += apc.o pmc.o
 
 obj-$(CONFIG_MODULES)     += module.o
-obj-$(CONFIG_MODULES)     += sparc_ksyms_$(BITS).o
+obj-$(CONFIG_MODULES)     += sparc_ksyms.o
 obj-$(CONFIG_SPARC_LED)   += led.o
 obj-$(CONFIG_KGDB)        += kgdb_$(BITS).o
 
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 07918ab..d85bdb9 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -29,6 +29,7 @@
 #include <asm/unistd.h>
 
 #include <asm/asmmacro.h>
+#include <asm/export.h>
 
 #define curptr      g6
 
@@ -1207,6 +1208,8 @@
 	
 	ret
 	restore
+EXPORT_SYMBOL(__udelay)
+EXPORT_SYMBOL(__ndelay)
 
 	/* Handle a software breakpoint */
 	/* We have to inform parent that child has stopped */
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index 3d92c0a..7bb317b 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -24,6 +24,7 @@
 #include <asm/thread_info.h>	/* TI_UWINMASK */
 #include <asm/errno.h>
 #include <asm/pgtsrmmu.h>	/* SRMMU_PGDIR_SHIFT */
+#include <asm/export.h>
 
 	.data
 /* The following are used with the prom_vector node-ops to figure out
@@ -60,6 +61,7 @@
  */
 	.globl empty_zero_page
 empty_zero_page:	.skip PAGE_SIZE
+EXPORT_SYMBOL(empty_zero_page)
 
 	.global root_flags
 	.global ram_flags
@@ -813,3 +815,4 @@
 __ret_efault:
         ret
          restore %g0, -EFAULT, %o0
+EXPORT_SYMBOL(__ret_efault)
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index a076b42..beba6c1 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -32,7 +32,8 @@
 #include <asm/estate.h>
 #include <asm/sfafsr.h>
 #include <asm/unistd.h>
-	
+#include <asm/export.h>
+
 /* This section from from _start to sparc64_boot_end should fit into
  * 0x0000000000404000 to 0x0000000000408000.
  */
@@ -143,6 +144,7 @@
 	.skip	64
 prom_root_node:
 	.word	0
+EXPORT_SYMBOL(prom_root_node)
 prom_mmu_ihandle_cache:
 	.word	0
 prom_boot_mapped_pc:
@@ -158,6 +160,7 @@
 	.word	0
 sun4v_chip_type:
 	.word	SUN4V_CHIP_INVALID
+EXPORT_SYMBOL(sun4v_chip_type)
 1:
 	rd	%pc, %l0
 
@@ -920,6 +923,7 @@
 	.globl	prom_tba, tlb_type
 prom_tba:	.xword	0
 tlb_type:	.word	0	/* Must NOT end up in BSS */
+EXPORT_SYMBOL(tlb_type)
 	.section	".fixup",#alloc,#execinstr
 
 	.globl	__ret_efault, __retl_efault, __ret_one, __retl_one
@@ -927,6 +931,7 @@
 	ret
 	 restore %g0, -EFAULT, %o0
 ENDPROC(__ret_efault)
+EXPORT_SYMBOL(__ret_efault)
 
 ENTRY(__retl_efault)
 	retl
diff --git a/arch/sparc/kernel/helpers.S b/arch/sparc/kernel/helpers.S
index 314dd0c..e4e5b83 100644
--- a/arch/sparc/kernel/helpers.S
+++ b/arch/sparc/kernel/helpers.S
@@ -15,6 +15,7 @@
 2:	retl
 	 nop
 	.size	__flushw_user,.-__flushw_user
+EXPORT_SYMBOL(__flushw_user)
 
 	/* Flush %fp and %i7 to the stack for all register
 	 * windows active inside of the cpu.  This allows
@@ -61,3 +62,4 @@
 	.size		hard_smp_processor_id,.-hard_smp_processor_id
 #endif
 	.size		real_hard_smp_processor_id,.-real_hard_smp_processor_id
+EXPORT_SYMBOL_GPL(real_hard_smp_processor_id)
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
index d127130..4116ee5 100644
--- a/arch/sparc/kernel/hvcalls.S
+++ b/arch/sparc/kernel/hvcalls.S
@@ -343,6 +343,7 @@
 0:	retl
 	 nop
 ENDPROC(sun4v_mach_set_watchdog)
+EXPORT_SYMBOL(sun4v_mach_set_watchdog)
 
 	/* No inputs and does not return.  */
 ENTRY(sun4v_mach_sir)
@@ -776,6 +777,7 @@
 	retl
 	 nop
 ENDPROC(sun4v_niagara_getperf)
+EXPORT_SYMBOL(sun4v_niagara_getperf)
 
 ENTRY(sun4v_niagara_setperf)
 	mov	HV_FAST_SET_PERFREG, %o5
@@ -783,6 +785,7 @@
 	retl
 	 nop
 ENDPROC(sun4v_niagara_setperf)
+EXPORT_SYMBOL(sun4v_niagara_setperf)
 
 ENTRY(sun4v_niagara2_getperf)
 	mov	%o0, %o4
@@ -792,6 +795,7 @@
 	retl
 	 nop
 ENDPROC(sun4v_niagara2_getperf)
+EXPORT_SYMBOL(sun4v_niagara2_getperf)
 
 ENTRY(sun4v_niagara2_setperf)
 	mov	HV_FAST_N2_SET_PERFREG, %o5
@@ -799,6 +803,7 @@
 	retl
 	 nop
 ENDPROC(sun4v_niagara2_setperf)
+EXPORT_SYMBOL(sun4v_niagara2_setperf)
 
 ENTRY(sun4v_reboot_data_set)
 	mov	HV_FAST_REBOOT_DATA_SET, %o5
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 9ddc492..ac082dd 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -127,7 +127,8 @@
 		if (copy_from_user(kbuf, (void __user *) uaddr, len))
 			return -EFAULT;
 	} else {
-		int len2 = access_process_vm(target, uaddr, kbuf, len, 0);
+		int len2 = access_process_vm(target, uaddr, kbuf, len,
+				FOLL_FORCE);
 		if (len2 != len)
 			return -EFAULT;
 	}
@@ -141,7 +142,8 @@
 		if (copy_to_user((void __user *) uaddr, kbuf, len))
 			return -EFAULT;
 	} else {
-		int len2 = access_process_vm(target, uaddr, kbuf, len, 1);
+		int len2 = access_process_vm(target, uaddr, kbuf, len,
+				FOLL_FORCE | FOLL_WRITE);
 		if (len2 != len)
 			return -EFAULT;
 	}
@@ -505,7 +507,8 @@
 				if (access_process_vm(target,
 						      (unsigned long)
 						      &reg_window[pos],
-						      k, sizeof(*k), 0)
+						      k, sizeof(*k),
+						      FOLL_FORCE)
 				    != sizeof(*k))
 					return -EFAULT;
 				k++;
@@ -531,12 +534,14 @@
 				if (access_process_vm(target,
 						      (unsigned long)
 						      &reg_window[pos],
-						      &reg, sizeof(reg), 0)
+						      &reg, sizeof(reg),
+						      FOLL_FORCE)
 				    != sizeof(reg))
 					return -EFAULT;
 				if (access_process_vm(target,
 						      (unsigned long) u,
-						      &reg, sizeof(reg), 1)
+						      &reg, sizeof(reg),
+						      FOLL_FORCE | FOLL_WRITE)
 				    != sizeof(reg))
 					return -EFAULT;
 				pos++;
@@ -615,7 +620,8 @@
 						      (unsigned long)
 						      &reg_window[pos],
 						      (void *) k,
-						      sizeof(*k), 1)
+						      sizeof(*k),
+						      FOLL_FORCE | FOLL_WRITE)
 				    != sizeof(*k))
 					return -EFAULT;
 				k++;
@@ -642,13 +648,15 @@
 				if (access_process_vm(target,
 						      (unsigned long)
 						      u,
-						      &reg, sizeof(reg), 0)
+						      &reg, sizeof(reg),
+						      FOLL_FORCE)
 				    != sizeof(reg))
 					return -EFAULT;
 				if (access_process_vm(target,
 						      (unsigned long)
 						      &reg_window[pos],
-						      &reg, sizeof(reg), 1)
+						      &reg, sizeof(reg),
+						      FOLL_FORCE | FOLL_WRITE)
 				    != sizeof(reg))
 					return -EFAULT;
 				pos++;
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
new file mode 100644
index 0000000..09aa69e
--- /dev/null
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -0,0 +1,12 @@
+/*
+ * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+
+/* This is needed only for drivers/sbus/char/openprom.c */
+EXPORT_SYMBOL(saved_command_line);
diff --git a/arch/sparc/kernel/sparc_ksyms_32.c b/arch/sparc/kernel/sparc_ksyms_32.c
deleted file mode 100644
index bf4ccb1..0000000
--- a/arch/sparc/kernel/sparc_ksyms_32.c
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
- */
-
-#include <linux/module.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/delay.h>
-#include <asm/head.h>
-#include <asm/dma.h>
-
-struct poll {
-	int fd;
-	short events;
-	short revents;
-};
-
-/* from entry.S */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__ndelay);
-
-/* from head_32.S */
-EXPORT_SYMBOL(__ret_efault);
-EXPORT_SYMBOL(empty_zero_page);
-
-/* Exporting a symbol from /init/main.c */
-EXPORT_SYMBOL(saved_command_line);
diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c
deleted file mode 100644
index 9e034f2..0000000
--- a/arch/sparc/kernel/sparc_ksyms_64.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
- *
- * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
- * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
- */
-
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <linux/bitops.h>
-
-#include <asm/cpudata.h>
-#include <asm/uaccess.h>
-#include <asm/spitfire.h>
-#include <asm/oplib.h>
-#include <asm/hypervisor.h>
-#include <asm/cacheflush.h>
-
-struct poll {
-	int fd;
-	short events;
-	short revents;
-};
-
-/* from helpers.S */
-EXPORT_SYMBOL(__flushw_user);
-EXPORT_SYMBOL_GPL(real_hard_smp_processor_id);
-
-/* from head_64.S */
-EXPORT_SYMBOL(__ret_efault);
-EXPORT_SYMBOL(tlb_type);
-EXPORT_SYMBOL(sun4v_chip_type);
-EXPORT_SYMBOL(prom_root_node);
-
-/* from hvcalls.S */
-EXPORT_SYMBOL(sun4v_niagara_getperf);
-EXPORT_SYMBOL(sun4v_niagara_setperf);
-EXPORT_SYMBOL(sun4v_niagara2_getperf);
-EXPORT_SYMBOL(sun4v_niagara2_setperf);
-EXPORT_SYMBOL(sun4v_mach_set_watchdog);
-
-/* from hweight.S */
-EXPORT_SYMBOL(__arch_hweight8);
-EXPORT_SYMBOL(__arch_hweight16);
-EXPORT_SYMBOL(__arch_hweight32);
-EXPORT_SYMBOL(__arch_hweight64);
-
-/* from ffs_ffz.S */
-EXPORT_SYMBOL(ffs);
-EXPORT_SYMBOL(__ffs);
-
-/* Exporting a symbol from /init/main.c */
-EXPORT_SYMBOL(saved_command_line);
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 3269b02..885f00e 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -43,5 +43,4 @@
 
 obj-$(CONFIG_SPARC64) += iomap.o
 obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o
-obj-y                 += ksyms.o
 obj-$(CONFIG_SPARC64) += PeeCeeI.o
diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S
index 3e6209e..97e1b21 100644
--- a/arch/sparc/lib/U1memcpy.S
+++ b/arch/sparc/lib/U1memcpy.S
@@ -7,6 +7,7 @@
 #ifdef __KERNEL__
 #include <asm/visasm.h>
 #include <asm/asi.h>
+#include <asm/export.h>
 #define GLOBAL_SPARE	g7
 #else
 #define GLOBAL_SPARE	g5
@@ -567,3 +568,4 @@
 	 mov		EX_RETVAL(%o4), %o0
 
 	.size		FUNC_NAME, .-FUNC_NAME
+EXPORT_SYMBOL(FUNC_NAME)
diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
index 62c2647b..1c7b6a3 100644
--- a/arch/sparc/lib/VISsave.S
+++ b/arch/sparc/lib/VISsave.S
@@ -13,6 +13,7 @@
 #include <asm/ptrace.h>
 #include <asm/visasm.h>
 #include <asm/thread_info.h>
+#include <asm/export.h>
 
 	/* On entry: %o5=current FPRS value, %g7 is callers address */
 	/* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */
@@ -79,3 +80,4 @@
 80:	jmpl		%g7 + %g0, %g0
 	 nop
 ENDPROC(VISenter)
+EXPORT_SYMBOL(VISenter)
diff --git a/arch/sparc/lib/ashldi3.S b/arch/sparc/lib/ashldi3.S
index 86f60de..c8b1cf7 100644
--- a/arch/sparc/lib/ashldi3.S
+++ b/arch/sparc/lib/ashldi3.S
@@ -6,6 +6,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/export.h>
 
 	.text
 ENTRY(__ashldi3)
@@ -33,3 +34,4 @@
 	retl
 	 nop
 ENDPROC(__ashldi3)
+EXPORT_SYMBOL(__ashldi3)
diff --git a/arch/sparc/lib/ashrdi3.S b/arch/sparc/lib/ashrdi3.S
index 6eb8ba2..4310256 100644
--- a/arch/sparc/lib/ashrdi3.S
+++ b/arch/sparc/lib/ashrdi3.S
@@ -6,6 +6,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/export.h>
 
 	.text
 ENTRY(__ashrdi3)
@@ -35,3 +36,4 @@
 	jmpl	%o7 + 8, %g0
 	 nop
 ENDPROC(__ashrdi3)
+EXPORT_SYMBOL(__ashrdi3)
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index a5c5a02..1c6a1bd 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -6,6 +6,7 @@
 #include <linux/linkage.h>
 #include <asm/asi.h>
 #include <asm/backoff.h>
+#include <asm/export.h>
 
 	.text
 
@@ -29,6 +30,7 @@
 	 nop;								\
 2:	BACKOFF_SPIN(%o2, %o3, 1b);					\
 ENDPROC(atomic_##op);							\
+EXPORT_SYMBOL(atomic_##op);
 
 #define ATOMIC_OP_RETURN(op)						\
 ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */	\
@@ -42,7 +44,8 @@
 	retl;								\
 	 sra	%g1, 0, %o0;						\
 2:	BACKOFF_SPIN(%o2, %o3, 1b);					\
-ENDPROC(atomic_##op##_return);
+ENDPROC(atomic_##op##_return);						\
+EXPORT_SYMBOL(atomic_##op##_return);
 
 #define ATOMIC_FETCH_OP(op)						\
 ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */	\
@@ -56,7 +59,8 @@
 	retl;								\
 	 sra	%g1, 0, %o0;						\
 2:	BACKOFF_SPIN(%o2, %o3, 1b);					\
-ENDPROC(atomic_fetch_##op);
+ENDPROC(atomic_fetch_##op);						\
+EXPORT_SYMBOL(atomic_fetch_##op);
 
 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
@@ -88,6 +92,7 @@
 	 nop;								\
 2:	BACKOFF_SPIN(%o2, %o3, 1b);					\
 ENDPROC(atomic64_##op);							\
+EXPORT_SYMBOL(atomic64_##op);
 
 #define ATOMIC64_OP_RETURN(op)						\
 ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */	\
@@ -101,7 +106,8 @@
 	retl;								\
 	 op	%g1, %o0, %o0;						\
 2:	BACKOFF_SPIN(%o2, %o3, 1b);					\
-ENDPROC(atomic64_##op##_return);
+ENDPROC(atomic64_##op##_return);					\
+EXPORT_SYMBOL(atomic64_##op##_return);
 
 #define ATOMIC64_FETCH_OP(op)						\
 ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */	\
@@ -115,7 +121,8 @@
 	retl;								\
 	 mov	%g1, %o0;						\
 2:	BACKOFF_SPIN(%o2, %o3, 1b);					\
-ENDPROC(atomic64_fetch_##op);
+ENDPROC(atomic64_fetch_##op);						\
+EXPORT_SYMBOL(atomic64_fetch_##op);
 
 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
 
@@ -147,3 +154,4 @@
 	 sub	%g1, 1, %o0
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
 ENDPROC(atomic64_dec_if_positive)
+EXPORT_SYMBOL(atomic64_dec_if_positive)
diff --git a/arch/sparc/lib/bitops.S b/arch/sparc/lib/bitops.S
index 36f72cc..7031bf1 100644
--- a/arch/sparc/lib/bitops.S
+++ b/arch/sparc/lib/bitops.S
@@ -6,6 +6,7 @@
 #include <linux/linkage.h>
 #include <asm/asi.h>
 #include <asm/backoff.h>
+#include <asm/export.h>
 
 	.text
 
@@ -29,6 +30,7 @@
 	 nop
 2:	BACKOFF_SPIN(%o3, %o4, 1b)
 ENDPROC(test_and_set_bit)
+EXPORT_SYMBOL(test_and_set_bit)
 
 ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */
 	BACKOFF_SETUP(%o3)
@@ -50,6 +52,7 @@
 	 nop
 2:	BACKOFF_SPIN(%o3, %o4, 1b)
 ENDPROC(test_and_clear_bit)
+EXPORT_SYMBOL(test_and_clear_bit)
 
 ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */
 	BACKOFF_SETUP(%o3)
@@ -71,6 +74,7 @@
 	 nop
 2:	BACKOFF_SPIN(%o3, %o4, 1b)
 ENDPROC(test_and_change_bit)
+EXPORT_SYMBOL(test_and_change_bit)
 
 ENTRY(set_bit) /* %o0=nr, %o1=addr */
 	BACKOFF_SETUP(%o3)
@@ -90,6 +94,7 @@
 	 nop
 2:	BACKOFF_SPIN(%o3, %o4, 1b)
 ENDPROC(set_bit)
+EXPORT_SYMBOL(set_bit)
 
 ENTRY(clear_bit) /* %o0=nr, %o1=addr */
 	BACKOFF_SETUP(%o3)
@@ -109,6 +114,7 @@
 	 nop
 2:	BACKOFF_SPIN(%o3, %o4, 1b)
 ENDPROC(clear_bit)
+EXPORT_SYMBOL(clear_bit)
 
 ENTRY(change_bit) /* %o0=nr, %o1=addr */
 	BACKOFF_SETUP(%o3)
@@ -128,3 +134,4 @@
 	 nop
 2:	BACKOFF_SPIN(%o3, %o4, 1b)
 ENDPROC(change_bit)
+EXPORT_SYMBOL(change_bit)
diff --git a/arch/sparc/lib/blockops.S b/arch/sparc/lib/blockops.S
index 3c77101..1f2692d5 100644
--- a/arch/sparc/lib/blockops.S
+++ b/arch/sparc/lib/blockops.S
@@ -6,6 +6,7 @@
 
 #include <linux/linkage.h>
 #include <asm/page.h>
+#include <asm/export.h>
 
 	/* Zero out 64 bytes of memory at (buf + offset).
 	 * Assumes %g1 contains zero.
@@ -64,6 +65,7 @@
 	retl
 	 nop
 ENDPROC(bzero_1page)
+EXPORT_SYMBOL(bzero_1page)
 
 ENTRY(__copy_1page)
 /* NOTE: If you change the number of insns of this routine, please check
@@ -87,3 +89,4 @@
 	retl
 	 nop
 ENDPROC(__copy_1page)
+EXPORT_SYMBOL(__copy_1page)
diff --git a/arch/sparc/lib/bzero.S b/arch/sparc/lib/bzero.S
index 8c05811..3bb1914 100644
--- a/arch/sparc/lib/bzero.S
+++ b/arch/sparc/lib/bzero.S
@@ -5,6 +5,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/export.h>
 
 	.text
 
@@ -78,6 +79,8 @@
 	 mov		%o3, %o0
 ENDPROC(__bzero)
 ENDPROC(memset)
+EXPORT_SYMBOL(__bzero)
+EXPORT_SYMBOL(memset)
 
 #define EX_ST(x,y)		\
 98:	x,y;			\
@@ -143,3 +146,4 @@
 	retl
 	 clr		%o0
 ENDPROC(__clear_user)
+EXPORT_SYMBOL(__clear_user)
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
index 0084c33..c9d8b62 100644
--- a/arch/sparc/lib/checksum_32.S
+++ b/arch/sparc/lib/checksum_32.S
@@ -14,6 +14,7 @@
  */
 
 #include <asm/errno.h>
+#include <asm/export.h>
 
 #define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5)	\
 	ldd	[buf + offset + 0x00], t0;			\
@@ -104,6 +105,7 @@
 	 * buffer of size 0x20.  Follow the code path for that case.
 	 */
 	.globl	csum_partial
+	EXPORT_SYMBOL(csum_partial)
 csum_partial:			/* %o0=buf, %o1=len, %o2=sum */
 	andcc	%o0, 0x7, %g0				! alignment problems?
 	bne	csum_partial_fix_alignment		! yep, handle it
@@ -335,6 +337,7 @@
 	 */
 	.align	8
 	.globl	__csum_partial_copy_sparc_generic
+	EXPORT_SYMBOL(__csum_partial_copy_sparc_generic)
 __csum_partial_copy_sparc_generic:
 					/* %o0=src, %o1=dest, %g1=len, %g7=sum */
 	xor	%o0, %o1, %o4		! get changing bits
diff --git a/arch/sparc/lib/checksum_64.S b/arch/sparc/lib/checksum_64.S
index 1d230f6..f673217 100644
--- a/arch/sparc/lib/checksum_64.S
+++ b/arch/sparc/lib/checksum_64.S
@@ -13,6 +13,7 @@
  *	BSD4.4 portable checksum routine
  */
 
+#include <asm/export.h>
 	.text
 
 csum_partial_fix_alignment:
@@ -37,6 +38,7 @@
 
 	.align		32
 	.globl		csum_partial
+	EXPORT_SYMBOL(csum_partial)
 csum_partial:		/* %o0=buff, %o1=len, %o2=sum */
 	prefetch	[%o0 + 0x000], #n_reads
 	clr		%o4
diff --git a/arch/sparc/lib/clear_page.S b/arch/sparc/lib/clear_page.S
index 46272df..f30d6b7 100644
--- a/arch/sparc/lib/clear_page.S
+++ b/arch/sparc/lib/clear_page.S
@@ -10,6 +10,7 @@
 #include <asm/pgtable.h>
 #include <asm/spitfire.h>
 #include <asm/head.h>
+#include <asm/export.h>
 
 	/* What we used to do was lock a TLB entry into a specific
 	 * TLB slot, clear the page with interrupts disabled, then
@@ -26,6 +27,7 @@
 	.text
 
 	.globl		_clear_page
+	EXPORT_SYMBOL(_clear_page)
 _clear_page:		/* %o0=dest */
 	ba,pt		%xcc, clear_page_common
 	 clr		%o4
@@ -35,6 +37,7 @@
 	 */
 	.align		32
 	.globl		clear_user_page
+	EXPORT_SYMBOL(clear_user_page)
 clear_user_page:	/* %o0=dest, %o1=vaddr */
 	lduw		[%g6 + TI_PRE_COUNT], %o2
 	sethi		%hi(PAGE_OFFSET), %g2
diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S
index 302c0e6..482de09 100644
--- a/arch/sparc/lib/copy_in_user.S
+++ b/arch/sparc/lib/copy_in_user.S
@@ -5,6 +5,7 @@
 
 #include <linux/linkage.h>
 #include <asm/asi.h>
+#include <asm/export.h>
 
 #define XCC xcc
 
@@ -90,3 +91,4 @@
 	retl
 	 clr		%o0
 ENDPROC(___copy_in_user)
+EXPORT_SYMBOL(___copy_in_user)
diff --git a/arch/sparc/lib/copy_page.S b/arch/sparc/lib/copy_page.S
index dd16c61..7197b72 100644
--- a/arch/sparc/lib/copy_page.S
+++ b/arch/sparc/lib/copy_page.S
@@ -10,6 +10,7 @@
 #include <asm/pgtable.h>
 #include <asm/spitfire.h>
 #include <asm/head.h>
+#include <asm/export.h>
 
 	/* What we used to do was lock a TLB entry into a specific
 	 * TLB slot, clear the page with interrupts disabled, then
@@ -44,6 +45,7 @@
 	.align		32
 	.globl		copy_user_page
 	.type		copy_user_page,#function
+	EXPORT_SYMBOL(copy_user_page)
 copy_user_page:		/* %o0=dest, %o1=src, %o2=vaddr */
 	lduw		[%g6 + TI_PRE_COUNT], %o4
 	sethi		%hi(PAGE_OFFSET), %g2
diff --git a/arch/sparc/lib/copy_user.S b/arch/sparc/lib/copy_user.S
index ef095b6..cea644d 100644
--- a/arch/sparc/lib/copy_user.S
+++ b/arch/sparc/lib/copy_user.S
@@ -15,6 +15,7 @@
 #include <asm/asmmacro.h>
 #include <asm/page.h>
 #include <asm/thread_info.h>
+#include <asm/export.h>
 
 /* Work around cpp -rob */
 #define ALLOC #alloc
@@ -119,6 +120,7 @@
 __copy_user_begin:
 
 	.globl	__copy_user
+	EXPORT_SYMBOL(__copy_user)
 dword_align:
 	andcc	%o1, 1, %g0
 	be	4f
diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S
index e566c77..0ecbafc 100644
--- a/arch/sparc/lib/csum_copy.S
+++ b/arch/sparc/lib/csum_copy.S
@@ -3,6 +3,8 @@
  * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
  */
 
+#include <asm/export.h>
+
 #ifdef __KERNEL__
 #define GLOBAL_SPARE	%g7
 #else
@@ -63,6 +65,7 @@
 	 add		%o5, %o4, %o4
 
 	.globl		FUNC_NAME
+	EXPORT_SYMBOL(FUNC_NAME)
 FUNC_NAME:		/* %o0=src, %o1=dst, %o2=len, %o3=sum */
 	LOAD(prefetch, %o0 + 0x000, #n_reads)
 	xor		%o0, %o1, %g1
diff --git a/arch/sparc/lib/divdi3.S b/arch/sparc/lib/divdi3.S
index 9614b48..a2b5a97 100644
--- a/arch/sparc/lib/divdi3.S
+++ b/arch/sparc/lib/divdi3.S
@@ -17,6 +17,7 @@
 the Free Software Foundation, 59 Temple Place - Suite 330,
 Boston, MA 02111-1307, USA.  */
 
+#include <asm/export.h>
 	.text
 	.align 4
 	.globl __divdi3
@@ -279,3 +280,4 @@
 .LL81:
 	ret
 	restore
+EXPORT_SYMBOL(__divdi3)
diff --git a/arch/sparc/lib/ffs.S b/arch/sparc/lib/ffs.S
index b39389f..23aab14 100644
--- a/arch/sparc/lib/ffs.S
+++ b/arch/sparc/lib/ffs.S
@@ -1,4 +1,5 @@
 #include <linux/linkage.h>
+#include <asm/export.h>
 
 	.register	%g2,#scratch
 
@@ -65,6 +66,8 @@
 	 add	%o2, %g1, %o0
 ENDPROC(ffs)
 ENDPROC(__ffs)
+EXPORT_SYMBOL(__ffs)
+EXPORT_SYMBOL(ffs)
 
 	.section	.popc_6insn_patch, "ax"
 	.word		ffs
diff --git a/arch/sparc/lib/hweight.S b/arch/sparc/lib/hweight.S
index 95414e0..f9985f1 100644
--- a/arch/sparc/lib/hweight.S
+++ b/arch/sparc/lib/hweight.S
@@ -1,4 +1,5 @@
 #include <linux/linkage.h>
+#include <asm/export.h>
 
 	.text
 	.align	32
@@ -7,6 +8,7 @@
 	 nop
 	nop
 ENDPROC(__arch_hweight8)
+EXPORT_SYMBOL(__arch_hweight8)
 	.section	.popc_3insn_patch, "ax"
 	.word		__arch_hweight8
 	sllx		%o0, 64-8, %g1
@@ -19,6 +21,7 @@
 	 nop
 	nop
 ENDPROC(__arch_hweight16)
+EXPORT_SYMBOL(__arch_hweight16)
 	.section	.popc_3insn_patch, "ax"
 	.word		__arch_hweight16
 	sllx		%o0, 64-16, %g1
@@ -31,6 +34,7 @@
 	 nop
 	nop
 ENDPROC(__arch_hweight32)
+EXPORT_SYMBOL(__arch_hweight32)
 	.section	.popc_3insn_patch, "ax"
 	.word		__arch_hweight32
 	sllx		%o0, 64-32, %g1
@@ -43,6 +47,7 @@
 	 nop
 	nop
 ENDPROC(__arch_hweight64)
+EXPORT_SYMBOL(__arch_hweight64)
 	.section	.popc_3insn_patch, "ax"
 	.word		__arch_hweight64
 	retl
diff --git a/arch/sparc/lib/ipcsum.S b/arch/sparc/lib/ipcsum.S
index 4742d59..5d61648 100644
--- a/arch/sparc/lib/ipcsum.S
+++ b/arch/sparc/lib/ipcsum.S
@@ -1,4 +1,5 @@
 #include <linux/linkage.h>
+#include <asm/export.h>
 
 	.text
 ENTRY(ip_fast_csum) /* %o0 = iph, %o1 = ihl */
@@ -31,3 +32,4 @@
 	retl
 	 and	%o2, %o1, %o0
 ENDPROC(ip_fast_csum)
+EXPORT_SYMBOL(ip_fast_csum)
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
deleted file mode 100644
index de5e978..0000000
--- a/arch/sparc/lib/ksyms.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Export of symbols defined in assembler
- */
-
-/* Tell string.h we don't want memcpy etc. as cpp defines */
-#define EXPORT_SYMTAB_STROPS
-
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include <asm/checksum.h>
-#include <asm/uaccess.h>
-#include <asm/ftrace.h>
-
-/* string functions */
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncmp);
-
-/* mem* functions */
-extern void *__memscan_zero(void *, size_t);
-extern void *__memscan_generic(void *, int, size_t);
-extern void *__bzero(void *, size_t);
-
-EXPORT_SYMBOL(memscan);
-EXPORT_SYMBOL(__memscan_zero);
-EXPORT_SYMBOL(__memscan_generic);
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(__bzero);
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial);
-
-#ifdef CONFIG_MCOUNT
-EXPORT_SYMBOL(_mcount);
-#endif
-
-/*
- * sparc
- */
-#ifdef CONFIG_SPARC32
-extern int __ashrdi3(int, int);
-extern int __ashldi3(int, int);
-extern int __lshrdi3(int, int);
-extern int __muldi3(int, int);
-extern int __divdi3(int, int);
-
-extern void (*__copy_1page)(void *, const void *);
-extern void (*bzero_1page)(void *);
-
-extern void ___rw_read_enter(void);
-extern void ___rw_read_try(void);
-extern void ___rw_read_exit(void);
-extern void ___rw_write_enter(void);
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
-
-/* Special internal versions of library functions. */
-EXPORT_SYMBOL(__copy_1page);
-EXPORT_SYMBOL(__memmove);
-EXPORT_SYMBOL(bzero_1page);
-
-/* Moving data to/from/in userspace. */
-EXPORT_SYMBOL(__copy_user);
-
-/* Used by asm/spinlock.h */
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(___rw_read_enter);
-EXPORT_SYMBOL(___rw_read_try);
-EXPORT_SYMBOL(___rw_read_exit);
-EXPORT_SYMBOL(___rw_write_enter);
-#endif
-
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__muldi3);
-EXPORT_SYMBOL(__divdi3);
-#endif
-
-/*
- * sparc64
- */
-#ifdef CONFIG_SPARC64
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(__csum_partial_copy_from_user);
-EXPORT_SYMBOL(__csum_partial_copy_to_user);
-EXPORT_SYMBOL(ip_fast_csum);
-
-/* Moving data to/from/in userspace. */
-EXPORT_SYMBOL(___copy_to_user);
-EXPORT_SYMBOL(___copy_from_user);
-EXPORT_SYMBOL(___copy_in_user);
-EXPORT_SYMBOL(__clear_user);
-
-/* Atomic counter implementation. */
-#define ATOMIC_OP(op)							\
-EXPORT_SYMBOL(atomic_##op);						\
-EXPORT_SYMBOL(atomic64_##op);
-
-#define ATOMIC_OP_RETURN(op)						\
-EXPORT_SYMBOL(atomic_##op##_return);					\
-EXPORT_SYMBOL(atomic64_##op##_return);
-
-#define ATOMIC_FETCH_OP(op)						\
-EXPORT_SYMBOL(atomic_fetch_##op);					\
-EXPORT_SYMBOL(atomic64_fetch_##op);
-
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
-
-ATOMIC_OPS(add)
-ATOMIC_OPS(sub)
-
-#undef ATOMIC_OPS
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
-
-ATOMIC_OPS(and)
-ATOMIC_OPS(or)
-ATOMIC_OPS(xor)
-
-#undef ATOMIC_OPS
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
-EXPORT_SYMBOL(atomic64_dec_if_positive);
-
-/* Atomic bit operations. */
-EXPORT_SYMBOL(test_and_set_bit);
-EXPORT_SYMBOL(test_and_clear_bit);
-EXPORT_SYMBOL(test_and_change_bit);
-EXPORT_SYMBOL(set_bit);
-EXPORT_SYMBOL(clear_bit);
-EXPORT_SYMBOL(change_bit);
-
-/* Special internal versions of library functions. */
-EXPORT_SYMBOL(_clear_page);
-EXPORT_SYMBOL(clear_user_page);
-EXPORT_SYMBOL(copy_user_page);
-
-/* RAID code needs this */
-void VISenter(void);
-EXPORT_SYMBOL(VISenter);
-
-extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
-extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
-		unsigned long *);
-extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
-		unsigned long *, unsigned long *);
-extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
-		unsigned long *, unsigned long *, unsigned long *);
-EXPORT_SYMBOL(xor_vis_2);
-EXPORT_SYMBOL(xor_vis_3);
-EXPORT_SYMBOL(xor_vis_4);
-EXPORT_SYMBOL(xor_vis_5);
-
-extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
-extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
-		unsigned long *);
-extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
-		unsigned long *, unsigned long *);
-extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
-		unsigned long *, unsigned long *, unsigned long *);
-
-EXPORT_SYMBOL(xor_niagara_2);
-EXPORT_SYMBOL(xor_niagara_3);
-EXPORT_SYMBOL(xor_niagara_4);
-EXPORT_SYMBOL(xor_niagara_5);
-#endif
diff --git a/arch/sparc/lib/locks.S b/arch/sparc/lib/locks.S
index 64f53f2..f38c4e5 100644
--- a/arch/sparc/lib/locks.S
+++ b/arch/sparc/lib/locks.S
@@ -10,6 +10,7 @@
 #include <asm/psr.h>
 #include <asm/smp.h>
 #include <asm/spinlock.h>
+#include <asm/export.h>
 
 	.text
 	.align	4
@@ -48,6 +49,7 @@
 	 ld	[%g1], %g2
 
 	.globl	___rw_read_enter
+EXPORT_SYMBOL(___rw_read_enter)
 ___rw_read_enter:
 	orcc	%g2, 0x0, %g0
 	bne,a	___rw_read_enter_spin_on_wlock
@@ -59,6 +61,7 @@
 	 mov	%g4, %o7
 
 	.globl	___rw_read_exit
+EXPORT_SYMBOL(___rw_read_exit)
 ___rw_read_exit:
 	orcc	%g2, 0x0, %g0
 	bne,a	___rw_read_exit_spin_on_wlock
@@ -70,6 +73,7 @@
 	 mov	%g4, %o7
 
 	.globl	___rw_read_try
+EXPORT_SYMBOL(___rw_read_try)
 ___rw_read_try:
 	orcc	%g2, 0x0, %g0
 	bne	___rw_read_try_spin_on_wlock
@@ -81,6 +85,7 @@
 	 mov	%g4, %o7
 
 	.globl	___rw_write_enter
+EXPORT_SYMBOL(___rw_write_enter)
 ___rw_write_enter:
 	orcc	%g2, 0x0, %g0
 	bne	___rw_write_enter_spin_on_wlock
diff --git a/arch/sparc/lib/lshrdi3.S b/arch/sparc/lib/lshrdi3.S
index 60ebc7c..c9b9373 100644
--- a/arch/sparc/lib/lshrdi3.S
+++ b/arch/sparc/lib/lshrdi3.S
@@ -1,4 +1,5 @@
 #include <linux/linkage.h>
+#include <asm/export.h>
 
 ENTRY(__lshrdi3)
 	cmp	%o2, 0
@@ -25,3 +26,4 @@
 	retl 
 	 nop 
 ENDPROC(__lshrdi3)
+EXPORT_SYMBOL(__lshrdi3)
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 0b0ed4d..194f383 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -6,6 +6,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/export.h>
 
 /*
  * This is the main variant and is called by C code.  GCC's -pg option
@@ -16,6 +17,7 @@
 	.align		32
 	.globl		_mcount
 	.type		_mcount,#function
+	EXPORT_SYMBOL(_mcount)
 	.globl		mcount
 	.type		mcount,#function
 _mcount:
diff --git a/arch/sparc/lib/memcmp.S b/arch/sparc/lib/memcmp.S
index efa106c..cee7f30 100644
--- a/arch/sparc/lib/memcmp.S
+++ b/arch/sparc/lib/memcmp.S
@@ -6,6 +6,7 @@
 
 #include <linux/linkage.h>
 #include <asm/asm.h>
+#include <asm/export.h>
 
 	.text
 ENTRY(memcmp)
@@ -25,3 +26,4 @@
 2:	retl
 	 mov	0, %o0
 ENDPROC(memcmp)
+EXPORT_SYMBOL(memcmp)
diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S
index 4d8c497..8913fea 100644
--- a/arch/sparc/lib/memcpy.S
+++ b/arch/sparc/lib/memcpy.S
@@ -7,6 +7,7 @@
  * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  */
 
+#include <asm/export.h>
 #define FUNC(x) 		\
 	.globl	x;		\
 	.type	x,@function;	\
@@ -58,93 +59,11 @@
 	stb	%t0, [%dst - (offset) - 0x02]; \
 	stb	%t1, [%dst - (offset) - 0x01];
 
-/* Both these macros have to start with exactly the same insn */
-#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
-	ldd	[%src - (offset) - 0x20], %t0; \
-	ldd	[%src - (offset) - 0x18], %t2; \
-	ldd	[%src - (offset) - 0x10], %t4; \
-	ldd	[%src - (offset) - 0x08], %t6; \
-	st	%t0, [%dst - (offset) - 0x20]; \
-	st	%t1, [%dst - (offset) - 0x1c]; \
-	st	%t2, [%dst - (offset) - 0x18]; \
-	st	%t3, [%dst - (offset) - 0x14]; \
-	st	%t4, [%dst - (offset) - 0x10]; \
-	st	%t5, [%dst - (offset) - 0x0c]; \
-	st	%t6, [%dst - (offset) - 0x08]; \
-	st	%t7, [%dst - (offset) - 0x04];
-
-#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
-	ldd	[%src - (offset) - 0x20], %t0; \
-	ldd	[%src - (offset) - 0x18], %t2; \
-	ldd	[%src - (offset) - 0x10], %t4; \
-	ldd	[%src - (offset) - 0x08], %t6; \
-	std	%t0, [%dst - (offset) - 0x20]; \
-	std	%t2, [%dst - (offset) - 0x18]; \
-	std	%t4, [%dst - (offset) - 0x10]; \
-	std	%t6, [%dst - (offset) - 0x08];
-
-#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
-	ldd	[%src + (offset) + 0x00], %t0; \
-	ldd	[%src + (offset) + 0x08], %t2; \
-	st	%t0, [%dst + (offset) + 0x00]; \
-	st	%t1, [%dst + (offset) + 0x04]; \
-	st	%t2, [%dst + (offset) + 0x08]; \
-	st	%t3, [%dst + (offset) + 0x0c];
-
-#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
-	ldub	[%src + (offset) + 0x00], %t0; \
-	ldub	[%src + (offset) + 0x01], %t1; \
-	stb	%t0, [%dst + (offset) + 0x00]; \
-	stb	%t1, [%dst + (offset) + 0x01];
-
-#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
-	ldd	[%src + (offset) + 0x00], %t0; \
-	ldd	[%src + (offset) + 0x08], %t2; \
-	srl	%t0, shir, %t5; \
-	srl	%t1, shir, %t6; \
-	sll	%t0, shil, %t0; \
-	or	%t5, %prev, %t5; \
-	sll	%t1, shil, %prev; \
-	or	%t6, %t0, %t0; \
-	srl	%t2, shir, %t1; \
-	srl	%t3, shir, %t6; \
-	sll	%t2, shil, %t2; \
-	or	%t1, %prev, %t1; \
-	std	%t4, [%dst + (offset) + (offset2) - 0x04]; \
-	std	%t0, [%dst + (offset) + (offset2) + 0x04]; \
-	sll	%t3, shil, %prev; \
-	or	%t6, %t2, %t4;
-
-#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
-	ldd	[%src + (offset) + 0x00], %t0; \
-	ldd	[%src + (offset) + 0x08], %t2; \
-	srl	%t0, shir, %t4;	\
-	srl	%t1, shir, %t5;	\
-	sll	%t0, shil, %t6;	\
-	or	%t4, %prev, %t0; \
-	sll	%t1, shil, %prev; \
-	or	%t5, %t6, %t1; \
-	srl	%t2, shir, %t4;	\
-	srl	%t3, shir, %t5;	\
-	sll	%t2, shil, %t6; \
-	or	%t4, %prev, %t2; \
-	sll	%t3, shil, %prev; \
-	or	%t5, %t6, %t3; \
-	std	%t0, [%dst + (offset) + (offset2) + 0x00]; \
-	std	%t2, [%dst + (offset) + (offset2) + 0x08];
-
 	.text
 	.align	4
 
-0:
-	retl
-	 nop		! Only bcopy returns here and it retuns void...
-
-#ifdef __KERNEL__
-FUNC(amemmove)
-FUNC(__memmove)
-#endif
 FUNC(memmove)
+EXPORT_SYMBOL(memmove)
 	cmp		%o0, %o1
 	mov		%o0, %g7
 	bleu		9f
@@ -202,6 +121,7 @@
 	 add		%o0, 2, %o0
 
 FUNC(memcpy)	/* %o0=dst %o1=src %o2=len */
+EXPORT_SYMBOL(memcpy)
 
 	sub		%o0, %o1, %o4
 	mov		%o0, %g7
diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S
index 857ad4f..012cdb6 100644
--- a/arch/sparc/lib/memmove.S
+++ b/arch/sparc/lib/memmove.S
@@ -5,6 +5,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/export.h>
 
 	.text
 ENTRY(memmove) /* o0=dst o1=src o2=len */
@@ -57,3 +58,4 @@
 	 stb		%g7, [%o0 - 0x1]
 	ba,a,pt		%xcc, 99b
 ENDPROC(memmove)
+EXPORT_SYMBOL(memmove)
diff --git a/arch/sparc/lib/memscan_32.S b/arch/sparc/lib/memscan_32.S
index 4ff1657..51ce690 100644
--- a/arch/sparc/lib/memscan_32.S
+++ b/arch/sparc/lib/memscan_32.S
@@ -4,6 +4,8 @@
  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
  */
 
+#include <asm/export.h>
+
 /* In essence, this is just a fancy strlen. */
 
 #define LO_MAGIC 0x01010101
@@ -13,6 +15,8 @@
 	.align	4
 	.globl	__memscan_zero, __memscan_generic
 	.globl	memscan
+EXPORT_SYMBOL(__memscan_zero)
+EXPORT_SYMBOL(__memscan_generic)
 __memscan_zero:
 	/* %o0 = addr, %o1 = size */
 	cmp	%o1, 0
diff --git a/arch/sparc/lib/memscan_64.S b/arch/sparc/lib/memscan_64.S
index 5686dfa..daa96f4 100644
--- a/arch/sparc/lib/memscan_64.S
+++ b/arch/sparc/lib/memscan_64.S
@@ -5,6 +5,8 @@
  * Copyright (C) 1998 David S. Miller (davem@redhat.com)
  */
 
+	#include <asm/export.h>
+
 #define HI_MAGIC	0x8080808080808080
 #define LO_MAGIC	0x0101010101010101
 #define ASI_PL		0x88
@@ -13,6 +15,8 @@
 	.align	32
 	.globl		__memscan_zero, __memscan_generic
 	.globl		memscan
+	EXPORT_SYMBOL(__memscan_zero)
+	EXPORT_SYMBOL(__memscan_generic)
 
 __memscan_zero:
 	/* %o0 = bufp, %o1 = size */
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
index f75e690..bb539b4 100644
--- a/arch/sparc/lib/memset.S
+++ b/arch/sparc/lib/memset.S
@@ -9,6 +9,7 @@
  */
 
 #include <asm/ptrace.h>
+#include <asm/export.h>
 
 /* Work around cpp -rob */
 #define ALLOC #alloc
@@ -63,6 +64,8 @@
 
 	.globl	__bzero
 	.globl	memset
+	EXPORT_SYMBOL(__bzero)
+	EXPORT_SYMBOL(memset)
 	.globl	__memset_start, __memset_end
 __memset_start:
 memset:
diff --git a/arch/sparc/lib/muldi3.S b/arch/sparc/lib/muldi3.S
index 9794939..17a0f49 100644
--- a/arch/sparc/lib/muldi3.S
+++ b/arch/sparc/lib/muldi3.S
@@ -17,6 +17,7 @@
 the Free Software Foundation, 59 Temple Place - Suite 330,
 Boston, MA 02111-1307, USA.  */
 
+#include <asm/export.h>
 	.text
 	.align 4
 	.globl __muldi3
@@ -74,3 +75,4 @@
 	add  %l2, %l0, %i0
 	ret 
 	restore  %g0, %l3, %o1
+EXPORT_SYMBOL(__muldi3)
diff --git a/arch/sparc/lib/strlen.S b/arch/sparc/lib/strlen.S
index 536f835..ca0e707 100644
--- a/arch/sparc/lib/strlen.S
+++ b/arch/sparc/lib/strlen.S
@@ -7,6 +7,7 @@
 
 #include <linux/linkage.h>
 #include <asm/asm.h>
+#include <asm/export.h>
 
 #define LO_MAGIC 0x01010101
 #define HI_MAGIC 0x80808080
@@ -78,3 +79,4 @@
 	retl
 	 mov	2, %o0
 ENDPROC(strlen)
+EXPORT_SYMBOL(strlen)
diff --git a/arch/sparc/lib/strncmp_32.S b/arch/sparc/lib/strncmp_32.S
index c0d1b56..e3fe014 100644
--- a/arch/sparc/lib/strncmp_32.S
+++ b/arch/sparc/lib/strncmp_32.S
@@ -4,6 +4,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/export.h>
 
 	.text
 ENTRY(strncmp)
@@ -116,3 +117,4 @@
 	retl
 	 sub	%o3, %o0, %o0
 ENDPROC(strncmp)
+EXPORT_SYMBOL(strncmp)
diff --git a/arch/sparc/lib/strncmp_64.S b/arch/sparc/lib/strncmp_64.S
index 0656627..efb5f88 100644
--- a/arch/sparc/lib/strncmp_64.S
+++ b/arch/sparc/lib/strncmp_64.S
@@ -6,6 +6,7 @@
 
 #include <linux/linkage.h>
 #include <asm/asi.h>
+#include <asm/export.h>
 
 	.text
 ENTRY(strncmp)
@@ -28,3 +29,4 @@
 	retl
 	 clr	%o0
 ENDPROC(strncmp)
+EXPORT_SYMBOL(strncmp)
diff --git a/arch/sparc/lib/xor.S b/arch/sparc/lib/xor.S
index 2c05641..45a49cb 100644
--- a/arch/sparc/lib/xor.S
+++ b/arch/sparc/lib/xor.S
@@ -13,6 +13,7 @@
 #include <asm/asi.h>
 #include <asm/dcu.h>
 #include <asm/spitfire.h>
+#include <asm/export.h>
 
 /*
  *	Requirements:
@@ -90,6 +91,7 @@
 	retl
 	  wr	%g0, 0, %fprs
 ENDPROC(xor_vis_2)
+EXPORT_SYMBOL(xor_vis_2)
 
 ENTRY(xor_vis_3)
 	rd	%fprs, %o5
@@ -156,6 +158,7 @@
 	retl
 	 wr	%g0, 0, %fprs
 ENDPROC(xor_vis_3)
+EXPORT_SYMBOL(xor_vis_3)
 
 ENTRY(xor_vis_4)
 	rd	%fprs, %o5
@@ -241,6 +244,7 @@
 	retl
 	 wr	%g0, 0, %fprs
 ENDPROC(xor_vis_4)
+EXPORT_SYMBOL(xor_vis_4)
 
 ENTRY(xor_vis_5)
 	save	%sp, -192, %sp
@@ -347,6 +351,7 @@
 	ret
 	 restore
 ENDPROC(xor_vis_5)
+EXPORT_SYMBOL(xor_vis_5)
 
 	/* Niagara versions. */
 ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */
@@ -393,6 +398,7 @@
 	ret
 	 restore
 ENDPROC(xor_niagara_2)
+EXPORT_SYMBOL(xor_niagara_2)
 
 ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
 	save		%sp, -192, %sp
@@ -454,6 +460,7 @@
 	ret
 	 restore
 ENDPROC(xor_niagara_3)
+EXPORT_SYMBOL(xor_niagara_3)
 
 ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
 	save		%sp, -192, %sp
@@ -536,6 +543,7 @@
 	ret
 	 restore
 ENDPROC(xor_niagara_4)
+EXPORT_SYMBOL(xor_niagara_4)
 
 ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */
 	save		%sp, -192, %sp
@@ -634,3 +642,4 @@
 	ret
 	 restore
 ENDPROC(xor_niagara_5)
+EXPORT_SYMBOL(xor_niagara_5)
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 4e06750..cd0e32b 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -238,7 +238,8 @@
 		pages += nr;
 
 		ret = get_user_pages_unlocked(start,
-			(end - start) >> PAGE_SHIFT, write, 0, pages);
+			(end - start) >> PAGE_SHIFT, pages,
+			write ? FOLL_WRITE : 0);
 
 		/* Have to be a bit careful with return values */
 		if (nr > 0) {
diff --git a/arch/tile/mm/mmap.c b/arch/tile/mm/mmap.c
index 851a94e..ef61c59 100644
--- a/arch/tile/mm/mmap.c
+++ b/arch/tile/mm/mmap.c
@@ -88,6 +88,5 @@
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	unsigned long range_end = mm->brk + 0x02000000;
-	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+	return randomize_page(mm->brk, 0x02000000);
 }
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
index 00299c9..d7c6b67 100644
--- a/arch/unicore32/kernel/process.c
+++ b/arch/unicore32/kernel/process.c
@@ -295,8 +295,7 @@
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	unsigned long range_end = mm->brk + 0x02000000;
-	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+	return randomize_page(mm->brk, 0x02000000);
 }
 
 /*
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index d28bdab..7ef4a09 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -255,7 +255,6 @@
 CONFIG_DMADEVICES=y
 CONFIG_EEEPC_LAPTOP=y
 CONFIG_AMD_IOMMU=y
-CONFIG_AMD_IOMMU_STATS=y
 CONFIG_INTEL_IOMMU=y
 # CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
 CONFIG_EFI_VARS=y
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index b75a8bc..21b352a 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -44,6 +44,7 @@
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/export.h>
 
 	.section .entry.text, "ax"
 
@@ -991,6 +992,7 @@
 	jmp	ftrace_stub
 END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
+EXPORT_SYMBOL(mcount)
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index c98ec2e..ef766a3 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -35,6 +35,7 @@
 #include <asm/asm.h>
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
+#include <asm/export.h>
 #include <linux/err.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
@@ -875,6 +876,7 @@
 	popfq
 	ret
 END(native_load_gs_index)
+EXPORT_SYMBOL(native_load_gs_index)
 
 	_ASM_EXTABLE(.Lgs_change, bad_gs)
 	.section .fixup, "ax"
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index f848572..2b36185 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -386,3 +386,6 @@
 377	i386	copy_file_range		sys_copy_file_range
 378	i386	preadv2			sys_preadv2			compat_sys_preadv2
 379	i386	pwritev2		sys_pwritev2			compat_sys_pwritev2
+380	i386	pkey_mprotect		sys_pkey_mprotect
+381	i386	pkey_alloc		sys_pkey_alloc
+382	i386	pkey_free		sys_pkey_free
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index e9ce9c7..e93ef0b 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -335,6 +335,9 @@
 326	common	copy_file_range		sys_copy_file_range
 327	64	preadv2			sys_preadv2
 328	64	pwritev2		sys_pwritev2
+329	common	pkey_mprotect		sys_pkey_mprotect
+330	common	pkey_alloc		sys_pkey_alloc
+331	common	pkey_free		sys_pkey_free
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/arch/x86/entry/syscalls/syscalltbl.sh b/arch/x86/entry/syscalls/syscalltbl.sh
index cd3d301..751d1f9 100644
--- a/arch/x86/entry/syscalls/syscalltbl.sh
+++ b/arch/x86/entry/syscalls/syscalltbl.sh
@@ -10,8 +10,11 @@
 
     # Entry can be either just a function name or "function/qualifier"
     real_entry="${entry%%/*}"
-    qualifier="${entry:${#real_entry}}"		# Strip the function name
-    qualifier="${qualifier:1}"			# Strip the slash, if any
+    if [ "$entry" = "$real_entry" ]; then
+        qualifier=
+    else
+        qualifier=${entry#*/}
+    fi
 
     echo "__SYSCALL_${abi}($nr, $real_entry, $qualifier)"
 }
@@ -22,7 +25,7 @@
     entry="$3"
     compat="$4"
 
-    if [ "$abi" == "64" -a -n "$compat" ]; then
+    if [ "$abi" = "64" -a -n "$compat" ]; then
 	echo "a compat entry for a 64-bit syscall makes no sense" >&2
 	exit 1
     fi
@@ -45,17 +48,17 @@
 grep '^[0-9]' "$in" | sort -n | (
     while read nr abi name entry compat; do
 	abi=`echo "$abi" | tr '[a-z]' '[A-Z]'`
-	if [ "$abi" == "COMMON" -o "$abi" == "64" ]; then
+	if [ "$abi" = "COMMON" -o "$abi" = "64" ]; then
 	    # COMMON is the same as 64, except that we don't expect X32
 	    # programs to use it.  Our expectation has nothing to do with
 	    # any generated code, so treat them the same.
 	    emit 64 "$nr" "$entry" "$compat"
-	elif [ "$abi" == "X32" ]; then
+	elif [ "$abi" = "X32" ]; then
 	    # X32 is equivalent to 64 on an X32-compatible kernel.
 	    echo "#ifdef CONFIG_X86_X32_ABI"
 	    emit 64 "$nr" "$entry" "$compat"
 	    echo "#endif"
-	elif [ "$abi" == "I386" ]; then
+	elif [ "$abi" = "I386" ]; then
 	    emit "$abi" "$nr" "$entry" "$compat"
 	else
 	    echo "Unknown abi $abi" >&2
diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S
index e5a1711..fee6bc7 100644
--- a/arch/x86/entry/thunk_32.S
+++ b/arch/x86/entry/thunk_32.S
@@ -6,6 +6,7 @@
  */
 	#include <linux/linkage.h>
 	#include <asm/asm.h>
+	#include <asm/export.h>
 
 	/* put return address in eax (arg1) */
 	.macro THUNK name, func, put_ret_addr_in_eax=0
@@ -36,5 +37,7 @@
 #ifdef CONFIG_PREEMPT
 	THUNK ___preempt_schedule, preempt_schedule
 	THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
+	EXPORT_SYMBOL(___preempt_schedule)
+	EXPORT_SYMBOL(___preempt_schedule_notrace)
 #endif
 
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index 627ecbc..be36bf4 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -8,6 +8,7 @@
 #include <linux/linkage.h>
 #include "calling.h"
 #include <asm/asm.h>
+#include <asm/export.h>
 
 	/* rdi:	arg1 ... normal C conventions. rax is saved/restored. */
 	.macro THUNK name, func, put_ret_addr_in_rdi=0
@@ -49,6 +50,8 @@
 #ifdef CONFIG_PREEMPT
 	THUNK ___preempt_schedule, preempt_schedule
 	THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
+	EXPORT_SYMBOL(___preempt_schedule)
+	EXPORT_SYMBOL(___preempt_schedule_notrace)
 #endif
 
 #if defined(CONFIG_TRACE_IRQFLAGS) \
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a3a9eb8..eab0915 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3898,6 +3898,7 @@
 		break;
 
 	case INTEL_FAM6_XEON_PHI_KNL:
+	case INTEL_FAM6_XEON_PHI_KNM:
 		memcpy(hw_cache_event_ids,
 		       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs,
@@ -3912,7 +3913,7 @@
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
 
-		pr_cont("Knights Landing events, ");
+		pr_cont("Knights Landing/Mill events, ");
 		break;
 
 	case INTEL_FAM6_SKYLAKE_MOBILE:
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index fc6cf21..81b321a 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -458,8 +458,8 @@
 	if (!x86_pmu.lbr_nr)
 		return;
 
-	if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
-					event->ctx->task_ctx_data) {
+	if (branch_user_callstack(cpuc->br_sel) &&
+	    event->ctx->task_ctx_data) {
 		task_ctx = event->ctx->task_ctx_data;
 		task_ctx->lbr_callstack_users--;
 	}
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index b0f0e83..0a535ce 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -763,6 +763,7 @@
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
 
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init),
 
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE,  skl_rapl_init),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_rapl_init),
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index d9844cc..efca268 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1349,6 +1349,7 @@
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,	  bdx_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,	  knl_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM,	  knl_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP,skl_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,      skx_uncore_init),
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 61518cf..872877d 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -4,7 +4,6 @@
 /* Caches aren't brain-dead on the intel. */
 #include <asm-generic/cacheflush.h>
 #include <asm/special_insns.h>
-#include <asm/uaccess.h>
 
 /*
  * The set_memory_* API can be used to change various attributes of a virtual
diff --git a/arch/x86/include/asm/export.h b/arch/x86/include/asm/export.h
new file mode 100644
index 0000000..138de56
--- /dev/null
+++ b/arch/x86/include/asm/export.h
@@ -0,0 +1,4 @@
+#ifdef CONFIG_64BIT
+#define KSYM_ALIGN 16
+#endif
+#include <asm-generic/export.h>
diff --git a/arch/x86/include/asm/extable.h b/arch/x86/include/asm/extable.h
new file mode 100644
index 0000000..b8ad261
--- /dev/null
+++ b/arch/x86/include/asm/extable.h
@@ -0,0 +1,35 @@
+#ifndef _ASM_X86_EXTABLE_H
+#define _ASM_X86_EXTABLE_H
+/*
+ * The exception table consists of triples of addresses relative to the
+ * exception table entry itself. The first address is of an instruction
+ * that is allowed to fault, the second is the target at which the program
+ * should continue. The third is a handler function to deal with the fault
+ * caused by the instruction in the first field.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path.  This means when everything is well,
+ * we don't even have to jump over them.  Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+	int insn, fixup, handler;
+};
+struct pt_regs;
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+#define swap_ex_entry_fixup(a, b, tmp, delta)			\
+	do {							\
+		(a)->fixup = (b)->fixup + (delta);		\
+		(b)->fixup = (tmp).fixup - (delta);		\
+		(a)->handler = (b)->handler + (delta);		\
+		(b)->handler = (tmp).handler - (delta);		\
+	} while (0)
+
+extern int fixup_exception(struct pt_regs *regs, int trapnr);
+extern bool ex_has_fault_handler(unsigned long ip);
+extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
+
+#endif
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index d4957ac..430bacf 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -27,11 +27,12 @@
 				 XFEATURE_MASK_YMM | \
 				 XFEATURE_MASK_OPMASK | \
 				 XFEATURE_MASK_ZMM_Hi256 | \
-				 XFEATURE_MASK_Hi16_ZMM	 | \
-				 XFEATURE_MASK_PKRU)
+				 XFEATURE_MASK_Hi16_ZMM)
 
 /* Supported features which require eager state saving */
-#define XFEATURE_MASK_EAGER	(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)
+#define XFEATURE_MASK_EAGER	(XFEATURE_MASK_BNDREGS | \
+				 XFEATURE_MASK_BNDCSR | \
+				 XFEATURE_MASK_PKRU)
 
 /* All currently supported features */
 #define XCNTXT_MASK	(XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER)
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 9ae5ab8..34a46dc 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -64,5 +64,6 @@
 /* Xeon Phi */
 
 #define INTEL_FAM6_XEON_PHI_KNL		0x57 /* Knights Landing */
+#define INTEL_FAM6_XEON_PHI_KNM		0x85 /* Knights Mill */
 
 #endif /* _ASM_X86_INTEL_FAMILY_H */
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index d2434c1..282630e 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -210,6 +210,7 @@
 
 typedef void crash_vmclear_fn(void);
 extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
+extern void kdump_nmi_shootdown_cpus(void);
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 1ea0bae..72198c6 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -23,6 +23,14 @@
 	const struct vdso_image *vdso_image;	/* vdso image in use */
 
 	atomic_t perf_rdpmc_allowed;	/* nonzero if rdpmc is allowed */
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+	/*
+	 * One bit per protection key says whether userspace can
+	 * use it or not.  protected by mmap_sem.
+	 */
+	u16 pkey_allocation_map;
+	s16 execute_only_pkey;
+#endif
 } mm_context_t;
 
 #ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index d8abfcf..8e0a9fe 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -4,6 +4,7 @@
 #include <asm/desc.h>
 #include <linux/atomic.h>
 #include <linux/mm_types.h>
+#include <linux/pkeys.h>
 
 #include <trace/events/tlb.h>
 
@@ -107,7 +108,16 @@
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
+	#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
+		/* pkey 0 is the default and always allocated */
+		mm->context.pkey_allocation_map = 0x1;
+		/* -1 means unallocated or invalid */
+		mm->context.execute_only_pkey = -1;
+	}
+	#endif
 	init_new_context_ldt(tsk, mm);
+
 	return 0;
 }
 static inline void destroy_context(struct mm_struct *mm)
@@ -195,16 +205,20 @@
 		mpx_notify_unmap(mm, vma, start, end);
 }
 
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 static inline int vma_pkey(struct vm_area_struct *vma)
 {
-	u16 pkey = 0;
-#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
 				      VM_PKEY_BIT2 | VM_PKEY_BIT3;
-	pkey = (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
-#endif
-	return pkey;
+
+	return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
 }
+#else
+static inline int vma_pkey(struct vm_area_struct *vma)
+{
+	return 0;
+}
+#endif
 
 static inline bool __pkru_allows_pkey(u16 pkey, bool write)
 {
@@ -258,5 +272,4 @@
 {
 	return __pkru_allows_pkey(pte_flags_pkey(pte_flags(pte)), write);
 }
-
 #endif /* _ASM_X86_MMU_CONTEXT_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 56f4c66..78f3760 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -88,7 +88,6 @@
 
 #define MSR_IA32_RTIT_CTL		0x00000570
 #define MSR_IA32_RTIT_STATUS		0x00000571
-#define MSR_IA32_RTIT_STATUS		0x00000571
 #define MSR_IA32_RTIT_ADDR0_A		0x00000580
 #define MSR_IA32_RTIT_ADDR0_B		0x00000581
 #define MSR_IA32_RTIT_ADDR1_A		0x00000582
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index e02e3f8..84f58de 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -521,7 +521,8 @@
 static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
                         const unsigned long __percpu *addr)
 {
-	unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;
+	unsigned long __percpu *a =
+		(unsigned long __percpu *)addr + nr / BITS_PER_LONG;
 
 #ifdef CONFIG_X86_64
 	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
@@ -538,7 +539,7 @@
 	asm volatile("bt "__percpu_arg(2)",%1\n\t"
 			CC_SET(c)
 			: CC_OUT(c) (oldbit)
-			: "m" (*(unsigned long *)addr), "Ir" (nr));
+			: "m" (*(unsigned long __percpu *)addr), "Ir" (nr));
 
 	return oldbit;
 }
diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
index 7b84565..34684ad 100644
--- a/arch/x86/include/asm/pkeys.h
+++ b/arch/x86/include/asm/pkeys.h
@@ -10,7 +10,6 @@
  * Try to dedicate one of the protection keys to be used as an
  * execute-only protection key.
  */
-#define PKEY_DEDICATED_EXECUTE_ONLY 15
 extern int __execute_only_pkey(struct mm_struct *mm);
 static inline int execute_only_pkey(struct mm_struct *mm)
 {
@@ -31,4 +30,76 @@
 	return __arch_override_mprotect_pkey(vma, prot, pkey);
 }
 
+extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+		unsigned long init_val);
+
+#define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | VM_PKEY_BIT3)
+
+#define mm_pkey_allocation_map(mm)	(mm->context.pkey_allocation_map)
+#define mm_set_pkey_allocated(mm, pkey) do {		\
+	mm_pkey_allocation_map(mm) |= (1U << pkey);	\
+} while (0)
+#define mm_set_pkey_free(mm, pkey) do {			\
+	mm_pkey_allocation_map(mm) &= ~(1U << pkey);	\
+} while (0)
+
+static inline
+bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
+{
+	return mm_pkey_allocation_map(mm) & (1U << pkey);
+}
+
+/*
+ * Returns a positive, 4-bit key on success, or -1 on failure.
+ */
+static inline
+int mm_pkey_alloc(struct mm_struct *mm)
+{
+	/*
+	 * Note: this is the one and only place we make sure
+	 * that the pkey is valid as far as the hardware is
+	 * concerned.  The rest of the kernel trusts that
+	 * only good, valid pkeys come out of here.
+	 */
+	u16 all_pkeys_mask = ((1U << arch_max_pkey()) - 1);
+	int ret;
+
+	/*
+	 * Are we out of pkeys?  We must handle this specially
+	 * because ffz() behavior is undefined if there are no
+	 * zeros.
+	 */
+	if (mm_pkey_allocation_map(mm) == all_pkeys_mask)
+		return -1;
+
+	ret = ffz(mm_pkey_allocation_map(mm));
+
+	mm_set_pkey_allocated(mm, ret);
+
+	return ret;
+}
+
+static inline
+int mm_pkey_free(struct mm_struct *mm, int pkey)
+{
+	/*
+	 * pkey 0 is special, always allocated and can never
+	 * be freed.
+	 */
+	if (!pkey)
+		return -EINVAL;
+	if (!mm_pkey_is_allocated(mm, pkey))
+		return -EINVAL;
+
+	mm_set_pkey_free(mm, pkey);
+
+	return 0;
+}
+
+extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+		unsigned long init_val);
+extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+		unsigned long init_val);
+extern void copy_init_pkru_to_fpregs(void);
+
 #endif /*_ASM_X86_PKEYS_H */
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 3d33a71..a34e0d4 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -103,8 +103,10 @@
 ({							\
 	long tmp;					\
 	struct rw_semaphore* ret;			\
+	register void *__sp asm(_ASM_SP);		\
+							\
 	asm volatile("# beginning down_write\n\t"	\
-		     LOCK_PREFIX "  xadd      %1,(%3)\n\t"	\
+		     LOCK_PREFIX "  xadd      %1,(%4)\n\t"	\
 		     /* adds 0xffff0001, returns the old value */ \
 		     "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
 		     /* was the active mask 0 before? */\
@@ -112,7 +114,7 @@
 		     "  call " slow_path "\n"		\
 		     "1:\n"				\
 		     "# ending down_write"		\
-		     : "+m" (sem->count), "=d" (tmp), "=a" (ret)	\
+		     : "+m" (sem->count), "=d" (tmp), "=a" (ret), "+r" (__sp) \
 		     : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
 		     : "memory", "cc");			\
 	ret;						\
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index 13b6cdd..2f75f30 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -2,7 +2,7 @@
 #define _ASM_X86_SECTIONS_H
 
 #include <asm-generic/sections.h>
-#include <asm/uaccess.h>
+#include <asm/extable.h>
 
 extern char __brk_base[], __brk_limit[];
 extern struct exception_table_entry __stop___ex_table[];
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 19980b3..026ea82 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -47,6 +47,7 @@
 	void (*smp_cpus_done)(unsigned max_cpus);
 
 	void (*stop_other_cpus)(int wait);
+	void (*crash_stop_other_cpus)(void);
 	void (*smp_send_reschedule)(int cpu);
 
 	int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 2131c4c..faf3687 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -11,6 +11,7 @@
 #include <asm/asm.h>
 #include <asm/page.h>
 #include <asm/smap.h>
+#include <asm/extable.h>
 
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
@@ -91,37 +92,6 @@
 	likely(!__range_not_ok(addr, size, user_addr_max()))
 
 /*
- * The exception table consists of triples of addresses relative to the
- * exception table entry itself. The first address is of an instruction
- * that is allowed to fault, the second is the target at which the program
- * should continue. The third is a handler function to deal with the fault
- * caused by the instruction in the first field.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
-	int insn, fixup, handler;
-};
-
-#define ARCH_HAS_RELATIVE_EXTABLE
-
-#define swap_ex_entry_fixup(a, b, tmp, delta)			\
-	do {							\
-		(a)->fixup = (b)->fixup + (delta);		\
-		(b)->fixup = (tmp).fixup - (delta);		\
-		(a)->handler = (b)->handler + (delta);		\
-		(b)->handler = (tmp).handler - (delta);		\
-	} while (0)
-
-extern int fixup_exception(struct pt_regs *regs, int trapnr);
-extern bool ex_has_fault_handler(unsigned long ip);
-extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
-
-/*
  * These are the main single-value transfer routines.  They automatically
  * use the right size if we just have the right pointer type.
  *
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index c4b6d1c..46de9ac 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -23,6 +23,8 @@
 
 bool unwind_next_frame(struct unwind_state *state);
 
+unsigned long unwind_get_return_address(struct unwind_state *state);
+
 static inline bool unwind_done(struct unwind_state *state)
 {
 	return state->stack_info.type == STACK_TYPE_UNKNOWN;
@@ -48,8 +50,6 @@
 	return state->bp + 1;
 }
 
-unsigned long unwind_get_return_address(struct unwind_state *state);
-
 #else /* !CONFIG_FRAME_POINTER */
 
 static inline
@@ -58,16 +58,6 @@
 	return NULL;
 }
 
-static inline
-unsigned long unwind_get_return_address(struct unwind_state *state)
-{
-	if (unwind_done(state))
-		return 0;
-
-	return ftrace_graph_ret_addr(state->task, &state->graph_idx,
-				     *state->sp, state->sp);
-}
-
 #endif /* CONFIG_FRAME_POINTER */
 
 #endif /* _ASM_X86_UNWIND_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 4dd5d50..79076d7 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -46,9 +46,7 @@
 obj-y			+= setup.o x86_init.o i8259.o irqinit.o jump_label.o
 obj-$(CONFIG_IRQ_WORK)  += irq_work.o
 obj-y			+= probe_roms.o
-obj-$(CONFIG_X86_32)	+= i386_ksyms_32.o
-obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
-obj-$(CONFIG_X86_64)	+= mcount_64.o
+obj-$(CONFIG_X86_64)	+= sys_x86_64.o mcount_64.o
 obj-$(CONFIG_X86_ESPFIX64)	+= espfix_64.o
 obj-$(CONFIG_SYSFS)	+= ksysfs.o
 obj-y			+= bootflag.o e820.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 32a7d70..8a5abaa 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -233,6 +233,10 @@
 
 	acpi_table_print_madt_entry(header);
 
+	/* Ignore invalid ID */
+	if (processor->id == 0xff)
+		return 0;
+
 	/*
 	 * We need to register disabled CPU as well to permit
 	 * counting disabled CPUs. This allows us to size
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f266b8a..88c657b 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2128,9 +2128,11 @@
 	if (num_processors >= nr_cpu_ids) {
 		int thiscpu = max + disabled_cpus;
 
-		pr_warning(
-			"APIC: NR_CPUS/possible_cpus limit of %i reached."
-			"  Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
+		if (enabled) {
+			pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
+				   "reached. Processor %d/0x%x ignored.\n",
+				   max, thiscpu, apicid);
+		}
 
 		disabled_cpus++;
 		return -EINVAL;
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 6066d94..5d30c5e 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -661,11 +661,28 @@
  */
 void irq_force_complete_move(struct irq_desc *desc)
 {
-	struct irq_data *irqdata = irq_desc_get_irq_data(desc);
-	struct apic_chip_data *data = apic_chip_data(irqdata);
-	struct irq_cfg *cfg = data ? &data->cfg : NULL;
+	struct irq_data *irqdata;
+	struct apic_chip_data *data;
+	struct irq_cfg *cfg;
 	unsigned int cpu;
 
+	/*
+	 * The function is called for all descriptors regardless of which
+	 * irqdomain they belong to. For example if an IRQ is provided by
+	 * an irq_chip as part of a GPIO driver, the chip data for that
+	 * descriptor is specific to the irq_chip in question.
+	 *
+	 * Check first that the chip_data is what we expect
+	 * (apic_chip_data) before touching it any further.
+	 */
+	irqdata = irq_domain_get_irq_data(x86_vector_domain,
+					  irq_desc_get_irq(desc));
+	if (!irqdata)
+		return;
+
+	data = apic_chip_data(irqdata);
+	cfg = data ? &data->cfg : NULL;
+
 	if (!cfg)
 		return;
 
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 1ff0598..8116057 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -27,6 +27,7 @@
 #include <asm/div64.h>
 #include <asm/x86_init.h>
 #include <asm/hypervisor.h>
+#include <asm/apic.h>
 
 #define CPUID_VMWARE_INFO_LEAF	0x40000000
 #define VMWARE_HYPERVISOR_MAGIC	0x564D5868
@@ -82,10 +83,17 @@
 
 	VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
 
-	if (ebx != UINT_MAX)
+	if (ebx != UINT_MAX) {
 		x86_platform.calibrate_tsc = vmware_get_tsc_khz;
-	else
+#ifdef CONFIG_X86_LOCAL_APIC
+		/* Skip lapic calibration since we know the bus frequency. */
+		lapic_timer_frequency = ecx / HZ;
+		pr_info("Host bus clock speed read from hypervisor : %u Hz\n",
+			ecx);
+#endif
+	} else {
 		pr_warn("Failed to get TSC freq from the hypervisor\n");
+	}
 }
 
 /*
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 9616cf7..650830e 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -133,15 +133,31 @@
 	disable_local_APIC();
 }
 
-static void kdump_nmi_shootdown_cpus(void)
+void kdump_nmi_shootdown_cpus(void)
 {
 	nmi_shootdown_cpus(kdump_nmi_callback);
 
 	disable_local_APIC();
 }
 
+/* Override the weak function in kernel/panic.c */
+void crash_smp_send_stop(void)
+{
+	static int cpus_stopped;
+
+	if (cpus_stopped)
+		return;
+
+	if (smp_ops.crash_stop_other_cpus)
+		smp_ops.crash_stop_other_cpus();
+	else
+		smp_send_stop();
+
+	cpus_stopped = 1;
+}
+
 #else
-static void kdump_nmi_shootdown_cpus(void)
+void crash_smp_send_stop(void)
 {
 	/* There are no cpus to shootdown */
 }
@@ -160,7 +176,7 @@
 	/* The kernel is broken so disable interrupts */
 	local_irq_disable();
 
-	kdump_nmi_shootdown_cpus();
+	crash_smp_send_stop();
 
 	/*
 	 * VMCLEAR VMCSs loaded on this cpu if needed.
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index b85fe5f..90e8dde 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -350,7 +350,7 @@
 		 * continue building up new bios map based on this
 		 * information
 		 */
-		if (current_type != last_type) {
+		if (current_type != last_type || current_type == E820_PRAM) {
 			if (last_type != 0)	 {
 				new_bios[new_bios_entry].size =
 					change_point[chgidx]->addr - last_addr;
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 18bb3a6..6a08e25 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -317,16 +317,11 @@
 static phys_addr_t __init i865_stolen_base(int num, int slot, int func,
 					   size_t stolen_size)
 {
-	u16 toud;
+	u16 toud = 0;
 
-	/*
-	 * FIXME is the graphics stolen memory region
-	 * always at TOUD? Ie. is it always the last
-	 * one to be allocated by the BIOS?
-	 */
 	toud = read_pci_config_16(0, 0, 0, I865_TOUD);
 
-	return (phys_addr_t)toud << 16;
+	return (phys_addr_t)(toud << 16) + i845_tseg_size();
 }
 
 static phys_addr_t __init gen3_stolen_base(int num, int slot, int func,
@@ -512,8 +507,7 @@
 	INTEL_I915GM_IDS(&gen3_early_ops),
 	INTEL_I945G_IDS(&gen3_early_ops),
 	INTEL_I945GM_IDS(&gen3_early_ops),
-	INTEL_VLV_M_IDS(&gen6_early_ops),
-	INTEL_VLV_D_IDS(&gen6_early_ops),
+	INTEL_VLV_IDS(&gen6_early_ops),
 	INTEL_PINEVIEW_IDS(&gen3_early_ops),
 	INTEL_I965G_IDS(&gen3_early_ops),
 	INTEL_G33_IDS(&gen3_early_ops),
@@ -526,10 +520,8 @@
 	INTEL_SNB_M_IDS(&gen6_early_ops),
 	INTEL_IVB_M_IDS(&gen6_early_ops),
 	INTEL_IVB_D_IDS(&gen6_early_ops),
-	INTEL_HSW_D_IDS(&gen6_early_ops),
-	INTEL_HSW_M_IDS(&gen6_early_ops),
-	INTEL_BDW_M_IDS(&gen8_early_ops),
-	INTEL_BDW_D_IDS(&gen8_early_ops),
+	INTEL_HSW_IDS(&gen6_early_ops),
+	INTEL_BDW_IDS(&gen8_early_ops),
 	INTEL_CHV_IDS(&chv_early_ops),
 	INTEL_SKL_IDS(&gen9_early_ops),
 	INTEL_BXT_IDS(&gen9_early_ops),
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 3fc03a0..4700401 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -12,6 +12,7 @@
 #include <asm/traps.h>
 
 #include <linux/hardirq.h>
+#include <linux/pkeys.h>
 
 #define CREATE_TRACE_POINTS
 #include <asm/trace/fpu.h>
@@ -505,6 +506,9 @@
 		copy_kernel_to_fxregs(&init_fpstate.fxsave);
 	else
 		copy_kernel_to_fregs(&init_fpstate.fsave);
+
+	if (boot_cpu_has(X86_FEATURE_OSPKE))
+		copy_init_pkru_to_fpregs();
 }
 
 /*
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 01567aa..124aa5c 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -5,6 +5,7 @@
  */
 #include <linux/compat.h>
 #include <linux/cpu.h>
+#include <linux/mman.h>
 #include <linux/pkeys.h>
 
 #include <asm/fpu/api.h>
@@ -866,9 +867,10 @@
 	return get_xsave_addr(&fpu->state.xsave, xsave_state);
 }
 
+#ifdef CONFIG_ARCH_HAS_PKEYS
+
 #define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2)
 #define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1)
-
 /*
  * This will go out and modify PKRU register to set the access
  * rights for @pkey to @init_val.
@@ -914,6 +916,7 @@
 
 	return 0;
 }
+#endif /* ! CONFIG_ARCH_HAS_PKEYS */
 
 /*
  * This is similar to user_regset_copyout(), but will not add offset to
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 5f40126..b6b2f02 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -23,6 +23,7 @@
 #include <asm/percpu.h>
 #include <asm/nops.h>
 #include <asm/bootparam.h>
+#include <asm/export.h>
 
 /* Physical address */
 #define pa(X) ((X) - __PAGE_OFFSET)
@@ -673,6 +674,7 @@
 	.fill 4096,1,0
 ENTRY(swapper_pg_dir)
 	.fill 1024,4,0
+EXPORT_SYMBOL(empty_zero_page)
 
 /*
  * This starts the data section.
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index c98a559..b4421cc 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -21,6 +21,7 @@
 #include <asm/percpu.h>
 #include <asm/nops.h>
 #include "../entry/calling.h"
+#include <asm/export.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -486,10 +487,12 @@
 ENTRY(phys_base)
 	/* This must match the first entry in level2_kernel_pgt */
 	.quad   0x0000000000000000
+EXPORT_SYMBOL(phys_base)
 
 #include "../../x86/xen/xen-head.S"
 	
 	__PAGE_ALIGNED_BSS
 NEXT_PAGE(empty_zero_page)
 	.skip PAGE_SIZE
+EXPORT_SYMBOL(empty_zero_page)
 
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
deleted file mode 100644
index 1f9b878..0000000
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ /dev/null
@@ -1,47 +0,0 @@
-#include <linux/export.h>
-#include <linux/spinlock_types.h>
-
-#include <asm/checksum.h>
-#include <asm/pgtable.h>
-#include <asm/desc.h>
-#include <asm/ftrace.h>
-
-#ifdef CONFIG_FUNCTION_TRACER
-/* mcount is defined in assembly */
-EXPORT_SYMBOL(mcount);
-#endif
-
-/*
- * Note, this is a prototype to get at the symbol for
- * the export, but dont use it from C code, it is used
- * by assembly code and is not using C calling convention!
- */
-#ifndef CONFIG_X86_CMPXCHG64
-extern void cmpxchg8b_emu(void);
-EXPORT_SYMBOL(cmpxchg8b_emu);
-#endif
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy_generic);
-
-EXPORT_SYMBOL(__get_user_1);
-EXPORT_SYMBOL(__get_user_2);
-EXPORT_SYMBOL(__get_user_4);
-EXPORT_SYMBOL(__get_user_8);
-
-EXPORT_SYMBOL(__put_user_1);
-EXPORT_SYMBOL(__put_user_2);
-EXPORT_SYMBOL(__put_user_4);
-EXPORT_SYMBOL(__put_user_8);
-
-EXPORT_SYMBOL(strstr);
-
-EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(empty_zero_page);
-
-#ifdef CONFIG_PREEMPT
-EXPORT_SYMBOL(___preempt_schedule);
-EXPORT_SYMBOL(___preempt_schedule_notrace);
-#endif
-
-EXPORT_SYMBOL(__sw_hweight32);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 28cee01..d9d8d16 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -50,6 +50,7 @@
 #include <linux/kallsyms.h>
 #include <linux/ftrace.h>
 #include <linux/frame.h>
+#include <linux/kasan.h>
 
 #include <asm/text-patching.h>
 #include <asm/cacheflush.h>
@@ -1057,9 +1058,10 @@
 	 * tailcall optimization. So, to be absolutely safe
 	 * we also save and restore enough stack bytes to cover
 	 * the argument area.
+	 * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we copy
+	 * raw stack chunk with redzones:
 	 */
-	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
-	       MIN_STACK_SIZE(addr));
+	__memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
 	regs->flags &= ~X86_EFLAGS_IF;
 	trace_hardirqs_off();
 	regs->ip = (unsigned long)(jp->entry);
@@ -1080,6 +1082,9 @@
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
+	/* Unpoison stack redzones in the frames we are going to jump over. */
+	kasan_unpoison_stack_above_sp_to(kcb->jprobe_saved_sp);
+
 	asm volatile (
 #ifdef CONFIG_X86_64
 			"       xchg   %%rbx,%%rsp	\n"
@@ -1118,7 +1123,7 @@
 		/* It's OK to start function graph tracing again */
 		unpause_graph_tracing();
 		*regs = kcb->jprobe_saved_regs;
-		memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
+		__memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
 		preempt_enable_no_resched();
 		return 1;
 	}
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 5a294e4..8c1f218 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -337,6 +337,9 @@
 #endif
 	vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
 			      kaslr_offset());
+	VMCOREINFO_PAGE_OFFSET(PAGE_OFFSET);
+	VMCOREINFO_VMALLOC_START(VMALLOC_START);
+	VMCOREINFO_VMEMMAP_START(VMEMMAP_START);
 }
 
 /* arch-dependent functionality related to kexec file-based syscall */
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 6192422..efe73aa 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -7,6 +7,7 @@
 #include <linux/linkage.h>
 #include <asm/ptrace.h>
 #include <asm/ftrace.h>
+#include <asm/export.h>
 
 
 	.code64
@@ -294,6 +295,7 @@
 	jmp fgraph_trace
 END(function_hook)
 #endif /* CONFIG_DYNAMIC_FTRACE */
+EXPORT_SYMBOL(function_hook)
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 28cea78..0888a87 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -509,8 +509,7 @@
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	unsigned long range_end = mm->brk + 0x02000000;
-	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+	return randomize_page(mm->brk, 0x02000000);
 }
 
 /*
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ee944bd..b3760b3 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -109,12 +109,13 @@
 	get_debugreg(d7, 7);
 
 	/* Only print out debug registers if they are in their non-default state. */
-	if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
-	    (d6 == DR6_RESERVED) && (d7 == 0x400))
-		return;
-
-	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
-	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
+	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
+	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
+		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
+		       d0, d1, d2);
+		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
+		       d3, d6, d7);
+	}
 
 	if (boot_cpu_has(X86_FEATURE_OSPKE))
 		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 658777c..c00cb64 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -32,6 +32,8 @@
 #include <asm/nmi.h>
 #include <asm/mce.h>
 #include <asm/trace/irq_vectors.h>
+#include <asm/kexec.h>
+
 /*
  *	Some notes on x86 processor bugs affecting SMP operation:
  *
@@ -259,8 +261,10 @@
 
 __visible void smp_reschedule_interrupt(struct pt_regs *regs)
 {
+	irq_enter();
 	ack_APIC_irq();
 	__smp_reschedule_interrupt();
+	irq_exit();
 	/*
 	 * KVM uses this interrupt to force a cpu out of guest mode
 	 */
@@ -342,6 +346,9 @@
 	.smp_cpus_done		= native_smp_cpus_done,
 
 	.stop_other_cpus	= native_stop_other_cpus,
+#if defined(CONFIG_KEXEC_CORE)
+	.crash_stop_other_cpus	= kdump_nmi_shootdown_cpus,
+#endif
 	.smp_send_reschedule	= native_smp_send_reschedule,
 
 	.cpu_up			= native_cpu_up,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 42a9362..951f093 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1407,9 +1407,21 @@
 {
 	int i, possible;
 
-	/* no processor from mptable or madt */
-	if (!num_processors)
-		num_processors = 1;
+	/* No boot processor was found in mptable or ACPI MADT */
+	if (!num_processors) {
+		int apicid = boot_cpu_physical_apicid;
+		int cpu = hard_smp_processor_id();
+
+		pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
+
+		/* Make sure boot cpu is enumerated */
+		if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
+		    apic->apic_id_valid(apicid))
+			generic_processor_info(apicid, boot_cpu_apic_version);
+
+		if (!num_processors)
+			num_processors = 1;
+	}
 
 	i = setup_max_cpus ?: 1;
 	if (setup_possible_cpus == -1) {
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index c9a0738..a23ce84 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -57,7 +57,8 @@
 	unsigned char opcode[15];
 	unsigned long addr = convert_ip_to_linear(child, regs);
 
-	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
+	copied = access_process_vm(child, addr, opcode, sizeof(opcode),
+			FOLL_FORCE);
 	for (i = 0; i < copied; i++) {
 		switch (opcode[i]) {
 		/* popf and iret */
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 10e0272..a55ed63 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -101,7 +101,6 @@
 			   unsigned long *end)
 {
 	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
-		unsigned long new_begin;
 		/* This is usually used needed to map code in small
 		   model, so it needs to be in the first 31bit. Limit
 		   it to that.  This means we need to move the
@@ -112,9 +111,7 @@
 		*begin = 0x40000000;
 		*end = 0x80000000;
 		if (current->flags & PF_RANDOMIZE) {
-			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
-			if (new_begin)
-				*begin = new_begin;
+			*begin = randomize_page(*begin, 0x02000000);
 		}
 	} else {
 		*begin = current->mm->mmap_legacy_base;
diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c
index b5a834c..9298993 100644
--- a/arch/x86/kernel/unwind_guess.c
+++ b/arch/x86/kernel/unwind_guess.c
@@ -5,6 +5,16 @@
 #include <asm/stacktrace.h>
 #include <asm/unwind.h>
 
+unsigned long unwind_get_return_address(struct unwind_state *state)
+{
+	if (unwind_done(state))
+		return 0;
+
+	return ftrace_graph_ret_addr(state->task, &state->graph_idx,
+				     *state->sp, state->sp);
+}
+EXPORT_SYMBOL_GPL(unwind_get_return_address);
+
 bool unwind_next_frame(struct unwind_state *state)
 {
 	struct stack_info *info = &state->stack_info;
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
deleted file mode 100644
index b2cee3d1..0000000
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/* Exports for assembly files.
-   All C exports should go in the respective C files. */
-
-#include <linux/export.h>
-#include <linux/spinlock_types.h>
-#include <linux/smp.h>
-
-#include <net/checksum.h>
-
-#include <asm/processor.h>
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/desc.h>
-#include <asm/ftrace.h>
-
-#ifdef CONFIG_FUNCTION_TRACER
-/* mcount and __fentry__ are defined in assembly */
-#ifdef CC_USING_FENTRY
-EXPORT_SYMBOL(__fentry__);
-#else
-EXPORT_SYMBOL(mcount);
-#endif
-#endif
-
-EXPORT_SYMBOL(__get_user_1);
-EXPORT_SYMBOL(__get_user_2);
-EXPORT_SYMBOL(__get_user_4);
-EXPORT_SYMBOL(__get_user_8);
-EXPORT_SYMBOL(__put_user_1);
-EXPORT_SYMBOL(__put_user_2);
-EXPORT_SYMBOL(__put_user_4);
-EXPORT_SYMBOL(__put_user_8);
-
-EXPORT_SYMBOL(copy_user_generic_string);
-EXPORT_SYMBOL(copy_user_generic_unrolled);
-EXPORT_SYMBOL(copy_user_enhanced_fast_string);
-EXPORT_SYMBOL(__copy_user_nocache);
-EXPORT_SYMBOL(_copy_from_user);
-EXPORT_SYMBOL(_copy_to_user);
-
-EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled);
-
-EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(clear_page);
-
-EXPORT_SYMBOL(csum_partial);
-
-EXPORT_SYMBOL(__sw_hweight32);
-EXPORT_SYMBOL(__sw_hweight64);
-
-/*
- * Export string functions. We normally rely on gcc builtin for most of these,
- * but gcc sometimes decides not to inline them.
- */
-#undef memcpy
-#undef memset
-#undef memmove
-
-extern void *__memset(void *, int, __kernel_size_t);
-extern void *__memcpy(void *, const void *, __kernel_size_t);
-extern void *__memmove(void *, const void *, __kernel_size_t);
-extern void *memset(void *, int, __kernel_size_t);
-extern void *memcpy(void *, const void *, __kernel_size_t);
-extern void *memmove(void *, const void *, __kernel_size_t);
-
-EXPORT_SYMBOL(__memset);
-EXPORT_SYMBOL(__memcpy);
-EXPORT_SYMBOL(__memmove);
-
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memmove);
-
-#ifndef CONFIG_DEBUG_VIRTUAL
-EXPORT_SYMBOL(phys_base);
-#endif
-EXPORT_SYMBOL(empty_zero_page);
-#ifndef CONFIG_PARAVIRT
-EXPORT_SYMBOL(native_load_gs_index);
-#endif
-
-#ifdef CONFIG_PREEMPT
-EXPORT_SYMBOL(___preempt_schedule);
-EXPORT_SYMBOL(___preempt_schedule_notrace);
-#endif
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 5fb6c62..16a7134 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -212,7 +212,7 @@
 	 */
 	smp_mb();
 	if (atomic_dec_if_positive(&ps->pending) > 0)
-		queue_kthread_work(&pit->worker, &pit->expired);
+		kthread_queue_work(&pit->worker, &pit->expired);
 }
 
 void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
@@ -233,7 +233,7 @@
 static void destroy_pit_timer(struct kvm_pit *pit)
 {
 	hrtimer_cancel(&pit->pit_state.timer);
-	flush_kthread_work(&pit->expired);
+	kthread_flush_work(&pit->expired);
 }
 
 static void pit_do_work(struct kthread_work *work)
@@ -272,7 +272,7 @@
 	if (atomic_read(&ps->reinject))
 		atomic_inc(&ps->pending);
 
-	queue_kthread_work(&pt->worker, &pt->expired);
+	kthread_queue_work(&pt->worker, &pt->expired);
 
 	if (ps->is_periodic) {
 		hrtimer_add_expires_ns(&ps->timer, ps->period);
@@ -324,7 +324,7 @@
 
 	/* TODO The new value only affected after the retriggered */
 	hrtimer_cancel(&ps->timer);
-	flush_kthread_work(&pit->expired);
+	kthread_flush_work(&pit->expired);
 	ps->period = interval;
 	ps->is_periodic = is_period;
 
@@ -667,13 +667,13 @@
 	pid_nr = pid_vnr(pid);
 	put_pid(pid);
 
-	init_kthread_worker(&pit->worker);
+	kthread_init_worker(&pit->worker);
 	pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
 				       "kvm-pit/%d", pid_nr);
 	if (IS_ERR(pit->worker_task))
 		goto fail_kthread;
 
-	init_kthread_work(&pit->expired, pit_do_work);
+	kthread_init_work(&pit->expired, pit_do_work);
 
 	pit->kvm = kvm;
 
@@ -730,7 +730,7 @@
 		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
 		kvm_pit_set_reinject(pit, false);
 		hrtimer_cancel(&pit->pit_state.timer);
-		flush_kthread_work(&pit->expired);
+		kthread_flush_work(&pit->expired);
 		kthread_stop(pit->worker_task);
 		kvm_free_irq_source_id(kvm, pit->irq_source_id);
 		kfree(pit);
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index c1e6232..4d34bb5 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -28,6 +28,7 @@
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
+#include <asm/export.h>
 				
 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
@@ -251,6 +252,7 @@
 ENDPROC(csum_partial)
 				
 #endif
+EXPORT_SYMBOL(csum_partial)
 
 /*
 unsigned int csum_partial_copy_generic (const char *src, char *dst,
@@ -490,3 +492,4 @@
 #undef ROUND1		
 		
 #endif
+EXPORT_SYMBOL(csum_partial_copy_generic)
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index 65be7cf..5e2af3a 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -1,6 +1,7 @@
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
+#include <asm/export.h>
 
 /*
  * Most CPUs support enhanced REP MOVSB/STOSB instructions. It is
@@ -23,6 +24,7 @@
 	rep stosq
 	ret
 ENDPROC(clear_page)
+EXPORT_SYMBOL(clear_page)
 
 ENTRY(clear_page_orig)
 
diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
index ad53497..03a186f 100644
--- a/arch/x86/lib/cmpxchg8b_emu.S
+++ b/arch/x86/lib/cmpxchg8b_emu.S
@@ -7,6 +7,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/export.h>
 
 .text
 
@@ -48,3 +49,4 @@
 	ret
 
 ENDPROC(cmpxchg8b_emu)
+EXPORT_SYMBOL(cmpxchg8b_emu)
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 24ef1c2..e850815 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -3,6 +3,7 @@
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
+#include <asm/export.h>
 
 /*
  * Some CPUs run faster using the string copy instructions (sane microcode).
@@ -17,6 +18,7 @@
 	rep	movsq
 	ret
 ENDPROC(copy_page)
+EXPORT_SYMBOL(copy_page)
 
 ENTRY(copy_page_regs)
 	subq	$2*8,	%rsp
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index bf603eb..d376e4b 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -14,6 +14,7 @@
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/export.h>
 
 /* Standard copy_to_user with segment limit checking */
 ENTRY(_copy_to_user)
@@ -29,6 +30,7 @@
 		      "jmp copy_user_enhanced_fast_string",	\
 		      X86_FEATURE_ERMS
 ENDPROC(_copy_to_user)
+EXPORT_SYMBOL(_copy_to_user)
 
 /* Standard copy_from_user with segment limit checking */
 ENTRY(_copy_from_user)
@@ -44,6 +46,8 @@
 		      "jmp copy_user_enhanced_fast_string",	\
 		      X86_FEATURE_ERMS
 ENDPROC(_copy_from_user)
+EXPORT_SYMBOL(_copy_from_user)
+
 
 	.section .fixup,"ax"
 	/* must zero dest */
@@ -155,6 +159,7 @@
 	_ASM_EXTABLE(21b,50b)
 	_ASM_EXTABLE(22b,50b)
 ENDPROC(copy_user_generic_unrolled)
+EXPORT_SYMBOL(copy_user_generic_unrolled)
 
 /* Some CPUs run faster using the string copy instructions.
  * This is also a lot simpler. Use them when possible.
@@ -200,6 +205,7 @@
 	_ASM_EXTABLE(1b,11b)
 	_ASM_EXTABLE(3b,12b)
 ENDPROC(copy_user_generic_string)
+EXPORT_SYMBOL(copy_user_generic_string)
 
 /*
  * Some CPUs are adding enhanced REP MOVSB/STOSB instructions.
@@ -229,6 +235,7 @@
 
 	_ASM_EXTABLE(1b,12b)
 ENDPROC(copy_user_enhanced_fast_string)
+EXPORT_SYMBOL(copy_user_enhanced_fast_string)
 
 /*
  * copy_user_nocache - Uncached memory copy with exception handling
@@ -379,3 +386,4 @@
 	_ASM_EXTABLE(40b,.L_fixup_1b_copy)
 	_ASM_EXTABLE(41b,.L_fixup_1b_copy)
 ENDPROC(__copy_user_nocache)
+EXPORT_SYMBOL(__copy_user_nocache)
diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
index 9a7fe6a..378e5d5 100644
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -135,6 +135,7 @@
 	return (__force __wsum)add32_with_carry(do_csum(buff, len),
 						(__force u32)sum);
 }
+EXPORT_SYMBOL(csum_partial);
 
 /*
  * this routine is used for miscellaneous IP-like checksums, mainly
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 0ef5128..37b62d4 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -32,6 +32,7 @@
 #include <asm/thread_info.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/export.h>
 
 	.text
 ENTRY(__get_user_1)
@@ -44,6 +45,7 @@
 	ASM_CLAC
 	ret
 ENDPROC(__get_user_1)
+EXPORT_SYMBOL(__get_user_1)
 
 ENTRY(__get_user_2)
 	add $1,%_ASM_AX
@@ -57,6 +59,7 @@
 	ASM_CLAC
 	ret
 ENDPROC(__get_user_2)
+EXPORT_SYMBOL(__get_user_2)
 
 ENTRY(__get_user_4)
 	add $3,%_ASM_AX
@@ -70,6 +73,7 @@
 	ASM_CLAC
 	ret
 ENDPROC(__get_user_4)
+EXPORT_SYMBOL(__get_user_4)
 
 ENTRY(__get_user_8)
 #ifdef CONFIG_X86_64
@@ -97,6 +101,7 @@
 	ret
 #endif
 ENDPROC(__get_user_8)
+EXPORT_SYMBOL(__get_user_8)
 
 
 bad_get_user:
diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S
index 8a602a1..23d893c 100644
--- a/arch/x86/lib/hweight.S
+++ b/arch/x86/lib/hweight.S
@@ -1,4 +1,5 @@
 #include <linux/linkage.h>
+#include <asm/export.h>
 
 #include <asm/asm.h>
 
@@ -32,6 +33,7 @@
 	__ASM_SIZE(pop,) %__ASM_REG(dx)
 	ret
 ENDPROC(__sw_hweight32)
+EXPORT_SYMBOL(__sw_hweight32)
 
 ENTRY(__sw_hweight64)
 #ifdef CONFIG_X86_64
@@ -77,3 +79,4 @@
 	ret
 #endif
 ENDPROC(__sw_hweight64)
+EXPORT_SYMBOL(__sw_hweight64)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 49e6eba..779782f 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -4,6 +4,7 @@
 #include <asm/errno.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
+#include <asm/export.h>
 
 /*
  * We build a jump to memcpy_orig by default which gets NOPped out on
@@ -40,6 +41,8 @@
 	ret
 ENDPROC(memcpy)
 ENDPROC(__memcpy)
+EXPORT_SYMBOL(memcpy)
+EXPORT_SYMBOL(__memcpy)
 
 /*
  * memcpy_erms() - enhanced fast string memcpy. This is faster and
@@ -274,6 +277,7 @@
 	xorq %rax, %rax
 	ret
 ENDPROC(memcpy_mcsafe_unrolled)
+EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled)
 
 	.section .fixup, "ax"
 	/* Return -EFAULT for any failure */
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 90ce01b..15de86c 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -8,6 +8,7 @@
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
+#include <asm/export.h>
 
 #undef memmove
 
@@ -207,3 +208,5 @@
 	retq
 ENDPROC(__memmove)
 ENDPROC(memmove)
+EXPORT_SYMBOL(__memmove)
+EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index e1229ec..55b95db 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -3,6 +3,7 @@
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
+#include <asm/export.h>
 
 .weak memset
 
@@ -43,6 +44,8 @@
 	ret
 ENDPROC(memset)
 ENDPROC(__memset)
+EXPORT_SYMBOL(memset)
+EXPORT_SYMBOL(__memset)
 
 /*
  * ISO C memset - set a memory block to a byte value. This function uses
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index c891ece..cd5d716 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -15,6 +15,7 @@
 #include <asm/errno.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/export.h>
 
 
 /*
@@ -43,6 +44,7 @@
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_1)
+EXPORT_SYMBOL(__put_user_1)
 
 ENTRY(__put_user_2)
 	ENTER
@@ -55,6 +57,7 @@
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_2)
+EXPORT_SYMBOL(__put_user_2)
 
 ENTRY(__put_user_4)
 	ENTER
@@ -67,6 +70,7 @@
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_4)
+EXPORT_SYMBOL(__put_user_4)
 
 ENTRY(__put_user_8)
 	ENTER
@@ -82,6 +86,7 @@
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_8)
+EXPORT_SYMBOL(__put_user_8)
 
 bad_put_user:
 	movl $-EFAULT,%eax
diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c
index 8e2d55f..a03b1c7 100644
--- a/arch/x86/lib/strstr_32.c
+++ b/arch/x86/lib/strstr_32.c
@@ -1,4 +1,5 @@
 #include <linux/string.h>
+#include <linux/export.h>
 
 char *strstr(const char *cs, const char *ct)
 {
@@ -28,4 +29,4 @@
 	: "dx", "di");
 return __res;
 }
-
+EXPORT_SYMBOL(strstr);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 1e52512..9f72ca3 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -5,7 +5,7 @@
  */
 #include <linux/sched.h>		/* test_thread_flag(), ...	*/
 #include <linux/kdebug.h>		/* oops_begin/end, ...		*/
-#include <linux/extable.h>		/* search_exception_table	*/
+#include <linux/extable.h>		/* search_exception_tables	*/
 #include <linux/bootmem.h>		/* max_low_pfn			*/
 #include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
 #include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
@@ -1144,6 +1144,15 @@
 {
 	/* This is only called for the current mm, so: */
 	bool foreign = false;
+
+	/*
+	 * Read or write was blocked by protection keys.  This is
+	 * always an unconditional error and can never result in
+	 * a follow-up action to resolve the fault, like a COW.
+	 */
+	if (error_code & PF_PK)
+		return 1;
+
 	/*
 	 * Make sure to check the VMA so that we do not perform
 	 * faults just to hit a PF_PK as soon as we fill in a
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index b8b6a60..0d4fb3e 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -435,7 +435,7 @@
 
 		ret = get_user_pages_unlocked(start,
 					      (end - start) >> PAGE_SHIFT,
-					      write, 0, pages);
+					      pages, write ? FOLL_WRITE : 0);
 
 		/* Have to be a bit careful with return values */
 		if (nr > 0) {
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 8047687..e4f8009 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -544,10 +544,9 @@
 {
 	long gup_ret;
 	int nr_pages = 1;
-	int force = 0;
 
-	gup_ret = get_user_pages((unsigned long)addr, nr_pages, write,
-			force, NULL, NULL);
+	gup_ret = get_user_pages((unsigned long)addr, nr_pages,
+			write ? FOLL_WRITE : 0,	NULL, NULL);
 	/*
 	 * get_user_pages() returns number of pages gotten.
 	 * 0 means we failed to fault in and get anything,
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index e8c4744..f88ce0e 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -11,6 +11,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  */
+#include <linux/debugfs.h>		/* debugfs_create_u32()		*/
 #include <linux/mm_types.h>             /* mm_struct, vma, etc...       */
 #include <linux/pkeys.h>                /* PKEY_*                       */
 #include <uapi/asm-generic/mman-common.h>
@@ -21,8 +22,19 @@
 
 int __execute_only_pkey(struct mm_struct *mm)
 {
+	bool need_to_set_mm_pkey = false;
+	int execute_only_pkey = mm->context.execute_only_pkey;
 	int ret;
 
+	/* Do we need to assign a pkey for mm's execute-only maps? */
+	if (execute_only_pkey == -1) {
+		/* Go allocate one to use, which might fail */
+		execute_only_pkey = mm_pkey_alloc(mm);
+		if (execute_only_pkey < 0)
+			return -1;
+		need_to_set_mm_pkey = true;
+	}
+
 	/*
 	 * We do not want to go through the relatively costly
 	 * dance to set PKRU if we do not need to.  Check it
@@ -32,22 +44,33 @@
 	 * can make fpregs inactive.
 	 */
 	preempt_disable();
-	if (fpregs_active() &&
-	    !__pkru_allows_read(read_pkru(), PKEY_DEDICATED_EXECUTE_ONLY)) {
+	if (!need_to_set_mm_pkey &&
+	    fpregs_active() &&
+	    !__pkru_allows_read(read_pkru(), execute_only_pkey)) {
 		preempt_enable();
-		return PKEY_DEDICATED_EXECUTE_ONLY;
+		return execute_only_pkey;
 	}
 	preempt_enable();
-	ret = arch_set_user_pkey_access(current, PKEY_DEDICATED_EXECUTE_ONLY,
+
+	/*
+	 * Set up PKRU so that it denies access for everything
+	 * other than execution.
+	 */
+	ret = arch_set_user_pkey_access(current, execute_only_pkey,
 			PKEY_DISABLE_ACCESS);
 	/*
 	 * If the PKRU-set operation failed somehow, just return
 	 * 0 and effectively disable execute-only support.
 	 */
-	if (ret)
-		return 0;
+	if (ret) {
+		mm_set_pkey_free(mm, execute_only_pkey);
+		return -1;
+	}
 
-	return PKEY_DEDICATED_EXECUTE_ONLY;
+	/* We got one, store it and use it from here on out */
+	if (need_to_set_mm_pkey)
+		mm->context.execute_only_pkey = execute_only_pkey;
+	return execute_only_pkey;
 }
 
 static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
@@ -55,7 +78,7 @@
 	/* Do this check first since the vm_flags should be hot */
 	if ((vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) != VM_EXEC)
 		return false;
-	if (vma_pkey(vma) != PKEY_DEDICATED_EXECUTE_ONLY)
+	if (vma_pkey(vma) != vma->vm_mm->context.execute_only_pkey)
 		return false;
 
 	return true;
@@ -99,3 +122,106 @@
 	 */
 	return vma_pkey(vma);
 }
+
+#define PKRU_AD_KEY(pkey)	(PKRU_AD_BIT << ((pkey) * PKRU_BITS_PER_PKEY))
+
+/*
+ * Make the default PKRU value (at execve() time) as restrictive
+ * as possible.  This ensures that any threads clone()'d early
+ * in the process's lifetime will not accidentally get access
+ * to data which is pkey-protected later on.
+ */
+u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
+		      PKRU_AD_KEY( 4) | PKRU_AD_KEY( 5) | PKRU_AD_KEY( 6) |
+		      PKRU_AD_KEY( 7) | PKRU_AD_KEY( 8) | PKRU_AD_KEY( 9) |
+		      PKRU_AD_KEY(10) | PKRU_AD_KEY(11) | PKRU_AD_KEY(12) |
+		      PKRU_AD_KEY(13) | PKRU_AD_KEY(14) | PKRU_AD_KEY(15);
+
+/*
+ * Called from the FPU code when creating a fresh set of FPU
+ * registers.  This is called from a very specific context where
+ * we know the FPU regstiers are safe for use and we can use PKRU
+ * directly.  The fact that PKRU is only available when we are
+ * using eagerfpu mode makes this possible.
+ */
+void copy_init_pkru_to_fpregs(void)
+{
+	u32 init_pkru_value_snapshot = READ_ONCE(init_pkru_value);
+	/*
+	 * Any write to PKRU takes it out of the XSAVE 'init
+	 * state' which increases context switch cost.  Avoid
+	 * writing 0 when PKRU was already 0.
+	 */
+	if (!init_pkru_value_snapshot && !read_pkru())
+		return;
+	/*
+	 * Override the PKRU state that came from 'init_fpstate'
+	 * with the baseline from the process.
+	 */
+	write_pkru(init_pkru_value_snapshot);
+}
+
+static ssize_t init_pkru_read_file(struct file *file, char __user *user_buf,
+			     size_t count, loff_t *ppos)
+{
+	char buf[32];
+	unsigned int len;
+
+	len = sprintf(buf, "0x%x\n", init_pkru_value);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t init_pkru_write_file(struct file *file,
+		 const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	ssize_t len;
+	u32 new_init_pkru;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	/* Make the buffer a valid string that we can not overrun */
+	buf[len] = '\0';
+	if (kstrtouint(buf, 0, &new_init_pkru))
+		return -EINVAL;
+
+	/*
+	 * Don't allow insane settings that will blow the system
+	 * up immediately if someone attempts to disable access
+	 * or writes to pkey 0.
+	 */
+	if (new_init_pkru & (PKRU_AD_BIT|PKRU_WD_BIT))
+		return -EINVAL;
+
+	WRITE_ONCE(init_pkru_value, new_init_pkru);
+	return count;
+}
+
+static const struct file_operations fops_init_pkru = {
+	.read = init_pkru_read_file,
+	.write = init_pkru_write_file,
+	.llseek = default_llseek,
+};
+
+static int __init create_init_pkru_value(void)
+{
+	debugfs_create_file("init_pkru", S_IRUSR | S_IWUSR,
+			arch_debugfs_dir, NULL, &fops_init_pkru);
+	return 0;
+}
+late_initcall(create_init_pkru_value);
+
+static __init int setup_init_pkru(char *opt)
+{
+	u32 new_init_pkru;
+
+	if (kstrtouint(opt, 0, &new_init_pkru))
+		return 1;
+
+	WRITE_ONCE(init_pkru_value, new_init_pkru);
+
+	return 1;
+}
+__setup("init_pkru=", setup_init_pkru);
diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile
index 3ee2bb6..e7e7055 100644
--- a/arch/x86/um/Makefile
+++ b/arch/x86/um/Makefile
@@ -8,7 +8,7 @@
 	BITS := 64
 endif
 
-obj-y = bug.o bugs_$(BITS).o delay.o fault.o ksyms.o ldt.o \
+obj-y = bug.o bugs_$(BITS).o delay.o fault.o ldt.o \
 	ptrace_$(BITS).o ptrace_user.o setjmp_$(BITS).o signal.o \
 	stub_$(BITS).o stub_segv.o \
 	sys_call_table_$(BITS).o sysrq_$(BITS).o tls_$(BITS).o \
diff --git a/arch/x86/um/checksum_32.S b/arch/x86/um/checksum_32.S
index fa4b8b9..b9933eb 100644
--- a/arch/x86/um/checksum_32.S
+++ b/arch/x86/um/checksum_32.S
@@ -27,6 +27,7 @@
 
 #include <asm/errno.h>
 #include <asm/asm.h>
+#include <asm/export.h>
 				
 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
@@ -214,3 +215,4 @@
 	ret
 				
 #endif
+	EXPORT_SYMBOL(csum_partial)
diff --git a/arch/x86/um/ksyms.c b/arch/x86/um/ksyms.c
deleted file mode 100644
index 2e8f43e..0000000
--- a/arch/x86/um/ksyms.c
+++ /dev/null
@@ -1,13 +0,0 @@
-#include <linux/module.h>
-#include <asm/string.h>
-#include <asm/checksum.h>
-
-#ifndef CONFIG_X86_32
-/*XXX: we need them because they would be exported by x86_64 */
-#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
-EXPORT_SYMBOL(memcpy);
-#else
-EXPORT_SYMBOL(__memcpy);
-#endif
-#endif
-EXPORT_SYMBOL(csum_partial);
diff --git a/arch/x86/um/ptrace_32.c b/arch/x86/um/ptrace_32.c
index 5766ead..60a5a5a 100644
--- a/arch/x86/um/ptrace_32.c
+++ b/arch/x86/um/ptrace_32.c
@@ -36,7 +36,8 @@
 		 * slow, but that doesn't matter, since it will be called only
 		 * in case of singlestepping, if copy_from_user failed.
 		 */
-		n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
+		n = access_process_vm(current, addr, &instr, sizeof(instr),
+				FOLL_FORCE);
 		if (n != sizeof(instr)) {
 			printk(KERN_ERR "is_syscall : failed to read "
 			       "instruction from 0x%lx\n", addr);
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index 0b5c184..e30202b 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -212,7 +212,8 @@
 		 * slow, but that doesn't matter, since it will be called only
 		 * in case of singlestepping, if copy_from_user failed.
 		 */
-		n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
+		n = access_process_vm(current, addr, &instr, sizeof(instr),
+				FOLL_FORCE);
 		if (n != sizeof(instr)) {
 			printk("is_syscall : failed to read instruction from "
 			       "0x%lx\n", addr);
diff --git a/arch/xtensa/include/asm/asm-uaccess.h b/arch/xtensa/include/asm/asm-uaccess.h
new file mode 100644
index 0000000..a7a1100
--- /dev/null
+++ b/arch/xtensa/include/asm/asm-uaccess.h
@@ -0,0 +1,160 @@
+/*
+ * include/asm-xtensa/uaccess.h
+ *
+ * User space memory access functions
+ *
+ * These routines provide basic accessing functions to the user memory
+ * space for the kernel. This header file provides functions such as:
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ASM_UACCESS_H
+#define _XTENSA_ASM_UACCESS_H
+
+#include <linux/errno.h>
+#include <asm/types.h>
+
+#define VERIFY_READ    0
+#define VERIFY_WRITE   1
+
+#include <asm/current.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+
+/*
+ * These assembly macros mirror the C macros in asm/uaccess.h.  They
+ * should always have identical functionality.  See
+ * arch/xtensa/kernel/sys.S for usage.
+ */
+
+#define KERNEL_DS	0
+#define USER_DS		1
+
+#define get_ds		(KERNEL_DS)
+
+/*
+ * get_fs reads current->thread.current_ds into a register.
+ * On Entry:
+ * 	<ad>	anything
+ * 	<sp>	stack
+ * On Exit:
+ * 	<ad>	contains current->thread.current_ds
+ */
+	.macro	get_fs	ad, sp
+	GET_CURRENT(\ad,\sp)
+#if THREAD_CURRENT_DS > 1020
+	addi	\ad, \ad, TASK_THREAD
+	l32i	\ad, \ad, THREAD_CURRENT_DS - TASK_THREAD
+#else
+	l32i	\ad, \ad, THREAD_CURRENT_DS
+#endif
+	.endm
+
+/*
+ * set_fs sets current->thread.current_ds to some value.
+ * On Entry:
+ *	<at>	anything (temp register)
+ *	<av>	value to write
+ *	<sp>	stack
+ * On Exit:
+ *	<at>	destroyed (actually, current)
+ *	<av>	preserved, value to write
+ */
+	.macro	set_fs	at, av, sp
+	GET_CURRENT(\at,\sp)
+	s32i	\av, \at, THREAD_CURRENT_DS
+	.endm
+
+/*
+ * kernel_ok determines whether we should bypass addr/size checking.
+ * See the equivalent C-macro version below for clarity.
+ * On success, kernel_ok branches to a label indicated by parameter
+ * <success>.  This implies that the macro falls through to the next
+ * insruction on an error.
+ *
+ * Note that while this macro can be used independently, we designed
+ * in for optimal use in the access_ok macro below (i.e., we fall
+ * through on error).
+ *
+ * On Entry:
+ * 	<at>		anything (temp register)
+ * 	<success>	label to branch to on success; implies
+ * 			fall-through macro on error
+ * 	<sp>		stack pointer
+ * On Exit:
+ * 	<at>		destroyed (actually, current->thread.current_ds)
+ */
+
+#if ((KERNEL_DS != 0) || (USER_DS == 0))
+# error Assembly macro kernel_ok fails
+#endif
+	.macro	kernel_ok  at, sp, success
+	get_fs	\at, \sp
+	beqz	\at, \success
+	.endm
+
+/*
+ * user_ok determines whether the access to user-space memory is allowed.
+ * See the equivalent C-macro version below for clarity.
+ *
+ * On error, user_ok branches to a label indicated by parameter
+ * <error>.  This implies that the macro falls through to the next
+ * instruction on success.
+ *
+ * Note that while this macro can be used independently, we designed
+ * in for optimal use in the access_ok macro below (i.e., we fall
+ * through on success).
+ *
+ * On Entry:
+ * 	<aa>	register containing memory address
+ * 	<as>	register containing memory size
+ * 	<at>	temp register
+ * 	<error>	label to branch to on error; implies fall-through
+ * 		macro on success
+ * On Exit:
+ * 	<aa>	preserved
+ * 	<as>	preserved
+ * 	<at>	destroyed (actually, (TASK_SIZE + 1 - size))
+ */
+	.macro	user_ok	aa, as, at, error
+	movi	\at, __XTENSA_UL_CONST(TASK_SIZE)
+	bgeu	\as, \at, \error
+	sub	\at, \at, \as
+	bgeu	\aa, \at, \error
+	.endm
+
+/*
+ * access_ok determines whether a memory access is allowed.  See the
+ * equivalent C-macro version below for clarity.
+ *
+ * On error, access_ok branches to a label indicated by parameter
+ * <error>.  This implies that the macro falls through to the next
+ * instruction on success.
+ *
+ * Note that we assume success is the common case, and we optimize the
+ * branch fall-through case on success.
+ *
+ * On Entry:
+ * 	<aa>	register containing memory address
+ * 	<as>	register containing memory size
+ * 	<at>	temp register
+ * 	<sp>
+ * 	<error>	label to branch to on error; implies fall-through
+ * 		macro on success
+ * On Exit:
+ * 	<aa>	preserved
+ * 	<as>	preserved
+ * 	<at>	destroyed
+ */
+	.macro	access_ok  aa, as, at, sp, error
+	kernel_ok  \at, \sp, .Laccess_ok_\@
+	user_ok    \aa, \as, \at, \error
+.Laccess_ok_\@:
+	.endm
+
+#endif	/* _XTENSA_ASM_UACCESS_H */
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 147b26e..848a3d7 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -17,153 +17,12 @@
 #define _XTENSA_UACCESS_H
 
 #include <linux/errno.h>
-#ifndef __ASSEMBLY__
 #include <linux/prefetch.h>
-#endif
 #include <asm/types.h>
 
 #define VERIFY_READ    0
 #define VERIFY_WRITE   1
 
-#ifdef __ASSEMBLY__
-
-#include <asm/current.h>
-#include <asm/asm-offsets.h>
-#include <asm/processor.h>
-
-/*
- * These assembly macros mirror the C macros that follow below.  They
- * should always have identical functionality.  See
- * arch/xtensa/kernel/sys.S for usage.
- */
-
-#define KERNEL_DS	0
-#define USER_DS		1
-
-#define get_ds		(KERNEL_DS)
-
-/*
- * get_fs reads current->thread.current_ds into a register.
- * On Entry:
- * 	<ad>	anything
- * 	<sp>	stack
- * On Exit:
- * 	<ad>	contains current->thread.current_ds
- */
-	.macro	get_fs	ad, sp
-	GET_CURRENT(\ad,\sp)
-#if THREAD_CURRENT_DS > 1020
-	addi	\ad, \ad, TASK_THREAD
-	l32i	\ad, \ad, THREAD_CURRENT_DS - TASK_THREAD
-#else
-	l32i	\ad, \ad, THREAD_CURRENT_DS
-#endif
-	.endm
-
-/*
- * set_fs sets current->thread.current_ds to some value.
- * On Entry:
- *	<at>	anything (temp register)
- *	<av>	value to write
- *	<sp>	stack
- * On Exit:
- *	<at>	destroyed (actually, current)
- *	<av>	preserved, value to write
- */
-	.macro	set_fs	at, av, sp
-	GET_CURRENT(\at,\sp)
-	s32i	\av, \at, THREAD_CURRENT_DS
-	.endm
-
-/*
- * kernel_ok determines whether we should bypass addr/size checking.
- * See the equivalent C-macro version below for clarity.
- * On success, kernel_ok branches to a label indicated by parameter
- * <success>.  This implies that the macro falls through to the next
- * insruction on an error.
- *
- * Note that while this macro can be used independently, we designed
- * in for optimal use in the access_ok macro below (i.e., we fall
- * through on error).
- *
- * On Entry:
- * 	<at>		anything (temp register)
- * 	<success>	label to branch to on success; implies
- * 			fall-through macro on error
- * 	<sp>		stack pointer
- * On Exit:
- * 	<at>		destroyed (actually, current->thread.current_ds)
- */
-
-#if ((KERNEL_DS != 0) || (USER_DS == 0))
-# error Assembly macro kernel_ok fails
-#endif
-	.macro	kernel_ok  at, sp, success
-	get_fs	\at, \sp
-	beqz	\at, \success
-	.endm
-
-/*
- * user_ok determines whether the access to user-space memory is allowed.
- * See the equivalent C-macro version below for clarity.
- *
- * On error, user_ok branches to a label indicated by parameter
- * <error>.  This implies that the macro falls through to the next
- * instruction on success.
- *
- * Note that while this macro can be used independently, we designed
- * in for optimal use in the access_ok macro below (i.e., we fall
- * through on success).
- *
- * On Entry:
- * 	<aa>	register containing memory address
- * 	<as>	register containing memory size
- * 	<at>	temp register
- * 	<error>	label to branch to on error; implies fall-through
- * 		macro on success
- * On Exit:
- * 	<aa>	preserved
- * 	<as>	preserved
- * 	<at>	destroyed (actually, (TASK_SIZE + 1 - size))
- */
-	.macro	user_ok	aa, as, at, error
-	movi	\at, __XTENSA_UL_CONST(TASK_SIZE)
-	bgeu	\as, \at, \error
-	sub	\at, \at, \as
-	bgeu	\aa, \at, \error
-	.endm
-
-/*
- * access_ok determines whether a memory access is allowed.  See the
- * equivalent C-macro version below for clarity.
- *
- * On error, access_ok branches to a label indicated by parameter
- * <error>.  This implies that the macro falls through to the next
- * instruction on success.
- *
- * Note that we assume success is the common case, and we optimize the
- * branch fall-through case on success.
- *
- * On Entry:
- * 	<aa>	register containing memory address
- * 	<as>	register containing memory size
- * 	<at>	temp register
- * 	<sp>
- * 	<error>	label to branch to on error; implies fall-through
- * 		macro on success
- * On Exit:
- * 	<aa>	preserved
- * 	<as>	preserved
- * 	<at>	destroyed
- */
-	.macro	access_ok  aa, as, at, sp, error
-	kernel_ok  \at, \sp, .Laccess_ok_\@
-	user_ok    \aa, \as, \at, \error
-.Laccess_ok_\@:
-	.endm
-
-#else /* __ASSEMBLY__ not defined */
-
 #include <linux/sched.h>
 
 /*
@@ -495,16 +354,4 @@
 	unsigned long insn, fixup;
 };
 
-/* Returns 0 if exception not found and fixup.unit otherwise.  */
-
-extern unsigned long search_exception_table(unsigned long addr);
-extern void sort_exception_table(void);
-
-/* Returns the new pc */
-#define fixup_exception(map_reg, fixup_unit, pc)                \
-({                                                              \
-	fixup_unit;                                             \
-})
-
-#endif	/* __ASSEMBLY__ */
 #endif	/* _XTENSA_UACCESS_H */
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index 9e079d4..24365b3 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -117,4 +117,9 @@
 #define MAP_HUGE_SHIFT	26
 #define MAP_HUGE_MASK	0x3f
 
+#define PKEY_DISABLE_ACCESS	0x1
+#define PKEY_DISABLE_WRITE	0x2
+#define PKEY_ACCESS_MASK	(PKEY_DISABLE_ACCESS |\
+				 PKEY_DISABLE_WRITE)
+
 #endif /* _XTENSA_MMAN_H */
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index a482df5..6911e38 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -17,7 +17,7 @@
 #include <asm/processor.h>
 #include <asm/coprocessor.h>
 #include <asm/thread_info.h>
-#include <asm/uaccess.h>
+#include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
 #include <asm/ptrace.h>
 #include <asm/current.h>
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index fa04d9d..f5ef3cc 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -17,7 +17,7 @@
 #include <asm/processor.h>
 #include <asm/coprocessor.h>
 #include <asm/thread_info.h>
-#include <asm/uaccess.h>
+#include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
 #include <asm/ptrace.h>
 #include <asm/current.h>
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index dd38e5c..b08ccbb 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1340,10 +1340,8 @@
 			struct blkcg_policy_data *cpd;
 
 			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
-			if (!cpd) {
-				mutex_unlock(&blkcg_pol_mutex);
+			if (!cpd)
 				goto err_free_cpds;
-			}
 
 			blkcg->cpd[pol->plid] = cpd;
 			cpd->blkcg = blkcg;
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 083e56f..46fe924 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -31,6 +31,7 @@
 	unsigned int granularity;
 	enum req_op op;
 	int alignment;
+	sector_t bs_mask;
 
 	if (!q)
 		return -ENXIO;
@@ -50,6 +51,10 @@
 		op = REQ_OP_DISCARD;
 	}
 
+	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+	if ((sector | nr_sects) & bs_mask)
+		return -EINVAL;
+
 	/* Zero-sector (unknown) and one-sector granularities are the same.  */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
 	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
@@ -150,10 +155,15 @@
 	unsigned int max_write_same_sectors;
 	struct bio *bio = NULL;
 	int ret = 0;
+	sector_t bs_mask;
 
 	if (!q)
 		return -ENXIO;
 
+	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+	if ((sector | nr_sects) & bs_mask)
+		return -EINVAL;
+
 	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
 	max_write_same_sectors = UINT_MAX >> 9;
 
@@ -202,6 +212,11 @@
 	int ret;
 	struct bio *bio = NULL;
 	unsigned int sz;
+	sector_t bs_mask;
+
+	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+	if ((sector | nr_sects) & bs_mask)
+		return -EINVAL;
 
 	while (nr_sects != 0) {
 		bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 96631e6..06cf980 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -18,7 +18,7 @@
  * Softirq action handler - move entries to local list and loop over them
  * while passing them to the queue registered handler.
  */
-static void blk_done_softirq(struct softirq_action *h)
+static __latent_entropy void blk_done_softirq(struct softirq_action *h)
 {
 	struct list_head *cpu_list, local_list;
 
diff --git a/block/ioctl.c b/block/ioctl.c
index ed2397f..755119c 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -225,7 +225,8 @@
 		unsigned long arg)
 {
 	uint64_t range[2];
-	uint64_t start, len;
+	struct address_space *mapping;
+	uint64_t start, end, len;
 
 	if (!(mode & FMODE_WRITE))
 		return -EBADF;
@@ -235,18 +236,23 @@
 
 	start = range[0];
 	len = range[1];
+	end = start + len - 1;
 
 	if (start & 511)
 		return -EINVAL;
 	if (len & 511)
 		return -EINVAL;
-	start >>= 9;
-	len >>= 9;
-
-	if (start + len > (i_size_read(bdev->bd_inode) >> 9))
+	if (end >= (uint64_t)i_size_read(bdev->bd_inode))
+		return -EINVAL;
+	if (end < start)
 		return -EINVAL;
 
-	return blkdev_issue_zeroout(bdev, start, len, GFP_KERNEL, false);
+	/* Invalidate the page cache, including dirty pages */
+	mapping = bdev->bd_inode->i_mapping;
+	truncate_inode_pages_range(mapping, start, end);
+
+	return blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL,
+				    false);
 }
 
 static int put_ushort(unsigned long arg, unsigned short val)
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 68a5cea..2d8466f 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -39,6 +39,37 @@
 	bool has_key;
 };
 
+static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx)
+{
+	unsigned ds;
+
+	if (ctx->result)
+		return 0;
+
+	ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+
+	ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
+	if (!ctx->result)
+		return -ENOMEM;
+
+	memset(ctx->result, 0, ds);
+
+	return 0;
+}
+
+static void hash_free_result(struct sock *sk, struct hash_ctx *ctx)
+{
+	unsigned ds;
+
+	if (!ctx->result)
+		return;
+
+	ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+
+	sock_kzfree_s(sk, ctx->result, ds);
+	ctx->result = NULL;
+}
+
 static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 			size_t ignored)
 {
@@ -54,6 +85,9 @@
 
 	lock_sock(sk);
 	if (!ctx->more) {
+		if ((msg->msg_flags & MSG_MORE))
+			hash_free_result(sk, ctx);
+
 		err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
 						&ctx->completion);
 		if (err)
@@ -90,6 +124,10 @@
 
 	ctx->more = msg->msg_flags & MSG_MORE;
 	if (!ctx->more) {
+		err = hash_alloc_result(sk, ctx);
+		if (err)
+			goto unlock;
+
 		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
 		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
 						 &ctx->completion);
@@ -116,6 +154,13 @@
 	sg_init_table(ctx->sgl.sg, 1);
 	sg_set_page(ctx->sgl.sg, page, size, offset);
 
+	if (!(flags & MSG_MORE)) {
+		err = hash_alloc_result(sk, ctx);
+		if (err)
+			goto unlock;
+	} else if (!ctx->more)
+		hash_free_result(sk, ctx);
+
 	ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);
 
 	if (!(flags & MSG_MORE)) {
@@ -153,6 +198,7 @@
 	struct alg_sock *ask = alg_sk(sk);
 	struct hash_ctx *ctx = ask->private;
 	unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+	bool result;
 	int err;
 
 	if (len > ds)
@@ -161,17 +207,29 @@
 		msg->msg_flags |= MSG_TRUNC;
 
 	lock_sock(sk);
+	result = ctx->result;
+	err = hash_alloc_result(sk, ctx);
+	if (err)
+		goto unlock;
+
+	ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
+
 	if (ctx->more) {
 		ctx->more = 0;
-		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
 		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
 						 &ctx->completion);
 		if (err)
 			goto unlock;
+	} else if (!result) {
+		err = af_alg_wait_for_completion(
+				crypto_ahash_digest(&ctx->req),
+				&ctx->completion);
 	}
 
 	err = memcpy_to_msg(msg, ctx->result, len);
 
+	hash_free_result(sk, ctx);
+
 unlock:
 	release_sock(sk);
 
@@ -394,8 +452,7 @@
 	struct alg_sock *ask = alg_sk(sk);
 	struct hash_ctx *ctx = ask->private;
 
-	sock_kzfree_s(sk, ctx->result,
-		      crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
+	hash_free_result(sk, ctx);
 	sock_kfree_s(sk, ctx, ctx->len);
 	af_alg_release_parent(sk);
 }
@@ -407,20 +464,12 @@
 	struct algif_hash_tfm *tfm = private;
 	struct crypto_ahash *hash = tfm->hash;
 	unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
-	unsigned ds = crypto_ahash_digestsize(hash);
 
 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;
 
-	ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
-	if (!ctx->result) {
-		sock_kfree_s(sk, ctx, len);
-		return -ENOMEM;
-	}
-
-	memset(ctx->result, 0, ds);
-
+	ctx->result = NULL;
 	ctx->len = len;
 	ctx->more = 0;
 	af_alg_init_completion(&ctx->completion);
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index c1229614..8e94e29 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -107,10 +107,7 @@
 
 static int __init crct10dif_mod_init(void)
 {
-	int ret;
-
-	ret = crypto_register_shash(&alg);
-	return ret;
+	return crypto_register_shash(&alg);
 }
 
 static void __exit crct10dif_mod_fini(void)
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index a55c82d..6989ba0 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -14,13 +14,12 @@
 
 #include <linux/err.h>
 #include <linux/delay.h>
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
 #include "internal.h"
 
 #define CRYPTO_ENGINE_MAX_QLEN 10
 
-void crypto_finalize_request(struct crypto_engine *engine,
-			     struct ablkcipher_request *req, int err);
-
 /**
  * crypto_pump_requests - dequeue one request from engine queue to process
  * @engine: the hardware engine
@@ -34,10 +33,11 @@
 				 bool in_kthread)
 {
 	struct crypto_async_request *async_req, *backlog;
-	struct ablkcipher_request *req;
+	struct ahash_request *hreq;
+	struct ablkcipher_request *breq;
 	unsigned long flags;
 	bool was_busy = false;
-	int ret;
+	int ret, rtype;
 
 	spin_lock_irqsave(&engine->queue_lock, flags);
 
@@ -47,7 +47,7 @@
 
 	/* If another context is idling then defer */
 	if (engine->idling) {
-		queue_kthread_work(&engine->kworker, &engine->pump_requests);
+		kthread_queue_work(&engine->kworker, &engine->pump_requests);
 		goto out;
 	}
 
@@ -58,7 +58,7 @@
 
 		/* Only do teardown in the thread */
 		if (!in_kthread) {
-			queue_kthread_work(&engine->kworker,
+			kthread_queue_work(&engine->kworker,
 					   &engine->pump_requests);
 			goto out;
 		}
@@ -82,9 +82,7 @@
 	if (!async_req)
 		goto out;
 
-	req = ablkcipher_request_cast(async_req);
-
-	engine->cur_req = req;
+	engine->cur_req = async_req;
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);
 
@@ -95,6 +93,7 @@
 
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 
+	rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
 	/* Until here we get the request need to be encrypted successfully */
 	if (!was_busy && engine->prepare_crypt_hardware) {
 		ret = engine->prepare_crypt_hardware(engine);
@@ -104,24 +103,55 @@
 		}
 	}
 
-	if (engine->prepare_request) {
-		ret = engine->prepare_request(engine, engine->cur_req);
+	switch (rtype) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		hreq = ahash_request_cast(engine->cur_req);
+		if (engine->prepare_hash_request) {
+			ret = engine->prepare_hash_request(engine, hreq);
+			if (ret) {
+				pr_err("failed to prepare request: %d\n", ret);
+				goto req_err;
+			}
+			engine->cur_req_prepared = true;
+		}
+		ret = engine->hash_one_request(engine, hreq);
 		if (ret) {
-			pr_err("failed to prepare request: %d\n", ret);
+			pr_err("failed to hash one request from queue\n");
 			goto req_err;
 		}
-		engine->cur_req_prepared = true;
+		return;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		breq = ablkcipher_request_cast(engine->cur_req);
+		if (engine->prepare_cipher_request) {
+			ret = engine->prepare_cipher_request(engine, breq);
+			if (ret) {
+				pr_err("failed to prepare request: %d\n", ret);
+				goto req_err;
+			}
+			engine->cur_req_prepared = true;
+		}
+		ret = engine->cipher_one_request(engine, breq);
+		if (ret) {
+			pr_err("failed to cipher one request from queue\n");
+			goto req_err;
+		}
+		return;
+	default:
+		pr_err("failed to prepare request of unknown type\n");
+		return;
 	}
 
-	ret = engine->crypt_one_request(engine, engine->cur_req);
-	if (ret) {
-		pr_err("failed to crypt one request from queue\n");
-		goto req_err;
-	}
-	return;
-
 req_err:
-	crypto_finalize_request(engine, engine->cur_req, ret);
+	switch (rtype) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		hreq = ahash_request_cast(engine->cur_req);
+		crypto_finalize_hash_request(engine, hreq, ret);
+		break;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		breq = ablkcipher_request_cast(engine->cur_req);
+		crypto_finalize_cipher_request(engine, breq, ret);
+		break;
+	}
 	return;
 
 out:
@@ -137,12 +167,14 @@
 }
 
 /**
- * crypto_transfer_request - transfer the new request into the engine queue
+ * crypto_transfer_cipher_request - transfer the new request into the
+ * enginequeue
  * @engine: the hardware engine
  * @req: the request need to be listed into the engine queue
  */
-int crypto_transfer_request(struct crypto_engine *engine,
-			    struct ablkcipher_request *req, bool need_pump)
+int crypto_transfer_cipher_request(struct crypto_engine *engine,
+				   struct ablkcipher_request *req,
+				   bool need_pump)
 {
 	unsigned long flags;
 	int ret;
@@ -157,51 +189,93 @@
 	ret = ablkcipher_enqueue_request(&engine->queue, req);
 
 	if (!engine->busy && need_pump)
-		queue_kthread_work(&engine->kworker, &engine->pump_requests);
+		kthread_queue_work(&engine->kworker, &engine->pump_requests);
 
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(crypto_transfer_request);
+EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);
 
 /**
- * crypto_transfer_request_to_engine - transfer one request to list into the
- * engine queue
+ * crypto_transfer_cipher_request_to_engine - transfer one request to list
+ * into the engine queue
  * @engine: the hardware engine
  * @req: the request need to be listed into the engine queue
  */
-int crypto_transfer_request_to_engine(struct crypto_engine *engine,
-				      struct ablkcipher_request *req)
+int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
+					     struct ablkcipher_request *req)
 {
-	return crypto_transfer_request(engine, req, true);
+	return crypto_transfer_cipher_request(engine, req, true);
 }
-EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);
+EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
 
 /**
- * crypto_finalize_request - finalize one request if the request is done
+ * crypto_transfer_hash_request - transfer the new request into the
+ * enginequeue
+ * @engine: the hardware engine
+ * @req: the request need to be listed into the engine queue
+ */
+int crypto_transfer_hash_request(struct crypto_engine *engine,
+				 struct ahash_request *req, bool need_pump)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&engine->queue_lock, flags);
+
+	if (!engine->running) {
+		spin_unlock_irqrestore(&engine->queue_lock, flags);
+		return -ESHUTDOWN;
+	}
+
+	ret = ahash_enqueue_request(&engine->queue, req);
+
+	if (!engine->busy && need_pump)
+		kthread_queue_work(&engine->kworker, &engine->pump_requests);
+
+	spin_unlock_irqrestore(&engine->queue_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);
+
+/**
+ * crypto_transfer_hash_request_to_engine - transfer one request to list
+ * into the engine queue
+ * @engine: the hardware engine
+ * @req: the request need to be listed into the engine queue
+ */
+int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
+					   struct ahash_request *req)
+{
+	return crypto_transfer_hash_request(engine, req, true);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
+
+/**
+ * crypto_finalize_cipher_request - finalize one request if the request is done
  * @engine: the hardware engine
  * @req: the request need to be finalized
  * @err: error number
  */
-void crypto_finalize_request(struct crypto_engine *engine,
-			     struct ablkcipher_request *req, int err)
+void crypto_finalize_cipher_request(struct crypto_engine *engine,
+				    struct ablkcipher_request *req, int err)
 {
 	unsigned long flags;
 	bool finalize_cur_req = false;
 	int ret;
 
 	spin_lock_irqsave(&engine->queue_lock, flags);
-	if (engine->cur_req == req)
+	if (engine->cur_req == &req->base)
 		finalize_cur_req = true;
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 
 	if (finalize_cur_req) {
-		if (engine->cur_req_prepared && engine->unprepare_request) {
-			ret = engine->unprepare_request(engine, req);
+		if (engine->cur_req_prepared &&
+		    engine->unprepare_cipher_request) {
+			ret = engine->unprepare_cipher_request(engine, req);
 			if (ret)
 				pr_err("failed to unprepare request\n");
 		}
-
 		spin_lock_irqsave(&engine->queue_lock, flags);
 		engine->cur_req = NULL;
 		engine->cur_req_prepared = false;
@@ -210,9 +284,46 @@
 
 	req->base.complete(&req->base, err);
 
-	queue_kthread_work(&engine->kworker, &engine->pump_requests);
+	kthread_queue_work(&engine->kworker, &engine->pump_requests);
 }
-EXPORT_SYMBOL_GPL(crypto_finalize_request);
+EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
+
+/**
+ * crypto_finalize_hash_request - finalize one request if the request is done
+ * @engine: the hardware engine
+ * @req: the request need to be finalized
+ * @err: error number
+ */
+void crypto_finalize_hash_request(struct crypto_engine *engine,
+				  struct ahash_request *req, int err)
+{
+	unsigned long flags;
+	bool finalize_cur_req = false;
+	int ret;
+
+	spin_lock_irqsave(&engine->queue_lock, flags);
+	if (engine->cur_req == &req->base)
+		finalize_cur_req = true;
+	spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+	if (finalize_cur_req) {
+		if (engine->cur_req_prepared &&
+		    engine->unprepare_hash_request) {
+			ret = engine->unprepare_hash_request(engine, req);
+			if (ret)
+				pr_err("failed to unprepare request\n");
+		}
+		spin_lock_irqsave(&engine->queue_lock, flags);
+		engine->cur_req = NULL;
+		engine->cur_req_prepared = false;
+		spin_unlock_irqrestore(&engine->queue_lock, flags);
+	}
+
+	req->base.complete(&req->base, err);
+
+	kthread_queue_work(&engine->kworker, &engine->pump_requests);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
 
 /**
  * crypto_engine_start - start the hardware engine
@@ -234,7 +345,7 @@
 	engine->running = true;
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 
-	queue_kthread_work(&engine->kworker, &engine->pump_requests);
+	kthread_queue_work(&engine->kworker, &engine->pump_requests);
 
 	return 0;
 }
@@ -249,7 +360,7 @@
 int crypto_engine_stop(struct crypto_engine *engine)
 {
 	unsigned long flags;
-	unsigned limit = 500;
+	unsigned int limit = 500;
 	int ret = 0;
 
 	spin_lock_irqsave(&engine->queue_lock, flags);
@@ -311,7 +422,7 @@
 	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
 	spin_lock_init(&engine->queue_lock);
 
-	init_kthread_worker(&engine->kworker);
+	kthread_init_worker(&engine->kworker);
 	engine->kworker_task = kthread_run(kthread_worker_fn,
 					   &engine->kworker, "%s",
 					   engine->name);
@@ -319,7 +430,7 @@
 		dev_err(dev, "failed to create crypto request pump task\n");
 		return NULL;
 	}
-	init_kthread_work(&engine->pump_requests, crypto_pump_work);
+	kthread_init_work(&engine->pump_requests, crypto_pump_work);
 
 	if (engine->rt) {
 		dev_info(dev, "will run requests pump with realtime priority\n");
@@ -344,7 +455,7 @@
 	if (ret)
 		return ret;
 
-	flush_kthread_worker(&engine->kworker);
+	kthread_flush_worker(&engine->kworker);
 	kthread_stop(engine->kworker_task);
 
 	return 0;
diff --git a/crypto/drbg.c b/crypto/drbg.c
index f752da3..fb33f7d 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1178,12 +1178,16 @@
 		goto err;
 
 	drbg->Vbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
-	if (!drbg->Vbuf)
+	if (!drbg->Vbuf) {
+		ret = -ENOMEM;
 		goto fini;
+	}
 	drbg->V = PTR_ALIGN(drbg->Vbuf, ret + 1);
 	drbg->Cbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
-	if (!drbg->Cbuf)
+	if (!drbg->Cbuf) {
+		ret = -ENOMEM;
 		goto fini;
+	}
 	drbg->C = PTR_ALIGN(drbg->Cbuf, ret + 1);
 	/* scratchpad is only generated for CTR and Hash */
 	if (drbg->core->flags & DRBG_HMAC)
@@ -1199,8 +1203,10 @@
 
 	if (0 < sb_size) {
 		drbg->scratchpadbuf = kzalloc(sb_size + ret, GFP_KERNEL);
-		if (!drbg->scratchpadbuf)
+		if (!drbg->scratchpadbuf) {
+			ret = -ENOMEM;
 			goto fini;
+		}
 		drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1);
 	}
 
@@ -1917,6 +1923,8 @@
 		return -ENOMEM;
 
 	mutex_init(&drbg->drbg_mutex);
+	drbg->core = &drbg_cores[coreref];
+	drbg->reseed_threshold = drbg_max_requests(drbg);
 
 	/*
 	 * if the following tests fail, it is likely that there is a buffer
@@ -1926,12 +1934,6 @@
 	 * grave bug.
 	 */
 
-	/* get a valid instance of DRBG for following tests */
-	ret = drbg_instantiate(drbg, NULL, coreref, pr);
-	if (ret) {
-		rc = ret;
-		goto outbuf;
-	}
 	max_addtllen = drbg_max_addtl(drbg);
 	max_request_bytes = drbg_max_request_bytes(drbg);
 	drbg_string_fill(&addtl, buf, max_addtllen + 1);
@@ -1941,10 +1943,9 @@
 	/* overflow max_bits */
 	len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL);
 	BUG_ON(0 < len);
-	drbg_uninstantiate(drbg);
 
 	/* overflow max addtllen with personalization string */
-	ret = drbg_instantiate(drbg, &addtl, coreref, pr);
+	ret = drbg_seed(drbg, &addtl, false);
 	BUG_ON(0 == ret);
 	/* all tests passed */
 	rc = 0;
@@ -1952,9 +1953,7 @@
 	pr_devel("DRBG: Sanity tests for failure code paths successfully "
 		 "completed\n");
 
-	drbg_uninstantiate(drbg);
-outbuf:
-	kzfree(drbg);
+	kfree(drbg);
 	return rc;
 }
 
@@ -2006,7 +2005,7 @@
 {
 	unsigned int i = 0; /* pointer to drbg_algs */
 	unsigned int j = 0; /* pointer to drbg_cores */
-	int ret = -EFAULT;
+	int ret;
 
 	ret = drbg_healthcheck_sanity();
 	if (ret)
@@ -2016,7 +2015,7 @@
 		pr_info("DRBG: Cannot register all DRBG types"
 			"(slots needed: %zu, slots available: %zu)\n",
 			ARRAY_SIZE(drbg_cores) * 2, ARRAY_SIZE(drbg_algs));
-		return ret;
+		return -EFAULT;
 	}
 
 	/*
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 70a892e8..f624ac9 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -117,7 +117,7 @@
 	struct crypto_skcipher *ctr = ctx->ctr;
 	struct {
 		be128 hash;
-		u8 iv[8];
+		u8 iv[16];
 
 		struct crypto_gcm_setkey_result result;
 
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index bac7099..12ad3e3 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -14,24 +14,13 @@
 
 #include <crypto/algapi.h>
 #include <crypto/gf128mul.h>
+#include <crypto/ghash.h>
 #include <crypto/internal/hash.h>
 #include <linux/crypto.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 
-#define GHASH_BLOCK_SIZE	16
-#define GHASH_DIGEST_SIZE	16
-
-struct ghash_ctx {
-	struct gf128mul_4k *gf128;
-};
-
-struct ghash_desc_ctx {
-	u8 buffer[GHASH_BLOCK_SIZE];
-	u32 bytes;
-};
-
 static int ghash_init(struct shash_desc *desc)
 {
 	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index 86fb59b..94ee44a 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -612,12 +612,7 @@
 
 int ahash_mcryptd_digest(struct ahash_request *desc)
 {
-	int err;
-
-	err = crypto_ahash_init(desc) ?:
-	      ahash_mcryptd_finup(desc);
-
-	return err;
+	return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
 }
 
 int ahash_mcryptd_update(struct ahash_request *desc)
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
index 4df6451..0b66dc8 100644
--- a/crypto/rsa_helper.c
+++ b/crypto/rsa_helper.c
@@ -35,8 +35,8 @@
 			n_sz--;
 		}
 
-		/* In FIPS mode only allow key size 2K & 3K */
-		if (n_sz != 256 && n_sz != 384) {
+		/* In FIPS mode only allow key size 2K and higher */
+		if (n_sz < 256) {
 			pr_err("RSA: key size not allowed in FIPS mode\n");
 			return -EINVAL;
 		}
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 5c9d5a5..62dffa0 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -209,16 +209,19 @@
 	char *state;
 	struct ahash_request *req;
 	int statesize, ret = -EINVAL;
+	const char guard[] = { 0x00, 0xba, 0xad, 0x00 };
 
 	req = *preq;
 	statesize = crypto_ahash_statesize(
 			crypto_ahash_reqtfm(req));
-	state = kmalloc(statesize, GFP_KERNEL);
+	state = kmalloc(statesize + sizeof(guard), GFP_KERNEL);
 	if (!state) {
 		pr_err("alt: hash: Failed to alloc state for %s\n", algo);
 		goto out_nostate;
 	}
+	memcpy(state + statesize, guard, sizeof(guard));
 	ret = crypto_ahash_export(req, state);
+	WARN_ON(memcmp(state + statesize, guard, sizeof(guard)));
 	if (ret) {
 		pr_err("alt: hash: Failed to export() for %s\n", algo);
 		goto out;
@@ -665,7 +668,7 @@
 		memcpy(key, template[i].key, template[i].klen);
 
 		ret = crypto_aead_setkey(tfm, key, template[i].klen);
-		if (!ret == template[i].fail) {
+		if (template[i].fail == !ret) {
 			pr_err("alg: aead%s: setkey failed on test %d for %s: flags=%x\n",
 			       d, j, algo, crypto_aead_get_flags(tfm));
 			goto out;
@@ -770,7 +773,7 @@
 		memcpy(key, template[i].key, template[i].klen);
 
 		ret = crypto_aead_setkey(tfm, key, template[i].klen);
-		if (!ret == template[i].fail) {
+		if (template[i].fail == !ret) {
 			pr_err("alg: aead%s: setkey failed on chunk test %d for %s: flags=%x\n",
 			       d, j, algo, crypto_aead_get_flags(tfm));
 			goto out;
@@ -1008,6 +1011,9 @@
 		if (template[i].np)
 			continue;
 
+		if (fips_enabled && template[i].fips_skip)
+			continue;
+
 		j++;
 
 		ret = -EINVAL;
@@ -1023,7 +1029,7 @@
 
 		ret = crypto_cipher_setkey(tfm, template[i].key,
 					   template[i].klen);
-		if (!ret == template[i].fail) {
+		if (template[i].fail == !ret) {
 			printk(KERN_ERR "alg: cipher: setkey failed "
 			       "on test %d for %s: flags=%x\n", j,
 			       algo, crypto_cipher_get_flags(tfm));
@@ -1112,6 +1118,9 @@
 		if (template[i].np && !template[i].also_non_np)
 			continue;
 
+		if (fips_enabled && template[i].fips_skip)
+			continue;
+
 		if (template[i].iv)
 			memcpy(iv, template[i].iv, ivsize);
 		else
@@ -1133,7 +1142,7 @@
 
 		ret = crypto_skcipher_setkey(tfm, template[i].key,
 					     template[i].klen);
-		if (!ret == template[i].fail) {
+		if (template[i].fail == !ret) {
 			pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n",
 			       d, j, algo, crypto_skcipher_get_flags(tfm));
 			goto out;
@@ -1198,6 +1207,9 @@
 		if (!template[i].np)
 			continue;
 
+		if (fips_enabled && template[i].fips_skip)
+			continue;
+
 		if (template[i].iv)
 			memcpy(iv, template[i].iv, ivsize);
 		else
@@ -1211,7 +1223,7 @@
 
 		ret = crypto_skcipher_setkey(tfm, template[i].key,
 					     template[i].klen);
-		if (!ret == template[i].fail) {
+		if (template[i].fail == !ret) {
 			pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n",
 			       d, j, algo, crypto_skcipher_get_flags(tfm));
 			goto out;
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index acb6bbf..e64a4ef 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -59,6 +59,7 @@
  * @tap:	How to distribute data in @np SGs
  * @also_non_np: 	if set to 1, the test will be also done without
  * 			splitting data in @np SGs
+ * @fips_skip:	Skip the test vector in FIPS mode
  */
 
 struct cipher_testvec {
@@ -75,6 +76,7 @@
 	unsigned char klen;
 	unsigned short ilen;
 	unsigned short rlen;
+	bool fips_skip;
 };
 
 struct aead_testvec {
@@ -18224,6 +18226,7 @@
 			  "\x00\x00\x00\x00\x00\x00\x00\x00"
 			  "\x00\x00\x00\x00\x00\x00\x00\x00",
 		.klen   = 32,
+		.fips_skip = 1,
 		.iv     = "\x00\x00\x00\x00\x00\x00\x00\x00"
 			  "\x00\x00\x00\x00\x00\x00\x00\x00",
 		.input  = "\x00\x00\x00\x00\x00\x00\x00\x00"
@@ -18566,6 +18569,7 @@
 			  "\x00\x00\x00\x00\x00\x00\x00\x00"
 			  "\x00\x00\x00\x00\x00\x00\x00\x00",
 		.klen   = 32,
+		.fips_skip = 1,
 		.iv     = "\x00\x00\x00\x00\x00\x00\x00\x00"
 			  "\x00\x00\x00\x00\x00\x00\x00\x00",
 		.input = "\x91\x7c\xf6\x9e\xbd\x68\xb2\xec"
diff --git a/crypto/xor.c b/crypto/xor.c
index 35d6b3a..263af9f 100644
--- a/crypto/xor.c
+++ b/crypto/xor.c
@@ -24,6 +24,10 @@
 #include <linux/preempt.h>
 #include <asm/xor.h>
 
+#ifndef XOR_SELECT_TEMPLATE
+#define XOR_SELECT_TEMPLATE(x) (x)
+#endif
+
 /* The xor routines to use.  */
 static struct xor_block_template *active_template;
 
@@ -109,6 +113,15 @@
 	void *b1, *b2;
 	struct xor_block_template *f, *fastest;
 
+	fastest = XOR_SELECT_TEMPLATE(NULL);
+
+	if (fastest) {
+		printk(KERN_INFO "xor: automatically using best "
+				 "checksumming function   %-10s\n",
+		       fastest->name);
+		goto out;
+	}
+
 	/*
 	 * Note: Since the memory is not actually used for _anything_ but to
 	 * test the XOR speed, we don't really want kmemcheck to warn about
@@ -126,36 +139,22 @@
 	 * all the possible functions, just test the best one
 	 */
 
-	fastest = NULL;
-
-#ifdef XOR_SELECT_TEMPLATE
-		fastest = XOR_SELECT_TEMPLATE(fastest);
-#endif
-
 #define xor_speed(templ)	do_xor_speed((templ), b1, b2)
 
-	if (fastest) {
-		printk(KERN_INFO "xor: automatically using best "
-				 "checksumming function:\n");
-		xor_speed(fastest);
-		goto out;
-	} else {
-		printk(KERN_INFO "xor: measuring software checksum speed\n");
-		XOR_TRY_TEMPLATES;
-		fastest = template_list;
-		for (f = fastest; f; f = f->next)
-			if (f->speed > fastest->speed)
-				fastest = f;
-	}
+	printk(KERN_INFO "xor: measuring software checksum speed\n");
+	XOR_TRY_TEMPLATES;
+	fastest = template_list;
+	for (f = fastest; f; f = f->next)
+		if (f->speed > fastest->speed)
+			fastest = f;
 
 	printk(KERN_INFO "xor: using function: %s (%d.%03d MB/sec)\n",
 	       fastest->name, fastest->speed / 1000, fastest->speed % 1000);
 
 #undef xor_speed
 
- out:
 	free_pages((unsigned long)b1, 2);
-
+out:
 	active_template = fastest;
 	return 0;
 }
diff --git a/crypto/xts.c b/crypto/xts.c
index 26ba583..305343f 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -5,7 +5,7 @@
  *
  * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
  *
- * Based om ecb.c
+ * Based on ecb.c
  * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 8ea8211..eb76a4c 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include <linux/acpi.h>
 #include <asm/mwait.h>
+#include <xen/xen.h>
 
 #define ACPI_PROCESSOR_AGGREGATOR_CLASS	"acpi_pad"
 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
@@ -477,6 +478,10 @@
 
 static int __init acpi_pad_init(void)
 {
+	/* Xen ACPI PAD is used when running as Xen Dom0. */
+	if (xen_initial_domain())
+		return -ENODEV;
+
 	power_saving_mwait_init();
 	if (power_saving_mwait_eax == 0)
 		return -EINVAL;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 6805310..48e19d0 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -526,6 +526,7 @@
 		acpi_ec_clear(ec);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static bool acpi_ec_query_flushed(struct acpi_ec *ec)
 {
 	bool flushed;
@@ -557,6 +558,7 @@
 	spin_unlock_irqrestore(&ec->lock, flags);
 	__acpi_ec_flush_event(ec);
 }
+#endif /* CONFIG_PM_SLEEP */
 
 static bool acpi_ec_guard_event(struct acpi_ec *ec)
 {
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 384cfc3..6cf4988 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -129,8 +129,18 @@
 
 	control = obj->package.elements[1].integer.value;
 	for (i = 0; i < fan->fps_count; i++) {
-		if (control == fan->fps[i].control)
+		/*
+		 * When Fine Grain Control is set, return the state
+		 * corresponding to maximum fan->fps[i].control
+		 * value compared to the current speed. Here the
+		 * fan->fps[] is sorted array with increasing speed.
+		 */
+		if (fan->fif.fine_grain_ctrl && control < fan->fps[i].control) {
+			i = (i > 0) ? i - 1 : 0;
 			break;
+		} else if (control == fan->fps[i].control) {
+			break;
+		}
 	}
 	if (i == fan->fps_count) {
 		dev_dbg(&device->dev, "Invalid control value returned\n");
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index e1d5ea6..71a7d07 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -886,6 +886,58 @@
 }
 static DEVICE_ATTR_RO(revision);
 
+static ssize_t hw_error_scrub_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
+	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
+}
+
+/*
+ * The 'hw_error_scrub' attribute can have the following values written to it:
+ * '0': Switch to the default mode where an exception will only insert
+ *      the address of the memory error into the poison and badblocks lists.
+ * '1': Enable a full scrub to happen if an exception for a memory error is
+ *      received.
+ */
+static ssize_t hw_error_scrub_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct nvdimm_bus_descriptor *nd_desc;
+	ssize_t rc;
+	long val;
+
+	rc = kstrtol(buf, 0, &val);
+	if (rc)
+		return rc;
+
+	device_lock(dev);
+	nd_desc = dev_get_drvdata(dev);
+	if (nd_desc) {
+		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
+		switch (val) {
+		case HW_ERROR_SCRUB_ON:
+			acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
+			break;
+		case HW_ERROR_SCRUB_OFF:
+			acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+	}
+	device_unlock(dev);
+	if (rc)
+		return rc;
+	return size;
+}
+static DEVICE_ATTR_RW(hw_error_scrub);
+
 /*
  * This shows the number of full Address Range Scrubs that have been
  * completed since driver load time. Userspace can wait on this using
@@ -958,6 +1010,7 @@
 static struct attribute *acpi_nfit_attributes[] = {
 	&dev_attr_revision.attr,
 	&dev_attr_scrub.attr,
+	&dev_attr_hw_error_scrub.attr,
 	NULL,
 };
 
@@ -1256,6 +1309,44 @@
 	return NULL;
 }
 
+void __acpi_nvdimm_notify(struct device *dev, u32 event)
+{
+	struct nfit_mem *nfit_mem;
+	struct acpi_nfit_desc *acpi_desc;
+
+	dev_dbg(dev->parent, "%s: %s: event: %d\n", dev_name(dev), __func__,
+			event);
+
+	if (event != NFIT_NOTIFY_DIMM_HEALTH) {
+		dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
+				event);
+		return;
+	}
+
+	acpi_desc = dev_get_drvdata(dev->parent);
+	if (!acpi_desc)
+		return;
+
+	/*
+	 * If we successfully retrieved acpi_desc, then we know nfit_mem data
+	 * is still valid.
+	 */
+	nfit_mem = dev_get_drvdata(dev);
+	if (nfit_mem && nfit_mem->flags_attr)
+		sysfs_notify_dirent(nfit_mem->flags_attr);
+}
+EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);
+
+static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
+{
+	struct acpi_device *adev = data;
+	struct device *dev = &adev->dev;
+
+	device_lock(dev->parent);
+	__acpi_nvdimm_notify(dev, event);
+	device_unlock(dev->parent);
+}
+
 static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_mem *nfit_mem, u32 device_handle)
 {
@@ -1280,6 +1371,13 @@
 		return force_enable_dimms ? 0 : -ENODEV;
 	}
 
+	if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
+		ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
+		dev_err(dev, "%s: notification registration failed\n",
+				dev_name(&adev_dimm->dev));
+		return -ENXIO;
+	}
+
 	/*
 	 * Until standardization materializes we need to consider 4
 	 * different command sets.  Note, that checking for function0 (bit0)
@@ -1318,18 +1416,41 @@
 	return 0;
 }
 
+static void shutdown_dimm_notify(void *data)
+{
+	struct acpi_nfit_desc *acpi_desc = data;
+	struct nfit_mem *nfit_mem;
+
+	mutex_lock(&acpi_desc->init_mutex);
+	/*
+	 * Clear out the nfit_mem->flags_attr and shut down dimm event
+	 * notifications.
+	 */
+	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
+		struct acpi_device *adev_dimm = nfit_mem->adev;
+
+		if (nfit_mem->flags_attr) {
+			sysfs_put(nfit_mem->flags_attr);
+			nfit_mem->flags_attr = NULL;
+		}
+		if (adev_dimm)
+			acpi_remove_notify_handler(adev_dimm->handle,
+					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
+	}
+	mutex_unlock(&acpi_desc->init_mutex);
+}
+
 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
 {
 	struct nfit_mem *nfit_mem;
-	int dimm_count = 0;
+	int dimm_count = 0, rc;
+	struct nvdimm *nvdimm;
 
 	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
 		struct acpi_nfit_flush_address *flush;
 		unsigned long flags = 0, cmd_mask;
-		struct nvdimm *nvdimm;
 		u32 device_handle;
 		u16 mem_flags;
-		int rc;
 
 		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
 		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
@@ -1382,7 +1503,30 @@
 
 	}
 
-	return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
+	rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
+	if (rc)
+		return rc;
+
+	/*
+	 * Now that dimms are successfully registered, and async registration
+	 * is flushed, attempt to enable event notification.
+	 */
+	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
+		struct kernfs_node *nfit_kernfs;
+
+		nvdimm = nfit_mem->nvdimm;
+		nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
+		if (nfit_kernfs)
+			nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
+					"flags");
+		sysfs_put(nfit_kernfs);
+		if (!nfit_mem->flags_attr)
+			dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
+					nvdimm_name(nvdimm));
+	}
+
+	return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
+			acpi_desc);
 }
 
 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
@@ -1491,9 +1635,9 @@
 	if (!info)
 		return -ENOMEM;
 	for (i = 0; i < nr; i++) {
-		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
+		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
 		struct nfit_set_info_map *map = &info->mapping[i];
-		struct nvdimm *nvdimm = nd_mapping->nvdimm;
+		struct nvdimm *nvdimm = mapping->nvdimm;
 		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
 		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
 				spa->range_index, i);
@@ -1917,7 +2061,7 @@
 }
 
 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
-		struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
+		struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
 		struct acpi_nfit_memory_map *memdev,
 		struct nfit_spa *nfit_spa)
 {
@@ -1934,12 +2078,12 @@
 		return -ENODEV;
 	}
 
-	nd_mapping->nvdimm = nvdimm;
+	mapping->nvdimm = nvdimm;
 	switch (nfit_spa_type(spa)) {
 	case NFIT_SPA_PM:
 	case NFIT_SPA_VOLATILE:
-		nd_mapping->start = memdev->address;
-		nd_mapping->size = memdev->region_size;
+		mapping->start = memdev->address;
+		mapping->size = memdev->region_size;
 		break;
 	case NFIT_SPA_DCR:
 		nfit_mem = nvdimm_provider_data(nvdimm);
@@ -1947,13 +2091,13 @@
 			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
 					spa->range_index, nvdimm_name(nvdimm));
 		} else {
-			nd_mapping->size = nfit_mem->bdw->capacity;
-			nd_mapping->start = nfit_mem->bdw->start_address;
+			mapping->size = nfit_mem->bdw->capacity;
+			mapping->start = nfit_mem->bdw->start_address;
 			ndr_desc->num_lanes = nfit_mem->bdw->windows;
 			blk_valid = 1;
 		}
 
-		ndr_desc->nd_mapping = nd_mapping;
+		ndr_desc->mapping = mapping;
 		ndr_desc->num_mappings = blk_valid;
 		ndbr_desc = to_blk_region_desc(ndr_desc);
 		ndbr_desc->enable = acpi_nfit_blk_region_enable;
@@ -1979,7 +2123,7 @@
 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_spa *nfit_spa)
 {
-	static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
+	static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
 	struct acpi_nfit_system_address *spa = nfit_spa->spa;
 	struct nd_blk_region_desc ndbr_desc;
 	struct nd_region_desc *ndr_desc;
@@ -1998,7 +2142,7 @@
 	}
 
 	memset(&res, 0, sizeof(res));
-	memset(&nd_mappings, 0, sizeof(nd_mappings));
+	memset(&mappings, 0, sizeof(mappings));
 	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
 	res.start = spa->address;
 	res.end = res.start + spa->length - 1;
@@ -2014,7 +2158,7 @@
 
 	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
 		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
-		struct nd_mapping *nd_mapping;
+		struct nd_mapping_desc *mapping;
 
 		if (memdev->range_index != spa->range_index)
 			continue;
@@ -2023,14 +2167,14 @@
 					spa->range_index, ND_MAX_MAPPINGS);
 			return -ENXIO;
 		}
-		nd_mapping = &nd_mappings[count++];
-		rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
+		mapping = &mappings[count++];
+		rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
 				memdev, nfit_spa);
 		if (rc)
 			goto out;
 	}
 
-	ndr_desc->nd_mapping = nd_mappings;
+	ndr_desc->mapping = mappings;
 	ndr_desc->num_mappings = count;
 	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
 	if (rc)
@@ -2678,29 +2822,30 @@
 	return 0;
 }
 
-static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
+void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
 {
-	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
+	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
 	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
-	struct device *dev = &adev->dev;
 	union acpi_object *obj;
 	acpi_status status;
 	int ret;
 
 	dev_dbg(dev, "%s: event: %d\n", __func__, event);
 
-	device_lock(dev);
+	if (event != NFIT_NOTIFY_UPDATE)
+		return;
+
 	if (!dev->driver) {
 		/* dev->driver may be null if we're being removed */
 		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
-		goto out_unlock;
+		return;
 	}
 
 	if (!acpi_desc) {
 		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
 		if (!acpi_desc)
-			goto out_unlock;
-		acpi_nfit_desc_init(acpi_desc, &adev->dev);
+			return;
+		acpi_nfit_desc_init(acpi_desc, dev);
 	} else {
 		/*
 		 * Finish previous registration before considering new
@@ -2710,10 +2855,10 @@
 	}
 
 	/* Evaluate _FIT */
-	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
+	status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
 	if (ACPI_FAILURE(status)) {
 		dev_err(dev, "failed to evaluate _FIT\n");
-		goto out_unlock;
+		return;
 	}
 
 	obj = buf.pointer;
@@ -2725,9 +2870,14 @@
 	} else
 		dev_err(dev, "Invalid _FIT\n");
 	kfree(buf.pointer);
+}
+EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
 
- out_unlock:
-	device_unlock(dev);
+static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
+{
+	device_lock(&adev->dev);
+	__acpi_nfit_notify(&adev->dev, adev->handle, event);
+	device_unlock(&adev->dev);
 }
 
 static const struct acpi_device_id acpi_nfit_ids[] = {
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index 161f915..e5ce81c 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -14,6 +14,7 @@
  */
 #include <linux/notifier.h>
 #include <linux/acpi.h>
+#include <linux/nd.h>
 #include <asm/mce.h>
 #include "nfit.h"
 
@@ -62,12 +63,25 @@
 		}
 		mutex_unlock(&acpi_desc->init_mutex);
 
-		/*
-		 * We can ignore an -EBUSY here because if an ARS is already
-		 * in progress, just let that be the last authoritative one
-		 */
-		if (found_match)
+		if (!found_match)
+			continue;
+
+		/* If this fails due to an -ENOMEM, there is little we can do */
+		nvdimm_bus_add_poison(acpi_desc->nvdimm_bus,
+				ALIGN(mce->addr, L1_CACHE_BYTES),
+				L1_CACHE_BYTES);
+		nvdimm_region_notify(nfit_spa->nd_region,
+				NVDIMM_REVALIDATE_POISON);
+
+		if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) {
+			/*
+			 * We can ignore an -EBUSY here because if an ARS is
+			 * already in progress, just let that be the last
+			 * authoritative one
+			 */
 			acpi_nfit_ars_rescan(acpi_desc);
+		}
+		break;
 	}
 
 	mutex_unlock(&acpi_desc_lock);
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index e894ded..14296f5 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -78,6 +78,14 @@
 	NFIT_ARS_TIMEOUT = 90,
 };
 
+enum nfit_root_notifiers {
+	NFIT_NOTIFY_UPDATE = 0x80,
+};
+
+enum nfit_dimm_notifiers {
+	NFIT_NOTIFY_DIMM_HEALTH = 0x81,
+};
+
 struct nfit_spa {
 	struct list_head list;
 	struct nd_region *nd_region;
@@ -124,6 +132,7 @@
 	struct acpi_nfit_system_address *spa_bdw;
 	struct acpi_nfit_interleave *idt_dcr;
 	struct acpi_nfit_interleave *idt_bdw;
+	struct kernfs_node *flags_attr;
 	struct nfit_flush *nfit_flush;
 	struct list_head list;
 	struct acpi_device *adev;
@@ -152,6 +161,7 @@
 	struct list_head list;
 	struct kernfs_node *scrub_count_state;
 	unsigned int scrub_count;
+	unsigned int scrub_mode;
 	unsigned int cancel:1;
 	unsigned long dimm_cmd_force_en;
 	unsigned long bus_cmd_force_en;
@@ -159,6 +169,11 @@
 			void *iobuf, u64 len, int rw);
 };
 
+enum scrub_mode {
+	HW_ERROR_SCRUB_OFF,
+	HW_ERROR_SCRUB_ON,
+};
+
 enum nd_blk_mmio_selector {
 	BDW,
 	DCR,
@@ -223,5 +238,7 @@
 
 const u8 *to_nfit_uuid(enum nfit_uuids id);
 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz);
+void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event);
+void __acpi_nvdimm_notify(struct device *dev, u32 event);
 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev);
 #endif /* __NFIT_H__ */
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 4305ee9..416953a 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -162,11 +162,18 @@
 	if (acpi_in_debugger) {
 		kdb_printf("%s", buffer);
 	} else {
-		printk(KERN_CONT "%s", buffer);
+		if (printk_get_level(buffer))
+			printk("%s", buffer);
+		else
+			printk(KERN_CONT "%s", buffer);
 	}
 #else
-	if (acpi_debugger_write_log(buffer) < 0)
-		printk(KERN_CONT "%s", buffer);
+	if (acpi_debugger_write_log(buffer) < 0) {
+		if (printk_get_level(buffer))
+			printk("%s", buffer);
+		else
+			printk(KERN_CONT "%s", buffer);
+	}
 #endif
 }
 
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index f2fd3fe..03f5ec1 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -468,10 +468,11 @@
 }
 
 /**
- * acpi_data_get_property_reference - returns handle to the referenced object
- * @data: ACPI device data object containing the property
+ * __acpi_node_get_property_reference - returns handle to the referenced object
+ * @fwnode: Firmware node to get the property from
  * @propname: Name of the property
  * @index: Index of the reference to return
+ * @num_args: Maximum number of arguments after each reference
  * @args: Location to store the returned reference with optional arguments
  *
  * Find property with @name, verifify that it is a package containing at least
@@ -482,17 +483,40 @@
  * If there's more than one reference in the property value package, @index is
  * used to select the one to return.
  *
+ * It is possible to leave holes in the property value set like in the
+ * example below:
+ *
+ * Package () {
+ *     "cs-gpios",
+ *     Package () {
+ *        ^GPIO, 19, 0, 0,
+ *        ^GPIO, 20, 0, 0,
+ *        0,
+ *        ^GPIO, 21, 0, 0,
+ *     }
+ * }
+ *
+ * Calling this function with index %2 return %-ENOENT and with index %3
+ * returns the last entry. If the property does not contain any more values
+ * %-ENODATA is returned. The NULL entry must be single integer and
+ * preferably contain value %0.
+ *
  * Return: %0 on success, negative error code on failure.
  */
-static int acpi_data_get_property_reference(struct acpi_device_data *data,
-					    const char *propname, size_t index,
-					    struct acpi_reference_args *args)
+int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+	const char *propname, size_t index, size_t num_args,
+	struct acpi_reference_args *args)
 {
 	const union acpi_object *element, *end;
 	const union acpi_object *obj;
+	struct acpi_device_data *data;
 	struct acpi_device *device;
 	int ret, idx = 0;
 
+	data = acpi_device_data_of_node(fwnode);
+	if (!data)
+		return -EINVAL;
+
 	ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj);
 	if (ret)
 		return ret;
@@ -532,59 +556,54 @@
 	while (element < end) {
 		u32 nargs, i;
 
-		if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
-			return -EPROTO;
+		if (element->type == ACPI_TYPE_LOCAL_REFERENCE) {
+			ret = acpi_bus_get_device(element->reference.handle,
+						  &device);
+			if (ret)
+				return -ENODEV;
 
-		ret = acpi_bus_get_device(element->reference.handle, &device);
-		if (ret)
-			return -ENODEV;
+			nargs = 0;
+			element++;
 
-		element++;
-		nargs = 0;
+			/* assume following integer elements are all args */
+			for (i = 0; element + i < end && i < num_args; i++) {
+				int type = element[i].type;
 
-		/* assume following integer elements are all args */
-		for (i = 0; element + i < end; i++) {
-			int type = element[i].type;
+				if (type == ACPI_TYPE_INTEGER)
+					nargs++;
+				else if (type == ACPI_TYPE_LOCAL_REFERENCE)
+					break;
+				else
+					return -EPROTO;
+			}
 
-			if (type == ACPI_TYPE_INTEGER)
-				nargs++;
-			else if (type == ACPI_TYPE_LOCAL_REFERENCE)
-				break;
-			else
+			if (nargs > MAX_ACPI_REFERENCE_ARGS)
 				return -EPROTO;
+
+			if (idx == index) {
+				args->adev = device;
+				args->nargs = nargs;
+				for (i = 0; i < nargs; i++)
+					args->args[i] = element[i].integer.value;
+
+				return 0;
+			}
+
+			element += nargs;
+		} else if (element->type == ACPI_TYPE_INTEGER) {
+			if (idx == index)
+				return -ENOENT;
+			element++;
+		} else {
+			return -EPROTO;
 		}
 
-		if (idx++ == index) {
-			args->adev = device;
-			args->nargs = nargs;
-			for (i = 0; i < nargs; i++)
-				args->args[i] = element[i].integer.value;
-
-			return 0;
-		}
-
-		element += nargs;
+		idx++;
 	}
 
-	return -EPROTO;
+	return -ENODATA;
 }
-
-/**
- * acpi_node_get_property_reference - get a handle to the referenced object.
- * @fwnode: Firmware node to get the property from.
- * @propname: Name of the property.
- * @index: Index of the reference to return.
- * @args: Location to store the returned reference with optional arguments.
- */
-int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
-				     const char *name, size_t index,
-				     struct acpi_reference_args *args)
-{
-	struct acpi_device_data *data = acpi_device_data_of_node(fwnode);
-
-	return data ? acpi_data_get_property_reference(data, name, index, args) : -EINVAL;
-}
-EXPORT_SYMBOL_GPL(acpi_node_get_property_reference);
+EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
 
 static int acpi_data_prop_read_single(struct acpi_device_data *data,
 				      const char *propname,
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index f4ebe39..35e8fbc 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -520,7 +520,8 @@
 	if (!tz->tz_enabled)
 		return;
 
-	thermal_zone_device_update(tz->thermal_zone);
+	thermal_zone_device_update(tz->thermal_zone,
+				   THERMAL_EVENT_UNSPECIFIED);
 }
 
 /* sys I/F for generic thermal sysfs support */
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 90eabaf..ba5f11c 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1400,142 +1400,56 @@
 }
 #endif
 
-/*
- * ahci_init_msix() - optionally enable per-port MSI-X otherwise defer
- * to single msi.
- */
-static int ahci_init_msix(struct pci_dev *pdev, unsigned int n_ports,
-			  struct ahci_host_priv *hpriv, unsigned long flags)
+static int ahci_get_irq_vector(struct ata_host *host, int port)
 {
-	int nvec, i, rc;
-
-	/* Do not init MSI-X if MSI is disabled for the device */
-	if (hpriv->flags & AHCI_HFLAG_NO_MSI)
-		return -ENODEV;
-
-	nvec = pci_msix_vec_count(pdev);
-	if (nvec < 0)
-		return nvec;
-
-	/*
-	 * Proper MSI-X implementations will have a vector per-port.
-	 * Barring that, we prefer single-MSI over single-MSIX.  If this
-	 * check fails (not enough MSI-X vectors for all ports) we will
-	 * be called again with the flag clear iff ahci_init_msi()
-	 * fails.
-	 */
-	if (flags & AHCI_HFLAG_MULTI_MSIX) {
-		if (nvec < n_ports)
-			return -ENODEV;
-		nvec = n_ports;
-	} else if (nvec) {
-		nvec = 1;
-	} else {
-		/*
-		 * Emit dev_err() since this was the non-legacy irq
-		 * method of last resort.
-		 */
-		rc = -ENODEV;
-		goto fail;
-	}
-
-	for (i = 0; i < nvec; i++)
-		hpriv->msix[i].entry = i;
-	rc = pci_enable_msix_exact(pdev, hpriv->msix, nvec);
-	if (rc < 0)
-		goto fail;
-
-	if (nvec > 1)
-		hpriv->flags |= AHCI_HFLAG_MULTI_MSIX;
-	hpriv->irq = hpriv->msix[0].vector; /* for single msi-x */
-
-	return nvec;
-fail:
-	dev_err(&pdev->dev,
-		"failed to enable MSI-X with error %d, # of vectors: %d\n",
-		rc, nvec);
-
-	return rc;
+	return pci_irq_vector(to_pci_dev(host->dev), port);
 }
 
 static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
 			struct ahci_host_priv *hpriv)
 {
-	int rc, nvec;
+	int nvec;
 
 	if (hpriv->flags & AHCI_HFLAG_NO_MSI)
 		return -ENODEV;
 
-	nvec = pci_msi_vec_count(pdev);
-	if (nvec < 0)
-		return nvec;
-
 	/*
 	 * If number of MSIs is less than number of ports then Sharing Last
 	 * Message mode could be enforced. In this case assume that advantage
 	 * of multipe MSIs is negated and use single MSI mode instead.
 	 */
-	if (nvec < n_ports)
-		goto single_msi;
+	nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
+			PCI_IRQ_MSIX | PCI_IRQ_MSI);
+	if (nvec > 0) {
+		if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
+			hpriv->get_irq_vector = ahci_get_irq_vector;
+			hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
+			return nvec;
+		}
 
-	rc = pci_enable_msi_exact(pdev, nvec);
-	if (rc == -ENOSPC)
-		goto single_msi;
-	if (rc < 0)
-		return rc;
-
-	/* fallback to single MSI mode if the controller enforced MRSM mode */
-	if (readl(hpriv->mmio + HOST_CTL) & HOST_MRSM) {
-		pci_disable_msi(pdev);
+		/*
+		 * Fallback to single MSI mode if the controller enforced MRSM
+		 * mode.
+		 */
 		printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n");
-		goto single_msi;
+		pci_free_irq_vectors(pdev);
 	}
 
-	if (nvec > 1)
-		hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
-
-	goto out;
-
-single_msi:
-	nvec = 1;
-
-	rc = pci_enable_msi(pdev);
-	if (rc < 0)
-		return rc;
-out:
-	hpriv->irq = pdev->irq;
-
-	return nvec;
-}
-
-static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
-				struct ahci_host_priv *hpriv)
-{
-	int nvec;
+	/*
+	 * -ENOSPC indicated we don't have enough vectors.  Don't bother trying
+	 * a single vectors for any other error:
+	 */
+	if (nvec < 0 && nvec != -ENOSPC)
+		return nvec;
 
 	/*
-	 * Try to enable per-port MSI-X.  If the host is not capable
-	 * fall back to single MSI before finally attempting single
-	 * MSI-X.
+	 * If the host is not capable of supporting per-port vectors, fall
+	 * back to single MSI before finally attempting single MSI-X.
 	 */
-	nvec = ahci_init_msix(pdev, n_ports, hpriv, AHCI_HFLAG_MULTI_MSIX);
-	if (nvec >= 0)
+	nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+	if (nvec == 1)
 		return nvec;
-
-	nvec = ahci_init_msi(pdev, n_ports, hpriv);
-	if (nvec >= 0)
-		return nvec;
-
-	/* try single-msix */
-	nvec = ahci_init_msix(pdev, n_ports, hpriv, 0);
-	if (nvec >= 0)
-		return nvec;
-
-	/* legacy intx interrupts */
-	pci_intx(pdev, 1);
-	hpriv->irq = pdev->irq;
-
-	return 0;
+	return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
 }
 
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1698,11 +1612,12 @@
 	if (!host)
 		return -ENOMEM;
 	host->private_data = hpriv;
-	hpriv->msix = devm_kzalloc(&pdev->dev,
-			sizeof(struct msix_entry) * n_ports, GFP_KERNEL);
-	if (!hpriv->msix)
-		return -ENOMEM;
-	ahci_init_interrupts(pdev, n_ports, hpriv);
+
+	if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
+		/* legacy intx interrupts */
+		pci_intx(pdev, 1);
+	}
+	hpriv->irq = pdev->irq;
 
 	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
 		host->flags |= ATA_HOST_PARALLEL_SCAN;
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 70b06bc..0cc08f8 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -242,12 +242,10 @@
 	AHCI_HFLAG_NO_FBS		= (1 << 18), /* no FBS */
 
 #ifdef CONFIG_PCI_MSI
-	AHCI_HFLAG_MULTI_MSI		= (1 << 20), /* multiple PCI MSIs */
-	AHCI_HFLAG_MULTI_MSIX		= (1 << 21), /* per-port MSI-X */
+	AHCI_HFLAG_MULTI_MSI		= (1 << 20), /* per-port MSI(-X) */
 #else
 	/* compile out MSI infrastructure */
 	AHCI_HFLAG_MULTI_MSI		= 0,
-	AHCI_HFLAG_MULTI_MSIX		= 0,
 #endif
 	AHCI_HFLAG_WAKE_BEFORE_STOP	= (1 << 22), /* wake before DMA stop */
 
@@ -351,7 +349,6 @@
 	 * the PHY position in this array.
 	 */
 	struct phy		**phys;
-	struct msix_entry	*msix;		/* Optional MSI-X support */
 	unsigned		nports;		/* Number of ports */
 	void			*plat_data;	/* Other platform data */
 	unsigned int		irq;		/* interrupt line */
@@ -362,22 +359,11 @@
 	 */
 	void			(*start_engine)(struct ata_port *ap);
 	irqreturn_t 		(*irq_handler)(int irq, void *dev_instance);
-};
 
-#ifdef CONFIG_PCI_MSI
-static inline int ahci_irq_vector(struct ahci_host_priv *hpriv, int port)
-{
-	if (hpriv->flags & AHCI_HFLAG_MULTI_MSIX)
-		return hpriv->msix[port].vector;
-	else
-		return hpriv->irq + port;
-}
-#else
-static inline int ahci_irq_vector(struct ahci_host_priv *hpriv, int port)
-{
-	return hpriv->irq;
-}
-#endif
+	/* only required for per-port MSI(-X) support */
+	int			(*get_irq_vector)(struct ata_host *host,
+						  int port);
+};
 
 extern int ahci_ignore_sss;
 
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 7bdee9b..1eba8df 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -30,24 +30,23 @@
 #define PORT_PHY3	0xB0
 #define PORT_PHY4	0xB4
 #define PORT_PHY5	0xB8
+#define PORT_AXICC	0xBC
 #define PORT_TRANS	0xC8
 
 /* port register default value */
 #define AHCI_PORT_PHY_1_CFG	0xa003fffe
 #define AHCI_PORT_TRANS_CFG	0x08000029
+#define AHCI_PORT_AXICC_CFG	0x3fffffff
 
 /* for ls1021a */
 #define LS1021A_PORT_PHY2	0x28183414
 #define LS1021A_PORT_PHY3	0x0e080e06
 #define LS1021A_PORT_PHY4	0x064a080b
 #define LS1021A_PORT_PHY5	0x2aa86470
+#define LS1021A_AXICC_ADDR	0xC0
 
 #define SATA_ECC_DISABLE	0x00020000
 
-/* for ls1043a */
-#define LS1043A_PORT_PHY2	0x28184d1f
-#define LS1043A_PORT_PHY3	0x0e081509
-
 enum ahci_qoriq_type {
 	AHCI_LS1021A,
 	AHCI_LS1043A,
@@ -137,7 +136,7 @@
 	.hardreset	= ahci_qoriq_hardreset,
 };
 
-static struct ata_port_info ahci_qoriq_port_info = {
+static const struct ata_port_info ahci_qoriq_port_info = {
 	.flags		= AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
 	.pio_mask	= ATA_PIO4,
 	.udma_mask	= ATA_UDMA6,
@@ -162,18 +161,19 @@
 		writel(LS1021A_PORT_PHY4, reg_base + PORT_PHY4);
 		writel(LS1021A_PORT_PHY5, reg_base + PORT_PHY5);
 		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+		writel(AHCI_PORT_AXICC_CFG, reg_base + LS1021A_AXICC_ADDR);
 		break;
 
 	case AHCI_LS1043A:
 		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
-		writel(LS1043A_PORT_PHY2, reg_base + PORT_PHY2);
-		writel(LS1043A_PORT_PHY3, reg_base + PORT_PHY3);
 		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+		writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
 		break;
 
 	case AHCI_LS2080A:
 		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
 		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+		writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
 		break;
 	}
 
@@ -221,12 +221,6 @@
 	if (rc)
 		goto disable_resources;
 
-	/* Workaround for ls2080a */
-	if (qoriq_priv->type == AHCI_LS2080A) {
-		hpriv->flags |= AHCI_HFLAG_NO_NCQ;
-		ahci_qoriq_port_info.flags &= ~ATA_FLAG_NCQ;
-	}
-
 	rc = ahci_platform_init_host(pdev, hpriv, &ahci_qoriq_port_info,
 				     &ahci_qoriq_sht);
 	if (rc)
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 8ff428f..bc345f2 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -147,6 +147,7 @@
 
 static int st_ahci_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct st_ahci_drv_data *drv_data;
 	struct ahci_host_priv *hpriv;
 	int err;
@@ -170,6 +171,9 @@
 
 	st_ahci_configure_oob(hpriv->mmio);
 
+	of_property_read_u32(dev->of_node,
+			     "ports-implemented", &hpriv->force_port_map);
+
 	err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info,
 				      &ahci_platform_sht);
 	if (err) {
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index dcf2c72..0d028ea 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2520,7 +2520,7 @@
 	 */
 	for (i = 0; i < host->n_ports; i++) {
 		struct ahci_port_priv *pp = host->ports[i]->private_data;
-		int irq = ahci_irq_vector(hpriv, i);
+		int irq = hpriv->get_irq_vector(host, i);
 
 		/* Do not receive interrupts sent by dummy ports */
 		if (!pp) {
@@ -2556,10 +2556,15 @@
 	int irq = hpriv->irq;
 	int rc;
 
-	if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) {
+	if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) {
 		if (hpriv->irq_handler)
 			dev_warn(host->dev,
 			         "both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n");
+		if (!hpriv->get_irq_vector) {
+			dev_err(host->dev,
+				"AHCI_HFLAG_MULTI_MSI requires ->get_irq_vector!\n");
+			return -EIO;
+		}
 
 		rc = ahci_host_activate_multi_irqs(host, sht);
 	} else {
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e207b33..9cceb4a 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1159,8 +1159,6 @@
 {
 	sdev->use_10_for_rw = 1;
 	sdev->use_10_for_ms = 1;
-	sdev->no_report_opcodes = 1;
-	sdev->no_write_same = 1;
 
 	/* Schedule policy is determined by ->qc_defer() callback and
 	 * it needs to see every deferred qc.  Set dev_blocked to 1 to
@@ -3282,18 +3280,125 @@
 	return 1;
 }
 
+/**
+ * ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim
+ * @cmd: SCSI command being translated
+ * @trmax: Maximum number of entries that will fit in sector_size bytes.
+ * @sector: Starting sector
+ * @count: Total Range of request in logical sectors
+ *
+ * Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted
+ * descriptor.
+ *
+ * Upto 64 entries of the format:
+ *   63:48 Range Length
+ *   47:0  LBA
+ *
+ *  Range Length of 0 is ignored.
+ *  LBA's should be sorted order and not overlap.
+ *
+ * NOTE: this is the same format as ADD LBA(S) TO NV CACHE PINNED SET
+ *
+ * Return: Number of bytes copied into sglist.
+ */
+static size_t ata_format_dsm_trim_descr(struct scsi_cmnd *cmd, u32 trmax,
+					u64 sector, u32 count)
+{
+	struct scsi_device *sdp = cmd->device;
+	size_t len = sdp->sector_size;
+	size_t r;
+	__le64 *buf;
+	u32 i = 0;
+	unsigned long flags;
+
+	WARN_ON(len > ATA_SCSI_RBUF_SIZE);
+
+	if (len > ATA_SCSI_RBUF_SIZE)
+		len = ATA_SCSI_RBUF_SIZE;
+
+	spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
+	buf = ((void *)ata_scsi_rbuf);
+	memset(buf, 0, len);
+	while (i < trmax) {
+		u64 entry = sector |
+			((u64)(count > 0xffff ? 0xffff : count) << 48);
+		buf[i++] = __cpu_to_le64(entry);
+		if (count <= 0xffff)
+			break;
+		count -= 0xffff;
+		sector += 0xffff;
+	}
+	r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len);
+	spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
+
+	return r;
+}
+
+/**
+ * ata_format_dsm_trim_descr() - SATL Write Same to ATA SCT Write Same
+ * @cmd: SCSI command being translated
+ * @lba: Starting sector
+ * @num: Number of sectors to be zero'd.
+ *
+ * Rewrite the WRITE SAME payload to be an SCT Write Same formatted
+ * descriptor.
+ * NOTE: Writes a pattern (0's) in the foreground.
+ *
+ * Return: Number of bytes copied into sglist.
+ */
+static size_t ata_format_sct_write_same(struct scsi_cmnd *cmd, u64 lba, u64 num)
+{
+	struct scsi_device *sdp = cmd->device;
+	size_t len = sdp->sector_size;
+	size_t r;
+	u16 *buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
+	buf = ((void *)ata_scsi_rbuf);
+
+	put_unaligned_le16(0x0002,  &buf[0]); /* SCT_ACT_WRITE_SAME */
+	put_unaligned_le16(0x0101,  &buf[1]); /* WRITE PTRN FG */
+	put_unaligned_le64(lba,     &buf[2]);
+	put_unaligned_le64(num,     &buf[6]);
+	put_unaligned_le32(0u,      &buf[10]); /* pattern */
+
+	WARN_ON(len > ATA_SCSI_RBUF_SIZE);
+
+	if (len > ATA_SCSI_RBUF_SIZE)
+		len = ATA_SCSI_RBUF_SIZE;
+
+	r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len);
+	spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
+
+	return r;
+}
+
+/**
+ * ata_scsi_write_same_xlat() - SATL Write Same to ATA SCT Write Same
+ * @qc: Command to be translated
+ *
+ * Translate a SCSI WRITE SAME command to be either a DSM TRIM command or
+ * an SCT Write Same command.
+ * Based on WRITE SAME has the UNMAP flag
+ *   When set translate to DSM TRIM
+ *   When clear translate to SCT Write Same
+ */
 static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
 {
 	struct ata_taskfile *tf = &qc->tf;
 	struct scsi_cmnd *scmd = qc->scsicmd;
+	struct scsi_device *sdp = scmd->device;
+	size_t len = sdp->sector_size;
 	struct ata_device *dev = qc->dev;
 	const u8 *cdb = scmd->cmnd;
 	u64 block;
 	u32 n_block;
+	const u32 trmax = len >> 3;
 	u32 size;
-	void *buf;
 	u16 fp;
 	u8 bp = 0xff;
+	u8 unmap = cdb[1] & 0x8;
 
 	/* we may not issue DMA commands if no DMA mode is set */
 	if (unlikely(!dev->dma_mode))
@@ -3305,11 +3410,26 @@
 	}
 	scsi_16_lba_len(cdb, &block, &n_block);
 
-	/* for now we only support WRITE SAME with the unmap bit set */
-	if (unlikely(!(cdb[1] & 0x8))) {
-		fp = 1;
-		bp = 3;
-		goto invalid_fld;
+	if (unmap) {
+		/* If trim is not enabled the cmd is invalid. */
+		if ((dev->horkage & ATA_HORKAGE_NOTRIM) ||
+		    !ata_id_has_trim(dev->id)) {
+			fp = 1;
+			bp = 3;
+			goto invalid_fld;
+		}
+		/* If the request is too large the cmd is invalid */
+		if (n_block > 0xffff * trmax) {
+			fp = 2;
+			goto invalid_fld;
+		}
+	} else {
+		/* If write same is not available the cmd is invalid */
+		if (!ata_id_sct_write_same(dev->id)) {
+			fp = 1;
+			bp = 3;
+			goto invalid_fld;
+		}
 	}
 
 	/*
@@ -3319,32 +3439,54 @@
 	if (!scsi_sg_count(scmd))
 		goto invalid_param_len;
 
-	buf = page_address(sg_page(scsi_sglist(scmd)));
+	/*
+	 * size must match sector size in bytes
+	 * For DATA SET MANAGEMENT TRIM in ACS-2 nsect (aka count)
+	 * is defined as number of 512 byte blocks to be transferred.
+	 */
+	if (unmap) {
+		size = ata_format_dsm_trim_descr(scmd, trmax, block, n_block);
+		if (size != len)
+			goto invalid_param_len;
 
-	if (n_block <= 65535 * ATA_MAX_TRIM_RNUM) {
-		size = ata_set_lba_range_entries(buf, ATA_MAX_TRIM_RNUM, block, n_block);
+		if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
+			/* Newer devices support queued TRIM commands */
+			tf->protocol = ATA_PROT_NCQ;
+			tf->command = ATA_CMD_FPDMA_SEND;
+			tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f;
+			tf->nsect = qc->tag << 3;
+			tf->hob_feature = (size / 512) >> 8;
+			tf->feature = size / 512;
+
+			tf->auxiliary = 1;
+		} else {
+			tf->protocol = ATA_PROT_DMA;
+			tf->hob_feature = 0;
+			tf->feature = ATA_DSM_TRIM;
+			tf->hob_nsect = (size / 512) >> 8;
+			tf->nsect = size / 512;
+			tf->command = ATA_CMD_DSM;
+		}
 	} else {
-		fp = 2;
-		goto invalid_fld;
-	}
+		size = ata_format_sct_write_same(scmd, block, n_block);
+		if (size != len)
+			goto invalid_param_len;
 
-	if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
-		/* Newer devices support queued TRIM commands */
-		tf->protocol = ATA_PROT_NCQ;
-		tf->command = ATA_CMD_FPDMA_SEND;
-		tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f;
-		tf->nsect = qc->tag << 3;
-		tf->hob_feature = (size / 512) >> 8;
-		tf->feature = size / 512;
-
-		tf->auxiliary = 1;
-	} else {
-		tf->protocol = ATA_PROT_DMA;
 		tf->hob_feature = 0;
-		tf->feature = ATA_DSM_TRIM;
-		tf->hob_nsect = (size / 512) >> 8;
-		tf->nsect = size / 512;
-		tf->command = ATA_CMD_DSM;
+		tf->feature = 0;
+		tf->hob_nsect = 0;
+		tf->nsect = 1;
+		tf->lbah = 0;
+		tf->lbam = 0;
+		tf->lbal = ATA_CMD_STANDBYNOW1;
+		tf->hob_lbah = 0;
+		tf->hob_lbam = 0;
+		tf->hob_lbal = 0;
+		tf->device = ATA_CMD_STANDBYNOW1;
+		tf->protocol = ATA_PROT_DMA;
+		tf->command = ATA_CMD_WRITE_LOG_DMA_EXT;
+		if (unlikely(dev->flags & ATA_DFLAG_PIO))
+			tf->command = ATA_CMD_WRITE_LOG_EXT;
 	}
 
 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 |
@@ -3368,6 +3510,76 @@
 }
 
 /**
+ *	ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
+ *	@args: device MAINTENANCE_IN data / SCSI command of interest.
+ *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ *	Yields a subset to satisfy scsi_report_opcode()
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
+{
+	struct ata_device *dev = args->dev;
+	u8 *cdb = args->cmd->cmnd;
+	u8 supported = 0;
+	unsigned int err = 0;
+
+	if (cdb[2] != 1) {
+		ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
+		err = 2;
+		goto out;
+	}
+	switch (cdb[3]) {
+	case INQUIRY:
+	case MODE_SENSE:
+	case MODE_SENSE_10:
+	case READ_CAPACITY:
+	case SERVICE_ACTION_IN_16:
+	case REPORT_LUNS:
+	case REQUEST_SENSE:
+	case SYNCHRONIZE_CACHE:
+	case REZERO_UNIT:
+	case SEEK_6:
+	case SEEK_10:
+	case TEST_UNIT_READY:
+	case SEND_DIAGNOSTIC:
+	case MAINTENANCE_IN:
+	case READ_6:
+	case READ_10:
+	case READ_16:
+	case WRITE_6:
+	case WRITE_10:
+	case WRITE_16:
+	case ATA_12:
+	case ATA_16:
+	case VERIFY:
+	case VERIFY_16:
+	case MODE_SELECT:
+	case MODE_SELECT_10:
+	case START_STOP:
+		supported = 3;
+		break;
+	case WRITE_SAME_16:
+		if (!ata_id_sct_write_same(dev->id))
+			break;
+		/* fallthrough: if SCT ... only enable for ZBC */
+	case ZBC_IN:
+	case ZBC_OUT:
+		if (ata_id_zoned_cap(dev->id) ||
+		    dev->class == ATA_DEV_ZAC)
+			supported = 3;
+		break;
+	default:
+		break;
+	}
+out:
+	rbuf[1] = supported; /* supported */
+	return err;
+}
+
+/**
  *	ata_scsi_report_zones_complete - convert ATA output
  *	@qc: command structure returning the data
  *
@@ -3610,7 +3822,7 @@
 {
 	struct ata_taskfile *tf = &qc->tf;
 	struct ata_device *dev = qc->dev;
-	char mpage[CACHE_MPAGE_LEN];
+	u8 mpage[CACHE_MPAGE_LEN];
 	u8 wce;
 	int i;
 
@@ -3666,7 +3878,7 @@
 			       const u8 *buf, int len, u16 *fp)
 {
 	struct ata_device *dev = qc->dev;
-	char mpage[CONTROL_MPAGE_LEN];
+	u8 mpage[CONTROL_MPAGE_LEN];
 	u8 d_sense;
 	int i;
 
@@ -3701,8 +3913,6 @@
 		dev->flags |= ATA_DFLAG_D_SENSE;
 	else
 		dev->flags &= ~ATA_DFLAG_D_SENSE;
-	qc->scsicmd->result = SAM_STAT_GOOD;
-	qc->scsicmd->scsi_done(qc->scsicmd);
 	return 0;
 }
 
@@ -3829,6 +4039,8 @@
 		if (ata_mselect_control(qc, p, pg_len, &fp) < 0) {
 			fp += hdr_len + bd_len;
 			goto invalid_param;
+		} else {
+			goto skip; /* No ATA command to send */
 		}
 		break;
 	default:		/* invalid page code */
@@ -4147,6 +4359,13 @@
 			ata_scsi_invalid_field(dev, cmd, 1);
 		break;
 
+	case MAINTENANCE_IN:
+		if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
+			ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
+		else
+			ata_scsi_invalid_field(dev, cmd, 1);
+		break;
+
 	/* all other commands */
 	default:
 		ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x20, 0x0);
@@ -4179,7 +4398,6 @@
 		shost->max_lun = 1;
 		shost->max_channel = 1;
 		shost->max_cmd_len = 16;
-		shost->no_write_same = 1;
 
 		/* Schedule policy is determined by ->qc_defer()
 		 * callback and it needs to see every deferred qc.
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index 9f27b14..1611e0e 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -347,10 +347,8 @@
 
 	field.reg = AT91SAM9_SMC_MODE(AT91SAM9_SMC_GENERIC);
 	fields.mode = devm_regmap_field_alloc(dev, smc, field);
-	if (IS_ERR(fields.mode))
-		return PTR_ERR(fields.mode);
 
-	return 0;
+	return PTR_ERR_OR_ZERO(fields.mode);
 }
 
 static int pata_at91_probe(struct platform_device *pdev)
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 2724595..475a006 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -152,8 +152,7 @@
 		div = 8;
 	T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate());
 
-	if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
-		BUG();
+	BUG_ON(ata_timing_compute(dev, dev->pio_mode, &timing, T, T));
 
 	t1 = timing.setup;
 	if (t1)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 745489a..efc48bf 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1727,15 +1727,13 @@
 		return -ENOMEM;
 	ap->private_data = pp;
 
-	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
+	pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
 	if (!pp->crqb)
 		return -ENOMEM;
-	memset(pp->crqb, 0, MV_CRQB_Q_SZ);
 
-	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
+	pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
 	if (!pp->crpb)
 		goto out_port_free_dma_mem;
-	memset(pp->crpb, 0, MV_CRPB_Q_SZ);
 
 	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
 	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index c07e725..10e1b9e 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -119,4 +119,13 @@
 	  If you compile this as a module, you can still override this
 	  value using the module parameters.
 
+config IMG_ASCII_LCD
+	tristate "Imagination Technologies ASCII LCD Display"
+	default y if MIPS_MALTA || MIPS_SEAD3
+	select SYSCON
+	help
+	  Enable this to support the simple ASCII LCD displays found on
+	  development boards such as the MIPS Boston, MIPS Malta & MIPS SEAD3
+	  from Imagination Technologies.
+
 endif # AUXDISPLAY
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index 8a8936a..3127175 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -4,3 +4,4 @@
 
 obj-$(CONFIG_KS0108)		+= ks0108.o
 obj-$(CONFIG_CFAG12864B)	+= cfag12864b.o cfag12864bfb.o
+obj-$(CONFIG_IMG_ASCII_LCD)	+= img-ascii-lcd.o
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
new file mode 100644
index 0000000..bf43b5d
--- /dev/null
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <generated/utsrelease.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+struct img_ascii_lcd_ctx;
+
+/**
+ * struct img_ascii_lcd_config - Configuration information about an LCD model
+ * @num_chars: the number of characters the LCD can display
+ * @external_regmap: true if registers are in a system controller, else false
+ * @update: function called to update the LCD
+ */
+struct img_ascii_lcd_config {
+	unsigned int num_chars;
+	bool external_regmap;
+	void (*update)(struct img_ascii_lcd_ctx *ctx);
+};
+
+/**
+ * struct img_ascii_lcd_ctx - Private data structure
+ * @pdev: the ASCII LCD platform device
+ * @base: the base address of the LCD registers
+ * @regmap: the regmap through which LCD registers are accessed
+ * @offset: the offset within regmap to the start of the LCD registers
+ * @cfg: pointer to the LCD model configuration
+ * @message: the full message to display or scroll on the LCD
+ * @message_len: the length of the @message string
+ * @scroll_pos: index of the first character of @message currently displayed
+ * @scroll_rate: scroll interval in jiffies
+ * @timer: timer used to implement scrolling
+ * @curr: the string currently displayed on the LCD
+ */
+struct img_ascii_lcd_ctx {
+	struct platform_device *pdev;
+	union {
+		void __iomem *base;
+		struct regmap *regmap;
+	};
+	u32 offset;
+	const struct img_ascii_lcd_config *cfg;
+	char *message;
+	unsigned int message_len;
+	unsigned int scroll_pos;
+	unsigned int scroll_rate;
+	struct timer_list timer;
+	char curr[] __aligned(8);
+};
+
+/*
+ * MIPS Boston development board
+ */
+
+static void boston_update(struct img_ascii_lcd_ctx *ctx)
+{
+	ulong val;
+
+#if BITS_PER_LONG == 64
+	val = *((u64 *)&ctx->curr[0]);
+	__raw_writeq(val, ctx->base);
+#elif BITS_PER_LONG == 32
+	val = *((u32 *)&ctx->curr[0]);
+	__raw_writel(val, ctx->base);
+	val = *((u32 *)&ctx->curr[4]);
+	__raw_writel(val, ctx->base + 4);
+#else
+# error Not 32 or 64 bit
+#endif
+}
+
+static struct img_ascii_lcd_config boston_config = {
+	.num_chars = 8,
+	.update = boston_update,
+};
+
+/*
+ * MIPS Malta development board
+ */
+
+static void malta_update(struct img_ascii_lcd_ctx *ctx)
+{
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < ctx->cfg->num_chars; i++) {
+		err = regmap_write(ctx->regmap,
+				   ctx->offset + (i * 8), ctx->curr[i]);
+		if (err)
+			break;
+	}
+
+	if (unlikely(err))
+		pr_err_ratelimited("Failed to update LCD display: %d\n", err);
+}
+
+static struct img_ascii_lcd_config malta_config = {
+	.num_chars = 8,
+	.external_regmap = true,
+	.update = malta_update,
+};
+
+/*
+ * MIPS SEAD3 development board
+ */
+
+enum {
+	SEAD3_REG_LCD_CTRL		= 0x00,
+#define SEAD3_REG_LCD_CTRL_SETDRAM	BIT(7)
+	SEAD3_REG_LCD_DATA		= 0x08,
+	SEAD3_REG_CPLD_STATUS		= 0x10,
+#define SEAD3_REG_CPLD_STATUS_BUSY	BIT(0)
+	SEAD3_REG_CPLD_DATA		= 0x18,
+#define SEAD3_REG_CPLD_DATA_BUSY	BIT(7)
+};
+
+static int sead3_wait_sm_idle(struct img_ascii_lcd_ctx *ctx)
+{
+	unsigned int status;
+	int err;
+
+	do {
+		err = regmap_read(ctx->regmap,
+				  ctx->offset + SEAD3_REG_CPLD_STATUS,
+				  &status);
+		if (err)
+			return err;
+	} while (status & SEAD3_REG_CPLD_STATUS_BUSY);
+
+	return 0;
+
+}
+
+static int sead3_wait_lcd_idle(struct img_ascii_lcd_ctx *ctx)
+{
+	unsigned int cpld_data;
+	int err;
+
+	err = sead3_wait_sm_idle(ctx);
+	if (err)
+		return err;
+
+	do {
+		err = regmap_read(ctx->regmap,
+				  ctx->offset + SEAD3_REG_LCD_CTRL,
+				  &cpld_data);
+		if (err)
+			return err;
+
+		err = sead3_wait_sm_idle(ctx);
+		if (err)
+			return err;
+
+		err = regmap_read(ctx->regmap,
+				  ctx->offset + SEAD3_REG_CPLD_DATA,
+				  &cpld_data);
+		if (err)
+			return err;
+	} while (cpld_data & SEAD3_REG_CPLD_DATA_BUSY);
+
+	return 0;
+}
+
+static void sead3_update(struct img_ascii_lcd_ctx *ctx)
+{
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < ctx->cfg->num_chars; i++) {
+		err = sead3_wait_lcd_idle(ctx);
+		if (err)
+			break;
+
+		err = regmap_write(ctx->regmap,
+				   ctx->offset + SEAD3_REG_LCD_CTRL,
+				   SEAD3_REG_LCD_CTRL_SETDRAM | i);
+		if (err)
+			break;
+
+		err = sead3_wait_lcd_idle(ctx);
+		if (err)
+			break;
+
+		err = regmap_write(ctx->regmap,
+				   ctx->offset + SEAD3_REG_LCD_DATA,
+				   ctx->curr[i]);
+		if (err)
+			break;
+	}
+
+	if (unlikely(err))
+		pr_err_ratelimited("Failed to update LCD display: %d\n", err);
+}
+
+static struct img_ascii_lcd_config sead3_config = {
+	.num_chars = 16,
+	.external_regmap = true,
+	.update = sead3_update,
+};
+
+static const struct of_device_id img_ascii_lcd_matches[] = {
+	{ .compatible = "img,boston-lcd", .data = &boston_config },
+	{ .compatible = "mti,malta-lcd", .data = &malta_config },
+	{ .compatible = "mti,sead3-lcd", .data = &sead3_config },
+};
+
+/**
+ * img_ascii_lcd_scroll() - scroll the display by a character
+ * @arg: really a pointer to the private data structure
+ *
+ * Scroll the current message along the LCD by one character, rearming the
+ * timer if required.
+ */
+static void img_ascii_lcd_scroll(unsigned long arg)
+{
+	struct img_ascii_lcd_ctx *ctx = (struct img_ascii_lcd_ctx *)arg;
+	unsigned int i, ch = ctx->scroll_pos;
+	unsigned int num_chars = ctx->cfg->num_chars;
+
+	/* update the current message string */
+	for (i = 0; i < num_chars;) {
+		/* copy as many characters from the string as possible */
+		for (; i < num_chars && ch < ctx->message_len; i++, ch++)
+			ctx->curr[i] = ctx->message[ch];
+
+		/* wrap around to the start of the string */
+		ch = 0;
+	}
+
+	/* update the LCD */
+	ctx->cfg->update(ctx);
+
+	/* move on to the next character */
+	ctx->scroll_pos++;
+	ctx->scroll_pos %= ctx->message_len;
+
+	/* rearm the timer */
+	if (ctx->message_len > ctx->cfg->num_chars)
+		mod_timer(&ctx->timer, jiffies + ctx->scroll_rate);
+}
+
+/**
+ * img_ascii_lcd_display() - set the message to be displayed
+ * @ctx: pointer to the private data structure
+ * @msg: the message to display
+ * @count: length of msg, or -1
+ *
+ * Display a new message @msg on the LCD. @msg can be longer than the number of
+ * characters the LCD can display, in which case it will begin scrolling across
+ * the LCD display.
+ *
+ * Return: 0 on success, -ENOMEM on memory allocation failure
+ */
+static int img_ascii_lcd_display(struct img_ascii_lcd_ctx *ctx,
+			     const char *msg, ssize_t count)
+{
+	char *new_msg;
+
+	/* stop the scroll timer */
+	del_timer_sync(&ctx->timer);
+
+	if (count == -1)
+		count = strlen(msg);
+
+	/* if the string ends with a newline, trim it */
+	if (msg[count - 1] == '\n')
+		count--;
+
+	new_msg = devm_kmalloc(&ctx->pdev->dev, count + 1, GFP_KERNEL);
+	if (!new_msg)
+		return -ENOMEM;
+
+	memcpy(new_msg, msg, count);
+	new_msg[count] = 0;
+
+	if (ctx->message)
+		devm_kfree(&ctx->pdev->dev, ctx->message);
+
+	ctx->message = new_msg;
+	ctx->message_len = count;
+	ctx->scroll_pos = 0;
+
+	/* update the LCD */
+	img_ascii_lcd_scroll((unsigned long)ctx);
+
+	return 0;
+}
+
+/**
+ * message_show() - read message via sysfs
+ * @dev: the LCD device
+ * @attr: the LCD message attribute
+ * @buf: the buffer to read the message into
+ *
+ * Read the current message being displayed or scrolled across the LCD display
+ * into @buf, for reads from sysfs.
+ *
+ * Return: the number of characters written to @buf
+ */
+static ssize_t message_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct img_ascii_lcd_ctx *ctx = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%s\n", ctx->message);
+}
+
+/**
+ * message_store() - write a new message via sysfs
+ * @dev: the LCD device
+ * @attr: the LCD message attribute
+ * @buf: the buffer containing the new message
+ * @count: the size of the message in @buf
+ *
+ * Write a new message to display or scroll across the LCD display from sysfs.
+ *
+ * Return: the size of the message on success, else -ERRNO
+ */
+static ssize_t message_store(struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct img_ascii_lcd_ctx *ctx = dev_get_drvdata(dev);
+	int err;
+
+	err = img_ascii_lcd_display(ctx, buf, count);
+	return err ?: count;
+}
+
+static DEVICE_ATTR_RW(message);
+
+/**
+ * img_ascii_lcd_probe() - probe an LCD display device
+ * @pdev: the LCD platform device
+ *
+ * Probe an LCD display device, ensuring that we have the required resources in
+ * order to access the LCD & setting up private data as well as sysfs files.
+ *
+ * Return: 0 on success, else -ERRNO
+ */
+static int img_ascii_lcd_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	const struct img_ascii_lcd_config *cfg;
+	struct img_ascii_lcd_ctx *ctx;
+	struct resource *res;
+	int err;
+
+	match = of_match_device(img_ascii_lcd_matches, &pdev->dev);
+	if (!match)
+		return -ENODEV;
+
+	cfg = match->data;
+	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx) + cfg->num_chars,
+			   GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	if (cfg->external_regmap) {
+		ctx->regmap = syscon_node_to_regmap(pdev->dev.parent->of_node);
+		if (IS_ERR(ctx->regmap))
+			return PTR_ERR(ctx->regmap);
+
+		if (of_property_read_u32(pdev->dev.of_node, "offset",
+					 &ctx->offset))
+			return -EINVAL;
+	} else {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		ctx->base = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(ctx->base))
+			return PTR_ERR(ctx->base);
+	}
+
+	ctx->pdev = pdev;
+	ctx->cfg = cfg;
+	ctx->message = NULL;
+	ctx->scroll_pos = 0;
+	ctx->scroll_rate = HZ / 2;
+
+	/* initialise a timer for scrolling the message */
+	init_timer(&ctx->timer);
+	ctx->timer.function = img_ascii_lcd_scroll;
+	ctx->timer.data = (unsigned long)ctx;
+
+	platform_set_drvdata(pdev, ctx);
+
+	/* display a default message */
+	err = img_ascii_lcd_display(ctx, "Linux " UTS_RELEASE "       ", -1);
+	if (err)
+		goto out_del_timer;
+
+	err = device_create_file(&pdev->dev, &dev_attr_message);
+	if (err)
+		goto out_del_timer;
+
+	return 0;
+out_del_timer:
+	del_timer_sync(&ctx->timer);
+	return err;
+}
+
+/**
+ * img_ascii_lcd_remove() - remove an LCD display device
+ * @pdev: the LCD platform device
+ *
+ * Remove an LCD display device, freeing private resources & ensuring that the
+ * driver stops using the LCD display registers.
+ *
+ * Return: 0
+ */
+static int img_ascii_lcd_remove(struct platform_device *pdev)
+{
+	struct img_ascii_lcd_ctx *ctx = platform_get_drvdata(pdev);
+
+	device_remove_file(&pdev->dev, &dev_attr_message);
+	del_timer_sync(&ctx->timer);
+	return 0;
+}
+
+static struct platform_driver img_ascii_lcd_driver = {
+	.driver = {
+		.name		= "img-ascii-lcd",
+		.of_match_table	= img_ascii_lcd_matches,
+	},
+	.probe	= img_ascii_lcd_probe,
+	.remove	= img_ascii_lcd_remove,
+};
+module_platform_driver(img_ascii_lcd_driver);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index cbdb3b1..fa1b7a9 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -840,13 +840,13 @@
 
 static void loop_unprepare_queue(struct loop_device *lo)
 {
-	flush_kthread_worker(&lo->worker);
+	kthread_flush_worker(&lo->worker);
 	kthread_stop(lo->worker_task);
 }
 
 static int loop_prepare_queue(struct loop_device *lo)
 {
-	init_kthread_worker(&lo->worker);
+	kthread_init_worker(&lo->worker);
 	lo->worker_task = kthread_run(kthread_worker_fn,
 			&lo->worker, "loop%d", lo->lo_number);
 	if (IS_ERR(lo->worker_task))
@@ -1658,7 +1658,7 @@
 		break;
 	}
 
-	queue_kthread_work(&lo->worker, &cmd->work);
+	kthread_queue_work(&lo->worker, &cmd->work);
 
 	return BLK_MQ_RQ_QUEUE_OK;
 }
@@ -1696,7 +1696,7 @@
 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
 	cmd->rq = rq;
-	init_kthread_work(&cmd->work, loop_queue_work);
+	kthread_init_work(&cmd->work, loop_queue_work);
 
 	return 0;
 }
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index c1f84df..abb7162 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -31,6 +31,7 @@
 #include <linux/ceph/libceph.h>
 #include <linux/ceph/osd_client.h>
 #include <linux/ceph/mon_client.h>
+#include <linux/ceph/cls_lock_client.h>
 #include <linux/ceph/decode.h>
 #include <linux/parser.h>
 #include <linux/bsearch.h>
@@ -114,12 +115,17 @@
 
 #define RBD_OBJ_PREFIX_LEN_MAX	64
 
+#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
+#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)
+
 /* Feature bits */
 
 #define RBD_FEATURE_LAYERING	(1<<0)
 #define RBD_FEATURE_STRIPINGV2	(1<<1)
-#define RBD_FEATURES_ALL \
-	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
+#define RBD_FEATURE_EXCLUSIVE_LOCK (1<<2)
+#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
+				 RBD_FEATURE_STRIPINGV2 |	\
+				 RBD_FEATURE_EXCLUSIVE_LOCK)
 
 /* Features supported by this (client software) implementation. */
 
@@ -128,11 +134,8 @@
 /*
  * An RBD device name will be "rbd#", where the "rbd" comes from
  * RBD_DRV_NAME above, and # is a unique integer identifier.
- * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
- * enough to hold all possible device names.
  */
 #define DEV_NAME_LEN		32
-#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
 
 /*
  * block device image metadata (in-memory version)
@@ -322,6 +325,24 @@
 #define for_each_obj_request_safe(ireq, oreq, n) \
 	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
 
+enum rbd_watch_state {
+	RBD_WATCH_STATE_UNREGISTERED,
+	RBD_WATCH_STATE_REGISTERED,
+	RBD_WATCH_STATE_ERROR,
+};
+
+enum rbd_lock_state {
+	RBD_LOCK_STATE_UNLOCKED,
+	RBD_LOCK_STATE_LOCKED,
+	RBD_LOCK_STATE_RELEASING,
+};
+
+/* WatchNotify::ClientId */
+struct rbd_client_id {
+	u64 gid;
+	u64 handle;
+};
+
 struct rbd_mapping {
 	u64                     size;
 	u64                     features;
@@ -349,13 +370,29 @@
 	unsigned long		flags;		/* possibly lock protected */
 	struct rbd_spec		*spec;
 	struct rbd_options	*opts;
+	char			*config_info;	/* add{,_single_major} string */
 
 	struct ceph_object_id	header_oid;
 	struct ceph_object_locator header_oloc;
 
-	struct ceph_file_layout	layout;
+	struct ceph_file_layout	layout;		/* used for all rbd requests */
 
+	struct mutex		watch_mutex;
+	enum rbd_watch_state	watch_state;
 	struct ceph_osd_linger_request *watch_handle;
+	u64			watch_cookie;
+	struct delayed_work	watch_dwork;
+
+	struct rw_semaphore	lock_rwsem;
+	enum rbd_lock_state	lock_state;
+	struct rbd_client_id	owner_cid;
+	struct work_struct	acquired_lock_work;
+	struct work_struct	released_lock_work;
+	struct delayed_work	lock_dwork;
+	struct work_struct	unlock_work;
+	wait_queue_head_t	lock_waitq;
+
+	struct workqueue_struct	*task_wq;
 
 	struct rbd_spec		*parent_spec;
 	u64			parent_overlap;
@@ -439,6 +476,29 @@
 	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
 }
 
+static bool rbd_is_lock_supported(struct rbd_device *rbd_dev)
+{
+	return (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
+	       rbd_dev->spec->snap_id == CEPH_NOSNAP &&
+	       !rbd_dev->mapping.read_only;
+}
+
+static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
+{
+	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
+	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
+}
+
+static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
+{
+	bool is_lock_owner;
+
+	down_read(&rbd_dev->lock_rwsem);
+	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
+	up_read(&rbd_dev->lock_rwsem);
+	return is_lock_owner;
+}
+
 static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
 static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
 static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
@@ -735,6 +795,7 @@
 	/* string args above */
 	Opt_read_only,
 	Opt_read_write,
+	Opt_lock_on_read,
 	Opt_err
 };
 
@@ -746,16 +807,19 @@
 	{Opt_read_only, "ro"},		/* Alternate spelling */
 	{Opt_read_write, "read_write"},
 	{Opt_read_write, "rw"},		/* Alternate spelling */
+	{Opt_lock_on_read, "lock_on_read"},
 	{Opt_err, NULL}
 };
 
 struct rbd_options {
 	int	queue_depth;
 	bool	read_only;
+	bool	lock_on_read;
 };
 
 #define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
 #define RBD_READ_ONLY_DEFAULT	false
+#define RBD_LOCK_ON_READ_DEFAULT false
 
 static int parse_rbd_opts_token(char *c, void *private)
 {
@@ -791,6 +855,9 @@
 	case Opt_read_write:
 		rbd_opts->read_only = false;
 		break;
+	case Opt_lock_on_read:
+		rbd_opts->lock_on_read = true;
+		break;
 	default:
 		/* libceph prints "bad option" msg */
 		return -EINVAL;
@@ -919,7 +986,6 @@
 	char *snap_names = NULL;
 	u64 *snap_sizes = NULL;
 	u32 snap_count;
-	size_t size;
 	int ret = -ENOMEM;
 	u32 i;
 
@@ -957,9 +1023,9 @@
 			goto out_err;
 
 		/* ...as well as the array of their sizes. */
-
-		size = snap_count * sizeof (*header->snap_sizes);
-		snap_sizes = kmalloc(size, GFP_KERNEL);
+		snap_sizes = kmalloc_array(snap_count,
+					   sizeof(*header->snap_sizes),
+					   GFP_KERNEL);
 		if (!snap_sizes)
 			goto out_err;
 
@@ -1551,11 +1617,18 @@
 	}
 }
 
-static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
-				struct rbd_obj_request *obj_request)
+static void rbd_img_obj_callback(struct rbd_obj_request *obj_request);
+
+static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
 {
-	dout("%s %p\n", __func__, obj_request);
-	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
+	struct ceph_osd_request *osd_req = obj_request->osd_req;
+
+	dout("%s %p osd_req %p\n", __func__, obj_request, osd_req);
+	if (obj_request_img_data_test(obj_request)) {
+		WARN_ON(obj_request->callback != rbd_img_obj_callback);
+		rbd_img_request_get(obj_request->img_request);
+	}
+	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
 }
 
 static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
@@ -1745,6 +1818,22 @@
 		complete_all(&obj_request->completion);
 }
 
+static void rbd_obj_request_error(struct rbd_obj_request *obj_request, int err)
+{
+	obj_request->result = err;
+	obj_request->xferred = 0;
+	/*
+	 * kludge - mirror rbd_obj_request_submit() to match a put in
+	 * rbd_img_obj_callback()
+	 */
+	if (obj_request_img_data_test(obj_request)) {
+		WARN_ON(obj_request->callback != rbd_img_obj_callback);
+		rbd_img_request_get(obj_request->img_request);
+	}
+	obj_request_done_set(obj_request);
+	rbd_obj_request_complete(obj_request);
+}
+
 static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
 {
 	struct rbd_img_request *img_request = NULL;
@@ -1877,11 +1966,10 @@
 
 static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
 {
-	struct rbd_img_request *img_request = obj_request->img_request;
 	struct ceph_osd_request *osd_req = obj_request->osd_req;
 
-	if (img_request)
-		osd_req->r_snapid = img_request->snap_id;
+	rbd_assert(obj_request_img_data_test(obj_request));
+	osd_req->r_snapid = obj_request->img_request->snap_id;
 }
 
 static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
@@ -2074,7 +2162,9 @@
 			bio_chain_put(obj_request->bio_list);
 		break;
 	case OBJ_REQUEST_PAGES:
-		if (obj_request->pages)
+		/* img_data requests don't own their page array */
+		if (obj_request->pages &&
+		    !obj_request_img_data_test(obj_request))
 			ceph_release_page_vector(obj_request->pages,
 						obj_request->page_count);
 		break;
@@ -2295,13 +2385,6 @@
 		xferred = obj_request->length;
 	}
 
-	/* Image object requests don't own their page array */
-
-	if (obj_request->type == OBJ_REQUEST_PAGES) {
-		obj_request->pages = NULL;
-		obj_request->page_count = 0;
-	}
-
 	if (img_request_child_test(img_request)) {
 		rbd_assert(img_request->obj_request != NULL);
 		more = obj_request->which < img_request->obj_request_count - 1;
@@ -2520,8 +2603,6 @@
 
 		rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
 
-		rbd_img_request_get(img_request);
-
 		img_offset += length;
 		resid -= length;
 	}
@@ -2579,7 +2660,6 @@
 {
 	struct rbd_obj_request *orig_request;
 	struct ceph_osd_request *osd_req;
-	struct ceph_osd_client *osdc;
 	struct rbd_device *rbd_dev;
 	struct page **pages;
 	enum obj_operation_type op_type;
@@ -2603,7 +2683,7 @@
 	rbd_assert(obj_request_type_valid(orig_request->type));
 	img_result = img_request->result;
 	parent_length = img_request->length;
-	rbd_assert(parent_length == img_request->xferred);
+	rbd_assert(img_result || parent_length == img_request->xferred);
 	rbd_img_request_put(img_request);
 
 	rbd_assert(orig_request->img_request);
@@ -2616,13 +2696,9 @@
 	 * and re-submit the original write request.
 	 */
 	if (!rbd_dev->parent_overlap) {
-		struct ceph_osd_client *osdc;
-
 		ceph_release_page_vector(pages, page_count);
-		osdc = &rbd_dev->rbd_client->client->osdc;
-		img_result = rbd_obj_request_submit(osdc, orig_request);
-		if (!img_result)
-			return;
+		rbd_obj_request_submit(orig_request);
+		return;
 	}
 
 	if (img_result)
@@ -2656,17 +2732,12 @@
 
 	/* All set, send it off. */
 
-	osdc = &rbd_dev->rbd_client->client->osdc;
-	img_result = rbd_obj_request_submit(osdc, orig_request);
-	if (!img_result)
-		return;
-out_err:
-	/* Record the error code and complete the request */
+	rbd_obj_request_submit(orig_request);
+	return;
 
-	orig_request->result = img_result;
-	orig_request->xferred = 0;
-	obj_request_done_set(orig_request);
-	rbd_obj_request_complete(orig_request);
+out_err:
+	ceph_release_page_vector(pages, page_count);
+	rbd_obj_request_error(orig_request, img_result);
 }
 
 /*
@@ -2680,26 +2751,19 @@
  * When the read completes, this page array will be transferred to
  * the original object request for the copyup operation.
  *
- * If an error occurs, record it as the result of the original
- * object request and mark it done so it gets completed.
+ * If an error occurs, it is recorded as the result of the original
+ * object request in rbd_img_obj_exists_callback().
  */
 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
 {
-	struct rbd_img_request *img_request = NULL;
+	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
 	struct rbd_img_request *parent_request = NULL;
-	struct rbd_device *rbd_dev;
 	u64 img_offset;
 	u64 length;
 	struct page **pages = NULL;
 	u32 page_count;
 	int result;
 
-	rbd_assert(obj_request_img_data_test(obj_request));
-	rbd_assert(obj_request_type_valid(obj_request->type));
-
-	img_request = obj_request->img_request;
-	rbd_assert(img_request != NULL);
-	rbd_dev = img_request->rbd_dev;
 	rbd_assert(rbd_dev->parent != NULL);
 
 	/*
@@ -2740,10 +2804,11 @@
 	result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
 	if (result)
 		goto out_err;
+
 	parent_request->copyup_pages = pages;
 	parent_request->copyup_page_count = page_count;
-
 	parent_request->callback = rbd_img_obj_parent_read_full_callback;
+
 	result = rbd_img_request_submit(parent_request);
 	if (!result)
 		return 0;
@@ -2757,10 +2822,6 @@
 		ceph_release_page_vector(pages, page_count);
 	if (parent_request)
 		rbd_img_request_put(parent_request);
-	obj_request->result = result;
-	obj_request->xferred = 0;
-	obj_request_done_set(obj_request);
-
 	return result;
 }
 
@@ -2793,17 +2854,13 @@
 
 	/*
 	 * If the overlap has become 0 (most likely because the
-	 * image has been flattened) we need to free the pages
-	 * and re-submit the original write request.
+	 * image has been flattened) we need to re-submit the
+	 * original request.
 	 */
 	rbd_dev = orig_request->img_request->rbd_dev;
 	if (!rbd_dev->parent_overlap) {
-		struct ceph_osd_client *osdc;
-
-		osdc = &rbd_dev->rbd_client->client->osdc;
-		result = rbd_obj_request_submit(osdc, orig_request);
-		if (!result)
-			return;
+		rbd_obj_request_submit(orig_request);
+		return;
 	}
 
 	/*
@@ -2816,31 +2873,45 @@
 		obj_request_existence_set(orig_request, true);
 	} else if (result == -ENOENT) {
 		obj_request_existence_set(orig_request, false);
-	} else if (result) {
-		orig_request->result = result;
-		goto out;
+	} else {
+		goto fail_orig_request;
 	}
 
 	/*
 	 * Resubmit the original request now that we have recorded
 	 * whether the target object exists.
 	 */
-	orig_request->result = rbd_img_obj_request_submit(orig_request);
-out:
-	if (orig_request->result)
-		rbd_obj_request_complete(orig_request);
+	result = rbd_img_obj_request_submit(orig_request);
+	if (result)
+		goto fail_orig_request;
+
+	return;
+
+fail_orig_request:
+	rbd_obj_request_error(orig_request, result);
 }
 
 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
 {
+	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
 	struct rbd_obj_request *stat_request;
-	struct rbd_device *rbd_dev;
-	struct ceph_osd_client *osdc;
-	struct page **pages = NULL;
+	struct page **pages;
 	u32 page_count;
 	size_t size;
 	int ret;
 
+	stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
+					      OBJ_REQUEST_PAGES);
+	if (!stat_request)
+		return -ENOMEM;
+
+	stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
+						   stat_request);
+	if (!stat_request->osd_req) {
+		ret = -ENOMEM;
+		goto fail_stat_request;
+	}
+
 	/*
 	 * The response data for a STAT call consists of:
 	 *     le64 length;
@@ -2852,52 +2923,33 @@
 	size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
 	page_count = (u32)calc_pages_for(0, size);
 	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
-	if (IS_ERR(pages))
-		return PTR_ERR(pages);
+	if (IS_ERR(pages)) {
+		ret = PTR_ERR(pages);
+		goto fail_stat_request;
+	}
 
-	ret = -ENOMEM;
-	stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
-							OBJ_REQUEST_PAGES);
-	if (!stat_request)
-		goto out;
+	osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
+	osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
+				     false, false);
 
 	rbd_obj_request_get(obj_request);
 	stat_request->obj_request = obj_request;
 	stat_request->pages = pages;
 	stat_request->page_count = page_count;
-
-	rbd_assert(obj_request->img_request);
-	rbd_dev = obj_request->img_request->rbd_dev;
-	stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
-						   stat_request);
-	if (!stat_request->osd_req)
-		goto out;
 	stat_request->callback = rbd_img_obj_exists_callback;
 
-	osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
-	osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
-					false, false);
-	rbd_osd_req_format_read(stat_request);
+	rbd_obj_request_submit(stat_request);
+	return 0;
 
-	osdc = &rbd_dev->rbd_client->client->osdc;
-	ret = rbd_obj_request_submit(osdc, stat_request);
-out:
-	if (ret)
-		rbd_obj_request_put(obj_request);
-
+fail_stat_request:
+	rbd_obj_request_put(stat_request);
 	return ret;
 }
 
 static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
 {
-	struct rbd_img_request *img_request;
-	struct rbd_device *rbd_dev;
-
-	rbd_assert(obj_request_img_data_test(obj_request));
-
-	img_request = obj_request->img_request;
-	rbd_assert(img_request);
-	rbd_dev = img_request->rbd_dev;
+	struct rbd_img_request *img_request = obj_request->img_request;
+	struct rbd_device *rbd_dev = img_request->rbd_dev;
 
 	/* Reads */
 	if (!img_request_write_test(img_request) &&
@@ -2936,14 +2988,13 @@
 
 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
 {
+	rbd_assert(obj_request_img_data_test(obj_request));
+	rbd_assert(obj_request_type_valid(obj_request->type));
+	rbd_assert(obj_request->img_request);
+
 	if (img_obj_request_simple(obj_request)) {
-		struct rbd_device *rbd_dev;
-		struct ceph_osd_client *osdc;
-
-		rbd_dev = obj_request->img_request->rbd_dev;
-		osdc = &rbd_dev->rbd_client->client->osdc;
-
-		return rbd_obj_request_submit(osdc, obj_request);
+		rbd_obj_request_submit(obj_request);
+		return 0;
 	}
 
 	/*
@@ -3006,12 +3057,8 @@
 	rbd_assert(obj_request->img_request);
 	rbd_dev = obj_request->img_request->rbd_dev;
 	if (!rbd_dev->parent_overlap) {
-		struct ceph_osd_client *osdc;
-
-		osdc = &rbd_dev->rbd_client->client->osdc;
-		img_result = rbd_obj_request_submit(osdc, obj_request);
-		if (!img_result)
-			return;
+		rbd_obj_request_submit(obj_request);
+		return;
 	}
 
 	obj_request->result = img_result;
@@ -3084,65 +3131,724 @@
 	obj_request_done_set(obj_request);
 }
 
-static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev);
-static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev);
+static const struct rbd_client_id rbd_empty_cid;
+
+static bool rbd_cid_equal(const struct rbd_client_id *lhs,
+			  const struct rbd_client_id *rhs)
+{
+	return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
+}
+
+static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
+{
+	struct rbd_client_id cid;
+
+	mutex_lock(&rbd_dev->watch_mutex);
+	cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
+	cid.handle = rbd_dev->watch_cookie;
+	mutex_unlock(&rbd_dev->watch_mutex);
+	return cid;
+}
+
+/*
+ * lock_rwsem must be held for write
+ */
+static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
+			      const struct rbd_client_id *cid)
+{
+	dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
+	     rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
+	     cid->gid, cid->handle);
+	rbd_dev->owner_cid = *cid; /* struct */
+}
+
+static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
+{
+	mutex_lock(&rbd_dev->watch_mutex);
+	sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
+	mutex_unlock(&rbd_dev->watch_mutex);
+}
+
+/*
+ * lock_rwsem must be held for write
+ */
+static int rbd_lock(struct rbd_device *rbd_dev)
+{
+	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
+	char cookie[32];
+	int ret;
+
+	WARN_ON(__rbd_is_lock_owner(rbd_dev));
+
+	format_lock_cookie(rbd_dev, cookie);
+	ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+			    RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
+			    RBD_LOCK_TAG, "", 0);
+	if (ret)
+		return ret;
+
+	rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
+	rbd_set_owner_cid(rbd_dev, &cid);
+	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+	return 0;
+}
+
+/*
+ * lock_rwsem must be held for write
+ */
+static int rbd_unlock(struct rbd_device *rbd_dev)
+{
+	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+	char cookie[32];
+	int ret;
+
+	WARN_ON(!__rbd_is_lock_owner(rbd_dev));
+
+	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
+
+	format_lock_cookie(rbd_dev, cookie);
+	ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+			      RBD_LOCK_NAME, cookie);
+	if (ret && ret != -ENOENT) {
+		rbd_warn(rbd_dev, "cls_unlock failed: %d", ret);
+		return ret;
+	}
+
+	rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
+	queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
+	return 0;
+}
+
+static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
+				enum rbd_notify_op notify_op,
+				struct page ***preply_pages,
+				size_t *preply_len)
+{
+	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
+	int buf_size = 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN;
+	char buf[buf_size];
+	void *p = buf;
+
+	dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
+
+	/* encode *LockPayload NotifyMessage (op + ClientId) */
+	ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
+	ceph_encode_32(&p, notify_op);
+	ceph_encode_64(&p, cid.gid);
+	ceph_encode_64(&p, cid.handle);
+
+	return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
+				&rbd_dev->header_oloc, buf, buf_size,
+				RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
+}
+
+static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
+			       enum rbd_notify_op notify_op)
+{
+	struct page **reply_pages;
+	size_t reply_len;
+
+	__rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
+	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
+}
+
+static void rbd_notify_acquired_lock(struct work_struct *work)
+{
+	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
+						  acquired_lock_work);
+
+	rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
+}
+
+static void rbd_notify_released_lock(struct work_struct *work)
+{
+	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
+						  released_lock_work);
+
+	rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
+}
+
+static int rbd_request_lock(struct rbd_device *rbd_dev)
+{
+	struct page **reply_pages;
+	size_t reply_len;
+	bool lock_owner_responded = false;
+	int ret;
+
+	dout("%s rbd_dev %p\n", __func__, rbd_dev);
+
+	ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
+				   &reply_pages, &reply_len);
+	if (ret && ret != -ETIMEDOUT) {
+		rbd_warn(rbd_dev, "failed to request lock: %d", ret);
+		goto out;
+	}
+
+	if (reply_len > 0 && reply_len <= PAGE_SIZE) {
+		void *p = page_address(reply_pages[0]);
+		void *const end = p + reply_len;
+		u32 n;
+
+		ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
+		while (n--) {
+			u8 struct_v;
+			u32 len;
+
+			ceph_decode_need(&p, end, 8 + 8, e_inval);
+			p += 8 + 8; /* skip gid and cookie */
+
+			ceph_decode_32_safe(&p, end, len, e_inval);
+			if (!len)
+				continue;
+
+			if (lock_owner_responded) {
+				rbd_warn(rbd_dev,
+					 "duplicate lock owners detected");
+				ret = -EIO;
+				goto out;
+			}
+
+			lock_owner_responded = true;
+			ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
+						  &struct_v, &len);
+			if (ret) {
+				rbd_warn(rbd_dev,
+					 "failed to decode ResponseMessage: %d",
+					 ret);
+				goto e_inval;
+			}
+
+			ret = ceph_decode_32(&p);
+		}
+	}
+
+	if (!lock_owner_responded) {
+		rbd_warn(rbd_dev, "no lock owners detected");
+		ret = -ETIMEDOUT;
+	}
+
+out:
+	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
+	return ret;
+
+e_inval:
+	ret = -EINVAL;
+	goto out;
+}
+
+static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
+{
+	dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);
+
+	cancel_delayed_work(&rbd_dev->lock_dwork);
+	if (wake_all)
+		wake_up_all(&rbd_dev->lock_waitq);
+	else
+		wake_up(&rbd_dev->lock_waitq);
+}
+
+static int get_lock_owner_info(struct rbd_device *rbd_dev,
+			       struct ceph_locker **lockers, u32 *num_lockers)
+{
+	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+	u8 lock_type;
+	char *lock_tag;
+	int ret;
+
+	dout("%s rbd_dev %p\n", __func__, rbd_dev);
+
+	ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
+				 &rbd_dev->header_oloc, RBD_LOCK_NAME,
+				 &lock_type, &lock_tag, lockers, num_lockers);
+	if (ret)
+		return ret;
+
+	if (*num_lockers == 0) {
+		dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
+		goto out;
+	}
+
+	if (strcmp(lock_tag, RBD_LOCK_TAG)) {
+		rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
+			 lock_tag);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (lock_type == CEPH_CLS_LOCK_SHARED) {
+		rbd_warn(rbd_dev, "shared lock type detected");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
+		    strlen(RBD_LOCK_COOKIE_PREFIX))) {
+		rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
+			 (*lockers)[0].id.cookie);
+		ret = -EBUSY;
+		goto out;
+	}
+
+out:
+	kfree(lock_tag);
+	return ret;
+}
+
+static int find_watcher(struct rbd_device *rbd_dev,
+			const struct ceph_locker *locker)
+{
+	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+	struct ceph_watch_item *watchers;
+	u32 num_watchers;
+	u64 cookie;
+	int i;
+	int ret;
+
+	ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
+				      &rbd_dev->header_oloc, &watchers,
+				      &num_watchers);
+	if (ret)
+		return ret;
+
+	sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
+	for (i = 0; i < num_watchers; i++) {
+		if (!memcmp(&watchers[i].addr, &locker->info.addr,
+			    sizeof(locker->info.addr)) &&
+		    watchers[i].cookie == cookie) {
+			struct rbd_client_id cid = {
+				.gid = le64_to_cpu(watchers[i].name.num),
+				.handle = cookie,
+			};
+
+			dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
+			     rbd_dev, cid.gid, cid.handle);
+			rbd_set_owner_cid(rbd_dev, &cid);
+			ret = 1;
+			goto out;
+		}
+	}
+
+	dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
+	ret = 0;
+out:
+	kfree(watchers);
+	return ret;
+}
+
+/*
+ * lock_rwsem must be held for write
+ */
+static int rbd_try_lock(struct rbd_device *rbd_dev)
+{
+	struct ceph_client *client = rbd_dev->rbd_client->client;
+	struct ceph_locker *lockers;
+	u32 num_lockers;
+	int ret;
+
+	for (;;) {
+		ret = rbd_lock(rbd_dev);
+		if (ret != -EBUSY)
+			return ret;
+
+		/* determine if the current lock holder is still alive */
+		ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
+		if (ret)
+			return ret;
+
+		if (num_lockers == 0)
+			goto again;
+
+		ret = find_watcher(rbd_dev, lockers);
+		if (ret) {
+			if (ret > 0)
+				ret = 0; /* have to request lock */
+			goto out;
+		}
+
+		rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
+			 ENTITY_NAME(lockers[0].id.name));
+
+		ret = ceph_monc_blacklist_add(&client->monc,
+					      &lockers[0].info.addr);
+		if (ret) {
+			rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
+				 ENTITY_NAME(lockers[0].id.name), ret);
+			goto out;
+		}
+
+		ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
+					  &rbd_dev->header_oloc, RBD_LOCK_NAME,
+					  lockers[0].id.cookie,
+					  &lockers[0].id.name);
+		if (ret && ret != -ENOENT)
+			goto out;
+
+again:
+		ceph_free_lockers(lockers, num_lockers);
+	}
+
+out:
+	ceph_free_lockers(lockers, num_lockers);
+	return ret;
+}
+
+/*
+ * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
+ */
+static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
+						int *pret)
+{
+	enum rbd_lock_state lock_state;
+
+	down_read(&rbd_dev->lock_rwsem);
+	dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
+	     rbd_dev->lock_state);
+	if (__rbd_is_lock_owner(rbd_dev)) {
+		lock_state = rbd_dev->lock_state;
+		up_read(&rbd_dev->lock_rwsem);
+		return lock_state;
+	}
+
+	up_read(&rbd_dev->lock_rwsem);
+	down_write(&rbd_dev->lock_rwsem);
+	dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
+	     rbd_dev->lock_state);
+	if (!__rbd_is_lock_owner(rbd_dev)) {
+		*pret = rbd_try_lock(rbd_dev);
+		if (*pret)
+			rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
+	}
+
+	lock_state = rbd_dev->lock_state;
+	up_write(&rbd_dev->lock_rwsem);
+	return lock_state;
+}
+
+static void rbd_acquire_lock(struct work_struct *work)
+{
+	struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
+					    struct rbd_device, lock_dwork);
+	enum rbd_lock_state lock_state;
+	int ret;
+
+	dout("%s rbd_dev %p\n", __func__, rbd_dev);
+again:
+	lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
+	if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
+		if (lock_state == RBD_LOCK_STATE_LOCKED)
+			wake_requests(rbd_dev, true);
+		dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
+		     rbd_dev, lock_state, ret);
+		return;
+	}
+
+	ret = rbd_request_lock(rbd_dev);
+	if (ret == -ETIMEDOUT) {
+		goto again; /* treat this as a dead client */
+	} else if (ret < 0) {
+		rbd_warn(rbd_dev, "error requesting lock: %d", ret);
+		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
+				 RBD_RETRY_DELAY);
+	} else {
+		/*
+		 * lock owner acked, but resend if we don't see them
+		 * release the lock
+		 */
+		dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
+		     rbd_dev);
+		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
+		    msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
+	}
+}
+
+/*
+ * lock_rwsem must be held for write
+ */
+static bool rbd_release_lock(struct rbd_device *rbd_dev)
+{
+	dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
+	     rbd_dev->lock_state);
+	if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
+		return false;
+
+	rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
+	downgrade_write(&rbd_dev->lock_rwsem);
+	/*
+	 * Ensure that all in-flight IO is flushed.
+	 *
+	 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
+	 * may be shared with other devices.
+	 */
+	ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
+	up_read(&rbd_dev->lock_rwsem);
+
+	down_write(&rbd_dev->lock_rwsem);
+	dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
+	     rbd_dev->lock_state);
+	if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
+		return false;
+
+	if (!rbd_unlock(rbd_dev))
+		/*
+		 * Give others a chance to grab the lock - we would re-acquire
+		 * almost immediately if we got new IO during ceph_osdc_sync()
+		 * otherwise.  We need to ack our own notifications, so this
+		 * lock_dwork will be requeued from rbd_wait_state_locked()
+		 * after wake_requests() in rbd_handle_released_lock().
+		 */
+		cancel_delayed_work(&rbd_dev->lock_dwork);
+
+	return true;
+}
+
+static void rbd_release_lock_work(struct work_struct *work)
+{
+	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
+						  unlock_work);
+
+	down_write(&rbd_dev->lock_rwsem);
+	rbd_release_lock(rbd_dev);
+	up_write(&rbd_dev->lock_rwsem);
+}
+
+static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
+				     void **p)
+{
+	struct rbd_client_id cid = { 0 };
+
+	if (struct_v >= 2) {
+		cid.gid = ceph_decode_64(p);
+		cid.handle = ceph_decode_64(p);
+	}
+
+	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
+	     cid.handle);
+	if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
+		down_write(&rbd_dev->lock_rwsem);
+		if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
+			/*
+			 * we already know that the remote client is
+			 * the owner
+			 */
+			up_write(&rbd_dev->lock_rwsem);
+			return;
+		}
+
+		rbd_set_owner_cid(rbd_dev, &cid);
+		downgrade_write(&rbd_dev->lock_rwsem);
+	} else {
+		down_read(&rbd_dev->lock_rwsem);
+	}
+
+	if (!__rbd_is_lock_owner(rbd_dev))
+		wake_requests(rbd_dev, false);
+	up_read(&rbd_dev->lock_rwsem);
+}
+
+static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
+				     void **p)
+{
+	struct rbd_client_id cid = { 0 };
+
+	if (struct_v >= 2) {
+		cid.gid = ceph_decode_64(p);
+		cid.handle = ceph_decode_64(p);
+	}
+
+	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
+	     cid.handle);
+	if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
+		down_write(&rbd_dev->lock_rwsem);
+		if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
+			dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
+			     __func__, rbd_dev, cid.gid, cid.handle,
+			     rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
+			up_write(&rbd_dev->lock_rwsem);
+			return;
+		}
+
+		rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
+		downgrade_write(&rbd_dev->lock_rwsem);
+	} else {
+		down_read(&rbd_dev->lock_rwsem);
+	}
+
+	if (!__rbd_is_lock_owner(rbd_dev))
+		wake_requests(rbd_dev, false);
+	up_read(&rbd_dev->lock_rwsem);
+}
+
+static bool rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
+				    void **p)
+{
+	struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
+	struct rbd_client_id cid = { 0 };
+	bool need_to_send;
+
+	if (struct_v >= 2) {
+		cid.gid = ceph_decode_64(p);
+		cid.handle = ceph_decode_64(p);
+	}
+
+	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
+	     cid.handle);
+	if (rbd_cid_equal(&cid, &my_cid))
+		return false;
+
+	down_read(&rbd_dev->lock_rwsem);
+	need_to_send = __rbd_is_lock_owner(rbd_dev);
+	if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
+		if (!rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid)) {
+			dout("%s rbd_dev %p queueing unlock_work\n", __func__,
+			     rbd_dev);
+			queue_work(rbd_dev->task_wq, &rbd_dev->unlock_work);
+		}
+	}
+	up_read(&rbd_dev->lock_rwsem);
+	return need_to_send;
+}
+
+static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
+				     u64 notify_id, u64 cookie, s32 *result)
+{
+	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+	int buf_size = 4 + CEPH_ENCODING_START_BLK_LEN;
+	char buf[buf_size];
+	int ret;
+
+	if (result) {
+		void *p = buf;
+
+		/* encode ResponseMessage */
+		ceph_start_encoding(&p, 1, 1,
+				    buf_size - CEPH_ENCODING_START_BLK_LEN);
+		ceph_encode_32(&p, *result);
+	} else {
+		buf_size = 0;
+	}
+
+	ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
+				   &rbd_dev->header_oloc, notify_id, cookie,
+				   buf, buf_size);
+	if (ret)
+		rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
+}
+
+static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
+				   u64 cookie)
+{
+	dout("%s rbd_dev %p\n", __func__, rbd_dev);
+	__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
+}
+
+static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
+					  u64 notify_id, u64 cookie, s32 result)
+{
+	dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
+	__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
+}
 
 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
 			 u64 notifier_id, void *data, size_t data_len)
 {
 	struct rbd_device *rbd_dev = arg;
-	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+	void *p = data;
+	void *const end = p + data_len;
+	u8 struct_v;
+	u32 len;
+	u32 notify_op;
 	int ret;
 
-	dout("%s rbd_dev %p cookie %llu notify_id %llu\n", __func__, rbd_dev,
-	     cookie, notify_id);
+	dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
+	     __func__, rbd_dev, cookie, notify_id, data_len);
+	if (data_len) {
+		ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
+					  &struct_v, &len);
+		if (ret) {
+			rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
+				 ret);
+			return;
+		}
 
-	/*
-	 * Until adequate refresh error handling is in place, there is
-	 * not much we can do here, except warn.
-	 *
-	 * See http://tracker.ceph.com/issues/5040
-	 */
-	ret = rbd_dev_refresh(rbd_dev);
-	if (ret)
-		rbd_warn(rbd_dev, "refresh failed: %d", ret);
+		notify_op = ceph_decode_32(&p);
+	} else {
+		/* legacy notification for header updates */
+		notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
+		len = 0;
+	}
 
-	ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
-				   &rbd_dev->header_oloc, notify_id, cookie,
-				   NULL, 0);
-	if (ret)
-		rbd_warn(rbd_dev, "notify_ack ret %d", ret);
+	dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
+	switch (notify_op) {
+	case RBD_NOTIFY_OP_ACQUIRED_LOCK:
+		rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
+		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
+		break;
+	case RBD_NOTIFY_OP_RELEASED_LOCK:
+		rbd_handle_released_lock(rbd_dev, struct_v, &p);
+		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
+		break;
+	case RBD_NOTIFY_OP_REQUEST_LOCK:
+		if (rbd_handle_request_lock(rbd_dev, struct_v, &p))
+			/*
+			 * send ResponseMessage(0) back so the client
+			 * can detect a missing owner
+			 */
+			rbd_acknowledge_notify_result(rbd_dev, notify_id,
+						      cookie, 0);
+		else
+			rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
+		break;
+	case RBD_NOTIFY_OP_HEADER_UPDATE:
+		ret = rbd_dev_refresh(rbd_dev);
+		if (ret)
+			rbd_warn(rbd_dev, "refresh failed: %d", ret);
+
+		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
+		break;
+	default:
+		if (rbd_is_lock_owner(rbd_dev))
+			rbd_acknowledge_notify_result(rbd_dev, notify_id,
+						      cookie, -EOPNOTSUPP);
+		else
+			rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
+		break;
+	}
 }
 
+static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
+
 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
 {
 	struct rbd_device *rbd_dev = arg;
-	int ret;
 
 	rbd_warn(rbd_dev, "encountered watch error: %d", err);
 
-	__rbd_dev_header_unwatch_sync(rbd_dev);
+	down_write(&rbd_dev->lock_rwsem);
+	rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
+	up_write(&rbd_dev->lock_rwsem);
 
-	ret = rbd_dev_header_watch_sync(rbd_dev);
-	if (ret) {
-		rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
-		return;
+	mutex_lock(&rbd_dev->watch_mutex);
+	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
+		__rbd_unregister_watch(rbd_dev);
+		rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
+
+		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
 	}
-
-	ret = rbd_dev_refresh(rbd_dev);
-	if (ret)
-		rbd_warn(rbd_dev, "reregisteration refresh failed: %d", ret);
+	mutex_unlock(&rbd_dev->watch_mutex);
 }
 
 /*
- * Initiate a watch request, synchronously.
+ * watch_mutex must be locked
  */
-static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
+static int __rbd_register_watch(struct rbd_device *rbd_dev)
 {
 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
 	struct ceph_osd_linger_request *handle;
 
 	rbd_assert(!rbd_dev->watch_handle);
+	dout("%s rbd_dev %p\n", __func__, rbd_dev);
 
 	handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
 				 &rbd_dev->header_oloc, rbd_watch_cb,
@@ -3154,13 +3860,16 @@
 	return 0;
 }
 
-static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
+/*
+ * watch_mutex must be locked
+ */
+static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
 {
 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
 	int ret;
 
-	if (!rbd_dev->watch_handle)
-		return;
+	rbd_assert(rbd_dev->watch_handle);
+	dout("%s rbd_dev %p\n", __func__, rbd_dev);
 
 	ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
 	if (ret)
@@ -3169,17 +3878,100 @@
 	rbd_dev->watch_handle = NULL;
 }
 
-/*
- * Tear down a watch request, synchronously.
- */
-static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
+static int rbd_register_watch(struct rbd_device *rbd_dev)
 {
-	__rbd_dev_header_unwatch_sync(rbd_dev);
+	int ret;
 
-	dout("%s flushing notifies\n", __func__);
+	mutex_lock(&rbd_dev->watch_mutex);
+	rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
+	ret = __rbd_register_watch(rbd_dev);
+	if (ret)
+		goto out;
+
+	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
+	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
+
+out:
+	mutex_unlock(&rbd_dev->watch_mutex);
+	return ret;
+}
+
+static void cancel_tasks_sync(struct rbd_device *rbd_dev)
+{
+	dout("%s rbd_dev %p\n", __func__, rbd_dev);
+
+	cancel_delayed_work_sync(&rbd_dev->watch_dwork);
+	cancel_work_sync(&rbd_dev->acquired_lock_work);
+	cancel_work_sync(&rbd_dev->released_lock_work);
+	cancel_delayed_work_sync(&rbd_dev->lock_dwork);
+	cancel_work_sync(&rbd_dev->unlock_work);
+}
+
+static void rbd_unregister_watch(struct rbd_device *rbd_dev)
+{
+	WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
+	cancel_tasks_sync(rbd_dev);
+
+	mutex_lock(&rbd_dev->watch_mutex);
+	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
+		__rbd_unregister_watch(rbd_dev);
+	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
+	mutex_unlock(&rbd_dev->watch_mutex);
+
 	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
 }
 
+static void rbd_reregister_watch(struct work_struct *work)
+{
+	struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
+					    struct rbd_device, watch_dwork);
+	bool was_lock_owner = false;
+	int ret;
+
+	dout("%s rbd_dev %p\n", __func__, rbd_dev);
+
+	down_write(&rbd_dev->lock_rwsem);
+	if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
+		was_lock_owner = rbd_release_lock(rbd_dev);
+
+	mutex_lock(&rbd_dev->watch_mutex);
+	if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR)
+		goto fail_unlock;
+
+	ret = __rbd_register_watch(rbd_dev);
+	if (ret) {
+		rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
+		if (ret != -EBLACKLISTED)
+			queue_delayed_work(rbd_dev->task_wq,
+					   &rbd_dev->watch_dwork,
+					   RBD_RETRY_DELAY);
+		goto fail_unlock;
+	}
+
+	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
+	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
+	mutex_unlock(&rbd_dev->watch_mutex);
+
+	ret = rbd_dev_refresh(rbd_dev);
+	if (ret)
+		rbd_warn(rbd_dev, "reregisteration refresh failed: %d", ret);
+
+	if (was_lock_owner) {
+		ret = rbd_try_lock(rbd_dev);
+		if (ret)
+			rbd_warn(rbd_dev, "reregisteration lock failed: %d",
+				 ret);
+	}
+
+	up_write(&rbd_dev->lock_rwsem);
+	wake_requests(rbd_dev, true);
+	return;
+
+fail_unlock:
+	mutex_unlock(&rbd_dev->watch_mutex);
+	up_write(&rbd_dev->lock_rwsem);
+}
+
 /*
  * Synchronous osd object method call.  Returns the number of bytes
  * returned in the outbound buffer, or a negative error code.
@@ -3193,7 +3985,6 @@
 			     void *inbound,
 			     size_t inbound_size)
 {
-	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
 	struct rbd_obj_request *obj_request;
 	struct page **pages;
 	u32 page_count;
@@ -3242,11 +4033,8 @@
 	osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
 					obj_request->pages, inbound_size,
 					0, false, false);
-	rbd_osd_req_format_read(obj_request);
 
-	ret = rbd_obj_request_submit(osdc, obj_request);
-	if (ret)
-		goto out;
+	rbd_obj_request_submit(obj_request);
 	ret = rbd_obj_request_wait(obj_request);
 	if (ret)
 		goto out;
@@ -3267,6 +4055,29 @@
 	return ret;
 }
 
+/*
+ * lock_rwsem must be held for read
+ */
+static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
+{
+	DEFINE_WAIT(wait);
+
+	do {
+		/*
+		 * Note the use of mod_delayed_work() in rbd_acquire_lock()
+		 * and cancel_delayed_work() in wake_requests().
+		 */
+		dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
+		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
+		prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
+					  TASK_UNINTERRUPTIBLE);
+		up_read(&rbd_dev->lock_rwsem);
+		schedule();
+		down_read(&rbd_dev->lock_rwsem);
+	} while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
+	finish_wait(&rbd_dev->lock_waitq, &wait);
+}
+
 static void rbd_queue_workfn(struct work_struct *work)
 {
 	struct request *rq = blk_mq_rq_from_pdu(work);
@@ -3277,6 +4088,7 @@
 	u64 length = blk_rq_bytes(rq);
 	enum obj_operation_type op_type;
 	u64 mapping_size;
+	bool must_be_locked;
 	int result;
 
 	if (rq->cmd_type != REQ_TYPE_FS) {
@@ -3338,6 +4150,10 @@
 	if (op_type != OBJ_OP_READ) {
 		snapc = rbd_dev->header.snapc;
 		ceph_get_snap_context(snapc);
+		must_be_locked = rbd_is_lock_supported(rbd_dev);
+	} else {
+		must_be_locked = rbd_dev->opts->lock_on_read &&
+					rbd_is_lock_supported(rbd_dev);
 	}
 	up_read(&rbd_dev->header_rwsem);
 
@@ -3348,11 +4164,17 @@
 		goto err_rq;
 	}
 
+	if (must_be_locked) {
+		down_read(&rbd_dev->lock_rwsem);
+		if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
+			rbd_wait_state_locked(rbd_dev);
+	}
+
 	img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
 					     snapc);
 	if (!img_request) {
 		result = -ENOMEM;
-		goto err_rq;
+		goto err_unlock;
 	}
 	img_request->rq = rq;
 	snapc = NULL; /* img_request consumes a ref */
@@ -3370,10 +4192,15 @@
 	if (result)
 		goto err_img_request;
 
+	if (must_be_locked)
+		up_read(&rbd_dev->lock_rwsem);
 	return;
 
 err_img_request:
 	rbd_img_request_put(img_request);
+err_unlock:
+	if (must_be_locked)
+		up_read(&rbd_dev->lock_rwsem);
 err_rq:
 	if (result)
 		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
@@ -3415,7 +4242,6 @@
 				u64 offset, u64 length, void *buf)
 
 {
-	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
 	struct rbd_obj_request *obj_request;
 	struct page **pages = NULL;
 	u32 page_count;
@@ -3448,11 +4274,8 @@
 					obj_request->length,
 					obj_request->offset & ~PAGE_MASK,
 					false, false);
-	rbd_osd_req_format_read(obj_request);
 
-	ret = rbd_obj_request_submit(osdc, obj_request);
-	if (ret)
-		goto out;
+	rbd_obj_request_submit(obj_request);
 	ret = rbd_obj_request_wait(obj_request);
 	if (ret)
 		goto out;
@@ -3751,13 +4574,40 @@
 	return sprintf(buf, "%d\n", rbd_dev->minor);
 }
 
+static ssize_t rbd_client_addr_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+	struct ceph_entity_addr *client_addr =
+	    ceph_client_addr(rbd_dev->rbd_client->client);
+
+	return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
+		       le32_to_cpu(client_addr->nonce));
+}
+
 static ssize_t rbd_client_id_show(struct device *dev,
 				  struct device_attribute *attr, char *buf)
 {
 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
 
 	return sprintf(buf, "client%lld\n",
-			ceph_client_id(rbd_dev->rbd_client->client));
+		       ceph_client_gid(rbd_dev->rbd_client->client));
+}
+
+static ssize_t rbd_cluster_fsid_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+
+	return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
+}
+
+static ssize_t rbd_config_info_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+
+	return sprintf(buf, "%s\n", rbd_dev->config_info);
 }
 
 static ssize_t rbd_pool_show(struct device *dev,
@@ -3809,6 +4659,14 @@
 	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
 }
 
+static ssize_t rbd_snap_id_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+
+	return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
+}
+
 /*
  * For a v2 image, shows the chain of parent images, separated by empty
  * lines.  For v1 images or if there is no parent, shows "(no parent
@@ -3861,13 +4719,17 @@
 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
+static DEVICE_ATTR(client_addr, S_IRUGO, rbd_client_addr_show, NULL);
 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
+static DEVICE_ATTR(cluster_fsid, S_IRUGO, rbd_cluster_fsid_show, NULL);
+static DEVICE_ATTR(config_info, S_IRUSR, rbd_config_info_show, NULL);
 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
+static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
 
 static struct attribute *rbd_attrs[] = {
@@ -3875,12 +4737,16 @@
 	&dev_attr_features.attr,
 	&dev_attr_major.attr,
 	&dev_attr_minor.attr,
+	&dev_attr_client_addr.attr,
 	&dev_attr_client_id.attr,
+	&dev_attr_cluster_fsid.attr,
+	&dev_attr_config_info.attr,
 	&dev_attr_pool.attr,
 	&dev_attr_pool_id.attr,
 	&dev_attr_name.attr,
 	&dev_attr_image_id.attr,
 	&dev_attr_current_snap.attr,
+	&dev_attr_snap_id.attr,
 	&dev_attr_parent.attr,
 	&dev_attr_refresh.attr,
 	NULL
@@ -3943,18 +4809,32 @@
 	kfree(spec);
 }
 
-static void rbd_dev_release(struct device *dev)
+static void rbd_dev_free(struct rbd_device *rbd_dev)
 {
-	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
-	bool need_put = !!rbd_dev->opts;
+	WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
+	WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
 
 	ceph_oid_destroy(&rbd_dev->header_oid);
 	ceph_oloc_destroy(&rbd_dev->header_oloc);
+	kfree(rbd_dev->config_info);
 
 	rbd_put_client(rbd_dev->rbd_client);
 	rbd_spec_put(rbd_dev->spec);
 	kfree(rbd_dev->opts);
 	kfree(rbd_dev);
+}
+
+static void rbd_dev_release(struct device *dev)
+{
+	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+	bool need_put = !!rbd_dev->opts;
+
+	if (need_put) {
+		destroy_workqueue(rbd_dev->task_wq);
+		ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+	}
+
+	rbd_dev_free(rbd_dev);
 
 	/*
 	 * This is racy, but way better than putting module outside of
@@ -3965,25 +4845,34 @@
 		module_put(THIS_MODULE);
 }
 
-static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
-					 struct rbd_spec *spec,
-					 struct rbd_options *opts)
+static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
+					   struct rbd_spec *spec)
 {
 	struct rbd_device *rbd_dev;
 
-	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
+	rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
 	if (!rbd_dev)
 		return NULL;
 
 	spin_lock_init(&rbd_dev->lock);
-	rbd_dev->flags = 0;
-	atomic_set(&rbd_dev->parent_ref, 0);
 	INIT_LIST_HEAD(&rbd_dev->node);
 	init_rwsem(&rbd_dev->header_rwsem);
 
 	ceph_oid_init(&rbd_dev->header_oid);
 	ceph_oloc_init(&rbd_dev->header_oloc);
 
+	mutex_init(&rbd_dev->watch_mutex);
+	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
+	INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
+
+	init_rwsem(&rbd_dev->lock_rwsem);
+	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
+	INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
+	INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
+	INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
+	INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
+	init_waitqueue_head(&rbd_dev->lock_waitq);
+
 	rbd_dev->dev.bus = &rbd_bus_type;
 	rbd_dev->dev.type = &rbd_device_type;
 	rbd_dev->dev.parent = &rbd_root_dev;
@@ -3991,9 +4880,6 @@
 
 	rbd_dev->rbd_client = rbdc;
 	rbd_dev->spec = spec;
-	rbd_dev->opts = opts;
-
-	/* Initialize the layout used for all rbd requests */
 
 	rbd_dev->layout.stripe_unit = 1 << RBD_MAX_OBJ_ORDER;
 	rbd_dev->layout.stripe_count = 1;
@@ -4001,17 +4887,50 @@
 	rbd_dev->layout.pool_id = spec->pool_id;
 	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
 
-	/*
-	 * If this is a mapping rbd_dev (as opposed to a parent one),
-	 * pin our module.  We have a ref from do_rbd_add(), so use
-	 * __module_get().
-	 */
-	if (rbd_dev->opts)
-		__module_get(THIS_MODULE);
-
 	return rbd_dev;
 }
 
+/*
+ * Create a mapping rbd_dev.
+ */
+static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
+					 struct rbd_spec *spec,
+					 struct rbd_options *opts)
+{
+	struct rbd_device *rbd_dev;
+
+	rbd_dev = __rbd_dev_create(rbdc, spec);
+	if (!rbd_dev)
+		return NULL;
+
+	rbd_dev->opts = opts;
+
+	/* get an id and fill in device name */
+	rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
+					 minor_to_rbd_dev_id(1 << MINORBITS),
+					 GFP_KERNEL);
+	if (rbd_dev->dev_id < 0)
+		goto fail_rbd_dev;
+
+	sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
+	rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
+						   rbd_dev->name);
+	if (!rbd_dev->task_wq)
+		goto fail_dev_id;
+
+	/* we have a ref from do_rbd_add() */
+	__module_get(THIS_MODULE);
+
+	dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
+	return rbd_dev;
+
+fail_dev_id:
+	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+fail_rbd_dev:
+	rbd_dev_free(rbd_dev);
+	return NULL;
+}
+
 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
 {
 	if (rbd_dev)
@@ -4645,46 +5564,6 @@
 }
 
 /*
- * Get a unique rbd identifier for the given new rbd_dev, and add
- * the rbd_dev to the global list.
- */
-static int rbd_dev_id_get(struct rbd_device *rbd_dev)
-{
-	int new_dev_id;
-
-	new_dev_id = ida_simple_get(&rbd_dev_id_ida,
-				    0, minor_to_rbd_dev_id(1 << MINORBITS),
-				    GFP_KERNEL);
-	if (new_dev_id < 0)
-		return new_dev_id;
-
-	rbd_dev->dev_id = new_dev_id;
-
-	spin_lock(&rbd_dev_list_lock);
-	list_add_tail(&rbd_dev->node, &rbd_dev_list);
-	spin_unlock(&rbd_dev_list_lock);
-
-	dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
-
-	return 0;
-}
-
-/*
- * Remove an rbd_dev from the global list, and record that its
- * identifier is no longer in use.
- */
-static void rbd_dev_id_put(struct rbd_device *rbd_dev)
-{
-	spin_lock(&rbd_dev_list_lock);
-	list_del_init(&rbd_dev->node);
-	spin_unlock(&rbd_dev_list_lock);
-
-	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
-
-	dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
-}
-
-/*
  * Skips over white space at *buf, and updates *buf to point to the
  * first found non-space character (if any). Returns the length of
  * the token (string of non-white space characters) found.  Note
@@ -4859,6 +5738,7 @@
 
 	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
 	rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
+	rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
 
 	copts = ceph_parse_options(options, mon_addrs,
 					mon_addrs + mon_addrs_size - 1,
@@ -5076,8 +5956,7 @@
 		goto out_err;
 	}
 
-	parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec,
-				NULL);
+	parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
 	if (!parent) {
 		ret = -ENOMEM;
 		goto out_err;
@@ -5112,22 +5991,12 @@
 {
 	int ret;
 
-	/* Get an id and fill in device name. */
-
-	ret = rbd_dev_id_get(rbd_dev);
-	if (ret)
-		goto err_out_unlock;
-
-	BUILD_BUG_ON(DEV_NAME_LEN
-			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
-	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
-
 	/* Record our major and minor device numbers. */
 
 	if (!single_major) {
 		ret = register_blkdev(0, rbd_dev->name);
 		if (ret < 0)
-			goto err_out_id;
+			goto err_out_unlock;
 
 		rbd_dev->major = ret;
 		rbd_dev->minor = 0;
@@ -5159,9 +6028,14 @@
 	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
 	up_write(&rbd_dev->header_rwsem);
 
+	spin_lock(&rbd_dev_list_lock);
+	list_add_tail(&rbd_dev->node, &rbd_dev_list);
+	spin_unlock(&rbd_dev_list_lock);
+
 	add_disk(rbd_dev->disk);
-	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
-		(unsigned long long) rbd_dev->mapping.size);
+	pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
+		(unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
+		rbd_dev->header.features);
 
 	return ret;
 
@@ -5172,8 +6046,6 @@
 err_out_blkdev:
 	if (!single_major)
 		unregister_blkdev(rbd_dev->major, rbd_dev->name);
-err_out_id:
-	rbd_dev_id_put(rbd_dev);
 err_out_unlock:
 	up_write(&rbd_dev->header_rwsem);
 	return ret;
@@ -5234,7 +6106,7 @@
 		goto err_out_format;
 
 	if (!depth) {
-		ret = rbd_dev_header_watch_sync(rbd_dev);
+		ret = rbd_register_watch(rbd_dev);
 		if (ret) {
 			if (ret == -ENOENT)
 				pr_info("image %s/%s does not exist\n",
@@ -5293,7 +6165,7 @@
 	rbd_dev_unprobe(rbd_dev);
 err_out_watch:
 	if (!depth)
-		rbd_dev_header_unwatch_sync(rbd_dev);
+		rbd_unregister_watch(rbd_dev);
 err_out_format:
 	rbd_dev->image_format = 0;
 	kfree(rbd_dev->spec->image_id);
@@ -5345,10 +6217,18 @@
 	spec = NULL;		/* rbd_dev now owns this */
 	rbd_opts = NULL;	/* rbd_dev now owns this */
 
+	rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
+	if (!rbd_dev->config_info) {
+		rc = -ENOMEM;
+		goto err_out_rbd_dev;
+	}
+
 	down_write(&rbd_dev->header_rwsem);
 	rc = rbd_dev_image_probe(rbd_dev, 0);
-	if (rc < 0)
+	if (rc < 0) {
+		up_write(&rbd_dev->header_rwsem);
 		goto err_out_rbd_dev;
+	}
 
 	/* If we are mapping a snapshot it must be marked read-only */
 
@@ -5360,11 +6240,11 @@
 	rc = rbd_dev_device_setup(rbd_dev);
 	if (rc) {
 		/*
-		 * rbd_dev_header_unwatch_sync() can't be moved into
+		 * rbd_unregister_watch() can't be moved into
 		 * rbd_dev_image_release() without refactoring, see
 		 * commit 1f3ef78861ac.
 		 */
-		rbd_dev_header_unwatch_sync(rbd_dev);
+		rbd_unregister_watch(rbd_dev);
 		rbd_dev_image_release(rbd_dev);
 		goto out;
 	}
@@ -5375,7 +6255,6 @@
 	return rc;
 
 err_out_rbd_dev:
-	up_write(&rbd_dev->header_rwsem);
 	rbd_dev_destroy(rbd_dev);
 err_out_client:
 	rbd_put_client(rbdc);
@@ -5405,12 +6284,16 @@
 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
 {
 	rbd_free_disk(rbd_dev);
+
+	spin_lock(&rbd_dev_list_lock);
+	list_del_init(&rbd_dev->node);
+	spin_unlock(&rbd_dev_list_lock);
+
 	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
 	device_del(&rbd_dev->dev);
 	rbd_dev_mapping_clear(rbd_dev);
 	if (!single_major)
 		unregister_blkdev(rbd_dev->major, rbd_dev->name);
-	rbd_dev_id_put(rbd_dev);
 }
 
 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
@@ -5446,18 +6329,26 @@
 	struct rbd_device *rbd_dev = NULL;
 	struct list_head *tmp;
 	int dev_id;
-	unsigned long ul;
+	char opt_buf[6];
 	bool already = false;
+	bool force = false;
 	int ret;
 
-	ret = kstrtoul(buf, 10, &ul);
-	if (ret)
-		return ret;
-
-	/* convert to int; abort if we lost anything in the conversion */
-	dev_id = (int)ul;
-	if (dev_id != ul)
+	dev_id = -1;
+	opt_buf[0] = '\0';
+	sscanf(buf, "%d %5s", &dev_id, opt_buf);
+	if (dev_id < 0) {
+		pr_err("dev_id out of range\n");
 		return -EINVAL;
+	}
+	if (opt_buf[0] != '\0') {
+		if (!strcmp(opt_buf, "force")) {
+			force = true;
+		} else {
+			pr_err("bad remove option at '%s'\n", opt_buf);
+			return -EINVAL;
+		}
+	}
 
 	ret = -ENOENT;
 	spin_lock(&rbd_dev_list_lock);
@@ -5470,7 +6361,7 @@
 	}
 	if (!ret) {
 		spin_lock_irq(&rbd_dev->lock);
-		if (rbd_dev->open_count)
+		if (rbd_dev->open_count && !force)
 			ret = -EBUSY;
 		else
 			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
@@ -5481,7 +6372,20 @@
 	if (ret < 0 || already)
 		return ret;
 
-	rbd_dev_header_unwatch_sync(rbd_dev);
+	if (force) {
+		/*
+		 * Prevent new IO from being queued and wait for existing
+		 * IO to complete/fail.
+		 */
+		blk_mq_freeze_queue(rbd_dev->disk->queue);
+		blk_set_queue_dying(rbd_dev->disk->queue);
+	}
+
+	down_write(&rbd_dev->lock_rwsem);
+	if (__rbd_is_lock_owner(rbd_dev))
+		rbd_unlock(rbd_dev);
+	up_write(&rbd_dev->lock_rwsem);
+	rbd_unregister_watch(rbd_dev);
 
 	/*
 	 * Don't free anything from rbd_dev->disk until after all
diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h
index 49d77cb..94f367db2 100644
--- a/drivers/block/rbd_types.h
+++ b/drivers/block/rbd_types.h
@@ -28,6 +28,17 @@
 #define RBD_DATA_PREFIX        "rbd_data."
 #define RBD_ID_PREFIX          "rbd_id."
 
+#define RBD_LOCK_NAME          "rbd_lock"
+#define RBD_LOCK_TAG           "internal"
+#define RBD_LOCK_COOKIE_PREFIX "auto"
+
+enum rbd_notify_op {
+	RBD_NOTIFY_OP_ACQUIRED_LOCK      = 0,
+	RBD_NOTIFY_OP_RELEASED_LOCK      = 1,
+	RBD_NOTIFY_OP_REQUEST_LOCK       = 2,
+	RBD_NOTIFY_OP_HEADER_UPDATE      = 3,
+};
+
 /*
  * For format version 1, rbd image 'foo' consists of objects
  *   foo.rbd		- image metadata
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 6bd63b8..2f633df 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -314,6 +314,7 @@
 	/* Marvell Bluetooth devices */
 	{ USB_DEVICE(0x1286, 0x2044), .driver_info = BTUSB_MARVELL },
 	{ USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
+	{ USB_DEVICE(0x1286, 0x204e), .driver_info = BTUSB_MARVELL },
 
 	/* Intel Bluetooth devices */
 	{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
@@ -1042,6 +1043,10 @@
 
 	BT_DBG("%s", hdev->name);
 
+	err = usb_autopm_get_interface(data->intf);
+	if (err < 0)
+		return err;
+
 	/* Patching USB firmware files prior to starting any URBs of HCI path
 	 * It is more safe to use USB bulk channel for downloading USB patch
 	 */
@@ -1051,10 +1056,6 @@
 			return err;
 	}
 
-	err = usb_autopm_get_interface(data->intf);
-	if (err < 0)
-		return err;
-
 	data->intf->needs_remote_wakeup = 1;
 
 	if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 4431129..0f7d28a 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -845,6 +845,8 @@
 			   unsigned int flags)
 {
 	intel_private.driver->write_entry(addr, pg, flags);
+	if (intel_private.driver->chipset_flush)
+		intel_private.driver->chipset_flush();
 }
 EXPORT_SYMBOL(intel_gtt_insert_page);
 
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 8c0770b..200dab5 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -410,6 +410,19 @@
 
 	  If unsure, say Y.
 
+config HW_RANDOM_CAVIUM
+       tristate "Cavium ThunderX Random Number Generator support"
+       depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
+       default HW_RANDOM
+       ---help---
+         This driver provides kernel-side support for the Random Number
+         Generator hardware found on Cavium SoCs.
+
+         To compile this driver as a module, choose M here: the
+         module will be called cavium_rng.
+
+         If unsure, say Y.
+
 endif # HW_RANDOM
 
 config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 04bb0b0..5f52b1e 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -35,3 +35,4 @@
 obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
 obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o
 obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
+obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 48f6a83..4a99ac7 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -24,16 +24,18 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/hw_random.h>
 #include <linux/delay.h>
-#include <asm/io.h>
+#include <linux/hw_random.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
 
+#define DRV_NAME "AMD768-HWRNG"
 
-#define PFX	KBUILD_MODNAME ": "
-
+#define RNGDATA		0x00
+#define RNGDONE		0x04
+#define PMBASE_OFFSET	0xF0
+#define PMBASE_SIZE	8
 
 /*
  * Data for PCI driver interface
@@ -50,72 +52,84 @@
 };
 MODULE_DEVICE_TABLE(pci, pci_tbl);
 
-static struct pci_dev *amd_pdev;
+struct amd768_priv {
+	void __iomem *iobase;
+	struct pci_dev *pcidev;
+};
 
-
-static int amd_rng_data_present(struct hwrng *rng, int wait)
+static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
 {
-	u32 pmbase = (u32)rng->priv;
-	int data, i;
+	u32 *data = buf;
+	struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
+	size_t read = 0;
+	/* We will wait at maximum one time per read */
+	int timeout = max / 4 + 1;
 
-	for (i = 0; i < 20; i++) {
-		data = !!(inl(pmbase + 0xF4) & 1);
-		if (data || !wait)
-			break;
-		udelay(10);
+	/*
+	 * RNG data is available when RNGDONE is set to 1
+	 * New random numbers are generated approximately 128 microseconds
+	 * after RNGDATA is read
+	 */
+	while (read < max) {
+		if (ioread32(priv->iobase + RNGDONE) == 0) {
+			if (wait) {
+				/* Delay given by datasheet */
+				usleep_range(128, 196);
+				if (timeout-- == 0)
+					return read;
+			} else {
+				return 0;
+			}
+		} else {
+			*data = ioread32(priv->iobase + RNGDATA);
+			data++;
+			read += 4;
+		}
 	}
-	return data;
-}
 
-static int amd_rng_data_read(struct hwrng *rng, u32 *data)
-{
-	u32 pmbase = (u32)rng->priv;
-
-	*data = inl(pmbase + 0xF0);
-
-	return 4;
+	return read;
 }
 
 static int amd_rng_init(struct hwrng *rng)
 {
+	struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
 	u8 rnen;
 
-	pci_read_config_byte(amd_pdev, 0x40, &rnen);
-	rnen |= (1 << 7);	/* RNG on */
-	pci_write_config_byte(amd_pdev, 0x40, rnen);
+	pci_read_config_byte(priv->pcidev, 0x40, &rnen);
+	rnen |= BIT(7);	/* RNG on */
+	pci_write_config_byte(priv->pcidev, 0x40, rnen);
 
-	pci_read_config_byte(amd_pdev, 0x41, &rnen);
-	rnen |= (1 << 7);	/* PMIO enable */
-	pci_write_config_byte(amd_pdev, 0x41, rnen);
+	pci_read_config_byte(priv->pcidev, 0x41, &rnen);
+	rnen |= BIT(7);	/* PMIO enable */
+	pci_write_config_byte(priv->pcidev, 0x41, rnen);
 
 	return 0;
 }
 
 static void amd_rng_cleanup(struct hwrng *rng)
 {
+	struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
 	u8 rnen;
 
-	pci_read_config_byte(amd_pdev, 0x40, &rnen);
-	rnen &= ~(1 << 7);	/* RNG off */
-	pci_write_config_byte(amd_pdev, 0x40, rnen);
+	pci_read_config_byte(priv->pcidev, 0x40, &rnen);
+	rnen &= ~BIT(7);	/* RNG off */
+	pci_write_config_byte(priv->pcidev, 0x40, rnen);
 }
 
-
 static struct hwrng amd_rng = {
 	.name		= "amd",
 	.init		= amd_rng_init,
 	.cleanup	= amd_rng_cleanup,
-	.data_present	= amd_rng_data_present,
-	.data_read	= amd_rng_data_read,
+	.read		= amd_rng_read,
 };
 
-
 static int __init mod_init(void)
 {
 	int err = -ENODEV;
 	struct pci_dev *pdev = NULL;
 	const struct pci_device_id *ent;
 	u32 pmbase;
+	struct amd768_priv *priv;
 
 	for_each_pci_dev(pdev) {
 		ent = pci_match_id(pci_tbl, pdev);
@@ -123,42 +137,44 @@
 			goto found;
 	}
 	/* Device not found. */
-	goto out;
+	return -ENODEV;
 
 found:
 	err = pci_read_config_dword(pdev, 0x58, &pmbase);
 	if (err)
-		goto out;
-	err = -EIO;
+		return err;
+
 	pmbase &= 0x0000FF00;
 	if (pmbase == 0)
-		goto out;
-	if (!request_region(pmbase + 0xF0, 8, "AMD HWRNG")) {
-		dev_err(&pdev->dev, "AMD HWRNG region 0x%x already in use!\n",
-			pmbase + 0xF0);
-		err = -EBUSY;
-		goto out;
-	}
-	amd_rng.priv = (unsigned long)pmbase;
-	amd_pdev = pdev;
+		return -EIO;
 
-	pr_info("AMD768 RNG detected\n");
-	err = hwrng_register(&amd_rng);
-	if (err) {
-		pr_err(PFX "RNG registering failed (%d)\n",
-		       err);
-		release_region(pmbase + 0xF0, 8);
-		goto out;
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET,
+				PMBASE_SIZE, DRV_NAME)) {
+		dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
+			pmbase + 0xF0);
+		return -EBUSY;
 	}
-out:
-	return err;
+
+	priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET,
+			PMBASE_SIZE);
+	if (!priv->iobase) {
+		pr_err(DRV_NAME "Cannot map ioport\n");
+		return -ENOMEM;
+	}
+
+	amd_rng.priv = (unsigned long)priv;
+	priv->pcidev = pdev;
+
+	pr_info(DRV_NAME " detected\n");
+	return devm_hwrng_register(&pdev->dev, &amd_rng);
 }
 
 static void __exit mod_exit(void)
 {
-	u32 pmbase = (unsigned long)amd_rng.priv;
-	release_region(pmbase + 0xF0, 8);
-	hwrng_unregister(&amd_rng);
 }
 
 module_init(mod_init);
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index af21492..574211a 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -92,9 +92,10 @@
 	bcm2835_rng_ops.priv = (unsigned long)rng_base;
 
 	rng_id = of_match_node(bcm2835_rng_of_match, np);
-	if (!rng_id)
+	if (!rng_id) {
+		iounmap(rng_base);
 		return -EINVAL;
-
+	}
 	/* Check for rng init function, execute it */
 	rng_setup = rng_id->data;
 	if (rng_setup)
diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
new file mode 100644
index 0000000..066ae0e
--- /dev/null
+++ b/drivers/char/hw_random/cavium-rng-vf.c
@@ -0,0 +1,99 @@
+/*
+ * Hardware Random Number Generator support for Cavium, Inc.
+ * Thunder processor family.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cavium, Inc.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+struct cavium_rng {
+	struct hwrng ops;
+	void __iomem *result;
+};
+
+/* Read data from the RNG unit */
+static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait)
+{
+	struct cavium_rng *p = container_of(rng, struct cavium_rng, ops);
+	unsigned int size = max;
+
+	while (size >= 8) {
+		*((u64 *)dat) = readq(p->result);
+		size -= 8;
+		dat += 8;
+	}
+	while (size > 0) {
+		*((u8 *)dat) = readb(p->result);
+		size--;
+		dat++;
+	}
+	return max;
+}
+
+/* Map Cavium RNG to an HWRNG object */
+static int cavium_rng_probe_vf(struct	pci_dev		*pdev,
+			 const struct	pci_device_id	*id)
+{
+	struct	cavium_rng *rng;
+	int	ret;
+
+	rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+	if (!rng)
+		return -ENOMEM;
+
+	/* Map the RNG result */
+	rng->result = pcim_iomap(pdev, 0, 0);
+	if (!rng->result) {
+		dev_err(&pdev->dev, "Error iomap failed retrieving result.\n");
+		return -ENOMEM;
+	}
+
+	rng->ops.name    = "cavium rng";
+	rng->ops.read    = cavium_rng_read;
+	rng->ops.quality = 1000;
+
+	pci_set_drvdata(pdev, rng);
+
+	ret = hwrng_register(&rng->ops);
+	if (ret) {
+		dev_err(&pdev->dev, "Error registering device as HWRNG.\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Remove the VF */
+void  cavium_rng_remove_vf(struct pci_dev *pdev)
+{
+	struct cavium_rng *rng;
+
+	rng = pci_get_drvdata(pdev);
+	hwrng_unregister(&rng->ops);
+}
+
+static const struct pci_device_id cavium_rng_vf_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa033), 0, 0, 0},
+	{0,},
+};
+MODULE_DEVICE_TABLE(pci, cavium_rng_vf_id_table);
+
+static struct pci_driver cavium_rng_vf_driver = {
+	.name		= "cavium_rng_vf",
+	.id_table	= cavium_rng_vf_id_table,
+	.probe		= cavium_rng_probe_vf,
+	.remove		= cavium_rng_remove_vf,
+};
+module_pci_driver(cavium_rng_vf_driver);
+
+MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c
new file mode 100644
index 0000000..a944e0a
--- /dev/null
+++ b/drivers/char/hw_random/cavium-rng.c
@@ -0,0 +1,94 @@
+/*
+ * Hardware Random Number Generator support for Cavium Inc.
+ * Thunder processor family.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cavium, Inc.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#define THUNDERX_RNM_ENT_EN     0x1
+#define THUNDERX_RNM_RNG_EN     0x2
+
+struct cavium_rng_pf {
+	void __iomem *control_status;
+};
+
+/* Enable the RNG hardware and activate the VF */
+static int cavium_rng_probe(struct pci_dev *pdev,
+			const struct pci_device_id *id)
+{
+	struct	cavium_rng_pf *rng;
+	int	iov_err;
+
+	rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+	if (!rng)
+		return -ENOMEM;
+
+	/*Map the RNG control */
+	rng->control_status = pcim_iomap(pdev, 0, 0);
+	if (!rng->control_status) {
+		dev_err(&pdev->dev,
+			"Error iomap failed retrieving control_status.\n");
+		return -ENOMEM;
+	}
+
+	/* Enable the RNG hardware and entropy source */
+	writeq(THUNDERX_RNM_RNG_EN | THUNDERX_RNM_ENT_EN,
+		rng->control_status);
+
+	pci_set_drvdata(pdev, rng);
+
+	/* Enable the Cavium RNG as a VF */
+	iov_err = pci_enable_sriov(pdev, 1);
+	if (iov_err != 0) {
+		/* Disable the RNG hardware and entropy source */
+		writeq(0, rng->control_status);
+		dev_err(&pdev->dev,
+			"Error initializing RNG virtual function,(%i).\n",
+			iov_err);
+		return iov_err;
+	}
+
+	return 0;
+}
+
+/* Disable VF and RNG Hardware */
+void  cavium_rng_remove(struct pci_dev *pdev)
+{
+	struct cavium_rng_pf *rng;
+
+	rng = pci_get_drvdata(pdev);
+
+	/* Remove the VF */
+	pci_disable_sriov(pdev);
+
+	/* Disable the RNG hardware and entropy source */
+	writeq(0, rng->control_status);
+}
+
+static const struct pci_device_id cavium_rng_pf_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa018), 0, 0, 0}, /* Thunder RNM */
+	{0,},
+};
+
+MODULE_DEVICE_TABLE(pci, cavium_rng_pf_id_table);
+
+static struct pci_driver cavium_rng_pf_driver = {
+	.name		= "cavium_rng_pf",
+	.id_table	= cavium_rng_pf_id_table,
+	.probe		= cavium_rng_probe,
+	.remove		= cavium_rng_remove,
+};
+
+module_pci_driver(cavium_rng_pf_driver);
+MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 9203f2d..4827945 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -449,22 +449,6 @@
 		goto out;
 
 	mutex_lock(&rng_mutex);
-
-	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
-	err = -ENOMEM;
-	if (!rng_buffer) {
-		rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
-		if (!rng_buffer)
-			goto out_unlock;
-	}
-	if (!rng_fillbuf) {
-		rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
-		if (!rng_fillbuf) {
-			kfree(rng_buffer);
-			goto out_unlock;
-		}
-	}
-
 	/* Must not register two RNGs with the same name. */
 	err = -EEXIST;
 	list_for_each_entry(tmp, &rng_list, list) {
@@ -573,7 +557,26 @@
 
 static int __init hwrng_modinit(void)
 {
-	return register_miscdev();
+	int ret = -ENOMEM;
+
+	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
+	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
+	if (!rng_buffer)
+		return -ENOMEM;
+
+	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
+	if (!rng_fillbuf) {
+		kfree(rng_buffer);
+		return -ENOMEM;
+	}
+
+	ret = register_miscdev();
+	if (ret) {
+		kfree(rng_fillbuf);
+		kfree(rng_buffer);
+	}
+
+	return ret;
 }
 
 static void __exit hwrng_modexit(void)
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index 0d0579f..e7a2459 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -24,15 +24,12 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/hw_random.h>
 #include <linux/delay.h>
-#include <asm/io.h>
-
-
-#define PFX	KBUILD_MODNAME ": "
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
 
 #define GEODE_RNG_DATA_REG   0x50
 #define GEODE_RNG_STATUS_REG 0x54
@@ -85,7 +82,6 @@
 
 static int __init mod_init(void)
 {
-	int err = -ENODEV;
 	struct pci_dev *pdev = NULL;
 	const struct pci_device_id *ent;
 	void __iomem *mem;
@@ -93,43 +89,27 @@
 
 	for_each_pci_dev(pdev) {
 		ent = pci_match_id(pci_tbl, pdev);
-		if (ent)
-			goto found;
+		if (ent) {
+			rng_base = pci_resource_start(pdev, 0);
+			if (rng_base == 0)
+				return -ENODEV;
+
+			mem = devm_ioremap(&pdev->dev, rng_base, 0x58);
+			if (!mem)
+				return -ENOMEM;
+			geode_rng.priv = (unsigned long)mem;
+
+			pr_info("AMD Geode RNG detected\n");
+			return devm_hwrng_register(&pdev->dev, &geode_rng);
+		}
 	}
+
 	/* Device not found. */
-	goto out;
-
-found:
-	rng_base = pci_resource_start(pdev, 0);
-	if (rng_base == 0)
-		goto out;
-	err = -ENOMEM;
-	mem = ioremap(rng_base, 0x58);
-	if (!mem)
-		goto out;
-	geode_rng.priv = (unsigned long)mem;
-
-	pr_info("AMD Geode RNG detected\n");
-	err = hwrng_register(&geode_rng);
-	if (err) {
-		pr_err(PFX "RNG registering failed (%d)\n",
-		       err);
-		goto err_unmap;
-	}
-out:
-	return err;
-
-err_unmap:
-	iounmap(mem);
-	goto out;
+	return -ENODEV;
 }
 
 static void __exit mod_exit(void)
 {
-	void __iomem *mem = (void __iomem *)geode_rng.priv;
-
-	hwrng_unregister(&geode_rng);
-	iounmap(mem);
 }
 
 module_init(mod_init);
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index 0cfd81b..58bef39 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -76,9 +76,6 @@
 	struct meson_rng_data *data =
 			container_of(rng, struct meson_rng_data, rng);
 
-	if (max < sizeof(u32))
-		return 0;
-
 	*(u32 *)buf = readl_relaxed(data->base + RNG_DATA);
 
 	return sizeof(u32);
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 01d4be2..f5c26a5 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -385,7 +385,7 @@
 
 	pm_runtime_enable(&pdev->dev);
 	ret = pm_runtime_get_sync(&pdev->dev);
-	if (ret) {
+	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
 		pm_runtime_put_noidle(&pdev->dev);
 		goto err_ioremap;
@@ -443,7 +443,7 @@
 	int ret;
 
 	ret = pm_runtime_get_sync(dev);
-	if (ret) {
+	if (ret < 0) {
 		dev_err(dev, "Failed to runtime_get device: %d\n", ret);
 		pm_runtime_put_noidle(dev);
 		return ret;
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index 8da14f1..37a58d7 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -71,12 +71,7 @@
 	return 0;
 }
 
-static int omap3_rom_rng_data_present(struct hwrng *rng, int wait)
-{
-	return 1;
-}
-
-static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data)
+static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w)
 {
 	int r;
 
@@ -88,8 +83,7 @@
 
 static struct hwrng omap3_rom_rng_ops = {
 	.name		= "omap3-rom",
-	.data_present	= omap3_rom_rng_data_present,
-	.data_read	= omap3_rom_rng_data_read,
+	.read		= omap3_rom_rng_read,
 };
 
 static int omap3_rom_rng_probe(struct platform_device *pdev)
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index c19e23d..545df48 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -95,42 +95,20 @@
 	.data_read	= pasemi_rng_data_read,
 };
 
-static int rng_probe(struct platform_device *ofdev)
+static int rng_probe(struct platform_device *pdev)
 {
 	void __iomem *rng_regs;
-	struct device_node *rng_np = ofdev->dev.of_node;
-	struct resource res;
-	int err = 0;
+	struct resource *res;
 
-	err = of_address_to_resource(rng_np, 0, &res);
-	if (err)
-		return -ENODEV;
-
-	rng_regs = ioremap(res.start, 0x100);
-
-	if (!rng_regs)
-		return -ENOMEM;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	rng_regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(rng_regs))
+		return PTR_ERR(rng_regs);
 
 	pasemi_rng.priv = (unsigned long)rng_regs;
 
 	pr_info("Registering PA Semi RNG\n");
-
-	err = hwrng_register(&pasemi_rng);
-
-	if (err)
-		iounmap(rng_regs);
-
-	return err;
-}
-
-static int rng_remove(struct platform_device *dev)
-{
-	void __iomem *rng_regs = (void __iomem *)pasemi_rng.priv;
-
-	hwrng_unregister(&pasemi_rng);
-	iounmap(rng_regs);
-
-	return 0;
+	return devm_hwrng_register(&pdev->dev, &pasemi_rng);
 }
 
 static const struct of_device_id rng_match[] = {
@@ -146,7 +124,6 @@
 		.of_match_table = rng_match,
 	},
 	.probe		= rng_probe,
-	.remove		= rng_remove,
 };
 
 module_platform_driver(rng_driver);
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
index 108897b..11dc9b7 100644
--- a/drivers/char/hw_random/pic32-rng.c
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -143,7 +143,6 @@
 	.remove		= pic32_rng_remove,
 	.driver		= {
 		.name	= "pic32-rng",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(pic32_rng_of_match),
 	},
 };
diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c
index 1d35363..938ec10 100644
--- a/drivers/char/hw_random/st-rng.c
+++ b/drivers/char/hw_random/st-rng.c
@@ -54,9 +54,6 @@
 	u32 status;
 	int i;
 
-	if (max < sizeof(u16))
-		return -EINVAL;
-
 	/* Wait until FIFO is full - max 4uS*/
 	for (i = 0; i < ST_RNG_FILL_FIFO_TIMEOUT; i++) {
 		status = readl_relaxed(ddata->base + ST_RNG_STATUS_REG);
@@ -111,6 +108,7 @@
 	ret = hwrng_register(&ddata->ops);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register HW RNG\n");
+		clk_disable_unprepare(clk);
 		return ret;
 	}
 
diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c
index a7b6949..1093583 100644
--- a/drivers/char/hw_random/tx4939-rng.c
+++ b/drivers/char/hw_random/tx4939-rng.c
@@ -144,22 +144,13 @@
 	}
 
 	platform_set_drvdata(dev, rngdev);
-	return hwrng_register(&rngdev->rng);
-}
-
-static int __exit tx4939_rng_remove(struct platform_device *dev)
-{
-	struct tx4939_rng *rngdev = platform_get_drvdata(dev);
-
-	hwrng_unregister(&rngdev->rng);
-	return 0;
+	return devm_hwrng_register(&dev->dev, &rngdev->rng);
 }
 
 static struct platform_driver tx4939_rng_driver = {
 	.driver		= {
 		.name	= "tx4939-rng",
 	},
-	.remove = tx4939_rng_remove,
 };
 
 module_platform_driver_probe(tx4939_rng_driver, tx4939_rng_probe);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 3efb3bf..d6876d5 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -479,8 +479,8 @@
 
 static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
 static void push_to_pool(struct work_struct *work);
-static __u32 input_pool_data[INPUT_POOL_WORDS];
-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
 
 static struct entropy_store input_pool = {
 	.poolinfo = &poolinfo_table[0],
@@ -2100,23 +2100,37 @@
 }
 EXPORT_SYMBOL(get_random_long);
 
-/*
- * randomize_range() returns a start address such that
+/**
+ * randomize_page - Generate a random, page aligned address
+ * @start:	The smallest acceptable address the caller will take.
+ * @range:	The size of the area, starting at @start, within which the
+ *		random address must fall.
  *
- *    [...... <range> .....]
- *  start                  end
+ * If @start + @range would overflow, @range is capped.
  *
- * a <range> with size "len" starting at the return value is inside in the
- * area defined by [start, end], but is otherwise randomized.
+ * NOTE: Historical use of randomize_range, which this replaces, presumed that
+ * @start was already page aligned.  We now align it regardless.
+ *
+ * Return: A page aligned address within [start, start + range).  On error,
+ * @start is returned.
  */
 unsigned long
-randomize_range(unsigned long start, unsigned long end, unsigned long len)
+randomize_page(unsigned long start, unsigned long range)
 {
-	unsigned long range = end - len - start;
+	if (!PAGE_ALIGNED(start)) {
+		range -= PAGE_ALIGN(start) - start;
+		start = PAGE_ALIGN(start);
+	}
 
-	if (end <= start + len)
-		return 0;
-	return PAGE_ALIGN(get_random_int() % range + start);
+	if (start > ULONG_MAX - range)
+		range = ULONG_MAX - start;
+
+	range >>= PAGE_SHIFT;
+
+	if (range == 0)
+		return start;
+
+	return start + (get_random_long() % range << PAGE_SHIFT);
 }
 
 /* Interface for in-kernel drivers of true hardware RNGs.
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index e496dae..719c5b4 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -934,7 +934,7 @@
 
 	if (ret > 0) {
 		struct inode *inode = file_inode(file);
-		inode->i_atime = current_fs_time(inode->i_sb);
+		inode->i_atime = current_time(inode);
 	}
 
 	return ret;
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
index 480a777..7c19d9b 100644
--- a/drivers/char/tb0219.c
+++ b/drivers/char/tb0219.c
@@ -21,6 +21,7 @@
 #include <linux/fs.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 
 #include <asm/io.h>
 #include <asm/reboot.h>
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8114744..d433b1d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -38,7 +38,6 @@
 #include <linux/workqueue.h>
 #include <linux/module.h>
 #include <linux/dma-mapping.h>
-#include <linux/kconfig.h>
 #include "../tty/hvc/hvc_console.h"
 
 #define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 1b2f28f..4852d9e 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -80,11 +80,17 @@
 {
 	struct cppc_cpudata *cpu;
 	struct cpufreq_freqs freqs;
+	u32 desired_perf;
 	int ret = 0;
 
 	cpu = all_cpu_data[policy->cpu];
 
-	cpu->perf_ctrls.desired_perf = (u64)target_freq * policy->max / cppc_dmi_max_khz;
+	desired_perf = (u64)target_freq * cpu->perf_caps.highest_perf / cppc_dmi_max_khz;
+	/* Return if it is exactly the same perf */
+	if (desired_perf == cpu->perf_ctrls.desired_perf)
+		return ret;
+
+	cpu->perf_ctrls.desired_perf = desired_perf;
 	freqs.old = policy->cur;
 	freqs.new = target_freq;
 
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 18da4f8..1347589 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -17,6 +17,7 @@
 struct cs_policy_dbs_info {
 	struct policy_dbs_info policy_dbs;
 	unsigned int down_skip;
+	unsigned int requested_freq;
 };
 
 static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
@@ -61,6 +62,7 @@
 {
 	struct policy_dbs_info *policy_dbs = policy->governor_data;
 	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+	unsigned int requested_freq = dbs_info->requested_freq;
 	struct dbs_data *dbs_data = policy_dbs->dbs_data;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int load = dbs_update(policy);
@@ -72,10 +74,16 @@
 	if (cs_tuners->freq_step == 0)
 		goto out;
 
+	/*
+	 * If requested_freq is out of range, it is likely that the limits
+	 * changed in the meantime, so fall back to current frequency in that
+	 * case.
+	 */
+	if (requested_freq > policy->max || requested_freq < policy->min)
+		requested_freq = policy->cur;
+
 	/* Check for frequency increase */
 	if (load > dbs_data->up_threshold) {
-		unsigned int requested_freq = policy->cur;
-
 		dbs_info->down_skip = 0;
 
 		/* if we are already at full speed then break out early */
@@ -83,8 +91,11 @@
 			goto out;
 
 		requested_freq += get_freq_target(cs_tuners, policy);
+		if (requested_freq > policy->max)
+			requested_freq = policy->max;
 
 		__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
+		dbs_info->requested_freq = requested_freq;
 		goto out;
 	}
 
@@ -95,7 +106,7 @@
 
 	/* Check for frequency decrease */
 	if (load < cs_tuners->down_threshold) {
-		unsigned int freq_target, requested_freq = policy->cur;
+		unsigned int freq_target;
 		/*
 		 * if we cannot reduce the frequency anymore, break out early
 		 */
@@ -109,6 +120,7 @@
 			requested_freq = policy->min;
 
 		__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
+		dbs_info->requested_freq = requested_freq;
 	}
 
  out:
@@ -287,6 +299,7 @@
 	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
 
 	dbs_info->down_skip = 0;
+	dbs_info->requested_freq = policy->cur;
 }
 
 static struct dbs_governor cs_governor = {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 806f203..f535f81 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -225,7 +225,7 @@
 static struct cpudata **all_cpu_data;
 
 /**
- * struct pid_adjust_policy - Stores static PID configuration data
+ * struct pstate_adjust_policy - Stores static PID configuration data
  * @sample_rate_ms:	PID calculation sample rate in ms
  * @sample_rate_ns:	Sample rate calculation in ns
  * @deadband:		PID deadband
@@ -562,12 +562,12 @@
 	int min, hw_min, max, hw_max, cpu, range, adj_range;
 	u64 value, cap;
 
-	rdmsrl(MSR_HWP_CAPABILITIES, cap);
-	hw_min = HWP_LOWEST_PERF(cap);
-	hw_max = HWP_HIGHEST_PERF(cap);
-	range = hw_max - hw_min;
-
 	for_each_cpu(cpu, cpumask) {
+		rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
+		hw_min = HWP_LOWEST_PERF(cap);
+		hw_max = HWP_HIGHEST_PERF(cap);
+		range = hw_max - hw_min;
+
 		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
 		adj_range = limits->min_perf_pct * range / 100;
 		min = hw_min + adj_range;
@@ -1232,6 +1232,7 @@
 {
 	struct sample *sample = &cpu->sample;
 	int32_t busy_frac, boost;
+	int target, avg_pstate;
 
 	busy_frac = div_fp(sample->mperf, sample->tsc);
 
@@ -1242,7 +1243,26 @@
 		busy_frac = boost;
 
 	sample->busy_scaled = busy_frac * 100;
-	return get_avg_pstate(cpu) - pid_calc(&cpu->pid, sample->busy_scaled);
+
+	target = limits->no_turbo || limits->turbo_disabled ?
+			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+	target += target >> 2;
+	target = mul_fp(target, busy_frac);
+	if (target < cpu->pstate.min_pstate)
+		target = cpu->pstate.min_pstate;
+
+	/*
+	 * If the average P-state during the previous cycle was higher than the
+	 * current target, add 50% of the difference to the target to reduce
+	 * possible performance oscillations and offset possible performance
+	 * loss related to moving the workload from one CPU to another within
+	 * a package/module.
+	 */
+	avg_pstate = get_avg_pstate(cpu);
+	if (avg_pstate > target)
+		target += (avg_pstate - target) >> 1;
+
+	return target;
 }
 
 static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
@@ -1251,10 +1271,11 @@
 	u64 duration_ns;
 
 	/*
-	 * perf_scaled is the average performance during the last sampling
-	 * period scaled by the ratio of the maximum P-state to the P-state
-	 * requested last time (in percent).  That measures the system's
-	 * response to the previous P-state selection.
+	 * perf_scaled is the ratio of the average P-state during the last
+	 * sampling period to the P-state requested last time (in percent).
+	 *
+	 * That measures the system's response to the previous P-state
+	 * selection.
 	 */
 	max_pstate = cpu->pstate.max_pstate_physical;
 	current_pstate = cpu->pstate.current_pstate;
diff --git a/drivers/cpuidle/Kconfig.mips b/drivers/cpuidle/Kconfig.mips
index 4102be0..512ee37 100644
--- a/drivers/cpuidle/Kconfig.mips
+++ b/drivers/cpuidle/Kconfig.mips
@@ -5,7 +5,7 @@
 	bool "CPU Idle driver for MIPS CPS platforms"
 	depends on CPU_IDLE && MIPS_CPS
 	depends on SYS_SUPPORTS_MIPS_CPS
-	select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT
+	select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT || CPU_MIPSR6
 	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
 	select MIPS_CPS_PM
 	default y
diff --git a/drivers/cpuidle/cpuidle-cps.c b/drivers/cpuidle/cpuidle-cps.c
index 1adb698..926ba98 100644
--- a/drivers/cpuidle/cpuidle-cps.c
+++ b/drivers/cpuidle/cpuidle-cps.c
@@ -163,7 +163,7 @@
 		core = cpu_data[cpu].core;
 		device = &per_cpu(cpuidle_dev, cpu);
 		device->cpu = cpu;
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
 		cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]);
 #endif
 
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 9b035b7..4d2b81f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -318,6 +318,9 @@
 	select CRYPTO_AES
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_ENGINE
+	select CRYPTO_CBC
+	select CRYPTO_ECB
+	select CRYPTO_CTR
 	help
 	  OMAP processors have AES module accelerator. Select this if you
 	  want to use the OMAP module for AES algorithms.
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index b304421..156aad1 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -111,6 +111,42 @@
 #else
 #define debug(format, arg...)
 #endif
+
+#ifdef DEBUG
+#include <linux/highmem.h>
+
+static void dbg_dump_sg(const char *level, const char *prefix_str,
+			int prefix_type, int rowsize, int groupsize,
+			struct scatterlist *sg, size_t tlen, bool ascii,
+			bool may_sleep)
+{
+	struct scatterlist *it;
+	void *it_page;
+	size_t len;
+	void *buf;
+
+	for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
+		/*
+		 * make sure the scatterlist's page
+		 * has a valid virtual memory mapping
+		 */
+		it_page = kmap_atomic(sg_page(it));
+		if (unlikely(!it_page)) {
+			printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
+			return;
+		}
+
+		buf = it_page + it->offset;
+		len = min(tlen, it->length);
+		print_hex_dump(level, prefix_str, prefix_type, rowsize,
+			       groupsize, buf, len, ascii);
+		tlen -= len;
+
+		kunmap_atomic(it_page);
+	}
+}
+#endif
+
 static struct list_head alg_list;
 
 struct caam_alg_entry {
@@ -227,8 +263,9 @@
 	if (is_rfc3686) {
 		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
 			       enckeylen);
-		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
-				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
+				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
 		append_move(desc,
 			    MOVE_SRC_OUTFIFO |
 			    MOVE_DEST_CLASS1CTX |
@@ -500,11 +537,10 @@
 
 	/* Load Counter into CONTEXT1 reg */
 	if (is_rfc3686)
-		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
-				    LDST_CLASS_1_CCB |
-				    LDST_SRCDST_BYTE_CONTEXT |
-				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
-				     LDST_OFFSET_SHIFT));
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
 
 	/* Class 1 operation */
 	append_operation(desc, ctx->class1_alg_type |
@@ -578,11 +614,10 @@
 
 	/* Load Counter into CONTEXT1 reg */
 	if (is_rfc3686)
-		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
-				    LDST_CLASS_1_CCB |
-				    LDST_SRCDST_BYTE_CONTEXT |
-				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
-				     LDST_OFFSET_SHIFT));
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
 
 	/* Choose operation */
 	if (ctr_mode)
@@ -683,11 +718,10 @@
 
 	/* Load Counter into CONTEXT1 reg */
 	if (is_rfc3686)
-		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
-				    LDST_CLASS_1_CCB |
-				    LDST_SRCDST_BYTE_CONTEXT |
-				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
-				     LDST_OFFSET_SHIFT));
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
 
 	/* Class 1 operation */
 	append_operation(desc, ctx->class1_alg_type |
@@ -1478,7 +1512,7 @@
 	int ret = 0;
 	u32 *key_jump_cmd;
 	u32 *desc;
-	u32 *nonce;
+	u8 *nonce;
 	u32 geniv;
 	u32 ctx1_iv_off = 0;
 	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
@@ -1531,9 +1565,10 @@
 
 	/* Load nonce into CONTEXT1 reg */
 	if (is_rfc3686) {
-		nonce = (u32 *)(key + keylen);
-		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
-				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		nonce = (u8 *)key + keylen;
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
+				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
 		append_move(desc, MOVE_WAITCOMP |
 			    MOVE_SRC_OUTFIFO |
 			    MOVE_DEST_CLASS1CTX |
@@ -1549,11 +1584,10 @@
 
 	/* Load counter into CONTEXT1 reg */
 	if (is_rfc3686)
-		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
-				    LDST_CLASS_1_CCB |
-				    LDST_SRCDST_BYTE_CONTEXT |
-				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
-				     LDST_OFFSET_SHIFT));
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
 
 	/* Load operation */
 	append_operation(desc, ctx->class1_alg_type |
@@ -1590,9 +1624,10 @@
 
 	/* Load nonce into CONTEXT1 reg */
 	if (is_rfc3686) {
-		nonce = (u32 *)(key + keylen);
-		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
-				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		nonce = (u8 *)key + keylen;
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
+				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
 		append_move(desc, MOVE_WAITCOMP |
 			    MOVE_SRC_OUTFIFO |
 			    MOVE_DEST_CLASS1CTX |
@@ -1608,11 +1643,10 @@
 
 	/* Load counter into CONTEXT1 reg */
 	if (is_rfc3686)
-		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
-				    LDST_CLASS_1_CCB |
-				    LDST_SRCDST_BYTE_CONTEXT |
-				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
-				     LDST_OFFSET_SHIFT));
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
 
 	/* Choose operation */
 	if (ctr_mode)
@@ -1653,9 +1687,10 @@
 
 	/* Load Nonce into CONTEXT1 reg */
 	if (is_rfc3686) {
-		nonce = (u32 *)(key + keylen);
-		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
-				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		nonce = (u8 *)key + keylen;
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
+				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
 		append_move(desc, MOVE_WAITCOMP |
 			    MOVE_SRC_OUTFIFO |
 			    MOVE_DEST_CLASS1CTX |
@@ -1685,11 +1720,10 @@
 
 	/* Load Counter into CONTEXT1 reg */
 	if (is_rfc3686)
-		append_load_imm_u32(desc, (u32)1, LDST_IMM |
-				    LDST_CLASS_1_CCB |
-				    LDST_SRCDST_BYTE_CONTEXT |
-				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
-				     LDST_OFFSET_SHIFT));
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
 
 	if (ctx1_iv_off)
 		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
@@ -1995,9 +2029,9 @@
 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
 		       edesc->src_nents > 1 ? 100 : ivsize, 1);
-	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+	dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
+		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
 #endif
 
 	ablkcipher_unmap(jrdev, edesc, req);
@@ -2027,9 +2061,9 @@
 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
 		       ivsize, 1);
-	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+	dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
+		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
 #endif
 
 	ablkcipher_unmap(jrdev, edesc, req);
@@ -2184,12 +2218,15 @@
 	int len, sec4_sg_index = 0;
 
 #ifdef DEBUG
+	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
 	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
 		       ivsize, 1);
-	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       edesc->src_nents ? 100 : req->nbytes, 1);
+	printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
+	dbg_dump_sg(KERN_ERR, "src    @"__stringify(__LINE__)": ",
+		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+		    edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
 #endif
 
 	len = desc_len(sh_desc);
@@ -2241,12 +2278,14 @@
 	int len, sec4_sg_index = 0;
 
 #ifdef DEBUG
+	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
 	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
 		       ivsize, 1);
-	print_hex_dump(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       edesc->src_nents ? 100 : req->nbytes, 1);
+	dbg_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
+		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+		    edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
 #endif
 
 	len = desc_len(sh_desc);
@@ -2516,18 +2555,20 @@
 	u32 *desc;
 	int ret = 0;
 
+#ifdef DEBUG
+	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
+	dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
+		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+		    req->assoclen + req->cryptlen, 1, may_sleep);
+#endif
+
 	/* allocate extended descriptor */
 	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
 				 &all_contig, false);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       req->assoclen + req->cryptlen, 1);
-#endif
-
 	/* Create and submit job descriptor*/
 	init_authenc_job(req, edesc, all_contig, false);
 #ifdef DEBUG
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 36365b3..660dc20 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -99,17 +99,17 @@
 
 /* ahash per-session context */
 struct caam_hash_ctx {
-	struct device *jrdev;
-	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
-	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
-	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
-	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
-	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
-	dma_addr_t sh_desc_update_dma;
+	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
 	dma_addr_t sh_desc_update_first_dma;
 	dma_addr_t sh_desc_fin_dma;
 	dma_addr_t sh_desc_digest_dma;
 	dma_addr_t sh_desc_finup_dma;
+	struct device *jrdev;
 	u32 alg_type;
 	u32 alg_op;
 	u8 key[CAAM_MAX_HASH_KEY_SIZE];
@@ -187,15 +187,6 @@
 	return buf_dma;
 }
 
-/* Map req->src and put it in link table */
-static inline void src_map_to_sec4_sg(struct device *jrdev,
-				      struct scatterlist *src, int src_nents,
-				      struct sec4_sg_entry *sec4_sg)
-{
-	dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
-	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
-}
-
 /*
  * Only put buffer in link table if it contains data, which is possible,
  * since a buffer has previously been used, and needs to be unmapped,
@@ -449,7 +440,7 @@
 	u32 *desc;
 	struct split_key_result result;
 	dma_addr_t src_dma, dst_dma;
-	int ret = 0;
+	int ret;
 
 	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
 	if (!desc) {
@@ -526,7 +517,7 @@
 	struct device *jrdev = ctx->jrdev;
 	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
 	int digestsize = crypto_ahash_digestsize(ahash);
-	int ret = 0;
+	int ret;
 	u8 *hashed_key = NULL;
 
 #ifdef DEBUG
@@ -534,14 +525,15 @@
 #endif
 
 	if (keylen > blocksize) {
-		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
-				     GFP_DMA);
+		hashed_key = kmalloc_array(digestsize,
+					   sizeof(*hashed_key),
+					   GFP_KERNEL | GFP_DMA);
 		if (!hashed_key)
 			return -ENOMEM;
 		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
 				      digestsize);
 		if (ret)
-			goto badkey;
+			goto bad_free_key;
 		key = hashed_key;
 	}
 
@@ -559,14 +551,14 @@
 
 	ret = gen_split_hash_key(ctx, key, keylen);
 	if (ret)
-		goto badkey;
+		goto bad_free_key;
 
 	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
 				      DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
 		dev_err(jrdev, "unable to map key i/o memory\n");
 		ret = -ENOMEM;
-		goto map_err;
+		goto error_free_key;
 	}
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
@@ -579,11 +571,10 @@
 		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
 				 DMA_TO_DEVICE);
 	}
-
-map_err:
+ error_free_key:
 	kfree(hashed_key);
 	return ret;
-badkey:
+ bad_free_key:
 	kfree(hashed_key);
 	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	return -EINVAL;
@@ -595,16 +586,16 @@
  * @sec4_sg_dma: physical mapped address of h/w link table
  * @src_nents: number of segments in input scatterlist
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
- * @sec4_sg: pointer to h/w link table
  * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ * @sec4_sg: h/w link table
  */
 struct ahash_edesc {
 	dma_addr_t dst_dma;
 	dma_addr_t sec4_sg_dma;
 	int src_nents;
 	int sec4_sg_bytes;
-	struct sec4_sg_entry *sec4_sg;
-	u32 hw_desc[0];
+	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
+	struct sec4_sg_entry sec4_sg[0];
 };
 
 static inline void ahash_unmap(struct device *dev,
@@ -774,6 +765,65 @@
 	req->base.complete(&req->base, err);
 }
 
+/*
+ * Allocate an enhanced descriptor, which contains the hardware descriptor
+ * and space for hardware scatter table containing sg_num entries.
+ */
+static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
+					     int sg_num, u32 *sh_desc,
+					     dma_addr_t sh_desc_dma,
+					     gfp_t flags)
+{
+	struct ahash_edesc *edesc;
+	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
+
+	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
+	if (!edesc) {
+		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
+		return NULL;
+	}
+
+	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
+			     HDR_SHARE_DEFER | HDR_REVERSE);
+
+	return edesc;
+}
+
+static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
+			       struct ahash_edesc *edesc,
+			       struct ahash_request *req, int nents,
+			       unsigned int first_sg,
+			       unsigned int first_bytes, size_t to_hash)
+{
+	dma_addr_t src_dma;
+	u32 options;
+
+	if (nents > 1 || first_sg) {
+		struct sec4_sg_entry *sg = edesc->sec4_sg;
+		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);
+
+		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
+
+		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
+		if (dma_mapping_error(ctx->jrdev, src_dma)) {
+			dev_err(ctx->jrdev, "unable to map S/G table\n");
+			return -ENOMEM;
+		}
+
+		edesc->sec4_sg_bytes = sgsize;
+		edesc->sec4_sg_dma = src_dma;
+		options = LDST_SGF;
+	} else {
+		src_dma = sg_dma_address(req->src);
+		options = 0;
+	}
+
+	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
+			  options);
+
+	return 0;
+}
+
 /* submit update job descriptor */
 static int ahash_update_ctx(struct ahash_request *req)
 {
@@ -789,12 +839,10 @@
 	int *next_buflen = state->current_buf ? &state->buflen_0 :
 			   &state->buflen_1, last_buflen;
 	int in_len = *buflen + req->nbytes, to_hash;
-	u32 *sh_desc = ctx->sh_desc_update, *desc;
-	dma_addr_t ptr = ctx->sh_desc_update_dma;
-	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
+	u32 *desc;
+	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
 	struct ahash_edesc *edesc;
 	int ret = 0;
-	int sh_len;
 
 	last_buflen = *next_buflen;
 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
@@ -807,40 +855,51 @@
 			dev_err(jrdev, "Invalid number of src SG.\n");
 			return src_nents;
 		}
+
+		if (src_nents) {
+			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+						  DMA_TO_DEVICE);
+			if (!mapped_nents) {
+				dev_err(jrdev, "unable to DMA map source\n");
+				return -ENOMEM;
+			}
+		} else {
+			mapped_nents = 0;
+		}
+
 		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
-		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
+		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
 				 sizeof(struct sec4_sg_entry);
 
 		/*
 		 * allocate space for base edesc and hw desc commands,
 		 * link tables
 		 */
-		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
-				sec4_sg_bytes, GFP_DMA | flags);
+		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
+					  ctx->sh_desc_update,
+					  ctx->sh_desc_update_dma, flags);
 		if (!edesc) {
-			dev_err(jrdev,
-				"could not allocate extended descriptor\n");
+			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
 			return -ENOMEM;
 		}
 
 		edesc->src_nents = src_nents;
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
-		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
-				 DESC_JOB_IO_LEN;
 
 		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
 					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
 		if (ret)
-			return ret;
+			goto unmap_ctx;
 
 		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
 							edesc->sec4_sg + 1,
 							buf, state->buf_dma,
 							*buflen, last_buflen);
 
-		if (src_nents) {
-			src_map_to_sec4_sg(jrdev, req->src, src_nents,
-					   edesc->sec4_sg + sec4_sg_src_index);
+		if (mapped_nents) {
+			sg_to_sec4_sg_last(req->src, mapped_nents,
+					   edesc->sec4_sg + sec4_sg_src_index,
+					   0);
 			if (*next_buflen)
 				scatterwalk_map_and_copy(next_buf, req->src,
 							 to_hash - *buflen,
@@ -852,17 +911,15 @@
 
 		state->current_buf = !state->current_buf;
 
-		sh_len = desc_len(sh_desc);
 		desc = edesc->hw_desc;
-		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
-				     HDR_REVERSE);
 
 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
 						     sec4_sg_bytes,
 						     DMA_TO_DEVICE);
 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
 			dev_err(jrdev, "unable to map S/G table\n");
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto unmap_ctx;
 		}
 
 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
@@ -877,13 +934,10 @@
 #endif
 
 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
-		if (!ret) {
-			ret = -EINPROGRESS;
-		} else {
-			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
-					   DMA_BIDIRECTIONAL);
-			kfree(edesc);
-		}
+		if (ret)
+			goto unmap_ctx;
+
+		ret = -EINPROGRESS;
 	} else if (*next_buflen) {
 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
 					 req->nbytes, 0);
@@ -899,6 +953,10 @@
 #endif
 
 	return ret;
+ unmap_ctx:
+	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+	kfree(edesc);
+	return ret;
 }
 
 static int ahash_final_ctx(struct ahash_request *req)
@@ -913,38 +971,31 @@
 	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
 	int last_buflen = state->current_buf ? state->buflen_0 :
 			  state->buflen_1;
-	u32 *sh_desc = ctx->sh_desc_fin, *desc;
-	dma_addr_t ptr = ctx->sh_desc_fin_dma;
+	u32 *desc;
 	int sec4_sg_bytes, sec4_sg_src_index;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
-	int ret = 0;
-	int sh_len;
+	int ret;
 
 	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
 	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
-			GFP_DMA | flags);
-	if (!edesc) {
-		dev_err(jrdev, "could not allocate extended descriptor\n");
+	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
+				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
+				  flags);
+	if (!edesc)
 		return -ENOMEM;
-	}
 
-	sh_len = desc_len(sh_desc);
 	desc = edesc->hw_desc;
-	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
-			 DESC_JOB_IO_LEN;
 	edesc->src_nents = 0;
 
 	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
 				 edesc->sec4_sg, DMA_TO_DEVICE);
 	if (ret)
-		return ret;
+		goto unmap_ctx;
 
 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
 						buf, state->buf_dma, buflen,
@@ -956,7 +1007,8 @@
 					    sec4_sg_bytes, DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
 		dev_err(jrdev, "unable to map S/G table\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto unmap_ctx;
 	}
 
 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
@@ -966,7 +1018,8 @@
 						digestsize);
 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
 		dev_err(jrdev, "unable to map dst\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto unmap_ctx;
 	}
 
 #ifdef DEBUG
@@ -975,13 +1028,13 @@
 #endif
 
 	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
-	if (!ret) {
-		ret = -EINPROGRESS;
-	} else {
-		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
-		kfree(edesc);
-	}
+	if (ret)
+		goto unmap_ctx;
 
+	return -EINPROGRESS;
+ unmap_ctx:
+	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+	kfree(edesc);
 	return ret;
 }
 
@@ -997,68 +1050,66 @@
 	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
 	int last_buflen = state->current_buf ? state->buflen_0 :
 			  state->buflen_1;
-	u32 *sh_desc = ctx->sh_desc_finup, *desc;
-	dma_addr_t ptr = ctx->sh_desc_finup_dma;
-	int sec4_sg_bytes, sec4_sg_src_index;
-	int src_nents;
+	u32 *desc;
+	int sec4_sg_src_index;
+	int src_nents, mapped_nents;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
-	int ret = 0;
-	int sh_len;
+	int ret;
 
 	src_nents = sg_nents_for_len(req->src, req->nbytes);
 	if (src_nents < 0) {
 		dev_err(jrdev, "Invalid number of src SG.\n");
 		return src_nents;
 	}
+
+	if (src_nents) {
+		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+					  DMA_TO_DEVICE);
+		if (!mapped_nents) {
+			dev_err(jrdev, "unable to DMA map source\n");
+			return -ENOMEM;
+		}
+	} else {
+		mapped_nents = 0;
+	}
+
 	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
-	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
-			 sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
-			GFP_DMA | flags);
+	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
+				  ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
+				  flags);
 	if (!edesc) {
-		dev_err(jrdev, "could not allocate extended descriptor\n");
+		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
 		return -ENOMEM;
 	}
 
-	sh_len = desc_len(sh_desc);
 	desc = edesc->hw_desc;
-	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 
 	edesc->src_nents = src_nents;
-	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
-			 DESC_JOB_IO_LEN;
 
 	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
 				 edesc->sec4_sg, DMA_TO_DEVICE);
 	if (ret)
-		return ret;
+		goto unmap_ctx;
 
 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
 						buf, state->buf_dma, buflen,
 						last_buflen);
 
-	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
-			   sec4_sg_src_index);
-
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
-		dev_err(jrdev, "unable to map S/G table\n");
-		return -ENOMEM;
-	}
-
-	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
-			       buflen + req->nbytes, LDST_SGF);
+	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
+				  sec4_sg_src_index, ctx->ctx_len + buflen,
+				  req->nbytes);
+	if (ret)
+		goto unmap_ctx;
 
 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);
 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
 		dev_err(jrdev, "unable to map dst\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto unmap_ctx;
 	}
 
 #ifdef DEBUG
@@ -1067,13 +1118,13 @@
 #endif
 
 	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
-	if (!ret) {
-		ret = -EINPROGRESS;
-	} else {
-		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
-		kfree(edesc);
-	}
+	if (ret)
+		goto unmap_ctx;
 
+	return -EINPROGRESS;
+ unmap_ctx:
+	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+	kfree(edesc);
 	return ret;
 }
 
@@ -1084,60 +1135,56 @@
 	struct device *jrdev = ctx->jrdev;
 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-	u32 *sh_desc = ctx->sh_desc_digest, *desc;
-	dma_addr_t ptr = ctx->sh_desc_digest_dma;
+	u32 *desc;
 	int digestsize = crypto_ahash_digestsize(ahash);
-	int src_nents, sec4_sg_bytes;
-	dma_addr_t src_dma;
+	int src_nents, mapped_nents;
 	struct ahash_edesc *edesc;
-	int ret = 0;
-	u32 options;
-	int sh_len;
+	int ret;
 
-	src_nents = sg_count(req->src, req->nbytes);
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
 	if (src_nents < 0) {
 		dev_err(jrdev, "Invalid number of src SG.\n");
 		return src_nents;
 	}
-	dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
-	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
-
-	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN,
-			GFP_DMA | flags);
-	if (!edesc) {
-		dev_err(jrdev, "could not allocate extended descriptor\n");
-		return -ENOMEM;
-	}
-	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
-			  DESC_JOB_IO_LEN;
-	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->src_nents = src_nents;
-
-	sh_len = desc_len(sh_desc);
-	desc = edesc->hw_desc;
-	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 
 	if (src_nents) {
-		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
-		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
-		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
-			dev_err(jrdev, "unable to map S/G table\n");
+		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+					  DMA_TO_DEVICE);
+		if (!mapped_nents) {
+			dev_err(jrdev, "unable to map source for DMA\n");
 			return -ENOMEM;
 		}
-		src_dma = edesc->sec4_sg_dma;
-		options = LDST_SGF;
 	} else {
-		src_dma = sg_dma_address(req->src);
-		options = 0;
+		mapped_nents = 0;
 	}
-	append_seq_in_ptr(desc, src_dma, req->nbytes, options);
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
+				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
+				  flags);
+	if (!edesc) {
+		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
+
+	edesc->src_nents = src_nents;
+
+	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
+				  req->nbytes);
+	if (ret) {
+		ahash_unmap(jrdev, edesc, req, digestsize);
+		kfree(edesc);
+		return ret;
+	}
+
+	desc = edesc->hw_desc;
 
 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);
 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
 		dev_err(jrdev, "unable to map dst\n");
+		ahash_unmap(jrdev, edesc, req, digestsize);
+		kfree(edesc);
 		return -ENOMEM;
 	}
 
@@ -1168,29 +1215,23 @@
 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
 	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
 	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
-	u32 *sh_desc = ctx->sh_desc_digest, *desc;
-	dma_addr_t ptr = ctx->sh_desc_digest_dma;
+	u32 *desc;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
-	int ret = 0;
-	int sh_len;
+	int ret;
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags);
-	if (!edesc) {
-		dev_err(jrdev, "could not allocate extended descriptor\n");
+	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
+				  ctx->sh_desc_digest_dma, flags);
+	if (!edesc)
 		return -ENOMEM;
-	}
 
-	edesc->sec4_sg_bytes = 0;
-	sh_len = desc_len(sh_desc);
 	desc = edesc->hw_desc;
-	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 
 	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, state->buf_dma)) {
 		dev_err(jrdev, "unable to map src\n");
-		return -ENOMEM;
+		goto unmap;
 	}
 
 	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
@@ -1199,7 +1240,7 @@
 						digestsize);
 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
 		dev_err(jrdev, "unable to map dst\n");
-		return -ENOMEM;
+		goto unmap;
 	}
 	edesc->src_nents = 0;
 
@@ -1217,6 +1258,11 @@
 	}
 
 	return ret;
+ unmap:
+	ahash_unmap(jrdev, edesc, req, digestsize);
+	kfree(edesc);
+	return -ENOMEM;
+
 }
 
 /* submit ahash update if it the first job descriptor after update */
@@ -1234,48 +1280,58 @@
 	int *next_buflen = state->current_buf ? &state->buflen_0 :
 			   &state->buflen_1;
 	int in_len = *buflen + req->nbytes, to_hash;
-	int sec4_sg_bytes, src_nents;
+	int sec4_sg_bytes, src_nents, mapped_nents;
 	struct ahash_edesc *edesc;
-	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
-	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
+	u32 *desc;
 	int ret = 0;
-	int sh_len;
 
 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
 	to_hash = in_len - *next_buflen;
 
 	if (to_hash) {
 		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - (*next_buflen));
+					     req->nbytes - *next_buflen);
 		if (src_nents < 0) {
 			dev_err(jrdev, "Invalid number of src SG.\n");
 			return src_nents;
 		}
-		sec4_sg_bytes = (1 + src_nents) *
+
+		if (src_nents) {
+			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+						  DMA_TO_DEVICE);
+			if (!mapped_nents) {
+				dev_err(jrdev, "unable to DMA map source\n");
+				return -ENOMEM;
+			}
+		} else {
+			mapped_nents = 0;
+		}
+
+		sec4_sg_bytes = (1 + mapped_nents) *
 				sizeof(struct sec4_sg_entry);
 
 		/*
 		 * allocate space for base edesc and hw desc commands,
 		 * link tables
 		 */
-		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
-				sec4_sg_bytes, GFP_DMA | flags);
+		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
+					  ctx->sh_desc_update_first,
+					  ctx->sh_desc_update_first_dma,
+					  flags);
 		if (!edesc) {
-			dev_err(jrdev,
-				"could not allocate extended descriptor\n");
+			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
 			return -ENOMEM;
 		}
 
 		edesc->src_nents = src_nents;
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
-		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
-				 DESC_JOB_IO_LEN;
 		edesc->dst_dma = 0;
 
 		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
 						    buf, *buflen);
-		src_map_to_sec4_sg(jrdev, req->src, src_nents,
-				   edesc->sec4_sg + 1);
+		sg_to_sec4_sg_last(req->src, mapped_nents,
+				   edesc->sec4_sg + 1, 0);
+
 		if (*next_buflen) {
 			scatterwalk_map_and_copy(next_buf, req->src,
 						 to_hash - *buflen,
@@ -1284,24 +1340,22 @@
 
 		state->current_buf = !state->current_buf;
 
-		sh_len = desc_len(sh_desc);
 		desc = edesc->hw_desc;
-		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
-				     HDR_REVERSE);
 
 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
 						    sec4_sg_bytes,
 						    DMA_TO_DEVICE);
 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
 			dev_err(jrdev, "unable to map S/G table\n");
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto unmap_ctx;
 		}
 
 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
 
 		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
 		if (ret)
-			return ret;
+			goto unmap_ctx;
 
 #ifdef DEBUG
 		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1310,16 +1364,13 @@
 #endif
 
 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
-		if (!ret) {
-			ret = -EINPROGRESS;
-			state->update = ahash_update_ctx;
-			state->finup = ahash_finup_ctx;
-			state->final = ahash_final_ctx;
-		} else {
-			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
-					DMA_TO_DEVICE);
-			kfree(edesc);
-		}
+		if (ret)
+			goto unmap_ctx;
+
+		ret = -EINPROGRESS;
+		state->update = ahash_update_ctx;
+		state->finup = ahash_finup_ctx;
+		state->final = ahash_final_ctx;
 	} else if (*next_buflen) {
 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
 					 req->nbytes, 0);
@@ -1335,6 +1386,10 @@
 #endif
 
 	return ret;
+ unmap_ctx:
+	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+	kfree(edesc);
+	return ret;
 }
 
 /* submit ahash finup if it the first job descriptor after update */
@@ -1350,61 +1405,63 @@
 	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
 	int last_buflen = state->current_buf ? state->buflen_0 :
 			  state->buflen_1;
-	u32 *sh_desc = ctx->sh_desc_digest, *desc;
-	dma_addr_t ptr = ctx->sh_desc_digest_dma;
-	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
+	u32 *desc;
+	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
-	int sh_len;
-	int ret = 0;
+	int ret;
 
 	src_nents = sg_nents_for_len(req->src, req->nbytes);
 	if (src_nents < 0) {
 		dev_err(jrdev, "Invalid number of src SG.\n");
 		return src_nents;
 	}
+
+	if (src_nents) {
+		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+					  DMA_TO_DEVICE);
+		if (!mapped_nents) {
+			dev_err(jrdev, "unable to DMA map source\n");
+			return -ENOMEM;
+		}
+	} else {
+		mapped_nents = 0;
+	}
+
 	sec4_sg_src_index = 2;
-	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
+	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
 			 sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
-			GFP_DMA | flags);
+	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
+				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
+				  flags);
 	if (!edesc) {
-		dev_err(jrdev, "could not allocate extended descriptor\n");
+		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
 		return -ENOMEM;
 	}
 
-	sh_len = desc_len(sh_desc);
 	desc = edesc->hw_desc;
-	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 
 	edesc->src_nents = src_nents;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
-			 DESC_JOB_IO_LEN;
 
 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
 						state->buf_dma, buflen,
 						last_buflen);
 
-	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1);
-
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
+				  req->nbytes);
+	if (ret) {
 		dev_err(jrdev, "unable to map S/G table\n");
-		return -ENOMEM;
+		goto unmap;
 	}
 
-	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
-			       req->nbytes, LDST_SGF);
-
 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);
 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
 		dev_err(jrdev, "unable to map dst\n");
-		return -ENOMEM;
+		goto unmap;
 	}
 
 #ifdef DEBUG
@@ -1421,6 +1478,11 @@
 	}
 
 	return ret;
+ unmap:
+	ahash_unmap(jrdev, edesc, req, digestsize);
+	kfree(edesc);
+	return -ENOMEM;
+
 }
 
 /* submit first update job descriptor after init */
@@ -1436,78 +1498,65 @@
 	int *next_buflen = state->current_buf ?
 		&state->buflen_1 : &state->buflen_0;
 	int to_hash;
-	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
-	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
-	int sec4_sg_bytes, src_nents;
-	dma_addr_t src_dma;
-	u32 options;
+	u32 *desc;
+	int src_nents, mapped_nents;
 	struct ahash_edesc *edesc;
 	int ret = 0;
-	int sh_len;
 
 	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
 				      1);
 	to_hash = req->nbytes - *next_buflen;
 
 	if (to_hash) {
-		src_nents = sg_count(req->src, req->nbytes - (*next_buflen));
+		src_nents = sg_nents_for_len(req->src,
+					     req->nbytes - *next_buflen);
 		if (src_nents < 0) {
 			dev_err(jrdev, "Invalid number of src SG.\n");
 			return src_nents;
 		}
-		dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
-		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
+
+		if (src_nents) {
+			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+						  DMA_TO_DEVICE);
+			if (!mapped_nents) {
+				dev_err(jrdev, "unable to map source for DMA\n");
+				return -ENOMEM;
+			}
+		} else {
+			mapped_nents = 0;
+		}
 
 		/*
 		 * allocate space for base edesc and hw desc commands,
 		 * link tables
 		 */
-		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
-				sec4_sg_bytes, GFP_DMA | flags);
+		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
+					  mapped_nents : 0,
+					  ctx->sh_desc_update_first,
+					  ctx->sh_desc_update_first_dma,
+					  flags);
 		if (!edesc) {
-			dev_err(jrdev,
-				"could not allocate extended descriptor\n");
+			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
 			return -ENOMEM;
 		}
 
 		edesc->src_nents = src_nents;
-		edesc->sec4_sg_bytes = sec4_sg_bytes;
-		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
-				 DESC_JOB_IO_LEN;
 		edesc->dst_dma = 0;
 
-		if (src_nents) {
-			sg_to_sec4_sg_last(req->src, src_nents,
-					   edesc->sec4_sg, 0);
-			edesc->sec4_sg_dma = dma_map_single(jrdev,
-							    edesc->sec4_sg,
-							    sec4_sg_bytes,
-							    DMA_TO_DEVICE);
-			if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
-				dev_err(jrdev, "unable to map S/G table\n");
-				return -ENOMEM;
-			}
-			src_dma = edesc->sec4_sg_dma;
-			options = LDST_SGF;
-		} else {
-			src_dma = sg_dma_address(req->src);
-			options = 0;
-		}
+		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
+					  to_hash);
+		if (ret)
+			goto unmap_ctx;
 
 		if (*next_buflen)
 			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
 						 *next_buflen, 0);
 
-		sh_len = desc_len(sh_desc);
 		desc = edesc->hw_desc;
-		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
-				     HDR_REVERSE);
-
-		append_seq_in_ptr(desc, src_dma, to_hash, options);
 
 		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
 		if (ret)
-			return ret;
+			goto unmap_ctx;
 
 #ifdef DEBUG
 		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1515,18 +1564,14 @@
 			       desc_bytes(desc), 1);
 #endif
 
-		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
-				      req);
-		if (!ret) {
-			ret = -EINPROGRESS;
-			state->update = ahash_update_ctx;
-			state->finup = ahash_finup_ctx;
-			state->final = ahash_final_ctx;
-		} else {
-			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
-					DMA_TO_DEVICE);
-			kfree(edesc);
-		}
+		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
+		if (ret)
+			goto unmap_ctx;
+
+		ret = -EINPROGRESS;
+		state->update = ahash_update_ctx;
+		state->finup = ahash_finup_ctx;
+		state->final = ahash_final_ctx;
 	} else if (*next_buflen) {
 		state->update = ahash_update_no_ctx;
 		state->finup = ahash_finup_no_ctx;
@@ -1541,6 +1586,10 @@
 #endif
 
 	return ret;
+ unmap_ctx:
+	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+	kfree(edesc);
+	return ret;
 }
 
 static int ahash_finup_first(struct ahash_request *req)
@@ -1799,7 +1848,6 @@
 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
 					 HASH_MSG_LEN + 64,
 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
-	int ret = 0;
 
 	/*
 	 * Get a Job ring from Job Ring driver to ensure in-order
@@ -1819,10 +1867,7 @@
 
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 				 sizeof(struct caam_hash_state));
-
-	ret = ahash_set_sh_desc(ahash);
-
-	return ret;
+	return ahash_set_sh_desc(ahash);
 }
 
 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 0ec112e..72ff196 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -14,6 +14,7 @@
 #include "jr.h"
 #include "desc_constr.h"
 #include "error.h"
+#include "ctrl.h"
 
 bool caam_little_end;
 EXPORT_SYMBOL(caam_little_end);
@@ -826,6 +827,8 @@
 
 caam_remove:
 	caam_remove(pdev);
+	return ret;
+
 iounmap_ctrl:
 	iounmap(ctrl);
 disable_caam_emi_slow:
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 26427c1..513b664 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -23,13 +23,7 @@
 #define SEC4_SG_OFFSET_MASK	0x00001fff
 
 struct sec4_sg_entry {
-#if !defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && \
-	defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
-	u32 rsvd1;
-	dma_addr_t ptr;
-#else
 	u64 ptr;
-#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */
 	u32 len;
 	u32 bpid_offset;
 };
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index d3869b9..a8cd8a7 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -325,6 +325,23 @@
 APPEND_CMD_RAW_IMM(load, LOAD, u32);
 
 /*
+ * ee - endianness
+ * size - size of immediate type in bytes
+ */
+#define APPEND_CMD_RAW_IMM2(cmd, op, ee, size) \
+static inline void append_##cmd##_imm_##ee##size(u32 *desc, \
+						   u##size immediate, \
+						   u32 options) \
+{ \
+	__##ee##size data = cpu_to_##ee##size(immediate); \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(data)); \
+	append_data(desc, &data, sizeof(data)); \
+}
+
+APPEND_CMD_RAW_IMM2(load, LOAD, be, 32);
+
+/*
  * Append math command. Only the last part of destination and source need to
  * be specified
  */
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index e2bcacc..5d4c050 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -41,7 +41,6 @@
 	struct device		*dev;
 	int ridx;
 	struct caam_job_ring __iomem *rregs;	/* JobR's register space */
-	struct tasklet_struct irqtask;
 	int irq;			/* One per queue */
 
 	/* Number of scatterlist crypt transforms active on the JobR */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index a81f551..757c27f 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -73,8 +73,6 @@
 
 	ret = caam_reset_hw_jr(dev);
 
-	tasklet_kill(&jrp->irqtask);
-
 	/* Release interrupt */
 	free_irq(jrp->irq, dev);
 
@@ -130,7 +128,7 @@
 
 	/*
 	 * Check the output ring for ready responses, kick
-	 * tasklet if jobs done.
+	 * the threaded irq if jobs done.
 	 */
 	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
 	if (!irqstate)
@@ -152,18 +150,13 @@
 	/* Have valid interrupt at this point, just ACK and trigger */
 	wr_reg32(&jrp->rregs->jrintstatus, irqstate);
 
-	preempt_disable();
-	tasklet_schedule(&jrp->irqtask);
-	preempt_enable();
-
-	return IRQ_HANDLED;
+	return IRQ_WAKE_THREAD;
 }
 
-/* Deferred service handler, run as interrupt-fired tasklet */
-static void caam_jr_dequeue(unsigned long devarg)
+static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
 {
 	int hw_idx, sw_idx, i, head, tail;
-	struct device *dev = (struct device *)devarg;
+	struct device *dev = st_dev;
 	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
 	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
 	u32 *userdesc, userstatus;
@@ -237,6 +230,8 @@
 
 	/* reenable / unmask IRQs */
 	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
+
+	return IRQ_HANDLED;
 }
 
 /**
@@ -394,11 +389,10 @@
 
 	jrp = dev_get_drvdata(dev);
 
-	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
-
 	/* Connect job ring interrupt handler. */
-	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
-			    dev_name(dev), dev);
+	error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
+				     caam_jr_threadirq, IRQF_SHARED,
+				     dev_name(dev), dev);
 	if (error) {
 		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
 			jrp->ridx, jrp->irq);
@@ -460,7 +454,6 @@
 out_free_irq:
 	free_irq(jrp->irq, dev);
 out_kill_deq:
-	tasklet_kill(&jrp->irqtask);
 	return error;
 }
 
@@ -513,6 +506,7 @@
 	error = caam_jr_init(jrdev); /* now turn on hardware */
 	if (error) {
 		irq_dispose_mapping(jrpriv->irq);
+		iounmap(ctrl);
 		return error;
 	}
 
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index b3c5016..84d2f83 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -196,6 +196,14 @@
 #define caam_dma_to_cpu(value) caam32_to_cpu(value)
 #endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT  */
 
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+#define cpu_to_caam_dma64(value) \
+		(((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
+		 (u64)cpu_to_caam32(upper_32_bits(value)))
+#else
+#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
+#endif
+
 /*
  * jr_outentry
  * Represents each entry in a JobR output ring
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 19dc64f..41cd5a3 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -15,7 +15,7 @@
 static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
 				      dma_addr_t dma, u32 len, u16 offset)
 {
-	sec4_sg_ptr->ptr = cpu_to_caam_dma(dma);
+	sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
 	sec4_sg_ptr->len = cpu_to_caam32(len);
 	sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
 #ifdef DEBUG
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index ee4d274..346ceb8 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -2,6 +2,7 @@
 ccp-objs := ccp-dev.o \
 	    ccp-ops.o \
 	    ccp-dev-v3.o \
+	    ccp-dev-v5.o \
 	    ccp-platform.o \
 	    ccp-dmaengine.o
 ccp-$(CONFIG_PCI) += ccp-pci.o
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 8f36af6..84a652b 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -134,7 +135,22 @@
 	rctx->cmd.engine = CCP_ENGINE_SHA;
 	rctx->cmd.u.sha.type = rctx->type;
 	rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
-	rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
+
+	switch (rctx->type) {
+	case CCP_SHA_TYPE_1:
+		rctx->cmd.u.sha.ctx_len = SHA1_DIGEST_SIZE;
+		break;
+	case CCP_SHA_TYPE_224:
+		rctx->cmd.u.sha.ctx_len = SHA224_DIGEST_SIZE;
+		break;
+	case CCP_SHA_TYPE_256:
+		rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		/* Should never get here */
+		break;
+	}
+
 	rctx->cmd.u.sha.src = sg;
 	rctx->cmd.u.sha.src_len = rctx->hash_cnt;
 	rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index d7a7103..8d2dbac 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -19,6 +20,61 @@
 
 #include "ccp-dev.h"
 
+static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
+{
+	int start;
+	struct ccp_device *ccp = cmd_q->ccp;
+
+	for (;;) {
+		mutex_lock(&ccp->sb_mutex);
+
+		start = (u32)bitmap_find_next_zero_area(ccp->sb,
+							ccp->sb_count,
+							ccp->sb_start,
+							count, 0);
+		if (start <= ccp->sb_count) {
+			bitmap_set(ccp->sb, start, count);
+
+			mutex_unlock(&ccp->sb_mutex);
+			break;
+		}
+
+		ccp->sb_avail = 0;
+
+		mutex_unlock(&ccp->sb_mutex);
+
+		/* Wait for KSB entries to become available */
+		if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
+			return 0;
+	}
+
+	return KSB_START + start;
+}
+
+static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start,
+			 unsigned int count)
+{
+	struct ccp_device *ccp = cmd_q->ccp;
+
+	if (!start)
+		return;
+
+	mutex_lock(&ccp->sb_mutex);
+
+	bitmap_clear(ccp->sb, start - KSB_START, count);
+
+	ccp->sb_avail = 1;
+
+	mutex_unlock(&ccp->sb_mutex);
+
+	wake_up_interruptible_all(&ccp->sb_queue);
+}
+
+static unsigned int ccp_get_free_slots(struct ccp_cmd_queue *cmd_q)
+{
+	return CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
+}
+
 static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
 {
 	struct ccp_cmd_queue *cmd_q = op->cmd_q;
@@ -68,6 +124,9 @@
 			/* On error delete all related jobs from the queue */
 			cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
 			      | op->jobid;
+			if (cmd_q->cmd_error)
+				ccp_log_error(cmd_q->ccp,
+					      cmd_q->cmd_error);
 
 			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
 
@@ -99,10 +158,10 @@
 		| (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
 		| (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
 		| (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
-		| (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+		| (op->sb_key << REQ1_KEY_KSB_SHIFT);
 	cr[1] = op->src.u.dma.length - 1;
 	cr[2] = ccp_addr_lo(&op->src.u.dma);
-	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
 		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
 		| ccp_addr_hi(&op->src.u.dma);
 	cr[4] = ccp_addr_lo(&op->dst.u.dma);
@@ -129,10 +188,10 @@
 	cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
 		| (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
 		| (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
-		| (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+		| (op->sb_key << REQ1_KEY_KSB_SHIFT);
 	cr[1] = op->src.u.dma.length - 1;
 	cr[2] = ccp_addr_lo(&op->src.u.dma);
-	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
 		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
 		| ccp_addr_hi(&op->src.u.dma);
 	cr[4] = ccp_addr_lo(&op->dst.u.dma);
@@ -158,7 +217,7 @@
 		| REQ1_INIT;
 	cr[1] = op->src.u.dma.length - 1;
 	cr[2] = ccp_addr_lo(&op->src.u.dma);
-	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
 		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
 		| ccp_addr_hi(&op->src.u.dma);
 
@@ -181,11 +240,11 @@
 	/* Fill out the register contents for REQ1 through REQ6 */
 	cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
 		| (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
-		| (op->ksb_key << REQ1_KEY_KSB_SHIFT)
+		| (op->sb_key << REQ1_KEY_KSB_SHIFT)
 		| REQ1_EOM;
 	cr[1] = op->u.rsa.input_len - 1;
 	cr[2] = ccp_addr_lo(&op->src.u.dma);
-	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
 		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
 		| ccp_addr_hi(&op->src.u.dma);
 	cr[4] = ccp_addr_lo(&op->dst.u.dma);
@@ -215,10 +274,10 @@
 			| ccp_addr_hi(&op->src.u.dma);
 
 		if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
-			cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT);
+			cr[3] |= (op->sb_key << REQ4_KSB_SHIFT);
 	} else {
-		cr[2] = op->src.u.ksb * CCP_KSB_BYTES;
-		cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT);
+		cr[2] = op->src.u.sb * CCP_SB_BYTES;
+		cr[3] = (CCP_MEMTYPE_SB << REQ4_MEMTYPE_SHIFT);
 	}
 
 	if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
@@ -226,8 +285,8 @@
 		cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
 			| ccp_addr_hi(&op->dst.u.dma);
 	} else {
-		cr[4] = op->dst.u.ksb * CCP_KSB_BYTES;
-		cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT);
+		cr[4] = op->dst.u.sb * CCP_SB_BYTES;
+		cr[5] = (CCP_MEMTYPE_SB << REQ6_MEMTYPE_SHIFT);
 	}
 
 	if (op->eom)
@@ -256,35 +315,6 @@
 	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
 }
 
-static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
-{
-	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
-	u32 trng_value;
-	int len = min_t(int, sizeof(trng_value), max);
-
-	/*
-	 * Locking is provided by the caller so we can update device
-	 * hwrng-related fields safely
-	 */
-	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
-	if (!trng_value) {
-		/* Zero is returned if not data is available or if a
-		 * bad-entropy error is present. Assume an error if
-		 * we exceed TRNG_RETRIES reads of zero.
-		 */
-		if (ccp->hwrng_retries++ > TRNG_RETRIES)
-			return -EIO;
-
-		return 0;
-	}
-
-	/* Reset the counter and save the rng value */
-	ccp->hwrng_retries = 0;
-	memcpy(data, &trng_value, len);
-
-	return len;
-}
-
 static int ccp_init(struct ccp_device *ccp)
 {
 	struct device *dev = ccp->dev;
@@ -321,9 +351,9 @@
 		cmd_q->dma_pool = dma_pool;
 
 		/* Reserve 2 KSB regions for the queue */
-		cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
-		cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
-		ccp->ksb_count -= 2;
+		cmd_q->sb_key = KSB_START + ccp->sb_start++;
+		cmd_q->sb_ctx = KSB_START + ccp->sb_start++;
+		ccp->sb_count -= 2;
 
 		/* Preset some register values and masks that are queue
 		 * number dependent
@@ -335,7 +365,7 @@
 		cmd_q->int_ok = 1 << (i * 2);
 		cmd_q->int_err = 1 << ((i * 2) + 1);
 
-		cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
+		cmd_q->free_slots = ccp_get_free_slots(cmd_q);
 
 		init_waitqueue_head(&cmd_q->int_queue);
 
@@ -375,9 +405,10 @@
 	}
 
 	/* Initialize the queues used to wait for KSB space and suspend */
-	init_waitqueue_head(&ccp->ksb_queue);
+	init_waitqueue_head(&ccp->sb_queue);
 	init_waitqueue_head(&ccp->suspend_queue);
 
+	dev_dbg(dev, "Starting threads...\n");
 	/* Create a kthread for each queue */
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		struct task_struct *kthread;
@@ -397,29 +428,26 @@
 		wake_up_process(kthread);
 	}
 
-	/* Register the RNG */
-	ccp->hwrng.name = ccp->rngname;
-	ccp->hwrng.read = ccp_trng_read;
-	ret = hwrng_register(&ccp->hwrng);
-	if (ret) {
-		dev_err(dev, "error registering hwrng (%d)\n", ret);
+	dev_dbg(dev, "Enabling interrupts...\n");
+	/* Enable interrupts */
+	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
+
+	dev_dbg(dev, "Registering device...\n");
+	ccp_add_device(ccp);
+
+	ret = ccp_register_rng(ccp);
+	if (ret)
 		goto e_kthread;
-	}
 
 	/* Register the DMA engine support */
 	ret = ccp_dmaengine_register(ccp);
 	if (ret)
 		goto e_hwrng;
 
-	ccp_add_device(ccp);
-
-	/* Enable interrupts */
-	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
-
 	return 0;
 
 e_hwrng:
-	hwrng_unregister(&ccp->hwrng);
+	ccp_unregister_rng(ccp);
 
 e_kthread:
 	for (i = 0; i < ccp->cmd_q_count; i++)
@@ -441,19 +469,14 @@
 	struct ccp_cmd *cmd;
 	unsigned int qim, i;
 
-	/* Remove this device from the list of available units first */
-	ccp_del_device(ccp);
-
 	/* Unregister the DMA engine */
 	ccp_dmaengine_unregister(ccp);
 
 	/* Unregister the RNG */
-	hwrng_unregister(&ccp->hwrng);
+	ccp_unregister_rng(ccp);
 
-	/* Stop the queue kthreads */
-	for (i = 0; i < ccp->cmd_q_count; i++)
-		if (ccp->cmd_q[i].kthread)
-			kthread_stop(ccp->cmd_q[i].kthread);
+	/* Remove this device from the list of available units */
+	ccp_del_device(ccp);
 
 	/* Build queue interrupt mask (two interrupt masks per queue) */
 	qim = 0;
@@ -472,6 +495,11 @@
 	}
 	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
 
+	/* Stop the queue kthreads */
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		if (ccp->cmd_q[i].kthread)
+			kthread_stop(ccp->cmd_q[i].kthread);
+
 	ccp->free_irq(ccp);
 
 	for (i = 0; i < ccp->cmd_q_count; i++)
@@ -527,18 +555,24 @@
 }
 
 static const struct ccp_actions ccp3_actions = {
-	.perform_aes = ccp_perform_aes,
-	.perform_xts_aes = ccp_perform_xts_aes,
-	.perform_sha = ccp_perform_sha,
-	.perform_rsa = ccp_perform_rsa,
-	.perform_passthru = ccp_perform_passthru,
-	.perform_ecc = ccp_perform_ecc,
+	.aes = ccp_perform_aes,
+	.xts_aes = ccp_perform_xts_aes,
+	.sha = ccp_perform_sha,
+	.rsa = ccp_perform_rsa,
+	.passthru = ccp_perform_passthru,
+	.ecc = ccp_perform_ecc,
+	.sballoc = ccp_alloc_ksb,
+	.sbfree = ccp_free_ksb,
 	.init = ccp_init,
 	.destroy = ccp_destroy,
+	.get_free_slots = ccp_get_free_slots,
 	.irqhandler = ccp_irq_handler,
 };
 
-struct ccp_vdata ccpv3 = {
+const struct ccp_vdata ccpv3 = {
 	.version = CCP_VERSION(3, 0),
+	.setup = NULL,
 	.perform = &ccp3_actions,
+	.bar = 2,
+	.offset = 0x20000,
 };
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
new file mode 100644
index 0000000..faf3cb3
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -0,0 +1,1017 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+
+static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
+{
+	struct ccp_device *ccp;
+	int start;
+
+	/* First look at the map for the queue */
+	if (cmd_q->lsb >= 0) {
+		start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap,
+							LSB_SIZE,
+							0, count, 0);
+		if (start < LSB_SIZE) {
+			bitmap_set(cmd_q->lsbmap, start, count);
+			return start + cmd_q->lsb * LSB_SIZE;
+		}
+	}
+
+	/* No joy; try to get an entry from the shared blocks */
+	ccp = cmd_q->ccp;
+	for (;;) {
+		mutex_lock(&ccp->sb_mutex);
+
+		start = (u32)bitmap_find_next_zero_area(ccp->lsbmap,
+							MAX_LSB_CNT * LSB_SIZE,
+							0,
+							count, 0);
+		if (start <= MAX_LSB_CNT * LSB_SIZE) {
+			bitmap_set(ccp->lsbmap, start, count);
+
+			mutex_unlock(&ccp->sb_mutex);
+			return start * LSB_ITEM_SIZE;
+		}
+
+		ccp->sb_avail = 0;
+
+		mutex_unlock(&ccp->sb_mutex);
+
+		/* Wait for KSB entries to become available */
+		if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
+			return 0;
+	}
+}
+
+static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
+			 unsigned int count)
+{
+	int lsbno = start / LSB_SIZE;
+
+	if (!start)
+		return;
+
+	if (cmd_q->lsb == lsbno) {
+		/* An entry from the private LSB */
+		bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+	} else {
+		/* From the shared LSBs */
+		struct ccp_device *ccp = cmd_q->ccp;
+
+		mutex_lock(&ccp->sb_mutex);
+		bitmap_clear(ccp->lsbmap, start, count);
+		ccp->sb_avail = 1;
+		mutex_unlock(&ccp->sb_mutex);
+		wake_up_interruptible_all(&ccp->sb_queue);
+	}
+}
+
+/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
+union ccp_function {
+	struct {
+		u16 size:7;
+		u16 encrypt:1;
+		u16 mode:5;
+		u16 type:2;
+	} aes;
+	struct {
+		u16 size:7;
+		u16 encrypt:1;
+		u16 rsvd:5;
+		u16 type:2;
+	} aes_xts;
+	struct {
+		u16 rsvd1:10;
+		u16 type:4;
+		u16 rsvd2:1;
+	} sha;
+	struct {
+		u16 mode:3;
+		u16 size:12;
+	} rsa;
+	struct {
+		u16 byteswap:2;
+		u16 bitwise:3;
+		u16 reflect:2;
+		u16 rsvd:8;
+	} pt;
+	struct  {
+		u16 rsvd:13;
+	} zlib;
+	struct {
+		u16 size:10;
+		u16 type:2;
+		u16 mode:3;
+	} ecc;
+	u16 raw;
+};
+
+#define	CCP_AES_SIZE(p)		((p)->aes.size)
+#define	CCP_AES_ENCRYPT(p)	((p)->aes.encrypt)
+#define	CCP_AES_MODE(p)		((p)->aes.mode)
+#define	CCP_AES_TYPE(p)		((p)->aes.type)
+#define	CCP_XTS_SIZE(p)		((p)->aes_xts.size)
+#define	CCP_XTS_ENCRYPT(p)	((p)->aes_xts.encrypt)
+#define	CCP_SHA_TYPE(p)		((p)->sha.type)
+#define	CCP_RSA_SIZE(p)		((p)->rsa.size)
+#define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
+#define	CCP_PT_BITWISE(p)	((p)->pt.bitwise)
+#define	CCP_ECC_MODE(p)		((p)->ecc.mode)
+#define	CCP_ECC_AFFINE(p)	((p)->ecc.one)
+
+/* Word 0 */
+#define CCP5_CMD_DW0(p)		((p)->dw0)
+#define CCP5_CMD_SOC(p)		(CCP5_CMD_DW0(p).soc)
+#define CCP5_CMD_IOC(p)		(CCP5_CMD_DW0(p).ioc)
+#define CCP5_CMD_INIT(p)	(CCP5_CMD_DW0(p).init)
+#define CCP5_CMD_EOM(p)		(CCP5_CMD_DW0(p).eom)
+#define CCP5_CMD_FUNCTION(p)	(CCP5_CMD_DW0(p).function)
+#define CCP5_CMD_ENGINE(p)	(CCP5_CMD_DW0(p).engine)
+#define CCP5_CMD_PROT(p)	(CCP5_CMD_DW0(p).prot)
+
+/* Word 1 */
+#define CCP5_CMD_DW1(p)		((p)->length)
+#define CCP5_CMD_LEN(p)		(CCP5_CMD_DW1(p))
+
+/* Word 2 */
+#define CCP5_CMD_DW2(p)		((p)->src_lo)
+#define CCP5_CMD_SRC_LO(p)	(CCP5_CMD_DW2(p))
+
+/* Word 3 */
+#define CCP5_CMD_DW3(p)		((p)->dw3)
+#define CCP5_CMD_SRC_MEM(p)	((p)->dw3.src_mem)
+#define CCP5_CMD_SRC_HI(p)	((p)->dw3.src_hi)
+#define CCP5_CMD_LSB_ID(p)	((p)->dw3.lsb_cxt_id)
+#define CCP5_CMD_FIX_SRC(p)	((p)->dw3.fixed)
+
+/* Words 4/5 */
+#define CCP5_CMD_DW4(p)		((p)->dw4)
+#define CCP5_CMD_DST_LO(p)	(CCP5_CMD_DW4(p).dst_lo)
+#define CCP5_CMD_DW5(p)		((p)->dw5.fields.dst_hi)
+#define CCP5_CMD_DST_HI(p)	(CCP5_CMD_DW5(p))
+#define CCP5_CMD_DST_MEM(p)	((p)->dw5.fields.dst_mem)
+#define CCP5_CMD_FIX_DST(p)	((p)->dw5.fields.fixed)
+#define CCP5_CMD_SHA_LO(p)	((p)->dw4.sha_len_lo)
+#define CCP5_CMD_SHA_HI(p)	((p)->dw5.sha_len_hi)
+
+/* Word 6/7 */
+#define CCP5_CMD_DW6(p)		((p)->key_lo)
+#define CCP5_CMD_KEY_LO(p)	(CCP5_CMD_DW6(p))
+#define CCP5_CMD_DW7(p)		((p)->dw7)
+#define CCP5_CMD_KEY_HI(p)	((p)->dw7.key_hi)
+#define CCP5_CMD_KEY_MEM(p)	((p)->dw7.key_mem)
+
+static inline u32 low_address(unsigned long addr)
+{
+	return (u64)addr & 0x0ffffffff;
+}
+
+static inline u32 high_address(unsigned long addr)
+{
+	return ((u64)addr >> 32) & 0x00000ffff;
+}
+
+static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q)
+{
+	unsigned int head_idx, n;
+	u32 head_lo, queue_start;
+
+	queue_start = low_address(cmd_q->qdma_tail);
+	head_lo = ioread32(cmd_q->reg_head_lo);
+	head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc);
+
+	n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1;
+
+	return n % COMMANDS_PER_QUEUE; /* Always one unused spot */
+}
+
+static int ccp5_do_cmd(struct ccp5_desc *desc,
+		       struct ccp_cmd_queue *cmd_q)
+{
+	u32 *mP;
+	__le32 *dP;
+	u32 tail;
+	int	i;
+	int ret = 0;
+
+	if (CCP5_CMD_SOC(desc)) {
+		CCP5_CMD_IOC(desc) = 1;
+		CCP5_CMD_SOC(desc) = 0;
+	}
+	mutex_lock(&cmd_q->q_mutex);
+
+	mP = (u32 *) &cmd_q->qbase[cmd_q->qidx];
+	dP = (__le32 *) desc;
+	for (i = 0; i < 8; i++)
+		mP[i] = cpu_to_le32(dP[i]); /* handle endianness */
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	/* The data used by this command must be flushed to memory */
+	wmb();
+
+	/* Write the new tail address back to the queue register */
+	tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
+	iowrite32(tail, cmd_q->reg_tail_lo);
+
+	/* Turn the queue back on using our cached control register */
+	iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control);
+	mutex_unlock(&cmd_q->q_mutex);
+
+	if (CCP5_CMD_IOC(desc)) {
+		/* Wait for the job to complete */
+		ret = wait_event_interruptible(cmd_q->int_queue,
+					       cmd_q->int_rcvd);
+		if (ret || cmd_q->cmd_error) {
+			if (cmd_q->cmd_error)
+				ccp_log_error(cmd_q->ccp,
+					      cmd_q->cmd_error);
+			/* A version 5 device doesn't use Job IDs... */
+			if (!ret)
+				ret = -EIO;
+		}
+		cmd_q->int_rcvd = 0;
+	}
+
+	return 0;
+}
+
+static int ccp5_perform_aes(struct ccp_op *op)
+{
+	struct ccp5_desc desc;
+	union ccp_function function;
+	u32 key_addr = op->sb_key * LSB_ITEM_SIZE;
+
+	/* Zero out all the fields of the command desc */
+	memset(&desc, 0, Q_DESC_SIZE);
+
+	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_AES;
+
+	CCP5_CMD_SOC(&desc) = op->soc;
+	CCP5_CMD_IOC(&desc) = 1;
+	CCP5_CMD_INIT(&desc) = op->init;
+	CCP5_CMD_EOM(&desc) = op->eom;
+	CCP5_CMD_PROT(&desc) = 0;
+
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = op->u.aes.action;
+	CCP_AES_MODE(&function) = op->u.aes.mode;
+	CCP_AES_TYPE(&function) = op->u.aes.type;
+	if (op->u.aes.mode == CCP_AES_MODE_CFB)
+		CCP_AES_SIZE(&function) = 0x7f;
+
+	CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+	CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
+
+	CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+	CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+	CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+	CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+	CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
+	CCP5_CMD_KEY_HI(&desc) = 0;
+	CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
+	CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
+
+	return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp5_perform_xts_aes(struct ccp_op *op)
+{
+	struct ccp5_desc desc;
+	union ccp_function function;
+	u32 key_addr = op->sb_key * LSB_ITEM_SIZE;
+
+	/* Zero out all the fields of the command desc */
+	memset(&desc, 0, Q_DESC_SIZE);
+
+	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_XTS_AES_128;
+
+	CCP5_CMD_SOC(&desc) = op->soc;
+	CCP5_CMD_IOC(&desc) = 1;
+	CCP5_CMD_INIT(&desc) = op->init;
+	CCP5_CMD_EOM(&desc) = op->eom;
+	CCP5_CMD_PROT(&desc) = 0;
+
+	function.raw = 0;
+	CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
+	CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
+	CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+	CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
+
+	CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+	CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+	CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+	CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+	CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
+	CCP5_CMD_KEY_HI(&desc) =  0;
+	CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
+	CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
+
+	return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp5_perform_sha(struct ccp_op *op)
+{
+	struct ccp5_desc desc;
+	union ccp_function function;
+
+	/* Zero out all the fields of the command desc */
+	memset(&desc, 0, Q_DESC_SIZE);
+
+	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SHA;
+
+	CCP5_CMD_SOC(&desc) = op->soc;
+	CCP5_CMD_IOC(&desc) = 1;
+	CCP5_CMD_INIT(&desc) = 1;
+	CCP5_CMD_EOM(&desc) = op->eom;
+	CCP5_CMD_PROT(&desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = op->u.sha.type;
+	CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+	CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
+
+	CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+	CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+	CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
+
+	if (op->eom) {
+		CCP5_CMD_SHA_LO(&desc) = lower_32_bits(op->u.sha.msg_bits);
+		CCP5_CMD_SHA_HI(&desc) = upper_32_bits(op->u.sha.msg_bits);
+	} else {
+		CCP5_CMD_SHA_LO(&desc) = 0;
+		CCP5_CMD_SHA_HI(&desc) = 0;
+	}
+
+	return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp5_perform_rsa(struct ccp_op *op)
+{
+	struct ccp5_desc desc;
+	union ccp_function function;
+
+	/* Zero out all the fields of the command desc */
+	memset(&desc, 0, Q_DESC_SIZE);
+
+	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_RSA;
+
+	CCP5_CMD_SOC(&desc) = op->soc;
+	CCP5_CMD_IOC(&desc) = 1;
+	CCP5_CMD_INIT(&desc) = 0;
+	CCP5_CMD_EOM(&desc) = 1;
+	CCP5_CMD_PROT(&desc) = 0;
+
+	function.raw = 0;
+	CCP_RSA_SIZE(&function) = op->u.rsa.mod_size;
+	CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+	CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;
+
+	/* Source is from external memory */
+	CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+	CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+	CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	/* Destination is in external memory */
+	CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+	CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+	CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	/* Key (Exponent) is in external memory */
+	CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
+	CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
+	CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp5_perform_passthru(struct ccp_op *op)
+{
+	struct ccp5_desc desc;
+	union ccp_function function;
+	struct ccp_dma_info *saddr = &op->src.u.dma;
+	struct ccp_dma_info *daddr = &op->dst.u.dma;
+
+	memset(&desc, 0, Q_DESC_SIZE);
+
+	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;
+
+	CCP5_CMD_SOC(&desc) = 0;
+	CCP5_CMD_IOC(&desc) = 1;
+	CCP5_CMD_INIT(&desc) = 0;
+	CCP5_CMD_EOM(&desc) = op->eom;
+	CCP5_CMD_PROT(&desc) = 0;
+
+	function.raw = 0;
+	CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap;
+	CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod;
+	CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+	/* Length of source data is always 256 bytes */
+	if (op->src.type == CCP_MEMTYPE_SYSTEM)
+		CCP5_CMD_LEN(&desc) = saddr->length;
+	else
+		CCP5_CMD_LEN(&desc) = daddr->length;
+
+	if (op->src.type == CCP_MEMTYPE_SYSTEM) {
+		CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+		CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+		CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+		if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+			CCP5_CMD_LSB_ID(&desc) = op->sb_key;
+	} else {
+		u32 key_addr = op->src.u.sb * CCP_SB_BYTES;
+
+		CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr);
+		CCP5_CMD_SRC_HI(&desc) = 0;
+		CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB;
+	}
+
+	if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
+		CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+		CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+		CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+	} else {
+		u32 key_addr = op->dst.u.sb * CCP_SB_BYTES;
+
+		CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr);
+		CCP5_CMD_DST_HI(&desc) = 0;
+		CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB;
+	}
+
+	return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp5_perform_ecc(struct ccp_op *op)
+{
+	struct ccp5_desc desc;
+	union ccp_function function;
+
+	/* Zero out all the fields of the command desc */
+	memset(&desc, 0, Q_DESC_SIZE);
+
+	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_ECC;
+
+	CCP5_CMD_SOC(&desc) = 0;
+	CCP5_CMD_IOC(&desc) = 1;
+	CCP5_CMD_INIT(&desc) = 0;
+	CCP5_CMD_EOM(&desc) = 1;
+	CCP5_CMD_PROT(&desc) = 0;
+
+	function.raw = 0;
+	function.ecc.mode = op->u.ecc.function;
+	CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+	CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
+
+	CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+	CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+	CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+	CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+	CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status)
+{
+	int q_mask = 1 << cmd_q->id;
+	int queues = 0;
+	int j;
+
+	/* Build a bit mask to know which LSBs this queue has access to.
+	 * Don't bother with segment 0 as it has special privileges.
+	 */
+	for (j = 1; j < MAX_LSB_CNT; j++) {
+		if (status & q_mask)
+			bitmap_set(cmd_q->lsbmask, j, 1);
+		status >>= LSB_REGION_WIDTH;
+	}
+	queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);
+	dev_info(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n",
+		 cmd_q->id, queues);
+
+	return queues ? 0 : -EINVAL;
+}
+
+
+static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
+					int lsb_cnt, int n_lsbs,
+					unsigned long *lsb_pub)
+{
+	DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
+	int bitno;
+	int qlsb_wgt;
+	int i;
+
+	/* For each queue:
+	 * If the count of potential LSBs available to a queue matches the
+	 * ordinal given to us in lsb_cnt:
+	 * Copy the mask of possible LSBs for this queue into "qlsb";
+	 * For each bit in qlsb, see if the corresponding bit in the
+	 * aggregation mask is set; if so, we have a match.
+	 *     If we have a match, clear the bit in the aggregation to
+	 *     mark it as no longer available.
+	 *     If there is no match, clear the bit in qlsb and keep looking.
+	 */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
+
+		qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);
+
+		if (qlsb_wgt == lsb_cnt) {
+			bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT);
+
+			bitno = find_first_bit(qlsb, MAX_LSB_CNT);
+			while (bitno < MAX_LSB_CNT) {
+				if (test_bit(bitno, lsb_pub)) {
+					/* We found an available LSB
+					 * that this queue can access
+					 */
+					cmd_q->lsb = bitno;
+					bitmap_clear(lsb_pub, bitno, 1);
+					dev_info(ccp->dev,
+						 "Queue %d gets LSB %d\n",
+						 i, bitno);
+					break;
+				}
+				bitmap_clear(qlsb, bitno, 1);
+				bitno = find_first_bit(qlsb, MAX_LSB_CNT);
+			}
+			if (bitno >= MAX_LSB_CNT)
+				return -EINVAL;
+			n_lsbs--;
+		}
+	}
+	return n_lsbs;
+}
+
+/* For each queue, from the most- to least-constrained:
+ * find an LSB that can be assigned to the queue. If there are N queues that
+ * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
+ * dedicated LSB. Remaining LSB regions become a shared resource.
+ * If we have fewer LSBs than queues, all LSB regions become shared resources.
+ */
+static int ccp_assign_lsbs(struct ccp_device *ccp)
+{
+	DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT);
+	DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
+	int n_lsbs = 0;
+	int bitno;
+	int i, lsb_cnt;
+	int rc = 0;
+
+	bitmap_zero(lsb_pub, MAX_LSB_CNT);
+
+	/* Create an aggregate bitmap to get a total count of available LSBs */
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		bitmap_or(lsb_pub,
+			  lsb_pub, ccp->cmd_q[i].lsbmask,
+			  MAX_LSB_CNT);
+
+	n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT);
+
+	if (n_lsbs >= ccp->cmd_q_count) {
+		/* We have enough LSBS to give every queue a private LSB.
+		 * Brute force search to start with the queues that are more
+		 * constrained in LSB choice. When an LSB is privately
+		 * assigned, it is removed from the public mask.
+		 * This is an ugly N squared algorithm with some optimization.
+		 */
+		for (lsb_cnt = 1;
+		     n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
+		     lsb_cnt++) {
+			rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
+							  lsb_pub);
+			if (rc < 0)
+				return -EINVAL;
+			n_lsbs = rc;
+		}
+	}
+
+	rc = 0;
+	/* What's left of the LSBs, according to the public mask, now become
+	 * shared. Any zero bits in the lsb_pub mask represent an LSB region
+	 * that can't be used as a shared resource, so mark the LSB slots for
+	 * them as "in use".
+	 */
+	bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT);
+
+	bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
+	while (bitno < MAX_LSB_CNT) {
+		bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
+		bitmap_set(qlsb, bitno, 1);
+		bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
+	}
+
+	return rc;
+}
+
+static int ccp5_init(struct ccp_device *ccp)
+{
+	struct device *dev = ccp->dev;
+	struct ccp_cmd_queue *cmd_q;
+	struct dma_pool *dma_pool;
+	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
+	unsigned int qmr, qim, i;
+	u64 status;
+	u32 status_lo, status_hi;
+	int ret;
+
+	/* Find available queues */
+	qim = 0;
+	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
+	for (i = 0; i < MAX_HW_QUEUES; i++) {
+
+		if (!(qmr & (1 << i)))
+			continue;
+
+		/* Allocate a dma pool for this queue */
+		snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
+			 ccp->name, i);
+		dma_pool = dma_pool_create(dma_pool_name, dev,
+					   CCP_DMAPOOL_MAX_SIZE,
+					   CCP_DMAPOOL_ALIGN, 0);
+		if (!dma_pool) {
+			dev_err(dev, "unable to allocate dma pool\n");
+			ret = -ENOMEM;
+		}
+
+		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
+		ccp->cmd_q_count++;
+
+		cmd_q->ccp = ccp;
+		cmd_q->id = i;
+		cmd_q->dma_pool = dma_pool;
+		mutex_init(&cmd_q->q_mutex);
+
+		/* Page alignment satisfies our needs for N <= 128 */
+		BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
+		cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+		cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize,
+						   &cmd_q->qbase_dma,
+						   GFP_KERNEL);
+		if (!cmd_q->qbase) {
+			dev_err(dev, "unable to allocate command queue\n");
+			ret = -ENOMEM;
+			goto e_pool;
+		}
+
+		cmd_q->qidx = 0;
+		/* Preset some register values and masks that are queue
+		 * number dependent
+		 */
+		cmd_q->reg_control = ccp->io_regs +
+				     CMD5_Q_STATUS_INCR * (i + 1);
+		cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE;
+		cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE;
+		cmd_q->reg_int_enable = cmd_q->reg_control +
+					CMD5_Q_INT_ENABLE_BASE;
+		cmd_q->reg_interrupt_status = cmd_q->reg_control +
+					      CMD5_Q_INTERRUPT_STATUS_BASE;
+		cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE;
+		cmd_q->reg_int_status = cmd_q->reg_control +
+					CMD5_Q_INT_STATUS_BASE;
+		cmd_q->reg_dma_status = cmd_q->reg_control +
+					CMD5_Q_DMA_STATUS_BASE;
+		cmd_q->reg_dma_read_status = cmd_q->reg_control +
+					     CMD5_Q_DMA_READ_STATUS_BASE;
+		cmd_q->reg_dma_write_status = cmd_q->reg_control +
+					      CMD5_Q_DMA_WRITE_STATUS_BASE;
+
+		init_waitqueue_head(&cmd_q->int_queue);
+
+		dev_dbg(dev, "queue #%u available\n", i);
+	}
+	if (ccp->cmd_q_count == 0) {
+		dev_notice(dev, "no command queues available\n");
+		ret = -EIO;
+		goto e_pool;
+	}
+	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
+
+	/* Turn off the queues and disable interrupts until ready */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		cmd_q = &ccp->cmd_q[i];
+
+		cmd_q->qcontrol = 0; /* Start with nothing */
+		iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
+
+		/* Disable the interrupts */
+		iowrite32(0x00, cmd_q->reg_int_enable);
+		ioread32(cmd_q->reg_int_status);
+		ioread32(cmd_q->reg_status);
+
+		/* Clear the interrupts */
+		iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
+	}
+
+	dev_dbg(dev, "Requesting an IRQ...\n");
+	/* Request an irq */
+	ret = ccp->get_irq(ccp);
+	if (ret) {
+		dev_err(dev, "unable to allocate an IRQ\n");
+		goto e_pool;
+	}
+
+	/* Initialize the queue used to suspend */
+	init_waitqueue_head(&ccp->suspend_queue);
+
+	dev_dbg(dev, "Loading LSB map...\n");
+	/* Copy the private LSB mask to the public registers */
+	status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
+	status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
+	iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET);
+	iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET);
+	status = ((u64)status_hi<<30) | (u64)status_lo;
+
+	dev_dbg(dev, "Configuring virtual queues...\n");
+	/* Configure size of each virtual queue accessible to host */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		u32 dma_addr_lo;
+		u32 dma_addr_hi;
+
+		cmd_q = &ccp->cmd_q[i];
+
+		cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT);
+		cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT;
+
+		cmd_q->qdma_tail = cmd_q->qbase_dma;
+		dma_addr_lo = low_address(cmd_q->qdma_tail);
+		iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo);
+		iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo);
+
+		dma_addr_hi = high_address(cmd_q->qdma_tail);
+		cmd_q->qcontrol |= (dma_addr_hi << 16);
+		iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
+
+		/* Find the LSB regions accessible to the queue */
+		ccp_find_lsb_regions(cmd_q, status);
+		cmd_q->lsb = -1; /* Unassigned value */
+	}
+
+	dev_dbg(dev, "Assigning LSBs...\n");
+	ret = ccp_assign_lsbs(ccp);
+	if (ret) {
+		dev_err(dev, "Unable to assign LSBs (%d)\n", ret);
+		goto e_irq;
+	}
+
+	/* Optimization: pre-allocate LSB slots for each queue */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
+		ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
+	}
+
+	dev_dbg(dev, "Starting threads...\n");
+	/* Create a kthread for each queue */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct task_struct *kthread;
+
+		cmd_q = &ccp->cmd_q[i];
+
+		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
+					 "%s-q%u", ccp->name, cmd_q->id);
+		if (IS_ERR(kthread)) {
+			dev_err(dev, "error creating queue thread (%ld)\n",
+				PTR_ERR(kthread));
+			ret = PTR_ERR(kthread);
+			goto e_kthread;
+		}
+
+		cmd_q->kthread = kthread;
+		wake_up_process(kthread);
+	}
+
+	dev_dbg(dev, "Enabling interrupts...\n");
+	/* Enable interrupts */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		cmd_q = &ccp->cmd_q[i];
+		iowrite32(ALL_INTERRUPTS, cmd_q->reg_int_enable);
+	}
+
+	dev_dbg(dev, "Registering device...\n");
+	/* Put this on the unit list to make it available */
+	ccp_add_device(ccp);
+
+	ret = ccp_register_rng(ccp);
+	if (ret)
+		goto e_kthread;
+
+	/* Register the DMA engine support */
+	ret = ccp_dmaengine_register(ccp);
+	if (ret)
+		goto e_hwrng;
+
+	return 0;
+
+e_hwrng:
+	ccp_unregister_rng(ccp);
+
+e_kthread:
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		if (ccp->cmd_q[i].kthread)
+			kthread_stop(ccp->cmd_q[i].kthread);
+
+e_irq:
+	ccp->free_irq(ccp);
+
+e_pool:
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		dma_pool_destroy(ccp->cmd_q[i].dma_pool);
+
+	return ret;
+}
+
+static void ccp5_destroy(struct ccp_device *ccp)
+{
+	struct device *dev = ccp->dev;
+	struct ccp_cmd_queue *cmd_q;
+	struct ccp_cmd *cmd;
+	unsigned int i;
+
+	/* Unregister the DMA engine */
+	ccp_dmaengine_unregister(ccp);
+
+	/* Unregister the RNG */
+	ccp_unregister_rng(ccp);
+
+	/* Remove this device from the list of available units first */
+	ccp_del_device(ccp);
+
+	/* Disable and clear interrupts */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		cmd_q = &ccp->cmd_q[i];
+
+		/* Turn off the run bit */
+		iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);
+
+		/* Disable the interrupts */
+		iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
+
+		/* Clear the interrupt status */
+		iowrite32(0x00, cmd_q->reg_int_enable);
+		ioread32(cmd_q->reg_int_status);
+		ioread32(cmd_q->reg_status);
+	}
+
+	/* Stop the queue kthreads */
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		if (ccp->cmd_q[i].kthread)
+			kthread_stop(ccp->cmd_q[i].kthread);
+
+	ccp->free_irq(ccp);
+
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		cmd_q = &ccp->cmd_q[i];
+		dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase,
+				  cmd_q->qbase_dma);
+	}
+
+	/* Flush the cmd and backlog queue */
+	while (!list_empty(&ccp->cmd)) {
+		/* Invoke the callback directly with an error code */
+		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
+		list_del(&cmd->entry);
+		cmd->callback(cmd->data, -ENODEV);
+	}
+	while (!list_empty(&ccp->backlog)) {
+		/* Invoke the callback directly with an error code */
+		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
+		list_del(&cmd->entry);
+		cmd->callback(cmd->data, -ENODEV);
+	}
+}
+
+static irqreturn_t ccp5_irq_handler(int irq, void *data)
+{
+	struct device *dev = data;
+	struct ccp_device *ccp = dev_get_drvdata(dev);
+	u32 status;
+	unsigned int i;
+
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
+
+		status = ioread32(cmd_q->reg_interrupt_status);
+
+		if (status) {
+			cmd_q->int_status = status;
+			cmd_q->q_status = ioread32(cmd_q->reg_status);
+			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+			/* On error, only save the first error value */
+			if ((status & INT_ERROR) && !cmd_q->cmd_error)
+				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+			cmd_q->int_rcvd = 1;
+
+			/* Acknowledge the interrupt and wake the kthread */
+			iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
+			wake_up_interruptible(&cmd_q->int_queue);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void ccp5_config(struct ccp_device *ccp)
+{
+	/* Public side */
+	iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
+}
+
+static void ccp5other_config(struct ccp_device *ccp)
+{
+	int i;
+	u32 rnd;
+
+	/* We own all of the queues on the NTB CCP */
+
+	iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET);
+	iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET);
+	for (i = 0; i < 12; i++) {
+		rnd = ioread32(ccp->io_regs + TRNG_OUT_REG);
+		iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET);
+	}
+
+	iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET);
+	iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET);
+	iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET);
+
+	iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
+	iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
+
+	iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET);
+
+	ccp5_config(ccp);
+}
+
+/* Version 5 adds some function, but is essentially the same as v5 */
+static const struct ccp_actions ccp5_actions = {
+	.aes = ccp5_perform_aes,
+	.xts_aes = ccp5_perform_xts_aes,
+	.sha = ccp5_perform_sha,
+	.rsa = ccp5_perform_rsa,
+	.passthru = ccp5_perform_passthru,
+	.ecc = ccp5_perform_ecc,
+	.sballoc = ccp_lsb_alloc,
+	.sbfree = ccp_lsb_free,
+	.init = ccp5_init,
+	.destroy = ccp5_destroy,
+	.get_free_slots = ccp5_get_free_slots,
+	.irqhandler = ccp5_irq_handler,
+};
+
+const struct ccp_vdata ccpv5a = {
+	.version = CCP_VERSION(5, 0),
+	.setup = ccp5_config,
+	.perform = &ccp5_actions,
+	.bar = 2,
+	.offset = 0x0,
+};
+
+const struct ccp_vdata ccpv5b = {
+	.version = CCP_VERSION(5, 0),
+	.setup = ccp5other_config,
+	.perform = &ccp5_actions,
+	.bar = 2,
+	.offset = 0x0,
+};
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 87b9f2b..cafa633 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -39,6 +40,59 @@
 	struct ccp_cmd *cmd;
 };
 
+/* Human-readable error strings */
+char *ccp_error_codes[] = {
+	"",
+	"ERR 01: ILLEGAL_ENGINE",
+	"ERR 02: ILLEGAL_KEY_ID",
+	"ERR 03: ILLEGAL_FUNCTION_TYPE",
+	"ERR 04: ILLEGAL_FUNCTION_MODE",
+	"ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
+	"ERR 06: ILLEGAL_FUNCTION_SIZE",
+	"ERR 07: Zlib_MISSING_INIT_EOM",
+	"ERR 08: ILLEGAL_FUNCTION_RSVD",
+	"ERR 09: ILLEGAL_BUFFER_LENGTH",
+	"ERR 10: VLSB_FAULT",
+	"ERR 11: ILLEGAL_MEM_ADDR",
+	"ERR 12: ILLEGAL_MEM_SEL",
+	"ERR 13: ILLEGAL_CONTEXT_ID",
+	"ERR 14: ILLEGAL_KEY_ADDR",
+	"ERR 15: 0xF Reserved",
+	"ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
+	"ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
+	"ERR 18: CMD_TIMEOUT",
+	"ERR 19: IDMA0_AXI_SLVERR",
+	"ERR 20: IDMA0_AXI_DECERR",
+	"ERR 21: 0x15 Reserved",
+	"ERR 22: IDMA1_AXI_SLAVE_FAULT",
+	"ERR 23: IDMA1_AIXI_DECERR",
+	"ERR 24: 0x18 Reserved",
+	"ERR 25: ZLIBVHB_AXI_SLVERR",
+	"ERR 26: ZLIBVHB_AXI_DECERR",
+	"ERR 27: 0x1B Reserved",
+	"ERR 27: ZLIB_UNEXPECTED_EOM",
+	"ERR 27: ZLIB_EXTRA_DATA",
+	"ERR 30: ZLIB_BTYPE",
+	"ERR 31: ZLIB_UNDEFINED_SYMBOL",
+	"ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
+	"ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
+	"ERR 34: ZLIB _VHB_ILLEGAL_FETCH",
+	"ERR 35: ZLIB_UNCOMPRESSED_LEN",
+	"ERR 36: ZLIB_LIMIT_REACHED",
+	"ERR 37: ZLIB_CHECKSUM_MISMATCH0",
+	"ERR 38: ODMA0_AXI_SLVERR",
+	"ERR 39: ODMA0_AXI_DECERR",
+	"ERR 40: 0x28 Reserved",
+	"ERR 41: ODMA1_AXI_SLVERR",
+	"ERR 42: ODMA1_AXI_DECERR",
+	"ERR 43: LSB_PARITY_ERR",
+};
+
+void ccp_log_error(struct ccp_device *d, int e)
+{
+	dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
+}
+
 /* List of CCPs, CCP count, read-write access lock, and access functions
  *
  * Lock structure: get ccp_unit_lock for reading whenever we need to
@@ -58,7 +112,7 @@
 
 /* Ever-increasing value to produce unique unit numbers */
 static atomic_t ccp_unit_ordinal;
-unsigned int ccp_increment_unit_ordinal(void)
+static unsigned int ccp_increment_unit_ordinal(void)
 {
 	return atomic_inc_return(&ccp_unit_ordinal);
 }
@@ -118,6 +172,29 @@
 	write_unlock_irqrestore(&ccp_unit_lock, flags);
 }
 
+
+
+int ccp_register_rng(struct ccp_device *ccp)
+{
+	int ret = 0;
+
+	dev_dbg(ccp->dev, "Registering RNG...\n");
+	/* Register an RNG */
+	ccp->hwrng.name = ccp->rngname;
+	ccp->hwrng.read = ccp_trng_read;
+	ret = hwrng_register(&ccp->hwrng);
+	if (ret)
+		dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);
+
+	return ret;
+}
+
+void ccp_unregister_rng(struct ccp_device *ccp)
+{
+	if (ccp->hwrng.name)
+		hwrng_unregister(&ccp->hwrng);
+}
+
 static struct ccp_device *ccp_get_device(void)
 {
 	unsigned long flags;
@@ -397,9 +474,9 @@
 
 	spin_lock_init(&ccp->cmd_lock);
 	mutex_init(&ccp->req_mutex);
-	mutex_init(&ccp->ksb_mutex);
-	ccp->ksb_count = KSB_COUNT;
-	ccp->ksb_start = 0;
+	mutex_init(&ccp->sb_mutex);
+	ccp->sb_count = KSB_COUNT;
+	ccp->sb_start = 0;
 
 	ccp->ord = ccp_increment_unit_ordinal();
 	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
@@ -408,6 +485,34 @@
 	return ccp;
 }
 
+int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
+	u32 trng_value;
+	int len = min_t(int, sizeof(trng_value), max);
+
+	/* Locking is provided by the caller so we can update device
+	 * hwrng-related fields safely
+	 */
+	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
+	if (!trng_value) {
+		/* Zero is returned if not data is available or if a
+		 * bad-entropy error is present. Assume an error if
+		 * we exceed TRNG_RETRIES reads of zero.
+		 */
+		if (ccp->hwrng_retries++ > TRNG_RETRIES)
+			return -EIO;
+
+		return 0;
+	}
+
+	/* Reset the counter and save the rng value */
+	ccp->hwrng_retries = 0;
+	memcpy(data, &trng_value, len);
+
+	return len;
+}
+
 #ifdef CONFIG_PM
 bool ccp_queues_suspended(struct ccp_device *ccp)
 {
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index bd41ffce..da5f4a6 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -4,6 +4,7 @@
  * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -60,7 +61,69 @@
 #define CMD_Q_ERROR(__qs)		((__qs) & 0x0000003f)
 #define CMD_Q_DEPTH(__qs)		(((__qs) >> 12) & 0x0000000f)
 
-/****** REQ0 Related Values ******/
+/* ------------------------ CCP Version 5 Specifics ------------------------ */
+#define CMD5_QUEUE_MASK_OFFSET		0x00
+#define	CMD5_QUEUE_PRIO_OFFSET		0x04
+#define CMD5_REQID_CONFIG_OFFSET	0x08
+#define	CMD5_CMD_TIMEOUT_OFFSET		0x10
+#define LSB_PUBLIC_MASK_LO_OFFSET	0x18
+#define LSB_PUBLIC_MASK_HI_OFFSET	0x1C
+#define LSB_PRIVATE_MASK_LO_OFFSET	0x20
+#define LSB_PRIVATE_MASK_HI_OFFSET	0x24
+
+#define CMD5_Q_CONTROL_BASE		0x0000
+#define CMD5_Q_TAIL_LO_BASE		0x0004
+#define CMD5_Q_HEAD_LO_BASE		0x0008
+#define CMD5_Q_INT_ENABLE_BASE		0x000C
+#define CMD5_Q_INTERRUPT_STATUS_BASE	0x0010
+
+#define CMD5_Q_STATUS_BASE		0x0100
+#define CMD5_Q_INT_STATUS_BASE		0x0104
+#define CMD5_Q_DMA_STATUS_BASE		0x0108
+#define CMD5_Q_DMA_READ_STATUS_BASE	0x010C
+#define CMD5_Q_DMA_WRITE_STATUS_BASE	0x0110
+#define CMD5_Q_ABORT_BASE		0x0114
+#define CMD5_Q_AX_CACHE_BASE		0x0118
+
+#define	CMD5_CONFIG_0_OFFSET		0x6000
+#define	CMD5_TRNG_CTL_OFFSET		0x6008
+#define	CMD5_AES_MASK_OFFSET		0x6010
+#define	CMD5_CLK_GATE_CTL_OFFSET	0x603C
+
+/* Address offset between two virtual queue registers */
+#define CMD5_Q_STATUS_INCR		0x1000
+
+/* Bit masks */
+#define CMD5_Q_RUN			0x1
+#define CMD5_Q_HALT			0x2
+#define CMD5_Q_MEM_LOCATION		0x4
+#define CMD5_Q_SIZE			0x1F
+#define CMD5_Q_SHIFT			3
+#define COMMANDS_PER_QUEUE		16
+#define QUEUE_SIZE_VAL			((ffs(COMMANDS_PER_QUEUE) - 2) & \
+					  CMD5_Q_SIZE)
+#define Q_PTR_MASK			(2 << (QUEUE_SIZE_VAL + 5) - 1)
+#define Q_DESC_SIZE			sizeof(struct ccp5_desc)
+#define Q_SIZE(n)			(COMMANDS_PER_QUEUE*(n))
+
+#define INT_COMPLETION			0x1
+#define INT_ERROR			0x2
+#define INT_QUEUE_STOPPED		0x4
+#define ALL_INTERRUPTS			(INT_COMPLETION| \
+					 INT_ERROR| \
+					 INT_QUEUE_STOPPED)
+
+#define LSB_REGION_WIDTH		5
+#define MAX_LSB_CNT			8
+
+#define LSB_SIZE			16
+#define LSB_ITEM_SIZE			32
+#define PLSB_MAP_SIZE			(LSB_SIZE)
+#define SLSB_MAP_SIZE			(MAX_LSB_CNT * LSB_SIZE)
+
+#define LSB_ENTRY_NUMBER(LSB_ADDR)	(LSB_ADDR / LSB_ITEM_SIZE)
+
+/* ------------------------ CCP Version 3 Specifics ------------------------ */
 #define REQ0_WAIT_FOR_WRITE		0x00000004
 #define REQ0_INT_ON_COMPLETE		0x00000002
 #define REQ0_STOP_ON_COMPLETE		0x00000001
@@ -110,29 +173,30 @@
 #define KSB_START			77
 #define KSB_END				127
 #define KSB_COUNT			(KSB_END - KSB_START + 1)
-#define CCP_KSB_BITS			256
-#define CCP_KSB_BYTES			32
+#define CCP_SB_BITS			256
 
 #define CCP_JOBID_MASK			0x0000003f
 
+/* ------------------------ General CCP Defines ------------------------ */
+
 #define CCP_DMAPOOL_MAX_SIZE		64
 #define CCP_DMAPOOL_ALIGN		BIT(5)
 
 #define CCP_REVERSE_BUF_SIZE		64
 
-#define CCP_AES_KEY_KSB_COUNT		1
-#define CCP_AES_CTX_KSB_COUNT		1
+#define CCP_AES_KEY_SB_COUNT		1
+#define CCP_AES_CTX_SB_COUNT		1
 
-#define CCP_XTS_AES_KEY_KSB_COUNT	1
-#define CCP_XTS_AES_CTX_KSB_COUNT	1
+#define CCP_XTS_AES_KEY_SB_COUNT	1
+#define CCP_XTS_AES_CTX_SB_COUNT	1
 
-#define CCP_SHA_KSB_COUNT		1
+#define CCP_SHA_SB_COUNT		1
 
 #define CCP_RSA_MAX_WIDTH		4096
 
 #define CCP_PASSTHRU_BLOCKSIZE		256
 #define CCP_PASSTHRU_MASKSIZE		32
-#define CCP_PASSTHRU_KSB_COUNT		1
+#define CCP_PASSTHRU_SB_COUNT		1
 
 #define CCP_ECC_MODULUS_BYTES		48      /* 384-bits */
 #define CCP_ECC_MAX_OPERANDS		6
@@ -144,31 +208,12 @@
 #define CCP_ECC_RESULT_OFFSET		60
 #define CCP_ECC_RESULT_SUCCESS		0x0001
 
+#define CCP_SB_BYTES			32
+
 struct ccp_op;
-
-/* Structure for computation functions that are device-specific */
-struct ccp_actions {
-	int (*perform_aes)(struct ccp_op *);
-	int (*perform_xts_aes)(struct ccp_op *);
-	int (*perform_sha)(struct ccp_op *);
-	int (*perform_rsa)(struct ccp_op *);
-	int (*perform_passthru)(struct ccp_op *);
-	int (*perform_ecc)(struct ccp_op *);
-	int (*init)(struct ccp_device *);
-	void (*destroy)(struct ccp_device *);
-	irqreturn_t (*irqhandler)(int, void *);
-};
-
-/* Structure to hold CCP version-specific values */
-struct ccp_vdata {
-	unsigned int version;
-	const struct ccp_actions *perform;
-};
-
-extern struct ccp_vdata ccpv3;
-
 struct ccp_device;
 struct ccp_cmd;
+struct ccp_fns;
 
 struct ccp_dma_cmd {
 	struct list_head entry;
@@ -212,9 +257,29 @@
 	/* Queue dma pool */
 	struct dma_pool *dma_pool;
 
-	/* Queue reserved KSB regions */
-	u32 ksb_key;
-	u32 ksb_ctx;
+	/* Queue base address (not neccessarily aligned)*/
+	struct ccp5_desc *qbase;
+
+	/* Aligned queue start address (per requirement) */
+	struct mutex q_mutex ____cacheline_aligned;
+	unsigned int qidx;
+
+	/* Version 5 has different requirements for queue memory */
+	unsigned int qsize;
+	dma_addr_t qbase_dma;
+	dma_addr_t qdma_tail;
+
+	/* Per-queue reserved storage block(s) */
+	u32 sb_key;
+	u32 sb_ctx;
+
+	/* Bitmap of LSBs that can be accessed by this queue */
+	DECLARE_BITMAP(lsbmask, MAX_LSB_CNT);
+	/* Private LSB that is assigned to this queue, or -1 if none.
+	 * Bitmap for my private LSB, unused otherwise
+	 */
+	unsigned int lsb;
+	DECLARE_BITMAP(lsbmap, PLSB_MAP_SIZE);
 
 	/* Queue processing thread */
 	struct task_struct *kthread;
@@ -229,8 +294,17 @@
 	u32 int_err;
 
 	/* Register addresses for queue */
+	void __iomem *reg_control;
+	void __iomem *reg_tail_lo;
+	void __iomem *reg_head_lo;
+	void __iomem *reg_int_enable;
+	void __iomem *reg_interrupt_status;
 	void __iomem *reg_status;
 	void __iomem *reg_int_status;
+	void __iomem *reg_dma_status;
+	void __iomem *reg_dma_read_status;
+	void __iomem *reg_dma_write_status;
+	u32 qcontrol; /* Cached control register */
 
 	/* Status values from job */
 	u32 int_status;
@@ -253,16 +327,14 @@
 
 	struct device *dev;
 
-	/*
-	 * Bus specific device information
+	/* Bus specific device information
 	 */
 	void *dev_specific;
 	int (*get_irq)(struct ccp_device *ccp);
 	void (*free_irq)(struct ccp_device *ccp);
 	unsigned int irq;
 
-	/*
-	 * I/O area used for device communication. The register mapping
+	/* I/O area used for device communication. The register mapping
 	 * starts at an offset into the mapped bar.
 	 *   The CMD_REQx registers and the Delete_Cmd_Queue_Job register
 	 *   need to be protected while a command queue thread is accessing
@@ -272,8 +344,7 @@
 	void __iomem *io_map;
 	void __iomem *io_regs;
 
-	/*
-	 * Master lists that all cmds are queued on. Because there can be
+	/* Master lists that all cmds are queued on. Because there can be
 	 * more than one CCP command queue that can process a cmd a separate
 	 * backlog list is neeeded so that the backlog completion call
 	 * completes before the cmd is available for execution.
@@ -283,47 +354,54 @@
 	struct list_head cmd;
 	struct list_head backlog;
 
-	/*
-	 * The command queues. These represent the queues available on the
+	/* The command queues. These represent the queues available on the
 	 * CCP that are available for processing cmds
 	 */
 	struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES];
 	unsigned int cmd_q_count;
 
-	/*
-	 * Support for the CCP True RNG
+	/* Support for the CCP True RNG
 	 */
 	struct hwrng hwrng;
 	unsigned int hwrng_retries;
 
-	/*
-	 * Support for the CCP DMA capabilities
+	/* Support for the CCP DMA capabilities
 	 */
 	struct dma_device dma_dev;
 	struct ccp_dma_chan *ccp_dma_chan;
 	struct kmem_cache *dma_cmd_cache;
 	struct kmem_cache *dma_desc_cache;
 
-	/*
-	 * A counter used to generate job-ids for cmds submitted to the CCP
+	/* A counter used to generate job-ids for cmds submitted to the CCP
 	 */
 	atomic_t current_id ____cacheline_aligned;
 
-	/*
-	 * The CCP uses key storage blocks (KSB) to maintain context for certain
-	 * operations. To prevent multiple cmds from using the same KSB range
-	 * a command queue reserves a KSB range for the duration of the cmd.
-	 * Each queue, will however, reserve 2 KSB blocks for operations that
-	 * only require single KSB entries (eg. AES context/iv and key) in order
-	 * to avoid allocation contention.  This will reserve at most 10 KSB
-	 * entries, leaving 40 KSB entries available for dynamic allocation.
+	/* The v3 CCP uses key storage blocks (SB) to maintain context for
+	 * certain operations. To prevent multiple cmds from using the same
+	 * SB range a command queue reserves an SB range for the duration of
+	 * the cmd. Each queue, will however, reserve 2 SB blocks for
+	 * operations that only require single SB entries (eg. AES context/iv
+	 * and key) in order to avoid allocation contention.  This will reserve
+	 * at most 10 SB entries, leaving 40 SB entries available for dynamic
+	 * allocation.
+	 *
+	 * The v5 CCP Local Storage Block (LSB) is broken up into 8
+	 * memrory ranges, each of which can be enabled for access by one
+	 * or more queues. Device initialization takes this into account,
+	 * and attempts to assign one region for exclusive use by each
+	 * available queue; the rest are then aggregated as "public" use.
+	 * If there are fewer regions than queues, all regions are shared
+	 * amongst all queues.
 	 */
-	struct mutex ksb_mutex ____cacheline_aligned;
-	DECLARE_BITMAP(ksb, KSB_COUNT);
-	wait_queue_head_t ksb_queue;
-	unsigned int ksb_avail;
-	unsigned int ksb_count;
-	u32 ksb_start;
+	struct mutex sb_mutex ____cacheline_aligned;
+	DECLARE_BITMAP(sb, KSB_COUNT);
+	wait_queue_head_t sb_queue;
+	unsigned int sb_avail;
+	unsigned int sb_count;
+	u32 sb_start;
+
+	/* Bitmap of shared LSBs, if any */
+	DECLARE_BITMAP(lsbmap, SLSB_MAP_SIZE);
 
 	/* Suspend support */
 	unsigned int suspending;
@@ -335,10 +413,11 @@
 
 enum ccp_memtype {
 	CCP_MEMTYPE_SYSTEM = 0,
-	CCP_MEMTYPE_KSB,
+	CCP_MEMTYPE_SB,
 	CCP_MEMTYPE_LOCAL,
 	CCP_MEMTYPE__LAST,
 };
+#define	CCP_MEMTYPE_LSB	CCP_MEMTYPE_KSB
 
 struct ccp_dma_info {
 	dma_addr_t address;
@@ -379,7 +458,7 @@
 	enum ccp_memtype type;
 	union {
 		struct ccp_dma_info dma;
-		u32 ksb;
+		u32 sb;
 	} u;
 };
 
@@ -419,13 +498,14 @@
 	u32 jobid;
 	u32 ioc;
 	u32 soc;
-	u32 ksb_key;
-	u32 ksb_ctx;
+	u32 sb_key;
+	u32 sb_ctx;
 	u32 init;
 	u32 eom;
 
 	struct ccp_mem src;
 	struct ccp_mem dst;
+	struct ccp_mem exp;
 
 	union {
 		struct ccp_aes_op aes;
@@ -435,6 +515,7 @@
 		struct ccp_passthru_op passthru;
 		struct ccp_ecc_op ecc;
 	} u;
+	struct ccp_mem key;
 };
 
 static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
@@ -447,6 +528,70 @@
 	return upper_32_bits(info->address + info->offset) & 0x0000ffff;
 }
 
+/**
+ * descriptor for version 5 CPP commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+struct dword0 {
+	__le32 soc:1;
+	__le32 ioc:1;
+	__le32 rsvd1:1;
+	__le32 init:1;
+	__le32 eom:1;		/* AES/SHA only */
+	__le32 function:15;
+	__le32 engine:4;
+	__le32 prot:1;
+	__le32 rsvd2:7;
+};
+
+struct dword3 {
+	__le32 src_hi:16;
+	__le32 src_mem:2;
+	__le32 lsb_cxt_id:8;
+	__le32 rsvd1:5;
+	__le32 fixed:1;
+};
+
+union dword4 {
+	__le32 dst_lo;		/* NON-SHA	*/
+	__le32 sha_len_lo;	/* SHA		*/
+};
+
+union dword5 {
+	struct {
+		__le32 dst_hi:16;
+		__le32 dst_mem:2;
+		__le32 rsvd1:13;
+		__le32 fixed:1;
+	} fields;
+	__le32 sha_len_hi;
+};
+
+struct dword7 {
+	__le32 key_hi:16;
+	__le32 key_mem:2;
+	__le32 rsvd1:14;
+};
+
+struct ccp5_desc {
+	struct dword0 dw0;
+	__le32 length;
+	__le32 src_lo;
+	struct dword3 dw3;
+	union dword4 dw4;
+	union dword5 dw5;
+	__le32 key_lo;
+	struct dword7 dw7;
+};
+
 int ccp_pci_init(void);
 void ccp_pci_exit(void);
 
@@ -456,13 +601,48 @@
 void ccp_add_device(struct ccp_device *ccp);
 void ccp_del_device(struct ccp_device *ccp);
 
+extern void ccp_log_error(struct ccp_device *, int);
+
 struct ccp_device *ccp_alloc_struct(struct device *dev);
 bool ccp_queues_suspended(struct ccp_device *ccp);
 int ccp_cmd_queue_thread(void *data);
+int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait);
 
 int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
 
+int ccp_register_rng(struct ccp_device *ccp);
+void ccp_unregister_rng(struct ccp_device *ccp);
 int ccp_dmaengine_register(struct ccp_device *ccp);
 void ccp_dmaengine_unregister(struct ccp_device *ccp);
 
+/* Structure for computation functions that are device-specific */
+struct ccp_actions {
+	int (*aes)(struct ccp_op *);
+	int (*xts_aes)(struct ccp_op *);
+	int (*sha)(struct ccp_op *);
+	int (*rsa)(struct ccp_op *);
+	int (*passthru)(struct ccp_op *);
+	int (*ecc)(struct ccp_op *);
+	u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int);
+	void (*sbfree)(struct ccp_cmd_queue *, unsigned int,
+			       unsigned int);
+	unsigned int (*get_free_slots)(struct ccp_cmd_queue *);
+	int (*init)(struct ccp_device *);
+	void (*destroy)(struct ccp_device *);
+	irqreturn_t (*irqhandler)(int, void *);
+};
+
+/* Structure to hold CCP version-specific values */
+struct ccp_vdata {
+	const unsigned int version;
+	void (*setup)(struct ccp_device *);
+	const struct ccp_actions *perform;
+	const unsigned int bar;
+	const unsigned int offset;
+};
+
+extern const struct ccp_vdata ccpv3;
+extern const struct ccp_vdata ccpv5a;
+extern const struct ccp_vdata ccpv5b;
+
 #endif
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 94f77b0..6553912 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -299,12 +299,10 @@
 {
 	struct ccp_dma_desc *desc;
 
-	desc = kmem_cache_alloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
+	desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
 	if (!desc)
 		return NULL;
 
-	memset(desc, 0, sizeof(*desc));
-
 	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
 	desc->tx_desc.flags = flags;
 	desc->tx_desc.tx_submit = ccp_tx_submit;
@@ -650,8 +648,11 @@
 	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
 					     "%s-dmaengine-desc-cache",
 					     ccp->name);
-	if (!dma_cmd_cache_name)
-		return -ENOMEM;
+	if (!dma_desc_cache_name) {
+		ret = -ENOMEM;
+		goto err_cache;
+	}
+
 	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
 						sizeof(struct ccp_dma_desc),
 						sizeof(void *),
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index ffa2891..50fae44 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -20,72 +21,28 @@
 #include "ccp-dev.h"
 
 /* SHA initial context values */
-static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
 	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
 	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
-	cpu_to_be32(SHA1_H4), 0, 0, 0,
+	cpu_to_be32(SHA1_H4),
 };
 
-static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
 	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
 	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
 	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
 	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
 };
 
-static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
 	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
 	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
 	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
 	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
 };
 
-static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count)
-{
-	int start;
-
-	for (;;) {
-		mutex_lock(&ccp->ksb_mutex);
-
-		start = (u32)bitmap_find_next_zero_area(ccp->ksb,
-							ccp->ksb_count,
-							ccp->ksb_start,
-							count, 0);
-		if (start <= ccp->ksb_count) {
-			bitmap_set(ccp->ksb, start, count);
-
-			mutex_unlock(&ccp->ksb_mutex);
-			break;
-		}
-
-		ccp->ksb_avail = 0;
-
-		mutex_unlock(&ccp->ksb_mutex);
-
-		/* Wait for KSB entries to become available */
-		if (wait_event_interruptible(ccp->ksb_queue, ccp->ksb_avail))
-			return 0;
-	}
-
-	return KSB_START + start;
-}
-
-static void ccp_free_ksb(struct ccp_device *ccp, unsigned int start,
-			 unsigned int count)
-{
-	if (!start)
-		return;
-
-	mutex_lock(&ccp->ksb_mutex);
-
-	bitmap_clear(ccp->ksb, start - KSB_START, count);
-
-	ccp->ksb_avail = 1;
-
-	mutex_unlock(&ccp->ksb_mutex);
-
-	wake_up_interruptible_all(&ccp->ksb_queue);
-}
+#define	CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
+					ccp_gen_jobid(ccp) : 0)
 
 static u32 ccp_gen_jobid(struct ccp_device *ccp)
 {
@@ -231,7 +188,7 @@
 				   unsigned int len, unsigned int se_len,
 				   bool sign_extend)
 {
-	unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
+	unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
 	u8 buffer[CCP_REVERSE_BUF_SIZE];
 
 	if (WARN_ON(se_len > sizeof(buffer)))
@@ -241,21 +198,21 @@
 	dm_offset = 0;
 	nbytes = len;
 	while (nbytes) {
-		ksb_len = min_t(unsigned int, nbytes, se_len);
-		sg_offset -= ksb_len;
+		sb_len = min_t(unsigned int, nbytes, se_len);
+		sg_offset -= sb_len;
 
-		scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 0);
-		for (i = 0; i < ksb_len; i++)
-			wa->address[dm_offset + i] = buffer[ksb_len - i - 1];
+		scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 0);
+		for (i = 0; i < sb_len; i++)
+			wa->address[dm_offset + i] = buffer[sb_len - i - 1];
 
-		dm_offset += ksb_len;
-		nbytes -= ksb_len;
+		dm_offset += sb_len;
+		nbytes -= sb_len;
 
-		if ((ksb_len != se_len) && sign_extend) {
+		if ((sb_len != se_len) && sign_extend) {
 			/* Must sign-extend to nearest sign-extend length */
 			if (wa->address[dm_offset - 1] & 0x80)
 				memset(wa->address + dm_offset, 0xff,
-				       se_len - ksb_len);
+				       se_len - sb_len);
 		}
 	}
 
@@ -266,22 +223,22 @@
 				    struct scatterlist *sg,
 				    unsigned int len)
 {
-	unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
+	unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
 	u8 buffer[CCP_REVERSE_BUF_SIZE];
 
 	sg_offset = 0;
 	dm_offset = len;
 	nbytes = len;
 	while (nbytes) {
-		ksb_len = min_t(unsigned int, nbytes, sizeof(buffer));
-		dm_offset -= ksb_len;
+		sb_len = min_t(unsigned int, nbytes, sizeof(buffer));
+		dm_offset -= sb_len;
 
-		for (i = 0; i < ksb_len; i++)
-			buffer[ksb_len - i - 1] = wa->address[dm_offset + i];
-		scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 1);
+		for (i = 0; i < sb_len; i++)
+			buffer[sb_len - i - 1] = wa->address[dm_offset + i];
+		scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 1);
 
-		sg_offset += ksb_len;
-		nbytes -= ksb_len;
+		sg_offset += sb_len;
+		nbytes -= sb_len;
 	}
 }
 
@@ -449,9 +406,9 @@
 	}
 }
 
-static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q,
-				struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
-				u32 byte_swap, bool from)
+static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
+			       struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
+			       u32 byte_swap, bool from)
 {
 	struct ccp_op op;
 
@@ -463,8 +420,8 @@
 
 	if (from) {
 		op.soc = 1;
-		op.src.type = CCP_MEMTYPE_KSB;
-		op.src.u.ksb = ksb;
+		op.src.type = CCP_MEMTYPE_SB;
+		op.src.u.sb = sb;
 		op.dst.type = CCP_MEMTYPE_SYSTEM;
 		op.dst.u.dma.address = wa->dma.address;
 		op.dst.u.dma.length = wa->length;
@@ -472,27 +429,27 @@
 		op.src.type = CCP_MEMTYPE_SYSTEM;
 		op.src.u.dma.address = wa->dma.address;
 		op.src.u.dma.length = wa->length;
-		op.dst.type = CCP_MEMTYPE_KSB;
-		op.dst.u.ksb = ksb;
+		op.dst.type = CCP_MEMTYPE_SB;
+		op.dst.u.sb = sb;
 	}
 
 	op.u.passthru.byte_swap = byte_swap;
 
-	return cmd_q->ccp->vdata->perform->perform_passthru(&op);
+	return cmd_q->ccp->vdata->perform->passthru(&op);
 }
 
-static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q,
-			   struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
-			   u32 byte_swap)
+static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
+			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
+			  u32 byte_swap)
 {
-	return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, false);
+	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
 }
 
-static int ccp_copy_from_ksb(struct ccp_cmd_queue *cmd_q,
-			     struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
-			     u32 byte_swap)
+static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
+			    struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
+			    u32 byte_swap)
 {
-	return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, true);
+	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
 }
 
 static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
@@ -527,54 +484,54 @@
 			return -EINVAL;
 	}
 
-	BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1);
-	BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1);
+	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
+	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);
 
 	ret = -EIO;
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
-	op.jobid = ccp_gen_jobid(cmd_q->ccp);
-	op.ksb_key = cmd_q->ksb_key;
-	op.ksb_ctx = cmd_q->ksb_ctx;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+	op.sb_key = cmd_q->sb_key;
+	op.sb_ctx = cmd_q->sb_ctx;
 	op.init = 1;
 	op.u.aes.type = aes->type;
 	op.u.aes.mode = aes->mode;
 	op.u.aes.action = aes->action;
 
-	/* All supported key sizes fit in a single (32-byte) KSB entry
+	/* All supported key sizes fit in a single (32-byte) SB entry
 	 * and must be in little endian format. Use the 256-bit byte
 	 * swap passthru option to convert from big endian to little
 	 * endian.
 	 */
 	ret = ccp_init_dm_workarea(&key, cmd_q,
-				   CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
+				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
 				   DMA_TO_DEVICE);
 	if (ret)
 		return ret;
 
-	dm_offset = CCP_KSB_BYTES - aes->key_len;
+	dm_offset = CCP_SB_BYTES - aes->key_len;
 	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
-	ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
-			      CCP_PASSTHRU_BYTESWAP_256BIT);
+	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_key;
 	}
 
-	/* The AES context fits in a single (32-byte) KSB entry and
+	/* The AES context fits in a single (32-byte) SB entry and
 	 * must be in little endian format. Use the 256-bit byte swap
 	 * passthru option to convert from big endian to little endian.
 	 */
 	ret = ccp_init_dm_workarea(&ctx, cmd_q,
-				   CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
+				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
 				   DMA_BIDIRECTIONAL);
 	if (ret)
 		goto e_key;
 
-	dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
 	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
-	ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
-			      CCP_PASSTHRU_BYTESWAP_256BIT);
+	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_ctx;
@@ -592,9 +549,9 @@
 			op.eom = 1;
 
 			/* Push the K1/K2 key to the CCP now */
-			ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid,
-						op.ksb_ctx,
-						CCP_PASSTHRU_BYTESWAP_256BIT);
+			ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
+					       op.sb_ctx,
+					       CCP_PASSTHRU_BYTESWAP_256BIT);
 			if (ret) {
 				cmd->engine_error = cmd_q->cmd_error;
 				goto e_src;
@@ -602,15 +559,15 @@
 
 			ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
 					aes->cmac_key_len);
-			ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
-					      CCP_PASSTHRU_BYTESWAP_256BIT);
+			ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+					     CCP_PASSTHRU_BYTESWAP_256BIT);
 			if (ret) {
 				cmd->engine_error = cmd_q->cmd_error;
 				goto e_src;
 			}
 		}
 
-		ret = cmd_q->ccp->vdata->perform->perform_aes(&op);
+		ret = cmd_q->ccp->vdata->perform->aes(&op);
 		if (ret) {
 			cmd->engine_error = cmd_q->cmd_error;
 			goto e_src;
@@ -622,15 +579,15 @@
 	/* Retrieve the AES context - convert from LE to BE using
 	 * 32-byte (256-bit) byteswapping
 	 */
-	ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
-				CCP_PASSTHRU_BYTESWAP_256BIT);
+	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			       CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_src;
 	}
 
 	/* ...but we only need AES_BLOCK_SIZE bytes */
-	dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
 	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
 
 e_src:
@@ -680,56 +637,56 @@
 			return -EINVAL;
 	}
 
-	BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1);
-	BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1);
+	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
+	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);
 
 	ret = -EIO;
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
-	op.jobid = ccp_gen_jobid(cmd_q->ccp);
-	op.ksb_key = cmd_q->ksb_key;
-	op.ksb_ctx = cmd_q->ksb_ctx;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+	op.sb_key = cmd_q->sb_key;
+	op.sb_ctx = cmd_q->sb_ctx;
 	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
 	op.u.aes.type = aes->type;
 	op.u.aes.mode = aes->mode;
 	op.u.aes.action = aes->action;
 
-	/* All supported key sizes fit in a single (32-byte) KSB entry
+	/* All supported key sizes fit in a single (32-byte) SB entry
 	 * and must be in little endian format. Use the 256-bit byte
 	 * swap passthru option to convert from big endian to little
 	 * endian.
 	 */
 	ret = ccp_init_dm_workarea(&key, cmd_q,
-				   CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
+				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
 				   DMA_TO_DEVICE);
 	if (ret)
 		return ret;
 
-	dm_offset = CCP_KSB_BYTES - aes->key_len;
+	dm_offset = CCP_SB_BYTES - aes->key_len;
 	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
-	ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
-			      CCP_PASSTHRU_BYTESWAP_256BIT);
+	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_key;
 	}
 
-	/* The AES context fits in a single (32-byte) KSB entry and
+	/* The AES context fits in a single (32-byte) SB entry and
 	 * must be in little endian format. Use the 256-bit byte swap
 	 * passthru option to convert from big endian to little endian.
 	 */
 	ret = ccp_init_dm_workarea(&ctx, cmd_q,
-				   CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
+				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
 				   DMA_BIDIRECTIONAL);
 	if (ret)
 		goto e_key;
 
 	if (aes->mode != CCP_AES_MODE_ECB) {
-		/* Load the AES context - conver to LE */
-		dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+		/* Load the AES context - convert to LE */
+		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
 		ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
-		ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
-				      CCP_PASSTHRU_BYTESWAP_256BIT);
+		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+				     CCP_PASSTHRU_BYTESWAP_256BIT);
 		if (ret) {
 			cmd->engine_error = cmd_q->cmd_error;
 			goto e_ctx;
@@ -772,7 +729,7 @@
 				op.soc = 1;
 		}
 
-		ret = cmd_q->ccp->vdata->perform->perform_aes(&op);
+		ret = cmd_q->ccp->vdata->perform->aes(&op);
 		if (ret) {
 			cmd->engine_error = cmd_q->cmd_error;
 			goto e_dst;
@@ -785,15 +742,15 @@
 		/* Retrieve the AES context - convert from LE to BE using
 		 * 32-byte (256-bit) byteswapping
 		 */
-		ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
-					CCP_PASSTHRU_BYTESWAP_256BIT);
+		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+				       CCP_PASSTHRU_BYTESWAP_256BIT);
 		if (ret) {
 			cmd->engine_error = cmd_q->cmd_error;
 			goto e_dst;
 		}
 
 		/* ...but we only need AES_BLOCK_SIZE bytes */
-		dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
 		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
 	}
 
@@ -857,53 +814,53 @@
 	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
 		return -EINVAL;
 
-	BUILD_BUG_ON(CCP_XTS_AES_KEY_KSB_COUNT != 1);
-	BUILD_BUG_ON(CCP_XTS_AES_CTX_KSB_COUNT != 1);
+	BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
+	BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);
 
 	ret = -EIO;
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
-	op.jobid = ccp_gen_jobid(cmd_q->ccp);
-	op.ksb_key = cmd_q->ksb_key;
-	op.ksb_ctx = cmd_q->ksb_ctx;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+	op.sb_key = cmd_q->sb_key;
+	op.sb_ctx = cmd_q->sb_ctx;
 	op.init = 1;
 	op.u.xts.action = xts->action;
 	op.u.xts.unit_size = xts->unit_size;
 
-	/* All supported key sizes fit in a single (32-byte) KSB entry
+	/* All supported key sizes fit in a single (32-byte) SB entry
 	 * and must be in little endian format. Use the 256-bit byte
 	 * swap passthru option to convert from big endian to little
 	 * endian.
 	 */
 	ret = ccp_init_dm_workarea(&key, cmd_q,
-				   CCP_XTS_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
+				   CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
 				   DMA_TO_DEVICE);
 	if (ret)
 		return ret;
 
-	dm_offset = CCP_KSB_BYTES - AES_KEYSIZE_128;
+	dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
 	ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
 	ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
-	ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
-			      CCP_PASSTHRU_BYTESWAP_256BIT);
+	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_key;
 	}
 
-	/* The AES context fits in a single (32-byte) KSB entry and
+	/* The AES context fits in a single (32-byte) SB entry and
 	 * for XTS is already in little endian format so no byte swapping
 	 * is needed.
 	 */
 	ret = ccp_init_dm_workarea(&ctx, cmd_q,
-				   CCP_XTS_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
+				   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
 				   DMA_BIDIRECTIONAL);
 	if (ret)
 		goto e_key;
 
 	ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
-	ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
-			      CCP_PASSTHRU_BYTESWAP_NOOP);
+	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			     CCP_PASSTHRU_BYTESWAP_NOOP);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_ctx;
@@ -937,7 +894,7 @@
 		if (!src.sg_wa.bytes_left)
 			op.eom = 1;
 
-		ret = cmd_q->ccp->vdata->perform->perform_xts_aes(&op);
+		ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
 		if (ret) {
 			cmd->engine_error = cmd_q->cmd_error;
 			goto e_dst;
@@ -949,15 +906,15 @@
 	/* Retrieve the AES context - convert from LE to BE using
 	 * 32-byte (256-bit) byteswapping
 	 */
-	ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
-				CCP_PASSTHRU_BYTESWAP_256BIT);
+	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			       CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_dst;
 	}
 
 	/* ...but we only need AES_BLOCK_SIZE bytes */
-	dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
 	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);
 
 e_dst:
@@ -982,164 +939,228 @@
 	struct ccp_dm_workarea ctx;
 	struct ccp_data src;
 	struct ccp_op op;
+	unsigned int ioffset, ooffset;
+	unsigned int digest_size;
+	int sb_count;
+	const void *init;
+	u64 block_size;
+	int ctx_size;
 	int ret;
 
-	if (sha->ctx_len != CCP_SHA_CTXSIZE)
+	switch (sha->type) {
+	case CCP_SHA_TYPE_1:
+		if (sha->ctx_len < SHA1_DIGEST_SIZE)
+			return -EINVAL;
+		block_size = SHA1_BLOCK_SIZE;
+		break;
+	case CCP_SHA_TYPE_224:
+		if (sha->ctx_len < SHA224_DIGEST_SIZE)
+			return -EINVAL;
+		block_size = SHA224_BLOCK_SIZE;
+		break;
+	case CCP_SHA_TYPE_256:
+		if (sha->ctx_len < SHA256_DIGEST_SIZE)
+			return -EINVAL;
+		block_size = SHA256_BLOCK_SIZE;
+		break;
+	default:
 		return -EINVAL;
+	}
 
 	if (!sha->ctx)
 		return -EINVAL;
 
-	if (!sha->final && (sha->src_len & (CCP_SHA_BLOCKSIZE - 1)))
+	if (!sha->final && (sha->src_len & (block_size - 1)))
 		return -EINVAL;
 
-	if (!sha->src_len) {
-		const u8 *sha_zero;
+	/* The version 3 device can't handle zero-length input */
+	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
 
-		/* Not final, just return */
-		if (!sha->final)
+		if (!sha->src_len) {
+			unsigned int digest_len;
+			const u8 *sha_zero;
+
+			/* Not final, just return */
+			if (!sha->final)
+				return 0;
+
+			/* CCP can't do a zero length sha operation so the
+			 * caller must buffer the data.
+			 */
+			if (sha->msg_bits)
+				return -EINVAL;
+
+			/* The CCP cannot perform zero-length sha operations
+			 * so the caller is required to buffer data for the
+			 * final operation. However, a sha operation for a
+			 * message with a total length of zero is valid so
+			 * known values are required to supply the result.
+			 */
+			switch (sha->type) {
+			case CCP_SHA_TYPE_1:
+				sha_zero = sha1_zero_message_hash;
+				digest_len = SHA1_DIGEST_SIZE;
+				break;
+			case CCP_SHA_TYPE_224:
+				sha_zero = sha224_zero_message_hash;
+				digest_len = SHA224_DIGEST_SIZE;
+				break;
+			case CCP_SHA_TYPE_256:
+				sha_zero = sha256_zero_message_hash;
+				digest_len = SHA256_DIGEST_SIZE;
+				break;
+			default:
+				return -EINVAL;
+			}
+
+			scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
+						 digest_len, 1);
+
 			return 0;
-
-		/* CCP can't do a zero length sha operation so the caller
-		 * must buffer the data.
-		 */
-		if (sha->msg_bits)
-			return -EINVAL;
-
-		/* The CCP cannot perform zero-length sha operations so the
-		 * caller is required to buffer data for the final operation.
-		 * However, a sha operation for a message with a total length
-		 * of zero is valid so known values are required to supply
-		 * the result.
-		 */
-		switch (sha->type) {
-		case CCP_SHA_TYPE_1:
-			sha_zero = sha1_zero_message_hash;
-			break;
-		case CCP_SHA_TYPE_224:
-			sha_zero = sha224_zero_message_hash;
-			break;
-		case CCP_SHA_TYPE_256:
-			sha_zero = sha256_zero_message_hash;
-			break;
-		default:
-			return -EINVAL;
 		}
-
-		scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
-					 sha->ctx_len, 1);
-
-		return 0;
 	}
 
-	if (!sha->src)
-		return -EINVAL;
+	/* Set variables used throughout */
+	switch (sha->type) {
+	case CCP_SHA_TYPE_1:
+		digest_size = SHA1_DIGEST_SIZE;
+		init = (void *) ccp_sha1_init;
+		ctx_size = SHA1_DIGEST_SIZE;
+		sb_count = 1;
+		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
+			ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		else
+			ooffset = ioffset = 0;
+		break;
+	case CCP_SHA_TYPE_224:
+		digest_size = SHA224_DIGEST_SIZE;
+		init = (void *) ccp_sha224_init;
+		ctx_size = SHA256_DIGEST_SIZE;
+		sb_count = 1;
+		ioffset = 0;
+		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
+			ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		else
+			ooffset = 0;
+		break;
+	case CCP_SHA_TYPE_256:
+		digest_size = SHA256_DIGEST_SIZE;
+		init = (void *) ccp_sha256_init;
+		ctx_size = SHA256_DIGEST_SIZE;
+		sb_count = 1;
+		ooffset = ioffset = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		goto e_data;
+	}
 
-	BUILD_BUG_ON(CCP_SHA_KSB_COUNT != 1);
+	/* For zero-length plaintext the src pointer is ignored;
+	 * otherwise both parts must be valid
+	 */
+	if (sha->src_len && !sha->src)
+		return -EINVAL;
 
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
-	op.jobid = ccp_gen_jobid(cmd_q->ccp);
-	op.ksb_ctx = cmd_q->ksb_ctx;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
 	op.u.sha.type = sha->type;
 	op.u.sha.msg_bits = sha->msg_bits;
 
-	/* The SHA context fits in a single (32-byte) KSB entry and
-	 * must be in little endian format. Use the 256-bit byte swap
-	 * passthru option to convert from big endian to little endian.
-	 */
-	ret = ccp_init_dm_workarea(&ctx, cmd_q,
-				   CCP_SHA_KSB_COUNT * CCP_KSB_BYTES,
+	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
 				   DMA_BIDIRECTIONAL);
 	if (ret)
 		return ret;
-
 	if (sha->first) {
-		const __be32 *init;
-
 		switch (sha->type) {
 		case CCP_SHA_TYPE_1:
-			init = ccp_sha1_init;
-			break;
 		case CCP_SHA_TYPE_224:
-			init = ccp_sha224_init;
-			break;
 		case CCP_SHA_TYPE_256:
-			init = ccp_sha256_init;
+			memcpy(ctx.address + ioffset, init, ctx_size);
 			break;
 		default:
 			ret = -EINVAL;
 			goto e_ctx;
 		}
-		memcpy(ctx.address, init, CCP_SHA_CTXSIZE);
 	} else {
-		ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
+		/* Restore the context */
+		ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
+				sb_count * CCP_SB_BYTES);
 	}
 
-	ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
-			      CCP_PASSTHRU_BYTESWAP_256BIT);
+	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_ctx;
 	}
 
-	/* Send data to the CCP SHA engine */
-	ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
-			    CCP_SHA_BLOCKSIZE, DMA_TO_DEVICE);
-	if (ret)
-		goto e_ctx;
+	if (sha->src) {
+		/* Send data to the CCP SHA engine; block_size is set above */
+		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
+				    block_size, DMA_TO_DEVICE);
+		if (ret)
+			goto e_ctx;
 
-	while (src.sg_wa.bytes_left) {
-		ccp_prepare_data(&src, NULL, &op, CCP_SHA_BLOCKSIZE, false);
-		if (sha->final && !src.sg_wa.bytes_left)
-			op.eom = 1;
+		while (src.sg_wa.bytes_left) {
+			ccp_prepare_data(&src, NULL, &op, block_size, false);
+			if (sha->final && !src.sg_wa.bytes_left)
+				op.eom = 1;
 
-		ret = cmd_q->ccp->vdata->perform->perform_sha(&op);
+			ret = cmd_q->ccp->vdata->perform->sha(&op);
+			if (ret) {
+				cmd->engine_error = cmd_q->cmd_error;
+				goto e_data;
+			}
+
+			ccp_process_data(&src, NULL, &op);
+		}
+	} else {
+		op.eom = 1;
+		ret = cmd_q->ccp->vdata->perform->sha(&op);
 		if (ret) {
 			cmd->engine_error = cmd_q->cmd_error;
 			goto e_data;
 		}
-
-		ccp_process_data(&src, NULL, &op);
 	}
 
 	/* Retrieve the SHA context - convert from LE to BE using
 	 * 32-byte (256-bit) byteswapping to BE
 	 */
-	ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
-				CCP_PASSTHRU_BYTESWAP_256BIT);
+	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			       CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_data;
 	}
 
-	ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
+	if (sha->final) {
+		/* Finishing up, so get the digest */
+		switch (sha->type) {
+		case CCP_SHA_TYPE_1:
+		case CCP_SHA_TYPE_224:
+		case CCP_SHA_TYPE_256:
+			ccp_get_dm_area(&ctx, ooffset,
+					sha->ctx, 0,
+					digest_size);
+			break;
+		default:
+			ret = -EINVAL;
+			goto e_ctx;
+		}
+	} else {
+		/* Stash the context */
+		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
+				sb_count * CCP_SB_BYTES);
+	}
 
 	if (sha->final && sha->opad) {
 		/* HMAC operation, recursively perform final SHA */
 		struct ccp_cmd hmac_cmd;
 		struct scatterlist sg;
-		u64 block_size, digest_size;
 		u8 *hmac_buf;
 
-		switch (sha->type) {
-		case CCP_SHA_TYPE_1:
-			block_size = SHA1_BLOCK_SIZE;
-			digest_size = SHA1_DIGEST_SIZE;
-			break;
-		case CCP_SHA_TYPE_224:
-			block_size = SHA224_BLOCK_SIZE;
-			digest_size = SHA224_DIGEST_SIZE;
-			break;
-		case CCP_SHA_TYPE_256:
-			block_size = SHA256_BLOCK_SIZE;
-			digest_size = SHA256_DIGEST_SIZE;
-			break;
-		default:
-			ret = -EINVAL;
-			goto e_data;
-		}
-
 		if (sha->opad_len != block_size) {
 			ret = -EINVAL;
 			goto e_data;
@@ -1153,7 +1174,18 @@
 		sg_init_one(&sg, hmac_buf, block_size + digest_size);
 
 		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
-		memcpy(hmac_buf + block_size, ctx.address, digest_size);
+		switch (sha->type) {
+		case CCP_SHA_TYPE_1:
+		case CCP_SHA_TYPE_224:
+		case CCP_SHA_TYPE_256:
+			memcpy(hmac_buf + block_size,
+			       ctx.address + ooffset,
+			       digest_size);
+			break;
+		default:
+			ret = -EINVAL;
+			goto e_ctx;
+		}
 
 		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
 		hmac_cmd.engine = CCP_ENGINE_SHA;
@@ -1176,7 +1208,8 @@
 	}
 
 e_data:
-	ccp_free_data(&src, cmd_q);
+	if (sha->src)
+		ccp_free_data(&src, cmd_q);
 
 e_ctx:
 	ccp_dm_free(&ctx);
@@ -1190,7 +1223,7 @@
 	struct ccp_dm_workarea exp, src;
 	struct ccp_data dst;
 	struct ccp_op op;
-	unsigned int ksb_count, i_len, o_len;
+	unsigned int sb_count, i_len, o_len;
 	int ret;
 
 	if (rsa->key_size > CCP_RSA_MAX_WIDTH)
@@ -1208,16 +1241,17 @@
 	o_len = ((rsa->key_size + 255) / 256) * 32;
 	i_len = o_len * 2;
 
-	ksb_count = o_len / CCP_KSB_BYTES;
+	sb_count = o_len / CCP_SB_BYTES;
 
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
 	op.jobid = ccp_gen_jobid(cmd_q->ccp);
-	op.ksb_key = ccp_alloc_ksb(cmd_q->ccp, ksb_count);
-	if (!op.ksb_key)
+	op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);
+
+	if (!op.sb_key)
 		return -EIO;
 
-	/* The RSA exponent may span multiple (32-byte) KSB entries and must
+	/* The RSA exponent may span multiple (32-byte) SB entries and must
 	 * be in little endian format. Reverse copy each 32-byte chunk
 	 * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
 	 * and each byte within that chunk and do not perform any byte swap
@@ -1225,14 +1259,14 @@
 	 */
 	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
 	if (ret)
-		goto e_ksb;
+		goto e_sb;
 
 	ret = ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len,
-				      CCP_KSB_BYTES, false);
+				      CCP_SB_BYTES, false);
 	if (ret)
 		goto e_exp;
-	ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key,
-			      CCP_PASSTHRU_BYTESWAP_NOOP);
+	ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
+			     CCP_PASSTHRU_BYTESWAP_NOOP);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_exp;
@@ -1247,12 +1281,12 @@
 		goto e_exp;
 
 	ret = ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len,
-				      CCP_KSB_BYTES, false);
+				      CCP_SB_BYTES, false);
 	if (ret)
 		goto e_src;
 	src.address += o_len;	/* Adjust the address for the copy operation */
 	ret = ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len,
-				      CCP_KSB_BYTES, false);
+				      CCP_SB_BYTES, false);
 	if (ret)
 		goto e_src;
 	src.address -= o_len;	/* Reset the address to original value */
@@ -1274,7 +1308,7 @@
 	op.u.rsa.mod_size = rsa->key_size;
 	op.u.rsa.input_len = i_len;
 
-	ret = cmd_q->ccp->vdata->perform->perform_rsa(&op);
+	ret = cmd_q->ccp->vdata->perform->rsa(&op);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_dst;
@@ -1291,8 +1325,8 @@
 e_exp:
 	ccp_dm_free(&exp);
 
-e_ksb:
-	ccp_free_ksb(cmd_q->ccp, op.ksb_key, ksb_count);
+e_sb:
+	cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
 
 	return ret;
 }
@@ -1306,7 +1340,7 @@
 	struct ccp_op op;
 	bool in_place = false;
 	unsigned int i;
-	int ret;
+	int ret = 0;
 
 	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
 		return -EINVAL;
@@ -1321,26 +1355,26 @@
 			return -EINVAL;
 	}
 
-	BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1);
+	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);
 
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
-	op.jobid = ccp_gen_jobid(cmd_q->ccp);
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
 
 	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
 		/* Load the mask */
-		op.ksb_key = cmd_q->ksb_key;
+		op.sb_key = cmd_q->sb_key;
 
 		ret = ccp_init_dm_workarea(&mask, cmd_q,
-					   CCP_PASSTHRU_KSB_COUNT *
-					   CCP_KSB_BYTES,
+					   CCP_PASSTHRU_SB_COUNT *
+					   CCP_SB_BYTES,
 					   DMA_TO_DEVICE);
 		if (ret)
 			return ret;
 
 		ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
-		ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key,
-				      CCP_PASSTHRU_BYTESWAP_NOOP);
+		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
+				     CCP_PASSTHRU_BYTESWAP_NOOP);
 		if (ret) {
 			cmd->engine_error = cmd_q->cmd_error;
 			goto e_mask;
@@ -1399,7 +1433,7 @@
 		op.dst.u.dma.offset = dst.sg_wa.sg_used;
 		op.dst.u.dma.length = op.src.u.dma.length;
 
-		ret = cmd_q->ccp->vdata->perform->perform_passthru(&op);
+		ret = cmd_q->ccp->vdata->perform->passthru(&op);
 		if (ret) {
 			cmd->engine_error = cmd_q->cmd_error;
 			goto e_dst;
@@ -1448,7 +1482,7 @@
 			return -EINVAL;
 	}
 
-	BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1);
+	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);
 
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
@@ -1456,13 +1490,13 @@
 
 	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
 		/* Load the mask */
-		op.ksb_key = cmd_q->ksb_key;
+		op.sb_key = cmd_q->sb_key;
 
 		mask.length = pt->mask_len;
 		mask.dma.address = pt->mask;
 		mask.dma.length = pt->mask_len;
 
-		ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key,
+		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
 				     CCP_PASSTHRU_BYTESWAP_NOOP);
 		if (ret) {
 			cmd->engine_error = cmd_q->cmd_error;
@@ -1484,7 +1518,7 @@
 	op.dst.u.dma.offset = 0;
 	op.dst.u.dma.length = pt->src_len;
 
-	ret = cmd_q->ccp->vdata->perform->perform_passthru(&op);
+	ret = cmd_q->ccp->vdata->perform->passthru(&op);
 	if (ret)
 		cmd->engine_error = cmd_q->cmd_error;
 
@@ -1514,7 +1548,7 @@
 
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
-	op.jobid = ccp_gen_jobid(cmd_q->ccp);
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
 
 	/* Concatenate the modulus and the operands. Both the modulus and
 	 * the operands must be in little endian format.  Since the input
@@ -1575,7 +1609,7 @@
 
 	op.u.ecc.function = cmd->u.ecc.function;
 
-	ret = cmd_q->ccp->vdata->perform->perform_ecc(&op);
+	ret = cmd_q->ccp->vdata->perform->ecc(&op);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_dst;
@@ -1639,7 +1673,7 @@
 
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
-	op.jobid = ccp_gen_jobid(cmd_q->ccp);
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
 
 	/* Concatenate the modulus and the operands. Both the modulus and
 	 * the operands must be in little endian format.  Since the input
@@ -1677,7 +1711,7 @@
 		goto e_src;
 	src.address += CCP_ECC_OPERAND_SIZE;
 
-	/* Set the first point Z coordianate to 1 */
+	/* Set the first point Z coordinate to 1 */
 	*src.address = 0x01;
 	src.address += CCP_ECC_OPERAND_SIZE;
 
@@ -1696,7 +1730,7 @@
 			goto e_src;
 		src.address += CCP_ECC_OPERAND_SIZE;
 
-		/* Set the second point Z coordianate to 1 */
+		/* Set the second point Z coordinate to 1 */
 		*src.address = 0x01;
 		src.address += CCP_ECC_OPERAND_SIZE;
 	} else {
@@ -1739,7 +1773,7 @@
 
 	op.u.ecc.function = cmd->u.ecc.function;
 
-	ret = cmd_q->ccp->vdata->perform->perform_ecc(&op);
+	ret = cmd_q->ccp->vdata->perform->ecc(&op);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_dst;
@@ -1810,7 +1844,7 @@
 	cmd->engine_error = 0;
 	cmd_q->cmd_error = 0;
 	cmd_q->int_rcvd = 0;
-	cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
+	cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);
 
 	switch (cmd->engine) {
 	case CCP_ENGINE_AES:
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 0bf262e..28a9996 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -25,9 +26,6 @@
 
 #include "ccp-dev.h"
 
-#define IO_BAR				2
-#define IO_OFFSET			0x20000
-
 #define MSIX_VECTORS			2
 
 struct ccp_msix {
@@ -143,10 +141,11 @@
 			free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
 				 dev);
 		pci_disable_msix(pdev);
-	} else {
+	} else if (ccp->irq) {
 		free_irq(ccp->irq, dev);
 		pci_disable_msi(pdev);
 	}
+	ccp->irq = 0;
 }
 
 static int ccp_find_mmio_area(struct ccp_device *ccp)
@@ -156,10 +155,11 @@
 	resource_size_t io_len;
 	unsigned long io_flags;
 
-	io_flags = pci_resource_flags(pdev, IO_BAR);
-	io_len = pci_resource_len(pdev, IO_BAR);
-	if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800)))
-		return IO_BAR;
+	io_flags = pci_resource_flags(pdev, ccp->vdata->bar);
+	io_len = pci_resource_len(pdev, ccp->vdata->bar);
+	if ((io_flags & IORESOURCE_MEM) &&
+	    (io_len >= (ccp->vdata->offset + 0x800)))
+		return ccp->vdata->bar;
 
 	return -EIO;
 }
@@ -216,7 +216,7 @@
 		dev_err(dev, "pci_iomap failed\n");
 		goto e_device;
 	}
-	ccp->io_regs = ccp->io_map + IO_OFFSET;
+	ccp->io_regs = ccp->io_map + ccp->vdata->offset;
 
 	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
 	if (ret) {
@@ -230,6 +230,9 @@
 
 	dev_set_drvdata(dev, ccp);
 
+	if (ccp->vdata->setup)
+		ccp->vdata->setup(ccp);
+
 	ret = ccp->vdata->perform->init(ccp);
 	if (ret)
 		goto e_iomap;
@@ -322,6 +325,8 @@
 
 static const struct pci_device_id ccp_pci_table[] = {
 	{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
+	{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5a },
+	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5b },
 	/* Last entry must be zero */
 	{ 0, }
 };
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index eee2c7e..e09d405 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -636,20 +636,12 @@
 
 static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg)
 {
-	u32 ret;
-
-	ret = readl(dev->bar[0] + reg);
-
-	return ret;
+	return readl(dev->bar[0] + reg);
 }
 
 static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg)
 {
-	u32 ret;
-
-	ret = readl(dev->bar[1] + reg);
-
-	return ret;
+	return readl(dev->bar[1] + reg);
 }
 
 static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 68e8aa9..a2e77b8 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -71,6 +71,7 @@
 #define DRIVER_FLAGS_MD5		BIT(21)
 
 #define IMG_HASH_QUEUE_LENGTH		20
+#define IMG_HASH_DMA_BURST		4
 #define IMG_HASH_DMA_THRESHOLD		64
 
 #ifdef __LITTLE_ENDIAN
@@ -102,8 +103,10 @@
 	unsigned long		op;
 
 	size_t			bufcnt;
-	u8 buffer[0] __aligned(sizeof(u32));
 	struct ahash_request	fallback_req;
+
+	/* Zero length buffer must remain last member of struct */
+	u8 buffer[0] __aligned(sizeof(u32));
 };
 
 struct img_hash_ctx {
@@ -340,7 +343,7 @@
 	dma_conf.direction = DMA_MEM_TO_DEV;
 	dma_conf.dst_addr = hdev->bus_addr;
 	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-	dma_conf.dst_maxburst = 16;
+	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
 	dma_conf.device_fc = false;
 
 	err = dmaengine_slave_config(hdev->dma_lch,  &dma_conf);
@@ -361,7 +364,7 @@
 	size_t nbytes, bleft, wsend, len, tbc;
 	struct scatterlist tsg;
 
-	if (!ctx->sg)
+	if (!hdev->req || !ctx->sg)
 		return;
 
 	addr = sg_virt(ctx->sg);
@@ -587,6 +590,32 @@
 	return crypto_ahash_finup(&rctx->fallback_req);
 }
 
+static int img_hash_import(struct ahash_request *req, const void *in)
+{
+	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+	rctx->fallback_req.base.flags = req->base.flags
+		& CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	return crypto_ahash_import(&rctx->fallback_req, in);
+}
+
+static int img_hash_export(struct ahash_request *req, void *out)
+{
+	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+	rctx->fallback_req.base.flags = req->base.flags
+		& CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	return crypto_ahash_export(&rctx->fallback_req, out);
+}
+
 static int img_hash_digest(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -643,10 +672,9 @@
 	return err;
 }
 
-static int img_hash_cra_init(struct crypto_tfm *tfm)
+static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
 {
 	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-	const char *alg_name = crypto_tfm_alg_name(tfm);
 	int err = -ENOMEM;
 
 	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
@@ -658,6 +686,7 @@
 	}
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 				 sizeof(struct img_hash_request_ctx) +
+				 crypto_ahash_reqsize(ctx->fallback) +
 				 IMG_HASH_DMA_THRESHOLD);
 
 	return 0;
@@ -666,6 +695,26 @@
 	return err;
 }
 
+static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
+{
+	return img_hash_cra_init(tfm, "md5-generic");
+}
+
+static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
+{
+	return img_hash_cra_init(tfm, "sha1-generic");
+}
+
+static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
+{
+	return img_hash_cra_init(tfm, "sha224-generic");
+}
+
+static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
+{
+	return img_hash_cra_init(tfm, "sha256-generic");
+}
+
 static void img_hash_cra_exit(struct crypto_tfm *tfm)
 {
 	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);
@@ -711,9 +760,12 @@
 		.update = img_hash_update,
 		.final = img_hash_final,
 		.finup = img_hash_finup,
+		.export = img_hash_export,
+		.import = img_hash_import,
 		.digest = img_hash_digest,
 		.halg = {
 			.digestsize = MD5_DIGEST_SIZE,
+			.statesize = sizeof(struct md5_state),
 			.base = {
 				.cra_name = "md5",
 				.cra_driver_name = "img-md5",
@@ -723,7 +775,7 @@
 				CRYPTO_ALG_NEED_FALLBACK,
 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct img_hash_ctx),
-				.cra_init = img_hash_cra_init,
+				.cra_init = img_hash_cra_md5_init,
 				.cra_exit = img_hash_cra_exit,
 				.cra_module = THIS_MODULE,
 			}
@@ -734,9 +786,12 @@
 		.update = img_hash_update,
 		.final = img_hash_final,
 		.finup = img_hash_finup,
+		.export = img_hash_export,
+		.import = img_hash_import,
 		.digest = img_hash_digest,
 		.halg = {
 			.digestsize = SHA1_DIGEST_SIZE,
+			.statesize = sizeof(struct sha1_state),
 			.base = {
 				.cra_name = "sha1",
 				.cra_driver_name = "img-sha1",
@@ -746,7 +801,7 @@
 				CRYPTO_ALG_NEED_FALLBACK,
 				.cra_blocksize = SHA1_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct img_hash_ctx),
-				.cra_init = img_hash_cra_init,
+				.cra_init = img_hash_cra_sha1_init,
 				.cra_exit = img_hash_cra_exit,
 				.cra_module = THIS_MODULE,
 			}
@@ -757,9 +812,12 @@
 		.update = img_hash_update,
 		.final = img_hash_final,
 		.finup = img_hash_finup,
+		.export = img_hash_export,
+		.import = img_hash_import,
 		.digest = img_hash_digest,
 		.halg = {
 			.digestsize = SHA224_DIGEST_SIZE,
+			.statesize = sizeof(struct sha256_state),
 			.base = {
 				.cra_name = "sha224",
 				.cra_driver_name = "img-sha224",
@@ -769,7 +827,7 @@
 				CRYPTO_ALG_NEED_FALLBACK,
 				.cra_blocksize = SHA224_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct img_hash_ctx),
-				.cra_init = img_hash_cra_init,
+				.cra_init = img_hash_cra_sha224_init,
 				.cra_exit = img_hash_cra_exit,
 				.cra_module = THIS_MODULE,
 			}
@@ -780,9 +838,12 @@
 		.update = img_hash_update,
 		.final = img_hash_final,
 		.finup = img_hash_finup,
+		.export = img_hash_export,
+		.import = img_hash_import,
 		.digest = img_hash_digest,
 		.halg = {
 			.digestsize = SHA256_DIGEST_SIZE,
+			.statesize = sizeof(struct sha256_state),
 			.base = {
 				.cra_name = "sha256",
 				.cra_driver_name = "img-sha256",
@@ -792,7 +853,7 @@
 				CRYPTO_ALG_NEED_FALLBACK,
 				.cra_blocksize = SHA256_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct img_hash_ctx),
-				.cra_init = img_hash_cra_init,
+				.cra_init = img_hash_cra_sha256_init,
 				.cra_exit = img_hash_cra_exit,
 				.cra_module = THIS_MODULE,
 			}
@@ -971,7 +1032,7 @@
 	err = img_register_algs(hdev);
 	if (err)
 		goto err_algs;
-	dev_dbg(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");
+	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");
 
 	return 0;
 
@@ -1013,11 +1074,38 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int img_hash_suspend(struct device *dev)
+{
+	struct img_hash_dev *hdev = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(hdev->hash_clk);
+	clk_disable_unprepare(hdev->sys_clk);
+
+	return 0;
+}
+
+static int img_hash_resume(struct device *dev)
+{
+	struct img_hash_dev *hdev = dev_get_drvdata(dev);
+
+	clk_prepare_enable(hdev->hash_clk);
+	clk_prepare_enable(hdev->sys_clk);
+
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops img_hash_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
+};
+
 static struct platform_driver img_hash_driver = {
 	.probe		= img_hash_probe,
 	.remove		= img_hash_remove,
 	.driver		= {
 		.name	= "img-hash-accelerator",
+		.pm	= &img_hash_pm_ops,
 		.of_match_table	= of_match_ptr(img_hash_match),
 	}
 };
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 2296934..7868765 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -447,9 +447,8 @@
 
 	if (!npe_running(npe_c)) {
 		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
-		if (ret) {
-			return ret;
-		}
+		if (ret)
+			goto npe_release;
 		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
 			goto npe_error;
 	} else {
@@ -473,7 +472,8 @@
 	default:
 		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
 			npe_name(npe_c));
-		return -ENODEV;
+		ret = -ENODEV;
+		goto npe_release;
 	}
 	/* buffer_pool will also be used to sometimes store the hmac,
 	 * so assure it is large enough
@@ -512,6 +512,7 @@
 err:
 	dma_pool_destroy(ctx_pool);
 	dma_pool_destroy(buffer_pool);
+npe_release:
 	npe_release(npe_c);
 	return ret;
 }
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index d64af86..37dadb2 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -166,6 +166,7 @@
 			if (!req)
 				break;
 
+			ctx = crypto_tfm_ctx(req->tfm);
 			mv_cesa_complete_req(ctx, req, 0);
 		}
 	}
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 82e0f4e6..9f28468 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -374,7 +374,7 @@
 	.complete = mv_cesa_ahash_complete,
 };
 
-static int mv_cesa_ahash_init(struct ahash_request *req,
+static void mv_cesa_ahash_init(struct ahash_request *req,
 			      struct mv_cesa_op_ctx *tmpl, bool algo_le)
 {
 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
@@ -390,8 +390,6 @@
 	creq->op_tmpl = *tmpl;
 	creq->len = 0;
 	creq->algo_le = algo_le;
-
-	return 0;
 }
 
 static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
@@ -405,15 +403,16 @@
 	return 0;
 }
 
-static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
+static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
 {
 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	bool cached = false;
 
-	if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) {
-		*cached = true;
+	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE && !creq->last_req) {
+		cached = true;
 
 		if (!req->nbytes)
-			return 0;
+			return cached;
 
 		sg_pcopy_to_buffer(req->src, creq->src_nents,
 				   creq->cache + creq->cache_ptr,
@@ -422,7 +421,7 @@
 		creq->cache_ptr += req->nbytes;
 	}
 
-	return 0;
+	return cached;
 }
 
 static struct mv_cesa_op_ctx *
@@ -455,7 +454,6 @@
 
 static int
 mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
-			    struct mv_cesa_ahash_dma_iter *dma_iter,
 			    struct mv_cesa_ahash_req *creq,
 			    gfp_t flags)
 {
@@ -586,7 +584,7 @@
 	 * Add the cache (left-over data from a previous block) first.
 	 * This will never overflow the SRAM size.
 	 */
-	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, &iter, creq, flags);
+	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
 	if (ret)
 		goto err_free_tdma;
 
@@ -668,7 +666,6 @@
 static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
 {
 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
-	int ret;
 
 	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
 	if (creq->src_nents < 0) {
@@ -676,17 +673,15 @@
 		return creq->src_nents;
 	}
 
-	ret = mv_cesa_ahash_cache_req(req, cached);
-	if (ret)
-		return ret;
+	*cached = mv_cesa_ahash_cache_req(req);
 
 	if (*cached)
 		return 0;
 
 	if (cesa_dev->caps->has_tdma)
-		ret = mv_cesa_ahash_dma_req_init(req);
-
-	return ret;
+		return mv_cesa_ahash_dma_req_init(req);
+	else
+		return 0;
 }
 
 static int mv_cesa_ahash_queue_req(struct ahash_request *req)
@@ -805,13 +800,14 @@
 	struct mv_cesa_op_ctx tmpl = { };
 
 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
+
+	mv_cesa_ahash_init(req, &tmpl, true);
+
 	creq->state[0] = MD5_H0;
 	creq->state[1] = MD5_H1;
 	creq->state[2] = MD5_H2;
 	creq->state[3] = MD5_H3;
 
-	mv_cesa_ahash_init(req, &tmpl, true);
-
 	return 0;
 }
 
@@ -873,14 +869,15 @@
 	struct mv_cesa_op_ctx tmpl = { };
 
 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
+
+	mv_cesa_ahash_init(req, &tmpl, false);
+
 	creq->state[0] = SHA1_H0;
 	creq->state[1] = SHA1_H1;
 	creq->state[2] = SHA1_H2;
 	creq->state[3] = SHA1_H3;
 	creq->state[4] = SHA1_H4;
 
-	mv_cesa_ahash_init(req, &tmpl, false);
-
 	return 0;
 }
 
@@ -942,6 +939,9 @@
 	struct mv_cesa_op_ctx tmpl = { };
 
 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
+
+	mv_cesa_ahash_init(req, &tmpl, false);
+
 	creq->state[0] = SHA256_H0;
 	creq->state[1] = SHA256_H1;
 	creq->state[2] = SHA256_H2;
@@ -951,8 +951,6 @@
 	creq->state[6] = SHA256_H6;
 	creq->state[7] = SHA256_H7;
 
-	mv_cesa_ahash_init(req, &tmpl, false);
-
 	return 0;
 }
 
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 86a065b..9fd7a5f 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -261,6 +261,7 @@
 	tdma->op = op;
 	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
 	tdma->src = cpu_to_le32(dma_handle);
+	tdma->dst = CESA_SA_CFG_SRAM_OFFSET;
 	tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP;
 
 	return op;
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index e6b658f..104e9ce 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1091,11 +1091,8 @@
 
 	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
 
-	if (pdev->dev.of_node)
-		irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
-	else
-		irq = platform_get_irq(pdev, 0);
-	if (irq < 0 || irq == NO_IRQ) {
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
 		ret = irq;
 		goto err;
 	}
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c
index ff383ef..ee4be1b0 100644
--- a/drivers/crypto/mxc-scc.c
+++ b/drivers/crypto/mxc-scc.c
@@ -668,7 +668,9 @@
 		return PTR_ERR(scc->clk);
 	}
 
-	clk_prepare_enable(scc->clk);
+	ret = clk_prepare_enable(scc->clk);
+	if (ret)
+		return ret;
 
 	/* clear error status register */
 	writel(0x0, scc->base + SCC_SCM_ERROR_STATUS);
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 4ab53a60..fe32dd9 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -35,7 +35,8 @@
 #include <linux/interrupt.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/internal/skcipher.h>
 
 #define DST_MAXBURST			4
 #define DMA_MIN				(DST_MAXBURST * sizeof(u32))
@@ -85,6 +86,8 @@
 #define AES_REG_IRQ_DATA_OUT           BIT(2)
 #define DEFAULT_TIMEOUT		(5*HZ)
 
+#define DEFAULT_AUTOSUSPEND_DELAY	1000
+
 #define FLAGS_MODE_MASK		0x000f
 #define FLAGS_ENCRYPT		BIT(0)
 #define FLAGS_CBC		BIT(1)
@@ -103,6 +106,7 @@
 	int		keylen;
 	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
 	unsigned long	flags;
+	struct crypto_skcipher	*fallback;
 };
 
 struct omap_aes_reqctx {
@@ -238,11 +242,19 @@
 
 static int omap_aes_hw_init(struct omap_aes_dev *dd)
 {
+	int err;
+
 	if (!(dd->flags & FLAGS_INIT)) {
 		dd->flags |= FLAGS_INIT;
 		dd->err = 0;
 	}
 
+	err = pm_runtime_get_sync(dd->dev);
+	if (err < 0) {
+		dev_err(dd->dev, "failed to get sync: %d\n", err);
+		return err;
+	}
+
 	return 0;
 }
 
@@ -319,20 +331,12 @@
 
 static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
 {
-	struct omap_aes_dev *dd = NULL, *tmp;
+	struct omap_aes_dev *dd;
 
 	spin_lock_bh(&list_lock);
-	if (!ctx->dd) {
-		list_for_each_entry(tmp, &dev_list, list) {
-			/* FIXME: take fist available aes core */
-			dd = tmp;
-			break;
-		}
-		ctx->dd = dd;
-	} else {
-		/* already found before */
-		dd = ctx->dd;
-	}
+	dd = list_first_entry(&dev_list, struct omap_aes_dev, list);
+	list_move_tail(&dd->list, &dev_list);
+	ctx->dd = dd;
 	spin_unlock_bh(&list_lock);
 
 	return dd;
@@ -519,7 +523,10 @@
 
 	pr_debug("err: %d\n", err);
 
-	crypto_finalize_request(dd->engine, req, err);
+	crypto_finalize_cipher_request(dd->engine, req, err);
+
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
 }
 
 static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
@@ -592,7 +599,7 @@
 				 struct ablkcipher_request *req)
 {
 	if (req)
-		return crypto_transfer_request_to_engine(dd->engine, req);
+		return crypto_transfer_cipher_request_to_engine(dd->engine, req);
 
 	return 0;
 }
@@ -602,7 +609,7 @@
 {
 	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
 			crypto_ablkcipher_reqtfm(req));
-	struct omap_aes_dev *dd = omap_aes_find_dev(ctx);
+	struct omap_aes_dev *dd = ctx->dd;
 	struct omap_aes_reqctx *rctx;
 
 	if (!dd)
@@ -648,7 +655,7 @@
 {
 	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
 			crypto_ablkcipher_reqtfm(req));
-	struct omap_aes_dev *dd = omap_aes_find_dev(ctx);
+	struct omap_aes_dev *dd = ctx->dd;
 
 	if (!dd)
 		return -ENODEV;
@@ -696,11 +703,29 @@
 			crypto_ablkcipher_reqtfm(req));
 	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 	struct omap_aes_dev *dd;
+	int ret;
 
 	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
 		  !!(mode & FLAGS_ENCRYPT),
 		  !!(mode & FLAGS_CBC));
 
+	if (req->nbytes < 200) {
+		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+		skcipher_request_set_tfm(subreq, ctx->fallback);
+		skcipher_request_set_callback(subreq, req->base.flags, NULL,
+					      NULL);
+		skcipher_request_set_crypt(subreq, req->src, req->dst,
+					   req->nbytes, req->info);
+
+		if (mode & FLAGS_ENCRYPT)
+			ret = crypto_skcipher_encrypt(subreq);
+		else
+			ret = crypto_skcipher_decrypt(subreq);
+
+		skcipher_request_zero(subreq);
+		return ret;
+	}
 	dd = omap_aes_find_dev(ctx);
 	if (!dd)
 		return -ENODEV;
@@ -716,6 +741,7 @@
 			   unsigned int keylen)
 {
 	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	int ret;
 
 	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
 		   keylen != AES_KEYSIZE_256)
@@ -726,6 +752,14 @@
 	memcpy(ctx->key, key, keylen);
 	ctx->keylen = keylen;
 
+	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+						 CRYPTO_TFM_REQ_MASK);
+
+	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	if (!ret)
+		return 0;
+
 	return 0;
 }
 
@@ -761,22 +795,16 @@
 
 static int omap_aes_cra_init(struct crypto_tfm *tfm)
 {
-	struct omap_aes_dev *dd = NULL;
-	int err;
+	const char *name = crypto_tfm_alg_name(tfm);
+	const u32 flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
+	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_skcipher *blk;
 
-	/* Find AES device, currently picks the first device */
-	spin_lock_bh(&list_lock);
-	list_for_each_entry(dd, &dev_list, list) {
-		break;
-	}
-	spin_unlock_bh(&list_lock);
+	blk = crypto_alloc_skcipher(name, 0, flags);
+	if (IS_ERR(blk))
+		return PTR_ERR(blk);
 
-	err = pm_runtime_get_sync(dd->dev);
-	if (err < 0) {
-		dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
-			__func__, err);
-		return err;
-	}
+	ctx->fallback = blk;
 
 	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
 
@@ -785,16 +813,12 @@
 
 static void omap_aes_cra_exit(struct crypto_tfm *tfm)
 {
-	struct omap_aes_dev *dd = NULL;
+	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	/* Find AES device, currently picks the first device */
-	spin_lock_bh(&list_lock);
-	list_for_each_entry(dd, &dev_list, list) {
-		break;
-	}
-	spin_unlock_bh(&list_lock);
+	if (ctx->fallback)
+		crypto_free_skcipher(ctx->fallback);
 
-	pm_runtime_put_sync(dd->dev);
+	ctx->fallback = NULL;
 }
 
 /* ********************** ALGS ************************************ */
@@ -806,7 +830,7 @@
 	.cra_priority		= 300,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
 				  CRYPTO_ALG_KERN_DRIVER_ONLY |
-				  CRYPTO_ALG_ASYNC,
+				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
 	.cra_alignmask		= 0,
@@ -828,7 +852,7 @@
 	.cra_priority		= 300,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
 				  CRYPTO_ALG_KERN_DRIVER_ONLY |
-				  CRYPTO_ALG_ASYNC,
+				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
 	.cra_alignmask		= 0,
@@ -854,7 +878,7 @@
 	.cra_priority		= 300,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
 				  CRYPTO_ALG_KERN_DRIVER_ONLY |
-				  CRYPTO_ALG_ASYNC,
+				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
 	.cra_alignmask		= 0,
@@ -1140,6 +1164,9 @@
 	}
 	dd->phys_base = res.start;
 
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
+
 	pm_runtime_enable(dev);
 	err = pm_runtime_get_sync(dev);
 	if (err < 0) {
@@ -1186,6 +1213,19 @@
 	list_add_tail(&dd->list, &dev_list);
 	spin_unlock(&list_lock);
 
+	/* Initialize crypto engine */
+	dd->engine = crypto_engine_alloc_init(dev, 1);
+	if (!dd->engine) {
+		err = -ENOMEM;
+		goto err_engine;
+	}
+
+	dd->engine->prepare_cipher_request = omap_aes_prepare_req;
+	dd->engine->cipher_one_request = omap_aes_crypt_req;
+	err = crypto_engine_start(dd->engine);
+	if (err)
+		goto err_engine;
+
 	for (i = 0; i < dd->pdata->algs_info_size; i++) {
 		if (!dd->pdata->algs_info[i].registered) {
 			for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
@@ -1203,26 +1243,17 @@
 		}
 	}
 
-	/* Initialize crypto engine */
-	dd->engine = crypto_engine_alloc_init(dev, 1);
-	if (!dd->engine)
-		goto err_algs;
-
-	dd->engine->prepare_request = omap_aes_prepare_req;
-	dd->engine->crypt_one_request = omap_aes_crypt_req;
-	err = crypto_engine_start(dd->engine);
-	if (err)
-		goto err_engine;
-
 	return 0;
-err_engine:
-	crypto_engine_exit(dd->engine);
 err_algs:
 	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
 		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
 			crypto_unregister_alg(
 					&dd->pdata->algs_info[i].algs_list[j]);
 
+err_engine:
+	if (dd->engine)
+		crypto_engine_exit(dd->engine);
+
 	omap_aes_dma_cleanup(dd);
 err_irq:
 	tasklet_kill(&dd->done_task);
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 5691434..a6f6553 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -39,6 +39,7 @@
 #include <crypto/scatterwalk.h>
 #include <crypto/des.h>
 #include <crypto/algapi.h>
+#include <crypto/engine.h>
 
 #define DST_MAXBURST			2
 
@@ -506,7 +507,7 @@
 	pr_debug("err: %d\n", err);
 
 	pm_runtime_put(dd->dev);
-	crypto_finalize_request(dd->engine, req, err);
+	crypto_finalize_cipher_request(dd->engine, req, err);
 }
 
 static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
@@ -574,7 +575,7 @@
 				 struct ablkcipher_request *req)
 {
 	if (req)
-		return crypto_transfer_request_to_engine(dd->engine, req);
+		return crypto_transfer_cipher_request_to_engine(dd->engine, req);
 
 	return 0;
 }
@@ -1078,6 +1079,19 @@
 	list_add_tail(&dd->list, &dev_list);
 	spin_unlock(&list_lock);
 
+	/* Initialize des crypto engine */
+	dd->engine = crypto_engine_alloc_init(dev, 1);
+	if (!dd->engine) {
+		err = -ENOMEM;
+		goto err_engine;
+	}
+
+	dd->engine->prepare_cipher_request = omap_des_prepare_req;
+	dd->engine->cipher_one_request = omap_des_crypt_req;
+	err = crypto_engine_start(dd->engine);
+	if (err)
+		goto err_engine;
+
 	for (i = 0; i < dd->pdata->algs_info_size; i++) {
 		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
 			algp = &dd->pdata->algs_info[i].algs_list[j];
@@ -1093,27 +1107,18 @@
 		}
 	}
 
-	/* Initialize des crypto engine */
-	dd->engine = crypto_engine_alloc_init(dev, 1);
-	if (!dd->engine)
-		goto err_algs;
-
-	dd->engine->prepare_request = omap_des_prepare_req;
-	dd->engine->crypt_one_request = omap_des_crypt_req;
-	err = crypto_engine_start(dd->engine);
-	if (err)
-		goto err_engine;
-
 	return 0;
 
-err_engine:
-	crypto_engine_exit(dd->engine);
 err_algs:
 	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
 		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
 			crypto_unregister_alg(
 					&dd->pdata->algs_info[i].algs_list[j]);
 
+err_engine:
+	if (dd->engine)
+		crypto_engine_exit(dd->engine);
+
 	omap_des_dma_cleanup(dd);
 err_irq:
 	tasklet_kill(&dd->done_task);
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 7fe4eef..d0b16e5 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -112,9 +112,10 @@
 #define FLAGS_DMA_READY		6
 #define FLAGS_AUTO_XOR		7
 #define FLAGS_BE32_SHA1		8
+#define FLAGS_SGS_COPIED	9
+#define FLAGS_SGS_ALLOCED	10
 /* context flags */
 #define FLAGS_FINUP		16
-#define FLAGS_SG		17
 
 #define FLAGS_MODE_SHIFT	18
 #define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK	<< FLAGS_MODE_SHIFT)
@@ -134,7 +135,8 @@
 #define OMAP_ALIGN_MASK		(sizeof(u32)-1)
 #define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))
 
-#define BUFLEN			PAGE_SIZE
+#define BUFLEN			SHA512_BLOCK_SIZE
+#define OMAP_SHA_DMA_THRESHOLD	256
 
 struct omap_sham_dev;
 
@@ -147,12 +149,12 @@
 	size_t			digcnt;
 	size_t			bufcnt;
 	size_t			buflen;
-	dma_addr_t		dma_addr;
 
 	/* walk state */
 	struct scatterlist	*sg;
-	struct scatterlist	sgl;
-	unsigned int		offset;	/* offset in current sg */
+	struct scatterlist	sgl[2];
+	int			offset;	/* offset in current sg */
+	int			sg_len;
 	unsigned int		total;	/* total request */
 
 	u8			buffer[0] OMAP_ALIGNED;
@@ -223,6 +225,7 @@
 	struct dma_chan		*dma_lch;
 	struct tasklet_struct	done_task;
 	u8			polling_mode;
+	u8			xmit_buf[BUFLEN];
 
 	unsigned long		flags;
 	struct crypto_queue	queue;
@@ -510,12 +513,14 @@
 			      SHA_REG_IRQSTATUS_INPUT_RDY);
 }
 
-static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
-			      size_t length, int final)
+static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
+			      int final)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 	int count, len32, bs32, offset = 0;
-	const u32 *buffer = (const u32 *)buf;
+	const u32 *buffer;
+	int mlen;
+	struct sg_mapping_iter mi;
 
 	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
 						ctx->digcnt, length, final);
@@ -525,6 +530,7 @@
 
 	/* should be non-zero before next lines to disable clocks later */
 	ctx->digcnt += length;
+	ctx->total -= length;
 
 	if (final)
 		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
@@ -534,16 +540,35 @@
 	len32 = DIV_ROUND_UP(length, sizeof(u32));
 	bs32 = get_block_size(ctx) / sizeof(u32);
 
+	sg_miter_start(&mi, ctx->sg, ctx->sg_len,
+		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+
+	mlen = 0;
+
 	while (len32) {
 		if (dd->pdata->poll_irq(dd))
 			return -ETIMEDOUT;
 
-		for (count = 0; count < min(len32, bs32); count++, offset++)
+		for (count = 0; count < min(len32, bs32); count++, offset++) {
+			if (!mlen) {
+				sg_miter_next(&mi);
+				mlen = mi.length;
+				if (!mlen) {
+					pr_err("sg miter failure.\n");
+					return -EINVAL;
+				}
+				offset = 0;
+				buffer = mi.addr;
+			}
 			omap_sham_write(dd, SHA_REG_DIN(dd, count),
 					buffer[offset]);
+			mlen -= 4;
+		}
 		len32 -= min(len32, bs32);
 	}
 
+	sg_miter_stop(&mi);
+
 	return -EINPROGRESS;
 }
 
@@ -555,22 +580,27 @@
 	tasklet_schedule(&dd->done_task);
 }
 
-static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
-			      size_t length, int final, int is_sg)
+static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
+			      int final)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 	struct dma_async_tx_descriptor *tx;
 	struct dma_slave_config cfg;
-	int len32, ret, dma_min = get_block_size(ctx);
+	int ret;
 
 	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
 						ctx->digcnt, length, final);
 
+	if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
+		dev_err(dd->dev, "dma_map_sg error\n");
+		return -EINVAL;
+	}
+
 	memset(&cfg, 0, sizeof(cfg));
 
 	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
 	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-	cfg.dst_maxburst = dma_min / DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES;
 
 	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
 	if (ret) {
@@ -578,30 +608,12 @@
 		return ret;
 	}
 
-	len32 = DIV_ROUND_UP(length, dma_min) * dma_min;
-
-	if (is_sg) {
-		/*
-		 * The SG entry passed in may not have the 'length' member
-		 * set correctly so use a local SG entry (sgl) with the
-		 * proper value for 'length' instead.  If this is not done,
-		 * the dmaengine may try to DMA the incorrect amount of data.
-		 */
-		sg_init_table(&ctx->sgl, 1);
-		sg_assign_page(&ctx->sgl, sg_page(ctx->sg));
-		ctx->sgl.offset = ctx->sg->offset;
-		sg_dma_len(&ctx->sgl) = len32;
-		sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);
-
-		tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1,
-			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	} else {
-		tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32,
-			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	}
+	tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len,
+				     DMA_MEM_TO_DEV,
+				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	if (!tx) {
-		dev_err(dd->dev, "prep_slave_sg/single() failed\n");
+		dev_err(dd->dev, "prep_slave_sg failed\n");
 		return -EINVAL;
 	}
 
@@ -611,6 +623,7 @@
 	dd->pdata->write_ctrl(dd, length, final, 1);
 
 	ctx->digcnt += length;
+	ctx->total -= length;
 
 	if (final)
 		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
@@ -625,188 +638,256 @@
 	return -EINPROGRESS;
 }
 
-static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
-				const u8 *data, size_t length)
+static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
+				   struct scatterlist *sg, int bs, int new_len)
 {
-	size_t count = min(length, ctx->buflen - ctx->bufcnt);
+	int n = sg_nents(sg);
+	struct scatterlist *tmp;
+	int offset = ctx->offset;
 
-	count = min(count, ctx->total);
-	if (count <= 0)
-		return 0;
-	memcpy(ctx->buffer + ctx->bufcnt, data, count);
-	ctx->bufcnt += count;
+	if (ctx->bufcnt)
+		n++;
 
-	return count;
+	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
+	if (!ctx->sg)
+		return -ENOMEM;
+
+	sg_init_table(ctx->sg, n);
+
+	tmp = ctx->sg;
+
+	ctx->sg_len = 0;
+
+	if (ctx->bufcnt) {
+		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
+		tmp = sg_next(tmp);
+		ctx->sg_len++;
+	}
+
+	while (sg && new_len) {
+		int len = sg->length - offset;
+
+		if (offset) {
+			offset -= sg->length;
+			if (offset < 0)
+				offset = 0;
+		}
+
+		if (new_len < len)
+			len = new_len;
+
+		if (len > 0) {
+			new_len -= len;
+			sg_set_page(tmp, sg_page(sg), len, sg->offset);
+			if (new_len <= 0)
+				sg_mark_end(tmp);
+			tmp = sg_next(tmp);
+			ctx->sg_len++;
+		}
+
+		sg = sg_next(sg);
+	}
+
+	set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);
+
+	ctx->bufcnt = 0;
+
+	return 0;
 }
 
-static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
+static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
+			      struct scatterlist *sg, int bs, int new_len)
 {
-	size_t count;
-	const u8 *vaddr;
+	int pages;
+	void *buf;
+	int len;
 
-	while (ctx->sg) {
-		vaddr = kmap_atomic(sg_page(ctx->sg));
-		vaddr += ctx->sg->offset;
+	len = new_len + ctx->bufcnt;
 
-		count = omap_sham_append_buffer(ctx,
-				vaddr + ctx->offset,
-				ctx->sg->length - ctx->offset);
+	pages = get_order(ctx->total);
 
-		kunmap_atomic((void *)vaddr);
+	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
+	if (!buf) {
+		pr_err("Couldn't allocate pages for unaligned cases.\n");
+		return -ENOMEM;
+	}
 
-		if (!count)
+	if (ctx->bufcnt)
+		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
+
+	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
+				 ctx->total - ctx->bufcnt, 0);
+	sg_init_table(ctx->sgl, 1);
+	sg_set_buf(ctx->sgl, buf, len);
+	ctx->sg = ctx->sgl;
+	set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
+	ctx->sg_len = 1;
+	ctx->bufcnt = 0;
+	ctx->offset = 0;
+
+	return 0;
+}
+
+static int omap_sham_align_sgs(struct scatterlist *sg,
+			       int nbytes, int bs, bool final,
+			       struct omap_sham_reqctx *rctx)
+{
+	int n = 0;
+	bool aligned = true;
+	bool list_ok = true;
+	struct scatterlist *sg_tmp = sg;
+	int new_len;
+	int offset = rctx->offset;
+
+	if (!sg || !sg->length || !nbytes)
+		return 0;
+
+	new_len = nbytes;
+
+	if (offset)
+		list_ok = false;
+
+	if (final)
+		new_len = DIV_ROUND_UP(new_len, bs) * bs;
+	else
+		new_len = new_len / bs * bs;
+
+	while (nbytes > 0 && sg_tmp) {
+		n++;
+
+		if (offset < sg_tmp->length) {
+			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
+				aligned = false;
+				break;
+			}
+
+			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
+				aligned = false;
+				break;
+			}
+		}
+
+		if (offset) {
+			offset -= sg_tmp->length;
+			if (offset < 0) {
+				nbytes += offset;
+				offset = 0;
+			}
+		} else {
+			nbytes -= sg_tmp->length;
+		}
+
+		sg_tmp = sg_next(sg_tmp);
+
+		if (nbytes < 0) {
+			list_ok = false;
 			break;
-		ctx->offset += count;
-		ctx->total -= count;
-		if (ctx->offset == ctx->sg->length) {
-			ctx->sg = sg_next(ctx->sg);
-			if (ctx->sg)
-				ctx->offset = 0;
-			else
-				ctx->total = 0;
 		}
 	}
 
+	if (!aligned)
+		return omap_sham_copy_sgs(rctx, sg, bs, new_len);
+	else if (!list_ok)
+		return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);
+
+	rctx->sg_len = n;
+	rctx->sg = sg;
+
 	return 0;
 }
 
-static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
-					struct omap_sham_reqctx *ctx,
-					size_t length, int final)
+static int omap_sham_prepare_request(struct ahash_request *req, bool update)
 {
+	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
+	int bs;
 	int ret;
+	int nbytes;
+	bool final = rctx->flags & BIT(FLAGS_FINUP);
+	int xmit_len, hash_later;
 
-	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
-				       DMA_TO_DEVICE);
-	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
-		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
-		return -EINVAL;
-	}
-
-	ctx->flags &= ~BIT(FLAGS_SG);
-
-	ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0);
-	if (ret != -EINPROGRESS)
-		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
-				 DMA_TO_DEVICE);
-
-	return ret;
-}
-
-static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
-{
-	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-	unsigned int final;
-	size_t count;
-
-	omap_sham_append_sg(ctx);
-
-	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
-
-	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
-					 ctx->bufcnt, ctx->digcnt, final);
-
-	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
-		count = ctx->bufcnt;
-		ctx->bufcnt = 0;
-		return omap_sham_xmit_dma_map(dd, ctx, count, final);
-	}
-
-	return 0;
-}
-
-/* Start address alignment */
-#define SG_AA(sg)	(IS_ALIGNED(sg->offset, sizeof(u32)))
-/* SHA1 block size alignment */
-#define SG_SA(sg, bs)	(IS_ALIGNED(sg->length, bs))
-
-static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
-{
-	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-	unsigned int length, final, tail;
-	struct scatterlist *sg;
-	int ret, bs;
-
-	if (!ctx->total)
+	if (!req)
 		return 0;
 
-	if (ctx->bufcnt || ctx->offset)
-		return omap_sham_update_dma_slow(dd);
+	bs = get_block_size(rctx);
 
-	/*
-	 * Don't use the sg interface when the transfer size is less
-	 * than the number of elements in a DMA frame.  Otherwise,
-	 * the dmaengine infrastructure will calculate that it needs
-	 * to transfer 0 frames which ultimately fails.
-	 */
-	if (ctx->total < get_block_size(ctx))
-		return omap_sham_update_dma_slow(dd);
+	if (update)
+		nbytes = req->nbytes;
+	else
+		nbytes = 0;
 
-	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
-			ctx->digcnt, ctx->bufcnt, ctx->total);
+	rctx->total = nbytes + rctx->bufcnt;
 
-	sg = ctx->sg;
-	bs = get_block_size(ctx);
+	if (!rctx->total)
+		return 0;
 
-	if (!SG_AA(sg))
-		return omap_sham_update_dma_slow(dd);
+	if (nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
+		int len = bs - rctx->bufcnt % bs;
 
-	if (!sg_is_last(sg) && !SG_SA(sg, bs))
-		/* size is not BLOCK_SIZE aligned */
-		return omap_sham_update_dma_slow(dd);
+		if (len > nbytes)
+			len = nbytes;
+		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
+					 0, len, 0);
+		rctx->bufcnt += len;
+		nbytes -= len;
+		rctx->offset = len;
+	}
 
-	length = min(ctx->total, sg->length);
+	if (rctx->bufcnt)
+		memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);
 
-	if (sg_is_last(sg)) {
-		if (!(ctx->flags & BIT(FLAGS_FINUP))) {
-			/* not last sg must be BLOCK_SIZE aligned */
-			tail = length & (bs - 1);
-			/* without finup() we need one block to close hash */
-			if (!tail)
-				tail = bs;
-			length -= tail;
+	ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
+	if (ret)
+		return ret;
+
+	xmit_len = rctx->total;
+
+	if (!IS_ALIGNED(xmit_len, bs)) {
+		if (final)
+			xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs;
+		else
+			xmit_len = xmit_len / bs * bs;
+	}
+
+	hash_later = rctx->total - xmit_len;
+	if (hash_later < 0)
+		hash_later = 0;
+
+	if (rctx->bufcnt && nbytes) {
+		/* have data from previous operation and current */
+		sg_init_table(rctx->sgl, 2);
+		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
+
+		sg_chain(rctx->sgl, 2, req->src);
+
+		rctx->sg = rctx->sgl;
+
+		rctx->sg_len++;
+	} else if (rctx->bufcnt) {
+		/* have buffered data only */
+		sg_init_table(rctx->sgl, 1);
+		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len);
+
+		rctx->sg = rctx->sgl;
+
+		rctx->sg_len = 1;
+	}
+
+	if (hash_later) {
+		if (req->nbytes) {
+			scatterwalk_map_and_copy(rctx->buffer, req->src,
+						 req->nbytes - hash_later,
+						 hash_later, 0);
+		} else {
+			memcpy(rctx->buffer, rctx->buffer + xmit_len,
+			       hash_later);
 		}
+		rctx->bufcnt = hash_later;
+	} else {
+		rctx->bufcnt = 0;
 	}
 
-	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
-		dev_err(dd->dev, "dma_map_sg  error\n");
-		return -EINVAL;
-	}
-
-	ctx->flags |= BIT(FLAGS_SG);
-
-	ctx->total -= length;
-	ctx->offset = length; /* offset where to start slow */
-
-	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
-
-	ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1);
-	if (ret != -EINPROGRESS)
-		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
-
-	return ret;
-}
-
-static int omap_sham_update_cpu(struct omap_sham_dev *dd)
-{
-	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-	int bufcnt, final;
-
-	if (!ctx->total)
-		return 0;
-
-	omap_sham_append_sg(ctx);
-
-	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
-
-	dev_dbg(dd->dev, "cpu: bufcnt: %u, digcnt: %d, final: %d\n",
-		ctx->bufcnt, ctx->digcnt, final);
-
-	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
-		bufcnt = ctx->bufcnt;
-		ctx->bufcnt = 0;
-		return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final);
-	}
+	if (!final)
+		rctx->total = xmit_len;
 
 	return 0;
 }
@@ -815,18 +896,9 @@
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 
+	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
 
-	if (ctx->flags & BIT(FLAGS_SG)) {
-		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
-		if (ctx->sg->length == ctx->offset) {
-			ctx->sg = sg_next(ctx->sg);
-			if (ctx->sg)
-				ctx->offset = 0;
-		}
-	} else {
-		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
-				 DMA_TO_DEVICE);
-	}
+	clear_bit(FLAGS_DMA_ACTIVE, &dd->flags);
 
 	return 0;
 }
@@ -887,6 +959,8 @@
 
 	ctx->bufcnt = 0;
 	ctx->digcnt = 0;
+	ctx->total = 0;
+	ctx->offset = 0;
 	ctx->buflen = BUFLEN;
 
 	if (tctx->flags & BIT(FLAGS_HMAC)) {
@@ -909,14 +983,19 @@
 	struct ahash_request *req = dd->req;
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 	int err;
+	bool final = ctx->flags & BIT(FLAGS_FINUP);
 
 	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
 		 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
 
+	if (ctx->total < get_block_size(ctx) ||
+	    ctx->total < OMAP_SHA_DMA_THRESHOLD)
+		ctx->flags |= BIT(FLAGS_CPU);
+
 	if (ctx->flags & BIT(FLAGS_CPU))
-		err = omap_sham_update_cpu(dd);
+		err = omap_sham_xmit_cpu(dd, ctx->total, final);
 	else
-		err = omap_sham_update_dma_start(dd);
+		err = omap_sham_xmit_dma(dd, ctx->total, final);
 
 	/* wait for dma completion before can take more data */
 	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
@@ -930,7 +1009,7 @@
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 	int err = 0, use_dma = 1;
 
-	if ((ctx->bufcnt <= get_block_size(ctx)) || dd->polling_mode)
+	if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
 		/*
 		 * faster to handle last block with cpu or
 		 * use cpu when dma is not present.
@@ -938,9 +1017,9 @@
 		use_dma = 0;
 
 	if (use_dma)
-		err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
+		err = omap_sham_xmit_dma(dd, ctx->total, 1);
 	else
-		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
+		err = omap_sham_xmit_cpu(dd, ctx->total, 1);
 
 	ctx->bufcnt = 0;
 
@@ -988,6 +1067,17 @@
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 	struct omap_sham_dev *dd = ctx->dd;
 
+	if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
+		free_pages((unsigned long)sg_virt(ctx->sg),
+			   get_order(ctx->sg->length));
+
+	if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
+		kfree(ctx->sg);
+
+	ctx->sg = NULL;
+
+	dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));
+
 	if (!err) {
 		dd->pdata->copy_hash(req, 1);
 		if (test_bit(FLAGS_FINAL, &dd->flags))
@@ -1005,9 +1095,6 @@
 
 	if (req->base.complete)
 		req->base.complete(&req->base, err);
-
-	/* handle new request */
-	tasklet_schedule(&dd->done_task);
 }
 
 static int omap_sham_handle_queue(struct omap_sham_dev *dd,
@@ -1018,6 +1105,7 @@
 	unsigned long flags;
 	int err = 0, ret = 0;
 
+retry:
 	spin_lock_irqsave(&dd->lock, flags);
 	if (req)
 		ret = ahash_enqueue_request(&dd->queue, req);
@@ -1041,6 +1129,10 @@
 	dd->req = req;
 	ctx = ahash_request_ctx(req);
 
+	err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
+	if (err)
+		goto err1;
+
 	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
 						ctx->op, req->nbytes);
 
@@ -1061,11 +1153,19 @@
 		err = omap_sham_final_req(dd);
 	}
 err1:
-	if (err != -EINPROGRESS)
+	dev_dbg(dd->dev, "exit, err: %d\n", err);
+
+	if (err != -EINPROGRESS) {
 		/* done_task will not finish it, so do it here */
 		omap_sham_finish_req(req, err);
+		req = NULL;
 
-	dev_dbg(dd->dev, "exit, err: %d\n", err);
+		/*
+		 * Execute next request immediately if there is anything
+		 * in queue.
+		 */
+		goto retry;
+	}
 
 	return ret;
 }
@@ -1085,34 +1185,15 @@
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 	struct omap_sham_dev *dd = ctx->dd;
-	int bs = get_block_size(ctx);
 
 	if (!req->nbytes)
 		return 0;
 
-	ctx->total = req->nbytes;
-	ctx->sg = req->src;
-	ctx->offset = 0;
-
-	if (ctx->flags & BIT(FLAGS_FINUP)) {
-		if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 240) {
-			/*
-			* OMAP HW accel works only with buffers >= 9
-			* will switch to bypass in final()
-			* final has the same request and data
-			*/
-			omap_sham_append_sg(ctx);
-			return 0;
-		} else if ((ctx->bufcnt + ctx->total <= bs) ||
-			   dd->polling_mode) {
-			/*
-			 * faster to use CPU for short transfers or
-			 * use cpu when dma is not present.
-			 */
-			ctx->flags |= BIT(FLAGS_CPU);
-		}
-	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
-		omap_sham_append_sg(ctx);
+	if (ctx->total + req->nbytes < ctx->buflen) {
+		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
+					 0, req->nbytes, 0);
+		ctx->bufcnt += req->nbytes;
+		ctx->total += req->nbytes;
 		return 0;
 	}
 
@@ -1137,9 +1218,20 @@
 {
 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	int offset = 0;
+
+	/*
+	 * If we are running HMAC on limited hardware support, skip
+	 * the ipad in the beginning of the buffer if we are going for
+	 * software fallback algorithm.
+	 */
+	if (test_bit(FLAGS_HMAC, &ctx->flags) &&
+	    !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
+		offset = get_block_size(ctx);
 
 	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
-				      ctx->buffer, ctx->bufcnt, req->result);
+				      ctx->buffer + offset,
+				      ctx->bufcnt - offset, req->result);
 }
 
 static int omap_sham_final(struct ahash_request *req)
@@ -1154,10 +1246,11 @@
 	/*
 	 * OMAP HW accel works only with buffers >= 9.
 	 * HMAC is always >= 9 because ipad == block size.
-	 * If buffersize is less than 240, we use fallback SW encoding,
-	 * as using DMA + HW in this case doesn't provide any benefit.
+	 * If buffersize is less than DMA_THRESHOLD, we use fallback
+	 * SW encoding, as using DMA + HW in this case doesn't provide
+	 * any benefit.
 	 */
-	if ((ctx->digcnt + ctx->bufcnt) < 240)
+	if (!ctx->digcnt && ctx->bufcnt < OMAP_SHA_DMA_THRESHOLD)
 		return omap_sham_final_shash(req);
 	else if (ctx->bufcnt)
 		return omap_sham_enqueue(req, OP_FINAL);
@@ -1323,6 +1416,25 @@
 	}
 }
 
+static int omap_sham_export(struct ahash_request *req, void *out)
+{
+	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
+
+	memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);
+
+	return 0;
+}
+
+static int omap_sham_import(struct ahash_request *req, const void *in)
+{
+	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
+	const struct omap_sham_reqctx *ctx_in = in;
+
+	memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);
+
+	return 0;
+}
+
 static struct ahash_alg algs_sha1_md5[] = {
 {
 	.init		= omap_sham_init,
@@ -1341,7 +1453,7 @@
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
-		.cra_alignmask		= 0,
+		.cra_alignmask		= OMAP_ALIGN_MASK,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= omap_sham_cra_init,
 		.cra_exit		= omap_sham_cra_exit,
@@ -1440,7 +1552,7 @@
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA224_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
-		.cra_alignmask		= 0,
+		.cra_alignmask		= OMAP_ALIGN_MASK,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= omap_sham_cra_init,
 		.cra_exit		= omap_sham_cra_exit,
@@ -1462,7 +1574,7 @@
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA256_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
-		.cra_alignmask		= 0,
+		.cra_alignmask		= OMAP_ALIGN_MASK,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= omap_sham_cra_init,
 		.cra_exit		= omap_sham_cra_exit,
@@ -1535,7 +1647,7 @@
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA384_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
-		.cra_alignmask		= 0,
+		.cra_alignmask		= OMAP_ALIGN_MASK,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= omap_sham_cra_init,
 		.cra_exit		= omap_sham_cra_exit,
@@ -1557,7 +1669,7 @@
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA512_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
-		.cra_alignmask		= 0,
+		.cra_alignmask		= OMAP_ALIGN_MASK,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= omap_sham_cra_init,
 		.cra_exit		= omap_sham_cra_exit,
@@ -1624,12 +1736,8 @@
 	}
 
 	if (test_bit(FLAGS_CPU, &dd->flags)) {
-		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
-			/* hash or semi-hash ready */
-			err = omap_sham_update_cpu(dd);
-			if (err != -EINPROGRESS)
-				goto finish;
-		}
+		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
+			goto finish;
 	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
 		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
 			omap_sham_update_dma_stop(dd);
@@ -1641,8 +1749,6 @@
 		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
 			/* hash or semi-hash ready */
 			clear_bit(FLAGS_DMA_READY, &dd->flags);
-			err = omap_sham_update_dma_start(dd);
-			if (err != -EINPROGRESS)
 				goto finish;
 		}
 	}
@@ -1653,6 +1759,10 @@
 	dev_dbg(dd->dev, "update done: err: %d\n", err);
 	/* finish curent request */
 	omap_sham_finish_req(dd->req, err);
+
+	/* If we are not busy, process next req */
+	if (!test_bit(FLAGS_BUSY, &dd->flags))
+		omap_sham_handle_queue(dd, NULL);
 }
 
 static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
@@ -1977,8 +2087,14 @@
 
 	for (i = 0; i < dd->pdata->algs_info_size; i++) {
 		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
-			err = crypto_register_ahash(
-					&dd->pdata->algs_info[i].algs_list[j]);
+			struct ahash_alg *alg;
+
+			alg = &dd->pdata->algs_info[i].algs_list[j];
+			alg->export = omap_sham_export;
+			alg->import = omap_sham_import;
+			alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
+					      BUFLEN;
+			err = crypto_register_ahash(alg);
 			if (err)
 				goto err_algs;
 
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
index 2f2681d..afc9a0a 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
@@ -55,7 +55,7 @@
 #define ADF_C3XXX_MAX_ACCELERATORS 3
 #define ADF_C3XXX_MAX_ACCELENGINES 6
 #define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16
-#define ADF_C3XXX_ACCELERATORS_MASK 0x3
+#define ADF_C3XXX_ACCELERATORS_MASK 0x7
 #define ADF_C3XXX_ACCELENGINES_MASK 0x3F
 #define ADF_C3XXX_ETR_MAX_BANKS 16
 #define ADF_C3XXX_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index ce7c462..3744b22 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -146,6 +146,7 @@
 	dma_addr_t phy_addr;
 	dma_addr_t const_tbl_addr;
 	void *virt_addr;
+	void *virt_tbl_addr;
 	void __iomem *mailbox_addr;
 	struct mutex lock;	/* protects adf_admin_comms struct */
 };
@@ -251,17 +252,19 @@
 		return -ENOMEM;
 	}
 
-	admin->const_tbl_addr = dma_map_single(&GET_DEV(accel_dev),
-					       (void *) const_tab, 1024,
-					       DMA_TO_DEVICE);
-
-	if (unlikely(dma_mapping_error(&GET_DEV(accel_dev),
-				       admin->const_tbl_addr))) {
+	admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev),
+						   PAGE_SIZE,
+						   &admin->const_tbl_addr,
+						   GFP_KERNEL);
+	if (!admin->virt_tbl_addr) {
+		dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
 		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
 				  admin->virt_addr, admin->phy_addr);
 		kfree(admin);
 		return -ENOMEM;
 	}
+
+	memcpy(admin->virt_tbl_addr, const_tab, sizeof(const_tab));
 	reg_val = (u64)admin->phy_addr;
 	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
 	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
@@ -282,9 +285,10 @@
 	if (admin->virt_addr)
 		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
 				  admin->virt_addr, admin->phy_addr);
+	if (admin->virt_tbl_addr)
+		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+				  admin->virt_tbl_addr, admin->const_tbl_addr);
 
-	dma_unmap_single(&GET_DEV(accel_dev), admin->const_tbl_addr, 1024,
-			 DMA_TO_DEVICE);
 	mutex_destroy(&admin->lock);
 	kfree(admin);
 	accel_dev->admin = NULL;
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 9b961b3..e2454d9 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -967,10 +967,6 @@
 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
 	unsigned int ae;
 
-	obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
-					GFP_KERNEL);
-	if (!obj_handle->uword_buf)
-		return -ENOMEM;
 	obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
 	obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
 					     obj_handle->obj_hdr->file_buff;
@@ -982,6 +978,10 @@
 		pr_err("QAT: UOF incompatible\n");
 		return -EINVAL;
 	}
+	obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
+					GFP_KERNEL);
+	if (!obj_handle->uword_buf)
+		return -ENOMEM;
 	obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
 	if (!obj_handle->obj_hdr->file_buff ||
 	    !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index af50825..d0f80c6 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -304,11 +304,9 @@
 	usleep_range(10, 20);
 	reset_control_deassert(crypto_info->rst);
 
-	err = devm_add_action(dev, rk_crypto_action, crypto_info);
-	if (err) {
-		reset_control_assert(crypto_info->rst);
+	err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
+	if (err)
 		goto err_crypto;
-	}
 
 	spin_lock_init(&crypto_info->lock);
 
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index 3830d7c..90efd10 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -29,7 +29,8 @@
 	u32 tx_cnt = 0;
 	u32 spaces;
 	u32 v;
-	int i, err = 0;
+	int err = 0;
+	unsigned int i;
 	unsigned int ileft = areq->nbytes;
 	unsigned int oleft = areq->nbytes;
 	unsigned int todo;
@@ -139,7 +140,8 @@
 	u32 tx_cnt = 0;
 	u32 v;
 	u32 spaces;
-	int i, err = 0;
+	int err = 0;
+	unsigned int i;
 	unsigned int ileft = areq->nbytes;
 	unsigned int oleft = areq->nbytes;
 	unsigned int todo;
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index 107cd2a..3ac6c6c 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -172,45 +172,45 @@
 },
 {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 	.alg.crypto = {
-			.cra_name = "cbc(des3_ede)",
-			.cra_driver_name = "cbc-des3-sun4i-ss",
-			.cra_priority = 300,
-			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
-			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
-			.cra_module = THIS_MODULE,
-			.cra_alignmask = 3,
-			.cra_type = &crypto_ablkcipher_type,
-			.cra_init = sun4i_ss_cipher_init,
-			.cra_u.ablkcipher = {
-				.min_keysize    = DES3_EDE_KEY_SIZE,
-				.max_keysize    = DES3_EDE_KEY_SIZE,
-				.ivsize         = DES3_EDE_BLOCK_SIZE,
-				.setkey         = sun4i_ss_des3_setkey,
-				.encrypt        = sun4i_ss_cbc_des3_encrypt,
-				.decrypt        = sun4i_ss_cbc_des3_decrypt,
+		.cra_name = "cbc(des3_ede)",
+		.cra_driver_name = "cbc-des3-sun4i-ss",
+		.cra_priority = 300,
+		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.cra_ctxsize = sizeof(struct sun4i_req_ctx),
+		.cra_module = THIS_MODULE,
+		.cra_alignmask = 3,
+		.cra_type = &crypto_ablkcipher_type,
+		.cra_init = sun4i_ss_cipher_init,
+		.cra_u.ablkcipher = {
+			.min_keysize    = DES3_EDE_KEY_SIZE,
+			.max_keysize    = DES3_EDE_KEY_SIZE,
+			.ivsize         = DES3_EDE_BLOCK_SIZE,
+			.setkey         = sun4i_ss_des3_setkey,
+			.encrypt        = sun4i_ss_cbc_des3_encrypt,
+			.decrypt        = sun4i_ss_cbc_des3_decrypt,
 		}
 	}
 },
 {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 	.alg.crypto = {
-			.cra_name = "ecb(des3_ede)",
-			.cra_driver_name = "ecb-des3-sun4i-ss",
-			.cra_priority = 300,
-			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
-			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
-			.cra_module = THIS_MODULE,
-			.cra_alignmask = 3,
-			.cra_type = &crypto_ablkcipher_type,
-			.cra_init = sun4i_ss_cipher_init,
-			.cra_u.ablkcipher = {
-				.min_keysize    = DES3_EDE_KEY_SIZE,
-				.max_keysize    = DES3_EDE_KEY_SIZE,
-				.ivsize         = DES3_EDE_BLOCK_SIZE,
-				.setkey         = sun4i_ss_des3_setkey,
-				.encrypt        = sun4i_ss_ecb_des3_encrypt,
-				.decrypt        = sun4i_ss_ecb_des3_decrypt,
+		.cra_name = "ecb(des3_ede)",
+		.cra_driver_name = "ecb-des3-sun4i-ss",
+		.cra_priority = 300,
+		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.cra_ctxsize = sizeof(struct sun4i_req_ctx),
+		.cra_module = THIS_MODULE,
+		.cra_alignmask = 3,
+		.cra_type = &crypto_ablkcipher_type,
+		.cra_init = sun4i_ss_cipher_init,
+		.cra_u.ablkcipher = {
+			.min_keysize    = DES3_EDE_KEY_SIZE,
+			.max_keysize    = DES3_EDE_KEY_SIZE,
+			.ivsize         = DES3_EDE_BLOCK_SIZE,
+			.setkey         = sun4i_ss_des3_setkey,
+			.encrypt        = sun4i_ss_ecb_des3_encrypt,
+			.decrypt        = sun4i_ss_ecb_des3_decrypt,
 		}
 	}
 },
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
index ff80314..0de2f62 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
@@ -20,6 +20,15 @@
 
 int sun4i_hash_crainit(struct crypto_tfm *tfm)
 {
+	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
+	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+	struct sun4i_ss_alg_template *algt;
+
+	memset(op, 0, sizeof(struct sun4i_tfm_ctx));
+
+	algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
+	op->ss = algt->ss;
+
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 				 sizeof(struct sun4i_req_ctx));
 	return 0;
@@ -32,13 +41,10 @@
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
 	struct sun4i_ss_alg_template *algt;
-	struct sun4i_ss_ctx *ss;
 
 	memset(op, 0, sizeof(struct sun4i_req_ctx));
 
 	algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
-	ss = algt->ss;
-	op->ss = algt->ss;
 	op->mode = algt->mode;
 
 	return 0;
@@ -129,6 +135,9 @@
 	return 0;
 }
 
+#define SS_HASH_UPDATE 1
+#define SS_HASH_FINAL 2
+
 /*
  * sun4i_hash_update: update hash engine
  *
@@ -156,7 +165,7 @@
  * write remaining data in op->buf
  * final state op->len=56
  */
-int sun4i_hash_update(struct ahash_request *areq)
+static int sun4i_hash(struct ahash_request *areq)
 {
 	u32 v, ivmode = 0;
 	unsigned int i = 0;
@@ -167,8 +176,9 @@
 	 */
 
 	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
-	struct sun4i_ss_ctx *ss = op->ss;
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+	struct sun4i_ss_ctx *ss = tfmctx->ss;
 	unsigned int in_i = 0; /* advancement in the current SG */
 	unsigned int end;
 	/*
@@ -180,22 +190,30 @@
 	u32 spaces, rx_cnt = SS_RX_DEFAULT;
 	size_t copied = 0;
 	struct sg_mapping_iter mi;
+	unsigned int j = 0;
+	int zeros;
+	unsigned int index, padlen;
+	__be64 bits;
+	u32 bf[32];
+	u32 wb = 0;
+	unsigned int nwait, nbw = 0;
+	struct scatterlist *in_sg = areq->src;
 
 	dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
 		__func__, crypto_tfm_alg_name(areq->base.tfm),
 		op->byte_count, areq->nbytes, op->mode,
 		op->len, op->hash[0]);
 
-	if (areq->nbytes == 0)
+	if (unlikely(areq->nbytes == 0) && (op->flags & SS_HASH_FINAL) == 0)
 		return 0;
 
 	/* protect against overflow */
-	if (areq->nbytes > UINT_MAX - op->len) {
+	if (unlikely(areq->nbytes > UINT_MAX - op->len)) {
 		dev_err(ss->dev, "Cannot process too large request\n");
 		return -EINVAL;
 	}
 
-	if (op->len + areq->nbytes < 64) {
+	if (op->len + areq->nbytes < 64 && (op->flags & SS_HASH_FINAL) == 0) {
 		/* linearize data to op->buf */
 		copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
 					    op->buf + op->len, areq->nbytes, 0);
@@ -203,14 +221,6 @@
 		return 0;
 	}
 
-	end = ((areq->nbytes + op->len) / 64) * 64 - op->len;
-
-	if (end > areq->nbytes || areq->nbytes - end > 63) {
-		dev_err(ss->dev, "ERROR: Bound error %u %u\n",
-			end, areq->nbytes);
-		return -EINVAL;
-	}
-
 	spin_lock_bh(&ss->slock);
 
 	/*
@@ -225,6 +235,34 @@
 	/* Enable the device */
 	writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);
 
+	if ((op->flags & SS_HASH_UPDATE) == 0)
+		goto hash_final;
+
+	/* start of handling data */
+	if ((op->flags & SS_HASH_FINAL) == 0) {
+		end = ((areq->nbytes + op->len) / 64) * 64 - op->len;
+
+		if (end > areq->nbytes || areq->nbytes - end > 63) {
+			dev_err(ss->dev, "ERROR: Bound error %u %u\n",
+				end, areq->nbytes);
+			err = -EINVAL;
+			goto release_ss;
+		}
+	} else {
+		/* Since we have the flag final, we can go up to modulo 4 */
+		end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
+	}
+
+	/* TODO if SGlen % 4 and op->len == 0 then DMA */
+	i = 1;
+	while (in_sg && i == 1) {
+		if ((in_sg->length % 4) != 0)
+			i = 0;
+		in_sg = sg_next(in_sg);
+	}
+	if (i == 1 && op->len == 0)
+		dev_dbg(ss->dev, "We can DMA\n");
+
 	i = 0;
 	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
 		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
@@ -285,7 +323,11 @@
 			}
 		}
 	} while (i < end);
-	/* final linear */
+
+	/*
+	 * Now we have written to the device all that we can,
+	 * store the remaining bytes in op->buf
+	 */
 	if ((areq->nbytes - i) < 64) {
 		while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
 			/* how many bytes we can read from current SG */
@@ -304,13 +346,21 @@
 
 	sg_miter_stop(&mi);
 
+	/*
+	 * End of data process
+	 * Now if we have the flag final go to finalize part
+	 * If not, store the partial hash
+	 */
+	if ((op->flags & SS_HASH_FINAL) > 0)
+		goto hash_final;
+
 	writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
 	i = 0;
 	do {
 		v = readl(ss->base + SS_CTL);
 		i++;
 	} while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0);
-	if (i >= SS_TIMEOUT) {
+	if (unlikely(i >= SS_TIMEOUT)) {
 		dev_err_ratelimited(ss->dev,
 				    "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
 				    i, SS_TIMEOUT, v, areq->nbytes);
@@ -318,56 +368,24 @@
 		goto release_ss;
 	}
 
-	/* get the partial hash only if something was written */
 	for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
 		op->hash[i] = readl(ss->base + SS_MD0 + i * 4);
 
-release_ss:
-	writel(0, ss->base + SS_CTL);
-	spin_unlock_bh(&ss->slock);
-	return err;
-}
+	goto release_ss;
 
 /*
- * sun4i_hash_final: finalize hashing operation
+ * hash_final: finalize hashing operation
  *
  * If we have some remaining bytes, we write them.
  * Then ask the SS for finalizing the hashing operation
  *
  * I do not check RX FIFO size in this function since the size is 32
  * after each enabling and this function neither write more than 32 words.
+ * If we come from the update part, we cannot have more than
+ * 3 remaining bytes to write and SS is fast enough to not care about it.
  */
-int sun4i_hash_final(struct ahash_request *areq)
-{
-	u32 v, ivmode = 0;
-	unsigned int i;
-	unsigned int j = 0;
-	int zeros, err = 0;
-	unsigned int index, padlen;
-	__be64 bits;
-	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
-	struct sun4i_ss_ctx *ss = op->ss;
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
-	u32 bf[32];
-	u32 wb = 0;
-	unsigned int nwait, nbw = 0;
 
-	dev_dbg(ss->dev, "%s: byte=%llu len=%u mode=%x wl=%u h=%x",
-		__func__, op->byte_count, areq->nbytes, op->mode,
-		op->len, op->hash[0]);
-
-	spin_lock_bh(&ss->slock);
-
-	/*
-	 * if we have already written something,
-	 * restore the partial hash state
-	 */
-	if (op->byte_count > 0) {
-		ivmode = SS_IV_ARBITRARY;
-		for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
-			writel(op->hash[i], ss->base + SS_IV0 + i * 4);
-	}
-	writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);
+hash_final:
 
 	/* write the remaining words of the wait buffer */
 	if (op->len > 0) {
@@ -428,7 +446,7 @@
 
 	/*
 	 * Wait for SS to finish the hash.
-	 * The timeout could happen only in case of bad overcloking
+	 * The timeout could happen only in case of bad overclocking
 	 * or driver bug.
 	 */
 	i = 0;
@@ -436,7 +454,7 @@
 		v = readl(ss->base + SS_CTL);
 		i++;
 	} while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0);
-	if (i >= SS_TIMEOUT) {
+	if (unlikely(i >= SS_TIMEOUT)) {
 		dev_err_ratelimited(ss->dev,
 				    "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
 				    i, SS_TIMEOUT, v, areq->nbytes);
@@ -463,30 +481,41 @@
 	return err;
 }
 
+int sun4i_hash_final(struct ahash_request *areq)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+
+	op->flags = SS_HASH_FINAL;
+	return sun4i_hash(areq);
+}
+
+int sun4i_hash_update(struct ahash_request *areq)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+
+	op->flags = SS_HASH_UPDATE;
+	return sun4i_hash(areq);
+}
+
 /* sun4i_hash_finup: finalize hashing operation after an update */
 int sun4i_hash_finup(struct ahash_request *areq)
 {
-	int err;
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
 
-	err = sun4i_hash_update(areq);
-	if (err != 0)
-		return err;
-
-	return sun4i_hash_final(areq);
+	op->flags = SS_HASH_UPDATE | SS_HASH_FINAL;
+	return sun4i_hash(areq);
 }
 
 /* combo of init/update/final functions */
 int sun4i_hash_digest(struct ahash_request *areq)
 {
 	int err;
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
 
 	err = sun4i_hash_init(areq);
 	if (err != 0)
 		return err;
 
-	err = sun4i_hash_update(areq);
-	if (err != 0)
-		return err;
-
-	return sun4i_hash_final(areq);
+	op->flags = SS_HASH_UPDATE | SS_HASH_FINAL;
+	return sun4i_hash(areq);
 }
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h
index 8e9c05f..f04c0f8 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss.h
+++ b/drivers/crypto/sunxi-ss/sun4i-ss.h
@@ -163,7 +163,7 @@
 	u32 hash[5]; /* for storing SS_IVx register */
 	char buf[64];
 	unsigned int len;
-	struct sun4i_ss_ctx *ss;
+	int flags;
 };
 
 int sun4i_hash_crainit(struct crypto_tfm *tfm);
diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig
index a83ead1..c3d524e 100644
--- a/drivers/crypto/vmx/Kconfig
+++ b/drivers/crypto/vmx/Kconfig
@@ -1,6 +1,7 @@
 config CRYPTO_DEV_VMX_ENCRYPT
 	tristate "Encryption acceleration support on P8 CPU"
 	depends on CRYPTO_DEV_VMX
+	select CRYPTO_GHASH
 	default m
 	help
 	  Support for VMX cryptographic acceleration instructions on Power8 CPU.
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 6c999cb0..27a94a1 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -26,16 +26,13 @@
 #include <linux/hardirq.h>
 #include <asm/switch_to.h>
 #include <crypto/aes.h>
+#include <crypto/ghash.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/hash.h>
 #include <crypto/b128ops.h>
 
 #define IN_INTERRUPT in_interrupt()
 
-#define GHASH_BLOCK_SIZE (16)
-#define GHASH_DIGEST_SIZE (16)
-#define GHASH_KEY_LEN (16)
-
 void gcm_init_p8(u128 htable[16], const u64 Xi[2]);
 void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]);
 void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
@@ -55,16 +52,11 @@
 
 static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
 {
-	const char *alg;
+	const char *alg = "ghash-generic";
 	struct crypto_shash *fallback;
 	struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
 	struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	if (!(alg = crypto_tfm_alg_name(tfm))) {
-		printk(KERN_ERR "Failed to get algorithm name.\n");
-		return -ENOENT;
-	}
-
 	fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(fallback)) {
 		printk(KERN_ERR
@@ -78,10 +70,18 @@
 	crypto_shash_set_flags(fallback,
 			       crypto_shash_get_flags((struct crypto_shash
 						       *) tfm));
-	ctx->fallback = fallback;
 
-	shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx)
-	    + crypto_shash_descsize(fallback);
+	/* Check if the descsize defined in the algorithm is still enough. */
+	if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx)
+	    + crypto_shash_descsize(fallback)) {
+		printk(KERN_ERR
+		       "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n",
+		       alg,
+		       shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx),
+		       crypto_shash_descsize(fallback));
+		return -EINVAL;
+	}
+	ctx->fallback = fallback;
 
 	return 0;
 }
@@ -113,7 +113,7 @@
 {
 	struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm));
 
-	if (keylen != GHASH_KEY_LEN)
+	if (keylen != GHASH_BLOCK_SIZE)
 		return -EINVAL;
 
 	preempt_disable();
@@ -211,7 +211,8 @@
 	.update = p8_ghash_update,
 	.final = p8_ghash_final,
 	.setkey = p8_ghash_setkey,
-	.descsize = sizeof(struct p8_ghash_desc_ctx),
+	.descsize = sizeof(struct p8_ghash_desc_ctx)
+		+ sizeof(struct ghash_desc_ctx),
 	.base = {
 		 .cra_name = "ghash",
 		 .cra_driver_name = "p8_ghash",
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index cedab75..daadd20a 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -23,4 +23,9 @@
 
 	  Say Y if unsure
 
+config NR_DEV_DAX
+	int "Maximum number of Device-DAX instances"
+	default 32768
+	range 256 2147483647
+
 endif
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 29f600f2..0e499bf 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -13,15 +13,25 @@
 #include <linux/pagemap.h>
 #include <linux/module.h>
 #include <linux/device.h>
+#include <linux/mount.h>
 #include <linux/pfn_t.h>
+#include <linux/hash.h>
+#include <linux/cdev.h>
 #include <linux/slab.h>
 #include <linux/dax.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
+#include "dax.h"
 
-static int dax_major;
+static dev_t dax_devt;
 static struct class *dax_class;
 static DEFINE_IDA(dax_minor_ida);
+static int nr_dax = CONFIG_NR_DEV_DAX;
+module_param(nr_dax, int, S_IRUGO);
+static struct vfsmount *dax_mnt;
+static struct kmem_cache *dax_cache __read_mostly;
+static struct super_block *dax_superblock __read_mostly;
+MODULE_PARM_DESC(nr_dax, "max number of device-dax instances");
 
 /**
  * struct dax_region - mapping infrastructure for dax devices
@@ -48,7 +58,7 @@
  * struct dax_dev - subdivision of a dax region
  * @region - parent region
  * @dev - device backing the character device
- * @kref - enable this data to be tracked in filp->private_data
+ * @cdev - core chardev data
  * @alive - !alive + rcu grace period == no new mappings can be established
  * @id - child id in the region
  * @num_resources - number of physical address extents in this device
@@ -56,14 +66,126 @@
  */
 struct dax_dev {
 	struct dax_region *region;
-	struct device *dev;
-	struct kref kref;
+	struct inode *inode;
+	struct device dev;
+	struct cdev cdev;
 	bool alive;
 	int id;
 	int num_resources;
 	struct resource res[0];
 };
 
+static struct inode *dax_alloc_inode(struct super_block *sb)
+{
+	return kmem_cache_alloc(dax_cache, GFP_KERNEL);
+}
+
+static void dax_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+
+	kmem_cache_free(dax_cache, inode);
+}
+
+static void dax_destroy_inode(struct inode *inode)
+{
+	call_rcu(&inode->i_rcu, dax_i_callback);
+}
+
+static const struct super_operations dax_sops = {
+	.statfs = simple_statfs,
+	.alloc_inode = dax_alloc_inode,
+	.destroy_inode = dax_destroy_inode,
+	.drop_inode = generic_delete_inode,
+};
+
+static struct dentry *dax_mount(struct file_system_type *fs_type,
+		int flags, const char *dev_name, void *data)
+{
+	return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
+}
+
+static struct file_system_type dax_type = {
+	.name = "dax",
+	.mount = dax_mount,
+	.kill_sb = kill_anon_super,
+};
+
+static int dax_test(struct inode *inode, void *data)
+{
+	return inode->i_cdev == data;
+}
+
+static int dax_set(struct inode *inode, void *data)
+{
+	inode->i_cdev = data;
+	return 0;
+}
+
+static struct inode *dax_inode_get(struct cdev *cdev, dev_t devt)
+{
+	struct inode *inode;
+
+	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
+			dax_test, dax_set, cdev);
+
+	if (!inode)
+		return NULL;
+
+	if (inode->i_state & I_NEW) {
+		inode->i_mode = S_IFCHR;
+		inode->i_flags = S_DAX;
+		inode->i_rdev = devt;
+		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
+		unlock_new_inode(inode);
+	}
+	return inode;
+}
+
+static void init_once(void *inode)
+{
+	inode_init_once(inode);
+}
+
+static int dax_inode_init(void)
+{
+	int rc;
+
+	dax_cache = kmem_cache_create("dax_cache", sizeof(struct inode), 0,
+			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
+			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+			init_once);
+	if (!dax_cache)
+		return -ENOMEM;
+
+	rc = register_filesystem(&dax_type);
+	if (rc)
+		goto err_register_fs;
+
+	dax_mnt = kern_mount(&dax_type);
+	if (IS_ERR(dax_mnt)) {
+		rc = PTR_ERR(dax_mnt);
+		goto err_mount;
+	}
+	dax_superblock = dax_mnt->mnt_sb;
+
+	return 0;
+
+ err_mount:
+	unregister_filesystem(&dax_type);
+ err_register_fs:
+	kmem_cache_destroy(dax_cache);
+
+	return rc;
+}
+
+static void dax_inode_exit(void)
+{
+	kern_unmount(dax_mnt);
+	unregister_filesystem(&dax_type);
+	kmem_cache_destroy(dax_cache);
+}
+
 static void dax_region_free(struct kref *kref)
 {
 	struct dax_region *dax_region;
@@ -78,28 +200,17 @@
 }
 EXPORT_SYMBOL_GPL(dax_region_put);
 
-static void dax_dev_free(struct kref *kref)
-{
-	struct dax_dev *dax_dev;
-
-	dax_dev = container_of(kref, struct dax_dev, kref);
-	dax_region_put(dax_dev->region);
-	kfree(dax_dev);
-}
-
-static void dax_dev_put(struct dax_dev *dax_dev)
-{
-	kref_put(&dax_dev->kref, dax_dev_free);
-}
-
 struct dax_region *alloc_dax_region(struct device *parent, int region_id,
 		struct resource *res, unsigned int align, void *addr,
 		unsigned long pfn_flags)
 {
 	struct dax_region *dax_region;
 
-	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
+	if (!IS_ALIGNED(res->start, align)
+			|| !IS_ALIGNED(resource_size(res), align))
+		return NULL;
 
+	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
 	if (!dax_region)
 		return NULL;
 
@@ -116,10 +227,15 @@
 }
 EXPORT_SYMBOL_GPL(alloc_dax_region);
 
+static struct dax_dev *to_dax_dev(struct device *dev)
+{
+	return container_of(dev, struct dax_dev, dev);
+}
+
 static ssize_t size_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct dax_dev *dax_dev = dev_get_drvdata(dev);
+	struct dax_dev *dax_dev = to_dax_dev(dev);
 	unsigned long long size = 0;
 	int i;
 
@@ -144,180 +260,11 @@
 	NULL,
 };
 
-static void unregister_dax_dev(void *_dev)
-{
-	struct device *dev = _dev;
-	struct dax_dev *dax_dev = dev_get_drvdata(dev);
-	struct dax_region *dax_region = dax_dev->region;
-
-	dev_dbg(dev, "%s\n", __func__);
-
-	/*
-	 * Note, rcu is not protecting the liveness of dax_dev, rcu is
-	 * ensuring that any fault handlers that might have seen
-	 * dax_dev->alive == true, have completed.  Any fault handlers
-	 * that start after synchronize_rcu() has started will abort
-	 * upon seeing dax_dev->alive == false.
-	 */
-	dax_dev->alive = false;
-	synchronize_rcu();
-
-	get_device(dev);
-	device_unregister(dev);
-	ida_simple_remove(&dax_region->ida, dax_dev->id);
-	ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
-	put_device(dev);
-	dax_dev_put(dax_dev);
-}
-
-int devm_create_dax_dev(struct dax_region *dax_region, struct resource *res,
-		int count)
-{
-	struct device *parent = dax_region->dev;
-	struct dax_dev *dax_dev;
-	struct device *dev;
-	int rc, minor;
-	dev_t dev_t;
-
-	dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
-	if (!dax_dev)
-		return -ENOMEM;
-	memcpy(dax_dev->res, res, sizeof(*res) * count);
-	dax_dev->num_resources = count;
-	kref_init(&dax_dev->kref);
-	dax_dev->alive = true;
-	dax_dev->region = dax_region;
-	kref_get(&dax_region->kref);
-
-	dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
-	if (dax_dev->id < 0) {
-		rc = dax_dev->id;
-		goto err_id;
-	}
-
-	minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
-	if (minor < 0) {
-		rc = minor;
-		goto err_minor;
-	}
-
-	dev_t = MKDEV(dax_major, minor);
-	dev = device_create_with_groups(dax_class, parent, dev_t, dax_dev,
-			dax_attribute_groups, "dax%d.%d", dax_region->id,
-			dax_dev->id);
-	if (IS_ERR(dev)) {
-		rc = PTR_ERR(dev);
-		goto err_create;
-	}
-	dax_dev->dev = dev;
-
-	rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
-	if (rc)
-		return rc;
-
-	return 0;
-
- err_create:
-	ida_simple_remove(&dax_minor_ida, minor);
- err_minor:
-	ida_simple_remove(&dax_region->ida, dax_dev->id);
- err_id:
-	dax_dev_put(dax_dev);
-
-	return rc;
-}
-EXPORT_SYMBOL_GPL(devm_create_dax_dev);
-
-/* return an unmapped area aligned to the dax region specified alignment */
-static unsigned long dax_dev_get_unmapped_area(struct file *filp,
-		unsigned long addr, unsigned long len, unsigned long pgoff,
-		unsigned long flags)
-{
-	unsigned long off, off_end, off_align, len_align, addr_align, align;
-	struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
-	struct dax_region *dax_region;
-
-	if (!dax_dev || addr)
-		goto out;
-
-	dax_region = dax_dev->region;
-	align = dax_region->align;
-	off = pgoff << PAGE_SHIFT;
-	off_end = off + len;
-	off_align = round_up(off, align);
-
-	if ((off_end <= off_align) || ((off_end - off_align) < align))
-		goto out;
-
-	len_align = len + align;
-	if ((off + len_align) < off)
-		goto out;
-
-	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
-			pgoff, flags);
-	if (!IS_ERR_VALUE(addr_align)) {
-		addr_align += (off - addr_align) & (align - 1);
-		return addr_align;
-	}
- out:
-	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
-}
-
-static int __match_devt(struct device *dev, const void *data)
-{
-	const dev_t *devt = data;
-
-	return dev->devt == *devt;
-}
-
-static struct device *dax_dev_find(dev_t dev_t)
-{
-	return class_find_device(dax_class, NULL, &dev_t, __match_devt);
-}
-
-static int dax_dev_open(struct inode *inode, struct file *filp)
-{
-	struct dax_dev *dax_dev = NULL;
-	struct device *dev;
-
-	dev = dax_dev_find(inode->i_rdev);
-	if (!dev)
-		return -ENXIO;
-
-	device_lock(dev);
-	dax_dev = dev_get_drvdata(dev);
-	if (dax_dev) {
-		dev_dbg(dev, "%s\n", __func__);
-		filp->private_data = dax_dev;
-		kref_get(&dax_dev->kref);
-		inode->i_flags = S_DAX;
-	}
-	device_unlock(dev);
-
-	if (!dax_dev) {
-		put_device(dev);
-		return -ENXIO;
-	}
-	return 0;
-}
-
-static int dax_dev_release(struct inode *inode, struct file *filp)
-{
-	struct dax_dev *dax_dev = filp->private_data;
-	struct device *dev = dax_dev->dev;
-
-	dev_dbg(dax_dev->dev, "%s\n", __func__);
-	dax_dev_put(dax_dev);
-	put_device(dev);
-
-	return 0;
-}
-
 static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
 		const char *func)
 {
 	struct dax_region *dax_region = dax_dev->region;
-	struct device *dev = dax_dev->dev;
+	struct device *dev = &dax_dev->dev;
 	unsigned long mask;
 
 	if (!dax_dev->alive)
@@ -382,7 +329,7 @@
 		struct vm_fault *vmf)
 {
 	unsigned long vaddr = (unsigned long) vmf->virtual_address;
-	struct device *dev = dax_dev->dev;
+	struct device *dev = &dax_dev->dev;
 	struct dax_region *dax_region;
 	int rc = VM_FAULT_SIGBUS;
 	phys_addr_t phys;
@@ -422,7 +369,7 @@
 	struct file *filp = vma->vm_file;
 	struct dax_dev *dax_dev = filp->private_data;
 
-	dev_dbg(dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
+	dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
 			current->comm, (vmf->flags & FAULT_FLAG_WRITE)
 			? "write" : "read", vma->vm_start, vma->vm_end);
 	rcu_read_lock();
@@ -437,7 +384,7 @@
 		unsigned int flags)
 {
 	unsigned long pmd_addr = addr & PMD_MASK;
-	struct device *dev = dax_dev->dev;
+	struct device *dev = &dax_dev->dev;
 	struct dax_region *dax_region;
 	phys_addr_t phys;
 	pgoff_t pgoff;
@@ -479,7 +426,7 @@
 	struct file *filp = vma->vm_file;
 	struct dax_dev *dax_dev = filp->private_data;
 
-	dev_dbg(dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
+	dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
 			current->comm, (flags & FAULT_FLAG_WRITE)
 			? "write" : "read", vma->vm_start, vma->vm_end);
 
@@ -490,81 +437,257 @@
 	return rc;
 }
 
-static void dax_dev_vm_open(struct vm_area_struct *vma)
-{
-	struct file *filp = vma->vm_file;
-	struct dax_dev *dax_dev = filp->private_data;
-
-	dev_dbg(dax_dev->dev, "%s\n", __func__);
-	kref_get(&dax_dev->kref);
-}
-
-static void dax_dev_vm_close(struct vm_area_struct *vma)
-{
-	struct file *filp = vma->vm_file;
-	struct dax_dev *dax_dev = filp->private_data;
-
-	dev_dbg(dax_dev->dev, "%s\n", __func__);
-	dax_dev_put(dax_dev);
-}
-
 static const struct vm_operations_struct dax_dev_vm_ops = {
 	.fault = dax_dev_fault,
 	.pmd_fault = dax_dev_pmd_fault,
-	.open = dax_dev_vm_open,
-	.close = dax_dev_vm_close,
 };
 
-static int dax_dev_mmap(struct file *filp, struct vm_area_struct *vma)
+static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct dax_dev *dax_dev = filp->private_data;
 	int rc;
 
-	dev_dbg(dax_dev->dev, "%s\n", __func__);
+	dev_dbg(&dax_dev->dev, "%s\n", __func__);
 
 	rc = check_vma(dax_dev, vma, __func__);
 	if (rc)
 		return rc;
 
-	kref_get(&dax_dev->kref);
 	vma->vm_ops = &dax_dev_vm_ops;
 	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
 	return 0;
+}
 
+/* return an unmapped area aligned to the dax region specified alignment */
+static unsigned long dax_get_unmapped_area(struct file *filp,
+		unsigned long addr, unsigned long len, unsigned long pgoff,
+		unsigned long flags)
+{
+	unsigned long off, off_end, off_align, len_align, addr_align, align;
+	struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
+	struct dax_region *dax_region;
+
+	if (!dax_dev || addr)
+		goto out;
+
+	dax_region = dax_dev->region;
+	align = dax_region->align;
+	off = pgoff << PAGE_SHIFT;
+	off_end = off + len;
+	off_align = round_up(off, align);
+
+	if ((off_end <= off_align) || ((off_end - off_align) < align))
+		goto out;
+
+	len_align = len + align;
+	if ((off + len_align) < off)
+		goto out;
+
+	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
+			pgoff, flags);
+	if (!IS_ERR_VALUE(addr_align)) {
+		addr_align += (off - addr_align) & (align - 1);
+		return addr_align;
+	}
+ out:
+	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+}
+
+static int dax_open(struct inode *inode, struct file *filp)
+{
+	struct dax_dev *dax_dev;
+
+	dax_dev = container_of(inode->i_cdev, struct dax_dev, cdev);
+	dev_dbg(&dax_dev->dev, "%s\n", __func__);
+	inode->i_mapping = dax_dev->inode->i_mapping;
+	inode->i_mapping->host = dax_dev->inode;
+	filp->f_mapping = inode->i_mapping;
+	filp->private_data = dax_dev;
+	inode->i_flags = S_DAX;
+
+	return 0;
+}
+
+static int dax_release(struct inode *inode, struct file *filp)
+{
+	struct dax_dev *dax_dev = filp->private_data;
+
+	dev_dbg(&dax_dev->dev, "%s\n", __func__);
+	return 0;
 }
 
 static const struct file_operations dax_fops = {
 	.llseek = noop_llseek,
 	.owner = THIS_MODULE,
-	.open = dax_dev_open,
-	.release = dax_dev_release,
-	.get_unmapped_area = dax_dev_get_unmapped_area,
-	.mmap = dax_dev_mmap,
+	.open = dax_open,
+	.release = dax_release,
+	.get_unmapped_area = dax_get_unmapped_area,
+	.mmap = dax_mmap,
 };
 
+static void dax_dev_release(struct device *dev)
+{
+	struct dax_dev *dax_dev = to_dax_dev(dev);
+	struct dax_region *dax_region = dax_dev->region;
+
+	ida_simple_remove(&dax_region->ida, dax_dev->id);
+	ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
+	dax_region_put(dax_region);
+	iput(dax_dev->inode);
+	kfree(dax_dev);
+}
+
+static void unregister_dax_dev(void *dev)
+{
+	struct dax_dev *dax_dev = to_dax_dev(dev);
+	struct cdev *cdev = &dax_dev->cdev;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	/*
+	 * Note, rcu is not protecting the liveness of dax_dev, rcu is
+	 * ensuring that any fault handlers that might have seen
+	 * dax_dev->alive == true, have completed.  Any fault handlers
+	 * that start after synchronize_rcu() has started will abort
+	 * upon seeing dax_dev->alive == false.
+	 */
+	dax_dev->alive = false;
+	synchronize_rcu();
+	unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
+	cdev_del(cdev);
+	device_unregister(dev);
+}
+
+struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
+		struct resource *res, int count)
+{
+	struct device *parent = dax_region->dev;
+	struct dax_dev *dax_dev;
+	int rc = 0, minor, i;
+	struct device *dev;
+	struct cdev *cdev;
+	dev_t dev_t;
+
+	dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
+	if (!dax_dev)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < count; i++) {
+		if (!IS_ALIGNED(res[i].start, dax_region->align)
+				|| !IS_ALIGNED(resource_size(&res[i]),
+					dax_region->align)) {
+			rc = -EINVAL;
+			break;
+		}
+		dax_dev->res[i].start = res[i].start;
+		dax_dev->res[i].end = res[i].end;
+	}
+
+	if (i < count)
+		goto err_id;
+
+	dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
+	if (dax_dev->id < 0) {
+		rc = dax_dev->id;
+		goto err_id;
+	}
+
+	minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
+	if (minor < 0) {
+		rc = minor;
+		goto err_minor;
+	}
+
+	dev_t = MKDEV(MAJOR(dax_devt), minor);
+	dev = &dax_dev->dev;
+	dax_dev->inode = dax_inode_get(&dax_dev->cdev, dev_t);
+	if (!dax_dev->inode) {
+		rc = -ENOMEM;
+		goto err_inode;
+	}
+
+	/* device_initialize() so cdev can reference kobj parent */
+	device_initialize(dev);
+
+	cdev = &dax_dev->cdev;
+	cdev_init(cdev, &dax_fops);
+	cdev->owner = parent->driver->owner;
+	cdev->kobj.parent = &dev->kobj;
+	rc = cdev_add(&dax_dev->cdev, dev_t, 1);
+	if (rc)
+		goto err_cdev;
+
+	/* from here on we're committed to teardown via dax_dev_release() */
+	dax_dev->num_resources = count;
+	dax_dev->alive = true;
+	dax_dev->region = dax_region;
+	kref_get(&dax_region->kref);
+
+	dev->devt = dev_t;
+	dev->class = dax_class;
+	dev->parent = parent;
+	dev->groups = dax_attribute_groups;
+	dev->release = dax_dev_release;
+	dev_set_name(dev, "dax%d.%d", dax_region->id, dax_dev->id);
+	rc = device_add(dev);
+	if (rc) {
+		put_device(dev);
+		return ERR_PTR(rc);
+	}
+
+	rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
+	if (rc)
+		return ERR_PTR(rc);
+
+	return dax_dev;
+
+ err_cdev:
+	iput(dax_dev->inode);
+ err_inode:
+	ida_simple_remove(&dax_minor_ida, minor);
+ err_minor:
+	ida_simple_remove(&dax_region->ida, dax_dev->id);
+ err_id:
+	kfree(dax_dev);
+
+	return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(devm_create_dax_dev);
+
 static int __init dax_init(void)
 {
 	int rc;
 
-	rc = register_chrdev(0, "dax", &dax_fops);
-	if (rc < 0)
+	rc = dax_inode_init();
+	if (rc)
 		return rc;
-	dax_major = rc;
+
+	nr_dax = max(nr_dax, 256);
+	rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax");
+	if (rc)
+		goto err_chrdev;
 
 	dax_class = class_create(THIS_MODULE, "dax");
 	if (IS_ERR(dax_class)) {
-		unregister_chrdev(dax_major, "dax");
-		return PTR_ERR(dax_class);
+		rc = PTR_ERR(dax_class);
+		goto err_class;
 	}
 
 	return 0;
+
+ err_class:
+	unregister_chrdev_region(dax_devt, nr_dax);
+ err_chrdev:
+	dax_inode_exit();
+	return rc;
 }
 
 static void __exit dax_exit(void)
 {
 	class_destroy(dax_class);
-	unregister_chrdev(dax_major, "dax");
+	unregister_chrdev_region(dax_devt, nr_dax);
 	ida_destroy(&dax_minor_ida);
+	dax_inode_exit();
 }
 
 MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/dax/dax.h b/drivers/dax/dax.h
index d8b8f1f..ddd829a 100644
--- a/drivers/dax/dax.h
+++ b/drivers/dax/dax.h
@@ -13,12 +13,13 @@
 #ifndef __DAX_H__
 #define __DAX_H__
 struct device;
+struct dax_dev;
 struct resource;
 struct dax_region;
 void dax_region_put(struct dax_region *dax_region);
 struct dax_region *alloc_dax_region(struct device *parent,
 		int region_id, struct resource *res, unsigned int align,
 		void *addr, unsigned long flags);
-int devm_create_dax_dev(struct dax_region *dax_region, struct resource *res,
-		int count);
+struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
+		struct resource *res, int count);
 #endif /* __DAX_H__ */
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 1f01e98..9630d88 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -24,7 +24,7 @@
 	struct completion cmp;
 };
 
-struct dax_pmem *to_dax_pmem(struct percpu_ref *ref)
+static struct dax_pmem *to_dax_pmem(struct percpu_ref *ref)
 {
 	return container_of(ref, struct dax_pmem, ref);
 }
@@ -61,6 +61,7 @@
 	int rc;
 	void *addr;
 	struct resource res;
+	struct dax_dev *dax_dev;
 	struct nd_pfn_sb *pfn_sb;
 	struct dax_pmem *dax_pmem;
 	struct nd_region *nd_region;
@@ -126,12 +127,12 @@
 		return -ENOMEM;
 
 	/* TODO: support for subdividing a dax region... */
-	rc = devm_create_dax_dev(dax_region, &res, 1);
+	dax_dev = devm_create_dax_dev(dax_region, &res, 1);
 
 	/* child dax_dev instances now own the lifetime of the dax_region */
 	dax_region_put(dax_region);
 
-	return rc;
+	return PTR_ERR_OR_ZERO(dax_dev);
 }
 
 static struct nd_device_driver dax_pmem_driver = {
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 478006b..bf3ea76 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -137,6 +137,10 @@
 
 	cur_time = jiffies;
 
+	/* Immediately exit if previous_freq is not initialized yet. */
+	if (!devfreq->previous_freq)
+		goto out;
+
 	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
 	if (prev_lev < 0) {
 		ret = prev_lev;
@@ -594,17 +598,19 @@
 	if (devfreq->governor)
 		err = devfreq->governor->event_handler(devfreq,
 					DEVFREQ_GOV_START, NULL);
-	mutex_unlock(&devfreq_list_lock);
 	if (err) {
 		dev_err(dev, "%s: Unable to start governor for the device\n",
 			__func__);
 		goto err_init;
 	}
+	mutex_unlock(&devfreq_list_lock);
 
 	return devfreq;
 
 err_init:
 	list_del(&devfreq->node);
+	mutex_unlock(&devfreq_list_lock);
+
 	device_unregister(&devfreq->dev);
 err_out:
 	return ERR_PTR(err);
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
index 0fdae86..cd94980 100644
--- a/drivers/devfreq/event/Kconfig
+++ b/drivers/devfreq/event/Kconfig
@@ -17,6 +17,7 @@
 	tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
 	depends on ARCH_EXYNOS || COMPILE_TEST
 	select PM_OPP
+	select REGMAP_MMIO
 	help
 	  This add the devfreq-event driver for Exynos SoC. It provides NoC
 	  (Network on Chip) Probe counters to measure the bandwidth of AXI bus.
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index a584140..49e712a 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -176,9 +176,6 @@
 	return 0;
 
 out:
-	edata->load_count = 0;
-	edata->total_count = 0;
-
 	dev_err(nocp->dev, "Failed to read the counter of NoC probe device\n");
 
 	return ret;
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index ddaee60..cf04d24 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -586,6 +586,22 @@
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 
+static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+				      enum dma_data_direction direction)
+{
+	bool write = (direction == DMA_BIDIRECTIONAL ||
+		      direction == DMA_TO_DEVICE);
+	struct reservation_object *resv = dmabuf->resv;
+	long ret;
+
+	/* Wait on any implicit rendering fences */
+	ret = reservation_object_wait_timeout_rcu(resv, write, true,
+						  MAX_SCHEDULE_TIMEOUT);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
 
 /**
  * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
@@ -608,6 +624,13 @@
 	if (dmabuf->ops->begin_cpu_access)
 		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
 
+	/* Ensure that all fences are waited upon - but we first allow
+	 * the native handler the chance to do so more efficiently if it
+	 * chooses. A double invocation here will be reasonably cheap no-op.
+	 */
+	if (ret == 0)
+		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c
index a8731c8..f1989fc 100644
--- a/drivers/dma-buf/fence-array.c
+++ b/drivers/dma-buf/fence-array.c
@@ -99,6 +99,7 @@
 	.wait = fence_default_wait,
 	.release = fence_array_release,
 };
+EXPORT_SYMBOL(fence_array_ops);
 
 /**
  * fence_array_create - Create a custom fence array
@@ -106,14 +107,14 @@
  * @fences:		[in]	array containing the fences
  * @context:		[in]	fence context to use
  * @seqno:		[in]	sequence number to use
- * @signal_on_any	[in]	signal on any fence in the array
+ * @signal_on_any:	[in]	signal on any fence in the array
  *
  * Allocate a fence_array object and initialize the base fence with fence_init().
  * In case of error it returns NULL.
  *
- * The caller should allocte the fences array with num_fences size
+ * The caller should allocate the fences array with num_fences size
  * and fill it with the fences it wants to add to the object. Ownership of this
- * array is take and fence_put() is used on each fence on release.
+ * array is taken and fence_put() is used on each fence on release.
  *
  * If @signal_on_any is true the fence array signals if any fence in the array
  * signals, otherwise it signals when all fences in the array signal.
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 9566a62..723d8af 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -205,7 +205,7 @@
  * @fence: the shared fence to add
  *
  * Add a fence to a shared slot, obj->lock must be held, and
- * reservation_object_reserve_shared_fence has been called.
+ * reservation_object_reserve_shared() has been called.
  */
 void reservation_object_add_shared_fence(struct reservation_object *obj,
 					 struct fence *fence)
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index fab9520..2dd4c3d 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -135,10 +135,16 @@
 	int i;
 
 	seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
-		   sync_status_str(atomic_read(&sync_file->status)));
+		   sync_status_str(!fence_is_signaled(sync_file->fence)));
 
-	for (i = 0; i < sync_file->num_fences; ++i)
-		sync_print_fence(s, sync_file->cbs[i].fence, true);
+	if (fence_is_array(sync_file->fence)) {
+		struct fence_array *array = to_fence_array(sync_file->fence);
+
+		for (i = 0; i < array->num_fences; ++i)
+			sync_print_fence(s, array->fences[i], true);
+	} else {
+		sync_print_fence(s, sync_file->fence, true);
+	}
 }
 
 static int sync_debugfs_show(struct seq_file *s, void *unused)
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 9aaa608..b29a9e8 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -28,11 +28,11 @@
 
 static const struct file_operations sync_file_fops;
 
-static struct sync_file *sync_file_alloc(int size)
+static struct sync_file *sync_file_alloc(void)
 {
 	struct sync_file *sync_file;
 
-	sync_file = kzalloc(size, GFP_KERNEL);
+	sync_file = kzalloc(sizeof(*sync_file), GFP_KERNEL);
 	if (!sync_file)
 		return NULL;
 
@@ -45,6 +45,8 @@
 
 	init_waitqueue_head(&sync_file->wq);
 
+	INIT_LIST_HEAD(&sync_file->cb.node);
+
 	return sync_file;
 
 err:
@@ -54,14 +56,11 @@
 
 static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
 {
-	struct sync_file_cb *check;
 	struct sync_file *sync_file;
 
-	check = container_of(cb, struct sync_file_cb, cb);
-	sync_file = check->sync_file;
+	sync_file = container_of(cb, struct sync_file, cb);
 
-	if (atomic_dec_and_test(&sync_file->status))
-		wake_up_all(&sync_file->wq);
+	wake_up_all(&sync_file->wq);
 }
 
 /**
@@ -76,23 +75,17 @@
 {
 	struct sync_file *sync_file;
 
-	sync_file = sync_file_alloc(offsetof(struct sync_file, cbs[1]));
+	sync_file = sync_file_alloc();
 	if (!sync_file)
 		return NULL;
 
-	sync_file->num_fences = 1;
-	atomic_set(&sync_file->status, 1);
+	sync_file->fence = fence;
+
 	snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
 		 fence->ops->get_driver_name(fence),
 		 fence->ops->get_timeline_name(fence), fence->context,
 		 fence->seqno);
 
-	sync_file->cbs[0].fence = fence;
-	sync_file->cbs[0].sync_file = sync_file;
-	if (fence_add_callback(fence, &sync_file->cbs[0].cb,
-			       fence_check_cb_func))
-		atomic_dec(&sync_file->status);
-
 	return sync_file;
 }
 EXPORT_SYMBOL(sync_file_create);
@@ -121,14 +114,73 @@
 	return NULL;
 }
 
-static void sync_file_add_pt(struct sync_file *sync_file, int *i,
-			     struct fence *fence)
+/**
+ * sync_file_get_fence - get the fence related to the sync_file fd
+ * @fd:		sync_file fd to get the fence from
+ *
+ * Ensures @fd references a valid sync_file and returns a fence that
+ * represents all fence in the sync_file. On error NULL is returned.
+ */
+struct fence *sync_file_get_fence(int fd)
 {
-	sync_file->cbs[*i].fence = fence;
-	sync_file->cbs[*i].sync_file = sync_file;
+	struct sync_file *sync_file;
+	struct fence *fence;
 
-	if (!fence_add_callback(fence, &sync_file->cbs[*i].cb,
-				fence_check_cb_func)) {
+	sync_file = sync_file_fdget(fd);
+	if (!sync_file)
+		return NULL;
+
+	fence = fence_get(sync_file->fence);
+	fput(sync_file->file);
+
+	return fence;
+}
+EXPORT_SYMBOL(sync_file_get_fence);
+
+static int sync_file_set_fence(struct sync_file *sync_file,
+			       struct fence **fences, int num_fences)
+{
+	struct fence_array *array;
+
+	/*
+	 * The reference for the fences in the new sync_file and held
+	 * in add_fence() during the merge procedure, so for num_fences == 1
+	 * we already own a new reference to the fence. For num_fence > 1
+	 * we own the reference of the fence_array creation.
+	 */
+	if (num_fences == 1) {
+		sync_file->fence = fences[0];
+		kfree(fences);
+	} else {
+		array = fence_array_create(num_fences, fences,
+					   fence_context_alloc(1), 1, false);
+		if (!array)
+			return -ENOMEM;
+
+		sync_file->fence = &array->base;
+	}
+
+	return 0;
+}
+
+static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
+{
+	if (fence_is_array(sync_file->fence)) {
+		struct fence_array *array = to_fence_array(sync_file->fence);
+
+		*num_fences = array->num_fences;
+		return array->fences;
+	}
+
+	*num_fences = 1;
+	return &sync_file->fence;
+}
+
+static void add_fence(struct fence **fences, int *i, struct fence *fence)
+{
+	fences[*i] = fence;
+
+	if (!fence_is_signaled(fence)) {
 		fence_get(fence);
 		(*i)++;
 	}
@@ -147,16 +199,24 @@
 static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
 					 struct sync_file *b)
 {
-	int num_fences = a->num_fences + b->num_fences;
 	struct sync_file *sync_file;
-	int i, i_a, i_b;
-	unsigned long size = offsetof(struct sync_file, cbs[num_fences]);
+	struct fence **fences, **nfences, **a_fences, **b_fences;
+	int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
 
-	sync_file = sync_file_alloc(size);
+	sync_file = sync_file_alloc();
 	if (!sync_file)
 		return NULL;
 
-	atomic_set(&sync_file->status, num_fences);
+	a_fences = get_fences(a, &a_num_fences);
+	b_fences = get_fences(b, &b_num_fences);
+	if (a_num_fences > INT_MAX - b_num_fences)
+		return NULL;
+
+	num_fences = a_num_fences + b_num_fences;
+
+	fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
+	if (!fences)
+		goto err;
 
 	/*
 	 * Assume sync_file a and b are both ordered and have no
@@ -165,55 +225,69 @@
 	 * If a sync_file can only be created with sync_file_merge
 	 * and sync_file_create, this is a reasonable assumption.
 	 */
-	for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
-		struct fence *pt_a = a->cbs[i_a].fence;
-		struct fence *pt_b = b->cbs[i_b].fence;
+	for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
+		struct fence *pt_a = a_fences[i_a];
+		struct fence *pt_b = b_fences[i_b];
 
 		if (pt_a->context < pt_b->context) {
-			sync_file_add_pt(sync_file, &i, pt_a);
+			add_fence(fences, &i, pt_a);
 
 			i_a++;
 		} else if (pt_a->context > pt_b->context) {
-			sync_file_add_pt(sync_file, &i, pt_b);
+			add_fence(fences, &i, pt_b);
 
 			i_b++;
 		} else {
 			if (pt_a->seqno - pt_b->seqno <= INT_MAX)
-				sync_file_add_pt(sync_file, &i, pt_a);
+				add_fence(fences, &i, pt_a);
 			else
-				sync_file_add_pt(sync_file, &i, pt_b);
+				add_fence(fences, &i, pt_b);
 
 			i_a++;
 			i_b++;
 		}
 	}
 
-	for (; i_a < a->num_fences; i_a++)
-		sync_file_add_pt(sync_file, &i, a->cbs[i_a].fence);
+	for (; i_a < a_num_fences; i_a++)
+		add_fence(fences, &i, a_fences[i_a]);
 
-	for (; i_b < b->num_fences; i_b++)
-		sync_file_add_pt(sync_file, &i, b->cbs[i_b].fence);
+	for (; i_b < b_num_fences; i_b++)
+		add_fence(fences, &i, b_fences[i_b]);
 
-	if (num_fences > i)
-		atomic_sub(num_fences - i, &sync_file->status);
-	sync_file->num_fences = i;
+	if (i == 0)
+		fences[i++] = fence_get(a_fences[0]);
+
+	if (num_fences > i) {
+		nfences = krealloc(fences, i * sizeof(*fences),
+				  GFP_KERNEL);
+		if (!nfences)
+			goto err;
+
+		fences = nfences;
+	}
+
+	if (sync_file_set_fence(sync_file, fences, i) < 0) {
+		kfree(fences);
+		goto err;
+	}
 
 	strlcpy(sync_file->name, name, sizeof(sync_file->name));
 	return sync_file;
+
+err:
+	fput(sync_file->file);
+	return NULL;
+
 }
 
 static void sync_file_free(struct kref *kref)
 {
 	struct sync_file *sync_file = container_of(kref, struct sync_file,
 						     kref);
-	int i;
 
-	for (i = 0; i < sync_file->num_fences; ++i) {
-		fence_remove_callback(sync_file->cbs[i].fence,
-				      &sync_file->cbs[i].cb);
-		fence_put(sync_file->cbs[i].fence);
-	}
-
+	if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+		fence_remove_callback(sync_file->fence, &sync_file->cb);
+	fence_put(sync_file->fence);
 	kfree(sync_file);
 }
 
@@ -228,17 +302,17 @@
 static unsigned int sync_file_poll(struct file *file, poll_table *wait)
 {
 	struct sync_file *sync_file = file->private_data;
-	int status;
 
 	poll_wait(file, &sync_file->wq, wait);
 
-	status = atomic_read(&sync_file->status);
+	if (!poll_does_not_wait(wait) &&
+	    !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+		if (fence_add_callback(sync_file->fence, &sync_file->cb,
+				       fence_check_cb_func) < 0)
+			wake_up_all(&sync_file->wq);
+	}
 
-	if (!status)
-		return POLLIN;
-	if (status < 0)
-		return POLLERR;
-	return 0;
+	return fence_is_signaled(sync_file->fence) ? POLLIN : 0;
 }
 
 static long sync_file_ioctl_merge(struct sync_file *sync_file,
@@ -315,8 +389,9 @@
 {
 	struct sync_file_info info;
 	struct sync_fence_info *fence_info = NULL;
+	struct fence **fences;
 	__u32 size;
-	int ret, i;
+	int num_fences, ret, i;
 
 	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
 		return -EFAULT;
@@ -324,6 +399,8 @@
 	if (info.flags || info.pad)
 		return -EINVAL;
 
+	fences = get_fences(sync_file, &num_fences);
+
 	/*
 	 * Passing num_fences = 0 means that userspace doesn't want to
 	 * retrieve any sync_fence_info. If num_fences = 0 we skip filling
@@ -333,16 +410,16 @@
 	if (!info.num_fences)
 		goto no_fences;
 
-	if (info.num_fences < sync_file->num_fences)
+	if (info.num_fences < num_fences)
 		return -EINVAL;
 
-	size = sync_file->num_fences * sizeof(*fence_info);
+	size = num_fences * sizeof(*fence_info);
 	fence_info = kzalloc(size, GFP_KERNEL);
 	if (!fence_info)
 		return -ENOMEM;
 
-	for (i = 0; i < sync_file->num_fences; ++i)
-		sync_fill_fence_info(sync_file->cbs[i].fence, &fence_info[i]);
+	for (i = 0; i < num_fences; i++)
+		sync_fill_fence_info(fences[i], &fence_info[i]);
 
 	if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
 			 size)) {
@@ -352,11 +429,8 @@
 
 no_fences:
 	strlcpy(info.name, sync_file->name, sizeof(info.name));
-	info.status = atomic_read(&sync_file->status);
-	if (info.status >= 0)
-		info.status = !info.status;
-
-	info.num_fences = sync_file->num_fences;
+	info.status = fence_is_signaled(sync_file->fence);
+	info.num_fences = num_fences;
 
 	if (copy_to_user((void __user *)arg, &info, sizeof(info)))
 		ret = -EFAULT;
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 631c977..180f0a9 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -566,6 +566,11 @@
 
 	lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
 					  PCILYNX_MAX_REGISTER);
+	if (lynx->registers == NULL) {
+		dev_err(&dev->dev, "Failed to map registers\n");
+		ret = -ENOMEM;
+		goto fail_deallocate_lynx;
+	}
 
 	lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
 				sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
@@ -578,7 +583,7 @@
 	    lynx->rcv_buffer == NULL) {
 		dev_err(&dev->dev, "Failed to allocate receive buffer\n");
 		ret = -ENOMEM;
-		goto fail_deallocate;
+		goto fail_deallocate_buffers;
 	}
 	lynx->rcv_start_pcl->next	= cpu_to_le32(lynx->rcv_pcl_bus);
 	lynx->rcv_pcl->next		= cpu_to_le32(PCL_NEXT_INVALID);
@@ -641,7 +646,7 @@
 		dev_err(&dev->dev,
 			"Failed to allocate shared interrupt %d\n", dev->irq);
 		ret = -EIO;
-		goto fail_deallocate;
+		goto fail_deallocate_buffers;
 	}
 
 	lynx->misc.parent = &dev->dev;
@@ -668,7 +673,7 @@
 	reg_write(lynx, PCI_INT_ENABLE, 0);
 	free_irq(lynx->pci_device->irq, lynx);
 
-fail_deallocate:
+fail_deallocate_buffers:
 	if (lynx->rcv_start_pcl)
 		pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
 				lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
@@ -679,6 +684,8 @@
 		pci_free_consistent(lynx->pci_device, PAGE_SIZE,
 				lynx->rcv_buffer, lynx->rcv_buffer_bus);
 	iounmap(lynx->registers);
+
+fail_deallocate_lynx:
 	kfree(lynx);
 
 fail_disable:
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 45c8817..e422568 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -794,6 +794,22 @@
 	}
 
 	mutex_init(&chip->i2c_lock);
+	/*
+	 * In case we have an i2c-mux controlled by a GPIO provided by an
+	 * expander using the same driver higher on the device tree, read the
+	 * i2c adapter nesting depth and use the retrieved value as lockdep
+	 * subclass for chip->i2c_lock.
+	 *
+	 * REVISIT: This solution is not complete. It protects us from lockdep
+	 * false positives when the expander controlling the i2c-mux is on
+	 * a different level on the device tree, but not when it's on the same
+	 * level on a different branch (in which case the subclass number
+	 * would be the same).
+	 *
+	 * TODO: Once a correct solution is developed, a similar fix should be
+	 * applied to all other i2c-controlled GPIO expanders (and potentially
+	 * regmap-i2c).
+	 */
 	lockdep_set_subclass(&chip->i2c_lock,
 			     i2c_adapter_depth(client->adapter));
 
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index fc35731..483059a 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -108,33 +108,13 @@
 
 source "drivers/gpu/drm/i2c/Kconfig"
 
-config DRM_TDFX
-	tristate "3dfx Banshee/Voodoo3+"
-	depends on DRM && PCI
-	help
-	  Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
-	  graphics card.  If M is selected, the module will be called tdfx.
-
 source "drivers/gpu/drm/arm/Kconfig"
 
-config DRM_R128
-	tristate "ATI Rage 128"
-	depends on DRM && PCI
-	select FW_LOADER
-	help
-	  Choose this option if you have an ATI Rage 128 graphics card.  If M
-	  is selected, the module will be called r128.  AGP support for
-	  this card is strongly suggested (unless you have a PCI version).
-
 config DRM_RADEON
 	tristate "ATI Radeon"
 	depends on DRM && PCI
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
 	select FW_LOADER
         select DRM_KMS_HELPER
-	select DRM_KMS_FB_HELPER
         select DRM_TTM
 	select POWER_SUPPLY
 	select HWMON
@@ -153,12 +133,8 @@
 config DRM_AMDGPU
 	tristate "AMD GPU"
 	depends on DRM && PCI
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
 	select FW_LOADER
         select DRM_KMS_HELPER
-	select DRM_KMS_FB_HELPER
         select DRM_TTM
 	select POWER_SUPPLY
 	select HWMON
@@ -171,55 +147,11 @@
 	  If M is selected, the module will be called amdgpu.
 
 source "drivers/gpu/drm/amd/amdgpu/Kconfig"
-source "drivers/gpu/drm/amd/powerplay/Kconfig"
-
-source "drivers/gpu/drm/amd/acp/Kconfig"
 
 source "drivers/gpu/drm/nouveau/Kconfig"
 
-config DRM_I810
-	tristate "Intel I810"
-	# !PREEMPT because of missing ioctl locking
-	depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
-	help
-	  Choose this option if you have an Intel I810 graphics card.  If M is
-	  selected, the module will be called i810.  AGP support is required
-	  for this driver to work.
-
 source "drivers/gpu/drm/i915/Kconfig"
 
-config DRM_MGA
-	tristate "Matrox g200/g400"
-	depends on DRM && PCI
-	select FW_LOADER
-	help
-	  Choose this option if you have a Matrox G200, G400 or G450 graphics
-	  card.  If M is selected, the module will be called mga.  AGP
-	  support is required for this driver to work.
-
-config DRM_SIS
-	tristate "SiS video cards"
-	depends on DRM && AGP
-	depends on FB_SIS || FB_SIS=n
-	help
-	  Choose this option if you have a SiS 630 or compatible video
-          chipset. If M is selected the module will be called sis. AGP
-          support is required for this driver to work.
-
-config DRM_VIA
-	tristate "Via unichrome video cards"
-	depends on DRM && PCI
-	help
-	  Choose this option if you have a Via unichrome or compatible video
-	  chipset. If M is selected the module will be called via.
-
-config DRM_SAVAGE
-	tristate "Savage video cards"
-	depends on DRM && PCI
-	help
-	  Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
-	  chipset. If M is selected the module will be called savage.
-
 config DRM_VGEM
 	tristate "Virtual GEM provider"
 	depends on DRM
@@ -290,3 +222,81 @@
 source "drivers/gpu/drm/hisilicon/Kconfig"
 
 source "drivers/gpu/drm/mediatek/Kconfig"
+
+# Keep legacy drivers last
+
+menuconfig DRM_LEGACY
+	bool "Enable legacy drivers (DANGEROUS)"
+	depends on DRM
+	help
+	  Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
+	  APIs to user-space, which can be used to circumvent access
+	  restrictions and other security measures. For backwards compatibility
+	  those drivers are still available, but their use is highly
+	  inadvisable and might harm your system.
+
+	  You are recommended to use the safe modeset-only drivers instead, and
+	  perform 3D emulation in user-space.
+
+	  Unless you have strong reasons to go rogue, say "N".
+
+if DRM_LEGACY
+
+config DRM_TDFX
+	tristate "3dfx Banshee/Voodoo3+"
+	depends on DRM && PCI
+	help
+	  Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
+	  graphics card.  If M is selected, the module will be called tdfx.
+
+config DRM_R128
+	tristate "ATI Rage 128"
+	depends on DRM && PCI
+	select FW_LOADER
+	help
+	  Choose this option if you have an ATI Rage 128 graphics card.  If M
+	  is selected, the module will be called r128.  AGP support for
+	  this card is strongly suggested (unless you have a PCI version).
+
+config DRM_I810
+	tristate "Intel I810"
+	# !PREEMPT because of missing ioctl locking
+	depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
+	help
+	  Choose this option if you have an Intel I810 graphics card.  If M is
+	  selected, the module will be called i810.  AGP support is required
+	  for this driver to work.
+
+config DRM_MGA
+	tristate "Matrox g200/g400"
+	depends on DRM && PCI
+	select FW_LOADER
+	help
+	  Choose this option if you have a Matrox G200, G400 or G450 graphics
+	  card.  If M is selected, the module will be called mga.  AGP
+	  support is required for this driver to work.
+
+config DRM_SIS
+	tristate "SiS video cards"
+	depends on DRM && AGP
+	depends on FB_SIS || FB_SIS=n
+	help
+	  Choose this option if you have a SiS 630 or compatible video
+	  chipset. If M is selected the module will be called sis. AGP
+	  support is required for this driver to work.
+
+config DRM_VIA
+	tristate "Via unichrome video cards"
+	depends on DRM && PCI
+	help
+	  Choose this option if you have a Via unichrome or compatible video
+	  chipset. If M is selected the module will be called via.
+
+config DRM_SAVAGE
+	tristate "Savage video cards"
+	depends on DRM && PCI
+	help
+	  Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
+	  chipset. If M is selected the module will be called savage.
+
+endif # DRM_LEGACY
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 0238bf8..25c7204 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,7 +12,10 @@
 		drm_info.o drm_debugfs.o drm_encoder_slave.o \
 		drm_trace_points.o drm_global.o drm_prime.o \
 		drm_rect.o drm_vma_manager.o drm_flip_work.o \
-		drm_modeset_lock.o drm_atomic.o drm_bridge.o
+		drm_modeset_lock.o drm_atomic.o drm_bridge.o \
+		drm_framebuffer.o drm_connector.o drm_blend.o \
+		drm_encoder.o drm_mode_object.o drm_property.o \
+		drm_plane.o drm_color_mgmt.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -24,7 +27,7 @@
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
 		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
 		drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
-		drm_simple_kms_helper.o drm_blend.o
+		drm_simple_kms_helper.o drm_modeset_helper.o
 
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
@@ -46,7 +49,7 @@
 obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
 obj-$(CONFIG_DRM_MGA)	+= mga/
 obj-$(CONFIG_DRM_I810)	+= i810/
-obj-$(CONFIG_DRM_I915)  += i915/
+obj-$(CONFIG_DRM_I915)	+= i915/
 obj-$(CONFIG_DRM_MGAG200) += mgag200/
 obj-$(CONFIG_DRM_VC4)  += vc4/
 obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 7335c04..61360e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -1,3 +1,10 @@
+config DRM_AMDGPU_SI
+	bool "Enable amdgpu support for SI parts"
+	depends on DRM_AMDGPU
+	help
+	  Choose this option if you want to enable experimental support
+	  for SI asics.
+
 config DRM_AMDGPU_CIK
 	bool "Enable amdgpu support for CIK parts"
 	depends on DRM_AMDGPU
@@ -25,3 +32,4 @@
 	  Selecting this option creates a debugfs file to inspect the mapped
 	  pages. Uses more memory for housekeeping, enable only for debugging.
 
+source "drivers/gpu/drm/amd/acp/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index c7fcdce..248a05d 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -23,13 +23,16 @@
 	amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
 	atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
 	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
-	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
+	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
+	amdgpu_gtt_mgr.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
 	ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
 	amdgpu_amdkfd_gfx_v7.o
 
+amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
+
 amdgpu-y += \
 	vi.o
 
@@ -50,15 +53,13 @@
 amdgpu-y += \
 	amdgpu_dpm.o \
 	amdgpu_powerplay.o \
-	cz_smc.o cz_dpm.o \
-	tonga_smc.o tonga_dpm.o \
-	fiji_smc.o fiji_dpm.o \
-	iceland_smc.o iceland_dpm.o
+	cz_smc.o cz_dpm.o
 
 # add DCE block
 amdgpu-y += \
 	dce_v10_0.o \
-	dce_v11_0.o
+	dce_v11_0.o \
+	dce_virtual.o
 
 # add GFX block
 amdgpu-y += \
@@ -110,14 +111,10 @@
 amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
 amdgpu-$(CONFIG_MMU_NOTIFIER) += amdgpu_mn.o
 
-ifneq ($(CONFIG_DRM_AMD_POWERPLAY),)
-
 include $(FULL_AMD_PATH)/powerplay/Makefile
 
 amdgpu-y += $(AMD_POWERPLAY_FILES)
 
-endif
-
 obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
 
 CFLAGS_amdgpu_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index 0619269..b8d6667 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -90,6 +90,7 @@
 #define ENCODER_OBJECT_ID_INTERNAL_VCE            0x24
 #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3        0x25
 #define ENCODER_OBJECT_ID_INTERNAL_AMCLK          0x27
+#define ENCODER_OBJECT_ID_VIRTUAL                 0x28
 
 #define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO    0xFF
 
@@ -119,6 +120,7 @@
 #define CONNECTOR_OBJECT_ID_eDP                   0x14
 #define CONNECTOR_OBJECT_ID_MXM                   0x15
 #define CONNECTOR_OBJECT_ID_LVDS_eDP              0x16
+#define CONNECTOR_OBJECT_ID_VIRTUAL               0x17
 
 /* deleted */
 
@@ -147,6 +149,7 @@
 #define GRAPH_OBJECT_ENUM_ID5                     0x05
 #define GRAPH_OBJECT_ENUM_ID6                     0x06
 #define GRAPH_OBJECT_ENUM_ID7                     0x07
+#define GRAPH_OBJECT_ENUM_VIRTUAL                 0x08
 
 /****************************************************/
 /* Graphics Object ID Bit definition                */
@@ -408,6 +411,10 @@
                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
                                                   ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
 
+#define ENCODER_VIRTUAL_ENUM_VIRTUAL            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT)
+
 /****************************************************/
 /* Connector Object ID definition - Shared with BIOS */
 /****************************************************/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 700c56b..039b57e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -51,11 +51,13 @@
 #include "amdgpu_ih.h"
 #include "amdgpu_irq.h"
 #include "amdgpu_ucode.h"
+#include "amdgpu_ttm.h"
 #include "amdgpu_gds.h"
 #include "amd_powerplay.h"
 #include "amdgpu_acp.h"
 
 #include "gpu_scheduler.h"
+#include "amdgpu_virt.h"
 
 /*
  * Modules parameters.
@@ -63,6 +65,7 @@
 extern int amdgpu_modeset;
 extern int amdgpu_vram_limit;
 extern int amdgpu_gart_size;
+extern int amdgpu_moverate;
 extern int amdgpu_benchmarking;
 extern int amdgpu_testing;
 extern int amdgpu_audio;
@@ -91,6 +94,9 @@
 extern unsigned amdgpu_cg_mask;
 extern unsigned amdgpu_pg_mask;
 extern char *amdgpu_disable_cu;
+extern int amdgpu_sclk_deep_sleep_en;
+extern char *amdgpu_virtual_display;
+extern unsigned amdgpu_pp_feature_mask;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
@@ -105,7 +111,7 @@
 #define AMDGPU_MAX_RINGS			16
 #define AMDGPU_MAX_GFX_RINGS			1
 #define AMDGPU_MAX_COMPUTE_RINGS		8
-#define AMDGPU_MAX_VCE_RINGS			2
+#define AMDGPU_MAX_VCE_RINGS			3
 
 /* max number of IP instances */
 #define AMDGPU_MAX_SDMA_INSTANCES		2
@@ -248,10 +254,9 @@
 			 uint64_t pe, uint64_t src,
 			 unsigned count);
 	/* write pte one entry at a time with addr mapping */
-	void (*write_pte)(struct amdgpu_ib *ib,
-			  const dma_addr_t *pages_addr, uint64_t pe,
-			  uint64_t addr, unsigned count,
-			  uint32_t incr, uint32_t flags);
+	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
+			  uint64_t value, unsigned count,
+			  uint32_t incr);
 	/* for linear pte/pde updates without addr mapping */
 	void (*set_pte_pde)(struct amdgpu_ib *ib,
 			    uint64_t pe,
@@ -316,6 +321,10 @@
 	/* note usage for clock and power gating */
 	void (*begin_use)(struct amdgpu_ring *ring);
 	void (*end_use)(struct amdgpu_ring *ring);
+	void (*emit_switch_buffer) (struct amdgpu_ring *ring);
+	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
+	unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
+	unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
 };
 
 /*
@@ -396,48 +405,8 @@
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
 /*
- * TTM.
+ * BO.
  */
-
-#define AMDGPU_TTM_LRU_SIZE	20
-
-struct amdgpu_mman_lru {
-	struct list_head		*lru[TTM_NUM_MEM_TYPES];
-	struct list_head		*swap_lru;
-};
-
-struct amdgpu_mman {
-	struct ttm_bo_global_ref        bo_global_ref;
-	struct drm_global_reference	mem_global_ref;
-	struct ttm_bo_device		bdev;
-	bool				mem_global_referenced;
-	bool				initialized;
-
-#if defined(CONFIG_DEBUG_FS)
-	struct dentry			*vram;
-	struct dentry			*gtt;
-#endif
-
-	/* buffer handling */
-	const struct amdgpu_buffer_funcs	*buffer_funcs;
-	struct amdgpu_ring			*buffer_funcs_ring;
-	/* Scheduler entity for buffer moves */
-	struct amd_sched_entity			entity;
-
-	/* custom LRU management */
-	struct amdgpu_mman_lru			log2_size[AMDGPU_TTM_LRU_SIZE];
-	/* guard for log2_size array, don't add anything in between */
-	struct amdgpu_mman_lru			guard;
-};
-
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
-		       uint64_t src_offset,
-		       uint64_t dst_offset,
-		       uint32_t byte_count,
-		       struct reservation_object *resv,
-		       struct fence **fence);
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
-
 struct amdgpu_bo_list_entry {
 	struct amdgpu_bo		*robj;
 	struct ttm_validate_buffer	tv;
@@ -476,8 +445,6 @@
 #define AMDGPU_GEM_DOMAIN_MAX		0x3
 
 struct amdgpu_bo {
-	/* Protected by gem.mutex */
-	struct list_head		list;
 	/* Protected by tbo.reserved */
 	u32				prefered_domains;
 	u32				allowed_domains;
@@ -500,10 +467,12 @@
 	struct amdgpu_device		*adev;
 	struct drm_gem_object		gem_base;
 	struct amdgpu_bo		*parent;
+	struct amdgpu_bo		*shadow;
 
 	struct ttm_bo_kmap_obj		dma_buf_vmap;
 	struct amdgpu_mn		*mn;
 	struct list_head		mn_list;
+	struct list_head		shadow_list;
 };
 #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
 
@@ -653,6 +622,7 @@
 int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		     int pages, struct page **pagelist,
 		     dma_addr_t *dma_addr, uint32_t flags);
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
 
 /*
  * GPU MC structures, functions & helpers
@@ -679,6 +649,8 @@
 	uint32_t                fw_version;
 	struct amdgpu_irq_src	vm_fault;
 	uint32_t		vram_type;
+	uint32_t                srbm_soft_reset;
+	struct amdgpu_mode_mc_save save;
 };
 
 /*
@@ -723,13 +695,14 @@
  */
 
 struct amdgpu_flip_work {
-	struct work_struct		flip_work;
+	struct delayed_work		flip_work;
 	struct work_struct		unpin_work;
 	struct amdgpu_device		*adev;
 	int				crtc_id;
+	u32				target_vblank;
 	uint64_t			base;
 	struct drm_pending_vblank_event *event;
-	struct amdgpu_bo		*old_rbo;
+	struct amdgpu_bo		*old_abo;
 	struct fence			*excl;
 	unsigned			shared_count;
 	struct fence			**shared;
@@ -817,13 +790,17 @@
 /* maximum number of VMIDs */
 #define AMDGPU_NUM_VM	16
 
+/* Maximum number of PTEs the hardware can write with one command */
+#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF
+
 /* number of entries in page table */
 #define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
 
 /* PTBs (Page Table Blocks) need to be aligned to 32K */
 #define AMDGPU_VM_PTB_ALIGN_SIZE   32768
-#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
-#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)
+
+/* LOG2 number of continuous pages for the fragment field */
+#define AMDGPU_LOG2_PAGES_PER_FRAG 4
 
 #define AMDGPU_PTE_VALID	(1 << 0)
 #define AMDGPU_PTE_SYSTEM	(1 << 1)
@@ -835,10 +812,7 @@
 #define AMDGPU_PTE_READABLE	(1 << 5)
 #define AMDGPU_PTE_WRITEABLE	(1 << 6)
 
-/* PTE (Page Table Entry) fragment field for different page sizes */
-#define AMDGPU_PTE_FRAG_4KB	(0 << 7)
-#define AMDGPU_PTE_FRAG_64KB	(4 << 7)
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
+#define AMDGPU_PTE_FRAG(x)	((x & 0x1f) << 7)
 
 /* How to programm VM fault handling */
 #define AMDGPU_VM_FAULT_STOP_NEVER	0
@@ -848,6 +822,7 @@
 struct amdgpu_vm_pt {
 	struct amdgpu_bo_list_entry	entry;
 	uint64_t			addr;
+	uint64_t			shadow_addr;
 };
 
 struct amdgpu_vm {
@@ -950,7 +925,6 @@
 		      struct amdgpu_job *job);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
@@ -959,7 +933,7 @@
 			     struct amdgpu_sync *sync);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			struct amdgpu_bo_va *bo_va,
-			struct ttm_mem_reg *mem);
+			bool clear);
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 			     struct amdgpu_bo *bo);
 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
@@ -994,6 +968,7 @@
 	spinlock_t		ring_lock;
 	struct fence            **fences;
 	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
+	bool preamble_presented;
 };
 
 struct amdgpu_ctx_mgr {
@@ -1197,6 +1172,10 @@
 	unsigned			ce_ram_size;
 	struct amdgpu_cu_info		cu_info;
 	const struct amdgpu_gfx_funcs	*funcs;
+
+	/* reset mask */
+	uint32_t                        grbm_soft_reset;
+	uint32_t                        srbm_soft_reset;
 };
 
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1249,11 +1228,16 @@
 	struct fence			*fence;
 	uint64_t			bytes_moved_threshold;
 	uint64_t			bytes_moved;
+	struct amdgpu_bo_list_entry	*evictable;
 
 	/* user fence */
 	struct amdgpu_bo_list_entry	uf_entry;
 };
 
+#define AMDGPU_PREAMBLE_IB_PRESENT          (1 << 0) /* bit set means command submit involves a preamble IB */
+#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST    (1 << 1) /* bit set means preamble IB is first presented in belonging context */
+#define AMDGPU_HAVE_CTX_SWITCH              (1 << 2) /* bit set means context switch occured */
+
 struct amdgpu_job {
 	struct amd_sched_job    base;
 	struct amdgpu_device	*adev;
@@ -1262,9 +1246,10 @@
 	struct amdgpu_sync	sync;
 	struct amdgpu_ib	*ibs;
 	struct fence		*fence; /* the hw fence */
+	uint32_t		preamble_status;
 	uint32_t		num_ibs;
 	void			*owner;
-	uint64_t		ctx;
+	uint64_t		fence_ctx; /* the fence_context this job uses */
 	bool                    vm_needs_flush;
 	unsigned		vm_id;
 	uint64_t		vm_pd_addr;
@@ -1685,6 +1670,7 @@
 	bool			address_64_bit;
 	bool			use_ctx_buf;
 	struct amd_sched_entity entity;
+	uint32_t                srbm_soft_reset;
 };
 
 /*
@@ -1711,6 +1697,8 @@
 	struct amdgpu_irq_src	irq;
 	unsigned		harvest_config;
 	struct amd_sched_entity	entity;
+	uint32_t                srbm_soft_reset;
+	unsigned		num_rings;
 };
 
 /*
@@ -1728,9 +1716,14 @@
 
 struct amdgpu_sdma {
 	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+#ifdef CONFIG_DRM_AMDGPU_SI
+	//SI DMA has a difference trap irq number for the second engine
+	struct amdgpu_irq_src	trap_irq_1;
+#endif
 	struct amdgpu_irq_src	trap_irq;
 	struct amdgpu_irq_src	illegal_inst_irq;
 	int			num_instances;
+	uint32_t                    srbm_soft_reset;
 };
 
 /*
@@ -1832,6 +1825,7 @@
 	bool (*read_disabled_bios)(struct amdgpu_device *adev);
 	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
 				   u8 *bios, u32 length_bytes);
+	void (*detect_hw_virtualization) (struct amdgpu_device *adev);
 	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
 			     u32 sh_num, u32 reg_offset, u32 *value);
 	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
@@ -1841,8 +1835,9 @@
 	/* MM block clocks */
 	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
 	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
-	/* query virtual capabilities */
-	u32 (*get_virtual_caps)(struct amdgpu_device *adev);
+	/* static power management */
+	int (*get_pcie_lanes)(struct amdgpu_device *adev);
+	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
 };
 
 /*
@@ -1935,16 +1930,6 @@
 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
 void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
 
-
-/* GPU virtualization */
-#define AMDGPU_VIRT_CAPS_SRIOV_EN       (1 << 0)
-#define AMDGPU_VIRT_CAPS_IS_VF          (1 << 1)
-struct amdgpu_virtualization {
-	bool supports_sr_iov;
-	bool is_virtual;
-	u32 caps;
-};
-
 /*
  * Core structure, functions and helpers.
  */
@@ -1958,6 +1943,8 @@
 	bool valid;
 	bool sw;
 	bool hw;
+	bool late_initialized;
+	bool hang;
 };
 
 struct amdgpu_device {
@@ -2016,6 +2003,8 @@
 	spinlock_t pcie_idx_lock;
 	amdgpu_rreg_t			pcie_rreg;
 	amdgpu_wreg_t			pcie_wreg;
+	amdgpu_rreg_t			pciep_rreg;
+	amdgpu_wreg_t			pciep_wreg;
 	/* protects concurrent UVD register access */
 	spinlock_t uvd_ctx_idx_lock;
 	amdgpu_rreg_t			uvd_ctx_rreg;
@@ -2056,7 +2045,16 @@
 	atomic64_t			num_evictions;
 	atomic_t			gpu_reset_counter;
 
+	/* data for buffer migration throttling */
+	struct {
+		spinlock_t		lock;
+		s64			last_update_us;
+		s64			accum_us; /* accumulated microseconds */
+		u32			log2_max_MBps;
+	} mm_stats;
+
 	/* display */
+	bool				enable_virtual_display;
 	struct amdgpu_mode_info		mode_info;
 	struct work_struct		hotplug_work;
 	struct amdgpu_irq_src		crtc_irq;
@@ -2119,6 +2117,14 @@
 	struct kfd_dev          *kfd;
 
 	struct amdgpu_virtualization virtualization;
+
+	/* link all shadow bo */
+	struct list_head                shadow_list;
+	struct mutex                    shadow_list_lock;
+	/* link all gtt */
+	spinlock_t			gtt_list_lock;
+	struct list_head                gtt_list;
+
 };
 
 bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2151,6 +2157,8 @@
 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
 #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
+#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
+#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
 #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
 #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
 #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
@@ -2194,6 +2202,9 @@
 #define REG_GET_FIELD(value, reg, field)				\
 	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
 
+#define WREG32_FIELD(reg, field, val)	\
+	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+
 /*
  * BIOS helpers.
  */
@@ -2237,14 +2248,17 @@
 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
-#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
+#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
+#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
+#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
+#define amdgpu_asic_detect_hw_virtualization(adev) (adev)->asic_funcs->detect_hw_virtualization((adev))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
-#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
+#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
@@ -2259,9 +2273,13 @@
 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
 #define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
+#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
+#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
+#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
+#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2293,6 +2311,11 @@
 #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
 #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
 
+#define amdgpu_dpm_read_sensor(adev, idx, value) \
+	((adev)->pp_enabled ? \
+		(adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
+		-EINVAL)
+
 #define amdgpu_dpm_get_temperature(adev) \
 	((adev)->pp_enabled ?						\
 	      (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
@@ -2344,11 +2367,6 @@
 	      (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
 	      (adev)->pm.funcs->powergate_vce((adev), (g)))
 
-#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
-	      (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)))
-
 #define amdgpu_dpm_get_current_power_state(adev) \
 	(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
 
@@ -2389,6 +2407,7 @@
 
 /* Common functions */
 int amdgpu_gpu_reset(struct amdgpu_device *adev);
+bool amdgpu_need_backup(struct amdgpu_device *adev);
 void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
@@ -2397,7 +2416,7 @@
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		       u32 ip_instance, u32 ring,
 		       struct amdgpu_ring **out_ring);
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
@@ -2414,6 +2433,10 @@
 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
 void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
+u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev);
+int amdgpu_ttm_global_init(struct amdgpu_device *adev);
+int amdgpu_ttm_init(struct amdgpu_device *adev);
+void amdgpu_ttm_fini(struct amdgpu_device *adev);
 void amdgpu_program_register_sequence(struct amdgpu_device *adev,
 					     const u32 *registers,
 					     const u32 array_size);
@@ -2425,11 +2448,13 @@
 void amdgpu_unregister_atpx_handler(void);
 bool amdgpu_has_atpx_dgpu_power_cntl(void);
 bool amdgpu_is_atpx_hybrid(void);
+bool amdgpu_atpx_dgpu_req_power_for_displays(void);
 #else
 static inline void amdgpu_register_atpx_handler(void) {}
 static inline void amdgpu_unregister_atpx_handler(void) {}
 static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
 static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
+static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
 #endif
 
 /*
@@ -2446,8 +2471,8 @@
 				 struct drm_file *file_priv);
 void amdgpu_driver_preclose_kms(struct drm_device *dev,
 				struct drm_file *file_priv);
-int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
-int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
+int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
+int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
 u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
 int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
 void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
@@ -2493,6 +2518,7 @@
 struct amdgpu_bo_va_mapping *
 amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 		       uint64_t addr, struct amdgpu_bo **bo);
+int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
 
 #include "amdgpu_object.h"
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 5cd7b73..5796539 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -25,6 +25,7 @@
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <linux/power_supply.h>
+#include <linux/pm_runtime.h>
 #include <acpi/video.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
@@ -333,6 +334,16 @@
 #endif
 		}
 	}
+	if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+		if ((adev->flags & AMD_IS_PX) &&
+		    amdgpu_atpx_dgpu_req_power_for_displays()) {
+			pm_runtime_get_sync(adev->ddev->dev);
+			/* Just fire off a uevent and let userspace tell us what to do */
+			drm_helper_hpd_irq_event(adev->ddev);
+			pm_runtime_mark_last_busy(adev->ddev->dev);
+			pm_runtime_put_autosuspend(adev->ddev->dev);
+		}
+	}
 	/* TODO: check other events */
 
 	/* We've handled the event, stop the notifier chain. The ACPI interface
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index d080d08..dba8a5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -143,14 +143,6 @@
 	return r;
 }
 
-u32 pool_to_domain(enum kgd_memory_pool p)
-{
-	switch (p) {
-	case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
-	default: return AMDGPU_GEM_DOMAIN_GTT;
-	}
-}
-
 int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 			void **mem_obj, uint64_t *gpu_addr,
 			void **cpu_ptr)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 362bedc..1a0a5f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -103,11 +103,11 @@
 				uint32_t pipe_id, uint32_t queue_id);
 
 static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
-				unsigned int timeout, uint32_t pipe_id,
+				unsigned int utimeout, uint32_t pipe_id,
 				uint32_t queue_id);
 static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
 static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
-				unsigned int timeout);
+				unsigned int utimeout);
 static int kgd_address_watch_disable(struct kgd_dev *kgd);
 static int kgd_address_watch_execute(struct kgd_dev *kgd,
 					unsigned int watch_point_id,
@@ -437,11 +437,12 @@
 }
 
 static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
-				unsigned int timeout, uint32_t pipe_id,
+				unsigned int utimeout, uint32_t pipe_id,
 				uint32_t queue_id)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t temp;
+	int timeout = utimeout;
 
 	acquire_queue(kgd, pipe_id, queue_id);
 	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
@@ -452,9 +453,8 @@
 		temp = RREG32(mmCP_HQD_ACTIVE);
 		if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
 			break;
-		if (timeout == 0) {
-			pr_err("kfd: cp queue preemption time out (%dms)\n",
-				temp);
+		if (timeout <= 0) {
+			pr_err("kfd: cp queue preemption time out.\n");
 			release_queue(kgd);
 			return -ETIME;
 		}
@@ -467,12 +467,13 @@
 }
 
 static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
-				unsigned int timeout)
+				unsigned int utimeout)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct cik_sdma_rlc_registers *m;
 	uint32_t sdma_base_addr;
 	uint32_t temp;
+	int timeout = utimeout;
 
 	m = get_sdma_mqd(mqd);
 	sdma_base_addr = get_sdma_base_addr(m);
@@ -485,7 +486,7 @@
 		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
 		if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
 			break;
-		if (timeout == 0)
+		if (timeout <= 0)
 			return -ETIME;
 		msleep(20);
 		timeout -= 20;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 04b744d..6697612 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -62,10 +62,10 @@
 		uint32_t pipe_id, uint32_t queue_id);
 static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
 static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
-				unsigned int timeout, uint32_t pipe_id,
+				unsigned int utimeout, uint32_t pipe_id,
 				uint32_t queue_id);
 static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
-				unsigned int timeout);
+				unsigned int utimeout);
 static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
 static int kgd_address_watch_disable(struct kgd_dev *kgd);
 static int kgd_address_watch_execute(struct kgd_dev *kgd,
@@ -349,11 +349,12 @@
 }
 
 static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
-				unsigned int timeout, uint32_t pipe_id,
+				unsigned int utimeout, uint32_t pipe_id,
 				uint32_t queue_id)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t temp;
+	int timeout = utimeout;
 
 	acquire_queue(kgd, pipe_id, queue_id);
 
@@ -363,9 +364,8 @@
 		temp = RREG32(mmCP_HQD_ACTIVE);
 		if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
 			break;
-		if (timeout == 0) {
-			pr_err("kfd: cp queue preemption time out (%dms)\n",
-				temp);
+		if (timeout <= 0) {
+			pr_err("kfd: cp queue preemption time out.\n");
 			release_queue(kgd);
 			return -ETIME;
 		}
@@ -378,12 +378,13 @@
 }
 
 static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
-				unsigned int timeout)
+				unsigned int utimeout)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct cik_sdma_rlc_registers *m;
 	uint32_t sdma_base_addr;
 	uint32_t temp;
+	int timeout = utimeout;
 
 	m = get_sdma_mqd(mqd);
 	sdma_base_addr = get_sdma_base_addr(m);
@@ -396,7 +397,7 @@
 		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
 		if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
 			break;
-		if (timeout == 0)
+		if (timeout <= 0)
 			return -ETIME;
 		msleep(20);
 		timeout -= 20;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index fe872b8..8e6bf54 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -259,6 +259,33 @@
 	DRM_MODE_CONNECTOR_Unknown
 };
 
+bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	struct atom_context *ctx = mode_info->atom_context;
+	int index = GetIndexIntoMasterTable(DATA, Object_Header);
+	u16 size, data_offset;
+	u8 frev, crev;
+	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
+	ATOM_OBJECT_HEADER *obj_header;
+
+	if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+		return false;
+
+	if (crev < 2)
+		return false;
+
+	obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
+	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
+	    (ctx->bios + data_offset +
+	     le16_to_cpu(obj_header->usDisplayPathTableOffset));
+
+	if (path_obj->ucNumOfDispPath)
+		return true;
+	else
+		return false;
+}
+
 bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev)
 {
 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
@@ -964,6 +991,48 @@
 		return -EINVAL;
 
 	switch (crev) {
+	case 2:
+	case 3:
+	case 5:
+		/* r6xx, r7xx, evergreen, ni, si.
+		 * TODO: add support for asic_type <= CHIP_RV770*/
+		if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
+			args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+
+			amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+			dividers->post_div = args.v3.ucPostDiv;
+			dividers->enable_post_div = (args.v3.ucCntlFlag &
+						     ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+			dividers->enable_dithen = (args.v3.ucCntlFlag &
+						   ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+			dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
+			dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
+			dividers->ref_div = args.v3.ucRefDiv;
+			dividers->vco_mode = (args.v3.ucCntlFlag &
+					      ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+		} else {
+			/* for SI we use ComputeMemoryClockParam for memory plls */
+			if (adev->asic_type >= CHIP_TAHITI)
+				return -EINVAL;
+			args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+			if (strobe_mode)
+				args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
+
+			amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+			dividers->post_div = args.v5.ucPostDiv;
+			dividers->enable_post_div = (args.v5.ucCntlFlag &
+						     ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+			dividers->enable_dithen = (args.v5.ucCntlFlag &
+						   ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+			dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
+			dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
+			dividers->ref_div = args.v5.ucRefDiv;
+			dividers->vco_mode = (args.v5.ucCntlFlag &
+					      ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+		}
+		break;
 	case 4:
 		/* fusion */
 		args.v4.ulClock = cpu_to_le32(clock);	/* 10 khz */
@@ -1108,6 +1177,32 @@
 	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
+void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
+					  u16 *vddc, u16 *vddci, u16 *mvdd)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+	u8 frev, crev;
+	u16 data_offset;
+	union firmware_info *firmware_info;
+
+	*vddc = 0;
+	*vddci = 0;
+	*mvdd = 0;
+
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		firmware_info =
+			(union firmware_info *)(mode_info->atom_context->bios +
+						data_offset);
+		*vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+		if ((frev == 2) && (crev >= 2)) {
+			*vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
+			*mvdd = le16_to_cpu(firmware_info->info_22.usBootUpMVDDCVoltage);
+		}
+	}
+}
+
 union set_voltage {
 	struct _SET_VOLTAGE_PS_ALLOCATION alloc;
 	struct _SET_VOLTAGE_PARAMETERS v1;
@@ -1115,6 +1210,52 @@
 	struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
 };
 
+int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
+			     u16 voltage_id, u16 *voltage)
+{
+	union set_voltage args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+	u8 frev, crev;
+
+	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
+		return -EINVAL;
+
+	switch (crev) {
+	case 1:
+		return -EINVAL;
+	case 2:
+		args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
+		args.v2.ucVoltageMode = 0;
+		args.v2.usVoltageLevel = 0;
+
+		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		*voltage = le16_to_cpu(args.v2.usVoltageLevel);
+		break;
+	case 3:
+		args.v3.ucVoltageType = voltage_type;
+		args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
+		args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
+
+		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		*voltage = le16_to_cpu(args.v3.usVoltageLevel);
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
+						      u16 *voltage,
+						      u16 leakage_idx)
+{
+	return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
+}
+
 void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
 				 u16 voltage_level,
 				 u8 voltage_type)
@@ -1335,6 +1476,50 @@
 	return NULL;
 }
 
+int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
+			      u8 voltage_type,
+			      u8 *svd_gpio_id, u8 *svc_gpio_id)
+{
+	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+	u8 frev, crev;
+	u16 data_offset, size;
+	union voltage_object_info *voltage_info;
+	union voltage_object *voltage_object = NULL;
+
+	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		voltage_info = (union voltage_object_info *)
+			(adev->mode_info.atom_context->bios + data_offset);
+
+		switch (frev) {
+		case 3:
+			switch (crev) {
+			case 1:
+				voltage_object = (union voltage_object *)
+					amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
+								      voltage_type,
+								      VOLTAGE_OBJ_SVID2);
+				if (voltage_object) {
+					*svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId;
+					*svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId;
+				} else {
+					return -EINVAL;
+				}
+				break;
+			default:
+				DRM_ERROR("unknown voltage object table\n");
+				return -EINVAL;
+			}
+			break;
+		default:
+			DRM_ERROR("unknown voltage object table\n");
+			return -EINVAL;
+		}
+
+	}
+	return 0;
+}
+
 bool
 amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
 				u8 voltage_type, u8 voltage_mode)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 8c2e696..1735615 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -140,6 +140,8 @@
 							  uint8_t id);
 void amdgpu_atombios_i2c_init(struct amdgpu_device *adev);
 
+bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev);
+
 bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev);
 
 int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev);
@@ -206,5 +208,19 @@
 void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
 
 void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
-
+int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
+			     u16 voltage_id, u16 *voltage);
+int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
+						      u16 *voltage,
+						      u16 leakage_idx);
+void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
+					  u16 *vddc, u16 *vddci, u16 *mvdd);
+int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
+				       u8 clock_type,
+				       u32 clock,
+				       bool strobe_mode,
+				       struct atom_clock_dividers *dividers);
+int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
+			      u8 voltage_type,
+			      u8 *svd_gpio_id, u8 *svc_gpio_id);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 10b5ddf..dae35a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -29,6 +29,7 @@
 	acpi_handle handle;
 	struct amdgpu_atpx_functions functions;
 	bool is_hybrid;
+	bool dgpu_req_power_for_displays;
 };
 
 static struct amdgpu_atpx_priv {
@@ -73,6 +74,10 @@
 	return amdgpu_atpx_priv.atpx.is_hybrid;
 }
 
+bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
+	return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
+}
+
 /**
  * amdgpu_atpx_call - call an ATPX method
  *
@@ -204,6 +209,10 @@
 		atpx->is_hybrid = true;
 	}
 
+	atpx->dgpu_req_power_for_displays = false;
+	if (valid_bits & ATPX_DGPU_REQ_POWER_FOR_DISPLAYS)
+		atpx->dgpu_req_power_for_displays = true;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 33e47a4..3453052 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -39,7 +39,8 @@
 	start_jiffies = jiffies;
 	for (i = 0; i < n; i++) {
 		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
+		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
+				       false);
 		if (r)
 			goto exit_do_move;
 		r = fence_wait(fence, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index bc0440f..7a8bfa3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -616,7 +616,7 @@
 	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
 }
 
-int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
+static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
 				  enum amd_ip_block_type block_type,
 				  enum amd_clockgating_state state)
 {
@@ -637,7 +637,7 @@
 	return r;
 }
 
-int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
+static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
 				  enum amd_ip_block_type block_type,
 				  enum amd_powergating_state state)
 {
@@ -711,6 +711,47 @@
 	return -EINVAL;
 }
 
+static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
+					enum cgs_ucode_id type)
+{
+	CGS_FUNC_ADEV;
+	uint16_t fw_version;
+
+	switch (type) {
+		case CGS_UCODE_ID_SDMA0:
+			fw_version = adev->sdma.instance[0].fw_version;
+			break;
+		case CGS_UCODE_ID_SDMA1:
+			fw_version = adev->sdma.instance[1].fw_version;
+			break;
+		case CGS_UCODE_ID_CP_CE:
+			fw_version = adev->gfx.ce_fw_version;
+			break;
+		case CGS_UCODE_ID_CP_PFP:
+			fw_version = adev->gfx.pfp_fw_version;
+			break;
+		case CGS_UCODE_ID_CP_ME:
+			fw_version = adev->gfx.me_fw_version;
+			break;
+		case CGS_UCODE_ID_CP_MEC:
+			fw_version = adev->gfx.mec_fw_version;
+			break;
+		case CGS_UCODE_ID_CP_MEC_JT1:
+			fw_version = adev->gfx.mec_fw_version;
+			break;
+		case CGS_UCODE_ID_CP_MEC_JT2:
+			fw_version = adev->gfx.mec_fw_version;
+			break;
+		case CGS_UCODE_ID_RLC_G:
+			fw_version = adev->gfx.rlc_fw_version;
+			break;
+		default:
+			DRM_ERROR("firmware type %d do not have version\n", type);
+			fw_version = 0;
+	}
+	return fw_version;
+}
+
 static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 					enum cgs_ucode_id type,
 					struct cgs_firmware_info *info)
@@ -741,6 +782,7 @@
 		info->mc_addr = gpu_addr;
 		info->image_size = data_size;
 		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+		info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
 		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
 	} else {
 		char fw_name[30] = {0};
@@ -848,6 +890,12 @@
 	case CGS_SYSTEM_INFO_GFX_SE_INFO:
 		sys_info->value = adev->gfx.config.max_shader_engines;
 		break;
+	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
+		sys_info->value = adev->pdev->subsystem_device;
+		break;
+	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
+		sys_info->value = adev->pdev->subsystem_vendor;
+		break;
 	default:
 		return -ENODEV;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index ff0b55a..e3281d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -168,12 +168,12 @@
 		}
 
 		/* Any defined maximum tmds clock limit we must not exceed? */
-		if (connector->max_tmds_clock > 0) {
+		if (connector->display_info.max_tmds_clock > 0) {
 			/* mode_clock is clock in kHz for mode to be modeset on this connector */
 			mode_clock = amdgpu_connector->pixelclock_for_modeset;
 
 			/* Maximum allowable input clock in kHz */
-			max_tmds_clock = connector->max_tmds_clock * 1000;
+			max_tmds_clock = connector->display_info.max_tmds_clock;
 
 			DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n",
 				  connector->name, mode_clock, max_tmds_clock);
@@ -765,12 +765,20 @@
 	return ret;
 }
 
+static void amdgpu_connector_unregister(struct drm_connector *connector)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+	if (amdgpu_connector->ddc_bus->has_aux) {
+		drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
+		amdgpu_connector->ddc_bus->has_aux = false;
+	}
+}
+
 static void amdgpu_connector_destroy(struct drm_connector *connector)
 {
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-	if (amdgpu_connector->ddc_bus->has_aux)
-		drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
 	amdgpu_connector_free_edid(connector);
 	kfree(amdgpu_connector->con_priv);
 	drm_connector_unregister(connector);
@@ -824,6 +832,7 @@
 	.dpms = drm_helper_connector_dpms,
 	.detect = amdgpu_connector_lvds_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
+	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.set_property = amdgpu_connector_set_lcd_property,
 };
@@ -934,6 +943,7 @@
 	.dpms = drm_helper_connector_dpms,
 	.detect = amdgpu_connector_vga_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
+	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.set_property = amdgpu_connector_set_property,
 };
@@ -1201,6 +1211,7 @@
 	.detect = amdgpu_connector_dvi_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = amdgpu_connector_set_property,
+	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.force = amdgpu_connector_dvi_force,
 };
@@ -1491,6 +1502,7 @@
 	.detect = amdgpu_connector_dp_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = amdgpu_connector_set_property,
+	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.force = amdgpu_connector_dvi_force,
 };
@@ -1500,10 +1512,93 @@
 	.detect = amdgpu_connector_dp_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = amdgpu_connector_set_lcd_property,
+	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.force = amdgpu_connector_dvi_force,
 };
 
+static struct drm_encoder *
+amdgpu_connector_virtual_encoder(struct drm_connector *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+	struct drm_encoder *encoder;
+	int i;
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+		if (!encoder)
+			continue;
+
+		if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+			return encoder;
+	}
+
+	/* pick the first one */
+	if (enc_id)
+		return drm_encoder_find(connector->dev, enc_id);
+	return NULL;
+}
+
+static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector)
+{
+	struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
+
+	if (encoder) {
+		amdgpu_connector_add_common_modes(encoder, connector);
+	}
+
+	return 0;
+}
+
+static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
+					   struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static int
+amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
+{
+	return 0;
+}
+
+static enum drm_connector_status
+
+amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static int
+amdgpu_connector_virtual_set_property(struct drm_connector *connector,
+				  struct drm_property *property,
+				  uint64_t val)
+{
+	return 0;
+}
+
+static void amdgpu_connector_virtual_force(struct drm_connector *connector)
+{
+	return;
+}
+
+static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = {
+	.get_modes = amdgpu_connector_virtual_get_modes,
+	.mode_valid = amdgpu_connector_virtual_mode_valid,
+	.best_encoder = amdgpu_connector_virtual_encoder,
+};
+
+static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = {
+	.dpms = amdgpu_connector_virtual_dpms,
+	.detect = amdgpu_connector_virtual_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = amdgpu_connector_virtual_set_property,
+	.destroy = amdgpu_connector_destroy,
+	.force = amdgpu_connector_virtual_force,
+};
+
 void
 amdgpu_connector_add(struct amdgpu_device *adev,
 		      uint32_t connector_id,
@@ -1888,6 +1983,17 @@
 			connector->interlace_allowed = false;
 			connector->doublescan_allowed = false;
 			break;
+		case DRM_MODE_CONNECTOR_VIRTUAL:
+			amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
+			if (!amdgpu_dig_connector)
+				goto failed;
+			amdgpu_connector->con_priv = amdgpu_dig_connector;
+			drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type);
+			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
 		}
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0307ff5..b0f6e69 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -91,6 +91,7 @@
 				      uint32_t *offset)
 {
 	struct drm_gem_object *gobj;
+	unsigned long size;
 
 	gobj = drm_gem_object_lookup(p->filp, data->handle);
 	if (gobj == NULL)
@@ -101,6 +102,11 @@
 	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
 	p->uf_entry.tv.shared = true;
 	p->uf_entry.user_pages = NULL;
+
+	size = amdgpu_bo_size(p->uf_entry.robj);
+	if (size != PAGE_SIZE || (data->offset + 8) > size)
+		return -EINVAL;
+
 	*offset = data->offset;
 
 	drm_gem_object_unreference_unlocked(gobj);
@@ -235,70 +241,212 @@
 	return ret;
 }
 
-/* Returns how many bytes TTM can move per IB.
+/* Convert microseconds to bytes. */
+static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
+{
+	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
+		return 0;
+
+	/* Since accum_us is incremented by a million per second, just
+	 * multiply it by the number of MB/s to get the number of bytes.
+	 */
+	return us << adev->mm_stats.log2_max_MBps;
+}
+
+static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
+{
+	if (!adev->mm_stats.log2_max_MBps)
+		return 0;
+
+	return bytes >> adev->mm_stats.log2_max_MBps;
+}
+
+/* Returns how many bytes TTM can move right now. If no bytes can be moved,
+ * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
+ * which means it can go over the threshold once. If that happens, the driver
+ * will be in debt and no other buffer migrations can be done until that debt
+ * is repaid.
+ *
+ * This approach allows moving a buffer of any size (it's important to allow
+ * that).
+ *
+ * The currency is simply time in microseconds and it increases as the clock
+ * ticks. The accumulated microseconds (us) are converted to bytes and
+ * returned.
  */
 static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
 {
-	u64 real_vram_size = adev->mc.real_vram_size;
-	u64 vram_usage = atomic64_read(&adev->vram_usage);
+	s64 time_us, increment_us;
+	u64 max_bytes;
+	u64 free_vram, total_vram, used_vram;
 
-	/* This function is based on the current VRAM usage.
+	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
+	 * throttling.
 	 *
-	 * - If all of VRAM is free, allow relocating the number of bytes that
-	 *   is equal to 1/4 of the size of VRAM for this IB.
-
-	 * - If more than one half of VRAM is occupied, only allow relocating
-	 *   1 MB of data for this IB.
-	 *
-	 * - From 0 to one half of used VRAM, the threshold decreases
-	 *   linearly.
-	 *         __________________
-	 * 1/4 of -|\               |
-	 * VRAM    | \              |
-	 *         |  \             |
-	 *         |   \            |
-	 *         |    \           |
-	 *         |     \          |
-	 *         |      \         |
-	 *         |       \________|1 MB
-	 *         |----------------|
-	 *    VRAM 0 %             100 %
-	 *         used            used
-	 *
-	 * Note: It's a threshold, not a limit. The threshold must be crossed
-	 * for buffer relocations to stop, so any buffer of an arbitrary size
-	 * can be moved as long as the threshold isn't crossed before
-	 * the relocation takes place. We don't want to disable buffer
-	 * relocations completely.
-	 *
-	 * The idea is that buffers should be placed in VRAM at creation time
-	 * and TTM should only do a minimum number of relocations during
-	 * command submission. In practice, you need to submit at least
-	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
-	 *
-	 * Also, things can get pretty crazy under memory pressure and actual
-	 * VRAM usage can change a lot, so playing safe even at 50% does
-	 * consistently increase performance.
+	 * It means that in order to get full max MBps, at least 5 IBs per
+	 * second must be submitted and not more than 200ms apart from each
+	 * other.
 	 */
+	const s64 us_upper_bound = 200000;
 
-	u64 half_vram = real_vram_size >> 1;
-	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
-	u64 bytes_moved_threshold = half_free_vram >> 1;
-	return max(bytes_moved_threshold, 1024*1024ull);
+	if (!adev->mm_stats.log2_max_MBps)
+		return 0;
+
+	total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
+	used_vram = atomic64_read(&adev->vram_usage);
+	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
+
+	spin_lock(&adev->mm_stats.lock);
+
+	/* Increase the amount of accumulated us. */
+	time_us = ktime_to_us(ktime_get());
+	increment_us = time_us - adev->mm_stats.last_update_us;
+	adev->mm_stats.last_update_us = time_us;
+	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
+                                      us_upper_bound);
+
+	/* This prevents the short period of low performance when the VRAM
+	 * usage is low and the driver is in debt or doesn't have enough
+	 * accumulated us to fill VRAM quickly.
+	 *
+	 * The situation can occur in these cases:
+	 * - a lot of VRAM is freed by userspace
+	 * - the presence of a big buffer causes a lot of evictions
+	 *   (solution: split buffers into smaller ones)
+	 *
+	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
+	 * accum_us to a positive number.
+	 */
+	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
+		s64 min_us;
+
+		/* Be more aggresive on dGPUs. Try to fill a portion of free
+		 * VRAM now.
+		 */
+		if (!(adev->flags & AMD_IS_APU))
+			min_us = bytes_to_us(adev, free_vram / 4);
+		else
+			min_us = 0; /* Reset accum_us on APUs. */
+
+		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
+	}
+
+	/* This returns 0 if the driver is in debt to disallow (optional)
+	 * buffer moves.
+	 */
+	max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
+
+	spin_unlock(&adev->mm_stats.lock);
+	return max_bytes;
 }
 
-int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
+/* Report how many bytes have really been moved for the last command
+ * submission. This can result in a debt that can stop buffer migrations
+ * temporarily.
+ */
+static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
+					 u64 num_bytes)
+{
+	spin_lock(&adev->mm_stats.lock);
+	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
+	spin_unlock(&adev->mm_stats.lock);
+}
+
+static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
+				 struct amdgpu_bo *bo)
+{
+	u64 initial_bytes_moved;
+	uint32_t domain;
+	int r;
+
+	if (bo->pin_count)
+		return 0;
+
+	/* Don't move this buffer if we have depleted our allowance
+	 * to move it. Don't move anything if the threshold is zero.
+	 */
+	if (p->bytes_moved < p->bytes_moved_threshold)
+		domain = bo->prefered_domains;
+	else
+		domain = bo->allowed_domains;
+
+retry:
+	amdgpu_ttm_placement_from_domain(bo, domain);
+	initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+	p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+		initial_bytes_moved;
+
+	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
+		domain = bo->allowed_domains;
+		goto retry;
+	}
+
+	return r;
+}
+
+/* Last resort, try to evict something from the current working set */
+static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
+				struct amdgpu_bo_list_entry *lobj)
+{
+	uint32_t domain = lobj->robj->allowed_domains;
+	int r;
+
+	if (!p->evictable)
+		return false;
+
+	for (;&p->evictable->tv.head != &p->validated;
+	     p->evictable = list_prev_entry(p->evictable, tv.head)) {
+
+		struct amdgpu_bo_list_entry *candidate = p->evictable;
+		struct amdgpu_bo *bo = candidate->robj;
+		u64 initial_bytes_moved;
+		uint32_t other;
+
+		/* If we reached our current BO we can forget it */
+		if (candidate == lobj)
+			break;
+
+		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+		/* Check if this BO is in one of the domains we need space for */
+		if (!(other & domain))
+			continue;
+
+		/* Check if we can move this BO somewhere else */
+		other = bo->allowed_domains & ~domain;
+		if (!other)
+			continue;
+
+		/* Good we can try to move this BO somewhere else */
+		amdgpu_ttm_placement_from_domain(bo, other);
+		initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+		p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+			initial_bytes_moved;
+
+		if (unlikely(r))
+			break;
+
+		p->evictable = list_prev_entry(p->evictable, tv.head);
+		list_move(&candidate->tv.head, &p->validated);
+
+		return true;
+	}
+
+	return false;
+}
+
+static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 			    struct list_head *validated)
 {
 	struct amdgpu_bo_list_entry *lobj;
-	u64 initial_bytes_moved;
 	int r;
 
 	list_for_each_entry(lobj, validated, tv.head) {
 		struct amdgpu_bo *bo = lobj->robj;
 		bool binding_userptr = false;
 		struct mm_struct *usermm;
-		uint32_t domain;
 
 		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
 		if (usermm && usermm != current->mm)
@@ -313,35 +461,19 @@
 			binding_userptr = true;
 		}
 
-		if (bo->pin_count)
-			continue;
+		if (p->evictable == lobj)
+			p->evictable = NULL;
 
-		/* Avoid moving this one if we have moved too many buffers
-		 * for this IB already.
-		 *
-		 * Note that this allows moving at least one buffer of
-		 * any size, because it doesn't take the current "bo"
-		 * into account. We don't want to disallow buffer moves
-		 * completely.
-		 */
-		if (p->bytes_moved <= p->bytes_moved_threshold)
-			domain = bo->prefered_domains;
-		else
-			domain = bo->allowed_domains;
-
-	retry:
-		amdgpu_ttm_placement_from_domain(bo, domain);
-		initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-		p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
-			       initial_bytes_moved;
-
-		if (unlikely(r)) {
-			if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
-				domain = bo->allowed_domains;
-				goto retry;
-			}
+		do {
+			r = amdgpu_cs_bo_validate(p, bo);
+		} while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj));
+		if (r)
 			return r;
+
+		if (bo->shadow) {
+			r = amdgpu_cs_bo_validate(p, bo);
+			if (r)
+				return r;
 		}
 
 		if (binding_userptr) {
@@ -386,8 +518,10 @@
 
 		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
 					   &duplicates);
-		if (unlikely(r != 0))
+		if (unlikely(r != 0)) {
+			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
 			goto error_free_pages;
+		}
 
 		/* Without a BO list we don't have userptr BOs */
 		if (!p->bo_list)
@@ -427,9 +561,10 @@
 		/* Unreserve everything again. */
 		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 
-		/* We tried to often, just abort */
+		/* We tried too many times, just abort */
 		if (!--tries) {
 			r = -EDEADLK;
+			DRM_ERROR("deadlock in %s\n", __func__);
 			goto error_free_pages;
 		}
 
@@ -441,11 +576,13 @@
 							 sizeof(struct page*));
 			if (!e->user_pages) {
 				r = -ENOMEM;
+				DRM_ERROR("calloc failure in %s\n", __func__);
 				goto error_free_pages;
 			}
 
 			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
 			if (r) {
+				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
 				drm_free_large(e->user_pages);
 				e->user_pages = NULL;
 				goto error_free_pages;
@@ -460,14 +597,23 @@
 
 	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
 	p->bytes_moved = 0;
+	p->evictable = list_last_entry(&p->validated,
+				       struct amdgpu_bo_list_entry,
+				       tv.head);
 
 	r = amdgpu_cs_list_validate(p, &duplicates);
-	if (r)
+	if (r) {
+		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
 		goto error_validate;
+	}
 
 	r = amdgpu_cs_list_validate(p, &p->validated);
-	if (r)
+	if (r) {
+		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
 		goto error_validate;
+	}
+
+	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);
 
 	fpriv->vm.last_eviction_counter =
 		atomic64_read(&p->adev->num_evictions);
@@ -499,8 +645,12 @@
 		}
 	}
 
-	if (p->uf_entry.robj)
-		p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj);
+	if (!r && p->uf_entry.robj) {
+		struct amdgpu_bo *uf = p->uf_entry.robj;
+
+		r = amdgpu_ttm_bind(&uf->tbo, &uf->tbo.mem);
+		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
+	}
 
 error_validate:
 	if (r) {
@@ -617,7 +767,7 @@
 			if (bo_va == NULL)
 				continue;
 
-			r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
+			r = amdgpu_vm_bo_update(adev, bo_va, false);
 			if (r)
 				return r;
 
@@ -710,6 +860,14 @@
 		if (r)
 			return r;
 
+		if (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
+			parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
+			if (!parser->ctx->preamble_presented) {
+				parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+				parser->ctx->preamble_presented = true;
+			}
+		}
+
 		if (parser->job->ring && parser->job->ring != ring)
 			return -EINVAL;
 
@@ -849,7 +1007,7 @@
 	}
 
 	job->owner = p->filp;
-	job->ctx = entity->fence_context;
+	job->fence_ctx = entity->fence_context;
 	p->fence = fence_get(&job->base.s_fence->finished);
 	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
 	job->uf_sequence = cs->out.handle;
@@ -1015,3 +1173,29 @@
 
 	return NULL;
 }
+
+/**
+ * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
+ *
+ * @parser: command submission parser context
+ *
+ * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
+ */
+int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
+{
+	unsigned i;
+	int r;
+
+	if (!parser->bo_list)
+		return 0;
+
+	for (i = 0; i < parser->bo_list->num_entries; i++) {
+		struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
+
+		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+		if (unlikely(r))
+			return r;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 17e1362..a5e2fcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,6 +43,9 @@
 		ctx->rings[i].sequence = 1;
 		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
 	}
+
+	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+
 	/* create context entity for each ring */
 	for (i = 0; i < adev->num_rings; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
@@ -60,6 +63,7 @@
 			amd_sched_entity_fini(&adev->rings[j]->sched,
 					      &ctx->rings[j].entity);
 		kfree(ctx->fences);
+		ctx->fences = NULL;
 		return r;
 	}
 	return 0;
@@ -77,6 +81,7 @@
 		for (j = 0; j < amdgpu_sched_jobs; ++j)
 			fence_put(ctx->rings[i].fences[j]);
 	kfree(ctx->fences);
+	ctx->fences = NULL;
 
 	for (i = 0; i < adev->num_rings; i++)
 		amd_sched_entity_fini(&adev->rings[i]->sched,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 39c01b9..b4f4a92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -41,16 +41,26 @@
 #include "atom.h"
 #include "amdgpu_atombios.h"
 #include "amd_pcie.h"
+#ifdef CONFIG_DRM_AMDGPU_SI
+#include "si.h"
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
 #include "cik.h"
 #endif
 #include "vi.h"
 #include "bif/bif_4_1_d.h"
+#include <linux/pci.h>
+#include <linux/firmware.h>
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
 
 static const char *amdgpu_asic_name[] = {
+	"TAHITI",
+	"PITCAIRN",
+	"VERDE",
+	"OLAND",
+	"HAINAN",
 	"BONAIRE",
 	"KAVERI",
 	"KABINI",
@@ -101,7 +111,7 @@
 		    bool always_indirect)
 {
 	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
-	
+
 	if ((reg * 4) < adev->rmmio_size && !always_indirect)
 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 	else {
@@ -642,6 +652,46 @@
 
 }
 
+static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
+{
+	if (amdgpu_sriov_vf(adev))
+		return false;
+
+	if (amdgpu_passthrough(adev)) {
+		/* for FIJI: In whole GPU pass-through virtualization case
+		 * old smc fw won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH)
+		 * so amdgpu_card_posted return false and driver will incorrectly skip vPost.
+		 * but if we force vPost do in pass-through case, the driver reload will hang.
+		 * whether doing vPost depends on amdgpu_card_posted if smc version is above
+		 * 00160e00 for FIJI.
+		 */
+		if (adev->asic_type == CHIP_FIJI) {
+			int err;
+			uint32_t fw_ver;
+			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
+			/* force vPost if error occured */
+			if (err)
+				return true;
+
+			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
+			if (fw_ver >= 0x00160e00)
+				return !amdgpu_card_posted(adev);
+		}
+	} else {
+		/* in bare-metal case, amdgpu_card_posted return false
+		 * after system reboot/boot, and return true if driver
+		 * reloaded.
+		 * we shouldn't do vPost after driver reload otherwise GPU
+		 * could hang.
+		 */
+		if (amdgpu_card_posted(adev))
+			return false;
+	}
+
+	/* we assume vPost is neede for all other cases */
+	return true;
+}
+
 /**
  * amdgpu_dummy_page_init - init dummy page used by the driver
  *
@@ -1026,7 +1076,7 @@
 		/* don't suspend or resume card normally */
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
-		amdgpu_resume_kms(dev, true, true);
+		amdgpu_device_resume(dev, true, true);
 
 		dev->pdev->d3_delay = d3_delay;
 
@@ -1036,7 +1086,7 @@
 		printk(KERN_INFO "amdgpu: switched off\n");
 		drm_kms_helper_poll_disable(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-		amdgpu_suspend_kms(dev, true, true);
+		amdgpu_device_suspend(dev, true, true);
 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
@@ -1181,10 +1231,38 @@
 	return 1;
 }
 
+static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
+{
+	adev->enable_virtual_display = false;
+
+	if (amdgpu_virtual_display) {
+		struct drm_device *ddev = adev->ddev;
+		const char *pci_address_name = pci_name(ddev->pdev);
+		char *pciaddstr, *pciaddstr_tmp, *pciaddname;
+
+		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
+		pciaddstr_tmp = pciaddstr;
+		while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
+			if (!strcmp(pci_address_name, pciaddname)) {
+				adev->enable_virtual_display = true;
+				break;
+			}
+		}
+
+		DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
+				 amdgpu_virtual_display, pci_address_name,
+				 adev->enable_virtual_display);
+
+		kfree(pciaddstr);
+	}
+}
+
 static int amdgpu_early_init(struct amdgpu_device *adev)
 {
 	int i, r;
 
+	amdgpu_whether_enable_virtual_display(adev);
+
 	switch (adev->asic_type) {
 	case CHIP_TOPAZ:
 	case CHIP_TONGA:
@@ -1202,6 +1280,18 @@
 		if (r)
 			return r;
 		break;
+#ifdef CONFIG_DRM_AMDGPU_SI
+	case CHIP_VERDE:
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_OLAND:
+	case CHIP_HAINAN:
+		adev->family = AMDGPU_FAMILY_SI;
+		r = si_set_ip_blocks(adev);
+		if (r)
+			return r;
+		break;
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	case CHIP_BONAIRE:
 	case CHIP_HAWAII:
@@ -1318,19 +1408,25 @@
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_block_status[i].valid)
 			continue;
-		/* enable clockgating to save power */
-		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-								    AMD_CG_STATE_GATE);
-		if (r) {
-			DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
-			return r;
-		}
 		if (adev->ip_blocks[i].funcs->late_init) {
 			r = adev->ip_blocks[i].funcs->late_init((void *)adev);
 			if (r) {
 				DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
 				return r;
 			}
+			adev->ip_block_status[i].late_initialized = true;
+		}
+		/* skip CG for VCE/UVD, it's handled specially */
+		if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
+		    adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
+			/* enable clockgating to save power */
+			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+									    AMD_CG_STATE_GATE);
+			if (r) {
+				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].funcs->name, r);
+				return r;
+			}
 		}
 	}
 
@@ -1341,6 +1437,30 @@
 {
 	int i, r;
 
+	/* need to disable SMC first */
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].hw)
+			continue;
+		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
+			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
+			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+									    AMD_CG_STATE_UNGATE);
+			if (r) {
+				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].funcs->name, r);
+				return r;
+			}
+			r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+			/* XXX handle errors */
+			if (r) {
+				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].funcs->name, r);
+			}
+			adev->ip_block_status[i].hw = false;
+			break;
+		}
+	}
+
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_block_status[i].hw)
 			continue;
@@ -1376,8 +1496,11 @@
 	}
 
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+		if (!adev->ip_block_status[i].late_initialized)
+			continue;
 		if (adev->ip_blocks[i].funcs->late_fini)
 			adev->ip_blocks[i].funcs->late_fini((void *)adev);
+		adev->ip_block_status[i].late_initialized = false;
 	}
 
 	return 0;
@@ -1433,13 +1556,10 @@
 	return 0;
 }
 
-static bool amdgpu_device_is_virtual(void)
+static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
 {
-#ifdef CONFIG_X86
-	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
-#else
-	return false;
-#endif
+	if (amdgpu_atombios_has_gpu_virtualization_table(adev))
+		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
 }
 
 /**
@@ -1461,6 +1581,7 @@
 {
 	int r, i;
 	bool runtime = false;
+	u32 max_MBps;
 
 	adev->shutdown = false;
 	adev->dev = &pdev->dev;
@@ -1484,6 +1605,8 @@
 	adev->smc_wreg = &amdgpu_invalid_wreg;
 	adev->pcie_rreg = &amdgpu_invalid_rreg;
 	adev->pcie_wreg = &amdgpu_invalid_wreg;
+	adev->pciep_rreg = &amdgpu_invalid_rreg;
+	adev->pciep_wreg = &amdgpu_invalid_wreg;
 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
 	adev->didt_rreg = &amdgpu_invalid_rreg;
@@ -1520,9 +1643,22 @@
 	spin_lock_init(&adev->didt_idx_lock);
 	spin_lock_init(&adev->gc_cac_idx_lock);
 	spin_lock_init(&adev->audio_endpt_idx_lock);
+	spin_lock_init(&adev->mm_stats.lock);
 
-	adev->rmmio_base = pci_resource_start(adev->pdev, 5);
-	adev->rmmio_size = pci_resource_len(adev->pdev, 5);
+	INIT_LIST_HEAD(&adev->shadow_list);
+	mutex_init(&adev->shadow_list_lock);
+
+	INIT_LIST_HEAD(&adev->gtt_list);
+	spin_lock_init(&adev->gtt_list_lock);
+
+	if (adev->asic_type >= CHIP_BONAIRE) {
+		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
+		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
+	} else {
+		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
+		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
+	}
+
 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
 	if (adev->rmmio == NULL) {
 		return -ENOMEM;
@@ -1530,8 +1666,9 @@
 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
 
-	/* doorbell bar mapping */
-	amdgpu_doorbell_init(adev);
+	if (adev->asic_type >= CHIP_BONAIRE)
+		/* doorbell bar mapping */
+		amdgpu_doorbell_init(adev);
 
 	/* io port mapping */
 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -1579,25 +1716,24 @@
 		goto failed;
 	}
 
-	/* See if the asic supports SR-IOV */
-	adev->virtualization.supports_sr_iov =
-		amdgpu_atombios_has_gpu_virtualization_table(adev);
-
-	/* Check if we are executing in a virtualized environment */
-	adev->virtualization.is_virtual = amdgpu_device_is_virtual();
-	adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
+	/* detect if we are with an SRIOV vbios */
+	amdgpu_device_detect_sriov_bios(adev);
 
 	/* Post card if necessary */
-	if (!amdgpu_card_posted(adev) ||
-	    (adev->virtualization.is_virtual &&
-	     !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
+	if (amdgpu_vpost_needed(adev)) {
 		if (!adev->bios) {
-			dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
+			dev_err(adev->dev, "no vBIOS found\n");
 			r = -EINVAL;
 			goto failed;
 		}
-		DRM_INFO("GPU not posted. posting now...\n");
-		amdgpu_atom_asic_init(adev->mode_info.atom_context);
+		DRM_INFO("GPU posting now...\n");
+		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+		if (r) {
+			dev_err(adev->dev, "gpu post error!\n");
+			goto failed;
+		}
+	} else {
+		DRM_INFO("GPU post is not needed\n");
 	}
 
 	/* Initialize clocks */
@@ -1628,6 +1764,14 @@
 
 	adev->accel_working = true;
 
+	/* Initialize the buffer migration limit. */
+	if (amdgpu_moverate >= 0)
+		max_MBps = amdgpu_moverate;
+	else
+		max_MBps = 8; /* Allow 8 MB/s. */
+	/* Get a log2 for easy divisions. */
+	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
+
 	amdgpu_fbdev_init(adev);
 
 	r = amdgpu_ib_pool_init(adev);
@@ -1732,7 +1876,8 @@
 	adev->rio_mem = NULL;
 	iounmap(adev->rmmio);
 	adev->rmmio = NULL;
-	amdgpu_doorbell_fini(adev);
+	if (adev->asic_type >= CHIP_BONAIRE)
+		amdgpu_doorbell_fini(adev);
 	amdgpu_debugfs_regs_cleanup(adev);
 	amdgpu_debugfs_remove_files(adev);
 }
@@ -1742,7 +1887,7 @@
  * Suspend & resume.
  */
 /**
- * amdgpu_suspend_kms - initiate device suspend
+ * amdgpu_device_suspend - initiate device suspend
  *
  * @pdev: drm dev pointer
  * @state: suspend state
@@ -1751,7 +1896,7 @@
  * Returns 0 for success or an error on failure.
  * Called at driver suspend.
  */
-int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
+int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 {
 	struct amdgpu_device *adev;
 	struct drm_crtc *crtc;
@@ -1819,6 +1964,10 @@
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
 		pci_set_power_state(dev->pdev, PCI_D3hot);
+	} else {
+		r = amdgpu_asic_reset(adev);
+		if (r)
+			DRM_ERROR("amdgpu asic reset failed\n");
 	}
 
 	if (fbcon) {
@@ -1830,7 +1979,7 @@
 }
 
 /**
- * amdgpu_resume_kms - initiate device resume
+ * amdgpu_device_resume - initiate device resume
  *
  * @pdev: drm dev pointer
  *
@@ -1838,7 +1987,7 @@
  * Returns 0 for success or an error on failure.
  * Called at driver resume.
  */
-int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 {
 	struct drm_connector *connector;
 	struct amdgpu_device *adev = dev->dev_private;
@@ -1848,22 +1997,26 @@
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	if (fbcon) {
+	if (fbcon)
 		console_lock();
-	}
+
 	if (resume) {
 		pci_set_power_state(dev->pdev, PCI_D0);
 		pci_restore_state(dev->pdev);
-		if (pci_enable_device(dev->pdev)) {
+		r = pci_enable_device(dev->pdev);
+		if (r) {
 			if (fbcon)
 				console_unlock();
-			return -1;
+			return r;
 		}
 	}
 
 	/* post card */
-	if (!amdgpu_card_posted(adev))
-		amdgpu_atom_asic_init(adev->mode_info.atom_context);
+	if (!amdgpu_card_posted(adev) || !resume) {
+		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+		if (r)
+			DRM_ERROR("amdgpu asic init failed\n");
+	}
 
 	r = amdgpu_resume(adev);
 	if (r)
@@ -1937,6 +2090,135 @@
 	return 0;
 }
 
+static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
+{
+	int i;
+	bool asic_hang = false;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if (adev->ip_blocks[i].funcs->check_soft_reset)
+			adev->ip_block_status[i].hang =
+				adev->ip_blocks[i].funcs->check_soft_reset(adev);
+		if (adev->ip_block_status[i].hang) {
+			DRM_INFO("IP block:%d is hang!\n", i);
+			asic_hang = true;
+		}
+	}
+	return asic_hang;
+}
+
+static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
+{
+	int i, r = 0;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if (adev->ip_block_status[i].hang &&
+		    adev->ip_blocks[i].funcs->pre_soft_reset) {
+			r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
+			if (r)
+				return r;
+		}
+	}
+
+	return 0;
+}
+
+static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
+		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
+		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
+		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
+			if (adev->ip_block_status[i].hang) {
+				DRM_INFO("Some block need full reset!\n");
+				return true;
+			}
+		}
+	}
+	return false;
+}
+
+static int amdgpu_soft_reset(struct amdgpu_device *adev)
+{
+	int i, r = 0;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if (adev->ip_block_status[i].hang &&
+		    adev->ip_blocks[i].funcs->soft_reset) {
+			r = adev->ip_blocks[i].funcs->soft_reset(adev);
+			if (r)
+				return r;
+		}
+	}
+
+	return 0;
+}
+
+static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
+{
+	int i, r = 0;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if (adev->ip_block_status[i].hang &&
+		    adev->ip_blocks[i].funcs->post_soft_reset)
+			r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+bool amdgpu_need_backup(struct amdgpu_device *adev)
+{
+	if (adev->flags & AMD_IS_APU)
+		return false;
+
+	return amdgpu_lockup_timeout > 0 ? true : false;
+}
+
+static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
+					   struct amdgpu_ring *ring,
+					   struct amdgpu_bo *bo,
+					   struct fence **fence)
+{
+	uint32_t domain;
+	int r;
+
+       if (!bo->shadow)
+               return 0;
+
+       r = amdgpu_bo_reserve(bo, false);
+       if (r)
+               return r;
+       domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+       /* if bo has been evicted, then no need to recover */
+       if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+               r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
+						 NULL, fence, true);
+               if (r) {
+                       DRM_ERROR("recover page table failed!\n");
+                       goto err;
+               }
+       }
+err:
+       amdgpu_bo_unreserve(bo);
+       return r;
+}
+
 /**
  * amdgpu_gpu_reset - reset the asic
  *
@@ -1949,6 +2231,12 @@
 {
 	int i, r;
 	int resched;
+	bool need_full_reset;
+
+	if (!amdgpu_check_soft_reset(adev)) {
+		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
+		return 0;
+	}
 
 	atomic_inc(&adev->gpu_reset_counter);
 
@@ -1967,40 +2255,93 @@
 	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
 	amdgpu_fence_driver_force_completion(adev);
 
-	/* save scratch */
-	amdgpu_atombios_scratch_regs_save(adev);
-	r = amdgpu_suspend(adev);
+	need_full_reset = amdgpu_need_full_reset(adev);
+
+	if (!need_full_reset) {
+		amdgpu_pre_soft_reset(adev);
+		r = amdgpu_soft_reset(adev);
+		amdgpu_post_soft_reset(adev);
+		if (r || amdgpu_check_soft_reset(adev)) {
+			DRM_INFO("soft reset failed, will fallback to full reset!\n");
+			need_full_reset = true;
+		}
+	}
+
+	if (need_full_reset) {
+		/* save scratch */
+		amdgpu_atombios_scratch_regs_save(adev);
+		r = amdgpu_suspend(adev);
 
 retry:
-	/* Disable fb access */
-	if (adev->mode_info.num_crtc) {
-		struct amdgpu_mode_mc_save save;
-		amdgpu_display_stop_mc_access(adev, &save);
-		amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
-	}
+		/* Disable fb access */
+		if (adev->mode_info.num_crtc) {
+			struct amdgpu_mode_mc_save save;
+			amdgpu_display_stop_mc_access(adev, &save);
+			amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+		}
 
-	r = amdgpu_asic_reset(adev);
-	/* post card */
-	amdgpu_atom_asic_init(adev->mode_info.atom_context);
+		r = amdgpu_asic_reset(adev);
+		/* post card */
+		amdgpu_atom_asic_init(adev->mode_info.atom_context);
 
-	if (!r) {
-		dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
-		r = amdgpu_resume(adev);
+		if (!r) {
+			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
+			r = amdgpu_resume(adev);
+		}
+		/* restore scratch */
+		amdgpu_atombios_scratch_regs_restore(adev);
 	}
-	/* restore scratch */
-	amdgpu_atombios_scratch_regs_restore(adev);
 	if (!r) {
+		amdgpu_irq_gpu_reset_resume_helper(adev);
+		if (need_full_reset && amdgpu_need_backup(adev)) {
+			r = amdgpu_ttm_recover_gart(adev);
+			if (r)
+				DRM_ERROR("gart recovery failed!!!\n");
+		}
 		r = amdgpu_ib_ring_tests(adev);
 		if (r) {
 			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
 			r = amdgpu_suspend(adev);
+			need_full_reset = true;
 			goto retry;
 		}
+		/**
+		 * recovery vm page tables, since we cannot depend on VRAM is
+		 * consistent after gpu full reset.
+		 */
+		if (need_full_reset && amdgpu_need_backup(adev)) {
+			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+			struct amdgpu_bo *bo, *tmp;
+			struct fence *fence = NULL, *next = NULL;
 
+			DRM_INFO("recover vram bo from shadow\n");
+			mutex_lock(&adev->shadow_list_lock);
+			list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
+				amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
+				if (fence) {
+					r = fence_wait(fence, false);
+					if (r) {
+						WARN(r, "recovery from shadow isn't comleted\n");
+						break;
+					}
+				}
+
+				fence_put(fence);
+				fence = next;
+			}
+			mutex_unlock(&adev->shadow_list_lock);
+			if (fence) {
+				r = fence_wait(fence, false);
+				if (r)
+					WARN(r, "recovery from shadow isn't comleted\n");
+			}
+			fence_put(fence);
+		}
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			struct amdgpu_ring *ring = adev->rings[i];
 			if (!ring)
 				continue;
+
 			amd_sched_job_recovery(&ring->sched);
 			kthread_unpark(ring->sched.thread);
 		}
@@ -2020,7 +2361,6 @@
 		/* bad news, how to tell it to userspace ? */
 		dev_info(adev->dev, "GPU reset failed\n");
 	}
-	amdgpu_irq_gpu_reset_resume_helper(adev);
 
 	return r;
 }
@@ -2178,22 +2518,26 @@
 	struct amdgpu_device *adev = f->f_inode->i_private;
 	ssize_t result = 0;
 	int r;
-	bool use_bank;
+	bool pm_pg_lock, use_bank;
 	unsigned instance_bank, sh_bank, se_bank;
 
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
+	/* are we reading registers for which a PG lock is necessary? */
+	pm_pg_lock = (*pos >> 23) & 1;
+
 	if (*pos & (1ULL << 62)) {
 		se_bank = (*pos >> 24) & 0x3FF;
 		sh_bank = (*pos >> 34) & 0x3FF;
 		instance_bank = (*pos >> 44) & 0x3FF;
 		use_bank = 1;
-		*pos &= 0xFFFFFF;
 	} else {
 		use_bank = 0;
 	}
 
+	*pos &= 0x3FFFF;
+
 	if (use_bank) {
 		if (sh_bank >= adev->gfx.config.max_sh_per_se ||
 		    se_bank >= adev->gfx.config.max_shader_engines)
@@ -2203,6 +2547,9 @@
 					sh_bank, instance_bank);
 	}
 
+	if (pm_pg_lock)
+		mutex_lock(&adev->pm.mutex);
+
 	while (size) {
 		uint32_t value;
 
@@ -2228,6 +2575,9 @@
 		mutex_unlock(&adev->grbm_idx_mutex);
 	}
 
+	if (pm_pg_lock)
+		mutex_unlock(&adev->pm.mutex);
+
 	return result;
 }
 
@@ -2385,7 +2735,7 @@
 	while (size) {
 		uint32_t value;
 
-		value = RREG32_SMC(*pos >> 2);
+		value = RREG32_SMC(*pos);
 		r = put_user(value, (uint32_t *)buf);
 		if (r)
 			return r;
@@ -2416,7 +2766,7 @@
 		if (r)
 			return r;
 
-		WREG32_SMC(*pos >> 2, value);
+		WREG32_SMC(*pos, value);
 
 		result += 4;
 		buf += 4;
@@ -2438,12 +2788,12 @@
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
-	config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
+	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
 	if (!config)
 		return -ENOMEM;
 
 	/* version, increment each time something is added */
-	config[no_regs++] = 0;
+	config[no_regs++] = 2;
 	config[no_regs++] = adev->gfx.config.max_shader_engines;
 	config[no_regs++] = adev->gfx.config.max_tile_pipes;
 	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
@@ -2468,6 +2818,15 @@
 	config[no_regs++] = adev->gfx.config.gb_addr_config;
 	config[no_regs++] = adev->gfx.config.num_rbs;
 
+	/* rev==1 */
+	config[no_regs++] = adev->rev_id;
+	config[no_regs++] = adev->pg_flags;
+	config[no_regs++] = adev->cg_flags;
+
+	/* rev==2 */
+	config[no_regs++] = adev->family;
+	config[no_regs++] = adev->external_rev_id;
+
 	while (size && (*pos < no_regs * 4)) {
 		uint32_t value;
 
@@ -2488,6 +2847,29 @@
 	return result;
 }
 
+static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
+					size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = f->f_inode->i_private;
+	int idx, r;
+	int32_t value;
+
+	if (size != 4 || *pos & 0x3)
+		return -EINVAL;
+
+	/* convert offset to sensor number */
+	idx = *pos >> 2;
+
+	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
+		r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &value);
+	else
+		return -EINVAL;
+
+	if (!r)
+		r = put_user(value, (int32_t *)buf);
+
+	return !r ? 4 : r;
+}
 
 static const struct file_operations amdgpu_debugfs_regs_fops = {
 	.owner = THIS_MODULE,
@@ -2520,12 +2902,19 @@
 	.llseek = default_llseek
 };
 
+static const struct file_operations amdgpu_debugfs_sensors_fops = {
+	.owner = THIS_MODULE,
+	.read = amdgpu_debugfs_sensor_read,
+	.llseek = default_llseek
+};
+
 static const struct file_operations *debugfs_regs[] = {
 	&amdgpu_debugfs_regs_fops,
 	&amdgpu_debugfs_regs_didt_fops,
 	&amdgpu_debugfs_regs_pcie_fops,
 	&amdgpu_debugfs_regs_smc_fops,
 	&amdgpu_debugfs_gca_config_fops,
+	&amdgpu_debugfs_sensors_fops,
 };
 
 static const char *debugfs_regs_names[] = {
@@ -2534,6 +2923,7 @@
 	"amdgpu_regs_pcie",
 	"amdgpu_regs_smc",
 	"amdgpu_gca_config",
+	"amdgpu_sensors",
 };
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 76f9602..083e2b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -41,7 +41,7 @@
 		container_of(cb, struct amdgpu_flip_work, cb);
 
 	fence_put(f);
-	schedule_work(&work->flip_work);
+	schedule_work(&work->flip_work.work);
 }
 
 static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
@@ -63,16 +63,17 @@
 
 static void amdgpu_flip_work_func(struct work_struct *__work)
 {
+	struct delayed_work *delayed_work =
+		container_of(__work, struct delayed_work, work);
 	struct amdgpu_flip_work *work =
-		container_of(__work, struct amdgpu_flip_work, flip_work);
+		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
 	struct amdgpu_device *adev = work->adev;
 	struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
 
 	struct drm_crtc *crtc = &amdgpuCrtc->base;
 	unsigned long flags;
-	unsigned i, repcnt = 4;
-	int vpos, hpos, stat, min_udelay = 0;
-	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
+	unsigned i;
+	int vpos, hpos;
 
 	if (amdgpu_flip_handle_fence(work, &work->excl))
 		return;
@@ -81,55 +82,23 @@
 		if (amdgpu_flip_handle_fence(work, &work->shared[i]))
 			return;
 
-	/* We borrow the event spin lock for protecting flip_status */
-	spin_lock_irqsave(&crtc->dev->event_lock, flags);
-
-	/* If this happens to execute within the "virtually extended" vblank
-	 * interval before the start of the real vblank interval then it needs
-	 * to delay programming the mmio flip until the real vblank is entered.
-	 * This prevents completing a flip too early due to the way we fudge
-	 * our vblank counter and vblank timestamps in order to work around the
-	 * problem that the hw fires vblank interrupts before actual start of
-	 * vblank (when line buffer refilling is done for a frame). It
-	 * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for
-	 * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts.
-	 *
-	 * In practice this won't execute very often unless on very fast
-	 * machines because the time window for this to happen is very small.
+	/* Wait until we're out of the vertical blank period before the one
+	 * targeted by the flip
 	 */
-	while (amdgpuCrtc->enabled && --repcnt) {
-		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
-		 * start in hpos, and to the "fudged earlier" vblank start in
-		 * vpos.
-		 */
-		stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id,
-						  GET_DISTANCE_TO_VBLANKSTART,
-						  &vpos, &hpos, NULL, NULL,
-						  &crtc->hwmode);
-
-		if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
-		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
-		    !(vpos >= 0 && hpos <= 0))
-			break;
-
-		/* Sleep at least until estimated real start of hw vblank */
-		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
-		if (min_udelay > vblank->framedur_ns / 2000) {
-			/* Don't wait ridiculously long - something is wrong */
-			repcnt = 0;
-			break;
-		}
-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-		usleep_range(min_udelay, 2 * min_udelay);
-		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	if (amdgpuCrtc->enabled &&
+	    (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
+					&vpos, &hpos, NULL, NULL,
+					&crtc->hwmode)
+	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+	    (int)(work->target_vblank -
+		  amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
+		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
+		return;
 	}
 
-	if (!repcnt)
-		DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
-				 "framedur %d, linedur %d, stat %d, vpos %d, "
-				 "hpos %d\n", work->crtc_id, min_udelay,
-				 vblank->framedur_ns / 1000,
-				 vblank->linedur_ns / 1000, stat, vpos, hpos);
+	/* We borrow the event spin lock for protecting flip_status */
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
 	/* Do the flip (mmio) */
 	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
@@ -154,25 +123,25 @@
 	int r;
 
 	/* unpin of the old buffer */
-	r = amdgpu_bo_reserve(work->old_rbo, false);
+	r = amdgpu_bo_reserve(work->old_abo, false);
 	if (likely(r == 0)) {
-		r = amdgpu_bo_unpin(work->old_rbo);
+		r = amdgpu_bo_unpin(work->old_abo);
 		if (unlikely(r != 0)) {
 			DRM_ERROR("failed to unpin buffer after flip\n");
 		}
-		amdgpu_bo_unreserve(work->old_rbo);
+		amdgpu_bo_unreserve(work->old_abo);
 	} else
 		DRM_ERROR("failed to reserve buffer after flip\n");
 
-	amdgpu_bo_unref(&work->old_rbo);
+	amdgpu_bo_unref(&work->old_abo);
 	kfree(work->shared);
 	kfree(work);
 }
 
-int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
-			  struct drm_framebuffer *fb,
-			  struct drm_pending_vblank_event *event,
-			  uint32_t page_flip_flags)
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_pending_vblank_event *event,
+				 uint32_t page_flip_flags, uint32_t target)
 {
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
@@ -181,7 +150,7 @@
 	struct amdgpu_framebuffer *new_amdgpu_fb;
 	struct drm_gem_object *obj;
 	struct amdgpu_flip_work *work;
-	struct amdgpu_bo *new_rbo;
+	struct amdgpu_bo *new_abo;
 	unsigned long flags;
 	u64 tiling_flags;
 	u64 base;
@@ -191,7 +160,7 @@
 	if (work == NULL)
 		return -ENOMEM;
 
-	INIT_WORK(&work->flip_work, amdgpu_flip_work_func);
+	INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func);
 	INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);
 
 	work->event = event;
@@ -204,28 +173,28 @@
 	obj = old_amdgpu_fb->obj;
 
 	/* take a reference to the old object */
-	work->old_rbo = gem_to_amdgpu_bo(obj);
-	amdgpu_bo_ref(work->old_rbo);
+	work->old_abo = gem_to_amdgpu_bo(obj);
+	amdgpu_bo_ref(work->old_abo);
 
 	new_amdgpu_fb = to_amdgpu_framebuffer(fb);
 	obj = new_amdgpu_fb->obj;
-	new_rbo = gem_to_amdgpu_bo(obj);
+	new_abo = gem_to_amdgpu_bo(obj);
 
 	/* pin the new buffer */
-	r = amdgpu_bo_reserve(new_rbo, false);
+	r = amdgpu_bo_reserve(new_abo, false);
 	if (unlikely(r != 0)) {
-		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+		DRM_ERROR("failed to reserve new abo buffer before flip\n");
 		goto cleanup;
 	}
 
-	r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
+	r = amdgpu_bo_pin_restricted(new_abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
 	if (unlikely(r != 0)) {
 		r = -EINVAL;
-		DRM_ERROR("failed to pin new rbo buffer before flip\n");
+		DRM_ERROR("failed to pin new abo buffer before flip\n");
 		goto unreserve;
 	}
 
-	r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
+	r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
 					      &work->shared_count,
 					      &work->shared);
 	if (unlikely(r != 0)) {
@@ -233,16 +202,12 @@
 		goto unpin;
 	}
 
-	amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
-	amdgpu_bo_unreserve(new_rbo);
+	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
+	amdgpu_bo_unreserve(new_abo);
 
 	work->base = base;
-
-	r = drm_crtc_vblank_get(crtc);
-	if (r) {
-		DRM_ERROR("failed to get vblank before flip\n");
-		goto pflip_cleanup;
-	}
+	work->target_vblank = target - drm_crtc_vblank_count(crtc) +
+		amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
 
 	/* we borrow the event spin lock for protecting flip_wrok */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -250,7 +215,7 @@
 		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 		r = -EBUSY;
-		goto vblank_cleanup;
+		goto pflip_cleanup;
 	}
 
 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
@@ -262,26 +227,23 @@
 	/* update crtc fb */
 	crtc->primary->fb = fb;
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-	amdgpu_flip_work_func(&work->flip_work);
+	amdgpu_flip_work_func(&work->flip_work.work);
 	return 0;
 
-vblank_cleanup:
-	drm_crtc_vblank_put(crtc);
-
 pflip_cleanup:
-	if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
-		DRM_ERROR("failed to reserve new rbo in error path\n");
+	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
+		DRM_ERROR("failed to reserve new abo in error path\n");
 		goto cleanup;
 	}
 unpin:
-	if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
-		DRM_ERROR("failed to unpin new rbo in error path\n");
+	if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
+		DRM_ERROR("failed to unpin new abo in error path\n");
 	}
 unreserve:
-	amdgpu_bo_unreserve(new_rbo);
+	amdgpu_bo_unreserve(new_abo);
 
 cleanup:
-	amdgpu_bo_unref(&work->old_rbo);
+	amdgpu_bo_unref(&work->old_abo);
 	fence_put(work->excl);
 	for (i = 0; i < work->shared_count; ++i)
 		fence_put(work->shared[i]);
@@ -335,7 +297,7 @@
 	return ret;
 }
 
-static const char *encoder_names[38] = {
+static const char *encoder_names[41] = {
 	"NONE",
 	"INTERNAL_LVDS",
 	"INTERNAL_TMDS1",
@@ -374,6 +336,9 @@
 	"TRAVIS",
 	"INTERNAL_VCE",
 	"INTERNAL_UNIPHY3",
+	"HDMI_ANX9805",
+	"INTERNAL_AMCLK",
+	"VIRTUAL",
 };
 
 static const char *hpd_names[6] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index fe36caf..14f57d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -113,24 +113,26 @@
 	printk("\n");
 }
 
+
 u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_crtc *crtc;
 	struct amdgpu_crtc *amdgpu_crtc;
-	u32 line_time_us, vblank_lines;
+	u32 vblank_in_pixels;
 	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
 
 	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 			amdgpu_crtc = to_amdgpu_crtc(crtc);
 			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
-				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
-					amdgpu_crtc->hw_mode.clock;
-				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
+				vblank_in_pixels =
+					amdgpu_crtc->hw_mode.crtc_htotal *
+					(amdgpu_crtc->hw_mode.crtc_vblank_end -
 					amdgpu_crtc->hw_mode.crtc_vdisplay +
-					(amdgpu_crtc->v_border * 2);
-				vblank_time_us = vblank_lines * line_time_us;
+					(amdgpu_crtc->v_border * 2));
+
+				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
 				break;
 			}
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 9aa533c..71ed27e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -53,13 +53,19 @@
  * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
  *           at the end of IBs.
  * - 3.3.0 - Add VM support for UVD on supported hardware.
+ * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
+ * - 3.5.0 - Add support for new UVD_NO_OP register.
+ * - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer.
+ * - 3.7.0 - Add support for VCE clock list packet
+ * - 3.8.0 - Add support raster config init in the kernel
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	3
+#define KMS_DRIVER_MINOR	8
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
 int amdgpu_gart_size = -1; /* auto */
+int amdgpu_moverate = -1; /* auto */
 int amdgpu_benchmarking = 0;
 int amdgpu_testing = 0;
 int amdgpu_audio = -1;
@@ -84,11 +90,14 @@
 int amdgpu_sched_hw_submission = 2;
 int amdgpu_powerplay = -1;
 int amdgpu_powercontainment = 1;
+int amdgpu_sclk_deep_sleep_en = 1;
 unsigned amdgpu_pcie_gen_cap = 0;
 unsigned amdgpu_pcie_lane_cap = 0;
 unsigned amdgpu_cg_mask = 0xffffffff;
 unsigned amdgpu_pg_mask = 0xffffffff;
 char *amdgpu_disable_cu = NULL;
+char *amdgpu_virtual_display = NULL;
+unsigned amdgpu_pp_feature_mask = 0xffffffff;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -96,6 +105,9 @@
 MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
 module_param_named(gartsize, amdgpu_gart_size, int, 0600);
 
+MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
+module_param_named(moverate, amdgpu_moverate, int, 0600);
+
 MODULE_PARM_DESC(benchmark, "Run benchmark");
 module_param_named(benchmark, amdgpu_benchmarking, int, 0444);
 
@@ -162,13 +174,17 @@
 MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
 module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 
-#ifdef CONFIG_DRM_AMD_POWERPLAY
 MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
 module_param_named(powerplay, amdgpu_powerplay, int, 0444);
 
 MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
 module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
-#endif
+
+MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
+module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444);
+
+MODULE_PARM_DESC(sclkdeepsleep, "SCLK Deep Sleep (1 = enable (default), 0 = disable)");
+module_param_named(sclkdeepsleep, amdgpu_sclk_deep_sleep_en, int, 0444);
 
 MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
 module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
@@ -185,7 +201,84 @@
 MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
 module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
 
+MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)");
+module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
+
 static const struct pci_device_id pciidlist[] = {
+#ifdef  CONFIG_DRM_AMDGPU_SI
+	{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+	{0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
+	{0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
+	{0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
+	{0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+	{0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+	{0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+	{0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+	{0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+	{0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+	{0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+	{0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+	{0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+	{0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+	{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+	{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+	{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+	{0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+	{0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+	{0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+	{0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+	{0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+	{0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+	{0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+	{0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+	{0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+	{0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	/* Kaveri */
 	{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
@@ -341,7 +434,7 @@
 #ifdef CONFIG_X86
 	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 #endif
-	remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary);
+	drm_fb_helper_remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary);
 	kfree(ap);
 
 	return 0;
@@ -383,32 +476,70 @@
 	drm_put_dev(dev);
 }
 
+static void
+amdgpu_pci_shutdown(struct pci_dev *pdev)
+{
+	/* if we are running in a VM, make sure the device
+	 * torn down properly on reboot/shutdown.
+	 * unfortunately we can't detect certain
+	 * hypervisors so just do this all the time.
+	 */
+	amdgpu_pci_remove(pdev);
+}
+
 static int amdgpu_pmops_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
+
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	return amdgpu_suspend_kms(drm_dev, true, true);
+	return amdgpu_device_suspend(drm_dev, true, true);
 }
 
 static int amdgpu_pmops_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	return amdgpu_resume_kms(drm_dev, true, true);
+
+	/* GPU comes up enabled by the bios on resume */
+	if (amdgpu_device_is_px(drm_dev)) {
+		pm_runtime_disable(dev);
+		pm_runtime_set_active(dev);
+		pm_runtime_enable(dev);
+	}
+
+	return amdgpu_device_resume(drm_dev, true, true);
 }
 
 static int amdgpu_pmops_freeze(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
+
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	return amdgpu_suspend_kms(drm_dev, false, true);
+	return amdgpu_device_suspend(drm_dev, false, true);
 }
 
 static int amdgpu_pmops_thaw(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
+
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	return amdgpu_resume_kms(drm_dev, false, true);
+	return amdgpu_device_resume(drm_dev, false, true);
+}
+
+static int amdgpu_pmops_poweroff(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return amdgpu_device_suspend(drm_dev, true, true);
+}
+
+static int amdgpu_pmops_restore(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return amdgpu_device_resume(drm_dev, false, true);
 }
 
 static int amdgpu_pmops_runtime_suspend(struct device *dev)
@@ -426,7 +557,7 @@
 	drm_kms_helper_poll_disable(drm_dev);
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
 
-	ret = amdgpu_suspend_kms(drm_dev, false, false);
+	ret = amdgpu_device_suspend(drm_dev, false, false);
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
 	pci_ignore_hotplug(pdev);
@@ -459,7 +590,7 @@
 		return ret;
 	pci_set_master(pdev);
 
-	ret = amdgpu_resume_kms(drm_dev, false, false);
+	ret = amdgpu_device_resume(drm_dev, false, false);
 	drm_kms_helper_poll_enable(drm_dev);
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
 	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
@@ -513,8 +644,8 @@
 	.resume = amdgpu_pmops_resume,
 	.freeze = amdgpu_pmops_freeze,
 	.thaw = amdgpu_pmops_thaw,
-	.poweroff = amdgpu_pmops_freeze,
-	.restore = amdgpu_pmops_resume,
+	.poweroff = amdgpu_pmops_poweroff,
+	.restore = amdgpu_pmops_restore,
 	.runtime_suspend = amdgpu_pmops_runtime_suspend,
 	.runtime_resume = amdgpu_pmops_runtime_resume,
 	.runtime_idle = amdgpu_pmops_runtime_idle,
@@ -596,6 +727,7 @@
 	.id_table = pciidlist,
 	.probe = amdgpu_pci_probe,
 	.remove = amdgpu_pci_remove,
+	.shutdown = amdgpu_pci_shutdown,
 	.driver.pm = &amdgpu_pm_ops,
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 9191467..9fb8aa4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -25,7 +25,7 @@
  */
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/fb.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
@@ -48,8 +48,35 @@
 	struct amdgpu_device *adev;
 };
 
+static int
+amdgpufb_open(struct fb_info *info, int user)
+{
+	struct amdgpu_fbdev *rfbdev = info->par;
+	struct amdgpu_device *adev = rfbdev->adev;
+	int ret = pm_runtime_get_sync(adev->ddev->dev);
+	if (ret < 0 && ret != -EACCES) {
+		pm_runtime_mark_last_busy(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev->ddev->dev);
+		return ret;
+	}
+	return 0;
+}
+
+static int
+amdgpufb_release(struct fb_info *info, int user)
+{
+	struct amdgpu_fbdev *rfbdev = info->par;
+	struct amdgpu_device *adev = rfbdev->adev;
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+	return 0;
+}
+
 static struct fb_ops amdgpufb_ops = {
 	.owner = THIS_MODULE,
+	.fb_open = amdgpufb_open,
+	.fb_release = amdgpufb_release,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_fillrect = drm_fb_helper_cfb_fillrect,
@@ -88,14 +115,14 @@
 
 static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
 {
-	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(gobj);
+	struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 	int ret;
 
-	ret = amdgpu_bo_reserve(rbo, false);
+	ret = amdgpu_bo_reserve(abo, false);
 	if (likely(ret == 0)) {
-		amdgpu_bo_kunmap(rbo);
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_kunmap(abo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 	drm_gem_object_unreference_unlocked(gobj);
 }
@@ -106,7 +133,7 @@
 {
 	struct amdgpu_device *adev = rfbdev->adev;
 	struct drm_gem_object *gobj = NULL;
-	struct amdgpu_bo *rbo = NULL;
+	struct amdgpu_bo *abo = NULL;
 	bool fb_tiled = false; /* useful for testing */
 	u32 tiling_flags = 0;
 	int ret;
@@ -132,30 +159,30 @@
 		       aligned_size);
 		return -ENOMEM;
 	}
-	rbo = gem_to_amdgpu_bo(gobj);
+	abo = gem_to_amdgpu_bo(gobj);
 
 	if (fb_tiled)
 		tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1);
 
-	ret = amdgpu_bo_reserve(rbo, false);
+	ret = amdgpu_bo_reserve(abo, false);
 	if (unlikely(ret != 0))
 		goto out_unref;
 
 	if (tiling_flags) {
-		ret = amdgpu_bo_set_tiling_flags(rbo,
+		ret = amdgpu_bo_set_tiling_flags(abo,
 						 tiling_flags);
 		if (ret)
 			dev_err(adev->dev, "FB failed to set tiling flags\n");
 	}
 
 
-	ret = amdgpu_bo_pin_restricted(rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
+	ret = amdgpu_bo_pin_restricted(abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
 	if (ret) {
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unreserve(abo);
 		goto out_unref;
 	}
-	ret = amdgpu_bo_kmap(rbo, NULL);
-	amdgpu_bo_unreserve(rbo);
+	ret = amdgpu_bo_kmap(abo, NULL);
+	amdgpu_bo_unreserve(abo);
 	if (ret) {
 		goto out_unref;
 	}
@@ -177,7 +204,7 @@
 	struct drm_framebuffer *fb = NULL;
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct drm_gem_object *gobj = NULL;
-	struct amdgpu_bo *rbo = NULL;
+	struct amdgpu_bo *abo = NULL;
 	int ret;
 	unsigned long tmp;
 
@@ -196,7 +223,7 @@
 		return ret;
 	}
 
-	rbo = gem_to_amdgpu_bo(gobj);
+	abo = gem_to_amdgpu_bo(gobj);
 
 	/* okay we have an object now allocate the framebuffer */
 	info = drm_fb_helper_alloc_fbi(helper);
@@ -219,7 +246,7 @@
 	/* setup helper */
 	rfbdev->helper.fb = fb;
 
-	memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo));
+	memset_io(abo->kptr, 0x0, amdgpu_bo_size(abo));
 
 	strcpy(info->fix.id, "amdgpudrmfb");
 
@@ -228,11 +255,11 @@
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &amdgpufb_ops;
 
-	tmp = amdgpu_bo_gpu_offset(rbo) - adev->mc.vram_start;
+	tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start;
 	info->fix.smem_start = adev->mc.aper_base + tmp;
-	info->fix.smem_len = amdgpu_bo_size(rbo);
-	info->screen_base = rbo->kptr;
-	info->screen_size = amdgpu_bo_size(rbo);
+	info->fix.smem_len = amdgpu_bo_size(abo);
+	info->screen_base = abo->kptr;
+	info->screen_size = amdgpu_bo_size(abo);
 
 	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
 
@@ -249,7 +276,7 @@
 
 	DRM_INFO("fb mappable at 0x%lX\n",  info->fix.smem_start);
 	DRM_INFO("vram apper at 0x%lX\n",  (unsigned long)adev->mc.aper_base);
-	DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(rbo));
+	DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
 	DRM_INFO("fb depth is %d\n", fb->depth);
 	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
 
@@ -259,7 +286,7 @@
 out_destroy_fbi:
 	drm_fb_helper_release_fbi(helper);
 out_unref:
-	if (rbo) {
+	if (abo) {
 
 	}
 	if (fb && ret) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 0b109ae..3a2e42f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -454,6 +454,7 @@
 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
 			fence_put(ring->fence_drv.fences[j]);
 		kfree(ring->fence_drv.fences);
+		ring->fence_drv.fences = NULL;
 		ring->fence_drv.initialized = false;
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 0feea34..21a1242 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -238,7 +238,7 @@
 	t = offset / AMDGPU_GPU_PAGE_SIZE;
 	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
 	for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 		adev->gart.pages[p] = NULL;
 #endif
 		page_base = adev->dummy_page.addr;
@@ -286,7 +286,7 @@
 	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
 
 	for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 		adev->gart.pages[p] = pagelist[i];
 #endif
 		if (adev->gart.ptr) {
@@ -331,7 +331,7 @@
 	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
 		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
 
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 	/* Allocate pages table */
 	adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
 	if (adev->gart.pages == NULL) {
@@ -357,7 +357,7 @@
 		amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
 	}
 	adev->gart.ready = false;
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 	vfree(adev->gart.pages);
 	adev->gart.pages = NULL;
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
index 503d540..e73728d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
@@ -31,14 +31,6 @@
 #define AMDGPU_GWS_SHIFT	PAGE_SHIFT
 #define AMDGPU_OA_SHIFT		PAGE_SHIFT
 
-#define AMDGPU_PL_GDS		TTM_PL_PRIV0
-#define AMDGPU_PL_GWS		TTM_PL_PRIV1
-#define AMDGPU_PL_OA		TTM_PL_PRIV2
-
-#define AMDGPU_PL_FLAG_GDS		TTM_PL_FLAG_PRIV0
-#define AMDGPU_PL_FLAG_GWS		TTM_PL_FLAG_PRIV1
-#define AMDGPU_PL_FLAG_OA		TTM_PL_FLAG_PRIV2
-
 struct amdgpu_ring;
 struct amdgpu_bo;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 88fbed2..a7ea9a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -118,23 +118,23 @@
  */
 int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
-	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
-	struct amdgpu_device *adev = rbo->adev;
+	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
+	struct amdgpu_device *adev = abo->adev;
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-	r = amdgpu_bo_reserve(rbo, false);
+	r = amdgpu_bo_reserve(abo, false);
 	if (r)
 		return r;
 
-	bo_va = amdgpu_vm_bo_find(vm, rbo);
+	bo_va = amdgpu_vm_bo_find(vm, abo);
 	if (!bo_va) {
-		bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
+		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
 	} else {
 		++bo_va->ref_count;
 	}
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_unreserve(abo);
 	return 0;
 }
 
@@ -528,7 +528,7 @@
 		goto error_unreserve;
 
 	if (operation == AMDGPU_VA_OP_MAP)
-		r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
 
 error_unreserve:
 	ttm_eu_backoff_reservation(&ticket, &list);
@@ -547,7 +547,7 @@
 	struct drm_gem_object *gobj;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	struct amdgpu_bo_va *bo_va;
 	struct ttm_validate_buffer tv, tv_pd;
 	struct ww_acquire_ctx ticket;
@@ -587,10 +587,10 @@
 	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
-	rbo = gem_to_amdgpu_bo(gobj);
+	abo = gem_to_amdgpu_bo(gobj);
 	INIT_LIST_HEAD(&list);
 	INIT_LIST_HEAD(&duplicates);
-	tv.bo = &rbo->tbo;
+	tv.bo = &abo->tbo;
 	tv.shared = true;
 	list_add(&tv.head, &list);
 
@@ -604,7 +604,7 @@
 		return r;
 	}
 
-	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
+	bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
 	if (!bo_va) {
 		ttm_eu_backoff_reservation(&ticket, &list);
 		drm_gem_object_unreference_unlocked(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
new file mode 100644
index 0000000..f86c844
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+struct amdgpu_gtt_mgr {
+	struct drm_mm mm;
+	spinlock_t lock;
+	uint64_t available;
+};
+
+/**
+ * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
+ *
+ * @man: TTM memory type manager
+ * @p_size: maximum size of GTT
+ *
+ * Allocate and initialize the GTT manager.
+ */
+static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
+			       unsigned long p_size)
+{
+	struct amdgpu_gtt_mgr *mgr;
+
+	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+	if (!mgr)
+		return -ENOMEM;
+
+	drm_mm_init(&mgr->mm, 0, p_size);
+	spin_lock_init(&mgr->lock);
+	mgr->available = p_size;
+	man->priv = mgr;
+	return 0;
+}
+
+/**
+ * amdgpu_gtt_mgr_fini - free and destroy GTT manager
+ *
+ * @man: TTM memory type manager
+ *
+ * Destroy and free the GTT manager, returns -EBUSY if ranges are still
+ * allocated inside it.
+ */
+static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+
+	spin_lock(&mgr->lock);
+	if (!drm_mm_clean(&mgr->mm)) {
+		spin_unlock(&mgr->lock);
+		return -EBUSY;
+	}
+
+	drm_mm_takedown(&mgr->mm);
+	spin_unlock(&mgr->lock);
+	kfree(mgr);
+	man->priv = NULL;
+	return 0;
+}
+
+/**
+ * amdgpu_gtt_mgr_alloc - allocate new ranges
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Allocate the address space for a node.
+ */
+int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
+			 struct ttm_buffer_object *tbo,
+			 const struct ttm_place *place,
+			 struct ttm_mem_reg *mem)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+	struct drm_mm_node *node = mem->mm_node;
+	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
+	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+	unsigned long fpfn, lpfn;
+	int r;
+
+	if (node->start != AMDGPU_BO_INVALID_OFFSET)
+		return 0;
+
+	if (place)
+		fpfn = place->fpfn;
+	else
+		fpfn = 0;
+
+	if (place && place->lpfn)
+		lpfn = place->lpfn;
+	else
+		lpfn = man->size;
+
+	if (place && place->flags & TTM_PL_FLAG_TOPDOWN) {
+		sflags = DRM_MM_SEARCH_BELOW;
+		aflags = DRM_MM_CREATE_TOP;
+	}
+
+	spin_lock(&mgr->lock);
+	r = drm_mm_insert_node_in_range_generic(&mgr->mm, node, mem->num_pages,
+						mem->page_alignment, 0,
+						fpfn, lpfn, sflags, aflags);
+	spin_unlock(&mgr->lock);
+
+	if (!r) {
+		mem->start = node->start;
+		if (&tbo->mem == mem)
+			tbo->offset = (tbo->mem.start << PAGE_SHIFT) +
+			    tbo->bdev->man[tbo->mem.mem_type].gpu_offset;
+	}
+
+	return r;
+}
+
+/**
+ * amdgpu_gtt_mgr_new - allocate a new node
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Dummy, allocate the node but no space for it yet.
+ */
+static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
+			      struct ttm_buffer_object *tbo,
+			      const struct ttm_place *place,
+			      struct ttm_mem_reg *mem)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+	struct drm_mm_node *node;
+	int r;
+
+	spin_lock(&mgr->lock);
+	if (mgr->available < mem->num_pages) {
+		spin_unlock(&mgr->lock);
+		return 0;
+	}
+	mgr->available -= mem->num_pages;
+	spin_unlock(&mgr->lock);
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	node->start = AMDGPU_BO_INVALID_OFFSET;
+	mem->mm_node = node;
+
+	if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
+		r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
+		if (unlikely(r)) {
+			kfree(node);
+			mem->mm_node = NULL;
+		}
+	} else {
+		mem->start = node->start;
+	}
+
+	return 0;
+}
+
+/**
+ * amdgpu_gtt_mgr_del - free ranges
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: TTM memory object
+ *
+ * Free the allocated GTT again.
+ */
+static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
+			       struct ttm_mem_reg *mem)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+	struct drm_mm_node *node = mem->mm_node;
+
+	if (!node)
+		return;
+
+	spin_lock(&mgr->lock);
+	if (node->start != AMDGPU_BO_INVALID_OFFSET)
+		drm_mm_remove_node(node);
+	mgr->available += mem->num_pages;
+	spin_unlock(&mgr->lock);
+
+	kfree(node);
+	mem->mm_node = NULL;
+}
+
+/**
+ * amdgpu_gtt_mgr_debug - dump VRAM table
+ *
+ * @man: TTM memory type manager
+ * @prefix: text prefix
+ *
+ * Dump the table content using printk.
+ */
+static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
+				  const char *prefix)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+
+	spin_lock(&mgr->lock);
+	drm_mm_debug_table(&mgr->mm, prefix);
+	spin_unlock(&mgr->lock);
+}
+
+const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
+	amdgpu_gtt_mgr_init,
+	amdgpu_gtt_mgr_fini,
+	amdgpu_gtt_mgr_new,
+	amdgpu_gtt_mgr_del,
+	amdgpu_gtt_mgr_debug
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 31a6763..91d3673 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -158,8 +158,8 @@
 };
 
 struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
-					    struct amdgpu_i2c_bus_rec *rec,
-					    const char *name)
+					  const struct amdgpu_i2c_bus_rec *rec,
+					  const char *name)
 {
 	struct amdgpu_i2c_chan *i2c;
 	int ret;
@@ -186,10 +186,8 @@
 			 "AMDGPU i2c hw bus %s", name);
 		i2c->adapter.algo = &amdgpu_atombios_i2c_algo;
 		ret = i2c_add_adapter(&i2c->adapter);
-		if (ret) {
-			DRM_ERROR("Failed to register hw i2c %s\n", name);
+		if (ret)
 			goto out_free;
-		}
 	} else {
 		/* set the amdgpu bit adapter */
 		snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
@@ -222,6 +220,7 @@
 {
 	if (!i2c)
 		return;
+	WARN_ON(i2c->has_aux);
 	i2c_del_adapter(&i2c->adapter);
 	kfree(i2c);
 }
@@ -251,8 +250,8 @@
 
 /* Add additional buses */
 void amdgpu_i2c_add(struct amdgpu_device *adev,
-		     struct amdgpu_i2c_bus_rec *rec,
-		     const char *name)
+		    const struct amdgpu_i2c_bus_rec *rec,
+		    const char *name)
 {
 	struct drm_device *dev = adev->ddev;
 	int i;
@@ -268,7 +267,7 @@
 /* looks up bus based on id */
 struct amdgpu_i2c_chan *
 amdgpu_i2c_lookup(struct amdgpu_device *adev,
-		   struct amdgpu_i2c_bus_rec *i2c_bus)
+		  const struct amdgpu_i2c_bus_rec *i2c_bus)
 {
 	int i;
 
@@ -338,7 +337,7 @@
 
 /* ddc router switching */
 void
-amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector)
+amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
 {
 	u8 val;
 
@@ -367,7 +366,7 @@
 
 /* clock/data router switching */
 void
-amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector)
+amdgpu_i2c_router_select_cd_port(const struct amdgpu_connector *amdgpu_connector)
 {
 	u8 val;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
index d81e19b..63c2ff7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
@@ -25,20 +25,20 @@
 #define __AMDGPU_I2C_H__
 
 struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
-					    struct amdgpu_i2c_bus_rec *rec,
-					    const char *name);
+					  const struct amdgpu_i2c_bus_rec *rec,
+					  const char *name);
 void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c);
 void amdgpu_i2c_init(struct amdgpu_device *adev);
 void amdgpu_i2c_fini(struct amdgpu_device *adev);
 void amdgpu_i2c_add(struct amdgpu_device *adev,
-		     struct amdgpu_i2c_bus_rec *rec,
-		     const char *name);
+		    const struct amdgpu_i2c_bus_rec *rec,
+		    const char *name);
 struct amdgpu_i2c_chan *
 amdgpu_i2c_lookup(struct amdgpu_device *adev,
-		   struct amdgpu_i2c_bus_rec *i2c_bus);
+		  const struct amdgpu_i2c_bus_rec *i2c_bus);
 void
-amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector);
+amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *connector);
 void
-amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector);
+amdgpu_i2c_router_select_cd_port(const struct amdgpu_connector *connector);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index ec1282a..6a6c86c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -124,7 +124,8 @@
 	bool skip_preamble, need_ctx_switch;
 	unsigned patch_offset = ~0;
 	struct amdgpu_vm *vm;
-	uint64_t ctx;
+	uint64_t fence_ctx;
+	uint32_t status = 0, alloc_size;
 
 	unsigned i;
 	int r = 0;
@@ -135,14 +136,14 @@
 	/* ring tests don't use a job */
 	if (job) {
 		vm = job->vm;
-		ctx = job->ctx;
+		fence_ctx = job->fence_ctx;
 	} else {
 		vm = NULL;
-		ctx = 0;
+		fence_ctx = 0;
 	}
 
 	if (!ring->ready) {
-		dev_err(adev->dev, "couldn't schedule ib\n");
+		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
 		return -EINVAL;
 	}
 
@@ -151,7 +152,10 @@
 		return -EINVAL;
 	}
 
-	r = amdgpu_ring_alloc(ring, 256 * num_ibs);
+	alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
+		num_ibs * amdgpu_ring_get_emit_ib_size(ring);
+
+	r = amdgpu_ring_alloc(ring, alloc_size);
 	if (r) {
 		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
 		return r;
@@ -174,13 +178,22 @@
 	/* always set cond_exec_polling to CONTINUE */
 	*ring->cond_exe_cpu_addr = 1;
 
-	skip_preamble = ring->current_ctx == ctx;
-	need_ctx_switch = ring->current_ctx != ctx;
+	skip_preamble = ring->current_ctx == fence_ctx;
+	need_ctx_switch = ring->current_ctx != fence_ctx;
+	if (job && ring->funcs->emit_cntxcntl) {
+		if (need_ctx_switch)
+			status |= AMDGPU_HAVE_CTX_SWITCH;
+		status |= job->preamble_status;
+		amdgpu_ring_emit_cntxcntl(ring, status);
+	}
+
 	for (i = 0; i < num_ibs; ++i) {
 		ib = &ibs[i];
 
 		/* drop preamble IBs if we don't have a context switch */
-		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
+		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
+			skip_preamble &&
+			!(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST))
 			continue;
 
 		amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
@@ -209,7 +222,9 @@
 	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
-	ring->current_ctx = ctx;
+	ring->current_ctx = fence_ctx;
+	if (ring->funcs->emit_switch_buffer)
+		amdgpu_ring_emit_switch_buffer(ring);
 	amdgpu_ring_commit(ring);
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 534fc04..3ab4c65 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -40,32 +40,15 @@
 
 	/* Allocate ring buffer */
 	if (adev->irq.ih.ring_obj == NULL) {
-		r = amdgpu_bo_create(adev, adev->irq.ih.ring_size,
-				     PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0,
-				     NULL, NULL, &adev->irq.ih.ring_obj);
+		r = amdgpu_bo_create_kernel(adev, adev->irq.ih.ring_size,
+					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+					    &adev->irq.ih.ring_obj,
+					    &adev->irq.ih.gpu_addr,
+					    (void **)&adev->irq.ih.ring);
 		if (r) {
 			DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
 			return r;
 		}
-		r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
-		if (unlikely(r != 0))
-			return r;
-		r = amdgpu_bo_pin(adev->irq.ih.ring_obj,
-				  AMDGPU_GEM_DOMAIN_GTT,
-				  &adev->irq.ih.gpu_addr);
-		if (r) {
-			amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
-			DRM_ERROR("amdgpu: failed to pin ih ring buffer (%d).\n", r);
-			return r;
-		}
-		r = amdgpu_bo_kmap(adev->irq.ih.ring_obj,
-				   (void **)&adev->irq.ih.ring);
-		amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
-		if (r) {
-			DRM_ERROR("amdgpu: failed to map ih ring buffer (%d).\n", r);
-			return r;
-		}
 	}
 	return 0;
 }
@@ -136,8 +119,6 @@
  */
 void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
 {
-	int r;
-
 	if (adev->irq.ih.use_bus_addr) {
 		if (adev->irq.ih.ring) {
 			/* add 8 bytes for the rptr/wptr shadows and
@@ -149,17 +130,9 @@
 			adev->irq.ih.ring = NULL;
 		}
 	} else {
-		if (adev->irq.ih.ring_obj) {
-			r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
-			if (likely(r == 0)) {
-				amdgpu_bo_kunmap(adev->irq.ih.ring_obj);
-				amdgpu_bo_unpin(adev->irq.ih.ring_obj);
-				amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
-			}
-			amdgpu_bo_unref(&adev->irq.ih.ring_obj);
-			adev->irq.ih.ring = NULL;
-			adev->irq.ih.ring_obj = NULL;
-		}
+		amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
+				      &adev->irq.ih.gpu_addr,
+				      (void **)&adev->irq.ih.ring);
 		amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
 		amdgpu_wb_free(adev, adev->irq.ih.rptr_offs);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 7ef0935..f016464 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -70,6 +70,7 @@
 	/* gen irq stuff */
 	struct irq_domain		*domain; /* GPU irq controller domain */
 	unsigned			virq[AMDGPU_MAX_IRQ_SRC_ID];
+	uint32_t                        srbm_soft_reset;
 };
 
 void amdgpu_irq_preinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 6674d40..8c58079 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -91,7 +91,7 @@
 		amdgpu_ib_free(job->adev, &job->ibs[i], f);
 }
 
-void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
 {
 	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
@@ -124,7 +124,7 @@
 		return r;
 
 	job->owner = owner;
-	job->ctx = entity->fence_context;
+	job->fence_ctx = entity->fence_context;
 	*f = fence_get(&job->base.s_fence->finished);
 	amdgpu_job_free_resources(job);
 	amd_sched_entity_push_job(&job->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d942654..c2c7fb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -292,14 +292,14 @@
 			type = AMD_IP_BLOCK_TYPE_UVD;
 			ring_mask = adev->uvd.ring.ready ? 1 : 0;
 			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
-			ib_size_alignment = 8;
+			ib_size_alignment = 16;
 			break;
 		case AMDGPU_HW_IP_VCE:
 			type = AMD_IP_BLOCK_TYPE_VCE;
-			for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++)
+			for (i = 0; i < adev->vce.num_rings; i++)
 				ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
 			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
-			ib_size_alignment = 8;
+			ib_size_alignment = 1;
 			break;
 		default:
 			return -EINVAL;
@@ -373,6 +373,9 @@
 	case AMDGPU_INFO_NUM_BYTES_MOVED:
 		ui64 = atomic64_read(&adev->num_bytes_moved);
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
+	case AMDGPU_INFO_NUM_EVICTIONS:
+		ui64 = atomic64_read(&adev->num_evictions);
+		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_VRAM_USAGE:
 		ui64 = atomic64_read(&adev->vram_usage);
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
@@ -539,12 +542,16 @@
 		return r;
 
 	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
-	if (unlikely(!fpriv))
-		return -ENOMEM;
+	if (unlikely(!fpriv)) {
+		r = -ENOMEM;
+		goto out_suspend;
+	}
 
 	r = amdgpu_vm_init(adev, &fpriv->vm);
-	if (r)
-		goto error_free;
+	if (r) {
+		kfree(fpriv);
+		goto out_suspend;
+	}
 
 	mutex_init(&fpriv->bo_list_lock);
 	idr_init(&fpriv->bo_list_handles);
@@ -553,12 +560,9 @@
 
 	file_priv->driver_priv = fpriv;
 
+out_suspend:
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
-	return 0;
-
-error_free:
-	kfree(fpriv);
 
 	return r;
 }
@@ -597,6 +601,9 @@
 
 	kfree(fpriv);
 	file_priv->driver_priv = NULL;
+
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
 }
 
 /**
@@ -611,6 +618,7 @@
 void amdgpu_driver_preclose_kms(struct drm_device *dev,
 				struct drm_file *file_priv)
 {
+	pm_runtime_get_sync(dev->dev);
 }
 
 /*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 6b1d7d3..7b0eff7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -39,6 +39,8 @@
 #include <drm/drm_plane_helper.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
+#include <linux/hrtimer.h>
+#include "amdgpu_irq.h"
 
 struct amdgpu_bo;
 struct amdgpu_device;
@@ -339,6 +341,8 @@
 	int			num_dig; /* number of dig blocks */
 	int			disp_priority;
 	const struct amdgpu_display_funcs *funcs;
+	struct hrtimer vblank_timer;
+	enum amdgpu_interrupt_state vsync_timer_enabled;
 };
 
 #define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -587,10 +591,10 @@
 void amdgpu_print_display_setup(struct drm_device *dev);
 int amdgpu_modeset_create_props(struct amdgpu_device *adev);
 int amdgpu_crtc_set_config(struct drm_mode_set *set);
-int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
-			  struct drm_framebuffer *fb,
-			  struct drm_pending_vblank_event *event,
-			  uint32_t page_flip_flags);
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_pending_vblank_event *event,
+				 uint32_t page_flip_flags, uint32_t target);
 extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 6f0873c..aa074fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -38,20 +38,17 @@
 #include "amdgpu_trace.h"
 
 
-int amdgpu_ttm_init(struct amdgpu_device *adev);
-void amdgpu_ttm_fini(struct amdgpu_device *adev);
 
 static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
 						struct ttm_mem_reg *mem)
 {
-	u64 ret = 0;
-	if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
-		ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
-			   adev->mc.visible_vram_size ?
-			   adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
-			   mem->size;
-	}
-	return ret;
+	if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
+		return 0;
+
+	return ((mem->start << PAGE_SHIFT) + mem->size) >
+		adev->mc.visible_vram_size ?
+		adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
+		mem->size;
 }
 
 static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
@@ -99,6 +96,11 @@
 
 	drm_gem_object_release(&bo->gem_base);
 	amdgpu_bo_unref(&bo->parent);
+	if (!list_empty(&bo->shadow_list)) {
+		mutex_lock(&bo->adev->shadow_list_lock);
+		list_del_init(&bo->shadow_list);
+		mutex_unlock(&bo->adev->shadow_list_lock);
+	}
 	kfree(bo->metadata);
 	kfree(bo);
 }
@@ -112,90 +114,99 @@
 
 static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 				      struct ttm_placement *placement,
-				      struct ttm_place *placements,
+				      struct ttm_place *places,
 				      u32 domain, u64 flags)
 {
-	u32 c = 0, i;
-
-	placement->placement = placements;
-	placement->busy_placement = placements;
+	u32 c = 0;
 
 	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+		unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+
 		if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
-			adev->mc.visible_vram_size < adev->mc.real_vram_size) {
-			placements[c].fpfn =
-				adev->mc.visible_vram_size >> PAGE_SHIFT;
-			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-				TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
+		    !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
+		    adev->mc.visible_vram_size < adev->mc.real_vram_size) {
+			places[c].fpfn = visible_pfn;
+			places[c].lpfn = 0;
+			places[c].flags = TTM_PL_FLAG_WC |
+				TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
+				TTM_PL_FLAG_TOPDOWN;
+			c++;
 		}
-		placements[c].fpfn = 0;
-		placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+
+		places[c].fpfn = 0;
+		places[c].lpfn = 0;
+		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
 			TTM_PL_FLAG_VRAM;
-		if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
-			placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
+		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+			places[c].lpfn = visible_pfn;
+		else
+			places[c].flags |= TTM_PL_FLAG_TOPDOWN;
+		c++;
 	}
 
 	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
-		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
-			placements[c].fpfn = 0;
-			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
+		places[c].fpfn = 0;
+		places[c].lpfn = 0;
+		places[c].flags = TTM_PL_FLAG_TT;
+		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+			places[c].flags |= TTM_PL_FLAG_WC |
 				TTM_PL_FLAG_UNCACHED;
-		} else {
-			placements[c].fpfn = 0;
-			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
-		}
+		else
+			places[c].flags |= TTM_PL_FLAG_CACHED;
+		c++;
 	}
 
 	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
-		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
-			placements[c].fpfn = 0;
-			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
+		places[c].fpfn = 0;
+		places[c].lpfn = 0;
+		places[c].flags = TTM_PL_FLAG_SYSTEM;
+		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+			places[c].flags |= TTM_PL_FLAG_WC |
 				TTM_PL_FLAG_UNCACHED;
-		} else {
-			placements[c].fpfn = 0;
-			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
-		}
+		else
+			places[c].flags |= TTM_PL_FLAG_CACHED;
+		c++;
 	}
 
 	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
-		placements[c].fpfn = 0;
-		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
-			AMDGPU_PL_FLAG_GDS;
+		places[c].fpfn = 0;
+		places[c].lpfn = 0;
+		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
+		c++;
 	}
+
 	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
-		placements[c].fpfn = 0;
-		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
-			AMDGPU_PL_FLAG_GWS;
+		places[c].fpfn = 0;
+		places[c].lpfn = 0;
+		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
+		c++;
 	}
+
 	if (domain & AMDGPU_GEM_DOMAIN_OA) {
-		placements[c].fpfn = 0;
-		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
-			AMDGPU_PL_FLAG_OA;
+		places[c].fpfn = 0;
+		places[c].lpfn = 0;
+		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
+		c++;
 	}
 
 	if (!c) {
-		placements[c].fpfn = 0;
-		placements[c++].flags = TTM_PL_MASK_CACHING |
-			TTM_PL_FLAG_SYSTEM;
+		places[c].fpfn = 0;
+		places[c].lpfn = 0;
+		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		c++;
 	}
-	placement->num_placement = c;
-	placement->num_busy_placement = c;
 
-	for (i = 0; i < c; i++) {
-		if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
-			(placements[i].flags & TTM_PL_FLAG_VRAM) &&
-			!placements[i].fpfn)
-			placements[i].lpfn =
-				adev->mc.visible_vram_size >> PAGE_SHIFT;
-		else
-			placements[i].lpfn = 0;
-	}
+	placement->num_placement = c;
+	placement->placement = places;
+
+	placement->num_busy_placement = c;
+	placement->busy_placement = places;
 }
 
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 {
-	amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
-				  rbo->placements, domain, rbo->flags);
+	amdgpu_ttm_placement_init(abo->adev, &abo->placement,
+				  abo->placements, domain, abo->flags);
 }
 
 static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
@@ -211,6 +222,98 @@
 	bo->placement.busy_placement = bo->placements;
 }
 
+/**
+ * amdgpu_bo_create_kernel - create BO for kernel use
+ *
+ * @adev: amdgpu device object
+ * @size: size for the new BO
+ * @align: alignment for the new BO
+ * @domain: where to place it
+ * @bo_ptr: resulting BO
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Allocates and pins a BO for kernel internal use.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+			    unsigned long size, int align,
+			    u32 domain, struct amdgpu_bo **bo_ptr,
+			    u64 *gpu_addr, void **cpu_addr)
+{
+	int r;
+
+	r = amdgpu_bo_create(adev, size, align, true, domain,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     NULL, NULL, bo_ptr);
+	if (r) {
+		dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
+		return r;
+	}
+
+	r = amdgpu_bo_reserve(*bo_ptr, false);
+	if (r) {
+		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
+		goto error_free;
+	}
+
+	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
+	if (r) {
+		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
+		goto error_unreserve;
+	}
+
+	if (cpu_addr) {
+		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
+		if (r) {
+			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
+			goto error_unreserve;
+		}
+	}
+
+	amdgpu_bo_unreserve(*bo_ptr);
+
+	return 0;
+
+error_unreserve:
+	amdgpu_bo_unreserve(*bo_ptr);
+
+error_free:
+	amdgpu_bo_unref(bo_ptr);
+
+	return r;
+}
+
+/**
+ * amdgpu_bo_free_kernel - free BO for kernel use
+ *
+ * @bo: amdgpu BO to free
+ *
+ * unmaps and unpin a BO for kernel internal use.
+ */
+void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
+			   void **cpu_addr)
+{
+	if (*bo == NULL)
+		return;
+
+	if (likely(amdgpu_bo_reserve(*bo, false) == 0)) {
+		if (cpu_addr)
+			amdgpu_bo_kunmap(*bo);
+
+		amdgpu_bo_unpin(*bo);
+		amdgpu_bo_unreserve(*bo);
+	}
+	amdgpu_bo_unref(bo);
+
+	if (gpu_addr)
+		*gpu_addr = 0;
+
+	if (cpu_addr)
+		*cpu_addr = NULL;
+}
+
 int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 				unsigned long size, int byte_align,
 				bool kernel, u32 domain, u64 flags,
@@ -249,7 +352,7 @@
 		return r;
 	}
 	bo->adev = adev;
-	INIT_LIST_HEAD(&bo->list);
+	INIT_LIST_HEAD(&bo->shadow_list);
 	INIT_LIST_HEAD(&bo->va);
 	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
 					 AMDGPU_GEM_DOMAIN_GTT |
@@ -277,11 +380,79 @@
 	if (unlikely(r != 0)) {
 		return r;
 	}
+
+	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
+	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
+		struct fence *fence;
+
+		if (adev->mman.buffer_funcs_ring == NULL ||
+		   !adev->mman.buffer_funcs_ring->ready) {
+			r = -EBUSY;
+			goto fail_free;
+		}
+
+		r = amdgpu_bo_reserve(bo, false);
+		if (unlikely(r != 0))
+			goto fail_free;
+
+		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+		if (unlikely(r != 0))
+			goto fail_unreserve;
+
+		amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+		amdgpu_bo_fence(bo, fence, false);
+		amdgpu_bo_unreserve(bo);
+		fence_put(bo->tbo.moving);
+		bo->tbo.moving = fence_get(fence);
+		fence_put(fence);
+	}
 	*bo_ptr = bo;
 
 	trace_amdgpu_bo_create(bo);
 
 	return 0;
+
+fail_unreserve:
+	amdgpu_bo_unreserve(bo);
+fail_free:
+	amdgpu_bo_unref(&bo);
+	return r;
+}
+
+static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
+				   unsigned long size, int byte_align,
+				   struct amdgpu_bo *bo)
+{
+	struct ttm_placement placement = {0};
+	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+	int r;
+
+	if (bo->shadow)
+		return 0;
+
+	bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
+	memset(&placements, 0,
+	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
+
+	amdgpu_ttm_placement_init(adev, &placement,
+				  placements, AMDGPU_GEM_DOMAIN_GTT,
+				  AMDGPU_GEM_CREATE_CPU_GTT_USWC);
+
+	r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
+					AMDGPU_GEM_DOMAIN_GTT,
+					AMDGPU_GEM_CREATE_CPU_GTT_USWC,
+					NULL, &placement,
+					bo->tbo.resv,
+					&bo->shadow);
+	if (!r) {
+		bo->shadow->parent = amdgpu_bo_ref(bo);
+		mutex_lock(&adev->shadow_list_lock);
+		list_add_tail(&bo->shadow_list, &adev->shadow_list);
+		mutex_unlock(&adev->shadow_list_lock);
+	}
+
+	return r;
 }
 
 int amdgpu_bo_create(struct amdgpu_device *adev,
@@ -293,6 +464,7 @@
 {
 	struct ttm_placement placement = {0};
 	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+	int r;
 
 	memset(&placements, 0,
 	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
@@ -300,9 +472,83 @@
 	amdgpu_ttm_placement_init(adev, &placement,
 				  placements, domain, flags);
 
-	return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
-					   domain, flags, sg, &placement,
-					   resv, bo_ptr);
+	r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
+					domain, flags, sg, &placement,
+					resv, bo_ptr);
+	if (r)
+		return r;
+
+	if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
+		r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
+		if (r)
+			amdgpu_bo_unref(bo_ptr);
+	}
+
+	return r;
+}
+
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+			       struct amdgpu_ring *ring,
+			       struct amdgpu_bo *bo,
+			       struct reservation_object *resv,
+			       struct fence **fence,
+			       bool direct)
+
+{
+	struct amdgpu_bo *shadow = bo->shadow;
+	uint64_t bo_addr, shadow_addr;
+	int r;
+
+	if (!shadow)
+		return -EINVAL;
+
+	bo_addr = amdgpu_bo_gpu_offset(bo);
+	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+
+	r = reservation_object_reserve_shared(bo->tbo.resv);
+	if (r)
+		goto err;
+
+	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
+			       amdgpu_bo_size(bo), resv, fence,
+			       direct);
+	if (!r)
+		amdgpu_bo_fence(bo, *fence, true);
+
+err:
+	return r;
+}
+
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+				  struct amdgpu_ring *ring,
+				  struct amdgpu_bo *bo,
+				  struct reservation_object *resv,
+				  struct fence **fence,
+				  bool direct)
+
+{
+	struct amdgpu_bo *shadow = bo->shadow;
+	uint64_t bo_addr, shadow_addr;
+	int r;
+
+	if (!shadow)
+		return -EINVAL;
+
+	bo_addr = amdgpu_bo_gpu_offset(bo);
+	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+
+	r = reservation_object_reserve_shared(bo->tbo.resv);
+	if (r)
+		goto err;
+
+	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
+			       amdgpu_bo_size(bo), resv, fence,
+			       direct);
+	if (!r)
+		amdgpu_bo_fence(bo, *fence, true);
+
+err:
+	return r;
 }
 
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
@@ -380,16 +626,17 @@
 		return -EINVAL;
 
 	if (bo->pin_count) {
+		uint32_t mem_type = bo->tbo.mem.mem_type;
+
+		if (domain != amdgpu_mem_type_to_domain(mem_type))
+			return -EINVAL;
+
 		bo->pin_count++;
 		if (gpu_addr)
 			*gpu_addr = amdgpu_bo_gpu_offset(bo);
 
 		if (max_offset != 0) {
-			u64 domain_start;
-			if (domain == AMDGPU_GEM_DOMAIN_VRAM)
-				domain_start = bo->adev->mc.vram_start;
-			else
-				domain_start = bo->adev->mc.gtt_start;
+			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
 			WARN_ON_ONCE(max_offset <
 				     (amdgpu_bo_gpu_offset(bo) - domain_start));
 		}
@@ -401,7 +648,8 @@
 		/* force to pin into visible video ram */
 		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
 		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
-		    (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) {
+		    (!max_offset || max_offset >
+		     bo->adev->mc.visible_vram_size)) {
 			if (WARN_ON_ONCE(min_offset >
 					 bo->adev->mc.visible_vram_size))
 				return -EINVAL;
@@ -420,19 +668,28 @@
 	}
 
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-	if (likely(r == 0)) {
-		bo->pin_count = 1;
-		if (gpu_addr != NULL)
-			*gpu_addr = amdgpu_bo_gpu_offset(bo);
-		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
-			bo->adev->vram_pin_size += amdgpu_bo_size(bo);
-			if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-				bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
-		} else
-			bo->adev->gart_pin_size += amdgpu_bo_size(bo);
-	} else {
+	if (unlikely(r)) {
 		dev_err(bo->adev->dev, "%p pin failed\n", bo);
+		goto error;
 	}
+	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+	if (unlikely(r)) {
+		dev_err(bo->adev->dev, "%p bind failed\n", bo);
+		goto error;
+	}
+
+	bo->pin_count = 1;
+	if (gpu_addr != NULL)
+		*gpu_addr = amdgpu_bo_gpu_offset(bo);
+	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+		bo->adev->vram_pin_size += amdgpu_bo_size(bo);
+		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+			bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+		bo->adev->gart_pin_size += amdgpu_bo_size(bo);
+	}
+
+error:
 	return r;
 }
 
@@ -457,16 +714,20 @@
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
 	}
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-	if (likely(r == 0)) {
-		if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
-			bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
-			if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-				bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
-		} else
-			bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
-	} else {
+	if (unlikely(r)) {
 		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
+		goto error;
 	}
+
+	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+		bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
+		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+			bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+		bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
+	}
+
+error:
 	return r;
 }
 
@@ -588,23 +849,23 @@
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 			   struct ttm_mem_reg *new_mem)
 {
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 
 	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
 		return;
 
-	rbo = container_of(bo, struct amdgpu_bo, tbo);
-	amdgpu_vm_bo_invalidate(rbo->adev, rbo);
+	abo = container_of(bo, struct amdgpu_bo, tbo);
+	amdgpu_vm_bo_invalidate(abo->adev, abo);
 
 	/* update statistics */
 	if (!new_mem)
 		return;
 
 	/* move_notify is called before move happens */
-	amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
+	amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);
 
-	trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type);
+	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
 }
 
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
@@ -637,7 +898,8 @@
 	for (i = 0; i < abo->placement.num_placement; i++) {
 		/* Force into visible VRAM */
 		if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
-		    (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn))
+		    (!abo->placements[i].lpfn ||
+		     abo->placements[i].lpfn > lpfn))
 			abo->placements[i].lpfn = lpfn;
 	}
 	r = ttm_bo_validate(bo, &abo->placement, false, false);
@@ -674,3 +936,24 @@
 	else
 		reservation_object_add_excl_fence(resv, fence);
 }
+
+/**
+ * amdgpu_bo_gpu_offset - return GPU offset of bo
+ * @bo:	amdgpu object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: object should either be pinned or reserved when calling this
+ * function, it might be useful to add check for this for debugging.
+ */
+u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
+{
+	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
+		     !amdgpu_ttm_is_bound(bo->tbo.ttm));
+	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
+		     !bo->pin_count);
+	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
+
+	return bo->tbo.offset;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index bdb01d9..8255034 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -31,6 +31,8 @@
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
 
+#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
+
 /**
  * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
  * @mem_type:	ttm memory type
@@ -85,21 +87,6 @@
 	ttm_bo_unreserve(&bo->tbo);
 }
 
-/**
- * amdgpu_bo_gpu_offset - return GPU offset of bo
- * @bo:	amdgpu object for which we query the offset
- *
- * Returns current GPU offset of the object.
- *
- * Note: object should either be pinned or reserved when calling this
- * function, it might be useful to add check for this for debugging.
- */
-static inline u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
-{
-	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
-	return bo->tbo.offset;
-}
-
 static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
 {
 	return bo->tbo.num_pages << PAGE_SHIFT;
@@ -139,6 +126,12 @@
 				struct ttm_placement *placement,
 			        struct reservation_object *resv,
 				struct amdgpu_bo **bo_ptr);
+int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+			    unsigned long size, int align,
+			    u32 domain, struct amdgpu_bo **bo_ptr,
+			    u64 *gpu_addr, void **cpu_addr);
+void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
+			   void **cpu_addr);
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
 void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
 struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
@@ -165,6 +158,19 @@
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
 		     bool shared);
+u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+			       struct amdgpu_ring *ring,
+			       struct amdgpu_bo *bo,
+			       struct reservation_object *resv,
+			       struct fence **fence, bool direct);
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+				  struct amdgpu_ring *ring,
+				  struct amdgpu_bo *bo,
+				  struct reservation_object *resv,
+				  struct fence **fence,
+				  bool direct);
+
 
 /*
  * sub allocation
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
index d153149..8e67c12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
@@ -25,6 +25,7 @@
 #include "amdgpu.h"
 #include "atom.h"
 #include "atombios_encoders.h"
+#include "amdgpu_pll.h"
 #include <asm/div64.h>
 #include <linux/gcd.h>
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 5cc7052..accc908 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1103,54 +1103,46 @@
 
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 {
-	if (adev->pp_enabled)
+	if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) {
+		/* enable/disable UVD */
+		mutex_lock(&adev->pm.mutex);
 		amdgpu_dpm_powergate_uvd(adev, !enable);
-	else {
-		if (adev->pm.funcs->powergate_uvd) {
+		mutex_unlock(&adev->pm.mutex);
+	} else {
+		if (enable) {
 			mutex_lock(&adev->pm.mutex);
-			/* enable/disable UVD */
-			amdgpu_dpm_powergate_uvd(adev, !enable);
+			adev->pm.dpm.uvd_active = true;
+			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
 			mutex_unlock(&adev->pm.mutex);
 		} else {
-			if (enable) {
-				mutex_lock(&adev->pm.mutex);
-				adev->pm.dpm.uvd_active = true;
-				adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
-				mutex_unlock(&adev->pm.mutex);
-			} else {
-				mutex_lock(&adev->pm.mutex);
-				adev->pm.dpm.uvd_active = false;
-				mutex_unlock(&adev->pm.mutex);
-			}
-			amdgpu_pm_compute_clocks(adev);
+			mutex_lock(&adev->pm.mutex);
+			adev->pm.dpm.uvd_active = false;
+			mutex_unlock(&adev->pm.mutex);
 		}
-
+		amdgpu_pm_compute_clocks(adev);
 	}
 }
 
 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
-	if (adev->pp_enabled)
+	if (adev->pp_enabled || adev->pm.funcs->powergate_vce) {
+		/* enable/disable VCE */
+		mutex_lock(&adev->pm.mutex);
 		amdgpu_dpm_powergate_vce(adev, !enable);
-	else {
-		if (adev->pm.funcs->powergate_vce) {
+		mutex_unlock(&adev->pm.mutex);
+	} else {
+		if (enable) {
 			mutex_lock(&adev->pm.mutex);
-			amdgpu_dpm_powergate_vce(adev, !enable);
+			adev->pm.dpm.vce_active = true;
+			/* XXX select vce level based on ring/task */
+			adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
 			mutex_unlock(&adev->pm.mutex);
 		} else {
-			if (enable) {
-				mutex_lock(&adev->pm.mutex);
-				adev->pm.dpm.vce_active = true;
-				/* XXX select vce level based on ring/task */
-				adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
-				mutex_unlock(&adev->pm.mutex);
-			} else {
-				mutex_lock(&adev->pm.mutex);
-				adev->pm.dpm.vce_active = false;
-				mutex_unlock(&adev->pm.mutex);
-			}
-			amdgpu_pm_compute_clocks(adev);
+			mutex_lock(&adev->pm.mutex);
+			adev->pm.dpm.vce_active = false;
+			mutex_unlock(&adev->pm.mutex);
 		}
+		amdgpu_pm_compute_clocks(adev);
 	}
 }
 
@@ -1330,6 +1322,64 @@
  */
 #if defined(CONFIG_DEBUG_FS)
 
+static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
+{
+	int32_t value;
+
+	/* sanity check PP is enabled */
+	if (!(adev->powerplay.pp_funcs &&
+	      adev->powerplay.pp_funcs->read_sensor))
+	      return -EINVAL;
+
+	/* GPU Clocks */
+	seq_printf(m, "GFX Clocks and Power:\n");
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, &value))
+		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, &value))
+		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, &value))
+		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, &value))
+		seq_printf(m, "\t%u mV (VDDNB)\n", value);
+	seq_printf(m, "\n");
+
+	/* GPU Temp */
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, &value))
+		seq_printf(m, "GPU Temperature: %u C\n", value/1000);
+
+	/* GPU Load */
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value))
+		seq_printf(m, "GPU Load: %u %%\n", value);
+	seq_printf(m, "\n");
+
+	/* UVD clocks */
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, &value)) {
+		if (!value) {
+			seq_printf(m, "UVD: Disabled\n");
+		} else {
+			seq_printf(m, "UVD: Enabled\n");
+			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, &value))
+				seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, &value))
+				seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+		}
+	}
+	seq_printf(m, "\n");
+
+	/* VCE clocks */
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, &value)) {
+		if (!value) {
+			seq_printf(m, "VCE: Disabled\n");
+		} else {
+			seq_printf(m, "VCE: Enabled\n");
+			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, &value))
+				seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+		}
+	}
+
+	return 0;
+}
+
 static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1345,11 +1395,11 @@
 	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
 		seq_printf(m, "PX asic powered off\n");
 	} else if (adev->pp_enabled) {
-		amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
+		return amdgpu_debugfs_pm_info_pp(m, adev);
 	} else {
 		mutex_lock(&adev->pm.mutex);
 		if (adev->pm.funcs->debugfs_print_current_performance_level)
-			amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
+			adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
 		else
 			seq_printf(m, "Debugfs support not implemented for this asic\n");
 		mutex_unlock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index c5738a22..7532ff8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -30,6 +30,7 @@
 #include "amdgpu_pm.h"
 #include <drm/amdgpu_drm.h>
 #include "amdgpu_powerplay.h"
+#include "si_dpm.h"
 #include "cik_dpm.h"
 #include "vi_dpm.h"
 
@@ -41,7 +42,6 @@
 	amd_pp = &(adev->powerplay);
 
 	if (adev->pp_enabled) {
-#ifdef CONFIG_DRM_AMD_POWERPLAY
 		struct amd_pp_init *pp_init;
 
 		pp_init = kzalloc(sizeof(struct amd_pp_init), GFP_KERNEL);
@@ -52,15 +52,21 @@
 		pp_init->chip_family = adev->family;
 		pp_init->chip_id = adev->asic_type;
 		pp_init->device = amdgpu_cgs_create_device(adev);
-		pp_init->powercontainment_enabled = amdgpu_powercontainment;
-
 		ret = amd_powerplay_init(pp_init, amd_pp);
 		kfree(pp_init);
-#endif
 	} else {
 		amd_pp->pp_handle = (void *)adev;
 
 		switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+		case CHIP_TAHITI:
+		case CHIP_PITCAIRN:
+		case CHIP_VERDE:
+		case CHIP_OLAND:
+		case CHIP_HAINAN:
+			amd_pp->ip_funcs = &si_dpm_ip_funcs;
+		break;
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
 		case CHIP_BONAIRE:
 		case CHIP_HAWAII:
@@ -72,15 +78,6 @@
 			amd_pp->ip_funcs = &kv_dpm_ip_funcs;
 			break;
 #endif
-		case CHIP_TOPAZ:
-			amd_pp->ip_funcs = &iceland_dpm_ip_funcs;
-			break;
-		case CHIP_TONGA:
-			amd_pp->ip_funcs = &tonga_dpm_ip_funcs;
-			break;
-		case CHIP_FIJI:
-			amd_pp->ip_funcs = &fiji_dpm_ip_funcs;
-			break;
 		case CHIP_CARRIZO:
 		case CHIP_STONEY:
 			amd_pp->ip_funcs = &cz_dpm_ip_funcs;
@@ -98,19 +95,17 @@
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret = 0;
 
-#ifdef CONFIG_DRM_AMD_POWERPLAY
 	switch (adev->asic_type) {
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
-		adev->pp_enabled = true;
-		break;
 	case CHIP_TONGA:
 	case CHIP_FIJI:
-		adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
+	case CHIP_TOPAZ:
+		adev->pp_enabled = true;
 		break;
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
-		adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
+		adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
 		break;
 	/* These chips don't have powerplay implemenations */
 	case CHIP_BONAIRE:
@@ -118,14 +113,10 @@
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
 	case CHIP_KAVERI:
-	case CHIP_TOPAZ:
 	default:
 		adev->pp_enabled = false;
 		break;
 	}
-#else
-	adev->pp_enabled = false;
-#endif
 
 	ret = amdgpu_powerplay_init(adev);
 	if (ret)
@@ -147,12 +138,11 @@
 		ret = adev->powerplay.ip_funcs->late_init(
 					adev->powerplay.pp_handle);
 
-#ifdef CONFIG_DRM_AMD_POWERPLAY
 	if (adev->pp_enabled && adev->pm.dpm_enabled) {
 		amdgpu_pm_sysfs_init(adev);
 		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
 	}
-#endif
+
 	return ret;
 }
 
@@ -165,10 +155,8 @@
 		ret = adev->powerplay.ip_funcs->sw_init(
 					adev->powerplay.pp_handle);
 
-#ifdef CONFIG_DRM_AMD_POWERPLAY
 	if (adev->pp_enabled)
 		adev->pm.dpm_enabled = true;
-#endif
 
 	return ret;
 }
@@ -219,7 +207,6 @@
 
 static void amdgpu_pp_late_fini(void *handle)
 {
-#ifdef CONFIG_DRM_AMD_POWERPLAY
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->pp_enabled) {
@@ -230,7 +217,6 @@
 	if (adev->powerplay.ip_funcs->late_fini)
 		adev->powerplay.ip_funcs->late_fini(
 			  adev->powerplay.pp_handle);
-#endif
 }
 
 static int amdgpu_pp_suspend(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 85aeb0a..3cb5e90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -222,33 +222,16 @@
 
 	/* Allocate ring buffer */
 	if (ring->ring_obj == NULL) {
-		r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0,
-				     NULL, NULL, &ring->ring_obj);
+		r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+					    AMDGPU_GEM_DOMAIN_GTT,
+					    &ring->ring_obj,
+					    &ring->gpu_addr,
+					    (void **)&ring->ring);
 		if (r) {
 			dev_err(adev->dev, "(%d) ring create failed\n", r);
 			return r;
 		}
-		r = amdgpu_bo_reserve(ring->ring_obj, false);
-		if (unlikely(r != 0))
-			return r;
-		r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT,
-					&ring->gpu_addr);
-		if (r) {
-			amdgpu_bo_unreserve(ring->ring_obj);
-			dev_err(adev->dev, "(%d) ring pin failed\n", r);
-			return r;
-		}
-		r = amdgpu_bo_kmap(ring->ring_obj,
-				       (void **)&ring->ring);
-
 		memset((void *)ring->ring, 0, ring->ring_size);
-
-		amdgpu_bo_unreserve(ring->ring_obj);
-		if (r) {
-			dev_err(adev->dev, "(%d) ring map failed\n", r);
-			return r;
-		}
 	}
 	ring->ptr_mask = (ring->ring_size / 4) - 1;
 	ring->max_dw = max_dw;
@@ -269,29 +252,20 @@
  */
 void amdgpu_ring_fini(struct amdgpu_ring *ring)
 {
-	int r;
-	struct amdgpu_bo *ring_obj;
-
-	ring_obj = ring->ring_obj;
 	ring->ready = false;
-	ring->ring = NULL;
-	ring->ring_obj = NULL;
 
 	amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
 	amdgpu_wb_free(ring->adev, ring->fence_offs);
 	amdgpu_wb_free(ring->adev, ring->rptr_offs);
 	amdgpu_wb_free(ring->adev, ring->wptr_offs);
 
-	if (ring_obj) {
-		r = amdgpu_bo_reserve(ring_obj, false);
-		if (likely(r == 0)) {
-			amdgpu_bo_kunmap(ring_obj);
-			amdgpu_bo_unpin(ring_obj);
-			amdgpu_bo_unreserve(ring_obj);
-		}
-		amdgpu_bo_unref(&ring_obj);
-	}
+	amdgpu_bo_free_kernel(&ring->ring_obj,
+			      &ring->gpu_addr,
+			      (void **)&ring->ring);
+
 	amdgpu_debugfs_ring_fini(ring);
+
+	ring->adev->rings[ring->idx] = NULL;
 }
 
 /*
@@ -371,8 +345,8 @@
 	ent = debugfs_create_file(name,
 				  S_IFREG | S_IRUGO, root,
 				  ring, &amdgpu_debugfs_ring_fops);
-	if (IS_ERR(ent))
-		return PTR_ERR(ent);
+	if (!ent)
+		return -ENOMEM;
 
 	i_size_write(ent->d_inode, ring->ring_size + 12);
 	ring->ent = ent;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 05a53f4..b827c75 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -111,7 +111,7 @@
 		amdgpu_bo_kunmap(gtt_obj[i]);
 
 		r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
-				       size, NULL, &fence);
+				       size, NULL, &fence, false);
 
 		if (r) {
 			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
@@ -156,7 +156,7 @@
 		amdgpu_bo_kunmap(vram_obj);
 
 		r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
-				       size, NULL, &fence);
+				       size, NULL, &fence, false);
 
 		if (r) {
 			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 0d8d65e..067e5e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -247,7 +247,7 @@
 	    TP_ARGS(mapping)
 );
 
-TRACE_EVENT(amdgpu_vm_set_page,
+TRACE_EVENT(amdgpu_vm_set_ptes,
 	    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
 		     uint32_t incr, uint32_t flags),
 	    TP_ARGS(pe, addr, count, incr, flags),
@@ -271,6 +271,24 @@
 		      __entry->flags, __entry->count)
 );
 
+TRACE_EVENT(amdgpu_vm_copy_ptes,
+	    TP_PROTO(uint64_t pe, uint64_t src, unsigned count),
+	    TP_ARGS(pe, src, count),
+	    TP_STRUCT__entry(
+			     __field(u64, pe)
+			     __field(u64, src)
+			     __field(u32, count)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pe = pe;
+			   __entry->src = src;
+			   __entry->count = count;
+			   ),
+	    TP_printk("pe=%010Lx, src=%010Lx, count=%u",
+		      __entry->pe, __entry->src, __entry->count)
+);
+
 TRACE_EVENT(amdgpu_vm_flush,
 	    TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
 	    TP_ARGS(pd_addr, ring, id),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 716f2af..dcaf691 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -34,6 +34,7 @@
 #include <ttm/ttm_placement.h>
 #include <ttm/ttm_module.h>
 #include <ttm/ttm_page_alloc.h>
+#include <ttm/ttm_memory.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include <linux/seq_file.h>
@@ -74,7 +75,7 @@
 	ttm_mem_global_release(ref->object);
 }
 
-static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 {
 	struct drm_global_reference *global_ref;
 	struct amdgpu_ring *ring;
@@ -88,10 +89,10 @@
 	global_ref->init = &amdgpu_ttm_mem_global_init;
 	global_ref->release = &amdgpu_ttm_mem_global_release;
 	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
+	if (r) {
 		DRM_ERROR("Failed setting up TTM memory accounting "
 			  "subsystem.\n");
-		return r;
+		goto error_mem;
 	}
 
 	adev->mman.bo_global_ref.mem_glob =
@@ -102,26 +103,30 @@
 	global_ref->init = &ttm_bo_global_init;
 	global_ref->release = &ttm_bo_global_release;
 	r = drm_global_item_ref(global_ref);
-	if (r != 0) {
+	if (r) {
 		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-		drm_global_item_unref(&adev->mman.mem_global_ref);
-		return r;
+		goto error_bo;
 	}
 
 	ring = adev->mman.buffer_funcs_ring;
 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
 	r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
 				  rq, amdgpu_sched_jobs);
-	if (r != 0) {
+	if (r) {
 		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
-		drm_global_item_unref(&adev->mman.mem_global_ref);
-		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
-		return r;
+		goto error_entity;
 	}
 
 	adev->mman.mem_global_referenced = true;
 
 	return 0;
+
+error_entity:
+	drm_global_item_unref(&adev->mman.bo_global_ref.ref);
+error_bo:
+	drm_global_item_unref(&adev->mman.mem_global_ref);
+error_mem:
+	return r;
 }
 
 static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
@@ -155,7 +160,7 @@
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_TT:
-		man->func = &ttm_bo_manager_func;
+		man->func = &amdgpu_gtt_mgr_func;
 		man->gpu_offset = adev->mc.gtt_start;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_CACHED;
@@ -190,12 +195,13 @@
 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement)
 {
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	static struct ttm_place placements = {
 		.fpfn = 0,
 		.lpfn = 0,
 		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
 	};
+	unsigned i;
 
 	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
 		placement->placement = &placements;
@@ -204,28 +210,44 @@
 		placement->num_busy_placement = 1;
 		return;
 	}
-	rbo = container_of(bo, struct amdgpu_bo, tbo);
+	abo = container_of(bo, struct amdgpu_bo, tbo);
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		if (rbo->adev->mman.buffer_funcs_ring->ready == false)
-			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
-		else
-			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT);
+		if (abo->adev->mman.buffer_funcs_ring->ready == false) {
+			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+		} else {
+			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
+			for (i = 0; i < abo->placement.num_placement; ++i) {
+				if (!(abo->placements[i].flags &
+				      TTM_PL_FLAG_TT))
+					continue;
+
+				if (abo->placements[i].lpfn)
+					continue;
+
+				/* set an upper limit to force directly
+				 * allocating address space for the BO.
+				 */
+				abo->placements[i].lpfn =
+					abo->adev->mc.gtt_size >> PAGE_SHIFT;
+			}
+		}
 		break;
 	case TTM_PL_TT:
 	default:
-		amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
+		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 	}
-	*placement = rbo->placement;
+	*placement = abo->placement;
 }
 
 static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
-	struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
+	struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
 
 	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
 		return -EPERM;
-	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+	return drm_vma_node_verify_access(&abo->gem_base.vma_node,
+					  filp->private_data);
 }
 
 static void amdgpu_move_null(struct ttm_buffer_object *bo,
@@ -251,26 +273,30 @@
 
 	adev = amdgpu_get_adev(bo->bdev);
 	ring = adev->mman.buffer_funcs_ring;
-	old_start = (u64)old_mem->start << PAGE_SHIFT;
-	new_start = (u64)new_mem->start << PAGE_SHIFT;
 
 	switch (old_mem->mem_type) {
-	case TTM_PL_VRAM:
-		old_start += adev->mc.vram_start;
-		break;
 	case TTM_PL_TT:
-		old_start += adev->mc.gtt_start;
+		r = amdgpu_ttm_bind(bo, old_mem);
+		if (r)
+			return r;
+
+	case TTM_PL_VRAM:
+		old_start = (u64)old_mem->start << PAGE_SHIFT;
+		old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
 		break;
 	default:
 		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
 		return -EINVAL;
 	}
 	switch (new_mem->mem_type) {
-	case TTM_PL_VRAM:
-		new_start += adev->mc.vram_start;
-		break;
 	case TTM_PL_TT:
-		new_start += adev->mc.gtt_start;
+		r = amdgpu_ttm_bind(bo, new_mem);
+		if (r)
+			return r;
+
+	case TTM_PL_VRAM:
+		new_start = (u64)new_mem->start << PAGE_SHIFT;
+		new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
 		break;
 	default:
 		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
@@ -285,7 +311,7 @@
 
 	r = amdgpu_copy_buffer(ring, old_start, new_start,
 			       new_mem->num_pages * PAGE_SIZE, /* bytes */
-			       bo->resv, &fence);
+			       bo->resv, &fence, false);
 	if (r)
 		return r;
 
@@ -314,7 +340,7 @@
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
 	placements.fpfn = 0;
-	placements.lpfn = 0;
+	placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
 	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
 			     interruptible, no_wait_gpu);
@@ -335,7 +361,7 @@
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
+	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
 out_cleanup:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return r;
@@ -361,14 +387,14 @@
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
 	placements.fpfn = 0;
-	placements.lpfn = 0;
+	placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
 	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
 			     interruptible, no_wait_gpu);
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -435,8 +461,7 @@
 
 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, evict, interruptible,
-				       no_wait_gpu, new_mem);
+		r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
 		if (r) {
 			return r;
 		}
@@ -524,15 +549,19 @@
 	spinlock_t              guptasklock;
 	struct list_head        guptasks;
 	atomic_t		mmu_invalidations;
+	struct list_head        list;
 };
 
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+	unsigned int flags = 0;
 	unsigned pinned = 0;
 	int r;
 
+	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+		flags |= FOLL_WRITE;
+
 	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
 		/* check that we only use anonymous memory
 		   to prevent problems with writeback */
@@ -555,7 +584,7 @@
 		list_add(&guptask.list, &gtt->guptasks);
 		spin_unlock(&gtt->guptasklock);
 
-		r = get_user_pages(userptr, num_pages, write, 0, p, NULL);
+		r = get_user_pages(userptr, num_pages, flags, p, NULL);
 
 		spin_lock(&gtt->guptasklock);
 		list_del(&guptask.list);
@@ -641,7 +670,6 @@
 				   struct ttm_mem_reg *bo_mem)
 {
 	struct amdgpu_ttm_tt *gtt = (void*)ttm;
-	uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
 	int r;
 
 	if (gtt->userptr) {
@@ -651,7 +679,6 @@
 			return r;
 		}
 	}
-	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
 	if (!ttm->num_pages) {
 		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
 		     ttm->num_pages, bo_mem, ttm);
@@ -662,14 +689,71 @@
 	    bo_mem->mem_type == AMDGPU_PL_OA)
 		return -EINVAL;
 
+	return 0;
+}
+
+bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+	return gtt && !list_empty(&gtt->list);
+}
+
+int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
+{
+	struct ttm_tt *ttm = bo->ttm;
+	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
+	uint32_t flags;
+	int r;
+
+	if (!ttm || amdgpu_ttm_is_bound(ttm))
+		return 0;
+
+	r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo,
+				 NULL, bo_mem);
+	if (r) {
+		DRM_ERROR("Failed to allocate GTT address space (%d)\n", r);
+		return r;
+	}
+
+	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
+	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
 	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
 		ttm->pages, gtt->ttm.dma_address, flags);
 
 	if (r) {
-		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
-			  ttm->num_pages, (unsigned)gtt->offset);
+		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+			  ttm->num_pages, gtt->offset);
 		return r;
 	}
+	spin_lock(&gtt->adev->gtt_list_lock);
+	list_add_tail(&gtt->list, &gtt->adev->gtt_list);
+	spin_unlock(&gtt->adev->gtt_list_lock);
+	return 0;
+}
+
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
+{
+	struct amdgpu_ttm_tt *gtt, *tmp;
+	struct ttm_mem_reg bo_mem;
+	uint32_t flags;
+	int r;
+
+	bo_mem.mem_type = TTM_PL_TT;
+	spin_lock(&adev->gtt_list_lock);
+	list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
+		flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
+		r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
+				     gtt->ttm.ttm.pages, gtt->ttm.dma_address,
+				     flags);
+		if (r) {
+			spin_unlock(&adev->gtt_list_lock);
+			DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+				  gtt->ttm.ttm.num_pages, gtt->offset);
+			return r;
+		}
+	}
+	spin_unlock(&adev->gtt_list_lock);
 	return 0;
 }
 
@@ -677,12 +761,19 @@
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 
+	if (gtt->userptr)
+		amdgpu_ttm_tt_unpin_userptr(ttm);
+
+	if (!amdgpu_ttm_is_bound(ttm))
+		return 0;
+
 	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
 	if (gtt->adev->gart.ready)
 		amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
 
-	if (gtt->userptr)
-		amdgpu_ttm_tt_unpin_userptr(ttm);
+	spin_lock(&gtt->adev->gtt_list_lock);
+	list_del_init(&gtt->list);
+	spin_unlock(&gtt->adev->gtt_list_lock);
 
 	return 0;
 }
@@ -720,6 +811,7 @@
 		kfree(gtt);
 		return NULL;
 	}
+	INIT_LIST_HEAD(&gtt->list);
 	return &gtt->ttm.ttm;
 }
 
@@ -991,10 +1083,6 @@
 	unsigned i, j;
 	int r;
 
-	r = amdgpu_ttm_global_init(adev);
-	if (r) {
-		return r;
-	}
 	/* No others user of address space so set it to 0 */
 	r = ttm_bo_device_init(&adev->mman.bdev,
 			       adev->mman.bo_global_ref.ref.object,
@@ -1159,7 +1247,7 @@
 		       uint64_t dst_offset,
 		       uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct fence **fence)
+		       struct fence **fence, bool direct_submit)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
@@ -1203,8 +1291,79 @@
 
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);
+	if (direct_submit) {
+		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
+				       NULL, NULL, fence);
+		job->fence = fence_get(*fence);
+		if (r)
+			DRM_ERROR("Error scheduling IBs (%d)\n", r);
+		amdgpu_job_free(job);
+	} else {
+		r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+		if (r)
+			goto error_free;
+	}
+
+	return r;
+
+error_free:
+	amdgpu_job_free(job);
+	return r;
+}
+
+int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+		uint32_t src_data,
+		struct reservation_object *resv,
+		struct fence **fence)
+{
+	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_job *job;
+	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+
+	uint32_t max_bytes, byte_count;
+	uint64_t dst_offset;
+	unsigned int num_loops, num_dw;
+	unsigned int i;
+	int r;
+
+	byte_count = bo->tbo.num_pages << PAGE_SHIFT;
+	max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
+	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
+	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
+
+	/* for IB padding */
+	while (num_dw & 0x7)
+		num_dw++;
+
+	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+	if (r)
+		return r;
+
+	if (resv) {
+		r = amdgpu_sync_resv(adev, &job->sync, resv,
+				AMDGPU_FENCE_OWNER_UNDEFINED);
+		if (r) {
+			DRM_ERROR("sync failed (%d).\n", r);
+			goto error_free;
+		}
+	}
+
+	dst_offset = bo->tbo.mem.start << PAGE_SHIFT;
+	for (i = 0; i < num_loops; i++) {
+		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
+
+		amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
+				dst_offset, cur_size_in_bytes);
+
+		dst_offset += cur_size_in_bytes;
+		byte_count -= cur_size_in_bytes;
+	}
+
+	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+	WARN_ON(job->ibs[0].length_dw > num_dw);
 	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
-			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+			AMDGPU_FENCE_OWNER_UNDEFINED, fence);
 	if (r)
 		goto error_free;
 
@@ -1395,3 +1554,8 @@
 
 #endif
 }
+
+u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev)
+{
+	return ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
new file mode 100644
index 0000000..9812c80
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_TTM_H__
+#define __AMDGPU_TTM_H__
+
+#include "gpu_scheduler.h"
+
+#define AMDGPU_PL_GDS		(TTM_PL_PRIV + 0)
+#define AMDGPU_PL_GWS		(TTM_PL_PRIV + 1)
+#define AMDGPU_PL_OA		(TTM_PL_PRIV + 2)
+
+#define AMDGPU_PL_FLAG_GDS		(TTM_PL_FLAG_PRIV << 0)
+#define AMDGPU_PL_FLAG_GWS		(TTM_PL_FLAG_PRIV << 1)
+#define AMDGPU_PL_FLAG_OA		(TTM_PL_FLAG_PRIV << 2)
+
+#define AMDGPU_TTM_LRU_SIZE	20
+
+struct amdgpu_mman_lru {
+	struct list_head		*lru[TTM_NUM_MEM_TYPES];
+	struct list_head		*swap_lru;
+};
+
+struct amdgpu_mman {
+	struct ttm_bo_global_ref        bo_global_ref;
+	struct drm_global_reference	mem_global_ref;
+	struct ttm_bo_device		bdev;
+	bool				mem_global_referenced;
+	bool				initialized;
+
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry			*vram;
+	struct dentry			*gtt;
+#endif
+
+	/* buffer handling */
+	const struct amdgpu_buffer_funcs	*buffer_funcs;
+	struct amdgpu_ring			*buffer_funcs_ring;
+	/* Scheduler entity for buffer moves */
+	struct amd_sched_entity			entity;
+
+	/* custom LRU management */
+	struct amdgpu_mman_lru			log2_size[AMDGPU_TTM_LRU_SIZE];
+	/* guard for log2_size array, don't add anything in between */
+	struct amdgpu_mman_lru			guard;
+};
+
+extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
+
+int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
+			 struct ttm_buffer_object *tbo,
+			 const struct ttm_place *place,
+			 struct ttm_mem_reg *mem);
+
+int amdgpu_copy_buffer(struct amdgpu_ring *ring,
+		       uint64_t src_offset,
+		       uint64_t dst_offset,
+		       uint32_t byte_count,
+		       struct reservation_object *resv,
+		       struct fence **fence, bool direct_submit);
+int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+			uint32_t src_data,
+			struct reservation_object *resv,
+			struct fence **fence);
+
+int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
+bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
+int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 5cc95f1..cb3d252 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -247,40 +247,32 @@
 	const struct common_firmware_header *header = NULL;
 
 	err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
-			AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
+			       AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
 	if (err) {
 		dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
-		err = -ENOMEM;
 		goto failed;
 	}
 
 	err = amdgpu_bo_reserve(*bo, false);
 	if (err) {
-		amdgpu_bo_unref(bo);
 		dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
-		goto failed;
+		goto failed_reserve;
 	}
 
 	err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
 	if (err) {
-		amdgpu_bo_unreserve(*bo);
-		amdgpu_bo_unref(bo);
 		dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
-		goto failed;
+		goto failed_pin;
 	}
 
 	err = amdgpu_bo_kmap(*bo, &fw_buf_ptr);
 	if (err) {
 		dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
-		amdgpu_bo_unpin(*bo);
-		amdgpu_bo_unreserve(*bo);
-		amdgpu_bo_unref(bo);
-		goto failed;
+		goto failed_kmap;
 	}
 
 	amdgpu_bo_unreserve(*bo);
 
-	fw_offset = 0;
 	for (i = 0; i < AMDGPU_UCODE_ID_MAXIMUM; i++) {
 		ucode = &adev->firmware.ucode[i];
 		if (ucode->fw) {
@@ -290,10 +282,16 @@
 			fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 		}
 	}
+	return 0;
 
+failed_kmap:
+	amdgpu_bo_unpin(*bo);
+failed_pin:
+	amdgpu_bo_unreserve(*bo);
+failed_reserve:
+	amdgpu_bo_unref(bo);
 failed:
-	if (err)
-		adev->firmware.smu_load = false;
+	adev->firmware.smu_load = false;
 
 	return err;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 4aa993d..e3281ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -201,39 +201,14 @@
 	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
 		  +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
 		  +  AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
-	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			     NULL, NULL, &adev->uvd.vcpu_bo);
+	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+				    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo,
+				    &adev->uvd.gpu_addr, &adev->uvd.cpu_addr);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
 		return r;
 	}
 
-	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
-	if (r) {
-		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
-		dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r);
-		return r;
-	}
-
-	r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
-			  &adev->uvd.gpu_addr);
-	if (r) {
-		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
-		dev_err(adev->dev, "(%d) UVD bo pin failed\n", r);
-		return r;
-	}
-
-	r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr);
-	if (r) {
-		dev_err(adev->dev, "(%d) UVD map failed\n", r);
-		return r;
-	}
-
-	amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-
 	ring = &adev->uvd.ring;
 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
 	r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
@@ -274,22 +249,13 @@
 
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
-	int r;
-
 	kfree(adev->uvd.saved_bo);
 
 	amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
 
-	if (adev->uvd.vcpu_bo) {
-		r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
-		if (!r) {
-			amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
-			amdgpu_bo_unpin(adev->uvd.vcpu_bo);
-			amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-		}
-
-		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
-	}
+	amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
+			      &adev->uvd.gpu_addr,
+			      (void **)&adev->uvd.cpu_addr);
 
 	amdgpu_ring_fini(&adev->uvd.ring);
 
@@ -323,7 +289,7 @@
 	if (!adev->uvd.saved_bo)
 		return -ENOMEM;
 
-	memcpy(adev->uvd.saved_bo, ptr, size);
+	memcpy_fromio(adev->uvd.saved_bo, ptr, size);
 
 	return 0;
 }
@@ -340,7 +306,7 @@
 	ptr = adev->uvd.cpu_addr;
 
 	if (adev->uvd.saved_bo != NULL) {
-		memcpy(ptr, adev->uvd.saved_bo, size);
+		memcpy_toio(ptr, adev->uvd.saved_bo, size);
 		kfree(adev->uvd.saved_bo);
 		adev->uvd.saved_bo = NULL;
 	} else {
@@ -349,11 +315,11 @@
 
 		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
 		offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-		memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
-			(adev->uvd.fw->size) - offset);
+		memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset,
+			    le32_to_cpu(hdr->ucode_size_bytes));
 		size -= le32_to_cpu(hdr->ucode_size_bytes);
 		ptr += le32_to_cpu(hdr->ucode_size_bytes);
-		memset(ptr, 0, size);
+		memset_io(ptr, 0, size);
 	}
 
 	return 0;
@@ -385,12 +351,12 @@
 	}
 }
 
-static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo)
+static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
 {
 	int i;
-	for (i = 0; i < rbo->placement.num_placement; ++i) {
-		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
-		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+	for (i = 0; i < abo->placement.num_placement; ++i) {
+		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
+		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
 	}
 }
 
@@ -843,6 +809,7 @@
 				return r;
 			break;
 		case mmUVD_ENGINE_CNTL:
+		case mmUVD_NO_OP:
 			break;
 		default:
 			DRM_ERROR("Invalid reg 0x%X!\n", reg);
@@ -915,6 +882,10 @@
 		return -EINVAL;
 	}
 
+	r = amdgpu_cs_sysvm_access_required(parser);
+	if (r)
+		return r;
+
 	ctx.parser = parser;
 	ctx.buf_sizes = buf_sizes;
 	ctx.ib_idx = ib_idx;
@@ -981,8 +952,10 @@
 	ib->ptr[3] = addr >> 32;
 	ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
 	ib->ptr[5] = 0;
-	for (i = 6; i < 16; ++i)
-		ib->ptr[i] = PACKET2(0);
+	for (i = 6; i < 16; i += 2) {
+		ib->ptr[i] = PACKET0(mmUVD_NO_OP, 0);
+		ib->ptr[i+1] = 0;
+	}
 	ib->length_dw = 16;
 
 	if (direct) {
@@ -1114,15 +1087,9 @@
 {
 	struct amdgpu_device *adev =
 		container_of(work, struct amdgpu_device, uvd.idle_work.work);
-	unsigned i, fences, handles = 0;
+	unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
 
-	fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
-
-	for (i = 0; i < adev->uvd.max_handles; ++i)
-		if (atomic_read(&adev->uvd.handles[i]))
-			++handles;
-
-	if (fences == 0 && handles == 0) {
+	if (fences == 0) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_uvd(adev, false);
 		} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 05865ce..7fe8fd8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -210,6 +210,8 @@
  */
 int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 {
+	unsigned i;
+
 	if (adev->vce.vcpu_bo == NULL)
 		return 0;
 
@@ -217,8 +219,8 @@
 
 	amdgpu_bo_unref(&adev->vce.vcpu_bo);
 
-	amdgpu_ring_fini(&adev->vce.ring[0]);
-	amdgpu_ring_fini(&adev->vce.ring[1]);
+	for (i = 0; i < adev->vce.num_rings; i++)
+		amdgpu_ring_fini(&adev->vce.ring[i]);
 
 	release_firmware(adev->vce.fw);
 	mutex_destroy(&adev->vce.idle_mutex);
@@ -282,8 +284,8 @@
 
 	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
 	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-	memcpy(cpu_addr, (adev->vce.fw->data) + offset,
-		(adev->vce.fw->size) - offset);
+	memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
+		    adev->vce.fw->size - offset);
 
 	amdgpu_bo_kunmap(adev->vce.vcpu_bo);
 
@@ -303,9 +305,12 @@
 {
 	struct amdgpu_device *adev =
 		container_of(work, struct amdgpu_device, vce.idle_work.work);
+	unsigned i, count = 0;
 
-	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
-	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
+	for (i = 0; i < adev->vce.num_rings; i++)
+		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
+
+	if (count == 0) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_vce(adev, false);
 		} else {
@@ -634,7 +639,11 @@
 	uint32_t allocated = 0;
 	uint32_t tmp, handle = 0;
 	uint32_t *size = &tmp;
-	int i, r = 0, idx = 0;
+	int i, r, idx = 0;
+
+	r = amdgpu_cs_sysvm_access_required(p);
+	if (r)
+		return r;
 
 	while (idx < ib->length_dw) {
 		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
@@ -687,6 +696,21 @@
 		case 0x04000008: /* rdo */
 		case 0x04000009: /* vui */
 		case 0x05000002: /* auxiliary buffer */
+		case 0x05000009: /* clock table */
+			break;
+
+		case 0x0500000c: /* hw config */
+			switch (p->adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_CIK
+			case CHIP_KAVERI:
+			case CHIP_MULLINS:
+#endif
+			case CHIP_CARRIZO:
+				break;
+			default:
+				r = -EINVAL;
+				goto out;
+			}
 			break;
 
 		case 0x03000001: /* encode */
@@ -799,6 +823,18 @@
 	amdgpu_ring_write(ring, VCE_CMD_END);
 }
 
+unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		4; /* amdgpu_vce_ring_emit_ib */
+}
+
+unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		6; /* amdgpu_vce_ring_emit_fence  x1 no user fence */
+}
+
 /**
  * amdgpu_vce_ring_test_ring - test if VCE ring is working
  *
@@ -850,8 +886,8 @@
 	struct fence *fence = NULL;
 	long r;
 
-	/* skip vce ring1 ib test for now, since it's not reliable */
-	if (ring == &ring->adev->vce.ring[1])
+	/* skip vce ring1/2 ib test for now, since it's not reliable */
+	if (ring != &ring->adev->vce.ring[0])
 		return 0;
 
 	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 63f83d0..12729d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -42,5 +42,7 @@
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring);
 void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring);
+unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring);
+unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
new file mode 100644
index 0000000..2c37a37
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Monk.liu@amd.com
+ */
+#ifndef AMDGPU_VIRT_H
+#define AMDGPU_VIRT_H
+
+#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS  (1 << 0) /* vBIOS is sr-iov ready */
+#define AMDGPU_SRIOV_CAPS_ENABLE_IOV   (1 << 1) /* sr-iov is enabled on this GPU */
+#define AMDGPU_SRIOV_CAPS_IS_VF        (1 << 2) /* this GPU is a virtual function */
+#define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* thw whole GPU is pass through for VM */
+/* GPU virtualization */
+struct amdgpu_virtualization {
+	uint32_t virtual_caps;
+};
+
+#define amdgpu_sriov_enabled(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
+
+#define amdgpu_sriov_vf(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_IS_VF)
+
+#define amdgpu_sriov_bios(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)
+
+#define amdgpu_passthrough(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_PASSTHROUGH_MODE)
+
+static inline bool is_virtual_machine(void)
+{
+#ifdef CONFIG_X86
+	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+	return false;
+#endif
+}
+
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 80120fa..06f2432 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -51,19 +51,22 @@
  * SI supports 16.
  */
 
-/* Special value that no flush is necessary */
-#define AMDGPU_VM_NO_FLUSH (~0ll)
-
 /* Local structure. Encapsulate some VM table update parameters to reduce
  * the number of function parameters
  */
-struct amdgpu_vm_update_params {
+struct amdgpu_pte_update_params {
+	/* amdgpu device we do this update for */
+	struct amdgpu_device *adev;
 	/* address where to copy page table entries from */
 	uint64_t src;
-	/* DMA addresses to use for mapping */
-	dma_addr_t *pages_addr;
 	/* indirect buffer to fill with commands */
 	struct amdgpu_ib *ib;
+	/* Function which actually does the update */
+	void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
+		     uint64_t addr, unsigned count, uint32_t incr,
+		     uint32_t flags);
+	/* indicate update pt or its shadow */
+	bool shadow;
 };
 
 /**
@@ -467,10 +470,9 @@
 }
 
 /**
- * amdgpu_vm_update_pages - helper to call the right asic function
+ * amdgpu_vm_do_set_ptes - helper to call the right asic function
  *
- * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
+ * @params: see amdgpu_pte_update_params definition
  * @pe: addr of the page entry
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
@@ -480,32 +482,46 @@
  * Traces the parameters and calls the right asic functions
  * to setup the page table using the DMA.
  */
-static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
-				   struct amdgpu_vm_update_params
-					*vm_update_params,
+static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
+				  uint64_t pe, uint64_t addr,
+				  unsigned count, uint32_t incr,
+				  uint32_t flags)
+{
+	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+
+	if (count < 3) {
+		amdgpu_vm_write_pte(params->adev, params->ib, pe,
+				    addr | flags, count, incr);
+
+	} else {
+		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
+				      count, incr, flags);
+	}
+}
+
+/**
+ * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
+ *
+ * @params: see amdgpu_pte_update_params definition
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the DMA function to copy the PTEs.
+ */
+static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
 				   uint64_t pe, uint64_t addr,
 				   unsigned count, uint32_t incr,
 				   uint32_t flags)
 {
-	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
+	uint64_t src = (params->src + (addr >> 12) * 8);
 
-	if (vm_update_params->src) {
-		amdgpu_vm_copy_pte(adev, vm_update_params->ib,
-			pe, (vm_update_params->src + (addr >> 12) * 8), count);
 
-	} else if (vm_update_params->pages_addr) {
-		amdgpu_vm_write_pte(adev, vm_update_params->ib,
-			vm_update_params->pages_addr,
-			pe, addr, count, incr, flags);
+	trace_amdgpu_vm_copy_ptes(pe, src, count);
 
-	} else if (count < 3) {
-		amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr,
-				    count, incr, flags);
-
-	} else {
-		amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr,
-				      count, incr, flags);
-	}
+	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
 }
 
 /**
@@ -523,12 +539,11 @@
 	struct amdgpu_ring *ring;
 	struct fence *fence = NULL;
 	struct amdgpu_job *job;
-	struct amdgpu_vm_update_params vm_update_params;
+	struct amdgpu_pte_update_params params;
 	unsigned entries;
 	uint64_t addr;
 	int r;
 
-	memset(&vm_update_params, 0, sizeof(vm_update_params));
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 	r = reservation_object_reserve_shared(bo->tbo.resv);
@@ -539,6 +554,10 @@
 	if (r)
 		goto error;
 
+	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+	if (r)
+		goto error;
+
 	addr = amdgpu_bo_gpu_offset(bo);
 	entries = amdgpu_bo_size(bo) / 8;
 
@@ -546,9 +565,10 @@
 	if (r)
 		goto error;
 
-	vm_update_params.ib = &job->ibs[0];
-	amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries,
-			       0, 0);
+	memset(&params, 0, sizeof(params));
+	params.adev = adev;
+	params.ib = &job->ibs[0];
+	amdgpu_vm_do_set_ptes(&params, addr, 0, entries, 0, 0);
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 
 	WARN_ON(job->ibs[0].length_dw > 64);
@@ -577,55 +597,46 @@
  * Look up the physical address of the page that the pte resolves
  * to and return the pointer for the page table entry.
  */
-uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
+static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 {
 	uint64_t result;
 
-	if (pages_addr) {
-		/* page table offset */
-		result = pages_addr[addr >> PAGE_SHIFT];
+	/* page table offset */
+	result = pages_addr[addr >> PAGE_SHIFT];
 
-		/* in case cpu page size != gpu page size*/
-		result |= addr & (~PAGE_MASK);
-
-	} else {
-		/* No mapping required */
-		result = addr;
-	}
+	/* in case cpu page size != gpu page size*/
+	result |= addr & (~PAGE_MASK);
 
 	result &= 0xFFFFFFFFFFFFF000ULL;
 
 	return result;
 }
 
-/**
- * amdgpu_vm_update_pdes - make sure that page directory is valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm)
+static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
+					 struct amdgpu_vm *vm,
+					 bool shadow)
 {
 	struct amdgpu_ring *ring;
-	struct amdgpu_bo *pd = vm->page_directory;
-	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
+	struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
+		vm->page_directory;
+	uint64_t pd_addr;
 	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
 	uint64_t last_pde = ~0, last_pt = ~0;
 	unsigned count = 0, pt_idx, ndw;
 	struct amdgpu_job *job;
-	struct amdgpu_vm_update_params vm_update_params;
+	struct amdgpu_pte_update_params params;
 	struct fence *fence = NULL;
 
 	int r;
 
-	memset(&vm_update_params, 0, sizeof(vm_update_params));
+	if (!pd)
+		return 0;
+
+	r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
+	if (r)
+		return r;
+
+	pd_addr = amdgpu_bo_gpu_offset(pd);
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 	/* padding, etc. */
@@ -638,7 +649,9 @@
 	if (r)
 		return r;
 
-	vm_update_params.ib = &job->ibs[0];
+	memset(&params, 0, sizeof(params));
+	params.adev = adev;
+	params.ib = &job->ibs[0];
 
 	/* walk over the address space and update the page directory */
 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -648,20 +661,34 @@
 		if (bo == NULL)
 			continue;
 
+		if (bo->shadow) {
+			struct amdgpu_bo *shadow = bo->shadow;
+
+			r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+			if (r)
+				return r;
+		}
+
 		pt = amdgpu_bo_gpu_offset(bo);
-		if (vm->page_tables[pt_idx].addr == pt)
-			continue;
-		vm->page_tables[pt_idx].addr = pt;
+		if (!shadow) {
+			if (vm->page_tables[pt_idx].addr == pt)
+				continue;
+			vm->page_tables[pt_idx].addr = pt;
+		} else {
+			if (vm->page_tables[pt_idx].shadow_addr == pt)
+				continue;
+			vm->page_tables[pt_idx].shadow_addr = pt;
+		}
 
 		pde = pd_addr + pt_idx * 8;
 		if (((last_pde + 8 * count) != pde) ||
-		    ((last_pt + incr * count) != pt)) {
+		    ((last_pt + incr * count) != pt) ||
+		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
 
 			if (count) {
-				amdgpu_vm_update_pages(adev, &vm_update_params,
-						       last_pde, last_pt,
-						       count, incr,
-						       AMDGPU_PTE_VALID);
+				amdgpu_vm_do_set_ptes(&params, last_pde,
+						      last_pt, count, incr,
+						      AMDGPU_PTE_VALID);
 			}
 
 			count = 1;
@@ -673,15 +700,14 @@
 	}
 
 	if (count)
-		amdgpu_vm_update_pages(adev, &vm_update_params,
-					last_pde, last_pt,
-					count, incr, AMDGPU_PTE_VALID);
+		amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
+				      count, incr, AMDGPU_PTE_VALID);
 
-	if (vm_update_params.ib->length_dw != 0) {
-		amdgpu_ring_pad_ib(ring, vm_update_params.ib);
+	if (params.ib->length_dw != 0) {
+		amdgpu_ring_pad_ib(ring, params.ib);
 		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
 				 AMDGPU_FENCE_OWNER_VM);
-		WARN_ON(vm_update_params.ib->length_dw > ndw);
+		WARN_ON(params.ib->length_dw > ndw);
 		r = amdgpu_job_submit(job, ring, &vm->entity,
 				      AMDGPU_FENCE_OWNER_VM, &fence);
 		if (r)
@@ -703,21 +729,135 @@
 	return r;
 }
 
-/**
- * amdgpu_vm_frag_ptes - add fragment information to PTEs
+/*
+ * amdgpu_vm_update_pdes - make sure that page directory is valid
  *
  * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
- * @pe_start: first PTE to handle
- * @pe_end: last PTE to handle
- * @addr: addr those PTEs should point to
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+                                   struct amdgpu_vm *vm)
+{
+	int r;
+
+	r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
+	if (r)
+		return r;
+	return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
+}
+
+/**
+ * amdgpu_vm_update_ptes - make sure that page tables are valid
+ *
+ * @params: see amdgpu_pte_update_params definition
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to, the next dst inside the function
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end.
+ */
+static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
+				  struct amdgpu_vm *vm,
+				  uint64_t start, uint64_t end,
+				  uint64_t dst, uint32_t flags)
+{
+	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
+
+	uint64_t cur_pe_start, cur_nptes, cur_dst;
+	uint64_t addr; /* next GPU address to be updated */
+	uint64_t pt_idx;
+	struct amdgpu_bo *pt;
+	unsigned nptes; /* next number of ptes to be updated */
+	uint64_t next_pe_start;
+
+	/* initialize the variables */
+	addr = start;
+	pt_idx = addr >> amdgpu_vm_block_size;
+	pt = vm->page_tables[pt_idx].entry.robj;
+	if (params->shadow) {
+		if (!pt->shadow)
+			return;
+		pt = vm->page_tables[pt_idx].entry.robj->shadow;
+	}
+	if ((addr & ~mask) == (end & ~mask))
+		nptes = end - addr;
+	else
+		nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+
+	cur_pe_start = amdgpu_bo_gpu_offset(pt);
+	cur_pe_start += (addr & mask) * 8;
+	cur_nptes = nptes;
+	cur_dst = dst;
+
+	/* for next ptb*/
+	addr += nptes;
+	dst += nptes * AMDGPU_GPU_PAGE_SIZE;
+
+	/* walk over the address space and update the page tables */
+	while (addr < end) {
+		pt_idx = addr >> amdgpu_vm_block_size;
+		pt = vm->page_tables[pt_idx].entry.robj;
+		if (params->shadow) {
+			if (!pt->shadow)
+				return;
+			pt = vm->page_tables[pt_idx].entry.robj->shadow;
+		}
+
+		if ((addr & ~mask) == (end & ~mask))
+			nptes = end - addr;
+		else
+			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+
+		next_pe_start = amdgpu_bo_gpu_offset(pt);
+		next_pe_start += (addr & mask) * 8;
+
+		if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
+		    ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
+			/* The next ptb is consecutive to current ptb.
+			 * Don't call the update function now.
+			 * Will update two ptbs together in future.
+			*/
+			cur_nptes += nptes;
+		} else {
+			params->func(params, cur_pe_start, cur_dst, cur_nptes,
+				     AMDGPU_GPU_PAGE_SIZE, flags);
+
+			cur_pe_start = next_pe_start;
+			cur_nptes = nptes;
+			cur_dst = dst;
+		}
+
+		/* for next ptb*/
+		addr += nptes;
+		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
+	}
+
+	params->func(params, cur_pe_start, cur_dst, cur_nptes,
+		     AMDGPU_GPU_PAGE_SIZE, flags);
+}
+
+/*
+ * amdgpu_vm_frag_ptes - add fragment information to PTEs
+ *
+ * @params: see amdgpu_pte_update_params definition
+ * @vm: requested vm
+ * @start: first PTE to handle
+ * @end: last PTE to handle
+ * @dst: addr those PTEs should point to
  * @flags: hw mapping flags
  */
-static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
-				struct amdgpu_vm_update_params
-					*vm_update_params,
-				uint64_t pe_start, uint64_t pe_end,
-				uint64_t addr, uint32_t flags)
+static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params	*params,
+				struct amdgpu_vm *vm,
+				uint64_t start, uint64_t end,
+				uint64_t dst, uint32_t flags)
 {
 	/**
 	 * The MC L1 TLB supports variable sized pages, based on a fragment
@@ -739,138 +879,39 @@
 	 */
 
 	/* SI and newer are optimized for 64KB */
-	uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
-	uint64_t frag_align = 0x80;
+	uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
+	uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
 
-	uint64_t frag_start = ALIGN(pe_start, frag_align);
-	uint64_t frag_end = pe_end & ~(frag_align - 1);
-
-	unsigned count;
-
-	/* Abort early if there isn't anything to do */
-	if (pe_start == pe_end)
-		return;
+	uint64_t frag_start = ALIGN(start, frag_align);
+	uint64_t frag_end = end & ~(frag_align - 1);
 
 	/* system pages are non continuously */
-	if (vm_update_params->src || vm_update_params->pages_addr ||
-		!(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
+	if (params->src || !(flags & AMDGPU_PTE_VALID) ||
+	    (frag_start >= frag_end)) {
 
-		count = (pe_end - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, vm_update_params, pe_start,
-				       addr, count, AMDGPU_GPU_PAGE_SIZE,
-				       flags);
+		amdgpu_vm_update_ptes(params, vm, start, end, dst, flags);
 		return;
 	}
 
 	/* handle the 4K area at the beginning */
-	if (pe_start != frag_start) {
-		count = (frag_start - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr,
-				       count, AMDGPU_GPU_PAGE_SIZE, flags);
-		addr += AMDGPU_GPU_PAGE_SIZE * count;
+	if (start != frag_start) {
+		amdgpu_vm_update_ptes(params, vm, start, frag_start,
+				      dst, flags);
+		dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
 	}
 
 	/* handle the area in the middle */
-	count = (frag_end - frag_start) / 8;
-	amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count,
-			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
+	amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst,
+			      flags | frag_flags);
 
 	/* handle the 4K area at the end */
-	if (frag_end != pe_end) {
-		addr += AMDGPU_GPU_PAGE_SIZE * count;
-		count = (pe_end - frag_end) / 8;
-		amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr,
-				       count, AMDGPU_GPU_PAGE_SIZE, flags);
+	if (frag_end != end) {
+		dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
+		amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags);
 	}
 }
 
 /**
- * amdgpu_vm_update_ptes - make sure that page tables are valid
- *
- * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- * @dst: destination address to map to, the next dst inside the function
- * @flags: mapping flags
- *
- * Update the page tables in the range @start - @end.
- */
-static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
-				  struct amdgpu_vm_update_params
-					*vm_update_params,
-				  struct amdgpu_vm *vm,
-				  uint64_t start, uint64_t end,
-				  uint64_t dst, uint32_t flags)
-{
-	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
-
-	uint64_t cur_pe_start, cur_pe_end, cur_dst;
-	uint64_t addr; /* next GPU address to be updated */
-	uint64_t pt_idx;
-	struct amdgpu_bo *pt;
-	unsigned nptes; /* next number of ptes to be updated */
-	uint64_t next_pe_start;
-
-	/* initialize the variables */
-	addr = start;
-	pt_idx = addr >> amdgpu_vm_block_size;
-	pt = vm->page_tables[pt_idx].entry.robj;
-
-	if ((addr & ~mask) == (end & ~mask))
-		nptes = end - addr;
-	else
-		nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
-
-	cur_pe_start = amdgpu_bo_gpu_offset(pt);
-	cur_pe_start += (addr & mask) * 8;
-	cur_pe_end = cur_pe_start + 8 * nptes;
-	cur_dst = dst;
-
-	/* for next ptb*/
-	addr += nptes;
-	dst += nptes * AMDGPU_GPU_PAGE_SIZE;
-
-	/* walk over the address space and update the page tables */
-	while (addr < end) {
-		pt_idx = addr >> amdgpu_vm_block_size;
-		pt = vm->page_tables[pt_idx].entry.robj;
-
-		if ((addr & ~mask) == (end & ~mask))
-			nptes = end - addr;
-		else
-			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
-
-		next_pe_start = amdgpu_bo_gpu_offset(pt);
-		next_pe_start += (addr & mask) * 8;
-
-		if (cur_pe_end == next_pe_start) {
-			/* The next ptb is consecutive to current ptb.
-			 * Don't call amdgpu_vm_frag_ptes now.
-			 * Will update two ptbs together in future.
-			*/
-			cur_pe_end += 8 * nptes;
-		} else {
-			amdgpu_vm_frag_ptes(adev, vm_update_params,
-					    cur_pe_start, cur_pe_end,
-					    cur_dst, flags);
-
-			cur_pe_start = next_pe_start;
-			cur_pe_end = next_pe_start + 8 * nptes;
-			cur_dst = dst;
-		}
-
-		/* for next ptb*/
-		addr += nptes;
-		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
-	}
-
-	amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start,
-			    cur_pe_end, cur_dst, flags);
-}
-
-/**
  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
  *
  * @adev: amdgpu_device pointer
@@ -900,14 +941,19 @@
 	void *owner = AMDGPU_FENCE_OWNER_VM;
 	unsigned nptes, ncmds, ndw;
 	struct amdgpu_job *job;
-	struct amdgpu_vm_update_params vm_update_params;
+	struct amdgpu_pte_update_params params;
 	struct fence *f = NULL;
 	int r;
 
+	memset(&params, 0, sizeof(params));
+	params.adev = adev;
+	params.src = src;
+
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
-	memset(&vm_update_params, 0, sizeof(vm_update_params));
-	vm_update_params.src = src;
-	vm_update_params.pages_addr = pages_addr;
+
+	memset(&params, 0, sizeof(params));
+	params.adev = adev;
+	params.src = src;
 
 	/* sync to everything on unmapping */
 	if (!(flags & AMDGPU_PTE_VALID))
@@ -924,30 +970,53 @@
 	/* padding, etc. */
 	ndw = 64;
 
-	if (vm_update_params.src) {
+	if (src) {
 		/* only copy commands needed */
 		ndw += ncmds * 7;
 
-	} else if (vm_update_params.pages_addr) {
-		/* header for write data commands */
-		ndw += ncmds * 4;
+		params.func = amdgpu_vm_do_copy_ptes;
 
-		/* body of write data command */
+	} else if (pages_addr) {
+		/* copy commands needed */
+		ndw += ncmds * 7;
+
+		/* and also PTEs */
 		ndw += nptes * 2;
 
+		params.func = amdgpu_vm_do_copy_ptes;
+
 	} else {
 		/* set page commands needed */
 		ndw += ncmds * 10;
 
 		/* two extra commands for begin/end of fragment */
 		ndw += 2 * 10;
+
+		params.func = amdgpu_vm_do_set_ptes;
 	}
 
 	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
 	if (r)
 		return r;
 
-	vm_update_params.ib = &job->ibs[0];
+	params.ib = &job->ibs[0];
+
+	if (!src && pages_addr) {
+		uint64_t *pte;
+		unsigned i;
+
+		/* Put the PTEs at the end of the IB. */
+		i = ndw - nptes * 2;
+		pte= (uint64_t *)&(job->ibs->ptr[i]);
+		params.src = job->ibs->gpu_addr + i * 4;
+
+		for (i = 0; i < nptes; ++i) {
+			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
+						    AMDGPU_GPU_PAGE_SIZE);
+			pte[i] |= flags;
+		}
+		addr = 0;
+	}
 
 	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
 	if (r)
@@ -962,11 +1031,13 @@
 	if (r)
 		goto error_free;
 
-	amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
-			      last + 1, addr, flags);
+	params.shadow = true;
+	amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
+	params.shadow = false;
+	amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
 
-	amdgpu_ring_pad_ib(ring, vm_update_params.ib);
-	WARN_ON(vm_update_params.ib->length_dw > ndw);
+	amdgpu_ring_pad_ib(ring, params.ib);
+	WARN_ON(params.ib->length_dw > ndw);
 	r = amdgpu_job_submit(job, ring, &vm->entity,
 			      AMDGPU_FENCE_OWNER_VM, &f);
 	if (r)
@@ -1062,28 +1133,32 @@
  *
  * @adev: amdgpu_device pointer
  * @bo_va: requested BO and VM object
- * @mem: ttm mem
+ * @clear: if true clear the entries
  *
  * Fill in the page table entries for @bo_va.
  * Returns 0 for success, -EINVAL for failure.
- *
- * Object have to be reserved and mutex must be locked!
  */
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			struct amdgpu_bo_va *bo_va,
-			struct ttm_mem_reg *mem)
+			bool clear)
 {
 	struct amdgpu_vm *vm = bo_va->vm;
 	struct amdgpu_bo_va_mapping *mapping;
 	dma_addr_t *pages_addr = NULL;
 	uint32_t gtt_flags, flags;
+	struct ttm_mem_reg *mem;
 	struct fence *exclusive;
 	uint64_t addr;
 	int r;
 
-	if (mem) {
+	if (clear) {
+		mem = NULL;
+		addr = 0;
+		exclusive = NULL;
+	} else {
 		struct ttm_dma_tt *ttm;
 
+		mem = &bo_va->bo->tbo.mem;
 		addr = (u64)mem->start << PAGE_SHIFT;
 		switch (mem->mem_type) {
 		case TTM_PL_TT:
@@ -1101,13 +1176,11 @@
 		}
 
 		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
-	} else {
-		addr = 0;
-		exclusive = NULL;
 	}
 
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
-	gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;
+	gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
+		adev == bo_va->bo->adev) ? flags : 0;
 
 	spin_lock(&vm->status_lock);
 	if (!list_empty(&bo_va->vm_status))
@@ -1134,7 +1207,7 @@
 	spin_lock(&vm->status_lock);
 	list_splice_init(&bo_va->invalids, &bo_va->valids);
 	list_del_init(&bo_va->vm_status);
-	if (!mem)
+	if (clear)
 		list_add(&bo_va->vm_status, &vm->cleared);
 	spin_unlock(&vm->status_lock);
 
@@ -1197,7 +1270,7 @@
 			struct amdgpu_bo_va, vm_status);
 		spin_unlock(&vm->status_lock);
 
-		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
+		r = amdgpu_vm_bo_update(adev, bo_va, true);
 		if (r)
 			return r;
 
@@ -1342,7 +1415,8 @@
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
 				     AMDGPU_GPU_PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+				     AMDGPU_GEM_CREATE_SHADOW,
 				     NULL, resv, &pt);
 		if (r)
 			goto error_free;
@@ -1354,10 +1428,20 @@
 
 		r = amdgpu_vm_clear_bo(adev, vm, pt);
 		if (r) {
+			amdgpu_bo_unref(&pt->shadow);
 			amdgpu_bo_unref(&pt);
 			goto error_free;
 		}
 
+		if (pt->shadow) {
+			r = amdgpu_vm_clear_bo(adev, vm, pt->shadow);
+			if (r) {
+				amdgpu_bo_unref(&pt->shadow);
+				amdgpu_bo_unref(&pt);
+				goto error_free;
+			}
+		}
+
 		entry->robj = pt;
 		entry->priority = 0;
 		entry->tv.bo = &entry->robj->tbo;
@@ -1541,7 +1625,8 @@
 
 	r = amdgpu_bo_create(adev, pd_size, align, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+			     AMDGPU_GEM_CREATE_SHADOW,
 			     NULL, NULL, &vm->page_directory);
 	if (r)
 		goto error_free_sched_entity;
@@ -1551,14 +1636,25 @@
 		goto error_free_page_directory;
 
 	r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
-	amdgpu_bo_unreserve(vm->page_directory);
 	if (r)
-		goto error_free_page_directory;
+		goto error_unreserve;
+
+	if (vm->page_directory->shadow) {
+		r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory->shadow);
+		if (r)
+			goto error_unreserve;
+	}
+
 	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
+	amdgpu_bo_unreserve(vm->page_directory);
 
 	return 0;
 
+error_unreserve:
+	amdgpu_bo_unreserve(vm->page_directory);
+
 error_free_page_directory:
+	amdgpu_bo_unref(&vm->page_directory->shadow);
 	amdgpu_bo_unref(&vm->page_directory);
 	vm->page_directory = NULL;
 
@@ -1600,10 +1696,18 @@
 		kfree(mapping);
 	}
 
-	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
-		amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
+	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
+		struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
+
+		if (!pt)
+			continue;
+
+		amdgpu_bo_unref(&pt->shadow);
+		amdgpu_bo_unref(&pt);
+	}
 	drm_free_large(vm->page_tables);
 
+	amdgpu_bo_unref(&vm->page_directory->shadow);
 	amdgpu_bo_unref(&vm->page_directory);
 	fence_put(vm->page_directory_fence);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 49a39b1..f7d236f 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -497,7 +497,13 @@
 			 * SetPixelClock provides the dividers
 			 */
 			args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
-			args.v6.ucPpll = ATOM_EXT_PLL1;
+			if (adev->asic_type == CHIP_TAHITI ||
+			    adev->asic_type == CHIP_PITCAIRN ||
+			    adev->asic_type == CHIP_VERDE ||
+			    adev->asic_type == CHIP_OLAND)
+				args.v6.ucPpll = ATOM_PPLL0;
+			else
+				args.v6.ucPpll = ATOM_EXT_PLL1;
 			break;
 		default:
 			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 7f85c2c..f81068b 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -88,7 +88,6 @@
 
 	/* timeout */
 	if (args.v2.ucReplyStatus == 1) {
-		DRM_DEBUG_KMS("dp_aux_ch timeout\n");
 		r = -ETIMEDOUT;
 		goto done;
 	}
@@ -339,22 +338,21 @@
 {
 	struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
 	u8 msg[DP_DPCD_SIZE];
-	int ret, i;
+	int ret;
 
-	for (i = 0; i < 7; i++) {
-		ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV, msg,
-				       DP_DPCD_SIZE);
-		if (ret == DP_DPCD_SIZE) {
-			memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
+	ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV,
+			       msg, DP_DPCD_SIZE);
+	if (ret == DP_DPCD_SIZE) {
+		memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
 
-			DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
-				      dig_connector->dpcd);
+		DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+			      dig_connector->dpcd);
 
-			amdgpu_atombios_dp_probe_oui(amdgpu_connector);
+		amdgpu_atombios_dp_probe_oui(amdgpu_connector);
 
-			return 0;
-		}
+		return 0;
 	}
+
 	dig_connector->dpcd[0] = 0;
 	return -EINVAL;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
index bc56c8a..b374653 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
@@ -27,6 +27,7 @@
 #include "amdgpu.h"
 #include "atom.h"
 #include "amdgpu_atombios.h"
+#include "atombios_i2c.h"
 
 #define TARGET_HW_I2C_CLOCK 50
 
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index a5c94b4..1d8c375 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -5396,7 +5396,7 @@
 	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
 		       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
 
-	ci_dpm_powergate_uvd(adev, false);
+	ci_dpm_powergate_uvd(adev, true);
 
 	if (!amdgpu_ci_is_smc_running(adev))
 		return;
@@ -5874,7 +5874,10 @@
 	pi->pcie_dpm_key_disabled = 0;
 	pi->thermal_sclk_dpm_enabled = 0;
 
-	pi->caps_sclk_ds = true;
+	if (amdgpu_sclk_deep_sleep_en)
+		pi->caps_sclk_ds = true;
+	else
+		pi->caps_sclk_ds = false;
 
 	pi->mclk_strobe_mode_threshold = 40000;
 	pi->mclk_stutter_mode_threshold = 40000;
@@ -6033,7 +6036,7 @@
 
 	pi->caps_dynamic_ac_timing = true;
 
-	pi->uvd_power_gated = false;
+	pi->uvd_power_gated = true;
 
 	/* make sure dc limits are valid */
 	if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
@@ -6176,8 +6179,6 @@
 	if (ret)
 		return ret;
 
-	ci_dpm_powergate_uvd(adev, true);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 4efc901..a845b6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -67,6 +67,7 @@
 
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_powerplay.h"
+#include "dce_virtual.h"
 
 /*
  * Indirect registers accessor
@@ -962,12 +963,6 @@
 	return true;
 }
 
-static u32 cik_get_virtual_caps(struct amdgpu_device *adev)
-{
-	/* CIK does not support SR-IOV */
-	return 0;
-}
-
 static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
 	{mmGRBM_STATUS, false},
 	{mmGB_ADDR_CONFIG, false},
@@ -1640,6 +1635,12 @@
 		>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
 }
 
+static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
+{
+	if (is_virtual_machine()) /* passthrough mode */
+		adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+}
+
 static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -1708,6 +1709,74 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 8,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 7,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &gfx_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_sdma_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 4,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &uvd_v4_2_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v2_0_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -1776,6 +1845,74 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 8,
+		.minor = 5,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 7,
+		.minor = 3,
+		.rev = 0,
+		.funcs = &gfx_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_sdma_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 4,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &uvd_v4_2_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v2_0_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -1844,6 +1981,74 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 8,
+		.minor = 3,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 7,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &gfx_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_sdma_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 4,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &uvd_v4_2_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v2_0_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -1912,6 +2117,74 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 8,
+		.minor = 3,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 7,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &gfx_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_sdma_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 4,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &uvd_v4_2_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v2_0_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -1980,32 +2253,128 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 8,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 7,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &gfx_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_sdma_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 4,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &uvd_v4_2_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v2_0_ip_funcs,
+	},
+};
+
 int cik_set_ip_blocks(struct amdgpu_device *adev)
 {
-	switch (adev->asic_type) {
-	case CHIP_BONAIRE:
-		adev->ip_blocks = bonaire_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
-		break;
-	case CHIP_HAWAII:
-		adev->ip_blocks = hawaii_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
-		break;
-	case CHIP_KAVERI:
-		adev->ip_blocks = kaveri_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
-		break;
-	case CHIP_KABINI:
-		adev->ip_blocks = kabini_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
-		break;
-	case CHIP_MULLINS:
-		adev->ip_blocks = mullins_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
-		break;
-	default:
-		/* FIXME: not supported yet */
-		return -EINVAL;
+	if (adev->enable_virtual_display) {
+		switch (adev->asic_type) {
+		case CHIP_BONAIRE:
+			adev->ip_blocks = bonaire_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd);
+			break;
+		case CHIP_HAWAII:
+			adev->ip_blocks = hawaii_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd);
+			break;
+		case CHIP_KAVERI:
+			adev->ip_blocks = kaveri_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd);
+			break;
+		case CHIP_KABINI:
+			adev->ip_blocks = kabini_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd);
+			break;
+		case CHIP_MULLINS:
+			adev->ip_blocks = mullins_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd);
+			break;
+		default:
+			/* FIXME: not supported yet */
+			return -EINVAL;
+		}
+	} else {
+		switch (adev->asic_type) {
+		case CHIP_BONAIRE:
+			adev->ip_blocks = bonaire_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
+			break;
+		case CHIP_HAWAII:
+			adev->ip_blocks = hawaii_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
+			break;
+		case CHIP_KAVERI:
+			adev->ip_blocks = kaveri_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
+			break;
+		case CHIP_KABINI:
+			adev->ip_blocks = kabini_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
+			break;
+		case CHIP_MULLINS:
+			adev->ip_blocks = mullins_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
+			break;
+		default:
+			/* FIXME: not supported yet */
+			return -EINVAL;
+		}
 	}
 
 	return 0;
@@ -2015,13 +2384,13 @@
 {
 	.read_disabled_bios = &cik_read_disabled_bios,
 	.read_bios_from_rom = &cik_read_bios_from_rom,
+	.detect_hw_virtualization = cik_detect_hw_virtualization,
 	.read_register = &cik_read_register,
 	.reset = &cik_asic_reset,
 	.set_vga_state = &cik_vga_set_state,
 	.get_xclk = &cik_get_xclk,
 	.set_uvd_clocks = &cik_set_uvd_clocks,
 	.set_vce_clocks = &cik_set_vce_clocks,
-	.get_virtual_caps = &cik_get_virtual_caps,
 };
 
 static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 77fdd99..cb952ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -695,24 +695,16 @@
 				 uint64_t pe, uint64_t src,
 				 unsigned count)
 {
-	while (count) {
-		unsigned bytes = count * 8;
-		if (bytes > 0x1FFFF8)
-			bytes = 0x1FFFF8;
+	unsigned bytes = count * 8;
 
-		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
-			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-		ib->ptr[ib->length_dw++] = bytes;
-		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
-		ib->ptr[ib->length_dw++] = lower_32_bits(src);
-		ib->ptr[ib->length_dw++] = upper_32_bits(src);
-		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
-		pe += bytes;
-		src += bytes;
-		count -= bytes / 8;
-	}
+	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+		SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+	ib->ptr[ib->length_dw++] = bytes;
+	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+	ib->ptr[ib->length_dw++] = lower_32_bits(src);
+	ib->ptr[ib->length_dw++] = upper_32_bits(src);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 }
 
 /**
@@ -720,39 +712,27 @@
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: access flags
  *
  * Update PTEs by writing them manually using sDMA (CIK).
  */
-static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
-				  const dma_addr_t *pages_addr, uint64_t pe,
-				  uint64_t addr, unsigned count,
-				  uint32_t incr, uint32_t flags)
+static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+				  uint64_t value, unsigned count,
+				  uint32_t incr)
 {
-	uint64_t value;
-	unsigned ndw;
+	unsigned ndw = count * 2;
 
-	while (count) {
-		ndw = count * 2;
-		if (ndw > 0xFFFFE)
-			ndw = 0xFFFFE;
-
-		/* for non-physically contiguous pages (system) */
-		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
-			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-		ib->ptr[ib->length_dw++] = pe;
-		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-		ib->ptr[ib->length_dw++] = ndw;
-		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-			value = amdgpu_vm_map_gart(pages_addr, addr);
-			addr += incr;
-			value |= flags;
-			ib->ptr[ib->length_dw++] = value;
-			ib->ptr[ib->length_dw++] = upper_32_bits(value);
-		}
+	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+		SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+	ib->ptr[ib->length_dw++] = ndw;
+	for (; ndw > 0; ndw -= 2) {
+		ib->ptr[ib->length_dw++] = lower_32_bits(value);
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		value += incr;
 	}
 }
 
@@ -768,40 +748,21 @@
  *
  * Update the page tables using sDMA (CIK).
  */
-static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
-				    uint64_t pe,
+static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
 				    uint64_t addr, unsigned count,
 				    uint32_t incr, uint32_t flags)
 {
-	uint64_t value;
-	unsigned ndw;
-
-	while (count) {
-		ndw = count;
-		if (ndw > 0x7FFFF)
-			ndw = 0x7FFFF;
-
-		if (flags & AMDGPU_PTE_VALID)
-			value = addr;
-		else
-			value = 0;
-
-		/* for physically contiguous pages (vram) */
-		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
-		ib->ptr[ib->length_dw++] = pe; /* dst addr */
-		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-		ib->ptr[ib->length_dw++] = flags; /* mask */
-		ib->ptr[ib->length_dw++] = 0;
-		ib->ptr[ib->length_dw++] = value; /* value */
-		ib->ptr[ib->length_dw++] = upper_32_bits(value);
-		ib->ptr[ib->length_dw++] = incr; /* increment size */
-		ib->ptr[ib->length_dw++] = 0;
-		ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-
-		pe += ndw * 8;
-		addr += ndw * incr;
-		count -= ndw;
-	}
+	/* for physically contiguous pages (vram) */
+	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+	ib->ptr[ib->length_dw++] = flags; /* mask */
+	ib->ptr[ib->length_dw++] = 0;
+	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+	ib->ptr[ib->length_dw++] = incr; /* increment size */
+	ib->ptr[ib->length_dw++] = 0;
+	ib->ptr[ib->length_dw++] = count; /* number of entries */
 }
 
 /**
@@ -887,6 +848,22 @@
 	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 }
 
+static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		7 + 4; /* cik_sdma_ring_emit_ib */
+}
+
+static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		6 + /* cik_sdma_ring_emit_hdp_flush */
+		3 + /* cik_sdma_ring_emit_hdp_invalidate */
+		6 + /* cik_sdma_ring_emit_pipeline_sync */
+		12 + /* cik_sdma_ring_emit_vm_flush */
+		9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
+}
+
 static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
 				 bool enable)
 {
@@ -1262,6 +1239,8 @@
 	.test_ib = cik_sdma_ring_test_ib,
 	.insert_nop = cik_sdma_ring_insert_nop,
 	.pad_ib = cik_sdma_ring_pad_ib,
+	.get_emit_ib_size = cik_sdma_ring_get_emit_ib_size,
+	.get_dma_frame_size = cik_sdma_ring_get_dma_frame_size,
 };
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index c4f6f00..8659852 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -562,4 +562,40 @@
 	MTYPE_NONCACHED = 3
 };
 
+/* mmPA_SC_RASTER_CONFIG mask */
+#define RB_MAP_PKR0(x)				((x) << 0)
+#define RB_MAP_PKR0_MASK			(0x3 << 0)
+#define RB_MAP_PKR1(x)				((x) << 2)
+#define RB_MAP_PKR1_MASK			(0x3 << 2)
+#define RB_XSEL2(x)				((x) << 4)
+#define RB_XSEL2_MASK				(0x3 << 4)
+#define RB_XSEL					(1 << 6)
+#define RB_YSEL					(1 << 7)
+#define PKR_MAP(x)				((x) << 8)
+#define PKR_MAP_MASK				(0x3 << 8)
+#define PKR_XSEL(x)				((x) << 10)
+#define PKR_XSEL_MASK				(0x3 << 10)
+#define PKR_YSEL(x)				((x) << 12)
+#define PKR_YSEL_MASK				(0x3 << 12)
+#define SC_MAP(x)				((x) << 16)
+#define SC_MAP_MASK				(0x3 << 16)
+#define SC_XSEL(x)				((x) << 18)
+#define SC_XSEL_MASK				(0x3 << 18)
+#define SC_YSEL(x)				((x) << 20)
+#define SC_YSEL_MASK				(0x3 << 20)
+#define SE_MAP(x)				((x) << 24)
+#define SE_MAP_MASK				(0x3 << 24)
+#define SE_XSEL(x)				((x) << 26)
+#define SE_XSEL_MASK				(0x3 << 26)
+#define SE_YSEL(x)				((x) << 28)
+#define SE_YSEL_MASK				(0x3 << 28)
+
+/* mmPA_SC_RASTER_CONFIG_1 mask */
+#define SE_PAIR_MAP(x)				((x) << 0)
+#define SE_PAIR_MAP_MASK			(0x3 << 0)
+#define SE_PAIR_XSEL(x)				((x) << 2)
+#define SE_PAIR_XSEL_MASK			(0x3 << 2)
+#define SE_PAIR_YSEL(x)				((x) << 4)
+#define SE_PAIR_YSEL_MASK			(0x3 << 4)
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 2a11413..3c082e14 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -44,6 +44,7 @@
 
 static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
 static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
+static void cz_dpm_fini(struct amdgpu_device *adev);
 
 static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps)
 {
@@ -350,6 +351,8 @@
 
 		ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL);
 		if (ps == NULL) {
+			for (j = 0; j < i; j++)
+				kfree(adev->pm.dpm.ps[j].ps_priv);
 			kfree(adev->pm.dpm.ps);
 			return -ENOMEM;
 		}
@@ -409,11 +412,11 @@
 
 	ret = amdgpu_get_platform_caps(adev);
 	if (ret)
-		return ret;
+		goto err;
 
 	ret = amdgpu_parse_extended_power_table(adev);
 	if (ret)
-		return ret;
+		goto err;
 
 	pi->sram_end = SMC_RAM_END;
 
@@ -435,7 +438,11 @@
 		pi->caps_td_ramping = true;
 		pi->caps_tcp_ramping = true;
 	}
-	pi->caps_sclk_ds = true;
+	if (amdgpu_sclk_deep_sleep_en)
+		pi->caps_sclk_ds = true;
+	else
+		pi->caps_sclk_ds = false;
+
 	pi->voting_clients = 0x00c00033;
 	pi->auto_thermal_throttling_enabled = true;
 	pi->bapm_enabled = false;
@@ -463,23 +470,26 @@
 
 	ret = cz_parse_sys_info_table(adev);
 	if (ret)
-		return ret;
+		goto err;
 
 	cz_patch_voltage_values(adev);
 	cz_construct_boot_state(adev);
 
 	ret = cz_parse_power_table(adev);
 	if (ret)
-		return ret;
+		goto err;
 
 	ret = cz_process_firmware_header(adev);
 	if (ret)
-		return ret;
+		goto err;
 
 	pi->dpm_enabled = true;
 	pi->uvd_dynamic_pg = false;
 
 	return 0;
+err:
+	cz_dpm_fini(adev);
+	return ret;
 }
 
 static void cz_dpm_fini(struct amdgpu_device *adev)
@@ -668,17 +678,12 @@
 	struct cz_power_info *pi = cz_get_pi(adev);
 
 	pi->active_process_mask = 0;
-
 }
 
 static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev,
 							void **table)
 {
-	int ret = 0;
-
-	ret = cz_smu_download_pptable(adev, table);
-
-	return ret;
+	return cz_smu_download_pptable(adev, table);
 }
 
 static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev)
@@ -818,9 +823,9 @@
 	pi->sclk_dpm.hard_min_clk = 0;
 	cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
 	level = cz_get_argument(adev);
-	if (level < table->count)
+	if (level < table->count) {
 		clock = table->entries[level].clk;
-	else {
+	} else {
 		DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n");
 		clock = table->entries[table->count - 1].clk;
 	}
@@ -846,9 +851,9 @@
 	pi->uvd_dpm.hard_min_clk = 0;
 	cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
 	level = cz_get_argument(adev);
-	if (level < table->count)
+	if (level < table->count) {
 		clock = table->entries[level].vclk;
-	else {
+	} else {
 		DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n");
 		clock = table->entries[table->count - 1].vclk;
 	}
@@ -874,9 +879,9 @@
 	pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;
 	cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
 	level = cz_get_argument(adev);
-	if (level < table->count)
+	if (level < table->count) {
 		clock = table->entries[level].ecclk;
-	else {
+	} else {
 		/* future BIOS would fix this error */
 		DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
 		clock = table->entries[table->count - 1].ecclk;
@@ -903,9 +908,9 @@
 	pi->acp_dpm.hard_min_clk = 0;
 	cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel);
 	level = cz_get_argument(adev);
-	if (level < table->count)
+	if (level < table->count) {
 		clock = table->entries[level].clk;
-	else {
+	} else {
 		DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n");
 		clock = table->entries[table->count - 1].clk;
 	}
@@ -930,7 +935,6 @@
 	struct cz_power_info *pi = cz_get_pi(adev);
 
 	pi->low_sclk_interrupt_threshold = 0;
-
 }
 
 static void cz_dpm_setup_asic(struct amdgpu_device *adev)
@@ -1203,7 +1207,7 @@
 	int ret;
 
 	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
-			pi->caps_td_ramping || pi->caps_tcp_ramping) {
+	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
 		if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) {
 			ret = cz_disable_cgpg(adev);
 			if (ret) {
@@ -1277,7 +1281,7 @@
 	ps->force_high = false;
 	ps->need_dfs_bypass = true;
 	pi->video_start = new_rps->dclk || new_rps->vclk ||
-				new_rps->evclk || new_rps->ecclk;
+			  new_rps->evclk || new_rps->ecclk;
 
 	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
 			ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
@@ -1335,7 +1339,6 @@
 	}
 
 	cz_reset_acp_boot_level(adev);
-
 	cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
 
 	return 0;
@@ -1511,14 +1514,16 @@
 	return 0;
 }
 
-/* borrowed from KV, need future unify */
 static int cz_dpm_get_temperature(struct amdgpu_device *adev)
 {
 	int actual_temp = 0;
-	uint32_t temp = RREG32_SMC(0xC0300E0C);
+	uint32_t val = RREG32_SMC(ixTHM_TCON_CUR_TMP);
+	uint32_t temp = REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
 
-	if (temp)
+	if (REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
 		actual_temp = 1000 * ((temp / 8) - 49);
+	else
+		actual_temp = 1000 * (temp / 8);
 
 	return actual_temp;
 }
@@ -1665,7 +1670,6 @@
 	struct amdgpu_ps *ps = &pi->requested_rps;
 
 	cz_update_current_ps(adev, ps);
-
 }
 
 static int cz_dpm_force_highest(struct amdgpu_device *adev)
@@ -2108,29 +2112,58 @@
 			/* disable clockgating so we can properly shut down the block */
 			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
 							    AMD_CG_STATE_UNGATE);
+			if (ret) {
+				DRM_ERROR("UVD DPM Power Gating failed to set clockgating state\n");
+				return;
+			}
+
 			/* shutdown the UVD block */
 			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
 							    AMD_PG_STATE_GATE);
-			/* XXX: check for errors */
+
+			if (ret) {
+				DRM_ERROR("UVD DPM Power Gating failed to set powergating state\n");
+				return;
+			}
 		}
 		cz_update_uvd_dpm(adev, gate);
-		if (pi->caps_uvd_pg)
+		if (pi->caps_uvd_pg) {
 			/* power off the UVD block */
-			cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
+			ret = cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
+			if (ret) {
+				DRM_ERROR("UVD DPM Power Gating failed to send SMU PowerOFF message\n");
+				return;
+			}
+		}
 	} else {
 		if (pi->caps_uvd_pg) {
 			/* power on the UVD block */
 			if (pi->uvd_dynamic_pg)
-				cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1);
+				ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1);
 			else
-				cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0);
+				ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0);
+
+			if (ret) {
+				DRM_ERROR("UVD DPM Power Gating Failed to send SMU PowerON message\n");
+				return;
+			}
+
 			/* re-init the UVD block */
 			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
 							    AMD_PG_STATE_UNGATE);
+
+			if (ret) {
+				DRM_ERROR("UVD DPM Power Gating Failed to set powergating state\n");
+				return;
+			}
+
 			/* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
 			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
 							    AMD_CG_STATE_GATE);
-			/* XXX: check for errors */
+			if (ret) {
+				DRM_ERROR("UVD DPM Power Gating Failed to set clockgating state\n");
+				return;
+			}
 		}
 		cz_update_uvd_dpm(adev, gate);
 	}
@@ -2168,7 +2201,6 @@
 	/* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
 	if (pi->caps_stable_power_state) {
 		pi->vce_dpm.hard_min_clk = table->entries[table->count-1].ecclk;
-
 	} else { /* non-stable p-state cases. without vce.Arbiter.EcclkHardMin */
 		/* leave it as set by user */
 		/*pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;*/
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
index ac7fee7..aed7033 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
@@ -29,6 +29,8 @@
 #include "cz_smumgr.h"
 #include "smu_ucode_xfer_cz.h"
 #include "amdgpu_ucode.h"
+#include "cz_dpm.h"
+#include "vi_dpm.h"
 
 #include "smu/smu_8_0_d.h"
 #include "smu/smu_8_0_sh_mask.h"
@@ -48,7 +50,7 @@
 	return priv;
 }
 
-int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
+static int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
 {
 	int i;
 	u32 content = 0, tmp;
@@ -99,13 +101,6 @@
 	return 0;
 }
 
-int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev,
-						u16 msg, u32 parameter)
-{
-	WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
-	return cz_send_msg_to_smc_async(adev, msg);
-}
-
 int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
 						u16 msg, u32 parameter)
 {
@@ -140,7 +135,7 @@
 	return 0;
 }
 
-int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+static int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
 						u32 value, u32 limit)
 {
 	int ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index c1b04e9..4108c68 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -221,7 +221,7 @@
  */
 static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 {
-	unsigned i = 0;
+	unsigned i = 100;
 
 	if (crtc >= adev->mode_info.num_crtc)
 		return;
@@ -233,14 +233,16 @@
 	 * wait for another frame.
 	 */
 	while (dce_v10_0_is_in_vblank(adev, crtc)) {
-		if (i++ % 100 == 0) {
+		if (i++ == 100) {
+			i = 0;
 			if (!dce_v10_0_is_counter_moving(adev, crtc))
 				break;
 		}
 	}
 
 	while (!dce_v10_0_is_in_vblank(adev, crtc)) {
-		if (i++ % 100 == 0) {
+		if (i++ == 100) {
+			i = 0;
 			if (!dce_v10_0_is_counter_moving(adev, crtc))
 				break;
 		}
@@ -425,16 +427,6 @@
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
-		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
-			/* don't try to enable hpd on eDP or LVDS avoid breaking the
-			 * aux dp channel on imac and help (but not completely fix)
-			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
-			 * also avoid interrupt storms during dpms.
-			 */
-			continue;
-		}
-
 		switch (amdgpu_connector->hpd.hpd) {
 		case AMDGPU_HPD_1:
 			idx = 0;
@@ -458,6 +450,19 @@
 			continue;
 		}
 
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS avoid breaking the
+			 * aux dp channel on imac and help (but not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * also avoid interrupt storms during dpms.
+			 */
+			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+			tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
+			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+			continue;
+		}
+
 		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
 		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
@@ -646,8 +651,8 @@
 
 		if (save->crtc_enabled[i]) {
 			tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
-			if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
-				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
+			if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) {
+				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0);
 				WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
 			}
 			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
@@ -712,6 +717,45 @@
 	WREG32(mmVGA_RENDER_CONTROL, tmp);
 }
 
+static int dce_v10_0_get_num_crtc(struct amdgpu_device *adev)
+{
+	int num_crtc = 0;
+
+	switch (adev->asic_type) {
+	case CHIP_FIJI:
+	case CHIP_TONGA:
+		num_crtc = 6;
+		break;
+	default:
+		num_crtc = 0;
+	}
+	return num_crtc;
+}
+
+void dce_v10_0_disable_dce(struct amdgpu_device *adev)
+{
+	/*Disable VGA render and enabled crtc, if has DCE engine*/
+	if (amdgpu_atombios_has_dce_engine_info(adev)) {
+		u32 tmp;
+		int crtc_enabled, i;
+
+		dce_v10_0_set_vga_render_state(adev, false);
+
+		/*Disable crtc*/
+		for (i = 0; i < dce_v10_0_get_num_crtc(adev); i++) {
+			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+									 CRTC_CONTROL, CRTC_MASTER_EN);
+			if (crtc_enabled) {
+				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			}
+		}
+	}
+}
+
 static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
@@ -2063,7 +2107,7 @@
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels;
 	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
@@ -2071,6 +2115,7 @@
 	u32 tmp, viewport_w, viewport_h;
 	int r;
 	bool bypass_lut = false;
+	char *format_name;
 
 	/* no fb bound */
 	if (!atomic && !crtc->primary->fb) {
@@ -2090,23 +2135,23 @@
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
 	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(rbo);
+		fb_location = amdgpu_bo_gpu_offset(abo);
 	} else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
 
@@ -2182,8 +2227,9 @@
 		bypass_lut = true;
 		break;
 	default:
-		DRM_ERROR("Unsupported screen format %s\n",
-			drm_get_format_name(target_fb->pixel_format));
+		format_name = drm_get_format_name(target_fb->pixel_format);
+		DRM_ERROR("Unsupported screen format %s\n", format_name);
+		kfree(format_name);
 		return -EINVAL;
 	}
 
@@ -2275,17 +2321,17 @@
 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
 	       (viewport_w << 16) | viewport_h);
 
-	/* set pageflip to happen only at start of vblank interval (front porch) */
-	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */
@@ -2698,7 +2744,7 @@
 	.gamma_set = dce_v10_0_crtc_gamma_set,
 	.set_config = amdgpu_crtc_set_config,
 	.destroy = dce_v10_0_crtc_destroy,
-	.page_flip = amdgpu_crtc_page_flip,
+	.page_flip_target = amdgpu_crtc_page_flip_target,
 };
 
 static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2765,16 +2811,16 @@
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */
@@ -2962,10 +3008,11 @@
 	dce_v10_0_set_display_funcs(adev);
 	dce_v10_0_set_irq_funcs(adev);
 
+	adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev);
+
 	switch (adev->asic_type) {
 	case CHIP_FIJI:
 	case CHIP_TONGA:
-		adev->mode_info.num_crtc = 6; /* XXX 7??? */
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 7;
 		break;
@@ -3141,6 +3188,13 @@
 	return 0;
 }
 
+static bool dce_v10_0_check_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return dce_v10_0_is_display_hung(adev);
+}
+
 static int dce_v10_0_soft_reset(void *handle)
 {
 	u32 srbm_soft_reset = 0, tmp;
@@ -3512,6 +3566,7 @@
 	.resume = dce_v10_0_resume,
 	.is_idle = dce_v10_0_is_idle,
 	.wait_for_idle = dce_v10_0_wait_for_idle,
+	.check_soft_reset = dce_v10_0_check_soft_reset,
 	.soft_reset = dce_v10_0_soft_reset,
 	.set_clockgating_state = dce_v10_0_set_clockgating_state,
 	.set_powergating_state = dce_v10_0_set_powergating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
index 1bfa48d..e3dc04d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
@@ -26,4 +26,6 @@
 
 extern const struct amd_ip_funcs dce_v10_0_ip_funcs;
 
+void dce_v10_0_disable_dce(struct amdgpu_device *adev);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index d4bf133..f264b8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -443,16 +443,6 @@
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
-		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
-			/* don't try to enable hpd on eDP or LVDS avoid breaking the
-			 * aux dp channel on imac and help (but not completely fix)
-			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
-			 * also avoid interrupt storms during dpms.
-			 */
-			continue;
-		}
-
 		switch (amdgpu_connector->hpd.hpd) {
 		case AMDGPU_HPD_1:
 			idx = 0;
@@ -476,6 +466,19 @@
 			continue;
 		}
 
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS avoid breaking the
+			 * aux dp channel on imac and help (but not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * also avoid interrupt storms during dpms.
+			 */
+			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+			tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
+			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+			continue;
+		}
+
 		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
 		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
@@ -673,6 +676,53 @@
 	WREG32(mmVGA_RENDER_CONTROL, tmp);
 }
 
+static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
+{
+	int num_crtc = 0;
+
+	switch (adev->asic_type) {
+	case CHIP_CARRIZO:
+		num_crtc = 3;
+		break;
+	case CHIP_STONEY:
+		num_crtc = 2;
+		break;
+	case CHIP_POLARIS10:
+		num_crtc = 6;
+		break;
+	case CHIP_POLARIS11:
+		num_crtc = 5;
+		break;
+	default:
+		num_crtc = 0;
+	}
+	return num_crtc;
+}
+
+void dce_v11_0_disable_dce(struct amdgpu_device *adev)
+{
+	/*Disable VGA render and enabled crtc, if has DCE engine*/
+	if (amdgpu_atombios_has_dce_engine_info(adev)) {
+		u32 tmp;
+		int crtc_enabled, i;
+
+		dce_v11_0_set_vga_render_state(adev, false);
+
+		/*Disable crtc*/
+		for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) {
+			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+									 CRTC_CONTROL, CRTC_MASTER_EN);
+			if (crtc_enabled) {
+				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			}
+		}
+	}
+}
+
 static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
@@ -2038,7 +2088,7 @@
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels;
 	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
@@ -2046,6 +2096,7 @@
 	u32 tmp, viewport_w, viewport_h;
 	int r;
 	bool bypass_lut = false;
+	char *format_name;
 
 	/* no fb bound */
 	if (!atomic && !crtc->primary->fb) {
@@ -2065,23 +2116,23 @@
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
 	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(rbo);
+		fb_location = amdgpu_bo_gpu_offset(abo);
 	} else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
 
@@ -2157,8 +2208,9 @@
 		bypass_lut = true;
 		break;
 	default:
-		DRM_ERROR("Unsupported screen format %s\n",
-			drm_get_format_name(target_fb->pixel_format));
+		format_name = drm_get_format_name(target_fb->pixel_format);
+		DRM_ERROR("Unsupported screen format %s\n", format_name);
+		kfree(format_name);
 		return -EINVAL;
 	}
 
@@ -2250,17 +2302,17 @@
 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
 	       (viewport_w << 16) | viewport_h);
 
-	/* set pageflip to happen only at start of vblank interval (front porch) */
-	WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */
@@ -2708,7 +2760,7 @@
 	.gamma_set = dce_v11_0_crtc_gamma_set,
 	.set_config = amdgpu_crtc_set_config,
 	.destroy = dce_v11_0_crtc_destroy,
-	.page_flip = amdgpu_crtc_page_flip,
+	.page_flip_target = amdgpu_crtc_page_flip_target,
 };
 
 static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2775,16 +2827,16 @@
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */
@@ -2999,24 +3051,22 @@
 	dce_v11_0_set_display_funcs(adev);
 	dce_v11_0_set_irq_funcs(adev);
 
+	adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev);
+
 	switch (adev->asic_type) {
 	case CHIP_CARRIZO:
-		adev->mode_info.num_crtc = 3;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 9;
 		break;
 	case CHIP_STONEY:
-		adev->mode_info.num_crtc = 2;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 9;
 		break;
 	case CHIP_POLARIS10:
-		adev->mode_info.num_crtc = 6;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 6;
 		break;
 	case CHIP_POLARIS11:
-		adev->mode_info.num_crtc = 5;
 		adev->mode_info.num_hpd = 5;
 		adev->mode_info.num_dig = 5;
 		break;
@@ -3109,6 +3159,7 @@
 
 	dce_v11_0_afmt_fini(adev);
 
+	drm_mode_config_cleanup(adev->ddev);
 	adev->mode_info.mode_config_initialized = false;
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
index 84e4618..1f58a65 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
@@ -26,4 +26,6 @@
 
 extern const struct amd_ip_funcs dce_v11_0_ip_funcs;
 
+void dce_v11_0_disable_dce(struct amdgpu_device *adev);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
new file mode 100644
index 0000000..b948d6c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -0,0 +1,3176 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_i2c.h"
+#include "atom.h"
+#include "amdgpu_atombios.h"
+#include "atombios_crtc.h"
+#include "atombios_encoders.h"
+#include "amdgpu_pll.h"
+#include "amdgpu_connectors.h"
+#include "si/si_reg.h"
+#include "si/sid.h"
+
+static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
+static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+
+static const u32 crtc_offsets[6] =
+{
+	SI_CRTC0_REGISTER_OFFSET,
+	SI_CRTC1_REGISTER_OFFSET,
+	SI_CRTC2_REGISTER_OFFSET,
+	SI_CRTC3_REGISTER_OFFSET,
+	SI_CRTC4_REGISTER_OFFSET,
+	SI_CRTC5_REGISTER_OFFSET
+};
+
+static const uint32_t dig_offsets[] = {
+	SI_CRTC0_REGISTER_OFFSET,
+	SI_CRTC1_REGISTER_OFFSET,
+	SI_CRTC2_REGISTER_OFFSET,
+	SI_CRTC3_REGISTER_OFFSET,
+	SI_CRTC4_REGISTER_OFFSET,
+	SI_CRTC5_REGISTER_OFFSET,
+	(0x13830 - 0x7030) >> 2,
+};
+
+static const struct {
+	uint32_t	reg;
+	uint32_t	vblank;
+	uint32_t	vline;
+	uint32_t	hpd;
+
+} interrupt_status_offsets[6] = { {
+	.reg = DISP_INTERRUPT_STATUS,
+	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
+	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
+	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
+}, {
+	.reg = DISP_INTERRUPT_STATUS_CONTINUE,
+	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
+	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
+	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
+}, {
+	.reg = DISP_INTERRUPT_STATUS_CONTINUE2,
+	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
+	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
+	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
+}, {
+	.reg = DISP_INTERRUPT_STATUS_CONTINUE3,
+	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
+	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
+	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
+}, {
+	.reg = DISP_INTERRUPT_STATUS_CONTINUE4,
+	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
+	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
+	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
+}, {
+	.reg = DISP_INTERRUPT_STATUS_CONTINUE5,
+	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
+	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
+	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
+} };
+
+static const uint32_t hpd_int_control_offsets[6] = {
+	DC_HPD1_INT_CONTROL,
+	DC_HPD2_INT_CONTROL,
+	DC_HPD3_INT_CONTROL,
+	DC_HPD4_INT_CONTROL,
+	DC_HPD5_INT_CONTROL,
+	DC_HPD6_INT_CONTROL,
+};
+
+static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
+				     u32 block_offset, u32 reg)
+{
+	DRM_INFO("xxxx: dce_v6_0_audio_endpt_rreg ----no impl!!!!\n");
+	return 0;
+}
+
+static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
+				      u32 block_offset, u32 reg, u32 v)
+{
+	DRM_INFO("xxxx: dce_v6_0_audio_endpt_wreg ----no impl!!!!\n");
+}
+
+static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
+{
+	if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+		return true;
+	else
+		return false;
+}
+
+static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
+{
+	u32 pos1, pos2;
+
+	pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+	pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+	if (pos1 != pos2)
+		return true;
+	else
+		return false;
+}
+
+/**
+ * dce_v6_0_wait_for_vblank - vblank wait asic callback.
+ *
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
+static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
+{
+	unsigned i = 100;
+
+	if (crtc >= adev->mode_info.num_crtc)
+		return;
+
+	if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+		return;
+
+	/* depending on when we hit vblank, we may be close to active; if so,
+	 * wait for another frame.
+	 */
+	while (dce_v6_0_is_in_vblank(adev, crtc)) {
+		if (i++ == 100) {
+			i = 0;
+			if (!dce_v6_0_is_counter_moving(adev, crtc))
+				break;
+		}
+	}
+
+	while (!dce_v6_0_is_in_vblank(adev, crtc)) {
+		if (i++ == 100) {
+			i = 0;
+			if (!dce_v6_0_is_counter_moving(adev, crtc))
+				break;
+		}
+	}
+}
+
+static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+	if (crtc >= adev->mode_info.num_crtc)
+		return 0;
+	else
+		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Enable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Disable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
+/**
+ * dce_v6_0_page_flip - pageflip callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc_id: crtc to cleanup pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (evergreen+).
+ * During vblank we take the crtc lock and wait for the update_pending
+ * bit to go high, when it does, we release the lock, and allow the
+ * double buffered update to take place.
+ * Returns the current update pending status.
+ */
+static void dce_v6_0_page_flip(struct amdgpu_device *adev,
+			       int crtc_id, u64 crtc_base, bool async)
+{
+	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+	/* flip at hsync for async, default is vsync */
+	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
+	       EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
+	/* update the scanout addresses */
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+	       upper_32_bits(crtc_base));
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* post the write */
+	RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
+}
+
+static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+					u32 *vbl, u32 *position)
+{
+	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+		return -EINVAL;
+	*vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + crtc_offsets[crtc]);
+	*position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+	return 0;
+
+}
+
+/**
+ * dce_v6_0_hpd_sense - hpd sense callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (evergreen+).
+ * Returns true if connected, false if not connected.
+ */
+static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
+			       enum amdgpu_hpd_id hpd)
+{
+	bool connected = false;
+
+	switch (hpd) {
+	case AMDGPU_HPD_1:
+		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case AMDGPU_HPD_2:
+		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case AMDGPU_HPD_3:
+		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case AMDGPU_HPD_4:
+		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case AMDGPU_HPD_5:
+		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case AMDGPU_HPD_6:
+		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	default:
+		break;
+	}
+
+	return connected;
+}
+
+/**
+ * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (evergreen+).
+ */
+static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
+				      enum amdgpu_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = dce_v6_0_hpd_sense(adev, hpd);
+
+	switch (hpd) {
+	case AMDGPU_HPD_1:
+		tmp = RREG32(DC_HPD1_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+		break;
+	case AMDGPU_HPD_2:
+		tmp = RREG32(DC_HPD2_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+		break;
+	case AMDGPU_HPD_3:
+		tmp = RREG32(DC_HPD3_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+		break;
+	case AMDGPU_HPD_4:
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+		break;
+	case AMDGPU_HPD_5:
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD5_INT_CONTROL, tmp);
+			break;
+	case AMDGPU_HPD_6:
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD6_INT_CONTROL, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * dce_v6_0_hpd_init - hpd setup callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_connector *connector;
+	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+		switch (amdgpu_connector->hpd.hpd) {
+		case AMDGPU_HPD_1:
+			WREG32(DC_HPD1_CONTROL, tmp);
+			break;
+		case AMDGPU_HPD_2:
+			WREG32(DC_HPD2_CONTROL, tmp);
+			break;
+		case AMDGPU_HPD_3:
+			WREG32(DC_HPD3_CONTROL, tmp);
+			break;
+		case AMDGPU_HPD_4:
+			WREG32(DC_HPD4_CONTROL, tmp);
+			break;
+		case AMDGPU_HPD_5:
+			WREG32(DC_HPD5_CONTROL, tmp);
+			break;
+		case AMDGPU_HPD_6:
+			WREG32(DC_HPD6_CONTROL, tmp);
+			break;
+		default:
+			break;
+		}
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS avoid breaking the
+			 * aux dp channel on imac and help (but not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * also avoid interrupt storms during dpms.
+			 */
+			u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+			switch (amdgpu_connector->hpd.hpd) {
+			case AMDGPU_HPD_1:
+				dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_2:
+				dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_3:
+				dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_4:
+				dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_5:
+				dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_6:
+				dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
+				break;
+			default:
+				continue;
+			}
+
+			dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+			dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
+			WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+			continue;
+		}
+
+		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
+		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
+	}
+
+}
+
+/**
+ * dce_v6_0_hpd_fini - hpd tear down callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+		switch (amdgpu_connector->hpd.hpd) {
+		case AMDGPU_HPD_1:
+			WREG32(DC_HPD1_CONTROL, 0);
+			break;
+		case AMDGPU_HPD_2:
+			WREG32(DC_HPD2_CONTROL, 0);
+			break;
+		case AMDGPU_HPD_3:
+			WREG32(DC_HPD3_CONTROL, 0);
+			break;
+		case AMDGPU_HPD_4:
+			WREG32(DC_HPD4_CONTROL, 0);
+			break;
+		case AMDGPU_HPD_5:
+			WREG32(DC_HPD5_CONTROL, 0);
+			break;
+		case AMDGPU_HPD_6:
+			WREG32(DC_HPD6_CONTROL, 0);
+			break;
+		default:
+			break;
+		}
+		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
+	}
+}
+
+static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
+{
+	return SI_DC_GPIO_HPD_A;
+}
+
+static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
+{
+	DRM_INFO("xxxx: dce_v6_0_is_display_hung ----no imp!!!!!\n");
+
+	return true;
+}
+
+static u32 evergreen_get_vblank_counter(struct amdgpu_device* adev, int crtc)
+{
+	if (crtc >= adev->mode_info.num_crtc)
+		return 0;
+	else
+		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
+				    struct amdgpu_mode_mc_save *save)
+{
+	u32 crtc_enabled, tmp, frame_count;
+	int i, j;
+
+	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+
+	/* disable VGA render */
+	WREG32(VGA_RENDER_CONTROL, 0);
+
+	/* blank the display controllers */
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+		if (crtc_enabled) {
+			save->crtc_enabled[i] = true;
+			tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+
+			if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+				dce_v6_0_vblank_wait(adev, i);
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			}
+			/* wait for the next frame */
+			frame_count = evergreen_get_vblank_counter(adev, i);
+			for (j = 0; j < adev->usec_timeout; j++) {
+				if (evergreen_get_vblank_counter(adev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+
+			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+			tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+			WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			save->crtc_enabled[i] = false;
+			/* ***** */
+		} else {
+			save->crtc_enabled[i] = false;
+		}
+	}
+}
+
+static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
+				      struct amdgpu_mode_mc_save *save)
+{
+	u32 tmp;
+	int i, j;
+
+	/* update crtc base addresses */
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+		       upper_32_bits(adev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+		       upper_32_bits(adev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)adev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)adev->mc.vram_start);
+	}
+
+	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
+	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
+
+	/* unlock regs and wait for update */
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
+			if ((tmp & 0x7) != 3) {
+				tmp &= ~0x7;
+				tmp |= 0x3;
+				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+			if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
+				tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+			if (tmp & 1) {
+				tmp &= ~1;
+				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+			}
+			for (j = 0; j < adev->usec_timeout; j++) {
+				tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+				if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+					break;
+				udelay(1);
+			}
+		}
+	}
+
+	/* Unlock vga access */
+	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+	mdelay(1);
+	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+
+}
+
+static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
+					  bool render)
+{
+	if (!render) 
+		WREG32(R_000300_VGA_RENDER_CONTROL,
+			RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
+
+}
+
+static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
+{
+
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+	int bpc = 0;
+	u32 tmp = 0;
+	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
+
+	if (connector) {
+		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+		bpc = amdgpu_connector_get_monitor_bpc(connector);
+		dither = amdgpu_connector->dither;
+	}
+
+	/* LVDS FMT is set up by atom */
+	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return;
+
+	if (bpc == 0)
+		return;
+
+
+	switch (bpc) {
+	case 6:
+		if (dither == AMDGPU_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN);
+		else
+			tmp |= FMT_TRUNCATE_EN;
+		break;
+	case 8:
+		if (dither == AMDGPU_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_RGB_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+		break;
+	case 10:
+	default:
+		/* not needed */
+		break;
+	}
+
+	WREG32(FMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+}
+
+/**
+ * cik_get_number_of_dram_channels - get the number of dram channels
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up the number of video ram channels (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the number of dram channels
+ */
+static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
+	case 0:
+	default:
+		return 1;
+	case 1:
+		return 2;
+	case 2:
+		return 4;
+	case 3:
+		return 8;
+	case 4:
+		return 3;
+	case 5:
+		return 6;
+	case 6:
+		return 10;
+	case 7:
+		return 12;
+	case 8:
+		return 16;
+	}
+}
+
+struct dce6_wm_params {
+	u32 dram_channels; /* number of dram channels */
+	u32 yclk;          /* bandwidth per dram data pin in kHz */
+	u32 sclk;          /* engine clock in kHz */
+	u32 disp_clk;      /* display clock in kHz */
+	u32 src_width;     /* viewport width */
+	u32 active_time;   /* active display time in ns */
+	u32 blank_time;    /* blank time in ns */
+	bool interlaced;    /* mode is interlaced */
+	fixed20_12 vsc;    /* vertical scale ratio */
+	u32 num_heads;     /* number of active crtcs */
+	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+	u32 lb_size;       /* line buffer allocated to pipe */
+	u32 vtaps;         /* vertical scaler taps */
+};
+
+/**
+ * dce_v6_0_dram_bandwidth - get the dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the raw dram bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dram bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate raw DRAM Bandwidth */
+	fixed20_12 dram_efficiency; /* 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	dram_efficiency.full = dfixed_const(7);
+	dram_efficiency.full = dfixed_div(dram_efficiency, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dram bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dram bandwidth for display in MBytes/s
+ */
+static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+	/* Calculate DRAM Bandwidth and the part allocated to display. */
+	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
+	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+	return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v6_0_data_return_bandwidth - get the data return bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the data return bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the data return bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the display Data return Bandwidth */
+	fixed20_12 return_efficiency; /* 0.8 */
+	fixed20_12 sclk, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	sclk.full = dfixed_const(wm->sclk);
+	sclk.full = dfixed_div(sclk, a);
+	a.full = dfixed_const(10);
+	return_efficiency.full = dfixed_const(8);
+	return_efficiency.full = dfixed_div(return_efficiency, a);
+	a.full = dfixed_const(32);
+	bandwidth.full = dfixed_mul(a, sclk);
+	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dmif bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dmif bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the DMIF Request Bandwidth */
+	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+	fixed20_12 disp_clk, bandwidth;
+	fixed20_12 a, b;
+
+	a.full = dfixed_const(1000);
+	disp_clk.full = dfixed_const(wm->disp_clk);
+	disp_clk.full = dfixed_div(disp_clk, a);
+	a.full = dfixed_const(32);
+	b.full = dfixed_mul(a, disp_clk);
+
+	a.full = dfixed_const(10);
+	disp_clk_request_efficiency.full = dfixed_const(8);
+	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+
+	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v6_0_available_bandwidth - get the min available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the min available bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the min available bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
+	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
+	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
+	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
+
+	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+/**
+ * dce_v6_0_average_bandwidth - get the average available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the average available bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the average available bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the display mode Average Bandwidth
+	 * DisplayMode should contain the source and destination dimensions,
+	 * timing, etc.
+	 */
+	fixed20_12 bpp;
+	fixed20_12 line_time;
+	fixed20_12 src_width;
+	fixed20_12 bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+	line_time.full = dfixed_div(line_time, a);
+	bpp.full = dfixed_const(wm->bytes_per_pixel);
+	src_width.full = dfixed_const(wm->src_width);
+	bandwidth.full = dfixed_mul(src_width, bpp);
+	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+	bandwidth.full = dfixed_div(bandwidth, line_time);
+
+	return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v6_0_latency_watermark - get the latency watermark
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the latency watermark (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the latency watermark in ns
+ */
+static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
+{
+	/* First calculate the latency in ns */
+	u32 mc_latency = 2000; /* 2000 ns. */
+	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
+	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+		(wm->num_heads * cursor_line_pair_return_time);
+	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+	u32 tmp, dmif_size = 12288;
+	fixed20_12 a, b, c;
+
+	if (wm->num_heads == 0)
+		return 0;
+
+	a.full = dfixed_const(2);
+	b.full = dfixed_const(1);
+	if ((wm->vsc.full > a.full) ||
+	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+	    (wm->vtaps >= 5) ||
+	    ((wm->vsc.full >= a.full) && wm->interlaced))
+		max_src_lines_per_dst_line = 4;
+	else
+		max_src_lines_per_dst_line = 2;
+
+	a.full = dfixed_const(available_bandwidth);
+	b.full = dfixed_const(wm->num_heads);
+	a.full = dfixed_div(a, b);
+
+	b.full = dfixed_const(mc_latency + 512);
+	c.full = dfixed_const(wm->disp_clk);
+	b.full = dfixed_div(b, c);
+
+	c.full = dfixed_const(dmif_size);
+	b.full = dfixed_div(c, b);
+
+	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
+
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(wm->disp_clk);
+	b.full = dfixed_div(c, b);
+	c.full = dfixed_const(wm->bytes_per_pixel);
+	b.full = dfixed_mul(b, c);
+
+	lb_fill_bw = min(tmp, dfixed_trunc(b));
+
+	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(lb_fill_bw);
+	b.full = dfixed_div(c, b);
+	a.full = dfixed_div(a, b);
+	line_fill_time = dfixed_trunc(a);
+
+	if (line_fill_time < wm->active_time)
+		return latency;
+	else
+		return latency + (line_fill_time - wm->active_time);
+
+}
+
+/**
+ * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
+ * average and available dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * dram bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+	if (dce_v6_0_average_bandwidth(wm) <=
+	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
+		return true;
+	else
+		return false;
+}
+
+/**
+ * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
+ * average and available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * available bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
+{
+	if (dce_v6_0_average_bandwidth(wm) <=
+	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
+		return true;
+	else
+		return false;
+}
+
+/**
+ * dce_v6_0_check_latency_hiding - check latency hiding
+ *
+ * @wm: watermark calculation data
+ *
+ * Check latency hiding (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
+{
+	u32 lb_partitions = wm->lb_size / wm->src_width;
+	u32 line_time = wm->active_time + wm->blank_time;
+	u32 latency_tolerant_lines;
+	u32 latency_hiding;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1);
+	if (wm->vsc.full > a.full)
+		latency_tolerant_lines = 1;
+	else {
+		if (lb_partitions <= (wm->vtaps + 1))
+			latency_tolerant_lines = 1;
+		else
+			latency_tolerant_lines = 2;
+	}
+
+	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
+		return true;
+	else
+		return false;
+}
+
+/**
+ * dce_v6_0_program_watermarks - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ * @amdgpu_crtc: the selected display controller
+ * @lb_size: line buffer size
+ * @num_heads: number of display controllers in use
+ *
+ * Calculate and program the display watermarks for the
+ * selected display controller (CIK).
+ */
+static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
+					struct amdgpu_crtc *amdgpu_crtc,
+					u32 lb_size, u32 num_heads)
+{
+	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
+	struct dce6_wm_params wm_low, wm_high;
+	u32 dram_channels;
+	u32 pixel_period;
+	u32 line_time = 0;
+	u32 latency_watermark_a = 0, latency_watermark_b = 0;
+	u32 priority_a_mark = 0, priority_b_mark = 0;
+	u32 priority_a_cnt = PRIORITY_OFF;
+	u32 priority_b_cnt = PRIORITY_OFF;
+	u32 tmp, arb_control3;
+	fixed20_12 a, b, c;
+
+	if (amdgpu_crtc->base.enabled && num_heads && mode) {
+		pixel_period = 1000000 / (u32)mode->clock;
+		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		priority_a_cnt = 0;
+		priority_b_cnt = 0;
+
+		dram_channels = si_get_number_of_dram_channels(adev);
+
+		/* watermark for high clocks */
+		if (adev->pm.dpm_enabled) {
+			wm_high.yclk =
+				amdgpu_dpm_get_mclk(adev, false) * 10;
+			wm_high.sclk =
+				amdgpu_dpm_get_sclk(adev, false) * 10;
+		} else {
+			wm_high.yclk = adev->pm.current_mclk * 10;
+			wm_high.sclk = adev->pm.current_sclk * 10;
+		}
+
+		wm_high.disp_clk = mode->clock;
+		wm_high.src_width = mode->crtc_hdisplay;
+		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_high.blank_time = line_time - wm_high.active_time;
+		wm_high.interlaced = false;
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			wm_high.interlaced = true;
+		wm_high.vsc = amdgpu_crtc->vsc;
+		wm_high.vtaps = 1;
+		if (amdgpu_crtc->rmx_type != RMX_OFF)
+			wm_high.vtaps = 2;
+		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
+		wm_high.lb_size = lb_size;
+		wm_high.dram_channels = dram_channels;
+		wm_high.num_heads = num_heads;
+
+		if (adev->pm.dpm_enabled) {
+		/* watermark for low clocks */
+			wm_low.yclk =
+				amdgpu_dpm_get_mclk(adev, true) * 10;
+			wm_low.sclk =
+				amdgpu_dpm_get_sclk(adev, true) * 10;
+		} else {
+			wm_low.yclk = adev->pm.current_mclk * 10;
+			wm_low.sclk = adev->pm.current_sclk * 10;
+		}
+
+		wm_low.disp_clk = mode->clock;
+		wm_low.src_width = mode->crtc_hdisplay;
+		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_low.blank_time = line_time - wm_low.active_time;
+		wm_low.interlaced = false;
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			wm_low.interlaced = true;
+		wm_low.vsc = amdgpu_crtc->vsc;
+		wm_low.vtaps = 1;
+		if (amdgpu_crtc->rmx_type != RMX_OFF)
+			wm_low.vtaps = 2;
+		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
+		wm_low.lb_size = lb_size;
+		wm_low.dram_channels = dram_channels;
+		wm_low.num_heads = num_heads;
+
+		/* set for high clocks */
+		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
+		/* set for low clocks */
+		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);
+
+		/* possibly force display priority to high */
+		/* should really do this at mode validation time... */
+		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
+		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
+		    !dce_v6_0_check_latency_hiding(&wm_high) ||
+		    (adev->mode_info.disp_priority == 2)) {
+			DRM_DEBUG_KMS("force priority to high\n");
+			priority_a_cnt |= PRIORITY_ALWAYS_ON;
+			priority_b_cnt |= PRIORITY_ALWAYS_ON;
+		}
+		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
+		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
+		    !dce_v6_0_check_latency_hiding(&wm_low) ||
+		    (adev->mode_info.disp_priority == 2)) {
+			DRM_DEBUG_KMS("force priority to high\n");
+			priority_a_cnt |= PRIORITY_ALWAYS_ON;
+			priority_b_cnt |= PRIORITY_ALWAYS_ON;
+		}
+
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_a);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_a_mark = dfixed_trunc(c);
+		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_b);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_b_mark = dfixed_trunc(c);
+		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+	}
+
+	/* select wm A */
+	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
+	tmp = arb_control3;
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(1);
+	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
+	WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* select wm B */
+	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(2);
+	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
+	WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* restore original selection */
+	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
+
+	/* write the priority marks */
+	WREG32(PRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
+	WREG32(PRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
+
+	/* save values for DPM */
+	amdgpu_crtc->line_time = line_time;
+	amdgpu_crtc->wm_high = latency_watermark_a;
+}
+
+/* watermark setup */
+static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
+				   struct amdgpu_crtc *amdgpu_crtc,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *other_mode)
+{
+	u32 tmp, buffer_alloc, i;
+	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
+	/*
+	 * Line Buffer Setup
+	 * There are 3 line buffers, each one shared by 2 display controllers.
+	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+	 * the display controllers.  The paritioning is done via one of four
+	 * preset allocations specified in bits 21:20:
+	 *  0 - half lb
+	 *  2 - whole lb, other crtc must be disabled
+	 */
+	/* this can get tricky if we have two large displays on a paired group
+	 * of crtcs.  Ideally for multiple large displays we'd assign them to
+	 * non-linked crtcs for maximum line buffer allocation.
+	 */
+	if (amdgpu_crtc->base.enabled && mode) {
+		if (other_mode) {
+			tmp = 0; /* 1/2 */
+			buffer_alloc = 1;
+		} else {
+			tmp = 2; /* whole */
+			buffer_alloc = 2;
+		}
+	} else {
+		tmp = 0;
+		buffer_alloc = 0;
+	}
+
+	WREG32(DC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
+	       DC_LB_MEMORY_CONFIG(tmp));
+
+	WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+	       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+		    DMIF_BUFFERS_ALLOCATED_COMPLETED)
+			break;
+		udelay(1);
+	}
+
+	if (amdgpu_crtc->base.enabled && mode) {
+		switch (tmp) {
+		case 0:
+		default:
+			return 4096 * 2;
+		case 2:
+			return 8192 * 2;
+		}
+	}
+
+	/* controller not enabled, so no lb used */
+	return 0;
+}
+
+
+/**
+ *
+ * dce_v6_0_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line
+ * buffer allocation (CIK).
+ */
+static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	u32 num_heads = 0, lb_size;
+	int i;
+
+	if (!adev->mode_info.mode_config_initialized)
+		return;
+
+	amdgpu_update_display_priority(adev);
+
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		if (adev->mode_info.crtcs[i]->base.enabled)
+			num_heads++;
+	}
+	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
+		mode0 = &adev->mode_info.crtcs[i]->base.mode;
+		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
+		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
+		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
+		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
+		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
+	}
+}
+/*
+static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
+{
+	int i;
+	u32 offset, tmp;
+
+	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+		offset = adev->mode_info.audio.pin[i].offset;
+		tmp = RREG32_AUDIO_ENDPT(offset,
+				      AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+		if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
+			adev->mode_info.audio.pin[i].connected = false;
+		else
+			adev->mode_info.audio.pin[i].connected = true;
+	}
+
+}
+
+static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
+{
+	int i;
+
+	dce_v6_0_audio_get_connected_pins(adev);
+
+	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+		if (adev->mode_info.audio.pin[i].connected)
+			return &adev->mode_info.audio.pin[i];
+	}
+	DRM_ERROR("No connected audio pins found!\n");
+	return NULL;
+}
+
+static void dce_v6_0_afmt_audio_select_pin(struct drm_encoder *encoder)
+{
+	struct amdgpu_device *adev = encoder->dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+	u32 offset;
+
+	if (!dig || !dig->afmt || !dig->afmt->pin)
+		return;
+
+	offset = dig->afmt->offset;
+
+	WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
+	       AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
+
+}
+
+static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
+						struct drm_display_mode *mode)
+{
+	DRM_INFO("xxxx: dce_v6_0_audio_write_latency_fields---no imp!!!!!\n");
+}
+
+static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
+{
+	DRM_INFO("xxxx: dce_v6_0_audio_write_speaker_allocation---no imp!!!!!\n");
+}
+
+static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
+{
+	DRM_INFO("xxxx: dce_v6_0_audio_write_sad_regs---no imp!!!!!\n");
+
+}
+*/
+static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
+				  struct amdgpu_audio_pin *pin,
+				  bool enable)
+{
+	DRM_INFO("xxxx: dce_v6_0_audio_enable---no imp!!!!!\n");
+}
+
+static const u32 pin_offsets[7] =
+{
+	(0x1780 - 0x1780),
+	(0x1786 - 0x1780),
+	(0x178c - 0x1780),
+	(0x1792 - 0x1780),
+	(0x1798 - 0x1780),
+	(0x179d - 0x1780),
+	(0x17a4 - 0x1780),
+};
+
+static int dce_v6_0_audio_init(struct amdgpu_device *adev)
+{
+	return 0;
+}
+
+static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
+{
+
+}
+
+/*
+static void dce_v6_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+	DRM_INFO("xxxx: dce_v6_0_afmt_update_ACR---no imp!!!!!\n");
+}
+*/
+/*
+ * build a HDMI Video Info Frame
+ */
+/*
+static void dce_v6_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
+					       void *buffer, size_t size)
+{
+	DRM_INFO("xxxx: dce_v6_0_afmt_update_avi_infoframe---no imp!!!!!\n");
+}
+
+static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+	DRM_INFO("xxxx: dce_v6_0_audio_set_dto---no imp!!!!!\n");
+}
+*/
+/*
+ * update the info frames with the data from the current display mode
+ */
+static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
+				  struct drm_display_mode *mode)
+{
+	DRM_INFO("xxxx: dce_v6_0_afmt_setmode ----no impl !!!!!!!!\n");
+}
+
+static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+	if (!dig || !dig->afmt)
+		return;
+
+	/* Silent, r600_hdmi_enable will raise WARN for us */
+	if (enable && dig->afmt->enabled)
+		return;
+	if (!enable && !dig->afmt->enabled)
+		return;
+
+	if (!enable && dig->afmt->pin) {
+		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
+		dig->afmt->pin = NULL;
+	}
+
+	dig->afmt->enabled = enable;
+
+	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
+		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
+}
+
+static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
+{
+	int i, j;
+
+	for (i = 0; i < adev->mode_info.num_dig; i++)
+		adev->mode_info.afmt[i] = NULL;
+
+	/* DCE6 has audio blocks tied to DIG encoders */
+	for (i = 0; i < adev->mode_info.num_dig; i++) {
+		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
+		if (adev->mode_info.afmt[i]) {
+			adev->mode_info.afmt[i]->offset = dig_offsets[i];
+			adev->mode_info.afmt[i]->id = i;
+		} else {
+			for (j = 0; j < i; j++) {
+				kfree(adev->mode_info.afmt[j]);
+				adev->mode_info.afmt[j] = NULL;
+			}
+			DRM_ERROR("Out of memory allocating afmt table\n");
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->mode_info.num_dig; i++) {
+		kfree(adev->mode_info.afmt[i]);
+		adev->mode_info.afmt[i] = NULL;
+	}
+}
+
+static const u32 vga_control_regs[6] =
+{
+	AVIVO_D1VGA_CONTROL,
+	AVIVO_D2VGA_CONTROL,
+	EVERGREEN_D3VGA_CONTROL,
+	EVERGREEN_D4VGA_CONTROL,
+	EVERGREEN_D5VGA_CONTROL,
+	EVERGREEN_D6VGA_CONTROL,
+};
+
+static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	u32 vga_control;
+
+	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
+	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
+}
+
+static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+
+	WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
+}
+
+static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
+				     struct drm_framebuffer *fb,
+				     int x, int y, int atomic)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_framebuffer *amdgpu_fb;
+	struct drm_framebuffer *target_fb;
+	struct drm_gem_object *obj;
+	struct amdgpu_bo *abo;
+	uint64_t fb_location, tiling_flags;
+	uint32_t fb_format, fb_pitch_pixels, pipe_config;
+	u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
+	u32 viewport_w, viewport_h;
+	int r;
+	bool bypass_lut = false;
+
+	/* no fb bound */
+	if (!atomic && !crtc->primary->fb) {
+		DRM_DEBUG_KMS("No FB bound\n");
+		return 0;
+	}
+
+	if (atomic) {
+		amdgpu_fb = to_amdgpu_framebuffer(fb);
+		target_fb = fb;
+	} else {
+		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+		target_fb = crtc->primary->fb;
+	}
+
+	/* If atomic, assume fb object is pinned & idle & fenced and
+	 * just update base pointers
+	 */
+	obj = amdgpu_fb->obj;
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
+	if (unlikely(r != 0))
+		return r;
+
+	if (atomic) {
+		fb_location = amdgpu_bo_gpu_offset(abo);
+	} else {
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		if (unlikely(r != 0)) {
+			amdgpu_bo_unreserve(abo);
+			return -EINVAL;
+		}
+	}
+
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
+
+	switch (target_fb->pixel_format) {
+	case DRM_FORMAT_C8:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
+		break;
+	case DRM_FORMAT_XRGB4444:
+	case DRM_FORMAT_ARGB4444:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+		break;
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_ARGB1555:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+		break;
+	case DRM_FORMAT_BGRX5551:
+	case DRM_FORMAT_BGRA5551:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+		break;
+	case DRM_FORMAT_RGB565:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+		break;
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+		break;
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_ARGB2101010:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
+		bypass_lut = true;
+		break;
+	case DRM_FORMAT_BGRX1010102:
+	case DRM_FORMAT_BGRA1010102:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
+		bypass_lut = true;
+		break;
+	default:
+		DRM_ERROR("Unsupported screen format %s\n",
+			  drm_get_format_name(target_fb->pixel_format));
+		return -EINVAL;
+	}
+
+	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
+		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
+
+		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
+		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
+		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
+		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
+		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+
+		fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
+		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
+		fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
+		fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
+		fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
+		fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
+	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
+		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+	}
+
+	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
+	fb_format |= SI_GRPH_PIPE_CONFIG(pipe_config);
+
+	dce_v6_0_vga_enable(crtc, false);
+
+	/* Make sure surface address is updated at vertical blank rather than
+	 * horizontal blank
+	 */
+	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+	       upper_32_bits(fb_location));
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+	       upper_32_bits(fb_location));
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+	       (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+	       (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+	WREG32(EVERGREEN_GRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+	WREG32(EVERGREEN_GRPH_SWAP_CONTROL + amdgpu_crtc->crtc_offset, fb_swap);
+
+	/*
+	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
+	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
+	 * retain the full precision throughout the pipeline.
+	 */
+	WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
+		 (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
+		 ~EVERGREEN_LUT_10BIT_BYPASS_EN);
+
+	if (bypass_lut)
+		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
+	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_X_START + amdgpu_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
+	WREG32(EVERGREEN_GRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
+
+	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+	WREG32(EVERGREEN_GRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
+
+	dce_v6_0_grph_enable(crtc, true);
+
+	WREG32(EVERGREEN_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
+		       target_fb->height);
+	x &= ~3;
+	y &= ~1;
+	WREG32(EVERGREEN_VIEWPORT_START + amdgpu_crtc->crtc_offset,
+	       (x << 16) | y);
+	viewport_w = crtc->mode.hdisplay;
+	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+
+	WREG32(EVERGREEN_VIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
+	       (viewport_w << 16) | viewport_h);
+
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(EVERGREEN_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
+
+	if (!atomic && fb && fb != crtc->primary->fb) {
+		amdgpu_fb = to_amdgpu_framebuffer(fb);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
+		if (unlikely(r != 0))
+			return r;
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
+	}
+
+	/* Bytes per pixel may have changed */
+	dce_v6_0_bandwidth_update(adev);
+
+	return 0;
+
+}
+
+static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
+				    struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset,
+		       EVERGREEN_INTERLEAVE_EN);
+	else
+		WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
+}
+
+static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
+{
+
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	int i;
+
+	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
+
+	WREG32(NI_INPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
+		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
+	WREG32(NI_PRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
+	       NI_GRPH_PRESCALE_BYPASS);
+	WREG32(NI_PRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
+	       NI_OVL_PRESCALE_BYPASS);
+	WREG32(NI_INPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+	       (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
+		NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
+
+
+
+	WREG32(EVERGREEN_DC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
+
+	WREG32(EVERGREEN_DC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
+
+	WREG32(EVERGREEN_DC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+	for (i = 0; i < 256; i++) {
+		WREG32(EVERGREEN_DC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
+		       (amdgpu_crtc->lut_r[i] << 20) |
+		       (amdgpu_crtc->lut_g[i] << 10) |
+		       (amdgpu_crtc->lut_b[i] << 0));
+	}
+
+	WREG32(NI_DEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+	       (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
+	WREG32(NI_GAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
+	       (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
+		NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
+	WREG32(NI_REGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+	       (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
+		NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
+	WREG32(NI_OUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+	       (NI_OUTPUT_CSC_GRPH_MODE(0) |
+		NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
+	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
+	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
+
+
+}
+
+static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
+{
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+	switch (amdgpu_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+		return dig->linkb ? 1 : 0;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+		return dig->linkb ? 3 : 2;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		return dig->linkb ? 5 : 4;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+		return 6;
+	default:
+		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
+		return 0;
+	}
+}
+
+/**
+ * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
+ * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
+ * monitors a dedicated PPLL must be used.  If a particular board has
+ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+ * as there is no need to program the PLL itself.  If we are not able to
+ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+ * avoid messing up an existing monitor.
+ *
+ *
+ */
+static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	u32 pll_in_use;
+	int pll;
+
+	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
+		if (adev->clock.dp_extclk)
+			/* skip PPLL programming if using ext clock */
+			return ATOM_PPLL_INVALID;
+		else
+			return ATOM_PPLL0;
+	} else {
+		/* use the same PPLL for all monitors with the same clock */
+		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
+		if (pll != ATOM_PPLL_INVALID)
+			return pll;
+	}
+
+	/*  PPLL1, and PPLL2 */
+	pll_in_use = amdgpu_pll_get_use_mask(crtc);
+	if (!(pll_in_use & (1 << ATOM_PPLL2)))
+		return ATOM_PPLL2;
+	if (!(pll_in_use & (1 << ATOM_PPLL1)))
+		return ATOM_PPLL1;
+	DRM_ERROR("unable to allocate a PPLL\n");
+	return ATOM_PPLL_INVALID;
+}
+
+static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
+{
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	uint32_t cur_lock;
+
+	cur_lock = RREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset);
+	if (lock)
+		cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
+	else
+		cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
+	WREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
+}
+
+static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+
+	WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
+		   EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+		   EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+
+
+}
+
+static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+
+	WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+	       upper_32_bits(amdgpu_crtc->cursor_addr));
+	WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+	       lower_32_bits(amdgpu_crtc->cursor_addr));
+
+	WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
+		   EVERGREEN_CURSOR_EN |
+		   EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+		   EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+
+}
+
+static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
+				       int x, int y)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+	int xorigin = 0, yorigin = 0;
+
+	int w = amdgpu_crtc->cursor_width;
+
+	/* avivo cursor are offset into the total surface */
+	x += crtc->x;
+	y += crtc->y;
+	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+	if (x < 0) {
+		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
+		x = 0;
+	}
+	if (y < 0) {
+		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
+		y = 0;
+	}
+
+	WREG32(EVERGREEN_CUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
+	WREG32(EVERGREEN_CUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+	WREG32(EVERGREEN_CUR_SIZE + amdgpu_crtc->crtc_offset,
+	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
+
+	amdgpu_crtc->cursor_x = x;
+	amdgpu_crtc->cursor_y = y;
+	return 0;
+}
+
+static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
+				     int x, int y)
+{
+	int ret;
+
+	dce_v6_0_lock_cursor(crtc, true);
+	ret = dce_v6_0_cursor_move_locked(crtc, x, y);
+	dce_v6_0_lock_cursor(crtc, false);
+
+	return ret;
+}
+
+static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
+				     struct drm_file *file_priv,
+				     uint32_t handle,
+				     uint32_t width,
+				     uint32_t height,
+				     int32_t hot_x,
+				     int32_t hot_y)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_gem_object *obj;
+	struct amdgpu_bo *aobj;
+	int ret;
+
+	if (!handle) {
+		/* turn off cursor */
+		dce_v6_0_hide_cursor(crtc);
+		obj = NULL;
+		goto unpin;
+	}
+
+	if ((width > amdgpu_crtc->max_cursor_width) ||
+	    (height > amdgpu_crtc->max_cursor_height)) {
+		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+		return -EINVAL;
+	}
+
+	obj = drm_gem_object_lookup(file_priv, handle);
+	if (!obj) {
+		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
+		return -ENOENT;
+	}
+
+	aobj = gem_to_amdgpu_bo(obj);
+	ret = amdgpu_bo_reserve(aobj, false);
+	if (ret != 0) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+
+	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+	amdgpu_bo_unreserve(aobj);
+	if (ret) {
+		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+
+	amdgpu_crtc->cursor_width = width;
+	amdgpu_crtc->cursor_height = height;
+
+	dce_v6_0_lock_cursor(crtc, true);
+
+	if (hot_x != amdgpu_crtc->cursor_hot_x ||
+	    hot_y != amdgpu_crtc->cursor_hot_y) {
+		int x, y;
+
+		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
+		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
+
+		dce_v6_0_cursor_move_locked(crtc, x, y);
+
+		amdgpu_crtc->cursor_hot_x = hot_x;
+		amdgpu_crtc->cursor_hot_y = hot_y;
+	}
+
+	dce_v6_0_show_cursor(crtc);
+	dce_v6_0_lock_cursor(crtc, false);
+
+unpin:
+	if (amdgpu_crtc->cursor_bo) {
+		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+		ret = amdgpu_bo_reserve(aobj, false);
+		if (likely(ret == 0)) {
+			amdgpu_bo_unpin(aobj);
+			amdgpu_bo_unreserve(aobj);
+		}
+		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+	}
+
+	amdgpu_crtc->cursor_bo = obj;
+	return 0;
+}
+
+static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	if (amdgpu_crtc->cursor_bo) {
+		dce_v6_0_lock_cursor(crtc, true);
+
+		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
+					    amdgpu_crtc->cursor_y);
+
+		dce_v6_0_show_cursor(crtc);
+		dce_v6_0_lock_cursor(crtc, false);
+	}
+}
+
+static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				   u16 *blue, uint32_t size)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	int i;
+
+	/* userspace palettes are always correct as is */
+	for (i = 0; i < size; i++) {
+		amdgpu_crtc->lut_r[i] = red[i] >> 6;
+		amdgpu_crtc->lut_g[i] = green[i] >> 6;
+		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
+	}
+	dce_v6_0_crtc_load_lut(crtc);
+
+	return 0;
+}
+
+static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	drm_crtc_cleanup(crtc);
+	kfree(amdgpu_crtc);
+}
+
+static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
+	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
+	.cursor_move = dce_v6_0_crtc_cursor_move,
+	.gamma_set = dce_v6_0_crtc_gamma_set,
+	.set_config = amdgpu_crtc_set_config,
+	.destroy = dce_v6_0_crtc_destroy,
+	.page_flip_target = amdgpu_crtc_page_flip_target,
+};
+
+static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	unsigned type;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		amdgpu_crtc->enabled = true;
+		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
+		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
+		/* Make sure VBLANK and PFLIP interrupts are still enabled */
+		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+		amdgpu_irq_update(adev, &adev->crtc_irq, type);
+		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
+		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+		dce_v6_0_crtc_load_lut(crtc);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+		if (amdgpu_crtc->enabled)
+			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
+		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
+		amdgpu_crtc->enabled = false;
+		break;
+	}
+	/* adjust pm to dpms */
+	amdgpu_pm_compute_clocks(adev);
+}
+
+static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
+{
+	/* disable crtc pair power gating before programming */
+	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
+	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
+	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
+{
+	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
+}
+
+static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
+{
+
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_atom_ss ss;
+	int i;
+
+	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	if (crtc->primary->fb) {
+		int r;
+		struct amdgpu_framebuffer *amdgpu_fb;
+		struct amdgpu_bo *abo;
+
+		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
+		if (unlikely(r))
+			DRM_ERROR("failed to reserve abo before unpin\n");
+		else {
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
+		}
+	}
+	/* disable the GRPH */
+	dce_v6_0_grph_enable(crtc, false);
+
+	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
+
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		if (adev->mode_info.crtcs[i] &&
+		    adev->mode_info.crtcs[i]->enabled &&
+		    i != amdgpu_crtc->crtc_id &&
+		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
+			/* one other crtc is using this pll don't turn
+			 * off the pll
+			 */
+			goto done;
+		}
+	}
+
+	switch (amdgpu_crtc->pll_id) {
+	case ATOM_PPLL1:
+	case ATOM_PPLL2:
+		/* disable the ppll */
+		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
+						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+		break;
+	default:
+		break;
+	}
+done:
+	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+	amdgpu_crtc->adjusted_clock = 0;
+	amdgpu_crtc->encoder = NULL;
+	amdgpu_crtc->connector = NULL;
+}
+
+static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
+				  struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode,
+				  int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	if (!amdgpu_crtc->adjusted_clock)
+		return -EINVAL;
+
+	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
+	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
+	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
+	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
+	amdgpu_atombios_crtc_scaler_setup(crtc);
+	dce_v6_0_cursor_reset(crtc);
+	/* update the hw version fpr dpm */
+	amdgpu_crtc->hw_mode = *adjusted_mode;
+
+	return 0;
+}
+
+static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
+				     const struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted_mode)
+{
+
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder *encoder;
+
+	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc == crtc) {
+			amdgpu_crtc->encoder = encoder;
+			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
+			break;
+		}
+	}
+	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
+		amdgpu_crtc->encoder = NULL;
+		amdgpu_crtc->connector = NULL;
+		return false;
+	}
+	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+		return false;
+	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
+		return false;
+	/* pick pll */
+	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
+	/* if we can't get a PPLL for a non-DP encoder, fail */
+	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
+	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
+		return false;
+
+	return true;
+}
+
+static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+				  struct drm_framebuffer *old_fb)
+{
+	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
+					 struct drm_framebuffer *fb,
+					 int x, int y, enum mode_set_atomic state)
+{
+       return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
+	.dpms = dce_v6_0_crtc_dpms,
+	.mode_fixup = dce_v6_0_crtc_mode_fixup,
+	.mode_set = dce_v6_0_crtc_mode_set,
+	.mode_set_base = dce_v6_0_crtc_set_base,
+	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
+	.prepare = dce_v6_0_crtc_prepare,
+	.commit = dce_v6_0_crtc_commit,
+	.load_lut = dce_v6_0_crtc_load_lut,
+	.disable = dce_v6_0_crtc_disable,
+};
+
+static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
+{
+	struct amdgpu_crtc *amdgpu_crtc;
+	int i;
+
+	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
+			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+	if (amdgpu_crtc == NULL)
+		return -ENOMEM;
+
+	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
+	amdgpu_crtc->crtc_id = index;
+	adev->mode_info.crtcs[index] = amdgpu_crtc;
+
+	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
+	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
+	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+
+	for (i = 0; i < 256; i++) {
+		amdgpu_crtc->lut_r[i] = i << 2;
+		amdgpu_crtc->lut_g[i] = i << 2;
+		amdgpu_crtc->lut_b[i] = i << 2;
+	}
+
+	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
+
+	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+	amdgpu_crtc->adjusted_clock = 0;
+	amdgpu_crtc->encoder = NULL;
+	amdgpu_crtc->connector = NULL;
+	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
+
+	return 0;
+}
+
+static int dce_v6_0_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
+	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
+
+	dce_v6_0_set_display_funcs(adev);
+	dce_v6_0_set_irq_funcs(adev);
+
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_VERDE:
+		adev->mode_info.num_crtc = 6;
+		adev->mode_info.num_hpd = 6;
+		adev->mode_info.num_dig = 6;
+		break;
+	case CHIP_OLAND:
+		adev->mode_info.num_crtc = 2;
+		adev->mode_info.num_hpd = 2;
+		adev->mode_info.num_dig = 2;
+		break;
+	default:
+		/* FIXME: not supported yet */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dce_v6_0_sw_init(void *handle)
+{
+	int r, i;
+	bool ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+		if (r)
+			return r;
+	}
+
+	for (i = 8; i < 20; i += 2) {
+		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+		if (r)
+			return r;
+	}
+
+	/* HPD hotplug */
+	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+	if (r)
+		return r;
+
+	adev->mode_info.mode_config_initialized = true;
+
+	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+	adev->ddev->mode_config.async_page_flip = true;
+	adev->ddev->mode_config.max_width = 16384;
+	adev->ddev->mode_config.max_height = 16384;
+	adev->ddev->mode_config.preferred_depth = 24;
+	adev->ddev->mode_config.prefer_shadow = 1;
+	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+	r = amdgpu_modeset_create_props(adev);
+	if (r)
+		return r;
+
+	adev->ddev->mode_config.max_width = 16384;
+	adev->ddev->mode_config.max_height = 16384;
+
+	/* allocate crtcs */
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		r = dce_v6_0_crtc_init(adev, i);
+		if (r)
+			return r;
+	}
+
+	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
+	if (ret)
+		amdgpu_print_display_setup(adev->ddev);
+	else
+		return -EINVAL;
+
+	/* setup afmt */
+	r = dce_v6_0_afmt_init(adev);
+	if (r)
+		return r;
+
+	r = dce_v6_0_audio_init(adev);
+	if (r)
+		return r;
+
+	drm_kms_helper_poll_init(adev->ddev);
+
+	return r;
+}
+
+static int dce_v6_0_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	kfree(adev->mode_info.bios_hardcoded_edid);
+
+	drm_kms_helper_poll_fini(adev->ddev);
+
+	dce_v6_0_audio_fini(adev);
+	dce_v6_0_afmt_fini(adev);
+
+	drm_mode_config_cleanup(adev->ddev);
+	adev->mode_info.mode_config_initialized = false;
+
+	return 0;
+}
+
+static int dce_v6_0_hw_init(void *handle)
+{
+	int i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	/* init dig PHYs, disp eng pll */
+	amdgpu_atombios_encoder_init_dig(adev);
+	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
+
+	/* initialize hpd */
+	dce_v6_0_hpd_init(adev);
+
+	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+	}
+
+	dce_v6_0_pageflip_interrupt_init(adev);
+
+	return 0;
+}
+
+static int dce_v6_0_hw_fini(void *handle)
+{
+	int i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	dce_v6_0_hpd_fini(adev);
+
+	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+	}
+
+	dce_v6_0_pageflip_interrupt_fini(adev);
+
+	return 0;
+}
+
+static int dce_v6_0_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_atombios_scratch_regs_save(adev);
+
+	return dce_v6_0_hw_fini(handle);
+}
+
+static int dce_v6_0_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret;
+
+	ret = dce_v6_0_hw_init(handle);
+
+	amdgpu_atombios_scratch_regs_restore(adev);
+
+	/* turn on the BL */
+	if (adev->mode_info.bl_encoder) {
+		u8 bl_level = amdgpu_display_backlight_get_level(adev,
+								  adev->mode_info.bl_encoder);
+		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
+						    bl_level);
+	}
+
+	return ret;
+}
+
+static bool dce_v6_0_is_idle(void *handle)
+{
+	return true;
+}
+
+static int dce_v6_0_wait_for_idle(void *handle)
+{
+	return 0;
+}
+
+static int dce_v6_0_soft_reset(void *handle)
+{
+	DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
+	return 0;
+}
+
+static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+						     int crtc,
+						     enum amdgpu_interrupt_state state)
+{
+	u32 reg_block, interrupt_mask;
+
+	if (crtc >= adev->mode_info.num_crtc) {
+		DRM_DEBUG("invalid crtc %d\n", crtc);
+		return;
+	}
+
+	switch (crtc) {
+	case 0:
+		reg_block = SI_CRTC0_REGISTER_OFFSET;
+		break;
+	case 1:
+		reg_block = SI_CRTC1_REGISTER_OFFSET;
+		break;
+	case 2:
+		reg_block = SI_CRTC2_REGISTER_OFFSET;
+		break;
+	case 3:
+		reg_block = SI_CRTC3_REGISTER_OFFSET;
+		break;
+	case 4:
+		reg_block = SI_CRTC4_REGISTER_OFFSET;
+		break;
+	case 5:
+		reg_block = SI_CRTC5_REGISTER_OFFSET;
+		break;
+	default:
+		DRM_DEBUG("invalid crtc %d\n", crtc);
+		return;
+	}
+
+	switch (state) {
+	case AMDGPU_IRQ_STATE_DISABLE:
+		interrupt_mask = RREG32(INT_MASK + reg_block);
+		interrupt_mask &= ~VBLANK_INT_MASK;
+		WREG32(INT_MASK + reg_block, interrupt_mask);
+		break;
+	case AMDGPU_IRQ_STATE_ENABLE:
+		interrupt_mask = RREG32(INT_MASK + reg_block);
+		interrupt_mask |= VBLANK_INT_MASK;
+		WREG32(INT_MASK + reg_block, interrupt_mask);
+		break;
+	default:
+		break;
+	}
+}
+
+static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
+						    int crtc,
+						    enum amdgpu_interrupt_state state)
+{
+
+}
+
+static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
+					    struct amdgpu_irq_src *src,
+					    unsigned type,
+					    enum amdgpu_interrupt_state state)
+{
+	u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+	switch (type) {
+	case AMDGPU_HPD_1:
+		dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
+		break;
+	case AMDGPU_HPD_2:
+		dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
+		break;
+	case AMDGPU_HPD_3:
+		dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
+		break;
+	case AMDGPU_HPD_4:
+		dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
+		break;
+	case AMDGPU_HPD_5:
+		dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
+		break;
+	case AMDGPU_HPD_6:
+		dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
+		break;
+	default:
+		DRM_DEBUG("invalid hdp %d\n", type);
+		return 0;
+	}
+
+	switch (state) {
+	case AMDGPU_IRQ_STATE_DISABLE:
+		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+		dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+		break;
+	case AMDGPU_IRQ_STATE_ENABLE:
+		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+		dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
+					     struct amdgpu_irq_src *src,
+					     unsigned type,
+					     enum amdgpu_interrupt_state state)
+{
+	switch (type) {
+	case AMDGPU_CRTC_IRQ_VBLANK1:
+		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VBLANK2:
+		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VBLANK3:
+		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VBLANK4:
+		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VBLANK5:
+		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VBLANK6:
+		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VLINE1:
+		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VLINE2:
+		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VLINE3:
+		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VLINE4:
+		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VLINE5:
+		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
+		break;
+	case AMDGPU_CRTC_IRQ_VLINE6:
+		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
+			     struct amdgpu_irq_src *source,
+			     struct amdgpu_iv_entry *entry)
+{
+	unsigned crtc = entry->src_id - 1;
+	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
+	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+
+	switch (entry->src_data) {
+	case 0: /* vblank */
+		if (disp_int & interrupt_status_offsets[crtc].vblank)
+			WREG32(VBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
+		else
+			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+		if (amdgpu_irq_enabled(adev, source, irq_type)) {
+			drm_handle_vblank(adev->ddev, crtc);
+		}
+		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+		break;
+	case 1: /* vline */
+		if (disp_int & interrupt_status_offsets[crtc].vline)
+			WREG32(VLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
+		else
+			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+		break;
+	default:
+		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+		break;
+	}
+
+	return 0;
+}
+
+static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
+						 struct amdgpu_irq_src *src,
+						 unsigned type,
+						 enum amdgpu_interrupt_state state)
+{
+	u32 reg;
+
+	if (type >= adev->mode_info.num_crtc) {
+		DRM_ERROR("invalid pageflip crtc %d\n", type);
+		return -EINVAL;
+	}
+
+	reg = RREG32(GRPH_INT_CONTROL + crtc_offsets[type]);
+	if (state == AMDGPU_IRQ_STATE_DISABLE)
+		WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
+		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+	else
+		WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
+		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+
+	return 0;
+}
+
+static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
+				 struct amdgpu_irq_src *source,
+				 struct amdgpu_iv_entry *entry)
+{
+		unsigned long flags;
+	unsigned crtc_id;
+	struct amdgpu_crtc *amdgpu_crtc;
+	struct amdgpu_flip_work *works;
+
+	crtc_id = (entry->src_id - 8) >> 1;
+	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+	if (crtc_id >= adev->mode_info.num_crtc) {
+		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+		return -EINVAL;
+	}
+
+	if (RREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id]) &
+	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
+		WREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id],
+		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
+
+	/* IRQ could occur when in initial stage */
+	if (amdgpu_crtc == NULL)
+		return 0;
+
+	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	works = amdgpu_crtc->pflip_works;
+	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
+		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
+						"AMDGPU_FLIP_SUBMITTED(%d)\n",
+						amdgpu_crtc->pflip_status,
+						AMDGPU_FLIP_SUBMITTED);
+		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+		return 0;
+	}
+
+	/* page flip completed. clean up */
+	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+	amdgpu_crtc->pflip_works = NULL;
+
+	/* wakeup usersapce */
+	if (works->event)
+		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
+
+	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+	drm_crtc_vblank_put(&amdgpu_crtc->base);
+	schedule_work(&works->unpin_work);
+
+	return 0;
+}
+
+static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
+			    struct amdgpu_irq_src *source,
+			    struct amdgpu_iv_entry *entry)
+{
+	uint32_t disp_int, mask, int_control, tmp;
+	unsigned hpd;
+
+	if (entry->src_data >= adev->mode_info.num_hpd) {
+		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+		return 0;
+	}
+
+	hpd = entry->src_data;
+	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
+	mask = interrupt_status_offsets[hpd].hpd;
+	int_control = hpd_int_control_offsets[hpd];
+
+	if (disp_int & mask) {
+		tmp = RREG32(int_control);
+		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
+		WREG32(int_control, tmp);
+		schedule_work(&adev->hotplug_work);
+		DRM_INFO("IH: HPD%d\n", hpd + 1);
+	}
+
+	return 0;
+
+}
+
+static int dce_v6_0_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int dce_v6_0_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs dce_v6_0_ip_funcs = {
+	.name = "dce_v6_0",
+	.early_init = dce_v6_0_early_init,
+	.late_init = NULL,
+	.sw_init = dce_v6_0_sw_init,
+	.sw_fini = dce_v6_0_sw_fini,
+	.hw_init = dce_v6_0_hw_init,
+	.hw_fini = dce_v6_0_hw_fini,
+	.suspend = dce_v6_0_suspend,
+	.resume = dce_v6_0_resume,
+	.is_idle = dce_v6_0_is_idle,
+	.wait_for_idle = dce_v6_0_wait_for_idle,
+	.soft_reset = dce_v6_0_soft_reset,
+	.set_clockgating_state = dce_v6_0_set_clockgating_state,
+	.set_powergating_state = dce_v6_0_set_powergating_state,
+};
+
+static void
+dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
+			  struct drm_display_mode *mode,
+			  struct drm_display_mode *adjusted_mode)
+{
+
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
+
+	/* need to call this here rather than in prepare() since we need some crtc info */
+	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	/* set scaler clears this on some chips */
+	dce_v6_0_set_interleave(encoder->crtc, mode);
+
+	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+		dce_v6_0_afmt_enable(encoder, true);
+		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
+	}
+}
+
+static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
+{
+
+	struct amdgpu_device *adev = encoder->dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+
+	if ((amdgpu_encoder->active_device &
+	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
+	     ENCODER_OBJECT_ID_NONE)) {
+		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+		if (dig) {
+			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
+			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
+				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
+		}
+	}
+
+	amdgpu_atombios_scratch_regs_lock(adev, true);
+
+	if (connector) {
+		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+		/* select the clock/data port if it uses a router */
+		if (amdgpu_connector->router.cd_valid)
+			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
+
+		/* turn eDP panel on for mode set */
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+			amdgpu_atombios_encoder_set_edp_panel_power(connector,
+							     ATOM_TRANSMITTER_ACTION_POWER_ON);
+	}
+
+	/* this is needed for the pll/ss setup to work correctly in some cases */
+	amdgpu_atombios_encoder_set_crtc_source(encoder);
+	/* set up the FMT blocks */
+	dce_v6_0_program_fmt(encoder);
+}
+
+static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
+{
+
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+
+	/* need to call this here as we need the crtc set up */
+	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+	amdgpu_atombios_scratch_regs_lock(adev, false);
+}
+
+static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
+{
+
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig;
+
+	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	if (amdgpu_atombios_encoder_is_digital(encoder)) {
+		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+			dce_v6_0_afmt_enable(encoder, false);
+		dig = amdgpu_encoder->enc_priv;
+		dig->dig_encoder = -1;
+	}
+	amdgpu_encoder->active_device = 0;
+}
+
+/* these are handled by the primary encoders */
+static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
+		      struct drm_display_mode *mode,
+		      struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
+				    const struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
+	.dpms = dce_v6_0_ext_dpms,
+	.mode_fixup = dce_v6_0_ext_mode_fixup,
+	.prepare = dce_v6_0_ext_prepare,
+	.mode_set = dce_v6_0_ext_mode_set,
+	.commit = dce_v6_0_ext_commit,
+	.disable = dce_v6_0_ext_disable,
+	/* no detect for TMDS/LVDS yet */
+};
+
+static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
+	.dpms = amdgpu_atombios_encoder_dpms,
+	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
+	.prepare = dce_v6_0_encoder_prepare,
+	.mode_set = dce_v6_0_encoder_mode_set,
+	.commit = dce_v6_0_encoder_commit,
+	.disable = dce_v6_0_encoder_disable,
+	.detect = amdgpu_atombios_encoder_dig_detect,
+};
+
+static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
+	.dpms = amdgpu_atombios_encoder_dpms,
+	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
+	.prepare = dce_v6_0_encoder_prepare,
+	.mode_set = dce_v6_0_encoder_mode_set,
+	.commit = dce_v6_0_encoder_commit,
+	.detect = amdgpu_atombios_encoder_dac_detect,
+};
+
+static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
+	kfree(amdgpu_encoder->enc_priv);
+	drm_encoder_cleanup(encoder);
+	kfree(amdgpu_encoder);
+}
+
+static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
+	.destroy = dce_v6_0_encoder_destroy,
+};
+
+static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
+				 uint32_t encoder_enum,
+				 uint32_t supported_device,
+				 u16 caps)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_encoder *encoder;
+	struct amdgpu_encoder *amdgpu_encoder;
+
+	/* see if we already added it */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		amdgpu_encoder = to_amdgpu_encoder(encoder);
+		if (amdgpu_encoder->encoder_enum == encoder_enum) {
+			amdgpu_encoder->devices |= supported_device;
+			return;
+		}
+
+	}
+
+	/* add a new one */
+	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
+	if (!amdgpu_encoder)
+		return;
+
+	encoder = &amdgpu_encoder->base;
+	switch (adev->mode_info.num_crtc) {
+	case 1:
+		encoder->possible_crtcs = 0x1;
+		break;
+	case 2:
+	default:
+		encoder->possible_crtcs = 0x3;
+		break;
+	case 4:
+		encoder->possible_crtcs = 0xf;
+		break;
+	case 6:
+		encoder->possible_crtcs = 0x3f;
+		break;
+	}
+
+	amdgpu_encoder->enc_priv = NULL;
+	amdgpu_encoder->encoder_enum = encoder_enum;
+	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+	amdgpu_encoder->devices = supported_device;
+	amdgpu_encoder->rmx_type = RMX_OFF;
+	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
+	amdgpu_encoder->is_ext_encoder = false;
+	amdgpu_encoder->caps = caps;
+
+	switch (amdgpu_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+				 DRM_MODE_ENCODER_DAC, NULL);
+		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			amdgpu_encoder->rmx_type = RMX_FULL;
+			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+					 DRM_MODE_ENCODER_LVDS, NULL);
+			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
+		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+					 DRM_MODE_ENCODER_DAC, NULL);
+			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
+		} else {
+			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+					 DRM_MODE_ENCODER_TMDS, NULL);
+			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
+		}
+		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_SI170B:
+	case ENCODER_OBJECT_ID_CH7303:
+	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
+	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
+	case ENCODER_OBJECT_ID_TITFP513:
+	case ENCODER_OBJECT_ID_VT1623:
+	case ENCODER_OBJECT_ID_HDMI_SI1930:
+	case ENCODER_OBJECT_ID_TRAVIS:
+	case ENCODER_OBJECT_ID_NUTMEG:
+		/* these are handled by the primary encoders */
+		amdgpu_encoder->is_ext_encoder = true;
+		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+					 DRM_MODE_ENCODER_LVDS, NULL);
+		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+					 DRM_MODE_ENCODER_DAC, NULL);
+		else
+			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+					 DRM_MODE_ENCODER_TMDS, NULL);
+		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
+		break;
+	}
+}
+
+static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
+	.set_vga_render_state = &dce_v6_0_set_vga_render_state,
+	.bandwidth_update = &dce_v6_0_bandwidth_update,
+	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
+	.vblank_wait = &dce_v6_0_vblank_wait,
+	.is_display_hung = &dce_v6_0_is_display_hung,
+	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
+	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
+	.hpd_sense = &dce_v6_0_hpd_sense,
+	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
+	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
+	.page_flip = &dce_v6_0_page_flip,
+	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
+	.add_encoder = &dce_v6_0_encoder_add,
+	.add_connector = &amdgpu_connector_add,
+	.stop_mc_access = &dce_v6_0_stop_mc_access,
+	.resume_mc_access = &dce_v6_0_resume_mc_access,
+};
+
+static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
+{
+	if (adev->mode_info.funcs == NULL)
+		adev->mode_info.funcs = &dce_v6_0_display_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
+	.set = dce_v6_0_set_crtc_interrupt_state,
+	.process = dce_v6_0_crtc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
+	.set = dce_v6_0_set_pageflip_interrupt_state,
+	.process = dce_v6_0_pageflip_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
+	.set = dce_v6_0_set_hpd_interrupt_state,
+	.process = dce_v6_0_hpd_irq,
+};
+
+static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
+
+	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
+
+	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
similarity index 72%
copy from drivers/gpu/drm/amd/amdgpu/iceland_smum.h
copy to drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
index 5983e31..6a55281 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2015 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -21,21 +21,9 @@
  *
  */
 
-#ifndef ICELAND_SMUM_H
-#define ICELAND_SMUM_H
+#ifndef __DCE_V6_0_H__
+#define __DCE_V6_0_H__
 
-#include "ppsmc.h"
-
-extern int iceland_smu_init(struct amdgpu_device *adev);
-extern int iceland_smu_fini(struct amdgpu_device *adev);
-extern int iceland_smu_start(struct amdgpu_device *adev);
-
-struct iceland_smu_private_data
-{
-	uint8_t *header;
-	uint8_t *mec_image;
-	uint32_t header_addr_high;
-	uint32_t header_addr_low;
-};
+extern const struct amd_ip_funcs dce_v6_0_ip_funcs;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 4fdfab1..5966166 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -170,7 +170,7 @@
  */
 static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 {
-	unsigned i = 0;
+	unsigned i = 100;
 
 	if (crtc >= adev->mode_info.num_crtc)
 		return;
@@ -182,14 +182,16 @@
 	 * wait for another frame.
 	 */
 	while (dce_v8_0_is_in_vblank(adev, crtc)) {
-		if (i++ % 100 == 0) {
+		if (i++ == 100) {
+			i = 0;
 			if (!dce_v8_0_is_counter_moving(adev, crtc))
 				break;
 		}
 	}
 
 	while (!dce_v8_0_is_in_vblank(adev, crtc)) {
-		if (i++ % 100 == 0) {
+		if (i++ == 100) {
+			i = 0;
 			if (!dce_v8_0_is_counter_moving(adev, crtc))
 				break;
 		}
@@ -395,15 +397,6 @@
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
-		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
-			/* don't try to enable hpd on eDP or LVDS avoid breaking the
-			 * aux dp channel on imac and help (but not completely fix)
-			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
-			 * also avoid interrupt storms during dpms.
-			 */
-			continue;
-		}
 		switch (amdgpu_connector->hpd.hpd) {
 		case AMDGPU_HPD_1:
 			WREG32(mmDC_HPD1_CONTROL, tmp);
@@ -426,6 +419,45 @@
 		default:
 			break;
 		}
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS avoid breaking the
+			 * aux dp channel on imac and help (but not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * also avoid interrupt storms during dpms.
+			 */
+			u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+			switch (amdgpu_connector->hpd.hpd) {
+			case AMDGPU_HPD_1:
+				dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_2:
+				dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_3:
+				dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_4:
+				dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_5:
+				dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_6:
+				dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
+				break;
+			default:
+				continue;
+			}
+
+			dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+			dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+			WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+			continue;
+		}
+
 		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
@@ -604,6 +636,52 @@
 	WREG32(mmVGA_RENDER_CONTROL, tmp);
 }
 
+static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
+{
+	int num_crtc = 0;
+
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+		num_crtc = 6;
+		break;
+	case CHIP_KAVERI:
+		num_crtc = 4;
+		break;
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+		num_crtc = 2;
+		break;
+	default:
+		num_crtc = 0;
+	}
+	return num_crtc;
+}
+
+void dce_v8_0_disable_dce(struct amdgpu_device *adev)
+{
+	/*Disable VGA render and enabled crtc, if has DCE engine*/
+	if (amdgpu_atombios_has_dce_engine_info(adev)) {
+		u32 tmp;
+		int crtc_enabled, i;
+
+		dce_v8_0_set_vga_render_state(adev, false);
+
+		/*Disable crtc*/
+		for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
+			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+									 CRTC_CONTROL, CRTC_MASTER_EN);
+			if (crtc_enabled) {
+				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			}
+		}
+	}
+}
+
 static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
@@ -1501,13 +1579,13 @@
 
 			if (sad->format == eld_reg_to_type[i][1]) {
 				if (sad->channels > max_channels) {
-				value = (sad->channels <<
-				 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
-				(sad->byte2 <<
-				 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
-				(sad->freq <<
-				 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
-				max_channels = sad->channels;
+					value = (sad->channels <<
+						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
+					        (sad->byte2 <<
+						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
+					        (sad->freq <<
+						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
+					max_channels = sad->channels;
 				}
 
 				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
@@ -1613,7 +1691,7 @@
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	uint32_t offset = dig->afmt->offset;
 
-	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
+	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
 	WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);
 
 	WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
@@ -1693,6 +1771,7 @@
 	/* Silent, r600_hdmi_enable will raise WARN for us */
 	if (!dig->afmt->enabled)
 		return;
+
 	offset = dig->afmt->offset;
 
 	/* hdmi deep color mode general control packets setup, if bpc > 8 */
@@ -1817,7 +1896,7 @@
 
 	WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
 		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
-		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK); /* required for audio info values to be updated */
+		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */
 
 	WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
 		 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
@@ -1826,13 +1905,12 @@
 	WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
 		  AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
 
-	/* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */
 	WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
 	WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
 	WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
 	WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
 
-	/* enable audio after to setting up hw */
+	/* enable audio after setting up hw */
 	dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
 }
 
@@ -1944,7 +2022,7 @@
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels;
 	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
@@ -1952,6 +2030,7 @@
 	u32 viewport_w, viewport_h;
 	int r;
 	bool bypass_lut = false;
+	char *format_name;
 
 	/* no fb bound */
 	if (!atomic && !crtc->primary->fb) {
@@ -1971,23 +2050,23 @@
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
 	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(rbo);
+		fb_location = amdgpu_bo_gpu_offset(abo);
 	} else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
 
@@ -1999,7 +2078,7 @@
 	case DRM_FORMAT_XRGB4444:
 	case DRM_FORMAT_ARGB4444:
 		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
-			     (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+			     (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
 #ifdef __BIG_ENDIAN
 		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
 #endif
@@ -2056,8 +2135,9 @@
 		bypass_lut = true;
 		break;
 	default:
-		DRM_ERROR("Unsupported screen format %s\n",
-			  drm_get_format_name(target_fb->pixel_format));
+		format_name = drm_get_format_name(target_fb->pixel_format);
+		DRM_ERROR("Unsupported screen format %s\n", format_name);
+		kfree(format_name);
 		return -EINVAL;
 	}
 
@@ -2137,17 +2217,17 @@
 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
 	       (viewport_w << 16) | viewport_h);
 
-	/* set pageflip to happen only at start of vblank interval (front porch) */
-	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */
@@ -2552,7 +2632,7 @@
 	.gamma_set = dce_v8_0_crtc_gamma_set,
 	.set_config = amdgpu_crtc_set_config,
 	.destroy = dce_v8_0_crtc_destroy,
-	.page_flip = amdgpu_crtc_page_flip,
+	.page_flip_target = amdgpu_crtc_page_flip_target,
 };
 
 static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2619,16 +2699,16 @@
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */
@@ -2653,7 +2733,7 @@
 	case ATOM_PPLL2:
 		/* disable the ppll */
 		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
-					  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+                                                 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
 		break;
 	case ATOM_PPLL0:
 		/* disable the ppll */
@@ -2803,21 +2883,20 @@
 	dce_v8_0_set_display_funcs(adev);
 	dce_v8_0_set_irq_funcs(adev);
 
+	adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
+
 	switch (adev->asic_type) {
 	case CHIP_BONAIRE:
 	case CHIP_HAWAII:
-		adev->mode_info.num_crtc = 6;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 6;
 		break;
 	case CHIP_KAVERI:
-		adev->mode_info.num_crtc = 4;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 7;
 		break;
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
-		adev->mode_info.num_crtc = 2;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 6; /* ? */
 		break;
@@ -3236,7 +3315,6 @@
 			drm_handle_vblank(adev->ddev, crtc);
 		}
 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
-
 		break;
 	case 1: /* vline */
 		if (disp_int & interrupt_status_offsets[crtc].vline)
@@ -3245,7 +3323,6 @@
 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
 		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-
 		break;
 	default:
 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
index 7701685..7d0770c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
@@ -26,4 +26,6 @@
 
 extern const struct amd_ip_funcs dce_v8_0_ip_funcs;
 
+void dce_v8_0_disable_dce(struct amdgpu_device *adev);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
new file mode 100644
index 0000000..c2bd9f0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -0,0 +1,802 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_i2c.h"
+#include "atom.h"
+#include "amdgpu_pll.h"
+#include "amdgpu_connectors.h"
+#ifdef CONFIG_DRM_AMDGPU_CIK
+#include "dce_v8_0.h"
+#endif
+#include "dce_v10_0.h"
+#include "dce_v11_0.h"
+#include "dce_virtual.h"
+
+static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
+static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
+static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
+				  struct amdgpu_irq_src *source,
+				  struct amdgpu_iv_entry *entry);
+
+/**
+ * dce_virtual_vblank_wait - vblank wait asic callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
+static void dce_virtual_vblank_wait(struct amdgpu_device *adev, int crtc)
+{
+	return;
+}
+
+static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+	return 0;
+}
+
+static void dce_virtual_page_flip(struct amdgpu_device *adev,
+			      int crtc_id, u64 crtc_base, bool async)
+{
+	return;
+}
+
+static int dce_virtual_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+					u32 *vbl, u32 *position)
+{
+	*vbl = 0;
+	*position = 0;
+
+	return -EINVAL;
+}
+
+static bool dce_virtual_hpd_sense(struct amdgpu_device *adev,
+			       enum amdgpu_hpd_id hpd)
+{
+	return true;
+}
+
+static void dce_virtual_hpd_set_polarity(struct amdgpu_device *adev,
+				      enum amdgpu_hpd_id hpd)
+{
+	return;
+}
+
+static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev)
+{
+	return 0;
+}
+
+static bool dce_virtual_is_display_hung(struct amdgpu_device *adev)
+{
+	return false;
+}
+
+static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
+			      struct amdgpu_mode_mc_save *save)
+{
+	switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_CIK
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+	case CHIP_KAVERI:
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+		dce_v8_0_disable_dce(adev);
+		break;
+#endif
+	case CHIP_FIJI:
+	case CHIP_TONGA:
+		dce_v10_0_disable_dce(adev);
+		break;
+	case CHIP_CARRIZO:
+	case CHIP_STONEY:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
+		dce_v11_0_disable_dce(adev);
+		break;
+	case CHIP_TOPAZ:
+		/* no DCE */
+		return;
+	default:
+		DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
+	}
+
+	return;
+}
+static void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
+				struct amdgpu_mode_mc_save *save)
+{
+	return;
+}
+
+static void dce_virtual_set_vga_render_state(struct amdgpu_device *adev,
+				    bool render)
+{
+	return;
+}
+
+/**
+ * dce_virtual_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line
+ * buffer allocation (CIK).
+ */
+static void dce_virtual_bandwidth_update(struct amdgpu_device *adev)
+{
+	return;
+}
+
+static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+				      u16 *green, u16 *blue, uint32_t size)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	int i;
+
+	/* userspace palettes are always correct as is */
+	for (i = 0; i < size; i++) {
+		amdgpu_crtc->lut_r[i] = red[i] >> 6;
+		amdgpu_crtc->lut_g[i] = green[i] >> 6;
+		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
+	}
+
+	return 0;
+}
+
+static void dce_virtual_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	drm_crtc_cleanup(crtc);
+	kfree(amdgpu_crtc);
+}
+
+static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
+	.cursor_set2 = NULL,
+	.cursor_move = NULL,
+	.gamma_set = dce_virtual_crtc_gamma_set,
+	.set_config = amdgpu_crtc_set_config,
+	.destroy = dce_virtual_crtc_destroy,
+	.page_flip_target = amdgpu_crtc_page_flip_target,
+};
+
+static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	unsigned type;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		amdgpu_crtc->enabled = true;
+		/* Make sure VBLANK and PFLIP interrupts are still enabled */
+		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+		amdgpu_irq_update(adev, &adev->crtc_irq, type);
+		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
+		drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+		amdgpu_crtc->enabled = false;
+		break;
+	}
+}
+
+
+static void dce_virtual_crtc_prepare(struct drm_crtc *crtc)
+{
+	dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void dce_virtual_crtc_commit(struct drm_crtc *crtc)
+{
+	dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	if (crtc->primary->fb) {
+		int r;
+		struct amdgpu_framebuffer *amdgpu_fb;
+		struct amdgpu_bo *abo;
+
+		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
+		if (unlikely(r))
+			DRM_ERROR("failed to reserve abo before unpin\n");
+		else {
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
+		}
+	}
+
+	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+	amdgpu_crtc->encoder = NULL;
+	amdgpu_crtc->connector = NULL;
+}
+
+static int dce_virtual_crtc_mode_set(struct drm_crtc *crtc,
+				  struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode,
+				  int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	/* update the hw version fpr dpm */
+	amdgpu_crtc->hw_mode = *adjusted_mode;
+
+	return 0;
+}
+
+static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc,
+				     const struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted_mode)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder *encoder;
+
+	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc == crtc) {
+			amdgpu_crtc->encoder = encoder;
+			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
+			break;
+		}
+	}
+	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
+		amdgpu_crtc->encoder = NULL;
+		amdgpu_crtc->connector = NULL;
+		return false;
+	}
+
+	return true;
+}
+
+
+static int dce_virtual_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+				  struct drm_framebuffer *old_fb)
+{
+	return 0;
+}
+
+static void dce_virtual_crtc_load_lut(struct drm_crtc *crtc)
+{
+	return;
+}
+
+static int dce_virtual_crtc_set_base_atomic(struct drm_crtc *crtc,
+					 struct drm_framebuffer *fb,
+					 int x, int y, enum mode_set_atomic state)
+{
+	return 0;
+}
+
+static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = {
+	.dpms = dce_virtual_crtc_dpms,
+	.mode_fixup = dce_virtual_crtc_mode_fixup,
+	.mode_set = dce_virtual_crtc_mode_set,
+	.mode_set_base = dce_virtual_crtc_set_base,
+	.mode_set_base_atomic = dce_virtual_crtc_set_base_atomic,
+	.prepare = dce_virtual_crtc_prepare,
+	.commit = dce_virtual_crtc_commit,
+	.load_lut = dce_virtual_crtc_load_lut,
+	.disable = dce_virtual_crtc_disable,
+};
+
+static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
+{
+	struct amdgpu_crtc *amdgpu_crtc;
+	int i;
+
+	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
+			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+	if (amdgpu_crtc == NULL)
+		return -ENOMEM;
+
+	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
+	amdgpu_crtc->crtc_id = index;
+	adev->mode_info.crtcs[index] = amdgpu_crtc;
+
+	for (i = 0; i < 256; i++) {
+		amdgpu_crtc->lut_r[i] = i << 2;
+		amdgpu_crtc->lut_g[i] = i << 2;
+		amdgpu_crtc->lut_b[i] = i << 2;
+	}
+
+	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+	amdgpu_crtc->encoder = NULL;
+	amdgpu_crtc->connector = NULL;
+	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
+
+	return 0;
+}
+
+static int dce_virtual_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
+	dce_virtual_set_display_funcs(adev);
+	dce_virtual_set_irq_funcs(adev);
+
+	adev->mode_info.num_crtc = 1;
+	adev->mode_info.num_hpd = 1;
+	adev->mode_info.num_dig = 1;
+	return 0;
+}
+
+static bool dce_virtual_get_connector_info(struct amdgpu_device *adev)
+{
+	struct amdgpu_i2c_bus_rec ddc_bus;
+	struct amdgpu_router router;
+	struct amdgpu_hpd hpd;
+
+	/* look up gpio for ddc, hpd */
+	ddc_bus.valid = false;
+	hpd.hpd = AMDGPU_HPD_NONE;
+	/* needed for aux chan transactions */
+	ddc_bus.hpd = hpd.hpd;
+
+	memset(&router, 0, sizeof(router));
+	router.ddc_valid = false;
+	router.cd_valid = false;
+	amdgpu_display_add_connector(adev,
+				      0,
+				      ATOM_DEVICE_CRT1_SUPPORT,
+				      DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus,
+				      CONNECTOR_OBJECT_ID_VIRTUAL,
+				      &hpd,
+				      &router);
+
+	amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL,
+							ATOM_DEVICE_CRT1_SUPPORT,
+							0);
+
+	amdgpu_link_encoder_connector(adev->ddev);
+
+	return true;
+}
+
+static int dce_virtual_sw_init(void *handle)
+{
+	int r, i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = amdgpu_irq_add_id(adev, 229, &adev->crtc_irq);
+	if (r)
+		return r;
+
+	adev->ddev->max_vblank_count = 0;
+
+	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+
+	adev->ddev->mode_config.max_width = 16384;
+	adev->ddev->mode_config.max_height = 16384;
+
+	adev->ddev->mode_config.preferred_depth = 24;
+	adev->ddev->mode_config.prefer_shadow = 1;
+
+	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+	r = amdgpu_modeset_create_props(adev);
+	if (r)
+		return r;
+
+	adev->ddev->mode_config.max_width = 16384;
+	adev->ddev->mode_config.max_height = 16384;
+
+	/* allocate crtcs */
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		r = dce_virtual_crtc_init(adev, i);
+		if (r)
+			return r;
+	}
+
+	dce_virtual_get_connector_info(adev);
+	amdgpu_print_display_setup(adev->ddev);
+
+	drm_kms_helper_poll_init(adev->ddev);
+
+	adev->mode_info.mode_config_initialized = true;
+	return 0;
+}
+
+static int dce_virtual_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	kfree(adev->mode_info.bios_hardcoded_edid);
+
+	drm_kms_helper_poll_fini(adev->ddev);
+
+	drm_mode_config_cleanup(adev->ddev);
+	adev->mode_info.mode_config_initialized = false;
+	return 0;
+}
+
+static int dce_virtual_hw_init(void *handle)
+{
+	return 0;
+}
+
+static int dce_virtual_hw_fini(void *handle)
+{
+	return 0;
+}
+
+static int dce_virtual_suspend(void *handle)
+{
+	return dce_virtual_hw_fini(handle);
+}
+
+static int dce_virtual_resume(void *handle)
+{
+	return dce_virtual_hw_init(handle);
+}
+
+static bool dce_virtual_is_idle(void *handle)
+{
+	return true;
+}
+
+static int dce_virtual_wait_for_idle(void *handle)
+{
+	return 0;
+}
+
+static int dce_virtual_soft_reset(void *handle)
+{
+	return 0;
+}
+
+static int dce_virtual_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int dce_virtual_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs dce_virtual_ip_funcs = {
+	.name = "dce_virtual",
+	.early_init = dce_virtual_early_init,
+	.late_init = NULL,
+	.sw_init = dce_virtual_sw_init,
+	.sw_fini = dce_virtual_sw_fini,
+	.hw_init = dce_virtual_hw_init,
+	.hw_fini = dce_virtual_hw_fini,
+	.suspend = dce_virtual_suspend,
+	.resume = dce_virtual_resume,
+	.is_idle = dce_virtual_is_idle,
+	.wait_for_idle = dce_virtual_wait_for_idle,
+	.soft_reset = dce_virtual_soft_reset,
+	.set_clockgating_state = dce_virtual_set_clockgating_state,
+	.set_powergating_state = dce_virtual_set_powergating_state,
+};
+
+/* these are handled by the primary encoders */
+static void dce_virtual_encoder_prepare(struct drm_encoder *encoder)
+{
+	return;
+}
+
+static void dce_virtual_encoder_commit(struct drm_encoder *encoder)
+{
+	return;
+}
+
+static void
+dce_virtual_encoder_mode_set(struct drm_encoder *encoder,
+		      struct drm_display_mode *mode,
+		      struct drm_display_mode *adjusted_mode)
+{
+	return;
+}
+
+static void dce_virtual_encoder_disable(struct drm_encoder *encoder)
+{
+	return;
+}
+
+static void
+dce_virtual_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	return;
+}
+
+static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder,
+				    const struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode)
+{
+
+	/* set the active encoder to connector routing */
+	amdgpu_encoder_set_active_device(encoder);
+
+	return true;
+}
+
+static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = {
+	.dpms = dce_virtual_encoder_dpms,
+	.mode_fixup = dce_virtual_encoder_mode_fixup,
+	.prepare = dce_virtual_encoder_prepare,
+	.mode_set = dce_virtual_encoder_mode_set,
+	.commit = dce_virtual_encoder_commit,
+	.disable = dce_virtual_encoder_disable,
+};
+
+static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+	kfree(amdgpu_encoder->enc_priv);
+	drm_encoder_cleanup(encoder);
+	kfree(amdgpu_encoder);
+}
+
+static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
+	.destroy = dce_virtual_encoder_destroy,
+};
+
+static void dce_virtual_encoder_add(struct amdgpu_device *adev,
+				 uint32_t encoder_enum,
+				 uint32_t supported_device,
+				 u16 caps)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_encoder *encoder;
+	struct amdgpu_encoder *amdgpu_encoder;
+
+	/* see if we already added it */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		amdgpu_encoder = to_amdgpu_encoder(encoder);
+		if (amdgpu_encoder->encoder_enum == encoder_enum) {
+			amdgpu_encoder->devices |= supported_device;
+			return;
+		}
+
+	}
+
+	/* add a new one */
+	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
+	if (!amdgpu_encoder)
+		return;
+
+	encoder = &amdgpu_encoder->base;
+	encoder->possible_crtcs = 0x1;
+	amdgpu_encoder->enc_priv = NULL;
+	amdgpu_encoder->encoder_enum = encoder_enum;
+	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+	amdgpu_encoder->devices = supported_device;
+	amdgpu_encoder->rmx_type = RMX_OFF;
+	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
+	amdgpu_encoder->is_ext_encoder = false;
+	amdgpu_encoder->caps = caps;
+
+	drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs,
+					 DRM_MODE_ENCODER_VIRTUAL, NULL);
+	drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
+	DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id);
+}
+
+static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
+	.set_vga_render_state = &dce_virtual_set_vga_render_state,
+	.bandwidth_update = &dce_virtual_bandwidth_update,
+	.vblank_get_counter = &dce_virtual_vblank_get_counter,
+	.vblank_wait = &dce_virtual_vblank_wait,
+	.is_display_hung = &dce_virtual_is_display_hung,
+	.backlight_set_level = NULL,
+	.backlight_get_level = NULL,
+	.hpd_sense = &dce_virtual_hpd_sense,
+	.hpd_set_polarity = &dce_virtual_hpd_set_polarity,
+	.hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg,
+	.page_flip = &dce_virtual_page_flip,
+	.page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
+	.add_encoder = &dce_virtual_encoder_add,
+	.add_connector = &amdgpu_connector_add,
+	.stop_mc_access = &dce_virtual_stop_mc_access,
+	.resume_mc_access = &dce_virtual_resume_mc_access,
+};
+
+static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
+{
+	if (adev->mode_info.funcs == NULL)
+		adev->mode_info.funcs = &dce_virtual_display_funcs;
+}
+
+static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
+{
+	struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info ,vblank_timer);
+	struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device ,mode_info);
+	unsigned crtc = 0;
+	drm_handle_vblank(adev->ddev, crtc);
+	dce_virtual_pageflip_irq(adev, NULL, NULL);
+	hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+	return HRTIMER_NORESTART;
+}
+
+static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+						     int crtc,
+						     enum amdgpu_interrupt_state state)
+{
+	if (crtc >= adev->mode_info.num_crtc) {
+		DRM_DEBUG("invalid crtc %d\n", crtc);
+		return;
+	}
+
+	if (state && !adev->mode_info.vsync_timer_enabled) {
+		DRM_DEBUG("Enable software vsync timer\n");
+		hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
+		adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle;
+		hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+	} else if (!state && adev->mode_info.vsync_timer_enabled) {
+		DRM_DEBUG("Disable software vsync timer\n");
+		hrtimer_cancel(&adev->mode_info.vblank_timer);
+	}
+
+	adev->mode_info.vsync_timer_enabled = state;
+	DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
+}
+
+
+static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
+                                       struct amdgpu_irq_src *source,
+                                       unsigned type,
+                                       enum amdgpu_interrupt_state state)
+{
+	switch (type) {
+	case AMDGPU_CRTC_IRQ_VBLANK1:
+		dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev,
+					  int crtc)
+{
+	if (crtc >= adev->mode_info.num_crtc) {
+		DRM_DEBUG("invalid crtc %d\n", crtc);
+		return;
+	}
+}
+
+static int dce_virtual_crtc_irq(struct amdgpu_device *adev,
+			      struct amdgpu_irq_src *source,
+			      struct amdgpu_iv_entry *entry)
+{
+	unsigned crtc = 0;
+	unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1;
+
+	dce_virtual_crtc_vblank_int_ack(adev, crtc);
+
+	if (amdgpu_irq_enabled(adev, source, irq_type)) {
+		drm_handle_vblank(adev->ddev, crtc);
+	}
+	dce_virtual_pageflip_irq(adev, NULL, NULL);
+	DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+	return 0;
+}
+
+static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev,
+					    struct amdgpu_irq_src *src,
+					    unsigned type,
+					    enum amdgpu_interrupt_state state)
+{
+	if (type >= adev->mode_info.num_crtc) {
+		DRM_ERROR("invalid pageflip crtc %d\n", type);
+		return -EINVAL;
+	}
+	DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state);
+
+	return 0;
+}
+
+static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
+				  struct amdgpu_irq_src *source,
+				  struct amdgpu_iv_entry *entry)
+{
+	unsigned long flags;
+	unsigned crtc_id = 0;
+	struct amdgpu_crtc *amdgpu_crtc;
+	struct amdgpu_flip_work *works;
+
+	crtc_id = 0;
+	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+	if (crtc_id >= adev->mode_info.num_crtc) {
+		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+		return -EINVAL;
+	}
+
+	/* IRQ could occur when in initial stage */
+	if (amdgpu_crtc == NULL)
+		return 0;
+
+	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	works = amdgpu_crtc->pflip_works;
+	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
+			"AMDGPU_FLIP_SUBMITTED(%d)\n",
+			amdgpu_crtc->pflip_status,
+			AMDGPU_FLIP_SUBMITTED);
+		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+		return 0;
+	}
+
+	/* page flip completed. clean up */
+	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+	amdgpu_crtc->pflip_works = NULL;
+
+	/* wakeup usersapce */
+	if (works->event)
+		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
+
+	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+	drm_crtc_vblank_put(&amdgpu_crtc->base);
+	schedule_work(&works->unpin_work);
+
+	return 0;
+}
+
+static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
+	.set = dce_virtual_set_crtc_irq_state,
+	.process = dce_virtual_crtc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = {
+	.set = dce_virtual_set_pageflip_irq_state,
+	.process = dce_virtual_pageflip_irq,
+};
+
+static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+	adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
+
+	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+	adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
similarity index 75%
rename from drivers/gpu/drm/amd/amdgpu/iceland_smum.h
rename to drivers/gpu/drm/amd/amdgpu/dce_virtual.h
index 5983e31..e239243 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
@@ -21,21 +21,11 @@
  *
  */
 
-#ifndef ICELAND_SMUM_H
-#define ICELAND_SMUM_H
+#ifndef __DCE_VIRTUAL_H__
+#define __DCE_VIRTUAL_H__
 
-#include "ppsmc.h"
-
-extern int iceland_smu_init(struct amdgpu_device *adev);
-extern int iceland_smu_fini(struct amdgpu_device *adev);
-extern int iceland_smu_start(struct amdgpu_device *adev);
-
-struct iceland_smu_private_data
-{
-	uint8_t *header;
-	uint8_t *mec_image;
-	uint32_t header_addr_high;
-	uint32_t header_addr_low;
-};
+extern const struct amd_ip_funcs dce_virtual_ip_funcs;
+#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
 
 #endif
+
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
deleted file mode 100644
index ed03b75..0000000
--- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "fiji_smum.h"
-
-MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
-
-static void fiji_dpm_set_funcs(struct amdgpu_device *adev);
-
-static int fiji_dpm_early_init(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	fiji_dpm_set_funcs(adev);
-
-	return 0;
-}
-
-static int fiji_dpm_init_microcode(struct amdgpu_device *adev)
-{
-	char fw_name[30] = "amdgpu/fiji_smc.bin";
-	int err;
-
-	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
-	if (err)
-		goto out;
-	err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
-	if (err) {
-		DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
-		release_firmware(adev->pm.fw);
-		adev->pm.fw = NULL;
-	}
-	return err;
-}
-
-static int fiji_dpm_sw_init(void *handle)
-{
-	int ret;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	ret = fiji_dpm_init_microcode(adev);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int fiji_dpm_sw_fini(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	release_firmware(adev->pm.fw);
-	adev->pm.fw = NULL;
-
-	return 0;
-}
-
-static int fiji_dpm_hw_init(void *handle)
-{
-	int ret;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	mutex_lock(&adev->pm.mutex);
-
-	ret = fiji_smu_init(adev);
-	if (ret) {
-		DRM_ERROR("SMU initialization failed\n");
-		goto fail;
-	}
-
-	ret = fiji_smu_start(adev);
-	if (ret) {
-		DRM_ERROR("SMU start failed\n");
-		goto fail;
-	}
-
-	mutex_unlock(&adev->pm.mutex);
-	return 0;
-
-fail:
-	adev->firmware.smu_load = false;
-	mutex_unlock(&adev->pm.mutex);
-	return -EINVAL;
-}
-
-static int fiji_dpm_hw_fini(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	mutex_lock(&adev->pm.mutex);
-	fiji_smu_fini(adev);
-	mutex_unlock(&adev->pm.mutex);
-	return 0;
-}
-
-static int fiji_dpm_suspend(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	fiji_dpm_hw_fini(adev);
-
-	return 0;
-}
-
-static int fiji_dpm_resume(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	fiji_dpm_hw_init(adev);
-
-	return 0;
-}
-
-static int fiji_dpm_set_clockgating_state(void *handle,
-			enum amd_clockgating_state state)
-{
-	return 0;
-}
-
-static int fiji_dpm_set_powergating_state(void *handle,
-			enum amd_powergating_state state)
-{
-	return 0;
-}
-
-const struct amd_ip_funcs fiji_dpm_ip_funcs = {
-	.name = "fiji_dpm",
-	.early_init = fiji_dpm_early_init,
-	.late_init = NULL,
-	.sw_init = fiji_dpm_sw_init,
-	.sw_fini = fiji_dpm_sw_fini,
-	.hw_init = fiji_dpm_hw_init,
-	.hw_fini = fiji_dpm_hw_fini,
-	.suspend = fiji_dpm_suspend,
-	.resume = fiji_dpm_resume,
-	.is_idle = NULL,
-	.wait_for_idle = NULL,
-	.soft_reset = NULL,
-	.set_clockgating_state = fiji_dpm_set_clockgating_state,
-	.set_powergating_state = fiji_dpm_set_powergating_state,
-};
-
-static const struct amdgpu_dpm_funcs fiji_dpm_funcs = {
-	.get_temperature = NULL,
-	.pre_set_power_state = NULL,
-	.set_power_state = NULL,
-	.post_set_power_state = NULL,
-	.display_configuration_changed = NULL,
-	.get_sclk = NULL,
-	.get_mclk = NULL,
-	.print_power_state = NULL,
-	.debugfs_print_current_performance_level = NULL,
-	.force_performance_level = NULL,
-	.vblank_too_short = NULL,
-	.powergate_uvd = NULL,
-};
-
-static void fiji_dpm_set_funcs(struct amdgpu_device *adev)
-{
-	if (NULL == adev->pm.funcs)
-		adev->pm.funcs = &fiji_dpm_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
deleted file mode 100644
index b3e19ba..0000000
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ /dev/null
@@ -1,863 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "fiji_ppsmc.h"
-#include "fiji_smum.h"
-#include "smu_ucode_xfer_vi.h"
-#include "amdgpu_ucode.h"
-
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-
-#define FIJI_SMC_SIZE 0x20000
-
-static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
-{
-	uint32_t val;
-
-	if (smc_address & 3)
-		return -EINVAL;
-
-	if ((smc_address + 3) > limit)
-		return -EINVAL;
-
-	WREG32(mmSMC_IND_INDEX_0, smc_address);
-
-	val = RREG32(mmSMC_IND_ACCESS_CNTL);
-	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-	WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
-	return 0;
-}
-
-static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
-{
-	uint32_t addr;
-	uint32_t data, orig_data;
-	int result = 0;
-	uint32_t extra_shift;
-	unsigned long flags;
-
-	if (smc_start_address & 3)
-		return -EINVAL;
-
-	if ((smc_start_address + byte_count) > limit)
-		return -EINVAL;
-
-	addr = smc_start_address;
-
-	spin_lock_irqsave(&adev->smc_idx_lock, flags);
-	while (byte_count >= 4) {
-		/* Bytes are written into the SMC addres space with the MSB first */
-		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
-		result = fiji_set_smc_sram_address(adev, addr, limit);
-
-		if (result)
-			goto out;
-
-		WREG32(mmSMC_IND_DATA_0, data);
-
-		src += 4;
-		byte_count -= 4;
-		addr += 4;
-	}
-
-	if (0 != byte_count) {
-		/* Now write odd bytes left, do a read modify write cycle */
-		data = 0;
-
-		result = fiji_set_smc_sram_address(adev, addr, limit);
-		if (result)
-			goto out;
-
-		orig_data = RREG32(mmSMC_IND_DATA_0);
-		extra_shift = 8 * (4 - byte_count);
-
-		while (byte_count > 0) {
-			data = (data << 8) + *src++;
-			byte_count--;
-		}
-
-		data <<= extra_shift;
-		data |= (orig_data & ~((~0UL) << extra_shift));
-
-		result = fiji_set_smc_sram_address(adev, addr, limit);
-		if (result)
-			goto out;
-
-		WREG32(mmSMC_IND_DATA_0, data);
-	}
-
-out:
-	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-	return result;
-}
-
-static int fiji_program_jump_on_start(struct amdgpu_device *adev)
-{
-	static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
-	fiji_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-
-	return 0;
-}
-
-static bool fiji_is_smc_ram_running(struct amdgpu_device *adev)
-{
-	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-	val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
-
-	return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
-}
-
-static int wait_smu_response(struct amdgpu_device *adev)
-{
-	int i;
-	uint32_t val;
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		val = RREG32(mmSMC_RESP_0);
-		if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout)
-		return -EINVAL;
-
-	return 0;
-}
-
-static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev)
-{
-	if (wait_smu_response(adev)) {
-		DRM_ERROR("Failed to send previous message\n");
-		return -EINVAL;
-	}
-
-	WREG32(mmSMC_MSG_ARG_0, 0x20000);
-	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
-	if (wait_smu_response(adev)) {
-		DRM_ERROR("Failed to send message\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
-{
-	if (!fiji_is_smc_ram_running(adev))
-	{
-		return -EINVAL;
-	}
-
-	if (wait_smu_response(adev)) {
-		DRM_ERROR("Failed to send previous message\n");
-		return -EINVAL;
-	}
-
-	WREG32(mmSMC_MESSAGE_0, msg);
-
-	if (wait_smu_response(adev)) {
-		DRM_ERROR("Failed to send message\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
-						PPSMC_Msg msg)
-{
-	if (wait_smu_response(adev)) {
-		DRM_ERROR("Failed to send previous message\n");
-		return -EINVAL;
-	}
-
-	WREG32(mmSMC_MESSAGE_0, msg);
-
-	return 0;
-}
-
-static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
-						PPSMC_Msg msg,
-						uint32_t parameter)
-{
-	if (!fiji_is_smc_ram_running(adev))
-		return -EINVAL;
-
-	if (wait_smu_response(adev)) {
-		DRM_ERROR("Failed to send previous message\n");
-		return -EINVAL;
-	}
-
-	WREG32(mmSMC_MSG_ARG_0, parameter);
-
-	return fiji_send_msg_to_smc(adev, msg);
-}
-
-static int fiji_send_msg_to_smc_with_parameter_without_waiting(
-					struct amdgpu_device *adev,
-					PPSMC_Msg msg, uint32_t parameter)
-{
-	if (wait_smu_response(adev)) {
-		DRM_ERROR("Failed to send previous message\n");
-		return -EINVAL;
-	}
-
-	WREG32(mmSMC_MSG_ARG_0, parameter);
-
-	return fiji_send_msg_to_smc_without_waiting(adev, msg);
-}
-
-#if 0 /* not used yet */
-static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
-	int i;
-	uint32_t val;
-
-	if (!fiji_is_smc_ram_running(adev))
-		return -EINVAL;
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-		if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout)
-		return -EINVAL;
-
-	return 0;
-}
-#endif
-
-static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev)
-{
-	const struct smc_firmware_header_v1_0 *hdr;
-	uint32_t ucode_size;
-	uint32_t ucode_start_address;
-	const uint8_t *src;
-	uint32_t val;
-	uint32_t byte_count;
-	uint32_t *data;
-	unsigned long flags;
-
-	if (!adev->pm.fw)
-		return -EINVAL;
-
-	/* Skip SMC ucode loading on SR-IOV capable boards.
-	 * vbios does this for us in asic_init in that case.
-	 */
-	if (adev->virtualization.supports_sr_iov)
-		return 0;
-
-	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
-	amdgpu_ucode_print_smc_hdr(&hdr->header);
-
-	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
-	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
-	ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
-	src = (const uint8_t *)
-		(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-
-	if (ucode_size & 3) {
-		DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
-		return -EINVAL;
-	}
-
-	if (ucode_size > FIJI_SMC_SIZE) {
-		DRM_ERROR("SMC address is beyond the SMC RAM area\n");
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&adev->smc_idx_lock, flags);
-	WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
-
-	val = RREG32(mmSMC_IND_ACCESS_CNTL);
-	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
-	WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
-	byte_count = ucode_size;
-	data = (uint32_t *)src;
-	for (; byte_count >= 4; data++, byte_count -= 4)
-		WREG32(mmSMC_IND_DATA_0, data[0]);
-
-	val = RREG32(mmSMC_IND_ACCESS_CNTL);
-	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-	WREG32(mmSMC_IND_ACCESS_CNTL, val);
-	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
-	return 0;
-}
-
-#if 0 /* not used yet */
-static int fiji_read_smc_sram_dword(struct amdgpu_device *adev,
-				uint32_t smc_address,
-				uint32_t *value,
-				uint32_t limit)
-{
-	int result;
-	unsigned long flags;
-
-	spin_lock_irqsave(&adev->smc_idx_lock, flags);
-	result = fiji_set_smc_sram_address(adev, smc_address, limit);
-	if (result == 0)
-		*value = RREG32(mmSMC_IND_DATA_0);
-	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-	return result;
-}
-
-static int fiji_write_smc_sram_dword(struct amdgpu_device *adev,
-				uint32_t smc_address,
-				uint32_t value,
-				uint32_t limit)
-{
-	int result;
-	unsigned long flags;
-
-	spin_lock_irqsave(&adev->smc_idx_lock, flags);
-	result = fiji_set_smc_sram_address(adev, smc_address, limit);
-	if (result == 0)
-		WREG32(mmSMC_IND_DATA_0, value);
-	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-	return result;
-}
-
-static int fiji_smu_stop_smc(struct amdgpu_device *adev)
-{
-	uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
-	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
-	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
-	return 0;
-}
-#endif
-
-static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type)
-{
-	switch (fw_type) {
-		case UCODE_ID_SDMA0:
-			return AMDGPU_UCODE_ID_SDMA0;
-		case UCODE_ID_SDMA1:
-			return AMDGPU_UCODE_ID_SDMA1;
-		case UCODE_ID_CP_CE:
-			return AMDGPU_UCODE_ID_CP_CE;
-		case UCODE_ID_CP_PFP:
-			return AMDGPU_UCODE_ID_CP_PFP;
-		case UCODE_ID_CP_ME:
-			return AMDGPU_UCODE_ID_CP_ME;
-		case UCODE_ID_CP_MEC:
-		case UCODE_ID_CP_MEC_JT1:
-		case UCODE_ID_CP_MEC_JT2:
-			return AMDGPU_UCODE_ID_CP_MEC1;
-		case UCODE_ID_RLC_G:
-			return AMDGPU_UCODE_ID_RLC_G;
-		default:
-			DRM_ERROR("ucode type is out of range!\n");
-			return AMDGPU_UCODE_ID_MAXIMUM;
-	}
-}
-
-static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
-						uint32_t fw_type,
-						struct SMU_Entry *entry)
-{
-	enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type);
-	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
-	const struct gfx_firmware_header_v1_0 *header = NULL;
-	uint64_t gpu_addr;
-	uint32_t data_size;
-
-	if (ucode->fw == NULL)
-		return -EINVAL;
-	gpu_addr  = ucode->mc_addr;
-	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
-	data_size = le32_to_cpu(header->header.ucode_size_bytes);
-
-	if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
-		(fw_type == UCODE_ID_CP_MEC_JT2)) {
-		gpu_addr += le32_to_cpu(header->jt_offset) << 2;
-		data_size = le32_to_cpu(header->jt_size) << 2;
-	}
-
-	entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
-	entry->id = (uint16_t)fw_type;
-	entry->image_addr_high = upper_32_bits(gpu_addr);
-	entry->image_addr_low = lower_32_bits(gpu_addr);
-	entry->meta_data_addr_high = 0;
-	entry->meta_data_addr_low = 0;
-	entry->data_size_byte = data_size;
-	entry->num_register_entries = 0;
-
-	if (fw_type == UCODE_ID_RLC_G)
-		entry->flags = 1;
-	else
-		entry->flags = 0;
-
-	return 0;
-}
-
-static int fiji_smu_request_load_fw(struct amdgpu_device *adev)
-{
-	struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv;
-	struct SMU_DRAMData_TOC *toc;
-	uint32_t fw_to_load;
-
-	WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
-
-	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
-	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
-
-	toc = (struct SMU_DRAMData_TOC *)private->header;
-	toc->num_entries = 0;
-	toc->structure_version = 1;
-
-	if (!adev->firmware.smu_load)
-		return 0;
-
-	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for RLC\n");
-		return -EINVAL;
-	}
-
-	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for CE\n");
-		return -EINVAL;
-	}
-
-	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for PFP\n");
-		return -EINVAL;
-	}
-
-	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for ME\n");
-		return -EINVAL;
-	}
-
-	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for MEC\n");
-		return -EINVAL;
-	}
-
-	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
-		return -EINVAL;
-	}
-
-	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
-		return -EINVAL;
-	}
-
-	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for SDMA0\n");
-		return -EINVAL;
-	}
-
-	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for SDMA1\n");
-		return -EINVAL;
-	}
-
-	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
-	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
-
-	fw_to_load = UCODE_ID_RLC_G_MASK |
-			UCODE_ID_SDMA0_MASK |
-			UCODE_ID_SDMA1_MASK |
-			UCODE_ID_CP_CE_MASK |
-			UCODE_ID_CP_ME_MASK |
-			UCODE_ID_CP_PFP_MASK |
-			UCODE_ID_CP_MEC_MASK;
-
-	if (fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
-		DRM_ERROR("Fail to request SMU load ucode\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type)
-{
-	switch (fw_type) {
-		case AMDGPU_UCODE_ID_SDMA0:
-			return UCODE_ID_SDMA0_MASK;
-		case AMDGPU_UCODE_ID_SDMA1:
-			return UCODE_ID_SDMA1_MASK;
-		case AMDGPU_UCODE_ID_CP_CE:
-			return UCODE_ID_CP_CE_MASK;
-		case AMDGPU_UCODE_ID_CP_PFP:
-			return UCODE_ID_CP_PFP_MASK;
-		case AMDGPU_UCODE_ID_CP_ME:
-			return UCODE_ID_CP_ME_MASK;
-		case AMDGPU_UCODE_ID_CP_MEC1:
-			return UCODE_ID_CP_MEC_MASK;
-		case AMDGPU_UCODE_ID_CP_MEC2:
-			return UCODE_ID_CP_MEC_MASK;
-		case AMDGPU_UCODE_ID_RLC_G:
-			return UCODE_ID_RLC_G_MASK;
-		default:
-			DRM_ERROR("ucode type is out of range!\n");
-			return 0;
-	}
-}
-
-static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev,
-					uint32_t fw_type)
-{
-	uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type);
-	int i;
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout) {
-		DRM_ERROR("check firmware loading failed\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev)
-{
-	int result;
-	uint32_t val;
-	int i;
-
-	/* Assert reset */
-	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
-	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-	result = fiji_smu_upload_firmware_image(adev);
-	if (result)
-		return result;
-
-	/* Clear status */
-	WREG32_SMC(ixSMU_STATUS, 0);
-
-	/* Enable clock */
-	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
-	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
-	/* De-assert reset */
-	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
-	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-	/* Set SMU Auto Start */
-	val = RREG32_SMC(ixSMU_INPUT_DATA);
-	val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
-	WREG32_SMC(ixSMU_INPUT_DATA, val);
-
-	/* Clear firmware interrupt enable flag */
-	WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		val = RREG32_SMC(ixRCU_UC_EVENTS);
-		if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout) {
-		DRM_ERROR("Interrupt is not enabled by firmware\n");
-		return -EINVAL;
-	}
-
-	/* Call Test SMU message with 0x20000 offset
-	 * to trigger SMU start
-	 */
-	fiji_send_msg_to_smc_offset(adev);
-	DRM_INFO("[FM]try triger smu start\n");
-	/* Wait for done bit to be set */
-	for (i = 0; i < adev->usec_timeout; i++) {
-		val = RREG32_SMC(ixSMU_STATUS);
-		if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout) {
-		DRM_ERROR("Timeout for SMU start\n");
-		return -EINVAL;
-	}
-
-	/* Check pass/failed indicator */
-	val = RREG32_SMC(ixSMU_STATUS);
-	if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
-		DRM_ERROR("SMU Firmware start failed\n");
-		return -EINVAL;
-	}
-	DRM_INFO("[FM]smu started\n");
-	/* Wait for firmware to initialize */
-	for (i = 0; i < adev->usec_timeout; i++) {
-		val = RREG32_SMC(ixFIRMWARE_FLAGS);
-		if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout) {
-		DRM_ERROR("SMU firmware initialization failed\n");
-		return -EINVAL;
-	}
-	DRM_INFO("[FM]smu initialized\n");
-
-	return 0;
-}
-
-static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
-{
-	int i, result;
-	uint32_t val;
-
-	/* wait for smc boot up */
-	for (i = 0; i < adev->usec_timeout; i++) {
-		val = RREG32_SMC(ixRCU_UC_EVENTS);
-		val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
-		if (val)
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout) {
-		DRM_ERROR("SMC boot sequence is not completed\n");
-		return -EINVAL;
-	}
-
-	/* Clear firmware interrupt enable flag */
-	WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
-	/* Assert reset */
-	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
-	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-	result = fiji_smu_upload_firmware_image(adev);
-	if (result)
-		return result;
-
-	/* Set smc instruct start point at 0x0 */
-	fiji_program_jump_on_start(adev);
-
-	/* Enable clock */
-	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
-	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
-	/* De-assert reset */
-	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
-	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-	/* Wait for firmware to initialize */
-	for (i = 0; i < adev->usec_timeout; i++) {
-		val = RREG32_SMC(ixFIRMWARE_FLAGS);
-		if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout) {
-		DRM_ERROR("Timeout for SMC firmware initialization\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-int fiji_smu_start(struct amdgpu_device *adev)
-{
-	int result;
-	uint32_t val;
-
-	if (!fiji_is_smc_ram_running(adev)) {
-		val = RREG32_SMC(ixSMU_FIRMWARE);
-		if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
-			DRM_INFO("[FM]start smu in nonprotection mode\n");
-			result = fiji_smu_start_in_non_protection_mode(adev);
-			if (result)
-				return result;
-		} else {
-			DRM_INFO("[FM]start smu in protection mode\n");
-			result = fiji_smu_start_in_protection_mode(adev);
-			if (result)
-				return result;
-		}
-	}
-
-	return fiji_smu_request_load_fw(adev);
-}
-
-static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = {
-	.check_fw_load_finish = fiji_smu_check_fw_load_finish,
-	.request_smu_load_fw = NULL,
-	.request_smu_specific_fw = NULL,
-};
-
-int fiji_smu_init(struct amdgpu_device *adev)
-{
-	struct fiji_smu_private_data *private;
-	uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
-	uint32_t smu_internal_buffer_size = 200*4096;
-	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
-	struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
-	uint64_t mc_addr;
-	void *toc_buf_ptr;
-	void *smu_buf_ptr;
-	int ret;
-
-	private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL);
-	if (NULL == private)
-		return -ENOMEM;
-
-	/* allocate firmware buffers */
-	if (adev->firmware.smu_load)
-		amdgpu_ucode_init_bo(adev);
-
-	adev->smu.priv = private;
-	adev->smu.fw_flags = 0;
-
-	/* Allocate FW image data structure and header buffer */
-	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
-			       true, AMDGPU_GEM_DOMAIN_VRAM,
-			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			       NULL, NULL, toc_buf);
-	if (ret) {
-		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
-		return -ENOMEM;
-	}
-
-	/* Allocate buffer for SMU internal buffer */
-	ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
-			       true, AMDGPU_GEM_DOMAIN_VRAM,
-			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			       NULL, NULL, smu_buf);
-	if (ret) {
-		DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
-		return -ENOMEM;
-	}
-
-	/* Retrieve GPU address for header buffer and internal buffer */
-	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
-	if (ret) {
-		amdgpu_bo_unref(&adev->smu.toc_buf);
-		DRM_ERROR("Failed to reserve the TOC buffer\n");
-		return -EINVAL;
-	}
-
-	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
-	if (ret) {
-		amdgpu_bo_unreserve(adev->smu.toc_buf);
-		amdgpu_bo_unref(&adev->smu.toc_buf);
-		DRM_ERROR("Failed to pin the TOC buffer\n");
-		return -EINVAL;
-	}
-
-	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
-	if (ret) {
-		amdgpu_bo_unreserve(adev->smu.toc_buf);
-		amdgpu_bo_unref(&adev->smu.toc_buf);
-		DRM_ERROR("Failed to map the TOC buffer\n");
-		return -EINVAL;
-	}
-
-	amdgpu_bo_unreserve(adev->smu.toc_buf);
-	private->header_addr_low = lower_32_bits(mc_addr);
-	private->header_addr_high = upper_32_bits(mc_addr);
-	private->header = toc_buf_ptr;
-
-	ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
-	if (ret) {
-		amdgpu_bo_unref(&adev->smu.smu_buf);
-		amdgpu_bo_unref(&adev->smu.toc_buf);
-		DRM_ERROR("Failed to reserve the SMU internal buffer\n");
-		return -EINVAL;
-	}
-
-	ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
-	if (ret) {
-		amdgpu_bo_unreserve(adev->smu.smu_buf);
-		amdgpu_bo_unref(&adev->smu.smu_buf);
-		amdgpu_bo_unref(&adev->smu.toc_buf);
-		DRM_ERROR("Failed to pin the SMU internal buffer\n");
-		return -EINVAL;
-	}
-
-	ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
-	if (ret) {
-		amdgpu_bo_unreserve(adev->smu.smu_buf);
-		amdgpu_bo_unref(&adev->smu.smu_buf);
-		amdgpu_bo_unref(&adev->smu.toc_buf);
-		DRM_ERROR("Failed to map the SMU internal buffer\n");
-		return -EINVAL;
-	}
-
-	amdgpu_bo_unreserve(adev->smu.smu_buf);
-	private->smu_buffer_addr_low = lower_32_bits(mc_addr);
-	private->smu_buffer_addr_high = upper_32_bits(mc_addr);
-
-	adev->smu.smumgr_funcs = &fiji_smumgr_funcs;
-
-	return 0;
-}
-
-int fiji_smu_fini(struct amdgpu_device *adev)
-{
-	amdgpu_bo_unref(&adev->smu.toc_buf);
-	amdgpu_bo_unref(&adev->smu.smu_buf);
-	kfree(adev->smu.priv);
-	adev->smu.priv = NULL;
-	if (adev->firmware.fw_buf)
-		amdgpu_ucode_fini_bo(adev);
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
new file mode 100644
index 0000000..40abb6b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -0,0 +1,3362 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "amdgpu_gfx.h"
+#include "amdgpu_ucode.h"
+#include "si/clearstate_si.h"
+#include "si/sid.h"
+
+#define GFX6_NUM_GFX_RINGS     1
+#define GFX6_NUM_COMPUTE_RINGS 2
+#define STATIC_PER_CU_PG_ENABLE                    (1 << 3)
+#define DYN_PER_CU_PG_ENABLE                       (1 << 2)
+#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
+#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET    0x3D
+
+
+static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
+static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);
+
+MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
+MODULE_FIRMWARE("radeon/tahiti_me.bin");
+MODULE_FIRMWARE("radeon/tahiti_ce.bin");
+MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
+
+MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
+MODULE_FIRMWARE("radeon/pitcairn_me.bin");
+MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
+MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
+
+MODULE_FIRMWARE("radeon/verde_pfp.bin");
+MODULE_FIRMWARE("radeon/verde_me.bin");
+MODULE_FIRMWARE("radeon/verde_ce.bin");
+MODULE_FIRMWARE("radeon/verde_rlc.bin");
+
+MODULE_FIRMWARE("radeon/oland_pfp.bin");
+MODULE_FIRMWARE("radeon/oland_me.bin");
+MODULE_FIRMWARE("radeon/oland_ce.bin");
+MODULE_FIRMWARE("radeon/oland_rlc.bin");
+
+MODULE_FIRMWARE("radeon/hainan_pfp.bin");
+MODULE_FIRMWARE("radeon/hainan_me.bin");
+MODULE_FIRMWARE("radeon/hainan_ce.bin");
+MODULE_FIRMWARE("radeon/hainan_rlc.bin");
+
+static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+//static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev);
+static void gfx_v6_0_init_pg(struct amdgpu_device *adev);
+
+
+static const u32 verde_rlc_save_restore_register_list[] =
+{
+	(0x8000 << 16) | (0x98f4 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x98f4 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0xe80 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0xe80 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x89bc >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x89bc >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x8c1c >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x8c1c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x98f0 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xe7c >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x9148 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x9148 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9150 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x897c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8d8c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xac54 >> 2),
+	0X00000000,
+	0x3,
+	(0x9c00 << 16) | (0x98f8 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9910 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9914 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9918 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x991c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9920 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9924 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9928 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x992c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9930 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9934 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9938 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x993c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9940 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9944 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9948 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x994c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9950 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9954 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9958 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x995c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9960 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9964 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9968 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x996c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9970 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9974 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9978 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x997c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9980 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9984 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9988 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x998c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8c00 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8c14 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8c04 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8c08 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x9b7c >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x9b7c >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0xe84 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0xe84 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x89c0 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x89c0 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x914c >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x914c >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x8c20 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x8c20 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x9354 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x9354 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9060 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9364 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9100 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x913c >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x90e0 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x90e4 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x90e8 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x90e0 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x90e4 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x90e8 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8bcc >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8b24 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x88c4 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8e50 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8c0c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8e58 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8e5c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9508 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x950c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9494 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xac0c >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xac10 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xac14 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xae00 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0xac08 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x88d4 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x88c8 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x88cc >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x89b0 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8b10 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x8a14 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9830 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9834 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9838 >> 2),
+	0x00000000,
+	(0x9c00 << 16) | (0x9a10 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x9870 >> 2),
+	0x00000000,
+	(0x8000 << 16) | (0x9874 >> 2),
+	0x00000000,
+	(0x8001 << 16) | (0x9870 >> 2),
+	0x00000000,
+	(0x8001 << 16) | (0x9874 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x9870 >> 2),
+	0x00000000,
+	(0x8040 << 16) | (0x9874 >> 2),
+	0x00000000,
+	(0x8041 << 16) | (0x9870 >> 2),
+	0x00000000,
+	(0x8041 << 16) | (0x9874 >> 2),
+	0x00000000,
+	0x00000000
+};
+
+static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
+{
+	const char *chip_name;
+	char fw_name[30];
+	int err;
+	const struct gfx_firmware_header_v1_0 *cp_hdr;
+	const struct rlc_firmware_header_v1_0 *rlc_hdr;
+
+	DRM_DEBUG("\n");
+
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+		chip_name = "tahiti";
+		break;
+	case CHIP_PITCAIRN:
+		chip_name = "pitcairn";
+		break;
+	case CHIP_VERDE:
+		chip_name = "verde";
+		break;
+	case CHIP_OLAND:
+		chip_name = "oland";
+		break;
+	case CHIP_HAINAN:
+		chip_name = "hainan";
+		break;
+	default: BUG();
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+	if (err)
+		goto out;
+	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+	err = amdgpu_ucode_validate(adev->gfx.me_fw);
+	if (err)
+		goto out;
+	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+	if (err)
+		goto out;
+	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+	rlc_hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+
+out:
+	if (err) {
+		printk(KERN_ERR
+		       "gfx6: Failed to load firmware \"%s\"\n",
+		       fw_name);
+		release_firmware(adev->gfx.pfp_fw);
+		adev->gfx.pfp_fw = NULL;
+		release_firmware(adev->gfx.me_fw);
+		adev->gfx.me_fw = NULL;
+		release_firmware(adev->gfx.ce_fw);
+		adev->gfx.ce_fw = NULL;
+		release_firmware(adev->gfx.rlc_fw);
+		adev->gfx.rlc_fw = NULL;
+	}
+	return err;
+}
+
+static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
+{
+	const u32 num_tile_mode_states = 32;
+	u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+
+	switch (adev->gfx.config.mem_row_size_in_kb) {
+	case 1:
+		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
+		break;
+	case 2:
+	default:
+		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
+		break;
+	case 4:
+		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
+		break;
+	}
+
+	if (adev->asic_type == CHIP_VERDE ||
+		adev->asic_type == CHIP_OLAND ||
+		adev->asic_type == CHIP_HAINAN) {
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+			switch (reg_offset) {
+			case 0:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 1: 
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 2:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 3:  
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 4:  
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 5:  
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 6:  
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 7:  
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 8: 
+				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 9:  
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 10:  
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 11:  
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 12:  
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 13:  
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 14:  
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 15:  
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 16:  
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 17:  
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 21:  
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 22:  
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 23: 
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 24: 
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 25: 
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+						 NUM_BANKS(ADDR_SURF_8_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			default:
+				gb_tile_moden = 0;
+				break;
+			}
+			adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+			WREG32(GB_TILE_MODE0 + reg_offset, gb_tile_moden);
+		}
+	} else if ((adev->asic_type == CHIP_TAHITI) || (adev->asic_type == CHIP_PITCAIRN)) {
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+			switch (reg_offset) {
+			case 0:  /* non-AA compressed depth or any compressed stencil */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 1:  /* 2xAA/4xAA compressed depth only */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 2:  /* 8xAA compressed depth only */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 8:  /* 1D and 1D Array Surfaces */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 9:  /* Displayable maps. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 10:  /* Display 8bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 11:  /* Display 16bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 12:  /* Display 32bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 13:  /* Thin. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 14:  /* Thin 8 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 15:  /* Thin 16 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 16:  /* Thin 32 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 17:  /* Thin 64 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 21:  /* 8 bpp PRT. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 22:  /* 16 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 23:  /* 32 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 24:  /* 64 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 25:  /* 128 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+						 NUM_BANKS(ADDR_SURF_8_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			default:
+				gb_tile_moden = 0;
+				break;
+			}
+			adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+			WREG32(GB_TILE_MODE0 + reg_offset, gb_tile_moden);
+		}
+	} else{
+
+		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+	}
+
+}
+
+static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
+				  u32 sh_num, u32 instance)
+{
+	u32 data;
+
+	if (instance == 0xffffffff)
+		data = INSTANCE_BROADCAST_WRITES;
+	else
+		data = INSTANCE_INDEX(instance);
+
+	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+		data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+	else if (se_num == 0xffffffff)
+		data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+	else if (sh_num == 0xffffffff)
+		data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+	else
+		data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
+	WREG32(GRBM_GFX_INDEX, data);
+}
+
+static u32 gfx_v6_0_create_bitmask(u32 bit_width)
+{
+	return (u32)(((u64)1 << bit_width) - 1);
+}
+
+static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev,
+				    u32 max_rb_num_per_se,
+				    u32 sh_per_se)
+{
+	u32 data, mask;
+
+	data = RREG32(CC_RB_BACKEND_DISABLE);
+	data &= BACKEND_DISABLE_MASK;
+	data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+
+	data >>= BACKEND_DISABLE_SHIFT;
+
+	mask = gfx_v6_0_create_bitmask(max_rb_num_per_se / sh_per_se);
+
+	return data & mask;
+}
+
+static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
+{
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+		*rconf |= RB_XSEL2(2) | RB_XSEL | PKR_MAP(2) | PKR_YSEL(1) |
+			  SE_MAP(2) | SE_XSEL(2) | SE_YSEL(2);
+		break;
+	case CHIP_VERDE:
+		*rconf |= RB_XSEL | PKR_MAP(2) | PKR_YSEL(1);
+		break;
+	case CHIP_OLAND:
+		*rconf |= RB_YSEL;
+		break;
+	case CHIP_HAINAN:
+		*rconf |= 0x0;
+		break;
+	default:
+		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+		break;
+	}
+}
+
+static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
+						    u32 raster_config, unsigned rb_mask,
+						    unsigned num_rb)
+{
+	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
+	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
+	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
+	unsigned rb_per_se = num_rb / num_se;
+	unsigned se_mask[4];
+	unsigned se;
+
+	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
+	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
+	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
+	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
+
+	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
+	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
+	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
+
+	for (se = 0; se < num_se; se++) {
+		unsigned raster_config_se = raster_config;
+		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
+		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
+		int idx = (se / 2) * 2;
+
+		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
+			raster_config_se &= ~SE_MAP_MASK;
+
+			if (!se_mask[idx]) {
+				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
+			} else {
+				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
+			}
+		}
+
+		pkr0_mask &= rb_mask;
+		pkr1_mask &= rb_mask;
+		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
+			raster_config_se &= ~PKR_MAP_MASK;
+
+			if (!pkr0_mask) {
+				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
+			} else {
+				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
+			}
+		}
+
+		if (rb_per_se >= 2) {
+			unsigned rb0_mask = 1 << (se * rb_per_se);
+			unsigned rb1_mask = rb0_mask << 1;
+
+			rb0_mask &= rb_mask;
+			rb1_mask &= rb_mask;
+			if (!rb0_mask || !rb1_mask) {
+				raster_config_se &= ~RB_MAP_PKR0_MASK;
+
+				if (!rb0_mask) {
+					raster_config_se |=
+						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
+				} else {
+					raster_config_se |=
+						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
+				}
+			}
+
+			if (rb_per_se > 2) {
+				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
+				rb1_mask = rb0_mask << 1;
+				rb0_mask &= rb_mask;
+				rb1_mask &= rb_mask;
+				if (!rb0_mask || !rb1_mask) {
+					raster_config_se &= ~RB_MAP_PKR1_MASK;
+
+					if (!rb0_mask) {
+						raster_config_se |=
+							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
+					} else {
+						raster_config_se |=
+							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
+					}
+				}
+			}
+		}
+
+		/* GRBM_GFX_INDEX has a different offset on SI */
+		gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+		WREG32(PA_SC_RASTER_CONFIG, raster_config_se);
+	}
+
+	/* GRBM_GFX_INDEX has a different offset on SI */
+	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+}
+
+static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
+			      u32 se_num, u32 sh_per_se,
+			      u32 max_rb_num_per_se)
+{
+	int i, j;
+	u32 data, mask;
+	u32 disabled_rbs = 0;
+	u32 enabled_rbs = 0;
+	unsigned num_rb_pipes;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (i = 0; i < se_num; i++) {
+		for (j = 0; j < sh_per_se; j++) {
+			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+			data = gfx_v6_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
+			disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+		}
+	}
+	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	mask = 1;
+	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
+		if (!(disabled_rbs & mask))
+			enabled_rbs |= mask;
+		mask <<= 1;
+	}
+
+	adev->gfx.config.backend_enable_mask = enabled_rbs;
+	adev->gfx.config.num_rbs = hweight32(enabled_rbs);
+
+	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
+			     adev->gfx.config.max_shader_engines, 16);
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (i = 0; i < se_num; i++) {
+		gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
+		data = 0;
+		for (j = 0; j < sh_per_se; j++) {
+			switch (enabled_rbs & 3) {
+			case 1:
+				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+				break;
+			case 2:
+				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+				break;
+			case 3:
+			default:
+				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+				break;
+			}
+			enabled_rbs >>= 2;
+		}
+		gfx_v6_0_raster_config(adev, &data);
+
+		if (!adev->gfx.config.backend_enable_mask ||
+				adev->gfx.config.num_rbs >= num_rb_pipes)
+			WREG32(PA_SC_RASTER_CONFIG, data);
+		else
+			gfx_v6_0_write_harvested_raster_configs(adev, data,
+								adev->gfx.config.backend_enable_mask,
+								num_rb_pipes);
+	}
+	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+}
+/*
+static void gmc_v6_0_init_compute_vmid(struct amdgpu_device *adev)
+{
+}
+*/
+
+static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev, u32 cu_per_sh)
+{
+	u32 data, mask;
+
+	data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+	data &= INACTIVE_CUS_MASK;
+	data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+
+	data >>= INACTIVE_CUS_SHIFT;
+
+	mask = gfx_v6_0_create_bitmask(cu_per_sh);
+
+	return ~data & mask;
+}
+
+
+static void gfx_v6_0_setup_spi(struct amdgpu_device *adev,
+			 u32 se_num, u32 sh_per_se,
+			 u32 cu_per_sh)
+{
+	int i, j, k;
+	u32 data, mask;
+	u32 active_cu = 0;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (i = 0; i < se_num; i++) {
+		for (j = 0; j < sh_per_se; j++) {
+			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+			data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+			active_cu = gfx_v6_0_get_cu_enabled(adev, cu_per_sh);
+
+			mask = 1;
+			for (k = 0; k < 16; k++) {
+				mask <<= k;
+				if (active_cu & mask) {
+					data &= ~mask;
+					WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+					break;
+				}
+			}
+		}
+	}
+	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
+{
+	u32 gb_addr_config = 0;
+	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 sx_debug_1;
+	u32 hdp_host_path_cntl;
+	u32 tmp;
+
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+		adev->gfx.config.max_shader_engines = 2;
+		adev->gfx.config.max_tile_pipes = 12;
+		adev->gfx.config.max_cu_per_sh = 8;
+		adev->gfx.config.max_sh_per_se = 2;
+		adev->gfx.config.max_backends_per_se = 4;
+		adev->gfx.config.max_texture_channel_caches = 12;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 32;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_PITCAIRN:
+		adev->gfx.config.max_shader_engines = 2;
+		adev->gfx.config.max_tile_pipes = 8;
+		adev->gfx.config.max_cu_per_sh = 5;
+		adev->gfx.config.max_sh_per_se = 2;
+		adev->gfx.config.max_backends_per_se = 4;
+		adev->gfx.config.max_texture_channel_caches = 8;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 32;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+		break;
+
+	case CHIP_VERDE:
+		adev->gfx.config.max_shader_engines = 1;
+		adev->gfx.config.max_tile_pipes = 4;
+		adev->gfx.config.max_cu_per_sh = 5;
+		adev->gfx.config.max_sh_per_se = 2;
+		adev->gfx.config.max_backends_per_se = 4;
+		adev->gfx.config.max_texture_channel_caches = 4;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 32;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_OLAND:
+		adev->gfx.config.max_shader_engines = 1;
+		adev->gfx.config.max_tile_pipes = 4;
+		adev->gfx.config.max_cu_per_sh = 6;
+		adev->gfx.config.max_sh_per_se = 1;
+		adev->gfx.config.max_backends_per_se = 2;
+		adev->gfx.config.max_texture_channel_caches = 4;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 16;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_HAINAN:
+		adev->gfx.config.max_shader_engines = 1;
+		adev->gfx.config.max_tile_pipes = 4;
+		adev->gfx.config.max_cu_per_sh = 5;
+		adev->gfx.config.max_sh_per_se = 1;
+		adev->gfx.config.max_backends_per_se = 1;
+		adev->gfx.config.max_texture_channel_caches = 2;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 16;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 1);
+	WREG32(SRBM_INT_ACK, 1);
+
+	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
+	adev->gfx.config.mem_max_burst_length_bytes = 256;
+	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+	adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+	if (adev->gfx.config.mem_row_size_in_kb > 4)
+		adev->gfx.config.mem_row_size_in_kb = 4;
+	adev->gfx.config.shader_engine_tile_size = 32;
+	adev->gfx.config.num_gpus = 1;
+	adev->gfx.config.multi_gpu_tile_size = 64;
+
+	gb_addr_config &= ~ROW_SIZE_MASK;
+	switch (adev->gfx.config.mem_row_size_in_kb) {
+	case 1:
+	default:
+		gb_addr_config |= ROW_SIZE(0);
+		break;
+	case 2:
+		gb_addr_config |= ROW_SIZE(1);
+		break;
+	case 4:
+		gb_addr_config |= ROW_SIZE(2);
+		break;
+	}
+	adev->gfx.config.gb_addr_config = gb_addr_config;
+
+	WREG32(GB_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CALC, gb_addr_config);
+	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+#if 0
+	if (adev->has_uvd) {
+		WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+		WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+		WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+	}
+#endif
+	gfx_v6_0_tiling_mode_table_init(adev);
+
+	gfx_v6_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
+		    adev->gfx.config.max_sh_per_se,
+		    adev->gfx.config.max_backends_per_se);
+
+	gfx_v6_0_setup_spi(adev, adev->gfx.config.max_shader_engines,
+		     adev->gfx.config.max_sh_per_se,
+		     adev->gfx.config.max_cu_per_sh);
+
+	gfx_v6_0_get_cu_info(adev);
+
+	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+				     ROQ_IB2_START(0x2b)));
+	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+
+	sx_debug_1 = RREG32(SX_DEBUG_1);
+	WREG32(SX_DEBUG_1, sx_debug_1);
+
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+
+	WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(adev->gfx.config.sc_prim_fifo_size_frontend) |
+				 SC_BACKEND_PRIM_FIFO_SIZE(adev->gfx.config.sc_prim_fifo_size_backend) |
+				 SC_HIZ_TILE_FIFO_SIZE(adev->gfx.config.sc_hiz_tile_fifo_size) |
+				 SC_EARLYZ_TILE_FIFO_SIZE(adev->gfx.config.sc_earlyz_tile_fifo_size)));
+
+	WREG32(VGT_NUM_INSTANCES, 1);
+	WREG32(CP_PERFMON_CNTL, 0);
+	WREG32(SQ_CONFIG, 0);
+	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+					  FORCE_EOV_MAX_REZ_CNT(255)));
+
+	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
+	       AUTO_INVLD_EN(ES_AND_GS_AUTO));
+
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+	WREG32(CB_PERFCOUNTER0_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER0_SELECT1, 0);
+	WREG32(CB_PERFCOUNTER1_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER1_SELECT1, 0);
+	WREG32(CB_PERFCOUNTER2_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER2_SELECT1, 0);
+	WREG32(CB_PERFCOUNTER3_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER3_SELECT1, 0);
+
+	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+	udelay(50);
+}
+
+
+static void gfx_v6_0_scratch_init(struct amdgpu_device *adev)
+{
+	int i;
+
+	adev->gfx.scratch.num_reg = 7;
+	adev->gfx.scratch.reg_base = SCRATCH_REG0;
+	for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
+		adev->gfx.scratch.free[i] = true;
+		adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
+	}
+}
+
+static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t scratch;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	r = amdgpu_gfx_scratch_get(adev, &scratch);
+	if (r) {
+		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+
+	r = amdgpu_ring_alloc(ring, 3);
+	if (r) {
+		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
+		amdgpu_gfx_scratch_free(adev, scratch);
+		return r;
+	}
+	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START));
+	amdgpu_ring_write(ring, 0xDEADBEEF);
+	amdgpu_ring_commit(ring);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32(scratch);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i < adev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+	} else {
+		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+			  ring->idx, scratch, tmp);
+		r = -EINVAL;
+	}
+	amdgpu_gfx_scratch_free(adev, scratch);
+	return r;
+}
+
+static void gfx_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+	/* flush hdp cache */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+				 WRITE_DATA_DST_SEL(0)));
+	amdgpu_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL);
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, 0x1);
+}
+
+/**
+ * gfx_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
+ *
+ * @adev: amdgpu_device pointer
+ * @ridx: amdgpu ring index
+ *
+ * Emits an hdp invalidate on the cp.
+ */
+static void gfx_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+				 WRITE_DATA_DST_SEL(0)));
+	amdgpu_ring_write(ring, HDP_DEBUG0);
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, 0x1);
+}
+
+static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+				     u64 seq, unsigned flags)
+{
+	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+	/* flush read cache over gart */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	amdgpu_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START));
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+			  PACKET3_TC_ACTION_ENA |
+			  PACKET3_SH_KCACHE_ACTION_ENA |
+			  PACKET3_SH_ICACHE_ACTION_ENA);
+	amdgpu_ring_write(ring, 0xFFFFFFFF);
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, 10); /* poll interval */
+	/* EVENT_WRITE_EOP - flush caches, send int */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+	amdgpu_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
+	amdgpu_ring_write(ring, addr & 0xfffffffc);
+	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+				DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
+	amdgpu_ring_write(ring, lower_32_bits(seq));
+	amdgpu_ring_write(ring, upper_32_bits(seq));
+}
+
+static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+				  struct amdgpu_ib *ib,
+				  unsigned vm_id, bool ctx_switch)
+{
+	u32 header, control = 0;
+
+	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
+	if (ctx_switch) {
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+	}
+
+	if (ib->flags & AMDGPU_IB_FLAG_CE)
+		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
+	else
+		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+	control |= ib->length_dw | (vm_id << 24);
+
+	amdgpu_ring_write(ring, header);
+	amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
+	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+	amdgpu_ring_write(ring, control);
+}
+
+/**
+ * gfx_v6_0_ring_test_ib - basic ring IB test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Allocate an IB and execute it on the gfx ring (SI).
+ * Provides a basic gfx ring test to verify that IBs are working.
+ * Returns 0 on success, error on failure.
+ */
+static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_ib ib;
+	struct fence *f = NULL;
+	uint32_t scratch;
+	uint32_t tmp = 0;
+	long r;
+
+	r = amdgpu_gfx_scratch_get(adev, &scratch);
+	if (r) {
+		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+	memset(&ib, 0, sizeof(ib));
+	r = amdgpu_ib_get(adev, NULL, 256, &ib);
+	if (r) {
+		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+		goto err1;
+	}
+	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_START));
+	ib.ptr[2] = 0xDEADBEEF;
+	ib.length_dw = 3;
+
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+	if (r)
+		goto err2;
+
+	r = fence_wait_timeout(f, false, timeout);
+	if (r == 0) {
+		DRM_ERROR("amdgpu: IB test timed out\n");
+		r = -ETIMEDOUT;
+		goto err2;
+	} else if (r < 0) {
+		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+		goto err2;
+	}
+	tmp = RREG32(scratch);
+	if (tmp == 0xDEADBEEF) {
+		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+		r = 0;
+	} else {
+		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
+			  scratch, tmp);
+		r = -EINVAL;
+	}
+
+err2:
+	amdgpu_ib_free(adev, &ib, NULL);
+	fence_put(f);
+err1:
+	amdgpu_gfx_scratch_free(adev, scratch);
+	return r;
+}
+
+static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+{
+	int i;
+	if (enable)
+		WREG32(CP_ME_CNTL, 0);
+	else {
+		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
+		WREG32(SCRATCH_UMSK, 0);
+		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+			adev->gfx.gfx_ring[i].ready = false;
+		for (i = 0; i < adev->gfx.num_compute_rings; i++)
+			adev->gfx.compute_ring[i].ready = false;
+	}
+	udelay(50);
+}
+
+static int gfx_v6_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
+{
+	unsigned i;
+	const struct gfx_firmware_header_v1_0 *pfp_hdr;
+	const struct gfx_firmware_header_v1_0 *ce_hdr;
+	const struct gfx_firmware_header_v1_0 *me_hdr;
+	const __le32 *fw_data;
+	u32 fw_size;
+
+	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
+		return -EINVAL;
+
+	gfx_v6_0_cp_gfx_enable(adev, false);
+	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+	ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+	me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+
+	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
+	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
+	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
+
+	/* PFP */
+	fw_data = (const __le32 *)
+		(adev->gfx.pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < fw_size; i++)
+		WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+
+	/* CE */
+	fw_data = (const __le32 *)
+		(adev->gfx.ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+	WREG32(CP_CE_UCODE_ADDR, 0);
+	for (i = 0; i < fw_size; i++)
+		WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+	WREG32(CP_CE_UCODE_ADDR, 0);
+
+	/* ME */
+	fw_data = (const __be32 *)
+		(adev->gfx.me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+	WREG32(CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < fw_size; i++)
+		WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+	WREG32(CP_ME_RAM_WADDR, 0);
+
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_CE_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
+	return 0;
+}
+
+static int gfx_v6_0_cp_gfx_start(struct amdgpu_device *adev)
+{
+	const struct cs_section_def *sect = NULL;
+	const struct cs_extent_def *ext = NULL;
+	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
+	int r, i;
+
+	r = amdgpu_ring_alloc(ring, 7 + 4);
+	if (r) {
+		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+	amdgpu_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	amdgpu_ring_write(ring, 0x1);
+	amdgpu_ring_write(ring, 0x0);
+	amdgpu_ring_write(ring, adev->gfx.config.max_hw_contexts - 1);
+	amdgpu_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, 0);
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
+	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
+	amdgpu_ring_write(ring, 0xc000);
+	amdgpu_ring_write(ring, 0xe000);
+	amdgpu_ring_commit(ring);
+
+	gfx_v6_0_cp_gfx_enable(adev, true);
+
+	r = amdgpu_ring_alloc(ring, gfx_v6_0_get_csb_size(adev) + 10);
+	if (r) {
+		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+		for (ext = sect->section; ext->extent != NULL; ++ext) {
+			if (sect->id == SECT_CONTEXT) {
+				amdgpu_ring_write(ring,
+						  PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+				amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
+				for (i = 0; i < ext->reg_count; i++)
+					amdgpu_ring_write(ring, ext->extent[i]);
+			}
+		}
+	}
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	amdgpu_ring_write(ring, 0);
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	amdgpu_ring_write(ring, 0x00000316);
+	amdgpu_ring_write(ring, 0x0000000e);
+	amdgpu_ring_write(ring, 0x00000010);
+
+	amdgpu_ring_commit(ring);
+
+	return 0;
+}
+
+static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	u32 tmp;
+	u32 rb_bufsz;
+	int r;
+	u64 rptr_addr;
+
+	WREG32(CP_SEM_WAIT_TIMER, 0x0);
+	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+	/* Set the write pointer delay */
+	WREG32(CP_RB_WPTR_DELAY, 0);
+
+	WREG32(CP_DEBUG, 0);
+	WREG32(SCRATCH_ADDR, 0);
+
+	/* ring 0 - compute and gfx */
+	/* Set ring buffer size */
+	ring = &adev->gfx.gfx_ring[0];
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB0_CNTL, tmp);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
+	ring->wptr = 0;
+	WREG32(CP_RB0_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+	WREG32(CP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
+	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+	WREG32(SCRATCH_UMSK, 0);
+
+	mdelay(1);
+	WREG32(CP_RB0_CNTL, tmp);
+
+	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
+
+	/* start the rings */
+	gfx_v6_0_cp_gfx_start(adev);
+	ring->ready = true;
+	r = amdgpu_ring_test_ring(ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+
+	return 0;
+}
+
+static u32 gfx_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
+{
+	return ring->adev->wb.wb[ring->rptr_offs];
+}
+
+static u32 gfx_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring == &adev->gfx.gfx_ring[0])
+		return RREG32(CP_RB0_WPTR);
+	else if (ring == &adev->gfx.compute_ring[0])
+		return RREG32(CP_RB1_WPTR);
+	else if (ring == &adev->gfx.compute_ring[1])
+		return RREG32(CP_RB2_WPTR);
+	else
+		BUG();
+}
+
+static void gfx_v6_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	WREG32(CP_RB0_WPTR, ring->wptr);
+	(void)RREG32(CP_RB0_WPTR);
+}
+
+static void gfx_v6_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring == &adev->gfx.compute_ring[0]) {
+		WREG32(CP_RB1_WPTR, ring->wptr);
+		(void)RREG32(CP_RB1_WPTR);
+	} else if (ring == &adev->gfx.compute_ring[1]) {
+		WREG32(CP_RB2_WPTR, ring->wptr);
+		(void)RREG32(CP_RB2_WPTR);
+	} else {
+		BUG();
+	}
+
+}
+
+static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	u32 tmp;
+	u32 rb_bufsz;
+	int r;
+	u64 rptr_addr;
+
+	/* ring1  - compute only */
+	/* Set ring buffer size */
+
+	ring = &adev->gfx.compute_ring[0];
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB1_CNTL, tmp);
+
+	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
+	ring->wptr = 0;
+	WREG32(CP_RB1_WPTR, ring->wptr);
+
+	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+	WREG32(CP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
+	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+	mdelay(1);
+	WREG32(CP_RB1_CNTL, tmp);
+	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
+
+	ring = &adev->gfx.compute_ring[1];
+	rb_bufsz = order_base_2(ring->ring_size / 8);
+	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB2_CNTL, tmp);
+
+	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
+	ring->wptr = 0;
+	WREG32(CP_RB2_WPTR, ring->wptr);
+	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+	WREG32(CP_RB2_RPTR_ADDR, lower_32_bits(rptr_addr));
+	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+	mdelay(1);
+	WREG32(CP_RB2_CNTL, tmp);
+	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
+
+	adev->gfx.compute_ring[0].ready = true;
+	adev->gfx.compute_ring[1].ready = true;
+
+	r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[0]);
+	if (r) {
+		adev->gfx.compute_ring[0].ready = false;
+		return r;
+	}
+
+	r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[1]);
+	if (r) {
+		adev->gfx.compute_ring[1].ready = false;
+		return r;
+	}
+
+	return 0;
+}
+
+static void gfx_v6_0_cp_enable(struct amdgpu_device *adev, bool enable)
+{
+	gfx_v6_0_cp_gfx_enable(adev, enable);
+}
+
+static int gfx_v6_0_cp_load_microcode(struct amdgpu_device *adev)
+{
+	return gfx_v6_0_cp_gfx_load_microcode(adev);
+}
+
+static void gfx_v6_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
+					       bool enable)
+{	
+	u32 tmp = RREG32(CP_INT_CNTL_RING0);
+	u32 mask;
+	int i;
+
+	if (enable)
+		tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	else
+		tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	WREG32(CP_INT_CNTL_RING0, tmp);
+
+	if (!enable) {
+		/* read a gfx register */
+		tmp = RREG32(DB_DEPTH_INFO);
+
+		mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
+		for (i = 0; i < adev->usec_timeout; i++) {
+			if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
+				break;
+			udelay(1);
+		}
+	}
+}
+
+static int gfx_v6_0_cp_resume(struct amdgpu_device *adev)
+{
+	int r;
+
+	gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+
+	r = gfx_v6_0_cp_load_microcode(adev);
+	if (r)
+		return r;
+
+	r = gfx_v6_0_cp_gfx_resume(adev);
+	if (r)
+		return r;
+	r = gfx_v6_0_cp_compute_resume(adev);
+	if (r)
+		return r;
+
+	gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+	return 0;
+}
+
+static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	uint32_t seq = ring->fence_drv.sync_seq;
+	uint64_t addr = ring->fence_drv.gpu_addr;
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
+				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
+	amdgpu_ring_write(ring, addr & 0xfffffffc);
+	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+	amdgpu_ring_write(ring, seq);
+	amdgpu_ring_write(ring, 0xffffffff);
+	amdgpu_ring_write(ring, 4); /* poll interval */
+
+	if (usepfp) {
+		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+	}
+}
+
+static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+					unsigned vm_id, uint64_t pd_addr)
+{
+	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+
+	/* write new base address */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+				 WRITE_DATA_DST_SEL(0)));
+	if (vm_id < 8) {
+		amdgpu_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id ));
+	} else {
+		amdgpu_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+	}
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, pd_addr >> 12);
+
+	/* bits 0-15 are the VM contexts0-15 */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+				 WRITE_DATA_DST_SEL(0)));
+	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, 1 << vm_id);
+
+	/* wait for the invalidate to complete */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	amdgpu_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
+				 WAIT_REG_MEM_ENGINE(0))); /* me */
+	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, 0); /* ref */
+	amdgpu_ring_write(ring, 0); /* mask */
+	amdgpu_ring_write(ring, 0x20); /* poll interval */
+
+	if (usepfp) {
+		/* sync PFP to ME, otherwise we might get invalid PFP reads */
+		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+		amdgpu_ring_write(ring, 0x0);
+
+		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+	}
+}
+
+
+static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->gfx.rlc.save_restore_obj) {
+		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+		if (unlikely(r != 0))
+			dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
+		amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
+		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+
+		amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
+		adev->gfx.rlc.save_restore_obj = NULL;
+	}
+
+	if (adev->gfx.rlc.clear_state_obj) {
+		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+		if (unlikely(r != 0))
+			dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
+		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+		amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
+		adev->gfx.rlc.clear_state_obj = NULL;
+	}
+
+	if (adev->gfx.rlc.cp_table_obj) {
+		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+		if (unlikely(r != 0))
+			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+		amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
+		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+		amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
+		adev->gfx.rlc.cp_table_obj = NULL;
+	}
+}
+
+static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
+{
+	const u32 *src_ptr;
+	volatile u32 *dst_ptr;
+	u32 dws, i;
+	u64 reg_list_mc_addr;
+	const struct cs_section_def *cs_data;
+	int r;
+
+	adev->gfx.rlc.reg_list = verde_rlc_save_restore_register_list;
+	adev->gfx.rlc.reg_list_size =
+			(u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
+
+	adev->gfx.rlc.cs_data = si_cs_data;
+	src_ptr = adev->gfx.rlc.reg_list;
+	dws = adev->gfx.rlc.reg_list_size;
+	cs_data = adev->gfx.rlc.cs_data;
+
+	if (src_ptr) {
+		/* save restore block */
+		if (adev->gfx.rlc.save_restore_obj == NULL) {
+
+			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
+					     AMDGPU_GEM_DOMAIN_VRAM,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     NULL, NULL,
+					     &adev->gfx.rlc.save_restore_obj);
+
+			if (r) {
+				dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
+				return r;
+			}
+		}
+
+		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+		if (unlikely(r != 0)) {
+			gfx_v6_0_rlc_fini(adev);
+			return r;
+		}
+		r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
+				  &adev->gfx.rlc.save_restore_gpu_addr);
+		if (r) {
+			amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+			dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
+			gfx_v6_0_rlc_fini(adev);
+			return r;
+		}
+
+		r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
+		if (r) {
+			dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
+			gfx_v6_0_rlc_fini(adev);
+			return r;
+		}
+		/* write the sr buffer */
+		dst_ptr = adev->gfx.rlc.sr_ptr;
+		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+			dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+		amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
+		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+	}
+
+	if (cs_data) {
+		/* clear state block */
+		adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev);
+		dws = adev->gfx.rlc.clear_state_size + (256 / 4);
+
+		if (adev->gfx.rlc.clear_state_obj == NULL) {
+			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
+					     AMDGPU_GEM_DOMAIN_VRAM,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     NULL, NULL,
+					     &adev->gfx.rlc.clear_state_obj);
+
+			if (r) {
+				dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
+				gfx_v6_0_rlc_fini(adev);
+				return r;
+			}
+		}
+		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+		if (unlikely(r != 0)) {
+			gfx_v6_0_rlc_fini(adev);
+			return r;
+		}
+		r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
+				  &adev->gfx.rlc.clear_state_gpu_addr);
+		if (r) {
+			amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+			dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+			gfx_v6_0_rlc_fini(adev);
+			return r;
+		}
+
+		r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
+		if (r) {
+			dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
+			gfx_v6_0_rlc_fini(adev);
+			return r;
+		}
+		/* set up the cs buffer */
+		dst_ptr = adev->gfx.rlc.cs_ptr;
+		reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256;
+		dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
+		dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
+		dst_ptr[2] = cpu_to_le32(adev->gfx.rlc.clear_state_size);
+		gfx_v6_0_get_csb_buffer(adev, &dst_ptr[(256/4)]);
+		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+	}
+
+	return 0;
+}
+
+static void gfx_v6_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
+{
+	u32 tmp;
+
+	tmp = RREG32(RLC_LB_CNTL);
+	if (enable)
+		tmp |= LOAD_BALANCE_ENABLE;
+	else
+		tmp &= ~LOAD_BALANCE_ENABLE;
+	WREG32(RLC_LB_CNTL, tmp);
+
+	if (!enable) {
+		gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+		WREG32(SPI_LB_CU_MASK, 0x00ff);
+	}
+
+}
+
+static void gfx_v6_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
+			break;
+		udelay(1);
+	}
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
+			break;
+		udelay(1);
+	}
+}
+
+static void gfx_v6_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
+{
+	u32 tmp;
+
+	tmp = RREG32(RLC_CNTL);
+	if (tmp != rlc)
+		WREG32(RLC_CNTL, rlc);
+}
+
+static u32 gfx_v6_0_halt_rlc(struct amdgpu_device *adev)
+{
+	u32 data, orig;
+
+	orig = data = RREG32(RLC_CNTL);
+
+	if (data & RLC_ENABLE) {
+		data &= ~RLC_ENABLE;
+		WREG32(RLC_CNTL, data);
+
+		gfx_v6_0_wait_for_rlc_serdes(adev);
+	}
+
+	return orig;
+}
+
+static void gfx_v6_0_rlc_stop(struct amdgpu_device *adev)
+{
+	WREG32(RLC_CNTL, 0);
+
+	gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+	gfx_v6_0_wait_for_rlc_serdes(adev);
+}
+
+static void gfx_v6_0_rlc_start(struct amdgpu_device *adev)
+{
+	WREG32(RLC_CNTL, RLC_ENABLE);
+
+	gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+	udelay(50);
+}
+
+static void gfx_v6_0_rlc_reset(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32(GRBM_SOFT_RESET);
+
+	tmp |= SOFT_RESET_RLC;
+	WREG32(GRBM_SOFT_RESET, tmp);
+	udelay(50);
+	tmp &= ~SOFT_RESET_RLC;
+	WREG32(GRBM_SOFT_RESET, tmp);
+	udelay(50);
+}
+
+static bool gfx_v6_0_lbpw_supported(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	/* Enable LBPW only for DDR3 */
+	tmp = RREG32(MC_SEQ_MISC0);
+	if ((tmp & 0xF0000000) == 0xB0000000)
+		return true;
+	return false;
+}
+static void gfx_v6_0_init_cg(struct amdgpu_device *adev)
+{
+}
+
+static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
+{
+	u32 i;
+	const struct rlc_firmware_header_v1_0 *hdr;
+	const __le32 *fw_data;
+	u32 fw_size;
+
+
+	if (!adev->gfx.rlc_fw)
+		return -EINVAL;
+
+	gfx_v6_0_rlc_stop(adev);
+	gfx_v6_0_rlc_reset(adev);
+	gfx_v6_0_init_pg(adev);
+	gfx_v6_0_init_cg(adev);
+
+	WREG32(RLC_RL_BASE, 0);
+	WREG32(RLC_RL_SIZE, 0);
+	WREG32(RLC_LB_CNTL, 0);
+	WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
+	WREG32(RLC_LB_CNTR_INIT, 0);
+	WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
+
+	WREG32(RLC_MC_CNTL, 0);
+	WREG32(RLC_UCODE_CNTL, 0);
+
+	hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+	fw_data = (const __le32 *)
+		(adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+	amdgpu_ucode_print_rlc_hdr(&hdr->header);
+
+	for (i = 0; i < fw_size; i++) {
+		WREG32(RLC_UCODE_ADDR, i);
+		WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
+	}
+	WREG32(RLC_UCODE_ADDR, 0);
+
+	gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev));
+	gfx_v6_0_rlc_start(adev);
+
+	return 0;
+}
+
+static void gfx_v6_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
+{
+	u32 data, orig, tmp;
+
+	orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
+		gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+		WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
+
+		tmp = gfx_v6_0_halt_rlc(adev);
+
+		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+		WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);
+
+		gfx_v6_0_wait_for_rlc_serdes(adev);
+		gfx_v6_0_update_rlc(adev, tmp);
+
+		WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);
+
+		data |= CGCG_EN | CGLS_EN;
+	} else {
+		gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+
+		RREG32(CB_CGTT_SCLK_CTRL);
+		RREG32(CB_CGTT_SCLK_CTRL);
+		RREG32(CB_CGTT_SCLK_CTRL);
+		RREG32(CB_CGTT_SCLK_CTRL);
+
+		data &= ~(CGCG_EN | CGLS_EN);
+	}
+
+	if (orig != data)
+		WREG32(RLC_CGCG_CGLS_CTRL, data);
+
+}
+
+static void gfx_v6_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
+{
+
+	u32 data, orig, tmp = 0;
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+		orig = data = RREG32(CGTS_SM_CTRL_REG);
+		data = 0x96940200;
+		if (orig != data)
+			WREG32(CGTS_SM_CTRL_REG, data);
+
+		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
+			orig = data = RREG32(CP_MEM_SLP_CNTL);
+			data |= CP_MEM_LS_EN;
+			if (orig != data)
+				WREG32(CP_MEM_SLP_CNTL, data);
+		}
+
+		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+		data &= 0xffffffc0;
+		if (orig != data)
+			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+		tmp = gfx_v6_0_halt_rlc(adev);
+
+		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+		WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);
+
+		gfx_v6_0_update_rlc(adev, tmp);
+	} else {
+		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+		data |= 0x00000003;
+		if (orig != data)
+			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+		data = RREG32(CP_MEM_SLP_CNTL);
+		if (data & CP_MEM_LS_EN) {
+			data &= ~CP_MEM_LS_EN;
+			WREG32(CP_MEM_SLP_CNTL, data);
+		}
+		orig = data = RREG32(CGTS_SM_CTRL_REG);
+		data |= LS_OVERRIDE | OVERRIDE;
+		if (orig != data)
+			WREG32(CGTS_SM_CTRL_REG, data);
+
+		tmp = gfx_v6_0_halt_rlc(adev);
+
+		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+		WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);
+
+		gfx_v6_0_update_rlc(adev, tmp);
+	}
+}
+/*
+static void gfx_v6_0_update_cg(struct amdgpu_device *adev,
+			       bool enable)
+{
+	gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+	if (enable) {
+		gfx_v6_0_enable_mgcg(adev, true);
+		gfx_v6_0_enable_cgcg(adev, true);
+	} else {
+		gfx_v6_0_enable_cgcg(adev, false);
+		gfx_v6_0_enable_mgcg(adev, false);
+	}
+	gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+}
+*/
+static void gfx_v6_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
+						bool enable)
+{
+}
+
+static void gfx_v6_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
+						bool enable)
+{
+}
+
+static void gfx_v6_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
+{
+	u32 data, orig;
+
+	orig = data = RREG32(RLC_PG_CNTL);
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
+		data &= ~0x8000;
+	else
+		data |= 0x8000;
+	if (orig != data)
+		WREG32(RLC_PG_CNTL, data);
+}
+
+static void gfx_v6_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
+{
+}
+/*
+static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev)
+{
+	const __le32 *fw_data;
+	volatile u32 *dst_ptr;
+	int me, i, max_me = 4;
+	u32 bo_offset = 0;
+	u32 table_offset, table_size;
+
+	if (adev->asic_type == CHIP_KAVERI)
+		max_me = 5;
+
+	if (adev->gfx.rlc.cp_table_ptr == NULL)
+		return;
+
+	dst_ptr = adev->gfx.rlc.cp_table_ptr;
+	for (me = 0; me < max_me; me++) {
+		if (me == 0) {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.ce_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		} else if (me == 1) {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.pfp_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		} else if (me == 2) {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.me_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		} else if (me == 3) {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.mec_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		} else {
+			const struct gfx_firmware_header_v1_0 *hdr =
+				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+			fw_data = (const __le32 *)
+				(adev->gfx.mec2_fw->data +
+				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			table_offset = le32_to_cpu(hdr->jt_offset);
+			table_size = le32_to_cpu(hdr->jt_size);
+		}
+
+		for (i = 0; i < table_size; i ++) {
+			dst_ptr[bo_offset + i] =
+				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+		}
+
+		bo_offset += table_size;
+	}
+}
+*/
+static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev,
+				     bool enable)
+{
+
+	u32 tmp;
+
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+		tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
+		WREG32(RLC_TTOP_D, tmp);
+
+		tmp = RREG32(RLC_PG_CNTL);
+		tmp |= GFX_PG_ENABLE;
+		WREG32(RLC_PG_CNTL, tmp);
+
+		tmp = RREG32(RLC_AUTO_PG_CTRL);
+		tmp |= AUTO_PG_EN;
+		WREG32(RLC_AUTO_PG_CTRL, tmp);
+	} else {
+		tmp = RREG32(RLC_AUTO_PG_CTRL);
+		tmp &= ~AUTO_PG_EN;
+		WREG32(RLC_AUTO_PG_CTRL, tmp);
+
+		tmp = RREG32(DB_RENDER_CONTROL);
+	}
+}
+
+static u32 gfx_v6_0_get_cu_active_bitmap(struct amdgpu_device *adev,
+					 u32 se, u32 sh)
+{
+
+	u32 mask = 0, tmp, tmp1;
+	int i;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff);
+	tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+	tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	tmp &= 0xffff0000;
+
+	tmp |= tmp1;
+	tmp >>= 16;
+
+	for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) {
+		mask <<= 1;
+		mask |= 1;
+	}
+
+	return (~tmp) & mask;
+}
+
+static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
+{
+	u32 i, j, k, active_cu_number = 0;
+
+	u32 mask, counter, cu_bitmap;
+	u32 tmp = 0;
+
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+			mask = 1;
+			cu_bitmap = 0;
+			counter  = 0;
+			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+				if (gfx_v6_0_get_cu_active_bitmap(adev, i, j) & mask) {
+					if (counter < 2)
+						cu_bitmap |= mask;
+					counter++;
+				}
+				mask <<= 1;
+			}
+
+			active_cu_number += counter;
+			tmp |= (cu_bitmap << (i * 16 + j * 8));
+		}
+	}
+
+	WREG32(RLC_PG_AO_CU_MASK, tmp);
+
+	tmp = RREG32(RLC_MAX_PG_CU);
+	tmp &= ~MAX_PU_CU_MASK;
+	tmp |= MAX_PU_CU(active_cu_number);
+	WREG32(RLC_MAX_PG_CU, tmp);
+}
+
+static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
+					    bool enable)
+{
+	u32 data, orig;
+
+	orig = data = RREG32(RLC_PG_CNTL);
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
+		data |= STATIC_PER_CU_PG_ENABLE;
+	else
+		data &= ~STATIC_PER_CU_PG_ENABLE;
+	if (orig != data)
+		WREG32(RLC_PG_CNTL, data);
+}
+
+static void gfx_v6_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
+					     bool enable)
+{
+	u32 data, orig;
+
+	orig = data = RREG32(RLC_PG_CNTL);
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
+		data |= DYN_PER_CU_PG_ENABLE;
+	else
+		data &= ~DYN_PER_CU_PG_ENABLE;
+	if (orig != data)
+		WREG32(RLC_PG_CNTL, data);
+}
+
+static void gfx_v6_0_init_gfx_cgpg(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+
+	tmp = RREG32(RLC_PG_CNTL);
+	tmp |= GFX_PG_SRC;
+	WREG32(RLC_PG_CNTL, tmp);
+
+	WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+
+	tmp = RREG32(RLC_AUTO_PG_CTRL);
+
+	tmp &= ~GRBM_REG_SGIT_MASK;
+	tmp |= GRBM_REG_SGIT(0x700);
+	tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
+	WREG32(RLC_AUTO_PG_CTRL, tmp);
+}
+
+static void gfx_v6_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
+{
+	gfx_v6_0_enable_gfx_cgpg(adev, enable);
+	gfx_v6_0_enable_gfx_static_mgpg(adev, enable);
+	gfx_v6_0_enable_gfx_dynamic_mgpg(adev, enable);
+}
+
+static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
+{
+	u32 count = 0;
+	const struct cs_section_def *sect = NULL;
+	const struct cs_extent_def *ext = NULL;
+
+	if (adev->gfx.rlc.cs_data == NULL)
+		return 0;
+
+	/* begin clear state */
+	count += 2;
+	/* context control state */
+	count += 3;
+
+	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+		for (ext = sect->section; ext->extent != NULL; ++ext) {
+			if (sect->id == SECT_CONTEXT)
+				count += 2 + ext->reg_count;
+			else
+				return 0;
+		}
+	}
+	/* pa_sc_raster_config */
+	count += 3;
+	/* end clear state */
+	count += 2;
+	/* clear state */
+	count += 2;
+
+	return count;
+}
+
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
+				    volatile u32 *buffer)
+{
+	u32 count = 0, i;
+	const struct cs_section_def *sect = NULL;
+	const struct cs_extent_def *ext = NULL;
+
+	if (adev->gfx.rlc.cs_data == NULL)
+		return;
+	if (buffer == NULL)
+		return;
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+	buffer[count++] = cpu_to_le32(0x80000000);
+	buffer[count++] = cpu_to_le32(0x80000000);
+
+	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+		for (ext = sect->section; ext->extent != NULL; ++ext) {
+			if (sect->id == SECT_CONTEXT) {
+				buffer[count++] =
+					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+				buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
+				for (i = 0; i < ext->reg_count; i++)
+					buffer[count++] = cpu_to_le32(ext->extent[i]);
+			} else {
+				return;
+			}
+		}
+	}
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
+
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+		buffer[count++] = cpu_to_le32(0x2a00126a);
+		break;
+	case CHIP_VERDE:
+		buffer[count++] = cpu_to_le32(0x0000124a);
+		break;
+	case CHIP_OLAND:
+		buffer[count++] = cpu_to_le32(0x00000082);
+		break;
+	case CHIP_HAINAN:
+		buffer[count++] = cpu_to_le32(0x00000000);
+		break;
+	default:
+		buffer[count++] = cpu_to_le32(0x00000000);
+		break;
+	}
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+	buffer[count++] = cpu_to_le32(0);
+}
+
+static void gfx_v6_0_init_pg(struct amdgpu_device *adev)
+{
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+			      AMD_PG_SUPPORT_GFX_SMG |
+			      AMD_PG_SUPPORT_GFX_DMG |
+			      AMD_PG_SUPPORT_CP |
+			      AMD_PG_SUPPORT_GDS |
+			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
+		gfx_v6_0_enable_sclk_slowdown_on_pu(adev, true);
+		gfx_v6_0_enable_sclk_slowdown_on_pd(adev, true);
+		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
+			gfx_v6_0_init_gfx_cgpg(adev);
+			gfx_v6_0_enable_cp_pg(adev, true);
+			gfx_v6_0_enable_gds_pg(adev, true);
+		} else {
+			WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+			WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+
+		}
+		gfx_v6_0_init_ao_cu_mask(adev);
+		gfx_v6_0_update_gfx_pg(adev, true);
+	} else {
+
+		WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+		WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+	}
+}
+
+static void gfx_v6_0_fini_pg(struct amdgpu_device *adev)
+{
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+			      AMD_PG_SUPPORT_GFX_SMG |
+			      AMD_PG_SUPPORT_GFX_DMG |
+			      AMD_PG_SUPPORT_CP |
+			      AMD_PG_SUPPORT_GDS |
+			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
+		gfx_v6_0_update_gfx_pg(adev, false);
+		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
+			gfx_v6_0_enable_cp_pg(adev, false);
+			gfx_v6_0_enable_gds_pg(adev, false);
+		}
+	}
+}
+
+static uint64_t gfx_v6_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+{
+	uint64_t clock;
+
+	mutex_lock(&adev->gfx.gpu_clock_mutex);
+	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+	        ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+	mutex_unlock(&adev->gfx.gpu_clock_mutex);
+	return clock;
+}
+
+static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+{
+	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+	amdgpu_ring_write(ring, 0x80000000);
+	amdgpu_ring_write(ring, 0);
+}
+
+static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		6; /* gfx_v6_0_ring_emit_ib */
+}
+
+static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+{
+	return
+		5 + /* gfx_v6_0_ring_emit_hdp_flush */
+		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+		14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+		7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
+		17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
+		3; /* gfx_v6_ring_emit_cntxcntl */
+}
+
+static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+{
+	return
+		5 + /* gfx_v6_0_ring_emit_hdp_flush */
+		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v6_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v6_0_ring_emit_vm_flush */
+		14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+}
+
+static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
+	.get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
+	.select_se_sh = &gfx_v6_0_select_se_sh,
+};
+
+static int gfx_v6_0_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS;
+	adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS;
+	adev->gfx.funcs = &gfx_v6_0_gfx_funcs;
+	gfx_v6_0_set_ring_funcs(adev);
+	gfx_v6_0_set_irq_funcs(adev);
+
+	return 0;
+}
+
+static int gfx_v6_0_sw_init(void *handle)
+{
+	struct amdgpu_ring *ring;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i, r;
+
+	r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
+	if (r)
+		return r;
+
+	r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
+	if (r)
+		return r;
+
+	r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
+	if (r)
+		return r;
+
+	gfx_v6_0_scratch_init(adev);
+
+	r = gfx_v6_0_init_microcode(adev);
+	if (r) {
+		DRM_ERROR("Failed to load gfx firmware!\n");
+		return r;
+	}
+
+	r = gfx_v6_0_rlc_init(adev);
+	if (r) {
+		DRM_ERROR("Failed to init rlc BOs!\n");
+		return r;
+	}
+
+	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+		ring = &adev->gfx.gfx_ring[i];
+		ring->ring_obj = NULL;
+		sprintf(ring->name, "gfx");
+		r = amdgpu_ring_init(adev, ring, 1024,
+				     0x80000000, 0xf,
+				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
+				     AMDGPU_RING_TYPE_GFX);
+		if (r)
+			return r;
+	}
+
+	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+		unsigned irq_type;
+
+		if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
+			DRM_ERROR("Too many (%d) compute rings!\n", i);
+			break;
+		}
+		ring = &adev->gfx.compute_ring[i];
+		ring->ring_obj = NULL;
+		ring->use_doorbell = false;
+		ring->doorbell_index = 0;
+		ring->me = 1;
+		ring->pipe = i;
+		ring->queue = i;
+		sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
+		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
+		r = amdgpu_ring_init(adev, ring, 1024,
+				     0x80000000, 0xf,
+				     &adev->gfx.eop_irq, irq_type,
+				     AMDGPU_RING_TYPE_COMPUTE);
+		if (r)
+			return r;
+	}
+
+	return r;
+}
+
+static int gfx_v6_0_sw_fini(void *handle)
+{
+	int i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
+	amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
+	amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+
+	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
+	for (i = 0; i < adev->gfx.num_compute_rings; i++)
+		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
+
+	gfx_v6_0_rlc_fini(adev);
+
+	return 0;
+}
+
+static int gfx_v6_0_hw_init(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	gfx_v6_0_gpu_init(adev);
+
+	r = gfx_v6_0_rlc_resume(adev);
+	if (r)
+		return r;
+
+	r = gfx_v6_0_cp_resume(adev);
+	if (r)
+		return r;
+
+	adev->gfx.ce_ram_size = 0x8000;
+
+	return r;
+}
+
+static int gfx_v6_0_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	gfx_v6_0_cp_enable(adev, false);
+	gfx_v6_0_rlc_stop(adev);
+	gfx_v6_0_fini_pg(adev);
+
+	return 0;
+}
+
+static int gfx_v6_0_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return gfx_v6_0_hw_fini(adev);
+}
+
+static int gfx_v6_0_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return gfx_v6_0_hw_init(adev);
+}
+
+static bool gfx_v6_0_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (RREG32(GRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
+		return false;
+	else
+		return true;
+}
+
+static int gfx_v6_0_wait_for_idle(void *handle)
+{
+	unsigned i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (gfx_v6_0_is_idle(handle))
+			return 0;
+		udelay(1);
+	}
+	return -ETIMEDOUT;
+}
+
+static int gfx_v6_0_soft_reset(void *handle)
+{
+	return 0;
+}
+
+static void gfx_v6_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+						 enum amdgpu_interrupt_state state)
+{
+	u32 cp_int_cntl;
+
+	switch (state) {
+	case AMDGPU_IRQ_STATE_DISABLE:
+		cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+		cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+		WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+		break;
+	case AMDGPU_IRQ_STATE_ENABLE:
+		cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+		cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+		WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+		break;
+	default:
+		break;
+	}
+}
+
+static void gfx_v6_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
+						     int ring,
+						     enum amdgpu_interrupt_state state)
+{
+	u32 cp_int_cntl;
+	switch (state){
+	case AMDGPU_IRQ_STATE_DISABLE:
+		if (ring == 0) {
+			cp_int_cntl = RREG32(CP_INT_CNTL_RING1);
+			cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+			WREG32(CP_INT_CNTL_RING1, cp_int_cntl);
+			break;
+		} else {
+			cp_int_cntl = RREG32(CP_INT_CNTL_RING2);
+			cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+			WREG32(CP_INT_CNTL_RING2, cp_int_cntl);
+			break;
+
+		}
+	case AMDGPU_IRQ_STATE_ENABLE:
+		if (ring == 0) {
+			cp_int_cntl = RREG32(CP_INT_CNTL_RING1);
+			cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+			WREG32(CP_INT_CNTL_RING1, cp_int_cntl);
+			break;
+		} else {
+			cp_int_cntl = RREG32(CP_INT_CNTL_RING2);
+			cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+			WREG32(CP_INT_CNTL_RING2, cp_int_cntl);
+			break;
+
+		}
+
+	default:
+		BUG();
+		break;
+
+	}
+}
+
+static int gfx_v6_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
+					     struct amdgpu_irq_src *src,
+					     unsigned type,
+					     enum amdgpu_interrupt_state state)
+{
+	u32 cp_int_cntl;
+
+	switch (state) {
+	case AMDGPU_IRQ_STATE_DISABLE:
+		cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
+		WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+		break;
+	case AMDGPU_IRQ_STATE_ENABLE:
+		cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
+		WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int gfx_v6_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
+					      struct amdgpu_irq_src *src,
+					      unsigned type,
+					      enum amdgpu_interrupt_state state)
+{
+	u32 cp_int_cntl;
+
+	switch (state) {
+	case AMDGPU_IRQ_STATE_DISABLE:
+		cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
+		WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+		break;
+	case AMDGPU_IRQ_STATE_ENABLE:
+		cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
+		WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int gfx_v6_0_set_eop_interrupt_state(struct amdgpu_device *adev,
+					    struct amdgpu_irq_src *src,
+					    unsigned type,
+					    enum amdgpu_interrupt_state state)
+{
+	switch (type) {
+	case AMDGPU_CP_IRQ_GFX_EOP:
+		gfx_v6_0_set_gfx_eop_interrupt_state(adev, state);
+		break;
+	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
+		gfx_v6_0_set_compute_eop_interrupt_state(adev, 0, state);
+		break;
+	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
+		gfx_v6_0_set_compute_eop_interrupt_state(adev, 1, state);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int gfx_v6_0_eop_irq(struct amdgpu_device *adev,
+			    struct amdgpu_irq_src *source,
+			    struct amdgpu_iv_entry *entry)
+{
+	switch (entry->ring_id) {
+	case 0:
+		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
+		break;
+	case 1:
+	case 2:
+		amdgpu_fence_process(&adev->gfx.compute_ring[entry->ring_id -1]);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev,
+				 struct amdgpu_irq_src *source,
+				 struct amdgpu_iv_entry *entry)
+{
+	DRM_ERROR("Illegal register access in command stream\n");
+	schedule_work(&adev->reset_work);
+	return 0;
+}
+
+static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev,
+				  struct amdgpu_irq_src *source,
+				  struct amdgpu_iv_entry *entry)
+{
+	DRM_ERROR("Illegal instruction in command stream\n");
+	schedule_work(&adev->reset_work);
+	return 0;
+}
+
+static int gfx_v6_0_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	bool gate = false;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (state == AMD_CG_STATE_GATE)
+		gate = true;
+
+	gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+	if (gate) {
+		gfx_v6_0_enable_mgcg(adev, true);
+		gfx_v6_0_enable_cgcg(adev, true);
+	} else {
+		gfx_v6_0_enable_cgcg(adev, false);
+		gfx_v6_0_enable_mgcg(adev, false);
+	}
+	gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+	return 0;
+}
+
+static int gfx_v6_0_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	bool gate = false;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (state == AMD_PG_STATE_GATE)
+		gate = true;
+
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+			      AMD_PG_SUPPORT_GFX_SMG |
+			      AMD_PG_SUPPORT_GFX_DMG |
+			      AMD_PG_SUPPORT_CP |
+			      AMD_PG_SUPPORT_GDS |
+			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
+		gfx_v6_0_update_gfx_pg(adev, gate);
+		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
+			gfx_v6_0_enable_cp_pg(adev, gate);
+			gfx_v6_0_enable_gds_pg(adev, gate);
+		}
+	}
+
+	return 0;
+}
+
+const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
+	.name = "gfx_v6_0",
+	.early_init = gfx_v6_0_early_init,
+	.late_init = NULL,
+	.sw_init = gfx_v6_0_sw_init,
+	.sw_fini = gfx_v6_0_sw_fini,
+	.hw_init = gfx_v6_0_hw_init,
+	.hw_fini = gfx_v6_0_hw_fini,
+	.suspend = gfx_v6_0_suspend,
+	.resume = gfx_v6_0_resume,
+	.is_idle = gfx_v6_0_is_idle,
+	.wait_for_idle = gfx_v6_0_wait_for_idle,
+	.soft_reset = gfx_v6_0_soft_reset,
+	.set_clockgating_state = gfx_v6_0_set_clockgating_state,
+	.set_powergating_state = gfx_v6_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
+	.get_rptr = gfx_v6_0_ring_get_rptr,
+	.get_wptr = gfx_v6_0_ring_get_wptr,
+	.set_wptr = gfx_v6_0_ring_set_wptr_gfx,
+	.parse_cs = NULL,
+	.emit_ib = gfx_v6_0_ring_emit_ib,
+	.emit_fence = gfx_v6_0_ring_emit_fence,
+	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
+	.emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
+	.emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
+	.emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
+	.test_ring = gfx_v6_0_ring_test_ring,
+	.test_ib = gfx_v6_0_ring_test_ib,
+	.insert_nop = amdgpu_ring_insert_nop,
+	.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
+	.get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx,
+};
+
+static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
+	.get_rptr = gfx_v6_0_ring_get_rptr,
+	.get_wptr = gfx_v6_0_ring_get_wptr,
+	.set_wptr = gfx_v6_0_ring_set_wptr_compute,
+	.parse_cs = NULL,
+	.emit_ib = gfx_v6_0_ring_emit_ib,
+	.emit_fence = gfx_v6_0_ring_emit_fence,
+	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
+	.emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
+	.emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
+	.emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
+	.test_ring = gfx_v6_0_ring_test_ring,
+	.test_ib = gfx_v6_0_ring_test_ib,
+	.insert_nop = amdgpu_ring_insert_nop,
+	.get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute,
+};
+
+static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+		adev->gfx.gfx_ring[i].funcs = &gfx_v6_0_ring_funcs_gfx;
+	for (i = 0; i < adev->gfx.num_compute_rings; i++)
+		adev->gfx.compute_ring[i].funcs = &gfx_v6_0_ring_funcs_compute;
+}
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_eop_irq_funcs = {
+	.set = gfx_v6_0_set_eop_interrupt_state,
+	.process = gfx_v6_0_eop_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_reg_irq_funcs = {
+	.set = gfx_v6_0_set_priv_reg_fault_state,
+	.process = gfx_v6_0_priv_reg_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_inst_irq_funcs = {
+	.set = gfx_v6_0_set_priv_inst_fault_state,
+	.process = gfx_v6_0_priv_inst_irq,
+};
+
+static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
+	adev->gfx.eop_irq.funcs = &gfx_v6_0_eop_irq_funcs;
+
+	adev->gfx.priv_reg_irq.num_types = 1;
+	adev->gfx.priv_reg_irq.funcs = &gfx_v6_0_priv_reg_irq_funcs;
+
+	adev->gfx.priv_inst_irq.num_types = 1;
+	adev->gfx.priv_inst_irq.funcs = &gfx_v6_0_priv_inst_irq_funcs;
+}
+
+static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
+{
+	int i, j, k, counter, active_cu_number = 0;
+	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+
+	memset(cu_info, 0, sizeof(*cu_info));
+
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+			mask = 1;
+			ao_bitmap = 0;
+			counter = 0;
+			bitmap = gfx_v6_0_get_cu_active_bitmap(adev, i, j);
+			cu_info->bitmap[i][j] = bitmap;
+
+			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
+				if (bitmap & mask) {
+					if (counter < 2)
+						ao_bitmap |= mask;
+					counter ++;
+				}
+				mask <<= 1;
+			}
+			active_cu_number += counter;
+			ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+		}
+	}
+
+	cu_info->number = active_cu_number;
+	cu_info->ao_cu_mask = ao_cu_mask;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
similarity index 72%
copy from drivers/gpu/drm/amd/amdgpu/iceland_smum.h
copy to drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
index 5983e31..b9657e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2015 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -21,21 +21,9 @@
  *
  */
 
-#ifndef ICELAND_SMUM_H
-#define ICELAND_SMUM_H
+#ifndef __GFX_V6_0_H__
+#define __GFX_V6_0_H__
 
-#include "ppsmc.h"
-
-extern int iceland_smu_init(struct amdgpu_device *adev);
-extern int iceland_smu_fini(struct amdgpu_device *adev);
-extern int iceland_smu_start(struct amdgpu_device *adev);
-
-struct iceland_smu_private_data
-{
-	uint8_t *header;
-	uint8_t *mec_image;
-	uint32_t header_addr_high;
-	uint32_t header_addr_low;
-};
+extern const struct amd_ip_funcs gfx_v6_0_ip_funcs;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 425413f..71116da 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1645,6 +1645,147 @@
 	return (~data) & mask;
 }
 
+static void
+gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
+{
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
+			  SE_XSEL(1) | SE_YSEL(1);
+		*rconf1 |= 0x0;
+		break;
+	case CHIP_HAWAII:
+		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
+			  RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
+			  PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
+			  SE_YSEL(3);
+		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
+			   SE_PAIR_YSEL(2);
+		break;
+	case CHIP_KAVERI:
+		*rconf |= RB_MAP_PKR0(2);
+		*rconf1 |= 0x0;
+		break;
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+		*rconf |= 0x0;
+		*rconf1 |= 0x0;
+		break;
+	default:
+		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+		break;
+	}
+}
+
+static void
+gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
+					u32 raster_config, u32 raster_config_1,
+					unsigned rb_mask, unsigned num_rb)
+{
+	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
+	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
+	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
+	unsigned rb_per_se = num_rb / num_se;
+	unsigned se_mask[4];
+	unsigned se;
+
+	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
+	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
+	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
+	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
+
+	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
+	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
+	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
+
+	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
+			     (!se_mask[2] && !se_mask[3]))) {
+		raster_config_1 &= ~SE_PAIR_MAP_MASK;
+
+		if (!se_mask[0] && !se_mask[1]) {
+			raster_config_1 |=
+				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
+		} else {
+			raster_config_1 |=
+				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
+		}
+	}
+
+	for (se = 0; se < num_se; se++) {
+		unsigned raster_config_se = raster_config;
+		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
+		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
+		int idx = (se / 2) * 2;
+
+		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
+			raster_config_se &= ~SE_MAP_MASK;
+
+			if (!se_mask[idx]) {
+				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
+			} else {
+				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
+			}
+		}
+
+		pkr0_mask &= rb_mask;
+		pkr1_mask &= rb_mask;
+		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
+			raster_config_se &= ~PKR_MAP_MASK;
+
+			if (!pkr0_mask) {
+				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
+			} else {
+				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
+			}
+		}
+
+		if (rb_per_se >= 2) {
+			unsigned rb0_mask = 1 << (se * rb_per_se);
+			unsigned rb1_mask = rb0_mask << 1;
+
+			rb0_mask &= rb_mask;
+			rb1_mask &= rb_mask;
+			if (!rb0_mask || !rb1_mask) {
+				raster_config_se &= ~RB_MAP_PKR0_MASK;
+
+				if (!rb0_mask) {
+					raster_config_se |=
+						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
+				} else {
+					raster_config_se |=
+						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
+				}
+			}
+
+			if (rb_per_se > 2) {
+				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
+				rb1_mask = rb0_mask << 1;
+				rb0_mask &= rb_mask;
+				rb1_mask &= rb_mask;
+				if (!rb0_mask || !rb1_mask) {
+					raster_config_se &= ~RB_MAP_PKR1_MASK;
+
+					if (!rb0_mask) {
+						raster_config_se |=
+							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
+					} else {
+						raster_config_se |=
+							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
+					}
+				}
+			}
+		}
+
+		/* GRBM_GFX_INDEX has a different offset on CI+ */
+		gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
+		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+	}
+
+	/* GRBM_GFX_INDEX has a different offset on CI+ */
+	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+}
+
 /**
  * gfx_v7_0_setup_rb - setup the RBs on the asic
  *
@@ -1658,9 +1799,11 @@
 {
 	int i, j;
 	u32 data;
+	u32 raster_config = 0, raster_config_1 = 0;
 	u32 active_rbs = 0;
 	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
 					adev->gfx.config.max_sh_per_se;
+	unsigned num_rb_pipes;
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@@ -1672,10 +1815,25 @@
 		}
 	}
 	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-	mutex_unlock(&adev->grbm_idx_mutex);
 
 	adev->gfx.config.backend_enable_mask = active_rbs;
 	adev->gfx.config.num_rbs = hweight32(active_rbs);
+
+	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
+			     adev->gfx.config.max_shader_engines, 16);
+
+	gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1);
+
+	if (!adev->gfx.config.backend_enable_mask ||
+			adev->gfx.config.num_rbs >= num_rb_pipes) {
+		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
+		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+	} else {
+		gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
+							adev->gfx.config.backend_enable_mask,
+							num_rb_pipes);
+	}
+	mutex_unlock(&adev->grbm_idx_mutex);
 }
 
 /**
@@ -2096,6 +2254,25 @@
 	amdgpu_ring_write(ring, control);
 }
 
+static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+{
+	uint32_t dw2 = 0;
+
+	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
+	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
+		/* set load_global_config & load_global_uconfig */
+		dw2 |= 0x8001;
+		/* set load_cs_sh_regs */
+		dw2 |= 0x01000000;
+		/* set load_per_context_state & load_gfx_sh_regs */
+		dw2 |= 0x10002;
+	}
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+	amdgpu_ring_write(ring, dw2);
+	amdgpu_ring_write(ring, 0);
+}
+
 /**
  * gfx_v7_0_ring_test_ib - basic ring IB test
  *
@@ -2443,7 +2620,7 @@
 	return 0;
 }
 
-static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
+static u32 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
 	return ring->adev->wb.wb[ring->rptr_offs];
 }
@@ -2463,11 +2640,6 @@
 	(void)RREG32(mmCP_RB0_WPTR);
 }
 
-static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
-{
-	return ring->adev->wb.wb[ring->rptr_offs];
-}
-
 static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
 {
 	/* XXX check if swapping is necessary on BE */
@@ -4182,6 +4354,41 @@
 	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
 }
 
+static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+{
+	return
+		4; /* gfx_v7_0_ring_emit_ib_gfx */
+}
+
+static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+{
+	return
+		20 + /* gfx_v7_0_ring_emit_gds_switch */
+		7 + /* gfx_v7_0_ring_emit_hdp_flush */
+		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
+		17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+		3; /* gfx_v7_ring_emit_cntxcntl */
+}
+
+static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
+{
+	return
+		4; /* gfx_v7_0_ring_emit_ib_compute */
+}
+
+static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+{
+	return
+		20 + /* gfx_v7_0_ring_emit_gds_switch */
+		7 + /* gfx_v7_0_ring_emit_hdp_flush */
+		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v7_0_ring_emit_vm_flush */
+		7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+}
+
 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
 	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
 	.select_se_sh = &gfx_v7_0_select_se_sh,
@@ -4471,24 +4678,21 @@
 	}
 
 	/* reserve GDS, GWS and OA resource for gfx */
-	r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
-			PAGE_SIZE, true,
-			AMDGPU_GEM_DOMAIN_GDS, 0,
-			NULL, NULL, &adev->gds.gds_gfx_bo);
+	r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
+				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
+				    &adev->gds.gds_gfx_bo, NULL, NULL);
 	if (r)
 		return r;
 
-	r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
-		PAGE_SIZE, true,
-		AMDGPU_GEM_DOMAIN_GWS, 0,
-		NULL, NULL, &adev->gds.gws_gfx_bo);
+	r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
+				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
+				    &adev->gds.gws_gfx_bo, NULL, NULL);
 	if (r)
 		return r;
 
-	r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
-			PAGE_SIZE, true,
-			AMDGPU_GEM_DOMAIN_OA, 0,
-			NULL, NULL, &adev->gds.oa_gfx_bo);
+	r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
+				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
+				    &adev->gds.oa_gfx_bo, NULL, NULL);
 	if (r)
 		return r;
 
@@ -4504,9 +4708,9 @@
 	int i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
-	amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
-	amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
+	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
+	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
 
 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -4937,7 +5141,7 @@
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
-	.get_rptr = gfx_v7_0_ring_get_rptr_gfx,
+	.get_rptr = gfx_v7_0_ring_get_rptr,
 	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
 	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
 	.parse_cs = NULL,
@@ -4952,10 +5156,13 @@
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
+	.get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
+	.get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
-	.get_rptr = gfx_v7_0_ring_get_rptr_compute,
+	.get_rptr = gfx_v7_0_ring_get_rptr,
 	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v7_0_ring_set_wptr_compute,
 	.parse_cs = NULL,
@@ -4970,6 +5177,8 @@
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
+	.get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b818461..ee6a48a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -703,7 +703,10 @@
 						 polaris10_golden_common_all,
 						 (const u32)ARRAY_SIZE(polaris10_golden_common_all));
 		WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
-		if (adev->pdev->revision == 0xc7) {
+		if (adev->pdev->revision == 0xc7 &&
+		    ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
+		     (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) ||
+		     (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1682))) {
 			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
 			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
 		}
@@ -1233,10 +1236,9 @@
 	if (adev->gfx.rlc.clear_state_obj) {
 		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
 		if (unlikely(r != 0))
-			dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
+			dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r);
 		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
 		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
 		amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
 		adev->gfx.rlc.clear_state_obj = NULL;
 	}
@@ -1248,7 +1250,6 @@
 			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
 		amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
 		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
 		amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
 		adev->gfx.rlc.cp_table_obj = NULL;
 	}
@@ -1290,14 +1291,14 @@
 				  &adev->gfx.rlc.clear_state_gpu_addr);
 		if (r) {
 			amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-			dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+			dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r);
 			gfx_v8_0_rlc_fini(adev);
 			return r;
 		}
 
 		r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
 		if (r) {
-			dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
+			dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r);
 			gfx_v8_0_rlc_fini(adev);
 			return r;
 		}
@@ -1332,7 +1333,7 @@
 				  &adev->gfx.rlc.cp_table_gpu_addr);
 		if (r) {
 			amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-			dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
+			dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r);
 			return r;
 		}
 		r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
@@ -1345,7 +1346,6 @@
 
 		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
 		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
 	}
 
 	return 0;
@@ -1361,7 +1361,6 @@
 			dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
 		amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
 		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
-
 		amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
 		adev->gfx.mec.hpd_eop_obj = NULL;
 	}
@@ -2082,24 +2081,21 @@
 	}
 
 	/* reserve GDS, GWS and OA resource for gfx */
-	r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
-			PAGE_SIZE, true,
-			AMDGPU_GEM_DOMAIN_GDS, 0, NULL,
-			NULL, &adev->gds.gds_gfx_bo);
+	r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
+				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
+				    &adev->gds.gds_gfx_bo, NULL, NULL);
 	if (r)
 		return r;
 
-	r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
-		PAGE_SIZE, true,
-		AMDGPU_GEM_DOMAIN_GWS, 0, NULL,
-		NULL, &adev->gds.gws_gfx_bo);
+	r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
+				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
+				    &adev->gds.gws_gfx_bo, NULL, NULL);
 	if (r)
 		return r;
 
-	r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
-			PAGE_SIZE, true,
-			AMDGPU_GEM_DOMAIN_OA, 0, NULL,
-			NULL, &adev->gds.oa_gfx_bo);
+	r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
+				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
+				    &adev->gds.oa_gfx_bo, NULL, NULL);
 	if (r)
 		return r;
 
@@ -2117,9 +2113,9 @@
 	int i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
-	amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
-	amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
+	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
+	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
 
 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -2127,9 +2123,7 @@
 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
 	gfx_v8_0_mec_fini(adev);
-
 	gfx_v8_0_rlc_fini(adev);
-
 	gfx_v8_0_free_microcode(adev);
 
 	return 0;
@@ -3465,19 +3459,16 @@
 	else
 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
 
-	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+	if (se_num == 0xffffffff)
 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
-	} else if (se_num == 0xffffffff) {
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
-	} else if (sh_num == 0xffffffff) {
+	else
+		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
+
+	if (sh_num == 0xffffffff)
 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
-	} else {
+	else
 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
-	}
+
 	WREG32(mmGRBM_GFX_INDEX, data);
 }
 
@@ -3490,11 +3481,10 @@
 {
 	u32 data, mask;
 
-	data = RREG32(mmCC_RB_BACKEND_DISABLE);
-	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+	data =  RREG32(mmCC_RB_BACKEND_DISABLE) |
+		RREG32(mmGC_USER_RB_BACKEND_DISABLE);
 
-	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
-	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
+	data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
 
 	mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se /
 				       adev->gfx.config.max_sh_per_se);
@@ -3502,13 +3492,163 @@
 	return (~data) & mask;
 }
 
+static void
+gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
+{
+	switch (adev->asic_type) {
+	case CHIP_FIJI:
+		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
+			  RB_XSEL2(1) | PKR_MAP(2) |
+			  PKR_XSEL(1) | PKR_YSEL(1) |
+			  SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
+		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
+			   SE_PAIR_YSEL(2);
+		break;
+	case CHIP_TONGA:
+	case CHIP_POLARIS10:
+		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
+			  SE_XSEL(1) | SE_YSEL(1);
+		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
+			   SE_PAIR_YSEL(2);
+		break;
+	case CHIP_TOPAZ:
+	case CHIP_CARRIZO:
+		*rconf |= RB_MAP_PKR0(2);
+		*rconf1 |= 0x0;
+		break;
+	case CHIP_POLARIS11:
+		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
+			  SE_XSEL(1) | SE_YSEL(1);
+		*rconf1 |= 0x0;
+		break;
+	case CHIP_STONEY:
+		*rconf |= 0x0;
+		*rconf1 |= 0x0;
+		break;
+	default:
+		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+		break;
+	}
+}
+
+static void
+gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
+					u32 raster_config, u32 raster_config_1,
+					unsigned rb_mask, unsigned num_rb)
+{
+	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
+	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
+	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
+	unsigned rb_per_se = num_rb / num_se;
+	unsigned se_mask[4];
+	unsigned se;
+
+	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
+	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
+	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
+	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
+
+	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
+	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
+	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
+
+	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
+			     (!se_mask[2] && !se_mask[3]))) {
+		raster_config_1 &= ~SE_PAIR_MAP_MASK;
+
+		if (!se_mask[0] && !se_mask[1]) {
+			raster_config_1 |=
+				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
+		} else {
+			raster_config_1 |=
+				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
+		}
+	}
+
+	for (se = 0; se < num_se; se++) {
+		unsigned raster_config_se = raster_config;
+		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
+		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
+		int idx = (se / 2) * 2;
+
+		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
+			raster_config_se &= ~SE_MAP_MASK;
+
+			if (!se_mask[idx]) {
+				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
+			} else {
+				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
+			}
+		}
+
+		pkr0_mask &= rb_mask;
+		pkr1_mask &= rb_mask;
+		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
+			raster_config_se &= ~PKR_MAP_MASK;
+
+			if (!pkr0_mask) {
+				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
+			} else {
+				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
+			}
+		}
+
+		if (rb_per_se >= 2) {
+			unsigned rb0_mask = 1 << (se * rb_per_se);
+			unsigned rb1_mask = rb0_mask << 1;
+
+			rb0_mask &= rb_mask;
+			rb1_mask &= rb_mask;
+			if (!rb0_mask || !rb1_mask) {
+				raster_config_se &= ~RB_MAP_PKR0_MASK;
+
+				if (!rb0_mask) {
+					raster_config_se |=
+						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
+				} else {
+					raster_config_se |=
+						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
+				}
+			}
+
+			if (rb_per_se > 2) {
+				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
+				rb1_mask = rb0_mask << 1;
+				rb0_mask &= rb_mask;
+				rb1_mask &= rb_mask;
+				if (!rb0_mask || !rb1_mask) {
+					raster_config_se &= ~RB_MAP_PKR1_MASK;
+
+					if (!rb0_mask) {
+						raster_config_se |=
+							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
+					} else {
+						raster_config_se |=
+							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
+					}
+				}
+			}
+		}
+
+		/* GRBM_GFX_INDEX has a different offset on VI */
+		gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
+		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+	}
+
+	/* GRBM_GFX_INDEX has a different offset on VI */
+	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+}
+
 static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
 {
 	int i, j;
 	u32 data;
+	u32 raster_config = 0, raster_config_1 = 0;
 	u32 active_rbs = 0;
 	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
 					adev->gfx.config.max_sh_per_se;
+	unsigned num_rb_pipes;
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@@ -3520,10 +3660,26 @@
 		}
 	}
 	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-	mutex_unlock(&adev->grbm_idx_mutex);
 
 	adev->gfx.config.backend_enable_mask = active_rbs;
 	adev->gfx.config.num_rbs = hweight32(active_rbs);
+
+	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
+			     adev->gfx.config.max_shader_engines, 16);
+
+	gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);
+
+	if (!adev->gfx.config.backend_enable_mask ||
+			adev->gfx.config.num_rbs >= num_rb_pipes) {
+		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
+		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+	} else {
+		gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
+							adev->gfx.config.backend_enable_mask,
+							num_rb_pipes);
+	}
+
+	mutex_unlock(&adev->grbm_idx_mutex);
 }
 
 /**
@@ -3576,16 +3732,12 @@
 	u32 tmp;
 	int i;
 
-	tmp = RREG32(mmGRBM_CNTL);
-	tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
-	WREG32(mmGRBM_CNTL, tmp);
-
+	WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
 	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
 
 	gfx_v8_0_tiling_mode_table_init(adev);
-
 	gfx_v8_0_setup_rb(adev);
 	gfx_v8_0_get_cu_info(adev);
 
@@ -3769,9 +3921,7 @@
 				sizeof(indirect_start_offsets)/sizeof(int));
 
 	/* save and restore list */
-	temp = RREG32(mmRLC_SRM_CNTL);
-	temp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
-	WREG32(mmRLC_SRM_CNTL, temp);
+	WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
 
 	WREG32(mmRLC_SRM_ARAM_ADDR, 0);
 	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
@@ -3808,11 +3958,7 @@
 
 static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
 {
-	uint32_t data;
-
-	data = RREG32(mmRLC_SRM_CNTL);
-	data |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
-	WREG32(mmRLC_SRM_CNTL, data);
+	WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1);
 }
 
 static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
@@ -3822,75 +3968,34 @@
 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
 			      AMD_PG_SUPPORT_GFX_SMG |
 			      AMD_PG_SUPPORT_GFX_DMG)) {
-		data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
-		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
-		data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
-		WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
+		WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
 
-		data = 0;
-		data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
-		data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
-		data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
-		data |= (0x10 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
+		data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
+		data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
+		data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
+		data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
 		WREG32(mmRLC_PG_DELAY, data);
 
-		data = RREG32(mmRLC_PG_DELAY_2);
-		data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
-		data |= (0x3 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
-		WREG32(mmRLC_PG_DELAY_2, data);
-
-		data = RREG32(mmRLC_AUTO_PG_CTRL);
-		data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
-		data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
-		WREG32(mmRLC_AUTO_PG_CTRL, data);
+		WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
+		WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
 	}
 }
 
 static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
 						bool enable)
 {
-	u32 data, orig;
-
-	orig = data = RREG32(mmRLC_PG_CNTL);
-
-	if (enable)
-		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
-	else
-		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
-
-	if (orig != data)
-		WREG32(mmRLC_PG_CNTL, data);
+	WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0);
 }
 
 static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
 						  bool enable)
 {
-	u32 data, orig;
-
-	orig = data = RREG32(mmRLC_PG_CNTL);
-
-	if (enable)
-		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
-	else
-		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
-
-	if (orig != data)
-		WREG32(mmRLC_PG_CNTL, data);
+	WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0);
 }
 
 static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
 {
-	u32 data, orig;
-
-	orig = data = RREG32(mmRLC_PG_CNTL);
-
-	if (enable)
-		data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
-	else
-		data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
-
-	if (orig != data)
-		WREG32(mmRLC_PG_CNTL, data);
+	WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 1 : 0);
 }
 
 static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
@@ -3927,36 +4032,26 @@
 	}
 }
 
-void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
+static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
 {
-	u32 tmp = RREG32(mmRLC_CNTL);
-
-	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
-	WREG32(mmRLC_CNTL, tmp);
+	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);
 
 	gfx_v8_0_enable_gui_idle_interrupt(adev, false);
-
 	gfx_v8_0_wait_for_rlc_serdes(adev);
 }
 
 static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
 {
-	u32 tmp = RREG32(mmGRBM_SOFT_RESET);
-
-	tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
-	WREG32(mmGRBM_SOFT_RESET, tmp);
+	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
 	udelay(50);
-	tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
-	WREG32(mmGRBM_SOFT_RESET, tmp);
+
+	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
 	udelay(50);
 }
 
 static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
 {
-	u32 tmp = RREG32(mmRLC_CNTL);
-
-	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1);
-	WREG32(mmRLC_CNTL, tmp);
+	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1);
 
 	/* carrizo do enable cp interrupt after cp inited */
 	if (!(adev->flags & AMD_IS_APU))
@@ -3992,20 +4087,26 @@
 static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
 {
 	int r;
+	u32 tmp;
 
 	gfx_v8_0_rlc_stop(adev);
 
 	/* disable CG */
-	WREG32(mmRLC_CGCG_CGLS_CTRL, 0);
+	tmp = RREG32(mmRLC_CGCG_CGLS_CTRL);
+	tmp &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
+		 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
+	WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
 	if (adev->asic_type == CHIP_POLARIS11 ||
-		adev->asic_type == CHIP_POLARIS10)
-		WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0);
+	    adev->asic_type == CHIP_POLARIS10) {
+		tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
+		tmp &= ~0x3;
+		WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
+	}
 
 	/* disable PG */
 	WREG32(mmRLC_PG_CNTL, 0);
 
 	gfx_v8_0_rlc_reset(adev);
-
 	gfx_v8_0_init_pg(adev);
 
 	if (!adev->pp_enabled) {
@@ -4300,12 +4401,10 @@
 	gfx_v8_0_cp_gfx_start(adev);
 	ring->ready = true;
 	r = amdgpu_ring_test_ring(ring);
-	if (r) {
+	if (r)
 		ring->ready = false;
-		return r;
-	}
 
-	return 0;
+	return r;
 }
 
 static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
@@ -4980,7 +5079,6 @@
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	gfx_v8_0_init_golden_registers(adev);
-
 	gfx_v8_0_gpu_init(adev);
 
 	r = gfx_v8_0_rlc_resume(adev);
@@ -4988,8 +5086,6 @@
 		return r;
 
 	r = gfx_v8_0_cp_resume(adev);
-	if (r)
-		return r;
 
 	return r;
 }
@@ -5037,25 +5133,22 @@
 static int gfx_v8_0_wait_for_idle(void *handle)
 {
 	unsigned i;
-	u32 tmp;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		/* read MC_STATUS */
-		tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
-
-		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
+		if (gfx_v8_0_is_idle(handle))
 			return 0;
+
 		udelay(1);
 	}
 	return -ETIMEDOUT;
 }
 
-static int gfx_v8_0_soft_reset(void *handle)
+static bool gfx_v8_0_check_soft_reset(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
 	u32 tmp;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* GRBM_STATUS */
 	tmp = RREG32(mmGRBM_STATUS);
@@ -5064,16 +5157,12 @@
 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
-		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
+		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK |
+		   GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
 						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
-	}
-
-	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
-		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
-						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
 						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
 	}
@@ -5084,73 +5173,200 @@
 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
 						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
 
+	if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) ||
+	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) ||
+	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) {
+		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+						SOFT_RESET_CPF, 1);
+		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+						SOFT_RESET_CPC, 1);
+		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+						SOFT_RESET_CPG, 1);
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
+						SOFT_RESET_GRBM, 1);
+	}
+
 	/* SRBM_STATUS */
 	tmp = RREG32(mmSRBM_STATUS);
 	if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
 						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
+	if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY))
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+						SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
 
 	if (grbm_soft_reset || srbm_soft_reset) {
-		/* stop the rlc */
-		gfx_v8_0_rlc_stop(adev);
+		adev->gfx.grbm_soft_reset = grbm_soft_reset;
+		adev->gfx.srbm_soft_reset = srbm_soft_reset;
+		return true;
+	} else {
+		adev->gfx.grbm_soft_reset = 0;
+		adev->gfx.srbm_soft_reset = 0;
+		return false;
+	}
+}
 
+static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
+				  struct amdgpu_ring *ring)
+{
+	int i;
+
+	vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+	if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
+		u32 tmp;
+		tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
+		tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST,
+				    DEQUEUE_REQ, 2);
+		WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp);
+		for (i = 0; i < adev->usec_timeout; i++) {
+			if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
+				break;
+			udelay(1);
+		}
+	}
+}
+
+static int gfx_v8_0_pre_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+
+	if ((!adev->gfx.grbm_soft_reset) &&
+	    (!adev->gfx.srbm_soft_reset))
+		return 0;
+
+	grbm_soft_reset = adev->gfx.grbm_soft_reset;
+	srbm_soft_reset = adev->gfx.srbm_soft_reset;
+
+	/* stop the rlc */
+	gfx_v8_0_rlc_stop(adev);
+
+	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
 		/* Disable GFX parsing/prefetching */
 		gfx_v8_0_cp_gfx_enable(adev, false);
 
+	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
+		int i;
+
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+			gfx_v8_0_inactive_hqd(adev, ring);
+		}
 		/* Disable MEC parsing/prefetching */
 		gfx_v8_0_cp_compute_enable(adev, false);
+	}
 
-		if (grbm_soft_reset || srbm_soft_reset) {
-			tmp = RREG32(mmGMCON_DEBUG);
-			tmp = REG_SET_FIELD(tmp,
-					    GMCON_DEBUG, GFX_STALL, 1);
-			tmp = REG_SET_FIELD(tmp,
-					    GMCON_DEBUG, GFX_CLEAR, 1);
-			WREG32(mmGMCON_DEBUG, tmp);
+       return 0;
+}
 
-			udelay(50);
-		}
+static int gfx_v8_0_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+	u32 tmp;
 
-		if (grbm_soft_reset) {
-			tmp = RREG32(mmGRBM_SOFT_RESET);
-			tmp |= grbm_soft_reset;
-			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
-			WREG32(mmGRBM_SOFT_RESET, tmp);
-			tmp = RREG32(mmGRBM_SOFT_RESET);
+	if ((!adev->gfx.grbm_soft_reset) &&
+	    (!adev->gfx.srbm_soft_reset))
+		return 0;
 
-			udelay(50);
+	grbm_soft_reset = adev->gfx.grbm_soft_reset;
+	srbm_soft_reset = adev->gfx.srbm_soft_reset;
 
-			tmp &= ~grbm_soft_reset;
-			WREG32(mmGRBM_SOFT_RESET, tmp);
-			tmp = RREG32(mmGRBM_SOFT_RESET);
-		}
-
-		if (srbm_soft_reset) {
-			tmp = RREG32(mmSRBM_SOFT_RESET);
-			tmp |= srbm_soft_reset;
-			dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
-			WREG32(mmSRBM_SOFT_RESET, tmp);
-			tmp = RREG32(mmSRBM_SOFT_RESET);
-
-			udelay(50);
-
-			tmp &= ~srbm_soft_reset;
-			WREG32(mmSRBM_SOFT_RESET, tmp);
-			tmp = RREG32(mmSRBM_SOFT_RESET);
-		}
-
-		if (grbm_soft_reset || srbm_soft_reset) {
-			tmp = RREG32(mmGMCON_DEBUG);
-			tmp = REG_SET_FIELD(tmp,
-					    GMCON_DEBUG, GFX_STALL, 0);
-			tmp = REG_SET_FIELD(tmp,
-					    GMCON_DEBUG, GFX_CLEAR, 0);
-			WREG32(mmGMCON_DEBUG, tmp);
-		}
-
-		/* Wait a little for things to settle down */
+	if (grbm_soft_reset || srbm_soft_reset) {
+		tmp = RREG32(mmGMCON_DEBUG);
+		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1);
+		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1);
+		WREG32(mmGMCON_DEBUG, tmp);
 		udelay(50);
 	}
+
+	if (grbm_soft_reset) {
+		tmp = RREG32(mmGRBM_SOFT_RESET);
+		tmp |= grbm_soft_reset;
+		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(mmGRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmGRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~grbm_soft_reset;
+		WREG32(mmGRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmGRBM_SOFT_RESET);
+	}
+
+	if (srbm_soft_reset) {
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+	}
+
+	if (grbm_soft_reset || srbm_soft_reset) {
+		tmp = RREG32(mmGMCON_DEBUG);
+		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0);
+		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0);
+		WREG32(mmGMCON_DEBUG, tmp);
+	}
+
+	/* Wait a little for things to settle down */
+	udelay(50);
+
+	return 0;
+}
+
+static void gfx_v8_0_init_hqd(struct amdgpu_device *adev,
+			      struct amdgpu_ring *ring)
+{
+	vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+	WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
+	WREG32(mmCP_HQD_PQ_RPTR, 0);
+	WREG32(mmCP_HQD_PQ_WPTR, 0);
+	vi_srbm_select(adev, 0, 0, 0, 0);
+}
+
+static int gfx_v8_0_post_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+
+	if ((!adev->gfx.grbm_soft_reset) &&
+	    (!adev->gfx.srbm_soft_reset))
+		return 0;
+
+	grbm_soft_reset = adev->gfx.grbm_soft_reset;
+	srbm_soft_reset = adev->gfx.srbm_soft_reset;
+
+	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
+		gfx_v8_0_cp_gfx_resume(adev);
+
+	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
+	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
+		int i;
+
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+			gfx_v8_0_init_hqd(adev, ring);
+		}
+		gfx_v8_0_cp_compute_resume(adev);
+	}
+	gfx_v8_0_rlc_start(adev);
+
 	return 0;
 }
 
@@ -5269,8 +5485,6 @@
 static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
 						       bool enable)
 {
-	uint32_t data, temp;
-
 	if (adev->asic_type == CHIP_POLARIS11)
 		/* Send msg to SMU via Powerplay */
 		amdgpu_set_powergating_state(adev,
@@ -5278,83 +5492,35 @@
 					     enable ?
 					     AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
 
-	temp = data = RREG32(mmRLC_PG_CNTL);
-	/* Enable static MGPG */
-	if (enable)
-		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
-	else
-		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
-
-	if (temp != data)
-		WREG32(mmRLC_PG_CNTL, data);
+	WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
 }
 
 static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
 							bool enable)
 {
-	uint32_t data, temp;
-
-	temp = data = RREG32(mmRLC_PG_CNTL);
-	/* Enable dynamic MGPG */
-	if (enable)
-		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
-	else
-		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
-
-	if (temp != data)
-		WREG32(mmRLC_PG_CNTL, data);
+	WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0);
 }
 
 static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
 		bool enable)
 {
-	uint32_t data, temp;
-
-	temp = data = RREG32(mmRLC_PG_CNTL);
-	/* Enable quick PG */
-	if (enable)
-		data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
-	else
-		data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
-
-	if (temp != data)
-		WREG32(mmRLC_PG_CNTL, data);
+	WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0);
 }
 
 static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
 					  bool enable)
 {
-	u32 data, orig;
-
-	orig = data = RREG32(mmRLC_PG_CNTL);
-
-	if (enable)
-		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
-	else
-		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
-
-	if (orig != data)
-		WREG32(mmRLC_PG_CNTL, data);
+	WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0);
 }
 
 static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
 						bool enable)
 {
-	u32 data, orig;
-
-	orig = data = RREG32(mmRLC_PG_CNTL);
-
-	if (enable)
-		data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
-	else
-		data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
-
-	if (orig != data)
-		WREG32(mmRLC_PG_CNTL, data);
+	WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0);
 
 	/* Read any GFX register to wake up GFX. */
 	if (!enable)
-		data = RREG32(mmDB_RENDER_CONTROL);
+		RREG32(mmDB_RENDER_CONTROL);
 }
 
 static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
@@ -5430,15 +5596,15 @@
 
 	data = RREG32(mmRLC_SERDES_WR_CTRL);
 	if (adev->asic_type == CHIP_STONEY)
-			data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
-			RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
-			RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
-			RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
-			RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
-			RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
-			RLC_SERDES_WR_CTRL__POWER_UP_MASK |
-			RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
-			RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
+		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
+			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
+			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
+			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
+			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
+			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
+			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
+			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
+			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
 	else
 		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
 			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
@@ -5461,10 +5627,10 @@
 
 #define MSG_ENTER_RLC_SAFE_MODE     1
 #define MSG_EXIT_RLC_SAFE_MODE      0
-
-#define RLC_GPR_REG2__REQ_MASK           0x00000001
-#define RLC_GPR_REG2__MESSAGE__SHIFT     0x00000001
-#define RLC_GPR_REG2__MESSAGE_MASK       0x0000001e
+#define RLC_GPR_REG2__REQ_MASK 0x00000001
+#define RLC_GPR_REG2__REQ__SHIFT 0
+#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
+#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
 
 static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev)
 {
@@ -5494,7 +5660,7 @@
 		}
 
 		for (i = 0; i < adev->usec_timeout; i++) {
-			if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0)
+			if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ))
 				break;
 			udelay(1);
 		}
@@ -5522,7 +5688,7 @@
 	}
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0)
+		if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ))
 			break;
 		udelay(1);
 	}
@@ -5554,7 +5720,7 @@
 		}
 
 		for (i = 0; i < adev->usec_timeout; i++) {
-			if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0)
+			if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
 				break;
 			udelay(1);
 		}
@@ -5581,7 +5747,7 @@
 	}
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0)
+		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
 			break;
 		udelay(1);
 	}
@@ -5622,21 +5788,12 @@
 	/* It is disabled by HW by default */
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
-			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
+			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS)
 				/* 1 - RLC memory Light sleep */
-				temp = data = RREG32(mmRLC_MEM_SLP_CNTL);
-				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
-				if (temp != data)
-					WREG32(mmRLC_MEM_SLP_CNTL, data);
-			}
+				WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1);
 
-			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
-				/* 2 - CP memory Light sleep */
-				temp = data = RREG32(mmCP_MEM_SLP_CNTL);
-				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
-				if (temp != data)
-					WREG32(mmCP_MEM_SLP_CNTL, data);
-			}
+			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS)
+				WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1);
 		}
 
 		/* 3 - RLC_CGTT_MGCG_OVERRIDE */
@@ -5834,6 +5991,76 @@
 	return 0;
 }
 
+static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
+					  enum amd_clockgating_state state)
+{
+	uint32_t msg_id, pp_state;
+	void *pp_handle = adev->powerplay.pp_handle;
+
+	if (state == AMD_CG_STATE_UNGATE)
+		pp_state = 0;
+	else
+		pp_state = PP_STATE_CG | PP_STATE_LS;
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+			PP_BLOCK_GFX_CG,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+			PP_BLOCK_GFX_MG,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	return 0;
+}
+
+static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
+					  enum amd_clockgating_state state)
+{
+	uint32_t msg_id, pp_state;
+	void *pp_handle = adev->powerplay.pp_handle;
+
+	if (state == AMD_CG_STATE_UNGATE)
+		pp_state = 0;
+	else
+		pp_state = PP_STATE_CG | PP_STATE_LS;
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+			PP_BLOCK_GFX_CG,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+			PP_BLOCK_GFX_3D,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+			PP_BLOCK_GFX_MG,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+			PP_BLOCK_GFX_RLC,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+			PP_BLOCK_GFX_CP,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	return 0;
+}
+
 static int gfx_v8_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
@@ -5846,33 +6073,33 @@
 		gfx_v8_0_update_gfx_clock_gating(adev,
 						 state == AMD_CG_STATE_GATE ? true : false);
 		break;
+	case CHIP_TONGA:
+		gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
+		break;
+	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
+		gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
+		break;
 	default:
 		break;
 	}
 	return 0;
 }
 
-static u32 gfx_v8_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
+static u32 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
-	u32 rptr;
-
-	rptr = ring->adev->wb.wb[ring->rptr_offs];
-
-	return rptr;
+	return ring->adev->wb.wb[ring->rptr_offs];
 }
 
 static u32 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	u32 wptr;
 
 	if (ring->use_doorbell)
 		/* XXX check if swapping is necessary on BE */
-		wptr = ring->adev->wb.wb[ring->wptr_offs];
+		return ring->adev->wb.wb[ring->wptr_offs];
 	else
-		wptr = RREG32(mmCP_RB0_WPTR);
-
-	return wptr;
+		return RREG32(mmCP_RB0_WPTR);
 }
 
 static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
@@ -5939,12 +6166,6 @@
 {
 	u32 header, control = 0;
 
-	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
-	if (ctx_switch) {
-		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-		amdgpu_ring_write(ring, 0);
-	}
-
 	if (ib->flags & AMDGPU_IB_FLAG_CE)
 		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
 	else
@@ -5971,9 +6192,9 @@
 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
 	amdgpu_ring_write(ring,
 #ifdef __BIG_ENDIAN
-					  (2 << 0) |
+				(2 << 0) |
 #endif
-					  (ib->gpu_addr & 0xFFFFFFFC));
+				(ib->gpu_addr & 0xFFFFFFFC));
 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
 	amdgpu_ring_write(ring, control);
 }
@@ -6014,14 +6235,6 @@
 	amdgpu_ring_write(ring, seq);
 	amdgpu_ring_write(ring, 0xffffffff);
 	amdgpu_ring_write(ring, 4); /* poll interval */
-
-	if (usepfp) {
-		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
-		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-		amdgpu_ring_write(ring, 0);
-		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-		amdgpu_ring_write(ring, 0);
-	}
 }
 
 static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
@@ -6029,6 +6242,10 @@
 {
 	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
 
+	/* GFX8 emits 128 dw nop to prevent DE do vm_flush before CE finish CEIB */
+	if (usepfp)
+		amdgpu_ring_insert_nop(ring, 128);
+
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
 				 WRITE_DATA_DST_SEL(0)) |
@@ -6068,18 +6285,11 @@
 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
 		amdgpu_ring_write(ring, 0x0);
-		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-		amdgpu_ring_write(ring, 0);
-		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-		amdgpu_ring_write(ring, 0);
+		/* GFX8 emits 128 dw nop to prevent CE access VM before vm_flush finish */
+		amdgpu_ring_insert_nop(ring, 128);
 	}
 }
 
-static u32 gfx_v8_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
-{
-	return ring->adev->wb.wb[ring->rptr_offs];
-}
-
 static u32 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
 {
 	return ring->adev->wb.wb[ring->wptr_offs];
@@ -6115,36 +6325,88 @@
 	amdgpu_ring_write(ring, upper_32_bits(seq));
 }
 
+static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+	amdgpu_ring_write(ring, 0);
+}
+
+static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+{
+	uint32_t dw2 = 0;
+
+	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
+	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
+		/* set load_global_config & load_global_uconfig */
+		dw2 |= 0x8001;
+		/* set load_cs_sh_regs */
+		dw2 |= 0x01000000;
+		/* set load_per_context_state & load_gfx_sh_regs for GFX */
+		dw2 |= 0x10002;
+
+		/* set load_ce_ram if preamble presented */
+		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
+			dw2 |= 0x10000000;
+	} else {
+		/* still load_ce_ram if this is the first time preamble presented
+		 * although there is no context switch happens.
+		 */
+		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
+			dw2 |= 0x10000000;
+	}
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+	amdgpu_ring_write(ring, dw2);
+	amdgpu_ring_write(ring, 0);
+}
+
+static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+{
+	return
+		4; /* gfx_v8_0_ring_emit_ib_gfx */
+}
+
+static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+{
+	return
+		20 + /* gfx_v8_0_ring_emit_gds_switch */
+		7 + /* gfx_v8_0_ring_emit_hdp_flush */
+		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+		6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+		256 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
+		2 + /* gfx_v8_ring_emit_sb */
+		3; /* gfx_v8_ring_emit_cntxcntl */
+}
+
+static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
+{
+	return
+		4; /* gfx_v8_0_ring_emit_ib_compute */
+}
+
+static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+{
+	return
+		20 + /* gfx_v8_0_ring_emit_gds_switch */
+		7 + /* gfx_v8_0_ring_emit_hdp_flush */
+		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v8_0_ring_emit_vm_flush */
+		7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+}
+
 static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
 						 enum amdgpu_interrupt_state state)
 {
-	u32 cp_int_cntl;
-
-	switch (state) {
-	case AMDGPU_IRQ_STATE_DISABLE:
-		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
-		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-					    TIME_STAMP_INT_ENABLE, 0);
-		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
-		break;
-	case AMDGPU_IRQ_STATE_ENABLE:
-		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
-		cp_int_cntl =
-			REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-				      TIME_STAMP_INT_ENABLE, 1);
-		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
-		break;
-	default:
-		break;
-	}
+	WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE,
+		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 }
 
 static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
 						     int me, int pipe,
 						     enum amdgpu_interrupt_state state)
 {
-	u32 mec_int_cntl, mec_int_cntl_reg;
-
 	/*
 	 * amdgpu controls only pipe 0 of MEC1. That's why this function only
 	 * handles the setting of interrupts for this specific pipe. All other
@@ -6154,7 +6416,6 @@
 	if (me == 1) {
 		switch (pipe) {
 		case 0:
-			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
 			break;
 		default:
 			DRM_DEBUG("invalid pipe %d\n", pipe);
@@ -6165,22 +6426,8 @@
 		return;
 	}
 
-	switch (state) {
-	case AMDGPU_IRQ_STATE_DISABLE:
-		mec_int_cntl = RREG32(mec_int_cntl_reg);
-		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
-					     TIME_STAMP_INT_ENABLE, 0);
-		WREG32(mec_int_cntl_reg, mec_int_cntl);
-		break;
-	case AMDGPU_IRQ_STATE_ENABLE:
-		mec_int_cntl = RREG32(mec_int_cntl_reg);
-		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
-					     TIME_STAMP_INT_ENABLE, 1);
-		WREG32(mec_int_cntl_reg, mec_int_cntl);
-		break;
-	default:
-		break;
-	}
+	WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE,
+		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 }
 
 static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
@@ -6188,24 +6435,8 @@
 					     unsigned type,
 					     enum amdgpu_interrupt_state state)
 {
-	u32 cp_int_cntl;
-
-	switch (state) {
-	case AMDGPU_IRQ_STATE_DISABLE:
-		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
-		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-					    PRIV_REG_INT_ENABLE, 0);
-		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
-		break;
-	case AMDGPU_IRQ_STATE_ENABLE:
-		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
-		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-					    PRIV_REG_INT_ENABLE, 1);
-		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
-		break;
-	default:
-		break;
-	}
+	WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
+		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 
 	return 0;
 }
@@ -6215,24 +6446,8 @@
 					      unsigned type,
 					      enum amdgpu_interrupt_state state)
 {
-	u32 cp_int_cntl;
-
-	switch (state) {
-	case AMDGPU_IRQ_STATE_DISABLE:
-		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
-		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-					    PRIV_INSTR_INT_ENABLE, 0);
-		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
-		break;
-	case AMDGPU_IRQ_STATE_ENABLE:
-		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
-		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
-					    PRIV_INSTR_INT_ENABLE, 1);
-		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
-		break;
-	default:
-		break;
-	}
+	WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE,
+		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
 
 	return 0;
 }
@@ -6338,13 +6553,16 @@
 	.resume = gfx_v8_0_resume,
 	.is_idle = gfx_v8_0_is_idle,
 	.wait_for_idle = gfx_v8_0_wait_for_idle,
+	.check_soft_reset = gfx_v8_0_check_soft_reset,
+	.pre_soft_reset = gfx_v8_0_pre_soft_reset,
 	.soft_reset = gfx_v8_0_soft_reset,
+	.post_soft_reset = gfx_v8_0_post_soft_reset,
 	.set_clockgating_state = gfx_v8_0_set_clockgating_state,
 	.set_powergating_state = gfx_v8_0_set_powergating_state,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
-	.get_rptr = gfx_v8_0_ring_get_rptr_gfx,
+	.get_rptr = gfx_v8_0_ring_get_rptr,
 	.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
 	.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
 	.parse_cs = NULL,
@@ -6359,10 +6577,14 @@
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.emit_switch_buffer = gfx_v8_ring_emit_sb,
+	.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
+	.get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx,
+	.get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
-	.get_rptr = gfx_v8_0_ring_get_rptr_compute,
+	.get_rptr = gfx_v8_0_ring_get_rptr,
 	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
 	.parse_cs = NULL,
@@ -6377,6 +6599,8 @@
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute,
+	.get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -6479,15 +6703,12 @@
 {
 	u32 data, mask;
 
-	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
-	data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
-
-	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
-	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+	data =  RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
+		RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
 
 	mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
 
-	return (~data) & mask;
+	return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
 }
 
 static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
index bc82c79..ebed1f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
@@ -26,6 +26,4 @@
 
 extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
 
-void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
new file mode 100644
index 0000000..b13c8aa
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -0,0 +1,1071 @@
+
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "gmc_v6_0.h"
+#include "amdgpu_ucode.h"
+#include "si/sid.h"
+
+static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+static int gmc_v6_0_wait_for_idle(void *handle);
+
+MODULE_FIRMWARE("radeon/tahiti_mc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
+MODULE_FIRMWARE("radeon/verde_mc.bin");
+MODULE_FIRMWARE("radeon/oland_mc.bin");
+
+static const u32 crtc_offsets[6] =
+{
+	SI_CRTC0_REGISTER_OFFSET,
+	SI_CRTC1_REGISTER_OFFSET,
+	SI_CRTC2_REGISTER_OFFSET,
+	SI_CRTC3_REGISTER_OFFSET,
+	SI_CRTC4_REGISTER_OFFSET,
+	SI_CRTC5_REGISTER_OFFSET
+};
+
+static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
+			     struct amdgpu_mode_mc_save *save)
+{
+	u32 blackout;
+
+	if (adev->mode_info.num_crtc)
+		amdgpu_display_stop_mc_access(adev, save);
+
+	gmc_v6_0_wait_for_idle((void *)adev);
+
+	blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+	if (REG_GET_FIELD(blackout, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE) != 1) {
+		/* Block CPU access */
+		WREG32(BIF_FB_EN, 0);
+		/* blackout the MC */
+		blackout = REG_SET_FIELD(blackout,
+					 mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
+		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+	}
+	/* wait for the MC to settle */
+	udelay(100);
+
+}
+
+static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
+			       struct amdgpu_mode_mc_save *save)
+{
+	u32 tmp;
+
+	/* unblackout the MC */
+	tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
+	tmp = REG_SET_FIELD(tmp, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
+	WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
+	/* allow CPU access */
+	tmp = REG_SET_FIELD(0, mmBIF_FB_EN, xxFB_READ_EN, 1);
+	tmp = REG_SET_FIELD(tmp, mmBIF_FB_EN, xxFB_WRITE_EN, 1);
+	WREG32(BIF_FB_EN, tmp);
+
+	if (adev->mode_info.num_crtc)
+		amdgpu_display_resume_mc_access(adev, save);
+
+}
+
+static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
+{
+	const char *chip_name;
+	char fw_name[30];
+	int err;
+
+	DRM_DEBUG("\n");
+
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+		chip_name = "tahiti";
+		break;
+	case CHIP_PITCAIRN:
+		chip_name = "pitcairn";
+		break;
+	case CHIP_VERDE:
+		chip_name = "verde";
+		break;
+	case CHIP_OLAND:
+		chip_name = "oland";
+		break;
+	case CHIP_HAINAN:
+		chip_name = "hainan";
+		break;
+	default: BUG();
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+
+	err = amdgpu_ucode_validate(adev->mc.fw);
+
+out:
+	if (err) {
+		dev_err(adev->dev,
+		       "si_mc: Failed to load firmware \"%s\"\n",
+		       fw_name);
+		release_firmware(adev->mc.fw);
+		adev->mc.fw = NULL;
+	}
+	return err;
+}
+
+static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
+{
+	const __le32 *new_fw_data = NULL;
+	u32 running;
+	const __le32 *new_io_mc_regs = NULL;
+	int i, regs_size, ucode_size;
+	const struct mc_firmware_header_v1_0 *hdr;
+
+	if (!adev->mc.fw)
+		return -EINVAL;
+
+	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+
+	amdgpu_ucode_print_mc_hdr(&hdr->header);
+
+	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
+	new_io_mc_regs = (const __le32 *)
+		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+	new_fw_data = (const __le32 *)
+		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+	if (running == 0) {
+
+		/* reset the engine and set to writable */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+		/* load mc io regs */
+		for (i = 0; i < regs_size; i++) {
+			WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+			WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+		}
+		/* load the MC ucode */
+		for (i = 0; i < ucode_size; i++) {
+			WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+		}
+
+		/* put the engine back into the active state */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+		/* wait for training to complete */
+		for (i = 0; i < adev->usec_timeout; i++) {
+			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
+				break;
+			udelay(1);
+		}
+		for (i = 0; i < adev->usec_timeout; i++) {
+			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
+				break;
+			udelay(1);
+		}
+
+	}
+
+	return 0;
+}
+
+static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
+				       struct amdgpu_mc *mc)
+{
+	if (mc->mc_vram_size > 0xFFC0000000ULL) {
+		dev_warn(adev->dev, "limiting VRAM\n");
+		mc->real_vram_size = 0xFFC0000000ULL;
+		mc->mc_vram_size = 0xFFC0000000ULL;
+	}
+	amdgpu_vram_location(adev, &adev->mc, 0);
+	adev->mc.gtt_base_align = 0;
+	amdgpu_gtt_location(adev, mc);
+}
+
+static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_mc_save save;
+	u32 tmp;
+	int i, j;
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
+		WREG32((0xb05 + j), 0x00000000);
+		WREG32((0xb06 + j), 0x00000000);
+		WREG32((0xb07 + j), 0x00000000);
+		WREG32((0xb08 + j), 0x00000000);
+		WREG32((0xb09 + j), 0x00000000);
+	}
+	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+	gmc_v6_0_mc_stop(adev, &save);
+
+	if (gmc_v6_0_wait_for_idle((void *)adev)) {
+		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
+	}
+
+	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+	/* Update configuration */
+	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+	       adev->mc.vram_start >> 12);
+	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+	       adev->mc.vram_end >> 12);
+	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
+	       adev->vram_scratch.gpu_addr >> 12);
+	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
+	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
+	WREG32(MC_VM_FB_LOCATION, tmp);
+	/* XXX double check these! */
+	WREG32(HDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+	WREG32(MC_VM_AGP_BASE, 0);
+	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+
+	if (gmc_v6_0_wait_for_idle((void *)adev)) {
+		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
+	}
+	gmc_v6_0_mc_resume(adev, &save);
+	amdgpu_display_set_vga_render_state(adev, false);
+}
+
+static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
+{
+
+	u32 tmp;
+	int chansize, numchan;
+
+	tmp = RREG32(MC_ARB_RAMCFG);
+	if (tmp & CHANSIZE_OVERRIDE) {
+		chansize = 16;
+	} else if (tmp & CHANSIZE_MASK) {
+		chansize = 64;
+	} else {
+		chansize = 32;
+	}
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		numchan = 1;
+		break;
+	case 1:
+		numchan = 2;
+		break;
+	case 2:
+		numchan = 4;
+		break;
+	case 3:
+		numchan = 8;
+		break;
+	case 4:
+		numchan = 3;
+		break;
+	case 5:
+		numchan = 6;
+		break;
+	case 6:
+		numchan = 10;
+		break;
+	case 7:
+		numchan = 12;
+		break;
+	case 8:
+		numchan = 16;
+		break;
+	}
+	adev->mc.vram_width = numchan * chansize;
+	/* Could aper size report 0 ? */
+	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
+	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
+	/* size in MB on si */
+	adev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	adev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	adev->mc.visible_vram_size = adev->mc.aper_size;
+
+	/* unless the user had overridden it, set the gart
+	 * size equal to the 1024 or vram, whichever is larger.
+	 */
+	if (amdgpu_gart_size == -1)
+		adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
+	else
+		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
+
+	gmc_v6_0_vram_gtt_location(adev, &adev->mc);
+
+	return 0;
+}
+
+static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
+					uint32_t vmid)
+{
+	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+
+	WREG32(VM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
+				     void *cpu_pt_addr,
+				     uint32_t gpu_page_idx,
+				     uint64_t addr,
+				     uint32_t flags)
+{
+	void __iomem *ptr = (void *)cpu_pt_addr;
+	uint64_t value;
+
+	value = addr & 0xFFFFFFFFFFFFF000ULL;
+	value |= flags;
+	writeq(value, ptr + (gpu_page_idx * 8));
+
+	return 0;
+}
+
+static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
+					      bool value)
+{
+	u32 tmp;
+
+	tmp = RREG32(VM_CONTEXT1_CNTL);
+	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+			    xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+			    xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+			    xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+			    xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+			    xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+			    xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	WREG32(VM_CONTEXT1_CNTL, tmp);
+}
+
+static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
+{
+	int r, i;
+
+	if (adev->gart.robj == NULL) {
+		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = amdgpu_gart_table_vram_pin(adev);
+	if (r)
+		return r;
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL,
+	       (0xA << 7) |
+	       ENABLE_L1_TLB |
+	       ENABLE_L1_FRAGMENT_PROCESSING |
+	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       ENABLE_ADVANCED_DRIVER_MODEL |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+	       ENABLE_L2_FRAGMENT_PROCESSING |
+	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       BANK_SELECT(4) |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(4));
+	/* setup context0 */
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+			(u32)(adev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT0_CNTL2, 0);
+	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
+
+	WREG32(0x575, 0);
+	WREG32(0x576, 0);
+	WREG32(0x577, 0);
+
+	/* empty context1-15 */
+	/* set vm size, must be a multiple of 4 */
+	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
+	/* Assign the pt base to something valid for now; the pts used for
+	 * the VMs are determined by the application and setup and assigned
+	 * on the fly in the vm part of radeon_gart.c
+	 */
+	for (i = 1; i < 16; i++) {
+		if (i < 8)
+			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
+			       adev->gart.table_addr >> 12);
+		else
+			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
+			       adev->gart.table_addr >> 12);
+	}
+
+	/* enable context1-15 */
+	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+	       (u32)(adev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT1_CNTL2, 4);
+	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+				PAGE_TABLE_BLOCK_SIZE(amdgpu_vm_block_size - 9) |
+				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
+	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(adev->mc.gtt_size >> 20),
+		 (unsigned long long)adev->gart.table_addr);
+	adev->gart.ready = true;
+	return 0;
+}
+
+static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->gart.robj) {
+		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
+		return 0;
+	}
+	r = amdgpu_gart_init(adev);
+	if (r)
+		return r;
+	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
+	return amdgpu_gart_table_vram_alloc(adev);
+}
+
+static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
+{
+	/*unsigned i;
+
+	for (i = 1; i < 16; ++i) {
+		uint32_t reg;
+		if (i < 8)
+			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i ;
+		else
+			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
+		adev->vm_manager.saved_table_addr[i] = RREG32(reg);
+	}*/
+
+	/* Disable all tables */
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
+	amdgpu_gart_table_vram_unpin(adev);
+}
+
+static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
+{
+	amdgpu_gart_table_vram_free(adev);
+	amdgpu_gart_fini(adev);
+}
+
+static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
+{
+	/*
+	 * number of VMs
+	 * VMID 0 is reserved for System
+	 * amdgpu graphics/compute will use VMIDs 1-7
+	 * amdkfd will use VMIDs 8-15
+	 */
+	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+	amdgpu_vm_manager_init(adev);
+
+	/* base offset of vram pages */
+	if (adev->flags & AMD_IS_APU) {
+		u64 tmp = RREG32(MC_VM_FB_OFFSET);
+		tmp <<= 22;
+		adev->vm_manager.vram_base_offset = tmp;
+	} else
+		adev->vm_manager.vram_base_offset = 0;
+
+	return 0;
+}
+
+static void gmc_v6_0_vm_fini(struct amdgpu_device *adev)
+{
+}
+
+static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
+				     u32 status, u32 addr, u32 mc_client)
+{
+	u32 mc_id;
+	u32 vmid = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS, xxVMID);
+	u32 protections = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
+					xxPROTECTIONS);
+	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
+		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
+
+	mc_id = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
+			      xxMEMORY_CLIENT_ID);
+
+	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+	       protections, vmid, addr,
+	       REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
+			     xxMEMORY_CLIENT_RW) ?
+	       "write" : "read", block, mc_client, mc_id);
+}
+
+/*
+static const u32 mc_cg_registers[] = {
+	MC_HUB_MISC_HUB_CG,
+	MC_HUB_MISC_SIP_CG,
+	MC_HUB_MISC_VM_CG,
+	MC_XPB_CLK_GAT,
+	ATC_MISC_CG,
+	MC_CITF_MISC_WR_CG,
+	MC_CITF_MISC_RD_CG,
+	MC_CITF_MISC_VM_CG,
+	VM_L2_CG,
+};
+
+static const u32 mc_cg_ls_en[] = {
+	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
+	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
+	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
+	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
+	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
+	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
+	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
+	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
+	VM_L2_CG__MEM_LS_ENABLE_MASK,
+};
+
+static const u32 mc_cg_en[] = {
+	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
+	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
+	MC_HUB_MISC_VM_CG__ENABLE_MASK,
+	MC_XPB_CLK_GAT__ENABLE_MASK,
+	ATC_MISC_CG__ENABLE_MASK,
+	MC_CITF_MISC_WR_CG__ENABLE_MASK,
+	MC_CITF_MISC_RD_CG__ENABLE_MASK,
+	MC_CITF_MISC_VM_CG__ENABLE_MASK,
+	VM_L2_CG__ENABLE_MASK,
+};
+
+static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
+				  bool enable)
+{
+	int i;
+	u32 orig, data;
+
+	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+		orig = data = RREG32(mc_cg_registers[i]);
+		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
+			data |= mc_cg_ls_en[i];
+		else
+			data &= ~mc_cg_ls_en[i];
+		if (data != orig)
+			WREG32(mc_cg_registers[i], data);
+	}
+}
+
+static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
+				    bool enable)
+{
+	int i;
+	u32 orig, data;
+
+	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+		orig = data = RREG32(mc_cg_registers[i]);
+		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
+			data |= mc_cg_en[i];
+		else
+			data &= ~mc_cg_en[i];
+		if (data != orig)
+			WREG32(mc_cg_registers[i], data);
+	}
+}
+
+static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
+				     bool enable)
+{
+	u32 orig, data;
+
+	orig = data = RREG32_PCIE(ixPCIE_CNTL2);
+
+	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
+		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
+		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
+		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
+		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
+	} else {
+		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
+		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
+		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
+		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
+	}
+
+	if (orig != data)
+		WREG32_PCIE(ixPCIE_CNTL2, data);
+}
+
+static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
+				     bool enable)
+{
+	u32 orig, data;
+
+	orig = data = RREG32(HDP_HOST_PATH_CNTL);
+
+	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
+		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
+	else
+		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
+
+	if (orig != data)
+		WREG32(HDP_HOST_PATH_CNTL, data);
+}
+
+static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
+				   bool enable)
+{
+	u32 orig, data;
+
+	orig = data = RREG32(HDP_MEM_POWER_LS);
+
+	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
+		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
+	else
+		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
+
+	if (orig != data)
+		WREG32(HDP_MEM_POWER_LS, data);
+}
+*/
+
+static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
+{
+	switch (mc_seq_vram_type) {
+	case MC_SEQ_MISC0__MT__GDDR1:
+		return AMDGPU_VRAM_TYPE_GDDR1;
+	case MC_SEQ_MISC0__MT__DDR2:
+		return AMDGPU_VRAM_TYPE_DDR2;
+	case MC_SEQ_MISC0__MT__GDDR3:
+		return AMDGPU_VRAM_TYPE_GDDR3;
+	case MC_SEQ_MISC0__MT__GDDR4:
+		return AMDGPU_VRAM_TYPE_GDDR4;
+	case MC_SEQ_MISC0__MT__GDDR5:
+		return AMDGPU_VRAM_TYPE_GDDR5;
+	case MC_SEQ_MISC0__MT__DDR3:
+		return AMDGPU_VRAM_TYPE_DDR3;
+	default:
+		return AMDGPU_VRAM_TYPE_UNKNOWN;
+	}
+}
+
+static int gmc_v6_0_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	gmc_v6_0_set_gart_funcs(adev);
+	gmc_v6_0_set_irq_funcs(adev);
+
+	if (adev->flags & AMD_IS_APU) {
+		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+	} else {
+		u32 tmp = RREG32(MC_SEQ_MISC0);
+		tmp &= MC_SEQ_MISC0__MT__MASK;
+		adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
+	}
+
+	return 0;
+}
+
+static int gmc_v6_0_late_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+}
+
+static int gmc_v6_0_sw_init(void *handle)
+{
+	int r;
+	int dma_bits;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+	if (r)
+		return r;
+
+	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
+	if (r)
+		return r;
+
+	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+
+	adev->mc.mc_mask = 0xffffffffffULL;
+
+	adev->need_dma32 = false;
+	dma_bits = adev->need_dma32 ? 32 : 40;
+	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+	if (r) {
+		adev->need_dma32 = true;
+		dma_bits = 32;
+		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
+	}
+	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+	if (r) {
+		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
+		dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
+	}
+
+	r = gmc_v6_0_init_microcode(adev);
+	if (r) {
+		dev_err(adev->dev, "Failed to load mc firmware!\n");
+		return r;
+	}
+
+	r = amdgpu_ttm_global_init(adev);
+	if (r) {
+		return r;
+	}
+
+	r = gmc_v6_0_mc_init(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_bo_init(adev);
+	if (r)
+		return r;
+
+	r = gmc_v6_0_gart_init(adev);
+	if (r)
+		return r;
+
+	if (!adev->vm_manager.enabled) {
+		r = gmc_v6_0_vm_init(adev);
+		if (r) {
+			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
+			return r;
+		}
+		adev->vm_manager.enabled = true;
+	}
+
+	return r;
+}
+
+static int gmc_v6_0_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->vm_manager.enabled) {
+		gmc_v6_0_vm_fini(adev);
+		adev->vm_manager.enabled = false;
+	}
+	gmc_v6_0_gart_fini(adev);
+	amdgpu_gem_force_release(adev);
+	amdgpu_bo_fini(adev);
+
+	return 0;
+}
+
+static int gmc_v6_0_hw_init(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	gmc_v6_0_mc_program(adev);
+
+	if (!(adev->flags & AMD_IS_APU)) {
+		r = gmc_v6_0_mc_load_microcode(adev);
+		if (r) {
+			dev_err(adev->dev, "Failed to load MC firmware!\n");
+			return r;
+		}
+	}
+
+	r = gmc_v6_0_gart_enable(adev);
+	if (r)
+		return r;
+
+	return r;
+}
+
+static int gmc_v6_0_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
+	gmc_v6_0_gart_disable(adev);
+
+	return 0;
+}
+
+static int gmc_v6_0_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->vm_manager.enabled) {
+		gmc_v6_0_vm_fini(adev);
+		adev->vm_manager.enabled = false;
+	}
+	gmc_v6_0_hw_fini(adev);
+
+	return 0;
+}
+
+static int gmc_v6_0_resume(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = gmc_v6_0_hw_init(adev);
+	if (r)
+		return r;
+
+	if (!adev->vm_manager.enabled) {
+		r = gmc_v6_0_vm_init(adev);
+		if (r) {
+			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
+			return r;
+		}
+		adev->vm_manager.enabled = true;
+	}
+
+	return r;
+}
+
+static bool gmc_v6_0_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 tmp = RREG32(SRBM_STATUS);
+
+	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
+		return false;
+
+	return true;
+}
+
+static int gmc_v6_0_wait_for_idle(void *handle)
+{
+	unsigned i;
+	u32 tmp;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32(SRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
+					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+					       SRBM_STATUS__MCC_BUSY_MASK |
+					       SRBM_STATUS__MCD_BUSY_MASK |
+					       SRBM_STATUS__VMC_BUSY_MASK);
+		if (!tmp)
+			return 0;
+		udelay(1);
+	}
+	return -ETIMEDOUT;
+
+}
+
+static int gmc_v6_0_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_mode_mc_save save;
+	u32 srbm_soft_reset = 0;
+	u32 tmp = RREG32(SRBM_STATUS);
+
+	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+						mmSRBM_SOFT_RESET, xxSOFT_RESET_VMC, 1);
+
+	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
+		if (!(adev->flags & AMD_IS_APU))
+			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+							mmSRBM_SOFT_RESET, xxSOFT_RESET_MC, 1);
+	}
+
+	if (srbm_soft_reset) {
+		gmc_v6_0_mc_stop(adev, &save);
+		if (gmc_v6_0_wait_for_idle(adev)) {
+			dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
+		}
+
+
+		tmp = RREG32(SRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+
+		gmc_v6_0_mc_resume(adev, &save);
+		udelay(50);
+	}
+
+	return 0;
+}
+
+static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
+					     struct amdgpu_irq_src *src,
+					     unsigned type,
+					     enum amdgpu_interrupt_state state)
+{
+	u32 tmp;
+	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
+
+	switch (state) {
+	case AMDGPU_IRQ_STATE_DISABLE:
+		tmp = RREG32(VM_CONTEXT0_CNTL);
+		tmp &= ~bits;
+		WREG32(VM_CONTEXT0_CNTL, tmp);
+		tmp = RREG32(VM_CONTEXT1_CNTL);
+		tmp &= ~bits;
+		WREG32(VM_CONTEXT1_CNTL, tmp);
+		break;
+	case AMDGPU_IRQ_STATE_ENABLE:
+		tmp = RREG32(VM_CONTEXT0_CNTL);
+		tmp |= bits;
+		WREG32(VM_CONTEXT0_CNTL, tmp);
+		tmp = RREG32(VM_CONTEXT1_CNTL);
+		tmp |= bits;
+		WREG32(VM_CONTEXT1_CNTL, tmp);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
+				      struct amdgpu_irq_src *source,
+				      struct amdgpu_iv_entry *entry)
+{
+	u32 addr, status;
+
+	addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
+	status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+	WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+
+	if (!addr && !status)
+		return 0;
+
+	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
+		gmc_v6_0_set_fault_enable_default(adev, false);
+
+	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
+		entry->src_id, entry->src_data);
+	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+		addr);
+	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+		status);
+	gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
+
+	return 0;
+}
+
+static int gmc_v6_0_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int gmc_v6_0_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
+	.name = "gmc_v6_0",
+	.early_init = gmc_v6_0_early_init,
+	.late_init = gmc_v6_0_late_init,
+	.sw_init = gmc_v6_0_sw_init,
+	.sw_fini = gmc_v6_0_sw_fini,
+	.hw_init = gmc_v6_0_hw_init,
+	.hw_fini = gmc_v6_0_hw_fini,
+	.suspend = gmc_v6_0_suspend,
+	.resume = gmc_v6_0_resume,
+	.is_idle = gmc_v6_0_is_idle,
+	.wait_for_idle = gmc_v6_0_wait_for_idle,
+	.soft_reset = gmc_v6_0_soft_reset,
+	.set_clockgating_state = gmc_v6_0_set_clockgating_state,
+	.set_powergating_state = gmc_v6_0_set_powergating_state,
+};
+
+static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
+	.flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
+	.set_pte_pde = gmc_v6_0_gart_set_pte_pde,
+};
+
+static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
+	.set = gmc_v6_0_vm_fault_interrupt_state,
+	.process = gmc_v6_0_process_interrupt,
+};
+
+static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
+{
+	if (adev->gart.gart_funcs == NULL)
+		adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
+}
+
+static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->mc.vm_fault.num_types = 1;
+	adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
similarity index 72%
copy from drivers/gpu/drm/amd/amdgpu/iceland_smum.h
copy to drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
index 5983e31..42c4fc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2015 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -21,21 +21,9 @@
  *
  */
 
-#ifndef ICELAND_SMUM_H
-#define ICELAND_SMUM_H
+#ifndef __GMC_V6_0_H__
+#define __GMC_V6_0_H__
 
-#include "ppsmc.h"
-
-extern int iceland_smu_init(struct amdgpu_device *adev);
-extern int iceland_smu_fini(struct amdgpu_device *adev);
-extern int iceland_smu_start(struct amdgpu_device *adev);
-
-struct iceland_smu_private_data
-{
-	uint8_t *header;
-	uint8_t *mec_image;
-	uint32_t header_addr_high;
-	uint32_t header_addr_low;
-};
+extern const struct amd_ip_funcs gmc_v6_0_ip_funcs;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 0b0f086..aa0c4b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -183,7 +183,7 @@
 	const struct mc_firmware_header_v1_0 *hdr;
 	const __le32 *fw_data = NULL;
 	const __le32 *io_mc_regs = NULL;
-	u32 running, blackout = 0;
+	u32 running;
 	int i, ucode_size, regs_size;
 
 	if (!adev->mc.fw)
@@ -203,11 +203,6 @@
 	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
 
 	if (running == 0) {
-		if (running) {
-			blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
-			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
-		}
-
 		/* reset the engine and set to writable */
 		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
@@ -239,9 +234,6 @@
 				break;
 			udelay(1);
 		}
-
-		if (running)
-			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
 	}
 
 	return 0;
@@ -393,7 +385,7 @@
 	 * size equal to the 1024 or vram, whichever is larger.
 	 */
 	if (amdgpu_gart_size == -1)
-		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
+		adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
 	else
 		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
 
@@ -953,6 +945,11 @@
 		return r;
 	}
 
+	r = amdgpu_ttm_global_init(adev);
+	if (r) {
+		return r;
+	}
+
 	r = gmc_v7_0_mc_init(adev);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 2aee2c6..c22ef14 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -261,7 +261,7 @@
 	const struct mc_firmware_header_v1_0 *hdr;
 	const __le32 *fw_data = NULL;
 	const __le32 *io_mc_regs = NULL;
-	u32 running, blackout = 0;
+	u32 running;
 	int i, ucode_size, regs_size;
 
 	if (!adev->mc.fw)
@@ -269,8 +269,10 @@
 
 	/* Skip MC ucode loading on SR-IOV capable boards.
 	 * vbios does this for us in asic_init in that case.
+	 * Skip MC ucode loading on VF, because hypervisor will do that
+	 * for this adaptor.
 	 */
-	if (adev->virtualization.supports_sr_iov)
+	if (amdgpu_sriov_bios(adev))
 		return 0;
 
 	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
@@ -287,11 +289,6 @@
 	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
 
 	if (running == 0) {
-		if (running) {
-			blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
-			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
-		}
-
 		/* reset the engine and set to writable */
 		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
 		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
@@ -323,9 +320,6 @@
 				break;
 			udelay(1);
 		}
-
-		if (running)
-			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
 	}
 
 	return 0;
@@ -477,7 +471,7 @@
 	 * size equal to the 1024 or vram, whichever is larger.
 	 */
 	if (amdgpu_gart_size == -1)
-		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
+		adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
 	else
 		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
 
@@ -957,6 +951,11 @@
 		return r;
 	}
 
+	r = amdgpu_ttm_global_init(adev);
+	if (r) {
+		return r;
+	}
+
 	r = gmc_v8_0_mc_init(adev);
 	if (r)
 		return r;
@@ -1100,9 +1099,8 @@
 
 }
 
-static int gmc_v8_0_soft_reset(void *handle)
+static bool gmc_v8_0_check_soft_reset(void *handle)
 {
-	struct amdgpu_mode_mc_save save;
 	u32 srbm_soft_reset = 0;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 tmp = RREG32(mmSRBM_STATUS);
@@ -1117,13 +1115,41 @@
 			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
 							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
 	}
+	if (srbm_soft_reset) {
+		adev->mc.srbm_soft_reset = srbm_soft_reset;
+		return true;
+	} else {
+		adev->mc.srbm_soft_reset = 0;
+		return false;
+	}
+}
+
+static int gmc_v8_0_pre_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!adev->mc.srbm_soft_reset)
+		return 0;
+
+	gmc_v8_0_mc_stop(adev, &adev->mc.save);
+	if (gmc_v8_0_wait_for_idle(adev)) {
+		dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
+	}
+
+	return 0;
+}
+
+static int gmc_v8_0_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset;
+
+	if (!adev->mc.srbm_soft_reset)
+		return 0;
+	srbm_soft_reset = adev->mc.srbm_soft_reset;
 
 	if (srbm_soft_reset) {
-		gmc_v8_0_mc_stop(adev, &save);
-		if (gmc_v8_0_wait_for_idle((void *)adev)) {
-			dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
-		}
-
+		u32 tmp;
 
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
@@ -1139,14 +1165,22 @@
 
 		/* Wait a little for things to settle down */
 		udelay(50);
-
-		gmc_v8_0_mc_resume(adev, &save);
-		udelay(50);
 	}
 
 	return 0;
 }
 
+static int gmc_v8_0_post_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!adev->mc.srbm_soft_reset)
+		return 0;
+
+	gmc_v8_0_mc_resume(adev, &adev->mc.save);
+	return 0;
+}
+
 static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
 					     struct amdgpu_irq_src *src,
 					     unsigned type,
@@ -1414,7 +1448,10 @@
 	.resume = gmc_v8_0_resume,
 	.is_idle = gmc_v8_0_is_idle,
 	.wait_for_idle = gmc_v8_0_wait_for_idle,
+	.check_soft_reset = gmc_v8_0_check_soft_reset,
+	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
 	.soft_reset = gmc_v8_0_soft_reset,
+	.post_soft_reset = gmc_v8_0_post_soft_reset,
 	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
 	.set_powergating_state = gmc_v8_0_set_powergating_state,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
deleted file mode 100644
index 2f078ad..0000000
--- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "iceland_smum.h"
-
-MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
-
-static void iceland_dpm_set_funcs(struct amdgpu_device *adev);
-
-static int iceland_dpm_early_init(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	iceland_dpm_set_funcs(adev);
-
-	return 0;
-}
-
-static int iceland_dpm_init_microcode(struct amdgpu_device *adev)
-{
-	char fw_name[30] = "amdgpu/topaz_smc.bin";
-	int err;
-
-	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
-	if (err)
-		goto out;
-	err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
-	if (err) {
-		DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
-		release_firmware(adev->pm.fw);
-		adev->pm.fw = NULL;
-	}
-	return err;
-}
-
-static int iceland_dpm_sw_init(void *handle)
-{
-	int ret;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	ret = iceland_dpm_init_microcode(adev);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int iceland_dpm_sw_fini(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	release_firmware(adev->pm.fw);
-	adev->pm.fw = NULL;
-
-	return 0;
-}
-
-static int iceland_dpm_hw_init(void *handle)
-{
-	int ret;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	mutex_lock(&adev->pm.mutex);
-
-	/* smu init only needs to be called at startup, not resume.
-	 * It should be in sw_init, but requires the fw info gathered
-	 * in sw_init from other IP modules.
-	 */
-	ret = iceland_smu_init(adev);
-	if (ret) {
-		DRM_ERROR("SMU initialization failed\n");
-		goto fail;
-	}
-
-	ret = iceland_smu_start(adev);
-	if (ret) {
-		DRM_ERROR("SMU start failed\n");
-		goto fail;
-	}
-
-	mutex_unlock(&adev->pm.mutex);
-	return 0;
-
-fail:
-	adev->firmware.smu_load = false;
-	mutex_unlock(&adev->pm.mutex);
-	return -EINVAL;
-}
-
-static int iceland_dpm_hw_fini(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	mutex_lock(&adev->pm.mutex);
-	/* smu fini only needs to be called at teardown, not suspend.
-	 * It should be in sw_fini, but we put it here for symmetry
-	 * with smu init.
-	 */
-	iceland_smu_fini(adev);
-	mutex_unlock(&adev->pm.mutex);
-	return 0;
-}
-
-static int iceland_dpm_suspend(void *handle)
-{
-	return 0;
-}
-
-static int iceland_dpm_resume(void *handle)
-{
-	int ret;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	mutex_lock(&adev->pm.mutex);
-
-	ret = iceland_smu_start(adev);
-	if (ret) {
-		DRM_ERROR("SMU start failed\n");
-		goto fail;
-	}
-
-fail:
-	mutex_unlock(&adev->pm.mutex);
-	return ret;
-}
-
-static int iceland_dpm_set_clockgating_state(void *handle,
-			enum amd_clockgating_state state)
-{
-	return 0;
-}
-
-static int iceland_dpm_set_powergating_state(void *handle,
-			enum amd_powergating_state state)
-{
-	return 0;
-}
-
-const struct amd_ip_funcs iceland_dpm_ip_funcs = {
-	.name = "iceland_dpm",
-	.early_init = iceland_dpm_early_init,
-	.late_init = NULL,
-	.sw_init = iceland_dpm_sw_init,
-	.sw_fini = iceland_dpm_sw_fini,
-	.hw_init = iceland_dpm_hw_init,
-	.hw_fini = iceland_dpm_hw_fini,
-	.suspend = iceland_dpm_suspend,
-	.resume = iceland_dpm_resume,
-	.is_idle = NULL,
-	.wait_for_idle = NULL,
-	.soft_reset = NULL,
-	.set_clockgating_state = iceland_dpm_set_clockgating_state,
-	.set_powergating_state = iceland_dpm_set_powergating_state,
-};
-
-static const struct amdgpu_dpm_funcs iceland_dpm_funcs = {
-	.get_temperature = NULL,
-	.pre_set_power_state = NULL,
-	.set_power_state = NULL,
-	.post_set_power_state = NULL,
-	.display_configuration_changed = NULL,
-	.get_sclk = NULL,
-	.get_mclk = NULL,
-	.print_power_state = NULL,
-	.debugfs_print_current_performance_level = NULL,
-	.force_performance_level = NULL,
-	.vblank_too_short = NULL,
-	.powergate_uvd = NULL,
-};
-
-static void iceland_dpm_set_funcs(struct amdgpu_device *adev)
-{
-	if (NULL == adev->pm.funcs)
-		adev->pm.funcs = &iceland_dpm_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
deleted file mode 100644
index 2118399..0000000
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ /dev/null
@@ -1,677 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "ppsmc.h"
-#include "iceland_smum.h"
-#include "smu_ucode_xfer_vi.h"
-#include "amdgpu_ucode.h"
-
-#include "smu/smu_7_1_1_d.h"
-#include "smu/smu_7_1_1_sh_mask.h"
-
-#define ICELAND_SMC_SIZE 0x20000
-
-static int iceland_set_smc_sram_address(struct amdgpu_device *adev,
-					uint32_t smc_address, uint32_t limit)
-{
-	uint32_t val;
-
-	if (smc_address & 3)
-		return -EINVAL;
-
-	if ((smc_address + 3) > limit)
-		return -EINVAL;
-
-	WREG32(mmSMC_IND_INDEX_0, smc_address);
-
-	val = RREG32(mmSMC_IND_ACCESS_CNTL);
-	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-	WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
-	return 0;
-}
-
-static int iceland_copy_bytes_to_smc(struct amdgpu_device *adev,
-				     uint32_t smc_start_address,
-				     const uint8_t *src,
-				     uint32_t byte_count, uint32_t limit)
-{
-	uint32_t addr;
-	uint32_t data, orig_data;
-	int result = 0;
-	uint32_t extra_shift;
-	unsigned long flags;
-
-	if (smc_start_address & 3)
-		return -EINVAL;
-
-	if ((smc_start_address + byte_count) > limit)
-		return -EINVAL;
-
-	addr = smc_start_address;
-
-	spin_lock_irqsave(&adev->smc_idx_lock, flags);
-	while (byte_count >= 4) {
-		/* Bytes are written into the SMC addres space with the MSB first */
-		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
-		result = iceland_set_smc_sram_address(adev, addr, limit);
-
-		if (result)
-			goto out;
-
-		WREG32(mmSMC_IND_DATA_0, data);
-
-		src += 4;
-		byte_count -= 4;
-		addr += 4;
-	}
-
-	if (0 != byte_count) {
-		/* Now write odd bytes left, do a read modify write cycle */
-		data = 0;
-
-		result = iceland_set_smc_sram_address(adev, addr, limit);
-		if (result)
-			goto out;
-
-		orig_data = RREG32(mmSMC_IND_DATA_0);
-		extra_shift = 8 * (4 - byte_count);
-
-		while (byte_count > 0) {
-			data = (data << 8) + *src++;
-			byte_count--;
-		}
-
-		data <<= extra_shift;
-		data |= (orig_data & ~((~0UL) << extra_shift));
-
-		result = iceland_set_smc_sram_address(adev, addr, limit);
-		if (result)
-			goto out;
-
-		WREG32(mmSMC_IND_DATA_0, data);
-	}
-
-out:
-	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-	return result;
-}
-
-void iceland_start_smc(struct amdgpu_device *adev)
-{
-	uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-
-	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
-	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-}
-
-void iceland_reset_smc(struct amdgpu_device *adev)
-{
-	uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-
-	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
-	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-}
-
-static int iceland_program_jump_on_start(struct amdgpu_device *adev)
-{
-	static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
-	iceland_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-
-	return 0;
-}
-
-void iceland_stop_smc_clock(struct amdgpu_device *adev)
-{
-	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-
-	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
-	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-}
-
-void iceland_start_smc_clock(struct amdgpu_device *adev)
-{
-	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-
-	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
-	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-}
-
-static bool iceland_is_smc_ram_running(struct amdgpu_device *adev)
-{
-	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-	val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
-
-	return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
-}
-
-static int wait_smu_response(struct amdgpu_device *adev)
-{
-	int i;
-	uint32_t val;
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		val = RREG32(mmSMC_RESP_0);
-		if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout)
-		return -EINVAL;
-
-	return 0;
-}
-
-static int iceland_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
-{
-	if (!iceland_is_smc_ram_running(adev))
-		return -EINVAL;
-
-	if (wait_smu_response(adev)) {
-		DRM_ERROR("Failed to send previous message\n");
-		return -EINVAL;
-	}
-
-	WREG32(mmSMC_MESSAGE_0, msg);
-
-	if (wait_smu_response(adev)) {
-		DRM_ERROR("Failed to send message\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
-						   PPSMC_Msg msg)
-{
-	if (!iceland_is_smc_ram_running(adev))
-		return -EINVAL;
-
-	if (wait_smu_response(adev)) {
-		DRM_ERROR("Failed to send previous message\n");
-		return -EINVAL;
-	}
-
-	WREG32(mmSMC_MESSAGE_0, msg);
-
-	return 0;
-}
-
-static int iceland_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
-						  PPSMC_Msg msg,
-						  uint32_t parameter)
-{
-	WREG32(mmSMC_MSG_ARG_0, parameter);
-
-	return iceland_send_msg_to_smc(adev, msg);
-}
-
-static int iceland_send_msg_to_smc_with_parameter_without_waiting(
-					struct amdgpu_device *adev,
-					PPSMC_Msg msg, uint32_t parameter)
-{
-	WREG32(mmSMC_MSG_ARG_0, parameter);
-
-	return iceland_send_msg_to_smc_without_waiting(adev, msg);
-}
-
-#if 0 /* not used yet */
-static int iceland_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
-	int i;
-	uint32_t val;
-
-	if (!iceland_is_smc_ram_running(adev))
-		return -EINVAL;
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-		if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout)
-		return -EINVAL;
-
-	return 0;
-}
-#endif
-
-static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev)
-{
-	const struct smc_firmware_header_v1_0 *hdr;
-	uint32_t ucode_size;
-	uint32_t ucode_start_address;
-	const uint8_t *src;
-	uint32_t val;
-	uint32_t byte_count;
-	uint32_t data;
-	unsigned long flags;
-	int i;
-
-	if (!adev->pm.fw)
-		return -EINVAL;
-
-	/* Skip SMC ucode loading on SR-IOV capable boards.
-	 * vbios does this for us in asic_init in that case.
-	 */
-	if (adev->virtualization.supports_sr_iov)
-		return 0;
-
-	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
-	amdgpu_ucode_print_smc_hdr(&hdr->header);
-
-	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
-	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
-	ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
-	src = (const uint8_t *)
-		(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-
-	if (ucode_size & 3) {
-		DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
-		return -EINVAL;
-	}
-
-	if (ucode_size > ICELAND_SMC_SIZE) {
-		DRM_ERROR("SMC address is beyond the SMC RAM area\n");
-		return -EINVAL;
-	}
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		val = RREG32_SMC(ixRCU_UC_EVENTS);
-		if (REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done) == 0)
-			break;
-		udelay(1);
-	}
-	val = RREG32_SMC(ixSMC_SYSCON_MISC_CNTL);
-	WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, val | 1);
-
-	iceland_stop_smc_clock(adev);
-	iceland_reset_smc(adev);
-
-	spin_lock_irqsave(&adev->smc_idx_lock, flags);
-	WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
-
-	val = RREG32(mmSMC_IND_ACCESS_CNTL);
-	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
-	WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
-	byte_count = ucode_size;
-	while (byte_count >= 4) {
-		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-		WREG32(mmSMC_IND_DATA_0, data);
-		src += 4;
-		byte_count -= 4;
-	}
-	val = RREG32(mmSMC_IND_ACCESS_CNTL);
-	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-	WREG32(mmSMC_IND_ACCESS_CNTL, val);
-	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
-	return 0;
-}
-
-#if 0 /* not used yet */
-static int iceland_read_smc_sram_dword(struct amdgpu_device *adev,
-				       uint32_t smc_address,
-				       uint32_t *value,
-				       uint32_t limit)
-{
-	int result;
-	unsigned long flags;
-
-	spin_lock_irqsave(&adev->smc_idx_lock, flags);
-	result = iceland_set_smc_sram_address(adev, smc_address, limit);
-	if (result == 0)
-		*value = RREG32(mmSMC_IND_DATA_0);
-	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-	return result;
-}
-
-static int iceland_write_smc_sram_dword(struct amdgpu_device *adev,
-					uint32_t smc_address,
-					uint32_t value,
-					uint32_t limit)
-{
-	int result;
-	unsigned long flags;
-
-	spin_lock_irqsave(&adev->smc_idx_lock, flags);
-	result = iceland_set_smc_sram_address(adev, smc_address, limit);
-	if (result == 0)
-		WREG32(mmSMC_IND_DATA_0, value);
-	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-	return result;
-}
-
-static int iceland_smu_stop_smc(struct amdgpu_device *adev)
-{
-	iceland_reset_smc(adev);
-	iceland_stop_smc_clock(adev);
-
-	return 0;
-}
-#endif
-
-static int iceland_smu_start_smc(struct amdgpu_device *adev)
-{
-	int i;
-	uint32_t val;
-
-	iceland_program_jump_on_start(adev);
-	iceland_start_smc_clock(adev);
-	iceland_start_smc(adev);
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		val = RREG32_SMC(ixFIRMWARE_FLAGS);
-		if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED) == 1)
-			break;
-		udelay(1);
-	}
-	return 0;
-}
-
-static enum AMDGPU_UCODE_ID iceland_convert_fw_type(uint32_t fw_type)
-{
-	switch (fw_type) {
-		case UCODE_ID_SDMA0:
-			return AMDGPU_UCODE_ID_SDMA0;
-		case UCODE_ID_SDMA1:
-			return AMDGPU_UCODE_ID_SDMA1;
-		case UCODE_ID_CP_CE:
-			return AMDGPU_UCODE_ID_CP_CE;
-		case UCODE_ID_CP_PFP:
-			return AMDGPU_UCODE_ID_CP_PFP;
-		case UCODE_ID_CP_ME:
-			return AMDGPU_UCODE_ID_CP_ME;
-		case UCODE_ID_CP_MEC:
-		case UCODE_ID_CP_MEC_JT1:
-			return AMDGPU_UCODE_ID_CP_MEC1;
-		case UCODE_ID_CP_MEC_JT2:
-			return AMDGPU_UCODE_ID_CP_MEC2;
-		case UCODE_ID_RLC_G:
-			return AMDGPU_UCODE_ID_RLC_G;
-		default:
-			DRM_ERROR("ucode type is out of range!\n");
-			return AMDGPU_UCODE_ID_MAXIMUM;
-	}
-}
-
-static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
-{
-	switch (fw_type) {
-		case AMDGPU_UCODE_ID_SDMA0:
-			return UCODE_ID_SDMA0_MASK;
-		case AMDGPU_UCODE_ID_SDMA1:
-			return UCODE_ID_SDMA1_MASK;
-		case AMDGPU_UCODE_ID_CP_CE:
-			return UCODE_ID_CP_CE_MASK;
-		case AMDGPU_UCODE_ID_CP_PFP:
-			return UCODE_ID_CP_PFP_MASK;
-		case AMDGPU_UCODE_ID_CP_ME:
-			return UCODE_ID_CP_ME_MASK;
-		case AMDGPU_UCODE_ID_CP_MEC1:
-			return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
-		case AMDGPU_UCODE_ID_CP_MEC2:
-			return UCODE_ID_CP_MEC_MASK;
-		case AMDGPU_UCODE_ID_RLC_G:
-			return UCODE_ID_RLC_G_MASK;
-		default:
-			DRM_ERROR("ucode type is out of range!\n");
-			return 0;
-	}
-}
-
-static int iceland_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
-						      uint32_t fw_type,
-						      struct SMU_Entry *entry)
-{
-	enum AMDGPU_UCODE_ID id = iceland_convert_fw_type(fw_type);
-	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
-	const struct gfx_firmware_header_v1_0 *header = NULL;
-	uint64_t gpu_addr;
-	uint32_t data_size;
-
-	if (ucode->fw == NULL)
-		return -EINVAL;
-
-	gpu_addr  = ucode->mc_addr;
-	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
-	data_size = le32_to_cpu(header->header.ucode_size_bytes);
-
-	entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
-	entry->id = (uint16_t)fw_type;
-	entry->image_addr_high = upper_32_bits(gpu_addr);
-	entry->image_addr_low = lower_32_bits(gpu_addr);
-	entry->meta_data_addr_high = 0;
-	entry->meta_data_addr_low = 0;
-	entry->data_size_byte = data_size;
-	entry->num_register_entries = 0;
-	entry->flags = 0;
-
-	return 0;
-}
-
-static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
-{
-	struct iceland_smu_private_data *private = (struct iceland_smu_private_data *)adev->smu.priv;
-	struct SMU_DRAMData_TOC *toc;
-	uint32_t fw_to_load;
-
-	toc = (struct SMU_DRAMData_TOC *)private->header;
-	toc->num_entries = 0;
-	toc->structure_version = 1;
-
-	if (!adev->firmware.smu_load)
-		return 0;
-
-	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for RLC\n");
-		return -EINVAL;
-	}
-
-	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for CE\n");
-		return -EINVAL;
-	}
-
-	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for PFP\n");
-		return -EINVAL;
-	}
-
-	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for ME\n");
-		return -EINVAL;
-	}
-
-	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for MEC\n");
-		return -EINVAL;
-	}
-
-	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
-		return -EINVAL;
-	}
-
-	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for SDMA0\n");
-		return -EINVAL;
-	}
-
-	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for SDMA1\n");
-		return -EINVAL;
-	}
-
-	iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
-	iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
-
-	fw_to_load = UCODE_ID_RLC_G_MASK |
-			UCODE_ID_SDMA0_MASK |
-			UCODE_ID_SDMA1_MASK |
-			UCODE_ID_CP_CE_MASK |
-			UCODE_ID_CP_ME_MASK |
-			UCODE_ID_CP_PFP_MASK |
-			UCODE_ID_CP_MEC_MASK |
-			UCODE_ID_CP_MEC_JT1_MASK;
-
-
-	if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
-		DRM_ERROR("Fail to request SMU load ucode\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int iceland_smu_check_fw_load_finish(struct amdgpu_device *adev,
-					    uint32_t fw_type)
-{
-	uint32_t fw_mask = iceland_smu_get_mask_for_fw_type(fw_type);
-	int i;
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_27) & fw_mask))
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout) {
-		DRM_ERROR("check firmware loading failed\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-int iceland_smu_start(struct amdgpu_device *adev)
-{
-	int result;
-
-	result = iceland_smu_upload_firmware_image(adev);
-	if (result)
-		return result;
-	result = iceland_smu_start_smc(adev);
-	if (result)
-		return result;
-
-	return iceland_smu_request_load_fw(adev);
-}
-
-static const struct amdgpu_smumgr_funcs iceland_smumgr_funcs = {
-	.check_fw_load_finish = iceland_smu_check_fw_load_finish,
-	.request_smu_load_fw = NULL,
-	.request_smu_specific_fw = NULL,
-};
-
-int iceland_smu_init(struct amdgpu_device *adev)
-{
-	struct iceland_smu_private_data *private;
-	uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
-	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
-	uint64_t mc_addr;
-	void *toc_buf_ptr;
-	int ret;
-
-	private = kzalloc(sizeof(struct iceland_smu_private_data), GFP_KERNEL);
-	if (NULL == private)
-		return -ENOMEM;
-
-	/* allocate firmware buffers */
-	if (adev->firmware.smu_load)
-		amdgpu_ucode_init_bo(adev);
-
-	adev->smu.priv = private;
-	adev->smu.fw_flags = 0;
-
-	/* Allocate FW image data structure and header buffer */
-	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
-			       true, AMDGPU_GEM_DOMAIN_VRAM,
-			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			       NULL, NULL, toc_buf);
-	if (ret) {
-		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
-		return -ENOMEM;
-	}
-
-	/* Retrieve GPU address for header buffer and internal buffer */
-	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
-	if (ret) {
-		amdgpu_bo_unref(&adev->smu.toc_buf);
-		DRM_ERROR("Failed to reserve the TOC buffer\n");
-		return -EINVAL;
-	}
-
-	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
-	if (ret) {
-		amdgpu_bo_unreserve(adev->smu.toc_buf);
-		amdgpu_bo_unref(&adev->smu.toc_buf);
-		DRM_ERROR("Failed to pin the TOC buffer\n");
-		return -EINVAL;
-	}
-
-	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
-	if (ret) {
-		amdgpu_bo_unreserve(adev->smu.toc_buf);
-		amdgpu_bo_unref(&adev->smu.toc_buf);
-		DRM_ERROR("Failed to map the TOC buffer\n");
-		return -EINVAL;
-	}
-
-	amdgpu_bo_unreserve(adev->smu.toc_buf);
-	private->header_addr_low = lower_32_bits(mc_addr);
-	private->header_addr_high = upper_32_bits(mc_addr);
-	private->header = toc_buf_ptr;
-
-	adev->smu.smumgr_funcs = &iceland_smumgr_funcs;
-
-	return 0;
-}
-
-int iceland_smu_fini(struct amdgpu_device *adev)
-{
-	amdgpu_bo_unref(&adev->smu.toc_buf);
-	kfree(adev->smu.priv);
-	adev->smu.priv = NULL;
-	if (adev->firmware.fw_buf)
-		amdgpu_ucode_fini_bo(adev);
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index a845e88..f8618a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2845,7 +2845,11 @@
 		pi->caps_tcp_ramping = true;
 	}
 
-	pi->caps_sclk_ds = true;
+	if (amdgpu_sclk_deep_sleep_en)
+		pi->caps_sclk_ds = true;
+	else
+		pi->caps_sclk_ds = false;
+
 	pi->enable_auto_thermal_throttling = true;
 	pi->disable_nb_ps3_in_battery = false;
 	if (amdgpu_bapm == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/r600_dpm.h b/drivers/gpu/drm/amd/amdgpu/r600_dpm.h
new file mode 100644
index 0000000..055321f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/r600_dpm.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __R600_DPM_H__
+#define __R600_DPM_H__
+
+#define R600_ASI_DFLT                                10000
+#define R600_BSP_DFLT                                0x41EB
+#define R600_BSU_DFLT                                0x2
+#define R600_AH_DFLT                                 5
+#define R600_RLP_DFLT                                25
+#define R600_RMP_DFLT                                65
+#define R600_LHP_DFLT                                40
+#define R600_LMP_DFLT                                15
+#define R600_TD_DFLT                                 0
+#define R600_UTC_DFLT_00                             0x24
+#define R600_UTC_DFLT_01                             0x22
+#define R600_UTC_DFLT_02                             0x22
+#define R600_UTC_DFLT_03                             0x22
+#define R600_UTC_DFLT_04                             0x22
+#define R600_UTC_DFLT_05                             0x22
+#define R600_UTC_DFLT_06                             0x22
+#define R600_UTC_DFLT_07                             0x22
+#define R600_UTC_DFLT_08                             0x22
+#define R600_UTC_DFLT_09                             0x22
+#define R600_UTC_DFLT_10                             0x22
+#define R600_UTC_DFLT_11                             0x22
+#define R600_UTC_DFLT_12                             0x22
+#define R600_UTC_DFLT_13                             0x22
+#define R600_UTC_DFLT_14                             0x22
+#define R600_DTC_DFLT_00                             0x24
+#define R600_DTC_DFLT_01                             0x22
+#define R600_DTC_DFLT_02                             0x22
+#define R600_DTC_DFLT_03                             0x22
+#define R600_DTC_DFLT_04                             0x22
+#define R600_DTC_DFLT_05                             0x22
+#define R600_DTC_DFLT_06                             0x22
+#define R600_DTC_DFLT_07                             0x22
+#define R600_DTC_DFLT_08                             0x22
+#define R600_DTC_DFLT_09                             0x22
+#define R600_DTC_DFLT_10                             0x22
+#define R600_DTC_DFLT_11                             0x22
+#define R600_DTC_DFLT_12                             0x22
+#define R600_DTC_DFLT_13                             0x22
+#define R600_DTC_DFLT_14                             0x22
+#define R600_VRC_DFLT                                0x0000C003
+#define R600_VOLTAGERESPONSETIME_DFLT                1000
+#define R600_BACKBIASRESPONSETIME_DFLT               1000
+#define R600_VRU_DFLT                                0x3
+#define R600_SPLLSTEPTIME_DFLT                       0x1000
+#define R600_SPLLSTEPUNIT_DFLT                       0x3
+#define R600_TPU_DFLT                                0
+#define R600_TPC_DFLT                                0x200
+#define R600_SSTU_DFLT                               0
+#define R600_SST_DFLT                                0x00C8
+#define R600_GICST_DFLT                              0x200
+#define R600_FCT_DFLT                                0x0400
+#define R600_FCTU_DFLT                               0
+#define R600_CTXCGTT3DRPHC_DFLT                      0x20
+#define R600_CTXCGTT3DRSDC_DFLT                      0x40
+#define R600_VDDC3DOORPHC_DFLT                       0x100
+#define R600_VDDC3DOORSDC_DFLT                       0x7
+#define R600_VDDC3DOORSU_DFLT                        0
+#define R600_MPLLLOCKTIME_DFLT                       100
+#define R600_MPLLRESETTIME_DFLT                      150
+#define R600_VCOSTEPPCT_DFLT                          20
+#define R600_ENDINGVCOSTEPPCT_DFLT                    5
+#define R600_REFERENCEDIVIDER_DFLT                    4
+
+#define R600_PM_NUMBER_OF_TC 15
+#define R600_PM_NUMBER_OF_SCLKS 20
+#define R600_PM_NUMBER_OF_MCLKS 4
+#define R600_PM_NUMBER_OF_VOLTAGE_LEVELS 4
+#define R600_PM_NUMBER_OF_ACTIVITY_LEVELS 3
+
+/* XXX are these ok? */
+#define R600_TEMP_RANGE_MIN (90 * 1000)
+#define R600_TEMP_RANGE_MAX (120 * 1000)
+
+#define FDO_PWM_MODE_STATIC  1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+enum r600_power_level {
+	R600_POWER_LEVEL_LOW = 0,
+	R600_POWER_LEVEL_MEDIUM = 1,
+	R600_POWER_LEVEL_HIGH = 2,
+	R600_POWER_LEVEL_CTXSW = 3,
+};
+
+enum r600_td {
+	R600_TD_AUTO,
+	R600_TD_UP,
+	R600_TD_DOWN,
+};
+
+enum r600_display_watermark {
+	R600_DISPLAY_WATERMARK_LOW = 0,
+	R600_DISPLAY_WATERMARK_HIGH = 1,
+};
+
+enum r600_display_gap
+{
+    R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
+    R600_PM_DISPLAY_GAP_VBLANK       = 1,
+    R600_PM_DISPLAY_GAP_WATERMARK    = 2,
+    R600_PM_DISPLAY_GAP_IGNORE       = 3,
+};
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index a64715d..565dab3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -190,12 +190,8 @@
  */
 static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
 {
-	u32 rptr;
-
 	/* XXX check if swapping is necessary on BE */
-	rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;
-
-	return rptr;
+	return ring->adev->wb.wb[ring->rptr_offs] >> 2;
 }
 
 /**
@@ -749,24 +745,16 @@
 				  uint64_t pe, uint64_t src,
 				  unsigned count)
 {
-	while (count) {
-		unsigned bytes = count * 8;
-		if (bytes > 0x1FFFF8)
-			bytes = 0x1FFFF8;
+	unsigned bytes = count * 8;
 
-		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
-			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
-		ib->ptr[ib->length_dw++] = bytes;
-		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
-		ib->ptr[ib->length_dw++] = lower_32_bits(src);
-		ib->ptr[ib->length_dw++] = upper_32_bits(src);
-		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
-		pe += bytes;
-		src += bytes;
-		count -= bytes / 8;
-	}
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+	ib->ptr[ib->length_dw++] = bytes;
+	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+	ib->ptr[ib->length_dw++] = lower_32_bits(src);
+	ib->ptr[ib->length_dw++] = upper_32_bits(src);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 }
 
 /**
@@ -774,39 +762,27 @@
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: access flags
  *
  * Update PTEs by writing them manually using sDMA (CIK).
  */
-static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
-				   const dma_addr_t *pages_addr, uint64_t pe,
-				   uint64_t addr, unsigned count,
-				   uint32_t incr, uint32_t flags)
+static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+				   uint64_t value, unsigned count,
+				   uint32_t incr)
 {
-	uint64_t value;
-	unsigned ndw;
+	unsigned ndw = count * 2;
 
-	while (count) {
-		ndw = count * 2;
-		if (ndw > 0xFFFFE)
-			ndw = 0xFFFFE;
-
-		/* for non-physically contiguous pages (system) */
-		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
-			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
-		ib->ptr[ib->length_dw++] = pe;
-		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-		ib->ptr[ib->length_dw++] = ndw;
-		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-			value = amdgpu_vm_map_gart(pages_addr, addr);
-			addr += incr;
-			value |= flags;
-			ib->ptr[ib->length_dw++] = value;
-			ib->ptr[ib->length_dw++] = upper_32_bits(value);
-		}
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+	ib->ptr[ib->length_dw++] = pe;
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+	ib->ptr[ib->length_dw++] = ndw;
+	for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+		ib->ptr[ib->length_dw++] = lower_32_bits(value);
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		value += incr;
 	}
 }
 
@@ -822,40 +798,21 @@
  *
  * Update the page tables using sDMA (CIK).
  */
-static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
-				     uint64_t pe,
+static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
 				     uint64_t addr, unsigned count,
 				     uint32_t incr, uint32_t flags)
 {
-	uint64_t value;
-	unsigned ndw;
-
-	while (count) {
-		ndw = count;
-		if (ndw > 0x7FFFF)
-			ndw = 0x7FFFF;
-
-		if (flags & AMDGPU_PTE_VALID)
-			value = addr;
-		else
-			value = 0;
-
-		/* for physically contiguous pages (vram) */
-		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
-		ib->ptr[ib->length_dw++] = pe; /* dst addr */
-		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-		ib->ptr[ib->length_dw++] = flags; /* mask */
-		ib->ptr[ib->length_dw++] = 0;
-		ib->ptr[ib->length_dw++] = value; /* value */
-		ib->ptr[ib->length_dw++] = upper_32_bits(value);
-		ib->ptr[ib->length_dw++] = incr; /* increment size */
-		ib->ptr[ib->length_dw++] = 0;
-		ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-
-		pe += ndw * 8;
-		addr += ndw * incr;
-		count -= ndw;
-	}
+	/* for physically contiguous pages (vram) */
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+	ib->ptr[ib->length_dw++] = flags; /* mask */
+	ib->ptr[ib->length_dw++] = 0;
+	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+	ib->ptr[ib->length_dw++] = incr; /* increment size */
+	ib->ptr[ib->length_dw++] = 0;
+	ib->ptr[ib->length_dw++] = count; /* number of entries */
 }
 
 /**
@@ -945,6 +902,22 @@
 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
 
+static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		7 + 6; /* sdma_v2_4_ring_emit_ib */
+}
+
+static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		6 + /* sdma_v2_4_ring_emit_hdp_flush */
+		3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
+		6 + /* sdma_v2_4_ring_emit_pipeline_sync */
+		12 + /* sdma_v2_4_ring_emit_vm_flush */
+		10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
+}
+
 static int sdma_v2_4_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1263,6 +1236,8 @@
 	.test_ib = sdma_v2_4_ring_test_ib,
 	.insert_nop = sdma_v2_4_ring_insert_nop,
 	.pad_ib = sdma_v2_4_ring_pad_ib,
+	.get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size,
+	.get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size,
 };
 
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 653ce5e..a9d1094 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -335,12 +335,8 @@
  */
 static uint32_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
-	u32 rptr;
-
 	/* XXX check if swapping is necessary on BE */
-	rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;
-
-	return rptr;
+	return ring->adev->wb.wb[ring->rptr_offs] >> 2;
 }
 
 /**
@@ -499,31 +495,6 @@
 	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
 }
 
-unsigned init_cond_exec(struct amdgpu_ring *ring)
-{
-	unsigned ret;
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
-	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
-	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
-	amdgpu_ring_write(ring, 1);
-	ret = ring->wptr;/* this is the offset we need patch later */
-	amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */
-	return ret;
-}
-
-void patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
-{
-	unsigned cur;
-	BUG_ON(ring->ring[offset] != 0x55aa55aa);
-
-	cur = ring->wptr - 1;
-	if (likely(cur > offset))
-		ring->ring[offset] = cur - offset;
-	else
-		ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
-}
-
-
 /**
  * sdma_v3_0_gfx_stop - stop the gfx async dma engines
  *
@@ -976,24 +947,16 @@
 				  uint64_t pe, uint64_t src,
 				  unsigned count)
 {
-	while (count) {
-		unsigned bytes = count * 8;
-		if (bytes > 0x1FFFF8)
-			bytes = 0x1FFFF8;
+	unsigned bytes = count * 8;
 
-		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
-			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
-		ib->ptr[ib->length_dw++] = bytes;
-		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
-		ib->ptr[ib->length_dw++] = lower_32_bits(src);
-		ib->ptr[ib->length_dw++] = upper_32_bits(src);
-		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
-		pe += bytes;
-		src += bytes;
-		count -= bytes / 8;
-	}
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+	ib->ptr[ib->length_dw++] = bytes;
+	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+	ib->ptr[ib->length_dw++] = lower_32_bits(src);
+	ib->ptr[ib->length_dw++] = upper_32_bits(src);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 }
 
 /**
@@ -1001,39 +964,27 @@
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: access flags
  *
  * Update PTEs by writing them manually using sDMA (CIK).
  */
-static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
-				   const dma_addr_t *pages_addr, uint64_t pe,
-				   uint64_t addr, unsigned count,
-				   uint32_t incr, uint32_t flags)
+static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+				   uint64_t value, unsigned count,
+				   uint32_t incr)
 {
-	uint64_t value;
-	unsigned ndw;
+	unsigned ndw = count * 2;
 
-	while (count) {
-		ndw = count * 2;
-		if (ndw > 0xFFFFE)
-			ndw = 0xFFFFE;
-
-		/* for non-physically contiguous pages (system) */
-		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
-			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
-		ib->ptr[ib->length_dw++] = pe;
-		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-		ib->ptr[ib->length_dw++] = ndw;
-		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-			value = amdgpu_vm_map_gart(pages_addr, addr);
-			addr += incr;
-			value |= flags;
-			ib->ptr[ib->length_dw++] = value;
-			ib->ptr[ib->length_dw++] = upper_32_bits(value);
-		}
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+	ib->ptr[ib->length_dw++] = ndw;
+	for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+		ib->ptr[ib->length_dw++] = lower_32_bits(value);
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		value += incr;
 	}
 }
 
@@ -1049,40 +1000,21 @@
  *
  * Update the page tables using sDMA (CIK).
  */
-static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
-				     uint64_t pe,
+static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
 				     uint64_t addr, unsigned count,
 				     uint32_t incr, uint32_t flags)
 {
-	uint64_t value;
-	unsigned ndw;
-
-	while (count) {
-		ndw = count;
-		if (ndw > 0x7FFFF)
-			ndw = 0x7FFFF;
-
-		if (flags & AMDGPU_PTE_VALID)
-			value = addr;
-		else
-			value = 0;
-
-		/* for physically contiguous pages (vram) */
-		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
-		ib->ptr[ib->length_dw++] = pe; /* dst addr */
-		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-		ib->ptr[ib->length_dw++] = flags; /* mask */
-		ib->ptr[ib->length_dw++] = 0;
-		ib->ptr[ib->length_dw++] = value; /* value */
-		ib->ptr[ib->length_dw++] = upper_32_bits(value);
-		ib->ptr[ib->length_dw++] = incr; /* increment size */
-		ib->ptr[ib->length_dw++] = 0;
-		ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-
-		pe += ndw * 8;
-		addr += ndw * incr;
-		count -= ndw;
-	}
+	/* for physically contiguous pages (vram) */
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+	ib->ptr[ib->length_dw++] = flags; /* mask */
+	ib->ptr[ib->length_dw++] = 0;
+	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+	ib->ptr[ib->length_dw++] = incr; /* increment size */
+	ib->ptr[ib->length_dw++] = 0;
+	ib->ptr[ib->length_dw++] = count; /* number of entries */
 }
 
 /**
@@ -1172,6 +1104,22 @@
 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
 
+static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		7 + 6; /* sdma_v3_0_ring_emit_ib */
+}
+
+static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		6 + /* sdma_v3_0_ring_emit_hdp_flush */
+		3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
+		6 + /* sdma_v3_0_ring_emit_pipeline_sync */
+		12 + /* sdma_v3_0_ring_emit_vm_flush */
+		10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
+}
+
 static int sdma_v3_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1320,28 +1268,77 @@
 	return -ETIMEDOUT;
 }
 
-static int sdma_v3_0_soft_reset(void *handle)
+static bool sdma_v3_0_check_soft_reset(void *handle)
 {
-	u32 srbm_soft_reset = 0;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
 	u32 tmp = RREG32(mmSRBM_STATUS2);
 
-	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
-		/* sdma0 */
-		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
-		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
-		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+	if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) ||
+	    (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) {
 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
-	}
-	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
-		/* sdma1 */
-		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
-		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
-		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
 	}
 
 	if (srbm_soft_reset) {
+		adev->sdma.srbm_soft_reset = srbm_soft_reset;
+		return true;
+	} else {
+		adev->sdma.srbm_soft_reset = 0;
+		return false;
+	}
+}
+
+static int sdma_v3_0_pre_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
+
+	if (!adev->sdma.srbm_soft_reset)
+		return 0;
+
+	srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+	if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
+	    REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
+		sdma_v3_0_ctx_switch_enable(adev, false);
+		sdma_v3_0_enable(adev, false);
+	}
+
+	return 0;
+}
+
+static int sdma_v3_0_post_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
+
+	if (!adev->sdma.srbm_soft_reset)
+		return 0;
+
+	srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+	if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
+	    REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
+		sdma_v3_0_gfx_resume(adev);
+		sdma_v3_0_rlc_resume(adev);
+	}
+
+	return 0;
+}
+
+static int sdma_v3_0_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
+	u32 tmp;
+
+	if (!adev->sdma.srbm_soft_reset)
+		return 0;
+
+	srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+	if (srbm_soft_reset) {
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -1559,6 +1556,9 @@
 	.resume = sdma_v3_0_resume,
 	.is_idle = sdma_v3_0_is_idle,
 	.wait_for_idle = sdma_v3_0_wait_for_idle,
+	.check_soft_reset = sdma_v3_0_check_soft_reset,
+	.pre_soft_reset = sdma_v3_0_pre_soft_reset,
+	.post_soft_reset = sdma_v3_0_post_soft_reset,
 	.soft_reset = sdma_v3_0_soft_reset,
 	.set_clockgating_state = sdma_v3_0_set_clockgating_state,
 	.set_powergating_state = sdma_v3_0_set_powergating_state,
@@ -1579,6 +1579,8 @@
 	.test_ib = sdma_v3_0_ring_test_ib,
 	.insert_nop = sdma_v3_0_ring_insert_nop,
 	.pad_ib = sdma_v3_0_ring_pad_ib,
+	.get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size,
 };
 
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
new file mode 100644
index 0000000..dc9511c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -0,0 +1,1965 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "amdgpu_ih.h"
+#include "amdgpu_uvd.h"
+#include "amdgpu_vce.h"
+#include "atom.h"
+#include "amdgpu_powerplay.h"
+#include "si/sid.h"
+#include "si_ih.h"
+#include "gfx_v6_0.h"
+#include "gmc_v6_0.h"
+#include "si_dma.h"
+#include "dce_v6_0.h"
+#include "si.h"
+
+static const u32 tahiti_golden_registers[] =
+{
+	0x2684, 0x00010000, 0x00018208,
+	0x260c, 0xffffffff, 0x00000000,
+	0x260d, 0xf00fffff, 0x00000400,
+	0x260e, 0x0002021c, 0x00020200,
+	0x031e, 0x00000080, 0x00000000,
+	0x340c, 0x000300c0, 0x00800040,
+	0x360c, 0x000300c0, 0x00800040,
+	0x16ec, 0x000000f0, 0x00000070,
+	0x16f0, 0x00200000, 0x50100000,
+	0x1c0c, 0x31000311, 0x00000011,
+	0x09df, 0x00000003, 0x000007ff,
+	0x0903, 0x000007ff, 0x00000000,
+	0x2285, 0xf000001f, 0x00000007,
+	0x22c9, 0xffffffff, 0x00ffffff,
+	0x22c4, 0x0000ff0f, 0x00000000,
+	0xa293, 0x07ffffff, 0x4e000000,
+	0xa0d4, 0x3f3f3fff, 0x2a00126a,
+	0x000c, 0x000000ff, 0x0040,
+	0x000d, 0x00000040, 0x00004040,
+	0x2440, 0x07ffffff, 0x03000000,
+	0x23a2, 0x01ff1f3f, 0x00000000,
+	0x23a1, 0x01ff1f3f, 0x00000000,
+	0x2418, 0x0000007f, 0x00000020,
+	0x2542, 0x00010000, 0x00010000,
+	0x2b05, 0x00000200, 0x000002fb,
+	0x2b04, 0xffffffff, 0x0000543b,
+	0x2b03, 0xffffffff, 0xa9210876,
+	0x2234, 0xffffffff, 0x000fff40,
+	0x2235, 0x0000001f, 0x00000010,
+	0x0504, 0x20000000, 0x20fffed8,
+	0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 tahiti_golden_registers2[] =
+{
+	0x0319, 0x00000001, 0x00000001
+};
+
+static const u32 tahiti_golden_rlc_registers[] =
+{
+	0x3109, 0xffffffff, 0x00601005,
+	0x311f, 0xffffffff, 0x10104040,
+	0x3122, 0xffffffff, 0x0100000a,
+	0x30c5, 0xffffffff, 0x00000800,
+	0x30c3, 0xffffffff, 0x800000f4,
+	0x3d2a, 0xffffffff, 0x00000000
+};
+
+static const u32 pitcairn_golden_registers[] =
+{
+	0x2684, 0x00010000, 0x00018208,
+	0x260c, 0xffffffff, 0x00000000,
+	0x260d, 0xf00fffff, 0x00000400,
+	0x260e, 0x0002021c, 0x00020200,
+	0x031e, 0x00000080, 0x00000000,
+	0x340c, 0x000300c0, 0x00800040,
+	0x360c, 0x000300c0, 0x00800040,
+	0x16ec, 0x000000f0, 0x00000070,
+	0x16f0, 0x00200000, 0x50100000,
+	0x1c0c, 0x31000311, 0x00000011,
+	0x0ab9, 0x00073ffe, 0x000022a2,
+	0x0903, 0x000007ff, 0x00000000,
+	0x2285, 0xf000001f, 0x00000007,
+	0x22c9, 0xffffffff, 0x00ffffff,
+	0x22c4, 0x0000ff0f, 0x00000000,
+	0xa293, 0x07ffffff, 0x4e000000,
+	0xa0d4, 0x3f3f3fff, 0x2a00126a,
+	0x000c, 0x000000ff, 0x0040,
+	0x000d, 0x00000040, 0x00004040,
+	0x2440, 0x07ffffff, 0x03000000,
+	0x2418, 0x0000007f, 0x00000020,
+	0x2542, 0x00010000, 0x00010000,
+	0x2b05, 0x000003ff, 0x000000f7,
+	0x2b04, 0xffffffff, 0x00000000,
+	0x2b03, 0xffffffff, 0x32761054,
+	0x2235, 0x0000001f, 0x00000010,
+	0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 pitcairn_golden_rlc_registers[] =
+{
+	0x3109, 0xffffffff, 0x00601004,
+	0x311f, 0xffffffff, 0x10102020,
+	0x3122, 0xffffffff, 0x01000020,
+	0x30c5, 0xffffffff, 0x00000800,
+	0x30c3, 0xffffffff, 0x800000a4
+};
+
+static const u32 verde_pg_init[] =
+{
+	0xd4f, 0xffffffff, 0x40000,
+	0xd4e, 0xffffffff, 0x200010ff,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x7007,
+	0xd4e, 0xffffffff, 0x300010ff,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x400000,
+	0xd4e, 0xffffffff, 0x100010ff,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x120200,
+	0xd4e, 0xffffffff, 0x500010ff,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x1e1e16,
+	0xd4e, 0xffffffff, 0x600010ff,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x171f1e,
+	0xd4e, 0xffffffff, 0x700010ff,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4f, 0xffffffff, 0x0,
+	0xd4e, 0xffffffff, 0x9ff,
+	0xd40, 0xffffffff, 0x0,
+	0xd41, 0xffffffff, 0x10000800,
+	0xd41, 0xffffffff, 0xf,
+	0xd41, 0xffffffff, 0xf,
+	0xd40, 0xffffffff, 0x4,
+	0xd41, 0xffffffff, 0x1000051e,
+	0xd41, 0xffffffff, 0xffff,
+	0xd41, 0xffffffff, 0xffff,
+	0xd40, 0xffffffff, 0x8,
+	0xd41, 0xffffffff, 0x80500,
+	0xd40, 0xffffffff, 0x12,
+	0xd41, 0xffffffff, 0x9050c,
+	0xd40, 0xffffffff, 0x1d,
+	0xd41, 0xffffffff, 0xb052c,
+	0xd40, 0xffffffff, 0x2a,
+	0xd41, 0xffffffff, 0x1053e,
+	0xd40, 0xffffffff, 0x2d,
+	0xd41, 0xffffffff, 0x10546,
+	0xd40, 0xffffffff, 0x30,
+	0xd41, 0xffffffff, 0xa054e,
+	0xd40, 0xffffffff, 0x3c,
+	0xd41, 0xffffffff, 0x1055f,
+	0xd40, 0xffffffff, 0x3f,
+	0xd41, 0xffffffff, 0x10567,
+	0xd40, 0xffffffff, 0x42,
+	0xd41, 0xffffffff, 0x1056f,
+	0xd40, 0xffffffff, 0x45,
+	0xd41, 0xffffffff, 0x10572,
+	0xd40, 0xffffffff, 0x48,
+	0xd41, 0xffffffff, 0x20575,
+	0xd40, 0xffffffff, 0x4c,
+	0xd41, 0xffffffff, 0x190801,
+	0xd40, 0xffffffff, 0x67,
+	0xd41, 0xffffffff, 0x1082a,
+	0xd40, 0xffffffff, 0x6a,
+	0xd41, 0xffffffff, 0x1b082d,
+	0xd40, 0xffffffff, 0x87,
+	0xd41, 0xffffffff, 0x310851,
+	0xd40, 0xffffffff, 0xba,
+	0xd41, 0xffffffff, 0x891,
+	0xd40, 0xffffffff, 0xbc,
+	0xd41, 0xffffffff, 0x893,
+	0xd40, 0xffffffff, 0xbe,
+	0xd41, 0xffffffff, 0x20895,
+	0xd40, 0xffffffff, 0xc2,
+	0xd41, 0xffffffff, 0x20899,
+	0xd40, 0xffffffff, 0xc6,
+	0xd41, 0xffffffff, 0x2089d,
+	0xd40, 0xffffffff, 0xca,
+	0xd41, 0xffffffff, 0x8a1,
+	0xd40, 0xffffffff, 0xcc,
+	0xd41, 0xffffffff, 0x8a3,
+	0xd40, 0xffffffff, 0xce,
+	0xd41, 0xffffffff, 0x308a5,
+	0xd40, 0xffffffff, 0xd3,
+	0xd41, 0xffffffff, 0x6d08cd,
+	0xd40, 0xffffffff, 0x142,
+	0xd41, 0xffffffff, 0x2000095a,
+	0xd41, 0xffffffff, 0x1,
+	0xd40, 0xffffffff, 0x144,
+	0xd41, 0xffffffff, 0x301f095b,
+	0xd40, 0xffffffff, 0x165,
+	0xd41, 0xffffffff, 0xc094d,
+	0xd40, 0xffffffff, 0x173,
+	0xd41, 0xffffffff, 0xf096d,
+	0xd40, 0xffffffff, 0x184,
+	0xd41, 0xffffffff, 0x15097f,
+	0xd40, 0xffffffff, 0x19b,
+	0xd41, 0xffffffff, 0xc0998,
+	0xd40, 0xffffffff, 0x1a9,
+	0xd41, 0xffffffff, 0x409a7,
+	0xd40, 0xffffffff, 0x1af,
+	0xd41, 0xffffffff, 0xcdc,
+	0xd40, 0xffffffff, 0x1b1,
+	0xd41, 0xffffffff, 0x800,
+	0xd42, 0xffffffff, 0x6c9b2000,
+	0xd44, 0xfc00, 0x2000,
+	0xd51, 0xffffffff, 0xfc0,
+	0xa35, 0x00000100, 0x100
+};
+
+static const u32 verde_golden_rlc_registers[] =
+{
+	0x3109, 0xffffffff, 0x033f1005,
+	0x311f, 0xffffffff, 0x10808020,
+	0x3122, 0xffffffff, 0x00800008,
+	0x30c5, 0xffffffff, 0x00001000,
+	0x30c3, 0xffffffff, 0x80010014
+};
+
+static const u32 verde_golden_registers[] =
+{
+	0x2684, 0x00010000, 0x00018208,
+	0x260c, 0xffffffff, 0x00000000,
+	0x260d, 0xf00fffff, 0x00000400,
+	0x260e, 0x0002021c, 0x00020200,
+	0x031e, 0x00000080, 0x00000000,
+	0x340c, 0x000300c0, 0x00800040,
+	0x340c, 0x000300c0, 0x00800040,
+	0x360c, 0x000300c0, 0x00800040,
+	0x360c, 0x000300c0, 0x00800040,
+	0x16ec, 0x000000f0, 0x00000070,
+	0x16f0, 0x00200000, 0x50100000,
+
+	0x1c0c, 0x31000311, 0x00000011,
+	0x0ab9, 0x00073ffe, 0x000022a2,
+	0x0ab9, 0x00073ffe, 0x000022a2,
+	0x0ab9, 0x00073ffe, 0x000022a2,
+	0x0903, 0x000007ff, 0x00000000,
+	0x0903, 0x000007ff, 0x00000000,
+	0x0903, 0x000007ff, 0x00000000,
+	0x2285, 0xf000001f, 0x00000007,
+	0x2285, 0xf000001f, 0x00000007,
+	0x2285, 0xf000001f, 0x00000007,
+	0x2285, 0xffffffff, 0x00ffffff,
+	0x22c4, 0x0000ff0f, 0x00000000,
+
+	0xa293, 0x07ffffff, 0x4e000000,
+	0xa0d4, 0x3f3f3fff, 0x0000124a,
+	0xa0d4, 0x3f3f3fff, 0x0000124a,
+	0xa0d4, 0x3f3f3fff, 0x0000124a,
+	0x000c, 0x000000ff, 0x0040,
+	0x000d, 0x00000040, 0x00004040,
+	0x2440, 0x07ffffff, 0x03000000,
+	0x2440, 0x07ffffff, 0x03000000,
+	0x23a2, 0x01ff1f3f, 0x00000000,
+	0x23a3, 0x01ff1f3f, 0x00000000,
+	0x23a2, 0x01ff1f3f, 0x00000000,
+	0x23a1, 0x01ff1f3f, 0x00000000,
+	0x23a1, 0x01ff1f3f, 0x00000000,
+
+	0x23a1, 0x01ff1f3f, 0x00000000,
+	0x2418, 0x0000007f, 0x00000020,
+	0x2542, 0x00010000, 0x00010000,
+	0x2b01, 0x000003ff, 0x00000003,
+	0x2b05, 0x000003ff, 0x00000003,
+	0x2b05, 0x000003ff, 0x00000003,
+	0x2b04, 0xffffffff, 0x00000000,
+	0x2b04, 0xffffffff, 0x00000000,
+	0x2b04, 0xffffffff, 0x00000000,
+	0x2b03, 0xffffffff, 0x00001032,
+	0x2b03, 0xffffffff, 0x00001032,
+	0x2b03, 0xffffffff, 0x00001032,
+	0x2235, 0x0000001f, 0x00000010,
+	0x2235, 0x0000001f, 0x00000010,
+	0x2235, 0x0000001f, 0x00000010,
+	0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 oland_golden_registers[] =
+{
+	0x2684, 0x00010000, 0x00018208,
+	0x260c, 0xffffffff, 0x00000000,
+	0x260d, 0xf00fffff, 0x00000400,
+	0x260e, 0x0002021c, 0x00020200,
+	0x031e, 0x00000080, 0x00000000,
+	0x340c, 0x000300c0, 0x00800040,
+	0x360c, 0x000300c0, 0x00800040,
+	0x16ec, 0x000000f0, 0x00000070,
+	0x16f9, 0x00200000, 0x50100000,
+	0x1c0c, 0x31000311, 0x00000011,
+	0x0ab9, 0x00073ffe, 0x000022a2,
+	0x0903, 0x000007ff, 0x00000000,
+	0x2285, 0xf000001f, 0x00000007,
+	0x22c9, 0xffffffff, 0x00ffffff,
+	0x22c4, 0x0000ff0f, 0x00000000,
+	0xa293, 0x07ffffff, 0x4e000000,
+	0xa0d4, 0x3f3f3fff, 0x00000082,
+	0x000c, 0x000000ff, 0x0040,
+	0x000d, 0x00000040, 0x00004040,
+	0x2440, 0x07ffffff, 0x03000000,
+	0x2418, 0x0000007f, 0x00000020,
+	0x2542, 0x00010000, 0x00010000,
+	0x2b05, 0x000003ff, 0x000000f3,
+	0x2b04, 0xffffffff, 0x00000000,
+	0x2b03, 0xffffffff, 0x00003210,
+	0x2235, 0x0000001f, 0x00000010,
+	0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 oland_golden_rlc_registers[] =
+{
+	0x3109, 0xffffffff, 0x00601005,
+	0x311f, 0xffffffff, 0x10104040,
+	0x3122, 0xffffffff, 0x0100000a,
+	0x30c5, 0xffffffff, 0x00000800,
+	0x30c3, 0xffffffff, 0x800000f4
+};
+
+static const u32 hainan_golden_registers[] =
+{
+	0x2684, 0x00010000, 0x00018208,
+	0x260c, 0xffffffff, 0x00000000,
+	0x260d, 0xf00fffff, 0x00000400,
+	0x260e, 0x0002021c, 0x00020200,
+	0x4595, 0xff000fff, 0x00000100,
+	0x340c, 0x000300c0, 0x00800040,
+	0x3630, 0xff000fff, 0x00000100,
+	0x360c, 0x000300c0, 0x00800040,
+	0x0ab9, 0x00073ffe, 0x000022a2,
+	0x0903, 0x000007ff, 0x00000000,
+	0x2285, 0xf000001f, 0x00000007,
+	0x22c9, 0xffffffff, 0x00ffffff,
+	0x22c4, 0x0000ff0f, 0x00000000,
+	0xa393, 0x07ffffff, 0x4e000000,
+	0xa0d4, 0x3f3f3fff, 0x00000000,
+	0x000c, 0x000000ff, 0x0040,
+	0x000d, 0x00000040, 0x00004040,
+	0x2440, 0x03e00000, 0x03600000,
+	0x2418, 0x0000007f, 0x00000020,
+	0x2542, 0x00010000, 0x00010000,
+	0x2b05, 0x000003ff, 0x000000f1,
+	0x2b04, 0xffffffff, 0x00000000,
+	0x2b03, 0xffffffff, 0x00003210,
+	0x2235, 0x0000001f, 0x00000010,
+	0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 hainan_golden_registers2[] =
+{
+	0x263e, 0xffffffff, 0x02010001
+};
+
+static const u32 tahiti_mgcg_cgcg_init[] =
+{
+	0x3100, 0xffffffff, 0xfffffffc,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2698, 0xffffffff, 0x00000100,
+	0x24a9, 0xffffffff, 0x00000100,
+	0x3059, 0xffffffff, 0x00000100,
+	0x25dd, 0xffffffff, 0x00000100,
+	0x2261, 0xffffffff, 0x06000100,
+	0x2286, 0xffffffff, 0x00000100,
+	0x24a8, 0xffffffff, 0x00000100,
+	0x30e0, 0xffffffff, 0x00000100,
+	0x22ca, 0xffffffff, 0x00000100,
+	0x2451, 0xffffffff, 0x00000100,
+	0x2362, 0xffffffff, 0x00000100,
+	0x2363, 0xffffffff, 0x00000100,
+	0x240c, 0xffffffff, 0x00000100,
+	0x240d, 0xffffffff, 0x00000100,
+	0x240e, 0xffffffff, 0x00000100,
+	0x240f, 0xffffffff, 0x00000100,
+	0x2b60, 0xffffffff, 0x00000100,
+	0x2b15, 0xffffffff, 0x00000100,
+	0x225f, 0xffffffff, 0x06000100,
+	0x261a, 0xffffffff, 0x00000100,
+	0x2544, 0xffffffff, 0x00000100,
+	0x2bc1, 0xffffffff, 0x00000100,
+	0x2b81, 0xffffffff, 0x00000100,
+	0x2527, 0xffffffff, 0x00000100,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2458, 0xffffffff, 0x00010000,
+	0x2459, 0xffffffff, 0x00030002,
+	0x245a, 0xffffffff, 0x00040007,
+	0x245b, 0xffffffff, 0x00060005,
+	0x245c, 0xffffffff, 0x00090008,
+	0x245d, 0xffffffff, 0x00020001,
+	0x245e, 0xffffffff, 0x00040003,
+	0x245f, 0xffffffff, 0x00000007,
+	0x2460, 0xffffffff, 0x00060005,
+	0x2461, 0xffffffff, 0x00090008,
+	0x2462, 0xffffffff, 0x00030002,
+	0x2463, 0xffffffff, 0x00050004,
+	0x2464, 0xffffffff, 0x00000008,
+	0x2465, 0xffffffff, 0x00070006,
+	0x2466, 0xffffffff, 0x000a0009,
+	0x2467, 0xffffffff, 0x00040003,
+	0x2468, 0xffffffff, 0x00060005,
+	0x2469, 0xffffffff, 0x00000009,
+	0x246a, 0xffffffff, 0x00080007,
+	0x246b, 0xffffffff, 0x000b000a,
+	0x246c, 0xffffffff, 0x00050004,
+	0x246d, 0xffffffff, 0x00070006,
+	0x246e, 0xffffffff, 0x0008000b,
+	0x246f, 0xffffffff, 0x000a0009,
+	0x2470, 0xffffffff, 0x000d000c,
+	0x2471, 0xffffffff, 0x00060005,
+	0x2472, 0xffffffff, 0x00080007,
+	0x2473, 0xffffffff, 0x0000000b,
+	0x2474, 0xffffffff, 0x000a0009,
+	0x2475, 0xffffffff, 0x000d000c,
+	0x2476, 0xffffffff, 0x00070006,
+	0x2477, 0xffffffff, 0x00090008,
+	0x2478, 0xffffffff, 0x0000000c,
+	0x2479, 0xffffffff, 0x000b000a,
+	0x247a, 0xffffffff, 0x000e000d,
+	0x247b, 0xffffffff, 0x00080007,
+	0x247c, 0xffffffff, 0x000a0009,
+	0x247d, 0xffffffff, 0x0000000d,
+	0x247e, 0xffffffff, 0x000c000b,
+	0x247f, 0xffffffff, 0x000f000e,
+	0x2480, 0xffffffff, 0x00090008,
+	0x2481, 0xffffffff, 0x000b000a,
+	0x2482, 0xffffffff, 0x000c000f,
+	0x2483, 0xffffffff, 0x000e000d,
+	0x2484, 0xffffffff, 0x00110010,
+	0x2485, 0xffffffff, 0x000a0009,
+	0x2486, 0xffffffff, 0x000c000b,
+	0x2487, 0xffffffff, 0x0000000f,
+	0x2488, 0xffffffff, 0x000e000d,
+	0x2489, 0xffffffff, 0x00110010,
+	0x248a, 0xffffffff, 0x000b000a,
+	0x248b, 0xffffffff, 0x000d000c,
+	0x248c, 0xffffffff, 0x00000010,
+	0x248d, 0xffffffff, 0x000f000e,
+	0x248e, 0xffffffff, 0x00120011,
+	0x248f, 0xffffffff, 0x000c000b,
+	0x2490, 0xffffffff, 0x000e000d,
+	0x2491, 0xffffffff, 0x00000011,
+	0x2492, 0xffffffff, 0x0010000f,
+	0x2493, 0xffffffff, 0x00130012,
+	0x2494, 0xffffffff, 0x000d000c,
+	0x2495, 0xffffffff, 0x000f000e,
+	0x2496, 0xffffffff, 0x00100013,
+	0x2497, 0xffffffff, 0x00120011,
+	0x2498, 0xffffffff, 0x00150014,
+	0x2499, 0xffffffff, 0x000e000d,
+	0x249a, 0xffffffff, 0x0010000f,
+	0x249b, 0xffffffff, 0x00000013,
+	0x249c, 0xffffffff, 0x00120011,
+	0x249d, 0xffffffff, 0x00150014,
+	0x249e, 0xffffffff, 0x000f000e,
+	0x249f, 0xffffffff, 0x00110010,
+	0x24a0, 0xffffffff, 0x00000014,
+	0x24a1, 0xffffffff, 0x00130012,
+	0x24a2, 0xffffffff, 0x00160015,
+	0x24a3, 0xffffffff, 0x0010000f,
+	0x24a4, 0xffffffff, 0x00120011,
+	0x24a5, 0xffffffff, 0x00000015,
+	0x24a6, 0xffffffff, 0x00140013,
+	0x24a7, 0xffffffff, 0x00170016,
+	0x2454, 0xffffffff, 0x96940200,
+	0x21c2, 0xffffffff, 0x00900100,
+	0x311e, 0xffffffff, 0x00000080,
+	0x3101, 0xffffffff, 0x0020003f,
+	0xc, 0xffffffff, 0x0000001c,
+	0xd, 0x000f0000, 0x000f0000,
+	0x583, 0xffffffff, 0x00000100,
+	0x409, 0xffffffff, 0x00000100,
+	0x40b, 0x00000101, 0x00000000,
+	0x82a, 0xffffffff, 0x00000104,
+	0x993, 0x000c0000, 0x000c0000,
+	0x992, 0x000c0000, 0x000c0000,
+	0x1579, 0xff000fff, 0x00000100,
+	0x157a, 0x00000001, 0x00000001,
+	0xbd4, 0x00000001, 0x00000001,
+	0xc33, 0xc0000fff, 0x00000104,
+	0x3079, 0x00000001, 0x00000001,
+	0x3430, 0xfffffff0, 0x00000100,
+	0x3630, 0xfffffff0, 0x00000100
+};
+static const u32 pitcairn_mgcg_cgcg_init[] =
+{
+	0x3100, 0xffffffff, 0xfffffffc,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2698, 0xffffffff, 0x00000100,
+	0x24a9, 0xffffffff, 0x00000100,
+	0x3059, 0xffffffff, 0x00000100,
+	0x25dd, 0xffffffff, 0x00000100,
+	0x2261, 0xffffffff, 0x06000100,
+	0x2286, 0xffffffff, 0x00000100,
+	0x24a8, 0xffffffff, 0x00000100,
+	0x30e0, 0xffffffff, 0x00000100,
+	0x22ca, 0xffffffff, 0x00000100,
+	0x2451, 0xffffffff, 0x00000100,
+	0x2362, 0xffffffff, 0x00000100,
+	0x2363, 0xffffffff, 0x00000100,
+	0x240c, 0xffffffff, 0x00000100,
+	0x240d, 0xffffffff, 0x00000100,
+	0x240e, 0xffffffff, 0x00000100,
+	0x240f, 0xffffffff, 0x00000100,
+	0x2b60, 0xffffffff, 0x00000100,
+	0x2b15, 0xffffffff, 0x00000100,
+	0x225f, 0xffffffff, 0x06000100,
+	0x261a, 0xffffffff, 0x00000100,
+	0x2544, 0xffffffff, 0x00000100,
+	0x2bc1, 0xffffffff, 0x00000100,
+	0x2b81, 0xffffffff, 0x00000100,
+	0x2527, 0xffffffff, 0x00000100,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2458, 0xffffffff, 0x00010000,
+	0x2459, 0xffffffff, 0x00030002,
+	0x245a, 0xffffffff, 0x00040007,
+	0x245b, 0xffffffff, 0x00060005,
+	0x245c, 0xffffffff, 0x00090008,
+	0x245d, 0xffffffff, 0x00020001,
+	0x245e, 0xffffffff, 0x00040003,
+	0x245f, 0xffffffff, 0x00000007,
+	0x2460, 0xffffffff, 0x00060005,
+	0x2461, 0xffffffff, 0x00090008,
+	0x2462, 0xffffffff, 0x00030002,
+	0x2463, 0xffffffff, 0x00050004,
+	0x2464, 0xffffffff, 0x00000008,
+	0x2465, 0xffffffff, 0x00070006,
+	0x2466, 0xffffffff, 0x000a0009,
+	0x2467, 0xffffffff, 0x00040003,
+	0x2468, 0xffffffff, 0x00060005,
+	0x2469, 0xffffffff, 0x00000009,
+	0x246a, 0xffffffff, 0x00080007,
+	0x246b, 0xffffffff, 0x000b000a,
+	0x246c, 0xffffffff, 0x00050004,
+	0x246d, 0xffffffff, 0x00070006,
+	0x246e, 0xffffffff, 0x0008000b,
+	0x246f, 0xffffffff, 0x000a0009,
+	0x2470, 0xffffffff, 0x000d000c,
+	0x2480, 0xffffffff, 0x00090008,
+	0x2481, 0xffffffff, 0x000b000a,
+	0x2482, 0xffffffff, 0x000c000f,
+	0x2483, 0xffffffff, 0x000e000d,
+	0x2484, 0xffffffff, 0x00110010,
+	0x2485, 0xffffffff, 0x000a0009,
+	0x2486, 0xffffffff, 0x000c000b,
+	0x2487, 0xffffffff, 0x0000000f,
+	0x2488, 0xffffffff, 0x000e000d,
+	0x2489, 0xffffffff, 0x00110010,
+	0x248a, 0xffffffff, 0x000b000a,
+	0x248b, 0xffffffff, 0x000d000c,
+	0x248c, 0xffffffff, 0x00000010,
+	0x248d, 0xffffffff, 0x000f000e,
+	0x248e, 0xffffffff, 0x00120011,
+	0x248f, 0xffffffff, 0x000c000b,
+	0x2490, 0xffffffff, 0x000e000d,
+	0x2491, 0xffffffff, 0x00000011,
+	0x2492, 0xffffffff, 0x0010000f,
+	0x2493, 0xffffffff, 0x00130012,
+	0x2494, 0xffffffff, 0x000d000c,
+	0x2495, 0xffffffff, 0x000f000e,
+	0x2496, 0xffffffff, 0x00100013,
+	0x2497, 0xffffffff, 0x00120011,
+	0x2498, 0xffffffff, 0x00150014,
+	0x2454, 0xffffffff, 0x96940200,
+	0x21c2, 0xffffffff, 0x00900100,
+	0x311e, 0xffffffff, 0x00000080,
+	0x3101, 0xffffffff, 0x0020003f,
+	0xc, 0xffffffff, 0x0000001c,
+	0xd, 0x000f0000, 0x000f0000,
+	0x583, 0xffffffff, 0x00000100,
+	0x409, 0xffffffff, 0x00000100,
+	0x40b, 0x00000101, 0x00000000,
+	0x82a, 0xffffffff, 0x00000104,
+	0x1579, 0xff000fff, 0x00000100,
+	0x157a, 0x00000001, 0x00000001,
+	0xbd4, 0x00000001, 0x00000001,
+	0xc33, 0xc0000fff, 0x00000104,
+	0x3079, 0x00000001, 0x00000001,
+	0x3430, 0xfffffff0, 0x00000100,
+	0x3630, 0xfffffff0, 0x00000100
+};
+static const u32 verde_mgcg_cgcg_init[] =
+{
+	0x3100, 0xffffffff, 0xfffffffc,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2698, 0xffffffff, 0x00000100,
+	0x24a9, 0xffffffff, 0x00000100,
+	0x3059, 0xffffffff, 0x00000100,
+	0x25dd, 0xffffffff, 0x00000100,
+	0x2261, 0xffffffff, 0x06000100,
+	0x2286, 0xffffffff, 0x00000100,
+	0x24a8, 0xffffffff, 0x00000100,
+	0x30e0, 0xffffffff, 0x00000100,
+	0x22ca, 0xffffffff, 0x00000100,
+	0x2451, 0xffffffff, 0x00000100,
+	0x2362, 0xffffffff, 0x00000100,
+	0x2363, 0xffffffff, 0x00000100,
+	0x240c, 0xffffffff, 0x00000100,
+	0x240d, 0xffffffff, 0x00000100,
+	0x240e, 0xffffffff, 0x00000100,
+	0x240f, 0xffffffff, 0x00000100,
+	0x2b60, 0xffffffff, 0x00000100,
+	0x2b15, 0xffffffff, 0x00000100,
+	0x225f, 0xffffffff, 0x06000100,
+	0x261a, 0xffffffff, 0x00000100,
+	0x2544, 0xffffffff, 0x00000100,
+	0x2bc1, 0xffffffff, 0x00000100,
+	0x2b81, 0xffffffff, 0x00000100,
+	0x2527, 0xffffffff, 0x00000100,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2458, 0xffffffff, 0x00010000,
+	0x2459, 0xffffffff, 0x00030002,
+	0x245a, 0xffffffff, 0x00040007,
+	0x245b, 0xffffffff, 0x00060005,
+	0x245c, 0xffffffff, 0x00090008,
+	0x245d, 0xffffffff, 0x00020001,
+	0x245e, 0xffffffff, 0x00040003,
+	0x245f, 0xffffffff, 0x00000007,
+	0x2460, 0xffffffff, 0x00060005,
+	0x2461, 0xffffffff, 0x00090008,
+	0x2462, 0xffffffff, 0x00030002,
+	0x2463, 0xffffffff, 0x00050004,
+	0x2464, 0xffffffff, 0x00000008,
+	0x2465, 0xffffffff, 0x00070006,
+	0x2466, 0xffffffff, 0x000a0009,
+	0x2467, 0xffffffff, 0x00040003,
+	0x2468, 0xffffffff, 0x00060005,
+	0x2469, 0xffffffff, 0x00000009,
+	0x246a, 0xffffffff, 0x00080007,
+	0x246b, 0xffffffff, 0x000b000a,
+	0x246c, 0xffffffff, 0x00050004,
+	0x246d, 0xffffffff, 0x00070006,
+	0x246e, 0xffffffff, 0x0008000b,
+	0x246f, 0xffffffff, 0x000a0009,
+	0x2470, 0xffffffff, 0x000d000c,
+	0x2480, 0xffffffff, 0x00090008,
+	0x2481, 0xffffffff, 0x000b000a,
+	0x2482, 0xffffffff, 0x000c000f,
+	0x2483, 0xffffffff, 0x000e000d,
+	0x2484, 0xffffffff, 0x00110010,
+	0x2485, 0xffffffff, 0x000a0009,
+	0x2486, 0xffffffff, 0x000c000b,
+	0x2487, 0xffffffff, 0x0000000f,
+	0x2488, 0xffffffff, 0x000e000d,
+	0x2489, 0xffffffff, 0x00110010,
+	0x248a, 0xffffffff, 0x000b000a,
+	0x248b, 0xffffffff, 0x000d000c,
+	0x248c, 0xffffffff, 0x00000010,
+	0x248d, 0xffffffff, 0x000f000e,
+	0x248e, 0xffffffff, 0x00120011,
+	0x248f, 0xffffffff, 0x000c000b,
+	0x2490, 0xffffffff, 0x000e000d,
+	0x2491, 0xffffffff, 0x00000011,
+	0x2492, 0xffffffff, 0x0010000f,
+	0x2493, 0xffffffff, 0x00130012,
+	0x2494, 0xffffffff, 0x000d000c,
+	0x2495, 0xffffffff, 0x000f000e,
+	0x2496, 0xffffffff, 0x00100013,
+	0x2497, 0xffffffff, 0x00120011,
+	0x2498, 0xffffffff, 0x00150014,
+	0x2454, 0xffffffff, 0x96940200,
+	0x21c2, 0xffffffff, 0x00900100,
+	0x311e, 0xffffffff, 0x00000080,
+	0x3101, 0xffffffff, 0x0020003f,
+	0xc, 0xffffffff, 0x0000001c,
+	0xd, 0x000f0000, 0x000f0000,
+	0x583, 0xffffffff, 0x00000100,
+	0x409, 0xffffffff, 0x00000100,
+	0x40b, 0x00000101, 0x00000000,
+	0x82a, 0xffffffff, 0x00000104,
+	0x993, 0x000c0000, 0x000c0000,
+	0x992, 0x000c0000, 0x000c0000,
+	0x1579, 0xff000fff, 0x00000100,
+	0x157a, 0x00000001, 0x00000001,
+	0xbd4, 0x00000001, 0x00000001,
+	0xc33, 0xc0000fff, 0x00000104,
+	0x3079, 0x00000001, 0x00000001,
+	0x3430, 0xfffffff0, 0x00000100,
+	0x3630, 0xfffffff0, 0x00000100
+};
+static const u32 oland_mgcg_cgcg_init[] =
+{
+	0x3100, 0xffffffff, 0xfffffffc,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2698, 0xffffffff, 0x00000100,
+	0x24a9, 0xffffffff, 0x00000100,
+	0x3059, 0xffffffff, 0x00000100,
+	0x25dd, 0xffffffff, 0x00000100,
+	0x2261, 0xffffffff, 0x06000100,
+	0x2286, 0xffffffff, 0x00000100,
+	0x24a8, 0xffffffff, 0x00000100,
+	0x30e0, 0xffffffff, 0x00000100,
+	0x22ca, 0xffffffff, 0x00000100,
+	0x2451, 0xffffffff, 0x00000100,
+	0x2362, 0xffffffff, 0x00000100,
+	0x2363, 0xffffffff, 0x00000100,
+	0x240c, 0xffffffff, 0x00000100,
+	0x240d, 0xffffffff, 0x00000100,
+	0x240e, 0xffffffff, 0x00000100,
+	0x240f, 0xffffffff, 0x00000100,
+	0x2b60, 0xffffffff, 0x00000100,
+	0x2b15, 0xffffffff, 0x00000100,
+	0x225f, 0xffffffff, 0x06000100,
+	0x261a, 0xffffffff, 0x00000100,
+	0x2544, 0xffffffff, 0x00000100,
+	0x2bc1, 0xffffffff, 0x00000100,
+	0x2b81, 0xffffffff, 0x00000100,
+	0x2527, 0xffffffff, 0x00000100,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2458, 0xffffffff, 0x00010000,
+	0x2459, 0xffffffff, 0x00030002,
+	0x245a, 0xffffffff, 0x00040007,
+	0x245b, 0xffffffff, 0x00060005,
+	0x245c, 0xffffffff, 0x00090008,
+	0x245d, 0xffffffff, 0x00020001,
+	0x245e, 0xffffffff, 0x00040003,
+	0x245f, 0xffffffff, 0x00000007,
+	0x2460, 0xffffffff, 0x00060005,
+	0x2461, 0xffffffff, 0x00090008,
+	0x2462, 0xffffffff, 0x00030002,
+	0x2463, 0xffffffff, 0x00050004,
+	0x2464, 0xffffffff, 0x00000008,
+	0x2465, 0xffffffff, 0x00070006,
+	0x2466, 0xffffffff, 0x000a0009,
+	0x2467, 0xffffffff, 0x00040003,
+	0x2468, 0xffffffff, 0x00060005,
+	0x2469, 0xffffffff, 0x00000009,
+	0x246a, 0xffffffff, 0x00080007,
+	0x246b, 0xffffffff, 0x000b000a,
+	0x246c, 0xffffffff, 0x00050004,
+	0x246d, 0xffffffff, 0x00070006,
+	0x246e, 0xffffffff, 0x0008000b,
+	0x246f, 0xffffffff, 0x000a0009,
+	0x2470, 0xffffffff, 0x000d000c,
+	0x2471, 0xffffffff, 0x00060005,
+	0x2472, 0xffffffff, 0x00080007,
+	0x2473, 0xffffffff, 0x0000000b,
+	0x2474, 0xffffffff, 0x000a0009,
+	0x2475, 0xffffffff, 0x000d000c,
+	0x2454, 0xffffffff, 0x96940200,
+	0x21c2, 0xffffffff, 0x00900100,
+	0x311e, 0xffffffff, 0x00000080,
+	0x3101, 0xffffffff, 0x0020003f,
+	0xc, 0xffffffff, 0x0000001c,
+	0xd, 0x000f0000, 0x000f0000,
+	0x583, 0xffffffff, 0x00000100,
+	0x409, 0xffffffff, 0x00000100,
+	0x40b, 0x00000101, 0x00000000,
+	0x82a, 0xffffffff, 0x00000104,
+	0x993, 0x000c0000, 0x000c0000,
+	0x992, 0x000c0000, 0x000c0000,
+	0x1579, 0xff000fff, 0x00000100,
+	0x157a, 0x00000001, 0x00000001,
+	0xbd4, 0x00000001, 0x00000001,
+	0xc33, 0xc0000fff, 0x00000104,
+	0x3079, 0x00000001, 0x00000001,
+	0x3430, 0xfffffff0, 0x00000100,
+	0x3630, 0xfffffff0, 0x00000100
+};
+static const u32 hainan_mgcg_cgcg_init[] =
+{
+	0x3100, 0xffffffff, 0xfffffffc,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2698, 0xffffffff, 0x00000100,
+	0x24a9, 0xffffffff, 0x00000100,
+	0x3059, 0xffffffff, 0x00000100,
+	0x25dd, 0xffffffff, 0x00000100,
+	0x2261, 0xffffffff, 0x06000100,
+	0x2286, 0xffffffff, 0x00000100,
+	0x24a8, 0xffffffff, 0x00000100,
+	0x30e0, 0xffffffff, 0x00000100,
+	0x22ca, 0xffffffff, 0x00000100,
+	0x2451, 0xffffffff, 0x00000100,
+	0x2362, 0xffffffff, 0x00000100,
+	0x2363, 0xffffffff, 0x00000100,
+	0x240c, 0xffffffff, 0x00000100,
+	0x240d, 0xffffffff, 0x00000100,
+	0x240e, 0xffffffff, 0x00000100,
+	0x240f, 0xffffffff, 0x00000100,
+	0x2b60, 0xffffffff, 0x00000100,
+	0x2b15, 0xffffffff, 0x00000100,
+	0x225f, 0xffffffff, 0x06000100,
+	0x261a, 0xffffffff, 0x00000100,
+	0x2544, 0xffffffff, 0x00000100,
+	0x2bc1, 0xffffffff, 0x00000100,
+	0x2b81, 0xffffffff, 0x00000100,
+	0x2527, 0xffffffff, 0x00000100,
+	0x200b, 0xffffffff, 0xe0000000,
+	0x2458, 0xffffffff, 0x00010000,
+	0x2459, 0xffffffff, 0x00030002,
+	0x245a, 0xffffffff, 0x00040007,
+	0x245b, 0xffffffff, 0x00060005,
+	0x245c, 0xffffffff, 0x00090008,
+	0x245d, 0xffffffff, 0x00020001,
+	0x245e, 0xffffffff, 0x00040003,
+	0x245f, 0xffffffff, 0x00000007,
+	0x2460, 0xffffffff, 0x00060005,
+	0x2461, 0xffffffff, 0x00090008,
+	0x2462, 0xffffffff, 0x00030002,
+	0x2463, 0xffffffff, 0x00050004,
+	0x2464, 0xffffffff, 0x00000008,
+	0x2465, 0xffffffff, 0x00070006,
+	0x2466, 0xffffffff, 0x000a0009,
+	0x2467, 0xffffffff, 0x00040003,
+	0x2468, 0xffffffff, 0x00060005,
+	0x2469, 0xffffffff, 0x00000009,
+	0x246a, 0xffffffff, 0x00080007,
+	0x246b, 0xffffffff, 0x000b000a,
+	0x246c, 0xffffffff, 0x00050004,
+	0x246d, 0xffffffff, 0x00070006,
+	0x246e, 0xffffffff, 0x0008000b,
+	0x246f, 0xffffffff, 0x000a0009,
+	0x2470, 0xffffffff, 0x000d000c,
+	0x2471, 0xffffffff, 0x00060005,
+	0x2472, 0xffffffff, 0x00080007,
+	0x2473, 0xffffffff, 0x0000000b,
+	0x2474, 0xffffffff, 0x000a0009,
+	0x2475, 0xffffffff, 0x000d000c,
+	0x2454, 0xffffffff, 0x96940200,
+	0x21c2, 0xffffffff, 0x00900100,
+	0x311e, 0xffffffff, 0x00000080,
+	0x3101, 0xffffffff, 0x0020003f,
+	0xc, 0xffffffff, 0x0000001c,
+	0xd, 0x000f0000, 0x000f0000,
+	0x583, 0xffffffff, 0x00000100,
+	0x409, 0xffffffff, 0x00000100,
+	0x82a, 0xffffffff, 0x00000104,
+	0x993, 0x000c0000, 0x000c0000,
+	0x992, 0x000c0000, 0x000c0000,
+	0xbd4, 0x00000001, 0x00000001,
+	0xc33, 0xc0000fff, 0x00000104,
+	0x3079, 0x00000001, 0x00000001,
+	0x3430, 0xfffffff0, 0x00000100,
+	0x3630, 0xfffffff0, 0x00000100
+};
+
+static u32 si_pcie_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(AMDGPU_PCIE_INDEX, reg);
+	(void)RREG32(AMDGPU_PCIE_INDEX);
+	r = RREG32(AMDGPU_PCIE_DATA);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+	return r;
+}
+
+static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(AMDGPU_PCIE_INDEX, reg);
+	(void)RREG32(AMDGPU_PCIE_INDEX);
+	WREG32(AMDGPU_PCIE_DATA, v);
+	(void)RREG32(AMDGPU_PCIE_DATA);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+	(void)RREG32(PCIE_PORT_INDEX);
+	r = RREG32(PCIE_PORT_DATA);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+	return r;
+}
+
+void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+	(void)RREG32(PCIE_PORT_INDEX);
+	WREG32(PCIE_PORT_DATA, (v));
+	(void)RREG32(PCIE_PORT_DATA);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+static u32 si_smc_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	WREG32(SMC_IND_INDEX_0, (reg));
+	r = RREG32(SMC_IND_DATA_0);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+	return r;
+}
+
+static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	WREG32(SMC_IND_INDEX_0, (reg));
+	WREG32(SMC_IND_DATA_0, (v));
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+}
+
+static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
+	{GRBM_STATUS, false},
+	{GB_ADDR_CONFIG, false},
+	{MC_ARB_RAMCFG, false},
+	{GB_TILE_MODE0, false},
+	{GB_TILE_MODE1, false},
+	{GB_TILE_MODE2, false},
+	{GB_TILE_MODE3, false},
+	{GB_TILE_MODE4, false},
+	{GB_TILE_MODE5, false},
+	{GB_TILE_MODE6, false},
+	{GB_TILE_MODE7, false},
+	{GB_TILE_MODE8, false},
+	{GB_TILE_MODE9, false},
+	{GB_TILE_MODE10, false},
+	{GB_TILE_MODE11, false},
+	{GB_TILE_MODE12, false},
+	{GB_TILE_MODE13, false},
+	{GB_TILE_MODE14, false},
+	{GB_TILE_MODE15, false},
+	{GB_TILE_MODE16, false},
+	{GB_TILE_MODE17, false},
+	{GB_TILE_MODE18, false},
+	{GB_TILE_MODE19, false},
+	{GB_TILE_MODE20, false},
+	{GB_TILE_MODE21, false},
+	{GB_TILE_MODE22, false},
+	{GB_TILE_MODE23, false},
+	{GB_TILE_MODE24, false},
+	{GB_TILE_MODE25, false},
+	{GB_TILE_MODE26, false},
+	{GB_TILE_MODE27, false},
+	{GB_TILE_MODE28, false},
+	{GB_TILE_MODE29, false},
+	{GB_TILE_MODE30, false},
+	{GB_TILE_MODE31, false},
+	{CC_RB_BACKEND_DISABLE, false, true},
+	{GC_USER_RB_BACKEND_DISABLE, false, true},
+	{PA_SC_RASTER_CONFIG, false, true},
+};
+
+static uint32_t si_read_indexed_register(struct amdgpu_device *adev,
+					  u32 se_num, u32 sh_num,
+					  u32 reg_offset)
+{
+	uint32_t val;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	if (se_num != 0xffffffff || sh_num != 0xffffffff)
+		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+
+	val = RREG32(reg_offset);
+
+	if (se_num != 0xffffffff || sh_num != 0xffffffff)
+		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+	return val;
+}
+
+static int si_read_register(struct amdgpu_device *adev, u32 se_num,
+			     u32 sh_num, u32 reg_offset, u32 *value)
+{
+	uint32_t i;
+
+	*value = 0;
+	for (i = 0; i < ARRAY_SIZE(si_allowed_read_registers); i++) {
+		if (reg_offset != si_allowed_read_registers[i].reg_offset)
+			continue;
+
+		if (!si_allowed_read_registers[i].untouched)
+			*value = si_allowed_read_registers[i].grbm_indexed ?
+				 si_read_indexed_register(adev, se_num,
+							   sh_num, reg_offset) :
+				 RREG32(reg_offset);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static bool si_read_disabled_bios(struct amdgpu_device *adev)
+{
+	u32 bus_cntl;
+	u32 d1vga_control = 0;
+	u32 d2vga_control = 0;
+	u32 vga_render_control = 0;
+	u32 rom_cntl;
+	bool r;
+
+	bus_cntl = RREG32(R600_BUS_CNTL);
+	if (adev->mode_info.num_crtc) {
+		d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+		d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+		vga_render_control = RREG32(VGA_RENDER_CONTROL);
+	}
+	rom_cntl = RREG32(R600_ROM_CNTL);
+
+	/* enable the rom */
+	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+	if (adev->mode_info.num_crtc) {
+		/* Disable VGA mode */
+		WREG32(AVIVO_D1VGA_CONTROL,
+		       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+					  AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+		WREG32(AVIVO_D2VGA_CONTROL,
+		       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+					  AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+		WREG32(VGA_RENDER_CONTROL,
+		       (vga_render_control & C_000300_VGA_VSTATUS_CNTL));
+	}
+	WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
+
+	r = amdgpu_read_bios(adev);
+
+	/* restore regs */
+	WREG32(R600_BUS_CNTL, bus_cntl);
+	if (adev->mode_info.num_crtc) {
+		WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+		WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+		WREG32(VGA_RENDER_CONTROL, vga_render_control);
+	}
+	WREG32(R600_ROM_CNTL, rom_cntl);
+	return r;
+}
+
+//xxx: not implemented
+static int si_asic_reset(struct amdgpu_device *adev)
+{
+	return 0;
+}
+
+static void si_vga_set_state(struct amdgpu_device *adev, bool state)
+{
+	uint32_t temp;
+
+	temp = RREG32(CONFIG_CNTL);
+	if (state == false) {
+		temp &= ~(1<<0);
+		temp |= (1<<1);
+	} else {
+		temp &= ~(1<<1);
+	}
+	WREG32(CONFIG_CNTL, temp);
+}
+
+static u32 si_get_xclk(struct amdgpu_device *adev)
+{
+        u32 reference_clock = adev->clock.spll.reference_freq;
+	u32 tmp;
+
+	tmp = RREG32(CG_CLKPIN_CNTL_2);
+	if (tmp & MUX_TCLK_TO_XCLK)
+		return TCLK;
+
+	tmp = RREG32(CG_CLKPIN_CNTL);
+	if (tmp & XTALIN_DIVIDE)
+		return reference_clock / 4;
+
+	return reference_clock;
+}
+
+//xxx:not implemented
+static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
+{
+	return 0;
+}
+
+static void si_detect_hw_virtualization(struct amdgpu_device *adev)
+{
+	if (is_virtual_machine()) /* passthrough mode */
+		adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+}
+
+static const struct amdgpu_asic_funcs si_asic_funcs =
+{
+	.read_disabled_bios = &si_read_disabled_bios,
+	.detect_hw_virtualization = si_detect_hw_virtualization,
+	.read_register = &si_read_register,
+	.reset = &si_asic_reset,
+	.set_vga_state = &si_vga_set_state,
+	.get_xclk = &si_get_xclk,
+	.set_uvd_clocks = &si_set_uvd_clocks,
+	.set_vce_clocks = NULL,
+};
+
+static uint32_t si_get_rev_id(struct amdgpu_device *adev)
+{
+	return (RREG32(CC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
+		>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
+}
+
+static int si_common_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->smc_rreg = &si_smc_rreg;
+	adev->smc_wreg = &si_smc_wreg;
+	adev->pcie_rreg = &si_pcie_rreg;
+	adev->pcie_wreg = &si_pcie_wreg;
+	adev->pciep_rreg = &si_pciep_rreg;
+	adev->pciep_wreg = &si_pciep_wreg;
+	adev->uvd_ctx_rreg = NULL;
+	adev->uvd_ctx_wreg = NULL;
+	adev->didt_rreg = NULL;
+	adev->didt_wreg = NULL;
+
+	adev->asic_funcs = &si_asic_funcs;
+
+	adev->rev_id = si_get_rev_id(adev);
+	adev->external_rev_id = 0xFF;
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+		adev->cg_flags =
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
+			adev->pg_flags = 0;
+		break;
+	case CHIP_PITCAIRN:
+		adev->cg_flags =
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_GFX_RLC_LS |
+			AMD_CG_SUPPORT_MC_LS |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
+		adev->pg_flags = 0;
+		break;
+
+	case CHIP_VERDE:
+		adev->cg_flags =
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CGTS_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_MC_LS |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
+		adev->pg_flags = 0;
+		//???
+		adev->external_rev_id = adev->rev_id + 0x14;
+		break;
+	case CHIP_OLAND:
+		adev->cg_flags =
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_GFX_RLC_LS |
+			AMD_CG_SUPPORT_MC_LS |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
+		adev->pg_flags = 0;
+		break;
+	case CHIP_HAINAN:
+		adev->cg_flags =
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_GFX_RLC_LS |
+			AMD_CG_SUPPORT_MC_LS |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
+		adev->pg_flags = 0;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int si_common_sw_init(void *handle)
+{
+	return 0;
+}
+
+static int si_common_sw_fini(void *handle)
+{
+	return 0;
+}
+
+
+static void si_init_golden_registers(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+		amdgpu_program_register_sequence(adev,
+						 tahiti_golden_registers,
+						 (const u32)ARRAY_SIZE(tahiti_golden_registers));
+		amdgpu_program_register_sequence(adev,
+						 tahiti_golden_rlc_registers,
+						 (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
+		amdgpu_program_register_sequence(adev,
+						 tahiti_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 tahiti_golden_registers2,
+						 (const u32)ARRAY_SIZE(tahiti_golden_registers2));
+		break;
+	case CHIP_PITCAIRN:
+		amdgpu_program_register_sequence(adev,
+						 pitcairn_golden_registers,
+						 (const u32)ARRAY_SIZE(pitcairn_golden_registers));
+		amdgpu_program_register_sequence(adev,
+						 pitcairn_golden_rlc_registers,
+						 (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
+		amdgpu_program_register_sequence(adev,
+						 pitcairn_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+	case CHIP_VERDE:
+		amdgpu_program_register_sequence(adev,
+						 verde_golden_registers,
+						 (const u32)ARRAY_SIZE(verde_golden_registers));
+		amdgpu_program_register_sequence(adev,
+						 verde_golden_rlc_registers,
+						 (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
+		amdgpu_program_register_sequence(adev,
+						 verde_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 verde_pg_init,
+						 (const u32)ARRAY_SIZE(verde_pg_init));
+		break;
+	case CHIP_OLAND:
+		amdgpu_program_register_sequence(adev,
+						 oland_golden_registers,
+						 (const u32)ARRAY_SIZE(oland_golden_registers));
+		amdgpu_program_register_sequence(adev,
+						 oland_golden_rlc_registers,
+						 (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
+		amdgpu_program_register_sequence(adev,
+						 oland_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+	case CHIP_HAINAN:
+		amdgpu_program_register_sequence(adev,
+						 hainan_golden_registers,
+						 (const u32)ARRAY_SIZE(hainan_golden_registers));
+		amdgpu_program_register_sequence(adev,
+						 hainan_golden_registers2,
+						 (const u32)ARRAY_SIZE(hainan_golden_registers2));
+		amdgpu_program_register_sequence(adev,
+						 hainan_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
+		break;
+
+
+	default:
+		BUG();
+	}
+}
+
+static void si_pcie_gen3_enable(struct amdgpu_device *adev)
+{
+	struct pci_dev *root = adev->pdev->bus->self;
+	int bridge_pos, gpu_pos;
+	u32 speed_cntl, mask, current_data_rate;
+	int ret, i;
+	u16 tmp16;
+
+	if (pci_is_root_bus(adev->pdev->bus))
+		return;
+
+	if (amdgpu_pcie_gen2 == 0)
+		return;
+
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+	if (ret != 0)
+		return;
+
+	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+		return;
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
+		LC_CURRENT_DATA_RATE_SHIFT;
+	if (mask & DRM_PCIE_SPEED_80) {
+		if (current_data_rate == 2) {
+			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
+			return;
+		}
+		DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
+	} else if (mask & DRM_PCIE_SPEED_50) {
+		if (current_data_rate == 1) {
+			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+			return;
+		}
+		DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
+	}
+
+	bridge_pos = pci_pcie_cap(root);
+	if (!bridge_pos)
+		return;
+
+	gpu_pos = pci_pcie_cap(adev->pdev);
+	if (!gpu_pos)
+		return;
+
+	if (mask & DRM_PCIE_SPEED_80) {
+		if (current_data_rate != 2) {
+			u16 bridge_cfg, gpu_cfg;
+			u16 bridge_cfg2, gpu_cfg2;
+			u32 max_lw, current_lw, tmp;
+
+			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+			pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+			pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+			pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+			tmp = RREG32_PCIE(PCIE_LC_STATUS1);
+			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
+			current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
+
+			if (current_lw < max_lw) {
+				tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+				if (tmp & LC_RENEGOTIATION_SUPPORT) {
+					tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
+					tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
+					tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
+					WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
+				}
+			}
+
+			for (i = 0; i < 10; i++) {
+				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
+					break;
+
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
+				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+
+				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+				tmp |= LC_SET_QUIESCE;
+				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+				tmp |= LC_REDO_EQ;
+				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+				mdelay(100);
+
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+				pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
+				tmp16 &= ~((1 << 4) | (7 << 9));
+				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
+				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
+
+				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+				tmp16 &= ~((1 << 4) | (7 << 9));
+				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
+				pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+				tmp &= ~LC_SET_QUIESCE;
+				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+			}
+		}
+	}
+
+	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
+	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
+	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+	tmp16 &= ~0xf;
+	if (mask & DRM_PCIE_SPEED_80)
+		tmp16 |= 3;
+	else if (mask & DRM_PCIE_SPEED_50)
+		tmp16 |= 2;
+	else
+		tmp16 |= 1;
+	pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
+	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
+			break;
+		udelay(1);
+	}
+}
+
+static inline u32 si_pif_phy0_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
+	r = RREG32(EVERGREEN_PIF_PHY0_DATA);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+	return r;
+}
+
+static inline void si_pif_phy0_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
+	WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+static inline u32 si_pif_phy1_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
+	r = RREG32(EVERGREEN_PIF_PHY1_DATA);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+	return r;
+}
+
+static inline void si_pif_phy1_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
+	WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+static void si_program_aspm(struct amdgpu_device *adev)
+{
+	u32 data, orig;
+	bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
+	bool disable_clkreq = false;
+
+	if (amdgpu_aspm == 0)
+		return;
+
+	if (adev->flags & AMD_IS_APU)
+		return;
+	orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+	data &= ~LC_XMIT_N_FTS_MASK;
+	data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
+	if (orig != data)
+		WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
+
+	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
+	data |= LC_GO_TO_RECOVERY;
+	if (orig != data)
+		WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
+
+	orig = data = RREG32_PCIE(PCIE_P_CNTL);
+	data |= P_IGNORE_EDB_ERR;
+	if (orig != data)
+		WREG32_PCIE(PCIE_P_CNTL, data);
+
+	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+	data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
+	data |= LC_PMI_TO_L1_DIS;
+	if (!disable_l0s)
+		data |= LC_L0S_INACTIVITY(7);
+
+	if (!disable_l1) {
+		data |= LC_L1_INACTIVITY(7);
+		data &= ~LC_PMI_TO_L1_DIS;
+		if (orig != data)
+			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+
+		if (!disable_plloff_in_l1) {
+			bool clk_req_support;
+
+			orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
+			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+			if (orig != data)
+				si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_0, data);
+
+			orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_1);
+			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+			if (orig != data)
+				si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_1, data);
+
+			orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_0);
+			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+			if (orig != data)
+				si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_0, data);
+
+			orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_1);
+			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+			if (orig != data)
+				si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
+
+			if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) {
+				orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
+				data &= ~PLL_RAMP_UP_TIME_0_MASK;
+				if (orig != data)
+					si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_0, data);
+
+				orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_1);
+				data &= ~PLL_RAMP_UP_TIME_1_MASK;
+				if (orig != data)
+					si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_1, data);
+
+				orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_2);
+				data &= ~PLL_RAMP_UP_TIME_2_MASK;
+				if (orig != data)
+					si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_2, data);
+
+				orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_3);
+				data &= ~PLL_RAMP_UP_TIME_3_MASK;
+				if (orig != data)
+					si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_3, data);
+
+				orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_0);
+				data &= ~PLL_RAMP_UP_TIME_0_MASK;
+				if (orig != data)
+					si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_0, data);
+
+				orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_1);
+				data &= ~PLL_RAMP_UP_TIME_1_MASK;
+				if (orig != data)
+					si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
+
+				orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_2);
+				data &= ~PLL_RAMP_UP_TIME_2_MASK;
+				if (orig != data)
+					si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_2, data);
+
+				orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_3);
+				data &= ~PLL_RAMP_UP_TIME_3_MASK;
+				if (orig != data)
+					si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_3, data);
+			}
+			orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
+			data |= LC_DYN_LANES_PWR_STATE(3);
+			if (orig != data)
+				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+
+			orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
+			data &= ~LS2_EXIT_TIME_MASK;
+			if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
+				data |= LS2_EXIT_TIME(5);
+			if (orig != data)
+				si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
+
+			orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
+			data &= ~LS2_EXIT_TIME_MASK;
+			if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
+				data |= LS2_EXIT_TIME(5);
+			if (orig != data)
+				si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
+
+			if (!disable_clkreq &&
+			    !pci_is_root_bus(adev->pdev->bus)) {
+				struct pci_dev *root = adev->pdev->bus->self;
+				u32 lnkcap;
+
+				clk_req_support = false;
+				pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+				if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
+					clk_req_support = true;
+			} else {
+				clk_req_support = false;
+			}
+
+			if (clk_req_support) {
+				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
+				data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
+				if (orig != data)
+					WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
+
+				orig = data = RREG32(THM_CLK_CNTL);
+				data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
+				data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
+				if (orig != data)
+					WREG32(THM_CLK_CNTL, data);
+
+				orig = data = RREG32(MISC_CLK_CNTL);
+				data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
+				data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
+				if (orig != data)
+					WREG32(MISC_CLK_CNTL, data);
+
+				orig = data = RREG32(CG_CLKPIN_CNTL);
+				data &= ~BCLK_AS_XCLK;
+				if (orig != data)
+					WREG32(CG_CLKPIN_CNTL, data);
+
+				orig = data = RREG32(CG_CLKPIN_CNTL_2);
+				data &= ~FORCE_BIF_REFCLK_EN;
+				if (orig != data)
+					WREG32(CG_CLKPIN_CNTL_2, data);
+
+				orig = data = RREG32(MPLL_BYPASSCLK_SEL);
+				data &= ~MPLL_CLKOUT_SEL_MASK;
+				data |= MPLL_CLKOUT_SEL(4);
+				if (orig != data)
+					WREG32(MPLL_BYPASSCLK_SEL, data);
+
+				orig = data = RREG32(SPLL_CNTL_MODE);
+				data &= ~SPLL_REFCLK_SEL_MASK;
+				if (orig != data)
+					WREG32(SPLL_CNTL_MODE, data);
+			}
+		}
+	} else {
+		if (orig != data)
+			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+	}
+
+	orig = data = RREG32_PCIE(PCIE_CNTL2);
+	data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
+	if (orig != data)
+		WREG32_PCIE(PCIE_CNTL2, data);
+
+	if (!disable_l0s) {
+		data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+		if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
+			data = RREG32_PCIE(PCIE_LC_STATUS1);
+			if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
+				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+				data &= ~LC_L0S_INACTIVITY_MASK;
+				if (orig != data)
+					WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+			}
+		}
+	}
+}
+
+static void si_fix_pci_max_read_req_size(struct amdgpu_device *adev)
+{
+	int readrq;
+	u16 v;
+
+	readrq = pcie_get_readrq(adev->pdev);
+	v = ffs(readrq) - 8;
+	if ((v == 0) || (v == 6) || (v == 7))
+		pcie_set_readrq(adev->pdev, 512);
+}
+
+static int si_common_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	si_fix_pci_max_read_req_size(adev);
+	si_init_golden_registers(adev);
+	si_pcie_gen3_enable(adev);
+	si_program_aspm(adev);
+
+	return 0;
+}
+
+static int si_common_hw_fini(void *handle)
+{
+	return 0;
+}
+
+static int si_common_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return si_common_hw_fini(adev);
+}
+
+static int si_common_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return si_common_hw_init(adev);
+}
+
+static bool si_common_is_idle(void *handle)
+{
+	return true;
+}
+
+static int si_common_wait_for_idle(void *handle)
+{
+	return 0;
+}
+
+static int si_common_soft_reset(void *handle)
+{
+	return 0;
+}
+
+static int si_common_set_clockgating_state(void *handle,
+					    enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int si_common_set_powergating_state(void *handle,
+					    enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs si_common_ip_funcs = {
+	.name = "si_common",
+	.early_init = si_common_early_init,
+	.late_init = NULL,
+	.sw_init = si_common_sw_init,
+	.sw_fini = si_common_sw_fini,
+	.hw_init = si_common_hw_init,
+	.hw_fini = si_common_hw_fini,
+	.suspend = si_common_suspend,
+	.resume = si_common_resume,
+	.is_idle = si_common_is_idle,
+	.wait_for_idle = si_common_wait_for_idle,
+	.soft_reset = si_common_soft_reset,
+	.set_clockgating_state = si_common_set_clockgating_state,
+	.set_powergating_state = si_common_set_powergating_state,
+};
+
+static const struct amdgpu_ip_block_version verde_ip_blocks[] =
+{
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &si_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &si_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &dce_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &si_dma_ip_funcs,
+	},
+/*	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 3,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &si_null_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &si_null_ip_funcs,
+	},
+	*/
+};
+
+
+static const struct amdgpu_ip_block_version hainan_ip_blocks[] =
+{
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &si_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &si_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &si_dma_ip_funcs,
+	},
+};
+
+int si_set_ip_blocks(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_VERDE:
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_OLAND:
+		adev->ip_blocks = verde_ip_blocks;
+		adev->num_ip_blocks = ARRAY_SIZE(verde_ip_blocks);
+		break;
+	case CHIP_HAINAN:
+		adev->ip_blocks = hainan_ip_blocks;
+		adev->num_ip_blocks = ARRAY_SIZE(hainan_ip_blocks);
+		break;
+	default:
+		BUG();
+	}
+	return 0;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h b/drivers/gpu/drm/amd/amdgpu/si.h
similarity index 72%
copy from drivers/gpu/drm/amd/amdgpu/iceland_smum.h
copy to drivers/gpu/drm/amd/amdgpu/si.h
index 5983e31..959d7b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
+++ b/drivers/gpu/drm/amd/amdgpu/si.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2015 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -21,21 +21,13 @@
  *
  */
 
-#ifndef ICELAND_SMUM_H
-#define ICELAND_SMUM_H
+#ifndef __SI_H__
+#define __SI_H__
 
-#include "ppsmc.h"
+extern const struct amd_ip_funcs si_common_ip_funcs;
 
-extern int iceland_smu_init(struct amdgpu_device *adev);
-extern int iceland_smu_fini(struct amdgpu_device *adev);
-extern int iceland_smu_start(struct amdgpu_device *adev);
-
-struct iceland_smu_private_data
-{
-	uint8_t *header;
-	uint8_t *mec_image;
-	uint32_t header_addr_high;
-	uint32_t header_addr_low;
-};
+void si_srbm_select(struct amdgpu_device *adev,
+		     u32 me, u32 pipe, u32 queue, u32 vmid);
+int si_set_ip_blocks(struct amdgpu_device *adev);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
new file mode 100644
index 0000000..de35819
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -0,0 +1,915 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+#include "si/sid.h"
+
+const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
+{
+	DMA0_REGISTER_OFFSET,
+	DMA1_REGISTER_OFFSET
+};
+
+static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
+static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
+static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
+static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
+
+static uint32_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
+{
+	return ring->adev->wb.wb[ring->rptr_offs>>2];
+}
+
+static uint32_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
+
+	return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+}
+
+static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
+
+	WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
+}
+
+static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
+				struct amdgpu_ib *ib,
+				unsigned vm_id, bool ctx_switch)
+{
+	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+	 * Pad as necessary with NOPs.
+	 */
+	while ((ring->wptr & 7) != 5)
+		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+	amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
+	amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+	amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
+static void si_dma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	amdgpu_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL));
+	amdgpu_ring_write(ring, 1);
+}
+
+static void si_dma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	amdgpu_ring_write(ring, (0xf << 16) | (HDP_DEBUG0));
+	amdgpu_ring_write(ring, 1);
+}
+
+/**
+ * si_dma_ring_emit_fence - emit a fence on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ * @fence: amdgpu fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (VI).
+ */
+static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+				      unsigned flags)
+{
+
+	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+	/* write the fence */
+	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
+	amdgpu_ring_write(ring, addr & 0xfffffffc);
+	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
+	amdgpu_ring_write(ring, seq);
+	/* optionally write high bits as well */
+	if (write64bit) {
+		addr += 4;
+		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
+		amdgpu_ring_write(ring, addr & 0xfffffffc);
+		amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
+		amdgpu_ring_write(ring, upper_32_bits(seq));
+	}
+	/* generate an interrupt */
+	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
+}
+
+static void si_dma_stop(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	u32 rb_cntl;
+	unsigned i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
+		/* dma0 */
+		rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
+		rb_cntl &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+
+		if (adev->mman.buffer_funcs_ring == ring)
+			amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+		ring->ready = false;
+	}
+}
+
+static int si_dma_start(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
+	int i, r;
+	uint64_t rptr_addr;
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
+
+		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
+		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
+
+		/* Set ring buffer size in dwords */
+		rb_bufsz = order_base_2(ring->ring_size / 4);
+		rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+
+		/* Initialize the ring buffer's read and write pointers */
+		WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
+		WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);
+
+		rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+
+		WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
+		WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
+
+		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+		WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
+
+		/* enable DMA IBs */
+		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+		ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+		WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);
+
+		dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
+		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+		WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);
+
+		ring->wptr = 0;
+		WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
+
+		ring->ready = true;
+
+		r = amdgpu_ring_test_ring(ring);
+		if (r) {
+			ring->ready = false;
+			return r;
+		}
+
+		if (adev->mman.buffer_funcs_ring == ring)
+			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+	}
+
+	return 0;
+}
+
+/**
+ * si_dma_ring_test_ring - simple async dma engine test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test the DMA engine by writing using it to write an
+ * value to memory. (VI).
+ * Returns 0 for success, error for failure.
+ */
+static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	unsigned i;
+	unsigned index;
+	int r;
+	u32 tmp;
+	u64 gpu_addr;
+
+	r = amdgpu_wb_get(adev, &index);
+	if (r) {
+		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+		return r;
+	}
+
+	gpu_addr = adev->wb.gpu_addr + (index * 4);
+	tmp = 0xCAFEDEAD;
+	adev->wb.wb[index] = cpu_to_le32(tmp);
+
+	r = amdgpu_ring_alloc(ring, 4);
+	if (r) {
+		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
+		amdgpu_wb_free(adev, index);
+		return r;
+	}
+
+	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
+	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
+	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
+	amdgpu_ring_write(ring, 0xDEADBEEF);
+	amdgpu_ring_commit(ring);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = le32_to_cpu(adev->wb.wb[index]);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (i < adev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+	} else {
+		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+			  ring->idx, tmp);
+		r = -EINVAL;
+	}
+	amdgpu_wb_free(adev, index);
+
+	return r;
+}
+
+/**
+ * si_dma_ring_test_ib - test an IB on the DMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (VI).
+ * Returns 0 on success, error on failure.
+ */
+static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_ib ib;
+	struct fence *f = NULL;
+	unsigned index;
+	u32 tmp = 0;
+	u64 gpu_addr;
+	long r;
+
+	r = amdgpu_wb_get(adev, &index);
+	if (r) {
+		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+		return r;
+	}
+
+	gpu_addr = adev->wb.gpu_addr + (index * 4);
+	tmp = 0xCAFEDEAD;
+	adev->wb.wb[index] = cpu_to_le32(tmp);
+	memset(&ib, 0, sizeof(ib));
+	r = amdgpu_ib_get(adev, NULL, 256, &ib);
+	if (r) {
+		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+		goto err0;
+	}
+
+	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
+	ib.ptr[1] = lower_32_bits(gpu_addr);
+	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
+	ib.ptr[3] = 0xDEADBEEF;
+	ib.length_dw = 4;
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+	if (r)
+		goto err1;
+
+	r = fence_wait_timeout(f, false, timeout);
+	if (r == 0) {
+		DRM_ERROR("amdgpu: IB test timed out\n");
+		r = -ETIMEDOUT;
+		goto err1;
+	} else if (r < 0) {
+		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+		goto err1;
+	}
+	tmp = le32_to_cpu(adev->wb.wb[index]);
+	if (tmp == 0xDEADBEEF) {
+		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+		r = 0;
+	} else {
+		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+		r = -EINVAL;
+	}
+
+err1:
+	amdgpu_ib_free(adev, &ib, NULL);
+	fence_put(f);
+err0:
+	amdgpu_wb_free(adev, index);
+	return r;
+}
+
+/**
+ * cik_dma_vm_copy_pte - update PTEs by copying them from the GART
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using DMA (SI).
+ */
+static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
+			       uint64_t pe, uint64_t src,
+			       unsigned count)
+{
+	unsigned bytes = count * 8;
+
+	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+					      1, 0, 0, bytes);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+	ib->ptr[ib->length_dw++] = lower_32_bits(src);
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+	ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+}
+
+/**
+ * si_dma_vm_write_pte - update PTEs by writing them manually
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @value: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ *
+ * Update PTEs by writing them manually using DMA (SI).
+ */
+static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+				uint64_t value, unsigned count,
+				uint32_t incr)
+{
+	unsigned ndw = count * 2;
+
+	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+	for (; ndw > 0; ndw -= 2) {
+		ib->ptr[ib->length_dw++] = lower_32_bits(value);
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		value += incr;
+	}
+}
+
+/**
+ * si_dma_vm_set_pte_pde - update the page tables using sDMA
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (CIK).
+ */
+static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
+				     uint64_t pe,
+				     uint64_t addr, unsigned count,
+				     uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		if (flags & AMDGPU_PTE_VALID)
+			value = addr;
+		else
+			value = 0;
+
+		/* for physically contiguous pages (vram) */
+		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+		ib->ptr[ib->length_dw++] = pe; /* dst addr */
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		ib->ptr[ib->length_dw++] = flags; /* mask */
+		ib->ptr[ib->length_dw++] = 0;
+		ib->ptr[ib->length_dw++] = value; /* value */
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		ib->ptr[ib->length_dw++] = incr; /* increment size */
+		ib->ptr[ib->length_dw++] = 0;
+		pe += ndw * 4;
+		addr += (ndw / 2) * incr;
+		count -= ndw / 2;
+	}
+}
+
+/**
+ * si_dma_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ */
+static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
+{
+	while (ib->length_dw & 0x7)
+		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+}
+
+/**
+ * cik_sdma_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed (CIK).
+ */
+static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+	uint32_t seq = ring->fence_drv.sync_seq;
+	uint64_t addr = ring->fence_drv.gpu_addr;
+
+	/* wait for idle */
+	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
+			  (1 << 27)); /* Poll memory */
+	amdgpu_ring_write(ring, lower_32_bits(addr));
+	amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
+	amdgpu_ring_write(ring, 0xffffffff); /* mask */
+	amdgpu_ring_write(ring, seq); /* value */
+	amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
+}
+
+/**
+ * si_dma_ring_emit_vm_flush - cik vm flush using sDMA
+ *
+ * @ring: amdgpu_ring pointer
+ * @vm: amdgpu_vm pointer
+ *
+ * Update the page table base and flush the VM TLB
+ * using sDMA (VI).
+ */
+static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
+				      unsigned vm_id, uint64_t pd_addr)
+{
+	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	if (vm_id < 8)
+		amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+	else
+		amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+	amdgpu_ring_write(ring, pd_addr >> 12);
+
+	/* bits 0-7 are the VM contexts0-7 */
+	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST));
+	amdgpu_ring_write(ring, 1 << vm_id);
+
+	/* wait for invalidate to complete */
+	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
+	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+	amdgpu_ring_write(ring, 0xff << 16); /* retry */
+	amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+	amdgpu_ring_write(ring, 0); /* value */
+	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
+}
+
+static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		7 + 3; /* si_dma_ring_emit_ib */
+}
+
+static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		3 + /* si_dma_ring_emit_hdp_flush */
+		3 + /* si_dma_ring_emit_hdp_invalidate */
+		6 + /* si_dma_ring_emit_pipeline_sync */
+		12 + /* si_dma_ring_emit_vm_flush */
+		9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */
+}
+
+static int si_dma_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->sdma.num_instances = 2;
+
+	si_dma_set_ring_funcs(adev);
+	si_dma_set_buffer_funcs(adev);
+	si_dma_set_vm_pte_funcs(adev);
+	si_dma_set_irq_funcs(adev);
+
+	return 0;
+}
+
+static int si_dma_sw_init(void *handle)
+{
+	struct amdgpu_ring *ring;
+	int r, i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	/* DMA0 trap event */
+	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
+	if (r)
+		return r;
+
+	/* DMA1 trap event */
+	r = amdgpu_irq_add_id(adev, 244, &adev->sdma.trap_irq_1);
+	if (r)
+		return r;
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
+		ring->ring_obj = NULL;
+		ring->use_doorbell = false;
+		sprintf(ring->name, "sdma%d", i);
+		r = amdgpu_ring_init(adev, ring, 1024,
+				     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf,
+				     &adev->sdma.trap_irq,
+				     (i == 0) ?
+				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
+				     AMDGPU_RING_TYPE_SDMA);
+		if (r)
+			return r;
+	}
+
+	return r;
+}
+
+static int si_dma_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+
+	return 0;
+}
+
+static int si_dma_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return si_dma_start(adev);
+}
+
+static int si_dma_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	si_dma_stop(adev);
+
+	return 0;
+}
+
+static int si_dma_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return si_dma_hw_fini(adev);
+}
+
+static int si_dma_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return si_dma_hw_init(adev);
+}
+
+static bool si_dma_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 tmp = RREG32(SRBM_STATUS2);
+
+	if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
+	    return false;
+
+	return true;
+}
+
+static int si_dma_wait_for_idle(void *handle)
+{
+	unsigned i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (si_dma_is_idle(handle))
+			return 0;
+		udelay(1);
+	}
+	return -ETIMEDOUT;
+}
+
+static int si_dma_soft_reset(void *handle)
+{
+	DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
+	return 0;
+}
+
+static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *src,
+					unsigned type,
+					enum amdgpu_interrupt_state state)
+{
+	u32 sdma_cntl;
+
+	switch (type) {
+	case AMDGPU_SDMA_IRQ_TRAP0:
+		switch (state) {
+		case AMDGPU_IRQ_STATE_DISABLE:
+			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
+			sdma_cntl &= ~TRAP_ENABLE;
+			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+			break;
+		case AMDGPU_IRQ_STATE_ENABLE:
+			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
+			sdma_cntl |= TRAP_ENABLE;
+			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+			break;
+		default:
+			break;
+		}
+		break;
+	case AMDGPU_SDMA_IRQ_TRAP1:
+		switch (state) {
+		case AMDGPU_IRQ_STATE_DISABLE:
+			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
+			sdma_cntl &= ~TRAP_ENABLE;
+			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+			break;
+		case AMDGPU_IRQ_STATE_ENABLE:
+			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
+			sdma_cntl |= TRAP_ENABLE;
+			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int si_dma_process_trap_irq(struct amdgpu_device *adev,
+				      struct amdgpu_irq_src *source,
+				      struct amdgpu_iv_entry *entry)
+{
+	amdgpu_fence_process(&adev->sdma.instance[0].ring);
+
+	return 0;
+}
+
+static int si_dma_process_trap_irq_1(struct amdgpu_device *adev,
+				      struct amdgpu_irq_src *source,
+				      struct amdgpu_iv_entry *entry)
+{
+	amdgpu_fence_process(&adev->sdma.instance[1].ring);
+
+	return 0;
+}
+
+static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev,
+					      struct amdgpu_irq_src *source,
+					      struct amdgpu_iv_entry *entry)
+{
+	DRM_ERROR("Illegal instruction in SDMA command stream\n");
+	schedule_work(&adev->reset_work);
+	return 0;
+}
+
+static int si_dma_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	u32 orig, data, offset;
+	int i;
+	bool enable;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	enable = (state == AMD_CG_STATE_GATE) ? true : false;
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			if (i == 0)
+				offset = DMA0_REGISTER_OFFSET;
+			else
+				offset = DMA1_REGISTER_OFFSET;
+			orig = data = RREG32(DMA_POWER_CNTL + offset);
+			data &= ~MEM_POWER_OVERRIDE;
+			if (data != orig)
+				WREG32(DMA_POWER_CNTL + offset, data);
+			WREG32(DMA_CLK_CTRL + offset, 0x00000100);
+		}
+	} else {
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			if (i == 0)
+				offset = DMA0_REGISTER_OFFSET;
+			else
+				offset = DMA1_REGISTER_OFFSET;
+			orig = data = RREG32(DMA_POWER_CNTL + offset);
+			data |= MEM_POWER_OVERRIDE;
+			if (data != orig)
+				WREG32(DMA_POWER_CNTL + offset, data);
+
+			orig = data = RREG32(DMA_CLK_CTRL + offset);
+			data = 0xff000000;
+			if (data != orig)
+				WREG32(DMA_CLK_CTRL + offset, data);
+		}
+	}
+
+	return 0;
+}
+
+static int si_dma_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	u32 tmp;
+
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	WREG32(DMA_PGFSM_WRITE,  0x00002000);
+	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
+
+	for (tmp = 0; tmp < 5; tmp++)
+		WREG32(DMA_PGFSM_WRITE, 0);
+
+	return 0;
+}
+
+const struct amd_ip_funcs si_dma_ip_funcs = {
+	.name = "si_dma",
+	.early_init = si_dma_early_init,
+	.late_init = NULL,
+	.sw_init = si_dma_sw_init,
+	.sw_fini = si_dma_sw_fini,
+	.hw_init = si_dma_hw_init,
+	.hw_fini = si_dma_hw_fini,
+	.suspend = si_dma_suspend,
+	.resume = si_dma_resume,
+	.is_idle = si_dma_is_idle,
+	.wait_for_idle = si_dma_wait_for_idle,
+	.soft_reset = si_dma_soft_reset,
+	.set_clockgating_state = si_dma_set_clockgating_state,
+	.set_powergating_state = si_dma_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
+	.get_rptr = si_dma_ring_get_rptr,
+	.get_wptr = si_dma_ring_get_wptr,
+	.set_wptr = si_dma_ring_set_wptr,
+	.parse_cs = NULL,
+	.emit_ib = si_dma_ring_emit_ib,
+	.emit_fence = si_dma_ring_emit_fence,
+	.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
+	.emit_vm_flush = si_dma_ring_emit_vm_flush,
+	.emit_hdp_flush = si_dma_ring_emit_hdp_flush,
+	.emit_hdp_invalidate = si_dma_ring_emit_hdp_invalidate,
+	.test_ring = si_dma_ring_test_ring,
+	.test_ib = si_dma_ring_test_ib,
+	.insert_nop = amdgpu_ring_insert_nop,
+	.pad_ib = si_dma_ring_pad_ib,
+	.get_emit_ib_size = si_dma_ring_get_emit_ib_size,
+	.get_dma_frame_size = si_dma_ring_get_dma_frame_size,
+};
+
+static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
+	.set = si_dma_set_trap_irq_state,
+	.process = si_dma_process_trap_irq,
+};
+
+static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs_1 = {
+	.set = si_dma_set_trap_irq_state,
+	.process = si_dma_process_trap_irq_1,
+};
+
+static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = {
+	.process = si_dma_process_illegal_inst_irq,
+};
+
+static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+	adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
+	adev->sdma.trap_irq_1.funcs = &si_dma_trap_irq_funcs_1;
+	adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs;
+}
+
+/**
+ * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Copy GPU buffers using the DMA engine (VI).
+ * Used by the amdgpu ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
+				       uint64_t src_offset,
+				       uint64_t dst_offset,
+				       uint32_t byte_count)
+{
+	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+					      1, 0, 0, byte_count);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
+	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
+}
+
+/**
+ * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @src_data: value to write to buffer
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Fill GPU buffers using the DMA engine (VI).
+ */
+static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
+				       uint32_t src_data,
+				       uint64_t dst_offset,
+				       uint32_t byte_count)
+{
+	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
+					      0, 0, 0, byte_count / 4);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = src_data;
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
+}
+
+
+static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
+	.copy_max_bytes = 0xffff8,
+	.copy_num_dw = 5,
+	.emit_copy_buffer = si_dma_emit_copy_buffer,
+
+	.fill_max_bytes = 0xffff8,
+	.fill_num_dw = 4,
+	.emit_fill_buffer = si_dma_emit_fill_buffer,
+};
+
+static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
+{
+	if (adev->mman.buffer_funcs == NULL) {
+		adev->mman.buffer_funcs = &si_dma_buffer_funcs;
+		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+	}
+}
+
+static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
+	.copy_pte = si_dma_vm_copy_pte,
+	.write_pte = si_dma_vm_write_pte,
+	.set_pte_pde = si_dma_vm_set_pte_pde,
+};
+
+static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	if (adev->vm_manager.vm_pte_funcs == NULL) {
+		adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
+		for (i = 0; i < adev->sdma.num_instances; i++)
+			adev->vm_manager.vm_pte_rings[i] =
+				&adev->sdma.instance[i].ring;
+
+		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+	}
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h b/drivers/gpu/drm/amd/amdgpu/si_dma.h
similarity index 72%
copy from drivers/gpu/drm/amd/amdgpu/iceland_smum.h
copy to drivers/gpu/drm/amd/amdgpu/si_dma.h
index 5983e31..3a3e0c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2015 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -21,21 +21,9 @@
  *
  */
 
-#ifndef ICELAND_SMUM_H
-#define ICELAND_SMUM_H
+#ifndef __SI_DMA_H__
+#define __SI_DMA_H__
 
-#include "ppsmc.h"
-
-extern int iceland_smu_init(struct amdgpu_device *adev);
-extern int iceland_smu_fini(struct amdgpu_device *adev);
-extern int iceland_smu_start(struct amdgpu_device *adev);
-
-struct iceland_smu_private_data
-{
-	uint8_t *header;
-	uint8_t *mec_image;
-	uint32_t header_addr_high;
-	uint32_t header_addr_low;
-};
+extern const struct amd_ip_funcs si_dma_ip_funcs;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
new file mode 100644
index 0000000..3de7bca
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -0,0 +1,8012 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_dpm.h"
+#include "amdgpu_atombios.h"
+#include "si/sid.h"
+#include "r600_dpm.h"
+#include "si_dpm.h"
+#include "atom.h"
+#include "../include/pptable.h"
+#include <linux/math64.h>
+#include <linux/seq_file.h>
+#include <linux/firmware.h>
+
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+#define SMC_RAM_END                 0x20000
+
+#define SCLK_MIN_DEEPSLEEP_FREQ     1350
+
+
+/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
+
+#define BIOS_SCRATCH_4                                    0x5cd
+
+MODULE_FIRMWARE("radeon/tahiti_smc.bin");
+MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
+MODULE_FIRMWARE("radeon/verde_smc.bin");
+MODULE_FIRMWARE("radeon/verde_k_smc.bin");
+MODULE_FIRMWARE("radeon/oland_smc.bin");
+MODULE_FIRMWARE("radeon/oland_k_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+
+union power_info {
+	struct _ATOM_POWERPLAY_INFO info;
+	struct _ATOM_POWERPLAY_INFO_V2 info_2;
+	struct _ATOM_POWERPLAY_INFO_V3 info_3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
+	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
+};
+
+union fan_info {
+	struct _ATOM_PPLIB_FANTABLE fan;
+	struct _ATOM_PPLIB_FANTABLE2 fan2;
+	struct _ATOM_PPLIB_FANTABLE3 fan3;
+};
+
+union pplib_clock_info {
+	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+};
+
+static const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
+{
+	R600_UTC_DFLT_00,
+	R600_UTC_DFLT_01,
+	R600_UTC_DFLT_02,
+	R600_UTC_DFLT_03,
+	R600_UTC_DFLT_04,
+	R600_UTC_DFLT_05,
+	R600_UTC_DFLT_06,
+	R600_UTC_DFLT_07,
+	R600_UTC_DFLT_08,
+	R600_UTC_DFLT_09,
+	R600_UTC_DFLT_10,
+	R600_UTC_DFLT_11,
+	R600_UTC_DFLT_12,
+	R600_UTC_DFLT_13,
+	R600_UTC_DFLT_14,
+};
+
+static const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
+{
+	R600_DTC_DFLT_00,
+	R600_DTC_DFLT_01,
+	R600_DTC_DFLT_02,
+	R600_DTC_DFLT_03,
+	R600_DTC_DFLT_04,
+	R600_DTC_DFLT_05,
+	R600_DTC_DFLT_06,
+	R600_DTC_DFLT_07,
+	R600_DTC_DFLT_08,
+	R600_DTC_DFLT_09,
+	R600_DTC_DFLT_10,
+	R600_DTC_DFLT_11,
+	R600_DTC_DFLT_12,
+	R600_DTC_DFLT_13,
+	R600_DTC_DFLT_14,
+};
+
+static const struct si_cac_config_reg cac_weights_tahiti[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_tahiti[] =
+{
+	{ 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+	{ 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+
+};
+
+static const struct si_cac_config_reg cac_override_tahiti[] =
+{
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_tahiti =
+{
+	((1 << 16) | 27027),
+	6,
+	0,
+	4,
+	95,
+	{
+		0UL,
+		0UL,
+		4521550UL,
+		309631529UL,
+		-1270850L,
+		4513710L,
+		40
+	},
+	595000000UL,
+	12,
+	{
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0
+	},
+	true
+};
+
+static const struct si_dte_data dte_data_tahiti =
+{
+	{ 1159409, 0, 0, 0, 0 },
+	{ 777, 0, 0, 0, 0 },
+	2,
+	54000,
+	127000,
+	25,
+	2,
+	10,
+	13,
+	{ 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
+	{ 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
+	{ 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
+	85,
+	false
+};
+
+#if 0
+static const struct si_dte_data dte_data_tahiti_le =
+{
+	{ 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
+	{ 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
+	0x5,
+	0xAFC8,
+	0x64,
+	0x32,
+	1,
+	0,
+	0x10,
+	{ 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
+	{ 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
+	{ 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
+	85,
+	true
+};
+#endif
+
+static const struct si_dte_data dte_data_tahiti_pro =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	45000,
+	100,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_new_zealand =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
+	{ 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
+	0x5,
+	0xAFC8,
+	0x69,
+	0x32,
+	1,
+	0,
+	0x10,
+	{ 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
+	85,
+	true
+};
+
+static const struct si_dte_data dte_data_aruba_pro =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	45000,
+	100,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_malta =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	45000,
+	100,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_cac_config_reg cac_weights_pitcairn[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_pitcairn[] =
+{
+	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_pitcairn[] =
+{
+    { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_pitcairn =
+{
+	((1 << 16) | 27027),
+	5,
+	0,
+	6,
+	100,
+	{
+		51600000UL,
+		1800000UL,
+		7194395UL,
+		309631529UL,
+		-1270850L,
+		4513710L,
+		100
+	},
+	117830498UL,
+	12,
+	{
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0
+	},
+	true
+};
+
+static const struct si_dte_data dte_data_pitcairn =
+{
+	{ 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0 },
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	0,
+	false
+};
+
+static const struct si_dte_data dte_data_curacao_xt =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	45000,
+	100,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_curacao_pro =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	45000,
+	100,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_neptune_xt =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	45000,
+	100,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_heathrow[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_cape_verde[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_cape_verde[] =
+{
+	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_cape_verde[] =
+{
+    { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_cape_verde =
+{
+	((1 << 16) | 0x6993),
+	5,
+	0,
+	7,
+	105,
+	{
+		0UL,
+		0UL,
+		7194395UL,
+		309631529UL,
+		-1270850L,
+		4513710L,
+		100
+	},
+	117830498UL,
+	12,
+	{
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0
+	},
+	true
+};
+
+static const struct si_dte_data dte_data_cape_verde =
+{
+	{ 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0 },
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	0,
+	false
+};
+
+static const struct si_dte_data dte_data_venus_xtx =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
+	5,
+	55000,
+	0x69,
+	0xA,
+	1,
+	0,
+	0x3,
+	{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	{ 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_venus_xt =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
+	5,
+	55000,
+	0x69,
+	0xA,
+	1,
+	0,
+	0x3,
+	{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	{ 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_venus_pro =
+{
+	{  0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
+	5,
+	55000,
+	0x69,
+	0xA,
+	1,
+	0,
+	0x3,
+	{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	{ 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_cac_config_reg cac_weights_oland[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_mars_pro[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_mars_xt[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_oland_pro[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_oland_xt[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_oland[] =
+{
+	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_mars_pro[] =
+{
+	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_oland[] =
+{
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_oland =
+{
+	((1 << 16) | 0x6993),
+	5,
+	0,
+	7,
+	105,
+	{
+		0UL,
+		0UL,
+		7194395UL,
+		309631529UL,
+		-1270850L,
+		4513710L,
+		100
+	},
+	117830498UL,
+	12,
+	{
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0
+	},
+	true
+};
+
+static const struct si_powertune_data powertune_data_mars_pro =
+{
+	((1 << 16) | 0x6993),
+	5,
+	0,
+	7,
+	105,
+	{
+		0UL,
+		0UL,
+		7194395UL,
+		309631529UL,
+		-1270850L,
+		4513710L,
+		100
+	},
+	117830498UL,
+	12,
+	{
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0
+	},
+	true
+};
+
+static const struct si_dte_data dte_data_oland =
+{
+	{ 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0 },
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+	0,
+	false
+};
+
+static const struct si_dte_data dte_data_mars_pro =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	55000,
+	105,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+static const struct si_dte_data dte_data_sun_xt =
+{
+	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
+	5,
+	55000,
+	105,
+	0xA,
+	1,
+	0,
+	0x10,
+	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+	{ 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+	90,
+	true
+};
+
+
+static const struct si_cac_config_reg cac_weights_hainan[] =
+{
+	{ 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
+	{ 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND },
+	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND },
+	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND },
+	{ 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND },
+	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND },
+	{ 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND },
+	{ 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND },
+	{ 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND },
+	{ 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND },
+	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND },
+	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND },
+	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND },
+	{ 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
+	{ 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND },
+	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+	{ 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND },
+	{ 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_hainan =
+{
+	((1 << 16) | 0x6993),
+	5,
+	0,
+	9,
+	105,
+	{
+		0UL,
+		0UL,
+		7194395UL,
+		309631529UL,
+		-1270850L,
+		4513710L,
+		100
+	},
+	117830498UL,
+	12,
+	{
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0,
+		0
+	},
+	true
+};
+
+static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev);
+static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev);
+static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev);
+static struct  si_ps *si_get_ps(struct amdgpu_ps *rps);
+
+static int si_populate_voltage_value(struct amdgpu_device *adev,
+				     const struct atom_voltage_table *table,
+				     u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
+static int si_get_std_voltage_value(struct amdgpu_device *adev,
+				    SISLANDS_SMC_VOLTAGE_VALUE *voltage,
+				    u16 *std_voltage);
+static int si_write_smc_soft_register(struct amdgpu_device *adev,
+				      u16 reg_offset, u32 value);
+static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
+					 struct rv7xx_pl *pl,
+					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level);
+static int si_calculate_sclk_params(struct amdgpu_device *adev,
+				    u32 engine_clock,
+				    SISLANDS_SMC_SCLK_VALUE *sclk);
+
+static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev);
+static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
+static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev);
+static void si_dpm_set_irq_funcs(struct amdgpu_device *adev);
+
+static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
+{
+	struct si_power_info *pi = adev->pm.dpm.priv;
+	return pi;
+}
+
+static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
+						     u16 v, s32 t, u32 ileakage, u32 *leakage)
+{
+	s64 kt, kv, leakage_w, i_leakage, vddc;
+	s64 temperature, t_slope, t_intercept, av, bv, t_ref;
+	s64 tmp;
+
+	i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
+	vddc = div64_s64(drm_int2fixp(v), 1000);
+	temperature = div64_s64(drm_int2fixp(t), 1000);
+
+	t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000);
+	t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000);
+	av = div64_s64(drm_int2fixp(coeff->av), 100000000);
+	bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
+	t_ref = drm_int2fixp(coeff->t_ref);
+
+	tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
+	kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
+	kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
+	kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
+
+	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
+
+	*leakage = drm_fixp2int(leakage_w * 1000);
+}
+
+static void si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev,
+					     const struct ni_leakage_coeffients *coeff,
+					     u16 v,
+					     s32 t,
+					     u32 i_leakage,
+					     u32 *leakage)
+{
+	si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
+}
+
+static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff,
+					       const u32 fixed_kt, u16 v,
+					       u32 ileakage, u32 *leakage)
+{
+	s64 kt, kv, leakage_w, i_leakage, vddc;
+
+	i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
+	vddc = div64_s64(drm_int2fixp(v), 1000);
+
+	kt = div64_s64(drm_int2fixp(fixed_kt), 100000000);
+	kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
+			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));
+
+	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
+
+	*leakage = drm_fixp2int(leakage_w * 1000);
+}
+
+static void si_calculate_leakage_for_v(struct amdgpu_device *adev,
+				       const struct ni_leakage_coeffients *coeff,
+				       const u32 fixed_kt,
+				       u16 v,
+				       u32 i_leakage,
+				       u32 *leakage)
+{
+	si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage);
+}
+
+
+static void si_update_dte_from_pl2(struct amdgpu_device *adev,
+				   struct si_dte_data *dte_data)
+{
+	u32 p_limit1 = adev->pm.dpm.tdp_limit;
+	u32 p_limit2 = adev->pm.dpm.near_tdp_limit;
+	u32 k = dte_data->k;
+	u32 t_max = dte_data->max_t;
+	u32 t_split[5] = { 10, 15, 20, 25, 30 };
+	u32 t_0 = dte_data->t0;
+	u32 i;
+
+	if (p_limit2 != 0 && p_limit2 <= p_limit1) {
+		dte_data->tdep_count = 3;
+
+		for (i = 0; i < k; i++) {
+			dte_data->r[i] =
+				(t_split[i] * (t_max - t_0/(u32)1000) * (1 << 14)) /
+				(p_limit2  * (u32)100);
+		}
+
+		dte_data->tdep_r[1] = dte_data->r[4] * 2;
+
+		for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) {
+			dte_data->tdep_r[i] = dte_data->r[4];
+		}
+	} else {
+		DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
+	}
+}
+
+static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi = adev->pm.dpm.priv;
+
+	return pi;
+}
+
+static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev)
+{
+	struct ni_power_info *pi = adev->pm.dpm.priv;
+
+	return pi;
+}
+
+static struct si_ps *si_get_ps(struct amdgpu_ps *aps)
+{
+	struct  si_ps *ps = aps->ps_priv;
+
+	return ps;
+}
+
+static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	bool update_dte_from_pl2 = false;
+
+	if (adev->asic_type == CHIP_TAHITI) {
+		si_pi->cac_weights = cac_weights_tahiti;
+		si_pi->lcac_config = lcac_tahiti;
+		si_pi->cac_override = cac_override_tahiti;
+		si_pi->powertune_data = &powertune_data_tahiti;
+		si_pi->dte_data = dte_data_tahiti;
+
+		switch (adev->pdev->device) {
+		case 0x6798:
+			si_pi->dte_data.enable_dte_by_default = true;
+			break;
+		case 0x6799:
+			si_pi->dte_data = dte_data_new_zealand;
+			break;
+		case 0x6790:
+		case 0x6791:
+		case 0x6792:
+		case 0x679E:
+			si_pi->dte_data = dte_data_aruba_pro;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x679B:
+			si_pi->dte_data = dte_data_malta;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x679A:
+			si_pi->dte_data = dte_data_tahiti_pro;
+			update_dte_from_pl2 = true;
+			break;
+		default:
+			if (si_pi->dte_data.enable_dte_by_default == true)
+				DRM_ERROR("DTE is not enabled!\n");
+			break;
+		}
+	} else if (adev->asic_type == CHIP_PITCAIRN) {
+		si_pi->cac_weights = cac_weights_pitcairn;
+		si_pi->lcac_config = lcac_pitcairn;
+		si_pi->cac_override = cac_override_pitcairn;
+		si_pi->powertune_data = &powertune_data_pitcairn;
+
+		switch (adev->pdev->device) {
+		case 0x6810:
+		case 0x6818:
+			si_pi->dte_data = dte_data_curacao_xt;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x6819:
+		case 0x6811:
+			si_pi->dte_data = dte_data_curacao_pro;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x6800:
+		case 0x6806:
+			si_pi->dte_data = dte_data_neptune_xt;
+			update_dte_from_pl2 = true;
+			break;
+		default:
+			si_pi->dte_data = dte_data_pitcairn;
+			break;
+		}
+	} else if (adev->asic_type == CHIP_VERDE) {
+		si_pi->lcac_config = lcac_cape_verde;
+		si_pi->cac_override = cac_override_cape_verde;
+		si_pi->powertune_data = &powertune_data_cape_verde;
+
+		switch (adev->pdev->device) {
+		case 0x683B:
+		case 0x683F:
+		case 0x6829:
+		case 0x6835:
+			si_pi->cac_weights = cac_weights_cape_verde_pro;
+			si_pi->dte_data = dte_data_cape_verde;
+			break;
+		case 0x682C:
+			si_pi->cac_weights = cac_weights_cape_verde_pro;
+			si_pi->dte_data = dte_data_sun_xt;
+			break;
+		case 0x6825:
+		case 0x6827:
+			si_pi->cac_weights = cac_weights_heathrow;
+			si_pi->dte_data = dte_data_cape_verde;
+			break;
+		case 0x6824:
+		case 0x682D:
+			si_pi->cac_weights = cac_weights_chelsea_xt;
+			si_pi->dte_data = dte_data_cape_verde;
+			break;
+		case 0x682F:
+			si_pi->cac_weights = cac_weights_chelsea_pro;
+			si_pi->dte_data = dte_data_cape_verde;
+			break;
+		case 0x6820:
+			si_pi->cac_weights = cac_weights_heathrow;
+			si_pi->dte_data = dte_data_venus_xtx;
+			break;
+		case 0x6821:
+			si_pi->cac_weights = cac_weights_heathrow;
+			si_pi->dte_data = dte_data_venus_xt;
+			break;
+		case 0x6823:
+		case 0x682B:
+		case 0x6822:
+		case 0x682A:
+			si_pi->cac_weights = cac_weights_chelsea_pro;
+			si_pi->dte_data = dte_data_venus_pro;
+			break;
+		default:
+			si_pi->cac_weights = cac_weights_cape_verde;
+			si_pi->dte_data = dte_data_cape_verde;
+			break;
+		}
+	} else if (adev->asic_type == CHIP_OLAND) {
+		si_pi->lcac_config = lcac_mars_pro;
+		si_pi->cac_override = cac_override_oland;
+		si_pi->powertune_data = &powertune_data_mars_pro;
+		si_pi->dte_data = dte_data_mars_pro;
+
+		switch (adev->pdev->device) {
+		case 0x6601:
+		case 0x6621:
+		case 0x6603:
+		case 0x6605:
+			si_pi->cac_weights = cac_weights_mars_pro;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x6600:
+		case 0x6606:
+		case 0x6620:
+		case 0x6604:
+			si_pi->cac_weights = cac_weights_mars_xt;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x6611:
+		case 0x6613:
+		case 0x6608:
+			si_pi->cac_weights = cac_weights_oland_pro;
+			update_dte_from_pl2 = true;
+			break;
+		case 0x6610:
+			si_pi->cac_weights = cac_weights_oland_xt;
+			update_dte_from_pl2 = true;
+			break;
+		default:
+			si_pi->cac_weights = cac_weights_oland;
+			si_pi->lcac_config = lcac_oland;
+			si_pi->cac_override = cac_override_oland;
+			si_pi->powertune_data = &powertune_data_oland;
+			si_pi->dte_data = dte_data_oland;
+			break;
+		}
+	} else if (adev->asic_type == CHIP_HAINAN) {
+		si_pi->cac_weights = cac_weights_hainan;
+		si_pi->lcac_config = lcac_oland;
+		si_pi->cac_override = cac_override_oland;
+		si_pi->powertune_data = &powertune_data_hainan;
+		si_pi->dte_data = dte_data_sun_xt;
+		update_dte_from_pl2 = true;
+	} else {
+		DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
+		return;
+	}
+
+	ni_pi->enable_power_containment = false;
+	ni_pi->enable_cac = false;
+	ni_pi->enable_sq_ramping = false;
+	si_pi->enable_dte = false;
+
+	if (si_pi->powertune_data->enable_powertune_by_default) {
+		ni_pi->enable_power_containment = true;
+		ni_pi->enable_cac = true;
+		if (si_pi->dte_data.enable_dte_by_default) {
+			si_pi->enable_dte = true;
+			if (update_dte_from_pl2)
+				si_update_dte_from_pl2(adev, &si_pi->dte_data);
+
+		}
+		ni_pi->enable_sq_ramping = true;
+	}
+
+	ni_pi->driver_calculate_cac_leakage = true;
+	ni_pi->cac_configuration_required = true;
+
+	if (ni_pi->cac_configuration_required) {
+		ni_pi->support_cac_long_term_average = true;
+		si_pi->dyn_powertune_data.l2_lta_window_size =
+			si_pi->powertune_data->l2_lta_window_size_default;
+		si_pi->dyn_powertune_data.lts_truncate =
+			si_pi->powertune_data->lts_truncate_default;
+	} else {
+		ni_pi->support_cac_long_term_average = false;
+		si_pi->dyn_powertune_data.l2_lta_window_size = 0;
+		si_pi->dyn_powertune_data.lts_truncate = 0;
+	}
+
+	si_pi->dyn_powertune_data.disable_uvd_powertune = false;
+}
+
+static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev)
+{
+	return 1;
+}
+
+static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
+{
+	u32 xclk;
+	u32 wintime;
+	u32 cac_window;
+	u32 cac_window_size;
+
+	xclk = amdgpu_asic_get_xclk(adev);
+
+	if (xclk == 0)
+		return 0;
+
+	cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
+	cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
+
+	wintime = (cac_window_size * 100) / xclk;
+
+	return wintime;
+}
+
+static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
+{
+	return power_in_watts;
+}
+
+static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev,
+					    bool adjust_polarity,
+					    u32 tdp_adjustment,
+					    u32 *tdp_limit,
+					    u32 *near_tdp_limit)
+{
+	u32 adjustment_delta, max_tdp_limit;
+
+	if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit)
+		return -EINVAL;
+
+	max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100;
+
+	if (adjust_polarity) {
+		*tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
+		*near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - adev->pm.dpm.tdp_limit);
+	} else {
+		*tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
+		adjustment_delta  = adev->pm.dpm.tdp_limit - *tdp_limit;
+		if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted)
+			*near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta;
+		else
+			*near_tdp_limit = 0;
+	}
+
+	if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit))
+		return -EINVAL;
+	if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int si_populate_smc_tdp_limits(struct amdgpu_device *adev,
+				      struct amdgpu_ps *amdgpu_state)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	if (ni_pi->enable_power_containment) {
+		SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
+		PP_SIslands_PAPMParameters *papm_parm;
+		struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
+		u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
+		u32 tdp_limit;
+		u32 near_tdp_limit;
+		int ret;
+
+		if (scaling_factor == 0)
+			return -EINVAL;
+
+		memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
+
+		ret = si_calculate_adjusted_tdp_limits(adev,
+						       false, /* ??? */
+						       adev->pm.dpm.tdp_adjustment,
+						       &tdp_limit,
+						       &near_tdp_limit);
+		if (ret)
+			return ret;
+
+		smc_table->dpm2Params.TDPLimit =
+			cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
+		smc_table->dpm2Params.NearTDPLimit =
+			cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000);
+		smc_table->dpm2Params.SafePowerLimit =
+			cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
+
+		ret = amdgpu_si_copy_bytes_to_smc(adev,
+						  (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
+						   offsetof(PP_SIslands_DPM2Parameters, TDPLimit)),
+						  (u8 *)(&(smc_table->dpm2Params.TDPLimit)),
+						  sizeof(u32) * 3,
+						  si_pi->sram_end);
+		if (ret)
+			return ret;
+
+		if (si_pi->enable_ppm) {
+			papm_parm = &si_pi->papm_parm;
+			memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters));
+			papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp);
+			papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max);
+			papm_parm->dGPU_T_Warning = cpu_to_be32(95);
+			papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5);
+			papm_parm->PlatformPowerLimit = 0xffffffff;
+			papm_parm->NearTDPLimitPAPM = 0xffffffff;
+
+			ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->papm_cfg_table_start,
+							  (u8 *)papm_parm,
+							  sizeof(PP_SIslands_PAPMParameters),
+							  si_pi->sram_end);
+			if (ret)
+				return ret;
+		}
+	}
+	return 0;
+}
+
+static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev,
+					struct amdgpu_ps *amdgpu_state)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	if (ni_pi->enable_power_containment) {
+		SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
+		u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
+		int ret;
+
+		memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
+
+		smc_table->dpm2Params.NearTDPLimit =
+			cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000);
+		smc_table->dpm2Params.SafePowerLimit =
+			cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
+
+		ret = amdgpu_si_copy_bytes_to_smc(adev,
+						  (si_pi->state_table_start +
+						   offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
+						   offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)),
+						  (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)),
+						  sizeof(u32) * 2,
+						  si_pi->sram_end);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev,
+					       const u16 prev_std_vddc,
+					       const u16 curr_std_vddc)
+{
+	u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN;
+	u64 prev_vddc = (u64)prev_std_vddc;
+	u64 curr_vddc = (u64)curr_std_vddc;
+	u64 pwr_efficiency_ratio, n, d;
+
+	if ((prev_vddc == 0) || (curr_vddc == 0))
+		return 0;
+
+	n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000);
+	d = prev_vddc * prev_vddc;
+	pwr_efficiency_ratio = div64_u64(n, d);
+
+	if (pwr_efficiency_ratio > (u64)0xFFFF)
+		return 0;
+
+	return (u16)pwr_efficiency_ratio;
+}
+
+static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev,
+					    struct amdgpu_ps *amdgpu_state)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	if (si_pi->dyn_powertune_data.disable_uvd_powertune &&
+	    amdgpu_state->vclk && amdgpu_state->dclk)
+		return true;
+
+	return false;
+}
+
+struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev)
+{
+	struct evergreen_power_info *pi = adev->pm.dpm.priv;
+
+	return pi;
+}
+
+static int si_populate_power_containment_values(struct amdgpu_device *adev,
+						struct amdgpu_ps *amdgpu_state,
+						SISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct  si_ps *state = si_get_ps(amdgpu_state);
+	SISLANDS_SMC_VOLTAGE_VALUE vddc;
+	u32 prev_sclk;
+	u32 max_sclk;
+	u32 min_sclk;
+	u16 prev_std_vddc;
+	u16 curr_std_vddc;
+	int i;
+	u16 pwr_efficiency_ratio;
+	u8 max_ps_percent;
+	bool disable_uvd_power_tune;
+	int ret;
+
+	if (ni_pi->enable_power_containment == false)
+		return 0;
+
+	if (state->performance_level_count == 0)
+		return -EINVAL;
+
+	if (smc_state->levelCount != state->performance_level_count)
+		return -EINVAL;
+
+	disable_uvd_power_tune = si_should_disable_uvd_powertune(adev, amdgpu_state);
+
+	smc_state->levels[0].dpm2.MaxPS = 0;
+	smc_state->levels[0].dpm2.NearTDPDec = 0;
+	smc_state->levels[0].dpm2.AboveSafeInc = 0;
+	smc_state->levels[0].dpm2.BelowSafeInc = 0;
+	smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+	for (i = 1; i < state->performance_level_count; i++) {
+		prev_sclk = state->performance_levels[i-1].sclk;
+		max_sclk  = state->performance_levels[i].sclk;
+		if (i == 1)
+			max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M;
+		else
+			max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H;
+
+		if (prev_sclk > max_sclk)
+			return -EINVAL;
+
+		if ((max_ps_percent == 0) ||
+		    (prev_sclk == max_sclk) ||
+		    disable_uvd_power_tune)
+			min_sclk = max_sclk;
+		else if (i == 1)
+			min_sclk = prev_sclk;
+		else
+			min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;
+
+		if (min_sclk < state->performance_levels[0].sclk)
+			min_sclk = state->performance_levels[0].sclk;
+
+		if (min_sclk == 0)
+			return -EINVAL;
+
+		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+						state->performance_levels[i-1].vddc, &vddc);
+		if (ret)
+			return ret;
+
+		ret = si_get_std_voltage_value(adev, &vddc, &prev_std_vddc);
+		if (ret)
+			return ret;
+
+		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+						state->performance_levels[i].vddc, &vddc);
+		if (ret)
+			return ret;
+
+		ret = si_get_std_voltage_value(adev, &vddc, &curr_std_vddc);
+		if (ret)
+			return ret;
+
+		pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(adev,
+									   prev_std_vddc, curr_std_vddc);
+
+		smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
+		smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC;
+		smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC;
+		smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC;
+		smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio);
+	}
+
+	return 0;
+}
+
+static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
+					 struct amdgpu_ps *amdgpu_state,
+					 SISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct  si_ps *state = si_get_ps(amdgpu_state);
+	u32 sq_power_throttle, sq_power_throttle2;
+	bool enable_sq_ramping = ni_pi->enable_sq_ramping;
+	int i;
+
+	if (state->performance_level_count == 0)
+		return -EINVAL;
+
+	if (smc_state->levelCount != state->performance_level_count)
+		return -EINVAL;
+
+	if (adev->pm.dpm.sq_ramping_threshold == 0)
+		return -EINVAL;
+
+	if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
+		enable_sq_ramping = false;
+
+	if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
+		enable_sq_ramping = false;
+
+	if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
+		enable_sq_ramping = false;
+
+	if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
+		enable_sq_ramping = false;
+
+	if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+		enable_sq_ramping = false;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		sq_power_throttle = 0;
+		sq_power_throttle2 = 0;
+
+		if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&
+		    enable_sq_ramping) {
+			sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
+			sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
+			sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
+			sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
+			sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
+		} else {
+			sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
+			sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+		}
+
+		smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
+		smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
+	}
+
+	return 0;
+}
+
+static int si_enable_power_containment(struct amdgpu_device *adev,
+				       struct amdgpu_ps *amdgpu_new_state,
+				       bool enable)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	PPSMC_Result smc_result;
+	int ret = 0;
+
+	if (ni_pi->enable_power_containment) {
+		if (enable) {
+			if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
+				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive);
+				if (smc_result != PPSMC_Result_OK) {
+					ret = -EINVAL;
+					ni_pi->pc_enabled = false;
+				} else {
+					ni_pi->pc_enabled = true;
+				}
+			}
+		} else {
+			smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive);
+			if (smc_result != PPSMC_Result_OK)
+				ret = -EINVAL;
+			ni_pi->pc_enabled = false;
+		}
+	}
+
+	return ret;
+}
+
+static int si_initialize_smc_dte_tables(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	int ret = 0;
+	struct si_dte_data *dte_data = &si_pi->dte_data;
+	Smc_SIslands_DTE_Configuration *dte_tables = NULL;
+	u32 table_size;
+	u8 tdep_count;
+	u32 i;
+
+	if (dte_data == NULL)
+		si_pi->enable_dte = false;
+
+	if (si_pi->enable_dte == false)
+		return 0;
+
+	if (dte_data->k <= 0)
+		return -EINVAL;
+
+	dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL);
+	if (dte_tables == NULL) {
+		si_pi->enable_dte = false;
+		return -ENOMEM;
+	}
+
+	table_size = dte_data->k;
+
+	if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES)
+		table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES;
+
+	tdep_count = dte_data->tdep_count;
+	if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE)
+		tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE;
+
+	dte_tables->K = cpu_to_be32(table_size);
+	dte_tables->T0 = cpu_to_be32(dte_data->t0);
+	dte_tables->MaxT = cpu_to_be32(dte_data->max_t);
+	dte_tables->WindowSize = dte_data->window_size;
+	dte_tables->temp_select = dte_data->temp_select;
+	dte_tables->DTE_mode = dte_data->dte_mode;
+	dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold);
+
+	if (tdep_count > 0)
+		table_size--;
+
+	for (i = 0; i < table_size; i++) {
+		dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]);
+		dte_tables->R[i]   = cpu_to_be32(dte_data->r[i]);
+	}
+
+	dte_tables->Tdep_count = tdep_count;
+
+	for (i = 0; i < (u32)tdep_count; i++) {
+		dte_tables->T_limits[i] = dte_data->t_limits[i];
+		dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]);
+		dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]);
+	}
+
+	ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->dte_table_start,
+					  (u8 *)dte_tables,
+					  sizeof(Smc_SIslands_DTE_Configuration),
+					  si_pi->sram_end);
+	kfree(dte_tables);
+
+	return ret;
+}
+
+static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev,
+					  u16 *max, u16 *min)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct amdgpu_cac_leakage_table *table =
+		&adev->pm.dpm.dyn_state.cac_leakage_table;
+	u32 i;
+	u32 v0_loadline;
+
+	if (table == NULL)
+		return -EINVAL;
+
+	*max = 0;
+	*min = 0xFFFF;
+
+	for (i = 0; i < table->count; i++) {
+		if (table->entries[i].vddc > *max)
+			*max = table->entries[i].vddc;
+		if (table->entries[i].vddc < *min)
+			*min = table->entries[i].vddc;
+	}
+
+	if (si_pi->powertune_data->lkge_lut_v0_percent > 100)
+		return -EINVAL;
+
+	v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100;
+
+	if (v0_loadline > 0xFFFFUL)
+		return -EINVAL;
+
+	*min = (u16)v0_loadline;
+
+	if ((*min > *max) || (*max == 0) || (*min == 0))
+		return -EINVAL;
+
+	return 0;
+}
+
+static u16 si_get_cac_std_voltage_step(u16 max, u16 min)
+{
+	return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) /
+		SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
+}
+
+static int si_init_dte_leakage_table(struct amdgpu_device *adev,
+				     PP_SIslands_CacConfig *cac_tables,
+				     u16 vddc_max, u16 vddc_min, u16 vddc_step,
+				     u16 t0, u16 t_step)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 leakage;
+	unsigned int i, j;
+	s32 t;
+	u32 smc_leakage;
+	u32 scaling_factor;
+	u16 voltage;
+
+	scaling_factor = si_get_smc_power_scaling_factor(adev);
+
+	for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++) {
+		t = (1000 * (i * t_step + t0));
+
+		for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
+			voltage = vddc_max - (vddc_step * j);
+
+			si_calculate_leakage_for_v_and_t(adev,
+							 &si_pi->powertune_data->leakage_coefficients,
+							 voltage,
+							 t,
+							 si_pi->dyn_powertune_data.cac_leakage,
+							 &leakage);
+
+			smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
+
+			if (smc_leakage > 0xFFFF)
+				smc_leakage = 0xFFFF;
+
+			cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
+				cpu_to_be16((u16)smc_leakage);
+		}
+	}
+	return 0;
+}
+
+static int si_init_simplified_leakage_table(struct amdgpu_device *adev,
+					    PP_SIslands_CacConfig *cac_tables,
+					    u16 vddc_max, u16 vddc_min, u16 vddc_step)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 leakage;
+	unsigned int i, j;
+	u32 smc_leakage;
+	u32 scaling_factor;
+	u16 voltage;
+
+	scaling_factor = si_get_smc_power_scaling_factor(adev);
+
+	for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
+		voltage = vddc_max - (vddc_step * j);
+
+		si_calculate_leakage_for_v(adev,
+					   &si_pi->powertune_data->leakage_coefficients,
+					   si_pi->powertune_data->fixed_kt,
+					   voltage,
+					   si_pi->dyn_powertune_data.cac_leakage,
+					   &leakage);
+
+		smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
+
+		if (smc_leakage > 0xFFFF)
+			smc_leakage = 0xFFFF;
+
+		for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++)
+			cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
+				cpu_to_be16((u16)smc_leakage);
+	}
+	return 0;
+}
+
+static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	PP_SIslands_CacConfig *cac_tables = NULL;
+	u16 vddc_max, vddc_min, vddc_step;
+	u16 t0, t_step;
+	u32 load_line_slope, reg;
+	int ret = 0;
+	u32 ticks_per_us = amdgpu_asic_get_xclk(adev) / 100;
+
+	if (ni_pi->enable_cac == false)
+		return 0;
+
+	cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL);
+	if (!cac_tables)
+		return -ENOMEM;
+
+	reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
+	reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
+	WREG32(CG_CAC_CTRL, reg);
+
+	si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;
+	si_pi->dyn_powertune_data.dc_pwr_value =
+		si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0];
+	si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(adev);
+	si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default;
+
+	si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000;
+
+	ret = si_get_cac_std_voltage_max_min(adev, &vddc_max, &vddc_min);
+	if (ret)
+		goto done_free;
+
+	vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min);
+	vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1));
+	t_step = 4;
+	t0 = 60;
+
+	if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage)
+		ret = si_init_dte_leakage_table(adev, cac_tables,
+						vddc_max, vddc_min, vddc_step,
+						t0, t_step);
+	else
+		ret = si_init_simplified_leakage_table(adev, cac_tables,
+						       vddc_max, vddc_min, vddc_step);
+	if (ret)
+		goto done_free;
+
+	load_line_slope = ((u32)adev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100;
+
+	cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size);
+	cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate;
+	cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n;
+	cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min);
+	cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step);
+	cac_tables->R_LL = cpu_to_be32(load_line_slope);
+	cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime);
+	cac_tables->calculation_repeats = cpu_to_be32(2);
+	cac_tables->dc_cac = cpu_to_be32(0);
+	cac_tables->log2_PG_LKG_SCALE = 12;
+	cac_tables->cac_temp = si_pi->powertune_data->operating_temp;
+	cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0);
+	cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step);
+
+	ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->cac_table_start,
+					  (u8 *)cac_tables,
+					  sizeof(PP_SIslands_CacConfig),
+					  si_pi->sram_end);
+
+	if (ret)
+		goto done_free;
+
+	ret = si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us);
+
+done_free:
+	if (ret) {
+		ni_pi->enable_cac = false;
+		ni_pi->enable_power_containment = false;
+	}
+
+	kfree(cac_tables);
+
+	return ret;
+}
+
+static int si_program_cac_config_registers(struct amdgpu_device *adev,
+					   const struct si_cac_config_reg *cac_config_regs)
+{
+	const struct si_cac_config_reg *config_regs = cac_config_regs;
+	u32 data = 0, offset;
+
+	if (!config_regs)
+		return -EINVAL;
+
+	while (config_regs->offset != 0xFFFFFFFF) {
+		switch (config_regs->type) {
+		case SISLANDS_CACCONFIG_CGIND:
+			offset = SMC_CG_IND_START + config_regs->offset;
+			if (offset < SMC_CG_IND_END)
+				data = RREG32_SMC(offset);
+			break;
+		default:
+			data = RREG32(config_regs->offset);
+			break;
+		}
+
+		data &= ~config_regs->mask;
+		data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+
+		switch (config_regs->type) {
+		case SISLANDS_CACCONFIG_CGIND:
+			offset = SMC_CG_IND_START + config_regs->offset;
+			if (offset < SMC_CG_IND_END)
+				WREG32_SMC(offset, data);
+			break;
+		default:
+			WREG32(config_regs->offset, data);
+			break;
+		}
+		config_regs++;
+	}
+	return 0;
+}
+
+static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	int ret;
+
+	if ((ni_pi->enable_cac == false) ||
+	    (ni_pi->cac_configuration_required == false))
+		return 0;
+
+	ret = si_program_cac_config_registers(adev, si_pi->lcac_config);
+	if (ret)
+		return ret;
+	ret = si_program_cac_config_registers(adev, si_pi->cac_override);
+	if (ret)
+		return ret;
+	ret = si_program_cac_config_registers(adev, si_pi->cac_weights);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int si_enable_smc_cac(struct amdgpu_device *adev,
+			     struct amdgpu_ps *amdgpu_new_state,
+			     bool enable)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	PPSMC_Result smc_result;
+	int ret = 0;
+
+	if (ni_pi->enable_cac) {
+		if (enable) {
+			if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
+				if (ni_pi->support_cac_long_term_average) {
+					smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgEnable);
+					if (smc_result != PPSMC_Result_OK)
+						ni_pi->support_cac_long_term_average = false;
+				}
+
+				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
+				if (smc_result != PPSMC_Result_OK) {
+					ret = -EINVAL;
+					ni_pi->cac_enabled = false;
+				} else {
+					ni_pi->cac_enabled = true;
+				}
+
+				if (si_pi->enable_dte) {
+					smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
+					if (smc_result != PPSMC_Result_OK)
+						ret = -EINVAL;
+				}
+			}
+		} else if (ni_pi->cac_enabled) {
+			if (si_pi->enable_dte)
+				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
+
+			smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
+
+			ni_pi->cac_enabled = false;
+
+			if (ni_pi->support_cac_long_term_average)
+				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgDisable);
+		}
+	}
+	return ret;
+}
+
+static int si_init_smc_spll_table(struct amdgpu_device *adev)
+{
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	SMC_SISLANDS_SPLL_DIV_TABLE *spll_table;
+	SISLANDS_SMC_SCLK_VALUE sclk_params;
+	u32 fb_div, p_div;
+	u32 clk_s, clk_v;
+	u32 sclk = 0;
+	int ret = 0;
+	u32 tmp;
+	int i;
+
+	if (si_pi->spll_table_start == 0)
+		return -EINVAL;
+
+	spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
+	if (spll_table == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < 256; i++) {
+		ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
+		if (ret)
+			break;
+		p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
+		fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
+		clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
+		clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
+
+		fb_div &= ~0x00001FFF;
+		fb_div >>= 1;
+		clk_v >>= 6;
+
+		if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
+			ret = -EINVAL;
+		if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
+			ret = -EINVAL;
+		if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
+			ret = -EINVAL;
+		if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
+			ret = -EINVAL;
+
+		if (ret)
+			break;
+
+		tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
+			((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
+		spll_table->freq[i] = cpu_to_be32(tmp);
+
+		tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
+			((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
+		spll_table->ss[i] = cpu_to_be32(tmp);
+
+		sclk += 512;
+	}
+
+
+	if (!ret)
+		ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->spll_table_start,
+						  (u8 *)spll_table,
+						  sizeof(SMC_SISLANDS_SPLL_DIV_TABLE),
+						  si_pi->sram_end);
+
+	if (ret)
+		ni_pi->enable_power_containment = false;
+
+	kfree(spll_table);
+
+	return ret;
+}
+
+struct si_dpm_quirk {
+	u32 chip_vendor;
+	u32 chip_device;
+	u32 subsys_vendor;
+	u32 subsys_device;
+	u32 max_sclk;
+	u32 max_mclk;
+};
+
+/* cards with dpm stability problems */
+static struct si_dpm_quirk si_dpm_quirk_list[] = {
+	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
+	{ 0, 0, 0, 0 },
+};
+
+static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev,
+						   u16 vce_voltage)
+{
+	u16 highest_leakage = 0;
+	struct si_power_info *si_pi = si_get_pi(adev);
+	int i;
+
+	for (i = 0; i < si_pi->leakage_voltage.count; i++){
+		if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage)
+			highest_leakage = si_pi->leakage_voltage.entries[i].voltage;
+	}
+
+	if (si_pi->leakage_voltage.count && (highest_leakage < vce_voltage))
+		return highest_leakage;
+
+	return vce_voltage;
+}
+
+static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
+				    u32 evclk, u32 ecclk, u16 *voltage)
+{
+	u32 i;
+	int ret = -EINVAL;
+	struct amdgpu_vce_clock_voltage_dependency_table *table =
+		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+	if (((evclk == 0) && (ecclk == 0)) ||
+	    (table && (table->count == 0))) {
+		*voltage = 0;
+		return 0;
+	}
+
+	for (i = 0; i < table->count; i++) {
+		if ((evclk <= table->entries[i].evclk) &&
+		    (ecclk <= table->entries[i].ecclk)) {
+			*voltage = table->entries[i].v;
+			ret = 0;
+			break;
+		}
+	}
+
+	/* if no match return the highest voltage */
+	if (ret)
+		*voltage = table->entries[table->count - 1].v;
+
+	*voltage = si_get_lower_of_leakage_and_vce_voltage(adev, *voltage);
+
+	return ret;
+}
+
+static bool si_dpm_vblank_too_short(struct amdgpu_device *adev)
+{
+
+	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
+	/* we never hit the non-gddr5 limit so disable it */
+	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
+
+	if (vblank_time < switch_limit)
+		return true;
+	else
+		return false;
+
+}
+
+static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev,
+				u32 arb_freq_src, u32 arb_freq_dest)
+{
+	u32 mc_arb_dram_timing;
+	u32 mc_arb_dram_timing2;
+	u32 burst_time;
+	u32 mc_cg_config;
+
+	switch (arb_freq_src) {
+	case MC_CG_ARB_FREQ_F0:
+		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
+		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
+		break;
+	case MC_CG_ARB_FREQ_F1:
+		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_1);
+		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
+		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
+		break;
+	case MC_CG_ARB_FREQ_F2:
+		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_2);
+		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
+		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
+		break;
+	case MC_CG_ARB_FREQ_F3:
+		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_3);
+		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
+		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (arb_freq_dest) {
+	case MC_CG_ARB_FREQ_F0:
+		WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
+		WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
+		WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
+		break;
+	case MC_CG_ARB_FREQ_F1:
+		WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
+		WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
+		WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
+		break;
+	case MC_CG_ARB_FREQ_F2:
+		WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
+		WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
+		WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
+		break;
+	case MC_CG_ARB_FREQ_F3:
+		WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
+		WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
+		WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
+	WREG32(MC_CG_CONFIG, mc_cg_config);
+	WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);
+
+	return 0;
+}
+
+static void ni_update_current_ps(struct amdgpu_device *adev,
+			  struct amdgpu_ps *rps)
+{
+	struct si_ps *new_ps = si_get_ps(rps);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+
+	eg_pi->current_rps = *rps;
+	ni_pi->current_ps = *new_ps;
+	eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
+}
+
+static void ni_update_requested_ps(struct amdgpu_device *adev,
+			    struct amdgpu_ps *rps)
+{
+	struct si_ps *new_ps = si_get_ps(rps);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+
+	eg_pi->requested_rps = *rps;
+	ni_pi->requested_ps = *new_ps;
+	eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
+}
+
+static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
+					   struct amdgpu_ps *new_ps,
+					   struct amdgpu_ps *old_ps)
+{
+	struct si_ps *new_state = si_get_ps(new_ps);
+	struct si_ps *current_state = si_get_ps(old_ps);
+
+	if ((new_ps->vclk == old_ps->vclk) &&
+	    (new_ps->dclk == old_ps->dclk))
+		return;
+
+	if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
+	    current_state->performance_levels[current_state->performance_level_count - 1].sclk)
+		return;
+
+	amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
+}
+
+static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev,
+					  struct amdgpu_ps *new_ps,
+					  struct amdgpu_ps *old_ps)
+{
+	struct si_ps *new_state = si_get_ps(new_ps);
+	struct si_ps *current_state = si_get_ps(old_ps);
+
+	if ((new_ps->vclk == old_ps->vclk) &&
+	    (new_ps->dclk == old_ps->dclk))
+		return;
+
+	if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
+	    current_state->performance_levels[current_state->performance_level_count - 1].sclk)
+		return;
+
+	amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
+}
+
+static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
+{
+	unsigned int i;
+
+	for (i = 0; i < table->count; i++)
+		if (voltage <= table->entries[i].value)
+			return table->entries[i].value;
+
+	return table->entries[table->count - 1].value;
+}
+
+static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks,
+		                u32 max_clock, u32 requested_clock)
+{
+	unsigned int i;
+
+	if ((clocks == NULL) || (clocks->count == 0))
+		return (requested_clock < max_clock) ? requested_clock : max_clock;
+
+	for (i = 0; i < clocks->count; i++) {
+		if (clocks->values[i] >= requested_clock)
+			return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock;
+	}
+
+	return (clocks->values[clocks->count - 1] < max_clock) ?
+		clocks->values[clocks->count - 1] : max_clock;
+}
+
+static u32 btc_get_valid_mclk(struct amdgpu_device *adev,
+			      u32 max_mclk, u32 requested_mclk)
+{
+	return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values,
+				    max_mclk, requested_mclk);
+}
+
+static u32 btc_get_valid_sclk(struct amdgpu_device *adev,
+		              u32 max_sclk, u32 requested_sclk)
+{
+	return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values,
+				    max_sclk, requested_sclk);
+}
+
+static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table,
+							    u32 *max_clock)
+{
+	u32 i, clock = 0;
+
+	if ((table == NULL) || (table->count == 0)) {
+		*max_clock = clock;
+		return;
+	}
+
+	for (i = 0; i < table->count; i++) {
+		if (clock < table->entries[i].clk)
+			clock = table->entries[i].clk;
+	}
+	*max_clock = clock;
+}
+
+static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table,
+					       u32 clock, u16 max_voltage, u16 *voltage)
+{
+	u32 i;
+
+	if ((table == NULL) || (table->count == 0))
+		return;
+
+	for (i= 0; i < table->count; i++) {
+		if (clock <= table->entries[i].clk) {
+			if (*voltage < table->entries[i].v)
+				*voltage = (u16)((table->entries[i].v < max_voltage) ?
+					   table->entries[i].v : max_voltage);
+			return;
+		}
+	}
+
+	*voltage = (*voltage > max_voltage) ? *voltage : max_voltage;
+}
+
+static void btc_adjust_clock_combinations(struct amdgpu_device *adev,
+					  const struct amdgpu_clock_and_voltage_limits *max_limits,
+					  struct rv7xx_pl *pl)
+{
+
+	if ((pl->mclk == 0) || (pl->sclk == 0))
+		return;
+
+	if (pl->mclk == pl->sclk)
+		return;
+
+	if (pl->mclk > pl->sclk) {
+		if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio)
+			pl->sclk = btc_get_valid_sclk(adev,
+						      max_limits->sclk,
+						      (pl->mclk +
+						      (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
+						      adev->pm.dpm.dyn_state.mclk_sclk_ratio);
+	} else {
+		if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta)
+			pl->mclk = btc_get_valid_mclk(adev,
+						      max_limits->mclk,
+						      pl->sclk -
+						      adev->pm.dpm.dyn_state.sclk_mclk_delta);
+	}
+}
+
+static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
+					  u16 max_vddc, u16 max_vddci,
+					  u16 *vddc, u16 *vddci)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	u16 new_voltage;
+
+	if ((0 == *vddc) || (0 == *vddci))
+		return;
+
+	if (*vddc > *vddci) {
+		if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
+			new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
+						       (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta));
+			*vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;
+		}
+	} else {
+		if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
+			new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
+						       (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta));
+			*vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc;
+		}
+	}
+}
+
+static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
+					       u32 sys_mask,
+					       enum amdgpu_pcie_gen asic_gen,
+					       enum amdgpu_pcie_gen default_gen)
+{
+	switch (asic_gen) {
+	case AMDGPU_PCIE_GEN1:
+		return AMDGPU_PCIE_GEN1;
+	case AMDGPU_PCIE_GEN2:
+		return AMDGPU_PCIE_GEN2;
+	case AMDGPU_PCIE_GEN3:
+		return AMDGPU_PCIE_GEN3;
+	default:
+		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
+			return AMDGPU_PCIE_GEN3;
+		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
+			return AMDGPU_PCIE_GEN2;
+		else
+			return AMDGPU_PCIE_GEN1;
+	}
+	return AMDGPU_PCIE_GEN1;
+}
+
+static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
+			    u32 *p, u32 *u)
+{
+	u32 b_c = 0;
+	u32 i_c;
+	u32 tmp;
+
+	i_c = (i * r_c) / 100;
+	tmp = i_c >> p_b;
+
+	while (tmp) {
+		b_c++;
+		tmp >>= 1;
+	}
+
+	*u = (b_c + 1) / 2;
+	*p = i_c / (1 << (2 * (*u)));
+}
+
+static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
+{
+	u32 k, a, ah, al;
+	u32 t1;
+
+	if ((fl == 0) || (fh == 0) || (fl > fh))
+		return -EINVAL;
+
+	k = (100 * fh) / fl;
+	t1 = (t * (k - 100));
+	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
+	a = (a + 5) / 10;
+	ah = ((a * t) + 5000) / 10000;
+	al = a - ah;
+
+	*th = t - ah;
+	*tl = t + al;
+
+	return 0;
+}
+
+static bool r600_is_uvd_state(u32 class, u32 class2)
+{
+	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+		return true;
+	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+		return true;
+	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+		return true;
+	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+		return true;
+	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+		return true;
+	return false;
+}
+
+static u8 rv770_get_memory_module_index(struct amdgpu_device *adev)
+{
+	return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff);
+}
+
+static void rv770_get_max_vddc(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	u16 vddc;
+
+	if (amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc))
+		pi->max_vddc = 0;
+	else
+		pi->max_vddc = vddc;
+}
+
+static void rv770_get_engine_memory_ss(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct amdgpu_atom_ss ss;
+
+	pi->sclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
+						       ASIC_INTERNAL_ENGINE_SS, 0);
+	pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
+						       ASIC_INTERNAL_MEMORY_SS, 0);
+
+	if (pi->sclk_ss || pi->mclk_ss)
+		pi->dynamic_ss = true;
+	else
+		pi->dynamic_ss = false;
+}
+
+
+static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
+					struct amdgpu_ps *rps)
+{
+	struct  si_ps *ps = si_get_ps(rps);
+	struct amdgpu_clock_and_voltage_limits *max_limits;
+	bool disable_mclk_switching = false;
+	bool disable_sclk_switching = false;
+	u32 mclk, sclk;
+	u16 vddc, vddci, min_vce_voltage = 0;
+	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
+	u32 max_sclk = 0, max_mclk = 0;
+	int i;
+	struct si_dpm_quirk *p = si_dpm_quirk_list;
+
+	/* Apply dpm quirks */
+	while (p && p->chip_device != 0) {
+		if (adev->pdev->vendor == p->chip_vendor &&
+		    adev->pdev->device == p->chip_device &&
+		    adev->pdev->subsystem_vendor == p->subsys_vendor &&
+		    adev->pdev->subsystem_device == p->subsys_device) {
+			max_sclk = p->max_sclk;
+			max_mclk = p->max_mclk;
+			break;
+		}
+		++p;
+	}
+	/* limit mclk on all R7 370 parts for stability */
+	if (adev->pdev->device == 0x6811 &&
+	    adev->pdev->revision == 0x81)
+		max_mclk = 120000;
+	/* limit sclk/mclk on Jet parts for stability */
+	if (adev->pdev->device == 0x6665 &&
+	    adev->pdev->revision == 0xc3) {
+		max_sclk = 75000;
+		max_mclk = 80000;
+	}
+	/* Limit clocks for some HD8600 parts */
+	if (adev->pdev->device == 0x6660 &&
+	    adev->pdev->revision == 0x83) {
+		max_sclk = 75000;
+		max_mclk = 80000;
+	}
+
+	if (rps->vce_active) {
+		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
+		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
+		si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk,
+					 &min_vce_voltage);
+	} else {
+		rps->evclk = 0;
+		rps->ecclk = 0;
+	}
+
+	if ((adev->pm.dpm.new_active_crtc_count > 1) ||
+	    si_dpm_vblank_too_short(adev))
+		disable_mclk_switching = true;
+
+	if (rps->vclk || rps->dclk) {
+		disable_mclk_switching = true;
+		disable_sclk_switching = true;
+	}
+
+	if (adev->pm.dpm.ac_power)
+		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+	else
+		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+	for (i = ps->performance_level_count - 2; i >= 0; i--) {
+		if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
+			ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
+	}
+	if (adev->pm.dpm.ac_power == false) {
+		for (i = 0; i < ps->performance_level_count; i++) {
+			if (ps->performance_levels[i].mclk > max_limits->mclk)
+				ps->performance_levels[i].mclk = max_limits->mclk;
+			if (ps->performance_levels[i].sclk > max_limits->sclk)
+				ps->performance_levels[i].sclk = max_limits->sclk;
+			if (ps->performance_levels[i].vddc > max_limits->vddc)
+				ps->performance_levels[i].vddc = max_limits->vddc;
+			if (ps->performance_levels[i].vddci > max_limits->vddci)
+				ps->performance_levels[i].vddci = max_limits->vddci;
+		}
+	}
+
+	/* limit clocks to max supported clocks based on voltage dependency tables */
+	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+							&max_sclk_vddc);
+	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+							&max_mclk_vddci);
+	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+							&max_mclk_vddc);
+
+	for (i = 0; i < ps->performance_level_count; i++) {
+		if (max_sclk_vddc) {
+			if (ps->performance_levels[i].sclk > max_sclk_vddc)
+				ps->performance_levels[i].sclk = max_sclk_vddc;
+		}
+		if (max_mclk_vddci) {
+			if (ps->performance_levels[i].mclk > max_mclk_vddci)
+				ps->performance_levels[i].mclk = max_mclk_vddci;
+		}
+		if (max_mclk_vddc) {
+			if (ps->performance_levels[i].mclk > max_mclk_vddc)
+				ps->performance_levels[i].mclk = max_mclk_vddc;
+		}
+		if (max_mclk) {
+			if (ps->performance_levels[i].mclk > max_mclk)
+				ps->performance_levels[i].mclk = max_mclk;
+		}
+		if (max_sclk) {
+			if (ps->performance_levels[i].sclk > max_sclk)
+				ps->performance_levels[i].sclk = max_sclk;
+		}
+	}
+
+	/* XXX validate the min clocks required for display */
+
+	if (disable_mclk_switching) {
+		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
+		vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
+	} else {
+		mclk = ps->performance_levels[0].mclk;
+		vddci = ps->performance_levels[0].vddci;
+	}
+
+	if (disable_sclk_switching) {
+		sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
+		vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
+	} else {
+		sclk = ps->performance_levels[0].sclk;
+		vddc = ps->performance_levels[0].vddc;
+	}
+
+	if (rps->vce_active) {
+		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
+			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
+		if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
+			mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
+	}
+
+	/* adjusted low state */
+	ps->performance_levels[0].sclk = sclk;
+	ps->performance_levels[0].mclk = mclk;
+	ps->performance_levels[0].vddc = vddc;
+	ps->performance_levels[0].vddci = vddci;
+
+	if (disable_sclk_switching) {
+		sclk = ps->performance_levels[0].sclk;
+		for (i = 1; i < ps->performance_level_count; i++) {
+			if (sclk < ps->performance_levels[i].sclk)
+				sclk = ps->performance_levels[i].sclk;
+		}
+		for (i = 0; i < ps->performance_level_count; i++) {
+			ps->performance_levels[i].sclk = sclk;
+			ps->performance_levels[i].vddc = vddc;
+		}
+	} else {
+		for (i = 1; i < ps->performance_level_count; i++) {
+			if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
+				ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
+			if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
+				ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+		}
+	}
+
+	if (disable_mclk_switching) {
+		mclk = ps->performance_levels[0].mclk;
+		for (i = 1; i < ps->performance_level_count; i++) {
+			if (mclk < ps->performance_levels[i].mclk)
+				mclk = ps->performance_levels[i].mclk;
+		}
+		for (i = 0; i < ps->performance_level_count; i++) {
+			ps->performance_levels[i].mclk = mclk;
+			ps->performance_levels[i].vddci = vddci;
+		}
+	} else {
+		for (i = 1; i < ps->performance_level_count; i++) {
+			if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
+				ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
+			if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
+				ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
+		}
+	}
+
+	for (i = 0; i < ps->performance_level_count; i++)
+		btc_adjust_clock_combinations(adev, max_limits,
+					      &ps->performance_levels[i]);
+
+	for (i = 0; i < ps->performance_level_count; i++) {
+		if (ps->performance_levels[i].vddc < min_vce_voltage)
+			ps->performance_levels[i].vddc = min_vce_voltage;
+		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+						   ps->performance_levels[i].sclk,
+						   max_limits->vddc,  &ps->performance_levels[i].vddc);
+		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+						   ps->performance_levels[i].mclk,
+						   max_limits->vddci, &ps->performance_levels[i].vddci);
+		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+						   ps->performance_levels[i].mclk,
+						   max_limits->vddc,  &ps->performance_levels[i].vddc);
+		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
+						   adev->clock.current_dispclk,
+						   max_limits->vddc,  &ps->performance_levels[i].vddc);
+	}
+
+	for (i = 0; i < ps->performance_level_count; i++) {
+		btc_apply_voltage_delta_rules(adev,
+					      max_limits->vddc, max_limits->vddci,
+					      &ps->performance_levels[i].vddc,
+					      &ps->performance_levels[i].vddci);
+	}
+
+	ps->dc_compatible = true;
+	for (i = 0; i < ps->performance_level_count; i++) {
+		if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
+			ps->dc_compatible = false;
+	}
+}
+
+#if 0
+static int si_read_smc_soft_register(struct amdgpu_device *adev,
+				     u16 reg_offset, u32 *value)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	return amdgpu_si_read_smc_sram_dword(adev,
+					     si_pi->soft_regs_start + reg_offset, value,
+					     si_pi->sram_end);
+}
+#endif
+
+static int si_write_smc_soft_register(struct amdgpu_device *adev,
+				      u16 reg_offset, u32 value)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	return amdgpu_si_write_smc_sram_dword(adev,
+					      si_pi->soft_regs_start + reg_offset,
+					      value, si_pi->sram_end);
+}
+
+static bool si_is_special_1gb_platform(struct amdgpu_device *adev)
+{
+	bool ret = false;
+	u32 tmp, width, row, column, bank, density;
+	bool is_memory_gddr5, is_special;
+
+	tmp = RREG32(MC_SEQ_MISC0);
+	is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT));
+	is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT))
+		& (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT));
+
+	WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
+	width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
+
+	tmp = RREG32(MC_ARB_RAMCFG);
+	row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
+	column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
+	bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
+
+	density = (1 << (row + column - 20 + bank)) * width;
+
+	if ((adev->pdev->device == 0x6819) &&
+	    is_memory_gddr5 && is_special && (density == 0x400))
+		ret = true;
+
+	return ret;
+}
+
+static void si_get_leakage_vddc(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u16 vddc, count = 0;
+	int i, ret;
+
+	for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) {
+		ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i);
+
+		if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) {
+			si_pi->leakage_voltage.entries[count].voltage = vddc;
+			si_pi->leakage_voltage.entries[count].leakage_index =
+				SISLANDS_LEAKAGE_INDEX0 + i;
+			count++;
+		}
+	}
+	si_pi->leakage_voltage.count = count;
+}
+
+static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev,
+						     u32 index, u16 *leakage_voltage)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	int i;
+
+	if (leakage_voltage == NULL)
+		return -EINVAL;
+
+	if ((index & 0xff00) != 0xff00)
+		return -EINVAL;
+
+	if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1)
+		return -EINVAL;
+
+	if (index < SISLANDS_LEAKAGE_INDEX0)
+		return -EINVAL;
+
+	for (i = 0; i < si_pi->leakage_voltage.count; i++) {
+		if (si_pi->leakage_voltage.entries[i].leakage_index == index) {
+			*leakage_voltage = si_pi->leakage_voltage.entries[i].voltage;
+			return 0;
+		}
+	}
+	return -EAGAIN;
+}
+
+static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	bool want_thermal_protection;
+	enum amdgpu_dpm_event_src dpm_event_src;
+
+	switch (sources) {
+	case 0:
+	default:
+		want_thermal_protection = false;
+		break;
+	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
+		want_thermal_protection = true;
+		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
+		break;
+	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+		want_thermal_protection = true;
+		dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
+		break;
+	case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+	      (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
+		want_thermal_protection = true;
+		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
+		break;
+	}
+
+	if (want_thermal_protection) {
+		WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
+		if (pi->thermal_protection)
+			WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+	} else {
+		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+	}
+}
+
+static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
+					   enum amdgpu_dpm_auto_throttle_src source,
+					   bool enable)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+	if (enable) {
+		if (!(pi->active_auto_throttle_sources & (1 << source))) {
+			pi->active_auto_throttle_sources |= 1 << source;
+			si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
+		}
+	} else {
+		if (pi->active_auto_throttle_sources & (1 << source)) {
+			pi->active_auto_throttle_sources &= ~(1 << source);
+			si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
+		}
+	}
+}
+
+static void si_start_dpm(struct amdgpu_device *adev)
+{
+	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
+}
+
+static void si_stop_dpm(struct amdgpu_device *adev)
+{
+	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
+}
+
+static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)
+{
+	if (enable)
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
+	else
+		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+
+}
+
+#if 0
+static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev,
+					       u32 thermal_level)
+{
+	PPSMC_Result ret;
+
+	if (thermal_level == 0) {
+		ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
+		if (ret == PPSMC_Result_OK)
+			return 0;
+		else
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev)
+{
+	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true);
+}
+#endif
+
+#if 0
+static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power)
+{
+	if (ac_power)
+		return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
+			0 : -EINVAL;
+
+	return 0;
+}
+#endif
+
+static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+						      PPSMC_Msg msg, u32 parameter)
+{
+	WREG32(SMC_SCRATCH0, parameter);
+	return amdgpu_si_send_msg_to_smc(adev, msg);
+}
+
+static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev)
+{
+	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
+		return -EINVAL;
+
+	return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static int si_dpm_force_performance_level(struct amdgpu_device *adev,
+				   enum amdgpu_dpm_forced_level level)
+{
+	struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
+	struct  si_ps *ps = si_get_ps(rps);
+	u32 levels = ps->performance_level_count;
+
+	if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
+		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+			return -EINVAL;
+
+		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
+			return -EINVAL;
+	} else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
+		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
+			return -EINVAL;
+
+		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
+			return -EINVAL;
+	} else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
+		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
+			return -EINVAL;
+
+		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	adev->pm.dpm.forced_level = level;
+
+	return 0;
+}
+
+#if 0
+static int si_set_boot_state(struct amdgpu_device *adev)
+{
+	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+#endif
+
+static int si_set_sw_state(struct amdgpu_device *adev)
+{
+	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static int si_halt_smc(struct amdgpu_device *adev)
+{
+	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
+		return -EINVAL;
+
+	return (amdgpu_si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static int si_resume_smc(struct amdgpu_device *adev)
+{
+	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK)
+		return -EINVAL;
+
+	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static void si_dpm_start_smc(struct amdgpu_device *adev)
+{
+	amdgpu_si_program_jump_on_start(adev);
+	amdgpu_si_start_smc(adev);
+	amdgpu_si_smc_clock(adev, true);
+}
+
+static void si_dpm_stop_smc(struct amdgpu_device *adev)
+{
+	amdgpu_si_reset_smc(adev);
+	amdgpu_si_smc_clock(adev, false);
+}
+
+static int si_process_firmware_header(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 tmp;
+	int ret;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev,
+					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					    SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->state_table_start = tmp;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev,
+					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					    SISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->soft_regs_start = tmp;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev,
+					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					    SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->mc_reg_table_start = tmp;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev,
+					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					    SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->fan_table_start = tmp;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev,
+					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					    SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->arb_table_start = tmp;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev,
+					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					    SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->cac_table_start = tmp;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev,
+					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					    SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->dte_table_start = tmp;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev,
+					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					    SISLANDS_SMC_FIRMWARE_HEADER_spllTable,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->spll_table_start = tmp;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev,
+					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+					    SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	si_pi->papm_cfg_table_start = tmp;
+
+	return ret;
+}
+
+static void si_read_clock_registers(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
+	si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
+	si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
+	si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
+	si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
+	si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
+	si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
+	si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
+	si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
+	si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
+	si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
+	si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
+	si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
+	si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
+	si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
+}
+
+static void si_enable_thermal_protection(struct amdgpu_device *adev,
+					  bool enable)
+{
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+	else
+		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+}
+
+static void si_enable_acpi_power_management(struct amdgpu_device *adev)
+{
+	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
+}
+
+#if 0
+static int si_enter_ulp_state(struct amdgpu_device *adev)
+{
+	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
+
+	udelay(25000);
+
+	return 0;
+}
+
+static int si_exit_ulp_state(struct amdgpu_device *adev)
+{
+	int i;
+
+	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
+
+	udelay(7000);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (RREG32(SMC_RESP_0) == 1)
+			break;
+		udelay(1000);
+	}
+
+	return 0;
+}
+#endif
+
+static int si_notify_smc_display_change(struct amdgpu_device *adev,
+				     bool has_display)
+{
+	PPSMC_Msg msg = has_display ?
+		PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
+
+	return (amdgpu_si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static void si_program_response_times(struct amdgpu_device *adev)
+{
+	u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
+	u32 vddc_dly, acpi_dly, vbi_dly;
+	u32 reference_clock;
+
+	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
+
+	voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;
+	backbias_response_time = (u32)adev->pm.dpm.backbias_response_time;
+
+	if (voltage_response_time == 0)
+		voltage_response_time = 1000;
+
+	acpi_delay_time = 15000;
+	vbi_time_out = 100000;
+
+	reference_clock = amdgpu_asic_get_xclk(adev);
+
+	vddc_dly = (voltage_response_time  * reference_clock) / 100;
+	acpi_dly = (acpi_delay_time * reference_clock) / 100;
+	vbi_dly  = (vbi_time_out * reference_clock) / 100;
+
+	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg,  vddc_dly);
+	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi,  acpi_dly);
+	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
+	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
+}
+
+static void si_program_ds_registers(struct amdgpu_device *adev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	u32 tmp;
+
+	/* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */
+	if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0)
+		tmp = 0x10;
+	else
+		tmp = 0x1;
+
+	if (eg_pi->sclk_deep_sleep) {
+		WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
+		WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
+			 ~AUTOSCALE_ON_SS_CLEAR);
+	}
+}
+
+static void si_program_display_gap(struct amdgpu_device *adev)
+{
+	u32 tmp, pipe;
+	int i;
+
+	tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+	if (adev->pm.dpm.new_active_crtc_count > 0)
+		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+	else
+		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+
+	if (adev->pm.dpm.new_active_crtc_count > 1)
+		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+	else
+		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+
+	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+
+	tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
+	pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
+
+	if ((adev->pm.dpm.new_active_crtc_count > 0) &&
+	    (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
+		/* find the first active crtc */
+		for (i = 0; i < adev->mode_info.num_crtc; i++) {
+			if (adev->pm.dpm.new_active_crtcs & (1 << i))
+				break;
+		}
+		if (i == adev->mode_info.num_crtc)
+			pipe = 0;
+		else
+			pipe = i;
+
+		tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
+		tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
+		WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
+	}
+
+	/* Setting this to false forces the performance state to low if the crtcs are disabled.
+	 * This can be a problem on PowerXpress systems or if you want to use the card
+	 * for offscreen rendering or compute if there are no crtcs enabled.
+	 */
+	si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);
+}
+
+static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+	if (enable) {
+		if (pi->sclk_ss)
+			WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
+	} else {
+		WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
+		WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
+	}
+}
+
+static void si_setup_bsp(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	u32 xclk = amdgpu_asic_get_xclk(adev);
+
+	r600_calculate_u_and_p(pi->asi,
+			       xclk,
+			       16,
+			       &pi->bsp,
+			       &pi->bsu);
+
+	r600_calculate_u_and_p(pi->pasi,
+			       xclk,
+			       16,
+			       &pi->pbsp,
+			       &pi->pbsu);
+
+
+        pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
+	pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
+
+	WREG32(CG_BSP, pi->dsp);
+}
+
+static void si_program_git(struct amdgpu_device *adev)
+{
+	WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
+}
+
+static void si_program_tp(struct amdgpu_device *adev)
+{
+	int i;
+	enum r600_td td = R600_TD_DFLT;
+
+	for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
+		WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
+
+	if (td == R600_TD_AUTO)
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
+	else
+		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
+
+	if (td == R600_TD_UP)
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
+
+	if (td == R600_TD_DOWN)
+		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
+}
+
+static void si_program_tpp(struct amdgpu_device *adev)
+{
+	WREG32(CG_TPC, R600_TPC_DFLT);
+}
+
+static void si_program_sstp(struct amdgpu_device *adev)
+{
+	WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
+}
+
+static void si_enable_display_gap(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+
+	tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+	tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+		DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
+
+	tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
+	tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
+		DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
+	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+}
+
+static void si_program_vc(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+	WREG32(CG_FTV, pi->vrc);
+}
+
+static void si_clear_vc(struct amdgpu_device *adev)
+{
+	WREG32(CG_FTV, 0);
+}
+
+static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
+{
+	u8 mc_para_index;
+
+	if (memory_clock < 10000)
+		mc_para_index = 0;
+	else if (memory_clock >= 80000)
+		mc_para_index = 0x0f;
+	else
+		mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
+	return mc_para_index;
+}
+
+static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
+{
+	u8 mc_para_index;
+
+	if (strobe_mode) {
+		if (memory_clock < 12500)
+			mc_para_index = 0x00;
+		else if (memory_clock > 47500)
+			mc_para_index = 0x0f;
+		else
+			mc_para_index = (u8)((memory_clock - 10000) / 2500);
+	} else {
+		if (memory_clock < 65000)
+			mc_para_index = 0x00;
+		else if (memory_clock > 135000)
+			mc_para_index = 0x0f;
+		else
+			mc_para_index = (u8)((memory_clock - 60000) / 5000);
+	}
+	return mc_para_index;
+}
+
+static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	bool strobe_mode = false;
+	u8 result = 0;
+
+	if (mclk <= pi->mclk_strobe_mode_threshold)
+		strobe_mode = true;
+
+	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+		result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
+	else
+		result = si_get_ddr3_mclk_frequency_ratio(mclk);
+
+	if (strobe_mode)
+		result |= SISLANDS_SMC_STROBE_ENABLE;
+
+	return result;
+}
+
+static int si_upload_firmware(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	amdgpu_si_reset_smc(adev);
+	amdgpu_si_smc_clock(adev, false);
+
+	return amdgpu_si_load_smc_ucode(adev, si_pi->sram_end);
+}
+
+static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev,
+					      const struct atom_voltage_table *table,
+					      const struct amdgpu_phase_shedding_limits_table *limits)
+{
+	u32 data, num_bits, num_levels;
+
+	if ((table == NULL) || (limits == NULL))
+		return false;
+
+	data = table->mask_low;
+
+	num_bits = hweight32(data);
+
+	if (num_bits == 0)
+		return false;
+
+	num_levels = (1 << num_bits);
+
+	if (table->count != num_levels)
+		return false;
+
+	if (limits->count != (num_levels - 1))
+		return false;
+
+	return true;
+}
+
+static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
+					      u32 max_voltage_steps,
+					      struct atom_voltage_table *voltage_table)
+{
+	unsigned int i, diff;
+
+	if (voltage_table->count <= max_voltage_steps)
+		return;
+
+	diff = voltage_table->count - max_voltage_steps;
+
+	for (i= 0; i < max_voltage_steps; i++)
+		voltage_table->entries[i] = voltage_table->entries[i + diff];
+
+	voltage_table->count = max_voltage_steps;
+}
+
+static int si_get_svi2_voltage_table(struct amdgpu_device *adev,
+				     struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
+				     struct atom_voltage_table *voltage_table)
+{
+	u32 i;
+
+	if (voltage_dependency_table == NULL)
+		return -EINVAL;
+
+	voltage_table->mask_low = 0;
+	voltage_table->phase_delay = 0;
+
+	voltage_table->count = voltage_dependency_table->count;
+	for (i = 0; i < voltage_table->count; i++) {
+		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
+		voltage_table->entries[i].smio_low = 0;
+	}
+
+	return 0;
+}
+
+static int si_construct_voltage_tables(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	int ret;
+
+	if (pi->voltage_control) {
+		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
+						    VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
+		if (ret)
+			return ret;
+
+		if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+			si_trim_voltage_table_to_fit_state_table(adev,
+								 SISLANDS_MAX_NO_VREG_STEPS,
+								 &eg_pi->vddc_voltage_table);
+	} else if (si_pi->voltage_control_svi2) {
+		ret = si_get_svi2_voltage_table(adev,
+						&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+						&eg_pi->vddc_voltage_table);
+		if (ret)
+			return ret;
+	} else {
+		return -EINVAL;
+	}
+
+	if (eg_pi->vddci_control) {
+		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
+						    VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table);
+		if (ret)
+			return ret;
+
+		if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+			si_trim_voltage_table_to_fit_state_table(adev,
+								 SISLANDS_MAX_NO_VREG_STEPS,
+								 &eg_pi->vddci_voltage_table);
+	}
+	if (si_pi->vddci_control_svi2) {
+		ret = si_get_svi2_voltage_table(adev,
+						&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+						&eg_pi->vddci_voltage_table);
+		if (ret)
+			return ret;
+	}
+
+	if (pi->mvdd_control) {
+		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
+						    VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table);
+
+		if (ret) {
+			pi->mvdd_control = false;
+			return ret;
+		}
+
+		if (si_pi->mvdd_voltage_table.count == 0) {
+			pi->mvdd_control = false;
+			return -EINVAL;
+		}
+
+		if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+			si_trim_voltage_table_to_fit_state_table(adev,
+								 SISLANDS_MAX_NO_VREG_STEPS,
+								 &si_pi->mvdd_voltage_table);
+	}
+
+	if (si_pi->vddc_phase_shed_control) {
+		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
+						    VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table);
+		if (ret)
+			si_pi->vddc_phase_shed_control = false;
+
+		if ((si_pi->vddc_phase_shed_table.count == 0) ||
+		    (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS))
+			si_pi->vddc_phase_shed_control = false;
+	}
+
+	return 0;
+}
+
+static void si_populate_smc_voltage_table(struct amdgpu_device *adev,
+					  const struct atom_voltage_table *voltage_table,
+					  SISLANDS_SMC_STATETABLE *table)
+{
+	unsigned int i;
+
+	for (i = 0; i < voltage_table->count; i++)
+		table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
+}
+
+static int si_populate_smc_voltage_tables(struct amdgpu_device *adev,
+					  SISLANDS_SMC_STATETABLE *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u8 i;
+
+	if (si_pi->voltage_control_svi2) {
+		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
+			si_pi->svc_gpio_id);
+		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
+			si_pi->svd_gpio_id);
+		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
+					   2);
+	} else {
+		if (eg_pi->vddc_voltage_table.count) {
+			si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table);
+			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+				cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+
+			for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
+				if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
+					table->maxVDDCIndexInPPTable = i;
+					break;
+				}
+			}
+		}
+
+		if (eg_pi->vddci_voltage_table.count) {
+			si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table);
+
+			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
+				cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+		}
+
+
+		if (si_pi->mvdd_voltage_table.count) {
+			si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table);
+
+			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
+				cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
+		}
+
+		if (si_pi->vddc_phase_shed_control) {
+			if (si_validate_phase_shedding_tables(adev, &si_pi->vddc_phase_shed_table,
+							      &adev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
+				si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table);
+
+				table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
+					cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
+
+				si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
+							   (u32)si_pi->vddc_phase_shed_table.phase_delay);
+			} else {
+				si_pi->vddc_phase_shed_control = false;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int si_populate_voltage_value(struct amdgpu_device *adev,
+				     const struct atom_voltage_table *table,
+				     u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+	unsigned int i;
+
+	for (i = 0; i < table->count; i++) {
+		if (value <= table->entries[i].value) {
+			voltage->index = (u8)i;
+			voltage->value = cpu_to_be16(table->entries[i].value);
+			break;
+		}
+	}
+
+	if (i >= table->count)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int si_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
+				  SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	if (pi->mvdd_control) {
+		if (mclk <= pi->mvdd_split_frequency)
+			voltage->index = 0;
+		else
+			voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1;
+
+		voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value);
+	}
+	return 0;
+}
+
+static int si_get_std_voltage_value(struct amdgpu_device *adev,
+				    SISLANDS_SMC_VOLTAGE_VALUE *voltage,
+				    u16 *std_voltage)
+{
+	u16 v_index;
+	bool voltage_found = false;
+	*std_voltage = be16_to_cpu(voltage->value);
+
+	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+		if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) {
+			if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
+				return -EINVAL;
+
+			for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+				if (be16_to_cpu(voltage->value) ==
+				    (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+					voltage_found = true;
+					if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+						*std_voltage =
+							adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
+					else
+						*std_voltage =
+							adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
+					break;
+				}
+			}
+
+			if (!voltage_found) {
+				for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+					if (be16_to_cpu(voltage->value) <=
+					    (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+						voltage_found = true;
+						if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+							*std_voltage =
+								adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
+						else
+							*std_voltage =
+								adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
+						break;
+					}
+				}
+			}
+		} else {
+			if ((u32)voltage->index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+				*std_voltage = adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
+		}
+	}
+
+	return 0;
+}
+
+static int si_populate_std_voltage_value(struct amdgpu_device *adev,
+					 u16 value, u8 index,
+					 SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+	voltage->index = index;
+	voltage->value = cpu_to_be16(value);
+
+	return 0;
+}
+
+static int si_populate_phase_shedding_value(struct amdgpu_device *adev,
+					    const struct amdgpu_phase_shedding_limits_table *limits,
+					    u16 voltage, u32 sclk, u32 mclk,
+					    SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage)
+{
+	unsigned int i;
+
+	for (i = 0; i < limits->count; i++) {
+		if ((voltage <= limits->entries[i].voltage) &&
+		    (sclk <= limits->entries[i].sclk) &&
+		    (mclk <= limits->entries[i].mclk))
+			break;
+	}
+
+	smc_voltage->phase_settings = (u8)i;
+
+	return 0;
+}
+
+static int si_init_arb_table_index(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 tmp;
+	int ret;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	tmp &= 0x00FFFFFF;
+	tmp |= MC_CG_ARB_FREQ_F1 << 24;
+
+	return amdgpu_si_write_smc_sram_dword(adev, si_pi->arb_table_start,
+					      tmp, si_pi->sram_end);
+}
+
+static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
+{
+	return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int si_reset_to_default(struct amdgpu_device *adev)
+{
+	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static int si_force_switch_to_arb_f0(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 tmp;
+	int ret;
+
+	ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
+					    &tmp, si_pi->sram_end);
+	if (ret)
+		return ret;
+
+	tmp = (tmp >> 24) & 0xff;
+
+	if (tmp == MC_CG_ARB_FREQ_F0)
+		return 0;
+
+	return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
+}
+
+static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
+					    u32 engine_clock)
+{
+	u32 dram_rows;
+	u32 dram_refresh_rate;
+	u32 mc_arb_rfsh_rate;
+	u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
+
+	if (tmp >= 4)
+		dram_rows = 16384;
+	else
+		dram_rows = 1 << (tmp + 10);
+
+	dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
+	mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
+
+	return mc_arb_rfsh_rate;
+}
+
+static int si_populate_memory_timing_parameters(struct amdgpu_device *adev,
+						struct rv7xx_pl *pl,
+						SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs)
+{
+	u32 dram_timing;
+	u32 dram_timing2;
+	u32 burst_time;
+
+	arb_regs->mc_arb_rfsh_rate =
+		(u8)si_calculate_memory_refresh_rate(adev, pl->sclk);
+
+	amdgpu_atombios_set_engine_dram_timings(adev,
+					    pl->sclk,
+		                            pl->mclk);
+
+	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
+	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
+
+	arb_regs->mc_arb_dram_timing  = cpu_to_be32(dram_timing);
+	arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
+	arb_regs->mc_arb_burst_time = (u8)burst_time;
+
+	return 0;
+}
+
+static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev,
+						  struct amdgpu_ps *amdgpu_state,
+						  unsigned int first_arb_set)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct  si_ps *state = si_get_ps(amdgpu_state);
+	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
+	int i, ret = 0;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs);
+		if (ret)
+			break;
+		ret = amdgpu_si_copy_bytes_to_smc(adev,
+						  si_pi->arb_table_start +
+						  offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
+						  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i),
+						  (u8 *)&arb_regs,
+						  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
+						  si_pi->sram_end);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+static int si_program_memory_timing_parameters(struct amdgpu_device *adev,
+					       struct amdgpu_ps *amdgpu_new_state)
+{
+	return si_do_program_memory_timing_parameters(adev, amdgpu_new_state,
+						      SISLANDS_DRIVER_STATE_ARB_INDEX);
+}
+
+static int si_populate_initial_mvdd_value(struct amdgpu_device *adev,
+					  struct SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	if (pi->mvdd_control)
+		return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table,
+						 si_pi->mvdd_bootup_value, voltage);
+
+	return 0;
+}
+
+static int si_populate_smc_initial_state(struct amdgpu_device *adev,
+					 struct amdgpu_ps *amdgpu_initial_state,
+					 SISLANDS_SMC_STATETABLE *table)
+{
+	struct  si_ps *initial_state = si_get_ps(amdgpu_initial_state);
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 reg;
+	int ret;
+
+	table->initialState.levels[0].mclk.vDLL_CNTL =
+		cpu_to_be32(si_pi->clock_registers.dll_cntl);
+	table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+		cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
+	table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+		cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
+	table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+		cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
+	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
+		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
+	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
+	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
+	table->initialState.levels[0].mclk.vMPLL_SS =
+		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
+	table->initialState.levels[0].mclk.vMPLL_SS2 =
+		cpu_to_be32(si_pi->clock_registers.mpll_ss2);
+
+	table->initialState.levels[0].mclk.mclk_value =
+		cpu_to_be32(initial_state->performance_levels[0].mclk);
+
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
+	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
+	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+		cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
+	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2  =
+		cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
+
+	table->initialState.levels[0].sclk.sclk_value =
+		cpu_to_be32(initial_state->performance_levels[0].sclk);
+
+	table->initialState.levels[0].arbRefreshState =
+		SISLANDS_INITIAL_STATE_ARB_INDEX;
+
+	table->initialState.levels[0].ACIndex = 0;
+
+	ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+					initial_state->performance_levels[0].vddc,
+					&table->initialState.levels[0].vddc);
+
+	if (!ret) {
+		u16 std_vddc;
+
+		ret = si_get_std_voltage_value(adev,
+					       &table->initialState.levels[0].vddc,
+					       &std_vddc);
+		if (!ret)
+			si_populate_std_voltage_value(adev, std_vddc,
+						      table->initialState.levels[0].vddc.index,
+						      &table->initialState.levels[0].std_vddc);
+	}
+
+	if (eg_pi->vddci_control)
+		si_populate_voltage_value(adev,
+					  &eg_pi->vddci_voltage_table,
+					  initial_state->performance_levels[0].vddci,
+					  &table->initialState.levels[0].vddci);
+
+	if (si_pi->vddc_phase_shed_control)
+		si_populate_phase_shedding_value(adev,
+						 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+						 initial_state->performance_levels[0].vddc,
+						 initial_state->performance_levels[0].sclk,
+						 initial_state->performance_levels[0].mclk,
+						 &table->initialState.levels[0].vddc);
+
+	si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd);
+
+	reg = CG_R(0xffff) | CG_L(0);
+	table->initialState.levels[0].aT = cpu_to_be32(reg);
+	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
+	table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
+
+	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+		table->initialState.levels[0].strobeMode =
+			si_get_strobe_mode_settings(adev,
+						    initial_state->performance_levels[0].mclk);
+
+		if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
+			table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
+		else
+			table->initialState.levels[0].mcFlags =  0;
+	}
+
+	table->initialState.levelCount = 1;
+
+	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
+
+	table->initialState.levels[0].dpm2.MaxPS = 0;
+	table->initialState.levels[0].dpm2.NearTDPDec = 0;
+	table->initialState.levels[0].dpm2.AboveSafeInc = 0;
+	table->initialState.levels[0].dpm2.BelowSafeInc = 0;
+	table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+	reg = MIN_POWER_MASK | MAX_POWER_MASK;
+	table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+
+	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+	table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+
+	return 0;
+}
+
+static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
+				      SISLANDS_SMC_STATETABLE *table)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
+	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
+	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
+	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
+	u32 dll_cntl = si_pi->clock_registers.dll_cntl;
+	u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
+	u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
+	u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
+	u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
+	u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
+	u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
+	u32 reg;
+	int ret;
+
+	table->ACPIState = table->initialState;
+
+	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+	if (pi->acpi_vddc) {
+		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+						pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
+		if (!ret) {
+			u16 std_vddc;
+
+			ret = si_get_std_voltage_value(adev,
+						       &table->ACPIState.levels[0].vddc, &std_vddc);
+			if (!ret)
+				si_populate_std_voltage_value(adev, std_vddc,
+							      table->ACPIState.levels[0].vddc.index,
+							      &table->ACPIState.levels[0].std_vddc);
+		}
+		table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
+
+		if (si_pi->vddc_phase_shed_control) {
+			si_populate_phase_shedding_value(adev,
+							 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+							 pi->acpi_vddc,
+							 0,
+							 0,
+							 &table->ACPIState.levels[0].vddc);
+		}
+	} else {
+		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+						pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
+		if (!ret) {
+			u16 std_vddc;
+
+			ret = si_get_std_voltage_value(adev,
+						       &table->ACPIState.levels[0].vddc, &std_vddc);
+
+			if (!ret)
+				si_populate_std_voltage_value(adev, std_vddc,
+							      table->ACPIState.levels[0].vddc.index,
+							      &table->ACPIState.levels[0].std_vddc);
+		}
+		table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
+										    si_pi->sys_pcie_mask,
+										    si_pi->boot_pcie_gen,
+										    AMDGPU_PCIE_GEN1);
+
+		if (si_pi->vddc_phase_shed_control)
+			si_populate_phase_shedding_value(adev,
+							 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+							 pi->min_vddc_in_table,
+							 0,
+							 0,
+							 &table->ACPIState.levels[0].vddc);
+	}
+
+	if (pi->acpi_vddc) {
+		if (eg_pi->acpi_vddci)
+			si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
+						  eg_pi->acpi_vddci,
+						  &table->ACPIState.levels[0].vddci);
+	}
+
+	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
+	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
+
+	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+
+	table->ACPIState.levels[0].mclk.vDLL_CNTL =
+		cpu_to_be32(dll_cntl);
+	table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+		cpu_to_be32(mclk_pwrmgt_cntl);
+	table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+		cpu_to_be32(mpll_ad_func_cntl);
+	table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+		cpu_to_be32(mpll_dq_func_cntl);
+	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
+		cpu_to_be32(mpll_func_cntl);
+	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+		cpu_to_be32(mpll_func_cntl_1);
+	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+		cpu_to_be32(mpll_func_cntl_2);
+	table->ACPIState.levels[0].mclk.vMPLL_SS =
+		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
+	table->ACPIState.levels[0].mclk.vMPLL_SS2 =
+		cpu_to_be32(si_pi->clock_registers.mpll_ss2);
+
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+		cpu_to_be32(spll_func_cntl);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+		cpu_to_be32(spll_func_cntl_2);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+		cpu_to_be32(spll_func_cntl_3);
+	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+		cpu_to_be32(spll_func_cntl_4);
+
+	table->ACPIState.levels[0].mclk.mclk_value = 0;
+	table->ACPIState.levels[0].sclk.sclk_value = 0;
+
+	si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd);
+
+	if (eg_pi->dynamic_ac_timing)
+		table->ACPIState.levels[0].ACIndex = 0;
+
+	table->ACPIState.levels[0].dpm2.MaxPS = 0;
+	table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
+	table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
+	table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
+	table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+	reg = MIN_POWER_MASK | MAX_POWER_MASK;
+	table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+
+	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+	table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+
+	return 0;
+}
+
+static int si_populate_ulv_state(struct amdgpu_device *adev,
+				 SISLANDS_SMC_SWSTATE *state)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct si_ulv_param *ulv = &si_pi->ulv;
+	u32 sclk_in_sr = 1350; /* ??? */
+	int ret;
+
+	ret = si_convert_power_level_to_smc(adev, &ulv->pl,
+					    &state->levels[0]);
+	if (!ret) {
+		if (eg_pi->sclk_deep_sleep) {
+			if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
+				state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+			else
+				state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+		}
+		if (ulv->one_pcie_lane_in_ulv)
+			state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
+		state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
+		state->levels[0].ACIndex = 1;
+		state->levels[0].std_vddc = state->levels[0].vddc;
+		state->levelCount = 1;
+
+		state->flags |= PPSMC_SWSTATE_FLAG_DC;
+	}
+
+	return ret;
+}
+
+static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct si_ulv_param *ulv = &si_pi->ulv;
+	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
+	int ret;
+
+	ret = si_populate_memory_timing_parameters(adev, &ulv->pl,
+						   &arb_regs);
+	if (ret)
+		return ret;
+
+	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
+				   ulv->volt_change_delay);
+
+	ret = amdgpu_si_copy_bytes_to_smc(adev,
+					  si_pi->arb_table_start +
+					  offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
+					  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
+					  (u8 *)&arb_regs,
+					  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
+					  si_pi->sram_end);
+
+	return ret;
+}
+
+static void si_get_mvdd_configuration(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+	pi->mvdd_split_frequency = 30000;
+}
+
+static int si_init_smc_table(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
+	const struct si_ulv_param *ulv = &si_pi->ulv;
+	SISLANDS_SMC_STATETABLE  *table = &si_pi->smc_statetable;
+	int ret;
+	u32 lane_width;
+	u32 vr_hot_gpio;
+
+	si_populate_smc_voltage_tables(adev, table);
+
+	switch (adev->pm.int_thermal_type) {
+	case THERMAL_TYPE_SI:
+	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
+		break;
+	case THERMAL_TYPE_NONE:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
+		break;
+	default:
+		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
+		break;
+	}
+
+	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
+		if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819))
+			table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
+	}
+
+	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
+		table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
+
+	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
+		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
+		vr_hot_gpio = adev->pm.dpm.backbias_response_time;
+		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
+					   vr_hot_gpio);
+	}
+
+	ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table);
+	if (ret)
+		return ret;
+
+	ret = si_populate_smc_acpi_state(adev, table);
+	if (ret)
+		return ret;
+
+	table->driverState = table->initialState;
+
+	ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
+						     SISLANDS_INITIAL_STATE_ARB_INDEX);
+	if (ret)
+		return ret;
+
+	if (ulv->supported && ulv->pl.vddc) {
+		ret = si_populate_ulv_state(adev, &table->ULVState);
+		if (ret)
+			return ret;
+
+		ret = si_program_ulv_memory_timing_parameters(adev);
+		if (ret)
+			return ret;
+
+		WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
+		WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
+
+		lane_width = amdgpu_get_pcie_lanes(adev);
+		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
+	} else {
+		table->ULVState = table->initialState;
+	}
+
+	return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start,
+					   (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
+					   si_pi->sram_end);
+}
+
+static int si_calculate_sclk_params(struct amdgpu_device *adev,
+				    u32 engine_clock,
+				    SISLANDS_SMC_SCLK_VALUE *sclk)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct atom_clock_dividers dividers;
+	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
+	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
+	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
+	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
+	u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
+	u64 tmp;
+	u32 reference_clock = adev->clock.spll.reference_freq;
+	u32 reference_divider;
+	u32 fbdiv;
+	int ret;
+
+	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+					     engine_clock, false, &dividers);
+	if (ret)
+		return ret;
+
+	reference_divider = 1 + dividers.ref_div;
+
+	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
+	do_div(tmp, reference_clock);
+	fbdiv = (u32) tmp;
+
+	spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
+	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
+	spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
+
+	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+	spll_func_cntl_2 |= SCLK_MUX_SEL(2);
+
+	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+	spll_func_cntl_3 |= SPLL_DITHEN;
+
+	if (pi->sclk_ss) {
+		struct amdgpu_atom_ss ss;
+		u32 vco_freq = engine_clock * dividers.post_div;
+
+		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
+						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
+			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
+			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
+
+			cg_spll_spread_spectrum &= ~CLK_S_MASK;
+			cg_spll_spread_spectrum |= CLK_S(clk_s);
+			cg_spll_spread_spectrum |= SSEN;
+
+			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
+			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+		}
+	}
+
+	sclk->sclk_value = engine_clock;
+	sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
+	sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
+	sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
+	sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
+	sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
+	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;
+
+	return 0;
+}
+
+static int si_populate_sclk_value(struct amdgpu_device *adev,
+				  u32 engine_clock,
+				  SISLANDS_SMC_SCLK_VALUE *sclk)
+{
+	SISLANDS_SMC_SCLK_VALUE sclk_tmp;
+	int ret;
+
+	ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp);
+	if (!ret) {
+		sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
+		sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
+		sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
+		sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
+		sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
+		sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
+		sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
+	}
+
+	return ret;
+}
+
+static int si_populate_mclk_value(struct amdgpu_device *adev,
+				  u32 engine_clock,
+				  u32 memory_clock,
+				  SISLANDS_SMC_MCLK_VALUE *mclk,
+				  bool strobe_mode,
+				  bool dll_state_on)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32  dll_cntl = si_pi->clock_registers.dll_cntl;
+	u32  mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
+	u32  mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
+	u32  mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
+	u32  mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
+	u32  mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
+	u32  mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
+	u32  mpll_ss1 = si_pi->clock_registers.mpll_ss1;
+	u32  mpll_ss2 = si_pi->clock_registers.mpll_ss2;
+	struct atom_mpll_param mpll_param;
+	int ret;
+
+	ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
+	if (ret)
+		return ret;
+
+	mpll_func_cntl &= ~BWCTRL_MASK;
+	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
+
+	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
+	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
+		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
+
+	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
+	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
+
+	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
+		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
+			YCLK_POST_DIV(mpll_param.post_div);
+	}
+
+	if (pi->mclk_ss) {
+		struct amdgpu_atom_ss ss;
+		u32 freq_nom;
+		u32 tmp;
+		u32 reference_clock = adev->clock.mpll.reference_freq;
+
+		if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+			freq_nom = memory_clock * 4;
+		else
+			freq_nom = memory_clock * 2;
+
+		tmp = freq_nom / reference_clock;
+		tmp = tmp * tmp;
+		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
+		                                     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
+			u32 clks = reference_clock * 5 / ss.rate;
+			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
+
+		        mpll_ss1 &= ~CLKV_MASK;
+		        mpll_ss1 |= CLKV(clkv);
+
+		        mpll_ss2 &= ~CLKS_MASK;
+		        mpll_ss2 |= CLKS(clks);
+		}
+	}
+
+	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
+	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
+
+	if (dll_state_on)
+		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
+	else
+		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+	mclk->mclk_value = cpu_to_be32(memory_clock);
+	mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
+	mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
+	mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
+	mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
+	mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
+	mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
+	mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
+	mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
+	mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);
+
+	return 0;
+}
+
+static void si_populate_smc_sp(struct amdgpu_device *adev,
+			       struct amdgpu_ps *amdgpu_state,
+			       SISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct  si_ps *ps = si_get_ps(amdgpu_state);
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	int i;
+
+	for (i = 0; i < ps->performance_level_count - 1; i++)
+		smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
+
+	smc_state->levels[ps->performance_level_count - 1].bSP =
+		cpu_to_be32(pi->psp);
+}
+
+static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
+					 struct rv7xx_pl *pl,
+					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	int ret;
+	bool dll_state_on;
+	u16 std_vddc;
+	bool gmc_pg = false;
+
+	if (eg_pi->pcie_performance_request &&
+	    (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID))
+		level->gen2PCIE = (u8)si_pi->force_pcie_gen;
+	else
+		level->gen2PCIE = (u8)pl->pcie_gen;
+
+	ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk);
+	if (ret)
+		return ret;
+
+	level->mcFlags =  0;
+
+	if (pi->mclk_stutter_mode_threshold &&
+	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
+	    !eg_pi->uvd_enabled &&
+	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
+	    (adev->pm.dpm.new_active_crtc_count <= 2)) {
+		level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
+
+		if (gmc_pg)
+			level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
+	}
+
+	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+		if (pl->mclk > pi->mclk_edc_enable_threshold)
+			level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;
+
+		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
+			level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;
+
+		level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);
+
+		if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
+			if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
+			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
+				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+			else
+				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
+		} else {
+			dll_state_on = false;
+		}
+	} else {
+		level->strobeMode = si_get_strobe_mode_settings(adev,
+								pl->mclk);
+
+		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+	}
+
+	ret = si_populate_mclk_value(adev,
+				     pl->sclk,
+				     pl->mclk,
+				     &level->mclk,
+				     (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
+	if (ret)
+		return ret;
+
+	ret = si_populate_voltage_value(adev,
+					&eg_pi->vddc_voltage_table,
+					pl->vddc, &level->vddc);
+	if (ret)
+		return ret;
+
+
+	ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc);
+	if (ret)
+		return ret;
+
+	ret = si_populate_std_voltage_value(adev, std_vddc,
+					    level->vddc.index, &level->std_vddc);
+	if (ret)
+		return ret;
+
+	if (eg_pi->vddci_control) {
+		ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
+						pl->vddci, &level->vddci);
+		if (ret)
+			return ret;
+	}
+
+	if (si_pi->vddc_phase_shed_control) {
+		ret = si_populate_phase_shedding_value(adev,
+						       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+						       pl->vddc,
+						       pl->sclk,
+						       pl->mclk,
+						       &level->vddc);
+		if (ret)
+			return ret;
+	}
+
+	level->MaxPoweredUpCU = si_pi->max_cu;
+
+	ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd);
+
+	return ret;
+}
+
+static int si_populate_smc_t(struct amdgpu_device *adev,
+			     struct amdgpu_ps *amdgpu_state,
+			     SISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct  si_ps *state = si_get_ps(amdgpu_state);
+	u32 a_t;
+	u32 t_l, t_h;
+	u32 high_bsp;
+	int i, ret;
+
+	if (state->performance_level_count >= 9)
+		return -EINVAL;
+
+	if (state->performance_level_count < 2) {
+		a_t = CG_R(0xffff) | CG_L(0);
+		smc_state->levels[0].aT = cpu_to_be32(a_t);
+		return 0;
+	}
+
+	smc_state->levels[0].aT = cpu_to_be32(0);
+
+	for (i = 0; i <= state->performance_level_count - 2; i++) {
+		ret = r600_calculate_at(
+			(50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
+			100 * R600_AH_DFLT,
+			state->performance_levels[i + 1].sclk,
+			state->performance_levels[i].sclk,
+			&t_l,
+			&t_h);
+
+		if (ret) {
+			t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
+			t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
+		}
+
+		a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
+		a_t |= CG_R(t_l * pi->bsp / 20000);
+		smc_state->levels[i].aT = cpu_to_be32(a_t);
+
+		high_bsp = (i == state->performance_level_count - 2) ?
+			pi->pbsp : pi->bsp;
+		a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
+		smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
+	}
+
+	return 0;
+}
+
+static int si_disable_ulv(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct si_ulv_param *ulv = &si_pi->ulv;
+
+	if (ulv->supported)
+		return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
+			0 : -EINVAL;
+
+	return 0;
+}
+
+static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
+				       struct amdgpu_ps *amdgpu_state)
+{
+	const struct si_power_info *si_pi = si_get_pi(adev);
+	const struct si_ulv_param *ulv = &si_pi->ulv;
+	const struct  si_ps *state = si_get_ps(amdgpu_state);
+	int i;
+
+	if (state->performance_levels[0].mclk != ulv->pl.mclk)
+		return false;
+
+	/* XXX validate against display requirements! */
+
+	for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
+		if (adev->clock.current_dispclk <=
+		    adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
+			if (ulv->pl.vddc <
+			    adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
+				return false;
+		}
+	}
+
+	if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0))
+		return false;
+
+	return true;
+}
+
+static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev,
+						       struct amdgpu_ps *amdgpu_new_state)
+{
+	const struct si_power_info *si_pi = si_get_pi(adev);
+	const struct si_ulv_param *ulv = &si_pi->ulv;
+
+	if (ulv->supported) {
+		if (si_is_state_ulv_compatible(adev, amdgpu_new_state))
+			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
+				0 : -EINVAL;
+	}
+	return 0;
+}
+
+static int si_convert_power_state_to_smc(struct amdgpu_device *adev,
+					 struct amdgpu_ps *amdgpu_state,
+					 SISLANDS_SMC_SWSTATE *smc_state)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct ni_power_info *ni_pi = ni_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct  si_ps *state = si_get_ps(amdgpu_state);
+	int i, ret;
+	u32 threshold;
+	u32 sclk_in_sr = 1350; /* ??? */
+
+	if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
+		return -EINVAL;
+
+	threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;
+
+	if (amdgpu_state->vclk && amdgpu_state->dclk) {
+		eg_pi->uvd_enabled = true;
+		if (eg_pi->smu_uvd_hs)
+			smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
+	} else {
+		eg_pi->uvd_enabled = false;
+	}
+
+	if (state->dc_compatible)
+		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
+
+	smc_state->levelCount = 0;
+	for (i = 0; i < state->performance_level_count; i++) {
+		if (eg_pi->sclk_deep_sleep) {
+			if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
+				if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
+					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+				else
+					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+			}
+		}
+
+		ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i],
+						    &smc_state->levels[i]);
+		smc_state->levels[i].arbRefreshState =
+			(u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);
+
+		if (ret)
+			return ret;
+
+		if (ni_pi->enable_power_containment)
+			smc_state->levels[i].displayWatermark =
+				(state->performance_levels[i].sclk < threshold) ?
+				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
+		else
+			smc_state->levels[i].displayWatermark = (i < 2) ?
+				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
+
+		if (eg_pi->dynamic_ac_timing)
+			smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
+		else
+			smc_state->levels[i].ACIndex = 0;
+
+		smc_state->levelCount++;
+	}
+
+	si_write_smc_soft_register(adev,
+				   SI_SMC_SOFT_REGISTER_watermark_threshold,
+				   threshold / 512);
+
+	si_populate_smc_sp(adev, amdgpu_state, smc_state);
+
+	ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state);
+	if (ret)
+		ni_pi->enable_power_containment = false;
+
+	ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state);
+	if (ret)
+		ni_pi->enable_sq_ramping = false;
+
+	return si_populate_smc_t(adev, amdgpu_state, smc_state);
+}
+
+static int si_upload_sw_state(struct amdgpu_device *adev,
+			      struct amdgpu_ps *amdgpu_new_state)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct  si_ps *new_state = si_get_ps(amdgpu_new_state);
+	int ret;
+	u32 address = si_pi->state_table_start +
+		offsetof(SISLANDS_SMC_STATETABLE, driverState);
+	u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
+		((new_state->performance_level_count - 1) *
+		 sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
+	SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;
+
+	memset(smc_state, 0, state_size);
+
+	ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state);
+	if (ret)
+		return ret;
+
+	return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
+					   state_size, si_pi->sram_end);
+}
+
+static int si_upload_ulv_state(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct si_ulv_param *ulv = &si_pi->ulv;
+	int ret = 0;
+
+	if (ulv->supported && ulv->pl.vddc) {
+		u32 address = si_pi->state_table_start +
+			offsetof(SISLANDS_SMC_STATETABLE, ULVState);
+		SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
+		u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
+
+		memset(smc_state, 0, state_size);
+
+		ret = si_populate_ulv_state(adev, smc_state);
+		if (!ret)
+			ret = amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
+							  state_size, si_pi->sram_end);
+	}
+
+	return ret;
+}
+
+static int si_upload_smc_data(struct amdgpu_device *adev)
+{
+	struct amdgpu_crtc *amdgpu_crtc = NULL;
+	int i;
+
+	if (adev->pm.dpm.new_active_crtc_count == 0)
+		return 0;
+
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
+			amdgpu_crtc = adev->mode_info.crtcs[i];
+			break;
+		}
+	}
+
+	if (amdgpu_crtc == NULL)
+		return 0;
+
+	if (amdgpu_crtc->line_time <= 0)
+		return 0;
+
+	if (si_write_smc_soft_register(adev,
+				       SI_SMC_SOFT_REGISTER_crtc_index,
+				       amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
+		return 0;
+
+	if (si_write_smc_soft_register(adev,
+				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
+				       amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
+		return 0;
+
+	if (si_write_smc_soft_register(adev,
+				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
+				       amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
+		return 0;
+
+	return 0;
+}
+
+static int si_set_mc_special_registers(struct amdgpu_device *adev,
+				       struct si_mc_reg_table *table)
+{
+	u8 i, j, k;
+	u32 temp_reg;
+
+	for (i = 0, j = table->last; i < table->last; i++) {
+		if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+			return -EINVAL;
+		switch (table->mc_reg_address[i].s1) {
+		case MC_SEQ_MISC1:
+			temp_reg = RREG32(MC_PMG_CMD_EMRS);
+			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS;
+			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP;
+			for (k = 0; k < table->num_entries; k++)
+				table->mc_reg_table_entry[k].mc_data[j] =
+					((temp_reg & 0xffff0000)) |
+					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+			j++;
+			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+
+			temp_reg = RREG32(MC_PMG_CMD_MRS);
+			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
+			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) |
+					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+				if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
+					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+			}
+			j++;
+			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+
+			if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
+				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
+				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
+				for (k = 0; k < table->num_entries; k++)
+					table->mc_reg_table_entry[k].mc_data[j] =
+						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+				j++;
+				if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+					return -EINVAL;
+			}
+			break;
+		case MC_SEQ_RESERVE_M:
+			temp_reg = RREG32(MC_PMG_CMD_MRS1);
+			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1;
+			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP;
+			for(k = 0; k < table->num_entries; k++)
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) |
+					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+			j++;
+			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+			break;
+		default:
+			break;
+		}
+	}
+
+	table->last = j;
+
+	return 0;
+}
+
+static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
+{
+	bool result = true;
+	switch (in_reg) {
+	case  MC_SEQ_RAS_TIMING:
+		*out_reg = MC_SEQ_RAS_TIMING_LP;
+		break;
+	case MC_SEQ_CAS_TIMING:
+		*out_reg = MC_SEQ_CAS_TIMING_LP;
+		break;
+	case MC_SEQ_MISC_TIMING:
+		*out_reg = MC_SEQ_MISC_TIMING_LP;
+		break;
+	case MC_SEQ_MISC_TIMING2:
+		*out_reg = MC_SEQ_MISC_TIMING2_LP;
+		break;
+	case MC_SEQ_RD_CTL_D0:
+		*out_reg = MC_SEQ_RD_CTL_D0_LP;
+		break;
+	case MC_SEQ_RD_CTL_D1:
+		*out_reg = MC_SEQ_RD_CTL_D1_LP;
+		break;
+	case MC_SEQ_WR_CTL_D0:
+		*out_reg = MC_SEQ_WR_CTL_D0_LP;
+		break;
+	case MC_SEQ_WR_CTL_D1:
+		*out_reg = MC_SEQ_WR_CTL_D1_LP;
+		break;
+	case MC_PMG_CMD_EMRS:
+		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP;
+		break;
+	case MC_PMG_CMD_MRS:
+		*out_reg = MC_SEQ_PMG_CMD_MRS_LP;
+		break;
+	case MC_PMG_CMD_MRS1:
+		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP;
+		break;
+	case MC_SEQ_PMG_TIMING:
+		*out_reg = MC_SEQ_PMG_TIMING_LP;
+		break;
+	case MC_PMG_CMD_MRS2:
+		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP;
+		break;
+	case MC_SEQ_WR_CTL_2:
+		*out_reg = MC_SEQ_WR_CTL_2_LP;
+		break;
+	default:
+		result = false;
+		break;
+	}
+
+	return result;
+}
+
+static void si_set_valid_flag(struct si_mc_reg_table *table)
+{
+	u8 i, j;
+
+	for (i = 0; i < table->last; i++) {
+		for (j = 1; j < table->num_entries; j++) {
+			if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
+				table->valid_flag |= 1 << i;
+				break;
+			}
+		}
+	}
+}
+
+static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
+{
+	u32 i;
+	u16 address;
+
+	for (i = 0; i < table->last; i++)
+		table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
+			address : table->mc_reg_address[i].s1;
+
+}
+
+static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
+				      struct si_mc_reg_table *si_table)
+{
+	u8 i, j;
+
+	if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+		return -EINVAL;
+	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
+		return -EINVAL;
+
+	for (i = 0; i < table->last; i++)
+		si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+	si_table->last = table->last;
+
+	for (i = 0; i < table->num_entries; i++) {
+		si_table->mc_reg_table_entry[i].mclk_max =
+			table->mc_reg_table_entry[i].mclk_max;
+		for (j = 0; j < table->last; j++) {
+			si_table->mc_reg_table_entry[i].mc_data[j] =
+				table->mc_reg_table_entry[i].mc_data[j];
+		}
+	}
+	si_table->num_entries = table->num_entries;
+
+	return 0;
+}
+
+static int si_initialize_mc_reg_table(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct atom_mc_reg_table *table;
+	struct si_mc_reg_table *si_table = &si_pi->mc_reg_table;
+	u8 module_index = rv770_get_memory_module_index(adev);
+	int ret;
+
+	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
+	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
+	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
+	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
+	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
+	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
+	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
+	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
+	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
+	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
+	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
+	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
+	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
+	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
+
+	ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
+	if (ret)
+		goto init_mc_done;
+
+	ret = si_copy_vbios_mc_reg_table(table, si_table);
+	if (ret)
+		goto init_mc_done;
+
+	si_set_s0_mc_reg_index(si_table);
+
+	ret = si_set_mc_special_registers(adev, si_table);
+	if (ret)
+		goto init_mc_done;
+
+	si_set_valid_flag(si_table);
+
+init_mc_done:
+	kfree(table);
+
+	return ret;
+
+}
+
+static void si_populate_mc_reg_addresses(struct amdgpu_device *adev,
+					 SMC_SIslands_MCRegisters *mc_reg_table)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 i, j;
+
+	for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
+		if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
+			if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+				break;
+			mc_reg_table->address[i].s0 =
+				cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
+			mc_reg_table->address[i].s1 =
+				cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1);
+			i++;
+		}
+	}
+	mc_reg_table->last = (u8)i;
+}
+
+static void si_convert_mc_registers(const struct si_mc_reg_entry *entry,
+				    SMC_SIslands_MCRegisterSet *data,
+				    u32 num_entries, u32 valid_flag)
+{
+	u32 i, j;
+
+	for(i = 0, j = 0; j < num_entries; j++) {
+		if (valid_flag & (1 << j)) {
+			data->value[i] = cpu_to_be32(entry->mc_data[j]);
+			i++;
+		}
+	}
+}
+
+static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
+						 struct rv7xx_pl *pl,
+						 SMC_SIslands_MCRegisterSet *mc_reg_table_data)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 i = 0;
+
+	for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) {
+		if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
+			break;
+	}
+
+	if ((i == si_pi->mc_reg_table.num_entries) && (i > 0))
+		--i;
+
+	si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i],
+				mc_reg_table_data, si_pi->mc_reg_table.last,
+				si_pi->mc_reg_table.valid_flag);
+}
+
+static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
+					   struct amdgpu_ps *amdgpu_state,
+					   SMC_SIslands_MCRegisters *mc_reg_table)
+{
+	struct si_ps *state = si_get_ps(amdgpu_state);
+	int i;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		si_convert_mc_reg_table_entry_to_smc(adev,
+						     &state->performance_levels[i],
+						     &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
+	}
+}
+
+static int si_populate_mc_reg_table(struct amdgpu_device *adev,
+				    struct amdgpu_ps *amdgpu_boot_state)
+{
+	struct  si_ps *boot_state = si_get_ps(amdgpu_boot_state);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct si_ulv_param *ulv = &si_pi->ulv;
+	SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
+
+	memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
+
+	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1);
+
+	si_populate_mc_reg_addresses(adev, smc_mc_reg_table);
+
+	si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0],
+					     &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]);
+
+	si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
+				&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT],
+				si_pi->mc_reg_table.last,
+				si_pi->mc_reg_table.valid_flag);
+
+	if (ulv->supported && ulv->pl.vddc != 0)
+		si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl,
+						     &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]);
+	else
+		si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
+					&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT],
+					si_pi->mc_reg_table.last,
+					si_pi->mc_reg_table.valid_flag);
+
+	si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table);
+
+	return amdgpu_si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start,
+					   (u8 *)smc_mc_reg_table,
+					   sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end);
+}
+
+static int si_upload_mc_reg_table(struct amdgpu_device *adev,
+				  struct amdgpu_ps *amdgpu_new_state)
+{
+	struct si_ps *new_state = si_get_ps(amdgpu_new_state);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 address = si_pi->mc_reg_table_start +
+		offsetof(SMC_SIslands_MCRegisters,
+			 data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
+	SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
+
+	memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
+
+	si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table);
+
+	return amdgpu_si_copy_bytes_to_smc(adev, address,
+					   (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
+					   sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count,
+					   si_pi->sram_end);
+}
+
+static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
+{
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+	else
+		WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+}
+
+static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
+						      struct amdgpu_ps *amdgpu_state)
+{
+	struct si_ps *state = si_get_ps(amdgpu_state);
+	int i;
+	u16 pcie_speed, max_speed = 0;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		pcie_speed = state->performance_levels[i].pcie_gen;
+		if (max_speed < pcie_speed)
+			max_speed = pcie_speed;
+	}
+	return max_speed;
+}
+
+static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
+{
+	u32 speed_cntl;
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
+	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
+
+	return (u16)speed_cntl;
+}
+
+static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
+							     struct amdgpu_ps *amdgpu_new_state,
+							     struct amdgpu_ps *amdgpu_current_state)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
+	enum amdgpu_pcie_gen current_link_speed;
+
+	if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
+		current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state);
+	else
+		current_link_speed = si_pi->force_pcie_gen;
+
+	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+	si_pi->pspp_notify_required = false;
+	if (target_link_speed > current_link_speed) {
+		switch (target_link_speed) {
+#if defined(CONFIG_ACPI)
+		case AMDGPU_PCIE_GEN3:
+			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
+				break;
+			si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
+			if (current_link_speed == AMDGPU_PCIE_GEN2)
+				break;
+		case AMDGPU_PCIE_GEN2:
+			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
+				break;
+#endif
+		default:
+			si_pi->force_pcie_gen = si_get_current_pcie_speed(adev);
+			break;
+		}
+	} else {
+		if (target_link_speed < current_link_speed)
+			si_pi->pspp_notify_required = true;
+	}
+}
+
+static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
+							   struct amdgpu_ps *amdgpu_new_state,
+							   struct amdgpu_ps *amdgpu_current_state)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
+	u8 request;
+
+	if (si_pi->pspp_notify_required) {
+		if (target_link_speed == AMDGPU_PCIE_GEN3)
+			request = PCIE_PERF_REQ_PECI_GEN3;
+		else if (target_link_speed == AMDGPU_PCIE_GEN2)
+			request = PCIE_PERF_REQ_PECI_GEN2;
+		else
+			request = PCIE_PERF_REQ_PECI_GEN1;
+
+		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
+		    (si_get_current_pcie_speed(adev) > 0))
+			return;
+
+#if defined(CONFIG_ACPI)
+		amdgpu_acpi_pcie_performance_request(adev, request, false);
+#endif
+	}
+}
+
+#if 0
+static int si_ds_request(struct amdgpu_device *adev,
+			 bool ds_status_on, u32 count_write)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+
+	if (eg_pi->sclk_deep_sleep) {
+		if (ds_status_on)
+			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) ==
+				PPSMC_Result_OK) ?
+				0 : -EINVAL;
+		else
+			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) ==
+				PPSMC_Result_OK) ? 0 : -EINVAL;
+	}
+	return 0;
+}
+#endif
+
+static void si_set_max_cu_value(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+
+	if (adev->asic_type == CHIP_VERDE) {
+		switch (adev->pdev->device) {
+		case 0x6820:
+		case 0x6825:
+		case 0x6821:
+		case 0x6823:
+		case 0x6827:
+			si_pi->max_cu = 10;
+			break;
+		case 0x682D:
+		case 0x6824:
+		case 0x682F:
+		case 0x6826:
+			si_pi->max_cu = 8;
+			break;
+		case 0x6828:
+		case 0x6830:
+		case 0x6831:
+		case 0x6838:
+		case 0x6839:
+		case 0x683D:
+			si_pi->max_cu = 10;
+			break;
+		case 0x683B:
+		case 0x683F:
+		case 0x6829:
+			si_pi->max_cu = 8;
+			break;
+		default:
+			si_pi->max_cu = 0;
+			break;
+		}
+	} else {
+		si_pi->max_cu = 0;
+	}
+}
+
+static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev,
+							     struct amdgpu_clock_voltage_dependency_table *table)
+{
+	u32 i;
+	int j;
+	u16 leakage_voltage;
+
+	if (table) {
+		for (i = 0; i < table->count; i++) {
+			switch (si_get_leakage_voltage_from_leakage_index(adev,
+									  table->entries[i].v,
+									  &leakage_voltage)) {
+			case 0:
+				table->entries[i].v = leakage_voltage;
+				break;
+			case -EAGAIN:
+				return -EINVAL;
+			case -EINVAL:
+			default:
+				break;
+			}
+		}
+
+		for (j = (table->count - 2); j >= 0; j--) {
+			table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
+				table->entries[j].v : table->entries[j + 1].v;
+		}
+	}
+	return 0;
+}
+
+static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev)
+{
+	int ret = 0;
+
+	ret = si_patch_single_dependency_table_based_on_leakage(adev,
+								&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
+	if (ret)
+		DRM_ERROR("Could not patch vddc_on_sclk leakage table\n");
+	ret = si_patch_single_dependency_table_based_on_leakage(adev,
+								&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
+	if (ret)
+		DRM_ERROR("Could not patch vddc_on_mclk leakage table\n");
+	ret = si_patch_single_dependency_table_based_on_leakage(adev,
+								&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
+	if (ret)
+		DRM_ERROR("Could not patch vddci_on_mclk leakage table\n");
+	return ret;
+}
+
+static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
+					  struct amdgpu_ps *amdgpu_new_state,
+					  struct amdgpu_ps *amdgpu_current_state)
+{
+	u32 lane_width;
+	u32 new_lane_width =
+		(amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+	u32 current_lane_width =
+		(amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+
+	if (new_lane_width != current_lane_width) {
+		amdgpu_set_pcie_lanes(adev, new_lane_width);
+		lane_width = amdgpu_get_pcie_lanes(adev);
+		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
+	}
+}
+
+static void si_dpm_setup_asic(struct amdgpu_device *adev)
+{
+	si_read_clock_registers(adev);
+	si_enable_acpi_power_management(adev);
+}
+
+static int si_thermal_enable_alert(struct amdgpu_device *adev,
+				   bool enable)
+{
+	u32 thermal_int = RREG32(CG_THERMAL_INT);
+
+	if (enable) {
+		PPSMC_Result result;
+
+		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+		WREG32(CG_THERMAL_INT, thermal_int);
+		result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
+		if (result != PPSMC_Result_OK) {
+			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+			return -EINVAL;
+		}
+	} else {
+		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+		WREG32(CG_THERMAL_INT, thermal_int);
+	}
+
+	return 0;
+}
+
+static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
+					    int min_temp, int max_temp)
+{
+	int low_temp = 0 * 1000;
+	int high_temp = 255 * 1000;
+
+	if (low_temp < min_temp)
+		low_temp = min_temp;
+	if (high_temp > max_temp)
+		high_temp = max_temp;
+	if (high_temp < low_temp) {
+		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+		return -EINVAL;
+	}
+
+	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
+	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
+	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
+
+	adev->pm.dpm.thermal.min_temp = low_temp;
+	adev->pm.dpm.thermal.max_temp = high_temp;
+
+	return 0;
+}
+
+static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 tmp;
+
+	if (si_pi->fan_ctrl_is_in_default_mode) {
+		tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+		si_pi->fan_ctrl_default_mode = tmp;
+		tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+		si_pi->t_min = tmp;
+		si_pi->fan_ctrl_is_in_default_mode = false;
+	}
+
+	tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+	tmp |= TMIN(0);
+	WREG32(CG_FDO_CTRL2, tmp);
+
+	tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+	tmp |= FDO_PWM_MODE(mode);
+	WREG32(CG_FDO_CTRL2, tmp);
+}
+
+static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
+	u32 duty100;
+	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+	u16 fdo_min, slope1, slope2;
+	u32 reference_clock, tmp;
+	int ret;
+	u64 tmp64;
+
+	if (!si_pi->fan_table_start) {
+		adev->pm.dpm.fan.ucode_fan_control = false;
+		return 0;
+	}
+
+	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+	if (duty100 == 0) {
+		adev->pm.dpm.fan.ucode_fan_control = false;
+		return 0;
+	}
+
+	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
+	do_div(tmp64, 10000);
+	fdo_min = (u16)tmp64;
+
+	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
+	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
+
+	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
+	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
+
+	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+	fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
+	fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
+	fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
+	fan_table.slope1 = cpu_to_be16(slope1);
+	fan_table.slope2 = cpu_to_be16(slope2);
+	fan_table.fdo_min = cpu_to_be16(fdo_min);
+	fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
+	fan_table.hys_up = cpu_to_be16(1);
+	fan_table.hys_slope = cpu_to_be16(1);
+	fan_table.temp_resp_lim = cpu_to_be16(5);
+	reference_clock = amdgpu_asic_get_xclk(adev);
+
+	fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
+						reference_clock) / 1600);
+	fan_table.fdo_max = cpu_to_be16((u16)duty100);
+
+	tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+	fan_table.temp_src = (uint8_t)tmp;
+
+	ret = amdgpu_si_copy_bytes_to_smc(adev,
+					  si_pi->fan_table_start,
+					  (u8 *)(&fan_table),
+					  sizeof(fan_table),
+					  si_pi->sram_end);
+
+	if (ret) {
+		DRM_ERROR("Failed to load fan table to the SMC.");
+		adev->pm.dpm.fan.ucode_fan_control = false;
+	}
+
+	return ret;
+}
+
+static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	PPSMC_Result ret;
+
+	ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StartFanControl);
+	if (ret == PPSMC_Result_OK) {
+		si_pi->fan_is_controlled_by_smc = true;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+}
+
+static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	PPSMC_Result ret;
+
+	ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StopFanControl);
+
+	if (ret == PPSMC_Result_OK) {
+		si_pi->fan_is_controlled_by_smc = false;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+}
+
+static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
+				      u32 *speed)
+{
+	u32 duty, duty100;
+	u64 tmp64;
+
+	if (adev->pm.no_fan)
+		return -ENOENT;
+
+	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+	duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+
+	if (duty100 == 0)
+		return -EINVAL;
+
+	tmp64 = (u64)duty * 100;
+	do_div(tmp64, duty100);
+	*speed = (u32)tmp64;
+
+	if (*speed > 100)
+		*speed = 100;
+
+	return 0;
+}
+
+static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
+				      u32 speed)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 tmp;
+	u32 duty, duty100;
+	u64 tmp64;
+
+	if (adev->pm.no_fan)
+		return -ENOENT;
+
+	if (si_pi->fan_is_controlled_by_smc)
+		return -EINVAL;
+
+	if (speed > 100)
+		return -EINVAL;
+
+	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+	if (duty100 == 0)
+		return -EINVAL;
+
+	tmp64 = (u64)speed * duty100;
+	do_div(tmp64, 100);
+	duty = (u32)tmp64;
+
+	tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
+	tmp |= FDO_STATIC_DUTY(duty);
+	WREG32(CG_FDO_CTRL0, tmp);
+
+	return 0;
+}
+
+static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
+{
+	if (mode) {
+		/* stop auto-manage */
+		if (adev->pm.dpm.fan.ucode_fan_control)
+			si_fan_ctrl_stop_smc_fan_control(adev);
+		si_fan_ctrl_set_static_mode(adev, mode);
+	} else {
+		/* restart auto-manage */
+		if (adev->pm.dpm.fan.ucode_fan_control)
+			si_thermal_start_smc_fan_control(adev);
+		else
+			si_fan_ctrl_set_default_mode(adev);
+	}
+}
+
+static u32 si_dpm_get_fan_control_mode(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 tmp;
+
+	if (si_pi->fan_is_controlled_by_smc)
+		return 0;
+
+	tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
+	return (tmp >> FDO_PWM_MODE_SHIFT);
+}
+
+#if 0
+static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
+					 u32 *speed)
+{
+	u32 tach_period;
+	u32 xclk = amdgpu_asic_get_xclk(adev);
+
+	if (adev->pm.no_fan)
+		return -ENOENT;
+
+	if (adev->pm.fan_pulses_per_revolution == 0)
+		return -ENOENT;
+
+	tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+	if (tach_period == 0)
+		return -ENOENT;
+
+	*speed = 60 * xclk * 10000 / tach_period;
+
+	return 0;
+}
+
+static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
+					 u32 speed)
+{
+	u32 tach_period, tmp;
+	u32 xclk = amdgpu_asic_get_xclk(adev);
+
+	if (adev->pm.no_fan)
+		return -ENOENT;
+
+	if (adev->pm.fan_pulses_per_revolution == 0)
+		return -ENOENT;
+
+	if ((speed < adev->pm.fan_min_rpm) ||
+	    (speed > adev->pm.fan_max_rpm))
+		return -EINVAL;
+
+	if (adev->pm.dpm.fan.ucode_fan_control)
+		si_fan_ctrl_stop_smc_fan_control(adev);
+
+	tach_period = 60 * xclk * 10000 / (8 * speed);
+	tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
+	tmp |= TARGET_PERIOD(tach_period);
+	WREG32(CG_TACH_CTRL, tmp);
+
+	si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
+
+	return 0;
+}
+#endif
+
+static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
+{
+	struct si_power_info *si_pi = si_get_pi(adev);
+	u32 tmp;
+
+	if (!si_pi->fan_ctrl_is_in_default_mode) {
+		tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+		tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
+		WREG32(CG_FDO_CTRL2, tmp);
+
+		tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+		tmp |= TMIN(si_pi->t_min);
+		WREG32(CG_FDO_CTRL2, tmp);
+		si_pi->fan_ctrl_is_in_default_mode = true;
+	}
+}
+
+static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev)
+{
+	if (adev->pm.dpm.fan.ucode_fan_control) {
+		si_fan_ctrl_start_smc_fan_control(adev);
+		si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
+	}
+}
+
+static void si_thermal_initialize(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	if (adev->pm.fan_pulses_per_revolution) {
+		tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
+		tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution -1);
+		WREG32(CG_TACH_CTRL, tmp);
+	}
+
+	tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
+	tmp |= TACH_PWM_RESP_RATE(0x28);
+	WREG32(CG_FDO_CTRL2, tmp);
+}
+
+static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
+{
+	int ret;
+
+	si_thermal_initialize(adev);
+	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+	if (ret)
+		return ret;
+	ret = si_thermal_enable_alert(adev, true);
+	if (ret)
+		return ret;
+	if (adev->pm.dpm.fan.ucode_fan_control) {
+		ret = si_halt_smc(adev);
+		if (ret)
+			return ret;
+		ret = si_thermal_setup_fan_table(adev);
+		if (ret)
+			return ret;
+		ret = si_resume_smc(adev);
+		if (ret)
+			return ret;
+		si_thermal_start_smc_fan_control(adev);
+	}
+
+	return 0;
+}
+
+static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev)
+{
+	if (!adev->pm.no_fan) {
+		si_fan_ctrl_set_default_mode(adev);
+		si_fan_ctrl_stop_smc_fan_control(adev);
+	}
+}
+
+static int si_dpm_enable(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
+	int ret;
+
+	if (amdgpu_si_is_smc_running(adev))
+		return -EINVAL;
+	if (pi->voltage_control || si_pi->voltage_control_svi2)
+		si_enable_voltage_control(adev, true);
+	if (pi->mvdd_control)
+		si_get_mvdd_configuration(adev);
+	if (pi->voltage_control || si_pi->voltage_control_svi2) {
+		ret = si_construct_voltage_tables(adev);
+		if (ret) {
+			DRM_ERROR("si_construct_voltage_tables failed\n");
+			return ret;
+		}
+	}
+	if (eg_pi->dynamic_ac_timing) {
+		ret = si_initialize_mc_reg_table(adev);
+		if (ret)
+			eg_pi->dynamic_ac_timing = false;
+	}
+	if (pi->dynamic_ss)
+		si_enable_spread_spectrum(adev, true);
+	if (pi->thermal_protection)
+		si_enable_thermal_protection(adev, true);
+	si_setup_bsp(adev);
+	si_program_git(adev);
+	si_program_tp(adev);
+	si_program_tpp(adev);
+	si_program_sstp(adev);
+	si_enable_display_gap(adev);
+	si_program_vc(adev);
+	ret = si_upload_firmware(adev);
+	if (ret) {
+		DRM_ERROR("si_upload_firmware failed\n");
+		return ret;
+	}
+	ret = si_process_firmware_header(adev);
+	if (ret) {
+		DRM_ERROR("si_process_firmware_header failed\n");
+		return ret;
+	}
+	ret = si_initial_switch_from_arb_f0_to_f1(adev);
+	if (ret) {
+		DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
+		return ret;
+	}
+	ret = si_init_smc_table(adev);
+	if (ret) {
+		DRM_ERROR("si_init_smc_table failed\n");
+		return ret;
+	}
+	ret = si_init_smc_spll_table(adev);
+	if (ret) {
+		DRM_ERROR("si_init_smc_spll_table failed\n");
+		return ret;
+	}
+	ret = si_init_arb_table_index(adev);
+	if (ret) {
+		DRM_ERROR("si_init_arb_table_index failed\n");
+		return ret;
+	}
+	if (eg_pi->dynamic_ac_timing) {
+		ret = si_populate_mc_reg_table(adev, boot_ps);
+		if (ret) {
+			DRM_ERROR("si_populate_mc_reg_table failed\n");
+			return ret;
+		}
+	}
+	ret = si_initialize_smc_cac_tables(adev);
+	if (ret) {
+		DRM_ERROR("si_initialize_smc_cac_tables failed\n");
+		return ret;
+	}
+	ret = si_initialize_hardware_cac_manager(adev);
+	if (ret) {
+		DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
+		return ret;
+	}
+	ret = si_initialize_smc_dte_tables(adev);
+	if (ret) {
+		DRM_ERROR("si_initialize_smc_dte_tables failed\n");
+		return ret;
+	}
+	ret = si_populate_smc_tdp_limits(adev, boot_ps);
+	if (ret) {
+		DRM_ERROR("si_populate_smc_tdp_limits failed\n");
+		return ret;
+	}
+	ret = si_populate_smc_tdp_limits_2(adev, boot_ps);
+	if (ret) {
+		DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
+		return ret;
+	}
+	si_program_response_times(adev);
+	si_program_ds_registers(adev);
+	si_dpm_start_smc(adev);
+	ret = si_notify_smc_display_change(adev, false);
+	if (ret) {
+		DRM_ERROR("si_notify_smc_display_change failed\n");
+		return ret;
+	}
+	si_enable_sclk_control(adev, true);
+	si_start_dpm(adev);
+
+	si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+	si_thermal_start_thermal_controller(adev);
+	ni_update_current_ps(adev, boot_ps);
+
+	return 0;
+}
+
+static int si_set_temperature_range(struct amdgpu_device *adev)
+{
+	int ret;
+
+	ret = si_thermal_enable_alert(adev, false);
+	if (ret)
+		return ret;
+	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+	if (ret)
+		return ret;
+	ret = si_thermal_enable_alert(adev, true);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+static void si_dpm_disable(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
+
+	if (!amdgpu_si_is_smc_running(adev))
+		return;
+	si_thermal_stop_thermal_controller(adev);
+	si_disable_ulv(adev);
+	si_clear_vc(adev);
+	if (pi->thermal_protection)
+		si_enable_thermal_protection(adev, false);
+	si_enable_power_containment(adev, boot_ps, false);
+	si_enable_smc_cac(adev, boot_ps, false);
+	si_enable_spread_spectrum(adev, false);
+	si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+	si_stop_dpm(adev);
+	si_reset_to_default(adev);
+	si_dpm_stop_smc(adev);
+	si_force_switch_to_arb_f0(adev);
+
+	ni_update_current_ps(adev, boot_ps);
+}
+
+static int si_dpm_pre_set_power_state(struct amdgpu_device *adev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
+	struct amdgpu_ps *new_ps = &requested_ps;
+
+	ni_update_requested_ps(adev, new_ps);
+	si_apply_state_adjust_rules(adev, &eg_pi->requested_rps);
+
+	return 0;
+}
+
+static int si_power_control_set_level(struct amdgpu_device *adev)
+{
+	struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps;
+	int ret;
+
+	ret = si_restrict_performance_levels_before_switch(adev);
+	if (ret)
+		return ret;
+	ret = si_halt_smc(adev);
+	if (ret)
+		return ret;
+	ret = si_populate_smc_tdp_limits(adev, new_ps);
+	if (ret)
+		return ret;
+	ret = si_populate_smc_tdp_limits_2(adev, new_ps);
+	if (ret)
+		return ret;
+	ret = si_resume_smc(adev);
+	if (ret)
+		return ret;
+	ret = si_set_sw_state(adev);
+	if (ret)
+		return ret;
+	return 0;
+}
+
+static int si_dpm_set_power_state(struct amdgpu_device *adev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
+	struct amdgpu_ps *old_ps = &eg_pi->current_rps;
+	int ret;
+
+	ret = si_disable_ulv(adev);
+	if (ret) {
+		DRM_ERROR("si_disable_ulv failed\n");
+		return ret;
+	}
+	ret = si_restrict_performance_levels_before_switch(adev);
+	if (ret) {
+		DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
+		return ret;
+	}
+	if (eg_pi->pcie_performance_request)
+		si_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
+	ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps);
+	ret = si_enable_power_containment(adev, new_ps, false);
+	if (ret) {
+		DRM_ERROR("si_enable_power_containment failed\n");
+		return ret;
+	}
+	ret = si_enable_smc_cac(adev, new_ps, false);
+	if (ret) {
+		DRM_ERROR("si_enable_smc_cac failed\n");
+		return ret;
+	}
+	ret = si_halt_smc(adev);
+	if (ret) {
+		DRM_ERROR("si_halt_smc failed\n");
+		return ret;
+	}
+	ret = si_upload_sw_state(adev, new_ps);
+	if (ret) {
+		DRM_ERROR("si_upload_sw_state failed\n");
+		return ret;
+	}
+	ret = si_upload_smc_data(adev);
+	if (ret) {
+		DRM_ERROR("si_upload_smc_data failed\n");
+		return ret;
+	}
+	ret = si_upload_ulv_state(adev);
+	if (ret) {
+		DRM_ERROR("si_upload_ulv_state failed\n");
+		return ret;
+	}
+	if (eg_pi->dynamic_ac_timing) {
+		ret = si_upload_mc_reg_table(adev, new_ps);
+		if (ret) {
+			DRM_ERROR("si_upload_mc_reg_table failed\n");
+			return ret;
+		}
+	}
+	ret = si_program_memory_timing_parameters(adev, new_ps);
+	if (ret) {
+		DRM_ERROR("si_program_memory_timing_parameters failed\n");
+		return ret;
+	}
+	si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps);
+
+	ret = si_resume_smc(adev);
+	if (ret) {
+		DRM_ERROR("si_resume_smc failed\n");
+		return ret;
+	}
+	ret = si_set_sw_state(adev);
+	if (ret) {
+		DRM_ERROR("si_set_sw_state failed\n");
+		return ret;
+	}
+	ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps);
+	if (eg_pi->pcie_performance_request)
+		si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
+	ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps);
+	if (ret) {
+		DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
+		return ret;
+	}
+	ret = si_enable_smc_cac(adev, new_ps, true);
+	if (ret) {
+		DRM_ERROR("si_enable_smc_cac failed\n");
+		return ret;
+	}
+	ret = si_enable_power_containment(adev, new_ps, true);
+	if (ret) {
+		DRM_ERROR("si_enable_power_containment failed\n");
+		return ret;
+	}
+
+	ret = si_power_control_set_level(adev);
+	if (ret) {
+		DRM_ERROR("si_power_control_set_level failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void si_dpm_post_set_power_state(struct amdgpu_device *adev)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
+
+	ni_update_current_ps(adev, new_ps);
+}
+
+#if 0
+void si_dpm_reset_asic(struct amdgpu_device *adev)
+{
+	si_restrict_performance_levels_before_switch(adev);
+	si_disable_ulv(adev);
+	si_set_boot_state(adev);
+}
+#endif
+
+static void si_dpm_display_configuration_changed(struct amdgpu_device *adev)
+{
+	si_program_display_gap(adev);
+}
+
+
+static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev,
+					  struct amdgpu_ps *rps,
+					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+					  u8 table_rev)
+{
+	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+	rps->class = le16_to_cpu(non_clock_info->usClassification);
+	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+	} else if (r600_is_uvd_state(rps->class, rps->class2)) {
+		rps->vclk = RV770_DEFAULT_VCLK_FREQ;
+		rps->dclk = RV770_DEFAULT_DCLK_FREQ;
+	} else {
+		rps->vclk = 0;
+		rps->dclk = 0;
+	}
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+		adev->pm.dpm.boot_ps = rps;
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+		adev->pm.dpm.uvd_ps = rps;
+}
+
+static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
+				      struct amdgpu_ps *rps, int index,
+				      union pplib_clock_info *clock_info)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct si_power_info *si_pi = si_get_pi(adev);
+	struct  si_ps *ps = si_get_ps(rps);
+	u16 leakage_voltage;
+	struct rv7xx_pl *pl = &ps->performance_levels[index];
+	int ret;
+
+	ps->performance_level_count = index + 1;
+
+	pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
+	pl->sclk |= clock_info->si.ucEngineClockHigh << 16;
+	pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
+	pl->mclk |= clock_info->si.ucMemoryClockHigh << 16;
+
+	pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
+	pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
+	pl->flags = le32_to_cpu(clock_info->si.ulFlags);
+	pl->pcie_gen = r600_get_pcie_gen_support(adev,
+						 si_pi->sys_pcie_mask,
+						 si_pi->boot_pcie_gen,
+						 clock_info->si.ucPCIEGen);
+
+	/* patch up vddc if necessary */
+	ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
+							&leakage_voltage);
+	if (ret == 0)
+		pl->vddc = leakage_voltage;
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
+		pi->acpi_vddc = pl->vddc;
+		eg_pi->acpi_vddci = pl->vddci;
+		si_pi->acpi_pcie_gen = pl->pcie_gen;
+	}
+
+	if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
+	    index == 0) {
+		/* XXX disable for A0 tahiti */
+		si_pi->ulv.supported = false;
+		si_pi->ulv.pl = *pl;
+		si_pi->ulv.one_pcie_lane_in_ulv = false;
+		si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
+		si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT;
+		si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT;
+	}
+
+	if (pi->min_vddc_in_table > pl->vddc)
+		pi->min_vddc_in_table = pl->vddc;
+
+	if (pi->max_vddc_in_table < pl->vddc)
+		pi->max_vddc_in_table = pl->vddc;
+
+	/* patch up boot state */
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+		u16 vddc, vddci, mvdd;
+		amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd);
+		pl->mclk = adev->clock.default_mclk;
+		pl->sclk = adev->clock.default_sclk;
+		pl->vddc = vddc;
+		pl->vddci = vddci;
+		si_pi->mvdd_bootup_value = mvdd;
+	}
+
+	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+	    ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
+		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
+		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
+		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
+	}
+}
+
+union pplib_power_state {
+	struct _ATOM_PPLIB_STATE v1;
+	struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static int si_parse_power_table(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j, k, non_clock_array_index, clock_array_index;
+	union pplib_clock_info *clock_info;
+	struct _StateArray *state_array;
+	struct _ClockInfoArray *clock_info_array;
+	struct _NonClockInfoArray *non_clock_info_array;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	u8 *power_state_offset;
+	struct  si_ps *ps;
+
+	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return -EINVAL;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	amdgpu_add_thermal_controller(adev);
+
+	state_array = (struct _StateArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
+	clock_info_array = (struct _ClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+	non_clock_info_array = (struct _NonClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+	adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
+				  state_array->ucNumEntries, GFP_KERNEL);
+	if (!adev->pm.dpm.ps)
+		return -ENOMEM;
+	power_state_offset = (u8 *)state_array->states;
+	for (i = 0; i < state_array->ucNumEntries; i++) {
+		u8 *idx;
+		power_state = (union pplib_power_state *)power_state_offset;
+		non_clock_array_index = power_state->v2.nonClockInfoIndex;
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+		ps = kzalloc(sizeof(struct  si_ps), GFP_KERNEL);
+		if (ps == NULL) {
+			kfree(adev->pm.dpm.ps);
+			return -ENOMEM;
+		}
+		adev->pm.dpm.ps[i].ps_priv = ps;
+		si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
+					      non_clock_info,
+					      non_clock_info_array->ucEntrySize);
+		k = 0;
+		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+			clock_array_index = idx[j];
+			if (clock_array_index >= clock_info_array->ucNumEntries)
+				continue;
+			if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
+				break;
+			clock_info = (union pplib_clock_info *)
+				((u8 *)&clock_info_array->clockInfo[0] +
+				 (clock_array_index * clock_info_array->ucEntrySize));
+			si_parse_pplib_clock_info(adev,
+						  &adev->pm.dpm.ps[i], k,
+						  clock_info);
+			k++;
+		}
+		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+	}
+	adev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+	/* fill in the vce power states */
+	for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+		u32 sclk, mclk;
+		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
+		clock_info = (union pplib_clock_info *)
+			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+		sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
+		sclk |= clock_info->si.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
+		mclk |= clock_info->si.ucMemoryClockHigh << 16;
+		adev->pm.dpm.vce_states[i].sclk = sclk;
+		adev->pm.dpm.vce_states[i].mclk = mclk;
+	}
+
+	return 0;
+}
+
+static int si_dpm_init(struct amdgpu_device *adev)
+{
+	struct rv7xx_power_info *pi;
+	struct evergreen_power_info *eg_pi;
+	struct ni_power_info *ni_pi;
+	struct si_power_info *si_pi;
+	struct atom_clock_dividers dividers;
+	int ret;
+	u32 mask;
+
+	si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
+	if (si_pi == NULL)
+		return -ENOMEM;
+	adev->pm.dpm.priv = si_pi;
+	ni_pi = &si_pi->ni;
+	eg_pi = &ni_pi->eg;
+	pi = &eg_pi->rv7xx;
+
+	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+	if (ret)
+		si_pi->sys_pcie_mask = 0;
+	else
+		si_pi->sys_pcie_mask = mask;
+	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+	si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
+
+	si_set_max_cu_value(adev);
+
+	rv770_get_max_vddc(adev);
+	si_get_leakage_vddc(adev);
+	si_patch_dependency_tables_based_on_leakage(adev);
+
+	pi->acpi_vddc = 0;
+	eg_pi->acpi_vddci = 0;
+	pi->min_vddc_in_table = 0;
+	pi->max_vddc_in_table = 0;
+
+	ret = amdgpu_get_platform_caps(adev);
+	if (ret)
+		return ret;
+
+	ret = amdgpu_parse_extended_power_table(adev);
+	if (ret)
+		return ret;
+
+	ret = si_parse_power_table(adev);
+	if (ret)
+		return ret;
+
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
+		kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
+	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
+		amdgpu_free_extended_power_table(adev);
+		return -ENOMEM;
+	}
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
+
+	if (adev->pm.dpm.voltage_response_time == 0)
+		adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
+	if (adev->pm.dpm.backbias_response_time == 0)
+		adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
+
+	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+					     0, false, &dividers);
+	if (ret)
+		pi->ref_div = dividers.ref_div + 1;
+	else
+		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
+
+	eg_pi->smu_uvd_hs = false;
+
+	pi->mclk_strobe_mode_threshold = 40000;
+	if (si_is_special_1gb_platform(adev))
+		pi->mclk_stutter_mode_threshold = 0;
+	else
+		pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold;
+	pi->mclk_edc_enable_threshold = 40000;
+	eg_pi->mclk_edc_wr_enable_threshold = 40000;
+
+	ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
+
+	pi->voltage_control =
+		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+					    VOLTAGE_OBJ_GPIO_LUT);
+	if (!pi->voltage_control) {
+		si_pi->voltage_control_svi2 =
+			amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+						    VOLTAGE_OBJ_SVID2);
+		if (si_pi->voltage_control_svi2)
+			amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+						  &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
+	}
+
+	pi->mvdd_control =
+		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
+					    VOLTAGE_OBJ_GPIO_LUT);
+
+	eg_pi->vddci_control =
+		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+					    VOLTAGE_OBJ_GPIO_LUT);
+	if (!eg_pi->vddci_control)
+		si_pi->vddci_control_svi2 =
+			amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+						    VOLTAGE_OBJ_SVID2);
+
+	si_pi->vddc_phase_shed_control =
+		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+					    VOLTAGE_OBJ_PHASE_LUT);
+
+	rv770_get_engine_memory_ss(adev);
+
+	pi->asi = RV770_ASI_DFLT;
+	pi->pasi = CYPRESS_HASI_DFLT;
+	pi->vrc = SISLANDS_VRC_DFLT;
+
+	pi->gfx_clock_gating = true;
+
+	eg_pi->sclk_deep_sleep = true;
+	si_pi->sclk_deep_sleep_above_low = false;
+
+	if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
+		pi->thermal_protection = true;
+	else
+		pi->thermal_protection = false;
+
+	eg_pi->dynamic_ac_timing = true;
+
+	eg_pi->light_sleep = true;
+#if defined(CONFIG_ACPI)
+	eg_pi->pcie_performance_request =
+		amdgpu_acpi_is_pcie_performance_request_supported(adev);
+#else
+	eg_pi->pcie_performance_request = false;
+#endif
+
+	si_pi->sram_end = SMC_RAM_END;
+
+	adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
+	adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
+	adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
+	adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
+	adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
+	adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
+	adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
+
+	si_initialize_powertune_defaults(adev);
+
+	/* make sure dc limits are valid */
+	if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+	    (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+		adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+			adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+	si_pi->fan_ctrl_is_in_default_mode = true;
+
+	return 0;
+}
+
+static void si_dpm_fini(struct amdgpu_device *adev)
+{
+	int i;
+
+	if (adev->pm.dpm.ps)
+		for (i = 0; i < adev->pm.dpm.num_ps; i++)
+			kfree(adev->pm.dpm.ps[i].ps_priv);
+	kfree(adev->pm.dpm.ps);
+	kfree(adev->pm.dpm.priv);
+	kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
+	amdgpu_free_extended_power_table(adev);
+}
+
+static void si_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+						    struct seq_file *m)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct amdgpu_ps *rps = &eg_pi->current_rps;
+	struct  si_ps *ps = si_get_ps(rps);
+	struct rv7xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+		CURRENT_STATE_INDEX_SHIFT;
+
+	if (current_index >= ps->performance_level_count) {
+		seq_printf(m, "invalid dpm profile %d\n", current_index);
+	} else {
+		pl = &ps->performance_levels[current_index];
+		seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+		seq_printf(m, "power level %d    sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+			   current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
+	}
+}
+
+static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
+				      struct amdgpu_irq_src *source,
+				      unsigned type,
+				      enum amdgpu_interrupt_state state)
+{
+	u32 cg_thermal_int;
+
+	switch (type) {
+	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
+		switch (state) {
+		case AMDGPU_IRQ_STATE_DISABLE:
+			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+			cg_thermal_int |= THERM_INT_MASK_HIGH;
+			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+			break;
+		case AMDGPU_IRQ_STATE_ENABLE:
+			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+			cg_thermal_int &= ~THERM_INT_MASK_HIGH;
+			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+			break;
+		default:
+			break;
+		}
+		break;
+
+	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
+		switch (state) {
+		case AMDGPU_IRQ_STATE_DISABLE:
+			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+			cg_thermal_int |= THERM_INT_MASK_LOW;
+			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+			break;
+		case AMDGPU_IRQ_STATE_ENABLE:
+			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+			cg_thermal_int &= ~THERM_INT_MASK_LOW;
+			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+			break;
+		default:
+			break;
+		}
+		break;
+
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int si_dpm_process_interrupt(struct amdgpu_device *adev,
+				    struct amdgpu_irq_src *source,
+				    struct amdgpu_iv_entry *entry)
+{
+	bool queue_thermal = false;
+
+	if (entry == NULL)
+		return -EINVAL;
+
+	switch (entry->src_id) {
+	case 230: /* thermal low to high */
+		DRM_DEBUG("IH: thermal low to high\n");
+		adev->pm.dpm.thermal.high_to_low = false;
+		queue_thermal = true;
+		break;
+	case 231: /* thermal high to low */
+		DRM_DEBUG("IH: thermal high to low\n");
+		adev->pm.dpm.thermal.high_to_low = true;
+		queue_thermal = true;
+		break;
+	default:
+		break;
+	}
+
+	if (queue_thermal)
+		schedule_work(&adev->pm.dpm.thermal.work);
+
+	return 0;
+}
+
+static int si_dpm_late_init(void *handle)
+{
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!amdgpu_dpm)
+		return 0;
+
+	/* init the sysfs and debugfs files late */
+	ret = amdgpu_pm_sysfs_init(adev);
+	if (ret)
+		return ret;
+
+	ret = si_set_temperature_range(adev);
+	if (ret)
+		return ret;
+#if 0 //TODO ?
+	si_dpm_powergate_uvd(adev, true);
+#endif
+	return 0;
+}
+
+/**
+ * si_dpm_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver (not loaded into hw).
+ * Returns 0 on success, error on failure.
+ */
+static int si_dpm_init_microcode(struct amdgpu_device *adev)
+{
+	const char *chip_name;
+	char fw_name[30];
+	int err;
+
+	DRM_DEBUG("\n");
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+		chip_name = "tahiti";
+		break;
+	case CHIP_PITCAIRN:
+		if ((adev->pdev->revision == 0x81) ||
+		    (adev->pdev->device == 0x6810) ||
+		    (adev->pdev->device == 0x6811) ||
+		    (adev->pdev->device == 0x6816) ||
+		    (adev->pdev->device == 0x6817) ||
+		    (adev->pdev->device == 0x6806))
+			chip_name = "pitcairn_k";
+		else
+			chip_name = "pitcairn";
+		break;
+	case CHIP_VERDE:
+		if ((adev->pdev->revision == 0x81) ||
+		    (adev->pdev->revision == 0x83) ||
+		    (adev->pdev->revision == 0x87) ||
+		    (adev->pdev->device == 0x6820) ||
+		    (adev->pdev->device == 0x6821) ||
+		    (adev->pdev->device == 0x6822) ||
+		    (adev->pdev->device == 0x6823) ||
+		    (adev->pdev->device == 0x682A) ||
+		    (adev->pdev->device == 0x682B))
+			chip_name = "verde_k";
+		else
+			chip_name = "verde";
+		break;
+	case CHIP_OLAND:
+		if ((adev->pdev->revision == 0xC7) ||
+		    (adev->pdev->revision == 0x80) ||
+		    (adev->pdev->revision == 0x81) ||
+		    (adev->pdev->revision == 0x83) ||
+		    (adev->pdev->device == 0x6604) ||
+		    (adev->pdev->device == 0x6605))
+			chip_name = "oland_k";
+		else
+			chip_name = "oland";
+		break;
+	case CHIP_HAINAN:
+		if ((adev->pdev->revision == 0x81) ||
+		    (adev->pdev->revision == 0x83) ||
+		    (adev->pdev->revision == 0xC3) ||
+		    (adev->pdev->device == 0x6664) ||
+		    (adev->pdev->device == 0x6665) ||
+		    (adev->pdev->device == 0x6667))
+			chip_name = "hainan_k";
+		else
+			chip_name = "hainan";
+		break;
+	default: BUG();
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+	err = amdgpu_ucode_validate(adev->pm.fw);
+
+out:
+	if (err) {
+		DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s\"\n",
+			  err, fw_name);
+		release_firmware(adev->pm.fw);
+		adev->pm.fw = NULL;
+	}
+	return err;
+
+}
+
+static int si_dpm_sw_init(void *handle)
+{
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
+	if (ret)
+		return ret;
+
+	ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
+	if (ret)
+		return ret;
+
+	/* default to balanced state */
+	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
+	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+	adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
+	adev->pm.default_sclk = adev->clock.default_sclk;
+	adev->pm.default_mclk = adev->clock.default_mclk;
+	adev->pm.current_sclk = adev->clock.default_sclk;
+	adev->pm.current_mclk = adev->clock.default_mclk;
+	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
+
+	if (amdgpu_dpm == 0)
+		return 0;
+
+	ret = si_dpm_init_microcode(adev);
+	if (ret)
+		return ret;
+
+	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
+	mutex_lock(&adev->pm.mutex);
+	ret = si_dpm_init(adev);
+	if (ret)
+		goto dpm_failed;
+	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+	if (amdgpu_dpm == 1)
+		amdgpu_pm_print_power_states(adev);
+	mutex_unlock(&adev->pm.mutex);
+	DRM_INFO("amdgpu: dpm initialized\n");
+
+	return 0;
+
+dpm_failed:
+	si_dpm_fini(adev);
+	mutex_unlock(&adev->pm.mutex);
+	DRM_ERROR("amdgpu: dpm initialization failed\n");
+	return ret;
+}
+
+static int si_dpm_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	mutex_lock(&adev->pm.mutex);
+	amdgpu_pm_sysfs_fini(adev);
+	si_dpm_fini(adev);
+	mutex_unlock(&adev->pm.mutex);
+
+	return 0;
+}
+
+static int si_dpm_hw_init(void *handle)
+{
+	int ret;
+
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!amdgpu_dpm)
+		return 0;
+
+	mutex_lock(&adev->pm.mutex);
+	si_dpm_setup_asic(adev);
+	ret = si_dpm_enable(adev);
+	if (ret)
+		adev->pm.dpm_enabled = false;
+	else
+		adev->pm.dpm_enabled = true;
+	mutex_unlock(&adev->pm.mutex);
+
+	return ret;
+}
+
+static int si_dpm_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->pm.dpm_enabled) {
+		mutex_lock(&adev->pm.mutex);
+		si_dpm_disable(adev);
+		mutex_unlock(&adev->pm.mutex);
+	}
+
+	return 0;
+}
+
+static int si_dpm_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->pm.dpm_enabled) {
+		mutex_lock(&adev->pm.mutex);
+		/* disable dpm */
+		si_dpm_disable(adev);
+		/* reset the power state */
+		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+		mutex_unlock(&adev->pm.mutex);
+	}
+	return 0;
+}
+
+static int si_dpm_resume(void *handle)
+{
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->pm.dpm_enabled) {
+		/* asic init will reset to the boot state */
+		mutex_lock(&adev->pm.mutex);
+		si_dpm_setup_asic(adev);
+		ret = si_dpm_enable(adev);
+		if (ret)
+			adev->pm.dpm_enabled = false;
+		else
+			adev->pm.dpm_enabled = true;
+		mutex_unlock(&adev->pm.mutex);
+		if (adev->pm.dpm_enabled)
+			amdgpu_pm_compute_clocks(adev);
+	}
+	return 0;
+}
+
+static bool si_dpm_is_idle(void *handle)
+{
+	/* XXX */
+	return true;
+}
+
+static int si_dpm_wait_for_idle(void *handle)
+{
+	/* XXX */
+	return 0;
+}
+
+static int si_dpm_soft_reset(void *handle)
+{
+	return 0;
+}
+
+static int si_dpm_set_clockgating_state(void *handle,
+					enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int si_dpm_set_powergating_state(void *handle,
+					enum amd_powergating_state state)
+{
+	return 0;
+}
+
+/* get temperature in millidegrees */
+static int si_dpm_get_temp(struct amdgpu_device *adev)
+{
+	u32 temp;
+	int actual_temp = 0;
+
+	temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
+		CTF_TEMP_SHIFT;
+
+	if (temp & 0x200)
+		actual_temp = 255;
+	else
+		actual_temp = temp & 0x1ff;
+
+	actual_temp = (actual_temp * 1000);
+
+	return actual_temp;
+}
+
+static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct  si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
+
+	if (low)
+		return requested_state->performance_levels[0].sclk;
+	else
+		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
+}
+
+static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+	struct  si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
+
+	if (low)
+		return requested_state->performance_levels[0].mclk;
+	else
+		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
+}
+
+static void si_dpm_print_power_state(struct amdgpu_device *adev,
+				     struct amdgpu_ps *rps)
+{
+	struct  si_ps *ps = si_get_ps(rps);
+	struct rv7xx_pl *pl;
+	int i;
+
+	amdgpu_dpm_print_class_info(rps->class, rps->class2);
+	amdgpu_dpm_print_cap_info(rps->caps);
+	DRM_INFO("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+	for (i = 0; i < ps->performance_level_count; i++) {
+		pl = &ps->performance_levels[i];
+		if (adev->asic_type >= CHIP_TAHITI)
+			DRM_INFO("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+				 i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
+		else
+			DRM_INFO("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u\n",
+				 i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+	}
+	amdgpu_dpm_print_ps_status(adev, rps);
+}
+
+static int si_dpm_early_init(void *handle)
+{
+
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	si_dpm_set_dpm_funcs(adev);
+	si_dpm_set_irq_funcs(adev);
+	return 0;
+}
+
+
+const struct amd_ip_funcs si_dpm_ip_funcs = {
+	.name = "si_dpm",
+	.early_init = si_dpm_early_init,
+	.late_init = si_dpm_late_init,
+	.sw_init = si_dpm_sw_init,
+	.sw_fini = si_dpm_sw_fini,
+	.hw_init = si_dpm_hw_init,
+	.hw_fini = si_dpm_hw_fini,
+	.suspend = si_dpm_suspend,
+	.resume = si_dpm_resume,
+	.is_idle = si_dpm_is_idle,
+	.wait_for_idle = si_dpm_wait_for_idle,
+	.soft_reset = si_dpm_soft_reset,
+	.set_clockgating_state = si_dpm_set_clockgating_state,
+	.set_powergating_state = si_dpm_set_powergating_state,
+};
+
+static const struct amdgpu_dpm_funcs si_dpm_funcs = {
+	.get_temperature = &si_dpm_get_temp,
+	.pre_set_power_state = &si_dpm_pre_set_power_state,
+	.set_power_state = &si_dpm_set_power_state,
+	.post_set_power_state = &si_dpm_post_set_power_state,
+	.display_configuration_changed = &si_dpm_display_configuration_changed,
+	.get_sclk = &si_dpm_get_sclk,
+	.get_mclk = &si_dpm_get_mclk,
+	.print_power_state = &si_dpm_print_power_state,
+	.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
+	.force_performance_level = &si_dpm_force_performance_level,
+	.vblank_too_short = &si_dpm_vblank_too_short,
+	.set_fan_control_mode = &si_dpm_set_fan_control_mode,
+	.get_fan_control_mode = &si_dpm_get_fan_control_mode,
+	.set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
+	.get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
+};
+
+static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev)
+{
+	if (adev->pm.funcs == NULL)
+		adev->pm.funcs = &si_dpm_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
+	.set = si_dpm_set_interrupt_state,
+	.process = si_dpm_process_interrupt,
+};
+
+static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
+	adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
new file mode 100644
index 0000000..51ce21c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
@@ -0,0 +1,1015 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SI_DPM_H__
+#define __SI_DPM_H__
+
+#include "amdgpu_atombios.h"
+#include "sislands_smc.h"
+
+#define MC_CG_CONFIG                                    0x96f
+#define MC_ARB_CG                                       0x9fa
+#define		CG_ARB_REQ(x)				((x) << 0)
+#define		CG_ARB_REQ_MASK				(0xff << 0)
+
+#define	MC_ARB_DRAM_TIMING_1				0x9fc
+#define	MC_ARB_DRAM_TIMING_2				0x9fd
+#define	MC_ARB_DRAM_TIMING_3				0x9fe
+#define	MC_ARB_DRAM_TIMING2_1				0x9ff
+#define	MC_ARB_DRAM_TIMING2_2				0xa00
+#define	MC_ARB_DRAM_TIMING2_3				0xa01
+
+#define MAX_NO_OF_MVDD_VALUES 2
+#define MAX_NO_VREG_STEPS 32
+#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
+#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32
+#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
+#define RV770_ASI_DFLT                                1000
+#define CYPRESS_HASI_DFLT                               400000
+#define PCIE_PERF_REQ_PECI_GEN1         2
+#define PCIE_PERF_REQ_PECI_GEN2         3
+#define PCIE_PERF_REQ_PECI_GEN3         4
+#define RV770_DEFAULT_VCLK_FREQ  53300 /* 10 khz */
+#define RV770_DEFAULT_DCLK_FREQ  40000 /* 10 khz */
+
+#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16
+
+#define RV770_SMC_TABLE_ADDRESS 0xB000
+#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE    3
+
+#define SMC_STROBE_RATIO    0x0F
+#define SMC_STROBE_ENABLE   0x10
+
+#define SMC_MC_EDC_RD_FLAG  0x01
+#define SMC_MC_EDC_WR_FLAG  0x02
+#define SMC_MC_RTT_ENABLE   0x04
+#define SMC_MC_STUTTER_EN   0x08
+
+#define RV770_SMC_VOLTAGEMASK_VDDC 0
+#define RV770_SMC_VOLTAGEMASK_MVDD 1
+#define RV770_SMC_VOLTAGEMASK_VDDCI 2
+#define RV770_SMC_VOLTAGEMASK_MAX  4
+
+#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
+#define NISLANDS_SMC_STROBE_RATIO    0x0F
+#define NISLANDS_SMC_STROBE_ENABLE   0x10
+
+#define NISLANDS_SMC_MC_EDC_RD_FLAG  0x01
+#define NISLANDS_SMC_MC_EDC_WR_FLAG  0x02
+#define NISLANDS_SMC_MC_RTT_ENABLE   0x04
+#define NISLANDS_SMC_MC_STUTTER_EN   0x08
+
+#define MAX_NO_VREG_STEPS 32
+
+#define NISLANDS_SMC_VOLTAGEMASK_VDDC  0
+#define NISLANDS_SMC_VOLTAGEMASK_MVDD  1
+#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define NISLANDS_SMC_VOLTAGEMASK_MAX   4
+
+#define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT               0
+#define SISLANDS_MCREGISTERTABLE_ACPI_SLOT                  1
+#define SISLANDS_MCREGISTERTABLE_ULV_SLOT                   2
+#define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT     3
+
+#define SISLANDS_LEAKAGE_INDEX0     0xff01
+#define SISLANDS_MAX_LEAKAGE_COUNT  4
+
+#define SISLANDS_MAX_HARDWARE_POWERLEVELS 5
+#define SISLANDS_INITIAL_STATE_ARB_INDEX    0
+#define SISLANDS_ACPI_STATE_ARB_INDEX       1
+#define SISLANDS_ULV_STATE_ARB_INDEX        2
+#define SISLANDS_DRIVER_STATE_ARB_INDEX     3
+
+#define SISLANDS_DPM2_MAX_PULSE_SKIP        256
+
+#define SISLANDS_DPM2_NEAR_TDP_DEC          10
+#define SISLANDS_DPM2_ABOVE_SAFE_INC        5
+#define SISLANDS_DPM2_BELOW_SAFE_INC        20
+
+#define SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT            80
+
+#define SISLANDS_DPM2_MAXPS_PERCENT_H                   99
+#define SISLANDS_DPM2_MAXPS_PERCENT_M                   99
+
+#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER                 0x3FFF
+#define SISLANDS_DPM2_SQ_RAMP_MIN_POWER                 0x12
+#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA           0x15
+#define SISLANDS_DPM2_SQ_RAMP_STI_SIZE                  0x1E
+#define SISLANDS_DPM2_SQ_RAMP_LTI_RATIO                 0xF
+
+#define SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN         10
+
+#define SISLANDS_VRC_DFLT                               0xC000B3
+#define SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT             1687
+#define SISLANDS_CGULVPARAMETER_DFLT                    0x00040035
+#define SISLANDS_CGULVCONTROL_DFLT                      0x1f007550
+
+#define SI_ASI_DFLT                                10000
+#define SI_BSP_DFLT                                0x41EB
+#define SI_BSU_DFLT                                0x2
+#define SI_AH_DFLT                                 5
+#define SI_RLP_DFLT                                25
+#define SI_RMP_DFLT                                65
+#define SI_LHP_DFLT                                40
+#define SI_LMP_DFLT                                15
+#define SI_TD_DFLT                                 0
+#define SI_UTC_DFLT_00                             0x24
+#define SI_UTC_DFLT_01                             0x22
+#define SI_UTC_DFLT_02                             0x22
+#define SI_UTC_DFLT_03                             0x22
+#define SI_UTC_DFLT_04                             0x22
+#define SI_UTC_DFLT_05                             0x22
+#define SI_UTC_DFLT_06                             0x22
+#define SI_UTC_DFLT_07                             0x22
+#define SI_UTC_DFLT_08                             0x22
+#define SI_UTC_DFLT_09                             0x22
+#define SI_UTC_DFLT_10                             0x22
+#define SI_UTC_DFLT_11                             0x22
+#define SI_UTC_DFLT_12                             0x22
+#define SI_UTC_DFLT_13                             0x22
+#define SI_UTC_DFLT_14                             0x22
+#define SI_DTC_DFLT_00                             0x24
+#define SI_DTC_DFLT_01                             0x22
+#define SI_DTC_DFLT_02                             0x22
+#define SI_DTC_DFLT_03                             0x22
+#define SI_DTC_DFLT_04                             0x22
+#define SI_DTC_DFLT_05                             0x22
+#define SI_DTC_DFLT_06                             0x22
+#define SI_DTC_DFLT_07                             0x22
+#define SI_DTC_DFLT_08                             0x22
+#define SI_DTC_DFLT_09                             0x22
+#define SI_DTC_DFLT_10                             0x22
+#define SI_DTC_DFLT_11                             0x22
+#define SI_DTC_DFLT_12                             0x22
+#define SI_DTC_DFLT_13                             0x22
+#define SI_DTC_DFLT_14                             0x22
+#define SI_VRC_DFLT                                0x0000C003
+#define SI_VOLTAGERESPONSETIME_DFLT                1000
+#define SI_BACKBIASRESPONSETIME_DFLT               1000
+#define SI_VRU_DFLT                                0x3
+#define SI_SPLLSTEPTIME_DFLT                       0x1000
+#define SI_SPLLSTEPUNIT_DFLT                       0x3
+#define SI_TPU_DFLT                                0
+#define SI_TPC_DFLT                                0x200
+#define SI_SSTU_DFLT                               0
+#define SI_SST_DFLT                                0x00C8
+#define SI_GICST_DFLT                              0x200
+#define SI_FCT_DFLT                                0x0400
+#define SI_FCTU_DFLT                               0
+#define SI_CTXCGTT3DRPHC_DFLT                      0x20
+#define SI_CTXCGTT3DRSDC_DFLT                      0x40
+#define SI_VDDC3DOORPHC_DFLT                       0x100
+#define SI_VDDC3DOORSDC_DFLT                       0x7
+#define SI_VDDC3DOORSU_DFLT                        0
+#define SI_MPLLLOCKTIME_DFLT                       100
+#define SI_MPLLRESETTIME_DFLT                      150
+#define SI_VCOSTEPPCT_DFLT                          20
+#define SI_ENDINGVCOSTEPPCT_DFLT                    5
+#define SI_REFERENCEDIVIDER_DFLT                    4
+
+#define SI_PM_NUMBER_OF_TC 15
+#define SI_PM_NUMBER_OF_SCLKS 20
+#define SI_PM_NUMBER_OF_MCLKS 4
+#define SI_PM_NUMBER_OF_VOLTAGE_LEVELS 4
+#define SI_PM_NUMBER_OF_ACTIVITY_LEVELS 3
+
+/* XXX are these ok? */
+#define SI_TEMP_RANGE_MIN (90 * 1000)
+#define SI_TEMP_RANGE_MAX (120 * 1000)
+
+#define FDO_PWM_MODE_STATIC  1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+enum ni_dc_cac_level
+{
+	NISLANDS_DCCAC_LEVEL_0 = 0,
+	NISLANDS_DCCAC_LEVEL_1,
+	NISLANDS_DCCAC_LEVEL_2,
+	NISLANDS_DCCAC_LEVEL_3,
+	NISLANDS_DCCAC_LEVEL_4,
+	NISLANDS_DCCAC_LEVEL_5,
+	NISLANDS_DCCAC_LEVEL_6,
+	NISLANDS_DCCAC_LEVEL_7,
+	NISLANDS_DCCAC_MAX_LEVELS
+};
+
+enum si_cac_config_reg_type
+{
+	SISLANDS_CACCONFIG_MMR = 0,
+	SISLANDS_CACCONFIG_CGIND,
+	SISLANDS_CACCONFIG_MAX
+};
+
+enum si_power_level {
+	SI_POWER_LEVEL_LOW = 0,
+	SI_POWER_LEVEL_MEDIUM = 1,
+	SI_POWER_LEVEL_HIGH = 2,
+	SI_POWER_LEVEL_CTXSW = 3,
+};
+
+enum si_td {
+	SI_TD_AUTO,
+	SI_TD_UP,
+	SI_TD_DOWN,
+};
+
+enum si_display_watermark {
+	SI_DISPLAY_WATERMARK_LOW = 0,
+	SI_DISPLAY_WATERMARK_HIGH = 1,
+};
+
+enum si_display_gap
+{
+    SI_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
+    SI_PM_DISPLAY_GAP_VBLANK       = 1,
+    SI_PM_DISPLAY_GAP_WATERMARK    = 2,
+    SI_PM_DISPLAY_GAP_IGNORE       = 3,
+};
+
+extern const struct amd_ip_funcs si_dpm_ip_funcs;
+
+struct ni_leakage_coeffients
+{
+	u32 at;
+	u32 bt;
+	u32 av;
+	u32 bv;
+	s32 t_slope;
+	s32 t_intercept;
+	u32 t_ref;
+};
+
+struct SMC_Evergreen_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress;
+
+struct evergreen_mc_reg_entry {
+	u32 mclk_max;
+	u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct evergreen_mc_reg_table {
+	u8 last;
+	u8 num_entries;
+	u16 valid_flag;
+	struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+	SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct SMC_Evergreen_MCRegisterSet
+{
+    uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet;
+
+struct SMC_Evergreen_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMC_Evergreen_MCRegisterAddress     address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+    SMC_Evergreen_MCRegisterSet         data[5];
+};
+
+typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
+
+struct SMC_NIslands_MCRegisterSet
+{
+    uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet;
+
+struct ni_mc_reg_entry {
+	u32 mclk_max;
+	u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct SMC_NIslands_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress;
+
+struct SMC_NIslands_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMC_NIslands_MCRegisterAddress      address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+    SMC_NIslands_MCRegisterSet          data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters;
+
+struct evergreen_ulv_param {
+	bool supported;
+	struct rv7xx_pl *pl;
+};
+
+struct evergreen_arb_registers {
+	u32 mc_arb_dram_timing;
+	u32 mc_arb_dram_timing2;
+	u32 mc_arb_rfsh_rate;
+	u32 mc_arb_burst_time;
+};
+
+struct at {
+	u32 rlp;
+	u32 rmp;
+	u32 lhp;
+	u32 lmp;
+};
+
+struct ni_clock_registers {
+	u32 cg_spll_func_cntl;
+	u32 cg_spll_func_cntl_2;
+	u32 cg_spll_func_cntl_3;
+	u32 cg_spll_func_cntl_4;
+	u32 cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2;
+	u32 mclk_pwrmgt_cntl;
+	u32 dll_cntl;
+	u32 mpll_ad_func_cntl;
+	u32 mpll_ad_func_cntl_2;
+	u32 mpll_dq_func_cntl;
+	u32 mpll_dq_func_cntl_2;
+	u32 mpll_ss1;
+	u32 mpll_ss2;
+};
+
+struct RV770_SMC_SCLK_VALUE
+{
+    uint32_t        vCG_SPLL_FUNC_CNTL;
+    uint32_t        vCG_SPLL_FUNC_CNTL_2;
+    uint32_t        vCG_SPLL_FUNC_CNTL_3;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM_2;
+    uint32_t        sclk_value;
+};
+
+typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE;
+
+struct RV770_SMC_MCLK_VALUE
+{
+    uint32_t        vMPLL_AD_FUNC_CNTL;
+    uint32_t        vMPLL_AD_FUNC_CNTL_2;
+    uint32_t        vMPLL_DQ_FUNC_CNTL;
+    uint32_t        vMPLL_DQ_FUNC_CNTL_2;
+    uint32_t        vMCLK_PWRMGT_CNTL;
+    uint32_t        vDLL_CNTL;
+    uint32_t        vMPLL_SS;
+    uint32_t        vMPLL_SS2;
+    uint32_t        mclk_value;
+};
+
+typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE;
+
+
+struct RV730_SMC_MCLK_VALUE
+{
+    uint32_t        vMCLK_PWRMGT_CNTL;
+    uint32_t        vDLL_CNTL;
+    uint32_t        vMPLL_FUNC_CNTL;
+    uint32_t        vMPLL_FUNC_CNTL2;
+    uint32_t        vMPLL_FUNC_CNTL3;
+    uint32_t        vMPLL_SS;
+    uint32_t        vMPLL_SS2;
+    uint32_t        mclk_value;
+};
+
+typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE;
+
+struct RV770_SMC_VOLTAGE_VALUE
+{
+    uint16_t             value;
+    uint8_t              index;
+    uint8_t              padding;
+};
+
+typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE;
+
+union RV7XX_SMC_MCLK_VALUE
+{
+    RV770_SMC_MCLK_VALUE    mclk770;
+    RV730_SMC_MCLK_VALUE    mclk730;
+};
+
+typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE;
+
+struct RV770_SMC_HW_PERFORMANCE_LEVEL
+{
+    uint8_t                 arbValue;
+    union{
+        uint8_t             seqValue;
+        uint8_t             ACIndex;
+    };
+    uint8_t                 displayWatermark;
+    uint8_t                 gen2PCIE;
+    uint8_t                 gen2XSP;
+    uint8_t                 backbias;
+    uint8_t                 strobeMode;
+    uint8_t                 mcFlags;
+    uint32_t                aT;
+    uint32_t                bSP;
+    RV770_SMC_SCLK_VALUE    sclk;
+    RV7XX_SMC_MCLK_VALUE    mclk;
+    RV770_SMC_VOLTAGE_VALUE vddc;
+    RV770_SMC_VOLTAGE_VALUE mvdd;
+    RV770_SMC_VOLTAGE_VALUE vddci;
+    uint8_t                 reserved1;
+    uint8_t                 reserved2;
+    uint8_t                 stateFlags;
+    uint8_t                 padding;
+};
+
+typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL;
+
+struct RV770_SMC_SWSTATE
+{
+    uint8_t           flags;
+    uint8_t           padding1;
+    uint8_t           padding2;
+    uint8_t           padding3;
+    RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
+};
+
+typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE;
+
+struct RV770_SMC_VOLTAGEMASKTABLE
+{
+    uint8_t  highMask[RV770_SMC_VOLTAGEMASK_MAX];
+    uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE;
+
+struct RV770_SMC_STATETABLE
+{
+    uint8_t             thermalProtectType;
+    uint8_t             systemFlags;
+    uint8_t             maxVDDCIndexInPPTable;
+    uint8_t             extraFlags;
+    uint8_t             highSMIO[MAX_NO_VREG_STEPS];
+    uint32_t            lowSMIO[MAX_NO_VREG_STEPS];
+    RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable;
+    RV770_SMC_SWSTATE   initialState;
+    RV770_SMC_SWSTATE   ACPIState;
+    RV770_SMC_SWSTATE   driverState;
+    RV770_SMC_SWSTATE   ULVState;
+};
+
+typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
+
+struct vddc_table_entry {
+	u16 vddc;
+	u8 vddc_index;
+	u8 high_smio;
+	u32 low_smio;
+};
+
+struct rv770_clock_registers {
+	u32 cg_spll_func_cntl;
+	u32 cg_spll_func_cntl_2;
+	u32 cg_spll_func_cntl_3;
+	u32 cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2;
+	u32 mpll_ad_func_cntl;
+	u32 mpll_ad_func_cntl_2;
+	u32 mpll_dq_func_cntl;
+	u32 mpll_dq_func_cntl_2;
+	u32 mclk_pwrmgt_cntl;
+	u32 dll_cntl;
+	u32 mpll_ss1;
+	u32 mpll_ss2;
+};
+
+struct rv730_clock_registers {
+	u32 cg_spll_func_cntl;
+	u32 cg_spll_func_cntl_2;
+	u32 cg_spll_func_cntl_3;
+	u32 cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2;
+	u32 mclk_pwrmgt_cntl;
+	u32 dll_cntl;
+	u32 mpll_func_cntl;
+	u32 mpll_func_cntl2;
+	u32 mpll_func_cntl3;
+	u32 mpll_ss;
+	u32 mpll_ss2;
+};
+
+union r7xx_clock_registers {
+	struct rv770_clock_registers rv770;
+	struct rv730_clock_registers rv730;
+};
+
+struct rv7xx_power_info {
+	/* flags */
+	bool mem_gddr5;
+	bool pcie_gen2;
+	bool dynamic_pcie_gen2;
+	bool acpi_pcie_gen2;
+	bool boot_in_gen2;
+	bool voltage_control; /* vddc */
+	bool mvdd_control;
+	bool sclk_ss;
+	bool mclk_ss;
+	bool dynamic_ss;
+	bool gfx_clock_gating;
+	bool mg_clock_gating;
+	bool mgcgtssm;
+	bool power_gating;
+	bool thermal_protection;
+	bool display_gap;
+	bool dcodt;
+	bool ulps;
+	/* registers */
+	union r7xx_clock_registers clk_regs;
+	u32 s0_vid_lower_smio_cntl;
+	/* voltage */
+	u32 vddc_mask_low;
+	u32 mvdd_mask_low;
+	u32 mvdd_split_frequency;
+	u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES];
+	u16 max_vddc;
+	u16 max_vddc_in_table;
+	u16 min_vddc_in_table;
+	struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS];
+	u8 valid_vddc_entries;
+	/* dc odt */
+	u32 mclk_odt_threshold;
+	u8 odt_value_0[2];
+	u8 odt_value_1[2];
+	/* stored values */
+	u32 boot_sclk;
+	u16 acpi_vddc;
+	u32 ref_div;
+	u32 active_auto_throttle_sources;
+	u32 mclk_stutter_mode_threshold;
+	u32 mclk_strobe_mode_threshold;
+	u32 mclk_edc_enable_threshold;
+	u32 bsp;
+	u32 bsu;
+	u32 pbsp;
+	u32 pbsu;
+	u32 dsp;
+	u32 psp;
+	u32 asi;
+	u32 pasi;
+	u32 vrc;
+	u32 restricted_levels;
+	u32 rlp;
+	u32 rmp;
+	u32 lhp;
+	u32 lmp;
+	/* smc offsets */
+	u16 state_table_start;
+	u16 soft_regs_start;
+	u16 sram_end;
+	/* scratch structs */
+	RV770_SMC_STATETABLE smc_statetable;
+};
+
+struct rv7xx_pl {
+	u32 sclk;
+	u32 mclk;
+	u16 vddc;
+	u16 vddci; /* eg+ only */
+	u32 flags;
+	enum amdgpu_pcie_gen pcie_gen; /* si+ only */
+};
+
+struct rv7xx_ps {
+	struct rv7xx_pl high;
+	struct rv7xx_pl medium;
+	struct rv7xx_pl low;
+	bool dc_compatible;
+};
+
+struct si_ps {
+	u16 performance_level_count;
+	bool dc_compatible;
+	struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
+};
+
+struct ni_mc_reg_table {
+	u8 last;
+	u8 num_entries;
+	u16 valid_flag;
+	struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+	SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ni_cac_data
+{
+	struct ni_leakage_coeffients leakage_coefficients;
+	u32 i_leakage;
+	s32 leakage_minimum_temperature;
+	u32 pwr_const;
+	u32 dc_cac_value;
+	u32 bif_cac_value;
+	u32 lkge_pwr;
+	u8 mc_wr_weight;
+	u8 mc_rd_weight;
+	u8 allow_ovrflw;
+	u8 num_win_tdp;
+	u8 l2num_win_tdp;
+	u8 lts_truncate_n;
+};
+
+struct evergreen_power_info {
+	/* must be first! */
+	struct rv7xx_power_info rv7xx;
+	/* flags */
+	bool vddci_control;
+	bool dynamic_ac_timing;
+	bool abm;
+	bool mcls;
+	bool light_sleep;
+	bool memory_transition;
+	bool pcie_performance_request;
+	bool pcie_performance_request_registered;
+	bool sclk_deep_sleep;
+	bool dll_default_on;
+	bool ls_clock_gating;
+	bool smu_uvd_hs;
+	bool uvd_enabled;
+	/* stored values */
+	u16 acpi_vddci;
+	u8 mvdd_high_index;
+	u8 mvdd_low_index;
+	u32 mclk_edc_wr_enable_threshold;
+	struct evergreen_mc_reg_table mc_reg_table;
+	struct atom_voltage_table vddc_voltage_table;
+	struct atom_voltage_table vddci_voltage_table;
+	struct evergreen_arb_registers bootup_arb_registers;
+	struct evergreen_ulv_param ulv;
+	struct at ats[2];
+	/* smc offsets */
+	u16 mc_reg_table_start;
+	struct amdgpu_ps current_rps;
+	struct rv7xx_ps current_ps;
+	struct amdgpu_ps requested_rps;
+	struct rv7xx_ps requested_ps;
+};
+
+struct PP_NIslands_Dpm2PerfLevel
+{
+    uint8_t     MaxPS;
+    uint8_t     TgtAct;
+    uint8_t     MaxPS_StepInc;
+    uint8_t     MaxPS_StepDec;
+    uint8_t     PSST;
+    uint8_t     NearTDPDec;
+    uint8_t     AboveSafeInc;
+    uint8_t     BelowSafeInc;
+    uint8_t     PSDeltaLimit;
+    uint8_t     PSDeltaWin;
+    uint8_t     Reserved[6];
+};
+
+typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel;
+
+struct PP_NIslands_DPM2Parameters
+{
+    uint32_t    TDPLimit;
+    uint32_t    NearTDPLimit;
+    uint32_t    SafePowerLimit;
+    uint32_t    PowerBoostLimit;
+};
+typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters;
+
+struct NISLANDS_SMC_SCLK_VALUE
+{
+    uint32_t        vCG_SPLL_FUNC_CNTL;
+    uint32_t        vCG_SPLL_FUNC_CNTL_2;
+    uint32_t        vCG_SPLL_FUNC_CNTL_3;
+    uint32_t        vCG_SPLL_FUNC_CNTL_4;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM_2;
+    uint32_t        sclk_value;
+};
+
+typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE;
+
+struct NISLANDS_SMC_MCLK_VALUE
+{
+    uint32_t        vMPLL_FUNC_CNTL;
+    uint32_t        vMPLL_FUNC_CNTL_1;
+    uint32_t        vMPLL_FUNC_CNTL_2;
+    uint32_t        vMPLL_AD_FUNC_CNTL;
+    uint32_t        vMPLL_AD_FUNC_CNTL_2;
+    uint32_t        vMPLL_DQ_FUNC_CNTL;
+    uint32_t        vMPLL_DQ_FUNC_CNTL_2;
+    uint32_t        vMCLK_PWRMGT_CNTL;
+    uint32_t        vDLL_CNTL;
+    uint32_t        vMPLL_SS;
+    uint32_t        vMPLL_SS2;
+    uint32_t        mclk_value;
+};
+
+typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE;
+
+struct NISLANDS_SMC_VOLTAGE_VALUE
+{
+    uint16_t             value;
+    uint8_t              index;
+    uint8_t              padding;
+};
+
+typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE;
+
+struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
+{
+    uint8_t                     arbValue;
+    uint8_t                     ACIndex;
+    uint8_t                     displayWatermark;
+    uint8_t                     gen2PCIE;
+    uint8_t                     reserved1;
+    uint8_t                     reserved2;
+    uint8_t                     strobeMode;
+    uint8_t                     mcFlags;
+    uint32_t                    aT;
+    uint32_t                    bSP;
+    NISLANDS_SMC_SCLK_VALUE     sclk;
+    NISLANDS_SMC_MCLK_VALUE     mclk;
+    NISLANDS_SMC_VOLTAGE_VALUE  vddc;
+    NISLANDS_SMC_VOLTAGE_VALUE  mvdd;
+    NISLANDS_SMC_VOLTAGE_VALUE  vddci;
+    NISLANDS_SMC_VOLTAGE_VALUE  std_vddc;
+    uint32_t                    powergate_en;
+    uint8_t                     hUp;
+    uint8_t                     hDown;
+    uint8_t                     stateFlags;
+    uint8_t                     arbRefreshState;
+    uint32_t                    SQPowerThrottle;
+    uint32_t                    SQPowerThrottle_2;
+    uint32_t                    reserved[2];
+    PP_NIslands_Dpm2PerfLevel   dpm2;
+};
+
+typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL;
+
+struct NISLANDS_SMC_SWSTATE
+{
+    uint8_t                             flags;
+    uint8_t                             levelCount;
+    uint8_t                             padding2;
+    uint8_t                             padding3;
+    NISLANDS_SMC_HW_PERFORMANCE_LEVEL   levels[1];
+};
+
+typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE;
+
+struct NISLANDS_SMC_VOLTAGEMASKTABLE
+{
+    uint8_t  highMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
+    uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
+
+#define NISLANDS_MAX_NO_VREG_STEPS 32
+
+struct NISLANDS_SMC_STATETABLE
+{
+    uint8_t                             thermalProtectType;
+    uint8_t                             systemFlags;
+    uint8_t                             maxVDDCIndexInPPTable;
+    uint8_t                             extraFlags;
+    uint8_t                             highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+    uint32_t                            lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+    NISLANDS_SMC_VOLTAGEMASKTABLE       voltageMaskTable;
+    PP_NIslands_DPM2Parameters          dpm2Params;
+    NISLANDS_SMC_SWSTATE                initialState;
+    NISLANDS_SMC_SWSTATE                ACPIState;
+    NISLANDS_SMC_SWSTATE                ULVState;
+    NISLANDS_SMC_SWSTATE                driverState;
+    NISLANDS_SMC_HW_PERFORMANCE_LEVEL   dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+};
+
+typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
+
+struct ni_power_info {
+	/* must be first! */
+	struct evergreen_power_info eg;
+	struct ni_clock_registers clock_registers;
+	struct ni_mc_reg_table mc_reg_table;
+	u32 mclk_rtt_mode_threshold;
+	/* flags */
+	bool use_power_boost_limit;
+	bool support_cac_long_term_average;
+	bool cac_enabled;
+	bool cac_configuration_required;
+	bool driver_calculate_cac_leakage;
+	bool pc_enabled;
+	bool enable_power_containment;
+	bool enable_cac;
+	bool enable_sq_ramping;
+	/* smc offsets */
+	u16 arb_table_start;
+	u16 fan_table_start;
+	u16 cac_table_start;
+	u16 spll_table_start;
+	/* CAC stuff */
+	struct ni_cac_data cac_data;
+	u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS];
+	const struct ni_cac_weights *cac_weights;
+	u8 lta_window_size;
+	u8 lts_truncate;
+	struct si_ps current_ps;
+	struct si_ps requested_ps;
+	/* scratch structs */
+	SMC_NIslands_MCRegisters smc_mc_reg_table;
+	NISLANDS_SMC_STATETABLE smc_statetable;
+};
+
+struct si_cac_config_reg
+{
+	u32 offset;
+	u32 mask;
+	u32 shift;
+	u32 value;
+	enum si_cac_config_reg_type type;
+};
+
+struct si_powertune_data
+{
+	u32 cac_window;
+	u32 l2_lta_window_size_default;
+	u8 lts_truncate_default;
+	u8 shift_n_default;
+	u8 operating_temp;
+	struct ni_leakage_coeffients leakage_coefficients;
+	u32 fixed_kt;
+	u32 lkge_lut_v0_percent;
+	u8 dc_cac[NISLANDS_DCCAC_MAX_LEVELS];
+	bool enable_powertune_by_default;
+};
+
+struct si_dyn_powertune_data
+{
+	u32 cac_leakage;
+	s32 leakage_minimum_temperature;
+	u32 wintime;
+	u32 l2_lta_window_size;
+	u8 lts_truncate;
+	u8 shift_n;
+	u8 dc_pwr_value;
+	bool disable_uvd_powertune;
+};
+
+struct si_dte_data
+{
+	u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+	u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+	u32 k;
+	u32 t0;
+	u32 max_t;
+	u8 window_size;
+	u8 temp_select;
+	u8 dte_mode;
+	u8 tdep_count;
+	u8 t_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+	u32 tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+	u32 tdep_r[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+	u32 t_threshold;
+	bool enable_dte_by_default;
+};
+
+struct si_clock_registers {
+	u32 cg_spll_func_cntl;
+	u32 cg_spll_func_cntl_2;
+	u32 cg_spll_func_cntl_3;
+	u32 cg_spll_func_cntl_4;
+	u32 cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2;
+	u32 dll_cntl;
+	u32 mclk_pwrmgt_cntl;
+	u32 mpll_ad_func_cntl;
+	u32 mpll_dq_func_cntl;
+	u32 mpll_func_cntl;
+	u32 mpll_func_cntl_1;
+	u32 mpll_func_cntl_2;
+	u32 mpll_ss1;
+	u32 mpll_ss2;
+};
+
+struct si_mc_reg_entry {
+	u32 mclk_max;
+	u32 mc_data[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct si_mc_reg_table {
+	u8 last;
+	u8 num_entries;
+	u16 valid_flag;
+	struct si_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+	SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct si_leakage_voltage_entry
+{
+	u16 voltage;
+	u16 leakage_index;
+};
+
+struct si_leakage_voltage
+{
+	u16 count;
+	struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
+};
+
+
+struct si_ulv_param {
+	bool supported;
+	u32 cg_ulv_control;
+	u32 cg_ulv_parameter;
+	u32 volt_change_delay;
+	struct rv7xx_pl pl;
+	bool one_pcie_lane_in_ulv;
+};
+
+struct si_power_info {
+	/* must be first! */
+	struct ni_power_info ni;
+	struct si_clock_registers clock_registers;
+	struct si_mc_reg_table mc_reg_table;
+	struct atom_voltage_table mvdd_voltage_table;
+	struct atom_voltage_table vddc_phase_shed_table;
+	struct si_leakage_voltage leakage_voltage;
+	u16 mvdd_bootup_value;
+	struct si_ulv_param ulv;
+	u32 max_cu;
+	/* pcie gen */
+	enum amdgpu_pcie_gen force_pcie_gen;
+	enum amdgpu_pcie_gen boot_pcie_gen;
+	enum amdgpu_pcie_gen acpi_pcie_gen;
+	u32 sys_pcie_mask;
+	/* flags */
+	bool enable_dte;
+	bool enable_ppm;
+	bool vddc_phase_shed_control;
+	bool pspp_notify_required;
+	bool sclk_deep_sleep_above_low;
+	bool voltage_control_svi2;
+	bool vddci_control_svi2;
+	/* smc offsets */
+	u32 sram_end;
+	u32 state_table_start;
+	u32 soft_regs_start;
+	u32 mc_reg_table_start;
+	u32 arb_table_start;
+	u32 cac_table_start;
+	u32 dte_table_start;
+	u32 spll_table_start;
+	u32 papm_cfg_table_start;
+	u32 fan_table_start;
+	/* CAC stuff */
+	const struct si_cac_config_reg *cac_weights;
+	const struct si_cac_config_reg *lcac_config;
+	const struct si_cac_config_reg *cac_override;
+	const struct si_powertune_data *powertune_data;
+	struct si_dyn_powertune_data dyn_powertune_data;
+	/* DTE stuff */
+	struct si_dte_data dte_data;
+	/* scratch structs */
+	SMC_SIslands_MCRegisters smc_mc_reg_table;
+	SISLANDS_SMC_STATETABLE smc_statetable;
+	PP_SIslands_PAPMParameters papm_parm;
+	/* SVI2 */
+	u8 svd_gpio_id;
+	u8 svc_gpio_id;
+	/* fan control */
+	bool fan_ctrl_is_in_default_mode;
+	u32 t_min;
+	u32 fan_ctrl_default_mode;
+	bool fan_is_controlled_by_smc;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
new file mode 100644
index 0000000..8fae3d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "si/sid.h"
+#include "si_ih.h"
+
+static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);
+
+static void si_ih_enable_interrupts(struct amdgpu_device *adev)
+{
+	u32 ih_cntl = RREG32(IH_CNTL);
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+	ih_cntl |= ENABLE_INTR;
+	ih_rb_cntl |= IH_RB_ENABLE;
+	WREG32(IH_CNTL, ih_cntl);
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	adev->irq.ih.enabled = true;
+}
+  
+static void si_ih_disable_interrupts(struct amdgpu_device *adev)
+{
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+	u32 ih_cntl = RREG32(IH_CNTL);
+
+	ih_rb_cntl &= ~IH_RB_ENABLE;
+	ih_cntl &= ~ENABLE_INTR;
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	WREG32(IH_CNTL, ih_cntl);
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+	adev->irq.ih.enabled = false;
+	adev->irq.ih.rptr = 0;
+}
+
+static int si_ih_irq_init(struct amdgpu_device *adev)
+{
+	int rb_bufsz;
+	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+	u64 wptr_off;
+
+	si_ih_disable_interrupts(adev);
+	WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
+	interrupt_cntl = RREG32(INTERRUPT_CNTL);
+	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+	WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+	WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
+	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
+
+	ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
+		     IH_WPTR_OVERFLOW_CLEAR |
+		     (rb_bufsz << 1) |
+		     IH_WPTR_WRITEBACK_ENABLE;
+
+	wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
+	WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
+	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+
+	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
+	if (adev->irq.msi_enabled)
+		ih_cntl |= RPTR_REARM;
+	WREG32(IH_CNTL, ih_cntl);
+
+	pci_set_master(adev->pdev);
+	si_ih_enable_interrupts(adev);
+
+	return 0;
+}
+
+static void si_ih_irq_disable(struct amdgpu_device *adev)
+{
+	si_ih_disable_interrupts(adev);
+	mdelay(1);
+}
+
+static u32 si_ih_get_wptr(struct amdgpu_device *adev)
+{
+	u32 wptr, tmp;
+
+	wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+
+	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
+		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
+		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+			wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
+		adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+		tmp = RREG32(IH_RB_CNTL);
+		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+		WREG32(IH_RB_CNTL, tmp);
+	}
+	return (wptr & adev->irq.ih.ptr_mask);
+}
+
+static void si_ih_decode_iv(struct amdgpu_device *adev,
+			     struct amdgpu_iv_entry *entry)
+{
+	u32 ring_index = adev->irq.ih.rptr >> 2;
+	uint32_t dw[4];
+
+	dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
+	dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
+	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
+	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+
+	entry->src_id = dw[0] & 0xff;
+	entry->src_data = dw[1] & 0xfffffff;
+	entry->ring_id = dw[2] & 0xff;
+	entry->vm_id = (dw[2] >> 8) & 0xff;
+
+	adev->irq.ih.rptr += 16;
+}
+
+static void si_ih_set_rptr(struct amdgpu_device *adev)
+{
+	WREG32(IH_RB_RPTR, adev->irq.ih.rptr);
+}
+
+static int si_ih_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	si_ih_set_interrupt_funcs(adev);
+
+	return 0;
+}
+
+static int si_ih_sw_init(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+	if (r)
+		return r;
+
+	return amdgpu_irq_init(adev);
+}
+
+static int si_ih_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_irq_fini(adev);
+	amdgpu_ih_ring_fini(adev);
+
+	return 0;
+}
+
+static int si_ih_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return si_ih_irq_init(adev);
+}
+
+static int si_ih_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	si_ih_irq_disable(adev);
+
+	return 0;
+}
+
+static int si_ih_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return si_ih_hw_fini(adev);
+}
+
+static int si_ih_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return si_ih_hw_init(adev);
+}
+
+static bool si_ih_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 tmp = RREG32(SRBM_STATUS);
+
+	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+		return false;
+
+	return true;
+}
+
+static int si_ih_wait_for_idle(void *handle)
+{
+	unsigned i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (si_ih_is_idle(handle))
+			return 0;
+		udelay(1);
+	}
+	return -ETIMEDOUT;
+}
+
+static int si_ih_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	u32 srbm_soft_reset = 0;
+	u32 tmp = RREG32(SRBM_STATUS);
+
+	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
+
+	if (srbm_soft_reset) {
+		tmp = RREG32(SRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+	}
+
+	return 0;
+}
+
+static int si_ih_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int si_ih_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs si_ih_ip_funcs = {
+	.name = "si_ih",
+	.early_init = si_ih_early_init,
+	.late_init = NULL,
+	.sw_init = si_ih_sw_init,
+	.sw_fini = si_ih_sw_fini,
+	.hw_init = si_ih_hw_init,
+	.hw_fini = si_ih_hw_fini,
+	.suspend = si_ih_suspend,
+	.resume = si_ih_resume,
+	.is_idle = si_ih_is_idle,
+	.wait_for_idle = si_ih_wait_for_idle,
+	.soft_reset = si_ih_soft_reset,
+	.set_clockgating_state = si_ih_set_clockgating_state,
+	.set_powergating_state = si_ih_set_powergating_state,
+};
+
+static const struct amdgpu_ih_funcs si_ih_funcs = {
+	.get_wptr = si_ih_get_wptr,
+	.decode_iv = si_ih_decode_iv,
+	.set_rptr = si_ih_set_rptr
+};
+
+static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+	if (adev->irq.ih_funcs == NULL)
+		adev->irq.ih_funcs = &si_ih_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h b/drivers/gpu/drm/amd/amdgpu/si_ih.h
similarity index 72%
copy from drivers/gpu/drm/amd/amdgpu/iceland_smum.h
copy to drivers/gpu/drm/amd/amdgpu/si_ih.h
index 5983e31..f3e3a95 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2015 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -21,21 +21,9 @@
  *
  */
 
-#ifndef ICELAND_SMUM_H
-#define ICELAND_SMUM_H
+#ifndef __SI_IH_H__
+#define __SI_IH_H__
 
-#include "ppsmc.h"
-
-extern int iceland_smu_init(struct amdgpu_device *adev);
-extern int iceland_smu_fini(struct amdgpu_device *adev);
-extern int iceland_smu_start(struct amdgpu_device *adev);
-
-struct iceland_smu_private_data
-{
-	uint8_t *header;
-	uint8_t *mec_image;
-	uint32_t header_addr_high;
-	uint32_t header_addr_low;
-};
+extern const struct amd_ip_funcs si_ih_ip_funcs;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_smc.c b/drivers/gpu/drm/amd/amdgpu/si_smc.c
new file mode 100644
index 0000000..668ba99
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_smc.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "si/sid.h"
+#include "ppsmc.h"
+#include "amdgpu_ucode.h"
+#include "sislands_smc.h"
+
+static int si_set_smc_sram_address(struct amdgpu_device *adev,
+				   u32 smc_address, u32 limit)
+{
+	if (smc_address & 3)
+		return -EINVAL;
+	if ((smc_address + 3) > limit)
+		return -EINVAL;
+
+	WREG32(SMC_IND_INDEX_0, smc_address);
+	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+
+	return 0;
+}
+
+int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
+				u32 smc_start_address,
+				const u8 *src, u32 byte_count, u32 limit)
+{
+	unsigned long flags;
+	int ret = 0;
+	u32 data, original_data, addr, extra_shift;
+
+	if (smc_start_address & 3)
+		return -EINVAL;
+	if ((smc_start_address + byte_count) > limit)
+		return -EINVAL;
+
+	addr = smc_start_address;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	while (byte_count >= 4) {
+		/* SMC address space is BE */
+		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+		ret = si_set_smc_sram_address(adev, addr, limit);
+		if (ret)
+			goto done;
+
+		WREG32(SMC_IND_DATA_0, data);
+
+		src += 4;
+		byte_count -= 4;
+		addr += 4;
+	}
+
+	/* RMW for the final bytes */
+	if (byte_count > 0) {
+		data = 0;
+
+		ret = si_set_smc_sram_address(adev, addr, limit);
+		if (ret)
+			goto done;
+
+		original_data = RREG32(SMC_IND_DATA_0);
+		extra_shift = 8 * (4 - byte_count);
+
+		while (byte_count > 0) {
+			/* SMC address space is BE */
+			data = (data << 8) + *src++;
+			byte_count--;
+		}
+
+		data <<= extra_shift;
+		data |= (original_data & ~((~0UL) << extra_shift));
+
+		ret = si_set_smc_sram_address(adev, addr, limit);
+		if (ret)
+			goto done;
+
+		WREG32(SMC_IND_DATA_0, data);
+	}
+
+done:
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+	return ret;
+}
+
+void amdgpu_si_start_smc(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+
+	tmp &= ~RST_REG;
+
+	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
+void amdgpu_si_reset_smc(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	RREG32(CB_CGTT_SCLK_CTRL);
+	RREG32(CB_CGTT_SCLK_CTRL);
+	RREG32(CB_CGTT_SCLK_CTRL);
+	RREG32(CB_CGTT_SCLK_CTRL);
+
+	tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |
+	      RST_REG;
+	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
+int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev)
+{
+	static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
+
+	return amdgpu_si_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
+}
+
+void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable)
+{
+	u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+	if (enable)
+		tmp &= ~CK_DISABLE;
+	else
+		tmp |= CK_DISABLE;
+
+	WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+bool amdgpu_si_is_smc_running(struct amdgpu_device *adev)
+{
+	u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+	u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+	if (!(rst & RST_REG) && !(clk & CK_DISABLE))
+		return true;
+
+	return false;
+}
+
+PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
+				       PPSMC_Msg msg)
+{
+	u32 tmp;
+	int i;
+
+	if (!amdgpu_si_is_smc_running(adev))
+		return PPSMC_Result_Failed;
+
+	WREG32(SMC_MESSAGE_0, msg);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32(SMC_RESP_0);
+		if (tmp != 0)
+			break;
+		udelay(1);
+	}
+
+	return (PPSMC_Result)RREG32(SMC_RESP_0);
+}
+
+PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
+{
+	u32 tmp;
+	int i;
+
+	if (!amdgpu_si_is_smc_running(adev))
+		return PPSMC_Result_OK;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+		if ((tmp & CKEN) == 0)
+			break;
+		udelay(1);
+	}
+
+	return PPSMC_Result_OK;
+}
+
+int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
+{
+	const struct smc_firmware_header_v1_0 *hdr;
+	unsigned long flags;
+	u32 ucode_start_address;
+	u32 ucode_size;
+	const u8 *src;
+	u32 data;
+
+	if (!adev->pm.fw)
+		return -EINVAL;
+
+	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+
+	amdgpu_ucode_print_smc_hdr(&hdr->header);
+
+	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+	ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+	src = (const u8 *)
+		(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+	if (ucode_size & 3)
+		return -EINVAL;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	WREG32(SMC_IND_INDEX_0, ucode_start_address);
+	WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
+	while (ucode_size >= 4) {
+		/* SMC address space is BE */
+		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+		WREG32(SMC_IND_DATA_0, data);
+
+		src += 4;
+		ucode_size -= 4;
+	}
+	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+	return 0;
+}
+
+int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+				  u32 *value, u32 limit)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	ret = si_set_smc_sram_address(adev, smc_address, limit);
+	if (ret == 0)
+		*value = RREG32(SMC_IND_DATA_0);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+	return ret;
+}
+
+int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+				   u32 value, u32 limit)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	ret = si_set_smc_sram_address(adev, smc_address, limit);
+	if (ret == 0)
+		WREG32(SMC_IND_DATA_0, value);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
new file mode 100644
index 0000000..d2930ec
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
@@ -0,0 +1,423 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef PP_SISLANDS_SMC_H
+#define PP_SISLANDS_SMC_H
+
+#include "ppsmc.h"
+
+#pragma pack(push, 1)
+
+#define SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
+
+struct PP_SIslands_Dpm2PerfLevel
+{
+    uint8_t MaxPS;
+    uint8_t TgtAct;
+    uint8_t MaxPS_StepInc;
+    uint8_t MaxPS_StepDec;
+    uint8_t PSSamplingTime;
+    uint8_t NearTDPDec;
+    uint8_t AboveSafeInc;
+    uint8_t BelowSafeInc;
+    uint8_t PSDeltaLimit;
+    uint8_t PSDeltaWin;
+    uint16_t PwrEfficiencyRatio;
+    uint8_t Reserved[4];
+};
+
+typedef struct PP_SIslands_Dpm2PerfLevel PP_SIslands_Dpm2PerfLevel;
+
+struct PP_SIslands_DPM2Status
+{
+    uint32_t    dpm2Flags;
+    uint8_t     CurrPSkip;
+    uint8_t     CurrPSkipPowerShift;
+    uint8_t     CurrPSkipTDP;
+    uint8_t     CurrPSkipOCP;
+    uint8_t     MaxSPLLIndex;
+    uint8_t     MinSPLLIndex;
+    uint8_t     CurrSPLLIndex;
+    uint8_t     InfSweepMode;
+    uint8_t     InfSweepDir;
+    uint8_t     TDPexceeded;
+    uint8_t     reserved;
+    uint8_t     SwitchDownThreshold;
+    uint32_t    SwitchDownCounter;
+    uint32_t    SysScalingFactor;
+};
+
+typedef struct PP_SIslands_DPM2Status PP_SIslands_DPM2Status;
+
+struct PP_SIslands_DPM2Parameters
+{
+    uint32_t    TDPLimit;
+    uint32_t    NearTDPLimit;
+    uint32_t    SafePowerLimit;
+    uint32_t    PowerBoostLimit;
+    uint32_t    MinLimitDelta;
+};
+typedef struct PP_SIslands_DPM2Parameters PP_SIslands_DPM2Parameters;
+
+struct PP_SIslands_PAPMStatus
+{
+    uint32_t    EstimatedDGPU_T;
+    uint32_t    EstimatedDGPU_P;
+    uint32_t    EstimatedAPU_T;
+    uint32_t    EstimatedAPU_P;
+    uint8_t     dGPU_T_Limit_Exceeded;
+    uint8_t     reserved[3];
+};
+typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus;
+
+struct PP_SIslands_PAPMParameters
+{
+    uint32_t    NearTDPLimitTherm;
+    uint32_t    NearTDPLimitPAPM;
+    uint32_t    PlatformPowerLimit;
+    uint32_t    dGPU_T_Limit;
+    uint32_t    dGPU_T_Warning;
+    uint32_t    dGPU_T_Hysteresis;
+};
+typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters;
+
+struct SISLANDS_SMC_SCLK_VALUE
+{
+    uint32_t    vCG_SPLL_FUNC_CNTL;
+    uint32_t    vCG_SPLL_FUNC_CNTL_2;
+    uint32_t    vCG_SPLL_FUNC_CNTL_3;
+    uint32_t    vCG_SPLL_FUNC_CNTL_4;
+    uint32_t    vCG_SPLL_SPREAD_SPECTRUM;
+    uint32_t    vCG_SPLL_SPREAD_SPECTRUM_2;
+    uint32_t    sclk_value;
+};
+
+typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE;
+
+struct SISLANDS_SMC_MCLK_VALUE
+{
+    uint32_t    vMPLL_FUNC_CNTL;
+    uint32_t    vMPLL_FUNC_CNTL_1;
+    uint32_t    vMPLL_FUNC_CNTL_2;
+    uint32_t    vMPLL_AD_FUNC_CNTL;
+    uint32_t    vMPLL_DQ_FUNC_CNTL;
+    uint32_t    vMCLK_PWRMGT_CNTL;
+    uint32_t    vDLL_CNTL;
+    uint32_t    vMPLL_SS;
+    uint32_t    vMPLL_SS2;
+    uint32_t    mclk_value;
+};
+
+typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE;
+
+struct SISLANDS_SMC_VOLTAGE_VALUE
+{
+    uint16_t    value;
+    uint8_t     index;
+    uint8_t     phase_settings;
+};
+
+typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE;
+
+struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL
+{
+    uint8_t                     ACIndex;
+    uint8_t                     displayWatermark;
+    uint8_t                     gen2PCIE;
+    uint8_t                     UVDWatermark;
+    uint8_t                     VCEWatermark;
+    uint8_t                     strobeMode;
+    uint8_t                     mcFlags;
+    uint8_t                     padding;
+    uint32_t                    aT;
+    uint32_t                    bSP;
+    SISLANDS_SMC_SCLK_VALUE     sclk;
+    SISLANDS_SMC_MCLK_VALUE     mclk;
+    SISLANDS_SMC_VOLTAGE_VALUE  vddc;
+    SISLANDS_SMC_VOLTAGE_VALUE  mvdd;
+    SISLANDS_SMC_VOLTAGE_VALUE  vddci;
+    SISLANDS_SMC_VOLTAGE_VALUE  std_vddc;
+    uint8_t                     hysteresisUp;
+    uint8_t                     hysteresisDown;
+    uint8_t                     stateFlags;
+    uint8_t                     arbRefreshState;
+    uint32_t                    SQPowerThrottle;
+    uint32_t                    SQPowerThrottle_2;
+    uint32_t                    MaxPoweredUpCU;
+    SISLANDS_SMC_VOLTAGE_VALUE  high_temp_vddc;
+    SISLANDS_SMC_VOLTAGE_VALUE  low_temp_vddc;
+    uint32_t                    reserved[2];
+    PP_SIslands_Dpm2PerfLevel   dpm2;
+};
+
+#define SISLANDS_SMC_STROBE_RATIO    0x0F
+#define SISLANDS_SMC_STROBE_ENABLE   0x10
+
+#define SISLANDS_SMC_MC_EDC_RD_FLAG  0x01
+#define SISLANDS_SMC_MC_EDC_WR_FLAG  0x02
+#define SISLANDS_SMC_MC_RTT_ENABLE   0x04
+#define SISLANDS_SMC_MC_STUTTER_EN   0x08
+#define SISLANDS_SMC_MC_PG_EN        0x10
+
+typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL;
+
+struct SISLANDS_SMC_SWSTATE
+{
+    uint8_t                             flags;
+    uint8_t                             levelCount;
+    uint8_t                             padding2;
+    uint8_t                             padding3;
+    SISLANDS_SMC_HW_PERFORMANCE_LEVEL   levels[1];
+};
+
+typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
+
+#define SISLANDS_SMC_VOLTAGEMASK_VDDC  0
+#define SISLANDS_SMC_VOLTAGEMASK_MVDD  1
+#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3
+#define SISLANDS_SMC_VOLTAGEMASK_MAX   4
+
+struct SISLANDS_SMC_VOLTAGEMASKTABLE
+{
+    uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
+
+#define SISLANDS_MAX_NO_VREG_STEPS 32
+
+struct SISLANDS_SMC_STATETABLE
+{
+    uint8_t                             thermalProtectType;
+    uint8_t                             systemFlags;
+    uint8_t                             maxVDDCIndexInPPTable;
+    uint8_t                             extraFlags;
+    uint32_t                            lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
+    SISLANDS_SMC_VOLTAGEMASKTABLE       voltageMaskTable;
+    SISLANDS_SMC_VOLTAGEMASKTABLE       phaseMaskTable;
+    PP_SIslands_DPM2Parameters          dpm2Params;
+    SISLANDS_SMC_SWSTATE                initialState;
+    SISLANDS_SMC_SWSTATE                ACPIState;
+    SISLANDS_SMC_SWSTATE                ULVState;
+    SISLANDS_SMC_SWSTATE                driverState;
+    SISLANDS_SMC_HW_PERFORMANCE_LEVEL   dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+};
+
+typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
+
+#define SI_SMC_SOFT_REGISTER_mclk_chg_timeout         0x0
+#define SI_SMC_SOFT_REGISTER_delay_vreg               0xC
+#define SI_SMC_SOFT_REGISTER_delay_acpi               0x28
+#define SI_SMC_SOFT_REGISTER_seq_index                0x5C
+#define SI_SMC_SOFT_REGISTER_mvdd_chg_time            0x60
+#define SI_SMC_SOFT_REGISTER_mclk_switch_lim          0x70
+#define SI_SMC_SOFT_REGISTER_watermark_threshold      0x78
+#define SI_SMC_SOFT_REGISTER_phase_shedding_delay     0x88
+#define SI_SMC_SOFT_REGISTER_ulv_volt_change_delay    0x8C
+#define SI_SMC_SOFT_REGISTER_mc_block_delay           0x98
+#define SI_SMC_SOFT_REGISTER_ticks_per_us             0xA8
+#define SI_SMC_SOFT_REGISTER_crtc_index               0xC4
+#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min 0xC8
+#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max 0xCC
+#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width  0xF4
+#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen   0xFC
+#define SI_SMC_SOFT_REGISTER_vr_hot_gpio              0x100
+#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type     0x118
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd   0x11c
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc   0x120
+
+struct PP_SIslands_FanTable
+{
+	uint8_t  fdo_mode;
+	uint8_t  padding;
+	int16_t  temp_min;
+	int16_t  temp_med;
+	int16_t  temp_max;
+	int16_t  slope1;
+	int16_t  slope2;
+	int16_t  fdo_min;
+	int16_t  hys_up;
+	int16_t  hys_down;
+	int16_t  hys_slope;
+	int16_t  temp_resp_lim;
+	int16_t  temp_curr;
+	int16_t  slope_curr;
+	int16_t  pwm_curr;
+	uint32_t refresh_period;
+	int16_t  fdo_max;
+	uint8_t  temp_src;
+	int8_t  padding2;
+};
+
+typedef struct PP_SIslands_FanTable PP_SIslands_FanTable;
+
+#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
+#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
+
+#define SMC_SISLANDS_SCALE_I  7
+#define SMC_SISLANDS_SCALE_R 12
+
+struct PP_SIslands_CacConfig
+{
+    uint16_t   cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
+    uint32_t   lkge_lut_V0;
+    uint32_t   lkge_lut_Vstep;
+    uint32_t   WinTime;
+    uint32_t   R_LL;
+    uint32_t   calculation_repeats;
+    uint32_t   l2numWin_TDP;
+    uint32_t   dc_cac;
+    uint8_t    lts_truncate_n;
+    uint8_t    SHIFT_N;
+    uint8_t    log2_PG_LKG_SCALE;
+    uint8_t    cac_temp;
+    uint32_t   lkge_lut_T0;
+    uint32_t   lkge_lut_Tstep;
+};
+
+typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig;
+
+#define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16
+#define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
+
+struct SMC_SIslands_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress;
+
+struct SMC_SIslands_MCRegisterSet
+{
+    uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet;
+
+struct SMC_SIslands_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMC_SIslands_MCRegisterAddress      address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+    SMC_SIslands_MCRegisterSet          data[SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters;
+
+struct SMC_SIslands_MCArbDramTimingRegisterSet
+{
+    uint32_t mc_arb_dram_timing;
+    uint32_t mc_arb_dram_timing2;
+    uint8_t  mc_arb_rfsh_rate;
+    uint8_t  mc_arb_burst_time;
+    uint8_t  padding[2];
+};
+
+typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet;
+
+struct SMC_SIslands_MCArbDramTimingRegisters
+{
+    uint8_t                                     arb_current;
+    uint8_t                                     reserved[3];
+    SMC_SIslands_MCArbDramTimingRegisterSet     data[16];
+};
+
+typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters;
+
+struct SMC_SISLANDS_SPLL_DIV_TABLE
+{
+    uint32_t    freq[256];
+    uint32_t    ss[256];
+};
+
+#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK  0x01ffffff
+#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0
+#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK   0xfe000000
+#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT  25
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK   0x000fffff
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT  0
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK   0xfff00000
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT  20
+
+typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE;
+
+#define SMC_SISLANDS_DTE_MAX_FILTER_STAGES 5
+
+#define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16
+
+struct Smc_SIslands_DTE_Configuration
+{
+    uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+    uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+    uint32_t K;
+    uint32_t T0;
+    uint32_t MaxT;
+    uint8_t  WindowSize;
+    uint8_t  Tdep_count;
+    uint8_t  temp_select;
+    uint8_t  DTE_mode;
+    uint8_t  T_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+    uint32_t Tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+    uint32_t Tdep_R[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+    uint32_t Tthreshold;
+};
+
+typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration;
+
+#define SMC_SISLANDS_DTE_STATUS_FLAG_DTE_ON 1
+
+#define SISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x10000
+
+#define SISLANDS_SMC_FIRMWARE_HEADER_version                   0x0
+#define SISLANDS_SMC_FIRMWARE_HEADER_flags                     0x4
+#define SISLANDS_SMC_FIRMWARE_HEADER_softRegisters             0xC
+#define SISLANDS_SMC_FIRMWARE_HEADER_stateTable                0x10
+#define SISLANDS_SMC_FIRMWARE_HEADER_fanTable                  0x14
+#define SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable            0x18
+#define SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable           0x24
+#define SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x30
+#define SISLANDS_SMC_FIRMWARE_HEADER_spllTable                 0x38
+#define SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration          0x40
+#define SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters            0x48
+
+#pragma pack(pop)
+
+int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
+				u32 smc_start_address,
+				const u8 *src, u32 byte_count, u32 limit);
+void amdgpu_si_start_smc(struct amdgpu_device *adev);
+void amdgpu_si_reset_smc(struct amdgpu_device *adev);
+int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev);
+void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable);
+bool amdgpu_si_is_smc_running(struct amdgpu_device *adev);
+PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg);
+PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev);
+int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit);
+int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+				  u32 *value, u32 limit);
+int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+				   u32 value, u32 limit);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
deleted file mode 100644
index f06f6f4..0000000
--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "tonga_smum.h"
-
-MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
-
-static void tonga_dpm_set_funcs(struct amdgpu_device *adev);
-
-static int tonga_dpm_early_init(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	tonga_dpm_set_funcs(adev);
-
-	return 0;
-}
-
-static int tonga_dpm_init_microcode(struct amdgpu_device *adev)
-{
-	char fw_name[30] = "amdgpu/tonga_smc.bin";
-	int err;
-	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
-	if (err)
-		goto out;
-	err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
-	if (err) {
-		DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
-		release_firmware(adev->pm.fw);
-		adev->pm.fw = NULL;
-	}
-	return err;
-}
-
-static int tonga_dpm_sw_init(void *handle)
-{
-	int ret;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	ret = tonga_dpm_init_microcode(adev);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int tonga_dpm_sw_fini(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	release_firmware(adev->pm.fw);
-	adev->pm.fw = NULL;
-
-	return 0;
-}
-
-static int tonga_dpm_hw_init(void *handle)
-{
-	int ret;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	mutex_lock(&adev->pm.mutex);
-
-	/* smu init only needs to be called at startup, not resume.
-	 * It should be in sw_init, but requires the fw info gathered
-	 * in sw_init from other IP modules.
-	 */
-	ret = tonga_smu_init(adev);
-	if (ret) {
-		DRM_ERROR("SMU initialization failed\n");
-		goto fail;
-	}
-
-	ret = tonga_smu_start(adev);
-	if (ret) {
-		DRM_ERROR("SMU start failed\n");
-		goto fail;
-	}
-
-	mutex_unlock(&adev->pm.mutex);
-	return 0;
-
-fail:
-	adev->firmware.smu_load = false;
-	mutex_unlock(&adev->pm.mutex);
-	return -EINVAL;
-}
-
-static int tonga_dpm_hw_fini(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	mutex_lock(&adev->pm.mutex);
-	/* smu fini only needs to be called at teardown, not suspend.
-	 * It should be in sw_fini, but we put it here for symmetry
-	 * with smu init.
-	 */
-	tonga_smu_fini(adev);
-	mutex_unlock(&adev->pm.mutex);
-	return 0;
-}
-
-static int tonga_dpm_suspend(void *handle)
-{
-	return tonga_dpm_hw_fini(handle);
-}
-
-static int tonga_dpm_resume(void *handle)
-{
-	return tonga_dpm_hw_init(handle);
-}
-
-static int tonga_dpm_set_clockgating_state(void *handle,
-			enum amd_clockgating_state state)
-{
-	return 0;
-}
-
-static int tonga_dpm_set_powergating_state(void *handle,
-			enum amd_powergating_state state)
-{
-	return 0;
-}
-
-const struct amd_ip_funcs tonga_dpm_ip_funcs = {
-	.name = "tonga_dpm",
-	.early_init = tonga_dpm_early_init,
-	.late_init = NULL,
-	.sw_init = tonga_dpm_sw_init,
-	.sw_fini = tonga_dpm_sw_fini,
-	.hw_init = tonga_dpm_hw_init,
-	.hw_fini = tonga_dpm_hw_fini,
-	.suspend = tonga_dpm_suspend,
-	.resume = tonga_dpm_resume,
-	.is_idle = NULL,
-	.wait_for_idle = NULL,
-	.soft_reset = NULL,
-	.set_clockgating_state = tonga_dpm_set_clockgating_state,
-	.set_powergating_state = tonga_dpm_set_powergating_state,
-};
-
-static const struct amdgpu_dpm_funcs tonga_dpm_funcs = {
-	.get_temperature = NULL,
-	.pre_set_power_state = NULL,
-	.set_power_state = NULL,
-	.post_set_power_state = NULL,
-	.display_configuration_changed = NULL,
-	.get_sclk = NULL,
-	.get_mclk = NULL,
-	.print_power_state = NULL,
-	.debugfs_print_current_performance_level = NULL,
-	.force_performance_level = NULL,
-	.vblank_too_short = NULL,
-	.powergate_uvd = NULL,
-};
-
-static void tonga_dpm_set_funcs(struct amdgpu_device *adev)
-{
-	if (NULL == adev->pm.funcs)
-		adev->pm.funcs = &tonga_dpm_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index c920558..b4ea229 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -373,10 +373,10 @@
 	return -ETIMEDOUT;
 }
 
-static int tonga_ih_soft_reset(void *handle)
+static bool tonga_ih_check_soft_reset(void *handle)
 {
-	u32 srbm_soft_reset = 0;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
 	u32 tmp = RREG32(mmSRBM_STATUS);
 
 	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
@@ -384,6 +384,46 @@
 						SOFT_RESET_IH, 1);
 
 	if (srbm_soft_reset) {
+		adev->irq.srbm_soft_reset = srbm_soft_reset;
+		return true;
+	} else {
+		adev->irq.srbm_soft_reset = 0;
+		return false;
+	}
+}
+
+static int tonga_ih_pre_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!adev->irq.srbm_soft_reset)
+		return 0;
+
+	return tonga_ih_hw_fini(adev);
+}
+
+static int tonga_ih_post_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!adev->irq.srbm_soft_reset)
+		return 0;
+
+	return tonga_ih_hw_init(adev);
+}
+
+static int tonga_ih_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset;
+
+	if (!adev->irq.srbm_soft_reset)
+		return 0;
+	srbm_soft_reset = adev->irq.srbm_soft_reset;
+
+	if (srbm_soft_reset) {
+		u32 tmp;
+
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -427,7 +467,10 @@
 	.resume = tonga_ih_resume,
 	.is_idle = tonga_ih_is_idle,
 	.wait_for_idle = tonga_ih_wait_for_idle,
+	.check_soft_reset = tonga_ih_check_soft_reset,
+	.pre_soft_reset = tonga_ih_pre_soft_reset,
 	.soft_reset = tonga_ih_soft_reset,
+	.post_soft_reset = tonga_ih_post_soft_reset,
 	.set_clockgating_state = tonga_ih_set_clockgating_state,
 	.set_powergating_state = tonga_ih_set_powergating_state,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
deleted file mode 100644
index 940de18..0000000
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ /dev/null
@@ -1,862 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "tonga_ppsmc.h"
-#include "tonga_smum.h"
-#include "smu_ucode_xfer_vi.h"
-#include "amdgpu_ucode.h"
-
-#include "smu/smu_7_1_2_d.h"
-#include "smu/smu_7_1_2_sh_mask.h"
-
-#define TONGA_SMC_SIZE 0x20000
-
-static int tonga_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
-{
-	uint32_t val;
-
-	if (smc_address & 3)
-		return -EINVAL;
-
-	if ((smc_address + 3) > limit)
-		return -EINVAL;
-
-	WREG32(mmSMC_IND_INDEX_0, smc_address);
-
-	val = RREG32(mmSMC_IND_ACCESS_CNTL);
-	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-	WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
-	return 0;
-}
-
-static int tonga_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
-{
-	uint32_t addr;
-	uint32_t data, orig_data;
-	int result = 0;
-	uint32_t extra_shift;
-	unsigned long flags;
-
-	if (smc_start_address & 3)
-		return -EINVAL;
-
-	if ((smc_start_address + byte_count) > limit)
-		return -EINVAL;
-
-	addr = smc_start_address;
-
-	spin_lock_irqsave(&adev->smc_idx_lock, flags);
-	while (byte_count >= 4) {
-		/* Bytes are written into the SMC addres space with the MSB first */
-		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
-		result = tonga_set_smc_sram_address(adev, addr, limit);
-
-		if (result)
-			goto out;
-
-		WREG32(mmSMC_IND_DATA_0, data);
-
-		src += 4;
-		byte_count -= 4;
-		addr += 4;
-	}
-
-	if (0 != byte_count) {
-		/* Now write odd bytes left, do a read modify write cycle */
-		data = 0;
-
-		result = tonga_set_smc_sram_address(adev, addr, limit);
-		if (result)
-			goto out;
-
-		orig_data = RREG32(mmSMC_IND_DATA_0);
-		extra_shift = 8 * (4 - byte_count);
-
-		while (byte_count > 0) {
-			data = (data << 8) + *src++;
-			byte_count--;
-		}
-
-		data <<= extra_shift;
-		data |= (orig_data & ~((~0UL) << extra_shift));
-
-		result = tonga_set_smc_sram_address(adev, addr, limit);
-		if (result)
-			goto out;
-
-		WREG32(mmSMC_IND_DATA_0, data);
-	}
-
-out:
-	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-	return result;
-}
-
-static int tonga_program_jump_on_start(struct amdgpu_device *adev)
-{
-	static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
-	tonga_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-
-	return 0;
-}
-
-static bool tonga_is_smc_ram_running(struct amdgpu_device *adev)
-{
-	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-	val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
-
-	return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
-}
-
-static int wait_smu_response(struct amdgpu_device *adev)
-{
-	int i;
-	uint32_t val;
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		val = RREG32(mmSMC_RESP_0);
-		if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout)
-		return -EINVAL;
-
-	return 0;
-}
-
-static int tonga_send_msg_to_smc_offset(struct amdgpu_device *adev)
-{
-	if (wait_smu_response(adev)) {
-		DRM_ERROR("Failed to send previous message\n");
-		return -EINVAL;
-	}
-
-	WREG32(mmSMC_MSG_ARG_0, 0x20000);
-	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
-	if (wait_smu_response(adev)) {
-		DRM_ERROR("Failed to send message\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
-{
-	if (!tonga_is_smc_ram_running(adev))
-	{
-		return -EINVAL;
-	}
-
-	if (wait_smu_response(adev)) {
-		DRM_ERROR("Failed to send previous message\n");
-		return -EINVAL;
-	}
-
-	WREG32(mmSMC_MESSAGE_0, msg);
-
-	if (wait_smu_response(adev)) {
-		DRM_ERROR("Failed to send message\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int tonga_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
-						PPSMC_Msg msg)
-{
-	if (wait_smu_response(adev)) {
-		DRM_ERROR("Failed to send previous message\n");
-		return -EINVAL;
-	}
-
-	WREG32(mmSMC_MESSAGE_0, msg);
-
-	return 0;
-}
-
-static int tonga_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
-						PPSMC_Msg msg,
-						uint32_t parameter)
-{
-	if (!tonga_is_smc_ram_running(adev))
-		return -EINVAL;
-
-	if (wait_smu_response(adev)) {
-		DRM_ERROR("Failed to send previous message\n");
-		return -EINVAL;
-	}
-
-	WREG32(mmSMC_MSG_ARG_0, parameter);
-
-	return tonga_send_msg_to_smc(adev, msg);
-}
-
-static int tonga_send_msg_to_smc_with_parameter_without_waiting(
-					struct amdgpu_device *adev,
-					PPSMC_Msg msg, uint32_t parameter)
-{
-	if (wait_smu_response(adev)) {
-		DRM_ERROR("Failed to send previous message\n");
-		return -EINVAL;
-	}
-
-	WREG32(mmSMC_MSG_ARG_0, parameter);
-
-	return tonga_send_msg_to_smc_without_waiting(adev, msg);
-}
-
-#if 0 /* not used yet */
-static int tonga_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
-	int i;
-	uint32_t val;
-
-	if (!tonga_is_smc_ram_running(adev))
-		return -EINVAL;
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-		if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout)
-		return -EINVAL;
-
-	return 0;
-}
-#endif
-
-static int tonga_smu_upload_firmware_image(struct amdgpu_device *adev)
-{
-	const struct smc_firmware_header_v1_0 *hdr;
-	uint32_t ucode_size;
-	uint32_t ucode_start_address;
-	const uint8_t *src;
-	uint32_t val;
-	uint32_t byte_count;
-	uint32_t *data;
-	unsigned long flags;
-
-	if (!adev->pm.fw)
-		return -EINVAL;
-
-	/* Skip SMC ucode loading on SR-IOV capable boards.
-	 * vbios does this for us in asic_init in that case.
-	 */
-	if (adev->virtualization.supports_sr_iov)
-		return 0;
-
-	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
-	amdgpu_ucode_print_smc_hdr(&hdr->header);
-
-	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
-	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
-	ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
-	src = (const uint8_t *)
-		(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-
-	if (ucode_size & 3) {
-		DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
-		return -EINVAL;
-	}
-
-	if (ucode_size > TONGA_SMC_SIZE) {
-		DRM_ERROR("SMC address is beyond the SMC RAM area\n");
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&adev->smc_idx_lock, flags);
-	WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
-
-	val = RREG32(mmSMC_IND_ACCESS_CNTL);
-	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
-	WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
-	byte_count = ucode_size;
-	data = (uint32_t *)src;
-	for (; byte_count >= 4; data++, byte_count -= 4)
-		WREG32(mmSMC_IND_DATA_0, data[0]);
-
-	val = RREG32(mmSMC_IND_ACCESS_CNTL);
-	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-	WREG32(mmSMC_IND_ACCESS_CNTL, val);
-	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
-	return 0;
-}
-
-#if 0 /* not used yet */
-static int tonga_read_smc_sram_dword(struct amdgpu_device *adev,
-				uint32_t smc_address,
-				uint32_t *value,
-				uint32_t limit)
-{
-	int result;
-	unsigned long flags;
-
-	spin_lock_irqsave(&adev->smc_idx_lock, flags);
-	result = tonga_set_smc_sram_address(adev, smc_address, limit);
-	if (result == 0)
-		*value = RREG32(mmSMC_IND_DATA_0);
-	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-	return result;
-}
-
-static int tonga_write_smc_sram_dword(struct amdgpu_device *adev,
-				uint32_t smc_address,
-				uint32_t value,
-				uint32_t limit)
-{
-	int result;
-	unsigned long flags;
-
-	spin_lock_irqsave(&adev->smc_idx_lock, flags);
-	result = tonga_set_smc_sram_address(adev, smc_address, limit);
-	if (result == 0)
-		WREG32(mmSMC_IND_DATA_0, value);
-	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-	return result;
-}
-
-static int tonga_smu_stop_smc(struct amdgpu_device *adev)
-{
-	uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
-	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
-	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
-	return 0;
-}
-#endif
-
-static enum AMDGPU_UCODE_ID tonga_convert_fw_type(uint32_t fw_type)
-{
-	switch (fw_type) {
-		case UCODE_ID_SDMA0:
-			return AMDGPU_UCODE_ID_SDMA0;
-		case UCODE_ID_SDMA1:
-			return AMDGPU_UCODE_ID_SDMA1;
-		case UCODE_ID_CP_CE:
-			return AMDGPU_UCODE_ID_CP_CE;
-		case UCODE_ID_CP_PFP:
-			return AMDGPU_UCODE_ID_CP_PFP;
-		case UCODE_ID_CP_ME:
-			return AMDGPU_UCODE_ID_CP_ME;
-		case UCODE_ID_CP_MEC:
-		case UCODE_ID_CP_MEC_JT1:
-			return AMDGPU_UCODE_ID_CP_MEC1;
-		case UCODE_ID_CP_MEC_JT2:
-			return AMDGPU_UCODE_ID_CP_MEC2;
-		case UCODE_ID_RLC_G:
-			return AMDGPU_UCODE_ID_RLC_G;
-		default:
-			DRM_ERROR("ucode type is out of range!\n");
-			return AMDGPU_UCODE_ID_MAXIMUM;
-	}
-}
-
-static int tonga_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
-						uint32_t fw_type,
-						struct SMU_Entry *entry)
-{
-	enum AMDGPU_UCODE_ID id = tonga_convert_fw_type(fw_type);
-	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
-	const struct gfx_firmware_header_v1_0 *header = NULL;
-	uint64_t gpu_addr;
-	uint32_t data_size;
-
-	if (ucode->fw == NULL)
-		return -EINVAL;
-
-	gpu_addr  = ucode->mc_addr;
-	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
-	data_size = le32_to_cpu(header->header.ucode_size_bytes);
-
-	if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
-		(fw_type == UCODE_ID_CP_MEC_JT2)) {
-		gpu_addr += le32_to_cpu(header->jt_offset) << 2;
-		data_size = le32_to_cpu(header->jt_size) << 2;
-	}
-
-	entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
-	entry->id = (uint16_t)fw_type;
-	entry->image_addr_high = upper_32_bits(gpu_addr);
-	entry->image_addr_low = lower_32_bits(gpu_addr);
-	entry->meta_data_addr_high = 0;
-	entry->meta_data_addr_low = 0;
-	entry->data_size_byte = data_size;
-	entry->num_register_entries = 0;
-
-	if (fw_type == UCODE_ID_RLC_G)
-		entry->flags = 1;
-	else
-		entry->flags = 0;
-
-	return 0;
-}
-
-static int tonga_smu_request_load_fw(struct amdgpu_device *adev)
-{
-	struct tonga_smu_private_data *private = (struct tonga_smu_private_data *)adev->smu.priv;
-	struct SMU_DRAMData_TOC *toc;
-	uint32_t fw_to_load;
-
-	WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
-
-	tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
-	tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
-
-	toc = (struct SMU_DRAMData_TOC *)private->header;
-	toc->num_entries = 0;
-	toc->structure_version = 1;
-
-	if (!adev->firmware.smu_load)
-		return 0;
-
-	if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for RLC\n");
-		return -EINVAL;
-	}
-
-	if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for CE\n");
-		return -EINVAL;
-	}
-
-	if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for PFP\n");
-		return -EINVAL;
-	}
-
-	if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for ME\n");
-		return -EINVAL;
-	}
-
-	if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for MEC\n");
-		return -EINVAL;
-	}
-
-	if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
-		return -EINVAL;
-	}
-
-	if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
-		return -EINVAL;
-	}
-
-	if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for SDMA0\n");
-		return -EINVAL;
-	}
-
-	if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
-			&toc->entry[toc->num_entries++])) {
-		DRM_ERROR("Failed to get firmware entry for SDMA1\n");
-		return -EINVAL;
-	}
-
-	tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
-	tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
-
-	fw_to_load = UCODE_ID_RLC_G_MASK |
-			UCODE_ID_SDMA0_MASK |
-			UCODE_ID_SDMA1_MASK |
-			UCODE_ID_CP_CE_MASK |
-			UCODE_ID_CP_ME_MASK |
-			UCODE_ID_CP_PFP_MASK |
-			UCODE_ID_CP_MEC_MASK;
-
-	if (tonga_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
-		DRM_ERROR("Fail to request SMU load ucode\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static uint32_t tonga_smu_get_mask_for_fw_type(uint32_t fw_type)
-{
-	switch (fw_type) {
-		case AMDGPU_UCODE_ID_SDMA0:
-			return UCODE_ID_SDMA0_MASK;
-		case AMDGPU_UCODE_ID_SDMA1:
-			return UCODE_ID_SDMA1_MASK;
-		case AMDGPU_UCODE_ID_CP_CE:
-			return UCODE_ID_CP_CE_MASK;
-		case AMDGPU_UCODE_ID_CP_PFP:
-			return UCODE_ID_CP_PFP_MASK;
-		case AMDGPU_UCODE_ID_CP_ME:
-			return UCODE_ID_CP_ME_MASK;
-		case AMDGPU_UCODE_ID_CP_MEC1:
-			return UCODE_ID_CP_MEC_MASK;
-		case AMDGPU_UCODE_ID_CP_MEC2:
-			return UCODE_ID_CP_MEC_MASK;
-		case AMDGPU_UCODE_ID_RLC_G:
-			return UCODE_ID_RLC_G_MASK;
-		default:
-			DRM_ERROR("ucode type is out of range!\n");
-			return 0;
-	}
-}
-
-static int tonga_smu_check_fw_load_finish(struct amdgpu_device *adev,
-					uint32_t fw_type)
-{
-	uint32_t fw_mask = tonga_smu_get_mask_for_fw_type(fw_type);
-	int i;
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout) {
-		DRM_ERROR("check firmware loading failed\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int tonga_smu_start_in_protection_mode(struct amdgpu_device *adev)
-{
-	int result;
-	uint32_t val;
-	int i;
-
-	/* Assert reset */
-	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
-	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-	result = tonga_smu_upload_firmware_image(adev);
-	if (result)
-		return result;
-
-	/* Clear status */
-	WREG32_SMC(ixSMU_STATUS, 0);
-
-	/* Enable clock */
-	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
-	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
-	/* De-assert reset */
-	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
-	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-	/* Set SMU Auto Start */
-	val = RREG32_SMC(ixSMU_INPUT_DATA);
-	val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
-	WREG32_SMC(ixSMU_INPUT_DATA, val);
-
-	/* Clear firmware interrupt enable flag */
-	WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		val = RREG32_SMC(ixRCU_UC_EVENTS);
-		if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout) {
-		DRM_ERROR("Interrupt is not enabled by firmware\n");
-		return -EINVAL;
-	}
-
-	/* Call Test SMU message with 0x20000 offset
-	 * to trigger SMU start
-	 */
-	tonga_send_msg_to_smc_offset(adev);
-
-	/* Wait for done bit to be set */
-	for (i = 0; i < adev->usec_timeout; i++) {
-		val = RREG32_SMC(ixSMU_STATUS);
-		if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout) {
-		DRM_ERROR("Timeout for SMU start\n");
-		return -EINVAL;
-	}
-
-	/* Check pass/failed indicator */
-	val = RREG32_SMC(ixSMU_STATUS);
-	if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
-		DRM_ERROR("SMU Firmware start failed\n");
-		return -EINVAL;
-	}
-
-	/* Wait for firmware to initialize */
-	for (i = 0; i < adev->usec_timeout; i++) {
-		val = RREG32_SMC(ixFIRMWARE_FLAGS);
-		if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout) {
-		DRM_ERROR("SMU firmware initialization failed\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int tonga_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
-{
-	int i, result;
-	uint32_t val;
-
-	/* wait for smc boot up */
-	for (i = 0; i < adev->usec_timeout; i++) {
-		val = RREG32_SMC(ixRCU_UC_EVENTS);
-		val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
-		if (val)
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout) {
-		DRM_ERROR("SMC boot sequence is not completed\n");
-		return -EINVAL;
-	}
-
-	/* Clear firmware interrupt enable flag */
-	WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
-	/* Assert reset */
-	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
-	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-	result = tonga_smu_upload_firmware_image(adev);
-	if (result)
-		return result;
-
-	/* Set smc instruct start point at 0x0 */
-	tonga_program_jump_on_start(adev);
-
-	/* Enable clock */
-	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
-	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
-	/* De-assert reset */
-	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
-	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
-	/* Wait for firmware to initialize */
-	for (i = 0; i < adev->usec_timeout; i++) {
-		val = RREG32_SMC(ixFIRMWARE_FLAGS);
-		if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
-			break;
-		udelay(1);
-	}
-
-	if (i == adev->usec_timeout) {
-		DRM_ERROR("Timeout for SMC firmware initialization\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-int tonga_smu_start(struct amdgpu_device *adev)
-{
-	int result;
-	uint32_t val;
-
-	if (!tonga_is_smc_ram_running(adev)) {
-		val = RREG32_SMC(ixSMU_FIRMWARE);
-		if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
-			result = tonga_smu_start_in_non_protection_mode(adev);
-			if (result)
-				return result;
-		} else {
-			result = tonga_smu_start_in_protection_mode(adev);
-			if (result)
-				return result;
-		}
-	}
-
-	return tonga_smu_request_load_fw(adev);
-}
-
-static const struct amdgpu_smumgr_funcs tonga_smumgr_funcs = {
-	.check_fw_load_finish = tonga_smu_check_fw_load_finish,
-	.request_smu_load_fw = NULL,
-	.request_smu_specific_fw = NULL,
-};
-
-int tonga_smu_init(struct amdgpu_device *adev)
-{
-	struct tonga_smu_private_data *private;
-	uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
-	uint32_t smu_internal_buffer_size = 200*4096;
-	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
-	struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
-	uint64_t mc_addr;
-	void *toc_buf_ptr;
-	void *smu_buf_ptr;
-	int ret;
-
-	private = kzalloc(sizeof(struct tonga_smu_private_data), GFP_KERNEL);
-	if (NULL == private)
-		return -ENOMEM;
-
-	/* allocate firmware buffers */
-	if (adev->firmware.smu_load)
-		amdgpu_ucode_init_bo(adev);
-
-	adev->smu.priv = private;
-	adev->smu.fw_flags = 0;
-
-	/* Allocate FW image data structure and header buffer */
-	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
-			       true, AMDGPU_GEM_DOMAIN_VRAM,
-			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			       NULL, NULL, toc_buf);
-	if (ret) {
-		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
-		return -ENOMEM;
-	}
-
-	/* Allocate buffer for SMU internal buffer */
-	ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
-			       true, AMDGPU_GEM_DOMAIN_VRAM,
-			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			       NULL, NULL, smu_buf);
-	if (ret) {
-		DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
-		return -ENOMEM;
-	}
-
-	/* Retrieve GPU address for header buffer and internal buffer */
-	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
-	if (ret) {
-		amdgpu_bo_unref(&adev->smu.toc_buf);
-		DRM_ERROR("Failed to reserve the TOC buffer\n");
-		return -EINVAL;
-	}
-
-	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
-	if (ret) {
-		amdgpu_bo_unreserve(adev->smu.toc_buf);
-		amdgpu_bo_unref(&adev->smu.toc_buf);
-		DRM_ERROR("Failed to pin the TOC buffer\n");
-		return -EINVAL;
-	}
-
-	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
-	if (ret) {
-		amdgpu_bo_unreserve(adev->smu.toc_buf);
-		amdgpu_bo_unref(&adev->smu.toc_buf);
-		DRM_ERROR("Failed to map the TOC buffer\n");
-		return -EINVAL;
-	}
-
-	amdgpu_bo_unreserve(adev->smu.toc_buf);
-	private->header_addr_low = lower_32_bits(mc_addr);
-	private->header_addr_high = upper_32_bits(mc_addr);
-	private->header = toc_buf_ptr;
-
-	ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
-	if (ret) {
-		amdgpu_bo_unref(&adev->smu.smu_buf);
-		amdgpu_bo_unref(&adev->smu.toc_buf);
-		DRM_ERROR("Failed to reserve the SMU internal buffer\n");
-		return -EINVAL;
-	}
-
-	ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
-	if (ret) {
-		amdgpu_bo_unreserve(adev->smu.smu_buf);
-		amdgpu_bo_unref(&adev->smu.smu_buf);
-		amdgpu_bo_unref(&adev->smu.toc_buf);
-		DRM_ERROR("Failed to pin the SMU internal buffer\n");
-		return -EINVAL;
-	}
-
-	ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
-	if (ret) {
-		amdgpu_bo_unreserve(adev->smu.smu_buf);
-		amdgpu_bo_unref(&adev->smu.smu_buf);
-		amdgpu_bo_unref(&adev->smu.toc_buf);
-		DRM_ERROR("Failed to map the SMU internal buffer\n");
-		return -EINVAL;
-	}
-
-	amdgpu_bo_unreserve(adev->smu.smu_buf);
-	private->smu_buffer_addr_low = lower_32_bits(mc_addr);
-	private->smu_buffer_addr_high = upper_32_bits(mc_addr);
-
-	adev->smu.smumgr_funcs = &tonga_smumgr_funcs;
-
-	return 0;
-}
-
-int tonga_smu_fini(struct amdgpu_device *adev)
-{
-	amdgpu_bo_unref(&adev->smu.toc_buf);
-	amdgpu_bo_unref(&adev->smu.smu_buf);
-	kfree(adev->smu.priv);
-	adev->smu.priv = NULL;
-	if (adev->firmware.fw_buf)
-		amdgpu_ucode_fini_bo(adev);
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smum.h b/drivers/gpu/drm/amd/amdgpu/tonga_smum.h
deleted file mode 100644
index c031ff9..0000000
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smum.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef TONGA_SMUMGR_H
-#define TONGA_SMUMGR_H
-
-#include "tonga_ppsmc.h"
-
-int tonga_smu_init(struct amdgpu_device *adev);
-int tonga_smu_fini(struct amdgpu_device *adev);
-int tonga_smu_start(struct amdgpu_device *adev);
-
-struct tonga_smu_private_data
-{
-	uint8_t *header;
-	uint32_t smu_buffer_addr_high;
-	uint32_t smu_buffer_addr_low;
-	uint32_t header_addr_high;
-	uint32_t header_addr_low;
-};
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 132e613..f6c9415 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -116,7 +116,7 @@
 
 	ring = &adev->uvd.ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
+	r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
 			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
 
 	return r;
@@ -526,6 +526,20 @@
 	amdgpu_ring_write(ring, ib->length_dw);
 }
 
+static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		4; /* uvd_v4_2_ring_emit_ib */
+}
+
+static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		2 + /* uvd_v4_2_ring_emit_hdp_flush */
+		2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
+		14; /* uvd_v4_2_ring_emit_fence  x1 no user fence */
+}
+
 /**
  * uvd_v4_2_mc_resume - memory controller programming
  *
@@ -756,6 +770,8 @@
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
+	.get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size,
+	.get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size,
 };
 
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 101de136..400c16f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -112,7 +112,7 @@
 
 	ring = &adev->uvd.ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
+	r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
 			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
 
 	return r;
@@ -577,6 +577,20 @@
 	amdgpu_ring_write(ring, ib->length_dw);
 }
 
+static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		6; /* uvd_v5_0_ring_emit_ib */
+}
+
+static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		2 + /* uvd_v5_0_ring_emit_hdp_flush */
+		2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
+		14; /* uvd_v5_0_ring_emit_fence  x1 no user fence */
+}
+
 static bool uvd_v5_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -807,6 +821,8 @@
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
+	.get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
 };
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 7f21102..ab3df6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -116,7 +116,7 @@
 
 	ring = &adev->uvd.ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
+	r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
 			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
 
 	return r;
@@ -396,21 +396,14 @@
 
 	uvd_v6_0_mc_resume(adev);
 
-	/* Set dynamic clock gating in S/W control mode */
-	if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
-		uvd_v6_0_set_sw_clock_gating(adev);
-	} else {
-		/* disable clock gating */
-		uint32_t data = RREG32(mmUVD_CGC_CTRL);
-		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
-		WREG32(mmUVD_CGC_CTRL, data);
-	}
+	/* disable clock gating */
+	WREG32_FIELD(UVD_CGC_CTRL, DYN_CLOCK_MODE, 0);
 
 	/* disable interupt */
-	WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK);
+	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);
 
 	/* stall UMC and register bus before resetting VCPU */
-	WREG32_P(mmUVD_LMI_CTRL2, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
 	mdelay(1);
 
 	/* put LMI, VCPU, RBC etc... into reset */
@@ -426,7 +419,7 @@
 	mdelay(5);
 
 	/* take UVD block out of reset */
-	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
+	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
 	mdelay(5);
 
 	/* initialize UVD memory controller */
@@ -461,7 +454,7 @@
 	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
 
 	/* enable UMC */
-	WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);
 
 	/* boot up the VCPU */
 	WREG32(mmUVD_SOFT_RESET, 0);
@@ -481,11 +474,9 @@
 			break;
 
 		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
-		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
-				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
 		mdelay(10);
-		WREG32_P(mmUVD_SOFT_RESET, 0,
-			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
 		mdelay(10);
 		r = -1;
 	}
@@ -502,15 +493,14 @@
 	/* clear the bit 4 of UVD_STATUS */
 	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
 
+	/* force RBC into idle state */
 	rb_bufsz = order_base_2(ring->ring_size);
-	tmp = 0;
-	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
-	/* force RBC into idle state */
 	WREG32(mmUVD_RBC_RB_CNTL, tmp);
 
 	/* set the write pointer delay */
@@ -531,7 +521,7 @@
 	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
 	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
 
-	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
+	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
 
 	return 0;
 }
@@ -735,6 +725,31 @@
 	amdgpu_ring_write(ring, 0xE);
 }
 
+static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		8; /* uvd_v6_0_ring_emit_ib */
+}
+
+static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		2 + /* uvd_v6_0_ring_emit_hdp_flush */
+		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+		14; /* uvd_v6_0_ring_emit_fence x1 no user fence */
+}
+
+static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
+{
+	return
+		2 + /* uvd_v6_0_ring_emit_hdp_flush */
+		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+		20 + /* uvd_v6_0_ring_emit_vm_flush */
+		14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */
+}
+
 static bool uvd_v6_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -748,20 +763,82 @@
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
+		if (uvd_v6_0_is_idle(handle))
 			return 0;
 	}
 	return -ETIMEDOUT;
 }
 
-static int uvd_v6_0_soft_reset(void *handle)
+#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
+static bool uvd_v6_0_check_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
+	u32 tmp = RREG32(mmSRBM_STATUS);
+
+	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
+	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
+	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
+
+	if (srbm_soft_reset) {
+		adev->uvd.srbm_soft_reset = srbm_soft_reset;
+		return true;
+	} else {
+		adev->uvd.srbm_soft_reset = 0;
+		return false;
+	}
+}
+
+static int uvd_v6_0_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	uvd_v6_0_stop(adev);
+	if (!adev->uvd.srbm_soft_reset)
+		return 0;
 
-	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
-			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
+	uvd_v6_0_stop(adev);
+	return 0;
+}
+
+static int uvd_v6_0_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset;
+
+	if (!adev->uvd.srbm_soft_reset)
+		return 0;
+	srbm_soft_reset = adev->uvd.srbm_soft_reset;
+
+	if (srbm_soft_reset) {
+		u32 tmp;
+
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		/* Wait a little for things to settle down */
+		udelay(50);
+	}
+
+	return 0;
+}
+
+static int uvd_v6_0_post_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!adev->uvd.srbm_soft_reset)
+		return 0;
+
 	mdelay(5);
 
 	return uvd_v6_0_start(adev);
@@ -902,21 +979,15 @@
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
-	static int curstate = -1;
 
 	if (adev->asic_type == CHIP_FIJI ||
-			adev->asic_type == CHIP_POLARIS10)
-		uvd_v6_set_bypass_mode(adev, enable);
+	    adev->asic_type == CHIP_POLARIS10)
+		uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false);
 
 	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
 		return 0;
 
-	if (curstate == state)
-		return 0;
-
-	curstate = state;
-	if (enable) {
+	if (state == AMD_CG_STATE_GATE) {
 		/* disable HW gating and enable Sw gating */
 		uvd_v6_0_set_sw_clock_gating(adev);
 	} else {
@@ -946,6 +1017,8 @@
 	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
 		return 0;
 
+	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
+
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v6_0_stop(adev);
 		return 0;
@@ -966,7 +1039,10 @@
 	.resume = uvd_v6_0_resume,
 	.is_idle = uvd_v6_0_is_idle,
 	.wait_for_idle = uvd_v6_0_wait_for_idle,
+	.check_soft_reset = uvd_v6_0_check_soft_reset,
+	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
 	.soft_reset = uvd_v6_0_soft_reset,
+	.post_soft_reset = uvd_v6_0_post_soft_reset,
 	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
 	.set_powergating_state = uvd_v6_0_set_powergating_state,
 };
@@ -986,6 +1062,8 @@
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
+	.get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size,
 };
 
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
@@ -1005,6 +1083,8 @@
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
+	.get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm,
 };
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 80a37a6..76e64ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -30,16 +30,17 @@
 #include "amdgpu.h"
 #include "amdgpu_vce.h"
 #include "cikd.h"
-
 #include "vce/vce_2_0_d.h"
 #include "vce/vce_2_0_sh_mask.h"
-
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
 
 #define VCE_V2_0_FW_SIZE	(256 * 1024)
 #define VCE_V2_0_STACK_SIZE	(64 * 1024)
 #define VCE_V2_0_DATA_SIZE	(23552 * AMDGPU_MAX_VCE_HANDLES)
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02
 
 static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -96,6 +97,49 @@
 		WREG32(mmVCE_RB_WPTR2, ring->wptr);
 }
 
+static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
+{
+	int i, j;
+
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			uint32_t status = RREG32(mmVCE_LMI_STATUS);
+
+			if (status & 0x337f)
+				return 0;
+			mdelay(10);
+		}
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
+{
+	int i, j;
+
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			uint32_t status = RREG32(mmVCE_STATUS);
+
+			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+				return 0;
+			mdelay(10);
+		}
+
+		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+		WREG32_P(mmVCE_SOFT_RESET,
+			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		mdelay(10);
+		WREG32_P(mmVCE_SOFT_RESET, 0,
+			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		mdelay(10);
+	}
+
+	return -ETIMEDOUT;
+}
+
 /**
  * vce_v2_0_start - start VCE block
  *
@@ -106,7 +150,7 @@
 static int vce_v2_0_start(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *ring;
-	int i, j, r;
+	int r;
 
 	vce_v2_0_mc_resume(adev);
 
@@ -127,36 +171,12 @@
 	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
 	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
 
-	WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-
-	WREG32_P(mmVCE_SOFT_RESET,
-		 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-		 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
+	WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
+	WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
 	mdelay(100);
+	WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
 
-	WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
-	for (i = 0; i < 10; ++i) {
-		uint32_t status;
-		for (j = 0; j < 100; ++j) {
-			status = RREG32(mmVCE_STATUS);
-			if (status & 2)
-				break;
-			mdelay(10);
-		}
-		r = 0;
-		if (status & 2)
-			break;
-
-		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
-		WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-		mdelay(10);
-		WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-		mdelay(10);
-		r = -1;
-	}
+	r = vce_v2_0_firmware_loaded(adev);
 
 	/* clear BUSY flag */
 	WREG32_P(mmVCE_STATUS, 0, ~1);
@@ -173,6 +193,8 @@
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	adev->vce.num_rings = 2;
+
 	vce_v2_0_set_ring_funcs(adev);
 	vce_v2_0_set_irq_funcs(adev);
 
@@ -182,7 +204,7 @@
 static int vce_v2_0_sw_init(void *handle)
 {
 	struct amdgpu_ring *ring;
-	int r;
+	int r, i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* VCE */
@@ -199,19 +221,14 @@
 	if (r)
 		return r;
 
-	ring = &adev->vce.ring[0];
-	sprintf(ring->name, "vce0");
-	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
-	if (r)
-		return r;
-
-	ring = &adev->vce.ring[1];
-	sprintf(ring->name, "vce1");
-	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
-	if (r)
-		return r;
+	for (i = 0; i < adev->vce.num_rings; i++) {
+		ring = &adev->vce.ring[i];
+		sprintf(ring->name, "vce%d", i);
+		r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
+				     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+		if (r)
+			return r;
+	}
 
 	return r;
 }
@@ -234,29 +251,23 @@
 
 static int vce_v2_0_hw_init(void *handle)
 {
-	struct amdgpu_ring *ring;
-	int r;
+	int r, i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	r = vce_v2_0_start(adev);
+	/* this error mean vcpu not in running state, so just skip ring test, not stop driver initialize */
 	if (r)
-/* this error mean vcpu not in running state, so just skip ring test, not stop driver initialize */
 		return 0;
 
-	ring = &adev->vce.ring[0];
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
-		return r;
-	}
+	for (i = 0; i < adev->vce.num_rings; i++)
+		adev->vce.ring[i].ready = false;
 
-	ring = &adev->vce.ring[1];
-	ring->ready = true;
-	r = amdgpu_ring_test_ring(ring);
-	if (r) {
-		ring->ready = false;
-		return r;
+	for (i = 0; i < adev->vce.num_rings; i++) {
+		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+		if (r)
+			return r;
+		else
+			adev->vce.ring[i].ready = true;
 	}
 
 	DRM_INFO("VCE initialized successfully.\n");
@@ -338,47 +349,50 @@
 
 static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
 {
-	u32 orig, tmp;
-
-	if (gated) {
-		if (vce_v2_0_wait_for_idle(adev)) {
-			DRM_INFO("VCE is busy, Can't set clock gateing");
-			return;
-		}
-		WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-		WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-		mdelay(100);
-		WREG32(mmVCE_STATUS, 0);
-	} else {
-		WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-		WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-		mdelay(100);
+	if (vce_v2_0_wait_for_idle(adev)) {
+		DRM_INFO("VCE is busy, Can't set clock gateing");
+		return;
 	}
 
-	tmp = RREG32(mmVCE_CLOCK_GATING_B);
-	tmp &= ~0x00060006;
-	if (gated) {
-		tmp |= 0xe10000;
-	} else {
-		tmp |= 0xe1;
-		tmp &= ~0xe10000;
+	WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100);
+
+	if (vce_v2_0_lmi_clean(adev)) {
+		DRM_INFO("LMI is busy, Can't set clock gateing");
+		return;
 	}
-	WREG32(mmVCE_CLOCK_GATING_B, tmp);
 
-	orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
-	tmp &= ~0x1fe000;
-	tmp &= ~0xff000000;
-	if (tmp != orig)
-		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
-
-	orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
-	tmp &= ~0x3fc;
-	if (tmp != orig)
-		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+	WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+	WREG32_P(mmVCE_SOFT_RESET,
+		 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+		 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+	WREG32(mmVCE_STATUS, 0);
 
 	if (gated)
 		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
-	WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+	/* LMI_MC/LMI_UMC always set in dynamic, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */
+	if (gated) {
+		/* Force CLOCK OFF , set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */
+		WREG32(mmVCE_CLOCK_GATING_B, 0xe90010);
+	} else {
+		/* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */
+		WREG32(mmVCE_CLOCK_GATING_B, 0x800f1);
+	}
+
+	/* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0}*/;
+	WREG32(mmVCE_UENC_CLOCK_GATING, 0x40);
+
+	/* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
+	WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);
+
+	WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100);
+	if(!gated) {
+		WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+		mdelay(100);
+		WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+
+		vce_v2_0_firmware_loaded(adev);
+		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+	}
 }
 
 static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
@@ -458,9 +472,7 @@
 	WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
 
 	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
-
-	WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
-		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
 
 	vce_v2_0_init_cg(adev);
 }
@@ -474,11 +486,11 @@
 
 static int vce_v2_0_wait_for_idle(void *handle)
 {
-	unsigned i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	unsigned i;
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
+		if (vce_v2_0_is_idle(handle))
 			return 0;
 	}
 	return -ETIMEDOUT;
@@ -488,8 +500,7 @@
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
-			~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
+	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
 	mdelay(5);
 
 	return vce_v2_0_start(adev);
@@ -516,10 +527,8 @@
 	DRM_DEBUG("IH: VCE\n");
 	switch (entry->src_data) {
 	case 0:
-		amdgpu_fence_process(&adev->vce.ring[0]);
-		break;
 	case 1:
-		amdgpu_fence_process(&adev->vce.ring[1]);
+		amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
 		break;
 	default:
 		DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -530,11 +539,28 @@
 	return 0;
 }
 
+static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+	if (enable)
+		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+	else
+		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+
+	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
+
 static int vce_v2_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	bool gate = false;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+
+
+	vce_v2_0_set_bypass_mode(adev, enable);
 
 	if (state == AMD_CG_STATE_GATE)
 		gate = true;
@@ -596,12 +622,16 @@
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vce_ring_begin_use,
 	.end_use = amdgpu_vce_ring_end_use,
+	.get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size,
+	.get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size,
 };
 
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-	adev->vce.ring[0].funcs = &vce_v2_0_ring_funcs;
-	adev->vce.ring[1].funcs = &vce_v2_0_ring_funcs;
+	int i;
+
+	for (i = 0; i < adev->vce.num_rings; i++)
+		adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index c271abf..8533269 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -37,6 +37,9 @@
 #include "gca/gfx_8_0_d.h"
 #include "smu/smu_7_1_2_d.h"
 #include "smu/smu_7_1_2_sh_mask.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+
 
 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
@@ -67,8 +70,10 @@
 
 	if (ring == &adev->vce.ring[0])
 		return RREG32(mmVCE_RB_RPTR);
-	else
+	else if (ring == &adev->vce.ring[1])
 		return RREG32(mmVCE_RB_RPTR2);
+	else
+		return RREG32(mmVCE_RB_RPTR3);
 }
 
 /**
@@ -84,8 +89,10 @@
 
 	if (ring == &adev->vce.ring[0])
 		return RREG32(mmVCE_RB_WPTR);
-	else
+	else if (ring == &adev->vce.ring[1])
 		return RREG32(mmVCE_RB_WPTR2);
+	else
+		return RREG32(mmVCE_RB_WPTR3);
 }
 
 /**
@@ -101,108 +108,80 @@
 
 	if (ring == &adev->vce.ring[0])
 		WREG32(mmVCE_RB_WPTR, ring->wptr);
-	else
+	else if (ring == &adev->vce.ring[1])
 		WREG32(mmVCE_RB_WPTR2, ring->wptr);
+	else
+		WREG32(mmVCE_RB_WPTR3, ring->wptr);
 }
 
 static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
 {
-	u32 tmp, data;
-
-	tmp = data = RREG32(mmVCE_RB_ARB_CTRL);
-	if (override)
-		data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
-	else
-		data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
-
-	if (tmp != data)
-		WREG32(mmVCE_RB_ARB_CTRL, data);
+	WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
 }
 
 static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
 					     bool gated)
 {
-	u32 tmp, data;
+	u32 data;
+
 	/* Set Override to disable Clock Gating */
 	vce_v3_0_override_vce_clock_gating(adev, true);
 
-	if (!gated) {
-		/* Force CLOCK ON for VCE_CLOCK_GATING_B,
-		 * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
-		 * VREG can be FORCE ON or set to Dynamic, but can't be OFF
-		 */
-		tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
+	/* This function enables MGCG which is controlled by firmware.
+	   With the clocks in the gated state the core is still
+	   accessible but the firmware will throttle the clocks on the
+	   fly as necessary.
+	*/
+	if (gated) {
+		data = RREG32(mmVCE_CLOCK_GATING_B);
 		data |= 0x1ff;
 		data &= ~0xef0000;
-		if (tmp != data)
-			WREG32(mmVCE_CLOCK_GATING_B, data);
+		WREG32(mmVCE_CLOCK_GATING_B, data);
 
-		/* Force CLOCK ON for VCE_UENC_CLOCK_GATING,
-		 * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
-		 */
-		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
+		data = RREG32(mmVCE_UENC_CLOCK_GATING);
 		data |= 0x3ff000;
 		data &= ~0xffc00000;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_CLOCK_GATING, data);
+		WREG32(mmVCE_UENC_CLOCK_GATING, data);
 
-		/* set VCE_UENC_CLOCK_GATING_2 */
-		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
+		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
 		data |= 0x2;
-		data &= ~0x2;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
+		data &= ~0x00010000;
+		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
 
-		/* Force CLOCK ON for VCE_UENC_REG_CLOCK_GATING */
-		tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
 		data |= 0x37f;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
+		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
 
-		/* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */
-		tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
+		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
 		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
-				VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
-				VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
-				0x8;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
+			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
+			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
+			0x8;
+		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
 	} else {
-		/* Force CLOCK OFF for VCE_CLOCK_GATING_B,
-		 * {*, *_FORCE_OFF} = {*, 1}
-		 * set VREG to Dynamic, as it can't be OFF
-		 */
-		tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
+		data = RREG32(mmVCE_CLOCK_GATING_B);
 		data &= ~0x80010;
 		data |= 0xe70008;
-		if (tmp != data)
-			WREG32(mmVCE_CLOCK_GATING_B, data);
-		/* Force CLOCK OFF for VCE_UENC_CLOCK_GATING,
-		 * Force ClOCK OFF takes precedent over Force CLOCK ON setting.
-		 * {*_FORCE_ON, *_FORCE_OFF} = {*, 1}
-		 */
-		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
+		WREG32(mmVCE_CLOCK_GATING_B, data);
+
+		data = RREG32(mmVCE_UENC_CLOCK_GATING);
 		data |= 0xffc00000;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_CLOCK_GATING, data);
-		/* Set VCE_UENC_CLOCK_GATING_2 */
-		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
+		WREG32(mmVCE_UENC_CLOCK_GATING, data);
+
+		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
 		data |= 0x10000;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
-		/* Set VCE_UENC_REG_CLOCK_GATING to dynamic */
-		tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
+
+		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
 		data &= ~0xffc00000;
-		if (tmp != data)
-			WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
-		/* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */
-		tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
+		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
+
+		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
 		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
-				VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
-				VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
-				0x8);
-		if (tmp != data)
-			WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
+			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
+			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
+			  0x8);
+		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
 	}
 	vce_v3_0_override_vce_clock_gating(adev, false);
 }
@@ -221,12 +200,9 @@
 		}
 
 		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
-		WREG32_P(mmVCE_SOFT_RESET,
-			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
 		mdelay(10);
-		WREG32_P(mmVCE_SOFT_RESET, 0,
-			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
 		mdelay(10);
 	}
 
@@ -259,43 +235,34 @@
 	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
 	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
 
+	ring = &adev->vce.ring[2];
+	WREG32(mmVCE_RB_RPTR3, ring->wptr);
+	WREG32(mmVCE_RB_WPTR3, ring->wptr);
+	WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
+	WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
+	WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
+
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (idx = 0; idx < 2; ++idx) {
 		if (adev->vce.harvest_config & (1 << idx))
 			continue;
 
-		if (idx == 0)
-			WREG32_P(mmGRBM_GFX_INDEX, 0,
-				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-		else
-			WREG32_P(mmGRBM_GFX_INDEX,
-				GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
-				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-
+		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
 		vce_v3_0_mc_resume(adev, idx);
-
-		WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK,
-		         ~VCE_STATUS__JOB_BUSY_MASK);
+		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
 
 		if (adev->asic_type >= CHIP_STONEY)
 			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
 		else
-			WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
-				~VCE_VCPU_CNTL__CLK_EN_MASK);
+			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
 
-		WREG32_P(mmVCE_SOFT_RESET, 0,
-			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
+		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
 		mdelay(100);
 
 		r = vce_v3_0_firmware_loaded(adev);
 
 		/* clear BUSY flag */
-		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
-
-		/* Set Clock-Gating off */
-		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
-			vce_v3_0_set_vce_sw_clock_gating(adev, false);
+		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
 
 		if (r) {
 			DRM_ERROR("VCE not responding, giving up!!!\n");
@@ -304,7 +271,7 @@
 		}
 	}
 
-	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	return 0;
@@ -319,33 +286,25 @@
 		if (adev->vce.harvest_config & (1 << idx))
 			continue;
 
-		if (idx == 0)
-			WREG32_P(mmGRBM_GFX_INDEX, 0,
-				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-		else
-			WREG32_P(mmGRBM_GFX_INDEX,
-				GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
-				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
 
 		if (adev->asic_type >= CHIP_STONEY)
 			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
 		else
-			WREG32_P(mmVCE_VCPU_CNTL, 0,
-				~VCE_VCPU_CNTL__CLK_EN_MASK);
+			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);
+
 		/* hold on ECPU */
-		WREG32_P(mmVCE_SOFT_RESET,
-			 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
 
 		/* clear BUSY flag */
-		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
 
 		/* Set Clock-Gating off */
 		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
 			vce_v3_0_set_vce_sw_clock_gating(adev, false);
 	}
 
-	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	return 0;
@@ -399,6 +358,8 @@
 	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
 		return -ENOENT;
 
+	adev->vce.num_rings = 3;
+
 	vce_v3_0_set_ring_funcs(adev);
 	vce_v3_0_set_irq_funcs(adev);
 
@@ -409,7 +370,7 @@
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_ring *ring;
-	int r;
+	int r, i;
 
 	/* VCE */
 	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
@@ -425,19 +386,14 @@
 	if (r)
 		return r;
 
-	ring = &adev->vce.ring[0];
-	sprintf(ring->name, "vce0");
-	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
-	if (r)
-		return r;
-
-	ring = &adev->vce.ring[1];
-	sprintf(ring->name, "vce1");
-	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
-	if (r)
-		return r;
+	for (i = 0; i < adev->vce.num_rings; i++) {
+		ring = &adev->vce.ring[i];
+		sprintf(ring->name, "vce%d", i);
+		r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
+				     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+		if (r)
+			return r;
+	}
 
 	return r;
 }
@@ -467,10 +423,10 @@
 	if (r)
 		return r;
 
-	adev->vce.ring[0].ready = false;
-	adev->vce.ring[1].ready = false;
+	for (i = 0; i < adev->vce.num_rings; i++)
+		adev->vce.ring[i].ready = false;
 
-	for (i = 0; i < 2; i++) {
+	for (i = 0; i < adev->vce.num_rings; i++) {
 		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
 		if (r)
 			return r;
@@ -534,7 +490,7 @@
 	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
 	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
 	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
-	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);
+	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);
 
 	WREG32(mmVCE_LMI_CTRL, 0x00398000);
 	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
@@ -573,9 +529,7 @@
 	}
 
 	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
-
-	WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
-		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
 }
 
 static bool vce_v3_0_is_idle(void *handle)
@@ -601,20 +555,107 @@
 	return -ETIMEDOUT;
 }
 
+#define  VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK  0x00000008L   /* AUTO_BUSY */
+#define  VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK   0x00000010L   /* RB0_BUSY */
+#define  VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK   0x00000020L   /* RB1_BUSY */
+#define  AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
+				      VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
+
+static bool vce_v3_0_check_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 srbm_soft_reset = 0;
+
+	/* According to VCE team , we should use VCE_STATUS instead
+	 * SRBM_STATUS.VCE_BUSY bit for busy status checking.
+	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
+	 * instance's registers are accessed
+	 * (0 for 1st instance, 10 for 2nd instance).
+	 *
+	 *VCE_STATUS
+	 *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
+	 *|----+----+-----------+----+----+----+----------+---------+----|
+	 *|bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
+	 *
+	 * VCE team suggest use bit 3--bit 6 for busy status check
+	 */
+	mutex_lock(&adev->grbm_idx_mutex);
+	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
+	}
+	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
+	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
+	}
+	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	if (srbm_soft_reset) {
+		adev->vce.srbm_soft_reset = srbm_soft_reset;
+		return true;
+	} else {
+		adev->vce.srbm_soft_reset = 0;
+		return false;
+	}
+}
+
 static int vce_v3_0_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	u32 mask = 0;
+	u32 srbm_soft_reset;
 
-	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
-	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
+	if (!adev->vce.srbm_soft_reset)
+		return 0;
+	srbm_soft_reset = adev->vce.srbm_soft_reset;
 
-	WREG32_P(mmSRBM_SOFT_RESET, mask,
-		 ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
-		   SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
+	if (srbm_soft_reset) {
+		u32 tmp;
+
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		/* Wait a little for things to settle down */
+		udelay(50);
+	}
+
+	return 0;
+}
+
+static int vce_v3_0_pre_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!adev->vce.srbm_soft_reset)
+		return 0;
+
 	mdelay(5);
 
-	return vce_v3_0_start(adev);
+	return vce_v3_0_suspend(adev);
+}
+
+
+static int vce_v3_0_post_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!adev->vce.srbm_soft_reset)
+		return 0;
+
+	mdelay(5);
+
+	return vce_v3_0_resume(adev);
 }
 
 static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -637,13 +678,12 @@
 {
 	DRM_DEBUG("IH: VCE\n");
 
-	WREG32_P(mmVCE_SYS_INT_STATUS,
-		VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
-		~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
+	WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);
 
 	switch (entry->src_data) {
 	case 0:
 	case 1:
+	case 2:
 		amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
 		break;
 	default:
@@ -655,7 +695,7 @@
 	return 0;
 }
 
-static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+static void vce_v3_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
 {
 	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
 
@@ -674,8 +714,10 @@
 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 	int i;
 
-	if (adev->asic_type == CHIP_POLARIS10)
-		vce_v3_set_bypass_mode(adev, enable);
+	if ((adev->asic_type == CHIP_POLARIS10) ||
+		(adev->asic_type == CHIP_TONGA) ||
+		(adev->asic_type == CHIP_FIJI))
+		vce_v3_0_set_bypass_mode(adev, enable);
 
 	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
 		return 0;
@@ -686,13 +728,7 @@
 		if (adev->vce.harvest_config & (1 << i))
 			continue;
 
-		if (i == 0)
-			WREG32_P(mmGRBM_GFX_INDEX, 0,
-					~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-		else
-			WREG32_P(mmGRBM_GFX_INDEX,
-					GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
-					~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
 
 		if (enable) {
 			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -711,7 +747,7 @@
 		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
 	}
 
-	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	return 0;
@@ -739,6 +775,60 @@
 		return vce_v3_0_start(adev);
 }
 
+static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
+		struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+{
+	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
+	amdgpu_ring_write(ring, vm_id);
+	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+	amdgpu_ring_write(ring, ib->length_dw);
+}
+
+static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
+			 unsigned int vm_id, uint64_t pd_addr)
+{
+	amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
+	amdgpu_ring_write(ring, vm_id);
+	amdgpu_ring_write(ring, pd_addr >> 12);
+
+	amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
+	amdgpu_ring_write(ring, vm_id);
+	amdgpu_ring_write(ring, VCE_CMD_END);
+}
+
+static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+	uint32_t seq = ring->fence_drv.sync_seq;
+	uint64_t addr = ring->fence_drv.gpu_addr;
+
+	amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
+	amdgpu_ring_write(ring, lower_32_bits(addr));
+	amdgpu_ring_write(ring, upper_32_bits(addr));
+	amdgpu_ring_write(ring, seq);
+}
+
+static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		5; /* vce_v3_0_ring_emit_ib */
+}
+
+static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		4 + /* vce_v3_0_emit_pipeline_sync */
+		6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
+}
+
+static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
+{
+	return
+		6 + /* vce_v3_0_emit_vm_flush */
+		4 + /* vce_v3_0_emit_pipeline_sync */
+		6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */
+}
+
 const struct amd_ip_funcs vce_v3_0_ip_funcs = {
 	.name = "vce_v3_0",
 	.early_init = vce_v3_0_early_init,
@@ -751,12 +841,15 @@
 	.resume = vce_v3_0_resume,
 	.is_idle = vce_v3_0_is_idle,
 	.wait_for_idle = vce_v3_0_wait_for_idle,
+	.check_soft_reset = vce_v3_0_check_soft_reset,
+	.pre_soft_reset = vce_v3_0_pre_soft_reset,
 	.soft_reset = vce_v3_0_soft_reset,
+	.post_soft_reset = vce_v3_0_post_soft_reset,
 	.set_clockgating_state = vce_v3_0_set_clockgating_state,
 	.set_powergating_state = vce_v3_0_set_powergating_state,
 };
 
-static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
+static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
 	.get_rptr = vce_v3_0_ring_get_rptr,
 	.get_wptr = vce_v3_0_ring_get_wptr,
 	.set_wptr = vce_v3_0_ring_set_wptr,
@@ -769,12 +862,42 @@
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vce_ring_begin_use,
 	.end_use = amdgpu_vce_ring_end_use,
+	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
+};
+
+static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
+	.get_rptr = vce_v3_0_ring_get_rptr,
+	.get_wptr = vce_v3_0_ring_get_wptr,
+	.set_wptr = vce_v3_0_ring_set_wptr,
+	.parse_cs = NULL,
+	.emit_ib = vce_v3_0_ring_emit_ib,
+	.emit_vm_flush = vce_v3_0_emit_vm_flush,
+	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
+	.emit_fence = amdgpu_vce_ring_emit_fence,
+	.test_ring = amdgpu_vce_ring_test_ring,
+	.test_ib = amdgpu_vce_ring_test_ib,
+	.insert_nop = amdgpu_ring_insert_nop,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.begin_use = amdgpu_vce_ring_begin_use,
+	.end_use = amdgpu_vce_ring_end_use,
+	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
 };
 
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-	adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
-	adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
+	int i;
+
+	if (adev->asic_type >= CHIP_STONEY) {
+		for (i = 0; i < adev->vce.num_rings; i++)
+			adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
+		DRM_INFO("VCE enabled in VM mode\n");
+	} else {
+		for (i = 0; i < adev->vce.num_rings; i++)
+			adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
+		DRM_INFO("VCE enabled in physical mode\n");
+	}
 }
 
 static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 03a31c5..c0d9aad 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -77,7 +77,11 @@
 #if defined(CONFIG_DRM_AMD_ACP)
 #include "amdgpu_acp.h"
 #endif
+#include "dce_virtual.h"
 
+MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
+MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
+MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
@@ -444,18 +448,21 @@
 	return true;
 }
 
-static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
+static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
 {
-	u32 caps = 0;
-	u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+	uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+	/* bit0: 0 means pf and 1 means vf */
+	/* bit31: 0 means disable IOV and 1 means enable */
+	if (reg & 1)
+		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;
 
-	if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
-		caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;
+	if (reg & 0x80000000)
+		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
 
-	if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
-		caps |= AMDGPU_VIRT_CAPS_IS_VF;
-
-	return caps;
+	if (reg == 0) {
+		if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */
+			adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+	}
 }
 
 static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
@@ -822,6 +829,60 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vi_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 7,
+		.minor = 4,
+		.rev = 0,
+		.funcs = &gmc_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 2,
+		.minor = 4,
+		.rev = 0,
+		.funcs = &iceland_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 2,
+		.minor = 4,
+		.rev = 0,
+		.funcs = &sdma_v2_4_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -890,6 +951,74 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vi_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &tonga_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 10,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &sdma_v3_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 5,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &uvd_v5_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v3_0_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -958,6 +1087,74 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vi_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 8,
+		.minor = 5,
+		.rev = 0,
+		.funcs = &gmc_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &tonga_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 10,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &sdma_v3_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &uvd_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v3_0_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -1026,6 +1223,74 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vi_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 8,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &gmc_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 3,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &tonga_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 11,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 3,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &sdma_v3_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 6,
+		.minor = 3,
+		.rev = 0,
+		.funcs = &uvd_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 3,
+		.minor = 4,
+		.rev = 0,
+		.funcs = &vce_v3_0_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version cz_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -1103,34 +1368,142 @@
 #endif
 };
 
+static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vi_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cz_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 11,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &dce_virtual_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &sdma_v3_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &uvd_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v3_0_ip_funcs,
+	},
+#if defined(CONFIG_DRM_AMD_ACP)
+	{
+		.type = AMD_IP_BLOCK_TYPE_ACP,
+		.major = 2,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &acp_ip_funcs,
+	},
+#endif
+};
+
 int vi_set_ip_blocks(struct amdgpu_device *adev)
 {
-	switch (adev->asic_type) {
-	case CHIP_TOPAZ:
-		adev->ip_blocks = topaz_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
-		break;
-	case CHIP_FIJI:
-		adev->ip_blocks = fiji_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
-		break;
-	case CHIP_TONGA:
-		adev->ip_blocks = tonga_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
-		break;
-	case CHIP_POLARIS11:
-	case CHIP_POLARIS10:
-		adev->ip_blocks = polaris11_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
-		break;
-	case CHIP_CARRIZO:
-	case CHIP_STONEY:
-		adev->ip_blocks = cz_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
-		break;
-	default:
-		/* FIXME: not supported yet */
-		return -EINVAL;
+	if (adev->enable_virtual_display) {
+		switch (adev->asic_type) {
+		case CHIP_TOPAZ:
+			adev->ip_blocks = topaz_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd);
+			break;
+		case CHIP_FIJI:
+			adev->ip_blocks = fiji_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd);
+			break;
+		case CHIP_TONGA:
+			adev->ip_blocks = tonga_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd);
+			break;
+		case CHIP_POLARIS11:
+		case CHIP_POLARIS10:
+			adev->ip_blocks = polaris11_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd);
+			break;
+
+		case CHIP_CARRIZO:
+		case CHIP_STONEY:
+			adev->ip_blocks = cz_ip_blocks_vd;
+			adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd);
+			break;
+		default:
+			/* FIXME: not supported yet */
+			return -EINVAL;
+		}
+	} else {
+		switch (adev->asic_type) {
+		case CHIP_TOPAZ:
+			adev->ip_blocks = topaz_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
+			break;
+		case CHIP_FIJI:
+			adev->ip_blocks = fiji_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
+			break;
+		case CHIP_TONGA:
+			adev->ip_blocks = tonga_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
+			break;
+		case CHIP_POLARIS11:
+		case CHIP_POLARIS10:
+			adev->ip_blocks = polaris11_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
+			break;
+		case CHIP_CARRIZO:
+		case CHIP_STONEY:
+			adev->ip_blocks = cz_ip_blocks;
+			adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
+			break;
+		default:
+			/* FIXME: not supported yet */
+			return -EINVAL;
+		}
 	}
 
 	return 0;
@@ -1154,13 +1527,13 @@
 {
 	.read_disabled_bios = &vi_read_disabled_bios,
 	.read_bios_from_rom = &vi_read_bios_from_rom,
+	.detect_hw_virtualization = vi_detect_hw_virtualization,
 	.read_register = &vi_read_register,
 	.reset = &vi_asic_reset,
 	.set_vga_state = &vi_vga_set_state,
 	.get_xclk = &vi_get_xclk,
 	.set_uvd_clocks = &vi_set_uvd_clocks,
 	.set_vce_clocks = &vi_set_vce_clocks,
-	.get_virtual_caps = &vi_get_virtual_caps,
 };
 
 static int vi_common_early_init(void *handle)
@@ -1248,8 +1621,17 @@
 			AMD_CG_SUPPORT_HDP_MGCG |
 			AMD_CG_SUPPORT_HDP_LS |
 			AMD_CG_SUPPORT_SDMA_MGCG |
-			AMD_CG_SUPPORT_SDMA_LS;
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_VCE_MGCG;
+		/* rev0 hardware requires workarounds to support PG */
 		adev->pg_flags = 0;
+		if (adev->rev_id != 0x00) {
+			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+				AMD_PG_SUPPORT_GFX_SMG |
+				AMD_PG_SUPPORT_GFX_PIPELINE |
+				AMD_PG_SUPPORT_UVD |
+				AMD_PG_SUPPORT_VCE;
+		}
 		adev->external_rev_id = adev->rev_id + 0x1;
 		break;
 	case CHIP_STONEY:
@@ -1267,14 +1649,24 @@
 			AMD_CG_SUPPORT_HDP_MGCG |
 			AMD_CG_SUPPORT_HDP_LS |
 			AMD_CG_SUPPORT_SDMA_MGCG |
-			AMD_CG_SUPPORT_SDMA_LS;
-		adev->external_rev_id = adev->rev_id + 0x1;
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_VCE_MGCG;
+		adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+			AMD_PG_SUPPORT_GFX_SMG |
+			AMD_PG_SUPPORT_GFX_PIPELINE |
+			AMD_PG_SUPPORT_UVD |
+			AMD_PG_SUPPORT_VCE;
+		adev->external_rev_id = adev->rev_id + 0x61;
 		break;
 	default:
 		/* FIXME: not supported yet */
 		return -EINVAL;
 	}
 
+	/* in early init stage, vbios code won't work */
+	if (adev->asic_funcs->detect_hw_virtualization)
+		amdgpu_asic_detect_hw_virtualization(adev);
+
 	if (amdgpu_smc_load_fw && smc_enabled)
 		adev->firmware.smu_load = true;
 
@@ -1418,6 +1810,63 @@
 		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
 }
 
+static int vi_common_set_clockgating_state_by_smu(void *handle,
+					   enum amd_clockgating_state state)
+{
+	uint32_t msg_id, pp_state;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	void *pp_handle = adev->powerplay.pp_handle;
+
+	if (state == AMD_CG_STATE_UNGATE)
+		pp_state = 0;
+	else
+		pp_state = PP_STATE_CG | PP_STATE_LS;
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+		       PP_BLOCK_SYS_MC,
+		       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+		       pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+		       PP_BLOCK_SYS_SDMA,
+		       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+		       pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+		       PP_BLOCK_SYS_HDP,
+		       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+		       pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+		       PP_BLOCK_SYS_BIF,
+		       PP_STATE_SUPPORT_LS,
+		       pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+		       PP_BLOCK_SYS_BIF,
+		       PP_STATE_SUPPORT_CG,
+		       pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+		       PP_BLOCK_SYS_DRM,
+		       PP_STATE_SUPPORT_LS,
+		       pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+		       PP_BLOCK_SYS_ROM,
+		       PP_STATE_SUPPORT_CG,
+		       pp_state);
+	amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+	return 0;
+}
+
 static int vi_common_set_clockgating_state(void *handle,
 					   enum amd_clockgating_state state)
 {
@@ -1443,6 +1892,10 @@
 		vi_update_hdp_light_sleep(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
 		break;
+	case CHIP_TONGA:
+	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
+		vi_common_set_clockgating_state_by_smu(adev, state);
 	default:
 		break;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index 062ee16..11746f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -369,4 +369,45 @@
 #define VCE_CMD_IB_AUTO	0x00000005
 #define VCE_CMD_SEMAPHORE	0x00000006
 
+#define VCE_CMD_IB_VM           0x00000102
+#define VCE_CMD_WAIT_GE         0x00000106
+#define VCE_CMD_UPDATE_PTB      0x00000107
+#define VCE_CMD_FLUSH_TLB       0x00000108
+
+/* mmPA_SC_RASTER_CONFIG mask */
+#define RB_MAP_PKR0(x)				((x) << 0)
+#define RB_MAP_PKR0_MASK			(0x3 << 0)
+#define RB_MAP_PKR1(x)				((x) << 2)
+#define RB_MAP_PKR1_MASK			(0x3 << 2)
+#define RB_XSEL2(x)				((x) << 4)
+#define RB_XSEL2_MASK				(0x3 << 4)
+#define RB_XSEL					(1 << 6)
+#define RB_YSEL					(1 << 7)
+#define PKR_MAP(x)				((x) << 8)
+#define PKR_MAP_MASK				(0x3 << 8)
+#define PKR_XSEL(x)				((x) << 10)
+#define PKR_XSEL_MASK				(0x3 << 10)
+#define PKR_YSEL(x)				((x) << 12)
+#define PKR_YSEL_MASK				(0x3 << 12)
+#define SC_MAP(x)				((x) << 16)
+#define SC_MAP_MASK				(0x3 << 16)
+#define SC_XSEL(x)				((x) << 18)
+#define SC_XSEL_MASK				(0x3 << 18)
+#define SC_YSEL(x)				((x) << 20)
+#define SC_YSEL_MASK				(0x3 << 20)
+#define SE_MAP(x)				((x) << 24)
+#define SE_MAP_MASK				(0x3 << 24)
+#define SE_XSEL(x)				((x) << 26)
+#define SE_XSEL_MASK				(0x3 << 26)
+#define SE_YSEL(x)				((x) << 28)
+#define SE_YSEL_MASK				(0x3 << 28)
+
+/* mmPA_SC_RASTER_CONFIG_1 mask */
+#define SE_PAIR_MAP(x)				((x) << 0)
+#define SE_PAIR_MAP_MASK			(0x3 << 0)
+#define SE_PAIR_XSEL(x)				((x) << 2)
+#define SE_PAIR_XSEL_MASK			(0x3 << 2)
+#define SE_PAIR_YSEL(x)				((x) << 4)
+#define SE_PAIR_YSEL_MASK			(0x3 << 4)
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index a7d3cb3..453c5d6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -142,13 +142,15 @@
 
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	pr_debug("mapping doorbell page:\n");
-	pr_debug("     target user address == 0x%08llX\n",
-			(unsigned long long) vma->vm_start);
-	pr_debug("     physical address    == 0x%08llX\n", address);
-	pr_debug("     vm_flags            == 0x%04lX\n", vma->vm_flags);
-	pr_debug("     size                == 0x%04lX\n",
-			 doorbell_process_allocation());
+	pr_debug("kfd: mapping doorbell page in %s\n"
+		 "     target user address == 0x%08llX\n"
+		 "     physical address    == 0x%08llX\n"
+		 "     vm_flags            == 0x%04lX\n"
+		 "     size                == 0x%04lX\n",
+		 __func__,
+		 (unsigned long long) vma->vm_start, address, vma->vm_flags,
+		 doorbell_process_allocation());
+
 
 	return io_remap_pfn_range(vma,
 				vma->vm_start,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 9beae87..d135cd0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -47,6 +47,9 @@
 	pr_debug("amdkfd: In func %s initializing queue type %d size %d\n",
 			__func__, KFD_QUEUE_TYPE_HIQ, queue_size);
 
+	memset(&prop, 0, sizeof(prop));
+	memset(&nop, 0, sizeof(nop));
+
 	nop.opcode = IT_NOP;
 	nop.type = PM4_TYPE_3;
 	nop.u32all |= PM4_COUNT_ZERO;
@@ -121,7 +124,7 @@
 	prop.eop_ring_buffer_address = kq->eop_gpu_addr;
 	prop.eop_ring_buffer_size = PAGE_SIZE;
 
-	if (init_queue(&kq->queue, prop) != 0)
+	if (init_queue(&kq->queue, &prop) != 0)
 		goto err_init_queue;
 
 	kq->queue->device = dev;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 80113c3..4750cab 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -619,7 +619,7 @@
 /* Queue Context Management */
 struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
 
-int init_queue(struct queue **q, struct queue_properties properties);
+int init_queue(struct queue **q, const struct queue_properties *properties);
 void uninit_queue(struct queue *q);
 void print_queue_properties(struct queue_properties *q);
 void print_queue(struct queue *q);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 4f3849a..ef7c8de 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -404,58 +404,47 @@
 {
 	struct kfd_process *p;
 	struct kfd_process_device *pdd;
-	int idx, i;
 
 	BUG_ON(dev == NULL);
 
-	idx = srcu_read_lock(&kfd_processes_srcu);
-
 	/*
 	 * Look for the process that matches the pasid. If there is no such
 	 * process, we either released it in amdkfd's own notifier, or there
 	 * is a bug. Unfortunately, there is no way to tell...
 	 */
-	hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
-		if (p->pasid == pasid) {
+	p = kfd_lookup_process_by_pasid(pasid);
+	if (!p)
+		return;
 
-			srcu_read_unlock(&kfd_processes_srcu, idx);
+	pr_debug("Unbinding process %d from IOMMU\n", pasid);
 
-			pr_debug("Unbinding process %d from IOMMU\n", pasid);
+	if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
+		kfd_dbgmgr_destroy(dev->dbgmgr);
 
-			mutex_lock(&p->mutex);
+	pqm_uninit(&p->pqm);
 
-			if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
-				kfd_dbgmgr_destroy(dev->dbgmgr);
+	pdd = kfd_get_process_device_data(dev, p);
 
-			pqm_uninit(&p->pqm);
+	if (!pdd) {
+		mutex_unlock(&p->mutex);
+		return;
+	}
 
-			pdd = kfd_get_process_device_data(dev, p);
+	if (pdd->reset_wavefronts) {
+		dbgdev_wave_reset_wavefronts(pdd->dev, p);
+		pdd->reset_wavefronts = false;
+	}
 
-			if (!pdd) {
-				mutex_unlock(&p->mutex);
-				return;
-			}
+	/*
+	 * Just mark pdd as unbound, because we still need it
+	 * to call amd_iommu_unbind_pasid() in when the
+	 * process exits.
+	 * We don't call amd_iommu_unbind_pasid() here
+	 * because the IOMMU called us.
+	 */
+	pdd->bound = false;
 
-			if (pdd->reset_wavefronts) {
-				dbgdev_wave_reset_wavefronts(pdd->dev, p);
-				pdd->reset_wavefronts = false;
-			}
-
-			/*
-			 * Just mark pdd as unbound, because we still need it
-			 * to call amd_iommu_unbind_pasid() in when the
-			 * process exits.
-			 * We don't call amd_iommu_unbind_pasid() here
-			 * because the IOMMU called us.
-			 */
-			pdd->bound = false;
-
-			mutex_unlock(&p->mutex);
-
-			return;
-		}
-
-	srcu_read_unlock(&kfd_processes_srcu, idx);
+	mutex_unlock(&p->mutex);
 }
 
 struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 7b69070..e1fb40b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -129,7 +129,7 @@
 	q_properties->vmid = 0;
 	q_properties->queue_id = qid;
 
-	retval = init_queue(q, *q_properties);
+	retval = init_queue(q, q_properties);
 	if (retval != 0)
 		goto err_init_queue;
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index 9a0c90b..0ab1970 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -63,7 +63,7 @@
 	pr_debug("Queue Device Address: 0x%p\n", q->device);
 }
 
-int init_queue(struct queue **q, struct queue_properties properties)
+int init_queue(struct queue **q, const struct queue_properties *properties)
 {
 	struct queue *tmp;
 
@@ -73,7 +73,7 @@
 	if (!tmp)
 		return -ENOMEM;
 
-	memcpy(&tmp->properties, &properties, sizeof(struct queue_properties));
+	memcpy(&tmp->properties, properties, sizeof(struct queue_properties));
 
 	*q = tmp;
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 884c96f..1e50647 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1090,19 +1090,21 @@
 {
 	uint32_t hashout;
 	uint32_t buf[7];
+	uint64_t local_mem_size;
 	int i;
 
 	if (!gpu)
 		return 0;
 
+	local_mem_size = gpu->kfd2kgd->get_vmem_size(gpu->kgd);
+
 	buf[0] = gpu->pdev->devfn;
 	buf[1] = gpu->pdev->subsystem_vendor;
 	buf[2] = gpu->pdev->subsystem_device;
 	buf[3] = gpu->pdev->device;
 	buf[4] = gpu->pdev->bus->number;
-	buf[5] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd)
-			& 0xffffffff);
-	buf[6] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd) >> 32);
+	buf[5] = lower_32_bits(local_mem_size);
+	buf[6] = upper_32_bits(local_mem_size);
 
 	for (i = 0, hashout = 0; i < 7; i++)
 		hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index a74a0d2..bec8125 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -29,7 +29,12 @@
  * Supported ASIC types
  */
 enum amd_asic_type {
-	CHIP_BONAIRE = 0,
+	CHIP_TAHITI = 0,
+	CHIP_PITCAIRN,
+	CHIP_VERDE,
+	CHIP_OLAND,
+	CHIP_HAINAN,
+	CHIP_BONAIRE,
 	CHIP_KAVERI,
 	CHIP_KABINI,
 	CHIP_HAWAII,
@@ -159,8 +164,14 @@
 	bool (*is_idle)(void *handle);
 	/* poll for idle */
 	int (*wait_for_idle)(void *handle);
+	/* check soft reset the IP block */
+	bool (*check_soft_reset)(void *handle);
+	/* pre soft reset the IP block */
+	int (*pre_soft_reset)(void *handle);
 	/* soft reset the IP block */
 	int (*soft_reset)(void *handle);
+	/* post soft reset the IP block */
+	int (*post_soft_reset)(void *handle);
 	/* enable/disable cg for the IP block */
 	int (*set_clockgating_state)(void *handle,
 				     enum amd_clockgating_state state);
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h b/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h
new file mode 100644
index 0000000..66e39cd
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h
@@ -0,0 +1,941 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const u32 si_SECT_CONTEXT_def_1[] =
+{
+    0x00000000, // DB_RENDER_CONTROL
+    0x00000000, // DB_COUNT_CONTROL
+    0x00000000, // DB_DEPTH_VIEW
+    0x00000000, // DB_RENDER_OVERRIDE
+    0x00000000, // DB_RENDER_OVERRIDE2
+    0x00000000, // DB_HTILE_DATA_BASE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_DEPTH_BOUNDS_MIN
+    0x00000000, // DB_DEPTH_BOUNDS_MAX
+    0x00000000, // DB_STENCIL_CLEAR
+    0x00000000, // DB_DEPTH_CLEAR
+    0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+    0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+    0, // HOLE
+    0x00000000, // DB_DEPTH_INFO
+    0x00000000, // DB_Z_INFO
+    0x00000000, // DB_STENCIL_INFO
+    0x00000000, // DB_Z_READ_BASE
+    0x00000000, // DB_STENCIL_READ_BASE
+    0x00000000, // DB_Z_WRITE_BASE
+    0x00000000, // DB_STENCIL_WRITE_BASE
+    0x00000000, // DB_DEPTH_SIZE
+    0x00000000, // DB_DEPTH_SLICE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // TA_BC_BASE_ADDR
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // COHER_DEST_BASE_2
+    0x00000000, // COHER_DEST_BASE_3
+    0x00000000, // PA_SC_WINDOW_OFFSET
+    0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+    0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+    0x0000ffff, // PA_SC_CLIPRECT_RULE
+    0x00000000, // PA_SC_CLIPRECT_0_TL
+    0x40004000, // PA_SC_CLIPRECT_0_BR
+    0x00000000, // PA_SC_CLIPRECT_1_TL
+    0x40004000, // PA_SC_CLIPRECT_1_BR
+    0x00000000, // PA_SC_CLIPRECT_2_TL
+    0x40004000, // PA_SC_CLIPRECT_2_BR
+    0x00000000, // PA_SC_CLIPRECT_3_TL
+    0x40004000, // PA_SC_CLIPRECT_3_BR
+    0xaa99aaaa, // PA_SC_EDGERULE
+    0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+    0xffffffff, // CB_TARGET_MASK
+    0xffffffff, // CB_SHADER_MASK
+    0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+    0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+    0x00000000, // COHER_DEST_BASE_0
+    0x00000000, // COHER_DEST_BASE_1
+    0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+    0x00000000, // PA_SC_VPORT_ZMIN_0
+    0x3f800000, // PA_SC_VPORT_ZMAX_0
+    0x00000000, // PA_SC_VPORT_ZMIN_1
+    0x3f800000, // PA_SC_VPORT_ZMAX_1
+    0x00000000, // PA_SC_VPORT_ZMIN_2
+    0x3f800000, // PA_SC_VPORT_ZMAX_2
+    0x00000000, // PA_SC_VPORT_ZMIN_3
+    0x3f800000, // PA_SC_VPORT_ZMAX_3
+    0x00000000, // PA_SC_VPORT_ZMIN_4
+    0x3f800000, // PA_SC_VPORT_ZMAX_4
+    0x00000000, // PA_SC_VPORT_ZMIN_5
+    0x3f800000, // PA_SC_VPORT_ZMAX_5
+    0x00000000, // PA_SC_VPORT_ZMIN_6
+    0x3f800000, // PA_SC_VPORT_ZMAX_6
+    0x00000000, // PA_SC_VPORT_ZMIN_7
+    0x3f800000, // PA_SC_VPORT_ZMAX_7
+    0x00000000, // PA_SC_VPORT_ZMIN_8
+    0x3f800000, // PA_SC_VPORT_ZMAX_8
+    0x00000000, // PA_SC_VPORT_ZMIN_9
+    0x3f800000, // PA_SC_VPORT_ZMAX_9
+    0x00000000, // PA_SC_VPORT_ZMIN_10
+    0x3f800000, // PA_SC_VPORT_ZMAX_10
+    0x00000000, // PA_SC_VPORT_ZMIN_11
+    0x3f800000, // PA_SC_VPORT_ZMAX_11
+    0x00000000, // PA_SC_VPORT_ZMIN_12
+    0x3f800000, // PA_SC_VPORT_ZMAX_12
+    0x00000000, // PA_SC_VPORT_ZMIN_13
+    0x3f800000, // PA_SC_VPORT_ZMAX_13
+    0x00000000, // PA_SC_VPORT_ZMIN_14
+    0x3f800000, // PA_SC_VPORT_ZMAX_14
+    0x00000000, // PA_SC_VPORT_ZMIN_15
+    0x3f800000, // PA_SC_VPORT_ZMAX_15
+};
+static const u32 si_SECT_CONTEXT_def_2[] =
+{
+    0x00000000, // CP_PERFMON_CNTX_CNTL
+    0x00000000, // CP_RINGID
+    0x00000000, // CP_VMID
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0xffffffff, // VGT_MAX_VTX_INDX
+    0x00000000, // VGT_MIN_VTX_INDX
+    0x00000000, // VGT_INDX_OFFSET
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+    0, // HOLE
+    0x00000000, // CB_BLEND_RED
+    0x00000000, // CB_BLEND_GREEN
+    0x00000000, // CB_BLEND_BLUE
+    0x00000000, // CB_BLEND_ALPHA
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_STENCIL_CONTROL
+    0x00000000, // DB_STENCILREFMASK
+    0x00000000, // DB_STENCILREFMASK_BF
+    0, // HOLE
+    0x00000000, // PA_CL_VPORT_XSCALE
+    0x00000000, // PA_CL_VPORT_XOFFSET
+    0x00000000, // PA_CL_VPORT_YSCALE
+    0x00000000, // PA_CL_VPORT_YOFFSET
+    0x00000000, // PA_CL_VPORT_ZSCALE
+    0x00000000, // PA_CL_VPORT_ZOFFSET
+    0x00000000, // PA_CL_VPORT_XSCALE_1
+    0x00000000, // PA_CL_VPORT_XOFFSET_1
+    0x00000000, // PA_CL_VPORT_YSCALE_1
+    0x00000000, // PA_CL_VPORT_YOFFSET_1
+    0x00000000, // PA_CL_VPORT_ZSCALE_1
+    0x00000000, // PA_CL_VPORT_ZOFFSET_1
+    0x00000000, // PA_CL_VPORT_XSCALE_2
+    0x00000000, // PA_CL_VPORT_XOFFSET_2
+    0x00000000, // PA_CL_VPORT_YSCALE_2
+    0x00000000, // PA_CL_VPORT_YOFFSET_2
+    0x00000000, // PA_CL_VPORT_ZSCALE_2
+    0x00000000, // PA_CL_VPORT_ZOFFSET_2
+    0x00000000, // PA_CL_VPORT_XSCALE_3
+    0x00000000, // PA_CL_VPORT_XOFFSET_3
+    0x00000000, // PA_CL_VPORT_YSCALE_3
+    0x00000000, // PA_CL_VPORT_YOFFSET_3
+    0x00000000, // PA_CL_VPORT_ZSCALE_3
+    0x00000000, // PA_CL_VPORT_ZOFFSET_3
+    0x00000000, // PA_CL_VPORT_XSCALE_4
+    0x00000000, // PA_CL_VPORT_XOFFSET_4
+    0x00000000, // PA_CL_VPORT_YSCALE_4
+    0x00000000, // PA_CL_VPORT_YOFFSET_4
+    0x00000000, // PA_CL_VPORT_ZSCALE_4
+    0x00000000, // PA_CL_VPORT_ZOFFSET_4
+    0x00000000, // PA_CL_VPORT_XSCALE_5
+    0x00000000, // PA_CL_VPORT_XOFFSET_5
+    0x00000000, // PA_CL_VPORT_YSCALE_5
+    0x00000000, // PA_CL_VPORT_YOFFSET_5
+    0x00000000, // PA_CL_VPORT_ZSCALE_5
+    0x00000000, // PA_CL_VPORT_ZOFFSET_5
+    0x00000000, // PA_CL_VPORT_XSCALE_6
+    0x00000000, // PA_CL_VPORT_XOFFSET_6
+    0x00000000, // PA_CL_VPORT_YSCALE_6
+    0x00000000, // PA_CL_VPORT_YOFFSET_6
+    0x00000000, // PA_CL_VPORT_ZSCALE_6
+    0x00000000, // PA_CL_VPORT_ZOFFSET_6
+    0x00000000, // PA_CL_VPORT_XSCALE_7
+    0x00000000, // PA_CL_VPORT_XOFFSET_7
+    0x00000000, // PA_CL_VPORT_YSCALE_7
+    0x00000000, // PA_CL_VPORT_YOFFSET_7
+    0x00000000, // PA_CL_VPORT_ZSCALE_7
+    0x00000000, // PA_CL_VPORT_ZOFFSET_7
+    0x00000000, // PA_CL_VPORT_XSCALE_8
+    0x00000000, // PA_CL_VPORT_XOFFSET_8
+    0x00000000, // PA_CL_VPORT_YSCALE_8
+    0x00000000, // PA_CL_VPORT_YOFFSET_8
+    0x00000000, // PA_CL_VPORT_ZSCALE_8
+    0x00000000, // PA_CL_VPORT_ZOFFSET_8
+    0x00000000, // PA_CL_VPORT_XSCALE_9
+    0x00000000, // PA_CL_VPORT_XOFFSET_9
+    0x00000000, // PA_CL_VPORT_YSCALE_9
+    0x00000000, // PA_CL_VPORT_YOFFSET_9
+    0x00000000, // PA_CL_VPORT_ZSCALE_9
+    0x00000000, // PA_CL_VPORT_ZOFFSET_9
+    0x00000000, // PA_CL_VPORT_XSCALE_10
+    0x00000000, // PA_CL_VPORT_XOFFSET_10
+    0x00000000, // PA_CL_VPORT_YSCALE_10
+    0x00000000, // PA_CL_VPORT_YOFFSET_10
+    0x00000000, // PA_CL_VPORT_ZSCALE_10
+    0x00000000, // PA_CL_VPORT_ZOFFSET_10
+    0x00000000, // PA_CL_VPORT_XSCALE_11
+    0x00000000, // PA_CL_VPORT_XOFFSET_11
+    0x00000000, // PA_CL_VPORT_YSCALE_11
+    0x00000000, // PA_CL_VPORT_YOFFSET_11
+    0x00000000, // PA_CL_VPORT_ZSCALE_11
+    0x00000000, // PA_CL_VPORT_ZOFFSET_11
+    0x00000000, // PA_CL_VPORT_XSCALE_12
+    0x00000000, // PA_CL_VPORT_XOFFSET_12
+    0x00000000, // PA_CL_VPORT_YSCALE_12
+    0x00000000, // PA_CL_VPORT_YOFFSET_12
+    0x00000000, // PA_CL_VPORT_ZSCALE_12
+    0x00000000, // PA_CL_VPORT_ZOFFSET_12
+    0x00000000, // PA_CL_VPORT_XSCALE_13
+    0x00000000, // PA_CL_VPORT_XOFFSET_13
+    0x00000000, // PA_CL_VPORT_YSCALE_13
+    0x00000000, // PA_CL_VPORT_YOFFSET_13
+    0x00000000, // PA_CL_VPORT_ZSCALE_13
+    0x00000000, // PA_CL_VPORT_ZOFFSET_13
+    0x00000000, // PA_CL_VPORT_XSCALE_14
+    0x00000000, // PA_CL_VPORT_XOFFSET_14
+    0x00000000, // PA_CL_VPORT_YSCALE_14
+    0x00000000, // PA_CL_VPORT_YOFFSET_14
+    0x00000000, // PA_CL_VPORT_ZSCALE_14
+    0x00000000, // PA_CL_VPORT_ZOFFSET_14
+    0x00000000, // PA_CL_VPORT_XSCALE_15
+    0x00000000, // PA_CL_VPORT_XOFFSET_15
+    0x00000000, // PA_CL_VPORT_YSCALE_15
+    0x00000000, // PA_CL_VPORT_YOFFSET_15
+    0x00000000, // PA_CL_VPORT_ZSCALE_15
+    0x00000000, // PA_CL_VPORT_ZOFFSET_15
+    0x00000000, // PA_CL_UCP_0_X
+    0x00000000, // PA_CL_UCP_0_Y
+    0x00000000, // PA_CL_UCP_0_Z
+    0x00000000, // PA_CL_UCP_0_W
+    0x00000000, // PA_CL_UCP_1_X
+    0x00000000, // PA_CL_UCP_1_Y
+    0x00000000, // PA_CL_UCP_1_Z
+    0x00000000, // PA_CL_UCP_1_W
+    0x00000000, // PA_CL_UCP_2_X
+    0x00000000, // PA_CL_UCP_2_Y
+    0x00000000, // PA_CL_UCP_2_Z
+    0x00000000, // PA_CL_UCP_2_W
+    0x00000000, // PA_CL_UCP_3_X
+    0x00000000, // PA_CL_UCP_3_Y
+    0x00000000, // PA_CL_UCP_3_Z
+    0x00000000, // PA_CL_UCP_3_W
+    0x00000000, // PA_CL_UCP_4_X
+    0x00000000, // PA_CL_UCP_4_Y
+    0x00000000, // PA_CL_UCP_4_Z
+    0x00000000, // PA_CL_UCP_4_W
+    0x00000000, // PA_CL_UCP_5_X
+    0x00000000, // PA_CL_UCP_5_Y
+    0x00000000, // PA_CL_UCP_5_Z
+    0x00000000, // PA_CL_UCP_5_W
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SPI_PS_INPUT_CNTL_0
+    0x00000000, // SPI_PS_INPUT_CNTL_1
+    0x00000000, // SPI_PS_INPUT_CNTL_2
+    0x00000000, // SPI_PS_INPUT_CNTL_3
+    0x00000000, // SPI_PS_INPUT_CNTL_4
+    0x00000000, // SPI_PS_INPUT_CNTL_5
+    0x00000000, // SPI_PS_INPUT_CNTL_6
+    0x00000000, // SPI_PS_INPUT_CNTL_7
+    0x00000000, // SPI_PS_INPUT_CNTL_8
+    0x00000000, // SPI_PS_INPUT_CNTL_9
+    0x00000000, // SPI_PS_INPUT_CNTL_10
+    0x00000000, // SPI_PS_INPUT_CNTL_11
+    0x00000000, // SPI_PS_INPUT_CNTL_12
+    0x00000000, // SPI_PS_INPUT_CNTL_13
+    0x00000000, // SPI_PS_INPUT_CNTL_14
+    0x00000000, // SPI_PS_INPUT_CNTL_15
+    0x00000000, // SPI_PS_INPUT_CNTL_16
+    0x00000000, // SPI_PS_INPUT_CNTL_17
+    0x00000000, // SPI_PS_INPUT_CNTL_18
+    0x00000000, // SPI_PS_INPUT_CNTL_19
+    0x00000000, // SPI_PS_INPUT_CNTL_20
+    0x00000000, // SPI_PS_INPUT_CNTL_21
+    0x00000000, // SPI_PS_INPUT_CNTL_22
+    0x00000000, // SPI_PS_INPUT_CNTL_23
+    0x00000000, // SPI_PS_INPUT_CNTL_24
+    0x00000000, // SPI_PS_INPUT_CNTL_25
+    0x00000000, // SPI_PS_INPUT_CNTL_26
+    0x00000000, // SPI_PS_INPUT_CNTL_27
+    0x00000000, // SPI_PS_INPUT_CNTL_28
+    0x00000000, // SPI_PS_INPUT_CNTL_29
+    0x00000000, // SPI_PS_INPUT_CNTL_30
+    0x00000000, // SPI_PS_INPUT_CNTL_31
+    0x00000000, // SPI_VS_OUT_CONFIG
+    0, // HOLE
+    0x00000000, // SPI_PS_INPUT_ENA
+    0x00000000, // SPI_PS_INPUT_ADDR
+    0x00000000, // SPI_INTERP_CONTROL_0
+    0x00000002, // SPI_PS_IN_CONTROL
+    0, // HOLE
+    0x00000000, // SPI_BARYC_CNTL
+    0, // HOLE
+    0x00000000, // SPI_TMPRING_SIZE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SPI_WAVE_MGMT_1
+    0x00000000, // SPI_WAVE_MGMT_2
+    0x00000000, // SPI_SHADER_POS_FORMAT
+    0x00000000, // SPI_SHADER_Z_FORMAT
+    0x00000000, // SPI_SHADER_COL_FORMAT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_BLEND0_CONTROL
+    0x00000000, // CB_BLEND1_CONTROL
+    0x00000000, // CB_BLEND2_CONTROL
+    0x00000000, // CB_BLEND3_CONTROL
+    0x00000000, // CB_BLEND4_CONTROL
+    0x00000000, // CB_BLEND5_CONTROL
+    0x00000000, // CB_BLEND6_CONTROL
+    0x00000000, // CB_BLEND7_CONTROL
+};
+static const u32 si_SECT_CONTEXT_def_3[] =
+{
+    0x00000000, // PA_CL_POINT_X_RAD
+    0x00000000, // PA_CL_POINT_Y_RAD
+    0x00000000, // PA_CL_POINT_SIZE
+    0x00000000, // PA_CL_POINT_CULL_RAD
+    0x00000000, // VGT_DMA_BASE_HI
+    0x00000000, // VGT_DMA_BASE
+};
+static const u32 si_SECT_CONTEXT_def_4[] =
+{
+    0x00000000, // DB_DEPTH_CONTROL
+    0x00000000, // DB_EQAA
+    0x00000000, // CB_COLOR_CONTROL
+    0x00000000, // DB_SHADER_CONTROL
+    0x00090000, // PA_CL_CLIP_CNTL
+    0x00000004, // PA_SU_SC_MODE_CNTL
+    0x00000000, // PA_CL_VTE_CNTL
+    0x00000000, // PA_CL_VS_OUT_CNTL
+    0x00000000, // PA_CL_NANINF_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+    0x00000000, // PA_SU_PRIM_FILTER_CNTL
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SU_POINT_SIZE
+    0x00000000, // PA_SU_POINT_MINMAX
+    0x00000000, // PA_SU_LINE_CNTL
+    0x00000000, // PA_SC_LINE_STIPPLE
+    0x00000000, // VGT_OUTPUT_PATH_CNTL
+    0x00000000, // VGT_HOS_CNTL
+    0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+    0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+    0x00000000, // VGT_HOS_REUSE_DEPTH
+    0x00000000, // VGT_GROUP_PRIM_TYPE
+    0x00000000, // VGT_GROUP_FIRST_DECR
+    0x00000000, // VGT_GROUP_DECR
+    0x00000000, // VGT_GROUP_VECT_0_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_CNTL
+    0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+    0x00000000, // VGT_GS_MODE
+    0, // HOLE
+    0x00000000, // PA_SC_MODE_CNTL_0
+    0x00000000, // PA_SC_MODE_CNTL_1
+    0x00000000, // VGT_ENHANCE
+    0x00000100, // VGT_GS_PER_ES
+    0x00000080, // VGT_ES_PER_GS
+    0x00000002, // VGT_GS_PER_VS
+    0x00000000, // VGT_GSVS_RING_OFFSET_1
+    0x00000000, // VGT_GSVS_RING_OFFSET_2
+    0x00000000, // VGT_GSVS_RING_OFFSET_3
+    0x00000000, // VGT_GS_OUT_PRIM_TYPE
+    0x00000000, // IA_ENHANCE
+};
+static const u32 si_SECT_CONTEXT_def_5[] =
+{
+    0x00000000, // VGT_PRIMITIVEID_EN
+};
+static const u32 si_SECT_CONTEXT_def_6[] =
+{
+    0x00000000, // VGT_PRIMITIVEID_RESET
+};
+static const u32 si_SECT_CONTEXT_def_7[] =
+{
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_INSTANCE_STEP_RATE_0
+    0x00000000, // VGT_INSTANCE_STEP_RATE_1
+    0x000000ff, // IA_MULTI_VGT_PARAM
+    0x00000000, // VGT_ESGS_RING_ITEMSIZE
+    0x00000000, // VGT_GSVS_RING_ITEMSIZE
+    0x00000000, // VGT_REUSE_OFF
+    0x00000000, // VGT_VTX_CNT_EN
+    0x00000000, // DB_HTILE_SURFACE
+    0x00000000, // DB_SRESULTS_COMPARE_STATE0
+    0x00000000, // DB_SRESULTS_COMPARE_STATE1
+    0x00000000, // DB_PRELOAD_CONTROL
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+    0, // HOLE
+    0x00000000, // VGT_GS_MAX_VERT_OUT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_SHADER_STAGES_EN
+    0x00000000, // VGT_LS_HS_CONFIG
+    0x00000000, // VGT_GS_VERT_ITEMSIZE
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_1
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_2
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_3
+    0x00000000, // VGT_TF_PARAM
+    0x00000000, // DB_ALPHA_TO_MASK
+    0, // HOLE
+    0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+    0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+    0x00000000, // VGT_GS_INSTANCE_CNT
+    0x00000000, // VGT_STRMOUT_CONFIG
+    0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SC_CENTROID_PRIORITY_0
+    0x00000000, // PA_SC_CENTROID_PRIORITY_1
+    0x00001000, // PA_SC_LINE_CNTL
+    0x00000000, // PA_SC_AA_CONFIG
+    0x00000005, // PA_SU_VTX_CNTL
+    0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+    0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+    0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+    0x00000010, // VGT_OUT_DEALLOC_CNTL
+    0x00000000, // CB_COLOR0_BASE
+    0x00000000, // CB_COLOR0_PITCH
+    0x00000000, // CB_COLOR0_SLICE
+    0x00000000, // CB_COLOR0_VIEW
+    0x00000000, // CB_COLOR0_INFO
+    0x00000000, // CB_COLOR0_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR0_CMASK
+    0x00000000, // CB_COLOR0_CMASK_SLICE
+    0x00000000, // CB_COLOR0_FMASK
+    0x00000000, // CB_COLOR0_FMASK_SLICE
+    0x00000000, // CB_COLOR0_CLEAR_WORD0
+    0x00000000, // CB_COLOR0_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR1_BASE
+    0x00000000, // CB_COLOR1_PITCH
+    0x00000000, // CB_COLOR1_SLICE
+    0x00000000, // CB_COLOR1_VIEW
+    0x00000000, // CB_COLOR1_INFO
+    0x00000000, // CB_COLOR1_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR1_CMASK
+    0x00000000, // CB_COLOR1_CMASK_SLICE
+    0x00000000, // CB_COLOR1_FMASK
+    0x00000000, // CB_COLOR1_FMASK_SLICE
+    0x00000000, // CB_COLOR1_CLEAR_WORD0
+    0x00000000, // CB_COLOR1_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR2_BASE
+    0x00000000, // CB_COLOR2_PITCH
+    0x00000000, // CB_COLOR2_SLICE
+    0x00000000, // CB_COLOR2_VIEW
+    0x00000000, // CB_COLOR2_INFO
+    0x00000000, // CB_COLOR2_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR2_CMASK
+    0x00000000, // CB_COLOR2_CMASK_SLICE
+    0x00000000, // CB_COLOR2_FMASK
+    0x00000000, // CB_COLOR2_FMASK_SLICE
+    0x00000000, // CB_COLOR2_CLEAR_WORD0
+    0x00000000, // CB_COLOR2_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR3_BASE
+    0x00000000, // CB_COLOR3_PITCH
+    0x00000000, // CB_COLOR3_SLICE
+    0x00000000, // CB_COLOR3_VIEW
+    0x00000000, // CB_COLOR3_INFO
+    0x00000000, // CB_COLOR3_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR3_CMASK
+    0x00000000, // CB_COLOR3_CMASK_SLICE
+    0x00000000, // CB_COLOR3_FMASK
+    0x00000000, // CB_COLOR3_FMASK_SLICE
+    0x00000000, // CB_COLOR3_CLEAR_WORD0
+    0x00000000, // CB_COLOR3_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR4_BASE
+    0x00000000, // CB_COLOR4_PITCH
+    0x00000000, // CB_COLOR4_SLICE
+    0x00000000, // CB_COLOR4_VIEW
+    0x00000000, // CB_COLOR4_INFO
+    0x00000000, // CB_COLOR4_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR4_CMASK
+    0x00000000, // CB_COLOR4_CMASK_SLICE
+    0x00000000, // CB_COLOR4_FMASK
+    0x00000000, // CB_COLOR4_FMASK_SLICE
+    0x00000000, // CB_COLOR4_CLEAR_WORD0
+    0x00000000, // CB_COLOR4_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR5_BASE
+    0x00000000, // CB_COLOR5_PITCH
+    0x00000000, // CB_COLOR5_SLICE
+    0x00000000, // CB_COLOR5_VIEW
+    0x00000000, // CB_COLOR5_INFO
+    0x00000000, // CB_COLOR5_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR5_CMASK
+    0x00000000, // CB_COLOR5_CMASK_SLICE
+    0x00000000, // CB_COLOR5_FMASK
+    0x00000000, // CB_COLOR5_FMASK_SLICE
+    0x00000000, // CB_COLOR5_CLEAR_WORD0
+    0x00000000, // CB_COLOR5_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR6_BASE
+    0x00000000, // CB_COLOR6_PITCH
+    0x00000000, // CB_COLOR6_SLICE
+    0x00000000, // CB_COLOR6_VIEW
+    0x00000000, // CB_COLOR6_INFO
+    0x00000000, // CB_COLOR6_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR6_CMASK
+    0x00000000, // CB_COLOR6_CMASK_SLICE
+    0x00000000, // CB_COLOR6_FMASK
+    0x00000000, // CB_COLOR6_FMASK_SLICE
+    0x00000000, // CB_COLOR6_CLEAR_WORD0
+    0x00000000, // CB_COLOR6_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR7_BASE
+    0x00000000, // CB_COLOR7_PITCH
+    0x00000000, // CB_COLOR7_SLICE
+    0x00000000, // CB_COLOR7_VIEW
+    0x00000000, // CB_COLOR7_INFO
+    0x00000000, // CB_COLOR7_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR7_CMASK
+    0x00000000, // CB_COLOR7_CMASK_SLICE
+    0x00000000, // CB_COLOR7_FMASK
+    0x00000000, // CB_COLOR7_FMASK_SLICE
+    0x00000000, // CB_COLOR7_CLEAR_WORD0
+    0x00000000, // CB_COLOR7_CLEAR_WORD1
+};
+static const struct cs_extent_def si_SECT_CONTEXT_defs[] =
+{
+    {si_SECT_CONTEXT_def_1, 0x0000a000, 212 },
+    {si_SECT_CONTEXT_def_2, 0x0000a0d8, 272 },
+    {si_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
+    {si_SECT_CONTEXT_def_4, 0x0000a200, 157 },
+    {si_SECT_CONTEXT_def_5, 0x0000a2a1, 1 },
+    {si_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
+    {si_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
+    { NULL, 0, 0 }
+};
+static const struct cs_section_def si_cs_data[] = {
+    { si_SECT_CONTEXT_defs, SECT_CONTEXT },
+    { NULL, SECT_NONE }
+};
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h b/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h
new file mode 100644
index 0000000..895c8e2
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __SI_REG_H__
+#define __SI_REG_H__
+
+/* SI */
+#define SI_DC_GPIO_HPD_MASK                      0x196c
+#define SI_DC_GPIO_HPD_A                         0x196d
+#define SI_DC_GPIO_HPD_EN                        0x196e
+#define SI_DC_GPIO_HPD_Y                         0x196f
+
+#define SI_GRPH_CONTROL                          0x1a01
+#       define SI_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#       define SI_GRPH_DEPTH_8BPP                0
+#       define SI_GRPH_DEPTH_16BPP               1
+#       define SI_GRPH_DEPTH_32BPP               2
+#       define SI_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define SI_ADDR_SURF_2_BANK               0
+#       define SI_ADDR_SURF_4_BANK               1
+#       define SI_ADDR_SURF_8_BANK               2
+#       define SI_ADDR_SURF_16_BANK              3
+#       define SI_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define SI_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define SI_ADDR_SURF_BANK_WIDTH_1         0
+#       define SI_ADDR_SURF_BANK_WIDTH_2         1
+#       define SI_ADDR_SURF_BANK_WIDTH_4         2
+#       define SI_ADDR_SURF_BANK_WIDTH_8         3
+#       define SI_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+/* 8 BPP */
+#       define SI_GRPH_FORMAT_INDEXED            0
+/* 16 BPP */
+#       define SI_GRPH_FORMAT_ARGB1555           0
+#       define SI_GRPH_FORMAT_ARGB565            1
+#       define SI_GRPH_FORMAT_ARGB4444           2
+#       define SI_GRPH_FORMAT_AI88               3
+#       define SI_GRPH_FORMAT_MONO16             4
+#       define SI_GRPH_FORMAT_BGRA5551           5
+/* 32 BPP */
+#       define SI_GRPH_FORMAT_ARGB8888           0
+#       define SI_GRPH_FORMAT_ARGB2101010        1
+#       define SI_GRPH_FORMAT_32BPP_DIG          2
+#       define SI_GRPH_FORMAT_8B_ARGB2101010     3
+#       define SI_GRPH_FORMAT_BGRA1010102        4
+#       define SI_GRPH_FORMAT_8B_BGRA1010102     5
+#       define SI_GRPH_FORMAT_RGB111110          6
+#       define SI_GRPH_FORMAT_BGR101111          7
+#       define SI_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define SI_ADDR_SURF_BANK_HEIGHT_1        0
+#       define SI_ADDR_SURF_BANK_HEIGHT_2        1
+#       define SI_ADDR_SURF_BANK_HEIGHT_4        2
+#       define SI_ADDR_SURF_BANK_HEIGHT_8        3
+#       define SI_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define SI_ADDR_SURF_TILE_SPLIT_64B       0
+#       define SI_ADDR_SURF_TILE_SPLIT_128B      1
+#       define SI_ADDR_SURF_TILE_SPLIT_256B      2
+#       define SI_ADDR_SURF_TILE_SPLIT_512B      3
+#       define SI_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define SI_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define SI_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define SI_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#       define SI_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#       define SI_GRPH_ARRAY_LINEAR_GENERAL      0
+#       define SI_GRPH_ARRAY_LINEAR_ALIGNED      1
+#       define SI_GRPH_ARRAY_1D_TILED_THIN1      2
+#       define SI_GRPH_ARRAY_2D_TILED_THIN1      4
+#       define SI_GRPH_PIPE_CONFIG(x)		 (((x) & 0x1f) << 24)
+#       define SI_ADDR_SURF_P2			 0
+#       define SI_ADDR_SURF_P4_8x16		 4
+#       define SI_ADDR_SURF_P4_16x16		 5
+#       define SI_ADDR_SURF_P4_16x32		 6
+#       define SI_ADDR_SURF_P4_32x32		 7
+#       define SI_ADDR_SURF_P8_16x16_8x16	 8
+#       define SI_ADDR_SURF_P8_16x32_8x16	 9
+#       define SI_ADDR_SURF_P8_32x32_8x16	 10
+#       define SI_ADDR_SURF_P8_16x32_16x16	 11
+#       define SI_ADDR_SURF_P8_32x32_16x16	 12
+#       define SI_ADDR_SURF_P8_32x32_16x32	 13
+#       define SI_ADDR_SURF_P8_32x64_32x32	 14
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/sid.h b/drivers/gpu/drm/amd/include/asic_reg/si/sid.h
new file mode 100644
index 0000000..c57eff1
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/si/sid.h
@@ -0,0 +1,2461 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef SI_H
+#define SI_H
+
+#define TAHITI_RB_BITMAP_WIDTH_PER_SH  2
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN        0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN         0x12010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN        0x02010001
+
+#define SI_MAX_SH_GPRS		 	256
+#define SI_MAX_TEMP_GPRS         	16
+#define SI_MAX_SH_THREADS        	256
+#define SI_MAX_SH_STACK_ENTRIES  	4096
+#define SI_MAX_FRC_EOV_CNT       	16384
+#define SI_MAX_BACKENDS          	8
+#define SI_MAX_BACKENDS_MASK     	0xFF
+#define SI_MAX_BACKENDS_PER_SE_MASK     0x0F
+#define SI_MAX_SIMDS             	12
+#define SI_MAX_SIMDS_MASK        	0x0FFF
+#define SI_MAX_SIMDS_PER_SE_MASK        0x00FF
+#define SI_MAX_PIPES            	8
+#define SI_MAX_PIPES_MASK        	0xFF
+#define SI_MAX_PIPES_PER_SIMD_MASK      0x3F
+#define SI_MAX_LDS_NUM           	0xFFFF
+#define SI_MAX_TCC               	16
+#define SI_MAX_TCC_MASK          	0xFFFF
+
+#define AMDGPU_NUM_OF_VMIDS 		8
+
+/* SMC IND accessor regs */
+#define SMC_IND_INDEX_0                              0x80
+#define SMC_IND_DATA_0                               0x81
+
+#define SMC_IND_ACCESS_CNTL                          0x8A
+#       define AUTO_INCREMENT_IND_0                  (1 << 0)
+#define SMC_MESSAGE_0                                0x8B
+#define SMC_RESP_0                                   0x8C
+
+/* CG IND registers are accessed via SMC indirect space + SMC_CG_IND_START */
+#define SMC_CG_IND_START                    0xc0030000
+#define SMC_CG_IND_END                      0xc0040000
+
+#define	CG_CGTT_LOCAL_0				0x400
+#define	CG_CGTT_LOCAL_1				0x401
+
+/* SMC IND registers */
+#define	SMC_SYSCON_RESET_CNTL				0x80000000
+#       define RST_REG                                  (1 << 0)
+#define	SMC_SYSCON_CLOCK_CNTL_0				0x80000004
+#       define CK_DISABLE                               (1 << 0)
+#       define CKEN                                     (1 << 24)
+
+#define VGA_HDP_CONTROL  				0xCA
+#define		VGA_MEMORY_DISABLE				(1 << 4)
+
+#define DCCG_DISP_SLOW_SELECT_REG                       0x13F
+#define		DCCG_DISP1_SLOW_SELECT(x)		((x) << 0)
+#define		DCCG_DISP1_SLOW_SELECT_MASK		(7 << 0)
+#define		DCCG_DISP1_SLOW_SELECT_SHIFT		0
+#define		DCCG_DISP2_SLOW_SELECT(x)		((x) << 4)
+#define		DCCG_DISP2_SLOW_SELECT_MASK		(7 << 4)
+#define		DCCG_DISP2_SLOW_SELECT_SHIFT		4
+
+#define	CG_SPLL_FUNC_CNTL				0x180
+#define		SPLL_RESET				(1 << 0)
+#define		SPLL_SLEEP				(1 << 1)
+#define		SPLL_BYPASS_EN				(1 << 3)
+#define		SPLL_REF_DIV(x)				((x) << 4)
+#define		SPLL_REF_DIV_MASK			(0x3f << 4)
+#define		SPLL_PDIV_A(x)				((x) << 20)
+#define		SPLL_PDIV_A_MASK			(0x7f << 20)
+#define		SPLL_PDIV_A_SHIFT			20
+#define	CG_SPLL_FUNC_CNTL_2				0x181
+#define		SCLK_MUX_SEL(x)				((x) << 0)
+#define		SCLK_MUX_SEL_MASK			(0x1ff << 0)
+#define		SPLL_CTLREQ_CHG				(1 << 23)
+#define		SCLK_MUX_UPDATE				(1 << 26)
+#define	CG_SPLL_FUNC_CNTL_3				0x182
+#define		SPLL_FB_DIV(x)				((x) << 0)
+#define		SPLL_FB_DIV_MASK			(0x3ffffff << 0)
+#define		SPLL_FB_DIV_SHIFT			0
+#define		SPLL_DITHEN				(1 << 28)
+#define	CG_SPLL_FUNC_CNTL_4				0x183
+
+#define	SPLL_STATUS					0x185
+#define		SPLL_CHG_STATUS				(1 << 1)
+#define	SPLL_CNTL_MODE					0x186
+#define		SPLL_SW_DIR_CONTROL			(1 << 0)
+#	define SPLL_REFCLK_SEL(x)			((x) << 26)
+#	define SPLL_REFCLK_SEL_MASK			(3 << 26)
+
+#define	CG_SPLL_SPREAD_SPECTRUM				0x188
+#define		SSEN					(1 << 0)
+#define		CLK_S(x)				((x) << 4)
+#define		CLK_S_MASK				(0xfff << 4)
+#define		CLK_S_SHIFT				4
+#define	CG_SPLL_SPREAD_SPECTRUM_2			0x189
+#define		CLK_V(x)				((x) << 0)
+#define		CLK_V_MASK				(0x3ffffff << 0)
+#define		CLK_V_SHIFT				0
+
+#define	CG_SPLL_AUTOSCALE_CNTL				0x18b
+#       define AUTOSCALE_ON_SS_CLEAR                    (1 << 9)
+
+/* discrete uvd clocks */
+#define	CG_UPLL_FUNC_CNTL				0x18d
+#	define UPLL_RESET_MASK				0x00000001
+#	define UPLL_SLEEP_MASK				0x00000002
+#	define UPLL_BYPASS_EN_MASK			0x00000004
+#	define UPLL_CTLREQ_MASK				0x00000008
+#	define UPLL_VCO_MODE_MASK			0x00000600
+#	define UPLL_REF_DIV_MASK			0x003F0000
+#	define UPLL_CTLACK_MASK				0x40000000
+#	define UPLL_CTLACK2_MASK			0x80000000
+#define	CG_UPLL_FUNC_CNTL_2				0x18e
+#	define UPLL_PDIV_A(x)				((x) << 0)
+#	define UPLL_PDIV_A_MASK				0x0000007F
+#	define UPLL_PDIV_B(x)				((x) << 8)
+#	define UPLL_PDIV_B_MASK				0x00007F00
+#	define VCLK_SRC_SEL(x)				((x) << 20)
+#	define VCLK_SRC_SEL_MASK			0x01F00000
+#	define DCLK_SRC_SEL(x)				((x) << 25)
+#	define DCLK_SRC_SEL_MASK			0x3E000000
+#define	CG_UPLL_FUNC_CNTL_3				0x18f
+#	define UPLL_FB_DIV(x)				((x) << 0)
+#	define UPLL_FB_DIV_MASK				0x01FFFFFF
+#define	CG_UPLL_FUNC_CNTL_4                             0x191
+#	define UPLL_SPARE_ISPARE9			0x00020000
+#define	CG_UPLL_FUNC_CNTL_5				0x192
+#	define RESET_ANTI_MUX_MASK			0x00000200
+#define	CG_UPLL_SPREAD_SPECTRUM				0x194
+#	define SSEN_MASK				0x00000001
+
+#define	MPLL_BYPASSCLK_SEL				0x197
+#	define MPLL_CLKOUT_SEL(x)			((x) << 8)
+#	define MPLL_CLKOUT_SEL_MASK			0xFF00
+
+#define CG_CLKPIN_CNTL                                    0x198
+#       define XTALIN_DIVIDE                              (1 << 1)
+#       define BCLK_AS_XCLK                               (1 << 2)
+#define CG_CLKPIN_CNTL_2                                  0x199
+#       define FORCE_BIF_REFCLK_EN                        (1 << 3)
+#       define MUX_TCLK_TO_XCLK                           (1 << 8)
+
+#define	THM_CLK_CNTL					0x19b
+#	define CMON_CLK_SEL(x)				((x) << 0)
+#	define CMON_CLK_SEL_MASK			0xFF
+#	define TMON_CLK_SEL(x)				((x) << 8)
+#	define TMON_CLK_SEL_MASK			0xFF00
+#define	MISC_CLK_CNTL					0x19c
+#	define DEEP_SLEEP_CLK_SEL(x)			((x) << 0)
+#	define DEEP_SLEEP_CLK_SEL_MASK			0xFF
+#	define ZCLK_SEL(x)				((x) << 8)
+#	define ZCLK_SEL_MASK				0xFF00
+
+#define	CG_THERMAL_CTRL					0x1c0
+#define 	DPM_EVENT_SRC(x)			((x) << 0)
+#define 	DPM_EVENT_SRC_MASK			(7 << 0)
+#define		DIG_THERM_DPM(x)			((x) << 14)
+#define		DIG_THERM_DPM_MASK			0x003FC000
+#define		DIG_THERM_DPM_SHIFT			14
+#define	CG_THERMAL_STATUS				0x1c1
+#define		FDO_PWM_DUTY(x)				((x) << 9)
+#define		FDO_PWM_DUTY_MASK			(0xff << 9)
+#define		FDO_PWM_DUTY_SHIFT			9
+#define	CG_THERMAL_INT					0x1c2
+#define		DIG_THERM_INTH(x)			((x) << 8)
+#define		DIG_THERM_INTH_MASK			0x0000FF00
+#define		DIG_THERM_INTH_SHIFT			8
+#define		DIG_THERM_INTL(x)			((x) << 16)
+#define		DIG_THERM_INTL_MASK			0x00FF0000
+#define		DIG_THERM_INTL_SHIFT			16
+#define 	THERM_INT_MASK_HIGH			(1 << 24)
+#define 	THERM_INT_MASK_LOW			(1 << 25)
+
+#define	CG_MULT_THERMAL_CTRL					0x1c4
+#define		TEMP_SEL(x)					((x) << 20)
+#define		TEMP_SEL_MASK					(0xff << 20)
+#define		TEMP_SEL_SHIFT					20
+#define	CG_MULT_THERMAL_STATUS					0x1c5
+#define		ASIC_MAX_TEMP(x)				((x) << 0)
+#define		ASIC_MAX_TEMP_MASK				0x000001ff
+#define		ASIC_MAX_TEMP_SHIFT				0
+#define		CTF_TEMP(x)					((x) << 9)
+#define		CTF_TEMP_MASK					0x0003fe00
+#define		CTF_TEMP_SHIFT					9
+
+#define	CG_FDO_CTRL0					0x1d5
+#define		FDO_STATIC_DUTY(x)			((x) << 0)
+#define		FDO_STATIC_DUTY_MASK			0x000000FF
+#define		FDO_STATIC_DUTY_SHIFT			0
+#define	CG_FDO_CTRL1					0x1d6
+#define		FMAX_DUTY100(x)				((x) << 0)
+#define		FMAX_DUTY100_MASK			0x000000FF
+#define		FMAX_DUTY100_SHIFT			0
+#define	CG_FDO_CTRL2					0x1d7
+#define		TMIN(x)					((x) << 0)
+#define		TMIN_MASK				0x000000FF
+#define		TMIN_SHIFT				0
+#define		FDO_PWM_MODE(x)				((x) << 11)
+#define		FDO_PWM_MODE_MASK			(7 << 11)
+#define		FDO_PWM_MODE_SHIFT			11
+#define		TACH_PWM_RESP_RATE(x)			((x) << 25)
+#define		TACH_PWM_RESP_RATE_MASK			(0x7f << 25)
+#define		TACH_PWM_RESP_RATE_SHIFT		25
+
+#define CG_TACH_CTRL                                    0x1dc
+#       define EDGE_PER_REV(x)                          ((x) << 0)
+#       define EDGE_PER_REV_MASK                        (0x7 << 0)
+#       define EDGE_PER_REV_SHIFT                       0
+#       define TARGET_PERIOD(x)                         ((x) << 3)
+#       define TARGET_PERIOD_MASK                       0xfffffff8
+#       define TARGET_PERIOD_SHIFT                      3
+#define CG_TACH_STATUS                                  0x1dd
+#       define TACH_PERIOD(x)                           ((x) << 0)
+#       define TACH_PERIOD_MASK                         0xffffffff
+#       define TACH_PERIOD_SHIFT                        0
+
+#define GENERAL_PWRMGT                                  0x1e0
+#       define GLOBAL_PWRMGT_EN                         (1 << 0)
+#       define STATIC_PM_EN                             (1 << 1)
+#       define THERMAL_PROTECTION_DIS                   (1 << 2)
+#       define THERMAL_PROTECTION_TYPE                  (1 << 3)
+#       define SW_SMIO_INDEX(x)                         ((x) << 6)
+#       define SW_SMIO_INDEX_MASK                       (1 << 6)
+#       define SW_SMIO_INDEX_SHIFT                      6
+#       define VOLT_PWRMGT_EN                           (1 << 10)
+#       define DYN_SPREAD_SPECTRUM_EN                   (1 << 23)
+#define CG_TPC                                            0x1e1
+#define SCLK_PWRMGT_CNTL                                  0x1e2
+#       define SCLK_PWRMGT_OFF                            (1 << 0)
+#       define SCLK_LOW_D1                                (1 << 1)
+#       define FIR_RESET                                  (1 << 4)
+#       define FIR_FORCE_TREND_SEL                        (1 << 5)
+#       define FIR_TREND_MODE                             (1 << 6)
+#       define DYN_GFX_CLK_OFF_EN                         (1 << 7)
+#       define GFX_CLK_FORCE_ON                           (1 << 8)
+#       define GFX_CLK_REQUEST_OFF                        (1 << 9)
+#       define GFX_CLK_FORCE_OFF                          (1 << 10)
+#       define GFX_CLK_OFF_ACPI_D1                        (1 << 11)
+#       define GFX_CLK_OFF_ACPI_D2                        (1 << 12)
+#       define GFX_CLK_OFF_ACPI_D3                        (1 << 13)
+#       define DYN_LIGHT_SLEEP_EN                         (1 << 14)
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX                  0x1e6
+#       define CURRENT_STATE_INDEX_MASK                   (0xf << 4)
+#       define CURRENT_STATE_INDEX_SHIFT                  4
+
+#define CG_FTV                                            0x1ef
+
+#define CG_FFCT_0                                         0x1f0
+#       define UTC_0(x)                                   ((x) << 0)
+#       define UTC_0_MASK                                 (0x3ff << 0)
+#       define DTC_0(x)                                   ((x) << 10)
+#       define DTC_0_MASK                                 (0x3ff << 10)
+
+#define CG_BSP                                          0x1ff
+#       define BSP(x)					((x) << 0)
+#       define BSP_MASK					(0xffff << 0)
+#       define BSU(x)					((x) << 16)
+#       define BSU_MASK					(0xf << 16)
+#define CG_AT                                           0x200
+#       define CG_R(x)					((x) << 0)
+#       define CG_R_MASK				(0xffff << 0)
+#       define CG_L(x)					((x) << 16)
+#       define CG_L_MASK				(0xffff << 16)
+
+#define CG_GIT                                          0x201
+#       define CG_GICST(x)                              ((x) << 0)
+#       define CG_GICST_MASK                            (0xffff << 0)
+#       define CG_GIPOT(x)                              ((x) << 16)
+#       define CG_GIPOT_MASK                            (0xffff << 16)
+
+#define CG_SSP                                            0x203
+#       define SST(x)                                     ((x) << 0)
+#       define SST_MASK                                   (0xffff << 0)
+#       define SSTU(x)                                    ((x) << 16)
+#       define SSTU_MASK                                  (0xf << 16)
+
+#define CG_DISPLAY_GAP_CNTL                               0x20a
+#       define DISP1_GAP(x)                               ((x) << 0)
+#       define DISP1_GAP_MASK                             (3 << 0)
+#       define DISP2_GAP(x)                               ((x) << 2)
+#       define DISP2_GAP_MASK                             (3 << 2)
+#       define VBI_TIMER_COUNT(x)                         ((x) << 4)
+#       define VBI_TIMER_COUNT_MASK                       (0x3fff << 4)
+#       define VBI_TIMER_UNIT(x)                          ((x) << 20)
+#       define VBI_TIMER_UNIT_MASK                        (7 << 20)
+#       define DISP1_GAP_MCHG(x)                          ((x) << 24)
+#       define DISP1_GAP_MCHG_MASK                        (3 << 24)
+#       define DISP2_GAP_MCHG(x)                          ((x) << 26)
+#       define DISP2_GAP_MCHG_MASK                        (3 << 26)
+
+#define	CG_ULV_CONTROL					0x21e
+#define	CG_ULV_PARAMETER				0x21f
+
+#define	SMC_SCRATCH0					0x221
+
+#define	CG_CAC_CTRL					0x22e
+#	define CAC_WINDOW(x)				((x) << 0)
+#	define CAC_WINDOW_MASK				0x00ffffff
+
+#define DMIF_ADDR_CONFIG  				0x2F5
+
+#define DMIF_ADDR_CALC  				0x300
+
+#define	PIPE0_DMIF_BUFFER_CONTROL			  0x0328
+#       define DMIF_BUFFERS_ALLOCATED(x)                  ((x) << 0)
+#       define DMIF_BUFFERS_ALLOCATED_COMPLETED           (1 << 4)
+
+#define	SRBM_STATUS				        0x394
+#define		GRBM_RQ_PENDING 			(1 << 5)
+#define		VMC_BUSY 				(1 << 8)
+#define		MCB_BUSY 				(1 << 9)
+#define		MCB_NON_DISPLAY_BUSY 			(1 << 10)
+#define		MCC_BUSY 				(1 << 11)
+#define		MCD_BUSY 				(1 << 12)
+#define		SEM_BUSY 				(1 << 14)
+#define		IH_BUSY 				(1 << 17)
+
+#define	SRBM_SOFT_RESET				        0x398
+#define		SOFT_RESET_BIF				(1 << 1)
+#define		SOFT_RESET_DC				(1 << 5)
+#define		SOFT_RESET_DMA1				(1 << 6)
+#define		SOFT_RESET_GRBM				(1 << 8)
+#define		SOFT_RESET_HDP				(1 << 9)
+#define		SOFT_RESET_IH				(1 << 10)
+#define		SOFT_RESET_MC				(1 << 11)
+#define		SOFT_RESET_ROM				(1 << 14)
+#define		SOFT_RESET_SEM				(1 << 15)
+#define		SOFT_RESET_VMC				(1 << 17)
+#define		SOFT_RESET_DMA				(1 << 20)
+#define		SOFT_RESET_TST				(1 << 21)
+#define		SOFT_RESET_REGBB			(1 << 22)
+#define		SOFT_RESET_ORB				(1 << 23)
+
+#define	CC_SYS_RB_BACKEND_DISABLE			0x3A0
+#define	GC_USER_SYS_RB_BACKEND_DISABLE			0x3A1
+
+#define SRBM_READ_ERROR					0x3A6
+#define SRBM_INT_CNTL					0x3A8
+#define SRBM_INT_ACK					0x3AA
+
+#define	SRBM_STATUS2				        0x3B1
+#define		DMA_BUSY 				(1 << 5)
+#define		DMA1_BUSY 				(1 << 6)
+
+#define VM_L2_CNTL					0x500
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		L2_CACHE_PTE_ENDIAN_SWAP_MODE(x)		((x) << 2)
+#define		L2_CACHE_PDE_ENDIAN_SWAP_MODE(x)		((x) << 4)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE	(1 << 10)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 15)
+#define		CONTEXT1_IDENTITY_ACCESS_MODE(x)		(((x) & 3) << 19)
+#define VM_L2_CNTL2					0x501
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define		INVALIDATE_CACHE_MODE(x)			((x) << 26)
+#define			INVALIDATE_PTE_AND_PDE_CACHES		0
+#define			INVALIDATE_ONLY_PTE_CACHES		1
+#define			INVALIDATE_ONLY_PDE_CACHES		2
+#define VM_L2_CNTL3					0x502
+#define		BANK_SELECT(x)					((x) << 0)
+#define		L2_CACHE_UPDATE_MODE(x)				((x) << 6)
+#define		L2_CACHE_BIGK_FRAGMENT_SIZE(x)			((x) << 15)
+#define		L2_CACHE_BIGK_ASSOCIATIVITY			(1 << 20)
+#define	VM_L2_STATUS					0x503
+#define		L2_BUSY						(1 << 0)
+#define VM_CONTEXT0_CNTL				0x504
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 3)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT	(1 << 6)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT	(1 << 7)
+#define		PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 9)
+#define		PDE0_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 10)
+#define		VALID_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 12)
+#define		VALID_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 13)
+#define		READ_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 15)
+#define		READ_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 16)
+#define		WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 18)
+#define		WRITE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 19)
+#define		PAGE_TABLE_BLOCK_SIZE(x)			(((x) & 0xF) << 24)
+#define VM_CONTEXT1_CNTL				0x505
+#define VM_CONTEXT0_CNTL2				0x50C
+#define VM_CONTEXT1_CNTL2				0x50D
+#define	VM_CONTEXT8_PAGE_TABLE_BASE_ADDR		0x50E
+#define	VM_CONTEXT9_PAGE_TABLE_BASE_ADDR		0x50F
+#define	VM_CONTEXT10_PAGE_TABLE_BASE_ADDR		0x510
+#define	VM_CONTEXT11_PAGE_TABLE_BASE_ADDR		0x511
+#define	VM_CONTEXT12_PAGE_TABLE_BASE_ADDR		0x512
+#define	VM_CONTEXT13_PAGE_TABLE_BASE_ADDR		0x513
+#define	VM_CONTEXT14_PAGE_TABLE_BASE_ADDR		0x514
+#define	VM_CONTEXT15_PAGE_TABLE_BASE_ADDR		0x515
+
+#define	VM_CONTEXT1_PROTECTION_FAULT_ADDR		0x53f
+#define	VM_CONTEXT1_PROTECTION_FAULT_STATUS		0x537
+#define		PROTECTIONS_MASK			(0xf << 0)
+#define		PROTECTIONS_SHIFT			0
+		/* bit 0: range
+		 * bit 1: pde0
+		 * bit 2: valid
+		 * bit 3: read
+		 * bit 4: write
+		 */
+#define		MEMORY_CLIENT_ID_MASK			(0xff << 12)
+#define		MEMORY_CLIENT_ID_SHIFT			12
+#define		MEMORY_CLIENT_RW_MASK			(1 << 24)
+#define		MEMORY_CLIENT_RW_SHIFT			24
+#define		FAULT_VMID_MASK				(0xf << 25)
+#define		FAULT_VMID_SHIFT			25
+
+#define VM_INVALIDATE_REQUEST				0x51E
+#define VM_INVALIDATE_RESPONSE				0x51F
+
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x546
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR	0x547
+
+#define	VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x54F
+#define	VM_CONTEXT1_PAGE_TABLE_BASE_ADDR		0x550
+#define	VM_CONTEXT2_PAGE_TABLE_BASE_ADDR		0x551
+#define	VM_CONTEXT3_PAGE_TABLE_BASE_ADDR		0x552
+#define	VM_CONTEXT4_PAGE_TABLE_BASE_ADDR		0x553
+#define	VM_CONTEXT5_PAGE_TABLE_BASE_ADDR		0x554
+#define	VM_CONTEXT6_PAGE_TABLE_BASE_ADDR		0x555
+#define	VM_CONTEXT7_PAGE_TABLE_BASE_ADDR		0x556
+#define	VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x557
+#define	VM_CONTEXT1_PAGE_TABLE_START_ADDR		0x558
+
+#define	VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x55F
+#define	VM_CONTEXT1_PAGE_TABLE_END_ADDR			0x560
+
+#define VM_L2_CG           				0x570
+#define		MC_CG_ENABLE				(1 << 18)
+#define		MC_LS_ENABLE				(1 << 19)
+
+#define MC_SHARED_CHMAP						0x801
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x0000f000
+#define MC_SHARED_CHREMAP					0x802
+
+#define	MC_VM_FB_LOCATION				0x809
+#define	MC_VM_AGP_TOP					0x80A
+#define	MC_VM_AGP_BOT					0x80B
+#define	MC_VM_AGP_BASE					0x80C
+#define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x80D
+#define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x80E
+#define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x80F
+
+#define	MC_VM_MX_L1_TLB_CNTL				0x819
+#define		ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 3)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 3)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 3)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 3)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+#define		ENABLE_ADVANCED_DRIVER_MODEL			(1 << 6)
+
+#define MC_SHARED_BLACKOUT_CNTL           		0x82B
+
+#define MC_HUB_MISC_HUB_CG           			0x82E
+#define MC_HUB_MISC_VM_CG           			0x82F
+
+#define MC_HUB_MISC_SIP_CG           			0x830
+
+#define MC_XPB_CLK_GAT           			0x91E
+
+#define MC_CITF_MISC_RD_CG           			0x992
+#define MC_CITF_MISC_WR_CG           			0x993
+#define MC_CITF_MISC_VM_CG           			0x994
+
+#define	MC_ARB_RAMCFG					0x9D8
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000003
+#define		NOOFRANK_SHIFT					2
+#define		NOOFRANK_MASK					0x00000004
+#define		NOOFROWS_SHIFT					3
+#define		NOOFROWS_MASK					0x00000038
+#define		NOOFCOLS_SHIFT					6
+#define		NOOFCOLS_MASK					0x000000C0
+#define		CHANSIZE_SHIFT					8
+#define		CHANSIZE_MASK					0x00000100
+#define		CHANSIZE_OVERRIDE				(1 << 11)
+#define		NOOFGROUPS_SHIFT				12
+#define		NOOFGROUPS_MASK					0x00001000
+
+#define	MC_ARB_DRAM_TIMING				0x9DD
+#define	MC_ARB_DRAM_TIMING2				0x9DE
+
+#define MC_ARB_BURST_TIME                               0xA02
+#define		STATE0(x)				((x) << 0)
+#define		STATE0_MASK				(0x1f << 0)
+#define		STATE0_SHIFT				0
+#define		STATE1(x)				((x) << 5)
+#define		STATE1_MASK				(0x1f << 5)
+#define		STATE1_SHIFT				5
+#define		STATE2(x)				((x) << 10)
+#define		STATE2_MASK				(0x1f << 10)
+#define		STATE2_SHIFT				10
+#define		STATE3(x)				((x) << 15)
+#define		STATE3_MASK				(0x1f << 15)
+#define		STATE3_SHIFT				15
+
+#define	MC_SEQ_TRAIN_WAKEUP_CNTL			0xA3A
+#define		TRAIN_DONE_D0      			(1 << 30)
+#define		TRAIN_DONE_D1      			(1 << 31)
+
+#define MC_SEQ_SUP_CNTL           			0xA32
+#define		RUN_MASK      				(1 << 0)
+#define MC_SEQ_SUP_PGM           			0xA33
+#define MC_PMG_AUTO_CMD           			0xA34
+
+#define MC_IO_PAD_CNTL_D0           			0xA74
+#define		MEM_FALL_OUT_CMD      			(1 << 8)
+
+#define MC_SEQ_RAS_TIMING                               0xA28
+#define MC_SEQ_CAS_TIMING                               0xA29
+#define MC_SEQ_MISC_TIMING                              0xA2A
+#define MC_SEQ_MISC_TIMING2                             0xA2B
+#define MC_SEQ_PMG_TIMING                               0xA2C
+#define MC_SEQ_RD_CTL_D0                                0xA2D
+#define MC_SEQ_RD_CTL_D1                                0xA2E
+#define MC_SEQ_WR_CTL_D0                                0xA2F
+#define MC_SEQ_WR_CTL_D1                                0xA30
+
+#define MC_SEQ_MISC0           				0xA80
+#define 	MC_SEQ_MISC0_VEN_ID_SHIFT               8
+#define 	MC_SEQ_MISC0_VEN_ID_MASK                0x00000f00
+#define 	MC_SEQ_MISC0_VEN_ID_VALUE               3
+#define 	MC_SEQ_MISC0_REV_ID_SHIFT               12
+#define 	MC_SEQ_MISC0_REV_ID_MASK                0x0000f000
+#define 	MC_SEQ_MISC0_REV_ID_VALUE               1
+#define 	MC_SEQ_MISC0_GDDR5_SHIFT                28
+#define 	MC_SEQ_MISC0_GDDR5_MASK                 0xf0000000
+#define 	MC_SEQ_MISC0_GDDR5_VALUE                5
+#define MC_SEQ_MISC1                                    0xA81
+#define MC_SEQ_RESERVE_M                                0xA82
+#define MC_PMG_CMD_EMRS                                 0xA83
+
+#define MC_SEQ_IO_DEBUG_INDEX           		0xA91
+#define MC_SEQ_IO_DEBUG_DATA           			0xA92
+
+#define MC_SEQ_MISC5                                    0xA95
+#define MC_SEQ_MISC6                                    0xA96
+
+#define MC_SEQ_MISC7                                    0xA99
+
+#define MC_SEQ_RAS_TIMING_LP                            0xA9B
+#define MC_SEQ_CAS_TIMING_LP                            0xA9C
+#define MC_SEQ_MISC_TIMING_LP                           0xA9D
+#define MC_SEQ_MISC_TIMING2_LP                          0xA9E
+#define MC_SEQ_WR_CTL_D0_LP                             0xA9F
+#define MC_SEQ_WR_CTL_D1_LP                             0xAA0
+#define MC_SEQ_PMG_CMD_EMRS_LP                          0xAA1
+#define MC_SEQ_PMG_CMD_MRS_LP                           0xAA2
+
+#define MC_PMG_CMD_MRS                                  0xAAB
+
+#define MC_SEQ_RD_CTL_D0_LP                             0xAC7
+#define MC_SEQ_RD_CTL_D1_LP                             0xAC8
+
+#define MC_PMG_CMD_MRS1                                 0xAD1
+#define MC_SEQ_PMG_CMD_MRS1_LP                          0xAD2
+#define MC_SEQ_PMG_TIMING_LP                            0xAD3
+
+#define MC_SEQ_WR_CTL_2                                 0xAD5
+#define MC_SEQ_WR_CTL_2_LP                              0xAD6
+#define MC_PMG_CMD_MRS2                                 0xAD7
+#define MC_SEQ_PMG_CMD_MRS2_LP                          0xAD8
+
+#define	MCLK_PWRMGT_CNTL				0xAE8
+#       define DLL_SPEED(x)				((x) << 0)
+#       define DLL_SPEED_MASK				(0x1f << 0)
+#       define DLL_READY                                (1 << 6)
+#       define MC_INT_CNTL                              (1 << 7)
+#       define MRDCK0_PDNB                              (1 << 8)
+#       define MRDCK1_PDNB                              (1 << 9)
+#       define MRDCK0_RESET                             (1 << 16)
+#       define MRDCK1_RESET                             (1 << 17)
+#       define DLL_READY_READ                           (1 << 24)
+#define	DLL_CNTL					0xAE9
+#       define MRDCK0_BYPASS                            (1 << 24)
+#       define MRDCK1_BYPASS                            (1 << 25)
+
+#define	MPLL_CNTL_MODE					0xAEC
+#       define MPLL_MCLK_SEL                            (1 << 11)
+#define	MPLL_FUNC_CNTL					0xAED
+#define		BWCTRL(x)				((x) << 20)
+#define		BWCTRL_MASK				(0xff << 20)
+#define	MPLL_FUNC_CNTL_1				0xAEE
+#define		VCO_MODE(x)				((x) << 0)
+#define		VCO_MODE_MASK				(3 << 0)
+#define		CLKFRAC(x)				((x) << 4)
+#define		CLKFRAC_MASK				(0xfff << 4)
+#define		CLKF(x)					((x) << 16)
+#define		CLKF_MASK				(0xfff << 16)
+#define	MPLL_FUNC_CNTL_2				0xAEF
+#define	MPLL_AD_FUNC_CNTL				0xAF0
+#define		YCLK_POST_DIV(x)			((x) << 0)
+#define		YCLK_POST_DIV_MASK			(7 << 0)
+#define	MPLL_DQ_FUNC_CNTL				0xAF1
+#define		YCLK_SEL(x)				((x) << 4)
+#define		YCLK_SEL_MASK				(1 << 4)
+
+#define	MPLL_SS1					0xAF3
+#define		CLKV(x)					((x) << 0)
+#define		CLKV_MASK				(0x3ffffff << 0)
+#define	MPLL_SS2					0xAF4
+#define		CLKS(x)					((x) << 0)
+#define		CLKS_MASK				(0xfff << 0)
+
+#define	HDP_HOST_PATH_CNTL				0xB00
+#define 	CLOCK_GATING_DIS			(1 << 23)
+#define	HDP_NONSURFACE_BASE				0xB01
+#define	HDP_NONSURFACE_INFO				0xB02
+#define	HDP_NONSURFACE_SIZE				0xB03
+
+#define HDP_DEBUG0  					0xBCC
+
+#define HDP_ADDR_CONFIG  				0xBD2
+#define HDP_MISC_CNTL					0xBD3
+#define 	HDP_FLUSH_INVALIDATE_CACHE			(1 << 0)
+#define HDP_MEM_POWER_LS				0xBD4
+#define 	HDP_LS_ENABLE				(1 << 0)
+
+#define ATC_MISC_CG           				0xCD4
+
+#define IH_RB_CNTL                                        0xF80
+#       define IH_RB_ENABLE                               (1 << 0)
+#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
+#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
+#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
+#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
+#       define IH_WPTR_OVERFLOW_CLEAR                     (1 << 31)
+#define IH_RB_BASE                                        0xF81
+#define IH_RB_RPTR                                        0xF82
+#define IH_RB_WPTR                                        0xF83
+#       define RB_OVERFLOW                                (1 << 0)
+#       define WPTR_OFFSET_MASK                           0x3fffc
+#define IH_RB_WPTR_ADDR_HI                                0xF84
+#define IH_RB_WPTR_ADDR_LO                                0xF85
+#define IH_CNTL                                           0xF86
+#       define ENABLE_INTR                                (1 << 0)
+#       define IH_MC_SWAP(x)                              ((x) << 1)
+#       define IH_MC_SWAP_NONE                            0
+#       define IH_MC_SWAP_16BIT                           1
+#       define IH_MC_SWAP_32BIT                           2
+#       define IH_MC_SWAP_64BIT                           3
+#       define RPTR_REARM                                 (1 << 4)
+#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
+#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
+#       define MC_VMID(x)                                 ((x) << 25)
+
+#define	CONFIG_MEMSIZE					0x150A
+
+#define INTERRUPT_CNTL                                    0x151A
+#       define IH_DUMMY_RD_OVERRIDE                       (1 << 0)
+#       define IH_DUMMY_RD_EN                             (1 << 1)
+#       define IH_REQ_NONSNOOP_EN                         (1 << 3)
+#       define GEN_IH_INT_EN                              (1 << 8)
+#define INTERRUPT_CNTL2                                   0x151B
+
+#define HDP_MEM_COHERENCY_FLUSH_CNTL			0x1520
+
+#define	BIF_FB_EN						0x1524
+#define		FB_READ_EN					(1 << 0)
+#define		FB_WRITE_EN					(1 << 1)
+
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x1528
+
+/* DCE6 ELD audio interface */
+#define AZ_F0_CODEC_ENDPOINT_INDEX                       0x1780
+#       define AZ_ENDPOINT_REG_INDEX(x)                  (((x) & 0xff) << 0)
+#       define AZ_ENDPOINT_REG_WRITE_EN                  (1 << 8)
+#define AZ_F0_CODEC_ENDPOINT_DATA                        0x1781
+
+#define AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER          0x25
+#define		SPEAKER_ALLOCATION(x)			(((x) & 0x7f) << 0)
+#define		SPEAKER_ALLOCATION_MASK			(0x7f << 0)
+#define		SPEAKER_ALLOCATION_SHIFT		0
+#define		HDMI_CONNECTION				(1 << 16)
+#define		DP_CONNECTION				(1 << 17)
+
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0        0x28 /* LPCM */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1        0x29 /* AC3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2        0x2A /* MPEG1 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3        0x2B /* MP3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4        0x2C /* MPEG2 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5        0x2D /* AAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6        0x2E /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7        0x2F /* ATRAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8        0x30 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9        0x31 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10       0x32 /* DTS-HD */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11       0x33 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12       0x34 /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13       0x35 /* WMA Pro */
+#       define MAX_CHANNELS(x)                            (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                   (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                       (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)            (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC         0x37
+#       define VIDEO_LIPSYNC(x)                           (((x) & 0xff) << 0)
+#       define AUDIO_LIPSYNC(x)                           (((x) & 0xff) << 8)
+/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
+ * 0   = invalid
+ * x   = legal delay value
+ * 255 = sync not supported
+ */
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR             0x38
+#       define HBR_CAPABLE                                (1 << 0) /* enabled by default */
+
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0               0x3a
+#       define MANUFACTURER_ID(x)                        (((x) & 0xffff) << 0)
+#       define PRODUCT_ID(x)                             (((x) & 0xffff) << 16)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1               0x3b
+#       define SINK_DESCRIPTION_LEN(x)                   (((x) & 0xff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2               0x3c
+#       define PORT_ID0(x)                               (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3               0x3d
+#       define PORT_ID1(x)                               (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4               0x3e
+#       define DESCRIPTION0(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION1(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION2(x)                           (((x) & 0xff) << 16)
+#       define DESCRIPTION3(x)                           (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5               0x3f
+#       define DESCRIPTION4(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION5(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION6(x)                           (((x) & 0xff) << 16)
+#       define DESCRIPTION7(x)                           (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6               0x40
+#       define DESCRIPTION8(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION9(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION10(x)                          (((x) & 0xff) << 16)
+#       define DESCRIPTION11(x)                          (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7               0x41
+#       define DESCRIPTION12(x)                          (((x) & 0xff) << 0)
+#       define DESCRIPTION13(x)                          (((x) & 0xff) << 8)
+#       define DESCRIPTION14(x)                          (((x) & 0xff) << 16)
+#       define DESCRIPTION15(x)                          (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8               0x42
+#       define DESCRIPTION16(x)                          (((x) & 0xff) << 0)
+#       define DESCRIPTION17(x)                          (((x) & 0xff) << 8)
+
+#define AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL         0x54
+#       define AUDIO_ENABLED                             (1 << 31)
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT  0x56
+#define		PORT_CONNECTIVITY_MASK				(3 << 30)
+#define		PORT_CONNECTIVITY_SHIFT				30
+
+#define	DC_LB_MEMORY_SPLIT					0x1AC3
+#define		DC_LB_MEMORY_CONFIG(x)				((x) << 20)
+
+#define	PRIORITY_A_CNT						0x1AC6
+#define		PRIORITY_MARK_MASK				0x7fff
+#define		PRIORITY_OFF					(1 << 16)
+#define		PRIORITY_ALWAYS_ON				(1 << 20)
+#define	PRIORITY_B_CNT						0x1AC7
+
+#define	DPG_PIPE_ARBITRATION_CONTROL3				0x1B32
+#       define LATENCY_WATERMARK_MASK(x)			((x) << 16)
+#define	DPG_PIPE_LATENCY_CONTROL				0x1B33
+#       define LATENCY_LOW_WATERMARK(x)				((x) << 0)
+#       define LATENCY_HIGH_WATERMARK(x)			((x) << 16)
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS                                    0x1AEE
+#       define VLINE_OCCURRED                           (1 << 0)
+#       define VLINE_ACK                                (1 << 4)
+#       define VLINE_STAT                               (1 << 12)
+#       define VLINE_INTERRUPT                          (1 << 16)
+#       define VLINE_INTERRUPT_TYPE                     (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS                                   0x1AEF
+#       define VBLANK_OCCURRED                          (1 << 0)
+#       define VBLANK_ACK                               (1 << 4)
+#       define VBLANK_STAT                              (1 << 12)
+#       define VBLANK_INTERRUPT                         (1 << 16)
+#       define VBLANK_INTERRUPT_TYPE                    (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK                                        0x1AD0
+#       define VBLANK_INT_MASK                          (1 << 0)
+#       define VLINE_INT_MASK                           (1 << 4)
+
+#define DISP_INTERRUPT_STATUS                           0x183D
+#       define LB_D1_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D1_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD1_INTERRUPT                        (1 << 17)
+#       define DC_HPD1_RX_INTERRUPT                     (1 << 18)
+#       define DACA_AUTODETECT_INTERRUPT                (1 << 22)
+#       define DACB_AUTODETECT_INTERRUPT                (1 << 23)
+#       define DC_I2C_SW_DONE_INTERRUPT                 (1 << 24)
+#       define DC_I2C_HW_DONE_INTERRUPT                 (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE                  0x183E
+#       define LB_D2_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D2_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD2_INTERRUPT                        (1 << 17)
+#       define DC_HPD2_RX_INTERRUPT                     (1 << 18)
+#       define DISP_TIMER_INTERRUPT                     (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2                 0x183F
+#       define LB_D3_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D3_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD3_INTERRUPT                        (1 << 17)
+#       define DC_HPD3_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3                 0x1840
+#       define LB_D4_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D4_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD4_INTERRUPT                        (1 << 17)
+#       define DC_HPD4_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4                 0x1853
+#       define LB_D5_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D5_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD5_INTERRUPT                        (1 << 17)
+#       define DC_HPD5_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5                 0x1854
+#       define LB_D6_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D6_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD6_INTERRUPT                        (1 << 17)
+#       define DC_HPD6_RX_INTERRUPT                     (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS                                 0x1A16
+#       define GRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define GRPH_PFLIP_INT_CLEAR                     (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define	GRPH_INT_CONTROL			        0x1A17
+#       define GRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
+
+#define	DAC_AUTODETECT_INT_CONTROL			0x19F2
+
+#define DC_HPD1_INT_STATUS                              0x1807
+#define DC_HPD2_INT_STATUS                              0x180A
+#define DC_HPD3_INT_STATUS                              0x180D
+#define DC_HPD4_INT_STATUS                              0x1810
+#define DC_HPD5_INT_STATUS                              0x1813
+#define DC_HPD6_INT_STATUS                              0x1816
+#       define DC_HPDx_INT_STATUS                       (1 << 0)
+#       define DC_HPDx_SENSE                            (1 << 1)
+#       define DC_HPDx_RX_INT_STATUS                    (1 << 8)
+
+#define DC_HPD1_INT_CONTROL                             0x1808
+#define DC_HPD2_INT_CONTROL                             0x180B
+#define DC_HPD3_INT_CONTROL                             0x180E
+#define DC_HPD4_INT_CONTROL                             0x1811
+#define DC_HPD5_INT_CONTROL                             0x1814
+#define DC_HPD6_INT_CONTROL                             0x1817
+#       define DC_HPDx_INT_ACK                          (1 << 0)
+#       define DC_HPDx_INT_POLARITY                     (1 << 8)
+#       define DC_HPDx_INT_EN                           (1 << 16)
+#       define DC_HPDx_RX_INT_ACK                       (1 << 20)
+#       define DC_HPDx_RX_INT_EN                        (1 << 24)
+
+#define DC_HPD1_CONTROL                                   0x1809
+#define DC_HPD2_CONTROL                                   0x180C
+#define DC_HPD3_CONTROL                                   0x180F
+#define DC_HPD4_CONTROL                                   0x1812
+#define DC_HPD5_CONTROL                                   0x1815
+#define DC_HPD6_CONTROL                                   0x1818
+#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
+#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
+#       define DC_HPDx_EN                                 (1 << 28)
+
+#define DPG_PIPE_STUTTER_CONTROL                          0x1B35
+#       define STUTTER_ENABLE                             (1 << 0)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT                         0x1BA6
+
+/* Audio clocks */
+#define DCCG_AUDIO_DTO_SOURCE                           0x05ac
+#       define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
+#       define DCCG_AUDIO_DTO_SEL            (1 << 4)   /* 0=dto0 1=dto1 */
+
+#define DCCG_AUDIO_DTO0_PHASE                           0x05b0
+#define DCCG_AUDIO_DTO0_MODULE                          0x05b4
+#define DCCG_AUDIO_DTO1_PHASE                           0x05c0
+#define DCCG_AUDIO_DTO1_MODULE                          0x05c4
+
+#define AFMT_AUDIO_SRC_CONTROL                          0x1c4f
+#define		AFMT_AUDIO_SRC_SELECT(x)		(((x) & 7) << 0)
+/* AFMT_AUDIO_SRC_SELECT
+ * 0 = stream0
+ * 1 = stream1
+ * 2 = stream2
+ * 3 = stream3
+ * 4 = stream4
+ * 5 = stream5
+ */
+
+#define	GRBM_CNTL					0x2000
+#define		GRBM_READ_TIMEOUT(x)				((x) << 0)
+
+#define	GRBM_STATUS2					0x2002
+#define		RLC_RQ_PENDING 					(1 << 0)
+#define		RLC_BUSY 					(1 << 8)
+#define		TC_BUSY 					(1 << 9)
+
+#define	GRBM_STATUS					0x2004
+#define		CMDFIFO_AVAIL_MASK				0x0000000F
+#define		RING2_RQ_PENDING				(1 << 4)
+#define		SRBM_RQ_PENDING					(1 << 5)
+#define		RING1_RQ_PENDING				(1 << 6)
+#define		CF_RQ_PENDING					(1 << 7)
+#define		PF_RQ_PENDING					(1 << 8)
+#define		GDS_DMA_RQ_PENDING				(1 << 9)
+#define		GRBM_EE_BUSY					(1 << 10)
+#define		DB_CLEAN					(1 << 12)
+#define		CB_CLEAN					(1 << 13)
+#define		TA_BUSY 					(1 << 14)
+#define		GDS_BUSY 					(1 << 15)
+#define		VGT_BUSY					(1 << 17)
+#define		IA_BUSY_NO_DMA					(1 << 18)
+#define		IA_BUSY						(1 << 19)
+#define		SX_BUSY 					(1 << 20)
+#define		SPI_BUSY					(1 << 22)
+#define		BCI_BUSY					(1 << 23)
+#define		SC_BUSY 					(1 << 24)
+#define		PA_BUSY 					(1 << 25)
+#define		DB_BUSY 					(1 << 26)
+#define		CP_COHERENCY_BUSY      				(1 << 28)
+#define		CP_BUSY 					(1 << 29)
+#define		CB_BUSY 					(1 << 30)
+#define		GUI_ACTIVE					(1 << 31)
+#define	GRBM_STATUS_SE0					0x2005
+#define	GRBM_STATUS_SE1					0x2006
+#define		SE_DB_CLEAN					(1 << 1)
+#define		SE_CB_CLEAN					(1 << 2)
+#define		SE_BCI_BUSY					(1 << 22)
+#define		SE_VGT_BUSY					(1 << 23)
+#define		SE_PA_BUSY					(1 << 24)
+#define		SE_TA_BUSY					(1 << 25)
+#define		SE_SX_BUSY					(1 << 26)
+#define		SE_SPI_BUSY					(1 << 27)
+#define		SE_SC_BUSY					(1 << 29)
+#define		SE_DB_BUSY					(1 << 30)
+#define		SE_CB_BUSY					(1 << 31)
+
+#define	GRBM_SOFT_RESET					0x2008
+#define		SOFT_RESET_CP					(1 << 0)
+#define		SOFT_RESET_CB					(1 << 1)
+#define		SOFT_RESET_RLC					(1 << 2)
+#define		SOFT_RESET_DB					(1 << 3)
+#define		SOFT_RESET_GDS					(1 << 4)
+#define		SOFT_RESET_PA					(1 << 5)
+#define		SOFT_RESET_SC					(1 << 6)
+#define		SOFT_RESET_BCI					(1 << 7)
+#define		SOFT_RESET_SPI					(1 << 8)
+#define		SOFT_RESET_SX					(1 << 10)
+#define		SOFT_RESET_TC					(1 << 11)
+#define		SOFT_RESET_TA					(1 << 12)
+#define		SOFT_RESET_VGT					(1 << 14)
+#define		SOFT_RESET_IA					(1 << 15)
+
+#define GRBM_GFX_INDEX          			0x200B
+#define		INSTANCE_INDEX(x)			((x) << 0)
+#define		SH_INDEX(x)     			((x) << 8)
+#define		SE_INDEX(x)     			((x) << 16)
+#define		SH_BROADCAST_WRITES      		(1 << 29)
+#define		INSTANCE_BROADCAST_WRITES      		(1 << 30)
+#define		SE_BROADCAST_WRITES      		(1 << 31)
+
+#define GRBM_INT_CNTL                                   0x2018
+#       define RDERR_INT_ENABLE                         (1 << 0)
+#       define GUI_IDLE_INT_ENABLE                      (1 << 19)
+
+#define	CP_STRMOUT_CNTL					0x213F
+#define	SCRATCH_REG0					0x2140
+#define	SCRATCH_REG1					0x2141
+#define	SCRATCH_REG2					0x2142
+#define	SCRATCH_REG3					0x2143
+#define	SCRATCH_REG4					0x2144
+#define	SCRATCH_REG5					0x2145
+#define	SCRATCH_REG6					0x2146
+#define	SCRATCH_REG7					0x2147
+
+#define	SCRATCH_UMSK					0x2150
+#define	SCRATCH_ADDR					0x2151
+
+#define	CP_SEM_WAIT_TIMER				0x216F
+
+#define	CP_SEM_INCOMPLETE_TIMER_CNTL			0x2172
+
+#define CP_ME_CNTL					0x21B6
+#define		CP_CE_HALT					(1 << 24)
+#define		CP_PFP_HALT					(1 << 26)
+#define		CP_ME_HALT					(1 << 28)
+
+#define	CP_COHER_CNTL2					0x217A
+
+#define	CP_RB2_RPTR					0x21BE
+#define	CP_RB1_RPTR					0x21BF
+#define	CP_RB0_RPTR					0x21C0
+#define	CP_RB_WPTR_DELAY				0x21C1
+
+#define	CP_QUEUE_THRESHOLDS				0x21D8
+#define		ROQ_IB1_START(x)				((x) << 0)
+#define		ROQ_IB2_START(x)				((x) << 8)
+#define CP_MEQ_THRESHOLDS				0x21D9
+#define		MEQ1_START(x)				((x) << 0)
+#define		MEQ2_START(x)				((x) << 8)
+
+#define	CP_PERFMON_CNTL					0x21FF
+
+#define	VGT_VTX_VECT_EJECT_REG				0x222C
+
+#define	VGT_CACHE_INVALIDATION				0x2231
+#define		CACHE_INVALIDATION(x)				((x) << 0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define		AUTO_INVLD_EN(x)				((x) << 6)
+#define			NO_AUTO						0
+#define			ES_AUTO						1
+#define			GS_AUTO						2
+#define			ES_AND_GS_AUTO					3
+#define	VGT_ESGS_RING_SIZE				0x2232
+#define	VGT_GSVS_RING_SIZE				0x2233
+
+#define	VGT_GS_VERTEX_REUSE				0x2235
+
+#define	VGT_PRIMITIVE_TYPE				0x2256
+#define	VGT_INDEX_TYPE					0x2257
+
+#define	VGT_NUM_INDICES					0x225C
+#define	VGT_NUM_INSTANCES				0x225D
+
+#define	VGT_TF_RING_SIZE				0x2262
+
+#define	VGT_HS_OFFCHIP_PARAM				0x226C
+
+#define	VGT_TF_MEMORY_BASE				0x226E
+
+#define CC_GC_SHADER_ARRAY_CONFIG			0x226F
+#define		INACTIVE_CUS_MASK			0xFFFF0000
+#define		INACTIVE_CUS_SHIFT			16
+#define GC_USER_SHADER_ARRAY_CONFIG			0x2270
+
+#define	PA_CL_ENHANCE					0x2285
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+
+#define	PA_SU_LINE_STIPPLE_VALUE			0x2298
+
+#define	PA_SC_LINE_STIPPLE_STATE			0x22C4
+
+#define	PA_SC_FORCE_EOV_MAX_CNTS			0x22C9
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
+#define		FORCE_EOV_MAX_REZ_CNT(x)			((x) << 16)
+
+#define	PA_SC_FIFO_SIZE					0x22F3
+#define		SC_FRONTEND_PRIM_FIFO_SIZE(x)			((x) << 0)
+#define		SC_BACKEND_PRIM_FIFO_SIZE(x)			((x) << 6)
+#define		SC_HIZ_TILE_FIFO_SIZE(x)			((x) << 15)
+#define		SC_EARLYZ_TILE_FIFO_SIZE(x)			((x) << 23)
+
+#define	PA_SC_ENHANCE					0x22FC
+
+#define	SQ_CONFIG					0x2300
+
+#define	SQC_CACHES					0x2302
+
+#define SQ_POWER_THROTTLE                               0x2396
+#define		MIN_POWER(x)				((x) << 0)
+#define		MIN_POWER_MASK				(0x3fff << 0)
+#define		MIN_POWER_SHIFT				0
+#define		MAX_POWER(x)				((x) << 16)
+#define		MAX_POWER_MASK				(0x3fff << 16)
+#define		MAX_POWER_SHIFT				0
+#define SQ_POWER_THROTTLE2                              0x2397
+#define		MAX_POWER_DELTA(x)			((x) << 0)
+#define		MAX_POWER_DELTA_MASK			(0x3fff << 0)
+#define		MAX_POWER_DELTA_SHIFT			0
+#define		STI_SIZE(x)				((x) << 16)
+#define		STI_SIZE_MASK				(0x3ff << 16)
+#define		STI_SIZE_SHIFT				16
+#define		LTI_RATIO(x)				((x) << 27)
+#define		LTI_RATIO_MASK				(0xf << 27)
+#define		LTI_RATIO_SHIFT				27
+
+#define	SX_DEBUG_1					0x2418
+
+#define	SPI_STATIC_THREAD_MGMT_1			0x2438
+#define	SPI_STATIC_THREAD_MGMT_2			0x2439
+#define	SPI_STATIC_THREAD_MGMT_3			0x243A
+#define	SPI_PS_MAX_WAVE_ID				0x243B
+
+#define	SPI_CONFIG_CNTL					0x2440
+
+#define	SPI_CONFIG_CNTL_1				0x244F
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+
+#define	CGTS_TCC_DISABLE				0x2452
+#define	CGTS_USER_TCC_DISABLE				0x2453
+#define		TCC_DISABLE_MASK				0xFFFF0000
+#define		TCC_DISABLE_SHIFT				16
+#define	CGTS_SM_CTRL_REG				0x2454
+#define		OVERRIDE				(1 << 21)
+#define		LS_OVERRIDE				(1 << 22)
+
+#define	SPI_LB_CU_MASK					0x24D5
+
+#define	TA_CNTL_AUX					0x2542
+
+#define CC_RB_BACKEND_DISABLE				0x263D
+#define		BACKEND_DISABLE(x)     			((x) << 16)
+#define GB_ADDR_CONFIG  				0x263E
+#define		NUM_PIPES(x)				((x) << 0)
+#define		NUM_PIPES_MASK				0x00000007
+#define		NUM_PIPES_SHIFT				0
+#define		PIPE_INTERLEAVE_SIZE(x)			((x) << 4)
+#define		PIPE_INTERLEAVE_SIZE_MASK		0x00000070
+#define		PIPE_INTERLEAVE_SIZE_SHIFT		4
+#define		NUM_SHADER_ENGINES(x)			((x) << 12)
+#define		NUM_SHADER_ENGINES_MASK			0x00003000
+#define		NUM_SHADER_ENGINES_SHIFT		12
+#define		SHADER_ENGINE_TILE_SIZE(x)     		((x) << 16)
+#define		SHADER_ENGINE_TILE_SIZE_MASK		0x00070000
+#define		SHADER_ENGINE_TILE_SIZE_SHIFT		16
+#define		NUM_GPUS(x)     			((x) << 20)
+#define		NUM_GPUS_MASK				0x00700000
+#define		NUM_GPUS_SHIFT				20
+#define		MULTI_GPU_TILE_SIZE(x)     		((x) << 24)
+#define		MULTI_GPU_TILE_SIZE_MASK		0x03000000
+#define		MULTI_GPU_TILE_SIZE_SHIFT		24
+#define		ROW_SIZE(x)             		((x) << 28)
+#define		ROW_SIZE_MASK				0x30000000
+#define		ROW_SIZE_SHIFT				28
+
+#define	GB_TILE_MODE0					0x2644
+#       define MICRO_TILE_MODE(x)				((x) << 0)
+#              define	ADDR_SURF_DISPLAY_MICRO_TILING		0
+#              define	ADDR_SURF_THIN_MICRO_TILING		1
+#              define	ADDR_SURF_DEPTH_MICRO_TILING		2
+#       define ARRAY_MODE(x)					((x) << 2)
+#              define	ARRAY_LINEAR_GENERAL			0
+#              define	ARRAY_LINEAR_ALIGNED			1
+#              define	ARRAY_1D_TILED_THIN1			2
+#              define	ARRAY_2D_TILED_THIN1			4
+#       define PIPE_CONFIG(x)					((x) << 6)
+#              define	ADDR_SURF_P2				0
+#              define	ADDR_SURF_P4_8x16			4
+#              define	ADDR_SURF_P4_16x16			5
+#              define	ADDR_SURF_P4_16x32			6
+#              define	ADDR_SURF_P4_32x32			7
+#              define	ADDR_SURF_P8_16x16_8x16			8
+#              define	ADDR_SURF_P8_16x32_8x16			9
+#              define	ADDR_SURF_P8_32x32_8x16			10
+#              define	ADDR_SURF_P8_16x32_16x16		11
+#              define	ADDR_SURF_P8_32x32_16x16		12
+#              define	ADDR_SURF_P8_32x32_16x32		13
+#              define	ADDR_SURF_P8_32x64_32x32		14
+#       define TILE_SPLIT(x)					((x) << 11)
+#              define	ADDR_SURF_TILE_SPLIT_64B		0
+#              define	ADDR_SURF_TILE_SPLIT_128B		1
+#              define	ADDR_SURF_TILE_SPLIT_256B		2
+#              define	ADDR_SURF_TILE_SPLIT_512B		3
+#              define	ADDR_SURF_TILE_SPLIT_1KB		4
+#              define	ADDR_SURF_TILE_SPLIT_2KB		5
+#              define	ADDR_SURF_TILE_SPLIT_4KB		6
+#       define BANK_WIDTH(x)					((x) << 14)
+#              define	ADDR_SURF_BANK_WIDTH_1			0
+#              define	ADDR_SURF_BANK_WIDTH_2			1
+#              define	ADDR_SURF_BANK_WIDTH_4			2
+#              define	ADDR_SURF_BANK_WIDTH_8			3
+#       define BANK_HEIGHT(x)					((x) << 16)
+#              define	ADDR_SURF_BANK_HEIGHT_1			0
+#              define	ADDR_SURF_BANK_HEIGHT_2			1
+#              define	ADDR_SURF_BANK_HEIGHT_4			2
+#              define	ADDR_SURF_BANK_HEIGHT_8			3
+#       define MACRO_TILE_ASPECT(x)				((x) << 18)
+#              define	ADDR_SURF_MACRO_ASPECT_1		0
+#              define	ADDR_SURF_MACRO_ASPECT_2		1
+#              define	ADDR_SURF_MACRO_ASPECT_4		2
+#              define	ADDR_SURF_MACRO_ASPECT_8		3
+#       define NUM_BANKS(x)					((x) << 20)
+#              define	ADDR_SURF_2_BANK			0
+#              define	ADDR_SURF_4_BANK			1
+#              define	ADDR_SURF_8_BANK			2
+#              define	ADDR_SURF_16_BANK			3
+#define	GB_TILE_MODE1					0x2645
+#define	GB_TILE_MODE2					0x2646
+#define	GB_TILE_MODE3					0x2647
+#define	GB_TILE_MODE4					0x2648
+#define	GB_TILE_MODE5					0x2649
+#define	GB_TILE_MODE6					0x264a
+#define	GB_TILE_MODE7					0x264b
+#define	GB_TILE_MODE8					0x264c
+#define	GB_TILE_MODE9					0x264d
+#define	GB_TILE_MODE10					0x264e
+#define	GB_TILE_MODE11					0x264f
+#define	GB_TILE_MODE12					0x2650
+#define	GB_TILE_MODE13					0x2651
+#define	GB_TILE_MODE14					0x2652
+#define	GB_TILE_MODE15					0x2653
+#define	GB_TILE_MODE16					0x2654
+#define	GB_TILE_MODE17					0x2655
+#define	GB_TILE_MODE18					0x2656
+#define	GB_TILE_MODE19					0x2657
+#define	GB_TILE_MODE20					0x2658
+#define	GB_TILE_MODE21					0x2659
+#define	GB_TILE_MODE22					0x265a
+#define	GB_TILE_MODE23					0x265b
+#define	GB_TILE_MODE24					0x265c
+#define	GB_TILE_MODE25					0x265d
+#define	GB_TILE_MODE26					0x265e
+#define	GB_TILE_MODE27					0x265f
+#define	GB_TILE_MODE28					0x2660
+#define	GB_TILE_MODE29					0x2661
+#define	GB_TILE_MODE30					0x2662
+#define	GB_TILE_MODE31					0x2663
+
+#define	CB_PERFCOUNTER0_SELECT0				0x2688
+#define	CB_PERFCOUNTER0_SELECT1				0x2689
+#define	CB_PERFCOUNTER1_SELECT0				0x268A
+#define	CB_PERFCOUNTER1_SELECT1				0x268B
+#define	CB_PERFCOUNTER2_SELECT0				0x268C
+#define	CB_PERFCOUNTER2_SELECT1				0x268D
+#define	CB_PERFCOUNTER3_SELECT0				0x268E
+#define	CB_PERFCOUNTER3_SELECT1				0x268F
+
+#define	CB_CGTT_SCLK_CTRL				0x2698
+
+#define	GC_USER_RB_BACKEND_DISABLE			0x26DF
+#define		BACKEND_DISABLE_MASK			0x00FF0000
+#define		BACKEND_DISABLE_SHIFT			16
+
+#define	TCP_CHAN_STEER_LO				0x2B03
+#define	TCP_CHAN_STEER_HI				0x2B94
+
+#define	CP_RB0_BASE					0x3040
+#define	CP_RB0_CNTL					0x3041
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
+
+#define	CP_RB0_RPTR_ADDR				0x3043
+#define	CP_RB0_RPTR_ADDR_HI				0x3044
+#define	CP_RB0_WPTR					0x3045
+
+#define	CP_PFP_UCODE_ADDR				0x3054
+#define	CP_PFP_UCODE_DATA				0x3055
+#define	CP_ME_RAM_RADDR					0x3056
+#define	CP_ME_RAM_WADDR					0x3057
+#define	CP_ME_RAM_DATA					0x3058
+
+#define	CP_CE_UCODE_ADDR				0x305A
+#define	CP_CE_UCODE_DATA				0x305B
+
+#define	CP_RB1_BASE					0x3060
+#define	CP_RB1_CNTL					0x3061
+#define	CP_RB1_RPTR_ADDR				0x3062
+#define	CP_RB1_RPTR_ADDR_HI				0x3063
+#define	CP_RB1_WPTR					0x3064
+#define	CP_RB2_BASE					0x3065
+#define	CP_RB2_CNTL					0x3066
+#define	CP_RB2_RPTR_ADDR				0x3067
+#define	CP_RB2_RPTR_ADDR_HI				0x3068
+#define	CP_RB2_WPTR					0x3069
+#define CP_INT_CNTL_RING0                               0x306A
+#define CP_INT_CNTL_RING1                               0x306B
+#define CP_INT_CNTL_RING2                               0x306C
+#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
+#       define WAIT_MEM_SEM_INT_ENABLE                  (1 << 21)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+#       define CP_RINGID2_INT_ENABLE                    (1 << 29)
+#       define CP_RINGID1_INT_ENABLE                    (1 << 30)
+#       define CP_RINGID0_INT_ENABLE                    (1 << 31)
+#define CP_INT_STATUS_RING0                             0x306D
+#define CP_INT_STATUS_RING1                             0x306E
+#define CP_INT_STATUS_RING2                             0x306F
+#       define WAIT_MEM_SEM_INT_STAT                    (1 << 21)
+#       define TIME_STAMP_INT_STAT                      (1 << 26)
+#       define CP_RINGID2_INT_STAT                      (1 << 29)
+#       define CP_RINGID1_INT_STAT                      (1 << 30)
+#       define CP_RINGID0_INT_STAT                      (1 << 31)
+
+#define	CP_MEM_SLP_CNTL					0x3079
+#       define CP_MEM_LS_EN                             (1 << 0)
+
+#define	CP_DEBUG					0x307F
+
+#define RLC_CNTL                                          0x30C0
+#       define RLC_ENABLE                                 (1 << 0)
+#define RLC_RL_BASE                                       0x30C1
+#define RLC_RL_SIZE                                       0x30C2
+#define RLC_LB_CNTL                                       0x30C3
+#       define LOAD_BALANCE_ENABLE                        (1 << 0)
+#define RLC_SAVE_AND_RESTORE_BASE                         0x30C4
+#define RLC_LB_CNTR_MAX                                   0x30C5
+#define RLC_LB_CNTR_INIT                                  0x30C6
+
+#define RLC_CLEAR_STATE_RESTORE_BASE                      0x30C8
+
+#define RLC_UCODE_ADDR                                    0x30CB
+#define RLC_UCODE_DATA                                    0x30CC
+
+#define RLC_GPU_CLOCK_COUNT_LSB                           0x30CE
+#define RLC_GPU_CLOCK_COUNT_MSB                           0x30CF
+#define RLC_CAPTURE_GPU_CLOCK_COUNT                       0x30D0
+#define RLC_MC_CNTL                                       0x30D1
+#define RLC_UCODE_CNTL                                    0x30D2
+#define RLC_STAT                                          0x30D3
+#       define RLC_BUSY_STATUS                            (1 << 0)
+#       define GFX_POWER_STATUS                           (1 << 1)
+#       define GFX_CLOCK_STATUS                           (1 << 2)
+#       define GFX_LS_STATUS                              (1 << 3)
+
+#define	RLC_PG_CNTL					0x30D7
+#	define GFX_PG_ENABLE				(1 << 0)
+#	define GFX_PG_SRC				(1 << 1)
+
+#define	RLC_CGTT_MGCG_OVERRIDE				0x3100
+#define	RLC_CGCG_CGLS_CTRL				0x3101
+#	define CGCG_EN					(1 << 0)
+#	define CGLS_EN					(1 << 1)
+
+#define	RLC_TTOP_D					0x3105
+#	define RLC_PUD(x)				((x) << 0)
+#	define RLC_PUD_MASK				(0xff << 0)
+#	define RLC_PDD(x)				((x) << 8)
+#	define RLC_PDD_MASK				(0xff << 8)
+#	define RLC_TTPD(x)				((x) << 16)
+#	define RLC_TTPD_MASK				(0xff << 16)
+#	define RLC_MSD(x)				((x) << 24)
+#	define RLC_MSD_MASK				(0xff << 24)
+
+#define RLC_LB_INIT_CU_MASK                               0x3107
+
+#define	RLC_PG_AO_CU_MASK				0x310B
+#define	RLC_MAX_PG_CU					0x310C
+#	define MAX_PU_CU(x)				((x) << 0)
+#	define MAX_PU_CU_MASK				(0xff << 0)
+#define	RLC_AUTO_PG_CTRL				0x310C
+#	define AUTO_PG_EN				(1 << 0)
+#	define GRBM_REG_SGIT(x)				((x) << 3)
+#	define GRBM_REG_SGIT_MASK			(0xffff << 3)
+#	define PG_AFTER_GRBM_REG_ST(x)			((x) << 19)
+#	define PG_AFTER_GRBM_REG_ST_MASK		(0x1fff << 19)
+
+#define RLC_SERDES_WR_MASTER_MASK_0                       0x3115
+#define RLC_SERDES_WR_MASTER_MASK_1                       0x3116
+#define RLC_SERDES_WR_CTRL                                0x3117
+
+#define RLC_SERDES_MASTER_BUSY_0                          0x3119
+#define RLC_SERDES_MASTER_BUSY_1                          0x311A
+
+#define RLC_GCPM_GENERAL_3                                0x311E
+
+#define	DB_RENDER_CONTROL				0xA000
+
+#define DB_DEPTH_INFO                                   0xA00F
+
+#define PA_SC_RASTER_CONFIG                             0xA0D4
+#	define RB_MAP_PKR0(x)				((x) << 0)
+#	define RB_MAP_PKR0_MASK				(0x3 << 0)
+#	define RB_MAP_PKR1(x)				((x) << 2)
+#	define RB_MAP_PKR1_MASK				(0x3 << 2)
+#       define RASTER_CONFIG_RB_MAP_0                   0
+#       define RASTER_CONFIG_RB_MAP_1                   1
+#       define RASTER_CONFIG_RB_MAP_2                   2
+#       define RASTER_CONFIG_RB_MAP_3                   3
+#	define RB_XSEL2(x)				((x) << 4)
+#	define RB_XSEL2_MASK				(0x3 << 4)
+#	define RB_XSEL					(1 << 6)
+#	define RB_YSEL					(1 << 7)
+#	define PKR_MAP(x)				((x) << 8)
+#	define PKR_MAP_MASK				(0x3 << 8)
+#       define RASTER_CONFIG_PKR_MAP_0			0
+#       define RASTER_CONFIG_PKR_MAP_1			1
+#       define RASTER_CONFIG_PKR_MAP_2			2
+#       define RASTER_CONFIG_PKR_MAP_3			3
+#	define PKR_XSEL(x)				((x) << 10)
+#	define PKR_XSEL_MASK				(0x3 << 10)
+#	define PKR_YSEL(x)				((x) << 12)
+#	define PKR_YSEL_MASK				(0x3 << 12)
+#	define SC_MAP(x)				((x) << 16)
+#	define SC_MAP_MASK				(0x3 << 16)
+#	define SC_XSEL(x)				((x) << 18)
+#	define SC_XSEL_MASK				(0x3 << 18)
+#	define SC_YSEL(x)				((x) << 20)
+#	define SC_YSEL_MASK				(0x3 << 20)
+#	define SE_MAP(x)				((x) << 24)
+#	define SE_MAP_MASK				(0x3 << 24)
+#       define RASTER_CONFIG_SE_MAP_0			0
+#       define RASTER_CONFIG_SE_MAP_1			1
+#       define RASTER_CONFIG_SE_MAP_2			2
+#       define RASTER_CONFIG_SE_MAP_3			3
+#	define SE_XSEL(x)				((x) << 26)
+#	define SE_XSEL_MASK				(0x3 << 26)
+#	define SE_YSEL(x)				((x) << 28)
+#	define SE_YSEL_MASK				(0x3 << 28)
+
+
+#define VGT_EVENT_INITIATOR                             0xA2A4
+#       define SAMPLE_STREAMOUTSTATS1                   (1 << 0)
+#       define SAMPLE_STREAMOUTSTATS2                   (2 << 0)
+#       define SAMPLE_STREAMOUTSTATS3                   (3 << 0)
+#       define CACHE_FLUSH_TS                           (4 << 0)
+#       define CACHE_FLUSH                              (6 << 0)
+#       define CS_PARTIAL_FLUSH                         (7 << 0)
+#       define VGT_STREAMOUT_RESET                      (10 << 0)
+#       define END_OF_PIPE_INCR_DE                      (11 << 0)
+#       define END_OF_PIPE_IB_END                       (12 << 0)
+#       define RST_PIX_CNT                              (13 << 0)
+#       define VS_PARTIAL_FLUSH                         (15 << 0)
+#       define PS_PARTIAL_FLUSH                         (16 << 0)
+#       define CACHE_FLUSH_AND_INV_TS_EVENT             (20 << 0)
+#       define ZPASS_DONE                               (21 << 0)
+#       define CACHE_FLUSH_AND_INV_EVENT                (22 << 0)
+#       define PERFCOUNTER_START                        (23 << 0)
+#       define PERFCOUNTER_STOP                         (24 << 0)
+#       define PIPELINESTAT_START                       (25 << 0)
+#       define PIPELINESTAT_STOP                        (26 << 0)
+#       define PERFCOUNTER_SAMPLE                       (27 << 0)
+#       define SAMPLE_PIPELINESTAT                      (30 << 0)
+#       define SAMPLE_STREAMOUTSTATS                    (32 << 0)
+#       define RESET_VTX_CNT                            (33 << 0)
+#       define VGT_FLUSH                                (36 << 0)
+#       define BOTTOM_OF_PIPE_TS                        (40 << 0)
+#       define DB_CACHE_FLUSH_AND_INV                   (42 << 0)
+#       define FLUSH_AND_INV_DB_DATA_TS                 (43 << 0)
+#       define FLUSH_AND_INV_DB_META                    (44 << 0)
+#       define FLUSH_AND_INV_CB_DATA_TS                 (45 << 0)
+#       define FLUSH_AND_INV_CB_META                    (46 << 0)
+#       define CS_DONE                                  (47 << 0)
+#       define PS_DONE                                  (48 << 0)
+#       define FLUSH_AND_INV_CB_PIXEL_DATA              (49 << 0)
+#       define THREAD_TRACE_START                       (51 << 0)
+#       define THREAD_TRACE_STOP                        (52 << 0)
+#       define THREAD_TRACE_FLUSH                       (54 << 0)
+#       define THREAD_TRACE_FINISH                      (55 << 0)
+
+/* PIF PHY0 registers idx/data 0x8/0xc */
+#define PB0_PIF_CNTL                                      0x10
+#       define LS2_EXIT_TIME(x)                           ((x) << 17)
+#       define LS2_EXIT_TIME_MASK                         (0x7 << 17)
+#       define LS2_EXIT_TIME_SHIFT                        17
+#define PB0_PIF_PAIRING                                   0x11
+#       define MULTI_PIF                                  (1 << 25)
+#define PB0_PIF_PWRDOWN_0                                 0x12
+#       define PLL_POWER_STATE_IN_TXS2_0(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_0_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_0_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_0(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_0_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_0_SHIFT             10
+#       define PLL_RAMP_UP_TIME_0(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_0_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_0_SHIFT                   24
+#define PB0_PIF_PWRDOWN_1                                 0x13
+#       define PLL_POWER_STATE_IN_TXS2_1(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_1_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_1_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_1(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_1_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_1_SHIFT             10
+#       define PLL_RAMP_UP_TIME_1(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_1_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_1_SHIFT                   24
+
+#define PB0_PIF_PWRDOWN_2                                 0x17
+#       define PLL_POWER_STATE_IN_TXS2_2(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_2_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_2_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_2(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_2_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_2_SHIFT             10
+#       define PLL_RAMP_UP_TIME_2(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_2_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_2_SHIFT                   24
+#define PB0_PIF_PWRDOWN_3                                 0x18
+#       define PLL_POWER_STATE_IN_TXS2_3(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_3_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_3_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_3(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_3_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_3_SHIFT             10
+#       define PLL_RAMP_UP_TIME_3(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_3_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_3_SHIFT                   24
+/* PIF PHY1 registers idx/data 0x10/0x14 */
+#define PB1_PIF_CNTL                                      0x10
+#define PB1_PIF_PAIRING                                   0x11
+#define PB1_PIF_PWRDOWN_0                                 0x12
+#define PB1_PIF_PWRDOWN_1                                 0x13
+
+#define PB1_PIF_PWRDOWN_2                                 0x17
+#define PB1_PIF_PWRDOWN_3                                 0x18
+/* PCIE registers idx/data 0x30/0x34 */
+#define PCIE_CNTL2                                        0x1c /* PCIE */
+#       define SLV_MEM_LS_EN                              (1 << 16)
+#       define SLV_MEM_AGGRESSIVE_LS_EN                   (1 << 17)
+#       define MST_MEM_LS_EN                              (1 << 18)
+#       define REPLAY_MEM_LS_EN                           (1 << 19)
+#define PCIE_LC_STATUS1                                   0x28 /* PCIE */
+#       define LC_REVERSE_RCVR                            (1 << 0)
+#       define LC_REVERSE_XMIT                            (1 << 1)
+#       define LC_OPERATING_LINK_WIDTH_MASK               (0x7 << 2)
+#       define LC_OPERATING_LINK_WIDTH_SHIFT              2
+#       define LC_DETECTED_LINK_WIDTH_MASK                (0x7 << 5)
+#       define LC_DETECTED_LINK_WIDTH_SHIFT               5
+
+#define PCIE_P_CNTL                                       0x40 /* PCIE */
+#       define P_IGNORE_EDB_ERR                           (1 << 6)
+
+/* PCIE PORT registers idx/data 0x38/0x3c */
+#define PCIE_LC_CNTL                                      0xa0
+#       define LC_L0S_INACTIVITY(x)                       ((x) << 8)
+#       define LC_L0S_INACTIVITY_MASK                     (0xf << 8)
+#       define LC_L0S_INACTIVITY_SHIFT                    8
+#       define LC_L1_INACTIVITY(x)                        ((x) << 12)
+#       define LC_L1_INACTIVITY_MASK                      (0xf << 12)
+#       define LC_L1_INACTIVITY_SHIFT                     12
+#       define LC_PMI_TO_L1_DIS                           (1 << 16)
+#       define LC_ASPM_TO_L1_DIS                          (1 << 24)
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#       define LC_DYN_LANES_PWR_STATE(x)                  ((x) << 21)
+#       define LC_DYN_LANES_PWR_STATE_MASK                (0x3 << 21)
+#       define LC_DYN_LANES_PWR_STATE_SHIFT               21
+#define PCIE_LC_N_FTS_CNTL                                0xa3 /* PCIE_P */
+#       define LC_XMIT_N_FTS(x)                           ((x) << 0)
+#       define LC_XMIT_N_FTS_MASK                         (0xff << 0)
+#       define LC_XMIT_N_FTS_SHIFT                        0
+#       define LC_XMIT_N_FTS_OVERRIDE_EN                  (1 << 8)
+#       define LC_N_FTS_MASK                              (0xff << 24)
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_GEN3_EN_STRAP                           (1 << 1)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 2)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_MASK         (0x3 << 3)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT        3
+#       define LC_FORCE_EN_SW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_SW_SPEED_CHANGE               (1 << 6)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 7)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 8)
+#       define LC_INITIATE_LINK_SPEED_CHANGE              (1 << 9)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 10)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     10
+#       define LC_CURRENT_DATA_RATE_MASK                  (0x3 << 13) /* 0/1/2 = gen1/2/3 */
+#       define LC_CURRENT_DATA_RATE_SHIFT                 13
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 16)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 18)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 19)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN3               (1 << 20)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN3                (1 << 21)
+
+#define PCIE_LC_CNTL2                                     0xb1
+#       define LC_ALLOW_PDWN_IN_L1                        (1 << 17)
+#       define LC_ALLOW_PDWN_IN_L23                       (1 << 18)
+
+#define PCIE_LC_CNTL3                                     0xb5 /* PCIE_P */
+#       define LC_GO_TO_RECOVERY                          (1 << 30)
+#define PCIE_LC_CNTL4                                     0xb6 /* PCIE_P */
+#       define LC_REDO_EQ                                 (1 << 5)
+#       define LC_SET_QUIESCE                             (1 << 13)
+
+/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG				0x3bd3
+#define UVD_UDEC_DB_ADDR_CONFIG				0x3bd4
+#define UVD_UDEC_DBW_ADDR_CONFIG			0x3bd5
+#define UVD_RBC_RB_RPTR					0x3da4
+#define UVD_RBC_RB_WPTR					0x3da5
+#define UVD_STATUS					0x3daf
+
+#define	UVD_CGC_CTRL					0x3dc2
+#	define DCM					(1 << 0)
+#	define CG_DT(x)					((x) << 2)
+#	define CG_DT_MASK				(0xf << 2)
+#	define CLK_OD(x)				((x) << 6)
+#	define CLK_OD_MASK				(0x1f << 6)
+
+ /* UVD CTX indirect */
+#define	UVD_CGC_MEM_CTRL				0xC0
+#define	UVD_CGC_CTRL2					0xC1
+#	define DYN_OR_EN				(1 << 0)
+#	define DYN_RR_EN				(1 << 1)
+#	define G_DIV_ID(x)				((x) << 2)
+#	define G_DIV_ID_MASK				(0x7 << 2)
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n)	((RADEON_PACKET_TYPE0 << 30) |			\
+			 (((reg) >> 2) & 0xFFFF) |			\
+			 ((n) & 0x3FFF) << 16)
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define RADEON_PACKET_TYPE3 3
+#define PACKET3(op, n)	((RADEON_PACKET_TYPE3 << 30) |			\
+			 (((op) & 0xFF) << 8) |				\
+			 ((n) & 0x3FFF) << 16)
+
+#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
+
+/* Packet 3 types */
+#define	PACKET3_NOP					0x10
+#define	PACKET3_SET_BASE				0x11
+#define		PACKET3_BASE_INDEX(x)                  ((x) << 0)
+#define			GDS_PARTITION_BASE		2
+#define			CE_PARTITION_BASE		3
+#define	PACKET3_CLEAR_STATE				0x12
+#define	PACKET3_INDEX_BUFFER_SIZE			0x13
+#define	PACKET3_DISPATCH_DIRECT				0x15
+#define	PACKET3_DISPATCH_INDIRECT			0x16
+#define	PACKET3_ALLOC_GDS				0x1B
+#define	PACKET3_WRITE_GDS_RAM				0x1C
+#define	PACKET3_ATOMIC_GDS				0x1D
+#define	PACKET3_ATOMIC					0x1E
+#define	PACKET3_OCCLUSION_QUERY				0x1F
+#define	PACKET3_SET_PREDICATION				0x20
+#define	PACKET3_REG_RMW					0x21
+#define	PACKET3_COND_EXEC				0x22
+#define	PACKET3_PRED_EXEC				0x23
+#define	PACKET3_DRAW_INDIRECT				0x24
+#define	PACKET3_DRAW_INDEX_INDIRECT			0x25
+#define	PACKET3_INDEX_BASE				0x26
+#define	PACKET3_DRAW_INDEX_2				0x27
+#define	PACKET3_CONTEXT_CONTROL				0x28
+#define	PACKET3_INDEX_TYPE				0x2A
+#define	PACKET3_DRAW_INDIRECT_MULTI			0x2C
+#define	PACKET3_DRAW_INDEX_AUTO				0x2D
+#define	PACKET3_DRAW_INDEX_IMMD				0x2E
+#define	PACKET3_NUM_INSTANCES				0x2F
+#define	PACKET3_DRAW_INDEX_MULTI_AUTO			0x30
+#define	PACKET3_INDIRECT_BUFFER_CONST			0x31
+#define	PACKET3_INDIRECT_BUFFER				0x3F
+#define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
+#define	PACKET3_DRAW_INDEX_OFFSET_2			0x35
+#define	PACKET3_DRAW_INDEX_MULTI_ELEMENT		0x36
+#define	PACKET3_WRITE_DATA				0x37
+#define		WRITE_DATA_DST_SEL(x)                   ((x) << 8)
+                /* 0 - register
+		 * 1 - memory (sync - via GRBM)
+		 * 2 - tc/l2
+		 * 3 - gds
+		 * 4 - reserved
+		 * 5 - memory (async - direct)
+		 */
+#define		WR_ONE_ADDR                             (1 << 16)
+#define		WR_CONFIRM                              (1 << 20)
+#define		WRITE_DATA_ENGINE_SEL(x)                ((x) << 30)
+                /* 0 - me
+		 * 1 - pfp
+		 * 2 - ce
+		 */
+#define	PACKET3_DRAW_INDEX_INDIRECT_MULTI		0x38
+#define	PACKET3_MEM_SEMAPHORE				0x39
+#define	PACKET3_MPEG_INDEX				0x3A
+#define	PACKET3_COPY_DW					0x3B
+#define	PACKET3_WAIT_REG_MEM				0x3C
+#define		WAIT_REG_MEM_FUNCTION(x)                ((x) << 0)
+                /* 0 - always
+		 * 1 - <
+		 * 2 - <=
+		 * 3 - ==
+		 * 4 - !=
+		 * 5 - >=
+		 * 6 - >
+		 */
+#define		WAIT_REG_MEM_MEM_SPACE(x)               ((x) << 4)
+                /* 0 - reg
+		 * 1 - mem
+		 */
+#define		WAIT_REG_MEM_ENGINE(x)                  ((x) << 8)
+                /* 0 - me
+		 * 1 - pfp
+		 */
+#define	PACKET3_MEM_WRITE				0x3D
+#define	PACKET3_COPY_DATA				0x40
+#define	PACKET3_CP_DMA					0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ *    SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
+                /* 0 - DST_ADDR
+		 * 1 - GDS
+		 */
+#              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
+                /* 0 - ME
+		 * 1 - PFP
+		 */
+#              define PACKET3_CP_DMA_SRC_SEL(x)    ((x) << 29)
+                /* 0 - SRC_ADDR
+		 * 1 - GDS
+		 * 2 - DATA
+		 */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
+#              define PACKET3_CP_DMA_CMD_RAW_WAIT  (1 << 30)
+#define	PACKET3_PFP_SYNC_ME				0x42
+#define	PACKET3_SURFACE_SYNC				0x43
+#              define PACKET3_DEST_BASE_0_ENA      (1 << 0)
+#              define PACKET3_DEST_BASE_1_ENA      (1 << 1)
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
+#              define PACKET3_CB2_DEST_BASE_ENA    (1 << 8)
+#              define PACKET3_CB3_DEST_BASE_ENA    (1 << 9)
+#              define PACKET3_CB4_DEST_BASE_ENA    (1 << 10)
+#              define PACKET3_CB5_DEST_BASE_ENA    (1 << 11)
+#              define PACKET3_CB6_DEST_BASE_ENA    (1 << 12)
+#              define PACKET3_CB7_DEST_BASE_ENA    (1 << 13)
+#              define PACKET3_DB_DEST_BASE_ENA     (1 << 14)
+#              define PACKET3_DEST_BASE_2_ENA      (1 << 19)
+#              define PACKET3_DEST_BASE_3_ENA      (1 << 21)
+#              define PACKET3_TCL1_ACTION_ENA      (1 << 22)
+#              define PACKET3_TC_ACTION_ENA        (1 << 23)
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
+#              define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
+#define	PACKET3_ME_INITIALIZE				0x44
+#define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define	PACKET3_COND_WRITE				0x45
+#define	PACKET3_EVENT_WRITE				0x46
+#define		EVENT_TYPE(x)                           ((x) << 0)
+#define		EVENT_INDEX(x)                          ((x) << 8)
+                /* 0 - any non-TS event
+		 * 1 - ZPASS_DONE
+		 * 2 - SAMPLE_PIPELINESTAT
+		 * 3 - SAMPLE_STREAMOUTSTAT*
+		 * 4 - *S_PARTIAL_FLUSH
+		 * 5 - EOP events
+		 * 6 - EOS events
+		 * 7 - CACHE_FLUSH, CACHE_FLUSH_AND_INV_EVENT
+		 */
+#define		INV_L2                                  (1 << 20)
+                /* INV TC L2 cache when EVENT_INDEX = 7 */
+#define	PACKET3_EVENT_WRITE_EOP				0x47
+#define		DATA_SEL(x)                             ((x) << 29)
+                /* 0 - discard
+		 * 1 - send low 32bit data
+		 * 2 - send 64bit data
+		 * 3 - send 64bit counter value
+		 */
+#define		INT_SEL(x)                              ((x) << 24)
+                /* 0 - none
+		 * 1 - interrupt only (DATA_SEL = 0)
+		 * 2 - interrupt when data write is confirmed
+		 */
+#define	PACKET3_EVENT_WRITE_EOS				0x48
+#define	PACKET3_PREAMBLE_CNTL				0x4A
+#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
+#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
+#define	PACKET3_ONE_REG_WRITE				0x57
+#define	PACKET3_LOAD_CONFIG_REG				0x5F
+#define	PACKET3_LOAD_CONTEXT_REG			0x60
+#define	PACKET3_LOAD_SH_REG				0x61
+#define	PACKET3_SET_CONFIG_REG				0x68
+#define		PACKET3_SET_CONFIG_REG_START			0x00002000
+#define		PACKET3_SET_CONFIG_REG_END			0x00002c00
+#define	PACKET3_SET_CONTEXT_REG				0x69
+#define		PACKET3_SET_CONTEXT_REG_START			0x000a000
+#define		PACKET3_SET_CONTEXT_REG_END			0x000a400
+#define	PACKET3_SET_CONTEXT_REG_INDIRECT		0x73
+#define	PACKET3_SET_RESOURCE_INDIRECT			0x74
+#define	PACKET3_SET_SH_REG				0x76
+#define		PACKET3_SET_SH_REG_START			0x00002c00
+#define		PACKET3_SET_SH_REG_END				0x00003000
+#define	PACKET3_SET_SH_REG_OFFSET			0x77
+#define	PACKET3_ME_WRITE				0x7A
+#define	PACKET3_SCRATCH_RAM_WRITE			0x7D
+#define	PACKET3_SCRATCH_RAM_READ			0x7E
+#define	PACKET3_CE_WRITE				0x7F
+#define	PACKET3_LOAD_CONST_RAM				0x80
+#define	PACKET3_WRITE_CONST_RAM				0x81
+#define	PACKET3_WRITE_CONST_RAM_OFFSET			0x82
+#define	PACKET3_DUMP_CONST_RAM				0x83
+#define	PACKET3_INCREMENT_CE_COUNTER			0x84
+#define	PACKET3_INCREMENT_DE_COUNTER			0x85
+#define	PACKET3_WAIT_ON_CE_COUNTER			0x86
+#define	PACKET3_WAIT_ON_DE_COUNTER			0x87
+#define	PACKET3_WAIT_ON_DE_COUNTER_DIFF			0x88
+#define	PACKET3_SET_CE_DE_COUNTERS			0x89
+#define	PACKET3_WAIT_ON_AVAIL_BUFFER			0x8A
+#define	PACKET3_SWITCH_BUFFER				0x8B
+
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET                              0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET                              0x200 /* not a register */
+
+#define DMA_RB_CNTL                                       0x3400
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0x3401
+#define DMA_RB_RPTR                                       0x3402
+#define DMA_RB_WPTR                                       0x3403
+
+#define DMA_RB_RPTR_ADDR_HI                               0x3407
+#define DMA_RB_RPTR_ADDR_LO                               0x3408
+
+#define DMA_IB_CNTL                                       0x3409
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#       define CMD_VMID_FORCE                             (1 << 31)
+#define DMA_IB_RPTR                                       0x340a
+#define DMA_CNTL                                          0x340b
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0x340d
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_TILING_CONFIG  				  0x342e
+
+#define	DMA_POWER_CNTL					0x342f
+#       define MEM_POWER_OVERRIDE                       (1 << 8)
+#define	DMA_CLK_CTRL					0x3430
+
+#define	DMA_PG						0x3435
+#	define PG_CNTL_ENABLE				(1 << 0)
+#define	DMA_PGFSM_CONFIG				0x3436
+#define	DMA_PGFSM_WRITE					0x3437
+
+#define DMA_PACKET(cmd, b, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((b) & 0x1) << 26) |		\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n)	((((cmd) & 0xF) << 28) |	\
+					 (((vmid) & 0xF) << 20) |	\
+					 (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n)		((2 << 28) |			\
+					 (1 << 26) |			\
+					 (1 << 21) |			\
+					 (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_SRBM_WRITE				  0x9
+#define	DMA_PACKET_CONSTANT_FILL			  0xd
+#define	DMA_PACKET_POLL_REG_MEM				  0xe
+#define	DMA_PACKET_NOP					  0xf
+
+#define VCE_STATUS					0x20004
+#define VCE_VCPU_CNTL					0x20014
+#define		VCE_CLK_EN				(1 << 0)
+#define VCE_VCPU_CACHE_OFFSET0				0x20024
+#define VCE_VCPU_CACHE_SIZE0				0x20028
+#define VCE_VCPU_CACHE_OFFSET1				0x2002c
+#define VCE_VCPU_CACHE_SIZE1				0x20030
+#define VCE_VCPU_CACHE_OFFSET2				0x20034
+#define VCE_VCPU_CACHE_SIZE2				0x20038
+#define VCE_SOFT_RESET					0x20120
+#define 	VCE_ECPU_SOFT_RESET			(1 << 0)
+#define 	VCE_FME_SOFT_RESET			(1 << 2)
+#define VCE_RB_BASE_LO2					0x2016c
+#define VCE_RB_BASE_HI2					0x20170
+#define VCE_RB_SIZE2					0x20174
+#define VCE_RB_RPTR2					0x20178
+#define VCE_RB_WPTR2					0x2017c
+#define VCE_RB_BASE_LO					0x20180
+#define VCE_RB_BASE_HI					0x20184
+#define VCE_RB_SIZE					0x20188
+#define VCE_RB_RPTR					0x2018c
+#define VCE_RB_WPTR					0x20190
+#define VCE_CLOCK_GATING_A				0x202f8
+#define VCE_CLOCK_GATING_B				0x202fc
+#define VCE_UENC_CLOCK_GATING				0x205bc
+#define VCE_UENC_REG_CLOCK_GATING			0x205c0
+#define VCE_FW_REG_STATUS				0x20e10
+#	define VCE_FW_REG_STATUS_BUSY			(1 << 0)
+#	define VCE_FW_REG_STATUS_PASS			(1 << 3)
+#	define VCE_FW_REG_STATUS_DONE			(1 << 11)
+#define VCE_LMI_FW_START_KEYSEL				0x20e18
+#define VCE_LMI_FW_PERIODIC_CTRL			0x20e20
+#define VCE_LMI_CTRL2					0x20e74
+#define VCE_LMI_CTRL					0x20e98
+#define VCE_LMI_VM_CTRL					0x20ea0
+#define VCE_LMI_SWAP_CNTL				0x20eb4
+#define VCE_LMI_SWAP_CNTL1				0x20eb8
+#define VCE_LMI_CACHE_CTRL				0x20ef4
+
+#define VCE_CMD_NO_OP					0x00000000
+#define VCE_CMD_END					0x00000001
+#define VCE_CMD_IB					0x00000002
+#define VCE_CMD_FENCE					0x00000003
+#define VCE_CMD_TRAP					0x00000004
+#define VCE_CMD_IB_AUTO					0x00000005
+#define VCE_CMD_SEMAPHORE				0x00000006
+
+
+//#dce stupp
+/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
+#define SI_CRTC0_REGISTER_OFFSET                0 //(0x6df0 - 0x6df0)/4
+#define SI_CRTC1_REGISTER_OFFSET                0x300 //(0x79f0 - 0x6df0)/4
+#define SI_CRTC2_REGISTER_OFFSET                0x2600 //(0x105f0 - 0x6df0)/4
+#define SI_CRTC3_REGISTER_OFFSET                0x2900 //(0x111f0 - 0x6df0)/4
+#define SI_CRTC4_REGISTER_OFFSET                0x2c00 //(0x11df0 - 0x6df0)/4
+#define SI_CRTC5_REGISTER_OFFSET                0x2f00 //(0x129f0 - 0x6df0)/4
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+#define AMDGPU_MM_INDEX		        0x0000
+#define AMDGPU_MM_DATA		        0x0001
+
+#define VERDE_NUM_CRTC 6
+#define	BLACKOUT_MODE_MASK			0x00000007
+#define	VGA_RENDER_CONTROL			0xC0
+#define R_000300_VGA_RENDER_CONTROL             0xC0
+#define C_000300_VGA_VSTATUS_CNTL               0xFFFCFFFF
+#define EVERGREEN_CRTC_STATUS                   0x1BA3
+#define EVERGREEN_CRTC_V_BLANK                  (1 << 0)
+#define EVERGREEN_CRTC_STATUS_POSITION          0x1BA4
+/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_V_BLANK_START_END                0x1b8d
+#define EVERGREEN_CRTC_CONTROL                          0x1b9c
+#define EVERGREEN_CRTC_MASTER_EN                 (1 << 0)
+#define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_BLANK_CONTROL                    0x1b9d
+#define EVERGREEN_CRTC_BLANK_DATA_EN             (1 << 8)
+#define EVERGREEN_CRTC_V_BLANK                   (1 << 0)
+#define EVERGREEN_CRTC_STATUS_HV_COUNT                  0x1ba8
+#define EVERGREEN_CRTC_UPDATE_LOCK                      0x1bb5
+#define EVERGREEN_MASTER_UPDATE_LOCK                    0x1bbd
+#define EVERGREEN_MASTER_UPDATE_MODE                    0x1bbe
+#define EVERGREEN_GRPH_UPDATE_LOCK               (1 << 16)
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH     0x1a07
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH   0x1a08
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS          0x1a04
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS        0x1a05
+#define EVERGREEN_GRPH_UPDATE                           0x1a11
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS               0xc4
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH          0xc9
+#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING    (1 << 2)
+
+#define EVERGREEN_DATA_FORMAT                           0x1ac0
+#       define EVERGREEN_INTERLEAVE_EN                  (1 << 0)
+
+#define MC_SHARED_CHMAP__NOOFCHAN_MASK 0xf000
+#define MC_SHARED_CHMAP__NOOFCHAN__SHIFT 0xc
+
+#define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL            (0 << 20)
+#define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED            (1 << 20)
+#define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1            (2 << 20)
+#define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1            (4 << 20)
+
+#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                0x1a45
+#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                0x1845
+
+#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH              0x1847
+#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH              0x1a47
+
+#define DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK 0x8
+
+#define DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK 0x4
+
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 0x20000
+
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK 0x1
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK 0x100
+
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK 0x1
+
+#define R600_D1GRPH_SWAP_CONTROL                               0x1843
+#define R600_D1GRPH_SWAP_ENDIAN_NONE                    (0 << 0)
+#define R600_D1GRPH_SWAP_ENDIAN_16BIT                   (1 << 0)
+#define R600_D1GRPH_SWAP_ENDIAN_32BIT                   (2 << 0)
+#define R600_D1GRPH_SWAP_ENDIAN_64BIT                   (3 << 0)
+
+#define AVIVO_D1VGA_CONTROL					0x00cc
+#       define AVIVO_DVGA_CONTROL_MODE_ENABLE            (1 << 0)
+#       define AVIVO_DVGA_CONTROL_TIMING_SELECT          (1 << 8)
+#       define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT   (1 << 9)
+#       define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1 << 10)
+#       define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN      (1 << 16)
+#       define AVIVO_DVGA_CONTROL_ROTATE                 (1 << 24)
+#define AVIVO_D2VGA_CONTROL					0x00ce
+
+#define R600_BUS_CNTL                                           0x1508
+#       define R600_BIOS_ROM_DIS                                (1 << 1)
+
+#define R600_ROM_CNTL                              0x580
+#       define R600_SCK_OVERWRITE                  (1 << 1)
+#       define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
+#       define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK  (0xf << 28)
+
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK 0x1
+
+#define FMT_BIT_DEPTH_CONTROL                0x1bf2
+#define FMT_TRUNCATE_EN               (1 << 0)
+#define FMT_TRUNCATE_DEPTH            (1 << 4)
+#define FMT_SPATIAL_DITHER_EN         (1 << 8)
+#define FMT_SPATIAL_DITHER_MODE(x)    ((x) << 9)
+#define FMT_SPATIAL_DITHER_DEPTH      (1 << 12)
+#define FMT_FRAME_RANDOM_ENABLE       (1 << 13)
+#define FMT_RGB_RANDOM_ENABLE         (1 << 14)
+#define FMT_HIGHPASS_RANDOM_ENABLE    (1 << 15)
+#define FMT_TEMPORAL_DITHER_EN        (1 << 16)
+#define FMT_TEMPORAL_DITHER_DEPTH     (1 << 20)
+#define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+#define FMT_TEMPORAL_LEVEL            (1 << 24)
+#define FMT_TEMPORAL_DITHER_RESET     (1 << 25)
+#define FMT_25FRC_SEL(x)              ((x) << 26)
+#define FMT_50FRC_SEL(x)              ((x) << 28)
+#define FMT_75FRC_SEL(x)              ((x) << 30)
+
+#define EVERGREEN_DC_LUT_CONTROL                        0x1a80
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE              0x1a81
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN             0x1a82
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED               0x1a83
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE              0x1a84
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN             0x1a85
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED               0x1a86
+#define EVERGREEN_DC_LUT_30_COLOR                       0x1a7c
+#define EVERGREEN_DC_LUT_RW_INDEX                       0x1a79
+#define EVERGREEN_DC_LUT_WRITE_EN_MASK                  0x1a7e
+#define EVERGREEN_DC_LUT_RW_MODE                        0x1a78
+
+#define EVERGREEN_GRPH_ENABLE                           0x1a00
+#define EVERGREEN_GRPH_CONTROL                          0x1a01
+#define EVERGREEN_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#define EVERGREEN_GRPH_DEPTH_8BPP                0
+#define EVERGREEN_GRPH_DEPTH_16BPP               1
+#define EVERGREEN_GRPH_DEPTH_32BPP               2
+#define EVERGREEN_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#define EVERGREEN_ADDR_SURF_2_BANK               0
+#define EVERGREEN_ADDR_SURF_4_BANK               1
+#define EVERGREEN_ADDR_SURF_8_BANK               2
+#define EVERGREEN_ADDR_SURF_16_BANK              3
+#define EVERGREEN_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#define EVERGREEN_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_1         0
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_2         1
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_4         2
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_8         3
+#define EVERGREEN_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+
+#define EVERGREEN_GRPH_FORMAT_INDEXED            0
+#define EVERGREEN_GRPH_FORMAT_ARGB1555           0
+#define EVERGREEN_GRPH_FORMAT_ARGB565            1
+#define EVERGREEN_GRPH_FORMAT_ARGB4444           2
+#define EVERGREEN_GRPH_FORMAT_AI88               3
+#define EVERGREEN_GRPH_FORMAT_MONO16             4
+#define EVERGREEN_GRPH_FORMAT_BGRA5551           5
+
+/* 32 BPP */
+#define EVERGREEN_GRPH_FORMAT_ARGB8888           0
+#define EVERGREEN_GRPH_FORMAT_ARGB2101010        1
+#define EVERGREEN_GRPH_FORMAT_32BPP_DIG          2
+#define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010     3
+#define EVERGREEN_GRPH_FORMAT_BGRA1010102        4
+#define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102     5
+#define EVERGREEN_GRPH_FORMAT_RGB111110          6
+#define EVERGREEN_GRPH_FORMAT_BGR101111          7
+#define EVERGREEN_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1        0
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2        1
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4        2
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8        3
+#define EVERGREEN_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B       0
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B      1
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B      2
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B      3
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB       4
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB       5
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB       6
+#define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#define EVERGREEN_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL      0
+#define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED      1
+#define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1      2
+#define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1      4
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+
+#define EVERGREEN_GRPH_SWAP_CONTROL                     0x1a03
+#define EVERGREEN_GRPH_ENDIAN_SWAP(x)            (((x) & 0x3) << 0)
+#       define EVERGREEN_GRPH_ENDIAN_NONE               0
+#       define EVERGREEN_GRPH_ENDIAN_8IN16              1
+#       define EVERGREEN_GRPH_ENDIAN_8IN32              2
+#       define EVERGREEN_GRPH_ENDIAN_8IN64              3
+
+#define EVERGREEN_D3VGA_CONTROL                         0xf8
+#define EVERGREEN_D4VGA_CONTROL                         0xf9
+#define EVERGREEN_D5VGA_CONTROL                         0xfa
+#define EVERGREEN_D6VGA_CONTROL                         0xfb
+
+#define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK      0xffffff00
+
+#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL         0x1a02
+#define EVERGREEN_LUT_10BIT_BYPASS_EN            (1 << 8)
+
+#define EVERGREEN_GRPH_PITCH                            0x1a06
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH     0x1a07
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH   0x1a08
+#define EVERGREEN_GRPH_SURFACE_OFFSET_X                 0x1a09
+#define EVERGREEN_GRPH_SURFACE_OFFSET_Y                 0x1a0a
+#define EVERGREEN_GRPH_X_START                          0x1a0b
+#define EVERGREEN_GRPH_Y_START                          0x1a0c
+#define EVERGREEN_GRPH_X_END                            0x1a0d
+#define EVERGREEN_GRPH_Y_END                            0x1a0e
+#define EVERGREEN_GRPH_UPDATE                           0x1a11
+#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING    (1 << 2)
+#define EVERGREEN_GRPH_UPDATE_LOCK               (1 << 16)
+#define EVERGREEN_GRPH_FLIP_CONTROL                     0x1a12
+#define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
+
+#define EVERGREEN_VIEWPORT_START                        0x1b5c
+#define EVERGREEN_VIEWPORT_SIZE                         0x1b5d
+#define EVERGREEN_DESKTOP_HEIGHT                        0x1ac1
+
+/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
+#define EVERGREEN_CUR_CONTROL                           0x1a66
+#       define EVERGREEN_CURSOR_EN                      (1 << 0)
+#       define EVERGREEN_CURSOR_MODE(x)                 (((x) & 0x3) << 8)
+#       define EVERGREEN_CURSOR_MONO                    0
+#       define EVERGREEN_CURSOR_24_1                    1
+#       define EVERGREEN_CURSOR_24_8_PRE_MULT           2
+#       define EVERGREEN_CURSOR_24_8_UNPRE_MULT         3
+#       define EVERGREEN_CURSOR_2X_MAGNIFY              (1 << 16)
+#       define EVERGREEN_CURSOR_FORCE_MC_ON             (1 << 20)
+#       define EVERGREEN_CURSOR_URGENT_CONTROL(x)       (((x) & 0x7) << 24)
+#       define EVERGREEN_CURSOR_URGENT_ALWAYS           0
+#       define EVERGREEN_CURSOR_URGENT_1_8              1
+#       define EVERGREEN_CURSOR_URGENT_1_4              2
+#       define EVERGREEN_CURSOR_URGENT_3_8              3
+#       define EVERGREEN_CURSOR_URGENT_1_2              4
+#define EVERGREEN_CUR_SURFACE_ADDRESS                   0x1a67
+#       define EVERGREEN_CUR_SURFACE_ADDRESS_MASK       0xfffff000
+#define EVERGREEN_CUR_SIZE                              0x1a68
+#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH              0x1a69
+#define EVERGREEN_CUR_POSITION                          0x1a6a
+#define EVERGREEN_CUR_HOT_SPOT                          0x1a6b
+#define EVERGREEN_CUR_COLOR1                            0x1a6c
+#define EVERGREEN_CUR_COLOR2                            0x1a6d
+#define EVERGREEN_CUR_UPDATE                            0x1a6e
+#       define EVERGREEN_CURSOR_UPDATE_PENDING          (1 << 0)
+#       define EVERGREEN_CURSOR_UPDATE_TAKEN            (1 << 1)
+#       define EVERGREEN_CURSOR_UPDATE_LOCK             (1 << 16)
+#       define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
+
+
+#define NI_INPUT_CSC_CONTROL                           0x1a35
+#       define NI_INPUT_CSC_GRPH_MODE(x)               (((x) & 0x3) << 0)
+#       define NI_INPUT_CSC_BYPASS                     0
+#       define NI_INPUT_CSC_PROG_COEFF                 1
+#       define NI_INPUT_CSC_PROG_SHARED_MATRIXA        2
+#       define NI_INPUT_CSC_OVL_MODE(x)                (((x) & 0x3) << 4)
+
+#define NI_OUTPUT_CSC_CONTROL                          0x1a3c
+#       define NI_OUTPUT_CSC_GRPH_MODE(x)              (((x) & 0x7) << 0)
+#       define NI_OUTPUT_CSC_BYPASS                    0
+#       define NI_OUTPUT_CSC_TV_RGB                    1
+#       define NI_OUTPUT_CSC_YCBCR_601                 2
+#       define NI_OUTPUT_CSC_YCBCR_709                 3
+#       define NI_OUTPUT_CSC_PROG_COEFF                4
+#       define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB       5
+#       define NI_OUTPUT_CSC_OVL_MODE(x)               (((x) & 0x7) << 4)
+
+#define NI_DEGAMMA_CONTROL                             0x1a58
+#       define NI_GRPH_DEGAMMA_MODE(x)                 (((x) & 0x3) << 0)
+#       define NI_DEGAMMA_BYPASS                       0
+#       define NI_DEGAMMA_SRGB_24                      1
+#       define NI_DEGAMMA_XVYCC_222                    2
+#       define NI_OVL_DEGAMMA_MODE(x)                  (((x) & 0x3) << 4)
+#       define NI_ICON_DEGAMMA_MODE(x)                 (((x) & 0x3) << 8)
+#       define NI_CURSOR_DEGAMMA_MODE(x)               (((x) & 0x3) << 12)
+
+#define NI_GAMUT_REMAP_CONTROL                         0x1a59
+#       define NI_GRPH_GAMUT_REMAP_MODE(x)             (((x) & 0x3) << 0)
+#       define NI_GAMUT_REMAP_BYPASS                   0
+#       define NI_GAMUT_REMAP_PROG_COEFF               1
+#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA      2
+#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB      3
+#       define NI_OVL_GAMUT_REMAP_MODE(x)              (((x) & 0x3) << 4)
+
+#define NI_REGAMMA_CONTROL                             0x1aa0
+#       define NI_GRPH_REGAMMA_MODE(x)                 (((x) & 0x7) << 0)
+#       define NI_REGAMMA_BYPASS                       0
+#       define NI_REGAMMA_SRGB_24                      1
+#       define NI_REGAMMA_XVYCC_222                    2
+#       define NI_REGAMMA_PROG_A                       3
+#       define NI_REGAMMA_PROG_B                       4
+#       define NI_OVL_REGAMMA_MODE(x)                  (((x) & 0x7) << 4)
+
+
+#define NI_PRESCALE_GRPH_CONTROL                       0x1a2d
+#       define NI_GRPH_PRESCALE_BYPASS                 (1 << 4)
+
+#define NI_PRESCALE_OVL_CONTROL                        0x1a31
+#       define NI_OVL_PRESCALE_BYPASS                  (1 << 4)
+
+#define NI_INPUT_GAMMA_CONTROL                         0x1a10
+#       define NI_GRPH_INPUT_GAMMA_MODE(x)             (((x) & 0x3) << 0)
+#       define NI_INPUT_GAMMA_USE_LUT                  0
+#       define NI_INPUT_GAMMA_BYPASS                   1
+#       define NI_INPUT_GAMMA_SRGB_24                  2
+#       define NI_INPUT_GAMMA_XVYCC_222                3
+#       define NI_OVL_INPUT_GAMMA_MODE(x)              (((x) & 0x3) << 4)
+
+#define IH_RB_WPTR__RB_OVERFLOW_MASK	0x1
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000
+#define SRBM_STATUS__IH_BUSY_MASK	0x20000
+#define SRBM_SOFT_RESET__SOFT_RESET_IH_MASK	0x400
+
+#define	BLACKOUT_MODE_MASK			0x00000007
+#define	VGA_RENDER_CONTROL			0xC0
+#define R_000300_VGA_RENDER_CONTROL             0xC0
+#define C_000300_VGA_VSTATUS_CNTL               0xFFFCFFFF
+#define EVERGREEN_CRTC_STATUS                   0x1BA3
+#define EVERGREEN_CRTC_V_BLANK                  (1 << 0)
+#define EVERGREEN_CRTC_STATUS_POSITION          0x1BA4
+/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_V_BLANK_START_END                0x1b8d
+#define EVERGREEN_CRTC_CONTROL                          0x1b9c
+#       define EVERGREEN_CRTC_MASTER_EN                 (1 << 0)
+#       define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_BLANK_CONTROL                    0x1b9d
+#       define EVERGREEN_CRTC_BLANK_DATA_EN             (1 << 8)
+#       define EVERGREEN_CRTC_V_BLANK                   (1 << 0)
+#define EVERGREEN_CRTC_STATUS_HV_COUNT                  0x1ba8
+#define EVERGREEN_CRTC_UPDATE_LOCK                      0x1bb5
+#define EVERGREEN_MASTER_UPDATE_LOCK                    0x1bbd
+#define EVERGREEN_MASTER_UPDATE_MODE                    0x1bbe
+#define EVERGREEN_GRPH_UPDATE_LOCK               (1 << 16)
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH     0x1a07
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH   0x1a08
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS          0x1a04
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS        0x1a05
+#define EVERGREEN_GRPH_UPDATE                           0x1a11
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS               0xc4
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH          0xc9
+#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING    (1 << 2)
+
+#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10
+#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
+#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80
+#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
+#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x400
+#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x2000
+#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xd
+#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10000
+#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80000
+#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x13
+
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID_MASK 0x1e000000
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID__SHIFT 0x19
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS_MASK 0xff
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS__SHIFT 0x0
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID_MASK 0xff000
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID__SHIFT 0xc
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW_MASK 0x1000000
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW__SHIFT 0x18
+
+#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE_MASK 0x7
+#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE__SHIFT 0x0
+
+#define mmBIF_FB_EN__xxFB_READ_EN_MASK 0x1
+#define mmBIF_FB_EN__xxFB_READ_EN__SHIFT 0x0
+#define mmBIF_FB_EN__xxFB_WRITE_EN_MASK 0x2
+#define mmBIF_FB_EN__xxFB_WRITE_EN__SHIFT 0x1
+
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC_MASK 0x20000
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC__SHIFT 0x11
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC_MASK 0x800
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC__SHIFT 0xb
+
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x8
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x3
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x40
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x6
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x200
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x1000
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xc
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x8000
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x40000
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x12
+
+#define MC_SEQ_MISC0__MT__MASK	0xf0000000
+#define MC_SEQ_MISC0__MT__GDDR1  0x10000000
+#define MC_SEQ_MISC0__MT__DDR2   0x20000000
+#define MC_SEQ_MISC0__MT__GDDR3  0x30000000
+#define MC_SEQ_MISC0__MT__GDDR4  0x40000000
+#define MC_SEQ_MISC0__MT__GDDR5  0x50000000
+#define MC_SEQ_MISC0__MT__HBM    0x60000000
+#define MC_SEQ_MISC0__MT__DDR3   0xB0000000
+
+#define SRBM_STATUS__MCB_BUSY_MASK 0x200
+#define SRBM_STATUS__MCB_BUSY__SHIFT 0x9
+#define SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK 0x400
+#define SRBM_STATUS__MCB_NON_DISPLAY_BUSY__SHIFT 0xa
+#define SRBM_STATUS__MCC_BUSY_MASK 0x800
+#define SRBM_STATUS__MCC_BUSY__SHIFT 0xb
+#define SRBM_STATUS__MCD_BUSY_MASK 0x1000
+#define SRBM_STATUS__MCD_BUSY__SHIFT 0xc
+#define SRBM_STATUS__VMC_BUSY_MASK 0x100
+#define SRBM_STATUS__VMC_BUSY__SHIFT 0x8
+
+
+#define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000
+#define CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK 0x4000000
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x800000
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK 0x400000
+#define PACKET3_SEM_WAIT_ON_SIGNAL    (0x1 << 12)
+#define PACKET3_SEM_SEL_SIGNAL	    (0x6 << 29)
+#define PACKET3_SEM_SEL_WAIT	    (0x7 << 29)
+
+#define CONFIG_CNTL	0x1509
+#define CC_DRM_ID_STRAPS	0X1559
+#define AMDGPU_PCIE_INDEX	0xc
+#define AMDGPU_PCIE_DATA	0xd
+
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL                     0x3411
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL                      0x3412
+#define DMA_MODE                                          0x342f
+#define DMA_RB_RPTR_ADDR_HI                               0x3407
+#define DMA_RB_RPTR_ADDR_LO                               0x3408
+#define DMA_BUSY_MASK 0x20
+#define DMA1_BUSY_MASK 0X40
+#define SDMA_MAX_INSTANCE 2
+
+#define PCIE_BUS_CLK    10000
+#define TCLK            (PCIE_BUS_CLK / 10)
+#define CC_DRM_ID_STRAPS__ATI_REV_ID_MASK		0xf0000000
+#define CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT 0x1c
+#define	PCIE_PORT_INDEX					0xe
+#define	PCIE_PORT_DATA					0xf
+#define EVERGREEN_PIF_PHY0_INDEX                        0x8
+#define EVERGREEN_PIF_PHY0_DATA                         0xc
+#define EVERGREEN_PIF_PHY1_INDEX                        0x10
+#define EVERGREEN_PIF_PHY1_DATA				0x14
+
+#define	MC_VM_FB_OFFSET					0x81a
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h
index f3e53b1..19802e9 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h
@@ -34,6 +34,7 @@
 #define mmUVD_UDEC_ADDR_CONFIG                                                  0x3bd3
 #define mmUVD_UDEC_DB_ADDR_CONFIG                                               0x3bd4
 #define mmUVD_UDEC_DBW_ADDR_CONFIG                                              0x3bd5
+#define mmUVD_NO_OP                                                             0x3bff
 #define mmUVD_SEMA_CNTL                                                         0x3d00
 #define mmUVD_LMI_EXT40_ADDR                                                    0x3d26
 #define mmUVD_CTX_INDEX                                                         0x3d28
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h
index eb4cf53..cc972d2 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h
@@ -34,6 +34,7 @@
 #define mmUVD_UDEC_ADDR_CONFIG                                                  0x3bd3
 #define mmUVD_UDEC_DB_ADDR_CONFIG                                               0x3bd4
 #define mmUVD_UDEC_DBW_ADDR_CONFIG                                              0x3bd5
+#define mmUVD_NO_OP                                                             0x3bff
 #define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW                                          0x3c69
 #define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH                                         0x3c68
 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW                                          0x3c67
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
index ec69869..378f4b6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
@@ -35,6 +35,7 @@
 #define mmUVD_UDEC_DB_ADDR_CONFIG                                               0x3bd4
 #define mmUVD_UDEC_DBW_ADDR_CONFIG                                              0x3bd5
 #define mmUVD_POWER_STATUS_U                                                    0x3bfd
+#define mmUVD_NO_OP                                                             0x3bff
 #define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW                                          0x3c69
 #define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH                                         0x3c68
 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW                                          0x3c67
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index 3493da5..4a4d379 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -494,6 +494,7 @@
   union
   {
     ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ULONG ulClockParams;                      //ULONG access for BE
     ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
   };
   UCHAR   ucRefDiv;                           //Output Parameter
@@ -526,6 +527,7 @@
   union
   {
     ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ULONG ulClockParams;                      //ULONG access for BE
     ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
   };
   UCHAR   ucRefDiv;                           //Output Parameter
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
old mode 100644
new mode 100755
index b86aba9..df7c18b
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -119,6 +119,8 @@
 	CGS_SYSTEM_INFO_PG_FLAGS,
 	CGS_SYSTEM_INFO_GFX_CU_INFO,
 	CGS_SYSTEM_INFO_GFX_SE_INFO,
+	CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID,
+	CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID,
 	CGS_SYSTEM_INFO_ID_MAXIMUM,
 };
 
@@ -159,6 +161,7 @@
  */
 struct cgs_firmware_info {
 	uint16_t		version;
+	uint16_t		fw_version;
 	uint16_t		feature_version;
 	uint32_t		image_size;
 	uint64_t		mc_addr;
diff --git a/drivers/gpu/drm/amd/powerplay/Kconfig b/drivers/gpu/drm/amd/powerplay/Kconfig
deleted file mode 100644
index af38033..0000000
--- a/drivers/gpu/drm/amd/powerplay/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config DRM_AMD_POWERPLAY
-	bool  "Enable AMD powerplay component"
-	depends on DRM_AMDGPU
-	default n
-	help
-	  select this option will enable AMD powerplay component.
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index abbb658b..7174f7a 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -31,6 +31,7 @@
 #include "eventmanager.h"
 #include "pp_debug.h"
 
+
 #define PP_CHECK(handle)						\
 	do {								\
 		if ((handle) == NULL || (handle)->pp_valid != PP_VALID)	\
@@ -162,12 +163,12 @@
 	pp_handle = (struct pp_instance *)handle;
 	eventmgr = pp_handle->eventmgr;
 
-	if (eventmgr != NULL || eventmgr->pp_eventmgr_fini != NULL)
+	if (eventmgr != NULL && eventmgr->pp_eventmgr_fini != NULL)
 		eventmgr->pp_eventmgr_fini(eventmgr);
 
 	smumgr = pp_handle->smu_mgr;
 
-	if (smumgr != NULL || smumgr->smumgr_funcs != NULL ||
+	if (smumgr != NULL && smumgr->smumgr_funcs != NULL &&
 		smumgr->smumgr_funcs->smu_fini != NULL)
 		smumgr->smumgr_funcs->smu_fini(smumgr);
 
@@ -190,11 +191,9 @@
 }
 
 
-static int pp_set_clockgating_state(void *handle,
-				    enum amd_clockgating_state state)
+int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id)
 {
 	struct pp_hwmgr  *hwmgr;
-	uint32_t msg_id, pp_state;
 
 	if (handle == NULL)
 		return -EINVAL;
@@ -208,76 +207,7 @@
 		return 0;
 	}
 
-	if (state == AMD_CG_STATE_UNGATE)
-		pp_state = 0;
-	else
-		pp_state = PP_STATE_CG | PP_STATE_LS;
-
-	/* Enable/disable GFX blocks clock gating through SMU */
-	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-			PP_BLOCK_GFX_CG,
-			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-			pp_state);
-	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-			PP_BLOCK_GFX_3D,
-			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-			pp_state);
-	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-			PP_BLOCK_GFX_RLC,
-			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-			pp_state);
-	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-			PP_BLOCK_GFX_CP,
-			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-			pp_state);
-	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-			PP_BLOCK_GFX_MG,
-			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-			pp_state);
-	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-
-	/* Enable/disable System blocks clock gating through SMU */
-	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-			PP_BLOCK_SYS_BIF,
-			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-			pp_state);
-	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-			PP_BLOCK_SYS_BIF,
-			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-			pp_state);
-	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-			PP_BLOCK_SYS_MC,
-			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-			pp_state);
-	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-			PP_BLOCK_SYS_ROM,
-			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-			pp_state);
-	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-			PP_BLOCK_SYS_DRM,
-			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-			pp_state);
-	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-			PP_BLOCK_SYS_HDP,
-			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-			pp_state);
-	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-			PP_BLOCK_SYS_SDMA,
-			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-			pp_state);
-	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-
-	return 0;
+	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
 }
 
 static int pp_set_powergating_state(void *handle,
@@ -361,7 +291,7 @@
 	.is_idle = pp_is_idle,
 	.wait_for_idle = pp_wait_for_idle,
 	.soft_reset = pp_sw_reset,
-	.set_clockgating_state = pp_set_clockgating_state,
+	.set_clockgating_state = NULL,
 	.set_powergating_state = pp_set_powergating_state,
 };
 
@@ -537,7 +467,6 @@
 		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
 		break;
 	case AMD_PP_EVENT_READJUST_POWER_STATE:
-		pp_handle->hwmgr->current_ps = pp_handle->hwmgr->boot_ps;
 		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
 		break;
 	default:
@@ -576,28 +505,6 @@
 	}
 }
 
-static void
-pp_debugfs_print_current_performance_level(void *handle,
-					       struct seq_file *m)
-{
-	struct pp_hwmgr  *hwmgr;
-
-	if (handle == NULL)
-		return;
-
-	hwmgr = ((struct pp_instance *)handle)->hwmgr;
-
-	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL)
-		return;
-
-	if (hwmgr->hwmgr_func->print_current_perforce_level == NULL) {
-		printk(KERN_INFO "%s was not implemented.\n", __func__);
-		return;
-	}
-
-	hwmgr->hwmgr_func->print_current_perforce_level(hwmgr, m);
-}
-
 static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
 {
 	struct pp_hwmgr  *hwmgr;
@@ -764,15 +671,12 @@
 	PP_CHECK_HW(hwmgr);
 
 	if (!hwmgr->hardcode_pp_table) {
-		hwmgr->hardcode_pp_table =
-				kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
+						   hwmgr->soft_pp_table_size,
+						   GFP_KERNEL);
 
 		if (!hwmgr->hardcode_pp_table)
 			return -ENOMEM;
-
-		/* to avoid powerplay crash when hardcode pptable is empty */
-		memcpy(hwmgr->hardcode_pp_table, hwmgr->soft_pp_table,
-				hwmgr->soft_pp_table_size);
 	}
 
 	memcpy(hwmgr->hardcode_pp_table, buf, size);
@@ -897,6 +801,25 @@
 	return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
 }
 
+static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value)
+{
+	struct pp_hwmgr *hwmgr;
+
+	if (!handle)
+		return -EINVAL;
+
+	hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+	PP_CHECK_HW(hwmgr);
+
+	if (hwmgr->hwmgr_func->read_sensor == NULL) {
+		printk(KERN_INFO "%s was not implemented.\n", __func__);
+		return 0;
+	}
+
+	return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value);
+}
+
 const struct amd_powerplay_funcs pp_dpm_funcs = {
 	.get_temperature = pp_dpm_get_temperature,
 	.load_firmware = pp_dpm_load_fw,
@@ -909,7 +832,6 @@
 	.powergate_vce = pp_dpm_powergate_vce,
 	.powergate_uvd = pp_dpm_powergate_uvd,
 	.dispatch_tasks = pp_dpm_dispatch_tasks,
-	.print_current_performance_level = pp_debugfs_print_current_performance_level,
 	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
 	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
 	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
@@ -923,6 +845,7 @@
 	.set_sclk_od = pp_dpm_set_sclk_od,
 	.get_mclk_od = pp_dpm_get_mclk_od,
 	.set_mclk_od = pp_dpm_set_mclk_od,
+	.read_sensor = pp_dpm_read_sensor,
 };
 
 static int amd_pp_instance_init(struct amd_pp_init *pp_init,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 635fc4b..8cee4e0 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -49,6 +49,7 @@
 	uninitialize_display_phy_access_tasks,
 	disable_gfx_voltage_island_power_gating_tasks,
 	disable_gfx_clock_gating_tasks,
+	uninitialize_thermal_controller_tasks,
 	set_boot_state_tasks,
 	adjust_power_state_tasks,
 	disable_dynamic_state_management_tasks,
@@ -262,6 +263,8 @@
 	unblock_adjust_power_state_tasks,
 	set_cpu_power_state,
 	notify_hw_power_source_tasks,
+	get_2d_performance_state_tasks,
+	set_performance_state_tasks,
 	/* updateDALConfigurationTasks,
 	variBrightDisplayConfigurationChangeTasks, */
 	adjust_power_state_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
index a46225c..4899088 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
@@ -70,11 +70,12 @@
 	int i;
 
 	table_entries = hwmgr->num_ps;
+
 	state = hwmgr->ps;
 
 	for (i = 0; i < table_entries; i++) {
 		if (state->id == *state_id) {
-			hwmgr->request_ps = state;
+			memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
 			return 0;
 		}
 		state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
@@ -100,13 +101,14 @@
 	if (requested == NULL)
 		return 0;
 
+	phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
+
 	if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal)))
 		equal = false;
 
 	if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
-		phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
 		phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
-		hwmgr->current_ps = requested;
+		memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
 	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index f7ce4cb..5fff1d6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -3,14 +3,12 @@
 # It provides the hardware management services for the driver.
 
 HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \
-	       hardwaremanager.o pp_acpi.o cz_hwmgr.o \
-               cz_clockpowergating.o \
-	       tonga_processpptables.o ppatomctrl.o \
-               tonga_hwmgr.o pppcielanes.o  tonga_thermal.o\
-               fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \
-               fiji_clockpowergating.o fiji_thermal.o \
-	       polaris10_hwmgr.o polaris10_powertune.o polaris10_thermal.o \
-	       polaris10_clockpowergating.o
+		hardwaremanager.o pp_acpi.o cz_hwmgr.o \
+		cz_clockpowergating.o pppcielanes.o\
+		process_pptables_v1_0.o ppatomctrl.o \
+		smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \
+		smu7_clockpowergating.o
+
 
 AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 8cc0df9..9604249 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -178,7 +178,6 @@
 	int result;
 
 	cz_hwmgr->gfx_ramp_step = 256*25/100;
-
 	cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */
 
 	for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++)
@@ -186,33 +185,19 @@
 
 	cz_hwmgr->mgcg_cgtt_local0 = 0x00000000;
 	cz_hwmgr->mgcg_cgtt_local1 = 0x00000000;
-
 	cz_hwmgr->clock_slow_down_freq = 25000;
-
 	cz_hwmgr->skip_clock_slow_down = 1;
-
 	cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */
-
 	cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
-
 	cz_hwmgr->voting_rights_clients = 0x00C00033;
-
 	cz_hwmgr->static_screen_threshold = 8;
-
 	cz_hwmgr->ddi_power_gating_disabled = 0;
-
 	cz_hwmgr->bapm_enabled = 1;
-
 	cz_hwmgr->voltage_drop_threshold = 0;
-
 	cz_hwmgr->gfx_power_gating_threshold = 500;
-
 	cz_hwmgr->vce_slow_sclk_threshold = 20000;
-
 	cz_hwmgr->dce_slow_sclk_threshold = 30000;
-
 	cz_hwmgr->disable_driver_thermal_policy = 1;
-
 	cz_hwmgr->disable_nb_ps3_in_battery = 0;
 
 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -221,9 +206,6 @@
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 				    PHM_PlatformCaps_NonABMSupportInPPLib);
 
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-					   PHM_PlatformCaps_SclkDeepSleep);
-
 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
 					PHM_PlatformCaps_DynamicM3Arbiter);
 
@@ -233,9 +215,7 @@
 				  PHM_PlatformCaps_DynamicPatchPowerState);
 
 	cz_hwmgr->thermal_auto_throttling_treshold = 0;
-
 	cz_hwmgr->tdr_clock = 0;
-
 	cz_hwmgr->disable_gfx_power_gating_in_uvd = 0;
 
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
@@ -450,19 +430,12 @@
 			(uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index;
 
 	cz_hwmgr->boot_power_level.dsDividerIndex = 0;
-
 	cz_hwmgr->boot_power_level.ssDividerIndex = 0;
-
 	cz_hwmgr->boot_power_level.allowGnbSlow = 1;
-
 	cz_hwmgr->boot_power_level.forceNBPstate = 0;
-
 	cz_hwmgr->boot_power_level.hysteresis_up = 0;
-
 	cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0;
-
 	cz_hwmgr->boot_power_level.display_wm = 0;
-
 	cz_hwmgr->boot_power_level.vce_wm = 0;
 
 	return 0;
@@ -749,7 +722,6 @@
 		cz_hwmgr->sclk_dpm.soft_max_clk  = table->entries[table->count - 1].clk;
 
 	clock = hwmgr->display_config.min_core_set_clock;
-;
 	if (clock == 0)
 		printk(KERN_INFO "[ powerplay ] min_core_set_clock not set\n");
 
@@ -832,7 +804,7 @@
 
 	smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
 					PPSMC_MSG_SetWatermarkFrequency,
-				      cz_hwmgr->sclk_dpm.soft_max_clk);
+					cz_hwmgr->sclk_dpm.soft_max_clk);
 
 	return 0;
 }
@@ -858,9 +830,9 @@
 		PP_DBG_LOG("enabling ALL SMU features.\n");
 		dpm_features |= NB_DPM_MASK;
 		ret = smum_send_msg_to_smc_with_parameter(
-							     hwmgr->smumgr,
-					 PPSMC_MSG_EnableAllSmuFeatures,
-							     dpm_features);
+							  hwmgr->smumgr,
+							  PPSMC_MSG_EnableAllSmuFeatures,
+							  dpm_features);
 		if (ret == 0)
 			cz_hwmgr->is_nb_dpm_enabled = true;
 	}
@@ -1246,7 +1218,7 @@
 
 static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
 {
-	if (hwmgr != NULL || hwmgr->backend != NULL) {
+	if (hwmgr != NULL && hwmgr->backend != NULL) {
 		kfree(hwmgr->backend);
 		kfree(hwmgr);
 	}
@@ -1402,10 +1374,12 @@
 						   PPSMC_MSG_SetUvdHardMin));
 
 			cz_enable_disable_uvd_dpm(hwmgr, true);
-		} else
+		} else {
 			cz_enable_disable_uvd_dpm(hwmgr, true);
-	} else
+		}
+	} else {
 		cz_enable_disable_uvd_dpm(hwmgr, false);
+	}
 
 	return 0;
 }
@@ -1564,78 +1538,6 @@
 	return sizeof(struct cz_power_state);
 }
 
-static void
-cz_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
-{
-	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
-	struct phm_clock_voltage_dependency_table *table =
-				hwmgr->dyn_state.vddc_dependency_on_sclk;
-
-	struct phm_vce_clock_voltage_dependency_table *vce_table =
-		hwmgr->dyn_state.vce_clock_voltage_dependency_table;
-
-	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
-		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
-
-	uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
-					TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
-	uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
-					TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
-	uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
-					TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
-
-	uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
-	uint16_t vddnb, vddgfx;
-	int result;
-
-	if (sclk_index >= NUM_SCLK_LEVELS) {
-		seq_printf(m, "\n invalid sclk dpm profile %d\n", sclk_index);
-	} else {
-		sclk = table->entries[sclk_index].clk;
-		seq_printf(m, "\n index: %u sclk: %u MHz\n", sclk_index, sclk/100);
-	}
-
-	tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
-		CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
-	vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
-	tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
-		CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
-	vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
-	seq_printf(m, "\n vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
-
-	seq_printf(m, "\n uvd    %sabled\n", cz_hwmgr->uvd_power_gated ? "dis" : "en");
-	if (!cz_hwmgr->uvd_power_gated) {
-		if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
-			seq_printf(m, "\n invalid uvd dpm level %d\n", uvd_index);
-		} else {
-			vclk = uvd_table->entries[uvd_index].vclk;
-			dclk = uvd_table->entries[uvd_index].dclk;
-			seq_printf(m, "\n index: %u uvd vclk: %u MHz dclk: %u MHz\n", uvd_index, vclk/100, dclk/100);
-		}
-	}
-
-	seq_printf(m, "\n vce    %sabled\n", cz_hwmgr->vce_power_gated ? "dis" : "en");
-	if (!cz_hwmgr->vce_power_gated) {
-		if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
-			seq_printf(m, "\n invalid vce dpm level %d\n", vce_index);
-		} else {
-			ecclk = vce_table->entries[vce_index].ecclk;
-			seq_printf(m, "\n index: %u vce ecclk: %u MHz\n", vce_index, ecclk/100);
-		}
-	}
-
-	result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity);
-	if (0 == result) {
-		activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
-		activity_percent = activity_percent > 100 ? 100 : activity_percent;
-	} else {
-		activity_percent = 50;
-	}
-
-	seq_printf(m, "\n [GPU load]: %u %%\n\n", activity_percent);
-}
-
 static void cz_hw_print_display_cfg(
 	const struct cc6_settings *cc6_settings)
 {
@@ -1690,13 +1592,10 @@
 	struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
 
 	if (separation_time !=
-		hw_data->cc6_settings.cpu_pstate_separation_time
-		|| cc6_disable !=
-		hw_data->cc6_settings.cpu_cc6_disable
-		|| pstate_disable !=
-		hw_data->cc6_settings.cpu_pstate_disable
-		|| pstate_switch_disable !=
-		hw_data->cc6_settings.nb_pstate_switch_disable) {
+	    hw_data->cc6_settings.cpu_pstate_separation_time ||
+	    cc6_disable != hw_data->cc6_settings.cpu_cc6_disable ||
+	    pstate_disable != hw_data->cc6_settings.cpu_pstate_disable ||
+	    pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) {
 
 		hw_data->cc6_settings.cc6_setting_changed = true;
 
@@ -1799,8 +1698,7 @@
 	ps = cast_const_PhwCzPowerState(state);
 
 	level_index = index > ps->level - 1 ? ps->level - 1 : index;
-
-	level->coreClock  = ps->levels[level_index].engineClock;
+	level->coreClock = ps->levels[level_index].engineClock;
 
 	if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
 		for (i = 1; i < ps->level; i++) {
@@ -1887,6 +1785,125 @@
 	return 0;
 }
 
+static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+{
+	int actual_temp = 0;
+	uint32_t val = cgs_read_ind_register(hwmgr->device,
+					     CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
+	uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
+
+	if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
+		actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	else
+		actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+	return actual_temp;
+}
+
+static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
+{
+	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+	struct phm_clock_voltage_dependency_table *table =
+				hwmgr->dyn_state.vddc_dependency_on_sclk;
+
+	struct phm_vce_clock_voltage_dependency_table *vce_table =
+		hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+
+	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
+		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+
+	uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
+					TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
+	uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+					TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
+	uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+					TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
+
+	uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
+	uint16_t vddnb, vddgfx;
+	int result;
+
+	switch (idx) {
+	case AMDGPU_PP_SENSOR_GFX_SCLK:
+		if (sclk_index < NUM_SCLK_LEVELS) {
+			sclk = table->entries[sclk_index].clk;
+			*value = sclk;
+			return 0;
+		}
+		return -EINVAL;
+	case AMDGPU_PP_SENSOR_VDDNB:
+		tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
+			CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
+		vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
+		*value = vddnb;
+		return 0;
+	case AMDGPU_PP_SENSOR_VDDGFX:
+		tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
+			CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
+		vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
+		*value = vddgfx;
+		return 0;
+	case AMDGPU_PP_SENSOR_UVD_VCLK:
+		if (!cz_hwmgr->uvd_power_gated) {
+			if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+				return -EINVAL;
+			} else {
+				vclk = uvd_table->entries[uvd_index].vclk;
+				*value = vclk;
+				return 0;
+			}
+		}
+		*value = 0;
+		return 0;
+	case AMDGPU_PP_SENSOR_UVD_DCLK:
+		if (!cz_hwmgr->uvd_power_gated) {
+			if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+				return -EINVAL;
+			} else {
+				dclk = uvd_table->entries[uvd_index].dclk;
+				*value = dclk;
+				return 0;
+			}
+		}
+		*value = 0;
+		return 0;
+	case AMDGPU_PP_SENSOR_VCE_ECCLK:
+		if (!cz_hwmgr->vce_power_gated) {
+			if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+				return -EINVAL;
+			} else {
+				ecclk = vce_table->entries[vce_index].ecclk;
+				*value = ecclk;
+				return 0;
+			}
+		}
+		*value = 0;
+		return 0;
+	case AMDGPU_PP_SENSOR_GPU_LOAD:
+		result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity);
+		if (0 == result) {
+			activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
+			activity_percent = activity_percent > 100 ? 100 : activity_percent;
+		} else {
+			activity_percent = 50;
+		}
+		*value = activity_percent;
+		return 0;
+	case AMDGPU_PP_SENSOR_UVD_POWER:
+		*value = cz_hwmgr->uvd_power_gated ? 0 : 1;
+		return 0;
+	case AMDGPU_PP_SENSOR_VCE_POWER:
+		*value = cz_hwmgr->vce_power_gated ? 0 : 1;
+		return 0;
+	case AMDGPU_PP_SENSOR_GPU_TEMP:
+		*value = cz_thermal_get_temperature(hwmgr);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
 static const struct pp_hwmgr_func cz_hwmgr_funcs = {
 	.backend_init = cz_hwmgr_backend_init,
 	.backend_fini = cz_hwmgr_backend_fini,
@@ -1902,7 +1919,6 @@
 	.patch_boot_state = cz_dpm_patch_boot_state,
 	.get_pp_table_entry = cz_dpm_get_pp_table_entry,
 	.get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries,
-	.print_current_perforce_level = cz_print_current_perforce_level,
 	.set_cpu_power_state = cz_set_cpu_power_state,
 	.store_cc6_data = cz_store_cc6_data,
 	.force_clock_level = cz_force_clock_level,
@@ -1912,6 +1928,7 @@
 	.get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks,
 	.get_clock_by_type = cz_get_clock_by_type,
 	.get_max_high_clocks = cz_get_max_high_clocks,
+	.read_sensor = cz_read_sensor,
 };
 
 int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
deleted file mode 100644
index 5afe820..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "fiji_clockpowergating.h"
-#include "fiji_ppsmc.h"
-#include "fiji_hwmgr.h"
-
-int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	data->uvd_power_gated = false;
-	data->vce_power_gated = false;
-	data->samu_power_gated = false;
-	data->acp_power_gated = false;
-
-	return 0;
-}
-
-int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	if (data->uvd_power_gated == bgate)
-		return 0;
-
-	data->uvd_power_gated = bgate;
-
-	if (bgate) {
-		cgs_set_clockgating_state(hwmgr->device,
-					  AMD_IP_BLOCK_TYPE_UVD,
-					  AMD_CG_STATE_GATE);
-		fiji_update_uvd_dpm(hwmgr, true);
-	} else {
-		fiji_update_uvd_dpm(hwmgr, false);
-		cgs_set_clockgating_state(hwmgr->device,
-					  AMD_IP_BLOCK_TYPE_UVD,
-					  AMD_CG_STATE_UNGATE);
-	}
-
-	return 0;
-}
-
-int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_set_power_state_input states;
-	const struct pp_power_state  *pcurrent;
-	struct pp_power_state  *requested;
-
-	if (data->vce_power_gated == bgate)
-		return 0;
-
-	data->vce_power_gated = bgate;
-
-	pcurrent = hwmgr->current_ps;
-	requested = hwmgr->request_ps;
-
-	states.pcurrent_state = &(pcurrent->hardware);
-	states.pnew_state = &(requested->hardware);
-
-	fiji_update_vce_dpm(hwmgr, &states);
-	fiji_enable_disable_vce_dpm(hwmgr, !bgate);
-
-	return 0;
-}
-
-int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	if (data->samu_power_gated == bgate)
-		return 0;
-
-	data->samu_power_gated = bgate;
-
-	if (bgate)
-		fiji_update_samu_dpm(hwmgr, true);
-	else
-		fiji_update_samu_dpm(hwmgr, false);
-
-	return 0;
-}
-
-int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	if (data->acp_power_gated == bgate)
-		return 0;
-
-	data->acp_power_gated = bgate;
-
-	if (bgate)
-		fiji_update_acp_dpm(hwmgr, true);
-	else
-		fiji_update_acp_dpm(hwmgr, false);
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h
deleted file mode 100644
index 33af5f5..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _FIJI_CLOCK_POWER_GATING_H_
-#define _FIJI_CLOCK_POWER_GATING_H_
-
-#include "fiji_hwmgr.h"
-#include "pp_asicblocks.h"
-
-extern int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-extern int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-extern int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
-extern int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
-extern int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
-#endif /* _TONGA_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h
deleted file mode 100644
index 32d43e8..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef FIJI_DYN_DEFAULTS_H
-#define FIJI_DYN_DEFAULTS_H
-
-/** \file
-* Volcanic Islands Dynamic default parameters.
-*/
-
-enum FIJIdpm_TrendDetection
-{
-    FIJIAdpm_TrendDetection_AUTO,
-    FIJIAdpm_TrendDetection_UP,
-    FIJIAdpm_TrendDetection_DOWN
-};
-typedef enum FIJIdpm_TrendDetection FIJIdpm_TrendDetection;
-
-/* We need to fill in the default values!!!!!!!!!!!!!!!!!!!!!!! */
-
-/* Bit vector representing same fields as hardware register. */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0    0x3FFFC102  /* CP_Gfx_busy ????
-                                                         * HDP_busy
-                                                         * IH_busy
-                                                         * UVD_busy
-                                                         * VCE_busy
-                                                         * ACP_busy
-                                                         * SAMU_busy
-                                                         * SDMA enabled */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1    0x000400  /* FE_Gfx_busy  - Intended for primary usage.   Rest are for flexibility. ????
-                                                       * SH_Gfx_busy
-                                                       * RB_Gfx_busy
-                                                       * VCE_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2    0xC00080  /* SH_Gfx_busy - Intended for primary usage.   Rest are for flexibility.
-                                                       * FE_Gfx_busy
-                                                       * RB_Gfx_busy
-                                                       * ACP_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3    0xC00200  /* RB_Gfx_busy - Intended for primary usage.   Rest are for flexibility.
-                                                       * FE_Gfx_busy
-                                                       * SH_Gfx_busy
-                                                       * UVD_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4    0xC01680  /* UVD_busy
-                                                       * VCE_busy
-                                                       * ACP_busy
-                                                       * SAMU_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5    0xC00033  /* GFX, HDP */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6    0xC00033  /* GFX, HDP */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7    0x3FFFC000  /* GFX, HDP */
-
-
-/* thermal protection counter (units). */
-#define PPFIJI_THERMALPROTECTCOUNTER_DFLT            0x200 /* ~19us */
-
-/* static screen threshold unit */
-#define PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT    0
-
-/* static screen threshold */
-#define PPFIJI_STATICSCREENTHRESHOLD_DFLT        0x00C8
-
-/* gfx idle clock stop threshold */
-#define PPFIJI_GFXIDLECLOCKSTOPTHRESHOLD_DFLT        0x200 /* ~19us with static screen threshold unit of 0 */
-
-/* Fixed reference divider to use when building baby stepping tables. */
-#define PPFIJI_REFERENCEDIVIDER_DFLT                  4
-
-/* ULV voltage change delay time
- * Used to be delay_vreg in N.I. split for S.I.
- * Using N.I. delay_vreg value as default
- * ReferenceClock = 2700
- * VoltageResponseTime = 1000
- * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687
- */
-#define PPFIJI_ULVVOLTAGECHANGEDELAY_DFLT             1687
-
-#define PPFIJI_CGULVPARAMETER_DFLT			0x00040035
-#define PPFIJI_CGULVCONTROL_DFLT			0x00007450
-#define PPFIJI_TARGETACTIVITY_DFLT			30 /* 30%*/
-#define PPFIJI_MCLK_TARGETACTIVITY_DFLT		10 /* 10% */
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
deleted file mode 100644
index 120a9e2..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ /dev/null
@@ -1,5599 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include "linux/delay.h"
-
-#include "hwmgr.h"
-#include "fiji_smumgr.h"
-#include "atombios.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "atombios.h"
-#include "cgs_common.h"
-#include "fiji_dyn_defaults.h"
-#include "fiji_powertune.h"
-#include "smu73.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-#include "pppcielanes.h"
-#include "fiji_hwmgr.h"
-#include "tonga_processpptables.h"
-#include "tonga_pptable.h"
-#include "pp_debug.h"
-#include "pp_acpi.h"
-#include "amd_pcie_helpers.h"
-#include "cgs_linux.h"
-#include "ppinterrupt.h"
-
-#include "fiji_clockpowergating.h"
-#include "fiji_thermal.h"
-
-#define VOLTAGE_SCALE	4
-#define SMC_RAM_END		0x40000
-#define VDDC_VDDCI_DELTA	300
-
-#define MC_SEQ_MISC0_GDDR5_SHIFT 28
-#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
-#define MC_SEQ_MISC0_GDDR5_VALUE 5
-
-#define MC_CG_ARB_FREQ_F0           0x0a /* boot-up default */
-#define MC_CG_ARB_FREQ_F1           0x0b
-#define MC_CG_ARB_FREQ_F2           0x0c
-#define MC_CG_ARB_FREQ_F3           0x0d
-
-/* From smc_reg.h */
-#define SMC_CG_IND_START            0xc0030000
-#define SMC_CG_IND_END              0xc0040000  /* First byte after SMC_CG_IND */
-
-#define VOLTAGE_SCALE               4
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
-
-#define VDDC_VDDCI_DELTA            300
-
-#define ixSWRST_COMMAND_1           0x1400103
-#define MC_SEQ_CNTL__CAC_EN_MASK    0x40000000
-
-/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
-enum DPM_EVENT_SRC {
-    DPM_EVENT_SRC_ANALOG = 0,               /* Internal analog trip point */
-    DPM_EVENT_SRC_EXTERNAL = 1,             /* External (GPIO 17) signal */
-    DPM_EVENT_SRC_DIGITAL = 2,              /* Internal digital trip point (DIG_THERM_DPM) */
-    DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,   /* Internal analog or external */
-    DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4   /* Internal digital or external */
-};
-
-
-/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
- * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
- */
-static const uint16_t fiji_clock_stretcher_lookup_table[2][4] =
-{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
-
-/* [FF, SS] type, [] 4 voltage ranges, and
- * [Floor Freq, Boundary Freq, VID min , VID max]
- */
-static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] =
-{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
-  { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
- * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
- */
-static const uint8_t fiji_clock_stretch_amount_conversion[2][6] =
-{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
-
-static const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic);
-
-struct fiji_power_state *cast_phw_fiji_power_state(
-				  struct pp_hw_power_state *hw_ps)
-{
-	PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
-				"Invalid Powerstate Type!",
-				 return NULL;);
-
-	return (struct fiji_power_state *)hw_ps;
-}
-
-const struct fiji_power_state *cast_const_phw_fiji_power_state(
-				 const struct pp_hw_power_state *hw_ps)
-{
-	PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
-				"Invalid Powerstate Type!",
-				 return NULL;);
-
-	return (const struct fiji_power_state *)hw_ps;
-}
-
-static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
-	return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
-			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
-			? true : false;
-}
-
-static void fiji_init_dpm_defaults(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct fiji_ulv_parm *ulv = &data->ulv;
-
-	ulv->cg_ulv_parameter = PPFIJI_CGULVPARAMETER_DFLT;
-	data->voting_rights_clients0 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0;
-	data->voting_rights_clients1 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1;
-	data->voting_rights_clients2 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2;
-	data->voting_rights_clients3 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3;
-	data->voting_rights_clients4 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4;
-	data->voting_rights_clients5 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5;
-	data->voting_rights_clients6 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6;
-	data->voting_rights_clients7 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7;
-
-	data->static_screen_threshold_unit =
-			PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT;
-	data->static_screen_threshold =
-			PPFIJI_STATICSCREENTHRESHOLD_DFLT;
-
-	/* Unset ABM cap as it moved to DAL.
-	 * Add PHM_PlatformCaps_NonABMSupportInPPLib
-	 * for re-direct ABM related request to DAL
-	 */
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_ABM);
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_NonABMSupportInPPLib);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_DynamicACTiming);
-
-	fiji_initialize_power_tune_defaults(hwmgr);
-
-	data->mclk_stutter_mode_threshold = 60000;
-	data->pcie_gen_performance.max = PP_PCIEGen1;
-	data->pcie_gen_performance.min = PP_PCIEGen3;
-	data->pcie_gen_power_saving.max = PP_PCIEGen1;
-	data->pcie_gen_power_saving.min = PP_PCIEGen3;
-	data->pcie_lane_performance.max = 0;
-	data->pcie_lane_performance.min = 16;
-	data->pcie_lane_power_saving.max = 0;
-	data->pcie_lane_power_saving.min = 16;
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_DynamicUVDState);
-}
-
-static int fiji_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
-	phm_ppt_v1_voltage_lookup_table *lookup_table,
-	uint16_t virtual_voltage_id, int32_t *sclk)
-{
-	uint8_t entryId;
-	uint8_t voltageId;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
-
-	/* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
-	for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) {
-		voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd;
-		if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
-			break;
-	}
-
-	PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count,
-			"Can't find requested voltage id in vdd_dep_on_sclk table!",
-			return -EINVAL;
-			);
-
-	*sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk;
-
-	return 0;
-}
-
-/**
-* Get Leakage VDDC based on leakage ID.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int fiji_get_evv_voltages(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	uint16_t    vv_id;
-	uint16_t    vddc = 0;
-	uint16_t    evv_default = 1150;
-	uint16_t    i, j;
-	uint32_t  sclk = 0;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)hwmgr->pptable;
-	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
-			table_info->vdd_dep_on_sclk;
-	int result;
-
-	for (i = 0; i < FIJI_MAX_LEAKAGE_COUNT; i++) {
-		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
-		if (!fiji_get_sclk_for_voltage_evv(hwmgr,
-				table_info->vddc_lookup_table, vv_id, &sclk)) {
-			if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-					PHM_PlatformCaps_ClockStretcher)) {
-				for (j = 1; j < sclk_table->count; j++) {
-					if (sclk_table->entries[j].clk == sclk &&
-							sclk_table->entries[j].cks_enable == 0) {
-						sclk += 5000;
-						break;
-					}
-				}
-			}
-
-			if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-					PHM_PlatformCaps_EnableDriverEVV))
-				result = atomctrl_calculate_voltage_evv_on_sclk(hwmgr,
-						VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc, i, true);
-			else
-				result = -EINVAL;
-
-			if (result)
-				result = atomctrl_get_voltage_evv_on_sclk(hwmgr,
-						VOLTAGE_TYPE_VDDC, sclk,vv_id, &vddc);
-
-			/* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
-			PP_ASSERT_WITH_CODE((vddc < 2000),
-					"Invalid VDDC value, greater than 2v!", result = -EINVAL;);
-
-			if (result)
-				/* 1.15V is the default safe value for Fiji */
-				vddc = evv_default;
-
-			/* the voltage should not be zero nor equal to leakage ID */
-			if (vddc != 0 && vddc != vv_id) {
-				data->vddc_leakage.actual_voltage
-				[data->vddc_leakage.count] = vddc;
-				data->vddc_leakage.leakage_id
-				[data->vddc_leakage.count] = vv_id;
-				data->vddc_leakage.count++;
-			}
-		}
-	}
-	return 0;
-}
-
-/**
- * Change virtual leakage voltage to actual value.
- *
- * @param     hwmgr  the address of the powerplay hardware manager.
- * @param     pointer to changing voltage
- * @param     pointer to leakage table
- */
-static void fiji_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
-		uint16_t *voltage, struct fiji_leakage_voltage *leakage_table)
-{
-	uint32_t index;
-
-	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
-	for (index = 0; index < leakage_table->count; index++) {
-		/* if this voltage matches a leakage voltage ID */
-		/* patch with actual leakage voltage */
-		if (leakage_table->leakage_id[index] == *voltage) {
-			*voltage = leakage_table->actual_voltage[index];
-			break;
-		}
-	}
-
-	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
-		printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
-}
-
-/**
-* Patch voltage lookup table by EVV leakages.
-*
-* @param     hwmgr  the address of the powerplay hardware manager.
-* @param     pointer to voltage lookup table
-* @param     pointer to leakage table
-* @return     always 0
-*/
-static int fiji_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
-		phm_ppt_v1_voltage_lookup_table *lookup_table,
-		struct fiji_leakage_voltage *leakage_table)
-{
-	uint32_t i;
-
-	for (i = 0; i < lookup_table->count; i++)
-		fiji_patch_with_vdd_leakage(hwmgr,
-				&lookup_table->entries[i].us_vdd, leakage_table);
-
-	return 0;
-}
-
-static int fiji_patch_clock_voltage_limits_with_vddc_leakage(
-		struct pp_hwmgr *hwmgr, struct fiji_leakage_voltage *leakage_table,
-		uint16_t *vddc)
-{
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	fiji_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
-	hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
-			table_info->max_clock_voltage_on_dc.vddc;
-	return 0;
-}
-
-static int fiji_patch_voltage_dependency_tables_with_lookup_table(
-		struct pp_hwmgr *hwmgr)
-{
-	uint8_t entryId;
-	uint8_t voltageId;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
-			table_info->vdd_dep_on_sclk;
-	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
-			table_info->vdd_dep_on_mclk;
-	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-			table_info->mm_dep_table;
-
-	for (entryId = 0; entryId < sclk_table->count; ++entryId) {
-		voltageId = sclk_table->entries[entryId].vddInd;
-		sclk_table->entries[entryId].vddc =
-				table_info->vddc_lookup_table->entries[voltageId].us_vdd;
-	}
-
-	for (entryId = 0; entryId < mclk_table->count; ++entryId) {
-		voltageId = mclk_table->entries[entryId].vddInd;
-		mclk_table->entries[entryId].vddc =
-			table_info->vddc_lookup_table->entries[voltageId].us_vdd;
-	}
-
-	for (entryId = 0; entryId < mm_table->count; ++entryId) {
-		voltageId = mm_table->entries[entryId].vddcInd;
-		mm_table->entries[entryId].vddc =
-			table_info->vddc_lookup_table->entries[voltageId].us_vdd;
-	}
-
-	return 0;
-
-}
-
-static int fiji_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
-{
-	/* Need to determine if we need calculated voltage. */
-	return 0;
-}
-
-static int fiji_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
-{
-	/* Need to determine if we need calculated voltage from mm table. */
-	return 0;
-}
-
-static int fiji_sort_lookup_table(struct pp_hwmgr *hwmgr,
-		struct phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
-	uint32_t table_size, i, j;
-	struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
-	table_size = lookup_table->count;
-
-	PP_ASSERT_WITH_CODE(0 != lookup_table->count,
-		"Lookup table is empty", return -EINVAL);
-
-	/* Sorting voltages */
-	for (i = 0; i < table_size - 1; i++) {
-		for (j = i + 1; j > 0; j--) {
-			if (lookup_table->entries[j].us_vdd <
-					lookup_table->entries[j - 1].us_vdd) {
-				tmp_voltage_lookup_record = lookup_table->entries[j - 1];
-				lookup_table->entries[j - 1] = lookup_table->entries[j];
-				lookup_table->entries[j] = tmp_voltage_lookup_record;
-			}
-		}
-	}
-
-	return 0;
-}
-
-static int fiji_complete_dependency_tables(struct pp_hwmgr *hwmgr)
-{
-	int result = 0;
-	int tmp_result;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	tmp_result = fiji_patch_lookup_table_with_leakage(hwmgr,
-			table_info->vddc_lookup_table, &(data->vddc_leakage));
-	if (tmp_result)
-		result = tmp_result;
-
-	tmp_result = fiji_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
-			&(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
-	if (tmp_result)
-		result = tmp_result;
-
-	tmp_result = fiji_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
-	if (tmp_result)
-		result = tmp_result;
-
-	tmp_result = fiji_calc_voltage_dependency_tables(hwmgr);
-	if (tmp_result)
-		result = tmp_result;
-
-	tmp_result = fiji_calc_mm_voltage_dependency_table(hwmgr);
-	if (tmp_result)
-		result = tmp_result;
-
-	tmp_result = fiji_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
-	if(tmp_result)
-		result = tmp_result;
-
-	return result;
-}
-
-static int fiji_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
-			table_info->vdd_dep_on_sclk;
-	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
-			table_info->vdd_dep_on_mclk;
-
-	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
-		"VDD dependency on SCLK table is missing.	\
-		This table is mandatory", return -EINVAL);
-	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
-		"VDD dependency on SCLK table has to have is missing.	\
-		This table is mandatory", return -EINVAL);
-
-	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
-		"VDD dependency on MCLK table is missing.	\
-		This table is mandatory", return -EINVAL);
-	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
-		"VDD dependency on MCLK table has to have is missing.	 \
-		This table is mandatory", return -EINVAL);
-
-	data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc;
-	data->max_vddc_in_pptable =	(uint16_t)allowed_sclk_vdd_table->
-			entries[allowed_sclk_vdd_table->count - 1].vddc;
-
-	table_info->max_clock_voltage_on_ac.sclk =
-		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
-	table_info->max_clock_voltage_on_ac.mclk =
-		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
-	table_info->max_clock_voltage_on_ac.vddc =
-		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
-	table_info->max_clock_voltage_on_ac.vddci =
-		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
-
-	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
-		table_info->max_clock_voltage_on_ac.sclk;
-	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
-		table_info->max_clock_voltage_on_ac.mclk;
-	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
-		table_info->max_clock_voltage_on_ac.vddc;
-	hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
-		table_info->max_clock_voltage_on_ac.vddci;
-
-	return 0;
-}
-
-static uint16_t fiji_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
-{
-	uint32_t speedCntl = 0;
-
-	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
-	speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
-			ixPCIE_LC_SPEED_CNTL);
-	return((uint16_t)PHM_GET_FIELD(speedCntl,
-			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
-}
-
-static int fiji_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
-{
-	uint32_t link_width;
-
-	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
-	link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
-			PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
-
-	PP_ASSERT_WITH_CODE((7 >= link_width),
-			"Invalid PCIe lane width!", return 0);
-
-	return decode_pcie_lane_width(link_width);
-}
-
-/** Patch the Boot State to match VBIOS boot clocks and voltage.
-*
-* @param hwmgr Pointer to the hardware manager.
-* @param pPowerState The address of the PowerState instance being created.
-*
-*/
-static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr,
-		struct pp_hw_power_state *hw_ps)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct fiji_power_state *ps = (struct fiji_power_state *)hw_ps;
-	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
-	uint16_t size;
-	uint8_t frev, crev;
-	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-
-	/* First retrieve the Boot clocks and VDDC from the firmware info table.
-	 * We assume here that fw_info is unchanged if this call fails.
-	 */
-	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
-			hwmgr->device, index,
-			&size, &frev, &crev);
-	if (!fw_info)
-		/* During a test, there is no firmware info table. */
-		return 0;
-
-	/* Patch the state. */
-	data->vbios_boot_state.sclk_bootup_value =
-			le32_to_cpu(fw_info->ulDefaultEngineClock);
-	data->vbios_boot_state.mclk_bootup_value =
-			le32_to_cpu(fw_info->ulDefaultMemoryClock);
-	data->vbios_boot_state.mvdd_bootup_value =
-			le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
-	data->vbios_boot_state.vddc_bootup_value =
-			le16_to_cpu(fw_info->usBootUpVDDCVoltage);
-	data->vbios_boot_state.vddci_bootup_value =
-			le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
-	data->vbios_boot_state.pcie_gen_bootup_value =
-			fiji_get_current_pcie_speed(hwmgr);
-	data->vbios_boot_state.pcie_lane_bootup_value =
-			(uint16_t)fiji_get_current_pcie_lane_number(hwmgr);
-
-	/* set boot power state */
-	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
-	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
-	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
-	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
-
-	return 0;
-}
-
-static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
-	return phm_hwmgr_backend_fini(hwmgr);
-}
-
-static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data;
-	uint32_t i;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	bool stay_in_boot;
-	int result;
-
-	data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
-	if (data == NULL)
-		return -ENOMEM;
-
-	hwmgr->backend = data;
-
-	data->dll_default_on = false;
-	data->sram_end = SMC_RAM_END;
-
-	for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
-		data->activity_target[i] = FIJI_AT_DFLT;
-
-	data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
-
-	data->mclk_activity_target = PPFIJI_MCLK_TARGETACTIVITY_DFLT;
-	data->mclk_dpm0_activity_target = 0xa;
-
-	data->sclk_dpm_key_disabled = 0;
-	data->mclk_dpm_key_disabled = 0;
-	data->pcie_dpm_key_disabled = 0;
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_UnTabledHardwareInterface);
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_TablelessHardwareInterface);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_SclkDeepSleep);
-
-	data->gpio_debug = 0;
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_DynamicPatchPowerState);
-
-	/* need to set voltage control types before EVV patching */
-	data->voltage_control = FIJI_VOLTAGE_CONTROL_NONE;
-	data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE;
-	data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE;
-
-	data->force_pcie_gen = PP_PCIEGenInvalid;
-
-	if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
-		data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_EnableMVDDControl))
-		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
-			data->mvdd_control = FIJI_VOLTAGE_CONTROL_BY_GPIO;
-
-	if (data->mvdd_control == FIJI_VOLTAGE_CONTROL_NONE)
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_EnableMVDDControl);
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_ControlVDDCI)) {
-		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
-			data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_GPIO;
-		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
-			data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
-	}
-
-	if (data->vddci_control == FIJI_VOLTAGE_CONTROL_NONE)
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_ControlVDDCI);
-
-	if (table_info && table_info->cac_dtp_table->usClockStretchAmount)
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_ClockStretcher);
-
-	fiji_init_dpm_defaults(hwmgr);
-
-	/* Get leakage voltage based on leakage ID. */
-	fiji_get_evv_voltages(hwmgr);
-
-	/* Patch our voltage dependency table with actual leakage voltage
-	 * We need to perform leakage translation before it's used by other functions
-	 */
-	fiji_complete_dependency_tables(hwmgr);
-
-	/* Parse pptable data read from VBIOS */
-	fiji_set_private_data_based_on_pptable(hwmgr);
-
-	/* ULV Support */
-	data->ulv.ulv_supported = true; /* ULV feature is enabled by default */
-
-	/* Initalize Dynamic State Adjustment Rule Settings */
-	result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
-
-	if (!result) {
-		data->uvd_enabled = false;
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_EnableSMU7ThermalManagement);
-		data->vddc_phase_shed_control = false;
-	}
-
-	stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_StayInBootState);
-
-	if (0 == result) {
-		struct cgs_system_info sys_info = {0};
-
-		data->is_tlu_enabled = false;
-		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
-				FIJI_MAX_HARDWARE_POWERLEVELS;
-		hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
-		hwmgr->platform_descriptor.minimumClocksReductionPercentage  = 50;
-
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_FanSpeedInTableIsRPM);
-
-		if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp &&
-				hwmgr->thermal_controller.
-				advanceFanControlParameters.ucFanControlMode) {
-			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
-					hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
-			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
-					hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
-			hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
-					table_info->cac_dtp_table->usOperatingTempMinLimit;
-			hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
-					table_info->cac_dtp_table->usOperatingTempMaxLimit;
-			hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
-					table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
-			hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
-					table_info->cac_dtp_table->usOperatingTempStep;
-			hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
-					table_info->cac_dtp_table->usTargetOperatingTemp;
-
-			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-					PHM_PlatformCaps_ODFuzzyFanControlSupport);
-		}
-
-		sys_info.size = sizeof(struct cgs_system_info);
-		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
-		result = cgs_query_system_info(hwmgr->device, &sys_info);
-		if (result)
-			data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
-		else
-			data->pcie_gen_cap = (uint32_t)sys_info.value;
-		if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
-			data->pcie_spc_cap = 20;
-		sys_info.size = sizeof(struct cgs_system_info);
-		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
-		result = cgs_query_system_info(hwmgr->device, &sys_info);
-		if (result)
-			data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
-		else
-			data->pcie_lane_cap = (uint32_t)sys_info.value;
-	} else {
-		/* Ignore return value in here, we are cleaning up a mess. */
-		fiji_hwmgr_backend_fini(hwmgr);
-	}
-
-	return 0;
-}
-
-/**
- * Read clock related registers.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-static int fiji_read_clock_registers(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	data->clock_registers.vCG_SPLL_FUNC_CNTL =
-		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-				ixCG_SPLL_FUNC_CNTL);
-	data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
-		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-				ixCG_SPLL_FUNC_CNTL_2);
-	data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
-		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-				ixCG_SPLL_FUNC_CNTL_3);
-	data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
-		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-				ixCG_SPLL_FUNC_CNTL_4);
-	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
-		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-				ixCG_SPLL_SPREAD_SPECTRUM);
-	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
-		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-				ixCG_SPLL_SPREAD_SPECTRUM_2);
-
-	return 0;
-}
-
-/**
- * Find out if memory is GDDR5.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-static int fiji_get_memory_type(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	uint32_t temp;
-
-	temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
-
-	data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
-			((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
-			 MC_SEQ_MISC0_GDDR5_SHIFT));
-
-	return 0;
-}
-
-/**
- * Enables Dynamic Power Management by SMC
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-static int fiji_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
-{
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			GENERAL_PWRMGT, STATIC_PM_EN, 1);
-
-	return 0;
-}
-
-/**
- * Initialize PowerGating States for different engines
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-static int fiji_init_power_gate_state(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	data->uvd_power_gated = false;
-	data->vce_power_gated = false;
-	data->samu_power_gated = false;
-	data->acp_power_gated = false;
-	data->pg_acp_init = true;
-
-	return 0;
-}
-
-static int fiji_init_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	data->low_sclk_interrupt_threshold = 0;
-
-	return 0;
-}
-
-static int fiji_setup_asic_task(struct pp_hwmgr *hwmgr)
-{
-	int tmp_result, result = 0;
-
-	tmp_result = fiji_read_clock_registers(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to read clock registers!", result = tmp_result);
-
-	tmp_result = fiji_get_memory_type(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to get memory type!", result = tmp_result);
-
-	tmp_result = fiji_enable_acpi_power_management(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable ACPI power management!", result = tmp_result);
-
-	tmp_result = fiji_init_power_gate_state(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to init power gate state!", result = tmp_result);
-
-	tmp_result = tonga_get_mc_microcode_version(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to get MC microcode version!", result = tmp_result);
-
-	tmp_result = fiji_init_sclk_threshold(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to init sclk threshold!", result = tmp_result);
-
-	return result;
-}
-
-/**
-* Checks if we want to support voltage control
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-*/
-static bool fiji_voltage_control(const struct pp_hwmgr *hwmgr)
-{
-	const struct fiji_hwmgr *data =
-			(const struct fiji_hwmgr *)(hwmgr->backend);
-
-	return (FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control);
-}
-
-/**
-* Enable voltage control
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int fiji_enable_voltage_control(struct pp_hwmgr *hwmgr)
-{
-	/* enable voltage control */
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
-
-	return 0;
-}
-
-/**
-* Remove repeated voltage values and create table with unique values.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    vol_table  the pointer to changing voltage table
-* @return    0 in success
-*/
-
-static int fiji_trim_voltage_table(struct pp_hwmgr *hwmgr,
-		struct pp_atomctrl_voltage_table *vol_table)
-{
-	uint32_t i, j;
-	uint16_t vvalue;
-	bool found = false;
-	struct pp_atomctrl_voltage_table *table;
-
-	PP_ASSERT_WITH_CODE((NULL != vol_table),
-			"Voltage Table empty.", return -EINVAL);
-	table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
-			GFP_KERNEL);
-
-	if (NULL == table)
-		return -ENOMEM;
-
-	table->mask_low = vol_table->mask_low;
-	table->phase_delay = vol_table->phase_delay;
-
-	for (i = 0; i < vol_table->count; i++) {
-		vvalue = vol_table->entries[i].value;
-		found = false;
-
-		for (j = 0; j < table->count; j++) {
-			if (vvalue == table->entries[j].value) {
-				found = true;
-				break;
-			}
-		}
-
-		if (!found) {
-			table->entries[table->count].value = vvalue;
-			table->entries[table->count].smio_low =
-					vol_table->entries[i].smio_low;
-			table->count++;
-		}
-	}
-
-	memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
-	kfree(table);
-
-	return 0;
-}
-
-static int fiji_get_svi2_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
-		phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
-	uint32_t i;
-	int result;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct pp_atomctrl_voltage_table *vol_table = &(data->mvdd_voltage_table);
-
-	PP_ASSERT_WITH_CODE((0 != dep_table->count),
-			"Voltage Dependency Table empty.", return -EINVAL);
-
-	vol_table->mask_low = 0;
-	vol_table->phase_delay = 0;
-	vol_table->count = dep_table->count;
-
-	for (i = 0; i < dep_table->count; i++) {
-		vol_table->entries[i].value = dep_table->entries[i].mvdd;
-		vol_table->entries[i].smio_low = 0;
-	}
-
-	result = fiji_trim_voltage_table(hwmgr, vol_table);
-	PP_ASSERT_WITH_CODE((0 == result),
-			"Failed to trim MVDD table.", return result);
-
-	return 0;
-}
-
-static int fiji_get_svi2_vddci_voltage_table(struct pp_hwmgr *hwmgr,
-		phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
-	uint32_t i;
-	int result;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct pp_atomctrl_voltage_table *vol_table = &(data->vddci_voltage_table);
-
-	PP_ASSERT_WITH_CODE((0 != dep_table->count),
-			"Voltage Dependency Table empty.", return -EINVAL);
-
-	vol_table->mask_low = 0;
-	vol_table->phase_delay = 0;
-	vol_table->count = dep_table->count;
-
-	for (i = 0; i < dep_table->count; i++) {
-		vol_table->entries[i].value = dep_table->entries[i].vddci;
-		vol_table->entries[i].smio_low = 0;
-	}
-
-	result = fiji_trim_voltage_table(hwmgr, vol_table);
-	PP_ASSERT_WITH_CODE((0 == result),
-			"Failed to trim VDDCI table.", return result);
-
-	return 0;
-}
-
-static int fiji_get_svi2_vdd_voltage_table(struct pp_hwmgr *hwmgr,
-		phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
-	int i = 0;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct pp_atomctrl_voltage_table *vol_table = &(data->vddc_voltage_table);
-
-	PP_ASSERT_WITH_CODE((0 != lookup_table->count),
-			"Voltage Lookup Table empty.", return -EINVAL);
-
-	vol_table->mask_low = 0;
-	vol_table->phase_delay = 0;
-
-	vol_table->count = lookup_table->count;
-
-	for (i = 0; i < vol_table->count; i++) {
-		vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
-		vol_table->entries[i].smio_low = 0;
-	}
-
-	return 0;
-}
-
-/* ---- Voltage Tables ----
- * If the voltage table would be bigger than
- * what will fit into the state table on
- * the SMC keep only the higher entries.
- */
-static void fiji_trim_voltage_table_to_fit_state_table(struct pp_hwmgr *hwmgr,
-		uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table)
-{
-	unsigned int i, diff;
-
-	if (vol_table->count <= max_vol_steps)
-		return;
-
-	diff = vol_table->count - max_vol_steps;
-
-	for (i = 0; i < max_vol_steps; i++)
-		vol_table->entries[i] = vol_table->entries[i + diff];
-
-	vol_table->count = max_vol_steps;
-
-	return;
-}
-
-/**
-* Create Voltage Tables.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int fiji_construct_voltage_tables(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)hwmgr->pptable;
-	int result;
-
-	if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-		result = atomctrl_get_voltage_table_v3(hwmgr,
-				VOLTAGE_TYPE_MVDDC,	VOLTAGE_OBJ_GPIO_LUT,
-				&(data->mvdd_voltage_table));
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to retrieve MVDD table.",
-				return result);
-	} else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
-		result = fiji_get_svi2_mvdd_voltage_table(hwmgr,
-				table_info->vdd_dep_on_mclk);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to retrieve SVI2 MVDD table from dependancy table.",
-				return result;);
-	}
-
-	if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
-		result = atomctrl_get_voltage_table_v3(hwmgr,
-				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
-				&(data->vddci_voltage_table));
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to retrieve VDDCI table.",
-				return result);
-	} else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
-		result = fiji_get_svi2_vddci_voltage_table(hwmgr,
-				table_info->vdd_dep_on_mclk);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to retrieve SVI2 VDDCI table from dependancy table.",
-				return result);
-	}
-
-	if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-		result = fiji_get_svi2_vdd_voltage_table(hwmgr,
-				table_info->vddc_lookup_table);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to retrieve SVI2 VDDC table from lookup table.",
-				return result);
-	}
-
-	PP_ASSERT_WITH_CODE(
-			(data->vddc_voltage_table.count <= (SMU73_MAX_LEVELS_VDDC)),
-			"Too many voltage values for VDDC. Trimming to fit state table.",
-			fiji_trim_voltage_table_to_fit_state_table(hwmgr,
-					SMU73_MAX_LEVELS_VDDC, &(data->vddc_voltage_table)));
-
-	PP_ASSERT_WITH_CODE(
-			(data->vddci_voltage_table.count <= (SMU73_MAX_LEVELS_VDDCI)),
-			"Too many voltage values for VDDCI. Trimming to fit state table.",
-			fiji_trim_voltage_table_to_fit_state_table(hwmgr,
-					SMU73_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table)));
-
-	PP_ASSERT_WITH_CODE(
-			(data->mvdd_voltage_table.count <= (SMU73_MAX_LEVELS_MVDD)),
-			"Too many voltage values for MVDD. Trimming to fit state table.",
-			fiji_trim_voltage_table_to_fit_state_table(hwmgr,
-					SMU73_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table)));
-
-	return 0;
-}
-
-static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-	/* Program additional LP registers
-	 * that are no longer programmed by VBIOS
-	 */
-	cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
-			cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
-			cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
-			cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
-			cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
-			cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
-			cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
-			cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
-
-	return 0;
-}
-
-/**
-* Programs static screed detection parameters
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int fiji_program_static_screen_threshold_parameters(
-		struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	/* Set static screen threshold unit */
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
-			data->static_screen_threshold_unit);
-	/* Set static screen threshold */
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
-			data->static_screen_threshold);
-
-	return 0;
-}
-
-/**
-* Setup display gap for glitch free memory clock switching.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always  0
-*/
-static int fiji_enable_display_gap(struct pp_hwmgr *hwmgr)
-{
-	uint32_t displayGap =
-			cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-					ixCG_DISPLAY_GAP_CNTL);
-
-	displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL,
-			DISP_GAP, DISPLAY_GAP_IGNORE);
-
-	displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL,
-			DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_DISPLAY_GAP_CNTL, displayGap);
-
-	return 0;
-}
-
-/**
-* Programs activity state transition voting clients
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always  0
-*/
-static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	/* Clear reset for voting clients before enabling DPM */
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
-
-	return 0;
-}
-
-static int fiji_clear_voting_clients(struct pp_hwmgr *hwmgr)
-{
-	/* Reset voting clients before disabling DPM */
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_0, 0);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_1, 0);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_2, 0);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_3, 0);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_4, 0);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_5, 0);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_6, 0);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_7, 0);
-
-	return 0;
-}
-
-/**
-* Get the location of various tables inside the FW image.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always  0
-*/
-static int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-	uint32_t tmp;
-	int result;
-	bool error = false;
-
-	result = fiji_read_smc_sram_dword(hwmgr->smumgr,
-			SMU7_FIRMWARE_HEADER_LOCATION +
-			offsetof(SMU73_Firmware_Header, DpmTable),
-			&tmp, data->sram_end);
-
-	if (0 == result)
-		data->dpm_table_start = tmp;
-
-	error |= (0 != result);
-
-	result = fiji_read_smc_sram_dword(hwmgr->smumgr,
-			SMU7_FIRMWARE_HEADER_LOCATION +
-			offsetof(SMU73_Firmware_Header, SoftRegisters),
-			&tmp, data->sram_end);
-
-	if (!result) {
-		data->soft_regs_start = tmp;
-		smu_data->soft_regs_start = tmp;
-	}
-
-	error |= (0 != result);
-
-	result = fiji_read_smc_sram_dword(hwmgr->smumgr,
-			SMU7_FIRMWARE_HEADER_LOCATION +
-			offsetof(SMU73_Firmware_Header, mcRegisterTable),
-			&tmp, data->sram_end);
-
-	if (!result)
-		data->mc_reg_table_start = tmp;
-
-	result = fiji_read_smc_sram_dword(hwmgr->smumgr,
-			SMU7_FIRMWARE_HEADER_LOCATION +
-			offsetof(SMU73_Firmware_Header, FanTable),
-			&tmp, data->sram_end);
-
-	if (!result)
-		data->fan_table_start = tmp;
-
-	error |= (0 != result);
-
-	result = fiji_read_smc_sram_dword(hwmgr->smumgr,
-			SMU7_FIRMWARE_HEADER_LOCATION +
-			offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
-			&tmp, data->sram_end);
-
-	if (!result)
-		data->arb_table_start = tmp;
-
-	error |= (0 != result);
-
-	result = fiji_read_smc_sram_dword(hwmgr->smumgr,
-			SMU7_FIRMWARE_HEADER_LOCATION +
-			offsetof(SMU73_Firmware_Header, Version),
-			&tmp, data->sram_end);
-
-	if (!result)
-		hwmgr->microcode_version_info.SMC = tmp;
-
-	error |= (0 != result);
-
-	return error ? -1 : 0;
-}
-
-/* Copy one arb setting to another and then switch the active set.
- * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants.
- */
-static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
-		uint32_t arb_src, uint32_t arb_dest)
-{
-	uint32_t mc_arb_dram_timing;
-	uint32_t mc_arb_dram_timing2;
-	uint32_t burst_time;
-	uint32_t mc_cg_config;
-
-	switch (arb_src) {
-	case MC_CG_ARB_FREQ_F0:
-		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
-		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
-		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-		break;
-	case MC_CG_ARB_FREQ_F1:
-		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
-		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
-		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	switch (arb_dest) {
-	case MC_CG_ARB_FREQ_F0:
-		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
-		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
-		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
-		break;
-	case MC_CG_ARB_FREQ_F1:
-		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
-		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
-		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
-	mc_cg_config |= 0x0000000F;
-	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
-	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
-
-	return 0;
-}
-
-/**
-* Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   if success then 0;
-*/
-static int fiji_reset_to_default(struct pp_hwmgr *hwmgr)
-{
-	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
-}
-
-/**
-* Initial switch from ARB F0->F1
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-* This function is to be called from the SetPowerState table.
-*/
-static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
-{
-	return fiji_copy_and_switch_arb_sets(hwmgr,
-			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-static int fiji_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
-{
-	uint32_t tmp;
-
-	tmp = (cgs_read_ind_register(hwmgr->device,
-			CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
-			0x0000ff00) >> 8;
-
-	if (tmp == MC_CG_ARB_FREQ_F0)
-		return 0;
-
-	return fiji_copy_and_switch_arb_sets(hwmgr,
-			tmp, MC_CG_ARB_FREQ_F0);
-}
-
-static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr,
-		struct fiji_single_dpm_table *dpm_table, uint32_t count)
-{
-	int i;
-	PP_ASSERT_WITH_CODE(count <= MAX_REGULAR_DPM_NUMBER,
-			"Fatal error, can not set up single DPM table entries "
-			"to exceed max number!",);
-
-	dpm_table->count = count;
-	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
-		dpm_table->dpm_levels[i].enabled = false;
-
-	return 0;
-}
-
-static void fiji_setup_pcie_table_entry(
-	struct fiji_single_dpm_table *dpm_table,
-	uint32_t index, uint32_t pcie_gen,
-	uint32_t pcie_lanes)
-{
-	dpm_table->dpm_levels[index].value = pcie_gen;
-	dpm_table->dpm_levels[index].param1 = pcie_lanes;
-	dpm_table->dpm_levels[index].enabled = true;
-}
-
-static int fiji_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
-	uint32_t i, max_entry;
-
-	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
-			data->use_pcie_power_saving_levels), "No pcie performance levels!",
-			return -EINVAL);
-
-	if (data->use_pcie_performance_levels &&
-			!data->use_pcie_power_saving_levels) {
-		data->pcie_gen_power_saving = data->pcie_gen_performance;
-		data->pcie_lane_power_saving = data->pcie_lane_performance;
-	} else if (!data->use_pcie_performance_levels &&
-			data->use_pcie_power_saving_levels) {
-		data->pcie_gen_performance = data->pcie_gen_power_saving;
-		data->pcie_lane_performance = data->pcie_lane_power_saving;
-	}
-
-	fiji_reset_single_dpm_table(hwmgr,
-			&data->dpm_table.pcie_speed_table, SMU73_MAX_LEVELS_LINK);
-
-	if (pcie_table != NULL) {
-		/* max_entry is used to make sure we reserve one PCIE level
-		 * for boot level (fix for A+A PSPP issue).
-		 * If PCIE table from PPTable have ULV entry + 8 entries,
-		 * then ignore the last entry.*/
-		max_entry = (SMU73_MAX_LEVELS_LINK < pcie_table->count) ?
-				SMU73_MAX_LEVELS_LINK : pcie_table->count;
-		for (i = 1; i < max_entry; i++) {
-			fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
-					get_pcie_gen_support(data->pcie_gen_cap,
-							pcie_table->entries[i].gen_speed),
-					get_pcie_lane_support(data->pcie_lane_cap,
-							pcie_table->entries[i].lane_width));
-		}
-		data->dpm_table.pcie_speed_table.count = max_entry - 1;
-	} else {
-		/* Hardcode Pcie Table */
-		fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
-				get_pcie_gen_support(data->pcie_gen_cap,
-						PP_Min_PCIEGen),
-				get_pcie_lane_support(data->pcie_lane_cap,
-						PP_Max_PCIELane));
-		fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
-				get_pcie_gen_support(data->pcie_gen_cap,
-						PP_Min_PCIEGen),
-				get_pcie_lane_support(data->pcie_lane_cap,
-						PP_Max_PCIELane));
-		fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
-				get_pcie_gen_support(data->pcie_gen_cap,
-						PP_Max_PCIEGen),
-				get_pcie_lane_support(data->pcie_lane_cap,
-						PP_Max_PCIELane));
-		fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
-				get_pcie_gen_support(data->pcie_gen_cap,
-						PP_Max_PCIEGen),
-				get_pcie_lane_support(data->pcie_lane_cap,
-						PP_Max_PCIELane));
-		fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
-				get_pcie_gen_support(data->pcie_gen_cap,
-						PP_Max_PCIEGen),
-				get_pcie_lane_support(data->pcie_lane_cap,
-						PP_Max_PCIELane));
-		fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
-				get_pcie_gen_support(data->pcie_gen_cap,
-						PP_Max_PCIEGen),
-				get_pcie_lane_support(data->pcie_lane_cap,
-						PP_Max_PCIELane));
-
-		data->dpm_table.pcie_speed_table.count = 6;
-	}
-	/* Populate last level for boot PCIE level, but do not increment count. */
-	fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
-			data->dpm_table.pcie_speed_table.count,
-			get_pcie_gen_support(data->pcie_gen_cap,
-					PP_Min_PCIEGen),
-			get_pcie_lane_support(data->pcie_lane_cap,
-					PP_Max_PCIELane));
-
-	return 0;
-}
-
-/*
- * This function is to initalize all DPM state tables
- * for SMU7 based on the dependency table.
- * Dynamic state patching function will then trim these
- * state tables to the allowed range based
- * on the power policy or external client requests,
- * such as UVD request, etc.
- */
-static int fiji_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	uint32_t i;
-
-	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
-			table_info->vdd_dep_on_sclk;
-	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
-			table_info->vdd_dep_on_mclk;
-
-	PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
-			"SCLK dependency table is missing. This table is mandatory",
-			return -EINVAL);
-	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
-			"SCLK dependency table has to have is missing. "
-			"This table is mandatory",
-			return -EINVAL);
-
-	PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
-			"MCLK dependency table is missing. This table is mandatory",
-			return -EINVAL);
-	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
-			"MCLK dependency table has to have is missing. "
-			"This table is mandatory",
-			return -EINVAL);
-
-	/* clear the state table to reset everything to default */
-	fiji_reset_single_dpm_table(hwmgr,
-			&data->dpm_table.sclk_table, SMU73_MAX_LEVELS_GRAPHICS);
-	fiji_reset_single_dpm_table(hwmgr,
-			&data->dpm_table.mclk_table, SMU73_MAX_LEVELS_MEMORY);
-
-	/* Initialize Sclk DPM table based on allow Sclk values */
-	data->dpm_table.sclk_table.count = 0;
-	for (i = 0; i < dep_sclk_table->count; i++) {
-		if (i == 0 || data->dpm_table.sclk_table.dpm_levels
-				[data->dpm_table.sclk_table.count - 1].value !=
-						dep_sclk_table->entries[i].clk) {
-			data->dpm_table.sclk_table.dpm_levels
-			[data->dpm_table.sclk_table.count].value =
-					dep_sclk_table->entries[i].clk;
-			data->dpm_table.sclk_table.dpm_levels
-			[data->dpm_table.sclk_table.count].enabled =
-					(i == 0) ? true : false;
-			data->dpm_table.sclk_table.count++;
-		}
-	}
-
-	/* Initialize Mclk DPM table based on allow Mclk values */
-	data->dpm_table.mclk_table.count = 0;
-	for (i=0; i<dep_mclk_table->count; i++) {
-		if ( i==0 || data->dpm_table.mclk_table.dpm_levels
-				[data->dpm_table.mclk_table.count - 1].value !=
-						dep_mclk_table->entries[i].clk) {
-			data->dpm_table.mclk_table.dpm_levels
-			[data->dpm_table.mclk_table.count].value =
-					dep_mclk_table->entries[i].clk;
-			data->dpm_table.mclk_table.dpm_levels
-			[data->dpm_table.mclk_table.count].enabled =
-					(i == 0) ? true : false;
-			data->dpm_table.mclk_table.count++;
-		}
-	}
-
-	/* setup PCIE gen speed levels */
-	fiji_setup_default_pcie_table(hwmgr);
-
-	/* save a copy of the default DPM table */
-	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
-			sizeof(struct fiji_dpm_table));
-
-	return 0;
-}
-
-/**
- * @brief PhwFiji_GetVoltageOrder
- *  Returns index of requested voltage record in lookup(table)
- * @param lookup_table - lookup list to search in
- * @param voltage - voltage to look for
- * @return 0 on success
- */
-uint8_t fiji_get_voltage_index(
-		struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
-{
-	uint8_t count = (uint8_t) (lookup_table->count);
-	uint8_t i;
-
-	PP_ASSERT_WITH_CODE((NULL != lookup_table),
-			"Lookup Table empty.", return 0);
-	PP_ASSERT_WITH_CODE((0 != count),
-			"Lookup Table empty.", return 0);
-
-	for (i = 0; i < lookup_table->count; i++) {
-		/* find first voltage equal or bigger than requested */
-		if (lookup_table->entries[i].us_vdd >= voltage)
-			return i;
-	}
-	/* voltage is bigger than max voltage in the table */
-	return i - 1;
-}
-
-/**
-* Preparation of vddc and vddgfx CAC tables for SMC.
-*
-* @param    hwmgr  the address of the hardware manager
-* @param    table  the SMC DPM table structure to be populated
-* @return   always 0
-*/
-static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
-		struct SMU73_Discrete_DpmTable *table)
-{
-	uint32_t count;
-	uint8_t index;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_voltage_lookup_table *lookup_table =
-			table_info->vddc_lookup_table;
-	/* tables is already swapped, so in order to use the value from it,
-	 * we need to swap it back.
-	 * We are populating vddc CAC data to BapmVddc table
-	 * in split and merged mode
-	 */
-	for( count = 0; count<lookup_table->count; count++) {
-		index = fiji_get_voltage_index(lookup_table,
-				data->vddc_voltage_table.entries[count].value);
-		table->BapmVddcVidLoSidd[count] = (uint8_t) ((6200 -
-				(lookup_table->entries[index].us_cac_low *
-						VOLTAGE_SCALE)) / 25);
-		table->BapmVddcVidHiSidd[count] = (uint8_t) ((6200 -
-				(lookup_table->entries[index].us_cac_high *
-						VOLTAGE_SCALE)) / 25);
-	}
-
-	return 0;
-}
-
-/**
-* Preparation of voltage tables for SMC.
-*
-* @param    hwmgr   the address of the hardware manager
-* @param    table   the SMC DPM table structure to be populated
-* @return   always  0
-*/
-
-int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
-		struct SMU73_Discrete_DpmTable *table)
-{
-	int result;
-
-	result = fiji_populate_cac_table(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"can not populate CAC voltage tables to SMC",
-			return -EINVAL);
-
-	return 0;
-}
-
-static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
-		struct SMU73_Discrete_Ulv *state)
-{
-	int result = 0;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	state->CcPwrDynRm = 0;
-	state->CcPwrDynRm1 = 0;
-
-	state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
-	state->VddcOffsetVid = (uint8_t)( table_info->us_ulv_voltage_offset *
-			VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 );
-
-	state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
-
-	if (!result) {
-		CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
-		CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
-		CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
-	}
-	return result;
-}
-
-static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
-		struct SMU73_Discrete_DpmTable *table)
-{
-	return fiji_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int32_t fiji_get_dpm_level_enable_mask_value(
-		struct fiji_single_dpm_table* dpm_table)
-{
-	int32_t i;
-	int32_t mask = 0;
-
-	for (i = dpm_table->count; i > 0; i--) {
-		mask = mask << 1;
-		if (dpm_table->dpm_levels[i - 1].enabled)
-			mask |= 0x1;
-		else
-			mask &= 0xFFFFFFFE;
-	}
-	return mask;
-}
-
-static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
-		struct SMU73_Discrete_DpmTable *table)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct fiji_dpm_table *dpm_table = &data->dpm_table;
-	int i;
-
-	/* Index (dpm_table->pcie_speed_table.count)
-	 * is reserved for PCIE boot level. */
-	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
-		table->LinkLevel[i].PcieGenSpeed  =
-				(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
-		table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
-				dpm_table->pcie_speed_table.dpm_levels[i].param1);
-		table->LinkLevel[i].EnabledForActivity = 1;
-		table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
-		table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
-		table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
-	}
-
-	data->smc_state_table.LinkLevelCount =
-			(uint8_t)dpm_table->pcie_speed_table.count;
-	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
-			fiji_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
-	return 0;
-}
-
-/**
-* Calculates the SCLK dividers using the provided engine clock
-*
-* @param    hwmgr  the address of the hardware manager
-* @param    clock  the engine clock to use to populate the structure
-* @param    sclk   the SMC SCLK structure to be populated
-*/
-static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
-		uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
-{
-	const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct pp_atomctrl_clock_dividers_vi dividers;
-	uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
-	uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
-	uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
-	uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
-	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
-	uint32_t ref_clock;
-	uint32_t ref_divider;
-	uint32_t fbdiv;
-	int result;
-
-	/* get the engine clock dividers for this clock value */
-	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock,  &dividers);
-
-	PP_ASSERT_WITH_CODE(result == 0,
-			"Error retrieving Engine Clock dividers from VBIOS.",
-			return result);
-
-	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
-	ref_clock = atomctrl_get_reference_clock(hwmgr);
-	ref_divider = 1 + dividers.uc_pll_ref_div;
-
-	/* low 14 bits is fraction and high 12 bits is divider */
-	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
-	/* SPLL_FUNC_CNTL setup */
-	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
-			SPLL_REF_DIV, dividers.uc_pll_ref_div);
-	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
-			SPLL_PDIV_A,  dividers.uc_pll_post_div);
-
-	/* SPLL_FUNC_CNTL_3 setup*/
-	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
-			SPLL_FB_DIV, fbdiv);
-
-	/* set to use fractional accumulation*/
-	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
-			SPLL_DITHEN, 1);
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
-		struct pp_atomctrl_internal_ss_info ssInfo;
-
-		uint32_t vco_freq = clock * dividers.uc_pll_post_div;
-		if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
-				vco_freq, &ssInfo)) {
-			/*
-			 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
-			 * ss_info.speed_spectrum_rate -- in unit of khz
-			 *
-			 * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
-			 */
-			uint32_t clk_s = ref_clock * 5 /
-					(ref_divider * ssInfo.speed_spectrum_rate);
-			/* clkv = 2 * D * fbdiv / NS */
-			uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
-					fbdiv / (clk_s * 10000);
-
-			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
-					CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
-			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
-					CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
-			cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
-					CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
-		}
-	}
-
-	sclk->SclkFrequency        = clock;
-	sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
-	sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
-	sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
-	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
-	sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
-
-	return 0;
-}
-
-static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci)
-{
-	uint32_t  i;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct pp_atomctrl_voltage_table *vddci_table =
-			&(data->vddci_voltage_table);
-
-	for (i = 0; i < vddci_table->count; i++) {
-		if (vddci_table->entries[i].value >= vddci)
-			return vddci_table->entries[i].value;
-	}
-
-	PP_ASSERT_WITH_CODE(false,
-			"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
-			return vddci_table->entries[i-1].value);
-}
-
-static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
-		struct phm_ppt_v1_clock_voltage_dependency_table* dep_table,
-		uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
-	uint32_t i;
-	uint16_t vddci;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	*voltage = *mvdd = 0;
-
-	/* clock - voltage dependency table is empty table */
-	if (dep_table->count == 0)
-		return -EINVAL;
-
-	for (i = 0; i < dep_table->count; i++) {
-		/* find first sclk bigger than request */
-		if (dep_table->entries[i].clk >= clock) {
-			*voltage |= (dep_table->entries[i].vddc *
-					VOLTAGE_SCALE) << VDDC_SHIFT;
-			if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control)
-				*voltage |= (data->vbios_boot_state.vddci_bootup_value *
-						VOLTAGE_SCALE) << VDDCI_SHIFT;
-			else if (dep_table->entries[i].vddci)
-				*voltage |= (dep_table->entries[i].vddci *
-						VOLTAGE_SCALE) << VDDCI_SHIFT;
-			else {
-				vddci = fiji_find_closest_vddci(hwmgr,
-						(dep_table->entries[i].vddc -
-								(uint16_t)data->vddc_vddci_delta));
-				*voltage |= (vddci * VOLTAGE_SCALE) <<	VDDCI_SHIFT;
-			}
-
-			if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control)
-				*mvdd = data->vbios_boot_state.mvdd_bootup_value *
-					VOLTAGE_SCALE;
-			else if (dep_table->entries[i].mvdd)
-				*mvdd = (uint32_t) dep_table->entries[i].mvdd *
-					VOLTAGE_SCALE;
-
-			*voltage |= 1 << PHASES_SHIFT;
-			return 0;
-		}
-	}
-
-	/* sclk is bigger than max sclk in the dependence table */
-	*voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
-	if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control)
-		*voltage |= (data->vbios_boot_state.vddci_bootup_value *
-				VOLTAGE_SCALE) << VDDCI_SHIFT;
-	else if (dep_table->entries[i-1].vddci) {
-		vddci = fiji_find_closest_vddci(hwmgr,
-				(dep_table->entries[i].vddc -
-						(uint16_t)data->vddc_vddci_delta));
-		*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-	}
-
-	if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control)
-		*mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
-	else if (dep_table->entries[i].mvdd)
-		*mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
-
-	return 0;
-}
-
-static uint8_t fiji_get_sleep_divider_id_from_clock(uint32_t clock,
-		uint32_t clock_insr)
-{
-	uint8_t i;
-	uint32_t temp;
-	uint32_t min = max(clock_insr, (uint32_t)FIJI_MINIMUM_ENGINE_CLOCK);
-
-	PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
-	for (i = FIJI_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
-		temp = clock >> i;
-
-		if (temp >= min || i == 0)
-			break;
-	}
-	return i;
-}
-/**
-* Populates single SMC SCLK structure using the provided engine clock
-*
-* @param    hwmgr      the address of the hardware manager
-* @param    clock the engine clock to use to populate the structure
-* @param    sclk        the SMC SCLK structure to be populated
-*/
-
-static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
-		uint32_t clock, uint16_t sclk_al_threshold,
-		struct SMU73_Discrete_GraphicsLevel *level)
-{
-	int result;
-	/* PP_Clocks minClocks; */
-	uint32_t threshold, mvdd;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	result = fiji_calculate_sclk_params(hwmgr, clock, level);
-
-	/* populate graphics levels */
-	result = fiji_get_dependency_volt_by_clk(hwmgr,
-			table_info->vdd_dep_on_sclk, clock,
-			&level->MinVoltage, &mvdd);
-	PP_ASSERT_WITH_CODE((0 == result),
-			"can not find VDDC voltage value for "
-			"VDDC engine clock dependency table",
-			return result);
-
-	level->SclkFrequency = clock;
-	level->ActivityLevel = sclk_al_threshold;
-	level->CcPwrDynRm = 0;
-	level->CcPwrDynRm1 = 0;
-	level->EnabledForActivity = 0;
-	level->EnabledForThrottle = 1;
-	level->UpHyst = 10;
-	level->DownHyst = 0;
-	level->VoltageDownHyst = 0;
-	level->PowerThrottle = 0;
-
-	threshold = clock * data->fast_watermark_threshold / 100;
-
-
-	data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
-		level->DeepSleepDivId = fiji_get_sleep_divider_id_from_clock(clock,
-								hwmgr->display_config.min_core_set_clock_in_sr);
-
-
-	/* Default to slow, highest DPM level will be
-	 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
-	 */
-	level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-	CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
-	CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
-	CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
-	CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
-	CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
-	CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
-	CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
-	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
-	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
-
-	return 0;
-}
-/**
-* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
-*
-* @param    hwmgr      the address of the hardware manager
-*/
-static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct fiji_dpm_table *dpm_table = &data->dpm_table;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
-	uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
-	int result = 0;
-	uint32_t array = data->dpm_table_start +
-			offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
-	uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
-			SMU73_MAX_LEVELS_GRAPHICS;
-	struct SMU73_Discrete_GraphicsLevel *levels =
-			data->smc_state_table.GraphicsLevel;
-	uint32_t i, max_entry;
-	uint8_t hightest_pcie_level_enabled = 0,
-			lowest_pcie_level_enabled = 0,
-			mid_pcie_level_enabled = 0,
-			count = 0;
-
-	for (i = 0; i < dpm_table->sclk_table.count; i++) {
-		result = fiji_populate_single_graphic_level(hwmgr,
-				dpm_table->sclk_table.dpm_levels[i].value,
-				(uint16_t)data->activity_target[i],
-				&levels[i]);
-		if (result)
-			return result;
-
-		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
-		if (i > 1)
-			levels[i].DeepSleepDivId = 0;
-	}
-
-	/* Only enable level 0 for now.*/
-	levels[0].EnabledForActivity = 1;
-
-	/* set highest level watermark to high */
-	levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
-			PPSMC_DISPLAY_WATERMARK_HIGH;
-
-	data->smc_state_table.GraphicsDpmLevelCount =
-			(uint8_t)dpm_table->sclk_table.count;
-	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
-			fiji_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
-	if (pcie_table != NULL) {
-		PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
-				"There must be 1 or more PCIE levels defined in PPTable.",
-				return -EINVAL);
-		max_entry = pcie_entry_cnt - 1;
-		for (i = 0; i < dpm_table->sclk_table.count; i++)
-			levels[i].pcieDpmLevel =
-					(uint8_t) ((i < max_entry)? i : max_entry);
-	} else {
-		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
-				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-						(1 << (hightest_pcie_level_enabled + 1))) != 0 ))
-			hightest_pcie_level_enabled++;
-
-		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
-				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-						(1 << lowest_pcie_level_enabled)) == 0 ))
-			lowest_pcie_level_enabled++;
-
-		while ((count < hightest_pcie_level_enabled) &&
-				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-						(1 << (lowest_pcie_level_enabled + 1 + count))) == 0 ))
-			count++;
-
-		mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1+ count) <
-				hightest_pcie_level_enabled?
-						(lowest_pcie_level_enabled + 1 + count) :
-						hightest_pcie_level_enabled;
-
-		/* set pcieDpmLevel to hightest_pcie_level_enabled */
-		for(i = 2; i < dpm_table->sclk_table.count; i++)
-			levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
-
-		/* set pcieDpmLevel to lowest_pcie_level_enabled */
-		levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
-		/* set pcieDpmLevel to mid_pcie_level_enabled */
-		levels[1].pcieDpmLevel = mid_pcie_level_enabled;
-	}
-	/* level count will send to smc once at init smc table and never change */
-	result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
-			(uint32_t)array_size, data->sram_end);
-
-	return result;
-}
-
-/**
- * MCLK Frequency Ratio
- * SEQ_CG_RESP  Bit[31:24] - 0x0
- * Bit[27:24] \96 DDR3 Frequency ratio
- * 0x0 <= 100MHz,       450 < 0x8 <= 500MHz
- * 100 < 0x1 <= 150MHz,       500 < 0x9 <= 550MHz
- * 150 < 0x2 <= 200MHz,       550 < 0xA <= 600MHz
- * 200 < 0x3 <= 250MHz,       600 < 0xB <= 650MHz
- * 250 < 0x4 <= 300MHz,       650 < 0xC <= 700MHz
- * 300 < 0x5 <= 350MHz,       700 < 0xD <= 750MHz
- * 350 < 0x6 <= 400MHz,       750 < 0xE <= 800MHz
- * 400 < 0x7 <= 450MHz,       800 < 0xF
- */
-static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
-{
-	if (mem_clock <= 10000) return 0x0;
-	if (mem_clock <= 15000) return 0x1;
-	if (mem_clock <= 20000) return 0x2;
-	if (mem_clock <= 25000) return 0x3;
-	if (mem_clock <= 30000) return 0x4;
-	if (mem_clock <= 35000) return 0x5;
-	if (mem_clock <= 40000) return 0x6;
-	if (mem_clock <= 45000) return 0x7;
-	if (mem_clock <= 50000) return 0x8;
-	if (mem_clock <= 55000) return 0x9;
-	if (mem_clock <= 60000) return 0xa;
-	if (mem_clock <= 65000) return 0xb;
-	if (mem_clock <= 70000) return 0xc;
-	if (mem_clock <= 75000) return 0xd;
-	if (mem_clock <= 80000) return 0xe;
-	/* mem_clock > 800MHz */
-	return 0xf;
-}
-
-/**
-* Populates the SMC MCLK structure using the provided memory clock
-*
-* @param    hwmgr   the address of the hardware manager
-* @param    clock   the memory clock to use to populate the structure
-* @param    sclk    the SMC SCLK structure to be populated
-*/
-static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
-    uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
-{
-	struct pp_atomctrl_memory_clock_param mem_param;
-	int result;
-
-	result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
-	PP_ASSERT_WITH_CODE((0 == result),
-			"Failed to get Memory PLL Dividers.",);
-
-	/* Save the result data to outpupt memory level structure */
-	mclk->MclkFrequency   = clock;
-	mclk->MclkDivider     = (uint8_t)mem_param.mpll_post_divider;
-	mclk->FreqRange       = fiji_get_mclk_frequency_ratio(clock);
-
-	return result;
-}
-
-static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
-		uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	int result = 0;
-
-	if (table_info->vdd_dep_on_mclk) {
-		result = fiji_get_dependency_volt_by_clk(hwmgr,
-				table_info->vdd_dep_on_mclk, clock,
-				&mem_level->MinVoltage, &mem_level->MinMvdd);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"can not find MinVddc voltage value from memory "
-				"VDDC voltage dependency table", return result);
-	}
-
-	mem_level->EnabledForThrottle = 1;
-	mem_level->EnabledForActivity = 0;
-	mem_level->UpHyst = 0;
-	mem_level->DownHyst = 100;
-	mem_level->VoltageDownHyst = 0;
-	mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
-	mem_level->StutterEnable = false;
-
-	mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-	/* enable stutter mode if all the follow condition applied
-	 * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
-	 * &(data->DisplayTiming.numExistingDisplays));
-	 */
-	data->display_timing.num_existing_displays = 1;
-
-	if ((data->mclk_stutter_mode_threshold) &&
-		(clock <= data->mclk_stutter_mode_threshold) &&
-		(!data->is_uvd_enabled) &&
-		(PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
-				STUTTER_ENABLE) & 0x1))
-		mem_level->StutterEnable = true;
-
-	result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
-	if (!result) {
-		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
-		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
-		CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
-		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
-	}
-	return result;
-}
-
-/**
-* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
-*
-* @param    hwmgr      the address of the hardware manager
-*/
-static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct fiji_dpm_table *dpm_table = &data->dpm_table;
-	int result;
-	/* populate MCLK dpm table to SMU7 */
-	uint32_t array = data->dpm_table_start +
-			offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
-	uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
-			SMU73_MAX_LEVELS_MEMORY;
-	struct SMU73_Discrete_MemoryLevel *levels =
-			data->smc_state_table.MemoryLevel;
-	uint32_t i;
-
-	for (i = 0; i < dpm_table->mclk_table.count; i++) {
-		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
-				"can not populate memory level as memory clock is zero",
-				return -EINVAL);
-		result = fiji_populate_single_memory_level(hwmgr,
-				dpm_table->mclk_table.dpm_levels[i].value,
-				&levels[i]);
-		if (result)
-			return result;
-	}
-
-	/* Only enable level 0 for now. */
-	levels[0].EnabledForActivity = 1;
-
-	/* in order to prevent MC activity from stutter mode to push DPM up.
-	 * the UVD change complements this by putting the MCLK in
-	 * a higher state by default such that we are not effected by
-	 * up threshold or and MCLK DPM latency.
-	 */
-	levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
-	CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
-
-	data->smc_state_table.MemoryDpmLevelCount =
-			(uint8_t)dpm_table->mclk_table.count;
-	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
-			fiji_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
-	/* set highest level watermark to high */
-	levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
-			PPSMC_DISPLAY_WATERMARK_HIGH;
-
-	/* level count will send to smc once at init smc table and never change */
-	result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
-			(uint32_t)array_size, data->sram_end);
-
-	return result;
-}
-
-/**
-* Populates the SMC MVDD structure using the provided memory clock.
-*
-* @param    hwmgr      the address of the hardware manager
-* @param    mclk        the MCLK value to be used in the decision if MVDD should be high or low.
-* @param    voltage     the SMC VOLTAGE structure to be populated
-*/
-int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
-		uint32_t mclk, SMIO_Pattern *smio_pat)
-{
-	const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	uint32_t i = 0;
-
-	if (FIJI_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-		/* find mvdd value which clock is more than request */
-		for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
-			if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
-				smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
-				break;
-			}
-		}
-		PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
-				"MVDD Voltage is outside the supported range.",
-				return -EINVAL);
-	} else
-		return -EINVAL;
-
-	return 0;
-}
-
-static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
-		SMU73_Discrete_DpmTable *table)
-{
-	int result = 0;
-	const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct pp_atomctrl_clock_dividers_vi dividers;
-	SMIO_Pattern vol_level;
-	uint32_t mvdd;
-	uint16_t us_mvdd;
-	uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
-	uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
-
-	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
-	if (!data->sclk_dpm_key_disabled) {
-		/* Get MinVoltage and Frequency from DPM0,
-		 * already converted to SMC_UL */
-		table->ACPILevel.SclkFrequency =
-				data->dpm_table.sclk_table.dpm_levels[0].value;
-		result = fiji_get_dependency_volt_by_clk(hwmgr,
-				table_info->vdd_dep_on_sclk,
-				table->ACPILevel.SclkFrequency,
-				&table->ACPILevel.MinVoltage, &mvdd);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Cannot find ACPI VDDC voltage value "
-				"in Clock Dependency Table",);
-	} else {
-		table->ACPILevel.SclkFrequency =
-				data->vbios_boot_state.sclk_bootup_value;
-		table->ACPILevel.MinVoltage =
-				data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
-	}
-
-	/* get the engine clock dividers for this clock value */
-	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
-			table->ACPILevel.SclkFrequency,  &dividers);
-	PP_ASSERT_WITH_CODE(result == 0,
-			"Error retrieving Engine Clock dividers from VBIOS.",
-			return result);
-
-	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
-	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-	table->ACPILevel.DeepSleepDivId = 0;
-
-	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
-			SPLL_PWRON, 0);
-	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
-			SPLL_RESET, 1);
-	spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
-			SCLK_MUX_SEL, 4);
-
-	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
-	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
-	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
-	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
-	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
-	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
-	table->ACPILevel.CcPwrDynRm = 0;
-	table->ACPILevel.CcPwrDynRm1 = 0;
-
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
-	if (!data->mclk_dpm_key_disabled) {
-		/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
-		table->MemoryACPILevel.MclkFrequency =
-				data->dpm_table.mclk_table.dpm_levels[0].value;
-		result = fiji_get_dependency_volt_by_clk(hwmgr,
-				table_info->vdd_dep_on_mclk,
-				table->MemoryACPILevel.MclkFrequency,
-				&table->MemoryACPILevel.MinVoltage, &mvdd);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Cannot find ACPI VDDCI voltage value "
-				"in Clock Dependency Table",);
-	} else {
-		table->MemoryACPILevel.MclkFrequency =
-				data->vbios_boot_state.mclk_bootup_value;
-		table->MemoryACPILevel.MinVoltage =
-				data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
-	}
-
-	us_mvdd = 0;
-	if ((FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
-			(data->mclk_dpm_key_disabled))
-		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
-	else {
-		if (!fiji_populate_mvdd_value(hwmgr,
-				data->dpm_table.mclk_table.dpm_levels[0].value,
-				&vol_level))
-			us_mvdd = vol_level.Voltage;
-	}
-
-	table->MemoryACPILevel.MinMvdd =
-			PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
-
-	table->MemoryACPILevel.EnabledForThrottle = 0;
-	table->MemoryACPILevel.EnabledForActivity = 0;
-	table->MemoryACPILevel.UpHyst = 0;
-	table->MemoryACPILevel.DownHyst = 100;
-	table->MemoryACPILevel.VoltageDownHyst = 0;
-	table->MemoryACPILevel.ActivityLevel =
-			PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
-	table->MemoryACPILevel.StutterEnable = false;
-	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
-
-	return result;
-}
-
-static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
-		SMU73_Discrete_DpmTable *table)
-{
-	int result = -EINVAL;
-	uint8_t count;
-	struct pp_atomctrl_clock_dividers_vi dividers;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-			table_info->mm_dep_table;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	table->VceLevelCount = (uint8_t)(mm_table->count);
-	table->VceBootLevel = 0;
-
-	for(count = 0; count < table->VceLevelCount; count++) {
-		table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
-		table->VceLevel[count].MinVoltage = 0;
-		table->VceLevel[count].MinVoltage |=
-				(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-		table->VceLevel[count].MinVoltage |=
-				((mm_table->entries[count].vddc - data->vddc_vddci_delta) *
-						VOLTAGE_SCALE) << VDDCI_SHIFT;
-		table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/*retrieve divider value for VBIOS */
-		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-				table->VceLevel[count].Frequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"can not find divide id for VCE engine clock",
-				return result);
-
-		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
-		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
-	}
-	return result;
-}
-
-static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
-		SMU73_Discrete_DpmTable *table)
-{
-	int result = -EINVAL;
-	uint8_t count;
-	struct pp_atomctrl_clock_dividers_vi dividers;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-			table_info->mm_dep_table;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	table->AcpLevelCount = (uint8_t)(mm_table->count);
-	table->AcpBootLevel = 0;
-
-	for (count = 0; count < table->AcpLevelCount; count++) {
-		table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
-		table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-				VOLTAGE_SCALE) << VDDC_SHIFT;
-		table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
-				data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
-		table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve divider value for VBIOS */
-		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-				table->AcpLevel[count].Frequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"can not find divide id for engine clock", return result);
-
-		table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-		CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
-		CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
-	}
-	return result;
-}
-
-static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-		SMU73_Discrete_DpmTable *table)
-{
-	int result = -EINVAL;
-	uint8_t count;
-	struct pp_atomctrl_clock_dividers_vi dividers;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-			table_info->mm_dep_table;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	table->SamuBootLevel = 0;
-	table->SamuLevelCount = (uint8_t)(mm_table->count);
-
-	for (count = 0; count < table->SamuLevelCount; count++) {
-		/* not sure whether we need evclk or not */
-		table->SamuLevel[count].MinVoltage = 0;
-		table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
-		table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-				VOLTAGE_SCALE) << VDDC_SHIFT;
-		table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
-				data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
-		table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve divider value for VBIOS */
-		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-				table->SamuLevel[count].Frequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"can not find divide id for samu clock", return result);
-
-		table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
-	}
-	return result;
-}
-
-static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
-		int32_t eng_clock, int32_t mem_clock,
-		struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
-	uint32_t dram_timing;
-	uint32_t dram_timing2;
-	uint32_t burstTime;
-	ULONG state, trrds, trrdl;
-	int result;
-
-	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
-			eng_clock, mem_clock);
-	PP_ASSERT_WITH_CODE(result == 0,
-			"Error calling VBIOS to set DRAM_TIMING.", return result);
-
-	dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
-	dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
-	burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
-
-	state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
-	trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
-	trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
-
-	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dram_timing);
-	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
-	arb_regs->McArbBurstTime   = (uint8_t)burstTime;
-	arb_regs->TRRDS            = (uint8_t)trrds;
-	arb_regs->TRRDL            = (uint8_t)trrdl;
-
-	return 0;
-}
-
-static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
-	uint32_t i, j;
-	int result = 0;
-
-	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
-		for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
-			result = fiji_populate_memory_timing_parameters(hwmgr,
-					data->dpm_table.sclk_table.dpm_levels[i].value,
-					data->dpm_table.mclk_table.dpm_levels[j].value,
-					&arb_regs.entries[i][j]);
-			if (result)
-				break;
-		}
-	}
-
-	if (!result)
-		result = fiji_copy_bytes_to_smc(
-				hwmgr->smumgr,
-				data->arb_table_start,
-				(uint8_t *)&arb_regs,
-				sizeof(SMU73_Discrete_MCArbDramTimingTable),
-				data->sram_end);
-	return result;
-}
-
-static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
-		struct SMU73_Discrete_DpmTable *table)
-{
-	int result = -EINVAL;
-	uint8_t count;
-	struct pp_atomctrl_clock_dividers_vi dividers;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-			table_info->mm_dep_table;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	table->UvdLevelCount = (uint8_t)(mm_table->count);
-	table->UvdBootLevel = 0;
-
-	for (count = 0; count < table->UvdLevelCount; count++) {
-		table->UvdLevel[count].MinVoltage = 0;
-		table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
-		table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
-		table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-				VOLTAGE_SCALE) << VDDC_SHIFT;
-		table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
-				data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
-		table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve divider value for VBIOS */
-		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-				table->UvdLevel[count].VclkFrequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"can not find divide id for Vclk clock", return result);
-
-		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
-		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-				table->UvdLevel[count].DclkFrequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"can not find divide id for Dclk clock", return result);
-
-		table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
-		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
-		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
-		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
-
-	}
-	return result;
-}
-
-static int fiji_find_boot_level(struct fiji_single_dpm_table *table,
-		uint32_t value, uint32_t *boot_level)
-{
-	int result = -EINVAL;
-	uint32_t i;
-
-	for (i = 0; i < table->count; i++) {
-		if (value == table->dpm_levels[i].value) {
-			*boot_level = i;
-			result = 0;
-		}
-	}
-	return result;
-}
-
-static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
-		struct SMU73_Discrete_DpmTable *table)
-{
-	int result = 0;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	table->GraphicsBootLevel = 0;
-	table->MemoryBootLevel = 0;
-
-	/* find boot level from dpm table */
-	result = fiji_find_boot_level(&(data->dpm_table.sclk_table),
-			data->vbios_boot_state.sclk_bootup_value,
-			(uint32_t *)&(table->GraphicsBootLevel));
-
-	result = fiji_find_boot_level(&(data->dpm_table.mclk_table),
-			data->vbios_boot_state.mclk_bootup_value,
-			(uint32_t *)&(table->MemoryBootLevel));
-
-	table->BootVddc  = data->vbios_boot_state.vddc_bootup_value *
-			VOLTAGE_SCALE;
-	table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
-			VOLTAGE_SCALE;
-	table->BootMVdd  = data->vbios_boot_state.mvdd_bootup_value *
-			VOLTAGE_SCALE;
-
-	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
-	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
-	CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
-	return 0;
-}
-
-static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	uint8_t count, level;
-
-	count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
-	for (level = 0; level < count; level++) {
-		if(table_info->vdd_dep_on_sclk->entries[level].clk >=
-				data->vbios_boot_state.sclk_bootup_value) {
-			data->smc_state_table.GraphicsBootLevel = level;
-			break;
-		}
-	}
-
-	count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
-	for (level = 0; level < count; level++) {
-		if(table_info->vdd_dep_on_mclk->entries[level].clk >=
-				data->vbios_boot_state.mclk_bootup_value) {
-			data->smc_state_table.MemoryBootLevel = level;
-			break;
-		}
-	}
-
-	return 0;
-}
-
-static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
-	uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
-			volt_with_cks, value;
-	uint16_t clock_freq_u16;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
-			volt_offset = 0;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
-			table_info->vdd_dep_on_sclk;
-
-	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
-	/* Read SMU_Eefuse to read and calculate RO and determine
-	 * if the part is SS or FF. if RO >= 1660MHz, part is FF.
-	 */
-	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixSMU_EFUSE_0 + (146 * 4));
-	efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixSMU_EFUSE_0 + (148 * 4));
-	efuse &= 0xFF000000;
-	efuse = efuse >> 24;
-	efuse2 &= 0xF;
-
-	if (efuse2 == 1)
-		ro = (2300 - 1350) * efuse / 255 + 1350;
-	else
-		ro = (2500 - 1000) * efuse / 255 + 1000;
-
-	if (ro >= 1660)
-		type = 0;
-	else
-		type = 1;
-
-	/* Populate Stretch amount */
-	data->smc_state_table.ClockStretcherAmount = stretch_amount;
-
-	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
-	for (i = 0; i < sclk_table->count; i++) {
-		data->smc_state_table.Sclk_CKS_masterEn0_7 |=
-				sclk_table->entries[i].cks_enable << i;
-		volt_without_cks = (uint32_t)((14041 *
-			(sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
-			(4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
-		volt_with_cks = (uint32_t)((13946 *
-			(sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
-			(3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
-		if (volt_without_cks >= volt_with_cks)
-			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
-					sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
-		data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
-	}
-
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-			STRETCH_ENABLE, 0x0);
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-			masterReset, 0x1);
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-			staticEnable, 0x1);
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-			masterReset, 0x0);
-
-	/* Populate CKS Lookup Table */
-	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
-		stretch_amount2 = 0;
-	else if (stretch_amount == 3 || stretch_amount == 4)
-		stretch_amount2 = 1;
-	else {
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_ClockStretcher);
-		PP_ASSERT_WITH_CODE(false,
-				"Stretch Amount in PPTable not supported\n",
-				return -EINVAL);
-	}
-
-	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixPWR_CKS_CNTL);
-	value &= 0xFFC2FF87;
-	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
-			fiji_clock_stretcher_lookup_table[stretch_amount2][0];
-	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
-			fiji_clock_stretcher_lookup_table[stretch_amount2][1];
-	clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table.
-			GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].
-			SclkFrequency) / 100);
-	if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
-			clock_freq_u16 &&
-	    fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
-			clock_freq_u16) {
-		/* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
-		value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
-		/* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
-		value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
-		/* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
-		value |= (fiji_clock_stretch_amount_conversion
-				[fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
-				 [stretch_amount]) << 3;
-	}
-	CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.
-			CKS_LOOKUPTableEntry[0].minFreq);
-	CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.
-			CKS_LOOKUPTableEntry[0].maxFreq);
-	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
-			fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
-	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
-			(fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixPWR_CKS_CNTL, value);
-
-	/* Populate DDT Lookup Table */
-	for (i = 0; i < 4; i++) {
-		/* Assign the minimum and maximum VID stored
-		 * in the last row of Clock Stretcher Voltage Table.
-		 */
-		data->smc_state_table.ClockStretcherDataTable.
-		ClockStretcherDataTableEntry[i].minVID =
-				(uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
-		data->smc_state_table.ClockStretcherDataTable.
-		ClockStretcherDataTableEntry[i].maxVID =
-				(uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
-		/* Loop through each SCLK and check the frequency
-		 * to see if it lies within the frequency for clock stretcher.
-		 */
-		for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) {
-			cks_setting = 0;
-			clock_freq = PP_SMC_TO_HOST_UL(
-					data->smc_state_table.GraphicsLevel[j].SclkFrequency);
-			/* Check the allowed frequency against the sclk level[j].
-			 *  Sclk's endianness has already been converted,
-			 *  and it's in 10Khz unit,
-			 *  as opposed to Data table, which is in Mhz unit.
-			 */
-			if (clock_freq >=
-					(fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
-				cks_setting |= 0x2;
-				if (clock_freq <
-						(fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
-					cks_setting |= 0x1;
-			}
-			data->smc_state_table.ClockStretcherDataTable.
-			ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
-		}
-		CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.
-				ClockStretcherDataTable.
-				ClockStretcherDataTableEntry[i].setting);
-	}
-
-	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
-	value &= 0xFFFFFFFE;
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
-
-	return 0;
-}
-
-/**
-* Populates the SMC VRConfig field in DPM table.
-*
-* @param    hwmgr   the address of the hardware manager
-* @param    table   the SMC DPM table structure to be populated
-* @return   always 0
-*/
-static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
-		struct SMU73_Discrete_DpmTable *table)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	uint16_t config;
-
-	config = VR_MERGED_WITH_VDDC;
-	table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
-
-	/* Set Vddc Voltage Controller */
-	if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-		config = VR_SVI2_PLANE_1;
-		table->VRConfig |= config;
-	} else {
-		PP_ASSERT_WITH_CODE(false,
-				"VDDC should be on SVI2 control in merged mode!",);
-	}
-	/* Set Vddci Voltage Controller */
-	if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
-		config = VR_SVI2_PLANE_2;  /* only in merged mode */
-		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
-	} else if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
-		config = VR_SMIO_PATTERN_1;
-		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
-	} else {
-		config = VR_STATIC_VOLTAGE;
-		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
-	}
-	/* Set Mvdd Voltage Controller */
-	if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
-		config = VR_SVI2_PLANE_2;
-		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-	} else if(FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-		config = VR_SMIO_PATTERN_2;
-		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-	} else {
-		config = VR_STATIC_VOLTAGE;
-		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-	}
-
-	return 0;
-}
-
-/**
-* Initializes the SMC table and uploads it
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput  the pointer to input data (PowerState)
-* @return   always 0
-*/
-static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
-{
-	int result;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct SMU73_Discrete_DpmTable *table = &(data->smc_state_table);
-	const struct fiji_ulv_parm *ulv = &(data->ulv);
-	uint8_t i;
-	struct pp_atomctrl_gpio_pin_assignment gpio_pin;
-
-	result = fiji_setup_default_dpm_tables(hwmgr);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to setup default DPM tables!", return result);
-
-	if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control)
-		fiji_populate_smc_voltage_tables(hwmgr, table);
-
-	table->SystemFlags = 0;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_AutomaticDCTransition))
-		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_StepVddc))
-		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
-	if (data->is_memory_gddr5)
-		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
-	if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) {
-		result = fiji_populate_ulv_state(hwmgr, table);
-		PP_ASSERT_WITH_CODE(0 == result,
-				"Failed to initialize ULV state!", return result);
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-				ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
-	}
-
-	result = fiji_populate_smc_link_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize Link Level!", return result);
-
-	result = fiji_populate_all_graphic_levels(hwmgr);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize Graphics Level!", return result);
-
-	result = fiji_populate_all_memory_levels(hwmgr);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize Memory Level!", return result);
-
-	result = fiji_populate_smc_acpi_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize ACPI Level!", return result);
-
-	result = fiji_populate_smc_vce_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize VCE Level!", return result);
-
-	result = fiji_populate_smc_acp_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize ACP Level!", return result);
-
-	result = fiji_populate_smc_samu_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize SAMU Level!", return result);
-
-	/* Since only the initial state is completely set up at this point
-	 * (the other states are just copies of the boot state) we only
-	 * need to populate the  ARB settings for the initial state.
-	 */
-	result = fiji_program_memory_timing_parameters(hwmgr);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to Write ARB settings for the initial state.", return result);
-
-	result = fiji_populate_smc_uvd_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize UVD Level!", return result);
-
-	result = fiji_populate_smc_boot_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize Boot Level!", return result);
-
-	result = fiji_populate_smc_initailial_state(hwmgr);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize Boot State!", return result);
-
-	result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to populate BAPM Parameters!", return result);
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_ClockStretcher)) {
-		result = fiji_populate_clock_stretcher_data_table(hwmgr);
-		PP_ASSERT_WITH_CODE(0 == result,
-				"Failed to populate Clock Stretcher Data Table!",
-				return result);
-	}
-
-	table->GraphicsVoltageChangeEnable  = 1;
-	table->GraphicsThermThrottleEnable  = 1;
-	table->GraphicsInterval = 1;
-	table->VoltageInterval  = 1;
-	table->ThermalInterval  = 1;
-	table->TemperatureLimitHigh =
-			table_info->cac_dtp_table->usTargetOperatingTemp *
-			FIJI_Q88_FORMAT_CONVERSION_UNIT;
-	table->TemperatureLimitLow  =
-			(table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
-			FIJI_Q88_FORMAT_CONVERSION_UNIT;
-	table->MemoryVoltageChangeEnable = 1;
-	table->MemoryInterval = 1;
-	table->VoltageResponseTime = 0;
-	table->PhaseResponseTime = 0;
-	table->MemoryThermThrottleEnable = 1;
-	table->PCIeBootLinkLevel = 0;      /* 0:Gen1 1:Gen2 2:Gen3*/
-	table->PCIeGenInterval = 1;
-	table->VRConfig = 0;
-
-	result = fiji_populate_vr_config(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to populate VRConfig setting!", return result);
-
-	table->ThermGpio = 17;
-	table->SclkStepSize = 0x4000;
-
-	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
-		table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_RegulatorHot);
-	} else {
-		table->VRHotGpio = FIJI_UNUSED_GPIO_PIN;
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_RegulatorHot);
-	}
-
-	if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
-			&gpio_pin)) {
-		table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_AutomaticDCTransition);
-	} else {
-		table->AcDcGpio = FIJI_UNUSED_GPIO_PIN;
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_AutomaticDCTransition);
-	}
-
-	/* Thermal Output GPIO */
-	if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
-			&gpio_pin)) {
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_ThermalOutGPIO);
-
-		table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
-
-		/* For porlarity read GPIOPAD_A with assigned Gpio pin
-		 * since VBIOS will program this register to set 'inactive state',
-		 * driver can then determine 'active state' from this and
-		 * program SMU with correct polarity
-		 */
-		table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
-				(1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
-		table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
-		/* if required, combine VRHot/PCC with thermal out GPIO */
-		if(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_RegulatorHot) &&
-			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-					PHM_PlatformCaps_CombinePCCWithThermalSignal))
-			table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
-	} else {
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_ThermalOutGPIO);
-		table->ThermOutGpio = 17;
-		table->ThermOutPolarity = 1;
-		table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
-	}
-
-	for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
-		table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
-	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
-	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
-	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
-	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
-	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
-	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
-	result = fiji_copy_bytes_to_smc(hwmgr->smumgr,
-			data->dpm_table_start +
-			offsetof(SMU73_Discrete_DpmTable, SystemFlags),
-			(uint8_t *)&(table->SystemFlags),
-			sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
-			data->sram_end);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to upload dpm data to SMC memory!", return result);
-
-	return 0;
-}
-
-/**
-* Initialize the ARB DRAM timing table's index field.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr)
-{
-	const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	uint32_t tmp;
-	int result;
-
-	/* This is a read-modify-write on the first byte of the ARB table.
-	 * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
-	 * is the field 'current'.
-	 * This solution is ugly, but we never write the whole table only
-	 * individual fields in it.
-	 * In reality this field should not be in that structure
-	 * but in a soft register.
-	 */
-	result = fiji_read_smc_sram_dword(hwmgr->smumgr,
-			data->arb_table_start, &tmp, data->sram_end);
-
-	if (result)
-		return result;
-
-	tmp &= 0x00FFFFFF;
-	tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
-	return fiji_write_smc_sram_dword(hwmgr->smumgr,
-			data->arb_table_start,  tmp, data->sram_end);
-}
-
-static int fiji_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
-{
-	if(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_RegulatorHot))
-		return smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_EnableVRHotGPIOInterrupt);
-
-	return 0;
-}
-
-static int fiji_enable_sclk_control(struct pp_hwmgr *hwmgr)
-{
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
-			SCLK_PWRMGT_OFF, 0);
-	return 0;
-}
-
-static int fiji_enable_ulv(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct fiji_ulv_parm *ulv = &(data->ulv);
-
-	if (ulv->ulv_supported)
-		return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
-
-	return 0;
-}
-
-static int fiji_disable_ulv(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct fiji_ulv_parm *ulv = &(data->ulv);
-
-	if (ulv->ulv_supported)
-		return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
-
-	return 0;
-}
-
-static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_SclkDeepSleep)) {
-		if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to enable Master Deep Sleep switch failed!",
-					return -1);
-	} else {
-		if (smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to disable Master Deep Sleep switch failed!",
-					return -1);
-		}
-	}
-
-	return 0;
-}
-
-static int fiji_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_SclkDeepSleep)) {
-		if (smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to disable Master Deep Sleep switch failed!",
-					return -1);
-		}
-	}
-
-	return 0;
-}
-
-static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	uint32_t   val, val0, val2;
-	uint32_t   i, cpl_cntl, cpl_threshold, mc_threshold;
-
-	/* enable SCLK dpm */
-	if(!data->sclk_dpm_key_disabled)
-		PP_ASSERT_WITH_CODE(
-		(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
-		"Failed to enable SCLK DPM during DPM Start Function!",
-		return -1);
-
-	/* enable MCLK dpm */
-	if(0 == data->mclk_dpm_key_disabled) {
-		cpl_threshold = 0;
-		mc_threshold = 0;
-
-		/* Read per MCD tile (0 - 7) */
-		for (i = 0; i < 8; i++) {
-			PHM_WRITE_FIELD(hwmgr->device, MC_CONFIG_MCD, MC_RD_ENABLE, i);
-			val = cgs_read_register(hwmgr->device, mmMC_SEQ_RESERVE_0_S) & 0xf0000000;
-			if (0xf0000000 != val) {
-				/* count number of MCQ that has channel(s) enabled */
-				cpl_threshold++;
-				/* only harvest 3 or full 4 supported */
-				mc_threshold = val ? 3 : 4;
-			}
-		}
-		PP_ASSERT_WITH_CODE(0 != cpl_threshold,
-				"Number of MCQ is zero!", return -EINVAL;);
-
-		mc_threshold = ((mc_threshold & LCAC_MC0_CNTL__MC0_THRESHOLD_MASK) <<
-				LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT) |
-						LCAC_MC0_CNTL__MC0_ENABLE_MASK;
-		cpl_cntl = ((cpl_threshold & LCAC_CPL_CNTL__CPL_THRESHOLD_MASK) <<
-				LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT) |
-						LCAC_CPL_CNTL__CPL_ENABLE_MASK;
-		cpl_cntl = (cpl_cntl | (8 << LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT));
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-				ixLCAC_MC0_CNTL, mc_threshold);
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-				ixLCAC_MC1_CNTL, mc_threshold);
-		if (8 == cpl_threshold) {
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-					ixLCAC_MC2_CNTL, mc_threshold);
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-					ixLCAC_MC3_CNTL, mc_threshold);
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-					ixLCAC_MC4_CNTL, mc_threshold);
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-					ixLCAC_MC5_CNTL, mc_threshold);
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-					ixLCAC_MC6_CNTL, mc_threshold);
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-					ixLCAC_MC7_CNTL, mc_threshold);
-		}
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-				ixLCAC_CPL_CNTL, cpl_cntl);
-
-		udelay(5);
-
-		mc_threshold = mc_threshold |
-				(1 << LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT);
-		cpl_cntl = cpl_cntl | (1 << LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT);
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-				ixLCAC_MC0_CNTL, mc_threshold);
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-				ixLCAC_MC1_CNTL, mc_threshold);
-		if (8 == cpl_threshold) {
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-					ixLCAC_MC2_CNTL, mc_threshold);
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-					ixLCAC_MC3_CNTL, mc_threshold);
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-					ixLCAC_MC4_CNTL, mc_threshold);
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-					ixLCAC_MC5_CNTL, mc_threshold);
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-					ixLCAC_MC6_CNTL, mc_threshold);
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-					ixLCAC_MC7_CNTL, mc_threshold);
-		}
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-				ixLCAC_CPL_CNTL, cpl_cntl);
-
-		/* Program CAC_EN per MCD (0-7) Tile */
-		val0 = val = cgs_read_register(hwmgr->device, mmMC_CONFIG_MCD);
-		val &= ~(MC_CONFIG_MCD__MCD0_WR_ENABLE_MASK |
-				MC_CONFIG_MCD__MCD1_WR_ENABLE_MASK |
-				MC_CONFIG_MCD__MCD2_WR_ENABLE_MASK |
-				MC_CONFIG_MCD__MCD3_WR_ENABLE_MASK |
-				MC_CONFIG_MCD__MCD4_WR_ENABLE_MASK |
-				MC_CONFIG_MCD__MCD5_WR_ENABLE_MASK |
-				MC_CONFIG_MCD__MCD6_WR_ENABLE_MASK |
-				MC_CONFIG_MCD__MCD7_WR_ENABLE_MASK |
-				MC_CONFIG_MCD__MC_RD_ENABLE_MASK);
-
-		for (i = 0; i < 8; i++) {
-			/* Enable MCD i Tile read & write */
-			val2  = (val | (i << MC_CONFIG_MCD__MC_RD_ENABLE__SHIFT) |
-					(1 << i));
-			cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val2);
-			/* Enbale CAC_ON MCD i Tile */
-			val2 = cgs_read_register(hwmgr->device, mmMC_SEQ_CNTL);
-			val2 |= MC_SEQ_CNTL__CAC_EN_MASK;
-			cgs_write_register(hwmgr->device, mmMC_SEQ_CNTL, val2);
-		}
-		/* Set MC_CONFIG_MCD back to its default setting val0 */
-		cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val0);
-
-		PP_ASSERT_WITH_CODE(
-				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-						PPSMC_MSG_MCLKDPM_Enable)),
-				"Failed to enable MCLK DPM during DPM Start Function!",
-				return -1);
-	}
-	return 0;
-}
-
-static int fiji_start_dpm(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	/*enable general power management */
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
-			GLOBAL_PWRMGT_EN, 1);
-	/* enable sclk deep sleep */
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
-			DYNAMIC_PM_EN, 1);
-	/* prepare for PCIE DPM */
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			data->soft_regs_start + offsetof(SMU73_SoftRegisters,
-					VoltageChangeTimeout), 0x1000);
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
-			SWRST_COMMAND_1, RESETLC, 0x0);
-
-	PP_ASSERT_WITH_CODE(
-			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-					PPSMC_MSG_Voltage_Cntl_Enable)),
-			"Failed to enable voltage DPM during DPM Start Function!",
-			return -1);
-
-	if (fiji_enable_sclk_mclk_dpm(hwmgr)) {
-		printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
-		return -1;
-	}
-
-	/* enable PCIE dpm */
-	if(!data->pcie_dpm_key_disabled) {
-		PP_ASSERT_WITH_CODE(
-				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-						PPSMC_MSG_PCIeDPM_Enable)),
-				"Failed to enable pcie DPM during DPM Start Function!",
-				return -1);
-	}
-
-	return 0;
-}
-
-static int fiji_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	/* disable SCLK dpm */
-	if (!data->sclk_dpm_key_disabled)
-		PP_ASSERT_WITH_CODE(
-				(smum_send_msg_to_smc(hwmgr->smumgr,
-						PPSMC_MSG_DPM_Disable) == 0),
-				"Failed to disable SCLK DPM!",
-				return -1);
-
-	/* disable MCLK dpm */
-	if (!data->mclk_dpm_key_disabled) {
-		PP_ASSERT_WITH_CODE(
-				(smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-				PPSMC_MSG_MCLKDPM_SetEnabledMask, 1) == 0),
-				"Failed to force MCLK DPM0!",
-				return -1);
-
-		PP_ASSERT_WITH_CODE(
-				(smum_send_msg_to_smc(hwmgr->smumgr,
-						PPSMC_MSG_MCLKDPM_Disable) == 0),
-				"Failed to disable MCLK DPM!",
-				return -1);
-	}
-
-	return 0;
-}
-
-static int fiji_stop_dpm(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	/* disable general power management */
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
-			GLOBAL_PWRMGT_EN, 0);
-	/* disable sclk deep sleep */
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
-			DYNAMIC_PM_EN, 0);
-
-	/* disable PCIE dpm */
-	if (!data->pcie_dpm_key_disabled) {
-		PP_ASSERT_WITH_CODE(
-				(smum_send_msg_to_smc(hwmgr->smumgr,
-						PPSMC_MSG_PCIeDPM_Disable) == 0),
-				"Failed to disable pcie DPM during DPM Stop Function!",
-				return -1);
-	}
-
-	if (fiji_disable_sclk_mclk_dpm(hwmgr)) {
-		printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
-		return -1;
-	}
-
-	PP_ASSERT_WITH_CODE(
-			(smum_send_msg_to_smc(hwmgr->smumgr,
-					PPSMC_MSG_Voltage_Cntl_Disable) == 0),
-			"Failed to disable voltage DPM during DPM Stop Function!",
-			return -1);
-
-	return 0;
-}
-
-static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
-		uint32_t sources)
-{
-	bool protection;
-	enum DPM_EVENT_SRC src;
-
-	switch (sources) {
-	default:
-		printk(KERN_ERR "Unknown throttling event sources.");
-		/* fall through */
-	case 0:
-		protection = false;
-		/* src is unused */
-		break;
-	case (1 << PHM_AutoThrottleSource_Thermal):
-		protection = true;
-		src = DPM_EVENT_SRC_DIGITAL;
-		break;
-	case (1 << PHM_AutoThrottleSource_External):
-		protection = true;
-		src = DPM_EVENT_SRC_EXTERNAL;
-		break;
-	case (1 << PHM_AutoThrottleSource_External) |
-			(1 << PHM_AutoThrottleSource_Thermal):
-		protection = true;
-		src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
-		break;
-	}
-	/* Order matters - don't enable thermal protection for the wrong source. */
-	if (protection) {
-		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
-				DPM_EVENT_SRC, src);
-		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
-				THERMAL_PROTECTION_DIS,
-				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-						PHM_PlatformCaps_ThermalController));
-	} else
-		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
-				THERMAL_PROTECTION_DIS, 1);
-}
-
-static int fiji_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
-		PHM_AutoThrottleSource source)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	if (!(data->active_auto_throttle_sources & (1 << source))) {
-		data->active_auto_throttle_sources |= 1 << source;
-		fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
-	}
-	return 0;
-}
-
-static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
-	return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-static int fiji_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
-		PHM_AutoThrottleSource source)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	if (data->active_auto_throttle_sources & (1 << source)) {
-		data->active_auto_throttle_sources &= ~(1 << source);
-		fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
-	}
-	return 0;
-}
-
-static int fiji_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
-	return fiji_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
-	int tmp_result, result = 0;
-
-	tmp_result = (!fiji_is_dpm_running(hwmgr))? 0 : -1;
-	PP_ASSERT_WITH_CODE(result == 0,
-			"DPM is already running right now, no need to enable DPM!",
-			return 0);
-
-	if (fiji_voltage_control(hwmgr)) {
-		tmp_result = fiji_enable_voltage_control(hwmgr);
-		PP_ASSERT_WITH_CODE(tmp_result == 0,
-				"Failed to enable voltage control!",
-				result = tmp_result);
-	}
-
-	if (fiji_voltage_control(hwmgr)) {
-		tmp_result = fiji_construct_voltage_tables(hwmgr);
-		PP_ASSERT_WITH_CODE((0 == tmp_result),
-				"Failed to contruct voltage tables!",
-				result = tmp_result);
-	}
-
-	tmp_result = fiji_initialize_mc_reg_table(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to initialize MC reg table!", result = tmp_result);
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_EngineSpreadSpectrumSupport))
-		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-				GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_ThermalController))
-		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
-
-	tmp_result = fiji_program_static_screen_threshold_parameters(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to program static screen threshold parameters!",
-			result = tmp_result);
-
-	tmp_result = fiji_enable_display_gap(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable display gap!", result = tmp_result);
-
-	tmp_result = fiji_program_voting_clients(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to program voting clients!", result = tmp_result);
-
-	tmp_result = fiji_process_firmware_header(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to process firmware header!", result = tmp_result);
-
-	tmp_result = fiji_initial_switch_from_arbf0_to_f1(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to initialize switch from ArbF0 to F1!",
-			result = tmp_result);
-
-	tmp_result = fiji_init_smc_table(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to initialize SMC table!", result = tmp_result);
-
-	tmp_result = fiji_init_arb_table_index(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to initialize ARB table index!", result = tmp_result);
-
-	tmp_result = fiji_populate_pm_fuses(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to populate PM fuses!", result = tmp_result);
-
-	tmp_result = fiji_enable_vrhot_gpio_interrupt(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
-
-	tmp_result = tonga_notify_smc_display_change(hwmgr, false);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to notify no display!", result = tmp_result);
-
-	tmp_result = fiji_enable_sclk_control(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable SCLK control!", result = tmp_result);
-
-	tmp_result = fiji_enable_ulv(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable ULV!", result = tmp_result);
-
-	tmp_result = fiji_enable_deep_sleep_master_switch(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable deep sleep master switch!", result = tmp_result);
-
-	tmp_result = fiji_start_dpm(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to start DPM!", result = tmp_result);
-
-	tmp_result = fiji_enable_smc_cac(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable SMC CAC!", result = tmp_result);
-
-	tmp_result = fiji_enable_power_containment(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable power containment!", result = tmp_result);
-
-	tmp_result = fiji_power_control_set_level(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to power control set level!", result = tmp_result);
-
-	tmp_result = fiji_enable_thermal_auto_throttle(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable thermal auto throttle!", result = tmp_result);
-
-	return result;
-}
-
-static int fiji_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
-	int tmp_result, result = 0;
-
-	tmp_result = (fiji_is_dpm_running(hwmgr)) ? 0 : -1;
-	PP_ASSERT_WITH_CODE(tmp_result == 0,
-			"DPM is not running right now, no need to disable DPM!",
-			return 0);
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_ThermalController))
-		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
-
-	tmp_result = fiji_disable_power_containment(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to disable power containment!", result = tmp_result);
-
-	tmp_result = fiji_disable_smc_cac(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to disable SMC CAC!", result = tmp_result);
-
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
-
-	tmp_result = fiji_disable_thermal_auto_throttle(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to disable thermal auto throttle!", result = tmp_result);
-
-	tmp_result = fiji_stop_dpm(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to stop DPM!", result = tmp_result);
-
-	tmp_result = fiji_disable_deep_sleep_master_switch(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to disable deep sleep master switch!", result = tmp_result);
-
-	tmp_result = fiji_disable_ulv(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to disable ULV!", result = tmp_result);
-
-	tmp_result = fiji_clear_voting_clients(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to clear voting clients!", result = tmp_result);
-
-	tmp_result = fiji_reset_to_default(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to reset to default!", result = tmp_result);
-
-	tmp_result = fiji_force_switch_to_arbf0(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to force to switch arbf0!", result = tmp_result);
-
-	return result;
-}
-
-static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	uint32_t level, tmp;
-
-	if (!data->sclk_dpm_key_disabled) {
-		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
-			level = 0;
-			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
-			while (tmp >>= 1)
-				level++;
-			if (level)
-				smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-						PPSMC_MSG_SCLKDPM_SetEnabledMask,
-						(1 << level));
-		}
-	}
-
-	if (!data->mclk_dpm_key_disabled) {
-		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
-			level = 0;
-			tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
-			while (tmp >>= 1)
-				level++;
-			if (level)
-				smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-						PPSMC_MSG_MCLKDPM_SetEnabledMask,
-						(1 << level));
-		}
-	}
-
-	if (!data->pcie_dpm_key_disabled) {
-		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
-			level = 0;
-			tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
-			while (tmp >>= 1)
-				level++;
-			if (level)
-				smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-						PPSMC_MSG_PCIeDPM_ForceLevel,
-						(1 << level));
-		}
-	}
-	return 0;
-}
-
-static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	phm_apply_dal_min_voltage_request(hwmgr);
-
-	if (!data->sclk_dpm_key_disabled) {
-		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_SCLKDPM_SetEnabledMask,
-					data->dpm_level_enable_mask.sclk_dpm_enable_mask);
-	}
-	return 0;
-}
-
-static int fiji_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	if (!fiji_is_dpm_running(hwmgr))
-		return -EINVAL;
-
-	if (!data->pcie_dpm_key_disabled) {
-		smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_PCIeDPM_UnForceLevel);
-	}
-
-	return fiji_upload_dpmlevel_enable_mask(hwmgr);
-}
-
-static uint32_t fiji_get_lowest_enabled_level(
-		struct pp_hwmgr *hwmgr, uint32_t mask)
-{
-	uint32_t level = 0;
-
-	while(0 == (mask & (1 << level)))
-		level++;
-
-	return level;
-}
-
-static int fiji_force_dpm_lowest(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data =
-			(struct fiji_hwmgr *)(hwmgr->backend);
-	uint32_t level;
-
-	if (!data->sclk_dpm_key_disabled)
-		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
-			level = fiji_get_lowest_enabled_level(hwmgr,
-							      data->dpm_level_enable_mask.sclk_dpm_enable_mask);
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-							    PPSMC_MSG_SCLKDPM_SetEnabledMask,
-							    (1 << level));
-
-	}
-
-	if (!data->mclk_dpm_key_disabled) {
-		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
-			level = fiji_get_lowest_enabled_level(hwmgr,
-							      data->dpm_level_enable_mask.mclk_dpm_enable_mask);
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-							    PPSMC_MSG_MCLKDPM_SetEnabledMask,
-							    (1 << level));
-		}
-	}
-
-	if (!data->pcie_dpm_key_disabled) {
-		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
-			level = fiji_get_lowest_enabled_level(hwmgr,
-							      data->dpm_level_enable_mask.pcie_dpm_enable_mask);
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-							    PPSMC_MSG_PCIeDPM_ForceLevel,
-							    (1 << level));
-		}
-	}
-
-	return 0;
-
-}
-static int fiji_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
-				enum amd_dpm_forced_level level)
-{
-	int ret = 0;
-
-	switch (level) {
-	case AMD_DPM_FORCED_LEVEL_HIGH:
-		ret = fiji_force_dpm_highest(hwmgr);
-		if (ret)
-			return ret;
-		break;
-	case AMD_DPM_FORCED_LEVEL_LOW:
-		ret = fiji_force_dpm_lowest(hwmgr);
-		if (ret)
-			return ret;
-		break;
-	case AMD_DPM_FORCED_LEVEL_AUTO:
-		ret = fiji_unforce_dpm_levels(hwmgr);
-		if (ret)
-			return ret;
-		break;
-	default:
-		break;
-	}
-
-	hwmgr->dpm_level = level;
-
-	return ret;
-}
-
-static int fiji_get_power_state_size(struct pp_hwmgr *hwmgr)
-{
-	return sizeof(struct fiji_power_state);
-}
-
-static int fiji_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
-		void *state, struct pp_power_state *power_state,
-		void *pp_table, uint32_t classification_flag)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct fiji_power_state  *fiji_power_state =
-			(struct fiji_power_state *)(&(power_state->hardware));
-	struct fiji_performance_level *performance_level;
-	ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
-	ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
-			(ATOM_Tonga_POWERPLAYTABLE *)pp_table;
-	ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
-			(ATOM_Tonga_SCLK_Dependency_Table *)
-			(((unsigned long)powerplay_table) +
-				le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
-	ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
-			(ATOM_Tonga_MCLK_Dependency_Table *)
-			(((unsigned long)powerplay_table) +
-				le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-
-	/* The following fields are not initialized here: id orderedList allStatesList */
-	power_state->classification.ui_label =
-			(le16_to_cpu(state_entry->usClassification) &
-			ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
-			ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
-	power_state->classification.flags = classification_flag;
-	/* NOTE: There is a classification2 flag in BIOS that is not being used right now */
-
-	power_state->classification.temporary_state = false;
-	power_state->classification.to_be_deleted = false;
-
-	power_state->validation.disallowOnDC =
-			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
-					ATOM_Tonga_DISALLOW_ON_DC));
-
-	power_state->pcie.lanes = 0;
-
-	power_state->display.disableFrameModulation = false;
-	power_state->display.limitRefreshrate = false;
-	power_state->display.enableVariBright =
-			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
-					ATOM_Tonga_ENABLE_VARIBRIGHT));
-
-	power_state->validation.supportedPowerLevels = 0;
-	power_state->uvd_clocks.VCLK = 0;
-	power_state->uvd_clocks.DCLK = 0;
-	power_state->temperatures.min = 0;
-	power_state->temperatures.max = 0;
-
-	performance_level = &(fiji_power_state->performance_levels
-			[fiji_power_state->performance_level_count++]);
-
-	PP_ASSERT_WITH_CODE(
-			(fiji_power_state->performance_level_count < SMU73_MAX_LEVELS_GRAPHICS),
-			"Performance levels exceeds SMC limit!",
-			return -1);
-
-	PP_ASSERT_WITH_CODE(
-			(fiji_power_state->performance_level_count <=
-					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
-			"Performance levels exceeds Driver limit!",
-			return -1);
-
-	/* Performance levels are arranged from low to high. */
-	performance_level->memory_clock = mclk_dep_table->entries
-			[state_entry->ucMemoryClockIndexLow].ulMclk;
-	performance_level->engine_clock = sclk_dep_table->entries
-			[state_entry->ucEngineClockIndexLow].ulSclk;
-	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
-			state_entry->ucPCIEGenLow);
-	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
-			state_entry->ucPCIELaneHigh);
-
-	performance_level = &(fiji_power_state->performance_levels
-			[fiji_power_state->performance_level_count++]);
-	performance_level->memory_clock = mclk_dep_table->entries
-			[state_entry->ucMemoryClockIndexHigh].ulMclk;
-	performance_level->engine_clock = sclk_dep_table->entries
-			[state_entry->ucEngineClockIndexHigh].ulSclk;
-	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
-			state_entry->ucPCIEGenHigh);
-	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
-			state_entry->ucPCIELaneHigh);
-
-	return 0;
-}
-
-static int fiji_get_pp_table_entry(struct pp_hwmgr *hwmgr,
-		unsigned long entry_index, struct pp_power_state *state)
-{
-	int result;
-	struct fiji_power_state *ps;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
-			table_info->vdd_dep_on_mclk;
-
-	state->hardware.magic = PHM_VIslands_Magic;
-
-	ps = (struct fiji_power_state *)(&state->hardware);
-
-	result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state,
-			fiji_get_pp_table_entry_callback_func);
-
-	/* This is the earliest time we have all the dependency table and the VBIOS boot state
-	 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
-	 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state
-	 */
-	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
-		if (dep_mclk_table->entries[0].clk !=
-				data->vbios_boot_state.mclk_bootup_value)
-			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
-					"does not match VBIOS boot MCLK level");
-		if (dep_mclk_table->entries[0].vddci !=
-				data->vbios_boot_state.vddci_bootup_value)
-			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
-					"does not match VBIOS boot VDDCI level");
-	}
-
-	/* set DC compatible flag if this state supports DC */
-	if (!state->validation.disallowOnDC)
-		ps->dc_compatible = true;
-
-	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
-		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
-
-	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
-	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
-
-	if (!result) {
-		uint32_t i;
-
-		switch (state->classification.ui_label) {
-		case PP_StateUILabel_Performance:
-			data->use_pcie_performance_levels = true;
-
-			for (i = 0; i < ps->performance_level_count; i++) {
-				if (data->pcie_gen_performance.max <
-						ps->performance_levels[i].pcie_gen)
-					data->pcie_gen_performance.max =
-							ps->performance_levels[i].pcie_gen;
-
-				if (data->pcie_gen_performance.min >
-						ps->performance_levels[i].pcie_gen)
-					data->pcie_gen_performance.min =
-							ps->performance_levels[i].pcie_gen;
-
-				if (data->pcie_lane_performance.max <
-						ps->performance_levels[i].pcie_lane)
-					data->pcie_lane_performance.max =
-							ps->performance_levels[i].pcie_lane;
-
-				if (data->pcie_lane_performance.min >
-						ps->performance_levels[i].pcie_lane)
-					data->pcie_lane_performance.min =
-							ps->performance_levels[i].pcie_lane;
-			}
-			break;
-		case PP_StateUILabel_Battery:
-			data->use_pcie_power_saving_levels = true;
-
-			for (i = 0; i < ps->performance_level_count; i++) {
-				if (data->pcie_gen_power_saving.max <
-						ps->performance_levels[i].pcie_gen)
-					data->pcie_gen_power_saving.max =
-							ps->performance_levels[i].pcie_gen;
-
-				if (data->pcie_gen_power_saving.min >
-						ps->performance_levels[i].pcie_gen)
-					data->pcie_gen_power_saving.min =
-							ps->performance_levels[i].pcie_gen;
-
-				if (data->pcie_lane_power_saving.max <
-						ps->performance_levels[i].pcie_lane)
-					data->pcie_lane_power_saving.max =
-							ps->performance_levels[i].pcie_lane;
-
-				if (data->pcie_lane_power_saving.min >
-						ps->performance_levels[i].pcie_lane)
-					data->pcie_lane_power_saving.min =
-							ps->performance_levels[i].pcie_lane;
-			}
-			break;
-		default:
-			break;
-		}
-	}
-	return 0;
-}
-
-static int fiji_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
-				struct pp_power_state  *request_ps,
-			const struct pp_power_state *current_ps)
-{
-	struct fiji_power_state *fiji_ps =
-				cast_phw_fiji_power_state(&request_ps->hardware);
-	uint32_t sclk;
-	uint32_t mclk;
-	struct PP_Clocks minimum_clocks = {0};
-	bool disable_mclk_switching;
-	bool disable_mclk_switching_for_frame_lock;
-	struct cgs_display_info info = {0};
-	const struct phm_clock_and_voltage_limits *max_limits;
-	uint32_t i;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	int32_t count;
-	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
-
-	data->battery_state = (PP_StateUILabel_Battery ==
-			request_ps->classification.ui_label);
-
-	PP_ASSERT_WITH_CODE(fiji_ps->performance_level_count == 2,
-				 "VI should always have 2 performance levels",);
-
-	max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
-			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
-			&(hwmgr->dyn_state.max_clock_voltage_on_dc);
-
-	/* Cap clock DPM tables at DC MAX if it is in DC. */
-	if (PP_PowerSource_DC == hwmgr->power_source) {
-		for (i = 0; i < fiji_ps->performance_level_count; i++) {
-			if (fiji_ps->performance_levels[i].memory_clock > max_limits->mclk)
-				fiji_ps->performance_levels[i].memory_clock = max_limits->mclk;
-			if (fiji_ps->performance_levels[i].engine_clock > max_limits->sclk)
-				fiji_ps->performance_levels[i].engine_clock = max_limits->sclk;
-		}
-	}
-
-	fiji_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
-	fiji_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
-
-	fiji_ps->acp_clk = hwmgr->acp_arbiter.acpclk;
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	/*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
-
-	/* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_StablePState)) {
-		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
-		stable_pstate_sclk = (max_limits->sclk * 75) / 100;
-
-		for (count = table_info->vdd_dep_on_sclk->count - 1;
-				count >= 0; count--) {
-			if (stable_pstate_sclk >=
-					table_info->vdd_dep_on_sclk->entries[count].clk) {
-				stable_pstate_sclk =
-						table_info->vdd_dep_on_sclk->entries[count].clk;
-				break;
-			}
-		}
-
-		if (count < 0)
-			stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
-
-		stable_pstate_mclk = max_limits->mclk;
-
-		minimum_clocks.engineClock = stable_pstate_sclk;
-		minimum_clocks.memoryClock = stable_pstate_mclk;
-	}
-
-	if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
-		minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
-	if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
-		minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
-	fiji_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
-	if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
-		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
-				hwmgr->platform_descriptor.overdriveLimit.engineClock),
-				"Overdrive sclk exceeds limit",
-				hwmgr->gfx_arbiter.sclk_over_drive =
-						hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
-		if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
-			fiji_ps->performance_levels[1].engine_clock =
-					hwmgr->gfx_arbiter.sclk_over_drive;
-	}
-
-	if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
-		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
-				hwmgr->platform_descriptor.overdriveLimit.memoryClock),
-				"Overdrive mclk exceeds limit",
-				hwmgr->gfx_arbiter.mclk_over_drive =
-						hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
-		if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
-			fiji_ps->performance_levels[1].memory_clock =
-					hwmgr->gfx_arbiter.mclk_over_drive;
-	}
-
-	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
-				    hwmgr->platform_descriptor.platformCaps,
-				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
-
-	disable_mclk_switching = (1 < info.display_count) ||
-				    disable_mclk_switching_for_frame_lock;
-
-	sclk = fiji_ps->performance_levels[0].engine_clock;
-	mclk = fiji_ps->performance_levels[0].memory_clock;
-
-	if (disable_mclk_switching)
-		mclk = fiji_ps->performance_levels
-		[fiji_ps->performance_level_count - 1].memory_clock;
-
-	if (sclk < minimum_clocks.engineClock)
-		sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
-				max_limits->sclk : minimum_clocks.engineClock;
-
-	if (mclk < minimum_clocks.memoryClock)
-		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
-				max_limits->mclk : minimum_clocks.memoryClock;
-
-	fiji_ps->performance_levels[0].engine_clock = sclk;
-	fiji_ps->performance_levels[0].memory_clock = mclk;
-
-	fiji_ps->performance_levels[1].engine_clock =
-		(fiji_ps->performance_levels[1].engine_clock >=
-				fiji_ps->performance_levels[0].engine_clock) ?
-						fiji_ps->performance_levels[1].engine_clock :
-						fiji_ps->performance_levels[0].engine_clock;
-
-	if (disable_mclk_switching) {
-		if (mclk < fiji_ps->performance_levels[1].memory_clock)
-			mclk = fiji_ps->performance_levels[1].memory_clock;
-
-		fiji_ps->performance_levels[0].memory_clock = mclk;
-		fiji_ps->performance_levels[1].memory_clock = mclk;
-	} else {
-		if (fiji_ps->performance_levels[1].memory_clock <
-				fiji_ps->performance_levels[0].memory_clock)
-			fiji_ps->performance_levels[1].memory_clock =
-					fiji_ps->performance_levels[0].memory_clock;
-	}
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_StablePState)) {
-		for (i = 0; i < fiji_ps->performance_level_count; i++) {
-			fiji_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
-			fiji_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
-			fiji_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
-			fiji_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
-		}
-	}
-
-	return 0;
-}
-
-static int fiji_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
-{
-	const struct phm_set_power_state_input *states =
-			(const struct phm_set_power_state_input *)input;
-	const struct fiji_power_state *fiji_ps =
-			cast_const_phw_fiji_power_state(states->pnew_state);
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
-	uint32_t sclk = fiji_ps->performance_levels
-			[fiji_ps->performance_level_count - 1].engine_clock;
-	struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
-	uint32_t mclk = fiji_ps->performance_levels
-			[fiji_ps->performance_level_count - 1].memory_clock;
-	uint32_t i;
-	struct cgs_display_info info = {0};
-
-	data->need_update_smu7_dpm_table = 0;
-
-	for (i = 0; i < sclk_table->count; i++) {
-		if (sclk == sclk_table->dpm_levels[i].value)
-			break;
-	}
-
-	if (i >= sclk_table->count)
-		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
-	else {
-		if(data->display_timing.min_clock_in_sr !=
-			hwmgr->display_config.min_core_set_clock_in_sr)
-			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
-	}
-
-	for (i = 0; i < mclk_table->count; i++) {
-		if (mclk == mclk_table->dpm_levels[i].value)
-			break;
-	}
-
-	if (i >= mclk_table->count)
-		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	if (data->display_timing.num_existing_displays != info.display_count)
-		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
-
-	return 0;
-}
-
-static uint16_t fiji_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
-		const struct fiji_power_state *fiji_ps)
-{
-	uint32_t i;
-	uint32_t sclk, max_sclk = 0;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct fiji_dpm_table *dpm_table = &data->dpm_table;
-
-	for (i = 0; i < fiji_ps->performance_level_count; i++) {
-		sclk = fiji_ps->performance_levels[i].engine_clock;
-		if (max_sclk < sclk)
-			max_sclk = sclk;
-	}
-
-	for (i = 0; i < dpm_table->sclk_table.count; i++) {
-		if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
-			return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
-					dpm_table->pcie_speed_table.dpm_levels
-					[dpm_table->pcie_speed_table.count - 1].value :
-					dpm_table->pcie_speed_table.dpm_levels[i].value);
-	}
-
-	return 0;
-}
-
-static int fiji_request_link_speed_change_before_state_change(
-		struct pp_hwmgr *hwmgr, const void *input)
-{
-	const struct phm_set_power_state_input *states =
-			(const struct phm_set_power_state_input *)input;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	const struct fiji_power_state *fiji_nps =
-			cast_const_phw_fiji_power_state(states->pnew_state);
-	const struct fiji_power_state *fiji_cps =
-			cast_const_phw_fiji_power_state(states->pcurrent_state);
-
-	uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_nps);
-	uint16_t current_link_speed;
-
-	if (data->force_pcie_gen == PP_PCIEGenInvalid)
-		current_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_cps);
-	else
-		current_link_speed = data->force_pcie_gen;
-
-	data->force_pcie_gen = PP_PCIEGenInvalid;
-	data->pspp_notify_required = false;
-	if (target_link_speed > current_link_speed) {
-		switch(target_link_speed) {
-		case PP_PCIEGen3:
-			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
-				break;
-			data->force_pcie_gen = PP_PCIEGen2;
-			if (current_link_speed == PP_PCIEGen2)
-				break;
-		case PP_PCIEGen2:
-			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
-				break;
-		default:
-			data->force_pcie_gen = fiji_get_current_pcie_speed(hwmgr);
-			break;
-		}
-	} else {
-		if (target_link_speed < current_link_speed)
-			data->pspp_notify_required = true;
-	}
-
-	return 0;
-}
-
-static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	if (0 == data->need_update_smu7_dpm_table)
-		return 0;
-
-	if ((0 == data->sclk_dpm_key_disabled) &&
-		(data->need_update_smu7_dpm_table &
-			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
-		PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
-				    "Trying to freeze SCLK DPM when DPM is disabled",
-				    );
-		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_SCLKDPM_FreezeLevel),
-				"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
-				return -1);
-	}
-
-	if ((0 == data->mclk_dpm_key_disabled) &&
-		(data->need_update_smu7_dpm_table &
-		 DPMTABLE_OD_UPDATE_MCLK)) {
-		PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
-				    "Trying to freeze MCLK DPM when DPM is disabled",
-				    );
-		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_MCLKDPM_FreezeLevel),
-				"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
-				return -1);
-	}
-
-	return 0;
-}
-
-static int fiji_populate_and_upload_sclk_mclk_dpm_levels(
-		struct pp_hwmgr *hwmgr, const void *input)
-{
-	int result = 0;
-	const struct phm_set_power_state_input *states =
-			(const struct phm_set_power_state_input *)input;
-	const struct fiji_power_state *fiji_ps =
-			cast_const_phw_fiji_power_state(states->pnew_state);
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	uint32_t sclk = fiji_ps->performance_levels
-			[fiji_ps->performance_level_count - 1].engine_clock;
-	uint32_t mclk = fiji_ps->performance_levels
-			[fiji_ps->performance_level_count - 1].memory_clock;
-	struct fiji_dpm_table *dpm_table = &data->dpm_table;
-
-	struct fiji_dpm_table *golden_dpm_table = &data->golden_dpm_table;
-	uint32_t dpm_count, clock_percent;
-	uint32_t i;
-
-	if (0 == data->need_update_smu7_dpm_table)
-		return 0;
-
-	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
-		dpm_table->sclk_table.dpm_levels
-		[dpm_table->sclk_table.count - 1].value = sclk;
-
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_OD6PlusinACSupport) ||
-			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-					PHM_PlatformCaps_OD6PlusinDCSupport)) {
-		/* Need to do calculation based on the golden DPM table
-		 * as the Heatmap GPU Clock axis is also based on the default values
-		 */
-			PP_ASSERT_WITH_CODE(
-				(golden_dpm_table->sclk_table.dpm_levels
-						[golden_dpm_table->sclk_table.count - 1].value != 0),
-				"Divide by 0!",
-				return -1);
-			dpm_count = dpm_table->sclk_table.count < 2 ?
-					0 : dpm_table->sclk_table.count - 2;
-			for (i = dpm_count; i > 1; i--) {
-				if (sclk > golden_dpm_table->sclk_table.dpm_levels
-						[golden_dpm_table->sclk_table.count-1].value) {
-					clock_percent =
-						((sclk - golden_dpm_table->sclk_table.dpm_levels
-							[golden_dpm_table->sclk_table.count-1].value) * 100) /
-						golden_dpm_table->sclk_table.dpm_levels
-							[golden_dpm_table->sclk_table.count-1].value;
-
-					dpm_table->sclk_table.dpm_levels[i].value =
-							golden_dpm_table->sclk_table.dpm_levels[i].value +
-							(golden_dpm_table->sclk_table.dpm_levels[i].value *
-								clock_percent)/100;
-
-				} else if (golden_dpm_table->sclk_table.dpm_levels
-						[dpm_table->sclk_table.count-1].value > sclk) {
-					clock_percent =
-						((golden_dpm_table->sclk_table.dpm_levels
-						[golden_dpm_table->sclk_table.count - 1].value - sclk) *
-								100) /
-						golden_dpm_table->sclk_table.dpm_levels
-							[golden_dpm_table->sclk_table.count-1].value;
-
-					dpm_table->sclk_table.dpm_levels[i].value =
-							golden_dpm_table->sclk_table.dpm_levels[i].value -
-							(golden_dpm_table->sclk_table.dpm_levels[i].value *
-									clock_percent) / 100;
-				} else
-					dpm_table->sclk_table.dpm_levels[i].value =
-							golden_dpm_table->sclk_table.dpm_levels[i].value;
-			}
-		}
-	}
-
-	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
-		dpm_table->mclk_table.dpm_levels
-			[dpm_table->mclk_table.count - 1].value = mclk;
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_OD6PlusinACSupport) ||
-			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_OD6PlusinDCSupport)) {
-
-			PP_ASSERT_WITH_CODE(
-					(golden_dpm_table->mclk_table.dpm_levels
-						[golden_dpm_table->mclk_table.count-1].value != 0),
-					"Divide by 0!",
-					return -1);
-			dpm_count = dpm_table->mclk_table.count < 2 ?
-					0 : dpm_table->mclk_table.count - 2;
-			for (i = dpm_count; i > 1; i--) {
-				if (mclk > golden_dpm_table->mclk_table.dpm_levels
-						[golden_dpm_table->mclk_table.count-1].value) {
-					clock_percent = ((mclk -
-							golden_dpm_table->mclk_table.dpm_levels
-							[golden_dpm_table->mclk_table.count-1].value) * 100) /
-							golden_dpm_table->mclk_table.dpm_levels
-							[golden_dpm_table->mclk_table.count-1].value;
-
-					dpm_table->mclk_table.dpm_levels[i].value =
-							golden_dpm_table->mclk_table.dpm_levels[i].value +
-							(golden_dpm_table->mclk_table.dpm_levels[i].value *
-									clock_percent) / 100;
-
-				} else if (golden_dpm_table->mclk_table.dpm_levels
-						[dpm_table->mclk_table.count-1].value > mclk) {
-					clock_percent = ((golden_dpm_table->mclk_table.dpm_levels
-							[golden_dpm_table->mclk_table.count-1].value - mclk) * 100) /
-									golden_dpm_table->mclk_table.dpm_levels
-									[golden_dpm_table->mclk_table.count-1].value;
-
-					dpm_table->mclk_table.dpm_levels[i].value =
-							golden_dpm_table->mclk_table.dpm_levels[i].value -
-							(golden_dpm_table->mclk_table.dpm_levels[i].value *
-									clock_percent) / 100;
-				} else
-					dpm_table->mclk_table.dpm_levels[i].value =
-							golden_dpm_table->mclk_table.dpm_levels[i].value;
-			}
-		}
-	}
-
-	if (data->need_update_smu7_dpm_table &
-			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
-		result = fiji_populate_all_graphic_levels(hwmgr);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
-				return result);
-	}
-
-	if (data->need_update_smu7_dpm_table &
-			(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
-		/*populate MCLK dpm table to SMU7 */
-		result = fiji_populate_all_memory_levels(hwmgr);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
-				return result);
-	}
-
-	return result;
-}
-
-static int fiji_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
-			  struct fiji_single_dpm_table * dpm_table,
-			     uint32_t low_limit, uint32_t high_limit)
-{
-	uint32_t i;
-
-	for (i = 0; i < dpm_table->count; i++) {
-		if ((dpm_table->dpm_levels[i].value < low_limit) ||
-		    (dpm_table->dpm_levels[i].value > high_limit))
-			dpm_table->dpm_levels[i].enabled = false;
-		else
-			dpm_table->dpm_levels[i].enabled = true;
-	}
-	return 0;
-}
-
-static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr,
-		const struct fiji_power_state *fiji_ps)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	uint32_t high_limit_count;
-
-	PP_ASSERT_WITH_CODE((fiji_ps->performance_level_count >= 1),
-			"power state did not have any performance level",
-			return -1);
-
-	high_limit_count = (1 == fiji_ps->performance_level_count) ? 0 : 1;
-
-	fiji_trim_single_dpm_states(hwmgr,
-			&(data->dpm_table.sclk_table),
-			fiji_ps->performance_levels[0].engine_clock,
-			fiji_ps->performance_levels[high_limit_count].engine_clock);
-
-	fiji_trim_single_dpm_states(hwmgr,
-			&(data->dpm_table.mclk_table),
-			fiji_ps->performance_levels[0].memory_clock,
-			fiji_ps->performance_levels[high_limit_count].memory_clock);
-
-	return 0;
-}
-
-static int fiji_generate_dpm_level_enable_mask(
-		struct pp_hwmgr *hwmgr, const void *input)
-{
-	int result;
-	const struct phm_set_power_state_input *states =
-			(const struct phm_set_power_state_input *)input;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	const struct fiji_power_state *fiji_ps =
-			cast_const_phw_fiji_power_state(states->pnew_state);
-
-	result = fiji_trim_dpm_states(hwmgr, fiji_ps);
-	if (result)
-		return result;
-
-	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
-			fiji_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
-	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
-			fiji_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
-	data->last_mclk_dpm_enable_mask =
-			data->dpm_level_enable_mask.mclk_dpm_enable_mask;
-
-	if (data->uvd_enabled) {
-		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
-			data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
-	}
-
-	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
-			fiji_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
-
-	return 0;
-}
-
-int fiji_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
-				  (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable :
-				  (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable);
-}
-
-int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	return smum_send_msg_to_smc(hwmgr->smumgr, enable?
-			PPSMC_MSG_VCEDPM_Enable :
-			PPSMC_MSG_VCEDPM_Disable);
-}
-
-int fiji_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	return smum_send_msg_to_smc(hwmgr->smumgr, enable?
-			PPSMC_MSG_SAMUDPM_Enable :
-			PPSMC_MSG_SAMUDPM_Disable);
-}
-
-int fiji_enable_disable_acp_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	return smum_send_msg_to_smc(hwmgr->smumgr, enable?
-			PPSMC_MSG_ACPDPM_Enable :
-			PPSMC_MSG_ACPDPM_Disable);
-}
-
-int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	uint32_t mm_boot_level_offset, mm_boot_level_value;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	if (!bgate) {
-		data->smc_state_table.UvdBootLevel = 0;
-		if (table_info->mm_dep_table->count > 0)
-			data->smc_state_table.UvdBootLevel =
-					(uint8_t) (table_info->mm_dep_table->count - 1);
-		mm_boot_level_offset = data->dpm_table_start +
-				offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
-		mm_boot_level_offset /= 4;
-		mm_boot_level_offset *= 4;
-		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-				CGS_IND_REG__SMC, mm_boot_level_offset);
-		mm_boot_level_value &= 0x00FFFFFF;
-		mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
-		cgs_write_ind_register(hwmgr->device,
-				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-		if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_UVDDPM) ||
-			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_StablePState))
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_UVDDPM_SetEnabledMask,
-					(uint32_t)(1 << data->smc_state_table.UvdBootLevel));
-	}
-
-	return fiji_enable_disable_uvd_dpm(hwmgr, !bgate);
-}
-
-int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
-{
-	const struct phm_set_power_state_input *states =
-			(const struct phm_set_power_state_input *)input;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	const struct fiji_power_state *fiji_nps =
-			cast_const_phw_fiji_power_state(states->pnew_state);
-	const struct fiji_power_state *fiji_cps =
-			cast_const_phw_fiji_power_state(states->pcurrent_state);
-
-	uint32_t mm_boot_level_offset, mm_boot_level_value;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	if (fiji_nps->vce_clks.evclk >0 &&
-	(fiji_cps == NULL || fiji_cps->vce_clks.evclk == 0)) {
-		data->smc_state_table.VceBootLevel =
-				(uint8_t) (table_info->mm_dep_table->count - 1);
-
-		mm_boot_level_offset = data->dpm_table_start +
-				offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
-		mm_boot_level_offset /= 4;
-		mm_boot_level_offset *= 4;
-		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-				CGS_IND_REG__SMC, mm_boot_level_offset);
-		mm_boot_level_value &= 0xFF00FFFF;
-		mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
-		cgs_write_ind_register(hwmgr->device,
-				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_StablePState)) {
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_VCEDPM_SetEnabledMask,
-					(uint32_t)1 << data->smc_state_table.VceBootLevel);
-
-			fiji_enable_disable_vce_dpm(hwmgr, true);
-		} else if (fiji_nps->vce_clks.evclk == 0 &&
-				fiji_cps != NULL &&
-				fiji_cps->vce_clks.evclk > 0)
-			fiji_enable_disable_vce_dpm(hwmgr, false);
-	}
-
-	return 0;
-}
-
-int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	uint32_t mm_boot_level_offset, mm_boot_level_value;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	if (!bgate) {
-		data->smc_state_table.SamuBootLevel =
-				(uint8_t) (table_info->mm_dep_table->count - 1);
-		mm_boot_level_offset = data->dpm_table_start +
-				offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
-		mm_boot_level_offset /= 4;
-		mm_boot_level_offset *= 4;
-		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-				CGS_IND_REG__SMC, mm_boot_level_offset);
-		mm_boot_level_value &= 0xFFFFFF00;
-		mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0;
-		cgs_write_ind_register(hwmgr->device,
-				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_StablePState))
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_SAMUDPM_SetEnabledMask,
-					(uint32_t)(1 << data->smc_state_table.SamuBootLevel));
-	}
-
-	return fiji_enable_disable_samu_dpm(hwmgr, !bgate);
-}
-
-int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	uint32_t mm_boot_level_offset, mm_boot_level_value;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	if (!bgate) {
-		data->smc_state_table.AcpBootLevel =
-				(uint8_t) (table_info->mm_dep_table->count - 1);
-		mm_boot_level_offset = data->dpm_table_start +
-				offsetof(SMU73_Discrete_DpmTable, AcpBootLevel);
-		mm_boot_level_offset /= 4;
-		mm_boot_level_offset *= 4;
-		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-				CGS_IND_REG__SMC, mm_boot_level_offset);
-		mm_boot_level_value &= 0xFFFF00FF;
-		mm_boot_level_value |= data->smc_state_table.AcpBootLevel << 8;
-		cgs_write_ind_register(hwmgr->device,
-				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_StablePState))
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-						PPSMC_MSG_ACPDPM_SetEnabledMask,
-						(uint32_t)(1 << data->smc_state_table.AcpBootLevel));
-	}
-
-	return fiji_enable_disable_acp_dpm(hwmgr, !bgate);
-}
-
-static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	int result = 0;
-	uint32_t low_sclk_interrupt_threshold = 0;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_SclkThrottleLowNotification)
-		&& (hwmgr->gfx_arbiter.sclk_threshold !=
-				data->low_sclk_interrupt_threshold)) {
-		data->low_sclk_interrupt_threshold =
-				hwmgr->gfx_arbiter.sclk_threshold;
-		low_sclk_interrupt_threshold =
-				data->low_sclk_interrupt_threshold;
-
-		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
-		result = fiji_copy_bytes_to_smc(
-				hwmgr->smumgr,
-				data->dpm_table_start +
-				offsetof(SMU73_Discrete_DpmTable,
-					LowSclkInterruptThreshold),
-				(uint8_t *)&low_sclk_interrupt_threshold,
-				sizeof(uint32_t),
-				data->sram_end);
-	}
-
-	return result;
-}
-
-static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	if (data->need_update_smu7_dpm_table &
-		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
-		return fiji_program_memory_timing_parameters(hwmgr);
-
-	return 0;
-}
-
-static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	if (0 == data->need_update_smu7_dpm_table)
-		return 0;
-
-	if ((0 == data->sclk_dpm_key_disabled) &&
-		(data->need_update_smu7_dpm_table &
-		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
-
-		PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
-				    "Trying to Unfreeze SCLK DPM when DPM is disabled",
-				    );
-		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_SCLKDPM_UnfreezeLevel),
-			"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
-			return -1);
-	}
-
-	if ((0 == data->mclk_dpm_key_disabled) &&
-		(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
-
-		PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
-				    "Trying to Unfreeze MCLK DPM when DPM is disabled",
-				    );
-		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_SCLKDPM_UnfreezeLevel),
-		    "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
-		    return -1);
-	}
-
-	data->need_update_smu7_dpm_table = 0;
-
-	return 0;
-}
-
-/* Look up the voltaged based on DAL's requested level.
- * and then send the requested VDDC voltage to SMC
- */
-static void fiji_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr)
-{
-	return;
-}
-
-int fiji_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
-{
-	int result;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	/* Apply minimum voltage based on DAL's request level */
-	fiji_apply_dal_minimum_voltage_request(hwmgr);
-
-	if (0 == data->sclk_dpm_key_disabled) {
-		/* Checking if DPM is running.  If we discover hang because of this,
-		 *  we should skip this message.
-		 */
-		if (!fiji_is_dpm_running(hwmgr))
-			printk(KERN_ERR "[ powerplay ] "
-					"Trying to set Enable Mask when DPM is disabled \n");
-
-		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
-			result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_SCLKDPM_SetEnabledMask,
-					data->dpm_level_enable_mask.sclk_dpm_enable_mask);
-			PP_ASSERT_WITH_CODE((0 == result),
-				"Set Sclk Dpm enable Mask failed", return -1);
-		}
-	}
-
-	if (0 == data->mclk_dpm_key_disabled) {
-		/* Checking if DPM is running.  If we discover hang because of this,
-		 *  we should skip this message.
-		 */
-		if (!fiji_is_dpm_running(hwmgr))
-			printk(KERN_ERR "[ powerplay ]"
-					" Trying to set Enable Mask when DPM is disabled \n");
-
-		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
-			result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_MCLKDPM_SetEnabledMask,
-					data->dpm_level_enable_mask.mclk_dpm_enable_mask);
-			PP_ASSERT_WITH_CODE((0 == result),
-				"Set Mclk Dpm enable Mask failed", return -1);
-		}
-	}
-
-	return 0;
-}
-
-static int fiji_notify_link_speed_change_after_state_change(
-		struct pp_hwmgr *hwmgr, const void *input)
-{
-	const struct phm_set_power_state_input *states =
-			(const struct phm_set_power_state_input *)input;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	const struct fiji_power_state *fiji_ps =
-			cast_const_phw_fiji_power_state(states->pnew_state);
-	uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_ps);
-	uint8_t  request;
-
-	if (data->pspp_notify_required) {
-		if (target_link_speed == PP_PCIEGen3)
-			request = PCIE_PERF_REQ_GEN3;
-		else if (target_link_speed == PP_PCIEGen2)
-			request = PCIE_PERF_REQ_GEN2;
-		else
-			request = PCIE_PERF_REQ_GEN1;
-
-		if(request == PCIE_PERF_REQ_GEN1 &&
-				fiji_get_current_pcie_speed(hwmgr) > 0)
-			return 0;
-
-		if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
-			if (PP_PCIEGen2 == target_link_speed)
-				printk("PSPP request to switch to Gen2 from Gen3 Failed!");
-			else
-				printk("PSPP request to switch to Gen1 from Gen2 Failed!");
-		}
-	}
-
-	return 0;
-}
-
-static int fiji_set_power_state_tasks(struct pp_hwmgr *hwmgr,
-		const void *input)
-{
-	int tmp_result, result = 0;
-
-	tmp_result = fiji_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to find DPM states clocks in DPM table!",
-			result = tmp_result);
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_PCIEPerformanceRequest)) {
-		tmp_result =
-			fiji_request_link_speed_change_before_state_change(hwmgr, input);
-		PP_ASSERT_WITH_CODE((0 == tmp_result),
-				"Failed to request link speed change before state change!",
-				result = tmp_result);
-	}
-
-	tmp_result = fiji_freeze_sclk_mclk_dpm(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);
-
-	tmp_result = fiji_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to populate and upload SCLK MCLK DPM levels!",
-			result = tmp_result);
-
-	tmp_result = fiji_generate_dpm_level_enable_mask(hwmgr, input);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to generate DPM level enabled mask!",
-			result = tmp_result);
-
-	tmp_result = fiji_update_vce_dpm(hwmgr, input);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to update VCE DPM!",
-			result = tmp_result);
-
-	tmp_result = fiji_update_sclk_threshold(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to update SCLK threshold!",
-			result = tmp_result);
-
-	tmp_result = fiji_program_mem_timing_parameters(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to program memory timing parameters!",
-			result = tmp_result);
-
-	tmp_result = fiji_unfreeze_sclk_mclk_dpm(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to unfreeze SCLK MCLK DPM!",
-			result = tmp_result);
-
-	tmp_result = fiji_upload_dpm_level_enable_mask(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to upload DPM level enabled mask!",
-			result = tmp_result);
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_PCIEPerformanceRequest)) {
-		tmp_result =
-			fiji_notify_link_speed_change_after_state_change(hwmgr, input);
-		PP_ASSERT_WITH_CODE((0 == tmp_result),
-				"Failed to notify link speed change after state change!",
-				result = tmp_result);
-	}
-
-	return result;
-}
-
-static int fiji_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
-{
-	struct pp_power_state  *ps;
-	struct fiji_power_state  *fiji_ps;
-
-	if (hwmgr == NULL)
-		return -EINVAL;
-
-	ps = hwmgr->request_ps;
-
-	if (ps == NULL)
-		return -EINVAL;
-
-	fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
-	if (low)
-		return fiji_ps->performance_levels[0].engine_clock;
-	else
-		return fiji_ps->performance_levels
-				[fiji_ps->performance_level_count-1].engine_clock;
-}
-
-static int fiji_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
-{
-	struct pp_power_state  *ps;
-	struct fiji_power_state  *fiji_ps;
-
-	if (hwmgr == NULL)
-		return -EINVAL;
-
-	ps = hwmgr->request_ps;
-
-	if (ps == NULL)
-		return -EINVAL;
-
-	fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
-	if (low)
-		return fiji_ps->performance_levels[0].memory_clock;
-	else
-		return fiji_ps->performance_levels
-				[fiji_ps->performance_level_count-1].memory_clock;
-}
-
-static void fiji_print_current_perforce_level(
-		struct pp_hwmgr *hwmgr, struct seq_file *m)
-{
-	uint32_t sclk, mclk, activity_percent = 0;
-	uint32_t offset;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
-
-	sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
-	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
-
-	mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-	seq_printf(m, "\n [  mclk  ]: %u MHz\n\n [  sclk  ]: %u MHz\n",
-			mclk / 100, sclk / 100);
-
-	offset = data->soft_regs_start + offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
-	activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
-	activity_percent += 0x80;
-	activity_percent >>= 8;
-
-	seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
-
-	seq_printf(m, "uvd    %sabled\n", data->uvd_power_gated ? "dis" : "en");
-
-	seq_printf(m, "vce    %sabled\n", data->vce_power_gated ? "dis" : "en");
-}
-
-static int fiji_program_display_gap(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	uint32_t num_active_displays = 0;
-	uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
-			CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
-	uint32_t display_gap2;
-	uint32_t pre_vbi_time_in_us;
-	uint32_t frame_time_in_us;
-	uint32_t ref_clock;
-	uint32_t refresh_rate = 0;
-	struct cgs_display_info info = {0};
-	struct cgs_mode_info mode_info;
-
-	info.mode_info = &mode_info;
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-	num_active_displays = info.display_count;
-
-	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
-			DISP_GAP, (num_active_displays > 0)?
-			DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_DISPLAY_GAP_CNTL, display_gap);
-
-	ref_clock = mode_info.ref_clock;
-	refresh_rate = mode_info.refresh_rate;
-
-	if (refresh_rate == 0)
-		refresh_rate = 60;
-
-	frame_time_in_us = 1000000 / refresh_rate;
-
-	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
-	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_DISPLAY_GAP_CNTL2, display_gap2);
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			data->soft_regs_start +
-			offsetof(SMU73_SoftRegisters, PreVBlankGap), 0x64);
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			data->soft_regs_start +
-			offsetof(SMU73_SoftRegisters, VBlankTimeout),
-			(frame_time_in_us - pre_vbi_time_in_us));
-
-	if (num_active_displays == 1)
-		tonga_notify_smc_display_change(hwmgr, true);
-
-	return 0;
-}
-
-int fiji_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
-{
-	return fiji_program_display_gap(hwmgr);
-}
-
-static int fiji_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr,
-		uint16_t us_max_fan_pwm)
-{
-	hwmgr->thermal_controller.
-	advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
-
-	if (phm_is_hw_access_blocked(hwmgr))
-		return 0;
-
-	return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-			PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
-}
-
-static int fiji_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr,
-		uint16_t us_max_fan_rpm)
-{
-	hwmgr->thermal_controller.
-	advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
-
-	if (phm_is_hw_access_blocked(hwmgr))
-		return 0;
-
-	return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-			PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
-}
-
-int fiji_dpm_set_interrupt_state(void *private_data,
-					 unsigned src_id, unsigned type,
-					 int enabled)
-{
-	uint32_t cg_thermal_int;
-	struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr;
-
-	if (hwmgr == NULL)
-		return -EINVAL;
-
-	switch (type) {
-	case AMD_THERMAL_IRQ_LOW_TO_HIGH:
-		if (enabled) {
-			cg_thermal_int = cgs_read_ind_register(hwmgr->device,
-					CGS_IND_REG__SMC, ixCG_THERMAL_INT);
-			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
-			cgs_write_ind_register(hwmgr->device,
-					CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
-		} else {
-			cg_thermal_int = cgs_read_ind_register(hwmgr->device,
-					CGS_IND_REG__SMC, ixCG_THERMAL_INT);
-			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
-			cgs_write_ind_register(hwmgr->device,
-					CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
-		}
-		break;
-
-	case AMD_THERMAL_IRQ_HIGH_TO_LOW:
-		if (enabled) {
-			cg_thermal_int = cgs_read_ind_register(hwmgr->device,
-					CGS_IND_REG__SMC, ixCG_THERMAL_INT);
-			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
-			cgs_write_ind_register(hwmgr->device,
-					CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
-		} else {
-			cg_thermal_int = cgs_read_ind_register(hwmgr->device,
-					CGS_IND_REG__SMC, ixCG_THERMAL_INT);
-			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
-			cgs_write_ind_register(hwmgr->device,
-					CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
-		}
-		break;
-	default:
-		break;
-	}
-	return 0;
-}
-
-int fiji_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
-					const void *thermal_interrupt_info)
-{
-	int result;
-	const struct pp_interrupt_registration_info *info =
-			(const struct pp_interrupt_registration_info *)
-			thermal_interrupt_info;
-
-	if (info == NULL)
-		return -EINVAL;
-
-	result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST,
-				fiji_dpm_set_interrupt_state,
-				info->call_back, info->context);
-
-	if (result)
-		return -EINVAL;
-
-	result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST,
-				fiji_dpm_set_interrupt_state,
-				info->call_back, info->context);
-
-	if (result)
-		return -EINVAL;
-
-	return 0;
-}
-
-static int fiji_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
-	if (mode) {
-		/* stop auto-manage */
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_MicrocodeFanControl))
-			fiji_fan_ctrl_stop_smc_fan_control(hwmgr);
-		fiji_fan_ctrl_set_static_mode(hwmgr, mode);
-	} else
-		/* restart auto-manage */
-		fiji_fan_ctrl_reset_fan_speed_to_default(hwmgr);
-
-	return 0;
-}
-
-static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr)
-{
-	if (hwmgr->fan_ctrl_is_in_default_mode)
-		return hwmgr->fan_ctrl_default_mode;
-	else
-		return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-				CG_FDO_CTRL2, FDO_PWM_MODE);
-}
-
-static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
-		enum pp_clock_type type, uint32_t mask)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
-		return -EINVAL;
-
-	switch (type) {
-	case PP_SCLK:
-		if (!data->sclk_dpm_key_disabled)
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_SCLKDPM_SetEnabledMask,
-					data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
-		break;
-
-	case PP_MCLK:
-		if (!data->mclk_dpm_key_disabled)
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_MCLKDPM_SetEnabledMask,
-					data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
-		break;
-
-	case PP_PCIE:
-	{
-		uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
-		uint32_t level = 0;
-
-		while (tmp >>= 1)
-			level++;
-
-		if (!data->pcie_dpm_key_disabled)
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_PCIeDPM_ForceLevel,
-					level);
-		break;
-	}
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr,
-		enum pp_clock_type type, char *buf)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
-	struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
-	struct fiji_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
-	int i, now, size = 0;
-	uint32_t clock, pcie_speed;
-
-	switch (type) {
-	case PP_SCLK:
-		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
-		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
-		for (i = 0; i < sclk_table->count; i++) {
-			if (clock > sclk_table->dpm_levels[i].value)
-				continue;
-			break;
-		}
-		now = i;
-
-		for (i = 0; i < sclk_table->count; i++)
-			size += sprintf(buf + size, "%d: %uMhz %s\n",
-					i, sclk_table->dpm_levels[i].value / 100,
-					(i == now) ? "*" : "");
-		break;
-	case PP_MCLK:
-		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
-		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
-		for (i = 0; i < mclk_table->count; i++) {
-			if (clock > mclk_table->dpm_levels[i].value)
-				continue;
-			break;
-		}
-		now = i;
-
-		for (i = 0; i < mclk_table->count; i++)
-			size += sprintf(buf + size, "%d: %uMhz %s\n",
-					i, mclk_table->dpm_levels[i].value / 100,
-					(i == now) ? "*" : "");
-		break;
-	case PP_PCIE:
-		pcie_speed = fiji_get_current_pcie_speed(hwmgr);
-		for (i = 0; i < pcie_table->count; i++) {
-			if (pcie_speed != pcie_table->dpm_levels[i].value)
-				continue;
-			break;
-		}
-		now = i;
-
-		for (i = 0; i < pcie_table->count; i++)
-			size += sprintf(buf + size, "%d: %s %s\n", i,
-					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
-					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
-					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
-					(i == now) ? "*" : "");
-		break;
-	default:
-		break;
-	}
-	return size;
-}
-
-static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1,
-							   const struct fiji_performance_level *pl2)
-{
-	return ((pl1->memory_clock == pl2->memory_clock) &&
-		  (pl1->engine_clock == pl2->engine_clock) &&
-		  (pl1->pcie_gen == pl2->pcie_gen) &&
-		  (pl1->pcie_lane == pl2->pcie_lane));
-}
-
-int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
-{
-	const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
-	const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
-	int i;
-
-	if (equal == NULL || psa == NULL || psb == NULL)
-		return -EINVAL;
-
-	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
-	if (psa->performance_level_count != psb->performance_level_count) {
-		*equal = false;
-		return 0;
-	}
-
-	for (i = 0; i < psa->performance_level_count; i++) {
-		if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
-			/* If we have found even one performance level pair that is different the states are different. */
-			*equal = false;
-			return 0;
-		}
-	}
-
-	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
-	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
-	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
-	*equal &= (psa->sclk_threshold == psb->sclk_threshold);
-	*equal &= (psa->acp_clk == psb->acp_clk);
-
-	return 0;
-}
-
-bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	bool is_update_required = false;
-	struct cgs_display_info info = {0,0,NULL};
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	if (data->display_timing.num_existing_displays != info.display_count)
-		is_update_required = true;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
-		if(hwmgr->display_config.min_core_set_clock_in_sr != data->display_timing.min_clock_in_sr)
-			is_update_required = true;
-	}
-
-	return is_update_required;
-}
-
-static int fiji_get_sclk_od(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
-	struct fiji_single_dpm_table *golden_sclk_table =
-			&(data->golden_dpm_table.sclk_table);
-	int value;
-
-	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
-			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
-			100 /
-			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
-	return value;
-}
-
-static int fiji_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct fiji_single_dpm_table *golden_sclk_table =
-			&(data->golden_dpm_table.sclk_table);
-	struct pp_power_state  *ps;
-	struct fiji_power_state  *fiji_ps;
-
-	if (value > 20)
-		value = 20;
-
-	ps = hwmgr->request_ps;
-
-	if (ps == NULL)
-		return -EINVAL;
-
-	fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
-	fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].engine_clock =
-			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
-			value / 100 +
-			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
-	return 0;
-}
-
-static int fiji_get_mclk_od(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
-	struct fiji_single_dpm_table *golden_mclk_table =
-			&(data->golden_dpm_table.mclk_table);
-	int value;
-
-	value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
-			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
-			100 /
-			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
-	return value;
-}
-
-static int fiji_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct fiji_single_dpm_table *golden_mclk_table =
-			&(data->golden_dpm_table.mclk_table);
-	struct pp_power_state  *ps;
-	struct fiji_power_state  *fiji_ps;
-
-	if (value > 20)
-		value = 20;
-
-	ps = hwmgr->request_ps;
-
-	if (ps == NULL)
-		return -EINVAL;
-
-	fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
-	fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].memory_clock =
-			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
-			value / 100 +
-			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
-	return 0;
-}
-
-static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
-	.backend_init = &fiji_hwmgr_backend_init,
-	.backend_fini = &fiji_hwmgr_backend_fini,
-	.asic_setup = &fiji_setup_asic_task,
-	.dynamic_state_management_enable = &fiji_enable_dpm_tasks,
-	.dynamic_state_management_disable = &fiji_disable_dpm_tasks,
-	.force_dpm_level = &fiji_dpm_force_dpm_level,
-	.get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries,
-	.get_power_state_size = &fiji_get_power_state_size,
-	.get_pp_table_entry = &fiji_get_pp_table_entry,
-	.patch_boot_state = &fiji_patch_boot_state,
-	.apply_state_adjust_rules = &fiji_apply_state_adjust_rules,
-	.power_state_set = &fiji_set_power_state_tasks,
-	.get_sclk = &fiji_dpm_get_sclk,
-	.get_mclk = &fiji_dpm_get_mclk,
-	.print_current_perforce_level = &fiji_print_current_perforce_level,
-	.powergate_uvd = &fiji_phm_powergate_uvd,
-	.powergate_vce = &fiji_phm_powergate_vce,
-	.disable_clock_power_gating = &fiji_phm_disable_clock_power_gating,
-	.notify_smc_display_config_after_ps_adjustment =
-			&tonga_notify_smc_display_config_after_ps_adjustment,
-	.display_config_changed = &fiji_display_configuration_changed_task,
-	.set_max_fan_pwm_output = fiji_set_max_fan_pwm_output,
-	.set_max_fan_rpm_output = fiji_set_max_fan_rpm_output,
-	.get_temperature = fiji_thermal_get_temperature,
-	.stop_thermal_controller = fiji_thermal_stop_thermal_controller,
-	.get_fan_speed_info = fiji_fan_ctrl_get_fan_speed_info,
-	.get_fan_speed_percent = fiji_fan_ctrl_get_fan_speed_percent,
-	.set_fan_speed_percent = fiji_fan_ctrl_set_fan_speed_percent,
-	.reset_fan_speed_to_default = fiji_fan_ctrl_reset_fan_speed_to_default,
-	.get_fan_speed_rpm = fiji_fan_ctrl_get_fan_speed_rpm,
-	.set_fan_speed_rpm = fiji_fan_ctrl_set_fan_speed_rpm,
-	.uninitialize_thermal_controller = fiji_thermal_ctrl_uninitialize_thermal_controller,
-	.register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt,
-	.set_fan_control_mode = fiji_set_fan_control_mode,
-	.get_fan_control_mode = fiji_get_fan_control_mode,
-	.check_states_equal = fiji_check_states_equal,
-	.check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
-	.force_clock_level = fiji_force_clock_level,
-	.print_clock_levels = fiji_print_clock_levels,
-	.get_sclk_od = fiji_get_sclk_od,
-	.set_sclk_od = fiji_set_sclk_od,
-	.get_mclk_od = fiji_get_mclk_od,
-	.set_mclk_od = fiji_set_mclk_od,
-};
-
-int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
-{
-	hwmgr->hwmgr_func = &fiji_hwmgr_funcs;
-	hwmgr->pptable_func = &tonga_pptable_funcs;
-	pp_fiji_thermal_initialize(hwmgr);
-	return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
deleted file mode 100644
index bf67c2a..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _FIJI_HWMGR_H_
-#define _FIJI_HWMGR_H_
-
-#include "hwmgr.h"
-#include "smu73.h"
-#include "smu73_discrete.h"
-#include "ppatomctrl.h"
-#include "fiji_ppsmc.h"
-#include "pp_endian.h"
-
-#define FIJI_MAX_HARDWARE_POWERLEVELS	2
-#define FIJI_AT_DFLT	30
-
-#define FIJI_VOLTAGE_CONTROL_NONE                   0x0
-#define FIJI_VOLTAGE_CONTROL_BY_GPIO                0x1
-#define FIJI_VOLTAGE_CONTROL_BY_SVID2               0x2
-#define FIJI_VOLTAGE_CONTROL_MERGED                 0x3
-
-#define DPMTABLE_OD_UPDATE_SCLK     0x00000001
-#define DPMTABLE_OD_UPDATE_MCLK     0x00000002
-#define DPMTABLE_UPDATE_SCLK        0x00000004
-#define DPMTABLE_UPDATE_MCLK        0x00000008
-
-struct fiji_performance_level {
-	uint32_t  memory_clock;
-	uint32_t  engine_clock;
-	uint16_t  pcie_gen;
-	uint16_t  pcie_lane;
-};
-
-struct fiji_uvd_clocks {
-	uint32_t  vclk;
-	uint32_t  dclk;
-};
-
-struct fiji_vce_clocks {
-	uint32_t  evclk;
-	uint32_t  ecclk;
-};
-
-struct fiji_power_state {
-    uint32_t                  magic;
-    struct fiji_uvd_clocks    uvd_clks;
-    struct fiji_vce_clocks    vce_clks;
-    uint32_t                  sam_clk;
-    uint32_t                  acp_clk;
-    uint16_t                  performance_level_count;
-    bool                      dc_compatible;
-    uint32_t                  sclk_threshold;
-    struct fiji_performance_level  performance_levels[FIJI_MAX_HARDWARE_POWERLEVELS];
-};
-
-struct fiji_dpm_level {
-	bool	enabled;
-    uint32_t	value;
-    uint32_t	param1;
-};
-
-#define FIJI_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define MAX_REGULAR_DPM_NUMBER 8
-#define FIJI_MINIMUM_ENGINE_CLOCK 2500
-
-struct fiji_single_dpm_table {
-	uint32_t		count;
-	struct fiji_dpm_level	dpm_levels[MAX_REGULAR_DPM_NUMBER];
-};
-
-struct fiji_dpm_table {
-	struct fiji_single_dpm_table  sclk_table;
-	struct fiji_single_dpm_table  mclk_table;
-	struct fiji_single_dpm_table  pcie_speed_table;
-	struct fiji_single_dpm_table  vddc_table;
-	struct fiji_single_dpm_table  vddci_table;
-	struct fiji_single_dpm_table  mvdd_table;
-};
-
-struct fiji_clock_registers {
-	uint32_t  vCG_SPLL_FUNC_CNTL;
-	uint32_t  vCG_SPLL_FUNC_CNTL_2;
-	uint32_t  vCG_SPLL_FUNC_CNTL_3;
-	uint32_t  vCG_SPLL_FUNC_CNTL_4;
-	uint32_t  vCG_SPLL_SPREAD_SPECTRUM;
-	uint32_t  vCG_SPLL_SPREAD_SPECTRUM_2;
-	uint32_t  vDLL_CNTL;
-	uint32_t  vMCLK_PWRMGT_CNTL;
-	uint32_t  vMPLL_AD_FUNC_CNTL;
-	uint32_t  vMPLL_DQ_FUNC_CNTL;
-	uint32_t  vMPLL_FUNC_CNTL;
-	uint32_t  vMPLL_FUNC_CNTL_1;
-	uint32_t  vMPLL_FUNC_CNTL_2;
-	uint32_t  vMPLL_SS1;
-	uint32_t  vMPLL_SS2;
-};
-
-struct fiji_voltage_smio_registers {
-	uint32_t vS0_VID_LOWER_SMIO_CNTL;
-};
-
-#define FIJI_MAX_LEAKAGE_COUNT  8
-struct fiji_leakage_voltage {
-	uint16_t  count;
-	uint16_t  leakage_id[FIJI_MAX_LEAKAGE_COUNT];
-	uint16_t  actual_voltage[FIJI_MAX_LEAKAGE_COUNT];
-};
-
-struct fiji_vbios_boot_state {
-	uint16_t    mvdd_bootup_value;
-	uint16_t    vddc_bootup_value;
-	uint16_t    vddci_bootup_value;
-	uint32_t    sclk_bootup_value;
-	uint32_t    mclk_bootup_value;
-	uint16_t    pcie_gen_bootup_value;
-	uint16_t    pcie_lane_bootup_value;
-};
-
-struct fiji_bacos {
-	uint32_t                       best_match;
-	uint32_t                       baco_flags;
-	struct fiji_performance_level  performance_level;
-};
-
-/* Ultra Low Voltage parameter structure */
-struct fiji_ulv_parm {
-	bool                           ulv_supported;
-	uint32_t                       cg_ulv_parameter;
-	uint32_t                       ulv_volt_change_delay;
-	struct fiji_performance_level  ulv_power_level;
-};
-
-struct fiji_display_timing {
-	uint32_t  min_clock_in_sr;
-	uint32_t  num_existing_displays;
-};
-
-struct fiji_dpmlevel_enable_mask {
-	uint32_t  uvd_dpm_enable_mask;
-	uint32_t  vce_dpm_enable_mask;
-	uint32_t  acp_dpm_enable_mask;
-	uint32_t  samu_dpm_enable_mask;
-	uint32_t  sclk_dpm_enable_mask;
-	uint32_t  mclk_dpm_enable_mask;
-	uint32_t  pcie_dpm_enable_mask;
-};
-
-struct fiji_pcie_perf_range {
-	uint16_t  max;
-	uint16_t  min;
-};
-
-struct fiji_hwmgr {
-	struct fiji_dpm_table			dpm_table;
-	struct fiji_dpm_table			golden_dpm_table;
-
-	uint32_t						voting_rights_clients0;
-	uint32_t						voting_rights_clients1;
-	uint32_t						voting_rights_clients2;
-	uint32_t						voting_rights_clients3;
-	uint32_t						voting_rights_clients4;
-	uint32_t						voting_rights_clients5;
-	uint32_t						voting_rights_clients6;
-	uint32_t						voting_rights_clients7;
-	uint32_t						static_screen_threshold_unit;
-	uint32_t						static_screen_threshold;
-	uint32_t						voltage_control;
-	uint32_t						vddc_vddci_delta;
-
-	uint32_t						active_auto_throttle_sources;
-
-	struct fiji_clock_registers            clock_registers;
-	struct fiji_voltage_smio_registers      voltage_smio_registers;
-
-	bool                           is_memory_gddr5;
-	uint16_t                       acpi_vddc;
-	bool                           pspp_notify_required;
-	uint16_t                       force_pcie_gen;
-	uint16_t                       acpi_pcie_gen;
-	uint32_t                       pcie_gen_cap;
-	uint32_t                       pcie_lane_cap;
-	uint32_t                       pcie_spc_cap;
-	struct fiji_leakage_voltage          vddc_leakage;
-	struct fiji_leakage_voltage          Vddci_leakage;
-
-	uint32_t                             mvdd_control;
-	uint32_t                             vddc_mask_low;
-	uint32_t                             mvdd_mask_low;
-	uint16_t                            max_vddc_in_pptable;
-	uint16_t                            min_vddc_in_pptable;
-	uint16_t                            max_vddci_in_pptable;
-	uint16_t                            min_vddci_in_pptable;
-	uint32_t                             mclk_strobe_mode_threshold;
-	uint32_t                             mclk_stutter_mode_threshold;
-	uint32_t                             mclk_edc_enable_threshold;
-	uint32_t                             mclk_edcwr_enable_threshold;
-	bool                                is_uvd_enabled;
-	struct fiji_vbios_boot_state        vbios_boot_state;
-
-	bool                           battery_state;
-	bool                           is_tlu_enabled;
-
-	/* ---- SMC SRAM Address of firmware header tables ---- */
-	uint32_t                             sram_end;
-	uint32_t                             dpm_table_start;
-	uint32_t                             soft_regs_start;
-	uint32_t                             mc_reg_table_start;
-	uint32_t                             fan_table_start;
-	uint32_t                             arb_table_start;
-	struct SMU73_Discrete_DpmTable       smc_state_table;
-	struct SMU73_Discrete_Ulv            ulv_setting;
-
-	/* ---- Stuff originally coming from Evergreen ---- */
-	uint32_t                             vddci_control;
-	struct pp_atomctrl_voltage_table     vddc_voltage_table;
-	struct pp_atomctrl_voltage_table     vddci_voltage_table;
-	struct pp_atomctrl_voltage_table     mvdd_voltage_table;
-
-	uint32_t                             mgcg_cgtt_local2;
-	uint32_t                             mgcg_cgtt_local3;
-	uint32_t                             gpio_debug;
-	uint32_t                             mc_micro_code_feature;
-	uint32_t                             highest_mclk;
-	uint16_t                             acpi_vddci;
-	uint8_t                              mvdd_high_index;
-	uint8_t                              mvdd_low_index;
-	bool                                 dll_default_on;
-	bool                                 performance_request_registered;
-
-	/* ---- Low Power Features ---- */
-	struct fiji_bacos                    bacos;
-	struct fiji_ulv_parm                 ulv;
-
-	/* ---- CAC Stuff ---- */
-	uint32_t                       cac_table_start;
-	bool                           cac_configuration_required;
-	bool                           driver_calculate_cac_leakage;
-	bool                           cac_enabled;
-
-	/* ---- DPM2 Parameters ---- */
-	uint32_t                       power_containment_features;
-	bool                           enable_dte_feature;
-	bool                           enable_tdc_limit_feature;
-	bool                           enable_pkg_pwr_tracking_feature;
-	bool                           disable_uvd_power_tune_feature;
-	const struct fiji_pt_defaults  *power_tune_defaults;
-	struct SMU73_Discrete_PmFuses  power_tune_table;
-	uint32_t                       dte_tj_offset;
-	uint32_t                       fast_watermark_threshold;
-
-	/* ---- Phase Shedding ---- */
-	bool                           vddc_phase_shed_control;
-
-	/* ---- DI/DT ---- */
-	struct fiji_display_timing        display_timing;
-
-	/* ---- Thermal Temperature Setting ---- */
-	struct fiji_dpmlevel_enable_mask     dpm_level_enable_mask;
-	uint32_t                             need_update_smu7_dpm_table;
-	uint32_t                             sclk_dpm_key_disabled;
-	uint32_t                             mclk_dpm_key_disabled;
-	uint32_t                             pcie_dpm_key_disabled;
-	uint32_t                             min_engine_clocks;
-	struct fiji_pcie_perf_range          pcie_gen_performance;
-	struct fiji_pcie_perf_range          pcie_lane_performance;
-	struct fiji_pcie_perf_range          pcie_gen_power_saving;
-	struct fiji_pcie_perf_range          pcie_lane_power_saving;
-	bool                                 use_pcie_performance_levels;
-	bool                                 use_pcie_power_saving_levels;
-	uint32_t                             activity_target[SMU73_MAX_LEVELS_GRAPHICS];
-	uint32_t                             mclk_activity_target;
-	uint32_t                             mclk_dpm0_activity_target;
-	uint32_t                             low_sclk_interrupt_threshold;
-	uint32_t                             last_mclk_dpm_enable_mask;
-	bool                                 uvd_enabled;
-
-	/* ---- Power Gating States ---- */
-	bool                           uvd_power_gated;
-	bool                           vce_power_gated;
-	bool                           samu_power_gated;
-	bool                           acp_power_gated;
-	bool                           pg_acp_init;
-	bool                           frtc_enabled;
-	bool                           frtc_status_changed;
-};
-
-/* To convert to Q8.8 format for firmware */
-#define FIJI_Q88_FORMAT_CONVERSION_UNIT             256
-
-enum Fiji_I2CLineID {
-    Fiji_I2CLineID_DDC1 = 0x90,
-    Fiji_I2CLineID_DDC2 = 0x91,
-    Fiji_I2CLineID_DDC3 = 0x92,
-    Fiji_I2CLineID_DDC4 = 0x93,
-    Fiji_I2CLineID_DDC5 = 0x94,
-    Fiji_I2CLineID_DDC6 = 0x95,
-    Fiji_I2CLineID_SCLSDA = 0x96,
-    Fiji_I2CLineID_DDCVGA = 0x97
-};
-
-#define Fiji_I2C_DDC1DATA          0
-#define Fiji_I2C_DDC1CLK           1
-#define Fiji_I2C_DDC2DATA          2
-#define Fiji_I2C_DDC2CLK           3
-#define Fiji_I2C_DDC3DATA          4
-#define Fiji_I2C_DDC3CLK           5
-#define Fiji_I2C_SDA               40
-#define Fiji_I2C_SCL               41
-#define Fiji_I2C_DDC4DATA          65
-#define Fiji_I2C_DDC4CLK           66
-#define Fiji_I2C_DDC5DATA          0x48
-#define Fiji_I2C_DDC5CLK           0x49
-#define Fiji_I2C_DDC6DATA          0x4a
-#define Fiji_I2C_DDC6CLK           0x4b
-#define Fiji_I2C_DDCVGADATA        0x4c
-#define Fiji_I2C_DDCVGACLK         0x4d
-
-#define FIJI_UNUSED_GPIO_PIN       0x7F
-
-extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
-extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr);
-extern int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
-extern int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display);
-int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
-int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-
-#endif /* _FIJI_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
deleted file mode 100644
index 4465845..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
+++ /dev/null
@@ -1,613 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "smumgr.h"
-#include "fiji_hwmgr.h"
-#include "fiji_powertune.h"
-#include "fiji_smumgr.h"
-#include "smu73_discrete.h"
-#include "pp_debug.h"
-
-#define VOLTAGE_SCALE  4
-#define POWERTUNE_DEFAULT_SET_MAX    1
-
-const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
-		/*sviLoadLIneEn,  SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
-		{1,               0xF,             0xFD,
-		/* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
-		0x19,        5,               45}
-};
-
-void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *fiji_hwmgr = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct  phm_ppt_v1_information *table_info =
-			(struct  phm_ppt_v1_information *)(hwmgr->pptable);
-	uint32_t tmp = 0;
-
-	if(table_info &&
-			table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
-			table_info->cac_dtp_table->usPowerTuneDataSetID)
-		fiji_hwmgr->power_tune_defaults =
-				&fiji_power_tune_data_set_array
-				[table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
-	else
-		fiji_hwmgr->power_tune_defaults = &fiji_power_tune_data_set_array[0];
-
-	/* Assume disabled */
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_PowerContainment);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_CAC);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_SQRamping);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_DBRamping);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_TDRamping);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_TCPRamping);
-
-	fiji_hwmgr->dte_tj_offset = tmp;
-
-	if (!tmp) {
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_CAC);
-
-		fiji_hwmgr->fast_watermark_threshold = 100;
-
-		if (hwmgr->powercontainment_enabled) {
-			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				    PHM_PlatformCaps_PowerContainment);
-			tmp = 1;
-			fiji_hwmgr->enable_dte_feature = tmp ? false : true;
-			fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
-			fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
-		}
-	}
-}
-
-/* PPGen has the gain setting generated in x * 100 unit
- * This function is to convert the unit to x * 4096(0x1000) unit.
- *  This is the unit expected by SMC firmware
- */
-static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
-{
-	uint32_t tmp;
-	tmp = raw_setting * 4096 / 100;
-	return (uint16_t)tmp;
-}
-
-static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t* sda)
-{
-	switch (line) {
-	case Fiji_I2CLineID_DDC1 :
-		*scl = Fiji_I2C_DDC1CLK;
-		*sda = Fiji_I2C_DDC1DATA;
-		break;
-	case Fiji_I2CLineID_DDC2 :
-		*scl = Fiji_I2C_DDC2CLK;
-		*sda = Fiji_I2C_DDC2DATA;
-		break;
-	case Fiji_I2CLineID_DDC3 :
-		*scl = Fiji_I2C_DDC3CLK;
-		*sda = Fiji_I2C_DDC3DATA;
-		break;
-	case Fiji_I2CLineID_DDC4 :
-		*scl = Fiji_I2C_DDC4CLK;
-		*sda = Fiji_I2C_DDC4DATA;
-		break;
-	case Fiji_I2CLineID_DDC5 :
-		*scl = Fiji_I2C_DDC5CLK;
-		*sda = Fiji_I2C_DDC5DATA;
-		break;
-	case Fiji_I2CLineID_DDC6 :
-		*scl = Fiji_I2C_DDC6CLK;
-		*sda = Fiji_I2C_DDC6DATA;
-		break;
-	case Fiji_I2CLineID_SCLSDA :
-		*scl = Fiji_I2C_SCL;
-		*sda = Fiji_I2C_SDA;
-		break;
-	case Fiji_I2CLineID_DDCVGA :
-		*scl = Fiji_I2C_DDCVGACLK;
-		*sda = Fiji_I2C_DDCVGADATA;
-		break;
-	default:
-		*scl = 0;
-		*sda = 0;
-		break;
-	}
-}
-
-int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
-	SMU73_Discrete_DpmTable  *dpm_table = &(data->smc_state_table);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
-	struct pp_advance_fan_control_parameters *fan_table=
-			&hwmgr->thermal_controller.advanceFanControlParameters;
-	uint8_t uc_scl, uc_sda;
-
-	/* TDP number of fraction bits are changed from 8 to 7 for Fiji
-	 * as requested by SMC team
-	 */
-	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
-			(uint16_t)(cac_dtp_table->usTDP * 128));
-	dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
-			(uint16_t)(cac_dtp_table->usTDP * 128));
-
-	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
-			"Target Operating Temp is out of Range!",);
-
-	dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
-	dpm_table->GpuTjHyst = 8;
-
-	dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
-
-	/* The following are for new Fiji Multi-input fan/thermal control */
-	dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
-			cac_dtp_table->usTargetOperatingTemp * 256);
-	dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
-			cac_dtp_table->usTemperatureLimitHotspot * 256);
-	dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
-			cac_dtp_table->usTemperatureLimitLiquid1 * 256);
-	dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
-			cac_dtp_table->usTemperatureLimitLiquid2 * 256);
-	dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
-			cac_dtp_table->usTemperatureLimitVrVddc * 256);
-	dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
-			cac_dtp_table->usTemperatureLimitVrMvdd * 256);
-	dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
-			cac_dtp_table->usTemperatureLimitPlx * 256);
-
-	dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
-			scale_fan_gain_settings(fan_table->usFanGainEdge));
-	dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
-			scale_fan_gain_settings(fan_table->usFanGainHotspot));
-	dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
-			scale_fan_gain_settings(fan_table->usFanGainLiquid));
-	dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
-			scale_fan_gain_settings(fan_table->usFanGainVrVddc));
-	dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
-			scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
-	dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
-			scale_fan_gain_settings(fan_table->usFanGainPlx));
-	dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
-			scale_fan_gain_settings(fan_table->usFanGainHbm));
-
-	dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
-	dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
-	dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
-	dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
-
-	get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
-	dpm_table->Liquid_I2C_LineSCL = uc_scl;
-	dpm_table->Liquid_I2C_LineSDA = uc_sda;
-
-	get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
-	dpm_table->Vr_I2C_LineSCL = uc_scl;
-	dpm_table->Vr_I2C_LineSDA = uc_sda;
-
-	get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
-	dpm_table->Plx_I2C_LineSCL = uc_scl;
-	dpm_table->Plx_I2C_LineSDA = uc_sda;
-
-	return 0;
-}
-
-static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
-    struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-    const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
-
-    data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
-    data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
-    data->power_tune_table.SviLoadLineTrimVddC = 3;
-    data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
-    return 0;
-}
-
-static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
-	uint16_t tdc_limit;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
-
-	/* TDC number of fraction bits are changed from 8 to 7
-	 * for Fiji as requested by SMC team
-	 */
-	tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
-	data->power_tune_table.TDC_VDDC_PkgLimit =
-			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
-	data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
-			defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
-	data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
-
-	return 0;
-}
-
-static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
-	uint32_t temp;
-
-	if (fiji_read_smc_sram_dword(hwmgr->smumgr,
-			fuse_table_offset +
-			offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
-			(uint32_t *)&temp, data->sram_end))
-		PP_ASSERT_WITH_CODE(false,
-				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
-				return -EINVAL);
-	else {
-		data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
-		data->power_tune_table.LPMLTemperatureMin =
-				(uint8_t)((temp >> 16) & 0xff);
-		data->power_tune_table.LPMLTemperatureMax =
-				(uint8_t)((temp >> 8) & 0xff);
-		data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
-	}
-	return 0;
-}
-
-static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
-	int i;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	/* Currently not used. Set all to zero. */
-	for (i = 0; i < 16; i++)
-		data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
-	return 0;
-}
-
-static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	if( (hwmgr->thermal_controller.advanceFanControlParameters.
-			usFanOutputSensitivity & (1 << 15)) ||
-			0 == hwmgr->thermal_controller.advanceFanControlParameters.
-			usFanOutputSensitivity )
-		hwmgr->thermal_controller.advanceFanControlParameters.
-		usFanOutputSensitivity = hwmgr->thermal_controller.
-			advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
-	data->power_tune_table.FuzzyFan_PwmSetDelta =
-			PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
-					advanceFanControlParameters.usFanOutputSensitivity);
-	return 0;
-}
-
-static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
-	int i;
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	/* Currently not used. Set all to zero. */
-	for (i = 0; i < 16; i++)
-		data->power_tune_table.GnbLPML[i] = 0;
-
-	return 0;
-}
-
-static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- /*   int  i, min, max;
-    struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-    uint8_t * pHiVID = data->power_tune_table.BapmVddCVidHiSidd;
-    uint8_t * pLoVID = data->power_tune_table.BapmVddCVidLoSidd;
-
-    min = max = pHiVID[0];
-    for (i = 0; i < 8; i++) {
-        if (0 != pHiVID[i]) {
-            if (min > pHiVID[i])
-                min = pHiVID[i];
-            if (max < pHiVID[i])
-                max = pHiVID[i];
-        }
-
-        if (0 != pLoVID[i]) {
-            if (min > pLoVID[i])
-                min = pLoVID[i];
-            if (max < pLoVID[i])
-                max = pLoVID[i];
-        }
-    }
-
-    PP_ASSERT_WITH_CODE((0 != min) && (0 != max), "BapmVddcVidSidd table does not exist!", return int_Failed);
-    data->power_tune_table.GnbLPMLMaxVid = (uint8_t)max;
-    data->power_tune_table.GnbLPMLMinVid = (uint8_t)min;
-*/
-    return 0;
-}
-
-static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	uint16_t HiSidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
-	uint16_t LoSidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
-	struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
-	HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
-	LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
-	data->power_tune_table.BapmVddCBaseLeakageHiSidd =
-			CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
-	data->power_tune_table.BapmVddCBaseLeakageLoSidd =
-			CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
-
-	return 0;
-}
-
-int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	uint32_t pm_fuse_table_offset;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_PowerContainment)) {
-		if (fiji_read_smc_sram_dword(hwmgr->smumgr,
-				SMU7_FIRMWARE_HEADER_LOCATION +
-				offsetof(SMU73_Firmware_Header, PmFuseTable),
-				&pm_fuse_table_offset, data->sram_end))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to get pm_fuse_table_offset Failed!",
-					return -EINVAL);
-
-		/* DW6 */
-		if (fiji_populate_svi_load_line(hwmgr))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to populate SviLoadLine Failed!",
-					return -EINVAL);
-		/* DW7 */
-		if (fiji_populate_tdc_limit(hwmgr))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to populate TDCLimit Failed!", return -EINVAL);
-		/* DW8 */
-		if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to populate TdcWaterfallCtl, "
-					"LPMLTemperature Min and Max Failed!",
-					return -EINVAL);
-
-		/* DW9-DW12 */
-		if (0 != fiji_populate_temperature_scaler(hwmgr))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to populate LPMLTemperatureScaler Failed!",
-					return -EINVAL);
-
-		/* DW13-DW14 */
-		if(fiji_populate_fuzzy_fan(hwmgr))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to populate Fuzzy Fan Control parameters Failed!",
-					return -EINVAL);
-
-		/* DW15-DW18 */
-		if (fiji_populate_gnb_lpml(hwmgr))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to populate GnbLPML Failed!",
-					return -EINVAL);
-
-		/* DW19 */
-		if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to populate GnbLPML Min and Max Vid Failed!",
-					return -EINVAL);
-
-		/* DW20 */
-		if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to populate BapmVddCBaseLeakage Hi and Lo "
-					"Sidd Failed!", return -EINVAL);
-
-		if (fiji_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
-				(uint8_t *)&data->power_tune_table,
-				sizeof(struct SMU73_Discrete_PmFuses), data->sram_end))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to download PmFuseTable Failed!",
-					return -EINVAL);
-	}
-	return 0;
-}
-
-int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	int result = 0;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_CAC)) {
-		int smc_result;
-		smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-				(uint16_t)(PPSMC_MSG_EnableCac));
-		PP_ASSERT_WITH_CODE((0 == smc_result),
-				"Failed to enable CAC in SMC.", result = -1);
-
-        data->cac_enabled = (0 == smc_result) ? true : false;
-	}
-	return result;
-}
-
-int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	int result = 0;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_CAC) && data->cac_enabled) {
-		int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-				(uint16_t)(PPSMC_MSG_DisableCac));
-		PP_ASSERT_WITH_CODE((smc_result == 0),
-				"Failed to disable CAC in SMC.", result = -1);
-
-		data->cac_enabled = false;
-	}
-	return result;
-}
-
-int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	if(data->power_containment_features &
-			POWERCONTAINMENT_FEATURE_PkgPwrLimit)
-		return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-				PPSMC_MSG_PkgPwrSetLimit, n);
-	return 0;
-}
-
-static int fiji_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
-{
-	return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
-			PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
-}
-
-int fiji_enable_power_containment(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	int smc_result;
-	int result = 0;
-
-	data->power_containment_features = 0;
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_PowerContainment)) {
-		if (data->enable_dte_feature) {
-			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-					(uint16_t)(PPSMC_MSG_EnableDTE));
-			PP_ASSERT_WITH_CODE((0 == smc_result),
-					"Failed to enable DTE in SMC.", result = -1;);
-			if (0 == smc_result)
-				data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE;
-		}
-
-		if (data->enable_tdc_limit_feature) {
-			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-					(uint16_t)(PPSMC_MSG_TDCLimitEnable));
-			PP_ASSERT_WITH_CODE((0 == smc_result),
-					"Failed to enable TDCLimit in SMC.", result = -1;);
-			if (0 == smc_result)
-				data->power_containment_features |=
-						POWERCONTAINMENT_FEATURE_TDCLimit;
-		}
-
-		if (data->enable_pkg_pwr_tracking_feature) {
-			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-					(uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
-			PP_ASSERT_WITH_CODE((0 == smc_result),
-					"Failed to enable PkgPwrTracking in SMC.", result = -1;);
-			if (0 == smc_result) {
-				struct phm_cac_tdp_table *cac_table =
-						table_info->cac_dtp_table;
-				uint32_t default_limit =
-					(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
-
-				data->power_containment_features |=
-						POWERCONTAINMENT_FEATURE_PkgPwrLimit;
-
-				if (fiji_set_power_limit(hwmgr, default_limit))
-					printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
-			}
-		}
-	}
-	return result;
-}
-
-int fiji_disable_power_containment(struct pp_hwmgr *hwmgr)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	int result = 0;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_PowerContainment) &&
-			data->power_containment_features) {
-		int smc_result;
-
-		if (data->power_containment_features &
-				POWERCONTAINMENT_FEATURE_TDCLimit) {
-			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-					(uint16_t)(PPSMC_MSG_TDCLimitDisable));
-			PP_ASSERT_WITH_CODE((smc_result == 0),
-					"Failed to disable TDCLimit in SMC.",
-					result = smc_result);
-		}
-
-		if (data->power_containment_features &
-				POWERCONTAINMENT_FEATURE_DTE) {
-			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-					(uint16_t)(PPSMC_MSG_DisableDTE));
-			PP_ASSERT_WITH_CODE((smc_result == 0),
-					"Failed to disable DTE in SMC.",
-					result = smc_result);
-		}
-
-		if (data->power_containment_features &
-				POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
-			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
-					(uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
-			PP_ASSERT_WITH_CODE((smc_result == 0),
-					"Failed to disable PkgPwrTracking in SMC.",
-					result = smc_result);
-		}
-		data->power_containment_features = 0;
-	}
-
-	return result;
-}
-
-int fiji_power_control_set_level(struct pp_hwmgr *hwmgr)
-{
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-	int adjust_percent, target_tdp;
-	int result = 0;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_PowerContainment)) {
-		/* adjustment percentage has already been validated */
-		adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
-				hwmgr->platform_descriptor.TDPAdjustment :
-				(-1 * hwmgr->platform_descriptor.TDPAdjustment);
-		/* SMC requested that target_tdp to be 7 bit fraction in DPM table
-		 * but message to be 8 bit fraction for messages
-		 */
-		target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
-		result = fiji_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
-	}
-
-	return result;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
deleted file mode 100644
index fec7724..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef FIJI_POWERTUNE_H
-#define FIJI_POWERTUNE_H
-
-enum fiji_pt_config_reg_type {
-	FIJI_CONFIGREG_MMR = 0,
-	FIJI_CONFIGREG_SMC_IND,
-	FIJI_CONFIGREG_DIDT_IND,
-	FIJI_CONFIGREG_CACHE,
-	FIJI_CONFIGREG_MAX
-};
-
-/* PowerContainment Features */
-#define POWERCONTAINMENT_FEATURE_DTE             0x00000001
-#define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
-#define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
-
-#define DIDT_SQ_CTRL0__UNUSED_0_MASK             0xffffffc0
-#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT           0x6
-#define DIDT_TD_CTRL0__UNUSED_0_MASK             0xffffffc0
-#define DIDT_TD_CTRL0__UNUSED_0__SHIFT           0x6
-#define DIDT_TCP_CTRL0__UNUSED_0_MASK            0xffffffc0
-#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT          0x6
-#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK                 0xe0000000
-#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001d
-#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK                 0xe0000000
-#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001d
-#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK                0xe0000000
-#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT              0x0000001d
-
-struct fiji_pt_config_reg {
-	uint32_t                           offset;
-	uint32_t                           mask;
-	uint32_t                           shift;
-	uint32_t                           value;
-	enum fiji_pt_config_reg_type       type;
-};
-
-struct fiji_pt_defaults
-{
-    uint8_t   SviLoadLineEn;
-    uint8_t   SviLoadLineVddC;
-    uint8_t   TDC_VDDC_ThrottleReleaseLimitPerc;
-    uint8_t   TDC_MAWt;
-    uint8_t   TdcWaterfallCtl;
-    uint8_t   DTEAmbientTempBase;
-};
-
-void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
-int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
-int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr);
-int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr);
-int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr);
-int fiji_enable_power_containment(struct pp_hwmgr *hwmgr);
-int fiji_disable_power_containment(struct pp_hwmgr *hwmgr);
-int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
-int fiji_power_control_set_level(struct pp_hwmgr *hwmgr);
-
-#endif  /* FIJI_POWERTUNE_H */
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h
deleted file mode 100644
index 8621493..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef FIJI_THERMAL_H
-#define FIJI_THERMAL_H
-
-#include "hwmgr.h"
-
-#define FIJI_THERMAL_HIGH_ALERT_MASK         0x1
-#define FIJI_THERMAL_LOW_ALERT_MASK          0x2
-
-#define FIJI_THERMAL_MINIMUM_TEMP_READING    -256
-#define FIJI_THERMAL_MAXIMUM_TEMP_READING    255
-
-#define FIJI_THERMAL_MINIMUM_ALERT_TEMP      0
-#define FIJI_THERMAL_MAXIMUM_ALERT_TEMP      255
-
-#define FDO_PWM_MODE_STATIC  1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-
-extern int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-
-extern int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr);
-extern int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
-extern int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
-extern int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
-extern int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr);
-extern int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 789f98a..14f8c1f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -24,8 +24,6 @@
 #include "hwmgr.h"
 #include "hardwaremanager.h"
 #include "power_state.h"
-#include "pp_acpi.h"
-#include "amd_acpi.h"
 #include "pp_debug.h"
 
 #define PHM_FUNC_CHECK(hw) \
@@ -34,38 +32,6 @@
 			return -EINVAL;				\
 	} while (0)
 
-void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr)
-{
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS);
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating);
-
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling);
-
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
-
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays);
-
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
-
-	if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
-		acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
-}
-
 bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr)
 {
 	return hwmgr->block_hw_access;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 27e0762..1167205 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -32,13 +32,22 @@
 #include "pp_debug.h"
 #include "ppatomctrl.h"
 #include "ppsmc.h"
-
-#define VOLTAGE_SCALE               4
+#include "pp_acpi.h"
+#include "amd_acpi.h"
 
 extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
-extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
-extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr);
-extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
+
+static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
+static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
+static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+
+uint8_t convert_to_vid(uint16_t vddc)
+{
+	return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
+}
 
 int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
 {
@@ -56,10 +65,12 @@
 	hwmgr->device = pp_init->device;
 	hwmgr->chip_family = pp_init->chip_family;
 	hwmgr->chip_id = pp_init->chip_id;
-	hwmgr->hw_revision = pp_init->rev_id;
 	hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
 	hwmgr->power_source = PP_PowerSource_AC;
-	hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled;
+	hwmgr->pp_table_version = PP_TABLE_V1;
+
+	hwmgr_init_default_caps(hwmgr);
+	hwmgr_set_user_specify_caps(hwmgr);
 
 	switch (hwmgr->chip_family) {
 	case AMDGPU_FAMILY_CZ:
@@ -67,26 +78,38 @@
 		break;
 	case AMDGPU_FAMILY_VI:
 		switch (hwmgr->chip_id) {
+		case CHIP_TOPAZ:
+			topaz_set_asic_special_caps(hwmgr);
+			hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
+						PP_VBI_TIME_SUPPORT_MASK |
+						PP_ENABLE_GFX_CG_THRU_SMU);
+			hwmgr->pp_table_version = PP_TABLE_V0;
+			break;
 		case CHIP_TONGA:
-			tonga_hwmgr_init(hwmgr);
+			tonga_set_asic_special_caps(hwmgr);
+			hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
+						PP_VBI_TIME_SUPPORT_MASK);
 			break;
 		case CHIP_FIJI:
-			fiji_hwmgr_init(hwmgr);
+			fiji_set_asic_special_caps(hwmgr);
+			hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
+						PP_VBI_TIME_SUPPORT_MASK |
+						PP_ENABLE_GFX_CG_THRU_SMU);
 			break;
 		case CHIP_POLARIS11:
 		case CHIP_POLARIS10:
-			polaris10_hwmgr_init(hwmgr);
+			polaris_set_asic_special_caps(hwmgr);
+			hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
 			break;
 		default:
 			return -EINVAL;
 		}
+		smu7_hwmgr_init(hwmgr);
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	phm_init_dynamic_caps(hwmgr);
-
 	return 0;
 }
 
@@ -105,6 +128,8 @@
 	kfree(hwmgr->set_temperature_range.function_list);
 
 	kfree(hwmgr->ps);
+	kfree(hwmgr->current_ps);
+	kfree(hwmgr->request_ps);
 	kfree(hwmgr);
 	return 0;
 }
@@ -129,10 +154,17 @@
 					  sizeof(struct pp_power_state);
 
 	hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL);
-
 	if (hwmgr->ps == NULL)
 		return -ENOMEM;
 
+	hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
+	if (hwmgr->request_ps == NULL)
+		return -ENOMEM;
+
+	hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
+	if (hwmgr->current_ps == NULL)
+		return -ENOMEM;
+
 	state = hwmgr->ps;
 
 	for (i = 0; i < table_entries; i++) {
@@ -140,7 +172,8 @@
 
 		if (state->classification.flags & PP_StateClassificationFlag_Boot) {
 			hwmgr->boot_ps = state;
-			hwmgr->current_ps = hwmgr->request_ps = state;
+			memcpy(hwmgr->current_ps, state, size);
+			memcpy(hwmgr->request_ps, state, size);
 		}
 
 		state->id = i + 1; /* assigned unique num for every power state id */
@@ -150,6 +183,7 @@
 		state = (struct pp_power_state *)((unsigned long)state + size);
 	}
 
+
 	return 0;
 }
 
@@ -182,30 +216,6 @@
 	return 0;
 }
 
-int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
-				uint32_t index, uint32_t value, uint32_t mask)
-{
-	uint32_t i;
-	uint32_t cur_value;
-
-	if (hwmgr == NULL || hwmgr->device == NULL) {
-		printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!");
-		return -EINVAL;
-	}
-
-	for (i = 0; i < hwmgr->usec_timeout; i++) {
-		cur_value = cgs_read_register(hwmgr->device, index);
-		if ((cur_value & mask) != (value & mask))
-			break;
-		udelay(1);
-	}
-
-	/* timeout means wrong logic*/
-	if (i == hwmgr->usec_timeout)
-		return -1;
-	return 0;
-}
-
 
 /**
  * Returns once the part of the register indicated by the mask has
@@ -227,21 +237,7 @@
 	phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
 }
 
-void phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
-					uint32_t indirect_port,
-					uint32_t index,
-					uint32_t value,
-					uint32_t mask)
-{
-	if (hwmgr == NULL || hwmgr->device == NULL) {
-		printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!");
-		return;
-	}
 
-	cgs_write_register(hwmgr->device, indirect_port, index);
-	phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
-				      value, mask);
-}
 
 bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
 {
@@ -403,12 +399,9 @@
 
 	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
 
-	PP_ASSERT_WITH_CODE(count <= max,
-			"Fatal error, can not set up single DPM table entries to exceed max number!",
-			   );
+	dpm_table->count = count > max ? max : count;
 
-	dpm_table->count = count;
-	for (i = 0; i < max; i++)
+	for (i = 0; i < dpm_table->count; i++)
 		dpm_table->dpm_level[i].enabled = false;
 
 	return 0;
@@ -462,6 +455,27 @@
 	return i - 1;
 }
 
+uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
+		uint32_t voltage)
+{
+	uint8_t count = (uint8_t) (voltage_table->count);
+	uint8_t i = 0;
+
+	PP_ASSERT_WITH_CODE((NULL != voltage_table),
+		"Voltage Table empty.", return 0;);
+	PP_ASSERT_WITH_CODE((0 != count),
+		"Voltage Table empty.", return 0;);
+
+	for (i = 0; i < count; i++) {
+		/* find first voltage bigger than requested */
+		if (voltage_table->entries[i].value >= voltage)
+			return i;
+	}
+
+	/* voltage is bigger than max voltage in the table */
+	return i - 1;
+}
+
 uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
 {
 	uint32_t  i;
@@ -549,7 +563,8 @@
 		table_clk_vlt->entries[2].v = 810;
 		table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
 		table_clk_vlt->entries[3].v = 900;
-		pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
+		if (pptable_info != NULL)
+			pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
 		hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
 	}
 
@@ -615,3 +630,186 @@
 	printk(KERN_ERR "DAL requested level can not"
 			" found a available voltage in VDDC DPM Table \n");
 }
+
+void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
+{
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating);
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling);
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays);
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
+
+	if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
+		acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+		PHM_PlatformCaps_DynamicPatchPowerState);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+		PHM_PlatformCaps_EnableSMU7ThermalManagement);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_DynamicPowerManagement);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_SMC);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_DynamicUVDState);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
+	return;
+}
+
+int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
+{
+	if (amdgpu_sclk_deep_sleep_en)
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkDeepSleep);
+	else
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkDeepSleep);
+
+	if (amdgpu_powercontainment)
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			    PHM_PlatformCaps_PowerContainment);
+	else
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			    PHM_PlatformCaps_PowerContainment);
+
+	hwmgr->feature_mask = amdgpu_pp_feature_mask;
+
+	return 0;
+}
+
+int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+				uint32_t sclk, uint16_t id, uint16_t *voltage)
+{
+	uint32_t vol;
+	int ret = 0;
+
+	if (hwmgr->chip_id < CHIP_POLARIS10) {
+		atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
+		if (*voltage >= 2000 || *voltage == 0)
+			*voltage = 1150;
+	} else {
+		ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
+		*voltage = (uint16_t)vol/100;
+	}
+	return ret;
+}
+
+int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+	/* power tune caps Assume disabled */
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_SQRamping);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_DBRamping);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_TDRamping);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_TCPRamping);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+							PHM_PlatformCaps_CAC);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_RegulatorHot);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_AutomaticDCTransition);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_TablelessHardwareInterface);
+
+	if (hwmgr->chip_id == CHIP_POLARIS11)
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_SPLLShutdownSupport);
+	return 0;
+}
+
+int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SQRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_DBRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_TDRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_TCPRamping);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_TablelessHardwareInterface);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_CAC);
+	return 0;
+}
+
+int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SQRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_DBRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_TDRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_TCPRamping);
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_UVDPowerGating);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_VCEPowerGating);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			 PHM_PlatformCaps_TablelessHardwareInterface);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_CAC);
+
+	return 0;
+}
+
+int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SQRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_DBRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_TDRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_TCPRamping);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			 PHM_PlatformCaps_TablelessHardwareInterface);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_CAC);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+		    PHM_PlatformCaps_EVV);
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h
deleted file mode 100644
index f78ffd9..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef POLARIS10_DYN_DEFAULTS_H
-#define POLARIS10_DYN_DEFAULTS_H
-
-
-enum Polaris10dpm_TrendDetection {
-	Polaris10Adpm_TrendDetection_AUTO,
-	Polaris10Adpm_TrendDetection_UP,
-	Polaris10Adpm_TrendDetection_DOWN
-};
-typedef enum Polaris10dpm_TrendDetection Polaris10dpm_TrendDetection;
-
-/*  We need to fill in the default values */
-
-
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0              0x3FFFC102
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1              0x000400
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2              0xC00080
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3              0xC00200
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4              0xC01680
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5              0xC00033
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6              0xC00033
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7              0x3FFFC000
-
-
-#define PPPOLARIS10_THERMALPROTECTCOUNTER_DFLT            0x200
-#define PPPOLARIS10_STATICSCREENTHRESHOLDUNIT_DFLT        0
-#define PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT            0x00C8
-#define PPPOLARIS10_GFXIDLECLOCKSTOPTHRESHOLD_DFLT        0x200
-#define PPPOLARIS10_REFERENCEDIVIDER_DFLT                  4
-
-#define PPPOLARIS10_ULVVOLTAGECHANGEDELAY_DFLT             1687
-
-#define PPPOLARIS10_CGULVPARAMETER_DFLT                    0x00040035
-#define PPPOLARIS10_CGULVCONTROL_DFLT                      0x00007450
-#define PPPOLARIS10_TARGETACTIVITY_DFLT                     50
-#define PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT                10
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
deleted file mode 100644
index 769636a..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ /dev/null
@@ -1,5290 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include <asm/div64.h>
-#include "linux/delay.h"
-#include "pp_acpi.h"
-#include "hwmgr.h"
-#include "polaris10_hwmgr.h"
-#include "polaris10_powertune.h"
-#include "polaris10_dyn_defaults.h"
-#include "polaris10_smumgr.h"
-#include "pp_debug.h"
-#include "ppatomctrl.h"
-#include "atombios.h"
-#include "tonga_pptable.h"
-#include "pppcielanes.h"
-#include "amd_pcie_helpers.h"
-#include "hardwaremanager.h"
-#include "tonga_processpptables.h"
-#include "cgs_common.h"
-#include "smu74.h"
-#include "smu_ucode_xfer_vi.h"
-#include "smu74_discrete.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "oss/oss_3_0_d.h"
-#include "gca/gfx_8_0_d.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-
-#include "polaris10_thermal.h"
-#include "polaris10_clockpowergating.h"
-
-#define MC_CG_ARB_FREQ_F0           0x0a
-#define MC_CG_ARB_FREQ_F1           0x0b
-#define MC_CG_ARB_FREQ_F2           0x0c
-#define MC_CG_ARB_FREQ_F3           0x0d
-
-#define MC_CG_SEQ_DRAMCONF_S0       0x05
-#define MC_CG_SEQ_DRAMCONF_S1       0x06
-#define MC_CG_SEQ_YCLK_SUSPEND      0x04
-#define MC_CG_SEQ_YCLK_RESUME       0x0a
-
-
-#define SMC_RAM_END 0x40000
-
-#define SMC_CG_IND_START            0xc0030000
-#define SMC_CG_IND_END              0xc0040000
-
-#define VOLTAGE_SCALE               4
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
-
-#define VDDC_VDDCI_DELTA            200
-
-#define MEM_FREQ_LOW_LATENCY        25000
-#define MEM_FREQ_HIGH_LATENCY       80000
-
-#define MEM_LATENCY_HIGH            45
-#define MEM_LATENCY_LOW             35
-#define MEM_LATENCY_ERR             0xFFFF
-
-#define MC_SEQ_MISC0_GDDR5_SHIFT 28
-#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
-#define MC_SEQ_MISC0_GDDR5_VALUE 5
-
-
-#define PCIE_BUS_CLK                10000
-#define TCLK                        (PCIE_BUS_CLK / 10)
-
-
-static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] =
-{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
-
-/*  [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min , VID max] */
-static const uint32_t polaris10_clock_stretcher_ddt_table[2][4][4] =
-{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
-  { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/*  [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */
-static const uint8_t polaris10_clock_stretch_amount_conversion[2][6] =
-{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
-
-/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
-enum DPM_EVENT_SRC {
-	DPM_EVENT_SRC_ANALOG = 0,
-	DPM_EVENT_SRC_EXTERNAL = 1,
-	DPM_EVENT_SRC_DIGITAL = 2,
-	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
-	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
-};
-
-static const unsigned long PhwPolaris10_Magic = (unsigned long)(PHM_VIslands_Magic);
-
-struct polaris10_power_state *cast_phw_polaris10_power_state(
-				  struct pp_hw_power_state *hw_ps)
-{
-	PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
-				"Invalid Powerstate Type!",
-				 return NULL);
-
-	return (struct polaris10_power_state *)hw_ps;
-}
-
-const struct polaris10_power_state *cast_const_phw_polaris10_power_state(
-				 const struct pp_hw_power_state *hw_ps)
-{
-	PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
-				"Invalid Powerstate Type!",
-				 return NULL);
-
-	return (const struct polaris10_power_state *)hw_ps;
-}
-
-static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
-	return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
-			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
-			? true : false;
-}
-
-/**
- * Find the MC microcode version and store it in the HwMgr struct
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int phm_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
-{
-	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
-
-	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
-
-	return 0;
-}
-
-uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
-{
-	uint32_t speedCntl = 0;
-
-	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
-	speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
-			ixPCIE_LC_SPEED_CNTL);
-	return((uint16_t)PHM_GET_FIELD(speedCntl,
-			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
-}
-
-int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
-{
-	uint32_t link_width;
-
-	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
-	link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
-			PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
-
-	PP_ASSERT_WITH_CODE((7 >= link_width),
-			"Invalid PCIe lane width!", return 0);
-
-	return decode_pcie_lane_width(link_width);
-}
-
-/**
-* Enable voltage control
-*
-* @param    pHwMgr  the address of the powerplay hardware manager.
-* @return   always PP_Result_OK
-*/
-int polaris10_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
-{
-	PP_ASSERT_WITH_CODE(
-		(hwmgr->smumgr->smumgr_funcs->send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable) == 0),
-		"Failed to enable voltage DPM during DPM Start Function!",
-		return 1;
-	);
-
-	return 0;
-}
-
-/**
-* Checks if we want to support voltage control
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-*/
-static bool polaris10_voltage_control(const struct pp_hwmgr *hwmgr)
-{
-	const struct polaris10_hwmgr *data =
-			(const struct polaris10_hwmgr *)(hwmgr->backend);
-
-	return (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control);
-}
-
-/**
-* Enable voltage control
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int polaris10_enable_voltage_control(struct pp_hwmgr *hwmgr)
-{
-	/* enable voltage control */
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
-
-	return 0;
-}
-
-/**
-* Create Voltage Tables.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int polaris10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)hwmgr->pptable;
-	int result;
-
-	if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-		result = atomctrl_get_voltage_table_v3(hwmgr,
-				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
-				&(data->mvdd_voltage_table));
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to retrieve MVDD table.",
-				return result);
-	} else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
-		result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
-				table_info->vdd_dep_on_mclk);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to retrieve SVI2 MVDD table from dependancy table.",
-				return result;);
-	}
-
-	if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
-		result = atomctrl_get_voltage_table_v3(hwmgr,
-				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
-				&(data->vddci_voltage_table));
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to retrieve VDDCI table.",
-				return result);
-	} else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
-		result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
-				table_info->vdd_dep_on_mclk);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to retrieve SVI2 VDDCI table from dependancy table.",
-				return result);
-	}
-
-	if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-		result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
-				table_info->vddc_lookup_table);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to retrieve SVI2 VDDC table from lookup table.",
-				return result);
-	}
-
-	PP_ASSERT_WITH_CODE(
-			(data->vddc_voltage_table.count <= (SMU74_MAX_LEVELS_VDDC)),
-			"Too many voltage values for VDDC. Trimming to fit state table.",
-			phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDC,
-								&(data->vddc_voltage_table)));
-
-	PP_ASSERT_WITH_CODE(
-			(data->vddci_voltage_table.count <= (SMU74_MAX_LEVELS_VDDCI)),
-			"Too many voltage values for VDDCI. Trimming to fit state table.",
-			phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDCI,
-					&(data->vddci_voltage_table)));
-
-	PP_ASSERT_WITH_CODE(
-			(data->mvdd_voltage_table.count <= (SMU74_MAX_LEVELS_MVDD)),
-			"Too many voltage values for MVDD. Trimming to fit state table.",
-			phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_MVDD,
-							   &(data->mvdd_voltage_table)));
-
-	return 0;
-}
-
-/**
-* Programs static screed detection parameters
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int polaris10_program_static_screen_threshold_parameters(
-							struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	/* Set static screen threshold unit */
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
-			data->static_screen_threshold_unit);
-	/* Set static screen threshold */
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
-			data->static_screen_threshold);
-
-	return 0;
-}
-
-/**
-* Setup display gap for glitch free memory clock switching.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always  0
-*/
-static int polaris10_enable_display_gap(struct pp_hwmgr *hwmgr)
-{
-	uint32_t display_gap =
-			cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-					ixCG_DISPLAY_GAP_CNTL);
-
-	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
-			DISP_GAP, DISPLAY_GAP_IGNORE);
-
-	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
-			DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_DISPLAY_GAP_CNTL, display_gap);
-
-	return 0;
-}
-
-/**
-* Programs activity state transition voting clients
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always  0
-*/
-static int polaris10_program_voting_clients(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	/* Clear reset for voting clients before enabling DPM */
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
-
-	return 0;
-}
-
-static int polaris10_clear_voting_clients(struct pp_hwmgr *hwmgr)
-{
-	/* Reset voting clients before disabling DPM */
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_0, 0);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_1, 0);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_2, 0);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_3, 0);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_4, 0);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_5, 0);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_6, 0);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_FREQ_TRAN_VOTING_7, 0);
-
-	return 0;
-}
-
-/**
-* Get the location of various tables inside the FW image.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always  0
-*/
-static int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
-	uint32_t tmp;
-	int result;
-	bool error = false;
-
-	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
-			SMU7_FIRMWARE_HEADER_LOCATION +
-			offsetof(SMU74_Firmware_Header, DpmTable),
-			&tmp, data->sram_end);
-
-	if (0 == result)
-		data->dpm_table_start = tmp;
-
-	error |= (0 != result);
-
-	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
-			SMU7_FIRMWARE_HEADER_LOCATION +
-			offsetof(SMU74_Firmware_Header, SoftRegisters),
-			&tmp, data->sram_end);
-
-	if (!result) {
-		data->soft_regs_start = tmp;
-		smu_data->soft_regs_start = tmp;
-	}
-
-	error |= (0 != result);
-
-	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
-			SMU7_FIRMWARE_HEADER_LOCATION +
-			offsetof(SMU74_Firmware_Header, mcRegisterTable),
-			&tmp, data->sram_end);
-
-	if (!result)
-		data->mc_reg_table_start = tmp;
-
-	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
-			SMU7_FIRMWARE_HEADER_LOCATION +
-			offsetof(SMU74_Firmware_Header, FanTable),
-			&tmp, data->sram_end);
-
-	if (!result)
-		data->fan_table_start = tmp;
-
-	error |= (0 != result);
-
-	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
-			SMU7_FIRMWARE_HEADER_LOCATION +
-			offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
-			&tmp, data->sram_end);
-
-	if (!result)
-		data->arb_table_start = tmp;
-
-	error |= (0 != result);
-
-	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
-			SMU7_FIRMWARE_HEADER_LOCATION +
-			offsetof(SMU74_Firmware_Header, Version),
-			&tmp, data->sram_end);
-
-	if (!result)
-		hwmgr->microcode_version_info.SMC = tmp;
-
-	error |= (0 != result);
-
-	return error ? -1 : 0;
-}
-
-/* Copy one arb setting to another and then switch the active set.
- * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants.
- */
-static int polaris10_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
-		uint32_t arb_src, uint32_t arb_dest)
-{
-	uint32_t mc_arb_dram_timing;
-	uint32_t mc_arb_dram_timing2;
-	uint32_t burst_time;
-	uint32_t mc_cg_config;
-
-	switch (arb_src) {
-	case MC_CG_ARB_FREQ_F0:
-		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
-		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
-		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-		break;
-	case MC_CG_ARB_FREQ_F1:
-		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
-		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
-		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	switch (arb_dest) {
-	case MC_CG_ARB_FREQ_F0:
-		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
-		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
-		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
-		break;
-	case MC_CG_ARB_FREQ_F1:
-		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
-		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
-		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
-	mc_cg_config |= 0x0000000F;
-	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
-	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
-
-	return 0;
-}
-
-static int polaris10_reset_to_default(struct pp_hwmgr *hwmgr)
-{
-	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
-}
-
-/**
-* Initial switch from ARB F0->F1
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-* This function is to be called from the SetPowerState table.
-*/
-static int polaris10_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
-{
-	return polaris10_copy_and_switch_arb_sets(hwmgr,
-			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-static int polaris10_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
-{
-	uint32_t tmp;
-
-	tmp = (cgs_read_ind_register(hwmgr->device,
-			CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
-			0x0000ff00) >> 8;
-
-	if (tmp == MC_CG_ARB_FREQ_F0)
-		return 0;
-
-	return polaris10_copy_and_switch_arb_sets(hwmgr,
-			tmp, MC_CG_ARB_FREQ_F0);
-}
-
-static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
-	uint32_t i, max_entry;
-
-	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
-			data->use_pcie_power_saving_levels), "No pcie performance levels!",
-			return -EINVAL);
-
-	if (data->use_pcie_performance_levels &&
-			!data->use_pcie_power_saving_levels) {
-		data->pcie_gen_power_saving = data->pcie_gen_performance;
-		data->pcie_lane_power_saving = data->pcie_lane_performance;
-	} else if (!data->use_pcie_performance_levels &&
-			data->use_pcie_power_saving_levels) {
-		data->pcie_gen_performance = data->pcie_gen_power_saving;
-		data->pcie_lane_performance = data->pcie_lane_power_saving;
-	}
-
-	phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
-					SMU74_MAX_LEVELS_LINK,
-					MAX_REGULAR_DPM_NUMBER);
-
-	if (pcie_table != NULL) {
-		/* max_entry is used to make sure we reserve one PCIE level
-		 * for boot level (fix for A+A PSPP issue).
-		 * If PCIE table from PPTable have ULV entry + 8 entries,
-		 * then ignore the last entry.*/
-		max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
-				SMU74_MAX_LEVELS_LINK : pcie_table->count;
-		for (i = 1; i < max_entry; i++) {
-			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
-					get_pcie_gen_support(data->pcie_gen_cap,
-							pcie_table->entries[i].gen_speed),
-					get_pcie_lane_support(data->pcie_lane_cap,
-							pcie_table->entries[i].lane_width));
-		}
-		data->dpm_table.pcie_speed_table.count = max_entry - 1;
-
-		/* Setup BIF_SCLK levels */
-		for (i = 0; i < max_entry; i++)
-			data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
-	} else {
-		/* Hardcode Pcie Table */
-		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
-				get_pcie_gen_support(data->pcie_gen_cap,
-						PP_Min_PCIEGen),
-				get_pcie_lane_support(data->pcie_lane_cap,
-						PP_Max_PCIELane));
-		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
-				get_pcie_gen_support(data->pcie_gen_cap,
-						PP_Min_PCIEGen),
-				get_pcie_lane_support(data->pcie_lane_cap,
-						PP_Max_PCIELane));
-		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
-				get_pcie_gen_support(data->pcie_gen_cap,
-						PP_Max_PCIEGen),
-				get_pcie_lane_support(data->pcie_lane_cap,
-						PP_Max_PCIELane));
-		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
-				get_pcie_gen_support(data->pcie_gen_cap,
-						PP_Max_PCIEGen),
-				get_pcie_lane_support(data->pcie_lane_cap,
-						PP_Max_PCIELane));
-		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
-				get_pcie_gen_support(data->pcie_gen_cap,
-						PP_Max_PCIEGen),
-				get_pcie_lane_support(data->pcie_lane_cap,
-						PP_Max_PCIELane));
-		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
-				get_pcie_gen_support(data->pcie_gen_cap,
-						PP_Max_PCIEGen),
-				get_pcie_lane_support(data->pcie_lane_cap,
-						PP_Max_PCIELane));
-
-		data->dpm_table.pcie_speed_table.count = 6;
-	}
-	/* Populate last level for boot PCIE level, but do not increment count. */
-	phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
-			data->dpm_table.pcie_speed_table.count,
-			get_pcie_gen_support(data->pcie_gen_cap,
-					PP_Min_PCIEGen),
-			get_pcie_lane_support(data->pcie_lane_cap,
-					PP_Max_PCIELane));
-
-	return 0;
-}
-
-/*
- * This function is to initalize all DPM state tables
- * for SMU7 based on the dependency table.
- * Dynamic state patching function will then trim these
- * state tables to the allowed range based
- * on the power policy or external client requests,
- * such as UVD request, etc.
- */
-int polaris10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	uint32_t i;
-
-	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
-			table_info->vdd_dep_on_sclk;
-	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
-			table_info->vdd_dep_on_mclk;
-
-	PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
-			"SCLK dependency table is missing. This table is mandatory",
-			return -EINVAL);
-	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
-			"SCLK dependency table has to have is missing."
-			"This table is mandatory",
-			return -EINVAL);
-
-	PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
-			"MCLK dependency table is missing. This table is mandatory",
-			return -EINVAL);
-	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
-			"MCLK dependency table has to have is missing."
-			"This table is mandatory",
-			return -EINVAL);
-
-	/* clear the state table to reset everything to default */
-	phm_reset_single_dpm_table(
-			&data->dpm_table.sclk_table, SMU74_MAX_LEVELS_GRAPHICS, MAX_REGULAR_DPM_NUMBER);
-	phm_reset_single_dpm_table(
-			&data->dpm_table.mclk_table, SMU74_MAX_LEVELS_MEMORY, MAX_REGULAR_DPM_NUMBER);
-
-
-	/* Initialize Sclk DPM table based on allow Sclk values */
-	data->dpm_table.sclk_table.count = 0;
-	for (i = 0; i < dep_sclk_table->count; i++) {
-		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
-						dep_sclk_table->entries[i].clk) {
-
-			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
-					dep_sclk_table->entries[i].clk;
-
-			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
-					(i == 0) ? true : false;
-			data->dpm_table.sclk_table.count++;
-		}
-	}
-
-	/* Initialize Mclk DPM table based on allow Mclk values */
-	data->dpm_table.mclk_table.count = 0;
-	for (i = 0; i < dep_mclk_table->count; i++) {
-		if (i == 0 || data->dpm_table.mclk_table.dpm_levels
-				[data->dpm_table.mclk_table.count - 1].value !=
-						dep_mclk_table->entries[i].clk) {
-			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
-							dep_mclk_table->entries[i].clk;
-			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
-							(i == 0) ? true : false;
-			data->dpm_table.mclk_table.count++;
-		}
-	}
-
-	/* setup PCIE gen speed levels */
-	polaris10_setup_default_pcie_table(hwmgr);
-
-	/* save a copy of the default DPM table */
-	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
-			sizeof(struct polaris10_dpm_table));
-
-	return 0;
-}
-
-uint8_t convert_to_vid(uint16_t vddc)
-{
-	return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
-}
-
-/**
- * Mvdd table preparation for SMC.
- *
- * @param    *hwmgr The address of the hardware manager.
- * @param    *table The SMC DPM table structure to be populated.
- * @return   0
- */
-static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
-			SMU74_Discrete_DpmTable *table)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint32_t count, level;
-
-	if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-		count = data->mvdd_voltage_table.count;
-		if (count > SMU_MAX_SMIO_LEVELS)
-			count = SMU_MAX_SMIO_LEVELS;
-		for (level = 0; level < count; level++) {
-			table->SmioTable2.Pattern[level].Voltage =
-				PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
-			/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
-			table->SmioTable2.Pattern[level].Smio =
-				(uint8_t) level;
-			table->Smio[level] |=
-				data->mvdd_voltage_table.entries[level].smio_low;
-		}
-		table->SmioMask2 = data->mvdd_voltage_table.mask_low;
-
-		table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
-	}
-
-	return 0;
-}
-
-static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
-					struct SMU74_Discrete_DpmTable *table)
-{
-	uint32_t count, level;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	count = data->vddci_voltage_table.count;
-
-	if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
-		if (count > SMU_MAX_SMIO_LEVELS)
-			count = SMU_MAX_SMIO_LEVELS;
-		for (level = 0; level < count; ++level) {
-			table->SmioTable1.Pattern[level].Voltage =
-				PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
-			table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
-
-			table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
-		}
-	}
-
-	table->SmioMask1 = data->vddci_voltage_table.mask_low;
-
-	return 0;
-}
-
-/**
-* Preparation of vddc and vddgfx CAC tables for SMC.
-*
-* @param    hwmgr  the address of the hardware manager
-* @param    table  the SMC DPM table structure to be populated
-* @return   always 0
-*/
-static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
-		struct SMU74_Discrete_DpmTable *table)
-{
-	uint32_t count;
-	uint8_t index;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_voltage_lookup_table *lookup_table =
-			table_info->vddc_lookup_table;
-	/* tables is already swapped, so in order to use the value from it,
-	 * we need to swap it back.
-	 * We are populating vddc CAC data to BapmVddc table
-	 * in split and merged mode
-	 */
-	for (count = 0; count < lookup_table->count; count++) {
-		index = phm_get_voltage_index(lookup_table,
-				data->vddc_voltage_table.entries[count].value);
-		table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
-		table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
-		table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
-	}
-
-	return 0;
-}
-
-/**
-* Preparation of voltage tables for SMC.
-*
-* @param    hwmgr   the address of the hardware manager
-* @param    table   the SMC DPM table structure to be populated
-* @return   always  0
-*/
-
-int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
-		struct SMU74_Discrete_DpmTable *table)
-{
-	polaris10_populate_smc_vddci_table(hwmgr, table);
-	polaris10_populate_smc_mvdd_table(hwmgr, table);
-	polaris10_populate_cac_table(hwmgr, table);
-
-	return 0;
-}
-
-static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
-		struct SMU74_Discrete_Ulv *state)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	state->CcPwrDynRm = 0;
-	state->CcPwrDynRm1 = 0;
-
-	state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
-	state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
-			VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
-	state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
-
-	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
-	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
-	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
-
-	return 0;
-}
-
-static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
-		struct SMU74_Discrete_DpmTable *table)
-{
-	return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
-		struct SMU74_Discrete_DpmTable *table)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct polaris10_dpm_table *dpm_table = &data->dpm_table;
-	int i;
-
-	/* Index (dpm_table->pcie_speed_table.count)
-	 * is reserved for PCIE boot level. */
-	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
-		table->LinkLevel[i].PcieGenSpeed  =
-				(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
-		table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
-				dpm_table->pcie_speed_table.dpm_levels[i].param1);
-		table->LinkLevel[i].EnabledForActivity = 1;
-		table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
-		table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
-		table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
-	}
-
-	data->smc_state_table.LinkLevelCount =
-			(uint8_t)dpm_table->pcie_speed_table.count;
-	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
-			phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
-	return 0;
-}
-
-static uint32_t polaris10_get_xclk(struct pp_hwmgr *hwmgr)
-{
-	uint32_t reference_clock, tmp;
-	struct cgs_display_info info = {0};
-	struct cgs_mode_info mode_info;
-
-	info.mode_info = &mode_info;
-
-	tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
-
-	if (tmp)
-		return TCLK;
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-	reference_clock = mode_info.ref_clock;
-
-	tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
-
-	if (0 != tmp)
-		return reference_clock / 4;
-
-	return reference_clock;
-}
-
-/**
-* Calculates the SCLK dividers using the provided engine clock
-*
-* @param    hwmgr  the address of the hardware manager
-* @param    clock  the engine clock to use to populate the structure
-* @param    sclk   the SMC SCLK structure to be populated
-*/
-static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
-		uint32_t clock, SMU_SclkSetting *sclk_setting)
-{
-	const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	const SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
-	struct pp_atomctrl_clock_dividers_ai dividers;
-
-	uint32_t ref_clock;
-	uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
-	uint8_t i;
-	int result;
-	uint64_t temp;
-
-	sclk_setting->SclkFrequency = clock;
-	/* get the engine clock dividers for this clock value */
-	result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock,  &dividers);
-	if (result == 0) {
-		sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
-		sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
-		sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
-		sclk_setting->PllRange = dividers.ucSclkPllRange;
-		sclk_setting->Sclk_slew_rate = 0x400;
-		sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
-		sclk_setting->Pcc_down_slew_rate = 0xffff;
-		sclk_setting->SSc_En = dividers.ucSscEnable;
-		sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
-		sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
-		sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
-		return result;
-	}
-
-	ref_clock = polaris10_get_xclk(hwmgr);
-
-	for (i = 0; i < NUM_SCLK_RANGE; i++) {
-		if (clock > data->range_table[i].trans_lower_frequency
-		&& clock <= data->range_table[i].trans_upper_frequency) {
-			sclk_setting->PllRange = i;
-			break;
-		}
-	}
-
-	sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
-	temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
-	temp <<= 0x10;
-	do_div(temp, ref_clock);
-	sclk_setting->Fcw_frac = temp & 0xffff;
-
-	pcc_target_percent = 10; /*  Hardcode 10% for now. */
-	pcc_target_freq = clock - (clock * pcc_target_percent / 100);
-	sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
-
-	ss_target_percent = 2; /*  Hardcode 2% for now. */
-	sclk_setting->SSc_En = 0;
-	if (ss_target_percent) {
-		sclk_setting->SSc_En = 1;
-		ss_target_freq = clock - (clock * ss_target_percent / 100);
-		sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
-		temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
-		temp <<= 0x10;
-		do_div(temp, ref_clock);
-		sclk_setting->Fcw1_frac = temp & 0xffff;
-	}
-
-	return 0;
-}
-
-static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
-		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
-		uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
-	uint32_t i;
-	uint16_t vddci;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	*voltage = *mvdd = 0;
-
-	/* clock - voltage dependency table is empty table */
-	if (dep_table->count == 0)
-		return -EINVAL;
-
-	for (i = 0; i < dep_table->count; i++) {
-		/* find first sclk bigger than request */
-		if (dep_table->entries[i].clk >= clock) {
-			*voltage |= (dep_table->entries[i].vddc *
-					VOLTAGE_SCALE) << VDDC_SHIFT;
-			if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control)
-				*voltage |= (data->vbios_boot_state.vddci_bootup_value *
-						VOLTAGE_SCALE) << VDDCI_SHIFT;
-			else if (dep_table->entries[i].vddci)
-				*voltage |= (dep_table->entries[i].vddci *
-						VOLTAGE_SCALE) << VDDCI_SHIFT;
-			else {
-				vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
-						(dep_table->entries[i].vddc -
-								(uint16_t)data->vddc_vddci_delta));
-				*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-			}
-
-			if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
-				*mvdd = data->vbios_boot_state.mvdd_bootup_value *
-					VOLTAGE_SCALE;
-			else if (dep_table->entries[i].mvdd)
-				*mvdd = (uint32_t) dep_table->entries[i].mvdd *
-					VOLTAGE_SCALE;
-
-			*voltage |= 1 << PHASES_SHIFT;
-			return 0;
-		}
-	}
-
-	/* sclk is bigger than max sclk in the dependence table */
-	*voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
-	if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control)
-		*voltage |= (data->vbios_boot_state.vddci_bootup_value *
-				VOLTAGE_SCALE) << VDDCI_SHIFT;
-	else if (dep_table->entries[i-1].vddci) {
-		vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
-				(dep_table->entries[i].vddc -
-						(uint16_t)data->vddc_vddci_delta));
-		*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-	}
-
-	if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
-		*mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
-	else if (dep_table->entries[i].mvdd)
-		*mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
-
-	return 0;
-}
-
-static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] =
-{ {VCO_2_4, POSTDIV_DIV_BY_16,  75, 160, 112},
-  {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
-  {VCO_2_4, POSTDIV_DIV_BY_8,   75, 160, 112},
-  {VCO_3_6, POSTDIV_DIV_BY_8,  112, 224, 160},
-  {VCO_2_4, POSTDIV_DIV_BY_4,   75, 160, 112},
-  {VCO_3_6, POSTDIV_DIV_BY_4,  112, 216, 160},
-  {VCO_2_4, POSTDIV_DIV_BY_2,   75, 160, 108},
-  {VCO_3_6, POSTDIV_DIV_BY_2,  112, 216, 160} };
-
-static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr)
-{
-	uint32_t i, ref_clk;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	SMU74_Discrete_DpmTable  *table = &(data->smc_state_table);
-	struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
-
-	ref_clk = polaris10_get_xclk(hwmgr);
-
-	if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
-		for (i = 0; i < NUM_SCLK_RANGE; i++) {
-			table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
-			table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
-			table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
-
-			table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
-			table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
-
-			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
-			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
-			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
-		}
-		return;
-	}
-
-	for (i = 0; i < NUM_SCLK_RANGE; i++) {
-
-		data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
-		data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
-
-		table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
-		table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
-		table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
-
-		table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
-		table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
-
-		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
-		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
-		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
-	}
-}
-
-/**
-* Populates single SMC SCLK structure using the provided engine clock
-*
-* @param    hwmgr      the address of the hardware manager
-* @param    clock the engine clock to use to populate the structure
-* @param    sclk        the SMC SCLK structure to be populated
-*/
-
-static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
-		uint32_t clock, uint16_t sclk_al_threshold,
-		struct SMU74_Discrete_GraphicsLevel *level)
-{
-	int result, i, temp;
-	/* PP_Clocks minClocks; */
-	uint32_t mvdd;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	SMU_SclkSetting curr_sclk_setting = { 0 };
-
-	result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
-
-	/* populate graphics levels */
-	result = polaris10_get_dependency_volt_by_clk(hwmgr,
-			table_info->vdd_dep_on_sclk, clock,
-			&level->MinVoltage, &mvdd);
-
-	PP_ASSERT_WITH_CODE((0 == result),
-			"can not find VDDC voltage value for "
-			"VDDC engine clock dependency table",
-			return result);
-	level->ActivityLevel = sclk_al_threshold;
-
-	level->CcPwrDynRm = 0;
-	level->CcPwrDynRm1 = 0;
-	level->EnabledForActivity = 0;
-	level->EnabledForThrottle = 1;
-	level->UpHyst = 10;
-	level->DownHyst = 0;
-	level->VoltageDownHyst = 0;
-	level->PowerThrottle = 0;
-
-	/*
-	* TODO: get minimum clocks from dal configaration
-	* PECI_GetMinClockSettings(hwmgr->pPECI, &minClocks);
-	*/
-	/* data->DisplayTiming.minClockInSR = minClocks.engineClockInSR; */
-
-	/* get level->DeepSleepDivId
-	if (phm_cap_enabled(hwmgr->platformDescriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
-		level->DeepSleepDivId = PhwFiji_GetSleepDividerIdFromClock(hwmgr, clock, minClocks.engineClockInSR);
-	*/
-	PP_ASSERT_WITH_CODE((clock >= POLARIS10_MINIMUM_ENGINE_CLOCK), "Engine clock can't satisfy stutter requirement!", return 0);
-	for (i = POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
-		temp = clock >> i;
-
-		if (temp >= POLARIS10_MINIMUM_ENGINE_CLOCK || i == 0)
-			break;
-	}
-
-	level->DeepSleepDivId = i;
-
-	/* Default to slow, highest DPM level will be
-	 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
-	 */
-	if (data->update_up_hyst)
-		level->UpHyst = (uint8_t)data->up_hyst;
-	if (data->update_down_hyst)
-		level->DownHyst = (uint8_t)data->down_hyst;
-
-	level->SclkSetting = curr_sclk_setting;
-
-	CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
-	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
-	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
-	CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
-	CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
-	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
-	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
-	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
-	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
-	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
-	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
-	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
-	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
-	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
-	return 0;
-}
-
-/**
-* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
-*
-* @param    hwmgr      the address of the hardware manager
-*/
-static int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct polaris10_dpm_table *dpm_table = &data->dpm_table;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
-	uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
-	int result = 0;
-	uint32_t array = data->dpm_table_start +
-			offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
-	uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
-			SMU74_MAX_LEVELS_GRAPHICS;
-	struct SMU74_Discrete_GraphicsLevel *levels =
-			data->smc_state_table.GraphicsLevel;
-	uint32_t i, max_entry;
-	uint8_t hightest_pcie_level_enabled = 0,
-		lowest_pcie_level_enabled = 0,
-		mid_pcie_level_enabled = 0,
-		count = 0;
-
-	polaris10_get_sclk_range_table(hwmgr);
-
-	for (i = 0; i < dpm_table->sclk_table.count; i++) {
-
-		result = polaris10_populate_single_graphic_level(hwmgr,
-				dpm_table->sclk_table.dpm_levels[i].value,
-				(uint16_t)data->activity_target[i],
-				&(data->smc_state_table.GraphicsLevel[i]));
-		if (result)
-			return result;
-
-		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
-		if (i > 1)
-			levels[i].DeepSleepDivId = 0;
-	}
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-					PHM_PlatformCaps_SPLLShutdownSupport))
-		data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
-
-	data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
-	data->smc_state_table.GraphicsDpmLevelCount =
-			(uint8_t)dpm_table->sclk_table.count;
-	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
-			phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
-
-	if (pcie_table != NULL) {
-		PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
-				"There must be 1 or more PCIE levels defined in PPTable.",
-				return -EINVAL);
-		max_entry = pcie_entry_cnt - 1;
-		for (i = 0; i < dpm_table->sclk_table.count; i++)
-			levels[i].pcieDpmLevel =
-					(uint8_t) ((i < max_entry) ? i : max_entry);
-	} else {
-		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
-				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-						(1 << (hightest_pcie_level_enabled + 1))) != 0))
-			hightest_pcie_level_enabled++;
-
-		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
-				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-						(1 << lowest_pcie_level_enabled)) == 0))
-			lowest_pcie_level_enabled++;
-
-		while ((count < hightest_pcie_level_enabled) &&
-				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-						(1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
-			count++;
-
-		mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
-				hightest_pcie_level_enabled ?
-						(lowest_pcie_level_enabled + 1 + count) :
-						hightest_pcie_level_enabled;
-
-		/* set pcieDpmLevel to hightest_pcie_level_enabled */
-		for (i = 2; i < dpm_table->sclk_table.count; i++)
-			levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
-
-		/* set pcieDpmLevel to lowest_pcie_level_enabled */
-		levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
-		/* set pcieDpmLevel to mid_pcie_level_enabled */
-		levels[1].pcieDpmLevel = mid_pcie_level_enabled;
-	}
-	/* level count will send to smc once at init smc table and never change */
-	result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
-			(uint32_t)array_size, data->sram_end);
-
-	return result;
-}
-
-static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
-		uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	int result = 0;
-	struct cgs_display_info info = {0, 0, NULL};
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	if (table_info->vdd_dep_on_mclk) {
-		result = polaris10_get_dependency_volt_by_clk(hwmgr,
-				table_info->vdd_dep_on_mclk, clock,
-				&mem_level->MinVoltage, &mem_level->MinMvdd);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"can not find MinVddc voltage value from memory "
-				"VDDC voltage dependency table", return result);
-	}
-
-	mem_level->MclkFrequency = clock;
-	mem_level->EnabledForThrottle = 1;
-	mem_level->EnabledForActivity = 0;
-	mem_level->UpHyst = 0;
-	mem_level->DownHyst = 100;
-	mem_level->VoltageDownHyst = 0;
-	mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
-	mem_level->StutterEnable = false;
-	mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-	data->display_timing.num_existing_displays = info.display_count;
-
-	if ((data->mclk_stutter_mode_threshold) &&
-		(clock <= data->mclk_stutter_mode_threshold) &&
-		(PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
-				STUTTER_ENABLE) & 0x1))
-		mem_level->StutterEnable = true;
-
-	if (!result) {
-		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
-		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
-		CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
-		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
-	}
-	return result;
-}
-
-/**
-* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
-*
-* @param    hwmgr      the address of the hardware manager
-*/
-static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct polaris10_dpm_table *dpm_table = &data->dpm_table;
-	int result;
-	/* populate MCLK dpm table to SMU7 */
-	uint32_t array = data->dpm_table_start +
-			offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
-	uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
-			SMU74_MAX_LEVELS_MEMORY;
-	struct SMU74_Discrete_MemoryLevel *levels =
-			data->smc_state_table.MemoryLevel;
-	uint32_t i;
-
-	for (i = 0; i < dpm_table->mclk_table.count; i++) {
-		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
-				"can not populate memory level as memory clock is zero",
-				return -EINVAL);
-		result = polaris10_populate_single_memory_level(hwmgr,
-				dpm_table->mclk_table.dpm_levels[i].value,
-				&levels[i]);
-		if (i == dpm_table->mclk_table.count - 1) {
-			levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
-			levels[i].EnabledForActivity = 1;
-		}
-		if (result)
-			return result;
-	}
-
-	/* In order to prevent MC activity from stutter mode to push DPM up,
-	 * the UVD change complements this by putting the MCLK in
-	 * a higher state by default such that we are not affected by
-	 * up threshold or and MCLK DPM latency.
-	 */
-	levels[0].ActivityLevel = 0x1f;
-	CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
-
-	data->smc_state_table.MemoryDpmLevelCount =
-			(uint8_t)dpm_table->mclk_table.count;
-	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
-			phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
-
-	/* level count will send to smc once at init smc table and never change */
-	result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
-			(uint32_t)array_size, data->sram_end);
-
-	return result;
-}
-
-/**
-* Populates the SMC MVDD structure using the provided memory clock.
-*
-* @param    hwmgr      the address of the hardware manager
-* @param    mclk        the MCLK value to be used in the decision if MVDD should be high or low.
-* @param    voltage     the SMC VOLTAGE structure to be populated
-*/
-int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
-		uint32_t mclk, SMIO_Pattern *smio_pat)
-{
-	const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	uint32_t i = 0;
-
-	if (POLARIS10_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-		/* find mvdd value which clock is more than request */
-		for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
-			if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
-				smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
-				break;
-			}
-		}
-		PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
-				"MVDD Voltage is outside the supported range.",
-				return -EINVAL);
-	} else
-		return -EINVAL;
-
-	return 0;
-}
-
-static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
-		SMU74_Discrete_DpmTable *table)
-{
-	int result = 0;
-	uint32_t sclk_frequency;
-	const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	SMIO_Pattern vol_level;
-	uint32_t mvdd;
-	uint16_t us_mvdd;
-
-	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
-
-	/* Get MinVoltage and Frequency from DPM0,
-	 * already converted to SMC_UL */
-	sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
-	result = polaris10_get_dependency_volt_by_clk(hwmgr,
-			table_info->vdd_dep_on_sclk,
-			sclk_frequency,
-			&table->ACPILevel.MinVoltage, &mvdd);
-	PP_ASSERT_WITH_CODE((0 == result),
-			"Cannot find ACPI VDDC voltage value "
-			"in Clock Dependency Table",
-			);
-
-
-	result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency,  &(table->ACPILevel.SclkSetting));
-	PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
-	table->ACPILevel.DeepSleepDivId = 0;
-	table->ACPILevel.CcPwrDynRm = 0;
-	table->ACPILevel.CcPwrDynRm1 = 0;
-
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
-	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
-	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
-	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
-	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
-	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
-	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
-	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
-	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
-	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
-
-
-	/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
-	table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
-	result = polaris10_get_dependency_volt_by_clk(hwmgr,
-			table_info->vdd_dep_on_mclk,
-			table->MemoryACPILevel.MclkFrequency,
-			&table->MemoryACPILevel.MinVoltage, &mvdd);
-	PP_ASSERT_WITH_CODE((0 == result),
-			"Cannot find ACPI VDDCI voltage value "
-			"in Clock Dependency Table",
-			);
-
-	us_mvdd = 0;
-	if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
-			(data->mclk_dpm_key_disabled))
-		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
-	else {
-		if (!polaris10_populate_mvdd_value(hwmgr,
-				data->dpm_table.mclk_table.dpm_levels[0].value,
-				&vol_level))
-			us_mvdd = vol_level.Voltage;
-	}
-
-	if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
-		table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
-	else
-		table->MemoryACPILevel.MinMvdd = 0;
-
-	table->MemoryACPILevel.StutterEnable = false;
-
-	table->MemoryACPILevel.EnabledForThrottle = 0;
-	table->MemoryACPILevel.EnabledForActivity = 0;
-	table->MemoryACPILevel.UpHyst = 0;
-	table->MemoryACPILevel.DownHyst = 100;
-	table->MemoryACPILevel.VoltageDownHyst = 0;
-	table->MemoryACPILevel.ActivityLevel =
-			PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
-	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
-
-	return result;
-}
-
-static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
-		SMU74_Discrete_DpmTable *table)
-{
-	int result = -EINVAL;
-	uint8_t count;
-	struct pp_atomctrl_clock_dividers_vi dividers;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-			table_info->mm_dep_table;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint32_t vddci;
-
-	table->VceLevelCount = (uint8_t)(mm_table->count);
-	table->VceBootLevel = 0;
-
-	for (count = 0; count < table->VceLevelCount; count++) {
-		table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
-		table->VceLevel[count].MinVoltage = 0;
-		table->VceLevel[count].MinVoltage |=
-				(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
-		if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
-			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
-						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
-		else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
-			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
-		else
-			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
-
-		table->VceLevel[count].MinVoltage |=
-				(vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-		table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/*retrieve divider value for VBIOS */
-		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-				table->VceLevel[count].Frequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"can not find divide id for VCE engine clock",
-				return result);
-
-		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
-		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
-	}
-	return result;
-}
-
-static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-		SMU74_Discrete_DpmTable *table)
-{
-	int result = -EINVAL;
-	uint8_t count;
-	struct pp_atomctrl_clock_dividers_vi dividers;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-			table_info->mm_dep_table;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint32_t vddci;
-
-	table->SamuBootLevel = 0;
-	table->SamuLevelCount = (uint8_t)(mm_table->count);
-
-	for (count = 0; count < table->SamuLevelCount; count++) {
-		/* not sure whether we need evclk or not */
-		table->SamuLevel[count].MinVoltage = 0;
-		table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
-		table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-				VOLTAGE_SCALE) << VDDC_SHIFT;
-
-		if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
-			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
-						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
-		else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
-			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
-		else
-			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
-		table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-		table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve divider value for VBIOS */
-		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-				table->SamuLevel[count].Frequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"can not find divide id for samu clock", return result);
-
-		table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
-	}
-	return result;
-}
-
-static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
-		int32_t eng_clock, int32_t mem_clock,
-		SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
-	uint32_t dram_timing;
-	uint32_t dram_timing2;
-	uint32_t burst_time;
-	int result;
-
-	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
-			eng_clock, mem_clock);
-	PP_ASSERT_WITH_CODE(result == 0,
-			"Error calling VBIOS to set DRAM_TIMING.", return result);
-
-	dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
-	dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
-	burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-
-
-	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dram_timing);
-	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
-	arb_regs->McArbBurstTime   = (uint8_t)burst_time;
-
-	return 0;
-}
-
-static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
-	uint32_t i, j;
-	int result = 0;
-
-	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
-		for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
-			result = polaris10_populate_memory_timing_parameters(hwmgr,
-					data->dpm_table.sclk_table.dpm_levels[i].value,
-					data->dpm_table.mclk_table.dpm_levels[j].value,
-					&arb_regs.entries[i][j]);
-			if (result == 0)
-				result = atomctrl_set_ac_timing_ai(hwmgr, data->dpm_table.mclk_table.dpm_levels[j].value, j);
-			if (result != 0)
-				return result;
-		}
-	}
-
-	result = polaris10_copy_bytes_to_smc(
-			hwmgr->smumgr,
-			data->arb_table_start,
-			(uint8_t *)&arb_regs,
-			sizeof(SMU74_Discrete_MCArbDramTimingTable),
-			data->sram_end);
-	return result;
-}
-
-static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
-		struct SMU74_Discrete_DpmTable *table)
-{
-	int result = -EINVAL;
-	uint8_t count;
-	struct pp_atomctrl_clock_dividers_vi dividers;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-			table_info->mm_dep_table;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint32_t vddci;
-
-	table->UvdLevelCount = (uint8_t)(mm_table->count);
-	table->UvdBootLevel = 0;
-
-	for (count = 0; count < table->UvdLevelCount; count++) {
-		table->UvdLevel[count].MinVoltage = 0;
-		table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
-		table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
-		table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-				VOLTAGE_SCALE) << VDDC_SHIFT;
-
-		if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
-			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
-						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
-		else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
-			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
-		else
-			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
-		table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-		table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve divider value for VBIOS */
-		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-				table->UvdLevel[count].VclkFrequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"can not find divide id for Vclk clock", return result);
-
-		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
-		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-				table->UvdLevel[count].DclkFrequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"can not find divide id for Dclk clock", return result);
-
-		table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
-		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
-		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
-		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
-	}
-
-	return result;
-}
-
-static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
-		struct SMU74_Discrete_DpmTable *table)
-{
-	int result = 0;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	table->GraphicsBootLevel = 0;
-	table->MemoryBootLevel = 0;
-
-	/* find boot level from dpm table */
-	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
-			data->vbios_boot_state.sclk_bootup_value,
-			(uint32_t *)&(table->GraphicsBootLevel));
-
-	result = phm_find_boot_level(&(data->dpm_table.mclk_table),
-			data->vbios_boot_state.mclk_bootup_value,
-			(uint32_t *)&(table->MemoryBootLevel));
-
-	table->BootVddc  = data->vbios_boot_state.vddc_bootup_value *
-			VOLTAGE_SCALE;
-	table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
-			VOLTAGE_SCALE;
-	table->BootMVdd  = data->vbios_boot_state.mvdd_bootup_value *
-			VOLTAGE_SCALE;
-
-	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
-	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
-	CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
-	return 0;
-}
-
-
-static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	uint8_t count, level;
-
-	count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
-
-	for (level = 0; level < count; level++) {
-		if (table_info->vdd_dep_on_sclk->entries[level].clk >=
-				data->vbios_boot_state.sclk_bootup_value) {
-			data->smc_state_table.GraphicsBootLevel = level;
-			break;
-		}
-	}
-
-	count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
-	for (level = 0; level < count; level++) {
-		if (table_info->vdd_dep_on_mclk->entries[level].clk >=
-				data->vbios_boot_state.mclk_bootup_value) {
-			data->smc_state_table.MemoryBootLevel = level;
-			break;
-		}
-	}
-
-	return 0;
-}
-
-static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
-	uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint8_t i, stretch_amount, volt_offset = 0;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
-			table_info->vdd_dep_on_sclk;
-
-	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
-	/* Read SMU_Eefuse to read and calculate RO and determine
-	 * if the part is SS or FF. if RO >= 1660MHz, part is FF.
-	 */
-	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixSMU_EFUSE_0 + (67 * 4));
-	efuse &= 0xFF000000;
-	efuse = efuse >> 24;
-
-	if (hwmgr->chip_id == CHIP_POLARIS10) {
-		min = 1000;
-		max = 2300;
-	} else {
-		min = 1100;
-		max = 2100;
-	}
-
-	ro = efuse * (max -min)/255 + min;
-
-	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
-	for (i = 0; i < sclk_table->count; i++) {
-		data->smc_state_table.Sclk_CKS_masterEn0_7 |=
-				sclk_table->entries[i].cks_enable << i;
-		if (hwmgr->chip_id == CHIP_POLARIS10) {
-			volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
-						(2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
-			volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
-					(2522480 - sclk_table->entries[i].clk/100 * 115764/100));
-		} else {
-			volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
-						(2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
-			volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
-					(3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
-		}
-
-		if (volt_without_cks >= volt_with_cks)
-			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
-					sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
-
-		data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
-	}
-
-	data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
-	/* Populate CKS Lookup Table */
-	if (stretch_amount != 1 && stretch_amount != 2 && stretch_amount != 3 &&
-			stretch_amount != 4 && stretch_amount != 5) {
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_ClockStretcher);
-		PP_ASSERT_WITH_CODE(false,
-				"Stretch Amount in PPTable not supported\n",
-				return -EINVAL);
-	}
-
-	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
-	value &= 0xFFFFFFFE;
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
-
-	return 0;
-}
-
-/**
-* Populates the SMC VRConfig field in DPM table.
-*
-* @param    hwmgr   the address of the hardware manager
-* @param    table   the SMC DPM table structure to be populated
-* @return   always 0
-*/
-static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
-		struct SMU74_Discrete_DpmTable *table)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint16_t config;
-
-	config = VR_MERGED_WITH_VDDC;
-	table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
-
-	/* Set Vddc Voltage Controller */
-	if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-		config = VR_SVI2_PLANE_1;
-		table->VRConfig |= config;
-	} else {
-		PP_ASSERT_WITH_CODE(false,
-				"VDDC should be on SVI2 control in merged mode!",
-				);
-	}
-	/* Set Vddci Voltage Controller */
-	if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
-		config = VR_SVI2_PLANE_2;  /* only in merged mode */
-		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
-	} else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
-		config = VR_SMIO_PATTERN_1;
-		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
-	} else {
-		config = VR_STATIC_VOLTAGE;
-		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
-	}
-	/* Set Mvdd Voltage Controller */
-	if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
-		config = VR_SVI2_PLANE_2;
-		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
-			offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
-	} else {
-		config = VR_STATIC_VOLTAGE;
-		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-	}
-
-	return 0;
-}
-
-
-int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	SMU74_Discrete_DpmTable  *table = &(data->smc_state_table);
-	int result = 0;
-	struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
-	AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
-	AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
-	uint32_t tmp, i;
-	struct pp_smumgr *smumgr = hwmgr->smumgr;
-	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)hwmgr->pptable;
-	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
-			table_info->vdd_dep_on_sclk;
-
-
-	if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
-		return result;
-
-	result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
-
-	if (0 == result) {
-		table->BTCGB_VDROOP_TABLE[0].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
-		table->BTCGB_VDROOP_TABLE[0].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
-		table->BTCGB_VDROOP_TABLE[0].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
-		table->BTCGB_VDROOP_TABLE[1].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
-		table->BTCGB_VDROOP_TABLE[1].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
-		table->BTCGB_VDROOP_TABLE[1].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
-		table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
-		table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
-		table->AVFSGB_VDROOP_TABLE[0].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
-		table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
-		table->AVFSGB_VDROOP_TABLE[0].m2_shift  = 12;
-		table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
-		table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
-		table->AVFSGB_VDROOP_TABLE[1].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
-		table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
-		table->AVFSGB_VDROOP_TABLE[1].m2_shift  = 12;
-		table->MaxVoltage                = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
-		AVFS_meanNsigma.Aconstant[0]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
-		AVFS_meanNsigma.Aconstant[1]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
-		AVFS_meanNsigma.Aconstant[2]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
-		AVFS_meanNsigma.DC_tol_sigma      = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
-		AVFS_meanNsigma.Platform_mean     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
-		AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
-		AVFS_meanNsigma.Platform_sigma     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
-
-		for (i = 0; i < NUM_VFT_COLUMNS; i++) {
-			AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
-			AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
-		}
-
-		result = polaris10_read_smc_sram_dword(smumgr,
-				SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
-				&tmp, data->sram_end);
-
-		polaris10_copy_bytes_to_smc(smumgr,
-					tmp,
-					(uint8_t *)&AVFS_meanNsigma,
-					sizeof(AVFS_meanNsigma_t),
-					data->sram_end);
-
-		result = polaris10_read_smc_sram_dword(smumgr,
-				SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
-				&tmp, data->sram_end);
-		polaris10_copy_bytes_to_smc(smumgr,
-					tmp,
-					(uint8_t *)&AVFS_SclkOffset,
-					sizeof(AVFS_Sclk_Offset_t),
-					data->sram_end);
-
-		data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
-						(avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
-						(avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
-						(avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
-		data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
-	}
-	return result;
-}
-
-
-/**
-* Initializes the SMC table and uploads it
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
-{
-	int result;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
-	const struct polaris10_ulv_parm *ulv = &(data->ulv);
-	uint8_t i;
-	struct pp_atomctrl_gpio_pin_assignment gpio_pin;
-	pp_atomctrl_clock_dividers_vi dividers;
-
-	result = polaris10_setup_default_dpm_tables(hwmgr);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to setup default DPM tables!", return result);
-
-	if (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control)
-		polaris10_populate_smc_voltage_tables(hwmgr, table);
-
-	table->SystemFlags = 0;
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_AutomaticDCTransition))
-		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_StepVddc))
-		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
-	if (data->is_memory_gddr5)
-		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
-	if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) {
-		result = polaris10_populate_ulv_state(hwmgr, table);
-		PP_ASSERT_WITH_CODE(0 == result,
-				"Failed to initialize ULV state!", return result);
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-				ixCG_ULV_PARAMETER, PPPOLARIS10_CGULVPARAMETER_DFLT);
-	}
-
-	result = polaris10_populate_smc_link_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize Link Level!", return result);
-
-	result = polaris10_populate_all_graphic_levels(hwmgr);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize Graphics Level!", return result);
-
-	result = polaris10_populate_all_memory_levels(hwmgr);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize Memory Level!", return result);
-
-	result = polaris10_populate_smc_acpi_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize ACPI Level!", return result);
-
-	result = polaris10_populate_smc_vce_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize VCE Level!", return result);
-
-	result = polaris10_populate_smc_samu_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize SAMU Level!", return result);
-
-	/* Since only the initial state is completely set up at this point
-	 * (the other states are just copies of the boot state) we only
-	 * need to populate the  ARB settings for the initial state.
-	 */
-	result = polaris10_program_memory_timing_parameters(hwmgr);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to Write ARB settings for the initial state.", return result);
-
-	result = polaris10_populate_smc_uvd_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize UVD Level!", return result);
-
-	result = polaris10_populate_smc_boot_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize Boot Level!", return result);
-
-	result = polaris10_populate_smc_initailial_state(hwmgr);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize Boot State!", return result);
-
-	result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to populate BAPM Parameters!", return result);
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_ClockStretcher)) {
-		result = polaris10_populate_clock_stretcher_data_table(hwmgr);
-		PP_ASSERT_WITH_CODE(0 == result,
-				"Failed to populate Clock Stretcher Data Table!",
-				return result);
-	}
-
-	result = polaris10_populate_avfs_parameters(hwmgr);
-	PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
-
-	table->CurrSclkPllRange = 0xff;
-	table->GraphicsVoltageChangeEnable  = 1;
-	table->GraphicsThermThrottleEnable  = 1;
-	table->GraphicsInterval = 1;
-	table->VoltageInterval  = 1;
-	table->ThermalInterval  = 1;
-	table->TemperatureLimitHigh =
-			table_info->cac_dtp_table->usTargetOperatingTemp *
-			POLARIS10_Q88_FORMAT_CONVERSION_UNIT;
-	table->TemperatureLimitLow  =
-			(table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
-			POLARIS10_Q88_FORMAT_CONVERSION_UNIT;
-	table->MemoryVoltageChangeEnable = 1;
-	table->MemoryInterval = 1;
-	table->VoltageResponseTime = 0;
-	table->PhaseResponseTime = 0;
-	table->MemoryThermThrottleEnable = 1;
-	table->PCIeBootLinkLevel = 0;
-	table->PCIeGenInterval = 1;
-	table->VRConfig = 0;
-
-	result = polaris10_populate_vr_config(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to populate VRConfig setting!", return result);
-
-	table->ThermGpio = 17;
-	table->SclkStepSize = 0x4000;
-
-	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
-		table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
-	} else {
-		table->VRHotGpio = POLARIS10_UNUSED_GPIO_PIN;
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_RegulatorHot);
-	}
-
-	if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
-			&gpio_pin)) {
-		table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_AutomaticDCTransition);
-	} else {
-		table->AcDcGpio = POLARIS10_UNUSED_GPIO_PIN;
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_AutomaticDCTransition);
-	}
-
-	/* Thermal Output GPIO */
-	if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
-			&gpio_pin)) {
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_ThermalOutGPIO);
-
-		table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
-
-		/* For porlarity read GPIOPAD_A with assigned Gpio pin
-		 * since VBIOS will program this register to set 'inactive state',
-		 * driver can then determine 'active state' from this and
-		 * program SMU with correct polarity
-		 */
-		table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
-					& (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
-		table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
-		/* if required, combine VRHot/PCC with thermal out GPIO */
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
-		&& phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
-			table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
-	} else {
-		table->ThermOutGpio = 17;
-		table->ThermOutPolarity = 1;
-		table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
-	}
-
-	/* Populate BIF_SCLK levels into SMC DPM table */
-	for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++) {
-		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, data->bif_sclk_table[i], &dividers);
-		PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
-
-		if (i == 0)
-			table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
-		else
-			table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
-	}
-
-	for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
-		table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
-	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
-	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
-	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
-	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
-	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
-	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
-	result = polaris10_copy_bytes_to_smc(hwmgr->smumgr,
-			data->dpm_table_start +
-			offsetof(SMU74_Discrete_DpmTable, SystemFlags),
-			(uint8_t *)&(table->SystemFlags),
-			sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
-			data->sram_end);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to upload dpm data to SMC memory!", return result);
-
-	return 0;
-}
-
-/**
-* Initialize the ARB DRAM timing table's index field.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr)
-{
-	const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint32_t tmp;
-	int result;
-
-	/* This is a read-modify-write on the first byte of the ARB table.
-	 * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
-	 * is the field 'current'.
-	 * This solution is ugly, but we never write the whole table only
-	 * individual fields in it.
-	 * In reality this field should not be in that structure
-	 * but in a soft register.
-	 */
-	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
-			data->arb_table_start, &tmp, data->sram_end);
-
-	if (result)
-		return result;
-
-	tmp &= 0x00FFFFFF;
-	tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
-	return polaris10_write_smc_sram_dword(hwmgr->smumgr,
-			data->arb_table_start, tmp, data->sram_end);
-}
-
-static int polaris10_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
-{
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_RegulatorHot))
-		return smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_EnableVRHotGPIOInterrupt);
-
-	return 0;
-}
-
-static int polaris10_enable_sclk_control(struct pp_hwmgr *hwmgr)
-{
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
-			SCLK_PWRMGT_OFF, 0);
-	return 0;
-}
-
-static int polaris10_enable_ulv(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct polaris10_ulv_parm *ulv = &(data->ulv);
-
-	if (ulv->ulv_supported)
-		return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
-
-	return 0;
-}
-
-static int polaris10_disable_ulv(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct polaris10_ulv_parm *ulv = &(data->ulv);
-
-	if (ulv->ulv_supported)
-		return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
-
-	return 0;
-}
-
-static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_SclkDeepSleep)) {
-		if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to enable Master Deep Sleep switch failed!",
-					return -1);
-	} else {
-		if (smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to disable Master Deep Sleep switch failed!",
-					return -1);
-		}
-	}
-
-	return 0;
-}
-
-static int polaris10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_SclkDeepSleep)) {
-		if (smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to disable Master Deep Sleep switch failed!",
-					return -1);
-		}
-	}
-
-	return 0;
-}
-
-static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint32_t soft_register_value = 0;
-	uint32_t handshake_disables_offset = data->soft_regs_start
-				+ offsetof(SMU74_SoftRegisters, HandshakeDisables);
-
-	/* enable SCLK dpm */
-	if (!data->sclk_dpm_key_disabled)
-		PP_ASSERT_WITH_CODE(
-		(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
-		"Failed to enable SCLK DPM during DPM Start Function!",
-		return -1);
-
-	/* enable MCLK dpm */
-	if (0 == data->mclk_dpm_key_disabled) {
-/* Disable UVD - SMU handshake for MCLK. */
-		soft_register_value = cgs_read_ind_register(hwmgr->device,
-					CGS_IND_REG__SMC, handshake_disables_offset);
-		soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-				handshake_disables_offset, soft_register_value);
-
-		PP_ASSERT_WITH_CODE(
-				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-						PPSMC_MSG_MCLKDPM_Enable)),
-				"Failed to enable MCLK DPM during DPM Start Function!",
-				return -1);
-
-		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
-
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
-		udelay(10);
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
-	}
-
-	return 0;
-}
-
-static int polaris10_start_dpm(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	/*enable general power management */
-
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
-			GLOBAL_PWRMGT_EN, 1);
-
-	/* enable sclk deep sleep */
-
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
-			DYNAMIC_PM_EN, 1);
-
-	/* prepare for PCIE DPM */
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			data->soft_regs_start + offsetof(SMU74_SoftRegisters,
-					VoltageChangeTimeout), 0x1000);
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
-			SWRST_COMMAND_1, RESETLC, 0x0);
-/*
-	PP_ASSERT_WITH_CODE(
-			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-					PPSMC_MSG_Voltage_Cntl_Enable)),
-			"Failed to enable voltage DPM during DPM Start Function!",
-			return -1);
-*/
-
-	if (polaris10_enable_sclk_mclk_dpm(hwmgr)) {
-		printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
-		return -1;
-	}
-
-	/* enable PCIE dpm */
-	if (0 == data->pcie_dpm_key_disabled) {
-		PP_ASSERT_WITH_CODE(
-				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-						PPSMC_MSG_PCIeDPM_Enable)),
-				"Failed to enable pcie DPM during DPM Start Function!",
-				return -1);
-	}
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_Falcon_QuickTransition)) {
-		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_EnableACDCGPIOInterrupt)),
-				"Failed to enable AC DC GPIO Interrupt!",
-				);
-	}
-
-	return 0;
-}
-
-static int polaris10_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	/* disable SCLK dpm */
-	if (!data->sclk_dpm_key_disabled)
-		PP_ASSERT_WITH_CODE(
-				(smum_send_msg_to_smc(hwmgr->smumgr,
-						PPSMC_MSG_DPM_Disable) == 0),
-				"Failed to disable SCLK DPM!",
-				return -1);
-
-	/* disable MCLK dpm */
-	if (!data->mclk_dpm_key_disabled) {
-		PP_ASSERT_WITH_CODE(
-				(smum_send_msg_to_smc(hwmgr->smumgr,
-						PPSMC_MSG_MCLKDPM_Disable) == 0),
-				"Failed to disable MCLK DPM!",
-				return -1);
-	}
-
-	return 0;
-}
-
-static int polaris10_stop_dpm(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	/* disable general power management */
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
-			GLOBAL_PWRMGT_EN, 0);
-	/* disable sclk deep sleep */
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
-			DYNAMIC_PM_EN, 0);
-
-	/* disable PCIE dpm */
-	if (!data->pcie_dpm_key_disabled) {
-		PP_ASSERT_WITH_CODE(
-				(smum_send_msg_to_smc(hwmgr->smumgr,
-						PPSMC_MSG_PCIeDPM_Disable) == 0),
-				"Failed to disable pcie DPM during DPM Stop Function!",
-				return -1);
-	}
-
-	if (polaris10_disable_sclk_mclk_dpm(hwmgr)) {
-		printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
-		return -1;
-	}
-
-	return 0;
-}
-
-static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
-{
-	bool protection;
-	enum DPM_EVENT_SRC src;
-
-	switch (sources) {
-	default:
-		printk(KERN_ERR "Unknown throttling event sources.");
-		/* fall through */
-	case 0:
-		protection = false;
-		/* src is unused */
-		break;
-	case (1 << PHM_AutoThrottleSource_Thermal):
-		protection = true;
-		src = DPM_EVENT_SRC_DIGITAL;
-		break;
-	case (1 << PHM_AutoThrottleSource_External):
-		protection = true;
-		src = DPM_EVENT_SRC_EXTERNAL;
-		break;
-	case (1 << PHM_AutoThrottleSource_External) |
-			(1 << PHM_AutoThrottleSource_Thermal):
-		protection = true;
-		src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
-		break;
-	}
-	/* Order matters - don't enable thermal protection for the wrong source. */
-	if (protection) {
-		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
-				DPM_EVENT_SRC, src);
-		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
-				THERMAL_PROTECTION_DIS,
-				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-						PHM_PlatformCaps_ThermalController));
-	} else
-		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
-				THERMAL_PROTECTION_DIS, 1);
-}
-
-static int polaris10_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
-		PHM_AutoThrottleSource source)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	if (!(data->active_auto_throttle_sources & (1 << source))) {
-		data->active_auto_throttle_sources |= 1 << source;
-		polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
-	}
-	return 0;
-}
-
-static int polaris10_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
-	return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-static int polaris10_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
-		PHM_AutoThrottleSource source)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	if (data->active_auto_throttle_sources & (1 << source)) {
-		data->active_auto_throttle_sources &= ~(1 << source);
-		polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
-	}
-	return 0;
-}
-
-static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
-	return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	data->pcie_performance_request = true;
-
-	return 0;
-}
-
-int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
-	int tmp_result, result = 0;
-	tmp_result = (!polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
-	PP_ASSERT_WITH_CODE(result == 0,
-			"DPM is already running right now, no need to enable DPM!",
-			return 0);
-
-	if (polaris10_voltage_control(hwmgr)) {
-		tmp_result = polaris10_enable_voltage_control(hwmgr);
-		PP_ASSERT_WITH_CODE(tmp_result == 0,
-				"Failed to enable voltage control!",
-				result = tmp_result);
-
-		tmp_result = polaris10_construct_voltage_tables(hwmgr);
-		PP_ASSERT_WITH_CODE((0 == tmp_result),
-				"Failed to contruct voltage tables!",
-				result = tmp_result);
-	}
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_EngineSpreadSpectrumSupport))
-		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-				GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_ThermalController))
-		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
-
-	tmp_result = polaris10_program_static_screen_threshold_parameters(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to program static screen threshold parameters!",
-			result = tmp_result);
-
-	tmp_result = polaris10_enable_display_gap(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable display gap!", result = tmp_result);
-
-	tmp_result = polaris10_program_voting_clients(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to program voting clients!", result = tmp_result);
-
-	tmp_result = polaris10_process_firmware_header(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to process firmware header!", result = tmp_result);
-
-	tmp_result = polaris10_initial_switch_from_arbf0_to_f1(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to initialize switch from ArbF0 to F1!",
-			result = tmp_result);
-
-	tmp_result = polaris10_init_smc_table(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to initialize SMC table!", result = tmp_result);
-
-	tmp_result = polaris10_init_arb_table_index(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to initialize ARB table index!", result = tmp_result);
-
-	tmp_result = polaris10_populate_pm_fuses(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to populate PM fuses!", result = tmp_result);
-
-	tmp_result = polaris10_enable_vrhot_gpio_interrupt(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
-
-	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
-
-	tmp_result = polaris10_enable_sclk_control(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable SCLK control!", result = tmp_result);
-
-	tmp_result = polaris10_enable_smc_voltage_controller(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable voltage control!", result = tmp_result);
-
-	tmp_result = polaris10_enable_ulv(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable ULV!", result = tmp_result);
-
-	tmp_result = polaris10_enable_deep_sleep_master_switch(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable deep sleep master switch!", result = tmp_result);
-
-	tmp_result = polaris10_enable_didt_config(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to enable deep sleep master switch!", result = tmp_result);
-
-	tmp_result = polaris10_start_dpm(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to start DPM!", result = tmp_result);
-
-	tmp_result = polaris10_enable_smc_cac(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable SMC CAC!", result = tmp_result);
-
-	tmp_result = polaris10_enable_power_containment(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable power containment!", result = tmp_result);
-
-	tmp_result = polaris10_power_control_set_level(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to power control set level!", result = tmp_result);
-
-	tmp_result = polaris10_enable_thermal_auto_throttle(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable thermal auto throttle!", result = tmp_result);
-
-	tmp_result = polaris10_pcie_performance_request(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"pcie performance request failed!", result = tmp_result);
-
-	return result;
-}
-
-int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
-	int tmp_result, result = 0;
-
-	tmp_result = (polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
-	PP_ASSERT_WITH_CODE(tmp_result == 0,
-			"DPM is not running right now, no need to disable DPM!",
-			return 0);
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_ThermalController))
-		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
-
-	tmp_result = polaris10_disable_power_containment(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to disable power containment!", result = tmp_result);
-
-	tmp_result = polaris10_disable_smc_cac(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to disable SMC CAC!", result = tmp_result);
-
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
-
-	tmp_result = polaris10_disable_thermal_auto_throttle(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to disable thermal auto throttle!", result = tmp_result);
-
-	tmp_result = polaris10_stop_dpm(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to stop DPM!", result = tmp_result);
-
-	tmp_result = polaris10_disable_deep_sleep_master_switch(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to disable deep sleep master switch!", result = tmp_result);
-
-	tmp_result = polaris10_disable_ulv(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to disable ULV!", result = tmp_result);
-
-	tmp_result = polaris10_clear_voting_clients(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to clear voting clients!", result = tmp_result);
-
-	tmp_result = polaris10_reset_to_default(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to reset to default!", result = tmp_result);
-
-	tmp_result = polaris10_force_switch_to_arbf0(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to force to switch arbf0!", result = tmp_result);
-
-	return result;
-}
-
-int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
-{
-
-	return 0;
-}
-
-int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
-	return phm_hwmgr_backend_fini(hwmgr);
-}
-
-int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_SclkDeepSleep);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-		PHM_PlatformCaps_DynamicPatchPowerState);
-
-	if (data->mvdd_control == POLARIS10_VOLTAGE_CONTROL_NONE)
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_EnableMVDDControl);
-
-	if (data->vddci_control == POLARIS10_VOLTAGE_CONTROL_NONE)
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_ControlVDDCI);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			 PHM_PlatformCaps_TablelessHardwareInterface);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_EnableSMU7ThermalManagement);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_DynamicPowerManagement);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_UnTabledHardwareInterface);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_TablelessHardwareInterface);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-					PHM_PlatformCaps_SMC);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-					PHM_PlatformCaps_NonABMSupportInPPLib);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-					PHM_PlatformCaps_DynamicUVDState);
-
-	/* power tune caps Assume disabled */
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-						PHM_PlatformCaps_SQRamping);
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-						PHM_PlatformCaps_DBRamping);
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-						PHM_PlatformCaps_TDRamping);
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-						PHM_PlatformCaps_TCPRamping);
-
-	if (hwmgr->powercontainment_enabled)
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			    PHM_PlatformCaps_PowerContainment);
-	else
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-			    PHM_PlatformCaps_PowerContainment);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-							PHM_PlatformCaps_CAC);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-						PHM_PlatformCaps_RegulatorHot);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-						PHM_PlatformCaps_AutomaticDCTransition);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-						PHM_PlatformCaps_ODFuzzyFanControlSupport);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-						PHM_PlatformCaps_FanSpeedInTableIsRPM);
-
-	if (hwmgr->chip_id == CHIP_POLARIS11)
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-					PHM_PlatformCaps_SPLLShutdownSupport);
-	return 0;
-}
-
-static void polaris10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	polaris10_initialize_power_tune_defaults(hwmgr);
-
-	data->pcie_gen_performance.max = PP_PCIEGen1;
-	data->pcie_gen_performance.min = PP_PCIEGen3;
-	data->pcie_gen_power_saving.max = PP_PCIEGen1;
-	data->pcie_gen_power_saving.min = PP_PCIEGen3;
-	data->pcie_lane_performance.max = 0;
-	data->pcie_lane_performance.min = 16;
-	data->pcie_lane_power_saving.max = 0;
-	data->pcie_lane_power_saving.min = 16;
-}
-
-/**
-* Get Leakage VDDC based on leakage ID.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint16_t vv_id;
-	uint32_t vddc = 0;
-	uint16_t i, j;
-	uint32_t sclk = 0;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)hwmgr->pptable;
-	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
-			table_info->vdd_dep_on_sclk;
-	int result;
-
-	for (i = 0; i < POLARIS10_MAX_LEAKAGE_COUNT; i++) {
-		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
-		if (!phm_get_sclk_for_voltage_evv(hwmgr,
-				table_info->vddc_lookup_table, vv_id, &sclk)) {
-			if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-					PHM_PlatformCaps_ClockStretcher)) {
-				for (j = 1; j < sclk_table->count; j++) {
-					if (sclk_table->entries[j].clk == sclk &&
-							sclk_table->entries[j].cks_enable == 0) {
-						sclk += 5000;
-						break;
-					}
-				}
-			}
-
-			if (atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
-						VOLTAGE_TYPE_VDDC,
-						sclk, vv_id, &vddc) != 0) {
-				printk(KERN_WARNING "failed to retrieving EVV voltage!\n");
-				continue;
-			}
-
-			/* need to make sure vddc is less than 2v or else, it could burn the ASIC.
-			 * real voltage level in unit of 0.01mv */
-			PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
-					"Invalid VDDC value", result = -EINVAL;);
-
-			/* the voltage should not be zero nor equal to leakage ID */
-			if (vddc != 0 && vddc != vv_id) {
-				data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
-				data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
-				data->vddc_leakage.count++;
-			}
-		}
-	}
-
-	return 0;
-}
-
-/**
- * Change virtual leakage voltage to actual value.
- *
- * @param     hwmgr  the address of the powerplay hardware manager.
- * @param     pointer to changing voltage
- * @param     pointer to leakage table
- */
-static void polaris10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
-		uint16_t *voltage, struct polaris10_leakage_voltage *leakage_table)
-{
-	uint32_t index;
-
-	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
-	for (index = 0; index < leakage_table->count; index++) {
-		/* if this voltage matches a leakage voltage ID */
-		/* patch with actual leakage voltage */
-		if (leakage_table->leakage_id[index] == *voltage) {
-			*voltage = leakage_table->actual_voltage[index];
-			break;
-		}
-	}
-
-	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
-		printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
-}
-
-/**
-* Patch voltage lookup table by EVV leakages.
-*
-* @param     hwmgr  the address of the powerplay hardware manager.
-* @param     pointer to voltage lookup table
-* @param     pointer to leakage table
-* @return     always 0
-*/
-static int polaris10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
-		phm_ppt_v1_voltage_lookup_table *lookup_table,
-		struct polaris10_leakage_voltage *leakage_table)
-{
-	uint32_t i;
-
-	for (i = 0; i < lookup_table->count; i++)
-		polaris10_patch_with_vdd_leakage(hwmgr,
-				&lookup_table->entries[i].us_vdd, leakage_table);
-
-	return 0;
-}
-
-static int polaris10_patch_clock_voltage_limits_with_vddc_leakage(
-		struct pp_hwmgr *hwmgr, struct polaris10_leakage_voltage *leakage_table,
-		uint16_t *vddc)
-{
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	polaris10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
-	hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
-			table_info->max_clock_voltage_on_dc.vddc;
-	return 0;
-}
-
-static int polaris10_patch_voltage_dependency_tables_with_lookup_table(
-		struct pp_hwmgr *hwmgr)
-{
-	uint8_t entryId;
-	uint8_t voltageId;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
-			table_info->vdd_dep_on_sclk;
-	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
-			table_info->vdd_dep_on_mclk;
-	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-			table_info->mm_dep_table;
-
-	for (entryId = 0; entryId < sclk_table->count; ++entryId) {
-		voltageId = sclk_table->entries[entryId].vddInd;
-		sclk_table->entries[entryId].vddc =
-				table_info->vddc_lookup_table->entries[voltageId].us_vdd;
-	}
-
-	for (entryId = 0; entryId < mclk_table->count; ++entryId) {
-		voltageId = mclk_table->entries[entryId].vddInd;
-		mclk_table->entries[entryId].vddc =
-			table_info->vddc_lookup_table->entries[voltageId].us_vdd;
-	}
-
-	for (entryId = 0; entryId < mm_table->count; ++entryId) {
-		voltageId = mm_table->entries[entryId].vddcInd;
-		mm_table->entries[entryId].vddc =
-			table_info->vddc_lookup_table->entries[voltageId].us_vdd;
-	}
-
-	return 0;
-
-}
-
-static int polaris10_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
-{
-	/* Need to determine if we need calculated voltage. */
-	return 0;
-}
-
-static int polaris10_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
-{
-	/* Need to determine if we need calculated voltage from mm table. */
-	return 0;
-}
-
-static int polaris10_sort_lookup_table(struct pp_hwmgr *hwmgr,
-		struct phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
-	uint32_t table_size, i, j;
-	struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
-	table_size = lookup_table->count;
-
-	PP_ASSERT_WITH_CODE(0 != lookup_table->count,
-		"Lookup table is empty", return -EINVAL);
-
-	/* Sorting voltages */
-	for (i = 0; i < table_size - 1; i++) {
-		for (j = i + 1; j > 0; j--) {
-			if (lookup_table->entries[j].us_vdd <
-					lookup_table->entries[j - 1].us_vdd) {
-				tmp_voltage_lookup_record = lookup_table->entries[j - 1];
-				lookup_table->entries[j - 1] = lookup_table->entries[j];
-				lookup_table->entries[j] = tmp_voltage_lookup_record;
-			}
-		}
-	}
-
-	return 0;
-}
-
-static int polaris10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
-{
-	int result = 0;
-	int tmp_result;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	tmp_result = polaris10_patch_lookup_table_with_leakage(hwmgr,
-			table_info->vddc_lookup_table, &(data->vddc_leakage));
-	if (tmp_result)
-		result = tmp_result;
-
-	tmp_result = polaris10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
-			&(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
-	if (tmp_result)
-		result = tmp_result;
-
-	tmp_result = polaris10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
-	if (tmp_result)
-		result = tmp_result;
-
-	tmp_result = polaris10_calc_voltage_dependency_tables(hwmgr);
-	if (tmp_result)
-		result = tmp_result;
-
-	tmp_result = polaris10_calc_mm_voltage_dependency_table(hwmgr);
-	if (tmp_result)
-		result = tmp_result;
-
-	tmp_result = polaris10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
-	if (tmp_result)
-		result = tmp_result;
-
-	return result;
-}
-
-static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
-{
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
-						table_info->vdd_dep_on_sclk;
-	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
-						table_info->vdd_dep_on_mclk;
-
-	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
-		"VDD dependency on SCLK table is missing.	\
-		This table is mandatory", return -EINVAL);
-	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
-		"VDD dependency on SCLK table has to have is missing.	\
-		This table is mandatory", return -EINVAL);
-
-	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
-		"VDD dependency on MCLK table is missing.	\
-		This table is mandatory", return -EINVAL);
-	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
-		"VDD dependency on MCLK table has to have is missing.	 \
-		This table is mandatory", return -EINVAL);
-
-	table_info->max_clock_voltage_on_ac.sclk =
-		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
-	table_info->max_clock_voltage_on_ac.mclk =
-		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
-	table_info->max_clock_voltage_on_ac.vddc =
-		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
-	table_info->max_clock_voltage_on_ac.vddci =
-		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
-
-	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
-	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
-	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
-	hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =table_info->max_clock_voltage_on_ac.vddci;
-
-	return 0;
-}
-
-int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
-{
-	struct phm_ppt_v1_information *table_info =
-		       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
-			table_info->vdd_dep_on_mclk;
-	struct phm_ppt_v1_voltage_lookup_table *lookup_table =
-			table_info->vddc_lookup_table;
-	uint32_t i;
-
-	if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) {
-		if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
-			return 0;
-
-		for (i = 0; i < lookup_table->count; i++) {
-			if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
-				dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
-				return 0;
-			}
-		}
-	}
-	return 0;
-}
-
-
-int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data;
-	struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
-	uint32_t temp_reg;
-	int result;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL);
-	if (data == NULL)
-		return -ENOMEM;
-
-	hwmgr->backend = data;
-
-	data->dll_default_on = false;
-	data->sram_end = SMC_RAM_END;
-	data->mclk_dpm0_activity_target = 0xa;
-	data->disable_dpm_mask = 0xFF;
-	data->static_screen_threshold = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT;
-	data->static_screen_threshold_unit = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT;
-	data->activity_target[0] = PPPOLARIS10_TARGETACTIVITY_DFLT;
-	data->activity_target[1] = PPPOLARIS10_TARGETACTIVITY_DFLT;
-	data->activity_target[2] = PPPOLARIS10_TARGETACTIVITY_DFLT;
-	data->activity_target[3] = PPPOLARIS10_TARGETACTIVITY_DFLT;
-	data->activity_target[4] = PPPOLARIS10_TARGETACTIVITY_DFLT;
-	data->activity_target[5] = PPPOLARIS10_TARGETACTIVITY_DFLT;
-	data->activity_target[6] = PPPOLARIS10_TARGETACTIVITY_DFLT;
-	data->activity_target[7] = PPPOLARIS10_TARGETACTIVITY_DFLT;
-
-	data->voting_rights_clients0 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0;
-	data->voting_rights_clients1 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1;
-	data->voting_rights_clients2 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2;
-	data->voting_rights_clients3 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3;
-	data->voting_rights_clients4 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4;
-	data->voting_rights_clients5 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5;
-	data->voting_rights_clients6 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6;
-	data->voting_rights_clients7 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7;
-
-	data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
-
-	data->mclk_activity_target = PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT;
-
-	/* need to set voltage control types before EVV patching */
-	data->voltage_control = POLARIS10_VOLTAGE_CONTROL_NONE;
-	data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE;
-	data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE;
-
-	data->enable_tdc_limit_feature = true;
-	data->enable_pkg_pwr_tracking_feature = true;
-	data->force_pcie_gen = PP_PCIEGenInvalid;
-	data->mclk_stutter_mode_threshold = 40000;
-
-	if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
-		data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_EnableMVDDControl)) {
-		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
-			data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO;
-		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
-			data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
-	}
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_ControlVDDCI)) {
-		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
-			data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO;
-		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
-			data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
-	}
-
-	if (table_info->cac_dtp_table->usClockStretchAmount != 0)
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-					PHM_PlatformCaps_ClockStretcher);
-
-	polaris10_set_features_platform_caps(hwmgr);
-
-	polaris10_patch_voltage_workaround(hwmgr);
-	polaris10_init_dpm_defaults(hwmgr);
-
-	/* Get leakage voltage based on leakage ID. */
-	result = polaris10_get_evv_voltages(hwmgr);
-
-	if (result) {
-		printk("Get EVV Voltage Failed.  Abort Driver loading!\n");
-		return -1;
-	}
-
-	polaris10_complete_dependency_tables(hwmgr);
-	polaris10_set_private_data_based_on_pptable(hwmgr);
-
-	/* Initalize Dynamic State Adjustment Rule Settings */
-	result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
-
-	if (0 == result) {
-		struct cgs_system_info sys_info = {0};
-
-		data->is_tlu_enabled = false;
-
-		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
-							POLARIS10_MAX_HARDWARE_POWERLEVELS;
-		hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
-		hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
-
-
-		if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
-			temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
-			switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
-			case 0:
-				temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
-				break;
-			case 1:
-				temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
-				break;
-			case 2:
-				temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
-				break;
-			case 3:
-				temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
-				break;
-			case 4:
-				temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
-				break;
-			default:
-				PP_ASSERT_WITH_CODE(0,
-				"Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
-				);
-				break;
-			}
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
-		}
-
-		if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
-			hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
-			hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
-				(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
-
-			hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
-				(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
-
-			hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
-
-			hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
-
-			hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
-				(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
-
-			hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
-
-			table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
-									(table_info->cac_dtp_table->usDefaultTargetOperatingTemp -50) : 0;
-
-			table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
-			table_info->cac_dtp_table->usOperatingTempStep = 1;
-			table_info->cac_dtp_table->usOperatingTempHyst = 1;
-
-			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
-				       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
-
-			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
-				       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
-
-			hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
-				       table_info->cac_dtp_table->usOperatingTempMinLimit;
-
-			hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
-				       table_info->cac_dtp_table->usOperatingTempMaxLimit;
-
-			hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
-				       table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
-
-			hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
-				       table_info->cac_dtp_table->usOperatingTempStep;
-
-			hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
-				       table_info->cac_dtp_table->usTargetOperatingTemp;
-		}
-
-		sys_info.size = sizeof(struct cgs_system_info);
-		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
-		result = cgs_query_system_info(hwmgr->device, &sys_info);
-		if (result)
-			data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
-		else
-			data->pcie_gen_cap = (uint32_t)sys_info.value;
-		if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
-			data->pcie_spc_cap = 20;
-		sys_info.size = sizeof(struct cgs_system_info);
-		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
-		result = cgs_query_system_info(hwmgr->device, &sys_info);
-		if (result)
-			data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
-		else
-			data->pcie_lane_cap = (uint32_t)sys_info.value;
-
-		hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
-/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
-		hwmgr->platform_descriptor.clockStep.engineClock = 500;
-		hwmgr->platform_descriptor.clockStep.memoryClock = 500;
-	} else {
-		/* Ignore return value in here, we are cleaning up a mess. */
-		polaris10_hwmgr_backend_fini(hwmgr);
-	}
-
-	return 0;
-}
-
-static int polaris10_force_dpm_highest(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint32_t level, tmp;
-
-	if (!data->pcie_dpm_key_disabled) {
-		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
-			level = 0;
-			tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
-			while (tmp >>= 1)
-				level++;
-
-			if (level)
-				smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-						PPSMC_MSG_PCIeDPM_ForceLevel, level);
-		}
-	}
-
-	if (!data->sclk_dpm_key_disabled) {
-		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
-			level = 0;
-			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
-			while (tmp >>= 1)
-				level++;
-
-			if (level)
-				smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-						PPSMC_MSG_SCLKDPM_SetEnabledMask,
-						(1 << level));
-		}
-	}
-
-	if (!data->mclk_dpm_key_disabled) {
-		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
-			level = 0;
-			tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
-			while (tmp >>= 1)
-				level++;
-
-			if (level)
-				smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-						PPSMC_MSG_MCLKDPM_SetEnabledMask,
-						(1 << level));
-		}
-	}
-
-	return 0;
-}
-
-static int polaris10_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	phm_apply_dal_min_voltage_request(hwmgr);
-
-	if (!data->sclk_dpm_key_disabled) {
-		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_SCLKDPM_SetEnabledMask,
-					data->dpm_level_enable_mask.sclk_dpm_enable_mask);
-	}
-
-	if (!data->mclk_dpm_key_disabled) {
-		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_MCLKDPM_SetEnabledMask,
-					data->dpm_level_enable_mask.mclk_dpm_enable_mask);
-	}
-
-	return 0;
-}
-
-static int polaris10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	if (!polaris10_is_dpm_running(hwmgr))
-		return -EINVAL;
-
-	if (!data->pcie_dpm_key_disabled) {
-		smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_PCIeDPM_UnForceLevel);
-	}
-
-	return polaris10_upload_dpm_level_enable_mask(hwmgr);
-}
-
-static int polaris10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data =
-			(struct polaris10_hwmgr *)(hwmgr->backend);
-	uint32_t level;
-
-	if (!data->sclk_dpm_key_disabled)
-		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
-			level = phm_get_lowest_enabled_level(hwmgr,
-							      data->dpm_level_enable_mask.sclk_dpm_enable_mask);
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-							    PPSMC_MSG_SCLKDPM_SetEnabledMask,
-							    (1 << level));
-
-	}
-
-	if (!data->mclk_dpm_key_disabled) {
-		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
-			level = phm_get_lowest_enabled_level(hwmgr,
-							      data->dpm_level_enable_mask.mclk_dpm_enable_mask);
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-							    PPSMC_MSG_MCLKDPM_SetEnabledMask,
-							    (1 << level));
-		}
-	}
-
-	if (!data->pcie_dpm_key_disabled) {
-		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
-			level = phm_get_lowest_enabled_level(hwmgr,
-							      data->dpm_level_enable_mask.pcie_dpm_enable_mask);
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-							    PPSMC_MSG_PCIeDPM_ForceLevel,
-							    (level));
-		}
-	}
-
-	return 0;
-
-}
-static int polaris10_force_dpm_level(struct pp_hwmgr *hwmgr,
-				enum amd_dpm_forced_level level)
-{
-	int ret = 0;
-
-	switch (level) {
-	case AMD_DPM_FORCED_LEVEL_HIGH:
-		ret = polaris10_force_dpm_highest(hwmgr);
-		if (ret)
-			return ret;
-		break;
-	case AMD_DPM_FORCED_LEVEL_LOW:
-		ret = polaris10_force_dpm_lowest(hwmgr);
-		if (ret)
-			return ret;
-		break;
-	case AMD_DPM_FORCED_LEVEL_AUTO:
-		ret = polaris10_unforce_dpm_levels(hwmgr);
-		if (ret)
-			return ret;
-		break;
-	default:
-		break;
-	}
-
-	hwmgr->dpm_level = level;
-
-	return ret;
-}
-
-static int polaris10_get_power_state_size(struct pp_hwmgr *hwmgr)
-{
-	return sizeof(struct polaris10_power_state);
-}
-
-
-static int polaris10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
-				struct pp_power_state *request_ps,
-			const struct pp_power_state *current_ps)
-{
-
-	struct polaris10_power_state *polaris10_ps =
-				cast_phw_polaris10_power_state(&request_ps->hardware);
-	uint32_t sclk;
-	uint32_t mclk;
-	struct PP_Clocks minimum_clocks = {0};
-	bool disable_mclk_switching;
-	bool disable_mclk_switching_for_frame_lock;
-	struct cgs_display_info info = {0};
-	const struct phm_clock_and_voltage_limits *max_limits;
-	uint32_t i;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	int32_t count;
-	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
-
-	data->battery_state = (PP_StateUILabel_Battery ==
-			request_ps->classification.ui_label);
-
-	PP_ASSERT_WITH_CODE(polaris10_ps->performance_level_count == 2,
-				 "VI should always have 2 performance levels",
-				);
-
-	max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
-			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
-			&(hwmgr->dyn_state.max_clock_voltage_on_dc);
-
-	/* Cap clock DPM tables at DC MAX if it is in DC. */
-	if (PP_PowerSource_DC == hwmgr->power_source) {
-		for (i = 0; i < polaris10_ps->performance_level_count; i++) {
-			if (polaris10_ps->performance_levels[i].memory_clock > max_limits->mclk)
-				polaris10_ps->performance_levels[i].memory_clock = max_limits->mclk;
-			if (polaris10_ps->performance_levels[i].engine_clock > max_limits->sclk)
-				polaris10_ps->performance_levels[i].engine_clock = max_limits->sclk;
-		}
-	}
-
-	polaris10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
-	polaris10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	/*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
-
-	/* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_StablePState)) {
-		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
-		stable_pstate_sclk = (max_limits->sclk * 75) / 100;
-
-		for (count = table_info->vdd_dep_on_sclk->count - 1;
-				count >= 0; count--) {
-			if (stable_pstate_sclk >=
-					table_info->vdd_dep_on_sclk->entries[count].clk) {
-				stable_pstate_sclk =
-						table_info->vdd_dep_on_sclk->entries[count].clk;
-				break;
-			}
-		}
-
-		if (count < 0)
-			stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
-
-		stable_pstate_mclk = max_limits->mclk;
-
-		minimum_clocks.engineClock = stable_pstate_sclk;
-		minimum_clocks.memoryClock = stable_pstate_mclk;
-	}
-
-	if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
-		minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
-	if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
-		minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
-	polaris10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
-	if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
-		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
-				hwmgr->platform_descriptor.overdriveLimit.engineClock),
-				"Overdrive sclk exceeds limit",
-				hwmgr->gfx_arbiter.sclk_over_drive =
-						hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
-		if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
-			polaris10_ps->performance_levels[1].engine_clock =
-					hwmgr->gfx_arbiter.sclk_over_drive;
-	}
-
-	if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
-		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
-				hwmgr->platform_descriptor.overdriveLimit.memoryClock),
-				"Overdrive mclk exceeds limit",
-				hwmgr->gfx_arbiter.mclk_over_drive =
-						hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
-		if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
-			polaris10_ps->performance_levels[1].memory_clock =
-					hwmgr->gfx_arbiter.mclk_over_drive;
-	}
-
-	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
-				    hwmgr->platform_descriptor.platformCaps,
-				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
-
-
-	disable_mclk_switching = (1 < info.display_count) ||
-				    disable_mclk_switching_for_frame_lock;
-
-	sclk = polaris10_ps->performance_levels[0].engine_clock;
-	mclk = polaris10_ps->performance_levels[0].memory_clock;
-
-	if (disable_mclk_switching)
-		mclk = polaris10_ps->performance_levels
-		[polaris10_ps->performance_level_count - 1].memory_clock;
-
-	if (sclk < minimum_clocks.engineClock)
-		sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
-				max_limits->sclk : minimum_clocks.engineClock;
-
-	if (mclk < minimum_clocks.memoryClock)
-		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
-				max_limits->mclk : minimum_clocks.memoryClock;
-
-	polaris10_ps->performance_levels[0].engine_clock = sclk;
-	polaris10_ps->performance_levels[0].memory_clock = mclk;
-
-	polaris10_ps->performance_levels[1].engine_clock =
-		(polaris10_ps->performance_levels[1].engine_clock >=
-				polaris10_ps->performance_levels[0].engine_clock) ?
-						polaris10_ps->performance_levels[1].engine_clock :
-						polaris10_ps->performance_levels[0].engine_clock;
-
-	if (disable_mclk_switching) {
-		if (mclk < polaris10_ps->performance_levels[1].memory_clock)
-			mclk = polaris10_ps->performance_levels[1].memory_clock;
-
-		polaris10_ps->performance_levels[0].memory_clock = mclk;
-		polaris10_ps->performance_levels[1].memory_clock = mclk;
-	} else {
-		if (polaris10_ps->performance_levels[1].memory_clock <
-				polaris10_ps->performance_levels[0].memory_clock)
-			polaris10_ps->performance_levels[1].memory_clock =
-					polaris10_ps->performance_levels[0].memory_clock;
-	}
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_StablePState)) {
-		for (i = 0; i < polaris10_ps->performance_level_count; i++) {
-			polaris10_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
-			polaris10_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
-			polaris10_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
-			polaris10_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
-		}
-	}
-	return 0;
-}
-
-
-static int polaris10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
-{
-	struct pp_power_state  *ps;
-	struct polaris10_power_state  *polaris10_ps;
-
-	if (hwmgr == NULL)
-		return -EINVAL;
-
-	ps = hwmgr->request_ps;
-
-	if (ps == NULL)
-		return -EINVAL;
-
-	polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
-
-	if (low)
-		return polaris10_ps->performance_levels[0].memory_clock;
-	else
-		return polaris10_ps->performance_levels
-				[polaris10_ps->performance_level_count-1].memory_clock;
-}
-
-static int polaris10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
-{
-	struct pp_power_state  *ps;
-	struct polaris10_power_state  *polaris10_ps;
-
-	if (hwmgr == NULL)
-		return -EINVAL;
-
-	ps = hwmgr->request_ps;
-
-	if (ps == NULL)
-		return -EINVAL;
-
-	polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
-
-	if (low)
-		return polaris10_ps->performance_levels[0].engine_clock;
-	else
-		return polaris10_ps->performance_levels
-				[polaris10_ps->performance_level_count-1].engine_clock;
-}
-
-static int polaris10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
-					struct pp_hw_power_state *hw_ps)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct polaris10_power_state *ps = (struct polaris10_power_state *)hw_ps;
-	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
-	uint16_t size;
-	uint8_t frev, crev;
-	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-
-	/* First retrieve the Boot clocks and VDDC from the firmware info table.
-	 * We assume here that fw_info is unchanged if this call fails.
-	 */
-	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
-			hwmgr->device, index,
-			&size, &frev, &crev);
-	if (!fw_info)
-		/* During a test, there is no firmware info table. */
-		return 0;
-
-	/* Patch the state. */
-	data->vbios_boot_state.sclk_bootup_value =
-			le32_to_cpu(fw_info->ulDefaultEngineClock);
-	data->vbios_boot_state.mclk_bootup_value =
-			le32_to_cpu(fw_info->ulDefaultMemoryClock);
-	data->vbios_boot_state.mvdd_bootup_value =
-			le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
-	data->vbios_boot_state.vddc_bootup_value =
-			le16_to_cpu(fw_info->usBootUpVDDCVoltage);
-	data->vbios_boot_state.vddci_bootup_value =
-			le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
-	data->vbios_boot_state.pcie_gen_bootup_value =
-			phm_get_current_pcie_speed(hwmgr);
-
-	data->vbios_boot_state.pcie_lane_bootup_value =
-			(uint16_t)phm_get_current_pcie_lane_number(hwmgr);
-
-	/* set boot power state */
-	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
-	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
-	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
-	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
-
-	return 0;
-}
-
-static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
-		void *state, struct pp_power_state *power_state,
-		void *pp_table, uint32_t classification_flag)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct polaris10_power_state  *polaris10_power_state =
-			(struct polaris10_power_state *)(&(power_state->hardware));
-	struct polaris10_performance_level *performance_level;
-	ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
-	ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
-			(ATOM_Tonga_POWERPLAYTABLE *)pp_table;
-	PPTable_Generic_SubTable_Header *sclk_dep_table =
-			(PPTable_Generic_SubTable_Header *)
-			(((unsigned long)powerplay_table) +
-				le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
-
-	ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
-			(ATOM_Tonga_MCLK_Dependency_Table *)
-			(((unsigned long)powerplay_table) +
-				le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-
-	/* The following fields are not initialized here: id orderedList allStatesList */
-	power_state->classification.ui_label =
-			(le16_to_cpu(state_entry->usClassification) &
-			ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
-			ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
-	power_state->classification.flags = classification_flag;
-	/* NOTE: There is a classification2 flag in BIOS that is not being used right now */
-
-	power_state->classification.temporary_state = false;
-	power_state->classification.to_be_deleted = false;
-
-	power_state->validation.disallowOnDC =
-			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
-					ATOM_Tonga_DISALLOW_ON_DC));
-
-	power_state->pcie.lanes = 0;
-
-	power_state->display.disableFrameModulation = false;
-	power_state->display.limitRefreshrate = false;
-	power_state->display.enableVariBright =
-			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
-					ATOM_Tonga_ENABLE_VARIBRIGHT));
-
-	power_state->validation.supportedPowerLevels = 0;
-	power_state->uvd_clocks.VCLK = 0;
-	power_state->uvd_clocks.DCLK = 0;
-	power_state->temperatures.min = 0;
-	power_state->temperatures.max = 0;
-
-	performance_level = &(polaris10_power_state->performance_levels
-			[polaris10_power_state->performance_level_count++]);
-
-	PP_ASSERT_WITH_CODE(
-			(polaris10_power_state->performance_level_count < SMU74_MAX_LEVELS_GRAPHICS),
-			"Performance levels exceeds SMC limit!",
-			return -1);
-
-	PP_ASSERT_WITH_CODE(
-			(polaris10_power_state->performance_level_count <=
-					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
-			"Performance levels exceeds Driver limit!",
-			return -1);
-
-	/* Performance levels are arranged from low to high. */
-	performance_level->memory_clock = mclk_dep_table->entries
-			[state_entry->ucMemoryClockIndexLow].ulMclk;
-	if (sclk_dep_table->ucRevId == 0)
-		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
-			[state_entry->ucEngineClockIndexLow].ulSclk;
-	else if (sclk_dep_table->ucRevId == 1)
-		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
-			[state_entry->ucEngineClockIndexLow].ulSclk;
-	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
-			state_entry->ucPCIEGenLow);
-	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
-			state_entry->ucPCIELaneHigh);
-
-	performance_level = &(polaris10_power_state->performance_levels
-			[polaris10_power_state->performance_level_count++]);
-	performance_level->memory_clock = mclk_dep_table->entries
-			[state_entry->ucMemoryClockIndexHigh].ulMclk;
-
-	if (sclk_dep_table->ucRevId == 0)
-		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
-			[state_entry->ucEngineClockIndexHigh].ulSclk;
-	else if (sclk_dep_table->ucRevId == 1)
-		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
-			[state_entry->ucEngineClockIndexHigh].ulSclk;
-
-	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
-			state_entry->ucPCIEGenHigh);
-	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
-			state_entry->ucPCIELaneHigh);
-
-	return 0;
-}
-
-static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
-		unsigned long entry_index, struct pp_power_state *state)
-{
-	int result;
-	struct polaris10_power_state *ps;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
-			table_info->vdd_dep_on_mclk;
-
-	state->hardware.magic = PHM_VIslands_Magic;
-
-	ps = (struct polaris10_power_state *)(&state->hardware);
-
-	result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state,
-			polaris10_get_pp_table_entry_callback_func);
-
-	/* This is the earliest time we have all the dependency table and the VBIOS boot state
-	 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
-	 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state
-	 */
-	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
-		if (dep_mclk_table->entries[0].clk !=
-				data->vbios_boot_state.mclk_bootup_value)
-			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
-					"does not match VBIOS boot MCLK level");
-		if (dep_mclk_table->entries[0].vddci !=
-				data->vbios_boot_state.vddci_bootup_value)
-			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
-					"does not match VBIOS boot VDDCI level");
-	}
-
-	/* set DC compatible flag if this state supports DC */
-	if (!state->validation.disallowOnDC)
-		ps->dc_compatible = true;
-
-	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
-		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
-
-	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
-	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
-
-	if (!result) {
-		uint32_t i;
-
-		switch (state->classification.ui_label) {
-		case PP_StateUILabel_Performance:
-			data->use_pcie_performance_levels = true;
-			for (i = 0; i < ps->performance_level_count; i++) {
-				if (data->pcie_gen_performance.max <
-						ps->performance_levels[i].pcie_gen)
-					data->pcie_gen_performance.max =
-							ps->performance_levels[i].pcie_gen;
-
-				if (data->pcie_gen_performance.min >
-						ps->performance_levels[i].pcie_gen)
-					data->pcie_gen_performance.min =
-							ps->performance_levels[i].pcie_gen;
-
-				if (data->pcie_lane_performance.max <
-						ps->performance_levels[i].pcie_lane)
-					data->pcie_lane_performance.max =
-							ps->performance_levels[i].pcie_lane;
-				if (data->pcie_lane_performance.min >
-						ps->performance_levels[i].pcie_lane)
-					data->pcie_lane_performance.min =
-							ps->performance_levels[i].pcie_lane;
-			}
-			break;
-		case PP_StateUILabel_Battery:
-			data->use_pcie_power_saving_levels = true;
-
-			for (i = 0; i < ps->performance_level_count; i++) {
-				if (data->pcie_gen_power_saving.max <
-						ps->performance_levels[i].pcie_gen)
-					data->pcie_gen_power_saving.max =
-							ps->performance_levels[i].pcie_gen;
-
-				if (data->pcie_gen_power_saving.min >
-						ps->performance_levels[i].pcie_gen)
-					data->pcie_gen_power_saving.min =
-							ps->performance_levels[i].pcie_gen;
-
-				if (data->pcie_lane_power_saving.max <
-						ps->performance_levels[i].pcie_lane)
-					data->pcie_lane_power_saving.max =
-							ps->performance_levels[i].pcie_lane;
-
-				if (data->pcie_lane_power_saving.min >
-						ps->performance_levels[i].pcie_lane)
-					data->pcie_lane_power_saving.min =
-							ps->performance_levels[i].pcie_lane;
-			}
-			break;
-		default:
-			break;
-		}
-	}
-	return 0;
-}
-
-static void
-polaris10_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
-{
-	uint32_t sclk, mclk, activity_percent;
-	uint32_t offset;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
-
-	sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
-	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
-
-	mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-	seq_printf(m, "\n [  mclk  ]: %u MHz\n\n [  sclk  ]: %u MHz\n",
-			mclk / 100, sclk / 100);
-
-	offset = data->soft_regs_start + offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
-	activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
-	activity_percent += 0x80;
-	activity_percent >>= 8;
-
-	seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
-
-	seq_printf(m, "uvd    %sabled\n", data->uvd_power_gated ? "dis" : "en");
-
-	seq_printf(m, "vce    %sabled\n", data->vce_power_gated ? "dis" : "en");
-}
-
-static int polaris10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
-{
-	const struct phm_set_power_state_input *states =
-			(const struct phm_set_power_state_input *)input;
-	const struct polaris10_power_state *polaris10_ps =
-			cast_const_phw_polaris10_power_state(states->pnew_state);
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
-	uint32_t sclk = polaris10_ps->performance_levels
-			[polaris10_ps->performance_level_count - 1].engine_clock;
-	struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
-	uint32_t mclk = polaris10_ps->performance_levels
-			[polaris10_ps->performance_level_count - 1].memory_clock;
-	struct PP_Clocks min_clocks = {0};
-	uint32_t i;
-	struct cgs_display_info info = {0};
-
-	data->need_update_smu7_dpm_table = 0;
-
-	for (i = 0; i < sclk_table->count; i++) {
-		if (sclk == sclk_table->dpm_levels[i].value)
-			break;
-	}
-
-	if (i >= sclk_table->count)
-		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
-	else {
-	/* TODO: Check SCLK in DAL's minimum clocks
-	 * in case DeepSleep divider update is required.
-	 */
-		if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
-			(min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK ||
-				data->display_timing.min_clock_in_sr >= POLARIS10_MINIMUM_ENGINE_CLOCK))
-			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
-	}
-
-	for (i = 0; i < mclk_table->count; i++) {
-		if (mclk == mclk_table->dpm_levels[i].value)
-			break;
-	}
-
-	if (i >= mclk_table->count)
-		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	if (data->display_timing.num_existing_displays != info.display_count)
-		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
-
-	return 0;
-}
-
-static uint16_t polaris10_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
-		const struct polaris10_power_state *polaris10_ps)
-{
-	uint32_t i;
-	uint32_t sclk, max_sclk = 0;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct polaris10_dpm_table *dpm_table = &data->dpm_table;
-
-	for (i = 0; i < polaris10_ps->performance_level_count; i++) {
-		sclk = polaris10_ps->performance_levels[i].engine_clock;
-		if (max_sclk < sclk)
-			max_sclk = sclk;
-	}
-
-	for (i = 0; i < dpm_table->sclk_table.count; i++) {
-		if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
-			return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
-					dpm_table->pcie_speed_table.dpm_levels
-					[dpm_table->pcie_speed_table.count - 1].value :
-					dpm_table->pcie_speed_table.dpm_levels[i].value);
-	}
-
-	return 0;
-}
-
-static int polaris10_request_link_speed_change_before_state_change(
-		struct pp_hwmgr *hwmgr, const void *input)
-{
-	const struct phm_set_power_state_input *states =
-			(const struct phm_set_power_state_input *)input;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	const struct polaris10_power_state *polaris10_nps =
-			cast_const_phw_polaris10_power_state(states->pnew_state);
-	const struct polaris10_power_state *polaris10_cps =
-			cast_const_phw_polaris10_power_state(states->pcurrent_state);
-
-	uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_nps);
-	uint16_t current_link_speed;
-
-	if (data->force_pcie_gen == PP_PCIEGenInvalid)
-		current_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_cps);
-	else
-		current_link_speed = data->force_pcie_gen;
-
-	data->force_pcie_gen = PP_PCIEGenInvalid;
-	data->pspp_notify_required = false;
-
-	if (target_link_speed > current_link_speed) {
-		switch (target_link_speed) {
-		case PP_PCIEGen3:
-			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
-				break;
-			data->force_pcie_gen = PP_PCIEGen2;
-			if (current_link_speed == PP_PCIEGen2)
-				break;
-		case PP_PCIEGen2:
-			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
-				break;
-		default:
-			data->force_pcie_gen = phm_get_current_pcie_speed(hwmgr);
-			break;
-		}
-	} else {
-		if (target_link_speed < current_link_speed)
-			data->pspp_notify_required = true;
-	}
-
-	return 0;
-}
-
-static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	if (0 == data->need_update_smu7_dpm_table)
-		return 0;
-
-	if ((0 == data->sclk_dpm_key_disabled) &&
-		(data->need_update_smu7_dpm_table &
-			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
-		PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
-				    "Trying to freeze SCLK DPM when DPM is disabled",
-				);
-		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_SCLKDPM_FreezeLevel),
-				"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
-				return -1);
-	}
-
-	if ((0 == data->mclk_dpm_key_disabled) &&
-		(data->need_update_smu7_dpm_table &
-		 DPMTABLE_OD_UPDATE_MCLK)) {
-		PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
-				    "Trying to freeze MCLK DPM when DPM is disabled",
-				);
-		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_MCLKDPM_FreezeLevel),
-				"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
-				return -1);
-	}
-
-	return 0;
-}
-
-static int polaris10_populate_and_upload_sclk_mclk_dpm_levels(
-		struct pp_hwmgr *hwmgr, const void *input)
-{
-	int result = 0;
-	const struct phm_set_power_state_input *states =
-			(const struct phm_set_power_state_input *)input;
-	const struct polaris10_power_state *polaris10_ps =
-			cast_const_phw_polaris10_power_state(states->pnew_state);
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint32_t sclk = polaris10_ps->performance_levels
-			[polaris10_ps->performance_level_count - 1].engine_clock;
-	uint32_t mclk = polaris10_ps->performance_levels
-			[polaris10_ps->performance_level_count - 1].memory_clock;
-	struct polaris10_dpm_table *dpm_table = &data->dpm_table;
-
-	struct polaris10_dpm_table *golden_dpm_table = &data->golden_dpm_table;
-	uint32_t dpm_count, clock_percent;
-	uint32_t i;
-
-	if (0 == data->need_update_smu7_dpm_table)
-		return 0;
-
-	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
-		dpm_table->sclk_table.dpm_levels
-		[dpm_table->sclk_table.count - 1].value = sclk;
-
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
-		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
-		/* Need to do calculation based on the golden DPM table
-		 * as the Heatmap GPU Clock axis is also based on the default values
-		 */
-			PP_ASSERT_WITH_CODE(
-				(golden_dpm_table->sclk_table.dpm_levels
-						[golden_dpm_table->sclk_table.count - 1].value != 0),
-				"Divide by 0!",
-				return -1);
-			dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;
-
-			for (i = dpm_count; i > 1; i--) {
-				if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
-					clock_percent =
-					      ((sclk
-						- golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
-						) * 100)
-						/ golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
-
-					dpm_table->sclk_table.dpm_levels[i].value =
-							golden_dpm_table->sclk_table.dpm_levels[i].value +
-							(golden_dpm_table->sclk_table.dpm_levels[i].value *
-								clock_percent)/100;
-
-				} else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
-					clock_percent =
-						((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
-						- sclk) * 100)
-						/ golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
-
-					dpm_table->sclk_table.dpm_levels[i].value =
-							golden_dpm_table->sclk_table.dpm_levels[i].value -
-							(golden_dpm_table->sclk_table.dpm_levels[i].value *
-									clock_percent) / 100;
-				} else
-					dpm_table->sclk_table.dpm_levels[i].value =
-							golden_dpm_table->sclk_table.dpm_levels[i].value;
-			}
-		}
-	}
-
-	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
-		dpm_table->mclk_table.dpm_levels
-			[dpm_table->mclk_table.count - 1].value = mclk;
-
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
-		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
-
-			PP_ASSERT_WITH_CODE(
-					(golden_dpm_table->mclk_table.dpm_levels
-						[golden_dpm_table->mclk_table.count-1].value != 0),
-					"Divide by 0!",
-					return -1);
-			dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
-			for (i = dpm_count; i > 1; i--) {
-				if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
-					clock_percent = ((mclk -
-					golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
-					/ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
-
-					dpm_table->mclk_table.dpm_levels[i].value =
-							golden_dpm_table->mclk_table.dpm_levels[i].value +
-							(golden_dpm_table->mclk_table.dpm_levels[i].value *
-							clock_percent) / 100;
-
-				} else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
-					clock_percent = (
-					 (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
-					* 100)
-					/ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
-
-					dpm_table->mclk_table.dpm_levels[i].value =
-							golden_dpm_table->mclk_table.dpm_levels[i].value -
-							(golden_dpm_table->mclk_table.dpm_levels[i].value *
-									clock_percent) / 100;
-				} else
-					dpm_table->mclk_table.dpm_levels[i].value =
-							golden_dpm_table->mclk_table.dpm_levels[i].value;
-			}
-		}
-	}
-
-	if (data->need_update_smu7_dpm_table &
-			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
-		result = polaris10_populate_all_graphic_levels(hwmgr);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
-				return result);
-	}
-
-	if (data->need_update_smu7_dpm_table &
-			(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
-		/*populate MCLK dpm table to SMU7 */
-		result = polaris10_populate_all_memory_levels(hwmgr);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
-				return result);
-	}
-
-	return result;
-}
-
-static int polaris10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
-			  struct polaris10_single_dpm_table *dpm_table,
-			uint32_t low_limit, uint32_t high_limit)
-{
-	uint32_t i;
-
-	for (i = 0; i < dpm_table->count; i++) {
-		if ((dpm_table->dpm_levels[i].value < low_limit)
-		|| (dpm_table->dpm_levels[i].value > high_limit))
-			dpm_table->dpm_levels[i].enabled = false;
-		else
-			dpm_table->dpm_levels[i].enabled = true;
-	}
-
-	return 0;
-}
-
-static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr,
-		const struct polaris10_power_state *polaris10_ps)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint32_t high_limit_count;
-
-	PP_ASSERT_WITH_CODE((polaris10_ps->performance_level_count >= 1),
-			"power state did not have any performance level",
-			return -1);
-
-	high_limit_count = (1 == polaris10_ps->performance_level_count) ? 0 : 1;
-
-	polaris10_trim_single_dpm_states(hwmgr,
-			&(data->dpm_table.sclk_table),
-			polaris10_ps->performance_levels[0].engine_clock,
-			polaris10_ps->performance_levels[high_limit_count].engine_clock);
-
-	polaris10_trim_single_dpm_states(hwmgr,
-			&(data->dpm_table.mclk_table),
-			polaris10_ps->performance_levels[0].memory_clock,
-			polaris10_ps->performance_levels[high_limit_count].memory_clock);
-
-	return 0;
-}
-
-static int polaris10_generate_dpm_level_enable_mask(
-		struct pp_hwmgr *hwmgr, const void *input)
-{
-	int result;
-	const struct phm_set_power_state_input *states =
-			(const struct phm_set_power_state_input *)input;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	const struct polaris10_power_state *polaris10_ps =
-			cast_const_phw_polaris10_power_state(states->pnew_state);
-
-	result = polaris10_trim_dpm_states(hwmgr, polaris10_ps);
-	if (result)
-		return result;
-
-	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
-			phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
-	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
-			phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
-	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
-			phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
-
-	return 0;
-}
-
-int polaris10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
-			PPSMC_MSG_UVDDPM_Enable :
-			PPSMC_MSG_UVDDPM_Disable);
-}
-
-int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	return smum_send_msg_to_smc(hwmgr->smumgr, enable?
-			PPSMC_MSG_VCEDPM_Enable :
-			PPSMC_MSG_VCEDPM_Disable);
-}
-
-int polaris10_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	return smum_send_msg_to_smc(hwmgr->smumgr, enable?
-			PPSMC_MSG_SAMUDPM_Enable :
-			PPSMC_MSG_SAMUDPM_Disable);
-}
-
-int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint32_t mm_boot_level_offset, mm_boot_level_value;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	if (!bgate) {
-		data->smc_state_table.UvdBootLevel = 0;
-		if (table_info->mm_dep_table->count > 0)
-			data->smc_state_table.UvdBootLevel =
-					(uint8_t) (table_info->mm_dep_table->count - 1);
-		mm_boot_level_offset = data->dpm_table_start +
-				offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
-		mm_boot_level_offset /= 4;
-		mm_boot_level_offset *= 4;
-		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-				CGS_IND_REG__SMC, mm_boot_level_offset);
-		mm_boot_level_value &= 0x00FFFFFF;
-		mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
-		cgs_write_ind_register(hwmgr->device,
-				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-		if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_UVDDPM) ||
-			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_StablePState))
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_UVDDPM_SetEnabledMask,
-					(uint32_t)(1 << data->smc_state_table.UvdBootLevel));
-	}
-
-	return polaris10_enable_disable_uvd_dpm(hwmgr, !bgate);
-}
-
-int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint32_t mm_boot_level_offset, mm_boot_level_value;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	if (!bgate) {
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-						PHM_PlatformCaps_StablePState))
-			data->smc_state_table.VceBootLevel =
-				(uint8_t) (table_info->mm_dep_table->count - 1);
-		else
-			data->smc_state_table.VceBootLevel = 0;
-
-		mm_boot_level_offset = data->dpm_table_start +
-				offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
-		mm_boot_level_offset /= 4;
-		mm_boot_level_offset *= 4;
-		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-				CGS_IND_REG__SMC, mm_boot_level_offset);
-		mm_boot_level_value &= 0xFF00FFFF;
-		mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
-		cgs_write_ind_register(hwmgr->device,
-				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_VCEDPM_SetEnabledMask,
-					(uint32_t)1 << data->smc_state_table.VceBootLevel);
-	}
-
-	polaris10_enable_disable_vce_dpm(hwmgr, !bgate);
-
-	return 0;
-}
-
-int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-	if (!bgate) {
-		data->smc_state_table.SamuBootLevel = 0;
-		mm_boot_level_offset = data->dpm_table_start +
-				offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
-		mm_boot_level_offset /= 4;
-		mm_boot_level_offset *= 4;
-		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-				CGS_IND_REG__SMC, mm_boot_level_offset);
-		mm_boot_level_value &= 0xFFFFFF00;
-		mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0;
-		cgs_write_ind_register(hwmgr->device,
-				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_StablePState))
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_SAMUDPM_SetEnabledMask,
-					(uint32_t)(1 << data->smc_state_table.SamuBootLevel));
-	}
-
-	return polaris10_enable_disable_samu_dpm(hwmgr, !bgate);
-}
-
-static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	int result = 0;
-	uint32_t low_sclk_interrupt_threshold = 0;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_SclkThrottleLowNotification)
-		&& (hwmgr->gfx_arbiter.sclk_threshold !=
-				data->low_sclk_interrupt_threshold)) {
-		data->low_sclk_interrupt_threshold =
-				hwmgr->gfx_arbiter.sclk_threshold;
-		low_sclk_interrupt_threshold =
-				data->low_sclk_interrupt_threshold;
-
-		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
-		result = polaris10_copy_bytes_to_smc(
-				hwmgr->smumgr,
-				data->dpm_table_start +
-				offsetof(SMU74_Discrete_DpmTable,
-					LowSclkInterruptThreshold),
-				(uint8_t *)&low_sclk_interrupt_threshold,
-				sizeof(uint32_t),
-				data->sram_end);
-	}
-
-	return result;
-}
-
-static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	if (data->need_update_smu7_dpm_table &
-		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
-		return polaris10_program_memory_timing_parameters(hwmgr);
-
-	return 0;
-}
-
-static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	if (0 == data->need_update_smu7_dpm_table)
-		return 0;
-
-	if ((0 == data->sclk_dpm_key_disabled) &&
-		(data->need_update_smu7_dpm_table &
-		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
-
-		PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
-				    "Trying to Unfreeze SCLK DPM when DPM is disabled",
-				);
-		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_SCLKDPM_UnfreezeLevel),
-			"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
-			return -1);
-	}
-
-	if ((0 == data->mclk_dpm_key_disabled) &&
-		(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
-
-		PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
-				    "Trying to Unfreeze MCLK DPM when DPM is disabled",
-				);
-		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_SCLKDPM_UnfreezeLevel),
-		    "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
-		    return -1);
-	}
-
-	data->need_update_smu7_dpm_table = 0;
-
-	return 0;
-}
-
-static int polaris10_notify_link_speed_change_after_state_change(
-		struct pp_hwmgr *hwmgr, const void *input)
-{
-	const struct phm_set_power_state_input *states =
-			(const struct phm_set_power_state_input *)input;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	const struct polaris10_power_state *polaris10_ps =
-			cast_const_phw_polaris10_power_state(states->pnew_state);
-	uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_ps);
-	uint8_t  request;
-
-	if (data->pspp_notify_required) {
-		if (target_link_speed == PP_PCIEGen3)
-			request = PCIE_PERF_REQ_GEN3;
-		else if (target_link_speed == PP_PCIEGen2)
-			request = PCIE_PERF_REQ_GEN2;
-		else
-			request = PCIE_PERF_REQ_GEN1;
-
-		if (request == PCIE_PERF_REQ_GEN1 &&
-				phm_get_current_pcie_speed(hwmgr) > 0)
-			return 0;
-
-		if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
-			if (PP_PCIEGen2 == target_link_speed)
-				printk("PSPP request to switch to Gen2 from Gen3 Failed!");
-			else
-				printk("PSPP request to switch to Gen1 from Gen2 Failed!");
-		}
-	}
-
-	return 0;
-}
-
-static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-		(PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
-	return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ?  0 : -EINVAL;
-}
-
-
-
-static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
-{
-	int tmp_result, result = 0;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	tmp_result = polaris10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to find DPM states clocks in DPM table!",
-			result = tmp_result);
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_PCIEPerformanceRequest)) {
-		tmp_result =
-			polaris10_request_link_speed_change_before_state_change(hwmgr, input);
-		PP_ASSERT_WITH_CODE((0 == tmp_result),
-				"Failed to request link speed change before state change!",
-				result = tmp_result);
-	}
-
-	tmp_result = polaris10_freeze_sclk_mclk_dpm(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);
-
-	tmp_result = polaris10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to populate and upload SCLK MCLK DPM levels!",
-			result = tmp_result);
-
-	tmp_result = polaris10_generate_dpm_level_enable_mask(hwmgr, input);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to generate DPM level enabled mask!",
-			result = tmp_result);
-
-	tmp_result = polaris10_update_sclk_threshold(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to update SCLK threshold!",
-			result = tmp_result);
-
-	tmp_result = polaris10_program_mem_timing_parameters(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to program memory timing parameters!",
-			result = tmp_result);
-
-	tmp_result = polaris10_notify_smc_display(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to notify smc display settings!",
-			result = tmp_result);
-
-	tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to unfreeze SCLK MCLK DPM!",
-			result = tmp_result);
-
-	tmp_result = polaris10_upload_dpm_level_enable_mask(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to upload DPM level enabled mask!",
-			result = tmp_result);
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_PCIEPerformanceRequest)) {
-		tmp_result =
-			polaris10_notify_link_speed_change_after_state_change(hwmgr, input);
-		PP_ASSERT_WITH_CODE((0 == tmp_result),
-				"Failed to notify link speed change after state change!",
-				result = tmp_result);
-	}
-	data->apply_optimized_settings = false;
-	return result;
-}
-
-static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
-{
-	hwmgr->thermal_controller.
-	advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
-
-	if (phm_is_hw_access_blocked(hwmgr))
-		return 0;
-
-	return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-			PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
-}
-
-
-int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
-{
-	PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
-
-	return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ?  0 : -1;
-}
-
-int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
-{
-	uint32_t num_active_displays = 0;
-	struct cgs_display_info info = {0};
-	info.mode_info = NULL;
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	num_active_displays = info.display_count;
-
-	if (num_active_displays > 1)  /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
-		polaris10_notify_smc_display_change(hwmgr, false);
-
-
-	return 0;
-}
-
-/**
-* Programs the display gap
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always OK
-*/
-int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint32_t num_active_displays = 0;
-	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
-	uint32_t display_gap2;
-	uint32_t pre_vbi_time_in_us;
-	uint32_t frame_time_in_us;
-	uint32_t ref_clock;
-	uint32_t refresh_rate = 0;
-	struct cgs_display_info info = {0};
-	struct cgs_mode_info mode_info;
-
-	info.mode_info = &mode_info;
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-	num_active_displays = info.display_count;
-
-	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
-
-	ref_clock = mode_info.ref_clock;
-	refresh_rate = mode_info.refresh_rate;
-
-	if (0 == refresh_rate)
-		refresh_rate = 60;
-
-	frame_time_in_us = 1000000 / refresh_rate;
-
-	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
-	data->frame_time_x2 = frame_time_in_us * 2 / 100;
-
-	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, PreVBlankGap), 0x64);
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
-
-
-	return 0;
-}
-
-
-int polaris10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
-{
-	return polaris10_program_display_gap(hwmgr);
-}
-
-/**
-*  Set maximum target operating fan output RPM
-*
-* @param    hwmgr:  the address of the powerplay hardware manager.
-* @param    usMaxFanRpm:  max operating fan RPM value.
-* @return   The response that came from the SMC.
-*/
-static int polaris10_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
-{
-	hwmgr->thermal_controller.
-	advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
-
-	if (phm_is_hw_access_blocked(hwmgr))
-		return 0;
-
-	return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-			PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
-}
-
-int polaris10_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
-					const void *thermal_interrupt_info)
-{
-	return 0;
-}
-
-bool polaris10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	bool is_update_required = false;
-	struct cgs_display_info info = {0, 0, NULL};
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	if (data->display_timing.num_existing_displays != info.display_count)
-		is_update_required = true;
-/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
-	if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
-		cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
-		if (min_clocks.engineClockInSR != data->display_timing.minClockInSR &&
-			(min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK ||
-				data->display_timing.minClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK))
-			is_update_required = true;
-*/
-	return is_update_required;
-}
-
-static inline bool polaris10_are_power_levels_equal(const struct polaris10_performance_level *pl1,
-							   const struct polaris10_performance_level *pl2)
-{
-	return ((pl1->memory_clock == pl2->memory_clock) &&
-		  (pl1->engine_clock == pl2->engine_clock) &&
-		  (pl1->pcie_gen == pl2->pcie_gen) &&
-		  (pl1->pcie_lane == pl2->pcie_lane));
-}
-
-int polaris10_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
-{
-	const struct polaris10_power_state *psa = cast_const_phw_polaris10_power_state(pstate1);
-	const struct polaris10_power_state *psb = cast_const_phw_polaris10_power_state(pstate2);
-	int i;
-
-	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
-		return -EINVAL;
-
-	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
-	if (psa->performance_level_count != psb->performance_level_count) {
-		*equal = false;
-		return 0;
-	}
-
-	for (i = 0; i < psa->performance_level_count; i++) {
-		if (!polaris10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
-			/* If we have found even one performance level pair that is different the states are different. */
-			*equal = false;
-			return 0;
-		}
-	}
-
-	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
-	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
-	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
-	*equal &= (psa->sclk_threshold == psb->sclk_threshold);
-
-	return 0;
-}
-
-int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	uint32_t vbios_version;
-
-	/*  Read MC indirect register offset 0x9F bits [3:0] to see if VBIOS has already loaded a full version of MC ucode or not.*/
-
-	phm_get_mc_microcode_version(hwmgr);
-	vbios_version = hwmgr->microcode_version_info.MC & 0xf;
-	/*  Full version of MC ucode has already been loaded. */
-	if (vbios_version == 0) {
-		data->need_long_memory_training = false;
-		return 0;
-	}
-
-	data->need_long_memory_training = false;
-
-/*
- *	PPMCME_FirmwareDescriptorEntry *pfd = NULL;
-	pfd = &tonga_mcmeFirmware;
-	if (0 == PHM_READ_FIELD(hwmgr->device, MC_SEQ_SUP_CNTL, RUN))
-		polaris10_load_mc_microcode(hwmgr, pfd->dpmThreshold,
-					pfd->cfgArray, pfd->cfgSize, pfd->ioDebugArray,
-					pfd->ioDebugSize, pfd->ucodeArray, pfd->ucodeSize);
-*/
-	return 0;
-}
-
-/**
- * Read clock related registers.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-static int polaris10_read_clock_registers(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	data->clock_registers.vCG_SPLL_FUNC_CNTL = cgs_read_ind_register(hwmgr->device,
-						CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL)
-						& CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK;
-
-	data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = cgs_read_ind_register(hwmgr->device,
-						CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2)
-						& CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
-
-	data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = cgs_read_ind_register(hwmgr->device,
-						CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4)
-						& CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK;
-
-	return 0;
-}
-
-/**
- * Find out if memory is GDDR5.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-static int polaris10_get_memory_type(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint32_t temp;
-
-	temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
-
-	data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
-			((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
-			 MC_SEQ_MISC0_GDDR5_SHIFT));
-
-	return 0;
-}
-
-/**
- * Enables Dynamic Power Management by SMC
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-static int polaris10_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
-{
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			GENERAL_PWRMGT, STATIC_PM_EN, 1);
-
-	return 0;
-}
-
-/**
- * Initialize PowerGating States for different engines
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-static int polaris10_init_power_gate_state(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	data->uvd_power_gated = false;
-	data->vce_power_gated = false;
-	data->samu_power_gated = false;
-
-	return 0;
-}
-
-static int polaris10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	data->low_sclk_interrupt_threshold = 0;
-
-	return 0;
-}
-
-int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
-{
-	int tmp_result, result = 0;
-
-	polaris10_upload_mc_firmware(hwmgr);
-
-	tmp_result = polaris10_read_clock_registers(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to read clock registers!", result = tmp_result);
-
-	tmp_result = polaris10_get_memory_type(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to get memory type!", result = tmp_result);
-
-	tmp_result = polaris10_enable_acpi_power_management(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable ACPI power management!", result = tmp_result);
-
-	tmp_result = polaris10_init_power_gate_state(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to init power gate state!", result = tmp_result);
-
-	tmp_result = phm_get_mc_microcode_version(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to get MC microcode version!", result = tmp_result);
-
-	tmp_result = polaris10_init_sclk_threshold(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to init sclk threshold!", result = tmp_result);
-
-	return result;
-}
-
-static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr,
-		enum pp_clock_type type, uint32_t mask)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
-		return -EINVAL;
-
-	switch (type) {
-	case PP_SCLK:
-		if (!data->sclk_dpm_key_disabled)
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_SCLKDPM_SetEnabledMask,
-					data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
-		break;
-	case PP_MCLK:
-		if (!data->mclk_dpm_key_disabled)
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_MCLKDPM_SetEnabledMask,
-					data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
-		break;
-	case PP_PCIE:
-	{
-		uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
-		uint32_t level = 0;
-
-		while (tmp >>= 1)
-			level++;
-
-		if (!data->pcie_dpm_key_disabled)
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_PCIeDPM_ForceLevel,
-					level);
-		break;
-	}
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-static uint16_t polaris10_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
-{
-	uint32_t speedCntl = 0;
-
-	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
-	speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
-			ixPCIE_LC_SPEED_CNTL);
-	return((uint16_t)PHM_GET_FIELD(speedCntl,
-			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
-}
-
-static int polaris10_print_clock_levels(struct pp_hwmgr *hwmgr,
-		enum pp_clock_type type, char *buf)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
-	struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
-	struct polaris10_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
-	int i, now, size = 0;
-	uint32_t clock, pcie_speed;
-
-	switch (type) {
-	case PP_SCLK:
-		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
-		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
-		for (i = 0; i < sclk_table->count; i++) {
-			if (clock > sclk_table->dpm_levels[i].value)
-				continue;
-			break;
-		}
-		now = i;
-
-		for (i = 0; i < sclk_table->count; i++)
-			size += sprintf(buf + size, "%d: %uMhz %s\n",
-					i, sclk_table->dpm_levels[i].value / 100,
-					(i == now) ? "*" : "");
-		break;
-	case PP_MCLK:
-		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
-		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
-		for (i = 0; i < mclk_table->count; i++) {
-			if (clock > mclk_table->dpm_levels[i].value)
-				continue;
-			break;
-		}
-		now = i;
-
-		for (i = 0; i < mclk_table->count; i++)
-			size += sprintf(buf + size, "%d: %uMhz %s\n",
-					i, mclk_table->dpm_levels[i].value / 100,
-					(i == now) ? "*" : "");
-		break;
-	case PP_PCIE:
-		pcie_speed = polaris10_get_current_pcie_speed(hwmgr);
-		for (i = 0; i < pcie_table->count; i++) {
-			if (pcie_speed != pcie_table->dpm_levels[i].value)
-				continue;
-			break;
-		}
-		now = i;
-
-		for (i = 0; i < pcie_table->count; i++)
-			size += sprintf(buf + size, "%d: %s %s\n", i,
-					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
-					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
-					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
-					(i == now) ? "*" : "");
-		break;
-	default:
-		break;
-	}
-	return size;
-}
-
-static int polaris10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
-	if (mode) {
-		/* stop auto-manage */
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_MicrocodeFanControl))
-			polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
-		polaris10_fan_ctrl_set_static_mode(hwmgr, mode);
-	} else
-		/* restart auto-manage */
-		polaris10_fan_ctrl_reset_fan_speed_to_default(hwmgr);
-
-	return 0;
-}
-
-static int polaris10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
-{
-	if (hwmgr->fan_ctrl_is_in_default_mode)
-		return hwmgr->fan_ctrl_default_mode;
-	else
-		return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-				CG_FDO_CTRL2, FDO_PWM_MODE);
-}
-
-static int polaris10_get_sclk_od(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
-	struct polaris10_single_dpm_table *golden_sclk_table =
-			&(data->golden_dpm_table.sclk_table);
-	int value;
-
-	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
-			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
-			100 /
-			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
-	return value;
-}
-
-static int polaris10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct polaris10_single_dpm_table *golden_sclk_table =
-			&(data->golden_dpm_table.sclk_table);
-	struct pp_power_state  *ps;
-	struct polaris10_power_state  *polaris10_ps;
-
-	if (value > 20)
-		value = 20;
-
-	ps = hwmgr->request_ps;
-
-	if (ps == NULL)
-		return -EINVAL;
-
-	polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
-
-	polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].engine_clock =
-			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
-			value / 100 +
-			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
-	return 0;
-}
-
-static int polaris10_get_mclk_od(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
-	struct polaris10_single_dpm_table *golden_mclk_table =
-			&(data->golden_dpm_table.mclk_table);
-	int value;
-
-	value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
-			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
-			100 /
-			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
-	return value;
-}
-
-static int polaris10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct polaris10_single_dpm_table *golden_mclk_table =
-			&(data->golden_dpm_table.mclk_table);
-	struct pp_power_state  *ps;
-	struct polaris10_power_state  *polaris10_ps;
-
-	if (value > 20)
-		value = 20;
-
-	ps = hwmgr->request_ps;
-
-	if (ps == NULL)
-		return -EINVAL;
-
-	polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
-
-	polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].memory_clock =
-			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
-			value / 100 +
-			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
-	return 0;
-}
-static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
-	.backend_init = &polaris10_hwmgr_backend_init,
-	.backend_fini = &polaris10_hwmgr_backend_fini,
-	.asic_setup = &polaris10_setup_asic_task,
-	.dynamic_state_management_enable = &polaris10_enable_dpm_tasks,
-	.apply_state_adjust_rules = polaris10_apply_state_adjust_rules,
-	.force_dpm_level = &polaris10_force_dpm_level,
-	.power_state_set = polaris10_set_power_state_tasks,
-	.get_power_state_size = polaris10_get_power_state_size,
-	.get_mclk = polaris10_dpm_get_mclk,
-	.get_sclk = polaris10_dpm_get_sclk,
-	.patch_boot_state = polaris10_dpm_patch_boot_state,
-	.get_pp_table_entry = polaris10_get_pp_table_entry,
-	.get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries,
-	.print_current_perforce_level = polaris10_print_current_perforce_level,
-	.powerdown_uvd = polaris10_phm_powerdown_uvd,
-	.powergate_uvd = polaris10_phm_powergate_uvd,
-	.powergate_vce = polaris10_phm_powergate_vce,
-	.disable_clock_power_gating = polaris10_phm_disable_clock_power_gating,
-	.update_clock_gatings = polaris10_phm_update_clock_gatings,
-	.notify_smc_display_config_after_ps_adjustment = polaris10_notify_smc_display_config_after_ps_adjustment,
-	.display_config_changed = polaris10_display_configuration_changed_task,
-	.set_max_fan_pwm_output = polaris10_set_max_fan_pwm_output,
-	.set_max_fan_rpm_output = polaris10_set_max_fan_rpm_output,
-	.get_temperature = polaris10_thermal_get_temperature,
-	.stop_thermal_controller = polaris10_thermal_stop_thermal_controller,
-	.get_fan_speed_info = polaris10_fan_ctrl_get_fan_speed_info,
-	.get_fan_speed_percent = polaris10_fan_ctrl_get_fan_speed_percent,
-	.set_fan_speed_percent = polaris10_fan_ctrl_set_fan_speed_percent,
-	.reset_fan_speed_to_default = polaris10_fan_ctrl_reset_fan_speed_to_default,
-	.get_fan_speed_rpm = polaris10_fan_ctrl_get_fan_speed_rpm,
-	.set_fan_speed_rpm = polaris10_fan_ctrl_set_fan_speed_rpm,
-	.uninitialize_thermal_controller = polaris10_thermal_ctrl_uninitialize_thermal_controller,
-	.register_internal_thermal_interrupt = polaris10_register_internal_thermal_interrupt,
-	.check_smc_update_required_for_display_configuration = polaris10_check_smc_update_required_for_display_configuration,
-	.check_states_equal = polaris10_check_states_equal,
-	.set_fan_control_mode = polaris10_set_fan_control_mode,
-	.get_fan_control_mode = polaris10_get_fan_control_mode,
-	.force_clock_level = polaris10_force_clock_level,
-	.print_clock_levels = polaris10_print_clock_levels,
-	.enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating,
-	.get_sclk_od = polaris10_get_sclk_od,
-	.set_sclk_od = polaris10_set_sclk_od,
-	.get_mclk_od = polaris10_get_mclk_od,
-	.set_mclk_od = polaris10_set_mclk_od,
-};
-
-int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr)
-{
-	hwmgr->hwmgr_func = &polaris10_hwmgr_funcs;
-	hwmgr->pptable_func = &tonga_pptable_funcs;
-	pp_polaris10_thermal_initialize(hwmgr);
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
deleted file mode 100644
index 33c3394..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef POLARIS10_HWMGR_H
-#define POLARIS10_HWMGR_H
-
-#include "hwmgr.h"
-#include "smu74.h"
-#include "smu74_discrete.h"
-#include "ppatomctrl.h"
-#include "polaris10_ppsmc.h"
-#include "polaris10_powertune.h"
-
-#define POLARIS10_MAX_HARDWARE_POWERLEVELS	2
-
-#define POLARIS10_VOLTAGE_CONTROL_NONE                   0x0
-#define POLARIS10_VOLTAGE_CONTROL_BY_GPIO                0x1
-#define POLARIS10_VOLTAGE_CONTROL_BY_SVID2               0x2
-#define POLARIS10_VOLTAGE_CONTROL_MERGED                 0x3
-
-#define DPMTABLE_OD_UPDATE_SCLK     0x00000001
-#define DPMTABLE_OD_UPDATE_MCLK     0x00000002
-#define DPMTABLE_UPDATE_SCLK        0x00000004
-#define DPMTABLE_UPDATE_MCLK        0x00000008
-
-struct polaris10_performance_level {
-	uint32_t  memory_clock;
-	uint32_t  engine_clock;
-	uint16_t  pcie_gen;
-	uint16_t  pcie_lane;
-};
-
-struct polaris10_uvd_clocks {
-	uint32_t  vclk;
-	uint32_t  dclk;
-};
-
-struct polaris10_vce_clocks {
-	uint32_t  evclk;
-	uint32_t  ecclk;
-};
-
-struct polaris10_power_state {
-	uint32_t                  magic;
-	struct polaris10_uvd_clocks    uvd_clks;
-	struct polaris10_vce_clocks    vce_clks;
-	uint32_t                  sam_clk;
-	uint16_t                  performance_level_count;
-	bool                      dc_compatible;
-	uint32_t                  sclk_threshold;
-	struct polaris10_performance_level  performance_levels[POLARIS10_MAX_HARDWARE_POWERLEVELS];
-};
-
-struct polaris10_dpm_level {
-	bool	enabled;
-	uint32_t	value;
-	uint32_t	param1;
-};
-
-#define POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define MAX_REGULAR_DPM_NUMBER 8
-#define POLARIS10_MINIMUM_ENGINE_CLOCK 2500
-
-struct polaris10_single_dpm_table {
-	uint32_t		count;
-	struct polaris10_dpm_level	dpm_levels[MAX_REGULAR_DPM_NUMBER];
-};
-
-struct polaris10_dpm_table {
-	struct polaris10_single_dpm_table  sclk_table;
-	struct polaris10_single_dpm_table  mclk_table;
-	struct polaris10_single_dpm_table  pcie_speed_table;
-	struct polaris10_single_dpm_table  vddc_table;
-	struct polaris10_single_dpm_table  vddci_table;
-	struct polaris10_single_dpm_table  mvdd_table;
-};
-
-struct polaris10_clock_registers {
-	uint32_t  vCG_SPLL_FUNC_CNTL;
-	uint32_t  vCG_SPLL_FUNC_CNTL_2;
-	uint32_t  vCG_SPLL_FUNC_CNTL_3;
-	uint32_t  vCG_SPLL_FUNC_CNTL_4;
-	uint32_t  vCG_SPLL_SPREAD_SPECTRUM;
-	uint32_t  vCG_SPLL_SPREAD_SPECTRUM_2;
-	uint32_t  vDLL_CNTL;
-	uint32_t  vMCLK_PWRMGT_CNTL;
-	uint32_t  vMPLL_AD_FUNC_CNTL;
-	uint32_t  vMPLL_DQ_FUNC_CNTL;
-	uint32_t  vMPLL_FUNC_CNTL;
-	uint32_t  vMPLL_FUNC_CNTL_1;
-	uint32_t  vMPLL_FUNC_CNTL_2;
-	uint32_t  vMPLL_SS1;
-	uint32_t  vMPLL_SS2;
-};
-
-#define DISABLE_MC_LOADMICROCODE   1
-#define DISABLE_MC_CFGPROGRAMMING  2
-
-struct polaris10_voltage_smio_registers {
-	uint32_t vS0_VID_LOWER_SMIO_CNTL;
-};
-
-#define POLARIS10_MAX_LEAKAGE_COUNT  8
-
-struct polaris10_leakage_voltage {
-	uint16_t  count;
-	uint16_t  leakage_id[POLARIS10_MAX_LEAKAGE_COUNT];
-	uint16_t  actual_voltage[POLARIS10_MAX_LEAKAGE_COUNT];
-};
-
-struct polaris10_vbios_boot_state {
-	uint16_t    mvdd_bootup_value;
-	uint16_t    vddc_bootup_value;
-	uint16_t    vddci_bootup_value;
-	uint32_t    sclk_bootup_value;
-	uint32_t    mclk_bootup_value;
-	uint16_t    pcie_gen_bootup_value;
-	uint16_t    pcie_lane_bootup_value;
-};
-
-/* Ultra Low Voltage parameter structure */
-struct polaris10_ulv_parm {
-	bool                           ulv_supported;
-	uint32_t                       cg_ulv_parameter;
-	uint32_t                       ulv_volt_change_delay;
-	struct polaris10_performance_level  ulv_power_level;
-};
-
-struct polaris10_display_timing {
-	uint32_t  min_clock_in_sr;
-	uint32_t  num_existing_displays;
-};
-
-struct polaris10_dpmlevel_enable_mask {
-	uint32_t  uvd_dpm_enable_mask;
-	uint32_t  vce_dpm_enable_mask;
-	uint32_t  acp_dpm_enable_mask;
-	uint32_t  samu_dpm_enable_mask;
-	uint32_t  sclk_dpm_enable_mask;
-	uint32_t  mclk_dpm_enable_mask;
-	uint32_t  pcie_dpm_enable_mask;
-};
-
-struct polaris10_pcie_perf_range {
-	uint16_t  max;
-	uint16_t  min;
-};
-struct polaris10_range_table {
-	uint32_t trans_lower_frequency; /* in 10khz */
-	uint32_t trans_upper_frequency;
-};
-
-struct polaris10_hwmgr {
-	struct polaris10_dpm_table			dpm_table;
-	struct polaris10_dpm_table			golden_dpm_table;
-	SMU74_Discrete_DpmTable				smc_state_table;
-	struct SMU74_Discrete_Ulv            ulv_setting;
-
-	struct polaris10_range_table                range_table[NUM_SCLK_RANGE];
-	uint32_t						voting_rights_clients0;
-	uint32_t						voting_rights_clients1;
-	uint32_t						voting_rights_clients2;
-	uint32_t						voting_rights_clients3;
-	uint32_t						voting_rights_clients4;
-	uint32_t						voting_rights_clients5;
-	uint32_t						voting_rights_clients6;
-	uint32_t						voting_rights_clients7;
-	uint32_t						static_screen_threshold_unit;
-	uint32_t						static_screen_threshold;
-	uint32_t						voltage_control;
-	uint32_t						vddc_vddci_delta;
-
-	uint32_t						active_auto_throttle_sources;
-
-	struct polaris10_clock_registers            clock_registers;
-	struct polaris10_voltage_smio_registers      voltage_smio_registers;
-
-	bool                           is_memory_gddr5;
-	uint16_t                       acpi_vddc;
-	bool                           pspp_notify_required;
-	uint16_t                       force_pcie_gen;
-	uint16_t                       acpi_pcie_gen;
-	uint32_t                       pcie_gen_cap;
-	uint32_t                       pcie_lane_cap;
-	uint32_t                       pcie_spc_cap;
-	struct polaris10_leakage_voltage          vddc_leakage;
-	struct polaris10_leakage_voltage          Vddci_leakage;
-
-	uint32_t                             mvdd_control;
-	uint32_t                             vddc_mask_low;
-	uint32_t                             mvdd_mask_low;
-	uint16_t                            max_vddc_in_pptable;
-	uint16_t                            min_vddc_in_pptable;
-	uint16_t                            max_vddci_in_pptable;
-	uint16_t                            min_vddci_in_pptable;
-	uint32_t                             mclk_strobe_mode_threshold;
-	uint32_t                             mclk_stutter_mode_threshold;
-	uint32_t                             mclk_edc_enable_threshold;
-	uint32_t                             mclk_edcwr_enable_threshold;
-	bool                                is_uvd_enabled;
-	struct polaris10_vbios_boot_state        vbios_boot_state;
-
-	bool                           pcie_performance_request;
-	bool                           battery_state;
-	bool                           is_tlu_enabled;
-
-	/* ---- SMC SRAM Address of firmware header tables ---- */
-	uint32_t                             sram_end;
-	uint32_t                             dpm_table_start;
-	uint32_t                             soft_regs_start;
-	uint32_t                             mc_reg_table_start;
-	uint32_t                             fan_table_start;
-	uint32_t                             arb_table_start;
-
-	/* ---- Stuff originally coming from Evergreen ---- */
-	uint32_t                             vddci_control;
-	struct pp_atomctrl_voltage_table     vddc_voltage_table;
-	struct pp_atomctrl_voltage_table     vddci_voltage_table;
-	struct pp_atomctrl_voltage_table     mvdd_voltage_table;
-
-	uint32_t                             mgcg_cgtt_local2;
-	uint32_t                             mgcg_cgtt_local3;
-	uint32_t                             gpio_debug;
-	uint32_t                             mc_micro_code_feature;
-	uint32_t                             highest_mclk;
-	uint16_t                             acpi_vddci;
-	uint8_t                              mvdd_high_index;
-	uint8_t                              mvdd_low_index;
-	bool                                 dll_default_on;
-	bool                                 performance_request_registered;
-
-	/* ---- Low Power Features ---- */
-	struct polaris10_ulv_parm                 ulv;
-
-	/* ---- CAC Stuff ---- */
-	uint32_t                       cac_table_start;
-	bool                           cac_configuration_required;
-	bool                           driver_calculate_cac_leakage;
-	bool                           cac_enabled;
-
-	/* ---- DPM2 Parameters ---- */
-	uint32_t                       power_containment_features;
-	bool                           enable_dte_feature;
-	bool                           enable_tdc_limit_feature;
-	bool                           enable_pkg_pwr_tracking_feature;
-	bool                           disable_uvd_power_tune_feature;
-	const struct polaris10_pt_defaults       *power_tune_defaults;
-	struct SMU74_Discrete_PmFuses  power_tune_table;
-	uint32_t                       dte_tj_offset;
-	uint32_t                       fast_watermark_threshold;
-
-	/* ---- Phase Shedding ---- */
-	bool                           vddc_phase_shed_control;
-
-	/* ---- DI/DT ---- */
-	struct polaris10_display_timing        display_timing;
-	uint32_t                      bif_sclk_table[SMU74_MAX_LEVELS_LINK];
-
-	/* ---- Thermal Temperature Setting ---- */
-	struct polaris10_dpmlevel_enable_mask     dpm_level_enable_mask;
-	uint32_t                                  need_update_smu7_dpm_table;
-	uint32_t                                  sclk_dpm_key_disabled;
-	uint32_t                                  mclk_dpm_key_disabled;
-	uint32_t                                  pcie_dpm_key_disabled;
-	uint32_t                                  min_engine_clocks;
-	struct polaris10_pcie_perf_range          pcie_gen_performance;
-	struct polaris10_pcie_perf_range          pcie_lane_performance;
-	struct polaris10_pcie_perf_range          pcie_gen_power_saving;
-	struct polaris10_pcie_perf_range          pcie_lane_power_saving;
-	bool                                      use_pcie_performance_levels;
-	bool                                      use_pcie_power_saving_levels;
-	uint32_t                                  activity_target[SMU74_MAX_LEVELS_GRAPHICS];
-	uint32_t                                  mclk_activity_target;
-	uint32_t                                  mclk_dpm0_activity_target;
-	uint32_t                                  low_sclk_interrupt_threshold;
-	uint32_t                                  last_mclk_dpm_enable_mask;
-	bool                                      uvd_enabled;
-
-	/* ---- Power Gating States ---- */
-	bool                           uvd_power_gated;
-	bool                           vce_power_gated;
-	bool                           samu_power_gated;
-	bool                           need_long_memory_training;
-
-	/* Application power optimization parameters */
-	bool                               update_up_hyst;
-	bool                               update_down_hyst;
-	uint32_t                           down_hyst;
-	uint32_t                           up_hyst;
-	uint32_t disable_dpm_mask;
-	bool apply_optimized_settings;
-	uint32_t                              avfs_vdroop_override_setting;
-	bool                                  apply_avfs_cks_off_voltage;
-	uint32_t                              frame_time_x2;
-};
-
-/* To convert to Q8.8 format for firmware */
-#define POLARIS10_Q88_FORMAT_CONVERSION_UNIT             256
-
-enum Polaris10_I2CLineID {
-	Polaris10_I2CLineID_DDC1 = 0x90,
-	Polaris10_I2CLineID_DDC2 = 0x91,
-	Polaris10_I2CLineID_DDC3 = 0x92,
-	Polaris10_I2CLineID_DDC4 = 0x93,
-	Polaris10_I2CLineID_DDC5 = 0x94,
-	Polaris10_I2CLineID_DDC6 = 0x95,
-	Polaris10_I2CLineID_SCLSDA = 0x96,
-	Polaris10_I2CLineID_DDCVGA = 0x97
-};
-
-#define POLARIS10_I2C_DDC1DATA          0
-#define POLARIS10_I2C_DDC1CLK           1
-#define POLARIS10_I2C_DDC2DATA          2
-#define POLARIS10_I2C_DDC2CLK           3
-#define POLARIS10_I2C_DDC3DATA          4
-#define POLARIS10_I2C_DDC3CLK           5
-#define POLARIS10_I2C_SDA               40
-#define POLARIS10_I2C_SCL               41
-#define POLARIS10_I2C_DDC4DATA          65
-#define POLARIS10_I2C_DDC4CLK           66
-#define POLARIS10_I2C_DDC5DATA          0x48
-#define POLARIS10_I2C_DDC5CLK           0x49
-#define POLARIS10_I2C_DDC6DATA          0x4a
-#define POLARIS10_I2C_DDC6CLK           0x4b
-#define POLARIS10_I2C_DDCVGADATA        0x4c
-#define POLARIS10_I2C_DDCVGACLK         0x4d
-
-#define POLARIS10_UNUSED_GPIO_PIN       0x7F
-
-int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
-
-int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
deleted file mode 100644
index bc78e28..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef POLARIS10_POWERTUNE_H
-#define POLARIS10_POWERTUNE_H
-
-enum polaris10_pt_config_reg_type {
-	POLARIS10_CONFIGREG_MMR = 0,
-	POLARIS10_CONFIGREG_SMC_IND,
-	POLARIS10_CONFIGREG_DIDT_IND,
-	POLARIS10_CONFIGREG_GC_CAC_IND,
-	POLARIS10_CONFIGREG_CACHE,
-	POLARIS10_CONFIGREG_MAX
-};
-
-#define DIDT_SQ_CTRL0__UNUSED_0_MASK    0xfffc0000
-#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT  0x12
-#define DIDT_TD_CTRL0__UNUSED_0_MASK    0xfffc0000
-#define DIDT_TD_CTRL0__UNUSED_0__SHIFT  0x12
-#define DIDT_TCP_CTRL0__UNUSED_0_MASK   0xfffc0000
-#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x12
-#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK                 0xc0000000
-#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001e
-#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK                 0xc0000000
-#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001e
-#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK                0xc0000000
-#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT              0x0000001e
-
-/* PowerContainment Features */
-#define POWERCONTAINMENT_FEATURE_DTE             0x00000001
-#define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
-#define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
-
-#define ixGC_CAC_CNTL 0x0000
-#define ixDIDT_SQ_STALL_CTRL 0x0004
-#define  ixDIDT_SQ_TUNING_CTRL 0x0005
-#define ixDIDT_TD_STALL_CTRL 0x0044
-#define ixDIDT_TD_TUNING_CTRL 0x0045
-#define ixDIDT_TCP_STALL_CTRL 0x0064
-#define ixDIDT_TCP_TUNING_CTRL 0x0065
-
-struct polaris10_pt_config_reg {
-	uint32_t                           offset;
-	uint32_t                           mask;
-	uint32_t                           shift;
-	uint32_t                           value;
-	enum polaris10_pt_config_reg_type       type;
-};
-
-struct polaris10_pt_defaults {
-	uint8_t   SviLoadLineEn;
-	uint8_t   SviLoadLineVddC;
-	uint8_t   TDC_VDDC_ThrottleReleaseLimitPerc;
-	uint8_t   TDC_MAWt;
-	uint8_t   TdcWaterfallCtl;
-	uint8_t   DTEAmbientTempBase;
-
-	uint32_t  DisplayCac;
-	uint32_t  BAPM_TEMP_GRADIENT;
-	uint16_t  BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
-	uint16_t  BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
-};
-
-void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
-int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
-int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr);
-int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr);
-int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr);
-int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr);
-int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr);
-int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
-int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr);
-int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr);
-#endif  /* POLARIS10_POWERTUNE_H */
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
deleted file mode 100644
index b206632..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
+++ /dev/null
@@ -1,716 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <asm/div64.h>
-#include "polaris10_thermal.h"
-#include "polaris10_hwmgr.h"
-#include "polaris10_smumgr.h"
-#include "polaris10_ppsmc.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-
-int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
-		struct phm_fan_speed_info *fan_speed_info)
-{
-	if (hwmgr->thermal_controller.fanInfo.bNoFan)
-		return 0;
-
-	fan_speed_info->supports_percent_read = true;
-	fan_speed_info->supports_percent_write = true;
-	fan_speed_info->min_percent = 0;
-	fan_speed_info->max_percent = 100;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
-		hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
-		fan_speed_info->supports_rpm_read = true;
-		fan_speed_info->supports_rpm_write = true;
-		fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
-		fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
-	} else {
-		fan_speed_info->min_rpm = 0;
-		fan_speed_info->max_rpm = 0;
-	}
-
-	return 0;
-}
-
-int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
-		uint32_t *speed)
-{
-	uint32_t duty100;
-	uint32_t duty;
-	uint64_t tmp64;
-
-	if (hwmgr->thermal_controller.fanInfo.bNoFan)
-		return 0;
-
-	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_FDO_CTRL1, FMAX_DUTY100);
-	duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_THERMAL_STATUS, FDO_PWM_DUTY);
-
-	if (duty100 == 0)
-		return -EINVAL;
-
-
-	tmp64 = (uint64_t)duty * 100;
-	do_div(tmp64, duty100);
-	*speed = (uint32_t)tmp64;
-
-	if (*speed > 100)
-		*speed = 100;
-
-	return 0;
-}
-
-int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
-{
-	uint32_t tach_period;
-	uint32_t crystal_clock_freq;
-
-	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
-			(hwmgr->thermal_controller.fanInfo.
-				ucTachometerPulsesPerRevolution == 0))
-		return 0;
-
-	tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_TACH_STATUS, TACH_PERIOD);
-
-	if (tach_period == 0)
-		return -EINVAL;
-
-	crystal_clock_freq = tonga_get_xclk(hwmgr);
-
-	*speed = 60 * crystal_clock_freq * 10000 / tach_period;
-
-	return 0;
-}
-
-/**
-* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
-* @param    hwmgr  the address of the powerplay hardware manager.
-*           mode    the fan control mode, 0 default, 1 by percent, 5, by RPM
-* @exception Should always succeed.
-*/
-int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
-
-	if (hwmgr->fan_ctrl_is_in_default_mode) {
-		hwmgr->fan_ctrl_default_mode =
-				PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,	CGS_IND_REG__SMC,
-						CG_FDO_CTRL2, FDO_PWM_MODE);
-		hwmgr->tmin =
-				PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-						CG_FDO_CTRL2, TMIN);
-		hwmgr->fan_ctrl_is_in_default_mode = false;
-	}
-
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_FDO_CTRL2, TMIN, 0);
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_FDO_CTRL2, FDO_PWM_MODE, mode);
-
-	return 0;
-}
-
-/**
-* Reset Fan Speed Control to default mode.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @exception Should always succeed.
-*/
-int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
-{
-	if (!hwmgr->fan_ctrl_is_in_default_mode) {
-		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-				CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
-		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-				CG_FDO_CTRL2, TMIN, hwmgr->tmin);
-		hwmgr->fan_ctrl_is_in_default_mode = true;
-	}
-
-	return 0;
-}
-
-int polaris10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
-{
-	int result;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
-		cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
-		result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
-
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_FanSpeedInTableIsRPM))
-			hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
-					hwmgr->thermal_controller.
-					advanceFanControlParameters.usMaxFanRPM);
-		else
-			hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr,
-					hwmgr->thermal_controller.
-					advanceFanControlParameters.usMaxFanPWM);
-
-	} else {
-		cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
-		result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
-	}
-
-	if (!result && hwmgr->thermal_controller.
-			advanceFanControlParameters.ucTargetTemperature)
-		result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-				PPSMC_MSG_SetFanTemperatureTarget,
-				hwmgr->thermal_controller.
-				advanceFanControlParameters.ucTargetTemperature);
-
-	return result;
-}
-
-
-int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
-{
-	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
-}
-
-/**
-* Set Fan Speed in percent.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    speed is the percentage value (0% - 100%) to be set.
-* @exception Fails is the 100% setting appears to be 0.
-*/
-int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
-		uint32_t speed)
-{
-	uint32_t duty100;
-	uint32_t duty;
-	uint64_t tmp64;
-
-	if (hwmgr->thermal_controller.fanInfo.bNoFan)
-		return 0;
-
-	if (speed > 100)
-		speed = 100;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_MicrocodeFanControl))
-		polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
-
-	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_FDO_CTRL1, FMAX_DUTY100);
-
-	if (duty100 == 0)
-		return -EINVAL;
-
-	tmp64 = (uint64_t)speed * duty100;
-	do_div(tmp64, 100);
-	duty = (uint32_t)tmp64;
-
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
-
-	return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-}
-
-/**
-* Reset Fan Speed to default.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @exception Always succeeds.
-*/
-int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
-{
-	int result;
-
-	if (hwmgr->thermal_controller.fanInfo.bNoFan)
-		return 0;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_MicrocodeFanControl)) {
-		result = polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-		if (!result)
-			result = polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
-	} else
-		result = polaris10_fan_ctrl_set_default_mode(hwmgr);
-
-	return result;
-}
-
-/**
-* Set Fan Speed in RPM.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    speed is the percentage value (min - max) to be set.
-* @exception Fails is the speed not lie between min and max.
-*/
-int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
-{
-	uint32_t tach_period;
-	uint32_t crystal_clock_freq;
-
-	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
-			(hwmgr->thermal_controller.fanInfo.
-			ucTachometerPulsesPerRevolution == 0) ||
-			(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
-			(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
-		return 0;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_MicrocodeFanControl))
-		polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
-
-	crystal_clock_freq = tonga_get_xclk(hwmgr);
-
-	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
-
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-				CG_TACH_STATUS, TACH_PERIOD, tach_period);
-
-	return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-}
-
-/**
-* Reads the remote temperature from the SIslands thermal controller.
-*
-* @param    hwmgr The address of the hardware manager.
-*/
-int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
-{
-	int temp;
-
-	temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_MULT_THERMAL_STATUS, CTF_TEMP);
-
-	/* Bit 9 means the reading is lower than the lowest usable value. */
-	if (temp & 0x200)
-		temp = POLARIS10_THERMAL_MAXIMUM_TEMP_READING;
-	else
-		temp = temp & 0x1ff;
-
-	temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
-	return temp;
-}
-
-/**
-* Set the requested temperature range for high and low alert signals
-*
-* @param    hwmgr The address of the hardware manager.
-* @param    range Temperature range to be programmed for high and low alert signals
-* @exception PP_Result_BadInput if the input data is not valid.
-*/
-static int polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
-		uint32_t low_temp, uint32_t high_temp)
-{
-	uint32_t low = POLARIS10_THERMAL_MINIMUM_ALERT_TEMP *
-			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-	uint32_t high = POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP *
-			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
-	if (low < low_temp)
-		low = low_temp;
-	if (high > high_temp)
-		high = high_temp;
-
-	if (low > high)
-		return -EINVAL;
-
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_THERMAL_INT, DIG_THERM_INTH,
-			(high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_THERMAL_INT, DIG_THERM_INTL,
-			(low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_THERMAL_CTRL, DIG_THERM_DPM,
-			(high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-
-	return 0;
-}
-
-/**
-* Programs thermal controller one-time setting registers
-*
-* @param    hwmgr The address of the hardware manager.
-*/
-static int polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
-	if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
-		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-				CG_TACH_CTRL, EDGE_PER_REV,
-				hwmgr->thermal_controller.fanInfo.
-				ucTachometerPulsesPerRevolution - 1);
-
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
-
-	return 0;
-}
-
-/**
-* Enable thermal alerts on the RV770 thermal controller.
-*
-* @param    hwmgr The address of the hardware manager.
-*/
-static int polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
-{
-	uint32_t alert;
-
-	alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_THERMAL_INT, THERM_INT_MASK);
-	alert &= ~(POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_THERMAL_INT, THERM_INT_MASK, alert);
-
-	/* send message to SMU to enable internal thermal interrupts */
-	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable);
-}
-
-/**
-* Disable thermal alerts on the RV770 thermal controller.
-* @param    hwmgr The address of the hardware manager.
-*/
-static int polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
-{
-	uint32_t alert;
-
-	alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_THERMAL_INT, THERM_INT_MASK);
-	alert |= (POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_THERMAL_INT, THERM_INT_MASK, alert);
-
-	/* send message to SMU to disable internal thermal interrupts */
-	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable);
-}
-
-/**
-* Uninitialize the thermal controller.
-* Currently just disables alerts.
-* @param    hwmgr The address of the hardware manager.
-*/
-int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
-{
-	int result = polaris10_thermal_disable_alert(hwmgr);
-
-	if (!hwmgr->thermal_controller.fanInfo.bNoFan)
-		polaris10_fan_ctrl_set_default_mode(hwmgr);
-
-	return result;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from set temperature range routine
-*/
-int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
-		void *input, void *output, void *storage, int result)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
-	uint32_t duty100;
-	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
-	uint16_t fdo_min, slope1, slope2;
-	uint32_t reference_clock;
-	int res;
-	uint64_t tmp64;
-
-	if (data->fan_table_start == 0) {
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_MicrocodeFanControl);
-		return 0;
-	}
-
-	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_FDO_CTRL1, FMAX_DUTY100);
-
-	if (duty100 == 0) {
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_MicrocodeFanControl);
-		return 0;
-	}
-
-	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
-			usPWMMin * duty100;
-	do_div(tmp64, 10000);
-	fdo_min = (uint16_t)tmp64;
-
-	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
-			hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
-	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
-			hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
-	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
-			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
-	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
-			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
-	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
-	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
-	fan_table.TempMin = cpu_to_be16((50 + hwmgr->
-			thermal_controller.advanceFanControlParameters.usTMin) / 100);
-	fan_table.TempMed = cpu_to_be16((50 + hwmgr->
-			thermal_controller.advanceFanControlParameters.usTMed) / 100);
-	fan_table.TempMax = cpu_to_be16((50 + hwmgr->
-			thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
-	fan_table.Slope1 = cpu_to_be16(slope1);
-	fan_table.Slope2 = cpu_to_be16(slope2);
-
-	fan_table.FdoMin = cpu_to_be16(fdo_min);
-
-	fan_table.HystDown = cpu_to_be16(hwmgr->
-			thermal_controller.advanceFanControlParameters.ucTHyst);
-
-	fan_table.HystUp = cpu_to_be16(1);
-
-	fan_table.HystSlope = cpu_to_be16(1);
-
-	fan_table.TempRespLim = cpu_to_be16(5);
-
-	reference_clock = tonga_get_xclk(hwmgr);
-
-	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
-			thermal_controller.advanceFanControlParameters.ulCycleDelay *
-			reference_clock) / 1600);
-
-	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
-	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
-			hwmgr->device, CGS_IND_REG__SMC,
-			CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
-	res = polaris10_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start,
-			(uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
-			data->sram_end);
-
-	if (!res && hwmgr->thermal_controller.
-			advanceFanControlParameters.ucMinimumPWMLimit)
-		res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-				PPSMC_MSG_SetFanMinPwm,
-				hwmgr->thermal_controller.
-				advanceFanControlParameters.ucMinimumPWMLimit);
-
-	if (!res && hwmgr->thermal_controller.
-			advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
-		res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-				PPSMC_MSG_SetFanSclkTarget,
-				hwmgr->thermal_controller.
-				advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
-
-	if (res)
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_MicrocodeFanControl);
-
-	return 0;
-}
-
-/**
-* Start the fan control on the SMC.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from set temperature range routine
-*/
-int tf_polaris10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
-		void *input, void *output, void *storage, int result)
-{
-/* If the fantable setup has failed we could have disabled
- * PHM_PlatformCaps_MicrocodeFanControl even after
- * this function was included in the table.
- * Make sure that we still think controlling the fan is OK.
-*/
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_MicrocodeFanControl)) {
-		polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
-		polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-	}
-
-	return 0;
-}
-
-/**
-* Set temperature range for high and low alerts
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from set temperature range routine
-*/
-int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
-		void *input, void *output, void *storage, int result)
-{
-	struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
-
-	if (range == NULL)
-		return -EINVAL;
-
-	return polaris10_thermal_set_temperature_range(hwmgr, range->min, range->max);
-}
-
-/**
-* Programs one-time setting registers
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from initialize thermal controller routine
-*/
-int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr,
-		void *input, void *output, void *storage, int result)
-{
-    return polaris10_thermal_initialize(hwmgr);
-}
-
-/**
-* Enable high and low alerts
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from enable alert routine
-*/
-int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr,
-		void *input, void *output, void *storage, int result)
-{
-	return polaris10_thermal_enable_alert(hwmgr);
-}
-
-/**
-* Disable high and low alerts
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from disable alert routine
-*/
-static int tf_polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
-		void *input, void *output, void *storage, int result)
-{
-	return polaris10_thermal_disable_alert(hwmgr);
-}
-
-static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
-		void *input, void *output, void *storage, int result)
-{
-	int ret;
-	struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
-	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
-		return 0;
-
-	ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-			PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
-
-	ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
-			0 : -1;
-
-	if (!ret)
-		/* If this param is not changed, this function could fire unnecessarily */
-		smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
-
-	return ret;
-}
-
-static const struct phm_master_table_item
-polaris10_thermal_start_thermal_controller_master_list[] = {
-	{NULL, tf_polaris10_thermal_initialize},
-	{NULL, tf_polaris10_thermal_set_temperature_range},
-	{NULL, tf_polaris10_thermal_enable_alert},
-	{NULL, tf_polaris10_thermal_avfs_enable},
-/* We should restrict performance levels to low before we halt the SMC.
- * On the other hand we are still in boot state when we do this
- * so it would be pointless.
- * If this assumption changes we have to revisit this table.
- */
-	{NULL, tf_polaris10_thermal_setup_fan_table},
-	{NULL, tf_polaris10_thermal_start_smc_fan_control},
-	{NULL, NULL}
-};
-
-static const struct phm_master_table_header
-polaris10_thermal_start_thermal_controller_master = {
-	0,
-	PHM_MasterTableFlag_None,
-	polaris10_thermal_start_thermal_controller_master_list
-};
-
-static const struct phm_master_table_item
-polaris10_thermal_set_temperature_range_master_list[] = {
-	{NULL, tf_polaris10_thermal_disable_alert},
-	{NULL, tf_polaris10_thermal_set_temperature_range},
-	{NULL, tf_polaris10_thermal_enable_alert},
-	{NULL, NULL}
-};
-
-static const struct phm_master_table_header
-polaris10_thermal_set_temperature_range_master = {
-	0,
-	PHM_MasterTableFlag_None,
-	polaris10_thermal_set_temperature_range_master_list
-};
-
-int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
-{
-	if (!hwmgr->thermal_controller.fanInfo.bNoFan)
-		polaris10_fan_ctrl_set_default_mode(hwmgr);
-	return 0;
-}
-
-/**
-* Initializes the thermal controller related functions in the Hardware Manager structure.
-* @param    hwmgr The address of the hardware manager.
-* @exception Any error code from the low-level communication.
-*/
-int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
-	int result;
-
-	result = phm_construct_table(hwmgr,
-			&polaris10_thermal_set_temperature_range_master,
-			&(hwmgr->set_temperature_range));
-
-	if (!result) {
-		result = phm_construct_table(hwmgr,
-				&polaris10_thermal_start_thermal_controller_master,
-				&(hwmgr->start_thermal_controller));
-		if (result)
-			phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
-	}
-
-	if (!result)
-		hwmgr->fan_ctrl_is_in_default_mode = true;
-	return result;
-}
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h
deleted file mode 100644
index 62f8cbc..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _POLARIS10_THERMAL_H_
-#define _POLARIS10_THERMAL_H_
-
-#include "hwmgr.h"
-
-#define POLARIS10_THERMAL_HIGH_ALERT_MASK         0x1
-#define POLARIS10_THERMAL_LOW_ALERT_MASK          0x2
-
-#define POLARIS10_THERMAL_MINIMUM_TEMP_READING    -256
-#define POLARIS10_THERMAL_MAXIMUM_TEMP_READING    255
-
-#define POLARIS10_THERMAL_MINIMUM_ALERT_TEMP      0
-#define POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP      255
-
-#define FDO_PWM_MODE_STATIC  1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-
-extern int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-
-extern int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr);
-extern int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
-extern int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
-extern int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
-extern int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr);
-extern int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 26f3e30..1126bd4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -22,7 +22,6 @@
  */
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/fb.h>
 
 #include "ppatomctrl.h"
 #include "atombios.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h
similarity index 99%
rename from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
rename to drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h
index f127198..1e870f5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h
@@ -164,7 +164,7 @@
 typedef struct _ATOM_Tonga_State_Array {
 	UCHAR ucRevId;
 	UCHAR ucNumEntries;		/* Number of entries. */
-	ATOM_Tonga_State states[1];	/* Dynamically allocate entries. */
+	ATOM_Tonga_State entries[1];	/* Dynamically allocate entries. */
 } ATOM_Tonga_State_Array;
 
 typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
similarity index 81%
rename from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
rename to drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index cfb647f..7de701d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -22,15 +22,14 @@
  */
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/fb.h>
 
-#include "tonga_processpptables.h"
+#include "process_pptables_v1_0.h"
 #include "ppatomctrl.h"
 #include "atombios.h"
 #include "pp_debug.h"
 #include "hwmgr.h"
 #include "cgs_common.h"
-#include "tonga_pptable.h"
+#include "pptable_v1_0.h"
 
 /**
  * Private Function used during initialization.
@@ -154,12 +153,14 @@
 static int get_vddc_lookup_table(
 		struct pp_hwmgr	*hwmgr,
 		phm_ppt_v1_voltage_lookup_table	**lookup_table,
-		const ATOM_Tonga_Voltage_Lookup_Table	*vddc_lookup_pp_tables,
-		uint32_t	max_levels
+		const ATOM_Tonga_Voltage_Lookup_Table *vddc_lookup_pp_tables,
+		uint32_t max_levels
 		)
 {
 	uint32_t table_size, i;
 	phm_ppt_v1_voltage_lookup_table *table;
+	phm_ppt_v1_voltage_lookup_record *record;
+	ATOM_Tonga_Voltage_Lookup_Record *atom_record;
 
 	PP_ASSERT_WITH_CODE((0 != vddc_lookup_pp_tables->ucNumEntries),
 		"Invalid CAC Leakage PowerPlay Table!", return 1);
@@ -177,15 +178,17 @@
 	table->count = vddc_lookup_pp_tables->ucNumEntries;
 
 	for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) {
-		table->entries[i].us_calculated = 0;
-		table->entries[i].us_vdd =
-			vddc_lookup_pp_tables->entries[i].usVdd;
-		table->entries[i].us_cac_low =
-			vddc_lookup_pp_tables->entries[i].usCACLow;
-		table->entries[i].us_cac_mid =
-			vddc_lookup_pp_tables->entries[i].usCACMid;
-		table->entries[i].us_cac_high =
-			vddc_lookup_pp_tables->entries[i].usCACHigh;
+		record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					phm_ppt_v1_voltage_lookup_record,
+					entries, table, i);
+		atom_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					ATOM_Tonga_Voltage_Lookup_Record,
+					entries, vddc_lookup_pp_tables, i);
+		record->us_calculated = 0;
+		record->us_vdd = atom_record->usVdd;
+		record->us_cac_low = atom_record->usCACLow;
+		record->us_cac_mid = atom_record->usCACMid;
+		record->us_cac_high = atom_record->usCACHigh;
 	}
 
 	*lookup_table = table;
@@ -314,11 +317,12 @@
 static int get_valid_clk(
 		struct pp_hwmgr *hwmgr,
 		struct phm_clock_array **clk_table,
-		const phm_ppt_v1_clock_voltage_dependency_table  * clk_volt_pp_table
+		phm_ppt_v1_clock_voltage_dependency_table const *clk_volt_pp_table
 		)
 {
 	uint32_t table_size, i;
 	struct phm_clock_array *table;
+	phm_ppt_v1_clock_voltage_dependency_record *dep_record;
 
 	PP_ASSERT_WITH_CODE((0 != clk_volt_pp_table->count),
 		"Invalid PowerPlay Table!", return -1);
@@ -335,9 +339,12 @@
 
 	table->count = (uint32_t)clk_volt_pp_table->count;
 
-	for (i = 0; i < table->count; i++)
-		table->values[i] = (uint32_t)clk_volt_pp_table->entries[i].clk;
-
+	for (i = 0; i < table->count; i++) {
+		dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+				phm_ppt_v1_clock_voltage_dependency_record,
+				entries, clk_volt_pp_table, i);
+		table->values[i] = (uint32_t)dep_record->clk;
+	}
 	*clk_table = table;
 
 	return 0;
@@ -346,7 +353,7 @@
 static int get_hard_limits(
 		struct pp_hwmgr *hwmgr,
 		struct phm_clock_and_voltage_limits *limits,
-		const ATOM_Tonga_Hard_Limit_Table * limitable
+		ATOM_Tonga_Hard_Limit_Table const *limitable
 		)
 {
 	PP_ASSERT_WITH_CODE((0 != limitable->ucNumEntries), "Invalid PowerPlay Table!", return -1);
@@ -364,11 +371,13 @@
 static int get_mclk_voltage_dependency_table(
 		struct pp_hwmgr *hwmgr,
 		phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_mclk_dep_table,
-		const ATOM_Tonga_MCLK_Dependency_Table * mclk_dep_table
+		ATOM_Tonga_MCLK_Dependency_Table const *mclk_dep_table
 		)
 {
 	uint32_t table_size, i;
 	phm_ppt_v1_clock_voltage_dependency_table *mclk_table;
+	phm_ppt_v1_clock_voltage_dependency_record *mclk_table_record;
+	ATOM_Tonga_MCLK_Dependency_Record *mclk_dep_record;
 
 	PP_ASSERT_WITH_CODE((0 != mclk_dep_table->ucNumEntries),
 		"Invalid PowerPlay Table!", return -1);
@@ -386,16 +395,17 @@
 	mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries;
 
 	for (i = 0; i < mclk_dep_table->ucNumEntries; i++) {
-		mclk_table->entries[i].vddInd =
-			mclk_dep_table->entries[i].ucVddcInd;
-		mclk_table->entries[i].vdd_offset =
-			mclk_dep_table->entries[i].usVddgfxOffset;
-		mclk_table->entries[i].vddci =
-			mclk_dep_table->entries[i].usVddci;
-		mclk_table->entries[i].mvdd =
-			mclk_dep_table->entries[i].usMvdd;
-		mclk_table->entries[i].clk =
-			mclk_dep_table->entries[i].ulMclk;
+		mclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					phm_ppt_v1_clock_voltage_dependency_record,
+						entries, mclk_table, i);
+		mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					ATOM_Tonga_MCLK_Dependency_Record,
+						entries, mclk_dep_table, i);
+		mclk_table_record->vddInd = mclk_dep_record->ucVddcInd;
+		mclk_table_record->vdd_offset = mclk_dep_record->usVddgfxOffset;
+		mclk_table_record->vddci = mclk_dep_record->usVddci;
+		mclk_table_record->mvdd = mclk_dep_record->usMvdd;
+		mclk_table_record->clk = mclk_dep_record->ulMclk;
 	}
 
 	*pp_tonga_mclk_dep_table = mclk_table;
@@ -406,15 +416,17 @@
 static int get_sclk_voltage_dependency_table(
 		struct pp_hwmgr *hwmgr,
 		phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
-		const PPTable_Generic_SubTable_Header *sclk_dep_table
+		PPTable_Generic_SubTable_Header const  *sclk_dep_table
 		)
 {
 	uint32_t table_size, i;
 	phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
+	phm_ppt_v1_clock_voltage_dependency_record *sclk_table_record;
 
 	if (sclk_dep_table->ucRevId < 1) {
 		const ATOM_Tonga_SCLK_Dependency_Table *tonga_table =
 			    (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table;
+		ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
 
 		PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries),
 			"Invalid PowerPlay Table!", return -1);
@@ -432,20 +444,23 @@
 		sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
 
 		for (i = 0; i < tonga_table->ucNumEntries; i++) {
-			sclk_table->entries[i].vddInd =
-				tonga_table->entries[i].ucVddInd;
-			sclk_table->entries[i].vdd_offset =
-				tonga_table->entries[i].usVddcOffset;
-			sclk_table->entries[i].clk =
-				tonga_table->entries[i].ulSclk;
-			sclk_table->entries[i].cks_enable =
-				(((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
-			sclk_table->entries[i].cks_voffset =
-				(tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+			sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						ATOM_Tonga_SCLK_Dependency_Record,
+						entries, tonga_table, i);
+			sclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						phm_ppt_v1_clock_voltage_dependency_record,
+						entries, sclk_table, i);
+			sclk_table_record->vddInd = sclk_dep_record->ucVddInd;
+			sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset;
+			sclk_table_record->clk = sclk_dep_record->ulSclk;
+			sclk_table_record->cks_enable =
+				(((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+			sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F);
 		}
 	} else {
 		const ATOM_Polaris_SCLK_Dependency_Table *polaris_table =
 			    (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table;
+		ATOM_Polaris_SCLK_Dependency_Record *sclk_dep_record;
 
 		PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries),
 			"Invalid PowerPlay Table!", return -1);
@@ -463,17 +478,19 @@
 		sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
 
 		for (i = 0; i < polaris_table->ucNumEntries; i++) {
-			sclk_table->entries[i].vddInd =
-				polaris_table->entries[i].ucVddInd;
-			sclk_table->entries[i].vdd_offset =
-				polaris_table->entries[i].usVddcOffset;
-			sclk_table->entries[i].clk =
-				polaris_table->entries[i].ulSclk;
-			sclk_table->entries[i].cks_enable =
-				(((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
-			sclk_table->entries[i].cks_voffset =
-				(polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
-			sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset;
+			sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						ATOM_Polaris_SCLK_Dependency_Record,
+						entries, polaris_table, i);
+			sclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						phm_ppt_v1_clock_voltage_dependency_record,
+						entries, sclk_table, i);
+			sclk_table_record->vddInd = sclk_dep_record->ucVddInd;
+			sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset;
+			sclk_table_record->clk = sclk_dep_record->ulSclk;
+			sclk_table_record->cks_enable =
+				(((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+			sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F);
+			sclk_table_record->sclk_offset = sclk_dep_record->ulSclkOffset;
 		}
 	}
 	*pp_tonga_sclk_dep_table = sclk_table;
@@ -484,16 +501,19 @@
 static int get_pcie_table(
 		struct pp_hwmgr *hwmgr,
 		phm_ppt_v1_pcie_table **pp_tonga_pcie_table,
-		const PPTable_Generic_SubTable_Header * pTable
+		PPTable_Generic_SubTable_Header const *ptable
 		)
 {
 	uint32_t table_size, i, pcie_count;
 	phm_ppt_v1_pcie_table *pcie_table;
 	struct phm_ppt_v1_information *pp_table_information =
 		(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	phm_ppt_v1_pcie_record *pcie_record;
 
-	if (pTable->ucRevId < 1) {
-		const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)pTable;
+	if (ptable->ucRevId < 1) {
+		const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)ptable;
+		ATOM_Tonga_PCIE_Record *atom_pcie_record;
+
 		PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
 			"Invalid PowerPlay Table!", return -1);
 
@@ -519,18 +539,23 @@
 			Disregarding the excess entries... \n");
 
 		pcie_table->count = pcie_count;
-
 		for (i = 0; i < pcie_count; i++) {
-			pcie_table->entries[i].gen_speed =
-				atom_pcie_table->entries[i].ucPCIEGenSpeed;
-			pcie_table->entries[i].lane_width =
-				atom_pcie_table->entries[i].usPCIELaneWidth;
+			pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						phm_ppt_v1_pcie_record,
+						entries, pcie_table, i);
+			atom_pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						ATOM_Tonga_PCIE_Record,
+						entries, atom_pcie_table, i);
+			pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed;
+			pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth;
 		}
 
 		*pp_tonga_pcie_table = pcie_table;
 	} else {
 		/* Polaris10/Polaris11 and newer. */
-		const ATOM_Polaris10_PCIE_Table *atom_pcie_table = (ATOM_Polaris10_PCIE_Table *)pTable;
+		const ATOM_Polaris10_PCIE_Table *atom_pcie_table = (ATOM_Polaris10_PCIE_Table *)ptable;
+		ATOM_Polaris10_PCIE_Record *atom_pcie_record;
+
 		PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
 			"Invalid PowerPlay Table!", return -1);
 
@@ -558,12 +583,15 @@
 		pcie_table->count = pcie_count;
 
 		for (i = 0; i < pcie_count; i++) {
-			pcie_table->entries[i].gen_speed =
-				atom_pcie_table->entries[i].ucPCIEGenSpeed;
-			pcie_table->entries[i].lane_width =
-				atom_pcie_table->entries[i].usPCIELaneWidth;
-			pcie_table->entries[i].pcie_sclk =
-				atom_pcie_table->entries[i].ulPCIE_Sclk;
+			pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						phm_ppt_v1_pcie_record,
+						entries, pcie_table, i);
+			atom_pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						ATOM_Polaris10_PCIE_Record,
+						entries, atom_pcie_table, i);
+			pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed;
+			pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth;
+			pcie_record->pcie_sclk = atom_pcie_record->ulPCIE_Sclk;
 		}
 
 		*pp_tonga_pcie_table = pcie_table;
@@ -685,6 +713,7 @@
 	uint32_t table_size, i;
 	const ATOM_Tonga_MM_Dependency_Record *mm_dependency_record;
 	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table;
+	phm_ppt_v1_mm_clock_voltage_dependency_record *mm_table_record;
 
 	PP_ASSERT_WITH_CODE((0 != mm_dependency_table->ucNumEntries),
 		"Invalid PowerPlay Table!", return -1);
@@ -701,14 +730,19 @@
 	mm_table->count = mm_dependency_table->ucNumEntries;
 
 	for (i = 0; i < mm_dependency_table->ucNumEntries; i++) {
-		mm_dependency_record = &mm_dependency_table->entries[i];
-		mm_table->entries[i].vddcInd = mm_dependency_record->ucVddcInd;
-		mm_table->entries[i].vddgfx_offset = mm_dependency_record->usVddgfxOffset;
-		mm_table->entries[i].aclk = mm_dependency_record->ulAClk;
-		mm_table->entries[i].samclock = mm_dependency_record->ulSAMUClk;
-		mm_table->entries[i].eclk = mm_dependency_record->ulEClk;
-		mm_table->entries[i].vclk = mm_dependency_record->ulVClk;
-		mm_table->entries[i].dclk = mm_dependency_record->ulDClk;
+		mm_dependency_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						ATOM_Tonga_MM_Dependency_Record,
+						entries, mm_dependency_table, i);
+		mm_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					phm_ppt_v1_mm_clock_voltage_dependency_record,
+					entries, mm_table, i);
+		mm_table_record->vddcInd = mm_dependency_record->ucVddcInd;
+		mm_table_record->vddgfx_offset = mm_dependency_record->usVddgfxOffset;
+		mm_table_record->aclk = mm_dependency_record->ulAClk;
+		mm_table_record->samclock = mm_dependency_record->ulSAMUClk;
+		mm_table_record->eclk = mm_dependency_record->ulEClk;
+		mm_table_record->vclk = mm_dependency_record->ulVClk;
+		mm_table_record->dclk = mm_dependency_record->ulDClk;
 	}
 
 	*tonga_mm_table = mm_table;
@@ -1015,7 +1049,7 @@
 	return 0;
 }
 
-int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr)
+int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
 {
 	int result = 0;
 	const ATOM_Tonga_POWERPLAYTABLE *powerplay_table;
@@ -1066,7 +1100,7 @@
 	return result;
 }
 
-int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
+int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
 {
 	struct phm_ppt_v1_information *pp_table_information =
 		(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1110,14 +1144,14 @@
 	return 0;
 }
 
-const struct pp_table_func tonga_pptable_funcs = {
-	.pptable_init = tonga_pp_tables_initialize,
-	.pptable_fini = tonga_pp_tables_uninitialize,
+const struct pp_table_func pptable_v1_0_funcs = {
+	.pptable_init = pp_tables_v1_0_initialize,
+	.pptable_fini = pp_tables_v1_0_uninitialize,
 };
 
-int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
+int get_number_of_powerplay_table_entries_v1_0(struct pp_hwmgr *hwmgr)
 {
-	const ATOM_Tonga_State_Array * state_arrays;
+	ATOM_Tonga_State_Array const *state_arrays;
 	const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
 
 	PP_ASSERT_WITH_CODE((NULL != pp_table),
@@ -1164,6 +1198,71 @@
 	return result;
 }
 
+static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr)
+{
+	const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
+	const ATOM_Tonga_VCE_State_Table *vce_state_table =
+				(ATOM_Tonga_VCE_State_Table *)(((unsigned long)pp_table) + le16_to_cpu(pp_table->usVCEStateTableOffset));
+
+	if (vce_state_table == NULL)
+		return 0;
+
+	return vce_state_table->ucNumEntries;
+}
+
+static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i,
+		struct pp_vce_state *vce_state, void **clock_info, uint32_t *flag)
+{
+	const ATOM_Tonga_VCE_State_Record *vce_state_record;
+	ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
+	ATOM_Tonga_MCLK_Dependency_Record *mclk_dep_record;
+	ATOM_Tonga_MM_Dependency_Record *mm_dep_record;
+	const ATOM_Tonga_POWERPLAYTABLE *pptable = get_powerplay_table(hwmgr);
+	const ATOM_Tonga_VCE_State_Table *vce_state_table = (ATOM_Tonga_VCE_State_Table *)(((unsigned long)pptable)
+							  + le16_to_cpu(pptable->usVCEStateTableOffset));
+	const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = (ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long)pptable)
+							  + le16_to_cpu(pptable->usSclkDependencyTableOffset));
+	const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = (ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long)pptable)
+							  + le16_to_cpu(pptable->usMclkDependencyTableOffset));
+	const ATOM_Tonga_MM_Dependency_Table *mm_dep_table = (ATOM_Tonga_MM_Dependency_Table *)(((unsigned long)pptable)
+							  + le16_to_cpu(pptable->usMMDependencyTableOffset));
+
+	PP_ASSERT_WITH_CODE((i < vce_state_table->ucNumEntries),
+			 "Requested state entry ID is out of range!",
+			 return -EINVAL);
+
+	vce_state_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					ATOM_Tonga_VCE_State_Record,
+					entries, vce_state_table, i);
+	sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					ATOM_Tonga_SCLK_Dependency_Record,
+					entries, sclk_dep_table,
+					vce_state_record->ucSCLKIndex);
+	mm_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					ATOM_Tonga_MM_Dependency_Record,
+					entries, mm_dep_table,
+					vce_state_record->ucVCEClockIndex);
+	*flag = vce_state_record->ucFlag;
+
+	vce_state->evclk = mm_dep_record->ulEClk;
+	vce_state->ecclk = mm_dep_record->ulEClk;
+	vce_state->sclk = sclk_dep_record->ulSclk;
+
+	if (vce_state_record->ucMCLKIndex >= mclk_dep_table->ucNumEntries)
+		mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					ATOM_Tonga_MCLK_Dependency_Record,
+					entries, mclk_dep_table,
+					mclk_dep_table->ucNumEntries - 1);
+	else
+		mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+					ATOM_Tonga_MCLK_Dependency_Record,
+					entries, mclk_dep_table,
+					vce_state_record->ucMCLKIndex);
+
+	vce_state->mclk = mclk_dep_record->ulMclk;
+	return 0;
+}
+
 /**
 * Create a Power State out of an entry in the PowerPlay table.
 * This function is called by the hardware back-end.
@@ -1172,15 +1271,17 @@
 * @param power_state The address of the PowerState instance being created.
 * @return -1 if the entry cannot be retrieved.
 */
-int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr,
+int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr,
 		uint32_t entry_index, struct pp_power_state *power_state,
 		int (*call_back_func)(struct pp_hwmgr *, void *,
 				struct pp_power_state *, void *, uint32_t))
 {
 	int result = 0;
-	const ATOM_Tonga_State_Array * state_arrays;
+	const ATOM_Tonga_State_Array *state_arrays;
 	const ATOM_Tonga_State *state_entry;
 	const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
+	int i, j;
+	uint32_t flags = 0;
 
 	PP_ASSERT_WITH_CODE((NULL != pp_table), "Missing PowerPlay Table!", return -1;);
 	power_state->classification.bios_index = entry_index;
@@ -1197,7 +1298,9 @@
 		PP_ASSERT_WITH_CODE((entry_index <= state_arrays->ucNumEntries),
 				"Invalid PowerPlay Table State Array Entry.", return -1);
 
-		state_entry = &(state_arrays->states[entry_index]);
+		state_entry = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+						ATOM_Tonga_State, entries,
+						state_arrays, entry_index);
 
 		result = call_back_func(hwmgr, (void *)state_entry, power_state,
 				(void *)pp_table,
@@ -1210,5 +1313,13 @@
 			PP_StateClassificationFlag_Boot))
 		result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(power_state->hardware));
 
+	hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr);
+
+	if ((i != 0) && (i <= PP_MAX_VCE_LEVELS)) {
+		for (j = 0; j < i; j++)
+			ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags);
+	}
+
 	return result;
 }
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h
similarity index 81%
rename from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
rename to drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h
index d24b888..b9710ab 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h
@@ -20,14 +20,14 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#ifndef TONGA_PROCESSPPTABLES_H
-#define TONGA_PROCESSPPTABLES_H
+#ifndef _PROCESSPPTABLES_V1_0_H
+#define _PROCESSPPTABLES_V1_0_H
 
 #include "hwmgr.h"
 
-extern const struct pp_table_func tonga_pptable_funcs;
-extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
-extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
+extern const struct pp_table_func pptable_v1_0_funcs;
+extern int get_number_of_powerplay_table_entries_v1_0(struct pp_hwmgr *hwmgr);
+extern int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t entry_index,
 		struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
 				struct pp_power_state *, void *, uint32_t));
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index 6c321b0..ccf7ebe 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -1523,7 +1523,7 @@
 
 int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
 							unsigned long i,
-							struct PP_VCEState *vce_state,
+							struct pp_vce_state *vce_state,
 							void **clock_info,
 							unsigned long *flag)
 {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
similarity index 76%
rename from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
rename to drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index b5edb51..6eb6db1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -21,9 +21,53 @@
  *
  */
 
-#include "polaris10_clockpowergating.h"
+#include "smu7_hwmgr.h"
+#include "smu7_clockpowergating.h"
+#include "smu7_common.h"
 
-int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
+static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+			PPSMC_MSG_UVDDPM_Enable :
+			PPSMC_MSG_UVDDPM_Disable);
+}
+
+static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+			PPSMC_MSG_VCEDPM_Enable :
+			PPSMC_MSG_VCEDPM_Disable);
+}
+
+static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+			PPSMC_MSG_SAMUDPM_Enable :
+			PPSMC_MSG_SAMUDPM_Disable);
+}
+
+static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+	if (!bgate)
+		smum_update_smc_table(hwmgr, SMU_UVD_TABLE);
+	return smu7_enable_disable_uvd_dpm(hwmgr, !bgate);
+}
+
+static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+	if (!bgate)
+		smum_update_smc_table(hwmgr, SMU_VCE_TABLE);
+	return smu7_enable_disable_vce_dpm(hwmgr, !bgate);
+}
+
+static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+	if (!bgate)
+		smum_update_smc_table(hwmgr, SMU_SAMU_TABLE);
+	return smu7_enable_disable_samu_dpm(hwmgr, !bgate);
+}
+
+int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cf_want_uvd_power_gating(hwmgr))
 		return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -31,7 +75,7 @@
 	return 0;
 }
 
-int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
+int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cf_want_uvd_power_gating(hwmgr)) {
 		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -47,7 +91,7 @@
 	return 0;
 }
 
-int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
+int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cf_want_vce_power_gating(hwmgr))
 		return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -55,7 +99,7 @@
 	return 0;
 }
 
-int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
+int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cf_want_vce_power_gating(hwmgr))
 		return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -63,7 +107,7 @@
 	return 0;
 }
 
-int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
+int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_SamuPowerGating))
@@ -72,7 +116,7 @@
 	return 0;
 }
 
-int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr)
+int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_SamuPowerGating))
@@ -81,27 +125,24 @@
 	return 0;
 }
 
-int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
+int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
 {
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
 	data->uvd_power_gated = false;
 	data->vce_power_gated = false;
 	data->samu_power_gated = false;
 
-	polaris10_phm_powerup_uvd(hwmgr);
-	polaris10_phm_powerup_vce(hwmgr);
-	polaris10_phm_powerup_samu(hwmgr);
+	smu7_powerup_uvd(hwmgr);
+	smu7_powerup_vce(hwmgr);
+	smu7_powerup_samu(hwmgr);
 
 	return 0;
 }
 
-int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
 {
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	if (data->uvd_power_gated == bgate)
-		return 0;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
 	data->uvd_power_gated = bgate;
 
@@ -109,11 +150,11 @@
 		cgs_set_clockgating_state(hwmgr->device,
 				AMD_IP_BLOCK_TYPE_UVD,
 				AMD_CG_STATE_GATE);
-		polaris10_update_uvd_dpm(hwmgr, true);
-		polaris10_phm_powerdown_uvd(hwmgr);
+		smu7_update_uvd_dpm(hwmgr, true);
+		smu7_powerdown_uvd(hwmgr);
 	} else {
-		polaris10_phm_powerup_uvd(hwmgr);
-		polaris10_update_uvd_dpm(hwmgr, false);
+		smu7_powerup_uvd(hwmgr);
+		smu7_update_uvd_dpm(hwmgr, false);
 		cgs_set_clockgating_state(hwmgr->device,
 				AMD_IP_BLOCK_TYPE_UVD,
 				AMD_CG_STATE_UNGATE);
@@ -122,9 +163,9 @@
 	return 0;
 }
 
-int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 {
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
 	if (data->vce_power_gated == bgate)
 		return 0;
@@ -135,11 +176,11 @@
 		cgs_set_clockgating_state(hwmgr->device,
 				AMD_IP_BLOCK_TYPE_VCE,
 				AMD_CG_STATE_GATE);
-		polaris10_update_vce_dpm(hwmgr, true);
-		polaris10_phm_powerdown_vce(hwmgr);
+		smu7_update_vce_dpm(hwmgr, true);
+		smu7_powerdown_vce(hwmgr);
 	} else {
-		polaris10_phm_powerup_vce(hwmgr);
-		polaris10_update_vce_dpm(hwmgr, false);
+		smu7_powerup_vce(hwmgr);
+		smu7_update_vce_dpm(hwmgr, false);
 		cgs_set_clockgating_state(hwmgr->device,
 				AMD_IP_BLOCK_TYPE_VCE,
 				AMD_CG_STATE_UNGATE);
@@ -147,9 +188,9 @@
 	return 0;
 }
 
-int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
+int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
 {
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
 	if (data->samu_power_gated == bgate)
 		return 0;
@@ -157,22 +198,25 @@
 	data->samu_power_gated = bgate;
 
 	if (bgate) {
-		polaris10_update_samu_dpm(hwmgr, true);
-		polaris10_phm_powerdown_samu(hwmgr);
+		smu7_update_samu_dpm(hwmgr, true);
+		smu7_powerdown_samu(hwmgr);
 	} else {
-		polaris10_phm_powerup_samu(hwmgr);
-		polaris10_update_samu_dpm(hwmgr, false);
+		smu7_powerup_samu(hwmgr);
+		smu7_update_samu_dpm(hwmgr, false);
 	}
 
 	return 0;
 }
 
-int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
+int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
 					const uint32_t *msg_id)
 {
 	PPSMC_Msg msg;
 	uint32_t value;
 
+	if (!(hwmgr->feature_mask & PP_ENABLE_GFX_CG_THRU_SMU))
+		return 0;
+
 	switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
 	case PP_GROUP_GFX:
 		switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
@@ -185,7 +229,7 @@
 
 				if (smum_send_msg_to_smc_with_parameter(
 						hwmgr->smumgr, msg, value))
-					return -1;
+					return -EINVAL;
 			}
 			if (PP_STATE_SUPPORT_LS & *msg_id) {
 				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
@@ -195,7 +239,7 @@
 
 				if (smum_send_msg_to_smc_with_parameter(
 						hwmgr->smumgr, msg, value))
-					return -1;
+					return -EINVAL;
 			}
 			break;
 
@@ -208,7 +252,7 @@
 
 				if (smum_send_msg_to_smc_with_parameter(
 						hwmgr->smumgr, msg, value))
-					return -1;
+					return -EINVAL;
 			}
 
 			if  (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -219,7 +263,7 @@
 
 				if (smum_send_msg_to_smc_with_parameter(
 						hwmgr->smumgr, msg, value))
-					return -1;
+					return -EINVAL;
 			}
 			break;
 
@@ -232,7 +276,7 @@
 
 				if (smum_send_msg_to_smc_with_parameter(
 						hwmgr->smumgr, msg, value))
-					return -1;
+					return -EINVAL;
 			}
 			break;
 
@@ -245,7 +289,7 @@
 
 				if (smum_send_msg_to_smc_with_parameter(
 						hwmgr->smumgr, msg, value))
-					return -1;
+					return -EINVAL;
 			}
 			break;
 
@@ -259,12 +303,12 @@
 
 				if (smum_send_msg_to_smc_with_parameter(
 						hwmgr->smumgr, msg, value))
-					return -1;
+					return -EINVAL;
 			}
 			break;
 
 		default:
-			return -1;
+			return -EINVAL;
 		}
 		break;
 
@@ -279,7 +323,7 @@
 
 				if (smum_send_msg_to_smc_with_parameter(
 						hwmgr->smumgr, msg, value))
-					return -1;
+					return -EINVAL;
 			}
 			if  (PP_STATE_SUPPORT_LS & *msg_id) {
 				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
@@ -289,7 +333,7 @@
 
 				if (smum_send_msg_to_smc_with_parameter(
 						hwmgr->smumgr, msg, value))
-					return -1;
+					return -EINVAL;
 			}
 			break;
 
@@ -302,7 +346,7 @@
 
 				if (smum_send_msg_to_smc_with_parameter(
 						hwmgr->smumgr, msg, value))
-					return -1;
+					return -EINVAL;
 			}
 
 			if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -313,7 +357,7 @@
 
 				if (smum_send_msg_to_smc_with_parameter(
 						hwmgr->smumgr, msg, value))
-					return -1;
+					return -EINVAL;
 			}
 			break;
 
@@ -326,7 +370,7 @@
 
 				if (smum_send_msg_to_smc_with_parameter(
 						hwmgr->smumgr, msg, value))
-					return -1;
+					return -EINVAL;
 			}
 			if (PP_STATE_SUPPORT_LS & *msg_id) {
 				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
@@ -336,7 +380,7 @@
 
 				if (smum_send_msg_to_smc_with_parameter(
 						hwmgr->smumgr, msg, value))
-					return -1;
+					return -EINVAL;
 			}
 			break;
 
@@ -349,7 +393,7 @@
 
 				if (smum_send_msg_to_smc_with_parameter(
 						hwmgr->smumgr, msg, value))
-					return -1;
+					return -EINVAL;
 			}
 
 			if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -360,7 +404,7 @@
 
 				if (smum_send_msg_to_smc_with_parameter(
 						hwmgr->smumgr, msg, value))
-					return -1;
+					return -EINVAL;
 			}
 			break;
 
@@ -373,7 +417,7 @@
 
 				if (smum_send_msg_to_smc_with_parameter(
 						hwmgr->smumgr, msg, value))
-					return -1;
+					return -EINVAL;
 			}
 
 			if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -384,7 +428,7 @@
 
 				if (smum_send_msg_to_smc_with_parameter(
 						hwmgr->smumgr, msg, value))
-					return -1;
+					return -EINVAL;
 			}
 			break;
 
@@ -397,18 +441,18 @@
 
 				if (smum_send_msg_to_smc_with_parameter(
 						hwmgr->smumgr, msg, value))
-					return -1;
+					return -EINVAL;
 			}
 			break;
 
 		default:
-			return -1;
+			return -EINVAL;
 
 		}
 		break;
 
 	default:
-		return -1;
+		return -EINVAL;
 
 	}
 
@@ -419,7 +463,7 @@
  * Powerplay will only control the static per CU Power Gating.
  * Dynamic per CU Power Gating will be done in gfx.
  */
-int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
+int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
 {
 	struct cgs_system_info sys_info = {0};
 	uint32_t active_cus;
@@ -432,8 +476,8 @@
 
 	if (result)
 		return -EINVAL;
-	else
-		active_cus = sys_info.value;
+
+	active_cus = sys_info.value;
 
 	if (enable)
 		return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
similarity index 63%
rename from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h
rename to drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
index 88d68cb..d52a28c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
@@ -21,20 +21,20 @@
  *
  */
 
-#ifndef _POLARIS10_CLOCK_POWER_GATING_H_
-#define _POLARIS10_CLOCK_POWER_GATING_H_
+#ifndef _SMU7_CLOCK_POWER_GATING_H_
+#define _SMU7_CLOCK__POWER_GATING_H_
 
-#include "polaris10_hwmgr.h"
+#include "smu7_hwmgr.h"
 #include "pp_asicblocks.h"
 
-int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
-int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
-int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
+int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
+int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
+int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr);
+int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
+int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
+int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
+int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
 					const uint32_t *msg_id);
-int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
+int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
 
-#endif /* _POLARIS10_CLOCK_POWER_GATING_H_ */
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h
new file mode 100644
index 0000000..f967613
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _SMU7_DYN_DEFAULTS_H
+#define _SMU7_DYN_DEFAULTS_H
+
+
+/*  We need to fill in the default values */
+
+
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT0              0x3FFFC102
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT1              0x000400
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT2              0xC00080
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT3              0xC00200
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT4              0xC01680
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT5              0xC00033
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT6              0xC00033
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT7              0x3FFFC000
+
+
+#define SMU7_THERMALPROTECTCOUNTER_DFLT            0x200
+#define SMU7_STATICSCREENTHRESHOLDUNIT_DFLT        0
+#define SMU7_STATICSCREENTHRESHOLD_DFLT            0x00C8
+#define SMU7_GFXIDLECLOCKSTOPTHRESHOLD_DFLT        0x200
+#define SMU7_REFERENCEDIVIDER_DFLT                  4
+
+#define SMU7_ULVVOLTAGECHANGEDELAY_DFLT             1687
+
+#define SMU7_CGULVPARAMETER_DFLT                    0x00040035
+#define SMU7_CGULVCONTROL_DFLT                      0x00007450
+#define SMU7_TARGETACTIVITY_DFLT                     50
+#define SMU7_MCLK_TARGETACTIVITY_DFLT                10
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
new file mode 100644
index 0000000..609996c
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -0,0 +1,4372 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include <asm/div64.h>
+#include "linux/delay.h"
+#include "pp_acpi.h"
+#include "pp_debug.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "pptable_v1_0.h"
+#include "pppcielanes.h"
+#include "amd_pcie_helpers.h"
+#include "hardwaremanager.h"
+#include "process_pptables_v1_0.h"
+#include "cgs_common.h"
+
+#include "smu7_common.h"
+
+#include "hwmgr.h"
+#include "smu7_hwmgr.h"
+#include "smu7_powertune.h"
+#include "smu7_dyn_defaults.h"
+#include "smu7_thermal.h"
+#include "smu7_clockpowergating.h"
+#include "processpptables.h"
+
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+#define MC_CG_SEQ_DRAMCONF_S0       0x05
+#define MC_CG_SEQ_DRAMCONF_S1       0x06
+#define MC_CG_SEQ_YCLK_SUSPEND      0x04
+#define MC_CG_SEQ_YCLK_RESUME       0x0a
+
+#define SMC_CG_IND_START            0xc0030000
+#define SMC_CG_IND_END              0xc0040000
+
+#define VOLTAGE_SCALE               4
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100
+
+#define MEM_FREQ_LOW_LATENCY        25000
+#define MEM_FREQ_HIGH_LATENCY       80000
+
+#define MEM_LATENCY_HIGH            45
+#define MEM_LATENCY_LOW             35
+#define MEM_LATENCY_ERR             0xFFFF
+
+#define MC_SEQ_MISC0_GDDR5_SHIFT 28
+#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
+#define MC_SEQ_MISC0_GDDR5_VALUE 5
+
+#define PCIE_BUS_CLK                10000
+#define TCLK                        (PCIE_BUS_CLK / 10)
+
+
+/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
+enum DPM_EVENT_SRC {
+	DPM_EVENT_SRC_ANALOG = 0,
+	DPM_EVENT_SRC_EXTERNAL = 1,
+	DPM_EVENT_SRC_DIGITAL = 2,
+	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
+};
+
+static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
+
+struct smu7_power_state *cast_phw_smu7_power_state(
+				  struct pp_hw_power_state *hw_ps)
+{
+	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
+				"Invalid Powerstate Type!",
+				 return NULL);
+
+	return (struct smu7_power_state *)hw_ps;
+}
+
+const struct smu7_power_state *cast_const_phw_smu7_power_state(
+				 const struct pp_hw_power_state *hw_ps)
+{
+	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
+				"Invalid Powerstate Type!",
+				 return NULL);
+
+	return (const struct smu7_power_state *)hw_ps;
+}
+
+/**
+ * Find the MC microcode version and store it in the HwMgr struct
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
+{
+	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
+
+	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
+
+	return 0;
+}
+
+uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+{
+	uint32_t speedCntl = 0;
+
+	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
+	speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
+			ixPCIE_LC_SPEED_CNTL);
+	return((uint16_t)PHM_GET_FIELD(speedCntl,
+			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
+}
+
+int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
+{
+	uint32_t link_width;
+
+	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
+	link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
+			PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
+
+	PP_ASSERT_WITH_CODE((7 >= link_width),
+			"Invalid PCIe lane width!", return 0);
+
+	return decode_pcie_lane_width(link_width);
+}
+
+/**
+* Enable voltage control
+*
+* @param    pHwMgr  the address of the powerplay hardware manager.
+* @return   always PP_Result_OK
+*/
+int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
+{
+	if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);
+
+	return 0;
+}
+
+/**
+* Checks if we want to support voltage control
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+*/
+static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
+{
+	const struct smu7_hwmgr *data =
+			(const struct smu7_hwmgr *)(hwmgr->backend);
+
+	return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
+}
+
+/**
+* Enable voltage control
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
+{
+	/* enable voltage control */
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
+
+	return 0;
+}
+
+static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
+		struct phm_clock_voltage_dependency_table *voltage_dependency_table
+		)
+{
+	uint32_t i;
+
+	PP_ASSERT_WITH_CODE((NULL != voltage_table),
+			"Voltage Dependency Table empty.", return -EINVAL;);
+
+	voltage_table->mask_low = 0;
+	voltage_table->phase_delay = 0;
+	voltage_table->count = voltage_dependency_table->count;
+
+	for (i = 0; i < voltage_dependency_table->count; i++) {
+		voltage_table->entries[i].value =
+			voltage_dependency_table->entries[i].v;
+		voltage_table->entries[i].smio_low = 0;
+	}
+
+	return 0;
+}
+
+
+/**
+* Create Voltage Tables.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)hwmgr->pptable;
+	int result = 0;
+	uint32_t tmp;
+
+	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+		result = atomctrl_get_voltage_table_v3(hwmgr,
+				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
+				&(data->mvdd_voltage_table));
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to retrieve MVDD table.",
+				return result);
+	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+		if (hwmgr->pp_table_version == PP_TABLE_V1)
+			result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
+					table_info->vdd_dep_on_mclk);
+		else if (hwmgr->pp_table_version == PP_TABLE_V0)
+			result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
+					hwmgr->dyn_state.mvdd_dependency_on_mclk);
+
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to retrieve SVI2 MVDD table from dependancy table.",
+				return result;);
+	}
+
+	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+		result = atomctrl_get_voltage_table_v3(hwmgr,
+				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
+				&(data->vddci_voltage_table));
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to retrieve VDDCI table.",
+				return result);
+	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+		if (hwmgr->pp_table_version == PP_TABLE_V1)
+			result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
+					table_info->vdd_dep_on_mclk);
+		else if (hwmgr->pp_table_version == PP_TABLE_V0)
+			result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
+					hwmgr->dyn_state.vddci_dependency_on_mclk);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to retrieve SVI2 VDDCI table from dependancy table.",
+				return result);
+	}
+
+	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+		/* VDDGFX has only SVI2 voltage control */
+		result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
+					table_info->vddgfx_lookup_table);
+		PP_ASSERT_WITH_CODE((0 == result),
+			"Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
+	}
+
+
+	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
+		result = atomctrl_get_voltage_table_v3(hwmgr,
+					VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
+					&data->vddc_voltage_table);
+		PP_ASSERT_WITH_CODE((0 == result),
+			"Failed to retrieve VDDC table.", return result;);
+	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+
+		if (hwmgr->pp_table_version == PP_TABLE_V0)
+			result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
+					hwmgr->dyn_state.vddc_dependency_on_mclk);
+		else if (hwmgr->pp_table_version == PP_TABLE_V1)
+			result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
+				table_info->vddc_lookup_table);
+
+		PP_ASSERT_WITH_CODE((0 == result),
+			"Failed to retrieve SVI2 VDDC table from dependancy table.", return result;);
+	}
+
+	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDC);
+	PP_ASSERT_WITH_CODE(
+			(data->vddc_voltage_table.count <= tmp),
+		"Too many voltage values for VDDC. Trimming to fit state table.",
+			phm_trim_voltage_table_to_fit_state_table(tmp,
+						&(data->vddc_voltage_table)));
+
+	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
+	PP_ASSERT_WITH_CODE(
+			(data->vddgfx_voltage_table.count <= tmp),
+		"Too many voltage values for VDDC. Trimming to fit state table.",
+			phm_trim_voltage_table_to_fit_state_table(tmp,
+						&(data->vddgfx_voltage_table)));
+
+	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDCI);
+	PP_ASSERT_WITH_CODE(
+			(data->vddci_voltage_table.count <= tmp),
+		"Too many voltage values for VDDCI. Trimming to fit state table.",
+			phm_trim_voltage_table_to_fit_state_table(tmp,
+					&(data->vddci_voltage_table)));
+
+	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_MVDD);
+	PP_ASSERT_WITH_CODE(
+			(data->mvdd_voltage_table.count <= tmp),
+		"Too many voltage values for MVDD. Trimming to fit state table.",
+			phm_trim_voltage_table_to_fit_state_table(tmp,
+						&(data->mvdd_voltage_table)));
+
+	return 0;
+}
+
+/**
+* Programs static screed detection parameters
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int smu7_program_static_screen_threshold_parameters(
+							struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	/* Set static screen threshold unit */
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
+			data->static_screen_threshold_unit);
+	/* Set static screen threshold */
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
+			data->static_screen_threshold);
+
+	return 0;
+}
+
+/**
+* Setup display gap for glitch free memory clock switching.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always  0
+*/
+static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
+{
+	uint32_t display_gap =
+			cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+					ixCG_DISPLAY_GAP_CNTL);
+
+	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
+			DISP_GAP, DISPLAY_GAP_IGNORE);
+
+	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
+			DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_DISPLAY_GAP_CNTL, display_gap);
+
+	return 0;
+}
+
+/**
+* Programs activity state transition voting clients
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always  0
+*/
+static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	/* Clear reset for voting clients before enabling DPM */
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
+
+	return 0;
+}
+
+static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
+{
+	/* Reset voting clients before disabling DPM */
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_0, 0);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_1, 0);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_2, 0);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_3, 0);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_4, 0);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_5, 0);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_6, 0);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_7, 0);
+
+	return 0;
+}
+
+/* Copy one arb setting to another and then switch the active set.
+ * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants.
+ */
+static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
+		uint32_t arb_src, uint32_t arb_dest)
+{
+	uint32_t mc_arb_dram_timing;
+	uint32_t mc_arb_dram_timing2;
+	uint32_t burst_time;
+	uint32_t mc_cg_config;
+
+	switch (arb_src) {
+	case MC_CG_ARB_FREQ_F0:
+		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+		break;
+	case MC_CG_ARB_FREQ_F1:
+		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
+		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
+		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (arb_dest) {
+	case MC_CG_ARB_FREQ_F0:
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
+		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
+		break;
+	case MC_CG_ARB_FREQ_F1:
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
+		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
+	mc_cg_config |= 0x0000000F;
+	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
+	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
+
+	return 0;
+}
+
+static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
+{
+	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
+}
+
+/**
+* Initial switch from ARB F0->F1
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+* This function is to be called from the SetPowerState table.
+*/
+static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
+{
+	return smu7_copy_and_switch_arb_sets(hwmgr,
+			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
+{
+	uint32_t tmp;
+
+	tmp = (cgs_read_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
+			0x0000ff00) >> 8;
+
+	if (tmp == MC_CG_ARB_FREQ_F0)
+		return 0;
+
+	return smu7_copy_and_switch_arb_sets(hwmgr,
+			tmp, MC_CG_ARB_FREQ_F0);
+}
+
+static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_pcie_table *pcie_table = NULL;
+
+	uint32_t i, max_entry;
+	uint32_t tmp;
+
+	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
+			data->use_pcie_power_saving_levels), "No pcie performance levels!",
+			return -EINVAL);
+
+	if (table_info != NULL)
+		pcie_table = table_info->pcie_table;
+
+	if (data->use_pcie_performance_levels &&
+			!data->use_pcie_power_saving_levels) {
+		data->pcie_gen_power_saving = data->pcie_gen_performance;
+		data->pcie_lane_power_saving = data->pcie_lane_performance;
+	} else if (!data->use_pcie_performance_levels &&
+			data->use_pcie_power_saving_levels) {
+		data->pcie_gen_performance = data->pcie_gen_power_saving;
+		data->pcie_lane_performance = data->pcie_lane_power_saving;
+	}
+	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_LINK);
+	phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
+					tmp,
+					MAX_REGULAR_DPM_NUMBER);
+
+	if (pcie_table != NULL) {
+		/* max_entry is used to make sure we reserve one PCIE level
+		 * for boot level (fix for A+A PSPP issue).
+		 * If PCIE table from PPTable have ULV entry + 8 entries,
+		 * then ignore the last entry.*/
+		max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
+		for (i = 1; i < max_entry; i++) {
+			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
+					get_pcie_gen_support(data->pcie_gen_cap,
+							pcie_table->entries[i].gen_speed),
+					get_pcie_lane_support(data->pcie_lane_cap,
+							pcie_table->entries[i].lane_width));
+		}
+		data->dpm_table.pcie_speed_table.count = max_entry - 1;
+		smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
+	} else {
+		/* Hardcode Pcie Table */
+		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
+				get_pcie_gen_support(data->pcie_gen_cap,
+						PP_Min_PCIEGen),
+				get_pcie_lane_support(data->pcie_lane_cap,
+						PP_Max_PCIELane));
+		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
+				get_pcie_gen_support(data->pcie_gen_cap,
+						PP_Min_PCIEGen),
+				get_pcie_lane_support(data->pcie_lane_cap,
+						PP_Max_PCIELane));
+		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
+				get_pcie_gen_support(data->pcie_gen_cap,
+						PP_Max_PCIEGen),
+				get_pcie_lane_support(data->pcie_lane_cap,
+						PP_Max_PCIELane));
+		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
+				get_pcie_gen_support(data->pcie_gen_cap,
+						PP_Max_PCIEGen),
+				get_pcie_lane_support(data->pcie_lane_cap,
+						PP_Max_PCIELane));
+		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
+				get_pcie_gen_support(data->pcie_gen_cap,
+						PP_Max_PCIEGen),
+				get_pcie_lane_support(data->pcie_lane_cap,
+						PP_Max_PCIELane));
+		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
+				get_pcie_gen_support(data->pcie_gen_cap,
+						PP_Max_PCIEGen),
+				get_pcie_lane_support(data->pcie_lane_cap,
+						PP_Max_PCIELane));
+
+		data->dpm_table.pcie_speed_table.count = 6;
+	}
+	/* Populate last level for boot PCIE level, but do not increment count. */
+	phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
+			data->dpm_table.pcie_speed_table.count,
+			get_pcie_gen_support(data->pcie_gen_cap,
+					PP_Min_PCIEGen),
+			get_pcie_lane_support(data->pcie_lane_cap,
+					PP_Max_PCIELane));
+
+	return 0;
+}
+
+static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
+
+	phm_reset_single_dpm_table(
+			&data->dpm_table.sclk_table,
+				smum_get_mac_definition(hwmgr->smumgr,
+					SMU_MAX_LEVELS_GRAPHICS),
+					MAX_REGULAR_DPM_NUMBER);
+	phm_reset_single_dpm_table(
+			&data->dpm_table.mclk_table,
+			smum_get_mac_definition(hwmgr->smumgr,
+				SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
+
+	phm_reset_single_dpm_table(
+			&data->dpm_table.vddc_table,
+				smum_get_mac_definition(hwmgr->smumgr,
+					SMU_MAX_LEVELS_VDDC),
+					MAX_REGULAR_DPM_NUMBER);
+	phm_reset_single_dpm_table(
+			&data->dpm_table.vddci_table,
+			smum_get_mac_definition(hwmgr->smumgr,
+				SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
+
+	phm_reset_single_dpm_table(
+			&data->dpm_table.mvdd_table,
+				smum_get_mac_definition(hwmgr->smumgr,
+					SMU_MAX_LEVELS_MVDD),
+					MAX_REGULAR_DPM_NUMBER);
+	return 0;
+}
+/*
+ * This function is to initialize all DPM state tables
+ * for SMU7 based on the dependency table.
+ * Dynamic state patching function will then trim these
+ * state tables to the allowed range based
+ * on the power policy or external client requests,
+ * such as UVD request, etc.
+ */
+
+static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
+		hwmgr->dyn_state.vddc_dependency_on_sclk;
+	struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
+		hwmgr->dyn_state.vddc_dependency_on_mclk;
+	struct phm_cac_leakage_table *std_voltage_table =
+		hwmgr->dyn_state.cac_leakage_table;
+	uint32_t i;
+
+	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
+		"SCLK dependency table is missing. This table is mandatory", return -EINVAL);
+	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
+		"SCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);
+
+	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
+		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
+	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
+		"VMCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);
+
+
+	/* Initialize Sclk DPM table based on allow Sclk values*/
+	data->dpm_table.sclk_table.count = 0;
+
+	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
+		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
+				allowed_vdd_sclk_table->entries[i].clk) {
+			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
+				allowed_vdd_sclk_table->entries[i].clk;
+			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
+			data->dpm_table.sclk_table.count++;
+		}
+	}
+
+	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
+		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
+	/* Initialize Mclk DPM table based on allow Mclk values */
+	data->dpm_table.mclk_table.count = 0;
+	for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
+		if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
+			allowed_vdd_mclk_table->entries[i].clk) {
+			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
+				allowed_vdd_mclk_table->entries[i].clk;
+			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
+			data->dpm_table.mclk_table.count++;
+		}
+	}
+
+	/* Initialize Vddc DPM table based on allow Vddc values.  And populate corresponding std values. */
+	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
+		data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
+		data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
+		/* param1 is for corresponding std voltage */
+		data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
+	}
+
+	data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
+	allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
+
+	if (NULL != allowed_vdd_mclk_table) {
+		/* Initialize Vddci DPM table based on allow Mclk values */
+		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
+			data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
+			data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
+		}
+		data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
+	}
+
+	allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
+
+	if (NULL != allowed_vdd_mclk_table) {
+		/*
+		 * Initialize MVDD DPM table based on allow Mclk
+		 * values
+		 */
+		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
+			data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
+			data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
+		}
+		data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
+	}
+
+	return 0;
+}
+
+static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint32_t i;
+
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
+
+	if (table_info == NULL)
+		return -EINVAL;
+
+	dep_sclk_table = table_info->vdd_dep_on_sclk;
+	dep_mclk_table = table_info->vdd_dep_on_mclk;
+
+	PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
+			"SCLK dependency table is missing.",
+			return -EINVAL);
+	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
+			"SCLK dependency table count is 0.",
+			return -EINVAL);
+
+	PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
+			"MCLK dependency table is missing.",
+			return -EINVAL);
+	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
+			"MCLK dependency table count is 0",
+			return -EINVAL);
+
+	/* Initialize Sclk DPM table based on allow Sclk values */
+	data->dpm_table.sclk_table.count = 0;
+	for (i = 0; i < dep_sclk_table->count; i++) {
+		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
+						dep_sclk_table->entries[i].clk) {
+
+			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
+					dep_sclk_table->entries[i].clk;
+
+			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
+					(i == 0) ? true : false;
+			data->dpm_table.sclk_table.count++;
+		}
+	}
+
+	/* Initialize Mclk DPM table based on allow Mclk values */
+	data->dpm_table.mclk_table.count = 0;
+	for (i = 0; i < dep_mclk_table->count; i++) {
+		if (i == 0 || data->dpm_table.mclk_table.dpm_levels
+				[data->dpm_table.mclk_table.count - 1].value !=
+						dep_mclk_table->entries[i].clk) {
+			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
+							dep_mclk_table->entries[i].clk;
+			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
+							(i == 0) ? true : false;
+			data->dpm_table.mclk_table.count++;
+		}
+	}
+
+	return 0;
+}
+
+int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	smu7_reset_dpm_tables(hwmgr);
+
+	if (hwmgr->pp_table_version == PP_TABLE_V1)
+		smu7_setup_dpm_tables_v1(hwmgr);
+	else if (hwmgr->pp_table_version == PP_TABLE_V0)
+		smu7_setup_dpm_tables_v0(hwmgr);
+
+	smu7_setup_default_pcie_table(hwmgr);
+
+	/* save a copy of the default DPM table */
+	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
+			sizeof(struct smu7_dpm_table));
+	return 0;
+}
+
+uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
+{
+	uint32_t reference_clock, tmp;
+	struct cgs_display_info info = {0};
+	struct cgs_mode_info mode_info;
+
+	info.mode_info = &mode_info;
+
+	tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
+
+	if (tmp)
+		return TCLK;
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+	reference_clock = mode_info.ref_clock;
+
+	tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
+
+	if (0 != tmp)
+		return reference_clock / 4;
+
+	return reference_clock;
+}
+
+static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
+{
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_RegulatorHot))
+		return smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_EnableVRHotGPIOInterrupt);
+
+	return 0;
+}
+
+static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
+{
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+			SCLK_PWRMGT_OFF, 0);
+	return 0;
+}
+
+static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (data->ulv_supported)
+		return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
+
+	return 0;
+}
+
+static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (data->ulv_supported)
+		return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
+
+	return 0;
+}
+
+static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkDeepSleep)) {
+		if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to enable Master Deep Sleep switch failed!",
+					return -EINVAL);
+	} else {
+		if (smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to disable Master Deep Sleep switch failed!",
+					return -EINVAL);
+		}
+	}
+
+	return 0;
+}
+
+static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkDeepSleep)) {
+		if (smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to disable Master Deep Sleep switch failed!",
+					return -EINVAL);
+		}
+	}
+
+	return 0;
+}
+
+static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t soft_register_value = 0;
+	uint32_t handshake_disables_offset = data->soft_regs_start
+				+ smum_get_offsetof(hwmgr->smumgr,
+					SMU_SoftRegisters, HandshakeDisables);
+
+	soft_register_value = cgs_read_ind_register(hwmgr->device,
+				CGS_IND_REG__SMC, handshake_disables_offset);
+	soft_register_value |= smum_get_mac_definition(hwmgr->smumgr,
+					SMU_UVD_MCLK_HANDSHAKE_DISABLE);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			handshake_disables_offset, soft_register_value);
+	return 0;
+}
+
+static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	/* enable SCLK dpm */
+	if (!data->sclk_dpm_key_disabled)
+		PP_ASSERT_WITH_CODE(
+		(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
+		"Failed to enable SCLK DPM during DPM Start Function!",
+		return -EINVAL);
+
+	/* enable MCLK dpm */
+	if (0 == data->mclk_dpm_key_disabled) {
+		if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
+			smu7_disable_handshake_uvd(hwmgr);
+		PP_ASSERT_WITH_CODE(
+				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+						PPSMC_MSG_MCLKDPM_Enable)),
+				"Failed to enable MCLK DPM during DPM Start Function!",
+				return -EINVAL);
+
+		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
+
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
+		udelay(10);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
+	}
+
+	return 0;
+}
+
+static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	/*enable general power management */
+
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+			GLOBAL_PWRMGT_EN, 1);
+
+	/* enable sclk deep sleep */
+
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+			DYNAMIC_PM_EN, 1);
+
+	/* prepare for PCIE DPM */
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			data->soft_regs_start +
+			smum_get_offsetof(hwmgr->smumgr, SMU_SoftRegisters,
+						VoltageChangeTimeout), 0x1000);
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
+			SWRST_COMMAND_1, RESETLC, 0x0);
+
+	PP_ASSERT_WITH_CODE(
+			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+					PPSMC_MSG_Voltage_Cntl_Enable)),
+			"Failed to enable voltage DPM during DPM Start Function!",
+			return -EINVAL);
+
+
+	if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
+		printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
+		return -EINVAL;
+	}
+
+	/* enable PCIE dpm */
+	if (0 == data->pcie_dpm_key_disabled) {
+		PP_ASSERT_WITH_CODE(
+				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+						PPSMC_MSG_PCIeDPM_Enable)),
+				"Failed to enable pcie DPM during DPM Start Function!",
+				return -EINVAL);
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_Falcon_QuickTransition)) {
+		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_EnableACDCGPIOInterrupt)),
+				"Failed to enable AC DC GPIO Interrupt!",
+				);
+	}
+
+	return 0;
+}
+
+static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	/* disable SCLK dpm */
+	if (!data->sclk_dpm_key_disabled) {
+		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+				"Trying to disable SCLK DPM when DPM is disabled",
+				return 0);
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable);
+	}
+
+	/* disable MCLK dpm */
+	if (!data->mclk_dpm_key_disabled) {
+		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+				"Trying to disable MCLK DPM when DPM is disabled",
+				return 0);
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable);
+	}
+
+	return 0;
+}
+
+static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	/* disable general power management */
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+			GLOBAL_PWRMGT_EN, 0);
+	/* disable sclk deep sleep */
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+			DYNAMIC_PM_EN, 0);
+
+	/* disable PCIE dpm */
+	if (!data->pcie_dpm_key_disabled) {
+		PP_ASSERT_WITH_CODE(
+				(smum_send_msg_to_smc(hwmgr->smumgr,
+						PPSMC_MSG_PCIeDPM_Disable) == 0),
+				"Failed to disable pcie DPM during DPM Stop Function!",
+				return -EINVAL);
+	}
+
+	smu7_disable_sclk_mclk_dpm(hwmgr);
+
+	PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+			"Trying to disable voltage DPM when DPM is disabled",
+			return 0);
+
+	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable);
+
+	return 0;
+}
+
+static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
+{
+	bool protection;
+	enum DPM_EVENT_SRC src;
+
+	switch (sources) {
+	default:
+		printk(KERN_ERR "Unknown throttling event sources.");
+		/* fall through */
+	case 0:
+		protection = false;
+		/* src is unused */
+		break;
+	case (1 << PHM_AutoThrottleSource_Thermal):
+		protection = true;
+		src = DPM_EVENT_SRC_DIGITAL;
+		break;
+	case (1 << PHM_AutoThrottleSource_External):
+		protection = true;
+		src = DPM_EVENT_SRC_EXTERNAL;
+		break;
+	case (1 << PHM_AutoThrottleSource_External) |
+			(1 << PHM_AutoThrottleSource_Thermal):
+		protection = true;
+		src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
+		break;
+	}
+	/* Order matters - don't enable thermal protection for the wrong source. */
+	if (protection) {
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
+				DPM_EVENT_SRC, src);
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+				THERMAL_PROTECTION_DIS,
+				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_ThermalController));
+	} else
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+				THERMAL_PROTECTION_DIS, 1);
+}
+
+static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+		PHM_AutoThrottleSource source)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (!(data->active_auto_throttle_sources & (1 << source))) {
+		data->active_auto_throttle_sources |= 1 << source;
+		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+	}
+	return 0;
+}
+
+static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+	return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
+static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+		PHM_AutoThrottleSource source)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (data->active_auto_throttle_sources & (1 << source)) {
+		data->active_auto_throttle_sources &= ~(1 << source);
+		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+	}
+	return 0;
+}
+
+static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+	return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
+int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	data->pcie_performance_request = true;
+
+	return 0;
+}
+
+int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+	int tmp_result = 0;
+	int result = 0;
+
+	tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1;
+	PP_ASSERT_WITH_CODE(tmp_result == 0,
+			"DPM is already running right now, no need to enable DPM!",
+			return 0);
+
+	if (smu7_voltage_control(hwmgr)) {
+		tmp_result = smu7_enable_voltage_control(hwmgr);
+		PP_ASSERT_WITH_CODE(tmp_result == 0,
+				"Failed to enable voltage control!",
+				result = tmp_result);
+
+		tmp_result = smu7_construct_voltage_tables(hwmgr);
+		PP_ASSERT_WITH_CODE((0 == tmp_result),
+				"Failed to contruct voltage tables!",
+				result = tmp_result);
+	}
+	smum_initialize_mc_reg_table(hwmgr);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_EngineSpreadSpectrumSupport))
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ThermalController))
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
+
+	tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to program static screen threshold parameters!",
+			result = tmp_result);
+
+	tmp_result = smu7_enable_display_gap(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable display gap!", result = tmp_result);
+
+	tmp_result = smu7_program_voting_clients(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to program voting clients!", result = tmp_result);
+
+	tmp_result = smum_process_firmware_header(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to process firmware header!", result = tmp_result);
+
+	tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to initialize switch from ArbF0 to F1!",
+			result = tmp_result);
+
+	result = smu7_setup_default_dpm_tables(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to setup default DPM tables!", return result);
+
+	tmp_result = smum_init_smc_table(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to initialize SMC table!", result = tmp_result);
+
+	tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
+
+	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay);
+
+	tmp_result = smu7_enable_sclk_control(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable SCLK control!", result = tmp_result);
+
+	tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable voltage control!", result = tmp_result);
+
+	tmp_result = smu7_enable_ulv(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable ULV!", result = tmp_result);
+
+	tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable deep sleep master switch!", result = tmp_result);
+
+	tmp_result = smu7_enable_didt_config(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to enable deep sleep master switch!", result = tmp_result);
+
+	tmp_result = smu7_start_dpm(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to start DPM!", result = tmp_result);
+
+	tmp_result = smu7_enable_smc_cac(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable SMC CAC!", result = tmp_result);
+
+	tmp_result = smu7_enable_power_containment(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable power containment!", result = tmp_result);
+
+	tmp_result = smu7_power_control_set_level(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to power control set level!", result = tmp_result);
+
+	tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable thermal auto throttle!", result = tmp_result);
+
+	tmp_result = smu7_pcie_performance_request(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"pcie performance request failed!", result = tmp_result);
+
+	return 0;
+}
+
+int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+	int tmp_result, result = 0;
+
+	tmp_result = (smum_is_dpm_running(hwmgr)) ? 0 : -1;
+	PP_ASSERT_WITH_CODE(tmp_result == 0,
+			"DPM is not running right now, no need to disable DPM!",
+			return 0);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ThermalController))
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
+
+	tmp_result = smu7_disable_power_containment(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to disable power containment!", result = tmp_result);
+
+	tmp_result = smu7_disable_smc_cac(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to disable SMC CAC!", result = tmp_result);
+
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
+
+	tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to disable thermal auto throttle!", result = tmp_result);
+
+	if (1 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
+		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableAvfs)),
+					"Failed to disable AVFS!",
+					return -EINVAL);
+	}
+
+	tmp_result = smu7_stop_dpm(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to stop DPM!", result = tmp_result);
+
+	tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to disable deep sleep master switch!", result = tmp_result);
+
+	tmp_result = smu7_disable_ulv(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to disable ULV!", result = tmp_result);
+
+	tmp_result = smu7_clear_voting_clients(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to clear voting clients!", result = tmp_result);
+
+	tmp_result = smu7_reset_to_default(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to reset to default!", result = tmp_result);
+
+	tmp_result = smu7_force_switch_to_arbf0(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to force to switch arbf0!", result = tmp_result);
+
+	return result;
+}
+
+int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr)
+{
+
+	return 0;
+}
+
+static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	data->dll_default_on = false;
+	data->mclk_dpm0_activity_target = 0xa;
+	data->mclk_activity_target = SMU7_MCLK_TARGETACTIVITY_DFLT;
+	data->vddc_vddgfx_delta = 300;
+	data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
+	data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
+	data->voting_rights_clients0 = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
+	data->voting_rights_clients1 = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
+	data->voting_rights_clients2 = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
+	data->voting_rights_clients3 = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
+	data->voting_rights_clients4 = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
+	data->voting_rights_clients5 = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
+	data->voting_rights_clients6 = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
+	data->voting_rights_clients7 = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
+
+	data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
+	data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
+	data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
+	/* need to set voltage control types before EVV patching */
+	data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
+	data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
+	data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
+	data->enable_tdc_limit_feature = true;
+	data->enable_pkg_pwr_tracking_feature = true;
+	data->force_pcie_gen = PP_PCIEGenInvalid;
+	data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
+
+	data->fast_watermark_threshold = 100;
+	if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
+		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ControlVDDGFX)) {
+		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+			VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
+			data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+		}
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_EnableMVDDControl)) {
+		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
+			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
+		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
+			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+	}
+
+	if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ControlVDDGFX);
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ControlVDDCI)) {
+		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
+			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
+		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
+			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+	}
+
+	if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_EnableMVDDControl);
+
+	if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_ControlVDDCI);
+
+	if ((hwmgr->pp_table_version != PP_TABLE_V0)
+		&& (table_info->cac_dtp_table->usClockStretchAmount != 0))
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_ClockStretcher);
+
+	data->pcie_gen_performance.max = PP_PCIEGen1;
+	data->pcie_gen_performance.min = PP_PCIEGen3;
+	data->pcie_gen_power_saving.max = PP_PCIEGen1;
+	data->pcie_gen_power_saving.min = PP_PCIEGen3;
+	data->pcie_lane_performance.max = 0;
+	data->pcie_lane_performance.min = 16;
+	data->pcie_lane_power_saving.max = 0;
+	data->pcie_lane_power_saving.min = 16;
+}
+
+/**
+* Get Leakage VDDC based on leakage ID.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint16_t vv_id;
+	uint16_t vddc = 0;
+	uint16_t vddgfx = 0;
+	uint16_t i, j;
+	uint32_t sclk = 0;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)hwmgr->pptable;
+	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
+
+
+	if (table_info == NULL)
+		return -EINVAL;
+
+	sclk_table = table_info->vdd_dep_on_sclk;
+
+	for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
+		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+
+		if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+			if (0 == phm_get_sclk_for_voltage_evv(hwmgr,
+						table_info->vddgfx_lookup_table, vv_id, &sclk)) {
+				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+							PHM_PlatformCaps_ClockStretcher)) {
+					for (j = 1; j < sclk_table->count; j++) {
+						if (sclk_table->entries[j].clk == sclk &&
+								sclk_table->entries[j].cks_enable == 0) {
+							sclk += 5000;
+							break;
+						}
+					}
+				}
+				if (0 == atomctrl_get_voltage_evv_on_sclk
+				    (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
+				     vv_id, &vddgfx)) {
+					/* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */
+					PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);
+
+					/* the voltage should not be zero nor equal to leakage ID */
+					if (vddgfx != 0 && vddgfx != vv_id) {
+						data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
+						data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
+						data->vddcgfx_leakage.count++;
+					}
+				} else {
+					printk("Error retrieving EVV voltage value!\n");
+				}
+			}
+		} else {
+
+			if ((hwmgr->pp_table_version == PP_TABLE_V0)
+				|| !phm_get_sclk_for_voltage_evv(hwmgr,
+					table_info->vddc_lookup_table, vv_id, &sclk)) {
+				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_ClockStretcher)) {
+					for (j = 1; j < sclk_table->count; j++) {
+						if (sclk_table->entries[j].clk == sclk &&
+								sclk_table->entries[j].cks_enable == 0) {
+							sclk += 5000;
+							break;
+						}
+					}
+				}
+
+				if (phm_get_voltage_evv_on_sclk(hwmgr,
+							VOLTAGE_TYPE_VDDC,
+							sclk, vv_id, &vddc) == 0) {
+					if (vddc >= 2000 || vddc == 0)
+						return -EINVAL;
+				} else {
+					printk(KERN_WARNING "failed to retrieving EVV voltage!\n");
+					continue;
+				}
+
+				/* the voltage should not be zero nor equal to leakage ID */
+				if (vddc != 0 && vddc != vv_id) {
+					data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
+					data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
+					data->vddc_leakage.count++;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Change virtual leakage voltage to actual value.
+ *
+ * @param     hwmgr  the address of the powerplay hardware manager.
+ * @param     pointer to changing voltage
+ * @param     pointer to leakage table
+ */
+static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
+		uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
+{
+	uint32_t index;
+
+	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
+	for (index = 0; index < leakage_table->count; index++) {
+		/* if this voltage matches a leakage voltage ID */
+		/* patch with actual leakage voltage */
+		if (leakage_table->leakage_id[index] == *voltage) {
+			*voltage = leakage_table->actual_voltage[index];
+			break;
+		}
+	}
+
+	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
+		printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
+}
+
+/**
+* Patch voltage lookup table by EVV leakages.
+*
+* @param     hwmgr  the address of the powerplay hardware manager.
+* @param     pointer to voltage lookup table
+* @param     pointer to leakage table
+* @return     always 0
+*/
+static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
+		phm_ppt_v1_voltage_lookup_table *lookup_table,
+		struct smu7_leakage_voltage *leakage_table)
+{
+	uint32_t i;
+
+	for (i = 0; i < lookup_table->count; i++)
+		smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
+				&lookup_table->entries[i].us_vdd, leakage_table);
+
+	return 0;
+}
+
+static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
+		struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
+		uint16_t *vddc)
+{
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
+	hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
+			table_info->max_clock_voltage_on_dc.vddc;
+	return 0;
+}
+
+static int smu7_patch_voltage_dependency_tables_with_lookup_table(
+		struct pp_hwmgr *hwmgr)
+{
+	uint8_t entry_id;
+	uint8_t voltage_id;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+			table_info->vdd_dep_on_sclk;
+	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
+			table_info->vdd_dep_on_mclk;
+	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+			table_info->mm_dep_table;
+
+	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
+			voltage_id = sclk_table->entries[entry_id].vddInd;
+			sclk_table->entries[entry_id].vddgfx =
+				table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
+		}
+	} else {
+		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
+			voltage_id = sclk_table->entries[entry_id].vddInd;
+			sclk_table->entries[entry_id].vddc =
+				table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+		}
+	}
+
+	for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
+		voltage_id = mclk_table->entries[entry_id].vddInd;
+		mclk_table->entries[entry_id].vddc =
+			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+	}
+
+	for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
+		voltage_id = mm_table->entries[entry_id].vddcInd;
+		mm_table->entries[entry_id].vddc =
+			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+	}
+
+	return 0;
+
+}
+
+static int phm_add_voltage(struct pp_hwmgr *hwmgr,
+			phm_ppt_v1_voltage_lookup_table *look_up_table,
+			phm_ppt_v1_voltage_lookup_record *record)
+{
+	uint32_t i;
+
+	PP_ASSERT_WITH_CODE((NULL != look_up_table),
+		"Lookup Table empty.", return -EINVAL);
+	PP_ASSERT_WITH_CODE((0 != look_up_table->count),
+		"Lookup Table empty.", return -EINVAL);
+
+	i = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
+	PP_ASSERT_WITH_CODE((i >= look_up_table->count),
+		"Lookup Table is full.", return -EINVAL);
+
+	/* This is to avoid entering duplicate calculated records. */
+	for (i = 0; i < look_up_table->count; i++) {
+		if (look_up_table->entries[i].us_vdd == record->us_vdd) {
+			if (look_up_table->entries[i].us_calculated == 1)
+				return 0;
+			break;
+		}
+	}
+
+	look_up_table->entries[i].us_calculated = 1;
+	look_up_table->entries[i].us_vdd = record->us_vdd;
+	look_up_table->entries[i].us_cac_low = record->us_cac_low;
+	look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
+	look_up_table->entries[i].us_cac_high = record->us_cac_high;
+	/* Only increment the count when we're appending, not replacing duplicate entry. */
+	if (i == look_up_table->count)
+		look_up_table->count++;
+
+	return 0;
+}
+
+
+static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
+{
+	uint8_t entry_id;
+	struct phm_ppt_v1_voltage_lookup_record v_record;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
+	phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
+
+	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
+			if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
+				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
+					sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
+			else
+				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
+					sclk_table->entries[entry_id].vdd_offset;
+
+			sclk_table->entries[entry_id].vddc =
+				v_record.us_cac_low = v_record.us_cac_mid =
+				v_record.us_cac_high = v_record.us_vdd;
+
+			phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
+		}
+
+		for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
+			if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
+				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
+					mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
+			else
+				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
+					mclk_table->entries[entry_id].vdd_offset;
+
+			mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
+				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
+			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
+		}
+	}
+	return 0;
+}
+
+static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
+{
+	uint8_t entry_id;
+	struct phm_ppt_v1_voltage_lookup_record v_record;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
+
+	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+		for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
+			if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
+				v_record.us_vdd = mm_table->entries[entry_id].vddc +
+					mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
+			else
+				v_record.us_vdd = mm_table->entries[entry_id].vddc +
+					mm_table->entries[entry_id].vddgfx_offset;
+
+			/* Add the calculated VDDGFX to the VDDGFX lookup table */
+			mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
+				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
+			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
+		}
+	}
+	return 0;
+}
+
+static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
+		struct phm_ppt_v1_voltage_lookup_table *lookup_table)
+{
+	uint32_t table_size, i, j;
+	struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
+	table_size = lookup_table->count;
+
+	PP_ASSERT_WITH_CODE(0 != lookup_table->count,
+		"Lookup table is empty", return -EINVAL);
+
+	/* Sorting voltages */
+	for (i = 0; i < table_size - 1; i++) {
+		for (j = i + 1; j > 0; j--) {
+			if (lookup_table->entries[j].us_vdd <
+					lookup_table->entries[j - 1].us_vdd) {
+				tmp_voltage_lookup_record = lookup_table->entries[j - 1];
+				lookup_table->entries[j - 1] = lookup_table->entries[j];
+				lookup_table->entries[j] = tmp_voltage_lookup_record;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
+{
+	int result = 0;
+	int tmp_result;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+		tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
+			table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
+		if (tmp_result != 0)
+			result = tmp_result;
+
+		smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
+			&table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
+	} else {
+
+		tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
+				table_info->vddc_lookup_table, &(data->vddc_leakage));
+		if (tmp_result)
+			result = tmp_result;
+
+		tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
+				&(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
+		if (tmp_result)
+			result = tmp_result;
+	}
+
+	tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
+	if (tmp_result)
+		result = tmp_result;
+
+	tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
+	if (tmp_result)
+		result = tmp_result;
+
+	tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
+	if (tmp_result)
+		result = tmp_result;
+
+	tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
+	if (tmp_result)
+		result = tmp_result;
+
+	tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
+	if (tmp_result)
+		result = tmp_result;
+
+	return result;
+}
+
+static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
+{
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
+						table_info->vdd_dep_on_sclk;
+	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
+						table_info->vdd_dep_on_mclk;
+
+	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
+		"VDD dependency on SCLK table is missing.",
+		return -EINVAL);
+	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
+		"VDD dependency on SCLK table has to have is missing.",
+		return -EINVAL);
+
+	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
+		"VDD dependency on MCLK table is missing",
+		return -EINVAL);
+	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
+		"VDD dependency on MCLK table has to have is missing.",
+		return -EINVAL);
+
+	table_info->max_clock_voltage_on_ac.sclk =
+		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
+	table_info->max_clock_voltage_on_ac.mclk =
+		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
+	table_info->max_clock_voltage_on_ac.vddc =
+		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
+	table_info->max_clock_voltage_on_ac.vddci =
+		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
+
+	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
+	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
+	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
+	hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
+
+	return 0;
+}
+
+int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+{
+	struct phm_ppt_v1_information *table_info =
+		       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
+	struct phm_ppt_v1_voltage_lookup_table *lookup_table;
+	uint32_t i;
+	uint32_t hw_revision, sub_vendor_id, sub_sys_id;
+	struct cgs_system_info sys_info = {0};
+
+	if (table_info != NULL) {
+		dep_mclk_table = table_info->vdd_dep_on_mclk;
+		lookup_table = table_info->vddc_lookup_table;
+	} else
+		return 0;
+
+	sys_info.size = sizeof(struct cgs_system_info);
+
+	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
+	cgs_query_system_info(hwmgr->device, &sys_info);
+	hw_revision = (uint32_t)sys_info.value;
+
+	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
+	cgs_query_system_info(hwmgr->device, &sys_info);
+	sub_sys_id = (uint32_t)sys_info.value;
+
+	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID;
+	cgs_query_system_info(hwmgr->device, &sys_info);
+	sub_vendor_id = (uint32_t)sys_info.value;
+
+	if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
+			((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
+		    (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
+		    (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
+		if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
+			return 0;
+
+		for (i = 0; i < lookup_table->count; i++) {
+			if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
+				dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
+				return 0;
+			}
+		}
+	}
+	return 0;
+}
+
+static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
+{
+	struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
+	uint32_t temp_reg;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+
+	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
+		temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
+		switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
+		case 0:
+			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
+			break;
+		case 1:
+			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
+			break;
+		case 2:
+			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
+			break;
+		case 3:
+			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
+			break;
+		case 4:
+			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
+			break;
+		default:
+			PP_ASSERT_WITH_CODE(0,
+			"Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
+			);
+			break;
+		}
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
+	}
+
+	if (table_info == NULL)
+		return 0;
+
+	if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
+		hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
+		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
+			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
+
+		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
+			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
+
+		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
+
+		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
+
+		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
+			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
+
+		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
+
+		table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
+								(table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
+
+		table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
+		table_info->cac_dtp_table->usOperatingTempStep = 1;
+		table_info->cac_dtp_table->usOperatingTempHyst = 1;
+
+		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
+			       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
+
+		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
+			       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
+
+		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
+			       table_info->cac_dtp_table->usOperatingTempMinLimit;
+
+		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
+			       table_info->cac_dtp_table->usOperatingTempMaxLimit;
+
+		hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
+			       table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
+
+		hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
+			       table_info->cac_dtp_table->usOperatingTempStep;
+
+		hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
+			       table_info->cac_dtp_table->usTargetOperatingTemp;
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_ODFuzzyFanControlSupport);
+	}
+
+	return 0;
+}
+
+/**
+ * Change virtual leakage voltage to actual value.
+ *
+ * @param     hwmgr  the address of the powerplay hardware manager.
+ * @param     pointer to changing voltage
+ * @param     pointer to leakage table
+ */
+static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
+		uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
+{
+	uint32_t index;
+
+	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
+	for (index = 0; index < leakage_table->count; index++) {
+		/* if this voltage matches a leakage voltage ID */
+		/* patch with actual leakage voltage */
+		if (leakage_table->leakage_id[index] == *voltage) {
+			*voltage = leakage_table->actual_voltage[index];
+			break;
+		}
+	}
+
+	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
+		printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
+}
+
+
+static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
+			      struct phm_clock_voltage_dependency_table *tab)
+{
+	uint16_t i;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (tab)
+		for (i = 0; i < tab->count; i++)
+			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+						&data->vddc_leakage);
+
+	return 0;
+}
+
+static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
+			       struct phm_clock_voltage_dependency_table *tab)
+{
+	uint16_t i;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (tab)
+		for (i = 0; i < tab->count; i++)
+			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+							&data->vddci_leakage);
+
+	return 0;
+}
+
+static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
+				  struct phm_vce_clock_voltage_dependency_table *tab)
+{
+	uint16_t i;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (tab)
+		for (i = 0; i < tab->count; i++)
+			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+							&data->vddc_leakage);
+
+	return 0;
+}
+
+
+static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
+				  struct phm_uvd_clock_voltage_dependency_table *tab)
+{
+	uint16_t i;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (tab)
+		for (i = 0; i < tab->count; i++)
+			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+							&data->vddc_leakage);
+
+	return 0;
+}
+
+static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
+					 struct phm_phase_shedding_limits_table *tab)
+{
+	uint16_t i;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (tab)
+		for (i = 0; i < tab->count; i++)
+			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
+							&data->vddc_leakage);
+
+	return 0;
+}
+
+static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
+				   struct phm_samu_clock_voltage_dependency_table *tab)
+{
+	uint16_t i;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (tab)
+		for (i = 0; i < tab->count; i++)
+			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+							&data->vddc_leakage);
+
+	return 0;
+}
+
+static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
+				  struct phm_acp_clock_voltage_dependency_table *tab)
+{
+	uint16_t i;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (tab)
+		for (i = 0; i < tab->count; i++)
+			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+					&data->vddc_leakage);
+
+	return 0;
+}
+
+static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
+				     struct phm_clock_and_voltage_limits *tab)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (tab) {
+		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddc,
+							&data->vddc_leakage);
+		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddci,
+							&data->vddci_leakage);
+	}
+
+	return 0;
+}
+
+static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
+{
+	uint32_t i;
+	uint32_t vddc;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (tab) {
+		for (i = 0; i < tab->count; i++) {
+			vddc = (uint32_t)(tab->entries[i].Vddc);
+			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
+			tab->entries[i].Vddc = (uint16_t)vddc;
+		}
+	}
+
+	return 0;
+}
+
+static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
+{
+	int tmp;
+
+	tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
+	if (tmp)
+		return -EINVAL;
+
+	tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
+	if (tmp)
+		return -EINVAL;
+
+	return 0;
+}
+
+
+static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
+	struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
+	struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
+
+	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
+		"VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL);
+	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
+		"VDDC dependency on SCLK table has to have is missing. This table is mandatory\n", return -EINVAL);
+
+	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
+		"VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL);
+	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
+		"VDD dependency on MCLK table has to have is missing. This table is mandatory\n", return -EINVAL);
+
+	data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
+	data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+
+	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
+		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
+	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
+		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
+	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
+		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+
+	if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
+		data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
+		data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
+	}
+
+	if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1)
+		hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
+
+	return 0;
+}
+
+int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data;
+	int result;
+
+	data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	hwmgr->backend = data;
+
+	smu7_patch_voltage_workaround(hwmgr);
+	smu7_init_dpm_defaults(hwmgr);
+
+	/* Get leakage voltage based on leakage ID. */
+	result = smu7_get_evv_voltages(hwmgr);
+
+	if (result) {
+		printk("Get EVV Voltage Failed.  Abort Driver loading!\n");
+		return -EINVAL;
+	}
+
+	if (hwmgr->pp_table_version == PP_TABLE_V1) {
+		smu7_complete_dependency_tables(hwmgr);
+		smu7_set_private_data_based_on_pptable_v1(hwmgr);
+	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+		smu7_patch_dependency_tables_with_leakage(hwmgr);
+		smu7_set_private_data_based_on_pptable_v0(hwmgr);
+	}
+
+	/* Initalize Dynamic State Adjustment Rule Settings */
+	result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
+
+	if (0 == result) {
+		struct cgs_system_info sys_info = {0};
+
+		data->is_tlu_enabled = false;
+
+		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
+							SMU7_MAX_HARDWARE_POWERLEVELS;
+		hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
+		hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
+
+		sys_info.size = sizeof(struct cgs_system_info);
+		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
+		result = cgs_query_system_info(hwmgr->device, &sys_info);
+		if (result)
+			data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+		else
+			data->pcie_gen_cap = (uint32_t)sys_info.value;
+		if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+			data->pcie_spc_cap = 20;
+		sys_info.size = sizeof(struct cgs_system_info);
+		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
+		result = cgs_query_system_info(hwmgr->device, &sys_info);
+		if (result)
+			data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
+		else
+			data->pcie_lane_cap = (uint32_t)sys_info.value;
+
+		hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
+/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
+		hwmgr->platform_descriptor.clockStep.engineClock = 500;
+		hwmgr->platform_descriptor.clockStep.memoryClock = 500;
+		smu7_thermal_parameter_init(hwmgr);
+	} else {
+		/* Ignore return value in here, we are cleaning up a mess. */
+		phm_hwmgr_backend_fini(hwmgr);
+	}
+
+	return 0;
+}
+
+static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t level, tmp;
+
+	if (!data->pcie_dpm_key_disabled) {
+		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+			level = 0;
+			tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
+			while (tmp >>= 1)
+				level++;
+
+			if (level)
+				smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+						PPSMC_MSG_PCIeDPM_ForceLevel, level);
+		}
+	}
+
+	if (!data->sclk_dpm_key_disabled) {
+		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+			level = 0;
+			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
+			while (tmp >>= 1)
+				level++;
+
+			if (level)
+				smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+						PPSMC_MSG_SCLKDPM_SetEnabledMask,
+						(1 << level));
+		}
+	}
+
+	if (!data->mclk_dpm_key_disabled) {
+		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+			level = 0;
+			tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
+			while (tmp >>= 1)
+				level++;
+
+			if (level)
+				smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+						PPSMC_MSG_MCLKDPM_SetEnabledMask,
+						(1 << level));
+		}
+	}
+
+	return 0;
+}
+
+static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (hwmgr->pp_table_version == PP_TABLE_V1)
+		phm_apply_dal_min_voltage_request(hwmgr);
+/* TO DO  for v0 iceland and Ci*/
+
+	if (!data->sclk_dpm_key_disabled) {
+		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_SCLKDPM_SetEnabledMask,
+					data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+	}
+
+	if (!data->mclk_dpm_key_disabled) {
+		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_MCLKDPM_SetEnabledMask,
+					data->dpm_level_enable_mask.mclk_dpm_enable_mask);
+	}
+
+	return 0;
+}
+
+static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (!smum_is_dpm_running(hwmgr))
+		return -EINVAL;
+
+	if (!data->pcie_dpm_key_disabled) {
+		smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_PCIeDPM_UnForceLevel);
+	}
+
+	return smu7_upload_dpm_level_enable_mask(hwmgr);
+}
+
+static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data =
+			(struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t level;
+
+	if (!data->sclk_dpm_key_disabled)
+		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+			level = phm_get_lowest_enabled_level(hwmgr,
+							      data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+							    PPSMC_MSG_SCLKDPM_SetEnabledMask,
+							    (1 << level));
+
+	}
+
+	if (!data->mclk_dpm_key_disabled) {
+		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+			level = phm_get_lowest_enabled_level(hwmgr,
+							      data->dpm_level_enable_mask.mclk_dpm_enable_mask);
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+							    PPSMC_MSG_MCLKDPM_SetEnabledMask,
+							    (1 << level));
+		}
+	}
+
+	if (!data->pcie_dpm_key_disabled) {
+		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+			level = phm_get_lowest_enabled_level(hwmgr,
+							      data->dpm_level_enable_mask.pcie_dpm_enable_mask);
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+							    PPSMC_MSG_PCIeDPM_ForceLevel,
+							    (level));
+		}
+	}
+
+	return 0;
+
+}
+static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
+				enum amd_dpm_forced_level level)
+{
+	int ret = 0;
+
+	switch (level) {
+	case AMD_DPM_FORCED_LEVEL_HIGH:
+		ret = smu7_force_dpm_highest(hwmgr);
+		if (ret)
+			return ret;
+		break;
+	case AMD_DPM_FORCED_LEVEL_LOW:
+		ret = smu7_force_dpm_lowest(hwmgr);
+		if (ret)
+			return ret;
+		break;
+	case AMD_DPM_FORCED_LEVEL_AUTO:
+		ret = smu7_unforce_dpm_levels(hwmgr);
+		if (ret)
+			return ret;
+		break;
+	default:
+		break;
+	}
+
+	hwmgr->dpm_level = level;
+
+	return ret;
+}
+
+static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
+{
+	return sizeof(struct smu7_power_state);
+}
+
+
+static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+				struct pp_power_state *request_ps,
+			const struct pp_power_state *current_ps)
+{
+
+	struct smu7_power_state *smu7_ps =
+				cast_phw_smu7_power_state(&request_ps->hardware);
+	uint32_t sclk;
+	uint32_t mclk;
+	struct PP_Clocks minimum_clocks = {0};
+	bool disable_mclk_switching;
+	bool disable_mclk_switching_for_frame_lock;
+	struct cgs_display_info info = {0};
+	const struct phm_clock_and_voltage_limits *max_limits;
+	uint32_t i;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	int32_t count;
+	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
+
+	data->battery_state = (PP_StateUILabel_Battery ==
+			request_ps->classification.ui_label);
+
+	PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
+				 "VI should always have 2 performance levels",
+				);
+
+	max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
+			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
+			&(hwmgr->dyn_state.max_clock_voltage_on_dc);
+
+	/* Cap clock DPM tables at DC MAX if it is in DC. */
+	if (PP_PowerSource_DC == hwmgr->power_source) {
+		for (i = 0; i < smu7_ps->performance_level_count; i++) {
+			if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
+				smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
+			if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
+				smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
+		}
+	}
+
+	smu7_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
+	smu7_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+
+	/*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
+
+	minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
+	minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StablePState)) {
+		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
+		stable_pstate_sclk = (max_limits->sclk * 75) / 100;
+
+		for (count = table_info->vdd_dep_on_sclk->count - 1;
+				count >= 0; count--) {
+			if (stable_pstate_sclk >=
+					table_info->vdd_dep_on_sclk->entries[count].clk) {
+				stable_pstate_sclk =
+						table_info->vdd_dep_on_sclk->entries[count].clk;
+				break;
+			}
+		}
+
+		if (count < 0)
+			stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
+
+		stable_pstate_mclk = max_limits->mclk;
+
+		minimum_clocks.engineClock = stable_pstate_sclk;
+		minimum_clocks.memoryClock = stable_pstate_mclk;
+	}
+
+	if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
+		minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
+
+	if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
+		minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
+
+	smu7_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
+
+	if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
+		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
+				hwmgr->platform_descriptor.overdriveLimit.engineClock),
+				"Overdrive sclk exceeds limit",
+				hwmgr->gfx_arbiter.sclk_over_drive =
+						hwmgr->platform_descriptor.overdriveLimit.engineClock);
+
+		if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
+			smu7_ps->performance_levels[1].engine_clock =
+					hwmgr->gfx_arbiter.sclk_over_drive;
+	}
+
+	if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
+		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
+				hwmgr->platform_descriptor.overdriveLimit.memoryClock),
+				"Overdrive mclk exceeds limit",
+				hwmgr->gfx_arbiter.mclk_over_drive =
+						hwmgr->platform_descriptor.overdriveLimit.memoryClock);
+
+		if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
+			smu7_ps->performance_levels[1].memory_clock =
+					hwmgr->gfx_arbiter.mclk_over_drive;
+	}
+
+	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
+				    hwmgr->platform_descriptor.platformCaps,
+				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
+
+
+	disable_mclk_switching = (1 < info.display_count) ||
+				    disable_mclk_switching_for_frame_lock;
+
+	sclk = smu7_ps->performance_levels[0].engine_clock;
+	mclk = smu7_ps->performance_levels[0].memory_clock;
+
+	if (disable_mclk_switching)
+		mclk = smu7_ps->performance_levels
+		[smu7_ps->performance_level_count - 1].memory_clock;
+
+	if (sclk < minimum_clocks.engineClock)
+		sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
+				max_limits->sclk : minimum_clocks.engineClock;
+
+	if (mclk < minimum_clocks.memoryClock)
+		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
+				max_limits->mclk : minimum_clocks.memoryClock;
+
+	smu7_ps->performance_levels[0].engine_clock = sclk;
+	smu7_ps->performance_levels[0].memory_clock = mclk;
+
+	smu7_ps->performance_levels[1].engine_clock =
+		(smu7_ps->performance_levels[1].engine_clock >=
+				smu7_ps->performance_levels[0].engine_clock) ?
+						smu7_ps->performance_levels[1].engine_clock :
+						smu7_ps->performance_levels[0].engine_clock;
+
+	if (disable_mclk_switching) {
+		if (mclk < smu7_ps->performance_levels[1].memory_clock)
+			mclk = smu7_ps->performance_levels[1].memory_clock;
+
+		smu7_ps->performance_levels[0].memory_clock = mclk;
+		smu7_ps->performance_levels[1].memory_clock = mclk;
+	} else {
+		if (smu7_ps->performance_levels[1].memory_clock <
+				smu7_ps->performance_levels[0].memory_clock)
+			smu7_ps->performance_levels[1].memory_clock =
+					smu7_ps->performance_levels[0].memory_clock;
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StablePState)) {
+		for (i = 0; i < smu7_ps->performance_level_count; i++) {
+			smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
+			smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
+			smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
+			smu7_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
+		}
+	}
+	return 0;
+}
+
+
+static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+{
+	struct pp_power_state  *ps;
+	struct smu7_power_state  *smu7_ps;
+
+	if (hwmgr == NULL)
+		return -EINVAL;
+
+	ps = hwmgr->request_ps;
+
+	if (ps == NULL)
+		return -EINVAL;
+
+	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
+
+	if (low)
+		return smu7_ps->performance_levels[0].memory_clock;
+	else
+		return smu7_ps->performance_levels
+				[smu7_ps->performance_level_count-1].memory_clock;
+}
+
+static int smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+{
+	struct pp_power_state  *ps;
+	struct smu7_power_state  *smu7_ps;
+
+	if (hwmgr == NULL)
+		return -EINVAL;
+
+	ps = hwmgr->request_ps;
+
+	if (ps == NULL)
+		return -EINVAL;
+
+	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
+
+	if (low)
+		return smu7_ps->performance_levels[0].engine_clock;
+	else
+		return smu7_ps->performance_levels
+				[smu7_ps->performance_level_count-1].engine_clock;
+}
+
+static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
+					struct pp_hw_power_state *hw_ps)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
+	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
+	uint16_t size;
+	uint8_t frev, crev;
+	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+
+	/* First retrieve the Boot clocks and VDDC from the firmware info table.
+	 * We assume here that fw_info is unchanged if this call fails.
+	 */
+	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
+			hwmgr->device, index,
+			&size, &frev, &crev);
+	if (!fw_info)
+		/* During a test, there is no firmware info table. */
+		return 0;
+
+	/* Patch the state. */
+	data->vbios_boot_state.sclk_bootup_value =
+			le32_to_cpu(fw_info->ulDefaultEngineClock);
+	data->vbios_boot_state.mclk_bootup_value =
+			le32_to_cpu(fw_info->ulDefaultMemoryClock);
+	data->vbios_boot_state.mvdd_bootup_value =
+			le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
+	data->vbios_boot_state.vddc_bootup_value =
+			le16_to_cpu(fw_info->usBootUpVDDCVoltage);
+	data->vbios_boot_state.vddci_bootup_value =
+			le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
+	data->vbios_boot_state.pcie_gen_bootup_value =
+			smu7_get_current_pcie_speed(hwmgr);
+
+	data->vbios_boot_state.pcie_lane_bootup_value =
+			(uint16_t)smu7_get_current_pcie_lane_number(hwmgr);
+
+	/* set boot power state */
+	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
+	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
+	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
+	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
+
+	return 0;
+}
+
+static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	unsigned long ret = 0;
+
+	if (hwmgr->pp_table_version == PP_TABLE_V0) {
+		result = pp_tables_get_num_of_entries(hwmgr, &ret);
+		return result ? 0 : ret;
+	} else if (hwmgr->pp_table_version == PP_TABLE_V1) {
+		result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
+		return result;
+	}
+	return 0;
+}
+
+static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
+		void *state, struct pp_power_state *power_state,
+		void *pp_table, uint32_t classification_flag)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct smu7_power_state  *smu7_power_state =
+			(struct smu7_power_state *)(&(power_state->hardware));
+	struct smu7_performance_level *performance_level;
+	ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
+	ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
+			(ATOM_Tonga_POWERPLAYTABLE *)pp_table;
+	PPTable_Generic_SubTable_Header *sclk_dep_table =
+			(PPTable_Generic_SubTable_Header *)
+			(((unsigned long)powerplay_table) +
+				le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
+
+	ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
+			(ATOM_Tonga_MCLK_Dependency_Table *)
+			(((unsigned long)powerplay_table) +
+				le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
+
+	/* The following fields are not initialized here: id orderedList allStatesList */
+	power_state->classification.ui_label =
+			(le16_to_cpu(state_entry->usClassification) &
+			ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
+			ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
+	power_state->classification.flags = classification_flag;
+	/* NOTE: There is a classification2 flag in BIOS that is not being used right now */
+
+	power_state->classification.temporary_state = false;
+	power_state->classification.to_be_deleted = false;
+
+	power_state->validation.disallowOnDC =
+			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
+					ATOM_Tonga_DISALLOW_ON_DC));
+
+	power_state->pcie.lanes = 0;
+
+	power_state->display.disableFrameModulation = false;
+	power_state->display.limitRefreshrate = false;
+	power_state->display.enableVariBright =
+			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
+					ATOM_Tonga_ENABLE_VARIBRIGHT));
+
+	power_state->validation.supportedPowerLevels = 0;
+	power_state->uvd_clocks.VCLK = 0;
+	power_state->uvd_clocks.DCLK = 0;
+	power_state->temperatures.min = 0;
+	power_state->temperatures.max = 0;
+
+	performance_level = &(smu7_power_state->performance_levels
+			[smu7_power_state->performance_level_count++]);
+
+	PP_ASSERT_WITH_CODE(
+			(smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
+			"Performance levels exceeds SMC limit!",
+			return -EINVAL);
+
+	PP_ASSERT_WITH_CODE(
+			(smu7_power_state->performance_level_count <=
+					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
+			"Performance levels exceeds Driver limit!",
+			return -EINVAL);
+
+	/* Performance levels are arranged from low to high. */
+	performance_level->memory_clock = mclk_dep_table->entries
+			[state_entry->ucMemoryClockIndexLow].ulMclk;
+	if (sclk_dep_table->ucRevId == 0)
+		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+			[state_entry->ucEngineClockIndexLow].ulSclk;
+	else if (sclk_dep_table->ucRevId == 1)
+		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
+			[state_entry->ucEngineClockIndexLow].ulSclk;
+	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
+			state_entry->ucPCIEGenLow);
+	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
+			state_entry->ucPCIELaneHigh);
+
+	performance_level = &(smu7_power_state->performance_levels
+			[smu7_power_state->performance_level_count++]);
+	performance_level->memory_clock = mclk_dep_table->entries
+			[state_entry->ucMemoryClockIndexHigh].ulMclk;
+
+	if (sclk_dep_table->ucRevId == 0)
+		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+			[state_entry->ucEngineClockIndexHigh].ulSclk;
+	else if (sclk_dep_table->ucRevId == 1)
+		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
+			[state_entry->ucEngineClockIndexHigh].ulSclk;
+
+	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
+			state_entry->ucPCIEGenHigh);
+	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
+			state_entry->ucPCIELaneHigh);
+
+	return 0;
+}
+
+static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
+		unsigned long entry_index, struct pp_power_state *state)
+{
+	int result;
+	struct smu7_power_state *ps;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
+			table_info->vdd_dep_on_mclk;
+
+	state->hardware.magic = PHM_VIslands_Magic;
+
+	ps = (struct smu7_power_state *)(&state->hardware);
+
+	result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
+			smu7_get_pp_table_entry_callback_func_v1);
+
+	/* This is the earliest time we have all the dependency table and the VBIOS boot state
+	 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
+	 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state
+	 */
+	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
+		if (dep_mclk_table->entries[0].clk !=
+				data->vbios_boot_state.mclk_bootup_value)
+			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
+					"does not match VBIOS boot MCLK level");
+		if (dep_mclk_table->entries[0].vddci !=
+				data->vbios_boot_state.vddci_bootup_value)
+			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
+					"does not match VBIOS boot VDDCI level");
+	}
+
+	/* set DC compatible flag if this state supports DC */
+	if (!state->validation.disallowOnDC)
+		ps->dc_compatible = true;
+
+	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
+		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
+
+	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
+	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
+
+	if (!result) {
+		uint32_t i;
+
+		switch (state->classification.ui_label) {
+		case PP_StateUILabel_Performance:
+			data->use_pcie_performance_levels = true;
+			for (i = 0; i < ps->performance_level_count; i++) {
+				if (data->pcie_gen_performance.max <
+						ps->performance_levels[i].pcie_gen)
+					data->pcie_gen_performance.max =
+							ps->performance_levels[i].pcie_gen;
+
+				if (data->pcie_gen_performance.min >
+						ps->performance_levels[i].pcie_gen)
+					data->pcie_gen_performance.min =
+							ps->performance_levels[i].pcie_gen;
+
+				if (data->pcie_lane_performance.max <
+						ps->performance_levels[i].pcie_lane)
+					data->pcie_lane_performance.max =
+							ps->performance_levels[i].pcie_lane;
+				if (data->pcie_lane_performance.min >
+						ps->performance_levels[i].pcie_lane)
+					data->pcie_lane_performance.min =
+							ps->performance_levels[i].pcie_lane;
+			}
+			break;
+		case PP_StateUILabel_Battery:
+			data->use_pcie_power_saving_levels = true;
+
+			for (i = 0; i < ps->performance_level_count; i++) {
+				if (data->pcie_gen_power_saving.max <
+						ps->performance_levels[i].pcie_gen)
+					data->pcie_gen_power_saving.max =
+							ps->performance_levels[i].pcie_gen;
+
+				if (data->pcie_gen_power_saving.min >
+						ps->performance_levels[i].pcie_gen)
+					data->pcie_gen_power_saving.min =
+							ps->performance_levels[i].pcie_gen;
+
+				if (data->pcie_lane_power_saving.max <
+						ps->performance_levels[i].pcie_lane)
+					data->pcie_lane_power_saving.max =
+							ps->performance_levels[i].pcie_lane;
+
+				if (data->pcie_lane_power_saving.min >
+						ps->performance_levels[i].pcie_lane)
+					data->pcie_lane_power_saving.min =
+							ps->performance_levels[i].pcie_lane;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
+					struct pp_hw_power_state *power_state,
+					unsigned int index, const void *clock_info)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct smu7_power_state  *ps = cast_phw_smu7_power_state(power_state);
+	const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
+	struct smu7_performance_level *performance_level;
+	uint32_t engine_clock, memory_clock;
+	uint16_t pcie_gen_from_bios;
+
+	engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
+	memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
+
+	if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
+		data->highest_mclk = memory_clock;
+
+	performance_level = &(ps->performance_levels
+			[ps->performance_level_count++]);
+
+	PP_ASSERT_WITH_CODE(
+			(ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
+			"Performance levels exceeds SMC limit!",
+			return -EINVAL);
+
+	PP_ASSERT_WITH_CODE(
+			(ps->performance_level_count <=
+					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
+			"Performance levels exceeds Driver limit!",
+			return -EINVAL);
+
+	/* Performance levels are arranged from low to high. */
+	performance_level->memory_clock = memory_clock;
+	performance_level->engine_clock = engine_clock;
+
+	pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
+
+	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
+	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
+
+	return 0;
+}
+
+static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
+		unsigned long entry_index, struct pp_power_state *state)
+{
+	int result;
+	struct smu7_power_state *ps;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_clock_voltage_dependency_table *dep_mclk_table =
+			hwmgr->dyn_state.vddci_dependency_on_mclk;
+
+	memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
+
+	state->hardware.magic = PHM_VIslands_Magic;
+
+	ps = (struct smu7_power_state *)(&state->hardware);
+
+	result = pp_tables_get_entry(hwmgr, entry_index, state,
+			smu7_get_pp_table_entry_callback_func_v0);
+
+	/*
+	 * This is the earliest time we have all the dependency table
+	 * and the VBIOS boot state as
+	 * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot
+	 * state if there is only one VDDCI/MCLK level, check if it's
+	 * the same as VBIOS boot state
+	 */
+	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
+		if (dep_mclk_table->entries[0].clk !=
+				data->vbios_boot_state.mclk_bootup_value)
+			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
+					"does not match VBIOS boot MCLK level");
+		if (dep_mclk_table->entries[0].v !=
+				data->vbios_boot_state.vddci_bootup_value)
+			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
+					"does not match VBIOS boot VDDCI level");
+	}
+
+	/* set DC compatible flag if this state supports DC */
+	if (!state->validation.disallowOnDC)
+		ps->dc_compatible = true;
+
+	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
+		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
+
+	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
+	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
+
+	if (!result) {
+		uint32_t i;
+
+		switch (state->classification.ui_label) {
+		case PP_StateUILabel_Performance:
+			data->use_pcie_performance_levels = true;
+
+			for (i = 0; i < ps->performance_level_count; i++) {
+				if (data->pcie_gen_performance.max <
+						ps->performance_levels[i].pcie_gen)
+					data->pcie_gen_performance.max =
+							ps->performance_levels[i].pcie_gen;
+
+				if (data->pcie_gen_performance.min >
+						ps->performance_levels[i].pcie_gen)
+					data->pcie_gen_performance.min =
+							ps->performance_levels[i].pcie_gen;
+
+				if (data->pcie_lane_performance.max <
+						ps->performance_levels[i].pcie_lane)
+					data->pcie_lane_performance.max =
+							ps->performance_levels[i].pcie_lane;
+
+				if (data->pcie_lane_performance.min >
+						ps->performance_levels[i].pcie_lane)
+					data->pcie_lane_performance.min =
+							ps->performance_levels[i].pcie_lane;
+			}
+			break;
+		case PP_StateUILabel_Battery:
+			data->use_pcie_power_saving_levels = true;
+
+			for (i = 0; i < ps->performance_level_count; i++) {
+				if (data->pcie_gen_power_saving.max <
+						ps->performance_levels[i].pcie_gen)
+					data->pcie_gen_power_saving.max =
+							ps->performance_levels[i].pcie_gen;
+
+				if (data->pcie_gen_power_saving.min >
+						ps->performance_levels[i].pcie_gen)
+					data->pcie_gen_power_saving.min =
+							ps->performance_levels[i].pcie_gen;
+
+				if (data->pcie_lane_power_saving.max <
+						ps->performance_levels[i].pcie_lane)
+					data->pcie_lane_power_saving.max =
+							ps->performance_levels[i].pcie_lane;
+
+				if (data->pcie_lane_power_saving.min >
+						ps->performance_levels[i].pcie_lane)
+					data->pcie_lane_power_saving.min =
+							ps->performance_levels[i].pcie_lane;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
+		unsigned long entry_index, struct pp_power_state *state)
+{
+	if (hwmgr->pp_table_version == PP_TABLE_V0)
+		return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
+	else if (hwmgr->pp_table_version == PP_TABLE_V1)
+		return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
+
+	return 0;
+}
+
+static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
+{
+	uint32_t sclk, mclk, activity_percent;
+	uint32_t offset;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	switch (idx) {
+	case AMDGPU_PP_SENSOR_GFX_SCLK:
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+		sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+		*value = sclk;
+		return 0;
+	case AMDGPU_PP_SENSOR_GFX_MCLK:
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+		mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+		*value = mclk;
+		return 0;
+	case AMDGPU_PP_SENSOR_GPU_LOAD:
+		offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+								SMU_SoftRegisters,
+								AverageGraphicsActivity);
+
+		activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
+		activity_percent += 0x80;
+		activity_percent >>= 8;
+		*value = activity_percent > 100 ? 100 : activity_percent;
+		return 0;
+	case AMDGPU_PP_SENSOR_GPU_TEMP:
+		*value = smu7_thermal_get_temperature(hwmgr);
+		return 0;
+	case AMDGPU_PP_SENSOR_UVD_POWER:
+		*value = data->uvd_power_gated ? 0 : 1;
+		return 0;
+	case AMDGPU_PP_SENSOR_VCE_POWER:
+		*value = data->vce_power_gated ? 0 : 1;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
+{
+	const struct phm_set_power_state_input *states =
+			(const struct phm_set_power_state_input *)input;
+	const struct smu7_power_state *smu7_ps =
+			cast_const_phw_smu7_power_state(states->pnew_state);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+	uint32_t sclk = smu7_ps->performance_levels
+			[smu7_ps->performance_level_count - 1].engine_clock;
+	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+	uint32_t mclk = smu7_ps->performance_levels
+			[smu7_ps->performance_level_count - 1].memory_clock;
+	struct PP_Clocks min_clocks = {0};
+	uint32_t i;
+	struct cgs_display_info info = {0};
+
+	data->need_update_smu7_dpm_table = 0;
+
+	for (i = 0; i < sclk_table->count; i++) {
+		if (sclk == sclk_table->dpm_levels[i].value)
+			break;
+	}
+
+	if (i >= sclk_table->count)
+		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+	else {
+	/* TODO: Check SCLK in DAL's minimum clocks
+	 * in case DeepSleep divider update is required.
+	 */
+		if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
+			(min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
+				data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
+			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
+	}
+
+	for (i = 0; i < mclk_table->count; i++) {
+		if (mclk == mclk_table->dpm_levels[i].value)
+			break;
+	}
+
+	if (i >= mclk_table->count)
+		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+
+	if (data->display_timing.num_existing_displays != info.display_count)
+		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
+
+	return 0;
+}
+
+static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
+		const struct smu7_power_state *smu7_ps)
+{
+	uint32_t i;
+	uint32_t sclk, max_sclk = 0;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct smu7_dpm_table *dpm_table = &data->dpm_table;
+
+	for (i = 0; i < smu7_ps->performance_level_count; i++) {
+		sclk = smu7_ps->performance_levels[i].engine_clock;
+		if (max_sclk < sclk)
+			max_sclk = sclk;
+	}
+
+	for (i = 0; i < dpm_table->sclk_table.count; i++) {
+		if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
+			return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
+					dpm_table->pcie_speed_table.dpm_levels
+					[dpm_table->pcie_speed_table.count - 1].value :
+					dpm_table->pcie_speed_table.dpm_levels[i].value);
+	}
+
+	return 0;
+}
+
+static int smu7_request_link_speed_change_before_state_change(
+		struct pp_hwmgr *hwmgr, const void *input)
+{
+	const struct phm_set_power_state_input *states =
+			(const struct phm_set_power_state_input *)input;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	const struct smu7_power_state *smu7_nps =
+			cast_const_phw_smu7_power_state(states->pnew_state);
+	const struct smu7_power_state *polaris10_cps =
+			cast_const_phw_smu7_power_state(states->pcurrent_state);
+
+	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
+	uint16_t current_link_speed;
+
+	if (data->force_pcie_gen == PP_PCIEGenInvalid)
+		current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
+	else
+		current_link_speed = data->force_pcie_gen;
+
+	data->force_pcie_gen = PP_PCIEGenInvalid;
+	data->pspp_notify_required = false;
+
+	if (target_link_speed > current_link_speed) {
+		switch (target_link_speed) {
+		case PP_PCIEGen3:
+			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
+				break;
+			data->force_pcie_gen = PP_PCIEGen2;
+			if (current_link_speed == PP_PCIEGen2)
+				break;
+		case PP_PCIEGen2:
+			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
+				break;
+		default:
+			data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
+			break;
+		}
+	} else {
+		if (target_link_speed < current_link_speed)
+			data->pspp_notify_required = true;
+	}
+
+	return 0;
+}
+
+static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (0 == data->need_update_smu7_dpm_table)
+		return 0;
+
+	if ((0 == data->sclk_dpm_key_disabled) &&
+		(data->need_update_smu7_dpm_table &
+			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
+		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+				"Trying to freeze SCLK DPM when DPM is disabled",
+				);
+		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_SCLKDPM_FreezeLevel),
+				"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
+				return -EINVAL);
+	}
+
+	if ((0 == data->mclk_dpm_key_disabled) &&
+		(data->need_update_smu7_dpm_table &
+		 DPMTABLE_OD_UPDATE_MCLK)) {
+		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+				"Trying to freeze MCLK DPM when DPM is disabled",
+				);
+		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_MCLKDPM_FreezeLevel),
+				"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
+				return -EINVAL);
+	}
+
+	return 0;
+}
+
+static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
+		struct pp_hwmgr *hwmgr, const void *input)
+{
+	int result = 0;
+	const struct phm_set_power_state_input *states =
+			(const struct phm_set_power_state_input *)input;
+	const struct smu7_power_state *smu7_ps =
+			cast_const_phw_smu7_power_state(states->pnew_state);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t sclk = smu7_ps->performance_levels
+			[smu7_ps->performance_level_count - 1].engine_clock;
+	uint32_t mclk = smu7_ps->performance_levels
+			[smu7_ps->performance_level_count - 1].memory_clock;
+	struct smu7_dpm_table *dpm_table = &data->dpm_table;
+
+	struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
+	uint32_t dpm_count, clock_percent;
+	uint32_t i;
+
+	if (0 == data->need_update_smu7_dpm_table)
+		return 0;
+
+	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
+		dpm_table->sclk_table.dpm_levels
+		[dpm_table->sclk_table.count - 1].value = sclk;
+
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
+		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
+		/* Need to do calculation based on the golden DPM table
+		 * as the Heatmap GPU Clock axis is also based on the default values
+		 */
+			PP_ASSERT_WITH_CODE(
+				(golden_dpm_table->sclk_table.dpm_levels
+						[golden_dpm_table->sclk_table.count - 1].value != 0),
+				"Divide by 0!",
+				return -EINVAL);
+			dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;
+
+			for (i = dpm_count; i > 1; i--) {
+				if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
+					clock_percent =
+					      ((sclk
+						- golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
+						) * 100)
+						/ golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
+
+					dpm_table->sclk_table.dpm_levels[i].value =
+							golden_dpm_table->sclk_table.dpm_levels[i].value +
+							(golden_dpm_table->sclk_table.dpm_levels[i].value *
+								clock_percent)/100;
+
+				} else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
+					clock_percent =
+						((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
+						- sclk) * 100)
+						/ golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
+
+					dpm_table->sclk_table.dpm_levels[i].value =
+							golden_dpm_table->sclk_table.dpm_levels[i].value -
+							(golden_dpm_table->sclk_table.dpm_levels[i].value *
+									clock_percent) / 100;
+				} else
+					dpm_table->sclk_table.dpm_levels[i].value =
+							golden_dpm_table->sclk_table.dpm_levels[i].value;
+			}
+		}
+	}
+
+	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
+		dpm_table->mclk_table.dpm_levels
+			[dpm_table->mclk_table.count - 1].value = mclk;
+
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
+		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
+
+			PP_ASSERT_WITH_CODE(
+					(golden_dpm_table->mclk_table.dpm_levels
+						[golden_dpm_table->mclk_table.count-1].value != 0),
+					"Divide by 0!",
+					return -EINVAL);
+			dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
+			for (i = dpm_count; i > 1; i--) {
+				if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
+					clock_percent = ((mclk -
+					golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
+					/ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
+
+					dpm_table->mclk_table.dpm_levels[i].value =
+							golden_dpm_table->mclk_table.dpm_levels[i].value +
+							(golden_dpm_table->mclk_table.dpm_levels[i].value *
+							clock_percent) / 100;
+
+				} else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
+					clock_percent = (
+					 (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
+					* 100)
+					/ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
+
+					dpm_table->mclk_table.dpm_levels[i].value =
+							golden_dpm_table->mclk_table.dpm_levels[i].value -
+							(golden_dpm_table->mclk_table.dpm_levels[i].value *
+									clock_percent) / 100;
+				} else
+					dpm_table->mclk_table.dpm_levels[i].value =
+							golden_dpm_table->mclk_table.dpm_levels[i].value;
+			}
+		}
+	}
+
+	if (data->need_update_smu7_dpm_table &
+			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
+		result = smum_populate_all_graphic_levels(hwmgr);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
+				return result);
+	}
+
+	if (data->need_update_smu7_dpm_table &
+			(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
+		/*populate MCLK dpm table to SMU7 */
+		result = smum_populate_all_memory_levels(hwmgr);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
+				return result);
+	}
+
+	return result;
+}
+
+static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
+			  struct smu7_single_dpm_table *dpm_table,
+			uint32_t low_limit, uint32_t high_limit)
+{
+	uint32_t i;
+
+	for (i = 0; i < dpm_table->count; i++) {
+		if ((dpm_table->dpm_levels[i].value < low_limit)
+		|| (dpm_table->dpm_levels[i].value > high_limit))
+			dpm_table->dpm_levels[i].enabled = false;
+		else
+			dpm_table->dpm_levels[i].enabled = true;
+	}
+
+	return 0;
+}
+
+static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
+		const struct smu7_power_state *smu7_ps)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t high_limit_count;
+
+	PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
+			"power state did not have any performance level",
+			return -EINVAL);
+
+	high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
+
+	smu7_trim_single_dpm_states(hwmgr,
+			&(data->dpm_table.sclk_table),
+			smu7_ps->performance_levels[0].engine_clock,
+			smu7_ps->performance_levels[high_limit_count].engine_clock);
+
+	smu7_trim_single_dpm_states(hwmgr,
+			&(data->dpm_table.mclk_table),
+			smu7_ps->performance_levels[0].memory_clock,
+			smu7_ps->performance_levels[high_limit_count].memory_clock);
+
+	return 0;
+}
+
+static int smu7_generate_dpm_level_enable_mask(
+		struct pp_hwmgr *hwmgr, const void *input)
+{
+	int result;
+	const struct phm_set_power_state_input *states =
+			(const struct phm_set_power_state_input *)input;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	const struct smu7_power_state *smu7_ps =
+			cast_const_phw_smu7_power_state(states->pnew_state);
+
+	result = smu7_trim_dpm_states(hwmgr, smu7_ps);
+	if (result)
+		return result;
+
+	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
+	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
+	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
+
+	return 0;
+}
+
+static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (0 == data->need_update_smu7_dpm_table)
+		return 0;
+
+	if ((0 == data->sclk_dpm_key_disabled) &&
+		(data->need_update_smu7_dpm_table &
+		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
+
+		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+				"Trying to Unfreeze SCLK DPM when DPM is disabled",
+				);
+		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+			"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
+			return -EINVAL);
+	}
+
+	if ((0 == data->mclk_dpm_key_disabled) &&
+		(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+
+		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+				"Trying to Unfreeze MCLK DPM when DPM is disabled",
+				);
+		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+		    "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
+		    return -EINVAL);
+	}
+
+	data->need_update_smu7_dpm_table = 0;
+
+	return 0;
+}
+
+static int smu7_notify_link_speed_change_after_state_change(
+		struct pp_hwmgr *hwmgr, const void *input)
+{
+	const struct phm_set_power_state_input *states =
+			(const struct phm_set_power_state_input *)input;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	const struct smu7_power_state *smu7_ps =
+			cast_const_phw_smu7_power_state(states->pnew_state);
+	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
+	uint8_t  request;
+
+	if (data->pspp_notify_required) {
+		if (target_link_speed == PP_PCIEGen3)
+			request = PCIE_PERF_REQ_GEN3;
+		else if (target_link_speed == PP_PCIEGen2)
+			request = PCIE_PERF_REQ_GEN2;
+		else
+			request = PCIE_PERF_REQ_GEN1;
+
+		if (request == PCIE_PERF_REQ_GEN1 &&
+				smu7_get_current_pcie_speed(hwmgr) > 0)
+			return 0;
+
+		if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
+			if (PP_PCIEGen2 == target_link_speed)
+				printk("PSPP request to switch to Gen2 from Gen3 Failed!");
+			else
+				printk("PSPP request to switch to Gen1 from Gen2 Failed!");
+		}
+	}
+
+	return 0;
+}
+
+static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK)
+		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+			(PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+	return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ?  0 : -EINVAL;
+}
+
+static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+{
+	int tmp_result, result = 0;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to find DPM states clocks in DPM table!",
+			result = tmp_result);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PCIEPerformanceRequest)) {
+		tmp_result =
+			smu7_request_link_speed_change_before_state_change(hwmgr, input);
+		PP_ASSERT_WITH_CODE((0 == tmp_result),
+				"Failed to request link speed change before state change!",
+				result = tmp_result);
+	}
+
+	tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);
+
+	tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to populate and upload SCLK MCLK DPM levels!",
+			result = tmp_result);
+
+	tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to generate DPM level enabled mask!",
+			result = tmp_result);
+
+	tmp_result = smum_update_sclk_threshold(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to update SCLK threshold!",
+			result = tmp_result);
+
+	tmp_result = smu7_notify_smc_display(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to notify smc display settings!",
+			result = tmp_result);
+
+	tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to unfreeze SCLK MCLK DPM!",
+			result = tmp_result);
+
+	tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to upload DPM level enabled mask!",
+			result = tmp_result);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PCIEPerformanceRequest)) {
+		tmp_result =
+			smu7_notify_link_speed_change_after_state_change(hwmgr, input);
+		PP_ASSERT_WITH_CODE((0 == tmp_result),
+				"Failed to notify link speed change after state change!",
+				result = tmp_result);
+	}
+	data->apply_optimized_settings = false;
+	return result;
+}
+
+static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
+{
+	hwmgr->thermal_controller.
+	advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
+
+	if (phm_is_hw_access_blocked(hwmgr))
+		return 0;
+
+	return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+			PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
+}
+
+int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
+{
+	PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
+
+	return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ?  0 : -1;
+}
+
+int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
+{
+	uint32_t num_active_displays = 0;
+	struct cgs_display_info info = {0};
+
+	info.mode_info = NULL;
+	cgs_get_active_displays_info(hwmgr->device, &info);
+
+	num_active_displays = info.display_count;
+
+	if (num_active_displays > 1 && hwmgr->display_config.multi_monitor_in_sync != true)
+		smu7_notify_smc_display_change(hwmgr, false);
+
+	return 0;
+}
+
+/**
+* Programs the display gap
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always OK
+*/
+int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t num_active_displays = 0;
+	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
+	uint32_t display_gap2;
+	uint32_t pre_vbi_time_in_us;
+	uint32_t frame_time_in_us;
+	uint32_t ref_clock;
+	uint32_t refresh_rate = 0;
+	struct cgs_display_info info = {0};
+	struct cgs_mode_info mode_info;
+
+	info.mode_info = &mode_info;
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+	num_active_displays = info.display_count;
+
+	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
+
+	ref_clock = mode_info.ref_clock;
+	refresh_rate = mode_info.refresh_rate;
+
+	if (0 == refresh_rate)
+		refresh_rate = 60;
+
+	frame_time_in_us = 1000000 / refresh_rate;
+
+	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+	data->frame_time_x2 = frame_time_in_us * 2 / 100;
+
+	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+							SMU_SoftRegisters,
+							PreVBlankGap), 0x64);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+							SMU_SoftRegisters,
+							VBlankTimeout),
+					(frame_time_in_us - pre_vbi_time_in_us));
+
+	return 0;
+}
+
+int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+{
+	return smu7_program_display_gap(hwmgr);
+}
+
+/**
+*  Set maximum target operating fan output RPM
+*
+* @param    hwmgr:  the address of the powerplay hardware manager.
+* @param    usMaxFanRpm:  max operating fan RPM value.
+* @return   The response that came from the SMC.
+*/
+static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
+{
+	hwmgr->thermal_controller.
+	advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
+
+	if (phm_is_hw_access_blocked(hwmgr))
+		return 0;
+
+	return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+			PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
+}
+
+int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
+					const void *thermal_interrupt_info)
+{
+	return 0;
+}
+
+bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	bool is_update_required = false;
+	struct cgs_display_info info = {0, 0, NULL};
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+
+	if (data->display_timing.num_existing_displays != info.display_count)
+		is_update_required = true;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+		if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr &&
+			(data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
+			hwmgr->display_config.min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
+			is_update_required = true;
+	}
+	return is_update_required;
+}
+
+static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
+							   const struct smu7_performance_level *pl2)
+{
+	return ((pl1->memory_clock == pl2->memory_clock) &&
+		  (pl1->engine_clock == pl2->engine_clock) &&
+		  (pl1->pcie_gen == pl2->pcie_gen) &&
+		  (pl1->pcie_lane == pl2->pcie_lane));
+}
+
+int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+{
+	const struct smu7_power_state *psa;
+	const struct smu7_power_state *psb;
+	int i;
+
+	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
+		return -EINVAL;
+
+	psa = cast_const_phw_smu7_power_state(pstate1);
+	psb = cast_const_phw_smu7_power_state(pstate2);
+	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
+	if (psa->performance_level_count != psb->performance_level_count) {
+		*equal = false;
+		return 0;
+	}
+
+	for (i = 0; i < psa->performance_level_count; i++) {
+		if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
+			/* If we have found even one performance level pair that is different the states are different. */
+			*equal = false;
+			return 0;
+		}
+	}
+
+	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
+	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
+	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
+	*equal &= (psa->sclk_threshold == psb->sclk_threshold);
+
+	return 0;
+}
+
+int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	uint32_t vbios_version;
+	uint32_t tmp;
+
+	/* Read MC indirect register offset 0x9F bits [3:0] to see
+	 * if VBIOS has already loaded a full version of MC ucode
+	 * or not.
+	 */
+
+	smu7_get_mc_microcode_version(hwmgr);
+	vbios_version = hwmgr->microcode_version_info.MC & 0xf;
+
+	data->need_long_memory_training = false;
+
+	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
+							ixMC_IO_DEBUG_UP_13);
+	tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
+
+	if (tmp & (1 << 23)) {
+		data->mem_latency_high = MEM_LATENCY_HIGH;
+		data->mem_latency_low = MEM_LATENCY_LOW;
+	} else {
+		data->mem_latency_high = 330;
+		data->mem_latency_low = 330;
+	}
+
+	return 0;
+}
+
+static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	data->clock_registers.vCG_SPLL_FUNC_CNTL         =
+		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
+	data->clock_registers.vCG_SPLL_FUNC_CNTL_2       =
+		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
+	data->clock_registers.vCG_SPLL_FUNC_CNTL_3       =
+		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
+	data->clock_registers.vCG_SPLL_FUNC_CNTL_4       =
+		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
+	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM   =
+		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
+	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
+		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
+	data->clock_registers.vDLL_CNTL                  =
+		cgs_read_register(hwmgr->device, mmDLL_CNTL);
+	data->clock_registers.vMCLK_PWRMGT_CNTL          =
+		cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
+	data->clock_registers.vMPLL_AD_FUNC_CNTL         =
+		cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
+	data->clock_registers.vMPLL_DQ_FUNC_CNTL         =
+		cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
+	data->clock_registers.vMPLL_FUNC_CNTL            =
+		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
+	data->clock_registers.vMPLL_FUNC_CNTL_1          =
+		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
+	data->clock_registers.vMPLL_FUNC_CNTL_2          =
+		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
+	data->clock_registers.vMPLL_SS1                  =
+		cgs_read_register(hwmgr->device, mmMPLL_SS1);
+	data->clock_registers.vMPLL_SS2                  =
+		cgs_read_register(hwmgr->device, mmMPLL_SS2);
+	return 0;
+
+}
+
+/**
+ * Find out if memory is GDDR5.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t temp;
+
+	temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
+
+	data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
+			((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
+			 MC_SEQ_MISC0_GDDR5_SHIFT));
+
+	return 0;
+}
+
+/**
+ * Enables Dynamic Power Management by SMC
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
+{
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			GENERAL_PWRMGT, STATIC_PM_EN, 1);
+
+	return 0;
+}
+
+/**
+ * Initialize PowerGating States for different engines
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	data->uvd_power_gated = false;
+	data->vce_power_gated = false;
+	data->samu_power_gated = false;
+
+	return 0;
+}
+
+static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	data->low_sclk_interrupt_threshold = 0;
+	return 0;
+}
+
+int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
+{
+	int tmp_result, result = 0;
+
+	smu7_upload_mc_firmware(hwmgr);
+
+	tmp_result = smu7_read_clock_registers(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to read clock registers!", result = tmp_result);
+
+	tmp_result = smu7_get_memory_type(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to get memory type!", result = tmp_result);
+
+	tmp_result = smu7_enable_acpi_power_management(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable ACPI power management!", result = tmp_result);
+
+	tmp_result = smu7_init_power_gate_state(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to init power gate state!", result = tmp_result);
+
+	tmp_result = smu7_get_mc_microcode_version(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to get MC microcode version!", result = tmp_result);
+
+	tmp_result = smu7_init_sclk_threshold(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to init sclk threshold!", result = tmp_result);
+
+	return result;
+}
+
+static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, uint32_t mask)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+		return -EINVAL;
+
+	switch (type) {
+	case PP_SCLK:
+		if (!data->sclk_dpm_key_disabled)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_SCLKDPM_SetEnabledMask,
+					data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
+		break;
+	case PP_MCLK:
+		if (!data->mclk_dpm_key_disabled)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_MCLKDPM_SetEnabledMask,
+					data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
+		break;
+	case PP_PCIE:
+	{
+		uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
+		uint32_t level = 0;
+
+		while (tmp >>= 1)
+			level++;
+
+		if (!data->pcie_dpm_key_disabled)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_PCIeDPM_ForceLevel,
+					level);
+		break;
+	}
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, char *buf)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+	struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
+	int i, now, size = 0;
+	uint32_t clock, pcie_speed;
+
+	switch (type) {
+	case PP_SCLK:
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+		for (i = 0; i < sclk_table->count; i++) {
+			if (clock > sclk_table->dpm_levels[i].value)
+				continue;
+			break;
+		}
+		now = i;
+
+		for (i = 0; i < sclk_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					i, sclk_table->dpm_levels[i].value / 100,
+					(i == now) ? "*" : "");
+		break;
+	case PP_MCLK:
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+		for (i = 0; i < mclk_table->count; i++) {
+			if (clock > mclk_table->dpm_levels[i].value)
+				continue;
+			break;
+		}
+		now = i;
+
+		for (i = 0; i < mclk_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					i, mclk_table->dpm_levels[i].value / 100,
+					(i == now) ? "*" : "");
+		break;
+	case PP_PCIE:
+		pcie_speed = smu7_get_current_pcie_speed(hwmgr);
+		for (i = 0; i < pcie_table->count; i++) {
+			if (pcie_speed != pcie_table->dpm_levels[i].value)
+				continue;
+			break;
+		}
+		now = i;
+
+		for (i = 0; i < pcie_table->count; i++)
+			size += sprintf(buf + size, "%d: %s %s\n", i,
+					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
+					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
+					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
+					(i == now) ? "*" : "");
+		break;
+	default:
+		break;
+	}
+	return size;
+}
+
+static int smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+{
+	if (mode) {
+		/* stop auto-manage */
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_MicrocodeFanControl))
+			smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
+		smu7_fan_ctrl_set_static_mode(hwmgr, mode);
+	} else
+		/* restart auto-manage */
+		smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
+
+	return 0;
+}
+
+static int smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
+{
+	if (hwmgr->fan_ctrl_is_in_default_mode)
+		return hwmgr->fan_ctrl_default_mode;
+	else
+		return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				CG_FDO_CTRL2, FDO_PWM_MODE);
+}
+
+static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+	struct smu7_single_dpm_table *golden_sclk_table =
+			&(data->golden_dpm_table.sclk_table);
+	int value;
+
+	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+			100 /
+			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+	return value;
+}
+
+static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct smu7_single_dpm_table *golden_sclk_table =
+			&(data->golden_dpm_table.sclk_table);
+	struct pp_power_state  *ps;
+	struct smu7_power_state  *smu7_ps;
+
+	if (value > 20)
+		value = 20;
+
+	ps = hwmgr->request_ps;
+
+	if (ps == NULL)
+		return -EINVAL;
+
+	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
+
+	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
+			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+			value / 100 +
+			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+	return 0;
+}
+
+static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+	struct smu7_single_dpm_table *golden_mclk_table =
+			&(data->golden_dpm_table.mclk_table);
+	int value;
+
+	value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+			100 /
+			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+	return value;
+}
+
+static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct smu7_single_dpm_table *golden_mclk_table =
+			&(data->golden_dpm_table.mclk_table);
+	struct pp_power_state  *ps;
+	struct smu7_power_state  *smu7_ps;
+
+	if (value > 20)
+		value = 20;
+
+	ps = hwmgr->request_ps;
+
+	if (ps == NULL)
+		return -EINVAL;
+
+	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
+
+	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
+			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+			value / 100 +
+			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+	return 0;
+}
+
+
+static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
+{
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)hwmgr->pptable;
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+	int i;
+
+	if (table_info == NULL)
+		return -EINVAL;
+
+	dep_sclk_table = table_info->vdd_dep_on_sclk;
+
+	for (i = 0; i < dep_sclk_table->count; i++) {
+		clocks->clock[i] = dep_sclk_table->entries[i].clk;
+		clocks->count++;
+	}
+	return 0;
+}
+
+static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
+		return data->mem_latency_high;
+	else if (clk >= MEM_FREQ_HIGH_LATENCY)
+		return data->mem_latency_low;
+	else
+		return MEM_LATENCY_ERR;
+}
+
+static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
+{
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)hwmgr->pptable;
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
+	int i;
+
+	if (table_info == NULL)
+		return -EINVAL;
+
+	dep_mclk_table = table_info->vdd_dep_on_mclk;
+
+	for (i = 0; i < dep_mclk_table->count; i++) {
+		clocks->clock[i] = dep_mclk_table->entries[i].clk;
+		clocks->latency[i] = smu7_get_mem_latency(hwmgr,
+						dep_mclk_table->entries[i].clk);
+		clocks->count++;
+	}
+	return 0;
+}
+
+static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
+						struct amd_pp_clocks *clocks)
+{
+	switch (type) {
+	case amd_pp_sys_clock:
+		smu7_get_sclks(hwmgr, clocks);
+		break;
+	case amd_pp_mem_clock:
+		smu7_get_mclks(hwmgr, clocks);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
+	.backend_init = &smu7_hwmgr_backend_init,
+	.backend_fini = &phm_hwmgr_backend_fini,
+	.asic_setup = &smu7_setup_asic_task,
+	.dynamic_state_management_enable = &smu7_enable_dpm_tasks,
+	.apply_state_adjust_rules = smu7_apply_state_adjust_rules,
+	.force_dpm_level = &smu7_force_dpm_level,
+	.power_state_set = smu7_set_power_state_tasks,
+	.get_power_state_size = smu7_get_power_state_size,
+	.get_mclk = smu7_dpm_get_mclk,
+	.get_sclk = smu7_dpm_get_sclk,
+	.patch_boot_state = smu7_dpm_patch_boot_state,
+	.get_pp_table_entry = smu7_get_pp_table_entry,
+	.get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
+	.powerdown_uvd = smu7_powerdown_uvd,
+	.powergate_uvd = smu7_powergate_uvd,
+	.powergate_vce = smu7_powergate_vce,
+	.disable_clock_power_gating = smu7_disable_clock_power_gating,
+	.update_clock_gatings = smu7_update_clock_gatings,
+	.notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
+	.display_config_changed = smu7_display_configuration_changed_task,
+	.set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
+	.set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
+	.get_temperature = smu7_thermal_get_temperature,
+	.stop_thermal_controller = smu7_thermal_stop_thermal_controller,
+	.get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
+	.get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
+	.set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
+	.reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
+	.get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
+	.set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
+	.uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
+	.register_internal_thermal_interrupt = smu7_register_internal_thermal_interrupt,
+	.check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
+	.check_states_equal = smu7_check_states_equal,
+	.set_fan_control_mode = smu7_set_fan_control_mode,
+	.get_fan_control_mode = smu7_get_fan_control_mode,
+	.force_clock_level = smu7_force_clock_level,
+	.print_clock_levels = smu7_print_clock_levels,
+	.enable_per_cu_power_gating = smu7_enable_per_cu_power_gating,
+	.get_sclk_od = smu7_get_sclk_od,
+	.set_sclk_od = smu7_set_sclk_od,
+	.get_mclk_od = smu7_get_mclk_od,
+	.set_mclk_od = smu7_set_mclk_od,
+	.get_clock_by_type = smu7_get_clock_by_type,
+	.read_sensor = smu7_read_sensor,
+	.dynamic_state_management_disable = smu7_disable_dpm_tasks,
+};
+
+uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
+		uint32_t clock_insr)
+{
+	uint8_t i;
+	uint32_t temp;
+	uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
+
+	PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
+	for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
+		temp = clock >> i;
+
+		if (temp >= min || i == 0)
+			break;
+	}
+	return i;
+}
+
+int smu7_hwmgr_init(struct pp_hwmgr *hwmgr)
+{
+	int ret = 0;
+
+	hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
+	if (hwmgr->pp_table_version == PP_TABLE_V0)
+		hwmgr->pptable_func = &pptable_funcs;
+	else if (hwmgr->pp_table_version == PP_TABLE_V1)
+		hwmgr->pptable_func = &pptable_v1_0_funcs;
+
+	pp_smu7_thermal_initialize(hwmgr);
+	return ret;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
new file mode 100644
index 0000000..27e7f76
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _SMU7_HWMGR_H
+#define _SMU7_HWMGR_H
+
+#include "hwmgr.h"
+#include "ppatomctrl.h"
+
+#define SMU7_MAX_HARDWARE_POWERLEVELS   2
+
+#define SMU7_VOLTAGE_CONTROL_NONE                   0x0
+#define SMU7_VOLTAGE_CONTROL_BY_GPIO                0x1
+#define SMU7_VOLTAGE_CONTROL_BY_SVID2               0x2
+#define SMU7_VOLTAGE_CONTROL_MERGED                 0x3
+
+#define DPMTABLE_OD_UPDATE_SCLK     0x00000001
+#define DPMTABLE_OD_UPDATE_MCLK     0x00000002
+#define DPMTABLE_UPDATE_SCLK        0x00000004
+#define DPMTABLE_UPDATE_MCLK        0x00000008
+
+enum gpu_pt_config_reg_type {
+	GPU_CONFIGREG_MMR = 0,
+	GPU_CONFIGREG_SMC_IND,
+	GPU_CONFIGREG_DIDT_IND,
+	GPU_CONFIGREG_GC_CAC_IND,
+	GPU_CONFIGREG_CACHE,
+	GPU_CONFIGREG_MAX
+};
+
+struct gpu_pt_config_reg {
+	uint32_t                           offset;
+	uint32_t                           mask;
+	uint32_t                           shift;
+	uint32_t                           value;
+	enum gpu_pt_config_reg_type       type;
+};
+
+struct smu7_performance_level {
+	uint32_t  memory_clock;
+	uint32_t  engine_clock;
+	uint16_t  pcie_gen;
+	uint16_t  pcie_lane;
+};
+
+struct smu7_thermal_temperature_setting {
+	long temperature_low;
+	long temperature_high;
+	long temperature_shutdown;
+};
+
+struct smu7_uvd_clocks {
+	uint32_t  vclk;
+	uint32_t  dclk;
+};
+
+struct smu7_vce_clocks {
+	uint32_t  evclk;
+	uint32_t  ecclk;
+};
+
+struct smu7_power_state {
+	uint32_t                  magic;
+	struct smu7_uvd_clocks    uvd_clks;
+	struct smu7_vce_clocks    vce_clks;
+	uint32_t                  sam_clk;
+	uint16_t                  performance_level_count;
+	bool                      dc_compatible;
+	uint32_t                  sclk_threshold;
+	struct smu7_performance_level  performance_levels[SMU7_MAX_HARDWARE_POWERLEVELS];
+};
+
+struct smu7_dpm_level {
+	bool	enabled;
+	uint32_t	value;
+	uint32_t	param1;
+};
+
+#define SMU7_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define MAX_REGULAR_DPM_NUMBER 8
+#define SMU7_MINIMUM_ENGINE_CLOCK 2500
+
+struct smu7_single_dpm_table {
+	uint32_t		count;
+	struct smu7_dpm_level	dpm_levels[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct smu7_dpm_table {
+	struct smu7_single_dpm_table  sclk_table;
+	struct smu7_single_dpm_table  mclk_table;
+	struct smu7_single_dpm_table  pcie_speed_table;
+	struct smu7_single_dpm_table  vddc_table;
+	struct smu7_single_dpm_table  vddci_table;
+	struct smu7_single_dpm_table  mvdd_table;
+};
+
+struct smu7_clock_registers {
+	uint32_t  vCG_SPLL_FUNC_CNTL;
+	uint32_t  vCG_SPLL_FUNC_CNTL_2;
+	uint32_t  vCG_SPLL_FUNC_CNTL_3;
+	uint32_t  vCG_SPLL_FUNC_CNTL_4;
+	uint32_t  vCG_SPLL_SPREAD_SPECTRUM;
+	uint32_t  vCG_SPLL_SPREAD_SPECTRUM_2;
+	uint32_t  vDLL_CNTL;
+	uint32_t  vMCLK_PWRMGT_CNTL;
+	uint32_t  vMPLL_AD_FUNC_CNTL;
+	uint32_t  vMPLL_DQ_FUNC_CNTL;
+	uint32_t  vMPLL_FUNC_CNTL;
+	uint32_t  vMPLL_FUNC_CNTL_1;
+	uint32_t  vMPLL_FUNC_CNTL_2;
+	uint32_t  vMPLL_SS1;
+	uint32_t  vMPLL_SS2;
+};
+
+#define DISABLE_MC_LOADMICROCODE   1
+#define DISABLE_MC_CFGPROGRAMMING  2
+
+struct smu7_voltage_smio_registers {
+	uint32_t vS0_VID_LOWER_SMIO_CNTL;
+};
+
+#define SMU7_MAX_LEAKAGE_COUNT  8
+
+struct smu7_leakage_voltage {
+	uint16_t  count;
+	uint16_t  leakage_id[SMU7_MAX_LEAKAGE_COUNT];
+	uint16_t  actual_voltage[SMU7_MAX_LEAKAGE_COUNT];
+};
+
+struct smu7_vbios_boot_state {
+	uint16_t    mvdd_bootup_value;
+	uint16_t    vddc_bootup_value;
+	uint16_t    vddci_bootup_value;
+	uint16_t    vddgfx_bootup_value;
+	uint32_t    sclk_bootup_value;
+	uint32_t    mclk_bootup_value;
+	uint16_t    pcie_gen_bootup_value;
+	uint16_t    pcie_lane_bootup_value;
+};
+
+struct smu7_display_timing {
+	uint32_t  min_clock_in_sr;
+	uint32_t  num_existing_displays;
+};
+
+struct smu7_dpmlevel_enable_mask {
+	uint32_t  uvd_dpm_enable_mask;
+	uint32_t  vce_dpm_enable_mask;
+	uint32_t  acp_dpm_enable_mask;
+	uint32_t  samu_dpm_enable_mask;
+	uint32_t  sclk_dpm_enable_mask;
+	uint32_t  mclk_dpm_enable_mask;
+	uint32_t  pcie_dpm_enable_mask;
+};
+
+struct smu7_pcie_perf_range {
+	uint16_t  max;
+	uint16_t  min;
+};
+
+struct smu7_hwmgr {
+	struct smu7_dpm_table			dpm_table;
+	struct smu7_dpm_table			golden_dpm_table;
+
+	uint32_t						voting_rights_clients0;
+	uint32_t						voting_rights_clients1;
+	uint32_t						voting_rights_clients2;
+	uint32_t						voting_rights_clients3;
+	uint32_t						voting_rights_clients4;
+	uint32_t						voting_rights_clients5;
+	uint32_t						voting_rights_clients6;
+	uint32_t						voting_rights_clients7;
+	uint32_t						static_screen_threshold_unit;
+	uint32_t						static_screen_threshold;
+	uint32_t						voltage_control;
+	uint32_t						vdd_gfx_control;
+	uint32_t						vddc_vddgfx_delta;
+	uint32_t						active_auto_throttle_sources;
+
+	struct smu7_clock_registers            clock_registers;
+
+	bool                           is_memory_gddr5;
+	uint16_t                       acpi_vddc;
+	bool                           pspp_notify_required;
+	uint16_t                       force_pcie_gen;
+	uint16_t                       acpi_pcie_gen;
+	uint32_t                       pcie_gen_cap;
+	uint32_t                       pcie_lane_cap;
+	uint32_t                       pcie_spc_cap;
+	struct smu7_leakage_voltage          vddc_leakage;
+	struct smu7_leakage_voltage          vddci_leakage;
+	struct smu7_leakage_voltage          vddcgfx_leakage;
+
+	uint32_t                             mvdd_control;
+	uint32_t                             vddc_mask_low;
+	uint32_t                             mvdd_mask_low;
+	uint16_t                            max_vddc_in_pptable;
+	uint16_t                            min_vddc_in_pptable;
+	uint16_t                            max_vddci_in_pptable;
+	uint16_t                            min_vddci_in_pptable;
+	bool                                is_uvd_enabled;
+	struct smu7_vbios_boot_state        vbios_boot_state;
+
+	bool                           pcie_performance_request;
+	bool                           battery_state;
+	bool                           is_tlu_enabled;
+	bool                           disable_handshake;
+	bool                           smc_voltage_control_enabled;
+	bool                           vbi_time_out_support;
+
+	uint32_t                       soft_regs_start;
+	/* ---- Stuff originally coming from Evergreen ---- */
+	uint32_t                             vddci_control;
+	struct pp_atomctrl_voltage_table     vddc_voltage_table;
+	struct pp_atomctrl_voltage_table     vddci_voltage_table;
+	struct pp_atomctrl_voltage_table     mvdd_voltage_table;
+	struct pp_atomctrl_voltage_table     vddgfx_voltage_table;
+
+	uint32_t                             mgcg_cgtt_local2;
+	uint32_t                             mgcg_cgtt_local3;
+	uint32_t                             gpio_debug;
+	uint32_t                             mc_micro_code_feature;
+	uint32_t                             highest_mclk;
+	uint16_t                             acpi_vddci;
+	uint8_t                              mvdd_high_index;
+	uint8_t                              mvdd_low_index;
+	bool                                 dll_default_on;
+	bool                                 performance_request_registered;
+
+	/* ---- Low Power Features ---- */
+	bool                           ulv_supported;
+
+	/* ---- CAC Stuff ---- */
+	uint32_t                       cac_table_start;
+	bool                           cac_configuration_required;
+	bool                           driver_calculate_cac_leakage;
+	bool                           cac_enabled;
+
+	/* ---- DPM2 Parameters ---- */
+	uint32_t                       power_containment_features;
+	bool                           enable_dte_feature;
+	bool                           enable_tdc_limit_feature;
+	bool                           enable_pkg_pwr_tracking_feature;
+	bool                           disable_uvd_power_tune_feature;
+
+
+	uint32_t                       dte_tj_offset;
+	uint32_t                       fast_watermark_threshold;
+
+	/* ---- Phase Shedding ---- */
+	bool                           vddc_phase_shed_control;
+
+	/* ---- DI/DT ---- */
+	struct smu7_display_timing        display_timing;
+
+	/* ---- Thermal Temperature Setting ---- */
+	struct smu7_thermal_temperature_setting  thermal_temp_setting;
+	struct smu7_dpmlevel_enable_mask     dpm_level_enable_mask;
+	uint32_t                                  need_update_smu7_dpm_table;
+	uint32_t                                  sclk_dpm_key_disabled;
+	uint32_t                                  mclk_dpm_key_disabled;
+	uint32_t                                  pcie_dpm_key_disabled;
+	uint32_t                                  min_engine_clocks;
+	struct smu7_pcie_perf_range          pcie_gen_performance;
+	struct smu7_pcie_perf_range          pcie_lane_performance;
+	struct smu7_pcie_perf_range          pcie_gen_power_saving;
+	struct smu7_pcie_perf_range          pcie_lane_power_saving;
+	bool                                      use_pcie_performance_levels;
+	bool                                      use_pcie_power_saving_levels;
+	uint32_t                                  mclk_activity_target;
+	uint32_t                                  mclk_dpm0_activity_target;
+	uint32_t                                  low_sclk_interrupt_threshold;
+	uint32_t                                  last_mclk_dpm_enable_mask;
+	bool                                      uvd_enabled;
+
+	/* ---- Power Gating States ---- */
+	bool                           uvd_power_gated;
+	bool                           vce_power_gated;
+	bool                           samu_power_gated;
+	bool                           need_long_memory_training;
+
+	/* Application power optimization parameters */
+	bool                               update_up_hyst;
+	bool                               update_down_hyst;
+	uint32_t                           down_hyst;
+	uint32_t                           up_hyst;
+	uint32_t disable_dpm_mask;
+	bool apply_optimized_settings;
+
+	uint32_t                              avfs_vdroop_override_setting;
+	bool                                  apply_avfs_cks_off_voltage;
+	uint32_t                              frame_time_x2;
+	uint16_t                              mem_latency_high;
+	uint16_t                              mem_latency_low;
+};
+
+/* To convert to Q8.8 format for firmware */
+#define SMU7_Q88_FORMAT_CONVERSION_UNIT             256
+
+enum SMU7_I2CLineID {
+	SMU7_I2CLineID_DDC1 = 0x90,
+	SMU7_I2CLineID_DDC2 = 0x91,
+	SMU7_I2CLineID_DDC3 = 0x92,
+	SMU7_I2CLineID_DDC4 = 0x93,
+	SMU7_I2CLineID_DDC5 = 0x94,
+	SMU7_I2CLineID_DDC6 = 0x95,
+	SMU7_I2CLineID_SCLSDA = 0x96,
+	SMU7_I2CLineID_DDCVGA = 0x97
+};
+
+#define SMU7_I2C_DDC1DATA          0
+#define SMU7_I2C_DDC1CLK           1
+#define SMU7_I2C_DDC2DATA          2
+#define SMU7_I2C_DDC2CLK           3
+#define SMU7_I2C_DDC3DATA          4
+#define SMU7_I2C_DDC3CLK           5
+#define SMU7_I2C_SDA               40
+#define SMU7_I2C_SCL               41
+#define SMU7_I2C_DDC4DATA          65
+#define SMU7_I2C_DDC4CLK           66
+#define SMU7_I2C_DDC5DATA          0x48
+#define SMU7_I2C_DDC5CLK           0x49
+#define SMU7_I2C_DDC6DATA          0x4a
+#define SMU7_I2C_DDC6CLK           0x4b
+#define SMU7_I2C_DDCVGADATA        0x4c
+#define SMU7_I2C_DDCVGACLK         0x4d
+
+#define SMU7_UNUSED_GPIO_PIN       0x7F
+uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr);
+uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
+		uint32_t clock_insr);
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
similarity index 62%
rename from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
rename to drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index b9cb240..41b634f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -20,546 +20,364 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-
 #include "hwmgr.h"
 #include "smumgr.h"
-#include "polaris10_hwmgr.h"
-#include "polaris10_powertune.h"
-#include "polaris10_smumgr.h"
-#include "smu74_discrete.h"
+#include "smu7_hwmgr.h"
+#include "smu7_powertune.h"
 #include "pp_debug.h"
-#include "gca/gfx_8_0_d.h"
-#include "gca/gfx_8_0_sh_mask.h"
-#include "oss/oss_3_0_sh_mask.h"
+#include "smu7_common.h"
 
 #define VOLTAGE_SCALE  4
-#define POWERTUNE_DEFAULT_SET_MAX    1
 
-uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
+static uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
 
-struct polaris10_pt_config_reg GCCACConfig_Polaris10[] = {
+static const struct gpu_pt_config_reg GCCACConfig_Polaris10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  *      Offset                             Mask                                                Shift                                               Value       Type
  * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  */
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00060013, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00860013, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01060013, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01860013, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02060013, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02860013, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03060013, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03860013, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x04060013, POLARIS10_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00060013, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00860013, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01060013, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01860013, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02060013, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02860013, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03060013, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03860013, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x04060013, GPU_CONFIGREG_GC_CAC_IND },
 
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x000E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x008E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x010E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x018E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x020E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x000E0013, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x008E0013, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x010E0013, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x018E0013, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x020E0013, GPU_CONFIGREG_GC_CAC_IND },
 
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00100013, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00900013, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01100013, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01900013, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02100013, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02900013, POLARIS10_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00100013, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00900013, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01100013, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01900013, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02100013, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02900013, GPU_CONFIGREG_GC_CAC_IND },
 
 	{   0xFFFFFFFF  }
 };
 
-struct polaris10_pt_config_reg GCCACConfig_Polaris11[] = {
+static const struct gpu_pt_config_reg GCCACConfig_Polaris11[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  *      Offset                             Mask                                                Shift                                               Value       Type
  * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  */
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00060011, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00860011, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01060011, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01860011, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02060011, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02860011, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03060011, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03860011, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x04060011, POLARIS10_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00060011, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00860011, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01060011, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01860011, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02060011, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02860011, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03060011, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03860011, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x04060011, GPU_CONFIGREG_GC_CAC_IND },
 
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x000E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x008E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x010E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x018E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x020E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x000E0011, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x008E0011, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x010E0011, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x018E0011, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x020E0011, GPU_CONFIGREG_GC_CAC_IND },
 
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00100011, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00900011, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01100011, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01900011, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02100011, POLARIS10_CONFIGREG_GC_CAC_IND },
-	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02900011, POLARIS10_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00100011, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00900011, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01100011, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01900011, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02100011, GPU_CONFIGREG_GC_CAC_IND },
+	{   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02900011, GPU_CONFIGREG_GC_CAC_IND },
 
 	{   0xFFFFFFFF  }
 };
 
-struct polaris10_pt_config_reg DIDTConfig_Polaris10[] = {
+static const struct gpu_pt_config_reg DIDTConfig_Polaris10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  *      Offset                             Mask                                                Shift                                               Value       Type
  * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  */
-	{   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT,                  0x0073,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT,                  0x00ab,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0084,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT,                  0x005a,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT,                  0x0073,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT,                  0x00ab,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0084,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT,                  0x005a,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0067,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT,                  0x0084,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0027,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0046,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0067,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT,                  0x0084,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0027,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0046,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT,                 0x00aa,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT,                 0x00aa,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MIN_POWER_MASK,                      DIDT_SQ_CTRL1__MIN_POWER__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MAX_POWER_MASK,                      DIDT_SQ_CTRL1__MAX_POWER__SHIFT,                    0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MIN_POWER_MASK,                      DIDT_SQ_CTRL1__MIN_POWER__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MAX_POWER_MASK,                      DIDT_SQ_CTRL1__MAX_POWER__SHIFT,                    0xffff,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__UNUSED_0_MASK,                    DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__UNUSED_0_MASK,                    DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0xffff,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3853,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_0_MASK,                       DIDT_SQ_CTRL2__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x005a,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_1_MASK,                       DIDT_SQ_CTRL2__UNUSED_1__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_2_MASK,                       DIDT_SQ_CTRL2__UNUSED_2__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3853,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_0_MASK,                       DIDT_SQ_CTRL2__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x005a,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_1_MASK,                       DIDT_SQ_CTRL2__UNUSED_1__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_2_MASK,                       DIDT_SQ_CTRL2__UNUSED_2__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__UNUSED_0_MASK,                  DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__UNUSED_0_MASK,                  DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x3853,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x3153,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x3853,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x3153,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__PHASE_OFFSET_MASK,                   DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__UNUSED_0_MASK,                       DIDT_SQ_CTRL0__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__PHASE_OFFSET_MASK,                   DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__UNUSED_0_MASK,                       DIDT_SQ_CTRL0__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT,                  0x000a,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT,                  0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0017,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT,                  0x002f,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT,                  0x000a,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT,                  0x0010,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0017,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT,                  0x002f,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0046,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT,                  0x005d,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0046,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT,                  0x005d,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MIN_POWER_MASK,                      DIDT_TD_CTRL1__MIN_POWER__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MAX_POWER_MASK,                      DIDT_TD_CTRL1__MAX_POWER__SHIFT,                    0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MIN_POWER_MASK,                      DIDT_TD_CTRL1__MIN_POWER__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MAX_POWER_MASK,                      DIDT_TD_CTRL1__MAX_POWER__SHIFT,                    0xffff,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__UNUSED_0_MASK,                    DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0x00ff,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__UNUSED_0_MASK,                    DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0x00ff,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3fff,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_0_MASK,                       DIDT_TD_CTRL2__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x000f,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_1_MASK,                       DIDT_TD_CTRL2__UNUSED_1__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_2_MASK,                       DIDT_TD_CTRL2__UNUSED_2__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3fff,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_0_MASK,                       DIDT_TD_CTRL2__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x000f,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_1_MASK,                       DIDT_TD_CTRL2__UNUSED_1__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_2_MASK,                       DIDT_TD_CTRL2__UNUSED_2__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__UNUSED_0_MASK,                  DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__UNUSED_0_MASK,                  DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x0dde,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x0dde,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x0dde,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x0dde,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__PHASE_OFFSET_MASK,                   DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0009,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0009,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__UNUSED_0_MASK,                       DIDT_TD_CTRL0__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__PHASE_OFFSET_MASK,                   DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0009,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0009,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__UNUSED_0_MASK,                       DIDT_TD_CTRL0__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT,                 0x0004,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT,                 0x0037,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT,                 0x00ff,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT,                 0x0004,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT,                 0x0037,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT,                 0x00ff,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT,                 0x0054,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT,                 0x0054,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MIN_POWER_MASK,                     DIDT_TCP_CTRL1__MIN_POWER__SHIFT,                   0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MAX_POWER_MASK,                     DIDT_TCP_CTRL1__MAX_POWER__SHIFT,                   0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MIN_POWER_MASK,                     DIDT_TCP_CTRL1__MIN_POWER__SHIFT,                   0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MAX_POWER_MASK,                     DIDT_TCP_CTRL1__MAX_POWER__SHIFT,                   0xffff,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__UNUSED_0_MASK,                   DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK,              DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT,            0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__UNUSED_0_MASK,                   DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK,              DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT,            0xffff,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK,               DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT,             0x3dde,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_0_MASK,                      DIDT_TCP_CTRL2__UNUSED_0__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,      DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,    0x0032,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_1_MASK,                      DIDT_TCP_CTRL2__UNUSED_1__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,      DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_2_MASK,                      DIDT_TCP_CTRL2__UNUSED_2__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK,               DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT,             0x3dde,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_0_MASK,                      DIDT_TCP_CTRL2__UNUSED_0__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,      DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,    0x0032,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_1_MASK,                      DIDT_TCP_CTRL2__UNUSED_1__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,      DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,    0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_2_MASK,                      DIDT_TCP_CTRL2__UNUSED_2__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,   DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,    0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,    0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,  DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__UNUSED_0_MASK,                 DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,   DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,    0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,    0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,  DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__UNUSED_0_MASK,                 DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,      DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,    0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,    0x3dde,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,    0x3dde,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK,                DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT,              0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,      DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,    0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,    0x3dde,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,    0x3dde,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK,                DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT,              0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__PHASE_OFFSET_MASK,                   DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__UNUSED_0_MASK,                       DIDT_TCP_CTRL0__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__PHASE_OFFSET_MASK,                   DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__UNUSED_0_MASK,                       DIDT_TCP_CTRL0__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
 
 	{   0xFFFFFFFF  }
 };
 
-struct polaris10_pt_config_reg DIDTConfig_Polaris11[] = {
+static const struct gpu_pt_config_reg DIDTConfig_Polaris11[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  *      Offset                             Mask                                                Shift                                               Value       Type
  * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  */
-	{   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT,                  0x0073,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT,                  0x00ab,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0084,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT,                  0x005a,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT,                  0x0073,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT,                  0x00ab,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0084,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT,                  0x005a,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0067,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT,                  0x0084,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0027,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0046,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0067,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT,                  0x0084,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0027,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0046,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT,                 0x00aa,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT,                 0x00aa,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MIN_POWER_MASK,                      DIDT_SQ_CTRL1__MIN_POWER__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MAX_POWER_MASK,                      DIDT_SQ_CTRL1__MAX_POWER__SHIFT,                    0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MIN_POWER_MASK,                      DIDT_SQ_CTRL1__MIN_POWER__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MAX_POWER_MASK,                      DIDT_SQ_CTRL1__MAX_POWER__SHIFT,                    0xffff,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__UNUSED_0_MASK,                    DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__UNUSED_0_MASK,                    DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0xffff,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3853,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_0_MASK,                       DIDT_SQ_CTRL2__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x005a,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_1_MASK,                       DIDT_SQ_CTRL2__UNUSED_1__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_2_MASK,                       DIDT_SQ_CTRL2__UNUSED_2__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3853,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_0_MASK,                       DIDT_SQ_CTRL2__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x005a,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_1_MASK,                       DIDT_SQ_CTRL2__UNUSED_1__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_2_MASK,                       DIDT_SQ_CTRL2__UNUSED_2__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__UNUSED_0_MASK,                  DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__UNUSED_0_MASK,                  DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x3853,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x3153,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x3853,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x3153,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__PHASE_OFFSET_MASK,                   DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__UNUSED_0_MASK,                       DIDT_SQ_CTRL0__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__PHASE_OFFSET_MASK,                   DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__UNUSED_0_MASK,                       DIDT_SQ_CTRL0__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT,                  0x000a,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT,                  0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0017,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT,                  0x002f,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT,                  0x000a,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT,                  0x0010,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0017,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT,                  0x002f,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0046,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT,                  0x005d,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0046,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT,                  0x005d,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MIN_POWER_MASK,                      DIDT_TD_CTRL1__MIN_POWER__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MAX_POWER_MASK,                      DIDT_TD_CTRL1__MAX_POWER__SHIFT,                    0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MIN_POWER_MASK,                      DIDT_TD_CTRL1__MIN_POWER__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MAX_POWER_MASK,                      DIDT_TD_CTRL1__MAX_POWER__SHIFT,                    0xffff,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__UNUSED_0_MASK,                    DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0x00ff,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__UNUSED_0_MASK,                    DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0x00ff,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3fff,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_0_MASK,                       DIDT_TD_CTRL2__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x000f,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_1_MASK,                       DIDT_TD_CTRL2__UNUSED_1__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_2_MASK,                       DIDT_TD_CTRL2__UNUSED_2__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3fff,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_0_MASK,                       DIDT_TD_CTRL2__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x000f,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_1_MASK,                       DIDT_TD_CTRL2__UNUSED_1__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_2_MASK,                       DIDT_TD_CTRL2__UNUSED_2__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__UNUSED_0_MASK,                  DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__UNUSED_0_MASK,                  DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x0dde,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x0dde,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x0dde,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x0dde,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__PHASE_OFFSET_MASK,                   DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0008,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0008,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__UNUSED_0_MASK,                       DIDT_TD_CTRL0__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__PHASE_OFFSET_MASK,                   DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0008,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0008,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__UNUSED_0_MASK,                       DIDT_TD_CTRL0__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT,                 0x0004,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT,                 0x0037,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT,                 0x00ff,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT,                 0x0004,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT,                 0x0037,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT,                 0x00ff,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT,                 0x0054,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT,                 0x0054,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MIN_POWER_MASK,                     DIDT_TCP_CTRL1__MIN_POWER__SHIFT,                   0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MAX_POWER_MASK,                     DIDT_TCP_CTRL1__MAX_POWER__SHIFT,                   0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MIN_POWER_MASK,                     DIDT_TCP_CTRL1__MIN_POWER__SHIFT,                   0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MAX_POWER_MASK,                     DIDT_TCP_CTRL1__MAX_POWER__SHIFT,                   0xffff,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__UNUSED_0_MASK,                   DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK,              DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT,            0xffff,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__UNUSED_0_MASK,                   DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK,              DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT,            0xffff,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK,               DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT,             0x3dde,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_0_MASK,                      DIDT_TCP_CTRL2__UNUSED_0__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,      DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,    0x0032,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_1_MASK,                      DIDT_TCP_CTRL2__UNUSED_1__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,      DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_2_MASK,                      DIDT_TCP_CTRL2__UNUSED_2__SHIFT,                    0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK,               DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT,             0x3dde,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_0_MASK,                      DIDT_TCP_CTRL2__UNUSED_0__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,      DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,    0x0032,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_1_MASK,                      DIDT_TCP_CTRL2__UNUSED_1__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,      DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,    0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_2_MASK,                      DIDT_TCP_CTRL2__UNUSED_2__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,   DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,    0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,    0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,  DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__UNUSED_0_MASK,                 DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT,               0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,   DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,    0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,    0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,  DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__UNUSED_0_MASK,                 DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,      DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,    0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,    0x3dde,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,    0x3dde,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK,                DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT,              0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,      DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,    0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,    0x3dde,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,    0x3dde,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK,                DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT,              0x0000,     GPU_CONFIGREG_DIDT_IND },
 
-	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__PHASE_OFFSET_MASK,                   DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0010,     POLARIS10_CONFIGREG_DIDT_IND },
-	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__UNUSED_0_MASK,                       DIDT_TCP_CTRL0__UNUSED_0__SHIFT,                     0x0000,     POLARIS10_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__PHASE_OFFSET_MASK,                   DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
+	{   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__UNUSED_0_MASK,                       DIDT_TCP_CTRL0__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
 	{   0xFFFFFFFF  }
 };
 
-static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
-	/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
-	 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
-	{ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
-	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
-	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
-};
 
-void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *polaris10_hwmgr = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct  phm_ppt_v1_information *table_info =
-			(struct  phm_ppt_v1_information *)(hwmgr->pptable);
-
-	if (table_info &&
-			table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
-			table_info->cac_dtp_table->usPowerTuneDataSetID)
-		polaris10_hwmgr->power_tune_defaults =
-				&polaris10_power_tune_data_set_array
-				[table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
-	else
-		polaris10_hwmgr->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
-
-}
-
-static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
-{
-	uint32_t tmp;
-	tmp = raw_setting * 4096 / 100;
-	return (uint16_t)tmp;
-}
-
-int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
-	SMU74_Discrete_DpmTable  *dpm_table = &(data->smc_state_table);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
-	struct pp_advance_fan_control_parameters *fan_table=
-			&hwmgr->thermal_controller.advanceFanControlParameters;
-	int i, j, k;
-	const uint16_t *pdef1;
-	const uint16_t *pdef2;
-
-	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
-	dpm_table->TargetTdp  = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
-
-	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
-				"Target Operating Temp is out of Range!",
-				);
-
-	dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
-			cac_dtp_table->usTargetOperatingTemp * 256);
-	dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
-			cac_dtp_table->usTemperatureLimitHotspot * 256);
-	dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
-			scale_fan_gain_settings(fan_table->usFanGainEdge));
-	dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
-			scale_fan_gain_settings(fan_table->usFanGainHotspot));
-
-	pdef1 = defaults->BAPMTI_R;
-	pdef2 = defaults->BAPMTI_RC;
-
-	for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
-		for (j = 0; j < SMU74_DTE_SOURCES; j++) {
-			for (k = 0; k < SMU74_DTE_SINKS; k++) {
-				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
-				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
-				pdef1++;
-				pdef2++;
-			}
-		}
-	}
-
-	return 0;
-}
-
-static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
-
-	data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
-	data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
-	data->power_tune_table.SviLoadLineTrimVddC = 3;
-	data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
-	return 0;
-}
-
-static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
-	uint16_t tdc_limit;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
-
-	tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
-	data->power_tune_table.TDC_VDDC_PkgLimit =
-			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
-	data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
-			defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
-	data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
-
-	return 0;
-}
-
-static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
-	uint32_t temp;
-
-	if (polaris10_read_smc_sram_dword(hwmgr->smumgr,
-			fuse_table_offset +
-			offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
-			(uint32_t *)&temp, data->sram_end))
-		PP_ASSERT_WITH_CODE(false,
-				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
-				return -EINVAL);
-	else {
-		data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
-		data->power_tune_table.LPMLTemperatureMin =
-				(uint8_t)((temp >> 16) & 0xff);
-		data->power_tune_table.LPMLTemperatureMax =
-				(uint8_t)((temp >> 8) & 0xff);
-		data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
-	}
-	return 0;
-}
-
-static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
-	int i;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	/* Currently not used. Set all to zero. */
-	for (i = 0; i < 16; i++)
-		data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
-	return 0;
-}
-
-static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
-		|| 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
-		hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
-			hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
-	data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
-				hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
-	return 0;
-}
-
-static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
-	int i;
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	/* Currently not used. Set all to zero. */
-	for (i = 0; i < 16; i++)
-		data->power_tune_table.GnbLPML[i] = 0;
-
-	return 0;
-}
-
-static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
-	return 0;
-}
-
-static int polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
+static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
 {
 
 	uint32_t en = enable ? 1 : 0;
@@ -608,29 +426,29 @@
 	return result;
 }
 
-static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr,
-				struct polaris10_pt_config_reg *cac_config_regs)
+static int smu7_program_pt_config_registers(struct pp_hwmgr *hwmgr,
+				const struct gpu_pt_config_reg *cac_config_regs)
 {
-	struct polaris10_pt_config_reg *config_regs = cac_config_regs;
+	const struct gpu_pt_config_reg *config_regs = cac_config_regs;
 	uint32_t cache = 0;
 	uint32_t data = 0;
 
 	PP_ASSERT_WITH_CODE((config_regs != NULL), "Invalid config register table.", return -EINVAL);
 
 	while (config_regs->offset != 0xFFFFFFFF) {
-		if (config_regs->type == POLARIS10_CONFIGREG_CACHE)
+		if (config_regs->type == GPU_CONFIGREG_CACHE)
 			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
 		else {
 			switch (config_regs->type) {
-			case POLARIS10_CONFIGREG_SMC_IND:
+			case GPU_CONFIGREG_SMC_IND:
 				data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset);
 				break;
 
-			case POLARIS10_CONFIGREG_DIDT_IND:
+			case GPU_CONFIGREG_DIDT_IND:
 				data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
 				break;
 
-			case POLARIS10_CONFIGREG_GC_CAC_IND:
+			case GPU_CONFIGREG_GC_CAC_IND:
 				data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
 				break;
 
@@ -644,15 +462,15 @@
 			data |= cache;
 
 			switch (config_regs->type) {
-			case POLARIS10_CONFIGREG_SMC_IND:
+			case GPU_CONFIGREG_SMC_IND:
 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset, data);
 				break;
 
-			case POLARIS10_CONFIGREG_DIDT_IND:
+			case GPU_CONFIGREG_DIDT_IND:
 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
 				break;
 
-			case POLARIS10_CONFIGREG_GC_CAC_IND:
+			case GPU_CONFIGREG_GC_CAC_IND:
 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
 				break;
 
@@ -669,7 +487,7 @@
 	return 0;
 }
 
-int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr)
+int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
 {
 	int result;
 	uint32_t num_se = 0;
@@ -699,20 +517,20 @@
 			cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value);
 
 			if (hwmgr->chip_id == CHIP_POLARIS10) {
-				result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10);
+				result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10);
 				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
-				result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
+				result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
 				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
 			} else if (hwmgr->chip_id == CHIP_POLARIS11) {
-				result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
+				result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
 				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
-				result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
+				result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
 				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
 			}
 		}
 		cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);
 
-		result = polaris10_enable_didt(hwmgr, true);
+		result = smu7_enable_didt(hwmgr, true);
 		PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result);
 
 		/* TO DO Post DIDT enable clock gating */
@@ -721,7 +539,7 @@
 	return 0;
 }
 
-int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr)
+int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
 {
 	int result;
 
@@ -731,7 +549,7 @@
 		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
 		/* TO DO Pre DIDT disable clock gating */
 
-		result = polaris10_enable_didt(hwmgr, false);
+		result = smu7_enable_didt(hwmgr, false);
 		PP_ASSERT_WITH_CODE((result == 0), "Post DIDT enable clock gating failed.", return result);
 		/* TO DO Post DIDT enable clock gating */
 	}
@@ -739,95 +557,9 @@
 	return 0;
 }
 
-
-static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr)
 {
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	uint16_t hi_sidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
-	uint16_t lo_sidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
-	struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
-	hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
-	lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
-	data->power_tune_table.BapmVddCBaseLeakageHiSidd =
-			CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
-	data->power_tune_table.BapmVddCBaseLeakageLoSidd =
-			CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
-
-	return 0;
-}
-
-int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint32_t pm_fuse_table_offset;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_PowerContainment)) {
-		if (polaris10_read_smc_sram_dword(hwmgr->smumgr,
-				SMU7_FIRMWARE_HEADER_LOCATION +
-				offsetof(SMU74_Firmware_Header, PmFuseTable),
-				&pm_fuse_table_offset, data->sram_end))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to get pm_fuse_table_offset Failed!",
-					return -EINVAL);
-
-		if (polaris10_populate_svi_load_line(hwmgr))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to populate SviLoadLine Failed!",
-					return -EINVAL);
-
-		if (polaris10_populate_tdc_limit(hwmgr))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to populate TDCLimit Failed!", return -EINVAL);
-
-		if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to populate TdcWaterfallCtl, "
-					"LPMLTemperature Min and Max Failed!",
-					return -EINVAL);
-
-		if (0 != polaris10_populate_temperature_scaler(hwmgr))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to populate LPMLTemperatureScaler Failed!",
-					return -EINVAL);
-
-		if (polaris10_populate_fuzzy_fan(hwmgr))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to populate Fuzzy Fan Control parameters Failed!",
-					return -EINVAL);
-
-		if (polaris10_populate_gnb_lpml(hwmgr))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to populate GnbLPML Failed!",
-					return -EINVAL);
-
-		if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to populate GnbLPML Min and Max Vid Failed!",
-					return -EINVAL);
-
-		if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to populate BapmVddCBaseLeakage Hi and Lo "
-					"Sidd Failed!", return -EINVAL);
-
-		if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
-				(uint8_t *)&data->power_tune_table,
-				(sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
-			PP_ASSERT_WITH_CODE(false,
-					"Attempt to download PmFuseTable Failed!",
-					return -EINVAL);
-	}
-	return 0;
-}
-
-int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	int result = 0;
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -843,9 +575,9 @@
 	return result;
 }
 
-int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr)
+int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr)
 {
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	int result = 0;
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -860,9 +592,9 @@
 	return result;
 }
 
-int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
+int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
 {
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
 	if (data->power_containment_features &
 			POWERCONTAINMENT_FEATURE_PkgPwrLimit)
@@ -871,21 +603,27 @@
 	return 0;
 }
 
-static int polaris10_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
+static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
 {
 	return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
 			PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
 }
 
-int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
+int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
 {
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
 	int smc_result;
 	int result = 0;
+	struct phm_cac_tdp_table *cac_table;
 
 	data->power_containment_features = 0;
+	if (hwmgr->pp_table_version == PP_TABLE_V1)
+		cac_table = table_info->cac_dtp_table;
+	else
+		cac_table = hwmgr->dyn_state.cac_dtp_table;
+
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_PowerContainment)) {
 
@@ -905,15 +643,13 @@
 			PP_ASSERT_WITH_CODE((0 == smc_result),
 					"Failed to enable PkgPwrTracking in SMC.", result = -1;);
 			if (0 == smc_result) {
-				struct phm_cac_tdp_table *cac_table =
-						table_info->cac_dtp_table;
 				uint32_t default_limit =
 					(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
 
 				data->power_containment_features |=
 						POWERCONTAINMENT_FEATURE_PkgPwrLimit;
 
-				if (polaris10_set_power_limit(hwmgr, default_limit))
+				if (smu7_set_power_limit(hwmgr, default_limit))
 					printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
 			}
 		}
@@ -921,9 +657,9 @@
 	return result;
 }
 
-int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr)
+int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
 {
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	int result = 0;
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -963,14 +699,19 @@
 	return result;
 }
 
-int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr)
+int smu7_power_control_set_level(struct pp_hwmgr *hwmgr)
 {
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+	struct phm_cac_tdp_table *cac_table;
+
 	int adjust_percent, target_tdp;
 	int result = 0;
 
+	if (hwmgr->pp_table_version == PP_TABLE_V1)
+		cac_table = table_info->cac_dtp_table;
+	else
+		cac_table = hwmgr->dyn_state.cac_dtp_table;
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_PowerContainment)) {
 		/* adjustment percentage has already been validated */
@@ -981,7 +722,7 @@
 		 * but message to be 8 bit fraction for messages
 		 */
 		target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
-		result = polaris10_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
+		result = smu7_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
 	}
 
 	return result;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h
new file mode 100644
index 0000000..22f86b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _SMU7_POWERTUNE_H
+#define _SMU7_POWERTUNE_H
+
+#define DIDT_SQ_CTRL0__UNUSED_0_MASK    0xfffc0000
+#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT  0x12
+#define DIDT_TD_CTRL0__UNUSED_0_MASK    0xfffc0000
+#define DIDT_TD_CTRL0__UNUSED_0__SHIFT  0x12
+#define DIDT_TCP_CTRL0__UNUSED_0_MASK   0xfffc0000
+#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x12
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK                 0xc0000000
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001e
+#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK                 0xc0000000
+#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001e
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK                0xc0000000
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT              0x0000001e
+
+/* PowerContainment Features */
+#define POWERCONTAINMENT_FEATURE_DTE             0x00000001
+#define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
+#define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
+
+#define ixGC_CAC_CNTL 0x0000
+#define ixDIDT_SQ_STALL_CTRL 0x0004
+#define ixDIDT_SQ_TUNING_CTRL 0x0005
+#define ixDIDT_TD_STALL_CTRL 0x0044
+#define ixDIDT_TD_TUNING_CTRL 0x0045
+#define ixDIDT_TCP_STALL_CTRL 0x0064
+#define ixDIDT_TCP_TUNING_CTRL 0x0065
+
+
+int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr);
+int smu7_enable_power_containment(struct pp_hwmgr *hwmgr);
+int smu7_disable_power_containment(struct pp_hwmgr *hwmgr);
+int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
+int smu7_power_control_set_level(struct pp_hwmgr *hwmgr);
+int smu7_enable_didt_config(struct pp_hwmgr *hwmgr);
+int smu7_disable_didt_config(struct pp_hwmgr *hwmgr);
+#endif  /* DGPU_POWERTUNE_H */
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
similarity index 65%
rename from drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
rename to drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index 92976b6..fb6c6f6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2016 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -20,18 +20,15 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include <asm/div64.h>
-#include "fiji_thermal.h"
-#include "fiji_hwmgr.h"
-#include "fiji_smumgr.h"
-#include "fiji_ppsmc.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
 
-int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
+#include <asm/div64.h>
+#include "smu7_thermal.h"
+#include "smu7_hwmgr.h"
+#include "smu7_common.h"
+
+int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
 		struct phm_fan_speed_info *fan_speed_info)
 {
-
 	if (hwmgr->thermal_controller.fanInfo.bNoFan)
 		return 0;
 
@@ -55,7 +52,7 @@
 	return 0;
 }
 
-int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
 		uint32_t *speed)
 {
 	uint32_t duty100;
@@ -84,7 +81,7 @@
 	return 0;
 }
 
-int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
+int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
 {
 	uint32_t tach_period;
 	uint32_t crystal_clock_freq;
@@ -100,9 +97,9 @@
 	if (tach_period == 0)
 		return -EINVAL;
 
-	crystal_clock_freq = tonga_get_xclk(hwmgr);
+	crystal_clock_freq = smu7_get_xclk(hwmgr);
 
-	*speed = 60 * crystal_clock_freq * 10000/ tach_period;
+	*speed = 60 * crystal_clock_freq * 10000 / tach_period;
 
 	return 0;
 }
@@ -113,7 +110,7 @@
 *           mode    the fan control mode, 0 default, 1 by percent, 5, by RPM
 * @exception Should always succeed.
 */
-int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
 {
 
 	if (hwmgr->fan_ctrl_is_in_default_mode) {
@@ -139,7 +136,7 @@
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @exception Should always succeed.
 */
-int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
+int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
 {
 	if (!hwmgr->fan_ctrl_is_in_default_mode) {
 		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -152,7 +149,7 @@
 	return 0;
 }
 
-int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
+static int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
 {
 	int result;
 
@@ -187,7 +184,7 @@
 }
 
 
-int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
+int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
 {
 	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
 }
@@ -198,7 +195,7 @@
 * @param    speed is the percentage value (0% - 100%) to be set.
 * @exception Fails is the 100% setting appears to be 0.
 */
-int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
 		uint32_t speed)
 {
 	uint32_t duty100;
@@ -213,7 +210,7 @@
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_MicrocodeFanControl))
-		fiji_fan_ctrl_stop_smc_fan_control(hwmgr);
+		smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
 
 	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 			CG_FDO_CTRL1, FMAX_DUTY100);
@@ -228,7 +225,7 @@
 	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 			CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
 
-	return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+	return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
 }
 
 /**
@@ -236,7 +233,7 @@
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @exception Always succeeds.
 */
-int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
+int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
 {
 	int result;
 
@@ -245,11 +242,11 @@
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_MicrocodeFanControl)) {
-		result = fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+		result = smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
 		if (!result)
-			result = fiji_fan_ctrl_start_smc_fan_control(hwmgr);
+			result = smu7_fan_ctrl_start_smc_fan_control(hwmgr);
 	} else
-		result = fiji_fan_ctrl_set_default_mode(hwmgr);
+		result = smu7_fan_ctrl_set_default_mode(hwmgr);
 
 	return result;
 }
@@ -260,7 +257,7 @@
 * @param    speed is the percentage value (min - max) to be set.
 * @exception Fails is the speed not lie between min and max.
 */
-int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
+int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
 {
 	uint32_t tach_period;
 	uint32_t crystal_clock_freq;
@@ -272,14 +269,18 @@
 			(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
 		return 0;
 
-	crystal_clock_freq = tonga_get_xclk(hwmgr);
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_MicrocodeFanControl))
+		smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
+
+	crystal_clock_freq = smu7_get_xclk(hwmgr);
 
 	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
 
 	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 				CG_TACH_STATUS, TACH_PERIOD, tach_period);
 
-	return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+	return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
 }
 
 /**
@@ -287,7 +288,7 @@
 *
 * @param    hwmgr The address of the hardware manager.
 */
-int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr)
 {
 	int temp;
 
@@ -296,7 +297,7 @@
 
 	/* Bit 9 means the reading is lower than the lowest usable value. */
 	if (temp & 0x200)
-		temp = FIJI_THERMAL_MAXIMUM_TEMP_READING;
+		temp = SMU7_THERMAL_MAXIMUM_TEMP_READING;
 	else
 		temp = temp & 0x1ff;
 
@@ -312,12 +313,12 @@
 * @param    range Temperature range to be programmed for high and low alert signals
 * @exception PP_Result_BadInput if the input data is not valid.
 */
-static int fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+static int smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
 		uint32_t low_temp, uint32_t high_temp)
 {
-	uint32_t low = FIJI_THERMAL_MINIMUM_ALERT_TEMP *
+	uint32_t low = SMU7_THERMAL_MINIMUM_ALERT_TEMP *
 			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-	uint32_t high = FIJI_THERMAL_MAXIMUM_ALERT_TEMP *
+	uint32_t high = SMU7_THERMAL_MAXIMUM_ALERT_TEMP *
 			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 
 	if (low < low_temp)
@@ -346,7 +347,7 @@
 *
 * @param    hwmgr The address of the hardware manager.
 */
-static int fiji_thermal_initialize(struct pp_hwmgr *hwmgr)
+static int smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
 {
 	if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
 		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -365,13 +366,13 @@
 *
 * @param    hwmgr The address of the hardware manager.
 */
-static int fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr)
+int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
 {
 	uint32_t alert;
 
 	alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 			CG_THERMAL_INT, THERM_INT_MASK);
-	alert &= ~(FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK);
+	alert &= ~(SMU7_THERMAL_HIGH_ALERT_MASK | SMU7_THERMAL_LOW_ALERT_MASK);
 	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 			CG_THERMAL_INT, THERM_INT_MASK, alert);
 
@@ -383,13 +384,13 @@
 * Disable thermal alerts on the RV770 thermal controller.
 * @param    hwmgr The address of the hardware manager.
 */
-static int fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr)
+int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr)
 {
 	uint32_t alert;
 
 	alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 			CG_THERMAL_INT, THERM_INT_MASK);
-	alert |= (FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK);
+	alert |= (SMU7_THERMAL_HIGH_ALERT_MASK | SMU7_THERMAL_LOW_ALERT_MASK);
 	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 			CG_THERMAL_INT, THERM_INT_MASK, alert);
 
@@ -402,129 +403,17 @@
 * Currently just disables alerts.
 * @param    hwmgr The address of the hardware manager.
 */
-int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
+int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
 {
-	int result = fiji_thermal_disable_alert(hwmgr);
+	int result = smu7_thermal_disable_alert(hwmgr);
 
-	if (hwmgr->thermal_controller.fanInfo.bNoFan)
-		fiji_fan_ctrl_set_default_mode(hwmgr);
+	if (!hwmgr->thermal_controller.fanInfo.bNoFan)
+		smu7_fan_ctrl_set_default_mode(hwmgr);
 
 	return result;
 }
 
 /**
-* Set up the fan table to control the fan using the SMC.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from set temperature range routine
-*/
-int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
-		void *input, void *output, void *storage, int result)
-{
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
-	uint32_t duty100;
-	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
-	uint16_t fdo_min, slope1, slope2;
-	uint32_t reference_clock;
-	int res;
-	uint64_t tmp64;
-
-	if (data->fan_table_start == 0) {
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_MicrocodeFanControl);
-		return 0;
-	}
-
-	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-			CG_FDO_CTRL1, FMAX_DUTY100);
-
-	if (duty100 == 0) {
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_MicrocodeFanControl);
-		return 0;
-	}
-
-	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
-			usPWMMin * duty100;
-	do_div(tmp64, 10000);
-	fdo_min = (uint16_t)tmp64;
-
-	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
-			hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
-	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
-			hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
-	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
-			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
-	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
-			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
-	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
-	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
-	fan_table.TempMin = cpu_to_be16((50 + hwmgr->
-			thermal_controller.advanceFanControlParameters.usTMin) / 100);
-	fan_table.TempMed = cpu_to_be16((50 + hwmgr->
-			thermal_controller.advanceFanControlParameters.usTMed) / 100);
-	fan_table.TempMax = cpu_to_be16((50 + hwmgr->
-			thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
-	fan_table.Slope1 = cpu_to_be16(slope1);
-	fan_table.Slope2 = cpu_to_be16(slope2);
-
-	fan_table.FdoMin = cpu_to_be16(fdo_min);
-
-	fan_table.HystDown = cpu_to_be16(hwmgr->
-			thermal_controller.advanceFanControlParameters.ucTHyst);
-
-	fan_table.HystUp = cpu_to_be16(1);
-
-	fan_table.HystSlope = cpu_to_be16(1);
-
-	fan_table.TempRespLim = cpu_to_be16(5);
-
-	reference_clock = tonga_get_xclk(hwmgr);
-
-	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
-			thermal_controller.advanceFanControlParameters.ulCycleDelay *
-			reference_clock) / 1600);
-
-	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
-	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
-			hwmgr->device, CGS_IND_REG__SMC,
-			CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
-	res = fiji_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start,
-			(uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
-			data->sram_end);
-
-	if (!res && hwmgr->thermal_controller.
-			advanceFanControlParameters.ucMinimumPWMLimit)
-		res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-				PPSMC_MSG_SetFanMinPwm,
-				hwmgr->thermal_controller.
-				advanceFanControlParameters.ucMinimumPWMLimit);
-
-	if (!res && hwmgr->thermal_controller.
-			advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
-		res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-				PPSMC_MSG_SetFanSclkTarget,
-				hwmgr->thermal_controller.
-				advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
-
-	if (res)
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_MicrocodeFanControl);
-
-	return 0;
-}
-
-/**
 * Start the fan control on the SMC.
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @param    pInput the pointer to input data
@@ -533,7 +422,7 @@
 * @param    Result the last failure code
 * @return   result from set temperature range routine
 */
-int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
+static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
 		void *input, void *output, void *storage, int result)
 {
 /* If the fantable setup has failed we could have disabled
@@ -543,8 +432,8 @@
 */
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_MicrocodeFanControl)) {
-		fiji_fan_ctrl_start_smc_fan_control(hwmgr);
-		fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+		smu7_fan_ctrl_start_smc_fan_control(hwmgr);
+		smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
 	}
 
 	return 0;
@@ -559,7 +448,7 @@
 * @param    Result the last failure code
 * @return   result from set temperature range routine
 */
-int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+static int tf_smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
 		void *input, void *output, void *storage, int result)
 {
 	struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
@@ -567,7 +456,7 @@
 	if (range == NULL)
 		return -EINVAL;
 
-	return fiji_thermal_set_temperature_range(hwmgr, range->min, range->max);
+	return smu7_thermal_set_temperature_range(hwmgr, range->min, range->max);
 }
 
 /**
@@ -579,10 +468,10 @@
 * @param    Result the last failure code
 * @return   result from initialize thermal controller routine
 */
-int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr,
+static int tf_smu7_thermal_initialize(struct pp_hwmgr *hwmgr,
 		void *input, void *output, void *storage, int result)
 {
-    return fiji_thermal_initialize(hwmgr);
+	return smu7_thermal_initialize(hwmgr);
 }
 
 /**
@@ -594,10 +483,10 @@
 * @param    Result the last failure code
 * @return   result from enable alert routine
 */
-int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr,
+static int tf_smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr,
 		void *input, void *output, void *storage, int result)
 {
-	return fiji_thermal_enable_alert(hwmgr);
+	return smu7_thermal_enable_alert(hwmgr);
 }
 
 /**
@@ -609,53 +498,54 @@
 * @param    Result the last failure code
 * @return   result from disable alert routine
 */
-static int tf_fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr,
+static int tf_smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr,
 		void *input, void *output, void *storage, int result)
 {
-	return fiji_thermal_disable_alert(hwmgr);
+	return smu7_thermal_disable_alert(hwmgr);
 }
 
 static const struct phm_master_table_item
-fiji_thermal_start_thermal_controller_master_list[] = {
-	{NULL, tf_fiji_thermal_initialize},
-	{NULL, tf_fiji_thermal_set_temperature_range},
-	{NULL, tf_fiji_thermal_enable_alert},
+phm_thermal_start_thermal_controller_master_list[] = {
+	{NULL, tf_smu7_thermal_initialize},
+	{NULL, tf_smu7_thermal_set_temperature_range},
+	{NULL, tf_smu7_thermal_enable_alert},
+	{NULL, smum_thermal_avfs_enable},
 /* We should restrict performance levels to low before we halt the SMC.
  * On the other hand we are still in boot state when we do this
  * so it would be pointless.
  * If this assumption changes we have to revisit this table.
  */
-	{NULL, tf_fiji_thermal_setup_fan_table},
-	{NULL, tf_fiji_thermal_start_smc_fan_control},
+	{NULL, smum_thermal_setup_fan_table},
+	{NULL, tf_smu7_thermal_start_smc_fan_control},
 	{NULL, NULL}
 };
 
 static const struct phm_master_table_header
-fiji_thermal_start_thermal_controller_master = {
+phm_thermal_start_thermal_controller_master = {
 	0,
 	PHM_MasterTableFlag_None,
-	fiji_thermal_start_thermal_controller_master_list
+	phm_thermal_start_thermal_controller_master_list
 };
 
 static const struct phm_master_table_item
-fiji_thermal_set_temperature_range_master_list[] = {
-	{NULL, tf_fiji_thermal_disable_alert},
-	{NULL, tf_fiji_thermal_set_temperature_range},
-	{NULL, tf_fiji_thermal_enable_alert},
+phm_thermal_set_temperature_range_master_list[] = {
+	{NULL, tf_smu7_thermal_disable_alert},
+	{NULL, tf_smu7_thermal_set_temperature_range},
+	{NULL, tf_smu7_thermal_enable_alert},
 	{NULL, NULL}
 };
 
 static const struct phm_master_table_header
-fiji_thermal_set_temperature_range_master = {
+phm_thermal_set_temperature_range_master = {
 	0,
 	PHM_MasterTableFlag_None,
-	fiji_thermal_set_temperature_range_master_list
+	phm_thermal_set_temperature_range_master_list
 };
 
-int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
+int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
 {
 	if (!hwmgr->thermal_controller.fanInfo.bNoFan)
-		fiji_fan_ctrl_set_default_mode(hwmgr);
+		smu7_fan_ctrl_set_default_mode(hwmgr);
 	return 0;
 }
 
@@ -664,17 +554,17 @@
 * @param    hwmgr The address of the hardware manager.
 * @exception Any error code from the low-level communication.
 */
-int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr)
+int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
 {
 	int result;
 
 	result = phm_construct_table(hwmgr,
-			&fiji_thermal_set_temperature_range_master,
+			&phm_thermal_set_temperature_range_master,
 			&(hwmgr->set_temperature_range));
 
 	if (!result) {
 		result = phm_construct_table(hwmgr,
-				&fiji_thermal_start_thermal_controller_master,
+				&phm_thermal_start_thermal_controller_master,
 				&(hwmgr->start_thermal_controller));
 		if (result)
 			phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
new file mode 100644
index 0000000..6face97
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _SMU7_THERMAL_H_
+#define _SMU7_THERMAL_H_
+
+#include "hwmgr.h"
+
+#define SMU7_THERMAL_HIGH_ALERT_MASK         0x1
+#define SMU7_THERMAL_LOW_ALERT_MASK          0x2
+
+#define SMU7_THERMAL_MINIMUM_TEMP_READING    -256
+#define SMU7_THERMAL_MAXIMUM_TEMP_READING    255
+
+#define SMU7_THERMAL_MINIMUM_ALERT_TEMP      0
+#define SMU7_THERMAL_MAXIMUM_ALERT_TEMP      255
+
+#define FDO_PWM_MODE_STATIC  1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+extern int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr);
+extern int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
+extern int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
+extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
+extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
+extern int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr);
+extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
+extern int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr);
+extern int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c
deleted file mode 100644
index e58d038..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "tonga_clockpowergating.h"
-#include "tonga_ppsmc.h"
-#include "tonga_hwmgr.h"
-
-int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
-{
-	if (phm_cf_want_uvd_power_gating(hwmgr))
-		return smum_send_msg_to_smc(hwmgr->smumgr,
-						     PPSMC_MSG_UVDPowerOFF);
-	return 0;
-}
-
-int tonga_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
-{
-	if (phm_cf_want_uvd_power_gating(hwmgr)) {
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				  PHM_PlatformCaps_UVDDynamicPowerGating)) {
-			return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-								PPSMC_MSG_UVDPowerON, 1);
-		} else {
-			return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-								PPSMC_MSG_UVDPowerON, 0);
-		}
-	}
-
-	return 0;
-}
-
-int tonga_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
-{
-	if (phm_cf_want_vce_power_gating(hwmgr))
-		return smum_send_msg_to_smc(hwmgr->smumgr,
-						  PPSMC_MSG_VCEPowerOFF);
-	return 0;
-}
-
-int tonga_phm_powerup_vce(struct pp_hwmgr *hwmgr)
-{
-	if (phm_cf_want_vce_power_gating(hwmgr))
-		return smum_send_msg_to_smc(hwmgr->smumgr,
-						  PPSMC_MSG_VCEPowerON);
-	return 0;
-}
-
-int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
-{
-	int ret = 0;
-
-	switch (block) {
-	case PHM_AsicBlock_UVD_MVC:
-	case PHM_AsicBlock_UVD:
-	case PHM_AsicBlock_UVD_HD:
-	case PHM_AsicBlock_UVD_SD:
-		if (gating == PHM_ClockGateSetting_StaticOff)
-			ret = tonga_phm_powerdown_uvd(hwmgr);
-		else
-			ret = tonga_phm_powerup_uvd(hwmgr);
-		break;
-	case PHM_AsicBlock_GFX:
-	default:
-		break;
-	}
-
-	return ret;
-}
-
-int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-	data->uvd_power_gated = false;
-	data->vce_power_gated = false;
-
-	tonga_phm_powerup_uvd(hwmgr);
-	tonga_phm_powerup_vce(hwmgr);
-
-	return 0;
-}
-
-int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-	if (data->uvd_power_gated == bgate)
-		return 0;
-
-	data->uvd_power_gated = bgate;
-
-	if (bgate) {
-		cgs_set_clockgating_state(hwmgr->device,
-						AMD_IP_BLOCK_TYPE_UVD,
-						AMD_CG_STATE_UNGATE);
-		cgs_set_powergating_state(hwmgr->device,
-						AMD_IP_BLOCK_TYPE_UVD,
-						AMD_PG_STATE_GATE);
-		tonga_update_uvd_dpm(hwmgr, true);
-		tonga_phm_powerdown_uvd(hwmgr);
-	} else {
-		tonga_phm_powerup_uvd(hwmgr);
-		cgs_set_powergating_state(hwmgr->device,
-						AMD_IP_BLOCK_TYPE_UVD,
-						AMD_PG_STATE_UNGATE);
-		cgs_set_clockgating_state(hwmgr->device,
-						AMD_IP_BLOCK_TYPE_UVD,
-						AMD_PG_STATE_GATE);
-
-		tonga_update_uvd_dpm(hwmgr, false);
-	}
-
-	return 0;
-}
-
-int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	struct phm_set_power_state_input states;
-	const struct pp_power_state  *pcurrent;
-	struct pp_power_state  *requested;
-
-	pcurrent = hwmgr->current_ps;
-	requested = hwmgr->request_ps;
-
-	states.pcurrent_state = &(pcurrent->hardware);
-	states.pnew_state = &(requested->hardware);
-
-	if (phm_cf_want_vce_power_gating(hwmgr)) {
-		if (data->vce_power_gated != bgate) {
-			if (bgate) {
-				cgs_set_clockgating_state(
-							hwmgr->device,
-							AMD_IP_BLOCK_TYPE_VCE,
-							AMD_CG_STATE_UNGATE);
-				cgs_set_powergating_state(
-							hwmgr->device,
-							AMD_IP_BLOCK_TYPE_VCE,
-							AMD_PG_STATE_GATE);
-				tonga_enable_disable_vce_dpm(hwmgr, false);
-				data->vce_power_gated = true;
-			} else {
-				tonga_phm_powerup_vce(hwmgr);
-				data->vce_power_gated = false;
-				cgs_set_powergating_state(
-							hwmgr->device,
-							AMD_IP_BLOCK_TYPE_VCE,
-							AMD_PG_STATE_UNGATE);
-				cgs_set_clockgating_state(
-							hwmgr->device,
-							AMD_IP_BLOCK_TYPE_VCE,
-							AMD_PG_STATE_GATE);
-
-				tonga_update_vce_dpm(hwmgr, &states);
-				tonga_enable_disable_vce_dpm(hwmgr, true);
-				return 0;
-			}
-		}
-	} else {
-		tonga_update_vce_dpm(hwmgr, &states);
-		tonga_enable_disable_vce_dpm(hwmgr, true);
-		return 0;
-	}
-
-	if (!data->vce_power_gated)
-		tonga_update_vce_dpm(hwmgr, &states);
-
-	return 0;
-}
-
-int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
-					const uint32_t *msg_id)
-{
-	PPSMC_Msg msg;
-	uint32_t value;
-
-	switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
-	case PP_GROUP_GFX:
-		switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
-		case PP_BLOCK_GFX_CG:
-			if (PP_STATE_SUPPORT_CG & *msg_id) {
-				msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
-				  ? PPSMC_MSG_EnableClockGatingFeature
-				  : PPSMC_MSG_DisableClockGatingFeature;
-				value = CG_GFX_CGCG_MASK;
-
-				if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-					return -1;
-			}
-			if (PP_STATE_SUPPORT_LS & *msg_id) {
-				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
-					? PPSMC_MSG_EnableClockGatingFeature
-					: PPSMC_MSG_DisableClockGatingFeature;
-				value = CG_GFX_CGLS_MASK;
-
-				if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-					return -1;
-			}
-			break;
-
-		case PP_BLOCK_GFX_MG:
-			/* For GFX MGCG, there are three different ones;
-			 * CPF, RLC, and all others.  CPF MGCG will not be used for Tonga.
-			 * For GFX MGLS, Tonga will not support it.
-			 * */
-			if (PP_STATE_SUPPORT_CG & *msg_id) {
-				msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
-				? PPSMC_MSG_EnableClockGatingFeature
-				: PPSMC_MSG_DisableClockGatingFeature;
-				value = (CG_RLC_MGCG_MASK | CG_GFX_OTHERS_MGCG_MASK);
-
-				if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-					return -1;
-			}
-			break;
-
-		default:
-			return -1;
-		}
-		break;
-
-	case PP_GROUP_SYS:
-		switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
-		case PP_BLOCK_SYS_BIF:
-			if (PP_STATE_SUPPORT_LS & *msg_id) {
-				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
-				? PPSMC_MSG_EnableClockGatingFeature
-				: PPSMC_MSG_DisableClockGatingFeature;
-				value = CG_SYS_BIF_MGLS_MASK;
-
-				if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-					return -1;
-			}
-			break;
-
-		case PP_BLOCK_SYS_MC:
-			if (PP_STATE_SUPPORT_CG & *msg_id) {
-				msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
-				? PPSMC_MSG_EnableClockGatingFeature
-				: PPSMC_MSG_DisableClockGatingFeature;
-				value = CG_SYS_MC_MGCG_MASK;
-
-				if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-					return -1;
-			}
-
-			if (PP_STATE_SUPPORT_LS & *msg_id) {
-				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
-				? PPSMC_MSG_EnableClockGatingFeature
-				: PPSMC_MSG_DisableClockGatingFeature;
-				value = CG_SYS_MC_MGLS_MASK;
-
-				if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-					return -1;
-
-			}
-			break;
-
-		case PP_BLOCK_SYS_HDP:
-			if (PP_STATE_SUPPORT_CG & *msg_id) {
-				msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
-					? PPSMC_MSG_EnableClockGatingFeature
-					: PPSMC_MSG_DisableClockGatingFeature;
-				value = CG_SYS_HDP_MGCG_MASK;
-
-				if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-					return -1;
-			}
-
-			if (PP_STATE_SUPPORT_LS & *msg_id) {
-				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
-					? PPSMC_MSG_EnableClockGatingFeature
-					: PPSMC_MSG_DisableClockGatingFeature;
-
-				value = CG_SYS_HDP_MGLS_MASK;
-
-				if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-					return -1;
-			}
-			break;
-
-		case PP_BLOCK_SYS_SDMA:
-			if (PP_STATE_SUPPORT_CG & *msg_id) {
-				msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
-				? PPSMC_MSG_EnableClockGatingFeature
-				: PPSMC_MSG_DisableClockGatingFeature;
-				value = CG_SYS_SDMA_MGCG_MASK;
-
-				if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-					return -1;
-			}
-
-			if (PP_STATE_SUPPORT_LS & *msg_id) {
-				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
-				? PPSMC_MSG_EnableClockGatingFeature
-				: PPSMC_MSG_DisableClockGatingFeature;
-
-				value = CG_SYS_SDMA_MGLS_MASK;
-
-				if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-					return -1;
-			}
-			break;
-
-		case PP_BLOCK_SYS_ROM:
-			if (PP_STATE_SUPPORT_CG & *msg_id) {
-				msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
-				? PPSMC_MSG_EnableClockGatingFeature
-				: PPSMC_MSG_DisableClockGatingFeature;
-				value = CG_SYS_ROM_MASK;
-
-				if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
-					return -1;
-			}
-			break;
-
-		default:
-			return -1;
-
-		}
-		break;
-
-	default:
-		return -1;
-
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h
deleted file mode 100644
index 8bc38cb..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _TONGA_CLOCK_POWER_GATING_H_
-#define _TONGA_CLOCK_POWER_GATING_H_
-
-#include "tonga_hwmgr.h"
-#include "pp_asicblocks.h"
-
-extern int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
-extern int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-extern int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-extern int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
-extern int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
-extern int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id);
-#endif /* _TONGA_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h
deleted file mode 100644
index 080d69d..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef TONGA_DYN_DEFAULTS_H
-#define TONGA_DYN_DEFAULTS_H
-
-
-/** \file
- * Volcanic Islands Dynamic default parameters.
- */
-
-enum TONGAdpm_TrendDetection {
-	TONGAdpm_TrendDetection_AUTO,
-	TONGAdpm_TrendDetection_UP,
-	TONGAdpm_TrendDetection_DOWN
-};
-typedef enum TONGAdpm_TrendDetection TONGAdpm_TrendDetection;
-
-/* Bit vector representing same fields as hardware register. */
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0              0x3FFFC102  /* CP_Gfx_busy */
-/* HDP_busy */
-/* IH_busy */
-/* DRM_busy */
-/* DRMDMA_busy */
-/* UVD_busy */
-/* VCE_busy */
-/* ACP_busy */
-/* SAMU_busy */
-/* AVP_busy  */
-/* SDMA enabled */
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1              0x000400  /* FE_Gfx_busy  - Intended for primary usage.   Rest are for flexibility. */
-/* SH_Gfx_busy */
-/* RB_Gfx_busy */
-/* VCE_busy */
-
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2              0xC00080  /* SH_Gfx_busy - Intended for primary usage.   Rest are for flexibility. */
-/* FE_Gfx_busy */
-/* RB_Gfx_busy */
-/* ACP_busy */
-
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3              0xC00200  /* RB_Gfx_busy - Intended for primary usage.   Rest are for flexibility. */
-/* FE_Gfx_busy */
-/* SH_Gfx_busy */
-/* UVD_busy */
-
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4              0xC01680  /* UVD_busy */
-/* VCE_busy */
-/* ACP_busy */
-/* SAMU_busy */
-
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5              0xC00033  /* GFX, HDP, DRMDMA */
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6              0xC00033  /* GFX, HDP, DRMDMA */
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7              0x3FFFC000  /* GFX, HDP, DRMDMA */
-
-
-/* thermal protection counter (units).*/
-#define PPTONGA_THERMALPROTECTCOUNTER_DFLT            0x200 /* ~19us */
-
-/* static screen threshold unit */
-#define PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT        0
-
-/* static screen threshold */
-#define PPTONGA_STATICSCREENTHRESHOLD_DFLT            0x00C8
-
-/* gfx idle clock stop threshold */
-#define PPTONGA_GFXIDLECLOCKSTOPTHRESHOLD_DFLT        0x200 /* ~19us with static screen threshold unit of 0 */
-
-/* Fixed reference divider to use when building baby stepping tables. */
-#define PPTONGA_REFERENCEDIVIDER_DFLT                  4
-
-/*
- * ULV voltage change delay time
- * Used to be delay_vreg in N.I. split for S.I.
- * Using N.I. delay_vreg value as default
- * ReferenceClock = 2700
- * VoltageResponseTime = 1000
- * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687
- */
-
-#define PPTONGA_ULVVOLTAGECHANGEDELAY_DFLT             1687
-
-#define PPTONGA_CGULVPARAMETER_DFLT                    0x00040035
-#define PPTONGA_CGULVCONTROL_DFLT                      0x00007450
-#define PPTONGA_TARGETACTIVITY_DFLT                     30  /*30%  */
-#define PPTONGA_MCLK_TARGETACTIVITY_DFLT                10  /*10%  */
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
deleted file mode 100644
index c7dc111..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ /dev/null
@@ -1,6276 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include "linux/delay.h"
-#include "pp_acpi.h"
-#include "hwmgr.h"
-#include <atombios.h>
-#include "tonga_hwmgr.h"
-#include "pptable.h"
-#include "processpptables.h"
-#include "tonga_processpptables.h"
-#include "tonga_pptable.h"
-#include "pp_debug.h"
-#include "tonga_ppsmc.h"
-#include "cgs_common.h"
-#include "pppcielanes.h"
-#include "tonga_dyn_defaults.h"
-#include "smumgr.h"
-#include "tonga_smumgr.h"
-#include "tonga_clockpowergating.h"
-#include "tonga_thermal.h"
-
-#include "smu/smu_7_1_2_d.h"
-#include "smu/smu_7_1_2_sh_mask.h"
-
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-
-#include "cgs_linux.h"
-#include "eventmgr.h"
-#include "amd_pcie_helpers.h"
-
-#define MC_CG_ARB_FREQ_F0           0x0a
-#define MC_CG_ARB_FREQ_F1           0x0b
-#define MC_CG_ARB_FREQ_F2           0x0c
-#define MC_CG_ARB_FREQ_F3           0x0d
-
-#define MC_CG_SEQ_DRAMCONF_S0       0x05
-#define MC_CG_SEQ_DRAMCONF_S1       0x06
-#define MC_CG_SEQ_YCLK_SUSPEND      0x04
-#define MC_CG_SEQ_YCLK_RESUME       0x0a
-
-#define PCIE_BUS_CLK                10000
-#define TCLK                        (PCIE_BUS_CLK / 10)
-
-#define SMC_RAM_END 0x40000
-#define SMC_CG_IND_START            0xc0030000
-#define SMC_CG_IND_END              0xc0040000  /* First byte after SMC_CG_IND*/
-
-#define VOLTAGE_SCALE               4
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
-
-#define VDDC_VDDCI_DELTA            200
-#define VDDC_VDDGFX_DELTA           300
-
-#define MC_SEQ_MISC0_GDDR5_SHIFT 28
-#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
-#define MC_SEQ_MISC0_GDDR5_VALUE 5
-
-typedef uint32_t PECI_RegistryValue;
-
-/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
-static const uint16_t PP_ClockStretcherLookupTable[2][4] = {
-	{600, 1050, 3, 0},
-	{600, 1050, 6, 1} };
-
-/* [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min , VID max] */
-static const uint32_t PP_ClockStretcherDDTTable[2][4][4] = {
-	{ {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
-	{ {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */
-static const uint8_t PP_ClockStretchAmountConversion[2][6] = {
-	{0, 1, 3, 2, 4, 5},
-	{0, 2, 4, 5, 6, 5} };
-
-/* Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
-enum DPM_EVENT_SRC {
-	DPM_EVENT_SRC_ANALOG = 0,               /* Internal analog trip point */
-	DPM_EVENT_SRC_EXTERNAL = 1,             /* External (GPIO 17) signal */
-	DPM_EVENT_SRC_DIGITAL = 2,              /* Internal digital trip point (DIG_THERM_DPM) */
-	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,   /* Internal analog or external */
-	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4   /* Internal digital or external */
-};
-typedef enum DPM_EVENT_SRC DPM_EVENT_SRC;
-
-static const unsigned long PhwTonga_Magic = (unsigned long)(PHM_VIslands_Magic);
-
-struct tonga_power_state *cast_phw_tonga_power_state(
-				  struct pp_hw_power_state *hw_ps)
-{
-	if (hw_ps == NULL)
-		return NULL;
-
-	PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic),
-				"Invalid Powerstate Type!",
-				 return NULL);
-
-	return (struct tonga_power_state *)hw_ps;
-}
-
-const struct tonga_power_state *cast_const_phw_tonga_power_state(
-				 const struct pp_hw_power_state *hw_ps)
-{
-	if (hw_ps == NULL)
-		return NULL;
-
-	PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic),
-				"Invalid Powerstate Type!",
-				 return NULL);
-
-	return (const struct tonga_power_state *)hw_ps;
-}
-
-int tonga_add_voltage(struct pp_hwmgr *hwmgr,
-	phm_ppt_v1_voltage_lookup_table *look_up_table,
-	phm_ppt_v1_voltage_lookup_record *record)
-{
-	uint32_t i;
-	PP_ASSERT_WITH_CODE((NULL != look_up_table),
-		"Lookup Table empty.", return -1;);
-	PP_ASSERT_WITH_CODE((0 != look_up_table->count),
-		"Lookup Table empty.", return -1;);
-	PP_ASSERT_WITH_CODE((SMU72_MAX_LEVELS_VDDGFX >= look_up_table->count),
-		"Lookup Table is full.", return -1;);
-
-	/* This is to avoid entering duplicate calculated records. */
-	for (i = 0; i < look_up_table->count; i++) {
-		if (look_up_table->entries[i].us_vdd == record->us_vdd) {
-			if (look_up_table->entries[i].us_calculated == 1)
-				return 0;
-			else
-				break;
-		}
-	}
-
-	look_up_table->entries[i].us_calculated = 1;
-	look_up_table->entries[i].us_vdd = record->us_vdd;
-	look_up_table->entries[i].us_cac_low = record->us_cac_low;
-	look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
-	look_up_table->entries[i].us_cac_high = record->us_cac_high;
-	/* Only increment the count when we're appending, not replacing duplicate entry. */
-	if (i == look_up_table->count)
-		look_up_table->count++;
-
-	return 0;
-}
-
-int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
-{
-	PPSMC_Msg msg = has_display? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
-
-	return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ?  0 : -1;
-}
-
-uint8_t tonga_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
-		uint32_t voltage)
-{
-	uint8_t count = (uint8_t) (voltage_table->count);
-	uint8_t i = 0;
-
-	PP_ASSERT_WITH_CODE((NULL != voltage_table),
-		"Voltage Table empty.", return 0;);
-	PP_ASSERT_WITH_CODE((0 != count),
-		"Voltage Table empty.", return 0;);
-
-	for (i = 0; i < count; i++) {
-		/* find first voltage bigger than requested */
-		if (voltage_table->entries[i].value >= voltage)
-			return i;
-	}
-
-	/* voltage is bigger than max voltage in the table */
-	return i - 1;
-}
-
-/**
- * @brief PhwTonga_GetVoltageOrder
- *  Returns index of requested voltage record in lookup(table)
- * @param hwmgr - pointer to hardware manager
- * @param lookupTable - lookup list to search in
- * @param voltage - voltage to look for
- * @return 0 on success
- */
-uint8_t tonga_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table,
-		uint16_t voltage)
-{
-	uint8_t count = (uint8_t) (look_up_table->count);
-	uint8_t i;
-
-	PP_ASSERT_WITH_CODE((NULL != look_up_table), "Lookup Table empty.", return 0;);
-	PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0;);
-
-	for (i = 0; i < count; i++) {
-		/* find first voltage equal or bigger than requested */
-		if (look_up_table->entries[i].us_vdd >= voltage)
-			return i;
-	}
-
-	/* voltage is bigger than max voltage in the table */
-	return i-1;
-}
-
-bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
-	/*
-	 * We return the status of Voltage Control instead of checking SCLK/MCLK DPM
-	 * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM,
-	 * whereas voltage control is a fundemental change that will not be disabled
-	 */
-
-	return (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-					FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) ? 1 : 0);
-}
-
-/**
- * Re-generate the DPM level mask value
- * @param    hwmgr      the address of the hardware manager
- */
-static uint32_t tonga_get_dpm_level_enable_mask_value(
-			struct tonga_single_dpm_table * dpm_table)
-{
-	uint32_t i;
-	uint32_t mask_value = 0;
-
-	for (i = dpm_table->count; i > 0; i--) {
-		mask_value = mask_value << 1;
-
-		if (dpm_table->dpm_levels[i-1].enabled)
-			mask_value |= 0x1;
-		else
-			mask_value &= 0xFFFFFFFE;
-	}
-	return mask_value;
-}
-
-/**
- * Retrieve DPM default values from registry (if available)
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- */
-void tonga_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	phw_tonga_ulv_parm *ulv = &(data->ulv);
-	uint32_t tmp;
-
-	ulv->ch_ulv_parameter = PPTONGA_CGULVPARAMETER_DFLT;
-	data->voting_rights_clients0 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0;
-	data->voting_rights_clients1 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1;
-	data->voting_rights_clients2 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2;
-	data->voting_rights_clients3 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3;
-	data->voting_rights_clients4 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4;
-	data->voting_rights_clients5 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5;
-	data->voting_rights_clients6 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6;
-	data->voting_rights_clients7 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7;
-
-	data->static_screen_threshold_unit = PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT;
-	data->static_screen_threshold = PPTONGA_STATICSCREENTHRESHOLD_DFLT;
-
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-		PHM_PlatformCaps_ABM);
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-		PHM_PlatformCaps_NonABMSupportInPPLib);
-
-	tmp = 0;
-	if (tmp == 0)
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_DynamicACTiming);
-
-	tmp = 0;
-	if (0 != tmp)
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_DisableMemoryTransition);
-
-	data->mclk_strobe_mode_threshold = 40000;
-	data->mclk_stutter_mode_threshold = 30000;
-	data->mclk_edc_enable_threshold = 40000;
-	data->mclk_edc_wr_enable_threshold = 40000;
-
-	tmp = 0;
-	if (tmp != 0)
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_DisableMCLS);
-
-	data->pcie_gen_performance.max = PP_PCIEGen1;
-	data->pcie_gen_performance.min = PP_PCIEGen3;
-	data->pcie_gen_power_saving.max = PP_PCIEGen1;
-	data->pcie_gen_power_saving.min = PP_PCIEGen3;
-
-	data->pcie_lane_performance.max = 0;
-	data->pcie_lane_performance.min = 16;
-	data->pcie_lane_power_saving.max = 0;
-	data->pcie_lane_power_saving.min = 16;
-
-	tmp = 0;
-
-	if (tmp)
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_SclkThrottleLowNotification);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-		PHM_PlatformCaps_DynamicUVDState);
-
-}
-
-int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-	int result = 0;
-	uint32_t low_sclk_interrupt_threshold = 0;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_SclkThrottleLowNotification)
-		&& (hwmgr->gfx_arbiter.sclk_threshold != data->low_sclk_interrupt_threshold)) {
-		data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-		low_sclk_interrupt_threshold = data->low_sclk_interrupt_threshold;
-
-		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
-		result = tonga_copy_bytes_to_smc(
-				hwmgr->smumgr,
-				data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable,
-				LowSclkInterruptThreshold),
-				(uint8_t *)&low_sclk_interrupt_threshold,
-				sizeof(uint32_t),
-				data->sram_end
-				);
-	}
-
-	return result;
-}
-
-/**
- * Find SCLK value that is associated with specified virtual_voltage_Id.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @param    virtual_voltage_Id  voltageId to look for.
- * @param    sclk output value .
- * @return   always 0 if success and 2 if association not found
- */
-static int tonga_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
-	phm_ppt_v1_voltage_lookup_table *lookup_table,
-	uint16_t virtual_voltage_id, uint32_t *sclk)
-{
-	uint8_t entryId;
-	uint8_t voltageId;
-	struct phm_ppt_v1_information *pptable_info =
-					(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -1);
-
-	/* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
-	for (entryId = 0; entryId < pptable_info->vdd_dep_on_sclk->count; entryId++) {
-		voltageId = pptable_info->vdd_dep_on_sclk->entries[entryId].vddInd;
-		if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
-			break;
-	}
-
-	PP_ASSERT_WITH_CODE(entryId < pptable_info->vdd_dep_on_sclk->count,
-			"Can't find requested voltage id in vdd_dep_on_sclk table!",
-			return -1;
-			);
-
-	*sclk = pptable_info->vdd_dep_on_sclk->entries[entryId].clk;
-
-	return 0;
-}
-
-/**
- * Get Leakage VDDC based on leakage ID.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   2 if vddgfx returned is greater than 2V or if BIOS
- */
-int tonga_get_evv_voltage(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-	phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
-	uint16_t    virtual_voltage_id;
-	uint16_t    vddc = 0;
-	uint16_t    vddgfx = 0;
-	uint16_t    i, j;
-	uint32_t  sclk = 0;
-
-	/* retrieve voltage for leakage ID (0xff01 + i) */
-	for (i = 0; i < TONGA_MAX_LEAKAGE_COUNT; i++) {
-		virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
-
-		/* in split mode we should have only vddgfx EVV leakages */
-		if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
-			if (0 == tonga_get_sclk_for_voltage_evv(hwmgr,
-						pptable_info->vddgfx_lookup_table, virtual_voltage_id, &sclk)) {
-				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-							PHM_PlatformCaps_ClockStretcher)) {
-					for (j = 1; j < sclk_table->count; j++) {
-						if (sclk_table->entries[j].clk == sclk &&
-								sclk_table->entries[j].cks_enable == 0) {
-							sclk += 5000;
-							break;
-						}
-					}
-				}
-				if (0 == atomctrl_get_voltage_evv_on_sclk
-				    (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
-				     virtual_voltage_id, &vddgfx)) {
-					/* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */
-					PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -1);
-
-					/* the voltage should not be zero nor equal to leakage ID */
-					if (vddgfx != 0 && vddgfx != virtual_voltage_id) {
-						data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
-						data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = virtual_voltage_id;
-						data->vddcgfx_leakage.count++;
-					}
-				} else {
-					printk("Error retrieving EVV voltage value!\n");
-				}
-			}
-		} else {
-			/*  in merged mode we have only vddc EVV leakages */
-			if (0 == tonga_get_sclk_for_voltage_evv(hwmgr,
-						pptable_info->vddc_lookup_table,
-						virtual_voltage_id, &sclk)) {
-				if (0 == atomctrl_get_voltage_evv_on_sclk
-				    (hwmgr, VOLTAGE_TYPE_VDDC, sclk,
-				     virtual_voltage_id, &vddc)) {
-					/* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
-					PP_ASSERT_WITH_CODE(vddc < 2000, "Invalid VDDC value!", return -1);
-
-					/* the voltage should not be zero nor equal to leakage ID */
-					if (vddc != 0 && vddc != virtual_voltage_id) {
-						data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
-						data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
-						data->vddc_leakage.count++;
-					}
-				} else {
-					printk("Error retrieving EVV voltage value!\n");
-				}
-			}
-		}
-	}
-
-	return 0;
-}
-
-int tonga_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-	/* enable SCLK dpm */
-	if (0 == data->sclk_dpm_key_disabled) {
-		PP_ASSERT_WITH_CODE(
-				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-						   PPSMC_MSG_DPM_Enable)),
-				"Failed to enable SCLK DPM during DPM Start Function!",
-				return -1);
-	}
-
-	/* enable MCLK dpm */
-	if (0 == data->mclk_dpm_key_disabled) {
-		PP_ASSERT_WITH_CODE(
-				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-					     PPSMC_MSG_MCLKDPM_Enable)),
-				"Failed to enable MCLK DPM during DPM Start Function!",
-				return -1);
-
-		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
-
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixLCAC_MC0_CNTL, 0x05);/* CH0,1 read */
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixLCAC_MC1_CNTL, 0x05);/* CH2,3 read */
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixLCAC_CPL_CNTL, 0x100005);/*Read */
-
-		udelay(10);
-
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixLCAC_MC0_CNTL, 0x400005);/* CH0,1 write */
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixLCAC_MC1_CNTL, 0x400005);/* CH2,3 write */
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixLCAC_CPL_CNTL, 0x500005);/* write */
-
-	}
-
-	return 0;
-}
-
-int tonga_start_dpm(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-	/* enable general power management */
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 1);
-	/* enable sclk deep sleep */
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 1);
-
-	/* prepare for PCIE DPM */
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
-			offsetof(SMU72_SoftRegisters, VoltageChangeTimeout), 0x1000);
-
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0);
-
-	PP_ASSERT_WITH_CODE(
-			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-					PPSMC_MSG_Voltage_Cntl_Enable)),
-			"Failed to enable voltage DPM during DPM Start Function!",
-			return -1);
-
-	if (0 != tonga_enable_sclk_mclk_dpm(hwmgr)) {
-		PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk DPM!", return -1);
-	}
-
-	/* enable PCIE dpm */
-	if (0 == data->pcie_dpm_key_disabled) {
-		PP_ASSERT_WITH_CODE(
-				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-						PPSMC_MSG_PCIeDPM_Enable)),
-				"Failed to enable pcie DPM during DPM Start Function!",
-				return -1
-				);
-	}
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_Falcon_QuickTransition)) {
-				   smum_send_msg_to_smc(hwmgr->smumgr,
-				    PPSMC_MSG_EnableACDCGPIOInterrupt);
-	}
-
-	return 0;
-}
-
-int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-	/* disable SCLK dpm */
-	if (0 == data->sclk_dpm_key_disabled) {
-		/* Checking if DPM is running.  If we discover hang because of this, we should skip this message.*/
-		PP_ASSERT_WITH_CODE(
-				!tonga_is_dpm_running(hwmgr),
-				"Trying to Disable SCLK DPM when DPM is disabled",
-				return -1
-				);
-
-		PP_ASSERT_WITH_CODE(
-				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-						  PPSMC_MSG_DPM_Disable)),
-				"Failed to disable SCLK DPM during DPM stop Function!",
-				return -1);
-	}
-
-	/* disable MCLK dpm */
-	if (0 == data->mclk_dpm_key_disabled) {
-		/* Checking if DPM is running.  If we discover hang because of this, we should skip this message. */
-		PP_ASSERT_WITH_CODE(
-				!tonga_is_dpm_running(hwmgr),
-				"Trying to Disable MCLK DPM when DPM is disabled",
-				return -1
-				);
-
-		PP_ASSERT_WITH_CODE(
-				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-					    PPSMC_MSG_MCLKDPM_Disable)),
-				"Failed to Disable MCLK DPM during DPM stop Function!",
-				return -1);
-	}
-
-	return 0;
-}
-
-int tonga_stop_dpm(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 0);
-	/* disable sclk deep sleep*/
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 0);
-
-	/* disable PCIE dpm */
-	if (0 == data->pcie_dpm_key_disabled) {
-		/* Checking if DPM is running.  If we discover hang because of this, we should skip this message.*/
-		PP_ASSERT_WITH_CODE(
-				!tonga_is_dpm_running(hwmgr),
-				"Trying to Disable PCIE DPM when DPM is disabled",
-				return -1
-				);
-		PP_ASSERT_WITH_CODE(
-				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-						PPSMC_MSG_PCIeDPM_Disable)),
-				"Failed to disable pcie DPM during DPM stop Function!",
-				return -1);
-	}
-
-	if (0 != tonga_disable_sclk_mclk_dpm(hwmgr))
-		PP_ASSERT_WITH_CODE(0, "Failed to disable Sclk DPM and Mclk DPM!", return -1);
-
-	/* Checking if DPM is running.  If we discover hang because of this, we should skip this message.*/
-	PP_ASSERT_WITH_CODE(
-			!tonga_is_dpm_running(hwmgr),
-			"Trying to Disable Voltage CNTL when DPM is disabled",
-			return -1
-			);
-
-	PP_ASSERT_WITH_CODE(
-			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-					PPSMC_MSG_Voltage_Cntl_Disable)),
-			"Failed to disable voltage DPM during DPM stop Function!",
-			return -1);
-
-	return 0;
-}
-
-int tonga_enable_sclk_control(struct pp_hwmgr *hwmgr)
-{
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, 0);
-
-	return 0;
-}
-
-/**
- * Send a message to the SMC and return a parameter
- *
- * @param    hwmgr:  the address of the powerplay hardware manager.
- * @param    msg: the message to send.
- * @param    parameter: pointer to the received parameter
- * @return   The response that came from the SMC.
- */
-PPSMC_Result tonga_send_msg_to_smc_return_parameter(
-		struct pp_hwmgr *hwmgr,
-		PPSMC_Msg msg,
-		uint32_t *parameter)
-{
-	int result;
-
-	result = smum_send_msg_to_smc(hwmgr->smumgr, msg);
-
-	if ((0 == result) && parameter) {
-		*parameter = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-	}
-
-	return result;
-}
-
-/**
- * force DPM power State
- *
- * @param    hwmgr:  the address of the powerplay hardware manager.
- * @param    n     :  DPM level
- * @return   The response that came from the SMC.
- */
-int tonga_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	uint32_t level_mask = 1 << n;
-
-	/* Checking if DPM is running.  If we discover hang because of this, we should skip this message. */
-	PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
-			    "Trying to force SCLK when DPM is disabled",
-			    return -1;);
-	if (0 == data->sclk_dpm_key_disabled)
-		return (0 == smum_send_msg_to_smc_with_parameter(
-							     hwmgr->smumgr,
-		     (PPSMC_Msg)(PPSMC_MSG_SCLKDPM_SetEnabledMask),
-						    level_mask) ? 0 : 1);
-
-	return 0;
-}
-
-/**
- * force DPM power State
- *
- * @param    hwmgr:  the address of the powerplay hardware manager.
- * @param    n     :  DPM level
- * @return   The response that came from the SMC.
- */
-int tonga_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	uint32_t level_mask = 1 << n;
-
-	/* Checking if DPM is running.  If we discover hang because of this, we should skip this message. */
-	PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
-			    "Trying to Force MCLK when DPM is disabled",
-			    return -1;);
-	if (0 == data->mclk_dpm_key_disabled)
-		return (0 == smum_send_msg_to_smc_with_parameter(
-								hwmgr->smumgr,
-								(PPSMC_Msg)(PPSMC_MSG_MCLKDPM_SetEnabledMask),
-								level_mask) ? 0 : 1);
-
-	return 0;
-}
-
-/**
- * force DPM power State
- *
- * @param    hwmgr:  the address of the powerplay hardware manager.
- * @param    n     :  DPM level
- * @return   The response that came from the SMC.
- */
-int tonga_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-	/* Checking if DPM is running.  If we discover hang because of this, we should skip this message.*/
-	PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
-			    "Trying to Force PCIE level when DPM is disabled",
-			    return -1;);
-	if (0 == data->pcie_dpm_key_disabled)
-		return (0 == smum_send_msg_to_smc_with_parameter(
-							     hwmgr->smumgr,
-			   (PPSMC_Msg)(PPSMC_MSG_PCIeDPM_ForceLevel),
-								n) ? 0 : 1);
-
-	return 0;
-}
-
-/**
- * Set the initial state by calling SMC to switch to this state directly
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_set_boot_state(struct pp_hwmgr *hwmgr)
-{
-	/*
-	* SMC only stores one state that SW will ask to switch too,
-	* so we switch the the just uploaded one
-	*/
-	return (0 == tonga_disable_sclk_mclk_dpm(hwmgr)) ? 0 : 1;
-}
-
-/**
- * Get the location of various tables inside the FW image.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct tonga_smumgr *tonga_smu = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
-	uint32_t tmp;
-	int result;
-	bool error = false;
-
-	result = tonga_read_smc_sram_dword(hwmgr->smumgr,
-				SMU72_FIRMWARE_HEADER_LOCATION +
-				offsetof(SMU72_Firmware_Header, DpmTable),
-				&tmp, data->sram_end);
-
-	if (0 == result) {
-		data->dpm_table_start = tmp;
-	}
-
-	error |= (0 != result);
-
-	result = tonga_read_smc_sram_dword(hwmgr->smumgr,
-				SMU72_FIRMWARE_HEADER_LOCATION +
-				offsetof(SMU72_Firmware_Header, SoftRegisters),
-				&tmp, data->sram_end);
-
-	if (0 == result) {
-		data->soft_regs_start = tmp;
-		tonga_smu->ulSoftRegsStart = tmp;
-	}
-
-	error |= (0 != result);
-
-
-	result = tonga_read_smc_sram_dword(hwmgr->smumgr,
-				SMU72_FIRMWARE_HEADER_LOCATION +
-				offsetof(SMU72_Firmware_Header, mcRegisterTable),
-				&tmp, data->sram_end);
-
-	if (0 == result) {
-		data->mc_reg_table_start = tmp;
-	}
-
-	result = tonga_read_smc_sram_dword(hwmgr->smumgr,
-				SMU72_FIRMWARE_HEADER_LOCATION +
-				offsetof(SMU72_Firmware_Header, FanTable),
-				&tmp, data->sram_end);
-
-	if (0 == result) {
-		data->fan_table_start = tmp;
-	}
-
-	error |= (0 != result);
-
-	result = tonga_read_smc_sram_dword(hwmgr->smumgr,
-				SMU72_FIRMWARE_HEADER_LOCATION +
-				offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
-				&tmp, data->sram_end);
-
-	if (0 == result) {
-		data->arb_table_start = tmp;
-	}
-
-	error |= (0 != result);
-
-
-	result = tonga_read_smc_sram_dword(hwmgr->smumgr,
-				SMU72_FIRMWARE_HEADER_LOCATION +
-				offsetof(SMU72_Firmware_Header, Version),
-				&tmp, data->sram_end);
-
-	if (0 == result) {
-		hwmgr->microcode_version_info.SMC = tmp;
-	}
-
-	error |= (0 != result);
-
-	return error ? 1 : 0;
-}
-
-/**
- * Read clock related registers.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_read_clock_registers(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-	data->clock_registers.vCG_SPLL_FUNC_CNTL         =
-		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
-	data->clock_registers.vCG_SPLL_FUNC_CNTL_2       =
-		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
-	data->clock_registers.vCG_SPLL_FUNC_CNTL_3       =
-		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
-	data->clock_registers.vCG_SPLL_FUNC_CNTL_4       =
-		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
-	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM   =
-		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
-	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
-		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
-	data->clock_registers.vDLL_CNTL                  =
-		cgs_read_register(hwmgr->device, mmDLL_CNTL);
-	data->clock_registers.vMCLK_PWRMGT_CNTL          =
-		cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
-	data->clock_registers.vMPLL_AD_FUNC_CNTL         =
-		cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
-	data->clock_registers.vMPLL_DQ_FUNC_CNTL         =
-		cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
-	data->clock_registers.vMPLL_FUNC_CNTL            =
-		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
-	data->clock_registers.vMPLL_FUNC_CNTL_1          =
-		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
-	data->clock_registers.vMPLL_FUNC_CNTL_2          =
-		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
-	data->clock_registers.vMPLL_SS1                  =
-		cgs_read_register(hwmgr->device, mmMPLL_SS1);
-	data->clock_registers.vMPLL_SS2                  =
-		cgs_read_register(hwmgr->device, mmMPLL_SS2);
-
-	return 0;
-}
-
-/**
- * Find out if memory is GDDR5.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_get_memory_type(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	uint32_t temp;
-
-	temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
-
-	data->is_memory_GDDR5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
-			((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
-			 MC_SEQ_MISC0_GDDR5_SHIFT));
-
-	return 0;
-}
-
-/**
- * Enables Dynamic Power Management by SMC
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
-{
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, STATIC_PM_EN, 1);
-
-	return 0;
-}
-
-/**
- * Initialize PowerGating States for different engines
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_init_power_gate_state(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-	data->uvd_power_gated = false;
-	data->vce_power_gated = false;
-	data->samu_power_gated = false;
-	data->acp_power_gated = false;
-	data->pg_acp_init = true;
-
-	return 0;
-}
-
-/**
- * Checks if DPM is enabled
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_check_for_dpm_running(struct pp_hwmgr *hwmgr)
-{
-	/*
-	 * We return the status of Voltage Control instead of checking SCLK/MCLK DPM
-	 * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM,
-	 * whereas voltage control is a fundemental change that will not be disabled
-	 */
-	return (!tonga_is_dpm_running(hwmgr) ? 0 : 1);
-}
-
-/**
- * Checks if DPM is stopped
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_check_for_dpm_stopped(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-	if (tonga_is_dpm_running(hwmgr)) {
-		/* If HW Virtualization is enabled, dpm_table_start will not have a valid value */
-		if (!data->dpm_table_start) {
-			return 1;
-		}
-	}
-
-	return 0;
-}
-
-/**
- * Remove repeated voltage values and create table with unique values.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @param    voltage_table  the pointer to changing voltage table
- * @return    1 in success
- */
-
-static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr,
-			pp_atomctrl_voltage_table *voltage_table)
-{
-	uint32_t table_size, i, j;
-	uint16_t vvalue;
-	bool bVoltageFound = false;
-	pp_atomctrl_voltage_table *table;
-
-	PP_ASSERT_WITH_CODE((NULL != voltage_table), "Voltage Table empty.", return -1;);
-	table_size = sizeof(pp_atomctrl_voltage_table);
-	table = kzalloc(table_size, GFP_KERNEL);
-
-	if (NULL == table)
-		return -ENOMEM;
-
-	memset(table, 0x00, table_size);
-	table->mask_low = voltage_table->mask_low;
-	table->phase_delay = voltage_table->phase_delay;
-
-	for (i = 0; i < voltage_table->count; i++) {
-		vvalue = voltage_table->entries[i].value;
-		bVoltageFound = false;
-
-		for (j = 0; j < table->count; j++) {
-			if (vvalue == table->entries[j].value) {
-				bVoltageFound = true;
-				break;
-			}
-		}
-
-		if (!bVoltageFound) {
-			table->entries[table->count].value = vvalue;
-			table->entries[table->count].smio_low =
-				voltage_table->entries[i].smio_low;
-			table->count++;
-		}
-	}
-
-	memcpy(table, voltage_table, sizeof(pp_atomctrl_voltage_table));
-
-	kfree(table);
-
-	return 0;
-}
-
-static int tonga_get_svi2_vdd_ci_voltage_table(
-		struct pp_hwmgr *hwmgr,
-		phm_ppt_v1_clock_voltage_dependency_table *voltage_dependency_table)
-{
-	uint32_t i;
-	int result;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	pp_atomctrl_voltage_table *vddci_voltage_table = &(data->vddci_voltage_table);
-
-	PP_ASSERT_WITH_CODE((0 != voltage_dependency_table->count),
-			"Voltage Dependency Table empty.", return -1;);
-
-	vddci_voltage_table->mask_low = 0;
-	vddci_voltage_table->phase_delay = 0;
-	vddci_voltage_table->count = voltage_dependency_table->count;
-
-	for (i = 0; i < voltage_dependency_table->count; i++) {
-		vddci_voltage_table->entries[i].value =
-			voltage_dependency_table->entries[i].vddci;
-		vddci_voltage_table->entries[i].smio_low = 0;
-	}
-
-	result = tonga_trim_voltage_table(hwmgr, vddci_voltage_table);
-	PP_ASSERT_WITH_CODE((0 == result),
-			"Failed to trim VDDCI table.", return result;);
-
-	return 0;
-}
-
-
-
-static int tonga_get_svi2_vdd_voltage_table(
-		struct pp_hwmgr *hwmgr,
-		phm_ppt_v1_voltage_lookup_table *look_up_table,
-		pp_atomctrl_voltage_table *voltage_table)
-{
-	uint8_t i = 0;
-
-	PP_ASSERT_WITH_CODE((0 != look_up_table->count),
-			"Voltage Lookup Table empty.", return -1;);
-
-	voltage_table->mask_low = 0;
-	voltage_table->phase_delay = 0;
-
-	voltage_table->count = look_up_table->count;
-
-	for (i = 0; i < voltage_table->count; i++) {
-		voltage_table->entries[i].value = look_up_table->entries[i].us_vdd;
-		voltage_table->entries[i].smio_low = 0;
-	}
-
-	return 0;
-}
-
-/*
- * -------------------------------------------------------- Voltage Tables --------------------------------------------------------------------------
- * If the voltage table would be bigger than what will fit into the state table on the SMC keep only the higher entries.
- */
-
-static void tonga_trim_voltage_table_to_fit_state_table(
-		struct pp_hwmgr *hwmgr,
-		uint32_t max_voltage_steps,
-		pp_atomctrl_voltage_table *voltage_table)
-{
-	unsigned int i, diff;
-
-	if (voltage_table->count <= max_voltage_steps) {
-		return;
-	}
-
-	diff = voltage_table->count - max_voltage_steps;
-
-	for (i = 0; i < max_voltage_steps; i++) {
-		voltage_table->entries[i] = voltage_table->entries[i + diff];
-	}
-
-	voltage_table->count = max_voltage_steps;
-
-	return;
-}
-
-/**
- * Create Voltage Tables.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_construct_voltage_tables(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-	int result;
-
-	/* MVDD has only GPIO voltage control */
-	if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-		result = atomctrl_get_voltage_table_v3(hwmgr,
-					VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, &(data->mvdd_voltage_table));
-		PP_ASSERT_WITH_CODE((0 == result),
-			"Failed to retrieve MVDD table.", return result;);
-	}
-
-	if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
-		/* GPIO voltage */
-		result = atomctrl_get_voltage_table_v3(hwmgr,
-					VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, &(data->vddci_voltage_table));
-		PP_ASSERT_WITH_CODE((0 == result),
-			"Failed to retrieve VDDCI table.", return result;);
-	} else if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
-		/* SVI2 voltage */
-		result = tonga_get_svi2_vdd_ci_voltage_table(hwmgr,
-					pptable_info->vdd_dep_on_mclk);
-		PP_ASSERT_WITH_CODE((0 == result),
-			"Failed to retrieve SVI2 VDDCI table from dependancy table.", return result;);
-	}
-
-	if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
-		/* VDDGFX has only SVI2 voltage control */
-		result = tonga_get_svi2_vdd_voltage_table(hwmgr,
-					pptable_info->vddgfx_lookup_table, &(data->vddgfx_voltage_table));
-		PP_ASSERT_WITH_CODE((0 == result),
-			"Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
-	}
-
-	if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-		/* VDDC has only SVI2 voltage control */
-		result = tonga_get_svi2_vdd_voltage_table(hwmgr,
-					pptable_info->vddc_lookup_table, &(data->vddc_voltage_table));
-		PP_ASSERT_WITH_CODE((0 == result),
-			"Failed to retrieve SVI2 VDDC table from lookup table.", return result;);
-	}
-
-	PP_ASSERT_WITH_CODE(
-			(data->vddc_voltage_table.count <= (SMU72_MAX_LEVELS_VDDC)),
-			"Too many voltage values for VDDC. Trimming to fit state table.",
-			tonga_trim_voltage_table_to_fit_state_table(hwmgr,
-			SMU72_MAX_LEVELS_VDDC, &(data->vddc_voltage_table));
-			);
-
-	PP_ASSERT_WITH_CODE(
-			(data->vddgfx_voltage_table.count <= (SMU72_MAX_LEVELS_VDDGFX)),
-			"Too many voltage values for VDDGFX. Trimming to fit state table.",
-			tonga_trim_voltage_table_to_fit_state_table(hwmgr,
-			SMU72_MAX_LEVELS_VDDGFX, &(data->vddgfx_voltage_table));
-			);
-
-	PP_ASSERT_WITH_CODE(
-			(data->vddci_voltage_table.count <= (SMU72_MAX_LEVELS_VDDCI)),
-			"Too many voltage values for VDDCI. Trimming to fit state table.",
-			tonga_trim_voltage_table_to_fit_state_table(hwmgr,
-			SMU72_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table));
-			);
-
-	PP_ASSERT_WITH_CODE(
-			(data->mvdd_voltage_table.count <= (SMU72_MAX_LEVELS_MVDD)),
-			"Too many voltage values for MVDD. Trimming to fit state table.",
-			tonga_trim_voltage_table_to_fit_state_table(hwmgr,
-			SMU72_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table));
-			);
-
-	return 0;
-}
-
-/**
- * Vddc table preparation for SMC.
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    table     the SMC DPM table structure to be populated
- * @return   always 0
- */
-static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
-			SMU72_Discrete_DpmTable *table)
-{
-	unsigned int count;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-	if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-		table->VddcLevelCount = data->vddc_voltage_table.count;
-		for (count = 0; count < table->VddcLevelCount; count++) {
-			table->VddcTable[count] =
-				PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
-		}
-		CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
-	}
-	return 0;
-}
-
-/**
- * VddGfx table preparation for SMC.
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    table     the SMC DPM table structure to be populated
- * @return   always 0
- */
-static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
-			SMU72_Discrete_DpmTable *table)
-{
-	unsigned int count;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-	if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
-		table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
-		for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
-			table->VddGfxTable[count] =
-				PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
-		}
-		CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
-	}
-	return 0;
-}
-
-/**
- * Vddci table preparation for SMC.
- *
- * @param    *hwmgr The address of the hardware manager.
- * @param    *table The SMC DPM table structure to be populated.
- * @return   0
- */
-static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
-			SMU72_Discrete_DpmTable *table)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	uint32_t count;
-
-	table->VddciLevelCount = data->vddci_voltage_table.count;
-	for (count = 0; count < table->VddciLevelCount; count++) {
-		if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
-			table->VddciTable[count] =
-				PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
-		} else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
-			table->SmioTable1.Pattern[count].Voltage =
-				PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
-			/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
-			table->SmioTable1.Pattern[count].Smio =
-				(uint8_t) count;
-			table->Smio[count] |=
-				data->vddci_voltage_table.entries[count].smio_low;
-			table->VddciTable[count] =
-				PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
-		}
-	}
-
-	table->SmioMask1 = data->vddci_voltage_table.mask_low;
-	CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
-
-	return 0;
-}
-
-/**
- * Mvdd table preparation for SMC.
- *
- * @param    *hwmgr The address of the hardware manager.
- * @param    *table The SMC DPM table structure to be populated.
- * @return   0
- */
-static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
-			SMU72_Discrete_DpmTable *table)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	uint32_t count;
-
-	if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-		table->MvddLevelCount = data->mvdd_voltage_table.count;
-		for (count = 0; count < table->MvddLevelCount; count++) {
-			table->SmioTable2.Pattern[count].Voltage =
-				PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
-			/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
-			table->SmioTable2.Pattern[count].Smio =
-				(uint8_t) count;
-			table->Smio[count] |=
-				data->mvdd_voltage_table.entries[count].smio_low;
-		}
-		table->SmioMask2 = data->mvdd_voltage_table.mask_low;
-
-		CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
-	}
-
-	return 0;
-}
-
-/**
- * Convert a voltage value in mv unit to VID number required by SMU firmware
- */
-static uint8_t convert_to_vid(uint16_t vddc)
-{
-	return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
-}
-
-
-/**
- * Preparation of vddc and vddgfx CAC tables for SMC.
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    table     the SMC DPM table structure to be populated
- * @return   always 0
- */
-static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
-			SMU72_Discrete_DpmTable *table)
-{
-	uint32_t count;
-	uint8_t index;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = pptable_info->vddgfx_lookup_table;
-	struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table = pptable_info->vddc_lookup_table;
-
-	/* pTables is already swapped, so in order to use the value from it, we need to swap it back. */
-	uint32_t vddcLevelCount = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
-	uint32_t vddgfxLevelCount = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
-
-	for (count = 0; count < vddcLevelCount; count++) {
-		/* We are populating vddc CAC data to BapmVddc table in split and merged mode */
-		index = tonga_get_voltage_index(vddc_lookup_table,
-			data->vddc_voltage_table.entries[count].value);
-		table->BapmVddcVidLoSidd[count] =
-			convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
-		table->BapmVddcVidHiSidd[count] =
-			convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
-		table->BapmVddcVidHiSidd2[count] =
-			convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
-	}
-
-	if ((data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2)) {
-		/* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
-		for (count = 0; count < vddgfxLevelCount; count++) {
-			index = tonga_get_voltage_index(vddgfx_lookup_table,
-				data->vddgfx_voltage_table.entries[count].value);
-			table->BapmVddGfxVidLoSidd[count] =
-				convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_low);
-			table->BapmVddGfxVidHiSidd[count] =
-				convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid);
-			table->BapmVddGfxVidHiSidd2[count] =
-				convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
-		}
-	} else {
-		for (count = 0; count < vddcLevelCount; count++) {
-			index = tonga_get_voltage_index(vddc_lookup_table,
-				data->vddc_voltage_table.entries[count].value);
-			table->BapmVddGfxVidLoSidd[count] =
-				convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
-			table->BapmVddGfxVidHiSidd[count] =
-				convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
-			table->BapmVddGfxVidHiSidd2[count] =
-				convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
-		}
-	}
-
-	return 0;
-}
-
-
-/**
- * Preparation of voltage tables for SMC.
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    table     the SMC DPM table structure to be populated
- * @return   always 0
- */
-
-int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
-	SMU72_Discrete_DpmTable *table)
-{
-	int result;
-
-	result = tonga_populate_smc_vddc_table(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"can not populate VDDC voltage table to SMC", return -1);
-
-	result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"can not populate VDDCI voltage table to SMC", return -1);
-
-	result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"can not populate VDDGFX voltage table to SMC", return -1);
-
-	result = tonga_populate_smc_mvdd_table(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"can not populate MVDD voltage table to SMC", return -1);
-
-	result = tonga_populate_cac_tables(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"can not populate CAC voltage tables to SMC", return -1);
-
-	return 0;
-}
-
-/**
- * Populates the SMC VRConfig field in DPM table.
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    table     the SMC DPM table structure to be populated
- * @return   always 0
- */
-static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
-			SMU72_Discrete_DpmTable *table)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	uint16_t config;
-
-	if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
-		/*  Splitted mode */
-		config = VR_SVI2_PLANE_1;
-		table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
-
-		if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-			config = VR_SVI2_PLANE_2;
-			table->VRConfig |= config;
-		} else {
-			printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should be both on SVI2 control in splitted mode! \n");
-		}
-	} else {
-		/* Merged mode  */
-		config = VR_MERGED_WITH_VDDC;
-		table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
-
-		/* Set Vddc Voltage Controller  */
-		if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-			config = VR_SVI2_PLANE_1;
-			table->VRConfig |= config;
-		} else {
-			printk(KERN_ERR "[ powerplay ] VDDC should be on SVI2 control in merged mode! \n");
-		}
-	}
-
-	/* Set Vddci Voltage Controller  */
-	if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
-		config = VR_SVI2_PLANE_2;  /* only in merged mode */
-		table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
-	} else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
-		config = VR_SMIO_PATTERN_1;
-		table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
-	}
-
-	/* Set Mvdd Voltage Controller */
-	if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-		config = VR_SMIO_PATTERN_2;
-		table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
-	}
-
-	return 0;
-}
-
-static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
-	phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
-	uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
-	uint32_t i = 0;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	/* clock - voltage dependency table is empty table */
-	if (allowed_clock_voltage_table->count == 0)
-		return -1;
-
-	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
-		/* find first sclk bigger than request */
-		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
-			voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
-								allowed_clock_voltage_table->entries[i].vddgfx);
-
-			voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table,
-								allowed_clock_voltage_table->entries[i].vddc);
-
-			if (allowed_clock_voltage_table->entries[i].vddci) {
-				voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
-									allowed_clock_voltage_table->entries[i].vddci);
-			} else {
-				voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
-									allowed_clock_voltage_table->entries[i].vddc - data->vddc_vddci_delta);
-			}
-
-			if (allowed_clock_voltage_table->entries[i].mvdd) {
-				*mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
-			}
-
-			voltage->Phases = 1;
-			return 0;
-		}
-	}
-
-	/* sclk is bigger than max sclk in the dependence table */
-	voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
-		allowed_clock_voltage_table->entries[i-1].vddgfx);
-	voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table,
-		allowed_clock_voltage_table->entries[i-1].vddc);
-
-	if (allowed_clock_voltage_table->entries[i-1].vddci) {
-		voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
-			allowed_clock_voltage_table->entries[i-1].vddci);
-	}
-	if (allowed_clock_voltage_table->entries[i-1].mvdd) {
-		*mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
-	}
-
-	return 0;
-}
-
-/**
- * Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_reset_to_default(struct pp_hwmgr *hwmgr)
-{
-	return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults) == 0) ? 0 : 1;
-}
-
-int tonga_populate_memory_timing_parameters(
-		struct pp_hwmgr *hwmgr,
-		uint32_t engine_clock,
-		uint32_t memory_clock,
-		struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
-		)
-{
-	uint32_t dramTiming;
-	uint32_t dramTiming2;
-	uint32_t burstTime;
-	int result;
-
-	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
-				engine_clock, memory_clock);
-
-	PP_ASSERT_WITH_CODE(result == 0,
-		"Error calling VBIOS to set DRAM_TIMING.", return result);
-
-	dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
-	dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
-	burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-
-	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
-	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
-	arb_regs->McArbBurstTime = (uint8_t)burstTime;
-
-	return 0;
-}
-
-/**
- * Setup parameters for the MC ARB.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- * This function is to be called from the SetPowerState table.
- */
-int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	int result = 0;
-	SMU72_Discrete_MCArbDramTimingTable  arb_regs;
-	uint32_t i, j;
-
-	memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
-
-	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
-		for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
-			result = tonga_populate_memory_timing_parameters
-				(hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
-				 data->dpm_table.mclk_table.dpm_levels[j].value,
-				 &arb_regs.entries[i][j]);
-
-			if (0 != result) {
-				break;
-			}
-		}
-	}
-
-	if (0 == result) {
-		result = tonga_copy_bytes_to_smc(
-				hwmgr->smumgr,
-				data->arb_table_start,
-				(uint8_t *)&arb_regs,
-				sizeof(SMU72_Discrete_MCArbDramTimingTable),
-				data->sram_end
-				);
-	}
-
-	return result;
-}
-
-static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct tonga_dpm_table *dpm_table = &data->dpm_table;
-	uint32_t i;
-
-	/* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
-	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
-		table->LinkLevel[i].PcieGenSpeed  =
-			(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
-		table->LinkLevel[i].PcieLaneCount =
-			(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
-		table->LinkLevel[i].EnabledForActivity =
-			1;
-		table->LinkLevel[i].SPC =
-			(uint8_t)(data->pcie_spc_cap & 0xff);
-		table->LinkLevel[i].DownThreshold =
-			PP_HOST_TO_SMC_UL(5);
-		table->LinkLevel[i].UpThreshold =
-			PP_HOST_TO_SMC_UL(30);
-	}
-
-	data->smc_state_table.LinkLevelCount =
-		(uint8_t)dpm_table->pcie_speed_table.count;
-	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
-		tonga_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
-	return 0;
-}
-
-static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
-					SMU72_Discrete_DpmTable *table)
-{
-	int result = 0;
-
-	uint8_t count;
-	pp_atomctrl_clock_dividers_vi dividers;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
-	table->UvdLevelCount = (uint8_t) (mm_table->count);
-	table->UvdBootLevel = 0;
-
-	for (count = 0; count < table->UvdLevelCount; count++) {
-		table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
-		table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
-		table->UvdLevel[count].MinVoltage.Vddc =
-			tonga_get_voltage_index(pptable_info->vddc_lookup_table,
-						mm_table->entries[count].vddc);
-		table->UvdLevel[count].MinVoltage.VddGfx =
-			(data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
-			tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
-						mm_table->entries[count].vddgfx) : 0;
-		table->UvdLevel[count].MinVoltage.Vddci =
-			tonga_get_voltage_id(&data->vddci_voltage_table,
-					     mm_table->entries[count].vddc - data->vddc_vddci_delta);
-		table->UvdLevel[count].MinVoltage.Phases = 1;
-
-		/* retrieve divider value for VBIOS */
-		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-							  table->UvdLevel[count].VclkFrequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				    "can not find divide id for Vclk clock", return result);
-
-		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
-		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-							  table->UvdLevel[count].DclkFrequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				    "can not find divide id for Dclk clock", return result);
-
-		table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
-		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
-		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
-		//CONVERT_FROM_HOST_TO_SMC_UL((uint32_t)table->UvdLevel[count].MinVoltage);
-	}
-
-	return result;
-
-}
-
-static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
-		SMU72_Discrete_DpmTable *table)
-{
-	int result = 0;
-
-	uint8_t count;
-	pp_atomctrl_clock_dividers_vi dividers;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
-	table->VceLevelCount = (uint8_t) (mm_table->count);
-	table->VceBootLevel = 0;
-
-	for (count = 0; count < table->VceLevelCount; count++) {
-		table->VceLevel[count].Frequency =
-			mm_table->entries[count].eclk;
-		table->VceLevel[count].MinVoltage.Vddc =
-			tonga_get_voltage_index(pptable_info->vddc_lookup_table,
-				mm_table->entries[count].vddc);
-		table->VceLevel[count].MinVoltage.VddGfx =
-			(data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
-			tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
-				mm_table->entries[count].vddgfx) : 0;
-		table->VceLevel[count].MinVoltage.Vddci =
-			tonga_get_voltage_id(&data->vddci_voltage_table,
-				mm_table->entries[count].vddc - data->vddc_vddci_delta);
-		table->VceLevel[count].MinVoltage.Phases = 1;
-
-		/* retrieve divider value for VBIOS */
-		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-					table->VceLevel[count].Frequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"can not find divide id for VCE engine clock", return result);
-
-		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
-	}
-
-	return result;
-}
-
-static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
-		SMU72_Discrete_DpmTable *table)
-{
-	int result = 0;
-	uint8_t count;
-	pp_atomctrl_clock_dividers_vi dividers;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
-	table->AcpLevelCount = (uint8_t) (mm_table->count);
-	table->AcpBootLevel = 0;
-
-	for (count = 0; count < table->AcpLevelCount; count++) {
-		table->AcpLevel[count].Frequency =
-			pptable_info->mm_dep_table->entries[count].aclk;
-		table->AcpLevel[count].MinVoltage.Vddc =
-			tonga_get_voltage_index(pptable_info->vddc_lookup_table,
-			mm_table->entries[count].vddc);
-		table->AcpLevel[count].MinVoltage.VddGfx =
-			(data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
-			tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
-				mm_table->entries[count].vddgfx) : 0;
-		table->AcpLevel[count].MinVoltage.Vddci =
-			tonga_get_voltage_id(&data->vddci_voltage_table,
-				mm_table->entries[count].vddc - data->vddc_vddci_delta);
-		table->AcpLevel[count].MinVoltage.Phases = 1;
-
-		/* retrieve divider value for VBIOS */
-		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-			table->AcpLevel[count].Frequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-			"can not find divide id for engine clock", return result);
-
-		table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-		CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
-	}
-
-	return result;
-}
-
-static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-	SMU72_Discrete_DpmTable *table)
-{
-	int result = 0;
-	uint8_t count;
-	pp_atomctrl_clock_dividers_vi dividers;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
-	table->SamuBootLevel = 0;
-	table->SamuLevelCount = (uint8_t) (mm_table->count);
-
-	for (count = 0; count < table->SamuLevelCount; count++) {
-		/* not sure whether we need evclk or not */
-		table->SamuLevel[count].Frequency =
-			pptable_info->mm_dep_table->entries[count].samclock;
-		table->SamuLevel[count].MinVoltage.Vddc =
-			tonga_get_voltage_index(pptable_info->vddc_lookup_table,
-				mm_table->entries[count].vddc);
-		table->SamuLevel[count].MinVoltage.VddGfx =
-			(data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
-			tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
-				mm_table->entries[count].vddgfx) : 0;
-		table->SamuLevel[count].MinVoltage.Vddci =
-			tonga_get_voltage_id(&data->vddci_voltage_table,
-				mm_table->entries[count].vddc - data->vddc_vddci_delta);
-		table->SamuLevel[count].MinVoltage.Phases = 1;
-
-		/* retrieve divider value for VBIOS */
-		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-					table->SamuLevel[count].Frequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-			"can not find divide id for samu clock", return result);
-
-		table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-	}
-
-	return result;
-}
-
-/**
- * Populates the SMC MCLK structure using the provided memory clock
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    memory_clock the memory clock to use to populate the structure
- * @param    sclk        the SMC SCLK structure to be populated
- */
-static int tonga_calculate_mclk_params(
-		struct pp_hwmgr *hwmgr,
-		uint32_t memory_clock,
-		SMU72_Discrete_MemoryLevel *mclk,
-		bool strobe_mode,
-		bool dllStateOn
-		)
-{
-	const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	uint32_t  dll_cntl = data->clock_registers.vDLL_CNTL;
-	uint32_t  mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
-	uint32_t  mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
-	uint32_t  mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
-	uint32_t  mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
-	uint32_t  mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
-	uint32_t  mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
-	uint32_t  mpll_ss1 = data->clock_registers.vMPLL_SS1;
-	uint32_t  mpll_ss2 = data->clock_registers.vMPLL_SS2;
-
-	pp_atomctrl_memory_clock_param mpll_param;
-	int result;
-
-	result = atomctrl_get_memory_pll_dividers_si(hwmgr,
-				memory_clock, &mpll_param, strobe_mode);
-	PP_ASSERT_WITH_CODE(0 == result,
-		"Error retrieving Memory Clock Parameters from VBIOS.", return result);
-
-	/* MPLL_FUNC_CNTL setup*/
-	mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
-
-	/* MPLL_FUNC_CNTL_1 setup*/
-	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
-							MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
-	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
-							MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
-	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
-							MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
-
-	/* MPLL_AD_FUNC_CNTL setup*/
-	mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
-							MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
-
-	if (data->is_memory_GDDR5) {
-		/* MPLL_DQ_FUNC_CNTL setup*/
-		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
-								MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
-		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
-								MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
-	}
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
-		/*
-		 ************************************
-		 Fref = Reference Frequency
-		 NF = Feedback divider ratio
-		 NR = Reference divider ratio
-		 Fnom = Nominal VCO output frequency = Fref * NF / NR
-		 Fs = Spreading Rate
-		 D = Percentage down-spread / 2
-		 Fint = Reference input frequency to PFD = Fref / NR
-		 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
-		 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
-		 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
-		 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
-		 *************************************
-		 */
-		pp_atomctrl_internal_ss_info ss_info;
-		uint32_t freq_nom;
-		uint32_t tmp;
-		uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
-
-		/* for GDDR5 for all modes and DDR3 */
-		if (1 == mpll_param.qdr)
-			freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
-		else
-			freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
-
-		/* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1*/
-		tmp = (freq_nom / reference_clock);
-		tmp = tmp * tmp;
-
-		if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
-			/* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
-			/* ss.Info.speed_spectrum_rate -- in unit of khz */
-			/* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
-			/*     = reference_clock * 5 / speed_spectrum_rate */
-			uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
-
-			/* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
-			/*     = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
-			uint32_t clkv =
-				(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
-							ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
-
-			mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
-			mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
-		}
-	}
-
-	/* MCLK_PWRMGT_CNTL setup */
-	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-		MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
-	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
-	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
-
-
-	/* Save the result data to outpupt memory level structure */
-	mclk->MclkFrequency   = memory_clock;
-	mclk->MpllFuncCntl    = mpll_func_cntl;
-	mclk->MpllFuncCntl_1  = mpll_func_cntl_1;
-	mclk->MpllFuncCntl_2  = mpll_func_cntl_2;
-	mclk->MpllAdFuncCntl  = mpll_ad_func_cntl;
-	mclk->MpllDqFuncCntl  = mpll_dq_func_cntl;
-	mclk->MclkPwrmgtCntl  = mclk_pwrmgt_cntl;
-	mclk->DllCntl         = dll_cntl;
-	mclk->MpllSs1         = mpll_ss1;
-	mclk->MpllSs2         = mpll_ss2;
-
-	return 0;
-}
-
-static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
-		bool strobe_mode)
-{
-	uint8_t mc_para_index;
-
-	if (strobe_mode) {
-		if (memory_clock < 12500) {
-			mc_para_index = 0x00;
-		} else if (memory_clock > 47500) {
-			mc_para_index = 0x0f;
-		} else {
-			mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
-		}
-	} else {
-		if (memory_clock < 65000) {
-			mc_para_index = 0x00;
-		} else if (memory_clock > 135000) {
-			mc_para_index = 0x0f;
-		} else {
-			mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
-		}
-	}
-
-	return mc_para_index;
-}
-
-static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
-{
-	uint8_t mc_para_index;
-
-	if (memory_clock < 10000) {
-		mc_para_index = 0;
-	} else if (memory_clock >= 80000) {
-		mc_para_index = 0x0f;
-	} else {
-		mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
-	}
-
-	return mc_para_index;
-}
-
-static int tonga_populate_single_memory_level(
-		struct pp_hwmgr *hwmgr,
-		uint32_t memory_clock,
-		SMU72_Discrete_MemoryLevel *memory_level
-		)
-{
-	uint32_t minMvdd = 0;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-	int result = 0;
-	bool dllStateOn;
-	struct cgs_display_info info = {0};
-
-
-	if (NULL != pptable_info->vdd_dep_on_mclk) {
-		result = tonga_get_dependecy_volt_by_clk(hwmgr,
-			pptable_info->vdd_dep_on_mclk, memory_clock, &memory_level->MinVoltage, &minMvdd);
-		PP_ASSERT_WITH_CODE((0 == result),
-			"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
-	}
-
-	if (data->mvdd_control == TONGA_VOLTAGE_CONTROL_NONE) {
-		memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
-	} else {
-		memory_level->MinMvdd = minMvdd;
-	}
-	memory_level->EnabledForThrottle = 1;
-	memory_level->EnabledForActivity = 0;
-	memory_level->UpHyst = 0;
-	memory_level->DownHyst = 100;
-	memory_level->VoltageDownHyst = 0;
-
-	/* Indicates maximum activity level for this performance level.*/
-	memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
-	memory_level->StutterEnable = 0;
-	memory_level->StrobeEnable = 0;
-	memory_level->EdcReadEnable = 0;
-	memory_level->EdcWriteEnable = 0;
-	memory_level->RttEnable = 0;
-
-	/* default set to low watermark. Highest level will be set to high later.*/
-	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-	data->display_timing.num_existing_displays = info.display_count;
-
-	if ((data->mclk_stutter_mode_threshold != 0) &&
-	    (memory_clock <= data->mclk_stutter_mode_threshold) &&
-	    (!data->is_uvd_enabled)
-	    && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
-	    && (data->display_timing.num_existing_displays <= 2)
-	    && (data->display_timing.num_existing_displays != 0))
-		memory_level->StutterEnable = 1;
-
-	/* decide strobe mode*/
-	memory_level->StrobeEnable = (data->mclk_strobe_mode_threshold != 0) &&
-		(memory_clock <= data->mclk_strobe_mode_threshold);
-
-	/* decide EDC mode and memory clock ratio*/
-	if (data->is_memory_GDDR5) {
-		memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
-					memory_level->StrobeEnable);
-
-		if ((data->mclk_edc_enable_threshold != 0) &&
-				(memory_clock > data->mclk_edc_enable_threshold)) {
-			memory_level->EdcReadEnable = 1;
-		}
-
-		if ((data->mclk_edc_wr_enable_threshold != 0) &&
-				(memory_clock > data->mclk_edc_wr_enable_threshold)) {
-			memory_level->EdcWriteEnable = 1;
-		}
-
-		if (memory_level->StrobeEnable) {
-			if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
-					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
-				dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
-			} else {
-				dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
-			}
-
-		} else {
-			dllStateOn = data->dll_defaule_on;
-		}
-	} else {
-		memory_level->StrobeRatio =
-			tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
-		dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
-	}
-
-	result = tonga_calculate_mclk_params(hwmgr,
-		memory_clock, memory_level, memory_level->StrobeEnable, dllStateOn);
-
-	if (0 == result) {
-		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
-		/* MCLK frequency in units of 10KHz*/
-		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
-		/* Indicates maximum activity level for this performance level.*/
-		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
-		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
-		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
-		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
-		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
-		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
-		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
-		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
-		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
-		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
-	}
-
-	return result;
-}
-
-/**
- * Populates the SMC MVDD structure using the provided memory clock.
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    mclk        the MCLK value to be used in the decision if MVDD should be high or low.
- * @param    voltage     the SMC VOLTAGE structure to be populated
- */
-int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, SMIO_Pattern *smio_pattern)
-{
-	const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-	uint32_t i = 0;
-
-	if (TONGA_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-		/* find mvdd value which clock is more than request */
-		for (i = 0; i < pptable_info->vdd_dep_on_mclk->count; i++) {
-			if (mclk <= pptable_info->vdd_dep_on_mclk->entries[i].clk) {
-				/* Always round to higher voltage. */
-				smio_pattern->Voltage = data->mvdd_voltage_table.entries[i].value;
-				break;
-			}
-		}
-
-		PP_ASSERT_WITH_CODE(i < pptable_info->vdd_dep_on_mclk->count,
-			"MVDD Voltage is outside the supported range.", return -1);
-
-	} else {
-		return -1;
-	}
-
-	return 0;
-}
-
-
-static int tonga_populate_smv_acpi_level(struct pp_hwmgr *hwmgr,
-	SMU72_Discrete_DpmTable *table)
-{
-	int result = 0;
-	const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	pp_atomctrl_clock_dividers_vi dividers;
-	SMIO_Pattern voltage_level;
-	uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
-	uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
-	uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
-	uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;
-
-	/* The ACPI state should not do DPM on DC (or ever).*/
-	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
-	table->ACPILevel.MinVoltage = data->smc_state_table.GraphicsLevel[0].MinVoltage;
-
-	/* assign zero for now*/
-	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
-
-	/* get the engine clock dividers for this clock value*/
-	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
-		table->ACPILevel.SclkFrequency,  &dividers);
-
-	PP_ASSERT_WITH_CODE(result == 0,
-		"Error retrieving Engine Clock dividers from VBIOS.", return result);
-
-	/* divider ID for required SCLK*/
-	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
-	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-	table->ACPILevel.DeepSleepDivId = 0;
-
-	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
-							CG_SPLL_FUNC_CNTL,   SPLL_PWRON,     0);
-	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
-							CG_SPLL_FUNC_CNTL,   SPLL_RESET,     1);
-	spll_func_cntl_2    = PHM_SET_FIELD(spll_func_cntl_2,
-							CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL,   4);
-
-	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
-	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
-	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
-	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
-	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
-	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
-	table->ACPILevel.CcPwrDynRm = 0;
-	table->ACPILevel.CcPwrDynRm1 = 0;
-
-
-	/* For various features to be enabled/disabled while this level is active.*/
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
-	/* SCLK frequency in units of 10KHz*/
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
-	/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
-	table->MemoryACPILevel.MinVoltage = data->smc_state_table.MemoryLevel[0].MinVoltage;
-
-	/*  CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
-
-	if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
-		table->MemoryACPILevel.MinMvdd =
-			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
-	else
-		table->MemoryACPILevel.MinMvdd = 0;
-
-	/* Force reset on DLL*/
-	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
-	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
-
-	/* Disable DLL in ACPIState*/
-	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
-	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
-
-	/* Enable DLL bypass signal*/
-	dll_cntl            = PHM_SET_FIELD(dll_cntl,
-		DLL_CNTL, MRDCK0_BYPASS, 0);
-	dll_cntl            = PHM_SET_FIELD(dll_cntl,
-		DLL_CNTL, MRDCK1_BYPASS, 0);
-
-	table->MemoryACPILevel.DllCntl            =
-		PP_HOST_TO_SMC_UL(dll_cntl);
-	table->MemoryACPILevel.MclkPwrmgtCntl     =
-		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
-	table->MemoryACPILevel.MpllAdFuncCntl     =
-		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
-	table->MemoryACPILevel.MpllDqFuncCntl     =
-		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
-	table->MemoryACPILevel.MpllFuncCntl       =
-		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
-	table->MemoryACPILevel.MpllFuncCntl_1     =
-		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
-	table->MemoryACPILevel.MpllFuncCntl_2     =
-		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
-	table->MemoryACPILevel.MpllSs1            =
-		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
-	table->MemoryACPILevel.MpllSs2            =
-		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
-
-	table->MemoryACPILevel.EnabledForThrottle = 0;
-	table->MemoryACPILevel.EnabledForActivity = 0;
-	table->MemoryACPILevel.UpHyst = 0;
-	table->MemoryACPILevel.DownHyst = 100;
-	table->MemoryACPILevel.VoltageDownHyst = 0;
-	/* Indicates maximum activity level for this performance level.*/
-	table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
-	table->MemoryACPILevel.StutterEnable = 0;
-	table->MemoryACPILevel.StrobeEnable = 0;
-	table->MemoryACPILevel.EdcReadEnable = 0;
-	table->MemoryACPILevel.EdcWriteEnable = 0;
-	table->MemoryACPILevel.RttEnable = 0;
-
-	return result;
-}
-
-static int tonga_find_boot_level(struct tonga_single_dpm_table *table, uint32_t value, uint32_t *boot_level)
-{
-	int result = 0;
-	uint32_t i;
-
-	for (i = 0; i < table->count; i++) {
-		if (value == table->dpm_levels[i].value) {
-			*boot_level = i;
-			result = 0;
-		}
-	}
-	return result;
-}
-
-static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
-			SMU72_Discrete_DpmTable *table)
-{
-	int result = 0;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-	table->GraphicsBootLevel  = 0;        /* 0 == DPM[0] (low), etc. */
-	table->MemoryBootLevel    = 0;        /* 0 == DPM[0] (low), etc. */
-
-	/* find boot level from dpm table*/
-	result = tonga_find_boot_level(&(data->dpm_table.sclk_table),
-	data->vbios_boot_state.sclk_bootup_value,
-	(uint32_t *)&(data->smc_state_table.GraphicsBootLevel));
-
-	if (0 != result) {
-		data->smc_state_table.GraphicsBootLevel = 0;
-		printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \
-			in dependency table. Using Graphics DPM level 0!");
-		result = 0;
-	}
-
-	result = tonga_find_boot_level(&(data->dpm_table.mclk_table),
-		data->vbios_boot_state.mclk_bootup_value,
-		(uint32_t *)&(data->smc_state_table.MemoryBootLevel));
-
-	if (0 != result) {
-		data->smc_state_table.MemoryBootLevel = 0;
-		printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \
-			in dependency table. Using Memory DPM level 0!");
-		result = 0;
-	}
-
-	table->BootVoltage.Vddc =
-		tonga_get_voltage_id(&(data->vddc_voltage_table),
-			data->vbios_boot_state.vddc_bootup_value);
-	table->BootVoltage.VddGfx =
-		tonga_get_voltage_id(&(data->vddgfx_voltage_table),
-			data->vbios_boot_state.vddgfx_bootup_value);
-	table->BootVoltage.Vddci =
-		tonga_get_voltage_id(&(data->vddci_voltage_table),
-			data->vbios_boot_state.vddci_bootup_value);
-	table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
-
-	CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
-	return result;
-}
-
-
-/**
- * Calculates the SCLK dividers using the provided engine clock
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    engine_clock the engine clock to use to populate the structure
- * @param    sclk        the SMC SCLK structure to be populated
- */
-int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
-		uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
-{
-	const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	pp_atomctrl_clock_dividers_vi dividers;
-	uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
-	uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
-	uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
-	uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
-	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
-	uint32_t    reference_clock;
-	uint32_t reference_divider;
-	uint32_t fbdiv;
-	int result;
-
-	/* get the engine clock dividers for this clock value*/
-	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock,  &dividers);
-
-	PP_ASSERT_WITH_CODE(result == 0,
-		"Error retrieving Engine Clock dividers from VBIOS.", return result);
-
-	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
-	reference_clock = atomctrl_get_reference_clock(hwmgr);
-
-	reference_divider = 1 + dividers.uc_pll_ref_div;
-
-	/* low 14 bits is fraction and high 12 bits is divider*/
-	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
-	/* SPLL_FUNC_CNTL setup*/
-	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
-		CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
-	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
-		CG_SPLL_FUNC_CNTL, SPLL_PDIV_A,  dividers.uc_pll_post_div);
-
-	/* SPLL_FUNC_CNTL_3 setup*/
-	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
-		CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
-
-	/* set to use fractional accumulation*/
-	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
-		CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
-		pp_atomctrl_internal_ss_info ss_info;
-
-		uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
-		if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
-			/*
-			* ss_info.speed_spectrum_percentage -- in unit of 0.01%
-			* ss_info.speed_spectrum_rate -- in unit of khz
-			*/
-			/* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
-			uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
-
-			/* clkv = 2 * D * fbdiv / NS */
-			uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
-
-			cg_spll_spread_spectrum =
-				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
-			cg_spll_spread_spectrum =
-				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
-			cg_spll_spread_spectrum_2 =
-				PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
-		}
-	}
-
-	sclk->SclkFrequency        = engine_clock;
-	sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
-	sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
-	sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
-	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
-	sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
-
-	return 0;
-}
-
-static uint8_t tonga_get_sleep_divider_id_from_clock(uint32_t engine_clock,
-		uint32_t min_engine_clock_in_sr)
-{
-	uint32_t i, temp;
-	uint32_t min = max(min_engine_clock_in_sr, (uint32_t)TONGA_MINIMUM_ENGINE_CLOCK);
-
-	PP_ASSERT_WITH_CODE((engine_clock >= min),
-			"Engine clock can't satisfy stutter requirement!", return 0);
-
-	for (i = TONGA_MAX_DEEPSLEEP_DIVIDER_ID;; i--) {
-		temp = engine_clock >> i;
-
-		if(temp >= min || i == 0)
-			break;
-	}
-	return (uint8_t)i;
-}
-
-/**
- * Populates single SMC SCLK structure using the provided engine clock
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    engine_clock the engine clock to use to populate the structure
- * @param    sclk        the SMC SCLK structure to be populated
- */
-static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, uint32_t engine_clock, uint16_t sclk_activity_level_threshold, SMU72_Discrete_GraphicsLevel *graphic_level)
-{
-	int result;
-	uint32_t threshold;
-	uint32_t mvdd;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
-
-
-	/* populate graphics levels*/
-	result = tonga_get_dependecy_volt_by_clk(hwmgr,
-		pptable_info->vdd_dep_on_sclk, engine_clock,
-		&graphic_level->MinVoltage, &mvdd);
-	PP_ASSERT_WITH_CODE((0 == result),
-		"can not find VDDC voltage value for VDDC	\
-		engine clock dependency table", return result);
-
-	/* SCLK frequency in units of 10KHz*/
-	graphic_level->SclkFrequency = engine_clock;
-
-	/* Indicates maximum activity level for this performance level. 50% for now*/
-	graphic_level->ActivityLevel = sclk_activity_level_threshold;
-
-	graphic_level->CcPwrDynRm = 0;
-	graphic_level->CcPwrDynRm1 = 0;
-	/* this level can be used if activity is high enough.*/
-	graphic_level->EnabledForActivity = 0;
-	/* this level can be used for throttling.*/
-	graphic_level->EnabledForThrottle = 1;
-	graphic_level->UpHyst = 0;
-	graphic_level->DownHyst = 0;
-	graphic_level->VoltageDownHyst = 0;
-	graphic_level->PowerThrottle = 0;
-
-	threshold = engine_clock * data->fast_watemark_threshold / 100;
-/*
-	*get the DAL clock. do it in funture.
-	PECI_GetMinClockSettings(hwmgr->peci, &minClocks);
-	data->display_timing.min_clock_insr = minClocks.engineClockInSR;
-*/
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_SclkDeepSleep))
-		graphic_level->DeepSleepDivId =
-				tonga_get_sleep_divider_id_from_clock(engine_clock,
-						data->display_timing.min_clock_insr);
-
-	/* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
-	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-	if (0 == result) {
-		/* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
-		/* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
-		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
-		CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
-		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
-		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
-		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
-		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
-		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
-		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
-	}
-
-	return result;
-}
-
-/**
- * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
- *
- * @param    hwmgr      the address of the hardware manager
- */
-static int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct tonga_dpm_table *dpm_table = &data->dpm_table;
-	phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
-	uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
-	int result = 0;
-	uint32_t level_array_adress = data->dpm_table_start +
-		offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
-	uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
-		SMU72_MAX_LEVELS_GRAPHICS;   /* 64 -> long; 32 -> int*/
-	SMU72_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel;
-	uint32_t i, maxEntry;
-	uint8_t highest_pcie_level_enabled = 0, lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0, count = 0;
-	PECI_RegistryValue reg_value;
-	memset(levels, 0x00, level_array_size);
-
-	for (i = 0; i < dpm_table->sclk_table.count; i++) {
-		result = tonga_populate_single_graphic_level(hwmgr,
-					dpm_table->sclk_table.dpm_levels[i].value,
-					(uint16_t)data->activity_target[i],
-					&(data->smc_state_table.GraphicsLevel[i]));
-
-		if (0 != result)
-			return result;
-
-		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
-		if (i > 1)
-			data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
-
-		if (0 == i) {
-			reg_value = 0;
-			if (reg_value != 0)
-				data->smc_state_table.GraphicsLevel[0].UpHyst = (uint8_t)reg_value;
-		}
-
-		if (1 == i) {
-			reg_value = 0;
-			if (reg_value != 0)
-				data->smc_state_table.GraphicsLevel[1].UpHyst = (uint8_t)reg_value;
-		}
-	}
-
-	/* Only enable level 0 for now. */
-	data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
-
-	/* set highest level watermark to high */
-	if (dpm_table->sclk_table.count > 1)
-		data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
-			PPSMC_DISPLAY_WATERMARK_HIGH;
-
-	data->smc_state_table.GraphicsDpmLevelCount =
-		(uint8_t)dpm_table->sclk_table.count;
-	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
-		tonga_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
-	if (pcie_table != NULL) {
-		PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
-			"There must be 1 or more PCIE levels defined in PPTable.", return -1);
-		maxEntry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
-		for (i = 0; i < dpm_table->sclk_table.count; i++) {
-			data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
-				(uint8_t) ((i < maxEntry) ? i : maxEntry);
-		}
-	} else {
-		if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
-			printk(KERN_ERR "[ powerplay ] Pcie Dpm Enablemask is 0!");
-
-		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
-				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-					(1<<(highest_pcie_level_enabled+1))) != 0)) {
-			highest_pcie_level_enabled++;
-		}
-
-		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
-				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-					(1<<lowest_pcie_level_enabled)) == 0)) {
-			lowest_pcie_level_enabled++;
-		}
-
-		while ((count < highest_pcie_level_enabled) &&
-				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-					(1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
-			count++;
-		}
-		mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
-			(lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
-
-
-		/* set pcieDpmLevel to highest_pcie_level_enabled*/
-		for (i = 2; i < dpm_table->sclk_table.count; i++) {
-			data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
-		}
-
-		/* set pcieDpmLevel to lowest_pcie_level_enabled*/
-		data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
-		/* set pcieDpmLevel to mid_pcie_level_enabled*/
-		data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
-	}
-	/* level count will send to smc once at init smc table and never change*/
-	result = tonga_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
-
-	if (0 != result)
-		return result;
-
-	return 0;
-}
-
-/**
- * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
- *
- * @param    hwmgr      the address of the hardware manager
- */
-
-static int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct tonga_dpm_table *dpm_table = &data->dpm_table;
-	int result;
-	/* populate MCLK dpm table to SMU7 */
-	uint32_t level_array_adress = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
-	uint32_t level_array_size = sizeof(SMU72_Discrete_MemoryLevel) * SMU72_MAX_LEVELS_MEMORY;
-	SMU72_Discrete_MemoryLevel *levels = data->smc_state_table.MemoryLevel;
-	uint32_t i;
-
-	memset(levels, 0x00, level_array_size);
-
-	for (i = 0; i < dpm_table->mclk_table.count; i++) {
-		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
-			"can not populate memory level as memory clock is zero", return -1);
-		result = tonga_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
-			&(data->smc_state_table.MemoryLevel[i]));
-		if (0 != result) {
-			return result;
-		}
-	}
-
-	/* Only enable level 0 for now.*/
-	data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
-
-	/*
-	* in order to prevent MC activity from stutter mode to push DPM up.
-	* the UVD change complements this by putting the MCLK in a higher state
-	* by default such that we are not effected by up threshold or and MCLK DPM latency.
-	*/
-	data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
-	CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.MemoryLevel[0].ActivityLevel);
-
-	data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
-	data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
-	/* set highest level watermark to high*/
-	data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
-
-	/* level count will send to smc once at init smc table and never change*/
-	result = tonga_copy_bytes_to_smc(hwmgr->smumgr,
-		level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
-
-	if (0 != result) {
-		return result;
-	}
-
-	return 0;
-}
-
-struct TONGA_DLL_SPEED_SETTING {
-	uint16_t            Min;                          /* Minimum Data Rate*/
-	uint16_t            Max;                          /* Maximum Data Rate*/
-	uint32_t			dll_speed;                     /* The desired DLL_SPEED setting*/
-};
-
-static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
-	return 0;
-}
-
-/* ---------------------------------------- ULV related functions ----------------------------------------------------*/
-
-
-static int tonga_reset_single_dpm_table(
-	struct pp_hwmgr *hwmgr,
-	struct tonga_single_dpm_table *dpm_table,
-	uint32_t count)
-{
-	uint32_t i;
-	if (!(count <= MAX_REGULAR_DPM_NUMBER))
-		printk(KERN_ERR "[ powerplay ] Fatal error, can not set up single DPM \
-			table entries to exceed max number! \n");
-
-	dpm_table->count = count;
-	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) {
-		dpm_table->dpm_levels[i].enabled = false;
-	}
-
-	return 0;
-}
-
-static void tonga_setup_pcie_table_entry(
-	struct tonga_single_dpm_table *dpm_table,
-	uint32_t index, uint32_t pcie_gen,
-	uint32_t pcie_lanes)
-{
-	dpm_table->dpm_levels[index].value = pcie_gen;
-	dpm_table->dpm_levels[index].param1 = pcie_lanes;
-	dpm_table->dpm_levels[index].enabled = true;
-}
-
-static int tonga_setup_default_pcie_tables(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-	phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
-	uint32_t i, maxEntry;
-
-	if (data->use_pcie_performance_levels && !data->use_pcie_power_saving_levels) {
-		data->pcie_gen_power_saving = data->pcie_gen_performance;
-		data->pcie_lane_power_saving = data->pcie_lane_performance;
-	} else if (!data->use_pcie_performance_levels && data->use_pcie_power_saving_levels) {
-		data->pcie_gen_performance = data->pcie_gen_power_saving;
-		data->pcie_lane_performance = data->pcie_lane_power_saving;
-	}
-
-	tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.pcie_speed_table, SMU72_MAX_LEVELS_LINK);
-
-	if (pcie_table != NULL) {
-		/*
-		* maxEntry is used to make sure we reserve one PCIE level for boot level (fix for A+A PSPP issue).
-		* If PCIE table from PPTable have ULV entry + 8 entries, then ignore the last entry.
-		*/
-		maxEntry = (SMU72_MAX_LEVELS_LINK < pcie_table->count) ?
-						SMU72_MAX_LEVELS_LINK : pcie_table->count;
-		for (i = 1; i < maxEntry; i++) {
-			tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i-1,
-				get_pcie_gen_support(data->pcie_gen_cap, pcie_table->entries[i].gen_speed),
-				get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
-		}
-		data->dpm_table.pcie_speed_table.count = maxEntry - 1;
-	} else {
-		/* Hardcode Pcie Table */
-		tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
-			get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
-			get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
-		tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
-			get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
-			get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
-		tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
-			get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
-			get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
-		tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
-			get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
-			get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
-		tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
-			get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
-			get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
-		tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
-			get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
-			get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
-		data->dpm_table.pcie_speed_table.count = 6;
-	}
-	/* Populate last level for boot PCIE level, but do not increment count. */
-	tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
-		data->dpm_table.pcie_speed_table.count,
-		get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
-		get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
-
-	return 0;
-
-}
-
-/*
- * This function is to initalize all DPM state tables for SMU7 based on the dependency table.
- * Dynamic state patching function will then trim these state tables to the allowed range based
- * on the power policy or external client requests, such as UVD request, etc.
- */
-static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-	uint32_t i;
-
-	phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_sclk_table =
-		pptable_info->vdd_dep_on_sclk;
-	phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_mclk_table =
-		pptable_info->vdd_dep_on_mclk;
-
-	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
-		"SCLK dependency table is missing. This table is mandatory", return -1);
-	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
-		"SCLK dependency table has to have is missing. This table is mandatory", return -1);
-
-	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
-		"MCLK dependency table is missing. This table is mandatory", return -1);
-	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
-		"VMCLK dependency table has to have is missing. This table is mandatory", return -1);
-
-	/* clear the state table to reset everything to default */
-	memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
-	tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.sclk_table, SMU72_MAX_LEVELS_GRAPHICS);
-	tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.mclk_table, SMU72_MAX_LEVELS_MEMORY);
-	/* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.VddcTable, SMU72_MAX_LEVELS_VDDC); */
-	/* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_gfx_table, SMU72_MAX_LEVELS_VDDGFX);*/
-	/* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_ci_table, SMU72_MAX_LEVELS_VDDCI);*/
-	/* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.mvdd_table, SMU72_MAX_LEVELS_MVDD);*/
-
-	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
-		"SCLK dependency table is missing. This table is mandatory", return -1);
-	/* Initialize Sclk DPM table based on allow Sclk values*/
-	data->dpm_table.sclk_table.count = 0;
-
-	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
-		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
-				allowed_vdd_sclk_table->entries[i].clk) {
-			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
-				allowed_vdd_sclk_table->entries[i].clk;
-			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = true; /*(i==0) ? 1 : 0; to do */
-			data->dpm_table.sclk_table.count++;
-		}
-	}
-
-	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
-		"MCLK dependency table is missing. This table is mandatory", return -1);
-	/* Initialize Mclk DPM table based on allow Mclk values */
-	data->dpm_table.mclk_table.count = 0;
-	for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
-		if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
-			allowed_vdd_mclk_table->entries[i].clk) {
-			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
-				allowed_vdd_mclk_table->entries[i].clk;
-			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = true; /*(i==0) ? 1 : 0; */
-			data->dpm_table.mclk_table.count++;
-		}
-	}
-
-	/* setup PCIE gen speed levels*/
-	tonga_setup_default_pcie_tables(hwmgr);
-
-	/* save a copy of the default DPM table*/
-	memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct tonga_dpm_table));
-
-	return 0;
-}
-
-int tonga_populate_smc_initial_state(struct pp_hwmgr *hwmgr,
-		const struct tonga_power_state *bootState)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-	uint8_t count, level;
-
-	count = (uint8_t) (pptable_info->vdd_dep_on_sclk->count);
-	for (level = 0; level < count; level++) {
-		if (pptable_info->vdd_dep_on_sclk->entries[level].clk >=
-			bootState->performance_levels[0].engine_clock) {
-			data->smc_state_table.GraphicsBootLevel = level;
-			break;
-		}
-	}
-
-	count = (uint8_t) (pptable_info->vdd_dep_on_mclk->count);
-	for (level = 0; level < count; level++) {
-		if (pptable_info->vdd_dep_on_mclk->entries[level].clk >=
-			bootState->performance_levels[0].memory_clock) {
-			data->smc_state_table.MemoryBootLevel = level;
-			break;
-		}
-	}
-
-	return 0;
-}
-
-/**
- * Initializes the SMC table and uploads it
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @param    pInput  the pointer to input data (PowerState)
- * @return   always 0
- */
-int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
-{
-	int result;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-	SMU72_Discrete_DpmTable  *table = &(data->smc_state_table);
-	const phw_tonga_ulv_parm *ulv = &(data->ulv);
-	uint8_t i;
-	PECI_RegistryValue reg_value;
-	pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
-
-	result = tonga_setup_default_dpm_tables(hwmgr);
-	PP_ASSERT_WITH_CODE(0 == result,
-		"Failed to setup default DPM tables!", return result;);
-	memset(&(data->smc_state_table), 0x00, sizeof(data->smc_state_table));
-	if (TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control) {
-		tonga_populate_smc_voltage_tables(hwmgr, table);
-	}
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_AutomaticDCTransition)) {
-		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-	}
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_StepVddc)) {
-		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-	}
-
-	if (data->is_memory_GDDR5) {
-		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-	}
-
-	i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
-
-	if (i == 1 || i == 0) {
-		table->SystemFlags |= PPSMC_SYSTEMFLAG_12CHANNEL;
-	}
-
-	if (ulv->ulv_supported && pptable_info->us_ulv_voltage_offset) {
-		PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to initialize ULV state!", return result;);
-
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCG_ULV_PARAMETER, ulv->ch_ulv_parameter);
-	}
-
-	result = tonga_populate_smc_link_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-		"Failed to initialize Link Level!", return result;);
-
-	result = tonga_populate_all_graphic_levels(hwmgr);
-	PP_ASSERT_WITH_CODE(0 == result,
-		"Failed to initialize Graphics Level!", return result;);
-
-	result = tonga_populate_all_memory_levels(hwmgr);
-	PP_ASSERT_WITH_CODE(0 == result,
-		"Failed to initialize Memory Level!", return result;);
-
-	result = tonga_populate_smv_acpi_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-		"Failed to initialize ACPI Level!", return result;);
-
-	result = tonga_populate_smc_vce_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-		"Failed to initialize VCE Level!", return result;);
-
-	result = tonga_populate_smc_acp_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-		"Failed to initialize ACP Level!", return result;);
-
-	result = tonga_populate_smc_samu_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-		"Failed to initialize SAMU Level!", return result;);
-
-	/* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
-	/* need to populate the  ARB settings for the initial state. */
-	result = tonga_program_memory_timing_parameters(hwmgr);
-	PP_ASSERT_WITH_CODE(0 == result,
-		"Failed to Write ARB settings for the initial state.", return result;);
-
-	result = tonga_populate_smc_uvd_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-		"Failed to initialize UVD Level!", return result;);
-
-	result = tonga_populate_smc_boot_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-		"Failed to initialize Boot Level!", return result;);
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_ClockStretcher)) {
-		result = tonga_populate_clock_stretcher_data_table(hwmgr);
-		PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to populate Clock Stretcher Data Table!", return result;);
-	}
-	table->GraphicsVoltageChangeEnable  = 1;
-	table->GraphicsThermThrottleEnable  = 1;
-	table->GraphicsInterval = 1;
-	table->VoltageInterval  = 1;
-	table->ThermalInterval  = 1;
-	table->TemperatureLimitHigh =
-		pptable_info->cac_dtp_table->usTargetOperatingTemp *
-		TONGA_Q88_FORMAT_CONVERSION_UNIT;
-	table->TemperatureLimitLow =
-		(pptable_info->cac_dtp_table->usTargetOperatingTemp - 1) *
-		TONGA_Q88_FORMAT_CONVERSION_UNIT;
-	table->MemoryVoltageChangeEnable  = 1;
-	table->MemoryInterval  = 1;
-	table->VoltageResponseTime  = 0;
-	table->PhaseResponseTime  = 0;
-	table->MemoryThermThrottleEnable  = 1;
-
-	/*
-	* Cail reads current link status and reports it as cap (we cannot change this due to some previous issues we had)
-	* SMC drops the link status to lowest level after enabling DPM by PowerPlay. After pnp or toggling CF, driver gets reloaded again
-	* but this time Cail reads current link status which was set to low by SMC and reports it as cap to powerplay
-	* To avoid it, we set PCIeBootLinkLevel to highest dpm level
-	*/
-	PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
-			"There must be 1 or more PCIE levels defined in PPTable.",
-			return -1);
-
-	table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
-
-	table->PCIeGenInterval  = 1;
-
-	result = tonga_populate_vr_config(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-		"Failed to populate VRConfig setting!", return result);
-
-	table->ThermGpio  = 17;
-	table->SclkStepSize = 0x4000;
-
-	reg_value = 0;
-	if ((0 == reg_value) &&
-		(atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
-						&gpio_pin_assignment))) {
-		table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_RegulatorHot);
-	} else {
-		table->VRHotGpio = TONGA_UNUSED_GPIO_PIN;
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_RegulatorHot);
-	}
-
-	/* ACDC Switch GPIO */
-	reg_value = 0;
-	if ((0 == reg_value) &&
-		(atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
-						&gpio_pin_assignment))) {
-		table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_AutomaticDCTransition);
-	} else {
-		table->AcDcGpio = TONGA_UNUSED_GPIO_PIN;
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_AutomaticDCTransition);
-	}
-
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-		PHM_PlatformCaps_Falcon_QuickTransition);
-
-	reg_value = 0;
-	if (1 == reg_value) {
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_AutomaticDCTransition);
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_Falcon_QuickTransition);
-	}
-
-	reg_value = 0;
-	if ((0 == reg_value) && (atomctrl_get_pp_assign_pin(hwmgr,
-			THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) {
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_ThermalOutGPIO);
-
-		table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
-
-		table->ThermOutPolarity =
-			(0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
-			(1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1:0;
-
-		table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
-		/* if required, combine VRHot/PCC with thermal out GPIO*/
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_RegulatorHot) &&
-			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_CombinePCCWithThermalSignal)){
-			table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
-		}
-	} else {
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_ThermalOutGPIO);
-
-		table->ThermOutGpio = 17;
-		table->ThermOutPolarity = 1;
-		table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
-	}
-
-	for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++) {
-		table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-	}
-	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
-	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
-	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
-	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
-	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
-	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
-	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
-	result = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start +
-										offsetof(SMU72_Discrete_DpmTable, SystemFlags),
-										(uint8_t *)&(table->SystemFlags),
-										sizeof(SMU72_Discrete_DpmTable)-3 * sizeof(SMU72_PIDController),
-										data->sram_end);
-
-	PP_ASSERT_WITH_CODE(0 == result,
-		"Failed to upload dpm data to SMC memory!", return result;);
-
-	return result;
-}
-
-/* Look up the voltaged based on DAL's requested level. and then send the requested VDDC voltage to SMC*/
-static void tonga_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr)
-{
-	return;
-}
-
-int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
-{
-	PPSMC_Result result;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-	/* Apply minimum voltage based on DAL's request level */
-	tonga_apply_dal_minimum_voltage_request(hwmgr);
-
-	if (0 == data->sclk_dpm_key_disabled) {
-		/* Checking if DPM is running.  If we discover hang because of this, we should skip this message.*/
-		if (tonga_is_dpm_running(hwmgr))
-			printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n");
-
-		if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
-			result = smum_send_msg_to_smc_with_parameter(
-								hwmgr->smumgr,
-				(PPSMC_Msg)PPSMC_MSG_SCLKDPM_SetEnabledMask,
-				data->dpm_level_enable_mask.sclk_dpm_enable_mask);
-			PP_ASSERT_WITH_CODE((0 == result),
-				"Set Sclk Dpm enable Mask failed", return -1);
-		}
-	}
-
-	if (0 == data->mclk_dpm_key_disabled) {
-		/* Checking if DPM is running.  If we discover hang because of this, we should skip this message.*/
-		if (tonga_is_dpm_running(hwmgr))
-			printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n");
-
-		if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
-			result = smum_send_msg_to_smc_with_parameter(
-								hwmgr->smumgr,
-				(PPSMC_Msg)PPSMC_MSG_MCLKDPM_SetEnabledMask,
-				data->dpm_level_enable_mask.mclk_dpm_enable_mask);
-			PP_ASSERT_WITH_CODE((0 == result),
-				"Set Mclk Dpm enable Mask failed", return -1);
-		}
-	}
-
-	return 0;
-}
-
-
-int tonga_force_dpm_highest(struct pp_hwmgr *hwmgr)
-{
-	uint32_t level, tmp;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-	if (0 == data->pcie_dpm_key_disabled) {
-		/* PCIE */
-		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) {
-			level = 0;
-			tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
-			while (tmp >>= 1)
-				level++ ;
-
-			if (0 != level) {
-				PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)),
-					"force highest pcie dpm state failed!", return -1);
-			}
-		}
-	}
-
-	if (0 == data->sclk_dpm_key_disabled) {
-		/* SCLK */
-		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask != 0) {
-			level = 0;
-			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
-			while (tmp >>= 1)
-				level++ ;
-
-			if (0 != level) {
-				PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)),
-					"force highest sclk dpm state failed!", return -1);
-				if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
-					CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level)
-					printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
-						Curr_Sclk_Index does not match the level \n");
-
-			}
-		}
-	}
-
-	if (0 == data->mclk_dpm_key_disabled) {
-		/* MCLK */
-		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) {
-			level = 0;
-			tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
-			while (tmp >>= 1)
-				level++ ;
-
-			if (0 != level) {
-				PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)),
-					"force highest mclk dpm state failed!", return -1);
-				if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-					TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level)
-					printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
-						Curr_Mclk_Index does not match the level \n");
-			}
-		}
-	}
-
-	return 0;
-}
-
-/**
- * Find the MC microcode version and store it in the HwMgr struct
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
-{
-	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
-
-	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
-
-	return 0;
-}
-
-/**
- * Initialize Dynamic State Adjustment Rule Settings
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- */
-int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
-{
-	uint32_t table_size;
-	struct phm_clock_voltage_dependency_table *table_clk_vlt;
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	hwmgr->dyn_state.mclk_sclk_ratio = 4;
-	hwmgr->dyn_state.sclk_mclk_delta = 15000;      /* 150 MHz */
-	hwmgr->dyn_state.vddc_vddci_delta = 200;       /* 200mV */
-
-	/* initialize vddc_dep_on_dal_pwrl table */
-	table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
-	table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
-
-	if (NULL == table_clk_vlt) {
-		printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
-		return -ENOMEM;
-	} else {
-		table_clk_vlt->count = 4;
-		table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
-		table_clk_vlt->entries[0].v = 0;
-		table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
-		table_clk_vlt->entries[1].v = 720;
-		table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
-		table_clk_vlt->entries[2].v = 810;
-		table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
-		table_clk_vlt->entries[3].v = 900;
-		pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
-		hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
-	}
-
-	return 0;
-}
-
-static int tonga_set_private_var_based_on_pptale(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
-		pptable_info->vdd_dep_on_sclk;
-	phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
-		pptable_info->vdd_dep_on_mclk;
-
-	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
-		"VDD dependency on SCLK table is missing.	\
-		This table is mandatory", return -1);
-	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
-		"VDD dependency on SCLK table has to have is missing.	\
-		This table is mandatory", return -1);
-
-	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
-		"VDD dependency on MCLK table is missing.	\
-		This table is mandatory", return -1);
-	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
-		"VDD dependency on MCLK table has to have is missing.	 \
-		This table is mandatory", return -1);
-
-	data->min_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc;
-	data->max_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
-
-	pptable_info->max_clock_voltage_on_ac.sclk =
-		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
-	pptable_info->max_clock_voltage_on_ac.mclk =
-		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
-	pptable_info->max_clock_voltage_on_ac.vddc =
-		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
-	pptable_info->max_clock_voltage_on_ac.vddci =
-		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
-
-	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
-		pptable_info->max_clock_voltage_on_ac.sclk;
-	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
-		pptable_info->max_clock_voltage_on_ac.mclk;
-	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
-		pptable_info->max_clock_voltage_on_ac.vddc;
-	hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
-		pptable_info->max_clock_voltage_on_ac.vddci;
-
-	return 0;
-}
-
-int tonga_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	int result = 1;
-
-	PP_ASSERT_WITH_CODE (!tonga_is_dpm_running(hwmgr),
-			     "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.",
-			     return result);
-
-	if (0 == data->pcie_dpm_key_disabled) {
-		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
-							     hwmgr->smumgr,
-					PPSMC_MSG_PCIeDPM_UnForceLevel)),
-					   "unforce pcie level failed!",
-								return -1);
-	}
-
-	result = tonga_upload_dpm_level_enable_mask(hwmgr);
-
-	return result;
-}
-
-static uint32_t tonga_get_lowest_enable_level(
-				struct pp_hwmgr *hwmgr, uint32_t level_mask)
-{
-	uint32_t level = 0;
-
-	while (0 == (level_mask & (1 << level)))
-		level++;
-
-	return level;
-}
-
-static int tonga_force_dpm_lowest(struct pp_hwmgr *hwmgr)
-{
-	uint32_t level;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-	if (0 == data->pcie_dpm_key_disabled) {
-		/* PCIE */
-		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) {
-			level = tonga_get_lowest_enable_level(hwmgr,
-							      data->dpm_level_enable_mask.pcie_dpm_enable_mask);
-			PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)),
-					    "force lowest pcie dpm state failed!", return -1);
-		}
-	}
-
-	if (0 == data->sclk_dpm_key_disabled) {
-		/* SCLK */
-		if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
-			level = tonga_get_lowest_enable_level(hwmgr,
-							      data->dpm_level_enable_mask.sclk_dpm_enable_mask);
-
-			PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)),
-					    "force sclk dpm state failed!", return -1);
-
-			if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
-							 CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level)
-				printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index.	\
-				Curr_Sclk_Index does not match the level \n");
-		}
-	}
-
-	if (0 == data->mclk_dpm_key_disabled) {
-		/* MCLK */
-		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) {
-			level = tonga_get_lowest_enable_level(hwmgr,
-							      data->dpm_level_enable_mask.mclk_dpm_enable_mask);
-			PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)),
-					    "force lowest mclk dpm state failed!", return -1);
-			if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-							 TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level)
-				printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
-						Curr_Mclk_Index does not match the level \n");
-		}
-	}
-
-	return 0;
-}
-
-static int tonga_patch_voltage_dependency_tables_with_lookup_table(struct pp_hwmgr *hwmgr)
-{
-	uint8_t entryId;
-	uint8_t voltageId;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
-	phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
-	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
-	if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
-		for (entryId = 0; entryId < sclk_table->count; ++entryId) {
-			voltageId = sclk_table->entries[entryId].vddInd;
-			sclk_table->entries[entryId].vddgfx =
-				pptable_info->vddgfx_lookup_table->entries[voltageId].us_vdd;
-		}
-	} else {
-		for (entryId = 0; entryId < sclk_table->count; ++entryId) {
-			voltageId = sclk_table->entries[entryId].vddInd;
-			sclk_table->entries[entryId].vddc =
-				pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
-		}
-	}
-
-	for (entryId = 0; entryId < mclk_table->count; ++entryId) {
-		voltageId = mclk_table->entries[entryId].vddInd;
-		mclk_table->entries[entryId].vddc =
-			pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
-	}
-
-	for (entryId = 0; entryId < mm_table->count; ++entryId) {
-		voltageId = mm_table->entries[entryId].vddcInd;
-		mm_table->entries[entryId].vddc =
-			pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
-	}
-
-	return 0;
-
-}
-
-static int tonga_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
-{
-	uint8_t entryId;
-	phm_ppt_v1_voltage_lookup_record v_record;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
-	phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
-
-	if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
-		for (entryId = 0; entryId < sclk_table->count; ++entryId) {
-			if (sclk_table->entries[entryId].vdd_offset & (1 << 15))
-				v_record.us_vdd = sclk_table->entries[entryId].vddgfx +
-					sclk_table->entries[entryId].vdd_offset - 0xFFFF;
-			else
-				v_record.us_vdd = sclk_table->entries[entryId].vddgfx +
-					sclk_table->entries[entryId].vdd_offset;
-
-			sclk_table->entries[entryId].vddc =
-				v_record.us_cac_low = v_record.us_cac_mid =
-				v_record.us_cac_high = v_record.us_vdd;
-
-			tonga_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
-		}
-
-		for (entryId = 0; entryId < mclk_table->count; ++entryId) {
-			if (mclk_table->entries[entryId].vdd_offset & (1 << 15))
-				v_record.us_vdd = mclk_table->entries[entryId].vddc +
-					mclk_table->entries[entryId].vdd_offset - 0xFFFF;
-			else
-				v_record.us_vdd = mclk_table->entries[entryId].vddc +
-					mclk_table->entries[entryId].vdd_offset;
-
-			mclk_table->entries[entryId].vddgfx = v_record.us_cac_low =
-				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
-			tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
-		}
-	}
-
-	return 0;
-
-}
-
-static int tonga_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
-{
-	uint32_t entryId;
-	phm_ppt_v1_voltage_lookup_record v_record;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
-	if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
-		for (entryId = 0; entryId < mm_table->count; entryId++) {
-			if (mm_table->entries[entryId].vddgfx_offset & (1 << 15))
-				v_record.us_vdd = mm_table->entries[entryId].vddc +
-					mm_table->entries[entryId].vddgfx_offset - 0xFFFF;
-			else
-				v_record.us_vdd = mm_table->entries[entryId].vddc +
-					mm_table->entries[entryId].vddgfx_offset;
-
-			/* Add the calculated VDDGFX to the VDDGFX lookup table */
-			mm_table->entries[entryId].vddgfx = v_record.us_cac_low =
-				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
-			tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
-		}
-	}
-	return 0;
-}
-
-
-/**
- * Change virtual leakage voltage to actual value.
- *
- * @param     hwmgr  the address of the powerplay hardware manager.
- * @param     pointer to changing voltage
- * @param     pointer to leakage table
- */
-static void tonga_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
-		uint16_t *voltage, phw_tonga_leakage_voltage *pLeakageTable)
-{
-	uint32_t leakage_index;
-
-	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
-	for (leakage_index = 0; leakage_index < pLeakageTable->count; leakage_index++) {
-		/* if this voltage matches a leakage voltage ID */
-		/* patch with actual leakage voltage */
-		if (pLeakageTable->leakage_id[leakage_index] == *voltage) {
-			*voltage = pLeakageTable->actual_voltage[leakage_index];
-			break;
-		}
-	}
-
-	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
-		printk(KERN_ERR "[ powerplay ] Voltage value looks like a Leakage ID but it's not patched \n");
-}
-
-/**
- * Patch voltage lookup table by EVV leakages.
- *
- * @param     hwmgr  the address of the powerplay hardware manager.
- * @param     pointer to voltage lookup table
- * @param     pointer to leakage table
- * @return     always 0
- */
-static int tonga_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
-		phm_ppt_v1_voltage_lookup_table *lookup_table,
-		phw_tonga_leakage_voltage *pLeakageTable)
-{
-	uint32_t i;
-
-	for (i = 0; i < lookup_table->count; i++) {
-		tonga_patch_with_vdd_leakage(hwmgr,
-			&lookup_table->entries[i].us_vdd, pLeakageTable);
-	}
-
-	return 0;
-}
-
-static int tonga_patch_clock_voltage_lomits_with_vddc_leakage(struct pp_hwmgr *hwmgr,
-		phw_tonga_leakage_voltage *pLeakageTable, uint16_t *Vddc)
-{
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddc, pLeakageTable);
-	hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
-		pptable_info->max_clock_voltage_on_dc.vddc;
-
-	return 0;
-}
-
-static int tonga_patch_clock_voltage_limits_with_vddgfx_leakage(
-		struct pp_hwmgr *hwmgr, phw_tonga_leakage_voltage *pLeakageTable,
-		uint16_t *Vddgfx)
-{
-	tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddgfx, pLeakageTable);
-	return 0;
-}
-
-int tonga_sort_lookup_table(struct pp_hwmgr *hwmgr,
-		phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
-	uint32_t table_size, i, j;
-	phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
-	table_size = lookup_table->count;
-
-	PP_ASSERT_WITH_CODE(0 != lookup_table->count,
-		"Lookup table is empty", return -1);
-
-	/* Sorting voltages */
-	for (i = 0; i < table_size - 1; i++) {
-		for (j = i + 1; j > 0; j--) {
-			if (lookup_table->entries[j].us_vdd < lookup_table->entries[j-1].us_vdd) {
-				tmp_voltage_lookup_record = lookup_table->entries[j-1];
-				lookup_table->entries[j-1] = lookup_table->entries[j];
-				lookup_table->entries[j] = tmp_voltage_lookup_record;
-			}
-		}
-	}
-
-	return 0;
-}
-
-static int tonga_complete_dependency_tables(struct pp_hwmgr *hwmgr)
-{
-	int result = 0;
-	int tmp_result;
-	tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
-		tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr,
-			pptable_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
-		if (tmp_result != 0)
-			result = tmp_result;
-
-		tmp_result = tonga_patch_clock_voltage_limits_with_vddgfx_leakage(hwmgr,
-			&(data->vddcgfx_leakage), &pptable_info->max_clock_voltage_on_dc.vddgfx);
-		if (tmp_result != 0)
-			result = tmp_result;
-	} else {
-		tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr,
-			pptable_info->vddc_lookup_table, &(data->vddc_leakage));
-		if (tmp_result != 0)
-			result = tmp_result;
-
-		tmp_result = tonga_patch_clock_voltage_lomits_with_vddc_leakage(hwmgr,
-			&(data->vddc_leakage), &pptable_info->max_clock_voltage_on_dc.vddc);
-		if (tmp_result != 0)
-			result = tmp_result;
-	}
-
-	tmp_result = tonga_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
-	if (tmp_result != 0)
-		result = tmp_result;
-
-	tmp_result = tonga_calc_voltage_dependency_tables(hwmgr);
-	if (tmp_result != 0)
-		result = tmp_result;
-
-	tmp_result = tonga_calc_mm_voltage_dependency_table(hwmgr);
-	if (tmp_result != 0)
-		result = tmp_result;
-
-	tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddgfx_lookup_table);
-	if (tmp_result != 0)
-		result = tmp_result;
-
-	tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddc_lookup_table);
-	if (tmp_result != 0)
-		result = tmp_result;
-
-	return result;
-}
-
-int tonga_init_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	data->low_sclk_interrupt_threshold = 0;
-
-	return 0;
-}
-
-int tonga_setup_asic_task(struct pp_hwmgr *hwmgr)
-{
-	int tmp_result, result = 0;
-
-	tmp_result = tonga_read_clock_registers(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to read clock registers!", result = tmp_result);
-
-	tmp_result = tonga_get_memory_type(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to get memory type!", result = tmp_result);
-
-	tmp_result = tonga_enable_acpi_power_management(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to enable ACPI power management!", result = tmp_result);
-
-	tmp_result = tonga_init_power_gate_state(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to init power gate state!", result = tmp_result);
-
-	tmp_result = tonga_get_mc_microcode_version(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to get MC microcode version!", result = tmp_result);
-
-	tmp_result = tonga_init_sclk_threshold(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to init sclk threshold!", result = tmp_result);
-
-	return result;
-}
-
-/**
- * Enable voltage control
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_enable_voltage_control(struct pp_hwmgr *hwmgr)
-{
-	/* enable voltage control */
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
-
-	return 0;
-}
-
-/**
- * Checks if we want to support voltage control
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- */
-bool cf_tonga_voltage_control(const struct pp_hwmgr *hwmgr)
-{
-	const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-	return(TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control);
-}
-
-/*---------------------------MC----------------------------*/
-
-uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
-{
-	return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
-}
-
-bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
-{
-	bool result = true;
-
-	switch (inReg) {
-	case  mmMC_SEQ_RAS_TIMING:
-		*outReg = mmMC_SEQ_RAS_TIMING_LP;
-		break;
-
-	case  mmMC_SEQ_DLL_STBY:
-		*outReg = mmMC_SEQ_DLL_STBY_LP;
-		break;
-
-	case  mmMC_SEQ_G5PDX_CMD0:
-		*outReg = mmMC_SEQ_G5PDX_CMD0_LP;
-		break;
-
-	case  mmMC_SEQ_G5PDX_CMD1:
-		*outReg = mmMC_SEQ_G5PDX_CMD1_LP;
-		break;
-
-	case  mmMC_SEQ_G5PDX_CTRL:
-		*outReg = mmMC_SEQ_G5PDX_CTRL_LP;
-		break;
-
-	case mmMC_SEQ_CAS_TIMING:
-		*outReg = mmMC_SEQ_CAS_TIMING_LP;
-		break;
-
-	case mmMC_SEQ_MISC_TIMING:
-		*outReg = mmMC_SEQ_MISC_TIMING_LP;
-		break;
-
-	case mmMC_SEQ_MISC_TIMING2:
-		*outReg = mmMC_SEQ_MISC_TIMING2_LP;
-		break;
-
-	case mmMC_SEQ_PMG_DVS_CMD:
-		*outReg = mmMC_SEQ_PMG_DVS_CMD_LP;
-		break;
-
-	case mmMC_SEQ_PMG_DVS_CTL:
-		*outReg = mmMC_SEQ_PMG_DVS_CTL_LP;
-		break;
-
-	case mmMC_SEQ_RD_CTL_D0:
-		*outReg = mmMC_SEQ_RD_CTL_D0_LP;
-		break;
-
-	case mmMC_SEQ_RD_CTL_D1:
-		*outReg = mmMC_SEQ_RD_CTL_D1_LP;
-		break;
-
-	case mmMC_SEQ_WR_CTL_D0:
-		*outReg = mmMC_SEQ_WR_CTL_D0_LP;
-		break;
-
-	case mmMC_SEQ_WR_CTL_D1:
-		*outReg = mmMC_SEQ_WR_CTL_D1_LP;
-		break;
-
-	case mmMC_PMG_CMD_EMRS:
-		*outReg = mmMC_SEQ_PMG_CMD_EMRS_LP;
-		break;
-
-	case mmMC_PMG_CMD_MRS:
-		*outReg = mmMC_SEQ_PMG_CMD_MRS_LP;
-		break;
-
-	case mmMC_PMG_CMD_MRS1:
-		*outReg = mmMC_SEQ_PMG_CMD_MRS1_LP;
-		break;
-
-	case mmMC_SEQ_PMG_TIMING:
-		*outReg = mmMC_SEQ_PMG_TIMING_LP;
-		break;
-
-	case mmMC_PMG_CMD_MRS2:
-		*outReg = mmMC_SEQ_PMG_CMD_MRS2_LP;
-		break;
-
-	case mmMC_SEQ_WR_CTL_2:
-		*outReg = mmMC_SEQ_WR_CTL_2_LP;
-		break;
-
-	default:
-		result = false;
-		break;
-	}
-
-	return result;
-}
-
-int tonga_set_s0_mc_reg_index(phw_tonga_mc_reg_table *table)
-{
-	uint32_t i;
-	uint16_t address;
-
-	for (i = 0; i < table->last; i++) {
-		table->mc_reg_address[i].s0 =
-			tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
-			? address : table->mc_reg_address[i].s1;
-	}
-	return 0;
-}
-
-int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, phw_tonga_mc_reg_table *ni_table)
-{
-	uint8_t i, j;
-
-	PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-		"Invalid VramInfo table.", return -1);
-	PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
-		"Invalid VramInfo table.", return -1);
-
-	for (i = 0; i < table->last; i++) {
-		ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
-	}
-	ni_table->last = table->last;
-
-	for (i = 0; i < table->num_entries; i++) {
-		ni_table->mc_reg_table_entry[i].mclk_max =
-			table->mc_reg_table_entry[i].mclk_max;
-		for (j = 0; j < table->last; j++) {
-			ni_table->mc_reg_table_entry[i].mc_data[j] =
-				table->mc_reg_table_entry[i].mc_data[j];
-		}
-	}
-
-	ni_table->num_entries = table->num_entries;
-
-	return 0;
-}
-
-/**
- * VBIOS omits some information to reduce size, we need to recover them here.
- * 1.   when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to  mmMC_PMG_CMD_EMRS /_LP[15:0].
- *      Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0]
- * 2.   when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0].
- * 3.   need to set these data for each clock range
- *
- * @param    hwmgr the address of the powerplay hardware manager.
- * @param    table the address of MCRegTable
- * @return   always 0
- */
-int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr, phw_tonga_mc_reg_table *table)
-{
-	uint8_t i, j, k;
-	uint32_t temp_reg;
-	const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-	for (i = 0, j = table->last; i < table->last; i++) {
-		PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-			"Invalid VramInfo table.", return -1);
-		switch (table->mc_reg_address[i].s1) {
-		/*
-		* mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to  mmMC_PMG_CMD_EMRS /_LP[15:0].
-		* Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0]
-		*/
-		case mmMC_SEQ_MISC1:
-			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
-			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
-			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
-			for (k = 0; k < table->num_entries; k++) {
-				table->mc_reg_table_entry[k].mc_data[j] =
-					((temp_reg & 0xffff0000)) |
-					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
-			}
-			j++;
-			PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-				"Invalid VramInfo table.", return -1);
-
-			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
-			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
-			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
-			for (k = 0; k < table->num_entries; k++) {
-				table->mc_reg_table_entry[k].mc_data[j] =
-					(temp_reg & 0xffff0000) |
-					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-
-				if (!data->is_memory_GDDR5) {
-					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
-				}
-			}
-			j++;
-			PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-				"Invalid VramInfo table.", return -1);
-
-			if (!data->is_memory_GDDR5) {
-				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
-				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
-				for (k = 0; k < table->num_entries; k++) {
-					table->mc_reg_table_entry[k].mc_data[j] =
-						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
-				}
-				j++;
-				PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-					"Invalid VramInfo table.", return -1);
-			}
-
-			break;
-
-		case mmMC_SEQ_RESERVE_M:
-			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
-			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
-			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
-			for (k = 0; k < table->num_entries; k++) {
-				table->mc_reg_table_entry[k].mc_data[j] =
-					(temp_reg & 0xffff0000) |
-					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-			}
-			j++;
-			PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-				"Invalid VramInfo table.", return -1);
-			break;
-
-		default:
-			break;
-		}
-
-	}
-
-	table->last = j;
-
-	return 0;
-}
-
-int tonga_set_valid_flag(phw_tonga_mc_reg_table *table)
-{
-	uint8_t i, j;
-	for (i = 0; i < table->last; i++) {
-		for (j = 1; j < table->num_entries; j++) {
-			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
-				table->mc_reg_table_entry[j].mc_data[i]) {
-				table->validflag |= (1<<i);
-				break;
-			}
-		}
-	}
-
-	return 0;
-}
-
-int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-	int result;
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-	pp_atomctrl_mc_reg_table *table;
-	phw_tonga_mc_reg_table *ni_table = &data->tonga_mc_reg_table;
-	uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
-
-	table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
-
-	if (NULL == table)
-		return -ENOMEM;
-
-	/* Program additional LP registers that are no longer programmed by VBIOS */
-	cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
-	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
-
-	memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
-
-	result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
-
-	if (0 == result)
-		result = tonga_copy_vbios_smc_reg_table(table, ni_table);
-
-	if (0 == result) {
-		tonga_set_s0_mc_reg_index(ni_table);
-		result = tonga_set_mc_special_registers(hwmgr, ni_table);
-	}
-
-	if (0 == result)
-		tonga_set_valid_flag(ni_table);
-
-	kfree(table);
-	return result;
-}
-
-/*
-* Copy one arb setting to another and then switch the active set.
-* arbFreqSrc and arbFreqDest is one of the MC_CG_ARB_FREQ_Fx constants.
-*/
-int tonga_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
-		uint32_t arbFreqSrc, uint32_t arbFreqDest)
-{
-	uint32_t mc_arb_dram_timing;
-	uint32_t mc_arb_dram_timing2;
-	uint32_t burst_time;
-	uint32_t mc_cg_config;
-
-	switch (arbFreqSrc) {
-	case MC_CG_ARB_FREQ_F0:
-		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
-		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
-		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-		break;
-
-	case MC_CG_ARB_FREQ_F1:
-		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
-		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
-		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
-		break;
-
-	default:
-		return -1;
-	}
-
-	switch (arbFreqDest) {
-	case MC_CG_ARB_FREQ_F0:
-		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
-		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
-		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
-		break;
-
-	case MC_CG_ARB_FREQ_F1:
-		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
-		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
-		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
-		break;
-
-	default:
-		return -1;
-	}
-
-	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
-	mc_cg_config |= 0x0000000F;
-	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
-	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arbFreqDest);
-
-	return 0;
-}
-
-/**
- * Initial switch from ARB F0->F1
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- * This function is to be called from the SetPowerState table.
- */
-int tonga_initial_switch_from_arb_f0_to_f1(struct pp_hwmgr *hwmgr)
-{
-	return tonga_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-/**
- * Initialize the ARB DRAM timing table's index field.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr)
-{
-	const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	uint32_t tmp;
-	int result;
-
-	/*
-	* This is a read-modify-write on the first byte of the ARB table.
-	* The first byte in the SMU72_Discrete_MCArbDramTimingTable structure is the field 'current'.
-	* This solution is ugly, but we never write the whole table only individual fields in it.
-	* In reality this field should not be in that structure but in a soft register.
-	*/
-	result = tonga_read_smc_sram_dword(hwmgr->smumgr,
-				data->arb_table_start, &tmp, data->sram_end);
-
-	if (0 != result)
-		return result;
-
-	tmp &= 0x00FFFFFF;
-	tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
-	return tonga_write_smc_sram_dword(hwmgr->smumgr,
-			data->arb_table_start,  tmp, data->sram_end);
-}
-
-int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU72_Discrete_MCRegisters *mc_reg_table)
-{
-	const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-	uint32_t i, j;
-
-	for (i = 0, j = 0; j < data->tonga_mc_reg_table.last; j++) {
-		if (data->tonga_mc_reg_table.validflag & 1<<j) {
-			PP_ASSERT_WITH_CODE(i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
-				"Index of mc_reg_table->address[] array out of boundary", return -1);
-			mc_reg_table->address[i].s0 =
-				PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s0);
-			mc_reg_table->address[i].s1 =
-				PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s1);
-			i++;
-		}
-	}
-
-	mc_reg_table->last = (uint8_t)i;
-
-	return 0;
-}
-
-/*convert register values from driver to SMC format */
-void tonga_convert_mc_registers(
-	const phw_tonga_mc_reg_entry * pEntry,
-	SMU72_Discrete_MCRegisterSet *pData,
-	uint32_t numEntries, uint32_t validflag)
-{
-	uint32_t i, j;
-
-	for (i = 0, j = 0; j < numEntries; j++) {
-		if (validflag & 1<<j) {
-			pData->value[i] = PP_HOST_TO_SMC_UL(pEntry->mc_data[j]);
-			i++;
-		}
-	}
-}
-
-/* find the entry in the memory range table, then populate the value to SMC's tonga_mc_reg_table */
-int tonga_convert_mc_reg_table_entry_to_smc(
-		struct pp_hwmgr *hwmgr,
-		const uint32_t memory_clock,
-		SMU72_Discrete_MCRegisterSet *mc_reg_table_data
-		)
-{
-	const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	uint32_t i = 0;
-
-	for (i = 0; i < data->tonga_mc_reg_table.num_entries; i++) {
-		if (memory_clock <=
-			data->tonga_mc_reg_table.mc_reg_table_entry[i].mclk_max) {
-			break;
-		}
-	}
-
-	if ((i == data->tonga_mc_reg_table.num_entries) && (i > 0))
-		--i;
-
-	tonga_convert_mc_registers(&data->tonga_mc_reg_table.mc_reg_table_entry[i],
-		mc_reg_table_data, data->tonga_mc_reg_table.last, data->tonga_mc_reg_table.validflag);
-
-	return 0;
-}
-
-int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
-		SMU72_Discrete_MCRegisters *mc_reg_table)
-{
-	int result = 0;
-	tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	int res;
-	uint32_t i;
-
-	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
-		res = tonga_convert_mc_reg_table_entry_to_smc(
-				hwmgr,
-				data->dpm_table.mclk_table.dpm_levels[i].value,
-				&mc_reg_table->data[i]
-				);
-
-		if (0 != res)
-			result = res;
-	}
-
-	return result;
-}
-
-int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-	int result;
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-	memset(&data->mc_reg_table, 0x00, sizeof(SMU72_Discrete_MCRegisters));
-	result = tonga_populate_mc_reg_address(hwmgr, &(data->mc_reg_table));
-	PP_ASSERT_WITH_CODE(0 == result,
-		"Failed to initialize MCRegTable for the MC register addresses!", return result;);
-
-	result = tonga_convert_mc_reg_table_to_smc(hwmgr, &data->mc_reg_table);
-	PP_ASSERT_WITH_CODE(0 == result,
-		"Failed to initialize MCRegTable for driver state!", return result;);
-
-	return tonga_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start,
-			(uint8_t *)&data->mc_reg_table, sizeof(SMU72_Discrete_MCRegisters), data->sram_end);
-}
-
-/**
- * Programs static screed detection parameters
- *
- * @param   hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_program_static_screen_threshold_parameters(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-	/* Set static screen threshold unit*/
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
-		CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
-		data->static_screen_threshold_unit);
-	/* Set static screen threshold*/
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
-		CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
-		data->static_screen_threshold);
-
-	return 0;
-}
-
-/**
- * Setup display gap for glitch free memory clock switching.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_enable_display_gap(struct pp_hwmgr *hwmgr)
-{
-	uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
-							CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
-
-	display_gap = PHM_SET_FIELD(display_gap,
-					CG_DISPLAY_GAP_CNTL, DISP_GAP, DISPLAY_GAP_IGNORE);
-
-	display_gap = PHM_SET_FIELD(display_gap,
-					CG_DISPLAY_GAP_CNTL, DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-		ixCG_DISPLAY_GAP_CNTL, display_gap);
-
-	return 0;
-}
-
-/**
- * Programs activity state transition voting clients
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_program_voting_clients(struct pp_hwmgr *hwmgr)
-{
-	tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-	/* Clear reset for voting clients before enabling DPM */
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-		SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-		SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-		ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-		ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-		ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-		ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-		ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-		ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-		ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-		ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
-
-	return 0;
-}
-
-
-int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
-	int tmp_result, result = 0;
-
-	tmp_result = tonga_check_for_dpm_stopped(hwmgr);
-
-	if (cf_tonga_voltage_control(hwmgr)) {
-		tmp_result = tonga_enable_voltage_control(hwmgr);
-		PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to enable voltage control!", result = tmp_result);
-
-		tmp_result = tonga_construct_voltage_tables(hwmgr);
-		PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to contruct voltage tables!", result = tmp_result);
-	}
-
-	tmp_result = tonga_initialize_mc_reg_table(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to initialize MC reg table!", result = tmp_result);
-
-	tmp_result = tonga_program_static_screen_threshold_parameters(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to program static screen threshold parameters!", result = tmp_result);
-
-	tmp_result = tonga_enable_display_gap(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to enable display gap!", result = tmp_result);
-
-	tmp_result = tonga_program_voting_clients(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to program voting clients!", result = tmp_result);
-
-	tmp_result = tonga_process_firmware_header(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to process firmware header!", result = tmp_result);
-
-	tmp_result = tonga_initial_switch_from_arb_f0_to_f1(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to initialize switch from ArbF0 to F1!", result = tmp_result);
-
-	tmp_result = tonga_init_smc_table(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to initialize SMC table!", result = tmp_result);
-
-	tmp_result = tonga_init_arb_table_index(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to initialize ARB table index!", result = tmp_result);
-
-	tmp_result = tonga_populate_initial_mc_reg_table(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to populate initialize MC Reg table!", result = tmp_result);
-
-	tmp_result = tonga_notify_smc_display_change(hwmgr, false);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to notify no display!", result = tmp_result);
-
-	/* enable SCLK control */
-	tmp_result = tonga_enable_sclk_control(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to enable SCLK control!", result = tmp_result);
-
-	/* enable DPM */
-	tmp_result = tonga_start_dpm(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to start DPM!", result = tmp_result);
-
-	return result;
-}
-
-int tonga_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
-	int tmp_result, result = 0;
-
-	tmp_result = tonga_check_for_dpm_running(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"SMC is still running!", return 0);
-
-	tmp_result = tonga_stop_dpm(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to stop DPM!", result = tmp_result);
-
-	tmp_result = tonga_reset_to_default(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-		"Failed to reset to default!", result = tmp_result);
-
-	return result;
-}
-
-int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr)
-{
-	int result;
-
-	result = tonga_set_boot_state(hwmgr);
-	if (0 != result)
-		printk(KERN_ERR "[ powerplay ] Failed to reset asic via set boot state! \n");
-
-	return result;
-}
-
-int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
-	return phm_hwmgr_backend_fini(hwmgr);
-}
-
-/**
- * Initializes the Volcanic Islands Hardware Manager
- *
- * @param   hwmgr the address of the powerplay hardware manager.
- * @return   1 if success; otherwise appropriate error code.
- */
-int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
-{
-	int result = 0;
-	SMU72_Discrete_DpmTable  *table = NULL;
-	tonga_hwmgr *data;
-	pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-	phw_tonga_ulv_parm *ulv;
-	struct cgs_system_info sys_info = {0};
-
-	PP_ASSERT_WITH_CODE((NULL != hwmgr),
-		"Invalid Parameter!", return -1;);
-
-	data = kzalloc(sizeof(struct tonga_hwmgr), GFP_KERNEL);
-	if (data == NULL)
-		return -ENOMEM;
-
-	hwmgr->backend = data;
-
-	data->dll_defaule_on = false;
-	data->sram_end = SMC_RAM_END;
-
-	data->activity_target[0] = PPTONGA_TARGETACTIVITY_DFLT;
-	data->activity_target[1] = PPTONGA_TARGETACTIVITY_DFLT;
-	data->activity_target[2] = PPTONGA_TARGETACTIVITY_DFLT;
-	data->activity_target[3] = PPTONGA_TARGETACTIVITY_DFLT;
-	data->activity_target[4] = PPTONGA_TARGETACTIVITY_DFLT;
-	data->activity_target[5] = PPTONGA_TARGETACTIVITY_DFLT;
-	data->activity_target[6] = PPTONGA_TARGETACTIVITY_DFLT;
-	data->activity_target[7] = PPTONGA_TARGETACTIVITY_DFLT;
-
-	data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
-	data->vddc_vddgfx_delta = VDDC_VDDGFX_DELTA;
-	data->mclk_activity_target = PPTONGA_MCLK_TARGETACTIVITY_DFLT;
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-		PHM_PlatformCaps_DisableVoltageIsland);
-
-	data->sclk_dpm_key_disabled = 0;
-	data->mclk_dpm_key_disabled = 0;
-	data->pcie_dpm_key_disabled = 0;
-	data->pcc_monitor_enabled = 0;
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-		PHM_PlatformCaps_UnTabledHardwareInterface);
-
-	data->gpio_debug = 0;
-	data->engine_clock_data = 0;
-	data->memory_clock_data = 0;
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-		PHM_PlatformCaps_DynamicPatchPowerState);
-
-	/* need to set voltage control types before EVV patching*/
-	data->voltage_control = TONGA_VOLTAGE_CONTROL_NONE;
-	data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE;
-	data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE;
-	data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE;
-	data->force_pcie_gen = PP_PCIEGenInvalid;
-
-	if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-				VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
-		data->voltage_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
-	}
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_ControlVDDGFX)) {
-		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-			VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
-			data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
-		}
-	}
-
-	if (TONGA_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) {
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_ControlVDDGFX);
-	}
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_EnableMVDDControl)) {
-		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-					VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) {
-			data->mvdd_control = TONGA_VOLTAGE_CONTROL_BY_GPIO;
-		}
-	}
-
-	if (TONGA_VOLTAGE_CONTROL_NONE == data->mvdd_control) {
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_EnableMVDDControl);
-	}
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_ControlVDDCI)) {
-		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-					VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
-			data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_GPIO;
-		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
-						VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
-			data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
-	}
-
-	if (TONGA_VOLTAGE_CONTROL_NONE == data->vdd_ci_control)
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-		PHM_PlatformCaps_ControlVDDCI);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-		PHM_PlatformCaps_TablelessHardwareInterface);
-
-	if (pptable_info->cac_dtp_table->usClockStretchAmount != 0)
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_ClockStretcher);
-
-	/* Initializes DPM default values*/
-	tonga_initialize_dpm_defaults(hwmgr);
-
-	/* Get leakage voltage based on leakage ID.*/
-	PP_ASSERT_WITH_CODE((0 == tonga_get_evv_voltage(hwmgr)),
-		"Get EVV Voltage Failed.  Abort Driver loading!", return -1);
-
-	tonga_complete_dependency_tables(hwmgr);
-
-	/* Parse pptable data read from VBIOS*/
-	tonga_set_private_var_based_on_pptale(hwmgr);
-
-	/* ULV Support*/
-	ulv = &(data->ulv);
-	ulv->ulv_supported = false;
-
-	/* Initalize Dynamic State Adjustment Rule Settings*/
-	result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
-	if (result)
-		printk(KERN_ERR "[ powerplay ] tonga_initializa_dynamic_state_adjustment_rule_settings failed!\n");
-	data->uvd_enabled = false;
-
-	table = &(data->smc_state_table);
-
-	/*
-	* if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable,
-	* Peak Current Control feature is enabled and we should program PCC HW register
-	*/
-	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
-		uint32_t temp_reg = cgs_read_ind_register(hwmgr->device,
-										CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
-
-		switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
-		case 0:
-			temp_reg = PHM_SET_FIELD(temp_reg,
-				CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
-			break;
-		case 1:
-			temp_reg = PHM_SET_FIELD(temp_reg,
-				CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
-			break;
-		case 2:
-			temp_reg = PHM_SET_FIELD(temp_reg,
-				CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
-			break;
-		case 3:
-			temp_reg = PHM_SET_FIELD(temp_reg,
-				CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
-			break;
-		case 4:
-			temp_reg = PHM_SET_FIELD(temp_reg,
-				CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
-			break;
-		default:
-			printk(KERN_ERR "[ powerplay ] Failed to setup PCC HW register!	\
-				Wrong GPIO assigned for VDDC_PCC_GPIO_PINID! \n");
-			break;
-		}
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixCNB_PWRMGT_CNTL, temp_reg);
-	}
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-		PHM_PlatformCaps_EnableSMU7ThermalManagement);
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-		PHM_PlatformCaps_SMU7);
-
-	data->vddc_phase_shed_control = false;
-
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-		      PHM_PlatformCaps_UVDPowerGating);
-	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-		      PHM_PlatformCaps_VCEPowerGating);
-	sys_info.size = sizeof(struct cgs_system_info);
-	sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
-	result = cgs_query_system_info(hwmgr->device, &sys_info);
-	if (!result) {
-		if (sys_info.value & AMD_PG_SUPPORT_UVD)
-			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				      PHM_PlatformCaps_UVDPowerGating);
-		if (sys_info.value & AMD_PG_SUPPORT_VCE)
-			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				      PHM_PlatformCaps_VCEPowerGating);
-	}
-
-	if (0 == result) {
-		data->is_tlu_enabled = false;
-		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
-			TONGA_MAX_HARDWARE_POWERLEVELS;
-		hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
-		hwmgr->platform_descriptor.minimumClocksReductionPercentage  = 50;
-
-		sys_info.size = sizeof(struct cgs_system_info);
-		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
-		result = cgs_query_system_info(hwmgr->device, &sys_info);
-		if (result)
-			data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
-		else
-			data->pcie_gen_cap = (uint32_t)sys_info.value;
-		if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
-			data->pcie_spc_cap = 20;
-		sys_info.size = sizeof(struct cgs_system_info);
-		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
-		result = cgs_query_system_info(hwmgr->device, &sys_info);
-		if (result)
-			data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
-		else
-			data->pcie_lane_cap = (uint32_t)sys_info.value;
-	} else {
-		/* Ignore return value in here, we are cleaning up a mess. */
-		tonga_hwmgr_backend_fini(hwmgr);
-	}
-
-	return result;
-}
-
-static int tonga_force_dpm_level(struct pp_hwmgr *hwmgr,
-		enum amd_dpm_forced_level level)
-{
-	int ret = 0;
-
-	switch (level) {
-	case AMD_DPM_FORCED_LEVEL_HIGH:
-		ret = tonga_force_dpm_highest(hwmgr);
-		if (ret)
-			return ret;
-		break;
-	case AMD_DPM_FORCED_LEVEL_LOW:
-		ret = tonga_force_dpm_lowest(hwmgr);
-		if (ret)
-			return ret;
-		break;
-	case AMD_DPM_FORCED_LEVEL_AUTO:
-		ret = tonga_unforce_dpm_levels(hwmgr);
-		if (ret)
-			return ret;
-		break;
-	default:
-		break;
-	}
-
-	hwmgr->dpm_level = level;
-	return ret;
-}
-
-static int tonga_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
-				struct pp_power_state  *prequest_ps,
-			const struct pp_power_state *pcurrent_ps)
-{
-	struct tonga_power_state *tonga_ps =
-				cast_phw_tonga_power_state(&prequest_ps->hardware);
-
-	uint32_t sclk;
-	uint32_t mclk;
-	struct PP_Clocks minimum_clocks = {0};
-	bool disable_mclk_switching;
-	bool disable_mclk_switching_for_frame_lock;
-	struct cgs_display_info info = {0};
-	const struct phm_clock_and_voltage_limits *max_limits;
-	uint32_t i;
-	tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	int32_t count;
-	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
-
-	data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
-
-	PP_ASSERT_WITH_CODE(tonga_ps->performance_level_count == 2,
-				 "VI should always have 2 performance levels",
-				 );
-
-	max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
-			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
-			&(hwmgr->dyn_state.max_clock_voltage_on_dc);
-
-	if (PP_PowerSource_DC == hwmgr->power_source) {
-		for (i = 0; i < tonga_ps->performance_level_count; i++) {
-			if (tonga_ps->performance_levels[i].memory_clock > max_limits->mclk)
-				tonga_ps->performance_levels[i].memory_clock = max_limits->mclk;
-			if (tonga_ps->performance_levels[i].engine_clock > max_limits->sclk)
-				tonga_ps->performance_levels[i].engine_clock = max_limits->sclk;
-		}
-	}
-
-	tonga_ps->vce_clocks.EVCLK = hwmgr->vce_arbiter.evclk;
-	tonga_ps->vce_clocks.ECCLK = hwmgr->vce_arbiter.ecclk;
-
-	tonga_ps->acp_clk = hwmgr->acp_arbiter.acpclk;
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	/*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
-
-	/* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
-
-		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
-		stable_pstate_sclk = (max_limits->sclk * 75) / 100;
-
-		for (count = pptable_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
-			if (stable_pstate_sclk >= pptable_info->vdd_dep_on_sclk->entries[count].clk) {
-				stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[count].clk;
-				break;
-			}
-		}
-
-		if (count < 0)
-			stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[0].clk;
-
-		stable_pstate_mclk = max_limits->mclk;
-
-		minimum_clocks.engineClock = stable_pstate_sclk;
-		minimum_clocks.memoryClock = stable_pstate_mclk;
-	}
-
-	if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
-		minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
-	if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
-		minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
-	tonga_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
-	if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
-		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.engineClock),
-					"Overdrive sclk exceeds limit",
-					hwmgr->gfx_arbiter.sclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
-		if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
-			tonga_ps->performance_levels[1].engine_clock = hwmgr->gfx_arbiter.sclk_over_drive;
-	}
-
-	if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
-		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.memoryClock),
-			"Overdrive mclk exceeds limit",
-			hwmgr->gfx_arbiter.mclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
-		if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
-			tonga_ps->performance_levels[1].memory_clock = hwmgr->gfx_arbiter.mclk_over_drive;
-	}
-
-	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
-				    hwmgr->platform_descriptor.platformCaps,
-				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
-
-	disable_mclk_switching = (1 < info.display_count) ||
-				    disable_mclk_switching_for_frame_lock;
-
-	sclk  = tonga_ps->performance_levels[0].engine_clock;
-	mclk  = tonga_ps->performance_levels[0].memory_clock;
-
-	if (disable_mclk_switching)
-		mclk  = tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock;
-
-	if (sclk < minimum_clocks.engineClock)
-		sclk = (minimum_clocks.engineClock > max_limits->sclk) ? max_limits->sclk : minimum_clocks.engineClock;
-
-	if (mclk < minimum_clocks.memoryClock)
-		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? max_limits->mclk : minimum_clocks.memoryClock;
-
-	tonga_ps->performance_levels[0].engine_clock = sclk;
-	tonga_ps->performance_levels[0].memory_clock = mclk;
-
-	tonga_ps->performance_levels[1].engine_clock =
-		(tonga_ps->performance_levels[1].engine_clock >= tonga_ps->performance_levels[0].engine_clock) ?
-			      tonga_ps->performance_levels[1].engine_clock :
-			      tonga_ps->performance_levels[0].engine_clock;
-
-	if (disable_mclk_switching) {
-		if (mclk < tonga_ps->performance_levels[1].memory_clock)
-			mclk = tonga_ps->performance_levels[1].memory_clock;
-
-		tonga_ps->performance_levels[0].memory_clock = mclk;
-		tonga_ps->performance_levels[1].memory_clock = mclk;
-	} else {
-		if (tonga_ps->performance_levels[1].memory_clock < tonga_ps->performance_levels[0].memory_clock)
-			tonga_ps->performance_levels[1].memory_clock = tonga_ps->performance_levels[0].memory_clock;
-	}
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
-		for (i=0; i < tonga_ps->performance_level_count; i++) {
-			tonga_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
-			tonga_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
-			tonga_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
-			tonga_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
-		}
-	}
-
-	return 0;
-}
-
-int tonga_get_power_state_size(struct pp_hwmgr *hwmgr)
-{
-	return sizeof(struct tonga_power_state);
-}
-
-static int tonga_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
-{
-	struct pp_power_state  *ps;
-	struct tonga_power_state  *tonga_ps;
-
-	if (hwmgr == NULL)
-		return -EINVAL;
-
-	ps = hwmgr->request_ps;
-
-	if (ps == NULL)
-		return -EINVAL;
-
-	tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
-
-	if (low)
-		return tonga_ps->performance_levels[0].memory_clock;
-	else
-		return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
-}
-
-static int tonga_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
-{
-	struct pp_power_state  *ps;
-	struct tonga_power_state  *tonga_ps;
-
-	if (hwmgr == NULL)
-		return -EINVAL;
-
-	ps = hwmgr->request_ps;
-
-	if (ps == NULL)
-		return -EINVAL;
-
-	tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
-
-	if (low)
-		return tonga_ps->performance_levels[0].engine_clock;
-	else
-		return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
-}
-
-static uint16_t tonga_get_current_pcie_speed(
-						   struct pp_hwmgr *hwmgr)
-{
-	uint32_t speed_cntl = 0;
-
-	speed_cntl = cgs_read_ind_register(hwmgr->device,
-						   CGS_IND_REG__PCIE,
-						   ixPCIE_LC_SPEED_CNTL);
-	return((uint16_t)PHM_GET_FIELD(speed_cntl,
-			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
-}
-
-static int tonga_get_current_pcie_lane_number(
-						   struct pp_hwmgr *hwmgr)
-{
-	uint32_t link_width;
-
-	link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device,
-							CGS_IND_REG__PCIE,
-						  PCIE_LC_LINK_WIDTH_CNTL,
-							LC_LINK_WIDTH_RD);
-
-	PP_ASSERT_WITH_CODE((7 >= link_width),
-			"Invalid PCIe lane width!", return 0);
-
-	return decode_pcie_lane_width(link_width);
-}
-
-static int tonga_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
-					struct pp_hw_power_state *hw_ps)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	struct tonga_power_state *ps = (struct tonga_power_state *)hw_ps;
-	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
-	uint16_t size;
-	uint8_t frev, crev;
-	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-
-	/* First retrieve the Boot clocks and VDDC from the firmware info table.
-	 * We assume here that fw_info is unchanged if this call fails.
-	 */
-	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
-			hwmgr->device, index,
-			&size, &frev, &crev);
-	if (!fw_info)
-		/* During a test, there is no firmware info table. */
-		return 0;
-
-	/* Patch the state. */
-	data->vbios_boot_state.sclk_bootup_value  = le32_to_cpu(fw_info->ulDefaultEngineClock);
-	data->vbios_boot_state.mclk_bootup_value  = le32_to_cpu(fw_info->ulDefaultMemoryClock);
-	data->vbios_boot_state.mvdd_bootup_value  = le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
-	data->vbios_boot_state.vddc_bootup_value  = le16_to_cpu(fw_info->usBootUpVDDCVoltage);
-	data->vbios_boot_state.vddci_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
-	data->vbios_boot_state.pcie_gen_bootup_value = tonga_get_current_pcie_speed(hwmgr);
-	data->vbios_boot_state.pcie_lane_bootup_value =
-			(uint16_t)tonga_get_current_pcie_lane_number(hwmgr);
-
-	/* set boot power state */
-	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
-	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
-	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
-	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
-
-	return 0;
-}
-
-static int tonga_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
-		void *state, struct pp_power_state *power_state,
-		void *pp_table, uint32_t classification_flag)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-	struct tonga_power_state  *tonga_ps =
-			(struct tonga_power_state *)(&(power_state->hardware));
-
-	struct tonga_performance_level *performance_level;
-
-	ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
-
-	ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
-			(ATOM_Tonga_POWERPLAYTABLE *)pp_table;
-
-	ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
-			(ATOM_Tonga_SCLK_Dependency_Table *)
-			(((unsigned long)powerplay_table) +
-			le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
-
-	ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
-			(ATOM_Tonga_MCLK_Dependency_Table *)
-			(((unsigned long)powerplay_table) +
-			le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-
-	/* The following fields are not initialized here: id orderedList allStatesList */
-	power_state->classification.ui_label =
-			(le16_to_cpu(state_entry->usClassification) &
-			ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
-			ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
-	power_state->classification.flags = classification_flag;
-	/* NOTE: There is a classification2 flag in BIOS that is not being used right now */
-
-	power_state->classification.temporary_state = false;
-	power_state->classification.to_be_deleted = false;
-
-	power_state->validation.disallowOnDC =
-			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_DISALLOW_ON_DC));
-
-	power_state->pcie.lanes = 0;
-
-	power_state->display.disableFrameModulation = false;
-	power_state->display.limitRefreshrate = false;
-	power_state->display.enableVariBright =
-			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_ENABLE_VARIBRIGHT));
-
-	power_state->validation.supportedPowerLevels = 0;
-	power_state->uvd_clocks.VCLK = 0;
-	power_state->uvd_clocks.DCLK = 0;
-	power_state->temperatures.min = 0;
-	power_state->temperatures.max = 0;
-
-	performance_level = &(tonga_ps->performance_levels
-			[tonga_ps->performance_level_count++]);
-
-	PP_ASSERT_WITH_CODE(
-			(tonga_ps->performance_level_count < SMU72_MAX_LEVELS_GRAPHICS),
-			"Performance levels exceeds SMC limit!",
-			return -1);
-
-	PP_ASSERT_WITH_CODE(
-			(tonga_ps->performance_level_count <=
-					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
-			"Performance levels exceeds Driver limit!",
-			return -1);
-
-	/* Performance levels are arranged from low to high. */
-	performance_level->memory_clock =
-				le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexLow].ulMclk);
-
-	performance_level->engine_clock =
-				le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexLow].ulSclk);
-
-	performance_level->pcie_gen = get_pcie_gen_support(
-							data->pcie_gen_cap,
-					     state_entry->ucPCIEGenLow);
-
-	performance_level->pcie_lane = get_pcie_lane_support(
-						    data->pcie_lane_cap,
-					   state_entry->ucPCIELaneHigh);
-
-	performance_level =
-			&(tonga_ps->performance_levels[tonga_ps->performance_level_count++]);
-
-	performance_level->memory_clock =
-				le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexHigh].ulMclk);
-
-	performance_level->engine_clock =
-				le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexHigh].ulSclk);
-
-	performance_level->pcie_gen = get_pcie_gen_support(
-							data->pcie_gen_cap,
-					    state_entry->ucPCIEGenHigh);
-
-	performance_level->pcie_lane = get_pcie_lane_support(
-						    data->pcie_lane_cap,
-					   state_entry->ucPCIELaneHigh);
-
-	return 0;
-}
-
-static int tonga_get_pp_table_entry(struct pp_hwmgr *hwmgr,
-		    unsigned long entry_index, struct pp_power_state *ps)
-{
-	int result;
-	struct tonga_power_state *tonga_ps;
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
-					   table_info->vdd_dep_on_mclk;
-
-	ps->hardware.magic = PhwTonga_Magic;
-
-	tonga_ps = cast_phw_tonga_power_state(&(ps->hardware));
-
-	result = tonga_get_powerplay_table_entry(hwmgr, entry_index, ps,
-			tonga_get_pp_table_entry_callback_func);
-
-	/* This is the earliest time we have all the dependency table and the VBIOS boot state
-	 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
-	 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state
-	 */
-	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
-		if (dep_mclk_table->entries[0].clk !=
-				data->vbios_boot_state.mclk_bootup_value)
-			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
-					"does not match VBIOS boot MCLK level");
-		if (dep_mclk_table->entries[0].vddci !=
-				data->vbios_boot_state.vddci_bootup_value)
-			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
-					"does not match VBIOS boot VDDCI level");
-	}
-
-	/* set DC compatible flag if this state supports DC */
-	if (!ps->validation.disallowOnDC)
-		tonga_ps->dc_compatible = true;
-
-	if (ps->classification.flags & PP_StateClassificationFlag_ACPI)
-		data->acpi_pcie_gen = tonga_ps->performance_levels[0].pcie_gen;
-	else if (ps->classification.flags & PP_StateClassificationFlag_Boot) {
-		if (data->bacos.best_match == 0xffff) {
-			/* For V.I. use boot state as base BACO state */
-			data->bacos.best_match = PP_StateClassificationFlag_Boot;
-			data->bacos.performance_level = tonga_ps->performance_levels[0];
-		}
-	}
-
-	tonga_ps->uvd_clocks.VCLK = ps->uvd_clocks.VCLK;
-	tonga_ps->uvd_clocks.DCLK = ps->uvd_clocks.DCLK;
-
-	if (!result) {
-		uint32_t i;
-
-		switch (ps->classification.ui_label) {
-		case PP_StateUILabel_Performance:
-			data->use_pcie_performance_levels = true;
-
-			for (i = 0; i < tonga_ps->performance_level_count; i++) {
-				if (data->pcie_gen_performance.max <
-						tonga_ps->performance_levels[i].pcie_gen)
-					data->pcie_gen_performance.max =
-							tonga_ps->performance_levels[i].pcie_gen;
-
-				if (data->pcie_gen_performance.min >
-						tonga_ps->performance_levels[i].pcie_gen)
-					data->pcie_gen_performance.min =
-							tonga_ps->performance_levels[i].pcie_gen;
-
-				if (data->pcie_lane_performance.max <
-						tonga_ps->performance_levels[i].pcie_lane)
-					data->pcie_lane_performance.max =
-							tonga_ps->performance_levels[i].pcie_lane;
-
-				if (data->pcie_lane_performance.min >
-						tonga_ps->performance_levels[i].pcie_lane)
-					data->pcie_lane_performance.min =
-							tonga_ps->performance_levels[i].pcie_lane;
-			}
-			break;
-		case PP_StateUILabel_Battery:
-			data->use_pcie_power_saving_levels = true;
-
-			for (i = 0; i < tonga_ps->performance_level_count; i++) {
-				if (data->pcie_gen_power_saving.max <
-						tonga_ps->performance_levels[i].pcie_gen)
-					data->pcie_gen_power_saving.max =
-							tonga_ps->performance_levels[i].pcie_gen;
-
-				if (data->pcie_gen_power_saving.min >
-						tonga_ps->performance_levels[i].pcie_gen)
-					data->pcie_gen_power_saving.min =
-							tonga_ps->performance_levels[i].pcie_gen;
-
-				if (data->pcie_lane_power_saving.max <
-						tonga_ps->performance_levels[i].pcie_lane)
-					data->pcie_lane_power_saving.max =
-							tonga_ps->performance_levels[i].pcie_lane;
-
-				if (data->pcie_lane_power_saving.min >
-						tonga_ps->performance_levels[i].pcie_lane)
-					data->pcie_lane_power_saving.min =
-							tonga_ps->performance_levels[i].pcie_lane;
-			}
-			break;
-		default:
-			break;
-		}
-	}
-	return 0;
-}
-
-static void
-tonga_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
-{
-	uint32_t sclk, mclk, activity_percent;
-	uint32_t offset;
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetSclkFrequency));
-
-	sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
-	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetMclkFrequency));
-
-	mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-	seq_printf(m, "\n [  mclk  ]: %u MHz\n\n [  sclk  ]: %u MHz\n", mclk/100, sclk/100);
-
-	offset = data->soft_regs_start + offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
-	activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
-	activity_percent += 0x80;
-	activity_percent >>= 8;
-
-	seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
-
-	seq_printf(m, "uvd    %sabled\n", data->uvd_power_gated ? "dis" : "en");
-
-	seq_printf(m, "vce    %sabled\n", data->vce_power_gated ? "dis" : "en");
-}
-
-static int tonga_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
-{
-	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
-	const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	struct tonga_single_dpm_table *psclk_table = &(data->dpm_table.sclk_table);
-	uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
-	struct tonga_single_dpm_table *pmclk_table = &(data->dpm_table.mclk_table);
-	uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
-	struct PP_Clocks min_clocks = {0};
-	uint32_t i;
-	struct cgs_display_info info = {0};
-
-	data->need_update_smu7_dpm_table = 0;
-
-	for (i = 0; i < psclk_table->count; i++) {
-		if (sclk == psclk_table->dpm_levels[i].value)
-			break;
-	}
-
-	if (i >= psclk_table->count)
-		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
-	else {
-	/* TODO: Check SCLK in DAL's minimum clocks in case DeepSleep divider update is required.*/
-		if(data->display_timing.min_clock_insr != min_clocks.engineClockInSR)
-			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
-	}
-
-	for (i=0; i < pmclk_table->count; i++) {
-		if (mclk == pmclk_table->dpm_levels[i].value)
-			break;
-	}
-
-	if (i >= pmclk_table->count)
-		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	if (data->display_timing.num_existing_displays != info.display_count)
-		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
-
-	return 0;
-}
-
-static uint16_t tonga_get_maximum_link_speed(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_ps)
-{
-	uint32_t i;
-	uint32_t sclk, max_sclk = 0;
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	struct tonga_dpm_table *pdpm_table = &data->dpm_table;
-
-	for (i = 0; i < hw_ps->performance_level_count; i++) {
-		sclk = hw_ps->performance_levels[i].engine_clock;
-		if (max_sclk < sclk)
-			max_sclk = sclk;
-	}
-
-	for (i = 0; i < pdpm_table->sclk_table.count; i++) {
-		if (pdpm_table->sclk_table.dpm_levels[i].value == max_sclk)
-			return (uint16_t) ((i >= pdpm_table->pcie_speed_table.count) ?
-					pdpm_table->pcie_speed_table.dpm_levels[pdpm_table->pcie_speed_table.count-1].value :
-					pdpm_table->pcie_speed_table.dpm_levels[i].value);
-	}
-
-	return 0;
-}
-
-static int tonga_request_link_speed_change_before_state_change(struct pp_hwmgr *hwmgr, const void *input)
-{
-	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state);
-	const struct tonga_power_state *tonga_cps = cast_const_phw_tonga_power_state(states->pcurrent_state);
-
-	uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_nps);
-	uint16_t current_link_speed;
-
-	if (data->force_pcie_gen == PP_PCIEGenInvalid)
-		current_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_cps);
-	else
-		current_link_speed = data->force_pcie_gen;
-
-	data->force_pcie_gen = PP_PCIEGenInvalid;
-	data->pspp_notify_required = false;
-	if (target_link_speed > current_link_speed) {
-		switch(target_link_speed) {
-		case PP_PCIEGen3:
-			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
-				break;
-			data->force_pcie_gen = PP_PCIEGen2;
-			if (current_link_speed == PP_PCIEGen2)
-				break;
-		case PP_PCIEGen2:
-			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
-				break;
-		default:
-			data->force_pcie_gen = tonga_get_current_pcie_speed(hwmgr);
-			break;
-		}
-	} else {
-		if (target_link_speed < current_link_speed)
-			data->pspp_notify_required = true;
-	}
-
-	return 0;
-}
-
-static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-	if (0 == data->need_update_smu7_dpm_table)
-		return 0;
-
-	if ((0 == data->sclk_dpm_key_disabled) &&
-		(data->need_update_smu7_dpm_table &
-		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
-		PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
-				    "Trying to freeze SCLK DPM when DPM is disabled",
-			);
-		PP_ASSERT_WITH_CODE(
-			0 == smum_send_msg_to_smc(hwmgr->smumgr,
-					  PPSMC_MSG_SCLKDPM_FreezeLevel),
-			"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
-			return -1);
-	}
-
-	if ((0 == data->mclk_dpm_key_disabled) &&
-		(data->need_update_smu7_dpm_table &
-		 DPMTABLE_OD_UPDATE_MCLK)) {
-		PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
-				    "Trying to freeze MCLK DPM when DPM is disabled",
-			);
-		PP_ASSERT_WITH_CODE(
-			0 == smum_send_msg_to_smc(hwmgr->smumgr,
-							PPSMC_MSG_MCLKDPM_FreezeLevel),
-			"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
-			return -1);
-	}
-
-	return 0;
-}
-
-static int tonga_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr, const void *input)
-{
-	int result = 0;
-
-	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
-	const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
-	uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
-	struct tonga_dpm_table *pdpm_table = &data->dpm_table;
-
-	struct tonga_dpm_table *pgolden_dpm_table = &data->golden_dpm_table;
-	uint32_t dpm_count, clock_percent;
-	uint32_t i;
-
-	if (0 == data->need_update_smu7_dpm_table)
-		return 0;
-
-	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
-		pdpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value = sclk;
-
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
-		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
-		/* Need to do calculation based on the golden DPM table
-		 * as the Heatmap GPU Clock axis is also based on the default values
-		 */
-			PP_ASSERT_WITH_CODE(
-				(pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value != 0),
-				"Divide by 0!",
-				return -1);
-			dpm_count = pdpm_table->sclk_table.count < 2 ? 0 : pdpm_table->sclk_table.count-2;
-			for (i = dpm_count; i > 1; i--) {
-				if (sclk > pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value) {
-					clock_percent = ((sclk - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value)*100) /
-							pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;
-
-					pdpm_table->sclk_table.dpm_levels[i].value =
-							pgolden_dpm_table->sclk_table.dpm_levels[i].value +
-							(pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;
-
-				} else if (pgolden_dpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value > sclk) {
-					clock_percent = ((pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value - sclk)*100) /
-								pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;
-
-					pdpm_table->sclk_table.dpm_levels[i].value =
-							pgolden_dpm_table->sclk_table.dpm_levels[i].value -
-							(pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;
-				} else
-					pdpm_table->sclk_table.dpm_levels[i].value =
-							pgolden_dpm_table->sclk_table.dpm_levels[i].value;
-			}
-		}
-	}
-
-	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
-		pdpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value = mclk;
-
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
-			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
-
-			PP_ASSERT_WITH_CODE(
-					(pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value != 0),
-					"Divide by 0!",
-					return -1);
-			dpm_count = pdpm_table->mclk_table.count < 2? 0 : pdpm_table->mclk_table.count-2;
-			for (i = dpm_count; i > 1; i--) {
-				if (mclk > pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value) {
-						clock_percent = ((mclk - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value)*100) /
-								    pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;
-
-						pdpm_table->mclk_table.dpm_levels[i].value =
-										pgolden_dpm_table->mclk_table.dpm_levels[i].value +
-										(pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;
-
-				} else if (pgolden_dpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value > mclk) {
-						clock_percent = ((pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value - mclk)*100) /
-								    pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;
-
-						pdpm_table->mclk_table.dpm_levels[i].value =
-									pgolden_dpm_table->mclk_table.dpm_levels[i].value -
-									(pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;
-				} else
-					pdpm_table->mclk_table.dpm_levels[i].value = pgolden_dpm_table->mclk_table.dpm_levels[i].value;
-			}
-		}
-	}
-
-	if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
-		result = tonga_populate_all_graphic_levels(hwmgr);
-		PP_ASSERT_WITH_CODE((0 == result),
-			"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
-			return result);
-	}
-
-	if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
-		/*populate MCLK dpm table to SMU7 */
-		result = tonga_populate_all_memory_levels(hwmgr);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
-				return result);
-	}
-
-	return result;
-}
-
-static	int tonga_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
-			  struct tonga_single_dpm_table * pdpm_table,
-			     uint32_t low_limit, uint32_t high_limit)
-{
-	uint32_t i;
-
-	for (i = 0; i < pdpm_table->count; i++) {
-		if ((pdpm_table->dpm_levels[i].value < low_limit) ||
-		    (pdpm_table->dpm_levels[i].value > high_limit))
-			pdpm_table->dpm_levels[i].enabled = false;
-		else
-			pdpm_table->dpm_levels[i].enabled = true;
-	}
-	return 0;
-}
-
-static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_state)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	uint32_t high_limit_count;
-
-	PP_ASSERT_WITH_CODE((hw_state->performance_level_count >= 1),
-				"power state did not have any performance level",
-				 return -1);
-
-	high_limit_count = (1 == hw_state->performance_level_count) ? 0: 1;
-
-	tonga_trim_single_dpm_states(hwmgr,
-					&(data->dpm_table.sclk_table),
-					hw_state->performance_levels[0].engine_clock,
-					hw_state->performance_levels[high_limit_count].engine_clock);
-
-	tonga_trim_single_dpm_states(hwmgr,
-						&(data->dpm_table.mclk_table),
-						hw_state->performance_levels[0].memory_clock,
-						hw_state->performance_levels[high_limit_count].memory_clock);
-
-	return 0;
-}
-
-static int tonga_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input)
-{
-	int result;
-	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
-
-	result = tonga_trim_dpm_states(hwmgr, tonga_ps);
-	if (0 != result)
-		return result;
-
-	data->dpm_level_enable_mask.sclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
-	data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
-	data->last_mclk_dpm_enable_mask = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
-	if (data->uvd_enabled)
-		data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
-
-	data->dpm_level_enable_mask.pcie_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
-
-	return 0;
-}
-
-int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
-				  (PPSMC_Msg)PPSMC_MSG_VCEDPM_Enable :
-				  (PPSMC_Msg)PPSMC_MSG_VCEDPM_Disable);
-}
-
-int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
-				  (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable :
-				  (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable);
-}
-
-int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	uint32_t mm_boot_level_offset, mm_boot_level_value;
-	struct phm_ppt_v1_information *ptable_information = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	if (!bgate) {
-		data->smc_state_table.UvdBootLevel = (uint8_t) (ptable_information->mm_dep_table->count - 1);
-		mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
-		mm_boot_level_offset /= 4;
-		mm_boot_level_offset *= 4;
-		mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset);
-		mm_boot_level_value &= 0x00FFFFFF;
-		mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-		if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM) ||
-		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-						PPSMC_MSG_UVDDPM_SetEnabledMask,
-						(uint32_t)(1 << data->smc_state_table.UvdBootLevel));
-	}
-
-	return tonga_enable_disable_uvd_dpm(hwmgr, !bgate);
-}
-
-int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
-{
-	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state);
-	const struct tonga_power_state *tonga_cps = cast_const_phw_tonga_power_state(states->pcurrent_state);
-
-	uint32_t mm_boot_level_offset, mm_boot_level_value;
-	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	if (tonga_nps->vce_clocks.EVCLK > 0 && (tonga_cps == NULL || tonga_cps->vce_clocks.EVCLK == 0)) {
-		data->smc_state_table.VceBootLevel = (uint8_t) (pptable_info->mm_dep_table->count - 1);
-
-		mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
-		mm_boot_level_offset /= 4;
-		mm_boot_level_offset *= 4;
-		mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset);
-		mm_boot_level_value &= 0xFF00FFFF;
-		mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_VCEDPM_SetEnabledMask,
-				(uint32_t)(1 << data->smc_state_table.VceBootLevel));
-
-		tonga_enable_disable_vce_dpm(hwmgr, true);
-	} else if (tonga_nps->vce_clocks.EVCLK == 0 && tonga_cps != NULL && tonga_cps->vce_clocks.EVCLK > 0)
-		tonga_enable_disable_vce_dpm(hwmgr, false);
-
-	return 0;
-}
-
-static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-	uint32_t address;
-	int32_t result;
-
-	if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
-		return 0;
-
-
-	memset(&data->mc_reg_table, 0, sizeof(SMU72_Discrete_MCRegisters));
-
-	result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(data->mc_reg_table));
-
-	if(result != 0)
-		return result;
-
-
-	address = data->mc_reg_table_start + (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
-
-	return  tonga_copy_bytes_to_smc(hwmgr->smumgr, address,
-				 (uint8_t *)&data->mc_reg_table.data[0],
-				sizeof(SMU72_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
-				data->sram_end);
-}
-
-static int tonga_program_memory_timing_parameters_conditionally(struct pp_hwmgr *hwmgr)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-	if (data->need_update_smu7_dpm_table &
-		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
-		return tonga_program_memory_timing_parameters(hwmgr);
-
-	return 0;
-}
-
-static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-	if (0 == data->need_update_smu7_dpm_table)
-		return 0;
-
-	if ((0 == data->sclk_dpm_key_disabled) &&
-		(data->need_update_smu7_dpm_table &
-		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
-
-		PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
-				    "Trying to Unfreeze SCLK DPM when DPM is disabled",
-			);
-		PP_ASSERT_WITH_CODE(
-			 0 == smum_send_msg_to_smc(hwmgr->smumgr,
-					 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
-			"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
-			return -1);
-	}
-
-	if ((0 == data->mclk_dpm_key_disabled) &&
-		(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
-
-		PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
-				    "Trying to Unfreeze MCLK DPM when DPM is disabled",
-				);
-		PP_ASSERT_WITH_CODE(
-			 0 == smum_send_msg_to_smc(hwmgr->smumgr,
-					 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
-		    "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
-		    return -1);
-	}
-
-	data->need_update_smu7_dpm_table = 0;
-
-	return 0;
-}
-
-static int tonga_notify_link_speed_change_after_state_change(struct pp_hwmgr *hwmgr, const void *input)
-{
-	const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
-	uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_ps);
-	uint8_t  request;
-
-	if (data->pspp_notify_required  ||
-	    data->pcie_performance_request) {
-		if (target_link_speed == PP_PCIEGen3)
-			request = PCIE_PERF_REQ_GEN3;
-		else if (target_link_speed == PP_PCIEGen2)
-			request = PCIE_PERF_REQ_GEN2;
-		else
-			request = PCIE_PERF_REQ_GEN1;
-
-		if(request == PCIE_PERF_REQ_GEN1 && tonga_get_current_pcie_speed(hwmgr) > 0) {
-			data->pcie_performance_request = false;
-			return 0;
-		}
-
-		if (0 != acpi_pcie_perf_request(hwmgr->device, request, false)) {
-			if (PP_PCIEGen2 == target_link_speed)
-				printk("PSPP request to switch to Gen2 from Gen3 Failed!");
-			else
-				printk("PSPP request to switch to Gen1 from Gen2 Failed!");
-		}
-	}
-
-	data->pcie_performance_request = false;
-	return 0;
-}
-
-static int tonga_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
-{
-	int tmp_result, result = 0;
-
-	tmp_result = tonga_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
-	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to find DPM states clocks in DPM table!", result = tmp_result);
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
-		tmp_result = tonga_request_link_speed_change_before_state_change(hwmgr, input);
-		PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to request link speed change before state change!", result = tmp_result);
-	}
-
-	tmp_result = tonga_freeze_sclk_mclk_dpm(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
-
-	tmp_result = tonga_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
-	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result);
-
-	tmp_result = tonga_generate_dpm_level_enable_mask(hwmgr, input);
-	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to generate DPM level enabled mask!", result = tmp_result);
-
-	tmp_result = tonga_update_vce_dpm(hwmgr, input);
-	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update VCE DPM!", result = tmp_result);
-
-	tmp_result = tonga_update_sclk_threshold(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", result = tmp_result);
-
-	tmp_result = tonga_update_and_upload_mc_reg_table(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload MC reg table!", result = tmp_result);
-
-	tmp_result = tonga_program_memory_timing_parameters_conditionally(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to program memory timing parameters!", result = tmp_result);
-
-	tmp_result = tonga_unfreeze_sclk_mclk_dpm(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", result = tmp_result);
-
-	tmp_result = tonga_upload_dpm_level_enable_mask(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload DPM level enabled mask!", result = tmp_result);
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
-		tmp_result = tonga_notify_link_speed_change_after_state_change(hwmgr, input);
-		PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to notify link speed change after state change!", result = tmp_result);
-	}
-
-	return result;
-}
-
-/**
-*  Set maximum target operating fan output PWM
-*
-* @param    pHwMgr:  the address of the powerplay hardware manager.
-* @param    usMaxFanPwm:  max operating fan PWM in percents
-* @return   The response that came from the SMC.
-*/
-static int tonga_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
-{
-	hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
-
-	if (phm_is_hw_access_blocked(hwmgr))
-		return 0;
-
-	return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm) ? 0 : -1);
-}
-
-int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
-{
-	uint32_t num_active_displays = 0;
-	struct cgs_display_info info = {0};
-	info.mode_info = NULL;
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	num_active_displays = info.display_count;
-
-	if (num_active_displays > 1)  /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
-		tonga_notify_smc_display_change(hwmgr, false);
-	else
-		tonga_notify_smc_display_change(hwmgr, true);
-
-	return 0;
-}
-
-/**
-* Programs the display gap
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always OK
-*/
-int tonga_program_display_gap(struct pp_hwmgr *hwmgr)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	uint32_t num_active_displays = 0;
-	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
-	uint32_t display_gap2;
-	uint32_t pre_vbi_time_in_us;
-	uint32_t frame_time_in_us;
-	uint32_t ref_clock;
-	uint32_t refresh_rate = 0;
-	struct cgs_display_info info = {0};
-	struct cgs_mode_info mode_info;
-
-	info.mode_info = &mode_info;
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-	num_active_displays = info.display_count;
-
-	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0)? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
-
-	ref_clock = mode_info.ref_clock;
-	refresh_rate = mode_info.refresh_rate;
-
-	if(0 == refresh_rate)
-		refresh_rate = 60;
-
-	frame_time_in_us = 1000000 / refresh_rate;
-
-	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
-	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, PreVBlankGap), 0x64);
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
-
-	if (num_active_displays == 1)
-		tonga_notify_smc_display_change(hwmgr, true);
-
-	return 0;
-}
-
-int tonga_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
-{
-
-	tonga_program_display_gap(hwmgr);
-
-	/* to do PhwTonga_CacUpdateDisplayConfiguration(pHwMgr); */
-	return 0;
-}
-
-/**
-*  Set maximum target operating fan output RPM
-*
-* @param    pHwMgr:  the address of the powerplay hardware manager.
-* @param    usMaxFanRpm:  max operating fan RPM value.
-* @return   The response that came from the SMC.
-*/
-static int tonga_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
-{
-	hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = us_max_fan_pwm;
-
-	if (phm_is_hw_access_blocked(hwmgr))
-		return 0;
-
-	return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_pwm) ? 0 : -1);
-}
-
-uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr)
-{
-	uint32_t reference_clock;
-	uint32_t tc;
-	uint32_t divide;
-
-	ATOM_FIRMWARE_INFO *fw_info;
-	uint16_t size;
-	uint8_t frev, crev;
-	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-
-	tc = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
-
-	if (tc)
-		return TCLK;
-
-	fw_info = (ATOM_FIRMWARE_INFO *)cgs_atom_get_data_table(hwmgr->device, index,
-						  &size, &frev, &crev);
-
-	if (!fw_info)
-		return 0;
-
-	reference_clock = le16_to_cpu(fw_info->usReferenceClock);
-
-	divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
-
-	if (0 != divide)
-		return reference_clock / 4;
-
-	return reference_clock;
-}
-
-int tonga_dpm_set_interrupt_state(void *private_data,
-					 unsigned src_id, unsigned type,
-					 int enabled)
-{
-	uint32_t cg_thermal_int;
-	struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr;
-
-	if (hwmgr == NULL)
-		return -EINVAL;
-
-	switch (type) {
-	case AMD_THERMAL_IRQ_LOW_TO_HIGH:
-		if (enabled) {
-			cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
-			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
-		} else {
-			cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
-			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
-		}
-		break;
-
-	case AMD_THERMAL_IRQ_HIGH_TO_LOW:
-		if (enabled) {
-			cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
-			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
-		} else {
-			cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
-			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
-		}
-		break;
-	default:
-		break;
-	}
-	return 0;
-}
-
-int tonga_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
-					const void *thermal_interrupt_info)
-{
-	int result;
-	const struct pp_interrupt_registration_info *info =
-			(const struct pp_interrupt_registration_info *)thermal_interrupt_info;
-
-	if (info == NULL)
-		return -EINVAL;
-
-	result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST,
-				tonga_dpm_set_interrupt_state,
-				info->call_back, info->context);
-
-	if (result)
-		return -EINVAL;
-
-	result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST,
-				tonga_dpm_set_interrupt_state,
-				info->call_back, info->context);
-
-	if (result)
-		return -EINVAL;
-
-	return 0;
-}
-
-bool tonga_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	bool is_update_required = false;
-	struct cgs_display_info info = {0,0,NULL};
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	if (data->display_timing.num_existing_displays != info.display_count)
-		is_update_required = true;
-/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
-	if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
-		cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
-		if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
-			is_update_required = true;
-*/
-	return is_update_required;
-}
-
-static inline bool tonga_are_power_levels_equal(const struct tonga_performance_level *pl1,
-							   const struct tonga_performance_level *pl2)
-{
-	return ((pl1->memory_clock == pl2->memory_clock) &&
-		  (pl1->engine_clock == pl2->engine_clock) &&
-		  (pl1->pcie_gen == pl2->pcie_gen) &&
-		  (pl1->pcie_lane == pl2->pcie_lane));
-}
-
-int tonga_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
-{
-	const struct tonga_power_state *psa = cast_const_phw_tonga_power_state(pstate1);
-	const struct tonga_power_state *psb = cast_const_phw_tonga_power_state(pstate2);
-	int i;
-
-	if (equal == NULL || psa == NULL || psb == NULL)
-		return -EINVAL;
-
-	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
-	if (psa->performance_level_count != psb->performance_level_count) {
-		*equal = false;
-		return 0;
-	}
-
-	for (i = 0; i < psa->performance_level_count; i++) {
-		if (!tonga_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
-			/* If we have found even one performance level pair that is different the states are different. */
-			*equal = false;
-			return 0;
-		}
-	}
-
-	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
-	*equal = ((psa->uvd_clocks.VCLK == psb->uvd_clocks.VCLK) && (psa->uvd_clocks.DCLK == psb->uvd_clocks.DCLK));
-	*equal &= ((psa->vce_clocks.EVCLK == psb->vce_clocks.EVCLK) && (psa->vce_clocks.ECCLK == psb->vce_clocks.ECCLK));
-	*equal &= (psa->sclk_threshold == psb->sclk_threshold);
-	*equal &= (psa->acp_clk == psb->acp_clk);
-
-	return 0;
-}
-
-static int tonga_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
-	if (mode) {
-		/* stop auto-manage */
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_MicrocodeFanControl))
-			tonga_fan_ctrl_stop_smc_fan_control(hwmgr);
-		tonga_fan_ctrl_set_static_mode(hwmgr, mode);
-	} else
-		/* restart auto-manage */
-		tonga_fan_ctrl_reset_fan_speed_to_default(hwmgr);
-
-	return 0;
-}
-
-static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr)
-{
-	if (hwmgr->fan_ctrl_is_in_default_mode)
-		return hwmgr->fan_ctrl_default_mode;
-	else
-		return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-				CG_FDO_CTRL2, FDO_PWM_MODE);
-}
-
-static int tonga_force_clock_level(struct pp_hwmgr *hwmgr,
-		enum pp_clock_type type, uint32_t mask)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
-		return -EINVAL;
-
-	switch (type) {
-	case PP_SCLK:
-		if (!data->sclk_dpm_key_disabled)
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_SCLKDPM_SetEnabledMask,
-					data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
-		break;
-	case PP_MCLK:
-		if (!data->mclk_dpm_key_disabled)
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_MCLKDPM_SetEnabledMask,
-					data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
-		break;
-	case PP_PCIE:
-	{
-		uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
-		uint32_t level = 0;
-
-		while (tmp >>= 1)
-			level++;
-
-		if (!data->pcie_dpm_key_disabled)
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_PCIeDPM_ForceLevel,
-					level);
-		break;
-	}
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr,
-		enum pp_clock_type type, char *buf)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
-	struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
-	struct tonga_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
-	int i, now, size = 0;
-	uint32_t clock, pcie_speed;
-
-	switch (type) {
-	case PP_SCLK:
-		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
-		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
-		for (i = 0; i < sclk_table->count; i++) {
-			if (clock > sclk_table->dpm_levels[i].value)
-				continue;
-			break;
-		}
-		now = i;
-
-		for (i = 0; i < sclk_table->count; i++)
-			size += sprintf(buf + size, "%d: %uMhz %s\n",
-					i, sclk_table->dpm_levels[i].value / 100,
-					(i == now) ? "*" : "");
-		break;
-	case PP_MCLK:
-		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
-		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
-		for (i = 0; i < mclk_table->count; i++) {
-			if (clock > mclk_table->dpm_levels[i].value)
-				continue;
-			break;
-		}
-		now = i;
-
-		for (i = 0; i < mclk_table->count; i++)
-			size += sprintf(buf + size, "%d: %uMhz %s\n",
-					i, mclk_table->dpm_levels[i].value / 100,
-					(i == now) ? "*" : "");
-		break;
-	case PP_PCIE:
-		pcie_speed = tonga_get_current_pcie_speed(hwmgr);
-		for (i = 0; i < pcie_table->count; i++) {
-			if (pcie_speed != pcie_table->dpm_levels[i].value)
-				continue;
-			break;
-		}
-		now = i;
-
-		for (i = 0; i < pcie_table->count; i++)
-			size += sprintf(buf + size, "%d: %s %s\n", i,
-					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
-					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
-					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
-					(i == now) ? "*" : "");
-		break;
-	default:
-		break;
-	}
-	return size;
-}
-
-static int tonga_get_sclk_od(struct pp_hwmgr *hwmgr)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
-	struct tonga_single_dpm_table *golden_sclk_table =
-			&(data->golden_dpm_table.sclk_table);
-	int value;
-
-	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
-			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
-			100 /
-			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
-	return value;
-}
-
-static int tonga_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	struct tonga_single_dpm_table *golden_sclk_table =
-			&(data->golden_dpm_table.sclk_table);
-	struct pp_power_state  *ps;
-	struct tonga_power_state  *tonga_ps;
-
-	if (value > 20)
-		value = 20;
-
-	ps = hwmgr->request_ps;
-
-	if (ps == NULL)
-		return -EINVAL;
-
-	tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
-
-	tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].engine_clock =
-			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
-			value / 100 +
-			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
-	return 0;
-}
-
-static int tonga_get_mclk_od(struct pp_hwmgr *hwmgr)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
-	struct tonga_single_dpm_table *golden_mclk_table =
-			&(data->golden_dpm_table.mclk_table);
-	int value;
-
-	value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
-			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
-			100 /
-			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
-	return value;
-}
-
-static int tonga_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	struct tonga_single_dpm_table *golden_mclk_table =
-			&(data->golden_dpm_table.mclk_table);
-	struct pp_power_state  *ps;
-	struct tonga_power_state  *tonga_ps;
-
-	if (value > 20)
-		value = 20;
-
-	ps = hwmgr->request_ps;
-
-	if (ps == NULL)
-		return -EINVAL;
-
-	tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
-
-	tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock =
-			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
-			value / 100 +
-			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
-	return 0;
-}
-
-static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
-	.backend_init = &tonga_hwmgr_backend_init,
-	.backend_fini = &tonga_hwmgr_backend_fini,
-	.asic_setup = &tonga_setup_asic_task,
-	.dynamic_state_management_enable = &tonga_enable_dpm_tasks,
-	.dynamic_state_management_disable = &tonga_disable_dpm_tasks,
-	.apply_state_adjust_rules = tonga_apply_state_adjust_rules,
-	.force_dpm_level = &tonga_force_dpm_level,
-	.power_state_set = tonga_set_power_state_tasks,
-	.get_power_state_size = tonga_get_power_state_size,
-	.get_mclk = tonga_dpm_get_mclk,
-	.get_sclk = tonga_dpm_get_sclk,
-	.patch_boot_state = tonga_dpm_patch_boot_state,
-	.get_pp_table_entry = tonga_get_pp_table_entry,
-	.get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries,
-	.print_current_perforce_level = tonga_print_current_perforce_level,
-	.powerdown_uvd = tonga_phm_powerdown_uvd,
-	.powergate_uvd = tonga_phm_powergate_uvd,
-	.powergate_vce = tonga_phm_powergate_vce,
-	.disable_clock_power_gating = tonga_phm_disable_clock_power_gating,
-	.update_clock_gatings = tonga_phm_update_clock_gatings,
-	.notify_smc_display_config_after_ps_adjustment = tonga_notify_smc_display_config_after_ps_adjustment,
-	.display_config_changed = tonga_display_configuration_changed_task,
-	.set_max_fan_pwm_output = tonga_set_max_fan_pwm_output,
-	.set_max_fan_rpm_output = tonga_set_max_fan_rpm_output,
-	.get_temperature = tonga_thermal_get_temperature,
-	.stop_thermal_controller = tonga_thermal_stop_thermal_controller,
-	.get_fan_speed_info = tonga_fan_ctrl_get_fan_speed_info,
-	.get_fan_speed_percent = tonga_fan_ctrl_get_fan_speed_percent,
-	.set_fan_speed_percent = tonga_fan_ctrl_set_fan_speed_percent,
-	.reset_fan_speed_to_default = tonga_fan_ctrl_reset_fan_speed_to_default,
-	.get_fan_speed_rpm = tonga_fan_ctrl_get_fan_speed_rpm,
-	.set_fan_speed_rpm = tonga_fan_ctrl_set_fan_speed_rpm,
-	.uninitialize_thermal_controller = tonga_thermal_ctrl_uninitialize_thermal_controller,
-	.register_internal_thermal_interrupt = tonga_register_internal_thermal_interrupt,
-	.check_smc_update_required_for_display_configuration = tonga_check_smc_update_required_for_display_configuration,
-	.check_states_equal = tonga_check_states_equal,
-	.set_fan_control_mode = tonga_set_fan_control_mode,
-	.get_fan_control_mode = tonga_get_fan_control_mode,
-	.force_clock_level = tonga_force_clock_level,
-	.print_clock_levels = tonga_print_clock_levels,
-	.get_sclk_od = tonga_get_sclk_od,
-	.set_sclk_od = tonga_set_sclk_od,
-	.get_mclk_od = tonga_get_mclk_od,
-	.set_mclk_od = tonga_set_mclk_od,
-};
-
-int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
-{
-	hwmgr->hwmgr_func = &tonga_hwmgr_funcs;
-	hwmgr->pptable_func = &tonga_pptable_funcs;
-	pp_tonga_thermal_initialize(hwmgr);
-	return 0;
-}
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
deleted file mode 100644
index 3961884..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef TONGA_HWMGR_H
-#define TONGA_HWMGR_H
-
-#include "hwmgr.h"
-#include "smu72_discrete.h"
-#include "ppatomctrl.h"
-#include "ppinterrupt.h"
-#include "tonga_powertune.h"
-#include "pp_endian.h"
-
-#define TONGA_MAX_HARDWARE_POWERLEVELS 2
-#define TONGA_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
-
-struct tonga_performance_level {
-	uint32_t	memory_clock;
-	uint32_t	engine_clock;
-	uint16_t    pcie_gen;
-	uint16_t    pcie_lane;
-};
-
-struct _phw_tonga_bacos {
-	uint32_t                          best_match;
-	uint32_t                          baco_flags;
-	struct tonga_performance_level		  performance_level;
-};
-typedef struct _phw_tonga_bacos phw_tonga_bacos;
-
-struct _phw_tonga_uvd_clocks {
-	uint32_t   VCLK;
-	uint32_t   DCLK;
-};
-
-typedef struct _phw_tonga_uvd_clocks phw_tonga_uvd_clocks;
-
-struct _phw_tonga_vce_clocks {
-	uint32_t   EVCLK;
-	uint32_t   ECCLK;
-};
-
-typedef struct _phw_tonga_vce_clocks phw_tonga_vce_clocks;
-
-struct tonga_power_state {
-	uint32_t                    magic;
-	phw_tonga_uvd_clocks        uvd_clocks;
-	phw_tonga_vce_clocks        vce_clocks;
-	uint32_t                    sam_clk;
-	uint32_t                    acp_clk;
-	uint16_t                    performance_level_count;
-	bool                        dc_compatible;
-	uint32_t                    sclk_threshold;
-	struct tonga_performance_level performance_levels[TONGA_MAX_HARDWARE_POWERLEVELS];
-};
-
-struct _phw_tonga_dpm_level {
-	bool		enabled;
-	uint32_t    value;
-	uint32_t    param1;
-};
-typedef struct _phw_tonga_dpm_level phw_tonga_dpm_level;
-
-#define TONGA_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define MAX_REGULAR_DPM_NUMBER 8
-#define TONGA_MINIMUM_ENGINE_CLOCK 2500
-
-struct tonga_single_dpm_table {
-	uint32_t count;
-	phw_tonga_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
-};
-
-struct tonga_dpm_table {
-	struct tonga_single_dpm_table  sclk_table;
-	struct tonga_single_dpm_table  mclk_table;
-	struct tonga_single_dpm_table  pcie_speed_table;
-	struct tonga_single_dpm_table  vddc_table;
-	struct tonga_single_dpm_table  vdd_gfx_table;
-	struct tonga_single_dpm_table  vdd_ci_table;
-	struct tonga_single_dpm_table  mvdd_table;
-};
-typedef struct _phw_tonga_dpm_table phw_tonga_dpm_table;
-
-
-struct _phw_tonga_clock_regisiters {
-	uint32_t  vCG_SPLL_FUNC_CNTL;
-	uint32_t  vCG_SPLL_FUNC_CNTL_2;
-	uint32_t  vCG_SPLL_FUNC_CNTL_3;
-	uint32_t  vCG_SPLL_FUNC_CNTL_4;
-	uint32_t  vCG_SPLL_SPREAD_SPECTRUM;
-	uint32_t  vCG_SPLL_SPREAD_SPECTRUM_2;
-	uint32_t  vDLL_CNTL;
-	uint32_t  vMCLK_PWRMGT_CNTL;
-	uint32_t  vMPLL_AD_FUNC_CNTL;
-	uint32_t  vMPLL_DQ_FUNC_CNTL;
-	uint32_t  vMPLL_FUNC_CNTL;
-	uint32_t  vMPLL_FUNC_CNTL_1;
-	uint32_t  vMPLL_FUNC_CNTL_2;
-	uint32_t  vMPLL_SS1;
-	uint32_t  vMPLL_SS2;
-};
-typedef struct _phw_tonga_clock_regisiters phw_tonga_clock_registers;
-
-struct _phw_tonga_voltage_smio_registers {
-	uint32_t vs0_vid_lower_smio_cntl;
-};
-typedef struct _phw_tonga_voltage_smio_registers phw_tonga_voltage_smio_registers;
-
-
-struct _phw_tonga_mc_reg_entry {
-	uint32_t mclk_max;
-	uint32_t mc_data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
-};
-typedef struct _phw_tonga_mc_reg_entry phw_tonga_mc_reg_entry;
-
-struct _phw_tonga_mc_reg_table {
-	uint8_t   last;               /* number of registers*/
-	uint8_t   num_entries;        /* number of entries in mc_reg_table_entry used*/
-	uint16_t  validflag;          /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
-	phw_tonga_mc_reg_entry    mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
-	SMU72_Discrete_MCRegisterAddress mc_reg_address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
-};
-typedef struct _phw_tonga_mc_reg_table phw_tonga_mc_reg_table;
-
-#define DISABLE_MC_LOADMICROCODE   1
-#define DISABLE_MC_CFGPROGRAMMING  2
-
-/*Ultra Low Voltage parameter structure */
-struct _phw_tonga_ulv_parm{
-	bool	ulv_supported;
-	uint32_t   ch_ulv_parameter;
-	uint32_t   ulv_volt_change_delay;
-	struct tonga_performance_level   ulv_power_level;
-};
-typedef struct _phw_tonga_ulv_parm phw_tonga_ulv_parm;
-
-#define TONGA_MAX_LEAKAGE_COUNT  8
-
-struct _phw_tonga_leakage_voltage {
-	uint16_t  count;
-	uint16_t  leakage_id[TONGA_MAX_LEAKAGE_COUNT];
-	uint16_t  actual_voltage[TONGA_MAX_LEAKAGE_COUNT];
-};
-typedef struct _phw_tonga_leakage_voltage phw_tonga_leakage_voltage;
-
-struct _phw_tonga_display_timing {
-	uint32_t min_clock_insr;
-	uint32_t num_existing_displays;
-};
-typedef struct _phw_tonga_display_timing phw_tonga_display_timing;
-
-struct _phw_tonga_dpmlevel_enable_mask {
-	uint32_t uvd_dpm_enable_mask;
-	uint32_t vce_dpm_enable_mask;
-	uint32_t acp_dpm_enable_mask;
-	uint32_t samu_dpm_enable_mask;
-	uint32_t sclk_dpm_enable_mask;
-	uint32_t mclk_dpm_enable_mask;
-	uint32_t pcie_dpm_enable_mask;
-};
-typedef struct _phw_tonga_dpmlevel_enable_mask phw_tonga_dpmlevel_enable_mask;
-
-struct _phw_tonga_pcie_perf_range {
-	uint16_t max;
-	uint16_t min;
-};
-typedef struct _phw_tonga_pcie_perf_range phw_tonga_pcie_perf_range;
-
-struct _phw_tonga_vbios_boot_state {
-	uint16_t					mvdd_bootup_value;
-	uint16_t					vddc_bootup_value;
-	uint16_t					vddci_bootup_value;
-	uint16_t					vddgfx_bootup_value;
-	uint32_t					sclk_bootup_value;
-	uint32_t					mclk_bootup_value;
-	uint16_t					pcie_gen_bootup_value;
-	uint16_t					pcie_lane_bootup_value;
-};
-typedef struct _phw_tonga_vbios_boot_state phw_tonga_vbios_boot_state;
-
-#define DPMTABLE_OD_UPDATE_SCLK     0x00000001
-#define DPMTABLE_OD_UPDATE_MCLK     0x00000002
-#define DPMTABLE_UPDATE_SCLK        0x00000004
-#define DPMTABLE_UPDATE_MCLK        0x00000008
-
-/* We need to review which fields are needed. */
-/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. */
-struct tonga_hwmgr {
-	struct tonga_dpm_table               dpm_table;
-	struct tonga_dpm_table               golden_dpm_table;
-
-	uint32_t                           voting_rights_clients0;
-	uint32_t                           voting_rights_clients1;
-	uint32_t                           voting_rights_clients2;
-	uint32_t                           voting_rights_clients3;
-	uint32_t                           voting_rights_clients4;
-	uint32_t                           voting_rights_clients5;
-	uint32_t                           voting_rights_clients6;
-	uint32_t                           voting_rights_clients7;
-	uint32_t                           static_screen_threshold_unit;
-	uint32_t                           static_screen_threshold;
-	uint32_t                           voltage_control;
-	uint32_t                           vdd_gfx_control;
-
-	uint32_t                           vddc_vddci_delta;
-	uint32_t                           vddc_vddgfx_delta;
-
-	struct pp_interrupt_registration_info    internal_high_thermal_interrupt_info;
-	struct pp_interrupt_registration_info    internal_low_thermal_interrupt_info;
-	struct pp_interrupt_registration_info    smc_to_host_interrupt_info;
-	uint32_t                          active_auto_throttle_sources;
-
-	struct pp_interrupt_registration_info    external_throttle_interrupt;
-	irq_handler_func_t             external_throttle_callback;
-	void                             *external_throttle_context;
-
-	struct pp_interrupt_registration_info    ctf_interrupt_info;
-	irq_handler_func_t             ctf_callback;
-	void                             *ctf_context;
-
-	phw_tonga_clock_registers	  clock_registers;
-	phw_tonga_voltage_smio_registers  voltage_smio_registers;
-
-	bool	is_memory_GDDR5;
-	uint16_t                          acpi_vddc;
-	bool	pspp_notify_required;        /* Flag to indicate if PSPP notification to SBIOS is required */
-	uint16_t                          force_pcie_gen;            /* The forced PCI-E speed if not 0xffff */
-	uint16_t                          acpi_pcie_gen;             /* The PCI-E speed at ACPI time */
-	uint32_t                           pcie_gen_cap;             /* The PCI-E speed capabilities bitmap from CAIL */
-	uint32_t                           pcie_lane_cap;            /* The PCI-E lane capabilities bitmap from CAIL */
-	uint32_t                           pcie_spc_cap;             /* Symbol Per Clock Capabilities from registry */
-	phw_tonga_leakage_voltage	vddc_leakage;            /* The Leakage VDDC supported (based on leakage ID).*/
-	phw_tonga_leakage_voltage	vddcgfx_leakage;         /* The Leakage VDDC supported (based on leakage ID). */
-	phw_tonga_leakage_voltage	vddci_leakage;           /* The Leakage VDDCI supported (based on leakage ID). */
-
-	uint32_t                           mvdd_control;
-	uint32_t                           vddc_mask_low;
-	uint32_t                           mvdd_mask_low;
-	uint16_t                          max_vddc_in_pp_table;        /* the maximum VDDC value in the powerplay table*/
-	uint16_t                          min_vddc_in_pp_table;
-	uint16_t                          max_vddci_in_pp_table;       /* the maximum VDDCI value in the powerplay table */
-	uint16_t                          min_vddci_in_pp_table;
-	uint32_t                           mclk_strobe_mode_threshold;
-	uint32_t                           mclk_stutter_mode_threshold;
-	uint32_t                           mclk_edc_enable_threshold;
-	uint32_t                           mclk_edc_wr_enable_threshold;
-	bool	is_uvd_enabled;
-	bool	is_xdma_enabled;
-	phw_tonga_vbios_boot_state      vbios_boot_state;
-
-	bool                         battery_state;
-	bool                         is_tlu_enabled;
-	bool                         pcie_performance_request;
-
-	/* -------------- SMC SRAM Address of firmware header tables ----------------*/
-	uint32_t                           sram_end;                 /* The first address after the SMC SRAM. */
-	uint32_t                           dpm_table_start;          /* The start of the dpm table in the SMC SRAM. */
-	uint32_t                           soft_regs_start;          /* The start of the soft registers in the SMC SRAM. */
-	uint32_t                           mc_reg_table_start;       /* The start of the mc register table in the SMC SRAM. */
-	uint32_t                           fan_table_start;          /* The start of the fan table in the SMC SRAM. */
-	uint32_t                           arb_table_start;          /* The start of the ARB setting table in the SMC SRAM. */
-	SMU72_Discrete_DpmTable         smc_state_table;             /* The carbon copy of the SMC state table. */
-	SMU72_Discrete_MCRegisters      mc_reg_table;
-	SMU72_Discrete_Ulv              ulv_setting;                 /* The carbon copy of ULV setting. */
-	/* -------------- Stuff originally coming from Evergreen --------------------*/
-	phw_tonga_mc_reg_table			tonga_mc_reg_table;
-	uint32_t                         vdd_ci_control;
-	pp_atomctrl_voltage_table        vddc_voltage_table;
-	pp_atomctrl_voltage_table        vddci_voltage_table;
-	pp_atomctrl_voltage_table        vddgfx_voltage_table;
-	pp_atomctrl_voltage_table        mvdd_voltage_table;
-
-	uint32_t                           mgcg_cgtt_local2;
-	uint32_t                           mgcg_cgtt_local3;
-	uint32_t                           gpio_debug;
-	uint32_t							mc_micro_code_feature;
-	uint32_t							highest_mclk;
-	uint16_t                          acpi_vdd_ci;
-	uint8_t                           mvdd_high_index;
-	uint8_t                           mvdd_low_index;
-	bool                         dll_defaule_on;
-	bool                         performance_request_registered;
-
-	/* ----------------- Low Power Features ---------------------*/
-	phw_tonga_bacos					bacos;
-	phw_tonga_ulv_parm              ulv;
-	/* ----------------- CAC Stuff ---------------------*/
-	uint32_t					cac_table_start;
-	bool                         cac_configuration_required;    /* TRUE if PP_CACConfigurationRequired == 1 */
-	bool                         driver_calculate_cac_leakage;  /* TRUE if PP_DriverCalculateCACLeakage == 1 */
-	bool                         cac_enabled;
-	/* ----------------- DPM2 Parameters ---------------------*/
-	uint32_t					power_containment_features;
-	bool                         enable_bapm_feature;
-	bool                         enable_tdc_limit_feature;
-	bool                         enable_pkg_pwr_tracking_feature;
-	bool                         disable_uvd_power_tune_feature;
-	phw_tonga_pt_defaults           *power_tune_defaults;
-	SMU72_Discrete_PmFuses           power_tune_table;
-	uint32_t                           ul_dte_tj_offset;             /* Fudge factor in DPM table to correct HW DTE errors */
-	uint32_t                           fast_watemark_threshold;      /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */
-
-	/* ----------------- Phase Shedding ---------------------*/
-	bool                         vddc_phase_shed_control;
-	/* --------------------- DI/DT --------------------------*/
-	phw_tonga_display_timing       display_timing;
-	/* --------- ReadRegistry data for memory and engine clock margins ---- */
-	uint32_t                           engine_clock_data;
-	uint32_t                           memory_clock_data;
-	/* -------- Thermal Temperature Setting --------------*/
-	phw_tonga_dpmlevel_enable_mask     dpm_level_enable_mask;
-	uint32_t                           need_update_smu7_dpm_table;
-	uint32_t                           sclk_dpm_key_disabled;
-	uint32_t                           mclk_dpm_key_disabled;
-	uint32_t                           pcie_dpm_key_disabled;
-	uint32_t                           min_engine_clocks; /* used to store the previous dal min sclock */
-	phw_tonga_pcie_perf_range       pcie_gen_performance;
-	phw_tonga_pcie_perf_range       pcie_lane_performance;
-	phw_tonga_pcie_perf_range       pcie_gen_power_saving;
-	phw_tonga_pcie_perf_range       pcie_lane_power_saving;
-	bool                            use_pcie_performance_levels;
-	bool                            use_pcie_power_saving_levels;
-	uint32_t                           activity_target[SMU72_MAX_LEVELS_GRAPHICS]; /* percentage value from 0-100, default 50 */
-	uint32_t                           mclk_activity_target;
-	uint32_t                           low_sclk_interrupt_threshold;
-	uint32_t                           last_mclk_dpm_enable_mask;
-	bool								uvd_enabled;
-	uint32_t                           pcc_monitor_enabled;
-
-	/* --------- Power Gating States ------------*/
-	bool                           uvd_power_gated;  /* 1: gated, 0:not gated */
-	bool                           vce_power_gated;  /* 1: gated, 0:not gated */
-	bool                           samu_power_gated; /* 1: gated, 0:not gated */
-	bool                           acp_power_gated;  /* 1: gated, 0:not gated */
-	bool                           pg_acp_init;
-};
-
-typedef struct tonga_hwmgr tonga_hwmgr;
-
-#define TONGA_DPM2_NEAR_TDP_DEC          10
-#define TONGA_DPM2_ABOVE_SAFE_INC        5
-#define TONGA_DPM2_BELOW_SAFE_INC        20
-
-#define TONGA_DPM2_LTA_WINDOW_SIZE       7  /* Log2 of the LTA window size (l2numWin_TDP). Eg. If LTA windows size is 128, then this value should be Log2(128) = 7. */
-
-#define TONGA_DPM2_LTS_TRUNCATE          0
-
-#define TONGA_DPM2_TDP_SAFE_LIMIT_PERCENT            80  /* Maximum 100 */
-
-#define TONGA_DPM2_MAXPS_PERCENT_H                   90  /* Maximum 0xFF */
-#define TONGA_DPM2_MAXPS_PERCENT_M                   90  /* Maximum 0xFF */
-
-#define TONGA_DPM2_PWREFFICIENCYRATIO_MARGIN         50
-
-#define TONGA_DPM2_SQ_RAMP_MAX_POWER                 0x3FFF
-#define TONGA_DPM2_SQ_RAMP_MIN_POWER                 0x12
-#define TONGA_DPM2_SQ_RAMP_MAX_POWER_DELTA           0x15
-#define TONGA_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE  0x1E
-#define TONGA_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO  0xF
-
-#define TONGA_VOLTAGE_CONTROL_NONE                   0x0
-#define TONGA_VOLTAGE_CONTROL_BY_GPIO                0x1
-#define TONGA_VOLTAGE_CONTROL_BY_SVID2               0x2
-#define TONGA_VOLTAGE_CONTROL_MERGED                 0x3
-
-#define TONGA_Q88_FORMAT_CONVERSION_UNIT             256 /*To convert to Q8.8 format for firmware */
-
-#define TONGA_UNUSED_GPIO_PIN                        0x7F
-
-int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
-int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
-int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
-int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
deleted file mode 100644
index 47ef1ca..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
+++ /dev/null
@@ -1,590 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <asm/div64.h>
-#include "tonga_thermal.h"
-#include "tonga_hwmgr.h"
-#include "tonga_smumgr.h"
-#include "tonga_ppsmc.h"
-#include "smu/smu_7_1_2_d.h"
-#include "smu/smu_7_1_2_sh_mask.h"
-
-/**
-* Get Fan Speed Control Parameters.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pSpeed is the address of the structure where the result is to be placed.
-* @exception Always succeeds except if we cannot zero out the output structure.
-*/
-int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info)
-{
-
-	if (hwmgr->thermal_controller.fanInfo.bNoFan)
-		return 0;
-
-	fan_speed_info->supports_percent_read = true;
-	fan_speed_info->supports_percent_write = true;
-	fan_speed_info->min_percent = 0;
-	fan_speed_info->max_percent = 100;
-
-	if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
-		fan_speed_info->supports_rpm_read = true;
-		fan_speed_info->supports_rpm_write = true;
-		fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
-		fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
-	} else {
-		fan_speed_info->min_rpm = 0;
-		fan_speed_info->max_rpm = 0;
-	}
-
-	return 0;
-}
-
-/**
-* Get Fan Speed in percent.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pSpeed is the address of the structure where the result is to be placed.
-* @exception Fails is the 100% setting appears to be 0.
-*/
-int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed)
-{
-	uint32_t duty100;
-	uint32_t duty;
-	uint64_t tmp64;
-
-	if (hwmgr->thermal_controller.fanInfo.bNoFan)
-		return 0;
-
-	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
-	duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY);
-
-	if (0 == duty100)
-		return -EINVAL;
-
-
-	tmp64 = (uint64_t)duty * 100;
-	do_div(tmp64, duty100);
-	*speed = (uint32_t)tmp64;
-
-	if (*speed > 100)
-		*speed = 100;
-
-	return 0;
-}
-
-/**
-* Get Fan Speed in RPM.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    speed is the address of the structure where the result is to be placed.
-* @exception Returns not supported if no fan is found or if pulses per revolution are not set
-*/
-int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
-{
-	return 0;
-}
-
-/**
-* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
-* @param    hwmgr  the address of the powerplay hardware manager.
-*           mode    the fan control mode, 0 default, 1 by percent, 5, by RPM
-* @exception Should always succeed.
-*/
-int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
-
-	if (hwmgr->fan_ctrl_is_in_default_mode) {
-		hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE);
-		hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN);
-		hwmgr->fan_ctrl_is_in_default_mode = false;
-	}
-
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0);
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode);
-
-	return 0;
-}
-
-/**
-* Reset Fan Speed Control to default mode.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @exception Should always succeed.
-*/
-int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
-{
-	if (!hwmgr->fan_ctrl_is_in_default_mode) {
-		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
-		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin);
-		hwmgr->fan_ctrl_is_in_default_mode = true;
-	}
-
-	return 0;
-}
-
-int tonga_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
-{
-	int result;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
-		cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
-		result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ?  0 : -EINVAL;
-/*
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_FanSpeedInTableIsRPM))
-			hwmgr->set_max_fan_rpm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM);
-		else
-			hwmgr->set_max_fan_pwm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM);
-*/
-	} else {
-		cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
-		result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ?  0 : -EINVAL;
-	}
-/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command.
-	if (result == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature != 0)
-		result = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanTemperatureTarget, \
-								hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature) ? 0 : -EINVAL);
-*/
-	return result;
-}
-
-
-int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
-{
-	return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ?  0 : -EINVAL;
-}
-
-/**
-* Set Fan Speed in percent.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    speed is the percentage value (0% - 100%) to be set.
-* @exception Fails is the 100% setting appears to be 0.
-*/
-int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed)
-{
-	uint32_t duty100;
-	uint32_t duty;
-	uint64_t tmp64;
-
-	if (hwmgr->thermal_controller.fanInfo.bNoFan)
-		return -EINVAL;
-
-	if (speed > 100)
-		speed = 100;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
-		tonga_fan_ctrl_stop_smc_fan_control(hwmgr);
-
-	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
-
-	if (0 == duty100)
-		return -EINVAL;
-
-	tmp64 = (uint64_t)speed * duty100;
-	do_div(tmp64, 100);
-	duty = (uint32_t)tmp64;
-
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
-
-	return tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-}
-
-/**
-* Reset Fan Speed to default.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @exception Always succeeds.
-*/
-int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
-{
-	int result;
-
-	if (hwmgr->thermal_controller.fanInfo.bNoFan)
-		return 0;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
-		result = tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-		if (0 == result)
-			result = tonga_fan_ctrl_start_smc_fan_control(hwmgr);
-	} else
-		result = tonga_fan_ctrl_set_default_mode(hwmgr);
-
-	return result;
-}
-
-/**
-* Set Fan Speed in RPM.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    speed is the percentage value (min - max) to be set.
-* @exception Fails is the speed not lie between min and max.
-*/
-int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
-{
-	return 0;
-}
-
-/**
-* Reads the remote temperature from the SIslands thermal controller.
-*
-* @param    hwmgr The address of the hardware manager.
-*/
-int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr)
-{
-	int temp;
-
-	temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP);
-
-/* Bit 9 means the reading is lower than the lowest usable value. */
-	if (0 != (0x200 & temp))
-		temp = TONGA_THERMAL_MAXIMUM_TEMP_READING;
-	else
-		temp = (temp & 0x1ff);
-
-	temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
-	return temp;
-}
-
-/**
-* Set the requested temperature range for high and low alert signals
-*
-* @param    hwmgr The address of the hardware manager.
-* @param    range Temperature range to be programmed for high and low alert signals
-* @exception PP_Result_BadInput if the input data is not valid.
-*/
-static int tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp)
-{
-	uint32_t low = TONGA_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-	uint32_t high = TONGA_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
-	if (low < low_temp)
-		low = low_temp;
-	if (high > high_temp)
-		high = high_temp;
-
-	if (low > high)
-		return -EINVAL;
-
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-
-	return 0;
-}
-
-/**
-* Programs thermal controller one-time setting registers
-*
-* @param    hwmgr The address of the hardware manager.
-*/
-static int tonga_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
-	if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
-		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-						CG_TACH_CTRL, EDGE_PER_REV,
-						hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1);
-
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
-
-	return 0;
-}
-
-/**
-* Enable thermal alerts on the RV770 thermal controller.
-*
-* @param    hwmgr The address of the hardware manager.
-*/
-static int tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr)
-{
-	uint32_t alert;
-
-	alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
-	alert &= ~(TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK);
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
-
-	/* send message to SMU to enable internal thermal interrupts */
-	return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1;
-}
-
-/**
-* Disable thermal alerts on the RV770 thermal controller.
-* @param    hwmgr The address of the hardware manager.
-*/
-static int tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr)
-{
-	uint32_t alert;
-
-	alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
-	alert |= (TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK);
-	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
-
-	/* send message to SMU to disable internal thermal interrupts */
-	return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1;
-}
-
-/**
-* Uninitialize the thermal controller.
-* Currently just disables alerts.
-* @param    hwmgr The address of the hardware manager.
-*/
-int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
-{
-	int result = tonga_thermal_disable_alert(hwmgr);
-
-	if (hwmgr->thermal_controller.fanInfo.bNoFan)
-		tonga_fan_ctrl_set_default_mode(hwmgr);
-
-	return result;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from set temperature range routine
-*/
-int tf_tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
-	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-	SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
-	uint32_t duty100;
-	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
-	uint16_t fdo_min, slope1, slope2;
-	uint32_t reference_clock;
-	int res;
-	uint64_t tmp64;
-
-	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
-		return 0;
-
-	if (0 == data->fan_table_start) {
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
-		return 0;
-	}
-
-	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
-
-	if (0 == duty100) {
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
-		return 0;
-	}
-
-	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
-	do_div(tmp64, 10000);
-	fdo_min = (uint16_t)tmp64;
-
-	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
-	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
-	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
-	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
-	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
-	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
-	fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
-	fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
-	fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
-	fan_table.Slope1 = cpu_to_be16(slope1);
-	fan_table.Slope2 = cpu_to_be16(slope2);
-
-	fan_table.FdoMin = cpu_to_be16(fdo_min);
-
-	fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
-
-	fan_table.HystUp = cpu_to_be16(1);
-
-	fan_table.HystSlope = cpu_to_be16(1);
-
-	fan_table.TempRespLim = cpu_to_be16(5);
-
-	reference_clock = tonga_get_xclk(hwmgr);
-
-	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
-
-	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
-	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
-	fan_table.FanControl_GL_Flag = 1;
-
-	res = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end);
-/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command.
-	if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0)
-		res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \
-						hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1);
-
-	if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0)
-		res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \
-					hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1);
-
-	if (0 != res)
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
-*/
-	return 0;
-}
-
-/**
-* Start the fan control on the SMC.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from set temperature range routine
-*/
-int tf_tonga_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
-/* If the fantable setup has failed we could have disabled PHM_PlatformCaps_MicrocodeFanControl even after this function was included in the table.
- * Make sure that we still think controlling the fan is OK.
-*/
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
-		tonga_fan_ctrl_start_smc_fan_control(hwmgr);
-		tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-	}
-
-	return 0;
-}
-
-/**
-* Set temperature range for high and low alerts
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from set temperature range routine
-*/
-int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
-	struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
-
-	if (range == NULL)
-		return -EINVAL;
-
-	return tonga_thermal_set_temperature_range(hwmgr, range->min, range->max);
-}
-
-/**
-* Programs one-time setting registers
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from initialize thermal controller routine
-*/
-int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
-    return tonga_thermal_initialize(hwmgr);
-}
-
-/**
-* Enable high and low alerts
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from enable alert routine
-*/
-int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
-	return tonga_thermal_enable_alert(hwmgr);
-}
-
-/**
-* Disable high and low alerts
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from disable alert routine
-*/
-static int tf_tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
-	return tonga_thermal_disable_alert(hwmgr);
-}
-
-static const struct phm_master_table_item tonga_thermal_start_thermal_controller_master_list[] = {
-	{ NULL, tf_tonga_thermal_initialize },
-	{ NULL, tf_tonga_thermal_set_temperature_range },
-	{ NULL, tf_tonga_thermal_enable_alert },
-/* We should restrict performance levels to low before we halt the SMC.
- * On the other hand we are still in boot state when we do this so it would be pointless.
- * If this assumption changes we have to revisit this table.
- */
-	{ NULL, tf_tonga_thermal_setup_fan_table},
-	{ NULL, tf_tonga_thermal_start_smc_fan_control},
-	{ NULL, NULL }
-};
-
-static const struct phm_master_table_header tonga_thermal_start_thermal_controller_master = {
-	0,
-	PHM_MasterTableFlag_None,
-	tonga_thermal_start_thermal_controller_master_list
-};
-
-static const struct phm_master_table_item tonga_thermal_set_temperature_range_master_list[] = {
-	{ NULL, tf_tonga_thermal_disable_alert},
-	{ NULL, tf_tonga_thermal_set_temperature_range},
-	{ NULL, tf_tonga_thermal_enable_alert},
-	{ NULL, NULL }
-};
-
-static const struct phm_master_table_header tonga_thermal_set_temperature_range_master = {
-	0,
-	PHM_MasterTableFlag_None,
-	tonga_thermal_set_temperature_range_master_list
-};
-
-int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
-{
-	if (!hwmgr->thermal_controller.fanInfo.bNoFan)
-		tonga_fan_ctrl_set_default_mode(hwmgr);
-	return 0;
-}
-
-/**
-* Initializes the thermal controller related functions in the Hardware Manager structure.
-* @param    hwmgr The address of the hardware manager.
-* @exception Any error code from the low-level communication.
-*/
-int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
-	int result;
-
-	result = phm_construct_table(hwmgr, &tonga_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range));
-
-	if (0 == result) {
-		result = phm_construct_table(hwmgr,
-						&tonga_thermal_start_thermal_controller_master,
-						&(hwmgr->start_thermal_controller));
-		if (0 != result)
-			phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
-	}
-
-	if (0 == result)
-		hwmgr->fan_ctrl_is_in_default_mode = true;
-	return result;
-}
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h
deleted file mode 100644
index aa335f2..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef TONGA_THERMAL_H
-#define TONGA_THERMAL_H
-
-#include "hwmgr.h"
-
-#define TONGA_THERMAL_HIGH_ALERT_MASK         0x1
-#define TONGA_THERMAL_LOW_ALERT_MASK          0x2
-
-#define TONGA_THERMAL_MINIMUM_TEMP_READING    -256
-#define TONGA_THERMAL_MAXIMUM_TEMP_READING    255
-
-#define TONGA_THERMAL_MINIMUM_ALERT_TEMP      0
-#define TONGA_THERMAL_MAXIMUM_ALERT_TEMP      255
-
-#define FDO_PWM_MODE_STATIC  1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-
-extern int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-
-extern int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr);
-extern int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
-extern int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
-extern int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
-extern int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr);
-extern int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index b764c8c..3fb5e57 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -29,6 +29,19 @@
 #include "amd_shared.h"
 #include "cgs_common.h"
 
+enum amd_pp_sensors {
+	AMDGPU_PP_SENSOR_GFX_SCLK = 0,
+	AMDGPU_PP_SENSOR_VDDNB,
+	AMDGPU_PP_SENSOR_VDDGFX,
+	AMDGPU_PP_SENSOR_UVD_VCLK,
+	AMDGPU_PP_SENSOR_UVD_DCLK,
+	AMDGPU_PP_SENSOR_VCE_ECCLK,
+	AMDGPU_PP_SENSOR_GPU_LOAD,
+	AMDGPU_PP_SENSOR_GFX_MCLK,
+	AMDGPU_PP_SENSOR_GPU_TEMP,
+	AMDGPU_PP_SENSOR_VCE_POWER,
+	AMDGPU_PP_SENSOR_UVD_POWER,
+};
 
 enum amd_pp_event {
 	AMD_PP_EVENT_INITIALIZE = 0,
@@ -131,9 +144,8 @@
 	struct cgs_device *device;
 	uint32_t chip_family;
 	uint32_t chip_id;
-	uint32_t rev_id;
-	bool powercontainment_enabled;
 };
+
 enum amd_pp_display_config_type{
 	AMD_PP_DisplayConfigType_None = 0,
 	AMD_PP_DisplayConfigType_DP54 ,
@@ -261,6 +273,7 @@
 struct amd_pp_clocks {
 	uint32_t count;
 	uint32_t clock[MAX_NUM_CLOCKS];
+	uint32_t latency[MAX_NUM_CLOCKS];
 };
 
 
@@ -332,8 +345,6 @@
 	int (*powergate_uvd)(void *handle, bool gate);
 	int (*dispatch_tasks)(void *handle, enum amd_pp_event event_id,
 				   void *input, void *output);
-	void (*print_current_performance_level)(void *handle,
-						      struct seq_file *m);
 	int (*set_fan_control_mode)(void *handle, uint32_t mode);
 	int (*get_fan_control_mode)(void *handle);
 	int (*set_fan_speed_percent)(void *handle, uint32_t percent);
@@ -347,6 +358,7 @@
 	int (*set_sclk_od)(void *handle, uint32_t value);
 	int (*get_mclk_od)(void *handle);
 	int (*set_mclk_od)(void *handle, uint32_t value);
+	int (*read_sensor)(void *handle, int idx, int32_t *value);
 };
 
 struct amd_powerplay {
@@ -378,4 +390,6 @@
 int amd_powerplay_get_display_mode_validation_clocks(void *handle,
 		struct amd_pp_simple_clock_info *output);
 
+int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id);
+
 #endif /* _AMD_POWERPLAY_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 962cb53..d449583 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -341,7 +341,6 @@
 extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
 extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
 extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr);
-extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr);
 extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr);
 extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block);
 extern int phm_set_power_state(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index bf0d2ac..4f0fedd 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -31,15 +31,20 @@
 #include "hwmgr_ppt.h"
 #include "ppatomctrl.h"
 #include "hwmgr_ppt.h"
+#include "power_state.h"
 
 struct pp_instance;
 struct pp_hwmgr;
-struct pp_hw_power_state;
-struct pp_power_state;
-struct PP_VCEState;
 struct phm_fan_speed_info;
 struct pp_atomctrl_voltage_table;
 
+extern int amdgpu_powercontainment;
+extern int amdgpu_sclk_deep_sleep_en;
+extern unsigned amdgpu_pp_feature_mask;
+
+#define VOLTAGE_SCALE 4
+
+uint8_t convert_to_vid(uint16_t vddc);
 
 enum DISPLAY_GAP {
 	DISPLAY_GAP_VBLANK_OR_WM = 0,   /* Wait for vblank or MCHG watermark. */
@@ -49,7 +54,6 @@
 };
 typedef enum DISPLAY_GAP DISPLAY_GAP;
 
-
 struct vi_dpm_level {
 	bool enabled;
 	uint32_t value;
@@ -71,6 +75,19 @@
 #define PCIE_PERF_REQ_GEN2         3
 #define PCIE_PERF_REQ_GEN3         4
 
+enum PP_FEATURE_MASK {
+	PP_SCLK_DPM_MASK = 0x1,
+	PP_MCLK_DPM_MASK = 0x2,
+	PP_PCIE_DPM_MASK = 0x4,
+	PP_SCLK_DEEP_SLEEP_MASK = 0x8,
+	PP_POWER_CONTAINMENT_MASK = 0x10,
+	PP_UVD_HANDSHAKE_MASK = 0x20,
+	PP_SMC_VOLTAGE_CONTROL_MASK = 0x40,
+	PP_VBI_TIME_SUPPORT_MASK = 0x80,
+	PP_ULV_MASK = 0x100,
+	PP_ENABLE_GFX_CG_THRU_SMU = 0x200
+};
+
 enum PHM_BackEnd_Magic {
 	PHM_Dummy_Magic       = 0xAA5555AA,
 	PHM_RV770_Magic       = 0xDCBAABCD,
@@ -294,8 +311,6 @@
 	int (*get_sclk)(struct pp_hwmgr *hwmgr, bool low);
 	int (*power_state_set)(struct pp_hwmgr *hwmgr,
 						const void *state);
-	void (*print_current_perforce_level)(struct pp_hwmgr *hwmgr,
-							struct seq_file *m);
 	int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr);
 	int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr);
 	int (*display_config_changed)(struct pp_hwmgr *hwmgr);
@@ -342,6 +357,7 @@
 	int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
 	int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
 	int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
+	int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, int32_t *value);
 };
 
 struct pp_table_func {
@@ -351,7 +367,7 @@
 	int (*pptable_get_vce_state_table_entry)(
 						struct pp_hwmgr *hwmgr,
 						unsigned long i,
-						struct PP_VCEState *vce_state,
+						struct pp_vce_state *vce_state,
 						void **clock_info,
 						unsigned long *flag);
 };
@@ -570,22 +586,43 @@
 	uint32_t NB;
 };
 
+#define PP_MAX_VCE_LEVELS 6
+
+enum PP_VCE_LEVEL {
+	PP_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
+	PP_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
+	PP_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
+	PP_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
+	PP_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
+	PP_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
+};
+
+
+enum PP_TABLE_VERSION {
+	PP_TABLE_V0 = 0,
+	PP_TABLE_V1,
+	PP_TABLE_V2,
+	PP_TABLE_MAX
+};
+
 /**
  * The main hardware manager structure.
  */
 struct pp_hwmgr {
 	uint32_t chip_family;
 	uint32_t chip_id;
-	uint32_t hw_revision;
-	uint32_t sub_sys_id;
-	uint32_t sub_vendor_id;
 
+	uint32_t pp_table_version;
 	void *device;
 	struct pp_smumgr *smumgr;
 	const void *soft_pp_table;
 	uint32_t soft_pp_table_size;
 	void *hardcode_pp_table;
 	bool need_pp_table_upload;
+
+	struct pp_vce_state vce_states[PP_MAX_VCE_LEVELS];
+	uint32_t num_vce_state_tables;
+
 	enum amd_dpm_forced_level dpm_level;
 	bool block_hw_access;
 	struct phm_gfx_arbiter gfx_arbiter;
@@ -614,7 +651,6 @@
 	uint32_t num_ps;
 	struct pp_thermal_controller_info thermal_controller;
 	bool fan_ctrl_is_in_default_mode;
-	bool powercontainment_enabled;
 	uint32_t fan_ctrl_default_mode;
 	uint32_t tmin;
 	struct phm_microcode_version_info microcode_version_info;
@@ -624,6 +660,7 @@
 	struct pp_power_state    *boot_ps;
 	struct pp_power_state    *uvd_ps;
 	struct amd_pp_display_configuration display_config;
+	uint32_t feature_mask;
 };
 
 
@@ -637,16 +674,7 @@
 extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
 				uint32_t value, uint32_t mask);
 
-extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
-				uint32_t index, uint32_t value, uint32_t mask);
 
-extern uint32_t phm_read_indirect_register(struct pp_hwmgr *hwmgr,
-		uint32_t indirect_port, uint32_t index);
-
-extern void phm_write_indirect_register(struct pp_hwmgr *hwmgr,
-		uint32_t indirect_port,
-		uint32_t index,
-		uint32_t value);
 
 extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
 				uint32_t indirect_port,
@@ -654,12 +682,7 @@
 				uint32_t value,
 				uint32_t mask);
 
-extern void phm_wait_for_indirect_register_unequal(
-				struct pp_hwmgr *hwmgr,
-				uint32_t indirect_port,
-				uint32_t index,
-				uint32_t value,
-				uint32_t mask);
+
 
 extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
 extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr);
@@ -673,6 +696,8 @@
 extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
 extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
 extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
+extern uint8_t phm_get_voltage_id(struct pp_atomctrl_voltage_table *voltage_table,
+		uint32_t voltage);
 extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage);
 extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci);
 extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
@@ -683,6 +708,10 @@
 extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
 extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
 
+extern int smu7_hwmgr_init(struct pp_hwmgr *hwmgr);
+extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+				uint32_t sclk, uint16_t id, uint16_t *voltage);
+
 #define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
 
 #define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
@@ -697,44 +726,6 @@
 	 PHM_FIELD_SHIFT(reg, field))
 
 
-#define PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, index, value, mask)	\
-	phm_wait_on_register(hwmgr, index, value, mask)
-
-#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, index, value, mask)	\
-	phm_wait_for_register_unequal(hwmgr, index, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask)	\
-	phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask)	\
-	phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX, index, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask)	\
-	phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX_0, index, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask)	\
-	phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX_0, index, value, mask)
-
-/* Operations on named registers. */
-
-#define PHM_WAIT_REGISTER(hwmgr, reg, value, mask)	\
-	PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg, value, mask)
-
-#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask)	\
-	PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask)	\
-	PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask)	\
-	PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask)	\
-	PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask)	\
-	PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
 /* Operations on named fields. */
 
 #define PHM_READ_FIELD(device, reg, field)	\
@@ -762,60 +753,16 @@
 			PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg),	\
 				reg, field, fieldval))
 
-#define PHM_WAIT_FIELD(hwmgr, reg, field, fieldval)	\
-	PHM_WAIT_REGISTER(hwmgr, reg, (fieldval)	\
-			<< PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
+#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask)        \
+       phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
+
+
+#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask)      \
+       PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
 
 #define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval)	\
 	PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval)	\
 			<< PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
 
-#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval)	\
-	PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval)	\
-			<< PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval)	\
-	PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, (fieldval)	\
-			<< PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval)	\
-	PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval)	\
-			<< PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval)	\
-	PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval)	\
-			<< PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-
-/* Operations on arrays of registers & fields. */
-
-#define PHM_READ_ARRAY_REGISTER(device, reg, offset)	\
-	cgs_read_register(device, mm##reg + (offset))
-
-#define PHM_WRITE_ARRAY_REGISTER(device, reg, offset, value)	\
-	cgs_write_register(device, mm##reg + (offset), value)
-
-#define PHM_WAIT_ARRAY_REGISTER(hwmgr, reg, offset, value, mask)	\
-	PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask)
-
-#define PHM_WAIT_ARRAY_REGISTER_UNEQUAL(hwmgr, reg, offset, value, mask)	\
-	PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask)
-
-#define PHM_READ_ARRAY_FIELD(hwmgr, reg, offset, field) \
-	PHM_GET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), reg, field)
-
-#define PHM_WRITE_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue)	\
-	PHM_WRITE_ARRAY_REGISTER(hwmgr->device, reg, offset,	\
-			PHM_SET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset),	\
-				reg, field, fieldvalue))
-
-#define PHM_WAIT_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue)	\
-	PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset),	\
-			(fieldvalue) << PHM_FIELD_SHIFT(reg, field),	\
-			PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_ARRAY_FIELD_UNEQUAL(hwmgr, reg, offset, field, fieldvalue)	\
-	PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset),	\
-			(fieldvalue) << PHM_FIELD_SHIFT(reg, field),	\
-			PHM_FIELD_MASK(reg, field))
 
 #endif /* _HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
index f497e7d..0de4436 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
@@ -23,8 +23,7 @@
 #ifndef _POLARIS10_PWRVIRUS_H
 #define _POLARIS10_PWRVIRUS_H
 
-#define mmSMC_IND_INDEX_11                              0x01AC
-#define mmSMC_IND_DATA_11                               0x01AD
+
 #define mmCP_HYP_MEC1_UCODE_ADDR	0xf81a
 #define mmCP_HYP_MEC1_UCODE_DATA	0xf81b
 #define mmCP_HYP_MEC2_UCODE_ADDR	0xf81c
diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
index a3f0ce4..9ceaed9 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
@@ -158,7 +158,7 @@
 
 
 /*Structure to hold a VCE state entry*/
-struct PP_VCEState {
+struct pp_vce_state {
 	uint32_t evclk;
 	uint32_t ecclk;
 	uint32_t sclk;
@@ -171,30 +171,28 @@
 	PP_MMProfilingState_Stopped
 };
 
-struct PP_Clock_Engine_Request {
-	unsigned long clientType;
-	unsigned long ctxid;
+struct pp_clock_engine_request {
+	unsigned long client_type;
+	unsigned long ctx_id;
 	uint64_t  context_handle;
 	unsigned long sclk;
-	unsigned long sclkHardMin;
+	unsigned long sclk_hard_min;
 	unsigned long mclk;
 	unsigned long iclk;
 	unsigned long evclk;
 	unsigned long ecclk;
-	unsigned long ecclkHardMin;
+	unsigned long ecclk_hard_min;
 	unsigned long vclk;
 	unsigned long dclk;
-	unsigned long samclk;
-	unsigned long acpclk;
-	unsigned long sclkOverdrive;
-	unsigned long mclkOverdrive;
+	unsigned long sclk_over_drive;
+	unsigned long mclk_over_drive;
 	unsigned long sclk_threshold;
 	unsigned long flag;
 	unsigned long vclk_ceiling;
 	unsigned long dclk_ceiling;
 	unsigned long num_cus;
-	unsigned long pmflag;
-	enum PP_MMProfilingState MMProfilingState;
+	unsigned long pm_flag;
+	enum PP_MMProfilingState mm_profiling_state;
 };
 
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
index d7d83b7..bfdbec1 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
@@ -43,5 +43,8 @@
 	} while (0)
 
 
+#define GET_FLEXIBLE_ARRAY_MEMBER_ADDR(type, member, ptr, n)	\
+	(type *)((char *)&(ptr)->member + (sizeof(type) * (n)))
+
 #endif /* PP_DEBUG_H */
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71.h b/drivers/gpu/drm/amd/powerplay/inc/smu71.h
new file mode 100644
index 0000000..71c9b2d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu71.h
@@ -0,0 +1,510 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU71_H
+#define SMU71_H
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(push, 1)
+#endif
+
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 4
+#define SMU__VARIANT__ICELAND 1
+#define SMU__DGPU_ONLY 1
+#define SMU__DYNAMIC_MCARB_SETTINGS 1
+
+enum SID_OPTION {
+  SID_OPTION_HI,
+  SID_OPTION_LO,
+  SID_OPTION_COUNT
+};
+
+typedef struct {
+  uint32_t high;
+  uint32_t low;
+} data_64_t;
+
+typedef struct {
+  data_64_t high;
+  data_64_t low;
+} data_128_t;
+
+#define SMU7_CONTEXT_ID_SMC        1
+#define SMU7_CONTEXT_ID_VBIOS      2
+
+#define SMU71_MAX_LEVELS_VDDC            8
+#define SMU71_MAX_LEVELS_VDDCI           4
+#define SMU71_MAX_LEVELS_MVDD            4
+#define SMU71_MAX_LEVELS_VDDNB           8
+
+#define SMU71_MAX_LEVELS_GRAPHICS        SMU__NUM_SCLK_DPM_STATE
+#define SMU71_MAX_LEVELS_MEMORY          SMU__NUM_MCLK_DPM_LEVELS
+#define SMU71_MAX_LEVELS_GIO             SMU__NUM_LCLK_DPM_LEVELS
+#define SMU71_MAX_LEVELS_LINK            SMU__NUM_PCIE_DPM_LEVELS
+#define SMU71_MAX_ENTRIES_SMIO           32
+
+#define DPM_NO_LIMIT 0
+#define DPM_NO_UP 1
+#define DPM_GO_DOWN 2
+#define DPM_GO_UP 3
+
+#define SMU7_FIRST_DPM_GRAPHICS_LEVEL    0
+#define SMU7_FIRST_DPM_MEMORY_LEVEL      0
+
+#define GPIO_CLAMP_MODE_VRHOT      1
+#define GPIO_CLAMP_MODE_THERM      2
+#define GPIO_CLAMP_MODE_DC         4
+
+#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
+#define SCRATCH_B_TARG_PCIE_INDEX_MASK  (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
+#define SCRATCH_B_CURR_PCIE_INDEX_MASK  (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_UVD_INDEX_SHIFT  6
+#define SCRATCH_B_TARG_UVD_INDEX_MASK   (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
+#define SCRATCH_B_CURR_UVD_INDEX_SHIFT  9
+#define SCRATCH_B_CURR_UVD_INDEX_MASK   (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
+#define SCRATCH_B_TARG_VCE_INDEX_SHIFT  12
+#define SCRATCH_B_TARG_VCE_INDEX_MASK   (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_VCE_INDEX_SHIFT  15
+#define SCRATCH_B_CURR_VCE_INDEX_MASK   (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_ACP_INDEX_SHIFT  18
+#define SCRATCH_B_TARG_ACP_INDEX_MASK   (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
+#define SCRATCH_B_CURR_ACP_INDEX_SHIFT  21
+#define SCRATCH_B_CURR_ACP_INDEX_MASK   (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
+#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
+#define SCRATCH_B_TARG_SAMU_INDEX_MASK  (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
+#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
+#define SCRATCH_B_CURR_SAMU_INDEX_MASK  (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
+
+
+#if defined SMU__DGPU_ONLY
+#define SMU71_DTE_ITERATIONS 5
+#define SMU71_DTE_SOURCES 3
+#define SMU71_DTE_SINKS 1
+#define SMU71_NUM_CPU_TES 0
+#define SMU71_NUM_GPU_TES 1
+#define SMU71_NUM_NON_TES 2
+
+#endif
+
+#if defined SMU__FUSION_ONLY
+#define SMU7_DTE_ITERATIONS 5
+#define SMU7_DTE_SOURCES 5
+#define SMU7_DTE_SINKS 3
+#define SMU7_NUM_CPU_TES 2
+#define SMU7_NUM_GPU_TES 1
+#define SMU7_NUM_NON_TES 2
+
+#endif
+
+struct SMU71_PIDController
+{
+    uint32_t Ki;
+    int32_t LFWindupUpperLim;
+    int32_t LFWindupLowerLim;
+    uint32_t StatePrecision;
+    uint32_t LfPrecision;
+    uint32_t LfOffset;
+    uint32_t MaxState;
+    uint32_t MaxLfFraction;
+    uint32_t StateShift;
+};
+
+typedef struct SMU71_PIDController SMU71_PIDController;
+
+struct SMU7_LocalDpmScoreboard
+{
+    uint32_t PercentageBusy;
+
+    int32_t  PIDError;
+    int32_t  PIDIntegral;
+    int32_t  PIDOutput;
+
+    uint32_t SigmaDeltaAccum;
+    uint32_t SigmaDeltaOutput;
+    uint32_t SigmaDeltaLevel;
+
+    uint32_t UtilizationSetpoint;
+
+    uint8_t  TdpClampMode;
+    uint8_t  TdcClampMode;
+    uint8_t  ThermClampMode;
+    uint8_t  VoltageBusy;
+
+    int8_t   CurrLevel;
+    int8_t   TargLevel;
+    uint8_t  LevelChangeInProgress;
+    uint8_t  UpHyst;
+
+    uint8_t  DownHyst;
+    uint8_t  VoltageDownHyst;
+    uint8_t  DpmEnable;
+    uint8_t  DpmRunning;
+
+    uint8_t  DpmForce;
+    uint8_t  DpmForceLevel;
+    uint8_t  DisplayWatermark;
+    uint8_t  McArbIndex;
+
+    uint32_t MinimumPerfSclk;
+
+    uint8_t  AcpiReq;
+    uint8_t  AcpiAck;
+    uint8_t  GfxClkSlow;
+    uint8_t  GpioClampMode;
+
+    uint8_t  FpsFilterWeight;
+    uint8_t  EnabledLevelsChange;
+    uint8_t  DteClampMode;
+    uint8_t  FpsClampMode;
+
+    uint16_t LevelResidencyCounters [SMU71_MAX_LEVELS_GRAPHICS];
+    uint16_t LevelSwitchCounters [SMU71_MAX_LEVELS_GRAPHICS];
+
+    void     (*TargetStateCalculator)(uint8_t);
+    void     (*SavedTargetStateCalculator)(uint8_t);
+
+    uint16_t AutoDpmInterval;
+    uint16_t AutoDpmRange;
+
+    uint8_t  FpsEnabled;
+    uint8_t  MaxPerfLevel;
+    uint8_t  AllowLowClkInterruptToHost;
+    uint8_t  FpsRunning;
+
+    uint32_t MaxAllowedFrequency;
+};
+
+typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard;
+
+#define SMU7_MAX_VOLTAGE_CLIENTS 12
+
+struct SMU7_VoltageScoreboard
+{
+    uint16_t CurrentVoltage;
+    uint16_t HighestVoltage;
+    uint16_t MaxVid;
+    uint8_t  HighestVidOffset;
+    uint8_t  CurrentVidOffset;
+#if defined (SMU__DGPU_ONLY)
+    uint8_t  CurrentPhases;
+    uint8_t  HighestPhases;
+#else
+    uint8_t  AvsOffset;
+    uint8_t  AvsOffsetApplied;
+#endif
+    uint8_t  ControllerBusy;
+    uint8_t  CurrentVid;
+    uint16_t RequestedVoltage[SMU7_MAX_VOLTAGE_CLIENTS];
+#if defined (SMU__DGPU_ONLY)
+    uint8_t  RequestedPhases[SMU7_MAX_VOLTAGE_CLIENTS];
+#endif
+    uint8_t  EnabledRequest[SMU7_MAX_VOLTAGE_CLIENTS];
+    uint8_t  TargetIndex;
+    uint8_t  Delay;
+    uint8_t  ControllerEnable;
+    uint8_t  ControllerRunning;
+    uint16_t CurrentStdVoltageHiSidd;
+    uint16_t CurrentStdVoltageLoSidd;
+#if defined (SMU__DGPU_ONLY)
+    uint16_t RequestedVddci;
+    uint16_t CurrentVddci;
+    uint16_t HighestVddci;
+    uint8_t  CurrentVddciVid;
+    uint8_t  TargetVddciIndex;
+#endif
+};
+
+typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard;
+
+// -------------------------------------------------------------------------------------------------------------------------
+#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */
+
+struct SMU7_PCIeLinkSpeedScoreboard
+{
+    uint8_t     DpmEnable;
+    uint8_t     DpmRunning;
+    uint8_t     DpmForce;
+    uint8_t     DpmForceLevel;
+
+    uint8_t     CurrentLinkSpeed;
+    uint8_t     EnabledLevelsChange;
+    uint16_t    AutoDpmInterval;
+
+    uint16_t    AutoDpmRange;
+    uint16_t    AutoDpmCount;
+
+    uint8_t     DpmMode;
+    uint8_t     AcpiReq;
+    uint8_t     AcpiAck;
+    uint8_t     CurrentLinkLevel;
+
+};
+
+typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard;
+
+// -------------------------------------------------------- CAC table ------------------------------------------------------
+#define SMU7_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
+#define SMU7_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
+
+#define SMU7_SCALE_I  7
+#define SMU7_SCALE_R 12
+
+struct SMU7_PowerScoreboard
+{
+    uint16_t   MinVoltage;
+    uint16_t   MaxVoltage;
+
+    uint32_t   AvgGpuPower;
+
+    uint16_t   VddcLeakagePower[SID_OPTION_COUNT];
+    uint16_t   VddcSclkConstantPower[SID_OPTION_COUNT];
+    uint16_t   VddcSclkDynamicPower[SID_OPTION_COUNT];
+    uint16_t   VddcNonSclkDynamicPower[SID_OPTION_COUNT];
+    uint16_t   VddcTotalPower[SID_OPTION_COUNT];
+    uint16_t   VddcTotalCurrent[SID_OPTION_COUNT];
+    uint16_t   VddcLoadVoltage[SID_OPTION_COUNT];
+    uint16_t   VddcNoLoadVoltage[SID_OPTION_COUNT];
+
+    uint16_t   DisplayPhyPower;
+    uint16_t   PciePhyPower;
+
+    uint16_t   VddciTotalPower;
+    uint16_t   Vddr1TotalPower;
+
+    uint32_t   RocPower;
+
+    uint32_t   last_power;
+    uint32_t   enableWinAvg;
+
+    uint32_t   lkg_acc;
+    uint16_t   VoltLkgeScaler;
+    uint16_t   TempLkgeScaler;
+
+    uint32_t   uvd_cac_dclk;
+    uint32_t   uvd_cac_vclk;
+    uint32_t   vce_cac_eclk;
+    uint32_t   samu_cac_samclk;
+    uint32_t   display_cac_dispclk;
+    uint32_t   acp_cac_aclk;
+    uint32_t   unb_cac;
+
+    uint32_t   WinTime;
+
+    uint16_t  GpuPwr_MAWt;
+    uint16_t  FilteredVddcTotalPower;
+
+    uint8_t   CalculationRepeats;
+    uint8_t   WaterfallUp;
+    uint8_t   WaterfallDown;
+    uint8_t   WaterfallLimit;
+};
+
+typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard;
+
+// --------------------------------------------------------------------------------------------------
+
+struct SMU7_ThermalScoreboard
+{
+   int16_t  GpuLimit;
+   int16_t  GpuHyst;
+   uint16_t CurrGnbTemp;
+   uint16_t FilteredGnbTemp;
+   uint8_t  ControllerEnable;
+   uint8_t  ControllerRunning;
+   uint8_t  WaterfallUp;
+   uint8_t  WaterfallDown;
+   uint8_t  WaterfallLimit;
+   uint8_t  padding[3];
+};
+
+typedef struct SMU7_ThermalScoreboard SMU7_ThermalScoreboard;
+
+// For FeatureEnables:
+#define SMU7_SCLK_DPM_CONFIG_MASK                        0x01
+#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK              0x02
+#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK              0x04
+#define SMU7_MCLK_DPM_CONFIG_MASK                        0x08
+#define SMU7_UVD_DPM_CONFIG_MASK                         0x10
+#define SMU7_VCE_DPM_CONFIG_MASK                         0x20
+#define SMU7_ACP_DPM_CONFIG_MASK                         0x40
+#define SMU7_SAMU_DPM_CONFIG_MASK                        0x80
+#define SMU7_PCIEGEN_DPM_CONFIG_MASK                    0x100
+
+#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE                  0x00000001
+#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE                  0x00000002
+#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE                  0x00000100
+#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE                  0x00000200
+#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE                  0x00010000
+#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE                  0x00020000
+
+// All 'soft registers' should be uint32_t.
+struct SMU71_SoftRegisters
+{
+    uint32_t        RefClockFrequency;
+    uint32_t        PmTimerPeriod;
+    uint32_t        FeatureEnables;
+#if defined (SMU__DGPU_ONLY)
+    uint32_t        PreVBlankGap;
+    uint32_t        VBlankTimeout;
+    uint32_t        TrainTimeGap;
+    uint32_t        MvddSwitchTime;
+    uint32_t        LongestAcpiTrainTime;
+    uint32_t        AcpiDelay;
+    uint32_t        G5TrainTime;
+    uint32_t        DelayMpllPwron;
+    uint32_t        VoltageChangeTimeout;
+#endif
+    uint32_t        HandshakeDisables;
+
+    uint8_t         DisplayPhy1Config;
+    uint8_t         DisplayPhy2Config;
+    uint8_t         DisplayPhy3Config;
+    uint8_t         DisplayPhy4Config;
+
+    uint8_t         DisplayPhy5Config;
+    uint8_t         DisplayPhy6Config;
+    uint8_t         DisplayPhy7Config;
+    uint8_t         DisplayPhy8Config;
+
+    uint32_t        AverageGraphicsActivity;
+    uint32_t        AverageMemoryActivity;
+    uint32_t        AverageGioActivity;
+
+    uint8_t         SClkDpmEnabledLevels;
+    uint8_t         MClkDpmEnabledLevels;
+    uint8_t         LClkDpmEnabledLevels;
+    uint8_t         PCIeDpmEnabledLevels;
+
+    uint32_t        DRAM_LOG_ADDR_H;
+    uint32_t        DRAM_LOG_ADDR_L;
+    uint32_t        DRAM_LOG_PHY_ADDR_H;
+    uint32_t        DRAM_LOG_PHY_ADDR_L;
+    uint32_t        DRAM_LOG_BUFF_SIZE;
+    uint32_t        UlvEnterCount;
+    uint32_t        UlvTime;
+    uint32_t        UcodeLoadStatus;
+    uint8_t         DPMFreezeAndForced;
+    uint8_t         Activity_Weight;
+    uint8_t         Reserved8[2];
+    uint32_t        Reserved;
+};
+
+typedef struct SMU71_SoftRegisters SMU71_SoftRegisters;
+
+struct SMU71_Firmware_Header
+{
+    uint32_t Digest[5];
+    uint32_t Version;
+    uint32_t HeaderSize;
+    uint32_t Flags;
+    uint32_t EntryPoint;
+    uint32_t CodeSize;
+    uint32_t ImageSize;
+
+    uint32_t Rtos;
+    uint32_t SoftRegisters;
+    uint32_t DpmTable;
+    uint32_t FanTable;
+    uint32_t CacConfigTable;
+    uint32_t CacStatusTable;
+
+    uint32_t mcRegisterTable;
+
+    uint32_t mcArbDramTimingTable;
+
+    uint32_t PmFuseTable;
+    uint32_t Globals;
+    uint32_t UvdDpmTable;
+    uint32_t AcpDpmTable;
+    uint32_t VceDpmTable;
+    uint32_t SamuDpmTable;
+    uint32_t UlvSettings;
+    uint32_t Reserved[37];
+    uint32_t Signature;
+};
+
+typedef struct SMU71_Firmware_Header SMU71_Firmware_Header;
+
+struct SMU7_HystController_Data
+{
+    uint8_t waterfall_up;
+    uint8_t waterfall_down;
+    uint8_t pstate;
+    uint8_t clamp_mode;
+};
+
+typedef struct SMU7_HystController_Data SMU7_HystController_Data;
+
+#define SMU71_FIRMWARE_HEADER_LOCATION 0x20000
+
+enum  DisplayConfig {
+    PowerDown = 1,
+    DP54x4,
+    DP54x2,
+    DP54x1,
+    DP27x4,
+    DP27x2,
+    DP27x1,
+    HDMI297,
+    HDMI162,
+    LVDS,
+    DP324x4,
+    DP324x2,
+    DP324x1
+};
+
+//#define SX_BLOCK_COUNT 8
+//#define MC_BLOCK_COUNT 1
+//#define CPL_BLOCK_COUNT 27
+
+#if defined SMU__VARIANT__ICELAND
+  #define SX_BLOCK_COUNT 8
+  #define MC_BLOCK_COUNT 1
+  #define CPL_BLOCK_COUNT 29
+#endif
+
+struct SMU7_Local_Cac {
+  uint8_t BlockId;
+  uint8_t SignalId;
+  uint8_t Threshold;
+  uint8_t Padding;
+};
+
+typedef struct SMU7_Local_Cac SMU7_Local_Cac;
+
+struct SMU7_Local_Cac_Table {
+  SMU7_Local_Cac SxLocalCac[SX_BLOCK_COUNT];
+  SMU7_Local_Cac CplLocalCac[CPL_BLOCK_COUNT];
+  SMU7_Local_Cac McLocalCac[MC_BLOCK_COUNT];
+};
+
+typedef struct SMU7_Local_Cac_Table SMU7_Local_Cac_Table;
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(pop)
+#endif
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h
new file mode 100644
index 0000000..c0e3936
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h
@@ -0,0 +1,631 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU71_DISCRETE_H
+#define SMU71_DISCRETE_H
+
+#include "smu71.h"
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(push, 1)
+#endif
+
+#define VDDC_ON_SVI2  0x1
+#define VDDCI_ON_SVI2 0x2
+#define MVDD_ON_SVI2  0x4
+
+struct SMU71_Discrete_VoltageLevel
+{
+    uint16_t    Voltage;
+    uint16_t    StdVoltageHiSidd;
+    uint16_t    StdVoltageLoSidd;
+    uint8_t     Smio;
+    uint8_t     padding;
+};
+
+typedef struct SMU71_Discrete_VoltageLevel SMU71_Discrete_VoltageLevel;
+
+struct SMU71_Discrete_GraphicsLevel
+{
+    uint32_t    MinVddc;
+    uint32_t    MinVddcPhases;
+
+    uint32_t    SclkFrequency;
+
+    uint8_t     pcieDpmLevel;
+    uint8_t     DeepSleepDivId;
+    uint16_t    ActivityLevel;
+
+    uint32_t    CgSpllFuncCntl3;
+    uint32_t    CgSpllFuncCntl4;
+    uint32_t    SpllSpreadSpectrum;
+    uint32_t    SpllSpreadSpectrum2;
+    uint32_t    CcPwrDynRm;
+    uint32_t    CcPwrDynRm1;
+    uint8_t     SclkDid;
+    uint8_t     DisplayWatermark;
+    uint8_t     EnabledForActivity;
+    uint8_t     EnabledForThrottle;
+    uint8_t     UpHyst;
+    uint8_t     DownHyst;
+    uint8_t     VoltageDownHyst;
+    uint8_t     PowerThrottle;
+};
+
+typedef struct SMU71_Discrete_GraphicsLevel SMU71_Discrete_GraphicsLevel;
+
+struct SMU71_Discrete_ACPILevel
+{
+    uint32_t    Flags;
+    uint32_t    MinVddc;
+    uint32_t    MinVddcPhases;
+    uint32_t    SclkFrequency;
+    uint8_t     SclkDid;
+    uint8_t     DisplayWatermark;
+    uint8_t     DeepSleepDivId;
+    uint8_t     padding;
+    uint32_t    CgSpllFuncCntl;
+    uint32_t    CgSpllFuncCntl2;
+    uint32_t    CgSpllFuncCntl3;
+    uint32_t    CgSpllFuncCntl4;
+    uint32_t    SpllSpreadSpectrum;
+    uint32_t    SpllSpreadSpectrum2;
+    uint32_t    CcPwrDynRm;
+    uint32_t    CcPwrDynRm1;
+};
+
+typedef struct SMU71_Discrete_ACPILevel SMU71_Discrete_ACPILevel;
+
+struct SMU71_Discrete_Ulv
+{
+    uint32_t    CcPwrDynRm;
+    uint32_t    CcPwrDynRm1;
+    uint16_t    VddcOffset;
+    uint8_t     VddcOffsetVid;
+    uint8_t     VddcPhase;
+    uint32_t    Reserved;
+};
+
+typedef struct SMU71_Discrete_Ulv SMU71_Discrete_Ulv;
+
+struct SMU71_Discrete_MemoryLevel
+{
+    uint32_t    MinVddc;
+    uint32_t    MinVddcPhases;
+    uint32_t    MinVddci;
+    uint32_t    MinMvdd;
+
+    uint32_t    MclkFrequency;
+
+    uint8_t     EdcReadEnable;
+    uint8_t     EdcWriteEnable;
+    uint8_t     RttEnable;
+    uint8_t     StutterEnable;
+
+    uint8_t     StrobeEnable;
+    uint8_t     StrobeRatio;
+    uint8_t     EnabledForThrottle;
+    uint8_t     EnabledForActivity;
+
+    uint8_t     UpHyst;
+    uint8_t     DownHyst;
+    uint8_t     VoltageDownHyst;
+    uint8_t     padding;
+
+    uint16_t    ActivityLevel;
+    uint8_t     DisplayWatermark;
+    uint8_t     padding1;
+
+    uint32_t    MpllFuncCntl;
+    uint32_t    MpllFuncCntl_1;
+    uint32_t    MpllFuncCntl_2;
+    uint32_t    MpllAdFuncCntl;
+    uint32_t    MpllDqFuncCntl;
+    uint32_t    MclkPwrmgtCntl;
+    uint32_t    DllCntl;
+    uint32_t    MpllSs1;
+    uint32_t    MpllSs2;
+};
+
+typedef struct SMU71_Discrete_MemoryLevel SMU71_Discrete_MemoryLevel;
+
+struct SMU71_Discrete_LinkLevel
+{
+    uint8_t     PcieGenSpeed;           ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3
+    uint8_t     PcieLaneCount;          ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16
+    uint8_t     EnabledForActivity;
+    uint8_t     SPC;
+    uint32_t    DownThreshold;
+    uint32_t    UpThreshold;
+    uint32_t    Reserved;
+};
+
+typedef struct SMU71_Discrete_LinkLevel SMU71_Discrete_LinkLevel;
+
+
+#ifdef SMU__DYNAMIC_MCARB_SETTINGS
+// MC ARB DRAM Timing registers.
+struct SMU71_Discrete_MCArbDramTimingTableEntry
+{
+    uint32_t McArbDramTiming;
+    uint32_t McArbDramTiming2;
+    uint8_t  McArbBurstTime;
+    uint8_t  padding[3];
+};
+
+typedef struct SMU71_Discrete_MCArbDramTimingTableEntry SMU71_Discrete_MCArbDramTimingTableEntry;
+
+struct SMU71_Discrete_MCArbDramTimingTable
+{
+    SMU71_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
+};
+
+typedef struct SMU71_Discrete_MCArbDramTimingTable SMU71_Discrete_MCArbDramTimingTable;
+#endif
+
+// UVD VCLK/DCLK state (level) definition.
+struct SMU71_Discrete_UvdLevel
+{
+    uint32_t VclkFrequency;
+    uint32_t DclkFrequency;
+    uint16_t MinVddc;
+    uint8_t  MinVddcPhases;
+    uint8_t  VclkDivider;
+    uint8_t  DclkDivider;
+    uint8_t  padding[3];
+};
+
+typedef struct SMU71_Discrete_UvdLevel SMU71_Discrete_UvdLevel;
+
+// Clocks for other external blocks (VCE, ACP, SAMU).
+struct SMU71_Discrete_ExtClkLevel
+{
+    uint32_t Frequency;
+    uint16_t MinVoltage;
+    uint8_t  MinPhases;
+    uint8_t  Divider;
+};
+
+typedef struct SMU71_Discrete_ExtClkLevel SMU71_Discrete_ExtClkLevel;
+
+// Everything that we need to keep track of about the current state.
+// Use this instead of copies of the GraphicsLevel and MemoryLevel structures to keep track of state parameters
+// that need to be checked later.
+// We don't need to cache everything about a state, just a few parameters.
+struct SMU71_Discrete_StateInfo
+{
+    uint32_t SclkFrequency;
+    uint32_t MclkFrequency;
+    uint32_t VclkFrequency;
+    uint32_t DclkFrequency;
+    uint32_t SamclkFrequency;
+    uint32_t AclkFrequency;
+    uint32_t EclkFrequency;
+    uint16_t MvddVoltage;
+    uint16_t padding16;
+    uint8_t  DisplayWatermark;
+    uint8_t  McArbIndex;
+    uint8_t  McRegIndex;
+    uint8_t  SeqIndex;
+    uint8_t  SclkDid;
+    int8_t   SclkIndex;
+    int8_t   MclkIndex;
+    uint8_t  PCIeGen;
+
+};
+
+typedef struct SMU71_Discrete_StateInfo SMU71_Discrete_StateInfo;
+
+
+struct SMU71_Discrete_DpmTable
+{
+    // Multi-DPM controller settings
+    SMU71_PIDController                  GraphicsPIDController;
+    SMU71_PIDController                  MemoryPIDController;
+    SMU71_PIDController                  LinkPIDController;
+
+    uint32_t                            SystemFlags;
+
+    // SMIO masks for voltage and phase controls
+    uint32_t                            SmioMaskVddcVid;
+    uint32_t                            SmioMaskVddcPhase;
+    uint32_t                            SmioMaskVddciVid;
+    uint32_t                            SmioMaskMvddVid;
+
+    uint32_t                            VddcLevelCount;
+    uint32_t                            VddciLevelCount;
+    uint32_t                            MvddLevelCount;
+
+    SMU71_Discrete_VoltageLevel          VddcLevel               [SMU71_MAX_LEVELS_VDDC];
+    SMU71_Discrete_VoltageLevel          VddciLevel              [SMU71_MAX_LEVELS_VDDCI];
+    SMU71_Discrete_VoltageLevel          MvddLevel               [SMU71_MAX_LEVELS_MVDD];
+
+    uint8_t                             GraphicsDpmLevelCount;
+    uint8_t                             MemoryDpmLevelCount;
+    uint8_t                             LinkLevelCount;
+    uint8_t                             MasterDeepSleepControl;
+
+    uint32_t                            Reserved[5];
+
+    // State table entries for each DPM state
+    SMU71_Discrete_GraphicsLevel         GraphicsLevel           [SMU71_MAX_LEVELS_GRAPHICS];
+    SMU71_Discrete_MemoryLevel           MemoryACPILevel;
+    SMU71_Discrete_MemoryLevel           MemoryLevel             [SMU71_MAX_LEVELS_MEMORY];
+    SMU71_Discrete_LinkLevel             LinkLevel               [SMU71_MAX_LEVELS_LINK];
+    SMU71_Discrete_ACPILevel             ACPILevel;
+
+    uint32_t                            SclkStepSize;
+    uint32_t                            Smio                    [SMU71_MAX_ENTRIES_SMIO];
+
+    uint8_t                             GraphicsBootLevel;
+    uint8_t                             GraphicsVoltageChangeEnable;
+    uint8_t                             GraphicsThermThrottleEnable;
+    uint8_t                             GraphicsInterval;
+
+    uint8_t                             VoltageInterval;
+    uint8_t                             ThermalInterval;
+    uint16_t                            TemperatureLimitHigh;
+
+    uint16_t                            TemperatureLimitLow;
+    uint8_t                             MemoryBootLevel;
+    uint8_t                             MemoryVoltageChangeEnable;
+
+    uint8_t                             MemoryInterval;
+    uint8_t                             MemoryThermThrottleEnable;
+    uint8_t                             MergedVddci;
+    uint8_t                             padding2;
+
+    uint16_t                            VoltageResponseTime;
+    uint16_t                            PhaseResponseTime;
+
+    uint8_t                             PCIeBootLinkLevel;
+    uint8_t                             PCIeGenInterval;
+    uint8_t                             DTEInterval;
+    uint8_t                             DTEMode;
+
+    uint8_t                             SVI2Enable;
+    uint8_t                             VRHotGpio;
+    uint8_t                             AcDcGpio;
+    uint8_t                             ThermGpio;
+
+    uint32_t                            DisplayCac;
+
+    uint16_t                            MaxPwr;
+    uint16_t                            NomPwr;
+
+    uint16_t                            FpsHighThreshold;
+    uint16_t                            FpsLowThreshold;
+
+    uint16_t                            BAPMTI_R  [SMU71_DTE_ITERATIONS][SMU71_DTE_SOURCES][SMU71_DTE_SINKS];
+    uint16_t                            BAPMTI_RC [SMU71_DTE_ITERATIONS][SMU71_DTE_SOURCES][SMU71_DTE_SINKS];
+
+    uint8_t                             DTEAmbientTempBase;
+    uint8_t                             DTETjOffset;
+    uint8_t                             GpuTjMax;
+    uint8_t                             GpuTjHyst;
+
+    uint16_t                            BootVddc;
+    uint16_t                            BootVddci;
+
+    uint16_t                            BootMVdd;
+    uint16_t                            padding;
+
+    uint32_t                            BAPM_TEMP_GRADIENT;
+
+    uint32_t                            LowSclkInterruptThreshold;
+    uint32_t                            VddGfxReChkWait;
+
+    uint16_t                            PPM_PkgPwrLimit;
+    uint16_t                            PPM_TemperatureLimit;
+
+    uint16_t                            DefaultTdp;
+    uint16_t                            TargetTdp;
+};
+
+typedef struct SMU71_Discrete_DpmTable SMU71_Discrete_DpmTable;
+
+// --------------------------------------------------- AC Timing Parameters ------------------------------------------------
+#define SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE 16
+#define SMU71_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU71_MAX_LEVELS_MEMORY
+
+struct SMU71_Discrete_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMU71_Discrete_MCRegisterAddress SMU71_Discrete_MCRegisterAddress;
+
+struct SMU71_Discrete_MCRegisterSet
+{
+    uint32_t value[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMU71_Discrete_MCRegisterSet SMU71_Discrete_MCRegisterSet;
+
+struct SMU71_Discrete_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMU71_Discrete_MCRegisterAddress     address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+    SMU71_Discrete_MCRegisterSet         data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMU71_Discrete_MCRegisters SMU71_Discrete_MCRegisters;
+
+
+// --------------------------------------------------- Fan Table -----------------------------------------------------------
+struct SMU71_Discrete_FanTable
+{
+    uint16_t FdoMode;
+    int16_t  TempMin;
+    int16_t  TempMed;
+    int16_t  TempMax;
+    int16_t  Slope1;
+    int16_t  Slope2;
+    int16_t  FdoMin;
+    int16_t  HystUp;
+    int16_t  HystDown;
+    int16_t  HystSlope;
+    int16_t  TempRespLim;
+    int16_t  TempCurr;
+    int16_t  SlopeCurr;
+    int16_t  PwmCurr;
+    uint32_t RefreshPeriod;
+    int16_t  FdoMax;
+    uint8_t  TempSrc;
+    int8_t   Padding;
+};
+
+typedef struct SMU71_Discrete_FanTable SMU71_Discrete_FanTable;
+
+#define SMU7_DISCRETE_GPIO_SCLK_DEBUG             4
+#define SMU7_DISCRETE_GPIO_SCLK_DEBUG_BIT         (0x1 << SMU7_DISCRETE_GPIO_SCLK_DEBUG)
+
+struct SMU71_MclkDpmScoreboard
+{
+
+    uint32_t PercentageBusy;
+
+    int32_t  PIDError;
+    int32_t  PIDIntegral;
+    int32_t  PIDOutput;
+
+    uint32_t SigmaDeltaAccum;
+    uint32_t SigmaDeltaOutput;
+    uint32_t SigmaDeltaLevel;
+
+    uint32_t UtilizationSetpoint;
+
+    uint8_t  TdpClampMode;
+    uint8_t  TdcClampMode;
+    uint8_t  ThermClampMode;
+    uint8_t  VoltageBusy;
+
+    int8_t   CurrLevel;
+    int8_t   TargLevel;
+    uint8_t  LevelChangeInProgress;
+    uint8_t  UpHyst;
+
+    uint8_t  DownHyst;
+    uint8_t  VoltageDownHyst;
+    uint8_t  DpmEnable;
+    uint8_t  DpmRunning;
+
+    uint8_t  DpmForce;
+    uint8_t  DpmForceLevel;
+    uint8_t  DisplayWatermark;
+    uint8_t  McArbIndex;
+
+    uint32_t MinimumPerfMclk;
+
+    uint8_t  AcpiReq;
+    uint8_t  AcpiAck;
+    uint8_t  MclkSwitchInProgress;
+    uint8_t  MclkSwitchCritical;
+
+    uint8_t  TargetMclkIndex;
+    uint8_t  TargetMvddIndex;
+    uint8_t  MclkSwitchResult;
+
+    uint8_t  EnabledLevelsChange;
+
+    uint16_t LevelResidencyCounters [SMU71_MAX_LEVELS_MEMORY];
+    uint16_t LevelSwitchCounters [SMU71_MAX_LEVELS_MEMORY];
+
+    void     (*TargetStateCalculator)(uint8_t);
+    void     (*SavedTargetStateCalculator)(uint8_t);
+
+    uint16_t AutoDpmInterval;
+    uint16_t AutoDpmRange;
+
+    uint16_t  MclkSwitchingTime;
+    uint8_t padding[2];
+};
+
+typedef struct SMU71_MclkDpmScoreboard SMU71_MclkDpmScoreboard;
+
+struct SMU71_UlvScoreboard
+{
+    uint8_t     EnterUlv;
+    uint8_t     ExitUlv;
+    uint8_t     UlvActive;
+    uint8_t     WaitingForUlv;
+    uint8_t     UlvEnable;
+    uint8_t     UlvRunning;
+    uint8_t     UlvMasterEnable;
+    uint8_t     padding;
+    uint32_t    UlvAbortedCount;
+    uint32_t    UlvTimeStamp;
+};
+
+typedef struct SMU71_UlvScoreboard SMU71_UlvScoreboard;
+
+struct SMU71_VddGfxScoreboard
+{
+    uint8_t     VddGfxEnable;
+    uint8_t     VddGfxActive;
+    uint8_t     padding[2];
+
+    uint32_t    VddGfxEnteredCount;
+    uint32_t    VddGfxAbortedCount;
+};
+
+typedef struct SMU71_VddGfxScoreboard SMU71_VddGfxScoreboard;
+
+struct SMU71_AcpiScoreboard {
+  uint32_t SavedInterruptMask[2];
+  uint8_t LastACPIRequest;
+  uint8_t CgBifResp;
+  uint8_t RequestType;
+  uint8_t Padding;
+  SMU71_Discrete_ACPILevel D0Level;
+};
+
+typedef struct SMU71_AcpiScoreboard SMU71_AcpiScoreboard;
+
+
+struct SMU71_Discrete_PmFuses {
+  // dw0-dw1
+  uint8_t BapmVddCVidHiSidd[8];
+
+  // dw2-dw3
+  uint8_t BapmVddCVidLoSidd[8];
+
+  // dw4-dw5
+  uint8_t VddCVid[8];
+
+  // dw6
+  uint8_t SviLoadLineEn;
+  uint8_t SviLoadLineVddC;
+  uint8_t SviLoadLineTrimVddC;
+  uint8_t SviLoadLineOffsetVddC;
+
+  // dw7
+  uint16_t TDC_VDDC_PkgLimit;
+  uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+  uint8_t TDC_MAWt;
+
+  // dw8
+  uint8_t TdcWaterfallCtl;
+  uint8_t LPMLTemperatureMin;
+  uint8_t LPMLTemperatureMax;
+  uint8_t Reserved;
+
+  // dw9-dw12
+  uint8_t LPMLTemperatureScaler[16];
+
+  // dw13-dw14
+  int16_t FuzzyFan_ErrorSetDelta;
+  int16_t FuzzyFan_ErrorRateSetDelta;
+  int16_t FuzzyFan_PwmSetDelta;
+  uint16_t Reserved6;
+
+  // dw15
+  uint8_t GnbLPML[16];
+
+  // dw15
+  uint8_t GnbLPMLMaxVid;
+  uint8_t GnbLPMLMinVid;
+  uint8_t Reserved1[2];
+
+  // dw16
+  uint16_t BapmVddCBaseLeakageHiSidd;
+  uint16_t BapmVddCBaseLeakageLoSidd;
+};
+
+typedef struct SMU71_Discrete_PmFuses SMU71_Discrete_PmFuses;
+
+struct SMU71_Discrete_Log_Header_Table {
+  uint32_t    version;
+  uint32_t    asic_id;
+  uint16_t    flags;
+  uint16_t    entry_size;
+  uint32_t    total_size;
+  uint32_t    num_of_entries;
+  uint8_t     type;
+  uint8_t     mode;
+  uint8_t     filler_0[2];
+  uint32_t    filler_1[2];
+};
+
+typedef struct SMU71_Discrete_Log_Header_Table SMU71_Discrete_Log_Header_Table;
+
+struct SMU71_Discrete_Log_Cntl {
+    uint8_t             Enabled;
+    uint8_t             Type;
+    uint8_t             padding[2];
+    uint32_t            BufferSize;
+    uint32_t            SamplesLogged;
+    uint32_t            SampleSize;
+    uint32_t            AddrL;
+    uint32_t            AddrH;
+};
+
+typedef struct SMU71_Discrete_Log_Cntl SMU71_Discrete_Log_Cntl;
+
+#if defined SMU__DGPU_ONLY
+  #define CAC_ACC_NW_NUM_OF_SIGNALS 83
+#endif
+
+
+struct SMU71_Discrete_Cac_Collection_Table {
+  uint32_t temperature;
+  uint32_t cac_acc_nw[CAC_ACC_NW_NUM_OF_SIGNALS];
+  uint32_t filler[4];
+};
+
+typedef struct SMU71_Discrete_Cac_Collection_Table SMU71_Discrete_Cac_Collection_Table;
+
+struct SMU71_Discrete_Cac_Verification_Table {
+  uint32_t VddcTotalPower;
+  uint32_t VddcLeakagePower;
+  uint32_t VddcConstantPower;
+  uint32_t VddcGfxDynamicPower;
+  uint32_t VddcUvdDynamicPower;
+  uint32_t VddcVceDynamicPower;
+  uint32_t VddcAcpDynamicPower;
+  uint32_t VddcPcieDynamicPower;
+  uint32_t VddcDceDynamicPower;
+  uint32_t VddcCurrent;
+  uint32_t VddcVoltage;
+  uint32_t VddciTotalPower;
+  uint32_t VddciLeakagePower;
+  uint32_t VddciConstantPower;
+  uint32_t VddciDynamicPower;
+  uint32_t Vddr1TotalPower;
+  uint32_t Vddr1LeakagePower;
+  uint32_t Vddr1ConstantPower;
+  uint32_t Vddr1DynamicPower;
+  uint32_t spare[8];
+  uint32_t temperature;
+};
+
+typedef struct SMU71_Discrete_Cac_Verification_Table SMU71_Discrete_Cac_Verification_Table;
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(pop)
+#endif
+
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smum.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
similarity index 66%
rename from drivers/gpu/drm/amd/amdgpu/fiji_smum.h
rename to drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
index 1cef03d..65eb630 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smum.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
@@ -21,22 +21,38 @@
  *
  */
 
-#ifndef FIJI_SMUMGR_H
-#define FIJI_SMUMGR_H
+#ifndef _PP_COMMON_H
+#define _PP_COMMON_H
 
-#include "fiji_ppsmc.h"
+#include "smu7_ppsmc.h"
+#include "cgs_common.h"
 
-int fiji_smu_init(struct amdgpu_device *adev);
-int fiji_smu_fini(struct amdgpu_device *adev);
-int fiji_smu_start(struct amdgpu_device *adev);
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
 
-struct fiji_smu_private_data
-{
-	uint8_t *header;
-	uint32_t smu_buffer_addr_high;
-	uint32_t smu_buffer_addr_low;
-	uint32_t header_addr_high;
-	uint32_t header_addr_low;
-};
+
+#include "smu74.h"
+#include "smu74_discrete.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
+
 
 #endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
new file mode 100644
index 0000000..bce0009
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
@@ -0,0 +1,412 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef DGPU_VI_PP_SMC_H
+#define DGPU_VI_PP_SMC_H
+
+
+#pragma pack(push, 1)
+
+#define PPSMC_MSG_SetGBDroopSettings          ((uint16_t) 0x305)
+
+#define PPSMC_SWSTATE_FLAG_DC                           0x01
+#define PPSMC_SWSTATE_FLAG_UVD                          0x02
+#define PPSMC_SWSTATE_FLAG_VCE                          0x04
+
+#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL             0x00
+#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL             0x01
+#define PPSMC_THERMAL_PROTECT_TYPE_NONE                 0xff
+
+#define PPSMC_SYSTEMFLAG_GPIO_DC                        0x01
+#define PPSMC_SYSTEMFLAG_STEPVDDC                       0x02
+#define PPSMC_SYSTEMFLAG_GDDR5                          0x04
+
+#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP               0x08
+
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT                  0x10
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG           0x20
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK              0x07
+#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK     0x08
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE   0x00
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE  0x01
+
+
+#define PPSMC_DPM2FLAGS_TDPCLMP                         0x01
+#define PPSMC_DPM2FLAGS_PWRSHFT                         0x02
+#define PPSMC_DPM2FLAGS_OCP                             0x04
+
+
+#define PPSMC_DISPLAY_WATERMARK_LOW                     0
+#define PPSMC_DISPLAY_WATERMARK_HIGH                    1
+
+
+#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP    0x01
+#define PPSMC_STATEFLAG_POWERBOOST         0x02
+#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
+#define PPSMC_STATEFLAG_POWERSHIFT         0x08
+#define PPSMC_STATEFLAG_SLOW_READ_MARGIN   0x10
+#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
+#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS   0x40
+
+
+#define FDO_MODE_HARDWARE 0
+#define FDO_MODE_PIECE_WISE_LINEAR 1
+
+enum FAN_CONTROL {
+	FAN_CONTROL_FUZZY,
+	FAN_CONTROL_TABLE
+};
+
+
+#define PPSMC_Result_OK             ((uint16_t)0x01)
+#define PPSMC_Result_NoMore         ((uint16_t)0x02)
+
+#define PPSMC_Result_NotNow         ((uint16_t)0x03)
+#define PPSMC_Result_Failed         ((uint16_t)0xFF)
+#define PPSMC_Result_UnknownCmd     ((uint16_t)0xFE)
+#define PPSMC_Result_UnknownVT      ((uint16_t)0xFD)
+
+typedef uint16_t PPSMC_Result;
+
+#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
+
+
+#define PPSMC_MSG_Halt                      ((uint16_t)0x10)
+#define PPSMC_MSG_Resume                    ((uint16_t)0x11)
+#define PPSMC_MSG_EnableDPMLevel            ((uint16_t)0x12)
+#define PPSMC_MSG_ZeroLevelsDisabled        ((uint16_t)0x13)
+#define PPSMC_MSG_OneLevelsDisabled         ((uint16_t)0x14)
+#define PPSMC_MSG_TwoLevelsDisabled         ((uint16_t)0x15)
+#define PPSMC_MSG_EnableThermalInterrupt    ((uint16_t)0x16)
+#define PPSMC_MSG_RunningOnAC               ((uint16_t)0x17)
+#define PPSMC_MSG_LevelUp                   ((uint16_t)0x18)
+#define PPSMC_MSG_LevelDown                 ((uint16_t)0x19)
+#define PPSMC_MSG_ResetDPMCounters          ((uint16_t)0x1a)
+#define PPSMC_MSG_SwitchToSwState           ((uint16_t)0x20)
+#define PPSMC_MSG_SwitchToSwStateLast       ((uint16_t)0x3f)
+#define PPSMC_MSG_SwitchToInitialState      ((uint16_t)0x40)
+#define PPSMC_MSG_NoForcedLevel             ((uint16_t)0x41)
+#define PPSMC_MSG_ForceHigh                 ((uint16_t)0x42)
+#define PPSMC_MSG_ForceMediumOrHigh         ((uint16_t)0x43)
+#define PPSMC_MSG_SwitchToMinimumPower      ((uint16_t)0x51)
+#define PPSMC_MSG_ResumeFromMinimumPower    ((uint16_t)0x52)
+#define PPSMC_MSG_EnableCac                 ((uint16_t)0x53)
+#define PPSMC_MSG_DisableCac                ((uint16_t)0x54)
+#define PPSMC_DPMStateHistoryStart          ((uint16_t)0x55)
+#define PPSMC_DPMStateHistoryStop           ((uint16_t)0x56)
+#define PPSMC_CACHistoryStart               ((uint16_t)0x57)
+#define PPSMC_CACHistoryStop                ((uint16_t)0x58)
+#define PPSMC_TDPClampingActive             ((uint16_t)0x59)
+#define PPSMC_TDPClampingInactive           ((uint16_t)0x5A)
+#define PPSMC_StartFanControl               ((uint16_t)0x5B)
+#define PPSMC_StopFanControl                ((uint16_t)0x5C)
+#define PPSMC_NoDisplay                     ((uint16_t)0x5D)
+#define PPSMC_HasDisplay                    ((uint16_t)0x5E)
+#define PPSMC_MSG_UVDPowerOFF               ((uint16_t)0x60)
+#define PPSMC_MSG_UVDPowerON                ((uint16_t)0x61)
+#define PPSMC_MSG_EnableULV                 ((uint16_t)0x62)
+#define PPSMC_MSG_DisableULV                ((uint16_t)0x63)
+#define PPSMC_MSG_EnterULV                  ((uint16_t)0x64)
+#define PPSMC_MSG_ExitULV                   ((uint16_t)0x65)
+#define PPSMC_PowerShiftActive              ((uint16_t)0x6A)
+#define PPSMC_PowerShiftInactive            ((uint16_t)0x6B)
+#define PPSMC_OCPActive                     ((uint16_t)0x6C)
+#define PPSMC_OCPInactive                   ((uint16_t)0x6D)
+#define PPSMC_CACLongTermAvgEnable          ((uint16_t)0x6E)
+#define PPSMC_CACLongTermAvgDisable         ((uint16_t)0x6F)
+#define PPSMC_MSG_InferredStateSweep_Start  ((uint16_t)0x70)
+#define PPSMC_MSG_InferredStateSweep_Stop   ((uint16_t)0x71)
+#define PPSMC_MSG_SwitchToLowestInfState    ((uint16_t)0x72)
+#define PPSMC_MSG_SwitchToNonInfState       ((uint16_t)0x73)
+#define PPSMC_MSG_AllStateSweep_Start       ((uint16_t)0x74)
+#define PPSMC_MSG_AllStateSweep_Stop        ((uint16_t)0x75)
+#define PPSMC_MSG_SwitchNextLowerInfState   ((uint16_t)0x76)
+#define PPSMC_MSG_SwitchNextHigherInfState  ((uint16_t)0x77)
+#define PPSMC_MSG_MclkRetrainingTest        ((uint16_t)0x78)
+#define PPSMC_MSG_ForceTDPClamping          ((uint16_t)0x79)
+#define PPSMC_MSG_CollectCAC_PowerCorreln   ((uint16_t)0x7A)
+#define PPSMC_MSG_CollectCAC_WeightCalib    ((uint16_t)0x7B)
+#define PPSMC_MSG_CollectCAC_SQonly         ((uint16_t)0x7C)
+#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
+
+#define PPSMC_MSG_ExtremitiesTest_Start     ((uint16_t)0x7E)
+#define PPSMC_MSG_ExtremitiesTest_Stop      ((uint16_t)0x7F)
+#define PPSMC_FlushDataCache                ((uint16_t)0x80)
+#define PPSMC_FlushInstrCache               ((uint16_t)0x81)
+
+#define PPSMC_MSG_SetEnabledLevels          ((uint16_t)0x82)
+#define PPSMC_MSG_SetForcedLevels           ((uint16_t)0x83)
+
+#define PPSMC_MSG_ResetToDefaults           ((uint16_t)0x84)
+
+#define PPSMC_MSG_SetForcedLevelsAndJump      ((uint16_t)0x85)
+#define PPSMC_MSG_SetCACHistoryMode           ((uint16_t)0x86)
+#define PPSMC_MSG_EnableDTE                   ((uint16_t)0x87)
+#define PPSMC_MSG_DisableDTE                  ((uint16_t)0x88)
+
+#define PPSMC_MSG_SmcSpaceSetAddress          ((uint16_t)0x89)
+#define PPSM_MSG_SmcSpaceWriteDWordInc        ((uint16_t)0x8A)
+#define PPSM_MSG_SmcSpaceWriteWordInc         ((uint16_t)0x8B)
+#define PPSM_MSG_SmcSpaceWriteByteInc         ((uint16_t)0x8C)
+
+#define PPSMC_MSG_BREAK                       ((uint16_t)0xF8)
+
+#define PPSMC_MSG_Test                        ((uint16_t) 0x100)
+#define PPSMC_MSG_DPM_Voltage_Pwrmgt          ((uint16_t) 0x101)
+#define PPSMC_MSG_DPM_Config                  ((uint16_t) 0x102)
+#define PPSMC_MSG_PM_Controller_Start         ((uint16_t) 0x103)
+#define PPSMC_MSG_DPM_ForceState              ((uint16_t) 0x104)
+#define PPSMC_MSG_PG_PowerDownSIMD            ((uint16_t) 0x105)
+#define PPSMC_MSG_PG_PowerUpSIMD              ((uint16_t) 0x106)
+#define PPSMC_MSG_PM_Controller_Stop          ((uint16_t) 0x107)
+#define PPSMC_MSG_PG_SIMD_Config              ((uint16_t) 0x108)
+#define PPSMC_MSG_Voltage_Cntl_Enable         ((uint16_t) 0x109)
+#define PPSMC_MSG_Thermal_Cntl_Enable         ((uint16_t) 0x10a)
+#define PPSMC_MSG_Reset_Service               ((uint16_t) 0x10b)
+#define PPSMC_MSG_VCEPowerOFF                 ((uint16_t) 0x10e)
+#define PPSMC_MSG_VCEPowerON                  ((uint16_t) 0x10f)
+#define PPSMC_MSG_DPM_Disable_VCE_HS          ((uint16_t) 0x110)
+#define PPSMC_MSG_DPM_Enable_VCE_HS           ((uint16_t) 0x111)
+#define PPSMC_MSG_DPM_N_LevelsDisabled        ((uint16_t) 0x112)
+#define PPSMC_MSG_DCEPowerOFF                 ((uint16_t) 0x113)
+#define PPSMC_MSG_DCEPowerON                  ((uint16_t) 0x114)
+#define PPSMC_MSG_PCIE_DDIPowerDown           ((uint16_t) 0x117)
+#define PPSMC_MSG_PCIE_DDIPowerUp             ((uint16_t) 0x118)
+#define PPSMC_MSG_PCIE_CascadePLLPowerDown    ((uint16_t) 0x119)
+#define PPSMC_MSG_PCIE_CascadePLLPowerUp      ((uint16_t) 0x11a)
+#define PPSMC_MSG_SYSPLLPowerOff              ((uint16_t) 0x11b)
+#define PPSMC_MSG_SYSPLLPowerOn               ((uint16_t) 0x11c)
+#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d)
+#define PPSMC_MSG_DCE_AllowVoltageAdjustment  ((uint16_t) 0x11e)
+#define PPSMC_MSG_DISPLAYPHYStatusNotify      ((uint16_t) 0x11f)
+#define PPSMC_MSG_EnableBAPM                  ((uint16_t) 0x120)
+#define PPSMC_MSG_DisableBAPM                 ((uint16_t) 0x121)
+#define PPSMC_MSG_Spmi_Enable                 ((uint16_t) 0x122)
+#define PPSMC_MSG_Spmi_Timer                  ((uint16_t) 0x123)
+#define PPSMC_MSG_LCLK_DPM_Config             ((uint16_t) 0x124)
+#define PPSMC_MSG_VddNB_Request               ((uint16_t) 0x125)
+#define PPSMC_MSG_PCIE_DDIPhyPowerDown        ((uint32_t) 0x126)
+#define PPSMC_MSG_PCIE_DDIPhyPowerUp          ((uint32_t) 0x127)
+#define PPSMC_MSG_MCLKDPM_Config              ((uint16_t) 0x128)
+
+#define PPSMC_MSG_UVDDPM_Config               ((uint16_t) 0x129)
+#define PPSMC_MSG_VCEDPM_Config               ((uint16_t) 0x12A)
+#define PPSMC_MSG_ACPDPM_Config               ((uint16_t) 0x12B)
+#define PPSMC_MSG_SAMUDPM_Config              ((uint16_t) 0x12C)
+#define PPSMC_MSG_UVDDPM_SetEnabledMask       ((uint16_t) 0x12D)
+#define PPSMC_MSG_VCEDPM_SetEnabledMask       ((uint16_t) 0x12E)
+#define PPSMC_MSG_ACPDPM_SetEnabledMask       ((uint16_t) 0x12F)
+#define PPSMC_MSG_SAMUDPM_SetEnabledMask      ((uint16_t) 0x130)
+#define PPSMC_MSG_MCLKDPM_ForceState          ((uint16_t) 0x131)
+#define PPSMC_MSG_MCLKDPM_NoForcedLevel       ((uint16_t) 0x132)
+#define PPSMC_MSG_Thermal_Cntl_Disable        ((uint16_t) 0x133)
+#define PPSMC_MSG_SetTDPLimit                 ((uint16_t) 0x134)
+#define PPSMC_MSG_Voltage_Cntl_Disable        ((uint16_t) 0x135)
+#define PPSMC_MSG_PCIeDPM_Enable              ((uint16_t) 0x136)
+#define PPSMC_MSG_ACPPowerOFF                 ((uint16_t) 0x137)
+#define PPSMC_MSG_ACPPowerON                  ((uint16_t) 0x138)
+#define PPSMC_MSG_SAMPowerOFF                 ((uint16_t) 0x139)
+#define PPSMC_MSG_SAMPowerON                  ((uint16_t) 0x13a)
+#define PPSMC_MSG_SDMAPowerOFF                ((uint16_t) 0x13b)
+#define PPSMC_MSG_SDMAPowerON                 ((uint16_t) 0x13c)
+#define PPSMC_MSG_PCIeDPM_Disable             ((uint16_t) 0x13d)
+#define PPSMC_MSG_IOMMUPowerOFF               ((uint16_t) 0x13e)
+#define PPSMC_MSG_IOMMUPowerON                ((uint16_t) 0x13f)
+#define PPSMC_MSG_NBDPM_Enable                ((uint16_t) 0x140)
+#define PPSMC_MSG_NBDPM_Disable               ((uint16_t) 0x141)
+#define PPSMC_MSG_NBDPM_ForceNominal          ((uint16_t) 0x142)
+#define PPSMC_MSG_NBDPM_ForcePerformance      ((uint16_t) 0x143)
+#define PPSMC_MSG_NBDPM_UnForce               ((uint16_t) 0x144)
+#define PPSMC_MSG_SCLKDPM_SetEnabledMask      ((uint16_t) 0x145)
+#define PPSMC_MSG_MCLKDPM_SetEnabledMask      ((uint16_t) 0x146)
+#define PPSMC_MSG_PCIeDPM_ForceLevel          ((uint16_t) 0x147)
+#define PPSMC_MSG_PCIeDPM_UnForceLevel        ((uint16_t) 0x148)
+#define PPSMC_MSG_EnableACDCGPIOInterrupt     ((uint16_t) 0x149)
+#define PPSMC_MSG_EnableVRHotGPIOInterrupt    ((uint16_t) 0x14a)
+#define PPSMC_MSG_SwitchToAC                  ((uint16_t) 0x14b)
+#define PPSMC_MSG_XDMAPowerOFF                ((uint16_t) 0x14c)
+#define PPSMC_MSG_XDMAPowerON                 ((uint16_t) 0x14d)
+
+#define PPSMC_MSG_DPM_Enable                  ((uint16_t) 0x14e)
+#define PPSMC_MSG_DPM_Disable                 ((uint16_t) 0x14f)
+#define PPSMC_MSG_MCLKDPM_Enable              ((uint16_t) 0x150)
+#define PPSMC_MSG_MCLKDPM_Disable             ((uint16_t) 0x151)
+#define PPSMC_MSG_LCLKDPM_Enable              ((uint16_t) 0x152)
+#define PPSMC_MSG_LCLKDPM_Disable             ((uint16_t) 0x153)
+#define PPSMC_MSG_UVDDPM_Enable               ((uint16_t) 0x154)
+#define PPSMC_MSG_UVDDPM_Disable              ((uint16_t) 0x155)
+#define PPSMC_MSG_SAMUDPM_Enable              ((uint16_t) 0x156)
+#define PPSMC_MSG_SAMUDPM_Disable             ((uint16_t) 0x157)
+#define PPSMC_MSG_ACPDPM_Enable               ((uint16_t) 0x158)
+#define PPSMC_MSG_ACPDPM_Disable              ((uint16_t) 0x159)
+#define PPSMC_MSG_VCEDPM_Enable               ((uint16_t) 0x15a)
+#define PPSMC_MSG_VCEDPM_Disable              ((uint16_t) 0x15b)
+#define PPSMC_MSG_LCLKDPM_SetEnabledMask      ((uint16_t) 0x15c)
+#define PPSMC_MSG_DPM_FPS_Mode                ((uint16_t) 0x15d)
+#define PPSMC_MSG_DPM_Activity_Mode           ((uint16_t) 0x15e)
+#define PPSMC_MSG_VddC_Request                ((uint16_t) 0x15f)
+#define PPSMC_MSG_MCLKDPM_GetEnabledMask      ((uint16_t) 0x160)
+#define PPSMC_MSG_LCLKDPM_GetEnabledMask      ((uint16_t) 0x161)
+#define PPSMC_MSG_SCLKDPM_GetEnabledMask      ((uint16_t) 0x162)
+#define PPSMC_MSG_UVDDPM_GetEnabledMask       ((uint16_t) 0x163)
+#define PPSMC_MSG_SAMUDPM_GetEnabledMask      ((uint16_t) 0x164)
+#define PPSMC_MSG_ACPDPM_GetEnabledMask       ((uint16_t) 0x165)
+#define PPSMC_MSG_VCEDPM_GetEnabledMask       ((uint16_t) 0x166)
+#define PPSMC_MSG_PCIeDPM_SetEnabledMask      ((uint16_t) 0x167)
+#define PPSMC_MSG_PCIeDPM_GetEnabledMask      ((uint16_t) 0x168)
+#define PPSMC_MSG_TDCLimitEnable              ((uint16_t) 0x169)
+#define PPSMC_MSG_TDCLimitDisable             ((uint16_t) 0x16a)
+#define PPSMC_MSG_DPM_AutoRotate_Mode         ((uint16_t) 0x16b)
+#define PPSMC_MSG_DISPCLK_FROM_FCH            ((uint16_t) 0x16c)
+#define PPSMC_MSG_DISPCLK_FROM_DFS            ((uint16_t) 0x16d)
+#define PPSMC_MSG_DPREFCLK_FROM_FCH           ((uint16_t) 0x16e)
+#define PPSMC_MSG_DPREFCLK_FROM_DFS           ((uint16_t) 0x16f)
+#define PPSMC_MSG_PmStatusLogStart            ((uint16_t) 0x170)
+#define PPSMC_MSG_PmStatusLogSample           ((uint16_t) 0x171)
+#define PPSMC_MSG_SCLK_AutoDPM_ON             ((uint16_t) 0x172)
+#define PPSMC_MSG_MCLK_AutoDPM_ON             ((uint16_t) 0x173)
+#define PPSMC_MSG_LCLK_AutoDPM_ON             ((uint16_t) 0x174)
+#define PPSMC_MSG_UVD_AutoDPM_ON              ((uint16_t) 0x175)
+#define PPSMC_MSG_SAMU_AutoDPM_ON             ((uint16_t) 0x176)
+#define PPSMC_MSG_ACP_AutoDPM_ON              ((uint16_t) 0x177)
+#define PPSMC_MSG_VCE_AutoDPM_ON              ((uint16_t) 0x178)
+#define PPSMC_MSG_PCIe_AutoDPM_ON             ((uint16_t) 0x179)
+#define PPSMC_MSG_MASTER_AutoDPM_ON           ((uint16_t) 0x17a)
+#define PPSMC_MSG_MASTER_AutoDPM_OFF          ((uint16_t) 0x17b)
+#define PPSMC_MSG_DYNAMICDISPPHYPOWER         ((uint16_t) 0x17c)
+#define PPSMC_MSG_CAC_COLLECTION_ON           ((uint16_t) 0x17d)
+#define PPSMC_MSG_CAC_COLLECTION_OFF          ((uint16_t) 0x17e)
+#define PPSMC_MSG_CAC_CORRELATION_ON          ((uint16_t) 0x17f)
+#define PPSMC_MSG_CAC_CORRELATION_OFF         ((uint16_t) 0x180)
+#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON        ((uint16_t) 0x181)
+#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF       ((uint16_t) 0x182)
+#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT     ((uint16_t) 0x184)
+#define PPSMC_MSG_PkgPwrLimitEnable           ((uint16_t) 0x185)
+#define PPSMC_MSG_PkgPwrLimitDisable          ((uint16_t) 0x186)
+#define PPSMC_MSG_PkgPwrSetLimit              ((uint16_t) 0x187)
+#define PPSMC_MSG_OverDriveSetTargetTdp       ((uint16_t) 0x188)
+#define PPSMC_MSG_SCLKDPM_FreezeLevel         ((uint16_t) 0x189)
+#define PPSMC_MSG_SCLKDPM_UnfreezeLevel       ((uint16_t) 0x18A)
+#define PPSMC_MSG_MCLKDPM_FreezeLevel         ((uint16_t) 0x18B)
+#define PPSMC_MSG_MCLKDPM_UnfreezeLevel       ((uint16_t) 0x18C)
+#define PPSMC_MSG_START_DRAM_LOGGING          ((uint16_t) 0x18D)
+#define PPSMC_MSG_STOP_DRAM_LOGGING           ((uint16_t) 0x18E)
+#define PPSMC_MSG_MASTER_DeepSleep_ON         ((uint16_t) 0x18F)
+#define PPSMC_MSG_MASTER_DeepSleep_OFF        ((uint16_t) 0x190)
+#define PPSMC_MSG_Remove_DC_Clamp             ((uint16_t) 0x191)
+#define PPSMC_MSG_DisableACDCGPIOInterrupt    ((uint16_t) 0x192)
+#define PPSMC_MSG_OverrideVoltageControl_SetVddc       ((uint16_t) 0x193)
+#define PPSMC_MSG_OverrideVoltageControl_SetVddci      ((uint16_t) 0x194)
+#define PPSMC_MSG_SetVidOffset_1              ((uint16_t) 0x195)
+#define PPSMC_MSG_SetVidOffset_2              ((uint16_t) 0x207)
+#define PPSMC_MSG_GetVidOffset_1              ((uint16_t) 0x196)
+#define PPSMC_MSG_GetVidOffset_2              ((uint16_t) 0x208)
+#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable    ((uint16_t) 0x197)
+#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable   ((uint16_t) 0x198)
+#define PPSMC_MSG_SetTjMax                    ((uint16_t) 0x199)
+#define PPSMC_MSG_SetFanPwmMax                ((uint16_t) 0x19A)
+#define PPSMC_MSG_WaitForMclkSwitchFinish     ((uint16_t) 0x19B)
+#define PPSMC_MSG_ENABLE_THERMAL_DPM          ((uint16_t) 0x19C)
+#define PPSMC_MSG_DISABLE_THERMAL_DPM         ((uint16_t) 0x19D)
+
+#define PPSMC_MSG_API_GetSclkFrequency        ((uint16_t) 0x200)
+#define PPSMC_MSG_API_GetMclkFrequency        ((uint16_t) 0x201)
+#define PPSMC_MSG_API_GetSclkBusy             ((uint16_t) 0x202)
+#define PPSMC_MSG_API_GetMclkBusy             ((uint16_t) 0x203)
+#define PPSMC_MSG_API_GetAsicPower            ((uint16_t) 0x204)
+#define PPSMC_MSG_SetFanRpmMax                ((uint16_t) 0x205)
+#define PPSMC_MSG_SetFanSclkTarget            ((uint16_t) 0x206)
+#define PPSMC_MSG_SetFanMinPwm                ((uint16_t) 0x209)
+#define PPSMC_MSG_SetFanTemperatureTarget     ((uint16_t) 0x20A)
+
+#define PPSMC_MSG_BACO_StartMonitor           ((uint16_t) 0x240)
+#define PPSMC_MSG_BACO_Cancel                 ((uint16_t) 0x241)
+#define PPSMC_MSG_EnableVddGfx                ((uint16_t) 0x242)
+#define PPSMC_MSG_DisableVddGfx               ((uint16_t) 0x243)
+#define PPSMC_MSG_UcodeAddressLow             ((uint16_t) 0x244)
+#define PPSMC_MSG_UcodeAddressHigh            ((uint16_t) 0x245)
+#define PPSMC_MSG_UcodeLoadStatus             ((uint16_t) 0x246)
+
+#define PPSMC_MSG_DRV_DRAM_ADDR_HI            ((uint16_t) 0x250)
+#define PPSMC_MSG_DRV_DRAM_ADDR_LO            ((uint16_t) 0x251)
+#define PPSMC_MSG_SMU_DRAM_ADDR_HI            ((uint16_t) 0x252)
+#define PPSMC_MSG_SMU_DRAM_ADDR_LO            ((uint16_t) 0x253)
+#define PPSMC_MSG_LoadUcodes                  ((uint16_t) 0x254)
+#define PPSMC_MSG_PowerStateNotify            ((uint16_t) 0x255)
+#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI      ((uint16_t) 0x256)
+#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO      ((uint16_t) 0x257)
+#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI          ((uint16_t) 0x258)
+#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO          ((uint16_t) 0x259)
+#define PPSMC_MSG_LoadVBios                   ((uint16_t) 0x25A)
+#define PPSMC_MSG_GetUcodeVersion             ((uint16_t) 0x25B)
+#define DMCUSMC_MSG_PSREntry                  ((uint16_t) 0x25C)
+#define DMCUSMC_MSG_PSRExit                   ((uint16_t) 0x25D)
+#define PPSMC_MSG_EnableClockGatingFeature    ((uint16_t) 0x260)
+#define PPSMC_MSG_DisableClockGatingFeature   ((uint16_t) 0x261)
+#define PPSMC_MSG_IsDeviceRunning             ((uint16_t) 0x262)
+#define PPSMC_MSG_LoadMetaData                ((uint16_t) 0x263)
+#define PPSMC_MSG_TMON_AutoCaliberate_Enable  ((uint16_t) 0x264)
+#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265)
+#define PPSMC_MSG_GetTelemetry1Slope          ((uint16_t) 0x266)
+#define PPSMC_MSG_GetTelemetry1Offset         ((uint16_t) 0x267)
+#define PPSMC_MSG_GetTelemetry2Slope          ((uint16_t) 0x268)
+#define PPSMC_MSG_GetTelemetry2Offset         ((uint16_t) 0x269)
+#define PPSMC_MSG_EnableAvfs                  ((uint16_t) 0x26A)
+#define PPSMC_MSG_DisableAvfs                 ((uint16_t) 0x26B)
+
+#define PPSMC_MSG_PerformBtc                  ((uint16_t) 0x26C)
+#define PPSMC_MSG_VftTableIsValid             ((uint16_t) 0x275)
+#define PPSMC_MSG_UseNewGPIOScheme            ((uint16_t) 0x277)
+#define PPSMC_MSG_GetEnabledPsm               ((uint16_t) 0x400)
+#define PPSMC_MSG_AgmStartPsm                 ((uint16_t) 0x401)
+#define PPSMC_MSG_AgmReadPsm                  ((uint16_t) 0x402)
+#define PPSMC_MSG_AgmResetPsm                 ((uint16_t) 0x403)
+#define PPSMC_MSG_ReadVftCell                 ((uint16_t) 0x404)
+
+#define PPSMC_MSG_GFX_CU_PG_ENABLE            ((uint16_t) 0x280)
+#define PPSMC_MSG_GFX_CU_PG_DISABLE           ((uint16_t) 0x281)
+#define PPSMC_MSG_GetCurrPkgPwr               ((uint16_t) 0x282)
+
+#define PPSMC_MSG_SetGpuPllDfsForSclk         ((uint16_t) 0x300)
+#define PPSMC_MSG_Didt_Block_Function		  ((uint16_t) 0x301)
+
+#define PPSMC_MSG_SetVBITimeout               ((uint16_t) 0x306)
+
+#define PPSMC_MSG_SecureSRBMWrite             ((uint16_t) 0x600)
+#define PPSMC_MSG_SecureSRBMRead              ((uint16_t) 0x601)
+#define PPSMC_MSG_SetAddress                  ((uint16_t) 0x800)
+#define PPSMC_MSG_GetData                     ((uint16_t) 0x801)
+#define PPSMC_MSG_SetData                     ((uint16_t) 0x802)
+
+typedef uint16_t PPSMC_Msg;
+
+#define PPSMC_EVENT_STATUS_THERMAL          0x00000001
+#define PPSMC_EVENT_STATUS_REGULATORHOT     0x00000002
+#define PPSMC_EVENT_STATUS_DC               0x00000004
+
+#pragma pack(pop)
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 3c235f0..2139072 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -28,6 +28,7 @@
 
 struct pp_smumgr;
 struct pp_instance;
+struct pp_hwmgr;
 
 #define smu_lower_32_bits(n) ((uint32_t)(n))
 #define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16))
@@ -53,6 +54,45 @@
 	AVFS_BTC_SMUMSG_ERROR
 };
 
+enum SMU_TABLE {
+	SMU_UVD_TABLE = 0,
+	SMU_VCE_TABLE,
+	SMU_SAMU_TABLE,
+	SMU_BIF_TABLE,
+};
+
+enum SMU_TYPE {
+	SMU_SoftRegisters = 0,
+	SMU_Discrete_DpmTable,
+};
+
+enum SMU_MEMBER {
+	HandshakeDisables = 0,
+	VoltageChangeTimeout,
+	AverageGraphicsActivity,
+	PreVBlankGap,
+	VBlankTimeout,
+	UcodeLoadStatus,
+	UvdBootLevel,
+	VceBootLevel,
+	SamuBootLevel,
+	LowSclkInterruptThreshold,
+};
+
+
+enum SMU_MAC_DEFINITION {
+	SMU_MAX_LEVELS_GRAPHICS = 0,
+	SMU_MAX_LEVELS_MEMORY,
+	SMU_MAX_LEVELS_LINK,
+	SMU_MAX_ENTRIES_SMIO,
+	SMU_MAX_LEVELS_VDDC,
+	SMU_MAX_LEVELS_VDDGFX,
+	SMU_MAX_LEVELS_VDDCI,
+	SMU_MAX_LEVELS_MVDD,
+	SMU_UVD_MCLK_HANDSHAKE_DISABLE,
+};
+
+
 struct pp_smumgr_func {
 	int (*smu_init)(struct pp_smumgr *smumgr);
 	int (*smu_fini)(struct pp_smumgr *smumgr);
@@ -69,12 +109,23 @@
 	int (*download_pptable_settings)(struct pp_smumgr *smumgr,
 					 void **table);
 	int (*upload_pptable_settings)(struct pp_smumgr *smumgr);
+	int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type);
+	int (*process_firmware_header)(struct pp_hwmgr *hwmgr);
+	int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr);
+	int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr);
+	int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr);
+	int (*init_smc_table)(struct pp_hwmgr *hwmgr);
+	int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr);
+	int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr);
+	int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr);
+	uint32_t (*get_offsetof)(uint32_t type, uint32_t member);
+	uint32_t (*get_mac_definition)(uint32_t value);
+	bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
 };
 
 struct pp_smumgr {
 	uint32_t chip_family;
 	uint32_t chip_id;
-	uint32_t hw_revision;
 	void *device;
 	void *backend;
 	uint32_t usec_timeout;
@@ -122,6 +173,30 @@
 
 extern int smu_free_memory(void *device, void *handle);
 
+extern int cz_smum_init(struct pp_smumgr *smumgr);
+extern int iceland_smum_init(struct pp_smumgr *smumgr);
+extern int tonga_smum_init(struct pp_smumgr *smumgr);
+extern int fiji_smum_init(struct pp_smumgr *smumgr);
+extern int polaris10_smum_init(struct pp_smumgr *smumgr);
+
+extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+
+extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+extern int smum_process_firmware_header(struct pp_hwmgr *hwmgr);
+extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
+		void *input, void *output, void *storage, int result);
+extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
+		void *input, void *output, void *storage, int result);
+extern int smum_init_smc_table(struct pp_hwmgr *hwmgr);
+extern int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+extern int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+extern int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+extern uint32_t smum_get_offsetof(struct pp_smumgr *smumgr,
+				uint32_t type, uint32_t member);
+extern uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value);
+
+extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr);
+
 #define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
 
 #define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index f10fb64..51ff083 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -2,7 +2,9 @@
 # Makefile for the 'smu manager' sub-component of powerplay.
 # It provides the smu management services for the driver.
 
-SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o polaris10_smumgr.o
+SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \
+	  polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \
+	  smu7_smumgr.o iceland_smc.o
 
 AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index 87c023e..5a44485 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -89,13 +89,8 @@
 	if (result != 0)
 		return result;
 
-	result = SMUM_WAIT_FIELD_UNEQUAL(smumgr,
+	return SMUM_WAIT_FIELD_UNEQUAL(smumgr,
 					SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
-
-	if (result != 0)
-		return result;
-
-	return 0;
 }
 
 static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
@@ -106,12 +101,12 @@
 
 	if (0 != (3 & smc_address)) {
 		printk(KERN_ERR "[ powerplay ] SMC address must be 4 byte aligned\n");
-		return -1;
+		return -EINVAL;
 	}
 
 	if (limit <= (smc_address + 3)) {
 		printk(KERN_ERR "[ powerplay ] SMC address beyond the SMC RAM area\n");
-		return -1;
+		return -EINVAL;
 	}
 
 	cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0,
@@ -129,9 +124,10 @@
 		return -EINVAL;
 
 	result = cz_set_smc_sram_address(smumgr, smc_address, limit);
-	cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
+	if (!result)
+		cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
 
-	return 0;
+	return result;
 }
 
 static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
@@ -148,7 +144,6 @@
 static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
 {
 	struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend);
-	int result = 0;
 	uint32_t smc_address;
 
 	if (!smumgr->reload_fw) {
@@ -177,11 +172,9 @@
 	cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
 				cz_smu->toc_entry_power_profiling_index);
 
-	result = cz_send_msg_to_smc_with_parameter(smumgr,
+	return cz_send_msg_to_smc_with_parameter(smumgr,
 					PPSMC_MSG_ExecuteJob,
 					cz_smu->toc_entry_initialize_index);
-
-	return result;
 }
 
 static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
@@ -195,9 +188,6 @@
 	if (smumgr == NULL || smumgr->device == NULL)
 		return -EINVAL;
 
-	return cgs_read_register(smumgr->device,
-					mmSMU_MP1_SRBM2P_ARG_0);
-
 	cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index);
 
 	for (i = 0; i < smumgr->usec_timeout; i++) {
@@ -275,7 +265,10 @@
 	if (smumgr->chip_id == CHIP_STONEY)
 		fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
 
-	cz_request_smu_load_fw(smumgr);
+	ret = cz_request_smu_load_fw(smumgr);
+	if (ret)
+		printk(KERN_ERR "[ powerplay] SMU firmware load failed\n");
+
 	cz_check_fw_load_finish(smumgr, fw_to_check);
 
 	ret = cz_load_mec_firmware(smumgr);
@@ -566,10 +559,7 @@
 
 	cz_smu_populate_single_ucode_load_task(smumgr,
 				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
-	if (smumgr->chip_id == CHIP_STONEY)
-		cz_smu_populate_single_ucode_load_task(smumgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
-	else
+	if (smumgr->chip_id != CHIP_STONEY)
 		cz_smu_populate_single_ucode_load_task(smumgr,
 				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
 	cz_smu_populate_single_ucode_load_task(smumgr,
@@ -580,10 +570,7 @@
 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
 	cz_smu_populate_single_ucode_load_task(smumgr,
 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-	if (smumgr->chip_id == CHIP_STONEY)
-		cz_smu_populate_single_ucode_load_task(smumgr,
-				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-	else
+	if (smumgr->chip_id != CHIP_STONEY)
 		cz_smu_populate_single_ucode_load_task(smumgr,
 				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
 	cz_smu_populate_single_ucode_load_task(smumgr,
@@ -610,19 +597,12 @@
 	struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
 
 	cz_smu->toc_entry_used_count = 0;
-
 	cz_smu_initialize_toc_empty_job_list(smumgr);
-
 	cz_smu_construct_toc_for_rlc_aram_save(smumgr);
-
 	cz_smu_construct_toc_for_vddgfx_enter(smumgr);
-
 	cz_smu_construct_toc_for_vddgfx_exit(smumgr);
-
 	cz_smu_construct_toc_for_power_profiling(smumgr);
-
 	cz_smu_construct_toc_for_bootup(smumgr);
-
 	cz_smu_construct_toc_for_clock_table(smumgr);
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
new file mode 100644
index 0000000..76310ac
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
@@ -0,0 +1,2374 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "fiji_smc.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "fiji_smumgr.h"
+#include "pppcielanes.h"
+#include "smu7_ppsmc.h"
+#include "smu73.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "smu7_smumgr.h"
+
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX    1
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100
+#define VDDC_VDDCI_DELTA            300
+#define MC_CG_ARB_FREQ_F1           0x0b
+
+/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
+ * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
+ */
+static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = {
+				{600, 1050, 3, 0}, {600, 1050, 6, 1} };
+
+/* [FF, SS] type, [] 4 voltage ranges, and
+ * [Floor Freq, Boundary Freq, VID min , VID max]
+ */
+static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = {
+	{ {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+	{ {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
+
+/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
+ * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
+ */
+static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = {
+				{0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
+
+static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+		/*sviLoadLIneEn,  SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
+		{1,               0xF,             0xFD,
+		/* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
+		0x19,        5,               45}
+};
+
+/* PPGen has the gain setting generated in x * 100 unit
+ * This function is to convert the unit to x * 4096(0x1000) unit.
+ *  This is the unit expected by SMC firmware
+ */
+static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+		uint32_t clock, uint32_t *voltage, uint32_t *mvdd)
+{
+	uint32_t i;
+	uint16_t vddci;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	*voltage = *mvdd = 0;
+
+
+	/* clock - voltage dependency table is empty table */
+	if (dep_table->count == 0)
+		return -EINVAL;
+
+	for (i = 0; i < dep_table->count; i++) {
+		/* find first sclk bigger than request */
+		if (dep_table->entries[i].clk >= clock) {
+			*voltage |= (dep_table->entries[i].vddc *
+					VOLTAGE_SCALE) << VDDC_SHIFT;
+			if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+				*voltage |= (data->vbios_boot_state.vddci_bootup_value *
+						VOLTAGE_SCALE) << VDDCI_SHIFT;
+			else if (dep_table->entries[i].vddci)
+				*voltage |= (dep_table->entries[i].vddci *
+						VOLTAGE_SCALE) << VDDCI_SHIFT;
+			else {
+				vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+						(dep_table->entries[i].vddc -
+								VDDC_VDDCI_DELTA));
+				*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+			}
+
+			if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+				*mvdd = data->vbios_boot_state.mvdd_bootup_value *
+					VOLTAGE_SCALE;
+			else if (dep_table->entries[i].mvdd)
+				*mvdd = (uint32_t) dep_table->entries[i].mvdd *
+					VOLTAGE_SCALE;
+
+			*voltage |= 1 << PHASES_SHIFT;
+			return 0;
+		}
+	}
+
+	/* sclk is bigger than max sclk in the dependence table */
+	*voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+	if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+		*voltage |= (data->vbios_boot_state.vddci_bootup_value *
+				VOLTAGE_SCALE) << VDDCI_SHIFT;
+	else if (dep_table->entries[i-1].vddci) {
+		vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+				(dep_table->entries[i].vddc -
+						VDDC_VDDCI_DELTA));
+		*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+	}
+
+	if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+		*mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+	else if (dep_table->entries[i].mvdd)
+		*mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+	return 0;
+}
+
+
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+	uint32_t tmp;
+	tmp = raw_setting * 4096 / 100;
+	return (uint16_t)tmp;
+}
+
+static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
+{
+	switch (line) {
+	case SMU7_I2CLineID_DDC1:
+		*scl = SMU7_I2C_DDC1CLK;
+		*sda = SMU7_I2C_DDC1DATA;
+		break;
+	case SMU7_I2CLineID_DDC2:
+		*scl = SMU7_I2C_DDC2CLK;
+		*sda = SMU7_I2C_DDC2DATA;
+		break;
+	case SMU7_I2CLineID_DDC3:
+		*scl = SMU7_I2C_DDC3CLK;
+		*sda = SMU7_I2C_DDC3DATA;
+		break;
+	case SMU7_I2CLineID_DDC4:
+		*scl = SMU7_I2C_DDC4CLK;
+		*sda = SMU7_I2C_DDC4DATA;
+		break;
+	case SMU7_I2CLineID_DDC5:
+		*scl = SMU7_I2C_DDC5CLK;
+		*sda = SMU7_I2C_DDC5DATA;
+		break;
+	case SMU7_I2CLineID_DDC6:
+		*scl = SMU7_I2C_DDC6CLK;
+		*sda = SMU7_I2C_DDC6DATA;
+		break;
+	case SMU7_I2CLineID_SCLSDA:
+		*scl = SMU7_I2C_SCL;
+		*sda = SMU7_I2C_SDA;
+		break;
+	case SMU7_I2CLineID_DDCVGA:
+		*scl = SMU7_I2C_DDCVGACLK;
+		*sda = SMU7_I2C_DDCVGADATA;
+		break;
+	default:
+		*scl = 0;
+		*sda = 0;
+		break;
+	}
+}
+
+static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct  phm_ppt_v1_information *)(hwmgr->pptable);
+
+	if (table_info &&
+			table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+			table_info->cac_dtp_table->usPowerTuneDataSetID)
+		smu_data->power_tune_defaults =
+				&fiji_power_tune_data_set_array
+				[table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+	else
+		smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0];
+
+}
+
+static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+	const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+	SMU73_Discrete_DpmTable  *dpm_table = &(smu_data->smc_state_table);
+
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+	struct pp_advance_fan_control_parameters *fan_table =
+			&hwmgr->thermal_controller.advanceFanControlParameters;
+	uint8_t uc_scl, uc_sda;
+
+	/* TDP number of fraction bits are changed from 8 to 7 for Fiji
+	 * as requested by SMC team
+	 */
+	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
+			(uint16_t)(cac_dtp_table->usTDP * 128));
+	dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
+			(uint16_t)(cac_dtp_table->usTDP * 128));
+
+	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+			"Target Operating Temp is out of Range!",
+			);
+
+	dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
+	dpm_table->GpuTjHyst = 8;
+
+	dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
+
+	/* The following are for new Fiji Multi-input fan/thermal control */
+	dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+			cac_dtp_table->usTargetOperatingTemp * 256);
+	dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+			cac_dtp_table->usTemperatureLimitHotspot * 256);
+	dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
+			cac_dtp_table->usTemperatureLimitLiquid1 * 256);
+	dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
+			cac_dtp_table->usTemperatureLimitLiquid2 * 256);
+	dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
+			cac_dtp_table->usTemperatureLimitVrVddc * 256);
+	dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
+			cac_dtp_table->usTemperatureLimitVrMvdd * 256);
+	dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
+			cac_dtp_table->usTemperatureLimitPlx * 256);
+
+	dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
+			scale_fan_gain_settings(fan_table->usFanGainEdge));
+	dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
+			scale_fan_gain_settings(fan_table->usFanGainHotspot));
+	dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
+			scale_fan_gain_settings(fan_table->usFanGainLiquid));
+	dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
+			scale_fan_gain_settings(fan_table->usFanGainVrVddc));
+	dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
+			scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
+	dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
+			scale_fan_gain_settings(fan_table->usFanGainPlx));
+	dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
+			scale_fan_gain_settings(fan_table->usFanGainHbm));
+
+	dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
+	dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
+	dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
+	dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
+
+	get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
+	dpm_table->Liquid_I2C_LineSCL = uc_scl;
+	dpm_table->Liquid_I2C_LineSDA = uc_sda;
+
+	get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
+	dpm_table->Vr_I2C_LineSCL = uc_scl;
+	dpm_table->Vr_I2C_LineSDA = uc_sda;
+
+	get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
+	dpm_table->Plx_I2C_LineSCL = uc_scl;
+	dpm_table->Plx_I2C_LineSDA = uc_sda;
+
+	return 0;
+}
+
+
+static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+	const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+	smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+	smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+	smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+	smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+	return 0;
+}
+
+
+static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+	uint16_t tdc_limit;
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+	/* TDC number of fraction bits are changed from 8 to 7
+	 * for Fiji as requested by SMC team
+	 */
+	tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+	smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+	smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+			defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+	smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+	return 0;
+}
+
+static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+	const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+	uint32_t temp;
+
+	if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+			fuse_table_offset +
+			offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
+			(uint32_t *)&temp, SMC_RAM_END))
+		PP_ASSERT_WITH_CODE(false,
+				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+				return -EINVAL);
+	else {
+		smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+		smu_data->power_tune_table.LPMLTemperatureMin =
+				(uint8_t)((temp >> 16) & 0xff);
+		smu_data->power_tune_table.LPMLTemperatureMax =
+				(uint8_t)((temp >> 8) & 0xff);
+		smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+	}
+	return 0;
+}
+
+static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+	/* Currently not used. Set all to zero. */
+	for (i = 0; i < 16; i++)
+		smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+	return 0;
+}
+
+static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+	if ((hwmgr->thermal_controller.advanceFanControlParameters.
+			usFanOutputSensitivity & (1 << 15)) ||
+			0 == hwmgr->thermal_controller.advanceFanControlParameters.
+			usFanOutputSensitivity)
+		hwmgr->thermal_controller.advanceFanControlParameters.
+		usFanOutputSensitivity = hwmgr->thermal_controller.
+			advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+	smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
+			PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
+					advanceFanControlParameters.usFanOutputSensitivity);
+	return 0;
+}
+
+static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+	/* Currently not used. Set all to zero. */
+	for (i = 0; i < 16; i++)
+		smu_data->power_tune_table.GnbLPML[i] = 0;
+
+	return 0;
+}
+
+static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+	return 0;
+}
+
+static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+	uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+	struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+	HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+	LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+	smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+	smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+
+	return 0;
+}
+
+static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+	uint32_t pm_fuse_table_offset;
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment)) {
+		if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+				SMU7_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU73_Firmware_Header, PmFuseTable),
+				&pm_fuse_table_offset, SMC_RAM_END))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to get pm_fuse_table_offset Failed!",
+					return -EINVAL);
+
+		/* DW6 */
+		if (fiji_populate_svi_load_line(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate SviLoadLine Failed!",
+					return -EINVAL);
+		/* DW7 */
+		if (fiji_populate_tdc_limit(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate TDCLimit Failed!", return -EINVAL);
+		/* DW8 */
+		if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate TdcWaterfallCtl, "
+					"LPMLTemperature Min and Max Failed!",
+					return -EINVAL);
+
+		/* DW9-DW12 */
+		if (0 != fiji_populate_temperature_scaler(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate LPMLTemperatureScaler Failed!",
+					return -EINVAL);
+
+		/* DW13-DW14 */
+		if (fiji_populate_fuzzy_fan(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate Fuzzy Fan Control parameters Failed!",
+					return -EINVAL);
+
+		/* DW15-DW18 */
+		if (fiji_populate_gnb_lpml(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate GnbLPML Failed!",
+					return -EINVAL);
+
+		/* DW19 */
+		if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate GnbLPML Min and Max Vid Failed!",
+					return -EINVAL);
+
+		/* DW20 */
+		if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+					"Sidd Failed!", return -EINVAL);
+
+		if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+				(uint8_t *)&smu_data->power_tune_table,
+				sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to download PmFuseTable Failed!",
+					return -EINVAL);
+	}
+	return 0;
+}
+
+/**
+* Preparation of vddc and vddgfx CAC tables for SMC.
+*
+* @param    hwmgr  the address of the hardware manager
+* @param    table  the SMC DPM table structure to be populated
+* @return   always 0
+*/
+static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
+		struct SMU73_Discrete_DpmTable *table)
+{
+	uint32_t count;
+	uint8_t index;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+			table_info->vddc_lookup_table;
+	/* tables is already swapped, so in order to use the value from it,
+	 * we need to swap it back.
+	 * We are populating vddc CAC data to BapmVddc table
+	 * in split and merged mode
+	 */
+
+	for (count = 0; count < lookup_table->count; count++) {
+		index = phm_get_voltage_index(lookup_table,
+				data->vddc_voltage_table.entries[count].value);
+		table->BapmVddcVidLoSidd[count] =
+			convert_to_vid(lookup_table->entries[index].us_cac_low);
+		table->BapmVddcVidHiSidd[count] =
+			convert_to_vid(lookup_table->entries[index].us_cac_high);
+	}
+
+	return 0;
+}
+
+/**
+* Preparation of voltage tables for SMC.
+*
+* @param    hwmgr   the address of the hardware manager
+* @param    table   the SMC DPM table structure to be populated
+* @return   always  0
+*/
+
+static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+		struct SMU73_Discrete_DpmTable *table)
+{
+	int result;
+
+	result = fiji_populate_cac_table(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"can not populate CAC voltage tables to SMC",
+			return -EINVAL);
+
+	return 0;
+}
+
+static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
+		struct SMU73_Discrete_Ulv *state)
+{
+	int result = 0;
+
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	state->CcPwrDynRm = 0;
+	state->CcPwrDynRm1 = 0;
+
+	state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+	state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+			VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+	state->VddcPhase = 1;
+
+	if (!result) {
+		CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+		CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+		CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+	}
+	return result;
+}
+
+static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
+		struct SMU73_Discrete_DpmTable *table)
+{
+	return fiji_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+		struct SMU73_Discrete_DpmTable *table)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct smu7_dpm_table *dpm_table = &data->dpm_table;
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+	int i;
+
+	/* Index (dpm_table->pcie_speed_table.count)
+	 * is reserved for PCIE boot level. */
+	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+		table->LinkLevel[i].PcieGenSpeed  =
+				(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+		table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+				dpm_table->pcie_speed_table.dpm_levels[i].param1);
+		table->LinkLevel[i].EnabledForActivity = 1;
+		table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+		table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+		table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+	}
+
+	smu_data->smc_state_table.LinkLevelCount =
+			(uint8_t)dpm_table->pcie_speed_table.count;
+	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+	return 0;
+}
+
+
+/**
+* Calculates the SCLK dividers using the provided engine clock
+*
+* @param    hwmgr  the address of the hardware manager
+* @param    clock  the engine clock to use to populate the structure
+* @param    sclk   the SMC SCLK structure to be populated
+*/
+static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+		uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
+{
+	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+	uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+	uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+	uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+	uint32_t ref_clock;
+	uint32_t ref_divider;
+	uint32_t fbdiv;
+	int result;
+
+	/* get the engine clock dividers for this clock value */
+	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock,  &dividers);
+
+	PP_ASSERT_WITH_CODE(result == 0,
+			"Error retrieving Engine Clock dividers from VBIOS.",
+			return result);
+
+	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
+	ref_clock = atomctrl_get_reference_clock(hwmgr);
+	ref_divider = 1 + dividers.uc_pll_ref_div;
+
+	/* low 14 bits is fraction and high 12 bits is divider */
+	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+	/* SPLL_FUNC_CNTL setup */
+	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+			SPLL_REF_DIV, dividers.uc_pll_ref_div);
+	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+			SPLL_PDIV_A,  dividers.uc_pll_post_div);
+
+	/* SPLL_FUNC_CNTL_3 setup*/
+	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+			SPLL_FB_DIV, fbdiv);
+
+	/* set to use fractional accumulation*/
+	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+			SPLL_DITHEN, 1);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+		struct pp_atomctrl_internal_ss_info ssInfo;
+
+		uint32_t vco_freq = clock * dividers.uc_pll_post_div;
+		if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
+				vco_freq, &ssInfo)) {
+			/*
+			 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+			 * ss_info.speed_spectrum_rate -- in unit of khz
+			 *
+			 * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
+			 */
+			uint32_t clk_s = ref_clock * 5 /
+					(ref_divider * ssInfo.speed_spectrum_rate);
+			/* clkv = 2 * D * fbdiv / NS */
+			uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
+					fbdiv / (clk_s * 10000);
+
+			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+					CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
+			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+					CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+			cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
+					CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
+		}
+	}
+
+	sclk->SclkFrequency        = clock;
+	sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
+	sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
+	sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
+	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
+	sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
+
+	return 0;
+}
+
+/**
+* Populates single SMC SCLK structure using the provided engine clock
+*
+* @param    hwmgr      the address of the hardware manager
+* @param    clock the engine clock to use to populate the structure
+* @param    sclk        the SMC SCLK structure to be populated
+*/
+
+static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+		uint32_t clock, uint16_t sclk_al_threshold,
+		struct SMU73_Discrete_GraphicsLevel *level)
+{
+	int result;
+	/* PP_Clocks minClocks; */
+	uint32_t threshold, mvdd;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	result = fiji_calculate_sclk_params(hwmgr, clock, level);
+
+	/* populate graphics levels */
+	result = fiji_get_dependency_volt_by_clk(hwmgr,
+			table_info->vdd_dep_on_sclk, clock,
+			(uint32_t *)(&level->MinVoltage), &mvdd);
+	PP_ASSERT_WITH_CODE((0 == result),
+			"can not find VDDC voltage value for "
+			"VDDC engine clock dependency table",
+			return result);
+
+	level->SclkFrequency = clock;
+	level->ActivityLevel = sclk_al_threshold;
+	level->CcPwrDynRm = 0;
+	level->CcPwrDynRm1 = 0;
+	level->EnabledForActivity = 0;
+	level->EnabledForThrottle = 1;
+	level->UpHyst = 10;
+	level->DownHyst = 0;
+	level->VoltageDownHyst = 0;
+	level->PowerThrottle = 0;
+
+	threshold = clock * data->fast_watermark_threshold / 100;
+
+	data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+		level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
+								hwmgr->display_config.min_core_set_clock_in_sr);
+
+
+	/* Default to slow, highest DPM level will be
+	 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
+	 */
+	level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
+	CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+
+	return 0;
+}
+/**
+* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+*
+* @param    hwmgr      the address of the hardware manager
+*/
+int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+	struct smu7_dpm_table *dpm_table = &data->dpm_table;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+	uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
+	int result = 0;
+	uint32_t array = smu_data->smu7_data.dpm_table_start +
+			offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
+	uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
+			SMU73_MAX_LEVELS_GRAPHICS;
+	struct SMU73_Discrete_GraphicsLevel *levels =
+			smu_data->smc_state_table.GraphicsLevel;
+	uint32_t i, max_entry;
+	uint8_t hightest_pcie_level_enabled = 0,
+			lowest_pcie_level_enabled = 0,
+			mid_pcie_level_enabled = 0,
+			count = 0;
+
+	for (i = 0; i < dpm_table->sclk_table.count; i++) {
+		result = fiji_populate_single_graphic_level(hwmgr,
+				dpm_table->sclk_table.dpm_levels[i].value,
+				(uint16_t)smu_data->activity_target[i],
+				&levels[i]);
+		if (result)
+			return result;
+
+		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+		if (i > 1)
+			levels[i].DeepSleepDivId = 0;
+	}
+
+	/* Only enable level 0 for now.*/
+	levels[0].EnabledForActivity = 1;
+
+	/* set highest level watermark to high */
+	levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
+			PPSMC_DISPLAY_WATERMARK_HIGH;
+
+	smu_data->smc_state_table.GraphicsDpmLevelCount =
+			(uint8_t)dpm_table->sclk_table.count;
+	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+	if (pcie_table != NULL) {
+		PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+				"There must be 1 or more PCIE levels defined in PPTable.",
+				return -EINVAL);
+		max_entry = pcie_entry_cnt - 1;
+		for (i = 0; i < dpm_table->sclk_table.count; i++)
+			levels[i].pcieDpmLevel =
+					(uint8_t) ((i < max_entry) ? i : max_entry);
+	} else {
+		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+						(1 << (hightest_pcie_level_enabled + 1))) != 0))
+			hightest_pcie_level_enabled++;
+
+		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+						(1 << lowest_pcie_level_enabled)) == 0))
+			lowest_pcie_level_enabled++;
+
+		while ((count < hightest_pcie_level_enabled) &&
+				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+						(1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+			count++;
+
+		mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+				hightest_pcie_level_enabled ?
+						(lowest_pcie_level_enabled + 1 + count) :
+						hightest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to hightest_pcie_level_enabled */
+		for (i = 2; i < dpm_table->sclk_table.count; i++)
+			levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to lowest_pcie_level_enabled */
+		levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to mid_pcie_level_enabled */
+		levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+	}
+	/* level count will send to smc once at init smc table and never change */
+	result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+			(uint32_t)array_size, SMC_RAM_END);
+
+	return result;
+}
+
+
+/**
+ * MCLK Frequency Ratio
+ * SEQ_CG_RESP  Bit[31:24] - 0x0
+ * Bit[27:24] \96 DDR3 Frequency ratio
+ * 0x0 <= 100MHz,       450 < 0x8 <= 500MHz
+ * 100 < 0x1 <= 150MHz,       500 < 0x9 <= 550MHz
+ * 150 < 0x2 <= 200MHz,       550 < 0xA <= 600MHz
+ * 200 < 0x3 <= 250MHz,       600 < 0xB <= 650MHz
+ * 250 < 0x4 <= 300MHz,       650 < 0xC <= 700MHz
+ * 300 < 0x5 <= 350MHz,       700 < 0xD <= 750MHz
+ * 350 < 0x6 <= 400MHz,       750 < 0xE <= 800MHz
+ * 400 < 0x7 <= 450MHz,       800 < 0xF
+ */
+static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
+{
+	if (mem_clock <= 10000)
+		return 0x0;
+	if (mem_clock <= 15000)
+		return 0x1;
+	if (mem_clock <= 20000)
+		return 0x2;
+	if (mem_clock <= 25000)
+		return 0x3;
+	if (mem_clock <= 30000)
+		return 0x4;
+	if (mem_clock <= 35000)
+		return 0x5;
+	if (mem_clock <= 40000)
+		return 0x6;
+	if (mem_clock <= 45000)
+		return 0x7;
+	if (mem_clock <= 50000)
+		return 0x8;
+	if (mem_clock <= 55000)
+		return 0x9;
+	if (mem_clock <= 60000)
+		return 0xa;
+	if (mem_clock <= 65000)
+		return 0xb;
+	if (mem_clock <= 70000)
+		return 0xc;
+	if (mem_clock <= 75000)
+		return 0xd;
+	if (mem_clock <= 80000)
+		return 0xe;
+	/* mem_clock > 800MHz */
+	return 0xf;
+}
+
+/**
+* Populates the SMC MCLK structure using the provided memory clock
+*
+* @param    hwmgr   the address of the hardware manager
+* @param    clock   the memory clock to use to populate the structure
+* @param    sclk    the SMC SCLK structure to be populated
+*/
+static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
+    uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
+{
+	struct pp_atomctrl_memory_clock_param mem_param;
+	int result;
+
+	result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
+	PP_ASSERT_WITH_CODE((0 == result),
+			"Failed to get Memory PLL Dividers.",
+			);
+
+	/* Save the result data to outpupt memory level structure */
+	mclk->MclkFrequency   = clock;
+	mclk->MclkDivider     = (uint8_t)mem_param.mpll_post_divider;
+	mclk->FreqRange       = fiji_get_mclk_frequency_ratio(clock);
+
+	return result;
+}
+
+static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+		uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	int result = 0;
+	uint32_t mclk_stutter_mode_threshold = 60000;
+
+	if (table_info->vdd_dep_on_mclk) {
+		result = fiji_get_dependency_volt_by_clk(hwmgr,
+				table_info->vdd_dep_on_mclk, clock,
+				(uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find MinVddc voltage value from memory "
+				"VDDC voltage dependency table", return result);
+	}
+
+	mem_level->EnabledForThrottle = 1;
+	mem_level->EnabledForActivity = 0;
+	mem_level->UpHyst = 0;
+	mem_level->DownHyst = 100;
+	mem_level->VoltageDownHyst = 0;
+	mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+	mem_level->StutterEnable = false;
+
+	mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	/* enable stutter mode if all the follow condition applied
+	 * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
+	 * &(data->DisplayTiming.numExistingDisplays));
+	 */
+	data->display_timing.num_existing_displays = 1;
+
+	if (mclk_stutter_mode_threshold &&
+		(clock <= mclk_stutter_mode_threshold) &&
+		(!data->is_uvd_enabled) &&
+		(PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+				STUTTER_ENABLE) & 0x1))
+		mem_level->StutterEnable = true;
+
+	result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
+	if (!result) {
+		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+		CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+	}
+	return result;
+}
+
+/**
+* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
+*
+* @param    hwmgr      the address of the hardware manager
+*/
+int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+	struct smu7_dpm_table *dpm_table = &data->dpm_table;
+	int result;
+	/* populate MCLK dpm table to SMU7 */
+	uint32_t array = smu_data->smu7_data.dpm_table_start +
+			offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
+	uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
+			SMU73_MAX_LEVELS_MEMORY;
+	struct SMU73_Discrete_MemoryLevel *levels =
+			smu_data->smc_state_table.MemoryLevel;
+	uint32_t i;
+
+	for (i = 0; i < dpm_table->mclk_table.count; i++) {
+		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+				"can not populate memory level as memory clock is zero",
+				return -EINVAL);
+		result = fiji_populate_single_memory_level(hwmgr,
+				dpm_table->mclk_table.dpm_levels[i].value,
+				&levels[i]);
+		if (result)
+			return result;
+	}
+
+	/* Only enable level 0 for now. */
+	levels[0].EnabledForActivity = 1;
+
+	/* in order to prevent MC activity from stutter mode to push DPM up.
+	 * the UVD change complements this by putting the MCLK in
+	 * a higher state by default such that we are not effected by
+	 * up threshold or and MCLK DPM latency.
+	 */
+	levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
+	CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+	smu_data->smc_state_table.MemoryDpmLevelCount =
+			(uint8_t)dpm_table->mclk_table.count;
+	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+	/* set highest level watermark to high */
+	levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
+			PPSMC_DISPLAY_WATERMARK_HIGH;
+
+	/* level count will send to smc once at init smc table and never change */
+	result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+			(uint32_t)array_size, SMC_RAM_END);
+
+	return result;
+}
+
+
+/**
+* Populates the SMC MVDD structure using the provided memory clock.
+*
+* @param    hwmgr      the address of the hardware manager
+* @param    mclk        the MCLK value to be used in the decision if MVDD should be high or low.
+* @param    voltage     the SMC VOLTAGE structure to be populated
+*/
+static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+		uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint32_t i = 0;
+
+	if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+		/* find mvdd value which clock is more than request */
+		for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+			if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+				smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+				break;
+			}
+		}
+		PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+				"MVDD Voltage is outside the supported range.",
+				return -EINVAL);
+	} else
+		return -EINVAL;
+
+	return 0;
+}
+
+static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+		SMU73_Discrete_DpmTable *table)
+{
+	int result = 0;
+	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	SMIO_Pattern vol_level;
+	uint32_t mvdd;
+	uint16_t us_mvdd;
+	uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+	uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+
+	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+	if (!data->sclk_dpm_key_disabled) {
+		/* Get MinVoltage and Frequency from DPM0,
+		 * already converted to SMC_UL */
+		table->ACPILevel.SclkFrequency =
+				data->dpm_table.sclk_table.dpm_levels[0].value;
+		result = fiji_get_dependency_volt_by_clk(hwmgr,
+				table_info->vdd_dep_on_sclk,
+				table->ACPILevel.SclkFrequency,
+				(uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Cannot find ACPI VDDC voltage value " \
+				"in Clock Dependency Table",
+				);
+	} else {
+		table->ACPILevel.SclkFrequency =
+				data->vbios_boot_state.sclk_bootup_value;
+		table->ACPILevel.MinVoltage =
+				data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
+	}
+
+	/* get the engine clock dividers for this clock value */
+	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+			table->ACPILevel.SclkFrequency,  &dividers);
+	PP_ASSERT_WITH_CODE(result == 0,
+			"Error retrieving Engine Clock dividers from VBIOS.",
+			return result);
+
+	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+	table->ACPILevel.DeepSleepDivId = 0;
+
+	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+			SPLL_PWRON, 0);
+	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+			SPLL_RESET, 1);
+	spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
+			SCLK_MUX_SEL, 4);
+
+	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+	table->ACPILevel.CcPwrDynRm = 0;
+	table->ACPILevel.CcPwrDynRm1 = 0;
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+	if (!data->mclk_dpm_key_disabled) {
+		/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+		table->MemoryACPILevel.MclkFrequency =
+				data->dpm_table.mclk_table.dpm_levels[0].value;
+		result = fiji_get_dependency_volt_by_clk(hwmgr,
+				table_info->vdd_dep_on_mclk,
+				table->MemoryACPILevel.MclkFrequency,
+			(uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Cannot find ACPI VDDCI voltage value in Clock Dependency Table",
+				);
+	} else {
+		table->MemoryACPILevel.MclkFrequency =
+				data->vbios_boot_state.mclk_bootup_value;
+		table->MemoryACPILevel.MinVoltage =
+				data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
+	}
+
+	us_mvdd = 0;
+	if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+			(data->mclk_dpm_key_disabled))
+		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+	else {
+		if (!fiji_populate_mvdd_value(hwmgr,
+				data->dpm_table.mclk_table.dpm_levels[0].value,
+				&vol_level))
+			us_mvdd = vol_level.Voltage;
+	}
+
+	table->MemoryACPILevel.MinMvdd =
+			PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
+
+	table->MemoryACPILevel.EnabledForThrottle = 0;
+	table->MemoryACPILevel.EnabledForActivity = 0;
+	table->MemoryACPILevel.UpHyst = 0;
+	table->MemoryACPILevel.DownHyst = 100;
+	table->MemoryACPILevel.VoltageDownHyst = 0;
+	table->MemoryACPILevel.ActivityLevel =
+			PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+	table->MemoryACPILevel.StutterEnable = false;
+	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+	return result;
+}
+
+static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+		SMU73_Discrete_DpmTable *table)
+{
+	int result = -EINVAL;
+	uint8_t count;
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+			table_info->mm_dep_table;
+
+	table->VceLevelCount = (uint8_t)(mm_table->count);
+	table->VceBootLevel = 0;
+
+	for (count = 0; count < table->VceLevelCount; count++) {
+		table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+		table->VceLevel[count].MinVoltage = 0;
+		table->VceLevel[count].MinVoltage |=
+				(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+		table->VceLevel[count].MinVoltage |=
+				((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) *
+						VOLTAGE_SCALE) << VDDCI_SHIFT;
+		table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/*retrieve divider value for VBIOS */
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				table->VceLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find divide id for VCE engine clock",
+				return result);
+
+		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+	}
+	return result;
+}
+
+static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+		SMU73_Discrete_DpmTable *table)
+{
+	int result = -EINVAL;
+	uint8_t count;
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+			table_info->mm_dep_table;
+
+	table->AcpLevelCount = (uint8_t)(mm_table->count);
+	table->AcpBootLevel = 0;
+
+	for (count = 0; count < table->AcpLevelCount; count++) {
+		table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
+		table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+				VOLTAGE_SCALE) << VDDC_SHIFT;
+		table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+				VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+		table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value for VBIOS */
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				table->AcpLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find divide id for engine clock", return result);
+
+		table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+		CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
+	}
+	return result;
+}
+
+static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+		SMU73_Discrete_DpmTable *table)
+{
+	int result = -EINVAL;
+	uint8_t count;
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+			table_info->mm_dep_table;
+
+	table->SamuBootLevel = 0;
+	table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+	for (count = 0; count < table->SamuLevelCount; count++) {
+		/* not sure whether we need evclk or not */
+		table->SamuLevel[count].MinVoltage = 0;
+		table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+		table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+				VOLTAGE_SCALE) << VDDC_SHIFT;
+		table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+				VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+		table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value for VBIOS */
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				table->SamuLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find divide id for samu clock", return result);
+
+		table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+	}
+	return result;
+}
+
+static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+		int32_t eng_clock, int32_t mem_clock,
+		struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+	uint32_t dram_timing;
+	uint32_t dram_timing2;
+	uint32_t burstTime;
+	ULONG state, trrds, trrdl;
+	int result;
+
+	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+			eng_clock, mem_clock);
+	PP_ASSERT_WITH_CODE(result == 0,
+			"Error calling VBIOS to set DRAM_TIMING.", return result);
+
+	dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+	dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+	burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
+
+	state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
+	trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
+	trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
+
+	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dram_timing);
+	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+	arb_regs->McArbBurstTime   = (uint8_t)burstTime;
+	arb_regs->TRRDS            = (uint8_t)trrds;
+	arb_regs->TRRDL            = (uint8_t)trrdl;
+
+	return 0;
+}
+
+static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+	struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
+	uint32_t i, j;
+	int result = 0;
+
+	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+		for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+			result = fiji_populate_memory_timing_parameters(hwmgr,
+					data->dpm_table.sclk_table.dpm_levels[i].value,
+					data->dpm_table.mclk_table.dpm_levels[j].value,
+					&arb_regs.entries[i][j]);
+			if (result)
+				break;
+		}
+	}
+
+	if (!result)
+		result = smu7_copy_bytes_to_smc(
+				hwmgr->smumgr,
+				smu_data->smu7_data.arb_table_start,
+				(uint8_t *)&arb_regs,
+				sizeof(SMU73_Discrete_MCArbDramTimingTable),
+				SMC_RAM_END);
+	return result;
+}
+
+static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+		struct SMU73_Discrete_DpmTable *table)
+{
+	int result = -EINVAL;
+	uint8_t count;
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+			table_info->mm_dep_table;
+
+	table->UvdLevelCount = (uint8_t)(mm_table->count);
+	table->UvdBootLevel = 0;
+
+	for (count = 0; count < table->UvdLevelCount; count++) {
+		table->UvdLevel[count].MinVoltage = 0;
+		table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+		table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+		table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+				VOLTAGE_SCALE) << VDDC_SHIFT;
+		table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+				VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+		table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value for VBIOS */
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				table->UvdLevel[count].VclkFrequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find divide id for Vclk clock", return result);
+
+		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				table->UvdLevel[count].DclkFrequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find divide id for Dclk clock", return result);
+
+		table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+
+	}
+	return result;
+}
+
+static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+		struct SMU73_Discrete_DpmTable *table)
+{
+	int result = 0;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	table->GraphicsBootLevel = 0;
+	table->MemoryBootLevel = 0;
+
+	/* find boot level from dpm table */
+	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+			data->vbios_boot_state.sclk_bootup_value,
+			(uint32_t *)&(table->GraphicsBootLevel));
+
+	result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+			data->vbios_boot_state.mclk_bootup_value,
+			(uint32_t *)&(table->MemoryBootLevel));
+
+	table->BootVddc  = data->vbios_boot_state.vddc_bootup_value *
+			VOLTAGE_SCALE;
+	table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+			VOLTAGE_SCALE;
+	table->BootMVdd  = data->vbios_boot_state.mvdd_bootup_value *
+			VOLTAGE_SCALE;
+
+	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+	CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+	return 0;
+}
+
+static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint8_t count, level;
+
+	count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+	for (level = 0; level < count; level++) {
+		if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+				data->vbios_boot_state.sclk_bootup_value) {
+			smu_data->smc_state_table.GraphicsBootLevel = level;
+			break;
+		}
+	}
+
+	count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+	for (level = 0; level < count; level++) {
+		if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+				data->vbios_boot_state.mclk_bootup_value) {
+			smu_data->smc_state_table.MemoryBootLevel = level;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+	uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
+			volt_with_cks, value;
+	uint16_t clock_freq_u16;
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+	uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
+			volt_offset = 0;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+			table_info->vdd_dep_on_sclk;
+
+	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+	/* Read SMU_Eefuse to read and calculate RO and determine
+	 * if the part is SS or FF. if RO >= 1660MHz, part is FF.
+	 */
+	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixSMU_EFUSE_0 + (146 * 4));
+	efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixSMU_EFUSE_0 + (148 * 4));
+	efuse &= 0xFF000000;
+	efuse = efuse >> 24;
+	efuse2 &= 0xF;
+
+	if (efuse2 == 1)
+		ro = (2300 - 1350) * efuse / 255 + 1350;
+	else
+		ro = (2500 - 1000) * efuse / 255 + 1000;
+
+	if (ro >= 1660)
+		type = 0;
+	else
+		type = 1;
+
+	/* Populate Stretch amount */
+	smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
+
+	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+	for (i = 0; i < sclk_table->count; i++) {
+		smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+				sclk_table->entries[i].cks_enable << i;
+		volt_without_cks = (uint32_t)((14041 *
+			(sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
+			(4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
+		volt_with_cks = (uint32_t)((13946 *
+			(sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
+			(3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+		if (volt_without_cks >= volt_with_cks)
+			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+					sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+		smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+	}
+
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+			STRETCH_ENABLE, 0x0);
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+			masterReset, 0x1);
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+			staticEnable, 0x1);
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+			masterReset, 0x0);
+
+	/* Populate CKS Lookup Table */
+	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+		stretch_amount2 = 0;
+	else if (stretch_amount == 3 || stretch_amount == 4)
+		stretch_amount2 = 1;
+	else {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_ClockStretcher);
+		PP_ASSERT_WITH_CODE(false,
+				"Stretch Amount in PPTable not supported\n",
+				return -EINVAL);
+	}
+
+	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixPWR_CKS_CNTL);
+	value &= 0xFFC2FF87;
+	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
+			fiji_clock_stretcher_lookup_table[stretch_amount2][0];
+	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
+			fiji_clock_stretcher_lookup_table[stretch_amount2][1];
+	clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
+			GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
+			SclkFrequency) / 100);
+	if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
+			clock_freq_u16 &&
+	    fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
+			clock_freq_u16) {
+		/* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
+		value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
+		/* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
+		value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
+		/* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
+		value |= (fiji_clock_stretch_amount_conversion
+				[fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
+				 [stretch_amount]) << 3;
+	}
+	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+			CKS_LOOKUPTableEntry[0].minFreq);
+	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+			CKS_LOOKUPTableEntry[0].maxFreq);
+	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
+			fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
+	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
+			(fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixPWR_CKS_CNTL, value);
+
+	/* Populate DDT Lookup Table */
+	for (i = 0; i < 4; i++) {
+		/* Assign the minimum and maximum VID stored
+		 * in the last row of Clock Stretcher Voltage Table.
+		 */
+		smu_data->smc_state_table.ClockStretcherDataTable.
+		ClockStretcherDataTableEntry[i].minVID =
+				(uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
+		smu_data->smc_state_table.ClockStretcherDataTable.
+		ClockStretcherDataTableEntry[i].maxVID =
+				(uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
+		/* Loop through each SCLK and check the frequency
+		 * to see if it lies within the frequency for clock stretcher.
+		 */
+		for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
+			cks_setting = 0;
+			clock_freq = PP_SMC_TO_HOST_UL(
+					smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
+			/* Check the allowed frequency against the sclk level[j].
+			 *  Sclk's endianness has already been converted,
+			 *  and it's in 10Khz unit,
+			 *  as opposed to Data table, which is in Mhz unit.
+			 */
+			if (clock_freq >=
+					(fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
+				cks_setting |= 0x2;
+				if (clock_freq <
+						(fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
+					cks_setting |= 0x1;
+			}
+			smu_data->smc_state_table.ClockStretcherDataTable.
+			ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
+		}
+		CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
+				ClockStretcherDataTable.
+				ClockStretcherDataTableEntry[i].setting);
+	}
+
+	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+	value &= 0xFFFFFFFE;
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+	return 0;
+}
+
+/**
+* Populates the SMC VRConfig field in DPM table.
+*
+* @param    hwmgr   the address of the hardware manager
+* @param    table   the SMC DPM table structure to be populated
+* @return   always 0
+*/
+static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
+		struct SMU73_Discrete_DpmTable *table)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint16_t config;
+
+	config = VR_MERGED_WITH_VDDC;
+	table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+	/* Set Vddc Voltage Controller */
+	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+		config = VR_SVI2_PLANE_1;
+		table->VRConfig |= config;
+	} else {
+		PP_ASSERT_WITH_CODE(false,
+				"VDDC should be on SVI2 control in merged mode!",
+				);
+	}
+	/* Set Vddci Voltage Controller */
+	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+		config = VR_SVI2_PLANE_2;  /* only in merged mode */
+		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+	} else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+		config = VR_SMIO_PATTERN_1;
+		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+	} else {
+		config = VR_STATIC_VOLTAGE;
+		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+	}
+	/* Set Mvdd Voltage Controller */
+	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+		config = VR_SVI2_PLANE_2;
+		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+	} else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+		config = VR_SMIO_PATTERN_2;
+		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+	} else {
+		config = VR_STATIC_VOLTAGE;
+		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+	}
+
+	return 0;
+}
+
+static int fiji_init_arb_table_index(struct pp_smumgr *smumgr)
+{
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(smumgr->backend);
+	uint32_t tmp;
+	int result;
+
+	/* This is a read-modify-write on the first byte of the ARB table.
+	 * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
+	 * is the field 'current'.
+	 * This solution is ugly, but we never write the whole table only
+	 * individual fields in it.
+	 * In reality this field should not be in that structure
+	 * but in a soft register.
+	 */
+	result = smu7_read_smc_sram_dword(smumgr,
+			smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+	if (result)
+		return result;
+
+	tmp &= 0x00FFFFFF;
+	tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+	return smu7_write_smc_sram_dword(smumgr,
+			smu_data->smu7_data.arb_table_start,  tmp, SMC_RAM_END);
+}
+
+/**
+* Initializes the SMC table and uploads it
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    pInput  the pointer to input data (PowerState)
+* @return   always 0
+*/
+int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+	uint8_t i;
+	struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+
+	fiji_initialize_power_tune_defaults(hwmgr);
+
+	if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+		fiji_populate_smc_voltage_tables(hwmgr, table);
+
+	table->SystemFlags = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_AutomaticDCTransition))
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StepVddc))
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+	if (data->is_memory_gddr5)
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+	if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
+		result = fiji_populate_ulv_state(hwmgr, table);
+		PP_ASSERT_WITH_CODE(0 == result,
+				"Failed to initialize ULV state!", return result);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+				ixCG_ULV_PARAMETER, 0x40035);
+	}
+
+	result = fiji_populate_smc_link_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize Link Level!", return result);
+
+	result = fiji_populate_all_graphic_levels(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize Graphics Level!", return result);
+
+	result = fiji_populate_all_memory_levels(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize Memory Level!", return result);
+
+	result = fiji_populate_smc_acpi_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize ACPI Level!", return result);
+
+	result = fiji_populate_smc_vce_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize VCE Level!", return result);
+
+	result = fiji_populate_smc_acp_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize ACP Level!", return result);
+
+	result = fiji_populate_smc_samu_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize SAMU Level!", return result);
+
+	/* Since only the initial state is completely set up at this point
+	 * (the other states are just copies of the boot state) we only
+	 * need to populate the  ARB settings for the initial state.
+	 */
+	result = fiji_program_memory_timing_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to Write ARB settings for the initial state.", return result);
+
+	result = fiji_populate_smc_uvd_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize UVD Level!", return result);
+
+	result = fiji_populate_smc_boot_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize Boot Level!", return result);
+
+	result = fiji_populate_smc_initailial_state(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize Boot State!", return result);
+
+	result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to populate BAPM Parameters!", return result);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ClockStretcher)) {
+		result = fiji_populate_clock_stretcher_data_table(hwmgr);
+		PP_ASSERT_WITH_CODE(0 == result,
+				"Failed to populate Clock Stretcher Data Table!",
+				return result);
+	}
+
+	table->GraphicsVoltageChangeEnable  = 1;
+	table->GraphicsThermThrottleEnable  = 1;
+	table->GraphicsInterval = 1;
+	table->VoltageInterval  = 1;
+	table->ThermalInterval  = 1;
+	table->TemperatureLimitHigh =
+			table_info->cac_dtp_table->usTargetOperatingTemp *
+			SMU7_Q88_FORMAT_CONVERSION_UNIT;
+	table->TemperatureLimitLow  =
+			(table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+			SMU7_Q88_FORMAT_CONVERSION_UNIT;
+	table->MemoryVoltageChangeEnable = 1;
+	table->MemoryInterval = 1;
+	table->VoltageResponseTime = 0;
+	table->PhaseResponseTime = 0;
+	table->MemoryThermThrottleEnable = 1;
+	table->PCIeBootLinkLevel = 0;      /* 0:Gen1 1:Gen2 2:Gen3*/
+	table->PCIeGenInterval = 1;
+	table->VRConfig = 0;
+
+	result = fiji_populate_vr_config(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to populate VRConfig setting!", return result);
+
+	table->ThermGpio = 17;
+	table->SclkStepSize = 0x4000;
+
+	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+		table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_RegulatorHot);
+	} else {
+		table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_RegulatorHot);
+	}
+
+	if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+			&gpio_pin)) {
+		table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_AutomaticDCTransition);
+	} else {
+		table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_AutomaticDCTransition);
+	}
+
+	/* Thermal Output GPIO */
+	if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
+			&gpio_pin)) {
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_ThermalOutGPIO);
+
+		table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+		/* For porlarity read GPIOPAD_A with assigned Gpio pin
+		 * since VBIOS will program this register to set 'inactive state',
+		 * driver can then determine 'active state' from this and
+		 * program SMU with correct polarity
+		 */
+		table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
+				(1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
+		table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+		/* if required, combine VRHot/PCC with thermal out GPIO */
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_RegulatorHot) &&
+			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_CombinePCCWithThermalSignal))
+			table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+	} else {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_ThermalOutGPIO);
+		table->ThermOutGpio = 17;
+		table->ThermOutPolarity = 1;
+		table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+	}
+
+	for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
+		table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+	result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+			smu_data->smu7_data.dpm_table_start +
+			offsetof(SMU73_Discrete_DpmTable, SystemFlags),
+			(uint8_t *)&(table->SystemFlags),
+			sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
+			SMC_RAM_END);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to upload dpm data to SMC memory!", return result);
+
+	result = fiji_init_arb_table_index(hwmgr->smumgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to upload arb data to SMC memory!", return result);
+
+	result = fiji_populate_pm_fuses(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to  populate PM fuses to SMC memory!", return result);
+	return 0;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    pInput the pointer to input data
+* @param    pOutput the pointer to output data
+* @param    pStorage the pointer to temporary storage
+* @param    Result the last failure code
+* @return   result from set temperature range routine
+*/
+int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+	SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+	uint32_t duty100;
+	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+	uint16_t fdo_min, slope1, slope2;
+	uint32_t reference_clock;
+	int res;
+	uint64_t tmp64;
+
+	if (smu_data->smu7_data.fan_table_start == 0) {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_MicrocodeFanControl);
+		return 0;
+	}
+
+	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_FDO_CTRL1, FMAX_DUTY100);
+
+	if (duty100 == 0) {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_MicrocodeFanControl);
+		return 0;
+	}
+
+	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
+			usPWMMin * duty100;
+	do_div(tmp64, 10000);
+	fdo_min = (uint16_t)tmp64;
+
+	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+			hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+			hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+	fan_table.TempMin = cpu_to_be16((50 + hwmgr->
+			thermal_controller.advanceFanControlParameters.usTMin) / 100);
+	fan_table.TempMed = cpu_to_be16((50 + hwmgr->
+			thermal_controller.advanceFanControlParameters.usTMed) / 100);
+	fan_table.TempMax = cpu_to_be16((50 + hwmgr->
+			thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+	fan_table.Slope1 = cpu_to_be16(slope1);
+	fan_table.Slope2 = cpu_to_be16(slope2);
+
+	fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+	fan_table.HystDown = cpu_to_be16(hwmgr->
+			thermal_controller.advanceFanControlParameters.ucTHyst);
+
+	fan_table.HystUp = cpu_to_be16(1);
+
+	fan_table.HystSlope = cpu_to_be16(1);
+
+	fan_table.TempRespLim = cpu_to_be16(5);
+
+	reference_clock = smu7_get_xclk(hwmgr);
+
+	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
+			thermal_controller.advanceFanControlParameters.ulCycleDelay *
+			reference_clock) / 1600);
+
+	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
+			hwmgr->device, CGS_IND_REG__SMC,
+			CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+	res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
+			(uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
+			SMC_RAM_END);
+
+	if (!res && hwmgr->thermal_controller.
+			advanceFanControlParameters.ucMinimumPWMLimit)
+		res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_SetFanMinPwm,
+				hwmgr->thermal_controller.
+				advanceFanControlParameters.ucMinimumPWMLimit);
+
+	if (!res && hwmgr->thermal_controller.
+			advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
+		res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_SetFanSclkTarget,
+				hwmgr->thermal_controller.
+				advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+
+	if (res)
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_MicrocodeFanControl);
+
+	return 0;
+}
+
+int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (data->need_update_smu7_dpm_table &
+		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+		return fiji_program_memory_timing_parameters(hwmgr);
+
+	return 0;
+}
+
+int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+	int result = 0;
+	uint32_t low_sclk_interrupt_threshold = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkThrottleLowNotification)
+		&& (hwmgr->gfx_arbiter.sclk_threshold !=
+				data->low_sclk_interrupt_threshold)) {
+		data->low_sclk_interrupt_threshold =
+				hwmgr->gfx_arbiter.sclk_threshold;
+		low_sclk_interrupt_threshold =
+				data->low_sclk_interrupt_threshold;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+		result = smu7_copy_bytes_to_smc(
+				hwmgr->smumgr,
+				smu_data->smu7_data.dpm_table_start +
+				offsetof(SMU73_Discrete_DpmTable,
+					LowSclkInterruptThreshold),
+				(uint8_t *)&low_sclk_interrupt_threshold,
+				sizeof(uint32_t),
+				SMC_RAM_END);
+	}
+	result = fiji_program_mem_timing_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE((result == 0),
+			"Failed to program memory timing parameters!",
+			);
+	return result;
+}
+
+uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
+{
+	switch (type) {
+	case SMU_SoftRegisters:
+		switch (member) {
+		case HandshakeDisables:
+			return offsetof(SMU73_SoftRegisters, HandshakeDisables);
+		case VoltageChangeTimeout:
+			return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout);
+		case AverageGraphicsActivity:
+			return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
+		case PreVBlankGap:
+			return offsetof(SMU73_SoftRegisters, PreVBlankGap);
+		case VBlankTimeout:
+			return offsetof(SMU73_SoftRegisters, VBlankTimeout);
+		case UcodeLoadStatus:
+			return offsetof(SMU73_SoftRegisters, UcodeLoadStatus);
+		}
+	case SMU_Discrete_DpmTable:
+		switch (member) {
+		case UvdBootLevel:
+			return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
+		case VceBootLevel:
+			return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
+		case SamuBootLevel:
+			return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
+		case LowSclkInterruptThreshold:
+			return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
+		}
+	}
+	printk("cant't get the offset of type %x member %x \n", type, member);
+	return 0;
+}
+
+uint32_t fiji_get_mac_definition(uint32_t value)
+{
+	switch (value) {
+	case SMU_MAX_LEVELS_GRAPHICS:
+		return SMU73_MAX_LEVELS_GRAPHICS;
+	case SMU_MAX_LEVELS_MEMORY:
+		return SMU73_MAX_LEVELS_MEMORY;
+	case SMU_MAX_LEVELS_LINK:
+		return SMU73_MAX_LEVELS_LINK;
+	case SMU_MAX_ENTRIES_SMIO:
+		return SMU73_MAX_ENTRIES_SMIO;
+	case SMU_MAX_LEVELS_VDDC:
+		return SMU73_MAX_LEVELS_VDDC;
+	case SMU_MAX_LEVELS_VDDGFX:
+		return SMU73_MAX_LEVELS_VDDGFX;
+	case SMU_MAX_LEVELS_VDDCI:
+		return SMU73_MAX_LEVELS_VDDCI;
+	case SMU_MAX_LEVELS_MVDD:
+		return SMU73_MAX_LEVELS_MVDD;
+	}
+
+	printk("cant't get the mac of %x \n", value);
+	return 0;
+}
+
+
+static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+	uint32_t mm_boot_level_offset, mm_boot_level_value;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	smu_data->smc_state_table.UvdBootLevel = 0;
+	if (table_info->mm_dep_table->count > 0)
+		smu_data->smc_state_table.UvdBootLevel =
+				(uint8_t) (table_info->mm_dep_table->count - 1);
+	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable,
+						UvdBootLevel);
+	mm_boot_level_offset /= 4;
+	mm_boot_level_offset *= 4;
+	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset);
+	mm_boot_level_value &= 0x00FFFFFF;
+	mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+	cgs_write_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_UVDDPM) ||
+		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StablePState))
+		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_UVDDPM_SetEnabledMask,
+				(uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+	return 0;
+}
+
+static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+	uint32_t mm_boot_level_offset, mm_boot_level_value;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_StablePState))
+		smu_data->smc_state_table.VceBootLevel =
+			(uint8_t) (table_info->mm_dep_table->count - 1);
+	else
+		smu_data->smc_state_table.VceBootLevel = 0;
+
+	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+					offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
+	mm_boot_level_offset /= 4;
+	mm_boot_level_offset *= 4;
+	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset);
+	mm_boot_level_value &= 0xFF00FFFF;
+	mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+	cgs_write_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_VCEDPM_SetEnabledMask,
+				(uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+	return 0;
+}
+
+static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+	uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+
+	smu_data->smc_state_table.SamuBootLevel = 0;
+	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+				offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
+
+	mm_boot_level_offset /= 4;
+	mm_boot_level_offset *= 4;
+	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset);
+	mm_boot_level_value &= 0xFFFFFF00;
+	mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+	cgs_write_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StablePState))
+		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_SAMUDPM_SetEnabledMask,
+				(uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+	return 0;
+}
+
+int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+	switch (type) {
+	case SMU_UVD_TABLE:
+		fiji_update_uvd_smc_table(hwmgr);
+		break;
+	case SMU_VCE_TABLE:
+		fiji_update_vce_smc_table(hwmgr);
+		break;
+	case SMU_SAMU_TABLE:
+		fiji_update_samu_smc_table(hwmgr);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+
+/**
+* Get the location of various tables inside the FW image.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always  0
+*/
+int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+	uint32_t tmp;
+	int result;
+	bool error = false;
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU73_Firmware_Header, DpmTable),
+			&tmp, SMC_RAM_END);
+
+	if (0 == result)
+		smu_data->smu7_data.dpm_table_start = tmp;
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU73_Firmware_Header, SoftRegisters),
+			&tmp, SMC_RAM_END);
+
+	if (!result) {
+		data->soft_regs_start = tmp;
+		smu_data->smu7_data.soft_regs_start = tmp;
+	}
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU73_Firmware_Header, mcRegisterTable),
+			&tmp, SMC_RAM_END);
+
+	if (!result)
+		smu_data->smu7_data.mc_reg_table_start = tmp;
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU73_Firmware_Header, FanTable),
+			&tmp, SMC_RAM_END);
+
+	if (!result)
+		smu_data->smu7_data.fan_table_start = tmp;
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
+			&tmp, SMC_RAM_END);
+
+	if (!result)
+		smu_data->smu7_data.arb_table_start = tmp;
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU73_Firmware_Header, Version),
+			&tmp, SMC_RAM_END);
+
+	if (!result)
+		hwmgr->microcode_version_info.SMC = tmp;
+
+	error |= (0 != result);
+
+	return error ? -1 : 0;
+}
+
+int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+
+	/* Program additional LP registers
+	 * that are no longer programmed by VBIOS
+	 */
+	cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+
+	return 0;
+}
+
+bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+	return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+			? true : false;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
new file mode 100644
index 0000000..d30d150
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef FIJI_SMC_H
+#define FIJI_SMC_H
+
+#include "smumgr.h"
+#include "smu73.h"
+
+struct fiji_pt_defaults {
+	uint8_t   SviLoadLineEn;
+	uint8_t   SviLoadLineVddC;
+	uint8_t   TDC_VDDC_ThrottleReleaseLimitPerc;
+	uint8_t   TDC_MAWt;
+	uint8_t   TdcWaterfallCtl;
+	uint8_t   DTEAmbientTempBase;
+};
+
+int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+int fiji_init_smc_table(struct pp_hwmgr *hwmgr);
+int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
+int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+uint32_t fiji_get_offsetof(uint32_t type, uint32_t member);
+uint32_t fiji_get_mac_definition(uint32_t value);
+int fiji_process_firmware_header(struct pp_hwmgr *hwmgr);
+int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
old mode 100644
new mode 100755
index 8e52a2e..02fe1df
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -38,6 +38,7 @@
 #include "bif/bif_5_0_sh_mask.h"
 #include "pp_debug.h"
 #include "fiji_pwrvirus.h"
+#include "fiji_smc.h"
 
 #define AVFS_EN_MSB                                        1568
 #define AVFS_EN_LSB                                        1568
@@ -57,509 +58,6 @@
 		{ 0xf811d047, 0x80380100,   0x01,     0x00,   0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000,   0,      0,   0x0c,   0x01,       0x01,        0x01,      0x00,   0x00,      0x00,     0x00 }
 };
 
-static enum cgs_ucode_id fiji_convert_fw_type_to_cgs(uint32_t fw_type)
-{
-	enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
-
-	switch (fw_type) {
-	case UCODE_ID_SMU:
-		result = CGS_UCODE_ID_SMU;
-		break;
-	case UCODE_ID_SDMA0:
-		result = CGS_UCODE_ID_SDMA0;
-		break;
-	case UCODE_ID_SDMA1:
-		result = CGS_UCODE_ID_SDMA1;
-		break;
-	case UCODE_ID_CP_CE:
-		result = CGS_UCODE_ID_CP_CE;
-		break;
-	case UCODE_ID_CP_PFP:
-		result = CGS_UCODE_ID_CP_PFP;
-		break;
-	case UCODE_ID_CP_ME:
-		result = CGS_UCODE_ID_CP_ME;
-		break;
-	case UCODE_ID_CP_MEC:
-		result = CGS_UCODE_ID_CP_MEC;
-		break;
-	case UCODE_ID_CP_MEC_JT1:
-		result = CGS_UCODE_ID_CP_MEC_JT1;
-		break;
-	case UCODE_ID_CP_MEC_JT2:
-		result = CGS_UCODE_ID_CP_MEC_JT2;
-		break;
-	case UCODE_ID_RLC_G:
-		result = CGS_UCODE_ID_RLC_G;
-		break;
-	default:
-		break;
-	}
-
-	return result;
-}
-/**
-* Set the address for reading/writing the SMC SRAM space.
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    smc_addr the address in the SMC RAM to access.
-*/
-static int fiji_set_smc_sram_address(struct pp_smumgr *smumgr,
-		uint32_t smc_addr, uint32_t limit)
-{
-	PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)),
-			"SMC address must be 4 byte aligned.", return -EINVAL;);
-	PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)),
-			"SMC address is beyond the SMC RAM area.", return -EINVAL;);
-
-	cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smc_addr);
-	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-
-	return 0;
-}
-
-/**
-* Copy bytes from an array into the SMC RAM space.
-*
-* @param    smumgr  the address of the powerplay SMU manager.
-* @param    smcStartAddress the start address in the SMC RAM to copy bytes to.
-* @param    src the byte array to copy the bytes from.
-* @param    byteCount the number of bytes to copy.
-*/
-int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr,
-		uint32_t smcStartAddress, const uint8_t *src,
-		uint32_t byteCount, uint32_t limit)
-{
-	int result;
-	uint32_t data, originalData;
-	uint32_t addr, extraShift;
-
-	PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)),
-			"SMC address must be 4 byte aligned.", return -EINVAL;);
-	PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)),
-			"SMC address is beyond the SMC RAM area.", return -EINVAL;);
-
-	addr = smcStartAddress;
-
-	while (byteCount >= 4) {
-		/* Bytes are written into the SMC addres space with the MSB first. */
-		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
-
-		result = fiji_set_smc_sram_address(smumgr, addr, limit);
-		if (result)
-			return result;
-
-		cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
-
-		src += 4;
-		byteCount -= 4;
-		addr += 4;
-	}
-
-	if (byteCount) {
-		/* Now write the odd bytes left.
-		 * Do a read modify write cycle.
-		 */
-		data = 0;
-
-		result = fiji_set_smc_sram_address(smumgr, addr, limit);
-		if (result)
-			return result;
-
-		originalData = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
-		extraShift = 8 * (4 - byteCount);
-
-		while (byteCount > 0) {
-			/* Bytes are written into the SMC addres
-			 * space with the MSB first.
-			 */
-			data = (0x100 * data) + *src++;
-			byteCount--;
-		}
-		data <<= extraShift;
-		data |= (originalData & ~((~0UL) << extraShift));
-
-		result = fiji_set_smc_sram_address(smumgr, addr, limit);
-		if (!result)
-			return result;
-
-		cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
-	}
-	return 0;
-}
-
-int fiji_program_jump_on_start(struct pp_smumgr *smumgr)
-{
-	static const unsigned char data[] = { 0xE0, 0x00, 0x80, 0x40 };
-
-	fiji_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data) + 1);
-
-	return 0;
-}
-
-/**
-* Return if the SMC is currently running.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-*/
-bool fiji_is_smc_ram_running(struct pp_smumgr *smumgr)
-{
-	return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
-			CGS_IND_REG__SMC,
-			SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
-			&& (0x20100 <= cgs_read_ind_register(smumgr->device,
-					CGS_IND_REG__SMC, ixSMC_PC_C)));
-}
-
-/**
-* Send a message to the SMC, and wait for its response.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    msg the message to send.
-* @return   The response that came from the SMC.
-*/
-int fiji_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
-{
-	if (!fiji_is_smc_ram_running(smumgr))
-		return -1;
-
-	if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
-		printk(KERN_ERR "Failed to send Previous Message.");
-		SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-	}
-
-	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
-	return 0;
-}
-
-/**
- * Send a message to the SMC with parameter
- * @param    smumgr:  the address of the powerplay hardware manager.
- * @param    msg: the message to send.
- * @param    parameter: the parameter to send
- * @return   The response that came from the SMC.
- */
-int fiji_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
-		uint16_t msg, uint32_t parameter)
-{
-	if (!fiji_is_smc_ram_running(smumgr))
-		return -1;
-
-	if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
-		printk(KERN_ERR "Failed to send Previous Message.");
-		SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-	}
-
-	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
-	return 0;
-}
-
-
-/**
-* Send a message to the SMC with parameter, do not wait for response
-*
-* @param    smumgr:  the address of the powerplay hardware manager.
-* @param    msg: the message to send.
-* @param    parameter: the parameter to send
-* @return   The response that came from the SMC.
-*/
-int fiji_send_msg_to_smc_with_parameter_without_waiting(
-		struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
-{
-	if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
-		printk(KERN_ERR "Failed to send Previous Message.");
-		SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-	}
-	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
-	return 0;
-}
-
-/**
-* Uploads the SMU firmware from .hex file
-*
-* @param    smumgr  the address of the powerplay SMU manager.
-* @return   0 or -1.
-*/
-
-static int fiji_upload_smu_firmware_image(struct pp_smumgr *smumgr)
-{
-	const uint8_t *src;
-	uint32_t byte_count;
-	uint32_t *data;
-	struct cgs_firmware_info info = {0};
-
-	cgs_get_firmware_info(smumgr->device,
-			fiji_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
-
-	if (info.image_size & 3) {
-		printk(KERN_ERR "SMC ucode is not 4 bytes aligned\n");
-		return -EINVAL;
-	}
-
-	if (info.image_size > FIJI_SMC_SIZE) {
-		printk(KERN_ERR "SMC address is beyond the SMC RAM area\n");
-		return -EINVAL;
-	}
-
-	cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000);
-	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
-
-	byte_count = info.image_size;
-	src = (const uint8_t *)info.kptr;
-
-	data = (uint32_t *)src;
-	for (; byte_count >= 4; data++, byte_count -= 4)
-		cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]);
-
-	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-	return 0;
-}
-
-/**
-* Read a 32bit value from the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    smc_addr the address in the SMC RAM to access.
-* @param    value and output parameter for the data read from the SMC SRAM.
-*/
-int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
-		uint32_t *value, uint32_t limit)
-{
-	int	result = fiji_set_smc_sram_address(smumgr, smc_addr, limit);
-
-	if (result)
-		return result;
-
-	*value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
-	return 0;
-}
-
-/**
-* Write a 32bit value to the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    smc_addr the address in the SMC RAM to access.
-* @param    value to write to the SMC SRAM.
-*/
-int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
-		uint32_t value, uint32_t limit)
-{
-	int result;
-
-	result = fiji_set_smc_sram_address(smumgr, smc_addr, limit);
-
-	if (result)
-		return result;
-
-	cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value);
-	return 0;
-}
-
-static uint32_t fiji_get_mask_for_firmware_type(uint32_t fw_type)
-{
-	uint32_t result = 0;
-
-	switch (fw_type) {
-	case UCODE_ID_SDMA0:
-		result = UCODE_ID_SDMA0_MASK;
-		break;
-	case UCODE_ID_SDMA1:
-		result = UCODE_ID_SDMA1_MASK;
-		break;
-	case UCODE_ID_CP_CE:
-		result = UCODE_ID_CP_CE_MASK;
-		break;
-	case UCODE_ID_CP_PFP:
-		result = UCODE_ID_CP_PFP_MASK;
-		break;
-	case UCODE_ID_CP_ME:
-		result = UCODE_ID_CP_ME_MASK;
-		break;
-	case UCODE_ID_CP_MEC_JT1:
-		result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
-		break;
-	case UCODE_ID_CP_MEC_JT2:
-		result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT2_MASK;
-		break;
-	case UCODE_ID_RLC_G:
-		result = UCODE_ID_RLC_G_MASK;
-		break;
-	default:
-		printk(KERN_ERR "UCode type is out of range!");
-		result = 0;
-	}
-
-	return result;
-}
-
-/* Populate one firmware image to the data structure */
-static int fiji_populate_single_firmware_entry(struct pp_smumgr *smumgr,
-		uint32_t fw_type, struct SMU_Entry *entry)
-{
-	int result;
-	struct cgs_firmware_info info = {0};
-
-	result = cgs_get_firmware_info(
-			smumgr->device,
-			fiji_convert_fw_type_to_cgs(fw_type),
-			&info);
-
-	if (!result) {
-		entry->version = 0;
-		entry->id = (uint16_t)fw_type;
-		entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
-		entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
-		entry->meta_data_addr_high = 0;
-		entry->meta_data_addr_low = 0;
-		entry->data_size_byte = info.image_size;
-		entry->num_register_entries = 0;
-
-		if (fw_type == UCODE_ID_RLC_G)
-			entry->flags = 1;
-		else
-			entry->flags = 0;
-	}
-
-	return result;
-}
-
-static int fiji_request_smu_load_fw(struct pp_smumgr *smumgr)
-{
-	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
-	uint32_t fw_to_load;
-	struct SMU_DRAMData_TOC *toc;
-
-	if (priv->soft_regs_start)
-		cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
-				priv->soft_regs_start +
-				offsetof(SMU73_SoftRegisters, UcodeLoadStatus),
-				0x0);
-
-	toc = (struct SMU_DRAMData_TOC *)priv->header;
-	toc->num_entries = 0;
-	toc->structure_version = 1;
-
-	PP_ASSERT_WITH_CODE(
-			0 == fiji_populate_single_firmware_entry(smumgr,
-					UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
-			"Failed to Get Firmware Entry.\n" , return -1 );
-	PP_ASSERT_WITH_CODE(
-			0 == fiji_populate_single_firmware_entry(smumgr,
-					UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
-			"Failed to Get Firmware Entry.\n" , return -1 );
-	PP_ASSERT_WITH_CODE(
-			0 == fiji_populate_single_firmware_entry(smumgr,
-					UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
-			"Failed to Get Firmware Entry.\n" , return -1 );
-	PP_ASSERT_WITH_CODE(
-			0 == fiji_populate_single_firmware_entry(smumgr,
-					UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
-			"Failed to Get Firmware Entry.\n" , return -1 );
-	PP_ASSERT_WITH_CODE(
-			0 == fiji_populate_single_firmware_entry(smumgr,
-					UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
-			"Failed to Get Firmware Entry.\n" , return -1 );
-	PP_ASSERT_WITH_CODE(
-			0 == fiji_populate_single_firmware_entry(smumgr,
-					UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
-					"Failed to Get Firmware Entry.\n" , return -1 );
-	PP_ASSERT_WITH_CODE(
-			0 == fiji_populate_single_firmware_entry(smumgr,
-					UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
-					"Failed to Get Firmware Entry.\n" , return -1 );
-	PP_ASSERT_WITH_CODE(
-			0 == fiji_populate_single_firmware_entry(smumgr,
-					UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
-					"Failed to Get Firmware Entry.\n" , return -1 );
-	PP_ASSERT_WITH_CODE(
-			0 == fiji_populate_single_firmware_entry(smumgr,
-					UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
-					"Failed to Get Firmware Entry.\n" , return -1 );
-
-	fiji_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI,
-			priv->header_buffer.mc_addr_high);
-	fiji_send_msg_to_smc_with_parameter(smumgr,PPSMC_MSG_DRV_DRAM_ADDR_LO,
-			priv->header_buffer.mc_addr_low);
-
-	fw_to_load = UCODE_ID_RLC_G_MASK
-			+ UCODE_ID_SDMA0_MASK
-			+ UCODE_ID_SDMA1_MASK
-			+ UCODE_ID_CP_CE_MASK
-			+ UCODE_ID_CP_ME_MASK
-			+ UCODE_ID_CP_PFP_MASK
-			+ UCODE_ID_CP_MEC_MASK
-			+ UCODE_ID_CP_MEC_JT1_MASK
-			+ UCODE_ID_CP_MEC_JT2_MASK;
-
-	if (fiji_send_msg_to_smc_with_parameter(smumgr,
-			PPSMC_MSG_LoadUcodes, fw_to_load))
-		printk(KERN_ERR "Fail to Request SMU Load uCode");
-
-	return 0;
-}
-
-
-/* Check if the FW has been loaded, SMU will not return
- * if loading has not finished.
- */
-static int fiji_check_fw_load_finish(struct pp_smumgr *smumgr,
-		uint32_t fw_type)
-{
-	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
-	uint32_t mask = fiji_get_mask_for_firmware_type(fw_type);
-
-	/* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */
-	if (smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX,
-			priv->soft_regs_start +
-			offsetof(SMU73_SoftRegisters, UcodeLoadStatus),
-			mask, mask)) {
-		printk(KERN_ERR "check firmware loading failed\n");
-		return -EINVAL;
-	}
-	return 0;
-}
-
-
-static int fiji_reload_firmware(struct pp_smumgr *smumgr)
-{
-	return smumgr->smumgr_funcs->start_smu(smumgr);
-}
-
-static bool fiji_is_hw_virtualization_enabled(struct pp_smumgr *smumgr)
-{
-	uint32_t value;
-
-	value = cgs_read_register(smumgr->device, mmBIF_IOV_FUNC_IDENTIFIER);
-	if (value & BIF_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK) {
-		/* driver reads on SR-IOV enabled PF: 0x80000000
-		 * driver reads on SR-IOV enabled VF: 0x80000001
-		 * driver reads on SR-IOV disabled:   0x00000000
-		 */
-		return true;
-	}
-	return false;
-}
-
-static int fiji_request_smu_specific_fw_load(struct pp_smumgr *smumgr, uint32_t fw_type)
-{
-	if (fiji_is_hw_virtualization_enabled(smumgr)) {
-		uint32_t masks = fiji_get_mask_for_firmware_type(fw_type);
-		if (fiji_send_msg_to_smc_with_parameter_without_waiting(smumgr,
-				PPSMC_MSG_LoadUcodes, masks))
-			printk(KERN_ERR "Fail to Request SMU Load uCode");
-	}
-	/* For non-virtualization cases,
-	 * SMU loads all FWs at once in fiji_request_smu_load_fw.
-	 */
-	return 0;
-}
-
 static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
 {
 	int result = 0;
@@ -571,7 +69,7 @@
 	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
 			SMC_SYSCON_RESET_CNTL, rst_reg, 1);
 
-	result = fiji_upload_smu_firmware_image(smumgr);
+	result = smu7_upload_smu_firmware_image(smumgr);
 	if (result)
 		return result;
 
@@ -610,8 +108,8 @@
 			SMU_STATUS, SMU_DONE, 0);
 
 	/* Check pass/failed indicator */
-	if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
-			SMU_STATUS, SMU_PASS)) {
+	if (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+			SMU_STATUS, SMU_PASS) != 1) {
 		PP_ASSERT_WITH_CODE(false,
 				"SMU Firmware start failed!", return -1);
 	}
@@ -639,12 +137,12 @@
 	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
 			SMC_SYSCON_RESET_CNTL, rst_reg, 1);
 
-	result = fiji_upload_smu_firmware_image(smumgr);
+	result = smu7_upload_smu_firmware_image(smumgr);
 	if (result)
 		return result;
 
 	/* Set smc instruct start point at 0x0 */
-	fiji_program_jump_on_start(smumgr);
+	smu7_program_jump_on_start(smumgr);
 
 	/* Enable clock */
 	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
@@ -698,15 +196,15 @@
 
 	priv->avfs.AvfsBtcStatus = AVFS_BTC_STARTED;
 	if (priv->avfs.AvfsBtcParam) {
-		if (!fiji_send_msg_to_smc_with_parameter(smumgr,
+		if (!smum_send_msg_to_smc_with_parameter(smumgr,
 				PPSMC_MSG_PerformBtc, priv->avfs.AvfsBtcParam)) {
-			if (!fiji_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) {
+			if (!smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) {
 				priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_UNSAVED;
 				result = 0;
 			} else {
 				printk(KERN_ERR "[AVFS][fiji_start_avfs_btc] Attempt"
 						" to Enable AVFS Failed!");
-				fiji_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs);
+				smum_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs);
 				result = -1;
 			}
 		} else {
@@ -736,7 +234,7 @@
 	charz_freq = 0x30750000; /* In 10KHz units 0x00007530 Actual value */
 	inversion_voltage = 0x1A04; /* mV Q14.2 0x41A Actual value */
 
-	PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr,
+	PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
 			SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header,
 					PmFuseTable), &table_start, 0x40000),
 			"[AVFS][Fiji_SetupGfxLvlStruct] SMU could not communicate "
@@ -748,13 +246,13 @@
 	inversion_voltage_addr = table_start +
 			offsetof(struct SMU73_Discrete_PmFuses, InversionVoltage);
 
-	result = fiji_copy_bytes_to_smc(smumgr, charz_freq_addr,
+	result = smu7_copy_bytes_to_smc(smumgr, charz_freq_addr,
 			(uint8_t *)(&charz_freq), sizeof(charz_freq), 0x40000);
 	PP_ASSERT_WITH_CODE(0 == result,
 			"[AVFS][fiji_setup_pm_fuse_for_avfs] charz_freq could not "
 			"be populated.", return -1;);
 
-	result = fiji_copy_bytes_to_smc(smumgr, inversion_voltage_addr,
+	result = smu7_copy_bytes_to_smc(smumgr, inversion_voltage_addr,
 			(uint8_t *)(&inversion_voltage), sizeof(inversion_voltage), 0x40000);
 	PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] "
 			"charz_freq could not be populated.", return -1;);
@@ -769,7 +267,7 @@
 	uint32_t level_addr, vr_config_addr;
 	uint32_t level_size = sizeof(avfs_graphics_level);
 
-	PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr,
+	PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
 			SMU7_FIRMWARE_HEADER_LOCATION +
 			offsetof(SMU73_Firmware_Header, DpmTable),
 			&table_start, 0x40000),
@@ -784,7 +282,7 @@
 	vr_config_addr = table_start +
 			offsetof(SMU73_Discrete_DpmTable, VRConfig);
 
-	PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, vr_config_addr,
+	PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_addr,
 			(uint8_t *)&vr_config, sizeof(int32_t), 0x40000),
 			"[AVFS][Fiji_SetupGfxLvlStruct] Problems copying "
 			"vr_config value over to SMC",
@@ -792,7 +290,7 @@
 
 	level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
 
-	PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, level_addr,
+	PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, level_addr,
 			(uint8_t *)(&avfs_graphics_level), level_size, 0x40000),
 			"[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!",
 			return -1;);
@@ -839,13 +337,13 @@
 		break;
 	case AVFS_BTC_COMPLETED_RESTORED: /*S3 State - Post SMU Start*/
 		priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
-		PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr,
-				PPSMC_MSG_VftTableIsValid),
+		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr,
+				0x666),
 				"[AVFS][fiji_avfs_event_mgr] SMU did not respond "
 				"correctly to VftTableIsValid Msg",
 				return -1;);
 		priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
-		PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr,
+		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr,
 				PPSMC_MSG_EnableAvfs),
 				"[AVFS][fiji_avfs_event_mgr] SMU did not respond "
 				"correctly to EnableAvfs Message Msg",
@@ -898,7 +396,7 @@
 	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
 
 	/* Only start SMC if SMC RAM is not running */
-	if (!fiji_is_smc_ram_running(smumgr)) {
+	if (!smu7_is_smc_ram_running(smumgr)) {
 		fiji_avfs_event_mgr(smumgr, false);
 
 		/* Check if SMU is running in protected mode */
@@ -929,12 +427,12 @@
 	/* Setup SoftRegsStart here for register lookup in case
 	 * DummyBackEnd is used and ProcessFirmwareHeader is not executed
 	 */
-	fiji_read_smc_sram_dword(smumgr,
+	smu7_read_smc_sram_dword(smumgr,
 			SMU7_FIRMWARE_HEADER_LOCATION +
 			offsetof(SMU73_Firmware_Header, SoftRegisters),
-			&(priv->soft_regs_start), 0x40000);
+			&(priv->smu7_data.soft_regs_start), 0x40000);
 
-	result = fiji_request_smu_load_fw(smumgr);
+	result = smu7_request_smu_load_fw(smumgr);
 
 	return result;
 }
@@ -963,28 +461,10 @@
 static int fiji_smu_init(struct pp_smumgr *smumgr)
 {
 	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
-	uint64_t mc_addr;
+	int i;
 
-	priv->header_buffer.data_size =
-			((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
-	smu_allocate_memory(smumgr->device,
-			priv->header_buffer.data_size,
-			CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
-			PAGE_SIZE,
-			&mc_addr,
-			&priv->header_buffer.kaddr,
-			&priv->header_buffer.handle);
-
-	priv->header = priv->header_buffer.kaddr;
-	priv->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
-	priv->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
-	PP_ASSERT_WITH_CODE((NULL != priv->header),
-			"Out of memory.",
-			kfree(smumgr->backend);
-			cgs_free_gpu_mem(smumgr->device,
-			(cgs_handle_t)priv->header_buffer.handle);
-			return -1);
+	if (smu7_init(smumgr))
+		return -EINVAL;
 
 	priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT;
 	if (fiji_is_hw_avfs_present(smumgr))
@@ -999,37 +479,35 @@
 	else
 		priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED;
 
-	priv->acpi_optimization = 1;
+	for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
+		priv->activity_target[i] = 30;
 
 	return 0;
 }
 
-static int fiji_smu_fini(struct pp_smumgr *smumgr)
-{
-	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
-
-	smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
-
-	if (smumgr->backend) {
-		kfree(smumgr->backend);
-		smumgr->backend = NULL;
-	}
-
-	cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
-	return 0;
-}
 
 static const struct pp_smumgr_func fiji_smu_funcs = {
 	.smu_init = &fiji_smu_init,
-	.smu_fini = &fiji_smu_fini,
+	.smu_fini = &smu7_smu_fini,
 	.start_smu = &fiji_start_smu,
-	.check_fw_load_finish = &fiji_check_fw_load_finish,
-	.request_smu_load_fw = &fiji_reload_firmware,
-	.request_smu_load_specific_fw = &fiji_request_smu_specific_fw_load,
-	.send_msg_to_smc = &fiji_send_msg_to_smc,
-	.send_msg_to_smc_with_parameter = &fiji_send_msg_to_smc_with_parameter,
+	.check_fw_load_finish = &smu7_check_fw_load_finish,
+	.request_smu_load_fw = &smu7_reload_firmware,
+	.request_smu_load_specific_fw = NULL,
+	.send_msg_to_smc = &smu7_send_msg_to_smc,
+	.send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
 	.download_pptable_settings = NULL,
 	.upload_pptable_settings = NULL,
+	.update_smc_table = fiji_update_smc_table,
+	.get_offsetof = fiji_get_offsetof,
+	.process_firmware_header = fiji_process_firmware_header,
+	.init_smc_table = fiji_init_smc_table,
+	.update_sclk_threshold = fiji_update_sclk_threshold,
+	.thermal_setup_fan_table = fiji_thermal_setup_fan_table,
+	.populate_all_graphic_levels = fiji_populate_all_graphic_levels,
+	.populate_all_memory_levels = fiji_populate_all_memory_levels,
+	.get_mac_definition = fiji_get_mac_definition,
+	.initialize_mc_reg_table = fiji_initialize_mc_reg_table,
+	.is_dpm_running = fiji_is_dpm_running,
 };
 
 int fiji_smum_init(struct pp_smumgr *smumgr)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
index b4eb483..adcbdfb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
@@ -23,37 +23,31 @@
 #ifndef _FIJI_SMUMANAGER_H_
 #define _FIJI_SMUMANAGER_H_
 
+#include "smu73_discrete.h"
+#include <pp_endian.h>
+#include "smu7_smumgr.h"
+
+
 
 struct fiji_smu_avfs {
 	enum AVFS_BTC_STATUS AvfsBtcStatus;
 	uint32_t           AvfsBtcParam;
 };
 
-struct fiji_buffer_entry {
-	uint32_t data_size;
-	uint32_t mc_addr_low;
-	uint32_t mc_addr_high;
-	void *kaddr;
-	unsigned long  handle;
-};
 
 struct fiji_smumgr {
-	uint8_t        *header;
-	uint8_t        *mec_image;
-	uint32_t        soft_regs_start;
-	struct fiji_smu_avfs avfs;
-	uint32_t        acpi_optimization;
+	struct smu7_smumgr                   smu7_data;
 
-	struct fiji_buffer_entry header_buffer;
+	struct fiji_smu_avfs avfs;
+	struct SMU73_Discrete_DpmTable       smc_state_table;
+	struct SMU73_Discrete_Ulv            ulv_setting;
+	struct SMU73_Discrete_PmFuses  power_tune_table;
+	const struct fiji_pt_defaults  *power_tune_defaults;
+	uint32_t        activity_target[SMU73_MAX_LEVELS_GRAPHICS];
+
 };
 
-int fiji_smum_init(struct pp_smumgr *smumgr);
-int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
-		uint32_t *value, uint32_t limit);
-int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
-		uint32_t value, uint32_t limit);
-int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smcStartAddress,
-		const uint8_t *src,	uint32_t byteCount, uint32_t limit);
+
 
 #endif
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
new file mode 100644
index 0000000..8c889ca
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -0,0 +1,2576 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ *
+ */
+
+#include "iceland_smc.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+#include "pp_endian.h"
+#include "smu7_ppsmc.h"
+
+#include "smu71_discrete.h"
+
+#include "smu/smu_7_1_1_d.h"
+#include "smu/smu_7_1_1_sh_mask.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "processpptables.h"
+
+#include "iceland_smumgr.h"
+
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX    1
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define VDDC_VDDCI_DELTA            200
+
+#define DEVICE_ID_VI_ICELAND_M_6900	0x6900
+#define DEVICE_ID_VI_ICELAND_M_6901	0x6901
+#define DEVICE_ID_VI_ICELAND_M_6902	0x6902
+#define DEVICE_ID_VI_ICELAND_M_6903	0x6903
+
+static const struct iceland_pt_defaults defaults_iceland = {
+	/*
+	 * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
+	 * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
+	 */
+	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
+	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
+};
+
+/* 35W - XT, XTL */
+static const struct iceland_pt_defaults defaults_icelandxt = {
+	/*
+	 * sviLoadLIneEn, SviLoadLineVddC,
+	 * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+	 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
+	 * BAPM_TEMP_GRADIENT
+	 */
+	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
+	{ 0xA7,  0x0, 0x0, 0xB5,  0x0, 0x0, 0x9F,  0x0, 0x0, 0xD6,  0x0, 0x0, 0xD7,  0x0, 0x0},
+	{ 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+};
+
+/* 25W - PRO, LE */
+static const struct iceland_pt_defaults defaults_icelandpro = {
+	/*
+	 * sviLoadLIneEn, SviLoadLineVddC,
+	 * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+	 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
+	 * BAPM_TEMP_GRADIENT
+	 */
+	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
+	{ 0xB7,  0x0, 0x0, 0xC3,  0x0, 0x0, 0xB5,  0x0, 0x0, 0xEA,  0x0, 0x0, 0xE6,  0x0, 0x0},
+	{ 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+};
+
+static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+	struct cgs_system_info sys_info = {0};
+	uint32_t dev_id;
+
+	sys_info.size = sizeof(struct cgs_system_info);
+	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+	cgs_query_system_info(hwmgr->device, &sys_info);
+	dev_id = (uint32_t)sys_info.value;
+
+	switch (dev_id) {
+	case DEVICE_ID_VI_ICELAND_M_6900:
+	case DEVICE_ID_VI_ICELAND_M_6903:
+		smu_data->power_tune_defaults = &defaults_icelandxt;
+		break;
+
+	case DEVICE_ID_VI_ICELAND_M_6901:
+	case DEVICE_ID_VI_ICELAND_M_6902:
+		smu_data->power_tune_defaults = &defaults_icelandpro;
+		break;
+	default:
+		smu_data->power_tune_defaults = &defaults_iceland;
+		pr_warning("Unknown V.I. Device ID.\n");
+		break;
+	}
+	return;
+}
+
+static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+	const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+	smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+	smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
+	smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+	smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+	return 0;
+}
+
+static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+	uint16_t tdc_limit;
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+	const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+	tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
+	smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+	smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+			defaults->tdc_vddc_throttle_release_limit_perc;
+	smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+	return 0;
+}
+
+static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+	const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+	uint32_t temp;
+
+	if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+			fuse_table_offset +
+			offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
+			(uint32_t *)&temp, SMC_RAM_END))
+		PP_ASSERT_WITH_CODE(false,
+				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+				return -EINVAL);
+	else
+		smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+	return 0;
+}
+
+static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+	return 0;
+}
+
+static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+
+	/* Currently not used. Set all to zero. */
+	for (i = 0; i < 8; i++)
+		smu_data->power_tune_table.GnbLPML[i] = 0;
+
+	return 0;
+}
+
+static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+	return 0;
+}
+
+static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+	uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+	uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+	struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
+
+	HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+	LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+	smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+	smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+
+	return 0;
+}
+
+static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+	uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
+	uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
+
+	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
+			    "The CAC Leakage table does not exist!", return -EINVAL);
+	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
+			    "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
+	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
+			    "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
+		for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
+			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
+			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
+		}
+	} else {
+		PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
+	}
+
+	return 0;
+}
+
+static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+	uint8_t *vid = smu_data->power_tune_table.VddCVid;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
+		"There should never be more than 8 entries for VddcVid!!!",
+		return -EINVAL);
+
+	for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
+		vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
+	}
+
+	return 0;
+}
+
+
+
+static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+	uint32_t pm_fuse_table_offset;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment)) {
+		if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, PmFuseTable),
+				&pm_fuse_table_offset, SMC_RAM_END))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to get pm_fuse_table_offset Failed!",
+					return -EINVAL);
+
+		/* DW0 - DW3 */
+		if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate bapm vddc vid Failed!",
+					return -EINVAL);
+
+		/* DW4 - DW5 */
+		if (iceland_populate_vddc_vid(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate vddc vid Failed!",
+					return -EINVAL);
+
+		/* DW6 */
+		if (iceland_populate_svi_load_line(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate SviLoadLine Failed!",
+					return -EINVAL);
+		/* DW7 */
+		if (iceland_populate_tdc_limit(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate TDCLimit Failed!", return -EINVAL);
+		/* DW8 */
+		if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate TdcWaterfallCtl, "
+					"LPMLTemperature Min and Max Failed!",
+					return -EINVAL);
+
+		/* DW9-DW12 */
+		if (0 != iceland_populate_temperature_scaler(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate LPMLTemperatureScaler Failed!",
+					return -EINVAL);
+
+		/* DW13-DW16 */
+		if (iceland_populate_gnb_lpml(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate GnbLPML Failed!",
+					return -EINVAL);
+
+		/* DW17 */
+		if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate GnbLPML Min and Max Vid Failed!",
+					return -EINVAL);
+
+		/* DW18 */
+		if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
+					return -EINVAL);
+
+		if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+				(uint8_t *)&smu_data->power_tune_table,
+				sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to download PmFuseTable Failed!",
+					return -EINVAL);
+	}
+	return 0;
+}
+
+static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
+	struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
+	uint32_t clock, uint32_t *vol)
+{
+	uint32_t i = 0;
+
+	/* clock - voltage dependency table is empty table */
+	if (allowed_clock_voltage_table->count == 0)
+		return -EINVAL;
+
+	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+		/* find first sclk bigger than request */
+		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+			*vol = allowed_clock_voltage_table->entries[i].v;
+			return 0;
+		}
+	}
+
+	/* sclk is bigger than max sclk in the dependence table */
+	*vol = allowed_clock_voltage_table->entries[i - 1].v;
+
+	return 0;
+}
+
+static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
+		pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
+		uint16_t *lo)
+{
+	uint16_t v_index;
+	bool vol_found = false;
+	*hi = tab->value * VOLTAGE_SCALE;
+	*lo = tab->value * VOLTAGE_SCALE;
+
+	/* SCLK/VDDC Dependency Table has to exist. */
+	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
+			"The SCLK/VDDC Dependency Table does not exist.\n",
+			return -EINVAL);
+
+	if (NULL == hwmgr->dyn_state.cac_leakage_table) {
+		pr_warning("CAC Leakage Table does not exist, using vddc.\n");
+		return 0;
+	}
+
+	/*
+	 * Since voltage in the sclk/vddc dependency table is not
+	 * necessarily in ascending order because of ELB voltage
+	 * patching, loop through entire list to find exact voltage.
+	 */
+	for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+		if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+			vol_found = true;
+			if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+				*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
+			} else {
+				pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
+				*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+			}
+			break;
+		}
+	}
+
+	/*
+	 * If voltage is not found in the first pass, loop again to
+	 * find the best match, equal or higher value.
+	 */
+	if (!vol_found) {
+		for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+			if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+				vol_found = true;
+				if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+					*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
+				} else {
+					pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
+					*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+				}
+				break;
+			}
+		}
+
+		if (!vol_found)
+			pr_warning("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
+	}
+
+	return 0;
+}
+
+static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
+		pp_atomctrl_voltage_table_entry *tab,
+		SMU71_Discrete_VoltageLevel *smc_voltage_tab)
+{
+	int result;
+
+	result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
+			&smc_voltage_tab->StdVoltageHiSidd,
+			&smc_voltage_tab->StdVoltageLoSidd);
+	if (0 != result) {
+		smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
+		smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
+	}
+
+	smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
+	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
+	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
+
+	return 0;
+}
+
+static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+			SMU71_Discrete_DpmTable *table)
+{
+	unsigned int count;
+	int result;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	table->VddcLevelCount = data->vddc_voltage_table.count;
+	for (count = 0; count < table->VddcLevelCount; count++) {
+		result = iceland_populate_smc_voltage_table(hwmgr,
+				&(data->vddc_voltage_table.entries[count]),
+				&(table->VddcLevel[count]));
+		PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
+
+		/* GPIO voltage control */
+		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
+			table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
+		else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+			table->VddcLevel[count].Smio = 0;
+	}
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+
+	return 0;
+}
+
+static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+			SMU71_Discrete_DpmTable *table)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t count;
+	int result;
+
+	table->VddciLevelCount = data->vddci_voltage_table.count;
+
+	for (count = 0; count < table->VddciLevelCount; count++) {
+		result = iceland_populate_smc_voltage_table(hwmgr,
+				&(data->vddci_voltage_table.entries[count]),
+				&(table->VddciLevel[count]));
+		PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
+		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+			table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
+		else
+			table->VddciLevel[count].Smio |= 0;
+	}
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+	return 0;
+}
+
+static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+			SMU71_Discrete_DpmTable *table)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t count;
+	int result;
+
+	table->MvddLevelCount = data->mvdd_voltage_table.count;
+
+	for (count = 0; count < table->VddciLevelCount; count++) {
+		result = iceland_populate_smc_voltage_table(hwmgr,
+				&(data->mvdd_voltage_table.entries[count]),
+				&table->MvddLevel[count]);
+		PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
+		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
+			table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
+		else
+			table->MvddLevel[count].Smio |= 0;
+	}
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+
+	return 0;
+}
+
+
+static int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+	SMU71_Discrete_DpmTable *table)
+{
+	int result;
+
+	result = iceland_populate_smc_vddc_table(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"can not populate VDDC voltage table to SMC", return -EINVAL);
+
+	result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"can not populate VDDCI voltage table to SMC", return -EINVAL);
+
+	result = iceland_populate_smc_mvdd_table(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"can not populate MVDD voltage table to SMC", return -EINVAL);
+
+	return 0;
+}
+
+static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
+		struct SMU71_Discrete_Ulv *state)
+{
+	uint32_t voltage_response_time, ulv_voltage;
+	int result;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	state->CcPwrDynRm = 0;
+	state->CcPwrDynRm1 = 0;
+
+	result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
+	PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
+
+	if (ulv_voltage == 0) {
+		data->ulv_supported = false;
+		return 0;
+	}
+
+	if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+			state->VddcOffset = 0;
+		else
+			/* used in SMIO Mode. not implemented for now. this is backup only for CI. */
+			state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
+	} else {
+		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+			state->VddcOffsetVid = 0;
+		else  /* used in SVI2 Mode */
+			state->VddcOffsetVid = (uint8_t)(
+					(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
+						* VOLTAGE_VID_OFFSET_SCALE2
+						/ VOLTAGE_VID_OFFSET_SCALE1);
+	}
+	state->VddcPhase = 1;
+
+	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+	return 0;
+}
+
+static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr,
+		 SMU71_Discrete_Ulv *ulv_level)
+{
+	return iceland_populate_ulv_level(hwmgr, ulv_level);
+}
+
+static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct smu7_dpm_table *dpm_table = &data->dpm_table;
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+	uint32_t i;
+
+	/* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+		table->LinkLevel[i].PcieGenSpeed  =
+			(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+		table->LinkLevel[i].PcieLaneCount =
+			(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+		table->LinkLevel[i].EnabledForActivity =
+			1;
+		table->LinkLevel[i].SPC =
+			(uint8_t)(data->pcie_spc_cap & 0xff);
+		table->LinkLevel[i].DownThreshold =
+			PP_HOST_TO_SMC_UL(5);
+		table->LinkLevel[i].UpThreshold =
+			PP_HOST_TO_SMC_UL(30);
+	}
+
+	smu_data->smc_state_table.LinkLevelCount =
+		(uint8_t)dpm_table->pcie_speed_table.count;
+	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+		phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+	return 0;
+}
+
+/**
+ * Calculates the SCLK dividers using the provided engine clock
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    engine_clock the engine clock to use to populate the structure
+ * @param    sclk        the SMC SCLK structure to be populated
+ */
+static int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+		uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
+{
+	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	pp_atomctrl_clock_dividers_vi dividers;
+	uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+	uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+	uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+	uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+	uint32_t    reference_clock;
+	uint32_t reference_divider;
+	uint32_t fbdiv;
+	int result;
+
+	/* get the engine clock dividers for this clock value*/
+	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock,  &dividers);
+
+	PP_ASSERT_WITH_CODE(result == 0,
+		"Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
+	reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+	reference_divider = 1 + dividers.uc_pll_ref_div;
+
+	/* low 14 bits is fraction and high 12 bits is divider*/
+	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+	/* SPLL_FUNC_CNTL setup*/
+	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+		CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+		CG_SPLL_FUNC_CNTL, SPLL_PDIV_A,  dividers.uc_pll_post_div);
+
+	/* SPLL_FUNC_CNTL_3 setup*/
+	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+		CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+	/* set to use fractional accumulation*/
+	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+		CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+		pp_atomctrl_internal_ss_info ss_info;
+
+		uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+		if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+			/*
+			* ss_info.speed_spectrum_percentage -- in unit of 0.01%
+			* ss_info.speed_spectrum_rate -- in unit of khz
+			*/
+			/* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+			uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+			/* clkv = 2 * D * fbdiv / NS */
+			uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+			cg_spll_spread_spectrum =
+				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+			cg_spll_spread_spectrum =
+				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+			cg_spll_spread_spectrum_2 =
+				PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
+		}
+	}
+
+	sclk->SclkFrequency        = engine_clock;
+	sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
+	sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
+	sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
+	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
+	sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
+
+	return 0;
+}
+
+static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
+				const struct phm_phase_shedding_limits_table *pl,
+					uint32_t sclk, uint32_t *p_shed)
+{
+	unsigned int i;
+
+	/* use the minimum phase shedding */
+	*p_shed = 1;
+
+	for (i = 0; i < pl->count; i++) {
+		if (sclk < pl->entries[i].Sclk) {
+			*p_shed = i;
+			break;
+		}
+	}
+	return 0;
+}
+
+/**
+ * Populates single SMC SCLK structure using the provided engine clock
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    engine_clock the engine clock to use to populate the structure
+ * @param    sclk        the SMC SCLK structure to be populated
+ */
+static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+						uint32_t engine_clock,
+				uint16_t sclk_activity_level_threshold,
+				SMU71_Discrete_GraphicsLevel *graphic_level)
+{
+	int result;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+
+	/* populate graphics levels*/
+	result = iceland_get_dependecy_volt_by_clk(hwmgr,
+		hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock,
+		&graphic_level->MinVddc);
+	PP_ASSERT_WITH_CODE((0 == result),
+		"can not find VDDC voltage value for VDDC	\
+		engine clock dependency table", return result);
+
+	/* SCLK frequency in units of 10KHz*/
+	graphic_level->SclkFrequency = engine_clock;
+	graphic_level->MinVddcPhases = 1;
+
+	if (data->vddc_phase_shed_control)
+		iceland_populate_phase_value_based_on_sclk(hwmgr,
+				hwmgr->dyn_state.vddc_phase_shed_limits_table,
+				engine_clock,
+				&graphic_level->MinVddcPhases);
+
+	/* Indicates maximum activity level for this performance level. 50% for now*/
+	graphic_level->ActivityLevel = sclk_activity_level_threshold;
+
+	graphic_level->CcPwrDynRm = 0;
+	graphic_level->CcPwrDynRm1 = 0;
+	/* this level can be used if activity is high enough.*/
+	graphic_level->EnabledForActivity = 0;
+	/* this level can be used for throttling.*/
+	graphic_level->EnabledForThrottle = 1;
+	graphic_level->UpHyst = 0;
+	graphic_level->DownHyst = 100;
+	graphic_level->VoltageDownHyst = 0;
+	graphic_level->PowerThrottle = 0;
+
+	data->display_timing.min_clock_in_sr =
+			hwmgr->display_config.min_core_set_clock_in_sr;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkDeepSleep))
+		graphic_level->DeepSleepDivId =
+				smu7_get_sleep_divider_id_from_clock(engine_clock,
+						data->display_timing.min_clock_in_sr);
+
+	/* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
+	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	if (0 == result) {
+		graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
+		CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
+	}
+
+	return result;
+}
+
+/**
+ * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+ *
+ * @param    hwmgr      the address of the hardware manager
+ */
+int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+	struct smu7_dpm_table *dpm_table = &data->dpm_table;
+	uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start +
+				offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);
+
+	uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) *
+						SMU71_MAX_LEVELS_GRAPHICS;
+
+	SMU71_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
+
+	uint32_t i;
+	uint8_t highest_pcie_level_enabled = 0;
+	uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
+	uint8_t count = 0;
+	int result = 0;
+
+	memset(levels, 0x00, level_array_size);
+
+	for (i = 0; i < dpm_table->sclk_table.count; i++) {
+		result = iceland_populate_single_graphic_level(hwmgr,
+					dpm_table->sclk_table.dpm_levels[i].value,
+					(uint16_t)smu_data->activity_target[i],
+					&(smu_data->smc_state_table.GraphicsLevel[i]));
+		if (result != 0)
+			return result;
+
+		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+		if (i > 1)
+			smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+	}
+
+	/* Only enable level 0 for now. */
+	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+	/* set highest level watermark to high */
+	if (dpm_table->sclk_table.count > 1)
+		smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+			PPSMC_DISPLAY_WATERMARK_HIGH;
+
+	smu_data->smc_state_table.GraphicsDpmLevelCount =
+		(uint8_t)dpm_table->sclk_table.count;
+	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+		phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+	while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+				(1 << (highest_pcie_level_enabled + 1))) != 0) {
+		highest_pcie_level_enabled++;
+	}
+
+	while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+		(1 << lowest_pcie_level_enabled)) == 0) {
+		lowest_pcie_level_enabled++;
+	}
+
+	while ((count < highest_pcie_level_enabled) &&
+			((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+				(1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
+		count++;
+	}
+
+	mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
+		(lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
+
+
+	/* set pcieDpmLevel to highest_pcie_level_enabled*/
+	for (i = 2; i < dpm_table->sclk_table.count; i++) {
+		smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+	}
+
+	/* set pcieDpmLevel to lowest_pcie_level_enabled*/
+	smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+	/* set pcieDpmLevel to mid_pcie_level_enabled*/
+	smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+
+	/* level count will send to smc once at init smc table and never change*/
+	result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress,
+				(uint8_t *)levels, (uint32_t)level_array_size,
+								SMC_RAM_END);
+
+	return result;
+}
+
+/**
+ * Populates the SMC MCLK structure using the provided memory clock
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    memory_clock the memory clock to use to populate the structure
+ * @param    sclk        the SMC SCLK structure to be populated
+ */
+static int iceland_calculate_mclk_params(
+		struct pp_hwmgr *hwmgr,
+		uint32_t memory_clock,
+		SMU71_Discrete_MemoryLevel *mclk,
+		bool strobe_mode,
+		bool dllStateOn
+		)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	uint32_t  dll_cntl = data->clock_registers.vDLL_CNTL;
+	uint32_t  mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+	uint32_t  mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+	uint32_t  mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+	uint32_t  mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+	uint32_t  mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+	uint32_t  mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+	uint32_t  mpll_ss1 = data->clock_registers.vMPLL_SS1;
+	uint32_t  mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+	pp_atomctrl_memory_clock_param mpll_param;
+	int result;
+
+	result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+				memory_clock, &mpll_param, strobe_mode);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Error retrieving Memory Clock Parameters from VBIOS.", return result);
+
+	/* MPLL_FUNC_CNTL setup*/
+	mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
+
+	/* MPLL_FUNC_CNTL_1 setup*/
+	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+							MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
+	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+							MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
+	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+							MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
+
+	/* MPLL_AD_FUNC_CNTL setup*/
+	mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+							MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+
+	if (data->is_memory_gddr5) {
+		/* MPLL_DQ_FUNC_CNTL setup*/
+		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
+								MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
+		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
+								MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+		/*
+		 ************************************
+		 Fref = Reference Frequency
+		 NF = Feedback divider ratio
+		 NR = Reference divider ratio
+		 Fnom = Nominal VCO output frequency = Fref * NF / NR
+		 Fs = Spreading Rate
+		 D = Percentage down-spread / 2
+		 Fint = Reference input frequency to PFD = Fref / NR
+		 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
+		 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
+		 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
+		 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
+		 *************************************
+		 */
+		pp_atomctrl_internal_ss_info ss_info;
+		uint32_t freq_nom;
+		uint32_t tmp;
+		uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
+
+		/* for GDDR5 for all modes and DDR3 */
+		if (1 == mpll_param.qdr)
+			freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
+		else
+			freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
+
+		/* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1*/
+		tmp = (freq_nom / reference_clock);
+		tmp = tmp * tmp;
+
+		if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+			/* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
+			/* ss.Info.speed_spectrum_rate -- in unit of khz */
+			/* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
+			/*     = reference_clock * 5 / speed_spectrum_rate */
+			uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+
+			/* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
+			/*     = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
+			uint32_t clkv =
+				(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+							ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+			mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+			mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+		}
+	}
+
+	/* MCLK_PWRMGT_CNTL setup */
+	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+
+	/* Save the result data to outpupt memory level structure */
+	mclk->MclkFrequency   = memory_clock;
+	mclk->MpllFuncCntl    = mpll_func_cntl;
+	mclk->MpllFuncCntl_1  = mpll_func_cntl_1;
+	mclk->MpllFuncCntl_2  = mpll_func_cntl_2;
+	mclk->MpllAdFuncCntl  = mpll_ad_func_cntl;
+	mclk->MpllDqFuncCntl  = mpll_dq_func_cntl;
+	mclk->MclkPwrmgtCntl  = mclk_pwrmgt_cntl;
+	mclk->DllCntl         = dll_cntl;
+	mclk->MpllSs1         = mpll_ss1;
+	mclk->MpllSs2         = mpll_ss2;
+
+	return 0;
+}
+
+static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
+		bool strobe_mode)
+{
+	uint8_t mc_para_index;
+
+	if (strobe_mode) {
+		if (memory_clock < 12500) {
+			mc_para_index = 0x00;
+		} else if (memory_clock > 47500) {
+			mc_para_index = 0x0f;
+		} else {
+			mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
+		}
+	} else {
+		if (memory_clock < 65000) {
+			mc_para_index = 0x00;
+		} else if (memory_clock > 135000) {
+			mc_para_index = 0x0f;
+		} else {
+			mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
+		}
+	}
+
+	return mc_para_index;
+}
+
+static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+{
+	uint8_t mc_para_index;
+
+	if (memory_clock < 10000) {
+		mc_para_index = 0;
+	} else if (memory_clock >= 80000) {
+		mc_para_index = 0x0f;
+	} else {
+		mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
+	}
+
+	return mc_para_index;
+}
+
+static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
+					uint32_t memory_clock, uint32_t *p_shed)
+{
+	unsigned int i;
+
+	*p_shed = 1;
+
+	for (i = 0; i < pl->count; i++) {
+		if (memory_clock < pl->entries[i].Mclk) {
+			*p_shed = i;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int iceland_populate_single_memory_level(
+		struct pp_hwmgr *hwmgr,
+		uint32_t memory_clock,
+		SMU71_Discrete_MemoryLevel *memory_level
+		)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	int result = 0;
+	bool dll_state_on;
+	struct cgs_display_info info = {0};
+	uint32_t mclk_edc_wr_enable_threshold = 40000;
+	uint32_t mclk_edc_enable_threshold = 40000;
+	uint32_t mclk_strobe_mode_threshold = 40000;
+
+	if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
+		result = iceland_get_dependecy_volt_by_clk(hwmgr,
+			hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
+		PP_ASSERT_WITH_CODE((0 == result),
+			"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
+	}
+
+	if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) {
+		memory_level->MinVddci = memory_level->MinVddc;
+	} else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
+		result = iceland_get_dependecy_volt_by_clk(hwmgr,
+				hwmgr->dyn_state.vddci_dependency_on_mclk,
+				memory_clock,
+				&memory_level->MinVddci);
+		PP_ASSERT_WITH_CODE((0 == result),
+			"can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
+	}
+
+	memory_level->MinVddcPhases = 1;
+
+	if (data->vddc_phase_shed_control) {
+		iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
+				memory_clock, &memory_level->MinVddcPhases);
+	}
+
+	memory_level->EnabledForThrottle = 1;
+	memory_level->EnabledForActivity = 0;
+	memory_level->UpHyst = 0;
+	memory_level->DownHyst = 100;
+	memory_level->VoltageDownHyst = 0;
+
+	/* Indicates maximum activity level for this performance level.*/
+	memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+	memory_level->StutterEnable = 0;
+	memory_level->StrobeEnable = 0;
+	memory_level->EdcReadEnable = 0;
+	memory_level->EdcWriteEnable = 0;
+	memory_level->RttEnable = 0;
+
+	/* default set to low watermark. Highest level will be set to high later.*/
+	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+	data->display_timing.num_existing_displays = info.display_count;
+
+	/* stutter mode not support on iceland */
+
+	/* decide strobe mode*/
+	memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
+		(memory_clock <= mclk_strobe_mode_threshold);
+
+	/* decide EDC mode and memory clock ratio*/
+	if (data->is_memory_gddr5) {
+		memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
+					memory_level->StrobeEnable);
+
+		if ((mclk_edc_enable_threshold != 0) &&
+				(memory_clock > mclk_edc_enable_threshold)) {
+			memory_level->EdcReadEnable = 1;
+		}
+
+		if ((mclk_edc_wr_enable_threshold != 0) &&
+				(memory_clock > mclk_edc_wr_enable_threshold)) {
+			memory_level->EdcWriteEnable = 1;
+		}
+
+		if (memory_level->StrobeEnable) {
+			if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
+					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
+				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+			else
+				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
+		} else
+			dll_state_on = data->dll_default_on;
+	} else {
+		memory_level->StrobeRatio =
+			iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
+		dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+	}
+
+	result = iceland_calculate_mclk_params(hwmgr,
+		memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+
+	if (0 == result) {
+		memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
+		memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
+		memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
+		/* MCLK frequency in units of 10KHz*/
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
+		/* Indicates maximum activity level for this performance level.*/
+		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
+	}
+
+	return result;
+}
+
+/**
+ * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
+ *
+ * @param    hwmgr      the address of the hardware manager
+ */
+
+int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+	struct smu7_dpm_table *dpm_table = &data->dpm_table;
+	int result;
+
+	/* populate MCLK dpm table to SMU7 */
+	uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
+	uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
+	SMU71_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
+	uint32_t i;
+
+	memset(levels, 0x00, level_array_size);
+
+	for (i = 0; i < dpm_table->mclk_table.count; i++) {
+		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+			"can not populate memory level as memory clock is zero", return -EINVAL);
+		result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
+			&(smu_data->smc_state_table.MemoryLevel[i]));
+		if (0 != result) {
+			return result;
+		}
+	}
+
+	/* Only enable level 0 for now.*/
+	smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+	/*
+	* in order to prevent MC activity from stutter mode to push DPM up.
+	* the UVD change complements this by putting the MCLK in a higher state
+	* by default such that we are not effected by up threshold or and MCLK DPM latency.
+	*/
+	smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
+	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
+
+	smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
+	data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+	/* set highest level watermark to high*/
+	smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+
+	/* level count will send to smc once at init smc table and never change*/
+	result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+		level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size,
+		SMC_RAM_END);
+
+	return result;
+}
+
+static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
+					SMU71_Discrete_VoltageLevel *voltage)
+{
+	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	uint32_t i = 0;
+
+	if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+		/* find mvdd value which clock is more than request */
+		for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
+			if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
+				/* Always round to higher voltage. */
+				voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
+				break;
+			}
+		}
+
+		PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
+			"MVDD Voltage is outside the supported range.", return -EINVAL);
+
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+	SMU71_Discrete_DpmTable *table)
+{
+	int result = 0;
+	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	uint32_t vddc_phase_shed_control = 0;
+
+	SMU71_Discrete_VoltageLevel voltage_level;
+	uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+	uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+	uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
+	uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;
+
+
+	/* The ACPI state should not do DPM on DC (or ever).*/
+	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+	if (data->acpi_vddc)
+		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
+	else
+		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
+
+	table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 0 : 1;
+	/* assign zero for now*/
+	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+	/* get the engine clock dividers for this clock value*/
+	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+		table->ACPILevel.SclkFrequency,  &dividers);
+
+	PP_ASSERT_WITH_CODE(result == 0,
+		"Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+	/* divider ID for required SCLK*/
+	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+	table->ACPILevel.DeepSleepDivId = 0;
+
+	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
+							CG_SPLL_FUNC_CNTL,   SPLL_PWRON,     0);
+	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
+							CG_SPLL_FUNC_CNTL,   SPLL_RESET,     1);
+	spll_func_cntl_2    = PHM_SET_FIELD(spll_func_cntl_2,
+							CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL,   4);
+
+	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+	table->ACPILevel.CcPwrDynRm = 0;
+	table->ACPILevel.CcPwrDynRm1 = 0;
+
+
+	/* For various features to be enabled/disabled while this level is active.*/
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+	/* SCLK frequency in units of 10KHz*/
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+	/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
+	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
+	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
+
+	if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+		table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
+	else {
+		if (data->acpi_vddci != 0)
+			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
+		else
+			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
+	}
+
+	if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
+		table->MemoryACPILevel.MinMvdd =
+			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+	else
+		table->MemoryACPILevel.MinMvdd = 0;
+
+	/* Force reset on DLL*/
+	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+	/* Disable DLL in ACPIState*/
+	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+	/* Enable DLL bypass signal*/
+	dll_cntl            = PHM_SET_FIELD(dll_cntl,
+		DLL_CNTL, MRDCK0_BYPASS, 0);
+	dll_cntl            = PHM_SET_FIELD(dll_cntl,
+		DLL_CNTL, MRDCK1_BYPASS, 0);
+
+	table->MemoryACPILevel.DllCntl            =
+		PP_HOST_TO_SMC_UL(dll_cntl);
+	table->MemoryACPILevel.MclkPwrmgtCntl     =
+		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
+	table->MemoryACPILevel.MpllAdFuncCntl     =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
+	table->MemoryACPILevel.MpllDqFuncCntl     =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
+	table->MemoryACPILevel.MpllFuncCntl       =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
+	table->MemoryACPILevel.MpllFuncCntl_1     =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
+	table->MemoryACPILevel.MpllFuncCntl_2     =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
+	table->MemoryACPILevel.MpllSs1            =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
+	table->MemoryACPILevel.MpllSs2            =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
+
+	table->MemoryACPILevel.EnabledForThrottle = 0;
+	table->MemoryACPILevel.EnabledForActivity = 0;
+	table->MemoryACPILevel.UpHyst = 0;
+	table->MemoryACPILevel.DownHyst = 100;
+	table->MemoryACPILevel.VoltageDownHyst = 0;
+	/* Indicates maximum activity level for this performance level.*/
+	table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+	table->MemoryACPILevel.StutterEnable = 0;
+	table->MemoryACPILevel.StrobeEnable = 0;
+	table->MemoryACPILevel.EdcReadEnable = 0;
+	table->MemoryACPILevel.EdcWriteEnable = 0;
+	table->MemoryACPILevel.RttEnable = 0;
+
+	return result;
+}
+
+static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+					SMU71_Discrete_DpmTable *table)
+{
+	return 0;
+}
+
+static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+		SMU71_Discrete_DpmTable *table)
+{
+	return 0;
+}
+
+static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+		SMU71_Discrete_DpmTable *table)
+{
+	return 0;
+}
+
+static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+	SMU71_Discrete_DpmTable *table)
+{
+	return 0;
+}
+
+static int iceland_populate_memory_timing_parameters(
+		struct pp_hwmgr *hwmgr,
+		uint32_t engine_clock,
+		uint32_t memory_clock,
+		struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
+		)
+{
+	uint32_t dramTiming;
+	uint32_t dramTiming2;
+	uint32_t burstTime;
+	int result;
+
+	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+				engine_clock, memory_clock);
+
+	PP_ASSERT_WITH_CODE(result == 0,
+		"Error calling VBIOS to set DRAM_TIMING.", return result);
+
+	dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+	dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+	burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
+	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+	arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+	return 0;
+}
+
+/**
+ * Setup parameters for the MC ARB.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ * This function is to be called from the SetPowerState table.
+ */
+static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+	int result = 0;
+	SMU71_Discrete_MCArbDramTimingTable  arb_regs;
+	uint32_t i, j;
+
+	memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
+
+	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+		for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+			result = iceland_populate_memory_timing_parameters
+				(hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
+				 data->dpm_table.mclk_table.dpm_levels[j].value,
+				 &arb_regs.entries[i][j]);
+
+			if (0 != result) {
+				break;
+			}
+		}
+	}
+
+	if (0 == result) {
+		result = smu7_copy_bytes_to_smc(
+				hwmgr->smumgr,
+				smu_data->smu7_data.arb_table_start,
+				(uint8_t *)&arb_regs,
+				sizeof(SMU71_Discrete_MCArbDramTimingTable),
+				SMC_RAM_END
+				);
+	}
+
+	return result;
+}
+
+static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+			SMU71_Discrete_DpmTable *table)
+{
+	int result = 0;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+	table->GraphicsBootLevel = 0;
+	table->MemoryBootLevel = 0;
+
+	/* find boot level from dpm table*/
+	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+			data->vbios_boot_state.sclk_bootup_value,
+			(uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
+
+	if (0 != result) {
+		smu_data->smc_state_table.GraphicsBootLevel = 0;
+		printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \
+			in dependency table. Using Graphics DPM level 0!");
+		result = 0;
+	}
+
+	result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+		data->vbios_boot_state.mclk_bootup_value,
+		(uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
+
+	if (0 != result) {
+		smu_data->smc_state_table.MemoryBootLevel = 0;
+		printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \
+			in dependency table. Using Memory DPM level 0!");
+		result = 0;
+	}
+
+	table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
+	if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+		table->BootVddci = table->BootVddc;
+	else
+		table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
+
+	table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+	return result;
+}
+
+static int iceland_populate_mc_reg_address(struct pp_smumgr *smumgr,
+				 SMU71_Discrete_MCRegisters *mc_reg_table)
+{
+	const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)smumgr->backend;
+
+	uint32_t i, j;
+
+	for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
+		if (smu_data->mc_reg_table.validflag & 1<<j) {
+			PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+				"Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
+			mc_reg_table->address[i].s0 =
+				PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
+			mc_reg_table->address[i].s1 =
+				PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
+			i++;
+		}
+	}
+
+	mc_reg_table->last = (uint8_t)i;
+
+	return 0;
+}
+
+/*convert register values from driver to SMC format */
+static void iceland_convert_mc_registers(
+	const struct iceland_mc_reg_entry *entry,
+	SMU71_Discrete_MCRegisterSet *data,
+	uint32_t num_entries, uint32_t valid_flag)
+{
+	uint32_t i, j;
+
+	for (i = 0, j = 0; j < num_entries; j++) {
+		if (valid_flag & 1<<j) {
+			data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
+			i++;
+		}
+	}
+}
+
+static int iceland_convert_mc_reg_table_entry_to_smc(
+		struct pp_smumgr *smumgr,
+		const uint32_t memory_clock,
+		SMU71_Discrete_MCRegisterSet *mc_reg_table_data
+		)
+{
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+	uint32_t i = 0;
+
+	for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
+		if (memory_clock <=
+			smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+			break;
+		}
+	}
+
+	if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
+		--i;
+
+	iceland_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+				mc_reg_table_data, smu_data->mc_reg_table.last,
+				smu_data->mc_reg_table.validflag);
+
+	return 0;
+}
+
+static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+		SMU71_Discrete_MCRegisters *mc_regs)
+{
+	int result = 0;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	int res;
+	uint32_t i;
+
+	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+		res = iceland_convert_mc_reg_table_entry_to_smc(
+				hwmgr->smumgr,
+				data->dpm_table.mclk_table.dpm_levels[i].value,
+				&mc_regs->data[i]
+				);
+
+		if (0 != res)
+			result = res;
+	}
+
+	return result;
+}
+
+static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+	struct pp_smumgr *smumgr = hwmgr->smumgr;
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t address;
+	int32_t result;
+
+	if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+		return 0;
+
+
+	memset(&smu_data->mc_regs, 0, sizeof(SMU71_Discrete_MCRegisters));
+
+	result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+
+	if (result != 0)
+		return result;
+
+
+	address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);
+
+	return  smu7_copy_bytes_to_smc(hwmgr->smumgr, address,
+				 (uint8_t *)&smu_data->mc_regs.data[0],
+				sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
+				SMC_RAM_END);
+}
+
+static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	struct pp_smumgr *smumgr = hwmgr->smumgr;
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+
+	memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters));
+	result = iceland_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize MCRegTable for the MC register addresses!", return result;);
+
+	result = iceland_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize MCRegTable for driver state!", return result;);
+
+	return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start,
+			(uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END);
+}
+
+static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+	uint8_t count, level;
+
+	count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
+
+	for (level = 0; level < count; level++) {
+		if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
+			 >= data->vbios_boot_state.sclk_bootup_value) {
+			smu_data->smc_state_table.GraphicsBootLevel = level;
+			break;
+		}
+	}
+
+	count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
+
+	for (level = 0; level < count; level++) {
+		if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
+			>= data->vbios_boot_state.mclk_bootup_value) {
+			smu_data->smc_state_table.MemoryBootLevel = level;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+	const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+	SMU71_Discrete_DpmTable  *dpm_table = &(smu_data->smc_state_table);
+	struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
+	struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
+	const uint16_t *def1, *def2;
+	int i, j, k;
+
+
+	/*
+	 * TDP number of fraction bits are changed from 8 to 7 for Iceland
+	 * as requested by SMC team
+	 */
+
+	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
+	dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
+
+
+	dpm_table->DTETjOffset = 0;
+
+	dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
+	dpm_table->GpuTjHyst = 8;
+
+	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+	/* The following are for new Iceland Multi-input fan/thermal control */
+	if (NULL != ppm) {
+		dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
+		dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
+	} else {
+		dpm_table->PPM_PkgPwrLimit = 0;
+		dpm_table->PPM_TemperatureLimit = 0;
+	}
+
+	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
+	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
+
+	dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
+	def1 = defaults->bapmti_r;
+	def2 = defaults->bapmti_rc;
+
+	for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
+		for (j = 0; j < SMU71_DTE_SOURCES; j++) {
+			for (k = 0; k < SMU71_DTE_SINKS; k++) {
+				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
+				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
+				def1++;
+				def2++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
+					    SMU71_Discrete_DpmTable *tab)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+		tab->SVI2Enable |= VDDC_ON_SVI2;
+
+	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+		tab->SVI2Enable |= VDDCI_ON_SVI2;
+	else
+		tab->MergedVddci = 1;
+
+	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
+		tab->SVI2Enable |= MVDD_ON_SVI2;
+
+	PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
+		(tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
+
+	return 0;
+}
+
+/**
+ * Initializes the SMC table and uploads it
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @param    pInput  the pointer to input data (PowerState)
+ * @return   always 0
+ */
+int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+	SMU71_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
+
+
+	iceland_initialize_power_tune_defaults(hwmgr);
+	memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
+
+	if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) {
+		iceland_populate_smc_voltage_tables(hwmgr, table);
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_AutomaticDCTransition))
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StepVddc))
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+	if (data->is_memory_gddr5)
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+
+	if (data->ulv_supported) {
+		result = iceland_populate_ulv_state(hwmgr, &(smu_data->ulv_setting));
+		PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize ULV state!", return result;);
+
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_ULV_PARAMETER, 0x40035);
+	}
+
+	result = iceland_populate_smc_link_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize Link Level!", return result;);
+
+	result = iceland_populate_all_graphic_levels(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize Graphics Level!", return result;);
+
+	result = iceland_populate_all_memory_levels(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize Memory Level!", return result;);
+
+	result = iceland_populate_smc_acpi_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize ACPI Level!", return result;);
+
+	result = iceland_populate_smc_vce_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize VCE Level!", return result;);
+
+	result = iceland_populate_smc_acp_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize ACP Level!", return result;);
+
+	result = iceland_populate_smc_samu_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize SAMU Level!", return result;);
+
+	/* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
+	/* need to populate the  ARB settings for the initial state. */
+	result = iceland_program_memory_timing_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to Write ARB settings for the initial state.", return result;);
+
+	result = iceland_populate_smc_uvd_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize UVD Level!", return result;);
+
+	table->GraphicsBootLevel = 0;
+	table->MemoryBootLevel = 0;
+
+	result = iceland_populate_smc_boot_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to initialize Boot Level!", return result;);
+
+	result = iceland_populate_smc_initial_state(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
+
+	result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
+
+	table->GraphicsVoltageChangeEnable  = 1;
+	table->GraphicsThermThrottleEnable  = 1;
+	table->GraphicsInterval = 1;
+	table->VoltageInterval  = 1;
+	table->ThermalInterval  = 1;
+
+	table->TemperatureLimitHigh =
+		(data->thermal_temp_setting.temperature_high *
+		 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	table->TemperatureLimitLow =
+		(data->thermal_temp_setting.temperature_low *
+		SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+	table->MemoryVoltageChangeEnable  = 1;
+	table->MemoryInterval  = 1;
+	table->VoltageResponseTime  = 0;
+	table->PhaseResponseTime  = 0;
+	table->MemoryThermThrottleEnable  = 1;
+	table->PCIeBootLinkLevel = 0;
+	table->PCIeGenInterval = 1;
+
+	result = iceland_populate_smc_svi2_config(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to populate SVI2 setting!", return result);
+
+	table->ThermGpio  = 17;
+	table->SclkStepSize = 0x4000;
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+	table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
+	table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
+	table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
+
+	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+	result = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.dpm_table_start +
+										offsetof(SMU71_Discrete_DpmTable, SystemFlags),
+										(uint8_t *)&(table->SystemFlags),
+										sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController),
+										SMC_RAM_END);
+
+	PP_ASSERT_WITH_CODE(0 == result,
+		"Failed to upload dpm data to SMC memory!", return result;);
+
+	/* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */
+	result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+			smu_data->smu7_data.ulv_setting_starts,
+			(uint8_t *)&(smu_data->ulv_setting),
+			sizeof(SMU71_Discrete_Ulv),
+			SMC_RAM_END);
+
+
+	result = iceland_populate_initial_mc_reg_table(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == result),
+		"Failed to populate initialize MC Reg table!", return result);
+
+	result = iceland_populate_pm_fuses(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to  populate PM fuses to SMC memory!", return result);
+
+	return 0;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    pInput the pointer to input data
+* @param    pOutput the pointer to output data
+* @param    pStorage the pointer to temporary storage
+* @param    Result the last failure code
+* @return   result from set temperature range routine
+*/
+int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend);
+	SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+	uint32_t duty100;
+	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+	uint16_t fdo_min, slope1, slope2;
+	uint32_t reference_clock;
+	int res;
+	uint64_t tmp64;
+
+	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
+		return 0;
+
+	if (0 == smu7_data->fan_table_start) {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+		return 0;
+	}
+
+	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
+
+	if (0 == duty100) {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+		return 0;
+	}
+
+	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+	do_div(tmp64, 10000);
+	fdo_min = (uint16_t)tmp64;
+
+	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+	fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+	fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+	fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+	fan_table.Slope1 = cpu_to_be16(slope1);
+	fan_table.Slope2 = cpu_to_be16(slope2);
+
+	fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+	fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+	fan_table.HystUp = cpu_to_be16(1);
+
+	fan_table.HystSlope = cpu_to_be16(1);
+
+	fan_table.TempRespLim = cpu_to_be16(5);
+
+	reference_clock = smu7_get_xclk(hwmgr);
+
+	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+	/* fan_table.FanControl_GL_Flag = 1; */
+
+	res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
+
+	return 0;
+}
+
+
+static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (data->need_update_smu7_dpm_table &
+		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+		return iceland_program_memory_timing_parameters(hwmgr);
+
+	return 0;
+}
+
+int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+
+	int result = 0;
+	uint32_t low_sclk_interrupt_threshold = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkThrottleLowNotification)
+		&& (hwmgr->gfx_arbiter.sclk_threshold !=
+				data->low_sclk_interrupt_threshold)) {
+		data->low_sclk_interrupt_threshold =
+				hwmgr->gfx_arbiter.sclk_threshold;
+		low_sclk_interrupt_threshold =
+				data->low_sclk_interrupt_threshold;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+		result = smu7_copy_bytes_to_smc(
+				hwmgr->smumgr,
+				smu_data->smu7_data.dpm_table_start +
+				offsetof(SMU71_Discrete_DpmTable,
+					LowSclkInterruptThreshold),
+				(uint8_t *)&low_sclk_interrupt_threshold,
+				sizeof(uint32_t),
+				SMC_RAM_END);
+	}
+
+	result = iceland_update_and_upload_mc_reg_table(hwmgr);
+
+	PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
+
+	result = iceland_program_mem_timing_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE((result == 0),
+			"Failed to program memory timing parameters!",
+			);
+
+	return result;
+}
+
+uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
+{
+	switch (type) {
+	case SMU_SoftRegisters:
+		switch (member) {
+		case HandshakeDisables:
+			return offsetof(SMU71_SoftRegisters, HandshakeDisables);
+		case VoltageChangeTimeout:
+			return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout);
+		case AverageGraphicsActivity:
+			return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
+		case PreVBlankGap:
+			return offsetof(SMU71_SoftRegisters, PreVBlankGap);
+		case VBlankTimeout:
+			return offsetof(SMU71_SoftRegisters, VBlankTimeout);
+		case UcodeLoadStatus:
+			return offsetof(SMU71_SoftRegisters, UcodeLoadStatus);
+		}
+	case SMU_Discrete_DpmTable:
+		switch (member) {
+		case LowSclkInterruptThreshold:
+			return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
+		}
+	}
+	printk("cant't get the offset of type %x member %x \n", type, member);
+	return 0;
+}
+
+uint32_t iceland_get_mac_definition(uint32_t value)
+{
+	switch (value) {
+	case SMU_MAX_LEVELS_GRAPHICS:
+		return SMU71_MAX_LEVELS_GRAPHICS;
+	case SMU_MAX_LEVELS_MEMORY:
+		return SMU71_MAX_LEVELS_MEMORY;
+	case SMU_MAX_LEVELS_LINK:
+		return SMU71_MAX_LEVELS_LINK;
+	case SMU_MAX_ENTRIES_SMIO:
+		return SMU71_MAX_ENTRIES_SMIO;
+	case SMU_MAX_LEVELS_VDDC:
+		return SMU71_MAX_LEVELS_VDDC;
+	case SMU_MAX_LEVELS_VDDCI:
+		return SMU71_MAX_LEVELS_VDDCI;
+	case SMU_MAX_LEVELS_MVDD:
+		return SMU71_MAX_LEVELS_MVDD;
+	}
+
+	printk("cant't get the mac of %x \n", value);
+	return 0;
+}
+
+/**
+ * Get the location of various tables inside the FW image.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend);
+
+	uint32_t tmp;
+	int result;
+	bool error = false;
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, DpmTable),
+				&tmp, SMC_RAM_END);
+
+	if (0 == result) {
+		smu7_data->dpm_table_start = tmp;
+	}
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, SoftRegisters),
+				&tmp, SMC_RAM_END);
+
+	if (0 == result) {
+		data->soft_regs_start = tmp;
+		smu7_data->soft_regs_start = tmp;
+	}
+
+	error |= (0 != result);
+
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, mcRegisterTable),
+				&tmp, SMC_RAM_END);
+
+	if (0 == result) {
+		smu7_data->mc_reg_table_start = tmp;
+	}
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, FanTable),
+				&tmp, SMC_RAM_END);
+
+	if (0 == result) {
+		smu7_data->fan_table_start = tmp;
+	}
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
+				&tmp, SMC_RAM_END);
+
+	if (0 == result) {
+		smu7_data->arb_table_start = tmp;
+	}
+
+	error |= (0 != result);
+
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, Version),
+				&tmp, SMC_RAM_END);
+
+	if (0 == result) {
+		hwmgr->microcode_version_info.SMC = tmp;
+	}
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, UlvSettings),
+				&tmp, SMC_RAM_END);
+
+	if (0 == result) {
+		smu7_data->ulv_setting_starts = tmp;
+	}
+
+	error |= (0 != result);
+
+	return error ? 1 : 0;
+}
+
+/*---------------------------MC----------------------------*/
+
+static uint8_t iceland_get_memory_modile_index(struct pp_hwmgr *hwmgr)
+{
+	return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
+}
+
+static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+{
+	bool result = true;
+
+	switch (in_reg) {
+	case  mmMC_SEQ_RAS_TIMING:
+		*out_reg = mmMC_SEQ_RAS_TIMING_LP;
+		break;
+
+	case  mmMC_SEQ_DLL_STBY:
+		*out_reg = mmMC_SEQ_DLL_STBY_LP;
+		break;
+
+	case  mmMC_SEQ_G5PDX_CMD0:
+		*out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+		break;
+
+	case  mmMC_SEQ_G5PDX_CMD1:
+		*out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+		break;
+
+	case  mmMC_SEQ_G5PDX_CTRL:
+		*out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+		break;
+
+	case mmMC_SEQ_CAS_TIMING:
+		*out_reg = mmMC_SEQ_CAS_TIMING_LP;
+		break;
+
+	case mmMC_SEQ_MISC_TIMING:
+		*out_reg = mmMC_SEQ_MISC_TIMING_LP;
+		break;
+
+	case mmMC_SEQ_MISC_TIMING2:
+		*out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+		break;
+
+	case mmMC_SEQ_PMG_DVS_CMD:
+		*out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+		break;
+
+	case mmMC_SEQ_PMG_DVS_CTL:
+		*out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+		break;
+
+	case mmMC_SEQ_RD_CTL_D0:
+		*out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+		break;
+
+	case mmMC_SEQ_RD_CTL_D1:
+		*out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+		break;
+
+	case mmMC_SEQ_WR_CTL_D0:
+		*out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+		break;
+
+	case mmMC_SEQ_WR_CTL_D1:
+		*out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+		break;
+
+	case mmMC_PMG_CMD_EMRS:
+		*out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+		break;
+
+	case mmMC_PMG_CMD_MRS:
+		*out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+		break;
+
+	case mmMC_PMG_CMD_MRS1:
+		*out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+		break;
+
+	case mmMC_SEQ_PMG_TIMING:
+		*out_reg = mmMC_SEQ_PMG_TIMING_LP;
+		break;
+
+	case mmMC_PMG_CMD_MRS2:
+		*out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+		break;
+
+	case mmMC_SEQ_WR_CTL_2:
+		*out_reg = mmMC_SEQ_WR_CTL_2_LP;
+		break;
+
+	default:
+		result = false;
+		break;
+	}
+
+	return result;
+}
+
+static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table)
+{
+	uint32_t i;
+	uint16_t address;
+
+	for (i = 0; i < table->last; i++) {
+		table->mc_reg_address[i].s0 =
+			iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
+			? address : table->mc_reg_address[i].s1;
+	}
+	return 0;
+}
+
+static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+					struct iceland_mc_reg_table *ni_table)
+{
+	uint8_t i, j;
+
+	PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+		"Invalid VramInfo table.", return -EINVAL);
+	PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+		"Invalid VramInfo table.", return -EINVAL);
+
+	for (i = 0; i < table->last; i++) {
+		ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+	}
+	ni_table->last = table->last;
+
+	for (i = 0; i < table->num_entries; i++) {
+		ni_table->mc_reg_table_entry[i].mclk_max =
+			table->mc_reg_table_entry[i].mclk_max;
+		for (j = 0; j < table->last; j++) {
+			ni_table->mc_reg_table_entry[i].mc_data[j] =
+				table->mc_reg_table_entry[i].mc_data[j];
+		}
+	}
+
+	ni_table->num_entries = table->num_entries;
+
+	return 0;
+}
+
+/**
+ * VBIOS omits some information to reduce size, we need to recover them here.
+ * 1.   when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to  mmMC_PMG_CMD_EMRS /_LP[15:0].
+ *      Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0]
+ * 2.   when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0].
+ * 3.   need to set these data for each clock range
+ *
+ * @param    hwmgr the address of the powerplay hardware manager.
+ * @param    table the address of MCRegTable
+ * @return   always 0
+ */
+static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+					struct iceland_mc_reg_table *table)
+{
+	uint8_t i, j, k;
+	uint32_t temp_reg;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	for (i = 0, j = table->last; i < table->last; i++) {
+		PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+			"Invalid VramInfo table.", return -EINVAL);
+
+		switch (table->mc_reg_address[i].s1) {
+
+		case mmMC_SEQ_MISC1:
+			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
+			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					((temp_reg & 0xffff0000)) |
+					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+			}
+			j++;
+			PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+				"Invalid VramInfo table.", return -EINVAL);
+
+			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) |
+					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+				if (!data->is_memory_gddr5) {
+					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+				}
+			}
+			j++;
+			PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+				"Invalid VramInfo table.", return -EINVAL);
+
+			if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
+				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+				for (k = 0; k < table->num_entries; k++) {
+					table->mc_reg_table_entry[k].mc_data[j] =
+						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+				}
+				j++;
+				PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+					"Invalid VramInfo table.", return -EINVAL);
+			}
+
+			break;
+
+		case mmMC_SEQ_RESERVE_M:
+			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) |
+					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+			}
+			j++;
+			PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+				"Invalid VramInfo table.", return -EINVAL);
+			break;
+
+		default:
+			break;
+		}
+
+	}
+
+	table->last = j;
+
+	return 0;
+}
+
+static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
+{
+	uint8_t i, j;
+	for (i = 0; i < table->last; i++) {
+		for (j = 1; j < table->num_entries; j++) {
+			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+				table->mc_reg_table_entry[j].mc_data[i]) {
+				table->validflag |= (1<<i);
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+	pp_atomctrl_mc_reg_table *table;
+	struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+	uint8_t module_index = iceland_get_memory_modile_index(hwmgr);
+
+	table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+	if (NULL == table)
+		return -ENOMEM;
+
+	/* Program additional LP registers that are no longer programmed by VBIOS */
+	cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+	memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+	result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+	if (0 == result)
+		result = iceland_copy_vbios_smc_reg_table(table, ni_table);
+
+	if (0 == result) {
+		iceland_set_s0_mc_reg_index(ni_table);
+		result = iceland_set_mc_special_registers(hwmgr, ni_table);
+	}
+
+	if (0 == result)
+		iceland_set_valid_flag(ni_table);
+
+	kfree(table);
+
+	return result;
+}
+
+bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+	return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+			? true : false;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h
similarity index 63%
copy from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
copy to drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h
index d24b888..13c8dbb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h
@@ -20,16 +20,21 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#ifndef TONGA_PROCESSPPTABLES_H
-#define TONGA_PROCESSPPTABLES_H
+#ifndef _ICELAND_SMC_H
+#define _ICELAND_SMC_H
 
-#include "hwmgr.h"
+#include "smumgr.h"
 
-extern const struct pp_table_func tonga_pptable_funcs;
-extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
-extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
-		struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
-				struct pp_power_state *, void *, uint32_t));
 
+int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+int iceland_init_smc_table(struct pp_hwmgr *hwmgr);
+int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
+int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+uint32_t iceland_get_offsetof(uint32_t type, uint32_t member);
+uint32_t iceland_get_mac_definition(uint32_t value);
+int iceland_process_firmware_header(struct pp_hwmgr *hwmgr);
+int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr);
 #endif
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
new file mode 100644
index 0000000..eeafefc
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+
+#include "smumgr.h"
+#include "iceland_smumgr.h"
+#include "pp_debug.h"
+#include "smu_ucode_xfer_vi.h"
+#include "ppsmc.h"
+#include "smu/smu_7_1_1_d.h"
+#include "smu/smu_7_1_1_sh_mask.h"
+#include "cgs_common.h"
+#include "iceland_smc.h"
+
+#define ICELAND_SMC_SIZE               0x20000
+
+static int iceland_start_smc(struct pp_smumgr *smumgr)
+{
+	SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+				  SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+	return 0;
+}
+
+static void iceland_reset_smc(struct pp_smumgr *smumgr)
+{
+	SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+				  SMC_SYSCON_RESET_CNTL,
+				  rst_reg, 1);
+}
+
+
+static void iceland_stop_smc_clock(struct pp_smumgr *smumgr)
+{
+	SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+				  SMC_SYSCON_CLOCK_CNTL_0,
+				  ck_disable, 1);
+}
+
+static void iceland_start_smc_clock(struct pp_smumgr *smumgr)
+{
+	SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+				  SMC_SYSCON_CLOCK_CNTL_0,
+				  ck_disable, 0);
+}
+
+static int iceland_smu_start_smc(struct pp_smumgr *smumgr)
+{
+	/* set smc instruct start point at 0x0 */
+	smu7_program_jump_on_start(smumgr);
+
+	/* enable smc clock */
+	iceland_start_smc_clock(smumgr);
+
+	/* de-assert reset */
+	iceland_start_smc(smumgr);
+
+	SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS,
+				 INTERRUPTS_ENABLED, 1);
+
+	return 0;
+}
+
+
+static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr,
+					uint32_t length, const uint8_t *src,
+					uint32_t limit, uint32_t start_addr)
+{
+	uint32_t byte_count = length;
+	uint32_t data;
+
+	PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
+
+	cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, start_addr);
+	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+
+	while (byte_count >= 4) {
+		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
+		cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
+		src += 4;
+		byte_count -= 4;
+	}
+
+	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+
+	PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL);
+
+	return 0;
+}
+
+
+static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
+{
+	uint32_t val;
+	struct cgs_firmware_info info = {0};
+
+	if (smumgr == NULL || smumgr->device == NULL)
+		return -EINVAL;
+
+	/* load SMC firmware */
+	cgs_get_firmware_info(smumgr->device,
+		smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
+
+	if (info.image_size & 3) {
+		pr_err("[ powerplay ] SMC ucode is not 4 bytes aligned\n");
+		return -EINVAL;
+	}
+
+	if (info.image_size > ICELAND_SMC_SIZE) {
+		pr_err("[ powerplay ] SMC address is beyond the SMC RAM area\n");
+		return -EINVAL;
+	}
+
+	/* wait for smc boot up */
+	SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+					 RCU_UC_EVENTS, boot_seq_done, 0);
+
+	/* clear firmware interrupt enable flag */
+	val = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC,
+				    ixSMC_SYSCON_MISC_CNTL);
+	cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+			       ixSMC_SYSCON_MISC_CNTL, val | 1);
+
+	/* stop smc clock */
+	iceland_stop_smc_clock(smumgr);
+
+	/* reset smc */
+	iceland_reset_smc(smumgr);
+	iceland_upload_smc_firmware_data(smumgr, info.image_size,
+				(uint8_t *)info.kptr, ICELAND_SMC_SIZE,
+				info.ucode_start_address);
+
+	return 0;
+}
+
+static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
+						uint32_t firmwareType)
+{
+	return 0;
+}
+
+static int iceland_start_smu(struct pp_smumgr *smumgr)
+{
+	int result;
+
+	result = iceland_smu_upload_firmware_image(smumgr);
+	if (result)
+		return result;
+	result = iceland_smu_start_smc(smumgr);
+	if (result)
+		return result;
+
+	if (!smu7_is_smc_ram_running(smumgr)) {
+		printk("smu not running, upload firmware again \n");
+		result = iceland_smu_upload_firmware_image(smumgr);
+		if (result)
+			return result;
+
+		result = iceland_smu_start_smc(smumgr);
+		if (result)
+			return result;
+	}
+
+	result = smu7_request_smu_load_fw(smumgr);
+
+	return result;
+}
+
+/**
+ * Write a 32bit value to the SMC SRAM space.
+ * ALL PARAMETERS ARE IN HOST BYTE ORDER.
+ * @param    smumgr  the address of the powerplay hardware manager.
+ * @param    smcAddress the address in the SMC RAM to access.
+ * @param    value to write to the SMC SRAM.
+ */
+static int iceland_smu_init(struct pp_smumgr *smumgr)
+{
+	int i;
+	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+	if (smu7_init(smumgr))
+		return -EINVAL;
+
+	for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++)
+		smu_data->activity_target[i] = 30;
+
+	return 0;
+}
+
+static const struct pp_smumgr_func iceland_smu_funcs = {
+	.smu_init = &iceland_smu_init,
+	.smu_fini = &smu7_smu_fini,
+	.start_smu = &iceland_start_smu,
+	.check_fw_load_finish = &smu7_check_fw_load_finish,
+	.request_smu_load_fw = &smu7_reload_firmware,
+	.request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw,
+	.send_msg_to_smc = &smu7_send_msg_to_smc,
+	.send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+	.download_pptable_settings = NULL,
+	.upload_pptable_settings = NULL,
+	.get_offsetof = iceland_get_offsetof,
+	.process_firmware_header = iceland_process_firmware_header,
+	.init_smc_table = iceland_init_smc_table,
+	.update_sclk_threshold = iceland_update_sclk_threshold,
+	.thermal_setup_fan_table = iceland_thermal_setup_fan_table,
+	.populate_all_graphic_levels = iceland_populate_all_graphic_levels,
+	.populate_all_memory_levels = iceland_populate_all_memory_levels,
+	.get_mac_definition = iceland_get_mac_definition,
+	.initialize_mc_reg_table = iceland_initialize_mc_reg_table,
+	.is_dpm_running = iceland_is_dpm_running,
+};
+
+int iceland_smum_init(struct pp_smumgr *smumgr)
+{
+	struct iceland_smumgr *iceland_smu = NULL;
+
+	iceland_smu = kzalloc(sizeof(struct iceland_smumgr), GFP_KERNEL);
+
+	if (iceland_smu == NULL)
+		return -ENOMEM;
+
+	smumgr->backend = iceland_smu;
+	smumgr->smumgr_funcs = &iceland_smu_funcs;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
new file mode 100644
index 0000000..8eae01b
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+
+#ifndef _ICELAND_SMUMGR_H_
+#define _ICELAND_SMUMGR_H_
+
+
+#include "smu7_smumgr.h"
+#include "pp_endian.h"
+#include "smu71_discrete.h"
+
+struct iceland_pt_defaults {
+	uint8_t   svi_load_line_en;
+	uint8_t   svi_load_line_vddc;
+	uint8_t   tdc_vddc_throttle_release_limit_perc;
+	uint8_t   tdc_mawt;
+	uint8_t   tdc_waterfall_ctl;
+	uint8_t   dte_ambient_temp_base;
+	uint32_t  display_cac;
+	uint32_t  bamp_temp_gradient;
+	uint16_t  bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
+	uint16_t  bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
+};
+
+struct iceland_mc_reg_entry {
+	uint32_t mclk_max;
+	uint32_t mc_data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct iceland_mc_reg_table {
+	uint8_t   last;               /* number of registers*/
+	uint8_t   num_entries;        /* number of entries in mc_reg_table_entry used*/
+	uint16_t  validflag;          /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
+	struct iceland_mc_reg_entry    mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+	SMU71_Discrete_MCRegisterAddress mc_reg_address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct iceland_smumgr {
+	struct smu7_smumgr smu7_data;
+	struct SMU71_Discrete_DpmTable       smc_state_table;
+	struct SMU71_Discrete_PmFuses  power_tune_table;
+	struct SMU71_Discrete_Ulv            ulv_setting;
+	const struct iceland_pt_defaults  *power_tune_defaults;
+	SMU71_Discrete_MCRegisters      mc_regs;
+	struct iceland_mc_reg_table mc_reg_table;
+	uint32_t        activity_target[SMU71_MAX_LEVELS_GRAPHICS];
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
new file mode 100644
index 0000000..4ccc0b7
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
@@ -0,0 +1,2287 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "polaris10_smc.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "polaris10_smumgr.h"
+#include "pppcielanes.h"
+
+#include "smu_ucode_xfer_vi.h"
+#include "smu74_discrete.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gca/gfx_8_0_d.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "polaris10_pwrvirus.h"
+#include "smu7_ppsmc.h"
+#include "smu7_smumgr.h"
+
+#define POLARIS10_SMC_SIZE 0x20000
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100
+#define POWERTUNE_DEFAULT_SET_MAX    1
+#define VDDC_VDDCI_DELTA            200
+#define MC_CG_ARB_FREQ_F1           0x0b
+
+static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+	/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+	 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
+	{ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
+};
+
+static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
+			{VCO_2_4, POSTDIV_DIV_BY_16,  75, 160, 112},
+			{VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
+			{VCO_2_4, POSTDIV_DIV_BY_8,   75, 160, 112},
+			{VCO_3_6, POSTDIV_DIV_BY_8,  112, 224, 160},
+			{VCO_2_4, POSTDIV_DIV_BY_4,   75, 160, 112},
+			{VCO_3_6, POSTDIV_DIV_BY_4,  112, 216, 160},
+			{VCO_2_4, POSTDIV_DIV_BY_2,   75, 160, 108},
+			{VCO_3_6, POSTDIV_DIV_BY_2,  112, 216, 160} };
+
+static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+		uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+	uint32_t i;
+	uint16_t vddci;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	*voltage = *mvdd = 0;
+
+	/* clock - voltage dependency table is empty table */
+	if (dep_table->count == 0)
+		return -EINVAL;
+
+	for (i = 0; i < dep_table->count; i++) {
+		/* find first sclk bigger than request */
+		if (dep_table->entries[i].clk >= clock) {
+			*voltage |= (dep_table->entries[i].vddc *
+					VOLTAGE_SCALE) << VDDC_SHIFT;
+			if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+				*voltage |= (data->vbios_boot_state.vddci_bootup_value *
+						VOLTAGE_SCALE) << VDDCI_SHIFT;
+			else if (dep_table->entries[i].vddci)
+				*voltage |= (dep_table->entries[i].vddci *
+						VOLTAGE_SCALE) << VDDCI_SHIFT;
+			else {
+				vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+						(dep_table->entries[i].vddc -
+								(uint16_t)VDDC_VDDCI_DELTA));
+				*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+			}
+
+			if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+				*mvdd = data->vbios_boot_state.mvdd_bootup_value *
+					VOLTAGE_SCALE;
+			else if (dep_table->entries[i].mvdd)
+				*mvdd = (uint32_t) dep_table->entries[i].mvdd *
+					VOLTAGE_SCALE;
+
+			*voltage |= 1 << PHASES_SHIFT;
+			return 0;
+		}
+	}
+
+	/* sclk is bigger than max sclk in the dependence table */
+	*voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+	if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+		*voltage |= (data->vbios_boot_state.vddci_bootup_value *
+				VOLTAGE_SCALE) << VDDCI_SHIFT;
+	else if (dep_table->entries[i-1].vddci) {
+		vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+				(dep_table->entries[i].vddc -
+						(uint16_t)VDDC_VDDCI_DELTA));
+		*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+	}
+
+	if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+		*mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+	else if (dep_table->entries[i].mvdd)
+		*mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+	return 0;
+}
+
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+	uint32_t tmp;
+	tmp = raw_setting * 4096 / 100;
+	return (uint16_t)tmp;
+}
+
+static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+	const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+	SMU74_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+	struct pp_advance_fan_control_parameters *fan_table =
+			&hwmgr->thermal_controller.advanceFanControlParameters;
+	int i, j, k;
+	const uint16_t *pdef1;
+	const uint16_t *pdef2;
+
+	table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+	table->TargetTdp  = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+
+	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+				"Target Operating Temp is out of Range!",
+				);
+
+	table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+			cac_dtp_table->usTargetOperatingTemp * 256);
+	table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+			cac_dtp_table->usTemperatureLimitHotspot * 256);
+	table->FanGainEdge = PP_HOST_TO_SMC_US(
+			scale_fan_gain_settings(fan_table->usFanGainEdge));
+	table->FanGainHotspot = PP_HOST_TO_SMC_US(
+			scale_fan_gain_settings(fan_table->usFanGainHotspot));
+
+	pdef1 = defaults->BAPMTI_R;
+	pdef2 = defaults->BAPMTI_RC;
+
+	for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
+		for (j = 0; j < SMU74_DTE_SOURCES; j++) {
+			for (k = 0; k < SMU74_DTE_SINKS; k++) {
+				table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
+				table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
+				pdef1++;
+				pdef2++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+	const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+	smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+	smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+	smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+	smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+	return 0;
+}
+
+static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+	uint16_t tdc_limit;
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+	tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+	smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+	smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+			defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+	smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+	return 0;
+}
+
+static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+	const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+	uint32_t temp;
+
+	if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+			fuse_table_offset +
+			offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
+			(uint32_t *)&temp, SMC_RAM_END))
+		PP_ASSERT_WITH_CODE(false,
+				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+				return -EINVAL);
+	else {
+		smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+		smu_data->power_tune_table.LPMLTemperatureMin =
+				(uint8_t)((temp >> 16) & 0xff);
+		smu_data->power_tune_table.LPMLTemperatureMax =
+				(uint8_t)((temp >> 8) & 0xff);
+		smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+	}
+	return 0;
+}
+
+static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+	/* Currently not used. Set all to zero. */
+	for (i = 0; i < 16; i++)
+		smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+	return 0;
+}
+
+static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+/* TO DO move to hwmgr */
+	if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
+		|| 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
+		hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+			hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+	smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
+				hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
+	return 0;
+}
+
+static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+	/* Currently not used. Set all to zero. */
+	for (i = 0; i < 16; i++)
+		smu_data->power_tune_table.GnbLPML[i] = 0;
+
+	return 0;
+}
+
+static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+	return 0;
+}
+
+static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+	uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+	struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+	hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+	lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+	smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+	smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+	return 0;
+}
+
+static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+	uint32_t pm_fuse_table_offset;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment)) {
+		if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+				SMU7_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU74_Firmware_Header, PmFuseTable),
+				&pm_fuse_table_offset, SMC_RAM_END))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to get pm_fuse_table_offset Failed!",
+					return -EINVAL);
+
+		if (polaris10_populate_svi_load_line(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate SviLoadLine Failed!",
+					return -EINVAL);
+
+		if (polaris10_populate_tdc_limit(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate TDCLimit Failed!", return -EINVAL);
+
+		if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate TdcWaterfallCtl, "
+					"LPMLTemperature Min and Max Failed!",
+					return -EINVAL);
+
+		if (0 != polaris10_populate_temperature_scaler(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate LPMLTemperatureScaler Failed!",
+					return -EINVAL);
+
+		if (polaris10_populate_fuzzy_fan(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate Fuzzy Fan Control parameters Failed!",
+					return -EINVAL);
+
+		if (polaris10_populate_gnb_lpml(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate GnbLPML Failed!",
+					return -EINVAL);
+
+		if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate GnbLPML Min and Max Vid Failed!",
+					return -EINVAL);
+
+		if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+					"Sidd Failed!", return -EINVAL);
+
+		if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+				(uint8_t *)&smu_data->power_tune_table,
+				(sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to download PmFuseTable Failed!",
+					return -EINVAL);
+	}
+	return 0;
+}
+
+/**
+ * Mvdd table preparation for SMC.
+ *
+ * @param    *hwmgr The address of the hardware manager.
+ * @param    *table The SMC DPM table structure to be populated.
+ * @return   0
+ */
+static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+			SMU74_Discrete_DpmTable *table)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t count, level;
+
+	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+		count = data->mvdd_voltage_table.count;
+		if (count > SMU_MAX_SMIO_LEVELS)
+			count = SMU_MAX_SMIO_LEVELS;
+		for (level = 0; level < count; level++) {
+			table->SmioTable2.Pattern[level].Voltage =
+				PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+			/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+			table->SmioTable2.Pattern[level].Smio =
+				(uint8_t) level;
+			table->Smio[level] |=
+				data->mvdd_voltage_table.entries[level].smio_low;
+		}
+		table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+		table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
+	}
+
+	return 0;
+}
+
+static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
+					struct SMU74_Discrete_DpmTable *table)
+{
+	uint32_t count, level;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	count = data->vddci_voltage_table.count;
+
+	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+		if (count > SMU_MAX_SMIO_LEVELS)
+			count = SMU_MAX_SMIO_LEVELS;
+		for (level = 0; level < count; ++level) {
+			table->SmioTable1.Pattern[level].Voltage =
+				PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
+			table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
+
+			table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
+		}
+	}
+
+	table->SmioMask1 = data->vddci_voltage_table.mask_low;
+
+	return 0;
+}
+
+/**
+* Preparation of vddc and vddgfx CAC tables for SMC.
+*
+* @param    hwmgr  the address of the hardware manager
+* @param    table  the SMC DPM table structure to be populated
+* @return   always 0
+*/
+static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_DpmTable *table)
+{
+	uint32_t count;
+	uint8_t index;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+			table_info->vddc_lookup_table;
+	/* tables is already swapped, so in order to use the value from it,
+	 * we need to swap it back.
+	 * We are populating vddc CAC data to BapmVddc table
+	 * in split and merged mode
+	 */
+	for (count = 0; count < lookup_table->count; count++) {
+		index = phm_get_voltage_index(lookup_table,
+				data->vddc_voltage_table.entries[count].value);
+		table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
+		table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
+		table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
+	}
+
+	return 0;
+}
+
+/**
+* Preparation of voltage tables for SMC.
+*
+* @param    hwmgr   the address of the hardware manager
+* @param    table   the SMC DPM table structure to be populated
+* @return   always  0
+*/
+
+static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_DpmTable *table)
+{
+	polaris10_populate_smc_vddci_table(hwmgr, table);
+	polaris10_populate_smc_mvdd_table(hwmgr, table);
+	polaris10_populate_cac_table(hwmgr, table);
+
+	return 0;
+}
+
+static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_Ulv *state)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	state->CcPwrDynRm = 0;
+	state->CcPwrDynRm1 = 0;
+
+	state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+	state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+			VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+	state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
+
+	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+	return 0;
+}
+
+static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_DpmTable *table)
+{
+	return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_DpmTable *table)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+	struct smu7_dpm_table *dpm_table = &data->dpm_table;
+	int i;
+
+	/* Index (dpm_table->pcie_speed_table.count)
+	 * is reserved for PCIE boot level. */
+	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+		table->LinkLevel[i].PcieGenSpeed  =
+				(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+		table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+				dpm_table->pcie_speed_table.dpm_levels[i].param1);
+		table->LinkLevel[i].EnabledForActivity = 1;
+		table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+		table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+		table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+	}
+
+	smu_data->smc_state_table.LinkLevelCount =
+			(uint8_t)dpm_table->pcie_speed_table.count;
+
+/* To Do move to hwmgr */
+	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+	return 0;
+}
+
+
+static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
+				   SMU74_Discrete_DpmTable  *table)
+{
+	struct pp_smumgr *smumgr = hwmgr->smumgr;
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	uint32_t i, ref_clk;
+
+	struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
+
+	ref_clk = smu7_get_xclk(hwmgr);
+
+	if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
+		for (i = 0; i < NUM_SCLK_RANGE; i++) {
+			table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
+			table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
+			table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
+
+			table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
+			table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
+
+			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+		}
+		return;
+	}
+
+	for (i = 0; i < NUM_SCLK_RANGE; i++) {
+		smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
+		smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
+
+		table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
+		table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
+		table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
+
+		table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
+		table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
+
+		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+	}
+}
+
+/**
+* Calculates the SCLK dividers using the provided engine clock
+*
+* @param    hwmgr  the address of the hardware manager
+* @param    clock  the engine clock to use to populate the structure
+* @param    sclk   the SMC SCLK structure to be populated
+*/
+static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+		uint32_t clock, SMU_SclkSetting *sclk_setting)
+{
+	struct pp_smumgr *smumgr = hwmgr->smumgr;
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+	struct pp_atomctrl_clock_dividers_ai dividers;
+	uint32_t ref_clock;
+	uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
+	uint8_t i;
+	int result;
+	uint64_t temp;
+
+	sclk_setting->SclkFrequency = clock;
+	/* get the engine clock dividers for this clock value */
+	result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock,  &dividers);
+	if (result == 0) {
+		sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
+		sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
+		sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
+		sclk_setting->PllRange = dividers.ucSclkPllRange;
+		sclk_setting->Sclk_slew_rate = 0x400;
+		sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
+		sclk_setting->Pcc_down_slew_rate = 0xffff;
+		sclk_setting->SSc_En = dividers.ucSscEnable;
+		sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
+		sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
+		sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
+		return result;
+	}
+
+	ref_clock = smu7_get_xclk(hwmgr);
+
+	for (i = 0; i < NUM_SCLK_RANGE; i++) {
+		if (clock > smu_data->range_table[i].trans_lower_frequency
+		&& clock <= smu_data->range_table[i].trans_upper_frequency) {
+			sclk_setting->PllRange = i;
+			break;
+		}
+	}
+
+	sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+	temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+	temp <<= 0x10;
+	do_div(temp, ref_clock);
+	sclk_setting->Fcw_frac = temp & 0xffff;
+
+	pcc_target_percent = 10; /*  Hardcode 10% for now. */
+	pcc_target_freq = clock - (clock * pcc_target_percent / 100);
+	sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+
+	ss_target_percent = 2; /*  Hardcode 2% for now. */
+	sclk_setting->SSc_En = 0;
+	if (ss_target_percent) {
+		sclk_setting->SSc_En = 1;
+		ss_target_freq = clock - (clock * ss_target_percent / 100);
+		sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+		temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+		temp <<= 0x10;
+		do_div(temp, ref_clock);
+		sclk_setting->Fcw1_frac = temp & 0xffff;
+	}
+
+	return 0;
+}
+
+/**
+* Populates single SMC SCLK structure using the provided engine clock
+*
+* @param    hwmgr      the address of the hardware manager
+* @param    clock the engine clock to use to populate the structure
+* @param    sclk        the SMC SCLK structure to be populated
+*/
+
+static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+		uint32_t clock, uint16_t sclk_al_threshold,
+		struct SMU74_Discrete_GraphicsLevel *level)
+{
+	int result;
+	/* PP_Clocks minClocks; */
+	uint32_t mvdd;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	SMU_SclkSetting curr_sclk_setting = { 0 };
+
+	result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
+
+	/* populate graphics levels */
+	result = polaris10_get_dependency_volt_by_clk(hwmgr,
+			table_info->vdd_dep_on_sclk, clock,
+			&level->MinVoltage, &mvdd);
+
+	PP_ASSERT_WITH_CODE((0 == result),
+			"can not find VDDC voltage value for "
+			"VDDC engine clock dependency table",
+			return result);
+	level->ActivityLevel = sclk_al_threshold;
+
+	level->CcPwrDynRm = 0;
+	level->CcPwrDynRm1 = 0;
+	level->EnabledForActivity = 0;
+	level->EnabledForThrottle = 1;
+	level->UpHyst = 10;
+	level->DownHyst = 0;
+	level->VoltageDownHyst = 0;
+	level->PowerThrottle = 0;
+	data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+		level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
+								hwmgr->display_config.min_core_set_clock_in_sr);
+
+	/* Default to slow, highest DPM level will be
+	 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
+	 */
+	if (data->update_up_hyst)
+		level->UpHyst = (uint8_t)data->up_hyst;
+	if (data->update_down_hyst)
+		level->DownHyst = (uint8_t)data->down_hyst;
+
+	level->SclkSetting = curr_sclk_setting;
+
+	CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+	CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
+	return 0;
+}
+
+/**
+* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+*
+* @param    hwmgr      the address of the hardware manager
+*/
+int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+	struct pp_smumgr *smumgr = hwmgr->smumgr;
+	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+	uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
+	int result = 0;
+	uint32_t array = smu_data->smu7_data.dpm_table_start +
+			offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
+	uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
+			SMU74_MAX_LEVELS_GRAPHICS;
+	struct SMU74_Discrete_GraphicsLevel *levels =
+			smu_data->smc_state_table.GraphicsLevel;
+	uint32_t i, max_entry;
+	uint8_t hightest_pcie_level_enabled = 0,
+		lowest_pcie_level_enabled = 0,
+		mid_pcie_level_enabled = 0,
+		count = 0;
+
+	polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));
+
+	for (i = 0; i < dpm_table->sclk_table.count; i++) {
+
+		result = polaris10_populate_single_graphic_level(hwmgr,
+				dpm_table->sclk_table.dpm_levels[i].value,
+				(uint16_t)smu_data->activity_target[i],
+				&(smu_data->smc_state_table.GraphicsLevel[i]));
+		if (result)
+			return result;
+
+		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+		if (i > 1)
+			levels[i].DeepSleepDivId = 0;
+	}
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_SPLLShutdownSupport))
+		smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
+
+	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+	smu_data->smc_state_table.GraphicsDpmLevelCount =
+			(uint8_t)dpm_table->sclk_table.count;
+	hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+
+	if (pcie_table != NULL) {
+		PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+				"There must be 1 or more PCIE levels defined in PPTable.",
+				return -EINVAL);
+		max_entry = pcie_entry_cnt - 1;
+		for (i = 0; i < dpm_table->sclk_table.count; i++)
+			levels[i].pcieDpmLevel =
+					(uint8_t) ((i < max_entry) ? i : max_entry);
+	} else {
+		while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+				((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+						(1 << (hightest_pcie_level_enabled + 1))) != 0))
+			hightest_pcie_level_enabled++;
+
+		while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+				((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+						(1 << lowest_pcie_level_enabled)) == 0))
+			lowest_pcie_level_enabled++;
+
+		while ((count < hightest_pcie_level_enabled) &&
+				((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+						(1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+			count++;
+
+		mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+				hightest_pcie_level_enabled ?
+						(lowest_pcie_level_enabled + 1 + count) :
+						hightest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to hightest_pcie_level_enabled */
+		for (i = 2; i < dpm_table->sclk_table.count; i++)
+			levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to lowest_pcie_level_enabled */
+		levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to mid_pcie_level_enabled */
+		levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+	}
+	/* level count will send to smc once at init smc table and never change */
+	result = smu7_copy_bytes_to_smc(smumgr, array, (uint8_t *)levels,
+			(uint32_t)array_size, SMC_RAM_END);
+
+	return result;
+}
+
+
+static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+		uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	int result = 0;
+	struct cgs_display_info info = {0, 0, NULL};
+	uint32_t mclk_stutter_mode_threshold = 40000;
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+
+	if (table_info->vdd_dep_on_mclk) {
+		result = polaris10_get_dependency_volt_by_clk(hwmgr,
+				table_info->vdd_dep_on_mclk, clock,
+				&mem_level->MinVoltage, &mem_level->MinMvdd);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find MinVddc voltage value from memory "
+				"VDDC voltage dependency table", return result);
+	}
+
+	mem_level->MclkFrequency = clock;
+	mem_level->EnabledForThrottle = 1;
+	mem_level->EnabledForActivity = 0;
+	mem_level->UpHyst = 0;
+	mem_level->DownHyst = 100;
+	mem_level->VoltageDownHyst = 0;
+	mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+	mem_level->StutterEnable = false;
+	mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	data->display_timing.num_existing_displays = info.display_count;
+
+	if (mclk_stutter_mode_threshold &&
+		(clock <= mclk_stutter_mode_threshold) &&
+		(SMUM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+				STUTTER_ENABLE) & 0x1))
+		mem_level->StutterEnable = true;
+
+	if (!result) {
+		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+		CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+	}
+	return result;
+}
+
+/**
+* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
+*
+* @param    hwmgr      the address of the hardware manager
+*/
+int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+	struct pp_smumgr *smumgr = hwmgr->smumgr;
+	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+	int result;
+	/* populate MCLK dpm table to SMU7 */
+	uint32_t array = smu_data->smu7_data.dpm_table_start +
+			offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
+	uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
+			SMU74_MAX_LEVELS_MEMORY;
+	struct SMU74_Discrete_MemoryLevel *levels =
+			smu_data->smc_state_table.MemoryLevel;
+	uint32_t i;
+
+	for (i = 0; i < dpm_table->mclk_table.count; i++) {
+		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+				"can not populate memory level as memory clock is zero",
+				return -EINVAL);
+		result = polaris10_populate_single_memory_level(hwmgr,
+				dpm_table->mclk_table.dpm_levels[i].value,
+				&levels[i]);
+		if (i == dpm_table->mclk_table.count - 1) {
+			levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+			levels[i].EnabledForActivity = 1;
+		}
+		if (result)
+			return result;
+	}
+
+	/* In order to prevent MC activity from stutter mode to push DPM up,
+	 * the UVD change complements this by putting the MCLK in
+	 * a higher state by default such that we are not affected by
+	 * up threshold or and MCLK DPM latency.
+	 */
+	levels[0].ActivityLevel = 0x1f;
+	CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+	smu_data->smc_state_table.MemoryDpmLevelCount =
+			(uint8_t)dpm_table->mclk_table.count;
+	hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+
+	/* level count will send to smc once at init smc table and never change */
+	result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+			(uint32_t)array_size, SMC_RAM_END);
+
+	return result;
+}
+
+/**
+* Populates the SMC MVDD structure using the provided memory clock.
+*
+* @param    hwmgr      the address of the hardware manager
+* @param    mclk        the MCLK value to be used in the decision if MVDD should be high or low.
+* @param    voltage     the SMC VOLTAGE structure to be populated
+*/
+static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+		uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint32_t i = 0;
+
+	if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+		/* find mvdd value which clock is more than request */
+		for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+			if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+				smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+				break;
+			}
+		}
+		PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+				"MVDD Voltage is outside the supported range.",
+				return -EINVAL);
+	} else
+		return -EINVAL;
+
+	return 0;
+}
+
+static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+		SMU74_Discrete_DpmTable *table)
+{
+	int result = 0;
+	uint32_t sclk_frequency;
+	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	SMIO_Pattern vol_level;
+	uint32_t mvdd;
+	uint16_t us_mvdd;
+
+	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+	/* Get MinVoltage and Frequency from DPM0,
+	 * already converted to SMC_UL */
+	sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
+	result = polaris10_get_dependency_volt_by_clk(hwmgr,
+			table_info->vdd_dep_on_sclk,
+			sclk_frequency,
+			&table->ACPILevel.MinVoltage, &mvdd);
+	PP_ASSERT_WITH_CODE((0 == result),
+			"Cannot find ACPI VDDC voltage value "
+			"in Clock Dependency Table",
+			);
+
+	result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency,  &(table->ACPILevel.SclkSetting));
+	PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+	table->ACPILevel.DeepSleepDivId = 0;
+	table->ACPILevel.CcPwrDynRm = 0;
+	table->ACPILevel.CcPwrDynRm1 = 0;
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
+
+
+	/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+	table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
+	result = polaris10_get_dependency_volt_by_clk(hwmgr,
+			table_info->vdd_dep_on_mclk,
+			table->MemoryACPILevel.MclkFrequency,
+			&table->MemoryACPILevel.MinVoltage, &mvdd);
+	PP_ASSERT_WITH_CODE((0 == result),
+			"Cannot find ACPI VDDCI voltage value "
+			"in Clock Dependency Table",
+			);
+
+	us_mvdd = 0;
+	if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+			(data->mclk_dpm_key_disabled))
+		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+	else {
+		if (!polaris10_populate_mvdd_value(hwmgr,
+				data->dpm_table.mclk_table.dpm_levels[0].value,
+				&vol_level))
+			us_mvdd = vol_level.Voltage;
+	}
+
+	if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
+		table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
+	else
+		table->MemoryACPILevel.MinMvdd = 0;
+
+	table->MemoryACPILevel.StutterEnable = false;
+
+	table->MemoryACPILevel.EnabledForThrottle = 0;
+	table->MemoryACPILevel.EnabledForActivity = 0;
+	table->MemoryACPILevel.UpHyst = 0;
+	table->MemoryACPILevel.DownHyst = 100;
+	table->MemoryACPILevel.VoltageDownHyst = 0;
+	table->MemoryACPILevel.ActivityLevel =
+			PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+	return result;
+}
+
+static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+		SMU74_Discrete_DpmTable *table)
+{
+	int result = -EINVAL;
+	uint8_t count;
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+			table_info->mm_dep_table;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t vddci;
+
+	table->VceLevelCount = (uint8_t)(mm_table->count);
+	table->VceBootLevel = 0;
+
+	for (count = 0; count < table->VceLevelCount; count++) {
+		table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+		table->VceLevel[count].MinVoltage = 0;
+		table->VceLevel[count].MinVoltage |=
+				(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+		else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+		else
+			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+
+		table->VceLevel[count].MinVoltage |=
+				(vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+		table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/*retrieve divider value for VBIOS */
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				table->VceLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find divide id for VCE engine clock",
+				return result);
+
+		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+	}
+	return result;
+}
+
+
+static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+		SMU74_Discrete_DpmTable *table)
+{
+	int result = -EINVAL;
+	uint8_t count;
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+			table_info->mm_dep_table;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t vddci;
+
+	table->SamuBootLevel = 0;
+	table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+	for (count = 0; count < table->SamuLevelCount; count++) {
+		/* not sure whether we need evclk or not */
+		table->SamuLevel[count].MinVoltage = 0;
+		table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+		table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+				VOLTAGE_SCALE) << VDDC_SHIFT;
+
+		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+		else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+		else
+			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+		table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+		table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value for VBIOS */
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				table->SamuLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find divide id for samu clock", return result);
+
+		table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+	}
+	return result;
+}
+
+static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+		int32_t eng_clock, int32_t mem_clock,
+		SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+	uint32_t dram_timing;
+	uint32_t dram_timing2;
+	uint32_t burst_time;
+	int result;
+
+	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+			eng_clock, mem_clock);
+	PP_ASSERT_WITH_CODE(result == 0,
+			"Error calling VBIOS to set DRAM_TIMING.", return result);
+
+	dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+	dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+	burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+
+	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dram_timing);
+	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+	arb_regs->McArbBurstTime   = (uint8_t)burst_time;
+
+	return 0;
+}
+
+static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct pp_smumgr *smumgr = hwmgr->smumgr;
+	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
+	uint32_t i, j;
+	int result = 0;
+
+	for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
+		for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
+			result = polaris10_populate_memory_timing_parameters(hwmgr,
+					hw_data->dpm_table.sclk_table.dpm_levels[i].value,
+					hw_data->dpm_table.mclk_table.dpm_levels[j].value,
+					&arb_regs.entries[i][j]);
+			if (result == 0)
+				result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j);
+			if (result != 0)
+				return result;
+		}
+	}
+
+	result = smu7_copy_bytes_to_smc(
+			hwmgr->smumgr,
+			smu_data->smu7_data.arb_table_start,
+			(uint8_t *)&arb_regs,
+			sizeof(SMU74_Discrete_MCArbDramTimingTable),
+			SMC_RAM_END);
+	return result;
+}
+
+static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_DpmTable *table)
+{
+	int result = -EINVAL;
+	uint8_t count;
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+			table_info->mm_dep_table;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t vddci;
+
+	table->UvdLevelCount = (uint8_t)(mm_table->count);
+	table->UvdBootLevel = 0;
+
+	for (count = 0; count < table->UvdLevelCount; count++) {
+		table->UvdLevel[count].MinVoltage = 0;
+		table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+		table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+		table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+				VOLTAGE_SCALE) << VDDC_SHIFT;
+
+		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+		else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+		else
+			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+		table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+		table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value for VBIOS */
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				table->UvdLevel[count].VclkFrequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find divide id for Vclk clock", return result);
+
+		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				table->UvdLevel[count].DclkFrequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find divide id for Dclk clock", return result);
+
+		table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+	}
+
+	return result;
+}
+
+static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_DpmTable *table)
+{
+	int result = 0;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	table->GraphicsBootLevel = 0;
+	table->MemoryBootLevel = 0;
+
+	/* find boot level from dpm table */
+	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+			data->vbios_boot_state.sclk_bootup_value,
+			(uint32_t *)&(table->GraphicsBootLevel));
+
+	result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+			data->vbios_boot_state.mclk_bootup_value,
+			(uint32_t *)&(table->MemoryBootLevel));
+
+	table->BootVddc  = data->vbios_boot_state.vddc_bootup_value *
+			VOLTAGE_SCALE;
+	table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+			VOLTAGE_SCALE;
+	table->BootMVdd  = data->vbios_boot_state.mvdd_bootup_value *
+			VOLTAGE_SCALE;
+
+	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+	CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+	return 0;
+}
+
+static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
+{
+	struct pp_smumgr *smumgr = hwmgr->smumgr;
+	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint8_t count, level;
+
+	count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+
+	for (level = 0; level < count; level++) {
+		if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+				hw_data->vbios_boot_state.sclk_bootup_value) {
+			smu_data->smc_state_table.GraphicsBootLevel = level;
+			break;
+		}
+	}
+
+	count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+	for (level = 0; level < count; level++) {
+		if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+				hw_data->vbios_boot_state.mclk_bootup_value) {
+			smu_data->smc_state_table.MemoryBootLevel = level;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+
+static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+	uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
+	struct pp_smumgr *smumgr = hwmgr->smumgr;
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+	uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+			table_info->vdd_dep_on_sclk;
+
+	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+	/* Read SMU_Eefuse to read and calculate RO and determine
+	 * if the part is SS or FF. if RO >= 1660MHz, part is FF.
+	 */
+	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixSMU_EFUSE_0 + (67 * 4));
+	efuse &= 0xFF000000;
+	efuse = efuse >> 24;
+
+	if (hwmgr->chip_id == CHIP_POLARIS10) {
+		min = 1000;
+		max = 2300;
+	} else {
+		min = 1100;
+		max = 2100;
+	}
+
+	ro = efuse * (max - min) / 255 + min;
+
+	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+	for (i = 0; i < sclk_table->count; i++) {
+		smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+				sclk_table->entries[i].cks_enable << i;
+		if (hwmgr->chip_id == CHIP_POLARIS10) {
+			volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \
+						(2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
+			volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
+					(2522480 - sclk_table->entries[i].clk/100 * 115764/100));
+		} else {
+			volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \
+						(2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
+			volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
+					(3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
+		}
+
+		if (volt_without_cks >= volt_with_cks)
+			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+					sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
+
+		smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+	}
+
+	smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
+	/* Populate CKS Lookup Table */
+	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+		stretch_amount2 = 0;
+	else if (stretch_amount == 3 || stretch_amount == 4)
+		stretch_amount2 = 1;
+	else {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_ClockStretcher);
+		PP_ASSERT_WITH_CODE(false,
+				"Stretch Amount in PPTable not supported\n",
+				return -EINVAL);
+	}
+
+	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+	value &= 0xFFFFFFFE;
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+	return 0;
+}
+
+/**
+* Populates the SMC VRConfig field in DPM table.
+*
+* @param    hwmgr   the address of the hardware manager
+* @param    table   the SMC DPM table structure to be populated
+* @return   always 0
+*/
+static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_DpmTable *table)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+	uint16_t config;
+
+	config = VR_MERGED_WITH_VDDC;
+	table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+	/* Set Vddc Voltage Controller */
+	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+		config = VR_SVI2_PLANE_1;
+		table->VRConfig |= config;
+	} else {
+		PP_ASSERT_WITH_CODE(false,
+				"VDDC should be on SVI2 control in merged mode!",
+				);
+	}
+	/* Set Vddci Voltage Controller */
+	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+		config = VR_SVI2_PLANE_2;  /* only in merged mode */
+		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+	} else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+		config = VR_SMIO_PATTERN_1;
+		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+	} else {
+		config = VR_STATIC_VOLTAGE;
+		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+	}
+	/* Set Mvdd Voltage Controller */
+	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+		config = VR_SVI2_PLANE_2;
+		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start +
+			offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
+	} else {
+		config = VR_STATIC_VOLTAGE;
+		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+	}
+
+	return 0;
+}
+
+
+static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct pp_smumgr *smumgr = hwmgr->smumgr;
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+	SMU74_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
+	int result = 0;
+	struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
+	AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
+	AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
+	uint32_t tmp, i;
+
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)hwmgr->pptable;
+	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+			table_info->vdd_dep_on_sclk;
+
+
+	if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+		return result;
+
+	result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
+
+	if (0 == result) {
+		table->BTCGB_VDROOP_TABLE[0].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
+		table->BTCGB_VDROOP_TABLE[0].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
+		table->BTCGB_VDROOP_TABLE[0].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
+		table->BTCGB_VDROOP_TABLE[1].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
+		table->BTCGB_VDROOP_TABLE[1].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
+		table->BTCGB_VDROOP_TABLE[1].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
+		table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
+		table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
+		table->AVFSGB_VDROOP_TABLE[0].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
+		table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
+		table->AVFSGB_VDROOP_TABLE[0].m2_shift  = 12;
+		table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+		table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+		table->AVFSGB_VDROOP_TABLE[1].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+		table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
+		table->AVFSGB_VDROOP_TABLE[1].m2_shift  = 12;
+		table->MaxVoltage                = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
+		AVFS_meanNsigma.Aconstant[0]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
+		AVFS_meanNsigma.Aconstant[1]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
+		AVFS_meanNsigma.Aconstant[2]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
+		AVFS_meanNsigma.DC_tol_sigma      = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
+		AVFS_meanNsigma.Platform_mean     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
+		AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
+		AVFS_meanNsigma.Platform_sigma     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
+
+		for (i = 0; i < NUM_VFT_COLUMNS; i++) {
+			AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
+			AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
+		}
+
+		result = smu7_read_smc_sram_dword(smumgr,
+				SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
+				&tmp, SMC_RAM_END);
+
+		smu7_copy_bytes_to_smc(smumgr,
+					tmp,
+					(uint8_t *)&AVFS_meanNsigma,
+					sizeof(AVFS_meanNsigma_t),
+					SMC_RAM_END);
+
+		result = smu7_read_smc_sram_dword(smumgr,
+				SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
+				&tmp, SMC_RAM_END);
+		smu7_copy_bytes_to_smc(smumgr,
+					tmp,
+					(uint8_t *)&AVFS_SclkOffset,
+					sizeof(AVFS_Sclk_Offset_t),
+					SMC_RAM_END);
+
+		data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
+						(avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
+						(avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
+						(avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
+		data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
+	}
+	return result;
+}
+
+
+/**
+* Initialize the ARB DRAM timing table's index field.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr)
+{
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	uint32_t tmp;
+	int result;
+
+	/* This is a read-modify-write on the first byte of the ARB table.
+	 * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
+	 * is the field 'current'.
+	 * This solution is ugly, but we never write the whole table only
+	 * individual fields in it.
+	 * In reality this field should not be in that structure
+	 * but in a soft register.
+	 */
+	result = smu7_read_smc_sram_dword(smumgr,
+			smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+	if (result)
+		return result;
+
+	tmp &= 0x00FFFFFF;
+	tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+	return smu7_write_smc_sram_dword(smumgr,
+			smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+	struct  phm_ppt_v1_information *table_info =
+			(struct  phm_ppt_v1_information *)(hwmgr->pptable);
+
+	if (table_info &&
+			table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+			table_info->cac_dtp_table->usPowerTuneDataSetID)
+		smu_data->power_tune_defaults =
+				&polaris10_power_tune_data_set_array
+				[table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+	else
+		smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
+
+}
+
+/**
+* Initializes the SMC table and uploads it
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	struct pp_smumgr *smumgr = hwmgr->smumgr;
+	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+	uint8_t i;
+	struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+	pp_atomctrl_clock_dividers_vi dividers;
+
+	polaris10_initialize_power_tune_defaults(hwmgr);
+
+	if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
+		polaris10_populate_smc_voltage_tables(hwmgr, table);
+
+	table->SystemFlags = 0;
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_AutomaticDCTransition))
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StepVddc))
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+	if (hw_data->is_memory_gddr5)
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+	if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
+		result = polaris10_populate_ulv_state(hwmgr, table);
+		PP_ASSERT_WITH_CODE(0 == result,
+				"Failed to initialize ULV state!", return result);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+				ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
+	}
+
+	result = polaris10_populate_smc_link_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize Link Level!", return result);
+
+	result = polaris10_populate_all_graphic_levels(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize Graphics Level!", return result);
+
+	result = polaris10_populate_all_memory_levels(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize Memory Level!", return result);
+
+	result = polaris10_populate_smc_acpi_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize ACPI Level!", return result);
+
+	result = polaris10_populate_smc_vce_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize VCE Level!", return result);
+
+	result = polaris10_populate_smc_samu_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize SAMU Level!", return result);
+
+	/* Since only the initial state is completely set up at this point
+	 * (the other states are just copies of the boot state) we only
+	 * need to populate the  ARB settings for the initial state.
+	 */
+	result = polaris10_program_memory_timing_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to Write ARB settings for the initial state.", return result);
+
+	result = polaris10_populate_smc_uvd_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize UVD Level!", return result);
+
+	result = polaris10_populate_smc_boot_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize Boot Level!", return result);
+
+	result = polaris10_populate_smc_initailial_state(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize Boot State!", return result);
+
+	result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to populate BAPM Parameters!", return result);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ClockStretcher)) {
+		result = polaris10_populate_clock_stretcher_data_table(hwmgr);
+		PP_ASSERT_WITH_CODE(0 == result,
+				"Failed to populate Clock Stretcher Data Table!",
+				return result);
+	}
+
+	result = polaris10_populate_avfs_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
+
+	table->CurrSclkPllRange = 0xff;
+	table->GraphicsVoltageChangeEnable  = 1;
+	table->GraphicsThermThrottleEnable  = 1;
+	table->GraphicsInterval = 1;
+	table->VoltageInterval  = 1;
+	table->ThermalInterval  = 1;
+	table->TemperatureLimitHigh =
+			table_info->cac_dtp_table->usTargetOperatingTemp *
+			SMU7_Q88_FORMAT_CONVERSION_UNIT;
+	table->TemperatureLimitLow  =
+			(table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+			SMU7_Q88_FORMAT_CONVERSION_UNIT;
+	table->MemoryVoltageChangeEnable = 1;
+	table->MemoryInterval = 1;
+	table->VoltageResponseTime = 0;
+	table->PhaseResponseTime = 0;
+	table->MemoryThermThrottleEnable = 1;
+	table->PCIeBootLinkLevel = 0;
+	table->PCIeGenInterval = 1;
+	table->VRConfig = 0;
+
+	result = polaris10_populate_vr_config(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to populate VRConfig setting!", return result);
+
+	table->ThermGpio = 17;
+	table->SclkStepSize = 0x4000;
+
+	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+		table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+	} else {
+		table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_RegulatorHot);
+	}
+
+	if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+			&gpio_pin)) {
+		table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_AutomaticDCTransition);
+	} else {
+		table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_AutomaticDCTransition);
+	}
+
+	/* Thermal Output GPIO */
+	if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
+			&gpio_pin)) {
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_ThermalOutGPIO);
+
+		table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+		/* For porlarity read GPIOPAD_A with assigned Gpio pin
+		 * since VBIOS will program this register to set 'inactive state',
+		 * driver can then determine 'active state' from this and
+		 * program SMU with correct polarity
+		 */
+		table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
+					& (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
+		table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+		/* if required, combine VRHot/PCC with thermal out GPIO */
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
+		&& phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
+			table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+	} else {
+		table->ThermOutGpio = 17;
+		table->ThermOutPolarity = 1;
+		table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+	}
+
+	/* Populate BIF_SCLK levels into SMC DPM table */
+	for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], &dividers);
+		PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
+
+		if (i == 0)
+			table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+		else
+			table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+	}
+
+	for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
+		table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
+	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+	result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+			smu_data->smu7_data.dpm_table_start +
+			offsetof(SMU74_Discrete_DpmTable, SystemFlags),
+			(uint8_t *)&(table->SystemFlags),
+			sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
+			SMC_RAM_END);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to upload dpm data to SMC memory!", return result);
+
+	result = polaris10_init_arb_table_index(hwmgr->smumgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to upload arb data to SMC memory!", return result);
+
+	result = polaris10_populate_pm_fuses(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to  populate PM fuses to SMC memory!", return result);
+	return 0;
+}
+
+static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (data->need_update_smu7_dpm_table &
+		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+		return polaris10_program_memory_timing_parameters(hwmgr);
+
+	return 0;
+}
+
+int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+{
+	int ret;
+	struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+		return 0;
+
+	ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+			PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+
+	ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
+			0 : -1;
+
+	if (!ret)
+		/* If this param is not changed, this function could fire unnecessarily */
+		smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+
+	return ret;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    pInput the pointer to input data
+* @param    pOutput the pointer to output data
+* @param    pStorage the pointer to temporary storage
+* @param    Result the last failure code
+* @return   result from set temperature range routine
+*/
+int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+	SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+	uint32_t duty100;
+	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+	uint16_t fdo_min, slope1, slope2;
+	uint32_t reference_clock;
+	int res;
+	uint64_t tmp64;
+
+	if (smu_data->smu7_data.fan_table_start == 0) {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_MicrocodeFanControl);
+		return 0;
+	}
+
+	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_FDO_CTRL1, FMAX_DUTY100);
+
+	if (duty100 == 0) {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_MicrocodeFanControl);
+		return 0;
+	}
+
+	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
+			usPWMMin * duty100;
+	do_div(tmp64, 10000);
+	fdo_min = (uint16_t)tmp64;
+
+	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+			hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+			hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+	fan_table.TempMin = cpu_to_be16((50 + hwmgr->
+			thermal_controller.advanceFanControlParameters.usTMin) / 100);
+	fan_table.TempMed = cpu_to_be16((50 + hwmgr->
+			thermal_controller.advanceFanControlParameters.usTMed) / 100);
+	fan_table.TempMax = cpu_to_be16((50 + hwmgr->
+			thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+	fan_table.Slope1 = cpu_to_be16(slope1);
+	fan_table.Slope2 = cpu_to_be16(slope2);
+
+	fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+	fan_table.HystDown = cpu_to_be16(hwmgr->
+			thermal_controller.advanceFanControlParameters.ucTHyst);
+
+	fan_table.HystUp = cpu_to_be16(1);
+
+	fan_table.HystSlope = cpu_to_be16(1);
+
+	fan_table.TempRespLim = cpu_to_be16(5);
+
+	reference_clock = smu7_get_xclk(hwmgr);
+
+	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
+			thermal_controller.advanceFanControlParameters.ulCycleDelay *
+			reference_clock) / 1600);
+
+	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
+			hwmgr->device, CGS_IND_REG__SMC,
+			CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+	res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
+			(uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
+			SMC_RAM_END);
+
+	if (!res && hwmgr->thermal_controller.
+			advanceFanControlParameters.ucMinimumPWMLimit)
+		res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_SetFanMinPwm,
+				hwmgr->thermal_controller.
+				advanceFanControlParameters.ucMinimumPWMLimit);
+
+	if (!res && hwmgr->thermal_controller.
+			advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
+		res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_SetFanSclkTarget,
+				hwmgr->thermal_controller.
+				advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+
+	if (res)
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_MicrocodeFanControl);
+
+	return 0;
+}
+
+static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+	uint32_t mm_boot_level_offset, mm_boot_level_value;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	smu_data->smc_state_table.UvdBootLevel = 0;
+	if (table_info->mm_dep_table->count > 0)
+		smu_data->smc_state_table.UvdBootLevel =
+				(uint8_t) (table_info->mm_dep_table->count - 1);
+	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable,
+						UvdBootLevel);
+	mm_boot_level_offset /= 4;
+	mm_boot_level_offset *= 4;
+	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset);
+	mm_boot_level_value &= 0x00FFFFFF;
+	mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+	cgs_write_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_UVDDPM) ||
+		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StablePState))
+		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_UVDDPM_SetEnabledMask,
+				(uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+	return 0;
+}
+
+static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+	uint32_t mm_boot_level_offset, mm_boot_level_value;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_StablePState))
+		smu_data->smc_state_table.VceBootLevel =
+			(uint8_t) (table_info->mm_dep_table->count - 1);
+	else
+		smu_data->smc_state_table.VceBootLevel = 0;
+
+	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+					offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+	mm_boot_level_offset /= 4;
+	mm_boot_level_offset *= 4;
+	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset);
+	mm_boot_level_value &= 0xFF00FFFF;
+	mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+	cgs_write_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_VCEDPM_SetEnabledMask,
+				(uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+	return 0;
+}
+
+static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+	uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+
+	smu_data->smc_state_table.SamuBootLevel = 0;
+	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+				offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+
+	mm_boot_level_offset /= 4;
+	mm_boot_level_offset *= 4;
+	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset);
+	mm_boot_level_value &= 0xFFFFFF00;
+	mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+	cgs_write_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StablePState))
+		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_SAMUDPM_SetEnabledMask,
+				(uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+	return 0;
+}
+
+
+static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+	int max_entry, i;
+
+	max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
+						SMU74_MAX_LEVELS_LINK :
+						pcie_table->count;
+	/* Setup BIF_SCLK levels */
+	for (i = 0; i < max_entry; i++)
+		smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
+	return 0;
+}
+
+int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+	switch (type) {
+	case SMU_UVD_TABLE:
+		polaris10_update_uvd_smc_table(hwmgr);
+		break;
+	case SMU_VCE_TABLE:
+		polaris10_update_vce_smc_table(hwmgr);
+		break;
+	case SMU_SAMU_TABLE:
+		polaris10_update_samu_smc_table(hwmgr);
+		break;
+	case SMU_BIF_TABLE:
+		polaris10_update_bif_smc_table(hwmgr);
+	default:
+		break;
+	}
+	return 0;
+}
+
+int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+	int result = 0;
+	uint32_t low_sclk_interrupt_threshold = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkThrottleLowNotification)
+		&& (hwmgr->gfx_arbiter.sclk_threshold !=
+				data->low_sclk_interrupt_threshold)) {
+		data->low_sclk_interrupt_threshold =
+				hwmgr->gfx_arbiter.sclk_threshold;
+		low_sclk_interrupt_threshold =
+				data->low_sclk_interrupt_threshold;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+		result = smu7_copy_bytes_to_smc(
+				hwmgr->smumgr,
+				smu_data->smu7_data.dpm_table_start +
+				offsetof(SMU74_Discrete_DpmTable,
+					LowSclkInterruptThreshold),
+				(uint8_t *)&low_sclk_interrupt_threshold,
+				sizeof(uint32_t),
+				SMC_RAM_END);
+	}
+	PP_ASSERT_WITH_CODE((result == 0),
+			"Failed to update SCLK threshold!", return result);
+
+	result = polaris10_program_mem_timing_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE((result == 0),
+			"Failed to program memory timing parameters!",
+			);
+
+	return result;
+}
+
+uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
+{
+	switch (type) {
+	case SMU_SoftRegisters:
+		switch (member) {
+		case HandshakeDisables:
+			return offsetof(SMU74_SoftRegisters, HandshakeDisables);
+		case VoltageChangeTimeout:
+			return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
+		case AverageGraphicsActivity:
+			return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
+		case PreVBlankGap:
+			return offsetof(SMU74_SoftRegisters, PreVBlankGap);
+		case VBlankTimeout:
+			return offsetof(SMU74_SoftRegisters, VBlankTimeout);
+		case UcodeLoadStatus:
+			return offsetof(SMU74_SoftRegisters, UcodeLoadStatus);
+		}
+	case SMU_Discrete_DpmTable:
+		switch (member) {
+		case UvdBootLevel:
+			return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
+		case VceBootLevel:
+			return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+		case SamuBootLevel:
+			return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+		case LowSclkInterruptThreshold:
+			return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
+		}
+	}
+	printk("cant't get the offset of type %x member %x \n", type, member);
+	return 0;
+}
+
+uint32_t polaris10_get_mac_definition(uint32_t value)
+{
+	switch (value) {
+	case SMU_MAX_LEVELS_GRAPHICS:
+		return SMU74_MAX_LEVELS_GRAPHICS;
+	case SMU_MAX_LEVELS_MEMORY:
+		return SMU74_MAX_LEVELS_MEMORY;
+	case SMU_MAX_LEVELS_LINK:
+		return SMU74_MAX_LEVELS_LINK;
+	case SMU_MAX_ENTRIES_SMIO:
+		return SMU74_MAX_ENTRIES_SMIO;
+	case SMU_MAX_LEVELS_VDDC:
+		return SMU74_MAX_LEVELS_VDDC;
+	case SMU_MAX_LEVELS_VDDGFX:
+		return SMU74_MAX_LEVELS_VDDGFX;
+	case SMU_MAX_LEVELS_VDDCI:
+		return SMU74_MAX_LEVELS_VDDCI;
+	case SMU_MAX_LEVELS_MVDD:
+		return SMU74_MAX_LEVELS_MVDD;
+	case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
+		return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
+	}
+
+	printk("cant't get the mac of %x \n", value);
+	return 0;
+}
+
+/**
+* Get the location of various tables inside the FW image.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always  0
+*/
+int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+	uint32_t tmp;
+	int result;
+	bool error = false;
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU74_Firmware_Header, DpmTable),
+			&tmp, SMC_RAM_END);
+
+	if (0 == result)
+		smu_data->smu7_data.dpm_table_start = tmp;
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU74_Firmware_Header, SoftRegisters),
+			&tmp, SMC_RAM_END);
+
+	if (!result)
+		smu_data->smu7_data.soft_regs_start = tmp;
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU74_Firmware_Header, mcRegisterTable),
+			&tmp, SMC_RAM_END);
+
+	if (!result)
+		smu_data->smu7_data.mc_reg_table_start = tmp;
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU74_Firmware_Header, FanTable),
+			&tmp, SMC_RAM_END);
+
+	if (!result)
+		smu_data->smu7_data.fan_table_start = tmp;
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
+			&tmp, SMC_RAM_END);
+
+	if (!result)
+		smu_data->smu7_data.arb_table_start = tmp;
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU74_Firmware_Header, Version),
+			&tmp, SMC_RAM_END);
+
+	if (!result)
+		hwmgr->microcode_version_info.SMC = tmp;
+
+	error |= (0 != result);
+
+	return error ? -1 : 0;
+}
+
+bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+	return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+			? true : false;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h
similarity index 60%
copy from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
copy to drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h
index d24b888..5ade3ce 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h
@@ -20,16 +20,23 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#ifndef TONGA_PROCESSPPTABLES_H
-#define TONGA_PROCESSPPTABLES_H
+#ifndef POLARIS10_SMC_H
+#define POLARIS10_SMC_H
 
-#include "hwmgr.h"
+#include "smumgr.h"
 
-extern const struct pp_table_func tonga_pptable_funcs;
-extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
-extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
-		struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
-				struct pp_power_state *, void *, uint32_t));
+
+int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+int polaris10_init_smc_table(struct pp_hwmgr *hwmgr);
+int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
+int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
+int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member);
+uint32_t polaris10_get_mac_definition(uint32_t value);
+int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr);
+bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr);
 
 #endif
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 5dba7c5..5c3598a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -38,16 +38,11 @@
 #include "ppatomctrl.h"
 #include "pp_debug.h"
 #include "cgs_common.h"
+#include "polaris10_smc.h"
+#include "smu7_ppsmc.h"
+#include "smu7_smumgr.h"
 
-#define POLARIS10_SMC_SIZE 0x20000
-#define VOLTAGE_SCALE 4
-
-/* Microcode file is stored in this buffer */
-#define BUFFER_SIZE                 80000
-#define MAX_STRING_SIZE             15
-#define BUFFER_SIZETWO              131072  /* 128 *1024 */
-
-#define SMC_RAM_END 0x40000
+#define PPPOLARIS10_TARGETACTIVITY_DFLT                     50
 
 static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
 	/*  Min      pcie   DeepSleep Activity  CgSpll      CgSpll    CcPwr  CcPwr  Sclk         Enabled      Enabled                       Voltage    Power */
@@ -62,572 +57,9 @@
 	{ 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }
 };
 
-static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 =
-	{0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
-
-/**
-* Set the address for reading/writing the SMC SRAM space.
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    smcAddress the address in the SMC RAM to access.
-*/
-static int polaris10_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
-{
-	PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
-	PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);
-
-	cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
-	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
-
-	return 0;
-}
-
-/**
-* Copy bytes from SMC RAM space into driver memory.
-*
-* @param    smumgr  the address of the powerplay SMU manager.
-* @param    smc_start_address the start address in the SMC RAM to copy bytes from
-* @param    src the byte array to copy the bytes to.
-* @param    byte_count the number of bytes to copy.
-*/
-int polaris10_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
-{
-	uint32_t data;
-	uint32_t addr;
-	uint8_t *dest_byte;
-	uint8_t i, data_byte[4] = {0};
-	uint32_t *pdata = (uint32_t *)&data_byte;
-
-	PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1;);
-	PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1);
-
-	addr = smc_start_address;
-
-	while (byte_count >= 4) {
-		polaris10_read_smc_sram_dword(smumgr, addr, &data, limit);
-
-		*dest = PP_SMC_TO_HOST_UL(data);
-
-		dest += 1;
-		byte_count -= 4;
-		addr += 4;
-	}
-
-	if (byte_count) {
-		polaris10_read_smc_sram_dword(smumgr, addr, &data, limit);
-		*pdata = PP_SMC_TO_HOST_UL(data);
-	/* Cast dest into byte type in dest_byte.  This way, we don't overflow if the allocated memory is not 4-byte aligned. */
-		dest_byte = (uint8_t *)dest;
-		for (i = 0; i < byte_count; i++)
-			dest_byte[i] = data_byte[i];
-	}
-
-	return 0;
-}
-
-/**
-* Copy bytes from an array into the SMC RAM space.
-*
-* @param    pSmuMgr  the address of the powerplay SMU manager.
-* @param    smc_start_address the start address in the SMC RAM to copy bytes to.
-* @param    src the byte array to copy the bytes from.
-* @param    byte_count the number of bytes to copy.
-*/
-int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
-				const uint8_t *src, uint32_t byte_count, uint32_t limit)
-{
-	int result;
-	uint32_t data = 0;
-	uint32_t original_data;
-	uint32_t addr = 0;
-	uint32_t extra_shift;
-
-	PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1);
-	PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1);
-
-	addr = smc_start_address;
-
-	while (byte_count >= 4) {
-	/* Bytes are written into the SMC addres space with the MSB first. */
-		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
-
-		result = polaris10_set_smc_sram_address(smumgr, addr, limit);
-
-		if (0 != result)
-			return result;
-
-		cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
-
-		src += 4;
-		byte_count -= 4;
-		addr += 4;
-	}
-
-	if (0 != byte_count) {
-
-		data = 0;
-
-		result = polaris10_set_smc_sram_address(smumgr, addr, limit);
-
-		if (0 != result)
-			return result;
-
-
-		original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
-
-		extra_shift = 8 * (4 - byte_count);
-
-		while (byte_count > 0) {
-			/* Bytes are written into the SMC addres space with the MSB first. */
-			data = (0x100 * data) + *src++;
-			byte_count--;
-		}
-
-		data <<= extra_shift;
-
-		data |= (original_data & ~((~0UL) << extra_shift));
-
-		result = polaris10_set_smc_sram_address(smumgr, addr, limit);
-
-		if (0 != result)
-			return result;
-
-		cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
-	}
-
-	return 0;
-}
-
-
-static int polaris10_program_jump_on_start(struct pp_smumgr *smumgr)
-{
-	static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
-
-	polaris10_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1);
-
-	return 0;
-}
-
-/**
-* Return if the SMC is currently running.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-*/
-bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr)
-{
-	return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
-	&& (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
-}
-
-static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
-{
-	uint32_t efuse;
-
-	efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
-	efuse &= 0x00000001;
-	if (efuse)
-		return true;
-
-	return false;
-}
-
-/**
-* Send a message to the SMC, and wait for its response.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    msg the message to send.
-* @return   The response that came from the SMC.
-*/
-int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
-{
-	int ret;
-
-	if (!polaris10_is_smc_ram_running(smumgr))
-		return -1;
-
-
-	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
-	ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
-
-	if (ret != 1)
-		printk("\n failed to send pre message %x ret is %d \n",  msg, ret);
-
-	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
-	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
-	ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
-
-	if (ret != 1)
-		printk("\n failed to send message %x ret is %d \n",  msg, ret);
-
-	return 0;
-}
-
-
-/**
-* Send a message to the SMC, and do not wait for its response.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    msg the message to send.
-* @return   Always return 0.
-*/
-int polaris10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
-{
-	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
-	return 0;
-}
-
-/**
-* Send a message to the SMC with parameter
-*
-* @param    smumgr:  the address of the powerplay hardware manager.
-* @param    msg: the message to send.
-* @param    parameter: the parameter to send
-* @return   The response that came from the SMC.
-*/
-int polaris10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
-{
-	if (!polaris10_is_smc_ram_running(smumgr)) {
-		return -1;
-	}
-
-	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
-	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-
-	return polaris10_send_msg_to_smc(smumgr, msg);
-}
-
-
-/**
-* Send a message to the SMC with parameter, do not wait for response
-*
-* @param    smumgr:  the address of the powerplay hardware manager.
-* @param    msg: the message to send.
-* @param    parameter: the parameter to send
-* @return   The response that came from the SMC.
-*/
-int polaris10_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
-{
-	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-
-	return polaris10_send_msg_to_smc_without_waiting(smumgr, msg);
-}
-
-int polaris10_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
-{
-	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
-
-	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
-	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
-	if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
-		printk("Failed to send Message.\n");
-
-	return 0;
-}
-
-/**
-* Wait until the SMC is doing nithing. Doing nothing means that the SMC is either turned off or it is sitting on the STOP instruction.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    msg the message to send.
-* @return   The response that came from the SMC.
-*/
-int polaris10_wait_for_smc_inactive(struct pp_smumgr *smumgr)
-{
-	/* If the SMC is not even on it qualifies as inactive. */
-	if (!polaris10_is_smc_ram_running(smumgr))
-		return -1;
-
-	SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
-	return 0;
-}
-
-
-/**
-* Upload the SMC firmware to the SMC microcontroller.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    pFirmware the data structure containing the various sections of the firmware.
-*/
-static int polaris10_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
-{
-	uint32_t byte_count = length;
-
-	PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -1);
-
-	cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
-	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
-
-	for (; byte_count >= 4; byte_count -= 4)
-		cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
-
-	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
-
-	PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -1);
-
-	return 0;
-}
-
-static enum cgs_ucode_id polaris10_convert_fw_type_to_cgs(uint32_t fw_type)
-{
-	enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
-
-	switch (fw_type) {
-	case UCODE_ID_SMU:
-		result = CGS_UCODE_ID_SMU;
-		break;
-	case UCODE_ID_SMU_SK:
-		result = CGS_UCODE_ID_SMU_SK;
-		break;
-	case UCODE_ID_SDMA0:
-		result = CGS_UCODE_ID_SDMA0;
-		break;
-	case UCODE_ID_SDMA1:
-		result = CGS_UCODE_ID_SDMA1;
-		break;
-	case UCODE_ID_CP_CE:
-		result = CGS_UCODE_ID_CP_CE;
-		break;
-	case UCODE_ID_CP_PFP:
-		result = CGS_UCODE_ID_CP_PFP;
-		break;
-	case UCODE_ID_CP_ME:
-		result = CGS_UCODE_ID_CP_ME;
-		break;
-	case UCODE_ID_CP_MEC:
-		result = CGS_UCODE_ID_CP_MEC;
-		break;
-	case UCODE_ID_CP_MEC_JT1:
-		result = CGS_UCODE_ID_CP_MEC_JT1;
-		break;
-	case UCODE_ID_CP_MEC_JT2:
-		result = CGS_UCODE_ID_CP_MEC_JT2;
-		break;
-	case UCODE_ID_RLC_G:
-		result = CGS_UCODE_ID_RLC_G;
-		break;
-	default:
-		break;
-	}
-
-	return result;
-}
-
-static int polaris10_upload_smu_firmware_image(struct pp_smumgr *smumgr)
-{
-	int result = 0;
-	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-
-	struct cgs_firmware_info info = {0};
-
-	if (smu_data->security_hard_key == 1)
-		cgs_get_firmware_info(smumgr->device,
-			polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
-	else
-		cgs_get_firmware_info(smumgr->device,
-			polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
-
-	/* TO DO cgs_init_samu_load_smu(smumgr->device, (uint32_t *)info.kptr, info.image_size, smu_data->post_initial_boot);*/
-	result = polaris10_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, POLARIS10_SMC_SIZE);
-
-	return result;
-}
-
-/**
-* Read a 32bit value from the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    smcAddress the address in the SMC RAM to access.
-* @param    value and output parameter for the data read from the SMC SRAM.
-*/
-int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
-{
-	int result;
-
-	result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit);
-
-	if (result)
-		return result;
-
-	*value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
-	return 0;
-}
-
-/**
-* Write a 32bit value to the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    smc_addr the address in the SMC RAM to access.
-* @param    value to write to the SMC SRAM.
-*/
-int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
-{
-	int result;
-
-	result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit);
-
-	if (result)
-		return result;
-
-	cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
-
-	return 0;
-}
-
-
-int polaris10_smu_fini(struct pp_smumgr *smumgr)
-{
-	if (smumgr->backend) {
-		kfree(smumgr->backend);
-		smumgr->backend = NULL;
-	}
-	cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
-	return 0;
-}
-
-/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */
-static uint32_t polaris10_get_mask_for_firmware_type(uint32_t fw_type)
-{
-	uint32_t result = 0;
-
-	switch (fw_type) {
-	case UCODE_ID_SDMA0:
-		result = UCODE_ID_SDMA0_MASK;
-		break;
-	case UCODE_ID_SDMA1:
-		result = UCODE_ID_SDMA1_MASK;
-		break;
-	case UCODE_ID_CP_CE:
-		result = UCODE_ID_CP_CE_MASK;
-		break;
-	case UCODE_ID_CP_PFP:
-		result = UCODE_ID_CP_PFP_MASK;
-		break;
-	case UCODE_ID_CP_ME:
-		result = UCODE_ID_CP_ME_MASK;
-		break;
-	case UCODE_ID_CP_MEC_JT1:
-	case UCODE_ID_CP_MEC_JT2:
-		result = UCODE_ID_CP_MEC_MASK;
-		break;
-	case UCODE_ID_RLC_G:
-		result = UCODE_ID_RLC_G_MASK;
-		break;
-	default:
-		printk("UCode type is out of range! \n");
-		result = 0;
-	}
-
-	return result;
-}
-
-/* Populate one firmware image to the data structure */
-
-static int polaris10_populate_single_firmware_entry(struct pp_smumgr *smumgr,
-						uint32_t fw_type,
-						struct SMU_Entry *entry)
-{
-	int result = 0;
-	struct cgs_firmware_info info = {0};
-
-	result = cgs_get_firmware_info(smumgr->device,
-				polaris10_convert_fw_type_to_cgs(fw_type),
-				&info);
-
-	if (!result) {
-		entry->version = info.version;
-		entry->id = (uint16_t)fw_type;
-		entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
-		entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
-		entry->meta_data_addr_high = 0;
-		entry->meta_data_addr_low = 0;
-		entry->data_size_byte = info.image_size;
-		entry->num_register_entries = 0;
-	}
-
-	if (fw_type == UCODE_ID_RLC_G)
-		entry->flags = 1;
-	else
-		entry->flags = 0;
-
-	return 0;
-}
-
-static int polaris10_request_smu_load_fw(struct pp_smumgr *smumgr)
-{
-	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-	uint32_t fw_to_load;
-
-	int result = 0;
-	struct SMU_DRAMData_TOC *toc;
-
-	if (!smumgr->reload_fw) {
-		printk(KERN_INFO "[ powerplay ] skip reloading...\n");
-		return 0;
-	}
-
-	if (smu_data->soft_regs_start)
-		cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
-					smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus),
-					0x0);
-
-	polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high);
-	polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low);
-
-	toc = (struct SMU_DRAMData_TOC *)smu_data->header;
-	toc->num_entries = 0;
-	toc->structure_version = 1;
-
-	PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-	PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-	PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-	PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-	PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-	PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-	PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-	PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-	PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-
-	polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
-	polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
-
-	fw_to_load = UCODE_ID_RLC_G_MASK
-		   + UCODE_ID_SDMA0_MASK
-		   + UCODE_ID_SDMA1_MASK
-		   + UCODE_ID_CP_CE_MASK
-		   + UCODE_ID_CP_ME_MASK
-		   + UCODE_ID_CP_PFP_MASK
-		   + UCODE_ID_CP_MEC_MASK;
-
-	if (polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
-		printk(KERN_ERR "Fail to Request SMU Load uCode");
-
-	return result;
-}
-
-/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
-static int polaris10_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
-{
-	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-	uint32_t fw_mask = polaris10_get_mask_for_firmware_type(fw_type);
-	uint32_t ret;
-	/* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */
-	ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
-					smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus),
-					fw_mask, fw_mask);
-
-	return ret;
-}
+static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = {
+	0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
 
-static int polaris10_reload_firmware(struct pp_smumgr *smumgr)
-{
-	return smumgr->smumgr_funcs->start_smu(smumgr);
-}
 
 static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
 {
@@ -669,7 +101,7 @@
 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
 
 	if (0 != smu_data->avfs.avfs_btc_param) {
-		if (0 != polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+		if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
 			printk("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
 			result = -1;
 		}
@@ -697,7 +129,7 @@
 	graphics_level_size = sizeof(avfs_graphics_level_polaris10);
 	u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE);
 
-	PP_ASSERT_WITH_CODE(0 == polaris10_read_smc_sram_dword(smumgr,
+	PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
 				SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable),
 				&dpm_table_start, 0x40000),
 			"[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table",
@@ -708,14 +140,14 @@
 
 	vr_config_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VRConfig);
 
-	PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, vr_config_address,
+	PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_address,
 				(uint8_t *)&vr_config, sizeof(uint32_t), 0x40000),
 			"[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC",
 			return -1);
 
 	graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
 
-	PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
+	PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
 				(uint8_t *)(&avfs_graphics_level_polaris10),
 				graphics_level_size, 0x40000),
 			"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!",
@@ -723,7 +155,7 @@
 
 	graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
 
-	PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
+	PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
 				(uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000),
 				"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!",
 			return -1);
@@ -732,7 +164,7 @@
 
 	graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd);
 
-	PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
+	PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
 			(uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000),
 			"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!",
 			return -1);
@@ -793,7 +225,7 @@
 	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
 					SMC_SYSCON_RESET_CNTL, rst_reg, 1);
 
-	result = polaris10_upload_smu_firmware_image(smumgr);
+	result = smu7_upload_smu_firmware_image(smumgr);
 	if (result != 0)
 		return result;
 
@@ -812,7 +244,7 @@
 
 
 	/* Call Test SMU message with 0x20000 offset to trigger SMU start */
-	polaris10_send_msg_to_smc_offset(smumgr);
+	smu7_send_msg_to_smc_offset(smumgr);
 
 	/* Wait done bit to be set */
 	/* Check pass/failed indicator */
@@ -853,12 +285,12 @@
 					SMC_SYSCON_RESET_CNTL,
 					rst_reg, 1);
 
-	result = polaris10_upload_smu_firmware_image(smumgr);
+	result = smu7_upload_smu_firmware_image(smumgr);
 	if (result != 0)
 		return result;
 
 	/* Set smc instruct start point at 0x0 */
-	polaris10_program_jump_on_start(smumgr);
+	smu7_program_jump_on_start(smumgr);
 
 	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
 					SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
@@ -881,10 +313,10 @@
 	bool SMU_VFT_INTACT;
 
 	/* Only start SMC if SMC RAM is not running */
-	if (!polaris10_is_smc_ram_running(smumgr)) {
+	if (!smu7_is_smc_ram_running(smumgr)) {
 		SMU_VFT_INTACT = false;
 		smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
-		smu_data->security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
+		smu_data->smu7_data.security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
 
 		/* Check if SMU is running in protected mode */
 		if (smu_data->protected_mode == 0) {
@@ -894,7 +326,7 @@
 
 			/* If failed, try with different security Key. */
 			if (result != 0) {
-				smu_data->security_hard_key ^= 1;
+				smu_data->smu7_data.security_hard_key ^= 1;
 				result = polaris10_start_smu_in_protection_mode(smumgr);
 			}
 		}
@@ -906,89 +338,69 @@
 	} else
 		SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */
 
-	smu_data->post_initial_boot = true;
 	polaris10_avfs_event_mgr(smumgr, SMU_VFT_INTACT);
 	/* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
-	polaris10_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
-					&(smu_data->soft_regs_start), 0x40000);
+	smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
+					&(smu_data->smu7_data.soft_regs_start), 0x40000);
 
-	result = polaris10_request_smu_load_fw(smumgr);
+	result = smu7_request_smu_load_fw(smumgr);
 
 	return result;
 }
 
+static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
+{
+	uint32_t efuse;
+
+	efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
+	efuse &= 0x00000001;
+	if (efuse)
+		return true;
+
+	return false;
+}
+
 static int polaris10_smu_init(struct pp_smumgr *smumgr)
 {
-	struct polaris10_smumgr *smu_data;
-	uint8_t *internal_buf;
-	uint64_t mc_addr = 0;
-	/* Allocate memory for backend private data */
-	smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-	smu_data->header_buffer.data_size =
-		((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
-	smu_data->smu_buffer.data_size = 200*4096;
-	smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
-/* Allocate FW image data structure and header buffer and
- * send the header buffer address to SMU */
-	smu_allocate_memory(smumgr->device,
-		smu_data->header_buffer.data_size,
-		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
-		PAGE_SIZE,
-		&mc_addr,
-		&smu_data->header_buffer.kaddr,
-		&smu_data->header_buffer.handle);
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	int i;
 
-	smu_data->header = smu_data->header_buffer.kaddr;
-	smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
-	smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
-	PP_ASSERT_WITH_CODE((NULL != smu_data->header),
-		"Out of memory.",
-		kfree(smumgr->backend);
-		cgs_free_gpu_mem(smumgr->device,
-		(cgs_handle_t)smu_data->header_buffer.handle);
-		return -1);
-
-/* Allocate buffer for SMU internal buffer and send the address to SMU.
- * Iceland SMU does not need internal buffer.*/
-	smu_allocate_memory(smumgr->device,
-		smu_data->smu_buffer.data_size,
-		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
-		PAGE_SIZE,
-		&mc_addr,
-		&smu_data->smu_buffer.kaddr,
-		&smu_data->smu_buffer.handle);
-
-	internal_buf = smu_data->smu_buffer.kaddr;
-	smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
-	smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
-	PP_ASSERT_WITH_CODE((NULL != internal_buf),
-		"Out of memory.",
-		kfree(smumgr->backend);
-		cgs_free_gpu_mem(smumgr->device,
-		(cgs_handle_t)smu_data->smu_buffer.handle);
-		return -1;);
+	if (smu7_init(smumgr))
+		return -EINVAL;
 
 	if (polaris10_is_hw_avfs_present(smumgr))
 		smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
 	else
 		smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
 
+	for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++)
+		smu_data->activity_target[i] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+
 	return 0;
 }
 
-static const struct pp_smumgr_func ellsemere_smu_funcs = {
+static const struct pp_smumgr_func polaris10_smu_funcs = {
 	.smu_init = polaris10_smu_init,
-	.smu_fini = polaris10_smu_fini,
+	.smu_fini = smu7_smu_fini,
 	.start_smu = polaris10_start_smu,
-	.check_fw_load_finish = polaris10_check_fw_load_finish,
-	.request_smu_load_fw = polaris10_reload_firmware,
+	.check_fw_load_finish = smu7_check_fw_load_finish,
+	.request_smu_load_fw = smu7_reload_firmware,
 	.request_smu_load_specific_fw = NULL,
-	.send_msg_to_smc = polaris10_send_msg_to_smc,
-	.send_msg_to_smc_with_parameter = polaris10_send_msg_to_smc_with_parameter,
+	.send_msg_to_smc = smu7_send_msg_to_smc,
+	.send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
 	.download_pptable_settings = NULL,
 	.upload_pptable_settings = NULL,
+	.update_smc_table = polaris10_update_smc_table,
+	.get_offsetof = polaris10_get_offsetof,
+	.process_firmware_header = polaris10_process_firmware_header,
+	.init_smc_table = polaris10_init_smc_table,
+	.update_sclk_threshold = polaris10_update_sclk_threshold,
+	.thermal_avfs_enable = polaris10_thermal_avfs_enable,
+	.thermal_setup_fan_table = polaris10_thermal_setup_fan_table,
+	.populate_all_graphic_levels = polaris10_populate_all_graphic_levels,
+	.populate_all_memory_levels = polaris10_populate_all_memory_levels,
+	.get_mac_definition = polaris10_get_mac_definition,
+	.is_dpm_running = polaris10_is_dpm_running,
 };
 
 int polaris10_smum_init(struct pp_smumgr *smumgr)
@@ -998,10 +410,10 @@
 	polaris10_smu = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL);
 
 	if (polaris10_smu == NULL)
-		return -1;
+		return -EINVAL;
 
 	smumgr->backend = polaris10_smu;
-	smumgr->smumgr_funcs = &ellsemere_smu_funcs;
+	smumgr->smumgr_funcs = &polaris10_smu_funcs;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
index e5377ae..49ebf1d 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
@@ -24,45 +24,52 @@
 #ifndef _POLARIS10_SMUMANAGER_H
 #define _POLARIS10_SMUMANAGER_H
 
-#include <polaris10_ppsmc.h>
+
 #include <pp_endian.h>
+#include "smu74.h"
+#include "smu74_discrete.h"
+#include "smu7_smumgr.h"
+
+#define SMC_RAM_END 0x40000
 
 struct polaris10_avfs {
 	enum AVFS_BTC_STATUS avfs_btc_status;
 	uint32_t           avfs_btc_param;
 };
 
-struct polaris10_buffer_entry {
-	uint32_t data_size;
-	uint32_t mc_addr_low;
-	uint32_t mc_addr_high;
-	void *kaddr;
-	unsigned long  handle;
+struct polaris10_pt_defaults {
+	uint8_t   SviLoadLineEn;
+	uint8_t   SviLoadLineVddC;
+	uint8_t   TDC_VDDC_ThrottleReleaseLimitPerc;
+	uint8_t   TDC_MAWt;
+	uint8_t   TdcWaterfallCtl;
+	uint8_t   DTEAmbientTempBase;
+
+	uint32_t  DisplayCac;
+	uint32_t  BAPM_TEMP_GRADIENT;
+	uint16_t  BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
+	uint16_t  BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
+};
+
+
+
+struct polaris10_range_table {
+	uint32_t trans_lower_frequency; /* in 10khz */
+	uint32_t trans_upper_frequency;
 };
 
 struct polaris10_smumgr {
-	uint8_t *header;
-	uint8_t *mec_image;
-	struct polaris10_buffer_entry smu_buffer;
-	struct polaris10_buffer_entry header_buffer;
-	uint32_t soft_regs_start;
-	uint8_t *read_rrm_straps;
-	uint32_t read_drm_straps_mc_address_high;
-	uint32_t read_drm_straps_mc_address_low;
-	uint32_t acpi_optimization;
-	bool post_initial_boot;
+	struct smu7_smumgr smu7_data;
 	uint8_t protected_mode;
-	uint8_t security_hard_key;
 	struct polaris10_avfs  avfs;
+	SMU74_Discrete_DpmTable              smc_state_table;
+	struct SMU74_Discrete_Ulv            ulv_setting;
+	struct SMU74_Discrete_PmFuses  power_tune_table;
+	struct polaris10_range_table                range_table[NUM_SCLK_RANGE];
+	const struct polaris10_pt_defaults       *power_tune_defaults;
+	uint32_t                   activity_target[SMU74_MAX_LEVELS_GRAPHICS];
+	uint32_t                   bif_sclk_table[SMU74_MAX_LEVELS_LINK];
 };
 
 
-int polaris10_smum_init(struct pp_smumgr *smumgr);
-
-int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit);
-int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit);
-int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
-				const uint8_t *src, uint32_t byte_count, uint32_t limit);
-
 #endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
new file mode 100644
index 0000000..6af744f
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -0,0 +1,589 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+
+#include "smumgr.h"
+#include "smu_ucode_xfer_vi.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "smu7_ppsmc.h"
+#include "smu7_smumgr.h"
+
+#define SMU7_SMC_SIZE 0x20000
+
+static int smu7_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
+{
+	PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
+	PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);
+
+	cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
+	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */
+	return 0;
+}
+
+
+int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
+{
+	uint32_t data;
+	uint32_t addr;
+	uint8_t *dest_byte;
+	uint8_t i, data_byte[4] = {0};
+	uint32_t *pdata = (uint32_t *)&data_byte;
+
+	PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
+	PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
+
+	addr = smc_start_address;
+
+	while (byte_count >= 4) {
+		smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
+
+		*dest = PP_SMC_TO_HOST_UL(data);
+
+		dest += 1;
+		byte_count -= 4;
+		addr += 4;
+	}
+
+	if (byte_count) {
+		smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
+		*pdata = PP_SMC_TO_HOST_UL(data);
+	/* Cast dest into byte type in dest_byte.  This way, we don't overflow if the allocated memory is not 4-byte aligned. */
+		dest_byte = (uint8_t *)dest;
+		for (i = 0; i < byte_count; i++)
+			dest_byte[i] = data_byte[i];
+	}
+
+	return 0;
+}
+
+
+int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+				const uint8_t *src, uint32_t byte_count, uint32_t limit)
+{
+	int result;
+	uint32_t data = 0;
+	uint32_t original_data;
+	uint32_t addr = 0;
+	uint32_t extra_shift;
+
+	PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
+	PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
+
+	addr = smc_start_address;
+
+	while (byte_count >= 4) {
+	/* Bytes are written into the SMC addres space with the MSB first. */
+		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
+
+		result = smu7_set_smc_sram_address(smumgr, addr, limit);
+
+		if (0 != result)
+			return result;
+
+		cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+
+		src += 4;
+		byte_count -= 4;
+		addr += 4;
+	}
+
+	if (0 != byte_count) {
+
+		data = 0;
+
+		result = smu7_set_smc_sram_address(smumgr, addr, limit);
+
+		if (0 != result)
+			return result;
+
+
+		original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+
+		extra_shift = 8 * (4 - byte_count);
+
+		while (byte_count > 0) {
+			/* Bytes are written into the SMC addres space with the MSB first. */
+			data = (0x100 * data) + *src++;
+			byte_count--;
+		}
+
+		data <<= extra_shift;
+
+		data |= (original_data & ~((~0UL) << extra_shift));
+
+		result = smu7_set_smc_sram_address(smumgr, addr, limit);
+
+		if (0 != result)
+			return result;
+
+		cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+	}
+
+	return 0;
+}
+
+
+int smu7_program_jump_on_start(struct pp_smumgr *smumgr)
+{
+	static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
+
+	smu7_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1);
+
+	return 0;
+}
+
+bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr)
+{
+	return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
+	&& (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
+}
+
+int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+{
+	int ret;
+
+	if (!smu7_is_smc_ram_running(smumgr))
+		return -EINVAL;
+
+
+	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+	ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+
+	if (ret != 1)
+		printk("\n failed to send pre message %x ret is %d \n",  msg, ret);
+
+	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+
+	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+	ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+
+	if (ret != 1)
+		printk("\n failed to send message %x ret is %d \n",  msg, ret);
+
+	return 0;
+}
+
+int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
+{
+	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+
+	return 0;
+}
+
+int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+{
+	if (!smu7_is_smc_ram_running(smumgr)) {
+		return -EINVAL;
+	}
+
+	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+
+	return smu7_send_msg_to_smc(smumgr, msg);
+}
+
+int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+{
+	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+
+	return smu7_send_msg_to_smc_without_waiting(smumgr, msg);
+}
+
+int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
+{
+	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
+
+	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+
+	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+	if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
+		printk("Failed to send Message.\n");
+
+	return 0;
+}
+
+int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr)
+{
+	if (!smu7_is_smc_ram_running(smumgr))
+		return -EINVAL;
+
+	SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
+	return 0;
+}
+
+
+enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
+{
+	enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
+
+	switch (fw_type) {
+	case UCODE_ID_SMU:
+		result = CGS_UCODE_ID_SMU;
+		break;
+	case UCODE_ID_SMU_SK:
+		result = CGS_UCODE_ID_SMU_SK;
+		break;
+	case UCODE_ID_SDMA0:
+		result = CGS_UCODE_ID_SDMA0;
+		break;
+	case UCODE_ID_SDMA1:
+		result = CGS_UCODE_ID_SDMA1;
+		break;
+	case UCODE_ID_CP_CE:
+		result = CGS_UCODE_ID_CP_CE;
+		break;
+	case UCODE_ID_CP_PFP:
+		result = CGS_UCODE_ID_CP_PFP;
+		break;
+	case UCODE_ID_CP_ME:
+		result = CGS_UCODE_ID_CP_ME;
+		break;
+	case UCODE_ID_CP_MEC:
+		result = CGS_UCODE_ID_CP_MEC;
+		break;
+	case UCODE_ID_CP_MEC_JT1:
+		result = CGS_UCODE_ID_CP_MEC_JT1;
+		break;
+	case UCODE_ID_CP_MEC_JT2:
+		result = CGS_UCODE_ID_CP_MEC_JT2;
+		break;
+	case UCODE_ID_RLC_G:
+		result = CGS_UCODE_ID_RLC_G;
+		break;
+	default:
+		break;
+	}
+
+	return result;
+}
+
+
+int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
+{
+	int result;
+
+	result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
+
+	if (result)
+		return result;
+
+	*value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+	return 0;
+}
+
+int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
+{
+	int result;
+
+	result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
+
+	if (result)
+		return result;
+
+	cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
+
+	return 0;
+}
+
+/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */
+
+static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type)
+{
+	uint32_t result = 0;
+
+	switch (fw_type) {
+	case UCODE_ID_SDMA0:
+		result = UCODE_ID_SDMA0_MASK;
+		break;
+	case UCODE_ID_SDMA1:
+		result = UCODE_ID_SDMA1_MASK;
+		break;
+	case UCODE_ID_CP_CE:
+		result = UCODE_ID_CP_CE_MASK;
+		break;
+	case UCODE_ID_CP_PFP:
+		result = UCODE_ID_CP_PFP_MASK;
+		break;
+	case UCODE_ID_CP_ME:
+		result = UCODE_ID_CP_ME_MASK;
+		break;
+	case UCODE_ID_CP_MEC:
+	case UCODE_ID_CP_MEC_JT1:
+	case UCODE_ID_CP_MEC_JT2:
+		result = UCODE_ID_CP_MEC_MASK;
+		break;
+	case UCODE_ID_RLC_G:
+		result = UCODE_ID_RLC_G_MASK;
+		break;
+	default:
+		printk("UCode type is out of range! \n");
+		result = 0;
+	}
+
+	return result;
+}
+
+static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
+						uint32_t fw_type,
+						struct SMU_Entry *entry)
+{
+	int result = 0;
+	struct cgs_firmware_info info = {0};
+
+	result = cgs_get_firmware_info(smumgr->device,
+				smu7_convert_fw_type_to_cgs(fw_type),
+				&info);
+
+	if (!result) {
+		entry->version = info.version;
+		entry->id = (uint16_t)fw_type;
+		entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
+		entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
+		entry->meta_data_addr_high = 0;
+		entry->meta_data_addr_low = 0;
+		entry->data_size_byte = info.image_size;
+		entry->num_register_entries = 0;
+	}
+
+	if (fw_type == UCODE_ID_RLC_G)
+		entry->flags = 1;
+	else
+		entry->flags = 0;
+
+	return 0;
+}
+
+int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
+{
+	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+	uint32_t fw_to_load;
+	int result = 0;
+	struct SMU_DRAMData_TOC *toc;
+
+	if (!smumgr->reload_fw) {
+		printk(KERN_INFO "[ powerplay ] skip reloading...\n");
+		return 0;
+	}
+
+	if (smu_data->soft_regs_start)
+		cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+					smu_data->soft_regs_start + smum_get_offsetof(smumgr,
+					SMU_SoftRegisters, UcodeLoadStatus),
+					0x0);
+
+	if (smumgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
+		smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high);
+		smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low);
+		fw_to_load = UCODE_ID_RLC_G_MASK
+			   + UCODE_ID_SDMA0_MASK
+			   + UCODE_ID_SDMA1_MASK
+			   + UCODE_ID_CP_CE_MASK
+			   + UCODE_ID_CP_ME_MASK
+			   + UCODE_ID_CP_PFP_MASK
+			   + UCODE_ID_CP_MEC_MASK;
+	} else {
+		fw_to_load = UCODE_ID_RLC_G_MASK
+			   + UCODE_ID_SDMA0_MASK
+			   + UCODE_ID_SDMA1_MASK
+			   + UCODE_ID_CP_CE_MASK
+			   + UCODE_ID_CP_ME_MASK
+			   + UCODE_ID_CP_PFP_MASK
+			   + UCODE_ID_CP_MEC_MASK
+			   + UCODE_ID_CP_MEC_JT1_MASK
+			   + UCODE_ID_CP_MEC_JT2_MASK;
+	}
+
+	toc = (struct SMU_DRAMData_TOC *)smu_data->header;
+	toc->num_entries = 0;
+	toc->structure_version = 1;
+
+	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+				UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
+				"Failed to Get Firmware Entry.", return -EINVAL);
+	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+				UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
+				"Failed to Get Firmware Entry.", return -EINVAL);
+	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+				UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
+				"Failed to Get Firmware Entry.", return -EINVAL);
+	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+				UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
+				"Failed to Get Firmware Entry.", return -EINVAL);
+	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+				UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
+				"Failed to Get Firmware Entry.", return -EINVAL);
+	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+				UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
+				"Failed to Get Firmware Entry.", return -EINVAL);
+	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+				UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
+				"Failed to Get Firmware Entry.", return -EINVAL);
+	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+				UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
+				"Failed to Get Firmware Entry.", return -EINVAL);
+	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+				UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
+				"Failed to Get Firmware Entry.", return -EINVAL);
+
+	smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
+	smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
+
+	if (smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
+		printk(KERN_ERR "Fail to Request SMU Load uCode");
+
+	return result;
+}
+
+/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
+int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
+{
+	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+	uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type);
+	uint32_t ret;
+
+	ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
+					smu_data->soft_regs_start + smum_get_offsetof(smumgr,
+					SMU_SoftRegisters, UcodeLoadStatus),
+					fw_mask, fw_mask);
+
+	return ret;
+}
+
+int smu7_reload_firmware(struct pp_smumgr *smumgr)
+{
+	return smumgr->smumgr_funcs->start_smu(smumgr);
+}
+
+static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
+{
+	uint32_t byte_count = length;
+
+	PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
+
+	cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
+	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
+
+	for (; byte_count >= 4; byte_count -= 4)
+		cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
+
+	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
+
+	PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL);
+
+	return 0;
+}
+
+
+int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr)
+{
+	int result = 0;
+	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+
+	struct cgs_firmware_info info = {0};
+
+	if (smu_data->security_hard_key == 1)
+		cgs_get_firmware_info(smumgr->device,
+			smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
+	else
+		cgs_get_firmware_info(smumgr->device,
+			smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
+
+	result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
+
+	return result;
+}
+
+
+int smu7_init(struct pp_smumgr *smumgr)
+{
+	struct smu7_smumgr *smu_data;
+	uint8_t *internal_buf;
+	uint64_t mc_addr = 0;
+
+	/* Allocate memory for backend private data */
+	smu_data = (struct smu7_smumgr *)(smumgr->backend);
+	smu_data->header_buffer.data_size =
+			((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
+	smu_data->smu_buffer.data_size = 200*4096;
+
+/* Allocate FW image data structure and header buffer and
+ * send the header buffer address to SMU */
+	smu_allocate_memory(smumgr->device,
+		smu_data->header_buffer.data_size,
+		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
+		PAGE_SIZE,
+		&mc_addr,
+		&smu_data->header_buffer.kaddr,
+		&smu_data->header_buffer.handle);
+
+	smu_data->header = smu_data->header_buffer.kaddr;
+	smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
+	smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+
+	PP_ASSERT_WITH_CODE((NULL != smu_data->header),
+		"Out of memory.",
+		kfree(smumgr->backend);
+		cgs_free_gpu_mem(smumgr->device,
+		(cgs_handle_t)smu_data->header_buffer.handle);
+		return -EINVAL);
+
+	smu_allocate_memory(smumgr->device,
+		smu_data->smu_buffer.data_size,
+		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
+		PAGE_SIZE,
+		&mc_addr,
+		&smu_data->smu_buffer.kaddr,
+		&smu_data->smu_buffer.handle);
+
+	internal_buf = smu_data->smu_buffer.kaddr;
+	smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
+	smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+
+	PP_ASSERT_WITH_CODE((NULL != internal_buf),
+		"Out of memory.",
+		kfree(smumgr->backend);
+		cgs_free_gpu_mem(smumgr->device,
+		(cgs_handle_t)smu_data->smu_buffer.handle);
+		return -EINVAL);
+
+	return 0;
+}
+
+
+int smu7_smu_fini(struct pp_smumgr *smumgr)
+{
+	if (smumgr->backend) {
+		kfree(smumgr->backend);
+		smumgr->backend = NULL;
+	}
+	cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
new file mode 100644
index 0000000..76352f2
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _SMU7_SMUMANAGER_H
+#define _SMU7_SMUMANAGER_H
+
+
+#include <pp_endian.h>
+
+#define SMC_RAM_END 0x40000
+#define mmSMC_IND_INDEX_11                              0x01AC
+#define mmSMC_IND_DATA_11                               0x01AD
+
+struct smu7_buffer_entry {
+	uint32_t data_size;
+	uint32_t mc_addr_low;
+	uint32_t mc_addr_high;
+	void *kaddr;
+	unsigned long  handle;
+};
+
+struct smu7_smumgr {
+	uint8_t *header;
+	uint8_t *mec_image;
+	struct smu7_buffer_entry smu_buffer;
+	struct smu7_buffer_entry header_buffer;
+
+	uint32_t                             soft_regs_start;
+	uint32_t                             dpm_table_start;
+	uint32_t                             mc_reg_table_start;
+	uint32_t                             fan_table_start;
+	uint32_t                             arb_table_start;
+	uint32_t                             ulv_setting_starts;
+	uint8_t                              security_hard_key;
+	uint32_t acpi_optimization;
+};
+
+
+int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+				uint32_t *dest, uint32_t byte_count, uint32_t limit);
+int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+			const uint8_t *src, uint32_t byte_count, uint32_t limit);
+int smu7_program_jump_on_start(struct pp_smumgr *smumgr);
+bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr);
+int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg);
+int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg);
+int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg,
+						uint32_t parameter);
+int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr,
+						uint16_t msg, uint32_t parameter);
+int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr);
+int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr);
+
+enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type);
+int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
+						uint32_t *value, uint32_t limit);
+int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
+						uint32_t value, uint32_t limit);
+
+int smu7_request_smu_load_fw(struct pp_smumgr *smumgr);
+int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type);
+int smu7_reload_firmware(struct pp_smumgr *smumgr);
+int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr);
+int smu7_init(struct pp_smumgr *smumgr);
+int smu7_smu_fini(struct pp_smumgr *smumgr);
+
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 7723473..e5812aa 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -28,10 +28,7 @@
 #include "smumgr.h"
 #include "cgs_common.h"
 #include "linux/delay.h"
-#include "cz_smumgr.h"
-#include "tonga_smumgr.h"
-#include "fiji_smumgr.h"
-#include "polaris10_smumgr.h"
+
 
 int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
 {
@@ -47,7 +44,6 @@
 	smumgr->device = pp_init->device;
 	smumgr->chip_family = pp_init->chip_family;
 	smumgr->chip_id = pp_init->chip_id;
-	smumgr->hw_revision = pp_init->rev_id;
 	smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
 	smumgr->reload_fw = 1;
 	handle->smu_mgr = smumgr;
@@ -58,6 +54,9 @@
 		break;
 	case AMDGPU_FAMILY_VI:
 		switch (smumgr->chip_id) {
+		case CHIP_TOPAZ:
+			iceland_smum_init(smumgr);
+			break;
 		case CHIP_TONGA:
 			tonga_smum_init(smumgr);
 			break;
@@ -87,6 +86,57 @@
 	return 0;
 }
 
+int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
+		void *input, void *output, void *storage, int result)
+{
+	if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable)
+		return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr);
+
+	return 0;
+}
+
+int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
+		void *input, void *output, void *storage, int result)
+{
+	if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table)
+		return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr);
+
+	return 0;
+}
+
+int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+
+	if (NULL != hwmgr->smumgr->smumgr_funcs->update_sclk_threshold)
+		return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr);
+
+	return 0;
+}
+
+int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+
+	if (NULL != hwmgr->smumgr->smumgr_funcs->update_smc_table)
+		return hwmgr->smumgr->smumgr_funcs->update_smc_table(hwmgr, type);
+
+	return 0;
+}
+
+uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, uint32_t type, uint32_t member)
+{
+	if (NULL != smumgr->smumgr_funcs->get_offsetof)
+		return smumgr->smumgr_funcs->get_offsetof(type, member);
+
+	return 0;
+}
+
+int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+	if (NULL != hwmgr->smumgr->smumgr_funcs->process_firmware_header)
+		return hwmgr->smumgr->smumgr_funcs->process_firmware_header(hwmgr);
+	return 0;
+}
+
 int smum_get_argument(struct pp_smumgr *smumgr)
 {
 	if (NULL != smumgr->smumgr_funcs->get_argument)
@@ -95,13 +145,20 @@
 	return 0;
 }
 
+uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value)
+{
+	if (NULL != smumgr->smumgr_funcs->get_mac_definition)
+		return smumgr->smumgr_funcs->get_mac_definition(value);
+
+	return 0;
+}
+
 int smum_download_powerplay_table(struct pp_smumgr *smumgr,
 								void **table)
 {
 	if (NULL != smumgr->smumgr_funcs->download_pptable_settings)
 		return smumgr->smumgr_funcs->download_pptable_settings(smumgr,
 									table);
-
 	return 0;
 }
 
@@ -268,3 +325,44 @@
 
 	return 0;
 }
+
+int smum_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+	if (NULL != hwmgr->smumgr->smumgr_funcs->init_smc_table)
+		return hwmgr->smumgr->smumgr_funcs->init_smc_table(hwmgr);
+
+	return 0;
+}
+
+int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+	if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels)
+		return hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels(hwmgr);
+
+	return 0;
+}
+
+int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+	if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels)
+		return hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels(hwmgr);
+
+	return 0;
+}
+
+/*this interface is needed by island ci/vi */
+int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+	if (NULL != hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table)
+		return hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table(hwmgr);
+
+	return 0;
+}
+
+bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+	if (NULL != hwmgr->smumgr->smumgr_funcs->is_dpm_running)
+		return hwmgr->smumgr->smumgr_funcs->is_dpm_running(hwmgr);
+
+	return true;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
new file mode 100644
index 0000000..de2a24d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
@@ -0,0 +1,3206 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ *
+ */
+
+#include "tonga_smc.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "tonga_smumgr.h"
+#include "pppcielanes.h"
+#include "pp_endian.h"
+#include "smu7_ppsmc.h"
+
+#include "smu72_discrete.h"
+
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX    1
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define VDDC_VDDCI_DELTA            200
+
+
+static const struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,  TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,        BAPM_TEMP_GRADIENT
+ */
+	{1,               0xF,             0xFD,                0x19,
+	 5,               45,                 0,              0xB0000,
+	 {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8,
+		0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+	 {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203,
+		0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4}
+	},
+};
+
+/* [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
+static const uint16_t tonga_clock_stretcher_lookup_table[2][4] = {
+	{600, 1050, 3, 0},
+	{600, 1050, 6, 1}
+};
+
+/* [FF, SS] type, [] 4 voltage ranges,
+ * and [Floor Freq, Boundary Freq, VID min , VID max]
+ */
+static const uint32_t tonga_clock_stretcher_ddt_table[2][4][4] = {
+	{ {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+	{ {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} }
+};
+
+/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] */
+static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = {
+	{0, 1, 3, 2, 4, 5},
+	{0, 2, 4, 5, 6, 5}
+};
+
+/* PPGen has the gain setting generated in x * 100 unit
+ * This function is to convert the unit to x * 4096(0x1000) unit.
+ *  This is the unit expected by SMC firmware
+ */
+
+
+static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
+	phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
+	uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+	uint32_t i = 0;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *pptable_info =
+			   (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	/* clock - voltage dependency table is empty table */
+	if (allowed_clock_voltage_table->count == 0)
+		return -EINVAL;
+
+	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+		/* find first sclk bigger than request */
+		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+			voltage->VddGfx = phm_get_voltage_index(
+					pptable_info->vddgfx_lookup_table,
+				allowed_clock_voltage_table->entries[i].vddgfx);
+			voltage->Vddc = phm_get_voltage_index(
+						pptable_info->vddc_lookup_table,
+				  allowed_clock_voltage_table->entries[i].vddc);
+
+			if (allowed_clock_voltage_table->entries[i].vddci)
+				voltage->Vddci =
+					phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci);
+			else
+				voltage->Vddci =
+					phm_get_voltage_id(&data->vddci_voltage_table,
+						allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA);
+
+
+			if (allowed_clock_voltage_table->entries[i].mvdd)
+				*mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
+
+			voltage->Phases = 1;
+			return 0;
+		}
+	}
+
+	/* sclk is bigger than max sclk in the dependence table */
+	voltage->VddGfx = phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+		allowed_clock_voltage_table->entries[i-1].vddgfx);
+	voltage->Vddc = phm_get_voltage_index(pptable_info->vddc_lookup_table,
+		allowed_clock_voltage_table->entries[i-1].vddc);
+
+	if (allowed_clock_voltage_table->entries[i-1].vddci)
+		voltage->Vddci = phm_get_voltage_id(&data->vddci_voltage_table,
+			allowed_clock_voltage_table->entries[i-1].vddci);
+
+	if (allowed_clock_voltage_table->entries[i-1].mvdd)
+		*mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
+
+	return 0;
+}
+
+
+/**
+ * Vddc table preparation for SMC.
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    table     the SMC DPM table structure to be populated
+ * @return   always 0
+ */
+static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+			SMU72_Discrete_DpmTable *table)
+{
+	unsigned int count;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+		table->VddcLevelCount = data->vddc_voltage_table.count;
+		for (count = 0; count < table->VddcLevelCount; count++) {
+			table->VddcTable[count] =
+				PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
+		}
+		CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+	}
+	return 0;
+}
+
+/**
+ * VddGfx table preparation for SMC.
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    table     the SMC DPM table structure to be populated
+ * @return   always 0
+ */
+static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
+			SMU72_Discrete_DpmTable *table)
+{
+	unsigned int count;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+		table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
+		for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
+			table->VddGfxTable[count] =
+				PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
+		}
+		CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
+	}
+	return 0;
+}
+
+/**
+ * Vddci table preparation for SMC.
+ *
+ * @param    *hwmgr The address of the hardware manager.
+ * @param    *table The SMC DPM table structure to be populated.
+ * @return   0
+ */
+static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+			SMU72_Discrete_DpmTable *table)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t count;
+
+	table->VddciLevelCount = data->vddci_voltage_table.count;
+	for (count = 0; count < table->VddciLevelCount; count++) {
+		if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+			table->VddciTable[count] =
+				PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+		} else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+			table->SmioTable1.Pattern[count].Voltage =
+				PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+			/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
+			table->SmioTable1.Pattern[count].Smio =
+				(uint8_t) count;
+			table->Smio[count] |=
+				data->vddci_voltage_table.entries[count].smio_low;
+			table->VddciTable[count] =
+				PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+		}
+	}
+
+	table->SmioMask1 = data->vddci_voltage_table.mask_low;
+	CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+	return 0;
+}
+
+/**
+ * Mvdd table preparation for SMC.
+ *
+ * @param    *hwmgr The address of the hardware manager.
+ * @param    *table The SMC DPM table structure to be populated.
+ * @return   0
+ */
+static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+			SMU72_Discrete_DpmTable *table)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t count;
+
+	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+		table->MvddLevelCount = data->mvdd_voltage_table.count;
+		for (count = 0; count < table->MvddLevelCount; count++) {
+			table->SmioTable2.Pattern[count].Voltage =
+				PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+			/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+			table->SmioTable2.Pattern[count].Smio =
+				(uint8_t) count;
+			table->Smio[count] |=
+				data->mvdd_voltage_table.entries[count].smio_low;
+		}
+		table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+	}
+
+	return 0;
+}
+
+/**
+ * Preparation of vddc and vddgfx CAC tables for SMC.
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    table     the SMC DPM table structure to be populated
+ * @return   always 0
+ */
+static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
+			SMU72_Discrete_DpmTable *table)
+{
+	uint32_t count;
+	uint8_t index = 0;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *pptable_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table =
+					   pptable_info->vddgfx_lookup_table;
+	struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table =
+						pptable_info->vddc_lookup_table;
+
+	/* table is already swapped, so in order to use the value from it
+	 * we need to swap it back.
+	 */
+	uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
+	uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
+
+	for (count = 0; count < vddc_level_count; count++) {
+		/* We are populating vddc CAC data to BapmVddc table in split and merged mode */
+		index = phm_get_voltage_index(vddc_lookup_table,
+			data->vddc_voltage_table.entries[count].value);
+		table->BapmVddcVidLoSidd[count] =
+			convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
+		table->BapmVddcVidHiSidd[count] =
+			convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
+		table->BapmVddcVidHiSidd2[count] =
+			convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
+	}
+
+	if ((data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2)) {
+		/* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
+		for (count = 0; count < vddgfx_level_count; count++) {
+			index = phm_get_voltage_index(vddgfx_lookup_table,
+				convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid));
+			table->BapmVddGfxVidHiSidd2[count] =
+				convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
+		}
+	} else {
+		for (count = 0; count < vddc_level_count; count++) {
+			index = phm_get_voltage_index(vddc_lookup_table,
+				data->vddc_voltage_table.entries[count].value);
+			table->BapmVddGfxVidLoSidd[count] =
+				convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
+			table->BapmVddGfxVidHiSidd[count] =
+				convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
+			table->BapmVddGfxVidHiSidd2[count] =
+				convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Preparation of voltage tables for SMC.
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    table     the SMC DPM table structure to be populated
+ * @return   always 0
+ */
+
+static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+	SMU72_Discrete_DpmTable *table)
+{
+	int result;
+
+	result = tonga_populate_smc_vddc_table(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+			"can not populate VDDC voltage table to SMC",
+			return -EINVAL);
+
+	result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+			"can not populate VDDCI voltage table to SMC",
+			return -EINVAL);
+
+	result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+			"can not populate VDDGFX voltage table to SMC",
+			return -EINVAL);
+
+	result = tonga_populate_smc_mvdd_table(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+			"can not populate MVDD voltage table to SMC",
+			return -EINVAL);
+
+	result = tonga_populate_cac_tables(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+			"can not populate CAC voltage tables to SMC",
+			return -EINVAL);
+
+	return 0;
+}
+
+static int tonga_populate_ulv_level(struct pp_hwmgr *hwmgr,
+		struct SMU72_Discrete_Ulv *state)
+{
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	state->CcPwrDynRm = 0;
+	state->CcPwrDynRm1 = 0;
+
+	state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+	state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+			VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+	state->VddcPhase = 1;
+
+	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+	return 0;
+}
+
+static int tonga_populate_ulv_state(struct pp_hwmgr *hwmgr,
+		struct SMU72_Discrete_DpmTable *table)
+{
+	return tonga_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct smu7_dpm_table *dpm_table = &data->dpm_table;
+	struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	uint32_t i;
+
+	/* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+		table->LinkLevel[i].PcieGenSpeed  =
+			(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+		table->LinkLevel[i].PcieLaneCount =
+			(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+		table->LinkLevel[i].EnabledForActivity =
+			1;
+		table->LinkLevel[i].SPC =
+			(uint8_t)(data->pcie_spc_cap & 0xff);
+		table->LinkLevel[i].DownThreshold =
+			PP_HOST_TO_SMC_UL(5);
+		table->LinkLevel[i].UpThreshold =
+			PP_HOST_TO_SMC_UL(30);
+	}
+
+	smu_data->smc_state_table.LinkLevelCount =
+		(uint8_t)dpm_table->pcie_speed_table.count;
+	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+		phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+	return 0;
+}
+
+/**
+ * Calculates the SCLK dividers using the provided engine clock
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    engine_clock the engine clock to use to populate the structure
+ * @param    sclk        the SMC SCLK structure to be populated
+ */
+static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+		uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
+{
+	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	pp_atomctrl_clock_dividers_vi dividers;
+	uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+	uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+	uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+	uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+	uint32_t    reference_clock;
+	uint32_t reference_divider;
+	uint32_t fbdiv;
+	int result;
+
+	/* get the engine clock dividers for this clock value*/
+	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock,  &dividers);
+
+	PP_ASSERT_WITH_CODE(result == 0,
+		"Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
+	reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+	reference_divider = 1 + dividers.uc_pll_ref_div;
+
+	/* low 14 bits is fraction and high 12 bits is divider*/
+	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+	/* SPLL_FUNC_CNTL setup*/
+	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+		CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+		CG_SPLL_FUNC_CNTL, SPLL_PDIV_A,  dividers.uc_pll_post_div);
+
+	/* SPLL_FUNC_CNTL_3 setup*/
+	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+		CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+	/* set to use fractional accumulation*/
+	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+		CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+		pp_atomctrl_internal_ss_info ss_info;
+
+		uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+		if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+			/*
+			* ss_info.speed_spectrum_percentage -- in unit of 0.01%
+			* ss_info.speed_spectrum_rate -- in unit of khz
+			*/
+			/* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+			uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+			/* clkv = 2 * D * fbdiv / NS */
+			uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+			cg_spll_spread_spectrum =
+				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+			cg_spll_spread_spectrum =
+				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+			cg_spll_spread_spectrum_2 =
+				PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
+		}
+	}
+
+	sclk->SclkFrequency        = engine_clock;
+	sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
+	sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
+	sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
+	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
+	sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
+
+	return 0;
+}
+
+/**
+ * Populates single SMC SCLK structure using the provided engine clock
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    engine_clock the engine clock to use to populate the structure
+ * @param    sclk        the SMC SCLK structure to be populated
+ */
+static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+						uint32_t engine_clock,
+				uint16_t sclk_activity_level_threshold,
+				SMU72_Discrete_GraphicsLevel *graphic_level)
+{
+	int result;
+	uint32_t mvdd;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *pptable_info =
+			    (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+
+	/* populate graphics levels*/
+	result = tonga_get_dependecy_volt_by_clk(hwmgr,
+		pptable_info->vdd_dep_on_sclk, engine_clock,
+		&graphic_level->MinVoltage, &mvdd);
+	PP_ASSERT_WITH_CODE((!result),
+		"can not find VDDC voltage value for VDDC "
+		"engine clock dependency table", return result);
+
+	/* SCLK frequency in units of 10KHz*/
+	graphic_level->SclkFrequency = engine_clock;
+	/* Indicates maximum activity level for this performance level. 50% for now*/
+	graphic_level->ActivityLevel = sclk_activity_level_threshold;
+
+	graphic_level->CcPwrDynRm = 0;
+	graphic_level->CcPwrDynRm1 = 0;
+	/* this level can be used if activity is high enough.*/
+	graphic_level->EnabledForActivity = 0;
+	/* this level can be used for throttling.*/
+	graphic_level->EnabledForThrottle = 1;
+	graphic_level->UpHyst = 0;
+	graphic_level->DownHyst = 0;
+	graphic_level->VoltageDownHyst = 0;
+	graphic_level->PowerThrottle = 0;
+
+	data->display_timing.min_clock_in_sr =
+			hwmgr->display_config.min_core_set_clock_in_sr;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkDeepSleep))
+		graphic_level->DeepSleepDivId =
+				smu7_get_sleep_divider_id_from_clock(engine_clock,
+						data->display_timing.min_clock_in_sr);
+
+	/* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
+	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	if (!result) {
+		/* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
+		/* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
+		CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
+		CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
+	}
+
+	return result;
+}
+
+/**
+ * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+ *
+ * @param    hwmgr      the address of the hardware manager
+ */
+int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct smu7_dpm_table *dpm_table = &data->dpm_table;
+	struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
+	uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
+	uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
+				offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
+
+	uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
+						SMU72_MAX_LEVELS_GRAPHICS;
+
+	SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
+
+	uint32_t i, max_entry;
+	uint8_t highest_pcie_level_enabled = 0;
+	uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
+	uint8_t count = 0;
+	int result = 0;
+
+	memset(levels, 0x00, level_array_size);
+
+	for (i = 0; i < dpm_table->sclk_table.count; i++) {
+		result = tonga_populate_single_graphic_level(hwmgr,
+					dpm_table->sclk_table.dpm_levels[i].value,
+					(uint16_t)smu_data->activity_target[i],
+					&(smu_data->smc_state_table.GraphicsLevel[i]));
+		if (result != 0)
+			return result;
+
+		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+		if (i > 1)
+			smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+	}
+
+	/* Only enable level 0 for now. */
+	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+	/* set highest level watermark to high */
+	if (dpm_table->sclk_table.count > 1)
+		smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+			PPSMC_DISPLAY_WATERMARK_HIGH;
+
+	smu_data->smc_state_table.GraphicsDpmLevelCount =
+		(uint8_t)dpm_table->sclk_table.count;
+	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+		phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+	if (pcie_table != NULL) {
+		PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
+			"There must be 1 or more PCIE levels defined in PPTable.",
+			return -EINVAL);
+		max_entry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
+		for (i = 0; i < dpm_table->sclk_table.count; i++) {
+			smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
+				(uint8_t) ((i < max_entry) ? i : max_entry);
+		}
+	} else {
+		if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
+			printk(KERN_ERR "[ powerplay ] Pcie Dpm Enablemask is 0 !");
+
+		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+					(1<<(highest_pcie_level_enabled+1))) != 0)) {
+			highest_pcie_level_enabled++;
+		}
+
+		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+					(1<<lowest_pcie_level_enabled)) == 0)) {
+			lowest_pcie_level_enabled++;
+		}
+
+		while ((count < highest_pcie_level_enabled) &&
+				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+					(1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
+			count++;
+		}
+		mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
+			(lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
+
+
+		/* set pcieDpmLevel to highest_pcie_level_enabled*/
+		for (i = 2; i < dpm_table->sclk_table.count; i++)
+			smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to lowest_pcie_level_enabled*/
+		smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to mid_pcie_level_enabled*/
+		smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+	}
+	/* level count will send to smc once at init smc table and never change*/
+	result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_address,
+				(uint8_t *)levels, (uint32_t)level_array_size,
+								SMC_RAM_END);
+
+	return result;
+}
+
+/**
+ * Populates the SMC MCLK structure using the provided memory clock
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    memory_clock the memory clock to use to populate the structure
+ * @param    sclk        the SMC SCLK structure to be populated
+ */
+static int tonga_calculate_mclk_params(
+		struct pp_hwmgr *hwmgr,
+		uint32_t memory_clock,
+		SMU72_Discrete_MemoryLevel *mclk,
+		bool strobe_mode,
+		bool dllStateOn
+		)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+	uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+	uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+	uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+	uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+	uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+	uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+	uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
+	uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+	pp_atomctrl_memory_clock_param mpll_param;
+	int result;
+
+	result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+				memory_clock, &mpll_param, strobe_mode);
+	PP_ASSERT_WITH_CODE(
+			!result,
+			"Error retrieving Memory Clock Parameters from VBIOS.",
+			return result);
+
+	/* MPLL_FUNC_CNTL setup*/
+	mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL,
+					mpll_param.bw_ctrl);
+
+	/* MPLL_FUNC_CNTL_1 setup*/
+	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+					MPLL_FUNC_CNTL_1, CLKF,
+					mpll_param.mpll_fb_divider.cl_kf);
+	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+					MPLL_FUNC_CNTL_1, CLKFRAC,
+					mpll_param.mpll_fb_divider.clk_frac);
+	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+						MPLL_FUNC_CNTL_1, VCO_MODE,
+						mpll_param.vco_mode);
+
+	/* MPLL_AD_FUNC_CNTL setup*/
+	mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+					MPLL_AD_FUNC_CNTL, YCLK_POST_DIV,
+					mpll_param.mpll_post_divider);
+
+	if (data->is_memory_gddr5) {
+		/* MPLL_DQ_FUNC_CNTL setup*/
+		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
+						MPLL_DQ_FUNC_CNTL, YCLK_SEL,
+						mpll_param.yclk_sel);
+		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
+						MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV,
+						mpll_param.mpll_post_divider);
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+		/*
+		 ************************************
+		 Fref = Reference Frequency
+		 NF = Feedback divider ratio
+		 NR = Reference divider ratio
+		 Fnom = Nominal VCO output frequency = Fref * NF / NR
+		 Fs = Spreading Rate
+		 D = Percentage down-spread / 2
+		 Fint = Reference input frequency to PFD = Fref / NR
+		 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
+		 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
+		 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
+		 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
+		 *************************************
+		 */
+		pp_atomctrl_internal_ss_info ss_info;
+		uint32_t freq_nom;
+		uint32_t tmp;
+		uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
+
+		/* for GDDR5 for all modes and DDR3 */
+		if (1 == mpll_param.qdr)
+			freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
+		else
+			freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
+
+		/* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1*/
+		tmp = (freq_nom / reference_clock);
+		tmp = tmp * tmp;
+
+		if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+			/* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
+			/* ss.Info.speed_spectrum_rate -- in unit of khz */
+			/* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
+			/*     = reference_clock * 5 / speed_spectrum_rate */
+			uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+
+			/* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
+			/*     = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
+			uint32_t clkv =
+				(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+							ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+			mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+			mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+		}
+	}
+
+	/* MCLK_PWRMGT_CNTL setup */
+	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+	/* Save the result data to outpupt memory level structure */
+	mclk->MclkFrequency   = memory_clock;
+	mclk->MpllFuncCntl    = mpll_func_cntl;
+	mclk->MpllFuncCntl_1  = mpll_func_cntl_1;
+	mclk->MpllFuncCntl_2  = mpll_func_cntl_2;
+	mclk->MpllAdFuncCntl  = mpll_ad_func_cntl;
+	mclk->MpllDqFuncCntl  = mpll_dq_func_cntl;
+	mclk->MclkPwrmgtCntl  = mclk_pwrmgt_cntl;
+	mclk->DllCntl         = dll_cntl;
+	mclk->MpllSs1         = mpll_ss1;
+	mclk->MpllSs2         = mpll_ss2;
+
+	return 0;
+}
+
+static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
+		bool strobe_mode)
+{
+	uint8_t mc_para_index;
+
+	if (strobe_mode) {
+		if (memory_clock < 12500)
+			mc_para_index = 0x00;
+		else if (memory_clock > 47500)
+			mc_para_index = 0x0f;
+		else
+			mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
+	} else {
+		if (memory_clock < 65000)
+			mc_para_index = 0x00;
+		else if (memory_clock > 135000)
+			mc_para_index = 0x0f;
+		else
+			mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
+	}
+
+	return mc_para_index;
+}
+
+static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+{
+	uint8_t mc_para_index;
+
+	if (memory_clock < 10000)
+		mc_para_index = 0;
+	else if (memory_clock >= 80000)
+		mc_para_index = 0x0f;
+	else
+		mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
+
+	return mc_para_index;
+}
+
+
+static int tonga_populate_single_memory_level(
+		struct pp_hwmgr *hwmgr,
+		uint32_t memory_clock,
+		SMU72_Discrete_MemoryLevel *memory_level
+		)
+{
+	uint32_t mvdd = 0;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *pptable_info =
+			  (struct phm_ppt_v1_information *)(hwmgr->pptable);
+	int result = 0;
+	bool dll_state_on;
+	struct cgs_display_info info = {0};
+	uint32_t mclk_edc_wr_enable_threshold = 40000;
+	uint32_t mclk_stutter_mode_threshold = 30000;
+	uint32_t mclk_edc_enable_threshold = 40000;
+	uint32_t mclk_strobe_mode_threshold = 40000;
+
+	if (NULL != pptable_info->vdd_dep_on_mclk) {
+		result = tonga_get_dependecy_volt_by_clk(hwmgr,
+				pptable_info->vdd_dep_on_mclk,
+				memory_clock,
+				&memory_level->MinVoltage, &mvdd);
+		PP_ASSERT_WITH_CODE(
+			!result,
+			"can not find MinVddc voltage value from memory VDDC "
+			"voltage dependency table",
+			return result);
+	}
+
+	if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
+		memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
+	else
+		memory_level->MinMvdd = mvdd;
+
+	memory_level->EnabledForThrottle = 1;
+	memory_level->EnabledForActivity = 0;
+	memory_level->UpHyst = 0;
+	memory_level->DownHyst = 100;
+	memory_level->VoltageDownHyst = 0;
+
+	/* Indicates maximum activity level for this performance level.*/
+	memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+	memory_level->StutterEnable = 0;
+	memory_level->StrobeEnable = 0;
+	memory_level->EdcReadEnable = 0;
+	memory_level->EdcWriteEnable = 0;
+	memory_level->RttEnable = 0;
+
+	/* default set to low watermark. Highest level will be set to high later.*/
+	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+	data->display_timing.num_existing_displays = info.display_count;
+
+	if ((mclk_stutter_mode_threshold != 0) &&
+	    (memory_clock <= mclk_stutter_mode_threshold) &&
+	    (!data->is_uvd_enabled)
+	    && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
+	    && (data->display_timing.num_existing_displays <= 2)
+	    && (data->display_timing.num_existing_displays != 0))
+		memory_level->StutterEnable = 1;
+
+	/* decide strobe mode*/
+	memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
+		(memory_clock <= mclk_strobe_mode_threshold);
+
+	/* decide EDC mode and memory clock ratio*/
+	if (data->is_memory_gddr5) {
+		memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
+					memory_level->StrobeEnable);
+
+		if ((mclk_edc_enable_threshold != 0) &&
+				(memory_clock > mclk_edc_enable_threshold)) {
+			memory_level->EdcReadEnable = 1;
+		}
+
+		if ((mclk_edc_wr_enable_threshold != 0) &&
+				(memory_clock > mclk_edc_wr_enable_threshold)) {
+			memory_level->EdcWriteEnable = 1;
+		}
+
+		if (memory_level->StrobeEnable) {
+			if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
+					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
+				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+			} else {
+				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
+			}
+
+		} else {
+			dll_state_on = data->dll_default_on;
+		}
+	} else {
+		memory_level->StrobeRatio =
+			tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
+		dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+	}
+
+	result = tonga_calculate_mclk_params(hwmgr,
+		memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+
+	if (!result) {
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
+		/* MCLK frequency in units of 10KHz*/
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
+		/* Indicates maximum activity level for this performance level.*/
+		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
+		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
+	}
+
+	return result;
+}
+
+int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct tonga_smumgr *smu_data =
+			(struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	struct smu7_dpm_table *dpm_table = &data->dpm_table;
+	int result;
+
+	/* populate MCLK dpm table to SMU7 */
+	uint32_t level_array_address =
+				smu_data->smu7_data.dpm_table_start +
+				offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
+	uint32_t level_array_size =
+				sizeof(SMU72_Discrete_MemoryLevel) *
+				SMU72_MAX_LEVELS_MEMORY;
+	SMU72_Discrete_MemoryLevel *levels =
+				smu_data->smc_state_table.MemoryLevel;
+	uint32_t i;
+
+	memset(levels, 0x00, level_array_size);
+
+	for (i = 0; i < dpm_table->mclk_table.count; i++) {
+		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+			"can not populate memory level as memory clock is zero",
+			return -EINVAL);
+		result = tonga_populate_single_memory_level(
+				hwmgr,
+				dpm_table->mclk_table.dpm_levels[i].value,
+				&(smu_data->smc_state_table.MemoryLevel[i]));
+		if (result)
+			return result;
+	}
+
+	/* Only enable level 0 for now.*/
+	smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+	/*
+	* in order to prevent MC activity from stutter mode to push DPM up.
+	* the UVD change complements this by putting the MCLK in a higher state
+	* by default such that we are not effected by up threshold or and MCLK DPM latency.
+	*/
+	smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
+	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
+
+	smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
+	data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+	/* set highest level watermark to high*/
+	smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+
+	/* level count will send to smc once at init smc table and never change*/
+	result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+		level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
+		SMC_RAM_END);
+
+	return result;
+}
+
+static int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+				uint32_t mclk, SMIO_Pattern *smio_pattern)
+{
+	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint32_t i = 0;
+
+	if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+		/* find mvdd value which clock is more than request */
+		for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+			if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+				/* Always round to higher voltage. */
+				smio_pattern->Voltage =
+				      data->mvdd_voltage_table.entries[i].value;
+				break;
+			}
+		}
+
+		PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+			"MVDD Voltage is outside the supported range.",
+			return -EINVAL);
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+	SMU72_Discrete_DpmTable *table)
+{
+	int result = 0;
+	struct tonga_smumgr *smu_data =
+				(struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct pp_atomctrl_clock_dividers_vi dividers;
+
+	SMIO_Pattern voltage_level;
+	uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+	uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+	uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
+	uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;
+
+	/* The ACPI state should not do DPM on DC (or ever).*/
+	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+	table->ACPILevel.MinVoltage =
+			smu_data->smc_state_table.GraphicsLevel[0].MinVoltage;
+
+	/* assign zero for now*/
+	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+	/* get the engine clock dividers for this clock value*/
+	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+		table->ACPILevel.SclkFrequency,  &dividers);
+
+	PP_ASSERT_WITH_CODE(result == 0,
+		"Error retrieving Engine Clock dividers from VBIOS.",
+		return result);
+
+	/* divider ID for required SCLK*/
+	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+	table->ACPILevel.DeepSleepDivId = 0;
+
+	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+					SPLL_PWRON, 0);
+	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+						SPLL_RESET, 1);
+	spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
+						SCLK_MUX_SEL, 4);
+
+	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+	table->ACPILevel.CcPwrDynRm = 0;
+	table->ACPILevel.CcPwrDynRm1 = 0;
+
+
+	/* For various features to be enabled/disabled while this level is active.*/
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+	/* SCLK frequency in units of 10KHz*/
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+	/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
+	table->MemoryACPILevel.MinVoltage =
+			    smu_data->smc_state_table.MemoryLevel[0].MinVoltage;
+
+	/*  CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
+
+	if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
+		table->MemoryACPILevel.MinMvdd =
+			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+	else
+		table->MemoryACPILevel.MinMvdd = 0;
+
+	/* Force reset on DLL*/
+	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+	/* Disable DLL in ACPIState*/
+	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+	/* Enable DLL bypass signal*/
+	dll_cntl            = PHM_SET_FIELD(dll_cntl,
+		DLL_CNTL, MRDCK0_BYPASS, 0);
+	dll_cntl            = PHM_SET_FIELD(dll_cntl,
+		DLL_CNTL, MRDCK1_BYPASS, 0);
+
+	table->MemoryACPILevel.DllCntl            =
+		PP_HOST_TO_SMC_UL(dll_cntl);
+	table->MemoryACPILevel.MclkPwrmgtCntl     =
+		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
+	table->MemoryACPILevel.MpllAdFuncCntl     =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
+	table->MemoryACPILevel.MpllDqFuncCntl     =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
+	table->MemoryACPILevel.MpllFuncCntl       =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
+	table->MemoryACPILevel.MpllFuncCntl_1     =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
+	table->MemoryACPILevel.MpllFuncCntl_2     =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
+	table->MemoryACPILevel.MpllSs1            =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
+	table->MemoryACPILevel.MpllSs2            =
+		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
+
+	table->MemoryACPILevel.EnabledForThrottle = 0;
+	table->MemoryACPILevel.EnabledForActivity = 0;
+	table->MemoryACPILevel.UpHyst = 0;
+	table->MemoryACPILevel.DownHyst = 100;
+	table->MemoryACPILevel.VoltageDownHyst = 0;
+	/* Indicates maximum activity level for this performance level.*/
+	table->MemoryACPILevel.ActivityLevel =
+			PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+	table->MemoryACPILevel.StutterEnable = 0;
+	table->MemoryACPILevel.StrobeEnable = 0;
+	table->MemoryACPILevel.EdcReadEnable = 0;
+	table->MemoryACPILevel.EdcWriteEnable = 0;
+	table->MemoryACPILevel.RttEnable = 0;
+
+	return result;
+}
+
+static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+					SMU72_Discrete_DpmTable *table)
+{
+	int result = 0;
+
+	uint8_t count;
+	pp_atomctrl_clock_dividers_vi dividers;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *pptable_info =
+				(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+						pptable_info->mm_dep_table;
+
+	table->UvdLevelCount = (uint8_t) (mm_table->count);
+	table->UvdBootLevel = 0;
+
+	for (count = 0; count < table->UvdLevelCount; count++) {
+		table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+		table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+		table->UvdLevel[count].MinVoltage.Vddc =
+			phm_get_voltage_index(pptable_info->vddc_lookup_table,
+						mm_table->entries[count].vddc);
+		table->UvdLevel[count].MinVoltage.VddGfx =
+			(data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+			phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+						mm_table->entries[count].vddgfx) : 0;
+		table->UvdLevel[count].MinVoltage.Vddci =
+			phm_get_voltage_id(&data->vddci_voltage_table,
+					     mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+		table->UvdLevel[count].MinVoltage.Phases = 1;
+
+		/* retrieve divider value for VBIOS */
+		result = atomctrl_get_dfs_pll_dividers_vi(
+					hwmgr,
+					table->UvdLevel[count].VclkFrequency,
+					&dividers);
+
+		PP_ASSERT_WITH_CODE((!result),
+				    "can not find divide id for Vclk clock",
+					return result);
+
+		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+							  table->UvdLevel[count].DclkFrequency, &dividers);
+		PP_ASSERT_WITH_CODE((!result),
+				    "can not find divide id for Dclk clock",
+					return result);
+
+		table->UvdLevel[count].DclkDivider =
+					(uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+	}
+
+	return result;
+
+}
+
+static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+		SMU72_Discrete_DpmTable *table)
+{
+	int result = 0;
+
+	uint8_t count;
+	pp_atomctrl_clock_dividers_vi dividers;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *pptable_info =
+			      (struct phm_ppt_v1_information *)(hwmgr->pptable);
+	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+						     pptable_info->mm_dep_table;
+
+	table->VceLevelCount = (uint8_t) (mm_table->count);
+	table->VceBootLevel = 0;
+
+	for (count = 0; count < table->VceLevelCount; count++) {
+		table->VceLevel[count].Frequency =
+			mm_table->entries[count].eclk;
+		table->VceLevel[count].MinVoltage.Vddc =
+			phm_get_voltage_index(pptable_info->vddc_lookup_table,
+				mm_table->entries[count].vddc);
+		table->VceLevel[count].MinVoltage.VddGfx =
+			(data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+			phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+				mm_table->entries[count].vddgfx) : 0;
+		table->VceLevel[count].MinVoltage.Vddci =
+			phm_get_voltage_id(&data->vddci_voltage_table,
+				mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+		table->VceLevel[count].MinVoltage.Phases = 1;
+
+		/* retrieve divider value for VBIOS */
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+					table->VceLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((!result),
+				"can not find divide id for VCE engine clock",
+				return result);
+
+		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+	}
+
+	return result;
+}
+
+static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+		SMU72_Discrete_DpmTable *table)
+{
+	int result = 0;
+	uint8_t count;
+	pp_atomctrl_clock_dividers_vi dividers;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *pptable_info =
+			     (struct phm_ppt_v1_information *)(hwmgr->pptable);
+	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+						    pptable_info->mm_dep_table;
+
+	table->AcpLevelCount = (uint8_t) (mm_table->count);
+	table->AcpBootLevel = 0;
+
+	for (count = 0; count < table->AcpLevelCount; count++) {
+		table->AcpLevel[count].Frequency =
+			pptable_info->mm_dep_table->entries[count].aclk;
+		table->AcpLevel[count].MinVoltage.Vddc =
+			phm_get_voltage_index(pptable_info->vddc_lookup_table,
+			mm_table->entries[count].vddc);
+		table->AcpLevel[count].MinVoltage.VddGfx =
+			(data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+			phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+				mm_table->entries[count].vddgfx) : 0;
+		table->AcpLevel[count].MinVoltage.Vddci =
+			phm_get_voltage_id(&data->vddci_voltage_table,
+				mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+		table->AcpLevel[count].MinVoltage.Phases = 1;
+
+		/* retrieve divider value for VBIOS */
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+			table->AcpLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((!result),
+			"can not find divide id for engine clock", return result);
+
+		table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+	}
+
+	return result;
+}
+
+static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+		SMU72_Discrete_DpmTable *table)
+{
+	int result = 0;
+	uint8_t count;
+	pp_atomctrl_clock_dividers_vi dividers;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *pptable_info =
+			     (struct phm_ppt_v1_information *)(hwmgr->pptable);
+	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+						    pptable_info->mm_dep_table;
+
+	table->SamuBootLevel = 0;
+	table->SamuLevelCount = (uint8_t) (mm_table->count);
+
+	for (count = 0; count < table->SamuLevelCount; count++) {
+		/* not sure whether we need evclk or not */
+		table->SamuLevel[count].Frequency =
+			pptable_info->mm_dep_table->entries[count].samclock;
+		table->SamuLevel[count].MinVoltage.Vddc =
+			phm_get_voltage_index(pptable_info->vddc_lookup_table,
+				mm_table->entries[count].vddc);
+		table->SamuLevel[count].MinVoltage.VddGfx =
+			(data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+			phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+				mm_table->entries[count].vddgfx) : 0;
+		table->SamuLevel[count].MinVoltage.Vddci =
+			phm_get_voltage_id(&data->vddci_voltage_table,
+				mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+		table->SamuLevel[count].MinVoltage.Phases = 1;
+
+		/* retrieve divider value for VBIOS */
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+					table->SamuLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((!result),
+			"can not find divide id for samu clock", return result);
+
+		table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+	}
+
+	return result;
+}
+
+static int tonga_populate_memory_timing_parameters(
+		struct pp_hwmgr *hwmgr,
+		uint32_t engine_clock,
+		uint32_t memory_clock,
+		struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
+		)
+{
+	uint32_t dramTiming;
+	uint32_t dramTiming2;
+	uint32_t burstTime;
+	int result;
+
+	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+				engine_clock, memory_clock);
+
+	PP_ASSERT_WITH_CODE(result == 0,
+		"Error calling VBIOS to set DRAM_TIMING.", return result);
+
+	dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+	dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+	burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
+	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+	arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+	return 0;
+}
+
+/**
+ * Setup parameters for the MC ARB.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ * This function is to be called from the SetPowerState table.
+ */
+static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct tonga_smumgr *smu_data =
+				(struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	int result = 0;
+	SMU72_Discrete_MCArbDramTimingTable  arb_regs;
+	uint32_t i, j;
+
+	memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
+
+	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+		for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+			result = tonga_populate_memory_timing_parameters
+				(hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
+				 data->dpm_table.mclk_table.dpm_levels[j].value,
+				 &arb_regs.entries[i][j]);
+
+			if (result)
+				break;
+		}
+	}
+
+	if (!result) {
+		result = smu7_copy_bytes_to_smc(
+				hwmgr->smumgr,
+				smu_data->smu7_data.arb_table_start,
+				(uint8_t *)&arb_regs,
+				sizeof(SMU72_Discrete_MCArbDramTimingTable),
+				SMC_RAM_END
+				);
+	}
+
+	return result;
+}
+
+static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+			SMU72_Discrete_DpmTable *table)
+{
+	int result = 0;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct tonga_smumgr *smu_data =
+				(struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	table->GraphicsBootLevel = 0;
+	table->MemoryBootLevel = 0;
+
+	/* find boot level from dpm table*/
+	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+	data->vbios_boot_state.sclk_bootup_value,
+	(uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
+
+	if (result != 0) {
+		smu_data->smc_state_table.GraphicsBootLevel = 0;
+		printk(KERN_ERR "[powerplay] VBIOS did not find boot engine "
+				"clock value in dependency table. "
+				"Using Graphics DPM level 0 !");
+		result = 0;
+	}
+
+	result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+		data->vbios_boot_state.mclk_bootup_value,
+		(uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
+
+	if (result != 0) {
+		smu_data->smc_state_table.MemoryBootLevel = 0;
+		printk(KERN_ERR "[powerplay] VBIOS did not find boot "
+				"engine clock value in dependency table."
+				"Using Memory DPM level 0 !");
+		result = 0;
+	}
+
+	table->BootVoltage.Vddc =
+		phm_get_voltage_id(&(data->vddc_voltage_table),
+			data->vbios_boot_state.vddc_bootup_value);
+	table->BootVoltage.VddGfx =
+		phm_get_voltage_id(&(data->vddgfx_voltage_table),
+			data->vbios_boot_state.vddgfx_bootup_value);
+	table->BootVoltage.Vddci =
+		phm_get_voltage_id(&(data->vddci_voltage_table),
+			data->vbios_boot_state.vddci_bootup_value);
+	table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+	CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+	return result;
+}
+
+static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+	uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
+			volt_with_cks, value;
+	uint16_t clock_freq_u16;
+	struct tonga_smumgr *smu_data =
+				(struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
+			volt_offset = 0;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+			table_info->vdd_dep_on_sclk;
+	uint32_t hw_revision, dev_id;
+	struct cgs_system_info sys_info = {0};
+
+	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+	sys_info.size = sizeof(struct cgs_system_info);
+
+	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
+	cgs_query_system_info(hwmgr->device, &sys_info);
+	hw_revision = (uint32_t)sys_info.value;
+
+	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+	cgs_query_system_info(hwmgr->device, &sys_info);
+	dev_id = (uint32_t)sys_info.value;
+
+	/* Read SMU_Eefuse to read and calculate RO and determine
+	 * if the part is SS or FF. if RO >= 1660MHz, part is FF.
+	 */
+	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixSMU_EFUSE_0 + (146 * 4));
+	efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixSMU_EFUSE_0 + (148 * 4));
+	efuse &= 0xFF000000;
+	efuse = efuse >> 24;
+	efuse2 &= 0xF;
+
+	if (efuse2 == 1)
+		ro = (2300 - 1350) * efuse / 255 + 1350;
+	else
+		ro = (2500 - 1000) * efuse / 255 + 1000;
+
+	if (ro >= 1660)
+		type = 0;
+	else
+		type = 1;
+
+	/* Populate Stretch amount */
+	smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
+
+
+	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+	for (i = 0; i < sclk_table->count; i++) {
+		smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+				sclk_table->entries[i].cks_enable << i;
+		if (ASICID_IS_TONGA_P(dev_id, hw_revision)) {
+			volt_without_cks = (uint32_t)((7732 + 60 - ro - 20838 *
+				(sclk_table->entries[i].clk/100) / 10000) * 1000 /
+				(8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000)));
+			volt_with_cks = (uint32_t)((5250 + 51 - ro - 2404 *
+				(sclk_table->entries[i].clk/100) / 100000) * 1000 /
+				(6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000)));
+		} else {
+			volt_without_cks = (uint32_t)((14041 *
+				(sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
+				(4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
+			volt_with_cks = (uint32_t)((13946 *
+				(sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
+				(3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+		}
+		if (volt_without_cks >= volt_with_cks)
+			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+					sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+		smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+	}
+
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+			STRETCH_ENABLE, 0x0);
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+			masterReset, 0x1);
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+			staticEnable, 0x1);
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+			masterReset, 0x0);
+
+	/* Populate CKS Lookup Table */
+	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+		stretch_amount2 = 0;
+	else if (stretch_amount == 3 || stretch_amount == 4)
+		stretch_amount2 = 1;
+	else {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_ClockStretcher);
+		PP_ASSERT_WITH_CODE(false,
+				"Stretch Amount in PPTable not supported\n",
+				return -EINVAL);
+	}
+
+	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixPWR_CKS_CNTL);
+	value &= 0xFFC2FF87;
+	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
+			tonga_clock_stretcher_lookup_table[stretch_amount2][0];
+	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
+			tonga_clock_stretcher_lookup_table[stretch_amount2][1];
+	clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
+			GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
+			SclkFrequency) / 100);
+	if (tonga_clock_stretcher_lookup_table[stretch_amount2][0] <
+			clock_freq_u16 &&
+	    tonga_clock_stretcher_lookup_table[stretch_amount2][1] >
+			clock_freq_u16) {
+		/* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
+		value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
+		/* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
+		value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
+		/* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
+		value |= (tonga_clock_stretch_amount_conversion
+				[tonga_clock_stretcher_lookup_table[stretch_amount2][3]]
+				 [stretch_amount]) << 3;
+	}
+	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+			CKS_LOOKUPTableEntry[0].minFreq);
+	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+			CKS_LOOKUPTableEntry[0].maxFreq);
+	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
+			tonga_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
+	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
+			(tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixPWR_CKS_CNTL, value);
+
+	/* Populate DDT Lookup Table */
+	for (i = 0; i < 4; i++) {
+		/* Assign the minimum and maximum VID stored
+		 * in the last row of Clock Stretcher Voltage Table.
+		 */
+		smu_data->smc_state_table.ClockStretcherDataTable.
+		ClockStretcherDataTableEntry[i].minVID =
+				(uint8_t) tonga_clock_stretcher_ddt_table[type][i][2];
+		smu_data->smc_state_table.ClockStretcherDataTable.
+		ClockStretcherDataTableEntry[i].maxVID =
+				(uint8_t) tonga_clock_stretcher_ddt_table[type][i][3];
+		/* Loop through each SCLK and check the frequency
+		 * to see if it lies within the frequency for clock stretcher.
+		 */
+		for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
+			cks_setting = 0;
+			clock_freq = PP_SMC_TO_HOST_UL(
+					smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
+			/* Check the allowed frequency against the sclk level[j].
+			 *  Sclk's endianness has already been converted,
+			 *  and it's in 10Khz unit,
+			 *  as opposed to Data table, which is in Mhz unit.
+			 */
+			if (clock_freq >= tonga_clock_stretcher_ddt_table[type][i][0] * 100) {
+				cks_setting |= 0x2;
+				if (clock_freq < tonga_clock_stretcher_ddt_table[type][i][1] * 100)
+					cks_setting |= 0x1;
+			}
+			smu_data->smc_state_table.ClockStretcherDataTable.
+			ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
+		}
+		CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
+				ClockStretcherDataTable.
+				ClockStretcherDataTableEntry[i].setting);
+	}
+
+	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+					ixPWR_CKS_CNTL);
+	value &= 0xFFFFFFFE;
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+					ixPWR_CKS_CNTL, value);
+
+	return 0;
+}
+
+/**
+ * Populates the SMC VRConfig field in DPM table.
+ *
+ * @param    hwmgr      the address of the hardware manager
+ * @param    table     the SMC DPM table structure to be populated
+ * @return   always 0
+ */
+static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
+			SMU72_Discrete_DpmTable *table)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint16_t config;
+
+	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+		/*  Splitted mode */
+		config = VR_SVI2_PLANE_1;
+		table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
+
+		if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+			config = VR_SVI2_PLANE_2;
+			table->VRConfig |= config;
+		} else {
+			printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should "
+				"be both on SVI2 control in splitted mode !\n");
+		}
+	} else {
+		/* Merged mode  */
+		config = VR_MERGED_WITH_VDDC;
+		table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
+
+		/* Set Vddc Voltage Controller  */
+		if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+			config = VR_SVI2_PLANE_1;
+			table->VRConfig |= config;
+		} else {
+			printk(KERN_ERR "[ powerplay ] VDDC should be on "
+					"SVI2 control in merged mode !\n");
+		}
+	}
+
+	/* Set Vddci Voltage Controller  */
+	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+		config = VR_SVI2_PLANE_2;  /* only in merged mode */
+		table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
+	} else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+		config = VR_SMIO_PATTERN_1;
+		table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
+	}
+
+	/* Set Mvdd Voltage Controller */
+	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+		config = VR_SMIO_PATTERN_2;
+		table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
+	}
+
+	return 0;
+}
+
+
+/**
+ * Initialize the ARB DRAM timing table's index field.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+static int tonga_init_arb_table_index(struct pp_smumgr *smumgr)
+{
+	struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+	uint32_t tmp;
+	int result;
+
+	/*
+	* This is a read-modify-write on the first byte of the ARB table.
+	* The first byte in the SMU72_Discrete_MCArbDramTimingTable structure
+	* is the field 'current'.
+	* This solution is ugly, but we never write the whole table only
+	* individual fields in it.
+	* In reality this field should not be in that structure
+	* but in a soft register.
+	*/
+	result = smu7_read_smc_sram_dword(smumgr,
+				smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+	if (result != 0)
+		return result;
+
+	tmp &= 0x00FFFFFF;
+	tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+	return smu7_write_smc_sram_dword(smumgr,
+			smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+
+static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_smumgr *smu_data =
+				(struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+	SMU72_Discrete_DpmTable  *dpm_table = &(smu_data->smc_state_table);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+	int  i, j, k;
+	const uint16_t *pdef1, *pdef2;
+
+	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
+			(uint16_t)(cac_dtp_table->usTDP * 256));
+	dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
+			(uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
+
+	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+			"Target Operating Temp is out of Range !",
+			);
+
+	dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
+	dpm_table->GpuTjHyst = 8;
+
+	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+	dpm_table->BAPM_TEMP_GRADIENT =
+				PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
+	pdef1 = defaults->bapmti_r;
+	pdef2 = defaults->bapmti_rc;
+
+	for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
+		for (j = 0; j < SMU72_DTE_SOURCES; j++) {
+			for (k = 0; k < SMU72_DTE_SINKS; k++) {
+				dpm_table->BAPMTI_R[i][j][k] =
+						PP_HOST_TO_SMC_US(*pdef1);
+				dpm_table->BAPMTI_RC[i][j][k] =
+						PP_HOST_TO_SMC_US(*pdef2);
+				pdef1++;
+				pdef2++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_smumgr *smu_data =
+				(struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+	smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+	smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
+	smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+	smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+	return 0;
+}
+
+static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+	uint16_t tdc_limit;
+	struct tonga_smumgr *smu_data =
+				(struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	/* TDC number of fraction bits are changed from 8 to 7
+	 * for Fiji as requested by SMC team
+	 */
+	tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
+	smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+	smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+			defaults->tdc_vddc_throttle_release_limit_perc;
+	smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+	return 0;
+}
+
+static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+	struct tonga_smumgr *smu_data =
+			(struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+	uint32_t temp;
+
+	if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+			fuse_table_offset +
+			offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
+			(uint32_t *)&temp, SMC_RAM_END))
+		PP_ASSERT_WITH_CODE(false,
+				"Attempt to read PmFuses.DW6 "
+				"(SviLoadLineEn) from SMC Failed !",
+				return -EINVAL);
+	else
+		smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+	return 0;
+}
+
+static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct tonga_smumgr *smu_data =
+				(struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+	/* Currently not used. Set all to zero. */
+	for (i = 0; i < 16; i++)
+		smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+	return 0;
+}
+
+static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+	if ((hwmgr->thermal_controller.advanceFanControlParameters.
+			usFanOutputSensitivity & (1 << 15)) ||
+		(hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
+		hwmgr->thermal_controller.advanceFanControlParameters.
+		usFanOutputSensitivity = hwmgr->thermal_controller.
+			advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+	smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
+			PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
+					advanceFanControlParameters.usFanOutputSensitivity);
+	return 0;
+}
+
+static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct tonga_smumgr *smu_data =
+				(struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+	/* Currently not used. Set all to zero. */
+	for (i = 0; i < 16; i++)
+		smu_data->power_tune_table.GnbLPML[i] = 0;
+
+	return 0;
+}
+
+static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+	return 0;
+}
+
+static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_smumgr *smu_data =
+				(struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+	uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+	struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+	hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+	lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+	smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+	smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+	return 0;
+}
+
+static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_smumgr *smu_data =
+				(struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	uint32_t pm_fuse_table_offset;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment)) {
+		if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+				SMU72_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU72_Firmware_Header, PmFuseTable),
+				&pm_fuse_table_offset, SMC_RAM_END))
+			PP_ASSERT_WITH_CODE(false,
+				"Attempt to get pm_fuse_table_offset Failed !",
+				return -EINVAL);
+
+		/* DW6 */
+		if (tonga_populate_svi_load_line(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+				"Attempt to populate SviLoadLine Failed !",
+				return -EINVAL);
+		/* DW7 */
+		if (tonga_populate_tdc_limit(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate TDCLimit Failed !",
+					return -EINVAL);
+		/* DW8 */
+		if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
+			PP_ASSERT_WITH_CODE(false,
+				"Attempt to populate TdcWaterfallCtl Failed !",
+				return -EINVAL);
+
+		/* DW9-DW12 */
+		if (tonga_populate_temperature_scaler(hwmgr) != 0)
+			PP_ASSERT_WITH_CODE(false,
+				"Attempt to populate LPMLTemperatureScaler Failed !",
+				return -EINVAL);
+
+		/* DW13-DW14 */
+		if (tonga_populate_fuzzy_fan(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+				"Attempt to populate Fuzzy Fan "
+				"Control parameters Failed !",
+				return -EINVAL);
+
+		/* DW15-DW18 */
+		if (tonga_populate_gnb_lpml(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+				"Attempt to populate GnbLPML Failed !",
+				return -EINVAL);
+
+		/* DW19 */
+		if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+				"Attempt to populate GnbLPML "
+				"Min and Max Vid Failed !",
+				return -EINVAL);
+
+		/* DW20 */
+		if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+			PP_ASSERT_WITH_CODE(
+				false,
+				"Attempt to populate BapmVddCBaseLeakage "
+				"Hi and Lo Sidd Failed !",
+				return -EINVAL);
+
+		if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+				(uint8_t *)&smu_data->power_tune_table,
+				sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to download PmFuseTable Failed !",
+					return -EINVAL);
+	}
+	return 0;
+}
+
+static int tonga_populate_mc_reg_address(struct pp_smumgr *smumgr,
+				 SMU72_Discrete_MCRegisters *mc_reg_table)
+{
+	const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)smumgr->backend;
+
+	uint32_t i, j;
+
+	for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
+		if (smu_data->mc_reg_table.validflag & 1<<j) {
+			PP_ASSERT_WITH_CODE(
+				i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+				"Index of mc_reg_table->address[] array "
+				"out of boundary",
+				return -EINVAL);
+			mc_reg_table->address[i].s0 =
+				PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
+			mc_reg_table->address[i].s1 =
+				PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
+			i++;
+		}
+	}
+
+	mc_reg_table->last = (uint8_t)i;
+
+	return 0;
+}
+
+/*convert register values from driver to SMC format */
+static void tonga_convert_mc_registers(
+	const struct tonga_mc_reg_entry *entry,
+	SMU72_Discrete_MCRegisterSet *data,
+	uint32_t num_entries, uint32_t valid_flag)
+{
+	uint32_t i, j;
+
+	for (i = 0, j = 0; j < num_entries; j++) {
+		if (valid_flag & 1<<j) {
+			data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
+			i++;
+		}
+	}
+}
+
+static int tonga_convert_mc_reg_table_entry_to_smc(
+		struct pp_smumgr *smumgr,
+		const uint32_t memory_clock,
+		SMU72_Discrete_MCRegisterSet *mc_reg_table_data
+		)
+{
+	struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+	uint32_t i = 0;
+
+	for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
+		if (memory_clock <=
+			smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+			break;
+		}
+	}
+
+	if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
+		--i;
+
+	tonga_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+				mc_reg_table_data, smu_data->mc_reg_table.last,
+				smu_data->mc_reg_table.validflag);
+
+	return 0;
+}
+
+static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+		SMU72_Discrete_MCRegisters *mc_regs)
+{
+	int result = 0;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	int res;
+	uint32_t i;
+
+	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+		res = tonga_convert_mc_reg_table_entry_to_smc(
+				hwmgr->smumgr,
+				data->dpm_table.mclk_table.dpm_levels[i].value,
+				&mc_regs->data[i]
+				);
+
+		if (0 != res)
+			result = res;
+	}
+
+	return result;
+}
+
+static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+	struct pp_smumgr *smumgr = hwmgr->smumgr;
+	struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t address;
+	int32_t result;
+
+	if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+		return 0;
+
+
+	memset(&smu_data->mc_regs, 0, sizeof(SMU72_Discrete_MCRegisters));
+
+	result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+
+	if (result != 0)
+		return result;
+
+
+	address = smu_data->smu7_data.mc_reg_table_start +
+			(uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
+
+	return  smu7_copy_bytes_to_smc(
+			hwmgr->smumgr, address,
+			(uint8_t *)&smu_data->mc_regs.data[0],
+			sizeof(SMU72_Discrete_MCRegisterSet) *
+			data->dpm_table.mclk_table.count,
+			SMC_RAM_END);
+}
+
+static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	struct pp_smumgr *smumgr = hwmgr->smumgr;
+	struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+
+	memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters));
+	result = tonga_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
+	PP_ASSERT_WITH_CODE(!result,
+		"Failed to initialize MCRegTable for the MC register addresses !",
+		return result;);
+
+	result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+	PP_ASSERT_WITH_CODE(!result,
+		"Failed to initialize MCRegTable for driver state !",
+		return result;);
+
+	return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start,
+			(uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END);
+}
+
+static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	struct  phm_ppt_v1_information *table_info =
+			(struct  phm_ppt_v1_information *)(hwmgr->pptable);
+
+	if (table_info &&
+			table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+			table_info->cac_dtp_table->usPowerTuneDataSetID)
+		smu_data->power_tune_defaults =
+				&tonga_power_tune_data_set_array
+				[table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+	else
+		smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0];
+}
+
+/**
+ * Initializes the SMC table and uploads it
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @param    pInput  the pointer to input data (PowerState)
+ * @return   always 0
+ */
+int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct tonga_smumgr *smu_data =
+			(struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	uint8_t i;
+	pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
+
+
+	memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
+
+	tonga_initialize_power_tune_defaults(hwmgr);
+
+	if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+		tonga_populate_smc_voltage_tables(hwmgr, table);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_AutomaticDCTransition))
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StepVddc))
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+	if (data->is_memory_gddr5)
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+	i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
+
+	if (i == 1 || i == 0)
+		table->SystemFlags |= 0x40;
+
+	if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
+		result = tonga_populate_ulv_state(hwmgr, table);
+		PP_ASSERT_WITH_CODE(!result,
+			"Failed to initialize ULV state !",
+			return result;);
+
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_ULV_PARAMETER, 0x40035);
+	}
+
+	result = tonga_populate_smc_link_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+		"Failed to initialize Link Level !", return result);
+
+	result = tonga_populate_all_graphic_levels(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+		"Failed to initialize Graphics Level !", return result);
+
+	result = tonga_populate_all_memory_levels(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+		"Failed to initialize Memory Level !", return result);
+
+	result = tonga_populate_smc_acpi_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+		"Failed to initialize ACPI Level !", return result);
+
+	result = tonga_populate_smc_vce_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+		"Failed to initialize VCE Level !", return result);
+
+	result = tonga_populate_smc_acp_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+		"Failed to initialize ACP Level !", return result);
+
+	result = tonga_populate_smc_samu_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+		"Failed to initialize SAMU Level !", return result);
+
+	/* Since only the initial state is completely set up at this
+	* point (the other states are just copies of the boot state) we only
+	* need to populate the  ARB settings for the initial state.
+	*/
+	result = tonga_program_memory_timing_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+		"Failed to Write ARB settings for the initial state.",
+		return result;);
+
+	result = tonga_populate_smc_uvd_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+		"Failed to initialize UVD Level !", return result);
+
+	result = tonga_populate_smc_boot_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+		"Failed to initialize Boot Level !", return result);
+
+	tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+		"Failed to populate BAPM Parameters !", return result);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ClockStretcher)) {
+		result = tonga_populate_clock_stretcher_data_table(hwmgr);
+		PP_ASSERT_WITH_CODE(!result,
+			"Failed to populate Clock Stretcher Data Table !",
+			return result;);
+	}
+	table->GraphicsVoltageChangeEnable  = 1;
+	table->GraphicsThermThrottleEnable  = 1;
+	table->GraphicsInterval = 1;
+	table->VoltageInterval  = 1;
+	table->ThermalInterval  = 1;
+	table->TemperatureLimitHigh =
+		table_info->cac_dtp_table->usTargetOperatingTemp *
+		SMU7_Q88_FORMAT_CONVERSION_UNIT;
+	table->TemperatureLimitLow =
+		(table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+		SMU7_Q88_FORMAT_CONVERSION_UNIT;
+	table->MemoryVoltageChangeEnable  = 1;
+	table->MemoryInterval  = 1;
+	table->VoltageResponseTime  = 0;
+	table->PhaseResponseTime  = 0;
+	table->MemoryThermThrottleEnable  = 1;
+
+	/*
+	* Cail reads current link status and reports it as cap (we cannot
+	* change this due to some previous issues we had)
+	* SMC drops the link status to lowest level after enabling
+	* DPM by PowerPlay. After pnp or toggling CF, driver gets reloaded again
+	* but this time Cail reads current link status which was set to low by
+	* SMC and reports it as cap to powerplay
+	* To avoid it, we set PCIeBootLinkLevel to highest dpm level
+	*/
+	PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
+			"There must be 1 or more PCIE levels defined in PPTable.",
+			return -EINVAL);
+
+	table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
+
+	table->PCIeGenInterval  = 1;
+
+	result = tonga_populate_vr_config(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+		"Failed to populate VRConfig setting !", return result);
+
+	table->ThermGpio  = 17;
+	table->SclkStepSize = 0x4000;
+
+	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
+						&gpio_pin_assignment)) {
+		table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_RegulatorHot);
+	} else {
+		table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_RegulatorHot);
+	}
+
+	if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+						&gpio_pin_assignment)) {
+		table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_AutomaticDCTransition);
+	} else {
+		table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_AutomaticDCTransition);
+	}
+
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		PHM_PlatformCaps_Falcon_QuickTransition);
+
+	if (0) {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_AutomaticDCTransition);
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_Falcon_QuickTransition);
+	}
+
+	if (atomctrl_get_pp_assign_pin(hwmgr,
+			THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment)) {
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ThermalOutGPIO);
+
+		table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+
+		table->ThermOutPolarity =
+			(0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
+			(1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1 : 0;
+
+		table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+		/* if required, combine VRHot/PCC with thermal out GPIO*/
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_RegulatorHot) &&
+			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_CombinePCCWithThermalSignal)){
+			table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+		}
+	} else {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ThermalOutGPIO);
+
+		table->ThermOutGpio = 17;
+		table->ThermOutPolarity = 1;
+		table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+	}
+
+	for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++)
+		table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+	result = smu7_copy_bytes_to_smc(
+			hwmgr->smumgr,
+			smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags),
+			(uint8_t *)&(table->SystemFlags),
+			sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController),
+			SMC_RAM_END);
+
+	PP_ASSERT_WITH_CODE(!result,
+		"Failed to upload dpm data to SMC memory !", return result;);
+
+	result = tonga_init_arb_table_index(hwmgr->smumgr);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to upload arb data to SMC memory !", return result);
+
+	tonga_populate_pm_fuses(hwmgr);
+	PP_ASSERT_WITH_CODE((!result),
+		"Failed to populate initialize pm fuses !", return result);
+
+	result = tonga_populate_initial_mc_reg_table(hwmgr);
+	PP_ASSERT_WITH_CODE((!result),
+		"Failed to populate initialize MC Reg table !", return result);
+
+	return 0;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    pInput the pointer to input data
+* @param    pOutput the pointer to output data
+* @param    pStorage the pointer to temporary storage
+* @param    Result the last failure code
+* @return   result from set temperature range routine
+*/
+int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_smumgr *smu_data =
+			(struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+	uint32_t duty100;
+	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+	uint16_t fdo_min, slope1, slope2;
+	uint32_t reference_clock;
+	int res;
+	uint64_t tmp64;
+
+	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_MicrocodeFanControl))
+		return 0;
+
+	if (0 == smu_data->smu7_data.fan_table_start) {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_MicrocodeFanControl);
+		return 0;
+	}
+
+	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+						CGS_IND_REG__SMC,
+						CG_FDO_CTRL1, FMAX_DUTY100);
+
+	if (0 == duty100) {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_MicrocodeFanControl);
+		return 0;
+	}
+
+	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+	do_div(tmp64, 10000);
+	fdo_min = (uint16_t)tmp64;
+
+	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+		   hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+		  hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+		    hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+		    hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+	fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+	fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+	fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+	fan_table.Slope1 = cpu_to_be16(slope1);
+	fan_table.Slope2 = cpu_to_be16(slope2);
+
+	fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+	fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+	fan_table.HystUp = cpu_to_be16(1);
+
+	fan_table.HystSlope = cpu_to_be16(1);
+
+	fan_table.TempRespLim = cpu_to_be16(5);
+
+	reference_clock = smu7_get_xclk(hwmgr);
+
+	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+	fan_table.FanControl_GL_Flag = 1;
+
+	res = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+					smu_data->smu7_data.fan_table_start,
+					(uint8_t *)&fan_table,
+					(uint32_t)sizeof(fan_table),
+					SMC_RAM_END);
+
+	return 0;
+}
+
+
+static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (data->need_update_smu7_dpm_table &
+		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+		return tonga_program_memory_timing_parameters(hwmgr);
+
+	return 0;
+}
+
+int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct tonga_smumgr *smu_data =
+			(struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+	int result = 0;
+	uint32_t low_sclk_interrupt_threshold = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkThrottleLowNotification)
+		&& (hwmgr->gfx_arbiter.sclk_threshold !=
+				data->low_sclk_interrupt_threshold)) {
+		data->low_sclk_interrupt_threshold =
+				hwmgr->gfx_arbiter.sclk_threshold;
+		low_sclk_interrupt_threshold =
+				data->low_sclk_interrupt_threshold;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+		result = smu7_copy_bytes_to_smc(
+				hwmgr->smumgr,
+				smu_data->smu7_data.dpm_table_start +
+				offsetof(SMU72_Discrete_DpmTable,
+					LowSclkInterruptThreshold),
+				(uint8_t *)&low_sclk_interrupt_threshold,
+				sizeof(uint32_t),
+				SMC_RAM_END);
+	}
+
+	result = tonga_update_and_upload_mc_reg_table(hwmgr);
+
+	PP_ASSERT_WITH_CODE((!result),
+				"Failed to upload MC reg table !",
+				return result);
+
+	result = tonga_program_mem_timing_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE((result == 0),
+			"Failed to program memory timing parameters !",
+			);
+
+	return result;
+}
+
+uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
+{
+	switch (type) {
+	case SMU_SoftRegisters:
+		switch (member) {
+		case HandshakeDisables:
+			return offsetof(SMU72_SoftRegisters, HandshakeDisables);
+		case VoltageChangeTimeout:
+			return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout);
+		case AverageGraphicsActivity:
+			return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
+		case PreVBlankGap:
+			return offsetof(SMU72_SoftRegisters, PreVBlankGap);
+		case VBlankTimeout:
+			return offsetof(SMU72_SoftRegisters, VBlankTimeout);
+		case UcodeLoadStatus:
+			return offsetof(SMU72_SoftRegisters, UcodeLoadStatus);
+		}
+	case SMU_Discrete_DpmTable:
+		switch (member) {
+		case UvdBootLevel:
+			return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
+		case VceBootLevel:
+			return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
+		case SamuBootLevel:
+			return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
+		case LowSclkInterruptThreshold:
+			return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
+		}
+	}
+	printk("cant't get the offset of type %x member %x\n", type, member);
+	return 0;
+}
+
+uint32_t tonga_get_mac_definition(uint32_t value)
+{
+	switch (value) {
+	case SMU_MAX_LEVELS_GRAPHICS:
+		return SMU72_MAX_LEVELS_GRAPHICS;
+	case SMU_MAX_LEVELS_MEMORY:
+		return SMU72_MAX_LEVELS_MEMORY;
+	case SMU_MAX_LEVELS_LINK:
+		return SMU72_MAX_LEVELS_LINK;
+	case SMU_MAX_ENTRIES_SMIO:
+		return SMU72_MAX_ENTRIES_SMIO;
+	case SMU_MAX_LEVELS_VDDC:
+		return SMU72_MAX_LEVELS_VDDC;
+	case SMU_MAX_LEVELS_VDDGFX:
+		return SMU72_MAX_LEVELS_VDDGFX;
+	case SMU_MAX_LEVELS_VDDCI:
+		return SMU72_MAX_LEVELS_VDDCI;
+	case SMU_MAX_LEVELS_MVDD:
+		return SMU72_MAX_LEVELS_MVDD;
+	}
+	printk("cant't get the mac value %x\n", value);
+
+	return 0;
+}
+
+
+static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_smumgr *smu_data =
+				(struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	uint32_t mm_boot_level_offset, mm_boot_level_value;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	smu_data->smc_state_table.UvdBootLevel = 0;
+	if (table_info->mm_dep_table->count > 0)
+		smu_data->smc_state_table.UvdBootLevel =
+				(uint8_t) (table_info->mm_dep_table->count - 1);
+	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+				offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
+	mm_boot_level_offset /= 4;
+	mm_boot_level_offset *= 4;
+	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset);
+	mm_boot_level_value &= 0x00FFFFFF;
+	mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+	cgs_write_ind_register(hwmgr->device,
+				CGS_IND_REG__SMC,
+				mm_boot_level_offset, mm_boot_level_value);
+
+	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_UVDDPM) ||
+		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StablePState))
+		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_UVDDPM_SetEnabledMask,
+				(uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+	return 0;
+}
+
+static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_smumgr *smu_data =
+				(struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	uint32_t mm_boot_level_offset, mm_boot_level_value;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+
+	smu_data->smc_state_table.VceBootLevel =
+		(uint8_t) (table_info->mm_dep_table->count - 1);
+
+	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+					offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
+	mm_boot_level_offset /= 4;
+	mm_boot_level_offset *= 4;
+	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset);
+	mm_boot_level_value &= 0xFF00FFFF;
+	mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+	cgs_write_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_StablePState))
+		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_VCEDPM_SetEnabledMask,
+				(uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+	return 0;
+}
+
+static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+	smu_data->smc_state_table.SamuBootLevel = 0;
+	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+				offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
+
+	mm_boot_level_offset /= 4;
+	mm_boot_level_offset *= 4;
+	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset);
+	mm_boot_level_value &= 0xFFFFFF00;
+	mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+	cgs_write_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StablePState))
+		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_SAMUDPM_SetEnabledMask,
+				(uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+	return 0;
+}
+
+int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+	switch (type) {
+	case SMU_UVD_TABLE:
+		tonga_update_uvd_smc_table(hwmgr);
+		break;
+	case SMU_VCE_TABLE:
+		tonga_update_vce_smc_table(hwmgr);
+		break;
+	case SMU_SAMU_TABLE:
+		tonga_update_samu_smc_table(hwmgr);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+
+/**
+ * Get the location of various tables inside the FW image.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+	uint32_t tmp;
+	int result;
+	bool error = false;
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+				SMU72_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU72_Firmware_Header, DpmTable),
+				&tmp, SMC_RAM_END);
+
+	if (!result)
+		smu_data->smu7_data.dpm_table_start = tmp;
+
+	error |= (result != 0);
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+				SMU72_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU72_Firmware_Header, SoftRegisters),
+				&tmp, SMC_RAM_END);
+
+	if (!result) {
+		data->soft_regs_start = tmp;
+		smu_data->smu7_data.soft_regs_start = tmp;
+	}
+
+	error |= (result != 0);
+
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+				SMU72_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU72_Firmware_Header, mcRegisterTable),
+				&tmp, SMC_RAM_END);
+
+	if (!result)
+		smu_data->smu7_data.mc_reg_table_start = tmp;
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+				SMU72_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU72_Firmware_Header, FanTable),
+				&tmp, SMC_RAM_END);
+
+	if (!result)
+		smu_data->smu7_data.fan_table_start = tmp;
+
+	error |= (result != 0);
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+				SMU72_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
+				&tmp, SMC_RAM_END);
+
+	if (!result)
+		smu_data->smu7_data.arb_table_start = tmp;
+
+	error |= (result != 0);
+
+	result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+				SMU72_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU72_Firmware_Header, Version),
+				&tmp, SMC_RAM_END);
+
+	if (!result)
+		hwmgr->microcode_version_info.SMC = tmp;
+
+	error |= (result != 0);
+
+	return error ? 1 : 0;
+}
+
+/*---------------------------MC----------------------------*/
+
+static uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
+{
+	return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
+}
+
+static bool tonga_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+{
+	bool result = true;
+
+	switch (in_reg) {
+	case  mmMC_SEQ_RAS_TIMING:
+		*out_reg = mmMC_SEQ_RAS_TIMING_LP;
+		break;
+
+	case  mmMC_SEQ_DLL_STBY:
+		*out_reg = mmMC_SEQ_DLL_STBY_LP;
+		break;
+
+	case  mmMC_SEQ_G5PDX_CMD0:
+		*out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+		break;
+
+	case  mmMC_SEQ_G5PDX_CMD1:
+		*out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+		break;
+
+	case  mmMC_SEQ_G5PDX_CTRL:
+		*out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+		break;
+
+	case mmMC_SEQ_CAS_TIMING:
+		*out_reg = mmMC_SEQ_CAS_TIMING_LP;
+		break;
+
+	case mmMC_SEQ_MISC_TIMING:
+		*out_reg = mmMC_SEQ_MISC_TIMING_LP;
+		break;
+
+	case mmMC_SEQ_MISC_TIMING2:
+		*out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+		break;
+
+	case mmMC_SEQ_PMG_DVS_CMD:
+		*out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+		break;
+
+	case mmMC_SEQ_PMG_DVS_CTL:
+		*out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+		break;
+
+	case mmMC_SEQ_RD_CTL_D0:
+		*out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+		break;
+
+	case mmMC_SEQ_RD_CTL_D1:
+		*out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+		break;
+
+	case mmMC_SEQ_WR_CTL_D0:
+		*out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+		break;
+
+	case mmMC_SEQ_WR_CTL_D1:
+		*out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+		break;
+
+	case mmMC_PMG_CMD_EMRS:
+		*out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+		break;
+
+	case mmMC_PMG_CMD_MRS:
+		*out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+		break;
+
+	case mmMC_PMG_CMD_MRS1:
+		*out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+		break;
+
+	case mmMC_SEQ_PMG_TIMING:
+		*out_reg = mmMC_SEQ_PMG_TIMING_LP;
+		break;
+
+	case mmMC_PMG_CMD_MRS2:
+		*out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+		break;
+
+	case mmMC_SEQ_WR_CTL_2:
+		*out_reg = mmMC_SEQ_WR_CTL_2_LP;
+		break;
+
+	default:
+		result = false;
+		break;
+	}
+
+	return result;
+}
+
+static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table)
+{
+	uint32_t i;
+	uint16_t address;
+
+	for (i = 0; i < table->last; i++) {
+		table->mc_reg_address[i].s0 =
+			tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1,
+							&address) ?
+							address :
+						 table->mc_reg_address[i].s1;
+	}
+	return 0;
+}
+
+static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+					struct tonga_mc_reg_table *ni_table)
+{
+	uint8_t i, j;
+
+	PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+		"Invalid VramInfo table.", return -EINVAL);
+	PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+		"Invalid VramInfo table.", return -EINVAL);
+
+	for (i = 0; i < table->last; i++)
+		ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+
+	ni_table->last = table->last;
+
+	for (i = 0; i < table->num_entries; i++) {
+		ni_table->mc_reg_table_entry[i].mclk_max =
+			table->mc_reg_table_entry[i].mclk_max;
+		for (j = 0; j < table->last; j++) {
+			ni_table->mc_reg_table_entry[i].mc_data[j] =
+				table->mc_reg_table_entry[i].mc_data[j];
+		}
+	}
+
+	ni_table->num_entries = table->num_entries;
+
+	return 0;
+}
+
+/**
+ * VBIOS omits some information to reduce size, we need to recover them here.
+ * 1.   when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to
+ *      mmMC_PMG_CMD_EMRS /_LP[15:0]. Bit[15:0] MRS, need to be update
+ *      mmMC_PMG_CMD_MRS/_LP[15:0]
+ * 2.   when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to
+ *      mmMC_PMG_CMD_MRS1/_LP[15:0].
+ * 3.   need to set these data for each clock range
+ * @param    hwmgr the address of the powerplay hardware manager.
+ * @param    table the address of MCRegTable
+ * @return   always 0
+ */
+static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+					struct tonga_mc_reg_table *table)
+{
+	uint8_t i, j, k;
+	uint32_t temp_reg;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	for (i = 0, j = table->last; i < table->last; i++) {
+		PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+			"Invalid VramInfo table.", return -EINVAL);
+
+		switch (table->mc_reg_address[i].s1) {
+
+		case mmMC_SEQ_MISC1:
+			temp_reg = cgs_read_register(hwmgr->device,
+							mmMC_PMG_CMD_EMRS);
+			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					((temp_reg & 0xffff0000)) |
+					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+			}
+			j++;
+			PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+				"Invalid VramInfo table.", return -EINVAL);
+
+			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) |
+					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+				if (!data->is_memory_gddr5)
+					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+			}
+			j++;
+			PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+				"Invalid VramInfo table.", return -EINVAL);
+
+			if (!data->is_memory_gddr5) {
+				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+				for (k = 0; k < table->num_entries; k++)
+					table->mc_reg_table_entry[k].mc_data[j] =
+						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+				j++;
+				PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+					"Invalid VramInfo table.", return -EINVAL);
+			}
+
+			break;
+
+		case mmMC_SEQ_RESERVE_M:
+			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) |
+					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+			}
+			j++;
+			PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+				"Invalid VramInfo table.", return -EINVAL);
+			break;
+
+		default:
+			break;
+		}
+
+	}
+
+	table->last = j;
+
+	return 0;
+}
+
+static int tonga_set_valid_flag(struct tonga_mc_reg_table *table)
+{
+	uint8_t i, j;
+
+	for (i = 0; i < table->last; i++) {
+		for (j = 1; j < table->num_entries; j++) {
+			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+				table->mc_reg_table_entry[j].mc_data[i]) {
+				table->validflag |= (1<<i);
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+	pp_atomctrl_mc_reg_table *table;
+	struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+	uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
+
+	table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+	if (table == NULL)
+		return -ENOMEM;
+
+	/* Program additional LP registers that are no longer programmed by VBIOS */
+	cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP,
+			cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP,
+			cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP,
+			cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP,
+			cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP,
+			cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+	memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+	result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+	if (!result)
+		result = tonga_copy_vbios_smc_reg_table(table, ni_table);
+
+	if (!result) {
+		tonga_set_s0_mc_reg_index(ni_table);
+		result = tonga_set_mc_special_registers(hwmgr, ni_table);
+	}
+
+	if (!result)
+		tonga_set_valid_flag(ni_table);
+
+	kfree(table);
+
+	return result;
+}
+
+bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+	return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+			? true : false;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
similarity index 62%
rename from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h
rename to drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
index 8e6670b..8ae169f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
@@ -20,35 +20,19 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
+#ifndef _TONGA_SMC_H
+#define _TONGA_SMC_H
 
-#ifndef TONGA_POWERTUNE_H
-#define TONGA_POWERTUNE_H
+#include "smumgr.h"
+#include "smu72.h"
 
-enum _phw_tonga_ptc_config_reg_type {
-	TONGA_CONFIGREG_MMR = 0,
-	TONGA_CONFIGREG_SMC_IND,
-	TONGA_CONFIGREG_DIDT_IND,
-	TONGA_CONFIGREG_CACHE,
 
-	TONGA_CONFIGREG_MAX
-};
-typedef enum _phw_tonga_ptc_config_reg_type phw_tonga_ptc_config_reg_type;
+#define ASICID_IS_TONGA_P(wDID, bRID)	 \
+	(((wDID == 0x6930) && ((bRID == 0xF0) || (bRID == 0xF1) || (bRID == 0xFF))) \
+	|| ((wDID == 0x6920) && ((bRID == 0) || (bRID == 1))))
 
-/* PowerContainment Features */
-#define POWERCONTAINMENT_FEATURE_BAPM            0x00000001
-#define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
-#define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
 
-struct _phw_tonga_pt_config_reg {
-	uint32_t                           Offset;
-	uint32_t                           Mask;
-	uint32_t                           Shift;
-	uint32_t                           Value;
-	phw_tonga_ptc_config_reg_type     Type;
-};
-typedef struct _phw_tonga_pt_config_reg phw_tonga_pt_config_reg;
-
-struct _phw_tonga_pt_defaults {
+struct tonga_pt_defaults {
 	uint8_t   svi_load_line_en;
 	uint8_t   svi_load_line_vddC;
 	uint8_t   tdc_vddc_throttle_release_limit_perc;
@@ -60,7 +44,17 @@
 	uint16_t  bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
 	uint16_t  bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
 };
-typedef struct _phw_tonga_pt_defaults phw_tonga_pt_defaults;
 
+int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+int tonga_init_smc_table(struct pp_hwmgr *hwmgr);
+int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
+int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+uint32_t tonga_get_offsetof(uint32_t type, uint32_t member);
+uint32_t tonga_get_mac_definition(uint32_t value);
+int tonga_process_firmware_header(struct pp_hwmgr *hwmgr);
+int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr);
 #endif
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index f42c536..5f91240 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -33,587 +33,9 @@
 #include "smu/smu_7_1_2_d.h"
 #include "smu/smu_7_1_2_sh_mask.h"
 #include "cgs_common.h"
-
-#define TONGA_SMC_SIZE			0x20000
-#define BUFFER_SIZE			80000
-#define MAX_STRING_SIZE			15
-#define BUFFER_SIZETWO              131072 /*128 *1024*/
-
-/**
-* Set the address for reading/writing the SMC SRAM space.
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    smcAddress the address in the SMC RAM to access.
-*/
-static int tonga_set_smc_sram_address(struct pp_smumgr *smumgr,
-				uint32_t smcAddress, uint32_t limit)
-{
-	if (smumgr == NULL || smumgr->device == NULL)
-		return -EINVAL;
-	PP_ASSERT_WITH_CODE((0 == (3 & smcAddress)),
-		"SMC address must be 4 byte aligned.",
-		return -1;);
-
-	PP_ASSERT_WITH_CODE((limit > (smcAddress + 3)),
-		"SMC address is beyond the SMC RAM area.",
-		return -1;);
-
-	cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smcAddress);
-	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
-
-	return 0;
-}
-
-/**
-* Copy bytes from an array into the SMC RAM space.
-*
-* @param    smumgr  the address of the powerplay SMU manager.
-* @param    smcStartAddress the start address in the SMC RAM to copy bytes to.
-* @param    src the byte array to copy the bytes from.
-* @param    byteCount the number of bytes to copy.
-*/
-int tonga_copy_bytes_to_smc(struct pp_smumgr *smumgr,
-		uint32_t smcStartAddress, const uint8_t *src,
-		uint32_t byteCount, uint32_t limit)
-{
-	uint32_t addr;
-	uint32_t data, orig_data;
-	int result = 0;
-	uint32_t extra_shift;
-
-	if (smumgr == NULL || smumgr->device == NULL)
-		return -EINVAL;
-	PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)),
-		"SMC address must be 4 byte aligned.",
-		return 0;);
-
-	PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)),
-		"SMC address is beyond the SMC RAM area.",
-		return 0;);
-
-	addr = smcStartAddress;
-
-	while (byteCount >= 4) {
-		/*
-		 * Bytes are written into the
-		 * SMC address space with the MSB first
-		 */
-		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
-		result = tonga_set_smc_sram_address(smumgr, addr, limit);
-
-		if (result)
-			goto out;
-
-		cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
-
-		src += 4;
-		byteCount -= 4;
-		addr += 4;
-	}
-
-	if (0 != byteCount) {
-		/* Now write odd bytes left, do a read modify write cycle */
-		data = 0;
-
-		result = tonga_set_smc_sram_address(smumgr, addr, limit);
-		if (result)
-			goto out;
-
-		orig_data = cgs_read_register(smumgr->device,
-							mmSMC_IND_DATA_0);
-		extra_shift = 8 * (4 - byteCount);
-
-		while (byteCount > 0) {
-			data = (data << 8) + *src++;
-			byteCount--;
-		}
-
-		data <<= extra_shift;
-		data |= (orig_data & ~((~0UL) << extra_shift));
-
-		result = tonga_set_smc_sram_address(smumgr, addr, limit);
-		if (result)
-			goto out;
-
-		cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
-	}
-
-out:
-	return result;
-}
-
-
-int tonga_program_jump_on_start(struct pp_smumgr *smumgr)
-{
-	static const unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 };
-
-	tonga_copy_bytes_to_smc(smumgr, 0x0, pData, 4, sizeof(pData)+1);
-
-	return 0;
-}
-
-/**
-* Return if the SMC is currently running.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-*/
-static int tonga_is_smc_ram_running(struct pp_smumgr *smumgr)
-{
-	return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
-					SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
-			&& (0x20100 <= cgs_read_ind_register(smumgr->device,
-					CGS_IND_REG__SMC, ixSMC_PC_C)));
-}
-
-static int tonga_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
-{
-	if (smumgr == NULL || smumgr->device == NULL)
-		return -EINVAL;
-
-	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
-	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
-	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
-	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
-	return 0;
-}
-
-/**
-* Send a message to the SMC, and wait for its response.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    msg the message to send.
-* @return   The response that came from the SMC.
-*/
-static int tonga_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
-{
-	if (smumgr == NULL || smumgr->device == NULL)
-		return -EINVAL;
-
-	if (!tonga_is_smc_ram_running(smumgr))
-		return -1;
-
-	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-	PP_ASSERT_WITH_CODE(
-		1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
-		"Failed to send Previous Message.",
-		);
-
-	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
-	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-	PP_ASSERT_WITH_CODE(
-		1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
-		"Failed to send Message.",
-		);
-
-	return 0;
-}
-
-/*
-* Send a message to the SMC, and do not wait for its response.
-*
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    msg the message to send.
-* @return   The response that came from the SMC.
-*/
-static int tonga_send_msg_to_smc_without_waiting
-				(struct pp_smumgr *smumgr, uint16_t msg)
-{
-	if (smumgr == NULL || smumgr->device == NULL)
-		return -EINVAL;
-
-	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-	PP_ASSERT_WITH_CODE(
-		1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
-		"Failed to send Previous Message.",
-		);
-	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
-	return 0;
-}
-
-/*
-* Send a message to the SMC with parameter
-*
-* @param    smumgr:  the address of the powerplay hardware manager.
-* @param    msg: the message to send.
-* @param    parameter: the parameter to send
-* @return   The response that came from the SMC.
-*/
-static int tonga_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
-				uint16_t msg, uint32_t parameter)
-{
-	if (smumgr == NULL || smumgr->device == NULL)
-		return -EINVAL;
-
-	if (!tonga_is_smc_ram_running(smumgr))
-		return PPSMC_Result_Failed;
-
-	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-
-	return tonga_send_msg_to_smc(smumgr, msg);
-}
-
-/*
-* Send a message to the SMC with parameter, do not wait for response
-*
-* @param    smumgr:  the address of the powerplay hardware manager.
-* @param    msg: the message to send.
-* @param    parameter: the parameter to send
-* @return   The response that came from the SMC.
-*/
-static int tonga_send_msg_to_smc_with_parameter_without_waiting(
-			struct pp_smumgr *smumgr,
-			uint16_t msg, uint32_t parameter)
-{
-	if (smumgr == NULL || smumgr->device == NULL)
-		return -EINVAL;
-
-	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
-	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-
-	return tonga_send_msg_to_smc_without_waiting(smumgr, msg);
-}
-
-/*
- * Read a 32bit value from the SMC SRAM space.
- * ALL PARAMETERS ARE IN HOST BYTE ORDER.
- * @param    smumgr  the address of the powerplay hardware manager.
- * @param    smcAddress the address in the SMC RAM to access.
- * @param    value and output parameter for the data read from the SMC SRAM.
- */
-int tonga_read_smc_sram_dword(struct pp_smumgr *smumgr,
-					uint32_t smcAddress, uint32_t *value,
-					uint32_t limit)
-{
-	int result;
-
-	result = tonga_set_smc_sram_address(smumgr, smcAddress, limit);
-
-	if (0 != result)
-		return result;
-
-	*value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
-
-	return 0;
-}
-
-/*
- * Write a 32bit value to the SMC SRAM space.
- * ALL PARAMETERS ARE IN HOST BYTE ORDER.
- * @param    smumgr  the address of the powerplay hardware manager.
- * @param    smcAddress the address in the SMC RAM to access.
- * @param    value to write to the SMC SRAM.
- */
-int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr,
-					uint32_t smcAddress, uint32_t value,
-					uint32_t limit)
-{
-	int result;
-
-	result = tonga_set_smc_sram_address(smumgr, smcAddress, limit);
-
-	if (0 != result)
-		return result;
-
-	cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value);
-
-	return 0;
-}
-
-static int tonga_smu_fini(struct pp_smumgr *smumgr)
-{
-	struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend);
-
-	smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle);
-	smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
-
-	if (smumgr->backend != NULL) {
-		kfree(smumgr->backend);
-		smumgr->backend = NULL;
-	}
-
-	cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
-	return 0;
-}
-
-static enum cgs_ucode_id tonga_convert_fw_type_to_cgs(uint32_t fw_type)
-{
-	enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
-
-	switch (fw_type) {
-	case UCODE_ID_SMU:
-		result = CGS_UCODE_ID_SMU;
-		break;
-	case UCODE_ID_SDMA0:
-		result = CGS_UCODE_ID_SDMA0;
-		break;
-	case UCODE_ID_SDMA1:
-		result = CGS_UCODE_ID_SDMA1;
-		break;
-	case UCODE_ID_CP_CE:
-		result = CGS_UCODE_ID_CP_CE;
-		break;
-	case UCODE_ID_CP_PFP:
-		result = CGS_UCODE_ID_CP_PFP;
-		break;
-	case UCODE_ID_CP_ME:
-		result = CGS_UCODE_ID_CP_ME;
-		break;
-	case UCODE_ID_CP_MEC:
-		result = CGS_UCODE_ID_CP_MEC;
-		break;
-	case UCODE_ID_CP_MEC_JT1:
-		result = CGS_UCODE_ID_CP_MEC_JT1;
-		break;
-	case UCODE_ID_CP_MEC_JT2:
-		result = CGS_UCODE_ID_CP_MEC_JT2;
-		break;
-	case UCODE_ID_RLC_G:
-		result = CGS_UCODE_ID_RLC_G;
-		break;
-	default:
-		break;
-	}
-
-	return result;
-}
-
-/**
- * Convert the PPIRI firmware type to SMU type mask.
- * For MEC, we need to check all MEC related type
-*/
-static uint16_t tonga_get_mask_for_firmware_type(uint16_t firmwareType)
-{
-	uint16_t result = 0;
-
-	switch (firmwareType) {
-	case UCODE_ID_SDMA0:
-		result = UCODE_ID_SDMA0_MASK;
-		break;
-	case UCODE_ID_SDMA1:
-		result = UCODE_ID_SDMA1_MASK;
-		break;
-	case UCODE_ID_CP_CE:
-		result = UCODE_ID_CP_CE_MASK;
-		break;
-	case UCODE_ID_CP_PFP:
-		result = UCODE_ID_CP_PFP_MASK;
-		break;
-	case UCODE_ID_CP_ME:
-		result = UCODE_ID_CP_ME_MASK;
-		break;
-	case UCODE_ID_CP_MEC:
-	case UCODE_ID_CP_MEC_JT1:
-	case UCODE_ID_CP_MEC_JT2:
-		result = UCODE_ID_CP_MEC_MASK;
-		break;
-	case UCODE_ID_RLC_G:
-		result = UCODE_ID_RLC_G_MASK;
-		break;
-	default:
-		break;
-	}
-
-	return result;
-}
-
-/**
- * Check if the FW has been loaded,
- * SMU will not return if loading has not finished.
-*/
-static int tonga_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fwType)
-{
-	uint16_t fwMask = tonga_get_mask_for_firmware_type(fwType);
-
-	if (0 != SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND,
-				SOFT_REGISTERS_TABLE_28, fwMask, fwMask)) {
-		printk(KERN_ERR "[ powerplay ] check firmware loading failed\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-/* Populate one firmware image to the data structure */
-static int tonga_populate_single_firmware_entry(struct pp_smumgr *smumgr,
-				uint16_t firmware_type,
-				struct SMU_Entry *pentry)
-{
-	int result;
-	struct cgs_firmware_info info = {0};
-
-	result = cgs_get_firmware_info(
-				smumgr->device,
-				tonga_convert_fw_type_to_cgs(firmware_type),
-				&info);
-
-	if (result == 0) {
-		pentry->version = 0;
-		pentry->id = (uint16_t)firmware_type;
-		pentry->image_addr_high = smu_upper_32_bits(info.mc_addr);
-		pentry->image_addr_low = smu_lower_32_bits(info.mc_addr);
-		pentry->meta_data_addr_high = 0;
-		pentry->meta_data_addr_low = 0;
-		pentry->data_size_byte = info.image_size;
-		pentry->num_register_entries = 0;
-
-		if (firmware_type == UCODE_ID_RLC_G)
-			pentry->flags = 1;
-		else
-			pentry->flags = 0;
-	} else {
-		return result;
-	}
-
-	return result;
-}
-
-static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr)
-{
-	struct tonga_smumgr *tonga_smu =
-		(struct tonga_smumgr *)(smumgr->backend);
-	uint16_t fw_to_load;
-	struct SMU_DRAMData_TOC *toc;
-	/**
-	 * First time this gets called during SmuMgr init,
-	 * we haven't processed SMU header file yet,
-	 * so Soft Register Start offset is unknown.
-	 * However, for this case, UcodeLoadStatus is already 0,
-	 * so we can skip this if the Soft Registers Start offset is 0.
-	 */
-	cgs_write_ind_register(smumgr->device,
-		CGS_IND_REG__SMC, ixSOFT_REGISTERS_TABLE_28, 0);
-
-	tonga_send_msg_to_smc_with_parameter(smumgr,
-		PPSMC_MSG_SMU_DRAM_ADDR_HI,
-		tonga_smu->smu_buffer.mc_addr_high);
-	tonga_send_msg_to_smc_with_parameter(smumgr,
-		PPSMC_MSG_SMU_DRAM_ADDR_LO,
-		tonga_smu->smu_buffer.mc_addr_low);
-
-	toc = (struct SMU_DRAMData_TOC *)tonga_smu->pHeader;
-	toc->num_entries = 0;
-	toc->structure_version = 1;
-
-	PP_ASSERT_WITH_CODE(
-		0 == tonga_populate_single_firmware_entry(smumgr,
-		UCODE_ID_RLC_G,
-		&toc->entry[toc->num_entries++]),
-		"Failed to Get Firmware Entry.\n",
-		return -1);
-	PP_ASSERT_WITH_CODE(
-		0 == tonga_populate_single_firmware_entry(smumgr,
-		UCODE_ID_CP_CE,
-		&toc->entry[toc->num_entries++]),
-		"Failed to Get Firmware Entry.\n",
-		return -1);
-	PP_ASSERT_WITH_CODE(
-		0 == tonga_populate_single_firmware_entry
-		(smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
-		"Failed to Get Firmware Entry.\n", return -1);
-	PP_ASSERT_WITH_CODE(
-		0 == tonga_populate_single_firmware_entry
-		(smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
-		"Failed to Get Firmware Entry.\n", return -1);
-	PP_ASSERT_WITH_CODE(
-		0 == tonga_populate_single_firmware_entry
-		(smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
-		"Failed to Get Firmware Entry.\n", return -1);
-	PP_ASSERT_WITH_CODE(
-		0 == tonga_populate_single_firmware_entry
-		(smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
-		"Failed to Get Firmware Entry.\n", return -1);
-	PP_ASSERT_WITH_CODE(
-		0 == tonga_populate_single_firmware_entry
-		(smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
-		"Failed to Get Firmware Entry.\n", return -1);
-	PP_ASSERT_WITH_CODE(
-		0 == tonga_populate_single_firmware_entry
-		(smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
-		"Failed to Get Firmware Entry.\n", return -1);
-	PP_ASSERT_WITH_CODE(
-		0 == tonga_populate_single_firmware_entry
-		(smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
-		"Failed to Get Firmware Entry.\n", return -1);
-
-	tonga_send_msg_to_smc_with_parameter(smumgr,
-		PPSMC_MSG_DRV_DRAM_ADDR_HI,
-		tonga_smu->header_buffer.mc_addr_high);
-	tonga_send_msg_to_smc_with_parameter(smumgr,
-		PPSMC_MSG_DRV_DRAM_ADDR_LO,
-		tonga_smu->header_buffer.mc_addr_low);
-
-	fw_to_load = UCODE_ID_RLC_G_MASK
-			+ UCODE_ID_SDMA0_MASK
-			+ UCODE_ID_SDMA1_MASK
-			+ UCODE_ID_CP_CE_MASK
-			+ UCODE_ID_CP_ME_MASK
-			+ UCODE_ID_CP_PFP_MASK
-			+ UCODE_ID_CP_MEC_MASK;
-
-	PP_ASSERT_WITH_CODE(
-		0 == tonga_send_msg_to_smc_with_parameter_without_waiting(
-		smumgr, PPSMC_MSG_LoadUcodes, fw_to_load),
-		"Fail to Request SMU Load uCode", return 0);
-
-	return 0;
-}
-
-static int tonga_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
-				uint32_t firmwareType)
-{
-	return 0;
-}
-
-/**
- * Upload the SMC firmware to the SMC microcontroller.
- *
- * @param    smumgr  the address of the powerplay hardware manager.
- * @param    pFirmware the data structure containing the various sections of the firmware.
- */
-static int tonga_smu_upload_firmware_image(struct pp_smumgr *smumgr)
-{
-	const uint8_t *src;
-	uint32_t byte_count;
-	uint32_t *data;
-	struct cgs_firmware_info info = {0};
-
-	if (smumgr == NULL || smumgr->device == NULL)
-		return -EINVAL;
-
-	cgs_get_firmware_info(smumgr->device,
-		tonga_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
-
-	if (info.image_size & 3) {
-		printk(KERN_ERR "[ powerplay ] SMC ucode is not 4 bytes aligned\n");
-		return -EINVAL;
-	}
-
-	if (info.image_size > TONGA_SMC_SIZE) {
-		printk(KERN_ERR "[ powerplay ] SMC address is beyond the SMC RAM area\n");
-		return -EINVAL;
-	}
-
-	cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000);
-	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
-
-	byte_count = info.image_size;
-	src = (const uint8_t *)info.kptr;
-
-	data = (uint32_t *)src;
-	for (; byte_count >= 4; data++, byte_count -= 4)
-		cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]);
-
-	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+#include "tonga_smc.h"
+#include "smu7_smumgr.h"
 
-	return 0;
-}
 
 static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
 {
@@ -623,7 +45,7 @@
 	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
 		SMC_SYSCON_RESET_CNTL, rst_reg, 1);
 
-	result = tonga_smu_upload_firmware_image(smumgr);
+	result = smu7_upload_smu_firmware_image(smumgr);
 	if (result)
 		return result;
 
@@ -653,7 +75,7 @@
 	/**
 	 * Call Test SMU message with 0x20000 offset to trigger SMU start
 	 */
-	tonga_send_msg_to_smc_offset(smumgr);
+	smu7_send_msg_to_smc_offset(smumgr);
 
 	/* Wait for done bit to be set */
 	SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
@@ -690,13 +112,13 @@
 	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
 		SMC_SYSCON_RESET_CNTL, rst_reg, 1);
 
-	result = tonga_smu_upload_firmware_image(smumgr);
+	result = smu7_upload_smu_firmware_image(smumgr);
 
 	if (result != 0)
 		return result;
 
 	/* Set smc instruct start point at 0x0 */
-	tonga_program_jump_on_start(smumgr);
+	smu7_program_jump_on_start(smumgr);
 
 
 	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
@@ -718,7 +140,7 @@
 	int result;
 
 	/* Only start SMC if SMC RAM is not running */
-	if (!tonga_is_smc_ram_running(smumgr)) {
+	if (!smu7_is_smc_ram_running(smumgr)) {
 		/*Check if SMU is running in protected mode*/
 		if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
 					SMU_FIRMWARE, SMU_MODE)) {
@@ -732,7 +154,7 @@
 		}
 	}
 
-	result = tonga_request_smu_reload_fw(smumgr);
+	result = smu7_request_smu_load_fw(smumgr);
 
 	return result;
 }
@@ -746,67 +168,41 @@
  */
 static int tonga_smu_init(struct pp_smumgr *smumgr)
 {
-	struct tonga_smumgr *tonga_smu;
-	uint8_t *internal_buf;
-	uint64_t mc_addr = 0;
-	/* Allocate memory for backend private data */
-	tonga_smu = (struct tonga_smumgr *)(smumgr->backend);
-	tonga_smu->header_buffer.data_size =
-		((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
-	tonga_smu->smu_buffer.data_size = 200*4096;
+	struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
 
-	smu_allocate_memory(smumgr->device,
-		tonga_smu->header_buffer.data_size,
-		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
-		PAGE_SIZE,
-		&mc_addr,
-		&tonga_smu->header_buffer.kaddr,
-		&tonga_smu->header_buffer.handle);
+	int i;
 
-	tonga_smu->pHeader = tonga_smu->header_buffer.kaddr;
-	tonga_smu->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
-	tonga_smu->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+	if (smu7_init(smumgr))
+		return -EINVAL;
 
-	PP_ASSERT_WITH_CODE((NULL != tonga_smu->pHeader),
-		"Out of memory.",
-		kfree(smumgr->backend);
-		cgs_free_gpu_mem(smumgr->device,
-		(cgs_handle_t)tonga_smu->header_buffer.handle);
-		return -1);
-
-	smu_allocate_memory(smumgr->device,
-		tonga_smu->smu_buffer.data_size,
-		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
-		PAGE_SIZE,
-		&mc_addr,
-		&tonga_smu->smu_buffer.kaddr,
-		&tonga_smu->smu_buffer.handle);
-
-	internal_buf = tonga_smu->smu_buffer.kaddr;
-	tonga_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
-	tonga_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
-	PP_ASSERT_WITH_CODE((NULL != internal_buf),
-		"Out of memory.",
-		kfree(smumgr->backend);
-		cgs_free_gpu_mem(smumgr->device,
-		(cgs_handle_t)tonga_smu->smu_buffer.handle);
-		return -1;);
+	for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++)
+		smu_data->activity_target[i] = 30;
 
 	return 0;
 }
 
 static const struct pp_smumgr_func tonga_smu_funcs = {
 	.smu_init = &tonga_smu_init,
-	.smu_fini = &tonga_smu_fini,
+	.smu_fini = &smu7_smu_fini,
 	.start_smu = &tonga_start_smu,
-	.check_fw_load_finish = &tonga_check_fw_load_finish,
-	.request_smu_load_fw = &tonga_request_smu_reload_fw,
-	.request_smu_load_specific_fw = &tonga_request_smu_load_specific_fw,
-	.send_msg_to_smc = &tonga_send_msg_to_smc,
-	.send_msg_to_smc_with_parameter = &tonga_send_msg_to_smc_with_parameter,
+	.check_fw_load_finish = &smu7_check_fw_load_finish,
+	.request_smu_load_fw = &smu7_request_smu_load_fw,
+	.request_smu_load_specific_fw = NULL,
+	.send_msg_to_smc = &smu7_send_msg_to_smc,
+	.send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
 	.download_pptable_settings = NULL,
 	.upload_pptable_settings = NULL,
+	.update_smc_table = tonga_update_smc_table,
+	.get_offsetof = tonga_get_offsetof,
+	.process_firmware_header = tonga_process_firmware_header,
+	.init_smc_table = tonga_init_smc_table,
+	.update_sclk_threshold = tonga_update_sclk_threshold,
+	.thermal_setup_fan_table = tonga_thermal_setup_fan_table,
+	.populate_all_graphic_levels = tonga_populate_all_graphic_levels,
+	.populate_all_memory_levels = tonga_populate_all_memory_levels,
+	.get_mac_definition = tonga_get_mac_definition,
+	.initialize_mc_reg_table = tonga_initialize_mc_reg_table,
+	.is_dpm_running = tonga_is_dpm_running,
 };
 
 int tonga_smum_init(struct pp_smumgr *smumgr)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
index 33c788d..8c4f761 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
@@ -24,30 +24,36 @@
 #ifndef _TONGA_SMUMGR_H_
 #define _TONGA_SMUMGR_H_
 
-struct tonga_buffer_entry {
-	uint32_t data_size;
-	uint32_t mc_addr_low;
-	uint32_t mc_addr_high;
-	void *kaddr;
-	unsigned long  handle;
+#include "smu72_discrete.h"
+
+#include "smu7_smumgr.h"
+
+struct tonga_mc_reg_entry {
+	uint32_t mclk_max;
+	uint32_t mc_data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
 };
 
+struct tonga_mc_reg_table {
+	uint8_t   last;               /* number of registers*/
+	uint8_t   num_entries;        /* number of entries in mc_reg_table_entry used*/
+	uint16_t  validflag;          /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
+	struct tonga_mc_reg_entry    mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+	SMU72_Discrete_MCRegisterAddress mc_reg_address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+
 struct tonga_smumgr {
-	uint8_t *pHeader;
-	uint8_t *pMecImage;
-	uint32_t ulSoftRegsStart;
 
-	struct tonga_buffer_entry header_buffer;
-	struct tonga_buffer_entry smu_buffer;
+	struct smu7_smumgr                   smu7_data;
+	struct SMU72_Discrete_DpmTable       smc_state_table;
+	struct SMU72_Discrete_Ulv            ulv_setting;
+	struct SMU72_Discrete_PmFuses  power_tune_table;
+	const struct tonga_pt_defaults  *power_tune_defaults;
+	SMU72_Discrete_MCRegisters      mc_regs;
+	struct tonga_mc_reg_table mc_reg_table;
+
+	uint32_t        activity_target[SMU72_MAX_LEVELS_GRAPHICS];
+
 };
 
-extern int tonga_smum_init(struct pp_smumgr *smumgr);
-extern int tonga_copy_bytes_to_smc(struct pp_smumgr *smumgr,
-		uint32_t smcStartAddress, const uint8_t *src,
-		uint32_t byteCount, uint32_t limit);
-extern int tonga_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
-		uint32_t *value, uint32_t limit);
-extern int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
-		uint32_t value, uint32_t limit);
-
 #endif
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index ee0a61c..7130b04 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -183,8 +183,6 @@
 }
 
 static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
-	.prepare_fb = NULL,
-	.cleanup_fb = NULL,
 	.atomic_update = arc_pgu_plane_atomic_update,
 };
 
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 6d4ff34..28e6471 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -198,8 +198,8 @@
 	int ret;
 
 	drm = drm_dev_alloc(&arcpgu_drm_driver, &pdev->dev);
-	if (!drm)
-		return -ENOMEM;
+	if (IS_ERR(drm))
+		return PTR_ERR(drm);
 
 	ret = arcpgu_load(drm);
 	if (ret)
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index d83b46a..fb6a418 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -326,8 +326,8 @@
 		return -ENOMEM;
 
 	drm = drm_dev_alloc(&hdlcd_driver, dev);
-	if (!drm)
-		return -ENOMEM;
+	if (IS_ERR(drm))
+		return PTR_ERR(drm);
 
 	drm->dev_private = hdlcd;
 	dev_set_drvdata(dev, drm);
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 82171d2..9280358 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -91,7 +91,8 @@
 
 	drm_atomic_helper_commit_modeset_disables(drm, state);
 	drm_atomic_helper_commit_modeset_enables(drm, state);
-	drm_atomic_helper_commit_planes(drm, state, true);
+	drm_atomic_helper_commit_planes(drm, state,
+					DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
 	malidp_atomic_commit_hw_done(state);
 
@@ -310,8 +311,8 @@
 		return ret;
 
 	drm = drm_dev_alloc(&malidp_driver, dev);
-	if (!drm) {
-		ret = -ENOMEM;
+	if (IS_ERR(drm)) {
+		ret = PTR_ERR(drm);
 		goto alloc_fail;
 	}
 
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index 95558fd..271d2fb 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -49,6 +49,6 @@
 int malidp_crtc_init(struct drm_device *drm);
 
 /* often used combination of rotational bits */
-#define MALIDP_ROTATED_MASK	(BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))
+#define MALIDP_ROTATED_MASK	(DRM_ROTATE_90 | DRM_ROTATE_270)
 
 #endif  /* __MALIDP_DRV_H__ */
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 725098d..82c193e 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -108,7 +108,7 @@
 		return -EINVAL;
 
 	/* packed RGB888 / BGR888 can't be rotated or flipped */
-	if (state->rotation != BIT(DRM_ROTATE_0) &&
+	if (state->rotation != DRM_ROTATE_0 &&
 	    (state->fb->pixel_format == DRM_FORMAT_RGB888 ||
 	     state->fb->pixel_format == DRM_FORMAT_BGR888))
 		return -EINVAL;
@@ -188,9 +188,9 @@
 	/* setup the rotation and axis flip bits */
 	if (plane->state->rotation & DRM_ROTATE_MASK)
 		val = ilog2(plane->state->rotation & DRM_ROTATE_MASK) << LAYER_ROT_OFFSET;
-	if (plane->state->rotation & BIT(DRM_REFLECT_X))
+	if (plane->state->rotation & DRM_REFLECT_X)
 		val |= LAYER_V_FLIP;
-	if (plane->state->rotation & BIT(DRM_REFLECT_Y))
+	if (plane->state->rotation & DRM_REFLECT_Y)
 		val |= LAYER_H_FLIP;
 
 	/* set the 'enable layer' bit */
@@ -255,12 +255,12 @@
 			goto cleanup;
 
 		if (!drm->mode_config.rotation_property) {
-			unsigned long flags = BIT(DRM_ROTATE_0) |
-					      BIT(DRM_ROTATE_90) |
-					      BIT(DRM_ROTATE_180) |
-					      BIT(DRM_ROTATE_270) |
-					      BIT(DRM_REFLECT_X) |
-					      BIT(DRM_REFLECT_Y);
+			unsigned long flags = DRM_ROTATE_0 |
+					      DRM_ROTATE_90 |
+					      DRM_ROTATE_180 |
+					      DRM_ROTATE_270 |
+					      DRM_REFLECT_X |
+					      DRM_REFLECT_Y;
 			drm->mode_config.rotation_property =
 				drm_mode_create_rotation_property(drm, flags);
 		}
@@ -268,7 +268,7 @@
 		if (drm->mode_config.rotation_property && (id != DE_SMART))
 			drm_object_attach_property(&plane->base.base,
 						   drm->mode_config.rotation_property,
-						   BIT(DRM_ROTATE_0));
+						   DRM_ROTATE_0);
 
 		drm_plane_helper_add(&plane->base,
 				     &malidp_de_plane_helper_funcs);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index f5ebdd6..1e0e68f 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -211,7 +211,7 @@
 	.desc			= "Armada SoC DRM",
 	.date			= "20120730",
 	.driver_features	= DRIVER_GEM | DRIVER_MODESET |
-				  DRIVER_HAVE_IRQ | DRIVER_PRIME,
+				  DRIVER_PRIME,
 	.ioctls			= armada_ioctls,
 	.fops			= &armada_drm_fops,
 };
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 7d03c51..ca73ad8 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -7,7 +7,6 @@
  * published by the Free Software Foundation.
  */
 #include <linux/errno.h>
-#include <linux/fb.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index cb8f034..8067918 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -387,7 +387,7 @@
 	if (!access_ok(VERIFY_READ, ptr, args->size))
 		return -EFAULT;
 
-	ret = fault_in_multipages_readable(ptr, args->size);
+	ret = fault_in_pages_readable(ptr, args->size);
 	if (ret)
 		return ret;
 
@@ -547,7 +547,7 @@
 	exp_info.flags = O_RDWR;
 	exp_info.priv = obj;
 
-	return dma_buf_export(&exp_info);
+	return drm_gem_dmabuf_export(dev, &exp_info);
 }
 
 struct drm_gem_object *
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 1ee707e..152b4e7 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -121,7 +121,7 @@
 	int ret;
 
 	ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
-					    BIT(DRM_ROTATE_0),
+					    DRM_ROTATE_0,
 					    0, INT_MAX, true, false, &visible);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index c017a93..7a86e24 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -33,7 +33,6 @@
 #include <linux/tty.h>
 #include <linux/sysrq.h>
 #include <linux/delay.h>
-#include <linux/fb.h>
 #include <linux/init.h>
 
 
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index b29a412..608df4c 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -150,7 +150,8 @@
 {
 	struct ast_bo *astbo = ast_bo(bo);
 
-	return drm_vma_node_verify_access(&astbo->gem.vma_node, filp);
+	return drm_vma_node_verify_access(&astbo->gem.vma_node,
+					  filp->private_data);
 }
 
 static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index d4a3d61..5f48431 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -457,7 +457,7 @@
 
 	/* Apply the atomic update. */
 	drm_atomic_helper_commit_modeset_disables(dev, old_state);
-	drm_atomic_helper_commit_planes(dev, old_state, false);
+	drm_atomic_helper_commit_planes(dev, old_state, 0);
 	drm_atomic_helper_commit_modeset_enables(dev, old_state);
 
 	drm_atomic_helper_wait_for_vblanks(dev, old_state);
@@ -797,8 +797,8 @@
 	int ret;
 
 	ddev = drm_dev_alloc(&atmel_hlcdc_dc_driver, &pdev->dev);
-	if (!ddev)
-		return -ENOMEM;
+	if (IS_ERR(ddev))
+		return PTR_ERR(ddev);
 
 	ret = atmel_hlcdc_dc_load(ddev);
 	if (ret)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 52c527f..9d4c030 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -393,7 +393,7 @@
 
 	if ((state->base.fb->pixel_format == DRM_FORMAT_YUV422 ||
 	     state->base.fb->pixel_format == DRM_FORMAT_NV61) &&
-	    (state->base.rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))))
+	    (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)))
 		cfg |= ATMEL_HLCDC_YUV422ROT;
 
 	atmel_hlcdc_layer_update_cfg(&plane->layer,
@@ -628,7 +628,7 @@
 	/*
 	 * Swap width and size in case of 90 or 270 degrees rotation
 	 */
-	if (state->base.rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
+	if (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) {
 		tmp = state->crtc_w;
 		state->crtc_w = state->crtc_h;
 		state->crtc_h = tmp;
@@ -677,7 +677,7 @@
 			return -EINVAL;
 
 		switch (state->base.rotation & DRM_ROTATE_MASK) {
-		case BIT(DRM_ROTATE_90):
+		case DRM_ROTATE_90:
 			offset = ((y_offset + state->src_y + patched_src_w - 1) /
 				  ydiv) * fb->pitches[i];
 			offset += ((x_offset + state->src_x) / xdiv) *
@@ -686,7 +686,7 @@
 					  fb->pitches[i];
 			state->pstride[i] = -fb->pitches[i] - state->bpp[i];
 			break;
-		case BIT(DRM_ROTATE_180):
+		case DRM_ROTATE_180:
 			offset = ((y_offset + state->src_y + patched_src_h - 1) /
 				  ydiv) * fb->pitches[i];
 			offset += ((x_offset + state->src_x + patched_src_w - 1) /
@@ -695,7 +695,7 @@
 					   state->bpp[i]) - fb->pitches[i];
 			state->pstride[i] = -2 * state->bpp[i];
 			break;
-		case BIT(DRM_ROTATE_270):
+		case DRM_ROTATE_270:
 			offset = ((y_offset + state->src_y) / ydiv) *
 				 fb->pitches[i];
 			offset += ((x_offset + state->src_x + patched_src_h - 1) /
@@ -705,7 +705,7 @@
 					  (2 * state->bpp[i]);
 			state->pstride[i] = fb->pitches[i] - state->bpp[i];
 			break;
-		case BIT(DRM_ROTATE_0):
+		case DRM_ROTATE_0:
 		default:
 			offset = ((y_offset + state->src_y) / ydiv) *
 				 fb->pitches[i];
@@ -755,7 +755,7 @@
 }
 
 static int atmel_hlcdc_plane_prepare_fb(struct drm_plane *p,
-					const struct drm_plane_state *new_state)
+					struct drm_plane_state *new_state)
 {
 	/*
 	 * FIXME: we should avoid this const -> non-const cast but it's
@@ -780,7 +780,7 @@
 }
 
 static void atmel_hlcdc_plane_cleanup_fb(struct drm_plane *p,
-				const struct drm_plane_state *old_state)
+					 struct drm_plane_state *old_state)
 {
 	/*
 	 * FIXME: we should avoid this const -> non-const cast but it's
@@ -905,7 +905,7 @@
 	if (desc->layout.xstride && desc->layout.pstride)
 		drm_object_attach_property(&plane->base.base,
 				plane->base.dev->mode_config.rotation_property,
-				BIT(DRM_ROTATE_0));
+				DRM_ROTATE_0);
 
 	if (desc->layout.csc) {
 		/*
@@ -1056,10 +1056,10 @@
 
 	dev->mode_config.rotation_property =
 			drm_mode_create_rotation_property(dev,
-							  BIT(DRM_ROTATE_0) |
-							  BIT(DRM_ROTATE_90) |
-							  BIT(DRM_ROTATE_180) |
-							  BIT(DRM_ROTATE_270));
+							  DRM_ROTATE_0 |
+							  DRM_ROTATE_90 |
+							  DRM_ROTATE_180 |
+							  DRM_ROTATE_270);
 	if (!dev->mode_config.rotation_property)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 19b5ada..32dfe41 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -1,5 +1,4 @@
 #include <linux/io.h>
-#include <linux/fb.h>
 #include <linux/console.h>
 
 #include <drm/drmP.h>
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index abace82..534227d 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -8,6 +8,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <drm/drm_fb_helper.h>
 
 #include "bochs.h"
 
@@ -153,7 +154,7 @@
 
 	ap->ranges[0].base = pci_resource_start(pdev, 0);
 	ap->ranges[0].size = pci_resource_len(pdev, 0);
-	remove_conflicting_framebuffers(ap, "bochsdrmfb", false);
+	drm_fb_helper_remove_conflicting_framebuffers(ap, "bochsdrmfb", false);
 	kfree(ap);
 
 	return 0;
@@ -162,8 +163,15 @@
 static int bochs_pci_probe(struct pci_dev *pdev,
 			   const struct pci_device_id *ent)
 {
+	unsigned long fbsize;
 	int ret;
 
+	fbsize = pci_resource_len(pdev, 0);
+	if (fbsize < 4 * 1024 * 1024) {
+		DRM_ERROR("less than 4 MB video memory, ignoring device\n");
+		return -ENOMEM;
+	}
+
 	ret = bochs_kick_out_firmware_fb(pdev);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 207a2cb..0b4e5d1 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -178,7 +178,7 @@
 }
 
 
-int bochs_connector_get_modes(struct drm_connector *connector)
+static int bochs_connector_get_modes(struct drm_connector *connector)
 {
 	int count;
 
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 5c5638a..269cfca 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -128,7 +128,8 @@
 {
 	struct bochs_bo *bochsbo = bochs_bo(bo);
 
-	return drm_vma_node_verify_access(&bochsbo->gem.vma_node, filp);
+	return drm_vma_node_verify_access(&bochsbo->gem.vma_node,
+					  filp->private_data);
 }
 
 static int bochs_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index b590e67..10e12e7 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -17,6 +17,13 @@
 	  the HDMI output of an application processor to MyDP
 	  or DisplayPort.
 
+config DRM_DUMB_VGA_DAC
+	tristate "Dumb VGA DAC Bridge support"
+	depends on OF
+	select DRM_KMS_HELPER
+	help
+	  Support for RGB to VGA DAC based bridges
+
 config DRM_DW_HDMI
 	tristate
 	select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index efdb07e..cdf3a3c 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,6 +1,7 @@
 ccflags-y := -Iinclude/drm
 
 obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
+obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
 obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
 obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
 obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index ec8fb2e..8ed3906 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -922,15 +922,13 @@
 	return 0;
 }
 
-static const int edid_i2c_addr = 0x7e;
-static const int packet_i2c_addr = 0x70;
-static const int cec_i2c_addr = 0x78;
-
 static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
 {
 	struct adv7511_link_config link_config;
 	struct adv7511 *adv7511;
 	struct device *dev = &i2c->dev;
+	unsigned int main_i2c_addr = i2c->addr << 1;
+	unsigned int edid_i2c_addr = main_i2c_addr + 4;
 	unsigned int val;
 	int ret;
 
@@ -991,8 +989,10 @@
 
 	regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
 	regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
-		     packet_i2c_addr);
-	regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR, cec_i2c_addr);
+		     main_i2c_addr - 0xa);
+	regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR,
+		     main_i2c_addr - 2);
+
 	adv7511_packet_disable(adv7511, 0xffff);
 
 	adv7511->i2c_main = i2c;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index 5eebd15..d7f7b7c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -149,13 +149,12 @@
 	i2c_unregister_device(adv->i2c_cec);
 }
 
-static const int cec_i2c_addr = 0x78;
-
 int adv7533_init_cec(struct adv7511 *adv)
 {
 	int ret;
 
-	adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter, cec_i2c_addr >> 1);
+	adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter,
+				     adv->i2c_main->addr - 1);
 	if (!adv->i2c_cec)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index f9f03bc..a2a8236 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -1001,16 +1001,11 @@
 	return connector_status_connected;
 }
 
-static void anx78xx_connector_destroy(struct drm_connector *connector)
-{
-	drm_connector_cleanup(connector);
-}
-
 static const struct drm_connector_funcs anx78xx_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = anx78xx_detect,
-	.destroy = anx78xx_connector_destroy,
+	.destroy = drm_connector_cleanup,
 	.reset = drm_atomic_helper_connector_reset,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 32715da..6e0447f 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -31,6 +31,7 @@
 #include <drm/bridge/analogix_dp.h>
 
 #include "analogix_dp_core.h"
+#include "analogix_dp_reg.h"
 
 #define to_dp(nm)	container_of(nm, struct analogix_dp_device, nm)
 
@@ -97,133 +98,89 @@
 	return 0;
 }
 
-static unsigned char analogix_dp_calc_edid_check_sum(unsigned char *edid_data)
+int analogix_dp_psr_supported(struct device *dev)
 {
-	int i;
-	unsigned char sum = 0;
+	struct analogix_dp_device *dp = dev_get_drvdata(dev);
 
-	for (i = 0; i < EDID_BLOCK_LENGTH; i++)
-		sum = sum + edid_data[i];
-
-	return sum;
+	return dp->psr_support;
 }
+EXPORT_SYMBOL_GPL(analogix_dp_psr_supported);
 
-static int analogix_dp_read_edid(struct analogix_dp_device *dp)
+int analogix_dp_enable_psr(struct device *dev)
 {
-	unsigned char *edid = dp->edid;
-	unsigned int extend_block = 0;
-	unsigned char sum;
-	unsigned char test_vector;
-	int retval;
+	struct analogix_dp_device *dp = dev_get_drvdata(dev);
+	struct edp_vsc_psr psr_vsc;
 
-	/*
-	 * EDID device address is 0x50.
-	 * However, if necessary, you must have set upper address
-	 * into E-EDID in I2C device, 0x30.
-	 */
+	if (!dp->psr_support)
+		return -EINVAL;
 
-	/* Read Extension Flag, Number of 128-byte EDID extension blocks */
-	retval = analogix_dp_read_byte_from_i2c(dp, I2C_EDID_DEVICE_ADDR,
-						EDID_EXTENSION_FLAG,
-						&extend_block);
-	if (retval)
-		return retval;
+	/* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
+	memset(&psr_vsc, 0, sizeof(psr_vsc));
+	psr_vsc.sdp_header.HB0 = 0;
+	psr_vsc.sdp_header.HB1 = 0x7;
+	psr_vsc.sdp_header.HB2 = 0x2;
+	psr_vsc.sdp_header.HB3 = 0x8;
 
-	if (extend_block > 0) {
-		dev_dbg(dp->dev, "EDID data includes a single extension!\n");
+	psr_vsc.DB0 = 0;
+	psr_vsc.DB1 = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
 
-		/* Read EDID data */
-		retval = analogix_dp_read_bytes_from_i2c(dp,
-						I2C_EDID_DEVICE_ADDR,
-						EDID_HEADER_PATTERN,
-						EDID_BLOCK_LENGTH,
-						&edid[EDID_HEADER_PATTERN]);
-		if (retval != 0) {
-			dev_err(dp->dev, "EDID Read failed!\n");
-			return -EIO;
-		}
-		sum = analogix_dp_calc_edid_check_sum(edid);
-		if (sum != 0) {
-			dev_err(dp->dev, "EDID bad checksum!\n");
-			return -EIO;
-		}
-
-		/* Read additional EDID data */
-		retval = analogix_dp_read_bytes_from_i2c(dp,
-				I2C_EDID_DEVICE_ADDR,
-				EDID_BLOCK_LENGTH,
-				EDID_BLOCK_LENGTH,
-				&edid[EDID_BLOCK_LENGTH]);
-		if (retval != 0) {
-			dev_err(dp->dev, "EDID Read failed!\n");
-			return -EIO;
-		}
-		sum = analogix_dp_calc_edid_check_sum(&edid[EDID_BLOCK_LENGTH]);
-		if (sum != 0) {
-			dev_err(dp->dev, "EDID bad checksum!\n");
-			return -EIO;
-		}
-
-		analogix_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST,
-						&test_vector);
-		if (test_vector & DP_TEST_LINK_EDID_READ) {
-			analogix_dp_write_byte_to_dpcd(dp,
-				DP_TEST_EDID_CHECKSUM,
-				edid[EDID_BLOCK_LENGTH + EDID_CHECKSUM]);
-			analogix_dp_write_byte_to_dpcd(dp,
-				DP_TEST_RESPONSE,
-				DP_TEST_EDID_CHECKSUM_WRITE);
-		}
-	} else {
-		dev_info(dp->dev, "EDID data does not include any extensions.\n");
-
-		/* Read EDID data */
-		retval = analogix_dp_read_bytes_from_i2c(dp,
-				I2C_EDID_DEVICE_ADDR, EDID_HEADER_PATTERN,
-				EDID_BLOCK_LENGTH, &edid[EDID_HEADER_PATTERN]);
-		if (retval != 0) {
-			dev_err(dp->dev, "EDID Read failed!\n");
-			return -EIO;
-		}
-		sum = analogix_dp_calc_edid_check_sum(edid);
-		if (sum != 0) {
-			dev_err(dp->dev, "EDID bad checksum!\n");
-			return -EIO;
-		}
-
-		analogix_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST,
-						&test_vector);
-		if (test_vector & DP_TEST_LINK_EDID_READ) {
-			analogix_dp_write_byte_to_dpcd(dp,
-				DP_TEST_EDID_CHECKSUM, edid[EDID_CHECKSUM]);
-			analogix_dp_write_byte_to_dpcd(dp,
-				DP_TEST_RESPONSE, DP_TEST_EDID_CHECKSUM_WRITE);
-		}
-	}
-
-	dev_dbg(dp->dev, "EDID Read success!\n");
+	analogix_dp_send_psr_spd(dp, &psr_vsc);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(analogix_dp_enable_psr);
 
-static int analogix_dp_handle_edid(struct analogix_dp_device *dp)
+int analogix_dp_disable_psr(struct device *dev)
 {
-	u8 buf[12];
-	int i;
-	int retval;
+	struct analogix_dp_device *dp = dev_get_drvdata(dev);
+	struct edp_vsc_psr psr_vsc;
 
-	/* Read DPCD DP_DPCD_REV~RECEIVE_PORT1_CAP_1 */
-	retval = analogix_dp_read_bytes_from_dpcd(dp, DP_DPCD_REV, 12, buf);
-	if (retval)
-		return retval;
+	if (!dp->psr_support)
+		return -EINVAL;
 
-	/* Read EDID */
-	for (i = 0; i < 3; i++) {
-		retval = analogix_dp_read_edid(dp);
-		if (!retval)
-			break;
-	}
+	/* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
+	memset(&psr_vsc, 0, sizeof(psr_vsc));
+	psr_vsc.sdp_header.HB0 = 0;
+	psr_vsc.sdp_header.HB1 = 0x7;
+	psr_vsc.sdp_header.HB2 = 0x2;
+	psr_vsc.sdp_header.HB3 = 0x8;
 
-	return retval;
+	psr_vsc.DB0 = 0;
+	psr_vsc.DB1 = 0;
+
+	analogix_dp_send_psr_spd(dp, &psr_vsc);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_disable_psr);
+
+static bool analogix_dp_detect_sink_psr(struct analogix_dp_device *dp)
+{
+	unsigned char psr_version;
+
+	drm_dp_dpcd_readb(&dp->aux, DP_PSR_SUPPORT, &psr_version);
+	dev_dbg(dp->dev, "Panel PSR version : %x\n", psr_version);
+
+	return (psr_version & DP_PSR_IS_SUPPORTED) ? true : false;
+}
+
+static void analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
+{
+	unsigned char psr_en;
+
+	/* Disable psr function */
+	drm_dp_dpcd_readb(&dp->aux, DP_PSR_EN_CFG, &psr_en);
+	psr_en &= ~DP_PSR_ENABLE;
+	drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+
+	/* Main-Link transmitter remains active during PSR active states */
+	psr_en = DP_PSR_MAIN_LINK_ACTIVE | DP_PSR_CRC_VERIFICATION;
+	drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+
+	/* Enable psr function */
+	psr_en = DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE |
+		 DP_PSR_CRC_VERIFICATION;
+	drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+
+	analogix_dp_enable_psr_crc(dp);
 }
 
 static void
@@ -232,15 +189,15 @@
 {
 	u8 data;
 
-	analogix_dp_read_byte_from_dpcd(dp, DP_LANE_COUNT_SET, &data);
+	drm_dp_dpcd_readb(&dp->aux, DP_LANE_COUNT_SET, &data);
 
 	if (enable)
-		analogix_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
-					       DP_LANE_COUNT_ENHANCED_FRAME_EN |
-					       DPCD_LANE_COUNT_SET(data));
+		drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
+				   DP_LANE_COUNT_ENHANCED_FRAME_EN |
+					DPCD_LANE_COUNT_SET(data));
 	else
-		analogix_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
-					       DPCD_LANE_COUNT_SET(data));
+		drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
+				   DPCD_LANE_COUNT_SET(data));
 }
 
 static int analogix_dp_is_enhanced_mode_available(struct analogix_dp_device *dp)
@@ -248,7 +205,7 @@
 	u8 data;
 	int retval;
 
-	analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
+	drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data);
 	retval = DPCD_ENHANCED_FRAME_CAP(data);
 
 	return retval;
@@ -267,8 +224,8 @@
 {
 	analogix_dp_set_training_pattern(dp, DP_NONE);
 
-	analogix_dp_write_byte_to_dpcd(dp, DP_TRAINING_PATTERN_SET,
-				       DP_TRAINING_PATTERN_DISABLE);
+	drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+			   DP_TRAINING_PATTERN_DISABLE);
 }
 
 static void
@@ -313,8 +270,8 @@
 	/* Setup RX configuration */
 	buf[0] = dp->link_train.link_rate;
 	buf[1] = dp->link_train.lane_count;
-	retval = analogix_dp_write_bytes_to_dpcd(dp, DP_LINK_BW_SET, 2, buf);
-	if (retval)
+	retval = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, 2);
+	if (retval < 0)
 		return retval;
 
 	/* Set TX pre-emphasis to minimum */
@@ -338,20 +295,22 @@
 	analogix_dp_set_training_pattern(dp, TRAINING_PTN1);
 
 	/* Set RX training pattern */
-	retval = analogix_dp_write_byte_to_dpcd(dp,
-			DP_TRAINING_PATTERN_SET,
-			DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1);
-	if (retval)
+	retval = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+				    DP_LINK_SCRAMBLING_DISABLE |
+					DP_TRAINING_PATTERN_1);
+	if (retval < 0)
 		return retval;
 
 	for (lane = 0; lane < lane_count; lane++)
 		buf[lane] = DP_TRAIN_PRE_EMPH_LEVEL_0 |
 			    DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
 
-	retval = analogix_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
-						 lane_count, buf);
+	retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, buf,
+				   lane_count);
+	if (retval < 0)
+		return retval;
 
-	return retval;
+	return 0;
 }
 
 static unsigned char analogix_dp_get_lane_status(u8 link_status[2], int lane)
@@ -503,25 +462,23 @@
 
 	lane_count = dp->link_train.lane_count;
 
-	retval =  analogix_dp_read_bytes_from_dpcd(dp,
-			DP_LANE0_1_STATUS, 2, link_status);
-	if (retval)
+	retval = drm_dp_dpcd_read(&dp->aux, DP_LANE0_1_STATUS, link_status, 2);
+	if (retval < 0)
 		return retval;
 
-	retval =  analogix_dp_read_bytes_from_dpcd(dp,
-			DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
-	if (retval)
+	retval = drm_dp_dpcd_read(&dp->aux, DP_ADJUST_REQUEST_LANE0_1,
+				  adjust_request, 2);
+	if (retval < 0)
 		return retval;
 
 	if (analogix_dp_clock_recovery_ok(link_status, lane_count) == 0) {
 		/* set training pattern 2 for EQ */
 		analogix_dp_set_training_pattern(dp, TRAINING_PTN2);
 
-		retval = analogix_dp_write_byte_to_dpcd(dp,
-				DP_TRAINING_PATTERN_SET,
-				DP_LINK_SCRAMBLING_DISABLE |
-				DP_TRAINING_PATTERN_2);
-		if (retval)
+		retval = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+					    DP_LINK_SCRAMBLING_DISABLE |
+						DP_TRAINING_PATTERN_2);
+		if (retval < 0)
 			return retval;
 
 		dev_info(dp->dev, "Link Training Clock Recovery success\n");
@@ -559,13 +516,12 @@
 		analogix_dp_set_lane_link_training(dp,
 			dp->link_train.training_lane[lane], lane);
 
-	retval = analogix_dp_write_bytes_to_dpcd(dp,
-			DP_TRAINING_LANE0_SET, lane_count,
-			dp->link_train.training_lane);
-	if (retval)
+	retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
+				   dp->link_train.training_lane, lane_count);
+	if (retval < 0)
 		return retval;
 
-	return retval;
+	return 0;
 }
 
 static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
@@ -578,9 +534,8 @@
 
 	lane_count = dp->link_train.lane_count;
 
-	retval = analogix_dp_read_bytes_from_dpcd(dp,
-			DP_LANE0_1_STATUS, 2, link_status);
-	if (retval)
+	retval = drm_dp_dpcd_read(&dp->aux, DP_LANE0_1_STATUS, link_status, 2);
+	if (retval < 0)
 		return retval;
 
 	if (analogix_dp_clock_recovery_ok(link_status, lane_count)) {
@@ -588,14 +543,14 @@
 		return -EIO;
 	}
 
-	retval = analogix_dp_read_bytes_from_dpcd(dp,
-			DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
-	if (retval)
+	retval = drm_dp_dpcd_read(&dp->aux, DP_ADJUST_REQUEST_LANE0_1,
+				  adjust_request, 2);
+	if (retval < 0)
 		return retval;
 
-	retval = analogix_dp_read_byte_from_dpcd(dp,
-			DP_LANE_ALIGN_STATUS_UPDATED, &link_align);
-	if (retval)
+	retval = drm_dp_dpcd_readb(&dp->aux, DP_LANE_ALIGN_STATUS_UPDATED,
+				   &link_align);
+	if (retval < 0)
 		return retval;
 
 	analogix_dp_get_adjust_training_lane(dp, adjust_request);
@@ -636,10 +591,12 @@
 		analogix_dp_set_lane_link_training(dp,
 			dp->link_train.training_lane[lane], lane);
 
-	retval = analogix_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
-			lane_count, dp->link_train.training_lane);
+	retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
+				   dp->link_train.training_lane, lane_count);
+	if (retval < 0)
+		return retval;
 
-	return retval;
+	return 0;
 }
 
 static void analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp,
@@ -653,7 +610,7 @@
 	 * For DP rev.1.2, Maximum link rate of Main Link lanes
 	 * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps, 0x14 = 5.4Gbps
 	 */
-	analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LINK_RATE, &data);
+	drm_dp_dpcd_readb(&dp->aux, DP_MAX_LINK_RATE, &data);
 	*bandwidth = data;
 }
 
@@ -666,7 +623,7 @@
 	 * For DP rev.1.1, Maximum number of Main Link lanes
 	 * 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes
 	 */
-	analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
+	drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data);
 	*lane_count = DPCD_MAX_LANE_COUNT(data);
 }
 
@@ -835,19 +792,15 @@
 	if (enable) {
 		analogix_dp_enable_scrambling(dp);
 
-		analogix_dp_read_byte_from_dpcd(dp, DP_TRAINING_PATTERN_SET,
-						&data);
-		analogix_dp_write_byte_to_dpcd(dp,
-			DP_TRAINING_PATTERN_SET,
-			(u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
+		drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET, &data);
+		drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+				   (u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
 	} else {
 		analogix_dp_disable_scrambling(dp);
 
-		analogix_dp_read_byte_from_dpcd(dp, DP_TRAINING_PATTERN_SET,
-						&data);
-		analogix_dp_write_byte_to_dpcd(dp,
-			DP_TRAINING_PATTERN_SET,
-			(u8)(data | DP_LINK_SCRAMBLING_DISABLE));
+		drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET, &data);
+		drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+				   (u8)(data | DP_LINK_SCRAMBLING_DISABLE));
 	}
 }
 
@@ -921,21 +874,85 @@
 
 	/* Enable video */
 	analogix_dp_start_video(dp);
+
+	dp->psr_support = analogix_dp_detect_sink_psr(dp);
+	if (dp->psr_support)
+		analogix_dp_enable_sink_psr(dp);
 }
 
-int analogix_dp_get_modes(struct drm_connector *connector)
+/*
+ * This function is a bit of a catch-all for panel preparation, hopefully
+ * simplifying the logic of functions that need to prepare/unprepare the panel
+ * below.
+ *
+ * If @prepare is true, this function will prepare the panel. Conversely, if it
+ * is false, the panel will be unprepared.
+ *
+ * If @is_modeset_prepare is true, the function will disregard the current state
+ * of the panel and either prepare/unprepare the panel based on @prepare. Once
+ * it finishes, it will update dp->panel_is_modeset to reflect the current state
+ * of the panel.
+ */
+static int analogix_dp_prepare_panel(struct analogix_dp_device *dp,
+				     bool prepare, bool is_modeset_prepare)
+{
+	int ret = 0;
+
+	if (!dp->plat_data->panel)
+		return 0;
+
+	mutex_lock(&dp->panel_lock);
+
+	/*
+	 * Exit early if this is a temporary prepare/unprepare and we're already
+	 * modeset (since we neither want to prepare twice or unprepare early).
+	 */
+	if (dp->panel_is_modeset && !is_modeset_prepare)
+		goto out;
+
+	if (prepare)
+		ret = drm_panel_prepare(dp->plat_data->panel);
+	else
+		ret = drm_panel_unprepare(dp->plat_data->panel);
+
+	if (ret)
+		goto out;
+
+	if (is_modeset_prepare)
+		dp->panel_is_modeset = prepare;
+
+out:
+	mutex_unlock(&dp->panel_lock);
+	return ret;
+}
+
+static int analogix_dp_get_modes(struct drm_connector *connector)
 {
 	struct analogix_dp_device *dp = to_dp(connector);
-	struct edid *edid = (struct edid *)dp->edid;
-	int num_modes = 0;
+	struct edid *edid;
+	int ret, num_modes = 0;
 
-	if (analogix_dp_handle_edid(dp) == 0) {
-		drm_mode_connector_update_edid_property(&dp->connector, edid);
-		num_modes += drm_add_edid_modes(&dp->connector, edid);
-	}
-
-	if (dp->plat_data->panel)
+	if (dp->plat_data->panel) {
 		num_modes += drm_panel_get_modes(dp->plat_data->panel);
+	} else {
+		ret = analogix_dp_prepare_panel(dp, true, false);
+		if (ret) {
+			DRM_ERROR("Failed to prepare panel (%d)\n", ret);
+			return 0;
+		}
+
+		edid = drm_get_edid(connector, &dp->aux.ddc);
+		if (edid) {
+			drm_mode_connector_update_edid_property(&dp->connector,
+								edid);
+			num_modes += drm_add_edid_modes(&dp->connector, edid);
+			kfree(edid);
+		}
+
+		ret = analogix_dp_prepare_panel(dp, false, false);
+		if (ret)
+			DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
+	}
 
 	if (dp->plat_data->get_modes)
 		num_modes += dp->plat_data->get_modes(dp->plat_data, connector);
@@ -956,29 +973,37 @@
 	.best_encoder = analogix_dp_best_encoder,
 };
 
-enum drm_connector_status
+static enum drm_connector_status
 analogix_dp_detect(struct drm_connector *connector, bool force)
 {
 	struct analogix_dp_device *dp = to_dp(connector);
+	enum drm_connector_status status = connector_status_disconnected;
+	int ret;
 
-	if (analogix_dp_detect_hpd(dp))
+	if (dp->plat_data->panel)
+		return connector_status_connected;
+
+	ret = analogix_dp_prepare_panel(dp, true, false);
+	if (ret) {
+		DRM_ERROR("Failed to prepare panel (%d)\n", ret);
 		return connector_status_disconnected;
+	}
 
-	return connector_status_connected;
-}
+	if (!analogix_dp_detect_hpd(dp))
+		status = connector_status_connected;
 
-static void analogix_dp_connector_destroy(struct drm_connector *connector)
-{
-	drm_connector_unregister(connector);
-	drm_connector_cleanup(connector);
+	ret = analogix_dp_prepare_panel(dp, false, false);
+	if (ret)
+		DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
 
+	return status;
 }
 
 static const struct drm_connector_funcs analogix_dp_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = analogix_dp_detect,
-	.destroy = analogix_dp_connector_destroy,
+	.destroy = drm_connector_cleanup,
 	.reset = drm_atomic_helper_connector_reset,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -1035,6 +1060,16 @@
 	return 0;
 }
 
+static void analogix_dp_bridge_pre_enable(struct drm_bridge *bridge)
+{
+	struct analogix_dp_device *dp = bridge->driver_private;
+	int ret;
+
+	ret = analogix_dp_prepare_panel(dp, true, true);
+	if (ret)
+		DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+}
+
 static void analogix_dp_bridge_enable(struct drm_bridge *bridge)
 {
 	struct analogix_dp_device *dp = bridge->driver_private;
@@ -1058,6 +1093,7 @@
 static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
 {
 	struct analogix_dp_device *dp = bridge->driver_private;
+	int ret;
 
 	if (dp->dpms_mode != DRM_MODE_DPMS_ON)
 		return;
@@ -1077,6 +1113,10 @@
 
 	pm_runtime_put_sync(dp->dev);
 
+	ret = analogix_dp_prepare_panel(dp, false, true);
+	if (ret)
+		DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+
 	dp->dpms_mode = DRM_MODE_DPMS_OFF;
 }
 
@@ -1165,9 +1205,9 @@
 }
 
 static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
+	.pre_enable = analogix_dp_bridge_pre_enable,
 	.enable = analogix_dp_bridge_enable,
 	.disable = analogix_dp_bridge_disable,
-	.pre_enable = analogix_dp_bridge_nop,
 	.post_disable = analogix_dp_bridge_nop,
 	.mode_set = analogix_dp_bridge_mode_set,
 	.attach = analogix_dp_bridge_attach,
@@ -1231,6 +1271,14 @@
 	return 0;
 }
 
+static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux,
+				       struct drm_dp_aux_msg *msg)
+{
+	struct analogix_dp_device *dp = to_dp(aux);
+
+	return analogix_dp_transfer(dp, msg);
+}
+
 int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
 		     struct analogix_dp_plat_data *plat_data)
 {
@@ -1254,6 +1302,9 @@
 	dp->dev = &pdev->dev;
 	dp->dpms_mode = DRM_MODE_DPMS_OFF;
 
+	mutex_init(&dp->panel_lock);
+	dp->panel_is_modeset = false;
+
 	/*
 	 * platform dp driver need containor_of the plat_data to get
 	 * the driver private data, so we need to store the point of
@@ -1333,13 +1384,6 @@
 
 	phy_power_on(dp->phy);
 
-	if (dp->plat_data->panel) {
-		if (drm_panel_prepare(dp->plat_data->panel)) {
-			DRM_ERROR("failed to setup the panel\n");
-			return -EBUSY;
-		}
-	}
-
 	analogix_dp_init_dp(dp);
 
 	ret = devm_request_threaded_irq(&pdev->dev, dp->irq,
@@ -1355,6 +1399,14 @@
 	dp->drm_dev = drm_dev;
 	dp->encoder = dp->plat_data->encoder;
 
+	dp->aux.name = "DP-AUX";
+	dp->aux.transfer = analogix_dpaux_transfer;
+	dp->aux.dev = &pdev->dev;
+
+	ret = drm_dp_aux_register(&dp->aux);
+	if (ret)
+		goto err_disable_pm_runtime;
+
 	ret = analogix_dp_create_bridge(drm_dev, dp);
 	if (ret) {
 		DRM_ERROR("failed to create bridge (%d)\n", ret);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index b456380..5c6a288 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -20,15 +20,6 @@
 #define MAX_CR_LOOP 5
 #define MAX_EQ_LOOP 5
 
-/* I2C EDID Chip ID, Slave Address */
-#define I2C_EDID_DEVICE_ADDR			0x50
-#define I2C_E_EDID_DEVICE_ADDR			0x30
-
-#define EDID_BLOCK_LENGTH			0x80
-#define EDID_HEADER_PATTERN			0x00
-#define EDID_EXTENSION_FLAG			0x7e
-#define EDID_CHECKSUM				0x7f
-
 /* DP_MAX_LANE_COUNT */
 #define DPCD_ENHANCED_FRAME_CAP(x)		(((x) >> 7) & 0x1)
 #define DPCD_MAX_LANE_COUNT(x)			((x) & 0x1f)
@@ -166,6 +157,7 @@
 	struct drm_device	*drm_dev;
 	struct drm_connector	connector;
 	struct drm_bridge	*bridge;
+	struct drm_dp_aux       aux;
 	struct clk		*clock;
 	unsigned int		irq;
 	void __iomem		*reg_base;
@@ -176,7 +168,10 @@
 	int			dpms_mode;
 	int			hpd_gpio;
 	bool                    force_hpd;
-	unsigned char           edid[EDID_BLOCK_LENGTH * 2];
+	bool			psr_support;
+
+	struct mutex		panel_lock;
+	bool			panel_is_modeset;
 
 	struct analogix_dp_plat_data *plat_data;
 };
@@ -206,33 +201,6 @@
 void analogix_dp_init_aux(struct analogix_dp_device *dp);
 int analogix_dp_get_plug_in_status(struct analogix_dp_device *dp);
 void analogix_dp_enable_sw_function(struct analogix_dp_device *dp);
-int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp);
-int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp,
-				   unsigned int reg_addr,
-				   unsigned char data);
-int analogix_dp_read_byte_from_dpcd(struct analogix_dp_device *dp,
-				    unsigned int reg_addr,
-				    unsigned char *data);
-int analogix_dp_write_bytes_to_dpcd(struct analogix_dp_device *dp,
-				    unsigned int reg_addr,
-				    unsigned int count,
-				    unsigned char data[]);
-int analogix_dp_read_bytes_from_dpcd(struct analogix_dp_device *dp,
-				     unsigned int reg_addr,
-				     unsigned int count,
-				     unsigned char data[]);
-int analogix_dp_select_i2c_device(struct analogix_dp_device *dp,
-				  unsigned int device_addr,
-				  unsigned int reg_addr);
-int analogix_dp_read_byte_from_i2c(struct analogix_dp_device *dp,
-				   unsigned int device_addr,
-				   unsigned int reg_addr,
-				   unsigned int *data);
-int analogix_dp_read_bytes_from_i2c(struct analogix_dp_device *dp,
-				    unsigned int device_addr,
-				    unsigned int reg_addr,
-				    unsigned int count,
-				    unsigned char edid[]);
 void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype);
 void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype);
 void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count);
@@ -278,4 +246,10 @@
 void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp);
 void analogix_dp_enable_scrambling(struct analogix_dp_device *dp);
 void analogix_dp_disable_scrambling(struct analogix_dp_device *dp);
+void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp);
+void analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
+			      struct edp_vsc_psr *vsc);
+ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
+			     struct drm_dp_aux_msg *msg);
+
 #endif /* _ANALOGIX_DP_CORE_H */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 48030f0..cd37ac0 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -585,330 +585,6 @@
 	return retval;
 }
 
-int analogix_dp_read_byte_from_dpcd(struct analogix_dp_device *dp,
-				    unsigned int reg_addr,
-				    unsigned char *data)
-{
-	u32 reg;
-	int i;
-	int retval;
-
-	for (i = 0; i < 3; i++) {
-		/* Clear AUX CH data buffer */
-		reg = BUF_CLR;
-		writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
-		/* Select DPCD device address */
-		reg = AUX_ADDR_7_0(reg_addr);
-		writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
-		reg = AUX_ADDR_15_8(reg_addr);
-		writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
-		reg = AUX_ADDR_19_16(reg_addr);
-		writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
-
-		/*
-		 * Set DisplayPort transaction and read 1 byte
-		 * If bit 3 is 1, DisplayPort transaction.
-		 * If Bit 3 is 0, I2C transaction.
-		 */
-		reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
-		writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
-		/* Start AUX transaction */
-		retval = analogix_dp_start_aux_transaction(dp);
-		if (retval == 0)
-			break;
-
-		dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
-	}
-
-	/* Read data buffer */
-	reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
-	*data = (unsigned char)(reg & 0xff);
-
-	return retval;
-}
-
-int analogix_dp_write_bytes_to_dpcd(struct analogix_dp_device *dp,
-				    unsigned int reg_addr,
-				    unsigned int count,
-				    unsigned char data[])
-{
-	u32 reg;
-	unsigned int start_offset;
-	unsigned int cur_data_count;
-	unsigned int cur_data_idx;
-	int i;
-	int retval = 0;
-
-	/* Clear AUX CH data buffer */
-	reg = BUF_CLR;
-	writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
-	start_offset = 0;
-	while (start_offset < count) {
-		/* Buffer size of AUX CH is 16 * 4bytes */
-		if ((count - start_offset) > 16)
-			cur_data_count = 16;
-		else
-			cur_data_count = count - start_offset;
-
-		for (i = 0; i < 3; i++) {
-			/* Select DPCD device address */
-			reg = AUX_ADDR_7_0(reg_addr + start_offset);
-			writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
-			reg = AUX_ADDR_15_8(reg_addr + start_offset);
-			writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
-			reg = AUX_ADDR_19_16(reg_addr + start_offset);
-			writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
-
-			for (cur_data_idx = 0; cur_data_idx < cur_data_count;
-			     cur_data_idx++) {
-				reg = data[start_offset + cur_data_idx];
-				writel(reg, dp->reg_base +
-				       ANALOGIX_DP_BUF_DATA_0 +
-				       4 * cur_data_idx);
-			}
-
-			/*
-			 * Set DisplayPort transaction and write
-			 * If bit 3 is 1, DisplayPort transaction.
-			 * If Bit 3 is 0, I2C transaction.
-			 */
-			reg = AUX_LENGTH(cur_data_count) |
-				AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE;
-			writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
-			/* Start AUX transaction */
-			retval = analogix_dp_start_aux_transaction(dp);
-			if (retval == 0)
-				break;
-
-			dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
-				__func__);
-		}
-
-		start_offset += cur_data_count;
-	}
-
-	return retval;
-}
-
-int analogix_dp_read_bytes_from_dpcd(struct analogix_dp_device *dp,
-				     unsigned int reg_addr,
-				     unsigned int count,
-				     unsigned char data[])
-{
-	u32 reg;
-	unsigned int start_offset;
-	unsigned int cur_data_count;
-	unsigned int cur_data_idx;
-	int i;
-	int retval = 0;
-
-	/* Clear AUX CH data buffer */
-	reg = BUF_CLR;
-	writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
-	start_offset = 0;
-	while (start_offset < count) {
-		/* Buffer size of AUX CH is 16 * 4bytes */
-		if ((count - start_offset) > 16)
-			cur_data_count = 16;
-		else
-			cur_data_count = count - start_offset;
-
-		/* AUX CH Request Transaction process */
-		for (i = 0; i < 3; i++) {
-			/* Select DPCD device address */
-			reg = AUX_ADDR_7_0(reg_addr + start_offset);
-			writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
-			reg = AUX_ADDR_15_8(reg_addr + start_offset);
-			writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
-			reg = AUX_ADDR_19_16(reg_addr + start_offset);
-			writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
-
-			/*
-			 * Set DisplayPort transaction and read
-			 * If bit 3 is 1, DisplayPort transaction.
-			 * If Bit 3 is 0, I2C transaction.
-			 */
-			reg = AUX_LENGTH(cur_data_count) |
-				AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
-			writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
-			/* Start AUX transaction */
-			retval = analogix_dp_start_aux_transaction(dp);
-			if (retval == 0)
-				break;
-
-			dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
-				__func__);
-		}
-
-		for (cur_data_idx = 0; cur_data_idx < cur_data_count;
-		    cur_data_idx++) {
-			reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0
-						 + 4 * cur_data_idx);
-			data[start_offset + cur_data_idx] =
-				(unsigned char)reg;
-		}
-
-		start_offset += cur_data_count;
-	}
-
-	return retval;
-}
-
-int analogix_dp_select_i2c_device(struct analogix_dp_device *dp,
-				  unsigned int device_addr,
-				  unsigned int reg_addr)
-{
-	u32 reg;
-	int retval;
-
-	/* Set EDID device address */
-	reg = device_addr;
-	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
-	writel(0x0, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
-	writel(0x0, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
-
-	/* Set offset from base address of EDID device */
-	writel(reg_addr, dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
-
-	/*
-	 * Set I2C transaction and write address
-	 * If bit 3 is 1, DisplayPort transaction.
-	 * If Bit 3 is 0, I2C transaction.
-	 */
-	reg = AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_MOT |
-		AUX_TX_COMM_WRITE;
-	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
-	/* Start AUX transaction */
-	retval = analogix_dp_start_aux_transaction(dp);
-	if (retval != 0)
-		dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
-
-	return retval;
-}
-
-int analogix_dp_read_byte_from_i2c(struct analogix_dp_device *dp,
-				   unsigned int device_addr,
-				   unsigned int reg_addr,
-				   unsigned int *data)
-{
-	u32 reg;
-	int i;
-	int retval;
-
-	for (i = 0; i < 3; i++) {
-		/* Clear AUX CH data buffer */
-		reg = BUF_CLR;
-		writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
-		/* Select EDID device */
-		retval = analogix_dp_select_i2c_device(dp, device_addr,
-						       reg_addr);
-		if (retval != 0)
-			continue;
-
-		/*
-		 * Set I2C transaction and read data
-		 * If bit 3 is 1, DisplayPort transaction.
-		 * If Bit 3 is 0, I2C transaction.
-		 */
-		reg = AUX_TX_COMM_I2C_TRANSACTION |
-			AUX_TX_COMM_READ;
-		writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
-		/* Start AUX transaction */
-		retval = analogix_dp_start_aux_transaction(dp);
-		if (retval == 0)
-			break;
-
-		dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
-	}
-
-	/* Read data */
-	if (retval == 0)
-		*data = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
-
-	return retval;
-}
-
-int analogix_dp_read_bytes_from_i2c(struct analogix_dp_device *dp,
-				    unsigned int device_addr,
-				    unsigned int reg_addr,
-				    unsigned int count,
-				    unsigned char edid[])
-{
-	u32 reg;
-	unsigned int i, j;
-	unsigned int cur_data_idx;
-	unsigned int defer = 0;
-	int retval = 0;
-
-	for (i = 0; i < count; i += 16) {
-		for (j = 0; j < 3; j++) {
-			/* Clear AUX CH data buffer */
-			reg = BUF_CLR;
-			writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
-			/* Set normal AUX CH command */
-			reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
-			reg &= ~ADDR_ONLY;
-			writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
-
-			/*
-			 * If Rx sends defer, Tx sends only reads
-			 * request without sending address
-			 */
-			if (!defer)
-				retval = analogix_dp_select_i2c_device(dp,
-						device_addr, reg_addr + i);
-			else
-				defer = 0;
-
-			if (retval == 0) {
-				/*
-				 * Set I2C transaction and write data
-				 * If bit 3 is 1, DisplayPort transaction.
-				 * If Bit 3 is 0, I2C transaction.
-				 */
-				reg = AUX_LENGTH(16) |
-					AUX_TX_COMM_I2C_TRANSACTION |
-					AUX_TX_COMM_READ;
-				writel(reg, dp->reg_base +
-					ANALOGIX_DP_AUX_CH_CTL_1);
-
-				/* Start AUX transaction */
-				retval = analogix_dp_start_aux_transaction(dp);
-				if (retval == 0)
-					break;
-
-				dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
-					__func__);
-			}
-			/* Check if Rx sends defer */
-			reg = readl(dp->reg_base + ANALOGIX_DP_AUX_RX_COMM);
-			if (reg == AUX_RX_COMM_AUX_DEFER ||
-			    reg == AUX_RX_COMM_I2C_DEFER) {
-				dev_err(dp->dev, "Defer: %d\n\n", reg);
-				defer = 1;
-			}
-		}
-
-		for (cur_data_idx = 0; cur_data_idx < 16; cur_data_idx++) {
-			reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0
-						 + 4 * cur_data_idx);
-			edid[i + cur_data_idx] = (unsigned char)reg;
-		}
-	}
-
-	return retval;
-}
-
 void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype)
 {
 	u32 reg;
@@ -1073,34 +749,22 @@
 
 u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp)
 {
-	u32 reg;
-
-	reg = readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
-	return reg;
+	return readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
 }
 
 u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp)
 {
-	u32 reg;
-
-	reg = readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
-	return reg;
+	return readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
 }
 
 u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp)
 {
-	u32 reg;
-
-	reg = readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
-	return reg;
+	return readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
 }
 
 u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp)
 {
-	u32 reg;
-
-	reg = readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
-	return reg;
+	return readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
 }
 
 void analogix_dp_reset_macro(struct analogix_dp_device *dp)
@@ -1322,3 +986,181 @@
 	reg |= SCRAMBLING_DISABLE;
 	writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
 }
+
+void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp)
+{
+	writel(PSR_VID_CRC_ENABLE, dp->reg_base + ANALOGIX_DP_CRC_CON);
+}
+
+void analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
+			      struct edp_vsc_psr *vsc)
+{
+	unsigned int val;
+
+	/* don't send info frame */
+	val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+	val &= ~IF_EN;
+	writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+
+	/* configure single frame update mode */
+	writel(PSR_FRAME_UP_TYPE_BURST | PSR_CRC_SEL_HARDWARE,
+	       dp->reg_base + ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL);
+
+	/* configure VSC HB0~HB3 */
+	writel(vsc->sdp_header.HB0, dp->reg_base + ANALOGIX_DP_SPD_HB0);
+	writel(vsc->sdp_header.HB1, dp->reg_base + ANALOGIX_DP_SPD_HB1);
+	writel(vsc->sdp_header.HB2, dp->reg_base + ANALOGIX_DP_SPD_HB2);
+	writel(vsc->sdp_header.HB3, dp->reg_base + ANALOGIX_DP_SPD_HB3);
+
+	/* configure reused VSC PB0~PB3, magic number from vendor */
+	writel(0x00, dp->reg_base + ANALOGIX_DP_SPD_PB0);
+	writel(0x16, dp->reg_base + ANALOGIX_DP_SPD_PB1);
+	writel(0xCE, dp->reg_base + ANALOGIX_DP_SPD_PB2);
+	writel(0x5D, dp->reg_base + ANALOGIX_DP_SPD_PB3);
+
+	/* configure DB0 / DB1 values */
+	writel(vsc->DB0, dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB0);
+	writel(vsc->DB1, dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB1);
+
+	/* set reuse spd inforframe */
+	val = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3);
+	val |= REUSE_SPD_EN;
+	writel(val, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3);
+
+	/* mark info frame update */
+	val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+	val = (val | IF_UP) & ~IF_EN;
+	writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+
+	/* send info frame */
+	val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+	val |= IF_EN;
+	writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+}
+
+ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
+			     struct drm_dp_aux_msg *msg)
+{
+	u32 reg;
+	u8 *buffer = msg->buffer;
+	int timeout_loop = 0;
+	unsigned int i;
+	int num_transferred = 0;
+
+	/* Buffer size of AUX CH is 16 bytes */
+	if (WARN_ON(msg->size > 16))
+		return -E2BIG;
+
+	/* Clear AUX CH data buffer */
+	reg = BUF_CLR;
+	writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
+
+	switch (msg->request & ~DP_AUX_I2C_MOT) {
+	case DP_AUX_I2C_WRITE:
+		reg = AUX_TX_COMM_WRITE | AUX_TX_COMM_I2C_TRANSACTION;
+		if (msg->request & DP_AUX_I2C_MOT)
+			reg |= AUX_TX_COMM_MOT;
+		break;
+
+	case DP_AUX_I2C_READ:
+		reg = AUX_TX_COMM_READ | AUX_TX_COMM_I2C_TRANSACTION;
+		if (msg->request & DP_AUX_I2C_MOT)
+			reg |= AUX_TX_COMM_MOT;
+		break;
+
+	case DP_AUX_NATIVE_WRITE:
+		reg = AUX_TX_COMM_WRITE | AUX_TX_COMM_DP_TRANSACTION;
+		break;
+
+	case DP_AUX_NATIVE_READ:
+		reg = AUX_TX_COMM_READ | AUX_TX_COMM_DP_TRANSACTION;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	reg |= AUX_LENGTH(msg->size);
+	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
+
+	/* Select DPCD device address */
+	reg = AUX_ADDR_7_0(msg->address);
+	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
+	reg = AUX_ADDR_15_8(msg->address);
+	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
+	reg = AUX_ADDR_19_16(msg->address);
+	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
+
+	if (!(msg->request & DP_AUX_I2C_READ)) {
+		for (i = 0; i < msg->size; i++) {
+			reg = buffer[i];
+			writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
+			       4 * i);
+			num_transferred++;
+		}
+	}
+
+	/* Enable AUX CH operation */
+	reg = AUX_EN;
+
+	/* Zero-sized messages specify address-only transactions. */
+	if (msg->size < 1)
+		reg |= ADDR_ONLY;
+
+	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
+
+	/* Is AUX CH command reply received? */
+	/* TODO: Wait for an interrupt instead of looping? */
+	reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
+	while (!(reg & RPLY_RECEIV)) {
+		timeout_loop++;
+		if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) {
+			dev_err(dp->dev, "AUX CH command reply failed!\n");
+			return -ETIMEDOUT;
+		}
+		reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
+		usleep_range(10, 11);
+	}
+
+	/* Clear interrupt source for AUX CH command reply */
+	writel(RPLY_RECEIV, dp->reg_base + ANALOGIX_DP_INT_STA);
+
+	/* Clear interrupt source for AUX CH access error */
+	reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
+	if (reg & AUX_ERR) {
+		writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA);
+		return -EREMOTEIO;
+	}
+
+	/* Check AUX CH error access status */
+	reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA);
+	if ((reg & AUX_STATUS_MASK)) {
+		dev_err(dp->dev, "AUX CH error happened: %d\n\n",
+			reg & AUX_STATUS_MASK);
+		return -EREMOTEIO;
+	}
+
+	if (msg->request & DP_AUX_I2C_READ) {
+		for (i = 0; i < msg->size; i++) {
+			reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
+				    4 * i);
+			buffer[i] = (unsigned char)reg;
+			num_transferred++;
+		}
+	}
+
+	/* Check if Rx sends defer */
+	reg = readl(dp->reg_base + ANALOGIX_DP_AUX_RX_COMM);
+	if (reg == AUX_RX_COMM_AUX_DEFER)
+		msg->reply = DP_AUX_NATIVE_REPLY_DEFER;
+	else if (reg == AUX_RX_COMM_I2C_DEFER)
+		msg->reply = DP_AUX_I2C_REPLY_DEFER;
+	else if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_I2C_WRITE ||
+		 (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_I2C_READ)
+		msg->reply = DP_AUX_I2C_REPLY_ACK;
+	else if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE ||
+		 (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ)
+		msg->reply = DP_AUX_NATIVE_REPLY_ACK;
+
+	return num_transferred;
+}
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
index cdcc6c5..40200c6 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
@@ -22,6 +22,8 @@
 #define ANALOGIX_DP_VIDEO_CTL_8			0x3C
 #define ANALOGIX_DP_VIDEO_CTL_10		0x44
 
+#define ANALOGIX_DP_SPDIF_AUDIO_CTL_0		0xD8
+
 #define ANALOGIX_DP_PLL_REG_1			0xfc
 #define ANALOGIX_DP_PLL_REG_2			0x9e4
 #define ANALOGIX_DP_PLL_REG_3			0x9e8
@@ -30,6 +32,21 @@
 
 #define ANALOGIX_DP_PD				0x12c
 
+#define ANALOGIX_DP_IF_TYPE			0x244
+#define ANALOGIX_DP_IF_PKT_DB1			0x254
+#define ANALOGIX_DP_IF_PKT_DB2			0x258
+#define ANALOGIX_DP_SPD_HB0			0x2F8
+#define ANALOGIX_DP_SPD_HB1			0x2FC
+#define ANALOGIX_DP_SPD_HB2			0x300
+#define ANALOGIX_DP_SPD_HB3			0x304
+#define ANALOGIX_DP_SPD_PB0			0x308
+#define ANALOGIX_DP_SPD_PB1			0x30C
+#define ANALOGIX_DP_SPD_PB2			0x310
+#define ANALOGIX_DP_SPD_PB3			0x314
+#define ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL	0x318
+#define ANALOGIX_DP_VSC_SHADOW_DB0		0x31C
+#define ANALOGIX_DP_VSC_SHADOW_DB1		0x320
+
 #define ANALOGIX_DP_LANE_MAP			0x35C
 
 #define ANALOGIX_DP_ANALOG_CTL_1		0x370
@@ -103,6 +120,8 @@
 
 #define ANALOGIX_DP_SOC_GENERAL_CTL		0x800
 
+#define ANALOGIX_DP_CRC_CON			0x890
+
 /* ANALOGIX_DP_TX_SW_RESET */
 #define RESET_DP_TX				(0x1 << 0)
 
@@ -151,6 +170,7 @@
 #define VID_CHK_UPDATE_TYPE_SHIFT		(4)
 #define VID_CHK_UPDATE_TYPE_1			(0x1 << 4)
 #define VID_CHK_UPDATE_TYPE_0			(0x0 << 4)
+#define REUSE_SPD_EN				(0x1 << 3)
 
 /* ANALOGIX_DP_VIDEO_CTL_8 */
 #define VID_HRES_TH(x)				(((x) & 0xf) << 4)
@@ -167,6 +187,12 @@
 #define REF_CLK_27M				(0x0 << 0)
 #define REF_CLK_MASK				(0x1 << 0)
 
+/* ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL */
+#define PSR_FRAME_UP_TYPE_BURST			(0x1 << 0)
+#define PSR_FRAME_UP_TYPE_SINGLE		(0x0 << 0)
+#define PSR_CRC_SEL_HARDWARE			(0x1 << 1)
+#define PSR_CRC_SEL_MANUALLY			(0x0 << 1)
+
 /* ANALOGIX_DP_LANE_MAP */
 #define LANE3_MAP_LOGIC_LANE_0			(0x0 << 6)
 #define LANE3_MAP_LOGIC_LANE_1			(0x1 << 6)
@@ -376,4 +402,12 @@
 #define VIDEO_MODE_SLAVE_MODE			(0x1 << 0)
 #define VIDEO_MODE_MASTER_MODE			(0x0 << 0)
 
+/* ANALOGIX_DP_PKT_SEND_CTL */
+#define IF_UP					(0x1 << 4)
+#define IF_EN					(0x1 << 0)
+
+/* ANALOGIX_DP_CRC_CON */
+#define PSR_VID_CRC_FLUSH			(0x1 << 2)
+#define PSR_VID_CRC_ENABLE			(0x1 << 0)
+
 #endif /* _ANALOGIX_DP_REG_H */
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
new file mode 100644
index 0000000..afec232
--- /dev/null
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2015-2016 Free Electrons
+ * Copyright (C) 2015-2016 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/of_graph.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+struct dumb_vga {
+	struct drm_bridge	bridge;
+	struct drm_connector	connector;
+
+	struct i2c_adapter	*ddc;
+};
+
+static inline struct dumb_vga *
+drm_bridge_to_dumb_vga(struct drm_bridge *bridge)
+{
+	return container_of(bridge, struct dumb_vga, bridge);
+}
+
+static inline struct dumb_vga *
+drm_connector_to_dumb_vga(struct drm_connector *connector)
+{
+	return container_of(connector, struct dumb_vga, connector);
+}
+
+static int dumb_vga_get_modes(struct drm_connector *connector)
+{
+	struct dumb_vga *vga = drm_connector_to_dumb_vga(connector);
+	struct edid *edid;
+	int ret;
+
+	if (IS_ERR(vga->ddc))
+		goto fallback;
+
+	edid = drm_get_edid(connector, vga->ddc);
+	if (!edid) {
+		DRM_INFO("EDID readout failed, falling back to standard modes\n");
+		goto fallback;
+	}
+
+	drm_mode_connector_update_edid_property(connector, edid);
+	return drm_add_edid_modes(connector, edid);
+
+fallback:
+	/*
+	 * In case we cannot retrieve the EDIDs (broken or missing i2c
+	 * bus), fallback on the XGA standards
+	 */
+	ret = drm_add_modes_noedid(connector, 1920, 1200);
+
+	/* And prefer a mode pretty much anyone can handle */
+	drm_set_preferred_mode(connector, 1024, 768);
+
+	return ret;
+}
+
+static const struct drm_connector_helper_funcs dumb_vga_con_helper_funcs = {
+	.get_modes	= dumb_vga_get_modes,
+};
+
+static enum drm_connector_status
+dumb_vga_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct dumb_vga *vga = drm_connector_to_dumb_vga(connector);
+
+	/*
+	 * Even if we have an I2C bus, we can't assume that the cable
+	 * is disconnected if drm_probe_ddc fails. Some cables don't
+	 * wire the DDC pins, or the I2C bus might not be working at
+	 * all.
+	 */
+	if (!IS_ERR(vga->ddc) && drm_probe_ddc(vga->ddc))
+		return connector_status_connected;
+
+	return connector_status_unknown;
+}
+
+static const struct drm_connector_funcs dumb_vga_con_funcs = {
+	.dpms			= drm_atomic_helper_connector_dpms,
+	.detect			= dumb_vga_connector_detect,
+	.fill_modes		= drm_helper_probe_single_connector_modes,
+	.destroy		= drm_connector_cleanup,
+	.reset			= drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
+};
+
+static int dumb_vga_attach(struct drm_bridge *bridge)
+{
+	struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
+	int ret;
+
+	if (!bridge->encoder) {
+		DRM_ERROR("Missing encoder\n");
+		return -ENODEV;
+	}
+
+	drm_connector_helper_add(&vga->connector,
+				 &dumb_vga_con_helper_funcs);
+	ret = drm_connector_init(bridge->dev, &vga->connector,
+				 &dumb_vga_con_funcs, DRM_MODE_CONNECTOR_VGA);
+	if (ret) {
+		DRM_ERROR("Failed to initialize connector\n");
+		return ret;
+	}
+
+	drm_mode_connector_attach_encoder(&vga->connector,
+					  bridge->encoder);
+
+	return 0;
+}
+
+static const struct drm_bridge_funcs dumb_vga_bridge_funcs = {
+	.attach		= dumb_vga_attach,
+};
+
+static struct i2c_adapter *dumb_vga_retrieve_ddc(struct device *dev)
+{
+	struct device_node *end_node, *phandle, *remote;
+	struct i2c_adapter *ddc;
+
+	end_node = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1);
+	if (!end_node) {
+		dev_err(dev, "Missing connector endpoint\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	remote = of_graph_get_remote_port_parent(end_node);
+	of_node_put(end_node);
+	if (!remote) {
+		dev_err(dev, "Enable to parse remote node\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0);
+	of_node_put(remote);
+	if (!phandle)
+		return ERR_PTR(-ENODEV);
+
+	ddc = of_get_i2c_adapter_by_node(phandle);
+	of_node_put(phandle);
+	if (!ddc)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	return ddc;
+}
+
+static int dumb_vga_probe(struct platform_device *pdev)
+{
+	struct dumb_vga *vga;
+	int ret;
+
+	vga = devm_kzalloc(&pdev->dev, sizeof(*vga), GFP_KERNEL);
+	if (!vga)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, vga);
+
+	vga->ddc = dumb_vga_retrieve_ddc(&pdev->dev);
+	if (IS_ERR(vga->ddc)) {
+		if (PTR_ERR(vga->ddc) == -ENODEV) {
+			dev_dbg(&pdev->dev,
+				"No i2c bus specified. Disabling EDID readout\n");
+		} else {
+			dev_err(&pdev->dev, "Couldn't retrieve i2c bus\n");
+			return PTR_ERR(vga->ddc);
+		}
+	}
+
+	vga->bridge.funcs = &dumb_vga_bridge_funcs;
+	vga->bridge.of_node = pdev->dev.of_node;
+
+	ret = drm_bridge_add(&vga->bridge);
+	if (ret && !IS_ERR(vga->ddc))
+		i2c_put_adapter(vga->ddc);
+
+	return ret;
+}
+
+static int dumb_vga_remove(struct platform_device *pdev)
+{
+	struct dumb_vga *vga = platform_get_drvdata(pdev);
+
+	drm_bridge_remove(&vga->bridge);
+
+	if (!IS_ERR(vga->ddc))
+		i2c_put_adapter(vga->ddc);
+
+	return 0;
+}
+
+static const struct of_device_id dumb_vga_match[] = {
+	{ .compatible = "dumb-vga-dac" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, dumb_vga_match);
+
+static struct platform_driver dumb_vga_driver = {
+	.probe	= dumb_vga_probe,
+	.remove	= dumb_vga_remove,
+	.driver		= {
+		.name		= "dumb-vga-dac",
+		.of_match_table	= dumb_vga_match,
+	},
+};
+module_platform_driver(dumb_vga_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Dumb VGA DAC bridge driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c
index 122bb01..8f2d137 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c
@@ -640,7 +640,6 @@
 	.remove	= snd_dw_hdmi_remove,
 	.driver	= {
 		.name = DRIVER_NAME,
-		.owner = THIS_MODULE,
 		.pm = PM_OPS,
 	},
 };
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index 77ab473..ab7023e 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -940,10 +940,11 @@
 	 */
 
 	/*
-	 * AVI data byte 1 differences: Colorspace in bits 4,5 rather than 5,6,
-	 * active aspect present in bit 6 rather than 4.
+	 * AVI data byte 1 differences: Colorspace in bits 0,1 rather than 5,6,
+	 * scan info in bits 4,5 rather than 0,1 and active aspect present in
+	 * bit 6 rather than 4.
 	 */
-	val = (frame.colorspace & 3) << 4 | (frame.scan_mode & 0x3);
+	val = (frame.scan_mode & 3) << 4 | (frame.colorspace & 3);
 	if (frame.active_aspect & 15)
 		val |= HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT;
 	if (frame.top_bar || frame.bottom_bar)
@@ -1476,12 +1477,6 @@
 	return mode_status;
 }
 
-static void dw_hdmi_connector_destroy(struct drm_connector *connector)
-{
-	drm_connector_unregister(connector);
-	drm_connector_cleanup(connector);
-}
-
 static void dw_hdmi_connector_force(struct drm_connector *connector)
 {
 	struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
@@ -1498,7 +1493,7 @@
 	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = dw_hdmi_connector_detect,
-	.destroy = dw_hdmi_connector_destroy,
+	.destroy = drm_connector_cleanup,
 	.force = dw_hdmi_connector_force,
 	.reset = drm_atomic_helper_connector_reset,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -1812,9 +1807,6 @@
 	/* Disable all interrupts */
 	hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
 
-	hdmi->connector.funcs->destroy(&hdmi->connector);
-	hdmi->encoder->funcs->destroy(hdmi->encoder);
-
 	clk_disable_unprepare(hdmi->iahb_clk);
 	clk_disable_unprepare(hdmi->isfr_clk);
 	i2c_put_adapter(hdmi->ddc);
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 93f3dac..f1a9993 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -245,16 +245,11 @@
 	return connector_status_connected;
 }
 
-static void ptn3460_connector_destroy(struct drm_connector *connector)
-{
-	drm_connector_cleanup(connector);
-}
-
 static const struct drm_connector_funcs ptn3460_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = ptn3460_detect,
-	.destroy = ptn3460_connector_destroy,
+	.destroy = drm_connector_cleanup,
 	.reset = drm_atomic_helper_connector_reset,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 583b8ce..6f7c2f9 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -16,7 +16,6 @@
 #include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/err.h>
-#include <linux/fb.h>
 #include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
@@ -484,16 +483,11 @@
 	return connector_status_connected;
 }
 
-static void ps8622_connector_destroy(struct drm_connector *connector)
-{
-	drm_connector_cleanup(connector);
-}
-
 static const struct drm_connector_funcs ps8622_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = ps8622_detect,
-	.destroy = ps8622_connector_destroy,
+	.destroy = drm_connector_cleanup,
 	.reset = drm_atomic_helper_connector_reset,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index a09825d..44d476e 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1165,17 +1165,11 @@
 	.best_encoder = tc_connector_best_encoder,
 };
 
-static void tc_connector_destroy(struct drm_connector *connector)
-{
-	drm_connector_unregister(connector);
-	drm_connector_cleanup(connector);
-}
-
 static const struct drm_connector_funcs tc_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = tc_connector_detect,
-	.destroy = tc_connector_destroy,
+	.destroy = drm_connector_cleanup,
 	.reset = drm_atomic_helper_connector_reset,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index b05f7ea..6c76d12 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -57,7 +57,7 @@
 #ifdef CONFIG_X86
 	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 #endif
-	remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
+	drm_fb_helper_remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
 	kfree(ap);
 
 	return 0;
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 3b5be72..daecf1a 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -13,8 +13,6 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 
-#include <linux/fb.h>
-
 #include "cirrus_drv.h"
 
 static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 1cc9ee6..bb2438d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -150,7 +150,8 @@
 {
 	struct cirrus_bo *cirrusbo = cirrus_bo(bo);
 
-	return drm_vma_node_verify_access(&cirrusbo->gem.vma_node, filp);
+	return drm_vma_node_verify_access(&cirrusbo->gem.vma_node,
+					  filp->private_data);
 }
 
 static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 605bd24..d621c8a 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -430,9 +430,7 @@
  * intact so it can still be used. It is safe to call this if AGP is disabled or
  * was already removed.
  *
- * If DRIVER_MODESET is active, nothing is done to protect the modesetting
- * resources from getting destroyed. Drivers are responsible of cleaning them up
- * during device shutdown.
+ * Cleanup is only done for drivers who have DRIVER_LEGACY set.
  */
 void drm_legacy_agp_clear(struct drm_device *dev)
 {
@@ -440,7 +438,7 @@
 
 	if (!dev->agp)
 		return;
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
 		return;
 
 	list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 2a3ded4..2373960 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -837,8 +837,9 @@
 	/* Check whether this plane supports the fb pixel format. */
 	ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format);
 	if (ret) {
-		DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
-				 drm_get_format_name(state->fb->pixel_format));
+		char *format_name = drm_get_format_name(state->fb->pixel_format);
+		DRM_DEBUG_ATOMIC("Invalid pixel format %s\n", format_name);
+		kfree(format_name);
 		return ret;
 	}
 
@@ -1690,7 +1691,7 @@
 				goto out;
 			}
 
-			prop = drm_property_find(dev, prop_id);
+			prop = drm_mode_obj_find_prop_id(obj, prop_id);
 			if (!prop) {
 				drm_mode_object_unreference(obj);
 				ret = -ENOENT;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 20be86d..c3f8347 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -594,7 +594,7 @@
 	struct drm_plane_state *plane_state;
 	int i, ret = 0;
 
-	ret = drm_atomic_helper_normalize_zpos(dev, state);
+	ret = drm_atomic_normalize_zpos(dev, state);
 	if (ret)
 		return ret;
 
@@ -749,6 +749,8 @@
 		/* Right function depends upon target state. */
 		if (crtc->state->enable && funcs->prepare)
 			funcs->prepare(crtc);
+		else if (funcs->atomic_disable)
+			funcs->atomic_disable(crtc, old_crtc_state);
 		else if (funcs->disable)
 			funcs->disable(crtc);
 		else
@@ -886,8 +888,12 @@
 		 * Each encoder has at most one connector (since we always steal
 		 * it away), so we won't call mode_set hooks twice.
 		 */
-		if (funcs && funcs->mode_set)
+		if (funcs && funcs->atomic_mode_set) {
+			funcs->atomic_mode_set(encoder, new_crtc_state,
+					       connector->state);
+		} else if (funcs && funcs->mode_set) {
 			funcs->mode_set(encoder, mode, adjusted_mode);
+		}
 
 		drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
 	}
@@ -1003,29 +1009,46 @@
  * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
  * @dev: DRM device
  * @state: atomic state object with old state structures
+ * @pre_swap: if true, do an interruptible wait
  *
  * For implicit sync, driver should fish the exclusive fence out from the
  * incoming fb's and stash it in the drm_plane_state.  This is called after
  * drm_atomic_helper_swap_state() so it uses the current plane state (and
  * just uses the atomic state to find the changed planes)
+ *
+ * Returns zero if success or < 0 if fence_wait() fails.
  */
-void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
-			    struct drm_atomic_state *state)
+int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
+				      struct drm_atomic_state *state,
+				      bool pre_swap)
 {
 	struct drm_plane *plane;
 	struct drm_plane_state *plane_state;
-	int i;
+	int i, ret;
 
 	for_each_plane_in_state(state, plane, plane_state, i) {
-		if (!plane->state->fence)
+		if (!pre_swap)
+			plane_state = plane->state;
+
+		if (!plane_state->fence)
 			continue;
 
-		WARN_ON(!plane->state->fb);
+		WARN_ON(!plane_state->fb);
 
-		fence_wait(plane->state->fence, false);
-		fence_put(plane->state->fence);
-		plane->state->fence = NULL;
+		/*
+		 * If waiting for fences pre-swap (ie: nonblock), userspace can
+		 * still interrupt the operation. Instead of blocking until the
+		 * timer expires, make the wait interruptible.
+		 */
+		ret = fence_wait(plane_state->fence, pre_swap);
+		if (ret)
+			return ret;
+
+		fence_put(plane_state->fence);
+		plane_state->fence = NULL;
 	}
+
+	return 0;
 }
 EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
 
@@ -1142,7 +1165,8 @@
  *
  *     drm_atomic_helper_commit_modeset_enables(dev, state);
  *
- *     drm_atomic_helper_commit_planes(dev, state, true);
+ *     drm_atomic_helper_commit_planes(dev, state,
+ *                                     DRM_PLANE_COMMIT_ACTIVE_ONLY);
  *
  * for committing the atomic update to hardware.  See the kerneldoc entries for
  * these three functions for more details.
@@ -1153,7 +1177,7 @@
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 
-	drm_atomic_helper_commit_planes(dev, state, false);
+	drm_atomic_helper_commit_planes(dev, state, 0);
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
@@ -1172,7 +1196,7 @@
 
 	funcs = dev->mode_config.helper_private;
 
-	drm_atomic_helper_wait_for_fences(dev, state);
+	drm_atomic_helper_wait_for_fences(dev, state, false);
 
 	drm_atomic_helper_wait_for_dependencies(state);
 
@@ -1231,6 +1255,12 @@
 	if (ret)
 		return ret;
 
+	if (!nonblock) {
+		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
+		if (ret)
+			return ret;
+	}
+
 	/*
 	 * This is the point of no return - everything below never fails except
 	 * when the hw goes bonghits. Which means we can commit the new state on
@@ -1631,6 +1661,9 @@
 
 		funcs = plane->helper_private;
 
+		if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
+			continue;
+
 		if (funcs->prepare_fb) {
 			ret = funcs->prepare_fb(plane, plane_state);
 			if (ret)
@@ -1647,18 +1680,20 @@
 		if (j >= i)
 			continue;
 
+		if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
+			continue;
+
 		funcs = plane->helper_private;
 
 		if (funcs->cleanup_fb)
 			funcs->cleanup_fb(plane, plane_state);
-
 	}
 
 	return ret;
 }
 EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
 
-bool plane_crtc_active(struct drm_plane_state *state)
+static bool plane_crtc_active(const struct drm_plane_state *state)
 {
 	return state->crtc && state->crtc->state->active;
 }
@@ -1667,7 +1702,7 @@
  * drm_atomic_helper_commit_planes - commit plane state
  * @dev: DRM device
  * @old_state: atomic state object with old state structures
- * @active_only: Only commit on active CRTC if set
+ * @flags: flags for committing plane state
  *
  * This function commits the new plane state using the plane and atomic helper
  * functions for planes and crtcs. It assumes that the atomic state has already
@@ -1687,25 +1722,34 @@
  * most drivers don't need to be immediately notified of plane updates for a
  * disabled CRTC.
  *
- * Unless otherwise needed, drivers are advised to set the @active_only
- * parameters to true in order not to receive plane update notifications related
- * to a disabled CRTC. This avoids the need to manually ignore plane updates in
+ * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
+ * @flags in order not to receive plane update notifications related to a
+ * disabled CRTC. This avoids the need to manually ignore plane updates in
  * driver code when the driver and/or hardware can't or just don't need to deal
  * with updates on disabled CRTCs, for example when supporting runtime PM.
  *
- * The drm_atomic_helper_commit() default implementation only sets @active_only
- * to false to most closely match the behaviour of the legacy helpers. This should
- * not be copied blindly by drivers.
+ * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
+ * display controllers require to disable a CRTC's planes when the CRTC is
+ * disabled. This function would skip the ->atomic_disable call for a plane if
+ * the CRTC of the old plane state needs a modesetting operation. Of course,
+ * the drivers need to disable the planes in their CRTC disable callbacks
+ * since no one else would do that.
+ *
+ * The drm_atomic_helper_commit() default implementation doesn't set the
+ * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
+ * This should not be copied blindly by drivers.
  */
 void drm_atomic_helper_commit_planes(struct drm_device *dev,
 				     struct drm_atomic_state *old_state,
-				     bool active_only)
+				     uint32_t flags)
 {
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state;
 	struct drm_plane *plane;
 	struct drm_plane_state *old_plane_state;
 	int i;
+	bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
+	bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
 
 	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
 		const struct drm_crtc_helper_funcs *funcs;
@@ -1749,10 +1793,19 @@
 		/*
 		 * Special-case disabling the plane if drivers support it.
 		 */
-		if (disabling && funcs->atomic_disable)
+		if (disabling && funcs->atomic_disable) {
+			struct drm_crtc_state *crtc_state;
+
+			crtc_state = old_plane_state->crtc->state;
+
+			if (drm_atomic_crtc_needs_modeset(crtc_state) &&
+			    no_disable)
+				continue;
+
 			funcs->atomic_disable(plane, old_plane_state);
-		else if (plane->state->crtc || disabling)
+		} else if (plane->state->crtc || disabling) {
 			funcs->atomic_update(plane, old_plane_state);
+		}
 	}
 
 	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
@@ -1831,12 +1884,12 @@
 
 /**
  * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
- * @crtc: CRTC
+ * @old_crtc_state: atomic state object with the old CRTC state
  * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
  *
  * Disables all planes associated with the given CRTC. This can be
- * used for instance in the CRTC helper disable callback to disable
- * all planes before shutting down the display pipeline.
+ * used for instance in the CRTC helper atomic_disable callback to disable
+ * all planes.
  *
  * If the atomic-parameter is set the function calls the CRTC's
  * atomic_begin hook before and atomic_flush hook after disabling the
@@ -1845,9 +1898,11 @@
  * It is a bug to call this function without having implemented the
  * ->atomic_disable() plane hook.
  */
-void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
-					      bool atomic)
+void
+drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
+					 bool atomic)
 {
+	struct drm_crtc *crtc = old_crtc_state->crtc;
 	const struct drm_crtc_helper_funcs *crtc_funcs =
 		crtc->helper_private;
 	struct drm_plane *plane;
@@ -1855,11 +1910,11 @@
 	if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
 		crtc_funcs->atomic_begin(crtc, NULL);
 
-	drm_for_each_plane(plane, crtc->dev) {
+	drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
 		const struct drm_plane_helper_funcs *plane_funcs =
 			plane->helper_private;
 
-		if (plane->state->crtc != crtc || !plane_funcs)
+		if (!plane_funcs)
 			continue;
 
 		WARN_ON(!plane_funcs->atomic_disable);
@@ -1894,6 +1949,9 @@
 	for_each_plane_in_state(old_state, plane, plane_state, i) {
 		const struct drm_plane_helper_funcs *funcs;
 
+		if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc))
+			continue;
+
 		funcs = plane->helper_private;
 
 		if (funcs->cleanup_fb)
@@ -2354,7 +2412,7 @@
 	primary_state->crtc_h = vdisplay;
 	primary_state->src_x = set->x << 16;
 	primary_state->src_y = set->y << 16;
-	if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
+	if (primary_state->rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) {
 		primary_state->src_w = vdisplay << 16;
 		primary_state->src_h = hdisplay << 16;
 	} else {
@@ -3039,7 +3097,7 @@
 
 	if (plane->state) {
 		plane->state->plane = plane;
-		plane->state->rotation = BIT(DRM_ROTATE_0);
+		plane->state->rotation = DRM_ROTATE_0;
 	}
 }
 EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 4153e8a..6b14351 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -251,7 +251,7 @@
 	if (!drm_is_current_master(file_priv))
 		goto out;
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+	if (drm_core_check_feature(dev, DRIVER_LEGACY)) {
 		/*
 		 * Since the master is disappearing, so is the
 		 * possibility to lock.
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index f3c0942..85172a9 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -25,12 +25,173 @@
  */
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
-#include <drm/drm_crtc.h>
+#include <drm/drm_blend.h>
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/sort.h>
 
-#include "drm_internal.h"
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * The basic plane composition model supported by standard plane properties only
+ * has a source rectangle (in logical pixels within the &drm_framebuffer), with
+ * sub-pixel accuracy, which is scaled up to a pixel-aligned destination
+ * rectangle in the visible area of a &drm_crtc. The visible area of a CRTC is
+ * defined by the horizontal and vertical visible pixels (stored in @hdisplay
+ * and @vdisplay) of the requested mode (stored in @mode in the
+ * &drm_crtc_state). These two rectangles are both stored in the
+ * &drm_plane_state.
+ *
+ * For the atomic ioctl the following standard (atomic) properties on the plane object
+ * encode the basic plane composition model:
+ *
+ * SRC_X:
+ * 	X coordinate offset for the source rectangle within the
+ * 	&drm_framebuffer, in 16.16 fixed point. Must be positive.
+ * SRC_Y:
+ * 	Y coordinate offset for the source rectangle within the
+ * 	&drm_framebuffer, in 16.16 fixed point. Must be positive.
+ * SRC_W:
+ * 	Width for the source rectangle within the &drm_framebuffer, in 16.16
+ * 	fixed point. SRC_X plus SRC_W must be within the width of the source
+ * 	framebuffer. Must be positive.
+ * SRC_H:
+ * 	Height for the source rectangle within the &drm_framebuffer, in 16.16
+ * 	fixed point. SRC_Y plus SRC_H must be within the height of the source
+ * 	framebuffer. Must be positive.
+ * CRTC_X:
+ * 	X coordinate offset for the destination rectangle. Can be negative.
+ * CRTC_Y:
+ * 	Y coordinate offset for the destination rectangle. Can be negative.
+ * CRTC_W:
+ * 	Width for the destination rectangle. CRTC_X plus CRTC_W can extend past
+ * 	the currently visible horizontal area of the &drm_crtc.
+ * CRTC_H:
+ * 	Height for the destination rectangle. CRTC_Y plus CRTC_H can extend past
+ * 	the currently visible vertical area of the &drm_crtc.
+ * FB_ID:
+ * 	Mode object ID of the &drm_framebuffer this plane should scan out.
+ * CRTC_ID:
+ * 	Mode object ID of the &drm_crtc this plane should be connected to.
+ *
+ * Note that the source rectangle must fully lie within the bounds of the
+ * &drm_framebuffer. The destination rectangle can lie outside of the visible
+ * area of the current mode of the CRTC. It must be apprpriately clipped by the
+ * driver, which can be done by calling drm_plane_helper_check_update(). Drivers
+ * are also allowed to round the subpixel sampling positions appropriately, but
+ * only to the next full pixel. No pixel outside of the source rectangle may
+ * ever be sampled, which is important when applying more sophisticated
+ * filtering than just a bilinear one when scaling. The filtering mode when
+ * scaling is unspecified.
+ *
+ * On top of this basic transformation additional properties can be exposed by
+ * the driver:
+ *
+ * - Rotation is set up with drm_mode_create_rotation_property(). It adds a
+ *   rotation and reflection step between the source and destination rectangles.
+ *   Without this property the rectangle is only scaled, but not rotated or
+ *   reflected.
+ *
+ * - Z position is set up with drm_plane_create_zpos_immutable_property() and
+ *   drm_plane_create_zpos_property(). It controls the visibility of overlapping
+ *   planes. Without this property the primary plane is always below the cursor
+ *   plane, and ordering between all other planes is undefined.
+ *
+ * Note that all the property extensions described here apply either to the
+ * plane or the CRTC (e.g. for the background color, which currently is not
+ * exposed and assumed to be black).
+ */
+
+/**
+ * drm_mode_create_rotation_property - create a new rotation property
+ * @dev: DRM device
+ * @supported_rotations: bitmask of supported rotations and reflections
+ *
+ * This creates a new property with the selected support for transformations.
+ * The resulting property should be stored in @rotation_property in
+ * &drm_mode_config. It then must be attached to each plane which supports
+ * rotations using drm_object_attach_property().
+ *
+ * FIXME: Probably better if the rotation property is created on each plane,
+ * like the zpos property. Otherwise it's not possible to allow different
+ * rotation modes on different planes.
+ *
+ * Since a rotation by 180° degress is the same as reflecting both along the x
+ * and the y axis the rotation property is somewhat redundant. Drivers can use
+ * drm_rotation_simplify() to normalize values of this property.
+ *
+ * The property exposed to userspace is a bitmask property (see
+ * drm_property_create_bitmask()) called "rotation" and has the following
+ * bitmask enumaration values:
+ *
+ * DRM_ROTATE_0:
+ * 	"rotate-0"
+ * DRM_ROTATE_90:
+ * 	"rotate-90"
+ * DRM_ROTATE_180:
+ * 	"rotate-180"
+ * DRM_ROTATE_270:
+ * 	"rotate-270"
+ * DRM_REFLECT_X:
+ * 	"reflect-x"
+ * DRM_REFELCT_Y:
+ * 	"reflect-y"
+ *
+ * Rotation is the specified amount in degrees in counter clockwise direction,
+ * the X and Y axis are within the source rectangle, i.e.  the X/Y axis before
+ * rotation. After reflection, the rotation is applied to the image sampled from
+ * the source rectangle, before scaling it to fit the destination rectangle.
+ */
+struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
+						       unsigned int supported_rotations)
+{
+	static const struct drm_prop_enum_list props[] = {
+		{ __builtin_ffs(DRM_ROTATE_0) - 1,   "rotate-0" },
+		{ __builtin_ffs(DRM_ROTATE_90) - 1,  "rotate-90" },
+		{ __builtin_ffs(DRM_ROTATE_180) - 1, "rotate-180" },
+		{ __builtin_ffs(DRM_ROTATE_270) - 1, "rotate-270" },
+		{ __builtin_ffs(DRM_REFLECT_X) - 1,  "reflect-x" },
+		{ __builtin_ffs(DRM_REFLECT_Y) - 1,  "reflect-y" },
+	};
+
+	return drm_property_create_bitmask(dev, 0, "rotation",
+					   props, ARRAY_SIZE(props),
+					   supported_rotations);
+}
+EXPORT_SYMBOL(drm_mode_create_rotation_property);
+
+/**
+ * drm_rotation_simplify() - Try to simplify the rotation
+ * @rotation: Rotation to be simplified
+ * @supported_rotations: Supported rotations
+ *
+ * Attempt to simplify the rotation to a form that is supported.
+ * Eg. if the hardware supports everything except DRM_REFLECT_X
+ * one could call this function like this:
+ *
+ * drm_rotation_simplify(rotation, DRM_ROTATE_0 |
+ *                       DRM_ROTATE_90 | DRM_ROTATE_180 |
+ *                       DRM_ROTATE_270 | DRM_REFLECT_Y);
+ *
+ * to eliminate the DRM_ROTATE_X flag. Depending on what kind of
+ * transforms the hardware supports, this function may not
+ * be able to produce a supported transform, so the caller should
+ * check the result afterwards.
+ */
+unsigned int drm_rotation_simplify(unsigned int rotation,
+				   unsigned int supported_rotations)
+{
+	if (rotation & ~supported_rotations) {
+		rotation ^= DRM_REFLECT_X | DRM_REFLECT_Y;
+		rotation = (rotation & DRM_REFLECT_MASK) |
+		           BIT((ffs(rotation & DRM_ROTATE_MASK) + 1) % 4);
+	}
+
+	return rotation;
+}
+EXPORT_SYMBOL(drm_rotation_simplify);
 
 /**
  * drm_plane_create_zpos_property - create mutable zpos property
@@ -49,10 +210,14 @@
  * If zpos of some planes cannot be changed (like fixed background or
  * cursor/topmost planes), driver should adjust min/max values and assign those
  * planes immutable zpos property with lower or higher values (for more
- * information, see drm_mode_create_zpos_immutable_property() function). In such
+ * information, see drm_plane_create_zpos_immutable_property() function). In such
  * case driver should also assign proper initial zpos values for all planes in
  * its plane_reset() callback, so the planes will be always sorted properly.
  *
+ * See also drm_atomic_normalize_zpos().
+ *
+ * The property exposed to userspace is called "zpos".
+ *
  * Returns:
  * Zero on success, negative errno on failure.
  */
@@ -88,7 +253,9 @@
  * support for it in drm core. Using this property driver lets userspace
  * to get the arrangement of the planes for blending operation and notifies
  * it that the hardware (or driver) doesn't support changing of the planes'
- * order.
+ * order. For mutable zpos see drm_plane_create_zpos_property().
+ *
+ * The property exposed to userspace is called "zpos".
  *
  * Returns:
  * Zero on success, negative errno on failure.
@@ -127,20 +294,6 @@
 		return sa->plane->base.id - sb->plane->base.id;
 }
 
-/**
- * drm_atomic_helper_crtc_normalize_zpos - calculate normalized zpos values
- * @crtc: crtc with planes, which have to be considered for normalization
- * @crtc_state: new atomic state to apply
- *
- * This function checks new states of all planes assigned to given crtc and
- * calculates normalized zpos value for them. Planes are compared first by their
- * zpos values, then by plane id (if zpos equals). Plane with lowest zpos value
- * is at the bottom. The plane_state->normalized_zpos is then filled with unique
- * values from 0 to number of active planes in crtc minus one.
- *
- * RETURNS
- * Zero for success or -errno
- */
 static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc,
 					  struct drm_crtc_state *crtc_state)
 {
@@ -193,20 +346,25 @@
 }
 
 /**
- * drm_atomic_helper_normalize_zpos - calculate normalized zpos values for all
- *				      crtcs
+ * drm_atomic_normalize_zpos - calculate normalized zpos values for all crtcs
  * @dev: DRM device
  * @state: atomic state of DRM device
  *
  * This function calculates normalized zpos value for all modified planes in
- * the provided atomic state of DRM device. For more information, see
- * drm_atomic_helper_crtc_normalize_zpos() function.
+ * the provided atomic state of DRM device.
+ *
+ * For every CRTC this function checks new states of all planes assigned to
+ * it and calculates normalized zpos value for these planes. Planes are compared
+ * first by their zpos values, then by plane id (if zpos is equal). The plane
+ * with lowest zpos value is at the bottom. The plane_state->normalized_zpos is
+ * then filled with unique values from 0 to number of active planes in crtc
+ * minus one.
  *
  * RETURNS
  * Zero for success or -errno
  */
-int drm_atomic_helper_normalize_zpos(struct drm_device *dev,
-				     struct drm_atomic_state *state)
+int drm_atomic_normalize_zpos(struct drm_device *dev,
+			      struct drm_atomic_state *state)
 {
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
@@ -236,3 +394,4 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_atomic_normalize_zpos);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 2555430..0ee052b 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -23,10 +23,9 @@
 
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 
-#include <drm/drm_crtc.h>
-
-#include "drm/drmP.h"
+#include <drm/drm_bridge.h>
 
 /**
  * DOC: overview
@@ -98,11 +97,11 @@
  * @dev: DRM device
  * @bridge: bridge control structure
  *
- * called by a kms driver to link one of our encoder/bridge to the given
+ * Called by a kms driver to link one of our encoder/bridge to the given
  * bridge.
  *
  * Note that setting up links between the bridge and our encoder/bridge
- * objects needs to be handled by the kms driver itself
+ * objects needs to be handled by the kms driver itself.
  *
  * RETURNS:
  * Zero on success, error code on failure
@@ -125,6 +124,31 @@
 EXPORT_SYMBOL(drm_bridge_attach);
 
 /**
+ * drm_bridge_detach - deassociate given bridge from its DRM device
+ *
+ * @bridge: bridge control structure
+ *
+ * Called by a kms driver to unlink the given bridge from its DRM device.
+ *
+ * Note that tearing down links between the bridge and our encoder/bridge
+ * objects needs to be handled by the kms driver itself.
+ */
+void drm_bridge_detach(struct drm_bridge *bridge)
+{
+	if (WARN_ON(!bridge))
+		return;
+
+	if (WARN_ON(!bridge->dev))
+		return;
+
+	if (bridge->funcs->detach)
+		bridge->funcs->detach(bridge);
+
+	bridge->dev = NULL;
+}
+EXPORT_SYMBOL(drm_bridge_detach);
+
+/**
  * DOC: bridge callbacks
  *
  * The &drm_bridge_funcs ops are populated by the bridge driver. The DRM
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index c3a12cd..adb1dd7 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -397,7 +397,7 @@
 		return -EPERM;
 
 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-	    drm_core_check_feature(dev, DRIVER_MODESET))
+	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	err = drm_addmap_core(dev, map->offset, map->size, map->type,
@@ -443,7 +443,7 @@
 	int i;
 
 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-	    drm_core_check_feature(dev, DRIVER_MODESET))
+	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	idx = map->offset;
@@ -545,7 +545,7 @@
 void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
 {
 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-	    drm_core_check_feature(dev, DRIVER_MODESET))
+	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return;
 
 	mutex_lock(&dev->struct_mutex);
@@ -558,7 +558,7 @@
 {
 	struct drm_map_list *r_list, *list_temp;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
 		return;
 
 	mutex_lock(&dev->struct_mutex);
@@ -595,7 +595,7 @@
 	int ret;
 
 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-	    drm_core_check_feature(dev, DRIVER_MODESET))
+	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	mutex_lock(&dev->struct_mutex);
@@ -755,7 +755,7 @@
 		return -EINVAL;
 	}
 
-	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
 	if (!entry->buflist) {
 		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
@@ -905,14 +905,14 @@
 		return -EINVAL;
 	}
 
-	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
 	if (!entry->buflist) {
 		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
 
-	entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
+	entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
 	if (!entry->seglist) {
 		kfree(entry->buflist);
 		mutex_unlock(&dev->struct_mutex);
@@ -923,8 +923,9 @@
 	/* Keep the original pagelist until we know all the allocations
 	 * have succeeded
 	 */
-	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
-			       sizeof(*dma->pagelist), GFP_KERNEL);
+	temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
+				      sizeof(*dma->pagelist),
+				      GFP_KERNEL);
 	if (!temp_pagelist) {
 		kfree(entry->buflist);
 		kfree(entry->seglist);
@@ -1116,8 +1117,7 @@
 		return -EINVAL;
 	}
 
-	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
-				GFP_KERNEL);
+	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
 	if (!entry->buflist) {
 		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
@@ -1220,7 +1220,7 @@
 	struct drm_buf_desc *request = data;
 	int ret;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1266,7 +1266,7 @@
 	int i;
 	int count;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1347,7 +1347,7 @@
 	int order;
 	struct drm_buf_entry *entry;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1395,7 +1395,7 @@
 	int idx;
 	struct drm_buf *buf;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1450,7 +1450,7 @@
 	struct drm_buf_map *request = data;
 	int i;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1530,7 +1530,7 @@
 int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
 		  struct drm_file *file_priv)
 {
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	if (dev->driver->dma_ioctl)
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
new file mode 100644
index 0000000..d28ffdd
--- /dev/null
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_color_mgmt.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * Color management or color space adjustments is supported through a set of 5
+ * properties on the &drm_crtc object. They are set up by calling
+ * drm_crtc_enable_color_mgmt().
+ *
+ * "DEGAMMA_LUT”:
+ *	Blob property to set the degamma lookup table (LUT) mapping pixel data
+ *	from the framebuffer before it is given to the transformation matrix.
+ *	The data is interpreted as an array of struct &drm_color_lut elements.
+ *	Hardware might choose not to use the full precision of the LUT elements
+ *	nor use all the elements of the LUT (for example the hardware might
+ *	choose to interpolate between LUT[0] and LUT[4]).
+ *
+ * “DEGAMMA_LUT_SIZE”:
+ *	Unsinged range property to give the size of the lookup table to be set
+ *	on the DEGAMMA_LUT property (the size depends on the underlying
+ *	hardware). If drivers support multiple LUT sizes then they should
+ *	publish the largest size, and sub-sample smaller sized LUTs (e.g. for
+ *	split-gamma modes) appropriately.
+ *
+ * “CTM”:
+ *	Blob property to set the current transformation matrix (CTM) apply to
+ *	pixel data after the lookup through the degamma LUT and before the
+ *	lookup through the gamma LUT. The data is interpreted as a struct
+ *	&drm_color_ctm.
+ *
+ * “GAMMA_LUT”:
+ *	Blob property to set the gamma lookup table (LUT) mapping pixel data
+ *	after the transformation matrix to data sent to the connector. The
+ *	data is interpreted as an array of struct &drm_color_lut elements.
+ *	Hardware might choose not to use the full precision of the LUT elements
+ *	nor use all the elements of the LUT (for example the hardware might
+ *	choose to interpolate between LUT[0] and LUT[4]).
+ *
+ * “GAMMA_LUT_SIZE”:
+ *	Unsigned range property to give the size of the lookup table to be set
+ *	on the GAMMA_LUT property (the size depends on the underlying hardware).
+ *	If drivers support multiple LUT sizes then they should publish the
+ *	largest size, and sub-sample smaller sized LUTs (e.g. for split-gamma
+ *	modes) appropriately.
+ *
+ * There is also support for a legacy gamma table, which is set up by calling
+ * drm_mode_crtc_set_gamma_size(). Drivers which support both should use
+ * drm_atomic_helper_legacy_gamma_set() to alias the legacy gamma ramp with the
+ * "GAMMA_LUT" property above.
+ */
+
+/**
+ * drm_crtc_enable_color_mgmt - enable color management properties
+ * @crtc: DRM CRTC
+ * @degamma_lut_size: the size of the degamma lut (before CSC)
+ * @has_ctm: whether to attach ctm_property for CSC matrix
+ * @gamma_lut_size: the size of the gamma lut (after CSC)
+ *
+ * This function lets the driver enable the color correction
+ * properties on a CRTC. This includes 3 degamma, csc and gamma
+ * properties that userspace can set and 2 size properties to inform
+ * the userspace of the lut sizes. Each of the properties are
+ * optional. The gamma and degamma properties are only attached if
+ * their size is not 0 and ctm_property is only attached if has_ctm is
+ * true.
+ */
+void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+				uint degamma_lut_size,
+				bool has_ctm,
+				uint gamma_lut_size)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_mode_config *config = &dev->mode_config;
+
+	if (degamma_lut_size) {
+		drm_object_attach_property(&crtc->base,
+					   config->degamma_lut_property, 0);
+		drm_object_attach_property(&crtc->base,
+					   config->degamma_lut_size_property,
+					   degamma_lut_size);
+	}
+
+	if (has_ctm)
+		drm_object_attach_property(&crtc->base,
+					   config->ctm_property, 0);
+
+	if (gamma_lut_size) {
+		drm_object_attach_property(&crtc->base,
+					   config->gamma_lut_property, 0);
+		drm_object_attach_property(&crtc->base,
+					   config->gamma_lut_size_property,
+					   gamma_lut_size);
+	}
+}
+EXPORT_SYMBOL(drm_crtc_enable_color_mgmt);
+
+/**
+ * drm_mode_crtc_set_gamma_size - set the gamma table size
+ * @crtc: CRTC to set the gamma table size for
+ * @gamma_size: size of the gamma table
+ *
+ * Drivers which support gamma tables should set this to the supported gamma
+ * table size when initializing the CRTC. Currently the drm core only supports a
+ * fixed gamma table size.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+				 int gamma_size)
+{
+	uint16_t *r_base, *g_base, *b_base;
+	int i;
+
+	crtc->gamma_size = gamma_size;
+
+	crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
+				    GFP_KERNEL);
+	if (!crtc->gamma_store) {
+		crtc->gamma_size = 0;
+		return -ENOMEM;
+	}
+
+	r_base = crtc->gamma_store;
+	g_base = r_base + gamma_size;
+	b_base = g_base + gamma_size;
+	for (i = 0; i < gamma_size; i++) {
+		r_base[i] = i << 8;
+		g_base[i] = i << 8;
+		b_base[i] = i << 8;
+	}
+
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
+
+/**
+ * drm_mode_gamma_set_ioctl - set the gamma table
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Set the gamma table of a CRTC to the one passed in by the user. Userspace can
+ * inquire the required gamma table size through drm_mode_gamma_get_ioctl.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_crtc_lut *crtc_lut = data;
+	struct drm_crtc *crtc;
+	void *r_base, *g_base, *b_base;
+	int size;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+	crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+	if (!crtc) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	if (crtc->funcs->gamma_set == NULL) {
+		ret = -ENOSYS;
+		goto out;
+	}
+
+	/* memcpy into gamma store */
+	if (crtc_lut->gamma_size != crtc->gamma_size) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	size = crtc_lut->gamma_size * (sizeof(uint16_t));
+	r_base = crtc->gamma_store;
+	if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	g_base = r_base + size;
+	if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	b_base = g_base + size;
+	if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
+
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+
+}
+
+/**
+ * drm_mode_gamma_get_ioctl - get the gamma table
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Copy the current gamma table into the storage provided. This also provides
+ * the gamma table size the driver expects, which can be used to size the
+ * allocated storage.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_crtc_lut *crtc_lut = data;
+	struct drm_crtc *crtc;
+	void *r_base, *g_base, *b_base;
+	int size;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+	crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+	if (!crtc) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	/* memcpy into gamma store */
+	if (crtc_lut->gamma_size != crtc->gamma_size) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	size = crtc_lut->gamma_size * (sizeof(uint16_t));
+	r_base = crtc->gamma_store;
+	if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	g_base = r_base + size;
+	if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	b_base = g_base + size;
+	if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
new file mode 100644
index 0000000..2db7fb5
--- /dev/null
+++ b/drivers/gpu/drm/drm_connector.c
@@ -0,0 +1,1123 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_edid.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * In DRM connectors are the general abstraction for display sinks, and include
+ * als fixed panels or anything else that can display pixels in some form. As
+ * opposed to all other KMS objects representing hardware (like CRTC, encoder or
+ * plane abstractions) connectors can be hotplugged and unplugged at runtime.
+ * Hence they are reference-counted using drm_connector_reference() and
+ * drm_connector_unreference().
+ *
+ * KMS driver must create, initialize, register and attach at a struct
+ * &drm_connector for each such sink. The instance is created as other KMS
+ * objects and initialized by setting the following fields.
+ *
+ * The connector is then registered with a call to drm_connector_init() with a
+ * pointer to the connector functions and a connector type, and exposed through
+ * sysfs with a call to drm_connector_register().
+ *
+ * Connectors must be attached to an encoder to be used. For devices that map
+ * connectors to encoders 1:1, the connector should be attached at
+ * initialization time with a call to drm_mode_connector_attach_encoder(). The
+ * driver must also set the struct &drm_connector encoder field to point to the
+ * attached encoder.
+ *
+ * For connectors which are not fixed (like built-in panels) the driver needs to
+ * support hotplug notifications. The simplest way to do that is by using the
+ * probe helpers, see drm_kms_helper_poll_init() for connectors which don't have
+ * hardware support for hotplug interrupts. Connectors with hardware hotplug
+ * support can instead use e.g. drm_helper_hpd_irq_event().
+ */
+
+struct drm_conn_prop_enum_list {
+	int type;
+	const char *name;
+	struct ida ida;
+};
+
+/*
+ * Connector and encoder types.
+ */
+static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
+	{ DRM_MODE_CONNECTOR_Unknown, "Unknown" },
+	{ DRM_MODE_CONNECTOR_VGA, "VGA" },
+	{ DRM_MODE_CONNECTOR_DVII, "DVI-I" },
+	{ DRM_MODE_CONNECTOR_DVID, "DVI-D" },
+	{ DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
+	{ DRM_MODE_CONNECTOR_Composite, "Composite" },
+	{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
+	{ DRM_MODE_CONNECTOR_LVDS, "LVDS" },
+	{ DRM_MODE_CONNECTOR_Component, "Component" },
+	{ DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
+	{ DRM_MODE_CONNECTOR_DisplayPort, "DP" },
+	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
+	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
+	{ DRM_MODE_CONNECTOR_TV, "TV" },
+	{ DRM_MODE_CONNECTOR_eDP, "eDP" },
+	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
+	{ DRM_MODE_CONNECTOR_DSI, "DSI" },
+	{ DRM_MODE_CONNECTOR_DPI, "DPI" },
+};
+
+void drm_connector_ida_init(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
+		ida_init(&drm_connector_enum_list[i].ida);
+}
+
+void drm_connector_ida_destroy(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
+		ida_destroy(&drm_connector_enum_list[i].ida);
+}
+
+/**
+ * drm_connector_get_cmdline_mode - reads the user's cmdline mode
+ * @connector: connector to quwery
+ *
+ * The kernel supports per-connector configuration of its consoles through
+ * use of the video= parameter. This function parses that option and
+ * extracts the user's specified mode (or enable/disable status) for a
+ * particular connector. This is typically only used during the early fbdev
+ * setup.
+ */
+static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
+{
+	struct drm_cmdline_mode *mode = &connector->cmdline_mode;
+	char *option = NULL;
+
+	if (fb_get_options(connector->name, &option))
+		return;
+
+	if (!drm_mode_parse_command_line_for_connector(option,
+						       connector,
+						       mode))
+		return;
+
+	if (mode->force) {
+		const char *s;
+
+		switch (mode->force) {
+		case DRM_FORCE_OFF:
+			s = "OFF";
+			break;
+		case DRM_FORCE_ON_DIGITAL:
+			s = "ON - dig";
+			break;
+		default:
+		case DRM_FORCE_ON:
+			s = "ON";
+			break;
+		}
+
+		DRM_INFO("forcing %s connector %s\n", connector->name, s);
+		connector->force = mode->force;
+	}
+
+	DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
+		      connector->name,
+		      mode->xres, mode->yres,
+		      mode->refresh_specified ? mode->refresh : 60,
+		      mode->rb ? " reduced blanking" : "",
+		      mode->margins ? " with margins" : "",
+		      mode->interlace ?  " interlaced" : "");
+}
+
+static void drm_connector_free(struct kref *kref)
+{
+	struct drm_connector *connector =
+		container_of(kref, struct drm_connector, base.refcount);
+	struct drm_device *dev = connector->dev;
+
+	drm_mode_object_unregister(dev, &connector->base);
+	connector->funcs->destroy(connector);
+}
+
+/**
+ * drm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible type of the connector
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_init(struct drm_device *dev,
+		       struct drm_connector *connector,
+		       const struct drm_connector_funcs *funcs,
+		       int connector_type)
+{
+	struct drm_mode_config *config = &dev->mode_config;
+	int ret;
+	struct ida *connector_ida =
+		&drm_connector_enum_list[connector_type].ida;
+
+	drm_modeset_lock_all(dev);
+
+	ret = drm_mode_object_get_reg(dev, &connector->base,
+				      DRM_MODE_OBJECT_CONNECTOR,
+				      false, drm_connector_free);
+	if (ret)
+		goto out_unlock;
+
+	connector->base.properties = &connector->properties;
+	connector->dev = dev;
+	connector->funcs = funcs;
+
+	ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
+	if (ret < 0)
+		goto out_put;
+	connector->index = ret;
+	ret = 0;
+
+	connector->connector_type = connector_type;
+	connector->connector_type_id =
+		ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
+	if (connector->connector_type_id < 0) {
+		ret = connector->connector_type_id;
+		goto out_put_id;
+	}
+	connector->name =
+		kasprintf(GFP_KERNEL, "%s-%d",
+			  drm_connector_enum_list[connector_type].name,
+			  connector->connector_type_id);
+	if (!connector->name) {
+		ret = -ENOMEM;
+		goto out_put_type_id;
+	}
+
+	INIT_LIST_HEAD(&connector->probed_modes);
+	INIT_LIST_HEAD(&connector->modes);
+	connector->edid_blob_ptr = NULL;
+	connector->status = connector_status_unknown;
+
+	drm_connector_get_cmdline_mode(connector);
+
+	/* We should add connectors at the end to avoid upsetting the connector
+	 * index too much. */
+	list_add_tail(&connector->head, &config->connector_list);
+	config->num_connector++;
+
+	if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+		drm_object_attach_property(&connector->base,
+					      config->edid_property,
+					      0);
+
+	drm_object_attach_property(&connector->base,
+				      config->dpms_property, 0);
+
+	if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+		drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
+	}
+
+	connector->debugfs_entry = NULL;
+out_put_type_id:
+	if (ret)
+		ida_simple_remove(connector_ida, connector->connector_type_id);
+out_put_id:
+	if (ret)
+		ida_simple_remove(&config->connector_ida, connector->index);
+out_put:
+	if (ret)
+		drm_mode_object_unregister(dev, &connector->base);
+
+out_unlock:
+	drm_modeset_unlock_all(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_connector_init);
+
+/**
+ * drm_mode_connector_attach_encoder - attach a connector to an encoder
+ * @connector: connector to attach
+ * @encoder: encoder to attach @connector to
+ *
+ * This function links up a connector to an encoder. Note that the routing
+ * restrictions between encoders and crtcs are exposed to userspace through the
+ * possible_clones and possible_crtcs bitmasks.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+				      struct drm_encoder *encoder)
+{
+	int i;
+
+	/*
+	 * In the past, drivers have attempted to model the static association
+	 * of connector to encoder in simple connector/encoder devices using a
+	 * direct assignment of connector->encoder = encoder. This connection
+	 * is a logical one and the responsibility of the core, so drivers are
+	 * expected not to mess with this.
+	 *
+	 * Note that the error return should've been enough here, but a large
+	 * majority of drivers ignores the return value, so add in a big WARN
+	 * to get people's attention.
+	 */
+	if (WARN_ON(connector->encoder))
+		return -EINVAL;
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0) {
+			connector->encoder_ids[i] = encoder->base.id;
+			return 0;
+		}
+	}
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
+
+static void drm_mode_remove(struct drm_connector *connector,
+			    struct drm_display_mode *mode)
+{
+	list_del(&mode->head);
+	drm_mode_destroy(connector->dev, mode);
+}
+
+/**
+ * drm_connector_cleanup - cleans up an initialised connector
+ * @connector: connector to cleanup
+ *
+ * Cleans up the connector but doesn't free the object.
+ */
+void drm_connector_cleanup(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *mode, *t;
+
+	/* The connector should have been removed from userspace long before
+	 * it is finally destroyed.
+	 */
+	if (WARN_ON(connector->registered))
+		drm_connector_unregister(connector);
+
+	if (connector->tile_group) {
+		drm_mode_put_tile_group(dev, connector->tile_group);
+		connector->tile_group = NULL;
+	}
+
+	list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
+		drm_mode_remove(connector, mode);
+
+	list_for_each_entry_safe(mode, t, &connector->modes, head)
+		drm_mode_remove(connector, mode);
+
+	ida_simple_remove(&drm_connector_enum_list[connector->connector_type].ida,
+			  connector->connector_type_id);
+
+	ida_simple_remove(&dev->mode_config.connector_ida,
+			  connector->index);
+
+	kfree(connector->display_info.bus_formats);
+	drm_mode_object_unregister(dev, &connector->base);
+	kfree(connector->name);
+	connector->name = NULL;
+	list_del(&connector->head);
+	dev->mode_config.num_connector--;
+
+	WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
+	if (connector->state && connector->funcs->atomic_destroy_state)
+		connector->funcs->atomic_destroy_state(connector,
+						       connector->state);
+
+	memset(connector, 0, sizeof(*connector));
+}
+EXPORT_SYMBOL(drm_connector_cleanup);
+
+/**
+ * drm_connector_register - register a connector
+ * @connector: the connector to register
+ *
+ * Register userspace interfaces for a connector
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_register(struct drm_connector *connector)
+{
+	int ret;
+
+	if (connector->registered)
+		return 0;
+
+	ret = drm_sysfs_connector_add(connector);
+	if (ret)
+		return ret;
+
+	ret = drm_debugfs_connector_add(connector);
+	if (ret) {
+		goto err_sysfs;
+	}
+
+	if (connector->funcs->late_register) {
+		ret = connector->funcs->late_register(connector);
+		if (ret)
+			goto err_debugfs;
+	}
+
+	drm_mode_object_register(connector->dev, &connector->base);
+
+	connector->registered = true;
+	return 0;
+
+err_debugfs:
+	drm_debugfs_connector_remove(connector);
+err_sysfs:
+	drm_sysfs_connector_remove(connector);
+	return ret;
+}
+EXPORT_SYMBOL(drm_connector_register);
+
+/**
+ * drm_connector_unregister - unregister a connector
+ * @connector: the connector to unregister
+ *
+ * Unregister userspace interfaces for a connector
+ */
+void drm_connector_unregister(struct drm_connector *connector)
+{
+	if (!connector->registered)
+		return;
+
+	if (connector->funcs->early_unregister)
+		connector->funcs->early_unregister(connector);
+
+	drm_sysfs_connector_remove(connector);
+	drm_debugfs_connector_remove(connector);
+
+	connector->registered = false;
+}
+EXPORT_SYMBOL(drm_connector_unregister);
+
+void drm_connector_unregister_all(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+
+	/* FIXME: taking the mode config mutex ends up in a clash with sysfs */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		drm_connector_unregister(connector);
+}
+
+int drm_connector_register_all(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	int ret;
+
+	/* FIXME: taking the mode config mutex ends up in a clash with
+	 * fbcon/backlight registration */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		ret = drm_connector_register(connector);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	mutex_unlock(&dev->mode_config.mutex);
+	drm_connector_unregister_all(dev);
+	return ret;
+}
+
+/**
+ * drm_get_connector_status_name - return a string for connector status
+ * @status: connector status to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions this one here returns a
+ * const pointer and hence is threadsafe.
+ */
+const char *drm_get_connector_status_name(enum drm_connector_status status)
+{
+	if (status == connector_status_connected)
+		return "connected";
+	else if (status == connector_status_disconnected)
+		return "disconnected";
+	else
+		return "unknown";
+}
+EXPORT_SYMBOL(drm_get_connector_status_name);
+
+static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
+	{ SubPixelUnknown, "Unknown" },
+	{ SubPixelHorizontalRGB, "Horizontal RGB" },
+	{ SubPixelHorizontalBGR, "Horizontal BGR" },
+	{ SubPixelVerticalRGB, "Vertical RGB" },
+	{ SubPixelVerticalBGR, "Vertical BGR" },
+	{ SubPixelNone, "None" },
+};
+
+/**
+ * drm_get_subpixel_order_name - return a string for a given subpixel enum
+ * @order: enum of subpixel_order
+ *
+ * Note you could abuse this and return something out of bounds, but that
+ * would be a caller error.  No unscrubbed user data should make it here.
+ */
+const char *drm_get_subpixel_order_name(enum subpixel_order order)
+{
+	return drm_subpixel_enum_list[order].name;
+}
+EXPORT_SYMBOL(drm_get_subpixel_order_name);
+
+static const struct drm_prop_enum_list drm_dpms_enum_list[] = {
+	{ DRM_MODE_DPMS_ON, "On" },
+	{ DRM_MODE_DPMS_STANDBY, "Standby" },
+	{ DRM_MODE_DPMS_SUSPEND, "Suspend" },
+	{ DRM_MODE_DPMS_OFF, "Off" }
+};
+DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
+
+/**
+ * drm_display_info_set_bus_formats - set the supported bus formats
+ * @info: display info to store bus formats in
+ * @formats: array containing the supported bus formats
+ * @num_formats: the number of entries in the fmts array
+ *
+ * Store the supported bus formats in display info structure.
+ * See MEDIA_BUS_FMT_* definitions in include/uapi/linux/media-bus-format.h for
+ * a full list of available formats.
+ */
+int drm_display_info_set_bus_formats(struct drm_display_info *info,
+				     const u32 *formats,
+				     unsigned int num_formats)
+{
+	u32 *fmts = NULL;
+
+	if (!formats && num_formats)
+		return -EINVAL;
+
+	if (formats && num_formats) {
+		fmts = kmemdup(formats, sizeof(*formats) * num_formats,
+			       GFP_KERNEL);
+		if (!fmts)
+			return -ENOMEM;
+	}
+
+	kfree(info->bus_formats);
+	info->bus_formats = fmts;
+	info->num_bus_formats = num_formats;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_display_info_set_bus_formats);
+
+/* Optional connector properties. */
+static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = {
+	{ DRM_MODE_SCALE_NONE, "None" },
+	{ DRM_MODE_SCALE_FULLSCREEN, "Full" },
+	{ DRM_MODE_SCALE_CENTER, "Center" },
+	{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
+};
+
+static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
+	{ DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
+	{ DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
+	{ DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
+};
+
+static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = {
+	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+	{ DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
+	{ DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
+};
+DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
+
+static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = {
+	{ DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
+	{ DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
+	{ DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
+};
+DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
+		 drm_dvi_i_subconnector_enum_list)
+
+static const struct drm_prop_enum_list drm_tv_select_enum_list[] = {
+	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
+};
+DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
+
+static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
+	{ DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
+	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
+};
+DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
+		 drm_tv_subconnector_enum_list)
+
+int drm_connector_create_standard_properties(struct drm_device *dev)
+{
+	struct drm_property *prop;
+
+	prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+				   DRM_MODE_PROP_IMMUTABLE,
+				   "EDID", 0);
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.edid_property = prop;
+
+	prop = drm_property_create_enum(dev, 0,
+				   "DPMS", drm_dpms_enum_list,
+				   ARRAY_SIZE(drm_dpms_enum_list));
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.dpms_property = prop;
+
+	prop = drm_property_create(dev,
+				   DRM_MODE_PROP_BLOB |
+				   DRM_MODE_PROP_IMMUTABLE,
+				   "PATH", 0);
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.path_property = prop;
+
+	prop = drm_property_create(dev,
+				   DRM_MODE_PROP_BLOB |
+				   DRM_MODE_PROP_IMMUTABLE,
+				   "TILE", 0);
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.tile_property = prop;
+
+	return 0;
+}
+
+/**
+ * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
+ * @dev: DRM device
+ *
+ * Called by a driver the first time a DVI-I connector is made.
+ */
+int drm_mode_create_dvi_i_properties(struct drm_device *dev)
+{
+	struct drm_property *dvi_i_selector;
+	struct drm_property *dvi_i_subconnector;
+
+	if (dev->mode_config.dvi_i_select_subconnector_property)
+		return 0;
+
+	dvi_i_selector =
+		drm_property_create_enum(dev, 0,
+				    "select subconnector",
+				    drm_dvi_i_select_enum_list,
+				    ARRAY_SIZE(drm_dvi_i_select_enum_list));
+	dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
+
+	dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+				    "subconnector",
+				    drm_dvi_i_subconnector_enum_list,
+				    ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
+	dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
+
+/**
+ * drm_create_tv_properties - create TV specific connector properties
+ * @dev: DRM device
+ * @num_modes: number of different TV formats (modes) supported
+ * @modes: array of pointers to strings containing name of each format
+ *
+ * Called by a driver's TV initialization routine, this function creates
+ * the TV specific connector properties for a given device.  Caller is
+ * responsible for allocating a list of format names and passing them to
+ * this routine.
+ */
+int drm_mode_create_tv_properties(struct drm_device *dev,
+				  unsigned int num_modes,
+				  const char * const modes[])
+{
+	struct drm_property *tv_selector;
+	struct drm_property *tv_subconnector;
+	unsigned int i;
+
+	if (dev->mode_config.tv_select_subconnector_property)
+		return 0;
+
+	/*
+	 * Basic connector properties
+	 */
+	tv_selector = drm_property_create_enum(dev, 0,
+					  "select subconnector",
+					  drm_tv_select_enum_list,
+					  ARRAY_SIZE(drm_tv_select_enum_list));
+	if (!tv_selector)
+		goto nomem;
+
+	dev->mode_config.tv_select_subconnector_property = tv_selector;
+
+	tv_subconnector =
+		drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+				    "subconnector",
+				    drm_tv_subconnector_enum_list,
+				    ARRAY_SIZE(drm_tv_subconnector_enum_list));
+	if (!tv_subconnector)
+		goto nomem;
+	dev->mode_config.tv_subconnector_property = tv_subconnector;
+
+	/*
+	 * Other, TV specific properties: margins & TV modes.
+	 */
+	dev->mode_config.tv_left_margin_property =
+		drm_property_create_range(dev, 0, "left margin", 0, 100);
+	if (!dev->mode_config.tv_left_margin_property)
+		goto nomem;
+
+	dev->mode_config.tv_right_margin_property =
+		drm_property_create_range(dev, 0, "right margin", 0, 100);
+	if (!dev->mode_config.tv_right_margin_property)
+		goto nomem;
+
+	dev->mode_config.tv_top_margin_property =
+		drm_property_create_range(dev, 0, "top margin", 0, 100);
+	if (!dev->mode_config.tv_top_margin_property)
+		goto nomem;
+
+	dev->mode_config.tv_bottom_margin_property =
+		drm_property_create_range(dev, 0, "bottom margin", 0, 100);
+	if (!dev->mode_config.tv_bottom_margin_property)
+		goto nomem;
+
+	dev->mode_config.tv_mode_property =
+		drm_property_create(dev, DRM_MODE_PROP_ENUM,
+				    "mode", num_modes);
+	if (!dev->mode_config.tv_mode_property)
+		goto nomem;
+
+	for (i = 0; i < num_modes; i++)
+		drm_property_add_enum(dev->mode_config.tv_mode_property, i,
+				      i, modes[i]);
+
+	dev->mode_config.tv_brightness_property =
+		drm_property_create_range(dev, 0, "brightness", 0, 100);
+	if (!dev->mode_config.tv_brightness_property)
+		goto nomem;
+
+	dev->mode_config.tv_contrast_property =
+		drm_property_create_range(dev, 0, "contrast", 0, 100);
+	if (!dev->mode_config.tv_contrast_property)
+		goto nomem;
+
+	dev->mode_config.tv_flicker_reduction_property =
+		drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
+	if (!dev->mode_config.tv_flicker_reduction_property)
+		goto nomem;
+
+	dev->mode_config.tv_overscan_property =
+		drm_property_create_range(dev, 0, "overscan", 0, 100);
+	if (!dev->mode_config.tv_overscan_property)
+		goto nomem;
+
+	dev->mode_config.tv_saturation_property =
+		drm_property_create_range(dev, 0, "saturation", 0, 100);
+	if (!dev->mode_config.tv_saturation_property)
+		goto nomem;
+
+	dev->mode_config.tv_hue_property =
+		drm_property_create_range(dev, 0, "hue", 0, 100);
+	if (!dev->mode_config.tv_hue_property)
+		goto nomem;
+
+	return 0;
+nomem:
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_mode_create_tv_properties);
+
+/**
+ * drm_mode_create_scaling_mode_property - create scaling mode property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_scaling_mode_property(struct drm_device *dev)
+{
+	struct drm_property *scaling_mode;
+
+	if (dev->mode_config.scaling_mode_property)
+		return 0;
+
+	scaling_mode =
+		drm_property_create_enum(dev, 0, "scaling mode",
+				drm_scaling_mode_enum_list,
+				    ARRAY_SIZE(drm_scaling_mode_enum_list));
+
+	dev->mode_config.scaling_mode_property = scaling_mode;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
+
+/**
+ * drm_mode_create_aspect_ratio_property - create aspect ratio property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
+{
+	if (dev->mode_config.aspect_ratio_property)
+		return 0;
+
+	dev->mode_config.aspect_ratio_property =
+		drm_property_create_enum(dev, 0, "aspect ratio",
+				drm_aspect_ratio_enum_list,
+				ARRAY_SIZE(drm_aspect_ratio_enum_list));
+
+	if (dev->mode_config.aspect_ratio_property == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
+
+/**
+ * drm_mode_create_suggested_offset_properties - create suggests offset properties
+ * @dev: DRM device
+ *
+ * Create the the suggested x/y offset property for connectors.
+ */
+int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
+{
+	if (dev->mode_config.suggested_x_property && dev->mode_config.suggested_y_property)
+		return 0;
+
+	dev->mode_config.suggested_x_property =
+		drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested X", 0, 0xffffffff);
+
+	dev->mode_config.suggested_y_property =
+		drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested Y", 0, 0xffffffff);
+
+	if (dev->mode_config.suggested_x_property == NULL ||
+	    dev->mode_config.suggested_y_property == NULL)
+		return -ENOMEM;
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
+
+/**
+ * drm_mode_connector_set_path_property - set tile property on connector
+ * @connector: connector to set property on.
+ * @path: path to use for property; must not be NULL.
+ *
+ * This creates a property to expose to userspace to specify a
+ * connector path. This is mainly used for DisplayPort MST where
+ * connectors have a topology and we want to allow userspace to give
+ * them more meaningful names.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_connector_set_path_property(struct drm_connector *connector,
+					 const char *path)
+{
+	struct drm_device *dev = connector->dev;
+	int ret;
+
+	ret = drm_property_replace_global_blob(dev,
+	                                       &connector->path_blob_ptr,
+	                                       strlen(path) + 1,
+	                                       path,
+	                                       &connector->base,
+	                                       dev->mode_config.path_property);
+	return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_set_path_property);
+
+/**
+ * drm_mode_connector_set_tile_property - set tile property on connector
+ * @connector: connector to set property on.
+ *
+ * This looks up the tile information for a connector, and creates a
+ * property for userspace to parse if it exists. The property is of
+ * the form of 8 integers using ':' as a separator.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_connector_set_tile_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	char tile[256];
+	int ret;
+
+	if (!connector->has_tile) {
+		ret  = drm_property_replace_global_blob(dev,
+		                                        &connector->tile_blob_ptr,
+		                                        0,
+		                                        NULL,
+		                                        &connector->base,
+		                                        dev->mode_config.tile_property);
+		return ret;
+	}
+
+	snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
+		 connector->tile_group->id, connector->tile_is_single_monitor,
+		 connector->num_h_tile, connector->num_v_tile,
+		 connector->tile_h_loc, connector->tile_v_loc,
+		 connector->tile_h_size, connector->tile_v_size);
+
+	ret = drm_property_replace_global_blob(dev,
+	                                       &connector->tile_blob_ptr,
+	                                       strlen(tile) + 1,
+	                                       tile,
+	                                       &connector->base,
+	                                       dev->mode_config.tile_property);
+	return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
+
+/**
+ * drm_mode_connector_update_edid_property - update the edid property of a connector
+ * @connector: drm connector
+ * @edid: new value of the edid property
+ *
+ * This function creates a new blob modeset object and assigns its id to the
+ * connector's edid property.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+					    const struct edid *edid)
+{
+	struct drm_device *dev = connector->dev;
+	size_t size = 0;
+	int ret;
+
+	/* ignore requests to set edid when overridden */
+	if (connector->override_edid)
+		return 0;
+
+	if (edid)
+		size = EDID_LENGTH * (1 + edid->extensions);
+
+	ret = drm_property_replace_global_blob(dev,
+					       &connector->edid_blob_ptr,
+	                                       size,
+	                                       edid,
+	                                       &connector->base,
+	                                       dev->mode_config.edid_property);
+	return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
+
+int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+				    struct drm_property *property,
+				    uint64_t value)
+{
+	int ret = -EINVAL;
+	struct drm_connector *connector = obj_to_connector(obj);
+
+	/* Do DPMS ourselves */
+	if (property == connector->dev->mode_config.dpms_property) {
+		ret = (*connector->funcs->dpms)(connector, (int)value);
+	} else if (connector->funcs->set_property)
+		ret = connector->funcs->set_property(connector, property, value);
+
+	/* store the property value if successful */
+	if (!ret)
+		drm_object_property_set_value(&connector->base, property, value);
+	return ret;
+}
+
+int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+				       void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_connector_set_property *conn_set_prop = data;
+	struct drm_mode_obj_set_property obj_set_prop = {
+		.value = conn_set_prop->value,
+		.prop_id = conn_set_prop->prop_id,
+		.obj_id = conn_set_prop->connector_id,
+		.obj_type = DRM_MODE_OBJECT_CONNECTOR
+	};
+
+	/* It does all the locking and checking we need */
+	return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
+}
+
+static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector)
+{
+	/* For atomic drivers only state objects are synchronously updated and
+	 * protected by modeset locks, so check those first. */
+	if (connector->state)
+		return connector->state->best_encoder;
+	return connector->encoder;
+}
+
+static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
+					 const struct drm_file *file_priv)
+{
+	/*
+	 * If user-space hasn't configured the driver to expose the stereo 3D
+	 * modes, don't expose them.
+	 */
+	if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
+		return false;
+
+	return true;
+}
+
+int drm_mode_getconnector(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct drm_mode_get_connector *out_resp = data;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_display_mode *mode;
+	int mode_count = 0;
+	int encoders_count = 0;
+	int ret = 0;
+	int copied = 0;
+	int i;
+	struct drm_mode_modeinfo u_mode;
+	struct drm_mode_modeinfo __user *mode_ptr;
+	uint32_t __user *encoder_ptr;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
+
+	mutex_lock(&dev->mode_config.mutex);
+
+	connector = drm_connector_lookup(dev, out_resp->connector_id);
+	if (!connector) {
+		ret = -ENOENT;
+		goto out_unlock;
+	}
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
+		if (connector->encoder_ids[i] != 0)
+			encoders_count++;
+
+	if (out_resp->count_modes == 0) {
+		connector->funcs->fill_modes(connector,
+					     dev->mode_config.max_width,
+					     dev->mode_config.max_height);
+	}
+
+	/* delayed so we get modes regardless of pre-fill_modes state */
+	list_for_each_entry(mode, &connector->modes, head)
+		if (drm_mode_expose_to_userspace(mode, file_priv))
+			mode_count++;
+
+	out_resp->connector_id = connector->base.id;
+	out_resp->connector_type = connector->connector_type;
+	out_resp->connector_type_id = connector->connector_type_id;
+	out_resp->mm_width = connector->display_info.width_mm;
+	out_resp->mm_height = connector->display_info.height_mm;
+	out_resp->subpixel = connector->display_info.subpixel_order;
+	out_resp->connection = connector->status;
+
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+	encoder = drm_connector_get_encoder(connector);
+	if (encoder)
+		out_resp->encoder_id = encoder->base.id;
+	else
+		out_resp->encoder_id = 0;
+
+	/*
+	 * This ioctl is called twice, once to determine how much space is
+	 * needed, and the 2nd time to fill it.
+	 */
+	if ((out_resp->count_modes >= mode_count) && mode_count) {
+		copied = 0;
+		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
+		list_for_each_entry(mode, &connector->modes, head) {
+			if (!drm_mode_expose_to_userspace(mode, file_priv))
+				continue;
+
+			drm_mode_convert_to_umode(&u_mode, mode);
+			if (copy_to_user(mode_ptr + copied,
+					 &u_mode, sizeof(u_mode))) {
+				ret = -EFAULT;
+				goto out;
+			}
+			copied++;
+		}
+	}
+	out_resp->count_modes = mode_count;
+
+	ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
+			(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
+			(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
+			&out_resp->count_props);
+	if (ret)
+		goto out;
+
+	if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
+		copied = 0;
+		encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
+		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+			if (connector->encoder_ids[i] != 0) {
+				if (put_user(connector->encoder_ids[i],
+					     encoder_ptr + copied)) {
+					ret = -EFAULT;
+					goto out;
+				}
+				copied++;
+			}
+		}
+	}
+	out_resp->count_encoders = encoders_count;
+
+out:
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+	drm_connector_unreference(connector);
+out_unlock:
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return ret;
+}
+
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 192a5f9..3c4000f 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -54,7 +54,7 @@
 void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
 {
 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-	    drm_core_check_feature(dev, DRIVER_MODESET))
+	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return;
 
 	mutex_lock(&dev->struct_mutex);
@@ -92,7 +92,7 @@
 void drm_legacy_ctxbitmap_init(struct drm_device * dev)
 {
 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-	    drm_core_check_feature(dev, DRIVER_MODESET))
+	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return;
 
 	idr_init(&dev->ctx_idr);
@@ -109,7 +109,7 @@
 void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
 {
 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-	    drm_core_check_feature(dev, DRIVER_MODESET))
+	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return;
 
 	mutex_lock(&dev->struct_mutex);
@@ -131,7 +131,7 @@
 	struct drm_ctx_list *pos, *tmp;
 
 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-	    drm_core_check_feature(dev, DRIVER_MODESET))
+	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return;
 
 	mutex_lock(&dev->ctxlist_mutex);
@@ -177,7 +177,7 @@
 	struct drm_map_list *_entry;
 
 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-	    drm_core_check_feature(dev, DRIVER_MODESET))
+	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	mutex_lock(&dev->struct_mutex);
@@ -225,7 +225,7 @@
 	struct drm_map_list *r_list = NULL;
 
 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-	    drm_core_check_feature(dev, DRIVER_MODESET))
+	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	mutex_lock(&dev->struct_mutex);
@@ -329,7 +329,7 @@
 	int i;
 
 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-	    drm_core_check_feature(dev, DRIVER_MODESET))
+	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	if (res->count >= DRM_RESERVED_CONTEXTS) {
@@ -363,7 +363,7 @@
 	struct drm_ctx *ctx = data;
 
 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-	    drm_core_check_feature(dev, DRIVER_MODESET))
+	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	ctx->handle = drm_legacy_ctxbitmap_next(dev);
@@ -410,7 +410,7 @@
 	struct drm_ctx *ctx = data;
 
 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-	    drm_core_check_feature(dev, DRIVER_MODESET))
+	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	/* This is 0, because we don't handle any context flags */
@@ -436,7 +436,7 @@
 	struct drm_ctx *ctx = data;
 
 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-	    drm_core_check_feature(dev, DRIVER_MODESET))
+	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	DRM_DEBUG("%d\n", ctx->handle);
@@ -460,7 +460,7 @@
 	struct drm_ctx *ctx = data;
 
 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-	    drm_core_check_feature(dev, DRIVER_MODESET))
+	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	DRM_DEBUG("%d\n", ctx->handle);
@@ -486,7 +486,7 @@
 	struct drm_ctx *ctx = data;
 
 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
-	    drm_core_check_feature(dev, DRIVER_MODESET))
+	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	DRM_DEBUG("%d\n", ctx->handle);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index ddebe54..2d7bedf 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -40,39 +40,14 @@
 #include <drm/drm_modeset_lock.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_auth.h>
+#include <drm/drm_framebuffer.h>
 
 #include "drm_crtc_internal.h"
 #include "drm_internal.h"
 
-static struct drm_framebuffer *
-internal_framebuffer_create(struct drm_device *dev,
-			    const struct drm_mode_fb_cmd2 *r,
-			    struct drm_file *file_priv);
-
-/* Avoid boilerplate.  I'm tired of typing. */
-#define DRM_ENUM_NAME_FN(fnname, list)				\
-	const char *fnname(int val)				\
-	{							\
-		int i;						\
-		for (i = 0; i < ARRAY_SIZE(list); i++) {	\
-			if (list[i].type == val)		\
-				return list[i].name;		\
-		}						\
-		return "(unknown)";				\
-	}
-
 /*
  * Global properties
  */
-static const struct drm_prop_enum_list drm_dpms_enum_list[] = {
-	{ DRM_MODE_DPMS_ON, "On" },
-	{ DRM_MODE_DPMS_STANDBY, "Standby" },
-	{ DRM_MODE_DPMS_SUSPEND, "Suspend" },
-	{ DRM_MODE_DPMS_OFF, "Off" }
-};
-
-DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
-
 static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
 	{ DRM_PLANE_TYPE_OVERLAY, "Overlay" },
 	{ DRM_PLANE_TYPE_PRIMARY, "Primary" },
@@ -82,320 +57,6 @@
 /*
  * Optional properties
  */
-static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = {
-	{ DRM_MODE_SCALE_NONE, "None" },
-	{ DRM_MODE_SCALE_FULLSCREEN, "Full" },
-	{ DRM_MODE_SCALE_CENTER, "Center" },
-	{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
-};
-
-static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
-	{ DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
-	{ DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
-	{ DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
-};
-
-/*
- * Non-global properties, but "required" for certain connectors.
- */
-static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = {
-	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
-	{ DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
-	{ DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
-};
-
-DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
-
-static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = {
-	{ DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
-	{ DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
-	{ DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
-};
-
-DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
-		 drm_dvi_i_subconnector_enum_list)
-
-static const struct drm_prop_enum_list drm_tv_select_enum_list[] = {
-	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
-	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
-	{ DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
-	{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
-	{ DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
-};
-
-DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
-
-static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
-	{ DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
-	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
-	{ DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
-	{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
-	{ DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
-};
-
-DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
-		 drm_tv_subconnector_enum_list)
-
-static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
-	{ DRM_MODE_DIRTY_OFF,      "Off"      },
-	{ DRM_MODE_DIRTY_ON,       "On"       },
-	{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
-};
-
-struct drm_conn_prop_enum_list {
-	int type;
-	const char *name;
-	struct ida ida;
-};
-
-/*
- * Connector and encoder types.
- */
-static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
-	{ DRM_MODE_CONNECTOR_Unknown, "Unknown" },
-	{ DRM_MODE_CONNECTOR_VGA, "VGA" },
-	{ DRM_MODE_CONNECTOR_DVII, "DVI-I" },
-	{ DRM_MODE_CONNECTOR_DVID, "DVI-D" },
-	{ DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
-	{ DRM_MODE_CONNECTOR_Composite, "Composite" },
-	{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
-	{ DRM_MODE_CONNECTOR_LVDS, "LVDS" },
-	{ DRM_MODE_CONNECTOR_Component, "Component" },
-	{ DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
-	{ DRM_MODE_CONNECTOR_DisplayPort, "DP" },
-	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
-	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
-	{ DRM_MODE_CONNECTOR_TV, "TV" },
-	{ DRM_MODE_CONNECTOR_eDP, "eDP" },
-	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
-	{ DRM_MODE_CONNECTOR_DSI, "DSI" },
-	{ DRM_MODE_CONNECTOR_DPI, "DPI" },
-};
-
-static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
-	{ DRM_MODE_ENCODER_NONE, "None" },
-	{ DRM_MODE_ENCODER_DAC, "DAC" },
-	{ DRM_MODE_ENCODER_TMDS, "TMDS" },
-	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
-	{ DRM_MODE_ENCODER_TVDAC, "TV" },
-	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
-	{ DRM_MODE_ENCODER_DSI, "DSI" },
-	{ DRM_MODE_ENCODER_DPMST, "DP MST" },
-	{ DRM_MODE_ENCODER_DPI, "DPI" },
-};
-
-static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
-	{ SubPixelUnknown, "Unknown" },
-	{ SubPixelHorizontalRGB, "Horizontal RGB" },
-	{ SubPixelHorizontalBGR, "Horizontal BGR" },
-	{ SubPixelVerticalRGB, "Vertical RGB" },
-	{ SubPixelVerticalBGR, "Vertical BGR" },
-	{ SubPixelNone, "None" },
-};
-
-void drm_connector_ida_init(void)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
-		ida_init(&drm_connector_enum_list[i].ida);
-}
-
-void drm_connector_ida_destroy(void)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
-		ida_destroy(&drm_connector_enum_list[i].ida);
-}
-
-/**
- * drm_get_connector_status_name - return a string for connector status
- * @status: connector status to compute name of
- *
- * In contrast to the other drm_get_*_name functions this one here returns a
- * const pointer and hence is threadsafe.
- */
-const char *drm_get_connector_status_name(enum drm_connector_status status)
-{
-	if (status == connector_status_connected)
-		return "connected";
-	else if (status == connector_status_disconnected)
-		return "disconnected";
-	else
-		return "unknown";
-}
-EXPORT_SYMBOL(drm_get_connector_status_name);
-
-/**
- * drm_get_subpixel_order_name - return a string for a given subpixel enum
- * @order: enum of subpixel_order
- *
- * Note you could abuse this and return something out of bounds, but that
- * would be a caller error.  No unscrubbed user data should make it here.
- */
-const char *drm_get_subpixel_order_name(enum subpixel_order order)
-{
-	return drm_subpixel_enum_list[order].name;
-}
-EXPORT_SYMBOL(drm_get_subpixel_order_name);
-
-/*
- * Internal function to assign a slot in the object idr and optionally
- * register the object into the idr.
- */
-static int drm_mode_object_get_reg(struct drm_device *dev,
-				   struct drm_mode_object *obj,
-				   uint32_t obj_type,
-				   bool register_obj,
-				   void (*obj_free_cb)(struct kref *kref))
-{
-	int ret;
-
-	mutex_lock(&dev->mode_config.idr_mutex);
-	ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
-	if (ret >= 0) {
-		/*
-		 * Set up the object linking under the protection of the idr
-		 * lock so that other users can't see inconsistent state.
-		 */
-		obj->id = ret;
-		obj->type = obj_type;
-		if (obj_free_cb) {
-			obj->free_cb = obj_free_cb;
-			kref_init(&obj->refcount);
-		}
-	}
-	mutex_unlock(&dev->mode_config.idr_mutex);
-
-	return ret < 0 ? ret : 0;
-}
-
-/**
- * drm_mode_object_get - allocate a new modeset identifier
- * @dev: DRM device
- * @obj: object pointer, used to generate unique ID
- * @obj_type: object type
- *
- * Create a unique identifier based on @ptr in @dev's identifier space.  Used
- * for tracking modes, CRTCs and connectors. Note that despite the _get postfix
- * modeset identifiers are _not_ reference counted. Hence don't use this for
- * reference counted modeset objects like framebuffers.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_mode_object_get(struct drm_device *dev,
-			struct drm_mode_object *obj, uint32_t obj_type)
-{
-	return drm_mode_object_get_reg(dev, obj, obj_type, true, NULL);
-}
-
-static void drm_mode_object_register(struct drm_device *dev,
-				     struct drm_mode_object *obj)
-{
-	mutex_lock(&dev->mode_config.idr_mutex);
-	idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
-	mutex_unlock(&dev->mode_config.idr_mutex);
-}
-
-/**
- * drm_mode_object_unregister - free a modeset identifer
- * @dev: DRM device
- * @object: object to free
- *
- * Free @id from @dev's unique identifier pool.
- * This function can be called multiple times, and guards against
- * multiple removals.
- * These modeset identifiers are _not_ reference counted. Hence don't use this
- * for reference counted modeset objects like framebuffers.
- */
-void drm_mode_object_unregister(struct drm_device *dev,
-			 struct drm_mode_object *object)
-{
-	mutex_lock(&dev->mode_config.idr_mutex);
-	if (object->id) {
-		idr_remove(&dev->mode_config.crtc_idr, object->id);
-		object->id = 0;
-	}
-	mutex_unlock(&dev->mode_config.idr_mutex);
-}
-
-static struct drm_mode_object *_object_find(struct drm_device *dev,
-		uint32_t id, uint32_t type)
-{
-	struct drm_mode_object *obj = NULL;
-
-	mutex_lock(&dev->mode_config.idr_mutex);
-	obj = idr_find(&dev->mode_config.crtc_idr, id);
-	if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
-		obj = NULL;
-	if (obj && obj->id != id)
-		obj = NULL;
-
-	if (obj && obj->free_cb) {
-		if (!kref_get_unless_zero(&obj->refcount))
-			obj = NULL;
-	}
-	mutex_unlock(&dev->mode_config.idr_mutex);
-
-	return obj;
-}
-
-/**
- * drm_mode_object_find - look up a drm object with static lifetime
- * @dev: drm device
- * @id: id of the mode object
- * @type: type of the mode object
- *
- * This function is used to look up a modeset object. It will acquire a
- * reference for reference counted objects. This reference must be dropped again
- * by callind drm_mode_object_unreference().
- */
-struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
-		uint32_t id, uint32_t type)
-{
-	struct drm_mode_object *obj = NULL;
-
-	obj = _object_find(dev, id, type);
-	return obj;
-}
-EXPORT_SYMBOL(drm_mode_object_find);
-
-/**
- * drm_mode_object_unreference - decr the object refcnt
- * @obj: mode_object
- *
- * This functions decrements the object's refcount if it is a refcounted modeset
- * object. It is a no-op on any other object. This is used to drop references
- * acquired with drm_mode_object_reference().
- */
-void drm_mode_object_unreference(struct drm_mode_object *obj)
-{
-	if (obj->free_cb) {
-		DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
-		kref_put(&obj->refcount, obj->free_cb);
-	}
-}
-EXPORT_SYMBOL(drm_mode_object_unreference);
-
-/**
- * drm_mode_object_reference - incr the object refcnt
- * @obj: mode_object
- *
- * This functions increments the object's refcount if it is a refcounted modeset
- * object. It is a no-op on any other object. References should be dropped again
- * by calling drm_mode_object_unreference().
- */
-void drm_mode_object_reference(struct drm_mode_object *obj)
-{
-	if (obj->free_cb) {
-		DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
-		kref_get(&obj->refcount);
-	}
-}
-EXPORT_SYMBOL(drm_mode_object_reference);
-
 /**
  * drm_crtc_force_disable - Forcibly turn off a CRTC
  * @crtc: CRTC to turn off
@@ -441,199 +102,6 @@
 }
 EXPORT_SYMBOL(drm_crtc_force_disable_all);
 
-static void drm_framebuffer_free(struct kref *kref)
-{
-	struct drm_framebuffer *fb =
-			container_of(kref, struct drm_framebuffer, base.refcount);
-	struct drm_device *dev = fb->dev;
-
-	/*
-	 * The lookup idr holds a weak reference, which has not necessarily been
-	 * removed at this point. Check for that.
-	 */
-	drm_mode_object_unregister(dev, &fb->base);
-
-	fb->funcs->destroy(fb);
-}
-
-/**
- * drm_framebuffer_init - initialize a framebuffer
- * @dev: DRM device
- * @fb: framebuffer to be initialized
- * @funcs: ... with these functions
- *
- * Allocates an ID for the framebuffer's parent mode object, sets its mode
- * functions & device file and adds it to the master fd list.
- *
- * IMPORTANT:
- * This functions publishes the fb and makes it available for concurrent access
- * by other users. Which means by this point the fb _must_ be fully set up -
- * since all the fb attributes are invariant over its lifetime, no further
- * locking but only correct reference counting is required.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
-			 const struct drm_framebuffer_funcs *funcs)
-{
-	int ret;
-
-	INIT_LIST_HEAD(&fb->filp_head);
-	fb->dev = dev;
-	fb->funcs = funcs;
-
-	ret = drm_mode_object_get_reg(dev, &fb->base, DRM_MODE_OBJECT_FB,
-				      false, drm_framebuffer_free);
-	if (ret)
-		goto out;
-
-	mutex_lock(&dev->mode_config.fb_lock);
-	dev->mode_config.num_fb++;
-	list_add(&fb->head, &dev->mode_config.fb_list);
-	mutex_unlock(&dev->mode_config.fb_lock);
-
-	drm_mode_object_register(dev, &fb->base);
-out:
-	return ret;
-}
-EXPORT_SYMBOL(drm_framebuffer_init);
-
-/**
- * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
- * @dev: drm device
- * @id: id of the fb object
- *
- * If successful, this grabs an additional reference to the framebuffer -
- * callers need to make sure to eventually unreference the returned framebuffer
- * again, using @drm_framebuffer_unreference.
- */
-struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
-					       uint32_t id)
-{
-	struct drm_mode_object *obj;
-	struct drm_framebuffer *fb = NULL;
-
-	obj = _object_find(dev, id, DRM_MODE_OBJECT_FB);
-	if (obj)
-		fb = obj_to_fb(obj);
-	return fb;
-}
-EXPORT_SYMBOL(drm_framebuffer_lookup);
-
-/**
- * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
- * @fb: fb to unregister
- *
- * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
- * those used for fbdev. Note that the caller must hold a reference of it's own,
- * i.e. the object may not be destroyed through this call (since it'll lead to a
- * locking inversion).
- */
-void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
-{
-	struct drm_device *dev;
-
-	if (!fb)
-		return;
-
-	dev = fb->dev;
-
-	/* Mark fb as reaped and drop idr ref. */
-	drm_mode_object_unregister(dev, &fb->base);
-}
-EXPORT_SYMBOL(drm_framebuffer_unregister_private);
-
-/**
- * drm_framebuffer_cleanup - remove a framebuffer object
- * @fb: framebuffer to remove
- *
- * Cleanup framebuffer. This function is intended to be used from the drivers
- * ->destroy callback. It can also be used to clean up driver private
- * framebuffers embedded into a larger structure.
- *
- * Note that this function does not remove the fb from active usuage - if it is
- * still used anywhere, hilarity can ensue since userspace could call getfb on
- * the id and get back -EINVAL. Obviously no concern at driver unload time.
- *
- * Also, the framebuffer will not be removed from the lookup idr - for
- * user-created framebuffers this will happen in in the rmfb ioctl. For
- * driver-private objects (e.g. for fbdev) drivers need to explicitly call
- * drm_framebuffer_unregister_private.
- */
-void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
-{
-	struct drm_device *dev = fb->dev;
-
-	mutex_lock(&dev->mode_config.fb_lock);
-	list_del(&fb->head);
-	dev->mode_config.num_fb--;
-	mutex_unlock(&dev->mode_config.fb_lock);
-}
-EXPORT_SYMBOL(drm_framebuffer_cleanup);
-
-/**
- * drm_framebuffer_remove - remove and unreference a framebuffer object
- * @fb: framebuffer to remove
- *
- * Scans all the CRTCs and planes in @dev's mode_config.  If they're
- * using @fb, removes it, setting it to NULL. Then drops the reference to the
- * passed-in framebuffer. Might take the modeset locks.
- *
- * Note that this function optimizes the cleanup away if the caller holds the
- * last reference to the framebuffer. It is also guaranteed to not take the
- * modeset locks in this case.
- */
-void drm_framebuffer_remove(struct drm_framebuffer *fb)
-{
-	struct drm_device *dev;
-	struct drm_crtc *crtc;
-	struct drm_plane *plane;
-
-	if (!fb)
-		return;
-
-	dev = fb->dev;
-
-	WARN_ON(!list_empty(&fb->filp_head));
-
-	/*
-	 * drm ABI mandates that we remove any deleted framebuffers from active
-	 * useage. But since most sane clients only remove framebuffers they no
-	 * longer need, try to optimize this away.
-	 *
-	 * Since we're holding a reference ourselves, observing a refcount of 1
-	 * means that we're the last holder and can skip it. Also, the refcount
-	 * can never increase from 1 again, so we don't need any barriers or
-	 * locks.
-	 *
-	 * Note that userspace could try to race with use and instate a new
-	 * usage _after_ we've cleared all current ones. End result will be an
-	 * in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot
-	 * in this manner.
-	 */
-	if (drm_framebuffer_read_refcount(fb) > 1) {
-		drm_modeset_lock_all(dev);
-		/* remove from any CRTC */
-		drm_for_each_crtc(crtc, dev) {
-			if (crtc->primary->fb == fb) {
-				/* should turn off the crtc */
-				if (drm_crtc_force_disable(crtc))
-					DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
-			}
-		}
-
-		drm_for_each_plane(plane, dev) {
-			if (plane->fb == fb)
-				drm_plane_force_disable(plane);
-		}
-		drm_modeset_unlock_all(dev);
-	}
-
-	drm_framebuffer_unreference(fb);
-}
-EXPORT_SYMBOL(drm_framebuffer_remove);
-
 DEFINE_WW_CLASS(crtc_ww_class);
 
 static unsigned int drm_num_crtcs(struct drm_device *dev)
@@ -683,7 +151,11 @@
  * @funcs: callbacks for the new CRTC
  * @name: printf style format string for the CRTC name, or NULL for default name
  *
- * Inits a new object created as base part of a driver crtc object.
+ * Inits a new object created as base part of a driver crtc object. Drivers
+ * should use this function instead of drm_crtc_init(), which is only provided
+ * for backwards compatibility with drivers which do not yet support universal
+ * planes). For really simple hardware which has only 1 plane look at
+ * drm_simple_display_pipe_init() instead.
  *
  * Returns:
  * Zero on success, error code on failure.
@@ -783,720 +255,6 @@
 }
 EXPORT_SYMBOL(drm_crtc_cleanup);
 
-/*
- * drm_mode_remove - remove and free a mode
- * @connector: connector list to modify
- * @mode: mode to remove
- *
- * Remove @mode from @connector's mode list, then free it.
- */
-static void drm_mode_remove(struct drm_connector *connector,
-			    struct drm_display_mode *mode)
-{
-	list_del(&mode->head);
-	drm_mode_destroy(connector->dev, mode);
-}
-
-/**
- * drm_display_info_set_bus_formats - set the supported bus formats
- * @info: display info to store bus formats in
- * @formats: array containing the supported bus formats
- * @num_formats: the number of entries in the fmts array
- *
- * Store the supported bus formats in display info structure.
- * See MEDIA_BUS_FMT_* definitions in include/uapi/linux/media-bus-format.h for
- * a full list of available formats.
- */
-int drm_display_info_set_bus_formats(struct drm_display_info *info,
-				     const u32 *formats,
-				     unsigned int num_formats)
-{
-	u32 *fmts = NULL;
-
-	if (!formats && num_formats)
-		return -EINVAL;
-
-	if (formats && num_formats) {
-		fmts = kmemdup(formats, sizeof(*formats) * num_formats,
-			       GFP_KERNEL);
-		if (!fmts)
-			return -ENOMEM;
-	}
-
-	kfree(info->bus_formats);
-	info->bus_formats = fmts;
-	info->num_bus_formats = num_formats;
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_display_info_set_bus_formats);
-
-/**
- * drm_connector_get_cmdline_mode - reads the user's cmdline mode
- * @connector: connector to quwery
- *
- * The kernel supports per-connector configration of its consoles through
- * use of the video= parameter. This function parses that option and
- * extracts the user's specified mode (or enable/disable status) for a
- * particular connector. This is typically only used during the early fbdev
- * setup.
- */
-static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
-{
-	struct drm_cmdline_mode *mode = &connector->cmdline_mode;
-	char *option = NULL;
-
-	if (fb_get_options(connector->name, &option))
-		return;
-
-	if (!drm_mode_parse_command_line_for_connector(option,
-						       connector,
-						       mode))
-		return;
-
-	if (mode->force) {
-		const char *s;
-
-		switch (mode->force) {
-		case DRM_FORCE_OFF:
-			s = "OFF";
-			break;
-		case DRM_FORCE_ON_DIGITAL:
-			s = "ON - dig";
-			break;
-		default:
-		case DRM_FORCE_ON:
-			s = "ON";
-			break;
-		}
-
-		DRM_INFO("forcing %s connector %s\n", connector->name, s);
-		connector->force = mode->force;
-	}
-
-	DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
-		      connector->name,
-		      mode->xres, mode->yres,
-		      mode->refresh_specified ? mode->refresh : 60,
-		      mode->rb ? " reduced blanking" : "",
-		      mode->margins ? " with margins" : "",
-		      mode->interlace ?  " interlaced" : "");
-}
-
-static void drm_connector_free(struct kref *kref)
-{
-	struct drm_connector *connector =
-		container_of(kref, struct drm_connector, base.refcount);
-	struct drm_device *dev = connector->dev;
-
-	drm_mode_object_unregister(dev, &connector->base);
-	connector->funcs->destroy(connector);
-}
-
-/**
- * drm_connector_init - Init a preallocated connector
- * @dev: DRM device
- * @connector: the connector to init
- * @funcs: callbacks for this connector
- * @connector_type: user visible type of the connector
- *
- * Initialises a preallocated connector. Connectors should be
- * subclassed as part of driver connector objects.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_connector_init(struct drm_device *dev,
-		       struct drm_connector *connector,
-		       const struct drm_connector_funcs *funcs,
-		       int connector_type)
-{
-	struct drm_mode_config *config = &dev->mode_config;
-	int ret;
-	struct ida *connector_ida =
-		&drm_connector_enum_list[connector_type].ida;
-
-	drm_modeset_lock_all(dev);
-
-	ret = drm_mode_object_get_reg(dev, &connector->base,
-				      DRM_MODE_OBJECT_CONNECTOR,
-				      false, drm_connector_free);
-	if (ret)
-		goto out_unlock;
-
-	connector->base.properties = &connector->properties;
-	connector->dev = dev;
-	connector->funcs = funcs;
-
-	ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
-	if (ret < 0)
-		goto out_put;
-	connector->index = ret;
-	ret = 0;
-
-	connector->connector_type = connector_type;
-	connector->connector_type_id =
-		ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
-	if (connector->connector_type_id < 0) {
-		ret = connector->connector_type_id;
-		goto out_put_id;
-	}
-	connector->name =
-		kasprintf(GFP_KERNEL, "%s-%d",
-			  drm_connector_enum_list[connector_type].name,
-			  connector->connector_type_id);
-	if (!connector->name) {
-		ret = -ENOMEM;
-		goto out_put_type_id;
-	}
-
-	INIT_LIST_HEAD(&connector->probed_modes);
-	INIT_LIST_HEAD(&connector->modes);
-	connector->edid_blob_ptr = NULL;
-	connector->status = connector_status_unknown;
-
-	drm_connector_get_cmdline_mode(connector);
-
-	/* We should add connectors at the end to avoid upsetting the connector
-	 * index too much. */
-	list_add_tail(&connector->head, &config->connector_list);
-	config->num_connector++;
-
-	if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
-		drm_object_attach_property(&connector->base,
-					      config->edid_property,
-					      0);
-
-	drm_object_attach_property(&connector->base,
-				      config->dpms_property, 0);
-
-	if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
-		drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
-	}
-
-	connector->debugfs_entry = NULL;
-out_put_type_id:
-	if (ret)
-		ida_remove(connector_ida, connector->connector_type_id);
-out_put_id:
-	if (ret)
-		ida_remove(&config->connector_ida, connector->index);
-out_put:
-	if (ret)
-		drm_mode_object_unregister(dev, &connector->base);
-
-out_unlock:
-	drm_modeset_unlock_all(dev);
-
-	return ret;
-}
-EXPORT_SYMBOL(drm_connector_init);
-
-/**
- * drm_connector_cleanup - cleans up an initialised connector
- * @connector: connector to cleanup
- *
- * Cleans up the connector but doesn't free the object.
- */
-void drm_connector_cleanup(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	struct drm_display_mode *mode, *t;
-
-	/* The connector should have been removed from userspace long before
-	 * it is finally destroyed.
-	 */
-	if (WARN_ON(connector->registered))
-		drm_connector_unregister(connector);
-
-	if (connector->tile_group) {
-		drm_mode_put_tile_group(dev, connector->tile_group);
-		connector->tile_group = NULL;
-	}
-
-	list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
-		drm_mode_remove(connector, mode);
-
-	list_for_each_entry_safe(mode, t, &connector->modes, head)
-		drm_mode_remove(connector, mode);
-
-	ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
-		   connector->connector_type_id);
-
-	ida_remove(&dev->mode_config.connector_ida,
-		   connector->index);
-
-	kfree(connector->display_info.bus_formats);
-	drm_mode_object_unregister(dev, &connector->base);
-	kfree(connector->name);
-	connector->name = NULL;
-	list_del(&connector->head);
-	dev->mode_config.num_connector--;
-
-	WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
-	if (connector->state && connector->funcs->atomic_destroy_state)
-		connector->funcs->atomic_destroy_state(connector,
-						       connector->state);
-
-	memset(connector, 0, sizeof(*connector));
-}
-EXPORT_SYMBOL(drm_connector_cleanup);
-
-/**
- * drm_connector_register - register a connector
- * @connector: the connector to register
- *
- * Register userspace interfaces for a connector
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_connector_register(struct drm_connector *connector)
-{
-	int ret;
-
-	if (connector->registered)
-		return 0;
-
-	ret = drm_sysfs_connector_add(connector);
-	if (ret)
-		return ret;
-
-	ret = drm_debugfs_connector_add(connector);
-	if (ret) {
-		goto err_sysfs;
-	}
-
-	if (connector->funcs->late_register) {
-		ret = connector->funcs->late_register(connector);
-		if (ret)
-			goto err_debugfs;
-	}
-
-	drm_mode_object_register(connector->dev, &connector->base);
-
-	connector->registered = true;
-	return 0;
-
-err_debugfs:
-	drm_debugfs_connector_remove(connector);
-err_sysfs:
-	drm_sysfs_connector_remove(connector);
-	return ret;
-}
-EXPORT_SYMBOL(drm_connector_register);
-
-/**
- * drm_connector_unregister - unregister a connector
- * @connector: the connector to unregister
- *
- * Unregister userspace interfaces for a connector
- */
-void drm_connector_unregister(struct drm_connector *connector)
-{
-	if (!connector->registered)
-		return;
-
-	if (connector->funcs->early_unregister)
-		connector->funcs->early_unregister(connector);
-
-	drm_sysfs_connector_remove(connector);
-	drm_debugfs_connector_remove(connector);
-
-	connector->registered = false;
-}
-EXPORT_SYMBOL(drm_connector_unregister);
-
-static void drm_connector_unregister_all(struct drm_device *dev)
-{
-	struct drm_connector *connector;
-
-	/* FIXME: taking the mode config mutex ends up in a clash with sysfs */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-		drm_connector_unregister(connector);
-}
-
-static int drm_connector_register_all(struct drm_device *dev)
-{
-	struct drm_connector *connector;
-	int ret;
-
-	/* FIXME: taking the mode config mutex ends up in a clash with
-	 * fbcon/backlight registration */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		ret = drm_connector_register(connector);
-		if (ret)
-			goto err;
-	}
-
-	return 0;
-
-err:
-	mutex_unlock(&dev->mode_config.mutex);
-	drm_connector_unregister_all(dev);
-	return ret;
-}
-
-static int drm_encoder_register_all(struct drm_device *dev)
-{
-	struct drm_encoder *encoder;
-	int ret = 0;
-
-	drm_for_each_encoder(encoder, dev) {
-		if (encoder->funcs->late_register)
-			ret = encoder->funcs->late_register(encoder);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static void drm_encoder_unregister_all(struct drm_device *dev)
-{
-	struct drm_encoder *encoder;
-
-	drm_for_each_encoder(encoder, dev) {
-		if (encoder->funcs->early_unregister)
-			encoder->funcs->early_unregister(encoder);
-	}
-}
-
-/**
- * drm_encoder_init - Init a preallocated encoder
- * @dev: drm device
- * @encoder: the encoder to init
- * @funcs: callbacks for this encoder
- * @encoder_type: user visible type of the encoder
- * @name: printf style format string for the encoder name, or NULL for default name
- *
- * Initialises a preallocated encoder. Encoder should be
- * subclassed as part of driver encoder objects.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_encoder_init(struct drm_device *dev,
-		      struct drm_encoder *encoder,
-		      const struct drm_encoder_funcs *funcs,
-		      int encoder_type, const char *name, ...)
-{
-	int ret;
-
-	drm_modeset_lock_all(dev);
-
-	ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
-	if (ret)
-		goto out_unlock;
-
-	encoder->dev = dev;
-	encoder->encoder_type = encoder_type;
-	encoder->funcs = funcs;
-	if (name) {
-		va_list ap;
-
-		va_start(ap, name);
-		encoder->name = kvasprintf(GFP_KERNEL, name, ap);
-		va_end(ap);
-	} else {
-		encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
-					  drm_encoder_enum_list[encoder_type].name,
-					  encoder->base.id);
-	}
-	if (!encoder->name) {
-		ret = -ENOMEM;
-		goto out_put;
-	}
-
-	list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
-	encoder->index = dev->mode_config.num_encoder++;
-
-out_put:
-	if (ret)
-		drm_mode_object_unregister(dev, &encoder->base);
-
-out_unlock:
-	drm_modeset_unlock_all(dev);
-
-	return ret;
-}
-EXPORT_SYMBOL(drm_encoder_init);
-
-/**
- * drm_encoder_cleanup - cleans up an initialised encoder
- * @encoder: encoder to cleanup
- *
- * Cleans up the encoder but doesn't free the object.
- */
-void drm_encoder_cleanup(struct drm_encoder *encoder)
-{
-	struct drm_device *dev = encoder->dev;
-
-	/* Note that the encoder_list is considered to be static; should we
-	 * remove the drm_encoder at runtime we would have to decrement all
-	 * the indices on the drm_encoder after us in the encoder_list.
-	 */
-
-	drm_modeset_lock_all(dev);
-	drm_mode_object_unregister(dev, &encoder->base);
-	kfree(encoder->name);
-	list_del(&encoder->head);
-	dev->mode_config.num_encoder--;
-	drm_modeset_unlock_all(dev);
-
-	memset(encoder, 0, sizeof(*encoder));
-}
-EXPORT_SYMBOL(drm_encoder_cleanup);
-
-static unsigned int drm_num_planes(struct drm_device *dev)
-{
-	unsigned int num = 0;
-	struct drm_plane *tmp;
-
-	drm_for_each_plane(tmp, dev) {
-		num++;
-	}
-
-	return num;
-}
-
-/**
- * drm_universal_plane_init - Initialize a new universal plane object
- * @dev: DRM device
- * @plane: plane object to init
- * @possible_crtcs: bitmask of possible CRTCs
- * @funcs: callbacks for the new plane
- * @formats: array of supported formats (%DRM_FORMAT_*)
- * @format_count: number of elements in @formats
- * @type: type of plane (overlay, primary, cursor)
- * @name: printf style format string for the plane name, or NULL for default name
- *
- * Initializes a plane object of type @type.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
-			     unsigned long possible_crtcs,
-			     const struct drm_plane_funcs *funcs,
-			     const uint32_t *formats, unsigned int format_count,
-			     enum drm_plane_type type,
-			     const char *name, ...)
-{
-	struct drm_mode_config *config = &dev->mode_config;
-	int ret;
-
-	ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
-	if (ret)
-		return ret;
-
-	drm_modeset_lock_init(&plane->mutex);
-
-	plane->base.properties = &plane->properties;
-	plane->dev = dev;
-	plane->funcs = funcs;
-	plane->format_types = kmalloc_array(format_count, sizeof(uint32_t),
-					    GFP_KERNEL);
-	if (!plane->format_types) {
-		DRM_DEBUG_KMS("out of memory when allocating plane\n");
-		drm_mode_object_unregister(dev, &plane->base);
-		return -ENOMEM;
-	}
-
-	if (name) {
-		va_list ap;
-
-		va_start(ap, name);
-		plane->name = kvasprintf(GFP_KERNEL, name, ap);
-		va_end(ap);
-	} else {
-		plane->name = kasprintf(GFP_KERNEL, "plane-%d",
-					drm_num_planes(dev));
-	}
-	if (!plane->name) {
-		kfree(plane->format_types);
-		drm_mode_object_unregister(dev, &plane->base);
-		return -ENOMEM;
-	}
-
-	memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
-	plane->format_count = format_count;
-	plane->possible_crtcs = possible_crtcs;
-	plane->type = type;
-
-	list_add_tail(&plane->head, &config->plane_list);
-	plane->index = config->num_total_plane++;
-	if (plane->type == DRM_PLANE_TYPE_OVERLAY)
-		config->num_overlay_plane++;
-
-	drm_object_attach_property(&plane->base,
-				   config->plane_type_property,
-				   plane->type);
-
-	if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
-		drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
-		drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
-		drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
-		drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
-		drm_object_attach_property(&plane->base, config->prop_crtc_w, 0);
-		drm_object_attach_property(&plane->base, config->prop_crtc_h, 0);
-		drm_object_attach_property(&plane->base, config->prop_src_x, 0);
-		drm_object_attach_property(&plane->base, config->prop_src_y, 0);
-		drm_object_attach_property(&plane->base, config->prop_src_w, 0);
-		drm_object_attach_property(&plane->base, config->prop_src_h, 0);
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_universal_plane_init);
-
-static int drm_plane_register_all(struct drm_device *dev)
-{
-	struct drm_plane *plane;
-	int ret = 0;
-
-	drm_for_each_plane(plane, dev) {
-		if (plane->funcs->late_register)
-			ret = plane->funcs->late_register(plane);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static void drm_plane_unregister_all(struct drm_device *dev)
-{
-	struct drm_plane *plane;
-
-	drm_for_each_plane(plane, dev) {
-		if (plane->funcs->early_unregister)
-			plane->funcs->early_unregister(plane);
-	}
-}
-
-/**
- * drm_plane_init - Initialize a legacy plane
- * @dev: DRM device
- * @plane: plane object to init
- * @possible_crtcs: bitmask of possible CRTCs
- * @funcs: callbacks for the new plane
- * @formats: array of supported formats (%DRM_FORMAT_*)
- * @format_count: number of elements in @formats
- * @is_primary: plane type (primary vs overlay)
- *
- * Legacy API to initialize a DRM plane.
- *
- * New drivers should call drm_universal_plane_init() instead.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
-		   unsigned long possible_crtcs,
-		   const struct drm_plane_funcs *funcs,
-		   const uint32_t *formats, unsigned int format_count,
-		   bool is_primary)
-{
-	enum drm_plane_type type;
-
-	type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
-	return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
-					formats, format_count, type, NULL);
-}
-EXPORT_SYMBOL(drm_plane_init);
-
-/**
- * drm_plane_cleanup - Clean up the core plane usage
- * @plane: plane to cleanup
- *
- * This function cleans up @plane and removes it from the DRM mode setting
- * core. Note that the function does *not* free the plane structure itself,
- * this is the responsibility of the caller.
- */
-void drm_plane_cleanup(struct drm_plane *plane)
-{
-	struct drm_device *dev = plane->dev;
-
-	drm_modeset_lock_all(dev);
-	kfree(plane->format_types);
-	drm_mode_object_unregister(dev, &plane->base);
-
-	BUG_ON(list_empty(&plane->head));
-
-	/* Note that the plane_list is considered to be static; should we
-	 * remove the drm_plane at runtime we would have to decrement all
-	 * the indices on the drm_plane after us in the plane_list.
-	 */
-
-	list_del(&plane->head);
-	dev->mode_config.num_total_plane--;
-	if (plane->type == DRM_PLANE_TYPE_OVERLAY)
-		dev->mode_config.num_overlay_plane--;
-	drm_modeset_unlock_all(dev);
-
-	WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
-	if (plane->state && plane->funcs->atomic_destroy_state)
-		plane->funcs->atomic_destroy_state(plane, plane->state);
-
-	kfree(plane->name);
-
-	memset(plane, 0, sizeof(*plane));
-}
-EXPORT_SYMBOL(drm_plane_cleanup);
-
-/**
- * drm_plane_from_index - find the registered plane at an index
- * @dev: DRM device
- * @idx: index of registered plane to find for
- *
- * Given a plane index, return the registered plane from DRM device's
- * list of planes with matching index.
- */
-struct drm_plane *
-drm_plane_from_index(struct drm_device *dev, int idx)
-{
-	struct drm_plane *plane;
-
-	drm_for_each_plane(plane, dev)
-		if (idx == plane->index)
-			return plane;
-
-	return NULL;
-}
-EXPORT_SYMBOL(drm_plane_from_index);
-
-/**
- * drm_plane_force_disable - Forcibly disable a plane
- * @plane: plane to disable
- *
- * Forces the plane to be disabled.
- *
- * Used when the plane's current framebuffer is destroyed,
- * and when restoring fbdev mode.
- */
-void drm_plane_force_disable(struct drm_plane *plane)
-{
-	int ret;
-
-	if (!plane->fb)
-		return;
-
-	plane->old_fb = plane->fb;
-	ret = plane->funcs->disable_plane(plane);
-	if (ret) {
-		DRM_ERROR("failed to disable plane with busy fb\n");
-		plane->old_fb = NULL;
-		return;
-	}
-	/* disconnect the plane from the fb and crtc: */
-	drm_framebuffer_unreference(plane->old_fb);
-	plane->old_fb = NULL;
-	plane->fb = NULL;
-	plane->crtc = NULL;
-}
-EXPORT_SYMBOL(drm_plane_force_disable);
-
 int drm_modeset_register_all(struct drm_device *dev)
 {
 	int ret;
@@ -1540,39 +298,11 @@
 static int drm_mode_create_standard_properties(struct drm_device *dev)
 {
 	struct drm_property *prop;
+	int ret;
 
-	/*
-	 * Standard properties (apply to all connectors)
-	 */
-	prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
-				   DRM_MODE_PROP_IMMUTABLE,
-				   "EDID", 0);
-	if (!prop)
-		return -ENOMEM;
-	dev->mode_config.edid_property = prop;
-
-	prop = drm_property_create_enum(dev, 0,
-				   "DPMS", drm_dpms_enum_list,
-				   ARRAY_SIZE(drm_dpms_enum_list));
-	if (!prop)
-		return -ENOMEM;
-	dev->mode_config.dpms_property = prop;
-
-	prop = drm_property_create(dev,
-				   DRM_MODE_PROP_BLOB |
-				   DRM_MODE_PROP_IMMUTABLE,
-				   "PATH", 0);
-	if (!prop)
-		return -ENOMEM;
-	dev->mode_config.path_property = prop;
-
-	prop = drm_property_create(dev,
-				   DRM_MODE_PROP_BLOB |
-				   DRM_MODE_PROP_IMMUTABLE,
-				   "TILE", 0);
-	if (!prop)
-		return -ENOMEM;
-	dev->mode_config.tile_property = prop;
+	ret = drm_connector_create_standard_properties(dev);
+	if (ret)
+		return ret;
 
 	prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
 					"type", drm_plane_type_enum_list,
@@ -1693,250 +423,6 @@
 }
 
 /**
- * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
- * @dev: DRM device
- *
- * Called by a driver the first time a DVI-I connector is made.
- */
-int drm_mode_create_dvi_i_properties(struct drm_device *dev)
-{
-	struct drm_property *dvi_i_selector;
-	struct drm_property *dvi_i_subconnector;
-
-	if (dev->mode_config.dvi_i_select_subconnector_property)
-		return 0;
-
-	dvi_i_selector =
-		drm_property_create_enum(dev, 0,
-				    "select subconnector",
-				    drm_dvi_i_select_enum_list,
-				    ARRAY_SIZE(drm_dvi_i_select_enum_list));
-	dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
-
-	dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
-				    "subconnector",
-				    drm_dvi_i_subconnector_enum_list,
-				    ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
-	dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
-
-/**
- * drm_create_tv_properties - create TV specific connector properties
- * @dev: DRM device
- * @num_modes: number of different TV formats (modes) supported
- * @modes: array of pointers to strings containing name of each format
- *
- * Called by a driver's TV initialization routine, this function creates
- * the TV specific connector properties for a given device.  Caller is
- * responsible for allocating a list of format names and passing them to
- * this routine.
- */
-int drm_mode_create_tv_properties(struct drm_device *dev,
-				  unsigned int num_modes,
-				  const char * const modes[])
-{
-	struct drm_property *tv_selector;
-	struct drm_property *tv_subconnector;
-	unsigned int i;
-
-	if (dev->mode_config.tv_select_subconnector_property)
-		return 0;
-
-	/*
-	 * Basic connector properties
-	 */
-	tv_selector = drm_property_create_enum(dev, 0,
-					  "select subconnector",
-					  drm_tv_select_enum_list,
-					  ARRAY_SIZE(drm_tv_select_enum_list));
-	if (!tv_selector)
-		goto nomem;
-
-	dev->mode_config.tv_select_subconnector_property = tv_selector;
-
-	tv_subconnector =
-		drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
-				    "subconnector",
-				    drm_tv_subconnector_enum_list,
-				    ARRAY_SIZE(drm_tv_subconnector_enum_list));
-	if (!tv_subconnector)
-		goto nomem;
-	dev->mode_config.tv_subconnector_property = tv_subconnector;
-
-	/*
-	 * Other, TV specific properties: margins & TV modes.
-	 */
-	dev->mode_config.tv_left_margin_property =
-		drm_property_create_range(dev, 0, "left margin", 0, 100);
-	if (!dev->mode_config.tv_left_margin_property)
-		goto nomem;
-
-	dev->mode_config.tv_right_margin_property =
-		drm_property_create_range(dev, 0, "right margin", 0, 100);
-	if (!dev->mode_config.tv_right_margin_property)
-		goto nomem;
-
-	dev->mode_config.tv_top_margin_property =
-		drm_property_create_range(dev, 0, "top margin", 0, 100);
-	if (!dev->mode_config.tv_top_margin_property)
-		goto nomem;
-
-	dev->mode_config.tv_bottom_margin_property =
-		drm_property_create_range(dev, 0, "bottom margin", 0, 100);
-	if (!dev->mode_config.tv_bottom_margin_property)
-		goto nomem;
-
-	dev->mode_config.tv_mode_property =
-		drm_property_create(dev, DRM_MODE_PROP_ENUM,
-				    "mode", num_modes);
-	if (!dev->mode_config.tv_mode_property)
-		goto nomem;
-
-	for (i = 0; i < num_modes; i++)
-		drm_property_add_enum(dev->mode_config.tv_mode_property, i,
-				      i, modes[i]);
-
-	dev->mode_config.tv_brightness_property =
-		drm_property_create_range(dev, 0, "brightness", 0, 100);
-	if (!dev->mode_config.tv_brightness_property)
-		goto nomem;
-
-	dev->mode_config.tv_contrast_property =
-		drm_property_create_range(dev, 0, "contrast", 0, 100);
-	if (!dev->mode_config.tv_contrast_property)
-		goto nomem;
-
-	dev->mode_config.tv_flicker_reduction_property =
-		drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
-	if (!dev->mode_config.tv_flicker_reduction_property)
-		goto nomem;
-
-	dev->mode_config.tv_overscan_property =
-		drm_property_create_range(dev, 0, "overscan", 0, 100);
-	if (!dev->mode_config.tv_overscan_property)
-		goto nomem;
-
-	dev->mode_config.tv_saturation_property =
-		drm_property_create_range(dev, 0, "saturation", 0, 100);
-	if (!dev->mode_config.tv_saturation_property)
-		goto nomem;
-
-	dev->mode_config.tv_hue_property =
-		drm_property_create_range(dev, 0, "hue", 0, 100);
-	if (!dev->mode_config.tv_hue_property)
-		goto nomem;
-
-	return 0;
-nomem:
-	return -ENOMEM;
-}
-EXPORT_SYMBOL(drm_mode_create_tv_properties);
-
-/**
- * drm_mode_create_scaling_mode_property - create scaling mode property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- */
-int drm_mode_create_scaling_mode_property(struct drm_device *dev)
-{
-	struct drm_property *scaling_mode;
-
-	if (dev->mode_config.scaling_mode_property)
-		return 0;
-
-	scaling_mode =
-		drm_property_create_enum(dev, 0, "scaling mode",
-				drm_scaling_mode_enum_list,
-				    ARRAY_SIZE(drm_scaling_mode_enum_list));
-
-	dev->mode_config.scaling_mode_property = scaling_mode;
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
-
-/**
- * drm_mode_create_aspect_ratio_property - create aspect ratio property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
-{
-	if (dev->mode_config.aspect_ratio_property)
-		return 0;
-
-	dev->mode_config.aspect_ratio_property =
-		drm_property_create_enum(dev, 0, "aspect ratio",
-				drm_aspect_ratio_enum_list,
-				ARRAY_SIZE(drm_aspect_ratio_enum_list));
-
-	if (dev->mode_config.aspect_ratio_property == NULL)
-		return -ENOMEM;
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
-
-/**
- * drm_mode_create_dirty_property - create dirty property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- */
-int drm_mode_create_dirty_info_property(struct drm_device *dev)
-{
-	struct drm_property *dirty_info;
-
-	if (dev->mode_config.dirty_info_property)
-		return 0;
-
-	dirty_info =
-		drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
-				    "dirty",
-				    drm_dirty_info_enum_list,
-				    ARRAY_SIZE(drm_dirty_info_enum_list));
-	dev->mode_config.dirty_info_property = dirty_info;
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
-
-/**
- * drm_mode_create_suggested_offset_properties - create suggests offset properties
- * @dev: DRM device
- *
- * Create the the suggested x/y offset property for connectors.
- */
-int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
-{
-	if (dev->mode_config.suggested_x_property && dev->mode_config.suggested_y_property)
-		return 0;
-
-	dev->mode_config.suggested_x_property =
-		drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested X", 0, 0xffffffff);
-
-	dev->mode_config.suggested_y_property =
-		drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested Y", 0, 0xffffffff);
-
-	if (dev->mode_config.suggested_x_property == NULL ||
-	    dev->mode_config.suggested_y_property == NULL)
-		return -ENOMEM;
-	return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
-
-/**
  * drm_mode_getresources - get graphics configuration
  * @dev: drm device for the ioctl
  * @data: data pointer for the ioctl
@@ -2123,599 +609,6 @@
 	return 0;
 }
 
-static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
-					 const struct drm_file *file_priv)
-{
-	/*
-	 * If user-space hasn't configured the driver to expose the stereo 3D
-	 * modes, don't expose them.
-	 */
-	if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
-		return false;
-
-	return true;
-}
-
-static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector)
-{
-	/* For atomic drivers only state objects are synchronously updated and
-	 * protected by modeset locks, so check those first. */
-	if (connector->state)
-		return connector->state->best_encoder;
-	return connector->encoder;
-}
-
-/* helper for getconnector and getproperties ioctls */
-static int get_properties(struct drm_mode_object *obj, bool atomic,
-		uint32_t __user *prop_ptr, uint64_t __user *prop_values,
-		uint32_t *arg_count_props)
-{
-	int props_count;
-	int i, ret, copied;
-
-	props_count = obj->properties->count;
-	if (!atomic)
-		props_count -= obj->properties->atomic_count;
-
-	if ((*arg_count_props >= props_count) && props_count) {
-		for (i = 0, copied = 0; copied < props_count; i++) {
-			struct drm_property *prop = obj->properties->properties[i];
-			uint64_t val;
-
-			if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic)
-				continue;
-
-			ret = drm_object_property_get_value(obj, prop, &val);
-			if (ret)
-				return ret;
-
-			if (put_user(prop->base.id, prop_ptr + copied))
-				return -EFAULT;
-
-			if (put_user(val, prop_values + copied))
-				return -EFAULT;
-
-			copied++;
-		}
-	}
-	*arg_count_props = props_count;
-
-	return 0;
-}
-
-/**
- * drm_mode_getconnector - get connector configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Construct a connector configuration structure to return to the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getconnector(struct drm_device *dev, void *data,
-			  struct drm_file *file_priv)
-{
-	struct drm_mode_get_connector *out_resp = data;
-	struct drm_connector *connector;
-	struct drm_encoder *encoder;
-	struct drm_display_mode *mode;
-	int mode_count = 0;
-	int encoders_count = 0;
-	int ret = 0;
-	int copied = 0;
-	int i;
-	struct drm_mode_modeinfo u_mode;
-	struct drm_mode_modeinfo __user *mode_ptr;
-	uint32_t __user *encoder_ptr;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
-
-	mutex_lock(&dev->mode_config.mutex);
-
-	connector = drm_connector_lookup(dev, out_resp->connector_id);
-	if (!connector) {
-		ret = -ENOENT;
-		goto out_unlock;
-	}
-
-	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
-		if (connector->encoder_ids[i] != 0)
-			encoders_count++;
-
-	if (out_resp->count_modes == 0) {
-		connector->funcs->fill_modes(connector,
-					     dev->mode_config.max_width,
-					     dev->mode_config.max_height);
-	}
-
-	/* delayed so we get modes regardless of pre-fill_modes state */
-	list_for_each_entry(mode, &connector->modes, head)
-		if (drm_mode_expose_to_userspace(mode, file_priv))
-			mode_count++;
-
-	out_resp->connector_id = connector->base.id;
-	out_resp->connector_type = connector->connector_type;
-	out_resp->connector_type_id = connector->connector_type_id;
-	out_resp->mm_width = connector->display_info.width_mm;
-	out_resp->mm_height = connector->display_info.height_mm;
-	out_resp->subpixel = connector->display_info.subpixel_order;
-	out_resp->connection = connector->status;
-
-	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-	encoder = drm_connector_get_encoder(connector);
-	if (encoder)
-		out_resp->encoder_id = encoder->base.id;
-	else
-		out_resp->encoder_id = 0;
-
-	/*
-	 * This ioctl is called twice, once to determine how much space is
-	 * needed, and the 2nd time to fill it.
-	 */
-	if ((out_resp->count_modes >= mode_count) && mode_count) {
-		copied = 0;
-		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
-		list_for_each_entry(mode, &connector->modes, head) {
-			if (!drm_mode_expose_to_userspace(mode, file_priv))
-				continue;
-
-			drm_mode_convert_to_umode(&u_mode, mode);
-			if (copy_to_user(mode_ptr + copied,
-					 &u_mode, sizeof(u_mode))) {
-				ret = -EFAULT;
-				goto out;
-			}
-			copied++;
-		}
-	}
-	out_resp->count_modes = mode_count;
-
-	ret = get_properties(&connector->base, file_priv->atomic,
-			(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
-			(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
-			&out_resp->count_props);
-	if (ret)
-		goto out;
-
-	if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
-		copied = 0;
-		encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
-		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-			if (connector->encoder_ids[i] != 0) {
-				if (put_user(connector->encoder_ids[i],
-					     encoder_ptr + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
-			}
-		}
-	}
-	out_resp->count_encoders = encoders_count;
-
-out:
-	drm_modeset_unlock(&dev->mode_config.connection_mutex);
-
-	drm_connector_unreference(connector);
-out_unlock:
-	mutex_unlock(&dev->mode_config.mutex);
-
-	return ret;
-}
-
-static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
-{
-	struct drm_connector *connector;
-	struct drm_device *dev = encoder->dev;
-	bool uses_atomic = false;
-
-	/* For atomic drivers only state objects are synchronously updated and
-	 * protected by modeset locks, so check those first. */
-	drm_for_each_connector(connector, dev) {
-		if (!connector->state)
-			continue;
-
-		uses_atomic = true;
-
-		if (connector->state->best_encoder != encoder)
-			continue;
-
-		return connector->state->crtc;
-	}
-
-	/* Don't return stale data (e.g. pending async disable). */
-	if (uses_atomic)
-		return NULL;
-
-	return encoder->crtc;
-}
-
-/**
- * drm_mode_getencoder - get encoder configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Construct a encoder configuration structure to return to the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getencoder(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
-{
-	struct drm_mode_get_encoder *enc_resp = data;
-	struct drm_encoder *encoder;
-	struct drm_crtc *crtc;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	encoder = drm_encoder_find(dev, enc_resp->encoder_id);
-	if (!encoder)
-		return -ENOENT;
-
-	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-	crtc = drm_encoder_get_crtc(encoder);
-	if (crtc)
-		enc_resp->crtc_id = crtc->base.id;
-	else
-		enc_resp->crtc_id = 0;
-	drm_modeset_unlock(&dev->mode_config.connection_mutex);
-
-	enc_resp->encoder_type = encoder->encoder_type;
-	enc_resp->encoder_id = encoder->base.id;
-	enc_resp->possible_crtcs = encoder->possible_crtcs;
-	enc_resp->possible_clones = encoder->possible_clones;
-
-	return 0;
-}
-
-/**
- * drm_mode_getplane_res - enumerate all plane resources
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Construct a list of plane ids to return to the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getplane_res(struct drm_device *dev, void *data,
-			  struct drm_file *file_priv)
-{
-	struct drm_mode_get_plane_res *plane_resp = data;
-	struct drm_mode_config *config;
-	struct drm_plane *plane;
-	uint32_t __user *plane_ptr;
-	int copied = 0;
-	unsigned num_planes;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	config = &dev->mode_config;
-
-	if (file_priv->universal_planes)
-		num_planes = config->num_total_plane;
-	else
-		num_planes = config->num_overlay_plane;
-
-	/*
-	 * This ioctl is called twice, once to determine how much space is
-	 * needed, and the 2nd time to fill it.
-	 */
-	if (num_planes &&
-	    (plane_resp->count_planes >= num_planes)) {
-		plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
-
-		/* Plane lists are invariant, no locking needed. */
-		drm_for_each_plane(plane, dev) {
-			/*
-			 * Unless userspace set the 'universal planes'
-			 * capability bit, only advertise overlays.
-			 */
-			if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
-			    !file_priv->universal_planes)
-				continue;
-
-			if (put_user(plane->base.id, plane_ptr + copied))
-				return -EFAULT;
-			copied++;
-		}
-	}
-	plane_resp->count_planes = num_planes;
-
-	return 0;
-}
-
-/**
- * drm_mode_getplane - get plane configuration
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Construct a plane configuration structure to return to the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getplane(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
-{
-	struct drm_mode_get_plane *plane_resp = data;
-	struct drm_plane *plane;
-	uint32_t __user *format_ptr;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	plane = drm_plane_find(dev, plane_resp->plane_id);
-	if (!plane)
-		return -ENOENT;
-
-	drm_modeset_lock(&plane->mutex, NULL);
-	if (plane->crtc)
-		plane_resp->crtc_id = plane->crtc->base.id;
-	else
-		plane_resp->crtc_id = 0;
-
-	if (plane->fb)
-		plane_resp->fb_id = plane->fb->base.id;
-	else
-		plane_resp->fb_id = 0;
-	drm_modeset_unlock(&plane->mutex);
-
-	plane_resp->plane_id = plane->base.id;
-	plane_resp->possible_crtcs = plane->possible_crtcs;
-	plane_resp->gamma_size = 0;
-
-	/*
-	 * This ioctl is called twice, once to determine how much space is
-	 * needed, and the 2nd time to fill it.
-	 */
-	if (plane->format_count &&
-	    (plane_resp->count_format_types >= plane->format_count)) {
-		format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
-		if (copy_to_user(format_ptr,
-				 plane->format_types,
-				 sizeof(uint32_t) * plane->format_count)) {
-			return -EFAULT;
-		}
-	}
-	plane_resp->count_format_types = plane->format_count;
-
-	return 0;
-}
-
-/**
- * drm_plane_check_pixel_format - Check if the plane supports the pixel format
- * @plane: plane to check for format support
- * @format: the pixel format
- *
- * Returns:
- * Zero of @plane has @format in its list of supported pixel formats, -EINVAL
- * otherwise.
- */
-int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
-{
-	unsigned int i;
-
-	for (i = 0; i < plane->format_count; i++) {
-		if (format == plane->format_types[i])
-			return 0;
-	}
-
-	return -EINVAL;
-}
-
-static int check_src_coords(uint32_t src_x, uint32_t src_y,
-			    uint32_t src_w, uint32_t src_h,
-			    const struct drm_framebuffer *fb)
-{
-	unsigned int fb_width, fb_height;
-
-	fb_width = fb->width << 16;
-	fb_height = fb->height << 16;
-
-	/* Make sure source coordinates are inside the fb. */
-	if (src_w > fb_width ||
-	    src_x > fb_width - src_w ||
-	    src_h > fb_height ||
-	    src_y > fb_height - src_h) {
-		DRM_DEBUG_KMS("Invalid source coordinates "
-			      "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
-			      src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
-			      src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
-			      src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
-			      src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
-		return -ENOSPC;
-	}
-
-	return 0;
-}
-
-/*
- * setplane_internal - setplane handler for internal callers
- *
- * Note that we assume an extra reference has already been taken on fb.  If the
- * update fails, this reference will be dropped before return; if it succeeds,
- * the previous framebuffer (if any) will be unreferenced instead.
- *
- * src_{x,y,w,h} are provided in 16.16 fixed point format
- */
-static int __setplane_internal(struct drm_plane *plane,
-			       struct drm_crtc *crtc,
-			       struct drm_framebuffer *fb,
-			       int32_t crtc_x, int32_t crtc_y,
-			       uint32_t crtc_w, uint32_t crtc_h,
-			       /* src_{x,y,w,h} values are 16.16 fixed point */
-			       uint32_t src_x, uint32_t src_y,
-			       uint32_t src_w, uint32_t src_h)
-{
-	int ret = 0;
-
-	/* No fb means shut it down */
-	if (!fb) {
-		plane->old_fb = plane->fb;
-		ret = plane->funcs->disable_plane(plane);
-		if (!ret) {
-			plane->crtc = NULL;
-			plane->fb = NULL;
-		} else {
-			plane->old_fb = NULL;
-		}
-		goto out;
-	}
-
-	/* Check whether this plane is usable on this CRTC */
-	if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
-		DRM_DEBUG_KMS("Invalid crtc for plane\n");
-		ret = -EINVAL;
-		goto out;
-	}
-
-	/* Check whether this plane supports the fb pixel format. */
-	ret = drm_plane_check_pixel_format(plane, fb->pixel_format);
-	if (ret) {
-		DRM_DEBUG_KMS("Invalid pixel format %s\n",
-			      drm_get_format_name(fb->pixel_format));
-		goto out;
-	}
-
-	/* Give drivers some help against integer overflows */
-	if (crtc_w > INT_MAX ||
-	    crtc_x > INT_MAX - (int32_t) crtc_w ||
-	    crtc_h > INT_MAX ||
-	    crtc_y > INT_MAX - (int32_t) crtc_h) {
-		DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
-			      crtc_w, crtc_h, crtc_x, crtc_y);
-		ret = -ERANGE;
-		goto out;
-	}
-
-	ret = check_src_coords(src_x, src_y, src_w, src_h, fb);
-	if (ret)
-		goto out;
-
-	plane->old_fb = plane->fb;
-	ret = plane->funcs->update_plane(plane, crtc, fb,
-					 crtc_x, crtc_y, crtc_w, crtc_h,
-					 src_x, src_y, src_w, src_h);
-	if (!ret) {
-		plane->crtc = crtc;
-		plane->fb = fb;
-		fb = NULL;
-	} else {
-		plane->old_fb = NULL;
-	}
-
-out:
-	if (fb)
-		drm_framebuffer_unreference(fb);
-	if (plane->old_fb)
-		drm_framebuffer_unreference(plane->old_fb);
-	plane->old_fb = NULL;
-
-	return ret;
-}
-
-static int setplane_internal(struct drm_plane *plane,
-			     struct drm_crtc *crtc,
-			     struct drm_framebuffer *fb,
-			     int32_t crtc_x, int32_t crtc_y,
-			     uint32_t crtc_w, uint32_t crtc_h,
-			     /* src_{x,y,w,h} values are 16.16 fixed point */
-			     uint32_t src_x, uint32_t src_y,
-			     uint32_t src_w, uint32_t src_h)
-{
-	int ret;
-
-	drm_modeset_lock_all(plane->dev);
-	ret = __setplane_internal(plane, crtc, fb,
-				  crtc_x, crtc_y, crtc_w, crtc_h,
-				  src_x, src_y, src_w, src_h);
-	drm_modeset_unlock_all(plane->dev);
-
-	return ret;
-}
-
-/**
- * drm_mode_setplane - configure a plane's configuration
- * @dev: DRM device
- * @data: ioctl data*
- * @file_priv: DRM file info
- *
- * Set plane configuration, including placement, fb, scaling, and other factors.
- * Or pass a NULL fb to disable (planes may be disabled without providing a
- * valid crtc).
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_setplane(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
-{
-	struct drm_mode_set_plane *plane_req = data;
-	struct drm_plane *plane;
-	struct drm_crtc *crtc = NULL;
-	struct drm_framebuffer *fb = NULL;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	/*
-	 * First, find the plane, crtc, and fb objects.  If not available,
-	 * we don't bother to call the driver.
-	 */
-	plane = drm_plane_find(dev, plane_req->plane_id);
-	if (!plane) {
-		DRM_DEBUG_KMS("Unknown plane ID %d\n",
-			      plane_req->plane_id);
-		return -ENOENT;
-	}
-
-	if (plane_req->fb_id) {
-		fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
-		if (!fb) {
-			DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
-				      plane_req->fb_id);
-			return -ENOENT;
-		}
-
-		crtc = drm_crtc_find(dev, plane_req->crtc_id);
-		if (!crtc) {
-			DRM_DEBUG_KMS("Unknown crtc ID %d\n",
-				      plane_req->crtc_id);
-			return -ENOENT;
-		}
-	}
-
-	/*
-	 * setplane_internal will take care of deref'ing either the old or new
-	 * framebuffer depending on success.
-	 */
-	return setplane_internal(plane, crtc, fb,
-				 plane_req->crtc_x, plane_req->crtc_y,
-				 plane_req->crtc_w, plane_req->crtc_h,
-				 plane_req->src_x, plane_req->src_y,
-				 plane_req->src_w, plane_req->src_h);
-}
-
 /**
  * drm_mode_set_config_internal - helper to call ->set_config
  * @set: modeset config to set
@@ -2802,12 +695,13 @@
 	drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
 
 	if (crtc->state &&
-	    crtc->primary->state->rotation & (BIT(DRM_ROTATE_90) |
-					      BIT(DRM_ROTATE_270)))
+	    crtc->primary->state->rotation & (DRM_ROTATE_90 |
+					      DRM_ROTATE_270))
 		swap(hdisplay, vdisplay);
 
-	return check_src_coords(x << 16, y << 16,
-				hdisplay << 16, vdisplay << 16, fb);
+	return drm_framebuffer_check_src_coords(x << 16, y << 16,
+						hdisplay << 16, vdisplay << 16,
+						fb);
 }
 EXPORT_SYMBOL(drm_crtc_check_viewport);
 
@@ -2902,8 +796,9 @@
 			ret = drm_plane_check_pixel_format(crtc->primary,
 							   fb->pixel_format);
 			if (ret) {
-				DRM_DEBUG_KMS("Invalid pixel format %s\n",
-					drm_get_format_name(fb->pixel_format));
+				char *format_name = drm_get_format_name(fb->pixel_format);
+				DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
+				kfree(format_name);
 				goto out;
 			}
 		}
@@ -2993,2004 +888,9 @@
 	return ret;
 }
 
-/**
- * drm_mode_cursor_universal - translate legacy cursor ioctl call into a
- *     universal plane handler call
- * @crtc: crtc to update cursor for
- * @req: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Legacy cursor ioctl's work directly with driver buffer handles.  To
- * translate legacy ioctl calls into universal plane handler calls, we need to
- * wrap the native buffer handle in a drm_framebuffer.
- *
- * Note that we assume any handle passed to the legacy ioctls was a 32-bit ARGB
- * buffer with a pitch of 4*width; the universal plane interface should be used
- * directly in cases where the hardware can support other buffer settings and
- * userspace wants to make use of these capabilities.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-static int drm_mode_cursor_universal(struct drm_crtc *crtc,
-				     struct drm_mode_cursor2 *req,
-				     struct drm_file *file_priv)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_framebuffer *fb = NULL;
-	struct drm_mode_fb_cmd2 fbreq = {
-		.width = req->width,
-		.height = req->height,
-		.pixel_format = DRM_FORMAT_ARGB8888,
-		.pitches = { req->width * 4 },
-		.handles = { req->handle },
-	};
-	int32_t crtc_x, crtc_y;
-	uint32_t crtc_w = 0, crtc_h = 0;
-	uint32_t src_w = 0, src_h = 0;
-	int ret = 0;
-
-	BUG_ON(!crtc->cursor);
-	WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL);
-
-	/*
-	 * Obtain fb we'll be using (either new or existing) and take an extra
-	 * reference to it if fb != null.  setplane will take care of dropping
-	 * the reference if the plane update fails.
-	 */
-	if (req->flags & DRM_MODE_CURSOR_BO) {
-		if (req->handle) {
-			fb = internal_framebuffer_create(dev, &fbreq, file_priv);
-			if (IS_ERR(fb)) {
-				DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
-				return PTR_ERR(fb);
-			}
-			fb->hot_x = req->hot_x;
-			fb->hot_y = req->hot_y;
-		} else {
-			fb = NULL;
-		}
-	} else {
-		fb = crtc->cursor->fb;
-		if (fb)
-			drm_framebuffer_reference(fb);
-	}
-
-	if (req->flags & DRM_MODE_CURSOR_MOVE) {
-		crtc_x = req->x;
-		crtc_y = req->y;
-	} else {
-		crtc_x = crtc->cursor_x;
-		crtc_y = crtc->cursor_y;
-	}
-
-	if (fb) {
-		crtc_w = fb->width;
-		crtc_h = fb->height;
-		src_w = fb->width << 16;
-		src_h = fb->height << 16;
-	}
-
-	/*
-	 * setplane_internal will take care of deref'ing either the old or new
-	 * framebuffer depending on success.
-	 */
-	ret = __setplane_internal(crtc->cursor, crtc, fb,
-				crtc_x, crtc_y, crtc_w, crtc_h,
-				0, 0, src_w, src_h);
-
-	/* Update successful; save new cursor position, if necessary */
-	if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
-		crtc->cursor_x = req->x;
-		crtc->cursor_y = req->y;
-	}
-
-	return ret;
-}
-
-static int drm_mode_cursor_common(struct drm_device *dev,
-				  struct drm_mode_cursor2 *req,
-				  struct drm_file *file_priv)
-{
-	struct drm_crtc *crtc;
-	int ret = 0;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
-		return -EINVAL;
-
-	crtc = drm_crtc_find(dev, req->crtc_id);
-	if (!crtc) {
-		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
-		return -ENOENT;
-	}
-
-	/*
-	 * If this crtc has a universal cursor plane, call that plane's update
-	 * handler rather than using legacy cursor handlers.
-	 */
-	drm_modeset_lock_crtc(crtc, crtc->cursor);
-	if (crtc->cursor) {
-		ret = drm_mode_cursor_universal(crtc, req, file_priv);
-		goto out;
-	}
-
-	if (req->flags & DRM_MODE_CURSOR_BO) {
-		if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
-			ret = -ENXIO;
-			goto out;
-		}
-		/* Turns off the cursor if handle is 0 */
-		if (crtc->funcs->cursor_set2)
-			ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle,
-						      req->width, req->height, req->hot_x, req->hot_y);
-		else
-			ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
-						      req->width, req->height);
-	}
-
-	if (req->flags & DRM_MODE_CURSOR_MOVE) {
-		if (crtc->funcs->cursor_move) {
-			ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
-		} else {
-			ret = -EFAULT;
-			goto out;
-		}
-	}
-out:
-	drm_modeset_unlock_crtc(crtc);
-
-	return ret;
-
-}
-
-
-/**
- * drm_mode_cursor_ioctl - set CRTC's cursor configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Set the cursor configuration based on user request.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_cursor_ioctl(struct drm_device *dev,
-			  void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_cursor *req = data;
-	struct drm_mode_cursor2 new_req;
-
-	memcpy(&new_req, req, sizeof(struct drm_mode_cursor));
-	new_req.hot_x = new_req.hot_y = 0;
-
-	return drm_mode_cursor_common(dev, &new_req, file_priv);
-}
-
-/**
- * drm_mode_cursor2_ioctl - set CRTC's cursor configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Set the cursor configuration based on user request. This implements the 2nd
- * version of the cursor ioctl, which allows userspace to additionally specify
- * the hotspot of the pointer.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_cursor2_ioctl(struct drm_device *dev,
-			   void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_cursor2 *req = data;
-
-	return drm_mode_cursor_common(dev, req, file_priv);
-}
-
-/**
- * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description
- * @bpp: bits per pixels
- * @depth: bit depth per pixel
- *
- * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
- * Useful in fbdev emulation code, since that deals in those values.
- */
-uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
-{
-	uint32_t fmt;
-
-	switch (bpp) {
-	case 8:
-		fmt = DRM_FORMAT_C8;
-		break;
-	case 16:
-		if (depth == 15)
-			fmt = DRM_FORMAT_XRGB1555;
-		else
-			fmt = DRM_FORMAT_RGB565;
-		break;
-	case 24:
-		fmt = DRM_FORMAT_RGB888;
-		break;
-	case 32:
-		if (depth == 24)
-			fmt = DRM_FORMAT_XRGB8888;
-		else if (depth == 30)
-			fmt = DRM_FORMAT_XRGB2101010;
-		else
-			fmt = DRM_FORMAT_ARGB8888;
-		break;
-	default:
-		DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
-		fmt = DRM_FORMAT_XRGB8888;
-		break;
-	}
-
-	return fmt;
-}
-EXPORT_SYMBOL(drm_mode_legacy_fb_format);
-
-/**
- * drm_mode_addfb - add an FB to the graphics configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Add a new FB to the specified CRTC, given a user request. This is the
- * original addfb ioctl which only supported RGB formats.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_addfb(struct drm_device *dev,
-		   void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_fb_cmd *or = data;
-	struct drm_mode_fb_cmd2 r = {};
-	int ret;
-
-	/* convert to new format and call new ioctl */
-	r.fb_id = or->fb_id;
-	r.width = or->width;
-	r.height = or->height;
-	r.pitches[0] = or->pitch;
-	r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
-	r.handles[0] = or->handle;
-
-	ret = drm_mode_addfb2(dev, &r, file_priv);
-	if (ret)
-		return ret;
-
-	or->fb_id = r.fb_id;
-
-	return 0;
-}
-
-static int format_check(const struct drm_mode_fb_cmd2 *r)
-{
-	uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
-
-	switch (format) {
-	case DRM_FORMAT_C8:
-	case DRM_FORMAT_RGB332:
-	case DRM_FORMAT_BGR233:
-	case DRM_FORMAT_XRGB4444:
-	case DRM_FORMAT_XBGR4444:
-	case DRM_FORMAT_RGBX4444:
-	case DRM_FORMAT_BGRX4444:
-	case DRM_FORMAT_ARGB4444:
-	case DRM_FORMAT_ABGR4444:
-	case DRM_FORMAT_RGBA4444:
-	case DRM_FORMAT_BGRA4444:
-	case DRM_FORMAT_XRGB1555:
-	case DRM_FORMAT_XBGR1555:
-	case DRM_FORMAT_RGBX5551:
-	case DRM_FORMAT_BGRX5551:
-	case DRM_FORMAT_ARGB1555:
-	case DRM_FORMAT_ABGR1555:
-	case DRM_FORMAT_RGBA5551:
-	case DRM_FORMAT_BGRA5551:
-	case DRM_FORMAT_RGB565:
-	case DRM_FORMAT_BGR565:
-	case DRM_FORMAT_RGB888:
-	case DRM_FORMAT_BGR888:
-	case DRM_FORMAT_XRGB8888:
-	case DRM_FORMAT_XBGR8888:
-	case DRM_FORMAT_RGBX8888:
-	case DRM_FORMAT_BGRX8888:
-	case DRM_FORMAT_ARGB8888:
-	case DRM_FORMAT_ABGR8888:
-	case DRM_FORMAT_RGBA8888:
-	case DRM_FORMAT_BGRA8888:
-	case DRM_FORMAT_XRGB2101010:
-	case DRM_FORMAT_XBGR2101010:
-	case DRM_FORMAT_RGBX1010102:
-	case DRM_FORMAT_BGRX1010102:
-	case DRM_FORMAT_ARGB2101010:
-	case DRM_FORMAT_ABGR2101010:
-	case DRM_FORMAT_RGBA1010102:
-	case DRM_FORMAT_BGRA1010102:
-	case DRM_FORMAT_YUYV:
-	case DRM_FORMAT_YVYU:
-	case DRM_FORMAT_UYVY:
-	case DRM_FORMAT_VYUY:
-	case DRM_FORMAT_AYUV:
-	case DRM_FORMAT_NV12:
-	case DRM_FORMAT_NV21:
-	case DRM_FORMAT_NV16:
-	case DRM_FORMAT_NV61:
-	case DRM_FORMAT_NV24:
-	case DRM_FORMAT_NV42:
-	case DRM_FORMAT_YUV410:
-	case DRM_FORMAT_YVU410:
-	case DRM_FORMAT_YUV411:
-	case DRM_FORMAT_YVU411:
-	case DRM_FORMAT_YUV420:
-	case DRM_FORMAT_YVU420:
-	case DRM_FORMAT_YUV422:
-	case DRM_FORMAT_YVU422:
-	case DRM_FORMAT_YUV444:
-	case DRM_FORMAT_YVU444:
-		return 0;
-	default:
-		DRM_DEBUG_KMS("invalid pixel format %s\n",
-			      drm_get_format_name(r->pixel_format));
-		return -EINVAL;
-	}
-}
-
-static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
-{
-	int ret, hsub, vsub, num_planes, i;
-
-	ret = format_check(r);
-	if (ret) {
-		DRM_DEBUG_KMS("bad framebuffer format %s\n",
-			      drm_get_format_name(r->pixel_format));
-		return ret;
-	}
-
-	hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
-	vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
-	num_planes = drm_format_num_planes(r->pixel_format);
-
-	if (r->width == 0 || r->width % hsub) {
-		DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
-		return -EINVAL;
-	}
-
-	if (r->height == 0 || r->height % vsub) {
-		DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < num_planes; i++) {
-		unsigned int width = r->width / (i != 0 ? hsub : 1);
-		unsigned int height = r->height / (i != 0 ? vsub : 1);
-		unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
-
-		if (!r->handles[i]) {
-			DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
-			return -EINVAL;
-		}
-
-		if ((uint64_t) width * cpp > UINT_MAX)
-			return -ERANGE;
-
-		if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
-			return -ERANGE;
-
-		if (r->pitches[i] < width * cpp) {
-			DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
-			return -EINVAL;
-		}
-
-		if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) {
-			DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
-				      r->modifier[i], i);
-			return -EINVAL;
-		}
-
-		/* modifier specific checks: */
-		switch (r->modifier[i]) {
-		case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
-			/* NOTE: the pitch restriction may be lifted later if it turns
-			 * out that no hw has this restriction:
-			 */
-			if (r->pixel_format != DRM_FORMAT_NV12 ||
-					width % 128 || height % 32 ||
-					r->pitches[i] % 128) {
-				DRM_DEBUG_KMS("bad modifier data for plane %d\n", i);
-				return -EINVAL;
-			}
-			break;
-
-		default:
-			break;
-		}
-	}
-
-	for (i = num_planes; i < 4; i++) {
-		if (r->modifier[i]) {
-			DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i);
-			return -EINVAL;
-		}
-
-		/* Pre-FB_MODIFIERS userspace didn't clear the structs properly. */
-		if (!(r->flags & DRM_MODE_FB_MODIFIERS))
-			continue;
-
-		if (r->handles[i]) {
-			DRM_DEBUG_KMS("buffer object handle for unused plane %d\n", i);
-			return -EINVAL;
-		}
-
-		if (r->pitches[i]) {
-			DRM_DEBUG_KMS("non-zero pitch for unused plane %d\n", i);
-			return -EINVAL;
-		}
-
-		if (r->offsets[i]) {
-			DRM_DEBUG_KMS("non-zero offset for unused plane %d\n", i);
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
-static struct drm_framebuffer *
-internal_framebuffer_create(struct drm_device *dev,
-			    const struct drm_mode_fb_cmd2 *r,
-			    struct drm_file *file_priv)
-{
-	struct drm_mode_config *config = &dev->mode_config;
-	struct drm_framebuffer *fb;
-	int ret;
-
-	if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
-		DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
-		return ERR_PTR(-EINVAL);
-	}
-
-	if ((config->min_width > r->width) || (r->width > config->max_width)) {
-		DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
-			  r->width, config->min_width, config->max_width);
-		return ERR_PTR(-EINVAL);
-	}
-	if ((config->min_height > r->height) || (r->height > config->max_height)) {
-		DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
-			  r->height, config->min_height, config->max_height);
-		return ERR_PTR(-EINVAL);
-	}
-
-	if (r->flags & DRM_MODE_FB_MODIFIERS &&
-	    !dev->mode_config.allow_fb_modifiers) {
-		DRM_DEBUG_KMS("driver does not support fb modifiers\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	ret = framebuffer_check(r);
-	if (ret)
-		return ERR_PTR(ret);
-
-	fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
-	if (IS_ERR(fb)) {
-		DRM_DEBUG_KMS("could not create framebuffer\n");
-		return fb;
-	}
-
-	return fb;
-}
-
-/**
- * drm_mode_addfb2 - add an FB to the graphics configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Add a new FB to the specified CRTC, given a user request with format. This is
- * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
- * and uses fourcc codes as pixel format specifiers.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_addfb2(struct drm_device *dev,
-		    void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_fb_cmd2 *r = data;
-	struct drm_framebuffer *fb;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	fb = internal_framebuffer_create(dev, r, file_priv);
-	if (IS_ERR(fb))
-		return PTR_ERR(fb);
-
-	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
-	r->fb_id = fb->base.id;
-
-	/* Transfer ownership to the filp for reaping on close */
-	mutex_lock(&file_priv->fbs_lock);
-	list_add(&fb->filp_head, &file_priv->fbs);
-	mutex_unlock(&file_priv->fbs_lock);
-
-	return 0;
-}
-
-struct drm_mode_rmfb_work {
-	struct work_struct work;
-	struct list_head fbs;
-};
-
-static void drm_mode_rmfb_work_fn(struct work_struct *w)
-{
-	struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work);
-
-	while (!list_empty(&arg->fbs)) {
-		struct drm_framebuffer *fb =
-			list_first_entry(&arg->fbs, typeof(*fb), filp_head);
-
-		list_del_init(&fb->filp_head);
-		drm_framebuffer_remove(fb);
-	}
-}
-
-/**
- * drm_mode_rmfb - remove an FB from the configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Remove the FB specified by the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_rmfb(struct drm_device *dev,
-		   void *data, struct drm_file *file_priv)
-{
-	struct drm_framebuffer *fb = NULL;
-	struct drm_framebuffer *fbl = NULL;
-	uint32_t *id = data;
-	int found = 0;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	fb = drm_framebuffer_lookup(dev, *id);
-	if (!fb)
-		return -ENOENT;
-
-	mutex_lock(&file_priv->fbs_lock);
-	list_for_each_entry(fbl, &file_priv->fbs, filp_head)
-		if (fb == fbl)
-			found = 1;
-	if (!found) {
-		mutex_unlock(&file_priv->fbs_lock);
-		goto fail_unref;
-	}
-
-	list_del_init(&fb->filp_head);
-	mutex_unlock(&file_priv->fbs_lock);
-
-	/* drop the reference we picked up in framebuffer lookup */
-	drm_framebuffer_unreference(fb);
-
-	/*
-	 * we now own the reference that was stored in the fbs list
-	 *
-	 * drm_framebuffer_remove may fail with -EINTR on pending signals,
-	 * so run this in a separate stack as there's no way to correctly
-	 * handle this after the fb is already removed from the lookup table.
-	 */
-	if (drm_framebuffer_read_refcount(fb) > 1) {
-		struct drm_mode_rmfb_work arg;
-
-		INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
-		INIT_LIST_HEAD(&arg.fbs);
-		list_add_tail(&fb->filp_head, &arg.fbs);
-
-		schedule_work(&arg.work);
-		flush_work(&arg.work);
-		destroy_work_on_stack(&arg.work);
-	} else
-		drm_framebuffer_unreference(fb);
-
-	return 0;
-
-fail_unref:
-	drm_framebuffer_unreference(fb);
-	return -ENOENT;
-}
-
-/**
- * drm_mode_getfb - get FB info
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Lookup the FB given its ID and return info about it.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getfb(struct drm_device *dev,
-		   void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_fb_cmd *r = data;
-	struct drm_framebuffer *fb;
-	int ret;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	fb = drm_framebuffer_lookup(dev, r->fb_id);
-	if (!fb)
-		return -ENOENT;
-
-	r->height = fb->height;
-	r->width = fb->width;
-	r->depth = fb->depth;
-	r->bpp = fb->bits_per_pixel;
-	r->pitch = fb->pitches[0];
-	if (fb->funcs->create_handle) {
-		if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) ||
-		    drm_is_control_client(file_priv)) {
-			ret = fb->funcs->create_handle(fb, file_priv,
-						       &r->handle);
-		} else {
-			/* GET_FB() is an unprivileged ioctl so we must not
-			 * return a buffer-handle to non-master processes! For
-			 * backwards-compatibility reasons, we cannot make
-			 * GET_FB() privileged, so just return an invalid handle
-			 * for non-masters. */
-			r->handle = 0;
-			ret = 0;
-		}
-	} else {
-		ret = -ENODEV;
-	}
-
-	drm_framebuffer_unreference(fb);
-
-	return ret;
-}
-
-/**
- * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Lookup the FB and flush out the damaged area supplied by userspace as a clip
- * rectangle list. Generic userspace which does frontbuffer rendering must call
- * this ioctl to flush out the changes on manual-update display outputs, e.g.
- * usb display-link, mipi manual update panels or edp panel self refresh modes.
- *
- * Modesetting drivers which always update the frontbuffer do not need to
- * implement the corresponding ->dirty framebuffer callback.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
-			   void *data, struct drm_file *file_priv)
-{
-	struct drm_clip_rect __user *clips_ptr;
-	struct drm_clip_rect *clips = NULL;
-	struct drm_mode_fb_dirty_cmd *r = data;
-	struct drm_framebuffer *fb;
-	unsigned flags;
-	int num_clips;
-	int ret;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	fb = drm_framebuffer_lookup(dev, r->fb_id);
-	if (!fb)
-		return -ENOENT;
-
-	num_clips = r->num_clips;
-	clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
-
-	if (!num_clips != !clips_ptr) {
-		ret = -EINVAL;
-		goto out_err1;
-	}
-
-	flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
-
-	/* If userspace annotates copy, clips must come in pairs */
-	if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
-		ret = -EINVAL;
-		goto out_err1;
-	}
-
-	if (num_clips && clips_ptr) {
-		if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
-			ret = -EINVAL;
-			goto out_err1;
-		}
-		clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
-		if (!clips) {
-			ret = -ENOMEM;
-			goto out_err1;
-		}
-
-		ret = copy_from_user(clips, clips_ptr,
-				     num_clips * sizeof(*clips));
-		if (ret) {
-			ret = -EFAULT;
-			goto out_err2;
-		}
-	}
-
-	if (fb->funcs->dirty) {
-		ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
-				       clips, num_clips);
-	} else {
-		ret = -ENOSYS;
-	}
-
-out_err2:
-	kfree(clips);
-out_err1:
-	drm_framebuffer_unreference(fb);
-
-	return ret;
-}
-
-/**
- * drm_fb_release - remove and free the FBs on this file
- * @priv: drm file for the ioctl
- *
- * Destroy all the FBs associated with @filp.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-void drm_fb_release(struct drm_file *priv)
-{
-	struct drm_framebuffer *fb, *tfb;
-	struct drm_mode_rmfb_work arg;
-
-	INIT_LIST_HEAD(&arg.fbs);
-
-	/*
-	 * When the file gets released that means no one else can access the fb
-	 * list any more, so no need to grab fpriv->fbs_lock. And we need to
-	 * avoid upsetting lockdep since the universal cursor code adds a
-	 * framebuffer while holding mutex locks.
-	 *
-	 * Note that a real deadlock between fpriv->fbs_lock and the modeset
-	 * locks is impossible here since no one else but this function can get
-	 * at it any more.
-	 */
-	list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
-		if (drm_framebuffer_read_refcount(fb) > 1) {
-			list_move_tail(&fb->filp_head, &arg.fbs);
-		} else {
-			list_del_init(&fb->filp_head);
-
-			/* This drops the fpriv->fbs reference. */
-			drm_framebuffer_unreference(fb);
-		}
-	}
-
-	if (!list_empty(&arg.fbs)) {
-		INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
-
-		schedule_work(&arg.work);
-		flush_work(&arg.work);
-		destroy_work_on_stack(&arg.work);
-	}
-}
-
-static bool drm_property_type_valid(struct drm_property *property)
-{
-	if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
-		return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
-	return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
-}
-
-/**
- * drm_property_create - create a new property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @num_values: number of pre-defined values
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Note that the DRM core keeps a per-device list of properties and that, if
- * drm_mode_config_cleanup() is called, it will destroy all properties created
- * by the driver.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create(struct drm_device *dev, int flags,
-					 const char *name, int num_values)
-{
-	struct drm_property *property = NULL;
-	int ret;
-
-	property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
-	if (!property)
-		return NULL;
-
-	property->dev = dev;
-
-	if (num_values) {
-		property->values = kcalloc(num_values, sizeof(uint64_t),
-					   GFP_KERNEL);
-		if (!property->values)
-			goto fail;
-	}
-
-	ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
-	if (ret)
-		goto fail;
-
-	property->flags = flags;
-	property->num_values = num_values;
-	INIT_LIST_HEAD(&property->enum_list);
-
-	if (name) {
-		strncpy(property->name, name, DRM_PROP_NAME_LEN);
-		property->name[DRM_PROP_NAME_LEN-1] = '\0';
-	}
-
-	list_add_tail(&property->head, &dev->mode_config.property_list);
-
-	WARN_ON(!drm_property_type_valid(property));
-
-	return property;
-fail:
-	kfree(property->values);
-	kfree(property);
-	return NULL;
-}
-EXPORT_SYMBOL(drm_property_create);
-
-/**
- * drm_property_create_enum - create a new enumeration property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @props: enumeration lists with property values
- * @num_values: number of pre-defined values
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Userspace is only allowed to set one of the predefined values for enumeration
- * properties.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
-					 const char *name,
-					 const struct drm_prop_enum_list *props,
-					 int num_values)
-{
-	struct drm_property *property;
-	int i, ret;
-
-	flags |= DRM_MODE_PROP_ENUM;
-
-	property = drm_property_create(dev, flags, name, num_values);
-	if (!property)
-		return NULL;
-
-	for (i = 0; i < num_values; i++) {
-		ret = drm_property_add_enum(property, i,
-				      props[i].type,
-				      props[i].name);
-		if (ret) {
-			drm_property_destroy(dev, property);
-			return NULL;
-		}
-	}
-
-	return property;
-}
-EXPORT_SYMBOL(drm_property_create_enum);
-
-/**
- * drm_property_create_bitmask - create a new bitmask property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @props: enumeration lists with property bitflags
- * @num_props: size of the @props array
- * @supported_bits: bitmask of all supported enumeration values
- *
- * This creates a new bitmask drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Compared to plain enumeration properties userspace is allowed to set any
- * or'ed together combination of the predefined property bitflag values
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
-					 int flags, const char *name,
-					 const struct drm_prop_enum_list *props,
-					 int num_props,
-					 uint64_t supported_bits)
-{
-	struct drm_property *property;
-	int i, ret, index = 0;
-	int num_values = hweight64(supported_bits);
-
-	flags |= DRM_MODE_PROP_BITMASK;
-
-	property = drm_property_create(dev, flags, name, num_values);
-	if (!property)
-		return NULL;
-	for (i = 0; i < num_props; i++) {
-		if (!(supported_bits & (1ULL << props[i].type)))
-			continue;
-
-		if (WARN_ON(index >= num_values)) {
-			drm_property_destroy(dev, property);
-			return NULL;
-		}
-
-		ret = drm_property_add_enum(property, index++,
-				      props[i].type,
-				      props[i].name);
-		if (ret) {
-			drm_property_destroy(dev, property);
-			return NULL;
-		}
-	}
-
-	return property;
-}
-EXPORT_SYMBOL(drm_property_create_bitmask);
-
-static struct drm_property *property_create_range(struct drm_device *dev,
-					 int flags, const char *name,
-					 uint64_t min, uint64_t max)
-{
-	struct drm_property *property;
-
-	property = drm_property_create(dev, flags, name, 2);
-	if (!property)
-		return NULL;
-
-	property->values[0] = min;
-	property->values[1] = max;
-
-	return property;
-}
-
-/**
- * drm_property_create_range - create a new unsigned ranged property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @min: minimum value of the property
- * @max: maximum value of the property
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Userspace is allowed to set any unsigned integer value in the (min, max)
- * range inclusive.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
-					 const char *name,
-					 uint64_t min, uint64_t max)
-{
-	return property_create_range(dev, DRM_MODE_PROP_RANGE | flags,
-			name, min, max);
-}
-EXPORT_SYMBOL(drm_property_create_range);
-
-/**
- * drm_property_create_signed_range - create a new signed ranged property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @min: minimum value of the property
- * @max: maximum value of the property
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Userspace is allowed to set any signed integer value in the (min, max)
- * range inclusive.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
-					 int flags, const char *name,
-					 int64_t min, int64_t max)
-{
-	return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags,
-			name, I642U64(min), I642U64(max));
-}
-EXPORT_SYMBOL(drm_property_create_signed_range);
-
-/**
- * drm_property_create_object - create a new object property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @type: object type from DRM_MODE_OBJECT_* defines
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Userspace is only allowed to set this to any property value of the given
- * @type. Only useful for atomic properties, which is enforced.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_object(struct drm_device *dev,
-					 int flags, const char *name, uint32_t type)
-{
-	struct drm_property *property;
-
-	flags |= DRM_MODE_PROP_OBJECT;
-
-	if (WARN_ON(!(flags & DRM_MODE_PROP_ATOMIC)))
-		return NULL;
-
-	property = drm_property_create(dev, flags, name, 1);
-	if (!property)
-		return NULL;
-
-	property->values[0] = type;
-
-	return property;
-}
-EXPORT_SYMBOL(drm_property_create_object);
-
-/**
- * drm_property_create_bool - create a new boolean property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * This is implemented as a ranged property with only {0, 1} as valid values.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
-					 const char *name)
-{
-	return drm_property_create_range(dev, flags, name, 0, 1);
-}
-EXPORT_SYMBOL(drm_property_create_bool);
-
-/**
- * drm_property_add_enum - add a possible value to an enumeration property
- * @property: enumeration property to change
- * @index: index of the new enumeration
- * @value: value of the new enumeration
- * @name: symbolic name of the new enumeration
- *
- * This functions adds enumerations to a property.
- *
- * It's use is deprecated, drivers should use one of the more specific helpers
- * to directly create the property with all enumerations already attached.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_property_add_enum(struct drm_property *property, int index,
-			  uint64_t value, const char *name)
-{
-	struct drm_property_enum *prop_enum;
-
-	if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
-			drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
-		return -EINVAL;
-
-	/*
-	 * Bitmask enum properties have the additional constraint of values
-	 * from 0 to 63
-	 */
-	if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
-			(value > 63))
-		return -EINVAL;
-
-	if (!list_empty(&property->enum_list)) {
-		list_for_each_entry(prop_enum, &property->enum_list, head) {
-			if (prop_enum->value == value) {
-				strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
-				prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
-				return 0;
-			}
-		}
-	}
-
-	prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
-	if (!prop_enum)
-		return -ENOMEM;
-
-	strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
-	prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
-	prop_enum->value = value;
-
-	property->values[index] = value;
-	list_add_tail(&prop_enum->head, &property->enum_list);
-	return 0;
-}
-EXPORT_SYMBOL(drm_property_add_enum);
-
-/**
- * drm_property_destroy - destroy a drm property
- * @dev: drm device
- * @property: property to destry
- *
- * This function frees a property including any attached resources like
- * enumeration values.
- */
-void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
-{
-	struct drm_property_enum *prop_enum, *pt;
-
-	list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
-		list_del(&prop_enum->head);
-		kfree(prop_enum);
-	}
-
-	if (property->num_values)
-		kfree(property->values);
-	drm_mode_object_unregister(dev, &property->base);
-	list_del(&property->head);
-	kfree(property);
-}
-EXPORT_SYMBOL(drm_property_destroy);
-
-/**
- * drm_object_attach_property - attach a property to a modeset object
- * @obj: drm modeset object
- * @property: property to attach
- * @init_val: initial value of the property
- *
- * This attaches the given property to the modeset object with the given initial
- * value. Currently this function cannot fail since the properties are stored in
- * a statically sized array.
- */
-void drm_object_attach_property(struct drm_mode_object *obj,
-				struct drm_property *property,
-				uint64_t init_val)
-{
-	int count = obj->properties->count;
-
-	if (count == DRM_OBJECT_MAX_PROPERTY) {
-		WARN(1, "Failed to attach object property (type: 0x%x). Please "
-			"increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
-			"you see this message on the same object type.\n",
-			obj->type);
-		return;
-	}
-
-	obj->properties->properties[count] = property;
-	obj->properties->values[count] = init_val;
-	obj->properties->count++;
-	if (property->flags & DRM_MODE_PROP_ATOMIC)
-		obj->properties->atomic_count++;
-}
-EXPORT_SYMBOL(drm_object_attach_property);
-
-/**
- * drm_object_property_set_value - set the value of a property
- * @obj: drm mode object to set property value for
- * @property: property to set
- * @val: value the property should be set to
- *
- * This functions sets a given property on a given object. This function only
- * changes the software state of the property, it does not call into the
- * driver's ->set_property callback.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_object_property_set_value(struct drm_mode_object *obj,
-				  struct drm_property *property, uint64_t val)
-{
-	int i;
-
-	for (i = 0; i < obj->properties->count; i++) {
-		if (obj->properties->properties[i] == property) {
-			obj->properties->values[i] = val;
-			return 0;
-		}
-	}
-
-	return -EINVAL;
-}
-EXPORT_SYMBOL(drm_object_property_set_value);
-
-/**
- * drm_object_property_get_value - retrieve the value of a property
- * @obj: drm mode object to get property value from
- * @property: property to retrieve
- * @val: storage for the property value
- *
- * This function retrieves the softare state of the given property for the given
- * property. Since there is no driver callback to retrieve the current property
- * value this might be out of sync with the hardware, depending upon the driver
- * and property.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_object_property_get_value(struct drm_mode_object *obj,
-				  struct drm_property *property, uint64_t *val)
-{
-	int i;
-
-	/* read-only properties bypass atomic mechanism and still store
-	 * their value in obj->properties->values[].. mostly to avoid
-	 * having to deal w/ EDID and similar props in atomic paths:
-	 */
-	if (drm_core_check_feature(property->dev, DRIVER_ATOMIC) &&
-			!(property->flags & DRM_MODE_PROP_IMMUTABLE))
-		return drm_atomic_get_property(obj, property, val);
-
-	for (i = 0; i < obj->properties->count; i++) {
-		if (obj->properties->properties[i] == property) {
-			*val = obj->properties->values[i];
-			return 0;
-		}
-	}
-
-	return -EINVAL;
-}
-EXPORT_SYMBOL(drm_object_property_get_value);
-
-/**
- * drm_mode_getproperty_ioctl - get the property metadata
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function retrieves the metadata for a given property, like the different
- * possible values for an enum property or the limits for a range property.
- *
- * Blob properties are special
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getproperty_ioctl(struct drm_device *dev,
-			       void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_get_property *out_resp = data;
-	struct drm_property *property;
-	int enum_count = 0;
-	int value_count = 0;
-	int ret = 0, i;
-	int copied;
-	struct drm_property_enum *prop_enum;
-	struct drm_mode_property_enum __user *enum_ptr;
-	uint64_t __user *values_ptr;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	drm_modeset_lock_all(dev);
-	property = drm_property_find(dev, out_resp->prop_id);
-	if (!property) {
-		ret = -ENOENT;
-		goto done;
-	}
-
-	if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
-			drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
-		list_for_each_entry(prop_enum, &property->enum_list, head)
-			enum_count++;
-	}
-
-	value_count = property->num_values;
-
-	strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
-	out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
-	out_resp->flags = property->flags;
-
-	if ((out_resp->count_values >= value_count) && value_count) {
-		values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
-		for (i = 0; i < value_count; i++) {
-			if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
-				ret = -EFAULT;
-				goto done;
-			}
-		}
-	}
-	out_resp->count_values = value_count;
-
-	if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
-			drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
-		if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
-			copied = 0;
-			enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
-			list_for_each_entry(prop_enum, &property->enum_list, head) {
-
-				if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
-					ret = -EFAULT;
-					goto done;
-				}
-
-				if (copy_to_user(&enum_ptr[copied].name,
-						 &prop_enum->name, DRM_PROP_NAME_LEN)) {
-					ret = -EFAULT;
-					goto done;
-				}
-				copied++;
-			}
-		}
-		out_resp->count_enum_blobs = enum_count;
-	}
-
-	/*
-	 * NOTE: The idea seems to have been to use this to read all the blob
-	 * property values. But nothing ever added them to the corresponding
-	 * list, userspace always used the special-purpose get_blob ioctl to
-	 * read the value for a blob property. It also doesn't make a lot of
-	 * sense to return values here when everything else is just metadata for
-	 * the property itself.
-	 */
-	if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
-		out_resp->count_enum_blobs = 0;
-done:
-	drm_modeset_unlock_all(dev);
-	return ret;
-}
-
-static void drm_property_free_blob(struct kref *kref)
-{
-	struct drm_property_blob *blob =
-		container_of(kref, struct drm_property_blob, base.refcount);
-
-	mutex_lock(&blob->dev->mode_config.blob_lock);
-	list_del(&blob->head_global);
-	mutex_unlock(&blob->dev->mode_config.blob_lock);
-
-	drm_mode_object_unregister(blob->dev, &blob->base);
-
-	kfree(blob);
-}
-
-/**
- * drm_property_create_blob - Create new blob property
- *
- * Creates a new blob property for a specified DRM device, optionally
- * copying data.
- *
- * @dev: DRM device to create property for
- * @length: Length to allocate for blob data
- * @data: If specified, copies data into blob
- *
- * Returns:
- * New blob property with a single reference on success, or an ERR_PTR
- * value on failure.
- */
-struct drm_property_blob *
-drm_property_create_blob(struct drm_device *dev, size_t length,
-			 const void *data)
-{
-	struct drm_property_blob *blob;
-	int ret;
-
-	if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
-		return ERR_PTR(-EINVAL);
-
-	blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
-	if (!blob)
-		return ERR_PTR(-ENOMEM);
-
-	/* This must be explicitly initialised, so we can safely call list_del
-	 * on it in the removal handler, even if it isn't in a file list. */
-	INIT_LIST_HEAD(&blob->head_file);
-	blob->length = length;
-	blob->dev = dev;
-
-	if (data)
-		memcpy(blob->data, data, length);
-
-	ret = drm_mode_object_get_reg(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
-				      true, drm_property_free_blob);
-	if (ret) {
-		kfree(blob);
-		return ERR_PTR(-EINVAL);
-	}
-
-	mutex_lock(&dev->mode_config.blob_lock);
-	list_add_tail(&blob->head_global,
-	              &dev->mode_config.property_blob_list);
-	mutex_unlock(&dev->mode_config.blob_lock);
-
-	return blob;
-}
-EXPORT_SYMBOL(drm_property_create_blob);
-
-/**
- * drm_property_unreference_blob - Unreference a blob property
- *
- * Drop a reference on a blob property. May free the object.
- *
- * @blob: Pointer to blob property
- */
-void drm_property_unreference_blob(struct drm_property_blob *blob)
-{
-	if (!blob)
-		return;
-
-	drm_mode_object_unreference(&blob->base);
-}
-EXPORT_SYMBOL(drm_property_unreference_blob);
-
-/**
- * drm_property_destroy_user_blobs - destroy all blobs created by this client
- * @dev:       DRM device
- * @file_priv: destroy all blobs owned by this file handle
- */
-void drm_property_destroy_user_blobs(struct drm_device *dev,
-				     struct drm_file *file_priv)
-{
-	struct drm_property_blob *blob, *bt;
-
-	/*
-	 * When the file gets released that means no one else can access the
-	 * blob list any more, so no need to grab dev->blob_lock.
-	 */
-	list_for_each_entry_safe(blob, bt, &file_priv->blobs, head_file) {
-		list_del_init(&blob->head_file);
-		drm_property_unreference_blob(blob);
-	}
-}
-
-/**
- * drm_property_reference_blob - Take a reference on an existing property
- *
- * Take a new reference on an existing blob property.
- *
- * @blob: Pointer to blob property
- */
-struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob)
-{
-	drm_mode_object_reference(&blob->base);
-	return blob;
-}
-EXPORT_SYMBOL(drm_property_reference_blob);
-
-/**
- * drm_property_lookup_blob - look up a blob property and take a reference
- * @dev: drm device
- * @id: id of the blob property
- *
- * If successful, this takes an additional reference to the blob property.
- * callers need to make sure to eventually unreference the returned property
- * again, using @drm_property_unreference_blob.
- */
-struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
-					           uint32_t id)
-{
-	struct drm_mode_object *obj;
-	struct drm_property_blob *blob = NULL;
-
-	obj = _object_find(dev, id, DRM_MODE_OBJECT_BLOB);
-	if (obj)
-		blob = obj_to_blob(obj);
-	return blob;
-}
-EXPORT_SYMBOL(drm_property_lookup_blob);
-
-/**
- * drm_property_replace_global_blob - atomically replace existing blob property
- * @dev: drm device
- * @replace: location of blob property pointer to be replaced
- * @length: length of data for new blob, or 0 for no data
- * @data: content for new blob, or NULL for no data
- * @obj_holds_id: optional object for property holding blob ID
- * @prop_holds_id: optional property holding blob ID
- * @return 0 on success or error on failure
- *
- * This function will atomically replace a global property in the blob list,
- * optionally updating a property which holds the ID of that property. It is
- * guaranteed to be atomic: no caller will be allowed to see intermediate
- * results, and either the entire operation will succeed and clean up the
- * previous property, or it will fail and the state will be unchanged.
- *
- * If length is 0 or data is NULL, no new blob will be created, and the holding
- * property, if specified, will be set to 0.
- *
- * Access to the replace pointer is assumed to be protected by the caller, e.g.
- * by holding the relevant modesetting object lock for its parent.
- *
- * For example, a drm_connector has a 'PATH' property, which contains the ID
- * of a blob property with the value of the MST path information. Calling this
- * function with replace pointing to the connector's path_blob_ptr, length and
- * data set for the new path information, obj_holds_id set to the connector's
- * base object, and prop_holds_id set to the path property name, will perform
- * a completely atomic update. The access to path_blob_ptr is protected by the
- * caller holding a lock on the connector.
- */
-static int drm_property_replace_global_blob(struct drm_device *dev,
-                                            struct drm_property_blob **replace,
-                                            size_t length,
-                                            const void *data,
-                                            struct drm_mode_object *obj_holds_id,
-                                            struct drm_property *prop_holds_id)
-{
-	struct drm_property_blob *new_blob = NULL;
-	struct drm_property_blob *old_blob = NULL;
-	int ret;
-
-	WARN_ON(replace == NULL);
-
-	old_blob = *replace;
-
-	if (length && data) {
-		new_blob = drm_property_create_blob(dev, length, data);
-		if (IS_ERR(new_blob))
-			return PTR_ERR(new_blob);
-	}
-
-	/* This does not need to be synchronised with blob_lock, as the
-	 * get_properties ioctl locks all modesetting objects, and
-	 * obj_holds_id must be locked before calling here, so we cannot
-	 * have its value out of sync with the list membership modified
-	 * below under blob_lock. */
-	if (obj_holds_id) {
-		ret = drm_object_property_set_value(obj_holds_id,
-						    prop_holds_id,
-						    new_blob ?
-						        new_blob->base.id : 0);
-		if (ret != 0)
-			goto err_created;
-	}
-
-	drm_property_unreference_blob(old_blob);
-	*replace = new_blob;
-
-	return 0;
-
-err_created:
-	drm_property_unreference_blob(new_blob);
-	return ret;
-}
-
-/**
- * drm_mode_getblob_ioctl - get the contents of a blob property value
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function retrieves the contents of a blob property. The value stored in
- * an object's blob property is just a normal modeset object id.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getblob_ioctl(struct drm_device *dev,
-			   void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_get_blob *out_resp = data;
-	struct drm_property_blob *blob;
-	int ret = 0;
-	void __user *blob_ptr;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	blob = drm_property_lookup_blob(dev, out_resp->blob_id);
-	if (!blob)
-		return -ENOENT;
-
-	if (out_resp->length == blob->length) {
-		blob_ptr = (void __user *)(unsigned long)out_resp->data;
-		if (copy_to_user(blob_ptr, blob->data, blob->length)) {
-			ret = -EFAULT;
-			goto unref;
-		}
-	}
-	out_resp->length = blob->length;
-unref:
-	drm_property_unreference_blob(blob);
-
-	return ret;
-}
-
-/**
- * drm_mode_createblob_ioctl - create a new blob property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function creates a new blob property with user-defined values. In order
- * to give us sensible validation and checking when creating, rather than at
- * every potential use, we also require a type to be provided upfront.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_createblob_ioctl(struct drm_device *dev,
-			      void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_create_blob *out_resp = data;
-	struct drm_property_blob *blob;
-	void __user *blob_ptr;
-	int ret = 0;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	blob = drm_property_create_blob(dev, out_resp->length, NULL);
-	if (IS_ERR(blob))
-		return PTR_ERR(blob);
-
-	blob_ptr = (void __user *)(unsigned long)out_resp->data;
-	if (copy_from_user(blob->data, blob_ptr, out_resp->length)) {
-		ret = -EFAULT;
-		goto out_blob;
-	}
-
-	/* Dropping the lock between create_blob and our access here is safe
-	 * as only the same file_priv can remove the blob; at this point, it is
-	 * not associated with any file_priv. */
-	mutex_lock(&dev->mode_config.blob_lock);
-	out_resp->blob_id = blob->base.id;
-	list_add_tail(&blob->head_file, &file_priv->blobs);
-	mutex_unlock(&dev->mode_config.blob_lock);
-
-	return 0;
-
-out_blob:
-	drm_property_unreference_blob(blob);
-	return ret;
-}
-
-/**
- * drm_mode_destroyblob_ioctl - destroy a user blob property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Destroy an existing user-defined blob property.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_destroyblob_ioctl(struct drm_device *dev,
-			       void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_destroy_blob *out_resp = data;
-	struct drm_property_blob *blob = NULL, *bt;
-	bool found = false;
-	int ret = 0;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	blob = drm_property_lookup_blob(dev, out_resp->blob_id);
-	if (!blob)
-		return -ENOENT;
-
-	mutex_lock(&dev->mode_config.blob_lock);
-	/* Ensure the property was actually created by this user. */
-	list_for_each_entry(bt, &file_priv->blobs, head_file) {
-		if (bt == blob) {
-			found = true;
-			break;
-		}
-	}
-
-	if (!found) {
-		ret = -EPERM;
-		goto err;
-	}
-
-	/* We must drop head_file here, because we may not be the last
-	 * reference on the blob. */
-	list_del_init(&blob->head_file);
-	mutex_unlock(&dev->mode_config.blob_lock);
-
-	/* One reference from lookup, and one from the filp. */
-	drm_property_unreference_blob(blob);
-	drm_property_unreference_blob(blob);
-
-	return 0;
-
-err:
-	mutex_unlock(&dev->mode_config.blob_lock);
-	drm_property_unreference_blob(blob);
-
-	return ret;
-}
-
-/**
- * drm_mode_connector_set_path_property - set tile property on connector
- * @connector: connector to set property on.
- * @path: path to use for property; must not be NULL.
- *
- * This creates a property to expose to userspace to specify a
- * connector path. This is mainly used for DisplayPort MST where
- * connectors have a topology and we want to allow userspace to give
- * them more meaningful names.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_connector_set_path_property(struct drm_connector *connector,
-					 const char *path)
-{
-	struct drm_device *dev = connector->dev;
-	int ret;
-
-	ret = drm_property_replace_global_blob(dev,
-	                                       &connector->path_blob_ptr,
-	                                       strlen(path) + 1,
-	                                       path,
-	                                       &connector->base,
-	                                       dev->mode_config.path_property);
-	return ret;
-}
-EXPORT_SYMBOL(drm_mode_connector_set_path_property);
-
-/**
- * drm_mode_connector_set_tile_property - set tile property on connector
- * @connector: connector to set property on.
- *
- * This looks up the tile information for a connector, and creates a
- * property for userspace to parse if it exists. The property is of
- * the form of 8 integers using ':' as a separator.
- *
- * Returns:
- * Zero on success, errno on failure.
- */
-int drm_mode_connector_set_tile_property(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	char tile[256];
-	int ret;
-
-	if (!connector->has_tile) {
-		ret  = drm_property_replace_global_blob(dev,
-		                                        &connector->tile_blob_ptr,
-		                                        0,
-		                                        NULL,
-		                                        &connector->base,
-		                                        dev->mode_config.tile_property);
-		return ret;
-	}
-
-	snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
-		 connector->tile_group->id, connector->tile_is_single_monitor,
-		 connector->num_h_tile, connector->num_v_tile,
-		 connector->tile_h_loc, connector->tile_v_loc,
-		 connector->tile_h_size, connector->tile_v_size);
-
-	ret = drm_property_replace_global_blob(dev,
-	                                       &connector->tile_blob_ptr,
-	                                       strlen(tile) + 1,
-	                                       tile,
-	                                       &connector->base,
-	                                       dev->mode_config.tile_property);
-	return ret;
-}
-EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
-
-/**
- * drm_mode_connector_update_edid_property - update the edid property of a connector
- * @connector: drm connector
- * @edid: new value of the edid property
- *
- * This function creates a new blob modeset object and assigns its id to the
- * connector's edid property.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_connector_update_edid_property(struct drm_connector *connector,
-					    const struct edid *edid)
-{
-	struct drm_device *dev = connector->dev;
-	size_t size = 0;
-	int ret;
-
-	/* ignore requests to set edid when overridden */
-	if (connector->override_edid)
-		return 0;
-
-	if (edid)
-		size = EDID_LENGTH * (1 + edid->extensions);
-
-	ret = drm_property_replace_global_blob(dev,
-					       &connector->edid_blob_ptr,
-	                                       size,
-	                                       edid,
-	                                       &connector->base,
-	                                       dev->mode_config.edid_property);
-	return ret;
-}
-EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
-
-/* Some properties could refer to dynamic refcnt'd objects, or things that
- * need special locking to handle lifetime issues (ie. to ensure the prop
- * value doesn't become invalid part way through the property update due to
- * race).  The value returned by reference via 'obj' should be passed back
- * to drm_property_change_valid_put() after the property is set (and the
- * object to which the property is attached has a chance to take it's own
- * reference).
- */
-bool drm_property_change_valid_get(struct drm_property *property,
-					 uint64_t value, struct drm_mode_object **ref)
-{
-	int i;
-
-	if (property->flags & DRM_MODE_PROP_IMMUTABLE)
-		return false;
-
-	*ref = NULL;
-
-	if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
-		if (value < property->values[0] || value > property->values[1])
-			return false;
-		return true;
-	} else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
-		int64_t svalue = U642I64(value);
-
-		if (svalue < U642I64(property->values[0]) ||
-				svalue > U642I64(property->values[1]))
-			return false;
-		return true;
-	} else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
-		uint64_t valid_mask = 0;
-
-		for (i = 0; i < property->num_values; i++)
-			valid_mask |= (1ULL << property->values[i]);
-		return !(value & ~valid_mask);
-	} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
-		struct drm_property_blob *blob;
-
-		if (value == 0)
-			return true;
-
-		blob = drm_property_lookup_blob(property->dev, value);
-		if (blob) {
-			*ref = &blob->base;
-			return true;
-		} else {
-			return false;
-		}
-	} else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
-		/* a zero value for an object property translates to null: */
-		if (value == 0)
-			return true;
-
-		*ref = _object_find(property->dev, value, property->values[0]);
-		return *ref != NULL;
-	}
-
-	for (i = 0; i < property->num_values; i++)
-		if (property->values[i] == value)
-			return true;
-	return false;
-}
-
-void drm_property_change_valid_put(struct drm_property *property,
-		struct drm_mode_object *ref)
-{
-	if (!ref)
-		return;
-
-	if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
-		drm_mode_object_unreference(ref);
-	} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
-		drm_property_unreference_blob(obj_to_blob(ref));
-}
-
-/**
- * drm_mode_connector_property_set_ioctl - set the current value of a connector property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function sets the current value for a connectors's property. It also
- * calls into a driver's ->set_property callback to update the hardware state
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
-				       void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_connector_set_property *conn_set_prop = data;
-	struct drm_mode_obj_set_property obj_set_prop = {
-		.value = conn_set_prop->value,
-		.prop_id = conn_set_prop->prop_id,
-		.obj_id = conn_set_prop->connector_id,
-		.obj_type = DRM_MODE_OBJECT_CONNECTOR
-	};
-
-	/* It does all the locking and checking we need */
-	return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
-}
-
-static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
-					   struct drm_property *property,
-					   uint64_t value)
-{
-	int ret = -EINVAL;
-	struct drm_connector *connector = obj_to_connector(obj);
-
-	/* Do DPMS ourselves */
-	if (property == connector->dev->mode_config.dpms_property) {
-		ret = (*connector->funcs->dpms)(connector, (int)value);
-	} else if (connector->funcs->set_property)
-		ret = connector->funcs->set_property(connector, property, value);
-
-	/* store the property value if successful */
-	if (!ret)
-		drm_object_property_set_value(&connector->base, property, value);
-	return ret;
-}
-
-static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
-				      struct drm_property *property,
-				      uint64_t value)
+int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+			       struct drm_property *property,
+			       uint64_t value)
 {
 	int ret = -EINVAL;
 	struct drm_crtc *crtc = obj_to_crtc(obj);
@@ -5004,497 +904,6 @@
 }
 
 /**
- * drm_mode_plane_set_obj_prop - set the value of a property
- * @plane: drm plane object to set property value for
- * @property: property to set
- * @value: value the property should be set to
- *
- * This functions sets a given property on a given plane object. This function
- * calls the driver's ->set_property callback and changes the software state of
- * the property if the callback succeeds.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
-				struct drm_property *property,
-				uint64_t value)
-{
-	int ret = -EINVAL;
-	struct drm_mode_object *obj = &plane->base;
-
-	if (plane->funcs->set_property)
-		ret = plane->funcs->set_property(plane, property, value);
-	if (!ret)
-		drm_object_property_set_value(obj, property, value);
-
-	return ret;
-}
-EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
-
-/**
- * drm_mode_obj_get_properties_ioctl - get the current value of a object's property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function retrieves the current value for an object's property. Compared
- * to the connector specific ioctl this one is extended to also work on crtc and
- * plane objects.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
-				      struct drm_file *file_priv)
-{
-	struct drm_mode_obj_get_properties *arg = data;
-	struct drm_mode_object *obj;
-	int ret = 0;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	drm_modeset_lock_all(dev);
-
-	obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
-	if (!obj) {
-		ret = -ENOENT;
-		goto out;
-	}
-	if (!obj->properties) {
-		ret = -EINVAL;
-		goto out_unref;
-	}
-
-	ret = get_properties(obj, file_priv->atomic,
-			(uint32_t __user *)(unsigned long)(arg->props_ptr),
-			(uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
-			&arg->count_props);
-
-out_unref:
-	drm_mode_object_unreference(obj);
-out:
-	drm_modeset_unlock_all(dev);
-	return ret;
-}
-
-/**
- * drm_mode_obj_set_property_ioctl - set the current value of an object's property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function sets the current value for an object's property. It also calls
- * into a driver's ->set_property callback to update the hardware state.
- * Compared to the connector specific ioctl this one is extended to also work on
- * crtc and plane objects.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
-				    struct drm_file *file_priv)
-{
-	struct drm_mode_obj_set_property *arg = data;
-	struct drm_mode_object *arg_obj;
-	struct drm_mode_object *prop_obj;
-	struct drm_property *property;
-	int i, ret = -EINVAL;
-	struct drm_mode_object *ref;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	drm_modeset_lock_all(dev);
-
-	arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
-	if (!arg_obj) {
-		ret = -ENOENT;
-		goto out;
-	}
-	if (!arg_obj->properties)
-		goto out_unref;
-
-	for (i = 0; i < arg_obj->properties->count; i++)
-		if (arg_obj->properties->properties[i]->base.id == arg->prop_id)
-			break;
-
-	if (i == arg_obj->properties->count)
-		goto out_unref;
-
-	prop_obj = drm_mode_object_find(dev, arg->prop_id,
-					DRM_MODE_OBJECT_PROPERTY);
-	if (!prop_obj) {
-		ret = -ENOENT;
-		goto out_unref;
-	}
-	property = obj_to_property(prop_obj);
-
-	if (!drm_property_change_valid_get(property, arg->value, &ref))
-		goto out_unref;
-
-	switch (arg_obj->type) {
-	case DRM_MODE_OBJECT_CONNECTOR:
-		ret = drm_mode_connector_set_obj_prop(arg_obj, property,
-						      arg->value);
-		break;
-	case DRM_MODE_OBJECT_CRTC:
-		ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
-		break;
-	case DRM_MODE_OBJECT_PLANE:
-		ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
-						  property, arg->value);
-		break;
-	}
-
-	drm_property_change_valid_put(property, ref);
-
-out_unref:
-	drm_mode_object_unreference(arg_obj);
-out:
-	drm_modeset_unlock_all(dev);
-	return ret;
-}
-
-/**
- * drm_mode_connector_attach_encoder - attach a connector to an encoder
- * @connector: connector to attach
- * @encoder: encoder to attach @connector to
- *
- * This function links up a connector to an encoder. Note that the routing
- * restrictions between encoders and crtcs are exposed to userspace through the
- * possible_clones and possible_crtcs bitmasks.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_connector_attach_encoder(struct drm_connector *connector,
-				      struct drm_encoder *encoder)
-{
-	int i;
-
-	/*
-	 * In the past, drivers have attempted to model the static association
-	 * of connector to encoder in simple connector/encoder devices using a
-	 * direct assignment of connector->encoder = encoder. This connection
-	 * is a logical one and the responsibility of the core, so drivers are
-	 * expected not to mess with this.
-	 *
-	 * Note that the error return should've been enough here, but a large
-	 * majority of drivers ignores the return value, so add in a big WARN
-	 * to get people's attention.
-	 */
-	if (WARN_ON(connector->encoder))
-		return -EINVAL;
-
-	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-		if (connector->encoder_ids[i] == 0) {
-			connector->encoder_ids[i] = encoder->base.id;
-			return 0;
-		}
-	}
-	return -ENOMEM;
-}
-EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
-
-/**
- * drm_mode_crtc_set_gamma_size - set the gamma table size
- * @crtc: CRTC to set the gamma table size for
- * @gamma_size: size of the gamma table
- *
- * Drivers which support gamma tables should set this to the supported gamma
- * table size when initializing the CRTC. Currently the drm core only supports a
- * fixed gamma table size.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
-				 int gamma_size)
-{
-	uint16_t *r_base, *g_base, *b_base;
-	int i;
-
-	crtc->gamma_size = gamma_size;
-
-	crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
-				    GFP_KERNEL);
-	if (!crtc->gamma_store) {
-		crtc->gamma_size = 0;
-		return -ENOMEM;
-	}
-
-	r_base = crtc->gamma_store;
-	g_base = r_base + gamma_size;
-	b_base = g_base + gamma_size;
-	for (i = 0; i < gamma_size; i++) {
-		r_base[i] = i << 8;
-		g_base[i] = i << 8;
-		b_base[i] = i << 8;
-	}
-
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
-
-/**
- * drm_mode_gamma_set_ioctl - set the gamma table
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Set the gamma table of a CRTC to the one passed in by the user. Userspace can
- * inquire the required gamma table size through drm_mode_gamma_get_ioctl.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_gamma_set_ioctl(struct drm_device *dev,
-			     void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_crtc_lut *crtc_lut = data;
-	struct drm_crtc *crtc;
-	void *r_base, *g_base, *b_base;
-	int size;
-	int ret = 0;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	drm_modeset_lock_all(dev);
-	crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
-	if (!crtc) {
-		ret = -ENOENT;
-		goto out;
-	}
-
-	if (crtc->funcs->gamma_set == NULL) {
-		ret = -ENOSYS;
-		goto out;
-	}
-
-	/* memcpy into gamma store */
-	if (crtc_lut->gamma_size != crtc->gamma_size) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	size = crtc_lut->gamma_size * (sizeof(uint16_t));
-	r_base = crtc->gamma_store;
-	if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	g_base = r_base + size;
-	if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	b_base = g_base + size;
-	if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
-
-out:
-	drm_modeset_unlock_all(dev);
-	return ret;
-
-}
-
-/**
- * drm_mode_gamma_get_ioctl - get the gamma table
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Copy the current gamma table into the storage provided. This also provides
- * the gamma table size the driver expects, which can be used to size the
- * allocated storage.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_gamma_get_ioctl(struct drm_device *dev,
-			     void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_crtc_lut *crtc_lut = data;
-	struct drm_crtc *crtc;
-	void *r_base, *g_base, *b_base;
-	int size;
-	int ret = 0;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	drm_modeset_lock_all(dev);
-	crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
-	if (!crtc) {
-		ret = -ENOENT;
-		goto out;
-	}
-
-	/* memcpy into gamma store */
-	if (crtc_lut->gamma_size != crtc->gamma_size) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	size = crtc_lut->gamma_size * (sizeof(uint16_t));
-	r_base = crtc->gamma_store;
-	if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	g_base = r_base + size;
-	if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	b_base = g_base + size;
-	if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
-		ret = -EFAULT;
-		goto out;
-	}
-out:
-	drm_modeset_unlock_all(dev);
-	return ret;
-}
-
-/**
- * drm_mode_page_flip_ioctl - schedule an asynchronous fb update
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This schedules an asynchronous update on a given CRTC, called page flip.
- * Optionally a drm event is generated to signal the completion of the event.
- * Generic drivers cannot assume that a pageflip with changed framebuffer
- * properties (including driver specific metadata like tiling layout) will work,
- * but some drivers support e.g. pixel format changes through the pageflip
- * ioctl.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_page_flip_ioctl(struct drm_device *dev,
-			     void *data, struct drm_file *file_priv)
-{
-	struct drm_mode_crtc_page_flip *page_flip = data;
-	struct drm_crtc *crtc;
-	struct drm_framebuffer *fb = NULL;
-	struct drm_pending_vblank_event *e = NULL;
-	int ret = -EINVAL;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
-	    page_flip->reserved != 0)
-		return -EINVAL;
-
-	if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
-		return -EINVAL;
-
-	crtc = drm_crtc_find(dev, page_flip->crtc_id);
-	if (!crtc)
-		return -ENOENT;
-
-	drm_modeset_lock_crtc(crtc, crtc->primary);
-	if (crtc->primary->fb == NULL) {
-		/* The framebuffer is currently unbound, presumably
-		 * due to a hotplug event, that userspace has not
-		 * yet discovered.
-		 */
-		ret = -EBUSY;
-		goto out;
-	}
-
-	if (crtc->funcs->page_flip == NULL)
-		goto out;
-
-	fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
-	if (!fb) {
-		ret = -ENOENT;
-		goto out;
-	}
-
-	if (crtc->state) {
-		const struct drm_plane_state *state = crtc->primary->state;
-
-		ret = check_src_coords(state->src_x, state->src_y,
-				       state->src_w, state->src_h, fb);
-	} else {
-		ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
-	}
-	if (ret)
-		goto out;
-
-	if (crtc->primary->fb->pixel_format != fb->pixel_format) {
-		DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
-		ret = -EINVAL;
-		goto out;
-	}
-
-	if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-		e = kzalloc(sizeof *e, GFP_KERNEL);
-		if (!e) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
-		e->event.base.length = sizeof(e->event);
-		e->event.user_data = page_flip->user_data;
-		ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
-		if (ret) {
-			kfree(e);
-			goto out;
-		}
-	}
-
-	crtc->primary->old_fb = crtc->primary->fb;
-	ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
-	if (ret) {
-		if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)
-			drm_event_cancel_free(dev, &e->base);
-		/* Keep the old fb, don't unref it. */
-		crtc->primary->old_fb = NULL;
-	} else {
-		crtc->primary->fb = fb;
-		/* Unref only the old framebuffer. */
-		fb = NULL;
-	}
-
-out:
-	if (fb)
-		drm_framebuffer_unreference(fb);
-	if (crtc->primary->old_fb)
-		drm_framebuffer_unreference(crtc->primary->old_fb);
-	crtc->primary->old_fb = NULL;
-	drm_modeset_unlock_crtc(crtc);
-
-	return ret;
-}
-
-/**
  * drm_mode_config_reset - call ->reset callbacks
  * @dev: drm device
  *
@@ -5639,37 +1048,6 @@
 }
 
 /**
- * drm_rotation_simplify() - Try to simplify the rotation
- * @rotation: Rotation to be simplified
- * @supported_rotations: Supported rotations
- *
- * Attempt to simplify the rotation to a form that is supported.
- * Eg. if the hardware supports everything except DRM_REFLECT_X
- * one could call this function like this:
- *
- * drm_rotation_simplify(rotation, BIT(DRM_ROTATE_0) |
- *                       BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_180) |
- *                       BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_Y));
- *
- * to eliminate the DRM_ROTATE_X flag. Depending on what kind of
- * transforms the hardware supports, this function may not
- * be able to produce a supported transform, so the caller should
- * check the result afterwards.
- */
-unsigned int drm_rotation_simplify(unsigned int rotation,
-				   unsigned int supported_rotations)
-{
-	if (rotation & ~supported_rotations) {
-		rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y);
-		rotation = (rotation & DRM_REFLECT_MASK) |
-		           BIT((ffs(rotation & DRM_ROTATE_MASK) + 1) % 4);
-	}
-
-	return rotation;
-}
-EXPORT_SYMBOL(drm_rotation_simplify);
-
-/**
  * drm_mode_config_init - initialize DRM mode_configuration structure
  * @dev: DRM device
  *
@@ -5785,24 +1163,6 @@
 }
 EXPORT_SYMBOL(drm_mode_config_cleanup);
 
-struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
-						       unsigned int supported_rotations)
-{
-	static const struct drm_prop_enum_list props[] = {
-		{ DRM_ROTATE_0,   "rotate-0" },
-		{ DRM_ROTATE_90,  "rotate-90" },
-		{ DRM_ROTATE_180, "rotate-180" },
-		{ DRM_ROTATE_270, "rotate-270" },
-		{ DRM_REFLECT_X,  "reflect-x" },
-		{ DRM_REFLECT_Y,  "reflect-y" },
-	};
-
-	return drm_property_create_bitmask(dev, 0, "rotation",
-					   props, ARRAY_SIZE(props),
-					   supported_rotations);
-}
-EXPORT_SYMBOL(drm_mode_create_rotation_property);
-
 /**
  * DOC: Tile group
  *
@@ -5901,48 +1261,3 @@
 	return tg;
 }
 EXPORT_SYMBOL(drm_mode_create_tile_group);
-
-/**
- * drm_crtc_enable_color_mgmt - enable color management properties
- * @crtc: DRM CRTC
- * @degamma_lut_size: the size of the degamma lut (before CSC)
- * @has_ctm: whether to attach ctm_property for CSC matrix
- * @gamma_lut_size: the size of the gamma lut (after CSC)
- *
- * This function lets the driver enable the color correction
- * properties on a CRTC. This includes 3 degamma, csc and gamma
- * properties that userspace can set and 2 size properties to inform
- * the userspace of the lut sizes. Each of the properties are
- * optional. The gamma and degamma properties are only attached if
- * their size is not 0 and ctm_property is only attached if has_ctm is
- * true.
- */
-void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
-				uint degamma_lut_size,
-				bool has_ctm,
-				uint gamma_lut_size)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_mode_config *config = &dev->mode_config;
-
-	if (degamma_lut_size) {
-		drm_object_attach_property(&crtc->base,
-					   config->degamma_lut_property, 0);
-		drm_object_attach_property(&crtc->base,
-					   config->degamma_lut_size_property,
-					   degamma_lut_size);
-	}
-
-	if (has_ctm)
-		drm_object_attach_property(&crtc->base,
-					   config->ctm_property, 0);
-
-	if (gamma_lut_size) {
-		drm_object_attach_property(&crtc->base,
-					   config->gamma_lut_property, 0);
-		drm_object_attach_property(&crtc->base,
-					   config->gamma_lut_size_property,
-					   gamma_lut_size);
-	}
-}
-EXPORT_SYMBOL(drm_crtc_enable_color_mgmt);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 604d3ef..5d2cb13 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -75,35 +75,6 @@
  */
 
 /**
- * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
- * 						connector list
- * @dev: drm device to operate on
- *
- * Some userspace presumes that the first connected connector is the main
- * display, where it's supposed to display e.g. the login screen. For
- * laptops, this should be the main panel. Use this function to sort all
- * (eDP/LVDS) panels to the front of the connector list, instead of
- * painstakingly trying to initialize them in the right order.
- */
-void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
-{
-	struct drm_connector *connector, *tmp;
-	struct list_head panel_list;
-
-	INIT_LIST_HEAD(&panel_list);
-
-	list_for_each_entry_safe(connector, tmp,
-				 &dev->mode_config.connector_list, head) {
-		if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
-		    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
-			list_move_tail(&connector->head, &panel_list);
-	}
-
-	list_splice(&panel_list, &dev->mode_config.connector_list);
-}
-EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
-
-/**
  * drm_helper_encoder_in_use - check if a given encoder is in use
  * @encoder: encoder to check
  *
@@ -913,33 +884,6 @@
 EXPORT_SYMBOL(drm_helper_connector_dpms);
 
 /**
- * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
- * @fb: drm_framebuffer object to fill out
- * @mode_cmd: metadata from the userspace fb creation request
- *
- * This helper can be used in a drivers fb_create callback to pre-fill the fb's
- * metadata fields.
- */
-void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
-				    const struct drm_mode_fb_cmd2 *mode_cmd)
-{
-	int i;
-
-	fb->width = mode_cmd->width;
-	fb->height = mode_cmd->height;
-	for (i = 0; i < 4; i++) {
-		fb->pitches[i] = mode_cmd->pitches[i];
-		fb->offsets[i] = mode_cmd->offsets[i];
-		fb->modifier[i] = mode_cmd->modifier[i];
-	}
-	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
-				    &fb->bits_per_pixel);
-	fb->pixel_format = mode_cmd->pixel_format;
-	fb->flags = mode_cmd->flags;
-}
-EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
-
-/**
  * drm_helper_resume_force_mode - force-restore mode setting configuration
  * @dev: drm_device which should be restored
  *
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
new file mode 100644
index 0000000..28295e5
--- /dev/null
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This header file contains mode setting related functions and definitions
+ * which are only used within the drm kms helper module as internal
+ * implementation details and are not exported to drivers.
+ */
+
+#include <drm/drm_dp_helper.h>
+
+/* drm_fb_helper.c */
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+int drm_fb_helper_modinit(void);
+#else
+static inline int drm_fb_helper_modinit(void)
+{
+	return 0;
+}
+#endif
+
+/* drm_dp_aux_dev.c */
+#ifdef CONFIG_DRM_DP_AUX_CHARDEV
+int drm_dp_aux_dev_init(void);
+void drm_dp_aux_dev_exit(void);
+int drm_dp_aux_register_devnode(struct drm_dp_aux *aux);
+void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux);
+#else
+static inline int drm_dp_aux_dev_init(void)
+{
+	return 0;
+}
+
+static inline void drm_dp_aux_dev_exit(void)
+{
+}
+
+static inline int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
+{
+	return 0;
+}
+
+static inline void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
+{
+}
+#endif
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 0c34e6d..c48ba02 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -33,28 +33,15 @@
 
 
 /* drm_crtc.c */
-void drm_connector_ida_init(void);
-void drm_connector_ida_destroy(void);
-int drm_mode_object_get(struct drm_device *dev,
-			struct drm_mode_object *obj, uint32_t obj_type);
-void drm_mode_object_unregister(struct drm_device *dev,
-				struct drm_mode_object *object);
-bool drm_property_change_valid_get(struct drm_property *property,
-				   uint64_t value,
-				   struct drm_mode_object **ref);
-void drm_property_change_valid_put(struct drm_property *property,
-				   struct drm_mode_object *ref);
-
-int drm_plane_check_pixel_format(const struct drm_plane *plane,
-				 u32 format);
+int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+			       struct drm_property *property,
+			       uint64_t value);
 int drm_crtc_check_viewport(const struct drm_crtc *crtc,
 			    int x, int y,
 			    const struct drm_display_mode *mode,
 			    const struct drm_framebuffer *fb);
 
 void drm_fb_release(struct drm_file *file_priv);
-void drm_property_destroy_user_blobs(struct drm_device *dev,
-				     struct drm_file *file_priv);
 
 /* dumb buffer support IOCTLs */
 int drm_mode_create_dumb_ioctl(struct drm_device *dev,
@@ -64,42 +51,32 @@
 int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
 				void *data, struct drm_file *file_priv);
 
-/* framebuffer IOCTLs */
-extern int drm_mode_addfb(struct drm_device *dev,
-			  void *data, struct drm_file *file_priv);
-extern int drm_mode_addfb2(struct drm_device *dev,
-			   void *data, struct drm_file *file_priv);
-int drm_mode_rmfb(struct drm_device *dev,
-			 void *data, struct drm_file *file_priv);
-int drm_mode_getfb(struct drm_device *dev,
-		   void *data, struct drm_file *file_priv);
-int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
-			   void *data, struct drm_file *file_priv);
-
 /* IOCTLs */
-int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
-				      struct drm_file *file_priv);
-int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
-				    struct drm_file *file_priv);
-
 int drm_mode_getresources(struct drm_device *dev,
 			  void *data, struct drm_file *file_priv);
-int drm_mode_getplane_res(struct drm_device *dev, void *data,
-			  struct drm_file *file_priv);
 int drm_mode_getcrtc(struct drm_device *dev,
 		     void *data, struct drm_file *file_priv);
-int drm_mode_getconnector(struct drm_device *dev,
-			  void *data, struct drm_file *file_priv);
 int drm_mode_setcrtc(struct drm_device *dev,
 		     void *data, struct drm_file *file_priv);
-int drm_mode_getplane(struct drm_device *dev,
-		      void *data, struct drm_file *file_priv);
-int drm_mode_setplane(struct drm_device *dev,
-		      void *data, struct drm_file *file_priv);
-int drm_mode_cursor_ioctl(struct drm_device *dev,
-			  void *data, struct drm_file *file_priv);
-int drm_mode_cursor2_ioctl(struct drm_device *dev,
-			   void *data, struct drm_file *file_priv);
+
+/* drm_color_mgmt.c */
+
+/* IOCTLs */
+int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv);
+int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv);
+
+/* drm_property.c */
+void drm_property_destroy_user_blobs(struct drm_device *dev,
+				     struct drm_file *file_priv);
+bool drm_property_change_valid_get(struct drm_property *property,
+				   uint64_t value,
+				   struct drm_mode_object **ref);
+void drm_property_change_valid_put(struct drm_property *property,
+				   struct drm_mode_object *ref);
+
+/* IOCTL */
 int drm_mode_getproperty_ioctl(struct drm_device *dev,
 			       void *data, struct drm_file *file_priv);
 int drm_mode_getblob_ioctl(struct drm_device *dev,
@@ -108,17 +85,80 @@
 			      void *data, struct drm_file *file_priv);
 int drm_mode_destroyblob_ioctl(struct drm_device *dev,
 			       void *data, struct drm_file *file_priv);
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
-					  void *data, struct drm_file *file_priv);
+
+/* drm_mode_object.c */
+int drm_mode_object_get_reg(struct drm_device *dev,
+			    struct drm_mode_object *obj,
+			    uint32_t obj_type,
+			    bool register_obj,
+			    void (*obj_free_cb)(struct kref *kref));
+void drm_mode_object_register(struct drm_device *dev,
+			      struct drm_mode_object *obj);
+int drm_mode_object_get(struct drm_device *dev,
+			struct drm_mode_object *obj, uint32_t obj_type);
+struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
+					       uint32_t id, uint32_t type);
+void drm_mode_object_unregister(struct drm_device *dev,
+				struct drm_mode_object *object);
+int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
+				   uint32_t __user *prop_ptr,
+				   uint64_t __user *prop_values,
+				   uint32_t *arg_count_props);
+struct drm_property *drm_mode_obj_find_prop_id(struct drm_mode_object *obj,
+					       uint32_t prop_id);
+
+/* IOCTL */
+
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv);
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+
+/* drm_encoder.c */
+int drm_encoder_register_all(struct drm_device *dev);
+void drm_encoder_unregister_all(struct drm_device *dev);
+
+/* IOCTL */
 int drm_mode_getencoder(struct drm_device *dev,
 			void *data, struct drm_file *file_priv);
-int drm_mode_gamma_get_ioctl(struct drm_device *dev,
-			     void *data, struct drm_file *file_priv);
-int drm_mode_gamma_set_ioctl(struct drm_device *dev,
-			     void *data, struct drm_file *file_priv);
 
-int drm_mode_page_flip_ioctl(struct drm_device *dev,
-			     void *data, struct drm_file *file_priv);
+/* drm_connector.c */
+void drm_connector_ida_init(void);
+void drm_connector_ida_destroy(void);
+void drm_connector_unregister_all(struct drm_device *dev);
+int drm_connector_register_all(struct drm_device *dev);
+int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+				    struct drm_property *property,
+				    uint64_t value);
+int drm_connector_create_standard_properties(struct drm_device *dev);
+
+/* IOCTL */
+int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+					  void *data, struct drm_file *file_priv);
+int drm_mode_getconnector(struct drm_device *dev,
+			  void *data, struct drm_file *file_priv);
+
+/* drm_framebuffer.c */
+struct drm_framebuffer *
+drm_internal_framebuffer_create(struct drm_device *dev,
+				const struct drm_mode_fb_cmd2 *r,
+				struct drm_file *file_priv);
+void drm_framebuffer_free(struct kref *kref);
+int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
+				     uint32_t src_w, uint32_t src_h,
+				     const struct drm_framebuffer *fb);
+
+/* IOCTL */
+int drm_mode_addfb(struct drm_device *dev,
+		   void *data, struct drm_file *file_priv);
+int drm_mode_addfb2(struct drm_device *dev,
+		    void *data, struct drm_file *file_priv);
+int drm_mode_rmfb(struct drm_device *dev,
+		  void *data, struct drm_file *file_priv);
+int drm_mode_getfb(struct drm_device *dev,
+		   void *data, struct drm_file *file_priv);
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+			   void *data, struct drm_file *file_priv);
 
 /* drm_atomic.c */
 int drm_atomic_get_property(struct drm_mode_object *obj,
@@ -129,6 +169,23 @@
 int drm_modeset_register_all(struct drm_device *dev);
 void drm_modeset_unregister_all(struct drm_device *dev);
 
-/* drm_blend.c */
-int drm_atomic_helper_normalize_zpos(struct drm_device *dev,
-				     struct drm_atomic_state *state);
+
+/* drm_plane.c */
+int drm_plane_register_all(struct drm_device *dev);
+void drm_plane_unregister_all(struct drm_device *dev);
+int drm_plane_check_pixel_format(const struct drm_plane *plane,
+				 u32 format);
+
+/* IOCTL */
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv);
+int drm_mode_getplane(struct drm_device *dev,
+		      void *data, struct drm_file *file_priv);
+int drm_mode_setplane(struct drm_device *dev,
+		      void *data, struct drm_file *file_priv);
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+			  void *data, struct drm_file *file_priv);
+int drm_mode_cursor2_ioctl(struct drm_device *dev,
+			   void *data, struct drm_file *file_priv);
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index fa10cef..1205790 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -104,8 +104,8 @@
 		ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
 					  root, tmp, &drm_debugfs_fops);
 		if (!ent) {
-			DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
-				  root->d_name.name, files[i].name);
+			DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/%s\n",
+				  root, files[i].name);
 			kfree(tmp);
 			ret = -1;
 			goto fail;
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index ea48180..3f83e2c 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -50,9 +50,8 @@
 	int i;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
-	    drm_core_check_feature(dev, DRIVER_MODESET)) {
+	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return 0;
-	}
 
 	dev->buf_use = 0;
 	atomic_set(&dev->buf_alloc, 0);
@@ -81,9 +80,8 @@
 	int i, j;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
-	    drm_core_check_feature(dev, DRIVER_MODESET)) {
+	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return;
-	}
 
 	if (!dma)
 		return;
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index 734f86a..ec1ed94 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -36,6 +36,8 @@
 #include <drm/drm_crtc.h>
 #include <drm/drmP.h>
 
+#include "drm_crtc_helper_internal.h"
+
 struct drm_dp_aux_dev {
 	unsigned index;
 	struct drm_dp_aux *aux;
@@ -283,12 +285,7 @@
 	schedule();
 	return 0;
 }
-/**
- * drm_dp_aux_unregister_devnode() - unregister a devnode for this aux channel
- * @aux: DisplayPort AUX channel
- *
- * Returns 0 on success or a negative error code on failure.
- */
+
 void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
 {
 	struct drm_dp_aux_dev *aux_dev;
@@ -314,14 +311,7 @@
 	DRM_DEBUG("drm_dp_aux_dev: aux [%s] unregistering\n", aux->name);
 	kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
 }
-EXPORT_SYMBOL(drm_dp_aux_unregister_devnode);
 
-/**
- * drm_dp_aux_register_devnode() - register a devnode for this aux channel
- * @aux: DisplayPort AUX channel
- *
- * Returns 0 on success or a negative error code on failure.
- */
 int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
 {
 	struct drm_dp_aux_dev *aux_dev;
@@ -347,7 +337,6 @@
 	drm_dp_aux_unregister_devnode(aux);
 	return res;
 }
-EXPORT_SYMBOL(drm_dp_aux_register_devnode);
 
 int drm_dp_aux_dev_init(void)
 {
@@ -369,11 +358,9 @@
 	class_destroy(drm_dp_aux_dev_class);
 	return res;
 }
-EXPORT_SYMBOL(drm_dp_aux_dev_init);
 
 void drm_dp_aux_dev_exit(void)
 {
 	unregister_chrdev(drm_dev_major, "aux");
 	class_destroy(drm_dp_aux_dev_class);
 }
-EXPORT_SYMBOL(drm_dp_aux_dev_exit);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 2bd0644..3e6fe82 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -27,10 +27,12 @@
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/i2c.h>
+#include <linux/seq_file.h>
 #include <drm/drm_dp_helper.h>
-#include <drm/drm_dp_aux_dev.h>
 #include <drm/drmP.h>
 
+#include "drm_crtc_helper_internal.h"
+
 /**
  * DOC: dp helpers
  *
@@ -223,7 +225,7 @@
 			err = ret;
 	}
 
-	DRM_DEBUG_KMS("too many retries, giving up\n");
+	DRM_DEBUG_KMS("Too many retries, giving up. First error: %d\n", err);
 	ret = err;
 
 unlock:
@@ -438,6 +440,179 @@
 }
 EXPORT_SYMBOL(drm_dp_link_configure);
 
+/**
+ * drm_dp_downstream_max_clock() - extract branch device max
+ *                                 pixel rate for legacy VGA
+ *                                 converter or max TMDS clock
+ *                                 rate for others
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Returns max clock in kHz on success or 0 if max clock not defined
+ */
+int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+				const u8 port_cap[4])
+{
+	int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+	bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+		DP_DETAILED_CAP_INFO_AVAILABLE;
+
+	if (!detailed_cap_info)
+		return 0;
+
+	switch (type) {
+	case DP_DS_PORT_TYPE_VGA:
+		return port_cap[1] * 8 * 1000;
+	case DP_DS_PORT_TYPE_DVI:
+	case DP_DS_PORT_TYPE_HDMI:
+	case DP_DS_PORT_TYPE_DP_DUALMODE:
+		return port_cap[1] * 2500;
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL(drm_dp_downstream_max_clock);
+
+/**
+ * drm_dp_downstream_max_bpc() - extract branch device max
+ *                               bits per component
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Returns max bpc on success or 0 if max bpc not defined
+ */
+int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+			      const u8 port_cap[4])
+{
+	int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+	bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+		DP_DETAILED_CAP_INFO_AVAILABLE;
+	int bpc;
+
+	if (!detailed_cap_info)
+		return 0;
+
+	switch (type) {
+	case DP_DS_PORT_TYPE_VGA:
+	case DP_DS_PORT_TYPE_DVI:
+	case DP_DS_PORT_TYPE_HDMI:
+	case DP_DS_PORT_TYPE_DP_DUALMODE:
+		bpc = port_cap[2] & DP_DS_MAX_BPC_MASK;
+
+		switch (bpc) {
+		case DP_DS_8BPC:
+			return 8;
+		case DP_DS_10BPC:
+			return 10;
+		case DP_DS_12BPC:
+			return 12;
+		case DP_DS_16BPC:
+			return 16;
+		}
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL(drm_dp_downstream_max_bpc);
+
+/**
+ * drm_dp_downstream_id() - identify branch device
+ * @aux: DisplayPort AUX channel
+ * @id: DisplayPort branch device id
+ *
+ * Returns branch device id on success or NULL on failure
+ */
+int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6])
+{
+	return drm_dp_dpcd_read(aux, DP_BRANCH_ID, id, 6);
+}
+EXPORT_SYMBOL(drm_dp_downstream_id);
+
+/**
+ * drm_dp_downstream_debug() - debug DP branch devices
+ * @m: pointer for debugfs file
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ * @aux: DisplayPort AUX channel
+ *
+ */
+void drm_dp_downstream_debug(struct seq_file *m,
+			     const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+			     const u8 port_cap[4], struct drm_dp_aux *aux)
+{
+	bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+				 DP_DETAILED_CAP_INFO_AVAILABLE;
+	int clk;
+	int bpc;
+	char id[6];
+	int len;
+	uint8_t rev[2];
+	int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+	bool branch_device = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+			     DP_DWN_STRM_PORT_PRESENT;
+
+	seq_printf(m, "\tDP branch device present: %s\n",
+		   branch_device ? "yes" : "no");
+
+	if (!branch_device)
+		return;
+
+	switch (type) {
+	case DP_DS_PORT_TYPE_DP:
+		seq_puts(m, "\t\tType: DisplayPort\n");
+		break;
+	case DP_DS_PORT_TYPE_VGA:
+		seq_puts(m, "\t\tType: VGA\n");
+		break;
+	case DP_DS_PORT_TYPE_DVI:
+		seq_puts(m, "\t\tType: DVI\n");
+		break;
+	case DP_DS_PORT_TYPE_HDMI:
+		seq_puts(m, "\t\tType: HDMI\n");
+		break;
+	case DP_DS_PORT_TYPE_NON_EDID:
+		seq_puts(m, "\t\tType: others without EDID support\n");
+		break;
+	case DP_DS_PORT_TYPE_DP_DUALMODE:
+		seq_puts(m, "\t\tType: DP++\n");
+		break;
+	case DP_DS_PORT_TYPE_WIRELESS:
+		seq_puts(m, "\t\tType: Wireless\n");
+		break;
+	default:
+		seq_puts(m, "\t\tType: N/A\n");
+	}
+
+	drm_dp_downstream_id(aux, id);
+	seq_printf(m, "\t\tID: %s\n", id);
+
+	len = drm_dp_dpcd_read(aux, DP_BRANCH_HW_REV, &rev[0], 1);
+	if (len > 0)
+		seq_printf(m, "\t\tHW: %d.%d\n",
+			   (rev[0] & 0xf0) >> 4, rev[0] & 0xf);
+
+	len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, &rev, 2);
+	if (len > 0)
+		seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
+
+	if (detailed_cap_info) {
+		clk = drm_dp_downstream_max_clock(dpcd, port_cap);
+
+		if (clk > 0) {
+			if (type == DP_DS_PORT_TYPE_VGA)
+				seq_printf(m, "\t\tMax dot clock: %d kHz\n", clk);
+			else
+				seq_printf(m, "\t\tMax TMDS clock: %d kHz\n", clk);
+		}
+
+		bpc = drm_dp_downstream_max_bpc(dpcd, port_cap);
+
+		if (bpc > 0)
+			seq_printf(m, "\t\tMax bpc: %d\n", bpc);
+	}
+}
+EXPORT_SYMBOL(drm_dp_downstream_debug);
+
 /*
  * I2C-over-AUX implementation
  */
@@ -574,7 +749,17 @@
 			if (ret == -EBUSY)
 				continue;
 
-			DRM_DEBUG_KMS("transaction failed: %d\n", ret);
+			/*
+			 * While timeouts can be errors, they're usually normal
+			 * behavior (for instance, when a driver tries to
+			 * communicate with a non-existant DisplayPort device).
+			 * Avoid spamming the kernel log with timeout errors.
+			 */
+			if (ret == -ETIMEDOUT)
+				DRM_DEBUG_KMS_RATELIMITED("transaction timed out\n");
+			else
+				DRM_DEBUG_KMS("transaction failed: %d\n", ret);
+
 			return ret;
 		}
 
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index be27ed3..6efdba4 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -33,7 +33,6 @@
 #include <linux/mount.h>
 #include <linux/slab.h>
 #include <drm/drmP.h>
-#include <drm/drm_core.h>
 #include "drm_crtc_internal.h"
 #include "drm_legacy.h"
 #include "drm_internal.h"
@@ -46,8 +45,8 @@
 unsigned int drm_debug = 0;
 EXPORT_SYMBOL(drm_debug);
 
-MODULE_AUTHOR(CORE_AUTHOR);
-MODULE_DESCRIPTION(CORE_DESC);
+MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
+MODULE_DESCRIPTION("DRM shared core routines");
 MODULE_LICENSE("GPL and additional rights");
 MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
 "\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
@@ -63,37 +62,52 @@
 
 static struct dentry *drm_debugfs_root;
 
-void drm_err(const char *format, ...)
+#define DRM_PRINTK_FMT "[" DRM_NAME ":%s]%s %pV"
+
+void drm_dev_printk(const struct device *dev, const char *level,
+		    unsigned int category, const char *function_name,
+		    const char *prefix, const char *format, ...)
 {
 	struct va_format vaf;
 	va_list args;
 
-	va_start(args, format);
-
-	vaf.fmt = format;
-	vaf.va = &args;
-
-	printk(KERN_ERR "[" DRM_NAME ":%ps] *ERROR* %pV",
-	       __builtin_return_address(0), &vaf);
-
-	va_end(args);
-}
-EXPORT_SYMBOL(drm_err);
-
-void drm_ut_debug_printk(const char *function_name, const char *format, ...)
-{
-	struct va_format vaf;
-	va_list args;
+	if (category != DRM_UT_NONE && !(drm_debug & category))
+		return;
 
 	va_start(args, format);
 	vaf.fmt = format;
 	vaf.va = &args;
 
-	printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
+	if (dev)
+		dev_printk(level, dev, DRM_PRINTK_FMT, function_name, prefix,
+			   &vaf);
+	else
+		printk("%s" DRM_PRINTK_FMT, level, function_name, prefix, &vaf);
 
 	va_end(args);
 }
-EXPORT_SYMBOL(drm_ut_debug_printk);
+EXPORT_SYMBOL(drm_dev_printk);
+
+void drm_printk(const char *level, unsigned int category,
+		const char *format, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	if (category != DRM_UT_NONE && !(drm_debug & category))
+		return;
+
+	va_start(args, format);
+	vaf.fmt = format;
+	vaf.va = &args;
+
+	printk("%s" "[" DRM_NAME ":%ps]%s %pV",
+	       level, __builtin_return_address(0),
+	       strcmp(level, KERN_ERR) == 0 ? " *ERROR*" : "", &vaf);
+
+	va_end(args);
+}
+EXPORT_SYMBOL(drm_printk);
 
 /*
  * DRM Minors
@@ -112,7 +126,7 @@
 					     unsigned int type)
 {
 	switch (type) {
-	case DRM_MINOR_LEGACY:
+	case DRM_MINOR_PRIMARY:
 		return &dev->primary;
 	case DRM_MINOR_RENDER:
 		return &dev->render;
@@ -325,6 +339,9 @@
 
 static int drm_dev_set_unique(struct drm_device *dev, const char *name)
 {
+	if (!name)
+		return -EINVAL;
+
 	kfree(dev->unique);
 	dev->unique = kstrdup(name, GFP_KERNEL);
 
@@ -512,7 +529,7 @@
 			goto err_minors;
 	}
 
-	ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
+	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
 	if (ret)
 		goto err_minors;
 
@@ -545,7 +562,7 @@
 	drm_legacy_ctxbitmap_cleanup(dev);
 	drm_ht_remove(&dev->map_hash);
 err_minors:
-	drm_minor_free(dev, DRM_MINOR_LEGACY);
+	drm_minor_free(dev, DRM_MINOR_PRIMARY);
 	drm_minor_free(dev, DRM_MINOR_RENDER);
 	drm_minor_free(dev, DRM_MINOR_CONTROL);
 	drm_fs_inode_free(dev->anon_inode);
@@ -575,7 +592,7 @@
  * own struct should look at using drm_dev_init() instead.
  *
  * RETURNS:
- * Pointer to new DRM device, or NULL if out of memory.
+ * Pointer to new DRM device, or ERR_PTR on failure.
  */
 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
 				 struct device *parent)
@@ -585,12 +602,12 @@
 
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (!dev)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	ret = drm_dev_init(dev, driver, parent);
 	if (ret) {
 		kfree(dev);
-		return NULL;
+		return ERR_PTR(ret);
 	}
 
 	return dev;
@@ -608,7 +625,7 @@
 	drm_ht_remove(&dev->map_hash);
 	drm_fs_inode_free(dev->anon_inode);
 
-	drm_minor_free(dev, DRM_MINOR_LEGACY);
+	drm_minor_free(dev, DRM_MINOR_PRIMARY);
 	drm_minor_free(dev, DRM_MINOR_RENDER);
 	drm_minor_free(dev, DRM_MINOR_CONTROL);
 
@@ -684,7 +701,7 @@
 	if (ret)
 		goto err_minors;
 
-	ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
+	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
 	if (ret)
 		goto err_minors;
 
@@ -701,7 +718,7 @@
 	goto out_unlock;
 
 err_minors:
-	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
 out_unlock:
@@ -741,7 +758,7 @@
 	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
 		drm_legacy_rmmap(dev, r_list->map);
 
-	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
 }
@@ -807,53 +824,48 @@
 	.llseek = noop_llseek,
 };
 
+static void drm_core_exit(void)
+{
+	unregister_chrdev(DRM_MAJOR, "drm");
+	debugfs_remove(drm_debugfs_root);
+	drm_sysfs_destroy();
+	idr_destroy(&drm_minors_idr);
+	drm_connector_ida_destroy();
+	drm_global_release();
+}
+
 static int __init drm_core_init(void)
 {
-	int ret = -ENOMEM;
+	int ret;
 
 	drm_global_init();
 	drm_connector_ida_init();
 	idr_init(&drm_minors_idr);
 
-	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
-		goto err_p1;
-
 	ret = drm_sysfs_init();
 	if (ret < 0) {
-		printk(KERN_ERR "DRM: Error creating drm class.\n");
-		goto err_p2;
+		DRM_ERROR("Cannot create DRM class: %d\n", ret);
+		goto error;
 	}
 
 	drm_debugfs_root = debugfs_create_dir("dri", NULL);
 	if (!drm_debugfs_root) {
-		DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
-		ret = -1;
-		goto err_p3;
+		ret = -ENOMEM;
+		DRM_ERROR("Cannot create debugfs-root: %d\n", ret);
+		goto error;
 	}
 
-	DRM_INFO("Initialized %s %d.%d.%d %s\n",
-		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
+	if (ret < 0)
+		goto error;
+
+	DRM_INFO("Initialized\n");
 	return 0;
-err_p3:
-	drm_sysfs_destroy();
-err_p2:
-	unregister_chrdev(DRM_MAJOR, "drm");
 
-	idr_destroy(&drm_minors_idr);
-err_p1:
+error:
+	drm_core_exit();
 	return ret;
 }
 
-static void __exit drm_core_exit(void)
-{
-	debugfs_remove(drm_debugfs_root);
-	drm_sysfs_destroy();
-
-	unregister_chrdev(DRM_MAJOR, "drm");
-
-	drm_connector_ida_destroy();
-	idr_destroy(&drm_minors_idr);
-}
-
 module_init(drm_core_init);
 module_exit(drm_core_exit);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 637a0aa..ec77bd3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -991,7 +991,7 @@
 	 .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 	/* 64 - 1920x1080@100Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
-		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 	 .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 };
@@ -3253,16 +3253,12 @@
 }
 
 static void
-parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
+drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
 {
 	u8 len = cea_db_payload_len(db);
 
-	if (len >= 6) {
+	if (len >= 6)
 		connector->eld[5] |= (db[6] >> 7) << 1;  /* Supports_AI */
-		connector->dvi_dual = db[6] & 1;
-	}
-	if (len >= 7)
-		connector->max_tmds_clock = db[7] * 5;
 	if (len >= 8) {
 		connector->latency_present[0] = db[8] >> 7;
 		connector->latency_present[1] = (db[8] >> 6) & 1;
@@ -3276,19 +3272,15 @@
 	if (len >= 12)
 		connector->audio_latency[1] = db[12];
 
-	DRM_DEBUG_KMS("HDMI: DVI dual %d, "
-		    "max TMDS clock %d, "
-		    "latency present %d %d, "
-		    "video latency %d %d, "
-		    "audio latency %d %d\n",
-		    connector->dvi_dual,
-		    connector->max_tmds_clock,
-	      (int) connector->latency_present[0],
-	      (int) connector->latency_present[1],
-		    connector->video_latency[0],
-		    connector->video_latency[1],
-		    connector->audio_latency[0],
-		    connector->audio_latency[1]);
+	DRM_DEBUG_KMS("HDMI: latency present %d %d, "
+		      "video latency %d %d, "
+		      "audio latency %d %d\n",
+		      connector->latency_present[0],
+		      connector->latency_present[1],
+		      connector->video_latency[0],
+		      connector->video_latency[1],
+		      connector->audio_latency[0],
+		      connector->audio_latency[1]);
 }
 
 static void
@@ -3358,6 +3350,13 @@
 
 	memset(eld, 0, sizeof(connector->eld));
 
+	connector->latency_present[0] = false;
+	connector->latency_present[1] = false;
+	connector->video_latency[0] = 0;
+	connector->audio_latency[0] = 0;
+	connector->video_latency[1] = 0;
+	connector->audio_latency[1] = 0;
+
 	cea = drm_find_cea_extension(edid);
 	if (!cea) {
 		DRM_DEBUG_KMS("ELD: no CEA Extension found\n");
@@ -3407,7 +3406,7 @@
 			case VENDOR_BLOCK:
 				/* HDMI Vendor-Specific Data Block */
 				if (cea_db_is_hdmi_vsdb(db))
-					parse_hdmi_vsdb(connector, db);
+					drm_parse_hdmi_vsdb_audio(connector, db);
 				break;
 			default:
 				break;
@@ -3721,122 +3720,127 @@
 }
 EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
 
-/**
- * drm_assign_hdmi_deep_color_info - detect whether monitor supports
- * hdmi deep color modes and update drm_display_info if so.
- * @edid: monitor EDID information
- * @info: Updated with maximum supported deep color bpc and color format
- *        if deep color supported.
- * @connector: DRM connector, used only for debug output
- *
- * Parse the CEA extension according to CEA-861-B.
- * Return true if HDMI deep color supported, false if not or unknown.
- */
-static bool drm_assign_hdmi_deep_color_info(struct edid *edid,
-                                            struct drm_display_info *info,
-                                            struct drm_connector *connector)
+static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
+					   const u8 *hdmi)
 {
-	u8 *edid_ext, *hdmi;
-	int i;
-	int start_offset, end_offset;
+	struct drm_display_info *info = &connector->display_info;
 	unsigned int dc_bpc = 0;
 
+	/* HDMI supports at least 8 bpc */
+	info->bpc = 8;
+
+	if (cea_db_payload_len(hdmi) < 6)
+		return;
+
+	if (hdmi[6] & DRM_EDID_HDMI_DC_30) {
+		dc_bpc = 10;
+		info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_30;
+		DRM_DEBUG("%s: HDMI sink does deep color 30.\n",
+			  connector->name);
+	}
+
+	if (hdmi[6] & DRM_EDID_HDMI_DC_36) {
+		dc_bpc = 12;
+		info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_36;
+		DRM_DEBUG("%s: HDMI sink does deep color 36.\n",
+			  connector->name);
+	}
+
+	if (hdmi[6] & DRM_EDID_HDMI_DC_48) {
+		dc_bpc = 16;
+		info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_48;
+		DRM_DEBUG("%s: HDMI sink does deep color 48.\n",
+			  connector->name);
+	}
+
+	if (dc_bpc == 0) {
+		DRM_DEBUG("%s: No deep color support on this HDMI sink.\n",
+			  connector->name);
+		return;
+	}
+
+	DRM_DEBUG("%s: Assigning HDMI sink color depth as %d bpc.\n",
+		  connector->name, dc_bpc);
+	info->bpc = dc_bpc;
+
+	/*
+	 * Deep color support mandates RGB444 support for all video
+	 * modes and forbids YCRCB422 support for all video modes per
+	 * HDMI 1.3 spec.
+	 */
+	info->color_formats = DRM_COLOR_FORMAT_RGB444;
+
+	/* YCRCB444 is optional according to spec. */
+	if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
+		info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+		DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n",
+			  connector->name);
+	}
+
+	/*
+	 * Spec says that if any deep color mode is supported at all,
+	 * then deep color 36 bit must be supported.
+	 */
+	if (!(hdmi[6] & DRM_EDID_HDMI_DC_36)) {
+		DRM_DEBUG("%s: HDMI sink should do DC_36, but does not!\n",
+			  connector->name);
+	}
+}
+
+static void
+drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
+{
+	struct drm_display_info *info = &connector->display_info;
+	u8 len = cea_db_payload_len(db);
+
+	if (len >= 6)
+		info->dvi_dual = db[6] & 1;
+	if (len >= 7)
+		info->max_tmds_clock = db[7] * 5000;
+
+	DRM_DEBUG_KMS("HDMI: DVI dual %d, "
+		      "max TMDS clock %d kHz\n",
+		      info->dvi_dual,
+		      info->max_tmds_clock);
+
+	drm_parse_hdmi_deep_color_info(connector, db);
+}
+
+static void drm_parse_cea_ext(struct drm_connector *connector,
+			      struct edid *edid)
+{
+	struct drm_display_info *info = &connector->display_info;
+	const u8 *edid_ext;
+	int i, start, end;
+
 	edid_ext = drm_find_cea_extension(edid);
 	if (!edid_ext)
-		return false;
+		return;
 
-	if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
-		return false;
+	info->cea_rev = edid_ext[1];
 
-	/*
-	 * Because HDMI identifier is in Vendor Specific Block,
-	 * search it from all data blocks of CEA extension.
-	 */
-	for_each_cea_db(edid_ext, i, start_offset, end_offset) {
-		if (cea_db_is_hdmi_vsdb(&edid_ext[i])) {
-			/* HDMI supports at least 8 bpc */
-			info->bpc = 8;
+	/* The existence of a CEA block should imply RGB support */
+	info->color_formats = DRM_COLOR_FORMAT_RGB444;
+	if (edid_ext[3] & EDID_CEA_YCRCB444)
+		info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+	if (edid_ext[3] & EDID_CEA_YCRCB422)
+		info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
 
-			hdmi = &edid_ext[i];
-			if (cea_db_payload_len(hdmi) < 6)
-				return false;
+	if (cea_db_offsets(edid_ext, &start, &end))
+		return;
 
-			if (hdmi[6] & DRM_EDID_HDMI_DC_30) {
-				dc_bpc = 10;
-				info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_30;
-				DRM_DEBUG("%s: HDMI sink does deep color 30.\n",
-						  connector->name);
-			}
+	for_each_cea_db(edid_ext, i, start, end) {
+		const u8 *db = &edid_ext[i];
 
-			if (hdmi[6] & DRM_EDID_HDMI_DC_36) {
-				dc_bpc = 12;
-				info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_36;
-				DRM_DEBUG("%s: HDMI sink does deep color 36.\n",
-						  connector->name);
-			}
-
-			if (hdmi[6] & DRM_EDID_HDMI_DC_48) {
-				dc_bpc = 16;
-				info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_48;
-				DRM_DEBUG("%s: HDMI sink does deep color 48.\n",
-						  connector->name);
-			}
-
-			if (dc_bpc > 0) {
-				DRM_DEBUG("%s: Assigning HDMI sink color depth as %d bpc.\n",
-						  connector->name, dc_bpc);
-				info->bpc = dc_bpc;
-
-				/*
-				 * Deep color support mandates RGB444 support for all video
-				 * modes and forbids YCRCB422 support for all video modes per
-				 * HDMI 1.3 spec.
-				 */
-				info->color_formats = DRM_COLOR_FORMAT_RGB444;
-
-				/* YCRCB444 is optional according to spec. */
-				if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
-					info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
-					DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n",
-							  connector->name);
-				}
-
-				/*
-				 * Spec says that if any deep color mode is supported at all,
-				 * then deep color 36 bit must be supported.
-				 */
-				if (!(hdmi[6] & DRM_EDID_HDMI_DC_36)) {
-					DRM_DEBUG("%s: HDMI sink should do DC_36, but does not!\n",
-							  connector->name);
-				}
-
-				return true;
-			}
-			else {
-				DRM_DEBUG("%s: No deep color support on this HDMI sink.\n",
-						  connector->name);
-			}
-		}
+		if (cea_db_is_hdmi_vsdb(db))
+			drm_parse_hdmi_vsdb_video(connector, db);
 	}
-
-	return false;
 }
 
-/**
- * drm_add_display_info - pull display info out if present
- * @edid: EDID data
- * @info: display info (attached to connector)
- * @connector: connector whose edid is used to build display info
- *
- * Grab any available display info and stuff it into the drm_display_info
- * structure that's part of the connector.  Useful for tracking bpp and
- * color spaces.
- */
-static void drm_add_display_info(struct edid *edid,
-                                 struct drm_display_info *info,
-                                 struct drm_connector *connector)
+static void drm_add_display_info(struct drm_connector *connector,
+				 struct edid *edid)
 {
-	u8 *edid_ext;
+	struct drm_display_info *info = &connector->display_info;
 
 	info->width_mm = edid->width_cm * 10;
 	info->height_mm = edid->height_cm * 10;
@@ -3844,6 +3848,9 @@
 	/* driver figures it out in this case */
 	info->bpc = 0;
 	info->color_formats = 0;
+	info->cea_rev = 0;
+	info->max_tmds_clock = 0;
+	info->dvi_dual = false;
 
 	if (edid->revision < 3)
 		return;
@@ -3851,21 +3858,7 @@
 	if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
 		return;
 
-	/* Get data from CEA blocks if present */
-	edid_ext = drm_find_cea_extension(edid);
-	if (edid_ext) {
-		info->cea_rev = edid_ext[1];
-
-		/* The existence of a CEA block should imply RGB support */
-		info->color_formats = DRM_COLOR_FORMAT_RGB444;
-		if (edid_ext[3] & EDID_CEA_YCRCB444)
-			info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
-		if (edid_ext[3] & EDID_CEA_YCRCB422)
-			info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
-	}
-
-	/* HDMI deep color modes supported? Assign to info, if so */
-	drm_assign_hdmi_deep_color_info(edid, info, connector);
+	drm_parse_cea_ext(connector, edid);
 
 	/*
 	 * Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3?
@@ -4052,7 +4045,9 @@
  * @connector: connector we're probing
  * @edid: EDID data
  *
- * Add the specified modes to the connector's mode list.
+ * Add the specified modes to the connector's mode list. Also fills out the
+ * &drm_display_info structure in @connector with any information which can be
+ * derived from the edid.
  *
  * Return: The number of modes added or 0 if we couldn't find any.
  */
@@ -4099,7 +4094,7 @@
 	if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
 		edid_fixup_preferred(connector, quirks);
 
-	drm_add_display_info(edid, &connector->display_info, connector);
+	drm_add_display_info(connector, edid);
 
 	if (quirks & EDID_QUIRK_FORCE_6BPC)
 		connector->display_info.bpc = 6;
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
new file mode 100644
index 0000000..5c06771
--- /dev/null
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_encoder.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * Encoders represent the connecting element between the CRTC (as the overall
+ * pixel pipeline, represented by struct &drm_crtc) and the connectors (as the
+ * generic sink entity, represented by struct &drm_connector). An encoder takes
+ * pixel data from a CRTC and converts it to a format suitable for any attached
+ * connector. Encoders are objects exposed to userspace, originally to allow
+ * userspace to infer cloning and connector/CRTC restrictions. Unfortunately
+ * almost all drivers get this wrong, making the uabi pretty much useless. On
+ * top of that the exposed restrictions are too simple for today's hardware, and
+ * the recommended way to infer restrictions is by using the
+ * DRM_MODE_ATOMIC_TEST_ONLY flag for the atomic IOCTL.
+ *
+ * Otherwise encoders aren't used in the uapi at all (any modeset request from
+ * userspace directly connects a connector with a CRTC), drivers are therefore
+ * free to use them however they wish. Modeset helper libraries make strong use
+ * of encoders to facilitate code sharing. But for more complex settings it is
+ * usually better to move shared code into a separate &drm_bridge. Compared to
+ * encoders, bridges also have the benefit of being purely an internal
+ * abstraction since they are not exposed to userspace at all.
+ *
+ * Encoders are initialized with drm_encoder_init() and cleaned up using
+ * drm_encoder_cleanup().
+ */
+static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
+	{ DRM_MODE_ENCODER_NONE, "None" },
+	{ DRM_MODE_ENCODER_DAC, "DAC" },
+	{ DRM_MODE_ENCODER_TMDS, "TMDS" },
+	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
+	{ DRM_MODE_ENCODER_TVDAC, "TV" },
+	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+	{ DRM_MODE_ENCODER_DSI, "DSI" },
+	{ DRM_MODE_ENCODER_DPMST, "DP MST" },
+	{ DRM_MODE_ENCODER_DPI, "DPI" },
+};
+
+int drm_encoder_register_all(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+	int ret = 0;
+
+	drm_for_each_encoder(encoder, dev) {
+		if (encoder->funcs->late_register)
+			ret = encoder->funcs->late_register(encoder);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+void drm_encoder_unregister_all(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+
+	drm_for_each_encoder(encoder, dev) {
+		if (encoder->funcs->early_unregister)
+			encoder->funcs->early_unregister(encoder);
+	}
+}
+
+/**
+ * drm_encoder_init - Init a preallocated encoder
+ * @dev: drm device
+ * @encoder: the encoder to init
+ * @funcs: callbacks for this encoder
+ * @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
+ *
+ * Initialises a preallocated encoder. Encoder should be subclassed as part of
+ * driver encoder objects. At driver unload time drm_encoder_cleanup() should be
+ * called from the driver's destroy hook in &drm_encoder_funcs.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_encoder_init(struct drm_device *dev,
+		     struct drm_encoder *encoder,
+		     const struct drm_encoder_funcs *funcs,
+		     int encoder_type, const char *name, ...)
+{
+	int ret;
+
+	drm_modeset_lock_all(dev);
+
+	ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
+	if (ret)
+		goto out_unlock;
+
+	encoder->dev = dev;
+	encoder->encoder_type = encoder_type;
+	encoder->funcs = funcs;
+	if (name) {
+		va_list ap;
+
+		va_start(ap, name);
+		encoder->name = kvasprintf(GFP_KERNEL, name, ap);
+		va_end(ap);
+	} else {
+		encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
+					  drm_encoder_enum_list[encoder_type].name,
+					  encoder->base.id);
+	}
+	if (!encoder->name) {
+		ret = -ENOMEM;
+		goto out_put;
+	}
+
+	list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
+	encoder->index = dev->mode_config.num_encoder++;
+
+out_put:
+	if (ret)
+		drm_mode_object_unregister(dev, &encoder->base);
+
+out_unlock:
+	drm_modeset_unlock_all(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_encoder_init);
+
+/**
+ * drm_encoder_cleanup - cleans up an initialised encoder
+ * @encoder: encoder to cleanup
+ *
+ * Cleans up the encoder but doesn't free the object.
+ */
+void drm_encoder_cleanup(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+
+	/* Note that the encoder_list is considered to be static; should we
+	 * remove the drm_encoder at runtime we would have to decrement all
+	 * the indices on the drm_encoder after us in the encoder_list.
+	 */
+
+	drm_modeset_lock_all(dev);
+	drm_mode_object_unregister(dev, &encoder->base);
+	kfree(encoder->name);
+	list_del(&encoder->head);
+	dev->mode_config.num_encoder--;
+	drm_modeset_unlock_all(dev);
+
+	memset(encoder, 0, sizeof(*encoder));
+}
+EXPORT_SYMBOL(drm_encoder_cleanup);
+
+static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
+{
+	struct drm_connector *connector;
+	struct drm_device *dev = encoder->dev;
+	bool uses_atomic = false;
+
+	/* For atomic drivers only state objects are synchronously updated and
+	 * protected by modeset locks, so check those first. */
+	drm_for_each_connector(connector, dev) {
+		if (!connector->state)
+			continue;
+
+		uses_atomic = true;
+
+		if (connector->state->best_encoder != encoder)
+			continue;
+
+		return connector->state->crtc;
+	}
+
+	/* Don't return stale data (e.g. pending async disable). */
+	if (uses_atomic)
+		return NULL;
+
+	return encoder->crtc;
+}
+
+int drm_mode_getencoder(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_mode_get_encoder *enc_resp = data;
+	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	encoder = drm_encoder_find(dev, enc_resp->encoder_id);
+	if (!encoder)
+		return -ENOENT;
+
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+	crtc = drm_encoder_get_crtc(encoder);
+	if (crtc)
+		enc_resp->crtc_id = crtc->base.id;
+	else
+		enc_resp->crtc_id = 0;
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+	enc_resp->encoder_type = encoder->encoder_type;
+	enc_resp->encoder_id = encoder->base.id;
+	enc_resp->possible_crtcs = encoder->possible_crtcs;
+	enc_resp->possible_clones = encoder->possible_clones;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0a06f91..03414bd 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -29,10 +29,10 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/console.h>
 #include <linux/kernel.h>
 #include <linux/sysrq.h>
 #include <linux/slab.h>
-#include <linux/fb.h>
 #include <linux/module.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
@@ -41,6 +41,8 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 
+#include "drm_crtc_helper_internal.h"
+
 static bool drm_fbdev_emulation = true;
 module_param_named(fbdev_emulation, drm_fbdev_emulation, bool, 0600);
 MODULE_PARM_DESC(fbdev_emulation,
@@ -335,7 +337,7 @@
 			goto fail;
 		}
 
-		plane_state->rotation = BIT(DRM_ROTATE_0);
+		plane_state->rotation = DRM_ROTATE_0;
 
 		plane->old_fb = plane->fb;
 		plane_mask |= 1 << drm_plane_index(plane);
@@ -395,7 +397,7 @@
 		if (dev->mode_config.rotation_property) {
 			drm_mode_plane_set_obj_prop(plane,
 						    dev->mode_config.rotation_property,
-						    BIT(DRM_ROTATE_0));
+						    DRM_ROTATE_0);
 		}
 	}
 
@@ -618,6 +620,16 @@
 	kfree(helper->crtc_info);
 }
 
+static void drm_fb_helper_resume_worker(struct work_struct *work)
+{
+	struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
+						    resume_work);
+
+	console_lock();
+	fb_set_suspend(helper->fbdev, 0);
+	console_unlock();
+}
+
 static void drm_fb_helper_dirty_work(struct work_struct *work)
 {
 	struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
@@ -649,6 +661,7 @@
 {
 	INIT_LIST_HEAD(&helper->kernel_fb_list);
 	spin_lock_init(&helper->dirty_lock);
+	INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker);
 	INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work);
 	helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0;
 	helper->funcs = funcs;
@@ -1024,17 +1037,65 @@
 /**
  * drm_fb_helper_set_suspend - wrapper around fb_set_suspend
  * @fb_helper: driver-allocated fbdev helper
- * @state: desired state, zero to resume, non-zero to suspend
+ * @suspend: whether to suspend or resume
  *
- * A wrapper around fb_set_suspend implemented by fbdev core
+ * A wrapper around fb_set_suspend implemented by fbdev core.
+ * Use drm_fb_helper_set_suspend_unlocked() if you don't need to take
+ * the lock yourself
  */
-void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state)
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend)
 {
 	if (fb_helper && fb_helper->fbdev)
-		fb_set_suspend(fb_helper->fbdev, state);
+		fb_set_suspend(fb_helper->fbdev, suspend);
 }
 EXPORT_SYMBOL(drm_fb_helper_set_suspend);
 
+/**
+ * drm_fb_helper_set_suspend_unlocked - wrapper around fb_set_suspend that also
+ *                                      takes the console lock
+ * @fb_helper: driver-allocated fbdev helper
+ * @suspend: whether to suspend or resume
+ *
+ * A wrapper around fb_set_suspend() that takes the console lock. If the lock
+ * isn't available on resume, a worker is tasked with waiting for the lock
+ * to become available. The console lock can be pretty contented on resume
+ * due to all the printk activity.
+ *
+ * This function can be called multiple times with the same state since
+ * &fb_info->state is checked to see if fbdev is running or not before locking.
+ *
+ * Use drm_fb_helper_set_suspend() if you need to take the lock yourself.
+ */
+void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
+					bool suspend)
+{
+	if (!fb_helper || !fb_helper->fbdev)
+		return;
+
+	/* make sure there's no pending/ongoing resume */
+	flush_work(&fb_helper->resume_work);
+
+	if (suspend) {
+		if (fb_helper->fbdev->state != FBINFO_STATE_RUNNING)
+			return;
+
+		console_lock();
+
+	} else {
+		if (fb_helper->fbdev->state == FBINFO_STATE_RUNNING)
+			return;
+
+		if (!console_trylock()) {
+			schedule_work(&fb_helper->resume_work);
+			return;
+		}
+	}
+
+	fb_set_suspend(fb_helper->fbdev, suspend);
+	console_unlock();
+}
+EXPORT_SYMBOL(drm_fb_helper_set_suspend_unlocked);
+
 static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
 		     u16 blue, u16 regno, struct fb_info *info)
 {
@@ -2194,7 +2255,7 @@
  * @fb_helper: the drm_fb_helper
  *
  * Scan the connectors attached to the fb_helper and try to put together a
- * setup after *notification of a change in output configuration.
+ * setup after notification of a change in output configuration.
  *
  * Called at runtime, takes the mode config locks to be able to check/change the
  * modeset configuration. Must be run from process context (which usually means
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 323c238..e84faec 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -92,7 +92,7 @@
 	int ret;
 
 	if (dev->driver->firstopen &&
-	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
+	    drm_core_check_feature(dev, DRIVER_LEGACY)) {
 		ret = dev->driver->firstopen(dev);
 		if (ret != 0)
 			return ret;
@@ -199,7 +199,6 @@
 
 	filp->private_data = priv;
 	priv->filp = filp;
-	priv->uid = current_euid();
 	priv->pid = get_pid(task_pid(current));
 	priv->minor = minor;
 
@@ -346,7 +345,7 @@
 		dev->driver->lastclose(dev);
 	DRM_DEBUG("driver lastclose completed\n");
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+	if (drm_core_check_feature(dev, DRIVER_LEGACY))
 		drm_legacy_dev_reinit(dev);
 }
 
@@ -389,7 +388,7 @@
 		  (long)old_encode_dev(file_priv->minor->kdev->devt),
 		  dev->open_count);
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+	if (drm_core_check_feature(dev, DRIVER_LEGACY))
 		drm_legacy_lock_release(dev, filp);
 
 	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 0645c85..29c56b4 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -36,19 +36,60 @@
 }
 
 /**
+ * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description
+ * @bpp: bits per pixels
+ * @depth: bit depth per pixel
+ *
+ * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
+ * Useful in fbdev emulation code, since that deals in those values.
+ */
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+{
+	uint32_t fmt;
+
+	switch (bpp) {
+	case 8:
+		fmt = DRM_FORMAT_C8;
+		break;
+	case 16:
+		if (depth == 15)
+			fmt = DRM_FORMAT_XRGB1555;
+		else
+			fmt = DRM_FORMAT_RGB565;
+		break;
+	case 24:
+		fmt = DRM_FORMAT_RGB888;
+		break;
+	case 32:
+		if (depth == 24)
+			fmt = DRM_FORMAT_XRGB8888;
+		else if (depth == 30)
+			fmt = DRM_FORMAT_XRGB2101010;
+		else
+			fmt = DRM_FORMAT_ARGB8888;
+		break;
+	default:
+		DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
+		fmt = DRM_FORMAT_XRGB8888;
+		break;
+	}
+
+	return fmt;
+}
+EXPORT_SYMBOL(drm_mode_legacy_fb_format);
+
+/**
  * drm_get_format_name - return a string for drm fourcc format
  * @format: format to compute name of
  *
- * Note that the buffer used by this function is globally shared and owned by
- * the function itself.
- *
- * FIXME: This isn't really multithreading safe.
+ * Note that the buffer returned by this function is owned by the caller
+ * and will need to be freed using kfree().
  */
-const char *drm_get_format_name(uint32_t format)
+char *drm_get_format_name(uint32_t format)
 {
-	static char buf[32];
+	char *buf = kmalloc(32, GFP_KERNEL);
 
-	snprintf(buf, sizeof(buf),
+	snprintf(buf, 32,
 		 "%c%c%c%c %s-endian (0x%08x)",
 		 printable_char(format & 0xff),
 		 printable_char((format >> 8) & 0xff),
@@ -73,6 +114,8 @@
 void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
 			  int *bpp)
 {
+	char *format_name;
+
 	switch (format) {
 	case DRM_FORMAT_C8:
 	case DRM_FORMAT_RGB332:
@@ -127,8 +170,9 @@
 		*bpp = 32;
 		break;
 	default:
-		DRM_DEBUG_KMS("unsupported pixel format %s\n",
-			      drm_get_format_name(format));
+		format_name = drm_get_format_name(format);
+		DRM_DEBUG_KMS("unsupported pixel format %s\n", format_name);
+		kfree(format_name);
 		*depth = 0;
 		*bpp = 0;
 		break;
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
new file mode 100644
index 0000000..398efd6
--- /dev/null
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -0,0 +1,857 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_auth.h>
+#include <drm/drm_framebuffer.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * Frame buffers are abstract memory objects that provide a source of pixels to
+ * scanout to a CRTC. Applications explicitly request the creation of frame
+ * buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls and receive an opaque
+ * handle that can be passed to the KMS CRTC control, plane configuration and
+ * page flip functions.
+ *
+ * Frame buffers rely on the underlying memory manager for allocating backing
+ * storage. When creating a frame buffer applications pass a memory handle
+ * (or a list of memory handles for multi-planar formats) through the
+ * struct &drm_mode_fb_cmd2 argument. For drivers using GEM as their userspace
+ * buffer management interface this would be a GEM handle.  Drivers are however
+ * free to use their own backing storage object handles, e.g. vmwgfx directly
+ * exposes special TTM handles to userspace and so expects TTM handles in the
+ * create ioctl and not GEM handles.
+ *
+ * Framebuffers are tracked with struct &drm_framebuffer. They are published
+ * using drm_framebuffer_init() - after calling that function userspace can use
+ * and access the framebuffer object. The helper function
+ * drm_helper_mode_fill_fb_struct() can be used to pre-fill the required
+ * metadata fields.
+ *
+ * The lifetime of a drm framebuffer is controlled with a reference count,
+ * drivers can grab additional references with drm_framebuffer_reference() and
+ * drop them again with drm_framebuffer_unreference(). For driver-private
+ * framebuffers for which the last reference is never dropped (e.g. for the
+ * fbdev framebuffer when the struct struct &drm_framebuffer is embedded into
+ * the fbdev helper struct) drivers can manually clean up a framebuffer at
+ * module unload time with drm_framebuffer_unregister_private(). But doing this
+ * is not recommended, and it's better to have a normal free-standing struct
+ * &drm_framebuffer.
+ */
+
+int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
+				     uint32_t src_w, uint32_t src_h,
+				     const struct drm_framebuffer *fb)
+{
+	unsigned int fb_width, fb_height;
+
+	fb_width = fb->width << 16;
+	fb_height = fb->height << 16;
+
+	/* Make sure source coordinates are inside the fb. */
+	if (src_w > fb_width ||
+	    src_x > fb_width - src_w ||
+	    src_h > fb_height ||
+	    src_y > fb_height - src_h) {
+		DRM_DEBUG_KMS("Invalid source coordinates "
+			      "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+			      src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
+			      src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
+			      src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
+			      src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
+		return -ENOSPC;
+	}
+
+	return 0;
+}
+
+/**
+ * drm_mode_addfb - add an FB to the graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Add a new FB to the specified CRTC, given a user request. This is the
+ * original addfb ioctl which only supported RGB formats.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_addfb(struct drm_device *dev,
+		   void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_fb_cmd *or = data;
+	struct drm_mode_fb_cmd2 r = {};
+	int ret;
+
+	/* convert to new format and call new ioctl */
+	r.fb_id = or->fb_id;
+	r.width = or->width;
+	r.height = or->height;
+	r.pitches[0] = or->pitch;
+	r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+	r.handles[0] = or->handle;
+
+	ret = drm_mode_addfb2(dev, &r, file_priv);
+	if (ret)
+		return ret;
+
+	or->fb_id = r.fb_id;
+
+	return 0;
+}
+
+static int format_check(const struct drm_mode_fb_cmd2 *r)
+{
+	uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
+	char *format_name;
+
+	switch (format) {
+	case DRM_FORMAT_C8:
+	case DRM_FORMAT_RGB332:
+	case DRM_FORMAT_BGR233:
+	case DRM_FORMAT_XRGB4444:
+	case DRM_FORMAT_XBGR4444:
+	case DRM_FORMAT_RGBX4444:
+	case DRM_FORMAT_BGRX4444:
+	case DRM_FORMAT_ARGB4444:
+	case DRM_FORMAT_ABGR4444:
+	case DRM_FORMAT_RGBA4444:
+	case DRM_FORMAT_BGRA4444:
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_XBGR1555:
+	case DRM_FORMAT_RGBX5551:
+	case DRM_FORMAT_BGRX5551:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_ABGR1555:
+	case DRM_FORMAT_RGBA5551:
+	case DRM_FORMAT_BGRA5551:
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_BGR565:
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_BGR888:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_BGRX8888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_RGBA8888:
+	case DRM_FORMAT_BGRA8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_RGBX1010102:
+	case DRM_FORMAT_BGRX1010102:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ABGR2101010:
+	case DRM_FORMAT_RGBA1010102:
+	case DRM_FORMAT_BGRA1010102:
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_AYUV:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV444:
+	case DRM_FORMAT_YVU444:
+		return 0;
+	default:
+		format_name = drm_get_format_name(r->pixel_format);
+		DRM_DEBUG_KMS("invalid pixel format %s\n", format_name);
+		kfree(format_name);
+		return -EINVAL;
+	}
+}
+
+static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
+{
+	int ret, hsub, vsub, num_planes, i;
+
+	ret = format_check(r);
+	if (ret) {
+		char *format_name = drm_get_format_name(r->pixel_format);
+		DRM_DEBUG_KMS("bad framebuffer format %s\n", format_name);
+		kfree(format_name);
+		return ret;
+	}
+
+	hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
+	vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
+	num_planes = drm_format_num_planes(r->pixel_format);
+
+	if (r->width == 0 || r->width % hsub) {
+		DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
+		return -EINVAL;
+	}
+
+	if (r->height == 0 || r->height % vsub) {
+		DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_planes; i++) {
+		unsigned int width = r->width / (i != 0 ? hsub : 1);
+		unsigned int height = r->height / (i != 0 ? vsub : 1);
+		unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
+
+		if (!r->handles[i]) {
+			DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
+			return -EINVAL;
+		}
+
+		if ((uint64_t) width * cpp > UINT_MAX)
+			return -ERANGE;
+
+		if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
+			return -ERANGE;
+
+		if (r->pitches[i] < width * cpp) {
+			DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
+			return -EINVAL;
+		}
+
+		if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) {
+			DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
+				      r->modifier[i], i);
+			return -EINVAL;
+		}
+
+		/* modifier specific checks: */
+		switch (r->modifier[i]) {
+		case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
+			/* NOTE: the pitch restriction may be lifted later if it turns
+			 * out that no hw has this restriction:
+			 */
+			if (r->pixel_format != DRM_FORMAT_NV12 ||
+					width % 128 || height % 32 ||
+					r->pitches[i] % 128) {
+				DRM_DEBUG_KMS("bad modifier data for plane %d\n", i);
+				return -EINVAL;
+			}
+			break;
+
+		default:
+			break;
+		}
+	}
+
+	for (i = num_planes; i < 4; i++) {
+		if (r->modifier[i]) {
+			DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i);
+			return -EINVAL;
+		}
+
+		/* Pre-FB_MODIFIERS userspace didn't clear the structs properly. */
+		if (!(r->flags & DRM_MODE_FB_MODIFIERS))
+			continue;
+
+		if (r->handles[i]) {
+			DRM_DEBUG_KMS("buffer object handle for unused plane %d\n", i);
+			return -EINVAL;
+		}
+
+		if (r->pitches[i]) {
+			DRM_DEBUG_KMS("non-zero pitch for unused plane %d\n", i);
+			return -EINVAL;
+		}
+
+		if (r->offsets[i]) {
+			DRM_DEBUG_KMS("non-zero offset for unused plane %d\n", i);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+struct drm_framebuffer *
+drm_internal_framebuffer_create(struct drm_device *dev,
+				const struct drm_mode_fb_cmd2 *r,
+				struct drm_file *file_priv)
+{
+	struct drm_mode_config *config = &dev->mode_config;
+	struct drm_framebuffer *fb;
+	int ret;
+
+	if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
+		DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if ((config->min_width > r->width) || (r->width > config->max_width)) {
+		DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
+			  r->width, config->min_width, config->max_width);
+		return ERR_PTR(-EINVAL);
+	}
+	if ((config->min_height > r->height) || (r->height > config->max_height)) {
+		DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
+			  r->height, config->min_height, config->max_height);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (r->flags & DRM_MODE_FB_MODIFIERS &&
+	    !dev->mode_config.allow_fb_modifiers) {
+		DRM_DEBUG_KMS("driver does not support fb modifiers\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = framebuffer_check(r);
+	if (ret)
+		return ERR_PTR(ret);
+
+	fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
+	if (IS_ERR(fb)) {
+		DRM_DEBUG_KMS("could not create framebuffer\n");
+		return fb;
+	}
+
+	return fb;
+}
+
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Add a new FB to the specified CRTC, given a user request with format. This is
+ * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
+ * and uses fourcc codes as pixel format specifiers.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_addfb2(struct drm_device *dev,
+		    void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_fb_cmd2 *r = data;
+	struct drm_framebuffer *fb;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	fb = drm_internal_framebuffer_create(dev, r, file_priv);
+	if (IS_ERR(fb))
+		return PTR_ERR(fb);
+
+	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+	r->fb_id = fb->base.id;
+
+	/* Transfer ownership to the filp for reaping on close */
+	mutex_lock(&file_priv->fbs_lock);
+	list_add(&fb->filp_head, &file_priv->fbs);
+	mutex_unlock(&file_priv->fbs_lock);
+
+	return 0;
+}
+
+struct drm_mode_rmfb_work {
+	struct work_struct work;
+	struct list_head fbs;
+};
+
+static void drm_mode_rmfb_work_fn(struct work_struct *w)
+{
+	struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work);
+
+	while (!list_empty(&arg->fbs)) {
+		struct drm_framebuffer *fb =
+			list_first_entry(&arg->fbs, typeof(*fb), filp_head);
+
+		list_del_init(&fb->filp_head);
+		drm_framebuffer_remove(fb);
+	}
+}
+
+/**
+ * drm_mode_rmfb - remove an FB from the configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Remove the FB specified by the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_rmfb(struct drm_device *dev,
+		   void *data, struct drm_file *file_priv)
+{
+	struct drm_framebuffer *fb = NULL;
+	struct drm_framebuffer *fbl = NULL;
+	uint32_t *id = data;
+	int found = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	fb = drm_framebuffer_lookup(dev, *id);
+	if (!fb)
+		return -ENOENT;
+
+	mutex_lock(&file_priv->fbs_lock);
+	list_for_each_entry(fbl, &file_priv->fbs, filp_head)
+		if (fb == fbl)
+			found = 1;
+	if (!found) {
+		mutex_unlock(&file_priv->fbs_lock);
+		goto fail_unref;
+	}
+
+	list_del_init(&fb->filp_head);
+	mutex_unlock(&file_priv->fbs_lock);
+
+	/* drop the reference we picked up in framebuffer lookup */
+	drm_framebuffer_unreference(fb);
+
+	/*
+	 * we now own the reference that was stored in the fbs list
+	 *
+	 * drm_framebuffer_remove may fail with -EINTR on pending signals,
+	 * so run this in a separate stack as there's no way to correctly
+	 * handle this after the fb is already removed from the lookup table.
+	 */
+	if (drm_framebuffer_read_refcount(fb) > 1) {
+		struct drm_mode_rmfb_work arg;
+
+		INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
+		INIT_LIST_HEAD(&arg.fbs);
+		list_add_tail(&fb->filp_head, &arg.fbs);
+
+		schedule_work(&arg.work);
+		flush_work(&arg.work);
+		destroy_work_on_stack(&arg.work);
+	} else
+		drm_framebuffer_unreference(fb);
+
+	return 0;
+
+fail_unref:
+	drm_framebuffer_unreference(fb);
+	return -ENOENT;
+}
+
+/**
+ * drm_mode_getfb - get FB info
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_getfb(struct drm_device *dev,
+		   void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_fb_cmd *r = data;
+	struct drm_framebuffer *fb;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	fb = drm_framebuffer_lookup(dev, r->fb_id);
+	if (!fb)
+		return -ENOENT;
+
+	r->height = fb->height;
+	r->width = fb->width;
+	r->depth = fb->depth;
+	r->bpp = fb->bits_per_pixel;
+	r->pitch = fb->pitches[0];
+	if (fb->funcs->create_handle) {
+		if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) ||
+		    drm_is_control_client(file_priv)) {
+			ret = fb->funcs->create_handle(fb, file_priv,
+						       &r->handle);
+		} else {
+			/* GET_FB() is an unprivileged ioctl so we must not
+			 * return a buffer-handle to non-master processes! For
+			 * backwards-compatibility reasons, we cannot make
+			 * GET_FB() privileged, so just return an invalid handle
+			 * for non-masters. */
+			r->handle = 0;
+			ret = 0;
+		}
+	} else {
+		ret = -ENODEV;
+	}
+
+	drm_framebuffer_unreference(fb);
+
+	return ret;
+}
+
+/**
+ * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB and flush out the damaged area supplied by userspace as a clip
+ * rectangle list. Generic userspace which does frontbuffer rendering must call
+ * this ioctl to flush out the changes on manual-update display outputs, e.g.
+ * usb display-link, mipi manual update panels or edp panel self refresh modes.
+ *
+ * Modesetting drivers which always update the frontbuffer do not need to
+ * implement the corresponding ->dirty framebuffer callback.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+			   void *data, struct drm_file *file_priv)
+{
+	struct drm_clip_rect __user *clips_ptr;
+	struct drm_clip_rect *clips = NULL;
+	struct drm_mode_fb_dirty_cmd *r = data;
+	struct drm_framebuffer *fb;
+	unsigned flags;
+	int num_clips;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	fb = drm_framebuffer_lookup(dev, r->fb_id);
+	if (!fb)
+		return -ENOENT;
+
+	num_clips = r->num_clips;
+	clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
+
+	if (!num_clips != !clips_ptr) {
+		ret = -EINVAL;
+		goto out_err1;
+	}
+
+	flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+	/* If userspace annotates copy, clips must come in pairs */
+	if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+		ret = -EINVAL;
+		goto out_err1;
+	}
+
+	if (num_clips && clips_ptr) {
+		if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+			ret = -EINVAL;
+			goto out_err1;
+		}
+		clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
+		if (!clips) {
+			ret = -ENOMEM;
+			goto out_err1;
+		}
+
+		ret = copy_from_user(clips, clips_ptr,
+				     num_clips * sizeof(*clips));
+		if (ret) {
+			ret = -EFAULT;
+			goto out_err2;
+		}
+	}
+
+	if (fb->funcs->dirty) {
+		ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
+				       clips, num_clips);
+	} else {
+		ret = -ENOSYS;
+	}
+
+out_err2:
+	kfree(clips);
+out_err1:
+	drm_framebuffer_unreference(fb);
+
+	return ret;
+}
+
+/**
+ * drm_fb_release - remove and free the FBs on this file
+ * @priv: drm file for the ioctl
+ *
+ * Destroy all the FBs associated with @filp.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+void drm_fb_release(struct drm_file *priv)
+{
+	struct drm_framebuffer *fb, *tfb;
+	struct drm_mode_rmfb_work arg;
+
+	INIT_LIST_HEAD(&arg.fbs);
+
+	/*
+	 * When the file gets released that means no one else can access the fb
+	 * list any more, so no need to grab fpriv->fbs_lock. And we need to
+	 * avoid upsetting lockdep since the universal cursor code adds a
+	 * framebuffer while holding mutex locks.
+	 *
+	 * Note that a real deadlock between fpriv->fbs_lock and the modeset
+	 * locks is impossible here since no one else but this function can get
+	 * at it any more.
+	 */
+	list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
+		if (drm_framebuffer_read_refcount(fb) > 1) {
+			list_move_tail(&fb->filp_head, &arg.fbs);
+		} else {
+			list_del_init(&fb->filp_head);
+
+			/* This drops the fpriv->fbs reference. */
+			drm_framebuffer_unreference(fb);
+		}
+	}
+
+	if (!list_empty(&arg.fbs)) {
+		INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
+
+		schedule_work(&arg.work);
+		flush_work(&arg.work);
+		destroy_work_on_stack(&arg.work);
+	}
+}
+
+void drm_framebuffer_free(struct kref *kref)
+{
+	struct drm_framebuffer *fb =
+			container_of(kref, struct drm_framebuffer, base.refcount);
+	struct drm_device *dev = fb->dev;
+
+	/*
+	 * The lookup idr holds a weak reference, which has not necessarily been
+	 * removed at this point. Check for that.
+	 */
+	drm_mode_object_unregister(dev, &fb->base);
+
+	fb->funcs->destroy(fb);
+}
+
+/**
+ * drm_framebuffer_init - initialize a framebuffer
+ * @dev: DRM device
+ * @fb: framebuffer to be initialized
+ * @funcs: ... with these functions
+ *
+ * Allocates an ID for the framebuffer's parent mode object, sets its mode
+ * functions & device file and adds it to the master fd list.
+ *
+ * IMPORTANT:
+ * This functions publishes the fb and makes it available for concurrent access
+ * by other users. Which means by this point the fb _must_ be fully set up -
+ * since all the fb attributes are invariant over its lifetime, no further
+ * locking but only correct reference counting is required.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+			 const struct drm_framebuffer_funcs *funcs)
+{
+	int ret;
+
+	INIT_LIST_HEAD(&fb->filp_head);
+	fb->dev = dev;
+	fb->funcs = funcs;
+
+	ret = drm_mode_object_get_reg(dev, &fb->base, DRM_MODE_OBJECT_FB,
+				      false, drm_framebuffer_free);
+	if (ret)
+		goto out;
+
+	mutex_lock(&dev->mode_config.fb_lock);
+	dev->mode_config.num_fb++;
+	list_add(&fb->head, &dev->mode_config.fb_list);
+	mutex_unlock(&dev->mode_config.fb_lock);
+
+	drm_mode_object_register(dev, &fb->base);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(drm_framebuffer_init);
+
+/**
+ * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
+ * @dev: drm device
+ * @id: id of the fb object
+ *
+ * If successful, this grabs an additional reference to the framebuffer -
+ * callers need to make sure to eventually unreference the returned framebuffer
+ * again, using @drm_framebuffer_unreference.
+ */
+struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+					       uint32_t id)
+{
+	struct drm_mode_object *obj;
+	struct drm_framebuffer *fb = NULL;
+
+	obj = __drm_mode_object_find(dev, id, DRM_MODE_OBJECT_FB);
+	if (obj)
+		fb = obj_to_fb(obj);
+	return fb;
+}
+EXPORT_SYMBOL(drm_framebuffer_lookup);
+
+/**
+ * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
+ * @fb: fb to unregister
+ *
+ * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
+ * those used for fbdev. Note that the caller must hold a reference of it's own,
+ * i.e. the object may not be destroyed through this call (since it'll lead to a
+ * locking inversion).
+ */
+void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev;
+
+	if (!fb)
+		return;
+
+	dev = fb->dev;
+
+	/* Mark fb as reaped and drop idr ref. */
+	drm_mode_object_unregister(dev, &fb->base);
+}
+EXPORT_SYMBOL(drm_framebuffer_unregister_private);
+
+/**
+ * drm_framebuffer_cleanup - remove a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * Cleanup framebuffer. This function is intended to be used from the drivers
+ * ->destroy callback. It can also be used to clean up driver private
+ * framebuffers embedded into a larger structure.
+ *
+ * Note that this function does not remove the fb from active usuage - if it is
+ * still used anywhere, hilarity can ensue since userspace could call getfb on
+ * the id and get back -EINVAL. Obviously no concern at driver unload time.
+ *
+ * Also, the framebuffer will not be removed from the lookup idr - for
+ * user-created framebuffers this will happen in in the rmfb ioctl. For
+ * driver-private objects (e.g. for fbdev) drivers need to explicitly call
+ * drm_framebuffer_unregister_private.
+ */
+void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = fb->dev;
+
+	mutex_lock(&dev->mode_config.fb_lock);
+	list_del(&fb->head);
+	dev->mode_config.num_fb--;
+	mutex_unlock(&dev->mode_config.fb_lock);
+}
+EXPORT_SYMBOL(drm_framebuffer_cleanup);
+
+/**
+ * drm_framebuffer_remove - remove and unreference a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * Scans all the CRTCs and planes in @dev's mode_config.  If they're
+ * using @fb, removes it, setting it to NULL. Then drops the reference to the
+ * passed-in framebuffer. Might take the modeset locks.
+ *
+ * Note that this function optimizes the cleanup away if the caller holds the
+ * last reference to the framebuffer. It is also guaranteed to not take the
+ * modeset locks in this case.
+ */
+void drm_framebuffer_remove(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev;
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+
+	if (!fb)
+		return;
+
+	dev = fb->dev;
+
+	WARN_ON(!list_empty(&fb->filp_head));
+
+	/*
+	 * drm ABI mandates that we remove any deleted framebuffers from active
+	 * useage. But since most sane clients only remove framebuffers they no
+	 * longer need, try to optimize this away.
+	 *
+	 * Since we're holding a reference ourselves, observing a refcount of 1
+	 * means that we're the last holder and can skip it. Also, the refcount
+	 * can never increase from 1 again, so we don't need any barriers or
+	 * locks.
+	 *
+	 * Note that userspace could try to race with use and instate a new
+	 * usage _after_ we've cleared all current ones. End result will be an
+	 * in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot
+	 * in this manner.
+	 */
+	if (drm_framebuffer_read_refcount(fb) > 1) {
+		drm_modeset_lock_all(dev);
+		/* remove from any CRTC */
+		drm_for_each_crtc(crtc, dev) {
+			if (crtc->primary->fb == fb) {
+				/* should turn off the crtc */
+				if (drm_crtc_force_disable(crtc))
+					DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+			}
+		}
+
+		drm_for_each_plane(plane, dev) {
+			if (plane->fb == fb)
+				drm_plane_force_disable(plane);
+		}
+		drm_modeset_unlock_all(dev);
+	}
+
+	drm_framebuffer_unreference(fb);
+}
+EXPORT_SYMBOL(drm_framebuffer_remove);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 9134ae1..465bacd 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -257,7 +257,7 @@
 
 	if (drm_core_check_feature(dev, DRIVER_PRIME))
 		drm_gem_remove_prime_handles(obj, file_priv);
-	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+	drm_vma_node_revoke(&obj->vma_node, file_priv);
 
 	if (dev->driver->gem_close_object)
 		dev->driver->gem_close_object(obj, file_priv);
@@ -372,7 +372,7 @@
 
 	handle = ret;
 
-	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
+	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
 	if (ret)
 		goto err_remove;
 
@@ -386,7 +386,7 @@
 	return 0;
 
 err_revoke:
-	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+	drm_vma_node_revoke(&obj->vma_node, file_priv);
 err_remove:
 	spin_lock(&file_priv->table_lock);
 	idr_remove(&file_priv->object_idr, handle);
@@ -991,7 +991,7 @@
 	if (!obj)
 		return -EINVAL;
 
-	if (!drm_vma_node_is_allowed(node, filp)) {
+	if (!drm_vma_node_is_allowed(node, priv)) {
 		drm_gem_object_unreference_unlocked(obj);
 		return -EACCES;
 	}
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
index 3d2e91c..b404287 100644
--- a/drivers/gpu/drm/drm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -65,30 +65,34 @@
 
 int drm_global_item_ref(struct drm_global_reference *ref)
 {
-	int ret;
+	int ret = 0;
 	struct drm_global_item *item = &glob[ref->global_type];
 
 	mutex_lock(&item->mutex);
 	if (item->refcount == 0) {
-		item->object = kzalloc(ref->size, GFP_KERNEL);
-		if (unlikely(item->object == NULL)) {
+		ref->object = kzalloc(ref->size, GFP_KERNEL);
+		if (unlikely(ref->object == NULL)) {
 			ret = -ENOMEM;
-			goto out_err;
+			goto error_unlock;
 		}
-
-		ref->object = item->object;
 		ret = ref->init(ref);
 		if (unlikely(ret != 0))
-			goto out_err;
+			goto error_free;
 
+		item->object = ref->object;
+	} else {
+		ref->object = item->object;
 	}
+
 	++item->refcount;
-	ref->object = item->object;
 	mutex_unlock(&item->mutex);
 	return 0;
-out_err:
+
+error_free:
+	kfree(ref->object);
+	ref->object = NULL;
+error_unlock:
 	mutex_unlock(&item->mutex);
-	item->object = NULL;
 	return ret;
 }
 EXPORT_SYMBOL(drm_global_item_ref);
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 7b30b30..dae18e5 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -142,7 +142,7 @@
 			      unsigned long add)
 {
 	int ret;
-	unsigned long mask = (1 << bits) - 1;
+	unsigned long mask = (1UL << bits) - 1;
 	unsigned long first, unshifted_key;
 
 	unshifted_key = hash_long(seed, bits);
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 9ae353f..ffb2ab3 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -54,9 +54,6 @@
 
 	mutex_lock(&dev->master_mutex);
 	master = dev->master;
-	if (!master)
-		goto out_unlock;
-
 	seq_printf(m, "%s", dev->driver->name);
 	if (dev->dev)
 		seq_printf(m, " dev=%s", dev_name(dev->dev));
@@ -65,7 +62,6 @@
 	if (dev->unique)
 		seq_printf(m, " unique=%s", dev->unique);
 	seq_printf(m, "\n");
-out_unlock:
 	mutex_unlock(&dev->master_mutex);
 
 	return 0;
@@ -80,6 +76,7 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_file *priv;
+	kuid_t uid;
 
 	seq_printf(m,
 		   "%20s %5s %3s master a %5s %10s\n",
@@ -98,13 +95,14 @@
 
 		rcu_read_lock(); /* locks pid_task()->comm */
 		task = pid_task(priv->pid, PIDTYPE_PID);
+		uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
 		seq_printf(m, "%20s %5d %3d   %c    %c %5d %10u\n",
 			   task ? task->comm : "<unknown>",
 			   pid_vnr(priv->pid),
 			   priv->minor->index,
 			   drm_is_current_master(priv) ? 'y' : 'n',
 			   priv->authenticated ? 'y' : 'n',
-			   from_kuid_munged(seq_user_ns(m), priv->uid),
+			   from_kuid_munged(seq_user_ns(m), uid),
 			   priv->magic);
 		rcu_read_unlock();
 	}
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index b86dc9b..e66af28 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -21,6 +21,9 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#define DRM_IF_MAJOR 1
+#define DRM_IF_MINOR 4
+
 /* drm_irq.c */
 extern unsigned int drm_timestamp_monotonic;
 
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index a628975..867ab8c 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -32,7 +32,6 @@
 #include <linux/export.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_core.h>
 
 #define DRM_IOCTL_VERSION32		DRM_IOWR(0x00, drm_version32_t)
 #define DRM_IOCTL_GET_UNIQUE32		DRM_IOWR(0x01, drm_unique32_t)
@@ -346,6 +345,7 @@
 	struct drm_stats __user *stats;
 	int i, err;
 
+	memset(&s32, 0, sizeof(drm_stats32_t));
 	stats = compat_alloc_user_space(sizeof(*stats));
 	if (!stats)
 		return -EFAULT;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 33af4a5..0ad2c47 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -29,7 +29,6 @@
  */
 
 #include <drm/drmP.h>
-#include <drm/drm_core.h>
 #include <drm/drm_auth.h>
 #include "drm_legacy.h"
 #include "drm_internal.h"
@@ -189,9 +188,8 @@
 	 */
 	if (client->idx == 0) {
 		client->auth = file_priv->authenticated;
-		client->pid = pid_vnr(file_priv->pid);
-		client->uid = from_kuid_munged(current_user_ns(),
-					       file_priv->uid);
+		client->pid = task_pid_vnr(current);
+		client->uid = overflowuid;
 		client->magic = 0;
 		client->iocs = 0;
 
@@ -228,6 +226,7 @@
 static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_get_cap *req = data;
+	struct drm_crtc *crtc;
 
 	req->value = 0;
 	switch (req->capability) {
@@ -254,6 +253,13 @@
 	case DRM_CAP_ASYNC_PAGE_FLIP:
 		req->value = dev->mode_config.async_page_flip;
 		break;
+	case DRM_CAP_PAGE_FLIP_TARGET:
+		req->value = 1;
+		drm_for_each_crtc(crtc, dev) {
+			if (!crtc->funcs->page_flip_target)
+				req->value = 0;
+		}
+		break;
 	case DRM_CAP_CURSOR_WIDTH:
 		if (dev->mode_config.cursor_width)
 			req->value = dev->mode_config.cursor_width;
@@ -714,9 +720,9 @@
 	if (ksize > in_size)
 		memset(kdata + in_size, 0, ksize - in_size);
 
-	/* Enforce sane locking for kms driver ioctls. Core ioctls are
+	/* Enforce sane locking for modern driver ioctls. Core ioctls are
 	 * too messy still. */
-	if ((drm_core_check_feature(dev, DRIVER_MODESET) && is_driver_ioctl) ||
+	if ((!drm_core_check_feature(dev, DRIVER_LEGACY) && is_driver_ioctl) ||
 	    (ioctl->flags & DRM_UNLOCKED))
 		retcode = func(dev, kdata, file_priv);
 	else {
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 77f357b..b969a64 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -482,7 +482,7 @@
 		return ret;
 	}
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+	if (drm_core_check_feature(dev, DRIVER_LEGACY))
 		vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
 
 	/* After installing handler */
@@ -491,7 +491,7 @@
 
 	if (ret < 0) {
 		dev->irq_enabled = false;
-		if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		if (drm_core_check_feature(dev, DRIVER_LEGACY))
 			vga_client_register(dev->pdev, NULL, NULL, NULL);
 		free_irq(irq, dev);
 	} else {
@@ -557,7 +557,7 @@
 
 	DRM_DEBUG("irq=%d\n", dev->irq);
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+	if (drm_core_check_feature(dev, DRIVER_LEGACY))
 		vga_client_register(dev->pdev, NULL, NULL, NULL);
 
 	if (dev->driver->irq_uninstall)
@@ -592,7 +592,7 @@
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 		return 0;
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
 		return 0;
 	/* UMS was only ever supported on pci devices. */
 	if (WARN_ON(!dev->pdev))
@@ -713,10 +713,10 @@
  * Negative value on error, failure or if not supported in current
  * video mode:
  *
- * -EINVAL   - Invalid CRTC.
- * -EAGAIN   - Temporary unavailable, e.g., called before initial modeset.
- * -ENOTSUPP - Function not supported in current display mode.
- * -EIO      - Failed, e.g., due to failed scanout position query.
+ * -EINVAL    Invalid CRTC.
+ * -EAGAIN    Temporary unavailable, e.g., called before initial modeset.
+ * -ENOTSUPP  Function not supported in current display mode.
+ * -EIO       Failed, e.g., due to failed scanout position query.
  *
  * Returns or'ed positive status flags on success:
  *
@@ -1008,6 +1008,31 @@
  * period. This helper function implements exactly the required vblank arming
  * behaviour.
  *
+ * NOTE: Drivers using this to send out the event in struct &drm_crtc_state
+ * as part of an atomic commit must ensure that the next vblank happens at
+ * exactly the same time as the atomic commit is committed to the hardware. This
+ * function itself does **not** protect again the next vblank interrupt racing
+ * with either this function call or the atomic commit operation. A possible
+ * sequence could be:
+ *
+ * 1. Driver commits new hardware state into vblank-synchronized registers.
+ * 2. A vblank happens, committing the hardware state. Also the corresponding
+ *    vblank interrupt is fired off and fully processed by the interrupt
+ *    handler.
+ * 3. The atomic commit operation proceeds to call drm_crtc_arm_vblank_event().
+ * 4. The event is only send out for the next vblank, which is wrong.
+ *
+ * An equivalent race can happen when the driver calls
+ * drm_crtc_arm_vblank_event() before writing out the new hardware state.
+ *
+ * The only way to make this work safely is to prevent the vblank from firing
+ * (and the hardware from committing anything else) until the entire atomic
+ * commit sequence has run to completion. If the hardware does not have such a
+ * feature (e.g. using a "go" bit), then it is unsafe to use this functions.
+ * Instead drivers need to manually send out the event from their interrupt
+ * handler by calling drm_crtc_send_vblank_event() and make sure that there's no
+ * possible race with the hardware committing the atomic update.
+ *
  * Caller must hold event lock. Caller must also hold a vblank reference for
  * the event @e, which will be dropped when the next vblank arrives.
  */
@@ -1030,8 +1055,11 @@
  * @crtc: the source CRTC of the vblank event
  * @e: the event to send
  *
- * Updates sequence # and timestamp on event, and sends it to userspace.
- * Caller must hold event lock.
+ * Updates sequence # and timestamp on event for the most recently processed
+ * vblank, and sends it to userspace.  Caller must hold event lock.
+ *
+ * See drm_crtc_arm_vblank_event() for a helper which can be used in certain
+ * situation, especially to send out events for atomic commit operations.
  */
 void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
 				struct drm_pending_vblank_event *e)
@@ -1295,7 +1323,7 @@
 		if (e->pipe != pipe)
 			continue;
 		DRM_DEBUG("Sending premature vblank event on disable: "
-			  "wanted %d, current %d\n",
+			  "wanted %u, current %u\n",
 			  e->event.sequence, seq);
 		list_del(&e->base.link);
 		drm_vblank_put(dev, pipe);
@@ -1519,7 +1547,7 @@
 		return 0;
 
 	/* KMS drivers handle this internally */
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
 		return 0;
 
 	pipe = modeset->crtc;
@@ -1585,7 +1613,7 @@
 
 	seq = drm_vblank_count_and_time(dev, pipe, &now);
 
-	DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n",
+	DRM_DEBUG("event on vblank count %u, current %u, crtc %u\n",
 		  vblwait->request.sequence, seq, pipe);
 
 	trace_drm_vblank_event_queued(current->pid, pipe,
@@ -1693,7 +1721,7 @@
 		return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
 	}
 
-	DRM_DEBUG("waiting on vblank count %d, crtc %u\n",
+	DRM_DEBUG("waiting on vblank count %u, crtc %u\n",
 		  vblwait->request.sequence, pipe);
 	DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
 		    (((drm_vblank_count(dev, pipe) -
@@ -1708,7 +1736,7 @@
 		vblwait->reply.tval_sec = now.tv_sec;
 		vblwait->reply.tval_usec = now.tv_usec;
 
-		DRM_DEBUG("returning %d to client\n",
+		DRM_DEBUG("returning %u to client\n",
 			  vblwait->reply.sequence);
 	} else {
 		DRM_DEBUG("vblank wait interrupted by signal\n");
@@ -1735,7 +1763,7 @@
 		if ((seq - e->event.sequence) > (1<<23))
 			continue;
 
-		DRM_DEBUG("vblank event on %d, current %d\n",
+		DRM_DEBUG("vblank event on %u, current %u\n",
 			  e->event.sequence, seq);
 
 		list_del(&e->base.link);
@@ -1826,6 +1854,7 @@
  */
 u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe)
 {
+	WARN_ON_ONCE(dev->max_vblank_count != 0);
 	return 0;
 }
 EXPORT_SYMBOL(drm_vblank_no_hw_counter);
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 3187c4b..45db36c 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -27,7 +27,8 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_fb_helper.h>
-#include <drm/drm_dp_aux_dev.h>
+
+#include "drm_crtc_helper_internal.h"
 
 MODULE_AUTHOR("David Airlie, Jesse Barnes");
 MODULE_DESCRIPTION("DRM KMS helper");
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 48ac0eb..c901f3c 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -163,7 +163,7 @@
 	struct drm_master *master = file_priv->master;
 	int ret = 0;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	++file_priv->lock_count;
@@ -252,7 +252,7 @@
 	struct drm_lock *lock = data;
 	struct drm_master *master = file_priv->master;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	if (lock->context == DRM_KERNEL_CONTEXT) {
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index af0d471..1160a57 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -999,6 +999,27 @@
 EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
 
 /**
+ * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
+ *    data used by the interface
+ * @dsi: DSI peripheral device
+ * @format: pixel format
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format)
+{
+	ssize_t err;
+
+	err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PIXEL_FORMAT, &format,
+				 sizeof(format));
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
+
+/**
  * mipi_dsi_dcs_set_tear_scanline() - set the scanline to use as trigger for
  *    the Tearing Effect output signal of the display module
  * @dsi: DSI peripheral device
@@ -1021,25 +1042,53 @@
 EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline);
 
 /**
- * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
- *    data used by the interface
+ * mipi_dsi_dcs_set_display_brightness() - sets the brightness value of the
+ *    display
  * @dsi: DSI peripheral device
- * @format: pixel format
+ * @brightness: brightness value
  *
  * Return: 0 on success or a negative error code on failure.
  */
-int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format)
+int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi,
+					u16 brightness)
 {
+	u8 payload[2] = { brightness & 0xff, brightness >> 8 };
 	ssize_t err;
 
-	err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PIXEL_FORMAT, &format,
-				 sizeof(format));
+	err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+				 payload, sizeof(payload));
 	if (err < 0)
 		return err;
 
 	return 0;
 }
-EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
+EXPORT_SYMBOL(mipi_dsi_dcs_set_display_brightness);
+
+/**
+ * mipi_dsi_dcs_get_display_brightness() - gets the current brightness value
+ *    of the display
+ * @dsi: DSI peripheral device
+ * @brightness: brightness value
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
+					u16 *brightness)
+{
+	ssize_t err;
+
+	err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
+				brightness, sizeof(*brightness));
+	if (err <= 0) {
+		if (err == 0)
+			err = -ENODATA;
+
+		return err;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness);
 
 static int mipi_dsi_drv_probe(struct device *dev)
 {
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index cb39f45..11d44a1 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -46,6 +46,7 @@
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/export.h>
+#include <linux/interval_tree_generic.h>
 
 /**
  * DOC: Overview
@@ -103,6 +104,72 @@
 						u64 end,
 						enum drm_mm_search_flags flags);
 
+#define START(node) ((node)->start)
+#define LAST(node)  ((node)->start + (node)->size - 1)
+
+INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
+		     u64, __subtree_last,
+		     START, LAST, static inline, drm_mm_interval_tree)
+
+struct drm_mm_node *
+drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
+{
+	return drm_mm_interval_tree_iter_first(&mm->interval_tree,
+					       start, last);
+}
+EXPORT_SYMBOL(drm_mm_interval_first);
+
+struct drm_mm_node *
+drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last)
+{
+	return drm_mm_interval_tree_iter_next(node, start, last);
+}
+EXPORT_SYMBOL(drm_mm_interval_next);
+
+static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
+					  struct drm_mm_node *node)
+{
+	struct drm_mm *mm = hole_node->mm;
+	struct rb_node **link, *rb;
+	struct drm_mm_node *parent;
+
+	node->__subtree_last = LAST(node);
+
+	if (hole_node->allocated) {
+		rb = &hole_node->rb;
+		while (rb) {
+			parent = rb_entry(rb, struct drm_mm_node, rb);
+			if (parent->__subtree_last >= node->__subtree_last)
+				break;
+
+			parent->__subtree_last = node->__subtree_last;
+			rb = rb_parent(rb);
+		}
+
+		rb = &hole_node->rb;
+		link = &hole_node->rb.rb_right;
+	} else {
+		rb = NULL;
+		link = &mm->interval_tree.rb_node;
+	}
+
+	while (*link) {
+		rb = *link;
+		parent = rb_entry(rb, struct drm_mm_node, rb);
+		if (parent->__subtree_last < node->__subtree_last)
+			parent->__subtree_last = node->__subtree_last;
+		if (node->start < parent->start)
+			link = &parent->rb.rb_left;
+		else
+			link = &parent->rb.rb_right;
+	}
+
+	rb_link_node(&node->rb, rb, link);
+	rb_insert_augmented(&node->rb,
+			    &mm->interval_tree,
+			    &drm_mm_interval_tree_augment);
+}
+
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 				 struct drm_mm_node *node,
 				 u64 size, unsigned alignment,
@@ -150,9 +217,10 @@
 	node->color = color;
 	node->allocated = 1;
 
-	INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole_node->node_list);
 
+	drm_mm_interval_tree_add_node(hole_node, node);
+
 	BUG_ON(node->start + node->size > adj_end);
 
 	node->hole_follows = 0;
@@ -178,41 +246,54 @@
  */
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
+	u64 end = node->start + node->size;
 	struct drm_mm_node *hole;
-	u64 end;
-	u64 hole_start;
-	u64 hole_end;
+	u64 hole_start, hole_end;
 
-	BUG_ON(node == NULL);
+	if (WARN_ON(node->size == 0))
+		return -EINVAL;
 
 	end = node->start + node->size;
 
 	/* Find the relevant hole to add our node to */
-	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
-		if (hole_start > node->start || hole_end < end)
-			continue;
-
-		node->mm = mm;
-		node->allocated = 1;
-
-		INIT_LIST_HEAD(&node->hole_stack);
-		list_add(&node->node_list, &hole->node_list);
-
-		if (node->start == hole_start) {
-			hole->hole_follows = 0;
-			list_del_init(&hole->hole_stack);
-		}
-
-		node->hole_follows = 0;
-		if (end != hole_end) {
-			list_add(&node->hole_stack, &mm->hole_stack);
-			node->hole_follows = 1;
-		}
-
-		return 0;
+	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
+					       node->start, ~(u64)0);
+	if (hole) {
+		if (hole->start < end)
+			return -ENOSPC;
+	} else {
+		hole = list_entry(&mm->head_node.node_list,
+				  typeof(*hole), node_list);
 	}
 
-	return -ENOSPC;
+	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
+	if (!hole->hole_follows)
+		return -ENOSPC;
+
+	hole_start = __drm_mm_hole_node_start(hole);
+	hole_end = __drm_mm_hole_node_end(hole);
+	if (hole_start > node->start || hole_end < end)
+		return -ENOSPC;
+
+	node->mm = mm;
+	node->allocated = 1;
+
+	list_add(&node->node_list, &hole->node_list);
+
+	drm_mm_interval_tree_add_node(hole, node);
+
+	if (node->start == hole_start) {
+		hole->hole_follows = 0;
+		list_del(&hole->hole_stack);
+	}
+
+	node->hole_follows = 0;
+	if (end != hole_end) {
+		list_add(&node->hole_stack, &mm->hole_stack);
+		node->hole_follows = 1;
+	}
+
+	return 0;
 }
 EXPORT_SYMBOL(drm_mm_reserve_node);
 
@@ -239,6 +320,9 @@
 {
 	struct drm_mm_node *hole_node;
 
+	if (WARN_ON(size == 0))
+		return -EINVAL;
+
 	hole_node = drm_mm_search_free_generic(mm, size, alignment,
 					       color, sflags);
 	if (!hole_node)
@@ -299,9 +383,10 @@
 	node->color = color;
 	node->allocated = 1;
 
-	INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole_node->node_list);
 
+	drm_mm_interval_tree_add_node(hole_node, node);
+
 	BUG_ON(node->start < start);
 	BUG_ON(node->start < adj_start);
 	BUG_ON(node->start + node->size > adj_end);
@@ -340,6 +425,9 @@
 {
 	struct drm_mm_node *hole_node;
 
+	if (WARN_ON(size == 0))
+		return -EINVAL;
+
 	hole_node = drm_mm_search_free_in_range_generic(mm,
 							size, alignment, color,
 							start, end, sflags);
@@ -390,6 +478,7 @@
 	} else
 		list_move(&prev_node->hole_stack, &mm->hole_stack);
 
+	drm_mm_interval_tree_remove(node, &mm->interval_tree);
 	list_del(&node->node_list);
 	node->allocated = 0;
 }
@@ -516,11 +605,13 @@
 {
 	list_replace(&old->node_list, &new->node_list);
 	list_replace(&old->hole_stack, &new->hole_stack);
+	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
 	new->hole_follows = old->hole_follows;
 	new->mm = old->mm;
 	new->start = old->start;
 	new->size = old->size;
 	new->color = old->color;
+	new->__subtree_last = old->__subtree_last;
 
 	old->allocated = 0;
 	new->allocated = 1;
@@ -748,7 +839,6 @@
 
 	/* Clever trick to avoid a special case in the free hole tracking. */
 	INIT_LIST_HEAD(&mm->head_node.node_list);
-	INIT_LIST_HEAD(&mm->head_node.hole_stack);
 	mm->head_node.hole_follows = 1;
 	mm->head_node.scanned_block = 0;
 	mm->head_node.scanned_prev_free = 0;
@@ -758,6 +848,8 @@
 	mm->head_node.size = start - mm->head_node.start;
 	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
 
+	mm->interval_tree = RB_ROOT;
+
 	mm->color_adjust = NULL;
 }
 EXPORT_SYMBOL(drm_mm_init);
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
new file mode 100644
index 0000000..9f17085
--- /dev/null
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_mode_object.h>
+
+#include "drm_crtc_internal.h"
+
+/*
+ * Internal function to assign a slot in the object idr and optionally
+ * register the object into the idr.
+ */
+int drm_mode_object_get_reg(struct drm_device *dev,
+			    struct drm_mode_object *obj,
+			    uint32_t obj_type,
+			    bool register_obj,
+			    void (*obj_free_cb)(struct kref *kref))
+{
+	int ret;
+
+	mutex_lock(&dev->mode_config.idr_mutex);
+	ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
+	if (ret >= 0) {
+		/*
+		 * Set up the object linking under the protection of the idr
+		 * lock so that other users can't see inconsistent state.
+		 */
+		obj->id = ret;
+		obj->type = obj_type;
+		if (obj_free_cb) {
+			obj->free_cb = obj_free_cb;
+			kref_init(&obj->refcount);
+		}
+	}
+	mutex_unlock(&dev->mode_config.idr_mutex);
+
+	return ret < 0 ? ret : 0;
+}
+
+/**
+ * drm_mode_object_get - allocate a new modeset identifier
+ * @dev: DRM device
+ * @obj: object pointer, used to generate unique ID
+ * @obj_type: object type
+ *
+ * Create a unique identifier based on @ptr in @dev's identifier space.  Used
+ * for tracking modes, CRTCs and connectors. Note that despite the _get postfix
+ * modeset identifiers are _not_ reference counted. Hence don't use this for
+ * reference counted modeset objects like framebuffers.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_mode_object_get(struct drm_device *dev,
+			struct drm_mode_object *obj, uint32_t obj_type)
+{
+	return drm_mode_object_get_reg(dev, obj, obj_type, true, NULL);
+}
+
+void drm_mode_object_register(struct drm_device *dev,
+			      struct drm_mode_object *obj)
+{
+	mutex_lock(&dev->mode_config.idr_mutex);
+	idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
+	mutex_unlock(&dev->mode_config.idr_mutex);
+}
+
+/**
+ * drm_mode_object_unregister - free a modeset identifer
+ * @dev: DRM device
+ * @object: object to free
+ *
+ * Free @id from @dev's unique identifier pool.
+ * This function can be called multiple times, and guards against
+ * multiple removals.
+ * These modeset identifiers are _not_ reference counted. Hence don't use this
+ * for reference counted modeset objects like framebuffers.
+ */
+void drm_mode_object_unregister(struct drm_device *dev,
+				struct drm_mode_object *object)
+{
+	mutex_lock(&dev->mode_config.idr_mutex);
+	if (object->id) {
+		idr_remove(&dev->mode_config.crtc_idr, object->id);
+		object->id = 0;
+	}
+	mutex_unlock(&dev->mode_config.idr_mutex);
+}
+
+struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
+					       uint32_t id, uint32_t type)
+{
+	struct drm_mode_object *obj = NULL;
+
+	mutex_lock(&dev->mode_config.idr_mutex);
+	obj = idr_find(&dev->mode_config.crtc_idr, id);
+	if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
+		obj = NULL;
+	if (obj && obj->id != id)
+		obj = NULL;
+
+	if (obj && obj->free_cb) {
+		if (!kref_get_unless_zero(&obj->refcount))
+			obj = NULL;
+	}
+	mutex_unlock(&dev->mode_config.idr_mutex);
+
+	return obj;
+}
+
+/**
+ * drm_mode_object_find - look up a drm object with static lifetime
+ * @dev: drm device
+ * @id: id of the mode object
+ * @type: type of the mode object
+ *
+ * This function is used to look up a modeset object. It will acquire a
+ * reference for reference counted objects. This reference must be dropped again
+ * by callind drm_mode_object_unreference().
+ */
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+		uint32_t id, uint32_t type)
+{
+	struct drm_mode_object *obj = NULL;
+
+	obj = __drm_mode_object_find(dev, id, type);
+	return obj;
+}
+EXPORT_SYMBOL(drm_mode_object_find);
+
+/**
+ * drm_mode_object_unreference - decr the object refcnt
+ * @obj: mode_object
+ *
+ * This function decrements the object's refcount if it is a refcounted modeset
+ * object. It is a no-op on any other object. This is used to drop references
+ * acquired with drm_mode_object_reference().
+ */
+void drm_mode_object_unreference(struct drm_mode_object *obj)
+{
+	if (obj->free_cb) {
+		DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
+		kref_put(&obj->refcount, obj->free_cb);
+	}
+}
+EXPORT_SYMBOL(drm_mode_object_unreference);
+
+/**
+ * drm_mode_object_reference - incr the object refcnt
+ * @obj: mode_object
+ *
+ * This function increments the object's refcount if it is a refcounted modeset
+ * object. It is a no-op on any other object. References should be dropped again
+ * by calling drm_mode_object_unreference().
+ */
+void drm_mode_object_reference(struct drm_mode_object *obj)
+{
+	if (obj->free_cb) {
+		DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
+		kref_get(&obj->refcount);
+	}
+}
+EXPORT_SYMBOL(drm_mode_object_reference);
+
+/**
+ * drm_object_attach_property - attach a property to a modeset object
+ * @obj: drm modeset object
+ * @property: property to attach
+ * @init_val: initial value of the property
+ *
+ * This attaches the given property to the modeset object with the given initial
+ * value. Currently this function cannot fail since the properties are stored in
+ * a statically sized array.
+ */
+void drm_object_attach_property(struct drm_mode_object *obj,
+				struct drm_property *property,
+				uint64_t init_val)
+{
+	int count = obj->properties->count;
+
+	if (count == DRM_OBJECT_MAX_PROPERTY) {
+		WARN(1, "Failed to attach object property (type: 0x%x). Please "
+			"increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
+			"you see this message on the same object type.\n",
+			obj->type);
+		return;
+	}
+
+	obj->properties->properties[count] = property;
+	obj->properties->values[count] = init_val;
+	obj->properties->count++;
+}
+EXPORT_SYMBOL(drm_object_attach_property);
+
+/**
+ * drm_object_property_set_value - set the value of a property
+ * @obj: drm mode object to set property value for
+ * @property: property to set
+ * @val: value the property should be set to
+ *
+ * This function sets a given property on a given object. This function only
+ * changes the software state of the property, it does not call into the
+ * driver's ->set_property callback.
+ *
+ * Note that atomic drivers should not have any need to call this, the core will
+ * ensure consistency of values reported back to userspace through the
+ * appropriate ->atomic_get_property callback. Only legacy drivers should call
+ * this function to update the tracked value (after clamping and other
+ * restrictions have been applied).
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_object_property_set_value(struct drm_mode_object *obj,
+				  struct drm_property *property, uint64_t val)
+{
+	int i;
+
+	for (i = 0; i < obj->properties->count; i++) {
+		if (obj->properties->properties[i] == property) {
+			obj->properties->values[i] = val;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(drm_object_property_set_value);
+
+/**
+ * drm_object_property_get_value - retrieve the value of a property
+ * @obj: drm mode object to get property value from
+ * @property: property to retrieve
+ * @val: storage for the property value
+ *
+ * This function retrieves the softare state of the given property for the given
+ * property. Since there is no driver callback to retrieve the current property
+ * value this might be out of sync with the hardware, depending upon the driver
+ * and property.
+ *
+ * Atomic drivers should never call this function directly, the core will read
+ * out property values through the various ->atomic_get_property callbacks.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_object_property_get_value(struct drm_mode_object *obj,
+				  struct drm_property *property, uint64_t *val)
+{
+	int i;
+
+	/* read-only properties bypass atomic mechanism and still store
+	 * their value in obj->properties->values[].. mostly to avoid
+	 * having to deal w/ EDID and similar props in atomic paths:
+	 */
+	if (drm_core_check_feature(property->dev, DRIVER_ATOMIC) &&
+			!(property->flags & DRM_MODE_PROP_IMMUTABLE))
+		return drm_atomic_get_property(obj, property, val);
+
+	for (i = 0; i < obj->properties->count; i++) {
+		if (obj->properties->properties[i] == property) {
+			*val = obj->properties->values[i];
+			return 0;
+		}
+
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(drm_object_property_get_value);
+
+/* helper for getconnector and getproperties ioctls */
+int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
+				   uint32_t __user *prop_ptr,
+				   uint64_t __user *prop_values,
+				   uint32_t *arg_count_props)
+{
+	int i, ret, count;
+
+	for (i = 0, count = 0; i < obj->properties->count; i++) {
+		struct drm_property *prop = obj->properties->properties[i];
+		uint64_t val;
+
+		if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic)
+			continue;
+
+		if (*arg_count_props > count) {
+			ret = drm_object_property_get_value(obj, prop, &val);
+			if (ret)
+				return ret;
+
+			if (put_user(prop->base.id, prop_ptr + count))
+				return -EFAULT;
+
+			if (put_user(val, prop_values + count))
+				return -EFAULT;
+		}
+
+		count++;
+	}
+	*arg_count_props = count;
+
+	return 0;
+}
+
+/**
+ * drm_mode_obj_get_properties_ioctl - get the current value of a object's property
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function retrieves the current value for an object's property. Compared
+ * to the connector specific ioctl this one is extended to also work on crtc and
+ * plane objects.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv)
+{
+	struct drm_mode_obj_get_properties *arg = data;
+	struct drm_mode_object *obj;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+
+	obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+	if (!obj) {
+		ret = -ENOENT;
+		goto out;
+	}
+	if (!obj->properties) {
+		ret = -EINVAL;
+		goto out_unref;
+	}
+
+	ret = drm_mode_object_get_properties(obj, file_priv->atomic,
+			(uint32_t __user *)(unsigned long)(arg->props_ptr),
+			(uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
+			&arg->count_props);
+
+out_unref:
+	drm_mode_object_unreference(obj);
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
+struct drm_property *drm_mode_obj_find_prop_id(struct drm_mode_object *obj,
+					       uint32_t prop_id)
+{
+	int i;
+
+	for (i = 0; i < obj->properties->count; i++)
+		if (obj->properties->properties[i]->base.id == prop_id)
+			return obj->properties->properties[i];
+
+	return NULL;
+}
+
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv)
+{
+	struct drm_mode_obj_set_property *arg = data;
+	struct drm_mode_object *arg_obj;
+	struct drm_property *property;
+	int ret = -EINVAL;
+	struct drm_mode_object *ref;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+
+	arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+	if (!arg_obj) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	if (!arg_obj->properties)
+		goto out_unref;
+
+	property = drm_mode_obj_find_prop_id(arg_obj, arg->prop_id);
+	if (!property)
+		goto out_unref;
+
+	if (!drm_property_change_valid_get(property, arg->value, &ref))
+		goto out_unref;
+
+	switch (arg_obj->type) {
+	case DRM_MODE_OBJECT_CONNECTOR:
+		ret = drm_mode_connector_set_obj_prop(arg_obj, property,
+						      arg->value);
+		break;
+	case DRM_MODE_OBJECT_CRTC:
+		ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
+		break;
+	case DRM_MODE_OBJECT_PLANE:
+		ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
+						  property, arg->value);
+		break;
+	}
+
+	drm_property_change_valid_put(property, ref);
+
+out_unref:
+	drm_mode_object_unreference(arg_obj);
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index fc5040a..53f07ac 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -657,11 +657,36 @@
 }
 EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode);
 
+/**
+ * drm_bus_flags_from_videomode - extract information about pixelclk and
+ * DE polarity from videomode and store it in a separate variable
+ * @vm: videomode structure to use
+ * @bus_flags: information about pixelclk and DE polarity will be stored here
+ *
+ * Sets DRM_BUS_FLAG_DE_(LOW|HIGH) and DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE
+ * in @bus_flags according to DISPLAY_FLAGS found in @vm
+ */
+void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags)
+{
+	*bus_flags = 0;
+	if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
+		*bus_flags |= DRM_BUS_FLAG_PIXDATA_POSEDGE;
+	if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+		*bus_flags |= DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+
+	if (vm->flags & DISPLAY_FLAGS_DE_LOW)
+		*bus_flags |= DRM_BUS_FLAG_DE_LOW;
+	if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
+		*bus_flags |= DRM_BUS_FLAG_DE_HIGH;
+}
+EXPORT_SYMBOL_GPL(drm_bus_flags_from_videomode);
+
 #ifdef CONFIG_OF
 /**
  * of_get_drm_display_mode - get a drm_display_mode from devicetree
  * @np: device_node with the timing specification
  * @dmode: will be set to the return value
+ * @bus_flags: information about pixelclk and DE polarity
  * @index: index into the list of display timings in devicetree
  *
  * This function is expensive and should only be used, if only one mode is to be
@@ -672,7 +697,8 @@
  * 0 on success, a negative errno code when no of videomode node was found.
  */
 int of_get_drm_display_mode(struct device_node *np,
-			    struct drm_display_mode *dmode, int index)
+			    struct drm_display_mode *dmode, u32 *bus_flags,
+			    int index)
 {
 	struct videomode vm;
 	int ret;
@@ -682,6 +708,8 @@
 		return ret;
 
 	drm_display_mode_from_videomode(&vm, dmode);
+	if (bus_flags)
+		drm_bus_flags_from_videomode(&vm, bus_flags);
 
 	pr_debug("%s: got %dx%d display mode from %s\n",
 		of_node_full_name(np), vm.hactive, vm.vactive, np->name);
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
new file mode 100644
index 0000000..1d45738
--- /dev/null
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_plane_helper.h>
+
+/**
+ * DOC: aux kms helpers
+ *
+ * This helper library contains various one-off functions which don't really fit
+ * anywhere else in the DRM modeset helper library.
+ */
+
+/**
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
+ * 						connector list
+ * @dev: drm device to operate on
+ *
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
+ */
+void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
+{
+	struct drm_connector *connector, *tmp;
+	struct list_head panel_list;
+
+	INIT_LIST_HEAD(&panel_list);
+
+	list_for_each_entry_safe(connector, tmp,
+				 &dev->mode_config.connector_list, head) {
+		if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+			list_move_tail(&connector->head, &panel_list);
+	}
+
+	list_splice(&panel_list, &dev->mode_config.connector_list);
+}
+EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
+
+/**
+ * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
+ * @fb: drm_framebuffer object to fill out
+ * @mode_cmd: metadata from the userspace fb creation request
+ *
+ * This helper can be used in a drivers fb_create callback to pre-fill the fb's
+ * metadata fields.
+ */
+void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+				    const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	int i;
+
+	fb->width = mode_cmd->width;
+	fb->height = mode_cmd->height;
+	for (i = 0; i < 4; i++) {
+		fb->pitches[i] = mode_cmd->pitches[i];
+		fb->offsets[i] = mode_cmd->offsets[i];
+		fb->modifier[i] = mode_cmd->modifier[i];
+	}
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
+				    &fb->bits_per_pixel);
+	fb->pixel_format = mode_cmd->pixel_format;
+	fb->flags = mode_cmd->flags;
+}
+EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
+
+/*
+ * This is the minimal list of formats that seem to be safe for modeset use
+ * with all current DRM drivers.  Most hardware can actually support more
+ * formats than this and drivers may specify a more accurate list when
+ * creating the primary plane.  However drivers that still call
+ * drm_plane_init() will use this minimal format list as the default.
+ */
+static const uint32_t safe_modeset_formats[] = {
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+};
+
+static struct drm_plane *create_primary_plane(struct drm_device *dev)
+{
+	struct drm_plane *primary;
+	int ret;
+
+	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+	if (primary == NULL) {
+		DRM_DEBUG_KMS("Failed to allocate primary plane\n");
+		return NULL;
+	}
+
+	/*
+	 * Remove the format_default field from drm_plane when dropping
+	 * this helper.
+	 */
+	primary->format_default = true;
+
+	/* possible_crtc's will be filled in later by crtc_init */
+	ret = drm_universal_plane_init(dev, primary, 0,
+				       &drm_primary_helper_funcs,
+				       safe_modeset_formats,
+				       ARRAY_SIZE(safe_modeset_formats),
+				       DRM_PLANE_TYPE_PRIMARY, NULL);
+	if (ret) {
+		kfree(primary);
+		primary = NULL;
+	}
+
+	return primary;
+}
+
+/**
+ * drm_crtc_init - Legacy CRTC initialization function
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @funcs: callbacks for the new CRTC
+ *
+ * Initialize a CRTC object with a default helper-provided primary plane and no
+ * cursor plane.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+		  const struct drm_crtc_funcs *funcs)
+{
+	struct drm_plane *primary;
+
+	primary = create_primary_plane(dev);
+	return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs,
+					 NULL);
+}
+EXPORT_SYMBOL(drm_crtc_init);
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index b2f8f10..3ceea9c 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -175,7 +175,7 @@
 {
 	struct drm_irq_busid *p = data;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	/* UMS was only ever support on PCI devices. */
@@ -236,8 +236,8 @@
 	DRM_DEBUG("\n");
 
 	dev = drm_dev_alloc(driver, &pdev->dev);
-	if (!dev)
-		return -ENOMEM;
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
 
 	ret = pci_enable_device(pdev);
 	if (ret)
@@ -263,7 +263,7 @@
 
 	/* No locking needed since shadow-attach is single-threaded since it may
 	 * only be called from the per-driver module init hook. */
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+	if (drm_core_check_feature(dev, DRIVER_LEGACY))
 		list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);
 
 	return 0;
@@ -299,7 +299,7 @@
 
 	DRM_DEBUG("\n");
 
-	if (driver->driver_features & DRIVER_MODESET)
+	if (!(driver->driver_features & DRIVER_LEGACY))
 		return pci_register_driver(pdriver);
 
 	/* If not using KMS, fall back to stealth mode manual scanning. */
@@ -421,7 +421,7 @@
 	struct drm_device *dev, *tmp;
 	DRM_DEBUG("\n");
 
-	if (driver->driver_features & DRIVER_MODESET) {
+	if (!(driver->driver_features & DRIVER_LEGACY)) {
 		pci_unregister_driver(pdriver);
 	} else {
 		list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
new file mode 100644
index 0000000..249c0ae
--- /dev/null
+++ b/drivers/gpu/drm/drm_plane.c
@@ -0,0 +1,906 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_plane.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * A plane represents an image source that can be blended with or overlayed on
+ * top of a CRTC during the scanout process. Planes take their input data from a
+ * &drm_framebuffer object. The plane itself specifies the cropping and scaling
+ * of that image, and where it is placed on the visible are of a display
+ * pipeline, represented by &drm_crtc. A plane can also have additional
+ * properties that specify how the pixels are positioned and blended, like
+ * rotation or Z-position. All these properties are stored in &drm_plane_state.
+ *
+ * To create a plane, a KMS drivers allocates and zeroes an instances of
+ * struct &drm_plane (possibly as part of a larger structure) and registers it
+ * with a call to drm_universal_plane_init().
+ *
+ * Cursor and overlay planes are optional. All drivers should provide one
+ * primary plane per CRTC to avoid surprising userspace too much. See enum
+ * &drm_plane_type for a more in-depth discussion of these special uapi-relevant
+ * plane types. Special planes are associated with their CRTC by calling
+ * drm_crtc_init_with_planes().
+ *
+ * The type of a plane is exposed in the immutable "type" enumeration property,
+ * which has one of the following values: "Overlay", "Primary", "Cursor".
+ */
+
+static unsigned int drm_num_planes(struct drm_device *dev)
+{
+	unsigned int num = 0;
+	struct drm_plane *tmp;
+
+	drm_for_each_plane(tmp, dev) {
+		num++;
+	}
+
+	return num;
+}
+
+/**
+ * drm_universal_plane_init - Initialize a new universal plane object
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @type: type of plane (overlay, primary, cursor)
+ * @name: printf style format string for the plane name, or NULL for default name
+ *
+ * Initializes a plane object of type @type.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
+			     unsigned long possible_crtcs,
+			     const struct drm_plane_funcs *funcs,
+			     const uint32_t *formats, unsigned int format_count,
+			     enum drm_plane_type type,
+			     const char *name, ...)
+{
+	struct drm_mode_config *config = &dev->mode_config;
+	int ret;
+
+	ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
+	if (ret)
+		return ret;
+
+	drm_modeset_lock_init(&plane->mutex);
+
+	plane->base.properties = &plane->properties;
+	plane->dev = dev;
+	plane->funcs = funcs;
+	plane->format_types = kmalloc_array(format_count, sizeof(uint32_t),
+					    GFP_KERNEL);
+	if (!plane->format_types) {
+		DRM_DEBUG_KMS("out of memory when allocating plane\n");
+		drm_mode_object_unregister(dev, &plane->base);
+		return -ENOMEM;
+	}
+
+	if (name) {
+		va_list ap;
+
+		va_start(ap, name);
+		plane->name = kvasprintf(GFP_KERNEL, name, ap);
+		va_end(ap);
+	} else {
+		plane->name = kasprintf(GFP_KERNEL, "plane-%d",
+					drm_num_planes(dev));
+	}
+	if (!plane->name) {
+		kfree(plane->format_types);
+		drm_mode_object_unregister(dev, &plane->base);
+		return -ENOMEM;
+	}
+
+	memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
+	plane->format_count = format_count;
+	plane->possible_crtcs = possible_crtcs;
+	plane->type = type;
+
+	list_add_tail(&plane->head, &config->plane_list);
+	plane->index = config->num_total_plane++;
+	if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+		config->num_overlay_plane++;
+
+	drm_object_attach_property(&plane->base,
+				   config->plane_type_property,
+				   plane->type);
+
+	if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+		drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
+		drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
+		drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
+		drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
+		drm_object_attach_property(&plane->base, config->prop_crtc_w, 0);
+		drm_object_attach_property(&plane->base, config->prop_crtc_h, 0);
+		drm_object_attach_property(&plane->base, config->prop_src_x, 0);
+		drm_object_attach_property(&plane->base, config->prop_src_y, 0);
+		drm_object_attach_property(&plane->base, config->prop_src_w, 0);
+		drm_object_attach_property(&plane->base, config->prop_src_h, 0);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_universal_plane_init);
+
+int drm_plane_register_all(struct drm_device *dev)
+{
+	struct drm_plane *plane;
+	int ret = 0;
+
+	drm_for_each_plane(plane, dev) {
+		if (plane->funcs->late_register)
+			ret = plane->funcs->late_register(plane);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+void drm_plane_unregister_all(struct drm_device *dev)
+{
+	struct drm_plane *plane;
+
+	drm_for_each_plane(plane, dev) {
+		if (plane->funcs->early_unregister)
+			plane->funcs->early_unregister(plane);
+	}
+}
+
+/**
+ * drm_plane_init - Initialize a legacy plane
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @is_primary: plane type (primary vs overlay)
+ *
+ * Legacy API to initialize a DRM plane.
+ *
+ * New drivers should call drm_universal_plane_init() instead.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+		   unsigned long possible_crtcs,
+		   const struct drm_plane_funcs *funcs,
+		   const uint32_t *formats, unsigned int format_count,
+		   bool is_primary)
+{
+	enum drm_plane_type type;
+
+	type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+	return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
+					formats, format_count, type, NULL);
+}
+EXPORT_SYMBOL(drm_plane_init);
+
+/**
+ * drm_plane_cleanup - Clean up the core plane usage
+ * @plane: plane to cleanup
+ *
+ * This function cleans up @plane and removes it from the DRM mode setting
+ * core. Note that the function does *not* free the plane structure itself,
+ * this is the responsibility of the caller.
+ */
+void drm_plane_cleanup(struct drm_plane *plane)
+{
+	struct drm_device *dev = plane->dev;
+
+	drm_modeset_lock_all(dev);
+	kfree(plane->format_types);
+	drm_mode_object_unregister(dev, &plane->base);
+
+	BUG_ON(list_empty(&plane->head));
+
+	/* Note that the plane_list is considered to be static; should we
+	 * remove the drm_plane at runtime we would have to decrement all
+	 * the indices on the drm_plane after us in the plane_list.
+	 */
+
+	list_del(&plane->head);
+	dev->mode_config.num_total_plane--;
+	if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+		dev->mode_config.num_overlay_plane--;
+	drm_modeset_unlock_all(dev);
+
+	WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
+	if (plane->state && plane->funcs->atomic_destroy_state)
+		plane->funcs->atomic_destroy_state(plane, plane->state);
+
+	kfree(plane->name);
+
+	memset(plane, 0, sizeof(*plane));
+}
+EXPORT_SYMBOL(drm_plane_cleanup);
+
+/**
+ * drm_plane_from_index - find the registered plane at an index
+ * @dev: DRM device
+ * @idx: index of registered plane to find for
+ *
+ * Given a plane index, return the registered plane from DRM device's
+ * list of planes with matching index.
+ */
+struct drm_plane *
+drm_plane_from_index(struct drm_device *dev, int idx)
+{
+	struct drm_plane *plane;
+
+	drm_for_each_plane(plane, dev)
+		if (idx == plane->index)
+			return plane;
+
+	return NULL;
+}
+EXPORT_SYMBOL(drm_plane_from_index);
+
+/**
+ * drm_plane_force_disable - Forcibly disable a plane
+ * @plane: plane to disable
+ *
+ * Forces the plane to be disabled.
+ *
+ * Used when the plane's current framebuffer is destroyed,
+ * and when restoring fbdev mode.
+ */
+void drm_plane_force_disable(struct drm_plane *plane)
+{
+	int ret;
+
+	if (!plane->fb)
+		return;
+
+	plane->old_fb = plane->fb;
+	ret = plane->funcs->disable_plane(plane);
+	if (ret) {
+		DRM_ERROR("failed to disable plane with busy fb\n");
+		plane->old_fb = NULL;
+		return;
+	}
+	/* disconnect the plane from the fb and crtc: */
+	drm_framebuffer_unreference(plane->old_fb);
+	plane->old_fb = NULL;
+	plane->fb = NULL;
+	plane->crtc = NULL;
+}
+EXPORT_SYMBOL(drm_plane_force_disable);
+
+/**
+ * drm_mode_plane_set_obj_prop - set the value of a property
+ * @plane: drm plane object to set property value for
+ * @property: property to set
+ * @value: value the property should be set to
+ *
+ * This functions sets a given property on a given plane object. This function
+ * calls the driver's ->set_property callback and changes the software state of
+ * the property if the callback succeeds.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
+				struct drm_property *property,
+				uint64_t value)
+{
+	int ret = -EINVAL;
+	struct drm_mode_object *obj = &plane->base;
+
+	if (plane->funcs->set_property)
+		ret = plane->funcs->set_property(plane, property, value);
+	if (!ret)
+		drm_object_property_set_value(obj, property, value);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
+
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct drm_mode_get_plane_res *plane_resp = data;
+	struct drm_mode_config *config;
+	struct drm_plane *plane;
+	uint32_t __user *plane_ptr;
+	int copied = 0;
+	unsigned num_planes;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	config = &dev->mode_config;
+
+	if (file_priv->universal_planes)
+		num_planes = config->num_total_plane;
+	else
+		num_planes = config->num_overlay_plane;
+
+	/*
+	 * This ioctl is called twice, once to determine how much space is
+	 * needed, and the 2nd time to fill it.
+	 */
+	if (num_planes &&
+	    (plane_resp->count_planes >= num_planes)) {
+		plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
+
+		/* Plane lists are invariant, no locking needed. */
+		drm_for_each_plane(plane, dev) {
+			/*
+			 * Unless userspace set the 'universal planes'
+			 * capability bit, only advertise overlays.
+			 */
+			if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
+			    !file_priv->universal_planes)
+				continue;
+
+			if (put_user(plane->base.id, plane_ptr + copied))
+				return -EFAULT;
+			copied++;
+		}
+	}
+	plane_resp->count_planes = num_planes;
+
+	return 0;
+}
+
+int drm_mode_getplane(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_mode_get_plane *plane_resp = data;
+	struct drm_plane *plane;
+	uint32_t __user *format_ptr;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	plane = drm_plane_find(dev, plane_resp->plane_id);
+	if (!plane)
+		return -ENOENT;
+
+	drm_modeset_lock(&plane->mutex, NULL);
+	if (plane->crtc)
+		plane_resp->crtc_id = plane->crtc->base.id;
+	else
+		plane_resp->crtc_id = 0;
+
+	if (plane->fb)
+		plane_resp->fb_id = plane->fb->base.id;
+	else
+		plane_resp->fb_id = 0;
+	drm_modeset_unlock(&plane->mutex);
+
+	plane_resp->plane_id = plane->base.id;
+	plane_resp->possible_crtcs = plane->possible_crtcs;
+	plane_resp->gamma_size = 0;
+
+	/*
+	 * This ioctl is called twice, once to determine how much space is
+	 * needed, and the 2nd time to fill it.
+	 */
+	if (plane->format_count &&
+	    (plane_resp->count_format_types >= plane->format_count)) {
+		format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
+		if (copy_to_user(format_ptr,
+				 plane->format_types,
+				 sizeof(uint32_t) * plane->format_count)) {
+			return -EFAULT;
+		}
+	}
+	plane_resp->count_format_types = plane->format_count;
+
+	return 0;
+}
+
+int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
+{
+	unsigned int i;
+
+	for (i = 0; i < plane->format_count; i++) {
+		if (format == plane->format_types[i])
+			return 0;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * setplane_internal - setplane handler for internal callers
+ *
+ * Note that we assume an extra reference has already been taken on fb.  If the
+ * update fails, this reference will be dropped before return; if it succeeds,
+ * the previous framebuffer (if any) will be unreferenced instead.
+ *
+ * src_{x,y,w,h} are provided in 16.16 fixed point format
+ */
+static int __setplane_internal(struct drm_plane *plane,
+			       struct drm_crtc *crtc,
+			       struct drm_framebuffer *fb,
+			       int32_t crtc_x, int32_t crtc_y,
+			       uint32_t crtc_w, uint32_t crtc_h,
+			       /* src_{x,y,w,h} values are 16.16 fixed point */
+			       uint32_t src_x, uint32_t src_y,
+			       uint32_t src_w, uint32_t src_h)
+{
+	int ret = 0;
+
+	/* No fb means shut it down */
+	if (!fb) {
+		plane->old_fb = plane->fb;
+		ret = plane->funcs->disable_plane(plane);
+		if (!ret) {
+			plane->crtc = NULL;
+			plane->fb = NULL;
+		} else {
+			plane->old_fb = NULL;
+		}
+		goto out;
+	}
+
+	/* Check whether this plane is usable on this CRTC */
+	if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
+		DRM_DEBUG_KMS("Invalid crtc for plane\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Check whether this plane supports the fb pixel format. */
+	ret = drm_plane_check_pixel_format(plane, fb->pixel_format);
+	if (ret) {
+		char *format_name = drm_get_format_name(fb->pixel_format);
+		DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
+		kfree(format_name);
+		goto out;
+	}
+
+	/* Give drivers some help against integer overflows */
+	if (crtc_w > INT_MAX ||
+	    crtc_x > INT_MAX - (int32_t) crtc_w ||
+	    crtc_h > INT_MAX ||
+	    crtc_y > INT_MAX - (int32_t) crtc_h) {
+		DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+			      crtc_w, crtc_h, crtc_x, crtc_y);
+		ret = -ERANGE;
+		goto out;
+	}
+
+	ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, fb);
+	if (ret)
+		goto out;
+
+	plane->old_fb = plane->fb;
+	ret = plane->funcs->update_plane(plane, crtc, fb,
+					 crtc_x, crtc_y, crtc_w, crtc_h,
+					 src_x, src_y, src_w, src_h);
+	if (!ret) {
+		plane->crtc = crtc;
+		plane->fb = fb;
+		fb = NULL;
+	} else {
+		plane->old_fb = NULL;
+	}
+
+out:
+	if (fb)
+		drm_framebuffer_unreference(fb);
+	if (plane->old_fb)
+		drm_framebuffer_unreference(plane->old_fb);
+	plane->old_fb = NULL;
+
+	return ret;
+}
+
+static int setplane_internal(struct drm_plane *plane,
+			     struct drm_crtc *crtc,
+			     struct drm_framebuffer *fb,
+			     int32_t crtc_x, int32_t crtc_y,
+			     uint32_t crtc_w, uint32_t crtc_h,
+			     /* src_{x,y,w,h} values are 16.16 fixed point */
+			     uint32_t src_x, uint32_t src_y,
+			     uint32_t src_w, uint32_t src_h)
+{
+	int ret;
+
+	drm_modeset_lock_all(plane->dev);
+	ret = __setplane_internal(plane, crtc, fb,
+				  crtc_x, crtc_y, crtc_w, crtc_h,
+				  src_x, src_y, src_w, src_h);
+	drm_modeset_unlock_all(plane->dev);
+
+	return ret;
+}
+
+int drm_mode_setplane(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_mode_set_plane *plane_req = data;
+	struct drm_plane *plane;
+	struct drm_crtc *crtc = NULL;
+	struct drm_framebuffer *fb = NULL;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	/*
+	 * First, find the plane, crtc, and fb objects.  If not available,
+	 * we don't bother to call the driver.
+	 */
+	plane = drm_plane_find(dev, plane_req->plane_id);
+	if (!plane) {
+		DRM_DEBUG_KMS("Unknown plane ID %d\n",
+			      plane_req->plane_id);
+		return -ENOENT;
+	}
+
+	if (plane_req->fb_id) {
+		fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+		if (!fb) {
+			DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+				      plane_req->fb_id);
+			return -ENOENT;
+		}
+
+		crtc = drm_crtc_find(dev, plane_req->crtc_id);
+		if (!crtc) {
+			DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+				      plane_req->crtc_id);
+			return -ENOENT;
+		}
+	}
+
+	/*
+	 * setplane_internal will take care of deref'ing either the old or new
+	 * framebuffer depending on success.
+	 */
+	return setplane_internal(plane, crtc, fb,
+				 plane_req->crtc_x, plane_req->crtc_y,
+				 plane_req->crtc_w, plane_req->crtc_h,
+				 plane_req->src_x, plane_req->src_y,
+				 plane_req->src_w, plane_req->src_h);
+}
+
+static int drm_mode_cursor_universal(struct drm_crtc *crtc,
+				     struct drm_mode_cursor2 *req,
+				     struct drm_file *file_priv)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_framebuffer *fb = NULL;
+	struct drm_mode_fb_cmd2 fbreq = {
+		.width = req->width,
+		.height = req->height,
+		.pixel_format = DRM_FORMAT_ARGB8888,
+		.pitches = { req->width * 4 },
+		.handles = { req->handle },
+	};
+	int32_t crtc_x, crtc_y;
+	uint32_t crtc_w = 0, crtc_h = 0;
+	uint32_t src_w = 0, src_h = 0;
+	int ret = 0;
+
+	BUG_ON(!crtc->cursor);
+	WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL);
+
+	/*
+	 * Obtain fb we'll be using (either new or existing) and take an extra
+	 * reference to it if fb != null.  setplane will take care of dropping
+	 * the reference if the plane update fails.
+	 */
+	if (req->flags & DRM_MODE_CURSOR_BO) {
+		if (req->handle) {
+			fb = drm_internal_framebuffer_create(dev, &fbreq, file_priv);
+			if (IS_ERR(fb)) {
+				DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
+				return PTR_ERR(fb);
+			}
+			fb->hot_x = req->hot_x;
+			fb->hot_y = req->hot_y;
+		} else {
+			fb = NULL;
+		}
+	} else {
+		fb = crtc->cursor->fb;
+		if (fb)
+			drm_framebuffer_reference(fb);
+	}
+
+	if (req->flags & DRM_MODE_CURSOR_MOVE) {
+		crtc_x = req->x;
+		crtc_y = req->y;
+	} else {
+		crtc_x = crtc->cursor_x;
+		crtc_y = crtc->cursor_y;
+	}
+
+	if (fb) {
+		crtc_w = fb->width;
+		crtc_h = fb->height;
+		src_w = fb->width << 16;
+		src_h = fb->height << 16;
+	}
+
+	/*
+	 * setplane_internal will take care of deref'ing either the old or new
+	 * framebuffer depending on success.
+	 */
+	ret = __setplane_internal(crtc->cursor, crtc, fb,
+				crtc_x, crtc_y, crtc_w, crtc_h,
+				0, 0, src_w, src_h);
+
+	/* Update successful; save new cursor position, if necessary */
+	if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
+		crtc->cursor_x = req->x;
+		crtc->cursor_y = req->y;
+	}
+
+	return ret;
+}
+
+static int drm_mode_cursor_common(struct drm_device *dev,
+				  struct drm_mode_cursor2 *req,
+				  struct drm_file *file_priv)
+{
+	struct drm_crtc *crtc;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
+		return -EINVAL;
+
+	crtc = drm_crtc_find(dev, req->crtc_id);
+	if (!crtc) {
+		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
+		return -ENOENT;
+	}
+
+	/*
+	 * If this crtc has a universal cursor plane, call that plane's update
+	 * handler rather than using legacy cursor handlers.
+	 */
+	drm_modeset_lock_crtc(crtc, crtc->cursor);
+	if (crtc->cursor) {
+		ret = drm_mode_cursor_universal(crtc, req, file_priv);
+		goto out;
+	}
+
+	if (req->flags & DRM_MODE_CURSOR_BO) {
+		if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
+			ret = -ENXIO;
+			goto out;
+		}
+		/* Turns off the cursor if handle is 0 */
+		if (crtc->funcs->cursor_set2)
+			ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle,
+						      req->width, req->height, req->hot_x, req->hot_y);
+		else
+			ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
+						      req->width, req->height);
+	}
+
+	if (req->flags & DRM_MODE_CURSOR_MOVE) {
+		if (crtc->funcs->cursor_move) {
+			ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
+		} else {
+			ret = -EFAULT;
+			goto out;
+		}
+	}
+out:
+	drm_modeset_unlock_crtc(crtc);
+
+	return ret;
+
+}
+
+
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+			  void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_cursor *req = data;
+	struct drm_mode_cursor2 new_req;
+
+	memcpy(&new_req, req, sizeof(struct drm_mode_cursor));
+	new_req.hot_x = new_req.hot_y = 0;
+
+	return drm_mode_cursor_common(dev, &new_req, file_priv);
+}
+
+/*
+ * Set the cursor configuration based on user request. This implements the 2nd
+ * version of the cursor ioctl, which allows userspace to additionally specify
+ * the hotspot of the pointer.
+ */
+int drm_mode_cursor2_ioctl(struct drm_device *dev,
+			   void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_cursor2 *req = data;
+
+	return drm_mode_cursor_common(dev, req, file_priv);
+}
+
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_crtc_page_flip_target *page_flip = data;
+	struct drm_crtc *crtc;
+	struct drm_framebuffer *fb = NULL;
+	struct drm_pending_vblank_event *e = NULL;
+	u32 target_vblank = page_flip->sequence;
+	int ret = -EINVAL;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS)
+		return -EINVAL;
+
+	if (page_flip->sequence != 0 && !(page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET))
+		return -EINVAL;
+
+	/* Only one of the DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags
+	 * can be specified
+	 */
+	if ((page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) == DRM_MODE_PAGE_FLIP_TARGET)
+		return -EINVAL;
+
+	if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
+		return -EINVAL;
+
+	crtc = drm_crtc_find(dev, page_flip->crtc_id);
+	if (!crtc)
+		return -ENOENT;
+
+	if (crtc->funcs->page_flip_target) {
+		u32 current_vblank;
+		int r;
+
+		r = drm_crtc_vblank_get(crtc);
+		if (r)
+			return r;
+
+		current_vblank = drm_crtc_vblank_count(crtc);
+
+		switch (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) {
+		case DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE:
+			if ((int)(target_vblank - current_vblank) > 1) {
+				DRM_DEBUG("Invalid absolute flip target %u, "
+					  "must be <= %u\n", target_vblank,
+					  current_vblank + 1);
+				drm_crtc_vblank_put(crtc);
+				return -EINVAL;
+			}
+			break;
+		case DRM_MODE_PAGE_FLIP_TARGET_RELATIVE:
+			if (target_vblank != 0 && target_vblank != 1) {
+				DRM_DEBUG("Invalid relative flip target %u, "
+					  "must be 0 or 1\n", target_vblank);
+				drm_crtc_vblank_put(crtc);
+				return -EINVAL;
+			}
+			target_vblank += current_vblank;
+			break;
+		default:
+			target_vblank = current_vblank +
+				!(page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC);
+			break;
+		}
+	} else if (crtc->funcs->page_flip == NULL ||
+		   (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET)) {
+		return -EINVAL;
+	}
+
+	drm_modeset_lock_crtc(crtc, crtc->primary);
+	if (crtc->primary->fb == NULL) {
+		/* The framebuffer is currently unbound, presumably
+		 * due to a hotplug event, that userspace has not
+		 * yet discovered.
+		 */
+		ret = -EBUSY;
+		goto out;
+	}
+
+	fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
+	if (!fb) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	if (crtc->state) {
+		const struct drm_plane_state *state = crtc->primary->state;
+
+		ret = drm_framebuffer_check_src_coords(state->src_x,
+						       state->src_y,
+						       state->src_w,
+						       state->src_h,
+						       fb);
+	} else {
+		ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
+	}
+	if (ret)
+		goto out;
+
+	if (crtc->primary->fb->pixel_format != fb->pixel_format) {
+		DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+		e = kzalloc(sizeof *e, GFP_KERNEL);
+		if (!e) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+		e->event.base.length = sizeof(e->event);
+		e->event.user_data = page_flip->user_data;
+		ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
+		if (ret) {
+			kfree(e);
+			goto out;
+		}
+	}
+
+	crtc->primary->old_fb = crtc->primary->fb;
+	if (crtc->funcs->page_flip_target)
+		ret = crtc->funcs->page_flip_target(crtc, fb, e,
+						    page_flip->flags,
+						    target_vblank);
+	else
+		ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
+	if (ret) {
+		if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)
+			drm_event_cancel_free(dev, &e->base);
+		/* Keep the old fb, don't unref it. */
+		crtc->primary->old_fb = NULL;
+	} else {
+		crtc->primary->fb = fb;
+		/* Unref only the old framebuffer. */
+		fb = NULL;
+	}
+
+out:
+	if (ret && crtc->funcs->page_flip_target)
+		drm_crtc_vblank_put(crtc);
+	if (fb)
+		drm_framebuffer_unreference(fb);
+	if (crtc->primary->old_fb)
+		drm_framebuffer_unreference(crtc->primary->old_fb);
+	crtc->primary->old_fb = NULL;
+	drm_modeset_unlock_crtc(crtc);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 16c4a7b..7899fc1 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -64,18 +64,6 @@
  */
 
 /*
- * This is the minimal list of formats that seem to be safe for modeset use
- * with all current DRM drivers.  Most hardware can actually support more
- * formats than this and drivers may specify a more accurate list when
- * creating the primary plane.  However drivers that still call
- * drm_plane_init() will use this minimal format list as the default.
- */
-static const uint32_t safe_modeset_formats[] = {
-	DRM_FORMAT_XRGB8888,
-	DRM_FORMAT_ARGB8888,
-};
-
-/*
  * Returns the connectors currently associated with a CRTC.  This function
  * should be called twice:  once with a NULL connector list to retrieve
  * the list size, and once with the properly allocated list to be filled in.
@@ -108,12 +96,110 @@
 }
 
 /**
+ * drm_plane_helper_check_state() - Check plane state for validity
+ * @state: plane state to check
+ * @clip: integer clipping coordinates
+ * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
+ * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
+ * @can_position: is it legal to position the plane such that it
+ *                doesn't cover the entire crtc?  This will generally
+ *                only be false for primary planes.
+ * @can_update_disabled: can the plane be updated while the crtc
+ *                       is disabled?
+ *
+ * Checks that a desired plane update is valid, and updates various
+ * bits of derived state (clipped coordinates etc.). Drivers that provide
+ * their own plane handling rather than helper-provided implementations may
+ * still wish to call this function to avoid duplication of error checking
+ * code.
+ *
+ * RETURNS:
+ * Zero if update appears valid, error code on failure
+ */
+int drm_plane_helper_check_state(struct drm_plane_state *state,
+				 const struct drm_rect *clip,
+				 int min_scale,
+				 int max_scale,
+				 bool can_position,
+				 bool can_update_disabled)
+{
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_rect *src = &state->src;
+	struct drm_rect *dst = &state->dst;
+	unsigned int rotation = state->rotation;
+	int hscale, vscale;
+
+	src->x1 = state->src_x;
+	src->y1 = state->src_y;
+	src->x2 = state->src_x + state->src_w;
+	src->y2 = state->src_y + state->src_h;
+
+	dst->x1 = state->crtc_x;
+	dst->y1 = state->crtc_y;
+	dst->x2 = state->crtc_x + state->crtc_w;
+	dst->y2 = state->crtc_y + state->crtc_h;
+
+	if (!fb) {
+		state->visible = false;
+		return 0;
+	}
+
+	/* crtc should only be NULL when disabling (i.e., !fb) */
+	if (WARN_ON(!crtc)) {
+		state->visible = false;
+		return 0;
+	}
+
+	if (!crtc->enabled && !can_update_disabled) {
+		DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
+		return -EINVAL;
+	}
+
+	drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
+
+	/* Check scaling */
+	hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
+	vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
+	if (hscale < 0 || vscale < 0) {
+		DRM_DEBUG_KMS("Invalid scaling of plane\n");
+		drm_rect_debug_print("src: ", &state->src, true);
+		drm_rect_debug_print("dst: ", &state->dst, false);
+		return -ERANGE;
+	}
+
+	state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
+
+	drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
+
+	if (!state->visible)
+		/*
+		 * Plane isn't visible; some drivers can handle this
+		 * so we just return success here.  Drivers that can't
+		 * (including those that use the primary plane helper's
+		 * update function) will return an error from their
+		 * update_plane handler.
+		 */
+		return 0;
+
+	if (!can_position && !drm_rect_equals(dst, clip)) {
+		DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
+		drm_rect_debug_print("dst: ", dst, false);
+		drm_rect_debug_print("clip: ", clip, false);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_plane_helper_check_state);
+
+/**
  * drm_plane_helper_check_update() - Check plane update for validity
  * @plane: plane object to update
  * @crtc: owning CRTC of owning plane
  * @fb: framebuffer to flip onto plane
  * @src: source coordinates in 16.16 fixed point
- * @dest: integer destination coordinates
+ * @dst: integer destination coordinates
  * @clip: integer clipping coordinates
  * @rotation: plane rotation
  * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
@@ -138,7 +224,7 @@
 				  struct drm_crtc *crtc,
 				  struct drm_framebuffer *fb,
 				  struct drm_rect *src,
-				  struct drm_rect *dest,
+				  struct drm_rect *dst,
 				  const struct drm_rect *clip,
 				  unsigned int rotation,
 				  int min_scale,
@@ -147,56 +233,33 @@
 				  bool can_update_disabled,
 				  bool *visible)
 {
-	int hscale, vscale;
+	struct drm_plane_state state = {
+		.plane = plane,
+		.crtc = crtc,
+		.fb = fb,
+		.src_x = src->x1,
+		.src_y = src->y1,
+		.src_w = drm_rect_width(src),
+		.src_h = drm_rect_height(src),
+		.crtc_x = dst->x1,
+		.crtc_y = dst->y1,
+		.crtc_w = drm_rect_width(dst),
+		.crtc_h = drm_rect_height(dst),
+		.rotation = rotation,
+		.visible = *visible,
+	};
+	int ret;
 
-	if (!fb) {
-		*visible = false;
-		return 0;
-	}
+	ret = drm_plane_helper_check_state(&state, clip,
+					   min_scale, max_scale,
+					   can_position,
+					   can_update_disabled);
+	if (ret)
+		return ret;
 
-	/* crtc should only be NULL when disabling (i.e., !fb) */
-	if (WARN_ON(!crtc)) {
-		*visible = false;
-		return 0;
-	}
-
-	if (!crtc->enabled && !can_update_disabled) {
-		DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
-		return -EINVAL;
-	}
-
-	drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
-
-	/* Check scaling */
-	hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale);
-	vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale);
-	if (hscale < 0 || vscale < 0) {
-		DRM_DEBUG_KMS("Invalid scaling of plane\n");
-		drm_rect_debug_print("src: ", src, true);
-		drm_rect_debug_print("dst: ", dest, false);
-		return -ERANGE;
-	}
-
-	*visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
-
-	drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
-
-	if (!*visible)
-		/*
-		 * Plane isn't visible; some drivers can handle this
-		 * so we just return success here.  Drivers that can't
-		 * (including those that use the primary plane helper's
-		 * update function) will return an error from their
-		 * update_plane handler.
-		 */
-		return 0;
-
-	if (!can_position && !drm_rect_equals(dest, clip)) {
-		DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
-		drm_rect_debug_print("dst: ", dest, false);
-		drm_rect_debug_print("clip: ", clip, false);
-		return -EINVAL;
-	}
+	*src = state.src;
+	*dst = state.dst;
+	*visible = state.visible;
 
 	return 0;
 }
@@ -274,7 +337,7 @@
 
 	ret = drm_plane_helper_check_update(plane, crtc, fb,
 					    &src, &dest, &clip,
-					    BIT(DRM_ROTATE_0),
+					    DRM_ROTATE_0,
 					    DRM_PLANE_HELPER_NO_SCALING,
 					    DRM_PLANE_HELPER_NO_SCALING,
 					    false, false, &visible);
@@ -363,60 +426,6 @@
 };
 EXPORT_SYMBOL(drm_primary_helper_funcs);
 
-static struct drm_plane *create_primary_plane(struct drm_device *dev)
-{
-	struct drm_plane *primary;
-	int ret;
-
-	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
-	if (primary == NULL) {
-		DRM_DEBUG_KMS("Failed to allocate primary plane\n");
-		return NULL;
-	}
-
-	/*
-	 * Remove the format_default field from drm_plane when dropping
-	 * this helper.
-	 */
-	primary->format_default = true;
-
-	/* possible_crtc's will be filled in later by crtc_init */
-	ret = drm_universal_plane_init(dev, primary, 0,
-				       &drm_primary_helper_funcs,
-				       safe_modeset_formats,
-				       ARRAY_SIZE(safe_modeset_formats),
-				       DRM_PLANE_TYPE_PRIMARY, NULL);
-	if (ret) {
-		kfree(primary);
-		primary = NULL;
-	}
-
-	return primary;
-}
-
-/**
- * drm_crtc_init - Legacy CRTC initialization function
- * @dev: DRM device
- * @crtc: CRTC object to init
- * @funcs: callbacks for the new CRTC
- *
- * Initialize a CRTC object with a default helper-provided primary plane and no
- * cursor plane.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
-		  const struct drm_crtc_funcs *funcs)
-{
-	struct drm_plane *primary;
-
-	primary = create_primary_plane(dev);
-	return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs,
-					 NULL);
-}
-EXPORT_SYMBOL(drm_crtc_init);
-
 int drm_plane_helper_commit(struct drm_plane *plane,
 			    struct drm_plane_state *plane_state,
 			    struct drm_framebuffer *old_fb)
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 2c819ef..0262698 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -48,8 +48,8 @@
 	DRM_DEBUG("\n");
 
 	dev = drm_dev_alloc(driver, &platdev->dev);
-	if (!dev)
-		return -ENOMEM;
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
 
 	dev->platformdev = platdev;
 
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 780589b..b22a94d 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -28,6 +28,7 @@
 
 #include <linux/export.h>
 #include <linux/dma-buf.h>
+#include <linux/rbtree.h>
 #include <drm/drmP.h>
 #include <drm/drm_gem.h>
 
@@ -61,9 +62,11 @@
  */
 
 struct drm_prime_member {
-	struct list_head entry;
 	struct dma_buf *dma_buf;
 	uint32_t handle;
+
+	struct rb_node dmabuf_rb;
+	struct rb_node handle_rb;
 };
 
 struct drm_prime_attachment {
@@ -75,6 +78,7 @@
 				    struct dma_buf *dma_buf, uint32_t handle)
 {
 	struct drm_prime_member *member;
+	struct rb_node **p, *rb;
 
 	member = kmalloc(sizeof(*member), GFP_KERNEL);
 	if (!member)
@@ -83,18 +87,56 @@
 	get_dma_buf(dma_buf);
 	member->dma_buf = dma_buf;
 	member->handle = handle;
-	list_add(&member->entry, &prime_fpriv->head);
+
+	rb = NULL;
+	p = &prime_fpriv->dmabufs.rb_node;
+	while (*p) {
+		struct drm_prime_member *pos;
+
+		rb = *p;
+		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
+		if (dma_buf > pos->dma_buf)
+			p = &rb->rb_right;
+		else
+			p = &rb->rb_left;
+	}
+	rb_link_node(&member->dmabuf_rb, rb, p);
+	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);
+
+	rb = NULL;
+	p = &prime_fpriv->handles.rb_node;
+	while (*p) {
+		struct drm_prime_member *pos;
+
+		rb = *p;
+		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
+		if (handle > pos->handle)
+			p = &rb->rb_right;
+		else
+			p = &rb->rb_left;
+	}
+	rb_link_node(&member->handle_rb, rb, p);
+	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);
+
 	return 0;
 }
 
 static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
 						      uint32_t handle)
 {
-	struct drm_prime_member *member;
+	struct rb_node *rb;
 
-	list_for_each_entry(member, &prime_fpriv->head, entry) {
+	rb = prime_fpriv->handles.rb_node;
+	while (rb) {
+		struct drm_prime_member *member;
+
+		member = rb_entry(rb, struct drm_prime_member, handle_rb);
 		if (member->handle == handle)
 			return member->dma_buf;
+		else if (member->handle < handle)
+			rb = rb->rb_right;
+		else
+			rb = rb->rb_left;
 	}
 
 	return NULL;
@@ -104,14 +146,23 @@
 				       struct dma_buf *dma_buf,
 				       uint32_t *handle)
 {
-	struct drm_prime_member *member;
+	struct rb_node *rb;
 
-	list_for_each_entry(member, &prime_fpriv->head, entry) {
+	rb = prime_fpriv->dmabufs.rb_node;
+	while (rb) {
+		struct drm_prime_member *member;
+
+		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
 		if (member->dma_buf == dma_buf) {
 			*handle = member->handle;
 			return 0;
+		} else if (member->dma_buf < dma_buf) {
+			rb = rb->rb_right;
+		} else {
+			rb = rb->rb_left;
 		}
 	}
+
 	return -ENOENT;
 }
 
@@ -166,13 +217,24 @@
 void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
 					struct dma_buf *dma_buf)
 {
-	struct drm_prime_member *member, *safe;
+	struct rb_node *rb;
 
-	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+	rb = prime_fpriv->dmabufs.rb_node;
+	while (rb) {
+		struct drm_prime_member *member;
+
+		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
 		if (member->dma_buf == dma_buf) {
+			rb_erase(&member->handle_rb, &prime_fpriv->handles);
+			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
+
 			dma_buf_put(dma_buf);
-			list_del(&member->entry);
 			kfree(member);
+			return;
+		} else if (member->dma_buf < dma_buf) {
+			rb = rb->rb_right;
+		} else {
+			rb = rb->rb_left;
 		}
 	}
 }
@@ -222,18 +284,47 @@
 }
 
 /**
+ * drm_gem_dmabuf_export - dma_buf export implementation for GEM
+ * @dev: parent device for the exported dmabuf
+ * @exp_info: the export information used by dma_buf_export()
+ *
+ * This wraps dma_buf_export() for use by generic GEM drivers that are using
+ * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
+ * a reference to the drm_device which is released by drm_gem_dmabuf_release().
+ *
+ * Returns the new dmabuf.
+ */
+struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
+				      struct dma_buf_export_info *exp_info)
+{
+	struct dma_buf *dma_buf;
+
+	dma_buf = dma_buf_export(exp_info);
+	if (!IS_ERR(dma_buf))
+		drm_dev_ref(dev);
+
+	return dma_buf;
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_export);
+
+/**
  * drm_gem_dmabuf_release - dma_buf release implementation for GEM
  * @dma_buf: buffer to be released
  *
  * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
  * must use this in their dma_buf ops structure as the release callback.
+ * drm_gem_dmabuf_release() should be used in conjunction with
+ * drm_gem_dmabuf_export().
  */
 void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
 {
 	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
 
 	/* drop the reference on the export fd holds */
 	drm_gem_object_unreference_unlocked(obj);
+
+	drm_dev_unref(dev);
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_release);
 
@@ -335,19 +426,22 @@
  * using the PRIME helpers.
  */
 struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
-				     struct drm_gem_object *obj, int flags)
+				     struct drm_gem_object *obj,
+				     int flags)
 {
-	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
-	exp_info.ops = &drm_gem_prime_dmabuf_ops;
-	exp_info.size = obj->size;
-	exp_info.flags = flags;
-	exp_info.priv = obj;
+	struct dma_buf_export_info exp_info = {
+		.exp_name = KBUILD_MODNAME, /* white lie for debug */
+		.owner = dev->driver->fops->owner,
+		.ops = &drm_gem_prime_dmabuf_ops,
+		.size = obj->size,
+		.flags = flags,
+		.priv = obj,
+	};
 
 	if (dev->driver->gem_prime_res_obj)
 		exp_info.resv = dev->driver->gem_prime_res_obj(obj);
 
-	return dma_buf_export(&exp_info);
+	return drm_gem_dmabuf_export(dev, &exp_info);
 }
 EXPORT_SYMBOL(drm_gem_prime_export);
 
@@ -759,12 +853,13 @@
 
 void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
 {
-	INIT_LIST_HEAD(&prime_fpriv->head);
 	mutex_init(&prime_fpriv->lock);
+	prime_fpriv->dmabufs = RB_ROOT;
+	prime_fpriv->handles = RB_ROOT;
 }
 
 void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
 {
 	/* by now drm_gem_release should've made sure the list is empty */
-	WARN_ON(!list_empty(&prime_fpriv->head));
+	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
 }
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index a0df377..f6b64d7 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -129,6 +129,7 @@
 {
 	bool poll = false;
 	struct drm_connector *connector;
+	unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
 
 	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
@@ -141,8 +142,13 @@
 			poll = true;
 	}
 
+	if (dev->mode_config.delayed_event) {
+		poll = true;
+		delay = 0;
+	}
+
 	if (poll)
-		schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+		schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
 
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
new file mode 100644
index 0000000..a4d81cf
--- /dev/null
+++ b/drivers/gpu/drm/drm_property.c
@@ -0,0 +1,912 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_property.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * Properties as represented by &drm_property are used to extend the modeset
+ * interface exposed to userspace. For the atomic modeset IOCTL properties are
+ * even the only way to transport metadata about the desired new modeset
+ * configuration from userspace to the kernel. Properties have a well-defined
+ * value range, which is enforced by the drm core. See the documentation of the
+ * flags member of struct &drm_property for an overview of the different
+ * property types and ranges.
+ *
+ * Properties don't store the current value directly, but need to be
+ * instatiated by attaching them to a &drm_mode_object with
+ * drm_object_attach_property().
+ *
+ * Property values are only 64bit. To support bigger piles of data (like gamma
+ * tables, color correction matrizes or large structures) a property can instead
+ * point at a &drm_property_blob with that additional data
+ *
+ * Properties are defined by their symbolic name, userspace must keep a
+ * per-object mapping from those names to the property ID used in the atomic
+ * IOCTL and in the get/set property IOCTL.
+ */
+
+static bool drm_property_type_valid(struct drm_property *property)
+{
+	if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
+		return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+	return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+}
+
+/**
+ * drm_property_create - create a new property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @num_values: number of pre-defined values
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+					 const char *name, int num_values)
+{
+	struct drm_property *property = NULL;
+	int ret;
+
+	property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
+	if (!property)
+		return NULL;
+
+	property->dev = dev;
+
+	if (num_values) {
+		property->values = kcalloc(num_values, sizeof(uint64_t),
+					   GFP_KERNEL);
+		if (!property->values)
+			goto fail;
+	}
+
+	ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
+	if (ret)
+		goto fail;
+
+	property->flags = flags;
+	property->num_values = num_values;
+	INIT_LIST_HEAD(&property->enum_list);
+
+	if (name) {
+		strncpy(property->name, name, DRM_PROP_NAME_LEN);
+		property->name[DRM_PROP_NAME_LEN-1] = '\0';
+	}
+
+	list_add_tail(&property->head, &dev->mode_config.property_list);
+
+	WARN_ON(!drm_property_type_valid(property));
+
+	return property;
+fail:
+	kfree(property->values);
+	kfree(property);
+	return NULL;
+}
+EXPORT_SYMBOL(drm_property_create);
+
+/**
+ * drm_property_create_enum - create a new enumeration property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @props: enumeration lists with property values
+ * @num_values: number of pre-defined values
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Userspace is only allowed to set one of the predefined values for enumeration
+ * properties.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+					 const char *name,
+					 const struct drm_prop_enum_list *props,
+					 int num_values)
+{
+	struct drm_property *property;
+	int i, ret;
+
+	flags |= DRM_MODE_PROP_ENUM;
+
+	property = drm_property_create(dev, flags, name, num_values);
+	if (!property)
+		return NULL;
+
+	for (i = 0; i < num_values; i++) {
+		ret = drm_property_add_enum(property, i,
+				      props[i].type,
+				      props[i].name);
+		if (ret) {
+			drm_property_destroy(dev, property);
+			return NULL;
+		}
+	}
+
+	return property;
+}
+EXPORT_SYMBOL(drm_property_create_enum);
+
+/**
+ * drm_property_create_bitmask - create a new bitmask property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @props: enumeration lists with property bitflags
+ * @num_props: size of the @props array
+ * @supported_bits: bitmask of all supported enumeration values
+ *
+ * This creates a new bitmask drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Compared to plain enumeration properties userspace is allowed to set any
+ * or'ed together combination of the predefined property bitflag values
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+					 int flags, const char *name,
+					 const struct drm_prop_enum_list *props,
+					 int num_props,
+					 uint64_t supported_bits)
+{
+	struct drm_property *property;
+	int i, ret, index = 0;
+	int num_values = hweight64(supported_bits);
+
+	flags |= DRM_MODE_PROP_BITMASK;
+
+	property = drm_property_create(dev, flags, name, num_values);
+	if (!property)
+		return NULL;
+	for (i = 0; i < num_props; i++) {
+		if (!(supported_bits & (1ULL << props[i].type)))
+			continue;
+
+		if (WARN_ON(index >= num_values)) {
+			drm_property_destroy(dev, property);
+			return NULL;
+		}
+
+		ret = drm_property_add_enum(property, index++,
+				      props[i].type,
+				      props[i].name);
+		if (ret) {
+			drm_property_destroy(dev, property);
+			return NULL;
+		}
+	}
+
+	return property;
+}
+EXPORT_SYMBOL(drm_property_create_bitmask);
+
+static struct drm_property *property_create_range(struct drm_device *dev,
+					 int flags, const char *name,
+					 uint64_t min, uint64_t max)
+{
+	struct drm_property *property;
+
+	property = drm_property_create(dev, flags, name, 2);
+	if (!property)
+		return NULL;
+
+	property->values[0] = min;
+	property->values[1] = max;
+
+	return property;
+}
+
+/**
+ * drm_property_create_range - create a new unsigned ranged property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @min: minimum value of the property
+ * @max: maximum value of the property
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Userspace is allowed to set any unsigned integer value in the (min, max)
+ * range inclusive.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+					 const char *name,
+					 uint64_t min, uint64_t max)
+{
+	return property_create_range(dev, DRM_MODE_PROP_RANGE | flags,
+			name, min, max);
+}
+EXPORT_SYMBOL(drm_property_create_range);
+
+/**
+ * drm_property_create_signed_range - create a new signed ranged property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @min: minimum value of the property
+ * @max: maximum value of the property
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Userspace is allowed to set any signed integer value in the (min, max)
+ * range inclusive.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
+					 int flags, const char *name,
+					 int64_t min, int64_t max)
+{
+	return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags,
+			name, I642U64(min), I642U64(max));
+}
+EXPORT_SYMBOL(drm_property_create_signed_range);
+
+/**
+ * drm_property_create_object - create a new object property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @type: object type from DRM_MODE_OBJECT_* defines
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Userspace is only allowed to set this to any property value of the given
+ * @type. Only useful for atomic properties, which is enforced.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_object(struct drm_device *dev,
+						int flags, const char *name,
+						uint32_t type)
+{
+	struct drm_property *property;
+
+	flags |= DRM_MODE_PROP_OBJECT;
+
+	if (WARN_ON(!(flags & DRM_MODE_PROP_ATOMIC)))
+		return NULL;
+
+	property = drm_property_create(dev, flags, name, 1);
+	if (!property)
+		return NULL;
+
+	property->values[0] = type;
+
+	return property;
+}
+EXPORT_SYMBOL(drm_property_create_object);
+
+/**
+ * drm_property_create_bool - create a new boolean property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * This is implemented as a ranged property with only {0, 1} as valid values.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
+					      const char *name)
+{
+	return drm_property_create_range(dev, flags, name, 0, 1);
+}
+EXPORT_SYMBOL(drm_property_create_bool);
+
+/**
+ * drm_property_add_enum - add a possible value to an enumeration property
+ * @property: enumeration property to change
+ * @index: index of the new enumeration
+ * @value: value of the new enumeration
+ * @name: symbolic name of the new enumeration
+ *
+ * This functions adds enumerations to a property.
+ *
+ * It's use is deprecated, drivers should use one of the more specific helpers
+ * to directly create the property with all enumerations already attached.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_property_add_enum(struct drm_property *property, int index,
+			  uint64_t value, const char *name)
+{
+	struct drm_property_enum *prop_enum;
+
+	if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+			drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
+		return -EINVAL;
+
+	/*
+	 * Bitmask enum properties have the additional constraint of values
+	 * from 0 to 63
+	 */
+	if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
+			(value > 63))
+		return -EINVAL;
+
+	if (!list_empty(&property->enum_list)) {
+		list_for_each_entry(prop_enum, &property->enum_list, head) {
+			if (prop_enum->value == value) {
+				strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+				prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+				return 0;
+			}
+		}
+	}
+
+	prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
+	if (!prop_enum)
+		return -ENOMEM;
+
+	strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+	prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+	prop_enum->value = value;
+
+	property->values[index] = value;
+	list_add_tail(&prop_enum->head, &property->enum_list);
+	return 0;
+}
+EXPORT_SYMBOL(drm_property_add_enum);
+
+/**
+ * drm_property_destroy - destroy a drm property
+ * @dev: drm device
+ * @property: property to destry
+ *
+ * This function frees a property including any attached resources like
+ * enumeration values.
+ */
+void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
+{
+	struct drm_property_enum *prop_enum, *pt;
+
+	list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
+		list_del(&prop_enum->head);
+		kfree(prop_enum);
+	}
+
+	if (property->num_values)
+		kfree(property->values);
+	drm_mode_object_unregister(dev, &property->base);
+	list_del(&property->head);
+	kfree(property);
+}
+EXPORT_SYMBOL(drm_property_destroy);
+
+int drm_mode_getproperty_ioctl(struct drm_device *dev,
+			       void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_get_property *out_resp = data;
+	struct drm_property *property;
+	int enum_count = 0;
+	int value_count = 0;
+	int ret = 0, i;
+	int copied;
+	struct drm_property_enum *prop_enum;
+	struct drm_mode_property_enum __user *enum_ptr;
+	uint64_t __user *values_ptr;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+	property = drm_property_find(dev, out_resp->prop_id);
+	if (!property) {
+		ret = -ENOENT;
+		goto done;
+	}
+
+	if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+			drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
+		list_for_each_entry(prop_enum, &property->enum_list, head)
+			enum_count++;
+	}
+
+	value_count = property->num_values;
+
+	strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
+	out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
+	out_resp->flags = property->flags;
+
+	if ((out_resp->count_values >= value_count) && value_count) {
+		values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
+		for (i = 0; i < value_count; i++) {
+			if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
+				ret = -EFAULT;
+				goto done;
+			}
+		}
+	}
+	out_resp->count_values = value_count;
+
+	if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+			drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
+		if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
+			copied = 0;
+			enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
+			list_for_each_entry(prop_enum, &property->enum_list, head) {
+
+				if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
+					ret = -EFAULT;
+					goto done;
+				}
+
+				if (copy_to_user(&enum_ptr[copied].name,
+						 &prop_enum->name, DRM_PROP_NAME_LEN)) {
+					ret = -EFAULT;
+					goto done;
+				}
+				copied++;
+			}
+		}
+		out_resp->count_enum_blobs = enum_count;
+	}
+
+	/*
+	 * NOTE: The idea seems to have been to use this to read all the blob
+	 * property values. But nothing ever added them to the corresponding
+	 * list, userspace always used the special-purpose get_blob ioctl to
+	 * read the value for a blob property. It also doesn't make a lot of
+	 * sense to return values here when everything else is just metadata for
+	 * the property itself.
+	 */
+	if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
+		out_resp->count_enum_blobs = 0;
+done:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
+static void drm_property_free_blob(struct kref *kref)
+{
+	struct drm_property_blob *blob =
+		container_of(kref, struct drm_property_blob, base.refcount);
+
+	mutex_lock(&blob->dev->mode_config.blob_lock);
+	list_del(&blob->head_global);
+	mutex_unlock(&blob->dev->mode_config.blob_lock);
+
+	drm_mode_object_unregister(blob->dev, &blob->base);
+
+	kfree(blob);
+}
+
+/**
+ * drm_property_create_blob - Create new blob property
+ * @dev: DRM device to create property for
+ * @length: Length to allocate for blob data
+ * @data: If specified, copies data into blob
+ *
+ * Creates a new blob property for a specified DRM device, optionally
+ * copying data. Note that blob properties are meant to be invariant, hence the
+ * data must be filled out before the blob is used as the value of any property.
+ *
+ * Returns:
+ * New blob property with a single reference on success, or an ERR_PTR
+ * value on failure.
+ */
+struct drm_property_blob *
+drm_property_create_blob(struct drm_device *dev, size_t length,
+			 const void *data)
+{
+	struct drm_property_blob *blob;
+	int ret;
+
+	if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
+		return ERR_PTR(-EINVAL);
+
+	blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+	if (!blob)
+		return ERR_PTR(-ENOMEM);
+
+	/* This must be explicitly initialised, so we can safely call list_del
+	 * on it in the removal handler, even if it isn't in a file list. */
+	INIT_LIST_HEAD(&blob->head_file);
+	blob->length = length;
+	blob->dev = dev;
+
+	if (data)
+		memcpy(blob->data, data, length);
+
+	ret = drm_mode_object_get_reg(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
+				      true, drm_property_free_blob);
+	if (ret) {
+		kfree(blob);
+		return ERR_PTR(-EINVAL);
+	}
+
+	mutex_lock(&dev->mode_config.blob_lock);
+	list_add_tail(&blob->head_global,
+	              &dev->mode_config.property_blob_list);
+	mutex_unlock(&dev->mode_config.blob_lock);
+
+	return blob;
+}
+EXPORT_SYMBOL(drm_property_create_blob);
+
+/**
+ * drm_property_unreference_blob - Unreference a blob property
+ * @blob: Pointer to blob property
+ *
+ * Drop a reference on a blob property. May free the object.
+ */
+void drm_property_unreference_blob(struct drm_property_blob *blob)
+{
+	if (!blob)
+		return;
+
+	drm_mode_object_unreference(&blob->base);
+}
+EXPORT_SYMBOL(drm_property_unreference_blob);
+
+void drm_property_destroy_user_blobs(struct drm_device *dev,
+				     struct drm_file *file_priv)
+{
+	struct drm_property_blob *blob, *bt;
+
+	/*
+	 * When the file gets released that means no one else can access the
+	 * blob list any more, so no need to grab dev->blob_lock.
+	 */
+	list_for_each_entry_safe(blob, bt, &file_priv->blobs, head_file) {
+		list_del_init(&blob->head_file);
+		drm_property_unreference_blob(blob);
+	}
+}
+
+/**
+ * drm_property_reference_blob - Take a reference on an existing property
+ * @blob: Pointer to blob property
+ *
+ * Take a new reference on an existing blob property. Returns @blob, which
+ * allows this to be used as a shorthand in assignments.
+ */
+struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob)
+{
+	drm_mode_object_reference(&blob->base);
+	return blob;
+}
+EXPORT_SYMBOL(drm_property_reference_blob);
+
+/**
+ * drm_property_lookup_blob - look up a blob property and take a reference
+ * @dev: drm device
+ * @id: id of the blob property
+ *
+ * If successful, this takes an additional reference to the blob property.
+ * callers need to make sure to eventually unreference the returned property
+ * again, using @drm_property_unreference_blob.
+ *
+ * Return:
+ * NULL on failure, pointer to the blob on success.
+ */
+struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
+					           uint32_t id)
+{
+	struct drm_mode_object *obj;
+	struct drm_property_blob *blob = NULL;
+
+	obj = __drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB);
+	if (obj)
+		blob = obj_to_blob(obj);
+	return blob;
+}
+EXPORT_SYMBOL(drm_property_lookup_blob);
+
+/**
+ * drm_property_replace_global_blob - replace existing blob property
+ * @dev: drm device
+ * @replace: location of blob property pointer to be replaced
+ * @length: length of data for new blob, or 0 for no data
+ * @data: content for new blob, or NULL for no data
+ * @obj_holds_id: optional object for property holding blob ID
+ * @prop_holds_id: optional property holding blob ID
+ * @return 0 on success or error on failure
+ *
+ * This function will replace a global property in the blob list, optionally
+ * updating a property which holds the ID of that property.
+ *
+ * If length is 0 or data is NULL, no new blob will be created, and the holding
+ * property, if specified, will be set to 0.
+ *
+ * Access to the replace pointer is assumed to be protected by the caller, e.g.
+ * by holding the relevant modesetting object lock for its parent.
+ *
+ * For example, a drm_connector has a 'PATH' property, which contains the ID
+ * of a blob property with the value of the MST path information. Calling this
+ * function with replace pointing to the connector's path_blob_ptr, length and
+ * data set for the new path information, obj_holds_id set to the connector's
+ * base object, and prop_holds_id set to the path property name, will perform
+ * a completely atomic update. The access to path_blob_ptr is protected by the
+ * caller holding a lock on the connector.
+ */
+int drm_property_replace_global_blob(struct drm_device *dev,
+				     struct drm_property_blob **replace,
+				     size_t length,
+				     const void *data,
+				     struct drm_mode_object *obj_holds_id,
+				     struct drm_property *prop_holds_id)
+{
+	struct drm_property_blob *new_blob = NULL;
+	struct drm_property_blob *old_blob = NULL;
+	int ret;
+
+	WARN_ON(replace == NULL);
+
+	old_blob = *replace;
+
+	if (length && data) {
+		new_blob = drm_property_create_blob(dev, length, data);
+		if (IS_ERR(new_blob))
+			return PTR_ERR(new_blob);
+	}
+
+	if (obj_holds_id) {
+		ret = drm_object_property_set_value(obj_holds_id,
+						    prop_holds_id,
+						    new_blob ?
+						        new_blob->base.id : 0);
+		if (ret != 0)
+			goto err_created;
+	}
+
+	drm_property_unreference_blob(old_blob);
+	*replace = new_blob;
+
+	return 0;
+
+err_created:
+	drm_property_unreference_blob(new_blob);
+	return ret;
+}
+EXPORT_SYMBOL(drm_property_replace_global_blob);
+
+int drm_mode_getblob_ioctl(struct drm_device *dev,
+			   void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_get_blob *out_resp = data;
+	struct drm_property_blob *blob;
+	int ret = 0;
+	void __user *blob_ptr;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	blob = drm_property_lookup_blob(dev, out_resp->blob_id);
+	if (!blob)
+		return -ENOENT;
+
+	if (out_resp->length == blob->length) {
+		blob_ptr = (void __user *)(unsigned long)out_resp->data;
+		if (copy_to_user(blob_ptr, blob->data, blob->length)) {
+			ret = -EFAULT;
+			goto unref;
+		}
+	}
+	out_resp->length = blob->length;
+unref:
+	drm_property_unreference_blob(blob);
+
+	return ret;
+}
+
+int drm_mode_createblob_ioctl(struct drm_device *dev,
+			      void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_create_blob *out_resp = data;
+	struct drm_property_blob *blob;
+	void __user *blob_ptr;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	blob = drm_property_create_blob(dev, out_resp->length, NULL);
+	if (IS_ERR(blob))
+		return PTR_ERR(blob);
+
+	blob_ptr = (void __user *)(unsigned long)out_resp->data;
+	if (copy_from_user(blob->data, blob_ptr, out_resp->length)) {
+		ret = -EFAULT;
+		goto out_blob;
+	}
+
+	/* Dropping the lock between create_blob and our access here is safe
+	 * as only the same file_priv can remove the blob; at this point, it is
+	 * not associated with any file_priv. */
+	mutex_lock(&dev->mode_config.blob_lock);
+	out_resp->blob_id = blob->base.id;
+	list_add_tail(&blob->head_file, &file_priv->blobs);
+	mutex_unlock(&dev->mode_config.blob_lock);
+
+	return 0;
+
+out_blob:
+	drm_property_unreference_blob(blob);
+	return ret;
+}
+
+int drm_mode_destroyblob_ioctl(struct drm_device *dev,
+			       void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_destroy_blob *out_resp = data;
+	struct drm_property_blob *blob = NULL, *bt;
+	bool found = false;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	blob = drm_property_lookup_blob(dev, out_resp->blob_id);
+	if (!blob)
+		return -ENOENT;
+
+	mutex_lock(&dev->mode_config.blob_lock);
+	/* Ensure the property was actually created by this user. */
+	list_for_each_entry(bt, &file_priv->blobs, head_file) {
+		if (bt == blob) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		ret = -EPERM;
+		goto err;
+	}
+
+	/* We must drop head_file here, because we may not be the last
+	 * reference on the blob. */
+	list_del_init(&blob->head_file);
+	mutex_unlock(&dev->mode_config.blob_lock);
+
+	/* One reference from lookup, and one from the filp. */
+	drm_property_unreference_blob(blob);
+	drm_property_unreference_blob(blob);
+
+	return 0;
+
+err:
+	mutex_unlock(&dev->mode_config.blob_lock);
+	drm_property_unreference_blob(blob);
+
+	return ret;
+}
+
+/* Some properties could refer to dynamic refcnt'd objects, or things that
+ * need special locking to handle lifetime issues (ie. to ensure the prop
+ * value doesn't become invalid part way through the property update due to
+ * race).  The value returned by reference via 'obj' should be passed back
+ * to drm_property_change_valid_put() after the property is set (and the
+ * object to which the property is attached has a chance to take it's own
+ * reference).
+ */
+bool drm_property_change_valid_get(struct drm_property *property,
+				   uint64_t value, struct drm_mode_object **ref)
+{
+	int i;
+
+	if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+		return false;
+
+	*ref = NULL;
+
+	if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
+		if (value < property->values[0] || value > property->values[1])
+			return false;
+		return true;
+	} else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
+		int64_t svalue = U642I64(value);
+
+		if (svalue < U642I64(property->values[0]) ||
+				svalue > U642I64(property->values[1]))
+			return false;
+		return true;
+	} else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
+		uint64_t valid_mask = 0;
+
+		for (i = 0; i < property->num_values; i++)
+			valid_mask |= (1ULL << property->values[i]);
+		return !(value & ~valid_mask);
+	} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
+		struct drm_property_blob *blob;
+
+		if (value == 0)
+			return true;
+
+		blob = drm_property_lookup_blob(property->dev, value);
+		if (blob) {
+			*ref = &blob->base;
+			return true;
+		} else {
+			return false;
+		}
+	} else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
+		/* a zero value for an object property translates to null: */
+		if (value == 0)
+			return true;
+
+		*ref = __drm_mode_object_find(property->dev, value,
+					      property->values[0]);
+		return *ref != NULL;
+	}
+
+	for (i = 0; i < property->num_values; i++)
+		if (property->values[i] == value)
+			return true;
+	return false;
+}
+
+void drm_property_change_valid_put(struct drm_property *property,
+		struct drm_mode_object *ref)
+{
+	if (!ref)
+		return;
+
+	if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
+		drm_mode_object_unreference(ref);
+	} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
+		drm_property_unreference_blob(obj_to_blob(ref));
+}
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index a8e2c86..73e53a8 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -100,7 +100,7 @@
 {
 	int scale = 0;
 
-	if (src < 0 || dst < 0)
+	if (WARN_ON(src < 0 || dst < 0))
 		return -EINVAL;
 
 	if (dst == 0)
@@ -317,38 +317,38 @@
 {
 	struct drm_rect tmp;
 
-	if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
+	if (rotation & (DRM_REFLECT_X | DRM_REFLECT_Y)) {
 		tmp = *r;
 
-		if (rotation & BIT(DRM_REFLECT_X)) {
+		if (rotation & DRM_REFLECT_X) {
 			r->x1 = width - tmp.x2;
 			r->x2 = width - tmp.x1;
 		}
 
-		if (rotation & BIT(DRM_REFLECT_Y)) {
+		if (rotation & DRM_REFLECT_Y) {
 			r->y1 = height - tmp.y2;
 			r->y2 = height - tmp.y1;
 		}
 	}
 
 	switch (rotation & DRM_ROTATE_MASK) {
-	case BIT(DRM_ROTATE_0):
+	case DRM_ROTATE_0:
 		break;
-	case BIT(DRM_ROTATE_90):
+	case DRM_ROTATE_90:
 		tmp = *r;
 		r->x1 = tmp.y1;
 		r->x2 = tmp.y2;
 		r->y1 = width - tmp.x2;
 		r->y2 = width - tmp.x1;
 		break;
-	case BIT(DRM_ROTATE_180):
+	case DRM_ROTATE_180:
 		tmp = *r;
 		r->x1 = width - tmp.x2;
 		r->x2 = width - tmp.x1;
 		r->y1 = height - tmp.y2;
 		r->y2 = height - tmp.y1;
 		break;
-	case BIT(DRM_ROTATE_270):
+	case DRM_ROTATE_270:
 		tmp = *r;
 		r->x1 = height - tmp.y2;
 		r->x2 = height - tmp.y1;
@@ -392,23 +392,23 @@
 	struct drm_rect tmp;
 
 	switch (rotation & DRM_ROTATE_MASK) {
-	case BIT(DRM_ROTATE_0):
+	case DRM_ROTATE_0:
 		break;
-	case BIT(DRM_ROTATE_90):
+	case DRM_ROTATE_90:
 		tmp = *r;
 		r->x1 = width - tmp.y2;
 		r->x2 = width - tmp.y1;
 		r->y1 = tmp.x1;
 		r->y2 = tmp.x2;
 		break;
-	case BIT(DRM_ROTATE_180):
+	case DRM_ROTATE_180:
 		tmp = *r;
 		r->x1 = width - tmp.x2;
 		r->x2 = width - tmp.x1;
 		r->y1 = height - tmp.y2;
 		r->y2 = height - tmp.y1;
 		break;
-	case BIT(DRM_ROTATE_270):
+	case DRM_ROTATE_270:
 		tmp = *r;
 		r->x1 = tmp.y1;
 		r->x2 = tmp.y2;
@@ -419,15 +419,15 @@
 		break;
 	}
 
-	if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
+	if (rotation & (DRM_REFLECT_X | DRM_REFLECT_Y)) {
 		tmp = *r;
 
-		if (rotation & BIT(DRM_REFLECT_X)) {
+		if (rotation & DRM_REFLECT_X) {
 			r->x1 = width - tmp.x2;
 			r->x2 = width - tmp.x1;
 		}
 
-		if (rotation & BIT(DRM_REFLECT_Y)) {
+		if (rotation & DRM_REFLECT_Y) {
 			r->y1 = height - tmp.y2;
 			r->y2 = height - tmp.y1;
 		}
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index bf70431..275bca4 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -68,7 +68,7 @@
 void drm_legacy_sg_cleanup(struct drm_device *dev)
 {
 	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
-	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
+	    drm_core_check_feature(dev, DRIVER_LEGACY)) {
 		drm_sg_cleanup(dev->sg);
 		dev->sg = NULL;
 	}
@@ -88,7 +88,7 @@
 
 	DRM_DEBUG("\n");
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	if (!drm_core_check_feature(dev, DRIVER_SG))
@@ -201,7 +201,7 @@
 	struct drm_scatter_gather *request = data;
 	struct drm_sg_mem *entry;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EINVAL;
 
 	if (!drm_core_check_feature(dev, DRIVER_SG))
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 0db36d2..7bae08c 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -34,6 +34,12 @@
 	.destroy = drm_encoder_cleanup,
 };
 
+static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
+				     struct drm_crtc_state *state)
+{
+	return drm_atomic_add_affected_planes(state->state, crtc);
+}
+
 static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_simple_display_pipe *pipe;
@@ -57,6 +63,7 @@
 }
 
 static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = {
+	.atomic_check = drm_simple_kms_crtc_check,
 	.disable = drm_simple_kms_crtc_disable,
 	.enable = drm_simple_kms_crtc_enable,
 };
@@ -73,22 +80,9 @@
 static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
 					struct drm_plane_state *plane_state)
 {
-	struct drm_rect src = {
-		.x1 = plane_state->src_x,
-		.y1 = plane_state->src_y,
-		.x2 = plane_state->src_x + plane_state->src_w,
-		.y2 = plane_state->src_y + plane_state->src_h,
-	};
-	struct drm_rect dest = {
-		.x1 = plane_state->crtc_x,
-		.y1 = plane_state->crtc_y,
-		.x2 = plane_state->crtc_x + plane_state->crtc_w,
-		.y2 = plane_state->crtc_y + plane_state->crtc_h,
-	};
 	struct drm_rect clip = { 0 };
 	struct drm_simple_display_pipe *pipe;
 	struct drm_crtc_state *crtc_state;
-	bool visible;
 	int ret;
 
 	pipe = container_of(plane, struct drm_simple_display_pipe, plane);
@@ -102,17 +96,15 @@
 
 	clip.x2 = crtc_state->adjusted_mode.hdisplay;
 	clip.y2 = crtc_state->adjusted_mode.vdisplay;
-	ret = drm_plane_helper_check_update(plane, &pipe->crtc,
-					    plane_state->fb,
-					    &src, &dest, &clip,
-					    plane_state->rotation,
-					    DRM_PLANE_HELPER_NO_SCALING,
-					    DRM_PLANE_HELPER_NO_SCALING,
-					    false, true, &visible);
+
+	ret = drm_plane_helper_check_state(plane_state, &clip,
+					   DRM_PLANE_HELPER_NO_SCALING,
+					   DRM_PLANE_HELPER_NO_SCALING,
+					   false, true);
 	if (ret)
 		return ret;
 
-	if (!visible)
+	if (!plane_state->visible)
 		return -EINVAL;
 
 	if (!pipe->funcs || !pipe->funcs->check)
@@ -133,7 +125,33 @@
 	pipe->funcs->update(pipe, pstate);
 }
 
+static int drm_simple_kms_plane_prepare_fb(struct drm_plane *plane,
+					   struct drm_plane_state *state)
+{
+	struct drm_simple_display_pipe *pipe;
+
+	pipe = container_of(plane, struct drm_simple_display_pipe, plane);
+	if (!pipe->funcs || !pipe->funcs->prepare_fb)
+		return 0;
+
+	return pipe->funcs->prepare_fb(pipe, state);
+}
+
+static void drm_simple_kms_plane_cleanup_fb(struct drm_plane *plane,
+					    struct drm_plane_state *state)
+{
+	struct drm_simple_display_pipe *pipe;
+
+	pipe = container_of(plane, struct drm_simple_display_pipe, plane);
+	if (!pipe->funcs || !pipe->funcs->cleanup_fb)
+		return;
+
+	pipe->funcs->cleanup_fb(pipe, state);
+}
+
 static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = {
+	.prepare_fb = drm_simple_kms_plane_prepare_fb,
+	.cleanup_fb = drm_simple_kms_plane_cleanup_fb,
 	.atomic_check = drm_simple_kms_plane_atomic_check,
 	.atomic_update = drm_simple_kms_plane_atomic_update,
 };
@@ -148,16 +166,61 @@
 };
 
 /**
+ * drm_simple_display_pipe_attach_bridge - Attach a bridge to the display pipe
+ * @pipe: simple display pipe object
+ * @bridge: bridge to attach
+ *
+ * Makes it possible to still use the drm_simple_display_pipe helpers when
+ * a DRM bridge has to be used.
+ *
+ * Note that you probably want to initialize the pipe by passing a NULL
+ * connector to drm_simple_display_pipe_init().
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe,
+					  struct drm_bridge *bridge)
+{
+	bridge->encoder = &pipe->encoder;
+	pipe->encoder.bridge = bridge;
+	return drm_bridge_attach(pipe->encoder.dev, bridge);
+}
+EXPORT_SYMBOL(drm_simple_display_pipe_attach_bridge);
+
+/**
+ * drm_simple_display_pipe_detach_bridge - Detach the bridge from the display pipe
+ * @pipe: simple display pipe object
+ *
+ * Detaches the drm bridge previously attached with
+ * drm_simple_display_pipe_attach_bridge()
+ */
+void drm_simple_display_pipe_detach_bridge(struct drm_simple_display_pipe *pipe)
+{
+	if (WARN_ON(!pipe->encoder.bridge))
+		return;
+
+	drm_bridge_detach(pipe->encoder.bridge);
+	pipe->encoder.bridge = NULL;
+}
+EXPORT_SYMBOL(drm_simple_display_pipe_detach_bridge);
+
+/**
  * drm_simple_display_pipe_init - Initialize a simple display pipeline
  * @dev: DRM device
  * @pipe: simple display pipe object to initialize
  * @funcs: callbacks for the display pipe (optional)
- * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
  * @format_count: number of elements in @formats
- * @connector: connector to attach and register
+ * @connector: connector to attach and register (optional)
  *
  * Sets up a display pipeline which consist of a really simple
- * plane-crtc-encoder pipe coupled with the provided connector.
+ * plane-crtc-encoder pipe.
+ *
+ * If a connector is supplied, the pipe will be coupled with the provided
+ * connector. You may supply a NULL connector when using drm bridges, that
+ * handle connectors themselves (see drm_simple_display_pipe_attach_bridge()).
+ *
  * Teardown of a simple display pipe is all handled automatically by the drm
  * core through calling drm_mode_config_cleanup(). Drivers afterwards need to
  * release the memory for the structure themselves.
@@ -196,7 +259,7 @@
 	encoder->possible_crtcs = 1 << drm_crtc_index(crtc);
 	ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
 			       DRM_MODE_ENCODER_NONE, NULL);
-	if (ret)
+	if (ret || !connector)
 		return ret;
 
 	return drm_mode_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 32dd821..9a37196 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -19,7 +19,6 @@
 #include <linux/export.h>
 
 #include <drm/drm_sysfs.h>
-#include <drm/drm_core.h>
 #include <drm/drmP.h>
 #include "drm_internal.h"
 
@@ -37,12 +36,7 @@
 	return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
 }
 
-static CLASS_ATTR_STRING(version, S_IRUGO,
-		CORE_NAME " "
-		__stringify(CORE_MAJOR) "."
-		__stringify(CORE_MINOR) "."
-		__stringify(CORE_PATCHLEVEL) " "
-		CORE_DATE);
+static CLASS_ATTR_STRING(version, S_IRUGO, "drm 1.1.0 20060810");
 
 /**
  * drm_sysfs_init - initialize sysfs helpers
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index f306c88..20cc33d 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -25,7 +25,6 @@
 #include <drm/drmP.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_vma_manager.h>
-#include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/rbtree.h>
@@ -86,7 +85,6 @@
 				 unsigned long page_offset, unsigned long size)
 {
 	rwlock_init(&mgr->vm_lock);
-	mgr->vm_addr_space_rb = RB_ROOT;
 	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
 }
 EXPORT_SYMBOL(drm_vma_offset_manager_init);
@@ -145,16 +143,16 @@
 							 unsigned long start,
 							 unsigned long pages)
 {
-	struct drm_vma_offset_node *node, *best;
+	struct drm_mm_node *node, *best;
 	struct rb_node *iter;
 	unsigned long offset;
 
-	iter = mgr->vm_addr_space_rb.rb_node;
+	iter = mgr->vm_addr_space_mm.interval_tree.rb_node;
 	best = NULL;
 
 	while (likely(iter)) {
-		node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
-		offset = node->vm_node.start;
+		node = rb_entry(iter, struct drm_mm_node, rb);
+		offset = node->start;
 		if (start >= offset) {
 			iter = iter->rb_right;
 			best = node;
@@ -167,39 +165,18 @@
 
 	/* verify that the node spans the requested area */
 	if (best) {
-		offset = best->vm_node.start + best->vm_node.size;
+		offset = best->start + best->size;
 		if (offset < start + pages)
 			best = NULL;
 	}
 
-	return best;
+	if (!best)
+		return NULL;
+
+	return container_of(best, struct drm_vma_offset_node, vm_node);
 }
 EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
 
-/* internal helper to link @node into the rb-tree */
-static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
-				   struct drm_vma_offset_node *node)
-{
-	struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
-	struct rb_node *parent = NULL;
-	struct drm_vma_offset_node *iter_node;
-
-	while (likely(*iter)) {
-		parent = *iter;
-		iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
-
-		if (node->vm_node.start < iter_node->vm_node.start)
-			iter = &(*iter)->rb_left;
-		else if (node->vm_node.start > iter_node->vm_node.start)
-			iter = &(*iter)->rb_right;
-		else
-			BUG();
-	}
-
-	rb_link_node(&node->vm_rb, parent, iter);
-	rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
-}
-
 /**
  * drm_vma_offset_add() - Add offset node to manager
  * @mgr: Manager object
@@ -240,8 +217,6 @@
 	if (ret)
 		goto out_unlock;
 
-	_drm_vma_offset_add_rb(mgr, node);
-
 out_unlock:
 	write_unlock(&mgr->vm_lock);
 	return ret;
@@ -265,7 +240,6 @@
 	write_lock(&mgr->vm_lock);
 
 	if (drm_mm_node_allocated(&node->vm_node)) {
-		rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
 		drm_mm_remove_node(&node->vm_node);
 		memset(&node->vm_node, 0, sizeof(node->vm_node));
 	}
@@ -277,9 +251,9 @@
 /**
  * drm_vma_node_allow - Add open-file to list of allowed users
  * @node: Node to modify
- * @filp: Open file to add
+ * @tag: Tag of file to remove
  *
- * Add @filp to the list of allowed open-files for this node. If @filp is
+ * Add @tag to the list of allowed open-files for this node. If @tag is
  * already on this list, the ref-count is incremented.
  *
  * The list of allowed-users is preserved across drm_vma_offset_add() and
@@ -294,7 +268,7 @@
  * RETURNS:
  * 0 on success, negative error code on internal failure (out-of-mem)
  */
-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
 {
 	struct rb_node **iter;
 	struct rb_node *parent = NULL;
@@ -315,10 +289,10 @@
 		parent = *iter;
 		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
 
-		if (filp == entry->vm_filp) {
+		if (tag == entry->vm_tag) {
 			entry->vm_count++;
 			goto unlock;
-		} else if (filp > entry->vm_filp) {
+		} else if (tag > entry->vm_tag) {
 			iter = &(*iter)->rb_right;
 		} else {
 			iter = &(*iter)->rb_left;
@@ -330,7 +304,7 @@
 		goto unlock;
 	}
 
-	new->vm_filp = filp;
+	new->vm_tag = tag;
 	new->vm_count = 1;
 	rb_link_node(&new->vm_rb, parent, iter);
 	rb_insert_color(&new->vm_rb, &node->vm_files);
@@ -346,17 +320,18 @@
 /**
  * drm_vma_node_revoke - Remove open-file from list of allowed users
  * @node: Node to modify
- * @filp: Open file to remove
+ * @tag: Tag of file to remove
  *
- * Decrement the ref-count of @filp in the list of allowed open-files on @node.
- * If the ref-count drops to zero, remove @filp from the list. You must call
- * this once for every drm_vma_node_allow() on @filp.
+ * Decrement the ref-count of @tag in the list of allowed open-files on @node.
+ * If the ref-count drops to zero, remove @tag from the list. You must call
+ * this once for every drm_vma_node_allow() on @tag.
  *
  * This is locked against concurrent access internally.
  *
- * If @filp is not on the list, nothing is done.
+ * If @tag is not on the list, nothing is done.
  */
-void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
+void drm_vma_node_revoke(struct drm_vma_offset_node *node,
+			 struct drm_file *tag)
 {
 	struct drm_vma_offset_file *entry;
 	struct rb_node *iter;
@@ -366,13 +341,13 @@
 	iter = node->vm_files.rb_node;
 	while (likely(iter)) {
 		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
-		if (filp == entry->vm_filp) {
+		if (tag == entry->vm_tag) {
 			if (!--entry->vm_count) {
 				rb_erase(&entry->vm_rb, &node->vm_files);
 				kfree(entry);
 			}
 			break;
-		} else if (filp > entry->vm_filp) {
+		} else if (tag > entry->vm_tag) {
 			iter = iter->rb_right;
 		} else {
 			iter = iter->rb_left;
@@ -386,9 +361,9 @@
 /**
  * drm_vma_node_is_allowed - Check whether an open-file is granted access
  * @node: Node to check
- * @filp: Open-file to check for
+ * @tag: Tag of file to remove
  *
- * Search the list in @node whether @filp is currently on the list of allowed
+ * Search the list in @node whether @tag is currently on the list of allowed
  * open-files (see drm_vma_node_allow()).
  *
  * This is locked against concurrent access internally.
@@ -397,7 +372,7 @@
  * true iff @filp is on the list
  */
 bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
-			     struct file *filp)
+			     struct drm_file *tag)
 {
 	struct drm_vma_offset_file *entry;
 	struct rb_node *iter;
@@ -407,9 +382,9 @@
 	iter = node->vm_files.rb_node;
 	while (likely(iter)) {
 		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
-		if (filp == entry->vm_filp)
+		if (tag == entry->vm_tag)
 			break;
-		else if (filp > entry->vm_filp)
+		else if (tag > entry->vm_tag)
 			iter = iter->rb_right;
 		else
 			iter = iter->rb_left;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index d8d5564..cb86c7e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -21,6 +21,7 @@
 
 #include "common.xml.h"
 #include "state.xml.h"
+#include "state_hi.xml.h"
 #include "state_3d.xml.h"
 #include "cmdstream.xml.h"
 
@@ -117,11 +118,6 @@
 		       VIVS_GL_PIPE_SELECT_PIPE(pipe));
 }
 
-static u32 gpu_va(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buf)
-{
-	return buf->paddr - gpu->memory_base;
-}
-
 static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
 	struct etnaviv_cmdbuf *buf, u32 off, u32 len)
 {
@@ -129,7 +125,7 @@
 	u32 *ptr = buf->vaddr + off;
 
 	dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
-			ptr, gpu_va(gpu, buf) + off, size - len * 4 - off);
+			ptr, etnaviv_iommu_get_cmdbuf_va(gpu, buf) + off, size - len * 4 - off);
 
 	print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
 			ptr, len * 4, 0);
@@ -162,7 +158,7 @@
 	if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
 		buffer->user_size = 0;
 
-	return gpu_va(gpu, buffer) + buffer->user_size;
+	return etnaviv_iommu_get_cmdbuf_va(gpu, buffer) + buffer->user_size;
 }
 
 u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
@@ -173,7 +169,41 @@
 	buffer->user_size = 0;
 
 	CMD_WAIT(buffer);
-	CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + buffer->user_size - 4);
+	CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+		 buffer->user_size - 4);
+
+	return buffer->user_size / 8;
+}
+
+u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
+{
+	struct etnaviv_cmdbuf *buffer = gpu->buffer;
+
+	buffer->user_size = 0;
+
+	if (gpu->identity.features & chipFeatures_PIPE_3D) {
+		CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
+			       VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
+		CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
+			mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
+		CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
+		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+	}
+
+	if (gpu->identity.features & chipFeatures_PIPE_2D) {
+		CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
+			       VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
+		CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
+			mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
+		CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
+		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+	}
+
+	CMD_END(buffer);
+
+	buffer->user_size = ALIGN(buffer->user_size, 8);
 
 	return buffer->user_size / 8;
 }
@@ -231,7 +261,7 @@
 	if (drm_debug & DRM_UT_DRIVER)
 		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
 
-	link_target = gpu_va(gpu, cmdbuf);
+	link_target = etnaviv_iommu_get_cmdbuf_va(gpu, cmdbuf);
 	link_dwords = cmdbuf->size / 8;
 
 	/*
@@ -246,8 +276,12 @@
 		extra_dwords = 1;
 
 		/* flush command */
-		if (gpu->mmu->need_flush)
-			extra_dwords += 1;
+		if (gpu->mmu->need_flush) {
+			if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
+				extra_dwords += 1;
+			else
+				extra_dwords += 3;
+		}
 
 		/* pipe switch commands */
 		if (gpu->switch_context)
@@ -257,12 +291,23 @@
 
 		if (gpu->mmu->need_flush) {
 			/* Add the MMU flush */
-			CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
-				       VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
-				       VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
-				       VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
-				       VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
-				       VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
+			if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
+				CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
+					       VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
+					       VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
+					       VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
+					       VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
+					       VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
+			} else {
+				CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
+					VIVS_MMUv2_CONFIGURATION_MODE_MASK |
+					VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK |
+					VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH);
+				CMD_SEM(buffer, SYNC_RECIPIENT_FE,
+					SYNC_RECIPIENT_PE);
+				CMD_STALL(buffer, SYNC_RECIPIENT_FE,
+					SYNC_RECIPIENT_PE);
+			}
 
 			gpu->mmu->need_flush = false;
 		}
@@ -301,7 +346,7 @@
 
 	if (drm_debug & DRM_UT_DRIVER)
 		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
-			return_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);
+			return_target, etnaviv_iommu_get_cmdbuf_va(gpu, cmdbuf), cmdbuf->vaddr);
 
 	if (drm_debug & DRM_UT_DRIVER) {
 		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index ffd1b32..aa68766 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -488,8 +488,7 @@
 };
 
 static struct drm_driver etnaviv_drm_driver = {
-	.driver_features    = DRIVER_HAVE_IRQ |
-				DRIVER_GEM |
+	.driver_features    = DRIVER_GEM |
 				DRIVER_PRIME |
 				DRIVER_RENDER,
 	.open               = etnaviv_open,
@@ -530,10 +529,8 @@
 	int ret;
 
 	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
-	if (!drm)
-		return -ENOMEM;
-
-	drm->platformdev = to_platform_device(dev);
+	if (IS_ERR(drm))
+		return PTR_ERR(drm);
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 115c5bc..65e0576 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -96,6 +96,7 @@
 int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
 	uintptr_t ptr, u32 size, u32 flags, u32 *handle);
 u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
+u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
 void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
 void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 	struct etnaviv_cmdbuf *cmdbuf);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 4a29eea..2bef501 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -175,11 +175,13 @@
 	etnaviv_core_dump_registers(&iter, gpu);
 	etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
 	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr,
-			      gpu->buffer->size, gpu->buffer->paddr);
+			      gpu->buffer->size,
+			      etnaviv_iommu_get_cmdbuf_va(gpu, gpu->buffer));
 
 	list_for_each_entry(cmd, &gpu->active_cmd_list, node)
 		etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr,
-				      cmd->size, cmd->paddr);
+				      cmd->size,
+				      etnaviv_iommu_get_cmdbuf_va(gpu, cmd));
 
 	/* Reserve space for the bomap */
 	if (n_bomap_pages) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5ce3603..0370b84 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -748,19 +748,22 @@
 	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 	struct page **pvec;
 	uintptr_t ptr;
+	unsigned int flags = 0;
 
 	pvec = drm_malloc_ab(npages, sizeof(struct page *));
 	if (!pvec)
 		return ERR_PTR(-ENOMEM);
 
+	if (!etnaviv_obj->userptr.ro)
+		flags |= FOLL_WRITE;
+
 	pinned = 0;
 	ptr = etnaviv_obj->userptr.ptr;
 
 	down_read(&mm->mmap_sem);
 	while (pinned < npages) {
 		ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
-					    !etnaviv_obj->userptr.ro, 0,
-					    pvec + pinned, NULL);
+					    flags, pvec + pinned, NULL);
 		if (ret < 0)
 			break;
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index b382cf5..b1254f8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -22,8 +22,6 @@
 #include "etnaviv_gpu.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_mmu.h"
-#include "etnaviv_iommu.h"
-#include "etnaviv_iommu_v2.h"
 #include "common.xml.h"
 #include "state.xml.h"
 #include "state_hi.xml.h"
@@ -329,6 +327,18 @@
 				gpu->identity.revision = 0x1051;
 			}
 		}
+
+		/*
+		 * NXP likes to call the GPU on the i.MX6QP GC2000+, but in
+		 * reality it's just a re-branded GC3000. We can identify this
+		 * core by the upper half of the revision register being all 1.
+		 * Fix model/rev here, so all other places can refer to this
+		 * core by its real identity.
+		 */
+		if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
+			gpu->identity.model = chipModel_GC3000;
+			gpu->identity.revision &= 0xffff;
+		}
 	}
 
 	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
@@ -528,6 +538,14 @@
 	gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
 }
 
+void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
+{
+	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
+	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
+		  VIVS_FE_COMMAND_CONTROL_ENABLE |
+		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
+}
+
 static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 {
 	u16 prefetch;
@@ -568,33 +586,20 @@
 		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
 	}
 
-	/* set base addresses */
-	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
-	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
-	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
-	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
-	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
-
-	/* setup the MMU page table pointers */
-	etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);
+	/* setup the MMU */
+	etnaviv_iommu_restore(gpu);
 
 	/* Start command processor */
 	prefetch = etnaviv_buffer_init(gpu);
 
 	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
-	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
-		  gpu->buffer->paddr - gpu->memory_base);
-	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
-		  VIVS_FE_COMMAND_CONTROL_ENABLE |
-		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
+	etnaviv_gpu_start_fe(gpu, etnaviv_iommu_get_cmdbuf_va(gpu, gpu->buffer),
+			     prefetch);
 }
 
 int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 {
 	int ret, i;
-	struct iommu_domain *iommu;
-	enum etnaviv_iommu_version version;
-	bool mmuv2;
 
 	ret = pm_runtime_get_sync(gpu->dev);
 	if (ret < 0) {
@@ -642,32 +647,10 @@
 		goto fail;
 	}
 
-	/* Setup IOMMU.. eventually we will (I think) do this once per context
-	 * and have separate page tables per context.  For now, to keep things
-	 * simple and to get something working, just use a single address space:
-	 */
-	mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
-	dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);
-
-	if (!mmuv2) {
-		iommu = etnaviv_iommu_domain_alloc(gpu);
-		version = ETNAVIV_IOMMU_V1;
-	} else {
-		iommu = etnaviv_iommu_v2_domain_alloc(gpu);
-		version = ETNAVIV_IOMMU_V2;
-	}
-
-	if (!iommu) {
-		dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
-	if (!gpu->mmu) {
+	gpu->mmu = etnaviv_iommu_new(gpu);
+	if (IS_ERR(gpu->mmu)) {
 		dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
-		iommu_domain_free(iommu);
-		ret = -ENOMEM;
+		ret = PTR_ERR(gpu->mmu);
 		goto fail;
 	}
 
@@ -678,7 +661,9 @@
 		dev_err(gpu->dev, "could not create command buffer\n");
 		goto destroy_iommu;
 	}
-	if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
+
+	if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
+	    gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
 		ret = -EINVAL;
 		dev_err(gpu->dev,
 			"command buffer outside valid memory window\n");
@@ -868,45 +853,6 @@
 #endif
 
 /*
- * Power Management:
- */
-static int enable_clk(struct etnaviv_gpu *gpu)
-{
-	if (gpu->clk_core)
-		clk_prepare_enable(gpu->clk_core);
-	if (gpu->clk_shader)
-		clk_prepare_enable(gpu->clk_shader);
-
-	return 0;
-}
-
-static int disable_clk(struct etnaviv_gpu *gpu)
-{
-	if (gpu->clk_core)
-		clk_disable_unprepare(gpu->clk_core);
-	if (gpu->clk_shader)
-		clk_disable_unprepare(gpu->clk_shader);
-
-	return 0;
-}
-
-static int enable_axi(struct etnaviv_gpu *gpu)
-{
-	if (gpu->clk_bus)
-		clk_prepare_enable(gpu->clk_bus);
-
-	return 0;
-}
-
-static int disable_axi(struct etnaviv_gpu *gpu)
-{
-	if (gpu->clk_bus)
-		clk_disable_unprepare(gpu->clk_bus);
-
-	return 0;
-}
-
-/*
  * Hangcheck detection for locked gpu:
  */
 static void recover_worker(struct work_struct *work)
@@ -945,7 +891,7 @@
 	gpu->completed_fence = gpu->active_fence;
 
 	etnaviv_gpu_hw_init(gpu);
-	gpu->switch_context = true;
+	gpu->lastctx = NULL;
 	gpu->exec_state = -1;
 
 	mutex_unlock(&gpu->lock);
@@ -1178,6 +1124,9 @@
 	if (!cmdbuf)
 		return NULL;
 
+	if (gpu->mmu->version == ETNAVIV_IOMMU_V2)
+		size = ALIGN(size, SZ_4K);
+
 	cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
 				     GFP_KERNEL);
 	if (!cmdbuf->vaddr) {
@@ -1193,6 +1142,7 @@
 
 void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
 {
+	etnaviv_iommu_put_cmdbuf_va(cmdbuf->gpu, cmdbuf);
 	dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
 		    cmdbuf->paddr);
 	kfree(cmdbuf);
@@ -1425,6 +1375,21 @@
 			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
 		}
 
+		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
+			int i;
+
+			dev_err_ratelimited(gpu->dev,
+				"MMU fault status 0x%08x\n",
+				gpu_read(gpu, VIVS_MMUv2_STATUS));
+			for (i = 0; i < 4; i++) {
+				dev_err_ratelimited(gpu->dev,
+					"MMU %d fault addr 0x%08x\n",
+					i, gpu_read(gpu,
+					VIVS_MMUv2_EXCEPTION_ADDR(i)));
+			}
+			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
+		}
+
 		while ((event = ffs(intr)) != 0) {
 			struct fence *fence;
 
@@ -1466,39 +1431,72 @@
 {
 	int ret;
 
-	ret = enable_clk(gpu);
-	if (ret)
-		return ret;
+	if (gpu->clk_bus) {
+		ret = clk_prepare_enable(gpu->clk_bus);
+		if (ret)
+			return ret;
+	}
 
-	ret = enable_axi(gpu);
-	if (ret) {
-		disable_clk(gpu);
-		return ret;
+	if (gpu->clk_core) {
+		ret = clk_prepare_enable(gpu->clk_core);
+		if (ret)
+			goto disable_clk_bus;
+	}
+
+	if (gpu->clk_shader) {
+		ret = clk_prepare_enable(gpu->clk_shader);
+		if (ret)
+			goto disable_clk_core;
 	}
 
 	return 0;
+
+disable_clk_core:
+	if (gpu->clk_core)
+		clk_disable_unprepare(gpu->clk_core);
+disable_clk_bus:
+	if (gpu->clk_bus)
+		clk_disable_unprepare(gpu->clk_bus);
+
+	return ret;
 }
 
 static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
 {
-	int ret;
-
-	ret = disable_axi(gpu);
-	if (ret)
-		return ret;
-
-	ret = disable_clk(gpu);
-	if (ret)
-		return ret;
+	if (gpu->clk_shader)
+		clk_disable_unprepare(gpu->clk_shader);
+	if (gpu->clk_core)
+		clk_disable_unprepare(gpu->clk_core);
+	if (gpu->clk_bus)
+		clk_disable_unprepare(gpu->clk_bus);
 
 	return 0;
 }
 
+int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+
+	do {
+		u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
+
+		if ((idle & gpu->idle_mask) == gpu->idle_mask)
+			return 0;
+
+		if (time_is_before_jiffies(timeout)) {
+			dev_warn(gpu->dev,
+				 "timed out waiting for idle: idle=0x%x\n",
+				 idle);
+			return -ETIMEDOUT;
+		}
+
+		udelay(5);
+	} while (1);
+}
+
 static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
 {
 	if (gpu->buffer) {
-		unsigned long timeout;
-
 		/* Replace the last WAIT with END */
 		etnaviv_buffer_end(gpu);
 
@@ -1507,22 +1505,7 @@
 		 * happen quickly (as the WAIT is only 200 cycles).  If
 		 * we fail, just warn and continue.
 		 */
-		timeout = jiffies + msecs_to_jiffies(100);
-		do {
-			u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
-
-			if ((idle & gpu->idle_mask) == gpu->idle_mask)
-				break;
-
-			if (time_is_before_jiffies(timeout)) {
-				dev_warn(gpu->dev,
-					 "timed out waiting for idle: idle=0x%x\n",
-					 idle);
-				break;
-			}
-
-			udelay(5);
-		} while (1);
+		etnaviv_gpu_wait_idle(gpu, 100);
 	}
 
 	return etnaviv_gpu_clk_disable(gpu);
@@ -1634,7 +1617,7 @@
 {
 	struct device *dev = &pdev->dev;
 	struct etnaviv_gpu *gpu;
-	int err = 0;
+	int err;
 
 	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
 	if (!gpu)
@@ -1651,16 +1634,15 @@
 	/* Get Interrupt: */
 	gpu->irq = platform_get_irq(pdev, 0);
 	if (gpu->irq < 0) {
-		err = gpu->irq;
-		dev_err(dev, "failed to get irq: %d\n", err);
-		goto fail;
+		dev_err(dev, "failed to get irq: %d\n", gpu->irq);
+		return gpu->irq;
 	}
 
 	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
 			       dev_name(gpu->dev), gpu);
 	if (err) {
 		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
-		goto fail;
+		return err;
 	}
 
 	/* Get Clocks: */
@@ -1694,13 +1676,10 @@
 	err = component_add(&pdev->dev, &gpu_ops);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to register component: %d\n", err);
-		goto fail;
+		return err;
 	}
 
 	return 0;
-
-fail:
-	return err;
 }
 
 static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index a69cdd5..73c278d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -160,6 +160,8 @@
 	dma_addr_t paddr;
 	u32 size;
 	u32 user_size;
+	/* vram node used if the cmdbuf is mapped through the MMUv2 */
+	struct drm_mm_node vram_node;
 	/* fence after which this buffer is to be disposed */
 	struct fence *fence;
 	/* target exec state */
@@ -214,6 +216,8 @@
 void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
 int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
 void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
+int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
+void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);
 
 extern struct platform_driver etnaviv_gpu_driver;
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 16353ee..81f1583 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -196,12 +196,19 @@
 	.dump = etnaviv_iommuv1_dump,
 };
 
-void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
-	struct iommu_domain *domain)
+void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
 {
-	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+	struct etnaviv_iommu_domain *etnaviv_domain =
+			to_etnaviv_domain(gpu->mmu->domain);
 	u32 pgtable;
 
+	/* set base addresses */
+	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
+	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
+	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
+	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
+	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
+
 	/* set page table address in MC */
 	pgtable = (u32)etnaviv_domain->pgtable.paddr;
 
@@ -212,7 +219,7 @@
 	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
 }
 
-struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
+struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_iommu_domain *etnaviv_domain;
 	int ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
index cf45503..8b51e7c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
@@ -17,12 +17,12 @@
 #ifndef __ETNAVIV_IOMMU_H__
 #define __ETNAVIV_IOMMU_H__
 
-#include <linux/iommu.h>
 struct etnaviv_gpu;
 
-struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu);
-void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
-	struct iommu_domain *domain);
-struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
+struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
+void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu);
+
+struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
+void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu);
 
 #endif /* __ETNAVIV_IOMMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index fbb4aed..7e9c4d2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
+ * Copyright (C) 2016 Etnaviv Project
   *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
@@ -22,12 +22,267 @@
 #include <linux/bitops.h>
 
 #include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
 #include "etnaviv_iommu.h"
+#include "state.xml.h"
 #include "state_hi.xml.h"
 
+#define MMUv2_PTE_PRESENT		BIT(0)
+#define MMUv2_PTE_EXCEPTION		BIT(1)
+#define MMUv2_PTE_WRITEABLE		BIT(2)
 
-struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu)
+#define MMUv2_MTLB_MASK			0xffc00000
+#define MMUv2_MTLB_SHIFT		22
+#define MMUv2_STLB_MASK			0x003ff000
+#define MMUv2_STLB_SHIFT		12
+
+#define MMUv2_MAX_STLB_ENTRIES		1024
+
+struct etnaviv_iommuv2_domain {
+	struct iommu_domain domain;
+	struct device *dev;
+	void *bad_page_cpu;
+	dma_addr_t bad_page_dma;
+	/* M(aster) TLB aka first level pagetable */
+	u32 *mtlb_cpu;
+	dma_addr_t mtlb_dma;
+	/* S(lave) TLB aka second level pagetable */
+	u32 *stlb_cpu[1024];
+	dma_addr_t stlb_dma[1024];
+};
+
+static struct etnaviv_iommuv2_domain *to_etnaviv_domain(struct iommu_domain *domain)
 {
-	/* TODO */
+	return container_of(domain, struct etnaviv_iommuv2_domain, domain);
+}
+
+static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
+	   phys_addr_t paddr, size_t size, int prot)
+{
+	struct etnaviv_iommuv2_domain *etnaviv_domain =
+			to_etnaviv_domain(domain);
+	int mtlb_entry, stlb_entry;
+	u32 entry = (u32)paddr | MMUv2_PTE_PRESENT;
+
+	if (size != SZ_4K)
+		return -EINVAL;
+
+	if (prot & IOMMU_WRITE)
+		entry |= MMUv2_PTE_WRITEABLE;
+
+	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
+	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
+
+	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;
+
+	return 0;
+}
+
+static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
+	unsigned long iova, size_t size)
+{
+	struct etnaviv_iommuv2_domain *etnaviv_domain =
+			to_etnaviv_domain(domain);
+	int mtlb_entry, stlb_entry;
+
+	if (size != SZ_4K)
+		return -EINVAL;
+
+	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
+	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
+
+	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;
+
+	return SZ_4K;
+}
+
+static phys_addr_t etnaviv_iommuv2_iova_to_phys(struct iommu_domain *domain,
+	dma_addr_t iova)
+{
+	struct etnaviv_iommuv2_domain *etnaviv_domain =
+			to_etnaviv_domain(domain);
+	int mtlb_entry, stlb_entry;
+
+	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
+	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
+
+	return etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] & ~(SZ_4K - 1);
+}
+
+static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
+{
+	u32 *p;
+	int ret, i, j;
+
+	/* allocate scratch page */
+	etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
+						  SZ_4K,
+						  &etnaviv_domain->bad_page_dma,
+						  GFP_KERNEL);
+	if (!etnaviv_domain->bad_page_cpu) {
+		ret = -ENOMEM;
+		goto fail_mem;
+	}
+	p = etnaviv_domain->bad_page_cpu;
+	for (i = 0; i < SZ_4K / 4; i++)
+		*p++ = 0xdead55aa;
+
+	etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->dev,
+						  SZ_4K,
+						  &etnaviv_domain->mtlb_dma,
+						  GFP_KERNEL);
+	if (!etnaviv_domain->mtlb_cpu) {
+		ret = -ENOMEM;
+		goto fail_mem;
+	}
+
+	/* pre-populate STLB pages (may want to switch to on-demand later) */
+	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
+		etnaviv_domain->stlb_cpu[i] =
+				dma_alloc_coherent(etnaviv_domain->dev,
+						   SZ_4K,
+						   &etnaviv_domain->stlb_dma[i],
+						   GFP_KERNEL);
+		if (!etnaviv_domain->stlb_cpu[i]) {
+			ret = -ENOMEM;
+			goto fail_mem;
+		}
+		p = etnaviv_domain->stlb_cpu[i];
+		for (j = 0; j < SZ_4K / 4; j++)
+			*p++ = MMUv2_PTE_EXCEPTION;
+
+		etnaviv_domain->mtlb_cpu[i] = etnaviv_domain->stlb_dma[i] |
+					      MMUv2_PTE_PRESENT;
+	}
+
+	return 0;
+
+fail_mem:
+	if (etnaviv_domain->bad_page_cpu)
+		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+				  etnaviv_domain->bad_page_cpu,
+				  etnaviv_domain->bad_page_dma);
+
+	if (etnaviv_domain->mtlb_cpu)
+		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+				  etnaviv_domain->mtlb_cpu,
+				  etnaviv_domain->mtlb_dma);
+
+	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
+		if (etnaviv_domain->stlb_cpu[i])
+			dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+					  etnaviv_domain->stlb_cpu[i],
+					  etnaviv_domain->stlb_dma[i]);
+	}
+
+	return ret;
+}
+
+static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
+{
+	struct etnaviv_iommuv2_domain *etnaviv_domain =
+			to_etnaviv_domain(domain);
+	int i;
+
+	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+			  etnaviv_domain->bad_page_cpu,
+			  etnaviv_domain->bad_page_dma);
+
+	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+			  etnaviv_domain->mtlb_cpu,
+			  etnaviv_domain->mtlb_dma);
+
+	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
+		if (etnaviv_domain->stlb_cpu[i])
+			dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+					  etnaviv_domain->stlb_cpu[i],
+					  etnaviv_domain->stlb_dma[i]);
+	}
+
+	vfree(etnaviv_domain);
+}
+
+static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
+{
+	struct etnaviv_iommuv2_domain *etnaviv_domain =
+			to_etnaviv_domain(domain);
+	size_t dump_size = SZ_4K;
+	int i;
+
+	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
+		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+			dump_size += SZ_4K;
+
+	return dump_size;
+}
+
+static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
+{
+	struct etnaviv_iommuv2_domain *etnaviv_domain =
+			to_etnaviv_domain(domain);
+	int i;
+
+	memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
+	buf += SZ_4K;
+	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
+		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+			memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
+}
+
+static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
+	.ops = {
+		.domain_free = etnaviv_iommuv2_domain_free,
+		.map = etnaviv_iommuv2_map,
+		.unmap = etnaviv_iommuv2_unmap,
+		.iova_to_phys = etnaviv_iommuv2_iova_to_phys,
+		.pgsize_bitmap = SZ_4K,
+	},
+	.dump_size = etnaviv_iommuv2_dump_size,
+	.dump = etnaviv_iommuv2_dump,
+};
+
+void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
+{
+	struct etnaviv_iommuv2_domain *etnaviv_domain =
+			to_etnaviv_domain(gpu->mmu->domain);
+	u16 prefetch;
+
+	/* If the MMU is already enabled the state is still there. */
+	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
+		return;
+
+	prefetch = etnaviv_buffer_config_mmuv2(gpu,
+				(u32)etnaviv_domain->mtlb_dma,
+				(u32)etnaviv_domain->bad_page_dma);
+	etnaviv_gpu_start_fe(gpu, gpu->buffer->paddr, prefetch);
+	etnaviv_gpu_wait_idle(gpu, 100);
+
+	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
+}
+struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
+{
+	struct etnaviv_iommuv2_domain *etnaviv_domain;
+	int ret;
+
+	etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
+	if (!etnaviv_domain)
+		return NULL;
+
+	etnaviv_domain->dev = gpu->dev;
+
+	etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
+	etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
+	etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
+	etnaviv_domain->domain.geometry.aperture_start = 0;
+	etnaviv_domain->domain.geometry.aperture_end = ~0UL & ~(SZ_4K - 1);
+
+	ret = etnaviv_iommuv2_init(etnaviv_domain);
+	if (ret)
+		goto out_free;
+
+	return &etnaviv_domain->domain;
+
+out_free:
+	vfree(etnaviv_domain);
 	return NULL;
 }
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h
deleted file mode 100644
index 603ea41..0000000
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
-  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ETNAVIV_IOMMU_V2_H__
-#define __ETNAVIV_IOMMU_V2_H__
-
-#include <linux/iommu.h>
-struct etnaviv_gpu;
-
-struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
-
-#endif /* __ETNAVIV_IOMMU_V2_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 29a723f..d3796ed 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -14,9 +14,11 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include "common.xml.h"
 #include "etnaviv_drv.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_gpu.h"
+#include "etnaviv_iommu.h"
 #include "etnaviv_mmu.h"
 
 static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
@@ -101,40 +103,21 @@
 	drm_mm_remove_node(&mapping->vram_node);
 }
 
-int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
-	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
-	struct etnaviv_vram_mapping *mapping)
+static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
+				   struct drm_mm_node *node, size_t size)
 {
 	struct etnaviv_vram_mapping *free = NULL;
-	struct sg_table *sgt = etnaviv_obj->sgt;
-	struct drm_mm_node *node;
 	int ret;
 
-	lockdep_assert_held(&etnaviv_obj->lock);
+	lockdep_assert_held(&mmu->lock);
 
-	mutex_lock(&mmu->lock);
-
-	/* v1 MMU can optimize single entry (contiguous) scatterlists */
-	if (sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
-		u32 iova;
-
-		iova = sg_dma_address(sgt->sgl) - memory_base;
-		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
-			mapping->iova = iova;
-			list_add_tail(&mapping->mmu_node, &mmu->mappings);
-			mutex_unlock(&mmu->lock);
-			return 0;
-		}
-	}
-
-	node = &mapping->vram_node;
 	while (1) {
 		struct etnaviv_vram_mapping *m, *n;
 		struct list_head list;
 		bool found;
 
 		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
-			etnaviv_obj->base.size, 0, mmu->last_iova, ~0UL,
+			size, 0, mmu->last_iova, ~0UL,
 			DRM_MM_SEARCH_DEFAULT);
 
 		if (ret != -ENOSPC)
@@ -151,7 +134,7 @@
 		}
 
 		/* Try to retire some entries */
-		drm_mm_init_scan(&mmu->mm, etnaviv_obj->base.size, 0, 0);
+		drm_mm_init_scan(&mmu->mm, size, 0, 0);
 
 		found = 0;
 		INIT_LIST_HEAD(&list);
@@ -212,6 +195,38 @@
 		mmu->need_flush = true;
 	}
 
+	return ret;
+}
+
+int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
+	struct etnaviv_vram_mapping *mapping)
+{
+	struct sg_table *sgt = etnaviv_obj->sgt;
+	struct drm_mm_node *node;
+	int ret;
+
+	lockdep_assert_held(&etnaviv_obj->lock);
+
+	mutex_lock(&mmu->lock);
+
+	/* v1 MMU can optimize single entry (contiguous) scatterlists */
+	if (mmu->version == ETNAVIV_IOMMU_V1 &&
+	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
+		u32 iova;
+
+		iova = sg_dma_address(sgt->sgl) - memory_base;
+		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
+			mapping->iova = iova;
+			list_add_tail(&mapping->mmu_node, &mmu->mappings);
+			mutex_unlock(&mmu->lock);
+			return 0;
+		}
+	}
+
+	node = &mapping->vram_node;
+
+	ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
 	if (ret < 0) {
 		mutex_unlock(&mmu->lock);
 		return ret;
@@ -256,30 +271,102 @@
 	kfree(mmu);
 }
 
-struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
-	struct iommu_domain *domain, enum etnaviv_iommu_version version)
+struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
 {
+	enum etnaviv_iommu_version version;
 	struct etnaviv_iommu *mmu;
 
 	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
 	if (!mmu)
 		return ERR_PTR(-ENOMEM);
 
-	mmu->domain = domain;
+	if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
+		mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
+		version = ETNAVIV_IOMMU_V1;
+	} else {
+		mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
+		version = ETNAVIV_IOMMU_V2;
+	}
+
+	if (!mmu->domain) {
+		dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
+		kfree(mmu);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	mmu->gpu = gpu;
 	mmu->version = version;
 	mutex_init(&mmu->lock);
 	INIT_LIST_HEAD(&mmu->mappings);
 
-	drm_mm_init(&mmu->mm, domain->geometry.aperture_start,
-		    domain->geometry.aperture_end -
-		      domain->geometry.aperture_start + 1);
+	drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start,
+		    mmu->domain->geometry.aperture_end -
+		    mmu->domain->geometry.aperture_start + 1);
 
-	iommu_set_fault_handler(domain, etnaviv_fault_handler, gpu->dev);
+	iommu_set_fault_handler(mmu->domain, etnaviv_fault_handler, gpu->dev);
 
 	return mmu;
 }
 
+void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
+{
+	if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
+		etnaviv_iommuv1_restore(gpu);
+	else
+		etnaviv_iommuv2_restore(gpu);
+}
+
+u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
+				struct etnaviv_cmdbuf *buf)
+{
+	struct etnaviv_iommu *mmu = gpu->mmu;
+
+	if (mmu->version == ETNAVIV_IOMMU_V1) {
+		return buf->paddr - gpu->memory_base;
+	} else {
+		int ret;
+
+		if (buf->vram_node.allocated)
+			return (u32)buf->vram_node.start;
+
+		mutex_lock(&mmu->lock);
+		ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node, buf->size);
+		if (ret < 0) {
+			mutex_unlock(&mmu->lock);
+			return 0;
+		}
+		ret = iommu_map(mmu->domain, buf->vram_node.start, buf->paddr,
+				buf->size, IOMMU_READ);
+		if (ret < 0) {
+			drm_mm_remove_node(&buf->vram_node);
+			mutex_unlock(&mmu->lock);
+			return 0;
+		}
+		/*
+		 * At least on GC3000 the FE MMU doesn't properly flush old TLB
+		 * entries. Make sure to space the command buffers out in a way
+		 * that the FE MMU prefetch won't load invalid entries.
+		 */
+		mmu->last_iova = buf->vram_node.start + buf->size + SZ_64K;
+		gpu->mmu->need_flush = true;
+		mutex_unlock(&mmu->lock);
+
+		return (u32)buf->vram_node.start;
+	}
+}
+
+void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
+				 struct etnaviv_cmdbuf *buf)
+{
+	struct etnaviv_iommu *mmu = gpu->mmu;
+
+	if (mmu->version == ETNAVIV_IOMMU_V2 && buf->vram_node.allocated) {
+		mutex_lock(&mmu->lock);
+		iommu_unmap(mmu->domain, buf->vram_node.start, buf->size);
+		drm_mm_remove_node(&buf->vram_node);
+		mutex_unlock(&mmu->lock);
+	}
+}
 size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
 {
 	struct etnaviv_iommu_ops *ops;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index fff215a4..e787e49 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -62,10 +62,15 @@
 	struct etnaviv_vram_mapping *mapping);
 void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
 
+u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
+				struct etnaviv_cmdbuf *buf);
+void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
+				 struct etnaviv_cmdbuf *buf);
+
 size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
 void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
 
-struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
-	struct iommu_domain *domain, enum etnaviv_iommu_version version);
+struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu);
+void etnaviv_iommu_restore(struct etnaviv_gpu *gpu);
 
 #endif /* __ETNAVIV_MMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
index 807a3d9..43c73e2 100644
--- a/drivers/gpu/drm/etnaviv/state_hi.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -8,10 +8,10 @@
 git clone git://0x04.net/rules-ng-ng
 
 The rules-ng-ng source files this header was generated from are:
-- state_hi.xml (  24309 bytes, from 2015-12-12 09:02:53)
-- common.xml   (  18437 bytes, from 2015-12-12 09:02:53)
+- state_hi.xml (  25620 bytes, from 2016-08-19 22:07:37)
+- common.xml   (  20583 bytes, from 2016-06-07 05:22:38)
 
-Copyright (C) 2015
+Copyright (C) 2016
 */
 
 
@@ -78,9 +78,10 @@
 #define VIVS_HI_AXI_STATUS_DET_RD_ERR				0x00000200
 
 #define VIVS_HI_INTR_ACKNOWLEDGE				0x00000010
-#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK			0x7fffffff
+#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK			0x3fffffff
 #define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT		0
 #define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC(x)			(((x) << VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT) & VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK)
+#define VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION			0x40000000
 #define VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR			0x80000000
 
 #define VIVS_HI_INTR_ENBL					0x00000014
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 83f61c5..465d344f 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -38,7 +38,6 @@
 
 config DRM_EXYNOS_MIXER
 	bool "Mixer"
-	depends on !VIDEO_SAMSUNG_S5P_TV
 	help
 	  Choose this option if you want to use Exynos Mixer for DRM.
 
@@ -77,7 +76,7 @@
 
 config DRM_EXYNOS_HDMI
 	bool "HDMI"
-	depends on !VIDEO_SAMSUNG_S5P_TV && (DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON)
+	depends on DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON
 	help
 	  Choose this option if you want to use Exynos HDMI for DRM.
 
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index ac21b40..6ca1f31 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -551,7 +551,6 @@
 {
 	struct decon_context *ctx = dev_id;
 	u32 val;
-	int win;
 
 	if (!test_bit(BIT_CLKS_ENABLED, &ctx->flags))
 		goto out;
@@ -560,16 +559,6 @@
 	val &= VIDINTCON1_INTFRMDONEPEND | VIDINTCON1_INTFRMPEND;
 
 	if (val) {
-		for (win = ctx->first_win; win < WINDOWS_NR ; win++) {
-			struct exynos_drm_plane *plane = &ctx->planes[win];
-
-			if (!plane->pending_fb)
-				continue;
-
-			exynos_drm_crtc_finish_update(ctx->crtc, plane);
-		}
-
-		/* clear */
 		writel(val, ctx->addr + DECON_VIDINTCON1);
 		drm_crtc_handle_vblank(&ctx->crtc->base);
 	}
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 7f9901b..f4d5a21 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -603,7 +603,6 @@
 {
 	struct decon_context *ctx = (struct decon_context *)dev_id;
 	u32 val, clear_bit;
-	int win;
 
 	val = readl(ctx->regs + VIDINTCON1);
 
@@ -617,14 +616,6 @@
 
 	if (!ctx->i80_if) {
 		drm_crtc_handle_vblank(&ctx->crtc->base);
-		for (win = 0 ; win < WINDOWS_NR ; win++) {
-			struct exynos_drm_plane *plane = &ctx->planes[win];
-
-			if (!plane->pending_fb)
-				continue;
-
-			exynos_drm_crtc_finish_update(ctx->crtc, plane);
-		}
 
 		/* set wait vsync event to zero and wake up queue. */
 		if (atomic_read(&ctx->wait_vsync_event)) {
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 4f08505..528229f 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -43,7 +43,7 @@
 	struct analogix_dp_plat_data plat_data;
 };
 
-int exynos_dp_crtc_clock_enable(struct analogix_dp_plat_data *plat_data,
+static int exynos_dp_crtc_clock_enable(struct analogix_dp_plat_data *plat_data,
 				bool enable)
 {
 	struct exynos_dp_device *dp = to_dp(plat_data);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 785ffa6..2530bf5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -69,8 +69,6 @@
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
-	exynos_crtc->event = crtc->state->event;
-
 	if (exynos_crtc->ops->atomic_begin)
 		exynos_crtc->ops->atomic_begin(exynos_crtc);
 }
@@ -79,9 +77,24 @@
 				     struct drm_crtc_state *old_crtc_state)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct drm_pending_vblank_event *event;
+	unsigned long flags;
 
 	if (exynos_crtc->ops->atomic_flush)
 		exynos_crtc->ops->atomic_flush(exynos_crtc);
+
+	event = crtc->state->event;
+	if (event) {
+		crtc->state->event = NULL;
+
+		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+		if (drm_crtc_vblank_get(crtc) == 0)
+			drm_crtc_arm_vblank_event(crtc, event);
+		else
+			drm_crtc_send_vblank_event(crtc, event);
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+	}
+
 }
 
 static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
@@ -134,8 +147,6 @@
 	exynos_crtc->ops = ops;
 	exynos_crtc->ctx = ctx;
 
-	init_waitqueue_head(&exynos_crtc->wait_update);
-
 	crtc = &exynos_crtc->base;
 
 	private->crtc[pipe] = crtc;
@@ -175,32 +186,6 @@
 		exynos_crtc->ops->disable_vblank(exynos_crtc);
 }
 
-void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc)
-{
-	wait_event_timeout(exynos_crtc->wait_update,
-			   (atomic_read(&exynos_crtc->pending_update) == 0),
-			   msecs_to_jiffies(50));
-}
-
-void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
-				struct exynos_drm_plane *exynos_plane)
-{
-	struct drm_crtc *crtc = &exynos_crtc->base;
-	unsigned long flags;
-
-	exynos_plane->pending_fb = NULL;
-
-	if (atomic_dec_and_test(&exynos_crtc->pending_update))
-		wake_up(&exynos_crtc->wait_update);
-
-	spin_lock_irqsave(&crtc->dev->event_lock, flags);
-	if (exynos_crtc->event)
-		drm_crtc_send_vblank_event(crtc, exynos_crtc->event);
-
-	exynos_crtc->event = NULL;
-	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-}
-
 int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
 				       enum exynos_drm_output_type out_type)
 {
@@ -228,20 +213,19 @@
 void exynos_drm_crtc_cancel_page_flip(struct drm_crtc *crtc,
 					struct drm_file *file)
 {
-	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 	struct drm_pending_vblank_event *e;
 	unsigned long flags;
 
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
-	e = exynos_crtc->event;
-	if (e && e->base.file_priv == file) {
-		exynos_crtc->event = NULL;
-		atomic_dec(&exynos_crtc->pending_update);
-	}
+	e = crtc->state->event;
+	if (e && e->base.file_priv == file)
+		crtc->state->event = NULL;
+	else
+		e = NULL;
 
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 
-	if (e && e->base.file_priv == file)
+	if (e)
 		drm_event_cancel_free(crtc->dev, &e->base);
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 877d2ef..def78c8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -45,37 +45,11 @@
 	u32			crtcs;
 };
 
-static void exynos_atomic_wait_for_commit(struct drm_atomic_state *state)
-{
-	struct drm_crtc_state *crtc_state;
-	struct drm_crtc *crtc;
-	int i, ret;
-
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
-		if (!crtc->state->enable)
-			continue;
-
-		ret = drm_crtc_vblank_get(crtc);
-		if (ret)
-			continue;
-
-		exynos_drm_crtc_wait_pending_update(exynos_crtc);
-		drm_crtc_vblank_put(crtc);
-	}
-}
-
 static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
 {
 	struct drm_device *dev = commit->dev;
 	struct exynos_drm_private *priv = dev->dev_private;
 	struct drm_atomic_state *state = commit->state;
-	struct drm_plane *plane;
-	struct drm_crtc *crtc;
-	struct drm_plane_state *plane_state;
-	struct drm_crtc_state *crtc_state;
-	int i;
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 
@@ -89,25 +63,9 @@
 	 * have the relevant clocks enabled to perform the update.
 	 */
 
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	drm_atomic_helper_commit_planes(dev, state, 0);
 
-		atomic_set(&exynos_crtc->pending_update, 0);
-	}
-
-	for_each_plane_in_state(state, plane, plane_state, i) {
-		struct exynos_drm_crtc *exynos_crtc =
-						to_exynos_crtc(plane->crtc);
-
-		if (!plane->crtc)
-			continue;
-
-		atomic_inc(&exynos_crtc->pending_update);
-	}
-
-	drm_atomic_helper_commit_planes(dev, state, false);
-
-	exynos_atomic_wait_for_commit(state);
+	drm_atomic_helper_wait_for_vblanks(dev, state);
 
 	drm_atomic_helper_cleanup_planes(dev, state);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 7f1a49d..d215149 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -86,7 +86,6 @@
 	struct drm_plane base;
 	const struct exynos_drm_plane_config *config;
 	unsigned int index;
-	struct drm_framebuffer *pending_fb;
 };
 
 #define EXYNOS_DRM_PLANE_CAP_DOUBLE	(1 << 0)
@@ -172,9 +171,6 @@
 	struct drm_crtc			base;
 	enum exynos_drm_output_type	type;
 	unsigned int			pipe;
-	struct drm_pending_vblank_event	*event;
-	wait_queue_head_t		wait_update;
-	atomic_t			pending_update;
 	const struct exynos_drm_crtc_ops	*ops;
 	void				*ctx;
 	struct exynos_drm_clk		*pipe_clk;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index d472164..e2e4051 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -198,6 +198,7 @@
 	atomic_t			wait_vsync_event;
 	atomic_t			win_updated;
 	atomic_t			triggering;
+	u32				clkdiv;
 
 	const struct fimd_driver_data *driver_data;
 	struct drm_encoder *encoder;
@@ -389,15 +390,18 @@
 	pm_runtime_put(ctx->dev);
 }
 
-static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
-		const struct drm_display_mode *mode)
+
+static int fimd_atomic_check(struct exynos_drm_crtc *crtc,
+		struct drm_crtc_state *state)
 {
-	unsigned long ideal_clk;
+	struct drm_display_mode *mode = &state->adjusted_mode;
+	struct fimd_context *ctx = crtc->ctx;
+	unsigned long ideal_clk, lcd_rate;
 	u32 clkdiv;
 
 	if (mode->clock == 0) {
-		DRM_ERROR("Mode has zero clock value.\n");
-		return 0xff;
+		DRM_INFO("Mode has zero clock value.\n");
+		return -EINVAL;
 	}
 
 	ideal_clk = mode->clock * 1000;
@@ -410,10 +414,23 @@
 		ideal_clk *= 2;
 	}
 
-	/* Find the clock divider value that gets us closest to ideal_clk */
-	clkdiv = DIV_ROUND_CLOSEST(clk_get_rate(ctx->lcd_clk), ideal_clk);
+	lcd_rate = clk_get_rate(ctx->lcd_clk);
+	if (2 * lcd_rate < ideal_clk) {
+		DRM_INFO("sclk_fimd clock too low(%lu) for requested pixel clock(%lu)\n",
+			 lcd_rate, ideal_clk);
+		return -EINVAL;
+	}
 
-	return (clkdiv < 0x100) ? clkdiv : 0xff;
+	/* Find the clock divider value that gets us closest to ideal_clk */
+	clkdiv = DIV_ROUND_CLOSEST(lcd_rate, ideal_clk);
+	if (clkdiv >= 0x200) {
+		DRM_INFO("requested pixel clock(%lu) too low\n", ideal_clk);
+		return -EINVAL;
+	}
+
+	ctx->clkdiv = (clkdiv < 0x100) ? clkdiv : 0xff;
+
+	return 0;
 }
 
 static void fimd_setup_trigger(struct fimd_context *ctx)
@@ -442,7 +459,7 @@
 	struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
 	const struct fimd_driver_data *driver_data = ctx->driver_data;
 	void *timing_base = ctx->regs + driver_data->timing_base;
-	u32 val, clkdiv;
+	u32 val;
 
 	if (ctx->suspended)
 		return;
@@ -543,9 +560,8 @@
 	if (ctx->driver_data->has_clksel)
 		val |= VIDCON0_CLKSEL_LCD;
 
-	clkdiv = fimd_calc_clkdiv(ctx, mode);
-	if (clkdiv > 1)
-		val |= VIDCON0_CLKVAL_F(clkdiv - 1) | VIDCON0_CLKDIR;
+	if (ctx->clkdiv > 1)
+		val |= VIDCON0_CLKVAL_F(ctx->clkdiv - 1) | VIDCON0_CLKDIR;
 
 	writel(val, ctx->regs + VIDCON0);
 }
@@ -939,14 +955,14 @@
 	.update_plane = fimd_update_plane,
 	.disable_plane = fimd_disable_plane,
 	.atomic_flush = fimd_atomic_flush,
+	.atomic_check = fimd_atomic_check,
 	.te_handler = fimd_te_handler,
 };
 
 static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
 {
 	struct fimd_context *ctx = (struct fimd_context *)dev_id;
-	u32 val, clear_bit, start, start_s;
-	int win;
+	u32 val, clear_bit;
 
 	val = readl(ctx->regs + VIDINTCON1);
 
@@ -961,18 +977,6 @@
 	if (!ctx->i80_if)
 		drm_crtc_handle_vblank(&ctx->crtc->base);
 
-	for (win = 0 ; win < WINDOWS_NR ; win++) {
-		struct exynos_drm_plane *plane = &ctx->planes[win];
-
-		if (!plane->pending_fb)
-			continue;
-
-		start = readl(ctx->regs + VIDWx_BUF_START(win, 0));
-		start_s = readl(ctx->regs + VIDWx_BUF_START_S(win, 0));
-		if (start == start_s)
-			exynos_drm_crtc_finish_update(ctx->crtc, plane);
-	}
-
 	if (ctx->i80_if) {
 		/* Exits triggering mode */
 		atomic_set(&ctx->triggering, 0);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 6eca8bb..fbd13fa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -138,6 +138,18 @@
 	MAX_REG_TYPE_NR
 };
 
+enum g2d_flag_bits {
+	/*
+	 * If set, suspends the runqueue worker after the currently
+	 * processed node is finished.
+	 */
+	G2D_BIT_SUSPEND_RUNQUEUE,
+	/*
+	 * If set, indicates that the engine is currently busy.
+	 */
+	G2D_BIT_ENGINE_BUSY,
+};
+
 /* cmdlist data structure */
 struct g2d_cmdlist {
 	u32		head;
@@ -226,7 +238,7 @@
 	struct workqueue_struct		*g2d_workq;
 	struct work_struct		runqueue_work;
 	struct exynos_drm_subdrv	subdrv;
-	bool				suspended;
+	unsigned long			flags;
 
 	/* cmdlist */
 	struct g2d_cmdlist_node		*cmdlist_node;
@@ -246,6 +258,12 @@
 	unsigned long			max_pool;
 };
 
+static inline void g2d_hw_reset(struct g2d_data *g2d)
+{
+	writel(G2D_R | G2D_SFRCLEAR, g2d->regs + G2D_SOFT_RESET);
+	clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
+}
+
 static int g2d_init_cmdlist(struct g2d_data *g2d)
 {
 	struct device *dev = g2d->dev;
@@ -470,7 +488,8 @@
 		goto err_free;
 	}
 
-	ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+	ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+		g2d_userptr->vec);
 	if (ret != npages) {
 		DRM_ERROR("failed to get user pages from userptr.\n");
 		if (ret < 0)
@@ -803,12 +822,8 @@
 	struct g2d_cmdlist_node *node =
 				list_first_entry(&runqueue_node->run_cmdlist,
 						struct g2d_cmdlist_node, list);
-	int ret;
 
-	ret = pm_runtime_get_sync(g2d->dev);
-	if (ret < 0)
-		return;
-
+	set_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
 	writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
 	writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
 }
@@ -831,9 +846,6 @@
 {
 	struct g2d_cmdlist_node *node;
 
-	if (!runqueue_node)
-		return;
-
 	mutex_lock(&g2d->cmdlist_mutex);
 	/*
 	 * commands in run_cmdlist have been completed so unmap all gem
@@ -847,29 +859,65 @@
 	kmem_cache_free(g2d->runqueue_slab, runqueue_node);
 }
 
-static void g2d_exec_runqueue(struct g2d_data *g2d)
+/**
+ * g2d_remove_runqueue_nodes - remove items from the list of runqueue nodes
+ * @g2d: G2D state object
+ * @file: if not zero, only remove items with this DRM file
+ *
+ * Has to be called under runqueue lock.
+ */
+static void g2d_remove_runqueue_nodes(struct g2d_data *g2d, struct drm_file* file)
 {
-	g2d->runqueue_node = g2d_get_runqueue_node(g2d);
-	if (g2d->runqueue_node)
-		g2d_dma_start(g2d, g2d->runqueue_node);
+	struct g2d_runqueue_node *node, *n;
+
+	if (list_empty(&g2d->runqueue))
+		return;
+
+	list_for_each_entry_safe(node, n, &g2d->runqueue, list) {
+		if (file && node->filp != file)
+			continue;
+
+		list_del_init(&node->list);
+		g2d_free_runqueue_node(g2d, node);
+	}
 }
 
 static void g2d_runqueue_worker(struct work_struct *work)
 {
 	struct g2d_data *g2d = container_of(work, struct g2d_data,
 					    runqueue_work);
+	struct g2d_runqueue_node *runqueue_node;
+
+	/*
+	 * The engine is busy and the completion of the current node is going
+	 * to poke the runqueue worker, so nothing to do here.
+	 */
+	if (test_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags))
+		return;
 
 	mutex_lock(&g2d->runqueue_mutex);
-	pm_runtime_put_sync(g2d->dev);
 
-	complete(&g2d->runqueue_node->complete);
-	if (g2d->runqueue_node->async)
-		g2d_free_runqueue_node(g2d, g2d->runqueue_node);
+	runqueue_node = g2d->runqueue_node;
+	g2d->runqueue_node = NULL;
 
-	if (g2d->suspended)
-		g2d->runqueue_node = NULL;
-	else
-		g2d_exec_runqueue(g2d);
+	if (runqueue_node) {
+		pm_runtime_mark_last_busy(g2d->dev);
+		pm_runtime_put_autosuspend(g2d->dev);
+
+		complete(&runqueue_node->complete);
+		if (runqueue_node->async)
+			g2d_free_runqueue_node(g2d, runqueue_node);
+	}
+
+	if (!test_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags)) {
+		g2d->runqueue_node = g2d_get_runqueue_node(g2d);
+
+		if (g2d->runqueue_node) {
+			pm_runtime_get_sync(g2d->dev);
+			g2d_dma_start(g2d, g2d->runqueue_node);
+		}
+	}
+
 	mutex_unlock(&g2d->runqueue_mutex);
 }
 
@@ -918,12 +966,72 @@
 		}
 	}
 
-	if (pending & G2D_INTP_ACMD_FIN)
+	if (pending & G2D_INTP_ACMD_FIN) {
+		clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
 		queue_work(g2d->g2d_workq, &g2d->runqueue_work);
+	}
 
 	return IRQ_HANDLED;
 }
 
+/**
+ * g2d_wait_finish - wait for the G2D engine to finish the current runqueue node
+ * @g2d: G2D state object
+ * @file: if not zero, only wait if the current runqueue node belongs
+ *        to the DRM file
+ *
+ * Should the engine not become idle after a 100ms timeout, a hardware
+ * reset is issued.
+ */
+static void g2d_wait_finish(struct g2d_data *g2d, struct drm_file *file)
+{
+	struct device *dev = g2d->dev;
+
+	struct g2d_runqueue_node *runqueue_node = NULL;
+	unsigned int tries = 10;
+
+	mutex_lock(&g2d->runqueue_mutex);
+
+	/* If no node is currently processed, we have nothing to do. */
+	if (!g2d->runqueue_node)
+		goto out;
+
+	runqueue_node = g2d->runqueue_node;
+
+	/* Check if the currently processed item belongs to us. */
+	if (file && runqueue_node->filp != file)
+		goto out;
+
+	mutex_unlock(&g2d->runqueue_mutex);
+
+	/* Wait for the G2D engine to finish. */
+	while (tries-- && (g2d->runqueue_node == runqueue_node))
+		mdelay(10);
+
+	mutex_lock(&g2d->runqueue_mutex);
+
+	if (g2d->runqueue_node != runqueue_node)
+		goto out;
+
+	dev_err(dev, "wait timed out, resetting engine...\n");
+	g2d_hw_reset(g2d);
+
+	/*
+	 * After the hardware reset of the engine we are going to loose
+	 * the IRQ which triggers the PM runtime put().
+	 * So do this manually here.
+	 */
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+
+	complete(&runqueue_node->complete);
+	if (runqueue_node->async)
+		g2d_free_runqueue_node(g2d, runqueue_node);
+
+out:
+	mutex_unlock(&g2d->runqueue_mutex);
+}
+
 static int g2d_check_reg_offset(struct device *dev,
 				struct g2d_cmdlist_node *node,
 				int nr, bool for_addr)
@@ -1259,10 +1367,11 @@
 	runqueue_node->pid = current->pid;
 	runqueue_node->filp = file;
 	list_add_tail(&runqueue_node->list, &g2d->runqueue);
-	if (!g2d->runqueue_node)
-		g2d_exec_runqueue(g2d);
 	mutex_unlock(&g2d->runqueue_mutex);
 
+	/* Let the runqueue know that there is work to do. */
+	queue_work(g2d->g2d_workq, &g2d->runqueue_work);
+
 	if (runqueue_node->async)
 		goto out;
 
@@ -1339,15 +1448,26 @@
 	if (!g2d)
 		return;
 
+	/* Remove the runqueue nodes that belong to us. */
+	mutex_lock(&g2d->runqueue_mutex);
+	g2d_remove_runqueue_nodes(g2d, file);
+	mutex_unlock(&g2d->runqueue_mutex);
+
+	/*
+	 * Wait for the runqueue worker to finish its current node.
+	 * After this the engine should no longer be accessing any
+	 * memory belonging to us.
+	 */
+	g2d_wait_finish(g2d, file);
+
+	/*
+	 * Even after the engine is idle, there might still be stale cmdlists
+	 * (i.e. cmdlisst which we submitted but never executed) around, with
+	 * their corresponding GEM/userptr buffers.
+	 * Properly unmap these buffers here.
+	 */
 	mutex_lock(&g2d->cmdlist_mutex);
 	list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
-		/*
-		 * unmap all gem objects not completed.
-		 *
-		 * P.S. if current process was terminated forcely then
-		 * there may be some commands in inuse_cmdlist so unmap
-		 * them.
-		 */
 		g2d_unmap_cmdlist_gem(g2d, node, file);
 		list_move_tail(&node->list, &g2d->free_cmdlist);
 	}
@@ -1399,7 +1519,11 @@
 		goto err_destroy_workqueue;
 	}
 
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_autosuspend_delay(dev, 2000);
 	pm_runtime_enable(dev);
+	clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
+	clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
@@ -1440,7 +1564,7 @@
 		goto err_put_clk;
 	}
 
-	dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n",
+	dev_info(dev, "The Exynos G2D (ver %d.%d) successfully probed.\n",
 			G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
 
 	return 0;
@@ -1458,14 +1582,17 @@
 {
 	struct g2d_data *g2d = platform_get_drvdata(pdev);
 
+	/* Suspend operation and wait for engine idle. */
+	set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
+	g2d_wait_finish(g2d, NULL);
+
 	cancel_work_sync(&g2d->runqueue_work);
 	exynos_drm_subdrv_unregister(&g2d->subdrv);
 
-	while (g2d->runqueue_node) {
-		g2d_free_runqueue_node(g2d, g2d->runqueue_node);
-		g2d->runqueue_node = g2d_get_runqueue_node(g2d);
-	}
+	/* There should be no locking needed here. */
+	g2d_remove_runqueue_nodes(g2d, NULL);
 
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
 	g2d_fini_cmdlist(g2d);
@@ -1475,21 +1602,38 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int g2d_suspend(struct device *dev)
+{
+	struct g2d_data *g2d = dev_get_drvdata(dev);
+
+	/*
+	 * Suspend the runqueue worker operation and wait until the G2D
+	 * engine is idle.
+	 */
+	set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
+	g2d_wait_finish(g2d, NULL);
+	flush_work(&g2d->runqueue_work);
+
+	return 0;
+}
+
+static int g2d_resume(struct device *dev)
+{
+	struct g2d_data *g2d = dev_get_drvdata(dev);
+
+	clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
+	queue_work(g2d->g2d_workq, &g2d->runqueue_work);
+
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_PM
 static int g2d_runtime_suspend(struct device *dev)
 {
 	struct g2d_data *g2d = dev_get_drvdata(dev);
 
-	mutex_lock(&g2d->runqueue_mutex);
-	g2d->suspended = true;
-	mutex_unlock(&g2d->runqueue_mutex);
-
-	while (g2d->runqueue_node)
-		/* FIXME: good range? */
-		usleep_range(500, 1000);
-
-	flush_work(&g2d->runqueue_work);
-
 	clk_disable_unprepare(g2d->gate_clk);
 
 	return 0;
@@ -1504,16 +1648,12 @@
 	if (ret < 0)
 		dev_warn(dev, "failed to enable clock.\n");
 
-	g2d->suspended = false;
-	g2d_exec_runqueue(g2d);
-
 	return ret;
 }
 #endif
 
 static const struct dev_pm_ops g2d_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-				pm_runtime_force_resume)
+	SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
 	SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
 };
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index c8de491..87f6b56 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -66,7 +66,7 @@
 	if (ret)
 		goto free_domain;
 
-	ret = iommu_dma_init_domain(domain, start, size);
+	ret = iommu_dma_init_domain(domain, start, size, NULL);
 	if (ret)
 		goto put_cookie;
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 7f32419..c2f17f3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -238,7 +238,6 @@
 		return;
 
 	plane->crtc = state->crtc;
-	exynos_plane->pending_fb = state->fb;
 
 	if (exynos_crtc->ops->update_plane)
 		exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e8f6c92..57fe514 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/component.h>
+#include <linux/timer.h>
 
 #include <drm/exynos_drm.h>
 
@@ -28,6 +29,9 @@
 #include "exynos_drm_plane.h"
 #include "exynos_drm_vidi.h"
 
+/* VIDI uses fixed refresh rate of 50Hz */
+#define VIDI_REFRESH_TIME (1000 / 50)
+
 /* vidi has totally three virtual windows. */
 #define WINDOWS_NR		3
 
@@ -43,12 +47,9 @@
 	struct exynos_drm_plane		planes[WINDOWS_NR];
 	struct edid			*raw_edid;
 	unsigned int			clkdiv;
-	unsigned long			irq_flags;
 	unsigned int			connected;
-	bool				vblank_on;
 	bool				suspended;
-	bool				direct_vblank;
-	struct work_struct		work;
+	struct timer_list		timer;
 	struct mutex			lock;
 	int				pipe;
 };
@@ -102,30 +103,14 @@
 	if (ctx->suspended)
 		return -EPERM;
 
-	if (!test_and_set_bit(0, &ctx->irq_flags))
-		ctx->vblank_on = true;
-
-	ctx->direct_vblank = true;
-
-	/*
-	 * in case of page flip request, vidi_finish_pageflip function
-	 * will not be called because direct_vblank is true and then
-	 * that function will be called by crtc_ops->update_plane callback
-	 */
-	schedule_work(&ctx->work);
+	mod_timer(&ctx->timer,
+		jiffies + msecs_to_jiffies(VIDI_REFRESH_TIME) - 1);
 
 	return 0;
 }
 
 static void vidi_disable_vblank(struct exynos_drm_crtc *crtc)
 {
-	struct vidi_context *ctx = crtc->ctx;
-
-	if (ctx->suspended)
-		return;
-
-	if (test_and_clear_bit(0, &ctx->irq_flags))
-		ctx->vblank_on = false;
 }
 
 static void vidi_update_plane(struct exynos_drm_crtc *crtc,
@@ -140,9 +125,6 @@
 
 	addr = exynos_drm_fb_dma_addr(state->fb, 0);
 	DRM_DEBUG_KMS("dma_addr = %pad\n", &addr);
-
-	if (ctx->vblank_on)
-		schedule_work(&ctx->work);
 }
 
 static void vidi_enable(struct exynos_drm_crtc *crtc)
@@ -153,17 +135,17 @@
 
 	ctx->suspended = false;
 
-	/* if vblank was enabled status, enable it again. */
-	if (test_and_clear_bit(0, &ctx->irq_flags))
-		vidi_enable_vblank(ctx->crtc);
-
 	mutex_unlock(&ctx->lock);
+
+	drm_crtc_vblank_on(&crtc->base);
 }
 
 static void vidi_disable(struct exynos_drm_crtc *crtc)
 {
 	struct vidi_context *ctx = crtc->ctx;
 
+	drm_crtc_vblank_off(&crtc->base);
+
 	mutex_lock(&ctx->lock);
 
 	ctx->suspended = true;
@@ -190,37 +172,16 @@
 	.update_plane = vidi_update_plane,
 };
 
-static void vidi_fake_vblank_handler(struct work_struct *work)
+static void vidi_fake_vblank_timer(unsigned long arg)
 {
-	struct vidi_context *ctx = container_of(work, struct vidi_context,
-					work);
-	int win;
+	struct vidi_context *ctx = (void *)arg;
 
 	if (ctx->pipe < 0)
 		return;
 
-	/* refresh rate is about 50Hz. */
-	usleep_range(16000, 20000);
-
-	mutex_lock(&ctx->lock);
-
-	if (ctx->direct_vblank) {
-		drm_crtc_handle_vblank(&ctx->crtc->base);
-		ctx->direct_vblank = false;
-		mutex_unlock(&ctx->lock);
-		return;
-	}
-
-	mutex_unlock(&ctx->lock);
-
-	for (win = 0 ; win < WINDOWS_NR ; win++) {
-		struct exynos_drm_plane *plane = &ctx->planes[win];
-
-		if (!plane->pending_fb)
-			continue;
-
-		exynos_drm_crtc_finish_update(ctx->crtc, plane);
-	}
+	if (drm_crtc_handle_vblank(&ctx->crtc->base))
+		mod_timer(&ctx->timer,
+			jiffies + msecs_to_jiffies(VIDI_REFRESH_TIME) - 1);
 }
 
 static ssize_t vidi_show_connection(struct device *dev,
@@ -489,6 +450,9 @@
 
 static void vidi_unbind(struct device *dev, struct device *master, void *data)
 {
+	struct vidi_context *ctx = dev_get_drvdata(dev);
+
+	del_timer_sync(&ctx->timer);
 }
 
 static const struct component_ops vidi_component_ops = {
@@ -507,7 +471,7 @@
 
 	ctx->pdev = pdev;
 
-	INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
+	setup_timer(&ctx->timer, vidi_fake_vblank_timer, (unsigned long)ctx);
 
 	mutex_init(&ctx->lock);
 
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2275efe..e8fb6ef 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1669,10 +1669,9 @@
 	if (ret)
 		return ret;
 
-	for (i = 0; i < ARRAY_SIZE(supply); ++i) {
+	for (i = 0; i < ARRAY_SIZE(supply); ++i)
 		hdata->regul_bulk[i].supply = supply[i];
-		hdata->regul_bulk[i].consumer = NULL;
-	}
+
 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), hdata->regul_bulk);
 	if (ret) {
 		if (ret != -EPROBE_DEFER)
@@ -1760,28 +1759,74 @@
 	.unbind = hdmi_unbind,
 };
 
-static struct device_node *hdmi_legacy_ddc_dt_binding(struct device *dev)
+static int hdmi_get_ddc_adapter(struct hdmi_context *hdata)
 {
 	const char *compatible_str = "samsung,exynos4210-hdmiddc";
 	struct device_node *np;
+	struct i2c_adapter *adpt;
 
 	np = of_find_compatible_node(NULL, NULL, compatible_str);
 	if (np)
-		return of_get_next_parent(np);
+		np = of_get_next_parent(np);
+	else
+		np = of_parse_phandle(hdata->dev->of_node, "ddc", 0);
 
-	return NULL;
+	if (!np) {
+		DRM_ERROR("Failed to find ddc node in device tree\n");
+		return -ENODEV;
+	}
+
+	adpt = of_find_i2c_adapter_by_node(np);
+	of_node_put(np);
+
+	if (!adpt) {
+		DRM_INFO("Failed to get ddc i2c adapter by node\n");
+		return -EPROBE_DEFER;
+	}
+
+	hdata->ddc_adpt = adpt;
+
+	return 0;
 }
 
-static struct device_node *hdmi_legacy_phy_dt_binding(struct device *dev)
+static int hdmi_get_phy_io(struct hdmi_context *hdata)
 {
 	const char *compatible_str = "samsung,exynos4212-hdmiphy";
+	struct device_node *np;
+	int ret = 0;
 
-	return of_find_compatible_node(NULL, NULL, compatible_str);
+	np = of_find_compatible_node(NULL, NULL, compatible_str);
+	if (!np) {
+		np = of_parse_phandle(hdata->dev->of_node, "phy", 0);
+		if (!np) {
+			DRM_ERROR("Failed to find hdmiphy node in device tree\n");
+			return -ENODEV;
+		}
+	}
+
+	if (hdata->drv_data->is_apb_phy) {
+		hdata->regs_hdmiphy = of_iomap(np, 0);
+		if (!hdata->regs_hdmiphy) {
+			DRM_ERROR("failed to ioremap hdmi phy\n");
+			ret = -ENOMEM;
+			goto out;
+		}
+	} else {
+		hdata->hdmiphy_port = of_find_i2c_device_by_node(np);
+		if (!hdata->hdmiphy_port) {
+			DRM_INFO("Failed to get hdmi phy i2c client\n");
+			ret = -EPROBE_DEFER;
+			goto out;
+		}
+	}
+
+out:
+	of_node_put(np);
+	return ret;
 }
 
 static int hdmi_probe(struct platform_device *pdev)
 {
-	struct device_node *ddc_node, *phy_node;
 	struct device *dev = &pdev->dev;
 	struct hdmi_context *hdata;
 	struct resource *res;
@@ -1811,52 +1856,13 @@
 		return ret;
 	}
 
-	ddc_node = hdmi_legacy_ddc_dt_binding(dev);
-	if (ddc_node)
-		goto out_get_ddc_adpt;
+	ret = hdmi_get_ddc_adapter(hdata);
+	if (ret)
+		return ret;
 
-	ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
-	if (!ddc_node) {
-		DRM_ERROR("Failed to find ddc node in device tree\n");
-		return -ENODEV;
-	}
-	of_node_put(dev->of_node);
-
-out_get_ddc_adpt:
-	hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node);
-	if (!hdata->ddc_adpt) {
-		DRM_ERROR("Failed to get ddc i2c adapter by node\n");
-		return -EPROBE_DEFER;
-	}
-
-	phy_node = hdmi_legacy_phy_dt_binding(dev);
-	if (phy_node)
-		goto out_get_phy_port;
-
-	phy_node = of_parse_phandle(dev->of_node, "phy", 0);
-	if (!phy_node) {
-		DRM_ERROR("Failed to find hdmiphy node in device tree\n");
-		ret = -ENODEV;
+	ret = hdmi_get_phy_io(hdata);
+	if (ret)
 		goto err_ddc;
-	}
-	of_node_put(dev->of_node);
-
-out_get_phy_port:
-	if (hdata->drv_data->is_apb_phy) {
-		hdata->regs_hdmiphy = of_iomap(phy_node, 0);
-		if (!hdata->regs_hdmiphy) {
-			DRM_ERROR("failed to ioremap hdmi phy\n");
-			ret = -ENOMEM;
-			goto err_ddc;
-		}
-	} else {
-		hdata->hdmiphy_port = of_find_i2c_device_by_node(phy_node);
-		if (!hdata->hdmiphy_port) {
-			DRM_ERROR("Failed to get hdmi phy i2c client\n");
-			ret = -EPROBE_DEFER;
-			goto err_ddc;
-		}
-	}
 
 	INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func);
 
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e1d47f9..edb20a3 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -73,6 +73,9 @@
 enum mixer_flag_bits {
 	MXR_BIT_POWERED,
 	MXR_BIT_VSYNC,
+	MXR_BIT_INTERLACE,
+	MXR_BIT_VP_ENABLED,
+	MXR_BIT_HAS_SCLK,
 };
 
 static const uint32_t mixer_formats[] = {
@@ -98,9 +101,6 @@
 	struct exynos_drm_plane	planes[MIXER_WIN_NR];
 	int			pipe;
 	unsigned long		flags;
-	bool			interlace;
-	bool			vp_enabled;
-	bool			has_sclk;
 
 	struct mixer_resources	mixer_res;
 	enum mixer_version_id	mxr_ver;
@@ -346,7 +346,7 @@
 	mixer_reg_writemask(res, MXR_STATUS, enable ?
 			MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
 
-	if (ctx->vp_enabled)
+	if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
 		vp_reg_write(res, VP_SHADOW_UPDATE, enable ?
 			VP_SHADOW_UPDATE_ENABLE : 0);
 }
@@ -357,8 +357,8 @@
 	u32 val;
 
 	/* choosing between interlace and progressive mode */
-	val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE :
-				MXR_CFG_SCAN_PROGRESSIVE);
+	val = test_bit(MXR_BIT_INTERLACE, &ctx->flags) ?
+		MXR_CFG_SCAN_INTERLACE : MXR_CFG_SCAN_PROGRESSIVE;
 
 	if (ctx->mxr_ver != MXR_VER_128_0_0_184) {
 		/* choosing between proper HD and SD mode */
@@ -436,9 +436,10 @@
 		mixer_reg_writemask(res, MXR_LAYER_CFG,
 				    MXR_LAYER_CFG_GRP1_VAL(priority),
 				    MXR_LAYER_CFG_GRP1_MASK);
+
 		break;
 	case VP_DEFAULT_WIN:
-		if (ctx->vp_enabled) {
+		if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
 			vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
 			mixer_reg_writemask(res, MXR_CFG, val,
 				MXR_CFG_VP_ENABLE);
@@ -501,7 +502,7 @@
 	chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1);
 
 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
-		ctx->interlace = true;
+		__set_bit(MXR_BIT_INTERLACE, &ctx->flags);
 		if (tiled_mode) {
 			luma_addr[1] = luma_addr[0] + 0x40;
 			chroma_addr[1] = chroma_addr[0] + 0x40;
@@ -510,7 +511,7 @@
 			chroma_addr[1] = chroma_addr[0] + fb->pitches[0];
 		}
 	} else {
-		ctx->interlace = false;
+		__clear_bit(MXR_BIT_INTERLACE, &ctx->flags);
 		luma_addr[1] = 0;
 		chroma_addr[1] = 0;
 	}
@@ -518,7 +519,7 @@
 	spin_lock_irqsave(&res->reg_slock, flags);
 
 	/* interlace or progressive scan mode */
-	val = (ctx->interlace ? ~0 : 0);
+	val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
 	vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);
 
 	/* setup format */
@@ -541,7 +542,7 @@
 
 	vp_reg_write(res, VP_DST_WIDTH, state->crtc.w);
 	vp_reg_write(res, VP_DST_H_POSITION, state->crtc.x);
-	if (ctx->interlace) {
+	if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
 		vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h / 2);
 		vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y / 2);
 	} else {
@@ -636,9 +637,9 @@
 	src_y_offset = 0;
 
 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-		ctx->interlace = true;
+		__set_bit(MXR_BIT_INTERLACE, &ctx->flags);
 	else
-		ctx->interlace = false;
+		__clear_bit(MXR_BIT_INTERLACE, &ctx->flags);
 
 	spin_lock_irqsave(&res->reg_slock, flags);
 
@@ -697,10 +698,10 @@
 static void vp_win_reset(struct mixer_context *ctx)
 {
 	struct mixer_resources *res = &ctx->mixer_res;
-	int tries = 100;
+	unsigned int tries = 100;
 
 	vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING);
-	for (tries = 100; tries; --tries) {
+	while (tries--) {
 		/* waiting until VP_SRESET_PROCESSING is 0 */
 		if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
 			break;
@@ -733,7 +734,7 @@
 	mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
 	mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
 
-	if (ctx->vp_enabled) {
+	if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
 		/* configuration of Video Processor Registers */
 		vp_win_reset(ctx);
 		vp_default_filter(res);
@@ -742,7 +743,7 @@
 	/* disable all layers */
 	mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
 	mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
-	if (ctx->vp_enabled)
+	if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
 		mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
 
 	spin_unlock_irqrestore(&res->reg_slock, flags);
@@ -753,7 +754,6 @@
 	struct mixer_context *ctx = arg;
 	struct mixer_resources *res = &ctx->mixer_res;
 	u32 val, base, shadow;
-	int win;
 
 	spin_lock(&res->reg_slock);
 
@@ -767,7 +767,7 @@
 		val &= ~MXR_INT_STATUS_VSYNC;
 
 		/* interlace scan need to check shadow register */
-		if (ctx->interlace) {
+		if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
 			base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
 			shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
 			if (base != shadow)
@@ -780,14 +780,6 @@
 		}
 
 		drm_crtc_handle_vblank(&ctx->crtc->base);
-		for (win = 0 ; win < MIXER_WIN_NR ; win++) {
-			struct exynos_drm_plane *plane = &ctx->planes[win];
-
-			if (!plane->pending_fb)
-				continue;
-
-			exynos_drm_crtc_finish_update(ctx->crtc, plane);
-		}
 	}
 
 out:
@@ -867,7 +859,7 @@
 		return -ENODEV;
 	}
 
-	if (mixer_ctx->has_sclk) {
+	if (test_bit(MXR_BIT_HAS_SCLK, &mixer_ctx->flags)) {
 		mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
 		if (IS_ERR(mixer_res->sclk_mixer)) {
 			dev_err(dev, "failed to get clock 'sclk_mixer'\n");
@@ -917,7 +909,7 @@
 		return ret;
 	}
 
-	if (mixer_ctx->vp_enabled) {
+	if (test_bit(MXR_BIT_VP_ENABLED, &mixer_ctx->flags)) {
 		/* acquire vp resources: regs, irqs, clocks */
 		ret = vp_resources_init(mixer_ctx);
 		if (ret) {
@@ -1160,7 +1152,8 @@
 		return ret;
 
 	for (i = 0; i < MIXER_WIN_NR; i++) {
-		if (i == VP_DEFAULT_WIN && !ctx->vp_enabled)
+		if (i == VP_DEFAULT_WIN && !test_bit(MXR_BIT_VP_ENABLED,
+						     &ctx->flags))
 			continue;
 
 		ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
@@ -1215,10 +1208,13 @@
 
 	ctx->pdev = pdev;
 	ctx->dev = dev;
-	ctx->vp_enabled = drv->is_vp_enabled;
-	ctx->has_sclk = drv->has_sclk;
 	ctx->mxr_ver = drv->version;
 
+	if (drv->is_vp_enabled)
+		__set_bit(MXR_BIT_VP_ENABLED, &ctx->flags);
+	if (drv->has_sclk)
+		__set_bit(MXR_BIT_HAS_SCLK, &ctx->flags);
+
 	platform_set_drvdata(pdev, ctx);
 
 	ret = component_add(&pdev->dev, &mixer_component_ops);
@@ -1244,9 +1240,9 @@
 
 	clk_disable_unprepare(res->hdmi);
 	clk_disable_unprepare(res->mixer);
-	if (ctx->vp_enabled) {
+	if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
 		clk_disable_unprepare(res->vp);
-		if (ctx->has_sclk)
+		if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags))
 			clk_disable_unprepare(res->sclk_mixer);
 	}
 
@@ -1269,14 +1265,14 @@
 		DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
 		return ret;
 	}
-	if (ctx->vp_enabled) {
+	if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
 		ret = clk_prepare_enable(res->vp);
 		if (ret < 0) {
 			DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n",
 				  ret);
 			return ret;
 		}
-		if (ctx->has_sclk) {
+		if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags)) {
 			ret = clk_prepare_enable(res->sclk_mixer);
 			if (ret < 0) {
 				DRM_ERROR("Failed to prepare_enable the " \
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 7882387..0884c45 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -270,7 +270,7 @@
 	ret = clk_prepare_enable(fsl_dev->pix_clk);
 	if (ret < 0) {
 		dev_err(dev, "failed to enable pix clk\n");
-		return ret;
+		goto disable_dcu_clk;
 	}
 
 	fsl_dcu_drm_init_planes(fsl_dev->drm);
@@ -284,6 +284,10 @@
 	enable_irq(fsl_dev->irq);
 
 	return 0;
+
+disable_dcu_clk:
+	clk_disable_unprepare(fsl_dev->clk);
+	return ret;
 }
 #endif
 
@@ -330,6 +334,7 @@
 	const char *pix_clk_in_name;
 	const struct of_device_id *id;
 	int ret;
+	u8 div_ratio_shift = 0;
 
 	fsl_dev = devm_kzalloc(dev, sizeof(*fsl_dev), GFP_KERNEL);
 	if (!fsl_dev)
@@ -382,11 +387,14 @@
 		pix_clk_in = fsl_dev->clk;
 	}
 
+	if (of_property_read_bool(dev->of_node, "big-endian"))
+		div_ratio_shift = 24;
+
 	pix_clk_in_name = __clk_get_name(pix_clk_in);
 	snprintf(pix_clk_name, sizeof(pix_clk_name), "%s_pix", pix_clk_in_name);
 	fsl_dev->pix_clk = clk_register_divider(dev, pix_clk_name,
 			pix_clk_in_name, 0, base + DCU_DIV_RATIO,
-			0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL);
+			div_ratio_shift, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL);
 	if (IS_ERR(fsl_dev->pix_clk)) {
 		dev_err(dev, "failed to register pix clk\n");
 		ret = PTR_ERR(fsl_dev->pix_clk);
@@ -402,8 +410,8 @@
 	fsl_dev->tcon = fsl_tcon_init(dev);
 
 	drm = drm_dev_alloc(driver, dev);
-	if (!drm) {
-		ret = -ENOMEM;
+	if (IS_ERR(drm)) {
+		ret = PTR_ERR(drm);
 		goto disable_pix_clk;
 	}
 
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index e50467a..a7e5486 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -169,25 +169,10 @@
 	return;
 }
 
-static void
-fsl_dcu_drm_plane_cleanup_fb(struct drm_plane *plane,
-			     const struct drm_plane_state *new_state)
-{
-}
-
-static int
-fsl_dcu_drm_plane_prepare_fb(struct drm_plane *plane,
-			     const struct drm_plane_state *new_state)
-{
-	return 0;
-}
-
 static const struct drm_plane_helper_funcs fsl_dcu_drm_plane_helper_funcs = {
 	.atomic_check = fsl_dcu_drm_plane_atomic_check,
 	.atomic_disable = fsl_dcu_drm_plane_atomic_disable,
 	.atomic_update = fsl_dcu_drm_plane_atomic_update,
-	.cleanup_fb = fsl_dcu_drm_plane_cleanup_fb,
-	.prepare_fb = fsl_dcu_drm_plane_prepare_fb,
 };
 
 static void fsl_dcu_drm_plane_destroy(struct drm_plane *plane)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
index bca09ea..3194e54 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
@@ -57,10 +57,7 @@
 
 	tcon->regs = devm_regmap_init_mmio(dev, regs,
 					   &fsl_tcon_regmap_config);
-	if (IS_ERR(tcon->regs))
-		return PTR_ERR(tcon->regs);
-
-	return 0;
+	return PTR_ERR_OR_ZERO(tcon->regs);
 }
 
 struct fsl_tcon *fsl_tcon_init(struct device *dev)
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index db9f7d0..0d2bb16 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -28,7 +28,6 @@
 #include <linux/tty.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
-#include <linux/fb.h>
 #include <linux/init.h>
 #include <linux/console.h>
 
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 38dc890..ea733ab 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -415,14 +415,6 @@
 	if (ret)
 		return ret;
 
-	/* Didn't get an EDID, so
-	 * Set wide sync ranges so we get all modes
-	 * handed to valid_mode for checking
-	 */
-	connector->display_info.min_vfreq = 0;
-	connector->display_info.max_vfreq = 200;
-	connector->display_info.min_hfreq = 0;
-	connector->display_info.max_hfreq = 200;
 	if (mode_dev->panel_fixed_mode != NULL) {
 		struct drm_display_mode *mode =
 		    drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 0fcdce0..3a44e70 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -26,7 +26,6 @@
 #include <linux/tty.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
-#include <linux/fb.h>
 #include <linux/init.h>
 #include <linux/console.h>
 
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 907cb51..acb3848 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -335,11 +335,6 @@
 	struct drm_display_mode *dup_mode = NULL;
 	struct drm_device *dev = connector->dev;
 
-	connector->display_info.min_vfreq = 0;
-	connector->display_info.max_vfreq = 200;
-	connector->display_info.min_hfreq = 0;
-	connector->display_info.max_hfreq = 200;
-
 	if (fixed_mode) {
 		dev_dbg(dev->dev, "fixed_mode %dx%d\n",
 				fixed_mode->hdisplay, fixed_mode->vdisplay);
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index ab696ca..eab6d88 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -163,10 +163,7 @@
 	if (bclp > 255)
 		return ASLE_BACKLIGHT_FAILED;
 
-	if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
-		int max = bd->props.max_brightness;
-		gma_backlight_set(dev, bclp * max / 255);
-	}
+	gma_backlight_set(dev, bclp * bd->props.max_brightness / 255);
 
 	asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
 
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index e55733c..fd7c912 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -530,15 +530,6 @@
 	if (ret)
 		return ret;
 
-	/* Didn't get an EDID, so
-	 * Set wide sync ranges so we get all modes
-	 * handed to valid_mode for checking
-	 */
-	connector->display_info.min_vfreq = 0;
-	connector->display_info.max_vfreq = 200;
-	connector->display_info.min_hfreq = 0;
-	connector->display_info.max_hfreq = 200;
-
 	if (mode_dev->panel_fixed_mode != NULL) {
 		struct drm_display_mode *mode =
 		    drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
index 4fca0d6..e536072 100644
--- a/drivers/gpu/drm/gma500/psb_intel_modes.c
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -18,7 +18,6 @@
  */
 
 #include <linux/i2c.h>
-#include <linux/fb.h>
 #include <drm/drmP.h>
 #include "psb_intel_drv.h"
 
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index c3707d4..7e7a4d4 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -608,15 +608,17 @@
 			 u32 ch, u32 y, u32 in_h, u32 fmt)
 {
 	struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0);
+	char *format_name;
 	u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
 	u32 stride = fb->pitches[0];
 	u32 addr = (u32)obj->paddr + y * stride;
 
 	DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n",
 			 ch + 1, y, in_h, stride, (u32)obj->paddr);
+	format_name = drm_get_format_name(fb->pixel_format);
 	DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%s)\n",
-			 addr, fb->width, fb->height, fmt,
-			 drm_get_format_name(fb->pixel_format));
+			 addr, fb->width, fb->height, fmt, format_name);
+	kfree(format_name);
 
 	/* get reg offset */
 	reg_ctrl = RD_CH_CTRL(ch);
@@ -815,19 +817,6 @@
 	ade_compositor_routing_disable(base, ch);
 }
 
-static int ade_plane_prepare_fb(struct drm_plane *plane,
-				const struct drm_plane_state *new_state)
-{
-	/* do nothing */
-	return 0;
-}
-
-static void ade_plane_cleanup_fb(struct drm_plane *plane,
-				 const struct drm_plane_state *old_state)
-{
-	/* do nothing */
-}
-
 static int ade_plane_atomic_check(struct drm_plane *plane,
 				  struct drm_plane_state *state)
 {
@@ -895,8 +884,6 @@
 }
 
 static const struct drm_plane_helper_funcs ade_plane_helper_funcs = {
-	.prepare_fb = ade_plane_prepare_fb,
-	.cleanup_fb = ade_plane_cleanup_fb,
 	.atomic_check = ade_plane_atomic_check,
 	.atomic_update = ade_plane_atomic_update,
 	.atomic_disable = ade_plane_atomic_disable,
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 1edd9bc..90377a6 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -169,7 +169,7 @@
 
 static struct drm_driver kirin_drm_driver = {
 	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
-				  DRIVER_ATOMIC | DRIVER_HAVE_IRQ,
+				  DRIVER_ATOMIC,
 	.fops			= &kirin_drm_fops,
 
 	.gem_free_object_unlocked = drm_gem_cma_free_object,
@@ -207,8 +207,8 @@
 	int ret;
 
 	drm_dev = drm_dev_alloc(driver, dev);
-	if (!drm_dev)
-		return -ENOMEM;
+	if (IS_ERR(drm_dev))
+		return PTR_ERR(drm_dev);
 
 	drm_dev->platformdev = to_platform_device(dev);
 
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
index 4d341db..a6c92be 100644
--- a/drivers/gpu/drm/i2c/Kconfig
+++ b/drivers/gpu/drm/i2c/Kconfig
@@ -22,6 +22,7 @@
 config DRM_I2C_NXP_TDA998X
 	tristate "NXP Semiconductors TDA998X HDMI encoder"
 	default m if DRM_TILCDC
+	select SND_SOC_HDMI_CODEC if SND_SOC
 	help
 	  Support for NXP Semiconductors TDA998X HDMI encoders.
 
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index f4315bc..9798d40 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/irq.h>
 #include <sound/asoundef.h>
+#include <sound/hdmi-codec.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
@@ -30,6 +31,11 @@
 
 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 
+struct tda998x_audio_port {
+	u8 format;		/* AFMT_xxx */
+	u8 config;		/* AP value */
+};
+
 struct tda998x_priv {
 	struct i2c_client *cec;
 	struct i2c_client *hdmi;
@@ -41,7 +47,10 @@
 	u8 vip_cntrl_0;
 	u8 vip_cntrl_1;
 	u8 vip_cntrl_2;
-	struct tda998x_encoder_params params;
+	struct tda998x_audio_params audio_params;
+
+	struct platform_device *audio_pdev;
+	struct mutex audio_mutex;
 
 	wait_queue_head_t wq_edid;
 	volatile int wq_edid_wait;
@@ -53,6 +62,8 @@
 
 	struct drm_encoder encoder;
 	struct drm_connector connector;
+
+	struct tda998x_audio_port audio_port[2];
 };
 
 #define conn_to_tda998x_priv(x) \
@@ -666,26 +677,16 @@
 	reg_set(priv, REG_DIP_IF_FLAGS, bit);
 }
 
-static void
-tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
+static int tda998x_write_aif(struct tda998x_priv *priv,
+			     struct hdmi_audio_infoframe *cea)
 {
 	union hdmi_infoframe frame;
 
-	hdmi_audio_infoframe_init(&frame.audio);
-
-	frame.audio.channels = p->audio_frame[1] & 0x07;
-	frame.audio.channel_allocation = p->audio_frame[4];
-	frame.audio.level_shift_value = (p->audio_frame[5] & 0x78) >> 3;
-	frame.audio.downmix_inhibit = (p->audio_frame[5] & 0x80) >> 7;
-
-	/*
-	 * L-PCM and IEC61937 compressed audio shall always set sample
-	 * frequency to "refer to stream".  For others, see the HDMI
-	 * specification.
-	 */
-	frame.audio.sample_frequency = (p->audio_frame[2] & 0x1c) >> 2;
+	frame.audio = *cea;
 
 	tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, &frame);
+
+	return 0;
 }
 
 static void
@@ -710,20 +711,21 @@
 	}
 }
 
-static void
+static int
 tda998x_configure_audio(struct tda998x_priv *priv,
-		struct drm_display_mode *mode, struct tda998x_encoder_params *p)
+			struct tda998x_audio_params *params,
+			unsigned mode_clock)
 {
 	u8 buf[6], clksel_aip, clksel_fs, cts_n, adiv;
 	u32 n;
 
 	/* Enable audio ports */
-	reg_write(priv, REG_ENA_AP, p->audio_cfg);
-	reg_write(priv, REG_ENA_ACLK, p->audio_clk_cfg);
+	reg_write(priv, REG_ENA_AP, params->config);
 
 	/* Set audio input source */
-	switch (p->audio_format) {
+	switch (params->format) {
 	case AFMT_SPDIF:
+		reg_write(priv, REG_ENA_ACLK, 0);
 		reg_write(priv, REG_MUX_AP, MUX_AP_SELECT_SPDIF);
 		clksel_aip = AIP_CLKSEL_AIP_SPDIF;
 		clksel_fs = AIP_CLKSEL_FS_FS64SPDIF;
@@ -731,15 +733,29 @@
 		break;
 
 	case AFMT_I2S:
+		reg_write(priv, REG_ENA_ACLK, 1);
 		reg_write(priv, REG_MUX_AP, MUX_AP_SELECT_I2S);
 		clksel_aip = AIP_CLKSEL_AIP_I2S;
 		clksel_fs = AIP_CLKSEL_FS_ACLK;
-		cts_n = CTS_N_M(3) | CTS_N_K(3);
+		switch (params->sample_width) {
+		case 16:
+			cts_n = CTS_N_M(3) | CTS_N_K(1);
+			break;
+		case 18:
+		case 20:
+		case 24:
+			cts_n = CTS_N_M(3) | CTS_N_K(2);
+			break;
+		default:
+		case 32:
+			cts_n = CTS_N_M(3) | CTS_N_K(3);
+			break;
+		}
 		break;
 
 	default:
-		BUG();
-		return;
+		dev_err(&priv->hdmi->dev, "Unsupported I2S format\n");
+		return -EINVAL;
 	}
 
 	reg_write(priv, REG_AIP_CLKSEL, clksel_aip);
@@ -755,11 +771,11 @@
 	 * assume 100MHz requires larger divider.
 	 */
 	adiv = AUDIO_DIV_SERCLK_8;
-	if (mode->clock > 100000)
+	if (mode_clock > 100000)
 		adiv++;			/* AUDIO_DIV_SERCLK_16 */
 
 	/* S/PDIF asks for a larger divider */
-	if (p->audio_format == AFMT_SPDIF)
+	if (params->format == AFMT_SPDIF)
 		adiv++;			/* AUDIO_DIV_SERCLK_16 or _32 */
 
 	reg_write(priv, REG_AUDIO_DIV, adiv);
@@ -768,7 +784,7 @@
 	 * This is the approximate value of N, which happens to be
 	 * the recommended values for non-coherent clocks.
 	 */
-	n = 128 * p->audio_sample_rate / 1000;
+	n = 128 * params->sample_rate / 1000;
 
 	/* Write the CTS and N values */
 	buf[0] = 0x44;
@@ -786,20 +802,21 @@
 	reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
 	reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
 
-	/* Write the channel status */
-	buf[0] = IEC958_AES0_CON_NOT_COPYRIGHT;
-	buf[1] = 0x00;
-	buf[2] = IEC958_AES3_CON_FS_NOTID;
-	buf[3] = IEC958_AES4_CON_ORIGFS_NOTID |
-			IEC958_AES4_CON_MAX_WORDLEN_24;
+	/* Write the channel status
+	 * The REG_CH_STAT_B-registers skip IEC958 AES2 byte, because
+	 * there is a separate register for each I2S wire.
+	 */
+	buf[0] = params->status[0];
+	buf[1] = params->status[1];
+	buf[2] = params->status[3];
+	buf[3] = params->status[4];
 	reg_write_range(priv, REG_CH_STAT_B(0), buf, 4);
 
 	tda998x_audio_mute(priv, true);
 	msleep(20);
 	tda998x_audio_mute(priv, false);
 
-	/* Write the audio information packet */
-	tda998x_write_aif(priv, p);
+	return tda998x_write_aif(priv, &params->cea);
 }
 
 /* DRM encoder functions */
@@ -820,7 +837,7 @@
 			    VIP_CNTRL_2_SWAP_F(p->swap_f) |
 			    (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
 
-	priv->params = *p;
+	priv->audio_params = p->audio_params;
 }
 
 static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
@@ -1057,9 +1074,13 @@
 
 		tda998x_write_avi(priv, adjusted_mode);
 
-		if (priv->params.audio_cfg)
-			tda998x_configure_audio(priv, adjusted_mode,
-						&priv->params);
+		if (priv->audio_params.format != AFMT_UNUSED) {
+			mutex_lock(&priv->audio_mutex);
+			tda998x_configure_audio(priv,
+						&priv->audio_params,
+						adjusted_mode->clock);
+			mutex_unlock(&priv->audio_mutex);
+		}
 	}
 }
 
@@ -1159,6 +1180,8 @@
 	drm_mode_connector_update_edid_property(connector, edid);
 	n = drm_add_edid_modes(connector, edid);
 	priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
+	drm_edid_to_eld(connector, edid);
+
 	kfree(edid);
 
 	return n;
@@ -1180,6 +1203,9 @@
 	cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
 	reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
 
+	if (priv->audio_pdev)
+		platform_device_unregister(priv->audio_pdev);
+
 	if (priv->hdmi->irq)
 		free_irq(priv->hdmi->irq, priv);
 
@@ -1189,8 +1215,189 @@
 	i2c_unregister_device(priv->cec);
 }
 
+static int tda998x_audio_hw_params(struct device *dev, void *data,
+				   struct hdmi_codec_daifmt *daifmt,
+				   struct hdmi_codec_params *params)
+{
+	struct tda998x_priv *priv = dev_get_drvdata(dev);
+	int i, ret;
+	struct tda998x_audio_params audio = {
+		.sample_width = params->sample_width,
+		.sample_rate = params->sample_rate,
+		.cea = params->cea,
+	};
+
+	if (!priv->encoder.crtc)
+		return -ENODEV;
+
+	memcpy(audio.status, params->iec.status,
+	       min(sizeof(audio.status), sizeof(params->iec.status)));
+
+	switch (daifmt->fmt) {
+	case HDMI_I2S:
+		if (daifmt->bit_clk_inv || daifmt->frame_clk_inv ||
+		    daifmt->bit_clk_master || daifmt->frame_clk_master) {
+			dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
+				daifmt->bit_clk_inv, daifmt->frame_clk_inv,
+				daifmt->bit_clk_master,
+				daifmt->frame_clk_master);
+			return -EINVAL;
+		}
+		for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
+			if (priv->audio_port[i].format == AFMT_I2S)
+				audio.config = priv->audio_port[i].config;
+		audio.format = AFMT_I2S;
+		break;
+	case HDMI_SPDIF:
+		for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
+			if (priv->audio_port[i].format == AFMT_SPDIF)
+				audio.config = priv->audio_port[i].config;
+		audio.format = AFMT_SPDIF;
+		break;
+	default:
+		dev_err(dev, "%s: Invalid format %d\n", __func__, daifmt->fmt);
+		return -EINVAL;
+	}
+
+	if (audio.config == 0) {
+		dev_err(dev, "%s: No audio configutation found\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&priv->audio_mutex);
+	ret = tda998x_configure_audio(priv,
+				      &audio,
+				      priv->encoder.crtc->hwmode.clock);
+
+	if (ret == 0)
+		priv->audio_params = audio;
+	mutex_unlock(&priv->audio_mutex);
+
+	return ret;
+}
+
+static void tda998x_audio_shutdown(struct device *dev, void *data)
+{
+	struct tda998x_priv *priv = dev_get_drvdata(dev);
+
+	mutex_lock(&priv->audio_mutex);
+
+	reg_write(priv, REG_ENA_AP, 0);
+
+	priv->audio_params.format = AFMT_UNUSED;
+
+	mutex_unlock(&priv->audio_mutex);
+}
+
+int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
+{
+	struct tda998x_priv *priv = dev_get_drvdata(dev);
+
+	mutex_lock(&priv->audio_mutex);
+
+	tda998x_audio_mute(priv, enable);
+
+	mutex_unlock(&priv->audio_mutex);
+	return 0;
+}
+
+static int tda998x_audio_get_eld(struct device *dev, void *data,
+				 uint8_t *buf, size_t len)
+{
+	struct tda998x_priv *priv = dev_get_drvdata(dev);
+	struct drm_mode_config *config = &priv->encoder.dev->mode_config;
+	struct drm_connector *connector;
+	int ret = -ENODEV;
+
+	mutex_lock(&config->mutex);
+	list_for_each_entry(connector, &config->connector_list, head) {
+		if (&priv->encoder == connector->encoder) {
+			memcpy(buf, connector->eld,
+			       min(sizeof(connector->eld), len));
+			ret = 0;
+		}
+	}
+	mutex_unlock(&config->mutex);
+
+	return ret;
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+	.hw_params = tda998x_audio_hw_params,
+	.audio_shutdown = tda998x_audio_shutdown,
+	.digital_mute = tda998x_audio_digital_mute,
+	.get_eld = tda998x_audio_get_eld,
+};
+
+static int tda998x_audio_codec_init(struct tda998x_priv *priv,
+				    struct device *dev)
+{
+	struct hdmi_codec_pdata codec_data = {
+		.ops = &audio_codec_ops,
+		.max_i2s_channels = 2,
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++) {
+		if (priv->audio_port[i].format == AFMT_I2S &&
+		    priv->audio_port[i].config != 0)
+			codec_data.i2s = 1;
+		if (priv->audio_port[i].format == AFMT_SPDIF &&
+		    priv->audio_port[i].config != 0)
+			codec_data.spdif = 1;
+	}
+
+	priv->audio_pdev = platform_device_register_data(
+		dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+		&codec_data, sizeof(codec_data));
+
+	return PTR_ERR_OR_ZERO(priv->audio_pdev);
+}
+
 /* I2C driver functions */
 
+static int tda998x_get_audio_ports(struct tda998x_priv *priv,
+				   struct device_node *np)
+{
+	const u32 *port_data;
+	u32 size;
+	int i;
+
+	port_data = of_get_property(np, "audio-ports", &size);
+	if (!port_data)
+		return 0;
+
+	size /= sizeof(u32);
+	if (size > 2 * ARRAY_SIZE(priv->audio_port) || size % 2 != 0) {
+		dev_err(&priv->hdmi->dev,
+			"Bad number of elements in audio-ports dt-property\n");
+		return -EINVAL;
+	}
+
+	size /= 2;
+
+	for (i = 0; i < size; i++) {
+		u8 afmt = be32_to_cpup(&port_data[2*i]);
+		u8 ena_ap = be32_to_cpup(&port_data[2*i+1]);
+
+		if (afmt != AFMT_SPDIF && afmt != AFMT_I2S) {
+			dev_err(&priv->hdmi->dev,
+				"Bad audio format %u\n", afmt);
+			return -EINVAL;
+		}
+
+		priv->audio_port[i].format = afmt;
+		priv->audio_port[i].config = ena_ap;
+	}
+
+	if (priv->audio_port[0].format == priv->audio_port[1].format) {
+		dev_err(&priv->hdmi->dev,
+			"There can only be on I2S port and one SPDIF port\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 {
 	struct device_node *np = client->dev.of_node;
@@ -1304,7 +1511,7 @@
 	if (!np)
 		return 0;		/* non-DT */
 
-	/* get the optional video properties */
+	/* get the device tree parameters */
 	ret = of_property_read_u32(np, "video-ports", &video);
 	if (ret == 0) {
 		priv->vip_cntrl_0 = video >> 16;
@@ -1312,8 +1519,16 @@
 		priv->vip_cntrl_2 = video;
 	}
 
-	return 0;
+	mutex_init(&priv->audio_mutex); /* Protect access from audio thread */
 
+	ret = tda998x_get_audio_ports(priv, np);
+	if (ret)
+		goto fail;
+
+	if (priv->audio_port[0].format != AFMT_UNUSED)
+		tda998x_audio_codec_init(priv, &client->dev);
+
+	return 0;
 fail:
 	/* if encoder_init fails, the encoder slave is never registered,
 	 * so cleanup here:
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 44f4a13..0be55dc 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -56,9 +56,7 @@
 };
 
 static struct drm_driver driver = {
-	.driver_features =
-	    DRIVER_USE_AGP |
-	    DRIVER_HAVE_DMA,
+	.driver_features = DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_LEGACY,
 	.dev_priv_size = sizeof(drm_i810_buf_priv_t),
 	.load = i810_driver_load,
 	.lastclose = i810_driver_lastclose,
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 684fc1c..a998c2b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -3,15 +3,20 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
+subdir-ccflags-y += \
+	$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
 
 # Please keep these build lists sorted!
 
 # core driver code
 i915-y := i915_drv.o \
 	  i915_irq.o \
+	  i915_memcpy.o \
+	  i915_mm.o \
 	  i915_params.o \
 	  i915_pci.o \
           i915_suspend.o \
+	  i915_sw_fence.o \
 	  i915_sysfs.o \
 	  intel_csr.o \
 	  intel_device_info.o \
@@ -25,7 +30,6 @@
 i915-y += i915_cmd_parser.o \
 	  i915_gem_batch_pool.o \
 	  i915_gem_context.o \
-	  i915_gem_debug.o \
 	  i915_gem_dmabuf.o \
 	  i915_gem_evict.o \
 	  i915_gem_execbuffer.o \
@@ -33,6 +37,7 @@
 	  i915_gem_gtt.o \
 	  i915_gem.o \
 	  i915_gem_render_state.o \
+	  i915_gem_request.o \
 	  i915_gem_shrinker.o \
 	  i915_gem_stolen.o \
 	  i915_gem_tiling.o \
@@ -40,6 +45,7 @@
 	  i915_gpu_error.o \
 	  i915_trace_points.o \
 	  intel_breadcrumbs.o \
+	  intel_engine_cs.o \
 	  intel_lrc.o \
 	  intel_mocs.o \
 	  intel_ringbuffer.o \
@@ -109,6 +115,6 @@
 include $(src)/gvt/Makefile
 endif
 
-obj-$(CONFIG_DRM_I915)  += i915.o
+obj-$(CONFIG_DRM_I915) += i915.o
 
 CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index b0fd6a7..70980f8 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -62,23 +62,23 @@
  * The parser always rejects such commands.
  *
  * The majority of the problematic commands fall in the MI_* range, with only a
- * few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
+ * few specific commands on each engine (e.g. PIPE_CONTROL and MI_FLUSH_DW).
  *
  * Implementation:
- * Each ring maintains tables of commands and registers which the parser uses in
- * scanning batch buffers submitted to that ring.
+ * Each engine maintains tables of commands and registers which the parser
+ * uses in scanning batch buffers submitted to that engine.
  *
  * Since the set of commands that the parser must check for is significantly
  * smaller than the number of commands supported, the parser tables contain only
  * those commands required by the parser. This generally works because command
  * opcode ranges have standard command length encodings. So for commands that
  * the parser does not need to check, it can easily skip them. This is
- * implemented via a per-ring length decoding vfunc.
+ * implemented via a per-engine length decoding vfunc.
  *
  * Unfortunately, there are a number of commands that do not follow the standard
  * length encoding for their opcode range, primarily amongst the MI_* commands.
  * To handle this, the parser provides a way to define explicit "skip" entries
- * in the per-ring command tables.
+ * in the per-engine command tables.
  *
  * Other command table entries map fairly directly to high level categories
  * mentioned above: rejected, master-only, register whitelist. The parser
@@ -86,24 +86,25 @@
  * general bitmasking mechanism.
  */
 
-#define STD_MI_OPCODE_MASK  0xFF800000
-#define STD_3D_OPCODE_MASK  0xFFFF0000
-#define STD_2D_OPCODE_MASK  0xFFC00000
-#define STD_MFX_OPCODE_MASK 0xFFFF0000
+#define STD_MI_OPCODE_SHIFT  (32 - 9)
+#define STD_3D_OPCODE_SHIFT  (32 - 16)
+#define STD_2D_OPCODE_SHIFT  (32 - 10)
+#define STD_MFX_OPCODE_SHIFT (32 - 16)
+#define MIN_OPCODE_SHIFT 16
 
 #define CMD(op, opm, f, lm, fl, ...)				\
 	{							\
 		.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0),	\
-		.cmd = { (op), (opm) },				\
+		.cmd = { (op), ~0u << (opm) },			\
 		.length = { (lm) },				\
 		__VA_ARGS__					\
 	}
 
 /* Convenience macros to compress the tables */
-#define SMI STD_MI_OPCODE_MASK
-#define S3D STD_3D_OPCODE_MASK
-#define S2D STD_2D_OPCODE_MASK
-#define SMFX STD_MFX_OPCODE_MASK
+#define SMI STD_MI_OPCODE_SHIFT
+#define S3D STD_3D_OPCODE_SHIFT
+#define S2D STD_2D_OPCODE_SHIFT
+#define SMFX STD_MFX_OPCODE_SHIFT
 #define F true
 #define S CMD_DESC_SKIP
 #define R CMD_DESC_REJECT
@@ -350,6 +351,9 @@
 	CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
 };
 
+static const struct drm_i915_cmd_descriptor noop_desc =
+	CMD(MI_NOOP, SMI, F, 1, S);
+
 #undef CMD
 #undef SMI
 #undef S3D
@@ -458,6 +462,7 @@
 	REG32(GEN7_GPGPU_DISPATCHDIMX),
 	REG32(GEN7_GPGPU_DISPATCHDIMY),
 	REG32(GEN7_GPGPU_DISPATCHDIMZ),
+	REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
 	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0),
 	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 1),
 	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 2),
@@ -473,6 +478,7 @@
 	REG32(GEN7_L3SQCREG1),
 	REG32(GEN7_L3CNTLREG2),
 	REG32(GEN7_L3CNTLREG3),
+	REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
 };
 
 static const struct drm_i915_reg_descriptor hsw_render_regs[] = {
@@ -502,7 +508,10 @@
 };
 
 static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
+	REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
+	REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
 	REG32(BCS_SWCTRL),
+	REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
 };
 
 static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
@@ -603,7 +612,7 @@
 	return 0;
 }
 
-static bool validate_cmds_sorted(struct intel_engine_cs *engine,
+static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
 				 const struct drm_i915_cmd_table *cmd_tables,
 				 int cmd_table_count)
 {
@@ -624,8 +633,10 @@
 			u32 curr = desc->cmd.value & desc->cmd.mask;
 
 			if (curr < previous) {
-				DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
-					  engine->id, i, j, curr, previous);
+				DRM_ERROR("CMD: %s [%d] command table not sorted: "
+					  "table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
+					  engine->name, engine->id,
+					  i, j, curr, previous);
 				ret = false;
 			}
 
@@ -636,7 +647,7 @@
 	return ret;
 }
 
-static bool check_sorted(int ring_id,
+static bool check_sorted(const struct intel_engine_cs *engine,
 			 const struct drm_i915_reg_descriptor *reg_table,
 			 int reg_count)
 {
@@ -648,8 +659,10 @@
 		u32 curr = i915_mmio_reg_offset(reg_table[i].addr);
 
 		if (curr < previous) {
-			DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
-				  ring_id, i, curr, previous);
+			DRM_ERROR("CMD: %s [%d] register table not sorted: "
+				  "entry=%d reg=0x%08X prev=0x%08X\n",
+				  engine->name, engine->id,
+				  i, curr, previous);
 			ret = false;
 		}
 
@@ -666,7 +679,7 @@
 
 	for (i = 0; i < engine->reg_table_count; i++) {
 		table = &engine->reg_tables[i];
-		if (!check_sorted(engine->id, table->regs, table->num_regs))
+		if (!check_sorted(engine, table->regs, table->num_regs))
 			return false;
 	}
 
@@ -687,12 +700,26 @@
  * non-opcode bits being set. But if we don't include those bits, some 3D
  * commands may hash to the same bucket due to not including opcode bits that
  * make the command unique. For now, we will risk hashing to the same bucket.
- *
- * If we attempt to generate a perfect hash, we should be able to look at bits
- * 31:29 of a command from a batch buffer and use the full mask for that
- * client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this.
  */
-#define CMD_HASH_MASK STD_MI_OPCODE_MASK
+static inline u32 cmd_header_key(u32 x)
+{
+	u32 shift;
+
+	switch (x >> INSTR_CLIENT_SHIFT) {
+	default:
+	case INSTR_MI_CLIENT:
+		shift = STD_MI_OPCODE_SHIFT;
+		break;
+	case INSTR_RC_CLIENT:
+		shift = STD_3D_OPCODE_SHIFT;
+		break;
+	case INSTR_BC_CLIENT:
+		shift = STD_2D_OPCODE_SHIFT;
+		break;
+	}
+
+	return x >> shift;
+}
 
 static int init_hash_table(struct intel_engine_cs *engine,
 			   const struct drm_i915_cmd_table *cmd_tables,
@@ -716,7 +743,7 @@
 
 			desc_node->desc = desc;
 			hash_add(engine->cmd_hash, &desc_node->node,
-				 desc->cmd.value & CMD_HASH_MASK);
+				 cmd_header_key(desc->cmd.value));
 		}
 	}
 
@@ -736,23 +763,21 @@
 }
 
 /**
- * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
+ * intel_engine_init_cmd_parser() - set cmd parser related fields for an engine
  * @engine: the engine to initialize
  *
  * Optionally initializes fields related to batch buffer command parsing in the
  * struct intel_engine_cs based on whether the platform requires software
  * command parsing.
- *
- * Return: non-zero if initialization fails
  */
-int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
+void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 {
 	const struct drm_i915_cmd_table *cmd_tables;
 	int cmd_table_count;
 	int ret;
 
 	if (!IS_GEN7(engine->i915))
-		return 0;
+		return;
 
 	switch (engine->id) {
 	case RCS:
@@ -806,36 +831,38 @@
 		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	default:
-		DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
-			  engine->id);
-		BUG();
+		MISSING_CASE(engine->id);
+		return;
 	}
 
-	BUG_ON(!validate_cmds_sorted(engine, cmd_tables, cmd_table_count));
-	BUG_ON(!validate_regs_sorted(engine));
-
-	WARN_ON(!hash_empty(engine->cmd_hash));
+	if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) {
+		DRM_ERROR("%s: command descriptions are not sorted\n",
+			  engine->name);
+		return;
+	}
+	if (!validate_regs_sorted(engine)) {
+		DRM_ERROR("%s: registers are not sorted\n", engine->name);
+		return;
+	}
 
 	ret = init_hash_table(engine, cmd_tables, cmd_table_count);
 	if (ret) {
-		DRM_ERROR("CMD: cmd_parser_init failed!\n");
+		DRM_ERROR("%s: initialised failed!\n", engine->name);
 		fini_hash_table(engine);
-		return ret;
+		return;
 	}
 
 	engine->needs_cmd_parser = true;
-
-	return 0;
 }
 
 /**
- * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
+ * intel_engine_cleanup_cmd_parser() - clean up cmd parser related fields
  * @engine: the engine to clean up
  *
  * Releases any resources related to command parsing that may have been
- * initialized for the specified ring.
+ * initialized for the specified engine.
  */
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine)
+void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
 {
 	if (!engine->needs_cmd_parser)
 		return;
@@ -850,12 +877,9 @@
 	struct cmd_node *desc_node;
 
 	hash_for_each_possible(engine->cmd_hash, desc_node, node,
-			       cmd_header & CMD_HASH_MASK) {
+			       cmd_header_key(cmd_header)) {
 		const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
-		u32 masked_cmd = desc->cmd.mask & cmd_header;
-		u32 masked_value = desc->cmd.value & desc->cmd.mask;
-
-		if (masked_cmd == masked_value)
+		if (((cmd_header ^ desc->cmd.value) & desc->cmd.mask) == 0)
 			return desc;
 	}
 
@@ -866,18 +890,21 @@
  * Returns a pointer to a descriptor for the command specified by cmd_header.
  *
  * The caller must supply space for a default descriptor via the default_desc
- * parameter. If no descriptor for the specified command exists in the ring's
+ * parameter. If no descriptor for the specified command exists in the engine's
  * command parser tables, this function fills in default_desc based on the
- * ring's default length encoding and returns default_desc.
+ * engine's default length encoding and returns default_desc.
  */
 static const struct drm_i915_cmd_descriptor*
 find_cmd(struct intel_engine_cs *engine,
 	 u32 cmd_header,
+	 const struct drm_i915_cmd_descriptor *desc,
 	 struct drm_i915_cmd_descriptor *default_desc)
 {
-	const struct drm_i915_cmd_descriptor *desc;
 	u32 mask;
 
+	if (((cmd_header ^ desc->cmd.value) & desc->cmd.mask) == 0)
+		return desc;
+
 	desc = find_cmd_in_table(engine, cmd_header);
 	if (desc)
 		return desc;
@@ -886,152 +913,140 @@
 	if (!mask)
 		return NULL;
 
-	BUG_ON(!default_desc);
-	default_desc->flags = CMD_DESC_SKIP;
+	default_desc->cmd.value = cmd_header;
+	default_desc->cmd.mask = ~0u << MIN_OPCODE_SHIFT;
 	default_desc->length.mask = mask;
-
+	default_desc->flags = CMD_DESC_SKIP;
 	return default_desc;
 }
 
 static const struct drm_i915_reg_descriptor *
-find_reg(const struct drm_i915_reg_descriptor *table,
-	 int count, u32 addr)
+__find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr)
 {
-	int i;
-
-	for (i = 0; i < count; i++) {
-		if (i915_mmio_reg_offset(table[i].addr) == addr)
-			return &table[i];
+	int start = 0, end = count;
+	while (start < end) {
+		int mid = start + (end - start) / 2;
+		int ret = addr - i915_mmio_reg_offset(table[mid].addr);
+		if (ret < 0)
+			end = mid;
+		else if (ret > 0)
+			start = mid + 1;
+		else
+			return &table[mid];
 	}
-
 	return NULL;
 }
 
 static const struct drm_i915_reg_descriptor *
-find_reg_in_tables(const struct drm_i915_reg_table *tables,
-		   int count, bool is_master, u32 addr)
+find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
 {
-	int i;
-	const struct drm_i915_reg_table *table;
-	const struct drm_i915_reg_descriptor *reg;
+	const struct drm_i915_reg_table *table = engine->reg_tables;
+	int count = engine->reg_table_count;
 
-	for (i = 0; i < count; i++) {
-		table = &tables[i];
+	do {
 		if (!table->master || is_master) {
-			reg = find_reg(table->regs, table->num_regs,
-				       addr);
+			const struct drm_i915_reg_descriptor *reg;
+
+			reg = __find_reg(table->regs, table->num_regs, addr);
 			if (reg != NULL)
 				return reg;
 		}
-	}
+	} while (table++, --count);
 
 	return NULL;
 }
 
-static u32 *vmap_batch(struct drm_i915_gem_object *obj,
-		       unsigned start, unsigned len)
-{
-	int i;
-	void *addr = NULL;
-	struct sg_page_iter sg_iter;
-	int first_page = start >> PAGE_SHIFT;
-	int last_page = (len + start + 4095) >> PAGE_SHIFT;
-	int npages = last_page - first_page;
-	struct page **pages;
-
-	pages = drm_malloc_ab(npages, sizeof(*pages));
-	if (pages == NULL) {
-		DRM_DEBUG_DRIVER("Failed to get space for pages\n");
-		goto finish;
-	}
-
-	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
-		pages[i++] = sg_page_iter_page(&sg_iter);
-		if (i == npages)
-			break;
-	}
-
-	addr = vmap(pages, i, 0, PAGE_KERNEL);
-	if (addr == NULL) {
-		DRM_DEBUG_DRIVER("Failed to vmap pages\n");
-		goto finish;
-	}
-
-finish:
-	if (pages)
-		drm_free_large(pages);
-	return (u32*)addr;
-}
-
-/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
-static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
+/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
+static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 		       struct drm_i915_gem_object *src_obj,
 		       u32 batch_start_offset,
-		       u32 batch_len)
+		       u32 batch_len,
+		       bool *needs_clflush_after)
 {
-	int needs_clflush = 0;
-	void *src_base, *src;
-	void *dst = NULL;
+	unsigned int src_needs_clflush;
+	unsigned int dst_needs_clflush;
+	void *dst, *src;
 	int ret;
 
-	if (batch_len > dest_obj->base.size ||
-	    batch_len + batch_start_offset > src_obj->base.size)
-		return ERR_PTR(-E2BIG);
-
-	if (WARN_ON(dest_obj->pages_pin_count == 0))
-		return ERR_PTR(-ENODEV);
-
-	ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
-	if (ret) {
-		DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
+	ret = i915_gem_obj_prepare_shmem_read(src_obj, &src_needs_clflush);
+	if (ret)
 		return ERR_PTR(ret);
-	}
 
-	src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
-	if (!src_base) {
-		DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
-		ret = -ENOMEM;
+	ret = i915_gem_obj_prepare_shmem_write(dst_obj, &dst_needs_clflush);
+	if (ret) {
+		dst = ERR_PTR(ret);
 		goto unpin_src;
 	}
 
-	ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
-	if (ret) {
-		DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
-		goto unmap_src;
+	dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
+	if (IS_ERR(dst))
+		goto unpin_dst;
+
+	src = ERR_PTR(-ENODEV);
+	if (src_needs_clflush &&
+	    i915_memcpy_from_wc((void *)(uintptr_t)batch_start_offset, NULL, 0)) {
+		src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
+		if (!IS_ERR(src)) {
+			i915_memcpy_from_wc(dst,
+					    src + batch_start_offset,
+					    ALIGN(batch_len, 16));
+			i915_gem_object_unpin_map(src_obj);
+		}
+	}
+	if (IS_ERR(src)) {
+		void *ptr;
+		int offset, n;
+
+		offset = offset_in_page(batch_start_offset);
+
+		/* We can avoid clflushing partial cachelines before the write
+		 * if we only every write full cache-lines. Since we know that
+		 * both the source and destination are in multiples of
+		 * PAGE_SIZE, we can simply round up to the next cacheline.
+		 * We don't care about copying too much here as we only
+		 * validate up to the end of the batch.
+		 */
+		if (dst_needs_clflush & CLFLUSH_BEFORE)
+			batch_len = roundup(batch_len,
+					    boot_cpu_data.x86_clflush_size);
+
+		ptr = dst;
+		for (n = batch_start_offset >> PAGE_SHIFT; batch_len; n++) {
+			int len = min_t(int, batch_len, PAGE_SIZE - offset);
+
+			src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
+			if (src_needs_clflush)
+				drm_clflush_virt_range(src + offset, len);
+			memcpy(ptr, src + offset, len);
+			kunmap_atomic(src);
+
+			ptr += len;
+			batch_len -= len;
+			offset = 0;
+		}
 	}
 
-	dst = vmap_batch(dest_obj, 0, batch_len);
-	if (!dst) {
-		DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
-		ret = -ENOMEM;
-		goto unmap_src;
-	}
+	/* dst_obj is returned with vmap pinned */
+	*needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
 
-	src = src_base + offset_in_page(batch_start_offset);
-	if (needs_clflush)
-		drm_clflush_virt_range(src, batch_len);
-
-	memcpy(dst, src, batch_len);
-
-unmap_src:
-	vunmap(src_base);
+unpin_dst:
+	i915_gem_obj_finish_shmem_access(dst_obj);
 unpin_src:
-	i915_gem_object_unpin_pages(src_obj);
-
-	return ret ? ERR_PTR(ret) : dst;
+	i915_gem_obj_finish_shmem_access(src_obj);
+	return dst;
 }
 
 /**
- * i915_needs_cmd_parser() - should a given ring use software command parsing?
+ * intel_engine_needs_cmd_parser() - should a given engine use software
+ *                                   command parsing?
  * @engine: the engine in question
  *
  * Only certain platforms require software batch buffer command parsing, and
  * only when enabled via module parameter.
  *
- * Return: true if the ring requires software command parsing
+ * Return: true if the engine requires software command parsing
  */
-bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
+bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
 {
 	if (!engine->needs_cmd_parser)
 		return false;
@@ -1048,6 +1063,9 @@
 		      const bool is_master,
 		      bool *oacontrol_set)
 {
+	if (desc->flags & CMD_DESC_SKIP)
+		return true;
+
 	if (desc->flags & CMD_DESC_REJECT) {
 		DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
 		return false;
@@ -1072,14 +1090,11 @@
 		     offset += step) {
 			const u32 reg_addr = cmd[offset] & desc->reg.mask;
 			const struct drm_i915_reg_descriptor *reg =
-				find_reg_in_tables(engine->reg_tables,
-						   engine->reg_table_count,
-						   is_master,
-						   reg_addr);
+				find_reg(engine, is_master, reg_addr);
 
 			if (!reg) {
-				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
-						 reg_addr, *cmd, engine->id);
+				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (exec_id=%d)\n",
+						 reg_addr, *cmd, engine->exec_id);
 				return false;
 			}
 
@@ -1159,11 +1174,11 @@
 				desc->bits[i].mask;
 
 			if (dword != desc->bits[i].expected) {
-				DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
+				DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (exec_id=%d)\n",
 						 *cmd,
 						 desc->bits[i].mask,
 						 desc->bits[i].expected,
-						 dword, engine->id);
+						 dword, engine->exec_id);
 				return false;
 			}
 		}
@@ -1189,23 +1204,26 @@
  * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
  * if the batch appears legal but should use hardware parsing
  */
-int i915_parse_cmds(struct intel_engine_cs *engine,
-		    struct drm_i915_gem_object *batch_obj,
-		    struct drm_i915_gem_object *shadow_batch_obj,
-		    u32 batch_start_offset,
-		    u32 batch_len,
-		    bool is_master)
+int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+			    struct drm_i915_gem_object *batch_obj,
+			    struct drm_i915_gem_object *shadow_batch_obj,
+			    u32 batch_start_offset,
+			    u32 batch_len,
+			    bool is_master)
 {
-	u32 *cmd, *batch_base, *batch_end;
-	struct drm_i915_cmd_descriptor default_desc = { 0 };
+	u32 *cmd, *batch_end;
+	struct drm_i915_cmd_descriptor default_desc = noop_desc;
+	const struct drm_i915_cmd_descriptor *desc = &default_desc;
 	bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
+	bool needs_clflush_after = false;
 	int ret = 0;
 
-	batch_base = copy_batch(shadow_batch_obj, batch_obj,
-				batch_start_offset, batch_len);
-	if (IS_ERR(batch_base)) {
+	cmd = copy_batch(shadow_batch_obj, batch_obj,
+			 batch_start_offset, batch_len,
+			 &needs_clflush_after);
+	if (IS_ERR(cmd)) {
 		DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
-		return PTR_ERR(batch_base);
+		return PTR_ERR(cmd);
 	}
 
 	/*
@@ -1213,17 +1231,14 @@
 	 * large or larger and copy_batch() will write MI_NOPs to the extra
 	 * space. Parsing should be faster in some cases this way.
 	 */
-	batch_end = batch_base + (batch_len / sizeof(*batch_end));
-
-	cmd = batch_base;
+	batch_end = cmd + (batch_len / sizeof(*batch_end));
 	while (cmd < batch_end) {
-		const struct drm_i915_cmd_descriptor *desc;
 		u32 length;
 
 		if (*cmd == MI_BATCH_BUFFER_END)
 			break;
 
-		desc = find_cmd(engine, *cmd, &default_desc);
+		desc = find_cmd(engine, *cmd, desc, &default_desc);
 		if (!desc) {
 			DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
 					 *cmd);
@@ -1274,7 +1289,9 @@
 		ret = -EINVAL;
 	}
 
-	vunmap(batch_base);
+	if (ret == 0 && needs_clflush_after)
+		drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len);
+	i915_gem_object_unpin_map(shadow_batch_obj);
 
 	return ret;
 }
@@ -1295,7 +1312,7 @@
 
 	/* If the command parser is not enabled, report 0 - unsupported */
 	for_each_engine(engine, dev_priv) {
-		if (i915_needs_cmd_parser(engine)) {
+		if (intel_engine_needs_cmd_parser(engine)) {
 			active = true;
 			break;
 		}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 844fea7..27b0e34 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -40,11 +40,10 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-enum {
-	ACTIVE_LIST,
-	INACTIVE_LIST,
-	PINNED_LIST,
-};
+static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
+{
+	return to_i915(node->minor->dev);
+}
 
 /* As the drm_debugfs_init() routines are called before dev->dev_private is
  * allocated we need to hook into the minor for release. */
@@ -63,7 +62,7 @@
 
 	node->minor = minor;
 	node->dent = ent;
-	node->info_ent = (void *) key;
+	node->info_ent = (void *)key;
 
 	mutex_lock(&minor->debugfs_lock);
 	list_add(&node->list, &minor->debugfs_list);
@@ -74,12 +73,11 @@
 
 static int i915_capabilities(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	const struct intel_device_info *info = INTEL_INFO(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	const struct intel_device_info *info = INTEL_INFO(dev_priv);
 
-	seq_printf(m, "gen: %d\n", info->gen);
-	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
+	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
+	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
 #define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
 #define SEP_SEMICOLON ;
 	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
@@ -91,7 +89,7 @@
 
 static char get_active_flag(struct drm_i915_gem_object *obj)
 {
-	return obj->active ? '*' : ' ';
+	return i915_gem_object_is_active(obj) ? '*' : ' ';
 }
 
 static char get_pin_flag(struct drm_i915_gem_object *obj)
@@ -101,7 +99,7 @@
 
 static char get_tiling_flag(struct drm_i915_gem_object *obj)
 {
-	switch (obj->tiling_mode) {
+	switch (i915_gem_object_get_tiling(obj)) {
 	default:
 	case I915_TILING_NONE: return ' ';
 	case I915_TILING_X: return 'X';
@@ -111,7 +109,7 @@
 
 static char get_global_flag(struct drm_i915_gem_object *obj)
 {
-	return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
+	return i915_gem_object_to_ggtt(obj, NULL) ?  'g' : ' ';
 }
 
 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
@@ -125,7 +123,7 @@
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
+		if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
 			size += vma->node.size;
 	}
 
@@ -138,6 +136,7 @@
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct intel_engine_cs *engine;
 	struct i915_vma *vma;
+	unsigned int frontbuffer_bits;
 	int pin_count = 0;
 	enum intel_engine_id id;
 
@@ -155,30 +154,36 @@
 		   obj->base.write_domain);
 	for_each_engine_id(engine, dev_priv, id)
 		seq_printf(m, "%x ",
-				i915_gem_request_get_seqno(obj->last_read_req[id]));
-	seq_printf(m, "] %x %x%s%s%s",
-		   i915_gem_request_get_seqno(obj->last_write_req),
-		   i915_gem_request_get_seqno(obj->last_fenced_req),
-		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
+			   i915_gem_active_get_seqno(&obj->last_read[id],
+						     &obj->base.dev->struct_mutex));
+	seq_printf(m, "] %x %s%s%s",
+		   i915_gem_active_get_seqno(&obj->last_write,
+					     &obj->base.dev->struct_mutex),
+		   i915_cache_level_str(dev_priv, obj->cache_level),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (vma->pin_count > 0)
+		if (i915_vma_is_pinned(vma))
 			pin_count++;
 	}
 	seq_printf(m, " (pinned x %d)", pin_count);
 	if (obj->pin_display)
 		seq_printf(m, " (display)");
-	if (obj->fence_reg != I915_FENCE_REG_NONE)
-		seq_printf(m, " (fence: %d)", obj->fence_reg);
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (!drm_mm_node_allocated(&vma->node))
+			continue;
+
 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
-			   vma->is_ggtt ? "g" : "pp",
+			   i915_vma_is_ggtt(vma) ? "g" : "pp",
 			   vma->node.start, vma->node.size);
-		if (vma->is_ggtt)
+		if (i915_vma_is_ggtt(vma))
 			seq_printf(m, ", type: %u", vma->ggtt_view.type);
+		if (vma->fence)
+			seq_printf(m, " , fence: %d%s",
+				   vma->fence->id,
+				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
 		seq_puts(m, ")");
 	}
 	if (obj->stolen)
@@ -192,58 +197,15 @@
 		*t = '\0';
 		seq_printf(m, " (%s mappable)", s);
 	}
-	if (obj->last_write_req != NULL)
-		seq_printf(m, " (%s)",
-			   i915_gem_request_get_engine(obj->last_write_req)->name);
-	if (obj->frontbuffer_bits)
-		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
-}
 
-static int i915_gem_object_list_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = m->private;
-	uintptr_t list = (uintptr_t) node->info_ent->data;
-	struct list_head *head;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct i915_vma *vma;
-	u64 total_obj_size, total_gtt_size;
-	int count, ret;
+	engine = i915_gem_active_get_engine(&obj->last_write,
+					    &dev_priv->drm.struct_mutex);
+	if (engine)
+		seq_printf(m, " (%s)", engine->name);
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	/* FIXME: the user of this interface might want more than just GGTT */
-	switch (list) {
-	case ACTIVE_LIST:
-		seq_puts(m, "Active:\n");
-		head = &ggtt->base.active_list;
-		break;
-	case INACTIVE_LIST:
-		seq_puts(m, "Inactive:\n");
-		head = &ggtt->base.inactive_list;
-		break;
-	default:
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
-	}
-
-	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(vma, head, vm_link) {
-		seq_printf(m, "   ");
-		describe_obj(m, vma->obj);
-		seq_printf(m, "\n");
-		total_obj_size += vma->obj->base.size;
-		total_gtt_size += vma->node.size;
-		count++;
-	}
-	mutex_unlock(&dev->struct_mutex);
-
-	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
-		   count, total_obj_size, total_gtt_size);
-	return 0;
+	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+	if (frontbuffer_bits)
+		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
 }
 
 static int obj_rank_by_stolen(void *priv,
@@ -263,9 +225,8 @@
 
 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_i915_gem_object *obj;
 	u64 total_obj_size, total_gtt_size;
 	LIST_HEAD(stolen);
@@ -311,17 +272,6 @@
 	return 0;
 }
 
-#define count_objects(list, member) do { \
-	list_for_each_entry(obj, list, member) { \
-		size += i915_gem_obj_total_ggtt_size(obj); \
-		++count; \
-		if (obj->map_and_fenceable) { \
-			mappable_size += i915_gem_obj_ggtt_size(obj); \
-			++mappable_count; \
-		} \
-	} \
-} while (0)
-
 struct file_stats {
 	struct drm_i915_file_private *file_priv;
 	unsigned long count;
@@ -338,47 +288,30 @@
 
 	stats->count++;
 	stats->total += obj->base.size;
-
+	if (!obj->bind_count)
+		stats->unbound += obj->base.size;
 	if (obj->base.name || obj->base.dma_buf)
 		stats->shared += obj->base.size;
 
-	if (USES_FULL_PPGTT(obj->base.dev)) {
-		list_for_each_entry(vma, &obj->vma_list, obj_link) {
-			struct i915_hw_ppgtt *ppgtt;
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (!drm_mm_node_allocated(&vma->node))
+			continue;
 
-			if (!drm_mm_node_allocated(&vma->node))
+		if (i915_vma_is_ggtt(vma)) {
+			stats->global += vma->node.size;
+		} else {
+			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
+
+			if (ppgtt->base.file != stats->file_priv)
 				continue;
-
-			if (vma->is_ggtt) {
-				stats->global += obj->base.size;
-				continue;
-			}
-
-			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
-			if (ppgtt->file_priv != stats->file_priv)
-				continue;
-
-			if (obj->active) /* XXX per-vma statistic */
-				stats->active += obj->base.size;
-			else
-				stats->inactive += obj->base.size;
-
-			return 0;
 		}
-	} else {
-		if (i915_gem_obj_ggtt_bound(obj)) {
-			stats->global += obj->base.size;
-			if (obj->active)
-				stats->active += obj->base.size;
-			else
-				stats->inactive += obj->base.size;
-			return 0;
-		}
+
+		if (i915_vma_is_active(vma))
+			stats->active += vma->node.size;
+		else
+			stats->inactive += vma->node.size;
 	}
 
-	if (!list_empty(&obj->global_list))
-		stats->unbound += obj->base.size;
-
 	return 0;
 }
 
@@ -424,9 +357,9 @@
 
 	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
 		if (ctx->engine[n].state)
-			per_file_stats(0, ctx->engine[n].state, data);
-		if (ctx->engine[n].ringbuf)
-			per_file_stats(0, ctx->engine[n].ringbuf->obj, data);
+			per_file_stats(0, ctx->engine[n].state->obj, data);
+		if (ctx->engine[n].ring)
+			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
 	}
 
 	return 0;
@@ -435,48 +368,34 @@
 static void print_context_stats(struct seq_file *m,
 				struct drm_i915_private *dev_priv)
 {
+	struct drm_device *dev = &dev_priv->drm;
 	struct file_stats stats;
 	struct drm_file *file;
 
 	memset(&stats, 0, sizeof(stats));
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&dev->struct_mutex);
 	if (dev_priv->kernel_context)
 		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
 
-	list_for_each_entry(file, &dev_priv->drm.filelist, lhead) {
+	list_for_each_entry(file, &dev->filelist, lhead) {
 		struct drm_i915_file_private *fpriv = file->driver_priv;
 		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
 	}
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&dev->struct_mutex);
 
 	print_file_stats(m, "[k]contexts", stats);
 }
 
-#define count_vmas(list, member) do { \
-	list_for_each_entry(vma, list, member) { \
-		size += i915_gem_obj_total_ggtt_size(vma->obj); \
-		++count; \
-		if (vma->obj->map_and_fenceable) { \
-			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
-			++mappable_count; \
-		} \
-	} \
-} while (0)
-
-static int i915_gem_object_info(struct seq_file *m, void* data)
+static int i915_gem_object_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	u32 count, mappable_count, purgeable_count;
-	u64 size, mappable_size, purgeable_size;
-	unsigned long pin_mapped_count = 0, pin_mapped_purgeable_count = 0;
-	u64 pin_mapped_size = 0, pin_mapped_purgeable_size = 0;
+	u32 count, mapped_count, purgeable_count, dpy_count;
+	u64 size, mapped_size, purgeable_size, dpy_size;
 	struct drm_i915_gem_object *obj;
 	struct drm_file *file;
-	struct i915_vma *vma;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -487,70 +406,53 @@
 		   dev_priv->mm.object_count,
 		   dev_priv->mm.object_memory);
 
-	size = count = mappable_size = mappable_count = 0;
-	count_objects(&dev_priv->mm.bound_list, global_list);
-	seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
-		   count, mappable_count, size, mappable_size);
-
-	size = count = mappable_size = mappable_count = 0;
-	count_vmas(&ggtt->base.active_list, vm_link);
-	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
-		   count, mappable_count, size, mappable_size);
-
-	size = count = mappable_size = mappable_count = 0;
-	count_vmas(&ggtt->base.inactive_list, vm_link);
-	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
-		   count, mappable_count, size, mappable_size);
-
-	size = count = purgeable_size = purgeable_count = 0;
+	size = count = 0;
+	mapped_size = mapped_count = 0;
+	purgeable_size = purgeable_count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
-		size += obj->base.size, ++count;
-		if (obj->madv == I915_MADV_DONTNEED)
-			purgeable_size += obj->base.size, ++purgeable_count;
-		if (obj->mapping) {
-			pin_mapped_count++;
-			pin_mapped_size += obj->base.size;
-			if (obj->pages_pin_count == 0) {
-				pin_mapped_purgeable_count++;
-				pin_mapped_purgeable_size += obj->base.size;
-			}
-		}
-	}
-	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
+		size += obj->base.size;
+		++count;
 
-	size = count = mappable_size = mappable_count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (obj->fault_mappable) {
-			size += i915_gem_obj_ggtt_size(obj);
-			++count;
-		}
-		if (obj->pin_display) {
-			mappable_size += i915_gem_obj_ggtt_size(obj);
-			++mappable_count;
-		}
 		if (obj->madv == I915_MADV_DONTNEED) {
 			purgeable_size += obj->base.size;
 			++purgeable_count;
 		}
+
 		if (obj->mapping) {
-			pin_mapped_count++;
-			pin_mapped_size += obj->base.size;
-			if (obj->pages_pin_count == 0) {
-				pin_mapped_purgeable_count++;
-				pin_mapped_purgeable_size += obj->base.size;
-			}
+			mapped_count++;
+			mapped_size += obj->base.size;
 		}
 	}
+	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
+
+	size = count = dpy_size = dpy_count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		size += obj->base.size;
+		++count;
+
+		if (obj->pin_display) {
+			dpy_size += obj->base.size;
+			++dpy_count;
+		}
+
+		if (obj->madv == I915_MADV_DONTNEED) {
+			purgeable_size += obj->base.size;
+			++purgeable_count;
+		}
+
+		if (obj->mapping) {
+			mapped_count++;
+			mapped_size += obj->base.size;
+		}
+	}
+	seq_printf(m, "%u bound objects, %llu bytes\n",
+		   count, size);
 	seq_printf(m, "%u purgeable objects, %llu bytes\n",
 		   purgeable_count, purgeable_size);
-	seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
-		   mappable_count, mappable_size);
-	seq_printf(m, "%u fault mappable objects, %llu bytes\n",
-		   count, size);
-	seq_printf(m,
-		   "%lu [%lu] pin mapped objects, %llu [%llu] bytes [purgeable]\n",
-		   pin_mapped_count, pin_mapped_purgeable_count,
-		   pin_mapped_size, pin_mapped_purgeable_size);
+	seq_printf(m, "%u mapped objects, %llu bytes\n",
+		   mapped_count, mapped_size);
+	seq_printf(m, "%u display objects (pinned), %llu bytes\n",
+		   dpy_count, dpy_size);
 
 	seq_printf(m, "%llu [%llu] gtt total\n",
 		   ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
@@ -563,6 +465,8 @@
 	print_context_stats(m, dev_priv);
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct file_stats stats;
+		struct drm_i915_file_private *file_priv = file->driver_priv;
+		struct drm_i915_gem_request *request;
 		struct task_struct *task;
 
 		memset(&stats, 0, sizeof(stats));
@@ -576,10 +480,17 @@
 		 * still alive (e.g. get_pid(current) => fork() => exit()).
 		 * Therefore, we need to protect this ->comm access using RCU.
 		 */
+		mutex_lock(&dev->struct_mutex);
+		request = list_first_entry_or_null(&file_priv->mm.request_list,
+						   struct drm_i915_gem_request,
+						   client_list);
 		rcu_read_lock();
-		task = pid_task(file->pid, PIDTYPE_PID);
+		task = pid_task(request && request->ctx->pid ?
+				request->ctx->pid : file->pid,
+				PIDTYPE_PID);
 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
 		rcu_read_unlock();
+		mutex_unlock(&dev->struct_mutex);
 	}
 	mutex_unlock(&dev->filelist_mutex);
 
@@ -589,9 +500,9 @@
 static int i915_gem_gtt_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	uintptr_t list = (uintptr_t) node->info_ent->data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(node);
+	struct drm_device *dev = &dev_priv->drm;
+	bool show_pin_display_only = !!node->info_ent->data;
 	struct drm_i915_gem_object *obj;
 	u64 total_obj_size, total_gtt_size;
 	int count, ret;
@@ -602,7 +513,7 @@
 
 	total_obj_size = total_gtt_size = count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
+		if (show_pin_display_only && !obj->pin_display)
 			continue;
 
 		seq_puts(m, "   ");
@@ -623,9 +534,8 @@
 
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_crtc *crtc;
 	int ret;
 
@@ -672,7 +582,7 @@
 				   intel_crtc_get_vblank_counter(crtc));
 			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
 
-			if (INTEL_INFO(dev)->gen >= 4)
+			if (INTEL_GEN(dev_priv) >= 4)
 				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
 			else
 				addr = I915_READ(DSPADDR(crtc->plane));
@@ -693,9 +603,8 @@
 
 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_i915_gem_object *obj;
 	struct intel_engine_cs *engine;
 	int total = 0;
@@ -738,9 +647,8 @@
 
 static int i915_gem_request_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_engine_cs *engine;
 	struct drm_i915_gem_request *req;
 	int ret, any;
@@ -754,21 +662,20 @@
 		int count;
 
 		count = 0;
-		list_for_each_entry(req, &engine->request_list, list)
+		list_for_each_entry(req, &engine->request_list, link)
 			count++;
 		if (count == 0)
 			continue;
 
 		seq_printf(m, "%s requests: %d\n", engine->name, count);
-		list_for_each_entry(req, &engine->request_list, list) {
+		list_for_each_entry(req, &engine->request_list, link) {
+			struct pid *pid = req->ctx->pid;
 			struct task_struct *task;
 
 			rcu_read_lock();
-			task = NULL;
-			if (req->pid)
-				task = pid_task(req->pid, PIDTYPE_PID);
+			task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
 			seq_printf(m, "    %x @ %d: %s [%d]\n",
-				   req->seqno,
+				   req->fence.seqno,
 				   (int) (jiffies - req->emitted_jiffies),
 				   task ? task->comm : "<unknown>",
 				   task ? task->pid : -1);
@@ -793,8 +700,6 @@
 
 	seq_printf(m, "Current sequence (%s): %x\n",
 		   engine->name, intel_engine_get_seqno(engine));
-	seq_printf(m, "Current user interrupts (%s): %lx\n",
-		   engine->name, READ_ONCE(engine->breadcrumbs.irq_wakeups));
 
 	spin_lock(&b->lock);
 	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
@@ -808,41 +713,25 @@
 
 static int i915_gem_seqno_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_engine_cs *engine;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
 
 	for_each_engine(engine, dev_priv)
 		i915_ring_seqno_info(m, engine);
 
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-
 	return 0;
 }
 
 
 static int i915_interrupt_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_engine_cs *engine;
-	int ret, i, pipe;
+	int i, pipe;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 	intel_runtime_pm_get(dev_priv);
 
-	if (IS_CHERRYVIEW(dev)) {
+	if (IS_CHERRYVIEW(dev_priv)) {
 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
 			   I915_READ(GEN8_MASTER_IRQ));
 
@@ -881,7 +770,7 @@
 			   I915_READ(GEN8_PCU_IIR));
 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
 			   I915_READ(GEN8_PCU_IER));
-	} else if (INTEL_INFO(dev)->gen >= 8) {
+	} else if (INTEL_GEN(dev_priv) >= 8) {
 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
 			   I915_READ(GEN8_MASTER_IRQ));
 
@@ -937,7 +826,7 @@
 			   I915_READ(GEN8_PCU_IIR));
 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
 			   I915_READ(GEN8_PCU_IER));
-	} else if (IS_VALLEYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv)) {
 		seq_printf(m, "Display IER:\t%08x\n",
 			   I915_READ(VLV_IER));
 		seq_printf(m, "Display IIR:\t%08x\n",
@@ -975,7 +864,7 @@
 		seq_printf(m, "DPINVGTT:\t%08x\n",
 			   I915_READ(DPINVGTT));
 
-	} else if (!HAS_PCH_SPLIT(dev)) {
+	} else if (!HAS_PCH_SPLIT(dev_priv)) {
 		seq_printf(m, "Interrupt enable:    %08x\n",
 			   I915_READ(IER));
 		seq_printf(m, "Interrupt identity:  %08x\n",
@@ -1007,7 +896,7 @@
 			   I915_READ(GTIMR));
 	}
 	for_each_engine(engine, dev_priv) {
-		if (INTEL_INFO(dev)->gen >= 6) {
+		if (INTEL_GEN(dev_priv) >= 6) {
 			seq_printf(m,
 				   "Graphics Interrupt mask (%s):	%08x\n",
 				   engine->name, I915_READ_IMR(engine));
@@ -1015,16 +904,14 @@
 		i915_ring_seqno_info(m, engine);
 	}
 	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
 
 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	int i, ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1033,14 +920,14 @@
 
 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
+		struct i915_vma *vma = dev_priv->fence_regs[i].vma;
 
 		seq_printf(m, "Fence %d, pin count = %d, object = ",
 			   i, dev_priv->fence_regs[i].pin_count);
-		if (obj == NULL)
+		if (!vma)
 			seq_puts(m, "unused");
 		else
-			describe_obj(m, obj);
+			describe_obj(m, vma->obj);
 		seq_putc(m, '\n');
 	}
 
@@ -1051,8 +938,7 @@
 static int i915_hws_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(node);
 	struct intel_engine_cs *engine;
 	const u32 *hws;
 	int i;
@@ -1077,33 +963,25 @@
 		       loff_t *ppos)
 {
 	struct i915_error_state_file_priv *error_priv = filp->private_data;
-	struct drm_device *dev = error_priv->dev;
-	int ret;
 
 	DRM_DEBUG_DRIVER("Resetting error state\n");
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	i915_destroy_error_state(dev);
-	mutex_unlock(&dev->struct_mutex);
+	i915_destroy_error_state(error_priv->dev);
 
 	return cnt;
 }
 
 static int i915_error_state_open(struct inode *inode, struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = inode->i_private;
 	struct i915_error_state_file_priv *error_priv;
 
 	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
 	if (!error_priv)
 		return -ENOMEM;
 
-	error_priv->dev = dev;
+	error_priv->dev = &dev_priv->drm;
 
-	i915_error_state_get(dev, error_priv);
+	i915_error_state_get(&dev_priv->drm, error_priv);
 
 	file->private_data = error_priv;
 
@@ -1129,7 +1007,8 @@
 	ssize_t ret_count = 0;
 	int ret;
 
-	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
+	ret = i915_error_state_buf_init(&error_str,
+					to_i915(error_priv->dev), count, *pos);
 	if (ret)
 		return ret;
 
@@ -1162,16 +1041,15 @@
 static int
 i915_next_seqno_get(void *data, u64 *val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 	int ret;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
 	if (ret)
 		return ret;
 
 	*val = dev_priv->next_seqno;
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	return 0;
 }
@@ -1179,7 +1057,8 @@
 static int
 i915_next_seqno_set(void *data, u64 val)
 {
-	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = data;
+	struct drm_device *dev = &dev_priv->drm;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1198,16 +1077,13 @@
 
 static int i915_frequency_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	int ret = 0;
 
 	intel_runtime_pm_get(dev_priv);
 
-	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
-	if (IS_GEN5(dev)) {
+	if (IS_GEN5(dev_priv)) {
 		u16 rgvswctl = I915_READ16(MEMSWCTL);
 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 
@@ -1217,7 +1093,7 @@
 			   MEMSTAT_VID_SHIFT);
 		seq_printf(m, "Current P-state: %d\n",
 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
-	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		u32 freq_sts;
 
 		mutex_lock(&dev_priv->rps.hw_lock);
@@ -1244,7 +1120,7 @@
 			   "efficient (RPe) frequency: %d MHz\n",
 			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
 		mutex_unlock(&dev_priv->rps.hw_lock);
-	} else if (INTEL_INFO(dev)->gen >= 6) {
+	} else if (INTEL_GEN(dev_priv) >= 6) {
 		u32 rp_state_limits;
 		u32 gt_perf_status;
 		u32 rp_state_cap;
@@ -1256,7 +1132,7 @@
 		int max_freq;
 
 		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
-		if (IS_BROXTON(dev)) {
+		if (IS_BROXTON(dev_priv)) {
 			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
 			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
 		} else {
@@ -1272,11 +1148,11 @@
 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
 		reqf = I915_READ(GEN6_RPNSWREQ);
-		if (IS_GEN9(dev))
+		if (IS_GEN9(dev_priv))
 			reqf >>= 23;
 		else {
 			reqf &= ~GEN6_TURBO_DISABLE;
-			if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 				reqf >>= 24;
 			else
 				reqf >>= 25;
@@ -1294,9 +1170,9 @@
 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
-		if (IS_GEN9(dev))
+		if (IS_GEN9(dev_priv))
 			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
-		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
 		else
 			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -1305,7 +1181,7 @@
 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 		mutex_unlock(&dev->struct_mutex);
 
-		if (IS_GEN6(dev) || IS_GEN7(dev)) {
+		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
 			pm_ier = I915_READ(GEN6_PMIER);
 			pm_imr = I915_READ(GEN6_PMIMR);
 			pm_isr = I915_READ(GEN6_PMISR);
@@ -1323,7 +1199,7 @@
 		seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 		seq_printf(m, "Render p-state ratio: %d\n",
-			   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
+			   (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
 		seq_printf(m, "Render p-state VID: %d\n",
 			   gt_perf_status & 0xff);
 		seq_printf(m, "Render p-state limit: %d\n",
@@ -1352,22 +1228,22 @@
 		seq_printf(m, "Down threshold: %d%%\n",
 			   dev_priv->rps.down_threshold);
 
-		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
+		max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 0 :
 			    rp_state_cap >> 16) & 0xff;
-		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+		max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
 			     GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 
 		max_freq = (rp_state_cap & 0xff00) >> 8;
-		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+		max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
 			     GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 
-		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
+		max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 16 :
 			    rp_state_cap >> 0) & 0xff;
-		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+		max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
 			     GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
@@ -1381,6 +1257,8 @@
 			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
 		seq_printf(m, "Min freq: %d MHz\n",
 			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+		seq_printf(m, "Boost freq: %d MHz\n",
+			   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
 		seq_printf(m, "Max freq: %d MHz\n",
 			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
 		seq_printf(m,
@@ -1401,9 +1279,7 @@
 
 static int i915_hangcheck_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_engine_cs *engine;
 	u64 acthd[I915_NUM_ENGINES];
 	u32 seqno[I915_NUM_ENGINES];
@@ -1411,6 +1287,15 @@
 	enum intel_engine_id id;
 	int j;
 
+	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
+		seq_printf(m, "Wedged\n");
+	if (test_bit(I915_RESET_IN_PROGRESS, &dev_priv->gpu_error.flags))
+		seq_printf(m, "Reset in progress\n");
+	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
+		seq_printf(m, "Waiter holding struct mutex\n");
+	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
+		seq_printf(m, "struct_mutex blocked for reset\n");
+
 	if (!i915.enable_hangcheck) {
 		seq_printf(m, "Hangcheck disabled\n");
 		return 0;
@@ -1419,7 +1304,7 @@
 	intel_runtime_pm_get(dev_priv);
 
 	for_each_engine_id(engine, dev_priv, id) {
-		acthd[id] = intel_ring_get_active_head(engine);
+		acthd[id] = intel_engine_get_active_head(engine);
 		seqno[id] = intel_engine_get_seqno(engine);
 	}
 
@@ -1440,11 +1325,10 @@
 			   engine->hangcheck.seqno,
 			   seqno[id],
 			   engine->last_submitted_seqno);
-		seq_printf(m, "\twaiters? %d\n",
-			   intel_engine_has_waiter(engine));
-		seq_printf(m, "\tuser interrupts = %lx [current %lx]\n",
-			   engine->hangcheck.user_interrupts,
-			   READ_ONCE(engine->breadcrumbs.irq_wakeups));
+		seq_printf(m, "\twaiters? %s, fake irq active? %s\n",
+			   yesno(intel_engine_has_waiter(engine)),
+			   yesno(test_bit(engine->id,
+					  &dev_priv->gpu_error.missed_irq_rings)));
 		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
 			   (long long)engine->hangcheck.acthd,
 			   (long long)acthd[id]);
@@ -1472,9 +1356,8 @@
 
 static int ironlake_drpc_info(struct seq_file *m)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	u32 rgvmodectl, rstdbyctl;
 	u16 crstandvid;
 	int ret;
@@ -1540,9 +1423,7 @@
 
 static int i915_forcewake_domains(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_uncore_forcewake_domain *fw_domain;
 
 	spin_lock_irq(&dev_priv->uncore.lock);
@@ -1558,9 +1439,7 @@
 
 static int vlv_drpc_info(struct seq_file *m)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	u32 rpmodectl1, rcctl1, pw_status;
 
 	intel_runtime_pm_get(dev_priv);
@@ -1598,10 +1477,10 @@
 
 static int gen6_drpc_info(struct seq_file *m)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
+	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
 	unsigned forcewake_count;
 	int count = 0, ret;
 
@@ -1629,6 +1508,10 @@
 
 	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
+	if (INTEL_GEN(dev_priv) >= 9) {
+		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
+		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
+	}
 	mutex_unlock(&dev->struct_mutex);
 	mutex_lock(&dev_priv->rps.hw_lock);
 	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
@@ -1647,6 +1530,12 @@
 		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
 	seq_printf(m, "RC6 Enabled: %s\n",
 		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
+	if (INTEL_GEN(dev_priv) >= 9) {
+		seq_printf(m, "Render Well Gating Enabled: %s\n",
+			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
+		seq_printf(m, "Media Well Gating Enabled: %s\n",
+			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
+	}
 	seq_printf(m, "Deep RC6 Enabled: %s\n",
 		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
 	seq_printf(m, "Deepest RC6 Enabled: %s\n",
@@ -1675,6 +1564,14 @@
 
 	seq_printf(m, "Core Power Down: %s\n",
 		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+	if (INTEL_GEN(dev_priv) >= 9) {
+		seq_printf(m, "Render Power Well: %s\n",
+			(gen9_powergate_status &
+			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
+		seq_printf(m, "Media Power Well: %s\n",
+			(gen9_powergate_status &
+			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
+	}
 
 	/* Not exactly sure what this is */
 	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
@@ -1692,17 +1589,16 @@
 		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
 	seq_printf(m, "RC6++ voltage: %dmV\n",
 		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
-	return 0;
+	return i915_forcewake_domains(m, NULL);
 }
 
 static int i915_drpc_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		return vlv_drpc_info(m);
-	else if (INTEL_INFO(dev)->gen >= 6)
+	else if (INTEL_GEN(dev_priv) >= 6)
 		return gen6_drpc_info(m);
 	else
 		return ironlake_drpc_info(m);
@@ -1710,9 +1606,7 @@
 
 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 
 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
 		   dev_priv->fb_tracking.busy_bits);
@@ -1725,11 +1619,9 @@
 
 static int i915_fbc_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 
-	if (!HAS_FBC(dev)) {
+	if (!HAS_FBC(dev_priv)) {
 		seq_puts(m, "FBC unsupported on this chipset\n");
 		return 0;
 	}
@@ -1743,7 +1635,7 @@
 		seq_printf(m, "FBC disabled: %s\n",
 			   dev_priv->fbc.no_fbc_reason);
 
-	if (INTEL_INFO(dev_priv)->gen >= 7)
+	if (INTEL_GEN(dev_priv) >= 7)
 		seq_printf(m, "Compressing: %s\n",
 			   yesno(I915_READ(FBC_STATUS2) &
 				 FBC_COMPRESSION_MASK));
@@ -1756,10 +1648,9 @@
 
 static int i915_fbc_fc_get(void *data, u64 *val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 
-	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
+	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
 		return -ENODEV;
 
 	*val = dev_priv->fbc.false_color;
@@ -1769,11 +1660,10 @@
 
 static int i915_fbc_fc_set(void *data, u64 val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 	u32 reg;
 
-	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
+	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
 		return -ENODEV;
 
 	mutex_lock(&dev_priv->fbc.lock);
@@ -1795,11 +1685,9 @@
 
 static int i915_ips_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 
-	if (!HAS_IPS(dev)) {
+	if (!HAS_IPS(dev_priv)) {
 		seq_puts(m, "not supported\n");
 		return 0;
 	}
@@ -1809,7 +1697,7 @@
 	seq_printf(m, "Enabled by kernel parameter: %s\n",
 		   yesno(i915.enable_ips));
 
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_GEN(dev_priv) >= 8) {
 		seq_puts(m, "Currently: unknown\n");
 	} else {
 		if (I915_READ(IPS_CTL) & IPS_ENABLE)
@@ -1825,23 +1713,21 @@
 
 static int i915_sr_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	bool sr_enabled = false;
 
 	intel_runtime_pm_get(dev_priv);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev_priv))
 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
-	else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
-		 IS_I945G(dev) || IS_I945GM(dev))
+	else if (IS_CRESTLINE(dev_priv) || IS_G4X(dev_priv) ||
+		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
-	else if (IS_I915GM(dev))
+	else if (IS_I915GM(dev_priv))
 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
-	else if (IS_PINEVIEW(dev))
+	else if (IS_PINEVIEW(dev_priv))
 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
-	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
 
 	intel_runtime_pm_put(dev_priv);
@@ -1854,13 +1740,12 @@
 
 static int i915_emon_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	unsigned long temp, chipset, gfx;
 	int ret;
 
-	if (!IS_GEN5(dev))
+	if (!IS_GEN5(dev_priv))
 		return -ENODEV;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1882,27 +1767,23 @@
 
 static int i915_ring_freq_table(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	int ret = 0;
 	int gpu_freq, ia_freq;
 	unsigned int max_gpu_freq, min_gpu_freq;
 
-	if (!HAS_CORE_RING_FREQ(dev)) {
+	if (!HAS_LLC(dev_priv)) {
 		seq_puts(m, "unsupported on this chipset\n");
 		return 0;
 	}
 
 	intel_runtime_pm_get(dev_priv);
 
-	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		goto out;
 
-	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 		/* Convert GT frequency to 50 HZ units */
 		min_gpu_freq =
 			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
@@ -1922,7 +1803,7 @@
 				       &ia_freq);
 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
 			   intel_gpu_freq(dev_priv, (gpu_freq *
-				(IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+				(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
 				 GEN9_FREQ_SCALER : 1))),
 			   ((ia_freq >> 0) & 0xff) * 100,
 			   ((ia_freq >> 8) & 0xff) * 100);
@@ -1937,9 +1818,8 @@
 
 static int i915_opregion(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_opregion *opregion = &dev_priv->opregion;
 	int ret;
 
@@ -1958,10 +1838,7 @@
 
 static int i915_vbt(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_opregion *opregion = &dev_priv->opregion;
+	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
 
 	if (opregion->vbt)
 		seq_write(m, opregion->vbt, opregion->vbt_size);
@@ -1971,8 +1848,8 @@
 
 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_framebuffer *fbdev_fb = NULL;
 	struct drm_framebuffer *drm_fb;
 	int ret;
@@ -1982,8 +1859,8 @@
 		return ret;
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
-	if (to_i915(dev)->fbdev) {
-		fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
+	if (dev_priv->fbdev) {
+		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
 
 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
 			   fbdev_fb->base.width,
@@ -2019,19 +1896,17 @@
 	return 0;
 }
 
-static void describe_ctx_ringbuf(struct seq_file *m,
-				 struct intel_ringbuffer *ringbuf)
+static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
 {
 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
-		   ringbuf->space, ringbuf->head, ringbuf->tail,
-		   ringbuf->last_retired_head);
+		   ring->space, ring->head, ring->tail,
+		   ring->last_retired_head);
 }
 
 static int i915_context_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
 	int ret;
@@ -2042,18 +1917,17 @@
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
 		seq_printf(m, "HW context %u ", ctx->hw_id);
-		if (IS_ERR(ctx->file_priv)) {
-			seq_puts(m, "(deleted) ");
-		} else if (ctx->file_priv) {
-			struct pid *pid = ctx->file_priv->file->pid;
+		if (ctx->pid) {
 			struct task_struct *task;
 
-			task = get_pid_task(pid, PIDTYPE_PID);
+			task = get_pid_task(ctx->pid, PIDTYPE_PID);
 			if (task) {
 				seq_printf(m, "(%s [%d]) ",
 					   task->comm, task->pid);
 				put_task_struct(task);
 			}
+		} else if (IS_ERR(ctx->file_priv)) {
+			seq_puts(m, "(deleted) ");
 		} else {
 			seq_puts(m, "(kernel) ");
 		}
@@ -2067,9 +1941,9 @@
 			seq_printf(m, "%s: ", engine->name);
 			seq_putc(m, ce->initialised ? 'I' : 'i');
 			if (ce->state)
-				describe_obj(m, ce->state);
-			if (ce->ringbuf)
-				describe_ctx_ringbuf(m, ce->ringbuf);
+				describe_obj(m, ce->state->obj);
+			if (ce->ring)
+				describe_ctx_ring(m, ce->ring);
 			seq_putc(m, '\n');
 		}
 
@@ -2085,36 +1959,34 @@
 			      struct i915_gem_context *ctx,
 			      struct intel_engine_cs *engine)
 {
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
+	struct i915_vma *vma = ctx->engine[engine->id].state;
 	struct page *page;
-	uint32_t *reg_state;
 	int j;
-	unsigned long ggtt_offset = 0;
 
 	seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
 
-	if (ctx_obj == NULL) {
-		seq_puts(m, "\tNot allocated\n");
+	if (!vma) {
+		seq_puts(m, "\tFake context\n");
 		return;
 	}
 
-	if (!i915_gem_obj_ggtt_bound(ctx_obj))
-		seq_puts(m, "\tNot bound in GGTT\n");
-	else
-		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
+	if (vma->flags & I915_VMA_GLOBAL_BIND)
+		seq_printf(m, "\tBound in GGTT at 0x%08x\n",
+			   i915_ggtt_offset(vma));
 
-	if (i915_gem_object_get_pages(ctx_obj)) {
-		seq_puts(m, "\tFailed to get pages for context object\n");
+	if (i915_gem_object_get_pages(vma->obj)) {
+		seq_puts(m, "\tFailed to get pages for context object\n\n");
 		return;
 	}
 
-	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	if (!WARN_ON(page == NULL)) {
-		reg_state = kmap_atomic(page);
+	page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
+	if (page) {
+		u32 *reg_state = kmap_atomic(page);
 
 		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
-			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-				   ggtt_offset + 4096 + (j * 4),
+			seq_printf(m,
+				   "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				   j * 4,
 				   reg_state[j], reg_state[j + 1],
 				   reg_state[j + 2], reg_state[j + 3]);
 		}
@@ -2126,9 +1998,8 @@
 
 static int i915_dump_lrc(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
 	int ret;
@@ -2153,9 +2024,8 @@
 
 static int i915_execlists(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = (struct drm_info_node *)m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_engine_cs *engine;
 	u32 status_pointer;
 	u8 read_pointer;
@@ -2190,7 +2060,7 @@
 		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
 		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
 
-		read_pointer = engine->next_context_status_buffer;
+		read_pointer = GEN8_CSB_READ_PTR(status_pointer);
 		write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
 		if (read_pointer > write_pointer)
 			write_pointer += GEN8_CSB_ENTRIES;
@@ -2256,9 +2126,8 @@
 
 static int i915_swizzle_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2271,7 +2140,7 @@
 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
 
-	if (IS_GEN3(dev) || IS_GEN4(dev)) {
+	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
 		seq_printf(m, "DDC = 0x%08x\n",
 			   I915_READ(DCC));
 		seq_printf(m, "DDC2 = 0x%08x\n",
@@ -2280,7 +2149,7 @@
 			   I915_READ16(C0DRB3));
 		seq_printf(m, "C1DRB3 = 0x%04x\n",
 			   I915_READ16(C1DRB3));
-	} else if (INTEL_INFO(dev)->gen >= 6) {
+	} else if (INTEL_GEN(dev_priv) >= 6) {
 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
 			   I915_READ(MAD_DIMM_C0));
 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
@@ -2289,7 +2158,7 @@
 			   I915_READ(MAD_DIMM_C2));
 		seq_printf(m, "TILECTL = 0x%08x\n",
 			   I915_READ(TILECTL));
-		if (INTEL_INFO(dev)->gen >= 8)
+		if (INTEL_GEN(dev_priv) >= 8)
 			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
 				   I915_READ(GAMTARBMODE));
 		else
@@ -2329,9 +2198,9 @@
 	return 0;
 }
 
-static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+static void gen8_ppgtt_info(struct seq_file *m,
+			    struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 	int i;
@@ -2350,9 +2219,9 @@
 	}
 }
 
-static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+static void gen6_ppgtt_info(struct seq_file *m,
+			    struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
 
 	if (IS_GEN6(dev_priv))
@@ -2384,22 +2253,23 @@
 
 static int i915_ppgtt_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_file *file;
-
-	int ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	if (INTEL_INFO(dev)->gen >= 8)
-		gen8_ppgtt_info(m, dev);
-	else if (INTEL_INFO(dev)->gen >= 6)
-		gen6_ppgtt_info(m, dev);
+	int ret;
 
 	mutex_lock(&dev->filelist_mutex);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		goto out_unlock;
+
+	intel_runtime_pm_get(dev_priv);
+
+	if (INTEL_GEN(dev_priv) >= 8)
+		gen8_ppgtt_info(m, dev_priv);
+	else if (INTEL_GEN(dev_priv) >= 6)
+		gen6_ppgtt_info(m, dev_priv);
+
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct drm_i915_file_private *file_priv = file->driver_priv;
 		struct task_struct *task;
@@ -2407,19 +2277,19 @@
 		task = get_pid_task(file->pid, PIDTYPE_PID);
 		if (!task) {
 			ret = -ESRCH;
-			goto out_unlock;
+			goto out_rpm;
 		}
 		seq_printf(m, "\nproc: %s\n", task->comm);
 		put_task_struct(task);
 		idr_for_each(&file_priv->context_idr, per_file_ctx,
 			     (void *)(unsigned long)m);
 	}
-out_unlock:
-	mutex_unlock(&dev->filelist_mutex);
 
+out_rpm:
 	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
-
+out_unlock:
+	mutex_unlock(&dev->filelist_mutex);
 	return ret;
 }
 
@@ -2434,23 +2304,41 @@
 	return count;
 }
 
+static const char *rps_power_to_str(unsigned int power)
+{
+	static const char * const strings[] = {
+		[LOW_POWER] = "low power",
+		[BETWEEN] = "mixed",
+		[HIGH_POWER] = "high power",
+	};
+
+	if (power >= ARRAY_SIZE(strings) || !strings[power])
+		return "unknown";
+
+	return strings[power];
+}
+
 static int i915_rps_boost_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_file *file;
 
 	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
 	seq_printf(m, "GPU busy? %s [%x]\n",
 		   yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
 	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
-	seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
-		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+	seq_printf(m, "Frequency requested %d\n",
+		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
 		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
 		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
 		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
 		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
+		   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
+		   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+		   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
 
 	mutex_lock(&dev->filelist_mutex);
 	spin_lock(&dev_priv->rps.client_lock);
@@ -2467,27 +2355,44 @@
 			   list_empty(&file_priv->rps.link) ? "" : ", active");
 		rcu_read_unlock();
 	}
-	seq_printf(m, "Semaphore boosts: %d%s\n",
-		   dev_priv->rps.semaphores.boosts,
-		   list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active");
-	seq_printf(m, "MMIO flip boosts: %d%s\n",
-		   dev_priv->rps.mmioflips.boosts,
-		   list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
-	seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
+	seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts);
 	spin_unlock(&dev_priv->rps.client_lock);
 	mutex_unlock(&dev->filelist_mutex);
 
+	if (INTEL_GEN(dev_priv) >= 6 &&
+	    dev_priv->rps.enabled &&
+	    dev_priv->gt.active_engines) {
+		u32 rpup, rpupei;
+		u32 rpdown, rpdownei;
+
+		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
+		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
+		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
+		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
+		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
+			   rps_power_to_str(dev_priv->rps.power));
+		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
+			   100 * rpup / rpupei,
+			   dev_priv->rps.up_threshold);
+		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
+			   100 * rpdown / rpdownei,
+			   dev_priv->rps.down_threshold);
+	} else {
+		seq_puts(m, "\nRPS Autotuning inactive\n");
+	}
+
 	return 0;
 }
 
 static int i915_llc(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	const bool edram = INTEL_GEN(dev_priv) > 8;
 
-	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
+	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
 	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
 		   intel_uncore_edram_size(dev_priv)/1024/1024);
 
@@ -2496,8 +2401,7 @@
 
 static int i915_guc_load_status_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 	u32 tmp, i;
 
@@ -2543,6 +2447,7 @@
 				 struct i915_guc_client *client)
 {
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	uint64_t tot = 0;
 
 	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
@@ -2553,27 +2458,26 @@
 		client->wq_size, client->wq_offset, client->wq_tail);
 
 	seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
-	seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
 	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
 	seq_printf(m, "\tLast submission result: %d\n", client->retcode);
 
-	for_each_engine(engine, dev_priv) {
+	for_each_engine_id(engine, dev_priv, id) {
+		u64 submissions = client->submissions[id];
+		tot += submissions;
 		seq_printf(m, "\tSubmissions: %llu %s\n",
-				client->submissions[engine->id],
-				engine->name);
-		tot += client->submissions[engine->id];
+				submissions, engine->name);
 	}
 	seq_printf(m, "\tTotal: %llu\n", tot);
 }
 
 static int i915_guc_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_guc guc;
 	struct i915_guc_client client = {};
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	u64 total = 0;
 
 	if (!HAS_GUC_SCHED(dev_priv))
@@ -2600,11 +2504,11 @@
 	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
 
 	seq_printf(m, "\nGuC submissions:\n");
-	for_each_engine(engine, dev_priv) {
+	for_each_engine_id(engine, dev_priv, id) {
+		u64 submissions = guc.submissions[id];
+		total += submissions;
 		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
-			engine->name, guc.submissions[engine->id],
-			guc.last_seqno[engine->id]);
-		total += guc.submissions[engine->id];
+			engine->name, submissions, guc.last_seqno[id]);
 	}
 	seq_printf(m, "\t%s: %llu\n", "Total", total);
 
@@ -2618,18 +2522,16 @@
 
 static int i915_guc_log_dump(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
-	u32 *log;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_gem_object *obj;
 	int i = 0, pg;
 
-	if (!log_obj)
+	if (!dev_priv->guc.log_vma)
 		return 0;
 
-	for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) {
-		log = kmap_atomic(i915_gem_object_get_page(log_obj, pg));
+	obj = dev_priv->guc.log_vma->obj;
+	for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) {
+		u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg));
 
 		for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
 			seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
@@ -2646,15 +2548,13 @@
 
 static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	u32 psrperf = 0;
 	u32 stat[3];
 	enum pipe pipe;
 	bool enabled = false;
 
-	if (!HAS_PSR(dev)) {
+	if (!HAS_PSR(dev_priv)) {
 		seq_puts(m, "PSR not supported\n");
 		return 0;
 	}
@@ -2671,7 +2571,7 @@
 	seq_printf(m, "Re-enable work scheduled: %s\n",
 		   yesno(work_busy(&dev_priv->psr.work.work)));
 
-	if (HAS_DDI(dev))
+	if (HAS_DDI(dev_priv))
 		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
 	else {
 		for_each_pipe(dev_priv, pipe) {
@@ -2688,7 +2588,7 @@
 
 	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
 
-	if (!HAS_DDI(dev))
+	if (!HAS_DDI(dev_priv))
 		for_each_pipe(dev_priv, pipe) {
 			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
 			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
@@ -2700,7 +2600,7 @@
 	 * VLV/CHV PSR has no kind of performance counter
 	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
 	 */
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
 			EDP_PSR_PERF_CNT_MASK;
 
@@ -2714,8 +2614,8 @@
 
 static int i915_sink_crc(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_connector *connector;
 	struct intel_dp *intel_dp = NULL;
 	int ret;
@@ -2754,13 +2654,11 @@
 
 static int i915_energy_uJ(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	u64 power;
 	u32 units;
 
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
 		return -ENODEV;
 
 	intel_runtime_pm_get(dev_priv);
@@ -2780,9 +2678,8 @@
 
 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 
 	if (!HAS_RUNTIME_PM(dev_priv))
 		seq_puts(m, "Runtime power management not supported\n");
@@ -2792,22 +2689,20 @@
 		   yesno(!intel_irqs_enabled(dev_priv)));
 #ifdef CONFIG_PM
 	seq_printf(m, "Usage count: %d\n",
-		   atomic_read(&dev->dev->power.usage_count));
+		   atomic_read(&dev_priv->drm.dev->power.usage_count));
 #else
 	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
 #endif
 	seq_printf(m, "PCI device power state: %s [%d]\n",
-		   pci_power_name(dev_priv->drm.pdev->current_state),
-		   dev_priv->drm.pdev->current_state);
+		   pci_power_name(pdev->current_state),
+		   pdev->current_state);
 
 	return 0;
 }
 
 static int i915_power_domain_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	int i;
 
@@ -2840,12 +2735,10 @@
 
 static int i915_dmc_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_csr *csr;
 
-	if (!HAS_CSR(dev)) {
+	if (!HAS_CSR(dev_priv)) {
 		seq_puts(m, "not supported\n");
 		return 0;
 	}
@@ -2863,12 +2756,12 @@
 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
 		   CSR_VERSION_MINOR(csr->version));
 
-	if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) {
+	if (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6)) {
 		seq_printf(m, "DC3 -> DC5 count: %d\n",
 			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
 		seq_printf(m, "DC5 -> DC6 count: %d\n",
 			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
-	} else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) {
+	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
 		seq_printf(m, "DC3 -> DC5 count: %d\n",
 			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
 	}
@@ -2905,8 +2798,8 @@
 			       struct intel_crtc *intel_crtc,
 			       struct intel_encoder *intel_encoder)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct intel_connector *intel_connector;
 	struct drm_encoder *encoder;
@@ -2932,8 +2825,8 @@
 
 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct intel_encoder *intel_encoder;
 	struct drm_plane_state *plane_state = crtc->primary->state;
@@ -2967,6 +2860,9 @@
 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
 		intel_panel_info(m, &intel_connector->panel);
+
+	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
+				&intel_dp->aux);
 }
 
 static void intel_hdmi_info(struct seq_file *m,
@@ -3031,12 +2927,11 @@
 		intel_seq_print_mode(m, 2, mode);
 }
 
-static bool cursor_active(struct drm_device *dev, int pipe)
+static bool cursor_active(struct drm_i915_private *dev_priv, int pipe)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 state;
 
-	if (IS_845G(dev) || IS_I865G(dev))
+	if (IS_845G(dev_priv) || IS_I865G(dev_priv))
 		state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
 	else
 		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
@@ -3044,9 +2939,9 @@
 	return state;
 }
 
-static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
+static bool cursor_position(struct drm_i915_private *dev_priv,
+			    int pipe, int *x, int *y)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 pos;
 
 	pos = I915_READ(CURPOS(pipe));
@@ -3059,7 +2954,7 @@
 	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
 		*y = -*y;
 
-	return cursor_active(dev, pipe);
+	return cursor_active(dev_priv, pipe);
 }
 
 static const char *plane_type(enum drm_plane_type type)
@@ -3089,12 +2984,12 @@
 	 */
 	snprintf(buf, sizeof(buf),
 		 "%s%s%s%s%s%s(0x%08x)",
-		 (rotation & BIT(DRM_ROTATE_0)) ? "0 " : "",
-		 (rotation & BIT(DRM_ROTATE_90)) ? "90 " : "",
-		 (rotation & BIT(DRM_ROTATE_180)) ? "180 " : "",
-		 (rotation & BIT(DRM_ROTATE_270)) ? "270 " : "",
-		 (rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "",
-		 (rotation & BIT(DRM_REFLECT_Y)) ? "FLIPY " : "",
+		 (rotation & DRM_ROTATE_0) ? "0 " : "",
+		 (rotation & DRM_ROTATE_90) ? "90 " : "",
+		 (rotation & DRM_ROTATE_180) ? "180 " : "",
+		 (rotation & DRM_ROTATE_270) ? "270 " : "",
+		 (rotation & DRM_REFLECT_X) ? "FLIPX " : "",
+		 (rotation & DRM_REFLECT_Y) ? "FLIPY " : "",
 		 rotation);
 
 	return buf;
@@ -3102,13 +2997,14 @@
 
 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_plane *intel_plane;
 
 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
 		struct drm_plane_state *state;
 		struct drm_plane *plane = &intel_plane->base;
+		char *format_name;
 
 		if (!plane->state) {
 			seq_puts(m, "plane->state is NULL!\n");
@@ -3117,6 +3013,12 @@
 
 		state = plane->state;
 
+		if (state->fb) {
+			format_name = drm_get_format_name(state->fb->pixel_format);
+		} else {
+			format_name = kstrdup("N/A", GFP_KERNEL);
+		}
+
 		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
 			   plane->base.id,
 			   plane_type(intel_plane->base.type),
@@ -3130,8 +3032,10 @@
 			   ((state->src_w & 0xffff) * 15625) >> 10,
 			   (state->src_h >> 16),
 			   ((state->src_h & 0xffff) * 15625) >> 10,
-			   state->fb ? drm_get_format_name(state->fb->pixel_format) : "N/A",
+			   format_name,
 			   plane_rotation(state->rotation));
+
+		kfree(format_name);
 	}
 }
 
@@ -3165,9 +3069,8 @@
 
 static int i915_display_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_crtc *crtc;
 	struct drm_connector *connector;
 
@@ -3191,7 +3094,7 @@
 		if (pipe_config->base.active) {
 			intel_crtc_info(m, crtc);
 
-			active = cursor_position(dev, crtc->pipe, &x, &y);
+			active = cursor_position(dev_priv, crtc->pipe, &x, &y);
 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
 				   yesno(crtc->cursor_base),
 				   x, y, crtc->base.cursor->state->crtc_w,
@@ -3220,15 +3123,14 @@
 
 static int i915_semaphore_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_engine_cs *engine;
-	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	int num_rings = INTEL_INFO(dev_priv)->num_rings;
 	enum intel_engine_id id;
 	int j, ret;
 
-	if (!i915_semaphore_is_enabled(dev_priv)) {
+	if (!i915.semaphores) {
 		seq_puts(m, "Semaphores are disabled\n");
 		return 0;
 	}
@@ -3238,11 +3140,11 @@
 		return ret;
 	intel_runtime_pm_get(dev_priv);
 
-	if (IS_BROADWELL(dev)) {
+	if (IS_BROADWELL(dev_priv)) {
 		struct page *page;
 		uint64_t *seqno;
 
-		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
+		page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);
 
 		seqno = (uint64_t *)kmap_atomic(page);
 		for_each_engine_id(engine, dev_priv, id) {
@@ -3293,9 +3195,8 @@
 
 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	int i;
 
 	drm_modeset_lock_all(dev);
@@ -3323,9 +3224,8 @@
 	int i;
 	int ret;
 	struct intel_engine_cs *engine;
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct i915_workarounds *workarounds = &dev_priv->workarounds;
 	enum intel_engine_id id;
 
@@ -3361,15 +3261,14 @@
 
 static int i915_ddb_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct skl_ddb_allocation *ddb;
 	struct skl_ddb_entry *entry;
 	enum pipe pipe;
 	int plane;
 
-	if (INTEL_INFO(dev)->gen < 9)
+	if (INTEL_GEN(dev_priv) < 9)
 		return 0;
 
 	drm_modeset_lock_all(dev);
@@ -3399,7 +3298,8 @@
 }
 
 static void drrs_status_per_crtc(struct seq_file *m,
-		struct drm_device *dev, struct intel_crtc *intel_crtc)
+				 struct drm_device *dev,
+				 struct intel_crtc *intel_crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_drrs *drrs = &dev_priv->drrs;
@@ -3468,8 +3368,8 @@
 
 static int i915_drrs_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_crtc *intel_crtc;
 	int active_crtc_cnt = 0;
 
@@ -3492,14 +3392,14 @@
 
 struct pipe_crc_info {
 	const char *name;
-	struct drm_device *dev;
+	struct drm_i915_private *dev_priv;
 	enum pipe pipe;
 };
 
 static int i915_dp_mst_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_encoder *intel_encoder;
 	struct intel_digital_port *intel_dig_port;
 	struct drm_connector *connector;
@@ -3528,10 +3428,10 @@
 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
 {
 	struct pipe_crc_info *info = inode->i_private;
-	struct drm_i915_private *dev_priv = to_i915(info->dev);
+	struct drm_i915_private *dev_priv = info->dev_priv;
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
-	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
+	if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes)
 		return -ENODEV;
 
 	spin_lock_irq(&pipe_crc->lock);
@@ -3552,7 +3452,7 @@
 static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
 {
 	struct pipe_crc_info *info = inode->i_private;
-	struct drm_i915_private *dev_priv = to_i915(info->dev);
+	struct drm_i915_private *dev_priv = info->dev_priv;
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
 	spin_lock_irq(&pipe_crc->lock);
@@ -3579,8 +3479,7 @@
 		   loff_t *pos)
 {
 	struct pipe_crc_info *info = filep->private_data;
-	struct drm_device *dev = info->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = info->dev_priv;
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 	char buf[PIPE_CRC_BUFFER_LEN];
 	int n_entries;
@@ -3621,7 +3520,6 @@
 	while (n_entries > 0) {
 		struct intel_pipe_crc_entry *entry =
 			&pipe_crc->entries[pipe_crc->tail];
-		int ret;
 
 		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
 			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
@@ -3638,8 +3536,7 @@
 
 		spin_unlock_irq(&pipe_crc->lock);
 
-		ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
-		if (ret == PIPE_CRC_LINE_LEN)
+		if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN))
 			return -EFAULT;
 
 		user_buf += PIPE_CRC_LINE_LEN;
@@ -3678,11 +3575,11 @@
 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
 				enum pipe pipe)
 {
-	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = to_i915(minor->dev);
 	struct dentry *ent;
 	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
 
-	info->dev = dev;
+	info->dev_priv = dev_priv;
 	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
 				  &i915_pipe_crc_fops);
 	if (!ent)
@@ -3712,8 +3609,7 @@
 
 static int display_crc_ctl_show(struct seq_file *m, void *data)
 {
-	struct drm_device *dev = m->private;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = m->private;
 	int i;
 
 	for (i = 0; i < I915_MAX_PIPES; i++)
@@ -3725,9 +3621,7 @@
 
 static int display_crc_ctl_open(struct inode *inode, struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
-
-	return single_open(file, display_crc_ctl_show, dev);
+	return single_open(file, display_crc_ctl_show, inode->i_private);
 }
 
 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -3750,9 +3644,11 @@
 	return 0;
 }
 
-static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
+static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+				     enum pipe pipe,
 				     enum intel_pipe_crc_source *source)
 {
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_encoder *encoder;
 	struct intel_crtc *crtc;
 	struct intel_digital_port *dig_port;
@@ -3802,16 +3698,15 @@
 	return ret;
 }
 
-static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
+static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 				enum pipe pipe,
 				enum intel_pipe_crc_source *source,
 				uint32_t *val)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	bool need_stable_symbols = false;
 
 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
-		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+		int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
 		if (ret)
 			return ret;
 	}
@@ -3829,7 +3724,7 @@
 		need_stable_symbols = true;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_DP_D:
-		if (!IS_CHERRYVIEW(dev))
+		if (!IS_CHERRYVIEW(dev_priv))
 			return -EINVAL;
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
 		need_stable_symbols = true;
@@ -3873,16 +3768,15 @@
 	return 0;
 }
 
-static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
+static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 				 enum pipe pipe,
 				 enum intel_pipe_crc_source *source,
 				 uint32_t *val)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	bool need_stable_symbols = false;
 
 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
-		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+		int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
 		if (ret)
 			return ret;
 	}
@@ -3892,24 +3786,24 @@
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_TV:
-		if (!SUPPORTS_TV(dev))
+		if (!SUPPORTS_TV(dev_priv))
 			return -EINVAL;
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_DP_B:
-		if (!IS_G4X(dev))
+		if (!IS_G4X(dev_priv))
 			return -EINVAL;
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
 		need_stable_symbols = true;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_DP_C:
-		if (!IS_G4X(dev))
+		if (!IS_G4X(dev_priv))
 			return -EINVAL;
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
 		need_stable_symbols = true;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_DP_D:
-		if (!IS_G4X(dev))
+		if (!IS_G4X(dev_priv))
 			return -EINVAL;
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
 		need_stable_symbols = true;
@@ -3933,7 +3827,7 @@
 	if (need_stable_symbols) {
 		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
-		WARN_ON(!IS_G4X(dev));
+		WARN_ON(!IS_G4X(dev_priv));
 
 		I915_WRITE(PORT_DFT_I9XX,
 			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
@@ -3949,10 +3843,9 @@
 	return 0;
 }
 
-static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
+static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
 					 enum pipe pipe)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
 	switch (pipe) {
@@ -3974,10 +3867,9 @@
 
 }
 
-static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
+static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
 					 enum pipe pipe)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
 	if (pipe == PIPE_A)
@@ -4018,9 +3910,10 @@
 	return 0;
 }
 
-static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
+					bool enable)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_crtc *crtc =
 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
 	struct intel_crtc_state *pipe_config;
@@ -4054,7 +3947,7 @@
 		drm_atomic_state_free(state);
 }
 
-static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
+static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 				enum pipe pipe,
 				enum intel_pipe_crc_source *source,
 				uint32_t *val)
@@ -4070,8 +3963,8 @@
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_PF:
-		if (IS_HASWELL(dev) && pipe == PIPE_A)
-			hsw_trans_edp_pipe_A_crc_wa(dev, true);
+		if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
+			hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);
 
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
 		break;
@@ -4085,13 +3978,14 @@
 	return 0;
 }
 
-static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
+static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
+			       enum pipe pipe,
 			       enum intel_pipe_crc_source source)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
-									pipe));
+	struct intel_crtc *crtc =
+			to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
 	enum intel_display_power_domain power_domain;
 	u32 val = 0; /* shut up gcc */
 	int ret;
@@ -4109,16 +4003,16 @@
 		return -EIO;
 	}
 
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
-	else if (INTEL_INFO(dev)->gen < 5)
-		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
-	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
-	else if (IS_GEN5(dev) || IS_GEN6(dev))
+	else if (INTEL_GEN(dev_priv) < 5)
+		ret = i9xx_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
+	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		ret = vlv_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
+	else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
 		ret = ilk_pipe_crc_ctl_reg(&source, &val);
 	else
-		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+		ret = ivb_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
 
 	if (ret != 0)
 		goto out;
@@ -4182,12 +4076,12 @@
 
 		kfree(entries);
 
-		if (IS_G4X(dev))
-			g4x_undo_pipe_scramble_reset(dev, pipe);
-		else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-			vlv_undo_pipe_scramble_reset(dev, pipe);
-		else if (IS_HASWELL(dev) && pipe == PIPE_A)
-			hsw_trans_edp_pipe_A_crc_wa(dev, false);
+		if (IS_G4X(dev_priv))
+			g4x_undo_pipe_scramble_reset(dev_priv, pipe);
+		else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+			vlv_undo_pipe_scramble_reset(dev_priv, pipe);
+		else if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
+			hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
 
 		hsw_enable_ips(crtc);
 	}
@@ -4291,7 +4185,8 @@
 	return -EINVAL;
 }
 
-static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
+static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
+				 char *buf, size_t len)
 {
 #define N_WORDS 3
 	int n_words;
@@ -4322,14 +4217,14 @@
 		return -EINVAL;
 	}
 
-	return pipe_crc_set_source(dev, pipe, source);
+	return pipe_crc_set_source(dev_priv, pipe, source);
 }
 
 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
 				     size_t len, loff_t *offp)
 {
 	struct seq_file *m = file->private_data;
-	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = m->private;
 	char *tmpbuf;
 	int ret;
 
@@ -4352,7 +4247,7 @@
 	}
 	tmpbuf[len] = '\0';
 
-	ret = display_crc_ctl_parse(dev, tmpbuf, len);
+	ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);
 
 out:
 	kfree(tmpbuf);
@@ -4373,8 +4268,8 @@
 };
 
 static ssize_t i915_displayport_test_active_write(struct file *file,
-					    const char __user *ubuf,
-					    size_t len, loff_t *offp)
+						  const char __user *ubuf,
+						  size_t len, loff_t *offp)
 {
 	char *input_buffer;
 	int status = 0;
@@ -4404,7 +4299,6 @@
 	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
 
 	list_for_each_entry(connector, connector_list, head) {
-
 		if (connector->connector_type !=
 		    DRM_MODE_CONNECTOR_DisplayPort)
 			continue;
@@ -4442,7 +4336,6 @@
 	struct intel_dp *intel_dp;
 
 	list_for_each_entry(connector, connector_list, head) {
-
 		if (connector->connector_type !=
 		    DRM_MODE_CONNECTOR_DisplayPort)
 			continue;
@@ -4462,11 +4355,12 @@
 }
 
 static int i915_displayport_test_active_open(struct inode *inode,
-				       struct file *file)
+					     struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = inode->i_private;
 
-	return single_open(file, i915_displayport_test_active_show, dev);
+	return single_open(file, i915_displayport_test_active_show,
+			   &dev_priv->drm);
 }
 
 static const struct file_operations i915_displayport_test_active_fops = {
@@ -4486,7 +4380,6 @@
 	struct intel_dp *intel_dp;
 
 	list_for_each_entry(connector, connector_list, head) {
-
 		if (connector->connector_type !=
 		    DRM_MODE_CONNECTOR_DisplayPort)
 			continue;
@@ -4502,11 +4395,12 @@
 	return 0;
 }
 static int i915_displayport_test_data_open(struct inode *inode,
-				       struct file *file)
+					   struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = inode->i_private;
 
-	return single_open(file, i915_displayport_test_data_show, dev);
+	return single_open(file, i915_displayport_test_data_show,
+			   &dev_priv->drm);
 }
 
 static const struct file_operations i915_displayport_test_data_fops = {
@@ -4525,7 +4419,6 @@
 	struct intel_dp *intel_dp;
 
 	list_for_each_entry(connector, connector_list, head) {
-
 		if (connector->connector_type !=
 		    DRM_MODE_CONNECTOR_DisplayPort)
 			continue;
@@ -4544,9 +4437,10 @@
 static int i915_displayport_test_type_open(struct inode *inode,
 				       struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = inode->i_private;
 
-	return single_open(file, i915_displayport_test_type_show, dev);
+	return single_open(file, i915_displayport_test_type_show,
+			   &dev_priv->drm);
 }
 
 static const struct file_operations i915_displayport_test_type_fops = {
@@ -4559,13 +4453,14 @@
 
 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 {
-	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = m->private;
+	struct drm_device *dev = &dev_priv->drm;
 	int level;
 	int num_levels;
 
-	if (IS_CHERRYVIEW(dev))
+	if (IS_CHERRYVIEW(dev_priv))
 		num_levels = 3;
-	else if (IS_VALLEYVIEW(dev))
+	else if (IS_VALLEYVIEW(dev_priv))
 		num_levels = 1;
 	else
 		num_levels = ilk_wm_max_level(dev) + 1;
@@ -4579,8 +4474,8 @@
 		 * - WM1+ latency values in 0.5us units
 		 * - latencies are in us on gen9/vlv/chv
 		 */
-		if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) ||
-		    IS_CHERRYVIEW(dev))
+		if (INTEL_GEN(dev_priv) >= 9 || IS_VALLEYVIEW(dev_priv) ||
+		    IS_CHERRYVIEW(dev_priv))
 			latency *= 10;
 		else if (level > 0)
 			latency *= 5;
@@ -4594,14 +4489,13 @@
 
 static int pri_wm_latency_show(struct seq_file *m, void *data)
 {
-	struct drm_device *dev = m->private;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = m->private;
 	const uint16_t *latencies;
 
-	if (INTEL_INFO(dev)->gen >= 9)
+	if (INTEL_GEN(dev_priv) >= 9)
 		latencies = dev_priv->wm.skl_latency;
 	else
-		latencies = to_i915(dev)->wm.pri_latency;
+		latencies = dev_priv->wm.pri_latency;
 
 	wm_latency_show(m, latencies);
 
@@ -4610,14 +4504,13 @@
 
 static int spr_wm_latency_show(struct seq_file *m, void *data)
 {
-	struct drm_device *dev = m->private;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = m->private;
 	const uint16_t *latencies;
 
-	if (INTEL_INFO(dev)->gen >= 9)
+	if (INTEL_GEN(dev_priv) >= 9)
 		latencies = dev_priv->wm.skl_latency;
 	else
-		latencies = to_i915(dev)->wm.spr_latency;
+		latencies = dev_priv->wm.spr_latency;
 
 	wm_latency_show(m, latencies);
 
@@ -4626,14 +4519,13 @@
 
 static int cur_wm_latency_show(struct seq_file *m, void *data)
 {
-	struct drm_device *dev = m->private;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = m->private;
 	const uint16_t *latencies;
 
-	if (INTEL_INFO(dev)->gen >= 9)
+	if (INTEL_GEN(dev_priv) >= 9)
 		latencies = dev_priv->wm.skl_latency;
 	else
-		latencies = to_i915(dev)->wm.cur_latency;
+		latencies = dev_priv->wm.cur_latency;
 
 	wm_latency_show(m, latencies);
 
@@ -4642,48 +4534,49 @@
 
 static int pri_wm_latency_open(struct inode *inode, struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = inode->i_private;
 
-	if (INTEL_INFO(dev)->gen < 5)
+	if (INTEL_GEN(dev_priv) < 5)
 		return -ENODEV;
 
-	return single_open(file, pri_wm_latency_show, dev);
+	return single_open(file, pri_wm_latency_show, dev_priv);
 }
 
 static int spr_wm_latency_open(struct inode *inode, struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = inode->i_private;
 
-	if (HAS_GMCH_DISPLAY(dev))
+	if (HAS_GMCH_DISPLAY(dev_priv))
 		return -ENODEV;
 
-	return single_open(file, spr_wm_latency_show, dev);
+	return single_open(file, spr_wm_latency_show, dev_priv);
 }
 
 static int cur_wm_latency_open(struct inode *inode, struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = inode->i_private;
 
-	if (HAS_GMCH_DISPLAY(dev))
+	if (HAS_GMCH_DISPLAY(dev_priv))
 		return -ENODEV;
 
-	return single_open(file, cur_wm_latency_show, dev);
+	return single_open(file, cur_wm_latency_show, dev_priv);
 }
 
 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
 				size_t len, loff_t *offp, uint16_t wm[8])
 {
 	struct seq_file *m = file->private_data;
-	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = m->private;
+	struct drm_device *dev = &dev_priv->drm;
 	uint16_t new[8] = { 0 };
 	int num_levels;
 	int level;
 	int ret;
 	char tmp[32];
 
-	if (IS_CHERRYVIEW(dev))
+	if (IS_CHERRYVIEW(dev_priv))
 		num_levels = 3;
-	else if (IS_VALLEYVIEW(dev))
+	else if (IS_VALLEYVIEW(dev_priv))
 		num_levels = 1;
 	else
 		num_levels = ilk_wm_max_level(dev) + 1;
@@ -4717,14 +4610,13 @@
 				    size_t len, loff_t *offp)
 {
 	struct seq_file *m = file->private_data;
-	struct drm_device *dev = m->private;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = m->private;
 	uint16_t *latencies;
 
-	if (INTEL_INFO(dev)->gen >= 9)
+	if (INTEL_GEN(dev_priv) >= 9)
 		latencies = dev_priv->wm.skl_latency;
 	else
-		latencies = to_i915(dev)->wm.pri_latency;
+		latencies = dev_priv->wm.pri_latency;
 
 	return wm_latency_write(file, ubuf, len, offp, latencies);
 }
@@ -4733,14 +4625,13 @@
 				    size_t len, loff_t *offp)
 {
 	struct seq_file *m = file->private_data;
-	struct drm_device *dev = m->private;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = m->private;
 	uint16_t *latencies;
 
-	if (INTEL_INFO(dev)->gen >= 9)
+	if (INTEL_GEN(dev_priv) >= 9)
 		latencies = dev_priv->wm.skl_latency;
 	else
-		latencies = to_i915(dev)->wm.spr_latency;
+		latencies = dev_priv->wm.spr_latency;
 
 	return wm_latency_write(file, ubuf, len, offp, latencies);
 }
@@ -4749,14 +4640,13 @@
 				    size_t len, loff_t *offp)
 {
 	struct seq_file *m = file->private_data;
-	struct drm_device *dev = m->private;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = m->private;
 	uint16_t *latencies;
 
-	if (INTEL_INFO(dev)->gen >= 9)
+	if (INTEL_GEN(dev_priv) >= 9)
 		latencies = dev_priv->wm.skl_latency;
 	else
-		latencies = to_i915(dev)->wm.cur_latency;
+		latencies = dev_priv->wm.cur_latency;
 
 	return wm_latency_write(file, ubuf, len, offp, latencies);
 }
@@ -4791,8 +4681,7 @@
 static int
 i915_wedged_get(void *data, u64 *val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 
 	*val = i915_terminally_wedged(&dev_priv->gpu_error);
 
@@ -4802,8 +4691,7 @@
 static int
 i915_wedged_set(void *data, u64 val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 
 	/*
 	 * There is no safeguard against this debugfs entry colliding
@@ -4833,8 +4721,7 @@
 static int
 i915_ring_missed_irq_get(void *data, u64 *val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 
 	*val = dev_priv->gpu_error.missed_irq_rings;
 	return 0;
@@ -4843,8 +4730,8 @@
 static int
 i915_ring_missed_irq_set(void *data, u64 val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
+	struct drm_device *dev = &dev_priv->drm;
 	int ret;
 
 	/* Lock against concurrent debugfs callers */
@@ -4864,8 +4751,7 @@
 static int
 i915_ring_test_irq_get(void *data, u64 *val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 
 	*val = dev_priv->gpu_error.test_irq_rings;
 
@@ -4875,8 +4761,7 @@
 static int
 i915_ring_test_irq_set(void *data, u64 val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 
 	val &= INTEL_INFO(dev_priv)->ring_mask;
 	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
@@ -4908,8 +4793,8 @@
 static int
 i915_drop_caches_set(void *data, u64 val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
+	struct drm_device *dev = &dev_priv->drm;
 	int ret;
 
 	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -4921,7 +4806,9 @@
 		return ret;
 
 	if (val & DROP_ACTIVE) {
-		ret = i915_gem_wait_for_idle(dev_priv);
+		ret = i915_gem_wait_for_idle(dev_priv,
+					     I915_WAIT_INTERRUPTIBLE |
+					     I915_WAIT_LOCKED);
 		if (ret)
 			goto unlock;
 	}
@@ -4948,38 +4835,25 @@
 static int
 i915_max_freq_get(void *data, u64 *val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int ret;
+	struct drm_i915_private *dev_priv = data;
 
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
 		return -ENODEV;
 
-	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
-	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
-	if (ret)
-		return ret;
-
 	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
-	mutex_unlock(&dev_priv->rps.hw_lock);
-
 	return 0;
 }
 
 static int
 i915_max_freq_set(void *data, u64 val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 	u32 hw_max, hw_min;
 	int ret;
 
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
 		return -ENODEV;
 
-	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
 	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
 
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -5015,38 +4889,25 @@
 static int
 i915_min_freq_get(void *data, u64 *val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int ret;
+	struct drm_i915_private *dev_priv = data;
 
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
 		return -ENODEV;
 
-	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
-	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
-	if (ret)
-		return ret;
-
 	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
-	mutex_unlock(&dev_priv->rps.hw_lock);
-
 	return 0;
 }
 
 static int
 i915_min_freq_set(void *data, u64 val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 	u32 hw_max, hw_min;
 	int ret;
 
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
 		return -ENODEV;
 
-	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
 	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
 
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -5061,7 +4922,8 @@
 	hw_max = dev_priv->rps.max_freq;
 	hw_min = dev_priv->rps.min_freq;
 
-	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
+	if (val < hw_min ||
+	    val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
 		return -EINVAL;
 	}
@@ -5082,12 +4944,12 @@
 static int
 i915_cache_sharing_get(void *data, u64 *val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
+	struct drm_device *dev = &dev_priv->drm;
 	u32 snpcr;
 	int ret;
 
-	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
 		return -ENODEV;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -5098,7 +4960,7 @@
 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 
 	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&dev->struct_mutex);
 
 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
 
@@ -5108,11 +4970,10 @@
 static int
 i915_cache_sharing_set(void *data, u64 val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 	u32 snpcr;
 
-	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
 		return -ENODEV;
 
 	if (val > 3)
@@ -5135,18 +4996,9 @@
 			i915_cache_sharing_get, i915_cache_sharing_set,
 			"%llu\n");
 
-struct sseu_dev_status {
-	unsigned int slice_total;
-	unsigned int subslice_total;
-	unsigned int subslice_per_slice;
-	unsigned int eu_total;
-	unsigned int eu_per_subslice;
-};
-
-static void cherryview_sseu_device_status(struct drm_device *dev,
-					  struct sseu_dev_status *stat)
+static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
+					  struct sseu_dev_info *sseu)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ss_max = 2;
 	int ss;
 	u32 sig1[ss_max], sig2[ss_max];
@@ -5163,28 +5015,27 @@
 			/* skip disabled subslice */
 			continue;
 
-		stat->slice_total = 1;
-		stat->subslice_per_slice++;
+		sseu->slice_mask = BIT(0);
+		sseu->subslice_mask |= BIT(ss);
 		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
 			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
 			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
 			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
-		stat->eu_total += eu_cnt;
-		stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
+		sseu->eu_total += eu_cnt;
+		sseu->eu_per_subslice = max_t(unsigned int,
+					      sseu->eu_per_subslice, eu_cnt);
 	}
-	stat->subslice_total = stat->subslice_per_slice;
 }
 
-static void gen9_sseu_device_status(struct drm_device *dev,
-				    struct sseu_dev_status *stat)
+static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
+				    struct sseu_dev_info *sseu)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	int s_max = 3, ss_max = 4;
 	int s, ss;
 	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
 
 	/* BXT has a single slice and at most 3 subslices. */
-	if (IS_BROXTON(dev)) {
+	if (IS_BROXTON(dev_priv)) {
 		s_max = 1;
 		ss_max = 3;
 	}
@@ -5205,126 +5056,134 @@
 		     GEN9_PGCTL_SSB_EU311_ACK;
 
 	for (s = 0; s < s_max; s++) {
-		unsigned int ss_cnt = 0;
-
 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
 			/* skip disabled slice */
 			continue;
 
-		stat->slice_total++;
+		sseu->slice_mask |= BIT(s);
 
-		if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
-			ss_cnt = INTEL_INFO(dev)->subslice_per_slice;
+		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+			sseu->subslice_mask =
+				INTEL_INFO(dev_priv)->sseu.subslice_mask;
 
 		for (ss = 0; ss < ss_max; ss++) {
 			unsigned int eu_cnt;
 
-			if (IS_BROXTON(dev) &&
-			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
-				/* skip disabled subslice */
-				continue;
+			if (IS_BROXTON(dev_priv)) {
+				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+					/* skip disabled subslice */
+					continue;
 
-			if (IS_BROXTON(dev))
-				ss_cnt++;
+				sseu->subslice_mask |= BIT(ss);
+			}
 
 			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
 					       eu_mask[ss%2]);
-			stat->eu_total += eu_cnt;
-			stat->eu_per_subslice = max(stat->eu_per_subslice,
-						    eu_cnt);
+			sseu->eu_total += eu_cnt;
+			sseu->eu_per_subslice = max_t(unsigned int,
+						      sseu->eu_per_subslice,
+						      eu_cnt);
 		}
-
-		stat->subslice_total += ss_cnt;
-		stat->subslice_per_slice = max(stat->subslice_per_slice,
-					       ss_cnt);
 	}
 }
 
-static void broadwell_sseu_device_status(struct drm_device *dev,
-					 struct sseu_dev_status *stat)
+static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
+					 struct sseu_dev_info *sseu)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int s;
 	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
+	int s;
 
-	stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);
+	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
 
-	if (stat->slice_total) {
-		stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
-		stat->subslice_total = stat->slice_total *
-				       stat->subslice_per_slice;
-		stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
-		stat->eu_total = stat->eu_per_subslice * stat->subslice_total;
+	if (sseu->slice_mask) {
+		sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
+		sseu->eu_per_subslice =
+				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
+		sseu->eu_total = sseu->eu_per_subslice *
+				 sseu_subslice_total(sseu);
 
 		/* subtract fused off EU(s) from enabled slice(s) */
-		for (s = 0; s < stat->slice_total; s++) {
-			u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];
+		for (s = 0; s < fls(sseu->slice_mask); s++) {
+			u8 subslice_7eu =
+				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
 
-			stat->eu_total -= hweight8(subslice_7eu);
+			sseu->eu_total -= hweight8(subslice_7eu);
 		}
 	}
 }
 
+static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
+				 const struct sseu_dev_info *sseu)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	const char *type = is_available_info ? "Available" : "Enabled";
+
+	seq_printf(m, "  %s Slice Mask: %04x\n", type,
+		   sseu->slice_mask);
+	seq_printf(m, "  %s Slice Total: %u\n", type,
+		   hweight8(sseu->slice_mask));
+	seq_printf(m, "  %s Subslice Total: %u\n", type,
+		   sseu_subslice_total(sseu));
+	seq_printf(m, "  %s Subslice Mask: %04x\n", type,
+		   sseu->subslice_mask);
+	seq_printf(m, "  %s Subslice Per Slice: %u\n", type,
+		   hweight8(sseu->subslice_mask));
+	seq_printf(m, "  %s EU Total: %u\n", type,
+		   sseu->eu_total);
+	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
+		   sseu->eu_per_subslice);
+
+	if (!is_available_info)
+		return;
+
+	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
+	if (HAS_POOLED_EU(dev_priv))
+		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
+
+	seq_printf(m, "  Has Slice Power Gating: %s\n",
+		   yesno(sseu->has_slice_pg));
+	seq_printf(m, "  Has Subslice Power Gating: %s\n",
+		   yesno(sseu->has_subslice_pg));
+	seq_printf(m, "  Has EU Power Gating: %s\n",
+		   yesno(sseu->has_eu_pg));
+}
+
 static int i915_sseu_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct sseu_dev_status stat;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct sseu_dev_info sseu;
 
-	if (INTEL_INFO(dev)->gen < 8)
+	if (INTEL_GEN(dev_priv) < 8)
 		return -ENODEV;
 
 	seq_puts(m, "SSEU Device Info\n");
-	seq_printf(m, "  Available Slice Total: %u\n",
-		   INTEL_INFO(dev)->slice_total);
-	seq_printf(m, "  Available Subslice Total: %u\n",
-		   INTEL_INFO(dev)->subslice_total);
-	seq_printf(m, "  Available Subslice Per Slice: %u\n",
-		   INTEL_INFO(dev)->subslice_per_slice);
-	seq_printf(m, "  Available EU Total: %u\n",
-		   INTEL_INFO(dev)->eu_total);
-	seq_printf(m, "  Available EU Per Subslice: %u\n",
-		   INTEL_INFO(dev)->eu_per_subslice);
-	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev)));
-	if (HAS_POOLED_EU(dev))
-		seq_printf(m, "  Min EU in pool: %u\n",
-			   INTEL_INFO(dev)->min_eu_in_pool);
-	seq_printf(m, "  Has Slice Power Gating: %s\n",
-		   yesno(INTEL_INFO(dev)->has_slice_pg));
-	seq_printf(m, "  Has Subslice Power Gating: %s\n",
-		   yesno(INTEL_INFO(dev)->has_subslice_pg));
-	seq_printf(m, "  Has EU Power Gating: %s\n",
-		   yesno(INTEL_INFO(dev)->has_eu_pg));
+	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
 
 	seq_puts(m, "SSEU Device Status\n");
-	memset(&stat, 0, sizeof(stat));
-	if (IS_CHERRYVIEW(dev)) {
-		cherryview_sseu_device_status(dev, &stat);
-	} else if (IS_BROADWELL(dev)) {
-		broadwell_sseu_device_status(dev, &stat);
-	} else if (INTEL_INFO(dev)->gen >= 9) {
-		gen9_sseu_device_status(dev, &stat);
+	memset(&sseu, 0, sizeof(sseu));
+
+	intel_runtime_pm_get(dev_priv);
+
+	if (IS_CHERRYVIEW(dev_priv)) {
+		cherryview_sseu_device_status(dev_priv, &sseu);
+	} else if (IS_BROADWELL(dev_priv)) {
+		broadwell_sseu_device_status(dev_priv, &sseu);
+	} else if (INTEL_GEN(dev_priv) >= 9) {
+		gen9_sseu_device_status(dev_priv, &sseu);
 	}
-	seq_printf(m, "  Enabled Slice Total: %u\n",
-		   stat.slice_total);
-	seq_printf(m, "  Enabled Subslice Total: %u\n",
-		   stat.subslice_total);
-	seq_printf(m, "  Enabled Subslice Per Slice: %u\n",
-		   stat.subslice_per_slice);
-	seq_printf(m, "  Enabled EU Total: %u\n",
-		   stat.eu_total);
-	seq_printf(m, "  Enabled EU Per Subslice: %u\n",
-		   stat.eu_per_subslice);
+
+	intel_runtime_pm_put(dev_priv);
+
+	i915_print_sseu_info(m, false, &sseu);
 
 	return 0;
 }
 
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = inode->i_private;
 
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
 		return 0;
 
 	intel_runtime_pm_get(dev_priv);
@@ -5335,10 +5194,9 @@
 
 static int i915_forcewake_release(struct inode *inode, struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = inode->i_private;
 
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
 		return 0;
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5355,12 +5213,11 @@
 
 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
 {
-	struct drm_device *dev = minor->dev;
 	struct dentry *ent;
 
 	ent = debugfs_create_file("i915_forcewake_user",
 				  S_IRUSR,
-				  root, dev,
+				  root, to_i915(minor->dev),
 				  &i915_forcewake_fops);
 	if (!ent)
 		return -ENOMEM;
@@ -5373,12 +5230,11 @@
 			       const char *name,
 			       const struct file_operations *fops)
 {
-	struct drm_device *dev = minor->dev;
 	struct dentry *ent;
 
 	ent = debugfs_create_file(name,
 				  S_IRUGO | S_IWUSR,
-				  root, dev,
+				  root, to_i915(minor->dev),
 				  fops);
 	if (!ent)
 		return -ENOMEM;
@@ -5390,9 +5246,7 @@
 	{"i915_capabilities", i915_capabilities, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
-	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
-	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
-	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+	{"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
 	{"i915_gem_stolen", i915_gem_stolen_list_info },
 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
 	{"i915_gem_request", i915_gem_request_info, 0},
@@ -5467,9 +5321,8 @@
 	{"i915_dp_test_active", &i915_displayport_test_active_fops}
 };
 
-void intel_display_crc_init(struct drm_device *dev)
+void intel_display_crc_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe;
 
 	for_each_pipe(dev_priv, pipe) {
@@ -5517,7 +5370,7 @@
 	drm_debugfs_remove_files(i915_debugfs_list,
 				 I915_DEBUGFS_ENTRIES, minor);
 
-	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
+	drm_debugfs_remove_files((struct drm_info_list *)&i915_forcewake_fops,
 				 1, minor);
 
 	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
@@ -5529,7 +5382,7 @@
 
 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
 		struct drm_info_list *info_list =
-			(struct drm_info_list *) i915_debugfs_files[i].fops;
+			(struct drm_info_list *)i915_debugfs_files[i].fops;
 
 		drm_debugfs_remove_files(info_list, 1, minor);
 	}
@@ -5609,6 +5462,40 @@
 	.release = single_release,
 };
 
+static int i915_panel_show(struct seq_file *m, void *data)
+{
+	struct drm_connector *connector = m->private;
+	struct intel_dp *intel_dp =
+		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+
+	if (connector->status != connector_status_connected)
+		return -ENODEV;
+
+	seq_printf(m, "Panel power up delay: %d\n",
+		   intel_dp->panel_power_up_delay);
+	seq_printf(m, "Panel power down delay: %d\n",
+		   intel_dp->panel_power_down_delay);
+	seq_printf(m, "Backlight on delay: %d\n",
+		   intel_dp->backlight_on_delay);
+	seq_printf(m, "Backlight off delay: %d\n",
+		   intel_dp->backlight_off_delay);
+
+	return 0;
+}
+
+static int i915_panel_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, i915_panel_show, inode->i_private);
+}
+
+static const struct file_operations i915_panel_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_panel_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
 /**
  * i915_debugfs_connector_add - add i915 specific connector debugfs files
  * @connector: pointer to a registered drm_connector
@@ -5628,8 +5515,12 @@
 
 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
-		debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,
-				    &i915_dpcd_fops);
+		debugfs_create_file("i915_dpcd", S_IRUGO, root,
+				    connector, &i915_dpcd_fops);
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
+				    connector, &i915_panel_fops);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5de36d8..bfb2efd 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -77,7 +77,7 @@
 	      const char *fmt, ...)
 {
 	static bool shown_bug_once;
-	struct device *dev = dev_priv->drm.dev;
+	struct device *kdev = dev_priv->drm.dev;
 	bool is_error = level[1] <= KERN_ERR[1];
 	bool is_debug = level[1] == KERN_DEBUG[1];
 	struct va_format vaf;
@@ -91,11 +91,11 @@
 	vaf.fmt = fmt;
 	vaf.va = &args;
 
-	dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
+	dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
 		   __builtin_return_address(0), &vaf);
 
 	if (is_error && !shown_bug_once) {
-		dev_notice(dev, "%s", FDO_BUG_MSG);
+		dev_notice(kdev, "%s", FDO_BUG_MSG);
 		shown_bug_once = true;
 	}
 
@@ -228,31 +228,11 @@
 	pci_dev_put(pch);
 }
 
-bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
-{
-	if (INTEL_GEN(dev_priv) < 6)
-		return false;
-
-	if (i915.semaphores >= 0)
-		return i915.semaphores;
-
-	/* TODO: make semaphores and Execlists play nicely together */
-	if (i915.enable_execlists)
-		return false;
-
-#ifdef CONFIG_INTEL_IOMMU
-	/* Enable semaphores on SNB when IO remapping is off */
-	if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
-		return false;
-#endif
-
-	return true;
-}
-
 static int i915_getparam(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	drm_i915_getparam_t *param = data;
 	int value;
 
@@ -263,13 +243,10 @@
 		/* Reject all old ums/dri params. */
 		return -ENODEV;
 	case I915_PARAM_CHIPSET_ID:
-		value = dev->pdev->device;
+		value = pdev->device;
 		break;
 	case I915_PARAM_REVISION:
-		value = dev->pdev->revision;
-		break;
-	case I915_PARAM_HAS_GEM:
-		value = 1;
+		value = pdev->revision;
 		break;
 	case I915_PARAM_NUM_FENCES_AVAIL:
 		value = dev_priv->num_fence_regs;
@@ -277,13 +254,6 @@
 	case I915_PARAM_HAS_OVERLAY:
 		value = dev_priv->overlay ? 1 : 0;
 		break;
-	case I915_PARAM_HAS_PAGEFLIPPING:
-		value = 1;
-		break;
-	case I915_PARAM_HAS_EXECBUF2:
-		/* depends on GEM */
-		value = 1;
-		break;
 	case I915_PARAM_HAS_BSD:
 		value = intel_engine_initialized(&dev_priv->engine[VCS]);
 		break;
@@ -296,67 +266,34 @@
 	case I915_PARAM_HAS_BSD2:
 		value = intel_engine_initialized(&dev_priv->engine[VCS2]);
 		break;
-	case I915_PARAM_HAS_RELAXED_FENCING:
-		value = 1;
-		break;
-	case I915_PARAM_HAS_COHERENT_RINGS:
-		value = 1;
-		break;
 	case I915_PARAM_HAS_EXEC_CONSTANTS:
-		value = INTEL_INFO(dev)->gen >= 4;
-		break;
-	case I915_PARAM_HAS_RELAXED_DELTA:
-		value = 1;
-		break;
-	case I915_PARAM_HAS_GEN7_SOL_RESET:
-		value = 1;
+		value = INTEL_GEN(dev_priv) >= 4;
 		break;
 	case I915_PARAM_HAS_LLC:
-		value = HAS_LLC(dev);
+		value = HAS_LLC(dev_priv);
 		break;
 	case I915_PARAM_HAS_WT:
-		value = HAS_WT(dev);
+		value = HAS_WT(dev_priv);
 		break;
 	case I915_PARAM_HAS_ALIASING_PPGTT:
-		value = USES_PPGTT(dev);
-		break;
-	case I915_PARAM_HAS_WAIT_TIMEOUT:
-		value = 1;
+		value = USES_PPGTT(dev_priv);
 		break;
 	case I915_PARAM_HAS_SEMAPHORES:
-		value = i915_semaphore_is_enabled(dev_priv);
-		break;
-	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
-		value = 1;
+		value = i915.semaphores;
 		break;
 	case I915_PARAM_HAS_SECURE_BATCHES:
 		value = capable(CAP_SYS_ADMIN);
 		break;
-	case I915_PARAM_HAS_PINNED_BATCHES:
-		value = 1;
-		break;
-	case I915_PARAM_HAS_EXEC_NO_RELOC:
-		value = 1;
-		break;
-	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
-		value = 1;
-		break;
 	case I915_PARAM_CMD_PARSER_VERSION:
 		value = i915_cmd_parser_get_version(dev_priv);
 		break;
-	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
-		value = 1;
-		break;
-	case I915_PARAM_MMAP_VERSION:
-		value = 1;
-		break;
 	case I915_PARAM_SUBSLICE_TOTAL:
-		value = INTEL_INFO(dev)->subslice_total;
+		value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
 		if (!value)
 			return -ENODEV;
 		break;
 	case I915_PARAM_EU_TOTAL:
-		value = INTEL_INFO(dev)->eu_total;
+		value = INTEL_INFO(dev_priv)->sseu.eu_total;
 		if (!value)
 			return -ENODEV;
 		break;
@@ -364,16 +301,43 @@
 		value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
 		break;
 	case I915_PARAM_HAS_RESOURCE_STREAMER:
-		value = HAS_RESOURCE_STREAMER(dev);
-		break;
-	case I915_PARAM_HAS_EXEC_SOFTPIN:
-		value = 1;
+		value = HAS_RESOURCE_STREAMER(dev_priv);
 		break;
 	case I915_PARAM_HAS_POOLED_EU:
-		value = HAS_POOLED_EU(dev);
+		value = HAS_POOLED_EU(dev_priv);
 		break;
 	case I915_PARAM_MIN_EU_IN_POOL:
-		value = INTEL_INFO(dev)->min_eu_in_pool;
+		value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
+		break;
+	case I915_PARAM_MMAP_GTT_VERSION:
+		/* Though we've started our numbering from 1, and so class all
+		 * earlier versions as 0, in effect their value is undefined as
+		 * the ioctl will report EINVAL for the unknown param!
+		 */
+		value = i915_gem_mmap_gtt_version();
+		break;
+	case I915_PARAM_MMAP_VERSION:
+		/* Remember to bump this if the version changes! */
+	case I915_PARAM_HAS_GEM:
+	case I915_PARAM_HAS_PAGEFLIPPING:
+	case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
+	case I915_PARAM_HAS_RELAXED_FENCING:
+	case I915_PARAM_HAS_COHERENT_RINGS:
+	case I915_PARAM_HAS_RELAXED_DELTA:
+	case I915_PARAM_HAS_GEN7_SOL_RESET:
+	case I915_PARAM_HAS_WAIT_TIMEOUT:
+	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+	case I915_PARAM_HAS_PINNED_BATCHES:
+	case I915_PARAM_HAS_EXEC_NO_RELOC:
+	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+	case I915_PARAM_HAS_EXEC_SOFTPIN:
+		/* For the time being all of these are always true;
+		 * if some supported hardware does not have one of these
+		 * features this value needs to be provided from
+		 * INTEL_INFO(), a feature macro, or similar.
+		 */
+		value = 1;
 		break;
 	default:
 		DRM_DEBUG("Unknown parameter %d\n", param->param);
@@ -537,7 +501,7 @@
 		pr_info("switched on\n");
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		/* i915 resume handler doesn't set to D0 */
-		pci_set_power_state(dev->pdev, PCI_D0);
+		pci_set_power_state(pdev, PCI_D0);
 		i915_resume_switcheroo(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	} else {
@@ -595,7 +559,6 @@
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_reset(dev);
 	i915_gem_cleanup_engines(dev);
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
@@ -606,6 +569,7 @@
 static int i915_load_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	int ret;
 
 	if (i915_inject_load_failure())
@@ -622,13 +586,13 @@
 	 * then we do not take part in VGA arbitration and the
 	 * vga_client_register() fails with -ENODEV.
 	 */
-	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+	ret = vga_client_register(pdev, dev, NULL, i915_vga_set_decode);
 	if (ret && ret != -ENODEV)
 		goto out;
 
 	intel_register_dsm_handler();
 
-	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
+	ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
 	if (ret)
 		goto cleanup_vga_client;
 
@@ -680,9 +644,9 @@
 cleanup_csr:
 	intel_csr_ucode_fini(dev_priv);
 	intel_power_domains_fini(dev_priv);
-	vga_switcheroo_unregister_client(dev->pdev);
+	vga_switcheroo_unregister_client(pdev);
 cleanup_vga_client:
-	vga_client_register(dev->pdev, NULL, NULL, NULL);
+	vga_client_register(pdev, NULL, NULL, NULL);
 out:
 	return ret;
 }
@@ -706,7 +670,7 @@
 	primary =
 		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 
-	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
 
 	kfree(ap);
 
@@ -848,6 +812,8 @@
 	mutex_init(&dev_priv->wm.wm_mutex);
 	mutex_init(&dev_priv->pps_mutex);
 
+	i915_memcpy_init_early(dev_priv);
+
 	ret = i915_workqueues_init(dev_priv);
 	if (ret < 0)
 		return ret;
@@ -868,7 +834,7 @@
 	intel_init_audio_hooks(dev_priv);
 	i915_gem_load_init(&dev_priv->drm);
 
-	intel_display_crc_init(&dev_priv->drm);
+	intel_display_crc_init(dev_priv);
 
 	intel_device_info_dump(dev_priv);
 
@@ -900,6 +866,7 @@
 static int i915_mmio_setup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	int mmio_bar;
 	int mmio_size;
 
@@ -916,7 +883,7 @@
 		mmio_size = 512 * 1024;
 	else
 		mmio_size = 2 * 1024 * 1024;
-	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+	dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
 	if (dev_priv->regs == NULL) {
 		DRM_ERROR("failed to map registers\n");
 
@@ -932,9 +899,10 @@
 static void i915_mmio_cleanup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 
 	intel_teardown_mchbar(dev);
-	pci_iounmap(dev->pdev, dev_priv->regs);
+	pci_iounmap(pdev, dev_priv->regs);
 }
 
 /**
@@ -999,6 +967,9 @@
 	i915.enable_ppgtt =
 		intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
 	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
+
+	i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
+	DRM_DEBUG_DRIVER("use GPU sempahores? %s\n", yesno(i915.semaphores));
 }
 
 /**
@@ -1010,9 +981,8 @@
  */
 static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 {
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct drm_device *dev = &dev_priv->drm;
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	uint32_t aperture_size;
 	int ret;
 
 	if (i915_inject_load_failure())
@@ -1022,16 +992,10 @@
 
 	intel_sanitize_options(dev_priv);
 
-	ret = i915_ggtt_init_hw(dev);
+	ret = i915_ggtt_probe_hw(dev_priv);
 	if (ret)
 		return ret;
 
-	ret = i915_ggtt_enable_hw(dev);
-	if (ret) {
-		DRM_ERROR("failed to enable GGTT\n");
-		goto out_ggtt;
-	}
-
 	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
 	 * otherwise the vga fbdev driver falls over. */
 	ret = i915_kick_out_firmware_fb(dev_priv);
@@ -1046,11 +1010,21 @@
 		goto out_ggtt;
 	}
 
-	pci_set_master(dev->pdev);
+	ret = i915_ggtt_init_hw(dev_priv);
+	if (ret)
+		return ret;
+
+	ret = i915_ggtt_enable_hw(dev_priv);
+	if (ret) {
+		DRM_ERROR("failed to enable GGTT\n");
+		goto out_ggtt;
+	}
+
+	pci_set_master(pdev);
 
 	/* overlay on gen2 is broken and can't address above 1G */
 	if (IS_GEN2(dev)) {
-		ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
 		if (ret) {
 			DRM_ERROR("failed to set DMA mask\n");
 
@@ -1058,7 +1032,6 @@
 		}
 	}
 
-
 	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
 	 * using 32bit addressing, overwriting memory if HWS is located
 	 * above 4GB.
@@ -1068,7 +1041,7 @@
 	 * which also needs to be handled carefully.
 	 */
 	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
-		ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 
 		if (ret) {
 			DRM_ERROR("failed to set DMA mask\n");
@@ -1077,19 +1050,6 @@
 		}
 	}
 
-	aperture_size = ggtt->mappable_end;
-
-	ggtt->mappable =
-		io_mapping_create_wc(ggtt->mappable_base,
-				     aperture_size);
-	if (!ggtt->mappable) {
-		ret = -EIO;
-		goto out_ggtt;
-	}
-
-	ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
-					      aperture_size);
-
 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
 			   PM_QOS_DEFAULT_VALUE);
 
@@ -1111,14 +1071,14 @@
 	 * stuck interrupts on some machines.
 	 */
 	if (!IS_I945G(dev) && !IS_I945GM(dev)) {
-		if (pci_enable_msi(dev->pdev) < 0)
+		if (pci_enable_msi(pdev) < 0)
 			DRM_DEBUG_DRIVER("can't enable MSI");
 	}
 
 	return 0;
 
 out_ggtt:
-	i915_ggtt_cleanup_hw(dev);
+	i915_ggtt_cleanup_hw(dev_priv);
 
 	return ret;
 }
@@ -1129,16 +1089,13 @@
  */
 static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = &dev_priv->drm;
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 
-	if (dev->pdev->msi_enabled)
-		pci_disable_msi(dev->pdev);
+	if (pdev->msi_enabled)
+		pci_disable_msi(pdev);
 
 	pm_qos_remove_request(&dev_priv->pm_qos);
-	arch_phys_wc_del(ggtt->mtrr);
-	io_mapping_free(ggtt->mappable);
-	i915_ggtt_cleanup_hw(dev);
+	i915_ggtt_cleanup_hw(dev_priv);
 }
 
 /**
@@ -1164,7 +1121,7 @@
 	/* Reveal our presence to userspace */
 	if (drm_dev_register(dev, 0) == 0) {
 		i915_debugfs_register(dev_priv);
-		i915_setup_sysfs(dev);
+		i915_setup_sysfs(dev_priv);
 	} else
 		DRM_ERROR("Failed to register driver for userspace access!\n");
 
@@ -1201,7 +1158,7 @@
 	acpi_video_unregister();
 	intel_opregion_unregister(dev_priv);
 
-	i915_teardown_sysfs(&dev_priv->drm);
+	i915_teardown_sysfs(dev_priv);
 	i915_debugfs_unregister(dev_priv);
 	drm_dev_unregister(&dev_priv->drm);
 
@@ -1310,6 +1267,7 @@
 void i915_driver_unload(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 
 	intel_fbdev_fini(dev);
 
@@ -1338,8 +1296,8 @@
 	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
 	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
 
-	vga_switcheroo_unregister_client(dev->pdev);
-	vga_client_register(dev->pdev, NULL, NULL, NULL);
+	vga_switcheroo_unregister_client(pdev);
+	vga_client_register(pdev, NULL, NULL, NULL);
 
 	intel_csr_ucode_fini(dev_priv);
 
@@ -1348,7 +1306,7 @@
 	i915_destroy_error_state(dev);
 
 	/* Flush any outstanding unpin_work. */
-	flush_workqueue(dev_priv->wq);
+	drain_workqueue(dev_priv->wq);
 
 	intel_guc_fini(dev);
 	i915_gem_fini(dev);
@@ -1436,6 +1394,7 @@
 static int i915_drm_suspend(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	pci_power_t opregion_target_state;
 	int error;
 
@@ -1452,19 +1411,17 @@
 
 	drm_kms_helper_poll_disable(dev);
 
-	pci_save_state(dev->pdev);
+	pci_save_state(pdev);
 
 	error = i915_gem_suspend(dev);
 	if (error) {
-		dev_err(&dev->pdev->dev,
+		dev_err(&pdev->dev,
 			"GEM idle failed, resume might fail\n");
 		goto out;
 	}
 
 	intel_guc_suspend(dev);
 
-	intel_suspend_gt_powersave(dev_priv);
-
 	intel_display_suspend(dev);
 
 	intel_dp_mst_suspend(dev);
@@ -1500,9 +1457,10 @@
 	return error;
 }
 
-static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
+static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 {
-	struct drm_i915_private *dev_priv = to_i915(drm_dev);
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	bool fw_csr;
 	int ret;
 
@@ -1536,7 +1494,7 @@
 		goto out;
 	}
 
-	pci_disable_device(drm_dev->pdev);
+	pci_disable_device(pdev);
 	/*
 	 * During hibernation on some platforms the BIOS may try to access
 	 * the device even though it's already in D3 and hang the machine. So
@@ -1550,7 +1508,7 @@
 	 * Acer Aspire 1830T
 	 */
 	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
-		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+		pci_set_power_state(pdev, PCI_D3hot);
 
 	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
 
@@ -1590,18 +1548,18 @@
 	int ret;
 
 	disable_rpm_wakeref_asserts(dev_priv);
+	intel_sanitize_gt_powersave(dev_priv);
 
-	ret = i915_ggtt_enable_hw(dev);
+	ret = i915_ggtt_enable_hw(dev_priv);
 	if (ret)
 		DRM_ERROR("failed to re-enable GGTT\n");
 
 	intel_csr_ucode_resume(dev_priv);
 
-	mutex_lock(&dev->struct_mutex);
-	i915_gem_restore_gtt_mappings(dev);
-	mutex_unlock(&dev->struct_mutex);
+	i915_gem_resume(dev);
 
 	i915_restore_state(dev);
+	intel_pps_unlock_regs_wa(dev_priv);
 	intel_opregion_setup(dev_priv);
 
 	intel_init_pch_refclk(dev);
@@ -1620,7 +1578,7 @@
 	mutex_lock(&dev->struct_mutex);
 	if (i915_gem_init_hw(dev)) {
 		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		i915_gem_set_wedged(dev_priv);
 	}
 	mutex_unlock(&dev->struct_mutex);
 
@@ -1657,6 +1615,7 @@
 
 	intel_opregion_notify_adapter(dev_priv, PCI_D0);
 
+	intel_autoenable_gt_powersave(dev_priv);
 	drm_kms_helper_poll_enable(dev);
 
 	enable_rpm_wakeref_asserts(dev_priv);
@@ -1667,6 +1626,7 @@
 static int i915_drm_resume_early(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	int ret;
 
 	/*
@@ -1689,7 +1649,7 @@
 	 * the device powered we can also remove the following set power state
 	 * call.
 	 */
-	ret = pci_set_power_state(dev->pdev, PCI_D0);
+	ret = pci_set_power_state(pdev, PCI_D0);
 	if (ret) {
 		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
 		goto out;
@@ -1708,12 +1668,12 @@
 	 * depend on the device enable refcount we can't anyway depend on them
 	 * disabling/enabling the device.
 	 */
-	if (pci_enable_device(dev->pdev)) {
+	if (pci_enable_device(pdev)) {
 		ret = -EIO;
 		goto out;
 	}
 
-	pci_set_master(dev->pdev);
+	pci_set_master(pdev);
 
 	disable_rpm_wakeref_asserts(dev_priv);
 
@@ -1765,8 +1725,10 @@
  * i915_reset - reset chip after a hang
  * @dev: drm device to reset
  *
- * Reset the chip.  Useful if a hang is detected. Returns zero on successful
- * reset or otherwise an error code.
+ * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
+ * on failure.
+ *
+ * Caller must hold the struct_mutex.
  *
  * Procedure is fairly simple:
  *   - reset the chip using the reset reg
@@ -1776,31 +1738,22 @@
  *   - re-init interrupt state
  *   - re-init display
  */
-int i915_reset(struct drm_i915_private *dev_priv)
+void i915_reset(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
 	struct i915_gpu_error *error = &dev_priv->gpu_error;
-	unsigned reset_counter;
 	int ret;
 
-	intel_reset_gt_powersave(dev_priv);
+	lockdep_assert_held(&dev->struct_mutex);
 
-	mutex_lock(&dev->struct_mutex);
+	if (!test_and_clear_bit(I915_RESET_IN_PROGRESS, &error->flags))
+		return;
 
 	/* Clear any previous failed attempts at recovery. Time to try again. */
-	atomic_andnot(I915_WEDGED, &error->reset_counter);
-
-	/* Clear the reset-in-progress flag and increment the reset epoch. */
-	reset_counter = atomic_inc_return(&error->reset_counter);
-	if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
-		ret = -EIO;
-		goto error;
-	}
+	__clear_bit(I915_WEDGED, &error->flags);
+	error->reset_count++;
 
 	pr_notice("drm/i915: Resetting chip after gpu hang\n");
-
-	i915_gem_reset(dev);
-
 	ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
 	if (ret) {
 		if (ret != -ENODEV)
@@ -1810,6 +1763,7 @@
 		goto error;
 	}
 
+	i915_gem_reset(dev_priv);
 	intel_overlay_reset(dev_priv);
 
 	/* Ok, now get things going again... */
@@ -1832,44 +1786,34 @@
 		goto error;
 	}
 
-	mutex_unlock(&dev->struct_mutex);
-
-	/*
-	 * rps/rc6 re-init is necessary to restore state lost after the
-	 * reset and the re-install of gt irqs. Skip for ironlake per
-	 * previous concerns that it doesn't respond well to some forms
-	 * of re-init after reset.
-	 */
-	if (INTEL_INFO(dev)->gen > 5)
-		intel_enable_gt_powersave(dev_priv);
-
-	return 0;
+wakeup:
+	wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
+	return;
 
 error:
-	atomic_or(I915_WEDGED, &error->reset_counter);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	i915_gem_set_wedged(dev_priv);
+	goto wakeup;
 }
 
-static int i915_pm_suspend(struct device *dev)
+static int i915_pm_suspend(struct device *kdev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct pci_dev *pdev = to_pci_dev(kdev);
+	struct drm_device *dev = pci_get_drvdata(pdev);
 
-	if (!drm_dev) {
-		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+	if (!dev) {
+		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
 		return -ENODEV;
 	}
 
-	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_suspend(drm_dev);
+	return i915_drm_suspend(dev);
 }
 
-static int i915_pm_suspend_late(struct device *dev)
+static int i915_pm_suspend_late(struct device *kdev)
 {
-	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
 	/*
 	 * We have a suspend ordering issue with the snd-hda driver also
@@ -1880,57 +1824,67 @@
 	 * FIXME: This should be solved with a special hdmi sink device or
 	 * similar so that power domains can be employed.
 	 */
-	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_suspend_late(drm_dev, false);
+	return i915_drm_suspend_late(dev, false);
 }
 
-static int i915_pm_poweroff_late(struct device *dev)
+static int i915_pm_poweroff_late(struct device *kdev)
 {
-	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
-	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_suspend_late(drm_dev, true);
+	return i915_drm_suspend_late(dev, true);
 }
 
-static int i915_pm_resume_early(struct device *dev)
+static int i915_pm_resume_early(struct device *kdev)
 {
-	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
-	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_resume_early(drm_dev);
+	return i915_drm_resume_early(dev);
 }
 
-static int i915_pm_resume(struct device *dev)
+static int i915_pm_resume(struct device *kdev)
 {
-	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
-	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_resume(drm_dev);
+	return i915_drm_resume(dev);
 }
 
 /* freeze: before creating the hibernation_image */
-static int i915_pm_freeze(struct device *dev)
-{
-	return i915_pm_suspend(dev);
-}
-
-static int i915_pm_freeze_late(struct device *dev)
+static int i915_pm_freeze(struct device *kdev)
 {
 	int ret;
 
-	ret = i915_pm_suspend_late(dev);
+	ret = i915_pm_suspend(kdev);
 	if (ret)
 		return ret;
 
-	ret = i915_gem_freeze_late(dev_to_i915(dev));
+	ret = i915_gem_freeze(kdev_to_i915(kdev));
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int i915_pm_freeze_late(struct device *kdev)
+{
+	int ret;
+
+	ret = i915_pm_suspend_late(kdev);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_freeze_late(kdev_to_i915(kdev));
 	if (ret)
 		return ret;
 
@@ -1938,25 +1892,25 @@
 }
 
 /* thaw: called after creating the hibernation image, but before turning off. */
-static int i915_pm_thaw_early(struct device *dev)
+static int i915_pm_thaw_early(struct device *kdev)
 {
-	return i915_pm_resume_early(dev);
+	return i915_pm_resume_early(kdev);
 }
 
-static int i915_pm_thaw(struct device *dev)
+static int i915_pm_thaw(struct device *kdev)
 {
-	return i915_pm_resume(dev);
+	return i915_pm_resume(kdev);
 }
 
 /* restore: called after loading the hibernation image. */
-static int i915_pm_restore_early(struct device *dev)
+static int i915_pm_restore_early(struct device *kdev)
 {
-	return i915_pm_resume_early(dev);
+	return i915_pm_resume_early(kdev);
 }
 
-static int i915_pm_restore(struct device *dev)
+static int i915_pm_restore(struct device *kdev)
 {
-	return i915_pm_resume(dev);
+	return i915_pm_resume(kdev);
 }
 
 /*
@@ -2318,9 +2272,9 @@
 	return ret;
 }
 
-static int intel_runtime_suspend(struct device *device)
+static int intel_runtime_suspend(struct device *kdev)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
+	struct pci_dev *pdev = to_pci_dev(kdev);
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
@@ -2346,7 +2300,7 @@
 		 * Bump the expiration timestamp, otherwise the suspend won't
 		 * be rescheduled.
 		 */
-		pm_runtime_mark_last_busy(device);
+		pm_runtime_mark_last_busy(kdev);
 
 		return -EAGAIN;
 	}
@@ -2425,9 +2379,9 @@
 	return 0;
 }
 
-static int intel_runtime_resume(struct device *device)
+static int intel_runtime_resume(struct device *kdev)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
+	struct pci_dev *pdev = to_pci_dev(kdev);
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret = 0;
@@ -2467,7 +2421,6 @@
 	 * we can do is to hope that things will still work (and disable RPM).
 	 */
 	i915_gem_init_swizzling(dev);
-	gen6_update_ring_freq(dev_priv);
 
 	intel_runtime_pm_enable_interrupts(dev_priv);
 
@@ -2623,6 +2576,7 @@
 	.postclose = i915_driver_postclose,
 	.set_busid = drm_pci_set_busid,
 
+	.gem_close_object = i915_gem_close_object,
 	.gem_free_object = i915_gem_free_object,
 	.gem_vm_ops = &i915_gem_vm_ops,
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f68c789..8b9ee4e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -61,6 +61,7 @@
 #include "i915_gem.h"
 #include "i915_gem_gtt.h"
 #include "i915_gem_render_state.h"
+#include "i915_gem_request.h"
 
 #include "intel_gvt.h"
 
@@ -69,7 +70,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20160711"
+#define DRIVER_DATE		"20160919"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -401,7 +402,7 @@
 		unsigned boosts;
 	} rps;
 
-	unsigned int bsd_ring;
+	unsigned int bsd_engine;
 };
 
 /* Used by dp and fdi links */
@@ -431,8 +432,6 @@
 #define DRIVER_MINOR		6
 #define DRIVER_PATCHLEVEL	0
 
-#define WATCH_LISTS	0
-
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
@@ -456,15 +455,21 @@
 struct intel_overlay;
 struct intel_overlay_error_state;
 
-#define I915_FENCE_REG_NONE -1
-#define I915_MAX_NUM_FENCES 32
-/* 32 fences + sign bit for FENCE_REG_NONE */
-#define I915_MAX_NUM_FENCE_BITS 6
-
 struct drm_i915_fence_reg {
-	struct list_head lru_list;
-	struct drm_i915_gem_object *obj;
+	struct list_head link;
+	struct drm_i915_private *i915;
+	struct i915_vma *vma;
 	int pin_count;
+	int id;
+	/**
+	 * Whether the tiling parameters for the currently
+	 * associated fence register have changed. Note that
+	 * for the purposes of tracking tiling changes we also
+	 * treat the unfenced register, the register slot that
+	 * the object occupies whilst it executes a fenced
+	 * command (such as BLT on gen2/3), as a "fence".
+	 */
+	bool dirty;
 };
 
 struct sdvo_device_mapping {
@@ -476,130 +481,6 @@
 	u8 ddc_pin;
 };
 
-struct intel_display_error_state;
-
-struct drm_i915_error_state {
-	struct kref ref;
-	struct timeval time;
-
-	char error_msg[128];
-	bool simulated;
-	int iommu;
-	u32 reset_count;
-	u32 suspend_count;
-
-	/* Generic register state */
-	u32 eir;
-	u32 pgtbl_er;
-	u32 ier;
-	u32 gtier[4];
-	u32 ccid;
-	u32 derrmr;
-	u32 forcewake;
-	u32 error; /* gen6+ */
-	u32 err_int; /* gen7 */
-	u32 fault_data0; /* gen8, gen9 */
-	u32 fault_data1; /* gen8, gen9 */
-	u32 done_reg;
-	u32 gac_eco;
-	u32 gam_ecochk;
-	u32 gab_ctl;
-	u32 gfx_mode;
-	u32 extra_instdone[I915_NUM_INSTDONE_REG];
-	u64 fence[I915_MAX_NUM_FENCES];
-	struct intel_overlay_error_state *overlay;
-	struct intel_display_error_state *display;
-	struct drm_i915_error_object *semaphore_obj;
-
-	struct drm_i915_error_ring {
-		bool valid;
-		/* Software tracked state */
-		bool waiting;
-		int num_waiters;
-		int hangcheck_score;
-		enum intel_ring_hangcheck_action hangcheck_action;
-		int num_requests;
-
-		/* our own tracking of ring head and tail */
-		u32 cpu_ring_head;
-		u32 cpu_ring_tail;
-
-		u32 last_seqno;
-		u32 semaphore_seqno[I915_NUM_ENGINES - 1];
-
-		/* Register state */
-		u32 start;
-		u32 tail;
-		u32 head;
-		u32 ctl;
-		u32 hws;
-		u32 ipeir;
-		u32 ipehr;
-		u32 instdone;
-		u32 bbstate;
-		u32 instpm;
-		u32 instps;
-		u32 seqno;
-		u64 bbaddr;
-		u64 acthd;
-		u32 fault_reg;
-		u64 faddr;
-		u32 rc_psmi; /* sleep state */
-		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
-
-		struct drm_i915_error_object {
-			int page_count;
-			u64 gtt_offset;
-			u32 *pages[0];
-		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
-
-		struct drm_i915_error_object *wa_ctx;
-
-		struct drm_i915_error_request {
-			long jiffies;
-			u32 seqno;
-			u32 tail;
-		} *requests;
-
-		struct drm_i915_error_waiter {
-			char comm[TASK_COMM_LEN];
-			pid_t pid;
-			u32 seqno;
-		} *waiters;
-
-		struct {
-			u32 gfx_mode;
-			union {
-				u64 pdp[4];
-				u32 pp_dir_base;
-			};
-		} vm_info;
-
-		pid_t pid;
-		char comm[TASK_COMM_LEN];
-	} ring[I915_NUM_ENGINES];
-
-	struct drm_i915_error_buffer {
-		u32 size;
-		u32 name;
-		u32 rseqno[I915_NUM_ENGINES], wseqno;
-		u64 gtt_offset;
-		u32 read_domains;
-		u32 write_domain;
-		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
-		s32 pinned:2;
-		u32 tiling:2;
-		u32 dirty:1;
-		u32 purgeable:1;
-		u32 userptr:1;
-		s32 ring:4;
-		u32 cache_level:3;
-	} **active_bo, **pinned_bo;
-
-	u32 *active_bo_count, *pinned_bo_count;
-	u32 vm_count;
-};
-
 struct intel_connector;
 struct intel_encoder;
 struct intel_crtc_state;
@@ -629,8 +510,12 @@
 					 struct intel_initial_plane_config *);
 	int (*crtc_compute_clock)(struct intel_crtc *crtc,
 				  struct intel_crtc_state *crtc_state);
-	void (*crtc_enable)(struct drm_crtc *crtc);
-	void (*crtc_disable)(struct drm_crtc *crtc);
+	void (*crtc_enable)(struct intel_crtc_state *pipe_config,
+			    struct drm_atomic_state *old_state);
+	void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
+			     struct drm_atomic_state *old_state);
+	void (*update_crtcs)(struct drm_atomic_state *state,
+			     unsigned int *crtc_vblank_mask);
 	void (*audio_codec_enable)(struct drm_connector *connector,
 				   struct intel_encoder *encoder,
 				   const struct drm_display_mode *adjusted_mode);
@@ -694,8 +579,6 @@
 				uint16_t val, bool trace);
 	void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
 				uint32_t val, bool trace);
-	void (*mmio_writeq)(struct drm_i915_private *dev_priv, i915_reg_t r,
-				uint64_t val, bool trace);
 };
 
 struct intel_uncore {
@@ -756,7 +639,7 @@
 	func(is_i915g) sep \
 	func(is_i945gm) sep \
 	func(is_g33) sep \
-	func(need_gfx_hws) sep \
+	func(hws_needs_physical) sep \
 	func(is_g4x) sep \
 	func(is_pineview) sep \
 	func(is_broadwater) sep \
@@ -771,6 +654,19 @@
 	func(is_kabylake) sep \
 	func(is_preliminary) sep \
 	func(has_fbc) sep \
+	func(has_psr) sep \
+	func(has_runtime_pm) sep \
+	func(has_csr) sep \
+	func(has_resource_streamer) sep \
+	func(has_rc6) sep \
+	func(has_rc6p) sep \
+	func(has_dp_mst) sep \
+	func(has_gmbus_irq) sep \
+	func(has_hw_contexts) sep \
+	func(has_logical_ring_contexts) sep \
+	func(has_l3_dpf) sep \
+	func(has_gmch_display) sep \
+	func(has_guc) sep \
 	func(has_pipe_cxsr) sep \
 	func(has_hotplug) sep \
 	func(cursor_needs_physical) sep \
@@ -786,25 +682,9 @@
 #define DEFINE_FLAG(name) u8 name:1
 #define SEP_SEMICOLON ;
 
-struct intel_device_info {
-	u32 display_mmio_offset;
-	u16 device_id;
-	u8 num_pipes;
-	u8 num_sprites[I915_MAX_PIPES];
-	u8 gen;
-	u16 gen_mask;
-	u8 ring_mask; /* Rings supported by the HW */
-	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
-	/* Register offsets for the various display pipes and transcoders */
-	int pipe_offsets[I915_MAX_TRANSCODERS];
-	int trans_offsets[I915_MAX_TRANSCODERS];
-	int palette_offsets[I915_MAX_PIPES];
-	int cursor_offsets[I915_MAX_PIPES];
-
-	/* Slice/subslice/EU info */
-	u8 slice_total;
-	u8 subslice_total;
-	u8 subslice_per_slice;
+struct sseu_dev_info {
+	u8 slice_mask;
+	u8 subslice_mask;
 	u8 eu_total;
 	u8 eu_per_subslice;
 	u8 min_eu_in_pool;
@@ -813,6 +693,32 @@
 	u8 has_slice_pg:1;
 	u8 has_subslice_pg:1;
 	u8 has_eu_pg:1;
+};
+
+static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
+{
+	return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
+}
+
+struct intel_device_info {
+	u32 display_mmio_offset;
+	u16 device_id;
+	u8 num_pipes;
+	u8 num_sprites[I915_MAX_PIPES];
+	u8 gen;
+	u16 gen_mask;
+	u8 ring_mask; /* Rings supported by the HW */
+	u8 num_rings;
+	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+	u16 ddb_size; /* in blocks */
+	/* Register offsets for the various display pipes and transcoders */
+	int pipe_offsets[I915_MAX_TRANSCODERS];
+	int trans_offsets[I915_MAX_TRANSCODERS];
+	int palette_offsets[I915_MAX_PIPES];
+	int cursor_offsets[I915_MAX_PIPES];
+
+	/* Slice/subslice/EU info */
+	struct sseu_dev_info sseu;
 
 	struct color_luts {
 		u16 degamma_lut_size;
@@ -823,6 +729,134 @@
 #undef DEFINE_FLAG
 #undef SEP_SEMICOLON
 
+struct intel_display_error_state;
+
+struct drm_i915_error_state {
+	struct kref ref;
+	struct timeval time;
+
+	char error_msg[128];
+	bool simulated;
+	int iommu;
+	u32 reset_count;
+	u32 suspend_count;
+	struct intel_device_info device_info;
+
+	/* Generic register state */
+	u32 eir;
+	u32 pgtbl_er;
+	u32 ier;
+	u32 gtier[4];
+	u32 ccid;
+	u32 derrmr;
+	u32 forcewake;
+	u32 error; /* gen6+ */
+	u32 err_int; /* gen7 */
+	u32 fault_data0; /* gen8, gen9 */
+	u32 fault_data1; /* gen8, gen9 */
+	u32 done_reg;
+	u32 gac_eco;
+	u32 gam_ecochk;
+	u32 gab_ctl;
+	u32 gfx_mode;
+	u32 extra_instdone[I915_NUM_INSTDONE_REG];
+	u64 fence[I915_MAX_NUM_FENCES];
+	struct intel_overlay_error_state *overlay;
+	struct intel_display_error_state *display;
+	struct drm_i915_error_object *semaphore;
+
+	struct drm_i915_error_engine {
+		int engine_id;
+		/* Software tracked state */
+		bool waiting;
+		int num_waiters;
+		int hangcheck_score;
+		enum intel_engine_hangcheck_action hangcheck_action;
+		struct i915_address_space *vm;
+		int num_requests;
+
+		/* our own tracking of ring head and tail */
+		u32 cpu_ring_head;
+		u32 cpu_ring_tail;
+
+		u32 last_seqno;
+		u32 semaphore_seqno[I915_NUM_ENGINES - 1];
+
+		/* Register state */
+		u32 start;
+		u32 tail;
+		u32 head;
+		u32 ctl;
+		u32 mode;
+		u32 hws;
+		u32 ipeir;
+		u32 ipehr;
+		u32 instdone;
+		u32 bbstate;
+		u32 instpm;
+		u32 instps;
+		u32 seqno;
+		u64 bbaddr;
+		u64 acthd;
+		u32 fault_reg;
+		u64 faddr;
+		u32 rc_psmi; /* sleep state */
+		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
+
+		struct drm_i915_error_object {
+			int page_count;
+			u64 gtt_offset;
+			u64 gtt_size;
+			u32 *pages[0];
+		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
+
+		struct drm_i915_error_object *wa_ctx;
+
+		struct drm_i915_error_request {
+			long jiffies;
+			pid_t pid;
+			u32 seqno;
+			u32 head;
+			u32 tail;
+		} *requests;
+
+		struct drm_i915_error_waiter {
+			char comm[TASK_COMM_LEN];
+			pid_t pid;
+			u32 seqno;
+		} *waiters;
+
+		struct {
+			u32 gfx_mode;
+			union {
+				u64 pdp[4];
+				u32 pp_dir_base;
+			};
+		} vm_info;
+
+		pid_t pid;
+		char comm[TASK_COMM_LEN];
+	} engine[I915_NUM_ENGINES];
+
+	struct drm_i915_error_buffer {
+		u32 size;
+		u32 name;
+		u32 rseqno[I915_NUM_ENGINES], wseqno;
+		u64 gtt_offset;
+		u32 read_domains;
+		u32 write_domain;
+		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
+		u32 tiling:2;
+		u32 dirty:1;
+		u32 purgeable:1;
+		u32 userptr:1;
+		s32 engine:4;
+		u32 cache_level:3;
+	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
+	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
+	struct i915_address_space *active_vm[I915_NUM_ENGINES];
+};
+
 enum i915_cache_level {
 	I915_CACHE_NONE = 0,
 	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
@@ -879,6 +913,7 @@
 	struct drm_i915_private *i915;
 	struct drm_i915_file_private *file_priv;
 	struct i915_hw_ppgtt *ppgtt;
+	struct pid *pid;
 
 	struct i915_ctx_hang_stats hang_stats;
 
@@ -893,9 +928,8 @@
 	u32 ggtt_alignment;
 
 	struct intel_context {
-		struct drm_i915_gem_object *state;
-		struct intel_ringbuffer *ringbuf;
-		struct i915_vma *lrc_vma;
+		struct i915_vma *state;
+		struct intel_ring *ring;
 		uint32_t *lrc_reg_state;
 		u64 lrc_desc;
 		int pin_count;
@@ -909,6 +943,7 @@
 	struct list_head link;
 
 	u8 remap_slice;
+	bool closed:1;
 };
 
 enum fb_op_origin {
@@ -1062,13 +1097,6 @@
 
 struct i915_suspend_saved_registers {
 	u32 saveDSPARB;
-	u32 saveLVDS;
-	u32 savePP_ON_DELAYS;
-	u32 savePP_OFF_DELAYS;
-	u32 savePP_ON;
-	u32 savePP_OFF;
-	u32 savePP_CONTROL;
-	u32 savePP_DIVISOR;
 	u32 saveFBC_CONTROL;
 	u32 saveCACHE_MODE_0;
 	u32 saveMI_ARB_STATE;
@@ -1157,6 +1185,7 @@
 	bool interrupts_enabled;
 	u32 pm_iir;
 
+	/* PM interrupt bits that should never be masked */
 	u32 pm_intr_keep;
 
 	/* Frequencies are stored in potentially platform dependent multiples.
@@ -1174,6 +1203,7 @@
 	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
 	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
 	u8 min_freq;		/* AKA RPn. Minimum frequency */
+	u8 boost_freq;		/* Frequency to request when wait boosting */
 	u8 idle_freq;		/* Frequency to request when we are idle */
 	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
 	u8 rp1_freq;		/* "less than" RP0 power/freqency */
@@ -1191,11 +1221,9 @@
 	bool client_boost;
 
 	bool enabled;
-	struct delayed_work delayed_resume_work;
+	struct delayed_work autoenable_work;
 	unsigned boosts;
 
-	struct intel_rps_client semaphores, mmioflips;
-
 	/* manual wa residency calculations */
 	struct intel_rps_ei up_ei, down_ei;
 
@@ -1320,7 +1348,6 @@
 	struct notifier_block oom_notifier;
 	struct notifier_block vmap_notifier;
 	struct shrinker shrinker;
-	bool shrinker_no_lock_stealing;
 
 	/** LRU list of objects with fence regs on them. */
 	struct list_head fence_list;
@@ -1332,7 +1359,7 @@
 	bool interruptible;
 
 	/* the indicator for dispatch video commands on two BSD rings */
-	unsigned int bsd_ring_dispatch_index;
+	atomic_t bsd_engine_dispatch_index;
 
 	/** Bit 6 swizzling required for X tiling */
 	uint32_t bit_6_swizzle_x;
@@ -1380,9 +1407,10 @@
 	 * State variable controlling the reset flow and count
 	 *
 	 * This is a counter which gets incremented when reset is triggered,
-	 * and again when reset has been handled. So odd values (lowest bit set)
-	 * means that reset is in progress and even values that
-	 * (reset_counter >> 1):th reset was successfully completed.
+	 *
+	 * Before the reset commences, the I915_RESET_IN_PROGRESS bit is set
+	 * meaning that any waiters holding onto the struct_mutex should
+	 * relinquish the lock immediately in order for the reset to start.
 	 *
 	 * If reset is not completed succesfully, the I915_WEDGE bit is
 	 * set meaning that hardware is terminally sour and there is no
@@ -1397,10 +1425,11 @@
 	 * naturally enforces the correct ordering between the bail-out of the
 	 * waiter and the gpu reset work code.
 	 */
-	atomic_t reset_counter;
+	unsigned long reset_count;
 
-#define I915_RESET_IN_PROGRESS_FLAG	1
-#define I915_WEDGED			(1 << 31)
+	unsigned long flags;
+#define I915_RESET_IN_PROGRESS	0
+#define I915_WEDGED		(BITS_PER_LONG - 1)
 
 	/**
 	 * Waitqueue to signal when a hang is detected. Used to for waiters
@@ -1671,7 +1700,7 @@
 };
 
 struct i915_frontbuffer_tracking {
-	struct mutex lock;
+	spinlock_t lock;
 
 	/*
 	 * Tracking bits for delayed frontbuffer flushing du to gpu activity or
@@ -1706,18 +1735,6 @@
 	bool active;
 };
 
-struct i915_execbuffer_params {
-	struct drm_device               *dev;
-	struct drm_file                 *file;
-	uint32_t                        dispatch_flags;
-	uint32_t                        args_batch_start_offset;
-	uint64_t                        batch_obj_vm_offset;
-	struct intel_engine_cs *engine;
-	struct drm_i915_gem_object      *batch_obj;
-	struct i915_gem_context            *ctx;
-	struct drm_i915_gem_request     *request;
-};
-
 /* used in computing the new watermarks state */
 struct intel_wm_config {
 	unsigned int num_pipes_active;
@@ -1764,13 +1781,15 @@
 
 	uint32_t psr_mmio_base;
 
+	uint32_t pps_mmio_base;
+
 	wait_queue_head_t gmbus_wait_queue;
 
 	struct pci_dev *bridge_dev;
 	struct i915_gem_context *kernel_context;
 	struct intel_engine_cs engine[I915_NUM_ENGINES];
-	struct drm_i915_gem_object *semaphore_obj;
-	uint32_t last_seqno, next_seqno;
+	struct i915_vma *semaphore;
+	u32 next_seqno;
 
 	struct drm_dma_handle *status_page_dmah;
 	struct resource mch_res;
@@ -1965,11 +1984,11 @@
 	struct vlv_s0ix_state vlv_s0ix_state;
 
 	enum {
-		I915_SKL_SAGV_UNKNOWN = 0,
-		I915_SKL_SAGV_DISABLED,
-		I915_SKL_SAGV_ENABLED,
-		I915_SKL_SAGV_NOT_CONTROLLED
-	} skl_sagv_status;
+		I915_SAGV_UNKNOWN = 0,
+		I915_SAGV_DISABLED,
+		I915_SAGV_ENABLED,
+		I915_SAGV_NOT_CONTROLLED
+	} sagv_status;
 
 	struct {
 		/*
@@ -2025,12 +2044,8 @@
 
 	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
 	struct {
-		int (*execbuf_submit)(struct i915_execbuffer_params *params,
-				      struct drm_i915_gem_execbuffer2 *args,
-				      struct list_head *vmas);
-		int (*init_engines)(struct drm_device *dev);
+		void (*resume)(struct drm_i915_private *);
 		void (*cleanup_engine)(struct intel_engine_cs *engine);
-		void (*stop_engine)(struct intel_engine_cs *engine);
 
 		/**
 		 * Is the GPU currently considered idle, or busy executing
@@ -2077,9 +2092,9 @@
 	return container_of(dev, struct drm_i915_private, drm);
 }
 
-static inline struct drm_i915_private *dev_to_i915(struct device *dev)
+static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
 {
-	return to_i915(dev_get_drvdata(dev));
+	return to_i915(dev_get_drvdata(kdev));
 }
 
 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
@@ -2102,13 +2117,16 @@
 		for_each_if (((id__) = (engine__)->id, \
 			      intel_engine_initialized(engine__)))
 
+#define __mask_next_bit(mask) ({					\
+	int __idx = ffs(mask) - 1;					\
+	mask &= ~BIT(__idx);						\
+	__idx;								\
+})
+
 /* Iterator over subset of engines selected by mask */
-#define for_each_engine_masked(engine__, dev_priv__, mask__) \
-	for ((engine__) = &(dev_priv__)->engine[0]; \
-	     (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
-	     (engine__)++) \
-		for_each_if (((mask__) & intel_engine_flag(engine__)) && \
-			     intel_engine_initialized(engine__))
+#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
+	for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask;	\
+	     tmp__ ? (engine__ = &(dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
 
 enum hdmi_force_audio {
 	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
@@ -2153,8 +2171,6 @@
  */
 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
-#define INTEL_FRONTBUFFER_BITS \
-	(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \
 	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
 #define INTEL_FRONTBUFFER_CURSOR(pipe) \
@@ -2178,18 +2194,21 @@
 	struct drm_mm_node *stolen;
 	struct list_head global_list;
 
-	struct list_head engine_list[I915_NUM_ENGINES];
 	/** Used in execbuf to temporarily hold a ref */
 	struct list_head obj_exec_link;
 
 	struct list_head batch_pool_link;
 
+	unsigned long flags;
 	/**
 	 * This is set if the object is on the active lists (has pending
 	 * rendering and so a non-zero seqno), and is not set if it i s on
 	 * inactive (ready to be unbound) list.
 	 */
-	unsigned int active:I915_NUM_ENGINES;
+#define I915_BO_ACTIVE_SHIFT 0
+#define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
+#define __I915_BO_ACTIVE(bo) \
+	((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
 
 	/**
 	 * This is set if the object has been written to since last bound
@@ -2198,37 +2217,11 @@
 	unsigned int dirty:1;
 
 	/**
-	 * Fence register bits (if any) for this object.  Will be set
-	 * as needed when mapped into the GTT.
-	 * Protected by dev->struct_mutex.
-	 */
-	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
-
-	/**
 	 * Advice: are the backing pages purgeable?
 	 */
 	unsigned int madv:2;
 
 	/**
-	 * Current tiling mode for the object.
-	 */
-	unsigned int tiling_mode:2;
-	/**
-	 * Whether the tiling parameters for the currently associated fence
-	 * register have changed. Note that for the purposes of tracking
-	 * tiling changes we also treat the unfenced register, the register
-	 * slot that the object occupies whilst it executes a fenced
-	 * command (such as BLT on gen2/3), as a "fence".
-	 */
-	unsigned int fence_dirty:1;
-
-	/**
-	 * Is the object at the current location in the gtt mappable and
-	 * fenceable? Used to avoid costly recalculations.
-	 */
-	unsigned int map_and_fenceable:1;
-
-	/**
 	 * Whether the current gtt mapping needs to be mappable (and isn't just
 	 * mappable by accident). Track pin and fault separate for a more
 	 * accurate mappable working set.
@@ -2243,9 +2236,17 @@
 	unsigned int cache_level:3;
 	unsigned int cache_dirty:1;
 
-	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+	atomic_t frontbuffer_bits;
+	unsigned int frontbuffer_ggtt_origin; /* write once */
 
-	unsigned int has_wc_mmap;
+	/** Current tiling stride for the object, if it's tiled. */
+	unsigned int tiling_and_stride;
+#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
+#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
+#define STRIDE_MASK (~TILING_MASK)
+
+	/** Count of VMA actually bound by this object */
+	unsigned int bind_count;
 	unsigned int pin_display;
 
 	struct sg_table *pages;
@@ -2265,14 +2266,9 @@
 	 * requests on one ring where the write request is older than the
 	 * read request. This allows for the CPU to read from an active
 	 * buffer by only waiting for the write to complete.
-	 * */
-	struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES];
-	struct drm_i915_gem_request *last_write_req;
-	/** Breadcrumb of last fenced GPU access to the buffer. */
-	struct drm_i915_gem_request *last_fenced_req;
-
-	/** Current tiling stride for the object, if it's tiled. */
-	uint32_t stride;
+	 */
+	struct i915_gem_active last_read[I915_NUM_ENGINES];
+	struct i915_gem_active last_write;
 
 	/** References from framebuffers, locks out tiling changes. */
 	unsigned long framebuffer_references;
@@ -2280,23 +2276,70 @@
 	/** Record of address bit 17 of each page at last unbind. */
 	unsigned long *bit_17;
 
-	union {
-		/** for phy allocated objects */
-		struct drm_dma_handle *phys_handle;
-
-		struct i915_gem_userptr {
-			uintptr_t ptr;
-			unsigned read_only :1;
-			unsigned workers :4;
+	struct i915_gem_userptr {
+		uintptr_t ptr;
+		unsigned read_only :1;
+		unsigned workers :4;
 #define I915_GEM_USERPTR_MAX_WORKERS 15
 
-			struct i915_mm_struct *mm;
-			struct i915_mmu_object *mmu_object;
-			struct work_struct *work;
-		} userptr;
-	};
+		struct i915_mm_struct *mm;
+		struct i915_mmu_object *mmu_object;
+		struct work_struct *work;
+	} userptr;
+
+	/** for phys allocated objects */
+	struct drm_dma_handle *phys_handle;
 };
-#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+
+static inline struct drm_i915_gem_object *
+to_intel_bo(struct drm_gem_object *gem)
+{
+	/* Assert that to_intel_bo(NULL) == NULL */
+	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
+
+	return container_of(gem, struct drm_i915_gem_object, base);
+}
+
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup(struct drm_file *file, u32 handle)
+{
+	return to_intel_bo(drm_gem_object_lookup(file, handle));
+}
+
+__deprecated
+extern struct drm_gem_object *
+drm_gem_object_lookup(struct drm_file *file, u32 handle);
+
+__attribute__((nonnull))
+static inline struct drm_i915_gem_object *
+i915_gem_object_get(struct drm_i915_gem_object *obj)
+{
+	drm_gem_object_reference(&obj->base);
+	return obj;
+}
+
+__deprecated
+extern void drm_gem_object_reference(struct drm_gem_object *);
+
+__attribute__((nonnull))
+static inline void
+i915_gem_object_put(struct drm_i915_gem_object *obj)
+{
+	drm_gem_object_unreference(&obj->base);
+}
+
+__deprecated
+extern void drm_gem_object_unreference(struct drm_gem_object *);
+
+__attribute__((nonnull))
+static inline void
+i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
+{
+	drm_gem_object_unreference_unlocked(&obj->base);
+}
+
+__deprecated
+extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
 
 static inline bool
 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
@@ -2304,6 +2347,67 @@
 	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
 }
 
+static inline unsigned long
+i915_gem_object_get_active(const struct drm_i915_gem_object *obj)
+{
+	return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK;
+}
+
+static inline bool
+i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
+{
+	return i915_gem_object_get_active(obj);
+}
+
+static inline void
+i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine)
+{
+	obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
+static inline void
+i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine)
+{
+	obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
+static inline bool
+i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
+				  int engine)
+{
+	return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
+static inline unsigned int
+i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
+{
+	return obj->tiling_and_stride & TILING_MASK;
+}
+
+static inline bool
+i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
+{
+	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
+}
+
+static inline unsigned int
+i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
+{
+	return obj->tiling_and_stride & STRIDE_MASK;
+}
+
+static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
+{
+	i915_gem_object_get(vma->obj);
+	return vma;
+}
+
+static inline void i915_vma_put(struct i915_vma *vma)
+{
+	lockdep_assert_held(&vma->vm->dev->struct_mutex);
+	i915_gem_object_put(vma->obj);
+}
+
 /*
  * Optimised SGL iterator for GEM objects
  */
@@ -2374,171 +2478,6 @@
 	     (((__iter).curr += PAGE_SIZE) < (__iter).max) ||		\
 	     ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
 
-/**
- * Request queue structure.
- *
- * The request queue allows us to note sequence numbers that have been emitted
- * and may be associated with active buffers to be retired.
- *
- * By keeping this list, we can avoid having to do questionable sequence
- * number comparisons on buffer last_read|write_seqno. It also allows an
- * emission time to be associated with the request for tracking how far ahead
- * of the GPU the submission is.
- *
- * The requests are reference counted, so upon creation they should have an
- * initial reference taken using kref_init
- */
-struct drm_i915_gem_request {
-	struct kref ref;
-
-	/** On Which ring this request was generated */
-	struct drm_i915_private *i915;
-	struct intel_engine_cs *engine;
-	struct intel_signal_node signaling;
-
-	 /** GEM sequence number associated with the previous request,
-	  * when the HWS breadcrumb is equal to this the GPU is processing
-	  * this request.
-	  */
-	u32 previous_seqno;
-
-	 /** GEM sequence number associated with this request,
-	  * when the HWS breadcrumb is equal or greater than this the GPU
-	  * has finished processing this request.
-	  */
-	u32 seqno;
-
-	/** Position in the ringbuffer of the start of the request */
-	u32 head;
-
-	/**
-	 * Position in the ringbuffer of the start of the postfix.
-	 * This is required to calculate the maximum available ringbuffer
-	 * space without overwriting the postfix.
-	 */
-	 u32 postfix;
-
-	/** Position in the ringbuffer of the end of the whole request */
-	u32 tail;
-
-	/** Preallocate space in the ringbuffer for the emitting the request */
-	u32 reserved_space;
-
-	/**
-	 * Context and ring buffer related to this request
-	 * Contexts are refcounted, so when this request is associated with a
-	 * context, we must increment the context's refcount, to guarantee that
-	 * it persists while any request is linked to it. Requests themselves
-	 * are also refcounted, so the request will only be freed when the last
-	 * reference to it is dismissed, and the code in
-	 * i915_gem_request_free() will then decrement the refcount on the
-	 * context.
-	 */
-	struct i915_gem_context *ctx;
-	struct intel_ringbuffer *ringbuf;
-
-	/**
-	 * Context related to the previous request.
-	 * As the contexts are accessed by the hardware until the switch is
-	 * completed to a new context, the hardware may still be writing
-	 * to the context object after the breadcrumb is visible. We must
-	 * not unpin/unbind/prune that object whilst still active and so
-	 * we keep the previous context pinned until the following (this)
-	 * request is retired.
-	 */
-	struct i915_gem_context *previous_context;
-
-	/** Batch buffer related to this request if any (used for
-	    error state dump only) */
-	struct drm_i915_gem_object *batch_obj;
-
-	/** Time at which this request was emitted, in jiffies. */
-	unsigned long emitted_jiffies;
-
-	/** global list entry for this request */
-	struct list_head list;
-
-	struct drm_i915_file_private *file_priv;
-	/** file_priv list entry for this request */
-	struct list_head client_list;
-
-	/** process identifier submitting this request */
-	struct pid *pid;
-
-	/**
-	 * The ELSP only accepts two elements at a time, so we queue
-	 * context/tail pairs on a given queue (ring->execlist_queue) until the
-	 * hardware is available. The queue serves a double purpose: we also use
-	 * it to keep track of the up to 2 contexts currently in the hardware
-	 * (usually one in execution and the other queued up by the GPU): We
-	 * only remove elements from the head of the queue when the hardware
-	 * informs us that an element has been completed.
-	 *
-	 * All accesses to the queue are mediated by a spinlock
-	 * (ring->execlist_lock).
-	 */
-
-	/** Execlist link in the submission queue.*/
-	struct list_head execlist_link;
-
-	/** Execlists no. of times this request has been sent to the ELSP */
-	int elsp_submitted;
-
-	/** Execlists context hardware id. */
-	unsigned ctx_hw_id;
-};
-
-struct drm_i915_gem_request * __must_check
-i915_gem_request_alloc(struct intel_engine_cs *engine,
-		       struct i915_gem_context *ctx);
-void i915_gem_request_free(struct kref *req_ref);
-int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
-				   struct drm_file *file);
-
-static inline uint32_t
-i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
-{
-	return req ? req->seqno : 0;
-}
-
-static inline struct intel_engine_cs *
-i915_gem_request_get_engine(struct drm_i915_gem_request *req)
-{
-	return req ? req->engine : NULL;
-}
-
-static inline struct drm_i915_gem_request *
-i915_gem_request_reference(struct drm_i915_gem_request *req)
-{
-	if (req)
-		kref_get(&req->ref);
-	return req;
-}
-
-static inline void
-i915_gem_request_unreference(struct drm_i915_gem_request *req)
-{
-	kref_put(&req->ref, i915_gem_request_free);
-}
-
-static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
-					   struct drm_i915_gem_request *src)
-{
-	if (src)
-		i915_gem_request_reference(src);
-
-	if (*pdst)
-		i915_gem_request_unreference(*pdst);
-
-	*pdst = src;
-}
-
-/*
- * XXX: i915_gem_request_completed should be here but currently needs the
- * definition of i915_seqno_passed() which is below. It will be moved in
- * a later patch when the call to i915_seqno_passed() is obsoleted...
- */
-
 /*
  * A command that requires special handling by the command parser.
  */
@@ -2626,8 +2565,9 @@
 /*
  * A table of commands requiring special handling by the command parser.
  *
- * Each ring has an array of tables. Each table consists of an array of command
- * descriptors, which must be sorted with command opcodes in ascending order.
+ * Each engine has an array of tables. Each table consists of an array of
+ * command descriptors, which must be sorted with command opcodes in
+ * ascending order.
  */
 struct drm_i915_cmd_table {
 	const struct drm_i915_cmd_descriptor *table;
@@ -2645,7 +2585,7 @@
 		BUILD_BUG(); \
 	__p; \
 })
-#define INTEL_INFO(p) 	(&__I915__(p)->info)
+#define INTEL_INFO(p)	(&__I915__(p)->info)
 #define INTEL_GEN(p)	(INTEL_INFO(p)->gen)
 #define INTEL_DEVID(p)	(INTEL_INFO(p)->device_id)
 
@@ -2812,10 +2752,10 @@
 #define HAS_EDRAM(dev)		(!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
 #define HAS_WT(dev)		((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
 				 HAS_EDRAM(dev))
-#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
+#define HWS_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->hws_needs_physical)
 
-#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
-#define HAS_LOGICAL_RING_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 8)
+#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->has_hw_contexts)
+#define HAS_LOGICAL_RING_CONTEXTS(dev)	(INTEL_INFO(dev)->has_logical_ring_contexts)
 #define USES_PPGTT(dev)		(i915.enable_ppgtt)
 #define USES_FULL_PPGTT(dev)	(i915.enable_ppgtt >= 2)
 #define USES_FULL_48BIT_PPGTT(dev)	(i915.enable_ppgtt == 3)
@@ -2839,7 +2779,7 @@
  * interrupt source and so prevents the other device from working properly.
  */
 #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->has_gmbus_irq)
 
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
@@ -2855,38 +2795,27 @@
 
 #define HAS_IPS(dev)		(IS_HSW_ULT(dev) || IS_BROADWELL(dev))
 
-#define HAS_DP_MST(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev) || \
-				 INTEL_INFO(dev)->gen >= 9)
+#define HAS_DP_MST(dev)	(INTEL_INFO(dev)->has_dp_mst)
 
 #define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
-#define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev) || \
-				 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
-				 IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
-#define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
-				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
-				 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
-				 IS_KABYLAKE(dev) || IS_BROXTON(dev))
-#define HAS_RC6(dev)		(INTEL_INFO(dev)->gen >= 6)
-#define HAS_RC6p(dev)		(IS_GEN6(dev) || IS_IVYBRIDGE(dev))
+#define HAS_PSR(dev)		(INTEL_INFO(dev)->has_psr)
+#define HAS_RUNTIME_PM(dev)	(INTEL_INFO(dev)->has_runtime_pm)
+#define HAS_RC6(dev)		(INTEL_INFO(dev)->has_rc6)
+#define HAS_RC6p(dev)		(INTEL_INFO(dev)->has_rc6p)
 
-#define HAS_CSR(dev)	(IS_GEN9(dev))
+#define HAS_CSR(dev)	(INTEL_INFO(dev)->has_csr)
 
 /*
  * For now, anything with a GuC requires uCode loading, and then supports
  * command submission once loaded. But these are logically independent
  * properties, so we have separate macros to test them.
  */
-#define HAS_GUC(dev)		(IS_GEN9(dev))
+#define HAS_GUC(dev)		(INTEL_INFO(dev)->has_guc)
 #define HAS_GUC_UCODE(dev)	(HAS_GUC(dev))
 #define HAS_GUC_SCHED(dev)	(HAS_GUC(dev))
 
-#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
-				    INTEL_INFO(dev)->gen >= 8)
-
-#define HAS_CORE_RING_FREQ(dev)	(INTEL_INFO(dev)->gen >= 6 && \
-				 !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \
-				 !IS_BROXTON(dev))
+#define HAS_RESOURCE_STREAMER(dev) (INTEL_INFO(dev)->has_resource_streamer)
 
 #define HAS_POOLED_EU(dev)	(INTEL_INFO(dev)->has_pooled_eu)
 
@@ -2914,11 +2843,10 @@
 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
 
-#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || \
-			       IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->has_gmch_display)
 
 /* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+#define HAS_L3_DPF(dev) (INTEL_INFO(dev)->has_l3_dpf)
 #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
 
 #define GT_FREQUENCY_MULTIPLIER 50
@@ -2939,7 +2867,9 @@
 extern int i915_resume_switcheroo(struct drm_device *dev);
 
 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
-			       	int enable_ppgtt);
+				int enable_ppgtt);
+
+bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value);
 
 /* i915_drv.c */
 void __printf(3, 4)
@@ -2955,7 +2885,7 @@
 #endif
 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
-extern int i915_reset(struct drm_i915_private *dev_priv);
+extern void i915_reset(struct drm_i915_private *dev_priv);
 extern int intel_guc_reset(struct drm_i915_private *dev_priv);
 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -3116,11 +3046,6 @@
 			      struct drm_file *file_priv);
 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
-void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-					struct drm_i915_gem_request *req);
-int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
-				   struct drm_i915_gem_execbuffer2 *args,
-				   struct list_head *vmas);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
@@ -3149,6 +3074,7 @@
 void i915_gem_load_init(struct drm_device *dev);
 void i915_gem_load_cleanup(struct drm_device *dev);
 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
+int i915_gem_freeze(struct drm_i915_private *dev_priv);
 int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
 
 void *i915_gem_object_alloc(struct drm_device *dev);
@@ -3159,47 +3085,28 @@
 						  size_t size);
 struct drm_i915_gem_object *i915_gem_object_create_from_data(
 		struct drm_device *dev, const void *data, size_t size);
+void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
 void i915_gem_free_object(struct drm_gem_object *obj);
-void i915_gem_vma_destroy(struct i915_vma *vma);
 
-/* Flags used by pin/bind&friends. */
-#define PIN_MAPPABLE	(1<<0)
-#define PIN_NONBLOCK	(1<<1)
-#define PIN_GLOBAL	(1<<2)
-#define PIN_OFFSET_BIAS	(1<<3)
-#define PIN_USER	(1<<4)
-#define PIN_UPDATE	(1<<5)
-#define PIN_ZONE_4G	(1<<6)
-#define PIN_HIGH	(1<<7)
-#define PIN_OFFSET_FIXED	(1<<8)
-#define PIN_OFFSET_MASK (~4095)
-int __must_check
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-		    struct i915_address_space *vm,
-		    uint32_t alignment,
-		    uint64_t flags);
-int __must_check
+struct i915_vma * __must_check
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
-			 uint32_t alignment,
-			 uint64_t flags);
+			 u64 size,
+			 u64 alignment,
+			 u64 flags);
 
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 		  u32 flags);
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
-/*
- * BEWARE: Do not use the function below unless you can _absolutely_
- * _guarantee_ VMA in question is _not in use_ anywhere.
- */
-int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
+void i915_vma_close(struct i915_vma *vma);
+void i915_vma_destroy(struct i915_vma *vma);
+
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 
-int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
-				    int *needs_clflush);
-
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
 static inline int __sg_page_count(struct scatterlist *sg)
@@ -3259,13 +3166,20 @@
 	obj->pages_pin_count--;
 }
 
+enum i915_map_type {
+	I915_MAP_WB = 0,
+	I915_MAP_WC,
+};
+
 /**
  * i915_gem_object_pin_map - return a contiguous mapping of the entire object
  * @obj - the object to map into kernel address space
+ * @type - the type of mapping, used to select pgprot_t
  *
  * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
  * pages and then returns a contiguous mapping of the backing storage into
- * the kernel address space.
+ * the kernel address space. Based on the @type of mapping, the PTE will be
+ * set to either WriteBack or WriteCombine (via pgprot_t).
  *
  * The caller must hold the struct_mutex, and is responsible for calling
  * i915_gem_object_unpin_map() when the mapping is no longer required.
@@ -3273,7 +3187,8 @@
  * Returns the pointer through which to access the mapped object, or an
  * ERR_PTR() on error.
  */
-void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);
+void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
+					   enum i915_map_type type);
 
 /**
  * i915_gem_object_unpin_map - releases an earlier mapping
@@ -3292,122 +3207,73 @@
 	i915_gem_object_unpin_pages(obj);
 }
 
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+				    unsigned int *needs_clflush);
+int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
+				     unsigned int *needs_clflush);
+#define CLFLUSH_BEFORE 0x1
+#define CLFLUSH_AFTER 0x2
+#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
+
+static inline void
+i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_unpin_pages(obj);
+}
+
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int i915_gem_object_sync(struct drm_i915_gem_object *obj,
-			 struct intel_engine_cs *to,
-			 struct drm_i915_gem_request **to_req);
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct drm_i915_gem_request *req);
+			     struct drm_i915_gem_request *req,
+			     unsigned int flags);
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
 		      uint32_t handle, uint64_t *offset);
+int i915_gem_mmap_gtt_version(void);
 
 void i915_gem_track_fb(struct drm_i915_gem_object *old,
 		       struct drm_i915_gem_object *new,
 		       unsigned frontbuffer_bits);
 
-/**
- * Returns true if seq1 is later than seq2.
- */
-static inline bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
-{
-	return (int32_t)(seq1 - seq2) >= 0;
-}
-
-static inline bool i915_gem_request_started(const struct drm_i915_gem_request *req)
-{
-	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
-				 req->previous_seqno);
-}
-
-static inline bool i915_gem_request_completed(const struct drm_i915_gem_request *req)
-{
-	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
-				 req->seqno);
-}
-
-bool __i915_spin_request(const struct drm_i915_gem_request *request,
-			 int state, unsigned long timeout_us);
-static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
-				     int state, unsigned long timeout_us)
-{
-	return (i915_gem_request_started(request) &&
-		__i915_spin_request(request, state, timeout_us));
-}
-
-int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine);
 
 void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
-void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
-
-static inline u32 i915_reset_counter(struct i915_gpu_error *error)
-{
-	return atomic_read(&error->reset_counter);
-}
-
-static inline bool __i915_reset_in_progress(u32 reset)
-{
-	return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
-}
-
-static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
-{
-	return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
-}
-
-static inline bool __i915_terminally_wedged(u32 reset)
-{
-	return unlikely(reset & I915_WEDGED);
-}
 
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
-	return __i915_reset_in_progress(i915_reset_counter(error));
-}
-
-static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
-{
-	return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
+	return unlikely(test_bit(I915_RESET_IN_PROGRESS, &error->flags));
 }
 
 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
 {
-	return __i915_terminally_wedged(i915_reset_counter(error));
+	return unlikely(test_bit(I915_WEDGED, &error->flags));
+}
+
+static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
+{
+	return i915_reset_in_progress(error) | i915_terminally_wedged(error);
 }
 
 static inline u32 i915_reset_count(struct i915_gpu_error *error)
 {
-	return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
+	return READ_ONCE(error->reset_count);
 }
 
-void i915_gem_reset(struct drm_device *dev);
+void i915_gem_reset(struct drm_i915_private *dev_priv);
+void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
-int i915_gem_init_engines(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_engines(struct drm_device *dev);
-int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
+int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
+					unsigned int flags);
 int __must_check i915_gem_suspend(struct drm_device *dev);
-void __i915_add_request(struct drm_i915_gem_request *req,
-			struct drm_i915_gem_object *batch_obj,
-			bool flush_caches);
-#define i915_add_request(req) \
-	__i915_add_request(req, NULL, true)
-#define i915_add_request_no_flush(req) \
-	__i915_add_request(req, NULL, false)
-int __i915_wait_request(struct drm_i915_gem_request *req,
-			bool interruptible,
-			s64 *timeout,
-			struct intel_rps_client *rps);
-int __must_check i915_wait_request(struct drm_i915_gem_request *req);
+void i915_gem_resume(struct drm_device *dev);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
@@ -3417,22 +3283,20 @@
 				  bool write);
 int __must_check
 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
-int __must_check
+struct i915_vma * __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     const struct i915_ggtt_view *view);
-void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
-					      const struct i915_ggtt_view *view);
+void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 				int align);
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
-uint32_t
-i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
-uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
-			    int tiling_mode, bool fenced);
+u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size,
+			   int tiling_mode);
+u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
+				int tiling_mode, bool fenced);
 
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level);
@@ -3443,85 +3307,81 @@
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				struct drm_gem_object *gem_obj, int flags);
 
-u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
-				  const struct i915_ggtt_view *view);
-u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
-			struct i915_address_space *vm);
-static inline u64
-i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
-{
-	return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
-}
-
-bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
-bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
-				  const struct i915_ggtt_view *view);
-bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
-			struct i915_address_space *vm);
-
 struct i915_vma *
 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-		    struct i915_address_space *vm);
-struct i915_vma *
-i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
-			  const struct i915_ggtt_view *view);
+		     struct i915_address_space *vm,
+		     const struct i915_ggtt_view *view);
 
 struct i915_vma *
 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-				  struct i915_address_space *vm);
-struct i915_vma *
-i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
-				       const struct i915_ggtt_view *view);
+				  struct i915_address_space *vm,
+				  const struct i915_ggtt_view *view);
 
-static inline struct i915_vma *
-i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
-{
-	return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
-}
-bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
-
-/* Some GGTT VM helpers */
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
 {
 	return container_of(vm, struct i915_hw_ppgtt, base);
 }
 
-
-static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
+static inline struct i915_vma *
+i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
+			const struct i915_ggtt_view *view)
 {
-	return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
+	return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
 }
 
-unsigned long
-i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
-
-static inline int __must_check
-i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
-		      uint32_t alignment,
-		      unsigned flags)
+static inline unsigned long
+i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
+			    const struct i915_ggtt_view *view)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
-	return i915_gem_object_pin(obj, &ggtt->base,
-				   alignment, flags | PIN_GLOBAL);
-}
-
-void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
-				     const struct i915_ggtt_view *view);
-static inline void
-i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
-{
-	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
+	return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
 }
 
 /* i915_gem_fence.c */
-int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+int __must_check i915_vma_get_fence(struct i915_vma *vma);
+int __must_check i915_vma_put_fence(struct i915_vma *vma);
 
-bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
-void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
+/**
+ * i915_vma_pin_fence - pin fencing state
+ * @vma: vma to pin fencing for
+ *
+ * This pins the fencing state (whether tiled or untiled) to make sure the
+ * vma (and its object) is ready to be used as a scanout target. Fencing
+ * status must be synchronize first by calling i915_vma_get_fence():
+ *
+ * The resulting fence pin reference must be released again with
+ * i915_vma_unpin_fence().
+ *
+ * Returns:
+ *
+ * True if the vma has a fence, false otherwise.
+ */
+static inline bool
+i915_vma_pin_fence(struct i915_vma *vma)
+{
+	if (vma->fence) {
+		vma->fence->pin_count++;
+		return true;
+	} else
+		return false;
+}
+
+/**
+ * i915_vma_unpin_fence - unpin fencing state
+ * @vma: vma to unpin fencing for
+ *
+ * This releases the fence pin reference acquired through
+ * i915_vma_pin_fence. It will handle both objects with and without an
+ * attached fence correctly, callers do not need to distinguish this.
+ */
+static inline void
+i915_vma_unpin_fence(struct i915_vma *vma)
+{
+	if (vma->fence) {
+		GEM_BUG_ON(vma->fence->pin_count <= 0);
+		vma->fence->pin_count--;
+	}
+}
 
 void i915_gem_restore_fences(struct drm_device *dev);
 
@@ -3533,10 +3393,10 @@
 int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_lost(struct drm_i915_private *dev_priv);
 void i915_gem_context_fini(struct drm_device *dev);
-void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct drm_i915_gem_request *req);
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
 void i915_gem_context_free(struct kref *ctx_ref);
 struct drm_i915_gem_object *
 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
@@ -3557,12 +3417,14 @@
 	return ctx;
 }
 
-static inline void i915_gem_context_reference(struct i915_gem_context *ctx)
+static inline struct i915_gem_context *
+i915_gem_context_get(struct i915_gem_context *ctx)
 {
 	kref_get(&ctx->ref);
+	return ctx;
 }
 
-static inline void i915_gem_context_unreference(struct i915_gem_context *ctx)
+static inline void i915_gem_context_put(struct i915_gem_context *ctx)
 {
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	kref_put(&ctx->ref, i915_gem_context_free);
@@ -3585,13 +3447,10 @@
 				       struct drm_file *file);
 
 /* i915_gem_evict.c */
-int __must_check i915_gem_evict_something(struct drm_device *dev,
-					  struct i915_address_space *vm,
-					  int min_size,
-					  unsigned alignment,
+int __must_check i915_gem_evict_something(struct i915_address_space *vm,
+					  u64 min_size, u64 alignment,
 					  unsigned cache_level,
-					  unsigned long start,
-					  unsigned long end,
+					  u64 start, u64 end,
 					  unsigned flags);
 int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
@@ -3644,28 +3503,21 @@
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
 	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
-		obj->tiling_mode != I915_TILING_NONE;
+		i915_gem_object_is_tiled(obj);
 }
 
-/* i915_gem_debug.c */
-#if WATCH_LISTS
-int i915_verify_lists(struct drm_device *dev);
-#else
-#define i915_verify_lists(dev) 0
-#endif
-
 /* i915_debugfs.c */
 #ifdef CONFIG_DEBUG_FS
 int i915_debugfs_register(struct drm_i915_private *dev_priv);
 void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
 int i915_debugfs_connector_add(struct drm_connector *connector);
-void intel_display_crc_init(struct drm_device *dev);
+void intel_display_crc_init(struct drm_i915_private *dev_priv);
 #else
 static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
 static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}
 static inline int i915_debugfs_connector_add(struct drm_connector *connector)
 { return 0; }
-static inline void intel_display_crc_init(struct drm_device *dev) {}
+static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
 #endif
 
 /* i915_gpu_error.c */
@@ -3694,23 +3546,23 @@
 
 /* i915_cmd_parser.c */
 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
-int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
-bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
-int i915_parse_cmds(struct intel_engine_cs *engine,
-		    struct drm_i915_gem_object *batch_obj,
-		    struct drm_i915_gem_object *shadow_batch_obj,
-		    u32 batch_start_offset,
-		    u32 batch_len,
-		    bool is_master);
+void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
+void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
+bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine);
+int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+			    struct drm_i915_gem_object *batch_obj,
+			    struct drm_i915_gem_object *shadow_batch_obj,
+			    u32 batch_start_offset,
+			    u32 batch_len,
+			    bool is_master);
 
 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
 
 /* i915_sysfs.c */
-void i915_setup_sysfs(struct drm_device *dev_priv);
-void i915_teardown_sysfs(struct drm_device *dev_priv);
+void i915_setup_sysfs(struct drm_i915_private *dev_priv);
+void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
 
 /* intel_i2c.c */
 extern int intel_setup_gmbus(struct drm_device *dev);
@@ -3810,7 +3662,6 @@
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
 				  bool enable);
 
-extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);
 
@@ -3888,9 +3739,16 @@
  * will be implemented using 2 32-bit writes in an arbitrary order with
  * an arbitrary delay between them. This can cause the hardware to
  * act upon the intermediate value, possibly leading to corruption and
- * machine death. You have been warned.
+ * machine death. For this reason we do not support I915_WRITE64, or
+ * dev_priv->uncore.funcs.mmio_writeq.
+ *
+ * When reading a 64-bit value as two 32-bit values, the delay may cause
+ * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
+ * occasionally a 64-bit register does not actualy support a full readq
+ * and must be read using two 32-bit reads.
+ *
+ * You have been warned.
  */
-#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
 #define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
 
 #define I915_READ64_2x32(lower_reg, upper_reg) ({			\
@@ -3933,7 +3791,7 @@
 #undef __raw_write
 
 /* These are untraced mmio-accessors that are only valid to be used inside
- * criticial sections inside IRQ handlers where forcewake is explicitly
+ * critical sections inside IRQ handlers where forcewake is explicitly
  * controlled.
  * Think twice, and think again, before using these.
  * Note: Should only be used between intel_uncore_forcewake_irqlock() and
@@ -4005,7 +3863,9 @@
 			    schedule_timeout_uninterruptible(remaining_jiffies);
 	}
 }
-static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
+
+static inline bool
+__i915_request_irq_complete(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
 
@@ -4027,7 +3887,7 @@
 	 * is woken.
 	 */
 	if (engine->irq_seqno_barrier &&
-	    READ_ONCE(engine->breadcrumbs.irq_seqno_bh) == current &&
+	    rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current &&
 	    cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
 		struct task_struct *tsk;
 
@@ -4052,7 +3912,7 @@
 		 * irq_posted == false but we are still running).
 		 */
 		rcu_read_lock();
-		tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
 		if (tsk && tsk != current)
 			/* Note that if the bottom-half is changed as we
 			 * are sending the wake-up, the new bottom-half will
@@ -4067,18 +3927,35 @@
 			return true;
 	}
 
-	/* We need to check whether any gpu reset happened in between
-	 * the request being submitted and now. If a reset has occurred,
-	 * the seqno will have been advance past ours and our request
-	 * is complete. If we are in the process of handling a reset,
-	 * the request is effectively complete as the rendering will
-	 * be discarded, but we need to return in order to drop the
-	 * struct_mutex.
-	 */
-	if (i915_reset_in_progress(&req->i915->gpu_error))
-		return true;
-
 	return false;
 }
 
+void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
+bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
+
+/* i915_mm.c */
+int remap_io_mapping(struct vm_area_struct *vma,
+		     unsigned long addr, unsigned long pfn, unsigned long size,
+		     struct io_mapping *iomap);
+
+#define ptr_mask_bits(ptr) ({						\
+	unsigned long __v = (unsigned long)(ptr);			\
+	(typeof(ptr))(__v & PAGE_MASK);					\
+})
+
+#define ptr_unpack_bits(ptr, bits) ({					\
+	unsigned long __v = (unsigned long)(ptr);			\
+	(bits) = __v & ~PAGE_MASK;					\
+	(typeof(ptr))(__v & PAGE_MASK);					\
+})
+
+#define ptr_pack_bits(ptr, bits)					\
+	((typeof(ptr))((unsigned long)(ptr) | (bits)))
+
+#define fetch_and_zero(ptr) ({						\
+	typeof(*ptr) __T = *(ptr);					\
+	*(ptr) = (typeof(*ptr))0;					\
+	__T;								\
+})
+
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a77ce99..947e82c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,10 +29,13 @@
 #include <drm/drm_vma_manager.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
 #include "i915_vgpu.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 #include "intel_mocs.h"
+#include <linux/reservation.h>
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
@@ -41,10 +44,6 @@
 
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static void
-i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
-static void
-i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
 				  enum i915_cache_level level)
@@ -139,7 +138,6 @@
 	if (ret)
 		return ret;
 
-	WARN_ON(i915_verify_lists(dev));
 	return 0;
 }
 
@@ -156,10 +154,10 @@
 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
-		if (vma->pin_count)
+		if (i915_vma_is_pinned(vma))
 			pinned += vma->node.size;
 	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
-		if (vma->pin_count)
+		if (i915_vma_is_pinned(vma))
 			pinned += vma->node.size;
 	mutex_unlock(&dev->struct_mutex);
 
@@ -281,23 +279,129 @@
 	.release = i915_gem_object_release_phys,
 };
 
-static int
-drop_pages(struct drm_i915_gem_object *obj)
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 {
-	struct i915_vma *vma, *next;
+	struct i915_vma *vma;
+	LIST_HEAD(still_in_list);
 	int ret;
 
-	drm_gem_object_reference(&obj->base);
-	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
-		if (i915_vma_unbind(vma))
-			break;
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-	ret = i915_gem_object_put_pages(obj);
-	drm_gem_object_unreference(&obj->base);
+	/* Closed vma are removed from the obj->vma_list - but they may
+	 * still have an active binding on the object. To remove those we
+	 * must wait for all rendering to complete to the object (as unbinding
+	 * must anyway), and retire the requests.
+	 */
+	ret = i915_gem_object_wait_rendering(obj, false);
+	if (ret)
+		return ret;
+
+	i915_gem_retire_requests(to_i915(obj->base.dev));
+
+	while ((vma = list_first_entry_or_null(&obj->vma_list,
+					       struct i915_vma,
+					       obj_link))) {
+		list_move_tail(&vma->obj_link, &still_in_list);
+		ret = i915_vma_unbind(vma);
+		if (ret)
+			break;
+	}
+	list_splice(&still_in_list, &obj->vma_list);
 
 	return ret;
 }
 
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ * @obj: i915 gem object
+ * @readonly: waiting for just read access or read-write access
+ */
+int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+			       bool readonly)
+{
+	struct reservation_object *resv;
+	struct i915_gem_active *active;
+	unsigned long active_mask;
+	int idx;
+
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+	if (!readonly) {
+		active = obj->last_read;
+		active_mask = i915_gem_object_get_active(obj);
+	} else {
+		active_mask = 1;
+		active = &obj->last_write;
+	}
+
+	for_each_active(active_mask, idx) {
+		int ret;
+
+		ret = i915_gem_active_wait(&active[idx],
+					   &obj->base.dev->struct_mutex);
+		if (ret)
+			return ret;
+	}
+
+	resv = i915_gem_object_get_dmabuf_resv(obj);
+	if (resv) {
+		long err;
+
+		err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
+							  MAX_SCHEDULE_TIMEOUT);
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+
+/* A nonblocking variant of the above wait. Must be called prior to
+ * acquiring the mutex for the object, as the object state may change
+ * during this call. A reference must be held by the caller for the object.
+ */
+static __must_check int
+__unsafe_wait_rendering(struct drm_i915_gem_object *obj,
+			struct intel_rps_client *rps,
+			bool readonly)
+{
+	struct i915_gem_active *active;
+	unsigned long active_mask;
+	int idx;
+
+	active_mask = __I915_BO_ACTIVE(obj);
+	if (!active_mask)
+		return 0;
+
+	if (!readonly) {
+		active = obj->last_read;
+	} else {
+		active_mask = 1;
+		active = &obj->last_write;
+	}
+
+	for_each_active(active_mask, idx) {
+		int ret;
+
+		ret = i915_gem_active_wait_unlocked(&active[idx],
+						    I915_WAIT_INTERRUPTIBLE,
+						    NULL, rps);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static struct intel_rps_client *to_rps_client(struct drm_file *file)
+{
+	struct drm_i915_file_private *fpriv = file->driver_priv;
+
+	return &fpriv->rps;
+}
+
 int
 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 			    int align)
@@ -318,7 +422,11 @@
 	if (obj->base.filp == NULL)
 		return -EINVAL;
 
-	ret = drop_pages(obj);
+	ret = i915_gem_object_unbind(obj);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_object_put_pages(obj);
 	if (ret)
 		return ret;
 
@@ -408,7 +516,7 @@
 
 	ret = drm_gem_handle_create(file, &obj->base, &handle);
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference_unlocked(&obj->base);
+	i915_gem_object_put_unlocked(obj);
 	if (ret)
 		return ret;
 
@@ -502,26 +610,18 @@
  * flush the object from the CPU cache.
  */
 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
-				    int *needs_clflush)
+				    unsigned int *needs_clflush)
 {
 	int ret;
 
 	*needs_clflush = 0;
 
-	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
-		return -EINVAL;
+	if (!i915_gem_object_has_struct_page(obj))
+		return -ENODEV;
 
-	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
-		/* If we're not in the cpu read domain, set ourself into the gtt
-		 * read domain and manually flush cachelines (if required). This
-		 * optimizes for the case when the gpu will dirty the data
-		 * anyway again before the next pread happens. */
-		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
-							obj->cache_level);
-		ret = i915_gem_object_wait_rendering(obj, true);
-		if (ret)
-			return ret;
-	}
+	ret = i915_gem_object_wait_rendering(obj, true);
+	if (ret)
+		return ret;
 
 	ret = i915_gem_object_get_pages(obj);
 	if (ret)
@@ -529,6 +629,87 @@
 
 	i915_gem_object_pin_pages(obj);
 
+	i915_gem_object_flush_gtt_write_domain(obj);
+
+	/* If we're not in the cpu read domain, set ourself into the gtt
+	 * read domain and manually flush cachelines (if required). This
+	 * optimizes for the case when the gpu will dirty the data
+	 * anyway again before the next pread happens.
+	 */
+	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
+							obj->cache_level);
+
+	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+		ret = i915_gem_object_set_to_cpu_domain(obj, false);
+		if (ret)
+			goto err_unpin;
+
+		*needs_clflush = 0;
+	}
+
+	/* return with the pages pinned */
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+	return ret;
+}
+
+int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
+				     unsigned int *needs_clflush)
+{
+	int ret;
+
+	*needs_clflush = 0;
+	if (!i915_gem_object_has_struct_page(obj))
+		return -ENODEV;
+
+	ret = i915_gem_object_wait_rendering(obj, false);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
+	i915_gem_object_flush_gtt_write_domain(obj);
+
+	/* If we're not in the cpu write domain, set ourself into the
+	 * gtt write domain and manually flush cachelines (as required).
+	 * This optimizes for the case when the gpu will use the data
+	 * right away and we therefore have to clflush anyway.
+	 */
+	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
+		*needs_clflush |= cpu_write_needs_clflush(obj) << 1;
+
+	/* Same trick applies to invalidate partially written cachelines read
+	 * before writing.
+	 */
+	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+		*needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
+							 obj->cache_level);
+
+	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+		ret = i915_gem_object_set_to_cpu_domain(obj, true);
+		if (ret)
+			goto err_unpin;
+
+		*needs_clflush = 0;
+	}
+
+	if ((*needs_clflush & CLFLUSH_AFTER) == 0)
+		obj->cache_dirty = true;
+
+	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+	obj->dirty = 1;
+	/* return with the pages pinned */
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
 	return ret;
 }
 
@@ -638,14 +819,24 @@
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_vma *vma;
 	struct drm_mm_node node;
 	char __user *user_data;
 	uint64_t remain;
 	uint64_t offset;
 	int ret;
 
-	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
-	if (ret) {
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+	if (!IS_ERR(vma)) {
+		node.start = i915_ggtt_offset(vma);
+		node.allocated = false;
+		ret = i915_vma_put_fence(vma);
+		if (ret) {
+			i915_vma_unpin(vma);
+			vma = ERR_PTR(ret);
+		}
+	}
+	if (IS_ERR(vma)) {
 		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
 		if (ret)
 			goto out;
@@ -657,12 +848,6 @@
 		}
 
 		i915_gem_object_pin_pages(obj);
-	} else {
-		node.start = i915_gem_obj_ggtt_offset(obj);
-		node.allocated = false;
-		ret = i915_gem_object_put_fence(obj);
-		if (ret)
-			goto out_unpin;
 	}
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -675,7 +860,7 @@
 
 	mutex_unlock(&dev->struct_mutex);
 	if (likely(!i915.prefault_disable)) {
-		ret = fault_in_multipages_writeable(user_data, remain);
+		ret = fault_in_pages_writeable(user_data, remain);
 		if (ret) {
 			mutex_lock(&dev->struct_mutex);
 			goto out_unpin;
@@ -707,7 +892,7 @@
 		 * and write to user memory which may result into page
 		 * faults, and so we cannot perform this under struct_mutex.
 		 */
-		if (slow_user_access(ggtt->mappable, page_base,
+		if (slow_user_access(&ggtt->mappable, page_base,
 				     page_offset, user_data,
 				     page_length, false)) {
 			ret = -EFAULT;
@@ -739,7 +924,7 @@
 		i915_gem_object_unpin_pages(obj);
 		remove_mappable_node(&node);
 	} else {
-		i915_gem_object_ggtt_unpin(obj);
+		i915_vma_unpin(vma);
 	}
 out:
 	return ret;
@@ -760,19 +945,14 @@
 	int needs_clflush = 0;
 	struct sg_page_iter sg_iter;
 
-	if (!i915_gem_object_has_struct_page(obj))
-		return -ENODEV;
-
-	user_data = u64_to_user_ptr(args->data_ptr);
-	remain = args->size;
-
-	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
 	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
 	if (ret)
 		return ret;
 
+	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+	user_data = u64_to_user_ptr(args->data_ptr);
 	offset = args->offset;
+	remain = args->size;
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
 			 offset >> PAGE_SHIFT) {
@@ -803,7 +983,7 @@
 		mutex_unlock(&dev->struct_mutex);
 
 		if (likely(!i915.prefault_disable) && !prefaulted) {
-			ret = fault_in_multipages_writeable(user_data, remain);
+			ret = fault_in_pages_writeable(user_data, remain);
 			/* Userspace is tricking us, but we've already clobbered
 			 * its pages with the prefault and promised to write the
 			 * data up to the first fault. Hence ignore any errors
@@ -828,7 +1008,7 @@
 	}
 
 out:
-	i915_gem_object_unpin_pages(obj);
+	i915_gem_obj_finish_shmem_access(obj);
 
 	return ret;
 }
@@ -857,25 +1037,27 @@
 		       args->size))
 		return -EFAULT;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
-
-	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-	if (&obj->base == NULL) {
-		ret = -ENOENT;
-		goto unlock;
-	}
+	obj = i915_gem_object_lookup(file, args->handle);
+	if (!obj)
+		return -ENOENT;
 
 	/* Bounds check source.  */
 	if (args->offset > obj->base.size ||
 	    args->size > obj->base.size - args->offset) {
 		ret = -EINVAL;
-		goto out;
+		goto err;
 	}
 
 	trace_i915_gem_object_pread(obj, args->offset, args->size);
 
+	ret = __unsafe_wait_rendering(obj, to_rps_client(file), true);
+	if (ret)
+		goto err;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto err;
+
 	ret = i915_gem_shmem_pread(dev, obj, args, file);
 
 	/* pread for non shmem backed objects */
@@ -886,10 +1068,13 @@
 		intel_runtime_pm_put(to_i915(dev));
 	}
 
-out:
-	drm_gem_object_unreference(&obj->base);
-unlock:
+	i915_gem_object_put(obj);
 	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+
+err:
+	i915_gem_object_put_unlocked(obj);
 	return ret;
 }
 
@@ -919,7 +1104,7 @@
 /**
  * This is the fast pwrite path, where we copy the data directly from the
  * user into the GTT, uncached.
- * @dev: drm device pointer
+ * @i915: i915 device private data
  * @obj: i915 gem object
  * @args: pwrite arguments structure
  * @file: drm file pointer
@@ -932,17 +1117,28 @@
 {
 	struct i915_ggtt *ggtt = &i915->ggtt;
 	struct drm_device *dev = obj->base.dev;
+	struct i915_vma *vma;
 	struct drm_mm_node node;
 	uint64_t remain, offset;
 	char __user *user_data;
 	int ret;
 	bool hit_slow_path = false;
 
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (i915_gem_object_is_tiled(obj))
 		return -EFAULT;
 
-	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
-	if (ret) {
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+				       PIN_MAPPABLE | PIN_NONBLOCK);
+	if (!IS_ERR(vma)) {
+		node.start = i915_ggtt_offset(vma);
+		node.allocated = false;
+		ret = i915_vma_put_fence(vma);
+		if (ret) {
+			i915_vma_unpin(vma);
+			vma = ERR_PTR(ret);
+		}
+	}
+	if (IS_ERR(vma)) {
 		ret = insert_mappable_node(i915, &node, PAGE_SIZE);
 		if (ret)
 			goto out;
@@ -954,19 +1150,13 @@
 		}
 
 		i915_gem_object_pin_pages(obj);
-	} else {
-		node.start = i915_gem_obj_ggtt_offset(obj);
-		node.allocated = false;
-		ret = i915_gem_object_put_fence(obj);
-		if (ret)
-			goto out_unpin;
 	}
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (ret)
 		goto out_unpin;
 
-	intel_fb_obj_invalidate(obj, ORIGIN_GTT);
+	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 	obj->dirty = true;
 
 	user_data = u64_to_user_ptr(args->data_ptr);
@@ -998,11 +1188,11 @@
 		 * If the object is non-shmem backed, we retry again with the
 		 * path that handles page fault.
 		 */
-		if (fast_user_write(ggtt->mappable, page_base,
+		if (fast_user_write(&ggtt->mappable, page_base,
 				    page_offset, user_data, page_length)) {
 			hit_slow_path = true;
 			mutex_unlock(&dev->struct_mutex);
-			if (slow_user_access(ggtt->mappable,
+			if (slow_user_access(&ggtt->mappable,
 					     page_base,
 					     page_offset, user_data,
 					     page_length, true)) {
@@ -1033,7 +1223,7 @@
 		}
 	}
 
-	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
+	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 out_unpin:
 	if (node.allocated) {
 		wmb();
@@ -1043,7 +1233,7 @@
 		i915_gem_object_unpin_pages(obj);
 		remove_mappable_node(&node);
 	} else {
-		i915_gem_object_ggtt_unpin(obj);
+		i915_vma_unpin(vma);
 	}
 out:
 	return ret;
@@ -1126,41 +1316,17 @@
 	int shmem_page_offset, page_length, ret = 0;
 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
 	int hit_slowpath = 0;
-	int needs_clflush_after = 0;
-	int needs_clflush_before = 0;
+	unsigned int needs_clflush;
 	struct sg_page_iter sg_iter;
 
-	user_data = u64_to_user_ptr(args->data_ptr);
-	remain = args->size;
-
-	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
-	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
-		/* If we're not in the cpu write domain, set ourself into the gtt
-		 * write domain and manually flush cachelines (if required). This
-		 * optimizes for the case when the gpu will use the data
-		 * right away and we therefore have to clflush anyway. */
-		needs_clflush_after = cpu_write_needs_clflush(obj);
-		ret = i915_gem_object_wait_rendering(obj, false);
-		if (ret)
-			return ret;
-	}
-	/* Same trick applies to invalidate partially written cachelines read
-	 * before writing. */
-	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
-		needs_clflush_before =
-			!cpu_cache_is_coherent(dev, obj->cache_level);
-
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
 	if (ret)
 		return ret;
 
-	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-
-	i915_gem_object_pin_pages(obj);
-
+	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+	user_data = u64_to_user_ptr(args->data_ptr);
 	offset = args->offset;
-	obj->dirty = 1;
+	remain = args->size;
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
 			 offset >> PAGE_SHIFT) {
@@ -1184,7 +1350,7 @@
 		/* If we don't overwrite a cacheline completely we need to be
 		 * careful to have up-to-date data by first clflushing. Don't
 		 * overcomplicate things and flush the entire patch. */
-		partial_cacheline_write = needs_clflush_before &&
+		partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE &&
 			((shmem_page_offset | page_length)
 				& (boot_cpu_data.x86_clflush_size - 1));
 
@@ -1194,7 +1360,7 @@
 		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
 					user_data, page_do_bit17_swizzling,
 					partial_cacheline_write,
-					needs_clflush_after);
+					needs_clflush & CLFLUSH_AFTER);
 		if (ret == 0)
 			goto next_page;
 
@@ -1203,7 +1369,7 @@
 		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
 					user_data, page_do_bit17_swizzling,
 					partial_cacheline_write,
-					needs_clflush_after);
+					needs_clflush & CLFLUSH_AFTER);
 
 		mutex_lock(&dev->struct_mutex);
 
@@ -1217,7 +1383,7 @@
 	}
 
 out:
-	i915_gem_object_unpin_pages(obj);
+	i915_gem_obj_finish_shmem_access(obj);
 
 	if (hit_slowpath) {
 		/*
@@ -1225,17 +1391,15 @@
 		 * cachelines in-line while writing and the object moved
 		 * out of the cpu write domain while we've dropped the lock.
 		 */
-		if (!needs_clflush_after &&
+		if (!(needs_clflush & CLFLUSH_AFTER) &&
 		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 			if (i915_gem_clflush_object(obj, obj->pin_display))
-				needs_clflush_after = true;
+				needs_clflush |= CLFLUSH_AFTER;
 		}
 	}
 
-	if (needs_clflush_after)
+	if (needs_clflush & CLFLUSH_AFTER)
 		i915_gem_chipset_flush(to_i915(dev));
-	else
-		obj->cache_dirty = true;
 
 	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 	return ret;
@@ -1267,33 +1431,35 @@
 		return -EFAULT;
 
 	if (likely(!i915.prefault_disable)) {
-		ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
+		ret = fault_in_pages_readable(u64_to_user_ptr(args->data_ptr),
 						   args->size);
 		if (ret)
 			return -EFAULT;
 	}
 
-	intel_runtime_pm_get(dev_priv);
-
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto put_rpm;
-
-	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-	if (&obj->base == NULL) {
-		ret = -ENOENT;
-		goto unlock;
-	}
+	obj = i915_gem_object_lookup(file, args->handle);
+	if (!obj)
+		return -ENOENT;
 
 	/* Bounds check destination. */
 	if (args->offset > obj->base.size ||
 	    args->size > obj->base.size - args->offset) {
 		ret = -EINVAL;
-		goto out;
+		goto err;
 	}
 
 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
+	ret = __unsafe_wait_rendering(obj, to_rps_client(file), false);
+	if (ret)
+		goto err;
+
+	intel_runtime_pm_get(dev_priv);
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto err_rpm;
+
 	ret = -EFAULT;
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
 	 * it would end up going through the fenced access, and we'll get
@@ -1312,505 +1478,28 @@
 	if (ret == -EFAULT || ret == -ENOSPC) {
 		if (obj->phys_handle)
 			ret = i915_gem_phys_pwrite(obj, args, file);
-		else if (i915_gem_object_has_struct_page(obj))
-			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
 		else
-			ret = -ENODEV;
+			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
 	}
 
-out:
-	drm_gem_object_unreference(&obj->base);
-unlock:
+	i915_gem_object_put(obj);
 	mutex_unlock(&dev->struct_mutex);
-put_rpm:
 	intel_runtime_pm_put(dev_priv);
 
 	return ret;
-}
 
-static int
-i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
-{
-	if (__i915_terminally_wedged(reset_counter))
-		return -EIO;
-
-	if (__i915_reset_in_progress(reset_counter)) {
-		/* Non-interruptible callers can't handle -EAGAIN, hence return
-		 * -EIO unconditionally for these. */
-		if (!interruptible)
-			return -EIO;
-
-		return -EAGAIN;
-	}
-
-	return 0;
-}
-
-static unsigned long local_clock_us(unsigned *cpu)
-{
-	unsigned long t;
-
-	/* Cheaply and approximately convert from nanoseconds to microseconds.
-	 * The result and subsequent calculations are also defined in the same
-	 * approximate microseconds units. The principal source of timing
-	 * error here is from the simple truncation.
-	 *
-	 * Note that local_clock() is only defined wrt to the current CPU;
-	 * the comparisons are no longer valid if we switch CPUs. Instead of
-	 * blocking preemption for the entire busywait, we can detect the CPU
-	 * switch and use that as indicator of system load and a reason to
-	 * stop busywaiting, see busywait_stop().
-	 */
-	*cpu = get_cpu();
-	t = local_clock() >> 10;
-	put_cpu();
-
-	return t;
-}
-
-static bool busywait_stop(unsigned long timeout, unsigned cpu)
-{
-	unsigned this_cpu;
-
-	if (time_after(local_clock_us(&this_cpu), timeout))
-		return true;
-
-	return this_cpu != cpu;
-}
-
-bool __i915_spin_request(const struct drm_i915_gem_request *req,
-			 int state, unsigned long timeout_us)
-{
-	unsigned cpu;
-
-	/* When waiting for high frequency requests, e.g. during synchronous
-	 * rendering split between the CPU and GPU, the finite amount of time
-	 * required to set up the irq and wait upon it limits the response
-	 * rate. By busywaiting on the request completion for a short while we
-	 * can service the high frequency waits as quick as possible. However,
-	 * if it is a slow request, we want to sleep as quickly as possible.
-	 * The tradeoff between waiting and sleeping is roughly the time it
-	 * takes to sleep on a request, on the order of a microsecond.
-	 */
-
-	timeout_us += local_clock_us(&cpu);
-	do {
-		if (i915_gem_request_completed(req))
-			return true;
-
-		if (signal_pending_state(state, current))
-			break;
-
-		if (busywait_stop(timeout_us, cpu))
-			break;
-
-		cpu_relax_lowlatency();
-	} while (!need_resched());
-
-	return false;
-}
-
-/**
- * __i915_wait_request - wait until execution of request has finished
- * @req: duh!
- * @interruptible: do an interruptible wait (normally yes)
- * @timeout: in - how long to wait (NULL forever); out - how much time remaining
- * @rps: RPS client
- *
- * Note: It is of utmost importance that the passed in seqno and reset_counter
- * values have been read by the caller in an smp safe manner. Where read-side
- * locks are involved, it is sufficient to read the reset_counter before
- * unlocking the lock that protects the seqno. For lockless tricks, the
- * reset_counter _must_ be read before, and an appropriate smp_rmb must be
- * inserted.
- *
- * Returns 0 if the request was found within the alloted time. Else returns the
- * errno with remaining time filled in timeout argument.
- */
-int __i915_wait_request(struct drm_i915_gem_request *req,
-			bool interruptible,
-			s64 *timeout,
-			struct intel_rps_client *rps)
-{
-	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
-	DEFINE_WAIT(reset);
-	struct intel_wait wait;
-	unsigned long timeout_remain;
-	s64 before = 0; /* Only to silence a compiler warning. */
-	int ret = 0;
-
-	might_sleep();
-
-	if (list_empty(&req->list))
-		return 0;
-
-	if (i915_gem_request_completed(req))
-		return 0;
-
-	timeout_remain = MAX_SCHEDULE_TIMEOUT;
-	if (timeout) {
-		if (WARN_ON(*timeout < 0))
-			return -EINVAL;
-
-		if (*timeout == 0)
-			return -ETIME;
-
-		timeout_remain = nsecs_to_jiffies_timeout(*timeout);
-
-		/*
-		 * Record current time in case interrupted by signal, or wedged.
-		 */
-		before = ktime_get_raw_ns();
-	}
-
-	trace_i915_gem_request_wait_begin(req);
-
-	/* This client is about to stall waiting for the GPU. In many cases
-	 * this is undesirable and limits the throughput of the system, as
-	 * many clients cannot continue processing user input/output whilst
-	 * blocked. RPS autotuning may take tens of milliseconds to respond
-	 * to the GPU load and thus incurs additional latency for the client.
-	 * We can circumvent that by promoting the GPU frequency to maximum
-	 * before we wait. This makes the GPU throttle up much more quickly
-	 * (good for benchmarks and user experience, e.g. window animations),
-	 * but at a cost of spending more power processing the workload
-	 * (bad for battery). Not all clients even want their results
-	 * immediately and for them we should just let the GPU select its own
-	 * frequency to maximise efficiency. To prevent a single client from
-	 * forcing the clocks too high for the whole system, we only allow
-	 * each client to waitboost once in a busy period.
-	 */
-	if (INTEL_INFO(req->i915)->gen >= 6)
-		gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
-
-	/* Optimistic spin for the next ~jiffie before touching IRQs */
-	if (i915_spin_request(req, state, 5))
-		goto complete;
-
-	set_current_state(state);
-	add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
-
-	intel_wait_init(&wait, req->seqno);
-	if (intel_engine_add_wait(req->engine, &wait))
-		/* In order to check that we haven't missed the interrupt
-		 * as we enabled it, we need to kick ourselves to do a
-		 * coherent check on the seqno before we sleep.
-		 */
-		goto wakeup;
-
-	for (;;) {
-		if (signal_pending_state(state, current)) {
-			ret = -ERESTARTSYS;
-			break;
-		}
-
-		timeout_remain = io_schedule_timeout(timeout_remain);
-		if (timeout_remain == 0) {
-			ret = -ETIME;
-			break;
-		}
-
-		if (intel_wait_complete(&wait))
-			break;
-
-		set_current_state(state);
-
-wakeup:
-		/* Carefully check if the request is complete, giving time
-		 * for the seqno to be visible following the interrupt.
-		 * We also have to check in case we are kicked by the GPU
-		 * reset in order to drop the struct_mutex.
-		 */
-		if (__i915_request_irq_complete(req))
-			break;
-
-		/* Only spin if we know the GPU is processing this request */
-		if (i915_spin_request(req, state, 2))
-			break;
-	}
-	remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
-
-	intel_engine_remove_wait(req->engine, &wait);
-	__set_current_state(TASK_RUNNING);
-complete:
-	trace_i915_gem_request_wait_end(req);
-
-	if (timeout) {
-		s64 tres = *timeout - (ktime_get_raw_ns() - before);
-
-		*timeout = tres < 0 ? 0 : tres;
-
-		/*
-		 * Apparently ktime isn't accurate enough and occasionally has a
-		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
-		 * things up to make the test happy. We allow up to 1 jiffy.
-		 *
-		 * This is a regrssion from the timespec->ktime conversion.
-		 */
-		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
-			*timeout = 0;
-	}
-
-	if (rps && req->seqno == req->engine->last_submitted_seqno) {
-		/* The GPU is now idle and this client has stalled.
-		 * Since no other client has submitted a request in the
-		 * meantime, assume that this client is the only one
-		 * supplying work to the GPU but is unable to keep that
-		 * work supplied because it is waiting. Since the GPU is
-		 * then never kept fully busy, RPS autoclocking will
-		 * keep the clocks relatively low, causing further delays.
-		 * Compensate by giving the synchronous client credit for
-		 * a waitboost next time.
-		 */
-		spin_lock(&req->i915->rps.client_lock);
-		list_del_init(&rps->link);
-		spin_unlock(&req->i915->rps.client_lock);
-	}
-
+err_rpm:
+	intel_runtime_pm_put(dev_priv);
+err:
+	i915_gem_object_put_unlocked(obj);
 	return ret;
 }
 
-int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
-				   struct drm_file *file)
-{
-	struct drm_i915_file_private *file_priv;
-
-	WARN_ON(!req || !file || req->file_priv);
-
-	if (!req || !file)
-		return -EINVAL;
-
-	if (req->file_priv)
-		return -EINVAL;
-
-	file_priv = file->driver_priv;
-
-	spin_lock(&file_priv->mm.lock);
-	req->file_priv = file_priv;
-	list_add_tail(&req->client_list, &file_priv->mm.request_list);
-	spin_unlock(&file_priv->mm.lock);
-
-	req->pid = get_pid(task_pid(current));
-
-	return 0;
-}
-
-static inline void
-i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
-{
-	struct drm_i915_file_private *file_priv = request->file_priv;
-
-	if (!file_priv)
-		return;
-
-	spin_lock(&file_priv->mm.lock);
-	list_del(&request->client_list);
-	request->file_priv = NULL;
-	spin_unlock(&file_priv->mm.lock);
-
-	put_pid(request->pid);
-	request->pid = NULL;
-}
-
-static void i915_gem_request_retire(struct drm_i915_gem_request *request)
-{
-	trace_i915_gem_request_retire(request);
-
-	/* We know the GPU must have read the request to have
-	 * sent us the seqno + interrupt, so use the position
-	 * of tail of the request to update the last known position
-	 * of the GPU head.
-	 *
-	 * Note this requires that we are always called in request
-	 * completion order.
-	 */
-	request->ringbuf->last_retired_head = request->postfix;
-
-	list_del_init(&request->list);
-	i915_gem_request_remove_from_client(request);
-
-	if (request->previous_context) {
-		if (i915.enable_execlists)
-			intel_lr_context_unpin(request->previous_context,
-					       request->engine);
-	}
-
-	i915_gem_context_unreference(request->ctx);
-	i915_gem_request_unreference(request);
-}
-
-static void
-__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	struct drm_i915_gem_request *tmp;
-
-	lockdep_assert_held(&engine->i915->drm.struct_mutex);
-
-	if (list_empty(&req->list))
-		return;
-
-	do {
-		tmp = list_first_entry(&engine->request_list,
-				       typeof(*tmp), list);
-
-		i915_gem_request_retire(tmp);
-	} while (tmp != req);
-
-	WARN_ON(i915_verify_lists(engine->dev));
-}
-
-/**
- * Waits for a request to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- * @req: request to wait on
- */
-int
-i915_wait_request(struct drm_i915_gem_request *req)
-{
-	struct drm_i915_private *dev_priv = req->i915;
-	bool interruptible;
-	int ret;
-
-	interruptible = dev_priv->mm.interruptible;
-
-	BUG_ON(!mutex_is_locked(&dev_priv->drm.struct_mutex));
-
-	ret = __i915_wait_request(req, interruptible, NULL, NULL);
-	if (ret)
-		return ret;
-
-	/* If the GPU hung, we want to keep the requests to find the guilty. */
-	if (!i915_reset_in_progress(&dev_priv->gpu_error))
-		__i915_gem_request_retire__upto(req);
-
-	return 0;
-}
-
-/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
- * @obj: i915 gem object
- * @readonly: waiting for read access or write
- */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-			       bool readonly)
-{
-	int ret, i;
-
-	if (!obj->active)
-		return 0;
-
-	if (readonly) {
-		if (obj->last_write_req != NULL) {
-			ret = i915_wait_request(obj->last_write_req);
-			if (ret)
-				return ret;
-
-			i = obj->last_write_req->engine->id;
-			if (obj->last_read_req[i] == obj->last_write_req)
-				i915_gem_object_retire__read(obj, i);
-			else
-				i915_gem_object_retire__write(obj);
-		}
-	} else {
-		for (i = 0; i < I915_NUM_ENGINES; i++) {
-			if (obj->last_read_req[i] == NULL)
-				continue;
-
-			ret = i915_wait_request(obj->last_read_req[i]);
-			if (ret)
-				return ret;
-
-			i915_gem_object_retire__read(obj, i);
-		}
-		GEM_BUG_ON(obj->active);
-	}
-
-	return 0;
-}
-
-static void
-i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
-			       struct drm_i915_gem_request *req)
-{
-	int ring = req->engine->id;
-
-	if (obj->last_read_req[ring] == req)
-		i915_gem_object_retire__read(obj, ring);
-	else if (obj->last_write_req == req)
-		i915_gem_object_retire__write(obj);
-
-	if (!i915_reset_in_progress(&req->i915->gpu_error))
-		__i915_gem_request_retire__upto(req);
-}
-
-/* A nonblocking variant of the above wait. This is a highly dangerous routine
- * as the object state may change during this call.
- */
-static __must_check int
-i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
-					    struct intel_rps_client *rps,
-					    bool readonly)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
-	int ret, i, n = 0;
-
-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-	BUG_ON(!dev_priv->mm.interruptible);
-
-	if (!obj->active)
-		return 0;
-
-	if (readonly) {
-		struct drm_i915_gem_request *req;
-
-		req = obj->last_write_req;
-		if (req == NULL)
-			return 0;
-
-		requests[n++] = i915_gem_request_reference(req);
-	} else {
-		for (i = 0; i < I915_NUM_ENGINES; i++) {
-			struct drm_i915_gem_request *req;
-
-			req = obj->last_read_req[i];
-			if (req == NULL)
-				continue;
-
-			requests[n++] = i915_gem_request_reference(req);
-		}
-	}
-
-	mutex_unlock(&dev->struct_mutex);
-	ret = 0;
-	for (i = 0; ret == 0 && i < n; i++)
-		ret = __i915_wait_request(requests[i], true, NULL, rps);
-	mutex_lock(&dev->struct_mutex);
-
-	for (i = 0; i < n; i++) {
-		if (ret == 0)
-			i915_gem_object_retire_request(obj, requests[i]);
-		i915_gem_request_unreference(requests[i]);
-	}
-
-	return ret;
-}
-
-static struct intel_rps_client *to_rps_client(struct drm_file *file)
-{
-	struct drm_i915_file_private *fpriv = file->driver_priv;
-	return &fpriv->rps;
-}
-
-static enum fb_op_origin
+static inline enum fb_op_origin
 write_origin(struct drm_i915_gem_object *obj, unsigned domain)
 {
-	return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
-	       ORIGIN_GTT : ORIGIN_CPU;
+	return (domain == I915_GEM_DOMAIN_GTT ?
+		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
 }
 
 /**
@@ -1831,10 +1520,7 @@
 	int ret;
 
 	/* Only handle setting domains to types used by the CPU. */
-	if (write_domain & I915_GEM_GPU_DOMAINS)
-		return -EINVAL;
-
-	if (read_domains & I915_GEM_GPU_DOMAINS)
+	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
 		return -EINVAL;
 
 	/* Having something in the write domain implies it's in the read
@@ -1843,25 +1529,21 @@
 	if (write_domain != 0 && read_domains != write_domain)
 		return -EINVAL;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
-
-	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-	if (&obj->base == NULL) {
-		ret = -ENOENT;
-		goto unlock;
-	}
+	obj = i915_gem_object_lookup(file, args->handle);
+	if (!obj)
+		return -ENOENT;
 
 	/* Try to flush the object off the GPU without holding the lock.
 	 * We will repeat the flush holding the lock in the normal manner
 	 * to catch cases where we are gazumped.
 	 */
-	ret = i915_gem_object_wait_rendering__nonblocking(obj,
-							  to_rps_client(file),
-							  !write_domain);
+	ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain);
 	if (ret)
-		goto unref;
+		goto err;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto err;
 
 	if (read_domains & I915_GEM_DOMAIN_GTT)
 		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
@@ -1871,11 +1553,13 @@
 	if (write_domain != 0)
 		intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
 
-unref:
-	drm_gem_object_unreference(&obj->base);
-unlock:
+	i915_gem_object_put(obj);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
+
+err:
+	i915_gem_object_put_unlocked(obj);
+	return ret;
 }
 
 /**
@@ -1890,26 +1574,23 @@
 {
 	struct drm_i915_gem_sw_finish *args = data;
 	struct drm_i915_gem_object *obj;
-	int ret = 0;
+	int err = 0;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
-
-	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-	if (&obj->base == NULL) {
-		ret = -ENOENT;
-		goto unlock;
-	}
+	obj = i915_gem_object_lookup(file, args->handle);
+	if (!obj)
+		return -ENOENT;
 
 	/* Pinned buffers may be scanout, so flush the cache */
-	if (obj->pin_display)
-		i915_gem_object_flush_cpu_write_domain(obj);
+	if (READ_ONCE(obj->pin_display)) {
+		err = i915_mutex_lock_interruptible(dev);
+		if (!err) {
+			i915_gem_object_flush_cpu_write_domain(obj);
+			mutex_unlock(&dev->struct_mutex);
+		}
+	}
 
-	drm_gem_object_unreference(&obj->base);
-unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	i915_gem_object_put_unlocked(obj);
+	return err;
 }
 
 /**
@@ -1937,7 +1618,7 @@
 		    struct drm_file *file)
 {
 	struct drm_i915_gem_mmap *args = data;
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 	unsigned long addr;
 
 	if (args->flags & ~(I915_MMAP_WC))
@@ -1946,19 +1627,19 @@
 	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
 		return -ENODEV;
 
-	obj = drm_gem_object_lookup(file, args->handle);
-	if (obj == NULL)
+	obj = i915_gem_object_lookup(file, args->handle);
+	if (!obj)
 		return -ENOENT;
 
 	/* prime objects have no backing filp to GEM mmap
 	 * pages from.
 	 */
-	if (!obj->filp) {
-		drm_gem_object_unreference_unlocked(obj);
+	if (!obj->base.filp) {
+		i915_gem_object_put_unlocked(obj);
 		return -EINVAL;
 	}
 
-	addr = vm_mmap(obj->filp, 0, args->size,
+	addr = vm_mmap(obj->base.filp, 0, args->size,
 		       PROT_READ | PROT_WRITE, MAP_SHARED,
 		       args->offset);
 	if (args->flags & I915_MMAP_WC) {
@@ -1966,7 +1647,7 @@
 		struct vm_area_struct *vma;
 
 		if (down_write_killable(&mm->mmap_sem)) {
-			drm_gem_object_unreference_unlocked(obj);
+			i915_gem_object_put_unlocked(obj);
 			return -EINTR;
 		}
 		vma = find_vma(mm, addr);
@@ -1978,9 +1659,9 @@
 		up_write(&mm->mmap_sem);
 
 		/* This may race, but that's ok, it only gets set */
-		WRITE_ONCE(to_intel_bo(obj)->has_wc_mmap, true);
+		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
 	}
-	drm_gem_object_unreference_unlocked(obj);
+	i915_gem_object_put_unlocked(obj);
 	if (IS_ERR((void *)addr))
 		return addr;
 
@@ -1989,9 +1670,69 @@
 	return 0;
 }
 
+static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
+{
+	u64 size;
+
+	size = i915_gem_object_get_stride(obj);
+	size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;
+
+	return size >> PAGE_SHIFT;
+}
+
+/**
+ * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
+ *
+ * A history of the GTT mmap interface:
+ *
+ * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
+ *     aligned and suitable for fencing, and still fit into the available
+ *     mappable space left by the pinned display objects. A classic problem
+ *     we called the page-fault-of-doom where we would ping-pong between
+ *     two objects that could not fit inside the GTT and so the memcpy
+ *     would page one object in at the expense of the other between every
+ *     single byte.
+ *
+ * 1 - Objects can be any size, and have any compatible fencing (X Y, or none
+ *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
+ *     object is too large for the available space (or simply too large
+ *     for the mappable aperture!), a view is created instead and faulted
+ *     into userspace. (This view is aligned and sized appropriately for
+ *     fenced access.)
+ *
+ * Restrictions:
+ *
+ *  * snoopable objects cannot be accessed via the GTT. It can cause machine
+ *    hangs on some architectures, corruption on others. An attempt to service
+ *    a GTT page fault from a snoopable object will generate a SIGBUS.
+ *
+ *  * the object must be able to fit into RAM (physical memory, though no
+ *    limited to the mappable aperture).
+ *
+ *
+ * Caveats:
+ *
+ *  * a new GTT page fault will synchronize rendering from the GPU and flush
+ *    all data to system memory. Subsequent access will not be synchronized.
+ *
+ *  * all mappings are revoked on runtime device suspend.
+ *
+ *  * there are only 8, 16 or 32 fence registers to share between all users
+ *    (older machines require fence register for display and blitter access
+ *    as well). Contention of the fence registers will cause the previous users
+ *    to be unmapped and any new access will generate new page faults.
+ *
+ *  * running out of memory while servicing a fault may generate a SIGBUS,
+ *    rather than the expected SIGSEGV.
+ */
+int i915_gem_mmap_gtt_version(void)
+{
+	return 1;
+}
+
 /**
  * i915_gem_fault - fault a page into the GTT
- * @vma: VMA in question
+ * @area: CPU VMA in question
  * @vmf: fault info
  *
  * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
@@ -2004,122 +1745,120 @@
  * from the GTT and/or fence registers to make room.  So performance may
  * suffer if the GTT working set is large or there are few fence registers
  * left.
+ *
+ * The current feature set supported by i915_gem_fault() and thus GTT mmaps
+ * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
  */
-int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 {
-	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
+#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
+	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct i915_ggtt_view view = i915_ggtt_view_normal;
-	pgoff_t page_offset;
-	unsigned long pfn;
-	int ret = 0;
 	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
-
-	intel_runtime_pm_get(dev_priv);
+	struct i915_vma *vma;
+	pgoff_t page_offset;
+	unsigned int flags;
+	int ret;
 
 	/* We don't use vmf->pgoff since that has the fake offset */
-	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+	page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
 		PAGE_SHIFT;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto out;
-
 	trace_i915_gem_object_fault(obj, page_offset, true, write);
 
 	/* Try to flush the object off the GPU first without holding the lock.
-	 * Upon reacquiring the lock, we will perform our sanity checks and then
+	 * Upon acquiring the lock, we will perform our sanity checks and then
 	 * repeat the flush holding the lock in the normal manner to catch cases
 	 * where we are gazumped.
 	 */
-	ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
+	ret = __unsafe_wait_rendering(obj, NULL, !write);
 	if (ret)
-		goto unlock;
+		goto err;
+
+	intel_runtime_pm_get(dev_priv);
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto err_rpm;
 
 	/* Access to snoopable pages through the GTT is incoherent. */
 	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
 		ret = -EFAULT;
-		goto unlock;
+		goto err_unlock;
 	}
 
-	/* Use a partial view if the object is bigger than the aperture. */
-	if (obj->base.size >= ggtt->mappable_end &&
-	    obj->tiling_mode == I915_TILING_NONE) {
-		static const unsigned int chunk_size = 256; // 1 MiB
+	/* If the object is smaller than a couple of partial vma, it is
+	 * not worth only creating a single partial vma - we may as well
+	 * clear enough space for the full object.
+	 */
+	flags = PIN_MAPPABLE;
+	if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
+		flags |= PIN_NONBLOCK | PIN_NONFAULT;
+
+	/* Now pin it into the GTT as needed */
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
+	if (IS_ERR(vma)) {
+		struct i915_ggtt_view view;
+		unsigned int chunk_size;
+
+		/* Use a partial view if it is bigger than available space */
+		chunk_size = MIN_CHUNK_PAGES;
+		if (i915_gem_object_is_tiled(obj))
+			chunk_size = max(chunk_size, tile_row_pages(obj));
 
 		memset(&view, 0, sizeof(view));
 		view.type = I915_GGTT_VIEW_PARTIAL;
 		view.params.partial.offset = rounddown(page_offset, chunk_size);
 		view.params.partial.size =
-			min_t(unsigned int,
-			      chunk_size,
-			      (vma->vm_end - vma->vm_start)/PAGE_SIZE -
+			min_t(unsigned int, chunk_size,
+			      (area->vm_end - area->vm_start) / PAGE_SIZE -
 			      view.params.partial.offset);
-	}
 
-	/* Now pin it into the GTT if needed */
-	ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
-	if (ret)
-		goto unlock;
+		/* If the partial covers the entire object, just create a
+		 * normal VMA.
+		 */
+		if (chunk_size >= obj->base.size >> PAGE_SHIFT)
+			view.type = I915_GGTT_VIEW_NORMAL;
+
+		/* Userspace is now writing through an untracked VMA, abandon
+		 * all hope that the hardware is able to track future writes.
+		 */
+		obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
+
+		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+	}
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err_unlock;
+	}
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, write);
 	if (ret)
-		goto unpin;
+		goto err_unpin;
 
-	ret = i915_gem_object_get_fence(obj);
+	ret = i915_vma_get_fence(vma);
 	if (ret)
-		goto unpin;
+		goto err_unpin;
 
 	/* Finally, remap it using the new GTT offset */
-	pfn = ggtt->mappable_base +
-		i915_gem_obj_ggtt_offset_view(obj, &view);
-	pfn >>= PAGE_SHIFT;
+	ret = remap_io_mapping(area,
+			       area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
+			       (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
+			       min_t(u64, vma->size, area->vm_end - area->vm_start),
+			       &ggtt->mappable);
+	if (ret)
+		goto err_unpin;
 
-	if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
-		/* Overriding existing pages in partial view does not cause
-		 * us any trouble as TLBs are still valid because the fault
-		 * is due to userspace losing part of the mapping or never
-		 * having accessed it before (at this partials' range).
-		 */
-		unsigned long base = vma->vm_start +
-				     (view.params.partial.offset << PAGE_SHIFT);
-		unsigned int i;
-
-		for (i = 0; i < view.params.partial.size; i++) {
-			ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
-			if (ret)
-				break;
-		}
-
-		obj->fault_mappable = true;
-	} else {
-		if (!obj->fault_mappable) {
-			unsigned long size = min_t(unsigned long,
-						   vma->vm_end - vma->vm_start,
-						   obj->base.size);
-			int i;
-
-			for (i = 0; i < size >> PAGE_SHIFT; i++) {
-				ret = vm_insert_pfn(vma,
-						    (unsigned long)vma->vm_start + i * PAGE_SIZE,
-						    pfn + i);
-				if (ret)
-					break;
-			}
-
-			obj->fault_mappable = true;
-		} else
-			ret = vm_insert_pfn(vma,
-					    (unsigned long)vmf->virtual_address,
-					    pfn + page_offset);
-	}
-unpin:
-	i915_gem_object_ggtt_unpin_view(obj, &view);
-unlock:
+	obj->fault_mappable = true;
+err_unpin:
+	__i915_vma_unpin(vma);
+err_unlock:
 	mutex_unlock(&dev->struct_mutex);
-out:
+err_rpm:
+	intel_runtime_pm_put(dev_priv);
+err:
 	switch (ret) {
 	case -EIO:
 		/*
@@ -2160,8 +1899,6 @@
 		ret = VM_FAULT_SIGBUS;
 		break;
 	}
-
-	intel_runtime_pm_put(dev_priv);
 	return ret;
 }
 
@@ -2215,46 +1952,58 @@
 		i915_gem_release_mmap(obj);
 }
 
-uint32_t
-i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
+/**
+ * i915_gem_get_ggtt_size - return required global GTT size for an object
+ * @dev_priv: i915 device
+ * @size: object size
+ * @tiling_mode: tiling mode
+ *
+ * Return the required global GTT size for an object, taking into account
+ * potential fence register mapping.
+ */
+u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
+			   u64 size, int tiling_mode)
 {
-	uint32_t gtt_size;
+	u64 ggtt_size;
 
-	if (INTEL_INFO(dev)->gen >= 4 ||
+	GEM_BUG_ON(size == 0);
+
+	if (INTEL_GEN(dev_priv) >= 4 ||
 	    tiling_mode == I915_TILING_NONE)
 		return size;
 
 	/* Previous chips need a power-of-two fence region when tiling */
-	if (IS_GEN3(dev))
-		gtt_size = 1024*1024;
+	if (IS_GEN3(dev_priv))
+		ggtt_size = 1024*1024;
 	else
-		gtt_size = 512*1024;
+		ggtt_size = 512*1024;
 
-	while (gtt_size < size)
-		gtt_size <<= 1;
+	while (ggtt_size < size)
+		ggtt_size <<= 1;
 
-	return gtt_size;
+	return ggtt_size;
 }
 
 /**
- * i915_gem_get_gtt_alignment - return required GTT alignment for an object
- * @dev: drm device
+ * i915_gem_get_ggtt_alignment - return required global GTT alignment
+ * @dev_priv: i915 device
  * @size: object size
  * @tiling_mode: tiling mode
- * @fenced: is fenced alignemned required or not
+ * @fenced: is fenced alignment required or not
  *
- * Return the required GTT alignment for an object, taking into account
+ * Return the required global GTT alignment for an object, taking into account
  * potential fence register mapping.
  */
-uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
-			   int tiling_mode, bool fenced)
+u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
+				int tiling_mode, bool fenced)
 {
+	GEM_BUG_ON(size == 0);
+
 	/*
 	 * Minimum alignment is 4k (GTT page size), but might be greater
 	 * if a fence register is needed for the object.
 	 */
-	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
+	if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
 	    tiling_mode == I915_TILING_NONE)
 		return 4096;
 
@@ -2262,42 +2011,34 @@
 	 * Previous chips need to be aligned to the size of the smallest
 	 * fence register that can contain the object.
 	 */
-	return i915_gem_get_gtt_size(dev, size, tiling_mode);
+	return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
 }
 
 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	int ret;
+	int err;
 
-	dev_priv->mm.shrinker_no_lock_stealing = true;
+	err = drm_gem_create_mmap_offset(&obj->base);
+	if (!err)
+		return 0;
 
-	ret = drm_gem_create_mmap_offset(&obj->base);
-	if (ret != -ENOSPC)
-		goto out;
-
-	/* Badly fragmented mmap space? The only way we can recover
-	 * space is by destroying unwanted objects. We can't randomly release
-	 * mmap_offsets as userspace expects them to be persistent for the
-	 * lifetime of the objects. The closest we can is to release the
-	 * offsets on purgeable objects by truncating it and marking it purged,
-	 * which prevents userspace from ever using that object again.
+	/* We can idle the GPU locklessly to flush stale objects, but in order
+	 * to claim that space for ourselves, we need to take the big
+	 * struct_mutex to free the requests+objects and allocate our slot.
 	 */
-	i915_gem_shrink(dev_priv,
-			obj->base.size >> PAGE_SHIFT,
-			I915_SHRINK_BOUND |
-			I915_SHRINK_UNBOUND |
-			I915_SHRINK_PURGEABLE);
-	ret = drm_gem_create_mmap_offset(&obj->base);
-	if (ret != -ENOSPC)
-		goto out;
+	err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
+	if (err)
+		return err;
 
-	i915_gem_shrink_all(dev_priv);
-	ret = drm_gem_create_mmap_offset(&obj->base);
-out:
-	dev_priv->mm.shrinker_no_lock_stealing = false;
+	err = i915_mutex_lock_interruptible(&dev_priv->drm);
+	if (!err) {
+		i915_gem_retire_requests(dev_priv);
+		err = drm_gem_create_mmap_offset(&obj->base);
+		mutex_unlock(&dev_priv->drm.struct_mutex);
+	}
 
-	return ret;
+	return err;
 }
 
 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@ -2314,32 +2055,15 @@
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
-
-	obj = to_intel_bo(drm_gem_object_lookup(file, handle));
-	if (&obj->base == NULL) {
-		ret = -ENOENT;
-		goto unlock;
-	}
-
-	if (obj->madv != I915_MADV_WILLNEED) {
-		DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
-		ret = -EFAULT;
-		goto out;
-	}
+	obj = i915_gem_object_lookup(file, handle);
+	if (!obj)
+		return -ENOENT;
 
 	ret = i915_gem_object_create_mmap_offset(obj);
-	if (ret)
-		goto out;
+	if (ret == 0)
+		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
 
-	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
-
-out:
-	drm_gem_object_unreference(&obj->base);
-unlock:
-	mutex_unlock(&dev->struct_mutex);
+	i915_gem_object_put_unlocked(obj);
 	return ret;
 }
 
@@ -2457,7 +2181,7 @@
 	if (obj->pages_pin_count)
 		return -EBUSY;
 
-	BUG_ON(i915_gem_obj_bound_any(obj));
+	GEM_BUG_ON(obj->bind_count);
 
 	/* ->put_pages might need to allocate memory for the bit17 swizzle
 	 * array, hence protect them from being reaped by removing them from gtt
@@ -2465,10 +2189,14 @@
 	list_del(&obj->global_list);
 
 	if (obj->mapping) {
-		if (is_vmalloc_addr(obj->mapping))
-			vunmap(obj->mapping);
+		void *ptr;
+
+		ptr = ptr_mask_bits(obj->mapping);
+		if (is_vmalloc_addr(ptr))
+			vunmap(ptr);
 		else
-			kunmap(kmap_to_page(obj->mapping));
+			kunmap(kmap_to_page(ptr));
+
 		obj->mapping = NULL;
 	}
 
@@ -2577,7 +2305,7 @@
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj);
 
-	if (obj->tiling_mode != I915_TILING_NONE &&
+	if (i915_gem_object_is_tiled(obj) &&
 	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
 		i915_gem_object_pin_pages(obj);
 
@@ -2641,7 +2369,8 @@
 }
 
 /* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+				 enum i915_map_type type)
 {
 	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
 	struct sg_table *sgt = obj->pages;
@@ -2650,10 +2379,11 @@
 	struct page *stack_pages[32];
 	struct page **pages = stack_pages;
 	unsigned long i = 0;
+	pgprot_t pgprot;
 	void *addr;
 
 	/* A single page can always be kmapped */
-	if (n_pages == 1)
+	if (n_pages == 1 && type == I915_MAP_WB)
 		return kmap(sg_page(sgt->sgl));
 
 	if (n_pages > ARRAY_SIZE(stack_pages)) {
@@ -2669,7 +2399,15 @@
 	/* Check that we have the expected number of pages */
 	GEM_BUG_ON(i != n_pages);
 
-	addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+	switch (type) {
+	case I915_MAP_WB:
+		pgprot = PAGE_KERNEL;
+		break;
+	case I915_MAP_WC:
+		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
+		break;
+	}
+	addr = vmap(pages, n_pages, 0, pgprot);
 
 	if (pages != stack_pages)
 		drm_free_large(pages);
@@ -2678,276 +2416,89 @@
 }
 
 /* get, pin, and map the pages of the object into kernel space */
-void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
+void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
+			      enum i915_map_type type)
 {
+	enum i915_map_type has_type;
+	bool pinned;
+	void *ptr;
 	int ret;
 
 	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
 
 	ret = i915_gem_object_get_pages(obj);
 	if (ret)
 		return ERR_PTR(ret);
 
 	i915_gem_object_pin_pages(obj);
+	pinned = obj->pages_pin_count > 1;
 
-	if (!obj->mapping) {
-		obj->mapping = i915_gem_object_map(obj);
-		if (!obj->mapping) {
-			i915_gem_object_unpin_pages(obj);
-			return ERR_PTR(-ENOMEM);
+	ptr = ptr_unpack_bits(obj->mapping, has_type);
+	if (ptr && has_type != type) {
+		if (pinned) {
+			ret = -EBUSY;
+			goto err;
 		}
+
+		if (is_vmalloc_addr(ptr))
+			vunmap(ptr);
+		else
+			kunmap(kmap_to_page(ptr));
+
+		ptr = obj->mapping = NULL;
 	}
 
-	return obj->mapping;
-}
+	if (!ptr) {
+		ptr = i915_gem_object_map(obj, type);
+		if (!ptr) {
+			ret = -ENOMEM;
+			goto err;
+		}
 
-void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct drm_i915_gem_request *req)
-{
-	struct drm_i915_gem_object *obj = vma->obj;
-	struct intel_engine_cs *engine;
+		obj->mapping = ptr_pack_bits(ptr, type);
+	}
 
-	engine = i915_gem_request_get_engine(req);
+	return ptr;
 
-	/* Add a reference if we're newly entering the active list. */
-	if (obj->active == 0)
-		drm_gem_object_reference(&obj->base);
-	obj->active |= intel_engine_flag(engine);
-
-	list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
-	i915_gem_request_assign(&obj->last_read_req[engine->id], req);
-
-	list_move_tail(&vma->vm_link, &vma->vm->active_list);
+err:
+	i915_gem_object_unpin_pages(obj);
+	return ERR_PTR(ret);
 }
 
 static void
-i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
+i915_gem_object_retire__write(struct i915_gem_active *active,
+			      struct drm_i915_gem_request *request)
 {
-	GEM_BUG_ON(obj->last_write_req == NULL);
-	GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
+	struct drm_i915_gem_object *obj =
+		container_of(active, struct drm_i915_gem_object, last_write);
 
-	i915_gem_request_assign(&obj->last_write_req, NULL);
 	intel_fb_obj_flush(obj, true, ORIGIN_CS);
 }
 
 static void
-i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
+i915_gem_object_retire__read(struct i915_gem_active *active,
+			     struct drm_i915_gem_request *request)
 {
-	struct i915_vma *vma;
+	int idx = request->engine->id;
+	struct drm_i915_gem_object *obj =
+		container_of(active, struct drm_i915_gem_object, last_read[idx]);
 
-	GEM_BUG_ON(obj->last_read_req[ring] == NULL);
-	GEM_BUG_ON(!(obj->active & (1 << ring)));
+	GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));
 
-	list_del_init(&obj->engine_list[ring]);
-	i915_gem_request_assign(&obj->last_read_req[ring], NULL);
-
-	if (obj->last_write_req && obj->last_write_req->engine->id == ring)
-		i915_gem_object_retire__write(obj);
-
-	obj->active &= ~(1 << ring);
-	if (obj->active)
+	i915_gem_object_clear_active(obj, idx);
+	if (i915_gem_object_is_active(obj))
 		return;
 
 	/* Bump our place on the bound list to keep it roughly in LRU order
 	 * so that we don't steal from recently used but inactive objects
 	 * (unless we are forced to ofc!)
 	 */
-	list_move_tail(&obj->global_list,
-		       &to_i915(obj->base.dev)->mm.bound_list);
+	if (obj->bind_count)
+		list_move_tail(&obj->global_list,
+			       &request->i915->mm.bound_list);
 
-	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (!list_empty(&vma->vm_link))
-			list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
-	}
-
-	i915_gem_request_assign(&obj->last_fenced_req, NULL);
-	drm_gem_object_unreference(&obj->base);
-}
-
-static int
-i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
-{
-	struct intel_engine_cs *engine;
-	int ret;
-
-	/* Carefully retire all requests without writing to the rings */
-	for_each_engine(engine, dev_priv) {
-		ret = intel_engine_idle(engine);
-		if (ret)
-			return ret;
-	}
-	i915_gem_retire_requests(dev_priv);
-
-	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
-	if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
-		while (intel_kick_waiters(dev_priv) ||
-		       intel_kick_signalers(dev_priv))
-			yield();
-	}
-
-	/* Finally reset hw state */
-	for_each_engine(engine, dev_priv)
-		intel_ring_init_seqno(engine, seqno);
-
-	return 0;
-}
-
-int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int ret;
-
-	if (seqno == 0)
-		return -EINVAL;
-
-	/* HWS page needs to be set less than what we
-	 * will inject to ring
-	 */
-	ret = i915_gem_init_seqno(dev_priv, seqno - 1);
-	if (ret)
-		return ret;
-
-	/* Carefully set the last_seqno value so that wrap
-	 * detection still works
-	 */
-	dev_priv->next_seqno = seqno;
-	dev_priv->last_seqno = seqno - 1;
-	if (dev_priv->last_seqno == 0)
-		dev_priv->last_seqno--;
-
-	return 0;
-}
-
-int
-i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
-{
-	/* reserve 0 for non-seqno */
-	if (dev_priv->next_seqno == 0) {
-		int ret = i915_gem_init_seqno(dev_priv, 0);
-		if (ret)
-			return ret;
-
-		dev_priv->next_seqno = 1;
-	}
-
-	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
-	return 0;
-}
-
-static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-
-	dev_priv->gt.active_engines |= intel_engine_flag(engine);
-	if (dev_priv->gt.awake)
-		return;
-
-	intel_runtime_pm_get_noresume(dev_priv);
-	dev_priv->gt.awake = true;
-
-	i915_update_gfx_val(dev_priv);
-	if (INTEL_GEN(dev_priv) >= 6)
-		gen6_rps_busy(dev_priv);
-
-	queue_delayed_work(dev_priv->wq,
-			   &dev_priv->gt.retire_work,
-			   round_jiffies_up_relative(HZ));
-}
-
-/*
- * NB: This function is not allowed to fail. Doing so would mean the the
- * request is not being tracked for completion but the work itself is
- * going to happen on the hardware. This would be a Bad Thing(tm).
- */
-void __i915_add_request(struct drm_i915_gem_request *request,
-			struct drm_i915_gem_object *obj,
-			bool flush_caches)
-{
-	struct intel_engine_cs *engine;
-	struct intel_ringbuffer *ringbuf;
-	u32 request_start;
-	u32 reserved_tail;
-	int ret;
-
-	if (WARN_ON(request == NULL))
-		return;
-
-	engine = request->engine;
-	ringbuf = request->ringbuf;
-
-	/*
-	 * To ensure that this call will not fail, space for its emissions
-	 * should already have been reserved in the ring buffer. Let the ring
-	 * know that it is time to use that space up.
-	 */
-	request_start = intel_ring_get_tail(ringbuf);
-	reserved_tail = request->reserved_space;
-	request->reserved_space = 0;
-
-	/*
-	 * Emit any outstanding flushes - execbuf can fail to emit the flush
-	 * after having emitted the batchbuffer command. Hence we need to fix
-	 * things up similar to emitting the lazy request. The difference here
-	 * is that the flush _must_ happen before the next request, no matter
-	 * what.
-	 */
-	if (flush_caches) {
-		if (i915.enable_execlists)
-			ret = logical_ring_flush_all_caches(request);
-		else
-			ret = intel_ring_flush_all_caches(request);
-		/* Not allowed to fail! */
-		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
-	}
-
-	trace_i915_gem_request_add(request);
-
-	request->head = request_start;
-
-	/* Whilst this request exists, batch_obj will be on the
-	 * active_list, and so will hold the active reference. Only when this
-	 * request is retired will the the batch_obj be moved onto the
-	 * inactive_list and lose its active reference. Hence we do not need
-	 * to explicitly hold another reference here.
-	 */
-	request->batch_obj = obj;
-
-	/* Seal the request and mark it as pending execution. Note that
-	 * we may inspect this state, without holding any locks, during
-	 * hangcheck. Hence we apply the barrier to ensure that we do not
-	 * see a more recent value in the hws than we are tracking.
-	 */
-	request->emitted_jiffies = jiffies;
-	request->previous_seqno = engine->last_submitted_seqno;
-	smp_store_mb(engine->last_submitted_seqno, request->seqno);
-	list_add_tail(&request->list, &engine->request_list);
-
-	/* Record the position of the start of the request so that
-	 * should we detect the updated seqno part-way through the
-	 * GPU processing the request, we never over-estimate the
-	 * position of the head.
-	 */
-	request->postfix = intel_ring_get_tail(ringbuf);
-
-	if (i915.enable_execlists)
-		ret = engine->emit_request(request);
-	else {
-		ret = engine->add_request(request);
-
-		request->tail = intel_ring_get_tail(ringbuf);
-	}
-	/* Not allowed to fail! */
-	WARN(ret, "emit|add_request failed: %d!\n", ret);
-	/* Sanity check that the reserved size was large enough. */
-	ret = intel_ring_get_tail(ringbuf) - request_start;
-	if (ret < 0)
-		ret += ringbuf->size;
-	WARN_ONCE(ret > reserved_tail,
-		  "Not enough space reserved (%d bytes) "
-		  "for adding the request (%d bytes)\n",
-		  reserved_tail, ret);
-
-	i915_gem_mark_busy(engine);
+	i915_gem_object_put(obj);
 }
 
 static bool i915_context_is_banned(const struct i915_gem_context *ctx)
@@ -2981,101 +2532,6 @@
 	}
 }
 
-void i915_gem_request_free(struct kref *req_ref)
-{
-	struct drm_i915_gem_request *req = container_of(req_ref,
-						 typeof(*req), ref);
-	kmem_cache_free(req->i915->requests, req);
-}
-
-static inline int
-__i915_gem_request_alloc(struct intel_engine_cs *engine,
-			 struct i915_gem_context *ctx,
-			 struct drm_i915_gem_request **req_out)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
-	struct drm_i915_gem_request *req;
-	int ret;
-
-	if (!req_out)
-		return -EINVAL;
-
-	*req_out = NULL;
-
-	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
-	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
-	 * and restart.
-	 */
-	ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
-	if (ret)
-		return ret;
-
-	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
-	if (req == NULL)
-		return -ENOMEM;
-
-	ret = i915_gem_get_seqno(engine->i915, &req->seqno);
-	if (ret)
-		goto err;
-
-	kref_init(&req->ref);
-	req->i915 = dev_priv;
-	req->engine = engine;
-	req->ctx  = ctx;
-	i915_gem_context_reference(req->ctx);
-
-	/*
-	 * Reserve space in the ring buffer for all the commands required to
-	 * eventually emit this request. This is to guarantee that the
-	 * i915_add_request() call can't fail. Note that the reserve may need
-	 * to be redone if the request is not actually submitted straight
-	 * away, e.g. because a GPU scheduler has deferred it.
-	 */
-	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
-
-	if (i915.enable_execlists)
-		ret = intel_logical_ring_alloc_request_extras(req);
-	else
-		ret = intel_ring_alloc_request_extras(req);
-	if (ret)
-		goto err_ctx;
-
-	*req_out = req;
-	return 0;
-
-err_ctx:
-	i915_gem_context_unreference(ctx);
-err:
-	kmem_cache_free(dev_priv->requests, req);
-	return ret;
-}
-
-/**
- * i915_gem_request_alloc - allocate a request structure
- *
- * @engine: engine that we wish to issue the request on.
- * @ctx: context that the request will be associated with.
- *       This can be NULL if the request is not directly related to
- *       any specific user context, in which case this function will
- *       choose an appropriate context to use.
- *
- * Returns a pointer to the allocated request if successful,
- * or an error code if not.
- */
-struct drm_i915_gem_request *
-i915_gem_request_alloc(struct intel_engine_cs *engine,
-		       struct i915_gem_context *ctx)
-{
-	struct drm_i915_gem_request *req;
-	int err;
-
-	if (ctx == NULL)
-		ctx = engine->i915->kernel_context;
-	err = __i915_gem_request_alloc(engine, ctx, &req);
-	return err ? ERR_PTR(err) : req;
-}
-
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine)
 {
@@ -3089,45 +2545,111 @@
 	 * extra delay for a recent interrupt is pointless. Hence, we do
 	 * not need an engine->irq_seqno_barrier() before the seqno reads.
 	 */
-	list_for_each_entry(request, &engine->request_list, list) {
+	list_for_each_entry(request, &engine->request_list, link) {
 		if (i915_gem_request_completed(request))
 			continue;
 
+		if (!i915_sw_fence_done(&request->submit))
+			break;
+
 		return request;
 	}
 
 	return NULL;
 }
 
-static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
+static void reset_request(struct drm_i915_gem_request *request)
+{
+	void *vaddr = request->ring->vaddr;
+	u32 head;
+
+	/* As this request likely depends on state from the lost
+	 * context, clear out all the user operations leaving the
+	 * breadcrumb at the end (so we get the fence notifications).
+	 */
+	head = request->head;
+	if (request->postfix < head) {
+		memset(vaddr + head, 0, request->ring->size - head);
+		head = 0;
+	}
+	memset(vaddr + head, 0, request->postfix - head);
+}
+
+static void i915_gem_reset_engine(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request;
+	struct i915_gem_context *incomplete_ctx;
 	bool ring_hung;
 
+	/* Ensure irq handler finishes, and not run again. */
+	tasklet_kill(&engine->irq_tasklet);
+	if (engine->irq_seqno_barrier)
+		engine->irq_seqno_barrier(engine);
+
 	request = i915_gem_find_active_request(engine);
-	if (request == NULL)
+	if (!request)
 		return;
 
 	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
-
 	i915_set_reset_status(request->ctx, ring_hung);
-	list_for_each_entry_continue(request, &engine->request_list, list)
-		i915_set_reset_status(request->ctx, false);
+	if (!ring_hung)
+		return;
+
+	DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
+			 engine->name, request->fence.seqno);
+
+	/* Setup the CS to resume from the breadcrumb of the hung request */
+	engine->reset_hw(engine, request);
+
+	/* Users of the default context do not rely on logical state
+	 * preserved between batches. They have to emit full state on
+	 * every batch and so it is safe to execute queued requests following
+	 * the hang.
+	 *
+	 * Other contexts preserve state, now corrupt. We want to skip all
+	 * queued requests that reference the corrupt context.
+	 */
+	incomplete_ctx = request->ctx;
+	if (i915_gem_context_is_default(incomplete_ctx))
+		return;
+
+	list_for_each_entry_continue(request, &engine->request_list, link)
+		if (request->ctx == incomplete_ctx)
+			reset_request(request);
 }
 
-static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
+void i915_gem_reset(struct drm_i915_private *dev_priv)
 {
-	struct intel_ringbuffer *buffer;
+	struct intel_engine_cs *engine;
 
-	while (!list_empty(&engine->active_list)) {
-		struct drm_i915_gem_object *obj;
+	i915_gem_retire_requests(dev_priv);
 
-		obj = list_first_entry(&engine->active_list,
-				       struct drm_i915_gem_object,
-				       engine_list[engine->id]);
+	for_each_engine(engine, dev_priv)
+		i915_gem_reset_engine(engine);
 
-		i915_gem_object_retire__read(obj, engine->id);
+	i915_gem_restore_fences(&dev_priv->drm);
+
+	if (dev_priv->gt.awake) {
+		intel_sanitize_gt_powersave(dev_priv);
+		intel_enable_gt_powersave(dev_priv);
+		if (INTEL_GEN(dev_priv) >= 6)
+			gen6_rps_busy(dev_priv);
 	}
+}
+
+static void nop_submit_request(struct drm_i915_gem_request *request)
+{
+}
+
+static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
+{
+	engine->submit_request = nop_submit_request;
+
+	/* Mark all pending requests as complete so that any concurrent
+	 * (lockless) lookup doesn't try and wait upon the request as we
+	 * reset it.
+	 */
+	intel_engine_init_seqno(engine, engine->last_submitted_seqno);
 
 	/*
 	 * Clear the execlists queue up before freeing the requests, as those
@@ -3136,138 +2658,30 @@
 	 */
 
 	if (i915.enable_execlists) {
-		/* Ensure irq handler finishes or is cancelled. */
-		tasklet_kill(&engine->irq_tasklet);
-
-		intel_execlists_cancel_requests(engine);
+		spin_lock(&engine->execlist_lock);
+		INIT_LIST_HEAD(&engine->execlist_queue);
+		i915_gem_request_put(engine->execlist_port[0].request);
+		i915_gem_request_put(engine->execlist_port[1].request);
+		memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
+		spin_unlock(&engine->execlist_lock);
 	}
 
-	/*
-	 * We must free the requests after all the corresponding objects have
-	 * been moved off active lists. Which is the same order as the normal
-	 * retire_requests function does. This is important if object hold
-	 * implicit references on things like e.g. ppgtt address spaces through
-	 * the request.
-	 */
-	while (!list_empty(&engine->request_list)) {
-		struct drm_i915_gem_request *request;
-
-		request = list_first_entry(&engine->request_list,
-					   struct drm_i915_gem_request,
-					   list);
-
-		i915_gem_request_retire(request);
-	}
-
-	/* Having flushed all requests from all queues, we know that all
-	 * ringbuffers must now be empty. However, since we do not reclaim
-	 * all space when retiring the request (to prevent HEADs colliding
-	 * with rapid ringbuffer wraparound) the amount of available space
-	 * upon reset is less than when we start. Do one more pass over
-	 * all the ringbuffers to reset last_retired_head.
-	 */
-	list_for_each_entry(buffer, &engine->buffers, link) {
-		buffer->last_retired_head = buffer->tail;
-		intel_ring_update_space(buffer);
-	}
-
-	intel_ring_init_seqno(engine, engine->last_submitted_seqno);
-
 	engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
 }
 
-void i915_gem_reset(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_engine_cs *engine;
-
-	/*
-	 * Before we free the objects from the requests, we need to inspect
-	 * them for finding the guilty party. As the requests only borrow
-	 * their reference to the objects, the inspection must be done first.
-	 */
-	for_each_engine(engine, dev_priv)
-		i915_gem_reset_engine_status(engine);
-
-	for_each_engine(engine, dev_priv)
-		i915_gem_reset_engine_cleanup(engine);
-	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
-
-	i915_gem_context_reset(dev);
-
-	i915_gem_restore_fences(dev);
-
-	WARN_ON(i915_verify_lists(dev));
-}
-
-/**
- * This function clears the request list as sequence numbers are passed.
- * @engine: engine to retire requests on
- */
-void
-i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
-{
-	WARN_ON(i915_verify_lists(engine->dev));
-
-	/* Retire requests first as we use it above for the early return.
-	 * If we retire requests last, we may use a later seqno and so clear
-	 * the requests lists without clearing the active list, leading to
-	 * confusion.
-	 */
-	while (!list_empty(&engine->request_list)) {
-		struct drm_i915_gem_request *request;
-
-		request = list_first_entry(&engine->request_list,
-					   struct drm_i915_gem_request,
-					   list);
-
-		if (!i915_gem_request_completed(request))
-			break;
-
-		i915_gem_request_retire(request);
-	}
-
-	/* Move any buffers on the active list that are no longer referenced
-	 * by the ringbuffer to the flushing/inactive lists as appropriate,
-	 * before we free the context associated with the requests.
-	 */
-	while (!list_empty(&engine->active_list)) {
-		struct drm_i915_gem_object *obj;
-
-		obj = list_first_entry(&engine->active_list,
-				       struct drm_i915_gem_object,
-				       engine_list[engine->id]);
-
-		if (!list_empty(&obj->last_read_req[engine->id]->list))
-			break;
-
-		i915_gem_object_retire__read(obj, engine->id);
-	}
-
-	WARN_ON(i915_verify_lists(engine->dev));
-}
-
-void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
+void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
 
-	if (dev_priv->gt.active_engines == 0)
-		return;
+	i915_gem_context_lost(dev_priv);
+	for_each_engine(engine, dev_priv)
+		i915_gem_cleanup_engine(engine);
+	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
 
-	GEM_BUG_ON(!dev_priv->gt.awake);
-
-	for_each_engine(engine, dev_priv) {
-		i915_gem_retire_requests_ring(engine);
-		if (list_empty(&engine->request_list))
-			dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
-	}
-
-	if (dev_priv->gt.active_engines == 0)
-		queue_delayed_work(dev_priv->wq,
-				   &dev_priv->gt.idle_work,
-				   msecs_to_jiffies(100));
+	i915_gem_retire_requests(dev_priv);
 }
 
 static void
@@ -3287,10 +2701,12 @@
 	 * We do not need to do this test under locking as in the worst-case
 	 * we queue the retire worker once too often.
 	 */
-	if (READ_ONCE(dev_priv->gt.awake))
+	if (READ_ONCE(dev_priv->gt.awake)) {
+		i915_queue_hangcheck(dev_priv);
 		queue_delayed_work(dev_priv->wq,
 				   &dev_priv->gt.retire_work,
 				   round_jiffies_up_relative(HZ));
+	}
 }
 
 static void
@@ -3300,7 +2716,6 @@
 		container_of(work, typeof(*dev_priv), gt.idle_work.work);
 	struct drm_device *dev = &dev_priv->drm;
 	struct intel_engine_cs *engine;
-	unsigned int stuck_engines;
 	bool rearm_hangcheck;
 
 	if (!READ_ONCE(dev_priv->gt.awake))
@@ -3330,12 +2745,6 @@
 	dev_priv->gt.awake = false;
 	rearm_hangcheck = false;
 
-	stuck_engines = intel_kick_waiters(dev_priv);
-	if (unlikely(stuck_engines)) {
-		DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n");
-		dev_priv->gpu_error.missed_irq_rings |= stuck_engines;
-	}
-
 	if (INTEL_GEN(dev_priv) >= 6)
 		gen6_rps_idle(dev_priv);
 	intel_runtime_pm_put(dev_priv);
@@ -3349,32 +2758,17 @@
 	}
 }
 
-/**
- * Ensures that an object will eventually get non-busy by flushing any required
- * write domains, emitting any outstanding lazy request and retiring and
- * completed requests.
- * @obj: object to flush
- */
-static int
-i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
+void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 {
-	int i;
+	struct drm_i915_gem_object *obj = to_intel_bo(gem);
+	struct drm_i915_file_private *fpriv = file->driver_priv;
+	struct i915_vma *vma, *vn;
 
-	if (!obj->active)
-		return 0;
-
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		struct drm_i915_gem_request *req;
-
-		req = obj->last_read_req[i];
-		if (req == NULL)
-			continue;
-
-		if (i915_gem_request_completed(req))
-			i915_gem_object_retire__read(obj, i);
-	}
-
-	return 0;
+	mutex_lock(&obj->base.dev->struct_mutex);
+	list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
+		if (vma->vm->file == fpriv)
+			i915_vma_close(vma);
+	mutex_unlock(&obj->base.dev->struct_mutex);
 }
 
 /**
@@ -3405,219 +2799,35 @@
 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
 	struct drm_i915_gem_wait *args = data;
+	struct intel_rps_client *rps = to_rps_client(file);
 	struct drm_i915_gem_object *obj;
-	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
-	int i, n = 0;
-	int ret;
+	unsigned long active;
+	int idx, ret = 0;
 
 	if (args->flags != 0)
 		return -EINVAL;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
-
-	obj = to_intel_bo(drm_gem_object_lookup(file, args->bo_handle));
-	if (&obj->base == NULL) {
-		mutex_unlock(&dev->struct_mutex);
+	obj = i915_gem_object_lookup(file, args->bo_handle);
+	if (!obj)
 		return -ENOENT;
+
+	active = __I915_BO_ACTIVE(obj);
+	for_each_active(active, idx) {
+		s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
+		ret = i915_gem_active_wait_unlocked(&obj->last_read[idx],
+						    I915_WAIT_INTERRUPTIBLE,
+						    timeout, rps);
+		if (ret)
+			break;
 	}
 
-	/* Need to make sure the object gets inactive eventually. */
-	ret = i915_gem_object_flush_active(obj);
-	if (ret)
-		goto out;
-
-	if (!obj->active)
-		goto out;
-
-	/* Do this after OLR check to make sure we make forward progress polling
-	 * on this IOCTL with a timeout == 0 (like busy ioctl)
-	 */
-	if (args->timeout_ns == 0) {
-		ret = -ETIME;
-		goto out;
-	}
-
-	drm_gem_object_unreference(&obj->base);
-
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		if (obj->last_read_req[i] == NULL)
-			continue;
-
-		req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
-	}
-
-	mutex_unlock(&dev->struct_mutex);
-
-	for (i = 0; i < n; i++) {
-		if (ret == 0)
-			ret = __i915_wait_request(req[i], true,
-						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
-						  to_rps_client(file));
-		i915_gem_request_unreference(req[i]);
-	}
+	i915_gem_object_put_unlocked(obj);
 	return ret;
-
-out:
-	drm_gem_object_unreference(&obj->base);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-
-static int
-__i915_gem_object_sync(struct drm_i915_gem_object *obj,
-		       struct intel_engine_cs *to,
-		       struct drm_i915_gem_request *from_req,
-		       struct drm_i915_gem_request **to_req)
-{
-	struct intel_engine_cs *from;
-	int ret;
-
-	from = i915_gem_request_get_engine(from_req);
-	if (to == from)
-		return 0;
-
-	if (i915_gem_request_completed(from_req))
-		return 0;
-
-	if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
-		struct drm_i915_private *i915 = to_i915(obj->base.dev);
-		ret = __i915_wait_request(from_req,
-					  i915->mm.interruptible,
-					  NULL,
-					  &i915->rps.semaphores);
-		if (ret)
-			return ret;
-
-		i915_gem_object_retire_request(obj, from_req);
-	} else {
-		int idx = intel_ring_sync_index(from, to);
-		u32 seqno = i915_gem_request_get_seqno(from_req);
-
-		WARN_ON(!to_req);
-
-		if (seqno <= from->semaphore.sync_seqno[idx])
-			return 0;
-
-		if (*to_req == NULL) {
-			struct drm_i915_gem_request *req;
-
-			req = i915_gem_request_alloc(to, NULL);
-			if (IS_ERR(req))
-				return PTR_ERR(req);
-
-			*to_req = req;
-		}
-
-		trace_i915_gem_ring_sync_to(*to_req, from, from_req);
-		ret = to->semaphore.sync_to(*to_req, from, seqno);
-		if (ret)
-			return ret;
-
-		/* We use last_read_req because sync_to()
-		 * might have just caused seqno wrap under
-		 * the radar.
-		 */
-		from->semaphore.sync_seqno[idx] =
-			i915_gem_request_get_seqno(obj->last_read_req[from->id]);
-	}
-
-	return 0;
-}
-
-/**
- * i915_gem_object_sync - sync an object to a ring.
- *
- * @obj: object which may be in use on another ring.
- * @to: ring we wish to use the object on. May be NULL.
- * @to_req: request we wish to use the object for. See below.
- *          This will be allocated and returned if a request is
- *          required but not passed in.
- *
- * This code is meant to abstract object synchronization with the GPU.
- * Calling with NULL implies synchronizing the object with the CPU
- * rather than a particular GPU ring. Conceptually we serialise writes
- * between engines inside the GPU. We only allow one engine to write
- * into a buffer at any time, but multiple readers. To ensure each has
- * a coherent view of memory, we must:
- *
- * - If there is an outstanding write request to the object, the new
- *   request must wait for it to complete (either CPU or in hw, requests
- *   on the same ring will be naturally ordered).
- *
- * - If we are a write request (pending_write_domain is set), the new
- *   request must wait for outstanding read requests to complete.
- *
- * For CPU synchronisation (NULL to) no request is required. For syncing with
- * rings to_req must be non-NULL. However, a request does not have to be
- * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
- * request will be allocated automatically and returned through *to_req. Note
- * that it is not guaranteed that commands will be emitted (because the system
- * might already be idle). Hence there is no need to create a request that
- * might never have any work submitted. Note further that if a request is
- * returned in *to_req, it is the responsibility of the caller to submit
- * that request (after potentially adding more work to it).
- *
- * Returns 0 if successful, else propagates up the lower layer error.
- */
-int
-i915_gem_object_sync(struct drm_i915_gem_object *obj,
-		     struct intel_engine_cs *to,
-		     struct drm_i915_gem_request **to_req)
-{
-	const bool readonly = obj->base.pending_write_domain == 0;
-	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
-	int ret, i, n;
-
-	if (!obj->active)
-		return 0;
-
-	if (to == NULL)
-		return i915_gem_object_wait_rendering(obj, readonly);
-
-	n = 0;
-	if (readonly) {
-		if (obj->last_write_req)
-			req[n++] = obj->last_write_req;
-	} else {
-		for (i = 0; i < I915_NUM_ENGINES; i++)
-			if (obj->last_read_req[i])
-				req[n++] = obj->last_read_req[i];
-	}
-	for (i = 0; i < n; i++) {
-		ret = __i915_gem_object_sync(obj, to, req[i], to_req);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
-{
-	u32 old_write_domain, old_read_domains;
-
-	/* Force a pagefault for domain tracking on next user access */
-	i915_gem_release_mmap(obj);
-
-	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
-		return;
-
-	old_read_domains = obj->base.read_domains;
-	old_write_domain = obj->base.write_domain;
-
-	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
-	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
-
-	trace_i915_gem_object_change_domain(obj,
-					    old_read_domains,
-					    old_write_domain);
 }
 
 static void __i915_vma_iounmap(struct i915_vma *vma)
 {
-	GEM_BUG_ON(vma->pin_count);
+	GEM_BUG_ON(i915_vma_is_pinned(vma));
 
 	if (vma->iomap == NULL)
 		return;
@@ -3626,65 +2836,83 @@
 	vma->iomap = NULL;
 }
 
-static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
+int i915_vma_unbind(struct i915_vma *vma)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	unsigned long active;
 	int ret;
 
-	if (list_empty(&vma->obj_link))
-		return 0;
+	/* First wait upon any activity as retiring the request may
+	 * have side-effects such as unpinning or even unbinding this vma.
+	 */
+	active = i915_vma_get_active(vma);
+	if (active) {
+		int idx;
 
-	if (!drm_mm_node_allocated(&vma->node)) {
-		i915_gem_vma_destroy(vma);
-		return 0;
+		/* When a closed VMA is retired, it is unbound - eek.
+		 * In order to prevent it from being recursively closed,
+		 * take a pin on the vma so that the second unbind is
+		 * aborted.
+		 */
+		__i915_vma_pin(vma);
+
+		for_each_active(active, idx) {
+			ret = i915_gem_active_retire(&vma->last_read[idx],
+						   &vma->vm->dev->struct_mutex);
+			if (ret)
+				break;
+		}
+
+		__i915_vma_unpin(vma);
+		if (ret)
+			return ret;
+
+		GEM_BUG_ON(i915_vma_is_active(vma));
 	}
 
-	if (vma->pin_count)
+	if (i915_vma_is_pinned(vma))
 		return -EBUSY;
 
-	BUG_ON(obj->pages == NULL);
+	if (!drm_mm_node_allocated(&vma->node))
+		goto destroy;
 
-	if (wait) {
-		ret = i915_gem_object_wait_rendering(obj, false);
-		if (ret)
-			return ret;
-	}
+	GEM_BUG_ON(obj->bind_count == 0);
+	GEM_BUG_ON(!obj->pages);
 
-	if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
-		i915_gem_object_finish_gtt(obj);
-
+	if (i915_vma_is_map_and_fenceable(vma)) {
 		/* release the fence reg _after_ flushing */
-		ret = i915_gem_object_put_fence(obj);
+		ret = i915_vma_put_fence(vma);
 		if (ret)
 			return ret;
 
+		/* Force a pagefault for domain tracking on next user access */
+		i915_gem_release_mmap(obj);
+
 		__i915_vma_iounmap(vma);
+		vma->flags &= ~I915_VMA_CAN_FENCE;
 	}
 
-	trace_i915_vma_unbind(vma);
-
-	vma->vm->unbind_vma(vma);
-	vma->bound = 0;
-
-	list_del_init(&vma->vm_link);
-	if (vma->is_ggtt) {
-		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
-			obj->map_and_fenceable = false;
-		} else if (vma->ggtt_view.pages) {
-			sg_free_table(vma->ggtt_view.pages);
-			kfree(vma->ggtt_view.pages);
-		}
-		vma->ggtt_view.pages = NULL;
+	if (likely(!vma->vm->closed)) {
+		trace_i915_vma_unbind(vma);
+		vma->vm->unbind_vma(vma);
 	}
+	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
 	drm_mm_remove_node(&vma->node);
-	i915_gem_vma_destroy(vma);
+	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
+	if (vma->pages != obj->pages) {
+		GEM_BUG_ON(!vma->pages);
+		sg_free_table(vma->pages);
+		kfree(vma->pages);
+	}
+	vma->pages = NULL;
 
 	/* Since the unbound list is global, only move to that list if
 	 * no more VMAs exist. */
-	if (list_empty(&obj->vma_list))
-		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+	if (--obj->bind_count == 0)
+		list_move_tail(&obj->global_list,
+			       &to_i915(obj->base.dev)->mm.unbound_list);
 
 	/* And finally now the object is completely decoupled from this vma,
 	 * we can drop its hold on the backing storage and allow it to be
@@ -3692,36 +2920,28 @@
 	 */
 	i915_gem_object_unpin_pages(obj);
 
+destroy:
+	if (unlikely(i915_vma_is_closed(vma)))
+		i915_vma_destroy(vma);
+
 	return 0;
 }
 
-int i915_vma_unbind(struct i915_vma *vma)
-{
-	return __i915_vma_unbind(vma, true);
-}
-
-int __i915_vma_unbind_no_wait(struct i915_vma *vma)
-{
-	return __i915_vma_unbind(vma, false);
-}
-
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
+			   unsigned int flags)
 {
 	struct intel_engine_cs *engine;
 	int ret;
 
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
 	for_each_engine(engine, dev_priv) {
 		if (engine->last_context == NULL)
 			continue;
 
-		ret = intel_engine_idle(engine);
+		ret = intel_engine_idle(engine, flags);
 		if (ret)
 			return ret;
 	}
 
-	WARN_ON(i915_verify_lists(dev));
 	return 0;
 }
 
@@ -3759,128 +2979,87 @@
 }
 
 /**
- * Finds free space in the GTT aperture and binds the object or a view of it
- * there.
- * @obj: object to bind
- * @vm: address space to bind into
- * @ggtt_view: global gtt view if applicable
- * @alignment: requested alignment
+ * i915_vma_insert - finds a slot for the vma in its address space
+ * @vma: the vma
+ * @size: requested size in bytes (can be larger than the VMA)
+ * @alignment: required alignment
  * @flags: mask of PIN_* flags to use
+ *
+ * First we try to allocate some free space that meets the requirements for
+ * the VMA. Failiing that, if the flags permit, it will evict an old VMA,
+ * preferrably the oldest idle entry to make room for the new VMA.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
  */
-static struct i915_vma *
-i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
-			   struct i915_address_space *vm,
-			   const struct i915_ggtt_view *ggtt_view,
-			   unsigned alignment,
-			   uint64_t flags)
+static int
+i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	u32 fence_alignment, unfenced_alignment;
-	u32 search_flag, alloc_flag;
+	struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
+	struct drm_i915_gem_object *obj = vma->obj;
 	u64 start, end;
-	u64 size, fence_size;
-	struct i915_vma *vma;
 	int ret;
 
-	if (i915_is_ggtt(vm)) {
-		u32 view_size;
+	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
 
-		if (WARN_ON(!ggtt_view))
-			return ERR_PTR(-EINVAL);
+	size = max(size, vma->size);
+	if (flags & PIN_MAPPABLE)
+		size = i915_gem_get_ggtt_size(dev_priv, size,
+					      i915_gem_object_get_tiling(obj));
 
-		view_size = i915_ggtt_view_size(obj, ggtt_view);
-
-		fence_size = i915_gem_get_gtt_size(dev,
-						   view_size,
-						   obj->tiling_mode);
-		fence_alignment = i915_gem_get_gtt_alignment(dev,
-							     view_size,
-							     obj->tiling_mode,
-							     true);
-		unfenced_alignment = i915_gem_get_gtt_alignment(dev,
-								view_size,
-								obj->tiling_mode,
-								false);
-		size = flags & PIN_MAPPABLE ? fence_size : view_size;
-	} else {
-		fence_size = i915_gem_get_gtt_size(dev,
-						   obj->base.size,
-						   obj->tiling_mode);
-		fence_alignment = i915_gem_get_gtt_alignment(dev,
-							     obj->base.size,
-							     obj->tiling_mode,
-							     true);
-		unfenced_alignment =
-			i915_gem_get_gtt_alignment(dev,
-						   obj->base.size,
-						   obj->tiling_mode,
-						   false);
-		size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
-	}
+	alignment = max(max(alignment, vma->display_alignment),
+			i915_gem_get_ggtt_alignment(dev_priv, size,
+						    i915_gem_object_get_tiling(obj),
+						    flags & PIN_MAPPABLE));
 
 	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
-	end = vm->total;
+
+	end = vma->vm->total;
 	if (flags & PIN_MAPPABLE)
-		end = min_t(u64, end, ggtt->mappable_end);
+		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
 	if (flags & PIN_ZONE_4G)
 		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
 
-	if (alignment == 0)
-		alignment = flags & PIN_MAPPABLE ? fence_alignment :
-						unfenced_alignment;
-	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
-		DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
-			  ggtt_view ? ggtt_view->type : 0,
-			  alignment);
-		return ERR_PTR(-EINVAL);
-	}
-
 	/* If binding the object/GGTT view requires more space than the entire
 	 * aperture has, reject it early before evicting everything in a vain
 	 * attempt to find space.
 	 */
 	if (size > end) {
-		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
-			  ggtt_view ? ggtt_view->type : 0,
-			  size,
+		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
+			  size, obj->base.size,
 			  flags & PIN_MAPPABLE ? "mappable" : "total",
 			  end);
-		return ERR_PTR(-E2BIG);
+		return -E2BIG;
 	}
 
 	ret = i915_gem_object_get_pages(obj);
 	if (ret)
-		return ERR_PTR(ret);
+		return ret;
 
 	i915_gem_object_pin_pages(obj);
 
-	vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
-			  i915_gem_obj_lookup_or_create_vma(obj, vm);
-
-	if (IS_ERR(vma))
-		goto err_unpin;
-
 	if (flags & PIN_OFFSET_FIXED) {
-		uint64_t offset = flags & PIN_OFFSET_MASK;
-
-		if (offset & (alignment - 1) || offset + size > end) {
+		u64 offset = flags & PIN_OFFSET_MASK;
+		if (offset & (alignment - 1) || offset > end - size) {
 			ret = -EINVAL;
-			goto err_free_vma;
+			goto err_unpin;
 		}
+
 		vma->node.start = offset;
 		vma->node.size = size;
 		vma->node.color = obj->cache_level;
-		ret = drm_mm_reserve_node(&vm->mm, &vma->node);
+		ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
 		if (ret) {
 			ret = i915_gem_evict_for_vma(vma);
 			if (ret == 0)
-				ret = drm_mm_reserve_node(&vm->mm, &vma->node);
+				ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+			if (ret)
+				goto err_unpin;
 		}
-		if (ret)
-			goto err_free_vma;
 	} else {
+		u32 search_flag, alloc_flag;
+
 		if (flags & PIN_HIGH) {
 			search_flag = DRM_MM_SEARCH_BELOW;
 			alloc_flag = DRM_MM_CREATE_TOP;
@@ -3889,47 +3068,45 @@
 			alloc_flag = DRM_MM_CREATE_DEFAULT;
 		}
 
+		/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
+		 * so we know that we always have a minimum alignment of 4096.
+		 * The drm_mm range manager is optimised to return results
+		 * with zero alignment, so where possible use the optimal
+		 * path.
+		 */
+		if (alignment <= 4096)
+			alignment = 0;
+
 search_free:
-		ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
+		ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
+							  &vma->node,
 							  size, alignment,
 							  obj->cache_level,
 							  start, end,
 							  search_flag,
 							  alloc_flag);
 		if (ret) {
-			ret = i915_gem_evict_something(dev, vm, size, alignment,
+			ret = i915_gem_evict_something(vma->vm, size, alignment,
 						       obj->cache_level,
 						       start, end,
 						       flags);
 			if (ret == 0)
 				goto search_free;
 
-			goto err_free_vma;
+			goto err_unpin;
 		}
 	}
-	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
-		ret = -EINVAL;
-		goto err_remove_node;
-	}
-
-	trace_i915_vma_bind(vma, flags);
-	ret = i915_vma_bind(vma, obj->cache_level, flags);
-	if (ret)
-		goto err_remove_node;
+	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-	list_add_tail(&vma->vm_link, &vm->inactive_list);
+	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+	obj->bind_count++;
 
-	return vma;
+	return 0;
 
-err_remove_node:
-	drm_mm_remove_node(&vma->node);
-err_free_vma:
-	i915_gem_vma_destroy(vma);
-	vma = ERR_PTR(ret);
 err_unpin:
 	i915_gem_object_unpin_pages(obj);
-	return vma;
+	return ret;
 }
 
 bool
@@ -3974,51 +3151,72 @@
 static void
 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 {
-	uint32_t old_write_domain;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
 	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
 		return;
 
 	/* No actual flushing is required for the GTT write domain.  Writes
-	 * to it immediately go to main memory as far as we know, so there's
+	 * to it "immediately" go to main memory as far as we know, so there's
 	 * no chipset flush.  It also doesn't land in render cache.
 	 *
 	 * However, we do have to enforce the order so that all writes through
 	 * the GTT land before any writes to the device, such as updates to
 	 * the GATT itself.
+	 *
+	 * We also have to wait a bit for the writes to land from the GTT.
+	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
+	 * timing. This issue has only been observed when switching quickly
+	 * between GTT writes and CPU reads from inside the kernel on recent hw,
+	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
+	 * system agents we cannot reproduce this behaviour).
 	 */
 	wmb();
+	if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
+		POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base));
 
-	old_write_domain = obj->base.write_domain;
+	intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
+
 	obj->base.write_domain = 0;
-
-	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
-
 	trace_i915_gem_object_change_domain(obj,
 					    obj->base.read_domains,
-					    old_write_domain);
+					    I915_GEM_DOMAIN_GTT);
 }
 
 /** Flushes the CPU write domain for the object if it's dirty. */
 static void
 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 {
-	uint32_t old_write_domain;
-
 	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
 		return;
 
 	if (i915_gem_clflush_object(obj, obj->pin_display))
 		i915_gem_chipset_flush(to_i915(obj->base.dev));
 
-	old_write_domain = obj->base.write_domain;
-	obj->base.write_domain = 0;
-
 	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 
+	obj->base.write_domain = 0;
 	trace_i915_gem_object_change_domain(obj,
 					    obj->base.read_domains,
-					    old_write_domain);
+					    I915_GEM_DOMAIN_CPU);
+}
+
+static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (!i915_vma_is_ggtt(vma))
+			continue;
+
+		if (i915_vma_is_active(vma))
+			continue;
+
+		if (!drm_mm_node_allocated(&vma->node))
+			continue;
+
+		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+	}
 }
 
 /**
@@ -4032,20 +3230,16 @@
 int
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	uint32_t old_write_domain, old_read_domains;
-	struct i915_vma *vma;
 	int ret;
 
-	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
-		return 0;
-
 	ret = i915_gem_object_wait_rendering(obj, !write);
 	if (ret)
 		return ret;
 
+	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+		return 0;
+
 	/* Flush and acquire obj->pages so that we are coherent through
 	 * direct access in memory with previous cached writes through
 	 * shmemfs and that our cache domain tracking remains valid.
@@ -4086,10 +3280,7 @@
 					    old_write_domain);
 
 	/* And bump the LRU for this access */
-	vma = i915_gem_obj_to_ggtt(obj);
-	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
-		list_move_tail(&vma->vm_link,
-			       &ggtt->base.inactive_list);
+	i915_gem_object_bump_inactive_ggtt(obj);
 
 	return 0;
 }
@@ -4112,9 +3303,7 @@
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct i915_vma *vma, *next;
-	bool bound = false;
+	struct i915_vma *vma;
 	int ret = 0;
 
 	if (obj->cache_level == cache_level)
@@ -4125,21 +3314,28 @@
 	 * catch the issue of the CS prefetch crossing page boundaries and
 	 * reading an invalid PTE on older architectures.
 	 */
-	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
+restart:
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
-		if (vma->pin_count) {
+		if (i915_vma_is_pinned(vma)) {
 			DRM_DEBUG("can not change the cache level of pinned objects\n");
 			return -EBUSY;
 		}
 
-		if (!i915_gem_valid_gtt_space(vma, cache_level)) {
-			ret = i915_vma_unbind(vma);
-			if (ret)
-				return ret;
-		} else
-			bound = true;
+		if (i915_gem_valid_gtt_space(vma, cache_level))
+			continue;
+
+		ret = i915_vma_unbind(vma);
+		if (ret)
+			return ret;
+
+		/* As unbinding may affect other elements in the
+		 * obj->vma_list (due to side-effects from retiring
+		 * an active vma), play safe and restart the iterator.
+		 */
+		goto restart;
 	}
 
 	/* We can reuse the existing drm_mm nodes but need to change the
@@ -4149,7 +3345,7 @@
 	 * rewrite the PTE in the belief that doing so tramples upon less
 	 * state and so involves less work.
 	 */
-	if (bound) {
+	if (obj->bind_count) {
 		/* Before we change the PTE, the GPU must not be accessing it.
 		 * If we wait upon the object, we know that all the bound
 		 * VMA are no longer active.
@@ -4158,7 +3354,7 @@
 		if (ret)
 			return ret;
 
-		if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
+		if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
 			/* Access to snoopable pages through the GTT is
 			 * incoherent and on some machines causes a hard
 			 * lockup. Relinquish the CPU mmaping to force
@@ -4175,9 +3371,11 @@
 			 * dropped the fence as all snoopable access is
 			 * supposed to be linear.
 			 */
-			ret = i915_gem_object_put_fence(obj);
-			if (ret)
-				return ret;
+			list_for_each_entry(vma, &obj->vma_list, obj_link) {
+				ret = i915_vma_put_fence(vma);
+				if (ret)
+					return ret;
+			}
 		} else {
 			/* We either have incoherent backing store and
 			 * so no GTT access or the architecture is fully
@@ -4221,8 +3419,8 @@
 	struct drm_i915_gem_caching *args = data;
 	struct drm_i915_gem_object *obj;
 
-	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-	if (&obj->base == NULL)
+	obj = i915_gem_object_lookup(file, args->handle);
+	if (!obj)
 		return -ENOENT;
 
 	switch (obj->cache_level) {
@@ -4240,7 +3438,7 @@
 		break;
 	}
 
-	drm_gem_object_unreference_unlocked(&obj->base);
+	i915_gem_object_put_unlocked(obj);
 	return 0;
 }
 
@@ -4282,15 +3480,15 @@
 	if (ret)
 		goto rpm_put;
 
-	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-	if (&obj->base == NULL) {
+	obj = i915_gem_object_lookup(file, args->handle);
+	if (!obj) {
 		ret = -ENOENT;
 		goto unlock;
 	}
 
 	ret = i915_gem_object_set_cache_level(obj, level);
 
-	drm_gem_object_unreference(&obj->base);
+	i915_gem_object_put(obj);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 rpm_put:
@@ -4304,11 +3502,12 @@
  * Can be called from an uninterruptible phase (modesetting) and allows
  * any flushes to be pipelined (for pageflips).
  */
-int
+struct i915_vma *
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     const struct i915_ggtt_view *view)
 {
+	struct i915_vma *vma;
 	u32 old_read_domains, old_write_domain;
 	int ret;
 
@@ -4328,19 +3527,31 @@
 	 */
 	ret = i915_gem_object_set_cache_level(obj,
 					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
-	if (ret)
+	if (ret) {
+		vma = ERR_PTR(ret);
 		goto err_unpin_display;
+	}
 
 	/* As the user may map the buffer once pinned in the display plane
 	 * (e.g. libkms for the bootup splash), we have to ensure that we
-	 * always use map_and_fenceable for all scanout buffers.
+	 * always use map_and_fenceable for all scanout buffers. However,
+	 * it may simply be too big to fit into mappable, in which case
+	 * put it anyway and hope that userspace can cope (but always first
+	 * try to preserve the existing ABI).
 	 */
-	ret = i915_gem_object_ggtt_pin(obj, view, alignment,
-				       view->type == I915_GGTT_VIEW_NORMAL ?
-				       PIN_MAPPABLE : 0);
-	if (ret)
+	vma = ERR_PTR(-ENOSPC);
+	if (view->type == I915_GGTT_VIEW_NORMAL)
+		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
+					       PIN_MAPPABLE | PIN_NONBLOCK);
+	if (IS_ERR(vma))
+		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0);
+	if (IS_ERR(vma))
 		goto err_unpin_display;
 
+	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+
+	WARN_ON(obj->pin_display > i915_vma_pin_count(vma));
+
 	i915_gem_object_flush_cpu_write_domain(obj);
 
 	old_write_domain = obj->base.write_domain;
@@ -4356,23 +3567,28 @@
 					    old_read_domains,
 					    old_write_domain);
 
-	return 0;
+	return vma;
 
 err_unpin_display:
 	obj->pin_display--;
-	return ret;
+	return vma;
 }
 
 void
-i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
-					 const struct i915_ggtt_view *view)
+i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
 {
-	if (WARN_ON(obj->pin_display == 0))
+	if (WARN_ON(vma->obj->pin_display == 0))
 		return;
 
-	i915_gem_object_ggtt_unpin_view(obj, view);
+	if (--vma->obj->pin_display == 0)
+		vma->display_alignment = 0;
 
-	obj->pin_display--;
+	/* Bump the LRU to try and avoid premature eviction whilst flipping  */
+	if (!i915_vma_is_active(vma))
+		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+
+	i915_vma_unpin(vma);
+	WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma));
 }
 
 /**
@@ -4389,13 +3605,13 @@
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
-	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
-		return 0;
-
 	ret = i915_gem_object_wait_rendering(obj, !write);
 	if (ret)
 		return ret;
 
+	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+		return 0;
+
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	old_write_domain = obj->base.write_domain;
@@ -4470,28 +3686,31 @@
 		target = request;
 	}
 	if (target)
-		i915_gem_request_reference(target);
+		i915_gem_request_get(target);
 	spin_unlock(&file_priv->mm.lock);
 
 	if (target == NULL)
 		return 0;
 
-	ret = __i915_wait_request(target, true, NULL, NULL);
-	i915_gem_request_unreference(target);
+	ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
+	i915_gem_request_put(target);
 
 	return ret;
 }
 
 static bool
-i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
-	struct drm_i915_gem_object *obj = vma->obj;
+	if (!drm_mm_node_allocated(&vma->node))
+		return false;
 
-	if (alignment &&
-	    vma->node.start & (alignment - 1))
+	if (vma->node.size < size)
 		return true;
 
-	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+	if (alignment && vma->node.start & (alignment - 1))
+		return true;
+
+	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
 		return true;
 
 	if (flags & PIN_OFFSET_BIAS &&
@@ -4508,135 +3727,208 @@
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	bool mappable, fenceable;
 	u32 fence_size, fence_alignment;
 
-	fence_size = i915_gem_get_gtt_size(obj->base.dev,
-					   obj->base.size,
-					   obj->tiling_mode);
-	fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
-						     obj->base.size,
-						     obj->tiling_mode,
-						     true);
+	fence_size = i915_gem_get_ggtt_size(dev_priv,
+					    vma->size,
+					    i915_gem_object_get_tiling(obj));
+	fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
+						      vma->size,
+						      i915_gem_object_get_tiling(obj),
+						      true);
 
 	fenceable = (vma->node.size == fence_size &&
 		     (vma->node.start & (fence_alignment - 1)) == 0);
 
 	mappable = (vma->node.start + fence_size <=
-		    to_i915(obj->base.dev)->ggtt.mappable_end);
+		    dev_priv->ggtt.mappable_end);
 
-	obj->map_and_fenceable = mappable && fenceable;
+	if (mappable && fenceable)
+		vma->flags |= I915_VMA_CAN_FENCE;
+	else
+		vma->flags &= ~I915_VMA_CAN_FENCE;
 }
 
-static int
-i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
-		       struct i915_address_space *vm,
-		       const struct i915_ggtt_view *ggtt_view,
-		       uint32_t alignment,
-		       uint64_t flags)
+int __i915_vma_do_pin(struct i915_vma *vma,
+		      u64 size, u64 alignment, u64 flags)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct i915_vma *vma;
-	unsigned bound;
+	unsigned int bound = vma->flags;
 	int ret;
 
-	if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
-		return -ENODEV;
+	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
+	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
 
-	if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
-		return -EINVAL;
-
-	if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
-		return -EINVAL;
-
-	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
-		return -EINVAL;
-
-	vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
-			  i915_gem_obj_to_vma(obj, vm);
-
-	if (vma) {
-		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
-			return -EBUSY;
-
-		if (i915_vma_misplaced(vma, alignment, flags)) {
-			WARN(vma->pin_count,
-			     "bo is already pinned in %s with incorrect alignment:"
-			     " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
-			     " obj->map_and_fenceable=%d\n",
-			     ggtt_view ? "ggtt" : "ppgtt",
-			     upper_32_bits(vma->node.start),
-			     lower_32_bits(vma->node.start),
-			     alignment,
-			     !!(flags & PIN_MAPPABLE),
-			     obj->map_and_fenceable);
-			ret = i915_vma_unbind(vma);
-			if (ret)
-				return ret;
-
-			vma = NULL;
-		}
+	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
+		ret = -EBUSY;
+		goto err;
 	}
 
-	bound = vma ? vma->bound : 0;
-	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
-		vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
-						 flags);
-		if (IS_ERR(vma))
-			return PTR_ERR(vma);
-	} else {
-		ret = i915_vma_bind(vma, obj->cache_level, flags);
+	if ((bound & I915_VMA_BIND_MASK) == 0) {
+		ret = i915_vma_insert(vma, size, alignment, flags);
 		if (ret)
-			return ret;
+			goto err;
 	}
 
-	if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
-	    (bound ^ vma->bound) & GLOBAL_BIND) {
+	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
+	if (ret)
+		goto err;
+
+	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
 		__i915_vma_set_map_and_fenceable(vma);
-		WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
-	}
 
-	vma->pin_count++;
+	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
 	return 0;
+
+err:
+	__i915_vma_unpin(vma);
+	return ret;
 }
 
-int
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-		    struct i915_address_space *vm,
-		    uint32_t alignment,
-		    uint64_t flags)
-{
-	return i915_gem_object_do_pin(obj, vm,
-				      i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
-				      alignment, flags);
-}
-
-int
+struct i915_vma *
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
-			 uint32_t alignment,
-			 uint64_t flags)
+			 u64 size,
+			 u64 alignment,
+			 u64 flags)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
+	struct i915_vma *vma;
+	int ret;
 
-	BUG_ON(!view);
+	vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
+	if (IS_ERR(vma))
+		return vma;
 
-	return i915_gem_object_do_pin(obj, &ggtt->base, view,
-				      alignment, flags | PIN_GLOBAL);
+	if (i915_vma_misplaced(vma, size, alignment, flags)) {
+		if (flags & PIN_NONBLOCK &&
+		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
+			return ERR_PTR(-ENOSPC);
+
+		WARN(i915_vma_is_pinned(vma),
+		     "bo is already pinned in ggtt with incorrect alignment:"
+		     " offset=%08x, req.alignment=%llx,"
+		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
+		     i915_ggtt_offset(vma), alignment,
+		     !!(flags & PIN_MAPPABLE),
+		     i915_vma_is_map_and_fenceable(vma));
+		ret = i915_vma_unbind(vma);
+		if (ret)
+			return ERR_PTR(ret);
+	}
+
+	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return vma;
 }
 
-void
-i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
-				const struct i915_ggtt_view *view)
+static __always_inline unsigned int __busy_read_flag(unsigned int id)
 {
-	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
+	/* Note that we could alias engines in the execbuf API, but
+	 * that would be very unwise as it prevents userspace from
+	 * fine control over engine selection. Ahem.
+	 *
+	 * This should be something like EXEC_MAX_ENGINE instead of
+	 * I915_NUM_ENGINES.
+	 */
+	BUILD_BUG_ON(I915_NUM_ENGINES > 16);
+	return 0x10000 << id;
+}
 
-	WARN_ON(vma->pin_count == 0);
-	WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
+static __always_inline unsigned int __busy_write_id(unsigned int id)
+{
+	/* The uABI guarantees an active writer is also amongst the read
+	 * engines. This would be true if we accessed the activity tracking
+	 * under the lock, but as we perform the lookup of the object and
+	 * its activity locklessly we can not guarantee that the last_write
+	 * being active implies that we have set the same engine flag from
+	 * last_read - hence we always set both read and write busy for
+	 * last_write.
+	 */
+	return id | __busy_read_flag(id);
+}
 
-	--vma->pin_count;
+static __always_inline unsigned int
+__busy_set_if_active(const struct i915_gem_active *active,
+		     unsigned int (*flag)(unsigned int id))
+{
+	struct drm_i915_gem_request *request;
+
+	request = rcu_dereference(active->request);
+	if (!request || i915_gem_request_completed(request))
+		return 0;
+
+	/* This is racy. See __i915_gem_active_get_rcu() for an in detail
+	 * discussion of how to handle the race correctly, but for reporting
+	 * the busy state we err on the side of potentially reporting the
+	 * wrong engine as being busy (but we guarantee that the result
+	 * is at least self-consistent).
+	 *
+	 * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated
+	 * whilst we are inspecting it, even under the RCU read lock as we are.
+	 * This means that there is a small window for the engine and/or the
+	 * seqno to have been overwritten. The seqno will always be in the
+	 * future compared to the intended, and so we know that if that
+	 * seqno is idle (on whatever engine) our request is idle and the
+	 * return 0 above is correct.
+	 *
+	 * The issue is that if the engine is switched, it is just as likely
+	 * to report that it is busy (but since the switch happened, we know
+	 * the request should be idle). So there is a small chance that a busy
+	 * result is actually the wrong engine.
+	 *
+	 * So why don't we care?
+	 *
+	 * For starters, the busy ioctl is a heuristic that is by definition
+	 * racy. Even with perfect serialisation in the driver, the hardware
+	 * state is constantly advancing - the state we report to the user
+	 * is stale.
+	 *
+	 * The critical information for the busy-ioctl is whether the object
+	 * is idle as userspace relies on that to detect whether its next
+	 * access will stall, or if it has missed submitting commands to
+	 * the hardware allowing the GPU to stall. We never generate a
+	 * false-positive for idleness, thus busy-ioctl is reliable at the
+	 * most fundamental level, and we maintain the guarantee that a
+	 * busy object left to itself will eventually become idle (and stay
+	 * idle!).
+	 *
+	 * We allow ourselves the leeway of potentially misreporting the busy
+	 * state because that is an optimisation heuristic that is constantly
+	 * in flux. Being quickly able to detect the busy/idle state is much
+	 * more important than accurate logging of exactly which engines were
+	 * busy.
+	 *
+	 * For accuracy in reporting the engine, we could use
+	 *
+	 *	result = 0;
+	 *	request = __i915_gem_active_get_rcu(active);
+	 *	if (request) {
+	 *		if (!i915_gem_request_completed(request))
+	 *			result = flag(request->engine->exec_id);
+	 *		i915_gem_request_put(request);
+	 *	}
+	 *
+	 * but that still remains susceptible to both hardware and userspace
+	 * races. So we accept making the result of that race slightly worse,
+	 * given the rarity of the race and its low impact on the result.
+	 */
+	return flag(READ_ONCE(request->engine->exec_id));
+}
+
+static __always_inline unsigned int
+busy_check_reader(const struct i915_gem_active *active)
+{
+	return __busy_set_if_active(active, __busy_read_flag);
+}
+
+static __always_inline unsigned int
+busy_check_writer(const struct i915_gem_active *active)
+{
+	return __busy_set_if_active(active, __busy_write_id);
 }
 
 int
@@ -4645,47 +3937,64 @@
 {
 	struct drm_i915_gem_busy *args = data;
 	struct drm_i915_gem_object *obj;
-	int ret;
+	unsigned long active;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
-
-	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-	if (&obj->base == NULL) {
-		ret = -ENOENT;
-		goto unlock;
-	}
-
-	/* Count all active objects as busy, even if they are currently not used
-	 * by the gpu. Users of this interface expect objects to eventually
-	 * become non-busy without any further actions, therefore emit any
-	 * necessary flushes here.
-	 */
-	ret = i915_gem_object_flush_active(obj);
-	if (ret)
-		goto unref;
+	obj = i915_gem_object_lookup(file, args->handle);
+	if (!obj)
+		return -ENOENT;
 
 	args->busy = 0;
-	if (obj->active) {
-		int i;
+	active = __I915_BO_ACTIVE(obj);
+	if (active) {
+		int idx;
 
-		for (i = 0; i < I915_NUM_ENGINES; i++) {
-			struct drm_i915_gem_request *req;
+		/* Yes, the lookups are intentionally racy.
+		 *
+		 * First, we cannot simply rely on __I915_BO_ACTIVE. We have
+		 * to regard the value as stale and as our ABI guarantees
+		 * forward progress, we confirm the status of each active
+		 * request with the hardware.
+		 *
+		 * Even though we guard the pointer lookup by RCU, that only
+		 * guarantees that the pointer and its contents remain
+		 * dereferencable and does *not* mean that the request we
+		 * have is the same as the one being tracked by the object.
+		 *
+		 * Consider that we lookup the request just as it is being
+		 * retired and freed. We take a local copy of the pointer,
+		 * but before we add its engine into the busy set, the other
+		 * thread reallocates it and assigns it to a task on another
+		 * engine with a fresh and incomplete seqno. Guarding against
+		 * that requires careful serialisation and reference counting,
+		 * i.e. using __i915_gem_active_get_request_rcu(). We don't,
+		 * instead we expect that if the result is busy, which engines
+		 * are busy is not completely reliable - we only guarantee
+		 * that the object was busy.
+		 */
+		rcu_read_lock();
 
-			req = obj->last_read_req[i];
-			if (req)
-				args->busy |= 1 << (16 + req->engine->exec_id);
-		}
-		if (obj->last_write_req)
-			args->busy |= obj->last_write_req->engine->exec_id;
+		for_each_active(active, idx)
+			args->busy |= busy_check_reader(&obj->last_read[idx]);
+
+		/* For ABI sanity, we only care that the write engine is in
+		 * the set of read engines. This should be ensured by the
+		 * ordering of setting last_read/last_write in
+		 * i915_vma_move_to_active(), and then in reverse in retire.
+		 * However, for good measure, we always report the last_write
+		 * request as a busy read as well as being a busy write.
+		 *
+		 * We don't care that the set of active read/write engines
+		 * may change during construction of the result, as it is
+		 * equally liable to change before userspace can inspect
+		 * the result.
+		 */
+		args->busy |= busy_check_writer(&obj->last_write);
+
+		rcu_read_unlock();
 	}
 
-unref:
-	drm_gem_object_unreference(&obj->base);
-unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	i915_gem_object_put_unlocked(obj);
+	return 0;
 }
 
 int
@@ -4716,19 +4025,14 @@
 	if (ret)
 		return ret;
 
-	obj = to_intel_bo(drm_gem_object_lookup(file_priv, args->handle));
-	if (&obj->base == NULL) {
+	obj = i915_gem_object_lookup(file_priv, args->handle);
+	if (!obj) {
 		ret = -ENOENT;
 		goto unlock;
 	}
 
-	if (i915_gem_obj_is_pinned(obj)) {
-		ret = -EINVAL;
-		goto out;
-	}
-
 	if (obj->pages &&
-	    obj->tiling_mode != I915_TILING_NONE &&
+	    i915_gem_object_is_tiled(obj) &&
 	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 		if (obj->madv == I915_MADV_WILLNEED)
 			i915_gem_object_unpin_pages(obj);
@@ -4745,8 +4049,7 @@
 
 	args->retained = obj->madv != __I915_MADV_PURGED;
 
-out:
-	drm_gem_object_unreference(&obj->base);
+	i915_gem_object_put(obj);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -4759,14 +4062,17 @@
 
 	INIT_LIST_HEAD(&obj->global_list);
 	for (i = 0; i < I915_NUM_ENGINES; i++)
-		INIT_LIST_HEAD(&obj->engine_list[i]);
+		init_request_active(&obj->last_read[i],
+				    i915_gem_object_retire__read);
+	init_request_active(&obj->last_write,
+			    i915_gem_object_retire__write);
 	INIT_LIST_HEAD(&obj->obj_exec_link);
 	INIT_LIST_HEAD(&obj->vma_list);
 	INIT_LIST_HEAD(&obj->batch_pool_link);
 
 	obj->ops = ops;
 
-	obj->fence_reg = I915_FENCE_REG_NONE;
+	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
 	obj->madv = I915_MADV_WILLNEED;
 
 	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
@@ -4871,33 +4177,31 @@
 
 	trace_i915_gem_object_destroy(obj);
 
+	/* All file-owned VMA should have been released by this point through
+	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
+	 * However, the object may also be bound into the global GTT (e.g.
+	 * older GPUs without per-process support, or for direct access through
+	 * the GTT either for the user or for scanout). Those VMA still need to
+	 * unbound now.
+	 */
 	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
-		int ret;
-
-		vma->pin_count = 0;
-		ret = i915_vma_unbind(vma);
-		if (WARN_ON(ret == -ERESTARTSYS)) {
-			bool was_interruptible;
-
-			was_interruptible = dev_priv->mm.interruptible;
-			dev_priv->mm.interruptible = false;
-
-			WARN_ON(i915_vma_unbind(vma));
-
-			dev_priv->mm.interruptible = was_interruptible;
-		}
+		GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+		GEM_BUG_ON(i915_vma_is_active(vma));
+		vma->flags &= ~I915_VMA_PIN_MASK;
+		i915_vma_close(vma);
 	}
+	GEM_BUG_ON(obj->bind_count);
 
 	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
 	 * before progressing. */
 	if (obj->stolen)
 		i915_gem_object_unpin_pages(obj);
 
-	WARN_ON(obj->frontbuffer_bits);
+	WARN_ON(atomic_read(&obj->frontbuffer_bits));
 
 	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
 	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
-	    obj->tiling_mode != I915_TILING_NONE)
+	    i915_gem_object_is_tiled(obj))
 		i915_gem_object_unpin_pages(obj);
 
 	if (WARN_ON(obj->pages_pin_count))
@@ -4905,7 +4209,6 @@
 	if (discard_backing_storage(obj))
 		obj->madv = I915_MADV_DONTNEED;
 	i915_gem_object_put_pages(obj);
-	i915_gem_object_free_mmap_offset(obj);
 
 	BUG_ON(obj->pages);
 
@@ -4924,71 +4227,35 @@
 	intel_runtime_pm_put(dev_priv);
 }
 
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm)
-{
-	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
-		    vma->vm == vm)
-			return vma;
-	}
-	return NULL;
-}
-
-struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
-					   const struct i915_ggtt_view *view)
-{
-	struct i915_vma *vma;
-
-	GEM_BUG_ON(!view);
-
-	list_for_each_entry(vma, &obj->vma_list, obj_link)
-		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
-			return vma;
-	return NULL;
-}
-
-void i915_gem_vma_destroy(struct i915_vma *vma)
-{
-	WARN_ON(vma->node.allocated);
-
-	/* Keep the vma as a placeholder in the execbuffer reservation lists */
-	if (!list_empty(&vma->exec_list))
-		return;
-
-	if (!vma->is_ggtt)
-		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
-
-	list_del(&vma->obj_link);
-
-	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
-}
-
-static void
-i915_gem_stop_engines(struct drm_device *dev)
+int i915_gem_suspend(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_engine_cs *engine;
+	int ret;
 
-	for_each_engine(engine, dev_priv)
-		dev_priv->gt.stop_engine(engine);
-}
-
-int
-i915_gem_suspend(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int ret = 0;
+	intel_suspend_gt_powersave(dev_priv);
 
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_gem_wait_for_idle(dev_priv);
+
+	/* We have to flush all the executing contexts to main memory so
+	 * that they can saved in the hibernation image. To ensure the last
+	 * context image is coherent, we have to switch away from it. That
+	 * leaves the dev_priv->kernel_context still active when
+	 * we actually suspend, and its image in memory may not match the GPU
+	 * state. Fortunately, the kernel_context is disposable and we do
+	 * not rely on its state.
+	 */
+	ret = i915_gem_switch_to_kernel_context(dev_priv);
+	if (ret)
+		goto err;
+
+	ret = i915_gem_wait_for_idle(dev_priv,
+				     I915_WAIT_INTERRUPTIBLE |
+				     I915_WAIT_LOCKED);
 	if (ret)
 		goto err;
 
 	i915_gem_retire_requests(dev_priv);
 
-	i915_gem_stop_engines(dev);
 	i915_gem_context_lost(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -5008,6 +4275,22 @@
 	return ret;
 }
 
+void i915_gem_resume(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_restore_gtt_mappings(dev);
+
+	/* As we didn't flush the kernel context before suspend, we cannot
+	 * guarantee that the context image is complete. So let's just reset
+	 * it and start again.
+	 */
+	dev_priv->gt.resume(dev_priv);
+
+	mutex_unlock(&dev->struct_mutex);
+}
+
 void i915_gem_init_swizzling(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5060,53 +4343,6 @@
 	}
 }
 
-int i915_gem_init_engines(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int ret;
-
-	ret = intel_init_render_ring_buffer(dev);
-	if (ret)
-		return ret;
-
-	if (HAS_BSD(dev)) {
-		ret = intel_init_bsd_ring_buffer(dev);
-		if (ret)
-			goto cleanup_render_ring;
-	}
-
-	if (HAS_BLT(dev)) {
-		ret = intel_init_blt_ring_buffer(dev);
-		if (ret)
-			goto cleanup_bsd_ring;
-	}
-
-	if (HAS_VEBOX(dev)) {
-		ret = intel_init_vebox_ring_buffer(dev);
-		if (ret)
-			goto cleanup_blt_ring;
-	}
-
-	if (HAS_BSD2(dev)) {
-		ret = intel_init_bsd2_ring_buffer(dev);
-		if (ret)
-			goto cleanup_vebox_ring;
-	}
-
-	return 0;
-
-cleanup_vebox_ring:
-	intel_cleanup_engine(&dev_priv->engine[VECS]);
-cleanup_blt_ring:
-	intel_cleanup_engine(&dev_priv->engine[BCS]);
-cleanup_bsd_ring:
-	intel_cleanup_engine(&dev_priv->engine[VCS]);
-cleanup_render_ring:
-	intel_cleanup_engine(&dev_priv->engine[RCS]);
-
-	return ret;
-}
-
 int
 i915_gem_init_hw(struct drm_device *dev)
 {
@@ -5173,6 +4409,27 @@
 	return ret;
 }
 
+bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
+{
+	if (INTEL_INFO(dev_priv)->gen < 6)
+		return false;
+
+	/* TODO: make semaphores and Execlists play nicely together */
+	if (i915.enable_execlists)
+		return false;
+
+	if (value >= 0)
+		return value;
+
+#ifdef CONFIG_INTEL_IOMMU
+	/* Enable semaphores on SNB when IO remapping is off */
+	if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
+		return false;
+#endif
+
+	return true;
+}
+
 int i915_gem_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5181,15 +4438,11 @@
 	mutex_lock(&dev->struct_mutex);
 
 	if (!i915.enable_execlists) {
-		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
-		dev_priv->gt.init_engines = i915_gem_init_engines;
-		dev_priv->gt.cleanup_engine = intel_cleanup_engine;
-		dev_priv->gt.stop_engine = intel_stop_engine;
+		dev_priv->gt.resume = intel_legacy_submission_resume;
+		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
 	} else {
-		dev_priv->gt.execbuf_submit = intel_execlists_submission;
-		dev_priv->gt.init_engines = intel_logical_rings_init;
+		dev_priv->gt.resume = intel_lr_context_resume;
 		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
-		dev_priv->gt.stop_engine = intel_logical_ring_stop;
 	}
 
 	/* This is just a security blanket to placate dragons.
@@ -5201,24 +4454,27 @@
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
 	i915_gem_init_userptr(dev_priv);
-	i915_gem_init_ggtt(dev);
+
+	ret = i915_gem_init_ggtt(dev_priv);
+	if (ret)
+		goto out_unlock;
 
 	ret = i915_gem_context_init(dev);
 	if (ret)
 		goto out_unlock;
 
-	ret = dev_priv->gt.init_engines(dev);
+	ret = intel_engines_init(dev);
 	if (ret)
 		goto out_unlock;
 
 	ret = i915_gem_init_hw(dev);
 	if (ret == -EIO) {
-		/* Allow ring initialisation to fail by marking the GPU as
+		/* Allow engine initialisation to fail by marking the GPU as
 		 * wedged. But we only want to do this where the GPU is angry,
 		 * for all other failure, such as an allocation failure, bail.
 		 */
 		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
-		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		i915_gem_set_wedged(dev_priv);
 		ret = 0;
 	}
 
@@ -5242,7 +4498,6 @@
 static void
 init_engine_lists(struct intel_engine_cs *engine)
 {
-	INIT_LIST_HEAD(&engine->active_list);
 	INIT_LIST_HEAD(&engine->request_list);
 }
 
@@ -5250,6 +4505,7 @@
 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
+	int i;
 
 	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
 	    !IS_CHERRYVIEW(dev_priv))
@@ -5265,6 +4521,13 @@
 				I915_READ(vgtif_reg(avail_rs.fence_num));
 
 	/* Initialize fence registers to zero */
+	for (i = 0; i < dev_priv->num_fence_regs; i++) {
+		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
+
+		fence->i915 = dev_priv;
+		fence->id = i;
+		list_add_tail(&fence->link, &dev_priv->mm.fence_list);
+	}
 	i915_gem_restore_fences(dev);
 
 	i915_gem_detect_bit_6_swizzle(dev);
@@ -5289,18 +4552,17 @@
 	dev_priv->requests =
 		kmem_cache_create("i915_gem_request",
 				  sizeof(struct drm_i915_gem_request), 0,
-				  SLAB_HWCACHE_ALIGN,
+				  SLAB_HWCACHE_ALIGN |
+				  SLAB_RECLAIM_ACCOUNT |
+				  SLAB_DESTROY_BY_RCU,
 				  NULL);
 
-	INIT_LIST_HEAD(&dev_priv->vm_list);
 	INIT_LIST_HEAD(&dev_priv->context_list);
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	for (i = 0; i < I915_NUM_ENGINES; i++)
 		init_engine_lists(&dev_priv->engine[i]);
-	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
-		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
 			  i915_gem_retire_work_handler);
 	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
@@ -5310,13 +4572,13 @@
 
 	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
-	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-
 	init_waitqueue_head(&dev_priv->pending_flip_queue);
 
 	dev_priv->mm.interruptible = true;
 
-	mutex_init(&dev_priv->fb_tracking.lock);
+	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
+
+	spin_lock_init(&dev_priv->fb_tracking.lock);
 }
 
 void i915_gem_load_cleanup(struct drm_device *dev)
@@ -5326,11 +4588,32 @@
 	kmem_cache_destroy(dev_priv->requests);
 	kmem_cache_destroy(dev_priv->vmas);
 	kmem_cache_destroy(dev_priv->objects);
+
+	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
+	rcu_barrier();
+}
+
+int i915_gem_freeze(struct drm_i915_private *dev_priv)
+{
+	intel_runtime_pm_get(dev_priv);
+
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	i915_gem_shrink_all(dev_priv);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+
+	intel_runtime_pm_put(dev_priv);
+
+	return 0;
 }
 
 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
 {
 	struct drm_i915_gem_object *obj;
+	struct list_head *phases[] = {
+		&dev_priv->mm.unbound_list,
+		&dev_priv->mm.bound_list,
+		NULL
+	}, **p;
 
 	/* Called just before we write the hibernation image.
 	 *
@@ -5341,17 +4624,21 @@
 	 *
 	 * To make sure the hibernation image contains the latest state,
 	 * we update that state just before writing out the image.
+	 *
+	 * To try and reduce the hibernation image, we manually shrink
+	 * the objects as well.
 	 */
 
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
-		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-	}
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
 
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	for (p = phases; *p; p++) {
+		list_for_each_entry(obj, *p, global_list) {
+			obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+			obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+		}
 	}
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	return 0;
 }
@@ -5359,21 +4646,15 @@
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct drm_i915_gem_request *request;
 
 	/* Clean up our request list when the client is going away, so that
 	 * later retire_requests won't dereference our soon-to-be-gone
 	 * file_priv.
 	 */
 	spin_lock(&file_priv->mm.lock);
-	while (!list_empty(&file_priv->mm.request_list)) {
-		struct drm_i915_gem_request *request;
-
-		request = list_first_entry(&file_priv->mm.request_list,
-					   struct drm_i915_gem_request,
-					   client_list);
-		list_del(&request->client_list);
+	list_for_each_entry(request, &file_priv->mm.request_list, client_list)
 		request->file_priv = NULL;
-	}
 	spin_unlock(&file_priv->mm.lock);
 
 	if (!list_empty(&file_priv->rps.link)) {
@@ -5402,7 +4683,7 @@
 	spin_lock_init(&file_priv->mm.lock);
 	INIT_LIST_HEAD(&file_priv->mm.request_list);
 
-	file_priv->bsd_ring = -1;
+	file_priv->bsd_engine = -1;
 
 	ret = i915_gem_context_open(dev, file);
 	if (ret)
@@ -5424,120 +4705,26 @@
 		       struct drm_i915_gem_object *new,
 		       unsigned frontbuffer_bits)
 {
+	/* Control of individual bits within the mask are guarded by
+	 * the owning plane->mutex, i.e. we can never see concurrent
+	 * manipulation of individual bits. But since the bitfield as a whole
+	 * is updated using RMW, we need to use atomics in order to update
+	 * the bits.
+	 */
+	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
+		     sizeof(atomic_t) * BITS_PER_BYTE);
+
 	if (old) {
-		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
-		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
-		old->frontbuffer_bits &= ~frontbuffer_bits;
+		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
+		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
 	}
 
 	if (new) {
-		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
-		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
-		new->frontbuffer_bits |= frontbuffer_bits;
+		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
+		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
 	}
 }
 
-/* All the new VM stuff */
-u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
-			struct i915_address_space *vm)
-{
-	struct drm_i915_private *dev_priv = to_i915(o->base.dev);
-	struct i915_vma *vma;
-
-	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
-	list_for_each_entry(vma, &o->vma_list, obj_link) {
-		if (vma->is_ggtt &&
-		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-			continue;
-		if (vma->vm == vm)
-			return vma->node.start;
-	}
-
-	WARN(1, "%s vma for this object not found.\n",
-	     i915_is_ggtt(vm) ? "global" : "ppgtt");
-	return -1;
-}
-
-u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
-				  const struct i915_ggtt_view *view)
-{
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
-			return vma->node.start;
-
-	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
-	return -1;
-}
-
-bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
-			struct i915_address_space *vm)
-{
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &o->vma_list, obj_link) {
-		if (vma->is_ggtt &&
-		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-			continue;
-		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
-			return true;
-	}
-
-	return false;
-}
-
-bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
-				  const struct i915_ggtt_view *view)
-{
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (vma->is_ggtt &&
-		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
-		    drm_mm_node_allocated(&vma->node))
-			return true;
-
-	return false;
-}
-
-bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
-{
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (drm_mm_node_allocated(&vma->node))
-			return true;
-
-	return false;
-}
-
-unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
-{
-	struct i915_vma *vma;
-
-	GEM_BUG_ON(list_empty(&o->vma_list));
-
-	list_for_each_entry(vma, &o->vma_list, obj_link) {
-		if (vma->is_ggtt &&
-		    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
-			return vma->node.size;
-	}
-
-	return 0;
-}
-
-bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
-{
-	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, obj_link)
-		if (vma->pin_count > 0)
-			return true;
-
-	return false;
-}
-
 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
 struct page *
 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
@@ -5590,6 +4777,6 @@
 	return obj;
 
 fail:
-	drm_gem_object_unreference(&obj->base);
+	i915_gem_object_put(obj);
 	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 3752d5d..ed98959 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -41,15 +41,15 @@
 
 /**
  * i915_gem_batch_pool_init() - initialize a batch buffer pool
- * @dev: the drm device
+ * @engine: the associated request submission engine
  * @pool: the batch buffer pool
  */
-void i915_gem_batch_pool_init(struct drm_device *dev,
+void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
 			      struct i915_gem_batch_pool *pool)
 {
 	int n;
 
-	pool->dev = dev;
+	pool->engine = engine;
 
 	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
 		INIT_LIST_HEAD(&pool->cache_list[n]);
@@ -65,18 +65,17 @@
 {
 	int n;
 
-	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
 
 	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
-		while (!list_empty(&pool->cache_list[n])) {
-			struct drm_i915_gem_object *obj =
-				list_first_entry(&pool->cache_list[n],
-						 struct drm_i915_gem_object,
-						 batch_pool_link);
+		struct drm_i915_gem_object *obj, *next;
 
-			list_del(&obj->batch_pool_link);
-			drm_gem_object_unreference(&obj->base);
-		}
+		list_for_each_entry_safe(obj, next,
+					 &pool->cache_list[n],
+					 batch_pool_link)
+			i915_gem_object_put(obj);
+
+		INIT_LIST_HEAD(&pool->cache_list[n]);
 	}
 }
 
@@ -102,7 +101,7 @@
 	struct list_head *list;
 	int n;
 
-	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
 
 	/* Compute a power-of-two bucket, but throw everything greater than
 	 * 16KiB into the same bucket: i.e. the the buckets hold objects of
@@ -115,13 +114,14 @@
 
 	list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
 		/* The batches are strictly LRU ordered */
-		if (tmp->active)
+		if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id],
+					     &tmp->base.dev->struct_mutex))
 			break;
 
 		/* While we're looping, do some clean up */
 		if (tmp->madv == __I915_MADV_PURGED) {
 			list_del(&tmp->batch_pool_link);
-			drm_gem_object_unreference(&tmp->base);
+			i915_gem_object_put(tmp);
 			continue;
 		}
 
@@ -134,7 +134,7 @@
 	if (obj == NULL) {
 		int ret;
 
-		obj = i915_gem_object_create(pool->dev, size);
+		obj = i915_gem_object_create(&pool->engine->i915->drm, size);
 		if (IS_ERR(obj))
 			return obj;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
index 848e907..10d5ac4 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
@@ -27,13 +27,15 @@
 
 #include "i915_drv.h"
 
+struct intel_engine_cs;
+
 struct i915_gem_batch_pool {
-	struct drm_device *dev;
+	struct intel_engine_cs *engine;
 	struct list_head cache_list[4];
 };
 
 /* i915_gem_batch_pool.c */
-void i915_gem_batch_pool_init(struct drm_device *dev,
+void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
 			      struct i915_gem_batch_pool *pool);
 void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
 struct drm_i915_gem_object*
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 3c97f0e..df10f4e9 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -134,21 +134,6 @@
 	return ret;
 }
 
-static void i915_gem_context_clean(struct i915_gem_context *ctx)
-{
-	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
-	struct i915_vma *vma, *next;
-
-	if (!ppgtt)
-		return;
-
-	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
-				 vm_link) {
-		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
-			break;
-	}
-}
-
 void i915_gem_context_free(struct kref *ctx_ref)
 {
 	struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
@@ -156,13 +141,7 @@
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	trace_i915_context_free(ctx);
-
-	/*
-	 * This context is going away and we need to remove all VMAs still
-	 * around. This is to handle imported shared objects for which
-	 * destructor did not run when their handles were closed.
-	 */
-	i915_gem_context_clean(ctx);
+	GEM_BUG_ON(!ctx->closed);
 
 	i915_ppgtt_put(ctx->ppgtt);
 
@@ -173,12 +152,13 @@
 			continue;
 
 		WARN_ON(ce->pin_count);
-		if (ce->ringbuf)
-			intel_ringbuffer_free(ce->ringbuf);
+		if (ce->ring)
+			intel_ring_free(ce->ring);
 
-		drm_gem_object_unreference(&ce->state->base);
+		i915_vma_put(ce->state);
 	}
 
+	put_pid(ctx->pid);
 	list_del(&ctx->link);
 
 	ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
@@ -216,7 +196,7 @@
 		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
 		/* Failure shouldn't ever happen this early */
 		if (WARN_ON(ret)) {
-			drm_gem_object_unreference(&obj->base);
+			i915_gem_object_put(obj);
 			return ERR_PTR(ret);
 		}
 	}
@@ -224,6 +204,37 @@
 	return obj;
 }
 
+static void i915_ppgtt_close(struct i915_address_space *vm)
+{
+	struct list_head *phases[] = {
+		&vm->active_list,
+		&vm->inactive_list,
+		&vm->unbound_list,
+		NULL,
+	}, **phase;
+
+	GEM_BUG_ON(vm->closed);
+	vm->closed = true;
+
+	for (phase = phases; *phase; phase++) {
+		struct i915_vma *vma, *vn;
+
+		list_for_each_entry_safe(vma, vn, *phase, vm_link)
+			if (!i915_vma_is_closed(vma))
+				i915_vma_close(vma);
+	}
+}
+
+static void context_close(struct i915_gem_context *ctx)
+{
+	GEM_BUG_ON(ctx->closed);
+	ctx->closed = true;
+	if (ctx->ppgtt)
+		i915_ppgtt_close(&ctx->ppgtt->base);
+	ctx->file_priv = ERR_PTR(-EBADF);
+	i915_gem_context_put(ctx);
+}
+
 static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
 {
 	int ret;
@@ -271,13 +282,24 @@
 	ctx->ggtt_alignment = get_context_alignment(dev_priv);
 
 	if (dev_priv->hw_context_size) {
-		struct drm_i915_gem_object *obj =
-				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
+		struct drm_i915_gem_object *obj;
+		struct i915_vma *vma;
+
+		obj = i915_gem_alloc_context_obj(dev,
+						 dev_priv->hw_context_size);
 		if (IS_ERR(obj)) {
 			ret = PTR_ERR(obj);
 			goto err_out;
 		}
-		ctx->engine[RCS].state = obj;
+
+		vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+		if (IS_ERR(vma)) {
+			i915_gem_object_put(obj);
+			ret = PTR_ERR(vma);
+			goto err_out;
+		}
+
+		ctx->engine[RCS].state = vma;
 	}
 
 	/* Default context will never have a file_priv */
@@ -290,6 +312,9 @@
 		ret = DEFAULT_CONTEXT_HANDLE;
 
 	ctx->file_priv = file_priv;
+	if (file_priv)
+		ctx->pid = get_task_pid(current, PIDTYPE_PID);
+
 	ctx->user_handle = ret;
 	/* NB: Mark all slices as needing a remap so that when the context first
 	 * loads it will restore whatever remap state already exists. If there
@@ -305,7 +330,7 @@
 	return ctx;
 
 err_out:
-	i915_gem_context_unreference(ctx);
+	context_close(ctx);
 	return ERR_PTR(ret);
 }
 
@@ -327,13 +352,14 @@
 		return ctx;
 
 	if (USES_FULL_PPGTT(dev)) {
-		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
+		struct i915_hw_ppgtt *ppgtt =
+			i915_ppgtt_create(to_i915(dev), file_priv);
 
 		if (IS_ERR(ppgtt)) {
 			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
 					 PTR_ERR(ppgtt));
 			idr_remove(&file_priv->context_idr, ctx->user_handle);
-			i915_gem_context_unreference(ctx);
+			context_close(ctx);
 			return ERR_CAST(ppgtt);
 		}
 
@@ -388,28 +414,12 @@
 		struct intel_context *ce = &ctx->engine[engine->id];
 
 		if (ce->state)
-			i915_gem_object_ggtt_unpin(ce->state);
+			i915_vma_unpin(ce->state);
 
-		i915_gem_context_unreference(ctx);
+		i915_gem_context_put(ctx);
 	}
 }
 
-void i915_gem_context_reset(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	lockdep_assert_held(&dev->struct_mutex);
-
-	if (i915.enable_execlists) {
-		struct i915_gem_context *ctx;
-
-		list_for_each_entry(ctx, &dev_priv->context_list, link)
-			intel_lr_context_reset(dev_priv, ctx);
-	}
-
-	i915_gem_context_lost(dev_priv);
-}
-
 int i915_gem_context_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -504,7 +514,7 @@
 
 	lockdep_assert_held(&dev->struct_mutex);
 
-	i915_gem_context_unreference(dctx);
+	context_close(dctx);
 	dev_priv->kernel_context = NULL;
 
 	ida_destroy(&dev_priv->context_hw_ida);
@@ -514,8 +524,7 @@
 {
 	struct i915_gem_context *ctx = p;
 
-	ctx->file_priv = ERR_PTR(-EBADF);
-	i915_gem_context_unreference(ctx);
+	context_close(ctx);
 	return 0;
 }
 
@@ -552,12 +561,13 @@
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
 	struct drm_i915_private *dev_priv = req->i915;
+	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
-		i915_semaphore_is_enabled(dev_priv) ?
-		hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
+		i915.semaphores ?
+		INTEL_INFO(dev_priv)->num_rings - 1 :
 		0;
 	int len, ret;
 
@@ -567,7 +577,7 @@
 	 * itlb_before_ctx_switch.
 	 */
 	if (IS_GEN6(dev_priv)) {
-		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
+		ret = engine->emit_flush(req, EMIT_INVALIDATE);
 		if (ret)
 			return ret;
 	}
@@ -589,64 +599,64 @@
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
 	if (INTEL_GEN(dev_priv) >= 7) {
-		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
-			intel_ring_emit(engine,
+			intel_ring_emit(ring,
 					MI_LOAD_REGISTER_IMM(num_rings));
 			for_each_engine(signaller, dev_priv) {
 				if (signaller == engine)
 					continue;
 
-				intel_ring_emit_reg(engine,
+				intel_ring_emit_reg(ring,
 						    RING_PSMI_CTL(signaller->mmio_base));
-				intel_ring_emit(engine,
+				intel_ring_emit(ring,
 						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
 			}
 		}
 	}
 
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_emit(engine, MI_SET_CONTEXT);
-	intel_ring_emit(engine,
-			i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
-			flags);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_SET_CONTEXT);
+	intel_ring_emit(ring,
+			i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
 	 */
-	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(ring, MI_NOOP);
 
 	if (INTEL_GEN(dev_priv) >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 			i915_reg_t last_reg = {}; /* keep gcc quiet */
 
-			intel_ring_emit(engine,
+			intel_ring_emit(ring,
 					MI_LOAD_REGISTER_IMM(num_rings));
 			for_each_engine(signaller, dev_priv) {
 				if (signaller == engine)
 					continue;
 
 				last_reg = RING_PSMI_CTL(signaller->mmio_base);
-				intel_ring_emit_reg(engine, last_reg);
-				intel_ring_emit(engine,
+				intel_ring_emit_reg(ring, last_reg);
+				intel_ring_emit(ring,
 						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
 			}
 
 			/* Insert a delay before the next switch! */
-			intel_ring_emit(engine,
+			intel_ring_emit(ring,
 					MI_STORE_REGISTER_MEM |
 					MI_SRM_LRM_GLOBAL_GTT);
-			intel_ring_emit_reg(engine, last_reg);
-			intel_ring_emit(engine, engine->scratch.gtt_offset);
-			intel_ring_emit(engine, MI_NOOP);
+			intel_ring_emit_reg(ring, last_reg);
+			intel_ring_emit(ring,
+					i915_ggtt_offset(engine->scratch));
+			intel_ring_emit(ring, MI_NOOP);
 		}
-		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 	}
 
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
 	return ret;
 }
@@ -654,7 +664,7 @@
 static int remap_l3(struct drm_i915_gem_request *req, int slice)
 {
 	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	int i, ret;
 
 	if (!remap_info)
@@ -669,13 +679,13 @@
 	 * here because no other code should access these registers other than
 	 * at initialization time.
 	 */
-	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
 	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
-		intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
-		intel_ring_emit(engine, remap_info[i]);
+		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
+		intel_ring_emit(ring, remap_info[i]);
 	}
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -744,6 +754,7 @@
 	struct i915_gem_context *to = req->ctx;
 	struct intel_engine_cs *engine = req->engine;
 	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
+	struct i915_vma *vma = to->engine[RCS].state;
 	struct i915_gem_context *from;
 	u32 hw_flags;
 	int ret, i;
@@ -751,10 +762,15 @@
 	if (skip_rcs_switch(ppgtt, engine, to))
 		return 0;
 
+	/* Clear this page out of any CPU caches for coherent swap-in/out. */
+	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+		ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+		if (ret)
+			return ret;
+	}
+
 	/* Trying to pin first makes error handling easier. */
-	ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
-				    to->ggtt_alignment,
-				    0);
+	ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL);
 	if (ret)
 		return ret;
 
@@ -767,18 +783,6 @@
 	 */
 	from = engine->last_context;
 
-	/*
-	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
-	 * that thanks to write = false in this call and us not setting any gpu
-	 * write domains when putting a context object onto the active list
-	 * (when switching away from it), this won't block.
-	 *
-	 * XXX: We need a real interface to do this instead of trickery.
-	 */
-	ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
-	if (ret)
-		goto unpin_out;
-
 	if (needs_pd_load_pre(ppgtt, engine, to)) {
 		/* Older GENs and non render rings still want the load first,
 		 * "PP_DCLV followed by PP_DIR_BASE register through Load
@@ -787,7 +791,7 @@
 		trace_switch_mm(engine, to);
 		ret = ppgtt->switch_mm(ppgtt, req);
 		if (ret)
-			goto unpin_out;
+			goto err;
 	}
 
 	if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
@@ -804,7 +808,7 @@
 	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
 		ret = mi_set_context(req, hw_flags);
 		if (ret)
-			goto unpin_out;
+			goto err;
 	}
 
 	/* The backing object for the context is done after switching to the
@@ -814,8 +818,6 @@
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -823,14 +825,12 @@
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		from->engine[RCS].state->dirty = 1;
-
-		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_ggtt_unpin(from->engine[RCS].state);
-		i915_gem_context_unreference(from);
+		i915_vma_move_to_active(from->engine[RCS].state, req, 0);
+		/* state is kept alive until the next request */
+		i915_vma_unpin(from->engine[RCS].state);
+		i915_gem_context_put(from);
 	}
-	i915_gem_context_reference(to);
-	engine->last_context = to;
+	engine->last_context = i915_gem_context_get(to);
 
 	/* GEN8 does *not* require an explicit reload if the PDPs have been
 	 * setup, and we do not wish to move them.
@@ -872,8 +872,8 @@
 
 	return 0;
 
-unpin_out:
-	i915_gem_object_ggtt_unpin(to->engine[RCS].state);
+err:
+	i915_vma_unpin(vma);
 	return ret;
 }
 
@@ -894,8 +894,9 @@
 {
 	struct intel_engine_cs *engine = req->engine;
 
-	WARN_ON(i915.enable_execlists);
 	lockdep_assert_held(&req->i915->drm.struct_mutex);
+	if (i915.enable_execlists)
+		return 0;
 
 	if (!req->ctx->engine[engine->id].state) {
 		struct i915_gem_context *to = req->ctx;
@@ -914,10 +915,9 @@
 		}
 
 		if (to != engine->last_context) {
-			i915_gem_context_reference(to);
 			if (engine->last_context)
-				i915_gem_context_unreference(engine->last_context);
-			engine->last_context = to;
+				i915_gem_context_put(engine->last_context);
+			engine->last_context = i915_gem_context_get(to);
 		}
 
 		return 0;
@@ -926,6 +926,33 @@
 	return do_rcs_switch(req);
 }
 
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+
+	for_each_engine(engine, dev_priv) {
+		struct drm_i915_gem_request *req;
+		int ret;
+
+		if (engine->last_context == NULL)
+			continue;
+
+		if (engine->last_context == dev_priv->kernel_context)
+			continue;
+
+		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+
+		ret = i915_switch_context(req);
+		i915_add_request_no_flush(req);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static bool contexts_enabled(struct drm_device *dev)
 {
 	return i915.enable_execlists || to_i915(dev)->hw_context_size;
@@ -985,7 +1012,7 @@
 	}
 
 	idr_remove(&file_priv->context_idr, ctx->user_handle);
-	i915_gem_context_unreference(ctx);
+	context_close(ctx);
 	mutex_unlock(&dev->struct_mutex);
 
 	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
deleted file mode 100644
index a565164..0000000
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright © 2008 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- *    Keith Packard <keithp@keithp.com>
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
-
-#if WATCH_LISTS
-int
-i915_verify_lists(struct drm_device *dev)
-{
-	static int warned;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *engine;
-	int err = 0;
-
-	if (warned)
-		return 0;
-
-	for_each_engine(engine, dev_priv) {
-		list_for_each_entry(obj, &engine->active_list,
-				    engine_list[engine->id]) {
-			if (obj->base.dev != dev ||
-			    !atomic_read(&obj->base.refcount.refcount)) {
-				DRM_ERROR("%s: freed active obj %p\n",
-					  engine->name, obj);
-				err++;
-				break;
-			} else if (!obj->active ||
-				   obj->last_read_req[engine->id] == NULL) {
-				DRM_ERROR("%s: invalid active obj %p\n",
-					  engine->name, obj);
-				err++;
-			} else if (obj->base.write_domain) {
-				DRM_ERROR("%s: invalid write obj %p (w %x)\n",
-					  engine->name,
-					  obj, obj->base.write_domain);
-				err++;
-			}
-		}
-	}
-
-	return warned = err;
-}
-#endif /* WATCH_LIST */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 80bbe43..97c9d68 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -23,9 +23,13 @@
  * Authors:
  *	Dave Airlie <airlied@redhat.com>
  */
-#include <drm/drmP.h>
-#include "i915_drv.h"
+
 #include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
+#include <drm/drmP.h>
+
+#include "i915_drv.h"
 
 static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
 {
@@ -115,7 +119,7 @@
 	if (ret)
 		return ERR_PTR(ret);
 
-	addr = i915_gem_object_pin_map(obj);
+	addr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 	mutex_unlock(&dev->struct_mutex);
 
 	return addr;
@@ -218,25 +222,73 @@
 	.end_cpu_access = i915_gem_end_cpu_access,
 };
 
+static void export_fences(struct drm_i915_gem_object *obj,
+			  struct dma_buf *dma_buf)
+{
+	struct reservation_object *resv = dma_buf->resv;
+	struct drm_i915_gem_request *req;
+	unsigned long active;
+	int idx;
+
+	active = __I915_BO_ACTIVE(obj);
+	if (!active)
+		return;
+
+	/* Serialise with execbuf to prevent concurrent fence-loops */
+	mutex_lock(&obj->base.dev->struct_mutex);
+
+	/* Mark the object for future fences before racily adding old fences */
+	obj->base.dma_buf = dma_buf;
+
+	ww_mutex_lock(&resv->lock, NULL);
+
+	for_each_active(active, idx) {
+		req = i915_gem_active_get(&obj->last_read[idx],
+					  &obj->base.dev->struct_mutex);
+		if (!req)
+			continue;
+
+		if (reservation_object_reserve_shared(resv) == 0)
+			reservation_object_add_shared_fence(resv, &req->fence);
+
+		i915_gem_request_put(req);
+	}
+
+	req = i915_gem_active_get(&obj->last_write,
+				  &obj->base.dev->struct_mutex);
+	if (req) {
+		reservation_object_add_excl_fence(resv, &req->fence);
+		i915_gem_request_put(req);
+	}
+
+	ww_mutex_unlock(&resv->lock);
+	mutex_unlock(&obj->base.dev->struct_mutex);
+}
+
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				      struct drm_gem_object *gem_obj, int flags)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	struct dma_buf *dma_buf;
 
 	exp_info.ops = &i915_dmabuf_ops;
 	exp_info.size = gem_obj->size;
 	exp_info.flags = flags;
 	exp_info.priv = gem_obj;
 
-
 	if (obj->ops->dmabuf_export) {
 		int ret = obj->ops->dmabuf_export(obj);
 		if (ret)
 			return ERR_PTR(ret);
 	}
 
-	return dma_buf_export(&exp_info);
+	dma_buf = drm_gem_dmabuf_export(dev, &exp_info);
+	if (IS_ERR(dma_buf))
+		return dma_buf;
+
+	export_fences(obj, dma_buf);
+	return dma_buf;
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -278,8 +330,7 @@
 			 * Importing dmabuf exported from out own gem increases
 			 * refcount on gem itself instead of f_count of dmabuf.
 			 */
-			drm_gem_object_reference(&obj->base);
-			return &obj->base;
+			return &i915_gem_object_get(obj)->base;
 		}
 	}
 
@@ -300,6 +351,16 @@
 	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
 	obj->base.import_attach = attach;
 
+	/* We use GTT as shorthand for a coherent domain, one that is
+	 * neither in the GPU cache nor in the CPU cache, where all
+	 * writes are immediately visible in memory. (That's not strictly
+	 * true, but it's close! There are internal buffers such as the
+	 * write-combined buffer or a delay through the chipset for GTT
+	 * writes that do require us to treat GTT as a separate cache domain.)
+	 */
+	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+	obj->base.write_domain = 0;
+
 	return &obj->base;
 
 fail_detach:
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 3c1280e..5b6f81c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,53 +33,37 @@
 #include "intel_drv.h"
 #include "i915_trace.h"
 
-static int switch_to_pinned_context(struct drm_i915_private *dev_priv)
+static bool
+gpu_is_idle(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
 
-	if (i915.enable_execlists)
-		return 0;
-
 	for_each_engine(engine, dev_priv) {
-		struct drm_i915_gem_request *req;
-		int ret;
-
-		if (engine->last_context == NULL)
-			continue;
-
-		if (engine->last_context == dev_priv->kernel_context)
-			continue;
-
-		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
-		if (IS_ERR(req))
-			return PTR_ERR(req);
-
-		ret = i915_switch_context(req);
-		i915_add_request_no_flush(req);
-		if (ret)
-			return ret;
+		if (intel_engine_is_active(engine))
+			return false;
 	}
 
-	return 0;
+	return true;
 }
 
-
 static bool
-mark_free(struct i915_vma *vma, struct list_head *unwind)
+mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
 {
-	if (vma->pin_count)
+	if (i915_vma_is_pinned(vma))
 		return false;
 
 	if (WARN_ON(!list_empty(&vma->exec_list)))
 		return false;
 
+	if (flags & PIN_NONFAULT && vma->obj->fault_mappable)
+		return false;
+
 	list_add(&vma->exec_list, unwind);
 	return drm_mm_scan_add_block(&vma->node);
 }
 
 /**
  * i915_gem_evict_something - Evict vmas to make room for binding a new one
- * @dev: drm_device
  * @vm: address space to evict from
  * @min_size: size of the desired free space
  * @alignment: alignment constraint of the desired free space
@@ -102,42 +86,37 @@
  * memory in e.g. the shrinker.
  */
 int
-i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
-			 int min_size, unsigned alignment, unsigned cache_level,
-			 unsigned long start, unsigned long end,
+i915_gem_evict_something(struct i915_address_space *vm,
+			 u64 min_size, u64 alignment,
+			 unsigned cache_level,
+			 u64 start, u64 end,
 			 unsigned flags)
 {
-	struct list_head eviction_list, unwind_list;
-	struct i915_vma *vma;
-	int ret = 0;
-	int pass = 0;
+	struct drm_i915_private *dev_priv = to_i915(vm->dev);
+	struct list_head eviction_list;
+	struct list_head *phases[] = {
+		&vm->inactive_list,
+		&vm->active_list,
+		NULL,
+	}, **phase;
+	struct i915_vma *vma, *next;
+	int ret;
 
-	trace_i915_gem_evict(dev, min_size, alignment, flags);
+	trace_i915_gem_evict(vm, min_size, alignment, flags);
 
 	/*
 	 * The goal is to evict objects and amalgamate space in LRU order.
 	 * The oldest idle objects reside on the inactive list, which is in
-	 * retirement order. The next objects to retire are those on the (per
-	 * ring) active list that do not have an outstanding flush. Once the
-	 * hardware reports completion (the seqno is updated after the
-	 * batchbuffer has been finished) the clean buffer objects would
-	 * be retired to the inactive list. Any dirty objects would be added
-	 * to the tail of the flushing list. So after processing the clean
-	 * active objects we need to emit a MI_FLUSH to retire the flushing
-	 * list, hence the retirement order of the flushing list is in
-	 * advance of the dirty objects on the active lists.
+	 * retirement order. The next objects to retire are those in flight,
+	 * on the active list, again in retirement order.
 	 *
 	 * The retirement sequence is thus:
 	 *   1. Inactive objects (already retired)
-	 *   2. Clean active objects
-	 *   3. Flushing list
-	 *   4. Dirty active objects.
+	 *   2. Active objects (will stall on unbinding)
 	 *
 	 * On each list, the oldest objects lie at the HEAD with the freshest
 	 * object on the TAIL.
 	 */
-
-	INIT_LIST_HEAD(&unwind_list);
 	if (start != 0 || end != vm->total) {
 		drm_mm_init_scan_with_range(&vm->mm, min_size,
 					    alignment, cache_level,
@@ -145,96 +124,86 @@
 	} else
 		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
-search_again:
-	/* First see if there is a large enough contiguous idle region... */
-	list_for_each_entry(vma, &vm->inactive_list, vm_link) {
-		if (mark_free(vma, &unwind_list))
-			goto found;
-	}
-
 	if (flags & PIN_NONBLOCK)
-		goto none;
+		phases[1] = NULL;
 
-	/* Now merge in the soon-to-be-expired objects... */
-	list_for_each_entry(vma, &vm->active_list, vm_link) {
-		if (mark_free(vma, &unwind_list))
-			goto found;
-	}
+search_again:
+	INIT_LIST_HEAD(&eviction_list);
+	phase = phases;
+	do {
+		list_for_each_entry(vma, *phase, vm_link)
+			if (mark_free(vma, flags, &eviction_list))
+				goto found;
+	} while (*++phase);
 
-none:
 	/* Nothing found, clean up and bail out! */
-	while (!list_empty(&unwind_list)) {
-		vma = list_first_entry(&unwind_list,
-				       struct i915_vma,
-				       exec_list);
+	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
 		ret = drm_mm_scan_remove_block(&vma->node);
 		BUG_ON(ret);
 
-		list_del_init(&vma->exec_list);
+		INIT_LIST_HEAD(&vma->exec_list);
 	}
 
 	/* Can we unpin some objects such as idle hw contents,
-	 * or pending flips?
+	 * or pending flips? But since only the GGTT has global entries
+	 * such as scanouts, rinbuffers and contexts, we can skip the
+	 * purge when inspecting per-process local address spaces.
 	 */
-	if (flags & PIN_NONBLOCK)
+	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
 		return -ENOSPC;
 
-	/* Only idle the GPU and repeat the search once */
-	if (pass++ == 0) {
-		struct drm_i915_private *dev_priv = to_i915(dev);
-
-		if (i915_is_ggtt(vm)) {
-			ret = switch_to_pinned_context(dev_priv);
-			if (ret)
-				return ret;
-		}
-
-		ret = i915_gem_wait_for_idle(dev_priv);
-		if (ret)
-			return ret;
-
-		i915_gem_retire_requests(dev_priv);
-		goto search_again;
+	if (gpu_is_idle(dev_priv)) {
+		/* If we still have pending pageflip completions, drop
+		 * back to userspace to give our workqueues time to
+		 * acquire our locks and unpin the old scanouts.
+		 */
+		return intel_has_pending_fb_unpin(vm->dev) ? -EAGAIN : -ENOSPC;
 	}
 
-	/* If we still have pending pageflip completions, drop
-	 * back to userspace to give our workqueues time to
-	 * acquire our locks and unpin the old scanouts.
+	/* Not everything in the GGTT is tracked via vma (otherwise we
+	 * could evict as required with minimal stalling) so we are forced
+	 * to idle the GPU and explicitly retire outstanding requests in
+	 * the hopes that we can then remove contexts and the like only
+	 * bound by their active reference.
 	 */
-	return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
+	ret = i915_gem_switch_to_kernel_context(dev_priv);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_wait_for_idle(dev_priv,
+				     I915_WAIT_INTERRUPTIBLE |
+				     I915_WAIT_LOCKED);
+	if (ret)
+		return ret;
+
+	i915_gem_retire_requests(dev_priv);
+	goto search_again;
 
 found:
 	/* drm_mm doesn't allow any other other operations while
-	 * scanning, therefore store to be evicted objects on a
-	 * temporary list. */
-	INIT_LIST_HEAD(&eviction_list);
-	while (!list_empty(&unwind_list)) {
-		vma = list_first_entry(&unwind_list,
-				       struct i915_vma,
-				       exec_list);
-		if (drm_mm_scan_remove_block(&vma->node)) {
-			list_move(&vma->exec_list, &eviction_list);
-			drm_gem_object_reference(&vma->obj->base);
-			continue;
-		}
-		list_del_init(&vma->exec_list);
+	 * scanning, therefore store to-be-evicted objects on a
+	 * temporary list and take a reference for all before
+	 * calling unbind (which may remove the active reference
+	 * of any of our objects, thus corrupting the list).
+	 */
+	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+		if (drm_mm_scan_remove_block(&vma->node))
+			__i915_vma_pin(vma);
+		else
+			list_del_init(&vma->exec_list);
 	}
 
 	/* Unbinding will emit any required flushes */
 	while (!list_empty(&eviction_list)) {
-		struct drm_gem_object *obj;
 		vma = list_first_entry(&eviction_list,
 				       struct i915_vma,
 				       exec_list);
 
-		obj =  &vma->obj->base;
 		list_del_init(&vma->exec_list);
+		__i915_vma_unpin(vma);
 		if (ret == 0)
 			ret = i915_vma_unbind(vma);
-
-		drm_gem_object_unreference(obj);
 	}
-
 	return ret;
 }
 
@@ -256,8 +225,8 @@
 
 		vma = container_of(node, typeof(*vma), node);
 
-		if (vma->pin_count) {
-			if (!vma->exec_entry || (vma->pin_count > 1))
+		if (i915_vma_is_pinned(vma)) {
+			if (!vma->exec_entry || i915_vma_pin_count(vma) > 1)
 				/* Object is pinned for some other use */
 				return -EBUSY;
 
@@ -303,22 +272,23 @@
 		struct drm_i915_private *dev_priv = to_i915(vm->dev);
 
 		if (i915_is_ggtt(vm)) {
-			ret = switch_to_pinned_context(dev_priv);
+			ret = i915_gem_switch_to_kernel_context(dev_priv);
 			if (ret)
 				return ret;
 		}
 
-		ret = i915_gem_wait_for_idle(dev_priv);
+		ret = i915_gem_wait_for_idle(dev_priv,
+					     I915_WAIT_INTERRUPTIBLE |
+					     I915_WAIT_LOCKED);
 		if (ret)
 			return ret;
 
 		i915_gem_retire_requests(dev_priv);
-
 		WARN_ON(!list_empty(&vm->active_list));
 	}
 
 	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
-		if (vma->pin_count == 0)
+		if (!i915_vma_is_pinned(vma))
 			WARN_ON(i915_vma_unbind(vma));
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b35e5b6..7adb4c7 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -26,22 +26,42 @@
  *
  */
 
-#include <drm/drmP.h>
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
-#include "i915_trace.h"
-#include "intel_drv.h"
 #include <linux/dma_remapping.h>
+#include <linux/reservation.h>
 #include <linux/uaccess.h>
 
-#define  __EXEC_OBJECT_HAS_PIN (1<<31)
-#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
-#define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
-#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+#include "intel_frontbuffer.h"
+
+#define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */
+
+#define  __EXEC_OBJECT_HAS_PIN		(1<<31)
+#define  __EXEC_OBJECT_HAS_FENCE	(1<<30)
+#define  __EXEC_OBJECT_NEEDS_MAP	(1<<29)
+#define  __EXEC_OBJECT_NEEDS_BIAS	(1<<28)
+#define  __EXEC_OBJECT_INTERNAL_FLAGS (0xf<<28) /* all of the above */
 
 #define BATCH_OFFSET_BIAS (256*1024)
 
+struct i915_execbuffer_params {
+	struct drm_device               *dev;
+	struct drm_file                 *file;
+	struct i915_vma			*batch;
+	u32				dispatch_flags;
+	u32				args_batch_start_offset;
+	struct intel_engine_cs          *engine;
+	struct i915_gem_context         *ctx;
+	struct drm_i915_gem_request     *request;
+};
+
 struct eb_vmas {
+	struct drm_i915_private *i915;
 	struct list_head vmas;
 	int and;
 	union {
@@ -51,7 +71,8 @@
 };
 
 static struct eb_vmas *
-eb_create(struct drm_i915_gem_execbuffer2 *args)
+eb_create(struct drm_i915_private *i915,
+	  struct drm_i915_gem_execbuffer2 *args)
 {
 	struct eb_vmas *eb = NULL;
 
@@ -78,6 +99,7 @@
 	} else
 		eb->and = -args->buffer_count;
 
+	eb->i915 = i915;
 	INIT_LIST_HEAD(&eb->vmas);
 	return eb;
 }
@@ -89,6 +111,26 @@
 		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
 }
 
+static struct i915_vma *
+eb_get_batch(struct eb_vmas *eb)
+{
+	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+
+	/*
+	 * SNA is doing fancy tricks with compressing batch buffers, which leads
+	 * to negative relocation deltas. Usually that works out ok since the
+	 * relocate address is still positive, except when the batch is placed
+	 * very low in the GTT. Ensure this doesn't happen.
+	 *
+	 * Note that actual hangs have only been observed on gen7, but for
+	 * paranoia do it everywhere.
+	 */
+	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
+		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+	return vma;
+}
+
 static int
 eb_lookup_vmas(struct eb_vmas *eb,
 	       struct drm_i915_gem_exec_object2 *exec,
@@ -122,7 +164,7 @@
 			goto err;
 		}
 
-		drm_gem_object_reference(&obj->base);
+		i915_gem_object_get(obj);
 		list_add_tail(&obj->obj_exec_link, &objects);
 	}
 	spin_unlock(&file->table_lock);
@@ -143,8 +185,8 @@
 		 * from the (obj, vm) we don't run the risk of creating
 		 * duplicated vmas for the same vm.
 		 */
-		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
-		if (IS_ERR(vma)) {
+		vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL);
+		if (unlikely(IS_ERR(vma))) {
 			DRM_DEBUG("Failed to lookup VMA\n");
 			ret = PTR_ERR(vma);
 			goto err;
@@ -175,7 +217,7 @@
 				       struct drm_i915_gem_object,
 				       obj_exec_link);
 		list_del_init(&obj->obj_exec_link);
-		drm_gem_object_unreference(&obj->base);
+		i915_gem_object_put(obj);
 	}
 	/*
 	 * Objects already transfered to the vmas list will be unreferenced by
@@ -208,7 +250,6 @@
 i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 {
 	struct drm_i915_gem_exec_object2 *entry;
-	struct drm_i915_gem_object *obj = vma->obj;
 
 	if (!drm_mm_node_allocated(&vma->node))
 		return;
@@ -216,10 +257,10 @@
 	entry = vma->exec_entry;
 
 	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
-		i915_gem_object_unpin_fence(obj);
+		i915_vma_unpin_fence(vma);
 
 	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-		vma->pin_count--;
+		__i915_vma_unpin(vma);
 
 	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
@@ -234,13 +275,19 @@
 				       exec_list);
 		list_del_init(&vma->exec_list);
 		i915_gem_execbuffer_unreserve_vma(vma);
-		drm_gem_object_unreference(&vma->obj->base);
+		i915_vma_put(vma);
 	}
 	kfree(eb);
 }
 
 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 {
+	if (!i915_gem_object_has_struct_page(obj))
+		return false;
+
+	if (DBG_USE_CPU_RELOC)
+		return DBG_USE_CPU_RELOC > 0;
+
 	return (HAS_LLC(obj->base.dev) ||
 		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
 		obj->cache_level != I915_CACHE_NONE);
@@ -265,144 +312,265 @@
 }
 
 static inline uint64_t
-relocation_target(struct drm_i915_gem_relocation_entry *reloc,
+relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
 		  uint64_t target_offset)
 {
 	return gen8_canonical_addr((int)reloc->delta + target_offset);
 }
 
-static int
-relocate_entry_cpu(struct drm_i915_gem_object *obj,
-		   struct drm_i915_gem_relocation_entry *reloc,
-		   uint64_t target_offset)
+struct reloc_cache {
+	struct drm_i915_private *i915;
+	struct drm_mm_node node;
+	unsigned long vaddr;
+	unsigned int page;
+	bool use_64bit_reloc;
+};
+
+static void reloc_cache_init(struct reloc_cache *cache,
+			     struct drm_i915_private *i915)
 {
-	struct drm_device *dev = obj->base.dev;
-	uint32_t page_offset = offset_in_page(reloc->offset);
-	uint64_t delta = relocation_target(reloc, target_offset);
-	char *vaddr;
-	int ret;
+	cache->page = -1;
+	cache->vaddr = 0;
+	cache->i915 = i915;
+	cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8;
+	cache->node.allocated = false;
+}
 
-	ret = i915_gem_object_set_to_cpu_domain(obj, true);
-	if (ret)
-		return ret;
+static inline void *unmask_page(unsigned long p)
+{
+	return (void *)(uintptr_t)(p & PAGE_MASK);
+}
 
-	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
-				reloc->offset >> PAGE_SHIFT));
-	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
+static inline unsigned int unmask_flags(unsigned long p)
+{
+	return p & ~PAGE_MASK;
+}
 
-	if (INTEL_INFO(dev)->gen >= 8) {
-		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+#define KMAP 0x4 /* after CLFLUSH_FLAGS */
 
-		if (page_offset == 0) {
-			kunmap_atomic(vaddr);
-			vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
-			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
+static void reloc_cache_fini(struct reloc_cache *cache)
+{
+	void *vaddr;
+
+	if (!cache->vaddr)
+		return;
+
+	vaddr = unmask_page(cache->vaddr);
+	if (cache->vaddr & KMAP) {
+		if (cache->vaddr & CLFLUSH_AFTER)
+			mb();
+
+		kunmap_atomic(vaddr);
+		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
+	} else {
+		wmb();
+		io_mapping_unmap_atomic((void __iomem *)vaddr);
+		if (cache->node.allocated) {
+			struct i915_ggtt *ggtt = &cache->i915->ggtt;
+
+			ggtt->base.clear_range(&ggtt->base,
+					       cache->node.start,
+					       cache->node.size,
+					       true);
+			drm_mm_remove_node(&cache->node);
+		} else {
+			i915_vma_unpin((struct i915_vma *)cache->node.mm);
 		}
+	}
+}
 
-		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
+static void *reloc_kmap(struct drm_i915_gem_object *obj,
+			struct reloc_cache *cache,
+			int page)
+{
+	void *vaddr;
+
+	if (cache->vaddr) {
+		kunmap_atomic(unmask_page(cache->vaddr));
+	} else {
+		unsigned int flushes;
+		int ret;
+
+		ret = i915_gem_obj_prepare_shmem_write(obj, &flushes);
+		if (ret)
+			return ERR_PTR(ret);
+
+		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
+		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
+
+		cache->vaddr = flushes | KMAP;
+		cache->node.mm = (void *)obj;
+		if (flushes)
+			mb();
 	}
 
-	kunmap_atomic(vaddr);
+	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
+	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
+	cache->page = page;
+
+	return vaddr;
+}
+
+static void *reloc_iomap(struct drm_i915_gem_object *obj,
+			 struct reloc_cache *cache,
+			 int page)
+{
+	struct i915_ggtt *ggtt = &cache->i915->ggtt;
+	unsigned long offset;
+	void *vaddr;
+
+	if (cache->node.allocated) {
+		wmb();
+		ggtt->base.insert_page(&ggtt->base,
+				       i915_gem_object_get_dma_address(obj, page),
+				       cache->node.start, I915_CACHE_NONE, 0);
+		cache->page = page;
+		return unmask_page(cache->vaddr);
+	}
+
+	if (cache->vaddr) {
+		io_mapping_unmap_atomic(unmask_page(cache->vaddr));
+	} else {
+		struct i915_vma *vma;
+		int ret;
+
+		if (use_cpu_reloc(obj))
+			return NULL;
+
+		ret = i915_gem_object_set_to_gtt_domain(obj, true);
+		if (ret)
+			return ERR_PTR(ret);
+
+		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+					       PIN_MAPPABLE | PIN_NONBLOCK);
+		if (IS_ERR(vma)) {
+			memset(&cache->node, 0, sizeof(cache->node));
+			ret = drm_mm_insert_node_in_range_generic
+				(&ggtt->base.mm, &cache->node,
+				 4096, 0, 0,
+				 0, ggtt->mappable_end,
+				 DRM_MM_SEARCH_DEFAULT,
+				 DRM_MM_CREATE_DEFAULT);
+			if (ret) /* no inactive aperture space, use cpu reloc */
+				return NULL;
+		} else {
+			ret = i915_vma_put_fence(vma);
+			if (ret) {
+				i915_vma_unpin(vma);
+				return ERR_PTR(ret);
+			}
+
+			cache->node.start = vma->node.start;
+			cache->node.mm = (void *)vma;
+		}
+	}
+
+	offset = cache->node.start;
+	if (cache->node.allocated) {
+		ggtt->base.insert_page(&ggtt->base,
+				       i915_gem_object_get_dma_address(obj, page),
+				       offset, I915_CACHE_NONE, 0);
+	} else {
+		offset += page << PAGE_SHIFT;
+	}
+
+	vaddr = io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
+	cache->page = page;
+	cache->vaddr = (unsigned long)vaddr;
+
+	return vaddr;
+}
+
+static void *reloc_vaddr(struct drm_i915_gem_object *obj,
+			 struct reloc_cache *cache,
+			 int page)
+{
+	void *vaddr;
+
+	if (cache->page == page) {
+		vaddr = unmask_page(cache->vaddr);
+	} else {
+		vaddr = NULL;
+		if ((cache->vaddr & KMAP) == 0)
+			vaddr = reloc_iomap(obj, cache, page);
+		if (!vaddr)
+			vaddr = reloc_kmap(obj, cache, page);
+	}
+
+	return vaddr;
+}
+
+static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
+{
+	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
+		if (flushes & CLFLUSH_BEFORE) {
+			clflushopt(addr);
+			mb();
+		}
+
+		*addr = value;
+
+		/* Writes to the same cacheline are serialised by the CPU
+		 * (including clflush). On the write path, we only require
+		 * that it hits memory in an orderly fashion and place
+		 * mb barriers at the start and end of the relocation phase
+		 * to ensure ordering of clflush wrt to the system.
+		 */
+		if (flushes & CLFLUSH_AFTER)
+			clflushopt(addr);
+	} else
+		*addr = value;
+}
+
+static int
+relocate_entry(struct drm_i915_gem_object *obj,
+	       const struct drm_i915_gem_relocation_entry *reloc,
+	       struct reloc_cache *cache,
+	       u64 target_offset)
+{
+	u64 offset = reloc->offset;
+	bool wide = cache->use_64bit_reloc;
+	void *vaddr;
+
+	target_offset = relocation_target(reloc, target_offset);
+repeat:
+	vaddr = reloc_vaddr(obj, cache, offset >> PAGE_SHIFT);
+	if (IS_ERR(vaddr))
+		return PTR_ERR(vaddr);
+
+	clflush_write32(vaddr + offset_in_page(offset),
+			lower_32_bits(target_offset),
+			cache->vaddr);
+
+	if (wide) {
+		offset += sizeof(u32);
+		target_offset >>= 32;
+		wide = false;
+		goto repeat;
+	}
 
 	return 0;
 }
 
-static int
-relocate_entry_gtt(struct drm_i915_gem_object *obj,
-		   struct drm_i915_gem_relocation_entry *reloc,
-		   uint64_t target_offset)
+static bool object_is_idle(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	uint64_t delta = relocation_target(reloc, target_offset);
-	uint64_t offset;
-	void __iomem *reloc_page;
-	int ret;
+	unsigned long active = i915_gem_object_get_active(obj);
+	int idx;
 
-	ret = i915_gem_object_set_to_gtt_domain(obj, true);
-	if (ret)
-		return ret;
-
-	ret = i915_gem_object_put_fence(obj);
-	if (ret)
-		return ret;
-
-	/* Map the page containing the relocation we're going to perform.  */
-	offset = i915_gem_obj_ggtt_offset(obj);
-	offset += reloc->offset;
-	reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
-					      offset & PAGE_MASK);
-	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
-
-	if (INTEL_INFO(dev)->gen >= 8) {
-		offset += sizeof(uint32_t);
-
-		if (offset_in_page(offset) == 0) {
-			io_mapping_unmap_atomic(reloc_page);
-			reloc_page =
-				io_mapping_map_atomic_wc(ggtt->mappable,
-							 offset);
-		}
-
-		iowrite32(upper_32_bits(delta),
-			  reloc_page + offset_in_page(offset));
+	for_each_active(active, idx) {
+		if (!i915_gem_active_is_idle(&obj->last_read[idx],
+					     &obj->base.dev->struct_mutex))
+			return false;
 	}
 
-	io_mapping_unmap_atomic(reloc_page);
-
-	return 0;
-}
-
-static void
-clflush_write32(void *addr, uint32_t value)
-{
-	/* This is not a fast path, so KISS. */
-	drm_clflush_virt_range(addr, sizeof(uint32_t));
-	*(uint32_t *)addr = value;
-	drm_clflush_virt_range(addr, sizeof(uint32_t));
-}
-
-static int
-relocate_entry_clflush(struct drm_i915_gem_object *obj,
-		       struct drm_i915_gem_relocation_entry *reloc,
-		       uint64_t target_offset)
-{
-	struct drm_device *dev = obj->base.dev;
-	uint32_t page_offset = offset_in_page(reloc->offset);
-	uint64_t delta = relocation_target(reloc, target_offset);
-	char *vaddr;
-	int ret;
-
-	ret = i915_gem_object_set_to_gtt_domain(obj, true);
-	if (ret)
-		return ret;
-
-	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
-				reloc->offset >> PAGE_SHIFT));
-	clflush_write32(vaddr + page_offset, lower_32_bits(delta));
-
-	if (INTEL_INFO(dev)->gen >= 8) {
-		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
-
-		if (page_offset == 0) {
-			kunmap_atomic(vaddr);
-			vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
-			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
-		}
-
-		clflush_write32(vaddr + page_offset, upper_32_bits(delta));
-	}
-
-	kunmap_atomic(vaddr);
-
-	return 0;
+	return true;
 }
 
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 				   struct eb_vmas *eb,
-				   struct drm_i915_gem_relocation_entry *reloc)
+				   struct drm_i915_gem_relocation_entry *reloc,
+				   struct reloc_cache *cache)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_gem_object *target_obj;
@@ -465,7 +633,7 @@
 
 	/* Check that the relocation address is valid... */
 	if (unlikely(reloc->offset >
-		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
+		     obj->base.size - (cache->use_64bit_reloc ? 8 : 4))) {
 		DRM_DEBUG("Relocation beyond object bounds: "
 			  "obj %p target %d offset %d size %d.\n",
 			  obj, reloc->target_handle,
@@ -482,26 +650,15 @@
 	}
 
 	/* We can't wait for rendering with pagefaults disabled */
-	if (obj->active && pagefault_disabled())
+	if (pagefault_disabled() && !object_is_idle(obj))
 		return -EFAULT;
 
-	if (use_cpu_reloc(obj))
-		ret = relocate_entry_cpu(obj, reloc, target_offset);
-	else if (obj->map_and_fenceable)
-		ret = relocate_entry_gtt(obj, reloc, target_offset);
-	else if (static_cpu_has(X86_FEATURE_CLFLUSH))
-		ret = relocate_entry_clflush(obj, reloc, target_offset);
-	else {
-		WARN_ONCE(1, "Impossible case in relocation handling\n");
-		ret = -ENODEV;
-	}
-
+	ret = relocate_entry(obj, reloc, cache, target_offset);
 	if (ret)
 		return ret;
 
 	/* and update the user's relocation entry */
 	reloc->presumed_offset = target_offset;
-
 	return 0;
 }
 
@@ -513,9 +670,11 @@
 	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
 	struct drm_i915_gem_relocation_entry __user *user_relocs;
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-	int remain, ret;
+	struct reloc_cache cache;
+	int remain, ret = 0;
 
 	user_relocs = u64_to_user_ptr(entry->relocs_ptr);
+	reloc_cache_init(&cache, eb->i915);
 
 	remain = entry->relocation_count;
 	while (remain) {
@@ -525,19 +684,23 @@
 			count = ARRAY_SIZE(stack_reloc);
 		remain -= count;
 
-		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
-			return -EFAULT;
+		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]))) {
+			ret = -EFAULT;
+			goto out;
+		}
 
 		do {
 			u64 offset = r->presumed_offset;
 
-			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
+			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
 			if (ret)
-				return ret;
+				goto out;
 
 			if (r->presumed_offset != offset &&
-			    __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
-				return -EFAULT;
+			    __put_user(r->presumed_offset,
+				       &user_relocs->presumed_offset)) {
+				ret = -EFAULT;
+				goto out;
 			}
 
 			user_relocs++;
@@ -545,7 +708,9 @@
 		} while (--count);
 	}
 
-	return 0;
+out:
+	reloc_cache_fini(&cache);
+	return ret;
 #undef N_RELOC
 }
 
@@ -555,15 +720,18 @@
 				      struct drm_i915_gem_relocation_entry *relocs)
 {
 	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-	int i, ret;
+	struct reloc_cache cache;
+	int i, ret = 0;
 
+	reloc_cache_init(&cache, eb->i915);
 	for (i = 0; i < entry->relocation_count; i++) {
-		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
+		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
 		if (ret)
-			return ret;
+			break;
 	}
+	reloc_cache_fini(&cache);
 
-	return 0;
+	return ret;
 }
 
 static int
@@ -626,23 +794,27 @@
 			flags |= PIN_HIGH;
 	}
 
-	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
-	if ((ret == -ENOSPC  || ret == -E2BIG) &&
+	ret = i915_vma_pin(vma,
+			   entry->pad_to_size,
+			   entry->alignment,
+			   flags);
+	if ((ret == -ENOSPC || ret == -E2BIG) &&
 	    only_mappable_for_reloc(entry->flags))
-		ret = i915_gem_object_pin(obj, vma->vm,
-					  entry->alignment,
-					  flags & ~PIN_MAPPABLE);
+		ret = i915_vma_pin(vma,
+				   entry->pad_to_size,
+				   entry->alignment,
+				   flags & ~PIN_MAPPABLE);
 	if (ret)
 		return ret;
 
 	entry->flags |= __EXEC_OBJECT_HAS_PIN;
 
 	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-		ret = i915_gem_object_get_fence(obj);
+		ret = i915_vma_get_fence(vma);
 		if (ret)
 			return ret;
 
-		if (i915_gem_object_pin_fence(obj))
+		if (i915_vma_pin_fence(vma))
 			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
 	}
 
@@ -667,7 +839,7 @@
 	if (entry->relocation_count == 0)
 		return false;
 
-	if (!vma->is_ggtt)
+	if (!i915_vma_is_ggtt(vma))
 		return false;
 
 	/* See also use_cpu_reloc() */
@@ -684,14 +856,17 @@
 eb_vma_misplaced(struct i915_vma *vma)
 {
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-	struct drm_i915_gem_object *obj = vma->obj;
 
-	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
+	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
+		!i915_vma_is_ggtt(vma));
 
 	if (entry->alignment &&
 	    vma->node.start & (entry->alignment - 1))
 		return true;
 
+	if (vma->node.size < entry->pad_to_size)
+		return true;
+
 	if (entry->flags & EXEC_OBJECT_PINNED &&
 	    vma->node.start != entry->offset)
 		return true;
@@ -701,7 +876,8 @@
 		return true;
 
 	/* avoid costly ping-pong once a batch bo ended up non-mappable */
-	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
+	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
+	    !i915_vma_is_map_and_fenceable(vma))
 		return !only_mappable_for_reloc(entry->flags);
 
 	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
@@ -725,8 +901,6 @@
 	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
 	int retry;
 
-	i915_gem_retire_requests_ring(engine);
-
 	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 
 	INIT_LIST_HEAD(&ordered_vmas);
@@ -746,7 +920,7 @@
 			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
 		need_fence =
 			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-			obj->tiling_mode != I915_TILING_NONE;
+			i915_gem_object_is_tiled(obj);
 		need_mappable = need_fence || need_reloc_mappable(vma);
 
 		if (entry->flags & EXEC_OBJECT_PINNED)
@@ -843,7 +1017,7 @@
 		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
 		list_del_init(&vma->exec_list);
 		i915_gem_execbuffer_unreserve_vma(vma);
-		drm_gem_object_unreference(&vma->obj->base);
+		i915_vma_put(vma);
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -937,23 +1111,45 @@
 	return ret;
 }
 
+static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
+{
+	unsigned int mask;
+
+	mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
+	mask <<= I915_BO_ACTIVE_SHIFT;
+
+	return mask;
+}
+
 static int
 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 				struct list_head *vmas)
 {
-	const unsigned other_rings = ~intel_engine_flag(req->engine);
+	const unsigned int other_rings = eb_other_engines(req);
 	struct i915_vma *vma;
 	int ret;
 
 	list_for_each_entry(vma, vmas, exec_list) {
 		struct drm_i915_gem_object *obj = vma->obj;
+		struct reservation_object *resv;
 
-		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, req->engine, &req);
+		if (obj->flags & other_rings) {
+			ret = i915_gem_request_await_object
+				(req, obj, obj->base.pending_write_domain);
 			if (ret)
 				return ret;
 		}
 
+		resv = i915_gem_object_get_dmabuf_resv(obj);
+		if (resv) {
+			ret = i915_sw_fence_await_reservation
+				(&req->submit, resv, &i915_fence_ops,
+				 obj->base.pending_write_domain, 10*HZ,
+				 GFP_KERNEL | __GFP_NOWARN);
+			if (ret < 0)
+				return ret;
+		}
+
 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
 			i915_gem_clflush_object(obj, false);
 	}
@@ -961,10 +1157,8 @@
 	/* Unconditionally flush any chipset caches (for streaming writes). */
 	i915_gem_chipset_flush(req->engine->i915);
 
-	/* Unconditionally invalidate gpu caches and ensure that we do flush
-	 * any residual writes from the previous batch.
-	 */
-	return intel_ring_invalidate_all_caches(req);
+	/* Unconditionally invalidate GPU caches and TLBs. */
+	return req->engine->emit_flush(req, EMIT_INVALIDATE);
 }
 
 static bool
@@ -1000,6 +1194,9 @@
 	unsigned invalid_flags;
 	int i;
 
+	/* INTERNAL flags must not overlap with external ones */
+	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);
+
 	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
 	if (USES_FULL_PPGTT(dev))
 		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
@@ -1029,6 +1226,14 @@
 		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
 			return -EINVAL;
 
+		/* pad_to_size was once a reserved field, so sanitize it */
+		if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
+			if (offset_in_page(exec[i].pad_to_size))
+				return -EINVAL;
+		} else {
+			exec[i].pad_to_size = 0;
+		}
+
 		/* First check for malicious input causing overflow in
 		 * the worst case where we need to allocate the entire
 		 * relocation tree as a single array.
@@ -1048,7 +1253,7 @@
 			return -EFAULT;
 
 		if (likely(!i915.prefault_disable)) {
-			if (fault_in_multipages_readable(ptr, length))
+			if (fault_in_pages_readable(ptr, length))
 				return -EFAULT;
 		}
 	}
@@ -1060,12 +1265,9 @@
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 			  struct intel_engine_cs *engine, const u32 ctx_id)
 {
-	struct i915_gem_context *ctx = NULL;
+	struct i915_gem_context *ctx;
 	struct i915_ctx_hang_stats *hs;
 
-	if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
-		return ERR_PTR(-EINVAL);
-
 	ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
 	if (IS_ERR(ctx))
 		return ctx;
@@ -1079,66 +1281,99 @@
 	return ctx;
 }
 
-void
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct drm_i915_gem_request *req,
+			     unsigned int flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+	const unsigned int idx = req->engine->id;
+
+	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
+	obj->dirty = 1; /* be paranoid  */
+
+	/* Add a reference if we're newly entering the active list.
+	 * The order in which we add operations to the retirement queue is
+	 * vital here: mark_active adds to the start of the callback list,
+	 * such that subsequent callbacks are called first. Therefore we
+	 * add the active reference first and queue for it to be dropped
+	 * *last*.
+	 */
+	if (!i915_gem_object_is_active(obj))
+		i915_gem_object_get(obj);
+	i915_gem_object_set_active(obj, idx);
+	i915_gem_active_set(&obj->last_read[idx], req);
+
+	if (flags & EXEC_OBJECT_WRITE) {
+		i915_gem_active_set(&obj->last_write, req);
+
+		intel_fb_obj_invalidate(obj, ORIGIN_CS);
+
+		/* update for the implicit flush after a batch */
+		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+	}
+
+	if (flags & EXEC_OBJECT_NEEDS_FENCE)
+		i915_gem_active_set(&vma->last_fence, req);
+
+	i915_vma_set_active(vma, idx);
+	i915_gem_active_set(&vma->last_read[idx], req);
+	list_move_tail(&vma->vm_link, &vma->vm->active_list);
+}
+
+static void eb_export_fence(struct drm_i915_gem_object *obj,
+			    struct drm_i915_gem_request *req,
+			    unsigned int flags)
+{
+	struct reservation_object *resv;
+
+	resv = i915_gem_object_get_dmabuf_resv(obj);
+	if (!resv)
+		return;
+
+	/* Ignore errors from failing to allocate the new fence, we can't
+	 * handle an error right now. Worst case should be missed
+	 * synchronisation leading to rendering corruption.
+	 */
+	ww_mutex_lock(&resv->lock, NULL);
+	if (flags & EXEC_OBJECT_WRITE)
+		reservation_object_add_excl_fence(resv, &req->fence);
+	else if (reservation_object_reserve_shared(resv) == 0)
+		reservation_object_add_shared_fence(resv, &req->fence);
+	ww_mutex_unlock(&resv->lock);
+}
+
+static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
-		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 		struct drm_i915_gem_object *obj = vma->obj;
 		u32 old_read = obj->base.read_domains;
 		u32 old_write = obj->base.write_domain;
 
-		obj->dirty = 1; /* be paranoid  */
 		obj->base.write_domain = obj->base.pending_write_domain;
-		if (obj->base.write_domain == 0)
+		if (obj->base.write_domain)
+			vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
+		else
 			obj->base.pending_read_domains |= obj->base.read_domains;
 		obj->base.read_domains = obj->base.pending_read_domains;
 
-		i915_vma_move_to_active(vma, req);
-		if (obj->base.write_domain) {
-			i915_gem_request_assign(&obj->last_write_req, req);
-
-			intel_fb_obj_invalidate(obj, ORIGIN_CS);
-
-			/* update for the implicit flush after a batch */
-			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
-		}
-		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-			i915_gem_request_assign(&obj->last_fenced_req, req);
-			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				struct drm_i915_private *dev_priv = engine->i915;
-				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
-					       &dev_priv->mm.fence_list);
-			}
-		}
-
+		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
+		eb_export_fence(obj, req, vma->exec_entry->flags);
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
 }
 
-static void
-i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
-{
-	/* Unconditionally force add_request to emit a full flush. */
-	params->engine->gpu_caches_dirty = true;
-
-	/* Add a breadcrumb for the completion of the batch buffer */
-	__i915_add_request(params->request, params->batch_obj, true);
-}
-
 static int
-i915_reset_gen7_sol_offsets(struct drm_device *dev,
-			    struct drm_i915_gem_request *req)
+i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_ring *ring = req->ring;
 	int ret, i;
 
-	if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
+	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
@@ -1148,21 +1383,21 @@
 		return ret;
 
 	for (i = 0; i < 4; i++) {
-		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
-		intel_ring_emit(engine, 0);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
+		intel_ring_emit(ring, 0);
 	}
 
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
-static struct drm_i915_gem_object*
+static struct i915_vma *
 i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
 			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
-			  struct eb_vmas *eb,
 			  struct drm_i915_gem_object *batch_obj,
+			  struct eb_vmas *eb,
 			  u32 batch_start_offset,
 			  u32 batch_len,
 			  bool is_master)
@@ -1174,51 +1409,44 @@
 	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
 						   PAGE_ALIGN(batch_len));
 	if (IS_ERR(shadow_batch_obj))
-		return shadow_batch_obj;
+		return ERR_CAST(shadow_batch_obj);
 
-	ret = i915_parse_cmds(engine,
-			      batch_obj,
-			      shadow_batch_obj,
-			      batch_start_offset,
-			      batch_len,
-			      is_master);
-	if (ret)
-		goto err;
+	ret = intel_engine_cmd_parser(engine,
+				      batch_obj,
+				      shadow_batch_obj,
+				      batch_start_offset,
+				      batch_len,
+				      is_master);
+	if (ret) {
+		if (ret == -EACCES) /* unhandled chained batch */
+			vma = NULL;
+		else
+			vma = ERR_PTR(ret);
+		goto out;
+	}
 
-	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
-	if (ret)
-		goto err;
-
-	i915_gem_object_unpin_pages(shadow_batch_obj);
+	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
+	if (IS_ERR(vma))
+		goto out;
 
 	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
 
-	vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
 	vma->exec_entry = shadow_exec_entry;
 	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
-	drm_gem_object_reference(&shadow_batch_obj->base);
+	i915_gem_object_get(shadow_batch_obj);
 	list_add_tail(&vma->exec_list, &eb->vmas);
 
-	shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
-
-	return shadow_batch_obj;
-
-err:
+out:
 	i915_gem_object_unpin_pages(shadow_batch_obj);
-	if (ret == -EACCES) /* unhandled chained batch */
-		return batch_obj;
-	else
-		return ERR_PTR(ret);
+	return vma;
 }
 
-int
-i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
-			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas)
+static int
+execbuf_submit(struct i915_execbuffer_params *params,
+	       struct drm_i915_gem_execbuffer2 *args,
+	       struct list_head *vmas)
 {
-	struct drm_device *dev = params->dev;
-	struct intel_engine_cs *engine = params->engine;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = params->request->i915;
 	u64 exec_start, exec_len;
 	int instp_mode;
 	u32 instp_mask;
@@ -1232,34 +1460,31 @@
 	if (ret)
 		return ret;
 
-	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
-	     "%s didn't clear reload\n", engine->name);
-
 	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
 	instp_mask = I915_EXEC_CONSTANTS_MASK;
 	switch (instp_mode) {
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
+		if (instp_mode != 0 && params->engine->id != RCS) {
 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
 			return -EINVAL;
 		}
 
 		if (instp_mode != dev_priv->relative_constants_mode) {
-			if (INTEL_INFO(dev)->gen < 4) {
+			if (INTEL_INFO(dev_priv)->gen < 4) {
 				DRM_DEBUG("no rel constants on pre-gen4\n");
 				return -EINVAL;
 			}
 
-			if (INTEL_INFO(dev)->gen > 5 &&
+			if (INTEL_INFO(dev_priv)->gen > 5 &&
 			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
 				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
 				return -EINVAL;
 			}
 
 			/* The HW changed the meaning on this bit on gen6 */
-			if (INTEL_INFO(dev)->gen >= 6)
+			if (INTEL_INFO(dev_priv)->gen >= 6)
 				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
 		}
 		break;
@@ -1268,37 +1493,39 @@
 		return -EINVAL;
 	}
 
-	if (engine == &dev_priv->engine[RCS] &&
+	if (params->engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
+		struct intel_ring *ring = params->request->ring;
+
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
 			return ret;
 
-		intel_ring_emit(engine, MI_NOOP);
-		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(engine, INSTPM);
-		intel_ring_emit(engine, instp_mask << 16 | instp_mode);
-		intel_ring_advance(engine);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(ring, INSTPM);
+		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+		intel_ring_advance(ring);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
 
 	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		ret = i915_reset_gen7_sol_offsets(dev, params->request);
+		ret = i915_reset_gen7_sol_offsets(params->request);
 		if (ret)
 			return ret;
 	}
 
 	exec_len   = args->batch_len;
-	exec_start = params->batch_obj_vm_offset +
+	exec_start = params->batch->node.start +
 		     params->args_batch_start_offset;
 
 	if (exec_len == 0)
-		exec_len = params->batch_obj->base.size;
+		exec_len = params->batch->size - params->args_batch_start_offset;
 
-	ret = engine->dispatch_execbuffer(params->request,
-					exec_start, exec_len,
-					params->dispatch_flags);
+	ret = params->engine->emit_bb_start(params->request,
+					    exec_start, exec_len,
+					    params->dispatch_flags);
 	if (ret)
 		return ret;
 
@@ -1311,43 +1538,20 @@
 
 /**
  * Find one BSD ring to dispatch the corresponding BSD command.
- * The ring index is returned.
+ * The engine index is returned.
  */
 static unsigned int
-gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
+gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
+			 struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
 	/* Check whether the file_priv has already selected one ring. */
-	if ((int)file_priv->bsd_ring < 0) {
-		/* If not, use the ping-pong mechanism to select one. */
-		mutex_lock(&dev_priv->drm.struct_mutex);
-		file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
-		dev_priv->mm.bsd_ring_dispatch_index ^= 1;
-		mutex_unlock(&dev_priv->drm.struct_mutex);
-	}
+	if ((int)file_priv->bsd_engine < 0)
+		file_priv->bsd_engine = atomic_fetch_xor(1,
+			 &dev_priv->mm.bsd_engine_dispatch_index);
 
-	return file_priv->bsd_ring;
-}
-
-static struct drm_i915_gem_object *
-eb_get_batch(struct eb_vmas *eb)
-{
-	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
-
-	/*
-	 * SNA is doing fancy tricks with compressing batch buffers, which leads
-	 * to negative relocation deltas. Usually that works out ok since the
-	 * relocate address is still positive, except when the batch is placed
-	 * very low in the GTT. Ensure this doesn't happen.
-	 *
-	 * Note that actual hangs have only been observed on gen7, but for
-	 * paranoia do it everywhere.
-	 */
-	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
-		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
-
-	return vma->obj;
+	return file_priv->bsd_engine;
 }
 
 #define I915_USER_RINGS (4)
@@ -1360,31 +1564,31 @@
 	[I915_EXEC_VEBOX]	= VECS
 };
 
-static int
-eb_select_ring(struct drm_i915_private *dev_priv,
-	       struct drm_file *file,
-	       struct drm_i915_gem_execbuffer2 *args,
-	       struct intel_engine_cs **ring)
+static struct intel_engine_cs *
+eb_select_engine(struct drm_i915_private *dev_priv,
+		 struct drm_file *file,
+		 struct drm_i915_gem_execbuffer2 *args)
 {
 	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
+	struct intel_engine_cs *engine;
 
 	if (user_ring_id > I915_USER_RINGS) {
 		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
-		return -EINVAL;
+		return NULL;
 	}
 
 	if ((user_ring_id != I915_EXEC_BSD) &&
 	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
 		DRM_DEBUG("execbuf with non bsd ring but with invalid "
 			  "bsd dispatch flags: %d\n", (int)(args->flags));
-		return -EINVAL;
+		return NULL;
 	}
 
 	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
 		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
 
 		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
-			bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
+			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
 		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
 			   bsd_idx <= I915_EXEC_BSD_RING2) {
 			bsd_idx >>= I915_EXEC_BSD_SHIFT;
@@ -1392,20 +1596,20 @@
 		} else {
 			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
 				  bsd_idx);
-			return -EINVAL;
+			return NULL;
 		}
 
-		*ring = &dev_priv->engine[_VCS(bsd_idx)];
+		engine = &dev_priv->engine[_VCS(bsd_idx)];
 	} else {
-		*ring = &dev_priv->engine[user_ring_map[user_ring_id]];
+		engine = &dev_priv->engine[user_ring_map[user_ring_id]];
 	}
 
-	if (!intel_engine_initialized(*ring)) {
+	if (!intel_engine_initialized(engine)) {
 		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
-		return -EINVAL;
+		return NULL;
 	}
 
-	return 0;
+	return engine;
 }
 
 static int
@@ -1416,9 +1620,7 @@
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct drm_i915_gem_request *req = NULL;
 	struct eb_vmas *eb;
-	struct drm_i915_gem_object *batch_obj;
 	struct drm_i915_gem_exec_object2 shadow_exec_entry;
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
@@ -1447,9 +1649,9 @@
 	if (args->flags & I915_EXEC_IS_PINNED)
 		dispatch_flags |= I915_DISPATCH_PINNED;
 
-	ret = eb_select_ring(dev_priv, file, args, &engine);
-	if (ret)
-		return ret;
+	engine = eb_select_engine(dev_priv, file, args);
+	if (!engine)
+		return -EINVAL;
 
 	if (args->buffer_count < 1) {
 		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
@@ -1489,7 +1691,7 @@
 		goto pre_mutex_err;
 	}
 
-	i915_gem_context_reference(ctx);
+	i915_gem_context_get(ctx);
 
 	if (ctx->ppgtt)
 		vm = &ctx->ppgtt->base;
@@ -1498,9 +1700,9 @@
 
 	memset(&params_master, 0x00, sizeof(params_master));
 
-	eb = eb_create(args);
+	eb = eb_create(dev_priv, args);
 	if (eb == NULL) {
-		i915_gem_context_unreference(ctx);
+		i915_gem_context_put(ctx);
 		mutex_unlock(&dev->struct_mutex);
 		ret = -ENOMEM;
 		goto pre_mutex_err;
@@ -1512,7 +1714,7 @@
 		goto err;
 
 	/* take note of the batch buffer before we might reorder the lists */
-	batch_obj = eb_get_batch(eb);
+	params->batch = eb_get_batch(eb);
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1536,34 +1738,34 @@
 	}
 
 	/* Set the pending read domains for the batch buffer to COMMAND */
-	if (batch_obj->base.pending_write_domain) {
+	if (params->batch->obj->base.pending_write_domain) {
 		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
 		ret = -EINVAL;
 		goto err;
 	}
+	if (args->batch_start_offset > params->batch->size ||
+	    args->batch_len > params->batch->size - args->batch_start_offset) {
+		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
+		ret = -EINVAL;
+		goto err;
+	}
 
 	params->args_batch_start_offset = args->batch_start_offset;
-	if (i915_needs_cmd_parser(engine) && args->batch_len) {
-		struct drm_i915_gem_object *parsed_batch_obj;
+	if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {
+		struct i915_vma *vma;
 
-		parsed_batch_obj = i915_gem_execbuffer_parse(engine,
-							     &shadow_exec_entry,
-							     eb,
-							     batch_obj,
-							     args->batch_start_offset,
-							     args->batch_len,
-							     drm_is_current_master(file));
-		if (IS_ERR(parsed_batch_obj)) {
-			ret = PTR_ERR(parsed_batch_obj);
+		vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
+						params->batch->obj,
+						eb,
+						args->batch_start_offset,
+						args->batch_len,
+						drm_is_current_master(file));
+		if (IS_ERR(vma)) {
+			ret = PTR_ERR(vma);
 			goto err;
 		}
 
-		/*
-		 * parsed_batch_obj == batch_obj means batch not fully parsed:
-		 * Accept, but don't promote to secure.
-		 */
-
-		if (parsed_batch_obj != batch_obj) {
+		if (vma) {
 			/*
 			 * Batch parsed and accepted:
 			 *
@@ -1575,16 +1777,19 @@
 			 */
 			dispatch_flags |= I915_DISPATCH_SECURE;
 			params->args_batch_start_offset = 0;
-			batch_obj = parsed_batch_obj;
+			params->batch = vma;
 		}
 	}
 
-	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+	params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
 	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
 	 * hsw should have this fixed, but bdw mucks it up again. */
 	if (dispatch_flags & I915_DISPATCH_SECURE) {
+		struct drm_i915_gem_object *obj = params->batch->obj;
+		struct i915_vma *vma;
+
 		/*
 		 * So on first glance it looks freaky that we pin the batch here
 		 * outside of the reservation loop. But:
@@ -1595,22 +1800,31 @@
 		 *   fitting due to fragmentation.
 		 * So this is actually safe.
 		 */
-		ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
-		if (ret)
+		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+		if (IS_ERR(vma)) {
+			ret = PTR_ERR(vma);
 			goto err;
+		}
 
-		params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
-	} else
-		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
+		params->batch = vma;
+	}
 
 	/* Allocate a request for this batch buffer nice and early. */
-	req = i915_gem_request_alloc(engine, ctx);
-	if (IS_ERR(req)) {
-		ret = PTR_ERR(req);
+	params->request = i915_gem_request_alloc(engine, ctx);
+	if (IS_ERR(params->request)) {
+		ret = PTR_ERR(params->request);
 		goto err_batch_unpin;
 	}
 
-	ret = i915_gem_request_add_to_client(req, file);
+	/* Whilst this request exists, batch_obj will be on the
+	 * active_list, and so will hold the active reference. Only when this
+	 * request is retired will the the batch_obj be moved onto the
+	 * inactive_list and lose its active reference. Hence we do not need
+	 * to explicitly hold another reference here.
+	 */
+	params->request->batch = params->batch;
+
+	ret = i915_gem_request_add_to_client(params->request, file);
 	if (ret)
 		goto err_request;
 
@@ -1624,13 +1838,11 @@
 	params->file                    = file;
 	params->engine                    = engine;
 	params->dispatch_flags          = dispatch_flags;
-	params->batch_obj               = batch_obj;
 	params->ctx                     = ctx;
-	params->request                 = req;
 
-	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+	ret = execbuf_submit(params, args, &eb->vmas);
 err_request:
-	i915_gem_execbuffer_retire_commands(params);
+	__i915_add_request(params->request, ret == 0);
 
 err_batch_unpin:
 	/*
@@ -1640,11 +1852,10 @@
 	 * active.
 	 */
 	if (dispatch_flags & I915_DISPATCH_SECURE)
-		i915_gem_object_ggtt_unpin(batch_obj);
-
+		i915_vma_unpin(params->batch);
 err:
 	/* the request owns the ref now */
-	i915_gem_context_unreference(ctx);
+	i915_gem_context_put(ctx);
 	eb_destroy(eb);
 
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 251d7a9..8df1fa7 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -55,226 +55,228 @@
  * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
  */
 
-static void i965_write_fence_reg(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj)
+#define pipelined 0
+
+static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
+				 struct i915_vma *vma)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	i915_reg_t fence_reg_lo, fence_reg_hi;
 	int fence_pitch_shift;
+	u64 val;
 
-	if (INTEL_INFO(dev)->gen >= 6) {
-		fence_reg_lo = FENCE_REG_GEN6_LO(reg);
-		fence_reg_hi = FENCE_REG_GEN6_HI(reg);
+	if (INTEL_INFO(fence->i915)->gen >= 6) {
+		fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
+		fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
 		fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
+
 	} else {
-		fence_reg_lo = FENCE_REG_965_LO(reg);
-		fence_reg_hi = FENCE_REG_965_HI(reg);
+		fence_reg_lo = FENCE_REG_965_LO(fence->id);
+		fence_reg_hi = FENCE_REG_965_HI(fence->id);
 		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
 	}
 
-	/* To w/a incoherency with non-atomic 64-bit register updates,
-	 * we split the 64-bit update into two 32-bit writes. In order
-	 * for a partial fence not to be evaluated between writes, we
-	 * precede the update with write to turn off the fence register,
-	 * and only enable the fence as the last step.
-	 *
-	 * For extra levels of paranoia, we make sure each step lands
-	 * before applying the next step.
-	 */
-	I915_WRITE(fence_reg_lo, 0);
-	POSTING_READ(fence_reg_lo);
+	val = 0;
+	if (vma) {
+		unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
+		bool is_y_tiled = tiling == I915_TILING_Y;
+		unsigned int stride = i915_gem_object_get_stride(vma->obj);
+		u32 row_size = stride * (is_y_tiled ? 32 : 8);
+		u32 size = rounddown((u32)vma->node.size, row_size);
 
-	if (obj) {
-		u32 size = i915_gem_obj_ggtt_size(obj);
-		uint64_t val;
-
-		/* Adjust fence size to match tiled area */
-		if (obj->tiling_mode != I915_TILING_NONE) {
-			uint32_t row_size = obj->stride *
-				(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
-			size = (size / row_size) * row_size;
-		}
-
-		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
-				 0xfffff000) << 32;
-		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
-		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
-		if (obj->tiling_mode == I915_TILING_Y)
-			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+		val = ((vma->node.start + size - 4096) & 0xfffff000) << 32;
+		val |= vma->node.start & 0xfffff000;
+		val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
+		if (is_y_tiled)
+			val |= BIT(I965_FENCE_TILING_Y_SHIFT);
 		val |= I965_FENCE_REG_VALID;
+	}
 
-		I915_WRITE(fence_reg_hi, val >> 32);
-		POSTING_READ(fence_reg_hi);
+	if (!pipelined) {
+		struct drm_i915_private *dev_priv = fence->i915;
 
-		I915_WRITE(fence_reg_lo, val);
+		/* To w/a incoherency with non-atomic 64-bit register updates,
+		 * we split the 64-bit update into two 32-bit writes. In order
+		 * for a partial fence not to be evaluated between writes, we
+		 * precede the update with write to turn off the fence register,
+		 * and only enable the fence as the last step.
+		 *
+		 * For extra levels of paranoia, we make sure each step lands
+		 * before applying the next step.
+		 */
+		I915_WRITE(fence_reg_lo, 0);
 		POSTING_READ(fence_reg_lo);
-	} else {
-		I915_WRITE(fence_reg_hi, 0);
-		POSTING_READ(fence_reg_hi);
+
+		I915_WRITE(fence_reg_hi, upper_32_bits(val));
+		I915_WRITE(fence_reg_lo, lower_32_bits(val));
+		POSTING_READ(fence_reg_lo);
 	}
 }
 
-static void i915_write_fence_reg(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj)
+static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
+				 struct i915_vma *vma)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 val;
 
-	if (obj) {
-		u32 size = i915_gem_obj_ggtt_size(obj);
+	val = 0;
+	if (vma) {
+		unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
+		bool is_y_tiled = tiling == I915_TILING_Y;
+		unsigned int stride = i915_gem_object_get_stride(vma->obj);
 		int pitch_val;
 		int tile_width;
 
-		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
-		     (size & -size) != size ||
-		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-		     "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
-		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
+		WARN((vma->node.start & ~I915_FENCE_START_MASK) ||
+		     !is_power_of_2(vma->node.size) ||
+		     (vma->node.start & (vma->node.size - 1)),
+		     "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08llx) aligned\n",
+		     vma->node.start,
+		     i915_vma_is_map_and_fenceable(vma),
+		     vma->node.size);
 
-		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+		if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
 			tile_width = 128;
 		else
 			tile_width = 512;
 
 		/* Note: pitch better be a power of two tile widths */
-		pitch_val = obj->stride / tile_width;
+		pitch_val = stride / tile_width;
 		pitch_val = ffs(pitch_val) - 1;
 
-		val = i915_gem_obj_ggtt_offset(obj);
-		if (obj->tiling_mode == I915_TILING_Y)
-			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-		val |= I915_FENCE_SIZE_BITS(size);
+		val = vma->node.start;
+		if (is_y_tiled)
+			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
+		val |= I915_FENCE_SIZE_BITS(vma->node.size);
 		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
 		val |= I830_FENCE_REG_VALID;
-	} else
-		val = 0;
-
-	I915_WRITE(FENCE_REG(reg), val);
-	POSTING_READ(FENCE_REG(reg));
-}
-
-static void i830_write_fence_reg(struct drm_device *dev, int reg,
-				struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	uint32_t val;
-
-	if (obj) {
-		u32 size = i915_gem_obj_ggtt_size(obj);
-		uint32_t pitch_val;
-
-		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
-		     (size & -size) != size ||
-		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-		     "object 0x%08llx not 512K or pot-size 0x%08x aligned\n",
-		     i915_gem_obj_ggtt_offset(obj), size);
-
-		pitch_val = obj->stride / 128;
-		pitch_val = ffs(pitch_val) - 1;
-
-		val = i915_gem_obj_ggtt_offset(obj);
-		if (obj->tiling_mode == I915_TILING_Y)
-			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-		val |= I830_FENCE_SIZE_BITS(size);
-		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
-		val |= I830_FENCE_REG_VALID;
-	} else
-		val = 0;
-
-	I915_WRITE(FENCE_REG(reg), val);
-	POSTING_READ(FENCE_REG(reg));
-}
-
-inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
-{
-	return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
-}
-
-static void i915_gem_write_fence(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	/* Ensure that all CPU reads are completed before installing a fence
-	 * and all writes before removing the fence.
-	 */
-	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
-		mb();
-
-	WARN(obj && (!obj->stride || !obj->tiling_mode),
-	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
-	     obj->stride, obj->tiling_mode);
-
-	if (IS_GEN2(dev))
-		i830_write_fence_reg(dev, reg, obj);
-	else if (IS_GEN3(dev))
-		i915_write_fence_reg(dev, reg, obj);
-	else if (INTEL_INFO(dev)->gen >= 4)
-		i965_write_fence_reg(dev, reg, obj);
-
-	/* And similarly be paranoid that no direct access to this region
-	 * is reordered to before the fence is installed.
-	 */
-	if (i915_gem_object_needs_mb(obj))
-		mb();
-}
-
-static inline int fence_number(struct drm_i915_private *dev_priv,
-			       struct drm_i915_fence_reg *fence)
-{
-	return fence - dev_priv->fence_regs;
-}
-
-static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
-					 struct drm_i915_fence_reg *fence,
-					 bool enable)
-{
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	int reg = fence_number(dev_priv, fence);
-
-	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
-
-	if (enable) {
-		obj->fence_reg = reg;
-		fence->obj = obj;
-		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
-	} else {
-		obj->fence_reg = I915_FENCE_REG_NONE;
-		fence->obj = NULL;
-		list_del_init(&fence->lru_list);
 	}
-	obj->fence_dirty = false;
+
+	if (!pipelined) {
+		struct drm_i915_private *dev_priv = fence->i915;
+		i915_reg_t reg = FENCE_REG(fence->id);
+
+		I915_WRITE(reg, val);
+		POSTING_READ(reg);
+	}
 }
 
-static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
+				 struct i915_vma *vma)
 {
-	if (obj->tiling_mode)
-		i915_gem_release_mmap(obj);
+	u32 val;
 
-	/* As we do not have an associated fence register, we will force
-	 * a tiling change if we ever need to acquire one.
+	val = 0;
+	if (vma) {
+		unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
+		bool is_y_tiled = tiling == I915_TILING_Y;
+		unsigned int stride = i915_gem_object_get_stride(vma->obj);
+		u32 pitch_val;
+
+		WARN((vma->node.start & ~I830_FENCE_START_MASK) ||
+		     !is_power_of_2(vma->node.size) ||
+		     (vma->node.start & (vma->node.size - 1)),
+		     "object 0x%08llx not 512K or pot-size 0x%08llx aligned\n",
+		     vma->node.start, vma->node.size);
+
+		pitch_val = stride / 128;
+		pitch_val = ffs(pitch_val) - 1;
+
+		val = vma->node.start;
+		if (is_y_tiled)
+			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
+		val |= I830_FENCE_SIZE_BITS(vma->node.size);
+		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+		val |= I830_FENCE_REG_VALID;
+	}
+
+	if (!pipelined) {
+		struct drm_i915_private *dev_priv = fence->i915;
+		i915_reg_t reg = FENCE_REG(fence->id);
+
+		I915_WRITE(reg, val);
+		POSTING_READ(reg);
+	}
+}
+
+static void fence_write(struct drm_i915_fence_reg *fence,
+			struct i915_vma *vma)
+{
+	/* Previous access through the fence register is marshalled by
+	 * the mb() inside the fault handlers (i915_gem_release_mmaps)
+	 * and explicitly managed for internal users.
 	 */
-	obj->fence_dirty = false;
-	obj->fence_reg = I915_FENCE_REG_NONE;
+
+	if (IS_GEN2(fence->i915))
+		i830_write_fence_reg(fence, vma);
+	else if (IS_GEN3(fence->i915))
+		i915_write_fence_reg(fence, vma);
+	else
+		i965_write_fence_reg(fence, vma);
+
+	/* Access through the fenced region afterwards is
+	 * ordered by the posting reads whilst writing the registers.
+	 */
+
+	fence->dirty = false;
 }
 
-static int
-i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
+static int fence_update(struct drm_i915_fence_reg *fence,
+			struct i915_vma *vma)
 {
-	if (obj->last_fenced_req) {
-		int ret = i915_wait_request(obj->last_fenced_req);
+	int ret;
+
+	if (vma) {
+		if (!i915_vma_is_map_and_fenceable(vma))
+			return -EINVAL;
+
+		if (WARN(!i915_gem_object_get_stride(vma->obj) ||
+			 !i915_gem_object_get_tiling(vma->obj),
+			 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+			 i915_gem_object_get_stride(vma->obj),
+			 i915_gem_object_get_tiling(vma->obj)))
+			return -EINVAL;
+
+		ret = i915_gem_active_retire(&vma->last_fence,
+					     &vma->obj->base.dev->struct_mutex);
 		if (ret)
 			return ret;
+	}
 
-		i915_gem_request_assign(&obj->last_fenced_req, NULL);
+	if (fence->vma) {
+		ret = i915_gem_active_retire(&fence->vma->last_fence,
+				      &fence->vma->obj->base.dev->struct_mutex);
+		if (ret)
+			return ret;
+	}
+
+	if (fence->vma && fence->vma != vma) {
+		/* Ensure that all userspace CPU access is completed before
+		 * stealing the fence.
+		 */
+		i915_gem_release_mmap(fence->vma->obj);
+
+		fence->vma->fence = NULL;
+		fence->vma = NULL;
+
+		list_move(&fence->link, &fence->i915->mm.fence_list);
+	}
+
+	fence_write(fence, vma);
+
+	if (vma) {
+		if (fence->vma != vma) {
+			vma->fence = fence;
+			fence->vma = vma;
+		}
+
+		list_move_tail(&fence->link, &fence->i915->mm.fence_list);
 	}
 
 	return 0;
 }
 
 /**
- * i915_gem_object_put_fence - force-remove fence for an object
- * @obj: object to map through a fence reg
+ * i915_vma_put_fence - force-remove fence for a VMA
+ * @vma: vma to map linearly (not through a fence reg)
  *
  * This function force-removes any fence from the given object, which is useful
  * if the kernel wants to do untiled GTT access.
@@ -284,70 +286,40 @@
  * 0 on success, negative error code on failure.
  */
 int
-i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+i915_vma_put_fence(struct i915_vma *vma)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct drm_i915_fence_reg *fence;
-	int ret;
+	struct drm_i915_fence_reg *fence = vma->fence;
 
-	ret = i915_gem_object_wait_fence(obj);
-	if (ret)
-		return ret;
-
-	if (obj->fence_reg == I915_FENCE_REG_NONE)
+	if (!fence)
 		return 0;
 
-	fence = &dev_priv->fence_regs[obj->fence_reg];
-
-	if (WARN_ON(fence->pin_count))
+	if (fence->pin_count)
 		return -EBUSY;
 
-	i915_gem_object_fence_lost(obj);
-	i915_gem_object_update_fence(obj, fence, false);
-
-	return 0;
+	return fence_update(fence, NULL);
 }
 
-static struct drm_i915_fence_reg *
-i915_find_fence_reg(struct drm_device *dev)
+static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_i915_fence_reg *reg, *avail;
-	int i;
+	struct drm_i915_fence_reg *fence;
 
-	/* First try to find a free reg */
-	avail = NULL;
-	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		reg = &dev_priv->fence_regs[i];
-		if (!reg->obj)
-			return reg;
-
-		if (!reg->pin_count)
-			avail = reg;
-	}
-
-	if (avail == NULL)
-		goto deadlock;
-
-	/* None available, try to steal one or wait for a user to finish */
-	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
-		if (reg->pin_count)
+	list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
+		if (fence->pin_count)
 			continue;
 
-		return reg;
+		return fence;
 	}
 
-deadlock:
 	/* Wait for completion of pending flips which consume fences */
-	if (intel_has_pending_fb_unpin(dev))
+	if (intel_has_pending_fb_unpin(&dev_priv->drm))
 		return ERR_PTR(-EAGAIN);
 
 	return ERR_PTR(-EDEADLK);
 }
 
 /**
- * i915_gem_object_get_fence - set up fencing for an object
- * @obj: object to map through a fence reg
+ * i915_vma_get_fence - set up fencing for a vma
+ * @vma: vma to map through a fence reg
  *
  * When mapping objects through the GTT, userspace wants to be able to write
  * to them without having to worry about swizzling if the object is tiled.
@@ -364,103 +336,27 @@
  * 0 on success, negative error code on failure.
  */
 int
-i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
+i915_vma_get_fence(struct i915_vma *vma)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	bool enable = obj->tiling_mode != I915_TILING_NONE;
-	struct drm_i915_fence_reg *reg;
-	int ret;
-
-	/* Have we updated the tiling parameters upon the object and so
-	 * will need to serialise the write to the associated fence register?
-	 */
-	if (obj->fence_dirty) {
-		ret = i915_gem_object_wait_fence(obj);
-		if (ret)
-			return ret;
-	}
+	struct drm_i915_fence_reg *fence;
+	struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
 
 	/* Just update our place in the LRU if our fence is getting reused. */
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		reg = &dev_priv->fence_regs[obj->fence_reg];
-		if (!obj->fence_dirty) {
-			list_move_tail(&reg->lru_list,
-				       &dev_priv->mm.fence_list);
+	if (vma->fence) {
+		fence = vma->fence;
+		if (!fence->dirty) {
+			list_move_tail(&fence->link,
+				       &fence->i915->mm.fence_list);
 			return 0;
 		}
-	} else if (enable) {
-		if (WARN_ON(!obj->map_and_fenceable))
-			return -EINVAL;
-
-		reg = i915_find_fence_reg(dev);
-		if (IS_ERR(reg))
-			return PTR_ERR(reg);
-
-		if (reg->obj) {
-			struct drm_i915_gem_object *old = reg->obj;
-
-			ret = i915_gem_object_wait_fence(old);
-			if (ret)
-				return ret;
-
-			i915_gem_object_fence_lost(old);
-		}
+	} else if (set) {
+		fence = fence_find(to_i915(vma->vm->dev));
+		if (IS_ERR(fence))
+			return PTR_ERR(fence);
 	} else
 		return 0;
 
-	i915_gem_object_update_fence(obj, reg, enable);
-
-	return 0;
-}
-
-/**
- * i915_gem_object_pin_fence - pin fencing state
- * @obj: object to pin fencing for
- *
- * This pins the fencing state (whether tiled or untiled) to make sure the
- * object is ready to be used as a scanout target. Fencing status must be
- * synchronize first by calling i915_gem_object_get_fence():
- *
- * The resulting fence pin reference must be released again with
- * i915_gem_object_unpin_fence().
- *
- * Returns:
- *
- * True if the object has a fence, false otherwise.
- */
-bool
-i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
-{
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
-
-		WARN_ON(!ggtt_vma ||
-			dev_priv->fence_regs[obj->fence_reg].pin_count >
-			ggtt_vma->pin_count);
-		dev_priv->fence_regs[obj->fence_reg].pin_count++;
-		return true;
-	} else
-		return false;
-}
-
-/**
- * i915_gem_object_unpin_fence - unpin fencing state
- * @obj: object to unpin fencing for
- *
- * This releases the fence pin reference acquired through
- * i915_gem_object_pin_fence. It will handle both objects with and without an
- * attached fence correctly, callers do not need to distinguish this.
- */
-void
-i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
-{
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
-		dev_priv->fence_regs[obj->fence_reg].pin_count--;
-	}
+	return fence_update(fence, set);
 }
 
 /**
@@ -477,17 +373,16 @@
 
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+		struct i915_vma *vma = reg->vma;
 
 		/*
 		 * Commit delayed tiling changes if we have an object still
 		 * attached to the fence, otherwise just clear the fence.
 		 */
-		if (reg->obj) {
-			i915_gem_object_update_fence(reg->obj, reg,
-						     reg->obj->tiling_mode);
-		} else {
-			i915_gem_write_fence(dev, i, NULL);
-		}
+		if (vma && !i915_gem_object_is_tiled(vma->obj))
+			vma = NULL;
+
+		fence_update(reg, vma);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index f38ceff..0bb4232 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -32,6 +32,8 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
+
 /**
  * DOC: Global GTT views
  *
@@ -173,11 +175,13 @@
 {
 	u32 pte_flags = 0;
 
+	vma->pages = vma->obj->pages;
+
 	/* Currently applicable only to VLV */
 	if (vma->obj->gt_ro)
 		pte_flags |= PTE_READ_ONLY;
 
-	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
+	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
 				cache_level, pte_flags);
 
 	return 0;
@@ -187,7 +191,7 @@
 {
 	vma->vm->clear_range(vma->vm,
 			     vma->node.start,
-			     vma->obj->base.size,
+			     vma->size,
 			     true);
 }
 
@@ -327,16 +331,16 @@
 static int __setup_page_dma(struct drm_device *dev,
 			    struct i915_page_dma *p, gfp_t flags)
 {
-	struct device *device = &dev->pdev->dev;
+	struct device *kdev = &dev->pdev->dev;
 
 	p->page = alloc_page(flags);
 	if (!p->page)
 		return -ENOMEM;
 
-	p->daddr = dma_map_page(device,
+	p->daddr = dma_map_page(kdev,
 				p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
 
-	if (dma_mapping_error(device, p->daddr)) {
+	if (dma_mapping_error(kdev, p->daddr)) {
 		__free_page(p->page);
 		return -EINVAL;
 	}
@@ -346,15 +350,17 @@
 
 static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 {
-	return __setup_page_dma(dev, p, GFP_KERNEL);
+	return __setup_page_dma(dev, p, I915_GFP_DMA);
 }
 
 static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 {
+	struct pci_dev *pdev = dev->pdev;
+
 	if (WARN_ON(!p->page))
 		return;
 
-	dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_page(&pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
 	__free_page(p->page);
 	memset(p, 0, sizeof(*p));
 }
@@ -408,33 +414,18 @@
 	fill_page_dma(dev, p, v);
 }
 
-static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
+static int
+setup_scratch_page(struct drm_device *dev,
+		   struct i915_page_dma *scratch,
+		   gfp_t gfp)
 {
-	struct i915_page_scratch *sp;
-	int ret;
-
-	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
-	if (sp == NULL)
-		return ERR_PTR(-ENOMEM);
-
-	ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
-	if (ret) {
-		kfree(sp);
-		return ERR_PTR(ret);
-	}
-
-	set_pages_uc(px_page(sp), 1);
-
-	return sp;
+	return __setup_page_dma(dev, scratch, gfp | __GFP_ZERO);
 }
 
-static void free_scratch_page(struct drm_device *dev,
-			      struct i915_page_scratch *sp)
+static void cleanup_scratch_page(struct drm_device *dev,
+				 struct i915_page_dma *scratch)
 {
-	set_pages_wb(px_page(sp), 1);
-
-	cleanup_px(dev, sp);
-	kfree(sp);
+	cleanup_page_dma(dev, scratch);
 }
 
 static struct i915_page_table *alloc_pt(struct drm_device *dev)
@@ -480,7 +471,7 @@
 {
 	gen8_pte_t scratch_pte;
 
-	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+	scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
 				      I915_CACHE_LLC, true);
 
 	fill_px(vm->dev, pt, scratch_pte);
@@ -491,9 +482,9 @@
 {
 	gen6_pte_t scratch_pte;
 
-	WARN_ON(px_dma(vm->scratch_page) == 0);
+	WARN_ON(vm->scratch_page.daddr == 0);
 
-	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
 				     I915_CACHE_LLC, true, 0);
 
 	fill32_px(vm->dev, pt, scratch_pte);
@@ -672,6 +663,7 @@
 			  unsigned entry,
 			  dma_addr_t addr)
 {
+	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
@@ -681,13 +673,13 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry));
-	intel_ring_emit(engine, upper_32_bits(addr));
-	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(engine, GEN8_RING_PDP_LDW(engine, entry));
-	intel_ring_emit(engine, lower_32_bits(addr));
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
+	intel_ring_emit(ring, upper_32_bits(addr));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
+	intel_ring_emit(ring, lower_32_bits(addr));
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -776,7 +768,7 @@
 				   bool use_scratch)
 {
 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+	gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
 						 I915_CACHE_LLC, use_scratch);
 
 	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
@@ -882,9 +874,9 @@
 	struct drm_device *dev = vm->dev;
 	int ret;
 
-	vm->scratch_page = alloc_scratch_page(dev);
-	if (IS_ERR(vm->scratch_page))
-		return PTR_ERR(vm->scratch_page);
+	ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+	if (ret)
+		return ret;
 
 	vm->scratch_pt = alloc_pt(dev);
 	if (IS_ERR(vm->scratch_pt)) {
@@ -918,7 +910,7 @@
 free_pt:
 	free_pt(dev, vm->scratch_pt);
 free_scratch_page:
-	free_scratch_page(dev, vm->scratch_page);
+	cleanup_scratch_page(dev, &vm->scratch_page);
 
 	return ret;
 }
@@ -962,7 +954,7 @@
 		free_pdp(dev, vm->scratch_pdp);
 	free_pd(dev, vm->scratch_pd);
 	free_pt(dev, vm->scratch_pt);
-	free_scratch_page(dev, vm->scratch_page);
+	cleanup_scratch_page(dev, &vm->scratch_page);
 }
 
 static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
@@ -1459,7 +1451,7 @@
 	struct i915_address_space *vm = &ppgtt->base;
 	uint64_t start = ppgtt->base.start;
 	uint64_t length = ppgtt->base.total;
-	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+	gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
 						 I915_CACHE_LLC, true);
 
 	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
@@ -1576,7 +1568,7 @@
 	uint32_t  pte, pde;
 	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
 
-	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
 				     I915_CACHE_LLC, true, 0);
 
 	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
@@ -1663,11 +1655,12 @@
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
+	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
 	if (ret)
 		return ret;
 
@@ -1675,13 +1668,13 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(engine, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(engine, get_pd_offset(ppgtt));
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
+	intel_ring_emit(ring, PP_DIR_DCLV_2G);
+	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
+	intel_ring_emit(ring, get_pd_offset(ppgtt));
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1689,11 +1682,12 @@
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
+	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
 	if (ret)
 		return ret;
 
@@ -1701,17 +1695,17 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(engine, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(engine, get_pd_offset(ppgtt));
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
+	intel_ring_emit(ring, PP_DIR_DCLV_2G);
+	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
+	intel_ring_emit(ring, get_pd_offset(ppgtt));
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (engine->id != RCS) {
-		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
 		if (ret)
 			return ret;
 	}
@@ -1799,7 +1793,7 @@
 	unsigned first_pte = first_entry % GEN6_PTES;
 	unsigned last_pte, i;
 
-	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
 				     I915_CACHE_LLC, true, 0);
 
 	while (num_entries) {
@@ -1945,14 +1939,15 @@
 static int gen6_init_scratch(struct i915_address_space *vm)
 {
 	struct drm_device *dev = vm->dev;
+	int ret;
 
-	vm->scratch_page = alloc_scratch_page(dev);
-	if (IS_ERR(vm->scratch_page))
-		return PTR_ERR(vm->scratch_page);
+	ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+	if (ret)
+		return ret;
 
 	vm->scratch_pt = alloc_pt(dev);
 	if (IS_ERR(vm->scratch_pt)) {
-		free_scratch_page(dev, vm->scratch_page);
+		cleanup_scratch_page(dev, &vm->scratch_page);
 		return PTR_ERR(vm->scratch_pt);
 	}
 
@@ -1966,7 +1961,7 @@
 	struct drm_device *dev = vm->dev;
 
 	free_pt(dev, vm->scratch_pt);
-	free_scratch_page(dev, vm->scratch_page);
+	cleanup_scratch_page(dev, &vm->scratch_page);
 }
 
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -2012,7 +2007,7 @@
 						  0, ggtt->base.total,
 						  DRM_MM_TOPDOWN);
 	if (ret == -ENOSPC && !retried) {
-		ret = i915_gem_evict_something(dev, &ggtt->base,
+		ret = i915_gem_evict_something(&ggtt->base,
 					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
 					       I915_CACHE_NONE,
 					       0, ggtt->base.total,
@@ -2104,11 +2099,12 @@
 	return 0;
 }
 
-static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
+			   struct drm_i915_private *dev_priv)
 {
-	ppgtt->base.dev = dev;
+	ppgtt->base.dev = &dev_priv->drm;
 
-	if (INTEL_INFO(dev)->gen < 8)
+	if (INTEL_INFO(dev_priv)->gen < 8)
 		return gen6_ppgtt_init(ppgtt);
 	else
 		return gen8_ppgtt_init(ppgtt);
@@ -2118,9 +2114,9 @@
 				    struct drm_i915_private *dev_priv)
 {
 	drm_mm_init(&vm->mm, vm->start, vm->total);
-	vm->dev = &dev_priv->drm;
 	INIT_LIST_HEAD(&vm->active_list);
 	INIT_LIST_HEAD(&vm->inactive_list);
+	INIT_LIST_HEAD(&vm->unbound_list);
 	list_add_tail(&vm->global_link, &dev_priv->vm_list);
 }
 
@@ -2143,15 +2139,17 @@
 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
 }
 
-static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
+			   struct drm_i915_private *dev_priv,
+			   struct drm_i915_file_private *file_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int ret = 0;
+	int ret;
 
-	ret = __hw_ppgtt_init(dev, ppgtt);
+	ret = __hw_ppgtt_init(ppgtt, dev_priv);
 	if (ret == 0) {
 		kref_init(&ppgtt->ref);
 		i915_address_space_init(&ppgtt->base, dev_priv);
+		ppgtt->base.file = file_priv;
 	}
 
 	return ret;
@@ -2183,7 +2181,8 @@
 }
 
 struct i915_hw_ppgtt *
-i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
+i915_ppgtt_create(struct drm_i915_private *dev_priv,
+		  struct drm_i915_file_private *fpriv)
 {
 	struct i915_hw_ppgtt *ppgtt;
 	int ret;
@@ -2192,14 +2191,12 @@
 	if (!ppgtt)
 		return ERR_PTR(-ENOMEM);
 
-	ret = i915_ppgtt_init(dev, ppgtt);
+	ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv);
 	if (ret) {
 		kfree(ppgtt);
 		return ERR_PTR(ret);
 	}
 
-	ppgtt->file_priv = fpriv;
-
 	trace_i915_ppgtt_create(&ppgtt->base);
 
 	return ppgtt;
@@ -2212,9 +2209,10 @@
 
 	trace_i915_ppgtt_release(&ppgtt->base);
 
-	/* vmas should already be unbound */
+	/* vmas should already be unbound and destroyed */
 	WARN_ON(!list_empty(&ppgtt->base.active_list));
 	WARN_ON(!list_empty(&ppgtt->base.inactive_list));
+	WARN_ON(!list_empty(&ppgtt->base.unbound_list));
 
 	list_del(&ppgtt->base.global_link);
 	drm_mm_takedown(&ppgtt->base.mm);
@@ -2223,47 +2221,21 @@
 	kfree(ppgtt);
 }
 
-extern int intel_iommu_gfx_mapped;
 /* Certain Gen5 chipsets require require idling the GPU before
  * unmapping anything from the GTT when VT-d is enabled.
  */
-static bool needs_idle_maps(struct drm_device *dev)
+static bool needs_idle_maps(struct drm_i915_private *dev_priv)
 {
 #ifdef CONFIG_INTEL_IOMMU
 	/* Query intel_iommu to see if we need the workaround. Presumably that
 	 * was loaded first.
 	 */
-	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
+	if (IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_iommu_gfx_mapped)
 		return true;
 #endif
 	return false;
 }
 
-static bool do_idling(struct drm_i915_private *dev_priv)
-{
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	bool ret = dev_priv->mm.interruptible;
-
-	if (unlikely(ggtt->do_idle_maps)) {
-		dev_priv->mm.interruptible = false;
-		if (i915_gem_wait_for_idle(dev_priv)) {
-			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
-			/* Wait a bit, in hopes it avoids the hang */
-			udelay(10);
-		}
-	}
-
-	return ret;
-}
-
-static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
-{
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
-	if (unlikely(ggtt->do_idle_maps))
-		dev_priv->mm.interruptible = interruptible;
-}
-
 void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
@@ -2332,12 +2304,7 @@
 
 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
 {
-#ifdef writeq
 	writeq(pte, addr);
-#else
-	iowrite32((u32)pte, addr);
-	iowrite32(pte >> 32, addr + 4);
-#endif
 }
 
 static void gen8_ggtt_insert_page(struct i915_address_space *vm,
@@ -2530,7 +2497,7 @@
 		 first_entry, num_entries, max_entries))
 		num_entries = max_entries;
 
-	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+	scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
 				      I915_CACHE_LLC,
 				      use_scratch);
 	for (i = 0; i < num_entries; i++)
@@ -2562,7 +2529,7 @@
 		 first_entry, num_entries, max_entries))
 		num_entries = max_entries;
 
-	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
 				     I915_CACHE_LLC, use_scratch, 0);
 
 	for (i = 0; i < num_entries; i++)
@@ -2641,8 +2608,7 @@
 	if (obj->gt_ro)
 		pte_flags |= PTE_READ_ONLY;
 
-	vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
-				vma->node.start,
+	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
 				cache_level, pte_flags);
 
 	/*
@@ -2650,7 +2616,7 @@
 	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
 	 * upgrade to both bound if we bind either to avoid double-binding.
 	 */
-	vma->bound |= GLOBAL_BIND | LOCAL_BIND;
+	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
 
 	return 0;
 }
@@ -2672,19 +2638,17 @@
 		pte_flags |= PTE_READ_ONLY;
 
 
-	if (flags & GLOBAL_BIND) {
+	if (flags & I915_VMA_GLOBAL_BIND) {
 		vma->vm->insert_entries(vma->vm,
-					vma->ggtt_view.pages,
-					vma->node.start,
+					vma->pages, vma->node.start,
 					cache_level, pte_flags);
 	}
 
-	if (flags & LOCAL_BIND) {
+	if (flags & I915_VMA_LOCAL_BIND) {
 		struct i915_hw_ppgtt *appgtt =
 			to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
 		appgtt->base.insert_entries(&appgtt->base,
-					    vma->ggtt_view.pages,
-					    vma->node.start,
+					    vma->pages, vma->node.start,
 					    cache_level, pte_flags);
 	}
 
@@ -2693,42 +2657,36 @@
 
 static void ggtt_unbind_vma(struct i915_vma *vma)
 {
-	struct drm_device *dev = vma->vm->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_i915_gem_object *obj = vma->obj;
-	const uint64_t size = min_t(uint64_t,
-				    obj->base.size,
-				    vma->node.size);
+	struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+	const u64 size = min(vma->size, vma->node.size);
 
-	if (vma->bound & GLOBAL_BIND) {
+	if (vma->flags & I915_VMA_GLOBAL_BIND)
 		vma->vm->clear_range(vma->vm,
-				     vma->node.start,
-				     size,
+				     vma->node.start, size,
 				     true);
-	}
 
-	if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
-		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
-
+	if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
 		appgtt->base.clear_range(&appgtt->base,
-					 vma->node.start,
-					 size,
+					 vma->node.start, size,
 					 true);
-	}
 }
 
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	bool interruptible;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct device *kdev = &dev_priv->drm.pdev->dev;
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
-	interruptible = do_idling(dev_priv);
+	if (unlikely(ggtt->do_idle_maps)) {
+		if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
+			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
+			/* Wait a bit, in hopes it avoids the hang */
+			udelay(10);
+		}
+	}
 
-	dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
+	dma_unmap_sg(kdev, obj->pages->sgl, obj->pages->nents,
 		     PCI_DMA_BIDIRECTIONAL);
-
-	undo_idling(dev_priv, interruptible);
 }
 
 static void i915_gtt_color_adjust(struct drm_mm_node *node,
@@ -2739,19 +2697,14 @@
 	if (node->color != color)
 		*start += 4096;
 
-	if (!list_empty(&node->node_list)) {
-		node = list_entry(node->node_list.next,
-				  struct drm_mm_node,
-				  node_list);
-		if (node->allocated && node->color != color)
-			*end -= 4096;
-	}
+	node = list_first_entry_or_null(&node->node_list,
+					struct drm_mm_node,
+					node_list);
+	if (node && node->allocated && node->color != color)
+		*end -= 4096;
 }
 
-static int i915_gem_setup_global_gtt(struct drm_device *dev,
-				     u64 start,
-				     u64 mappable_end,
-				     u64 end)
+int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 {
 	/* Let GEM Manage all of the aperture.
 	 *
@@ -2762,48 +2715,15 @@
 	 * aperture.  One page should be enough to keep any prefetching inside
 	 * of the aperture.
 	 */
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct drm_mm_node *entry;
-	struct drm_i915_gem_object *obj;
 	unsigned long hole_start, hole_end;
+	struct drm_mm_node *entry;
 	int ret;
 
-	BUG_ON(mappable_end > end);
-
-	ggtt->base.start = start;
-
-	/* Subtract the guard page before address space initialization to
-	 * shrink the range used by drm_mm */
-	ggtt->base.total = end - start - PAGE_SIZE;
-	i915_address_space_init(&ggtt->base, dev_priv);
-	ggtt->base.total += PAGE_SIZE;
-
 	ret = intel_vgt_balloon(dev_priv);
 	if (ret)
 		return ret;
 
-	if (!HAS_LLC(dev))
-		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
-
-	/* Mark any preallocated objects as occupied */
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(obj, &ggtt->base);
-
-		DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
-			      i915_gem_obj_ggtt_offset(obj), obj->base.size);
-
-		WARN_ON(i915_gem_obj_ggtt_bound(obj));
-		ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
-		if (ret) {
-			DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
-			return ret;
-		}
-		vma->bound |= GLOBAL_BIND;
-		__i915_vma_set_map_and_fenceable(vma);
-		list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
-	}
-
 	/* Clear any non-preallocated blocks */
 	drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
 		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
@@ -2813,18 +2733,19 @@
 	}
 
 	/* And finally clear the reserved guard page */
-	ggtt->base.clear_range(&ggtt->base, end - PAGE_SIZE, PAGE_SIZE, true);
+	ggtt->base.clear_range(&ggtt->base,
+			       ggtt->base.total - PAGE_SIZE, PAGE_SIZE,
+			       true);
 
-	if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
+	if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
 		struct i915_hw_ppgtt *ppgtt;
 
 		ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
 		if (!ppgtt)
 			return -ENOMEM;
 
-		ret = __hw_ppgtt_init(dev, ppgtt);
+		ret = __hw_ppgtt_init(ppgtt, dev_priv);
 		if (ret) {
-			ppgtt->base.cleanup(&ppgtt->base);
 			kfree(ppgtt);
 			return ret;
 		}
@@ -2852,34 +2773,20 @@
 }
 
 /**
- * i915_gem_init_ggtt - Initialize GEM for Global GTT
- * @dev: DRM device
- */
-void i915_gem_init_ggtt(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
-	i915_gem_setup_global_gtt(dev, 0, ggtt->mappable_end, ggtt->base.total);
-}
-
-/**
  * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
- * @dev: DRM device
+ * @dev_priv: i915 device
  */
-void i915_ggtt_cleanup_hw(struct drm_device *dev)
+void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
 	if (dev_priv->mm.aliasing_ppgtt) {
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
 		ppgtt->base.cleanup(&ppgtt->base);
 		kfree(ppgtt);
 	}
 
-	i915_gem_cleanup_stolen(dev);
+	i915_gem_cleanup_stolen(&dev_priv->drm);
 
 	if (drm_mm_initialized(&ggtt->base.mm)) {
 		intel_vgt_deballoon(dev_priv);
@@ -2889,6 +2796,9 @@
 	}
 
 	ggtt->base.cleanup(&ggtt->base);
+
+	arch_phys_wc_del(ggtt->mtrr);
+	io_mapping_fini(&ggtt->mappable);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2969,17 +2879,14 @@
 		return (gen9_gmch_ctl - 0xf0 + 1) << 22;
 }
 
-static int ggtt_probe_common(struct drm_device *dev,
-			     size_t gtt_size)
+static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct i915_page_scratch *scratch_page;
-	phys_addr_t ggtt_phys_addr;
+	struct pci_dev *pdev = ggtt->base.dev->pdev;
+	phys_addr_t phys_addr;
+	int ret;
 
 	/* For Modern GENs the PTEs and register space are split in the BAR */
-	ggtt_phys_addr = pci_resource_start(dev->pdev, 0) +
-			 (pci_resource_len(dev->pdev, 0) / 2);
+	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
 
 	/*
 	 * On BXT writes larger than 64 bit to the GTT pagetable range will be
@@ -2988,25 +2895,25 @@
 	 * resort to an uncached mapping. The WC issue is easily caught by the
 	 * readback check when writing GTT PTE entries.
 	 */
-	if (IS_BROXTON(dev))
-		ggtt->gsm = ioremap_nocache(ggtt_phys_addr, gtt_size);
+	if (IS_BROXTON(ggtt->base.dev))
+		ggtt->gsm = ioremap_nocache(phys_addr, size);
 	else
-		ggtt->gsm = ioremap_wc(ggtt_phys_addr, gtt_size);
+		ggtt->gsm = ioremap_wc(phys_addr, size);
 	if (!ggtt->gsm) {
-		DRM_ERROR("Failed to map the gtt page table\n");
+		DRM_ERROR("Failed to map the ggtt page table\n");
 		return -ENOMEM;
 	}
 
-	scratch_page = alloc_scratch_page(dev);
-	if (IS_ERR(scratch_page)) {
+	ret = setup_scratch_page(ggtt->base.dev,
+				 &ggtt->base.scratch_page,
+				 GFP_DMA32);
+	if (ret) {
 		DRM_ERROR("Scratch setup failed\n");
 		/* iounmap will also get called at remove, but meh */
 		iounmap(ggtt->gsm);
-		return PTR_ERR(scratch_page);
+		return ret;
 	}
 
-	ggtt->base.scratch_page = scratch_page;
-
 	return 0;
 }
 
@@ -3083,42 +2990,49 @@
 	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
 }
 
+static void gen6_gmch_remove(struct i915_address_space *vm)
+{
+	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+	iounmap(ggtt->gsm);
+	cleanup_scratch_page(vm->dev, &vm->scratch_page);
+}
+
 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 {
-	struct drm_device *dev = ggtt->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
+	unsigned int size;
 	u16 snb_gmch_ctl;
-	int ret;
 
 	/* TODO: We're not aware of mappable constraints on gen8 yet */
-	ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
-	ggtt->mappable_end = pci_resource_len(dev->pdev, 2);
+	ggtt->mappable_base = pci_resource_start(pdev, 2);
+	ggtt->mappable_end = pci_resource_len(pdev, 2);
 
-	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
-		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(39)))
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
 
-	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
-	if (INTEL_INFO(dev)->gen >= 9) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
-		ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
-	} else if (IS_CHERRYVIEW(dev)) {
+		size = gen8_get_total_gtt_size(snb_gmch_ctl);
+	} else if (IS_CHERRYVIEW(dev_priv)) {
 		ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
-		ggtt->size = chv_get_total_gtt_size(snb_gmch_ctl);
+		size = chv_get_total_gtt_size(snb_gmch_ctl);
 	} else {
 		ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
-		ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
+		size = gen8_get_total_gtt_size(snb_gmch_ctl);
 	}
 
-	ggtt->base.total = (ggtt->size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+	ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
 
-	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+	if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
 		chv_setup_private_ppat(dev_priv);
 	else
 		bdw_setup_private_ppat(dev_priv);
 
-	ret = ggtt_probe_common(dev, ggtt->size);
-
+	ggtt->base.cleanup = gen6_gmch_remove;
 	ggtt->base.bind_vma = ggtt_bind_vma;
 	ggtt->base.unbind_vma = ggtt_unbind_vma;
 	ggtt->base.insert_page = gen8_ggtt_insert_page;
@@ -3130,57 +3044,65 @@
 	if (IS_CHERRYVIEW(dev_priv))
 		ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
 
-	return ret;
+	return ggtt_probe_common(ggtt, size);
 }
 
 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 {
-	struct drm_device *dev = ggtt->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
+	unsigned int size;
 	u16 snb_gmch_ctl;
-	int ret;
 
-	ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
-	ggtt->mappable_end = pci_resource_len(dev->pdev, 2);
+	ggtt->mappable_base = pci_resource_start(pdev, 2);
+	ggtt->mappable_end = pci_resource_len(pdev, 2);
 
 	/* 64/512MB is the current min/max we actually know of, but this is just
 	 * a coarse sanity check.
 	 */
-	if ((ggtt->mappable_end < (64<<20) || (ggtt->mappable_end > (512<<20)))) {
+	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
 		DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
 		return -ENXIO;
 	}
 
-	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
-		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
-	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
 	ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
-	ggtt->size = gen6_get_total_gtt_size(snb_gmch_ctl);
-	ggtt->base.total = (ggtt->size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
 
-	ret = ggtt_probe_common(dev, ggtt->size);
+	size = gen6_get_total_gtt_size(snb_gmch_ctl);
+	ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
 
 	ggtt->base.clear_range = gen6_ggtt_clear_range;
 	ggtt->base.insert_page = gen6_ggtt_insert_page;
 	ggtt->base.insert_entries = gen6_ggtt_insert_entries;
 	ggtt->base.bind_vma = ggtt_bind_vma;
 	ggtt->base.unbind_vma = ggtt_unbind_vma;
+	ggtt->base.cleanup = gen6_gmch_remove;
 
-	return ret;
+	if (HAS_EDRAM(dev_priv))
+		ggtt->base.pte_encode = iris_pte_encode;
+	else if (IS_HASWELL(dev_priv))
+		ggtt->base.pte_encode = hsw_pte_encode;
+	else if (IS_VALLEYVIEW(dev_priv))
+		ggtt->base.pte_encode = byt_pte_encode;
+	else if (INTEL_GEN(dev_priv) >= 7)
+		ggtt->base.pte_encode = ivb_pte_encode;
+	else
+		ggtt->base.pte_encode = snb_pte_encode;
+
+	return ggtt_probe_common(ggtt, size);
 }
 
-static void gen6_gmch_remove(struct i915_address_space *vm)
+static void i915_gmch_remove(struct i915_address_space *vm)
 {
-	struct i915_ggtt *ggtt = container_of(vm, struct i915_ggtt, base);
-
-	iounmap(ggtt->gsm);
-	free_scratch_page(vm->dev, vm->scratch_page);
+	intel_gmch_remove();
 }
 
 static int i915_gmch_probe(struct i915_ggtt *ggtt)
 {
-	struct drm_device *dev = ggtt->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
 	int ret;
 
 	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
@@ -3192,12 +3114,13 @@
 	intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
 		      &ggtt->mappable_base, &ggtt->mappable_end);
 
-	ggtt->do_idle_maps = needs_idle_maps(&dev_priv->drm);
+	ggtt->do_idle_maps = needs_idle_maps(dev_priv);
 	ggtt->base.insert_page = i915_ggtt_insert_page;
 	ggtt->base.insert_entries = i915_ggtt_insert_entries;
 	ggtt->base.clear_range = i915_ggtt_clear_range;
 	ggtt->base.bind_vma = ggtt_bind_vma;
 	ggtt->base.unbind_vma = ggtt_unbind_vma;
+	ggtt->base.cleanup = i915_gmch_remove;
 
 	if (unlikely(ggtt->do_idle_maps))
 		DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -3205,65 +3128,40 @@
 	return 0;
 }
 
-static void i915_gmch_remove(struct i915_address_space *vm)
-{
-	intel_gmch_remove();
-}
-
 /**
- * i915_ggtt_init_hw - Initialize GGTT hardware
- * @dev: DRM device
+ * i915_ggtt_probe_hw - Probe GGTT hardware location
+ * @dev_priv: i915 device
  */
-int i915_ggtt_init_hw(struct drm_device *dev)
+int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	int ret;
 
-	if (INTEL_INFO(dev)->gen <= 5) {
-		ggtt->probe = i915_gmch_probe;
-		ggtt->base.cleanup = i915_gmch_remove;
-	} else if (INTEL_INFO(dev)->gen < 8) {
-		ggtt->probe = gen6_gmch_probe;
-		ggtt->base.cleanup = gen6_gmch_remove;
+	ggtt->base.dev = &dev_priv->drm;
 
-		if (HAS_EDRAM(dev))
-			ggtt->base.pte_encode = iris_pte_encode;
-		else if (IS_HASWELL(dev))
-			ggtt->base.pte_encode = hsw_pte_encode;
-		else if (IS_VALLEYVIEW(dev))
-			ggtt->base.pte_encode = byt_pte_encode;
-		else if (INTEL_INFO(dev)->gen >= 7)
-			ggtt->base.pte_encode = ivb_pte_encode;
-		else
-			ggtt->base.pte_encode = snb_pte_encode;
-	} else {
-		ggtt->probe = gen8_gmch_probe;
-		ggtt->base.cleanup = gen6_gmch_remove;
-	}
-
-	ggtt->base.dev = dev;
-	ggtt->base.is_ggtt = true;
-
-	ret = ggtt->probe(ggtt);
+	if (INTEL_GEN(dev_priv) <= 5)
+		ret = i915_gmch_probe(ggtt);
+	else if (INTEL_GEN(dev_priv) < 8)
+		ret = gen6_gmch_probe(ggtt);
+	else
+		ret = gen8_gmch_probe(ggtt);
 	if (ret)
 		return ret;
 
 	if ((ggtt->base.total - 1) >> 32) {
 		DRM_ERROR("We never expected a Global GTT with more than 32bits"
-			  "of address space! Found %lldM!\n",
+			  " of address space! Found %lldM!\n",
 			  ggtt->base.total >> 20);
 		ggtt->base.total = 1ULL << 32;
 		ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
 	}
 
-	/*
-	 * Initialise stolen early so that we may reserve preallocated
-	 * objects for the BIOS to KMS transition.
-	 */
-	ret = i915_gem_init_stolen(dev);
-	if (ret)
-		goto out_gtt_cleanup;
+	if (ggtt->mappable_end > ggtt->base.total) {
+		DRM_ERROR("mappable aperture extends past end of GGTT,"
+			  " aperture=%llx, total=%llx\n",
+			  ggtt->mappable_end, ggtt->base.total);
+		ggtt->mappable_end = ggtt->base.total;
+	}
 
 	/* GMADR is the PCI mmio aperture into the global GTT. */
 	DRM_INFO("Memory usable by graphics device = %lluM\n",
@@ -3276,16 +3174,55 @@
 #endif
 
 	return 0;
+}
+
+/**
+ * i915_ggtt_init_hw - Initialize GGTT hardware
+ * @dev_priv: i915 device
+ */
+int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
+{
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	int ret;
+
+	INIT_LIST_HEAD(&dev_priv->vm_list);
+
+	/* Subtract the guard page before address space initialization to
+	 * shrink the range used by drm_mm.
+	 */
+	ggtt->base.total -= PAGE_SIZE;
+	i915_address_space_init(&ggtt->base, dev_priv);
+	ggtt->base.total += PAGE_SIZE;
+	if (!HAS_LLC(dev_priv))
+		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
+
+	if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
+				dev_priv->ggtt.mappable_base,
+				dev_priv->ggtt.mappable_end)) {
+		ret = -EIO;
+		goto out_gtt_cleanup;
+	}
+
+	ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
+
+	/*
+	 * Initialise stolen early so that we may reserve preallocated
+	 * objects for the BIOS to KMS transition.
+	 */
+	ret = i915_gem_init_stolen(&dev_priv->drm);
+	if (ret)
+		goto out_gtt_cleanup;
+
+	return 0;
 
 out_gtt_cleanup:
 	ggtt->base.cleanup(&ggtt->base);
-
 	return ret;
 }
 
-int i915_ggtt_enable_hw(struct drm_device *dev)
+int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
 {
-	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
+	if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
 		return -EIO;
 
 	return 0;
@@ -3295,8 +3232,7 @@
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
+	struct drm_i915_gem_object *obj, *on;
 
 	i915_check_and_clear_faults(dev_priv);
 
@@ -3304,20 +3240,32 @@
 	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
 			       true);
 
-	/* Cache flush objects bound into GGTT and rebind them. */
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+	ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
+
+	/* clflush objects bound into the GGTT and rebind them. */
+	list_for_each_entry_safe(obj, on,
+				 &dev_priv->mm.bound_list, global_list) {
+		bool ggtt_bound = false;
+		struct i915_vma *vma;
+
 		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 			if (vma->vm != &ggtt->base)
 				continue;
 
+			if (!i915_vma_unbind(vma))
+				continue;
+
 			WARN_ON(i915_vma_bind(vma, obj->cache_level,
 					      PIN_UPDATE));
+			ggtt_bound = true;
 		}
 
-		if (obj->pin_display)
+		if (ggtt_bound)
 			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
 	}
 
+	ggtt->base.closed = false;
+
 	if (INTEL_INFO(dev)->gen >= 8) {
 		if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
 			chv_setup_private_ppat(dev_priv);
@@ -3335,7 +3283,7 @@
 
 			struct i915_hw_ppgtt *ppgtt;
 
-			if (vm->is_ggtt)
+			if (i915_is_ggtt(vm))
 				ppgtt = dev_priv->mm.aliasing_ppgtt;
 			else
 				ppgtt = i915_vm_to_ppgtt(vm);
@@ -3348,67 +3296,157 @@
 	i915_ggtt_flush(dev_priv);
 }
 
+static void
+i915_vma_retire(struct i915_gem_active *active,
+		struct drm_i915_gem_request *rq)
+{
+	const unsigned int idx = rq->engine->id;
+	struct i915_vma *vma =
+		container_of(active, struct i915_vma, last_read[idx]);
+
+	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
+
+	i915_vma_clear_active(vma, idx);
+	if (i915_vma_is_active(vma))
+		return;
+
+	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
+		WARN_ON(i915_vma_unbind(vma));
+}
+
+void i915_vma_destroy(struct i915_vma *vma)
+{
+	GEM_BUG_ON(vma->node.allocated);
+	GEM_BUG_ON(i915_vma_is_active(vma));
+	GEM_BUG_ON(!i915_vma_is_closed(vma));
+	GEM_BUG_ON(vma->fence);
+
+	list_del(&vma->vm_link);
+	if (!i915_vma_is_ggtt(vma))
+		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
+
+	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+}
+
+void i915_vma_close(struct i915_vma *vma)
+{
+	GEM_BUG_ON(i915_vma_is_closed(vma));
+	vma->flags |= I915_VMA_CLOSED;
+
+	list_del_init(&vma->obj_link);
+	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
+		WARN_ON(i915_vma_unbind(vma));
+}
+
 static struct i915_vma *
-__i915_gem_vma_create(struct drm_i915_gem_object *obj,
-		      struct i915_address_space *vm,
-		      const struct i915_ggtt_view *ggtt_view)
+__i915_vma_create(struct drm_i915_gem_object *obj,
+		  struct i915_address_space *vm,
+		  const struct i915_ggtt_view *view)
 {
 	struct i915_vma *vma;
+	int i;
 
-	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
-		return ERR_PTR(-EINVAL);
+	GEM_BUG_ON(vm->closed);
 
 	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	INIT_LIST_HEAD(&vma->vm_link);
-	INIT_LIST_HEAD(&vma->obj_link);
 	INIT_LIST_HEAD(&vma->exec_list);
+	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
+		init_request_active(&vma->last_read[i], i915_vma_retire);
+	init_request_active(&vma->last_fence, NULL);
+	list_add(&vma->vm_link, &vm->unbound_list);
 	vma->vm = vm;
 	vma->obj = obj;
-	vma->is_ggtt = i915_is_ggtt(vm);
+	vma->size = obj->base.size;
 
-	if (i915_is_ggtt(vm))
-		vma->ggtt_view = *ggtt_view;
-	else
+	if (view) {
+		vma->ggtt_view = *view;
+		if (view->type == I915_GGTT_VIEW_PARTIAL) {
+			vma->size = view->params.partial.size;
+			vma->size <<= PAGE_SHIFT;
+		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
+			vma->size =
+				intel_rotation_info_size(&view->params.rotated);
+			vma->size <<= PAGE_SHIFT;
+		}
+	}
+
+	if (i915_is_ggtt(vm)) {
+		vma->flags |= I915_VMA_GGTT;
+	} else {
 		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+	}
 
 	list_add_tail(&vma->obj_link, &obj->vma_list);
-
 	return vma;
 }
 
+static inline bool vma_matches(struct i915_vma *vma,
+			       struct i915_address_space *vm,
+			       const struct i915_ggtt_view *view)
+{
+	if (vma->vm != vm)
+		return false;
+
+	if (!i915_vma_is_ggtt(vma))
+		return true;
+
+	if (!view)
+		return vma->ggtt_view.type == 0;
+
+	if (vma->ggtt_view.type != view->type)
+		return false;
+
+	return memcmp(&vma->ggtt_view.params,
+		      &view->params,
+		      sizeof(view->params)) == 0;
+}
+
+struct i915_vma *
+i915_vma_create(struct drm_i915_gem_object *obj,
+		struct i915_address_space *vm,
+		const struct i915_ggtt_view *view)
+{
+	GEM_BUG_ON(view && !i915_is_ggtt(vm));
+	GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
+
+	return __i915_vma_create(obj, vm, view);
+}
+
+struct i915_vma *
+i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+		    struct i915_address_space *vm,
+		    const struct i915_ggtt_view *view)
+{
+	struct i915_vma *vma;
+
+	list_for_each_entry_reverse(vma, &obj->vma_list, obj_link)
+		if (vma_matches(vma, vm, view))
+			return vma;
+
+	return NULL;
+}
+
 struct i915_vma *
 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-				  struct i915_address_space *vm)
+				  struct i915_address_space *vm,
+				  const struct i915_ggtt_view *view)
 {
 	struct i915_vma *vma;
 
-	vma = i915_gem_obj_to_vma(obj, vm);
+	GEM_BUG_ON(view && !i915_is_ggtt(vm));
+
+	vma = i915_gem_obj_to_vma(obj, vm, view);
 	if (!vma)
-		vma = __i915_gem_vma_create(obj, vm,
-					    i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);
+		vma = __i915_vma_create(obj, vm, view);
 
+	GEM_BUG_ON(i915_vma_is_closed(vma));
 	return vma;
 }
 
-struct i915_vma *
-i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
-				       const struct i915_ggtt_view *view)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
-
-	if (!vma)
-		vma = __i915_gem_vma_create(obj, &ggtt->base, view);
-
-	return vma;
-
-}
-
 static struct scatterlist *
 rotate_pages(const dma_addr_t *in, unsigned int offset,
 	     unsigned int width, unsigned int height,
@@ -3438,18 +3476,16 @@
 }
 
 static struct sg_table *
-intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
+intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
 			  struct drm_i915_gem_object *obj)
 {
 	const size_t n_pages = obj->base.size / PAGE_SIZE;
-	unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
-	unsigned int size_pages_uv;
+	unsigned int size = intel_rotation_info_size(rot_info);
 	struct sgt_iter sgt_iter;
 	dma_addr_t dma_addr;
 	unsigned long i;
 	dma_addr_t *page_addr_list;
 	struct sg_table *st;
-	unsigned int uv_start_page;
 	struct scatterlist *sg;
 	int ret = -ENOMEM;
 
@@ -3460,18 +3496,12 @@
 	if (!page_addr_list)
 		return ERR_PTR(ret);
 
-	/* Account for UV plane with NV12. */
-	if (rot_info->pixel_format == DRM_FORMAT_NV12)
-		size_pages_uv = rot_info->plane[1].width * rot_info->plane[1].height;
-	else
-		size_pages_uv = 0;
-
 	/* Allocate target SG list. */
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (!st)
 		goto err_st_alloc;
 
-	ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL);
+	ret = sg_alloc_table(st, size, GFP_KERNEL);
 	if (ret)
 		goto err_sg_alloc;
 
@@ -3484,32 +3514,14 @@
 	st->nents = 0;
 	sg = st->sgl;
 
-	/* Rotate the pages. */
-	sg = rotate_pages(page_addr_list, 0,
-			  rot_info->plane[0].width, rot_info->plane[0].height,
-			  rot_info->plane[0].width,
-			  st, sg);
-
-	/* Append the UV plane if NV12. */
-	if (rot_info->pixel_format == DRM_FORMAT_NV12) {
-		uv_start_page = size_pages;
-
-		/* Check for tile-row un-alignment. */
-		if (offset_in_page(rot_info->uv_offset))
-			uv_start_page--;
-
-		rot_info->uv_start_page = uv_start_page;
-
-		sg = rotate_pages(page_addr_list, rot_info->uv_start_page,
-				  rot_info->plane[1].width, rot_info->plane[1].height,
-				  rot_info->plane[1].width,
-				  st, sg);
+	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
+		sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
+				  rot_info->plane[i].width, rot_info->plane[i].height,
+				  rot_info->plane[i].stride, st, sg);
 	}
 
-	DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages (%u plane 0)).\n",
-		      obj->base.size, rot_info->plane[0].width,
-		      rot_info->plane[0].height, size_pages + size_pages_uv,
-		      size_pages);
+	DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
+		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
 
 	drm_free_large(page_addr_list);
 
@@ -3520,10 +3532,9 @@
 err_st_alloc:
 	drm_free_large(page_addr_list);
 
-	DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%d) (%ux%u tiles, %u pages (%u plane 0))\n",
-		      obj->base.size, ret, rot_info->plane[0].width,
-		      rot_info->plane[0].height, size_pages + size_pages_uv,
-		      size_pages);
+	DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
+
 	return ERR_PTR(ret);
 }
 
@@ -3573,28 +3584,27 @@
 {
 	int ret = 0;
 
-	if (vma->ggtt_view.pages)
+	if (vma->pages)
 		return 0;
 
 	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
-		vma->ggtt_view.pages = vma->obj->pages;
+		vma->pages = vma->obj->pages;
 	else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
-		vma->ggtt_view.pages =
+		vma->pages =
 			intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
 	else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
-		vma->ggtt_view.pages =
-			intel_partial_pages(&vma->ggtt_view, vma->obj);
+		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
 	else
 		WARN_ONCE(1, "GGTT view %u not implemented!\n",
 			  vma->ggtt_view.type);
 
-	if (!vma->ggtt_view.pages) {
+	if (!vma->pages) {
 		DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
 			  vma->ggtt_view.type);
 		ret = -EINVAL;
-	} else if (IS_ERR(vma->ggtt_view.pages)) {
-		ret = PTR_ERR(vma->ggtt_view.pages);
-		vma->ggtt_view.pages = NULL;
+	} else if (IS_ERR(vma->pages)) {
+		ret = PTR_ERR(vma->pages);
+		vma->pages = NULL;
 		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
 			  vma->ggtt_view.type, ret);
 	}
@@ -3615,34 +3625,32 @@
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 		  u32 flags)
 {
-	int ret;
 	u32 bind_flags;
+	u32 vma_flags;
+	int ret;
 
 	if (WARN_ON(flags == 0))
 		return -EINVAL;
 
 	bind_flags = 0;
 	if (flags & PIN_GLOBAL)
-		bind_flags |= GLOBAL_BIND;
+		bind_flags |= I915_VMA_GLOBAL_BIND;
 	if (flags & PIN_USER)
-		bind_flags |= LOCAL_BIND;
+		bind_flags |= I915_VMA_LOCAL_BIND;
 
+	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 	if (flags & PIN_UPDATE)
-		bind_flags |= vma->bound;
+		bind_flags |= vma_flags;
 	else
-		bind_flags &= ~vma->bound;
-
+		bind_flags &= ~vma_flags;
 	if (bind_flags == 0)
 		return 0;
 
-	if (vma->bound == 0 && vma->vm->allocate_va_range) {
-		/* XXX: i915_vma_pin() will fix this +- hack */
-		vma->pin_count++;
+	if (vma_flags == 0 && vma->vm->allocate_va_range) {
 		trace_i915_va_alloc(vma);
 		ret = vma->vm->allocate_va_range(vma->vm,
 						 vma->node.start,
 						 vma->node.size);
-		vma->pin_count--;
 		if (ret)
 			return ret;
 	}
@@ -3651,56 +3659,47 @@
 	if (ret)
 		return ret;
 
-	vma->bound |= bind_flags;
-
+	vma->flags |= bind_flags;
 	return 0;
 }
 
-/**
- * i915_ggtt_view_size - Get the size of a GGTT view.
- * @obj: Object the view is of.
- * @view: The view in question.
- *
- * @return The size of the GGTT view in bytes.
- */
-size_t
-i915_ggtt_view_size(struct drm_i915_gem_object *obj,
-		    const struct i915_ggtt_view *view)
-{
-	if (view->type == I915_GGTT_VIEW_NORMAL) {
-		return obj->base.size;
-	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
-		return intel_rotation_info_size(&view->params.rotated) << PAGE_SHIFT;
-	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
-		return view->params.partial.size << PAGE_SHIFT;
-	} else {
-		WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
-		return obj->base.size;
-	}
-}
-
 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 {
 	void __iomem *ptr;
 
-	lockdep_assert_held(&vma->vm->dev->struct_mutex);
-	if (WARN_ON(!vma->obj->map_and_fenceable))
-		return ERR_PTR(-ENODEV);
+	/* Access through the GTT requires the device to be awake. */
+	assert_rpm_wakelock_held(to_i915(vma->vm->dev));
 
-	GEM_BUG_ON(!vma->is_ggtt);
-	GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
+	lockdep_assert_held(&vma->vm->dev->struct_mutex);
+	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
+		return IO_ERR_PTR(-ENODEV);
+
+	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
 
 	ptr = vma->iomap;
 	if (ptr == NULL) {
-		ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
+		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
 					vma->node.start,
 					vma->node.size);
 		if (ptr == NULL)
-			return ERR_PTR(-ENOMEM);
+			return IO_ERR_PTR(-ENOMEM);
 
 		vma->iomap = ptr;
 	}
 
-	vma->pin_count++;
+	__i915_vma_pin(vma);
 	return ptr;
 }
+
+void i915_vma_unpin_and_release(struct i915_vma **p_vma)
+{
+	struct i915_vma *vma;
+
+	vma = fetch_and_zero(p_vma);
+	if (!vma)
+		return;
+
+	i915_vma_unpin(vma);
+	i915_vma_put(vma);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index aa5f31d..ec78be2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -36,7 +36,15 @@
 
 #include <linux/io-mapping.h>
 
+#include "i915_gem_request.h"
+
+#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 32
+/* 32 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 6
+
 struct drm_i915_file_private;
+struct drm_i915_fence_reg;
 
 typedef uint32_t gen6_pte_t;
 typedef uint64_t gen8_pte_t;
@@ -137,12 +145,9 @@
 };
 
 struct intel_rotation_info {
-	unsigned int uv_offset;
-	uint32_t pixel_format;
-	unsigned int uv_start_page;
 	struct {
 		/* tiles */
-		unsigned int width, height;
+		unsigned int width, height, stride, offset;
 	} plane[2];
 };
 
@@ -156,8 +161,6 @@
 		} partial;
 		struct intel_rotation_info rotated;
 	} params;
-
-	struct sg_table *pages;
 };
 
 extern const struct i915_ggtt_view i915_ggtt_view_normal;
@@ -177,13 +180,38 @@
 	struct drm_mm_node node;
 	struct drm_i915_gem_object *obj;
 	struct i915_address_space *vm;
+	struct drm_i915_fence_reg *fence;
+	struct sg_table *pages;
 	void __iomem *iomap;
+	u64 size;
+	u64 display_alignment;
+
+	unsigned int flags;
+	/**
+	 * How many users have pinned this object in GTT space. The following
+	 * users can each hold at most one reference: pwrite/pread, execbuffer
+	 * (objects are not allowed multiple times for the same batchbuffer),
+	 * and the framebuffer code. When switching/pageflipping, the
+	 * framebuffer code has at most two buffers pinned per crtc.
+	 *
+	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+	 * bits with absolutely no headroom. So use 4 bits.
+	 */
+#define I915_VMA_PIN_MASK 0xf
+#define I915_VMA_PIN_OVERFLOW	BIT(5)
 
 	/** Flags and address space this VMA is bound to */
-#define GLOBAL_BIND	(1<<0)
-#define LOCAL_BIND	(1<<1)
-	unsigned int bound : 4;
-	bool is_ggtt : 1;
+#define I915_VMA_GLOBAL_BIND	BIT(6)
+#define I915_VMA_LOCAL_BIND	BIT(7)
+#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
+
+#define I915_VMA_GGTT		BIT(8)
+#define I915_VMA_CAN_FENCE	BIT(9)
+#define I915_VMA_CLOSED		BIT(10)
+
+	unsigned int active;
+	struct i915_gem_active last_read[I915_NUM_ENGINES];
+	struct i915_gem_active last_fence;
 
 	/**
 	 * Support different GGTT views into the same object.
@@ -208,20 +236,66 @@
 	struct hlist_node exec_node;
 	unsigned long exec_handle;
 	struct drm_i915_gem_exec_object2 *exec_entry;
-
-	/**
-	 * How many users have pinned this object in GTT space. The following
-	 * users can each hold at most one reference: pwrite/pread, execbuffer
-	 * (objects are not allowed multiple times for the same batchbuffer),
-	 * and the framebuffer code. When switching/pageflipping, the
-	 * framebuffer code has at most two buffers pinned per crtc.
-	 *
-	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
-	 * bits with absolutely no headroom. So use 4 bits. */
-	unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 };
 
+struct i915_vma *
+i915_vma_create(struct drm_i915_gem_object *obj,
+		struct i915_address_space *vm,
+		const struct i915_ggtt_view *view);
+void i915_vma_unpin_and_release(struct i915_vma **p_vma);
+
+static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
+{
+	return vma->flags & I915_VMA_GGTT;
+}
+
+static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
+{
+	return vma->flags & I915_VMA_CAN_FENCE;
+}
+
+static inline bool i915_vma_is_closed(const struct i915_vma *vma)
+{
+	return vma->flags & I915_VMA_CLOSED;
+}
+
+static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
+{
+	return vma->active;
+}
+
+static inline bool i915_vma_is_active(const struct i915_vma *vma)
+{
+	return i915_vma_get_active(vma);
+}
+
+static inline void i915_vma_set_active(struct i915_vma *vma,
+				       unsigned int engine)
+{
+	vma->active |= BIT(engine);
+}
+
+static inline void i915_vma_clear_active(struct i915_vma *vma,
+					 unsigned int engine)
+{
+	vma->active &= ~BIT(engine);
+}
+
+static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
+					      unsigned int engine)
+{
+	return vma->active & BIT(engine);
+}
+
+static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
+{
+	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+	GEM_BUG_ON(!vma->node.allocated);
+	GEM_BUG_ON(upper_32_bits(vma->node.start));
+	GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
+	return lower_32_bits(vma->node.start);
+}
+
 struct i915_page_dma {
 	struct page *page;
 	union {
@@ -238,10 +312,6 @@
 #define px_page(px) (px_base(px)->page)
 #define px_dma(px) (px_base(px)->daddr)
 
-struct i915_page_scratch {
-	struct i915_page_dma base;
-};
-
 struct i915_page_table {
 	struct i915_page_dma base;
 
@@ -272,13 +342,22 @@
 struct i915_address_space {
 	struct drm_mm mm;
 	struct drm_device *dev;
+	/* Every address space belongs to a struct file - except for the global
+	 * GTT that is owned by the driver (and so @file is set to NULL). In
+	 * principle, no information should leak from one context to another
+	 * (or between files/processes etc) unless explicitly shared by the
+	 * owner. Tracking the owner is important in order to free up per-file
+	 * objects along with the file, to aide resource tracking, and to
+	 * assign blame.
+	 */
+	struct drm_i915_file_private *file;
 	struct list_head global_link;
 	u64 start;		/* Start offset always 0 for dri2 */
 	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
 
-	bool is_ggtt;
+	bool closed;
 
-	struct i915_page_scratch *scratch_page;
+	struct i915_page_dma scratch_page;
 	struct i915_page_table *scratch_pt;
 	struct i915_page_directory *scratch_pd;
 	struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */
@@ -306,6 +385,13 @@
 	 */
 	struct list_head inactive_list;
 
+	/**
+	 * List of vma that have been unbound.
+	 *
+	 * A reference is not held on the buffer while on this list.
+	 */
+	struct list_head unbound_list;
+
 	/* FIXME: Need a more generic return type */
 	gen6_pte_t (*pte_encode)(dma_addr_t addr,
 				 enum i915_cache_level level,
@@ -338,7 +424,7 @@
 			u32 flags);
 };
 
-#define i915_is_ggtt(V) ((V)->is_ggtt)
+#define i915_is_ggtt(V) (!(V)->file)
 
 /* The Graphics Translation Table is the way in which GEN hardware translates a
  * Graphics Virtual Address into a Physical Address. In addition to the normal
@@ -349,14 +435,13 @@
  */
 struct i915_ggtt {
 	struct i915_address_space base;
+	struct io_mapping mappable;	/* Mapping to our CPU mappable region */
 
 	size_t stolen_size;		/* Total size of stolen memory */
 	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
 	size_t stolen_reserved_base;
 	size_t stolen_reserved_size;
-	size_t size;			/* Total size of Global GTT */
 	u64 mappable_end;		/* End offset that we can CPU map */
-	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
 	phys_addr_t mappable_base;	/* PA of our GMADR */
 
 	/** "Graphics Stolen Memory" holds the global PTEs */
@@ -365,8 +450,6 @@
 	bool do_idle_maps;
 
 	int mtrr;
-
-	int (*probe)(struct i915_ggtt *ggtt);
 };
 
 struct i915_hw_ppgtt {
@@ -380,8 +463,6 @@
 		struct i915_page_directory pd;		/* GEN6-7 */
 	};
 
-	struct drm_i915_file_private *file_priv;
-
 	gen6_pte_t __iomem *pd_addr;
 
 	int (*enable)(struct i915_hw_ppgtt *ppgtt);
@@ -521,14 +602,15 @@
 		px_dma(ppgtt->base.scratch_pd);
 }
 
-int i915_ggtt_init_hw(struct drm_device *dev);
-int i915_ggtt_enable_hw(struct drm_device *dev);
-void i915_gem_init_ggtt(struct drm_device *dev);
-void i915_ggtt_cleanup_hw(struct drm_device *dev);
+int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
+int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
+int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
+int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
+void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
 
 int i915_ppgtt_init_hw(struct drm_device *dev);
 void i915_ppgtt_release(struct kref *kref);
-struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
+struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
 					struct drm_i915_file_private *fpriv);
 static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
 {
@@ -548,23 +630,67 @@
 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
 
-static inline bool
-i915_ggtt_view_equal(const struct i915_ggtt_view *a,
-                     const struct i915_ggtt_view *b)
-{
-	if (WARN_ON(!a || !b))
-		return false;
+/* Flags used by pin/bind&friends. */
+#define PIN_NONBLOCK		BIT(0)
+#define PIN_MAPPABLE		BIT(1)
+#define PIN_ZONE_4G		BIT(2)
+#define PIN_NONFAULT		BIT(3)
 
-	if (a->type != b->type)
-		return false;
-	if (a->type != I915_GGTT_VIEW_NORMAL)
-		return !memcmp(&a->params, &b->params, sizeof(a->params));
-	return true;
+#define PIN_MBZ			BIT(5) /* I915_VMA_PIN_OVERFLOW */
+#define PIN_GLOBAL		BIT(6) /* I915_VMA_GLOBAL_BIND */
+#define PIN_USER		BIT(7) /* I915_VMA_LOCAL_BIND */
+#define PIN_UPDATE		BIT(8)
+
+#define PIN_HIGH		BIT(9)
+#define PIN_OFFSET_BIAS		BIT(10)
+#define PIN_OFFSET_FIXED	BIT(11)
+#define PIN_OFFSET_MASK		(~4095)
+
+int __i915_vma_do_pin(struct i915_vma *vma,
+		      u64 size, u64 alignment, u64 flags);
+static inline int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+	BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
+	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
+	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
+
+	/* Pin early to prevent the shrinker/eviction logic from destroying
+	 * our vma as we insert and bind.
+	 */
+	if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
+		return 0;
+
+	return __i915_vma_do_pin(vma, size, alignment, flags);
 }
 
-size_t
-i915_ggtt_view_size(struct drm_i915_gem_object *obj,
-		    const struct i915_ggtt_view *view);
+static inline int i915_vma_pin_count(const struct i915_vma *vma)
+{
+	return vma->flags & I915_VMA_PIN_MASK;
+}
+
+static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
+{
+	return i915_vma_pin_count(vma);
+}
+
+static inline void __i915_vma_pin(struct i915_vma *vma)
+{
+	vma->flags++;
+	GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+}
+
+static inline void __i915_vma_unpin(struct i915_vma *vma)
+{
+	GEM_BUG_ON(!i915_vma_is_pinned(vma));
+	vma->flags--;
+}
+
+static inline void i915_vma_unpin(struct i915_vma *vma)
+{
+	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+	__i915_vma_unpin(vma);
+}
 
 /**
  * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
@@ -580,6 +706,7 @@
  * Returns a valid iomapped pointer or ERR_PTR.
  */
 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
+#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))
 
 /**
  * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap
@@ -593,9 +720,14 @@
 static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
 {
 	lockdep_assert_held(&vma->vm->dev->struct_mutex);
-	GEM_BUG_ON(vma->pin_count == 0);
 	GEM_BUG_ON(vma->iomap == NULL);
-	vma->pin_count--;
+	i915_vma_unpin(vma);
+}
+
+static inline struct page *i915_vma_first_page(struct i915_vma *vma)
+{
+	GEM_BUG_ON(!vma->pages);
+	return sg_page(vma->pages->sgl);
 }
 
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index f75bbd6..95b7e9a 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -28,10 +28,17 @@
 #include "i915_drv.h"
 #include "intel_renderstate.h"
 
+struct render_state {
+	const struct intel_renderstate_rodata *rodata;
+	struct i915_vma *vma;
+	u32 aux_batch_size;
+	u32 aux_batch_offset;
+};
+
 static const struct intel_renderstate_rodata *
-render_state_get_rodata(const int gen)
+render_state_get_rodata(const struct drm_i915_gem_request *req)
 {
-	switch (gen) {
+	switch (INTEL_GEN(req->i915)) {
 	case 6:
 		return &gen6_null_state;
 	case 7:
@@ -45,35 +52,6 @@
 	return NULL;
 }
 
-static int render_state_init(struct render_state *so,
-			     struct drm_i915_private *dev_priv)
-{
-	int ret;
-
-	so->gen = INTEL_GEN(dev_priv);
-	so->rodata = render_state_get_rodata(so->gen);
-	if (so->rodata == NULL)
-		return 0;
-
-	if (so->rodata->batch_items * 4 > 4096)
-		return -EINVAL;
-
-	so->obj = i915_gem_object_create(&dev_priv->drm, 4096);
-	if (IS_ERR(so->obj))
-		return PTR_ERR(so->obj);
-
-	ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
-	if (ret)
-		goto free_gem;
-
-	so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
-	return 0;
-
-free_gem:
-	drm_gem_object_unreference(&so->obj->base);
-	return ret;
-}
-
 /*
  * Macro to add commands to auxiliary batch.
  * This macro only checks for page overflow before inserting the commands,
@@ -94,27 +72,28 @@
 
 static int render_state_setup(struct render_state *so)
 {
-	struct drm_device *dev = so->obj->base.dev;
+	struct drm_device *dev = so->vma->vm->dev;
 	const struct intel_renderstate_rodata *rodata = so->rodata;
+	const bool has_64bit_reloc = INTEL_GEN(dev) >= 8;
 	unsigned int i = 0, reloc_index = 0;
 	struct page *page;
 	u32 *d;
 	int ret;
 
-	ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
+	ret = i915_gem_object_set_to_cpu_domain(so->vma->obj, true);
 	if (ret)
 		return ret;
 
-	page = i915_gem_object_get_dirty_page(so->obj, 0);
+	page = i915_gem_object_get_dirty_page(so->vma->obj, 0);
 	d = kmap(page);
 
 	while (i < rodata->batch_items) {
 		u32 s = rodata->batch[i];
 
 		if (i * 4  == rodata->reloc[reloc_index]) {
-			u64 r = s + so->ggtt_offset;
+			u64 r = s + so->vma->node.start;
 			s = lower_32_bits(r);
-			if (so->gen >= 8) {
+			if (has_64bit_reloc) {
 				if (i + 1 >= rodata->batch_items ||
 				    rodata->batch[i + 1] != 0) {
 					ret = -EINVAL;
@@ -174,7 +153,7 @@
 
 	kunmap(page);
 
-	ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
+	ret = i915_gem_object_set_to_gtt_domain(so->vma->obj, false);
 	if (ret)
 		return ret;
 
@@ -192,67 +171,60 @@
 
 #undef OUT_BATCH
 
-void i915_gem_render_state_fini(struct render_state *so)
-{
-	i915_gem_object_ggtt_unpin(so->obj);
-	drm_gem_object_unreference(&so->obj->base);
-}
-
-int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
-				  struct render_state *so)
-{
-	int ret;
-
-	if (WARN_ON(engine->id != RCS))
-		return -ENOENT;
-
-	ret = render_state_init(so, engine->i915);
-	if (ret)
-		return ret;
-
-	if (so->rodata == NULL)
-		return 0;
-
-	ret = render_state_setup(so);
-	if (ret) {
-		i915_gem_render_state_fini(so);
-		return ret;
-	}
-
-	return 0;
-}
-
 int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 {
 	struct render_state so;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
-	ret = i915_gem_render_state_prepare(req->engine, &so);
-	if (ret)
-		return ret;
+	if (WARN_ON(req->engine->id != RCS))
+		return -ENOENT;
 
-	if (so.rodata == NULL)
+	so.rodata = render_state_get_rodata(req);
+	if (!so.rodata)
 		return 0;
 
-	ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
-					     so.rodata->batch_items * 4,
-					     I915_DISPATCH_SECURE);
-	if (ret)
-		goto out;
+	if (so.rodata->batch_items * 4 > 4096)
+		return -EINVAL;
 
-	if (so.aux_batch_size > 8) {
-		ret = req->engine->dispatch_execbuffer(req,
-						     (so.ggtt_offset +
-						      so.aux_batch_offset),
-						     so.aux_batch_size,
-						     I915_DISPATCH_SECURE);
-		if (ret)
-			goto out;
+	obj = i915_gem_object_create(&req->i915->drm, 4096);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	so.vma = i915_vma_create(obj, &req->i915->ggtt.base, NULL);
+	if (IS_ERR(so.vma)) {
+		ret = PTR_ERR(so.vma);
+		goto err_obj;
 	}
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
+	ret = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL);
+	if (ret)
+		goto err_obj;
 
-out:
-	i915_gem_render_state_fini(&so);
+	ret = render_state_setup(&so);
+	if (ret)
+		goto err_unpin;
+
+	ret = req->engine->emit_bb_start(req, so.vma->node.start,
+					 so.rodata->batch_items * 4,
+					 I915_DISPATCH_SECURE);
+	if (ret)
+		goto err_unpin;
+
+	if (so.aux_batch_size > 8) {
+		ret = req->engine->emit_bb_start(req,
+						 (so.vma->node.start +
+						  so.aux_batch_offset),
+						 so.aux_batch_size,
+						 I915_DISPATCH_SECURE);
+		if (ret)
+			goto err_unpin;
+	}
+
+	i915_vma_move_to_active(so.vma, req, 0);
+err_unpin:
+	i915_vma_unpin(so.vma);
+err_obj:
+	i915_gem_object_put(obj);
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index 6aaa3a1..18cce3f 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -24,26 +24,8 @@
 #ifndef _I915_GEM_RENDER_STATE_H_
 #define _I915_GEM_RENDER_STATE_H_
 
-#include <linux/types.h>
-
-struct intel_renderstate_rodata {
-	const u32 *reloc;
-	const u32 *batch;
-	const u32 batch_items;
-};
-
-struct render_state {
-	const struct intel_renderstate_rodata *rodata;
-	struct drm_i915_gem_object *obj;
-	u64 ggtt_offset;
-	int gen;
-	u32 aux_batch_size;
-	u32 aux_batch_offset;
-};
+struct drm_i915_gem_request;
 
 int i915_gem_render_state_init(struct drm_i915_gem_request *req);
-void i915_gem_render_state_fini(struct render_state *so);
-int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
-				  struct render_state *so);
 
 #endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
new file mode 100644
index 0000000..8832f8e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -0,0 +1,947 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/prefetch.h>
+
+#include "i915_drv.h"
+
+static const char *i915_fence_get_driver_name(struct fence *fence)
+{
+	return "i915";
+}
+
+static const char *i915_fence_get_timeline_name(struct fence *fence)
+{
+	/* Timelines are bound by eviction to a VM. However, since
+	 * we only have a global seqno at the moment, we only have
+	 * a single timeline. Note that each timeline will have
+	 * multiple execution contexts (fence contexts) as we allow
+	 * engines within a single timeline to execute in parallel.
+	 */
+	return "global";
+}
+
+static bool i915_fence_signaled(struct fence *fence)
+{
+	return i915_gem_request_completed(to_request(fence));
+}
+
+static bool i915_fence_enable_signaling(struct fence *fence)
+{
+	if (i915_fence_signaled(fence))
+		return false;
+
+	intel_engine_enable_signaling(to_request(fence));
+	return true;
+}
+
+static signed long i915_fence_wait(struct fence *fence,
+				   bool interruptible,
+				   signed long timeout_jiffies)
+{
+	s64 timeout_ns, *timeout;
+	int ret;
+
+	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
+		timeout_ns = jiffies_to_nsecs(timeout_jiffies);
+		timeout = &timeout_ns;
+	} else {
+		timeout = NULL;
+	}
+
+	ret = i915_wait_request(to_request(fence),
+				interruptible, timeout,
+				NO_WAITBOOST);
+	if (ret == -ETIME)
+		return 0;
+
+	if (ret < 0)
+		return ret;
+
+	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
+		timeout_jiffies = nsecs_to_jiffies(timeout_ns);
+
+	return timeout_jiffies;
+}
+
+static void i915_fence_value_str(struct fence *fence, char *str, int size)
+{
+	snprintf(str, size, "%u", fence->seqno);
+}
+
+static void i915_fence_timeline_value_str(struct fence *fence, char *str,
+					  int size)
+{
+	snprintf(str, size, "%u",
+		 intel_engine_get_seqno(to_request(fence)->engine));
+}
+
+static void i915_fence_release(struct fence *fence)
+{
+	struct drm_i915_gem_request *req = to_request(fence);
+
+	kmem_cache_free(req->i915->requests, req);
+}
+
+const struct fence_ops i915_fence_ops = {
+	.get_driver_name = i915_fence_get_driver_name,
+	.get_timeline_name = i915_fence_get_timeline_name,
+	.enable_signaling = i915_fence_enable_signaling,
+	.signaled = i915_fence_signaled,
+	.wait = i915_fence_wait,
+	.release = i915_fence_release,
+	.fence_value_str = i915_fence_value_str,
+	.timeline_value_str = i915_fence_timeline_value_str,
+};
+
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+				   struct drm_file *file)
+{
+	struct drm_i915_private *dev_private;
+	struct drm_i915_file_private *file_priv;
+
+	WARN_ON(!req || !file || req->file_priv);
+
+	if (!req || !file)
+		return -EINVAL;
+
+	if (req->file_priv)
+		return -EINVAL;
+
+	dev_private = req->i915;
+	file_priv = file->driver_priv;
+
+	spin_lock(&file_priv->mm.lock);
+	req->file_priv = file_priv;
+	list_add_tail(&req->client_list, &file_priv->mm.request_list);
+	spin_unlock(&file_priv->mm.lock);
+
+	return 0;
+}
+
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+{
+	struct drm_i915_file_private *file_priv = request->file_priv;
+
+	if (!file_priv)
+		return;
+
+	spin_lock(&file_priv->mm.lock);
+	list_del(&request->client_list);
+	request->file_priv = NULL;
+	spin_unlock(&file_priv->mm.lock);
+}
+
+void i915_gem_retire_noop(struct i915_gem_active *active,
+			  struct drm_i915_gem_request *request)
+{
+	/* Space left intentionally blank */
+}
+
+static void i915_gem_request_retire(struct drm_i915_gem_request *request)
+{
+	struct i915_gem_active *active, *next;
+
+	trace_i915_gem_request_retire(request);
+	list_del(&request->link);
+
+	/* We know the GPU must have read the request to have
+	 * sent us the seqno + interrupt, so use the position
+	 * of tail of the request to update the last known position
+	 * of the GPU head.
+	 *
+	 * Note this requires that we are always called in request
+	 * completion order.
+	 */
+	list_del(&request->ring_link);
+	request->ring->last_retired_head = request->postfix;
+
+	/* Walk through the active list, calling retire on each. This allows
+	 * objects to track their GPU activity and mark themselves as idle
+	 * when their *last* active request is completed (updating state
+	 * tracking lists for eviction, active references for GEM, etc).
+	 *
+	 * As the ->retire() may free the node, we decouple it first and
+	 * pass along the auxiliary information (to avoid dereferencing
+	 * the node after the callback).
+	 */
+	list_for_each_entry_safe(active, next, &request->active_list, link) {
+		/* In microbenchmarks or focusing upon time inside the kernel,
+		 * we may spend an inordinate amount of time simply handling
+		 * the retirement of requests and processing their callbacks.
+		 * Of which, this loop itself is particularly hot due to the
+		 * cache misses when jumping around the list of i915_gem_active.
+		 * So we try to keep this loop as streamlined as possible and
+		 * also prefetch the next i915_gem_active to try and hide
+		 * the likely cache miss.
+		 */
+		prefetchw(next);
+
+		INIT_LIST_HEAD(&active->link);
+		RCU_INIT_POINTER(active->request, NULL);
+
+		active->retire(active, request);
+	}
+
+	i915_gem_request_remove_from_client(request);
+
+	if (request->previous_context) {
+		if (i915.enable_execlists)
+			intel_lr_context_unpin(request->previous_context,
+					       request->engine);
+	}
+
+	i915_gem_context_put(request->ctx);
+	i915_gem_request_put(request);
+}
+
+void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *engine = req->engine;
+	struct drm_i915_gem_request *tmp;
+
+	lockdep_assert_held(&req->i915->drm.struct_mutex);
+	GEM_BUG_ON(list_empty(&req->link));
+
+	do {
+		tmp = list_first_entry(&engine->request_list,
+				       typeof(*tmp), link);
+
+		i915_gem_request_retire(tmp);
+	} while (tmp != req);
+}
+
+static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
+{
+	struct i915_gpu_error *error = &dev_priv->gpu_error;
+
+	if (i915_terminally_wedged(error))
+		return -EIO;
+
+	if (i915_reset_in_progress(error)) {
+		/* Non-interruptible callers can't handle -EAGAIN, hence return
+		 * -EIO unconditionally for these.
+		 */
+		if (!dev_priv->mm.interruptible)
+			return -EIO;
+
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
+{
+	struct intel_engine_cs *engine;
+	int ret;
+
+	/* Carefully retire all requests without writing to the rings */
+	for_each_engine(engine, dev_priv) {
+		ret = intel_engine_idle(engine,
+					I915_WAIT_INTERRUPTIBLE |
+					I915_WAIT_LOCKED);
+		if (ret)
+			return ret;
+	}
+	i915_gem_retire_requests(dev_priv);
+
+	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
+	if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
+		while (intel_kick_waiters(dev_priv) ||
+		       intel_kick_signalers(dev_priv))
+			yield();
+	}
+
+	/* Finally reset hw state */
+	for_each_engine(engine, dev_priv)
+		intel_engine_init_seqno(engine, seqno);
+
+	return 0;
+}
+
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	int ret;
+
+	if (seqno == 0)
+		return -EINVAL;
+
+	/* HWS page needs to be set less than what we
+	 * will inject to ring
+	 */
+	ret = i915_gem_init_seqno(dev_priv, seqno - 1);
+	if (ret)
+		return ret;
+
+	dev_priv->next_seqno = seqno;
+	return 0;
+}
+
+static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
+{
+	/* reserve 0 for non-seqno */
+	if (unlikely(dev_priv->next_seqno == 0)) {
+		int ret;
+
+		ret = i915_gem_init_seqno(dev_priv, 0);
+		if (ret)
+			return ret;
+
+		dev_priv->next_seqno = 1;
+	}
+
+	*seqno = dev_priv->next_seqno++;
+	return 0;
+}
+
+static int __i915_sw_fence_call
+submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+	struct drm_i915_gem_request *request =
+		container_of(fence, typeof(*request), submit);
+
+	/* Will be called from irq-context when using foreign DMA fences */
+
+	switch (state) {
+	case FENCE_COMPLETE:
+		request->engine->last_submitted_seqno = request->fence.seqno;
+		request->engine->submit_request(request);
+		break;
+
+	case FENCE_FREE:
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+/**
+ * i915_gem_request_alloc - allocate a request structure
+ *
+ * @engine: engine that we wish to issue the request on.
+ * @ctx: context that the request will be associated with.
+ *       This can be NULL if the request is not directly related to
+ *       any specific user context, in which case this function will
+ *       choose an appropriate context to use.
+ *
+ * Returns a pointer to the allocated request if successful,
+ * or an error code if not.
+ */
+struct drm_i915_gem_request *
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+		       struct i915_gem_context *ctx)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_gem_request *req;
+	u32 seqno;
+	int ret;
+
+	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
+	 * and restart.
+	 */
+	ret = i915_gem_check_wedge(dev_priv);
+	if (ret)
+		return ERR_PTR(ret);
+
+	/* Move the oldest request to the slab-cache (if not in use!) */
+	req = list_first_entry_or_null(&engine->request_list,
+				       typeof(*req), link);
+	if (req && i915_gem_request_completed(req))
+		i915_gem_request_retire(req);
+
+	/* Beware: Dragons be flying overhead.
+	 *
+	 * We use RCU to look up requests in flight. The lookups may
+	 * race with the request being allocated from the slab freelist.
+	 * That is the request we are writing to here, may be in the process
+	 * of being read by __i915_gem_active_get_rcu(). As such,
+	 * we have to be very careful when overwriting the contents. During
+	 * the RCU lookup, we change chase the request->engine pointer,
+	 * read the request->fence.seqno and increment the reference count.
+	 *
+	 * The reference count is incremented atomically. If it is zero,
+	 * the lookup knows the request is unallocated and complete. Otherwise,
+	 * it is either still in use, or has been reallocated and reset
+	 * with fence_init(). This increment is safe for release as we check
+	 * that the request we have a reference to and matches the active
+	 * request.
+	 *
+	 * Before we increment the refcount, we chase the request->engine
+	 * pointer. We must not call kmem_cache_zalloc() or else we set
+	 * that pointer to NULL and cause a crash during the lookup. If
+	 * we see the request is completed (based on the value of the
+	 * old engine and seqno), the lookup is complete and reports NULL.
+	 * If we decide the request is not completed (new engine or seqno),
+	 * then we grab a reference and double check that it is still the
+	 * active request - which it won't be and restart the lookup.
+	 *
+	 * Do not use kmem_cache_zalloc() here!
+	 */
+	req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
+	if (!req)
+		return ERR_PTR(-ENOMEM);
+
+	ret = i915_gem_get_seqno(dev_priv, &seqno);
+	if (ret)
+		goto err;
+
+	spin_lock_init(&req->lock);
+	fence_init(&req->fence,
+		   &i915_fence_ops,
+		   &req->lock,
+		   engine->fence_context,
+		   seqno);
+
+	i915_sw_fence_init(&req->submit, submit_notify);
+
+	INIT_LIST_HEAD(&req->active_list);
+	req->i915 = dev_priv;
+	req->engine = engine;
+	req->ctx = i915_gem_context_get(ctx);
+
+	/* No zalloc, must clear what we need by hand */
+	req->previous_context = NULL;
+	req->file_priv = NULL;
+	req->batch = NULL;
+
+	/*
+	 * Reserve space in the ring buffer for all the commands required to
+	 * eventually emit this request. This is to guarantee that the
+	 * i915_add_request() call can't fail. Note that the reserve may need
+	 * to be redone if the request is not actually submitted straight
+	 * away, e.g. because a GPU scheduler has deferred it.
+	 */
+	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+
+	if (i915.enable_execlists)
+		ret = intel_logical_ring_alloc_request_extras(req);
+	else
+		ret = intel_ring_alloc_request_extras(req);
+	if (ret)
+		goto err_ctx;
+
+	/* Record the position of the start of the request so that
+	 * should we detect the updated seqno part-way through the
+	 * GPU processing the request, we never over-estimate the
+	 * position of the head.
+	 */
+	req->head = req->ring->tail;
+
+	return req;
+
+err_ctx:
+	i915_gem_context_put(ctx);
+err:
+	kmem_cache_free(dev_priv->requests, req);
+	return ERR_PTR(ret);
+}
+
+static int
+i915_gem_request_await_request(struct drm_i915_gem_request *to,
+			       struct drm_i915_gem_request *from)
+{
+	int idx, ret;
+
+	GEM_BUG_ON(to == from);
+
+	if (to->engine == from->engine)
+		return 0;
+
+	idx = intel_engine_sync_index(from->engine, to->engine);
+	if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
+		return 0;
+
+	trace_i915_gem_ring_sync_to(to, from);
+	if (!i915.semaphores) {
+		if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
+			ret = i915_sw_fence_await_dma_fence(&to->submit,
+							    &from->fence, 0,
+							    GFP_KERNEL);
+			if (ret < 0)
+				return ret;
+		}
+	} else {
+		ret = to->engine->semaphore.sync_to(to, from);
+		if (ret)
+			return ret;
+	}
+
+	from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
+	return 0;
+}
+
+/**
+ * i915_gem_request_await_object - set this request to (async) wait upon a bo
+ *
+ * @to: request we are wishing to use
+ * @obj: object which may be in use on another ring.
+ *
+ * This code is meant to abstract object synchronization with the GPU.
+ * Conceptually we serialise writes between engines inside the GPU.
+ * We only allow one engine to write into a buffer at any time, but
+ * multiple readers. To ensure each has a coherent view of memory, we must:
+ *
+ * - If there is an outstanding write request to the object, the new
+ *   request must wait for it to complete (either CPU or in hw, requests
+ *   on the same ring will be naturally ordered).
+ *
+ * - If we are a write request (pending_write_domain is set), the new
+ *   request must wait for outstanding read requests to complete.
+ *
+ * Returns 0 if successful, else propagates up the lower layer error.
+ */
+int
+i915_gem_request_await_object(struct drm_i915_gem_request *to,
+			      struct drm_i915_gem_object *obj,
+			      bool write)
+{
+	struct i915_gem_active *active;
+	unsigned long active_mask;
+	int idx;
+
+	if (write) {
+		active_mask = i915_gem_object_get_active(obj);
+		active = obj->last_read;
+	} else {
+		active_mask = 1;
+		active = &obj->last_write;
+	}
+
+	for_each_active(active_mask, idx) {
+		struct drm_i915_gem_request *request;
+		int ret;
+
+		request = i915_gem_active_peek(&active[idx],
+					       &obj->base.dev->struct_mutex);
+		if (!request)
+			continue;
+
+		ret = i915_gem_request_await_request(to, request);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+
+	dev_priv->gt.active_engines |= intel_engine_flag(engine);
+	if (dev_priv->gt.awake)
+		return;
+
+	intel_runtime_pm_get_noresume(dev_priv);
+	dev_priv->gt.awake = true;
+
+	intel_enable_gt_powersave(dev_priv);
+	i915_update_gfx_val(dev_priv);
+	if (INTEL_GEN(dev_priv) >= 6)
+		gen6_rps_busy(dev_priv);
+
+	queue_delayed_work(dev_priv->wq,
+			   &dev_priv->gt.retire_work,
+			   round_jiffies_up_relative(HZ));
+}
+
+/*
+ * NB: This function is not allowed to fail. Doing so would mean the the
+ * request is not being tracked for completion but the work itself is
+ * going to happen on the hardware. This would be a Bad Thing(tm).
+ */
+void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
+{
+	struct intel_engine_cs *engine = request->engine;
+	struct intel_ring *ring = request->ring;
+	struct drm_i915_gem_request *prev;
+	u32 request_start;
+	u32 reserved_tail;
+	int ret;
+
+	trace_i915_gem_request_add(request);
+
+	/*
+	 * To ensure that this call will not fail, space for its emissions
+	 * should already have been reserved in the ring buffer. Let the ring
+	 * know that it is time to use that space up.
+	 */
+	request_start = ring->tail;
+	reserved_tail = request->reserved_space;
+	request->reserved_space = 0;
+
+	/*
+	 * Emit any outstanding flushes - execbuf can fail to emit the flush
+	 * after having emitted the batchbuffer command. Hence we need to fix
+	 * things up similar to emitting the lazy request. The difference here
+	 * is that the flush _must_ happen before the next request, no matter
+	 * what.
+	 */
+	if (flush_caches) {
+		ret = engine->emit_flush(request, EMIT_FLUSH);
+
+		/* Not allowed to fail! */
+		WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
+	}
+
+	/* Record the position of the start of the breadcrumb so that
+	 * should we detect the updated seqno part-way through the
+	 * GPU processing the request, we never over-estimate the
+	 * position of the ring's HEAD.
+	 */
+	request->postfix = ring->tail;
+
+	/* Not allowed to fail! */
+	ret = engine->emit_request(request);
+	WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);
+
+	/* Sanity check that the reserved size was large enough. */
+	ret = ring->tail - request_start;
+	if (ret < 0)
+		ret += ring->size;
+	WARN_ONCE(ret > reserved_tail,
+		  "Not enough space reserved (%d bytes) "
+		  "for adding the request (%d bytes)\n",
+		  reserved_tail, ret);
+
+	/* Seal the request and mark it as pending execution. Note that
+	 * we may inspect this state, without holding any locks, during
+	 * hangcheck. Hence we apply the barrier to ensure that we do not
+	 * see a more recent value in the hws than we are tracking.
+	 */
+
+	prev = i915_gem_active_raw(&engine->last_request,
+				   &request->i915->drm.struct_mutex);
+	if (prev)
+		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
+					     &request->submitq);
+
+	request->emitted_jiffies = jiffies;
+	request->previous_seqno = engine->last_pending_seqno;
+	engine->last_pending_seqno = request->fence.seqno;
+	i915_gem_active_set(&engine->last_request, request);
+	list_add_tail(&request->link, &engine->request_list);
+	list_add_tail(&request->ring_link, &ring->request_list);
+
+	i915_gem_mark_busy(engine);
+
+	local_bh_disable();
+	i915_sw_fence_commit(&request->submit);
+	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+}
+
+static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	if (list_empty(&wait->task_list))
+		__add_wait_queue(q, wait);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+
+static unsigned long local_clock_us(unsigned int *cpu)
+{
+	unsigned long t;
+
+	/* Cheaply and approximately convert from nanoseconds to microseconds.
+	 * The result and subsequent calculations are also defined in the same
+	 * approximate microseconds units. The principal source of timing
+	 * error here is from the simple truncation.
+	 *
+	 * Note that local_clock() is only defined wrt to the current CPU;
+	 * the comparisons are no longer valid if we switch CPUs. Instead of
+	 * blocking preemption for the entire busywait, we can detect the CPU
+	 * switch and use that as indicator of system load and a reason to
+	 * stop busywaiting, see busywait_stop().
+	 */
+	*cpu = get_cpu();
+	t = local_clock() >> 10;
+	put_cpu();
+
+	return t;
+}
+
+static bool busywait_stop(unsigned long timeout, unsigned int cpu)
+{
+	unsigned int this_cpu;
+
+	if (time_after(local_clock_us(&this_cpu), timeout))
+		return true;
+
+	return this_cpu != cpu;
+}
+
+bool __i915_spin_request(const struct drm_i915_gem_request *req,
+			 int state, unsigned long timeout_us)
+{
+	unsigned int cpu;
+
+	/* When waiting for high frequency requests, e.g. during synchronous
+	 * rendering split between the CPU and GPU, the finite amount of time
+	 * required to set up the irq and wait upon it limits the response
+	 * rate. By busywaiting on the request completion for a short while we
+	 * can service the high frequency waits as quick as possible. However,
+	 * if it is a slow request, we want to sleep as quickly as possible.
+	 * The tradeoff between waiting and sleeping is roughly the time it
+	 * takes to sleep on a request, on the order of a microsecond.
+	 */
+
+	timeout_us += local_clock_us(&cpu);
+	do {
+		if (i915_gem_request_completed(req))
+			return true;
+
+		if (signal_pending_state(state, current))
+			break;
+
+		if (busywait_stop(timeout_us, cpu))
+			break;
+
+		cpu_relax_lowlatency();
+	} while (!need_resched());
+
+	return false;
+}
+
+/**
+ * i915_wait_request - wait until execution of request has finished
+ * @req: duh!
+ * @flags: how to wait
+ * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ * @rps: client to charge for RPS boosting
+ *
+ * Note: It is of utmost importance that the passed in seqno and reset_counter
+ * values have been read by the caller in an smp safe manner. Where read-side
+ * locks are involved, it is sufficient to read the reset_counter before
+ * unlocking the lock that protects the seqno. For lockless tricks, the
+ * reset_counter _must_ be read before, and an appropriate smp_rmb must be
+ * inserted.
+ *
+ * Returns 0 if the request was found within the alloted time. Else returns the
+ * errno with remaining time filled in timeout argument.
+ */
+int i915_wait_request(struct drm_i915_gem_request *req,
+		      unsigned int flags,
+		      s64 *timeout,
+		      struct intel_rps_client *rps)
+{
+	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
+		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+	DEFINE_WAIT(reset);
+	struct intel_wait wait;
+	unsigned long timeout_remain;
+	int ret = 0;
+
+	might_sleep();
+#if IS_ENABLED(CONFIG_LOCKDEP)
+	GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
+		   !!(flags & I915_WAIT_LOCKED));
+#endif
+
+	if (i915_gem_request_completed(req))
+		return 0;
+
+	timeout_remain = MAX_SCHEDULE_TIMEOUT;
+	if (timeout) {
+		if (WARN_ON(*timeout < 0))
+			return -EINVAL;
+
+		if (*timeout == 0)
+			return -ETIME;
+
+		/* Record current time in case interrupted, or wedged */
+		timeout_remain = nsecs_to_jiffies_timeout(*timeout);
+		*timeout += ktime_get_raw_ns();
+	}
+
+	trace_i915_gem_request_wait_begin(req);
+
+	/* This client is about to stall waiting for the GPU. In many cases
+	 * this is undesirable and limits the throughput of the system, as
+	 * many clients cannot continue processing user input/output whilst
+	 * blocked. RPS autotuning may take tens of milliseconds to respond
+	 * to the GPU load and thus incurs additional latency for the client.
+	 * We can circumvent that by promoting the GPU frequency to maximum
+	 * before we wait. This makes the GPU throttle up much more quickly
+	 * (good for benchmarks and user experience, e.g. window animations),
+	 * but at a cost of spending more power processing the workload
+	 * (bad for battery). Not all clients even want their results
+	 * immediately and for them we should just let the GPU select its own
+	 * frequency to maximise efficiency. To prevent a single client from
+	 * forcing the clocks too high for the whole system, we only allow
+	 * each client to waitboost once in a busy period.
+	 */
+	if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
+		gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
+
+	/* Optimistic short spin before touching IRQs */
+	if (i915_spin_request(req, state, 5))
+		goto complete;
+
+	set_current_state(state);
+	if (flags & I915_WAIT_LOCKED)
+		add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
+
+	intel_wait_init(&wait, req->fence.seqno);
+	if (intel_engine_add_wait(req->engine, &wait))
+		/* In order to check that we haven't missed the interrupt
+		 * as we enabled it, we need to kick ourselves to do a
+		 * coherent check on the seqno before we sleep.
+		 */
+		goto wakeup;
+
+	for (;;) {
+		if (signal_pending_state(state, current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		timeout_remain = io_schedule_timeout(timeout_remain);
+		if (timeout_remain == 0) {
+			ret = -ETIME;
+			break;
+		}
+
+		if (intel_wait_complete(&wait))
+			break;
+
+		set_current_state(state);
+
+wakeup:
+		/* Carefully check if the request is complete, giving time
+		 * for the seqno to be visible following the interrupt.
+		 * We also have to check in case we are kicked by the GPU
+		 * reset in order to drop the struct_mutex.
+		 */
+		if (__i915_request_irq_complete(req))
+			break;
+
+		/* If the GPU is hung, and we hold the lock, reset the GPU
+		 * and then check for completion. On a full reset, the engine's
+		 * HW seqno will be advanced passed us and we are complete.
+		 * If we do a partial reset, we have to wait for the GPU to
+		 * resume and update the breadcrumb.
+		 *
+		 * If we don't hold the mutex, we can just wait for the worker
+		 * to come along and update the breadcrumb (either directly
+		 * itself, or indirectly by recovering the GPU).
+		 */
+		if (flags & I915_WAIT_LOCKED &&
+		    i915_reset_in_progress(&req->i915->gpu_error)) {
+			__set_current_state(TASK_RUNNING);
+			i915_reset(req->i915);
+			reset_wait_queue(&req->i915->gpu_error.wait_queue,
+					 &reset);
+			continue;
+		}
+
+		/* Only spin if we know the GPU is processing this request */
+		if (i915_spin_request(req, state, 2))
+			break;
+	}
+
+	intel_engine_remove_wait(req->engine, &wait);
+	if (flags & I915_WAIT_LOCKED)
+		remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
+	__set_current_state(TASK_RUNNING);
+
+complete:
+	trace_i915_gem_request_wait_end(req);
+
+	if (timeout) {
+		*timeout -= ktime_get_raw_ns();
+		if (*timeout < 0)
+			*timeout = 0;
+
+		/*
+		 * Apparently ktime isn't accurate enough and occasionally has a
+		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+		 * things up to make the test happy. We allow up to 1 jiffy.
+		 *
+		 * This is a regrssion from the timespec->ktime conversion.
+		 */
+		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
+			*timeout = 0;
+	}
+
+	if (IS_RPS_USER(rps) &&
+	    req->fence.seqno == req->engine->last_submitted_seqno) {
+		/* The GPU is now idle and this client has stalled.
+		 * Since no other client has submitted a request in the
+		 * meantime, assume that this client is the only one
+		 * supplying work to the GPU but is unable to keep that
+		 * work supplied because it is waiting. Since the GPU is
+		 * then never kept fully busy, RPS autoclocking will
+		 * keep the clocks relatively low, causing further delays.
+		 * Compensate by giving the synchronous client credit for
+		 * a waitboost next time.
+		 */
+		spin_lock(&req->i915->rps.client_lock);
+		list_del_init(&rps->link);
+		spin_unlock(&req->i915->rps.client_lock);
+	}
+
+	return ret;
+}
+
+static bool engine_retire_requests(struct intel_engine_cs *engine)
+{
+	struct drm_i915_gem_request *request, *next;
+
+	list_for_each_entry_safe(request, next, &engine->request_list, link) {
+		if (!i915_gem_request_completed(request))
+			return false;
+
+		i915_gem_request_retire(request);
+	}
+
+	return true;
+}
+
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+	unsigned int tmp;
+
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+	if (dev_priv->gt.active_engines == 0)
+		return;
+
+	GEM_BUG_ON(!dev_priv->gt.awake);
+
+	for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines, tmp)
+		if (engine_retire_requests(engine))
+			dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
+
+	if (dev_priv->gt.active_engines == 0)
+		queue_delayed_work(dev_priv->wq,
+				   &dev_priv->gt.idle_work,
+				   msecs_to_jiffies(100));
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
new file mode 100644
index 0000000..974bd7b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -0,0 +1,689 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef I915_GEM_REQUEST_H
+#define I915_GEM_REQUEST_H
+
+#include <linux/fence.h>
+
+#include "i915_gem.h"
+#include "i915_sw_fence.h"
+
+struct intel_wait {
+	struct rb_node node;
+	struct task_struct *tsk;
+	u32 seqno;
+};
+
+struct intel_signal_node {
+	struct rb_node node;
+	struct intel_wait wait;
+};
+
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable sequence
+ * number comparisons on buffer last_read|write_seqno. It also allows an
+ * emission time to be associated with the request for tracking how far ahead
+ * of the GPU the submission is.
+ *
+ * When modifying this structure be very aware that we perform a lockless
+ * RCU lookup of it that may race against reallocation of the struct
+ * from the slab freelist. We intentionally do not zero the structure on
+ * allocation so that the lookup can use the dangling pointers (and is
+ * cogniscent that those pointers may be wrong). Instead, everything that
+ * needs to be initialised must be done so explicitly.
+ *
+ * The requests are reference counted.
+ */
+struct drm_i915_gem_request {
+	struct fence fence;
+	spinlock_t lock;
+
+	/** On Which ring this request was generated */
+	struct drm_i915_private *i915;
+
+	/**
+	 * Context and ring buffer related to this request
+	 * Contexts are refcounted, so when this request is associated with a
+	 * context, we must increment the context's refcount, to guarantee that
+	 * it persists while any request is linked to it. Requests themselves
+	 * are also refcounted, so the request will only be freed when the last
+	 * reference to it is dismissed, and the code in
+	 * i915_gem_request_free() will then decrement the refcount on the
+	 * context.
+	 */
+	struct i915_gem_context *ctx;
+	struct intel_engine_cs *engine;
+	struct intel_ring *ring;
+	struct intel_signal_node signaling;
+
+	struct i915_sw_fence submit;
+	wait_queue_t submitq;
+
+	/** GEM sequence number associated with the previous request,
+	 * when the HWS breadcrumb is equal to this the GPU is processing
+	 * this request.
+	 */
+	u32 previous_seqno;
+
+	/** Position in the ring of the start of the request */
+	u32 head;
+
+	/**
+	 * Position in the ring of the start of the postfix.
+	 * This is required to calculate the maximum available ring space
+	 * without overwriting the postfix.
+	 */
+	u32 postfix;
+
+	/** Position in the ring of the end of the whole request */
+	u32 tail;
+
+	/** Position in the ring of the end of any workarounds after the tail */
+	u32 wa_tail;
+
+	/** Preallocate space in the ring for the emitting the request */
+	u32 reserved_space;
+
+	/**
+	 * Context related to the previous request.
+	 * As the contexts are accessed by the hardware until the switch is
+	 * completed to a new context, the hardware may still be writing
+	 * to the context object after the breadcrumb is visible. We must
+	 * not unpin/unbind/prune that object whilst still active and so
+	 * we keep the previous context pinned until the following (this)
+	 * request is retired.
+	 */
+	struct i915_gem_context *previous_context;
+
+	/** Batch buffer related to this request if any (used for
+	 * error state dump only).
+	 */
+	struct i915_vma *batch;
+	struct list_head active_list;
+
+	/** Time at which this request was emitted, in jiffies. */
+	unsigned long emitted_jiffies;
+
+	/** engine->request_list entry for this request */
+	struct list_head link;
+
+	/** ring->request_list entry for this request */
+	struct list_head ring_link;
+
+	struct drm_i915_file_private *file_priv;
+	/** file_priv list entry for this request */
+	struct list_head client_list;
+
+	/** Link in the execlist submission queue, guarded by execlist_lock. */
+	struct list_head execlist_link;
+};
+
+extern const struct fence_ops i915_fence_ops;
+
+static inline bool fence_is_i915(struct fence *fence)
+{
+	return fence->ops == &i915_fence_ops;
+}
+
+struct drm_i915_gem_request * __must_check
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+		       struct i915_gem_context *ctx);
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+				   struct drm_file *file);
+void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
+
+static inline u32
+i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
+{
+	return req ? req->fence.seqno : 0;
+}
+
+static inline struct intel_engine_cs *
+i915_gem_request_get_engine(struct drm_i915_gem_request *req)
+{
+	return req ? req->engine : NULL;
+}
+
+static inline struct drm_i915_gem_request *
+to_request(struct fence *fence)
+{
+	/* We assume that NULL fence/request are interoperable */
+	BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
+	GEM_BUG_ON(fence && !fence_is_i915(fence));
+	return container_of(fence, struct drm_i915_gem_request, fence);
+}
+
+static inline struct drm_i915_gem_request *
+i915_gem_request_get(struct drm_i915_gem_request *req)
+{
+	return to_request(fence_get(&req->fence));
+}
+
+static inline struct drm_i915_gem_request *
+i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
+{
+	return to_request(fence_get_rcu(&req->fence));
+}
+
+static inline void
+i915_gem_request_put(struct drm_i915_gem_request *req)
+{
+	fence_put(&req->fence);
+}
+
+static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
+					   struct drm_i915_gem_request *src)
+{
+	if (src)
+		i915_gem_request_get(src);
+
+	if (*pdst)
+		i915_gem_request_put(*pdst);
+
+	*pdst = src;
+}
+
+int
+i915_gem_request_await_object(struct drm_i915_gem_request *to,
+			      struct drm_i915_gem_object *obj,
+			      bool write);
+
+void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
+#define i915_add_request(req) \
+	__i915_add_request(req, true)
+#define i915_add_request_no_flush(req) \
+	__i915_add_request(req, false)
+
+struct intel_rps_client;
+#define NO_WAITBOOST ERR_PTR(-1)
+#define IS_RPS_CLIENT(p) (!IS_ERR(p))
+#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
+
+int i915_wait_request(struct drm_i915_gem_request *req,
+		      unsigned int flags,
+		      s64 *timeout,
+		      struct intel_rps_client *rps)
+	__attribute__((nonnull(1)));
+#define I915_WAIT_INTERRUPTIBLE	BIT(0)
+#define I915_WAIT_LOCKED	BIT(1) /* struct_mutex held, handle GPU reset */
+
+static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
+
+/**
+ * Returns true if seq1 is later than seq2.
+ */
+static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
+{
+	return (s32)(seq1 - seq2) >= 0;
+}
+
+static inline bool
+i915_gem_request_started(const struct drm_i915_gem_request *req)
+{
+	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
+				 req->previous_seqno);
+}
+
+static inline bool
+i915_gem_request_completed(const struct drm_i915_gem_request *req)
+{
+	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
+				 req->fence.seqno);
+}
+
+bool __i915_spin_request(const struct drm_i915_gem_request *request,
+			 int state, unsigned long timeout_us);
+static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
+				     int state, unsigned long timeout_us)
+{
+	return (i915_gem_request_started(request) &&
+		__i915_spin_request(request, state, timeout_us));
+}
+
+/* We treat requests as fences. This is not be to confused with our
+ * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
+ * We use the fences to synchronize access from the CPU with activity on the
+ * GPU, for example, we should not rewrite an object's PTE whilst the GPU
+ * is reading them. We also track fences at a higher level to provide
+ * implicit synchronisation around GEM objects, e.g. set-domain will wait
+ * for outstanding GPU rendering before marking the object ready for CPU
+ * access, or a pageflip will wait until the GPU is complete before showing
+ * the frame on the scanout.
+ *
+ * In order to use a fence, the object must track the fence it needs to
+ * serialise with. For example, GEM objects want to track both read and
+ * write access so that we can perform concurrent read operations between
+ * the CPU and GPU engines, as well as waiting for all rendering to
+ * complete, or waiting for the last GPU user of a "fence register". The
+ * object then embeds a #i915_gem_active to track the most recent (in
+ * retirement order) request relevant for the desired mode of access.
+ * The #i915_gem_active is updated with i915_gem_active_set() to track the
+ * most recent fence request, typically this is done as part of
+ * i915_vma_move_to_active().
+ *
+ * When the #i915_gem_active completes (is retired), it will
+ * signal its completion to the owner through a callback as well as mark
+ * itself as idle (i915_gem_active.request == NULL). The owner
+ * can then perform any action, such as delayed freeing of an active
+ * resource including itself.
+ */
+struct i915_gem_active;
+
+typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
+				   struct drm_i915_gem_request *);
+
+struct i915_gem_active {
+	struct drm_i915_gem_request __rcu *request;
+	struct list_head link;
+	i915_gem_retire_fn retire;
+};
+
+void i915_gem_retire_noop(struct i915_gem_active *,
+			  struct drm_i915_gem_request *request);
+
+/**
+ * init_request_active - prepares the activity tracker for use
+ * @active - the active tracker
+ * @func - a callback when then the tracker is retired (becomes idle),
+ *         can be NULL
+ *
+ * init_request_active() prepares the embedded @active struct for use as
+ * an activity tracker, that is for tracking the last known active request
+ * associated with it. When the last request becomes idle, when it is retired
+ * after completion, the optional callback @func is invoked.
+ */
+static inline void
+init_request_active(struct i915_gem_active *active,
+		    i915_gem_retire_fn retire)
+{
+	INIT_LIST_HEAD(&active->link);
+	active->retire = retire ?: i915_gem_retire_noop;
+}
+
+/**
+ * i915_gem_active_set - updates the tracker to watch the current request
+ * @active - the active tracker
+ * @request - the request to watch
+ *
+ * i915_gem_active_set() watches the given @request for completion. Whilst
+ * that @request is busy, the @active reports busy. When that @request is
+ * retired, the @active tracker is updated to report idle.
+ */
+static inline void
+i915_gem_active_set(struct i915_gem_active *active,
+		    struct drm_i915_gem_request *request)
+{
+	list_move(&active->link, &request->active_list);
+	rcu_assign_pointer(active->request, request);
+}
+
+static inline struct drm_i915_gem_request *
+__i915_gem_active_peek(const struct i915_gem_active *active)
+{
+	/* Inside the error capture (running with the driver in an unknown
+	 * state), we want to bend the rules slightly (a lot).
+	 *
+	 * Work is in progress to make it safer, in the meantime this keeps
+	 * the known issue from spamming the logs.
+	 */
+	return rcu_dereference_protected(active->request, 1);
+}
+
+/**
+ * i915_gem_active_raw - return the active request
+ * @active - the active tracker
+ *
+ * i915_gem_active_raw() returns the current request being tracked, or NULL.
+ * It does not obtain a reference on the request for the caller, so the caller
+ * must hold struct_mutex.
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
+{
+	return rcu_dereference_protected(active->request,
+					 lockdep_is_held(mutex));
+}
+
+/**
+ * i915_gem_active_peek - report the active request being monitored
+ * @active - the active tracker
+ *
+ * i915_gem_active_peek() returns the current request being tracked if
+ * still active, or NULL. It does not obtain a reference on the request
+ * for the caller, so the caller must hold struct_mutex.
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
+{
+	struct drm_i915_gem_request *request;
+
+	request = i915_gem_active_raw(active, mutex);
+	if (!request || i915_gem_request_completed(request))
+		return NULL;
+
+	return request;
+}
+
+/**
+ * i915_gem_active_get - return a reference to the active request
+ * @active - the active tracker
+ *
+ * i915_gem_active_get() returns a reference to the active request, or NULL
+ * if the active tracker is idle. The caller must hold struct_mutex.
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
+{
+	return i915_gem_request_get(i915_gem_active_peek(active, mutex));
+}
+
+/**
+ * __i915_gem_active_get_rcu - return a reference to the active request
+ * @active - the active tracker
+ *
+ * __i915_gem_active_get() returns a reference to the active request, or NULL
+ * if the active tracker is idle. The caller must hold the RCU read lock, but
+ * the returned pointer is safe to use outside of RCU.
+ */
+static inline struct drm_i915_gem_request *
+__i915_gem_active_get_rcu(const struct i915_gem_active *active)
+{
+	/* Performing a lockless retrieval of the active request is super
+	 * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
+	 * slab of request objects will not be freed whilst we hold the
+	 * RCU read lock. It does not guarantee that the request itself
+	 * will not be freed and then *reused*. Viz,
+	 *
+	 * Thread A			Thread B
+	 *
+	 * req = active.request
+	 *				retire(req) -> free(req);
+	 *				(req is now first on the slab freelist)
+	 *				active.request = NULL
+	 *
+	 *				req = new submission on a new object
+	 * ref(req)
+	 *
+	 * To prevent the request from being reused whilst the caller
+	 * uses it, we take a reference like normal. Whilst acquiring
+	 * the reference we check that it is not in a destroyed state
+	 * (refcnt == 0). That prevents the request being reallocated
+	 * whilst the caller holds on to it. To check that the request
+	 * was not reallocated as we acquired the reference we have to
+	 * check that our request remains the active request across
+	 * the lookup, in the same manner as a seqlock. The visibility
+	 * of the pointer versus the reference counting is controlled
+	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
+	 *
+	 * In the middle of all that, we inspect whether the request is
+	 * complete. Retiring is lazy so the request may be completed long
+	 * before the active tracker is updated. Querying whether the
+	 * request is complete is far cheaper (as it involves no locked
+	 * instructions setting cachelines to exclusive) than acquiring
+	 * the reference, so we do it first. The RCU read lock ensures the
+	 * pointer dereference is valid, but does not ensure that the
+	 * seqno nor HWS is the right one! However, if the request was
+	 * reallocated, that means the active tracker's request was complete.
+	 * If the new request is also complete, then both are and we can
+	 * just report the active tracker is idle. If the new request is
+	 * incomplete, then we acquire a reference on it and check that
+	 * it remained the active request.
+	 *
+	 * It is then imperative that we do not zero the request on
+	 * reallocation, so that we can chase the dangling pointers!
+	 * See i915_gem_request_alloc().
+	 */
+	do {
+		struct drm_i915_gem_request *request;
+
+		request = rcu_dereference(active->request);
+		if (!request || i915_gem_request_completed(request))
+			return NULL;
+
+		/* An especially silly compiler could decide to recompute the
+		 * result of i915_gem_request_completed, more specifically
+		 * re-emit the load for request->fence.seqno. A race would catch
+		 * a later seqno value, which could flip the result from true to
+		 * false. Which means part of the instructions below might not
+		 * be executed, while later on instructions are executed. Due to
+		 * barriers within the refcounting the inconsistency can't reach
+		 * past the call to i915_gem_request_get_rcu, but not executing
+		 * that while still executing i915_gem_request_put() creates
+		 * havoc enough.  Prevent this with a compiler barrier.
+		 */
+		barrier();
+
+		request = i915_gem_request_get_rcu(request);
+
+		/* What stops the following rcu_access_pointer() from occurring
+		 * before the above i915_gem_request_get_rcu()? If we were
+		 * to read the value before pausing to get the reference to
+		 * the request, we may not notice a change in the active
+		 * tracker.
+		 *
+		 * The rcu_access_pointer() is a mere compiler barrier, which
+		 * means both the CPU and compiler are free to perform the
+		 * memory read without constraint. The compiler only has to
+		 * ensure that any operations after the rcu_access_pointer()
+		 * occur afterwards in program order. This means the read may
+		 * be performed earlier by an out-of-order CPU, or adventurous
+		 * compiler.
+		 *
+		 * The atomic operation at the heart of
+		 * i915_gem_request_get_rcu(), see fence_get_rcu(), is
+		 * atomic_inc_not_zero() which is only a full memory barrier
+		 * when successful. That is, if i915_gem_request_get_rcu()
+		 * returns the request (and so with the reference counted
+		 * incremented) then the following read for rcu_access_pointer()
+		 * must occur after the atomic operation and so confirm
+		 * that this request is the one currently being tracked.
+		 *
+		 * The corresponding write barrier is part of
+		 * rcu_assign_pointer().
+		 */
+		if (!request || request == rcu_access_pointer(active->request))
+			return rcu_pointer_handoff(request);
+
+		i915_gem_request_put(request);
+	} while (1);
+}
+
+/**
+ * i915_gem_active_get_unlocked - return a reference to the active request
+ * @active - the active tracker
+ *
+ * i915_gem_active_get_unlocked() returns a reference to the active request,
+ * or NULL if the active tracker is idle. The reference is obtained under RCU,
+ * so no locking is required by the caller.
+ *
+ * The reference should be freed with i915_gem_request_put().
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_get_unlocked(const struct i915_gem_active *active)
+{
+	struct drm_i915_gem_request *request;
+
+	rcu_read_lock();
+	request = __i915_gem_active_get_rcu(active);
+	rcu_read_unlock();
+
+	return request;
+}
+
+/**
+ * i915_gem_active_isset - report whether the active tracker is assigned
+ * @active - the active tracker
+ *
+ * i915_gem_active_isset() returns true if the active tracker is currently
+ * assigned to a request. Due to the lazy retiring, that request may be idle
+ * and this may report stale information.
+ */
+static inline bool
+i915_gem_active_isset(const struct i915_gem_active *active)
+{
+	return rcu_access_pointer(active->request);
+}
+
+/**
+ * i915_gem_active_is_idle - report whether the active tracker is idle
+ * @active - the active tracker
+ *
+ * i915_gem_active_is_idle() returns true if the active tracker is currently
+ * unassigned or if the request is complete (but not yet retired). Requires
+ * the caller to hold struct_mutex (but that can be relaxed if desired).
+ */
+static inline bool
+i915_gem_active_is_idle(const struct i915_gem_active *active,
+			struct mutex *mutex)
+{
+	return !i915_gem_active_peek(active, mutex);
+}
+
+/**
+ * i915_gem_active_wait - waits until the request is completed
+ * @active - the active request on which to wait
+ *
+ * i915_gem_active_wait() waits until the request is completed before
+ * returning. Note that it does not guarantee that the request is
+ * retired first, see i915_gem_active_retire().
+ *
+ * i915_gem_active_wait() returns immediately if the active
+ * request is already complete.
+ */
+static inline int __must_check
+i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
+{
+	struct drm_i915_gem_request *request;
+
+	request = i915_gem_active_peek(active, mutex);
+	if (!request)
+		return 0;
+
+	return i915_wait_request(request,
+				 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+				 NULL, NULL);
+}
+
+/**
+ * i915_gem_active_wait_unlocked - waits until the request is completed
+ * @active - the active request on which to wait
+ * @flags - how to wait
+ * @timeout - how long to wait at most
+ * @rps - userspace client to charge for a waitboost
+ *
+ * i915_gem_active_wait_unlocked() waits until the request is completed before
+ * returning, without requiring any locks to be held. Note that it does not
+ * retire any requests before returning.
+ *
+ * This function relies on RCU in order to acquire the reference to the active
+ * request without holding any locks. See __i915_gem_active_get_rcu() for the
+ * glory details on how that is managed. Once the reference is acquired, we
+ * can then wait upon the request, and afterwards release our reference,
+ * free of any locking.
+ *
+ * This function wraps i915_wait_request(), see it for the full details on
+ * the arguments.
+ *
+ * Returns 0 if successful, or a negative error code.
+ */
+static inline int
+i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
+			      unsigned int flags,
+			      s64 *timeout,
+			      struct intel_rps_client *rps)
+{
+	struct drm_i915_gem_request *request;
+	int ret = 0;
+
+	request = i915_gem_active_get_unlocked(active);
+	if (request) {
+		ret = i915_wait_request(request, flags, timeout, rps);
+		i915_gem_request_put(request);
+	}
+
+	return ret;
+}
+
+/**
+ * i915_gem_active_retire - waits until the request is retired
+ * @active - the active request on which to wait
+ *
+ * i915_gem_active_retire() waits until the request is completed,
+ * and then ensures that at least the retirement handler for this
+ * @active tracker is called before returning. If the @active
+ * tracker is idle, the function returns immediately.
+ */
+static inline int __must_check
+i915_gem_active_retire(struct i915_gem_active *active,
+		       struct mutex *mutex)
+{
+	struct drm_i915_gem_request *request;
+	int ret;
+
+	request = i915_gem_active_raw(active, mutex);
+	if (!request)
+		return 0;
+
+	ret = i915_wait_request(request,
+				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+				NULL, NULL);
+	if (ret)
+		return ret;
+
+	list_del_init(&active->link);
+	RCU_INIT_POINTER(active->request, NULL);
+
+	active->retire(active, request);
+
+	return 0;
+}
+
+/* Convenience functions for peeking at state inside active's request whilst
+ * guarded by the struct_mutex.
+ */
+
+static inline uint32_t
+i915_gem_active_get_seqno(const struct i915_gem_active *active,
+			  struct mutex *mutex)
+{
+	return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
+}
+
+static inline struct intel_engine_cs *
+i915_gem_active_get_engine(const struct i915_gem_active *active,
+			   struct mutex *mutex)
+{
+	return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
+}
+
+#define for_each_active(mask, idx) \
+	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
+
+#endif /* I915_GEM_REQUEST_H */
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 6f10b42..1c237d0 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -48,19 +48,15 @@
 #endif
 }
 
-static int num_vma_bound(struct drm_i915_gem_object *obj)
+static bool any_vma_pinned(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
-	int count = 0;
 
-	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (drm_mm_node_allocated(&vma->node))
-			count++;
-		if (vma->pin_count)
-			count++;
-	}
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
+		if (i915_vma_is_pinned(vma))
+			return true;
 
-	return count;
+	return false;
 }
 
 static bool swap_available(void)
@@ -82,7 +78,10 @@
 	 * to the GPU, simply unbinding from the GPU is not going to succeed
 	 * in releasing our pin count on the pages themselves.
 	 */
-	if (obj->pages_pin_count != num_vma_bound(obj))
+	if (obj->pages_pin_count > obj->bind_count)
+		return false;
+
+	if (any_vma_pinned(obj))
 		return false;
 
 	/* We can only return physical pages to the system if we can either
@@ -163,17 +162,16 @@
 	 */
 	for (phase = phases; phase->list; phase++) {
 		struct list_head still_in_list;
+		struct drm_i915_gem_object *obj;
 
 		if ((flags & phase->bit) == 0)
 			continue;
 
 		INIT_LIST_HEAD(&still_in_list);
-		while (count < target && !list_empty(phase->list)) {
-			struct drm_i915_gem_object *obj;
-			struct i915_vma *vma, *v;
-
-			obj = list_first_entry(phase->list,
-					       typeof(*obj), global_list);
+		while (count < target &&
+		       (obj = list_first_entry_or_null(phase->list,
+						       typeof(*obj),
+						       global_list))) {
 			list_move_tail(&obj->global_list, &still_in_list);
 
 			if (flags & I915_SHRINK_PURGEABLE &&
@@ -184,24 +182,21 @@
 			    !is_vmalloc_addr(obj->mapping))
 				continue;
 
-			if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
+			if ((flags & I915_SHRINK_ACTIVE) == 0 &&
+			    i915_gem_object_is_active(obj))
 				continue;
 
 			if (!can_release_pages(obj))
 				continue;
 
-			drm_gem_object_reference(&obj->base);
+			i915_gem_object_get(obj);
 
 			/* For the unbound phase, this should be a no-op! */
-			list_for_each_entry_safe(vma, v,
-						 &obj->vma_list, obj_link)
-				if (i915_vma_unbind(vma))
-					break;
-
+			i915_gem_object_unbind(obj);
 			if (i915_gem_object_put_pages(obj) == 0)
 				count += obj->base.size >> PAGE_SHIFT;
 
-			drm_gem_object_unreference(&obj->base);
+			i915_gem_object_put(obj);
 		}
 		list_splice(&still_in_list, phase->list);
 	}
@@ -210,6 +205,8 @@
 		intel_runtime_pm_put(dev_priv);
 
 	i915_gem_retire_requests(dev_priv);
+	/* expedite the RCU grace period to free some request slabs */
+	synchronize_rcu_expedited();
 
 	return count;
 }
@@ -230,10 +227,15 @@
  */
 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
-	return i915_gem_shrink(dev_priv, -1UL,
-			       I915_SHRINK_BOUND |
-			       I915_SHRINK_UNBOUND |
-			       I915_SHRINK_ACTIVE);
+	unsigned long freed;
+
+	freed = i915_gem_shrink(dev_priv, -1UL,
+				I915_SHRINK_BOUND |
+				I915_SHRINK_UNBOUND |
+				I915_SHRINK_ACTIVE);
+	rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
+
+	return freed;
 }
 
 static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
@@ -242,9 +244,6 @@
 		if (!mutex_is_locked_by(&dev->struct_mutex, current))
 			return false;
 
-		if (to_i915(dev)->mm.shrinker_no_lock_stealing)
-			return false;
-
 		*unlock = false;
 	} else
 		*unlock = true;
@@ -273,7 +272,7 @@
 			count += obj->base.size >> PAGE_SHIFT;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (!obj->active && can_release_pages(obj))
+		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
 			count += obj->base.size >> PAGE_SHIFT;
 	}
 
@@ -321,17 +320,22 @@
 				       struct shrinker_lock_uninterruptible *slu,
 				       int timeout_ms)
 {
-	unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
+	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
 
-	while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) {
+	do {
+		if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
+		    i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock))
+			break;
+
 		schedule_timeout_killable(1);
 		if (fatal_signal_pending(current))
 			return false;
-		if (--timeout == 0) {
+
+		if (time_after(jiffies, timeout)) {
 			pr_err("Unable to lock GPU to purge memory.\n");
 			return false;
 		}
-	}
+	} while (1);
 
 	slu->was_interruptible = dev_priv->mm.interruptible;
 	dev_priv->mm.interruptible = false;
@@ -410,7 +414,7 @@
 		return NOTIFY_DONE;
 
 	/* Force everything onto the inactive lists */
-	ret = i915_gem_wait_for_idle(dev_priv);
+	ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
 	if (ret)
 		goto out;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 66be299a1..59989e8 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -92,6 +92,7 @@
 static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct resource *r;
 	u32 base;
@@ -111,43 +112,15 @@
 	if (INTEL_INFO(dev)->gen >= 3) {
 		u32 bsm;
 
-		pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);
+		pci_read_config_dword(pdev, INTEL_BSM, &bsm);
 
 		base = bsm & INTEL_BSM_MASK;
 	} else if (IS_I865G(dev)) {
+		u32 tseg_size = 0;
 		u16 toud = 0;
-
-		/*
-		 * FIXME is the graphics stolen memory region
-		 * always at TOUD? Ie. is it always the last
-		 * one to be allocated by the BIOS?
-		 */
-		pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0),
-					 I865_TOUD, &toud);
-
-		base = toud << 16;
-	} else if (IS_I85X(dev)) {
-		u32 tseg_size = 0;
-		u32 tom;
 		u8 tmp;
 
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
-					 I85X_ESMRAMC, &tmp);
-
-		if (tmp & TSEG_ENABLE)
-			tseg_size = MB(1);
-
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 1),
-					 I85X_DRB3, &tmp);
-		tom = tmp * MB(32);
-
-		base = tom - tseg_size - ggtt->stolen_size;
-	} else if (IS_845G(dev)) {
-		u32 tseg_size = 0;
-		u32 tom;
-		u8 tmp;
-
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
 					 I845_ESMRAMC, &tmp);
 
 		if (tmp & TSEG_ENABLE) {
@@ -161,7 +134,46 @@
 			}
 		}
 
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+		pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
+					 I865_TOUD, &toud);
+
+		base = (toud << 16) + tseg_size;
+	} else if (IS_I85X(dev)) {
+		u32 tseg_size = 0;
+		u32 tom;
+		u8 tmp;
+
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
+					 I85X_ESMRAMC, &tmp);
+
+		if (tmp & TSEG_ENABLE)
+			tseg_size = MB(1);
+
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1),
+					 I85X_DRB3, &tmp);
+		tom = tmp * MB(32);
+
+		base = tom - tseg_size - ggtt->stolen_size;
+	} else if (IS_845G(dev)) {
+		u32 tseg_size = 0;
+		u32 tom;
+		u8 tmp;
+
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
+					 I845_ESMRAMC, &tmp);
+
+		if (tmp & TSEG_ENABLE) {
+			switch (tmp & I845_TSEG_SIZE_MASK) {
+			case I845_TSEG_SIZE_512K:
+				tseg_size = KB(512);
+				break;
+			case I845_TSEG_SIZE_1M:
+				tseg_size = MB(1);
+				break;
+			}
+		}
+
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
 					 I830_DRB3, &tmp);
 		tom = tmp * MB(32);
 
@@ -171,7 +183,7 @@
 		u32 tom;
 		u8 tmp;
 
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
 					 I830_ESMRAMC, &tmp);
 
 		if (tmp & TSEG_ENABLE) {
@@ -181,7 +193,7 @@
 				tseg_size = KB(512);
 		}
 
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
 					 I830_DRB3, &tmp);
 		tom = tmp * MB(32);
 
@@ -685,7 +697,7 @@
 	if (gtt_offset == I915_GTT_OFFSET_NONE)
 		return obj;
 
-	vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base);
+	vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto err;
@@ -698,24 +710,25 @@
 	 */
 	vma->node.start = gtt_offset;
 	vma->node.size = size;
-	if (drm_mm_initialized(&ggtt->base.mm)) {
-		ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
-		if (ret) {
-			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
-			goto err;
-		}
 
-		vma->bound |= GLOBAL_BIND;
-		__i915_vma_set_map_and_fenceable(vma);
-		list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
+	ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
+	if (ret) {
+		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
+		goto err;
 	}
 
+	vma->pages = obj->pages;
+	vma->flags |= I915_VMA_GLOBAL_BIND;
+	__i915_vma_set_map_and_fenceable(vma);
+	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+	obj->bind_count++;
+
 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	i915_gem_object_pin_pages(obj);
 
 	return obj;
 
 err:
-	drm_gem_object_unreference(&obj->base);
+	i915_gem_object_put(obj);
 	return NULL;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 8030199..a14b1e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -68,6 +68,9 @@
 	if (tiling_mode == I915_TILING_NONE)
 		return true;
 
+	if (tiling_mode > I915_TILING_LAST)
+		return false;
+
 	if (IS_GEN2(dev) ||
 	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
 		tile_width = 128;
@@ -113,36 +116,58 @@
 	return true;
 }
 
-/* Is the current GTT allocation valid for the change in tiling? */
-static bool
-i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
+static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode)
 {
+	struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
 	u32 size;
 
-	if (tiling_mode == I915_TILING_NONE)
+	if (!i915_vma_is_map_and_fenceable(vma))
 		return true;
 
-	if (INTEL_INFO(obj->base.dev)->gen >= 4)
-		return true;
-
-	if (IS_GEN3(obj->base.dev)) {
-		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
+	if (INTEL_GEN(dev_priv) == 3) {
+		if (vma->node.start & ~I915_FENCE_START_MASK)
 			return false;
 	} else {
-		if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
+		if (vma->node.start & ~I830_FENCE_START_MASK)
 			return false;
 	}
 
-	size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
-	if (i915_gem_obj_ggtt_size(obj) != size)
+	size = i915_gem_get_ggtt_size(dev_priv, vma->size, tiling_mode);
+	if (vma->node.size < size)
 		return false;
 
-	if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
+	if (vma->node.start & (size - 1))
 		return false;
 
 	return true;
 }
 
+/* Make the current GTT allocation valid for the change in tiling. */
+static int
+i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
+{
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct i915_vma *vma;
+	int ret;
+
+	if (tiling_mode == I915_TILING_NONE)
+		return 0;
+
+	if (INTEL_GEN(dev_priv) >= 4)
+		return 0;
+
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (i915_vma_fence_prepare(vma, tiling_mode))
+			continue;
+
+		ret = i915_vma_unbind(vma);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 /**
  * i915_gem_set_tiling - IOCTL handler to set tiling mode
  * @dev: DRM device
@@ -164,15 +189,18 @@
 	struct drm_i915_gem_set_tiling *args = data;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
-	int ret = 0;
+	int err = 0;
 
-	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-	if (&obj->base == NULL)
+	/* Make sure we don't cross-contaminate obj->tiling_and_stride */
+	BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK);
+
+	obj = i915_gem_object_lookup(file, args->handle);
+	if (!obj)
 		return -ENOENT;
 
 	if (!i915_tiling_ok(dev,
 			    args->stride, obj->base.size, args->tiling_mode)) {
-		drm_gem_object_unreference_unlocked(&obj->base);
+		i915_gem_object_put_unlocked(obj);
 		return -EINVAL;
 	}
 
@@ -180,7 +208,7 @@
 
 	mutex_lock(&dev->struct_mutex);
 	if (obj->pin_display || obj->framebuffer_references) {
-		ret = -EBUSY;
+		err = -EBUSY;
 		goto err;
 	}
 
@@ -213,8 +241,8 @@
 		}
 	}
 
-	if (args->tiling_mode != obj->tiling_mode ||
-	    args->stride != obj->stride) {
+	if (args->tiling_mode != i915_gem_object_get_tiling(obj) ||
+	    args->stride != i915_gem_object_get_stride(obj)) {
 		/* We need to rebind the object if its current allocation
 		 * no longer meets the alignment restrictions for its new
 		 * tiling mode. Otherwise we can just leave it alone, but
@@ -227,34 +255,36 @@
 		 * has to also include the unfenced register the GPU uses
 		 * whilst executing a fenced command for an untiled object.
 		 */
-		if (obj->map_and_fenceable &&
-		    !i915_gem_object_fence_ok(obj, args->tiling_mode))
-			ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
 
-		if (ret == 0) {
+		err = i915_gem_object_fence_prepare(obj, args->tiling_mode);
+		if (!err) {
+			struct i915_vma *vma;
+
 			if (obj->pages &&
 			    obj->madv == I915_MADV_WILLNEED &&
 			    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 				if (args->tiling_mode == I915_TILING_NONE)
 					i915_gem_object_unpin_pages(obj);
-				if (obj->tiling_mode == I915_TILING_NONE)
+				if (!i915_gem_object_is_tiled(obj))
 					i915_gem_object_pin_pages(obj);
 			}
 
-			obj->fence_dirty =
-				obj->last_fenced_req ||
-				obj->fence_reg != I915_FENCE_REG_NONE;
+			list_for_each_entry(vma, &obj->vma_list, obj_link) {
+				if (!vma->fence)
+					continue;
 
-			obj->tiling_mode = args->tiling_mode;
-			obj->stride = args->stride;
+				vma->fence->dirty = true;
+			}
+			obj->tiling_and_stride =
+				args->stride | args->tiling_mode;
 
 			/* Force the fence to be reacquired for GTT access */
 			i915_gem_release_mmap(obj);
 		}
 	}
 	/* we have to maintain this existing ABI... */
-	args->stride = obj->stride;
-	args->tiling_mode = obj->tiling_mode;
+	args->stride = i915_gem_object_get_stride(obj);
+	args->tiling_mode = i915_gem_object_get_tiling(obj);
 
 	/* Try to preallocate memory required to save swizzling on put-pages */
 	if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -268,12 +298,12 @@
 	}
 
 err:
-	drm_gem_object_unreference(&obj->base);
+	i915_gem_object_put(obj);
 	mutex_unlock(&dev->struct_mutex);
 
 	intel_runtime_pm_put(dev_priv);
 
-	return ret;
+	return err;
 }
 
 /**
@@ -297,14 +327,12 @@
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
 
-	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-	if (&obj->base == NULL)
+	obj = i915_gem_object_lookup(file, args->handle);
+	if (!obj)
 		return -ENOENT;
 
-	mutex_lock(&dev->struct_mutex);
-
-	args->tiling_mode = obj->tiling_mode;
-	switch (obj->tiling_mode) {
+	args->tiling_mode = READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
+	switch (args->tiling_mode) {
 	case I915_TILING_X:
 		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
 		break;
@@ -328,8 +356,6 @@
 	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
 		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
 
-	drm_gem_object_unreference(&obj->base);
-	mutex_unlock(&dev->struct_mutex);
-
+	i915_gem_object_put_unlocked(obj);
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 2314c88..c6f780f 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -63,33 +63,12 @@
 
 static void wait_rendering(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
-	int i, n;
+	unsigned long active = __I915_BO_ACTIVE(obj);
+	int idx;
 
-	if (!obj->active)
-		return;
-
-	n = 0;
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		struct drm_i915_gem_request *req;
-
-		req = obj->last_read_req[i];
-		if (req == NULL)
-			continue;
-
-		requests[n++] = i915_gem_request_reference(req);
-	}
-
-	mutex_unlock(&dev->struct_mutex);
-
-	for (i = 0; i < n; i++)
-		__i915_wait_request(requests[i], false, NULL, NULL);
-
-	mutex_lock(&dev->struct_mutex);
-
-	for (i = 0; i < n; i++)
-		i915_gem_request_unreference(requests[i]);
+	for_each_active(active, idx)
+		i915_gem_active_wait_unlocked(&obj->last_read[idx],
+					      0, NULL, NULL);
 }
 
 static void cancel_userptr(struct work_struct *work)
@@ -98,28 +77,19 @@
 	struct drm_i915_gem_object *obj = mo->obj;
 	struct drm_device *dev = obj->base.dev;
 
+	wait_rendering(obj);
+
 	mutex_lock(&dev->struct_mutex);
 	/* Cancel any active worker and force us to re-evaluate gup */
 	obj->userptr.work = NULL;
 
 	if (obj->pages != NULL) {
-		struct drm_i915_private *dev_priv = to_i915(dev);
-		struct i915_vma *vma, *tmp;
-		bool was_interruptible;
-
-		wait_rendering(obj);
-
-		was_interruptible = dev_priv->mm.interruptible;
-		dev_priv->mm.interruptible = false;
-
-		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
-			WARN_ON(i915_vma_unbind(vma));
+		/* We are inside a kthread context and can't be interrupted */
+		WARN_ON(i915_gem_object_unbind(obj));
 		WARN_ON(i915_gem_object_put_pages(obj));
-
-		dev_priv->mm.interruptible = was_interruptible;
 	}
 
-	drm_gem_object_unreference(&obj->base);
+	i915_gem_object_put(obj);
 	mutex_unlock(&dev->struct_mutex);
 }
 
@@ -538,6 +508,10 @@
 	pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
 	if (pvec != NULL) {
 		struct mm_struct *mm = obj->userptr.mm->mm;
+		unsigned int flags = 0;
+
+		if (!obj->userptr.read_only)
+			flags |= FOLL_WRITE;
 
 		ret = -EFAULT;
 		if (atomic_inc_not_zero(&mm->mm_users)) {
@@ -547,7 +521,7 @@
 					(work->task, mm,
 					 obj->userptr.ptr + pinned * PAGE_SIZE,
 					 npages - pinned,
-					 !obj->userptr.read_only, 0,
+					 flags,
 					 pvec + pinned, NULL);
 				if (ret < 0)
 					break;
@@ -572,12 +546,10 @@
 			}
 		}
 		obj->userptr.work = ERR_PTR(ret);
-		if (ret)
-			__i915_gem_userptr_set_active(obj, false);
 	}
 
 	obj->userptr.workers--;
-	drm_gem_object_unreference(&obj->base);
+	i915_gem_object_put(obj);
 	mutex_unlock(&dev->struct_mutex);
 
 	release_pages(pvec, pinned, 0);
@@ -622,8 +594,7 @@
 	obj->userptr.work = &work->work;
 	obj->userptr.workers++;
 
-	work->obj = obj;
-	drm_gem_object_reference(&obj->base);
+	work->obj = i915_gem_object_get(obj);
 
 	work->task = current;
 	get_task_struct(work->task);
@@ -659,15 +630,14 @@
 	 * to the vma (discard or cloning) which should prevent the more
 	 * egregious cases from causing harm.
 	 */
-	if (IS_ERR(obj->userptr.work)) {
-		/* active flag will have been dropped already by the worker */
-		ret = PTR_ERR(obj->userptr.work);
-		obj->userptr.work = NULL;
-		return ret;
-	}
-	if (obj->userptr.work)
+
+	if (obj->userptr.work) {
 		/* active flag should still be held for the pending work */
-		return -EAGAIN;
+		if (IS_ERR(obj->userptr.work))
+			return PTR_ERR(obj->userptr.work);
+		else
+			return -EAGAIN;
+	}
 
 	/* Let the mmu-notifier know that we have begun and need cancellation */
 	ret = __i915_gem_userptr_set_active(obj, true);
@@ -846,7 +816,7 @@
 		ret = drm_gem_handle_create(file, &obj->base, &handle);
 
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference_unlocked(&obj->base);
+	i915_gem_object_put_unlocked(obj);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9d73d22..334f15d 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -30,9 +30,9 @@
 #include <generated/utsrelease.h>
 #include "i915_drv.h"
 
-static const char *ring_str(int ring)
+static const char *engine_str(int engine)
 {
-	switch (ring) {
+	switch (engine) {
 	case RCS: return "render";
 	case VCS: return "bsd";
 	case BCS: return "blt";
@@ -42,16 +42,6 @@
 	}
 }
 
-static const char *pin_flag(int pinned)
-{
-	if (pinned > 0)
-		return " P";
-	else if (pinned < 0)
-		return " p";
-	else
-		return "";
-}
-
 static const char *tiling_flag(int tiling)
 {
 	switch (tiling) {
@@ -189,7 +179,7 @@
 {
 	int i;
 
-	err_printf(m, "  %s [%d]:\n", name, count);
+	err_printf(m, "%s [%d]:\n", name, count);
 
 	while (count--) {
 		err_printf(m, "    %08x_%08x %8u %02x %02x [ ",
@@ -202,13 +192,12 @@
 			err_printf(m, "%02x ", err->rseqno[i]);
 
 		err_printf(m, "] %02x", err->wseqno);
-		err_puts(m, pin_flag(err->pinned));
 		err_puts(m, tiling_flag(err->tiling));
 		err_puts(m, dirty_flag(err->dirty));
 		err_puts(m, purgeable_flag(err->purgeable));
 		err_puts(m, err->userptr ? " userptr" : "");
-		err_puts(m, err->ring != -1 ? " " : "");
-		err_puts(m, ring_str(err->ring));
+		err_puts(m, err->engine != -1 ? " " : "");
+		err_puts(m, engine_str(err->engine));
 		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
 
 		if (err->name)
@@ -221,7 +210,7 @@
 	}
 }
 
-static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
 {
 	switch (a) {
 	case HANGCHECK_IDLE:
@@ -239,70 +228,74 @@
 	return "unknown";
 }
 
-static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
-				  struct drm_device *dev,
-				  struct drm_i915_error_state *error,
-				  int ring_idx)
+static void error_print_engine(struct drm_i915_error_state_buf *m,
+			       struct drm_i915_error_engine *ee)
 {
-	struct drm_i915_error_ring *ring = &error->ring[ring_idx];
+	err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
+	err_printf(m, "  START: 0x%08x\n", ee->start);
+	err_printf(m, "  HEAD:  0x%08x\n", ee->head);
+	err_printf(m, "  TAIL:  0x%08x\n", ee->tail);
+	err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
+	err_printf(m, "  MODE:  0x%08x\n", ee->mode);
+	err_printf(m, "  HWS:   0x%08x\n", ee->hws);
+	err_printf(m, "  ACTHD: 0x%08x %08x\n",
+		   (u32)(ee->acthd>>32), (u32)ee->acthd);
+	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
+	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);
+	err_printf(m, "  INSTDONE: 0x%08x\n", ee->instdone);
+	if (ee->batchbuffer) {
+		u64 start = ee->batchbuffer->gtt_offset;
+		u64 end = start + ee->batchbuffer->gtt_size;
 
-	if (!ring->valid)
-		return;
-
-	err_printf(m, "%s command stream:\n", ring_str(ring_idx));
-	err_printf(m, "  START: 0x%08x\n", ring->start);
-	err_printf(m, "  HEAD:  0x%08x\n", ring->head);
-	err_printf(m, "  TAIL:  0x%08x\n", ring->tail);
-	err_printf(m, "  CTL:   0x%08x\n", ring->ctl);
-	err_printf(m, "  HWS:   0x%08x\n", ring->hws);
-	err_printf(m, "  ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
-	err_printf(m, "  IPEIR: 0x%08x\n", ring->ipeir);
-	err_printf(m, "  IPEHR: 0x%08x\n", ring->ipehr);
-	err_printf(m, "  INSTDONE: 0x%08x\n", ring->instdone);
-	if (INTEL_INFO(dev)->gen >= 4) {
-		err_printf(m, "  BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
-		err_printf(m, "  BB_STATE: 0x%08x\n", ring->bbstate);
-		err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
+		err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
+			   upper_32_bits(start), lower_32_bits(start),
+			   upper_32_bits(end), lower_32_bits(end));
 	}
-	err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
-	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
-		   lower_32_bits(ring->faddr));
-	if (INTEL_INFO(dev)->gen >= 6) {
-		err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
-		err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
+	if (INTEL_GEN(m->i915) >= 4) {
+		err_printf(m, "  BBADDR: 0x%08x_%08x\n",
+			   (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
+		err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
+		err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
+	}
+	err_printf(m, "  INSTPM: 0x%08x\n", ee->instpm);
+	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
+		   lower_32_bits(ee->faddr));
+	if (INTEL_GEN(m->i915) >= 6) {
+		err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
+		err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
 		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
-			   ring->semaphore_mboxes[0],
-			   ring->semaphore_seqno[0]);
+			   ee->semaphore_mboxes[0],
+			   ee->semaphore_seqno[0]);
 		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
-			   ring->semaphore_mboxes[1],
-			   ring->semaphore_seqno[1]);
-		if (HAS_VEBOX(dev)) {
+			   ee->semaphore_mboxes[1],
+			   ee->semaphore_seqno[1]);
+		if (HAS_VEBOX(m->i915)) {
 			err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
-				   ring->semaphore_mboxes[2],
-				   ring->semaphore_seqno[2]);
+				   ee->semaphore_mboxes[2],
+				   ee->semaphore_seqno[2]);
 		}
 	}
-	if (USES_PPGTT(dev)) {
-		err_printf(m, "  GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);
+	if (USES_PPGTT(m->i915)) {
+		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
 
-		if (INTEL_INFO(dev)->gen >= 8) {
+		if (INTEL_GEN(m->i915) >= 8) {
 			int i;
 			for (i = 0; i < 4; i++)
 				err_printf(m, "  PDP%d: 0x%016llx\n",
-					   i, ring->vm_info.pdp[i]);
+					   i, ee->vm_info.pdp[i]);
 		} else {
 			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
-				   ring->vm_info.pp_dir_base);
+				   ee->vm_info.pp_dir_base);
 		}
 	}
-	err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
-	err_printf(m, "  last_seqno: 0x%08x\n", ring->last_seqno);
-	err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
-	err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
-	err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
+	err_printf(m, "  seqno: 0x%08x\n", ee->seqno);
+	err_printf(m, "  last_seqno: 0x%08x\n", ee->last_seqno);
+	err_printf(m, "  waiting: %s\n", yesno(ee->waiting));
+	err_printf(m, "  ring->head: 0x%08x\n", ee->cpu_ring_head);
+	err_printf(m, "  ring->tail: 0x%08x\n", ee->cpu_ring_tail);
 	err_printf(m, "  hangcheck: %s [%d]\n",
-		   hangcheck_action_to_str(ring->hangcheck_action),
-		   ring->hangcheck_score);
+		   hangcheck_action_to_str(ee->hangcheck_action),
+		   ee->hangcheck_score);
 }
 
 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -328,11 +321,22 @@
 	}
 }
 
+static void err_print_capabilities(struct drm_i915_error_state_buf *m,
+				   const struct intel_device_info *info)
+{
+#define PRINT_FLAG(x)  err_printf(m, #x ": %s\n", yesno(info->x))
+#define SEP_SEMICOLON ;
+	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
+#undef PRINT_FLAG
+#undef SEP_SEMICOLON
+}
+
 int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 			    const struct i915_error_state_file_priv *error_priv)
 {
 	struct drm_device *dev = error_priv->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct drm_i915_error_state *error = error_priv->error;
 	struct drm_i915_error_object *obj;
 	int i, j, offset, elt;
@@ -347,27 +351,28 @@
 	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
 		   error->time.tv_usec);
 	err_printf(m, "Kernel: " UTS_RELEASE "\n");
+	err_print_capabilities(m, &error->device_info);
 	max_hangcheck_score = 0;
-	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-		if (error->ring[i].hangcheck_score > max_hangcheck_score)
-			max_hangcheck_score = error->ring[i].hangcheck_score;
+	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+		if (error->engine[i].hangcheck_score > max_hangcheck_score)
+			max_hangcheck_score = error->engine[i].hangcheck_score;
 	}
-	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
-		    error->ring[i].pid != -1) {
+	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+		if (error->engine[i].hangcheck_score == max_hangcheck_score &&
+		    error->engine[i].pid != -1) {
 			err_printf(m, "Active process (on ring %s): %s [%d]\n",
-				   ring_str(i),
-				   error->ring[i].comm,
-				   error->ring[i].pid);
+				   engine_str(i),
+				   error->engine[i].comm,
+				   error->engine[i].pid);
 		}
 	}
 	err_printf(m, "Reset count: %u\n", error->reset_count);
 	err_printf(m, "Suspend count: %u\n", error->suspend_count);
-	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
-	err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
+	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
+	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
 	err_printf(m, "PCI Subsystem: %04x:%04x\n",
-		   dev->pdev->subsystem_vendor,
-		   dev->pdev->subsystem_device);
+		   pdev->subsystem_vendor,
+		   pdev->subsystem_device);
 	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
 
 	if (HAS_CSR(dev)) {
@@ -414,36 +419,55 @@
 	if (IS_GEN7(dev))
 		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
 
-	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
-		i915_ring_error_state(m, dev, error, i);
-
-	for (i = 0; i < error->vm_count; i++) {
-		err_printf(m, "vm[%d]\n", i);
-
-		print_error_buffers(m, "Active",
-				    error->active_bo[i],
-				    error->active_bo_count[i]);
-
-		print_error_buffers(m, "Pinned",
-				    error->pinned_bo[i],
-				    error->pinned_bo_count[i]);
+	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+		if (error->engine[i].engine_id != -1)
+			error_print_engine(m, &error->engine[i]);
 	}
 
-	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-		obj = error->ring[i].batchbuffer;
+	for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
+		char buf[128];
+		int len, first = 1;
+
+		if (!error->active_vm[i])
+			break;
+
+		len = scnprintf(buf, sizeof(buf), "Active (");
+		for (j = 0; j < ARRAY_SIZE(error->engine); j++) {
+			if (error->engine[j].vm != error->active_vm[i])
+				continue;
+
+			len += scnprintf(buf + len, sizeof(buf), "%s%s",
+					 first ? "" : ", ",
+					 dev_priv->engine[j].name);
+			first = 0;
+		}
+		scnprintf(buf + len, sizeof(buf), ")");
+		print_error_buffers(m, buf,
+				    error->active_bo[i],
+				    error->active_bo_count[i]);
+	}
+
+	print_error_buffers(m, "Pinned (global)",
+			    error->pinned_bo,
+			    error->pinned_bo_count);
+
+	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+		struct drm_i915_error_engine *ee = &error->engine[i];
+
+		obj = ee->batchbuffer;
 		if (obj) {
 			err_puts(m, dev_priv->engine[i].name);
-			if (error->ring[i].pid != -1)
+			if (ee->pid != -1)
 				err_printf(m, " (submitted by %s [%d])",
-					   error->ring[i].comm,
-					   error->ring[i].pid);
+					   ee->comm,
+					   ee->pid);
 			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
 				   upper_32_bits(obj->gtt_offset),
 				   lower_32_bits(obj->gtt_offset));
 			print_error_obj(m, obj);
 		}
 
-		obj = error->ring[i].wa_batchbuffer;
+		obj = ee->wa_batchbuffer;
 		if (obj) {
 			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
 				   dev_priv->engine[i].name,
@@ -451,38 +475,43 @@
 			print_error_obj(m, obj);
 		}
 
-		if (error->ring[i].num_requests) {
+		if (ee->num_requests) {
 			err_printf(m, "%s --- %d requests\n",
 				   dev_priv->engine[i].name,
-				   error->ring[i].num_requests);
-			for (j = 0; j < error->ring[i].num_requests; j++) {
-				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
-					   error->ring[i].requests[j].seqno,
-					   error->ring[i].requests[j].jiffies,
-					   error->ring[i].requests[j].tail);
+				   ee->num_requests);
+			for (j = 0; j < ee->num_requests; j++) {
+				err_printf(m, "  pid %d, seqno 0x%08x, emitted %ld, head 0x%08x, tail 0x%08x\n",
+					   ee->requests[j].pid,
+					   ee->requests[j].seqno,
+					   ee->requests[j].jiffies,
+					   ee->requests[j].head,
+					   ee->requests[j].tail);
 			}
 		}
 
-		if (error->ring[i].num_waiters) {
+		if (IS_ERR(ee->waiters)) {
+			err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
+				   dev_priv->engine[i].name);
+		} else if (ee->num_waiters) {
 			err_printf(m, "%s --- %d waiters\n",
 				   dev_priv->engine[i].name,
-				   error->ring[i].num_waiters);
-			for (j = 0; j < error->ring[i].num_waiters; j++) {
+				   ee->num_waiters);
+			for (j = 0; j < ee->num_waiters; j++) {
 				err_printf(m, " seqno 0x%08x for %s [%d]\n",
-					   error->ring[i].waiters[j].seqno,
-					   error->ring[i].waiters[j].comm,
-					   error->ring[i].waiters[j].pid);
+					   ee->waiters[j].seqno,
+					   ee->waiters[j].comm,
+					   ee->waiters[j].pid);
 			}
 		}
 
-		if ((obj = error->ring[i].ringbuffer)) {
+		if ((obj = ee->ringbuffer)) {
 			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
 				   dev_priv->engine[i].name,
 				   lower_32_bits(obj->gtt_offset));
 			print_error_obj(m, obj);
 		}
 
-		if ((obj = error->ring[i].hws_page)) {
+		if ((obj = ee->hws_page)) {
 			u64 hws_offset = obj->gtt_offset;
 			u32 *hws_page = &obj->pages[0][0];
 
@@ -504,7 +533,7 @@
 			}
 		}
 
-		obj = error->ring[i].wa_ctx;
+		obj = ee->wa_ctx;
 		if (obj) {
 			u64 wa_ctx_offset = obj->gtt_offset;
 			u32 *wa_ctx_page = &obj->pages[0][0];
@@ -526,7 +555,7 @@
 			}
 		}
 
-		if ((obj = error->ring[i].ctx)) {
+		if ((obj = ee->ctx)) {
 			err_printf(m, "%s --- HW Context = 0x%08x\n",
 				   dev_priv->engine[i].name,
 				   lower_32_bits(obj->gtt_offset));
@@ -534,7 +563,7 @@
 		}
 	}
 
-	if ((obj = error->semaphore_obj)) {
+	if ((obj = error->semaphore)) {
 		err_printf(m, "Semaphore page = 0x%08x\n",
 			   lower_32_bits(obj->gtt_offset));
 		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
@@ -611,26 +640,27 @@
 							  typeof(*error), ref);
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-		i915_error_object_free(error->ring[i].batchbuffer);
-		i915_error_object_free(error->ring[i].wa_batchbuffer);
-		i915_error_object_free(error->ring[i].ringbuffer);
-		i915_error_object_free(error->ring[i].hws_page);
-		i915_error_object_free(error->ring[i].ctx);
-		i915_error_object_free(error->ring[i].wa_ctx);
-		kfree(error->ring[i].requests);
-		kfree(error->ring[i].waiters);
+	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+		struct drm_i915_error_engine *ee = &error->engine[i];
+
+		i915_error_object_free(ee->batchbuffer);
+		i915_error_object_free(ee->wa_batchbuffer);
+		i915_error_object_free(ee->ringbuffer);
+		i915_error_object_free(ee->hws_page);
+		i915_error_object_free(ee->ctx);
+		i915_error_object_free(ee->wa_ctx);
+
+		kfree(ee->requests);
+		if (!IS_ERR_OR_NULL(ee->waiters))
+			kfree(ee->waiters);
 	}
 
-	i915_error_object_free(error->semaphore_obj);
+	i915_error_object_free(error->semaphore);
 
-	for (i = 0; i < error->vm_count; i++)
+	for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
 		kfree(error->active_bo[i]);
-
-	kfree(error->active_bo);
-	kfree(error->active_bo_count);
 	kfree(error->pinned_bo);
-	kfree(error->pinned_bo_count);
+
 	kfree(error->overlay);
 	kfree(error->display);
 	kfree(error);
@@ -638,46 +668,45 @@
 
 static struct drm_i915_error_object *
 i915_error_object_create(struct drm_i915_private *dev_priv,
-			 struct drm_i915_gem_object *src,
-			 struct i915_address_space *vm)
+			 struct i915_vma *vma)
 {
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct drm_i915_gem_object *src;
 	struct drm_i915_error_object *dst;
-	struct i915_vma *vma = NULL;
 	int num_pages;
 	bool use_ggtt;
 	int i = 0;
 	u64 reloc_offset;
 
-	if (src == NULL || src->pages == NULL)
+	if (!vma)
+		return NULL;
+
+	src = vma->obj;
+	if (!src->pages)
 		return NULL;
 
 	num_pages = src->base.size >> PAGE_SHIFT;
 
 	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
-	if (dst == NULL)
+	if (!dst)
 		return NULL;
 
-	if (i915_gem_obj_bound(src, vm))
-		dst->gtt_offset = i915_gem_obj_offset(src, vm);
-	else
-		dst->gtt_offset = -1;
+	dst->gtt_offset = vma->node.start;
+	dst->gtt_size = vma->node.size;
 
 	reloc_offset = dst->gtt_offset;
-	if (i915_is_ggtt(vm))
-		vma = i915_gem_obj_to_ggtt(src);
 	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
-		   vma && (vma->bound & GLOBAL_BIND) &&
+		   (vma->flags & I915_VMA_GLOBAL_BIND) &&
 		   reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
 
 	/* Cannot access stolen address directly, try to use the aperture */
 	if (src->stolen) {
 		use_ggtt = true;
 
-		if (!(vma && vma->bound & GLOBAL_BIND))
+		if (!(vma->flags & I915_VMA_GLOBAL_BIND))
 			goto unwind;
 
-		reloc_offset = i915_gem_obj_ggtt_offset(src);
+		reloc_offset = vma->node.start;
 		if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
 			goto unwind;
 	}
@@ -705,7 +734,7 @@
 			 * captures what the GPU read.
 			 */
 
-			s = io_mapping_map_atomic_wc(ggtt->mappable,
+			s = io_mapping_map_atomic_wc(&ggtt->mappable,
 						     reloc_offset);
 			memcpy_fromio(d, s, PAGE_SIZE);
 			io_mapping_unmap_atomic(s);
@@ -737,8 +766,24 @@
 	kfree(dst);
 	return NULL;
 }
-#define i915_error_ggtt_object_create(dev_priv, src) \
-	i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
+
+/* The error capture is special as tries to run underneath the normal
+ * locking rules - so we use the raw version of the i915_gem_active lookup.
+ */
+static inline uint32_t
+__active_get_seqno(struct i915_gem_active *active)
+{
+	return i915_gem_request_get_seqno(__i915_gem_active_peek(active));
+}
+
+static inline int
+__active_get_engine_id(struct i915_gem_active *active)
+{
+	struct intel_engine_cs *engine;
+
+	engine = i915_gem_request_get_engine(__i915_gem_active_peek(active));
+	return engine ? engine->id : -1;
+}
 
 static void capture_bo(struct drm_i915_error_buffer *err,
 		       struct i915_vma *vma)
@@ -748,32 +793,34 @@
 
 	err->size = obj->base.size;
 	err->name = obj->base.name;
+
 	for (i = 0; i < I915_NUM_ENGINES; i++)
-		err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
-	err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
+		err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
+	err->wseqno = __active_get_seqno(&obj->last_write);
+	err->engine = __active_get_engine_id(&obj->last_write);
+
 	err->gtt_offset = vma->node.start;
 	err->read_domains = obj->base.read_domains;
 	err->write_domain = obj->base.write_domain;
-	err->fence_reg = obj->fence_reg;
-	err->pinned = 0;
-	if (i915_gem_obj_is_pinned(obj))
-		err->pinned = 1;
-	err->tiling = obj->tiling_mode;
+	err->fence_reg = vma->fence ? vma->fence->id : -1;
+	err->tiling = i915_gem_object_get_tiling(obj);
 	err->dirty = obj->dirty;
 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
 	err->userptr = obj->userptr.mm != NULL;
-	err->ring = obj->last_write_req ?
-			i915_gem_request_get_engine(obj->last_write_req)->id : -1;
 	err->cache_level = obj->cache_level;
 }
 
-static u32 capture_active_bo(struct drm_i915_error_buffer *err,
-			     int count, struct list_head *head)
+static u32 capture_error_bo(struct drm_i915_error_buffer *err,
+			    int count, struct list_head *head,
+			    bool pinned_only)
 {
 	struct i915_vma *vma;
 	int i = 0;
 
 	list_for_each_entry(vma, head, vm_link) {
+		if (pinned_only && !i915_vma_is_pinned(vma))
+			continue;
+
 		capture_bo(err++, vma);
 		if (++i == count)
 			break;
@@ -782,28 +829,6 @@
 	return i;
 }
 
-static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
-			     int count, struct list_head *head,
-			     struct i915_address_space *vm)
-{
-	struct drm_i915_gem_object *obj;
-	struct drm_i915_error_buffer * const first = err;
-	struct drm_i915_error_buffer * const last = err + count;
-
-	list_for_each_entry(obj, head, global_list) {
-		struct i915_vma *vma;
-
-		if (err == last)
-			break;
-
-		list_for_each_entry(vma, &obj->vma_list, obj_link)
-			if (vma->vm == vm && vma->pin_count > 0)
-				capture_bo(err++, vma);
-	}
-
-	return err - first;
-}
-
 /* Generate a semi-unique error code. The code is not meant to have meaning, The
  * code's only purpose is to try to prevent false duplicated bug reports by
  * grossly estimating a GPU error state.
@@ -815,7 +840,7 @@
  */
 static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
 					 struct drm_i915_error_state *error,
-					 int *ring_id)
+					 int *engine_id)
 {
 	uint32_t error_code = 0;
 	int i;
@@ -826,11 +851,11 @@
 	 * strictly a client bug. Use instdone to differentiate those some.
 	 */
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
-			if (ring_id)
-				*ring_id = i;
+		if (error->engine[i].hangcheck_action == HANGCHECK_HUNG) {
+			if (engine_id)
+				*engine_id = i;
 
-			return error->ring[i].ipehr ^ error->ring[i].instdone;
+			return error->engine[i].ipehr ^ error->engine[i].instdone;
 		}
 	}
 
@@ -855,22 +880,17 @@
 }
 
 
-static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
-					struct drm_i915_error_state *error,
+static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
 					struct intel_engine_cs *engine,
-					struct drm_i915_error_ring *ering)
+					struct drm_i915_error_engine *ee)
 {
+	struct drm_i915_private *dev_priv = engine->i915;
 	struct intel_engine_cs *to;
 	enum intel_engine_id id;
 
-	if (!i915_semaphore_is_enabled(dev_priv))
+	if (!error->semaphore)
 		return;
 
-	if (!error->semaphore_obj)
-		error->semaphore_obj =
-			i915_error_ggtt_object_create(dev_priv,
-						      dev_priv->semaphore_obj);
-
 	for_each_engine_id(to, dev_priv, id) {
 		int idx;
 		u16 signal_offset;
@@ -879,44 +899,52 @@
 		if (engine == to)
 			continue;
 
-		signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1))
-				/ 4;
-		tmp = error->semaphore_obj->pages[0];
-		idx = intel_ring_sync_index(engine, to);
+		signal_offset =
+			(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
+		tmp = error->semaphore->pages[0];
+		idx = intel_engine_sync_index(engine, to);
 
-		ering->semaphore_mboxes[idx] = tmp[signal_offset];
-		ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
+		ee->semaphore_mboxes[idx] = tmp[signal_offset];
+		ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
 	}
 }
 
-static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
-					struct intel_engine_cs *engine,
-					struct drm_i915_error_ring *ering)
+static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
+					struct drm_i915_error_engine *ee)
 {
-	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
-	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
-	ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
-	ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
+	struct drm_i915_private *dev_priv = engine->i915;
+
+	ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
+	ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
+	ee->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
+	ee->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
 
 	if (HAS_VEBOX(dev_priv)) {
-		ering->semaphore_mboxes[2] =
+		ee->semaphore_mboxes[2] =
 			I915_READ(RING_SYNC_2(engine->mmio_base));
-		ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
+		ee->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
 	}
 }
 
-static void engine_record_waiters(struct intel_engine_cs *engine,
-				  struct drm_i915_error_ring *ering)
+static void error_record_engine_waiters(struct intel_engine_cs *engine,
+					struct drm_i915_error_engine *ee)
 {
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
 	struct drm_i915_error_waiter *waiter;
 	struct rb_node *rb;
 	int count;
 
-	ering->num_waiters = 0;
-	ering->waiters = NULL;
+	ee->num_waiters = 0;
+	ee->waiters = NULL;
 
-	spin_lock(&b->lock);
+	if (RB_EMPTY_ROOT(&b->waiters))
+		return;
+
+	if (!spin_trylock(&b->lock)) {
+		ee->waiters = ERR_PTR(-EDEADLK);
+		return;
+	}
+
 	count = 0;
 	for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
 		count++;
@@ -930,9 +958,13 @@
 	if (!waiter)
 		return;
 
-	ering->waiters = waiter;
+	if (!spin_trylock(&b->lock)) {
+		kfree(waiter);
+		ee->waiters = ERR_PTR(-EDEADLK);
+		return;
+	}
 
-	spin_lock(&b->lock);
+	ee->waiters = waiter;
 	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
 		struct intel_wait *w = container_of(rb, typeof(*w), node);
 
@@ -941,57 +973,59 @@
 		waiter->seqno = w->seqno;
 		waiter++;
 
-		if (++ering->num_waiters == count)
+		if (++ee->num_waiters == count)
 			break;
 	}
 	spin_unlock(&b->lock);
 }
 
-static void i915_record_ring_state(struct drm_i915_private *dev_priv,
-				   struct drm_i915_error_state *error,
-				   struct intel_engine_cs *engine,
-				   struct drm_i915_error_ring *ering)
+static void error_record_engine_registers(struct drm_i915_error_state *error,
+					  struct intel_engine_cs *engine,
+					  struct drm_i915_error_engine *ee)
 {
+	struct drm_i915_private *dev_priv = engine->i915;
+
 	if (INTEL_GEN(dev_priv) >= 6) {
-		ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
-		ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
+		ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
+		ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
 		if (INTEL_GEN(dev_priv) >= 8)
-			gen8_record_semaphore_state(dev_priv, error, engine,
-						    ering);
+			gen8_record_semaphore_state(error, engine, ee);
 		else
-			gen6_record_semaphore_state(dev_priv, engine, ering);
+			gen6_record_semaphore_state(engine, ee);
 	}
 
 	if (INTEL_GEN(dev_priv) >= 4) {
-		ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
-		ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
-		ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
-		ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
-		ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
-		ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+		ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+		ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
+		ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+		ee->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
+		ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
+		ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
 		if (INTEL_GEN(dev_priv) >= 8) {
-			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
-			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
+			ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
+			ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
 		}
-		ering->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
+		ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
 	} else {
-		ering->faddr = I915_READ(DMA_FADD_I8XX);
-		ering->ipeir = I915_READ(IPEIR);
-		ering->ipehr = I915_READ(IPEHR);
-		ering->instdone = I915_READ(GEN2_INSTDONE);
+		ee->faddr = I915_READ(DMA_FADD_I8XX);
+		ee->ipeir = I915_READ(IPEIR);
+		ee->ipehr = I915_READ(IPEHR);
+		ee->instdone = I915_READ(GEN2_INSTDONE);
 	}
 
-	ering->waiting = intel_engine_has_waiter(engine);
-	ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
-	ering->acthd = intel_ring_get_active_head(engine);
-	ering->seqno = intel_engine_get_seqno(engine);
-	ering->last_seqno = engine->last_submitted_seqno;
-	ering->start = I915_READ_START(engine);
-	ering->head = I915_READ_HEAD(engine);
-	ering->tail = I915_READ_TAIL(engine);
-	ering->ctl = I915_READ_CTL(engine);
+	ee->waiting = intel_engine_has_waiter(engine);
+	ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
+	ee->acthd = intel_engine_get_active_head(engine);
+	ee->seqno = intel_engine_get_seqno(engine);
+	ee->last_seqno = engine->last_submitted_seqno;
+	ee->start = I915_READ_START(engine);
+	ee->head = I915_READ_HEAD(engine);
+	ee->tail = I915_READ_TAIL(engine);
+	ee->ctl = I915_READ_CTL(engine);
+	if (INTEL_GEN(dev_priv) > 2)
+		ee->mode = I915_READ_MODE(engine);
 
-	if (I915_NEED_GFX_HWS(dev_priv)) {
+	if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
 		i915_reg_t mmio;
 
 		if (IS_GEN7(dev_priv)) {
@@ -1017,107 +1051,150 @@
 			mmio = RING_HWS_PGA(engine->mmio_base);
 		}
 
-		ering->hws = I915_READ(mmio);
+		ee->hws = I915_READ(mmio);
 	}
 
-	ering->hangcheck_score = engine->hangcheck.score;
-	ering->hangcheck_action = engine->hangcheck.action;
+	ee->hangcheck_score = engine->hangcheck.score;
+	ee->hangcheck_action = engine->hangcheck.action;
 
 	if (USES_PPGTT(dev_priv)) {
 		int i;
 
-		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
+		ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
 
 		if (IS_GEN6(dev_priv))
-			ering->vm_info.pp_dir_base =
+			ee->vm_info.pp_dir_base =
 				I915_READ(RING_PP_DIR_BASE_READ(engine));
 		else if (IS_GEN7(dev_priv))
-			ering->vm_info.pp_dir_base =
+			ee->vm_info.pp_dir_base =
 				I915_READ(RING_PP_DIR_BASE(engine));
 		else if (INTEL_GEN(dev_priv) >= 8)
 			for (i = 0; i < 4; i++) {
-				ering->vm_info.pdp[i] =
+				ee->vm_info.pdp[i] =
 					I915_READ(GEN8_RING_PDP_UDW(engine, i));
-				ering->vm_info.pdp[i] <<= 32;
-				ering->vm_info.pdp[i] |=
+				ee->vm_info.pdp[i] <<= 32;
+				ee->vm_info.pdp[i] |=
 					I915_READ(GEN8_RING_PDP_LDW(engine, i));
 			}
 	}
 }
 
-
-static void i915_gem_record_active_context(struct intel_engine_cs *engine,
-					   struct drm_i915_error_state *error,
-					   struct drm_i915_error_ring *ering)
+static void engine_record_requests(struct intel_engine_cs *engine,
+				   struct drm_i915_gem_request *first,
+				   struct drm_i915_error_engine *ee)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_request *request;
+	int count;
 
-	/* Currently render ring is the only HW context user */
-	if (engine->id != RCS || !error->ccid)
+	count = 0;
+	request = first;
+	list_for_each_entry_from(request, &engine->request_list, link)
+		count++;
+	if (!count)
 		return;
 
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (!i915_gem_obj_ggtt_bound(obj))
-			continue;
+	ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
+	if (!ee->requests)
+		return;
 
-		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
-			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
+	ee->num_requests = count;
+
+	count = 0;
+	request = first;
+	list_for_each_entry_from(request, &engine->request_list, link) {
+		struct drm_i915_error_request *erq;
+
+		if (count >= ee->num_requests) {
+			/*
+			 * If the ring request list was changed in
+			 * between the point where the error request
+			 * list was created and dimensioned and this
+			 * point then just exit early to avoid crashes.
+			 *
+			 * We don't need to communicate that the
+			 * request list changed state during error
+			 * state capture and that the error state is
+			 * slightly incorrect as a consequence since we
+			 * are typically only interested in the request
+			 * list state at the point of error state
+			 * capture, not in any changes happening during
+			 * the capture.
+			 */
 			break;
 		}
+
+		erq = &ee->requests[count++];
+		erq->seqno = request->fence.seqno;
+		erq->jiffies = request->emitted_jiffies;
+		erq->head = request->head;
+		erq->tail = request->tail;
+
+		rcu_read_lock();
+		erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+		rcu_read_unlock();
 	}
+	ee->num_requests = count;
 }
 
 static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 				  struct drm_i915_error_state *error)
 {
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct drm_i915_gem_request *request;
-	int i, count;
+	int i;
+
+	error->semaphore =
+		i915_error_object_create(dev_priv, dev_priv->semaphore);
 
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		struct intel_engine_cs *engine = &dev_priv->engine[i];
+		struct drm_i915_error_engine *ee = &error->engine[i];
+		struct drm_i915_gem_request *request;
 
-		error->ring[i].pid = -1;
+		ee->pid = -1;
+		ee->engine_id = -1;
 
 		if (!intel_engine_initialized(engine))
 			continue;
 
-		error->ring[i].valid = true;
+		ee->engine_id = i;
 
-		i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
-		engine_record_waiters(engine, &error->ring[i]);
+		error_record_engine_registers(error, engine, ee);
+		error_record_engine_waiters(engine, ee);
 
 		request = i915_gem_find_active_request(engine);
 		if (request) {
-			struct i915_address_space *vm;
-			struct intel_ringbuffer *rb;
+			struct intel_ring *ring;
+			struct pid *pid;
 
-			vm = request->ctx->ppgtt ?
+			ee->vm = request->ctx->ppgtt ?
 				&request->ctx->ppgtt->base : &ggtt->base;
 
 			/* We need to copy these to an anonymous buffer
 			 * as the simplest method to avoid being overwritten
 			 * by userspace.
 			 */
-			error->ring[i].batchbuffer =
+			ee->batchbuffer =
 				i915_error_object_create(dev_priv,
-							 request->batch_obj,
-							 vm);
+							 request->batch);
 
 			if (HAS_BROKEN_CS_TLB(dev_priv))
-				error->ring[i].wa_batchbuffer =
-					i915_error_ggtt_object_create(dev_priv,
-							     engine->scratch.obj);
+				ee->wa_batchbuffer =
+					i915_error_object_create(dev_priv,
+								 engine->scratch);
 
-			if (request->pid) {
+			ee->ctx =
+				i915_error_object_create(dev_priv,
+							 request->ctx->engine[i].state);
+
+			pid = request->ctx->pid;
+			if (pid) {
 				struct task_struct *task;
 
 				rcu_read_lock();
-				task = pid_task(request->pid, PIDTYPE_PID);
+				task = pid_task(pid, PIDTYPE_PID);
 				if (task) {
-					strcpy(error->ring[i].comm, task->comm);
-					error->ring[i].pid = task->pid;
+					strcpy(ee->comm, task->comm);
+					ee->pid = task->pid;
 				}
 				rcu_read_unlock();
 			}
@@ -1125,153 +1202,106 @@
 			error->simulated |=
 				request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
 
-			rb = request->ringbuf;
-			error->ring[i].cpu_ring_head = rb->head;
-			error->ring[i].cpu_ring_tail = rb->tail;
-			error->ring[i].ringbuffer =
-				i915_error_ggtt_object_create(dev_priv,
-							      rb->obj);
+			ring = request->ring;
+			ee->cpu_ring_head = ring->head;
+			ee->cpu_ring_tail = ring->tail;
+			ee->ringbuffer =
+				i915_error_object_create(dev_priv, ring->vma);
+
+			engine_record_requests(engine, request, ee);
 		}
 
-		error->ring[i].hws_page =
-			i915_error_ggtt_object_create(dev_priv,
-						      engine->status_page.obj);
+		ee->hws_page =
+			i915_error_object_create(dev_priv,
+						 engine->status_page.vma);
 
-		if (engine->wa_ctx.obj) {
-			error->ring[i].wa_ctx =
-				i915_error_ggtt_object_create(dev_priv,
-							      engine->wa_ctx.obj);
-		}
-
-		i915_gem_record_active_context(engine, error, &error->ring[i]);
-
-		count = 0;
-		list_for_each_entry(request, &engine->request_list, list)
-			count++;
-
-		error->ring[i].num_requests = count;
-		error->ring[i].requests =
-			kcalloc(count, sizeof(*error->ring[i].requests),
-				GFP_ATOMIC);
-		if (error->ring[i].requests == NULL) {
-			error->ring[i].num_requests = 0;
-			continue;
-		}
-
-		count = 0;
-		list_for_each_entry(request, &engine->request_list, list) {
-			struct drm_i915_error_request *erq;
-
-			if (count >= error->ring[i].num_requests) {
-				/*
-				 * If the ring request list was changed in
-				 * between the point where the error request
-				 * list was created and dimensioned and this
-				 * point then just exit early to avoid crashes.
-				 *
-				 * We don't need to communicate that the
-				 * request list changed state during error
-				 * state capture and that the error state is
-				 * slightly incorrect as a consequence since we
-				 * are typically only interested in the request
-				 * list state at the point of error state
-				 * capture, not in any changes happening during
-				 * the capture.
-				 */
-				break;
-			}
-
-			erq = &error->ring[i].requests[count++];
-			erq->seqno = request->seqno;
-			erq->jiffies = request->emitted_jiffies;
-			erq->tail = request->postfix;
-		}
+		ee->wa_ctx =
+			i915_error_object_create(dev_priv, engine->wa_ctx.vma);
 	}
 }
 
-/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
- * VM.
- */
 static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
 				struct drm_i915_error_state *error,
 				struct i915_address_space *vm,
-				const int ndx)
+				int idx)
 {
-	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_error_buffer *active_bo;
 	struct i915_vma *vma;
-	int i;
+	int count;
 
-	i = 0;
+	count = 0;
 	list_for_each_entry(vma, &vm->active_list, vm_link)
-		i++;
-	error->active_bo_count[ndx] = i;
+		count++;
 
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		list_for_each_entry(vma, &obj->vma_list, obj_link)
-			if (vma->vm == vm && vma->pin_count > 0)
-				i++;
-	}
-	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
-
-	if (i) {
-		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
-		if (active_bo)
-			pinned_bo = active_bo + error->active_bo_count[ndx];
-	}
-
+	active_bo = NULL;
+	if (count)
+		active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
 	if (active_bo)
-		error->active_bo_count[ndx] =
-			capture_active_bo(active_bo,
-					  error->active_bo_count[ndx],
-					  &vm->active_list);
+		count = capture_error_bo(active_bo, count, &vm->active_list, false);
+	else
+		count = 0;
 
-	if (pinned_bo)
-		error->pinned_bo_count[ndx] =
-			capture_pinned_bo(pinned_bo,
-					  error->pinned_bo_count[ndx],
-					  &dev_priv->mm.bound_list, vm);
-	error->active_bo[ndx] = active_bo;
-	error->pinned_bo[ndx] = pinned_bo;
+	error->active_vm[idx] = vm;
+	error->active_bo[idx] = active_bo;
+	error->active_bo_count[idx] = count;
 }
 
-static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
-				     struct drm_i915_error_state *error)
+static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
+					struct drm_i915_error_state *error)
 {
-	struct i915_address_space *vm;
-	int cnt = 0, i = 0;
+	int cnt = 0, i, j;
 
-	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
-		cnt++;
+	BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo));
+	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm));
+	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count));
 
-	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
-	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
-	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
-					 GFP_ATOMIC);
-	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
-					 GFP_ATOMIC);
+	/* Scan each engine looking for unique active contexts/vm */
+	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+		struct drm_i915_error_engine *ee = &error->engine[i];
+		bool found;
 
-	if (error->active_bo == NULL ||
-	    error->pinned_bo == NULL ||
-	    error->active_bo_count == NULL ||
-	    error->pinned_bo_count == NULL) {
-		kfree(error->active_bo);
-		kfree(error->active_bo_count);
-		kfree(error->pinned_bo);
-		kfree(error->pinned_bo_count);
+		if (!ee->vm)
+			continue;
 
-		error->active_bo = NULL;
-		error->active_bo_count = NULL;
-		error->pinned_bo = NULL;
-		error->pinned_bo_count = NULL;
-	} else {
-		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
-			i915_gem_capture_vm(dev_priv, error, vm, i++);
-
-		error->vm_count = cnt;
+		found = false;
+		for (j = 0; j < i && !found; j++)
+			found = error->engine[j].vm == ee->vm;
+		if (!found)
+			i915_gem_capture_vm(dev_priv, error, ee->vm, cnt++);
 	}
 }
 
+static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
+					struct drm_i915_error_state *error)
+{
+	struct i915_address_space *vm = &dev_priv->ggtt.base;
+	struct drm_i915_error_buffer *bo;
+	struct i915_vma *vma;
+	int count_inactive, count_active;
+
+	count_inactive = 0;
+	list_for_each_entry(vma, &vm->active_list, vm_link)
+		count_inactive++;
+
+	count_active = 0;
+	list_for_each_entry(vma, &vm->inactive_list, vm_link)
+		count_active++;
+
+	bo = NULL;
+	if (count_inactive + count_active)
+		bo = kcalloc(count_inactive + count_active,
+			     sizeof(*bo), GFP_ATOMIC);
+	if (!bo)
+		return;
+
+	count_inactive = capture_error_bo(bo, count_inactive,
+					  &vm->active_list, true);
+	count_active = capture_error_bo(bo + count_inactive, count_active,
+					&vm->inactive_list, true);
+	error->pinned_bo_count = count_inactive + count_active;
+	error->pinned_bo = bo;
+}
+
 /* Capture all registers which don't fit into another category. */
 static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 				   struct drm_i915_error_state *error)
@@ -1352,20 +1382,20 @@
 				   const char *error_msg)
 {
 	u32 ecode;
-	int ring_id = -1, len;
+	int engine_id = -1, len;
 
-	ecode = i915_error_generate_code(dev_priv, error, &ring_id);
+	ecode = i915_error_generate_code(dev_priv, error, &engine_id);
 
 	len = scnprintf(error->error_msg, sizeof(error->error_msg),
 			"GPU HANG: ecode %d:%d:0x%08x",
-			INTEL_GEN(dev_priv), ring_id, ecode);
+			INTEL_GEN(dev_priv), engine_id, ecode);
 
-	if (ring_id != -1 && error->ring[ring_id].pid != -1)
+	if (engine_id != -1 && error->engine[engine_id].pid != -1)
 		len += scnprintf(error->error_msg + len,
 				 sizeof(error->error_msg) - len,
 				 ", in %s [%d]",
-				 error->ring[ring_id].comm,
-				 error->ring[ring_id].pid);
+				 error->engine[engine_id].comm,
+				 error->engine[engine_id].pid);
 
 	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
 		  ", reason: %s, action: %s",
@@ -1382,6 +1412,10 @@
 #endif
 	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
 	error->suspend_count = dev_priv->suspend_count;
+
+	memcpy(&error->device_info,
+	       INTEL_INFO(dev_priv),
+	       sizeof(error->device_info));
 }
 
 /**
@@ -1415,9 +1449,10 @@
 
 	i915_capture_gen_state(dev_priv, error);
 	i915_capture_reg_state(dev_priv, error);
-	i915_gem_capture_buffers(dev_priv, error);
 	i915_gem_record_fences(dev_priv, error);
 	i915_gem_record_rings(dev_priv, error);
+	i915_capture_active_buffers(dev_priv, error);
+	i915_capture_pinned_buffers(dev_priv, error);
 
 	do_gettimeofday(&error->time);
 
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index cf5a65b..a47e1e4 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -103,9 +103,6 @@
 #define HOST2GUC_INTERRUPT		_MMIO(0xc4c8)
 #define   HOST2GUC_TRIGGER		  (1<<0)
 
-#define DRBMISC1			0x1984
-#define   DOORBELL_ENABLE		  (1<<0)
-
 #define GEN8_DRBREGL(x)			_MMIO(0x1000 + (x) * 8)
 #define   GEN8_DRB_VALID		  (1<<0)
 #define GEN8_DRBREGU(x)			_MMIO(0x1000 + (x) * 8 + 4)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 2112e02..3106dcc 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -59,7 +59,7 @@
  * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
  * represents in-order queue. The kernel driver packs ring tail pointer and an
  * ELSP context descriptor dword into Work Item.
- * See guc_add_workqueue_item()
+ * See guc_wq_item_append()
  *
  */
 
@@ -114,10 +114,8 @@
 		if (ret != -ETIMEDOUT)
 			ret = -EIO;
 
-		DRM_ERROR("GUC: host2guc action 0x%X failed. ret=%d "
-				"status=0x%08X response=0x%08X\n",
-				data[0], ret, status,
-				I915_READ(SOFT_SCRATCH(15)));
+		DRM_WARN("Action 0x%X failed; ret=%d status=0x%08X response=0x%08X\n",
+			 data[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
 
 		dev_priv->guc.action_fail += 1;
 		dev_priv->guc.action_err = ret;
@@ -183,7 +181,7 @@
 				  struct i915_guc_client *client,
 				  u16 new_id)
 {
-	struct sg_table *sg = guc->ctx_pool_obj->pages;
+	struct sg_table *sg = guc->ctx_pool_vma->pages;
 	void *doorbell_bitmap = guc->doorbell_bitmap;
 	struct guc_doorbell_info *doorbell;
 	struct guc_context_desc desc;
@@ -290,7 +288,7 @@
 /*
  * Initialise the process descriptor shared with the GuC firmware.
  */
-static void guc_init_proc_desc(struct intel_guc *guc,
+static void guc_proc_desc_init(struct intel_guc *guc,
 			       struct i915_guc_client *client)
 {
 	struct guc_process_desc *desc;
@@ -322,15 +320,15 @@
  * write queue, etc).
  */
 
-static void guc_init_ctx_desc(struct intel_guc *guc,
+static void guc_ctx_desc_init(struct intel_guc *guc,
 			      struct i915_guc_client *client)
 {
-	struct drm_i915_gem_object *client_obj = client->client_obj;
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx = client->owner;
 	struct guc_context_desc desc;
 	struct sg_table *sg;
+	unsigned int tmp;
 	u32 gfx_addr;
 
 	memset(&desc, 0, sizeof(desc));
@@ -340,10 +338,10 @@
 	desc.priority = client->priority;
 	desc.db_id = client->doorbell_id;
 
-	for_each_engine(engine, dev_priv) {
+	for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
 		struct intel_context *ce = &ctx->engine[engine->id];
-		struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
-		struct drm_i915_gem_object *obj;
+		uint32_t guc_engine_id = engine->guc_id;
+		struct guc_execlist_context *lrc = &desc.lrc[guc_engine_id];
 
 		/* TODO: We have a design issue to be solved here. Only when we
 		 * receive the first batch, we know which engine is used by the
@@ -358,30 +356,29 @@
 		lrc->context_desc = lower_32_bits(ce->lrc_desc);
 
 		/* The state page is after PPHWSP */
-		gfx_addr = i915_gem_obj_ggtt_offset(ce->state);
-		lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
+		lrc->ring_lcra =
+			i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
 		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
-				(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
+				(guc_engine_id << GUC_ELC_ENGINE_OFFSET);
 
-		obj = ce->ringbuf->obj;
-		gfx_addr = i915_gem_obj_ggtt_offset(obj);
-
-		lrc->ring_begin = gfx_addr;
-		lrc->ring_end = gfx_addr + obj->base.size - 1;
-		lrc->ring_next_free_location = gfx_addr;
+		lrc->ring_begin = i915_ggtt_offset(ce->ring->vma);
+		lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
+		lrc->ring_next_free_location = lrc->ring_begin;
 		lrc->ring_current_tail_pointer_value = 0;
 
-		desc.engines_used |= (1 << engine->guc_id);
+		desc.engines_used |= (1 << guc_engine_id);
 	}
 
+	DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
+			client->engines, desc.engines_used);
 	WARN_ON(desc.engines_used == 0);
 
 	/*
 	 * The doorbell, process descriptor, and workqueue are all parts
 	 * of the client object, which the GuC will reference via the GGTT
 	 */
-	gfx_addr = i915_gem_obj_ggtt_offset(client_obj);
-	desc.db_trigger_phy = sg_dma_address(client_obj->pages->sgl) +
+	gfx_addr = i915_ggtt_offset(client->vma);
+	desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
 				client->doorbell_offset;
 	desc.db_trigger_cpu = (uintptr_t)client->client_base +
 				client->doorbell_offset;
@@ -397,12 +394,12 @@
 	desc.desc_private = (uintptr_t)client;
 
 	/* Pool context is pinned already */
-	sg = guc->ctx_pool_obj->pages;
+	sg = guc->ctx_pool_vma->pages;
 	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
 			     sizeof(desc) * client->ctx_index);
 }
 
-static void guc_fini_ctx_desc(struct intel_guc *guc,
+static void guc_ctx_desc_fini(struct intel_guc *guc,
 			      struct i915_guc_client *client)
 {
 	struct guc_context_desc desc;
@@ -410,13 +407,13 @@
 
 	memset(&desc, 0, sizeof(desc));
 
-	sg = guc->ctx_pool_obj->pages;
+	sg = guc->ctx_pool_vma->pages;
 	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
 			     sizeof(desc) * client->ctx_index);
 }
 
 /**
- * i915_guc_wq_check_space() - check that the GuC can accept a request
+ * i915_guc_wq_reserve() - reserve space in the GuC's workqueue
  * @request:	request associated with the commands
  *
  * Return:	0 if space is available
@@ -424,39 +421,56 @@
  *
  * This function must be called (and must return 0) before a request
  * is submitted to the GuC via i915_guc_submit() below. Once a result
- * of 0 has been returned, it remains valid until (but only until)
- * the next call to submit().
+ * of 0 has been returned, it must be balanced by a corresponding
+ * call to submit().
  *
- * This precheck allows the caller to determine in advance that space
+ * Reservation allows the caller to determine in advance that space
  * will be available for the next submission before committing resources
  * to it, and helps avoid late failures with complicated recovery paths.
  */
-int i915_guc_wq_check_space(struct drm_i915_gem_request *request)
+int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
 {
 	const size_t wqi_size = sizeof(struct guc_wq_item);
 	struct i915_guc_client *gc = request->i915->guc.execbuf_client;
-	struct guc_process_desc *desc;
+	struct guc_process_desc *desc = gc->client_base + gc->proc_desc_offset;
 	u32 freespace;
+	int ret;
 
-	GEM_BUG_ON(gc == NULL);
-
-	desc = gc->client_base + gc->proc_desc_offset;
-
+	spin_lock(&gc->wq_lock);
 	freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
-	if (likely(freespace >= wqi_size))
-		return 0;
+	freespace -= gc->wq_rsvd;
+	if (likely(freespace >= wqi_size)) {
+		gc->wq_rsvd += wqi_size;
+		ret = 0;
+	} else {
+		gc->no_wq_space++;
+		ret = -EAGAIN;
+	}
+	spin_unlock(&gc->wq_lock);
 
-	gc->no_wq_space += 1;
-
-	return -EAGAIN;
+	return ret;
 }
 
-static void guc_add_workqueue_item(struct i915_guc_client *gc,
-				   struct drm_i915_gem_request *rq)
+void i915_guc_wq_unreserve(struct drm_i915_gem_request *request)
+{
+	const size_t wqi_size = sizeof(struct guc_wq_item);
+	struct i915_guc_client *gc = request->i915->guc.execbuf_client;
+
+	GEM_BUG_ON(READ_ONCE(gc->wq_rsvd) < wqi_size);
+
+	spin_lock(&gc->wq_lock);
+	gc->wq_rsvd -= wqi_size;
+	spin_unlock(&gc->wq_lock);
+}
+
+/* Construct a Work Item and append it to the GuC's Work Queue */
+static void guc_wq_item_append(struct i915_guc_client *gc,
+			       struct drm_i915_gem_request *rq)
 {
 	/* wqi_len is in DWords, and does not include the one-word header */
 	const size_t wqi_size = sizeof(struct guc_wq_item);
 	const u32 wqi_len = wqi_size/sizeof(u32) - 1;
+	struct intel_engine_cs *engine = rq->engine;
 	struct guc_process_desc *desc;
 	struct guc_wq_item *wqi;
 	void *base;
@@ -464,7 +478,7 @@
 
 	desc = gc->client_base + gc->proc_desc_offset;
 
-	/* Free space is guaranteed, see i915_guc_wq_check_space() above */
+	/* Free space is guaranteed, see i915_guc_wq_reserve() above */
 	freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
 	GEM_BUG_ON(freespace < wqi_size);
 
@@ -482,31 +496,32 @@
 	 * workqueue buffer dw by dw.
 	 */
 	BUILD_BUG_ON(wqi_size != 16);
+	GEM_BUG_ON(gc->wq_rsvd < wqi_size);
 
 	/* postincrement WQ tail for next time */
 	wq_off = gc->wq_tail;
+	GEM_BUG_ON(wq_off & (wqi_size - 1));
 	gc->wq_tail += wqi_size;
 	gc->wq_tail &= gc->wq_size - 1;
-	GEM_BUG_ON(wq_off & (wqi_size - 1));
+	gc->wq_rsvd -= wqi_size;
 
 	/* WQ starts from the page after doorbell / process_desc */
 	wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
 	wq_off &= PAGE_SIZE - 1;
-	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, wq_page));
+	base = kmap_atomic(i915_gem_object_get_page(gc->vma->obj, wq_page));
 	wqi = (struct guc_wq_item *)((char *)base + wq_off);
 
 	/* Now fill in the 4-word work queue item */
 	wqi->header = WQ_TYPE_INORDER |
 			(wqi_len << WQ_LEN_SHIFT) |
-			(rq->engine->guc_id << WQ_TARGET_SHIFT) |
+			(engine->guc_id << WQ_TARGET_SHIFT) |
 			WQ_NO_WCFLUSH_WAIT;
 
 	/* The GuC wants only the low-order word of the context descriptor */
-	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
-							     rq->engine);
+	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
 
 	wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
-	wqi->fence_id = rq->seqno;
+	wqi->fence_id = rq->fence.seqno;
 
 	kunmap_atomic(base);
 }
@@ -553,8 +568,8 @@
 		if (db_ret.db_status == GUC_DOORBELL_DISABLED)
 			break;
 
-		DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
-			  db_cmp.cookie, db_ret.cookie);
+		DRM_WARN("Cookie mismatch. Expected %d, found %d\n",
+			 db_cmp.cookie, db_ret.cookie);
 
 		/* update the cookie to newly read cookie from GuC */
 		db_cmp.cookie = db_ret.cookie;
@@ -573,26 +588,26 @@
  * Return:	0 on success, otherwise an errno.
  * 		(Note: nonzero really shouldn't happen!)
  *
- * The caller must have already called i915_guc_wq_check_space() above
- * with a result of 0 (success) since the last request submission. This
- * guarantees that there is space in the work queue for the new request,
- * so enqueuing the item cannot fail.
+ * The caller must have already called i915_guc_wq_reserve() above with
+ * a result of 0 (success), guaranteeing that there is space in the work
+ * queue for the new request, so enqueuing the item cannot fail.
  *
  * Bad Things Will Happen if the caller violates this protocol e.g. calls
- * submit() when check() says there's no space, or calls submit() multiple
- * times with no intervening check().
+ * submit() when _reserve() says there's no space, or calls _submit()
+ * a different number of times from (successful) calls to _reserve().
  *
  * The only error here arises if the doorbell hardware isn't functioning
  * as expected, which really shouln't happen.
  */
-int i915_guc_submit(struct drm_i915_gem_request *rq)
+static void i915_guc_submit(struct drm_i915_gem_request *rq)
 {
 	unsigned int engine_id = rq->engine->id;
 	struct intel_guc *guc = &rq->i915->guc;
 	struct i915_guc_client *client = guc->execbuf_client;
 	int b_ret;
 
-	guc_add_workqueue_item(client, rq);
+	spin_lock(&client->wq_lock);
+	guc_wq_item_append(client, rq);
 	b_ret = guc_ring_doorbell(client);
 
 	client->submissions[engine_id] += 1;
@@ -601,9 +616,8 @@
 		client->b_fail += 1;
 
 	guc->submissions[engine_id] += 1;
-	guc->last_seqno[engine_id] = rq->seqno;
-
-	return b_ret;
+	guc->last_seqno[engine_id] = rq->fence.seqno;
+	spin_unlock(&client->wq_lock);
 }
 
 /*
@@ -613,55 +627,48 @@
  */
 
 /**
- * gem_allocate_guc_obj() - Allocate gem object for GuC usage
- * @dev_priv:	driver private data structure
- * @size:	size of object
+ * guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
+ * @guc:	the guc
+ * @size:	size of area to allocate (both virtual space and memory)
  *
- * This is a wrapper to create a gem obj. In order to use it inside GuC, the
- * object needs to be pinned lifetime. Also we must pin it to gtt space other
- * than [0, GUC_WOPCM_TOP) because this range is reserved inside GuC.
+ * This is a wrapper to create an object for use with the GuC. In order to
+ * use it inside the GuC, an object needs to be pinned lifetime, so we allocate
+ * both some backing storage and a range inside the Global GTT. We must pin
+ * it in the GGTT somewhere other than than [0, GUC_WOPCM_TOP) because that
+ * range is reserved inside GuC.
  *
- * Return:	A drm_i915_gem_object if successful, otherwise NULL.
+ * Return:	A i915_vma if successful, otherwise an ERR_PTR.
  */
-static struct drm_i915_gem_object *
-gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
+static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
 {
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int ret;
 
 	obj = i915_gem_object_create(&dev_priv->drm, size);
 	if (IS_ERR(obj))
-		return NULL;
+		return ERR_CAST(obj);
 
-	if (i915_gem_object_get_pages(obj)) {
-		drm_gem_object_unreference(&obj->base);
-		return NULL;
-	}
+	vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+	if (IS_ERR(vma))
+		goto err;
 
-	if (i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
-			PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
-		drm_gem_object_unreference(&obj->base);
-		return NULL;
+	ret = i915_vma_pin(vma, 0, PAGE_SIZE,
+			   PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+	if (ret) {
+		vma = ERR_PTR(ret);
+		goto err;
 	}
 
 	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
 	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
 
-	return obj;
-}
+	return vma;
 
-/**
- * gem_release_guc_obj() - Release gem object allocated for GuC usage
- * @obj:	gem obj to be released
- */
-static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
-{
-	if (!obj)
-		return;
-
-	if (i915_gem_obj_is_pinned(obj))
-		i915_gem_object_ggtt_unpin(obj);
-
-	drm_gem_object_unreference(&obj->base);
+err:
+	i915_gem_object_put(obj);
+	return vma;
 }
 
 static void
@@ -688,61 +695,74 @@
 		kunmap(kmap_to_page(client->client_base));
 	}
 
-	gem_release_guc_obj(client->client_obj);
+	i915_vma_unpin_and_release(&client->vma);
 
 	if (client->ctx_index != GUC_INVALID_CTX_ID) {
-		guc_fini_ctx_desc(guc, client);
+		guc_ctx_desc_fini(guc, client);
 		ida_simple_remove(&guc->ctx_ids, client->ctx_index);
 	}
 
 	kfree(client);
 }
 
+/* Check that a doorbell register is in the expected state */
+static bool guc_doorbell_check(struct intel_guc *guc, uint16_t db_id)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	i915_reg_t drbreg = GEN8_DRBREGL(db_id);
+	uint32_t value = I915_READ(drbreg);
+	bool enabled = (value & GUC_DOORBELL_ENABLED) != 0;
+	bool expected = test_bit(db_id, guc->doorbell_bitmap);
+
+	if (enabled == expected)
+		return true;
+
+	DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) 0x%x, should be %s\n",
+			 db_id, drbreg.reg, value,
+			 expected ? "active" : "inactive");
+
+	return false;
+}
+
 /*
- * Borrow the first client to set up & tear down every doorbell
+ * Borrow the first client to set up & tear down each unused doorbell
  * in turn, to ensure that all doorbell h/w is (re)initialised.
  */
 static void guc_init_doorbell_hw(struct intel_guc *guc)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct i915_guc_client *client = guc->execbuf_client;
-	uint16_t db_id, i;
-	int err;
+	uint16_t db_id;
+	int i, err;
 
+	/* Save client's original doorbell selection */
 	db_id = client->doorbell_id;
 
 	for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
-		i915_reg_t drbreg = GEN8_DRBREGL(i);
-		u32 value = I915_READ(drbreg);
+		/* Skip if doorbell is OK */
+		if (guc_doorbell_check(guc, i))
+			continue;
 
 		err = guc_update_doorbell_id(guc, client, i);
-
-		/* Report update failure or unexpectedly active doorbell */
-		if (err || (i != db_id && (value & GUC_DOORBELL_ENABLED)))
-			DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) was 0x%x, err %d\n",
-					  i, drbreg.reg, value, err);
+		if (err)
+			DRM_DEBUG_DRIVER("Doorbell %d update failed, err %d\n",
+					i, err);
 	}
 
 	/* Restore to original value */
 	err = guc_update_doorbell_id(guc, client, db_id);
 	if (err)
-		DRM_ERROR("Failed to restore doorbell to %d, err %d\n",
-			db_id, err);
+		DRM_WARN("Failed to restore doorbell to %d, err %d\n",
+			 db_id, err);
 
-	for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
-		i915_reg_t drbreg = GEN8_DRBREGL(i);
-		u32 value = I915_READ(drbreg);
-
-		if (i != db_id && (value & GUC_DOORBELL_ENABLED))
-			DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) finally 0x%x\n",
-					  i, drbreg.reg, value);
-
-	}
+	/* Read back & verify all doorbell registers */
+	for (i = 0; i < GUC_MAX_DOORBELLS; ++i)
+		(void)guc_doorbell_check(guc, i);
 }
 
 /**
  * guc_client_alloc() - Allocate an i915_guc_client
  * @dev_priv:	driver private data structure
+ * @engines:	The set of engines to enable for this client
  * @priority:	four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW
  * 		The kernel client to replace ExecList submission is created with
  * 		NORMAL priority. Priority of a client for scheduler can be HIGH,
@@ -754,22 +774,24 @@
  */
 static struct i915_guc_client *
 guc_client_alloc(struct drm_i915_private *dev_priv,
+		 uint32_t engines,
 		 uint32_t priority,
 		 struct i915_gem_context *ctx)
 {
 	struct i915_guc_client *client;
 	struct intel_guc *guc = &dev_priv->guc;
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	uint16_t db_id;
 
 	client = kzalloc(sizeof(*client), GFP_KERNEL);
 	if (!client)
 		return NULL;
 
-	client->doorbell_id = GUC_INVALID_DOORBELL_ID;
-	client->priority = priority;
 	client->owner = ctx;
 	client->guc = guc;
+	client->engines = engines;
+	client->priority = priority;
+	client->doorbell_id = GUC_INVALID_DOORBELL_ID;
 
 	client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0,
 			GUC_MAX_GPU_CONTEXTS, GFP_KERNEL);
@@ -779,13 +801,15 @@
 	}
 
 	/* The first page is doorbell/proc_desc. Two followed pages are wq. */
-	obj = gem_allocate_guc_obj(dev_priv, GUC_DB_SIZE + GUC_WQ_SIZE);
-	if (!obj)
+	vma = guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
+	if (IS_ERR(vma))
 		goto err;
 
 	/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
-	client->client_obj = obj;
-	client->client_base = kmap(i915_gem_object_get_page(obj, 0));
+	client->vma = vma;
+	client->client_base = kmap(i915_vma_first_page(vma));
+
+	spin_lock_init(&client->wq_lock);
 	client->wq_offset = GUC_DB_SIZE;
 	client->wq_size = GUC_WQ_SIZE;
 
@@ -806,29 +830,26 @@
 	else
 		client->proc_desc_offset = (GUC_DB_SIZE / 2);
 
-	guc_init_proc_desc(guc, client);
-	guc_init_ctx_desc(guc, client);
+	guc_proc_desc_init(guc, client);
+	guc_ctx_desc_init(guc, client);
 	if (guc_init_doorbell(guc, client, db_id))
 		goto err;
 
-	DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u\n",
-		priority, client, client->ctx_index);
+	DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n",
+		priority, client, client->engines, client->ctx_index);
 	DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n",
 		client->doorbell_id, client->doorbell_offset);
 
 	return client;
 
 err:
-	DRM_ERROR("FAILED to create priority %u GuC client!\n", priority);
-
 	guc_client_free(dev_priv, client);
 	return NULL;
 }
 
-static void guc_create_log(struct intel_guc *guc)
+static void guc_log_create(struct intel_guc *guc)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	unsigned long offset;
 	uint32_t size, flags;
 
@@ -844,16 +865,16 @@
 		GUC_LOG_ISR_PAGES + 1 +
 		GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
 
-	obj = guc->log_obj;
-	if (!obj) {
-		obj = gem_allocate_guc_obj(dev_priv, size);
-		if (!obj) {
+	vma = guc->log_vma;
+	if (!vma) {
+		vma = guc_allocate_vma(guc, size);
+		if (IS_ERR(vma)) {
 			/* logging will be off */
 			i915.guc_log_level = -1;
 			return;
 		}
 
-		guc->log_obj = obj;
+		guc->log_vma = vma;
 	}
 
 	/* each allocated unit is a page */
@@ -862,11 +883,11 @@
 		(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
 		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
 
-	offset = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; /* in pages */
+	offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
 	guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
 }
 
-static void init_guc_policies(struct guc_policies *policies)
+static void guc_policies_init(struct guc_policies *policies)
 {
 	struct guc_policy *policy;
 	u32 p, i;
@@ -888,10 +909,10 @@
 	policies->is_valid = 1;
 }
 
-static void guc_create_ads(struct intel_guc *guc)
+static void guc_addon_create(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	struct guc_ads *ads;
 	struct guc_policies *policies;
 	struct guc_mmio_reg_state *reg_state;
@@ -904,16 +925,16 @@
 			sizeof(struct guc_mmio_reg_state) +
 			GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;
 
-	obj = guc->ads_obj;
-	if (!obj) {
-		obj = gem_allocate_guc_obj(dev_priv, PAGE_ALIGN(size));
-		if (!obj)
+	vma = guc->ads_vma;
+	if (!vma) {
+		vma = guc_allocate_vma(guc, PAGE_ALIGN(size));
+		if (IS_ERR(vma))
 			return;
 
-		guc->ads_obj = obj;
+		guc->ads_vma = vma;
 	}
 
-	page = i915_gem_object_get_page(obj, 0);
+	page = i915_vma_first_page(vma);
 	ads = kmap(page);
 
 	/*
@@ -924,17 +945,17 @@
 	 * to find it.
 	 */
 	engine = &dev_priv->engine[RCS];
-	ads->golden_context_lrca = engine->status_page.gfx_addr;
+	ads->golden_context_lrca = engine->status_page.ggtt_offset;
 
 	for_each_engine(engine, dev_priv)
 		ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
 
 	/* GuC scheduling policies */
 	policies = (void *)ads + sizeof(struct guc_ads);
-	init_guc_policies(policies);
+	guc_policies_init(policies);
 
-	ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
-			sizeof(struct guc_ads);
+	ads->scheduler_policies =
+		i915_ggtt_offset(vma) + sizeof(struct guc_ads);
 
 	/* MMIO reg state */
 	reg_state = (void *)policies + sizeof(struct guc_policies);
@@ -966,6 +987,7 @@
 	const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
 	const size_t gemsize = round_up(poolsize, PAGE_SIZE);
 	struct intel_guc *guc = &dev_priv->guc;
+	struct i915_vma *vma;
 
 	/* Wipe bitmap & delete client in case of reinitialisation */
 	bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
@@ -974,16 +996,17 @@
 	if (!i915.enable_guc_submission)
 		return 0; /* not enabled  */
 
-	if (guc->ctx_pool_obj)
+	if (guc->ctx_pool_vma)
 		return 0; /* already allocated */
 
-	guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv, gemsize);
-	if (!guc->ctx_pool_obj)
-		return -ENOMEM;
+	vma = guc_allocate_vma(guc, gemsize);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
 
+	guc->ctx_pool_vma = vma;
 	ida_init(&guc->ctx_ids);
-	guc_create_log(guc);
-	guc_create_ads(guc);
+	guc_log_create(guc);
+	guc_addon_create(guc);
 
 	return 0;
 }
@@ -992,13 +1015,16 @@
 {
 	struct intel_guc *guc = &dev_priv->guc;
 	struct i915_guc_client *client;
+	struct intel_engine_cs *engine;
+	struct drm_i915_gem_request *request;
 
 	/* client for execbuf submission */
 	client = guc_client_alloc(dev_priv,
+				  INTEL_INFO(dev_priv)->ring_mask,
 				  GUC_CTX_PRIORITY_KMD_NORMAL,
 				  dev_priv->kernel_context);
 	if (!client) {
-		DRM_ERROR("Failed to create execbuf guc_client\n");
+		DRM_ERROR("Failed to create normal GuC client!\n");
 		return -ENOMEM;
 	}
 
@@ -1006,6 +1032,18 @@
 	host2guc_sample_forcewake(guc, client);
 	guc_init_doorbell_hw(guc);
 
+	/* Take over from manual control of ELSP (execlists) */
+	for_each_engine(engine, dev_priv) {
+		engine->submit_request = i915_guc_submit;
+
+		/* Replay the current set of previously submitted requests */
+		list_for_each_entry(request, &engine->request_list, link) {
+			client->wq_rsvd += sizeof(struct guc_wq_item);
+			if (i915_sw_fence_done(&request->submit))
+				i915_guc_submit(request);
+		}
+	}
+
 	return 0;
 }
 
@@ -1013,6 +1051,12 @@
 {
 	struct intel_guc *guc = &dev_priv->guc;
 
+	if (!guc->execbuf_client)
+		return;
+
+	/* Revert back to manual ELSP submission */
+	intel_execlists_enable_submission(dev_priv);
+
 	guc_client_free(dev_priv, guc->execbuf_client);
 	guc->execbuf_client = NULL;
 }
@@ -1021,16 +1065,12 @@
 {
 	struct intel_guc *guc = &dev_priv->guc;
 
-	gem_release_guc_obj(dev_priv->guc.ads_obj);
-	guc->ads_obj = NULL;
+	i915_vma_unpin_and_release(&guc->ads_vma);
+	i915_vma_unpin_and_release(&guc->log_vma);
 
-	gem_release_guc_obj(dev_priv->guc.log_obj);
-	guc->log_obj = NULL;
-
-	if (guc->ctx_pool_obj)
+	if (guc->ctx_pool_vma)
 		ida_destroy(&guc->ctx_ids);
-	gem_release_guc_obj(guc->ctx_pool_obj);
-	guc->ctx_pool_obj = NULL;
+	i915_vma_unpin_and_release(&guc->ctx_pool_vma);
 }
 
 /**
@@ -1053,7 +1093,7 @@
 	/* any value greater than GUC_POWER_D0 */
 	data[1] = GUC_POWER_D1;
 	/* first page is shared data with GuC */
-	data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
+	data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
 
 	return host2guc_action(guc, data, ARRAY_SIZE(data));
 }
@@ -1078,7 +1118,7 @@
 	data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
 	data[1] = GUC_POWER_D0;
 	/* first page is shared data with GuC */
-	data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
+	data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
 
 	return host2guc_action(guc, data, ARRAY_SIZE(data));
 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1c2aec3..3fc286cd 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -350,6 +350,9 @@
 
 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
+	if (READ_ONCE(dev_priv->rps.interrupts_enabled))
+		return;
+
 	spin_lock_irq(&dev_priv->irq_lock);
 	WARN_ON_ONCE(dev_priv->rps.pm_iir);
 	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
@@ -368,10 +371,13 @@
 
 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
+	if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
+		return;
+
 	spin_lock_irq(&dev_priv->irq_lock);
 	dev_priv->rps.interrupts_enabled = false;
 
-	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
+	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
 
 	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
@@ -656,12 +662,6 @@
  *   of horizontal active on the first line of vertical active
  */
 
-static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
-{
-	/* Gen2 doesn't have a hardware frame counter */
-	return 0;
-}
-
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
@@ -978,10 +978,8 @@
 static void notify_ring(struct intel_engine_cs *engine)
 {
 	smp_store_mb(engine->breadcrumbs.irq_posted, true);
-	if (intel_engine_wakeup(engine)) {
+	if (intel_engine_wakeup(engine))
 		trace_i915_gem_request_notify(engine);
-		engine->breadcrumbs.irq_wakeups++;
-	}
 }
 
 static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@ -1105,9 +1103,10 @@
 	new_delay = dev_priv->rps.cur_freq;
 	min = dev_priv->rps.min_freq_softlimit;
 	max = dev_priv->rps.max_freq_softlimit;
-
-	if (client_boost) {
-		new_delay = dev_priv->rps.max_freq_softlimit;
+	if (client_boost || any_waiters(dev_priv))
+		max = dev_priv->rps.max_freq;
+	if (client_boost && new_delay < dev_priv->rps.boost_freq) {
+		new_delay = dev_priv->rps.boost_freq;
 		adj = 0;
 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
 		if (adj > 0)
@@ -1122,7 +1121,7 @@
 			new_delay = dev_priv->rps.efficient_freq;
 			adj = 0;
 		}
-	} else if (any_waiters(dev_priv)) {
+	} else if (client_boost || any_waiters(dev_priv)) {
 		adj = 0;
 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
 		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
@@ -2504,57 +2503,52 @@
 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
-	int ret;
 
 	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
 
+	DRM_DEBUG_DRIVER("resetting chip\n");
+	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
+
 	/*
-	 * Note that there's only one work item which does gpu resets, so we
-	 * need not worry about concurrent gpu resets potentially incrementing
-	 * error->reset_counter twice. We only need to take care of another
-	 * racing irq/hangcheck declaring the gpu dead for a second time. A
-	 * quick check for that is good enough: schedule_work ensures the
-	 * correct ordering between hang detection and this work item, and since
-	 * the reset in-progress bit is only ever set by code outside of this
-	 * work we don't need to worry about any other races.
+	 * In most cases it's guaranteed that we get here with an RPM
+	 * reference held, for example because there is a pending GPU
+	 * request that won't finish until the reset is done. This
+	 * isn't the case at least when we get here by doing a
+	 * simulated reset via debugs, so get an RPM reference.
 	 */
-	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
-		DRM_DEBUG_DRIVER("resetting chip\n");
-		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
+	intel_runtime_pm_get(dev_priv);
+	intel_prepare_reset(dev_priv);
 
-		/*
-		 * In most cases it's guaranteed that we get here with an RPM
-		 * reference held, for example because there is a pending GPU
-		 * request that won't finish until the reset is done. This
-		 * isn't the case at least when we get here by doing a
-		 * simulated reset via debugs, so get an RPM reference.
-		 */
-		intel_runtime_pm_get(dev_priv);
-
-		intel_prepare_reset(dev_priv);
-
+	do {
 		/*
 		 * All state reset _must_ be completed before we update the
 		 * reset counter, for otherwise waiters might miss the reset
 		 * pending state and not properly drop locks, resulting in
 		 * deadlocks with the reset work.
 		 */
-		ret = i915_reset(dev_priv);
+		if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
+			i915_reset(dev_priv);
+			mutex_unlock(&dev_priv->drm.struct_mutex);
+		}
 
-		intel_finish_reset(dev_priv);
+		/* We need to wait for anyone holding the lock to wakeup */
+	} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
+				     I915_RESET_IN_PROGRESS,
+				     TASK_UNINTERRUPTIBLE,
+				     HZ));
 
-		intel_runtime_pm_put(dev_priv);
+	intel_finish_reset(dev_priv);
+	intel_runtime_pm_put(dev_priv);
 
-		if (ret == 0)
-			kobject_uevent_env(kobj,
-					   KOBJ_CHANGE, reset_done_event);
+	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
+		kobject_uevent_env(kobj,
+				   KOBJ_CHANGE, reset_done_event);
 
-		/*
-		 * Note: The wake_up also serves as a memory barrier so that
-		 * waiters see the update value of the reset counter atomic_t.
-		 */
-		wake_up_all(&dev_priv->gpu_error.reset_queue);
-	}
+	/*
+	 * Note: The wake_up also serves as a memory barrier so that
+	 * waiters see the updated value of the dev_priv->gpu_error.
+	 */
+	wake_up_all(&dev_priv->gpu_error.reset_queue);
 }
 
 static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
@@ -2673,25 +2667,26 @@
 	i915_capture_error_state(dev_priv, engine_mask, error_msg);
 	i915_report_and_clear_eir(dev_priv);
 
-	if (engine_mask) {
-		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
-				&dev_priv->gpu_error.reset_counter);
+	if (!engine_mask)
+		return;
 
-		/*
-		 * Wakeup waiting processes so that the reset function
-		 * i915_reset_and_wakeup doesn't deadlock trying to grab
-		 * various locks. By bumping the reset counter first, the woken
-		 * processes will see a reset in progress and back off,
-		 * releasing their locks and then wait for the reset completion.
-		 * We must do this for _all_ gpu waiters that might hold locks
-		 * that the reset work needs to acquire.
-		 *
-		 * Note: The wake_up serves as the required memory barrier to
-		 * ensure that the waiters see the updated value of the reset
-		 * counter atomic_t.
-		 */
-		i915_error_wake_up(dev_priv);
-	}
+	if (test_and_set_bit(I915_RESET_IN_PROGRESS,
+			     &dev_priv->gpu_error.flags))
+		return;
+
+	/*
+	 * Wakeup waiting processes so that the reset function
+	 * i915_reset_and_wakeup doesn't deadlock trying to grab
+	 * various locks. By bumping the reset counter first, the woken
+	 * processes will see a reset in progress and back off,
+	 * releasing their locks and then wait for the reset completion.
+	 * We must do this for _all_ gpu waiters that might hold locks
+	 * that the reset work needs to acquire.
+	 *
+	 * Note: The wake_up also provides a memory barrier to ensure that the
+	 * waiters see the updated value of the reset flags.
+	 */
+	i915_error_wake_up(dev_priv);
 
 	i915_reset_and_wakeup(dev_priv);
 }
@@ -2804,13 +2799,6 @@
 }
 
 static bool
-ring_idle(struct intel_engine_cs *engine, u32 seqno)
-{
-	return i915_seqno_passed(seqno,
-				 READ_ONCE(engine->last_submitted_seqno));
-}
-
-static bool
 ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
 {
 	if (INTEL_GEN(engine->i915) >= 8) {
@@ -2834,7 +2822,7 @@
 			if (engine == signaller)
 				continue;
 
-			if (offset == signaller->semaphore.signal_ggtt[engine->id])
+			if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
 				return signaller;
 		}
 	} else {
@@ -2844,21 +2832,22 @@
 			if(engine == signaller)
 				continue;
 
-			if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
+			if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
 				return signaller;
 		}
 	}
 
-	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
-		  engine->id, ipehr, offset);
+	DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
+			 engine->name, ipehr, offset);
 
-	return NULL;
+	return ERR_PTR(-ENODEV);
 }
 
 static struct intel_engine_cs *
 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
+	void __iomem *vaddr;
 	u32 cmd, ipehr, head;
 	u64 offset = 0;
 	int i, backwards;
@@ -2897,6 +2886,7 @@
 	 */
 	head = I915_READ_HEAD(engine) & HEAD_ADDR;
 	backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
+	vaddr = (void __iomem *)engine->buffer->vaddr;
 
 	for (i = backwards; i; --i) {
 		/*
@@ -2907,7 +2897,7 @@
 		head &= engine->buffer->size - 1;
 
 		/* This here seems to blow up */
-		cmd = ioread32(engine->buffer->virtual_start + head);
+		cmd = ioread32(vaddr + head);
 		if (cmd == ipehr)
 			break;
 
@@ -2917,11 +2907,11 @@
 	if (!i)
 		return NULL;
 
-	*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
+	*seqno = ioread32(vaddr + head + 4) + 1;
 	if (INTEL_GEN(dev_priv) >= 8) {
-		offset = ioread32(engine->buffer->virtual_start + head + 12);
+		offset = ioread32(vaddr + head + 12);
 		offset <<= 32;
-		offset = ioread32(engine->buffer->virtual_start + head + 8);
+		offset |= ioread32(vaddr + head + 8);
 	}
 	return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
 }
@@ -2938,6 +2928,9 @@
 	if (signaller == NULL)
 		return -1;
 
+	if (IS_ERR(signaller))
+		return 0;
+
 	/* Prevent pathological recursion due to driver bugs */
 	if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
 		return -1;
@@ -2990,7 +2983,7 @@
 	return stuck;
 }
 
-static enum intel_ring_hangcheck_action
+static enum intel_engine_hangcheck_action
 head_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
 	if (acthd != engine->hangcheck.acthd) {
@@ -3008,11 +3001,11 @@
 	return HANGCHECK_HUNG;
 }
 
-static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *engine, u64 acthd)
+static enum intel_engine_hangcheck_action
+engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	enum intel_ring_hangcheck_action ha;
+	enum intel_engine_hangcheck_action ha;
 	u32 tmp;
 
 	ha = head_stuck(engine, acthd);
@@ -3054,22 +3047,6 @@
 	return HANGCHECK_HUNG;
 }
 
-static unsigned long kick_waiters(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *i915 = engine->i915;
-	unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups);
-
-	if (engine->hangcheck.user_interrupts == irq_count &&
-	    !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
-		if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
-			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
-				  engine->name);
-
-		intel_engine_enable_fake_irq(engine);
-	}
-
-	return irq_count;
-}
 /*
  * This is called when the chip hasn't reported back with completed
  * batchbuffers in a long time. We keep track per ring seqno progress and
@@ -3107,7 +3084,7 @@
 		bool busy = intel_engine_has_waiter(engine);
 		u64 acthd;
 		u32 seqno;
-		unsigned user_interrupts;
+		u32 submit;
 
 		semaphore_clear_deadlocks(dev_priv);
 
@@ -3121,29 +3098,22 @@
 		if (engine->irq_seqno_barrier)
 			engine->irq_seqno_barrier(engine);
 
-		acthd = intel_ring_get_active_head(engine);
+		acthd = intel_engine_get_active_head(engine);
 		seqno = intel_engine_get_seqno(engine);
-
-		/* Reset stuck interrupts between batch advances */
-		user_interrupts = 0;
+		submit = READ_ONCE(engine->last_submitted_seqno);
 
 		if (engine->hangcheck.seqno == seqno) {
-			if (ring_idle(engine, seqno)) {
+			if (i915_seqno_passed(seqno, submit)) {
 				engine->hangcheck.action = HANGCHECK_IDLE;
-				if (busy) {
-					/* Safeguard against driver failure */
-					user_interrupts = kick_waiters(engine);
-					engine->hangcheck.score += BUSY;
-				}
 			} else {
 				/* We always increment the hangcheck score
-				 * if the ring is busy and still processing
+				 * if the engine is busy and still processing
 				 * the same request, so that no single request
 				 * can run indefinitely (such as a chain of
 				 * batches). The only time we do not increment
 				 * the hangcheck score on this ring, if this
-				 * ring is in a legitimate wait for another
-				 * ring. In that case the waiting ring is a
+				 * engine is in a legitimate wait for another
+				 * engine. In that case the waiting engine is a
 				 * victim and we want to be sure we catch the
 				 * right culprit. Then every time we do kick
 				 * the ring, add a small increment to the
@@ -3151,8 +3121,8 @@
 				 * being repeatedly kicked and so responsible
 				 * for stalling the machine.
 				 */
-				engine->hangcheck.action = ring_stuck(engine,
-								      acthd);
+				engine->hangcheck.action =
+					engine_stuck(engine, acthd);
 
 				switch (engine->hangcheck.action) {
 				case HANGCHECK_IDLE:
@@ -3195,12 +3165,12 @@
 
 		engine->hangcheck.seqno = seqno;
 		engine->hangcheck.acthd = acthd;
-		engine->hangcheck.user_interrupts = user_interrupts;
 		busy_count += busy;
 	}
 
 	if (hung) {
 		char msg[80];
+		unsigned int tmp;
 		int len;
 
 		/* If some rings hung but others were still busy, only
@@ -3210,7 +3180,7 @@
 			hung &= ~stuck;
 		len = scnprintf(msg, sizeof(msg),
 				"%s on ", stuck == hung ? "No progress" : "Hang");
-		for_each_engine_masked(engine, dev_priv, hung)
+		for_each_engine_masked(engine, dev_priv, hung, tmp)
 			len += scnprintf(msg + len, sizeof(msg) - len,
 					 "%s, ", engine->name);
 		msg[len-2] = '\0';
@@ -4536,14 +4506,15 @@
 		dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
 
 	if (INTEL_INFO(dev_priv)->gen >= 8)
-		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
 
 	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
 			  i915_hangcheck_elapsed);
 
 	if (IS_GEN2(dev_priv)) {
+		/* Gen2 doesn't have a hardware frame counter */
 		dev->max_vblank_count = 0;
-		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
+		dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
 	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
 		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
new file mode 100644
index 0000000..49a0794
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <asm/fpu/api.h>
+
+#include "i915_drv.h"
+
+static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
+
+#ifdef CONFIG_AS_MOVNTDQA
+static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
+{
+	kernel_fpu_begin();
+
+	len >>= 4;
+	while (len >= 4) {
+		asm("movntdqa   (%0), %%xmm0\n"
+		    "movntdqa 16(%0), %%xmm1\n"
+		    "movntdqa 32(%0), %%xmm2\n"
+		    "movntdqa 48(%0), %%xmm3\n"
+		    "movaps %%xmm0,   (%1)\n"
+		    "movaps %%xmm1, 16(%1)\n"
+		    "movaps %%xmm2, 32(%1)\n"
+		    "movaps %%xmm3, 48(%1)\n"
+		    :: "r" (src), "r" (dst) : "memory");
+		src += 64;
+		dst += 64;
+		len -= 4;
+	}
+	while (len--) {
+		asm("movntdqa (%0), %%xmm0\n"
+		    "movaps %%xmm0, (%1)\n"
+		    :: "r" (src), "r" (dst) : "memory");
+		src += 16;
+		dst += 16;
+	}
+
+	kernel_fpu_end();
+}
+#endif
+
+/**
+ * i915_memcpy_from_wc: perform an accelerated *aligned* read from WC
+ * @dst: destination pointer
+ * @src: source pointer
+ * @len: how many bytes to copy
+ *
+ * i915_memcpy_from_wc copies @len bytes from @src to @dst using
+ * non-temporal instructions where available. Note that all arguments
+ * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple
+ * of 16.
+ *
+ * To test whether accelerated reads from WC are supported, use
+ * i915_memcpy_from_wc(NULL, NULL, 0);
+ *
+ * Returns true if the copy was successful, false if the preconditions
+ * are not met.
+ */
+bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len)
+{
+	if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
+		return false;
+
+#ifdef CONFIG_AS_MOVNTDQA
+	if (static_branch_likely(&has_movntdqa)) {
+		if (likely(len))
+			__memcpy_ntdqa(dst, src, len);
+		return true;
+	}
+#endif
+
+	return false;
+}
+
+void i915_memcpy_init_early(struct drm_i915_private *dev_priv)
+{
+	if (static_cpu_has(X86_FEATURE_XMM4_1))
+		static_branch_enable(&has_movntdqa);
+}
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
new file mode 100644
index 0000000..e4935dd
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/io-mapping.h>
+
+#include <asm/pgtable.h>
+
+#include "i915_drv.h"
+
+struct remap_pfn {
+	struct mm_struct *mm;
+	unsigned long pfn;
+	pgprot_t prot;
+};
+
+static int remap_pfn(pte_t *pte, pgtable_t token,
+		     unsigned long addr, void *data)
+{
+	struct remap_pfn *r = data;
+
+	/* Special PTE are not associated with any struct page */
+	set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
+	r->pfn++;
+
+	return 0;
+}
+
+/**
+ * remap_io_mapping - remap an IO mapping to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @pfn: physical address of kernel memory
+ * @size: size of map area
+ * @iomap: the source io_mapping
+ *
+ *  Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_mapping(struct vm_area_struct *vma,
+		     unsigned long addr, unsigned long pfn, unsigned long size,
+		     struct io_mapping *iomap)
+{
+	struct remap_pfn r;
+	int err;
+
+	GEM_BUG_ON((vma->vm_flags &
+		    (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)) !=
+		   (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP));
+
+	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+	r.mm = vma->vm_mm;
+	r.pfn = pfn;
+	r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
+			  (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
+
+	err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
+	if (unlikely(err)) {
+		zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
+		return err;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index b6e404c..768ad89 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -45,6 +45,7 @@
 	.fastboot = 0,
 	.prefault_disable = 0,
 	.load_detect_test = 0,
+	.force_reset_modeset_test = 0,
 	.reset = true,
 	.invert_brightness = 0,
 	.disable_display = 0,
@@ -161,6 +162,11 @@
 	"Force-enable the VGA load detect code for testing (default:false). "
 	"For developers only.");
 
+module_param_named_unsafe(force_reset_modeset_test, i915.force_reset_modeset_test, bool, 0600);
+MODULE_PARM_DESC(force_reset_modeset_test,
+	"Force a modeset during gpu reset for testing (default:false). "
+	"For developers only.");
+
 module_param_named_unsafe(invert_brightness, i915.invert_brightness, int, 0600);
 MODULE_PARM_DESC(invert_brightness,
 	"Invert backlight brightness "
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 0ad020b..3a0dd78 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -57,6 +57,7 @@
 	bool fastboot;
 	bool prefault_disable;
 	bool load_detect_test;
+	bool force_reset_modeset_test;
 	bool reset;
 	bool disable_display;
 	bool verbose_state_checks;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 949c016..687c768 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -54,207 +54,216 @@
 #define CHV_COLORS \
 	.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
 
+#define GEN2_FEATURES \
+	.gen = 2, .num_pipes = 1, \
+	.has_overlay = 1, .overlay_needs_physical = 1, \
+	.has_gmch_display = 1, \
+	.hws_needs_physical = 1, \
+	.ring_mask = RENDER_RING, \
+	GEN_DEFAULT_PIPEOFFSETS, \
+	CURSOR_OFFSETS
+
 static const struct intel_device_info intel_i830_info = {
-	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
-	.has_overlay = 1, .overlay_needs_physical = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	GEN2_FEATURES,
+	.is_mobile = 1, .cursor_needs_physical = 1,
+	.num_pipes = 2, /* legal, last one wins */
 };
 
 static const struct intel_device_info intel_845g_info = {
-	.gen = 2, .num_pipes = 1,
-	.has_overlay = 1, .overlay_needs_physical = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	GEN2_FEATURES,
 };
 
 static const struct intel_device_info intel_i85x_info = {
-	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
+	GEN2_FEATURES,
+	.is_i85x = 1, .is_mobile = 1,
+	.num_pipes = 2, /* legal, last one wins */
 	.cursor_needs_physical = 1,
-	.has_overlay = 1, .overlay_needs_physical = 1,
 	.has_fbc = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_i865g_info = {
-	.gen = 2, .num_pipes = 1,
-	.has_overlay = 1, .overlay_needs_physical = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	GEN2_FEATURES,
 };
 
+#define GEN3_FEATURES \
+	.gen = 3, .num_pipes = 2, \
+	.has_gmch_display = 1, \
+	.ring_mask = RENDER_RING, \
+	GEN_DEFAULT_PIPEOFFSETS, \
+	CURSOR_OFFSETS
+
 static const struct intel_device_info intel_i915g_info = {
-	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+	GEN3_FEATURES,
+	.is_i915g = 1, .cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	.hws_needs_physical = 1,
 };
 static const struct intel_device_info intel_i915gm_info = {
-	.gen = 3, .is_mobile = 1, .num_pipes = 2,
+	GEN3_FEATURES,
+	.is_mobile = 1,
 	.cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
 	.has_fbc = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	.hws_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945g_info = {
-	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+	GEN3_FEATURES,
+	.has_hotplug = 1, .cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	.hws_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945gm_info = {
-	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
+	GEN3_FEATURES,
+	.is_i945gm = 1, .is_mobile = 1,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
 	.has_fbc = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	.hws_needs_physical = 1,
 };
 
+#define GEN4_FEATURES \
+	.gen = 4, .num_pipes = 2, \
+	.has_hotplug = 1, \
+	.has_gmch_display = 1, \
+	.ring_mask = RENDER_RING, \
+	GEN_DEFAULT_PIPEOFFSETS, \
+	CURSOR_OFFSETS
+
 static const struct intel_device_info intel_i965g_info = {
-	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
-	.has_hotplug = 1,
+	GEN4_FEATURES,
+	.is_broadwater = 1,
 	.has_overlay = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	.hws_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
-	.gen = 4, .is_crestline = 1, .num_pipes = 2,
-	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
+	GEN4_FEATURES,
+	.is_crestline = 1,
+	.is_mobile = 1, .has_fbc = 1,
 	.has_overlay = 1,
 	.supports_tv = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	.hws_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_g33_info = {
-	.gen = 3, .is_g33 = 1, .num_pipes = 2,
-	.need_gfx_hws = 1, .has_hotplug = 1,
+	GEN3_FEATURES,
+	.is_g33 = 1,
+	.has_hotplug = 1,
 	.has_overlay = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_g45_info = {
-	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
-	.has_pipe_cxsr = 1, .has_hotplug = 1,
+	GEN4_FEATURES,
+	.is_g4x = 1,
+	.has_pipe_cxsr = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_gm45_info = {
-	.gen = 4, .is_g4x = 1, .num_pipes = 2,
-	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
-	.has_pipe_cxsr = 1, .has_hotplug = 1,
+	GEN4_FEATURES,
+	.is_g4x = 1,
+	.is_mobile = 1, .has_fbc = 1,
+	.has_pipe_cxsr = 1,
 	.supports_tv = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_pineview_info = {
-	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
-	.need_gfx_hws = 1, .has_hotplug = 1,
+	GEN3_FEATURES,
+	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
+	.has_hotplug = 1,
 	.has_overlay = 1,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
 };
 
+#define GEN5_FEATURES \
+	.gen = 5, .num_pipes = 2, \
+	.has_hotplug = 1, \
+	.has_gmbus_irq = 1, \
+	.ring_mask = RENDER_RING | BSD_RING, \
+	GEN_DEFAULT_PIPEOFFSETS, \
+	CURSOR_OFFSETS
+
 static const struct intel_device_info intel_ironlake_d_info = {
-	.gen = 5, .num_pipes = 2,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.ring_mask = RENDER_RING | BSD_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	GEN5_FEATURES,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
-	.gen = 5, .is_mobile = 1, .num_pipes = 2,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_fbc = 1,
-	.ring_mask = RENDER_RING | BSD_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	GEN5_FEATURES,
+	.is_mobile = 1,
 };
 
+#define GEN6_FEATURES \
+	.gen = 6, .num_pipes = 2, \
+	.has_hotplug = 1, \
+	.has_fbc = 1, \
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+	.has_llc = 1, \
+	.has_rc6 = 1, \
+	.has_rc6p = 1, \
+	.has_gmbus_irq = 1, \
+	.has_hw_contexts = 1, \
+	GEN_DEFAULT_PIPEOFFSETS, \
+	CURSOR_OFFSETS
+
 static const struct intel_device_info intel_sandybridge_d_info = {
-	.gen = 6, .num_pipes = 2,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_fbc = 1,
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
-	.has_llc = 1,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	GEN6_FEATURES,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
-	.gen = 6, .is_mobile = 1, .num_pipes = 2,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_fbc = 1,
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
-	.has_llc = 1,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	GEN6_FEATURES,
+	.is_mobile = 1,
 };
 
 #define GEN7_FEATURES  \
 	.gen = 7, .num_pipes = 3, \
-	.need_gfx_hws = 1, .has_hotplug = 1, \
+	.has_hotplug = 1, \
 	.has_fbc = 1, \
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
 	.has_llc = 1, \
+	.has_rc6 = 1, \
+	.has_rc6p = 1, \
+	.has_gmbus_irq = 1, \
+	.has_hw_contexts = 1, \
 	GEN_DEFAULT_PIPEOFFSETS, \
 	IVB_CURSOR_OFFSETS
 
 static const struct intel_device_info intel_ivybridge_d_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
+	.has_l3_dpf = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_m_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
 	.is_mobile = 1,
+	.has_l3_dpf = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_q_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
 	.num_pipes = 0, /* legal, last one wins */
+	.has_l3_dpf = 1,
 };
 
 #define VLV_FEATURES  \
 	.gen = 7, .num_pipes = 2, \
-	.need_gfx_hws = 1, .has_hotplug = 1, \
+	.has_psr = 1, \
+	.has_runtime_pm = 1, \
+	.has_rc6 = 1, \
+	.has_gmbus_irq = 1, \
+	.has_hw_contexts = 1, \
+	.has_gmch_display = 1, \
+	.has_hotplug = 1, \
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
 	.display_mmio_offset = VLV_DISPLAY_BASE, \
 	GEN_DEFAULT_PIPEOFFSETS, \
 	CURSOR_OFFSETS
 
-static const struct intel_device_info intel_valleyview_m_info = {
-	VLV_FEATURES,
-	.is_valleyview = 1,
-	.is_mobile = 1,
-};
-
-static const struct intel_device_info intel_valleyview_d_info = {
+static const struct intel_device_info intel_valleyview_info = {
 	VLV_FEATURES,
 	.is_valleyview = 1,
 };
@@ -263,54 +272,50 @@
 	GEN7_FEATURES, \
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
 	.has_ddi = 1, \
-	.has_fpga_dbg = 1
+	.has_fpga_dbg = 1, \
+	.has_psr = 1, \
+	.has_resource_streamer = 1, \
+	.has_dp_mst = 1, \
+	.has_rc6p = 0 /* RC6p removed-by HSW */, \
+	.has_runtime_pm = 1
 
-static const struct intel_device_info intel_haswell_d_info = {
+static const struct intel_device_info intel_haswell_info = {
 	HSW_FEATURES,
 	.is_haswell = 1,
-};
-
-static const struct intel_device_info intel_haswell_m_info = {
-	HSW_FEATURES,
-	.is_haswell = 1,
-	.is_mobile = 1,
+	.has_l3_dpf = 1,
 };
 
 #define BDW_FEATURES \
 	HSW_FEATURES, \
-	BDW_COLORS
+	BDW_COLORS, \
+	.has_logical_ring_contexts = 1
 
-static const struct intel_device_info intel_broadwell_d_info = {
+static const struct intel_device_info intel_broadwell_info = {
 	BDW_FEATURES,
 	.gen = 8,
 	.is_broadwell = 1,
 };
 
-static const struct intel_device_info intel_broadwell_m_info = {
-	BDW_FEATURES,
-	.gen = 8, .is_mobile = 1,
-	.is_broadwell = 1,
-};
-
-static const struct intel_device_info intel_broadwell_gt3d_info = {
+static const struct intel_device_info intel_broadwell_gt3_info = {
 	BDW_FEATURES,
 	.gen = 8,
 	.is_broadwell = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
-static const struct intel_device_info intel_broadwell_gt3m_info = {
-	BDW_FEATURES,
-	.gen = 8, .is_mobile = 1,
-	.is_broadwell = 1,
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
-
 static const struct intel_device_info intel_cherryview_info = {
 	.gen = 8, .num_pipes = 3,
-	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.is_cherryview = 1,
+	.has_psr = 1,
+	.has_runtime_pm = 1,
+	.has_resource_streamer = 1,
+	.has_rc6 = 1,
+	.has_gmbus_irq = 1,
+	.has_hw_contexts = 1,
+	.has_logical_ring_contexts = 1,
+	.has_gmch_display = 1,
 	.display_mmio_offset = VLV_DISPLAY_BASE,
 	GEN_CHV_PIPEOFFSETS,
 	CURSOR_OFFSETS,
@@ -321,25 +326,41 @@
 	BDW_FEATURES,
 	.is_skylake = 1,
 	.gen = 9,
+	.has_csr = 1,
+	.has_guc = 1,
+	.ddb_size = 896,
 };
 
 static const struct intel_device_info intel_skylake_gt3_info = {
 	BDW_FEATURES,
 	.is_skylake = 1,
 	.gen = 9,
+	.has_csr = 1,
+	.has_guc = 1,
+	.ddb_size = 896,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
 static const struct intel_device_info intel_broxton_info = {
 	.is_broxton = 1,
 	.gen = 9,
-	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.num_pipes = 3,
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
 	.has_fbc = 1,
+	.has_runtime_pm = 1,
 	.has_pooled_eu = 0,
+	.has_csr = 1,
+	.has_resource_streamer = 1,
+	.has_rc6 = 1,
+	.has_dp_mst = 1,
+	.has_gmbus_irq = 1,
+	.has_hw_contexts = 1,
+	.has_logical_ring_contexts = 1,
+	.has_guc = 1,
+	.ddb_size = 512,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
 	BDW_COLORS,
@@ -349,12 +370,18 @@
 	BDW_FEATURES,
 	.is_kabylake = 1,
 	.gen = 9,
+	.has_csr = 1,
+	.has_guc = 1,
+	.ddb_size = 896,
 };
 
 static const struct intel_device_info intel_kabylake_gt3_info = {
 	BDW_FEATURES,
 	.is_kabylake = 1,
 	.gen = 9,
+	.has_csr = 1,
+	.has_guc = 1,
+	.ddb_size = 896,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
@@ -386,14 +413,10 @@
 	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
 	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
 	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
-	INTEL_HSW_D_IDS(&intel_haswell_d_info),
-	INTEL_HSW_M_IDS(&intel_haswell_m_info),
-	INTEL_VLV_M_IDS(&intel_valleyview_m_info),
-	INTEL_VLV_D_IDS(&intel_valleyview_d_info),
-	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
-	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
-	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
-	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
+	INTEL_HSW_IDS(&intel_haswell_info),
+	INTEL_VLV_IDS(&intel_valleyview_info),
+	INTEL_BDW_GT12_IDS(&intel_broadwell_info),
+	INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info),
 	INTEL_CHV_IDS(&intel_cherryview_info),
 	INTEL_SKL_GT1_IDS(&intel_skylake_info),
 	INTEL_SKL_GT2_IDS(&intel_skylake_info),
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index bf2cad3..70d9616 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -186,13 +186,13 @@
 #define  GEN9_GRDOM_GUC			(1 << 5)
 #define  GEN8_GRDOM_MEDIA2		(1 << 7)
 
-#define RING_PP_DIR_BASE(ring)		_MMIO((ring)->mmio_base+0x228)
-#define RING_PP_DIR_BASE_READ(ring)	_MMIO((ring)->mmio_base+0x518)
-#define RING_PP_DIR_DCLV(ring)		_MMIO((ring)->mmio_base+0x220)
+#define RING_PP_DIR_BASE(engine)	_MMIO((engine)->mmio_base+0x228)
+#define RING_PP_DIR_BASE_READ(engine)	_MMIO((engine)->mmio_base+0x518)
+#define RING_PP_DIR_DCLV(engine)	_MMIO((engine)->mmio_base+0x220)
 #define   PP_DIR_DCLV_2G		0xffffffff
 
-#define GEN8_RING_PDP_UDW(ring, n)	_MMIO((ring)->mmio_base+0x270 + (n) * 8 + 4)
-#define GEN8_RING_PDP_LDW(ring, n)	_MMIO((ring)->mmio_base+0x270 + (n) * 8)
+#define GEN8_RING_PDP_UDW(engine, n)	_MMIO((engine)->mmio_base+0x270 + (n) * 8 + 4)
+#define GEN8_RING_PDP_LDW(engine, n)	_MMIO((engine)->mmio_base+0x270 + (n) * 8)
 
 #define GEN8_R_PWR_CLK_STATE		_MMIO(0x20C8)
 #define   GEN8_RPCS_ENABLE		(1 << 31)
@@ -1648,7 +1648,7 @@
 #define   ARB_MODE_BWGTLB_DISABLE (1<<9)
 #define   ARB_MODE_SWIZZLE_BDW	(1<<1)
 #define RENDER_HWS_PGA_GEN7	_MMIO(0x04080)
-#define RING_FAULT_REG(ring)	_MMIO(0x4094 + 0x100*(ring)->id)
+#define RING_FAULT_REG(engine)	_MMIO(0x4094 + 0x100*(engine)->hw_id)
 #define   RING_FAULT_GTTSEL_MASK (1<<11)
 #define   RING_FAULT_SRCID(x)	(((x) >> 3) & 0xff)
 #define   RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
@@ -1846,7 +1846,7 @@
 
 #define GFX_MODE	_MMIO(0x2520)
 #define GFX_MODE_GEN7	_MMIO(0x229c)
-#define RING_MODE_GEN7(ring)	_MMIO((ring)->mmio_base+0x29c)
+#define RING_MODE_GEN7(engine)	_MMIO((engine)->mmio_base+0x29c)
 #define   GFX_RUN_LIST_ENABLE		(1<<15)
 #define   GFX_INTERRUPT_STEERING	(1<<14)
 #define   GFX_TLB_INVALIDATE_EXPLICIT	(1<<13)
@@ -3660,8 +3660,17 @@
 #define   VIDEO_DIP_ENABLE_SPD_HSW	(1 << 0)
 
 /* Panel power sequencing */
-#define PP_STATUS	_MMIO(0x61200)
-#define   PP_ON		(1 << 31)
+#define PPS_BASE			0x61200
+#define VLV_PPS_BASE			(VLV_DISPLAY_BASE + PPS_BASE)
+#define PCH_PPS_BASE			0xC7200
+
+#define _MMIO_PPS(pps_idx, reg)		_MMIO(dev_priv->pps_mmio_base -	\
+					      PPS_BASE + (reg) +	\
+					      (pps_idx) * 0x100)
+
+#define _PP_STATUS			0x61200
+#define PP_STATUS(pps_idx)		_MMIO_PPS(pps_idx, _PP_STATUS)
+#define   PP_ON				(1 << 31)
 /*
  * Indicates that all dependencies of the panel are on:
  *
@@ -3669,14 +3678,14 @@
  * - pipe enabled
  * - LVDS/DVOB/DVOC on
  */
-#define   PP_READY		(1 << 30)
-#define   PP_SEQUENCE_NONE	(0 << 28)
-#define   PP_SEQUENCE_POWER_UP	(1 << 28)
-#define   PP_SEQUENCE_POWER_DOWN (2 << 28)
-#define   PP_SEQUENCE_MASK	(3 << 28)
-#define   PP_SEQUENCE_SHIFT	28
-#define   PP_CYCLE_DELAY_ACTIVE	(1 << 27)
-#define   PP_SEQUENCE_STATE_MASK 0x0000000f
+#define   PP_READY			(1 << 30)
+#define   PP_SEQUENCE_NONE		(0 << 28)
+#define   PP_SEQUENCE_POWER_UP		(1 << 28)
+#define   PP_SEQUENCE_POWER_DOWN	(2 << 28)
+#define   PP_SEQUENCE_MASK		(3 << 28)
+#define   PP_SEQUENCE_SHIFT		28
+#define   PP_CYCLE_DELAY_ACTIVE		(1 << 27)
+#define   PP_SEQUENCE_STATE_MASK	0x0000000f
 #define   PP_SEQUENCE_STATE_OFF_IDLE	(0x0 << 0)
 #define   PP_SEQUENCE_STATE_OFF_S0_1	(0x1 << 0)
 #define   PP_SEQUENCE_STATE_OFF_S0_2	(0x2 << 0)
@@ -3686,11 +3695,46 @@
 #define   PP_SEQUENCE_STATE_ON_S1_2	(0xa << 0)
 #define   PP_SEQUENCE_STATE_ON_S1_3	(0xb << 0)
 #define   PP_SEQUENCE_STATE_RESET	(0xf << 0)
-#define PP_CONTROL	_MMIO(0x61204)
-#define   POWER_TARGET_ON	(1 << 0)
-#define PP_ON_DELAYS	_MMIO(0x61208)
-#define PP_OFF_DELAYS	_MMIO(0x6120c)
-#define PP_DIVISOR	_MMIO(0x61210)
+
+#define _PP_CONTROL			0x61204
+#define PP_CONTROL(pps_idx)		_MMIO_PPS(pps_idx, _PP_CONTROL)
+#define  PANEL_UNLOCK_REGS		(0xabcd << 16)
+#define  PANEL_UNLOCK_MASK		(0xffff << 16)
+#define  BXT_POWER_CYCLE_DELAY_MASK	0x1f0
+#define  BXT_POWER_CYCLE_DELAY_SHIFT	4
+#define  EDP_FORCE_VDD			(1 << 3)
+#define  EDP_BLC_ENABLE			(1 << 2)
+#define  PANEL_POWER_RESET		(1 << 1)
+#define  PANEL_POWER_OFF		(0 << 0)
+#define  PANEL_POWER_ON			(1 << 0)
+
+#define _PP_ON_DELAYS			0x61208
+#define PP_ON_DELAYS(pps_idx)		_MMIO_PPS(pps_idx, _PP_ON_DELAYS)
+#define  PANEL_PORT_SELECT_SHIFT	30
+#define  PANEL_PORT_SELECT_MASK		(3 << 30)
+#define  PANEL_PORT_SELECT_LVDS		(0 << 30)
+#define  PANEL_PORT_SELECT_DPA		(1 << 30)
+#define  PANEL_PORT_SELECT_DPC		(2 << 30)
+#define  PANEL_PORT_SELECT_DPD		(3 << 30)
+#define  PANEL_PORT_SELECT_VLV(port)	((port) << 30)
+#define  PANEL_POWER_UP_DELAY_MASK	0x1fff0000
+#define  PANEL_POWER_UP_DELAY_SHIFT	16
+#define  PANEL_LIGHT_ON_DELAY_MASK	0x1fff
+#define  PANEL_LIGHT_ON_DELAY_SHIFT	0
+
+#define _PP_OFF_DELAYS			0x6120C
+#define PP_OFF_DELAYS(pps_idx)		_MMIO_PPS(pps_idx, _PP_OFF_DELAYS)
+#define  PANEL_POWER_DOWN_DELAY_MASK	0x1fff0000
+#define  PANEL_POWER_DOWN_DELAY_SHIFT	16
+#define  PANEL_LIGHT_OFF_DELAY_MASK	0x1fff
+#define  PANEL_LIGHT_OFF_DELAY_SHIFT	0
+
+#define _PP_DIVISOR			0x61210
+#define PP_DIVISOR(pps_idx)		_MMIO_PPS(pps_idx, _PP_DIVISOR)
+#define  PP_REFERENCE_DIVIDER_MASK	0xffffff00
+#define  PP_REFERENCE_DIVIDER_SHIFT	8
+#define  PANEL_POWER_CYCLE_DELAY_MASK	0x1f
+#define  PANEL_POWER_CYCLE_DELAY_SHIFT	0
 
 /* Panel fitting */
 #define PFIT_CONTROL	_MMIO(dev_priv->info.display_mmio_offset + 0x61230)
@@ -6133,6 +6177,7 @@
 # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC	((1<<10) | (1<<26))
 # define GEN9_RHWO_OPTIMIZATION_DISABLE		(1<<14)
 #define COMMON_SLICE_CHICKEN2			_MMIO(0x7014)
+# define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12)
 # define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
 # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE	(1<<0)
 
@@ -6749,77 +6794,6 @@
 #define PCH_LVDS	_MMIO(0xe1180)
 #define  LVDS_DETECTED	(1 << 1)
 
-/* vlv has 2 sets of panel control regs. */
-#define _PIPEA_PP_STATUS         (VLV_DISPLAY_BASE + 0x61200)
-#define _PIPEA_PP_CONTROL        (VLV_DISPLAY_BASE + 0x61204)
-#define _PIPEA_PP_ON_DELAYS      (VLV_DISPLAY_BASE + 0x61208)
-#define  PANEL_PORT_SELECT_VLV(port)	((port) << 30)
-#define _PIPEA_PP_OFF_DELAYS     (VLV_DISPLAY_BASE + 0x6120c)
-#define _PIPEA_PP_DIVISOR        (VLV_DISPLAY_BASE + 0x61210)
-
-#define _PIPEB_PP_STATUS         (VLV_DISPLAY_BASE + 0x61300)
-#define _PIPEB_PP_CONTROL        (VLV_DISPLAY_BASE + 0x61304)
-#define _PIPEB_PP_ON_DELAYS      (VLV_DISPLAY_BASE + 0x61308)
-#define _PIPEB_PP_OFF_DELAYS     (VLV_DISPLAY_BASE + 0x6130c)
-#define _PIPEB_PP_DIVISOR        (VLV_DISPLAY_BASE + 0x61310)
-
-#define VLV_PIPE_PP_STATUS(pipe)	_MMIO_PIPE(pipe, _PIPEA_PP_STATUS, _PIPEB_PP_STATUS)
-#define VLV_PIPE_PP_CONTROL(pipe)	_MMIO_PIPE(pipe, _PIPEA_PP_CONTROL, _PIPEB_PP_CONTROL)
-#define VLV_PIPE_PP_ON_DELAYS(pipe)	_MMIO_PIPE(pipe, _PIPEA_PP_ON_DELAYS, _PIPEB_PP_ON_DELAYS)
-#define VLV_PIPE_PP_OFF_DELAYS(pipe)	_MMIO_PIPE(pipe, _PIPEA_PP_OFF_DELAYS, _PIPEB_PP_OFF_DELAYS)
-#define VLV_PIPE_PP_DIVISOR(pipe)	_MMIO_PIPE(pipe, _PIPEA_PP_DIVISOR, _PIPEB_PP_DIVISOR)
-
-#define _PCH_PP_STATUS		0xc7200
-#define _PCH_PP_CONTROL		0xc7204
-#define  PANEL_UNLOCK_REGS	(0xabcd << 16)
-#define  PANEL_UNLOCK_MASK	(0xffff << 16)
-#define  BXT_POWER_CYCLE_DELAY_MASK	(0x1f0)
-#define  BXT_POWER_CYCLE_DELAY_SHIFT	4
-#define  EDP_FORCE_VDD		(1 << 3)
-#define  EDP_BLC_ENABLE		(1 << 2)
-#define  PANEL_POWER_RESET	(1 << 1)
-#define  PANEL_POWER_OFF	(0 << 0)
-#define  PANEL_POWER_ON		(1 << 0)
-#define _PCH_PP_ON_DELAYS	0xc7208
-#define  PANEL_PORT_SELECT_MASK	(3 << 30)
-#define  PANEL_PORT_SELECT_LVDS	(0 << 30)
-#define  PANEL_PORT_SELECT_DPA	(1 << 30)
-#define  PANEL_PORT_SELECT_DPC	(2 << 30)
-#define  PANEL_PORT_SELECT_DPD	(3 << 30)
-#define  PANEL_POWER_UP_DELAY_MASK	(0x1fff0000)
-#define  PANEL_POWER_UP_DELAY_SHIFT	16
-#define  PANEL_LIGHT_ON_DELAY_MASK	(0x1fff)
-#define  PANEL_LIGHT_ON_DELAY_SHIFT	0
-
-#define _PCH_PP_OFF_DELAYS		0xc720c
-#define  PANEL_POWER_DOWN_DELAY_MASK	(0x1fff0000)
-#define  PANEL_POWER_DOWN_DELAY_SHIFT	16
-#define  PANEL_LIGHT_OFF_DELAY_MASK	(0x1fff)
-#define  PANEL_LIGHT_OFF_DELAY_SHIFT	0
-
-#define _PCH_PP_DIVISOR			0xc7210
-#define  PP_REFERENCE_DIVIDER_MASK	(0xffffff00)
-#define  PP_REFERENCE_DIVIDER_SHIFT	8
-#define  PANEL_POWER_CYCLE_DELAY_MASK	(0x1f)
-#define  PANEL_POWER_CYCLE_DELAY_SHIFT	0
-
-#define PCH_PP_STATUS			_MMIO(_PCH_PP_STATUS)
-#define PCH_PP_CONTROL			_MMIO(_PCH_PP_CONTROL)
-#define PCH_PP_ON_DELAYS		_MMIO(_PCH_PP_ON_DELAYS)
-#define PCH_PP_OFF_DELAYS		_MMIO(_PCH_PP_OFF_DELAYS)
-#define PCH_PP_DIVISOR			_MMIO(_PCH_PP_DIVISOR)
-
-/* BXT PPS changes - 2nd set of PPS registers */
-#define _BXT_PP_STATUS2 	0xc7300
-#define _BXT_PP_CONTROL2 	0xc7304
-#define _BXT_PP_ON_DELAYS2	0xc7308
-#define _BXT_PP_OFF_DELAYS2	0xc730c
-
-#define BXT_PP_STATUS(n)	_MMIO_PIPE(n, _PCH_PP_STATUS, _BXT_PP_STATUS2)
-#define BXT_PP_CONTROL(n)	_MMIO_PIPE(n, _PCH_PP_CONTROL, _BXT_PP_CONTROL2)
-#define BXT_PP_ON_DELAYS(n)	_MMIO_PIPE(n, _PCH_PP_ON_DELAYS, _BXT_PP_ON_DELAYS2)
-#define BXT_PP_OFF_DELAYS(n)	_MMIO_PIPE(n, _PCH_PP_OFF_DELAYS, _BXT_PP_OFF_DELAYS2)
-
 #define _PCH_DP_B		0xe4100
 #define PCH_DP_B		_MMIO(_PCH_DP_B)
 #define _PCH_DPB_AUX_CH_CTL	0xe4110
@@ -6959,6 +6933,9 @@
 #define  ECOBUS					_MMIO(0xa180)
 #define    FORCEWAKE_MT_ENABLE			(1<<5)
 #define  VLV_SPAREG2H				_MMIO(0xA194)
+#define  GEN9_PWRGT_DOMAIN_STATUS		_MMIO(0xA2A0)
+#define   GEN9_PWRGT_MEDIA_STATUS_MASK		(1 << 0)
+#define   GEN9_PWRGT_RENDER_STATUS_MASK		(1 << 1)
 
 #define  GTFIFODBG				_MMIO(0x120000)
 #define    GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV	(0x1f << 20)
@@ -7059,12 +7036,13 @@
 #define GEN6_RP_UP_THRESHOLD			_MMIO(0xA02C)
 #define GEN6_RP_DOWN_THRESHOLD			_MMIO(0xA030)
 #define GEN6_RP_CUR_UP_EI			_MMIO(0xA050)
-#define   GEN6_CURICONT_MASK			0xffffff
+#define   GEN6_RP_EI_MASK			0xffffff
+#define   GEN6_CURICONT_MASK			GEN6_RP_EI_MASK
 #define GEN6_RP_CUR_UP				_MMIO(0xA054)
-#define   GEN6_CURBSYTAVG_MASK			0xffffff
+#define   GEN6_CURBSYTAVG_MASK			GEN6_RP_EI_MASK
 #define GEN6_RP_PREV_UP				_MMIO(0xA058)
 #define GEN6_RP_CUR_DOWN_EI			_MMIO(0xA05C)
-#define   GEN6_CURIAVG_MASK			0xffffff
+#define   GEN6_CURIAVG_MASK			GEN6_RP_EI_MASK
 #define GEN6_RP_CUR_DOWN			_MMIO(0xA060)
 #define GEN6_RP_PREV_DOWN			_MMIO(0xA064)
 #define GEN6_RP_UP_EI				_MMIO(0xA068)
@@ -7089,7 +7067,7 @@
 #define VLV_RCEDATA				_MMIO(0xA0BC)
 #define GEN6_RC6pp_THRESHOLD			_MMIO(0xA0C0)
 #define GEN6_PMINTRMSK				_MMIO(0xA168)
-#define   GEN8_PMINTR_REDIRECT_TO_NON_DISP	(1<<31)
+#define   GEN8_PMINTR_REDIRECT_TO_GUC		  (1<<31)
 #define GEN8_MISC_CTRL0				_MMIO(0xA180)
 #define VLV_PWRDWNUPCTL				_MMIO(0xA294)
 #define GEN9_MEDIA_PG_IDLE_HYSTERESIS		_MMIO(0xA0C4)
@@ -7499,6 +7477,7 @@
 #define _DDI_BUF_TRANS_A		0x64E00
 #define _DDI_BUF_TRANS_B		0x64E60
 #define DDI_BUF_TRANS_LO(port, i)	_MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8)
+#define  DDI_BUF_BALANCE_LEG_ENABLE	(1 << 31)
 #define DDI_BUF_TRANS_HI(port, i)	_MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4)
 
 /* Sideband Interface (SBI) is programmed indirectly, via
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5cfe4c7..a0af170 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -37,25 +37,6 @@
 	if (INTEL_INFO(dev)->gen <= 4)
 		dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
 
-	/* LVDS state */
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-		dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
-	else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
-		dev_priv->regfile.saveLVDS = I915_READ(LVDS);
-
-	/* Panel power sequencer */
-	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
-		dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
-		dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
-		dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
-	} else if (INTEL_INFO(dev)->gen <= 4) {
-		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
-		dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
-		dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
-		dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
-	}
-
 	/* save FBC interval */
 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
 		dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
@@ -64,33 +45,11 @@
 static void i915_restore_display(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	u32 mask = 0xffffffff;
 
 	/* Display arbitration */
 	if (INTEL_INFO(dev)->gen <= 4)
 		I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
 
-	mask = ~LVDS_PORT_EN;
-
-	/* LVDS state */
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-		I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
-	else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
-		I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
-
-	/* Panel power sequencer */
-	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
-		I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
-		I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
-		I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
-	} else if (INTEL_INFO(dev)->gen <= 4) {
-		I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
-		I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
-		I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
-		I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
-	}
-
 	/* only restore FBC info on the platform that supports FBC*/
 	intel_fbc_global_disable(dev_priv);
 
@@ -104,6 +63,7 @@
 int i915_save_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	int i;
 
 	mutex_lock(&dev->struct_mutex);
@@ -111,7 +71,7 @@
 	i915_save_display(dev);
 
 	if (IS_GEN4(dev))
-		pci_read_config_word(dev->pdev, GCDGMBUS,
+		pci_read_config_word(pdev, GCDGMBUS,
 				     &dev_priv->regfile.saveGCDGMBUS);
 
 	/* Cache mode state */
@@ -149,6 +109,7 @@
 int i915_restore_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	int i;
 
 	mutex_lock(&dev->struct_mutex);
@@ -156,7 +117,7 @@
 	i915_gem_restore_fences(dev);
 
 	if (IS_GEN4(dev))
-		pci_write_config_word(dev->pdev, GCDGMBUS,
+		pci_write_config_word(pdev, GCDGMBUS,
 				      dev_priv->regfile.saveGCDGMBUS);
 	i915_restore_display(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
new file mode 100644
index 0000000..1e5cbc5
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -0,0 +1,362 @@
+/*
+ * (C) Copyright 2016 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/slab.h>
+#include <linux/fence.h>
+#include <linux/reservation.h>
+
+#include "i915_sw_fence.h"
+
+static DEFINE_SPINLOCK(i915_sw_fence_lock);
+
+static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
+				  enum i915_sw_fence_notify state)
+{
+	i915_sw_fence_notify_t fn;
+
+	fn = (i915_sw_fence_notify_t)(fence->flags & I915_SW_FENCE_MASK);
+	return fn(fence, state);
+}
+
+static void i915_sw_fence_free(struct kref *kref)
+{
+	struct i915_sw_fence *fence = container_of(kref, typeof(*fence), kref);
+
+	WARN_ON(atomic_read(&fence->pending) > 0);
+
+	if (fence->flags & I915_SW_FENCE_MASK)
+		__i915_sw_fence_notify(fence, FENCE_FREE);
+	else
+		kfree(fence);
+}
+
+static void i915_sw_fence_put(struct i915_sw_fence *fence)
+{
+	kref_put(&fence->kref, i915_sw_fence_free);
+}
+
+static struct i915_sw_fence *i915_sw_fence_get(struct i915_sw_fence *fence)
+{
+	kref_get(&fence->kref);
+	return fence;
+}
+
+static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
+					struct list_head *continuation)
+{
+	wait_queue_head_t *x = &fence->wait;
+	wait_queue_t *pos, *next;
+	unsigned long flags;
+
+	atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */
+
+	/*
+	 * To prevent unbounded recursion as we traverse the graph of
+	 * i915_sw_fences, we move the task_list from this, the next ready
+	 * fence, to the tail of the original fence's task_list
+	 * (and so added to the list to be woken).
+	 */
+
+	spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
+	if (continuation) {
+		list_for_each_entry_safe(pos, next, &x->task_list, task_list) {
+			if (pos->func == autoremove_wake_function)
+				pos->func(pos, TASK_NORMAL, 0, continuation);
+			else
+				list_move_tail(&pos->task_list, continuation);
+		}
+	} else {
+		LIST_HEAD(extra);
+
+		do {
+			list_for_each_entry_safe(pos, next,
+						 &x->task_list, task_list)
+				pos->func(pos, TASK_NORMAL, 0, &extra);
+
+			if (list_empty(&extra))
+				break;
+
+			list_splice_tail_init(&extra, &x->task_list);
+		} while (1);
+	}
+	spin_unlock_irqrestore(&x->lock, flags);
+}
+
+static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
+				     struct list_head *continuation)
+{
+	if (!atomic_dec_and_test(&fence->pending))
+		return;
+
+	if (fence->flags & I915_SW_FENCE_MASK &&
+	    __i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
+		return;
+
+	__i915_sw_fence_wake_up_all(fence, continuation);
+}
+
+static void i915_sw_fence_complete(struct i915_sw_fence *fence)
+{
+	if (WARN_ON(i915_sw_fence_done(fence)))
+		return;
+
+	__i915_sw_fence_complete(fence, NULL);
+}
+
+static void i915_sw_fence_await(struct i915_sw_fence *fence)
+{
+	WARN_ON(atomic_inc_return(&fence->pending) <= 1);
+}
+
+void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn)
+{
+	BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK);
+
+	init_waitqueue_head(&fence->wait);
+	kref_init(&fence->kref);
+	atomic_set(&fence->pending, 1);
+	fence->flags = (unsigned long)fn;
+}
+
+void i915_sw_fence_commit(struct i915_sw_fence *fence)
+{
+	i915_sw_fence_complete(fence);
+	i915_sw_fence_put(fence);
+}
+
+static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key)
+{
+	list_del(&wq->task_list);
+	__i915_sw_fence_complete(wq->private, key);
+	i915_sw_fence_put(wq->private);
+	return 0;
+}
+
+static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
+				    const struct i915_sw_fence * const signaler)
+{
+	wait_queue_t *wq;
+
+	if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
+		return false;
+
+	if (fence == signaler)
+		return true;
+
+	list_for_each_entry(wq, &fence->wait.task_list, task_list) {
+		if (wq->func != i915_sw_fence_wake)
+			continue;
+
+		if (__i915_sw_fence_check_if_after(wq->private, signaler))
+			return true;
+	}
+
+	return false;
+}
+
+static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
+{
+	wait_queue_t *wq;
+
+	if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
+		return;
+
+	list_for_each_entry(wq, &fence->wait.task_list, task_list) {
+		if (wq->func != i915_sw_fence_wake)
+			continue;
+
+		__i915_sw_fence_clear_checked_bit(wq->private);
+	}
+}
+
+static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
+				  const struct i915_sw_fence * const signaler)
+{
+	unsigned long flags;
+	bool err;
+
+	if (!IS_ENABLED(CONFIG_I915_SW_FENCE_CHECK_DAG))
+		return false;
+
+	spin_lock_irqsave(&i915_sw_fence_lock, flags);
+	err = __i915_sw_fence_check_if_after(fence, signaler);
+	__i915_sw_fence_clear_checked_bit(fence);
+	spin_unlock_irqrestore(&i915_sw_fence_lock, flags);
+
+	return err;
+}
+
+int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+				 struct i915_sw_fence *signaler,
+				 wait_queue_t *wq)
+{
+	unsigned long flags;
+	int pending;
+
+	if (i915_sw_fence_done(signaler))
+		return 0;
+
+	/* The dependency graph must be acyclic. */
+	if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
+		return -EINVAL;
+
+	INIT_LIST_HEAD(&wq->task_list);
+	wq->flags = 0;
+	wq->func = i915_sw_fence_wake;
+	wq->private = i915_sw_fence_get(fence);
+
+	i915_sw_fence_await(fence);
+
+	spin_lock_irqsave(&signaler->wait.lock, flags);
+	if (likely(!i915_sw_fence_done(signaler))) {
+		__add_wait_queue_tail(&signaler->wait, wq);
+		pending = 1;
+	} else {
+		i915_sw_fence_wake(wq, 0, 0, NULL);
+		pending = 0;
+	}
+	spin_unlock_irqrestore(&signaler->wait.lock, flags);
+
+	return pending;
+}
+
+struct dma_fence_cb {
+	struct fence_cb base;
+	struct i915_sw_fence *fence;
+	struct fence *dma;
+	struct timer_list timer;
+};
+
+static void timer_i915_sw_fence_wake(unsigned long data)
+{
+	struct dma_fence_cb *cb = (struct dma_fence_cb *)data;
+
+	printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n",
+	       cb->dma->ops->get_driver_name(cb->dma),
+	       cb->dma->ops->get_timeline_name(cb->dma),
+	       cb->dma->seqno);
+	fence_put(cb->dma);
+	cb->dma = NULL;
+
+	i915_sw_fence_commit(cb->fence);
+	cb->timer.function = NULL;
+}
+
+static void dma_i915_sw_fence_wake(struct fence *dma, struct fence_cb *data)
+{
+	struct dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+
+	del_timer_sync(&cb->timer);
+	if (cb->timer.function)
+		i915_sw_fence_commit(cb->fence);
+	fence_put(cb->dma);
+
+	kfree(cb);
+}
+
+int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+				  struct fence *dma,
+				  unsigned long timeout,
+				  gfp_t gfp)
+{
+	struct dma_fence_cb *cb;
+	int ret;
+
+	if (fence_is_signaled(dma))
+		return 0;
+
+	cb = kmalloc(sizeof(*cb), gfp);
+	if (!cb) {
+		if (!gfpflags_allow_blocking(gfp))
+			return -ENOMEM;
+
+		return fence_wait(dma, false);
+	}
+
+	cb->fence = i915_sw_fence_get(fence);
+	i915_sw_fence_await(fence);
+
+	cb->dma = NULL;
+	__setup_timer(&cb->timer,
+		      timer_i915_sw_fence_wake, (unsigned long)cb,
+		      TIMER_IRQSAFE);
+	if (timeout) {
+		cb->dma = fence_get(dma);
+		mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
+	}
+
+	ret = fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
+	if (ret == 0) {
+		ret = 1;
+	} else {
+		dma_i915_sw_fence_wake(dma, &cb->base);
+		if (ret == -ENOENT) /* fence already signaled */
+			ret = 0;
+	}
+
+	return ret;
+}
+
+int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
+				    struct reservation_object *resv,
+				    const struct fence_ops *exclude,
+				    bool write,
+				    unsigned long timeout,
+				    gfp_t gfp)
+{
+	struct fence *excl;
+	int ret = 0, pending;
+
+	if (write) {
+		struct fence **shared;
+		unsigned int count, i;
+
+		ret = reservation_object_get_fences_rcu(resv,
+							&excl, &count, &shared);
+		if (ret)
+			return ret;
+
+		for (i = 0; i < count; i++) {
+			if (shared[i]->ops == exclude)
+				continue;
+
+			pending = i915_sw_fence_await_dma_fence(fence,
+								shared[i],
+								timeout,
+								gfp);
+			if (pending < 0) {
+				ret = pending;
+				break;
+			}
+
+			ret |= pending;
+		}
+
+		for (i = 0; i < count; i++)
+			fence_put(shared[i]);
+		kfree(shared);
+	} else {
+		excl = reservation_object_get_excl_rcu(resv);
+	}
+
+	if (ret >= 0 && excl && excl->ops != exclude) {
+		pending = i915_sw_fence_await_dma_fence(fence,
+							excl,
+							timeout,
+							gfp);
+		if (pending < 0)
+			ret = pending;
+		else
+			ret |= pending;
+	}
+
+	fence_put(excl);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
new file mode 100644
index 0000000..3731416
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -0,0 +1,65 @@
+/*
+ * i915_sw_fence.h - library routines for N:M synchronisation points
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#ifndef _I915_SW_FENCE_H_
+#define _I915_SW_FENCE_H_
+
+#include <linux/gfp.h>
+#include <linux/kref.h>
+#include <linux/notifier.h> /* for NOTIFY_DONE */
+#include <linux/wait.h>
+
+struct completion;
+struct fence;
+struct fence_ops;
+struct reservation_object;
+
+struct i915_sw_fence {
+	wait_queue_head_t wait;
+	unsigned long flags;
+	struct kref kref;
+	atomic_t pending;
+};
+
+#define I915_SW_FENCE_CHECKED_BIT	0 /* used internally for DAG checking */
+#define I915_SW_FENCE_PRIVATE_BIT	1 /* available for use by owner */
+#define I915_SW_FENCE_MASK		(~3)
+
+enum i915_sw_fence_notify {
+	FENCE_COMPLETE,
+	FENCE_FREE
+};
+
+typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *,
+				      enum i915_sw_fence_notify state);
+#define __i915_sw_fence_call __aligned(4)
+
+void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn);
+void i915_sw_fence_commit(struct i915_sw_fence *fence);
+
+int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+				 struct i915_sw_fence *after,
+				 wait_queue_t *wq);
+int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+				  struct fence *dma,
+				  unsigned long timeout,
+				  gfp_t gfp);
+int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
+				    struct reservation_object *resv,
+				    const struct fence_ops *exclude,
+				    bool write,
+				    unsigned long timeout,
+				    gfp_t gfp);
+
+static inline bool i915_sw_fence_done(const struct i915_sw_fence *fence)
+{
+	return atomic_read(&fence->pending) < 0;
+}
+
+#endif /* _I915_SW_FENCE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index d61829e..1012eee 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,13 +32,16 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
-#define dev_to_drm_minor(d) dev_get_drvdata((d))
+static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
+{
+	struct drm_minor *minor = dev_get_drvdata(kdev);
+	return to_i915(minor->dev);
+}
 
 #ifdef CONFIG_PM
-static u32 calc_residency(struct drm_device *dev,
+static u32 calc_residency(struct drm_i915_private *dev_priv,
 			  i915_reg_t reg)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	u64 raw_time; /* 32b value may overflow during fixed point math */
 	u64 units = 128ULL, div = 100000ULL;
 	u32 ret;
@@ -49,13 +52,13 @@
 	intel_runtime_pm_get(dev_priv);
 
 	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		units = 1;
 		div = dev_priv->czclk_freq;
 
 		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
 			units <<= 8;
-	} else if (IS_BROXTON(dev)) {
+	} else if (IS_BROXTON(dev_priv)) {
 		units = 1;
 		div = 1200;		/* 833.33ns */
 	}
@@ -76,32 +79,32 @@
 static ssize_t
 show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = dev_get_drvdata(kdev);
-	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
 }
 
 static ssize_t
 show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = dev_to_drm_minor(kdev);
-	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
 }
 
 static ssize_t
 show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = dev_to_drm_minor(kdev);
-	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
 }
 
 static ssize_t
 show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = dev_get_drvdata(kdev);
-	u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
 }
 
@@ -144,9 +147,9 @@
 };
 #endif
 
-static int l3_access_valid(struct drm_device *dev, loff_t offset)
+static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
 {
-	if (!HAS_L3_DPF(dev))
+	if (!HAS_L3_DPF(dev_priv))
 		return -EPERM;
 
 	if (offset % 4 != 0)
@@ -163,22 +166,21 @@
 	     struct bin_attribute *attr, char *buf,
 	     loff_t offset, size_t count)
 {
-	struct device *dev = kobj_to_dev(kobj);
-	struct drm_minor *dminor = dev_to_drm_minor(dev);
-	struct drm_device *drm_dev = dminor->dev;
-	struct drm_i915_private *dev_priv = to_i915(drm_dev);
+	struct device *kdev = kobj_to_dev(kobj);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct drm_device *dev = &dev_priv->drm;
 	int slice = (int)(uintptr_t)attr->private;
 	int ret;
 
 	count = round_down(count, 4);
 
-	ret = l3_access_valid(drm_dev, offset);
+	ret = l3_access_valid(dev_priv, offset);
 	if (ret)
 		return ret;
 
 	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
 
-	ret = i915_mutex_lock_interruptible(drm_dev);
+	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
@@ -189,7 +191,7 @@
 	else
 		memset(buf, 0, count);
 
-	mutex_unlock(&drm_dev->struct_mutex);
+	mutex_unlock(&dev->struct_mutex);
 
 	return count;
 }
@@ -199,30 +201,29 @@
 	      struct bin_attribute *attr, char *buf,
 	      loff_t offset, size_t count)
 {
-	struct device *dev = kobj_to_dev(kobj);
-	struct drm_minor *dminor = dev_to_drm_minor(dev);
-	struct drm_device *drm_dev = dminor->dev;
-	struct drm_i915_private *dev_priv = to_i915(drm_dev);
+	struct device *kdev = kobj_to_dev(kobj);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct drm_device *dev = &dev_priv->drm;
 	struct i915_gem_context *ctx;
 	u32 *temp = NULL; /* Just here to make handling failures easy */
 	int slice = (int)(uintptr_t)attr->private;
 	int ret;
 
-	if (!HAS_HW_CONTEXTS(drm_dev))
+	if (!HAS_HW_CONTEXTS(dev_priv))
 		return -ENXIO;
 
-	ret = l3_access_valid(drm_dev, offset);
+	ret = l3_access_valid(dev_priv, offset);
 	if (ret)
 		return ret;
 
-	ret = i915_mutex_lock_interruptible(drm_dev);
+	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
 	if (!dev_priv->l3_parity.remap_info[slice]) {
 		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
 		if (!temp) {
-			mutex_unlock(&drm_dev->struct_mutex);
+			mutex_unlock(&dev->struct_mutex);
 			return -ENOMEM;
 		}
 	}
@@ -240,7 +241,7 @@
 	list_for_each_entry(ctx, &dev_priv->context_list, link)
 		ctx->remap_slice |= (1<<slice);
 
-	mutex_unlock(&drm_dev->struct_mutex);
+	mutex_unlock(&dev->struct_mutex);
 
 	return count;
 }
@@ -266,13 +267,9 @@
 static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 	int ret;
 
-	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
 	intel_runtime_pm_get(dev_priv);
 
 	mutex_lock(&dev_priv->rps.hw_lock);
@@ -300,59 +297,27 @@
 static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int ret;
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
-	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
-	intel_runtime_pm_get(dev_priv);
-
-	mutex_lock(&dev_priv->rps.hw_lock);
-	ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
-	mutex_unlock(&dev_priv->rps.hw_lock);
-
-	intel_runtime_pm_put(dev_priv);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			intel_gpu_freq(dev_priv,
+				       dev_priv->rps.cur_freq));
 }
 
-static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
-				     struct device_attribute *attr, char *buf)
+static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
-	return snprintf(buf, PAGE_SIZE,
-			"%d\n",
-			intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			intel_gpu_freq(dev_priv,
+				       dev_priv->rps.boost_freq));
 }
 
-static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int ret;
-
-	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
-	mutex_lock(&dev_priv->rps.hw_lock);
-	ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
-	mutex_unlock(&dev_priv->rps.hw_lock);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
-}
-
-static ssize_t gt_max_freq_mhz_store(struct device *kdev,
-				     struct device_attribute *attr,
-				     const char *buf, size_t count)
-{
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 	u32 val;
 	ssize_t ret;
 
@@ -360,7 +325,48 @@
 	if (ret)
 		return ret;
 
-	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+	/* Validate against (static) hardware limits */
+	val = intel_freq_opcode(dev_priv, val);
+	if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq)
+		return -EINVAL;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	dev_priv->rps.boost_freq = val;
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return count;
+}
+
+static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			intel_gpu_freq(dev_priv,
+				       dev_priv->rps.efficient_freq));
+}
+
+static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			intel_gpu_freq(dev_priv,
+				       dev_priv->rps.max_freq_softlimit));
+}
+
+static ssize_t gt_max_freq_mhz_store(struct device *kdev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	u32 val;
+	ssize_t ret;
+
+	ret = kstrtou32(buf, 0, &val);
+	if (ret)
+		return ret;
 
 	intel_runtime_pm_get(dev_priv);
 
@@ -400,27 +406,18 @@
 
 static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int ret;
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
-	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
-	mutex_lock(&dev_priv->rps.hw_lock);
-	ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
-	mutex_unlock(&dev_priv->rps.hw_lock);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			intel_gpu_freq(dev_priv,
+				       dev_priv->rps.min_freq_softlimit));
 }
 
 static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 	u32 val;
 	ssize_t ret;
 
@@ -428,8 +425,6 @@
 	if (ret)
 		return ret;
 
-	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
 	intel_runtime_pm_get(dev_priv);
 
 	mutex_lock(&dev_priv->rps.hw_lock);
@@ -465,6 +460,7 @@
 
 static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
 static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
+static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
 static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
 static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
 
@@ -478,9 +474,7 @@
 /* For now we have a static number of RP states */
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 	u32 val;
 
 	if (attr == &dev_attr_gt_RP0_freq_mhz)
@@ -498,6 +492,7 @@
 static const struct attribute *gen6_attrs[] = {
 	&dev_attr_gt_act_freq_mhz.attr,
 	&dev_attr_gt_cur_freq_mhz.attr,
+	&dev_attr_gt_boost_freq_mhz.attr,
 	&dev_attr_gt_max_freq_mhz.attr,
 	&dev_attr_gt_min_freq_mhz.attr,
 	&dev_attr_gt_RP0_freq_mhz.attr,
@@ -509,6 +504,7 @@
 static const struct attribute *vlv_attrs[] = {
 	&dev_attr_gt_act_freq_mhz.attr,
 	&dev_attr_gt_cur_freq_mhz.attr,
+	&dev_attr_gt_boost_freq_mhz.attr,
 	&dev_attr_gt_max_freq_mhz.attr,
 	&dev_attr_gt_min_freq_mhz.attr,
 	&dev_attr_gt_RP0_freq_mhz.attr,
@@ -524,8 +520,8 @@
 {
 
 	struct device *kdev = kobj_to_dev(kobj);
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct drm_device *dev = &dev_priv->drm;
 	struct i915_error_state_file_priv error_priv;
 	struct drm_i915_error_state_buf error_str;
 	ssize_t ret_count = 0;
@@ -559,18 +555,10 @@
 				 loff_t off, size_t count)
 {
 	struct device *kdev = kobj_to_dev(kobj);
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	int ret;
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
 	DRM_DEBUG_DRIVER("Resetting error state\n");
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	i915_destroy_error_state(dev);
-	mutex_unlock(&dev->struct_mutex);
+	i915_destroy_error_state(&dev_priv->drm);
 
 	return count;
 }
@@ -583,37 +571,38 @@
 	.write = error_state_write,
 };
 
-void i915_setup_sysfs(struct drm_device *dev)
+void i915_setup_sysfs(struct drm_i915_private *dev_priv)
 {
+	struct device *kdev = dev_priv->drm.primary->kdev;
 	int ret;
 
 #ifdef CONFIG_PM
-	if (HAS_RC6(dev)) {
-		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+	if (HAS_RC6(dev_priv)) {
+		ret = sysfs_merge_group(&kdev->kobj,
 					&rc6_attr_group);
 		if (ret)
 			DRM_ERROR("RC6 residency sysfs setup failed\n");
 	}
-	if (HAS_RC6p(dev)) {
-		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+	if (HAS_RC6p(dev_priv)) {
+		ret = sysfs_merge_group(&kdev->kobj,
 					&rc6p_attr_group);
 		if (ret)
 			DRM_ERROR("RC6p residency sysfs setup failed\n");
 	}
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
-		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		ret = sysfs_merge_group(&kdev->kobj,
 					&media_rc6_attr_group);
 		if (ret)
 			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
 	}
 #endif
-	if (HAS_L3_DPF(dev)) {
-		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
+	if (HAS_L3_DPF(dev_priv)) {
+		ret = device_create_bin_file(kdev, &dpf_attrs);
 		if (ret)
 			DRM_ERROR("l3 parity sysfs setup failed\n");
 
-		if (NUM_L3_SLICES(dev) > 1) {
-			ret = device_create_bin_file(dev->primary->kdev,
+		if (NUM_L3_SLICES(dev_priv) > 1) {
+			ret = device_create_bin_file(kdev,
 						     &dpf_attrs_1);
 			if (ret)
 				DRM_ERROR("l3 parity slice 1 setup failed\n");
@@ -621,30 +610,32 @@
 	}
 
 	ret = 0;
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
-	else if (INTEL_INFO(dev)->gen >= 6)
-		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
+	else if (INTEL_GEN(dev_priv) >= 6)
+		ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
 	if (ret)
 		DRM_ERROR("RPS sysfs setup failed\n");
 
-	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
+	ret = sysfs_create_bin_file(&kdev->kobj,
 				    &error_state_attr);
 	if (ret)
 		DRM_ERROR("error_state sysfs setup failed\n");
 }
 
-void i915_teardown_sysfs(struct drm_device *dev)
+void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
 {
-	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
+	struct device *kdev = dev_priv->drm.primary->kdev;
+
+	sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		sysfs_remove_files(&kdev->kobj, vlv_attrs);
 	else
-		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
-	device_remove_bin_file(dev->primary->kdev,  &dpf_attrs_1);
-	device_remove_bin_file(dev->primary->kdev,  &dpf_attrs);
+		sysfs_remove_files(&kdev->kobj, gen6_attrs);
+	device_remove_bin_file(kdev,  &dpf_attrs_1);
+	device_remove_bin_file(kdev,  &dpf_attrs);
 #ifdef CONFIG_PM
-	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
-	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
+	sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
+	sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
 #endif
 }
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 534154e..1787980 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -394,25 +394,27 @@
 );
 
 TRACE_EVENT(i915_gem_evict,
-	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
-	    TP_ARGS(dev, size, align, flags),
+	    TP_PROTO(struct i915_address_space *vm, u32 size, u32 align, unsigned int flags),
+	    TP_ARGS(vm, size, align, flags),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
+			     __field(struct i915_address_space *, vm)
 			     __field(u32, size)
 			     __field(u32, align)
-			     __field(unsigned, flags)
+			     __field(unsigned int, flags)
 			    ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev->primary->index;
+			   __entry->dev = vm->dev->primary->index;
+			   __entry->vm = vm;
 			   __entry->size = size;
 			   __entry->align = align;
 			   __entry->flags = flags;
 			  ),
 
-	    TP_printk("dev=%d, size=%d, align=%d %s",
-		      __entry->dev, __entry->size, __entry->align,
+	    TP_printk("dev=%d, vm=%p, size=%d, align=%d %s",
+		      __entry->dev, __entry->vm, __entry->size, __entry->align,
 		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
 );
 
@@ -449,10 +451,9 @@
 );
 
 TRACE_EVENT(i915_gem_ring_sync_to,
-	    TP_PROTO(struct drm_i915_gem_request *to_req,
-		     struct intel_engine_cs *from,
-		     struct drm_i915_gem_request *req),
-	    TP_ARGS(to_req, from, req),
+	    TP_PROTO(struct drm_i915_gem_request *to,
+		     struct drm_i915_gem_request *from),
+	    TP_ARGS(to, from),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
@@ -463,9 +464,9 @@
 
 	    TP_fast_assign(
 			   __entry->dev = from->i915->drm.primary->index;
-			   __entry->sync_from = from->id;
-			   __entry->sync_to = to_req->engine->id;
-			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   __entry->sync_from = from->engine->id;
+			   __entry->sync_to = to->engine->id;
+			   __entry->seqno = from->fence.seqno;
 			   ),
 
 	    TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@@ -488,9 +489,9 @@
 	    TP_fast_assign(
 			   __entry->dev = req->i915->drm.primary->index;
 			   __entry->ring = req->engine->id;
-			   __entry->seqno = req->seqno;
+			   __entry->seqno = req->fence.seqno;
 			   __entry->flags = flags;
-			   intel_engine_enable_signaling(req);
+			   fence_enable_sw_signaling(&req->fence);
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -533,7 +534,7 @@
 	    TP_fast_assign(
 			   __entry->dev = req->i915->drm.primary->index;
 			   __entry->ring = req->engine->id;
-			   __entry->seqno = req->seqno;
+			   __entry->seqno = req->fence.seqno;
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -595,7 +596,7 @@
 	    TP_fast_assign(
 			   __entry->dev = req->i915->drm.primary->index;
 			   __entry->ring = req->engine->id;
-			   __entry->seqno = req->seqno;
+			   __entry->seqno = req->fence.seqno;
 			   __entry->blocking =
 				     mutex_is_locked(&req->i915->drm.struct_mutex);
 			   ),
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index b81cfb3..dae340c 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -94,6 +94,7 @@
 
 /**
  * intel_vgt_deballoon - deballoon reserved graphics address trunks
+ * @dev_priv: i915 device private data
  *
  * This function is called to deallocate the ballooned-out graphic memory, when
  * driver is unloaded or when ballooning fails.
@@ -135,7 +136,7 @@
 
 /**
  * intel_vgt_balloon - balloon out reserved graphics address trunks
- * @dev: drm device
+ * @dev_priv: i915 device private data
  *
  * This function is called at the initialization stage, to balloon out the
  * graphic address space allocated to other vGPUs, by marking these spaces as
@@ -152,27 +153,27 @@
  * host point of view, the graphic address space is partitioned by multiple
  * vGPUs in different VMs. ::
  *
- *                        vGPU1 view         Host view
- *             0 ------> +-----------+     +-----------+
- *               ^       |###########|     |   vGPU3   |
- *               |       |###########|     +-----------+
- *               |       |###########|     |   vGPU2   |
- *               |       +-----------+     +-----------+
- *        mappable GM    | available | ==> |   vGPU1   |
- *               |       +-----------+     +-----------+
- *               |       |###########|     |           |
- *               v       |###########|     |   Host    |
- *               +=======+===========+     +===========+
- *               ^       |###########|     |   vGPU3   |
- *               |       |###########|     +-----------+
- *               |       |###########|     |   vGPU2   |
- *               |       +-----------+     +-----------+
- *      unmappable GM    | available | ==> |   vGPU1   |
- *               |       +-----------+     +-----------+
- *               |       |###########|     |           |
- *               |       |###########|     |   Host    |
- *               v       |###########|     |           |
- * total GM size ------> +-----------+     +-----------+
+ *                         vGPU1 view         Host view
+ *              0 ------> +-----------+     +-----------+
+ *                ^       |###########|     |   vGPU3   |
+ *                |       |###########|     +-----------+
+ *                |       |###########|     |   vGPU2   |
+ *                |       +-----------+     +-----------+
+ *         mappable GM    | available | ==> |   vGPU1   |
+ *                |       +-----------+     +-----------+
+ *                |       |###########|     |           |
+ *                v       |###########|     |   Host    |
+ *                +=======+===========+     +===========+
+ *                ^       |###########|     |   vGPU3   |
+ *                |       |###########|     +-----------+
+ *                |       |###########|     |   vGPU2   |
+ *                |       +-----------+     +-----------+
+ *       unmappable GM    | available | ==> |   vGPU1   |
+ *                |       +-----------+     +-----------+
+ *                |       |###########|     |           |
+ *                |       |###########|     |   Host    |
+ *                v       |###########|     |           |
+ *  total GM size ------> +-----------+     +-----------+
  *
  * Returns:
  * zero on success, non-zero if configuration invalid or ballooning failed
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 7de7721..b82de30 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -55,7 +55,7 @@
 		return NULL;
 
 	state->base.plane = plane;
-	state->base.rotation = BIT(DRM_ROTATE_0);
+	state->base.rotation = DRM_ROTATE_0;
 	state->ckey.flags = I915_SET_COLORKEY_NONE;
 
 	return state;
@@ -134,20 +134,6 @@
 
 	crtc_state = to_intel_crtc_state(drm_crtc_state);
 
-	/*
-	 * The original src/dest coordinates are stored in state->base, but
-	 * we want to keep another copy internal to our driver that we can
-	 * clip/modify ourselves.
-	 */
-	intel_state->src.x1 = state->src_x;
-	intel_state->src.y1 = state->src_y;
-	intel_state->src.x2 = state->src_x + state->src_w;
-	intel_state->src.y2 = state->src_y + state->src_h;
-	intel_state->dst.x1 = state->crtc_x;
-	intel_state->dst.y1 = state->crtc_y;
-	intel_state->dst.x2 = state->crtc_x + state->crtc_w;
-	intel_state->dst.y2 = state->crtc_y + state->crtc_h;
-
 	/* Clip all planes to CRTC size, or 0x0 if CRTC is disabled */
 	intel_state->clip.x1 = 0;
 	intel_state->clip.y1 = 0;
@@ -157,6 +143,7 @@
 		crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
 
 	if (state->fb && intel_rotation_90_or_270(state->rotation)) {
+		char *format_name;
 		if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
 			state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
 			DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
@@ -171,8 +158,9 @@
 		switch (state->fb->pixel_format) {
 		case DRM_FORMAT_C8:
 		case DRM_FORMAT_RGB565:
-			DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
-					drm_get_format_name(state->fb->pixel_format));
+			format_name = drm_get_format_name(state->fb->pixel_format);
+			DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", format_name);
+			kfree(format_name);
 			return -EINVAL;
 
 		default:
@@ -180,7 +168,7 @@
 		}
 	}
 
-	intel_state->visible = false;
+	intel_state->base.visible = false;
 	ret = intel_plane->check_plane(plane, crtc_state, intel_state);
 	if (ret)
 		return ret;
@@ -196,7 +184,7 @@
 		to_intel_plane_state(plane->state);
 	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
 
-	if (intel_state->visible)
+	if (intel_state->base.visible)
 		intel_plane->update_plane(plane,
 					  to_intel_crtc_state(crtc->state),
 					  intel_state);
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index d32f586..6c70a5b 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -51,10 +51,10 @@
  * related registers. (The notable exception is the power management, not
  * covered here.)
  *
- * The struct i915_audio_component is used to interact between the graphics
- * and audio drivers. The struct i915_audio_component_ops *ops in it is
+ * The struct &i915_audio_component is used to interact between the graphics
+ * and audio drivers. The struct &i915_audio_component_ops @ops in it is
  * defined in graphics driver and called in audio driver. The
- * struct i915_audio_component_audio_ops *audio_ops is called from i915 driver.
+ * struct &i915_audio_component_audio_ops @audio_ops is called from i915 driver.
  */
 
 static const struct {
@@ -359,9 +359,7 @@
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-	struct intel_digital_port *intel_dig_port =
-		enc_to_dig_port(&encoder->base);
-	enum port port = intel_dig_port->port;
+	enum port port = enc_to_dig_port(&encoder->base)->port;
 	enum pipe pipe = intel_crtc->pipe;
 	uint32_t tmp, eldv;
 	i915_reg_t aud_config, aud_cntrl_st2;
@@ -407,13 +405,10 @@
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-	struct intel_digital_port *intel_dig_port =
-		enc_to_dig_port(&encoder->base);
-	enum port port = intel_dig_port->port;
+	enum port port = enc_to_dig_port(&encoder->base)->port;
 	enum pipe pipe = intel_crtc->pipe;
 	uint8_t *eld = connector->eld;
-	uint32_t eldv;
-	uint32_t tmp;
+	uint32_t tmp, eldv;
 	int len, i;
 	i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
 
@@ -581,26 +576,26 @@
 	}
 }
 
-static void i915_audio_component_get_power(struct device *dev)
+static void i915_audio_component_get_power(struct device *kdev)
 {
-	intel_display_power_get(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
+	intel_display_power_get(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
 }
 
-static void i915_audio_component_put_power(struct device *dev)
+static void i915_audio_component_put_power(struct device *kdev)
 {
-	intel_display_power_put(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
+	intel_display_power_put(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
 }
 
-static void i915_audio_component_codec_wake_override(struct device *dev,
+static void i915_audio_component_codec_wake_override(struct device *kdev,
 						     bool enable)
 {
-	struct drm_i915_private *dev_priv = dev_to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	u32 tmp;
 
 	if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
 		return;
 
-	i915_audio_component_get_power(dev);
+	i915_audio_component_get_power(kdev);
 
 	/*
 	 * Enable/disable generating the codec wake signal, overriding the
@@ -618,13 +613,13 @@
 		usleep_range(1000, 1500);
 	}
 
-	i915_audio_component_put_power(dev);
+	i915_audio_component_put_power(kdev);
 }
 
 /* Get CDCLK in kHz  */
-static int i915_audio_component_get_cdclk_freq(struct device *dev)
+static int i915_audio_component_get_cdclk_freq(struct device *kdev)
 {
-	struct drm_i915_private *dev_priv = dev_to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 
 	if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
 		return -ENODEV;
@@ -632,10 +627,10 @@
 	return dev_priv->cdclk_freq;
 }
 
-static int i915_audio_component_sync_audio_rate(struct device *dev,
+static int i915_audio_component_sync_audio_rate(struct device *kdev,
 						int port, int rate)
 {
-	struct drm_i915_private *dev_priv = dev_to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	struct intel_encoder *intel_encoder;
 	struct intel_crtc *crtc;
 	struct drm_display_mode *mode;
@@ -652,7 +647,7 @@
 	    !IS_HASWELL(dev_priv))
 		return 0;
 
-	i915_audio_component_get_power(dev);
+	i915_audio_component_get_power(kdev);
 	mutex_lock(&dev_priv->av_mutex);
 	/* 1. get the pipe */
 	intel_encoder = dev_priv->dig_port_map[port];
@@ -703,15 +698,15 @@
 
  unlock:
 	mutex_unlock(&dev_priv->av_mutex);
-	i915_audio_component_put_power(dev);
+	i915_audio_component_put_power(kdev);
 	return err;
 }
 
-static int i915_audio_component_get_eld(struct device *dev, int port,
+static int i915_audio_component_get_eld(struct device *kdev, int port,
 					bool *enabled,
 					unsigned char *buf, int max_bytes)
 {
-	struct drm_i915_private *dev_priv = dev_to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	struct intel_encoder *intel_encoder;
 	struct intel_digital_port *intel_dig_port;
 	const u8 *eld;
@@ -745,11 +740,11 @@
 	.get_eld	= i915_audio_component_get_eld,
 };
 
-static int i915_audio_component_bind(struct device *i915_dev,
-				     struct device *hda_dev, void *data)
+static int i915_audio_component_bind(struct device *i915_kdev,
+				     struct device *hda_kdev, void *data)
 {
 	struct i915_audio_component *acomp = data;
-	struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
+	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
 	int i;
 
 	if (WARN_ON(acomp->ops || acomp->dev))
@@ -757,7 +752,7 @@
 
 	drm_modeset_lock_all(&dev_priv->drm);
 	acomp->ops = &i915_audio_component_ops;
-	acomp->dev = i915_dev;
+	acomp->dev = i915_kdev;
 	BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
 	for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
 		acomp->aud_sample_rate[i] = 0;
@@ -767,11 +762,11 @@
 	return 0;
 }
 
-static void i915_audio_component_unbind(struct device *i915_dev,
-					struct device *hda_dev, void *data)
+static void i915_audio_component_unbind(struct device *i915_kdev,
+					struct device *hda_kdev, void *data)
 {
 	struct i915_audio_component *acomp = data;
-	struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
+	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
 
 	drm_modeset_lock_all(&dev_priv->drm);
 	acomp->ops = NULL;
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index b074f3d..495611b 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -26,6 +26,40 @@
 
 #include "i915_drv.h"
 
+static void intel_breadcrumbs_hangcheck(unsigned long data)
+{
+	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+	if (!b->irq_enabled)
+		return;
+
+	if (time_before(jiffies, b->timeout)) {
+		mod_timer(&b->hangcheck, b->timeout);
+		return;
+	}
+
+	DRM_DEBUG("Hangcheck timer elapsed... %s idle\n", engine->name);
+	set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
+	mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
+
+	/* Ensure that even if the GPU hangs, we get woken up.
+	 *
+	 * However, note that if no one is waiting, we never notice
+	 * a gpu hang. Eventually, we will have to wait for a resource
+	 * held by the GPU and so trigger a hangcheck. In the most
+	 * pathological case, this will be upon memory starvation! To
+	 * prevent this, we also queue the hangcheck from the retire
+	 * worker.
+	 */
+	i915_queue_hangcheck(engine->i915);
+}
+
+static unsigned long wait_timeout(void)
+{
+	return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
+}
+
 static void intel_breadcrumbs_fake_irq(unsigned long data)
 {
 	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
@@ -37,10 +71,8 @@
 	 * every jiffie in order to kick the oldest waiter to do the
 	 * coherent seqno check.
 	 */
-	rcu_read_lock();
 	if (intel_engine_wakeup(engine))
 		mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
-	rcu_read_unlock();
 }
 
 static void irq_enable(struct intel_engine_cs *engine)
@@ -91,17 +123,13 @@
 	}
 
 	if (!b->irq_enabled ||
-	    test_bit(engine->id, &i915->gpu_error.missed_irq_rings))
+	    test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
 		mod_timer(&b->fake_irq, jiffies + 1);
-
-	/* Ensure that even if the GPU hangs, we get woken up.
-	 *
-	 * However, note that if no one is waiting, we never notice
-	 * a gpu hang. Eventually, we will have to wait for a resource
-	 * held by the GPU and so trigger a hangcheck. In the most
-	 * pathological case, this will be upon memory starvation!
-	 */
-	i915_queue_hangcheck(i915);
+	} else {
+		/* Ensure we never sleep indefinitely */
+		GEM_BUG_ON(!time_after(b->timeout, jiffies));
+		mod_timer(&b->hangcheck, b->timeout);
+	}
 }
 
 static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
@@ -204,7 +232,7 @@
 	}
 	rb_link_node(&wait->node, parent, p);
 	rb_insert_color(&wait->node, &b->waiters);
-	GEM_BUG_ON(!first && !b->irq_seqno_bh);
+	GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh));
 
 	if (completed) {
 		struct rb_node *next = rb_next(completed);
@@ -212,8 +240,9 @@
 		GEM_BUG_ON(!next && !first);
 		if (next && next != &wait->node) {
 			GEM_BUG_ON(first);
+			b->timeout = wait_timeout();
 			b->first_wait = to_wait(next);
-			smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
+			rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
 			/* As there is a delay between reading the current
 			 * seqno, processing the completed tasks and selecting
 			 * the next waiter, we may have missed the interrupt
@@ -238,8 +267,9 @@
 
 	if (first) {
 		GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
+		b->timeout = wait_timeout();
 		b->first_wait = wait;
-		smp_store_mb(b->irq_seqno_bh, wait->tsk);
+		rcu_assign_pointer(b->irq_seqno_bh, wait->tsk);
 		/* After assigning ourselves as the new bottom-half, we must
 		 * perform a cursory check to prevent a missed interrupt.
 		 * Either we miss the interrupt whilst programming the hardware,
@@ -250,7 +280,7 @@
 		 */
 		__intel_breadcrumbs_enable_irq(b);
 	}
-	GEM_BUG_ON(!b->irq_seqno_bh);
+	GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh));
 	GEM_BUG_ON(!b->first_wait);
 	GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);
 
@@ -270,11 +300,6 @@
 	return first;
 }
 
-void intel_engine_enable_fake_irq(struct intel_engine_cs *engine)
-{
-	mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
-}
-
 static inline bool chain_wakeup(struct rb_node *rb, int priority)
 {
 	return rb && to_wait(rb)->tsk->prio <= priority;
@@ -310,7 +335,7 @@
 		const int priority = wakeup_priority(b, wait->tsk);
 		struct rb_node *next;
 
-		GEM_BUG_ON(b->irq_seqno_bh != wait->tsk);
+		GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk);
 
 		/* We are the current bottom-half. Find the next candidate,
 		 * the first waiter in the queue on the remaining oldest
@@ -352,14 +377,15 @@
 			 * the interrupt, or if we have to handle an
 			 * exception rather than a seqno completion.
 			 */
+			b->timeout = wait_timeout();
 			b->first_wait = to_wait(next);
-			smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
+			rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
 			if (b->first_wait->seqno != wait->seqno)
 				__intel_breadcrumbs_enable_irq(b);
-			wake_up_process(b->irq_seqno_bh);
+			wake_up_process(b->first_wait->tsk);
 		} else {
 			b->first_wait = NULL;
-			WRITE_ONCE(b->irq_seqno_bh, NULL);
+			rcu_assign_pointer(b->irq_seqno_bh, NULL);
 			__intel_breadcrumbs_disable_irq(b);
 		}
 	} else {
@@ -373,7 +399,7 @@
 	GEM_BUG_ON(b->first_wait == wait);
 	GEM_BUG_ON(rb_first(&b->waiters) !=
 		   (b->first_wait ? &b->first_wait->node : NULL));
-	GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters));
+	GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
 	spin_unlock(&b->lock);
 }
 
@@ -437,6 +463,10 @@
 			intel_engine_remove_wait(engine,
 						 &request->signaling.wait);
 
+			local_bh_disable();
+			fence_signal(&request->fence);
+			local_bh_enable(); /* kick start the tasklets */
+
 			/* Find the next oldest signal. Note that as we have
 			 * not been holding the lock, another client may
 			 * have installed an even older signal than the one
@@ -452,7 +482,7 @@
 			rb_erase(&request->signaling.node, &b->signals);
 			spin_unlock(&b->lock);
 
-			i915_gem_request_unreference(request);
+			i915_gem_request_put(request);
 		} else {
 			if (kthread_should_stop())
 				break;
@@ -472,18 +502,14 @@
 	struct rb_node *parent, **p;
 	bool first, wakeup;
 
-	if (unlikely(READ_ONCE(request->signaling.wait.tsk)))
-		return;
-
-	spin_lock(&b->lock);
-	if (unlikely(request->signaling.wait.tsk)) {
-		wakeup = false;
-		goto unlock;
-	}
+	/* locked by fence_enable_sw_signaling() */
+	assert_spin_locked(&request->lock);
 
 	request->signaling.wait.tsk = b->signaler;
-	request->signaling.wait.seqno = request->seqno;
-	i915_gem_request_reference(request);
+	request->signaling.wait.seqno = request->fence.seqno;
+	i915_gem_request_get(request);
+
+	spin_lock(&b->lock);
 
 	/* First add ourselves into the list of waiters, but register our
 	 * bottom-half as the signaller thread. As per usual, only the oldest
@@ -504,8 +530,8 @@
 	p = &b->signals.rb_node;
 	while (*p) {
 		parent = *p;
-		if (i915_seqno_passed(request->seqno,
-				      to_signaler(parent)->seqno)) {
+		if (i915_seqno_passed(request->fence.seqno,
+				      to_signaler(parent)->fence.seqno)) {
 			p = &parent->rb_right;
 			first = false;
 		} else {
@@ -517,7 +543,6 @@
 	if (first)
 		smp_store_mb(b->first_signal, request);
 
-unlock:
 	spin_unlock(&b->lock);
 
 	if (wakeup)
@@ -533,6 +558,9 @@
 	setup_timer(&b->fake_irq,
 		    intel_breadcrumbs_fake_irq,
 		    (unsigned long)engine);
+	setup_timer(&b->hangcheck,
+		    intel_breadcrumbs_hangcheck,
+		    (unsigned long)engine);
 
 	/* Spawn a thread to provide a common bottom-half for all signals.
 	 * As this is an asynchronous interface we cannot steal the current
@@ -550,6 +578,36 @@
 	return 0;
 }
 
+static void cancel_fake_irq(struct intel_engine_cs *engine)
+{
+	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+	del_timer_sync(&b->hangcheck);
+	del_timer_sync(&b->fake_irq);
+	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
+}
+
+void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
+{
+	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+	cancel_fake_irq(engine);
+	spin_lock(&b->lock);
+
+	__intel_breadcrumbs_disable_irq(b);
+	if (intel_engine_has_waiter(engine)) {
+		b->timeout = wait_timeout();
+		__intel_breadcrumbs_enable_irq(b);
+		if (READ_ONCE(b->irq_posted))
+			wake_up_process(b->first_wait->tsk);
+	} else {
+		/* sanitize the IMR and unmask any auxiliary interrupts */
+		irq_disable(engine);
+	}
+
+	spin_unlock(&b->lock);
+}
+
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
 {
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
@@ -557,7 +615,7 @@
 	if (!IS_ERR_OR_NULL(b->signaler))
 		kthread_stop(b->signaler);
 
-	del_timer_sync(&b->fake_irq);
+	cancel_fake_irq(engine);
 }
 
 unsigned int intel_kick_waiters(struct drm_i915_private *i915)
@@ -570,11 +628,9 @@
 	 * RCU lock, i.e. as we call wake_up_process() we must be holding the
 	 * rcu_read_lock().
 	 */
-	rcu_read_lock();
 	for_each_engine(engine, i915)
 		if (unlikely(intel_engine_wakeup(engine)))
 			mask |= intel_engine_flag(engine);
-	rcu_read_unlock();
 
 	return mask;
 }
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index bc0fef3..95a7277 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -100,13 +100,14 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int i, pipe = intel_crtc->pipe;
 	uint16_t coeffs[9] = { 0, };
+	struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state);
 
 	if (crtc_state->ctm) {
 		struct drm_color_ctm *ctm =
 			(struct drm_color_ctm *)crtc_state->ctm->data;
 		uint64_t input[9] = { 0, };
 
-		if (intel_crtc->config->limited_color_range) {
+		if (intel_crtc_state->limited_color_range) {
 			ctm_mult_by_limited(input, ctm->matrix);
 		} else {
 			for (i = 0; i < ARRAY_SIZE(input); i++)
@@ -158,7 +159,7 @@
 		 * into consideration.
 		 */
 		for (i = 0; i < 3; i++) {
-			if (intel_crtc->config->limited_color_range)
+			if (intel_crtc_state->limited_color_range)
 				coeffs[i * 3 + i] =
 					I9XX_CSC_COEFF_LIMITED_RANGE;
 			else
@@ -182,7 +183,7 @@
 	if (INTEL_INFO(dev)->gen > 6) {
 		uint16_t postoff = 0;
 
-		if (intel_crtc->config->limited_color_range)
+		if (intel_crtc_state->limited_color_range)
 			postoff = (16 * (1 << 12) / 255) & 0x1fff;
 
 		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
@@ -193,7 +194,7 @@
 	} else {
 		uint32_t mode = CSC_MODE_YUV_TO_RGB;
 
-		if (intel_crtc->config->limited_color_range)
+		if (intel_crtc_state->limited_color_range)
 			mode |= CSC_BLACK_SCREEN_OFFSET;
 
 		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
@@ -263,7 +264,8 @@
 
 /* Loads the legacy palette/gamma unit for the CRTC. */
 static void i9xx_load_luts_internal(struct drm_crtc *crtc,
-				    struct drm_property_blob *blob)
+				    struct drm_property_blob *blob,
+				    struct intel_crtc_state *crtc_state)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -272,7 +274,7 @@
 	int i;
 
 	if (HAS_GMCH_DISPLAY(dev)) {
-		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI))
+		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
 			assert_dsi_pll_enabled(dev_priv);
 		else
 			assert_pll_enabled(dev_priv, pipe);
@@ -305,7 +307,8 @@
 
 static void i9xx_load_luts(struct drm_crtc_state *crtc_state)
 {
-	i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut);
+	i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut,
+				to_intel_crtc_state(crtc_state));
 }
 
 /* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
@@ -323,7 +326,7 @@
 	 * Workaround : Do not read or write the pipe palette/gamma data while
 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
 	 */
-	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
+	if (IS_HASWELL(dev) && intel_crtc_state->ips_enabled &&
 	    (intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
 		hsw_disable_ips(intel_crtc);
 		reenable_ips = true;
@@ -436,7 +439,8 @@
 		/* Turn off degamma/gamma on CGM block. */
 		I915_WRITE(CGM_PIPE_MODE(pipe),
 			   (state->ctm ? CGM_PIPE_MODE_CSC : 0));
-		i9xx_load_luts_internal(crtc, state->gamma_lut);
+		i9xx_load_luts_internal(crtc, state->gamma_lut,
+					to_intel_crtc_state(state));
 		return;
 	}
 
@@ -479,7 +483,7 @@
 	 * Also program a linear LUT in the legacy block (behind the
 	 * CGM block).
 	 */
-	i9xx_load_luts_internal(crtc, NULL);
+	i9xx_load_luts_internal(crtc, NULL, to_intel_crtc_state(state));
 }
 
 void intel_color_load_luts(struct drm_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 827b6ef..dfbcf16 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -143,13 +143,15 @@
 
 /* Note: The caller is required to filter out dpms modes not supported by the
  * platform. */
-static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
+static void intel_crt_set_dpms(struct intel_encoder *encoder,
+			       struct intel_crtc_state *crtc_state,
+			       int mode)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crt *crt = intel_encoder_to_crt(encoder);
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
 	u32 adpa;
 
 	if (INTEL_INFO(dev)->gen >= 5)
@@ -193,23 +195,45 @@
 	I915_WRITE(crt->adpa_reg, adpa);
 }
 
-static void intel_disable_crt(struct intel_encoder *encoder)
+static void intel_disable_crt(struct intel_encoder *encoder,
+			      struct intel_crtc_state *old_crtc_state,
+			      struct drm_connector_state *old_conn_state)
 {
-	intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
+	intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF);
 }
 
-static void pch_disable_crt(struct intel_encoder *encoder)
+static void pch_disable_crt(struct intel_encoder *encoder,
+			    struct intel_crtc_state *old_crtc_state,
+			    struct drm_connector_state *old_conn_state)
 {
 }
 
-static void pch_post_disable_crt(struct intel_encoder *encoder)
+static void pch_post_disable_crt(struct intel_encoder *encoder,
+				 struct intel_crtc_state *old_crtc_state,
+				 struct drm_connector_state *old_conn_state)
 {
-	intel_disable_crt(encoder);
+	intel_disable_crt(encoder, old_crtc_state, old_conn_state);
 }
 
-static void intel_enable_crt(struct intel_encoder *encoder)
+static void hsw_post_disable_crt(struct intel_encoder *encoder,
+				 struct intel_crtc_state *old_crtc_state,
+				 struct drm_connector_state *old_conn_state)
 {
-	intel_crt_set_dpms(encoder, DRM_MODE_DPMS_ON);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+	pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
+
+	lpt_disable_pch_transcoder(dev_priv);
+	lpt_disable_iclkip(dev_priv);
+
+	intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state);
+}
+
+static void intel_enable_crt(struct intel_encoder *encoder,
+			     struct intel_crtc_state *pipe_config,
+			     struct drm_connector_state *conn_state)
+{
+	intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON);
 }
 
 static enum drm_mode_status
@@ -253,7 +277,8 @@
 }
 
 static bool intel_crt_compute_config(struct intel_encoder *encoder,
-				     struct intel_crtc_state *pipe_config)
+				     struct intel_crtc_state *pipe_config,
+				     struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 
@@ -894,6 +919,7 @@
 	if (HAS_DDI(dev)) {
 		crt->base.get_config = hsw_crt_get_config;
 		crt->base.get_hw_state = intel_ddi_get_hw_state;
+		crt->base.post_disable = hsw_post_disable_crt;
 	} else {
 		crt->base.get_config = intel_crt_get_config;
 		crt->base.get_hw_state = intel_crt_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index c3b33a1..1ea0e1f 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -32,13 +32,6 @@
  * onwards to drive newly added DMC (Display microcontroller) in display
  * engine to save and restore the state of display engine when it enter into
  * low-power state and comes back to normal.
- *
- * Firmware loading status will be one of the below states: FW_UNINITIALIZED,
- * FW_LOADED, FW_FAILED.
- *
- * Once the firmware is written into the registers status will be moved from
- * FW_UNINITIALIZED to FW_LOADED and for any erroneous condition status will
- * be moved to FW_FAILED.
  */
 
 #define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 1a7efac..15d47c8 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -301,45 +301,34 @@
 	{ 154, 0x9A, 1, 128, true },	/* 9:	1200		0   */
 };
 
-static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
-				    u32 level, enum port port, int type);
-
-static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
-				 struct intel_digital_port **dig_port,
-				 enum port *port)
+enum port intel_ddi_get_encoder_port(struct intel_encoder *encoder)
 {
-	struct drm_encoder *encoder = &intel_encoder->base;
-
-	switch (intel_encoder->type) {
+	switch (encoder->type) {
 	case INTEL_OUTPUT_DP_MST:
-		*dig_port = enc_to_mst(encoder)->primary;
-		*port = (*dig_port)->port;
-		break;
-	default:
-		WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
-		/* fallthrough and treat as unknown */
+		return enc_to_mst(&encoder->base)->primary->port;
 	case INTEL_OUTPUT_DP:
 	case INTEL_OUTPUT_EDP:
 	case INTEL_OUTPUT_HDMI:
 	case INTEL_OUTPUT_UNKNOWN:
-		*dig_port = enc_to_dig_port(encoder);
-		*port = (*dig_port)->port;
-		break;
+		return enc_to_dig_port(&encoder->base)->port;
 	case INTEL_OUTPUT_ANALOG:
-		*dig_port = NULL;
-		*port = PORT_E;
-		break;
+		return PORT_E;
+	default:
+		MISSING_CASE(encoder->type);
+		return PORT_A;
 	}
 }
 
-enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+static const struct ddi_buf_trans *
+bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 {
-	struct intel_digital_port *dig_port;
-	enum port port;
-
-	ddi_get_encoder_port(intel_encoder, &dig_port, &port);
-
-	return port;
+	if (dev_priv->vbt.edp.low_vswing) {
+		*n_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+		return bdw_ddi_translations_edp;
+	} else {
+		*n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+		return bdw_ddi_translations_dp;
+	}
 }
 
 static const struct ddi_buf_trans *
@@ -424,37 +413,22 @@
 
 /*
  * Starting with Haswell, DDI port buffers must be programmed with correct
- * values in advance. The buffer values are different for FDI and DP modes,
- * but the HDMI/DVI fields are shared among those. So we program the DDI
- * in either FDI or DP modes only, as HDMI connections will work with both
- * of those
+ * values in advance. This function programs the correct values for
+ * DP/eDP/FDI use cases.
  */
-void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
+void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 iboost_bit = 0;
-	int i, n_hdmi_entries, n_dp_entries, n_edp_entries,
-	    size;
-	int hdmi_level;
-	enum port port;
+	int i, n_dp_entries, n_edp_entries, size;
+	enum port port = intel_ddi_get_encoder_port(encoder);
 	const struct ddi_buf_trans *ddi_translations_fdi;
 	const struct ddi_buf_trans *ddi_translations_dp;
 	const struct ddi_buf_trans *ddi_translations_edp;
-	const struct ddi_buf_trans *ddi_translations_hdmi;
 	const struct ddi_buf_trans *ddi_translations;
 
-	port = intel_ddi_get_encoder_port(encoder);
-	hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
-
-	if (IS_BROXTON(dev_priv)) {
-		if (encoder->type != INTEL_OUTPUT_HDMI)
-			return;
-
-		/* Vswing programming for HDMI */
-		bxt_ddi_vswing_sequence(dev_priv, hdmi_level, port,
-					INTEL_OUTPUT_HDMI);
+	if (IS_BROXTON(dev_priv))
 		return;
-	}
 
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 		ddi_translations_fdi = NULL;
@@ -462,12 +436,10 @@
 				skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
 		ddi_translations_edp =
 				skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
-		ddi_translations_hdmi =
-				skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
+
 		/* If we're boosting the current, set bit 31 of trans1 */
-		if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
-		    dev_priv->vbt.ddi_port_info[port].dp_boost_level)
-			iboost_bit = 1<<31;
+		if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+			iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
 
 		if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
 			    port != PORT_A && port != PORT_E &&
@@ -476,35 +448,20 @@
 	} else if (IS_BROADWELL(dev_priv)) {
 		ddi_translations_fdi = bdw_ddi_translations_fdi;
 		ddi_translations_dp = bdw_ddi_translations_dp;
-
-		if (dev_priv->vbt.edp.low_vswing) {
-			ddi_translations_edp = bdw_ddi_translations_edp;
-			n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
-		} else {
-			ddi_translations_edp = bdw_ddi_translations_dp;
-			n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
-		}
-
-		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
-
+		ddi_translations_edp = bdw_get_buf_trans_edp(dev_priv, &n_edp_entries);
 		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
-		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
 	} else if (IS_HASWELL(dev_priv)) {
 		ddi_translations_fdi = hsw_ddi_translations_fdi;
 		ddi_translations_dp = hsw_ddi_translations_dp;
 		ddi_translations_edp = hsw_ddi_translations_dp;
-		ddi_translations_hdmi = hsw_ddi_translations_hdmi;
 		n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
-		n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
 	} else {
 		WARN(1, "ddi translation table missing\n");
 		ddi_translations_edp = bdw_ddi_translations_dp;
 		ddi_translations_fdi = bdw_ddi_translations_fdi;
 		ddi_translations_dp = bdw_ddi_translations_dp;
-		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
 		n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
 		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
-		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
 	}
 
 	switch (encoder->type) {
@@ -513,7 +470,6 @@
 		size = n_edp_entries;
 		break;
 	case INTEL_OUTPUT_DP:
-	case INTEL_OUTPUT_HDMI:
 		ddi_translations = ddi_translations_dp;
 		size = n_dp_entries;
 		break;
@@ -531,14 +487,48 @@
 		I915_WRITE(DDI_BUF_TRANS_HI(port, i),
 			   ddi_translations[i].trans2);
 	}
+}
 
-	if (encoder->type != INTEL_OUTPUT_HDMI)
+/*
+ * Starting with Haswell, DDI port buffers must be programmed with correct
+ * values in advance. This function programs the correct values for
+ * HDMI/DVI use cases.
+ */
+static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	u32 iboost_bit = 0;
+	int n_hdmi_entries, hdmi_level;
+	enum port port = intel_ddi_get_encoder_port(encoder);
+	const struct ddi_buf_trans *ddi_translations_hdmi;
+
+	if (IS_BROXTON(dev_priv))
 		return;
 
+	hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
+
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+		ddi_translations_hdmi = skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
+
+		/* If we're boosting the current, set bit 31 of trans1 */
+		if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level)
+			iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
+	} else if (IS_BROADWELL(dev_priv)) {
+		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+	} else if (IS_HASWELL(dev_priv)) {
+		ddi_translations_hdmi = hsw_ddi_translations_hdmi;
+		n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
+	} else {
+		WARN(1, "ddi translation table missing\n");
+		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+	}
+
 	/* Entry 9 is for HDMI: */
-	I915_WRITE(DDI_BUF_TRANS_LO(port, i),
+	I915_WRITE(DDI_BUF_TRANS_LO(port, 9),
 		   ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
-	I915_WRITE(DDI_BUF_TRANS_HI(port, i),
+	I915_WRITE(DDI_BUF_TRANS_HI(port, 9),
 		   ddi_translations_hdmi[hdmi_level].trans2);
 }
 
@@ -556,6 +546,27 @@
 	DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
 }
 
+static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll)
+{
+	switch (pll->id) {
+	case DPLL_ID_WRPLL1:
+		return PORT_CLK_SEL_WRPLL1;
+	case DPLL_ID_WRPLL2:
+		return PORT_CLK_SEL_WRPLL2;
+	case DPLL_ID_SPLL:
+		return PORT_CLK_SEL_SPLL;
+	case DPLL_ID_LCPLL_810:
+		return PORT_CLK_SEL_LCPLL_810;
+	case DPLL_ID_LCPLL_1350:
+		return PORT_CLK_SEL_LCPLL_1350;
+	case DPLL_ID_LCPLL_2700:
+		return PORT_CLK_SEL_LCPLL_2700;
+	default:
+		MISSING_CASE(pll->id);
+		return PORT_CLK_SEL_NONE;
+	}
+}
+
 /* Starting with Haswell, different DDI ports can work in FDI mode for
  * connection to the PCH-located connectors. For this, it is necessary to train
  * both the DDI port and PCH receiver for the desired DDI buffer settings.
@@ -571,11 +582,11 @@
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
-	u32 temp, i, rx_ctl_val;
+	u32 temp, i, rx_ctl_val, ddi_pll_sel;
 
 	for_each_encoder_on_crtc(dev, crtc, encoder) {
 		WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
-		intel_prepare_ddi_buffer(encoder);
+		intel_prepare_dp_ddi_buffers(encoder);
 	}
 
 	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
@@ -602,8 +613,9 @@
 	I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
 
 	/* Configure Port Clock Select */
-	I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config->ddi_pll_sel);
-	WARN_ON(intel_crtc->config->ddi_pll_sel != PORT_CLK_SEL_SPLL);
+	ddi_pll_sel = hsw_pll_to_ddi_pll_sel(intel_crtc->config->shared_dpll);
+	I915_WRITE(PORT_CLK_SEL(PORT_E), ddi_pll_sel);
+	WARN_ON(ddi_pll_sel != PORT_CLK_SEL_SPLL);
 
 	/* Start the training iterating through available voltages and emphasis,
 	 * testing each value twice. */
@@ -880,7 +892,7 @@
 	int link_clock = 0;
 	uint32_t dpll_ctl1, dpll;
 
-	dpll = pipe_config->ddi_pll_sel;
+	dpll = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
 
 	dpll_ctl1 = I915_READ(DPLL_CTRL1);
 
@@ -928,7 +940,7 @@
 	int link_clock = 0;
 	u32 val, pll;
 
-	val = pipe_config->ddi_pll_sel;
+	val = hsw_pll_to_ddi_pll_sel(pipe_config->shared_dpll);
 	switch (val & PORT_CLK_SEL_MASK) {
 	case PORT_CLK_SEL_LCPLL_810:
 		link_clock = 81000;
@@ -1136,7 +1148,6 @@
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
-	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe = intel_crtc->pipe;
@@ -1202,29 +1213,15 @@
 			temp |= TRANS_DDI_MODE_SELECT_HDMI;
 		else
 			temp |= TRANS_DDI_MODE_SELECT_DVI;
-
 	} else if (type == INTEL_OUTPUT_ANALOG) {
 		temp |= TRANS_DDI_MODE_SELECT_FDI;
 		temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
-
 	} else if (type == INTEL_OUTPUT_DP ||
 		   type == INTEL_OUTPUT_EDP) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
-		if (intel_dp->is_mst) {
-			temp |= TRANS_DDI_MODE_SELECT_DP_MST;
-		} else
-			temp |= TRANS_DDI_MODE_SELECT_DP_SST;
-
+		temp |= TRANS_DDI_MODE_SELECT_DP_SST;
 		temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
 	} else if (type == INTEL_OUTPUT_DP_MST) {
-		struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp;
-
-		if (intel_dp->is_mst) {
-			temp |= TRANS_DDI_MODE_SELECT_DP_MST;
-		} else
-			temp |= TRANS_DDI_MODE_SELECT_DP_SST;
-
+		temp |= TRANS_DDI_MODE_SELECT_DP_MST;
 		temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
 	} else {
 		WARN(1, "Invalid encoder type %d for pipe %c\n",
@@ -1611,13 +1608,15 @@
 }
 
 void intel_ddi_clk_select(struct intel_encoder *encoder,
-			  const struct intel_crtc_state *pipe_config)
+			  struct intel_shared_dpll *pll)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = intel_ddi_get_encoder_port(encoder);
 
+	if (WARN_ON(!pll))
+		return;
+
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-		uint32_t dpll = pipe_config->ddi_pll_sel;
 		uint32_t val;
 
 		/* DDI -> PLL mapping  */
@@ -1625,65 +1624,91 @@
 
 		val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
 			DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
-		val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) |
+		val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->id, port) |
 			DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
 
 		I915_WRITE(DPLL_CTRL2, val);
 
 	} else if (INTEL_INFO(dev_priv)->gen < 9) {
-		WARN_ON(pipe_config->ddi_pll_sel == PORT_CLK_SEL_NONE);
-		I915_WRITE(PORT_CLK_SEL(port), pipe_config->ddi_pll_sel);
+		I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
 	}
 }
 
-static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
+static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
+				    int link_rate, uint32_t lane_count,
+				    struct intel_shared_dpll *pll,
+				    bool link_mst)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum port port = intel_ddi_get_encoder_port(encoder);
+
+	intel_dp_set_link_params(intel_dp, link_rate, lane_count,
+				 link_mst);
+	if (encoder->type == INTEL_OUTPUT_EDP)
+		intel_edp_panel_on(intel_dp);
+
+	intel_ddi_clk_select(encoder, pll);
+	intel_prepare_dp_ddi_buffers(encoder);
+	intel_ddi_init_dp_buf_reg(encoder);
+	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+	intel_dp_start_link_train(intel_dp);
+	if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
+		intel_dp_stop_link_train(intel_dp);
+}
+
+static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
+				      bool has_hdmi_sink,
+				      struct drm_display_mode *adjusted_mode,
+				      struct intel_shared_dpll *pll)
+{
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_encoder *drm_encoder = &encoder->base;
+	enum port port = intel_ddi_get_encoder_port(encoder);
+	int level = intel_ddi_hdmi_level(dev_priv, port);
+
+	intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
+	intel_ddi_clk_select(encoder, pll);
+	intel_prepare_hdmi_ddi_buffers(encoder);
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+		skl_ddi_set_iboost(encoder, level);
+	else if (IS_BROXTON(dev_priv))
+		bxt_ddi_vswing_sequence(dev_priv, level, port,
+					INTEL_OUTPUT_HDMI);
+
+	intel_hdmi->set_infoframes(drm_encoder,
+				   has_hdmi_sink,
+				   adjusted_mode);
+}
+
+static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder,
+				 struct intel_crtc_state *pipe_config,
+				 struct drm_connector_state *conn_state)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
-	enum port port = intel_ddi_get_encoder_port(intel_encoder);
 	int type = intel_encoder->type;
 
-	if (type == INTEL_OUTPUT_HDMI) {
-		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-
-		intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
-	}
-
-	intel_prepare_ddi_buffer(intel_encoder);
-
-	if (type == INTEL_OUTPUT_EDP) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-		intel_edp_panel_on(intel_dp);
-	}
-
-	intel_ddi_clk_select(intel_encoder, crtc->config);
-
 	if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
-		intel_dp_set_link_params(intel_dp, crtc->config);
-
-		intel_ddi_init_dp_buf_reg(intel_encoder);
-
-		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
-		intel_dp_start_link_train(intel_dp);
-		if (port != PORT_A || INTEL_INFO(dev_priv)->gen >= 9)
-			intel_dp_stop_link_train(intel_dp);
-	} else if (type == INTEL_OUTPUT_HDMI) {
-		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-		int level = intel_ddi_hdmi_level(dev_priv, port);
-
-		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-			skl_ddi_set_iboost(intel_encoder, level);
-
-		intel_hdmi->set_infoframes(encoder,
-					   crtc->config->has_hdmi_sink,
-					   &crtc->config->base.adjusted_mode);
+		intel_ddi_pre_enable_dp(intel_encoder,
+					crtc->config->port_clock,
+					crtc->config->lane_count,
+					crtc->config->shared_dpll,
+					intel_crtc_has_type(crtc->config,
+							    INTEL_OUTPUT_DP_MST));
+	}
+	if (type == INTEL_OUTPUT_HDMI) {
+		intel_ddi_pre_enable_hdmi(intel_encoder,
+					  crtc->config->has_hdmi_sink,
+					  &crtc->config->base.adjusted_mode,
+					  crtc->config->shared_dpll);
 	}
 }
 
-static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
+static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
+				   struct intel_crtc_state *old_crtc_state,
+				   struct drm_connector_state *old_conn_state)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_device *dev = encoder->dev;
@@ -1693,6 +1718,8 @@
 	uint32_t val;
 	bool wait = false;
 
+	/* old_crtc_state and old_conn_state are NULL when called from DP_MST */
+
 	val = I915_READ(DDI_BUF_CTL(port));
 	if (val & DDI_BUF_CTL_ENABLE) {
 		val &= ~DDI_BUF_CTL_ENABLE;
@@ -1728,7 +1755,42 @@
 	}
 }
 
-static void intel_enable_ddi(struct intel_encoder *intel_encoder)
+void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+				struct intel_crtc_state *old_crtc_state,
+				struct drm_connector_state *old_conn_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+	uint32_t val;
+
+	/*
+	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
+	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
+	 * step 13 is the correct place for it. Step 18 is where it was
+	 * originally before the BUN.
+	 */
+	val = I915_READ(FDI_RX_CTL(PIPE_A));
+	val &= ~FDI_RX_ENABLE;
+	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+
+	intel_ddi_post_disable(intel_encoder, old_crtc_state, old_conn_state);
+
+	val = I915_READ(FDI_RX_MISC(PIPE_A));
+	val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+	val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+	I915_WRITE(FDI_RX_MISC(PIPE_A), val);
+
+	val = I915_READ(FDI_RX_CTL(PIPE_A));
+	val &= ~FDI_PCDCLK;
+	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+
+	val = I915_READ(FDI_RX_CTL(PIPE_A));
+	val &= ~FDI_RX_PLL_ENABLE;
+	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+}
+
+static void intel_enable_ddi(struct intel_encoder *intel_encoder,
+			     struct intel_crtc_state *pipe_config,
+			     struct drm_connector_state *conn_state)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_crtc *crtc = encoder->crtc;
@@ -1757,7 +1819,7 @@
 
 		intel_edp_backlight_on(intel_dp);
 		intel_psr_enable(intel_dp);
-		intel_edp_drrs_enable(intel_dp);
+		intel_edp_drrs_enable(intel_dp, pipe_config);
 	}
 
 	if (intel_crtc->config->has_audio) {
@@ -1766,7 +1828,9 @@
 	}
 }
 
-static void intel_disable_ddi(struct intel_encoder *intel_encoder)
+static void intel_disable_ddi(struct intel_encoder *intel_encoder,
+			      struct intel_crtc_state *old_crtc_state,
+			      struct drm_connector_state *old_conn_state)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_crtc *crtc = encoder->crtc;
@@ -1783,7 +1847,7 @@
 	if (type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-		intel_edp_drrs_disable(intel_dp);
+		intel_edp_drrs_disable(intel_dp, old_crtc_state);
 		intel_psr_disable(intel_dp);
 		intel_edp_backlight_off(intel_dp);
 	}
@@ -2072,7 +2136,9 @@
 	}
 }
 
-static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder)
+static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
+				   struct intel_crtc_state *pipe_config,
+				   struct drm_connector_state *conn_state)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
 	struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
@@ -2144,7 +2210,7 @@
 
 	val = DP_TP_CTL_ENABLE |
 	      DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
-	if (intel_dp->is_mst)
+	if (intel_dp->link_mst)
 		val |= DP_TP_CTL_MODE_MST;
 	else {
 		val |= DP_TP_CTL_MODE_SST;
@@ -2161,38 +2227,6 @@
 	udelay(600);
 }
 
-void intel_ddi_fdi_disable(struct drm_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
-	uint32_t val;
-
-	/*
-	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
-	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
-	 * step 13 is the correct place for it. Step 18 is where it was
-	 * originally before the BUN.
-	 */
-	val = I915_READ(FDI_RX_CTL(PIPE_A));
-	val &= ~FDI_RX_ENABLE;
-	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
-
-	intel_ddi_post_disable(intel_encoder);
-
-	val = I915_READ(FDI_RX_MISC(PIPE_A));
-	val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
-	val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
-	I915_WRITE(FDI_RX_MISC(PIPE_A), val);
-
-	val = I915_READ(FDI_RX_CTL(PIPE_A));
-	val &= ~FDI_PCDCLK;
-	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
-
-	val = I915_READ(FDI_RX_CTL(PIPE_A));
-	val &= ~FDI_RX_PLL_ENABLE;
-	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
-}
-
 void intel_ddi_get_config(struct intel_encoder *encoder,
 			  struct intel_crtc_state *pipe_config)
 {
@@ -2292,7 +2326,8 @@
 }
 
 static bool intel_ddi_compute_config(struct intel_encoder *encoder,
-				     struct intel_crtc_state *pipe_config)
+				     struct intel_crtc_state *pipe_config,
+				     struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	int type = encoder->type;
@@ -2305,9 +2340,9 @@
 		pipe_config->cpu_transcoder = TRANSCODER_EDP;
 
 	if (type == INTEL_OUTPUT_HDMI)
-		ret = intel_hdmi_compute_config(encoder, pipe_config);
+		ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
 	else
-		ret = intel_dp_compute_config(encoder, pipe_config);
+		ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
 
 	if (IS_BROXTON(dev_priv) && ret)
 		pipe_config->lane_lat_optim_mask =
@@ -2358,6 +2393,45 @@
 	return connector;
 }
 
+struct intel_shared_dpll *
+intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock)
+{
+	struct intel_connector *connector = intel_dp->attached_connector;
+	struct intel_encoder *encoder = connector->encoder;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_shared_dpll *pll = NULL;
+	struct intel_shared_dpll_config tmp_pll_config;
+	enum intel_dpll_id dpll_id;
+
+	if (IS_BROXTON(dev_priv)) {
+		dpll_id =  (enum intel_dpll_id)dig_port->port;
+		/*
+		 * Select the required PLL. This works for platforms where
+		 * there is no shared DPLL.
+		 */
+		pll = &dev_priv->shared_dplls[dpll_id];
+		if (WARN_ON(pll->active_mask)) {
+
+			DRM_ERROR("Shared DPLL in use. active_mask:%x\n",
+				  pll->active_mask);
+			return NULL;
+		}
+		tmp_pll_config = pll->config;
+		if (!bxt_ddi_dp_set_dpll_hw_state(clock,
+						  &pll->config.hw_state)) {
+			DRM_ERROR("Could not setup DPLL\n");
+			pll->config = tmp_pll_config;
+			return NULL;
+		}
+	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+		pll = skl_find_link_pll(dev_priv, clock);
+	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+		pll = hsw_ddi_dp_get_dpll(encoder, clock);
+	}
+	return pll;
+}
+
 void intel_ddi_init(struct drm_device *dev, enum port port)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index cba137f..73b6858 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -46,71 +46,70 @@
 
 static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-	struct intel_device_info *info = mkwrite_device_info(dev_priv);
+	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
 	u32 fuse, eu_dis;
 
 	fuse = I915_READ(CHV_FUSE_GT);
 
-	info->slice_total = 1;
+	sseu->slice_mask = BIT(0);
 
 	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
-		info->subslice_per_slice++;
+		sseu->subslice_mask |= BIT(0);
 		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
 				 CHV_FGT_EU_DIS_SS0_R1_MASK);
-		info->eu_total += 8 - hweight32(eu_dis);
+		sseu->eu_total += 8 - hweight32(eu_dis);
 	}
 
 	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
-		info->subslice_per_slice++;
+		sseu->subslice_mask |= BIT(1);
 		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
 				 CHV_FGT_EU_DIS_SS1_R1_MASK);
-		info->eu_total += 8 - hweight32(eu_dis);
+		sseu->eu_total += 8 - hweight32(eu_dis);
 	}
 
-	info->subslice_total = info->subslice_per_slice;
 	/*
 	 * CHV expected to always have a uniform distribution of EU
 	 * across subslices.
 	*/
-	info->eu_per_subslice = info->subslice_total ?
-				info->eu_total / info->subslice_total :
+	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+				sseu->eu_total / sseu_subslice_total(sseu) :
 				0;
 	/*
 	 * CHV supports subslice power gating on devices with more than
 	 * one subslice, and supports EU power gating on devices with
 	 * more than one EU pair per subslice.
 	*/
-	info->has_slice_pg = 0;
-	info->has_subslice_pg = (info->subslice_total > 1);
-	info->has_eu_pg = (info->eu_per_subslice > 2);
+	sseu->has_slice_pg = 0;
+	sseu->has_subslice_pg = sseu_subslice_total(sseu) > 1;
+	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
 }
 
 static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 {
 	struct intel_device_info *info = mkwrite_device_info(dev_priv);
+	struct sseu_dev_info *sseu = &info->sseu;
 	int s_max = 3, ss_max = 4, eu_max = 8;
 	int s, ss;
-	u32 fuse2, s_enable, ss_disable, eu_disable;
+	u32 fuse2, eu_disable;
 	u8 eu_mask = 0xff;
 
 	fuse2 = I915_READ(GEN8_FUSE2);
-	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
-	ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> GEN9_F2_SS_DIS_SHIFT;
+	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
 
-	info->slice_total = hweight32(s_enable);
 	/*
 	 * The subslice disable field is global, i.e. it applies
 	 * to each of the enabled slices.
 	*/
-	info->subslice_per_slice = ss_max - hweight32(ss_disable);
-	info->subslice_total = info->slice_total * info->subslice_per_slice;
+	sseu->subslice_mask = (1 << ss_max) - 1;
+	sseu->subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
+				 GEN9_F2_SS_DIS_SHIFT);
 
 	/*
 	 * Iterate through enabled slices and subslices to
 	 * count the total enabled EU.
 	*/
 	for (s = 0; s < s_max; s++) {
-		if (!(s_enable & BIT(s)))
+		if (!(sseu->slice_mask & BIT(s)))
 			/* skip disabled slice */
 			continue;
 
@@ -118,7 +117,7 @@
 		for (ss = 0; ss < ss_max; ss++) {
 			int eu_per_ss;
 
-			if (ss_disable & BIT(ss))
+			if (!(sseu->subslice_mask & BIT(ss)))
 				/* skip disabled subslice */
 				continue;
 
@@ -131,9 +130,9 @@
 			 * subslices if they are unbalanced.
 			 */
 			if (eu_per_ss == 7)
-				info->subslice_7eu[s] |= BIT(ss);
+				sseu->subslice_7eu[s] |= BIT(ss);
 
-			info->eu_total += eu_per_ss;
+			sseu->eu_total += eu_per_ss;
 		}
 	}
 
@@ -144,9 +143,9 @@
 	 * recovery. BXT is expected to be perfectly uniform in EU
 	 * distribution.
 	*/
-	info->eu_per_subslice = info->subslice_total ?
-				DIV_ROUND_UP(info->eu_total,
-					     info->subslice_total) : 0;
+	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+				DIV_ROUND_UP(sseu->eu_total,
+					     sseu_subslice_total(sseu)) : 0;
 	/*
 	 * SKL supports slice power gating on devices with more than
 	 * one slice, and supports EU power gating on devices with
@@ -155,15 +154,15 @@
 	 * supports EU power gating on devices with more than one EU
 	 * pair per subslice.
 	*/
-	info->has_slice_pg =
+	sseu->has_slice_pg =
 		(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
-		info->slice_total > 1;
-	info->has_subslice_pg =
-		IS_BROXTON(dev_priv) && info->subslice_total > 1;
-	info->has_eu_pg = info->eu_per_subslice > 2;
+		hweight8(sseu->slice_mask) > 1;
+	sseu->has_subslice_pg =
+		IS_BROXTON(dev_priv) && sseu_subslice_total(sseu) > 1;
+	sseu->has_eu_pg = sseu->eu_per_subslice > 2;
 
 	if (IS_BROXTON(dev_priv)) {
-#define IS_SS_DISABLED(_ss_disable, ss)    (_ss_disable & BIT(ss))
+#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask & BIT(ss)))
 		/*
 		 * There is a HW issue in 2x6 fused down parts that requires
 		 * Pooled EU to be enabled as a WA. The pool configuration
@@ -171,19 +170,18 @@
 		 * doesn't affect if the device has all 3 subslices enabled.
 		 */
 		/* WaEnablePooledEuFor2x6:bxt */
-		info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
-				       (info->subslice_per_slice == 2 &&
+		info->has_pooled_eu = ((hweight8(sseu->subslice_mask) == 3) ||
+				       (hweight8(sseu->subslice_mask) == 2 &&
 					INTEL_REVID(dev_priv) < BXT_REVID_C0));
 
-		info->min_eu_in_pool = 0;
+		sseu->min_eu_in_pool = 0;
 		if (info->has_pooled_eu) {
-			if (IS_SS_DISABLED(ss_disable, 0) ||
-			    IS_SS_DISABLED(ss_disable, 2))
-				info->min_eu_in_pool = 3;
-			else if (IS_SS_DISABLED(ss_disable, 1))
-				info->min_eu_in_pool = 6;
+			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
+				sseu->min_eu_in_pool = 3;
+			else if (IS_SS_DISABLED(1))
+				sseu->min_eu_in_pool = 6;
 			else
-				info->min_eu_in_pool = 9;
+				sseu->min_eu_in_pool = 9;
 		}
 #undef IS_SS_DISABLED
 	}
@@ -191,14 +189,20 @@
 
 static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-	struct intel_device_info *info = mkwrite_device_info(dev_priv);
+	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
 	const int s_max = 3, ss_max = 3, eu_max = 8;
 	int s, ss;
-	u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+	u32 fuse2, eu_disable[s_max];
 
 	fuse2 = I915_READ(GEN8_FUSE2);
-	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
-	ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
+	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+	/*
+	 * The subslice disable field is global, i.e. it applies
+	 * to each of the enabled slices.
+	 */
+	sseu->subslice_mask = BIT(ss_max) - 1;
+	sseu->subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
+				 GEN8_F2_SS_DIS_SHIFT);
 
 	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
 	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
@@ -208,28 +212,19 @@
 			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
 			 (32 - GEN8_EU_DIS1_S2_SHIFT));
 
-	info->slice_total = hweight32(s_enable);
-
-	/*
-	 * The subslice disable field is global, i.e. it applies
-	 * to each of the enabled slices.
-	 */
-	info->subslice_per_slice = ss_max - hweight32(ss_disable);
-	info->subslice_total = info->slice_total * info->subslice_per_slice;
-
 	/*
 	 * Iterate through enabled slices and subslices to
 	 * count the total enabled EU.
 	 */
 	for (s = 0; s < s_max; s++) {
-		if (!(s_enable & (0x1 << s)))
+		if (!(sseu->slice_mask & BIT(s)))
 			/* skip disabled slice */
 			continue;
 
 		for (ss = 0; ss < ss_max; ss++) {
 			u32 n_disabled;
 
-			if (ss_disable & (0x1 << ss))
+			if (!(sseu->subslice_mask & BIT(ss)))
 				/* skip disabled subslice */
 				continue;
 
@@ -239,9 +234,9 @@
 			 * Record which subslices have 7 EUs.
 			 */
 			if (eu_max - n_disabled == 7)
-				info->subslice_7eu[s] |= 1 << ss;
+				sseu->subslice_7eu[s] |= 1 << ss;
 
-			info->eu_total += eu_max - n_disabled;
+			sseu->eu_total += eu_max - n_disabled;
 		}
 	}
 
@@ -250,16 +245,17 @@
 	 * subslices with the exception that any one EU in any one subslice may
 	 * be fused off for die recovery.
 	 */
-	info->eu_per_subslice = info->subslice_total ?
-		DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
+	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+				DIV_ROUND_UP(sseu->eu_total,
+					     sseu_subslice_total(sseu)) : 0;
 
 	/*
 	 * BDW supports slice power gating on devices with more than
 	 * one slice.
 	 */
-	info->has_slice_pg = (info->slice_total > 1);
-	info->has_subslice_pg = 0;
-	info->has_eu_pg = 0;
+	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
+	sseu->has_subslice_pg = 0;
+	sseu->has_eu_pg = 0;
 }
 
 /*
@@ -374,15 +370,19 @@
 	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
 		info->has_snoop = false;
 
-	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
-	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
-	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
-	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
-	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+	DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
+	DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
+	DRM_DEBUG_DRIVER("subslice total: %u\n",
+			 sseu_subslice_total(&info->sseu));
+	DRM_DEBUG_DRIVER("subslice mask %04x\n", info->sseu.subslice_mask);
+	DRM_DEBUG_DRIVER("subslice per slice: %u\n",
+			 hweight8(info->sseu.subslice_mask));
+	DRM_DEBUG_DRIVER("EU total: %u\n", info->sseu.eu_total);
+	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->sseu.eu_per_subslice);
 	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
-			 info->has_slice_pg ? "y" : "n");
+			 info->sseu.has_slice_pg ? "y" : "n");
 	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
-			 info->has_subslice_pg ? "y" : "n");
+			 info->sseu.has_subslice_pg ? "y" : "n");
 	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
-			 info->has_eu_pg ? "y" : "n");
+			 info->sseu.has_eu_pg ? "y" : "n");
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 175595f..fbcfed6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -34,6 +34,7 @@
 #include <drm/drm_edid.h>
 #include <drm/drmP.h>
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_gem_dmabuf.h"
@@ -1201,8 +1202,8 @@
 	if (HAS_PCH_SPLIT(dev)) {
 		u32 port_sel;
 
-		pp_reg = PCH_PP_CONTROL;
-		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
+		pp_reg = PP_CONTROL(0);
+		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
 
 		if (port_sel == PANEL_PORT_SELECT_LVDS &&
 		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
@@ -1210,10 +1211,10 @@
 		/* XXX: else fix for eDP */
 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
 		/* presumably write lock depends on pipe, not port select */
-		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
+		pp_reg = PP_CONTROL(pipe);
 		panel_pipe = pipe;
 	} else {
-		pp_reg = PP_CONTROL;
+		pp_reg = PP_CONTROL(0);
 		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
 			panel_pipe = PIPE_B;
 	}
@@ -1906,7 +1907,7 @@
 	}
 }
 
-static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
 {
 	u32 val;
 
@@ -1958,12 +1959,12 @@
 	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
 	 * need the check.
 	 */
-	if (HAS_GMCH_DISPLAY(dev_priv))
+	if (HAS_GMCH_DISPLAY(dev_priv)) {
 		if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
 			assert_dsi_pll_enabled(dev_priv);
 		else
 			assert_pll_enabled(dev_priv, pipe);
-	else {
+	} else {
 		if (crtc->config->has_pch_encoder) {
 			/* if driving the PCH, we need FDI enabled */
 			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
@@ -2146,33 +2147,6 @@
 	}
 }
 
-static void
-intel_fill_fb_info(struct drm_i915_private *dev_priv,
-		   struct drm_framebuffer *fb)
-{
-	struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info;
-	unsigned int tile_size, tile_width, tile_height, cpp;
-
-	tile_size = intel_tile_size(dev_priv);
-
-	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
-	intel_tile_dims(dev_priv, &tile_width, &tile_height,
-			fb->modifier[0], cpp);
-
-	info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp);
-	info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height);
-
-	if (info->pixel_format == DRM_FORMAT_NV12) {
-		cpp = drm_format_plane_cpp(fb->pixel_format, 1);
-		intel_tile_dims(dev_priv, &tile_width, &tile_height,
-				fb->modifier[1], cpp);
-
-		info->uv_offset = fb->offsets[1];
-		info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp);
-		info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height);
-	}
-}
-
 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
 {
 	if (INTEL_INFO(dev_priv)->gen >= 9)
@@ -2205,16 +2179,15 @@
 	}
 }
 
-int
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
-			   unsigned int rotation)
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
 {
 	struct drm_device *dev = fb->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct i915_ggtt_view view;
+	struct i915_vma *vma;
 	u32 alignment;
-	int ret;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
@@ -2239,75 +2212,112 @@
 	 */
 	intel_runtime_pm_get(dev_priv);
 
-	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
-						   &view);
-	if (ret)
-		goto err_pm;
+	vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
+	if (IS_ERR(vma))
+		goto err;
 
-	/* Install a fence for tiled scan-out. Pre-i965 always needs a
-	 * fence, whereas 965+ only requires a fence if using
-	 * framebuffer compression.  For simplicity, we always install
-	 * a fence as the cost is not that onerous.
-	 */
-	if (view.type == I915_GGTT_VIEW_NORMAL) {
-		ret = i915_gem_object_get_fence(obj);
-		if (ret == -EDEADLK) {
-			/*
-			 * -EDEADLK means there are no free fences
-			 * no pending flips.
-			 *
-			 * This is propagated to atomic, but it uses
-			 * -EDEADLK to force a locking recovery, so
-			 * change the returned error to -EBUSY.
-			 */
-			ret = -EBUSY;
-			goto err_unpin;
-		} else if (ret)
-			goto err_unpin;
-
-		i915_gem_object_pin_fence(obj);
+	if (i915_vma_is_map_and_fenceable(vma)) {
+		/* Install a fence for tiled scan-out. Pre-i965 always needs a
+		 * fence, whereas 965+ only requires a fence if using
+		 * framebuffer compression.  For simplicity, we always, when
+		 * possible, install a fence as the cost is not that onerous.
+		 *
+		 * If we fail to fence the tiled scanout, then either the
+		 * modeset will reject the change (which is highly unlikely as
+		 * the affected systems, all but one, do not have unmappable
+		 * space) or we will not be able to enable full powersaving
+		 * techniques (also likely not to apply due to various limits
+		 * FBC and the like impose on the size of the buffer, which
+		 * presumably we violated anyway with this unmappable buffer).
+		 * Anyway, it is presumably better to stumble onwards with
+		 * something and try to run the system in a "less than optimal"
+		 * mode that matches the user configuration.
+		 */
+		if (i915_vma_get_fence(vma) == 0)
+			i915_vma_pin_fence(vma);
 	}
 
+err:
 	intel_runtime_pm_put(dev_priv);
-	return 0;
-
-err_unpin:
-	i915_gem_object_unpin_from_display_plane(obj, &view);
-err_pm:
-	intel_runtime_pm_put(dev_priv);
-	return ret;
+	return vma;
 }
 
 void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
 {
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct i915_ggtt_view view;
+	struct i915_vma *vma;
 
 	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
 
 	intel_fill_fb_ggtt_view(&view, fb, rotation);
+	vma = i915_gem_object_to_ggtt(obj, &view);
 
-	if (view.type == I915_GGTT_VIEW_NORMAL)
-		i915_gem_object_unpin_fence(obj);
+	i915_vma_unpin_fence(vma);
+	i915_gem_object_unpin_from_display_plane(vma);
+}
 
-	i915_gem_object_unpin_from_display_plane(obj, &view);
+static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
+			  unsigned int rotation)
+{
+	if (intel_rotation_90_or_270(rotation))
+		return to_intel_framebuffer(fb)->rotated[plane].pitch;
+	else
+		return fb->pitches[plane];
 }
 
 /*
- * Adjust the tile offset by moving the difference into
- * the x/y offsets.
- *
+ * Convert the x/y offsets into a linear offset.
+ * Only valid with 0/180 degree rotation, which is fine since linear
+ * offset is only used with linear buffers on pre-hsw and tiled buffers
+ * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
+ */
+u32 intel_fb_xy_to_linear(int x, int y,
+			  const struct intel_plane_state *state,
+			  int plane)
+{
+	const struct drm_framebuffer *fb = state->base.fb;
+	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+	unsigned int pitch = fb->pitches[plane];
+
+	return y * pitch + x * cpp;
+}
+
+/*
+ * Add the x/y offsets derived from fb->offsets[] to the user
+ * specified plane src x/y offsets. The resulting x/y offsets
+ * specify the start of scanout from the beginning of the gtt mapping.
+ */
+void intel_add_fb_offsets(int *x, int *y,
+			  const struct intel_plane_state *state,
+			  int plane)
+
+{
+	const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
+	unsigned int rotation = state->base.rotation;
+
+	if (intel_rotation_90_or_270(rotation)) {
+		*x += intel_fb->rotated[plane].x;
+		*y += intel_fb->rotated[plane].y;
+	} else {
+		*x += intel_fb->normal[plane].x;
+		*y += intel_fb->normal[plane].y;
+	}
+}
+
+/*
  * Input tile dimensions and pitch must already be
  * rotated to match x and y, and in pixel units.
  */
-static u32 intel_adjust_tile_offset(int *x, int *y,
-				    unsigned int tile_width,
-				    unsigned int tile_height,
-				    unsigned int tile_size,
-				    unsigned int pitch_tiles,
-				    u32 old_offset,
-				    u32 new_offset)
+static u32 _intel_adjust_tile_offset(int *x, int *y,
+				     unsigned int tile_width,
+				     unsigned int tile_height,
+				     unsigned int tile_size,
+				     unsigned int pitch_tiles,
+				     u32 old_offset,
+				     u32 new_offset)
 {
+	unsigned int pitch_pixels = pitch_tiles * tile_width;
 	unsigned int tiles;
 
 	WARN_ON(old_offset & (tile_size - 1));
@@ -2319,6 +2329,54 @@
 	*y += tiles / pitch_tiles * tile_height;
 	*x += tiles % pitch_tiles * tile_width;
 
+	/* minimize x in case it got needlessly big */
+	*y += *x / pitch_pixels * tile_height;
+	*x %= pitch_pixels;
+
+	return new_offset;
+}
+
+/*
+ * Adjust the tile offset by moving the difference into
+ * the x/y offsets.
+ */
+static u32 intel_adjust_tile_offset(int *x, int *y,
+				    const struct intel_plane_state *state, int plane,
+				    u32 old_offset, u32 new_offset)
+{
+	const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
+	const struct drm_framebuffer *fb = state->base.fb;
+	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+	unsigned int rotation = state->base.rotation;
+	unsigned int pitch = intel_fb_pitch(fb, plane, rotation);
+
+	WARN_ON(new_offset > old_offset);
+
+	if (fb->modifier[plane] != DRM_FORMAT_MOD_NONE) {
+		unsigned int tile_size, tile_width, tile_height;
+		unsigned int pitch_tiles;
+
+		tile_size = intel_tile_size(dev_priv);
+		intel_tile_dims(dev_priv, &tile_width, &tile_height,
+				fb->modifier[plane], cpp);
+
+		if (intel_rotation_90_or_270(rotation)) {
+			pitch_tiles = pitch / tile_height;
+			swap(tile_width, tile_height);
+		} else {
+			pitch_tiles = pitch / (tile_width * cpp);
+		}
+
+		_intel_adjust_tile_offset(x, y, tile_width, tile_height,
+					  tile_size, pitch_tiles,
+					  old_offset, new_offset);
+	} else {
+		old_offset += *y * pitch + *x * cpp;
+
+		*y = (old_offset - new_offset) / pitch;
+		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
+	}
+
 	return new_offset;
 }
 
@@ -2329,18 +2387,24 @@
  * In the 90/270 rotated case, x and y are assumed
  * to be already rotated to match the rotated GTT view, and
  * pitch is the tile_height aligned framebuffer height.
+ *
+ * This function is used when computing the derived information
+ * under intel_framebuffer, so using any of that information
+ * here is not allowed. Anything under drm_framebuffer can be
+ * used. This is why the user has to pass in the pitch since it
+ * is specified in the rotated orientation.
  */
-u32 intel_compute_tile_offset(int *x, int *y,
-			      const struct drm_framebuffer *fb, int plane,
-			      unsigned int pitch,
-			      unsigned int rotation)
+static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
+				      int *x, int *y,
+				      const struct drm_framebuffer *fb, int plane,
+				      unsigned int pitch,
+				      unsigned int rotation,
+				      u32 alignment)
 {
-	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
 	uint64_t fb_modifier = fb->modifier[plane];
 	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
-	u32 offset, offset_aligned, alignment;
+	u32 offset, offset_aligned;
 
-	alignment = intel_surf_alignment(dev_priv, fb_modifier);
 	if (alignment)
 		alignment--;
 
@@ -2368,9 +2432,9 @@
 		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
 		offset_aligned = offset & ~alignment;
 
-		intel_adjust_tile_offset(x, y, tile_width, tile_height,
-					 tile_size, pitch_tiles,
-					 offset, offset_aligned);
+		_intel_adjust_tile_offset(x, y, tile_width, tile_height,
+					  tile_size, pitch_tiles,
+					  offset, offset_aligned);
 	} else {
 		offset = *y * pitch + *x * cpp;
 		offset_aligned = offset & ~alignment;
@@ -2382,6 +2446,177 @@
 	return offset_aligned;
 }
 
+u32 intel_compute_tile_offset(int *x, int *y,
+			      const struct intel_plane_state *state,
+			      int plane)
+{
+	const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
+	const struct drm_framebuffer *fb = state->base.fb;
+	unsigned int rotation = state->base.rotation;
+	int pitch = intel_fb_pitch(fb, plane, rotation);
+	u32 alignment;
+
+	/* AUX_DIST needs only 4K alignment */
+	if (fb->pixel_format == DRM_FORMAT_NV12 && plane == 1)
+		alignment = 4096;
+	else
+		alignment = intel_surf_alignment(dev_priv, fb->modifier[plane]);
+
+	return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
+					  rotation, alignment);
+}
+
+/* Convert the fb->offset[] linear offset into x/y offsets */
+static void intel_fb_offset_to_xy(int *x, int *y,
+				  const struct drm_framebuffer *fb, int plane)
+{
+	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+	unsigned int pitch = fb->pitches[plane];
+	u32 linear_offset = fb->offsets[plane];
+
+	*y = linear_offset / pitch;
+	*x = linear_offset % pitch / cpp;
+}
+
+static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
+{
+	switch (fb_modifier) {
+	case I915_FORMAT_MOD_X_TILED:
+		return I915_TILING_X;
+	case I915_FORMAT_MOD_Y_TILED:
+		return I915_TILING_Y;
+	default:
+		return I915_TILING_NONE;
+	}
+}
+
+static int
+intel_fill_fb_info(struct drm_i915_private *dev_priv,
+		   struct drm_framebuffer *fb)
+{
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
+	u32 gtt_offset_rotated = 0;
+	unsigned int max_size = 0;
+	uint32_t format = fb->pixel_format;
+	int i, num_planes = drm_format_num_planes(format);
+	unsigned int tile_size = intel_tile_size(dev_priv);
+
+	for (i = 0; i < num_planes; i++) {
+		unsigned int width, height;
+		unsigned int cpp, size;
+		u32 offset;
+		int x, y;
+
+		cpp = drm_format_plane_cpp(format, i);
+		width = drm_format_plane_width(fb->width, format, i);
+		height = drm_format_plane_height(fb->height, format, i);
+
+		intel_fb_offset_to_xy(&x, &y, fb, i);
+
+		/*
+		 * The fence (if used) is aligned to the start of the object
+		 * so having the framebuffer wrap around across the edge of the
+		 * fenced region doesn't really work. We have no API to configure
+		 * the fence start offset within the object (nor could we probably
+		 * on gen2/3). So it's just easier if we just require that the
+		 * fb layout agrees with the fence layout. We already check that the
+		 * fb stride matches the fence stride elsewhere.
+		 */
+		if (i915_gem_object_is_tiled(intel_fb->obj) &&
+		    (x + width) * cpp > fb->pitches[i]) {
+			DRM_DEBUG("bad fb plane %d offset: 0x%x\n",
+				  i, fb->offsets[i]);
+			return -EINVAL;
+		}
+
+		/*
+		 * First pixel of the framebuffer from
+		 * the start of the normal gtt mapping.
+		 */
+		intel_fb->normal[i].x = x;
+		intel_fb->normal[i].y = y;
+
+		offset = _intel_compute_tile_offset(dev_priv, &x, &y,
+						    fb, 0, fb->pitches[i],
+						    DRM_ROTATE_0, tile_size);
+		offset /= tile_size;
+
+		if (fb->modifier[i] != DRM_FORMAT_MOD_NONE) {
+			unsigned int tile_width, tile_height;
+			unsigned int pitch_tiles;
+			struct drm_rect r;
+
+			intel_tile_dims(dev_priv, &tile_width, &tile_height,
+					fb->modifier[i], cpp);
+
+			rot_info->plane[i].offset = offset;
+			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
+			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
+			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
+
+			intel_fb->rotated[i].pitch =
+				rot_info->plane[i].height * tile_height;
+
+			/* how many tiles does this plane need */
+			size = rot_info->plane[i].stride * rot_info->plane[i].height;
+			/*
+			 * If the plane isn't horizontally tile aligned,
+			 * we need one more tile.
+			 */
+			if (x != 0)
+				size++;
+
+			/* rotate the x/y offsets to match the GTT view */
+			r.x1 = x;
+			r.y1 = y;
+			r.x2 = x + width;
+			r.y2 = y + height;
+			drm_rect_rotate(&r,
+					rot_info->plane[i].width * tile_width,
+					rot_info->plane[i].height * tile_height,
+					DRM_ROTATE_270);
+			x = r.x1;
+			y = r.y1;
+
+			/* rotate the tile dimensions to match the GTT view */
+			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
+			swap(tile_width, tile_height);
+
+			/*
+			 * We only keep the x/y offsets, so push all of the
+			 * gtt offset into the x/y offsets.
+			 */
+			_intel_adjust_tile_offset(&x, &y, tile_size,
+						  tile_width, tile_height, pitch_tiles,
+						  gtt_offset_rotated * tile_size, 0);
+
+			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
+
+			/*
+			 * First pixel of the framebuffer from
+			 * the start of the rotated gtt mapping.
+			 */
+			intel_fb->rotated[i].x = x;
+			intel_fb->rotated[i].y = y;
+		} else {
+			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
+					    x * cpp, tile_size);
+		}
+
+		/* how many tiles in total needed in the bo */
+		max_size = max(max_size, offset + size);
+	}
+
+	if (max_size * tile_size > to_intel_framebuffer(fb)->obj->base.size) {
+		DRM_DEBUG("fb too big for bo (need %u bytes, have %zu bytes)\n",
+			  max_size * tile_size, to_intel_framebuffer(fb)->obj->base.size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int i9xx_format_to_fourcc(int format)
 {
 	switch (format) {
@@ -2465,9 +2700,8 @@
 		return false;
 	}
 
-	obj->tiling_mode = plane_config->tiling;
-	if (obj->tiling_mode == I915_TILING_X)
-		obj->stride = fb->pitches[0];
+	if (plane_config->tiling == I915_TILING_X)
+		obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;
 
 	mode_cmd.pixel_format = fb->pixel_format;
 	mode_cmd.width = fb->width;
@@ -2488,7 +2722,7 @@
 	return true;
 
 out_unref_obj:
-	drm_gem_object_unreference(&obj->base);
+	i915_gem_object_put(obj);
 	mutex_unlock(&dev->struct_mutex);
 	return false;
 }
@@ -2552,7 +2786,7 @@
 			continue;
 
 		obj = intel_fb_obj(fb);
-		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
+		if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
 			drm_framebuffer_reference(fb);
 			goto valid_fb;
 		}
@@ -2565,7 +2799,7 @@
 	 * simplest solution is to just disable the primary plane now and
 	 * pretend the BIOS never had it enabled.
 	 */
-	to_intel_plane_state(plane_state)->visible = false;
+	to_intel_plane_state(plane_state)->base.visible = false;
 	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
 	intel_pre_disable_primary_noatomic(&intel_crtc->base);
 	intel_plane->disable_plane(primary, &intel_crtc->base);
@@ -2583,24 +2817,188 @@
 	plane_state->crtc_w = fb->width;
 	plane_state->crtc_h = fb->height;
 
-	intel_state->src.x1 = plane_state->src_x;
-	intel_state->src.y1 = plane_state->src_y;
-	intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
-	intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
-	intel_state->dst.x1 = plane_state->crtc_x;
-	intel_state->dst.y1 = plane_state->crtc_y;
-	intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
-	intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
+	intel_state->base.src.x1 = plane_state->src_x;
+	intel_state->base.src.y1 = plane_state->src_y;
+	intel_state->base.src.x2 = plane_state->src_x + plane_state->src_w;
+	intel_state->base.src.y2 = plane_state->src_y + plane_state->src_h;
+	intel_state->base.dst.x1 = plane_state->crtc_x;
+	intel_state->base.dst.y1 = plane_state->crtc_y;
+	intel_state->base.dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
+	intel_state->base.dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
 
 	obj = intel_fb_obj(fb);
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (i915_gem_object_is_tiled(obj))
 		dev_priv->preserve_bios_swizzle = true;
 
 	drm_framebuffer_reference(fb);
 	primary->fb = primary->state->fb = fb;
 	primary->crtc = primary->state->crtc = &intel_crtc->base;
 	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
-	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
+	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
+		  &obj->frontbuffer_bits);
+}
+
+static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
+			       unsigned int rotation)
+{
+	int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+
+	switch (fb->modifier[plane]) {
+	case DRM_FORMAT_MOD_NONE:
+	case I915_FORMAT_MOD_X_TILED:
+		switch (cpp) {
+		case 8:
+			return 4096;
+		case 4:
+		case 2:
+		case 1:
+			return 8192;
+		default:
+			MISSING_CASE(cpp);
+			break;
+		}
+		break;
+	case I915_FORMAT_MOD_Y_TILED:
+	case I915_FORMAT_MOD_Yf_TILED:
+		switch (cpp) {
+		case 8:
+			return 2048;
+		case 4:
+			return 4096;
+		case 2:
+		case 1:
+			return 8192;
+		default:
+			MISSING_CASE(cpp);
+			break;
+		}
+		break;
+	default:
+		MISSING_CASE(fb->modifier[plane]);
+	}
+
+	return 2048;
+}
+
+static int skl_check_main_surface(struct intel_plane_state *plane_state)
+{
+	const struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
+	const struct drm_framebuffer *fb = plane_state->base.fb;
+	unsigned int rotation = plane_state->base.rotation;
+	int x = plane_state->base.src.x1 >> 16;
+	int y = plane_state->base.src.y1 >> 16;
+	int w = drm_rect_width(&plane_state->base.src) >> 16;
+	int h = drm_rect_height(&plane_state->base.src) >> 16;
+	int max_width = skl_max_plane_width(fb, 0, rotation);
+	int max_height = 4096;
+	u32 alignment, offset, aux_offset = plane_state->aux.offset;
+
+	if (w > max_width || h > max_height) {
+		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
+			      w, h, max_width, max_height);
+		return -EINVAL;
+	}
+
+	intel_add_fb_offsets(&x, &y, plane_state, 0);
+	offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
+
+	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
+
+	/*
+	 * AUX surface offset is specified as the distance from the
+	 * main surface offset, and it must be non-negative. Make
+	 * sure that is what we will get.
+	 */
+	if (offset > aux_offset)
+		offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
+						  offset, aux_offset & ~(alignment - 1));
+
+	/*
+	 * When using an X-tiled surface, the plane blows up
+	 * if the x offset + width exceed the stride.
+	 *
+	 * TODO: linear and Y-tiled seem fine, Yf untested,
+	 */
+	if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) {
+		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
+		while ((x + w) * cpp > fb->pitches[0]) {
+			if (offset == 0) {
+				DRM_DEBUG_KMS("Unable to find suitable display surface offset\n");
+				return -EINVAL;
+			}
+
+			offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
+							  offset, offset - alignment);
+		}
+	}
+
+	plane_state->main.offset = offset;
+	plane_state->main.x = x;
+	plane_state->main.y = y;
+
+	return 0;
+}
+
+static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
+{
+	const struct drm_framebuffer *fb = plane_state->base.fb;
+	unsigned int rotation = plane_state->base.rotation;
+	int max_width = skl_max_plane_width(fb, 1, rotation);
+	int max_height = 4096;
+	int x = plane_state->base.src.x1 >> 17;
+	int y = plane_state->base.src.y1 >> 17;
+	int w = drm_rect_width(&plane_state->base.src) >> 17;
+	int h = drm_rect_height(&plane_state->base.src) >> 17;
+	u32 offset;
+
+	intel_add_fb_offsets(&x, &y, plane_state, 1);
+	offset = intel_compute_tile_offset(&x, &y, plane_state, 1);
+
+	/* FIXME not quite sure how/if these apply to the chroma plane */
+	if (w > max_width || h > max_height) {
+		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
+			      w, h, max_width, max_height);
+		return -EINVAL;
+	}
+
+	plane_state->aux.offset = offset;
+	plane_state->aux.x = x;
+	plane_state->aux.y = y;
+
+	return 0;
+}
+
+int skl_check_plane_surface(struct intel_plane_state *plane_state)
+{
+	const struct drm_framebuffer *fb = plane_state->base.fb;
+	unsigned int rotation = plane_state->base.rotation;
+	int ret;
+
+	/* Rotate src coordinates to match rotated GTT view */
+	if (intel_rotation_90_or_270(rotation))
+		drm_rect_rotate(&plane_state->base.src,
+				fb->width, fb->height, DRM_ROTATE_270);
+
+	/*
+	 * Handle the AUX surface first since
+	 * the main surface setup depends on it.
+	 */
+	if (fb->pixel_format == DRM_FORMAT_NV12) {
+		ret = skl_check_nv12_aux_surface(plane_state);
+		if (ret)
+			return ret;
+	} else {
+		plane_state->aux.offset = ~0xfff;
+		plane_state->aux.x = 0;
+		plane_state->aux.y = 0;
+	}
+
+	ret = skl_check_main_surface(plane_state);
+	if (ret)
+		return ret;
+
+	return 0;
 }
 
 static void i9xx_update_primary_plane(struct drm_plane *primary,
@@ -2617,9 +3015,8 @@
 	u32 dspcntr;
 	i915_reg_t reg = DSPCNTR(plane);
 	unsigned int rotation = plane_state->base.rotation;
-	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
-	int x = plane_state->src.x1 >> 16;
-	int y = plane_state->src.y1 >> 16;
+	int x = plane_state->base.src.x1 >> 16;
+	int y = plane_state->base.src.y1 >> 16;
 
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
 
@@ -2670,37 +3067,31 @@
 		BUG();
 	}
 
-	if (INTEL_INFO(dev)->gen >= 4 &&
-	    obj->tiling_mode != I915_TILING_NONE)
+	if (INTEL_GEN(dev_priv) >= 4 &&
+	    fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
 		dspcntr |= DISPPLANE_TILED;
 
 	if (IS_G4X(dev))
 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
-	linear_offset = y * fb->pitches[0] + x * cpp;
+	intel_add_fb_offsets(&x, &y, plane_state, 0);
 
-	if (INTEL_INFO(dev)->gen >= 4) {
+	if (INTEL_INFO(dev)->gen >= 4)
 		intel_crtc->dspaddr_offset =
-			intel_compute_tile_offset(&x, &y, fb, 0,
-						  fb->pitches[0], rotation);
-		linear_offset -= intel_crtc->dspaddr_offset;
-	} else {
-		intel_crtc->dspaddr_offset = linear_offset;
-	}
+			intel_compute_tile_offset(&x, &y, plane_state, 0);
 
-	if (rotation == BIT(DRM_ROTATE_180)) {
+	if (rotation == DRM_ROTATE_180) {
 		dspcntr |= DISPPLANE_ROTATE_180;
 
 		x += (crtc_state->pipe_src_w - 1);
 		y += (crtc_state->pipe_src_h - 1);
-
-		/* Finding the last pixel of the last line of the display
-		data and adding to linear_offset*/
-		linear_offset +=
-			(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
-			(crtc_state->pipe_src_w - 1) * cpp;
 	}
 
+	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
+	if (INTEL_INFO(dev)->gen < 4)
+		intel_crtc->dspaddr_offset = linear_offset;
+
 	intel_crtc->adjusted_x = x;
 	intel_crtc->adjusted_y = y;
 
@@ -2709,11 +3100,12 @@
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
 	if (INTEL_INFO(dev)->gen >= 4) {
 		I915_WRITE(DSPSURF(plane),
-			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+			   intel_fb_gtt_offset(fb, rotation) +
+			   intel_crtc->dspaddr_offset);
 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
 		I915_WRITE(DSPLINOFF(plane), linear_offset);
 	} else
-		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
+		I915_WRITE(DSPADDR(plane), i915_gem_object_ggtt_offset(obj, NULL) + linear_offset);
 	POSTING_READ(reg);
 }
 
@@ -2741,15 +3133,13 @@
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_framebuffer *fb = plane_state->base.fb;
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	int plane = intel_crtc->plane;
 	u32 linear_offset;
 	u32 dspcntr;
 	i915_reg_t reg = DSPCNTR(plane);
 	unsigned int rotation = plane_state->base.rotation;
-	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
-	int x = plane_state->src.x1 >> 16;
-	int y = plane_state->src.y1 >> 16;
+	int x = plane_state->base.src.x1 >> 16;
+	int y = plane_state->base.src.y1 >> 16;
 
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
 	dspcntr |= DISPLAY_PLANE_ENABLE;
@@ -2780,32 +3170,28 @@
 		BUG();
 	}
 
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
 		dspcntr |= DISPPLANE_TILED;
 
 	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
-	linear_offset = y * fb->pitches[0] + x * cpp;
+	intel_add_fb_offsets(&x, &y, plane_state, 0);
+
 	intel_crtc->dspaddr_offset =
-		intel_compute_tile_offset(&x, &y, fb, 0,
-					  fb->pitches[0], rotation);
-	linear_offset -= intel_crtc->dspaddr_offset;
-	if (rotation == BIT(DRM_ROTATE_180)) {
+		intel_compute_tile_offset(&x, &y, plane_state, 0);
+
+	if (rotation == DRM_ROTATE_180) {
 		dspcntr |= DISPPLANE_ROTATE_180;
 
 		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
 			x += (crtc_state->pipe_src_w - 1);
 			y += (crtc_state->pipe_src_h - 1);
-
-			/* Finding the last pixel of the last line of the display
-			data and adding to linear_offset*/
-			linear_offset +=
-				(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
-				(crtc_state->pipe_src_w - 1) * cpp;
 		}
 	}
 
+	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
 	intel_crtc->adjusted_x = x;
 	intel_crtc->adjusted_y = y;
 
@@ -2813,7 +3199,8 @@
 
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
 	I915_WRITE(DSPSURF(plane),
-		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+		   intel_fb_gtt_offset(fb, rotation) +
+		   intel_crtc->dspaddr_offset);
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
 	} else {
@@ -2835,32 +3222,21 @@
 	}
 }
 
-u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
-			   struct drm_i915_gem_object *obj,
-			   unsigned int plane)
+u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
+			unsigned int rotation)
 {
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct i915_ggtt_view view;
 	struct i915_vma *vma;
-	u64 offset;
 
-	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
-				intel_plane->base.state->rotation);
+	intel_fill_fb_ggtt_view(&view, fb, rotation);
 
-	vma = i915_gem_obj_to_ggtt_view(obj, &view);
+	vma = i915_gem_object_to_ggtt(obj, &view);
 	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
-		view.type))
+		 view.type))
 		return -1;
 
-	offset = vma->node.start;
-
-	if (plane == 1) {
-		offset += vma->ggtt_view.params.rotated.uv_start_page *
-			  PAGE_SIZE;
-	}
-
-	WARN_ON(upper_32_bits(offset));
-
-	return lower_32_bits(offset);
+	return i915_ggtt_offset(vma);
 }
 
 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
@@ -2890,6 +3266,28 @@
 	}
 }
 
+u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
+		     unsigned int rotation)
+{
+	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
+	u32 stride = intel_fb_pitch(fb, plane, rotation);
+
+	/*
+	 * The stride is either expressed as a multiple of 64 bytes chunks for
+	 * linear buffers or in number of tiles for tiled buffers.
+	 */
+	if (intel_rotation_90_or_270(rotation)) {
+		int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+
+		stride /= intel_tile_height(dev_priv, fb->modifier[0], cpp);
+	} else {
+		stride /= intel_fb_stride_alignment(dev_priv, fb->modifier[0],
+						    fb->pixel_format);
+	}
+
+	return stride;
+}
+
 u32 skl_plane_ctl_format(uint32_t pixel_format)
 {
 	switch (pixel_format) {
@@ -2952,17 +3350,17 @@
 u32 skl_plane_ctl_rotation(unsigned int rotation)
 {
 	switch (rotation) {
-	case BIT(DRM_ROTATE_0):
+	case DRM_ROTATE_0:
 		break;
 	/*
 	 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
 	 * while i915 HW rotation is clockwise, thats why this swapping.
 	 */
-	case BIT(DRM_ROTATE_90):
+	case DRM_ROTATE_90:
 		return PLANE_CTL_ROTATE_270;
-	case BIT(DRM_ROTATE_180):
+	case DRM_ROTATE_180:
 		return PLANE_CTL_ROTATE_180;
-	case BIT(DRM_ROTATE_270):
+	case DRM_ROTATE_270:
 		return PLANE_CTL_ROTATE_90;
 	default:
 		MISSING_CASE(rotation);
@@ -2979,22 +3377,21 @@
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_framebuffer *fb = plane_state->base.fb;
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
 	int pipe = intel_crtc->pipe;
-	u32 plane_ctl, stride_div, stride;
-	u32 tile_height, plane_offset, plane_size;
+	u32 plane_ctl;
 	unsigned int rotation = plane_state->base.rotation;
-	int x_offset, y_offset;
-	u32 surf_addr;
+	u32 stride = skl_plane_stride(fb, 0, rotation);
+	u32 surf_addr = plane_state->main.offset;
 	int scaler_id = plane_state->scaler_id;
-	int src_x = plane_state->src.x1 >> 16;
-	int src_y = plane_state->src.y1 >> 16;
-	int src_w = drm_rect_width(&plane_state->src) >> 16;
-	int src_h = drm_rect_height(&plane_state->src) >> 16;
-	int dst_x = plane_state->dst.x1;
-	int dst_y = plane_state->dst.y1;
-	int dst_w = drm_rect_width(&plane_state->dst);
-	int dst_h = drm_rect_height(&plane_state->dst);
+	int src_x = plane_state->main.x;
+	int src_y = plane_state->main.y;
+	int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+	int src_h = drm_rect_height(&plane_state->base.src) >> 16;
+	int dst_x = plane_state->base.dst.x1;
+	int dst_y = plane_state->base.dst.y1;
+	int dst_w = drm_rect_width(&plane_state->base.dst);
+	int dst_h = drm_rect_height(&plane_state->base.dst);
 
 	plane_ctl = PLANE_CTL_ENABLE |
 		    PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -3005,36 +3402,24 @@
 	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
 	plane_ctl |= skl_plane_ctl_rotation(rotation);
 
-	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
-					       fb->pixel_format);
-	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
+	/* Sizes are 0 based */
+	src_w--;
+	src_h--;
+	dst_w--;
+	dst_h--;
 
-	WARN_ON(drm_rect_width(&plane_state->src) == 0);
+	intel_crtc->dspaddr_offset = surf_addr;
 
-	if (intel_rotation_90_or_270(rotation)) {
-		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+	intel_crtc->adjusted_x = src_x;
+	intel_crtc->adjusted_y = src_y;
 
-		/* stride = Surface height in tiles */
-		tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
-		stride = DIV_ROUND_UP(fb->height, tile_height);
-		x_offset = stride * tile_height - src_y - src_h;
-		y_offset = src_x;
-		plane_size = (src_w - 1) << 16 | (src_h - 1);
-	} else {
-		stride = fb->pitches[0] / stride_div;
-		x_offset = src_x;
-		y_offset = src_y;
-		plane_size = (src_h - 1) << 16 | (src_w - 1);
-	}
-	plane_offset = y_offset << 16 | x_offset;
-
-	intel_crtc->adjusted_x = x_offset;
-	intel_crtc->adjusted_y = y_offset;
+	if (wm->dirty_pipes & drm_crtc_mask(&intel_crtc->base))
+		skl_write_plane_wm(intel_crtc, wm, 0);
 
 	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
-	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
-	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
+	I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x);
 	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+	I915_WRITE(PLANE_SIZE(pipe, 0), (src_h << 16) | src_w);
 
 	if (scaler_id >= 0) {
 		uint32_t ps_ctrl = 0;
@@ -3051,7 +3436,8 @@
 		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
 	}
 
-	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
+	I915_WRITE(PLANE_SURF(pipe, 0),
+		   intel_fb_gtt_offset(fb, rotation) + surf_addr);
 
 	POSTING_READ(PLANE_SURF(pipe, 0));
 }
@@ -3061,7 +3447,15 @@
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	int pipe = to_intel_crtc(crtc)->pipe;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+
+	/*
+	 * We only populate skl_results on watermark updates, and if the
+	 * plane's visiblity isn't actually changing neither is its watermarks.
+	 */
+	if (!crtc->primary->state->visible)
+		skl_write_plane_wm(intel_crtc, &dev_priv->wm.skl_results, 0);
 
 	I915_WRITE(PLANE_CTL(pipe, 0), 0);
 	I915_WRITE(PLANE_SURF(pipe, 0), 0);
@@ -3096,7 +3490,7 @@
 		struct intel_plane_state *plane_state =
 			to_intel_plane_state(plane->base.state);
 
-		if (plane_state->visible)
+		if (plane_state->base.visible)
 			plane->update_plane(&plane->base,
 					    to_intel_crtc_state(crtc->state),
 					    plane_state);
@@ -3135,6 +3529,12 @@
 	return ret;
 }
 
+static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
+{
+	return intel_has_gpu_reset(dev_priv) &&
+		INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
+}
+
 void intel_prepare_reset(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
@@ -3142,10 +3542,6 @@
 	struct drm_atomic_state *state;
 	int ret;
 
-	/* no reset support for gen2 */
-	if (IS_GEN2(dev_priv))
-		return;
-
 	/*
 	 * Need mode_config.mutex so that we don't
 	 * trample ongoing ->detect() and whatnot.
@@ -3161,7 +3557,8 @@
 	}
 
 	/* reset doesn't touch the display, but flips might get nuked anyway, */
-	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+	if (!i915.force_reset_modeset_test &&
+	    !gpu_reset_clobbers_display(dev_priv))
 		return;
 
 	/*
@@ -3204,24 +3601,28 @@
 	 */
 	intel_complete_page_flips(dev_priv);
 
-	/* no reset support for gen2 */
-	if (IS_GEN2(dev_priv))
-		return;
+	dev_priv->modeset_restore_state = NULL;
 
 	dev_priv->modeset_restore_state = NULL;
 
 	/* reset doesn't touch the display */
-	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
-		/*
-		 * Flips in the rings have been nuked by the reset,
-		 * so update the base address of all primary
-		 * planes to the the last fb to make sure we're
-		 * showing the correct fb after a reset.
-		 *
-		 * FIXME: Atomic will make this obsolete since we won't schedule
-		 * CS-based flips (which might get lost in gpu resets) any more.
-		 */
-		intel_update_primary_planes(dev);
+	if (!gpu_reset_clobbers_display(dev_priv)) {
+		if (!state) {
+			/*
+			 * Flips in the rings have been nuked by the reset,
+			 * so update the base address of all primary
+			 * planes to the the last fb to make sure we're
+			 * showing the correct fb after a reset.
+			 *
+			 * FIXME: Atomic will make this obsolete since we won't schedule
+			 * CS-based flips (which might get lost in gpu resets) any more.
+			 */
+			intel_update_primary_planes(dev);
+		} else {
+			ret = __intel_display_resume(dev, state);
+			if (ret)
+				DRM_ERROR("Restoring old state failed with %i\n", ret);
+		}
 	} else {
 		/*
 		 * The display has been reset as well,
@@ -3230,6 +3631,7 @@
 		intel_runtime_pm_disable_interrupts(dev_priv);
 		intel_runtime_pm_enable_interrupts(dev_priv);
 
+		intel_pps_unlock_regs_wa(dev_priv);
 		intel_modeset_init_hw(dev);
 
 		spin_lock_irq(&dev_priv->irq_lock);
@@ -3249,15 +3651,26 @@
 	mutex_unlock(&dev->mode_config.mutex);
 }
 
+static bool abort_flip_on_reset(struct intel_crtc *crtc)
+{
+	struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error;
+
+	if (i915_reset_in_progress(error))
+		return true;
+
+	if (crtc->reset_count != i915_reset_count(error))
+		return true;
+
+	return false;
+}
+
 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	unsigned reset_counter;
 	bool pending;
 
-	reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
-	if (intel_crtc->reset_counter != reset_counter)
+	if (abort_flip_on_reset(intel_crtc))
 		return false;
 
 	spin_lock_irq(&dev->event_lock);
@@ -3900,7 +4313,7 @@
 	return 0;
 }
 
-static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
 {
 	u32 temp;
 
@@ -4323,7 +4736,7 @@
 		      intel_crtc->pipe, SKL_CRTC_INDEX);
 
 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
-		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
+		&state->scaler_state.scaler_id, DRM_ROTATE_0,
 		state->pipe_src_w, state->pipe_src_h,
 		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
 }
@@ -4348,7 +4761,7 @@
 	struct drm_framebuffer *fb = plane_state->base.fb;
 	int ret;
 
-	bool force_detach = !fb || !plane_state->visible;
+	bool force_detach = !fb || !plane_state->base.visible;
 
 	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
 		      intel_plane->base.base.id, intel_plane->base.name,
@@ -4358,10 +4771,10 @@
 				drm_plane_index(&intel_plane->base),
 				&plane_state->scaler_id,
 				plane_state->base.rotation,
-				drm_rect_width(&plane_state->src) >> 16,
-				drm_rect_height(&plane_state->src) >> 16,
-				drm_rect_width(&plane_state->dst),
-				drm_rect_height(&plane_state->dst));
+				drm_rect_width(&plane_state->base.src) >> 16,
+				drm_rect_height(&plane_state->base.src) >> 16,
+				drm_rect_width(&plane_state->base.dst),
+				drm_rect_height(&plane_state->base.dst));
 
 	if (ret || plane_state->scaler_id < 0)
 		return ret;
@@ -4639,12 +5052,11 @@
 	struct drm_atomic_state *old_state = old_crtc_state->base.state;
 	struct intel_crtc_state *pipe_config =
 		to_intel_crtc_state(crtc->base.state);
-	struct drm_device *dev = crtc->base.dev;
 	struct drm_plane *primary = crtc->base.primary;
 	struct drm_plane_state *old_pri_state =
 		drm_atomic_get_existing_plane_state(old_state, primary);
 
-	intel_frontbuffer_flip(dev, pipe_config->fb_bits);
+	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
 
 	crtc->wm.cxsr_allowed = true;
 
@@ -4659,9 +5071,9 @@
 
 		intel_fbc_post_update(crtc);
 
-		if (primary_state->visible &&
+		if (primary_state->base.visible &&
 		    (needs_modeset(&pipe_config->base) ||
-		     !old_primary_state->visible))
+		     !old_primary_state->base.visible))
 			intel_post_enable_primary(&crtc->base);
 	}
 }
@@ -4687,8 +5099,8 @@
 
 		intel_fbc_pre_update(crtc, pipe_config, primary_state);
 
-		if (old_primary_state->visible &&
-		    (modeset || !primary_state->visible))
+		if (old_primary_state->base.visible &&
+		    (modeset || !primary_state->base.visible))
 			intel_pre_disable_primary(&crtc->base);
 	}
 
@@ -4767,18 +5179,140 @@
 	 * to compute the mask of flip planes precisely. For the time being
 	 * consider this a flip to a NULL plane.
 	 */
-	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+	intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
 }
 
-static void ironlake_crtc_enable(struct drm_crtc *crtc)
+static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
+					  struct intel_crtc_state *crtc_state,
+					  struct drm_atomic_state *old_state)
 {
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector *conn;
+	int i;
+
+	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+		struct drm_connector_state *conn_state = conn->state;
+		struct intel_encoder *encoder =
+			to_intel_encoder(conn_state->best_encoder);
+
+		if (conn_state->crtc != crtc)
+			continue;
+
+		if (encoder->pre_pll_enable)
+			encoder->pre_pll_enable(encoder, crtc_state, conn_state);
+	}
+}
+
+static void intel_encoders_pre_enable(struct drm_crtc *crtc,
+				      struct intel_crtc_state *crtc_state,
+				      struct drm_atomic_state *old_state)
+{
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector *conn;
+	int i;
+
+	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+		struct drm_connector_state *conn_state = conn->state;
+		struct intel_encoder *encoder =
+			to_intel_encoder(conn_state->best_encoder);
+
+		if (conn_state->crtc != crtc)
+			continue;
+
+		if (encoder->pre_enable)
+			encoder->pre_enable(encoder, crtc_state, conn_state);
+	}
+}
+
+static void intel_encoders_enable(struct drm_crtc *crtc,
+				  struct intel_crtc_state *crtc_state,
+				  struct drm_atomic_state *old_state)
+{
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector *conn;
+	int i;
+
+	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+		struct drm_connector_state *conn_state = conn->state;
+		struct intel_encoder *encoder =
+			to_intel_encoder(conn_state->best_encoder);
+
+		if (conn_state->crtc != crtc)
+			continue;
+
+		encoder->enable(encoder, crtc_state, conn_state);
+		intel_opregion_notify_encoder(encoder, true);
+	}
+}
+
+static void intel_encoders_disable(struct drm_crtc *crtc,
+				   struct intel_crtc_state *old_crtc_state,
+				   struct drm_atomic_state *old_state)
+{
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector *conn;
+	int i;
+
+	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+		struct intel_encoder *encoder =
+			to_intel_encoder(old_conn_state->best_encoder);
+
+		if (old_conn_state->crtc != crtc)
+			continue;
+
+		intel_opregion_notify_encoder(encoder, false);
+		encoder->disable(encoder, old_crtc_state, old_conn_state);
+	}
+}
+
+static void intel_encoders_post_disable(struct drm_crtc *crtc,
+					struct intel_crtc_state *old_crtc_state,
+					struct drm_atomic_state *old_state)
+{
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector *conn;
+	int i;
+
+	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+		struct intel_encoder *encoder =
+			to_intel_encoder(old_conn_state->best_encoder);
+
+		if (old_conn_state->crtc != crtc)
+			continue;
+
+		if (encoder->post_disable)
+			encoder->post_disable(encoder, old_crtc_state, old_conn_state);
+	}
+}
+
+static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
+					    struct intel_crtc_state *old_crtc_state,
+					    struct drm_atomic_state *old_state)
+{
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector *conn;
+	int i;
+
+	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+		struct intel_encoder *encoder =
+			to_intel_encoder(old_conn_state->best_encoder);
+
+		if (old_conn_state->crtc != crtc)
+			continue;
+
+		if (encoder->post_pll_disable)
+			encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
+	}
+}
+
+static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
+				 struct drm_atomic_state *old_state)
+{
+	struct drm_crtc *crtc = pipe_config->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
-	struct intel_crtc_state *pipe_config =
-		to_intel_crtc_state(crtc->state);
 
 	if (WARN_ON(intel_crtc->active))
 		return;
@@ -4816,9 +5350,7 @@
 
 	intel_crtc->active = true;
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->pre_enable)
-			encoder->pre_enable(encoder);
+	intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
 	if (intel_crtc->config->has_pch_encoder) {
 		/* Note: FDI PLL enabling _must_ be done before we enable the
@@ -4848,8 +5380,7 @@
 	assert_vblank_disabled(crtc);
 	drm_crtc_vblank_on(crtc);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		encoder->enable(encoder);
+	intel_encoders_enable(crtc, pipe_config, old_state);
 
 	if (HAS_PCH_CPT(dev))
 		cpt_verify_modeset(dev, intel_crtc->pipe);
@@ -4867,16 +5398,15 @@
 	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
 }
 
-static void haswell_crtc_enable(struct drm_crtc *crtc)
+static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
+				struct drm_atomic_state *old_state)
 {
+	struct drm_crtc *crtc = pipe_config->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
-	struct intel_crtc_state *pipe_config =
-		to_intel_crtc_state(crtc->state);
 
 	if (WARN_ON(intel_crtc->active))
 		return;
@@ -4885,9 +5415,7 @@
 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
 						      false);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->pre_pll_enable)
-			encoder->pre_pll_enable(encoder);
+	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
 
 	if (intel_crtc->config->shared_dpll)
 		intel_enable_shared_dpll(intel_crtc);
@@ -4925,10 +5453,7 @@
 	else
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder) {
-		if (encoder->pre_enable)
-			encoder->pre_enable(encoder);
-	}
+	intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
 	if (intel_crtc->config->has_pch_encoder)
 		dev_priv->display.fdi_link_train(crtc);
@@ -4969,10 +5494,7 @@
 	assert_vblank_disabled(crtc);
 	drm_crtc_vblank_on(crtc);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder) {
-		encoder->enable(encoder);
-		intel_opregion_notify_encoder(encoder, true);
-	}
+	intel_encoders_enable(crtc, pipe_config, old_state);
 
 	if (intel_crtc->config->has_pch_encoder) {
 		intel_wait_for_vblank(dev, pipe);
@@ -5006,12 +5528,13 @@
 	}
 }
 
-static void ironlake_crtc_disable(struct drm_crtc *crtc)
+static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
+				  struct drm_atomic_state *old_state)
 {
+	struct drm_crtc *crtc = old_crtc_state->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
 	/*
@@ -5024,8 +5547,7 @@
 		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 	}
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		encoder->disable(encoder);
+	intel_encoders_disable(crtc, old_crtc_state, old_state);
 
 	drm_crtc_vblank_off(crtc);
 	assert_vblank_disabled(crtc);
@@ -5037,9 +5559,7 @@
 	if (intel_crtc->config->has_pch_encoder)
 		ironlake_fdi_disable(crtc);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->post_disable)
-			encoder->post_disable(encoder);
+	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
 	if (intel_crtc->config->has_pch_encoder) {
 		ironlake_disable_pch_transcoder(dev_priv, pipe);
@@ -5069,22 +5589,20 @@
 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 }
 
-static void haswell_crtc_disable(struct drm_crtc *crtc)
+static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
+				 struct drm_atomic_state *old_state)
 {
+	struct drm_crtc *crtc = old_crtc_state->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *encoder;
 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 
 	if (intel_crtc->config->has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
 						      false);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder) {
-		intel_opregion_notify_encoder(encoder, false);
-		encoder->disable(encoder);
-	}
+	intel_encoders_disable(crtc, old_crtc_state, old_state);
 
 	drm_crtc_vblank_off(crtc);
 	assert_vblank_disabled(crtc);
@@ -5107,18 +5625,11 @@
 	if (!transcoder_is_dsi(cpu_transcoder))
 		intel_ddi_disable_pipe_clock(intel_crtc);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->post_disable)
-			encoder->post_disable(encoder);
+	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
-	if (intel_crtc->config->has_pch_encoder) {
-		lpt_disable_pch_transcoder(dev_priv);
-		lpt_disable_iclkip(dev_priv);
-		intel_ddi_fdi_disable(crtc);
-
+	if (old_crtc_state->has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
 						      true);
-	}
 }
 
 static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -6174,14 +6685,13 @@
 	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 }
 
-static void valleyview_crtc_enable(struct drm_crtc *crtc)
+static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
+				   struct drm_atomic_state *old_state)
 {
+	struct drm_crtc *crtc = pipe_config->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *encoder;
-	struct intel_crtc_state *pipe_config =
-		to_intel_crtc_state(crtc->state);
 	int pipe = intel_crtc->pipe;
 
 	if (WARN_ON(intel_crtc->active))
@@ -6206,9 +6716,7 @@
 
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->pre_pll_enable)
-			encoder->pre_pll_enable(encoder);
+	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
 
 	if (IS_CHERRYVIEW(dev)) {
 		chv_prepare_pll(intel_crtc, intel_crtc->config);
@@ -6218,9 +6726,7 @@
 		vlv_enable_pll(intel_crtc, intel_crtc->config);
 	}
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->pre_enable)
-			encoder->pre_enable(encoder);
+	intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
 	i9xx_pfit_enable(intel_crtc);
 
@@ -6232,8 +6738,7 @@
 	assert_vblank_disabled(crtc);
 	drm_crtc_vblank_on(crtc);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		encoder->enable(encoder);
+	intel_encoders_enable(crtc, pipe_config, old_state);
 }
 
 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
@@ -6245,14 +6750,13 @@
 	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
 }
 
-static void i9xx_crtc_enable(struct drm_crtc *crtc)
+static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
+			     struct drm_atomic_state *old_state)
 {
+	struct drm_crtc *crtc = pipe_config->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *encoder;
-	struct intel_crtc_state *pipe_config =
-		to_intel_crtc_state(crtc->state);
 	enum pipe pipe = intel_crtc->pipe;
 
 	if (WARN_ON(intel_crtc->active))
@@ -6273,9 +6777,7 @@
 	if (!IS_GEN2(dev))
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->pre_enable)
-			encoder->pre_enable(encoder);
+	intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
 	i9xx_enable_pll(intel_crtc);
 
@@ -6289,8 +6791,7 @@
 	assert_vblank_disabled(crtc);
 	drm_crtc_vblank_on(crtc);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		encoder->enable(encoder);
+	intel_encoders_enable(crtc, pipe_config, old_state);
 }
 
 static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -6308,12 +6809,13 @@
 	I915_WRITE(PFIT_CONTROL, 0);
 }
 
-static void i9xx_crtc_disable(struct drm_crtc *crtc)
+static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
+			      struct drm_atomic_state *old_state)
 {
+	struct drm_crtc *crtc = old_crtc_state->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
 	/*
@@ -6323,8 +6825,7 @@
 	if (IS_GEN2(dev))
 		intel_wait_for_vblank(dev, pipe);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		encoder->disable(encoder);
+	intel_encoders_disable(crtc, old_crtc_state, old_state);
 
 	drm_crtc_vblank_off(crtc);
 	assert_vblank_disabled(crtc);
@@ -6333,9 +6834,7 @@
 
 	i9xx_pfit_disable(intel_crtc);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->post_disable)
-			encoder->post_disable(encoder);
+	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
 	if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
 		if (IS_CHERRYVIEW(dev))
@@ -6346,9 +6845,7 @@
 			i9xx_disable_pll(intel_crtc);
 	}
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->post_pll_disable)
-			encoder->post_pll_disable(encoder);
+	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
 
 	if (!IS_GEN2(dev))
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
@@ -6361,20 +6858,34 @@
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	enum intel_display_power_domain domain;
 	unsigned long domains;
+	struct drm_atomic_state *state;
+	struct intel_crtc_state *crtc_state;
+	int ret;
 
 	if (!intel_crtc->active)
 		return;
 
-	if (to_intel_plane_state(crtc->primary->state)->visible) {
+	if (to_intel_plane_state(crtc->primary->state)->base.visible) {
 		WARN_ON(intel_crtc->flip_work);
 
 		intel_pre_disable_primary_noatomic(crtc);
 
 		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
-		to_intel_plane_state(crtc->primary->state)->visible = false;
+		to_intel_plane_state(crtc->primary->state)->base.visible = false;
 	}
 
-	dev_priv->display.crtc_disable(crtc);
+	state = drm_atomic_state_alloc(crtc->dev);
+	state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+
+	/* Everything's already locked, -EDEADLK can't happen. */
+	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+	ret = drm_atomic_add_affected_connectors(state, crtc);
+
+	WARN_ON(IS_ERR(crtc_state) || ret);
+
+	dev_priv->display.crtc_disable(crtc_state, state);
+
+	drm_atomic_state_free(state);
 
 	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
 		      crtc->base.id, crtc->name);
@@ -6889,9 +7400,10 @@
 
 static int pnv_get_display_clock_speed(struct drm_device *dev)
 {
+	struct pci_dev *pdev = dev->pdev;
 	u16 gcfgc = 0;
 
-	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+	pci_read_config_word(pdev, GCFGC, &gcfgc);
 
 	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
 	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
@@ -6913,9 +7425,10 @@
 
 static int i915gm_get_display_clock_speed(struct drm_device *dev)
 {
+	struct pci_dev *pdev = dev->pdev;
 	u16 gcfgc = 0;
 
-	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+	pci_read_config_word(pdev, GCFGC, &gcfgc);
 
 	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
 		return 133333;
@@ -6937,6 +7450,7 @@
 
 static int i85x_get_display_clock_speed(struct drm_device *dev)
 {
+	struct pci_dev *pdev = dev->pdev;
 	u16 hpllcc = 0;
 
 	/*
@@ -6944,10 +7458,10 @@
 	 * encoding is different :(
 	 * FIXME is this the right way to detect 852GM/852GMV?
 	 */
-	if (dev->pdev->revision == 0x1)
+	if (pdev->revision == 0x1)
 		return 133333;
 
-	pci_bus_read_config_word(dev->pdev->bus,
+	pci_bus_read_config_word(pdev->bus,
 				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
 
 	/* Assume that the hardware is in the high speed state.  This
@@ -7048,10 +7562,11 @@
 
 static int gm45_get_display_clock_speed(struct drm_device *dev)
 {
+	struct pci_dev *pdev = dev->pdev;
 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
 	uint16_t tmp = 0;
 
-	pci_read_config_word(dev->pdev, GCFGC, &tmp);
+	pci_read_config_word(pdev, GCFGC, &tmp);
 
 	cdclk_sel = (tmp >> 12) & 0x1;
 
@@ -7070,6 +7585,7 @@
 
 static int i965gm_get_display_clock_speed(struct drm_device *dev)
 {
+	struct pci_dev *pdev = dev->pdev;
 	static const uint8_t div_3200[] = { 16, 10,  8 };
 	static const uint8_t div_4000[] = { 20, 12, 10 };
 	static const uint8_t div_5333[] = { 24, 16, 14 };
@@ -7077,7 +7593,7 @@
 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
 	uint16_t tmp = 0;
 
-	pci_read_config_word(dev->pdev, GCFGC, &tmp);
+	pci_read_config_word(pdev, GCFGC, &tmp);
 
 	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
 
@@ -7107,6 +7623,7 @@
 
 static int g33_get_display_clock_speed(struct drm_device *dev)
 {
+	struct pci_dev *pdev = dev->pdev;
 	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
 	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
 	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
@@ -7115,7 +7632,7 @@
 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
 	uint16_t tmp = 0;
 
-	pci_read_config_word(dev->pdev, GCFGC, &tmp);
+	pci_read_config_word(pdev, GCFGC, &tmp);
 
 	cdclk_sel = (tmp >> 4) & 0x7;
 
@@ -8995,6 +9512,24 @@
 	if (intel_crtc_has_dp_encoder(crtc_state))
 		dpll |= DPLL_SDVO_HIGH_SPEED;
 
+	/*
+	 * The high speed IO clock is only really required for
+	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
+	 * possible to share the DPLL between CRT and HDMI. Enabling
+	 * the clock needlessly does no real harm, except use up a
+	 * bit of power potentially.
+	 *
+	 * We'll limit this to IVB with 3 pipes, since it has only two
+	 * DPLLs and so DPLL sharing is the only way to get three pipes
+	 * driving PCH ports at the same time. On SNB we could do this,
+	 * and potentially avoid enabling the second DPLL, but it's not
+	 * clear if it''s a win or loss power wise. No point in doing
+	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
+	 */
+	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
+	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+		dpll |= DPLL_SDVO_HIGH_SPEED;
+
 	/* compute bitmask from p1 value */
 	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 	/* also FPA1 */
@@ -9281,7 +9816,7 @@
 	return;
 
 error:
-	kfree(fb);
+	kfree(intel_fb);
 }
 
 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
@@ -9487,7 +10022,7 @@
 	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
 	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
 	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
-	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
+	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
 	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
 	     "CPU PWM1 enabled\n");
 	if (IS_HASWELL(dev))
@@ -9526,7 +10061,7 @@
 		mutex_lock(&dev_priv->rps.hw_lock);
 		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
 					    val))
-			DRM_ERROR("Failed to write to D_COMP\n");
+			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
 		mutex_unlock(&dev_priv->rps.hw_lock);
 	} else {
 		I915_WRITE(D_COMP_BDW, val);
@@ -9934,15 +10469,12 @@
 
 	switch (port) {
 	case PORT_A:
-		pipe_config->ddi_pll_sel = SKL_DPLL0;
 		id = DPLL_ID_SKL_DPLL0;
 		break;
 	case PORT_B:
-		pipe_config->ddi_pll_sel = SKL_DPLL1;
 		id = DPLL_ID_SKL_DPLL1;
 		break;
 	case PORT_C:
-		pipe_config->ddi_pll_sel = SKL_DPLL2;
 		id = DPLL_ID_SKL_DPLL2;
 		break;
 	default:
@@ -9961,25 +10493,10 @@
 	u32 temp;
 
 	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
-	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
+	id = temp >> (port * 3 + 1);
 
-	switch (pipe_config->ddi_pll_sel) {
-	case SKL_DPLL0:
-		id = DPLL_ID_SKL_DPLL0;
-		break;
-	case SKL_DPLL1:
-		id = DPLL_ID_SKL_DPLL1;
-		break;
-	case SKL_DPLL2:
-		id = DPLL_ID_SKL_DPLL2;
-		break;
-	case SKL_DPLL3:
-		id = DPLL_ID_SKL_DPLL3;
-		break;
-	default:
-		MISSING_CASE(pipe_config->ddi_pll_sel);
+	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
 		return;
-	}
 
 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
 }
@@ -9989,10 +10506,9 @@
 				struct intel_crtc_state *pipe_config)
 {
 	enum intel_dpll_id id;
+	uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
 
-	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
-
-	switch (pipe_config->ddi_pll_sel) {
+	switch (ddi_pll_sel) {
 	case PORT_CLK_SEL_WRPLL1:
 		id = DPLL_ID_WRPLL1;
 		break;
@@ -10012,7 +10528,7 @@
 		id = DPLL_ID_LCPLL_2700;
 		break;
 	default:
-		MISSING_CASE(pipe_config->ddi_pll_sel);
+		MISSING_CASE(ddi_pll_sel);
 		/* fall through */
 	case PORT_CLK_SEL_NONE:
 		return;
@@ -10245,7 +10761,7 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t cntl = 0, size = 0;
 
-	if (plane_state && plane_state->visible) {
+	if (plane_state && plane_state->base.visible) {
 		unsigned int width = plane_state->base.crtc_w;
 		unsigned int height = plane_state->base.crtc_h;
 		unsigned int stride = roundup_pow_of_two(width) * 4;
@@ -10306,10 +10822,14 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
 	int pipe = intel_crtc->pipe;
 	uint32_t cntl = 0;
 
-	if (plane_state && plane_state->visible) {
+	if (INTEL_GEN(dev_priv) >= 9 && wm->dirty_pipes & drm_crtc_mask(crtc))
+		skl_write_cursor_wm(intel_crtc, wm);
+
+	if (plane_state && plane_state->base.visible) {
 		cntl = MCURSOR_GAMMA_ENABLE;
 		switch (plane_state->base.crtc_w) {
 			case 64:
@@ -10330,7 +10850,7 @@
 		if (HAS_DDI(dev))
 			cntl |= CURSOR_PIPE_CSC_ENABLE;
 
-		if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
+		if (plane_state->base.rotation == DRM_ROTATE_180)
 			cntl |= CURSOR_ROTATE_180;
 	}
 
@@ -10376,7 +10896,7 @@
 
 		/* ILK+ do this automagically */
 		if (HAS_GMCH_DISPLAY(dev) &&
-		    plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+		    plane_state->base.rotation == DRM_ROTATE_180) {
 			base += (plane_state->base.crtc_h *
 				 plane_state->base.crtc_w - 1) * 4;
 		}
@@ -10509,7 +11029,7 @@
 
 	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
 	if (IS_ERR(fb))
-		drm_gem_object_unreference_unlocked(&obj->base);
+		i915_gem_object_put_unlocked(obj);
 
 	return fb;
 }
@@ -11020,13 +11540,13 @@
 
 	mutex_lock(&dev->struct_mutex);
 	intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
-	drm_gem_object_unreference(&work->pending_flip_obj->base);
-
-	if (work->flip_queued_req)
-		i915_gem_request_assign(&work->flip_queued_req, NULL);
+	i915_gem_object_put(work->pending_flip_obj);
 	mutex_unlock(&dev->struct_mutex);
 
-	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
+	i915_gem_request_put(work->flip_queued_req);
+
+	intel_frontbuffer_flip_complete(to_i915(dev),
+					to_intel_plane(primary)->frontbuffer_bit);
 	intel_fbc_post_update(crtc);
 	drm_framebuffer_unreference(work->old_fb);
 
@@ -11047,10 +11567,8 @@
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	unsigned reset_counter;
 
-	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
-	if (crtc->reset_counter != reset_counter)
+	if (abort_flip_on_reset(crtc))
 		return true;
 
 	/*
@@ -11191,7 +11709,7 @@
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11207,13 +11725,13 @@
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_emit(engine, MI_DISPLAY_FLIP |
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(engine, fb->pitches[0]);
-	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(engine, 0); /* aux display base address, unused */
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+	intel_ring_emit(ring, 0); /* aux display base address, unused */
 
 	return 0;
 }
@@ -11225,7 +11743,7 @@
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11238,13 +11756,13 @@
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(engine, fb->pitches[0]);
-	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+	intel_ring_emit(ring, MI_NOOP);
 
 	return 0;
 }
@@ -11256,7 +11774,7 @@
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11270,11 +11788,11 @@
 	 * Display Registers (which do not change across a page-flip)
 	 * so we need only reprogram the base address.
 	 */
-	intel_ring_emit(engine, MI_DISPLAY_FLIP |
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(engine, fb->pitches[0]);
-	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
-			obj->tiling_mode);
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
+			intel_fb_modifier_to_tiling(fb->modifier[0]));
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
 	 * untested on non-native modes, so ignore it for now.
@@ -11282,7 +11800,7 @@
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(engine, pf | pipesrc);
+	intel_ring_emit(ring, pf | pipesrc);
 
 	return 0;
 }
@@ -11294,7 +11812,7 @@
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11304,10 +11822,11 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_DISPLAY_FLIP |
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
-	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
+	intel_ring_emit(ring, fb->pitches[0] |
+			intel_fb_modifier_to_tiling(fb->modifier[0]));
+	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
 
 	/* Contrary to the suggestions in the documentation,
 	 * "Enable Panel Fitter" does not seem to be required when page
@@ -11317,7 +11836,7 @@
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(engine, pf | pipesrc);
+	intel_ring_emit(ring, pf | pipesrc);
 
 	return 0;
 }
@@ -11329,7 +11848,7 @@
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t plane_bit = 0;
 	int len, ret;
@@ -11350,7 +11869,7 @@
 	}
 
 	len = 4;
-	if (engine->id == RCS) {
+	if (req->engine->id == RCS) {
 		len += 6;
 		/*
 		 * On Gen 8, SRM is now taking an extra dword to accommodate
@@ -11388,30 +11907,32 @@
 	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
 	 */
-	if (engine->id == RCS) {
-		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(engine, DERRMR);
-		intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+	if (req->engine->id == RCS) {
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(ring, DERRMR);
+		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
 					  DERRMR_PIPEB_PRI_FLIP_DONE |
 					  DERRMR_PIPEC_PRI_FLIP_DONE));
 		if (IS_GEN8(dev))
-			intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
+			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
 					      MI_SRM_LRM_GLOBAL_GTT);
 		else
-			intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
+			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
 					      MI_SRM_LRM_GLOBAL_GTT);
-		intel_ring_emit_reg(engine, DERRMR);
-		intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
+		intel_ring_emit_reg(ring, DERRMR);
+		intel_ring_emit(ring,
+				i915_ggtt_offset(req->engine->scratch) + 256);
 		if (IS_GEN8(dev)) {
-			intel_ring_emit(engine, 0);
-			intel_ring_emit(engine, MI_NOOP);
+			intel_ring_emit(ring, 0);
+			intel_ring_emit(ring, MI_NOOP);
 		}
 	}
 
-	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
-	intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
-	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(engine, (MI_NOOP));
+	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
+	intel_ring_emit(ring, fb->pitches[0] |
+			intel_fb_modifier_to_tiling(fb->modifier[0]));
+	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+	intel_ring_emit(ring, (MI_NOOP));
 
 	return 0;
 }
@@ -11446,7 +11967,8 @@
 	if (resv && !reservation_object_test_signaled_rcu(resv, false))
 		return true;
 
-	return engine != i915_gem_request_get_engine(obj->last_write_req);
+	return engine != i915_gem_active_get_engine(&obj->last_write,
+						    &obj->base.dev->struct_mutex);
 }
 
 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -11457,7 +11979,7 @@
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
 	const enum pipe pipe = intel_crtc->pipe;
-	u32 ctl, stride, tile_height;
+	u32 ctl, stride = skl_plane_stride(fb, 0, rotation);
 
 	ctl = I915_READ(PLANE_CTL(pipe, 0));
 	ctl &= ~PLANE_CTL_TILED_MASK;
@@ -11478,20 +12000,6 @@
 	}
 
 	/*
-	 * The stride is either expressed as a multiple of 64 bytes chunks for
-	 * linear buffers or in number of tiles for tiled buffers.
-	 */
-	if (intel_rotation_90_or_270(rotation)) {
-		/* stride = Surface height in tiles */
-		tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
-		stride = DIV_ROUND_UP(fb->height, tile_height);
-	} else {
-		stride = fb->pitches[0] /
-			intel_fb_stride_alignment(dev_priv, fb->modifier[0],
-						  fb->pixel_format);
-	}
-
-	/*
 	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
 	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
 	 */
@@ -11507,15 +12015,13 @@
 {
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_framebuffer *intel_fb =
-		to_intel_framebuffer(intel_crtc->base.primary->fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
 	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
 	u32 dspcntr;
 
 	dspcntr = I915_READ(reg);
 
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
 		dspcntr |= DISPPLANE_TILED;
 	else
 		dspcntr &= ~DISPPLANE_TILED;
@@ -11538,9 +12044,8 @@
 	struct reservation_object *resv;
 
 	if (work->flip_queued_req)
-		WARN_ON(__i915_wait_request(work->flip_queued_req,
-					    false, NULL,
-					    &dev_priv->rps.mmioflips));
+		WARN_ON(i915_wait_request(work->flip_queued_req,
+					  0, NULL, NO_WAITBOOST));
 
 	/* For framebuffer backed by dmabuf, wait for fence */
 	resv = i915_gem_object_get_dmabuf_resv(obj);
@@ -11651,7 +12156,8 @@
 	struct intel_flip_work *work;
 	struct intel_engine_cs *engine;
 	bool mmio_flip;
-	struct drm_i915_gem_request *request = NULL;
+	struct drm_i915_gem_request *request;
+	struct i915_vma *vma;
 	int ret;
 
 	/*
@@ -11717,22 +12223,18 @@
 
 	/* Reference the objects for the scheduled work. */
 	drm_framebuffer_reference(work->old_fb);
-	drm_gem_object_reference(&obj->base);
 
 	crtc->primary->fb = fb;
 	update_state_fb(crtc->primary);
 
-	intel_fbc_pre_update(intel_crtc, intel_crtc->config,
-			     to_intel_plane_state(primary->state));
-
-	work->pending_flip_obj = obj;
+	work->pending_flip_obj = i915_gem_object_get(obj);
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		goto cleanup;
 
-	intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
-	if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
+	intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
+	if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
 		ret = -EIO;
 		goto cleanup;
 	}
@@ -11744,13 +12246,14 @@
 
 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
 		engine = &dev_priv->engine[BCS];
-		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
+		if (fb->modifier[0] != old_fb->modifier[0])
 			/* vlv: DISPLAY_FLIP fails to change tiling */
 			engine = NULL;
 	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
 		engine = &dev_priv->engine[BCS];
 	} else if (INTEL_INFO(dev)->gen >= 7) {
-		engine = i915_gem_request_get_engine(obj->last_write_req);
+		engine = i915_gem_active_get_engine(&obj->last_write,
+						    &obj->base.dev->struct_mutex);
 		if (engine == NULL || engine->id != RCS)
 			engine = &dev_priv->engine[BCS];
 	} else {
@@ -11759,47 +12262,52 @@
 
 	mmio_flip = use_mmio_flip(engine, obj);
 
-	/* When using CS flips, we want to emit semaphores between rings.
-	 * However, when using mmio flips we will create a task to do the
-	 * synchronisation, so all we want here is to pin the framebuffer
-	 * into the display plane and skip any waits.
-	 */
-	if (!mmio_flip) {
-		ret = i915_gem_object_sync(obj, engine, &request);
-		if (!ret && !request) {
-			request = i915_gem_request_alloc(engine, NULL);
-			ret = PTR_ERR_OR_ZERO(request);
-		}
-
-		if (ret)
-			goto cleanup_pending;
+	vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto cleanup_pending;
 	}
 
-	ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
-	if (ret)
-		goto cleanup_pending;
-
-	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
-						  obj, 0);
+	work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
 	work->gtt_offset += intel_crtc->dspaddr_offset;
 	work->rotation = crtc->primary->state->rotation;
 
+	/*
+	 * There's the potential that the next frame will not be compatible with
+	 * FBC, so we want to call pre_update() before the actual page flip.
+	 * The problem is that pre_update() caches some information about the fb
+	 * object, so we want to do this only after the object is pinned. Let's
+	 * be on the safe side and do this immediately before scheduling the
+	 * flip.
+	 */
+	intel_fbc_pre_update(intel_crtc, intel_crtc->config,
+			     to_intel_plane_state(primary->state));
+
 	if (mmio_flip) {
 		INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
 
-		i915_gem_request_assign(&work->flip_queued_req,
-					obj->last_write_req);
-
+		work->flip_queued_req = i915_gem_active_get(&obj->last_write,
+							    &obj->base.dev->struct_mutex);
 		schedule_work(&work->mmio_work);
 	} else {
-		i915_gem_request_assign(&work->flip_queued_req, request);
+		request = i915_gem_request_alloc(engine, engine->last_context);
+		if (IS_ERR(request)) {
+			ret = PTR_ERR(request);
+			goto cleanup_unpin;
+		}
+
+		ret = i915_gem_request_await_object(request, obj, false);
+		if (ret)
+			goto cleanup_request;
+
 		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
 						   page_flip_flags);
 		if (ret)
-			goto cleanup_unpin;
+			goto cleanup_request;
 
 		intel_mark_page_flip_active(intel_crtc, work);
 
+		work->flip_queued_req = i915_gem_request_get(request);
 		i915_add_request_no_flush(request);
 	}
 
@@ -11807,25 +12315,25 @@
 			  to_intel_plane(primary)->frontbuffer_bit);
 	mutex_unlock(&dev->struct_mutex);
 
-	intel_frontbuffer_flip_prepare(dev,
+	intel_frontbuffer_flip_prepare(to_i915(dev),
 				       to_intel_plane(primary)->frontbuffer_bit);
 
 	trace_i915_flip_request(intel_crtc->plane, obj);
 
 	return 0;
 
+cleanup_request:
+	i915_add_request_no_flush(request);
 cleanup_unpin:
 	intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
 cleanup_pending:
-	if (!IS_ERR_OR_NULL(request))
-		i915_add_request_no_flush(request);
 	atomic_dec(&intel_crtc->unpin_work_count);
 	mutex_unlock(&dev->struct_mutex);
 cleanup:
 	crtc->primary->fb = old_fb;
 	update_state_fb(crtc->primary);
 
-	drm_gem_object_unreference_unlocked(&obj->base);
+	i915_gem_object_put_unlocked(obj);
 	drm_framebuffer_unreference(work->old_fb);
 
 	spin_lock_irq(&dev->event_lock);
@@ -11893,7 +12401,7 @@
 	struct intel_plane_state *cur = to_intel_plane_state(plane->state);
 
 	/* Update watermarks on tiling or size changes. */
-	if (new->visible != cur->visible)
+	if (new->base.visible != cur->base.visible)
 		return true;
 
 	if (!cur->base.fb || !new->base.fb)
@@ -11901,10 +12409,10 @@
 
 	if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
 	    cur->base.rotation != new->base.rotation ||
-	    drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
-	    drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
-	    drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
-	    drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
+	    drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
+	    drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
+	    drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
+	    drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
 		return true;
 
 	return false;
@@ -11912,10 +12420,10 @@
 
 static bool needs_scaling(struct intel_plane_state *state)
 {
-	int src_w = drm_rect_width(&state->src) >> 16;
-	int src_h = drm_rect_height(&state->src) >> 16;
-	int dst_w = drm_rect_width(&state->dst);
-	int dst_h = drm_rect_height(&state->dst);
+	int src_w = drm_rect_width(&state->base.src) >> 16;
+	int src_h = drm_rect_height(&state->base.src) >> 16;
+	int dst_w = drm_rect_width(&state->base.dst);
+	int dst_h = drm_rect_height(&state->base.dst);
 
 	return (src_w != dst_w || src_h != dst_h);
 }
@@ -11946,8 +12454,8 @@
 			return ret;
 	}
 
-	was_visible = old_plane_state->visible;
-	visible = to_intel_plane_state(plane_state)->visible;
+	was_visible = old_plane_state->base.visible;
+	visible = to_intel_plane_state(plane_state)->base.visible;
 
 	if (!was_crtc_enabled && WARN_ON(was_visible))
 		was_visible = false;
@@ -11963,7 +12471,7 @@
 	 * only combine the results from all planes in the current place?
 	 */
 	if (!is_crtc_enabled)
-		to_intel_plane_state(plane_state)->visible = visible = false;
+		to_intel_plane_state(plane_state)->base.visible = visible = false;
 
 	if (!was_visible && !visible)
 		return 0;
@@ -12167,22 +12675,22 @@
 connected_sink_compute_bpp(struct intel_connector *connector,
 			   struct intel_crtc_state *pipe_config)
 {
+	const struct drm_display_info *info = &connector->base.display_info;
 	int bpp = pipe_config->pipe_bpp;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
-		connector->base.base.id,
-		connector->base.name);
+		      connector->base.base.id,
+		      connector->base.name);
 
 	/* Don't use an invalid EDID bpc value */
-	if (connector->base.display_info.bpc &&
-	    connector->base.display_info.bpc * 3 < bpp) {
+	if (info->bpc != 0 && info->bpc * 3 < bpp) {
 		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
-			      bpp, connector->base.display_info.bpc*3);
-		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
+			      bpp, info->bpc * 3);
+		pipe_config->pipe_bpp = info->bpc * 3;
 	}
 
 	/* Clamp bpp to 8 on screens without EDID 1.4 */
-	if (connector->base.display_info.bpc == 0 && bpp > 24) {
+	if (info->bpc == 0 && bpp > 24) {
 		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
 			      bpp);
 		pipe_config->pipe_bpp = 24;
@@ -12301,10 +12809,9 @@
 	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
 
 	if (IS_BROXTON(dev)) {
-		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
+		DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
 			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
 			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
-			      pipe_config->ddi_pll_sel,
 			      pipe_config->dpll_hw_state.ebb0,
 			      pipe_config->dpll_hw_state.ebb4,
 			      pipe_config->dpll_hw_state.pll0,
@@ -12317,15 +12824,13 @@
 			      pipe_config->dpll_hw_state.pll10,
 			      pipe_config->dpll_hw_state.pcsdw12);
 	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
-		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
+		DRM_DEBUG_KMS("dpll_hw_state: "
 			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
-			      pipe_config->ddi_pll_sel,
 			      pipe_config->dpll_hw_state.ctrl1,
 			      pipe_config->dpll_hw_state.cfgcr1,
 			      pipe_config->dpll_hw_state.cfgcr2);
 	} else if (HAS_DDI(dev)) {
-		DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
-			      pipe_config->ddi_pll_sel,
+		DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
 			      pipe_config->dpll_hw_state.wrpll,
 			      pipe_config->dpll_hw_state.spll);
 	} else {
@@ -12339,6 +12844,7 @@
 
 	DRM_DEBUG_KMS("planes on this crtc\n");
 	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		char *format_name;
 		intel_plane = to_intel_plane(plane);
 		if (intel_plane->pipe != crtc->pipe)
 			continue;
@@ -12351,19 +12857,23 @@
 			continue;
 		}
 
+		format_name = drm_get_format_name(fb->pixel_format);
+
 		DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
 			      plane->base.id, plane->name);
 		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
-			      fb->base.id, fb->width, fb->height,
-			      drm_get_format_name(fb->pixel_format));
+			      fb->base.id, fb->width, fb->height, format_name);
 		DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
 			      state->scaler_id,
-			      state->src.x1 >> 16, state->src.y1 >> 16,
-			      drm_rect_width(&state->src) >> 16,
-			      drm_rect_height(&state->src) >> 16,
-			      state->dst.x1, state->dst.y1,
-			      drm_rect_width(&state->dst),
-			      drm_rect_height(&state->dst));
+			      state->base.src.x1 >> 16,
+			      state->base.src.y1 >> 16,
+			      drm_rect_width(&state->base.src) >> 16,
+			      drm_rect_height(&state->base.src) >> 16,
+			      state->base.dst.x1, state->base.dst.y1,
+			      drm_rect_width(&state->base.dst),
+			      drm_rect_height(&state->base.dst));
+
+		kfree(format_name);
 	}
 }
 
@@ -12372,6 +12882,7 @@
 	struct drm_device *dev = state->dev;
 	struct drm_connector *connector;
 	unsigned int used_ports = 0;
+	unsigned int used_mst_ports = 0;
 
 	/*
 	 * Walk the connector list instead of the encoder
@@ -12408,11 +12919,20 @@
 				return false;
 
 			used_ports |= port_mask;
+			break;
+		case INTEL_OUTPUT_DP_MST:
+			used_mst_ports |=
+				1 << enc_to_mst(&encoder->base)->primary->port;
+			break;
 		default:
 			break;
 		}
 	}
 
+	/* can't mix MST and SST/HDMI on the same port */
+	if (used_ports & used_mst_ports)
+		return false;
+
 	return true;
 }
 
@@ -12423,7 +12943,6 @@
 	struct intel_crtc_scaler_state scaler_state;
 	struct intel_dpll_hw_state dpll_hw_state;
 	struct intel_shared_dpll *shared_dpll;
-	uint32_t ddi_pll_sel;
 	bool force_thru;
 
 	/* FIXME: before the switch to atomic started, a new pipe_config was
@@ -12435,7 +12954,6 @@
 	scaler_state = crtc_state->scaler_state;
 	shared_dpll = crtc_state->shared_dpll;
 	dpll_hw_state = crtc_state->dpll_hw_state;
-	ddi_pll_sel = crtc_state->ddi_pll_sel;
 	force_thru = crtc_state->pch_pfit.force_thru;
 
 	memset(crtc_state, 0, sizeof *crtc_state);
@@ -12444,7 +12962,6 @@
 	crtc_state->scaler_state = scaler_state;
 	crtc_state->shared_dpll = shared_dpll;
 	crtc_state->dpll_hw_state = dpll_hw_state;
-	crtc_state->ddi_pll_sel = ddi_pll_sel;
 	crtc_state->pch_pfit.force_thru = force_thru;
 }
 
@@ -12532,7 +13049,7 @@
 
 		encoder = to_intel_encoder(connector_state->best_encoder);
 
-		if (!(encoder->compute_config(encoder, pipe_config))) {
+		if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
 			DRM_DEBUG_KMS("Encoder config failure\n");
 			goto fail;
 		}
@@ -12620,12 +13137,6 @@
 	return false;
 }
 
-#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
-	list_for_each_entry((intel_crtc), \
-			    &(dev)->mode_config.crtc_list, \
-			    base.head) \
-		for_each_if (mask & (1 <<(intel_crtc)->pipe))
-
 static bool
 intel_compare_m_n(unsigned int m, unsigned int n,
 		  unsigned int m2, unsigned int n2,
@@ -12873,8 +13384,6 @@
 
 	PIPE_CONF_CHECK_I(double_wide);
 
-	PIPE_CONF_CHECK_X(ddi_pll_sel);
-
 	PIPE_CONF_CHECK_P(shared_dpll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
@@ -12956,16 +13465,23 @@
 			  hw_entry->start, hw_entry->end);
 	}
 
-	/* cursor */
-	hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
-	sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+	/*
+	 * cursor
+	 * If the cursor plane isn't active, we may not have updated it's ddb
+	 * allocation. In that case since the ddb allocation will be updated
+	 * once the plane becomes visible, we can skip this check
+	 */
+	if (intel_crtc->cursor_addr) {
+		hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
+		sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
 
-	if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
-		DRM_ERROR("mismatch in DDB state pipe %c cursor "
-			  "(expected (%u,%u), found (%u,%u))\n",
-			  pipe_name(pipe),
-			  sw_entry->start, sw_entry->end,
-			  hw_entry->start, hw_entry->end);
+		if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
+			DRM_ERROR("mismatch in DDB state pipe %c cursor "
+				  "(expected (%u,%u), found (%u,%u))\n",
+				  pipe_name(pipe),
+				  sw_entry->start, sw_entry->end,
+				  hw_entry->start, hw_entry->end);
+		}
 	}
 }
 
@@ -13580,8 +14096,9 @@
 			if (!intel_plane_state->wait_req)
 				continue;
 
-			ret = __i915_wait_request(intel_plane_state->wait_req,
-						  true, NULL, NULL);
+			ret = i915_wait_request(intel_plane_state->wait_req,
+						I915_WAIT_INTERRUPTIBLE,
+						NULL, NULL);
 			if (ret) {
 				/* Any hang should be swallowed by the wait */
 				WARN_ON(ret == -EIO);
@@ -13671,6 +14188,111 @@
 	return false;
 }
 
+static void intel_update_crtc(struct drm_crtc *crtc,
+			      struct drm_atomic_state *state,
+			      struct drm_crtc_state *old_crtc_state,
+			      unsigned int *crtc_vblank_mask)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state);
+	bool modeset = needs_modeset(crtc->state);
+
+	if (modeset) {
+		update_scanline_offset(intel_crtc);
+		dev_priv->display.crtc_enable(pipe_config, state);
+	} else {
+		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
+	}
+
+	if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
+		intel_fbc_enable(
+		    intel_crtc, pipe_config,
+		    to_intel_plane_state(crtc->primary->state));
+	}
+
+	drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
+
+	if (needs_vblank_wait(pipe_config))
+		*crtc_vblank_mask |= drm_crtc_mask(crtc);
+}
+
+static void intel_update_crtcs(struct drm_atomic_state *state,
+			       unsigned int *crtc_vblank_mask)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	int i;
+
+	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+		if (!crtc->state->active)
+			continue;
+
+		intel_update_crtc(crtc, state, old_crtc_state,
+				  crtc_vblank_mask);
+	}
+}
+
+static void skl_update_crtcs(struct drm_atomic_state *state,
+			     unsigned int *crtc_vblank_mask)
+{
+	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+	unsigned int updated = 0;
+	bool progress;
+	enum pipe pipe;
+
+	/*
+	 * Whenever the number of active pipes changes, we need to make sure we
+	 * update the pipes in the right order so that their ddb allocations
+	 * never overlap with eachother inbetween CRTC updates. Otherwise we'll
+	 * cause pipe underruns and other bad stuff.
+	 */
+	do {
+		int i;
+		progress = false;
+
+		for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+			bool vbl_wait = false;
+			unsigned int cmask = drm_crtc_mask(crtc);
+			pipe = to_intel_crtc(crtc)->pipe;
+
+			if (updated & cmask || !crtc->state->active)
+				continue;
+			if (skl_ddb_allocation_overlaps(state, cur_ddb, new_ddb,
+							pipe))
+				continue;
+
+			updated |= cmask;
+
+			/*
+			 * If this is an already active pipe, it's DDB changed,
+			 * and this isn't the last pipe that needs updating
+			 * then we need to wait for a vblank to pass for the
+			 * new ddb allocation to take effect.
+			 */
+			if (!skl_ddb_allocation_equals(cur_ddb, new_ddb, pipe) &&
+			    !crtc->state->active_changed &&
+			    intel_state->wm_results.dirty_pipes != updated)
+				vbl_wait = true;
+
+			intel_update_crtc(crtc, state, old_crtc_state,
+					  crtc_vblank_mask);
+
+			if (vbl_wait)
+				intel_wait_for_vblank(dev, pipe);
+
+			progress = true;
+		}
+	} while (progress);
+}
+
 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
@@ -13693,8 +14315,8 @@
 		if (!intel_plane_state->wait_req)
 			continue;
 
-		ret = __i915_wait_request(intel_plane_state->wait_req,
-					  true, NULL, NULL);
+		ret = i915_wait_request(intel_plane_state->wait_req,
+					0, NULL, NULL);
 		/* EIO should be eaten, and we can't get interrupted in the
 		 * worker, and blocking commits have waited already. */
 		WARN_ON(ret);
@@ -13730,7 +14352,7 @@
 
 		if (old_crtc_state->active) {
 			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
-			dev_priv->display.crtc_disable(crtc);
+			dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
 			intel_crtc->active = false;
 			intel_fbc_disable(intel_crtc);
 			intel_disable_shared_dpll(intel_crtc);
@@ -13763,23 +14385,15 @@
 		 * SKL workaround: bspec recommends we disable the SAGV when we
 		 * have more then one pipe enabled
 		 */
-		if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state))
-			skl_disable_sagv(dev_priv);
+		if (!intel_can_enable_sagv(state))
+			intel_disable_sagv(dev_priv);
 
 		intel_modeset_verify_disabled(dev);
 	}
 
-	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
+	/* Complete the events for pipes that have now been disabled */
 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 		bool modeset = needs_modeset(crtc->state);
-		struct intel_crtc_state *pipe_config =
-			to_intel_crtc_state(crtc->state);
-
-		if (modeset && crtc->state->active) {
-			update_scanline_offset(to_intel_crtc(crtc));
-			dev_priv->display.crtc_enable(crtc);
-		}
 
 		/* Complete events for now disable pipes here. */
 		if (modeset && !crtc->state->active && crtc->state->event) {
@@ -13789,21 +14403,11 @@
 
 			crtc->state->event = NULL;
 		}
-
-		if (!modeset)
-			intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
-
-		if (crtc->state->active &&
-		    drm_atomic_get_existing_plane_state(state, crtc->primary))
-			intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state));
-
-		if (crtc->state->active)
-			drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
-
-		if (pipe_config->base.active && needs_vblank_wait(pipe_config))
-			crtc_vblank_mask |= 1 << i;
 	}
 
+	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
+	dev_priv->display.update_crtcs(state, &crtc_vblank_mask);
+
 	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
 	 * already, but still need the state for the delayed optimization. To
 	 * fix this:
@@ -13839,9 +14443,8 @@
 		intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
 	}
 
-	if (IS_SKYLAKE(dev_priv) && intel_state->modeset &&
-	    skl_can_enable_sagv(state))
-		skl_enable_sagv(dev_priv);
+	if (intel_state->modeset && intel_can_enable_sagv(state))
+		intel_enable_sagv(dev_priv);
 
 	drm_atomic_helper_commit_hw_done(state);
 
@@ -13882,19 +14485,12 @@
 {
 	struct drm_plane_state *old_plane_state;
 	struct drm_plane *plane;
-	struct drm_i915_gem_object *obj, *old_obj;
-	struct intel_plane *intel_plane;
 	int i;
 
-	mutex_lock(&state->dev->struct_mutex);
-	for_each_plane_in_state(state, plane, old_plane_state, i) {
-		obj = intel_fb_obj(plane->state->fb);
-		old_obj = intel_fb_obj(old_plane_state->fb);
-		intel_plane = to_intel_plane(plane);
-
-		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
-	}
-	mutex_unlock(&state->dev->struct_mutex);
+	for_each_plane_in_state(state, plane, old_plane_state, i)
+		i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
+				  intel_fb_obj(plane->state->fb),
+				  to_intel_plane(plane)->frontbuffer_bit);
 }
 
 /**
@@ -13990,8 +14586,6 @@
 		drm_atomic_state_free(state);
 }
 
-#undef for_each_intel_crtc_masked
-
 /*
  * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling
  *        drm_atomic_helper_legacy_gamma_set() directly.
@@ -14060,7 +14654,7 @@
  */
 int
 intel_prepare_plane_fb(struct drm_plane *plane,
-		       const struct drm_plane_state *new_state)
+		       struct drm_plane_state *new_state)
 {
 	struct drm_device *dev = plane->dev;
 	struct drm_framebuffer *fb = new_state->fb;
@@ -14119,15 +14713,17 @@
 		if (ret)
 			DRM_DEBUG_KMS("failed to attach phys object\n");
 	} else {
-		ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
+		struct i915_vma *vma;
+
+		vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
+		if (IS_ERR(vma))
+			ret = PTR_ERR(vma);
 	}
 
 	if (ret == 0) {
-		struct intel_plane_state *plane_state =
-			to_intel_plane_state(new_state);
-
-		i915_gem_request_assign(&plane_state->wait_req,
-					obj->last_write_req);
+		to_intel_plane_state(new_state)->wait_req =
+			i915_gem_active_get(&obj->last_write,
+					    &obj->base.dev->struct_mutex);
 	}
 
 	return ret;
@@ -14144,10 +14740,11 @@
  */
 void
 intel_cleanup_plane_fb(struct drm_plane *plane,
-		       const struct drm_plane_state *old_state)
+		       struct drm_plane_state *old_state)
 {
 	struct drm_device *dev = plane->dev;
 	struct intel_plane_state *old_intel_state;
+	struct intel_plane_state *intel_state = to_intel_plane_state(plane->state);
 	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
 	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
 
@@ -14160,6 +14757,7 @@
 	    !INTEL_INFO(dev)->cursor_needs_physical))
 		intel_unpin_fb_obj(old_state->fb, old_state->rotation);
 
+	i915_gem_request_assign(&intel_state->wait_req, NULL);
 	i915_gem_request_assign(&old_intel_state->wait_req, NULL);
 }
 
@@ -14194,13 +14792,14 @@
 			  struct intel_crtc_state *crtc_state,
 			  struct intel_plane_state *state)
 {
+	struct drm_i915_private *dev_priv = to_i915(plane->dev);
 	struct drm_crtc *crtc = state->base.crtc;
-	struct drm_framebuffer *fb = state->base.fb;
 	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
 	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
 	bool can_position = false;
+	int ret;
 
-	if (INTEL_INFO(plane->dev)->gen >= 9) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		/* use scaler when colorkey is not required */
 		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
 			min_scale = 1;
@@ -14209,22 +14808,35 @@
 		can_position = true;
 	}
 
-	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
-					     &state->dst, &state->clip,
-					     state->base.rotation,
-					     min_scale, max_scale,
-					     can_position, true,
-					     &state->visible);
+	ret = drm_plane_helper_check_state(&state->base,
+					   &state->clip,
+					   min_scale, max_scale,
+					   can_position, true);
+	if (ret)
+		return ret;
+
+	if (!state->base.fb)
+		return 0;
+
+	if (INTEL_GEN(dev_priv) >= 9) {
+		ret = skl_check_plane_surface(state);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 static void intel_begin_crtc_commit(struct drm_crtc *crtc,
 				    struct drm_crtc_state *old_crtc_state)
 {
 	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_crtc_state *old_intel_state =
 		to_intel_crtc_state(old_crtc_state);
 	bool modeset = needs_modeset(crtc->state);
+	enum pipe pipe = intel_crtc->pipe;
 
 	/* Perform vblank evasion around commit operation */
 	intel_pipe_update_start(intel_crtc);
@@ -14239,8 +14851,12 @@
 
 	if (to_intel_crtc_state(crtc->state)->update_pipe)
 		intel_update_pipe_config(intel_crtc, old_intel_state);
-	else if (INTEL_INFO(dev)->gen >= 9)
+	else if (INTEL_GEN(dev_priv) >= 9) {
 		skl_detach_scalers(intel_crtc);
+
+		I915_WRITE(PIPE_WM_LINETIME(pipe),
+			   dev_priv->wm.skl_hw.wm_linetime[pipe]);
+	}
 }
 
 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
@@ -14374,11 +14990,11 @@
 void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
 {
 	if (!dev->mode_config.rotation_property) {
-		unsigned long flags = BIT(DRM_ROTATE_0) |
-			BIT(DRM_ROTATE_180);
+		unsigned long flags = DRM_ROTATE_0 |
+			DRM_ROTATE_180;
 
 		if (INTEL_INFO(dev)->gen >= 9)
-			flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
+			flags |= DRM_ROTATE_90 | DRM_ROTATE_270;
 
 		dev->mode_config.rotation_property =
 			drm_mode_create_rotation_property(dev, flags);
@@ -14394,19 +15010,17 @@
 			 struct intel_crtc_state *crtc_state,
 			 struct intel_plane_state *state)
 {
-	struct drm_crtc *crtc = crtc_state->base.crtc;
 	struct drm_framebuffer *fb = state->base.fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	enum pipe pipe = to_intel_plane(plane)->pipe;
 	unsigned stride;
 	int ret;
 
-	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
-					    &state->dst, &state->clip,
-					    state->base.rotation,
-					    DRM_PLANE_HELPER_NO_SCALING,
-					    DRM_PLANE_HELPER_NO_SCALING,
-					    true, true, &state->visible);
+	ret = drm_plane_helper_check_state(&state->base,
+					   &state->clip,
+					   DRM_PLANE_HELPER_NO_SCALING,
+					   DRM_PLANE_HELPER_NO_SCALING,
+					   true, true);
 	if (ret)
 		return ret;
 
@@ -14443,7 +15057,7 @@
 	 * Refuse the put the cursor into that compromised position.
 	 */
 	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
-	    state->visible && state->base.crtc_x < 0) {
+	    state->base.visible && state->base.crtc_x < 0) {
 		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
 		return -EINVAL;
 	}
@@ -14475,7 +15089,7 @@
 	if (!obj)
 		addr = 0;
 	else if (!INTEL_INFO(dev)->cursor_needs_physical)
-		addr = i915_gem_obj_ggtt_offset(obj);
+		addr = i915_gem_object_ggtt_offset(obj, NULL);
 	else
 		addr = obj->phys_handle->busaddr;
 
@@ -14521,8 +15135,8 @@
 		if (!dev->mode_config.rotation_property)
 			dev->mode_config.rotation_property =
 				drm_mode_create_rotation_property(dev,
-							BIT(DRM_ROTATE_0) |
-							BIT(DRM_ROTATE_180));
+							DRM_ROTATE_0 |
+							DRM_ROTATE_180);
 		if (dev->mode_config.rotation_property)
 			drm_object_attach_property(&cursor->base.base,
 				dev->mode_config.rotation_property,
@@ -14728,12 +15342,50 @@
 	return true;
 }
 
+void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
+{
+	int pps_num;
+	int pps_idx;
+
+	if (HAS_DDI(dev_priv))
+		return;
+	/*
+	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
+	 * everywhere where registers can be write protected.
+	 */
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		pps_num = 2;
+	else
+		pps_num = 1;
+
+	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
+		u32 val = I915_READ(PP_CONTROL(pps_idx));
+
+		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
+		I915_WRITE(PP_CONTROL(pps_idx), val);
+	}
+}
+
+static void intel_pps_init(struct drm_i915_private *dev_priv)
+{
+	if (HAS_PCH_SPLIT(dev_priv) || IS_BROXTON(dev_priv))
+		dev_priv->pps_mmio_base = PCH_PPS_BASE;
+	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		dev_priv->pps_mmio_base = VLV_PPS_BASE;
+	else
+		dev_priv->pps_mmio_base = PPS_BASE;
+
+	intel_pps_unlock_regs_wa(dev_priv);
+}
+
 static void intel_setup_outputs(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_encoder *encoder;
 	bool dpd_is_edp = false;
 
+	intel_pps_init(dev_priv);
+
 	/*
 	 * intel_edp_init_connector() depends on this completing first, to
 	 * prevent the registeration of both eDP and LVDS and the incorrect
@@ -14921,7 +15573,7 @@
 	drm_framebuffer_cleanup(fb);
 	mutex_lock(&dev->struct_mutex);
 	WARN_ON(!intel_fb->obj->framebuffer_references--);
-	drm_gem_object_unreference(&intel_fb->obj->base);
+	i915_gem_object_put(intel_fb->obj);
 	mutex_unlock(&dev->struct_mutex);
 	kfree(intel_fb);
 }
@@ -15001,24 +15653,27 @@
 				  struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	unsigned int aligned_height;
+	unsigned int tiling = i915_gem_object_get_tiling(obj);
 	int ret;
 	u32 pitch_limit, stride_alignment;
+	char *format_name;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
-		/* Enforce that fb modifier and tiling mode match, but only for
-		 * X-tiled. This is needed for FBC. */
-		if (!!(obj->tiling_mode == I915_TILING_X) !=
-		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
+		/*
+		 * If there's a fence, enforce that
+		 * the fb modifier and tiling mode match.
+		 */
+		if (tiling != I915_TILING_NONE &&
+		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
 			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
 			return -EINVAL;
 		}
 	} else {
-		if (obj->tiling_mode == I915_TILING_X)
+		if (tiling == I915_TILING_X) {
 			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
-		else if (obj->tiling_mode == I915_TILING_Y) {
+		} else if (tiling == I915_TILING_Y) {
 			DRM_DEBUG("No Y tiling for legacy addfb\n");
 			return -EINVAL;
 		}
@@ -15042,6 +15697,16 @@
 		return -EINVAL;
 	}
 
+	/*
+	 * gen2/3 display engine uses the fence if present,
+	 * so the tiling mode must match the fb modifier exactly.
+	 */
+	if (INTEL_INFO(dev_priv)->gen < 4 &&
+	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+		DRM_DEBUG("tiling_mode must match fb modifier exactly on gen2/3\n");
+		return -EINVAL;
+	}
+
 	stride_alignment = intel_fb_stride_alignment(dev_priv,
 						     mode_cmd->modifier[0],
 						     mode_cmd->pixel_format);
@@ -15061,10 +15726,15 @@
 		return -EINVAL;
 	}
 
-	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
-	    mode_cmd->pitches[0] != obj->stride) {
+	/*
+	 * If there's a fence, enforce that
+	 * the fb pitch and fence stride match.
+	 */
+	if (tiling != I915_TILING_NONE &&
+	    mode_cmd->pitches[0] != i915_gem_object_get_stride(obj)) {
 		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
-			  mode_cmd->pitches[0], obj->stride);
+			  mode_cmd->pitches[0],
+			  i915_gem_object_get_stride(obj));
 		return -EINVAL;
 	}
 
@@ -15077,16 +15747,18 @@
 		break;
 	case DRM_FORMAT_XRGB1555:
 		if (INTEL_INFO(dev)->gen > 3) {
-			DRM_DEBUG("unsupported pixel format: %s\n",
-				  drm_get_format_name(mode_cmd->pixel_format));
+			format_name = drm_get_format_name(mode_cmd->pixel_format);
+			DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+			kfree(format_name);
 			return -EINVAL;
 		}
 		break;
 	case DRM_FORMAT_ABGR8888:
 		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
 		    INTEL_INFO(dev)->gen < 9) {
-			DRM_DEBUG("unsupported pixel format: %s\n",
-				  drm_get_format_name(mode_cmd->pixel_format));
+			format_name = drm_get_format_name(mode_cmd->pixel_format);
+			DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+			kfree(format_name);
 			return -EINVAL;
 		}
 		break;
@@ -15094,15 +15766,17 @@
 	case DRM_FORMAT_XRGB2101010:
 	case DRM_FORMAT_XBGR2101010:
 		if (INTEL_INFO(dev)->gen < 4) {
-			DRM_DEBUG("unsupported pixel format: %s\n",
-				  drm_get_format_name(mode_cmd->pixel_format));
+			format_name = drm_get_format_name(mode_cmd->pixel_format);
+			DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+			kfree(format_name);
 			return -EINVAL;
 		}
 		break;
 	case DRM_FORMAT_ABGR2101010:
 		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
-			DRM_DEBUG("unsupported pixel format: %s\n",
-				  drm_get_format_name(mode_cmd->pixel_format));
+			format_name = drm_get_format_name(mode_cmd->pixel_format);
+			DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+			kfree(format_name);
 			return -EINVAL;
 		}
 		break;
@@ -15111,14 +15785,16 @@
 	case DRM_FORMAT_YVYU:
 	case DRM_FORMAT_VYUY:
 		if (INTEL_INFO(dev)->gen < 5) {
-			DRM_DEBUG("unsupported pixel format: %s\n",
-				  drm_get_format_name(mode_cmd->pixel_format));
+			format_name = drm_get_format_name(mode_cmd->pixel_format);
+			DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+			kfree(format_name);
 			return -EINVAL;
 		}
 		break;
 	default:
-		DRM_DEBUG("unsupported pixel format: %s\n",
-			  drm_get_format_name(mode_cmd->pixel_format));
+		format_name = drm_get_format_name(mode_cmd->pixel_format);
+		DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+		kfree(format_name);
 		return -EINVAL;
 	}
 
@@ -15126,17 +15802,12 @@
 	if (mode_cmd->offsets[0] != 0)
 		return -EINVAL;
 
-	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
-					       mode_cmd->pixel_format,
-					       mode_cmd->modifier[0]);
-	/* FIXME drm helper for size checks (especially planar formats)? */
-	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
-		return -EINVAL;
-
 	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
 	intel_fb->obj = obj;
 
-	intel_fill_fb_info(dev_priv, &intel_fb->base);
+	ret = intel_fill_fb_info(dev_priv, &intel_fb->base);
+	if (ret)
+		return ret;
 
 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
 	if (ret) {
@@ -15158,13 +15829,13 @@
 	struct drm_i915_gem_object *obj;
 	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
 
-	obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0]));
-	if (&obj->base == NULL)
+	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
+	if (!obj)
 		return ERR_PTR(-ENOENT);
 
 	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
 	if (IS_ERR(fb))
-		drm_gem_object_unreference_unlocked(&obj->base);
+		i915_gem_object_put_unlocked(obj);
 
 	return fb;
 }
@@ -15347,6 +16018,11 @@
 			skl_modeset_calc_cdclk;
 	}
 
+	if (dev_priv->info.gen >= 9)
+		dev_priv->display.update_crtcs = skl_update_crtcs;
+	else
+		dev_priv->display.update_crtcs = intel_update_crtcs;
+
 	switch (INTEL_INFO(dev_priv)->gen) {
 	case 2:
 		dev_priv->display.queue_flip = intel_gen2_queue_flip;
@@ -15548,15 +16224,16 @@
 static void i915_disable_vga(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	u8 sr1;
 	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
 
 	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
-	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
 	outb(SR01, VGA_SR_INDEX);
 	sr1 = inb(VGA_SR_DATA);
 	outb(sr1 | 1<<5, VGA_SR_DATA);
-	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+	vga_put(pdev, VGA_RSRC_LEGACY_IO);
 	udelay(300);
 
 	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
@@ -15572,7 +16249,6 @@
 	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
 
 	intel_init_clock_gating(dev);
-	intel_enable_gt_powersave(dev_priv);
 }
 
 /*
@@ -15839,15 +16515,22 @@
 	return false;
 }
 
-static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
+static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct intel_connector *connector;
 
 	for_each_connector_on_encoder(dev, &encoder->base, connector)
-		return true;
+		return connector;
 
-	return false;
+	return NULL;
+}
+
+static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
+			      enum transcoder pch_transcoder)
+{
+	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
+		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
 }
 
 static void intel_sanitize_crtc(struct intel_crtc *crtc)
@@ -15893,7 +16576,7 @@
 		 * Temporarily change the plane mapping and disable everything
 		 * ...  */
 		plane = crtc->plane;
-		to_intel_plane_state(crtc->base.primary->state)->visible = true;
+		to_intel_plane_state(crtc->base.primary->state)->base.visible = true;
 		crtc->plane = !plane;
 		intel_crtc_disable_noatomic(&crtc->base);
 		crtc->plane = plane;
@@ -15928,14 +16611,23 @@
 		 * worst a fifo underrun happens which also sets this to false.
 		 */
 		crtc->cpu_fifo_underrun_disabled = true;
-		crtc->pch_fifo_underrun_disabled = true;
+		/*
+		 * We track the PCH trancoder underrun reporting state
+		 * within the crtc. With crtc for pipe A housing the underrun
+		 * reporting state for PCH transcoder A, crtc for pipe B housing
+		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
+		 * and marking underrun reporting as disabled for the non-existing
+		 * PCH transcoders B and C would prevent enabling the south
+		 * error interrupt (see cpt_can_enable_serr_int()).
+		 */
+		if (has_pch_trancoder(dev_priv, (enum transcoder)crtc->pipe))
+			crtc->pch_fifo_underrun_disabled = true;
 	}
 }
 
 static void intel_sanitize_encoder(struct intel_encoder *encoder)
 {
 	struct intel_connector *connector;
-	struct drm_device *dev = encoder->base.dev;
 
 	/* We need to check both for a crtc link (meaning that the
 	 * encoder is active and trying to read from a pipe) and the
@@ -15943,7 +16635,8 @@
 	bool has_active_crtc = encoder->base.crtc &&
 		to_intel_crtc(encoder->base.crtc)->active;
 
-	if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
+	connector = intel_encoder_find_connector(encoder);
+	if (connector && !has_active_crtc) {
 		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
 			      encoder->base.base.id,
 			      encoder->base.name);
@@ -15952,12 +16645,14 @@
 		 * fallout from our resume register restoring. Disable
 		 * the encoder manually again. */
 		if (encoder->base.crtc) {
+			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;
+
 			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
 				      encoder->base.base.id,
 				      encoder->base.name);
-			encoder->disable(encoder);
+			encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
 			if (encoder->post_disable)
-				encoder->post_disable(encoder);
+				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
 		}
 		encoder->base.crtc = NULL;
 
@@ -15965,12 +16660,9 @@
 		 * a bug in one of the get_hw_state functions. Or someplace else
 		 * in our code, like the register restore mess on resume. Clamp
 		 * things to off as a safer default. */
-		for_each_intel_connector(dev, connector) {
-			if (connector->encoder != encoder)
-				continue;
-			connector->base.dpms = DRM_MODE_DPMS_OFF;
-			connector->base.encoder = NULL;
-		}
+
+		connector->base.dpms = DRM_MODE_DPMS_OFF;
+		connector->base.encoder = NULL;
 	}
 	/* Enabled encoders without active connectors will be fixed in
 	 * the crtc fixup. */
@@ -16020,10 +16712,10 @@
 	struct intel_plane_state *plane_state =
 		to_intel_plane_state(primary->state);
 
-	plane_state->visible = crtc->active &&
+	plane_state->base.visible = crtc->active &&
 		primary_get_hw_state(to_intel_plane(primary));
 
-	if (plane_state->visible)
+	if (plane_state->base.visible)
 		crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
 }
 
@@ -16282,7 +16974,6 @@
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *c;
 	struct drm_i915_gem_object *obj;
-	int ret;
 
 	intel_init_gt_powersave(dev_priv);
 
@@ -16296,15 +16987,17 @@
 	 * for this.
 	 */
 	for_each_crtc(dev, c) {
+		struct i915_vma *vma;
+
 		obj = intel_fb_obj(c->primary->fb);
 		if (obj == NULL)
 			continue;
 
 		mutex_lock(&dev->struct_mutex);
-		ret = intel_pin_and_fence_fb_obj(c->primary->fb,
+		vma = intel_pin_and_fence_fb_obj(c->primary->fb,
 						 c->primary->state->rotation);
 		mutex_unlock(&dev->struct_mutex);
-		if (ret) {
+		if (IS_ERR(vma)) {
 			DRM_ERROR("failed to pin boot fb on pipe %d\n",
 				  to_intel_crtc(c)->pipe);
 			drm_framebuffer_unreference(c->primary->fb);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 21b04c3..14a3cf0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -190,6 +190,29 @@
 	return (max_link_clock * max_lanes * 8) / 10;
 }
 
+static int
+intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *encoder = &intel_dig_port->base;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	int max_dotclk = dev_priv->max_dotclk_freq;
+	int ds_max_dotclk;
+
+	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
+
+	if (type != DP_DS_PORT_TYPE_VGA)
+		return max_dotclk;
+
+	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
+						    intel_dp->downstream_ports);
+
+	if (ds_max_dotclk != 0)
+		max_dotclk = min(max_dotclk, ds_max_dotclk);
+
+	return max_dotclk;
+}
+
 static enum drm_mode_status
 intel_dp_mode_valid(struct drm_connector *connector,
 		    struct drm_display_mode *mode)
@@ -199,7 +222,9 @@
 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 	int target_clock = mode->clock;
 	int max_rate, mode_rate, max_lanes, max_link_clock;
-	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+	int max_dotclk;
+
+	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
 
 	if (is_edp(intel_dp) && fixed_mode) {
 		if (mode->hdisplay > fixed_mode->hdisplay)
@@ -256,6 +281,8 @@
 static void
 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 					      struct intel_dp *intel_dp);
+static void
+intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
 
 static void pps_lock(struct intel_dp *intel_dp)
 {
@@ -463,13 +490,13 @@
 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
 			       enum pipe pipe)
 {
-	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
+	return I915_READ(PP_STATUS(pipe)) & PP_ON;
 }
 
 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
 				enum pipe pipe)
 {
-	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
+	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
 }
 
 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
@@ -486,7 +513,7 @@
 	enum pipe pipe;
 
 	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
-		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
+		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
 			PANEL_PORT_SELECT_MASK;
 
 		if (port_sel != PANEL_PORT_SELECT_VLV(port))
@@ -583,30 +610,21 @@
 				    struct intel_dp *intel_dp,
 				    struct pps_registers *regs)
 {
+	int pps_idx = 0;
+
 	memset(regs, 0, sizeof(*regs));
 
-	if (IS_BROXTON(dev_priv)) {
-		int idx = bxt_power_sequencer_idx(intel_dp);
+	if (IS_BROXTON(dev_priv))
+		pps_idx = bxt_power_sequencer_idx(intel_dp);
+	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		pps_idx = vlv_power_sequencer_pipe(intel_dp);
 
-		regs->pp_ctrl = BXT_PP_CONTROL(idx);
-		regs->pp_stat = BXT_PP_STATUS(idx);
-		regs->pp_on = BXT_PP_ON_DELAYS(idx);
-		regs->pp_off = BXT_PP_OFF_DELAYS(idx);
-	} else if (HAS_PCH_SPLIT(dev_priv)) {
-		regs->pp_ctrl = PCH_PP_CONTROL;
-		regs->pp_stat = PCH_PP_STATUS;
-		regs->pp_on = PCH_PP_ON_DELAYS;
-		regs->pp_off = PCH_PP_OFF_DELAYS;
-		regs->pp_div = PCH_PP_DIVISOR;
-	} else {
-		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
-
-		regs->pp_ctrl = VLV_PIPE_PP_CONTROL(pipe);
-		regs->pp_stat = VLV_PIPE_PP_STATUS(pipe);
-		regs->pp_on = VLV_PIPE_PP_ON_DELAYS(pipe);
-		regs->pp_off = VLV_PIPE_PP_OFF_DELAYS(pipe);
-		regs->pp_div = VLV_PIPE_PP_DIVISOR(pipe);
-	}
+	regs->pp_ctrl = PP_CONTROL(pps_idx);
+	regs->pp_stat = PP_STATUS(pps_idx);
+	regs->pp_on = PP_ON_DELAYS(pps_idx);
+	regs->pp_off = PP_OFF_DELAYS(pps_idx);
+	if (!IS_BROXTON(dev_priv))
+		regs->pp_div = PP_DIVISOR(pps_idx);
 }
 
 static i915_reg_t
@@ -651,8 +669,8 @@
 		i915_reg_t pp_ctrl_reg, pp_div_reg;
 		u32 pp_div;
 
-		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
-		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
+		pp_ctrl_reg = PP_CONTROL(pipe);
+		pp_div_reg  = PP_DIVISOR(pipe);
 		pp_div = I915_READ(pp_div_reg);
 		pp_div &= PP_REFERENCE_DIVIDER_MASK;
 
@@ -1041,10 +1059,10 @@
 		if (WARN_ON(txsize > 20))
 			return -E2BIG;
 
+		WARN_ON(!msg->buffer != !msg->size);
+
 		if (msg->buffer)
 			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
-		else
-			WARN_ON(msg->size);
 
 		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
 		if (ret > 0) {
@@ -1250,7 +1268,7 @@
 }
 
 static void
-intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
+intel_dp_aux_init(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	enum port port = intel_dig_port->port;
@@ -1426,6 +1444,44 @@
 	DRM_DEBUG_KMS("common rates: %s\n", str);
 }
 
+static void intel_dp_print_hw_revision(struct intel_dp *intel_dp)
+{
+	uint8_t rev;
+	int len;
+
+	if ((drm_debug & DRM_UT_KMS) == 0)
+		return;
+
+	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+	      DP_DWN_STRM_PORT_PRESENT))
+		return;
+
+	len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_HW_REV, &rev, 1);
+	if (len < 0)
+		return;
+
+	DRM_DEBUG_KMS("sink hw revision: %d.%d\n", (rev & 0xf0) >> 4, rev & 0xf);
+}
+
+static void intel_dp_print_sw_revision(struct intel_dp *intel_dp)
+{
+	uint8_t rev[2];
+	int len;
+
+	if ((drm_debug & DRM_UT_KMS) == 0)
+		return;
+
+	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+	      DP_DWN_STRM_PORT_PRESENT))
+		return;
+
+	len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_SW_REV, &rev, 2);
+	if (len < 0)
+		return;
+
+	DRM_DEBUG_KMS("sink sw revision: %d.%d\n", rev[0], rev[1]);
+}
+
 static int rate_to_index(int find, const int *rates)
 {
 	int i = 0;
@@ -1447,7 +1503,7 @@
 	if (WARN_ON(len <= 0))
 		return 162000;
 
-	return rates[rate_to_index(0, rates) - 1];
+	return rates[len - 1];
 }
 
 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
@@ -1468,9 +1524,24 @@
 	}
 }
 
+static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
+				struct intel_crtc_state *pipe_config)
+{
+	int bpp, bpc;
+
+	bpp = pipe_config->pipe_bpp;
+	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
+
+	if (bpc > 0)
+		bpp = min(bpp, 3*bpc);
+
+	return bpp;
+}
+
 bool
 intel_dp_compute_config(struct intel_encoder *encoder,
-			struct intel_crtc_state *pipe_config)
+			struct intel_crtc_state *pipe_config,
+			struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1533,7 +1604,7 @@
 
 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
 	 * bpc in between. */
-	bpp = pipe_config->pipe_bpp;
+	bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
 	if (is_edp(intel_dp)) {
 
 		/* Get bpp from vbt only for panels that dont have bpp in edid */
@@ -1647,22 +1718,28 @@
 }
 
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
-			      const struct intel_crtc_state *pipe_config)
+			      int link_rate, uint8_t lane_count,
+			      bool link_mst)
 {
-	intel_dp->link_rate = pipe_config->port_clock;
-	intel_dp->lane_count = pipe_config->lane_count;
+	intel_dp->link_rate = link_rate;
+	intel_dp->lane_count = lane_count;
+	intel_dp->link_mst = link_mst;
 }
 
-static void intel_dp_prepare(struct intel_encoder *encoder)
+static void intel_dp_prepare(struct intel_encoder *encoder,
+			     struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	enum port port = dp_to_dig_port(intel_dp)->port;
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 
-	intel_dp_set_link_params(intel_dp, crtc->config);
+	intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
+				 pipe_config->lane_count,
+				 intel_crtc_has_type(pipe_config,
+						     INTEL_OUTPUT_DP_MST));
 
 	/*
 	 * There are four kinds of DP registers:
@@ -1688,7 +1765,7 @@
 
 	/* Handle DP bits in common between all three register formats */
 	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
-	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
+	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
 
 	/* Split out the IBX/CPU vs CPT settings */
 
@@ -1716,7 +1793,7 @@
 		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
 	} else {
 		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
-		    !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
+		    !IS_CHERRYVIEW(dev) && pipe_config->limited_color_range)
 			intel_dp->DP |= DP_COLOR_RANGE_16_235;
 
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1835,7 +1912,8 @@
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
 	control = I915_READ(_pp_ctrl_reg(intel_dp));
-	if (!IS_BROXTON(dev)) {
+	if (WARN_ON(!HAS_DDI(dev_priv) &&
+		    (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
 		control &= ~PANEL_UNLOCK_MASK;
 		control |= PANEL_UNLOCK_REGS;
 	}
@@ -1956,7 +2034,7 @@
 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
 	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
 
-	if ((pp & POWER_TARGET_ON) == 0)
+	if ((pp & PANEL_POWER_ON) == 0)
 		intel_dp->panel_power_off_time = ktime_get_boottime();
 
 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
@@ -2043,7 +2121,7 @@
 		POSTING_READ(pp_ctrl_reg);
 	}
 
-	pp |= POWER_TARGET_ON;
+	pp |= PANEL_POWER_ON;
 	if (!IS_GEN5(dev))
 		pp |= PANEL_POWER_RESET;
 
@@ -2095,7 +2173,7 @@
 	pp = ironlake_get_pp_control(intel_dp);
 	/* We need to switch off panel power _and_ force vdd, for otherwise some
 	 * panels get very unhappy and cease to work. */
-	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
+	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
 		EDP_BLC_ENABLE);
 
 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
@@ -2254,10 +2332,10 @@
 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
 
-static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
+static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
+				struct intel_crtc_state *pipe_config)
 {
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -2265,11 +2343,11 @@
 	assert_edp_pll_disabled(dev_priv);
 
 	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
-		      crtc->config->port_clock);
+		      pipe_config->port_clock);
 
 	intel_dp->DP &= ~DP_PLL_FREQ_MASK;
 
-	if (crtc->config->port_clock == 162000)
+	if (pipe_config->port_clock == 162000)
 		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
 	else
 		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
@@ -2478,16 +2556,17 @@
 	}
 }
 
-static void intel_disable_dp(struct intel_encoder *encoder)
+static void intel_disable_dp(struct intel_encoder *encoder,
+			     struct intel_crtc_state *old_crtc_state,
+			     struct drm_connector_state *old_conn_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-	struct drm_device *dev = encoder->base.dev;
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	if (crtc->config->has_audio)
+	if (old_crtc_state->has_audio)
 		intel_audio_codec_disable(encoder);
 
-	if (HAS_PSR(dev) && !HAS_DDI(dev))
+	if (HAS_PSR(dev_priv) && !HAS_DDI(dev_priv))
 		intel_psr_disable(intel_dp);
 
 	/* Make sure the panel is off before trying to change the mode. But also
@@ -2498,11 +2577,13 @@
 	intel_edp_panel_off(intel_dp);
 
 	/* disable the port before the pipe on g4x */
-	if (INTEL_INFO(dev)->gen < 5)
+	if (INTEL_GEN(dev_priv) < 5)
 		intel_dp_link_down(intel_dp);
 }
 
-static void ilk_post_disable_dp(struct intel_encoder *encoder)
+static void ilk_post_disable_dp(struct intel_encoder *encoder,
+				struct intel_crtc_state *old_crtc_state,
+				struct drm_connector_state *old_conn_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	enum port port = dp_to_dig_port(intel_dp)->port;
@@ -2514,14 +2595,18 @@
 		ironlake_edp_pll_off(intel_dp);
 }
 
-static void vlv_post_disable_dp(struct intel_encoder *encoder)
+static void vlv_post_disable_dp(struct intel_encoder *encoder,
+				struct intel_crtc_state *old_crtc_state,
+				struct drm_connector_state *old_conn_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
 	intel_dp_link_down(intel_dp);
 }
 
-static void chv_post_disable_dp(struct intel_encoder *encoder)
+static void chv_post_disable_dp(struct intel_encoder *encoder,
+				struct intel_crtc_state *old_crtc_state,
+				struct drm_connector_state *old_conn_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
@@ -2547,6 +2632,10 @@
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum port port = intel_dig_port->port;
 
+	if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
+		DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
+			      dp_train_pat & DP_TRAINING_PATTERN_MASK);
+
 	if (HAS_DDI(dev)) {
 		uint32_t temp = I915_READ(DP_TP_CTL(port));
 
@@ -2588,7 +2677,7 @@
 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
 			break;
 		case DP_TRAINING_PATTERN_3:
-			DRM_ERROR("DP training pattern 3 not supported\n");
+			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
 			break;
 		}
@@ -2613,7 +2702,7 @@
 			if (IS_CHERRYVIEW(dev)) {
 				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
 			} else {
-				DRM_ERROR("DP training pattern 3 not supported\n");
+				DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
 				*DP |= DP_LINK_TRAIN_PAT_2;
 			}
 			break;
@@ -2621,19 +2710,15 @@
 	}
 }
 
-static void intel_dp_enable_port(struct intel_dp *intel_dp)
+static void intel_dp_enable_port(struct intel_dp *intel_dp,
+				 struct intel_crtc_state *old_crtc_state)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc =
-		to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
 
 	/* enable with pattern 1 (as per spec) */
-	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
-				 DP_TRAINING_PATTERN_1);
 
-	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
-	POSTING_READ(intel_dp->output_reg);
+	intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
 
 	/*
 	 * Magic for VLV/CHV. We _must_ first set up the register
@@ -2642,14 +2727,15 @@
 	 * fail when the power sequencer is freshly used for this port.
 	 */
 	intel_dp->DP |= DP_PORT_EN;
-	if (crtc->config->has_audio)
+	if (old_crtc_state->has_audio)
 		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 
 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
 	POSTING_READ(intel_dp->output_reg);
 }
 
-static void intel_enable_dp(struct intel_encoder *encoder)
+static void intel_enable_dp(struct intel_encoder *encoder,
+			    struct intel_crtc_state *pipe_config)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
@@ -2666,7 +2752,7 @@
 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
 		vlv_init_panel_power_sequencer(intel_dp);
 
-	intel_dp_enable_port(intel_dp);
+	intel_dp_enable_port(intel_dp, pipe_config);
 
 	edp_panel_vdd_on(intel_dp);
 	edp_panel_on(intel_dp);
@@ -2678,7 +2764,7 @@
 		unsigned int lane_mask = 0x0;
 
 		if (IS_CHERRYVIEW(dev))
-			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
+			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
 
 		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
 				    lane_mask);
@@ -2688,22 +2774,26 @@
 	intel_dp_start_link_train(intel_dp);
 	intel_dp_stop_link_train(intel_dp);
 
-	if (crtc->config->has_audio) {
+	if (pipe_config->has_audio) {
 		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
 				 pipe_name(pipe));
 		intel_audio_codec_enable(encoder);
 	}
 }
 
-static void g4x_enable_dp(struct intel_encoder *encoder)
+static void g4x_enable_dp(struct intel_encoder *encoder,
+			  struct intel_crtc_state *pipe_config,
+			  struct drm_connector_state *conn_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
-	intel_enable_dp(encoder);
+	intel_enable_dp(encoder, pipe_config);
 	intel_edp_backlight_on(intel_dp);
 }
 
-static void vlv_enable_dp(struct intel_encoder *encoder)
+static void vlv_enable_dp(struct intel_encoder *encoder,
+			  struct intel_crtc_state *pipe_config,
+			  struct drm_connector_state *conn_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
@@ -2711,16 +2801,18 @@
 	intel_psr_enable(intel_dp);
 }
 
-static void g4x_pre_enable_dp(struct intel_encoder *encoder)
+static void g4x_pre_enable_dp(struct intel_encoder *encoder,
+			      struct intel_crtc_state *pipe_config,
+			      struct drm_connector_state *conn_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	enum port port = dp_to_dig_port(intel_dp)->port;
 
-	intel_dp_prepare(encoder);
+	intel_dp_prepare(encoder, pipe_config);
 
 	/* Only ilk+ has port A */
 	if (port == PORT_A)
-		ironlake_edp_pll_on(intel_dp);
+		ironlake_edp_pll_on(intel_dp, pipe_config);
 }
 
 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
@@ -2728,7 +2820,7 @@
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
 	enum pipe pipe = intel_dp->pps_pipe;
-	i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
 
 	edp_panel_vdd_off_sync(intel_dp);
 
@@ -2826,38 +2918,48 @@
 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
 }
 
-static void vlv_pre_enable_dp(struct intel_encoder *encoder)
+static void vlv_pre_enable_dp(struct intel_encoder *encoder,
+			      struct intel_crtc_state *pipe_config,
+			      struct drm_connector_state *conn_state)
 {
 	vlv_phy_pre_encoder_enable(encoder);
 
-	intel_enable_dp(encoder);
+	intel_enable_dp(encoder, pipe_config);
 }
 
-static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
+				  struct intel_crtc_state *pipe_config,
+				  struct drm_connector_state *conn_state)
 {
-	intel_dp_prepare(encoder);
+	intel_dp_prepare(encoder, pipe_config);
 
 	vlv_phy_pre_pll_enable(encoder);
 }
 
-static void chv_pre_enable_dp(struct intel_encoder *encoder)
+static void chv_pre_enable_dp(struct intel_encoder *encoder,
+			      struct intel_crtc_state *pipe_config,
+			      struct drm_connector_state *conn_state)
 {
 	chv_phy_pre_encoder_enable(encoder);
 
-	intel_enable_dp(encoder);
+	intel_enable_dp(encoder, pipe_config);
 
 	/* Second common lane will stay alive on its own now */
 	chv_phy_release_cl2_override(encoder);
 }
 
-static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
+static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
+				  struct intel_crtc_state *pipe_config,
+				  struct drm_connector_state *conn_state)
 {
-	intel_dp_prepare(encoder);
+	intel_dp_prepare(encoder, pipe_config);
 
 	chv_phy_pre_pll_enable(encoder);
 }
 
-static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
+static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
+				    struct intel_crtc_state *pipe_config,
+				    struct drm_connector_state *conn_state)
 {
 	chv_phy_post_pll_disable(encoder);
 }
@@ -3395,20 +3497,94 @@
 }
 
 static bool
-intel_dp_get_dpcd(struct intel_dp *intel_dp)
+intel_dp_read_dpcd(struct intel_dp *intel_dp)
 {
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
 			     sizeof(intel_dp->dpcd)) < 0)
 		return false; /* aux transfer failed */
 
 	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
 
-	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
-		return false; /* DPCD not present */
+	return intel_dp->dpcd[DP_DPCD_REV] != 0;
+}
+
+static bool
+intel_edp_init_dpcd(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv =
+		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+
+	/* this function is meant to be called only once */
+	WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
+
+	if (!intel_dp_read_dpcd(intel_dp))
+		return false;
+
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
+		dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
+			DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
+
+	/* Check if the panel supports PSR */
+	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
+			 intel_dp->psr_dpcd,
+			 sizeof(intel_dp->psr_dpcd));
+	if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
+		dev_priv->psr.sink_support = true;
+		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+	}
+
+	if (INTEL_GEN(dev_priv) >= 9 &&
+	    (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
+		uint8_t frame_sync_cap;
+
+		dev_priv->psr.sink_support = true;
+		drm_dp_dpcd_read(&intel_dp->aux,
+				 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
+				 &frame_sync_cap, 1);
+		dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
+		/* PSR2 needs frame sync as well */
+		dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
+		DRM_DEBUG_KMS("PSR2 %s on sink",
+			      dev_priv->psr.psr2_support ? "supported" : "not supported");
+	}
+
+	/* Read the eDP Display control capabilities registers */
+	if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
+	    drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd) ==
+			     sizeof(intel_dp->edp_dpcd)))
+		DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
+			      intel_dp->edp_dpcd);
+
+	/* Intermediate frequency support */
+	if (intel_dp->edp_dpcd[0] >= 0x03) { /* eDp v1.4 or higher */
+		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+		int i;
+
+		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
+				sink_rates, sizeof(sink_rates));
+
+		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
+			int val = le16_to_cpu(sink_rates[i]);
+
+			if (val == 0)
+				break;
+
+			/* Value read is in kHz while drm clock is saved in deca-kHz */
+			intel_dp->sink_rates[i] = (val * 200) / 10;
+		}
+		intel_dp->num_sink_rates = i;
+	}
+
+	return true;
+}
+
+
+static bool
+intel_dp_get_dpcd(struct intel_dp *intel_dp)
+{
+	if (!intel_dp_read_dpcd(intel_dp))
+		return false;
 
 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
 			     &intel_dp->sink_count, 1) < 0)
@@ -3431,68 +3607,6 @@
 	if (!is_edp(intel_dp) && !intel_dp->sink_count)
 		return false;
 
-	/* Check if the panel supports PSR */
-	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
-	if (is_edp(intel_dp)) {
-		drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
-				 intel_dp->psr_dpcd,
-				 sizeof(intel_dp->psr_dpcd));
-		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
-			dev_priv->psr.sink_support = true;
-			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
-		}
-
-		if (INTEL_INFO(dev)->gen >= 9 &&
-			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
-			uint8_t frame_sync_cap;
-
-			dev_priv->psr.sink_support = true;
-			drm_dp_dpcd_read(&intel_dp->aux,
-					 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
-					 &frame_sync_cap, 1);
-			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
-			/* PSR2 needs frame sync as well */
-			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
-			DRM_DEBUG_KMS("PSR2 %s on sink",
-				dev_priv->psr.psr2_support ? "supported" : "not supported");
-		}
-
-		/* Read the eDP Display control capabilities registers */
-		memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd));
-		if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
-				(drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
-						intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
-								sizeof(intel_dp->edp_dpcd)))
-			DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
-					intel_dp->edp_dpcd);
-	}
-
-	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
-		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
-		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
-
-	/* Intermediate frequency support */
-	if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDp v1.4 or higher */
-		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
-		int i;
-
-		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
-				sink_rates, sizeof(sink_rates));
-
-		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
-			int val = le16_to_cpu(sink_rates[i]);
-
-			if (val == 0)
-				break;
-
-			/* Value read is in kHz while drm clock is saved in deca-kHz */
-			intel_dp->sink_rates[i] = (val * 200) / 10;
-		}
-		intel_dp->num_sink_rates = i;
-	}
-
-	intel_dp_print_rates(intel_dp);
-
 	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
 	      DP_DWN_STRM_PORT_PRESENT))
 		return true; /* native DP sink */
@@ -3526,7 +3640,7 @@
 }
 
 static bool
-intel_dp_probe_mst(struct intel_dp *intel_dp)
+intel_dp_can_mst(struct intel_dp *intel_dp)
 {
 	u8 buf[1];
 
@@ -3539,18 +3653,30 @@
 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
 		return false;
 
-	if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
-		if (buf[0] & DP_MST_CAP) {
-			DRM_DEBUG_KMS("Sink is MST capable\n");
-			intel_dp->is_mst = true;
-		} else {
-			DRM_DEBUG_KMS("Sink is not MST capable\n");
-			intel_dp->is_mst = false;
-		}
-	}
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1) != 1)
+		return false;
 
-	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
-	return intel_dp->is_mst;
+	return buf[0] & DP_MST_CAP;
+}
+
+static void
+intel_dp_configure_mst(struct intel_dp *intel_dp)
+{
+	if (!i915.enable_dp_mst)
+		return;
+
+	if (!intel_dp->can_mst)
+		return;
+
+	intel_dp->is_mst = intel_dp_can_mst(intel_dp);
+
+	if (intel_dp->is_mst)
+		DRM_DEBUG_KMS("Sink is MST capable\n");
+	else
+		DRM_DEBUG_KMS("Sink is not MST capable\n");
+
+	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+					intel_dp->is_mst);
 }
 
 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
@@ -3909,7 +4035,7 @@
 intel_dp_short_pulse(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	u8 sink_irq_vector;
+	u8 sink_irq_vector = 0;
 	u8 old_sink_count = intel_dp->sink_count;
 	bool ret;
 
@@ -3936,7 +4062,8 @@
 
 	/* Try to read the source of the interrupt */
 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
-	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
+	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
+	    sink_irq_vector != 0) {
 		/* Clear interrupt source */
 		drm_dp_dpcd_writeb(&intel_dp->aux,
 				   DP_DEVICE_SERVICE_IRQ_VECTOR,
@@ -3980,6 +4107,9 @@
 		connector_status_connected : connector_status_disconnected;
 	}
 
+	if (intel_dp_can_mst(intel_dp))
+		return connector_status_connected;
+
 	/* If no HPD, poke DDC gently */
 	if (drm_probe_ddc(&intel_dp->aux.ddc))
 		return connector_status_connected;
@@ -4148,7 +4278,7 @@
  *
  * Return %true if @port is connected, %false otherwise.
  */
-bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
+static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
 					 struct intel_digital_port *port)
 {
 	if (HAS_PCH_IBX(dev_priv))
@@ -4207,7 +4337,7 @@
 	intel_dp->has_audio = false;
 }
 
-static void
+static enum drm_connector_status
 intel_dp_long_pulse(struct intel_connector *intel_connector)
 {
 	struct drm_connector *connector = &intel_connector->base;
@@ -4217,8 +4347,7 @@
 	struct drm_device *dev = connector->dev;
 	enum drm_connector_status status;
 	enum intel_display_power_domain power_domain;
-	bool ret;
-	u8 sink_irq_vector;
+	u8 sink_irq_vector = 0;
 
 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
 	intel_display_power_get(to_i915(dev), power_domain);
@@ -4232,7 +4361,7 @@
 	else
 		status = connector_status_disconnected;
 
-	if (status != connector_status_connected) {
+	if (status == connector_status_disconnected) {
 		intel_dp->compliance_test_active = 0;
 		intel_dp->compliance_test_type = 0;
 		intel_dp->compliance_test_data = 0;
@@ -4252,10 +4381,20 @@
 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
 		intel_encoder->type = INTEL_OUTPUT_DP;
 
+	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
+		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
+		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
+
+	intel_dp_print_rates(intel_dp);
+
 	intel_dp_probe_oui(intel_dp);
 
-	ret = intel_dp_probe_mst(intel_dp);
-	if (ret) {
+	intel_dp_print_hw_revision(intel_dp);
+	intel_dp_print_sw_revision(intel_dp);
+
+	intel_dp_configure_mst(intel_dp);
+
+	if (intel_dp->is_mst) {
 		/*
 		 * If we are in MST mode then this connector
 		 * won't appear connected or have anything
@@ -4284,13 +4423,14 @@
 	intel_dp->aux.i2c_defer_count = 0;
 
 	intel_dp_set_edid(intel_dp);
-
-	status = connector_status_connected;
+	if (is_edp(intel_dp) || intel_connector->detect_edid)
+		status = connector_status_connected;
 	intel_dp->detect_done = true;
 
 	/* Try to read the source of the interrupt */
 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
-	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
+	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
+	    sink_irq_vector != 0) {
 		/* Clear interrupt source */
 		drm_dp_dpcd_writeb(&intel_dp->aux,
 				   DP_DEVICE_SERVICE_IRQ_VECTOR,
@@ -4303,12 +4443,11 @@
 	}
 
 out:
-	if ((status != connector_status_connected) &&
-	    (intel_dp->is_mst == false))
+	if (status != connector_status_connected && !intel_dp->is_mst)
 		intel_dp_unset_edid(intel_dp);
 
 	intel_display_power_put(to_i915(dev), power_domain);
-	return;
+	return status;
 }
 
 static enum drm_connector_status
@@ -4317,7 +4456,7 @@
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
-	struct intel_connector *intel_connector = to_intel_connector(connector);
+	enum drm_connector_status status = connector->status;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
 		      connector->base.id, connector->name);
@@ -4332,14 +4471,11 @@
 
 	/* If full detect is not performed yet, do a full detect */
 	if (!intel_dp->detect_done)
-		intel_dp_long_pulse(intel_dp->attached_connector);
+		status = intel_dp_long_pulse(intel_dp->attached_connector);
 
 	intel_dp->detect_done = false;
 
-	if (is_edp(intel_dp) || intel_connector->detect_edid)
-		return connector_status_connected;
-	else
-		return connector_status_disconnected;
+	return status;
 }
 
 static void
@@ -4630,13 +4766,8 @@
 
 	pps_lock(intel_dp);
 
-	/*
-	 * Read out the current power sequencer assignment,
-	 * in case the BIOS did something with it.
-	 */
-	if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
-		vlv_initial_power_sequencer_setup(intel_dp);
-
+	/* Reinit the power sequencer, in case BIOS did something with it. */
+	intel_dp_pps_init(encoder->dev, intel_dp);
 	intel_edp_panel_vdd_sanitize(intel_dp);
 
 	pps_unlock(intel_dp);
@@ -4696,36 +4827,34 @@
 		      port_name(intel_dig_port->port),
 		      long_hpd ? "long" : "short");
 
+	if (long_hpd) {
+		intel_dp->detect_done = false;
+		return IRQ_NONE;
+	}
+
 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
 	intel_display_power_get(dev_priv, power_domain);
 
-	if (long_hpd) {
-		intel_dp_long_pulse(intel_dp->attached_connector);
-		if (intel_dp->is_mst)
-			ret = IRQ_HANDLED;
-		goto put_power;
-
-	} else {
-		if (intel_dp->is_mst) {
-			if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
-				/*
-				 * If we were in MST mode, and device is not
-				 * there, get out of MST mode
-				 */
-				DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
-					      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
-				intel_dp->is_mst = false;
-				drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
-								intel_dp->is_mst);
-				goto put_power;
-			}
+	if (intel_dp->is_mst) {
+		if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
+			/*
+			 * If we were in MST mode, and device is not
+			 * there, get out of MST mode
+			 */
+			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
+				      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+			intel_dp->is_mst = false;
+			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+							intel_dp->is_mst);
+			intel_dp->detect_done = false;
+			goto put_power;
 		}
+	}
 
-		if (!intel_dp->is_mst) {
-			if (!intel_dp_short_pulse(intel_dp)) {
-				intel_dp_long_pulse(intel_dp->attached_connector);
-				goto put_power;
-			}
+	if (!intel_dp->is_mst) {
+		if (!intel_dp_short_pulse(intel_dp)) {
+			intel_dp->detect_done = false;
+			goto put_power;
 		}
 	}
 
@@ -4984,9 +5113,21 @@
 		      I915_READ(regs.pp_div));
 }
 
+static void intel_dp_pps_init(struct drm_device *dev,
+			      struct intel_dp *intel_dp)
+{
+	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+		vlv_initial_power_sequencer_setup(intel_dp);
+	} else {
+		intel_dp_init_panel_power_sequencer(dev, intel_dp);
+		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+	}
+}
+
 /**
  * intel_dp_set_drrs_state - program registers for RR switch to take effect
- * @dev: DRM device
+ * @dev_priv: i915 device
+ * @crtc_state: a pointer to the active intel_crtc_state
  * @refresh_rate: RR to be programmed
  *
  * This function gets called when refresh rate (RR) has to be changed from
@@ -4996,14 +5137,14 @@
  *
  * The caller of this function needs to take a lock on dev_priv->drrs.
  */
-static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
+static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
+				    struct intel_crtc_state *crtc_state,
+				    int refresh_rate)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_encoder *encoder;
 	struct intel_digital_port *dig_port = NULL;
 	struct intel_dp *intel_dp = dev_priv->drrs.dp;
-	struct intel_crtc_state *config = NULL;
-	struct intel_crtc *intel_crtc = NULL;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
 
 	if (refresh_rate <= 0) {
@@ -5030,8 +5171,6 @@
 		return;
 	}
 
-	config = intel_crtc->config;
-
 	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
 		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
 		return;
@@ -5047,12 +5186,12 @@
 		return;
 	}
 
-	if (!intel_crtc->active) {
+	if (!crtc_state->base.active) {
 		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
 		return;
 	}
 
-	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
+	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
 		switch (index) {
 		case DRRS_HIGH_RR:
 			intel_dp_set_m_n(intel_crtc, M1_N1);
@@ -5064,18 +5203,18 @@
 		default:
 			DRM_ERROR("Unsupported refreshrate type\n");
 		}
-	} else if (INTEL_INFO(dev)->gen > 6) {
-		i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
+	} else if (INTEL_GEN(dev_priv) > 6) {
+		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
 		u32 val;
 
 		val = I915_READ(reg);
 		if (index > DRRS_HIGH_RR) {
-			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
 			else
 				val |= PIPECONF_EDP_RR_MODE_SWITCH;
 		} else {
-			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
 			else
 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
@@ -5091,18 +5230,17 @@
 /**
  * intel_edp_drrs_enable - init drrs struct if supported
  * @intel_dp: DP struct
+ * @crtc_state: A pointer to the active crtc state.
  *
  * Initializes frontbuffer_bits and drrs.dp
  */
-void intel_edp_drrs_enable(struct intel_dp *intel_dp)
+void intel_edp_drrs_enable(struct intel_dp *intel_dp,
+			   struct intel_crtc_state *crtc_state)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_crtc *crtc = dig_port->base.base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	if (!intel_crtc->config->has_drrs) {
+	if (!crtc_state->has_drrs) {
 		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
 		return;
 	}
@@ -5124,17 +5262,16 @@
 /**
  * intel_edp_drrs_disable - Disable DRRS
  * @intel_dp: DP struct
+ * @old_crtc_state: Pointer to old crtc_state.
  *
  */
-void intel_edp_drrs_disable(struct intel_dp *intel_dp)
+void intel_edp_drrs_disable(struct intel_dp *intel_dp,
+			    struct intel_crtc_state *old_crtc_state)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_crtc *crtc = dig_port->base.base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	if (!intel_crtc->config->has_drrs)
+	if (!old_crtc_state->has_drrs)
 		return;
 
 	mutex_lock(&dev_priv->drrs.mutex);
@@ -5144,9 +5281,8 @@
 	}
 
 	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-		intel_dp_set_drrs_state(&dev_priv->drm,
-					intel_dp->attached_connector->panel.
-					fixed_mode->vrefresh);
+		intel_dp_set_drrs_state(dev_priv, old_crtc_state,
+			intel_dp->attached_connector->panel.fixed_mode->vrefresh);
 
 	dev_priv->drrs.dp = NULL;
 	mutex_unlock(&dev_priv->drrs.mutex);
@@ -5175,10 +5311,12 @@
 	if (dev_priv->drrs.busy_frontbuffer_bits)
 		goto unlock;
 
-	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
-		intel_dp_set_drrs_state(&dev_priv->drm,
-					intel_dp->attached_connector->panel.
-					downclock_mode->vrefresh);
+	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
+		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
+
+		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
+			intel_dp->attached_connector->panel.downclock_mode->vrefresh);
+	}
 
 unlock:
 	mutex_unlock(&dev_priv->drrs.mutex);
@@ -5186,7 +5324,7 @@
 
 /**
  * intel_edp_drrs_invalidate - Disable Idleness DRRS
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called everytime rendering on the given planes start.
@@ -5194,10 +5332,9 @@
  *
  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
  */
-void intel_edp_drrs_invalidate(struct drm_device *dev,
-		unsigned frontbuffer_bits)
+void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
+			       unsigned int frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	enum pipe pipe;
 
@@ -5220,16 +5357,15 @@
 
 	/* invalidate means busy screen hence upclock */
 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-		intel_dp_set_drrs_state(&dev_priv->drm,
-					dev_priv->drrs.dp->attached_connector->panel.
-					fixed_mode->vrefresh);
+		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
+			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
 
 	mutex_unlock(&dev_priv->drrs.mutex);
 }
 
 /**
  * intel_edp_drrs_flush - Restart Idleness DRRS
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called every time rendering on the given planes has
@@ -5239,10 +5375,9 @@
  *
  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
  */
-void intel_edp_drrs_flush(struct drm_device *dev,
-		unsigned frontbuffer_bits)
+void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
+			  unsigned int frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	enum pipe pipe;
 
@@ -5265,9 +5400,8 @@
 
 	/* flush means busy screen hence upclock */
 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-		intel_dp_set_drrs_state(&dev_priv->drm,
-					dev_priv->drrs.dp->attached_connector->panel.
-					fixed_mode->vrefresh);
+		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
+				dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
 
 	/*
 	 * flush also means no more activity hence schedule downclock, if all
@@ -5400,27 +5534,15 @@
 	pps_lock(intel_dp);
 
 	intel_dp_init_panel_power_timestamps(intel_dp);
-
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
-		vlv_initial_power_sequencer_setup(intel_dp);
-	} else {
-		intel_dp_init_panel_power_sequencer(dev, intel_dp);
-		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
-	}
-
+	intel_dp_pps_init(dev, intel_dp);
 	intel_edp_panel_vdd_sanitize(intel_dp);
 
 	pps_unlock(intel_dp);
 
 	/* Cache DPCD and EDID for edp. */
-	has_dpcd = intel_dp_get_dpcd(intel_dp);
+	has_dpcd = intel_edp_init_dpcd(intel_dp);
 
-	if (has_dpcd) {
-		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
-			dev_priv->no_aux_handshake =
-				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
-				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
-	} else {
+	if (!has_dpcd) {
 		/* if this fails, presume the device is a ghost */
 		DRM_INFO("failed to retrieve link info, disabling eDP\n");
 		goto out_vdd_off;
@@ -5576,7 +5698,7 @@
 	connector->interlace_allowed = true;
 	connector->doublescan_allowed = 0;
 
-	intel_dp_aux_init(intel_dp, intel_connector);
+	intel_dp_aux_init(intel_dp);
 
 	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
 			  edp_panel_vdd_work);
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 60fb39c..c438b02 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -24,6 +24,15 @@
 #include "intel_drv.h"
 
 static void
+intel_dp_dump_link_status(const uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+
+	DRM_DEBUG_KMS("ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x",
+		      link_status[0], link_status[1], link_status[2],
+		      link_status[3], link_status[4], link_status[5]);
+}
+
+static void
 intel_get_adjust_train(struct intel_dp *intel_dp,
 		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
@@ -103,13 +112,24 @@
 	return ret == intel_dp->lane_count;
 }
 
+static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp)
+{
+	int lane;
+
+	for (lane = 0; lane < intel_dp->lane_count; lane++)
+		if ((intel_dp->train_set[lane] &
+		     DP_TRAIN_MAX_SWING_REACHED) == 0)
+			return false;
+
+	return true;
+}
+
 /* Enable corresponding port and start training pattern 1 */
-static void
+static bool
 intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 {
-	int i;
 	uint8_t voltage;
-	int voltage_tries, loop_tries;
+	int voltage_tries, max_vswing_tries;
 	uint8_t link_config[2];
 	uint8_t link_bw, rate_select;
 
@@ -125,6 +145,7 @@
 	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
 		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
 	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+
 	if (intel_dp->num_sink_rates)
 		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
 				  &rate_select, 1);
@@ -140,60 +161,54 @@
 				       DP_TRAINING_PATTERN_1 |
 				       DP_LINK_SCRAMBLING_DISABLE)) {
 		DRM_ERROR("failed to enable link training\n");
-		return;
+		return false;
 	}
 
-	voltage = 0xff;
-	voltage_tries = 0;
-	loop_tries = 0;
+	voltage_tries = 1;
+	max_vswing_tries = 0;
 	for (;;) {
 		uint8_t link_status[DP_LINK_STATUS_SIZE];
 
 		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
+
 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
 			DRM_ERROR("failed to get link status\n");
-			break;
+			return false;
 		}
 
 		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
 			DRM_DEBUG_KMS("clock recovery OK\n");
-			break;
+			return true;
 		}
 
-		/* Check to see if we've tried the max voltage */
-		for (i = 0; i < intel_dp->lane_count; i++)
-			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
-				break;
-		if (i == intel_dp->lane_count) {
-			++loop_tries;
-			if (loop_tries == 5) {
-				DRM_ERROR("too many full retries, give up\n");
-				break;
-			}
-			intel_dp_reset_link_train(intel_dp,
-						  DP_TRAINING_PATTERN_1 |
-						  DP_LINK_SCRAMBLING_DISABLE);
-			voltage_tries = 0;
-			continue;
+		if (voltage_tries == 5) {
+			DRM_DEBUG_KMS("Same voltage tried 5 times\n");
+			return false;
 		}
 
-		/* Check to see if we've tried the same voltage 5 times */
-		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
-			++voltage_tries;
-			if (voltage_tries == 5) {
-				DRM_ERROR("too many voltage retries, give up\n");
-				break;
-			}
-		} else
-			voltage_tries = 0;
+		if (max_vswing_tries == 1) {
+			DRM_DEBUG_KMS("Max Voltage Swing reached\n");
+			return false;
+		}
+
 		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
 		/* Update training set as requested by target */
 		intel_get_adjust_train(intel_dp, link_status);
 		if (!intel_dp_update_link_train(intel_dp)) {
 			DRM_ERROR("failed to update link training\n");
-			break;
+			return false;
 		}
+
+		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
+		    voltage)
+			++voltage_tries;
+		else
+			voltage_tries = 1;
+
+		if (intel_dp_link_max_vswing_reached(intel_dp))
+			++max_vswing_tries;
+
 	}
 }
 
@@ -229,12 +244,12 @@
 	return training_pattern;
 }
 
-static void
+static bool
 intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 {
-	bool channel_eq = false;
-	int tries, cr_tries;
+	int tries;
 	u32 training_pattern;
+	uint8_t link_status[DP_LINK_STATUS_SIZE];
 
 	training_pattern = intel_dp_training_pattern(intel_dp);
 
@@ -243,19 +258,11 @@
 				     training_pattern |
 				     DP_LINK_SCRAMBLING_DISABLE)) {
 		DRM_ERROR("failed to start channel equalization\n");
-		return;
+		return false;
 	}
 
-	tries = 0;
-	cr_tries = 0;
-	channel_eq = false;
-	for (;;) {
-		uint8_t link_status[DP_LINK_STATUS_SIZE];
-
-		if (cr_tries > 5) {
-			DRM_ERROR("failed to train DP, aborting\n");
-			break;
-		}
+	intel_dp->channel_eq_status = false;
+	for (tries = 0; tries < 5; tries++) {
 
 		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
@@ -266,44 +273,38 @@
 		/* Make sure clock is still ok */
 		if (!drm_dp_clock_recovery_ok(link_status,
 					      intel_dp->lane_count)) {
-			intel_dp_link_training_clock_recovery(intel_dp);
-			intel_dp_set_link_train(intel_dp,
-						training_pattern |
-						DP_LINK_SCRAMBLING_DISABLE);
-			cr_tries++;
-			continue;
+			intel_dp_dump_link_status(link_status);
+			DRM_DEBUG_KMS("Clock recovery check failed, cannot "
+				      "continue channel equalization\n");
+			break;
 		}
 
 		if (drm_dp_channel_eq_ok(link_status,
 					 intel_dp->lane_count)) {
-			channel_eq = true;
+			intel_dp->channel_eq_status = true;
+			DRM_DEBUG_KMS("Channel EQ done. DP Training "
+				      "successful\n");
 			break;
 		}
 
-		/* Try 5 times, then try clock recovery if that fails */
-		if (tries > 5) {
-			intel_dp_link_training_clock_recovery(intel_dp);
-			intel_dp_set_link_train(intel_dp,
-						training_pattern |
-						DP_LINK_SCRAMBLING_DISABLE);
-			tries = 0;
-			cr_tries++;
-			continue;
-		}
-
 		/* Update training set as requested by target */
 		intel_get_adjust_train(intel_dp, link_status);
 		if (!intel_dp_update_link_train(intel_dp)) {
 			DRM_ERROR("failed to update link training\n");
 			break;
 		}
-		++tries;
+	}
+
+	/* Try 5 times, else fail and try at lower BW */
+	if (tries == 5) {
+		intel_dp_dump_link_status(link_status);
+		DRM_DEBUG_KMS("Channel equalization failed 5 times\n");
 	}
 
 	intel_dp_set_idle_link_train(intel_dp);
 
-	if (channel_eq)
-		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
+	return intel_dp->channel_eq_status;
+
 }
 
 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 68a005d..54a9d76 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -31,18 +31,16 @@
 #include <drm/drm_edid.h>
 
 static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
-					struct intel_crtc_state *pipe_config)
+					struct intel_crtc_state *pipe_config,
+					struct drm_connector_state *conn_state)
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 	struct drm_atomic_state *state;
-	int bpp, i;
+	int bpp;
 	int lane_count, slots;
 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-	struct drm_connector *drm_connector;
-	struct intel_connector *connector, *found = NULL;
-	struct drm_connector_state *connector_state;
 	int mst_pbn;
 
 	pipe_config->dp_encoder_is_mst = true;
@@ -54,7 +52,6 @@
 	 */
 	lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
 
-
 	pipe_config->lane_count = lane_count;
 
 	pipe_config->pipe_bpp = 24;
@@ -62,20 +59,6 @@
 
 	state = pipe_config->base.state;
 
-	for_each_connector_in_state(state, drm_connector, connector_state, i) {
-		connector = to_intel_connector(drm_connector);
-
-		if (connector_state->best_encoder == &encoder->base) {
-			found = connector;
-			break;
-		}
-	}
-
-	if (!found) {
-		DRM_ERROR("can't find connector\n");
-		return false;
-	}
-
 	mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
 
 	pipe_config->pbn = mst_pbn;
@@ -92,16 +75,20 @@
 
 }
 
-static void intel_mst_disable_dp(struct intel_encoder *encoder)
+static void intel_mst_disable_dp(struct intel_encoder *encoder,
+				 struct intel_crtc_state *old_crtc_state,
+				 struct drm_connector_state *old_conn_state)
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct intel_connector *connector =
+		to_intel_connector(old_conn_state->connector);
 	int ret;
 
 	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
-	drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->connector->port);
+	drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
 
 	ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
 	if (ret) {
@@ -109,11 +96,15 @@
 	}
 }
 
-static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
+static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
+				      struct intel_crtc_state *old_crtc_state,
+				      struct drm_connector_state *old_conn_state)
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct intel_connector *connector =
+		to_intel_connector(old_conn_state->connector);
 
 	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
@@ -122,59 +113,51 @@
 	/* and this can also fail */
 	drm_dp_update_payload_part2(&intel_dp->mst_mgr);
 
-	drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->connector->port);
+	drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
 
 	intel_dp->active_mst_links--;
 
 	intel_mst->connector = NULL;
 	if (intel_dp->active_mst_links == 0) {
-		intel_dig_port->base.post_disable(&intel_dig_port->base);
+		intel_dig_port->base.post_disable(&intel_dig_port->base,
+						  NULL, NULL);
+
 		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 	}
 }
 
-static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
+static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
+				    struct intel_crtc_state *pipe_config,
+				    struct drm_connector_state *conn_state)
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = intel_dig_port->port;
+	struct intel_connector *connector =
+		to_intel_connector(conn_state->connector);
 	int ret;
 	uint32_t temp;
-	struct intel_connector *found = NULL, *connector;
 	int slots;
-	struct drm_crtc *crtc = encoder->base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	for_each_intel_connector(dev, connector) {
-		if (connector->base.state->best_encoder == &encoder->base) {
-			found = connector;
-			break;
-		}
-	}
-
-	if (!found) {
-		DRM_ERROR("can't find connector\n");
-		return;
-	}
 
 	/* MST encoders are bound to a crtc, not to a connector,
 	 * force the mapping here for get_hw_state.
 	 */
-	found->encoder = encoder;
+	connector->encoder = encoder;
+	intel_mst->connector = connector;
 
 	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
-	intel_mst->connector = found;
-
 	if (intel_dp->active_mst_links == 0) {
-		intel_prepare_ddi_buffer(&intel_dig_port->base);
+		intel_ddi_clk_select(&intel_dig_port->base,
+				     pipe_config->shared_dpll);
 
-		intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
-
-		intel_dp_set_link_params(intel_dp, intel_crtc->config);
+		intel_prepare_dp_ddi_buffers(&intel_dig_port->base);
+		intel_dp_set_link_params(intel_dp,
+					 pipe_config->port_clock,
+					 pipe_config->lane_count,
+					 true);
 
 		intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
 
@@ -185,8 +168,8 @@
 	}
 
 	ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
-				       intel_mst->connector->port,
-				       intel_crtc->config->pbn, &slots);
+				       connector->port,
+				       pipe_config->pbn, &slots);
 	if (ret == false) {
 		DRM_ERROR("failed to allocate vcpi\n");
 		return;
@@ -200,13 +183,14 @@
 	ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
 }
 
-static void intel_mst_enable_dp(struct intel_encoder *encoder)
+static void intel_mst_enable_dp(struct intel_encoder *encoder,
+				struct intel_crtc_state *pipe_config,
+				struct drm_connector_state *conn_state)
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
-	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = intel_dig_port->port;
 	int ret;
 
@@ -239,9 +223,8 @@
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
 	u32 temp, flags = 0;
 
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 5c1f2d2..1c59ca5 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -24,6 +24,44 @@
 #include "intel_drv.h"
 
 struct intel_shared_dpll *
+skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
+{
+	struct intel_shared_dpll *pll = NULL;
+	struct intel_dpll_hw_state dpll_hw_state;
+	enum intel_dpll_id i;
+	bool found = false;
+
+	if (!skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
+		return pll;
+
+	for (i = DPLL_ID_SKL_DPLL1; i <= DPLL_ID_SKL_DPLL3; i++) {
+		pll = &dev_priv->shared_dplls[i];
+
+		/* Only want to check enabled timings first */
+		if (pll->config.crtc_mask == 0)
+			continue;
+
+		if (memcmp(&dpll_hw_state, &pll->config.hw_state,
+			   sizeof(pll->config.hw_state)) == 0) {
+			found = true;
+			break;
+		}
+	}
+
+	/* Ok no matching timings, maybe there's a free one? */
+	for (i = DPLL_ID_SKL_DPLL1;
+	     ((found == false) && (i <= DPLL_ID_SKL_DPLL3)); i++) {
+		pll = &dev_priv->shared_dplls[i];
+		if (pll->config.crtc_mask == 0) {
+			pll->config.hw_state = dpll_hw_state;
+			break;
+		}
+	}
+
+	return pll;
+}
+
+struct intel_shared_dpll *
 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
 			    enum intel_dpll_id id)
 {
@@ -452,26 +490,6 @@
 	return val & SPLL_PLL_ENABLE;
 }
 
-static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll)
-{
-	switch (pll->id) {
-	case DPLL_ID_WRPLL1:
-		return PORT_CLK_SEL_WRPLL1;
-	case DPLL_ID_WRPLL2:
-		return PORT_CLK_SEL_WRPLL2;
-	case DPLL_ID_SPLL:
-		return PORT_CLK_SEL_SPLL;
-	case DPLL_ID_LCPLL_810:
-		return PORT_CLK_SEL_LCPLL_810;
-	case DPLL_ID_LCPLL_1350:
-		return PORT_CLK_SEL_LCPLL_1350;
-	case DPLL_ID_LCPLL_2700:
-		return PORT_CLK_SEL_LCPLL_2700;
-	default:
-		return PORT_CLK_SEL_NONE;
-	}
-}
-
 #define LC_FREQ 2700
 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
 
@@ -687,11 +705,65 @@
 	*r2_out = best.r2;
 }
 
+static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
+						       struct intel_crtc *crtc,
+						       struct intel_crtc_state *crtc_state)
+{
+	struct intel_shared_dpll *pll;
+	uint32_t val;
+	unsigned int p, n2, r2;
+
+	hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+	val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
+	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+	      WRPLL_DIVIDER_POST(p);
+
+	crtc_state->dpll_hw_state.wrpll = val;
+
+	pll = intel_find_shared_dpll(crtc, crtc_state,
+				     DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
+
+	if (!pll)
+		return NULL;
+
+	return pll;
+}
+
+struct intel_shared_dpll *hsw_ddi_dp_get_dpll(struct intel_encoder *encoder,
+					      int clock)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_shared_dpll *pll;
+	enum intel_dpll_id pll_id;
+
+	switch (clock / 2) {
+	case 81000:
+		pll_id = DPLL_ID_LCPLL_810;
+		break;
+	case 135000:
+		pll_id = DPLL_ID_LCPLL_1350;
+		break;
+	case 270000:
+		pll_id = DPLL_ID_LCPLL_2700;
+		break;
+	default:
+		DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
+		return NULL;
+	}
+
+	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+
+	if (!pll)
+		return NULL;
+
+	return pll;
+}
+
 static struct intel_shared_dpll *
 hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 	     struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_shared_dpll *pll;
 	int clock = crtc_state->port_clock;
 
@@ -699,41 +771,12 @@
 	       sizeof(crtc_state->dpll_hw_state));
 
 	if (encoder->type == INTEL_OUTPUT_HDMI) {
-		uint32_t val;
-		unsigned p, n2, r2;
-
-		hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-
-		val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
-		      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
-		      WRPLL_DIVIDER_POST(p);
-
-		crtc_state->dpll_hw_state.wrpll = val;
-
-		pll = intel_find_shared_dpll(crtc, crtc_state,
-					     DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
+		pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state);
 
 	} else if (encoder->type == INTEL_OUTPUT_DP ||
 		   encoder->type == INTEL_OUTPUT_DP_MST ||
 		   encoder->type == INTEL_OUTPUT_EDP) {
-		enum intel_dpll_id pll_id;
-
-		switch (clock / 2) {
-		case 81000:
-			pll_id = DPLL_ID_LCPLL_810;
-			break;
-		case 135000:
-			pll_id = DPLL_ID_LCPLL_1350;
-			break;
-		case 270000:
-			pll_id = DPLL_ID_LCPLL_2700;
-			break;
-		default:
-			DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
-			return NULL;
-		}
-
-		pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+		pll = hsw_ddi_dp_get_dpll(encoder, clock);
 
 	} else if (encoder->type == INTEL_OUTPUT_ANALOG) {
 		if (WARN_ON(crtc_state->port_clock / 2 != 135000))
@@ -751,14 +794,11 @@
 	if (!pll)
 		return NULL;
 
-	crtc_state->ddi_pll_sel = hsw_pll_to_ddi_pll_sel(pll);
-
 	intel_reference_shared_dpll(pll, crtc_state);
 
 	return pll;
 }
 
-
 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
 	.enable = hsw_ddi_wrpll_enable,
 	.disable = hsw_ddi_wrpll_disable,
@@ -1194,67 +1234,33 @@
 	return true;
 }
 
-static struct intel_shared_dpll *
-skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
-	     struct intel_encoder *encoder)
+static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
+				      struct intel_crtc_state *crtc_state,
+				      int clock)
 {
-	struct intel_shared_dpll *pll;
 	uint32_t ctrl1, cfgcr1, cfgcr2;
-	int clock = crtc_state->port_clock;
+	struct skl_wrpll_params wrpll_params = { 0, };
 
 	/*
 	 * See comment in intel_dpll_hw_state to understand why we always use 0
 	 * as the DPLL id in this function.
 	 */
-
 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
 
-	if (encoder->type == INTEL_OUTPUT_HDMI) {
-		struct skl_wrpll_params wrpll_params = { 0, };
+	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
 
-		ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+	if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+		return false;
 
-		if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
-			return NULL;
+	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+		wrpll_params.dco_integer;
 
-		cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
-			 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
-			 wrpll_params.dco_integer;
-
-		cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
-			 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
-			 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
-			 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
-			 wrpll_params.central_freq;
-	} else if (encoder->type == INTEL_OUTPUT_DP ||
-		   encoder->type == INTEL_OUTPUT_DP_MST ||
-		   encoder->type == INTEL_OUTPUT_EDP) {
-		switch (crtc_state->port_clock / 2) {
-		case 81000:
-			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
-			break;
-		case 135000:
-			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
-			break;
-		case 270000:
-			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
-			break;
-		/* eDP 1.4 rates */
-		case 162000:
-			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
-			break;
-		case 108000:
-			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
-			break;
-		case 216000:
-			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
-			break;
-		}
-
-		cfgcr1 = cfgcr2 = 0;
-	} else {
-		return NULL;
-	}
+	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+		wrpll_params.central_freq;
 
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));
@@ -1262,6 +1268,75 @@
 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
+	return true;
+}
+
+
+bool skl_ddi_dp_set_dpll_hw_state(int clock,
+				  struct intel_dpll_hw_state *dpll_hw_state)
+{
+	uint32_t ctrl1;
+
+	/*
+	 * See comment in intel_dpll_hw_state to understand why we always use 0
+	 * as the DPLL id in this function.
+	 */
+	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+	switch (clock / 2) {
+	case 81000:
+		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
+		break;
+	case 135000:
+		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
+		break;
+	case 270000:
+		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
+		break;
+		/* eDP 1.4 rates */
+	case 162000:
+		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
+		break;
+	case 108000:
+		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
+		break;
+	case 216000:
+		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
+		break;
+	}
+
+	dpll_hw_state->ctrl1 = ctrl1;
+	return true;
+}
+
+static struct intel_shared_dpll *
+skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+	     struct intel_encoder *encoder)
+{
+	struct intel_shared_dpll *pll;
+	int clock = crtc_state->port_clock;
+	bool bret;
+	struct intel_dpll_hw_state dpll_hw_state;
+
+	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
+
+	if (encoder->type == INTEL_OUTPUT_HDMI) {
+		bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
+		if (!bret) {
+			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
+			return NULL;
+		}
+	} else if (encoder->type == INTEL_OUTPUT_DP ||
+		   encoder->type == INTEL_OUTPUT_DP_MST ||
+		   encoder->type == INTEL_OUTPUT_EDP) {
+		bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
+		if (!bret) {
+			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
+			return NULL;
+		}
+		crtc_state->dpll_hw_state = dpll_hw_state;
+	} else {
+		return NULL;
+	}
 
 	if (encoder->type == INTEL_OUTPUT_EDP)
 		pll = intel_find_shared_dpll(crtc, crtc_state,
@@ -1274,8 +1349,6 @@
 	if (!pll)
 		return NULL;
 
-	crtc_state->ddi_pll_sel = pll->id;
-
 	intel_reference_shared_dpll(pll, crtc_state);
 
 	return pll;
@@ -1484,6 +1557,8 @@
 	uint32_t m2_frac;
 	bool m2_frac_en;
 	uint32_t n;
+
+	int vco;
 };
 
 /* pre-calculated values for DP linkrates */
@@ -1497,56 +1572,59 @@
 	{432000, 3, 1, 32, 1677722, 1, 1}
 };
 
-static struct intel_shared_dpll *
-bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
-	     struct intel_encoder *encoder)
+static bool
+bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
+			  struct intel_crtc_state *crtc_state, int clock,
+			  struct bxt_clk_div *clk_div)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_shared_dpll *pll;
-	enum intel_dpll_id i;
-	struct intel_digital_port *intel_dig_port;
-	struct bxt_clk_div clk_div = {0};
-	int vco = 0;
+	struct dpll best_clock;
+
+	/* Calculate HDMI div */
+	/*
+	 * FIXME: tie the following calculation into
+	 * i9xx_crtc_compute_clock
+	 */
+	if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
+		DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
+				 clock, pipe_name(intel_crtc->pipe));
+		return false;
+	}
+
+	clk_div->p1 = best_clock.p1;
+	clk_div->p2 = best_clock.p2;
+	WARN_ON(best_clock.m1 != 2);
+	clk_div->n = best_clock.n;
+	clk_div->m2_int = best_clock.m2 >> 22;
+	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
+	clk_div->m2_frac_en = clk_div->m2_frac != 0;
+
+	clk_div->vco = best_clock.vco;
+
+	return true;
+}
+
+static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
+{
+	int i;
+
+	*clk_div = bxt_dp_clk_val[0];
+	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
+		if (bxt_dp_clk_val[i].clock == clock) {
+			*clk_div = bxt_dp_clk_val[i];
+			break;
+		}
+	}
+
+	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
+}
+
+static bool bxt_ddi_set_dpll_hw_state(int clock,
+			  struct bxt_clk_div *clk_div,
+			  struct intel_dpll_hw_state *dpll_hw_state)
+{
+	int vco = clk_div->vco;
 	uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
 	uint32_t lanestagger;
-	int clock = crtc_state->port_clock;
-
-	if (encoder->type == INTEL_OUTPUT_HDMI) {
-		struct dpll best_clock;
-
-		/* Calculate HDMI div */
-		/*
-		 * FIXME: tie the following calculation into
-		 * i9xx_crtc_compute_clock
-		 */
-		if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
-			DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
-					 clock, pipe_name(crtc->pipe));
-			return NULL;
-		}
-
-		clk_div.p1 = best_clock.p1;
-		clk_div.p2 = best_clock.p2;
-		WARN_ON(best_clock.m1 != 2);
-		clk_div.n = best_clock.n;
-		clk_div.m2_int = best_clock.m2 >> 22;
-		clk_div.m2_frac = best_clock.m2 & ((1 << 22) - 1);
-		clk_div.m2_frac_en = clk_div.m2_frac != 0;
-
-		vco = best_clock.vco;
-	} else if (encoder->type == INTEL_OUTPUT_DP ||
-		   encoder->type == INTEL_OUTPUT_EDP) {
-		int i;
-
-		clk_div = bxt_dp_clk_val[0];
-		for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
-			if (bxt_dp_clk_val[i].clock == clock) {
-				clk_div = bxt_dp_clk_val[i];
-				break;
-			}
-		}
-		vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
-	}
 
 	if (vco >= 6200000 && vco <= 6700000) {
 		prop_coef = 4;
@@ -1566,12 +1644,9 @@
 		targ_cnt = 9;
 	} else {
 		DRM_ERROR("Invalid VCO\n");
-		return NULL;
+		return false;
 	}
 
-	memset(&crtc_state->dpll_hw_state, 0,
-	       sizeof(crtc_state->dpll_hw_state));
-
 	if (clock > 270000)
 		lanestagger = 0x18;
 	else if (clock > 135000)
@@ -1583,35 +1658,86 @@
 	else
 		lanestagger = 0x02;
 
-	crtc_state->dpll_hw_state.ebb0 =
-		PORT_PLL_P1(clk_div.p1) | PORT_PLL_P2(clk_div.p2);
-	crtc_state->dpll_hw_state.pll0 = clk_div.m2_int;
-	crtc_state->dpll_hw_state.pll1 = PORT_PLL_N(clk_div.n);
-	crtc_state->dpll_hw_state.pll2 = clk_div.m2_frac;
+	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
+	dpll_hw_state->pll0 = clk_div->m2_int;
+	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
+	dpll_hw_state->pll2 = clk_div->m2_frac;
 
-	if (clk_div.m2_frac_en)
-		crtc_state->dpll_hw_state.pll3 =
-			PORT_PLL_M2_FRAC_ENABLE;
+	if (clk_div->m2_frac_en)
+		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
 
-	crtc_state->dpll_hw_state.pll6 =
-		prop_coef | PORT_PLL_INT_COEFF(int_coef);
-	crtc_state->dpll_hw_state.pll6 |=
-		PORT_PLL_GAIN_CTL(gain_ctl);
+	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
+	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
 
-	crtc_state->dpll_hw_state.pll8 = targ_cnt;
+	dpll_hw_state->pll8 = targ_cnt;
 
-	crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
+	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
 
-	crtc_state->dpll_hw_state.pll10 =
+	dpll_hw_state->pll10 =
 		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
 		| PORT_PLL_DCO_AMP_OVR_EN_H;
 
-	crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
+	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
 
-	crtc_state->dpll_hw_state.pcsdw12 =
-		LANESTAGGER_STRAP_OVRD | lanestagger;
+	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
 
-	intel_dig_port = enc_to_dig_port(&encoder->base);
+	return true;
+}
+
+bool bxt_ddi_dp_set_dpll_hw_state(int clock,
+			  struct intel_dpll_hw_state *dpll_hw_state)
+{
+	struct bxt_clk_div clk_div = {0};
+
+	bxt_ddi_dp_pll_dividers(clock, &clk_div);
+
+	return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
+}
+
+static bool
+bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc *intel_crtc,
+			       struct intel_crtc_state *crtc_state, int clock,
+			       struct intel_dpll_hw_state *dpll_hw_state)
+{
+	struct bxt_clk_div clk_div = { };
+
+	bxt_ddi_hdmi_pll_dividers(intel_crtc, crtc_state, clock, &clk_div);
+
+	return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
+}
+
+static struct intel_shared_dpll *
+bxt_get_dpll(struct intel_crtc *crtc,
+		struct intel_crtc_state *crtc_state,
+		struct intel_encoder *encoder)
+{
+	struct intel_dpll_hw_state dpll_hw_state = { };
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_digital_port *intel_dig_port;
+	struct intel_shared_dpll *pll;
+	int i, clock = crtc_state->port_clock;
+
+	if (encoder->type == INTEL_OUTPUT_HDMI &&
+	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock,
+					    &dpll_hw_state))
+		return NULL;
+
+	if ((encoder->type == INTEL_OUTPUT_DP ||
+	     encoder->type == INTEL_OUTPUT_EDP) &&
+	    !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
+		return NULL;
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	crtc_state->dpll_hw_state = dpll_hw_state;
+
+	if (encoder->type == INTEL_OUTPUT_DP_MST) {
+		struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+
+		intel_dig_port = intel_mst->primary;
+	} else
+		intel_dig_port = enc_to_dig_port(&encoder->base);
 
 	/* 1:1 mapping between ports and PLLs */
 	i = (enum intel_dpll_id) intel_dig_port->port;
@@ -1622,9 +1748,6 @@
 
 	intel_reference_shared_dpll(pll, crtc_state);
 
-	/* shared DPLL id 0 is DPLL A */
-	crtc_state->ddi_pll_sel = pll->id;
-
 	return pll;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index 89c5ada..f438535 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -160,5 +160,20 @@
 void intel_shared_dpll_commit(struct drm_atomic_state *state);
 void intel_shared_dpll_init(struct drm_device *dev);
 
+/* BXT dpll related functions */
+bool bxt_ddi_dp_set_dpll_hw_state(int clock,
+			  struct intel_dpll_hw_state *dpll_hw_state);
+
+
+/* SKL dpll related functions */
+bool skl_ddi_dp_set_dpll_hw_state(int clock,
+				  struct intel_dpll_hw_state *dpll_hw_state);
+struct intel_shared_dpll *skl_find_link_pll(struct drm_i915_private *dev_priv,
+					    int clock);
+
+
+/* HSW dpll related functions */
+struct intel_shared_dpll *hsw_ddi_dp_get_dpll(struct intel_encoder *encoder,
+					      int clock);
 
 #endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ff399b9..a19ec06 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -52,11 +52,15 @@
  */
 #define _wait_for(COND, US, W) ({ \
 	unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1;	\
-	int ret__ = 0;							\
-	while (!(COND)) {						\
-		if (time_after(jiffies, timeout__)) {			\
-			if (!(COND))					\
-				ret__ = -ETIMEDOUT;			\
+	int ret__;							\
+	for (;;) {							\
+		bool expired__ = time_after(jiffies, timeout__);	\
+		if (COND) {						\
+			ret__ = 0;					\
+			break;						\
+		}							\
+		if (expired__) {					\
+			ret__ = -ETIMEDOUT;				\
 			break;						\
 		}							\
 		if ((W) && drm_can_sleep()) {				\
@@ -178,11 +182,22 @@
 	struct drm_framebuffer base;
 	struct drm_i915_gem_object *obj;
 	struct intel_rotation_info rot_info;
+
+	/* for each plane in the normal GTT view */
+	struct {
+		unsigned int x, y;
+	} normal[2];
+	/* for each plane in the rotated GTT view */
+	struct {
+		unsigned int x, y;
+		unsigned int pitch; /* pixels */
+	} rotated[2];
 };
 
 struct intel_fbdev {
 	struct drm_fb_helper helper;
 	struct intel_framebuffer *fb;
+	struct i915_vma *vma;
 	async_cookie_t cookie;
 	int preferred_bpp;
 };
@@ -194,14 +209,26 @@
 	unsigned int cloneable;
 	void (*hot_plug)(struct intel_encoder *);
 	bool (*compute_config)(struct intel_encoder *,
-			       struct intel_crtc_state *);
-	void (*pre_pll_enable)(struct intel_encoder *);
-	void (*pre_enable)(struct intel_encoder *);
-	void (*enable)(struct intel_encoder *);
-	void (*mode_set)(struct intel_encoder *intel_encoder);
-	void (*disable)(struct intel_encoder *);
-	void (*post_disable)(struct intel_encoder *);
-	void (*post_pll_disable)(struct intel_encoder *);
+			       struct intel_crtc_state *,
+			       struct drm_connector_state *);
+	void (*pre_pll_enable)(struct intel_encoder *,
+			       struct intel_crtc_state *,
+			       struct drm_connector_state *);
+	void (*pre_enable)(struct intel_encoder *,
+			   struct intel_crtc_state *,
+			   struct drm_connector_state *);
+	void (*enable)(struct intel_encoder *,
+		       struct intel_crtc_state *,
+		       struct drm_connector_state *);
+	void (*disable)(struct intel_encoder *,
+			struct intel_crtc_state *,
+			struct drm_connector_state *);
+	void (*post_disable)(struct intel_encoder *,
+			     struct intel_crtc_state *,
+			     struct drm_connector_state *);
+	void (*post_pll_disable)(struct intel_encoder *,
+				 struct intel_crtc_state *,
+				 struct drm_connector_state *);
 	/* Read out the current hw state of this connector, returning true if
 	 * the encoder is active. If the encoder is enabled it also set the pipe
 	 * it is connected to in the pipe parameter. */
@@ -236,6 +263,7 @@
 		bool enabled;
 		bool combination_mode;	/* gen 2/4 only */
 		bool active_low_pwm;
+		bool alternate_pwm_increment;	/* lpt+ */
 
 		/* PWM chip */
 		bool util_pin_active_low;	/* bxt+ */
@@ -338,10 +366,16 @@
 
 struct intel_plane_state {
 	struct drm_plane_state base;
-	struct drm_rect src;
-	struct drm_rect dst;
 	struct drm_rect clip;
-	bool visible;
+
+	struct {
+		u32 offset;
+		int x, y;
+	} main;
+	struct {
+		u32 offset;
+		int x, y;
+	} aux;
 
 	/*
 	 * scaler_id
@@ -561,12 +595,6 @@
 	/* Selected dpll when shared or NULL. */
 	struct intel_shared_dpll *shared_dpll;
 
-	/*
-	 * - PORT_CLK_SEL for DDI ports on HSW/BDW.
-	 * - enum skl_dpll on SKL
-	 */
-	uint32_t ddi_pll_sel;
-
 	/* Actual register state of the dpll, for shared dpll cross-checking. */
 	struct intel_dpll_hw_state dpll_hw_state;
 
@@ -683,8 +711,8 @@
 
 	struct intel_crtc_state *config;
 
-	/* reset counter value when the last flip was submitted */
-	unsigned int reset_counter;
+	/* global reset count when the last flip was submitted */
+	unsigned int reset_count;
 
 	/* Access to these should be protected by dev_priv->irq_lock. */
 	bool cpu_fifo_underrun_disabled;
@@ -852,8 +880,10 @@
 	int link_rate;
 	uint8_t lane_count;
 	uint8_t sink_count;
+	bool link_mst;
 	bool has_audio;
 	bool detect_done;
+	bool channel_eq_status;
 	enum hdmi_force_audio force_audio;
 	bool limited_color_range;
 	bool color_range_auto;
@@ -1106,8 +1136,11 @@
 
 /* intel_ddi.c */
 void intel_ddi_clk_select(struct intel_encoder *encoder,
-			  const struct intel_crtc_state *pipe_config);
-void intel_prepare_ddi_buffer(struct intel_encoder *encoder);
+			  struct intel_shared_dpll *pll);
+void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+				struct intel_crtc_state *old_crtc_state,
+				struct drm_connector_state *old_conn_state);
+void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder);
 void hsw_fdi_link_train(struct drm_crtc *crtc);
 void intel_ddi_init(struct drm_device *dev, enum port port);
 enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
@@ -1122,7 +1155,6 @@
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-void intel_ddi_fdi_disable(struct drm_crtc *crtc);
 void intel_ddi_get_config(struct intel_encoder *encoder,
 			  struct intel_crtc_state *pipe_config);
 struct intel_encoder *
@@ -1133,22 +1165,12 @@
 			 struct intel_crtc_state *pipe_config);
 void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
 uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
-
-/* intel_frontbuffer.c */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     enum fb_op_origin origin);
-void intel_frontbuffer_flip_prepare(struct drm_device *dev,
-				    unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct drm_device *dev,
-				     unsigned frontbuffer_bits);
-void intel_frontbuffer_flip(struct drm_device *dev,
-			    unsigned frontbuffer_bits);
+struct intel_shared_dpll *intel_ddi_get_link_dpll(struct intel_dp *intel_dp,
+						  int clock);
 unsigned int intel_fb_align_height(struct drm_device *dev,
 				   unsigned int height,
 				   uint32_t pixel_format,
 				   uint64_t fb_format_modifier);
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
-			enum fb_op_origin origin);
 u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
 			      uint64_t fb_modifier, uint32_t pixel_format);
 
@@ -1164,14 +1186,22 @@
 void intel_update_rawclk(struct drm_i915_private *dev_priv);
 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
 		      const char *name, u32 reg, int ref_freq);
+void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
 extern const struct drm_plane_funcs intel_plane_funcs;
 void intel_init_display_hooks(struct drm_i915_private *dev_priv);
+unsigned int intel_fb_xy_to_linear(int x, int y,
+				   const struct intel_plane_state *state,
+				   int plane);
+void intel_add_fb_offsets(int *x, int *y,
+			  const struct intel_plane_state *state, int plane);
 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
 bool intel_has_pending_fb_unpin(struct drm_device *dev);
 void intel_mark_busy(struct drm_i915_private *dev_priv);
 void intel_mark_idle(struct drm_i915_private *dev_priv);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
 int intel_display_suspend(struct drm_device *dev);
+void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
 void intel_encoder_destroy(struct drm_encoder *encoder);
 int intel_connector_init(struct intel_connector *);
 struct intel_connector *intel_connector_alloc(void);
@@ -1227,8 +1257,8 @@
 void intel_release_load_detect_pipe(struct drm_connector *connector,
 				    struct intel_load_detect_pipe *old,
 				    struct drm_modeset_acquire_ctx *ctx);
-int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
-			       unsigned int rotation);
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
 void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
@@ -1238,9 +1268,9 @@
 void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
 void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
 int intel_prepare_plane_fb(struct drm_plane *plane,
-			   const struct drm_plane_state *new_state);
+			   struct drm_plane_state *new_state);
 void intel_cleanup_plane_fb(struct drm_plane *plane,
-			    const struct drm_plane_state *old_state);
+			    struct drm_plane_state *old_state);
 int intel_plane_atomic_get_property(struct drm_plane *plane,
 				    const struct drm_plane_state *state,
 				    struct drm_property *property,
@@ -1258,7 +1288,7 @@
 static inline bool
 intel_rotation_90_or_270(unsigned int rotation)
 {
-	return rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270));
+	return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270);
 }
 
 void intel_create_rotation_property(struct drm_device *dev,
@@ -1290,9 +1320,7 @@
 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
 #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
 u32 intel_compute_tile_offset(int *x, int *y,
-			      const struct drm_framebuffer *fb, int plane,
-			      unsigned int pitch,
-			      unsigned int rotation);
+			      const struct intel_plane_state *state, int plane);
 void intel_prepare_reset(struct drm_i915_private *dev_priv);
 void intel_finish_reset(struct drm_i915_private *dev_priv);
 void hsw_enable_pc8(struct drm_i915_private *dev_priv);
@@ -1335,13 +1363,14 @@
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
 
-u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
-			   struct drm_i915_gem_object *obj,
-			   unsigned int plane);
+u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation);
 
 u32 skl_plane_ctl_format(uint32_t pixel_format);
 u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
 u32 skl_plane_ctl_rotation(unsigned int rotation);
+u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
+		     unsigned int rotation);
+int skl_check_plane_surface(struct intel_plane_state *plane_state);
 
 /* intel_csr.c */
 void intel_csr_ucode_init(struct drm_i915_private *);
@@ -1355,7 +1384,8 @@
 bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 			     struct intel_connector *intel_connector);
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
-			      const struct intel_crtc_state *pipe_config);
+			      int link_rate, uint8_t lane_count,
+			      bool link_mst);
 void intel_dp_start_link_train(struct intel_dp *intel_dp);
 void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
@@ -1364,7 +1394,8 @@
 void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
-			     struct intel_crtc_state *pipe_config);
+			     struct intel_crtc_state *pipe_config,
+			     struct drm_connector_state *conn_state);
 bool intel_dp_is_edp(struct drm_device *dev, enum port port);
 enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
 				  bool long_hpd);
@@ -1382,13 +1413,14 @@
 void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
 void intel_plane_destroy(struct drm_plane *plane);
-void intel_edp_drrs_enable(struct intel_dp *intel_dp);
-void intel_edp_drrs_disable(struct intel_dp *intel_dp);
-void intel_edp_drrs_invalidate(struct drm_device *dev,
-		unsigned frontbuffer_bits);
-void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
-bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
-					 struct intel_digital_port *port);
+void intel_edp_drrs_enable(struct intel_dp *intel_dp,
+			   struct intel_crtc_state *crtc_state);
+void intel_edp_drrs_disable(struct intel_dp *intel_dp,
+			   struct intel_crtc_state *crtc_state);
+void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
+			       unsigned int frontbuffer_bits);
+void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
+			  unsigned int frontbuffer_bits);
 
 void
 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1488,7 +1520,8 @@
 			       struct intel_connector *intel_connector);
 struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
 bool intel_hdmi_compute_config(struct intel_encoder *encoder,
-			       struct intel_crtc_state *pipe_config);
+			       struct intel_crtc_state *pipe_config,
+			       struct drm_connector_state *conn_state);
 void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
 
 
@@ -1561,13 +1594,13 @@
 /* intel_psr.c */
 void intel_psr_enable(struct intel_dp *intel_dp);
 void intel_psr_disable(struct intel_dp *intel_dp);
-void intel_psr_invalidate(struct drm_device *dev,
+void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned frontbuffer_bits);
-void intel_psr_flush(struct drm_device *dev,
+void intel_psr_flush(struct drm_i915_private *dev_priv,
 		     unsigned frontbuffer_bits,
 		     enum fb_op_origin origin);
 void intel_psr_init(struct drm_device *dev);
-void intel_psr_single_frame_update(struct drm_device *dev,
+void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
 				   unsigned frontbuffer_bits);
 
 /* intel_runtime_pm.c */
@@ -1667,13 +1700,6 @@
 	atomic_dec(&dev_priv->pm.wakeref_count);
 }
 
-/* TODO: convert users of these to rely instead on proper RPM refcounting */
-#define DISABLE_RPM_WAKEREF_ASSERTS(dev_priv)	\
-	disable_rpm_wakeref_asserts(dev_priv)
-
-#define ENABLE_RPM_WAKEREF_ASSERTS(dev_priv)	\
-	enable_rpm_wakeref_asserts(dev_priv)
-
 void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
@@ -1699,11 +1725,11 @@
 void intel_gpu_ips_teardown(void);
 void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_reset_gt_powersave(struct drm_i915_private *dev_priv);
-void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
 void gen6_rps_busy(struct drm_i915_private *dev_priv);
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
 void gen6_rps_idle(struct drm_i915_private *dev_priv);
@@ -1716,9 +1742,21 @@
 void skl_wm_get_hw_state(struct drm_device *dev);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 			  struct skl_ddb_allocation *ddb /* out */);
-bool skl_can_enable_sagv(struct drm_atomic_state *state);
-int skl_enable_sagv(struct drm_i915_private *dev_priv);
-int skl_disable_sagv(struct drm_i915_private *dev_priv);
+bool intel_can_enable_sagv(struct drm_atomic_state *state);
+int intel_enable_sagv(struct drm_i915_private *dev_priv);
+int intel_disable_sagv(struct drm_i915_private *dev_priv);
+bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
+			       const struct skl_ddb_allocation *new,
+			       enum pipe pipe);
+bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
+				 const struct skl_ddb_allocation *old,
+				 const struct skl_ddb_allocation *new,
+				 enum pipe pipe);
+void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
+			 const struct skl_wm_values *wm);
+void skl_write_plane_wm(struct intel_crtc *intel_crtc,
+			const struct skl_wm_values *wm,
+			int plane);
 uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
 bool ilk_disable_lp_wm(struct drm_device *dev);
 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index de8e9fb..b2e3d3a 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -312,7 +312,8 @@
 }
 
 static bool intel_dsi_compute_config(struct intel_encoder *encoder,
-				     struct intel_crtc_state *pipe_config)
+				     struct intel_crtc_state *pipe_config,
+				     struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
@@ -533,14 +534,15 @@
 	intel_panel_enable_backlight(intel_dsi->attached_connector);
 }
 
-static void intel_dsi_prepare(struct intel_encoder *intel_encoder);
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+			      struct intel_crtc_state *pipe_config);
 
-static void intel_dsi_pre_enable(struct intel_encoder *encoder)
+static void intel_dsi_pre_enable(struct intel_encoder *encoder,
+				 struct intel_crtc_state *pipe_config,
+				 struct drm_connector_state *conn_state)
 {
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	enum port port;
 
 	DRM_DEBUG_KMS("\n");
@@ -550,9 +552,9 @@
 	 * lock. It needs to be fully powered down to fix it.
 	 */
 	intel_disable_dsi_pll(encoder);
-	intel_enable_dsi_pll(encoder, crtc->config);
+	intel_enable_dsi_pll(encoder, pipe_config);
 
-	intel_dsi_prepare(encoder);
+	intel_dsi_prepare(encoder, pipe_config);
 
 	/* Panel Enable over CRC PMIC */
 	if (intel_dsi->gpio_panel)
@@ -582,7 +584,9 @@
 	intel_dsi_enable(encoder);
 }
 
-static void intel_dsi_enable_nop(struct intel_encoder *encoder)
+static void intel_dsi_enable_nop(struct intel_encoder *encoder,
+				 struct intel_crtc_state *pipe_config,
+				 struct drm_connector_state *conn_state)
 {
 	DRM_DEBUG_KMS("\n");
 
@@ -592,7 +596,9 @@
 	 */
 }
 
-static void intel_dsi_pre_disable(struct intel_encoder *encoder)
+static void intel_dsi_pre_disable(struct intel_encoder *encoder,
+				  struct intel_crtc_state *old_crtc_state,
+				  struct drm_connector_state *old_conn_state)
 {
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 	enum port port;
@@ -694,7 +700,9 @@
 	intel_disable_dsi_pll(encoder);
 }
 
-static void intel_dsi_post_disable(struct intel_encoder *encoder)
+static void intel_dsi_post_disable(struct intel_encoder *encoder,
+				   struct intel_crtc_state *pipe_config,
+				   struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -819,6 +827,7 @@
 	u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw,
 				crtc_hblank_start_sw, crtc_hblank_end_sw;
 
+	/* FIXME: hw readout should not depend on SW state */
 	intel_crtc = to_intel_crtc(encoder->base.crtc);
 	adjusted_mode_sw = &intel_crtc->config->base.adjusted_mode;
 
@@ -1104,14 +1113,15 @@
 	}
 }
 
-static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+			      struct intel_crtc_state *pipe_config)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
-	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	enum port port;
 	unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
 	u32 val, tmp;
@@ -1348,7 +1358,7 @@
 		intel_connector->panel.fitting_mode = val;
 	}
 
-	crtc = intel_attached_encoder(connector)->base.crtc;
+	crtc = connector->state->crtc;
 	if (crtc && crtc->state->enable) {
 		/*
 		 * If the CRTC is enabled, the display will be changed
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index b9e5a63..2e452c5 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -174,7 +174,9 @@
 	pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
 }
 
-static void intel_disable_dvo(struct intel_encoder *encoder)
+static void intel_disable_dvo(struct intel_encoder *encoder,
+			      struct intel_crtc_state *old_crtc_state,
+			      struct drm_connector_state *old_conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -186,17 +188,18 @@
 	I915_READ(dvo_reg);
 }
 
-static void intel_enable_dvo(struct intel_encoder *encoder)
+static void intel_enable_dvo(struct intel_encoder *encoder,
+			     struct intel_crtc_state *pipe_config,
+			     struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
 	u32 temp = I915_READ(dvo_reg);
 
 	intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
-					 &crtc->config->base.mode,
-					 &crtc->config->base.adjusted_mode);
+					 &pipe_config->base.mode,
+					 &pipe_config->base.adjusted_mode);
 
 	I915_WRITE(dvo_reg, temp | DVO_ENABLE);
 	I915_READ(dvo_reg);
@@ -235,7 +238,8 @@
 }
 
 static bool intel_dvo_compute_config(struct intel_encoder *encoder,
-				     struct intel_crtc_state *pipe_config)
+				     struct intel_crtc_state *pipe_config,
+				     struct drm_connector_state *conn_state)
 {
 	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
 	const struct drm_display_mode *fixed_mode =
@@ -253,12 +257,13 @@
 	return true;
 }
 
-static void intel_dvo_pre_enable(struct intel_encoder *encoder)
+static void intel_dvo_pre_enable(struct intel_encoder *encoder,
+				 struct intel_crtc_state *pipe_config,
+				 struct drm_connector_state *conn_state)
 {
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
 	int pipe = crtc->pipe;
 	u32 dvo_val;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
new file mode 100644
index 0000000..025e232
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_ringbuffer.h"
+#include "intel_lrc.h"
+
+static const struct engine_info {
+	const char *name;
+	unsigned exec_id;
+	enum intel_engine_hw_id hw_id;
+	u32 mmio_base;
+	unsigned irq_shift;
+	int (*init_legacy)(struct intel_engine_cs *engine);
+	int (*init_execlists)(struct intel_engine_cs *engine);
+} intel_engines[] = {
+	[RCS] = {
+		.name = "render ring",
+		.exec_id = I915_EXEC_RENDER,
+		.hw_id = RCS_HW,
+		.mmio_base = RENDER_RING_BASE,
+		.irq_shift = GEN8_RCS_IRQ_SHIFT,
+		.init_execlists = logical_render_ring_init,
+		.init_legacy = intel_init_render_ring_buffer,
+	},
+	[BCS] = {
+		.name = "blitter ring",
+		.exec_id = I915_EXEC_BLT,
+		.hw_id = BCS_HW,
+		.mmio_base = BLT_RING_BASE,
+		.irq_shift = GEN8_BCS_IRQ_SHIFT,
+		.init_execlists = logical_xcs_ring_init,
+		.init_legacy = intel_init_blt_ring_buffer,
+	},
+	[VCS] = {
+		.name = "bsd ring",
+		.exec_id = I915_EXEC_BSD,
+		.hw_id = VCS_HW,
+		.mmio_base = GEN6_BSD_RING_BASE,
+		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
+		.init_execlists = logical_xcs_ring_init,
+		.init_legacy = intel_init_bsd_ring_buffer,
+	},
+	[VCS2] = {
+		.name = "bsd2 ring",
+		.exec_id = I915_EXEC_BSD,
+		.hw_id = VCS2_HW,
+		.mmio_base = GEN8_BSD2_RING_BASE,
+		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
+		.init_execlists = logical_xcs_ring_init,
+		.init_legacy = intel_init_bsd2_ring_buffer,
+	},
+	[VECS] = {
+		.name = "video enhancement ring",
+		.exec_id = I915_EXEC_VEBOX,
+		.hw_id = VECS_HW,
+		.mmio_base = VEBOX_RING_BASE,
+		.irq_shift = GEN8_VECS_IRQ_SHIFT,
+		.init_execlists = logical_xcs_ring_init,
+		.init_legacy = intel_init_vebox_ring_buffer,
+	},
+};
+
+static struct intel_engine_cs *
+intel_engine_setup(struct drm_i915_private *dev_priv,
+		   enum intel_engine_id id)
+{
+	const struct engine_info *info = &intel_engines[id];
+	struct intel_engine_cs *engine = &dev_priv->engine[id];
+
+	engine->id = id;
+	engine->i915 = dev_priv;
+	engine->name = info->name;
+	engine->exec_id = info->exec_id;
+	engine->hw_id = engine->guc_id = info->hw_id;
+	engine->mmio_base = info->mmio_base;
+	engine->irq_shift = info->irq_shift;
+
+	return engine;
+}
+
+/**
+ * intel_engines_init() - allocate, populate and init the Engine Command Streamers
+ * @dev: DRM device.
+ *
+ * Return: non-zero if the initialization failed.
+ */
+int intel_engines_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
+	unsigned int mask = 0;
+	int (*init)(struct intel_engine_cs *engine);
+	unsigned int i;
+	int ret;
+
+	WARN_ON(INTEL_INFO(dev_priv)->ring_mask == 0);
+	WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
+		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
+
+	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
+		if (!HAS_ENGINE(dev_priv, i))
+			continue;
+
+		if (i915.enable_execlists)
+			init = intel_engines[i].init_execlists;
+		else
+			init = intel_engines[i].init_legacy;
+
+		if (!init)
+			continue;
+
+		ret = init(intel_engine_setup(dev_priv, i));
+		if (ret)
+			goto cleanup;
+
+		mask |= ENGINE_MASK(i);
+	}
+
+	/*
+	 * Catch failures to update intel_engines table when the new engines
+	 * are added to the driver by a warning and disabling the forgotten
+	 * engines.
+	 */
+	if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
+		device_info->ring_mask = mask;
+
+	device_info->num_rings = hweight32(mask);
+
+	return 0;
+
+cleanup:
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		if (i915.enable_execlists)
+			intel_logical_ring_cleanup(&dev_priv->engine[i]);
+		else
+			intel_engine_cleanup(&dev_priv->engine[i]);
+	}
+
+	return ret;
+}
+
+void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+
+	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
+	 * so long as the semaphore value in the register/page is greater
+	 * than the sync value), so whenever we reset the seqno,
+	 * so long as we reset the tracking semaphore value to 0, it will
+	 * always be before the next request's seqno. If we don't reset
+	 * the semaphore value, then when the seqno moves backwards all
+	 * future waits will complete instantly (causing rendering corruption).
+	 */
+	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
+		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
+		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
+		if (HAS_VEBOX(dev_priv))
+			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
+	}
+	if (dev_priv->semaphore) {
+		struct page *page = i915_vma_first_page(dev_priv->semaphore);
+		void *semaphores;
+
+		/* Semaphores are in noncoherent memory, flush to be safe */
+		semaphores = kmap(page);
+		memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
+		       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
+		drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
+				       I915_NUM_ENGINES * gen8_semaphore_seqno_size);
+		kunmap(page);
+	}
+	memset(engine->semaphore.sync_seqno, 0,
+	       sizeof(engine->semaphore.sync_seqno));
+
+	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
+	if (engine->irq_seqno_barrier)
+		engine->irq_seqno_barrier(engine);
+	engine->last_submitted_seqno = seqno;
+
+	engine->hangcheck.seqno = seqno;
+
+	/* After manually advancing the seqno, fake the interrupt in case
+	 * there are any waiters for that seqno.
+	 */
+	intel_engine_wakeup(engine);
+}
+
+void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
+{
+	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
+}
+
+static void intel_engine_init_requests(struct intel_engine_cs *engine)
+{
+	init_request_active(&engine->last_request, NULL);
+	INIT_LIST_HEAD(&engine->request_list);
+}
+
+/**
+ * intel_engines_setup_common - setup engine state not requiring hw access
+ * @engine: Engine to setup.
+ *
+ * Initializes @engine@ structure members shared between legacy and execlists
+ * submission modes which do not require hardware access.
+ *
+ * Typically done early in the submission mode specific engine setup stage.
+ */
+void intel_engine_setup_common(struct intel_engine_cs *engine)
+{
+	INIT_LIST_HEAD(&engine->execlist_queue);
+	spin_lock_init(&engine->execlist_lock);
+
+	engine->fence_context = fence_context_alloc(1);
+
+	intel_engine_init_requests(engine);
+	intel_engine_init_hangcheck(engine);
+	i915_gem_batch_pool_init(engine, &engine->batch_pool);
+
+	intel_engine_init_cmd_parser(engine);
+}
+
+int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int ret;
+
+	WARN_ON(engine->scratch);
+
+	obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
+	if (!obj)
+		obj = i915_gem_object_create(&engine->i915->drm, size);
+	if (IS_ERR(obj)) {
+		DRM_ERROR("Failed to allocate scratch page\n");
+		return PTR_ERR(obj);
+	}
+
+	vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err_unref;
+	}
+
+	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
+	if (ret)
+		goto err_unref;
+
+	engine->scratch = vma;
+	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
+			 engine->name, i915_ggtt_offset(vma));
+	return 0;
+
+err_unref:
+	i915_gem_object_put(obj);
+	return ret;
+}
+
+static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
+{
+	i915_vma_unpin_and_release(&engine->scratch);
+}
+
+/**
+ * intel_engines_init_common - initialize cengine state which might require hw access
+ * @engine: Engine to initialize.
+ *
+ * Initializes @engine@ structure members shared between legacy and execlists
+ * submission modes which do require hardware access.
+ *
+ * Typcally done at later stages of submission mode specific engine setup.
+ *
+ * Returns zero on success or an error code on failure.
+ */
+int intel_engine_init_common(struct intel_engine_cs *engine)
+{
+	int ret;
+
+	ret = intel_engine_init_breadcrumbs(engine);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/**
+ * intel_engines_cleanup_common - cleans up the engine state created by
+ *                                the common initiailizers.
+ * @engine: Engine to cleanup.
+ *
+ * This cleans up everything created by the common helpers.
+ */
+void intel_engine_cleanup_common(struct intel_engine_cs *engine)
+{
+	intel_engine_cleanup_scratch(engine);
+
+	intel_engine_fini_breadcrumbs(engine);
+	intel_engine_cleanup_cmd_parser(engine);
+	i915_gem_batch_pool_fini(&engine->batch_pool);
+}
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 3836a1c..faa6762 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -190,9 +190,13 @@
 		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
 	else
 		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
-	dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
 
-	I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+	if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
+		I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+	} else {
+		I915_WRITE(DPFC_FENCE_YOFF, 0);
+	}
 
 	/* enable it... */
 	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -244,21 +248,29 @@
 		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
 		break;
 	}
-	dpfc_ctl |= DPFC_CTL_FENCE_EN;
-	if (IS_GEN5(dev_priv))
-		dpfc_ctl |= params->fb.fence_reg;
+
+	if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+		dpfc_ctl |= DPFC_CTL_FENCE_EN;
+		if (IS_GEN5(dev_priv))
+			dpfc_ctl |= params->fb.fence_reg;
+		if (IS_GEN6(dev_priv)) {
+			I915_WRITE(SNB_DPFC_CTL_SA,
+				   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+			I915_WRITE(DPFC_CPU_FENCE_OFFSET,
+				   params->crtc.fence_y_offset);
+		}
+	} else {
+		if (IS_GEN6(dev_priv)) {
+			I915_WRITE(SNB_DPFC_CTL_SA, 0);
+			I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
+		}
+	}
 
 	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
 	I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
 	/* enable it... */
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
-	if (IS_GEN6(dev_priv)) {
-		I915_WRITE(SNB_DPFC_CTL_SA,
-			   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
-		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
-	}
-
 	intel_fbc_recompress(dev_priv);
 }
 
@@ -305,7 +317,15 @@
 		break;
 	}
 
-	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
+	if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+		dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
+		I915_WRITE(SNB_DPFC_CTL_SA,
+			   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
+	} else {
+		I915_WRITE(SNB_DPFC_CTL_SA,0);
+		I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
+	}
 
 	if (dev_priv->fbc.false_color)
 		dpfc_ctl |= FBC_CTL_FALSE_COLOR;
@@ -324,10 +344,6 @@
 
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
-	I915_WRITE(SNB_DPFC_CTL_SA,
-		   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
-	I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
-
 	intel_fbc_recompress(dev_priv);
 }
 
@@ -494,7 +510,7 @@
 	if (!no_fbc_on_multiple_pipes(dev_priv))
 		return true;
 
-	if (plane_state->visible)
+	if (plane_state->base.visible)
 		fbc->visible_pipes_mask |= (1 << pipe);
 	else
 		fbc->visible_pipes_mask &= ~(1 << pipe);
@@ -709,6 +725,14 @@
 	return effective_w <= max_w && effective_h <= max_h;
 }
 
+/* XXX replace me when we have VMA tracking for intel_plane_state */
+static int get_fence_id(struct drm_framebuffer *fb)
+{
+	struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL);
+
+	return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE;
+}
+
 static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 					 struct intel_crtc_state *crtc_state,
 					 struct intel_plane_state *plane_state)
@@ -725,9 +749,9 @@
 			ilk_pipe_pixel_rate(crtc_state);
 
 	cache->plane.rotation = plane_state->base.rotation;
-	cache->plane.src_w = drm_rect_width(&plane_state->src) >> 16;
-	cache->plane.src_h = drm_rect_height(&plane_state->src) >> 16;
-	cache->plane.visible = plane_state->visible;
+	cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
+	cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
+	cache->plane.visible = plane_state->base.visible;
 
 	if (!cache->plane.visible)
 		return;
@@ -737,11 +761,11 @@
 	/* FIXME: We lack the proper locking here, so only run this on the
 	 * platforms that need. */
 	if (IS_GEN(dev_priv, 5, 6))
-		cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
+		cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
 	cache->fb.pixel_format = fb->pixel_format;
 	cache->fb.stride = fb->pitches[0];
-	cache->fb.fence_reg = obj->fence_reg;
-	cache->fb.tiling_mode = obj->tiling_mode;
+	cache->fb.fence_reg = get_fence_id(fb);
+	cache->fb.tiling_mode = i915_gem_object_get_tiling(obj);
 }
 
 static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -768,6 +792,10 @@
 
 	/* The use of a CPU fence is mandatory in order to detect writes
 	 * by the CPU to the scanout and trigger updates to the FBC.
+	 *
+	 * Note that is possible for a tiled surface to be unmappable (and
+	 * so have no fence associated with it) due to aperture constaints
+	 * at the time of pinning.
 	 */
 	if (cache->fb.tiling_mode != I915_TILING_X ||
 	    cache->fb.fence_reg == I915_FENCE_REG_NONE) {
@@ -775,7 +803,7 @@
 		return false;
 	}
 	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
-	    cache->plane.rotation != BIT(DRM_ROTATE_0)) {
+	    cache->plane.rotation != DRM_ROTATE_0) {
 		fbc->no_fbc_reason = "rotation unsupported";
 		return false;
 	}
@@ -1050,7 +1078,7 @@
 		struct intel_plane_state *intel_plane_state =
 			to_intel_plane_state(plane_state);
 
-		if (!intel_plane_state->visible)
+		if (!intel_plane_state->base.visible)
 			continue;
 
 		for_each_crtc_in_state(state, crtc, crtc_state, j) {
@@ -1075,6 +1103,8 @@
 /**
  * intel_fbc_enable: tries to enable FBC on the CRTC
  * @crtc: the CRTC
+ * @crtc_state: corresponding &drm_crtc_state for @crtc
+ * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc
  *
  * This function checks if the given CRTC was chosen for FBC, then enables it if
  * possible. Notice that it doesn't activate FBC. It is valid to call
@@ -1163,11 +1193,8 @@
 		return;
 
 	mutex_lock(&fbc->lock);
-	if (fbc->crtc == crtc) {
-		WARN_ON(!fbc->enabled);
-		WARN_ON(fbc->active);
+	if (fbc->crtc == crtc)
 		__intel_fbc_disable(dev_priv);
-	}
 	mutex_unlock(&fbc->lock);
 
 	cancel_work_sync(&fbc->work.work);
@@ -1212,7 +1239,7 @@
 
 	for_each_intel_crtc(&dev_priv->drm, crtc)
 		if (intel_crtc_active(&crtc->base) &&
-		    to_intel_plane_state(crtc->base.primary->state)->visible)
+		    to_intel_plane_state(crtc->base.primary->state)->base.visible)
 			dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 3e3632c..b7098f9 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -34,7 +34,6 @@
 #include <linux/tty.h>
 #include <linux/sysrq.h>
 #include <linux/delay.h>
-#include <linux/fb.h>
 #include <linux/init.h>
 #include <linux/vga_switcheroo.h>
 
@@ -42,6 +41,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
@@ -159,7 +159,7 @@
 
 	fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
 	if (IS_ERR(fb)) {
-		drm_gem_object_unreference(&obj->base);
+		i915_gem_object_put(obj);
 		ret = PTR_ERR(fb);
 		goto out;
 	}
@@ -183,13 +183,13 @@
 	struct intel_framebuffer *intel_fb = ifbdev->fb;
 	struct drm_device *dev = helper->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct i915_vma *vma;
-	struct drm_i915_gem_object *obj;
 	bool prealloc = false;
-	void *vaddr;
+	void __iomem *vaddr;
 	int ret;
 
 	if (intel_fb &&
@@ -215,17 +215,17 @@
 		sizes->fb_height = intel_fb->base.height;
 	}
 
-	obj = intel_fb->obj;
-
 	mutex_lock(&dev->struct_mutex);
 
 	/* Pin the GGTT vma for our access via info->screen_base.
 	 * This also validates that any existing fb inherited from the
 	 * BIOS is suitable for own access.
 	 */
-	ret = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
-	if (ret)
+	vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto out_unlock;
+	}
 
 	info = drm_fb_helper_alloc_fbi(helper);
 	if (IS_ERR(info)) {
@@ -245,13 +245,11 @@
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &intelfb_ops;
 
-	vma = i915_gem_obj_to_ggtt(obj);
-
 	/* setup aperture base/size for vesafb takeover */
 	info->apertures->ranges[0].base = dev->mode_config.fb_base;
 	info->apertures->ranges[0].size = ggtt->mappable_end;
 
-	info->fix.smem_start = dev->mode_config.fb_base + vma->node.start;
+	info->fix.smem_start = dev->mode_config.fb_base + i915_ggtt_offset(vma);
 	info->fix.smem_len = vma->node.size;
 
 	vaddr = i915_vma_pin_iomap(vma);
@@ -273,23 +271,23 @@
 	 * If the object is stolen however, it will be full of whatever
 	 * garbage was left in there.
 	 */
-	if (ifbdev->fb->obj->stolen && !prealloc)
+	if (intel_fb->obj->stolen && !prealloc)
 		memset_io(info->screen_base, 0, info->screen_size);
 
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
-	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx, bo %p\n",
-		      fb->width, fb->height,
-		      i915_gem_obj_ggtt_offset(obj), obj);
+	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
+		      fb->width, fb->height, i915_ggtt_offset(vma));
+	ifbdev->vma = vma;
 
 	mutex_unlock(&dev->struct_mutex);
-	vga_switcheroo_client_fb_set(dev->pdev, info);
+	vga_switcheroo_client_fb_set(pdev, info);
 	return 0;
 
 out_destroy_fbi:
 	drm_fb_helper_release_fbi(helper);
 out_unpin:
-	intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
+	intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -554,7 +552,7 @@
 
 	if (ifbdev->fb) {
 		mutex_lock(&ifbdev->helper.dev->struct_mutex);
-		intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
+		intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
 		mutex_unlock(&ifbdev->helper.dev->struct_mutex);
 
 		drm_framebuffer_remove(&ifbdev->fb->base);
@@ -768,7 +766,7 @@
 	if (!ifbdev)
 		return;
 
-	flush_work(&dev_priv->fbdev_suspend_work);
+	cancel_work_sync(&dev_priv->fbdev_suspend_work);
 	if (!current_is_async())
 		intel_fbdev_sync(ifbdev);
 
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index ac85357..966de4c 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -63,47 +63,30 @@
 #include <drm/drmP.h>
 
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 #include "i915_drv.h"
 
-/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
- * @origin: which operation caused the invalidation
- *
- * This function gets called every time rendering on the given object starts and
- * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
- * until the rendering completes or a flip on this frontbuffer plane is
- * scheduled.
- */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     enum fb_op_origin origin)
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			       enum fb_op_origin origin,
+			       unsigned int frontbuffer_bits)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	if (!obj->frontbuffer_bits)
-		return;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
 	if (origin == ORIGIN_CS) {
-		mutex_lock(&dev_priv->fb_tracking.lock);
-		dev_priv->fb_tracking.busy_bits
-			|= obj->frontbuffer_bits;
-		dev_priv->fb_tracking.flip_bits
-			&= ~obj->frontbuffer_bits;
-		mutex_unlock(&dev_priv->fb_tracking.lock);
+		spin_lock(&dev_priv->fb_tracking.lock);
+		dev_priv->fb_tracking.busy_bits |= frontbuffer_bits;
+		dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
+		spin_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_psr_invalidate(dev, obj->frontbuffer_bits);
-	intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
-	intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
+	intel_psr_invalidate(dev_priv, frontbuffer_bits);
+	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
+	intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
 }
 
 /**
  * intel_frontbuffer_flush - flush frontbuffer
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  * @origin: which operation caused the flush
  *
@@ -113,64 +96,45 @@
  *
  * Can be called without any locks held.
  */
-static void intel_frontbuffer_flush(struct drm_device *dev,
+static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
 				    unsigned frontbuffer_bits,
 				    enum fb_op_origin origin)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	/* Delay flushing when rings are still busy.*/
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
 	if (!frontbuffer_bits)
 		return;
 
-	intel_edp_drrs_flush(dev, frontbuffer_bits);
-	intel_psr_flush(dev, frontbuffer_bits, origin);
+	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
+	intel_psr_flush(dev_priv, frontbuffer_bits, origin);
 	intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
 }
 
-/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
- * @retire: set when retiring asynchronous rendering
- * @origin: which operation caused the flush
- *
- * This function gets called every time rendering on the given object has
- * completed and frontbuffer caching can be started again. If @retire is true
- * then any delayed flushes will be unblocked.
- */
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-			bool retire, enum fb_op_origin origin)
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+			  bool retire,
+			  enum fb_op_origin origin,
+			  unsigned int frontbuffer_bits)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	unsigned frontbuffer_bits;
-
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	if (!obj->frontbuffer_bits)
-		return;
-
-	frontbuffer_bits = obj->frontbuffer_bits;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
 	if (retire) {
-		mutex_lock(&dev_priv->fb_tracking.lock);
+		spin_lock(&dev_priv->fb_tracking.lock);
 		/* Filter out new bits since rendering started. */
 		frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-
 		dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-		mutex_unlock(&dev_priv->fb_tracking.lock);
+		spin_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
+	if (frontbuffer_bits)
+		intel_frontbuffer_flush(dev_priv, frontbuffer_bits, origin);
 }
 
 /**
  * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called after scheduling a flip on @obj. The actual
@@ -180,23 +144,21 @@
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
 				    unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
 	/* Remove stale busy bits due to the old buffer. */
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_psr_single_frame_update(dev, frontbuffer_bits);
+	intel_psr_single_frame_update(dev_priv, frontbuffer_bits);
 }
 
 /**
  * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called after the flip has been latched and will complete
@@ -204,23 +166,23 @@
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flip_complete(struct drm_device *dev,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
 				     unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	/* Mask any cancelled flips. */
 	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
 	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+	if (frontbuffer_bits)
+		intel_frontbuffer_flush(dev_priv,
+					frontbuffer_bits, ORIGIN_FLIP);
 }
 
 /**
  * intel_frontbuffer_flip - synchronous frontbuffer flip
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called after scheduling a flip on @obj. This is for
@@ -229,15 +191,13 @@
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flip(struct drm_device *dev,
+void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
 			    unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	/* Remove stale busy bits due to the old buffer. */
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+	intel_frontbuffer_flush(dev_priv, frontbuffer_bits, ORIGIN_FLIP);
 }
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
new file mode 100644
index 0000000..76ceb53
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __INTEL_FRONTBUFFER_H__
+#define __INTEL_FRONTBUFFER_H__
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
+				    unsigned frontbuffer_bits);
+void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
+				     unsigned frontbuffer_bits);
+void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
+			    unsigned frontbuffer_bits);
+
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			       enum fb_op_origin origin,
+			       unsigned int frontbuffer_bits);
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+			  bool retire,
+			  enum fb_op_origin origin,
+			  unsigned int frontbuffer_bits);
+
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @origin: which operation caused the invalidation
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+					   enum fb_op_origin origin)
+{
+	unsigned int frontbuffer_bits;
+
+	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+	if (!frontbuffer_bits)
+		return;
+
+	__intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ * @origin: which operation caused the flush
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+				      bool retire,
+				      enum fb_op_origin origin)
+{
+	unsigned int frontbuffer_bits;
+
+	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+	if (!frontbuffer_bits)
+		return;
+
+	__intel_fb_obj_flush(obj, retire, origin, frontbuffer_bits);
+}
+
+#endif /* __INTEL_FRONTBUFFER_H__ */
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 3e3e743..5cdf7aa 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -63,26 +63,27 @@
  *   retcode: errno from last guc_submit()
  */
 struct i915_guc_client {
-	struct drm_i915_gem_object *client_obj;
+	struct i915_vma *vma;
 	void *client_base;		/* first page (only) of above	*/
 	struct i915_gem_context *owner;
 	struct intel_guc *guc;
+
+	uint32_t engines;		/* bitmap of (host) engine ids	*/
 	uint32_t priority;
 	uint32_t ctx_index;
-
 	uint32_t proc_desc_offset;
+
 	uint32_t doorbell_offset;
 	uint32_t cookie;
 	uint16_t doorbell_id;
-	uint16_t padding;		/* Maintain alignment		*/
+	uint16_t padding[3];		/* Maintain alignment		*/
 
+	spinlock_t wq_lock;
 	uint32_t wq_offset;
 	uint32_t wq_size;
 	uint32_t wq_tail;
-	uint32_t unused;		/* Was 'wq_head'		*/
-
+	uint32_t wq_rsvd;
 	uint32_t no_wq_space;
-	uint32_t q_fail;		/* No longer used		*/
 	uint32_t b_fail;
 	int retcode;
 
@@ -125,11 +126,10 @@
 struct intel_guc {
 	struct intel_guc_fw guc_fw;
 	uint32_t log_flags;
-	struct drm_i915_gem_object *log_obj;
+	struct i915_vma *log_vma;
 
-	struct drm_i915_gem_object *ads_obj;
-
-	struct drm_i915_gem_object *ctx_pool_obj;
+	struct i915_vma *ads_vma;
+	struct i915_vma *ctx_pool_vma;
 	struct ida ctx_ids;
 
 	struct i915_guc_client *execbuf_client;
@@ -159,8 +159,8 @@
 /* i915_guc_submission.c */
 int i915_guc_submission_init(struct drm_i915_private *dev_priv);
 int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
-int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
-int i915_guc_submit(struct drm_i915_gem_request *rq);
+int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
+void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
 void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
 void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
 
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 944786d..e40db2d 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -155,6 +155,7 @@
  *
  *     +-------------------------------+
  *     |        guc_css_header         |
+ *     |                               |
  *     | contains major/minor version  |
  *     +-------------------------------+
  *     |             uCode             |
@@ -176,10 +177,10 @@
  *
  * 1. Header, uCode and RSA are must-have components.
  * 2. All firmware components, if they present, are in the sequence illustrated
- * in the layout table above.
+ *    in the layout table above.
  * 3. Length info of each component can be found in header, in dwords.
  * 4. Modulus and exponent key are not required by driver. They may not appear
- * in fw. So driver will load a truncated firmware in this case.
+ *    in fw. So driver will load a truncated firmware in this case.
  */
 
 struct guc_css_header {
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 605c696..6fd39ef 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -59,13 +59,25 @@
  *
  */
 
-#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6_1.bin"
+#define SKL_FW_MAJOR 6
+#define SKL_FW_MINOR 1
+
+#define BXT_FW_MAJOR 8
+#define BXT_FW_MINOR 7
+
+#define KBL_FW_MAJOR 9
+#define KBL_FW_MINOR 14
+
+#define GUC_FW_PATH(platform, major, minor) \
+       "i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin"
+
+#define I915_SKL_GUC_UCODE GUC_FW_PATH(skl, SKL_FW_MAJOR, SKL_FW_MINOR)
 MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
 
-#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin"
+#define I915_BXT_GUC_UCODE GUC_FW_PATH(bxt, BXT_FW_MAJOR, BXT_FW_MINOR)
 MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
 
-#define I915_KBL_GUC_UCODE "i915/kbl_guc_ver9_14.bin"
+#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
 MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
 
 /* User-friendly representation of an enum */
@@ -85,7 +97,7 @@
 	}
 };
 
-static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
+static void guc_interrupts_release(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
 	int irqs;
@@ -102,7 +114,7 @@
 	I915_WRITE(GUC_WD_VECS_IER, 0);
 }
 
-static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
+static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
 	int irqs;
@@ -122,13 +134,28 @@
 	I915_WRITE(GUC_WD_VECS_IER, ~irqs);
 
 	/*
-	 * If GuC has routed PM interrupts to itself, don't keep it.
-	 * and keep other interrupts those are unmasked by GuC.
-	*/
+	 * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
+	 * (unmasked) PM interrupts to the GuC. All other bits of this
+	 * register *disable* generation of a specific interrupt.
+	 *
+	 * 'pm_intr_keep' indicates bits that are NOT to be set when
+	 * writing to the PM interrupt mask register, i.e. interrupts
+	 * that must not be disabled.
+	 *
+	 * If the GuC is handling these interrupts, then we must not let
+	 * the PM code disable ANY interrupt that the GuC is expecting.
+	 * So for each ENABLED (0) bit in this register, we must SET the
+	 * bit in pm_intr_keep so that it's left enabled for the GuC.
+	 *
+	 * OTOH the REDIRECT_TO_GUC bit is initially SET in pm_intr_keep
+	 * (so interrupts go to the DISPLAY unit at first); but here we
+	 * need to CLEAR that bit, which will result in the register bit
+	 * being left SET!
+	 */
 	tmp = I915_READ(GEN6_PMINTRMSK);
-	if (tmp & GEN8_PMINTR_REDIRECT_TO_NON_DISP) {
-		dev_priv->rps.pm_intr_keep |= ~(tmp & ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
-		dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+	if (tmp & GEN8_PMINTR_REDIRECT_TO_GUC) {
+		dev_priv->rps.pm_intr_keep |= ~tmp;
+		dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_GUC;
 	}
 }
 
@@ -140,17 +167,24 @@
 
 static u32 get_core_family(struct drm_i915_private *dev_priv)
 {
-	switch (INTEL_INFO(dev_priv)->gen) {
+	u32 gen = INTEL_GEN(dev_priv);
+
+	switch (gen) {
 	case 9:
 		return GFXCORE_FAMILY_GEN9;
 
 	default:
-		DRM_ERROR("GUC: unsupported core family\n");
+		WARN(1, "GEN%d does not support GuC operation!\n", gen);
 		return GFXCORE_FAMILY_UNKNOWN;
 	}
 }
 
-static void set_guc_init_params(struct drm_i915_private *dev_priv)
+/*
+ * Initialise the GuC parameter block before starting the firmware
+ * transfer. These parameters are read by the firmware on startup
+ * and cannot be changed thereafter.
+ */
+static void guc_params_init(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc *guc = &dev_priv->guc;
 	u32 params[GUC_CTL_MAX_DWORDS];
@@ -181,16 +215,15 @@
 			i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
 	}
 
-	if (guc->ads_obj) {
-		u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj)
-				>> PAGE_SHIFT;
+	if (guc->ads_vma) {
+		u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
 		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
 		params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
 	}
 
 	/* If GuC submission is enabled, set up additional parameters here */
 	if (i915.enable_guc_submission) {
-		u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
+		u32 pgs = i915_ggtt_offset(dev_priv->guc.ctx_pool_vma);
 		u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
 
 		pgs >>= PAGE_SHIFT;
@@ -238,12 +271,12 @@
  * Note that GuC needs the CSS header plus uKernel code to be copied by the
  * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
  */
-static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
+static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
+			      struct i915_vma *vma)
 {
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
-	struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
 	unsigned long offset;
-	struct sg_table *sg = fw_obj->pages;
+	struct sg_table *sg = vma->pages;
 	u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
 	int i, ret = 0;
 
@@ -260,7 +293,7 @@
 	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
 
 	/* Set the source address for the new blob */
-	offset = i915_gem_obj_ggtt_offset(fw_obj) + guc_fw->header_offset;
+	offset = i915_ggtt_offset(vma) + guc_fw->header_offset;
 	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
 	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
 
@@ -315,6 +348,7 @@
 {
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 	struct drm_device *dev = &dev_priv->drm;
+	struct i915_vma *vma;
 	int ret;
 
 	ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
@@ -323,10 +357,10 @@
 		return ret;
 	}
 
-	ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0);
-	if (ret) {
-		DRM_DEBUG_DRIVER("pin failed %d\n", ret);
-		return ret;
+	vma = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
+	if (IS_ERR(vma)) {
+		DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
+		return PTR_ERR(vma);
 	}
 
 	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
@@ -349,7 +383,9 @@
 	}
 
 	/* WaC6DisallowByGfxPause*/
-	I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_C0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+		I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
 
 	if (IS_BROXTON(dev))
 		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
@@ -361,13 +397,13 @@
 		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
 					    I915_READ(GEN7_MISCCPCTL)));
 
-		/* allows for 5us before GT can go to RC6 */
+		/* allows for 5us (in 10ns units) before GT can go to RC6 */
 		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
 	}
 
-	set_guc_init_params(dev_priv);
+	guc_params_init(dev_priv);
 
-	ret = guc_ucode_xfer_dma(dev_priv);
+	ret = guc_ucode_xfer_dma(dev_priv, vma);
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
@@ -375,12 +411,12 @@
 	 * We keep the object pages for reuse during resume. But we can unpin it
 	 * now that DMA has completed, so it doesn't continue to take up space.
 	 */
-	i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj);
+	i915_vma_unpin(vma);
 
 	return ret;
 }
 
-static int i915_reset_guc(struct drm_i915_private *dev_priv)
+static int guc_hw_reset(struct drm_i915_private *dev_priv)
 {
 	int ret;
 	u32 guc_status;
@@ -433,7 +469,7 @@
 		goto fail;
 	} else if (*fw_path == '\0') {
 		/* Device has a GuC but we don't know what f/w to load? */
-		DRM_INFO("No GuC firmware known for this platform\n");
+		WARN(1, "No GuC firmware known for this platform!\n");
 		err = -ENODEV;
 		goto fail;
 	}
@@ -447,7 +483,7 @@
 		goto fail;
 	}
 
-	direct_interrupts_to_host(dev_priv);
+	guc_interrupts_release(dev_priv);
 
 	guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
 
@@ -470,11 +506,9 @@
 		 * Always reset the GuC just before (re)loading, so
 		 * that the state and timing are fairly predictable
 		 */
-		err = i915_reset_guc(dev_priv);
-		if (err) {
-			DRM_ERROR("GuC reset failed: %d\n", err);
+		err = guc_hw_reset(dev_priv);
+		if (err)
 			goto fail;
-		}
 
 		err = guc_ucode_xfer(dev_priv);
 		if (!err)
@@ -497,7 +531,7 @@
 		err = i915_guc_submission_enable(dev_priv);
 		if (err)
 			goto fail;
-		direct_interrupts_to_guc(dev_priv);
+		guc_interrupts_capture(dev_priv);
 	}
 
 	return 0;
@@ -506,7 +540,7 @@
 	if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
 		guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
 
-	direct_interrupts_to_host(dev_priv);
+	guc_interrupts_release(dev_priv);
 	i915_guc_submission_disable(dev_priv);
 	i915_guc_submission_fini(dev_priv);
 
@@ -532,15 +566,15 @@
 	else if (err == 0)
 		DRM_INFO("GuC firmware load skipped\n");
 	else if (ret != -EIO)
-		DRM_INFO("GuC firmware load failed: %d\n", err);
+		DRM_NOTE("GuC firmware load failed: %d\n", err);
 	else
-		DRM_ERROR("GuC firmware load failed: %d\n", err);
+		DRM_WARN("GuC firmware load failed: %d\n", err);
 
 	if (i915.enable_guc_submission) {
 		if (fw_path == NULL)
 			DRM_INFO("GuC submission without firmware not supported\n");
 		if (ret == 0)
-			DRM_INFO("Falling back from GuC submission to execlist mode\n");
+			DRM_NOTE("Falling back from GuC submission to execlist mode\n");
 		else
 			DRM_ERROR("GuC init failed: %d\n", ret);
 	}
@@ -551,6 +585,7 @@
 
 static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 {
+	struct pci_dev *pdev = dev->pdev;
 	struct drm_i915_gem_object *obj;
 	const struct firmware *fw;
 	struct guc_css_header *css;
@@ -560,7 +595,7 @@
 	DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
 		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
 
-	err = request_firmware(&fw, guc_fw->guc_fw_path, &dev->pdev->dev);
+	err = request_firmware(&fw, guc_fw->guc_fw_path, &pdev->dev);
 	if (err)
 		goto fail;
 	if (!fw)
@@ -571,7 +606,7 @@
 
 	/* Check the size of the blob before examining buffer contents */
 	if (fw->size < sizeof(struct guc_css_header)) {
-		DRM_ERROR("Firmware header is missing\n");
+		DRM_NOTE("Firmware header is missing\n");
 		goto fail;
 	}
 
@@ -583,7 +618,7 @@
 		css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
 
 	if (guc_fw->header_size != sizeof(struct guc_css_header)) {
-		DRM_ERROR("CSS header definition mismatch\n");
+		DRM_NOTE("CSS header definition mismatch\n");
 		goto fail;
 	}
 
@@ -593,7 +628,7 @@
 
 	/* now RSA */
 	if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
-		DRM_ERROR("RSA key size is bad\n");
+		DRM_NOTE("RSA key size is bad\n");
 		goto fail;
 	}
 	guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
@@ -602,14 +637,14 @@
 	/* At least, it should have header, uCode and RSA. Size of all three. */
 	size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
 	if (fw->size < size) {
-		DRM_ERROR("Missing firmware components\n");
+		DRM_NOTE("Missing firmware components\n");
 		goto fail;
 	}
 
 	/* Header and uCode will be loaded to WOPCM. Size of the two. */
 	size = guc_fw->header_size + guc_fw->ucode_size;
 	if (size > guc_wopcm_size(to_i915(dev))) {
-		DRM_ERROR("Firmware is too large to fit in WOPCM\n");
+		DRM_NOTE("Firmware is too large to fit in WOPCM\n");
 		goto fail;
 	}
 
@@ -624,7 +659,7 @@
 
 	if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
 	    guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
-		DRM_ERROR("GuC firmware version %d.%d, required %d.%d\n",
+		DRM_NOTE("GuC firmware version %d.%d, required %d.%d\n",
 			guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
 			guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
 		err = -ENOEXEC;
@@ -654,15 +689,15 @@
 	return;
 
 fail:
+	DRM_WARN("Failed to fetch valid GuC firmware from %s (error %d)\n",
+		 guc_fw->guc_fw_path, err);
 	DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
 		err, fw, guc_fw->guc_fw_obj);
-	DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
-		  guc_fw->guc_fw_path, err);
 
 	mutex_lock(&dev->struct_mutex);
 	obj = guc_fw->guc_fw_obj;
 	if (obj)
-		drm_gem_object_unreference(&obj->base);
+		i915_gem_object_put(obj);
 	guc_fw->guc_fw_obj = NULL;
 	mutex_unlock(&dev->struct_mutex);
 
@@ -695,16 +730,16 @@
 		fw_path = NULL;
 	} else if (IS_SKYLAKE(dev)) {
 		fw_path = I915_SKL_GUC_UCODE;
-		guc_fw->guc_fw_major_wanted = 6;
-		guc_fw->guc_fw_minor_wanted = 1;
+		guc_fw->guc_fw_major_wanted = SKL_FW_MAJOR;
+		guc_fw->guc_fw_minor_wanted = SKL_FW_MINOR;
 	} else if (IS_BROXTON(dev)) {
 		fw_path = I915_BXT_GUC_UCODE;
-		guc_fw->guc_fw_major_wanted = 8;
-		guc_fw->guc_fw_minor_wanted = 7;
+		guc_fw->guc_fw_major_wanted = BXT_FW_MAJOR;
+		guc_fw->guc_fw_minor_wanted = BXT_FW_MINOR;
 	} else if (IS_KABYLAKE(dev)) {
 		fw_path = I915_KBL_GUC_UCODE;
-		guc_fw->guc_fw_major_wanted = 9;
-		guc_fw->guc_fw_minor_wanted = 14;
+		guc_fw->guc_fw_major_wanted = KBL_FW_MAJOR;
+		guc_fw->guc_fw_minor_wanted = KBL_FW_MINOR;
 	} else {
 		fw_path = "";	/* unknown device */
 	}
@@ -738,12 +773,12 @@
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 
 	mutex_lock(&dev->struct_mutex);
-	direct_interrupts_to_host(dev_priv);
+	guc_interrupts_release(dev_priv);
 	i915_guc_submission_disable(dev_priv);
 	i915_guc_submission_fini(dev_priv);
 
 	if (guc_fw->guc_fw_obj)
-		drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
+		i915_gem_object_put(guc_fw->guc_fw_obj);
 	guc_fw->guc_fw_obj = NULL;
 	mutex_unlock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4df9f38..f40a35f 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -985,7 +985,9 @@
 	intel_audio_codec_enable(encoder);
 }
 
-static void g4x_enable_hdmi(struct intel_encoder *encoder)
+static void g4x_enable_hdmi(struct intel_encoder *encoder,
+			    struct intel_crtc_state *pipe_config,
+			    struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1006,7 +1008,9 @@
 		intel_enable_hdmi_audio(encoder);
 }
 
-static void ibx_enable_hdmi(struct intel_encoder *encoder)
+static void ibx_enable_hdmi(struct intel_encoder *encoder,
+			    struct intel_crtc_state *pipe_config,
+			    struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1055,7 +1059,9 @@
 		intel_enable_hdmi_audio(encoder);
 }
 
-static void cpt_enable_hdmi(struct intel_encoder *encoder)
+static void cpt_enable_hdmi(struct intel_encoder *encoder,
+			    struct intel_crtc_state *pipe_config,
+			    struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1108,11 +1114,15 @@
 		intel_enable_hdmi_audio(encoder);
 }
 
-static void vlv_enable_hdmi(struct intel_encoder *encoder)
+static void vlv_enable_hdmi(struct intel_encoder *encoder,
+			    struct intel_crtc_state *pipe_config,
+			    struct drm_connector_state *conn_state)
 {
 }
 
-static void intel_disable_hdmi(struct intel_encoder *encoder)
+static void intel_disable_hdmi(struct intel_encoder *encoder,
+			       struct intel_crtc_state *old_crtc_state,
+			       struct drm_connector_state *old_conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1164,17 +1174,21 @@
 	intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
 }
 
-static void g4x_disable_hdmi(struct intel_encoder *encoder)
+static void g4x_disable_hdmi(struct intel_encoder *encoder,
+			     struct intel_crtc_state *old_crtc_state,
+			     struct drm_connector_state *old_conn_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
 	if (crtc->config->has_audio)
 		intel_audio_codec_disable(encoder);
 
-	intel_disable_hdmi(encoder);
+	intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
 }
 
-static void pch_disable_hdmi(struct intel_encoder *encoder)
+static void pch_disable_hdmi(struct intel_encoder *encoder,
+			     struct intel_crtc_state *old_crtc_state,
+			     struct drm_connector_state *old_conn_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
@@ -1182,9 +1196,11 @@
 		intel_audio_codec_disable(encoder);
 }
 
-static void pch_post_disable_hdmi(struct intel_encoder *encoder)
+static void pch_post_disable_hdmi(struct intel_encoder *encoder,
+				  struct intel_crtc_state *old_crtc_state,
+				  struct drm_connector_state *old_conn_state)
 {
-	intel_disable_hdmi(encoder);
+	intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
 }
 
 static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
@@ -1204,10 +1220,17 @@
 	int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev));
 
 	if (respect_downstream_limits) {
+		struct intel_connector *connector = hdmi->attached_connector;
+		const struct drm_display_info *info = &connector->base.display_info;
+
 		if (hdmi->dp_dual_mode.max_tmds_clock)
 			max_tmds_clock = min(max_tmds_clock,
 					     hdmi->dp_dual_mode.max_tmds_clock);
-		if (!hdmi->has_hdmi_sink)
+
+		if (info->max_tmds_clock)
+			max_tmds_clock = min(max_tmds_clock,
+					     info->max_tmds_clock);
+		else if (!hdmi->has_hdmi_sink)
 			max_tmds_clock = min(max_tmds_clock, 165000);
 	}
 
@@ -1285,7 +1308,8 @@
 }
 
 bool intel_hdmi_compute_config(struct intel_encoder *encoder,
-			       struct intel_crtc_state *pipe_config)
+			       struct intel_crtc_state *pipe_config,
+			       struct drm_connector_state *conn_state)
 {
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
@@ -1422,24 +1446,22 @@
 }
 
 static bool
-intel_hdmi_set_edid(struct drm_connector *connector, bool force)
+intel_hdmi_set_edid(struct drm_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
-	struct edid *edid = NULL;
+	struct edid *edid;
 	bool connected = false;
 
-	if (force) {
-		intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
-		edid = drm_get_edid(connector,
-				    intel_gmbus_get_adapter(dev_priv,
-				    intel_hdmi->ddc_bus));
+	edid = drm_get_edid(connector,
+			    intel_gmbus_get_adapter(dev_priv,
+			    intel_hdmi->ddc_bus));
 
-		intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
+	intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
 
-		intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
-	}
+	intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
 
 	to_intel_connector(connector)->detect_edid = edid;
 	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -1465,37 +1487,16 @@
 intel_hdmi_detect(struct drm_connector *connector, bool force)
 {
 	enum drm_connector_status status;
-	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-	bool live_status = false;
-	unsigned int try;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
 		      connector->base.id, connector->name);
 
 	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
-	for (try = 0; !live_status && try < 9; try++) {
-		if (try)
-			msleep(10);
-		live_status = intel_digital_port_connected(dev_priv,
-				hdmi_to_dig_port(intel_hdmi));
-	}
-
-	if (!live_status) {
-		DRM_DEBUG_KMS("HDMI live status down\n");
-		/*
-		 * Live status register is not reliable on all intel platforms.
-		 * So consider live_status only for certain platforms, for
-		 * others, read EDID to determine presence of sink.
-		 */
-		if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
-			live_status = true;
-	}
-
 	intel_hdmi_unset_edid(connector);
 
-	if (intel_hdmi_set_edid(connector, live_status)) {
+	if (intel_hdmi_set_edid(connector)) {
 		struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 
 		hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
@@ -1521,7 +1522,7 @@
 	if (connector->status != connector_status_connected)
 		return;
 
-	intel_hdmi_set_edid(connector, true);
+	intel_hdmi_set_edid(connector);
 	hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
 }
 
@@ -1638,7 +1639,9 @@
 	return 0;
 }
 
-static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
+static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
+				  struct intel_crtc_state *pipe_config,
+				  struct drm_connector_state *conn_state)
 {
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -1651,7 +1654,9 @@
 				   adjusted_mode);
 }
 
-static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
+				struct intel_crtc_state *pipe_config,
+				struct drm_connector_state *conn_state)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
 	struct intel_hdmi *intel_hdmi = &dport->hdmi;
@@ -1671,37 +1676,47 @@
 				   intel_crtc->config->has_hdmi_sink,
 				   adjusted_mode);
 
-	g4x_enable_hdmi(encoder);
+	g4x_enable_hdmi(encoder, pipe_config, conn_state);
 
 	vlv_wait_port_ready(dev_priv, dport, 0x0);
 }
 
-static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
+				    struct intel_crtc_state *pipe_config,
+				    struct drm_connector_state *conn_state)
 {
 	intel_hdmi_prepare(encoder);
 
 	vlv_phy_pre_pll_enable(encoder);
 }
 
-static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
+				    struct intel_crtc_state *pipe_config,
+				    struct drm_connector_state *conn_state)
 {
 	intel_hdmi_prepare(encoder);
 
 	chv_phy_pre_pll_enable(encoder);
 }
 
-static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
+static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder,
+				      struct intel_crtc_state *old_crtc_state,
+				      struct drm_connector_state *old_conn_state)
 {
 	chv_phy_post_pll_disable(encoder);
 }
 
-static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
+static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
+				  struct intel_crtc_state *old_crtc_state,
+				  struct drm_connector_state *old_conn_state)
 {
 	/* Reset lanes to avoid HDMI flicker (VLV w/a) */
 	vlv_phy_reset_lanes(encoder);
 }
 
-static void chv_hdmi_post_disable(struct intel_encoder *encoder)
+static void chv_hdmi_post_disable(struct intel_encoder *encoder,
+				  struct intel_crtc_state *old_crtc_state,
+				  struct drm_connector_state *old_conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1714,7 +1729,9 @@
 	mutex_unlock(&dev_priv->sb_lock);
 }
 
-static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
+static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
+				struct intel_crtc_state *pipe_config,
+				struct drm_connector_state *conn_state)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
 	struct intel_hdmi *intel_hdmi = &dport->hdmi;
@@ -1734,7 +1751,7 @@
 				   intel_crtc->config->has_hdmi_sink,
 				   adjusted_mode);
 
-	g4x_enable_hdmi(encoder);
+	g4x_enable_hdmi(encoder, pipe_config, conn_state);
 
 	vlv_wait_port_ready(dev_priv, dport, 0x0);
 
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index f48957e..334d47b 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -477,7 +477,8 @@
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-void i915_hpd_poll_init_work(struct work_struct *work) {
+static void i915_hpd_poll_init_work(struct work_struct *work)
+{
 	struct drm_i915_private *dev_priv =
 		container_of(work, struct drm_i915_private,
 			     hotplug.poll_init_work);
@@ -525,7 +526,6 @@
 /**
  * intel_hpd_poll_init - enables/disables polling for connectors with hpd
  * @dev_priv: i915 device instance
- * @enabled: Whether to enable or disable polling
  *
  * This function enables polling for all connectors, regardless of whether or
  * not they support hotplug detection. Under certain conditions HPD may not be
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 1f266d7..79aab9a 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -255,67 +255,59 @@
 	algo->data = bus;
 }
 
-static int
-gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
-		     u32 gmbus2_status,
-		     u32 gmbus4_irq_en)
+static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
 {
-	int i;
-	u32 gmbus2 = 0;
 	DEFINE_WAIT(wait);
-
-	if (!HAS_GMBUS_IRQ(dev_priv))
-		gmbus4_irq_en = 0;
+	u32 gmbus2;
+	int ret;
 
 	/* Important: The hw handles only the first bit, so set only one! Since
 	 * we also need to check for NAKs besides the hw ready/idle signal, we
-	 * need to wake up periodically and check that ourselves. */
-	I915_WRITE(GMBUS4, gmbus4_irq_en);
+	 * need to wake up periodically and check that ourselves.
+	 */
+	if (!HAS_GMBUS_IRQ(dev_priv))
+		irq_en = 0;
 
-	for (i = 0; i < msecs_to_jiffies_timeout(50); i++) {
-		prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
-				TASK_UNINTERRUPTIBLE);
+	add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+	I915_WRITE_FW(GMBUS4, irq_en);
 
-		gmbus2 = I915_READ_NOTRACE(GMBUS2);
-		if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
-			break;
+	status |= GMBUS_SATOER;
+	ret = wait_for_us((gmbus2 = I915_READ_FW(GMBUS2)) & status, 2);
+	if (ret)
+		ret = wait_for((gmbus2 = I915_READ_FW(GMBUS2)) & status, 50);
 
-		schedule_timeout(1);
-	}
-	finish_wait(&dev_priv->gmbus_wait_queue, &wait);
-
-	I915_WRITE(GMBUS4, 0);
+	I915_WRITE_FW(GMBUS4, 0);
+	remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
 
 	if (gmbus2 & GMBUS_SATOER)
 		return -ENXIO;
-	if (gmbus2 & gmbus2_status)
-		return 0;
-	return -ETIMEDOUT;
+
+	return ret;
 }
 
 static int
 gmbus_wait_idle(struct drm_i915_private *dev_priv)
 {
+	DEFINE_WAIT(wait);
+	u32 irq_enable;
 	int ret;
 
-	if (!HAS_GMBUS_IRQ(dev_priv))
-		return intel_wait_for_register(dev_priv,
-					       GMBUS2, GMBUS_ACTIVE, 0,
-					       10);
-
 	/* Important: The hw handles only the first bit, so set only one! */
-	I915_WRITE(GMBUS4, GMBUS_IDLE_EN);
+	irq_enable = 0;
+	if (HAS_GMBUS_IRQ(dev_priv))
+		irq_enable = GMBUS_IDLE_EN;
 
-	ret = wait_event_timeout(dev_priv->gmbus_wait_queue,
-				 (I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0,
-				 msecs_to_jiffies_timeout(10));
+	add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+	I915_WRITE_FW(GMBUS4, irq_enable);
 
-	I915_WRITE(GMBUS4, 0);
+	ret = intel_wait_for_register_fw(dev_priv,
+					 GMBUS2, GMBUS_ACTIVE, 0,
+					 10);
 
-	if (ret)
-		return 0;
-	else
-		return -ETIMEDOUT;
+	I915_WRITE_FW(GMBUS4, 0);
+	remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+
+	return ret;
 }
 
 static int
@@ -323,22 +315,21 @@
 		      unsigned short addr, u8 *buf, unsigned int len,
 		      u32 gmbus1_index)
 {
-	I915_WRITE(GMBUS1,
-		   gmbus1_index |
-		   GMBUS_CYCLE_WAIT |
-		   (len << GMBUS_BYTE_COUNT_SHIFT) |
-		   (addr << GMBUS_SLAVE_ADDR_SHIFT) |
-		   GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+	I915_WRITE_FW(GMBUS1,
+		      gmbus1_index |
+		      GMBUS_CYCLE_WAIT |
+		      (len << GMBUS_BYTE_COUNT_SHIFT) |
+		      (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+		      GMBUS_SLAVE_READ | GMBUS_SW_RDY);
 	while (len) {
 		int ret;
 		u32 val, loop = 0;
 
-		ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
-					   GMBUS_HW_RDY_EN);
+		ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
 		if (ret)
 			return ret;
 
-		val = I915_READ(GMBUS3);
+		val = I915_READ_FW(GMBUS3);
 		do {
 			*buf++ = val & 0xff;
 			val >>= 8;
@@ -385,12 +376,12 @@
 		len -= 1;
 	}
 
-	I915_WRITE(GMBUS3, val);
-	I915_WRITE(GMBUS1,
-		   GMBUS_CYCLE_WAIT |
-		   (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
-		   (addr << GMBUS_SLAVE_ADDR_SHIFT) |
-		   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+	I915_WRITE_FW(GMBUS3, val);
+	I915_WRITE_FW(GMBUS1,
+		      GMBUS_CYCLE_WAIT |
+		      (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
+		      (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+		      GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
 	while (len) {
 		int ret;
 
@@ -399,10 +390,9 @@
 			val |= *buf++ << (8 * loop);
 		} while (--len && ++loop < 4);
 
-		I915_WRITE(GMBUS3, val);
+		I915_WRITE_FW(GMBUS3, val);
 
-		ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
-					   GMBUS_HW_RDY_EN);
+		ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
 		if (ret)
 			return ret;
 	}
@@ -460,13 +450,13 @@
 
 	/* GMBUS5 holds 16-bit index */
 	if (gmbus5)
-		I915_WRITE(GMBUS5, gmbus5);
+		I915_WRITE_FW(GMBUS5, gmbus5);
 
 	ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
 
 	/* Clear GMBUS5 after each index transfer */
 	if (gmbus5)
-		I915_WRITE(GMBUS5, 0);
+		I915_WRITE_FW(GMBUS5, 0);
 
 	return ret;
 }
@@ -478,11 +468,15 @@
 					       struct intel_gmbus,
 					       adapter);
 	struct drm_i915_private *dev_priv = bus->dev_priv;
+	const unsigned int fw =
+		intel_uncore_forcewake_for_reg(dev_priv, GMBUS0,
+					       FW_REG_READ | FW_REG_WRITE);
 	int i = 0, inc, try = 0;
 	int ret = 0;
 
+	intel_uncore_forcewake_get(dev_priv, fw);
 retry:
-	I915_WRITE(GMBUS0, bus->reg0);
+	I915_WRITE_FW(GMBUS0, bus->reg0);
 
 	for (; i < num; i += inc) {
 		inc = 1;
@@ -496,8 +490,8 @@
 		}
 
 		if (!ret)
-			ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
-						   GMBUS_HW_WAIT_EN);
+			ret = gmbus_wait(dev_priv,
+					 GMBUS_HW_WAIT_PHASE, GMBUS_HW_WAIT_EN);
 		if (ret == -ETIMEDOUT)
 			goto timeout;
 		else if (ret)
@@ -508,7 +502,7 @@
 	 * a STOP on the very first cycle. To simplify the code we
 	 * unconditionally generate the STOP condition with an additional gmbus
 	 * cycle. */
-	I915_WRITE(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+	I915_WRITE_FW(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
 
 	/* Mark the GMBUS interface as disabled after waiting for idle.
 	 * We will re-enable it at the start of the next xfer,
@@ -519,7 +513,7 @@
 			 adapter->name);
 		ret = -ETIMEDOUT;
 	}
-	I915_WRITE(GMBUS0, 0);
+	I915_WRITE_FW(GMBUS0, 0);
 	ret = ret ?: i;
 	goto out;
 
@@ -548,9 +542,9 @@
 	 * of resetting the GMBUS controller and so clearing the
 	 * BUS_ERROR raised by the slave's NAK.
 	 */
-	I915_WRITE(GMBUS1, GMBUS_SW_CLR_INT);
-	I915_WRITE(GMBUS1, 0);
-	I915_WRITE(GMBUS0, 0);
+	I915_WRITE_FW(GMBUS1, GMBUS_SW_CLR_INT);
+	I915_WRITE_FW(GMBUS1, 0);
+	I915_WRITE_FW(GMBUS0, 0);
 
 	DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
 			 adapter->name, msgs[i].addr,
@@ -573,7 +567,7 @@
 timeout:
 	DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
 		      bus->adapter.name, bus->reg0 & 0xff);
-	I915_WRITE(GMBUS0, 0);
+	I915_WRITE_FW(GMBUS0, 0);
 
 	/*
 	 * Hardware may not support GMBUS over these pins? Try GPIO bitbanging
@@ -582,6 +576,7 @@
 	ret = -EAGAIN;
 
 out:
+	intel_uncore_forcewake_put(dev_priv, fw);
 	return ret;
 }
 
@@ -633,6 +628,7 @@
 int intel_setup_gmbus(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct intel_gmbus *bus;
 	unsigned int pin;
 	int ret;
@@ -663,7 +659,7 @@
 			 "i915 gmbus %s",
 			 get_gmbus_pin(dev_priv, pin)->name);
 
-		bus->adapter.dev.parent = &dev->pdev->dev;
+		bus->adapter.dev.parent = &pdev->dev;
 		bus->dev_priv = dev_priv;
 
 		bus->adapter.algo = &gmbus_algorithm;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 414ddda..0adb879 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -156,6 +156,11 @@
 #define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
 #define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)
 
+#define GEN8_CTX_STATUS_COMPLETED_MASK \
+	 (GEN8_CTX_STATUS_ACTIVE_IDLE | \
+	  GEN8_CTX_STATUS_PREEMPTED | \
+	  GEN8_CTX_STATUS_ELEMENT_SWITCH)
+
 #define CTX_LRI_HEADER_0		0x01
 #define CTX_CONTEXT_CONTROL		0x02
 #define CTX_RING_HEAD			0x04
@@ -221,10 +226,16 @@
 /* Typical size of the average request (2 pipecontrols and a MI_BB) */
 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */
 
+#define WA_TAIL_DWORDS 2
+
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 					    struct intel_engine_cs *engine);
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
 				struct intel_engine_cs *engine);
+static void execlists_init_reg_state(u32 *reg_state,
+				     struct i915_gem_context *ctx,
+				     struct intel_engine_cs *engine,
+				     struct intel_ring *ring);
 
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -263,12 +274,10 @@
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 
-	if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
-		engine->idle_lite_restore_wa = ~0;
-
-	engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
-					IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
-					(engine->id == VCS || engine->id == VCS2);
+	engine->disable_lite_restore_wa =
+		(IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+		 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
+		(engine->id == VCS || engine->id == VCS2);
 
 	engine->ctx_desc_template = GEN8_CTX_VALID;
 	if (IS_GEN8(dev_priv))
@@ -288,7 +297,6 @@
 /**
  * intel_lr_context_descriptor_update() - calculate & cache the descriptor
  * 					  descriptor for a pinned context
- *
  * @ctx: Context to work on
  * @engine: Engine the descriptor will be used with
  *
@@ -297,12 +305,13 @@
  * expensive to calculate, we'll just do it once and cache the result,
  * which remains valid until the context is unpinned.
  *
- * This is what a descriptor looks like, from LSB to MSB:
- *    bits  0-11:    flags, GEN8_CTX_* (cached in ctx_desc_template)
- *    bits 12-31:    LRCA, GTT address of (the HWSP of) this context
- *    bits 32-52:    ctx ID, a globally unique tag
- *    bits 53-54:    mbz, reserved for use by hardware
- *    bits 55-63:    group ID, currently unused and set to 0
+ * This is what a descriptor looks like, from LSB to MSB::
+ *
+ *      bits  0-11:    flags, GEN8_CTX_* (cached in ctx_desc_template)
+ *      bits 12-31:    LRCA, GTT address of (the HWSP of) this context
+ *      bits 32-52:    ctx ID, a globally unique tag
+ *      bits 53-54:    mbz, reserved for use by hardware
+ *      bits 55-63:    group ID, currently unused and set to 0
  */
 static void
 intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
@@ -315,7 +324,7 @@
 
 	desc = ctx->desc_template;				/* bits  3-4  */
 	desc |= engine->ctx_desc_template;			/* bits  0-11 */
-	desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
+	desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE;
 								/* bits 12-31 */
 	desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;		/* bits 32-52 */
 
@@ -328,85 +337,9 @@
 	return ctx->engine[engine->id].lrc_desc;
 }
 
-static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
-				 struct drm_i915_gem_request *rq1)
-{
-
-	struct intel_engine_cs *engine = rq0->engine;
-	struct drm_i915_private *dev_priv = rq0->i915;
-	uint64_t desc[2];
-
-	if (rq1) {
-		desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
-		rq1->elsp_submitted++;
-	} else {
-		desc[1] = 0;
-	}
-
-	desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
-	rq0->elsp_submitted++;
-
-	/* You must always write both descriptors in the order below. */
-	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
-	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
-
-	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
-	/* The context is automatically loaded after the following */
-	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
-
-	/* ELSP is a wo register, use another nearby reg for posting */
-	POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
-}
-
-static void
-execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
-{
-	ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
-	ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
-	ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
-	ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
-}
-
-static void execlists_update_context(struct drm_i915_gem_request *rq)
-{
-	struct intel_engine_cs *engine = rq->engine;
-	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
-	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
-
-	reg_state[CTX_RING_TAIL+1] = rq->tail;
-
-	/* True 32b PPGTT with dynamic page allocation: update PDP
-	 * registers and point the unallocated PDPs to scratch page.
-	 * PML4 is allocated during ppgtt init, so this is not needed
-	 * in 48-bit mode.
-	 */
-	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
-		execlists_update_context_pdps(ppgtt, reg_state);
-}
-
-static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
-				      struct drm_i915_gem_request *rq1)
-{
-	struct drm_i915_private *dev_priv = rq0->i915;
-	unsigned int fw_domains = rq0->engine->fw_domains;
-
-	execlists_update_context(rq0);
-
-	if (rq1)
-		execlists_update_context(rq1);
-
-	spin_lock_irq(&dev_priv->uncore.lock);
-	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
-
-	execlists_elsp_write(rq0, rq1);
-
-	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
-	spin_unlock_irq(&dev_priv->uncore.lock);
-}
-
-static inline void execlists_context_status_change(
-		struct drm_i915_gem_request *rq,
-		unsigned long status)
+static inline void
+execlists_context_status_change(struct drm_i915_gem_request *rq,
+				unsigned long status)
 {
 	/*
 	 * Only used when GVT-g is enabled now. When GVT-g is disabled,
@@ -418,285 +351,260 @@
 	atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
 }
 
-static void execlists_context_unqueue(struct intel_engine_cs *engine)
+static void
+execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 {
-	struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
-	struct drm_i915_gem_request *cursor, *tmp;
+	ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
+	ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
+	ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
+	ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+}
 
-	assert_spin_locked(&engine->execlist_lock);
+static u64 execlists_update_context(struct drm_i915_gem_request *rq)
+{
+	struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
+	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+	u32 *reg_state = ce->lrc_reg_state;
 
-	/*
-	 * If irqs are not active generate a warning as batches that finish
-	 * without the irqs may get lost and a GPU Hang may occur.
+	reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
+
+	/* True 32b PPGTT with dynamic page allocation: update PDP
+	 * registers and point the unallocated PDPs to scratch page.
+	 * PML4 is allocated during ppgtt init, so this is not needed
+	 * in 48-bit mode.
 	 */
-	WARN_ON(!intel_irqs_enabled(engine->i915));
+	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+		execlists_update_context_pdps(ppgtt, reg_state);
 
-	/* Try to read in pairs */
-	list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
-				 execlist_link) {
-		if (!req0) {
-			req0 = cursor;
-		} else if (req0->ctx == cursor->ctx) {
-			/* Same ctx: ignore first request, as second request
-			 * will update tail past first request's workload */
-			cursor->elsp_submitted = req0->elsp_submitted;
-			list_del(&req0->execlist_link);
-			i915_gem_request_unreference(req0);
-			req0 = cursor;
-		} else {
-			if (IS_ENABLED(CONFIG_DRM_I915_GVT)) {
-				/*
-				 * req0 (after merged) ctx requires single
-				 * submission, stop picking
-				 */
-				if (req0->ctx->execlists_force_single_submission)
-					break;
-				/*
-				 * req0 ctx doesn't require single submission,
-				 * but next req ctx requires, stop picking
-				 */
-				if (cursor->ctx->execlists_force_single_submission)
-					break;
-			}
-			req1 = cursor;
-			WARN_ON(req1->elsp_submitted);
-			break;
-		}
-	}
-
-	if (unlikely(!req0))
-		return;
-
-	execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN);
-
-	if (req1)
-		execlists_context_status_change(req1,
-						INTEL_CONTEXT_SCHEDULE_IN);
-
-	if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
-		/*
-		 * WaIdleLiteRestore: make sure we never cause a lite restore
-		 * with HEAD==TAIL.
-		 *
-		 * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we
-		 * resubmit the request. See gen8_emit_request() for where we
-		 * prepare the padding after the end of the request.
-		 */
-		struct intel_ringbuffer *ringbuf;
-
-		ringbuf = req0->ctx->engine[engine->id].ringbuf;
-		req0->tail += 8;
-		req0->tail &= ringbuf->size - 1;
-	}
-
-	execlists_submit_requests(req0, req1);
+	return ce->lrc_desc;
 }
 
-static unsigned int
-execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
-{
-	struct drm_i915_gem_request *head_req;
-
-	assert_spin_locked(&engine->execlist_lock);
-
-	head_req = list_first_entry_or_null(&engine->execlist_queue,
-					    struct drm_i915_gem_request,
-					    execlist_link);
-
-	if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
-               return 0;
-
-	WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
-
-	if (--head_req->elsp_submitted > 0)
-		return 0;
-
-	execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT);
-
-	list_del(&head_req->execlist_link);
-	i915_gem_request_unreference(head_req);
-
-	return 1;
-}
-
-static u32
-get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
-		   u32 *context_id)
+static void execlists_submit_ports(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	u32 status;
+	struct execlist_port *port = engine->execlist_port;
+	u32 __iomem *elsp =
+		dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine));
+	u64 desc[2];
 
-	read_pointer %= GEN8_CSB_ENTRIES;
+	if (!port[0].count)
+		execlists_context_status_change(port[0].request,
+						INTEL_CONTEXT_SCHEDULE_IN);
+	desc[0] = execlists_update_context(port[0].request);
+	engine->preempt_wa = port[0].count++; /* bdw only? fixed on skl? */
 
-	status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));
+	if (port[1].request) {
+		GEM_BUG_ON(port[1].count);
+		execlists_context_status_change(port[1].request,
+						INTEL_CONTEXT_SCHEDULE_IN);
+		desc[1] = execlists_update_context(port[1].request);
+		port[1].count = 1;
+	} else {
+		desc[1] = 0;
+	}
+	GEM_BUG_ON(desc[0] == desc[1]);
 
-	if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
-		return 0;
+	/* You must always write both descriptors in the order below. */
+	writel(upper_32_bits(desc[1]), elsp);
+	writel(lower_32_bits(desc[1]), elsp);
 
-	*context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
-							      read_pointer));
-
-	return status;
+	writel(upper_32_bits(desc[0]), elsp);
+	/* The context is automatically loaded after the following */
+	writel(lower_32_bits(desc[0]), elsp);
 }
 
-/**
- * intel_lrc_irq_handler() - handle Context Switch interrupts
- * @data: tasklet handler passed in unsigned long
- *
+static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
+{
+	return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
+		ctx->execlists_force_single_submission);
+}
+
+static bool can_merge_ctx(const struct i915_gem_context *prev,
+			  const struct i915_gem_context *next)
+{
+	if (prev != next)
+		return false;
+
+	if (ctx_single_port_submission(prev))
+		return false;
+
+	return true;
+}
+
+static void execlists_dequeue(struct intel_engine_cs *engine)
+{
+	struct drm_i915_gem_request *cursor, *last;
+	struct execlist_port *port = engine->execlist_port;
+	bool submit = false;
+
+	last = port->request;
+	if (last)
+		/* WaIdleLiteRestore:bdw,skl
+		 * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
+		 * as we resubmit the request. See gen8_emit_request()
+		 * for where we prepare the padding after the end of the
+		 * request.
+		 */
+		last->tail = last->wa_tail;
+
+	GEM_BUG_ON(port[1].request);
+
+	/* Hardware submission is through 2 ports. Conceptually each port
+	 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
+	 * static for a context, and unique to each, so we only execute
+	 * requests belonging to a single context from each ring. RING_HEAD
+	 * is maintained by the CS in the context image, it marks the place
+	 * where it got up to last time, and through RING_TAIL we tell the CS
+	 * where we want to execute up to this time.
+	 *
+	 * In this list the requests are in order of execution. Consecutive
+	 * requests from the same context are adjacent in the ringbuffer. We
+	 * can combine these requests into a single RING_TAIL update:
+	 *
+	 *              RING_HEAD...req1...req2
+	 *                                    ^- RING_TAIL
+	 * since to execute req2 the CS must first execute req1.
+	 *
+	 * Our goal then is to point each port to the end of a consecutive
+	 * sequence of requests as being the most optimal (fewest wake ups
+	 * and context switches) submission.
+	 */
+
+	spin_lock(&engine->execlist_lock);
+	list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) {
+		/* Can we combine this request with the current port? It has to
+		 * be the same context/ringbuffer and not have any exceptions
+		 * (e.g. GVT saying never to combine contexts).
+		 *
+		 * If we can combine the requests, we can execute both by
+		 * updating the RING_TAIL to point to the end of the second
+		 * request, and so we never need to tell the hardware about
+		 * the first.
+		 */
+		if (last && !can_merge_ctx(cursor->ctx, last->ctx)) {
+			/* If we are on the second port and cannot combine
+			 * this request with the last, then we are done.
+			 */
+			if (port != engine->execlist_port)
+				break;
+
+			/* If GVT overrides us we only ever submit port[0],
+			 * leaving port[1] empty. Note that we also have
+			 * to be careful that we don't queue the same
+			 * context (even though a different request) to
+			 * the second port.
+			 */
+			if (ctx_single_port_submission(cursor->ctx))
+				break;
+
+			GEM_BUG_ON(last->ctx == cursor->ctx);
+
+			i915_gem_request_assign(&port->request, last);
+			port++;
+		}
+		last = cursor;
+		submit = true;
+	}
+	if (submit) {
+		/* Decouple all the requests submitted from the queue */
+		engine->execlist_queue.next = &cursor->execlist_link;
+		cursor->execlist_link.prev = &engine->execlist_queue;
+
+		i915_gem_request_assign(&port->request, last);
+	}
+	spin_unlock(&engine->execlist_lock);
+
+	if (submit)
+		execlists_submit_ports(engine);
+}
+
+static bool execlists_elsp_idle(struct intel_engine_cs *engine)
+{
+	return !engine->execlist_port[0].request;
+}
+
+static bool execlists_elsp_ready(struct intel_engine_cs *engine)
+{
+	int port;
+
+	port = 1; /* wait for a free slot */
+	if (engine->disable_lite_restore_wa || engine->preempt_wa)
+		port = 0; /* wait for GPU to be idle before continuing */
+
+	return !engine->execlist_port[port].request;
+}
+
+/*
  * Check the unread Context Status Buffers and manage the submission of new
  * contexts to the ELSP accordingly.
  */
 static void intel_lrc_irq_handler(unsigned long data)
 {
 	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+	struct execlist_port *port = engine->execlist_port;
 	struct drm_i915_private *dev_priv = engine->i915;
-	u32 status_pointer;
-	unsigned int read_pointer, write_pointer;
-	u32 csb[GEN8_CSB_ENTRIES][2];
-	unsigned int csb_read = 0, i;
-	unsigned int submit_contexts = 0;
 
 	intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
 
-	status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
+	if (!execlists_elsp_idle(engine)) {
+		u32 __iomem *csb_mmio =
+			dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
+		u32 __iomem *buf =
+			dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0));
+		unsigned int csb, head, tail;
 
-	read_pointer = engine->next_context_status_buffer;
-	write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
-	if (read_pointer > write_pointer)
-		write_pointer += GEN8_CSB_ENTRIES;
+		csb = readl(csb_mmio);
+		head = GEN8_CSB_READ_PTR(csb);
+		tail = GEN8_CSB_WRITE_PTR(csb);
+		if (tail < head)
+			tail += GEN8_CSB_ENTRIES;
+		while (head < tail) {
+			unsigned int idx = ++head % GEN8_CSB_ENTRIES;
+			unsigned int status = readl(buf + 2 * idx);
 
-	while (read_pointer < write_pointer) {
-		if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
-			break;
-		csb[csb_read][0] = get_context_status(engine, ++read_pointer,
-						      &csb[csb_read][1]);
-		csb_read++;
+			if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
+				continue;
+
+			GEM_BUG_ON(port[0].count == 0);
+			if (--port[0].count == 0) {
+				GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
+				execlists_context_status_change(port[0].request,
+								INTEL_CONTEXT_SCHEDULE_OUT);
+
+				i915_gem_request_put(port[0].request);
+				port[0] = port[1];
+				memset(&port[1], 0, sizeof(port[1]));
+
+				engine->preempt_wa = false;
+			}
+
+			GEM_BUG_ON(port[0].count == 0 &&
+				   !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
+		}
+
+		writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
+				     GEN8_CSB_WRITE_PTR(csb) << 8),
+		       csb_mmio);
 	}
 
-	engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
-
-	/* Update the read pointer to the old write pointer. Manual ringbuffer
-	 * management ftw </sarcasm> */
-	I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
-		      _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
-				    engine->next_context_status_buffer << 8));
+	if (execlists_elsp_ready(engine))
+		execlists_dequeue(engine);
 
 	intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
-
-	spin_lock(&engine->execlist_lock);
-
-	for (i = 0; i < csb_read; i++) {
-		if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
-			if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
-				if (execlists_check_remove_request(engine, csb[i][1]))
-					WARN(1, "Lite Restored request removed from queue\n");
-			} else
-				WARN(1, "Preemption without Lite Restore\n");
-		}
-
-		if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
-		    GEN8_CTX_STATUS_ELEMENT_SWITCH))
-			submit_contexts +=
-				execlists_check_remove_request(engine, csb[i][1]);
-	}
-
-	if (submit_contexts) {
-		if (!engine->disable_lite_restore_wa ||
-		    (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
-			execlists_context_unqueue(engine);
-	}
-
-	spin_unlock(&engine->execlist_lock);
-
-	if (unlikely(submit_contexts > 2))
-		DRM_ERROR("More than two context complete events?\n");
 }
 
-static void execlists_context_queue(struct drm_i915_gem_request *request)
+static void execlists_submit_request(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
-	struct drm_i915_gem_request *cursor;
-	int num_elements = 0;
+	unsigned long flags;
 
-	spin_lock_bh(&engine->execlist_lock);
+	spin_lock_irqsave(&engine->execlist_lock, flags);
 
-	list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
-		if (++num_elements > 2)
-			break;
-
-	if (num_elements > 2) {
-		struct drm_i915_gem_request *tail_req;
-
-		tail_req = list_last_entry(&engine->execlist_queue,
-					   struct drm_i915_gem_request,
-					   execlist_link);
-
-		if (request->ctx == tail_req->ctx) {
-			WARN(tail_req->elsp_submitted != 0,
-				"More than 2 already-submitted reqs queued\n");
-			list_del(&tail_req->execlist_link);
-			i915_gem_request_unreference(tail_req);
-		}
-	}
-
-	i915_gem_request_reference(request);
 	list_add_tail(&request->execlist_link, &engine->execlist_queue);
-	request->ctx_hw_id = request->ctx->hw_id;
-	if (num_elements == 0)
-		execlists_context_unqueue(engine);
+	if (execlists_elsp_idle(engine))
+		tasklet_hi_schedule(&engine->irq_tasklet);
 
-	spin_unlock_bh(&engine->execlist_lock);
-}
-
-static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	uint32_t flush_domains;
-	int ret;
-
-	flush_domains = 0;
-	if (engine->gpu_caches_dirty)
-		flush_domains = I915_GEM_GPU_DOMAINS;
-
-	ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-	if (ret)
-		return ret;
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
-static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
-				 struct list_head *vmas)
-{
-	const unsigned other_rings = ~intel_engine_flag(req->engine);
-	struct i915_vma *vma;
-	uint32_t flush_domains = 0;
-	bool flush_chipset = false;
-	int ret;
-
-	list_for_each_entry(vma, vmas, exec_list) {
-		struct drm_i915_gem_object *obj = vma->obj;
-
-		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, req->engine, &req);
-			if (ret)
-				return ret;
-		}
-
-		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
-			flush_chipset |= i915_gem_clflush_object(obj, false);
-
-		flush_domains |= obj->base.write_domain;
-	}
-
-	if (flush_domains & I915_GEM_DOMAIN_GTT)
-		wmb();
-
-	/* Unconditionally invalidate gpu caches and ensure that we do flush
-	 * any residual writes from the previous batch.
-	 */
-	return logical_ring_invalidate_all_caches(req);
+	spin_unlock_irqrestore(&engine->execlist_lock, flags);
 }
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -717,7 +625,11 @@
 			return ret;
 	}
 
-	request->ringbuf = ce->ringbuf;
+	request->ring = ce->ring;
+
+	ret = intel_lr_context_pin(request->ctx, engine);
+	if (ret)
+		return ret;
 
 	if (i915.enable_guc_submission) {
 		/*
@@ -725,23 +637,19 @@
 		 * going any further, as the i915_add_request() call
 		 * later on mustn't fail ...
 		 */
-		ret = i915_guc_wq_check_space(request);
+		ret = i915_guc_wq_reserve(request);
 		if (ret)
-			return ret;
+			goto err_unpin;
 	}
 
-	ret = intel_lr_context_pin(request->ctx, engine);
-	if (ret)
-		return ret;
-
 	ret = intel_ring_begin(request, 0);
 	if (ret)
-		goto err_unpin;
+		goto err_unreserve;
 
 	if (!ce->initialised) {
 		ret = engine->init_context(request);
 		if (ret)
-			goto err_unpin;
+			goto err_unreserve;
 
 		ce->initialised = true;
 	}
@@ -756,13 +664,16 @@
 	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
 	return 0;
 
+err_unreserve:
+	if (i915.enable_guc_submission)
+		i915_guc_wq_unreserve(request);
 err_unpin:
 	intel_lr_context_unpin(request->ctx, engine);
 	return ret;
 }
 
 /*
- * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
+ * intel_logical_ring_advance() - advance the tail and prepare for submission
  * @request: Request to advance the logical ringbuffer of.
  *
  * The tail is updated in our logical ringbuffer struct, not in the actual context. What
@@ -771,13 +682,13 @@
  * point, the tail *inside* the context is updated and the ELSP written to.
  */
 static int
-intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
+intel_logical_ring_advance(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
+	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
 
-	intel_logical_ring_advance(ringbuf);
-	request->tail = ringbuf->tail;
+	intel_ring_advance(ring);
+	request->tail = ring->tail;
 
 	/*
 	 * Here we add two extra NOOPs as padding to avoid
@@ -785,9 +696,10 @@
 	 *
 	 * Caller must reserve WA_TAIL_DWORDS for us!
 	 */
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+	request->wa_tail = ring->tail;
 
 	/* We keep the previous context alive until we retire the following
 	 * request. This ensures that any the context object is still pinned
@@ -797,168 +709,14 @@
 	 */
 	request->previous_context = engine->last_context;
 	engine->last_context = request->ctx;
-
-	if (i915.enable_guc_submission)
-		i915_guc_submit(request);
-	else
-		execlists_context_queue(request);
-
-	return 0;
-}
-
-/**
- * execlists_submission() - submit a batchbuffer for execution, Execlists style
- * @params: execbuffer call parameters.
- * @args: execbuffer call arguments.
- * @vmas: list of vmas.
- *
- * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
- * away the submission details of the execbuffer ioctl call.
- *
- * Return: non-zero if the submission fails.
- */
-int intel_execlists_submission(struct i915_execbuffer_params *params,
-			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas)
-{
-	struct drm_device       *dev = params->dev;
-	struct intel_engine_cs *engine = params->engine;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
-	u64 exec_start;
-	int instp_mode;
-	u32 instp_mask;
-	int ret;
-
-	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
-	instp_mask = I915_EXEC_CONSTANTS_MASK;
-	switch (instp_mode) {
-	case I915_EXEC_CONSTANTS_REL_GENERAL:
-	case I915_EXEC_CONSTANTS_ABSOLUTE:
-	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
-			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
-			return -EINVAL;
-		}
-
-		if (instp_mode != dev_priv->relative_constants_mode) {
-			if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
-				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
-				return -EINVAL;
-			}
-
-			/* The HW changed the meaning on this bit on gen6 */
-			instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
-		}
-		break;
-	default:
-		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
-		return -EINVAL;
-	}
-
-	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		DRM_DEBUG("sol reset is gen7 only\n");
-		return -EINVAL;
-	}
-
-	ret = execlists_move_to_gpu(params->request, vmas);
-	if (ret)
-		return ret;
-
-	if (engine == &dev_priv->engine[RCS] &&
-	    instp_mode != dev_priv->relative_constants_mode) {
-		ret = intel_ring_begin(params->request, 4);
-		if (ret)
-			return ret;
-
-		intel_logical_ring_emit(ringbuf, MI_NOOP);
-		intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
-		intel_logical_ring_emit_reg(ringbuf, INSTPM);
-		intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
-		intel_logical_ring_advance(ringbuf);
-
-		dev_priv->relative_constants_mode = instp_mode;
-	}
-
-	exec_start = params->batch_obj_vm_offset +
-		     args->batch_start_offset;
-
-	ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
-	if (ret)
-		return ret;
-
-	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
-
-	i915_gem_execbuffer_move_to_active(vmas, params->request);
-
-	return 0;
-}
-
-void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
-{
-	struct drm_i915_gem_request *req, *tmp;
-	LIST_HEAD(cancel_list);
-
-	WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));
-
-	spin_lock_bh(&engine->execlist_lock);
-	list_replace_init(&engine->execlist_queue, &cancel_list);
-	spin_unlock_bh(&engine->execlist_lock);
-
-	list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
-		list_del(&req->execlist_link);
-		i915_gem_request_unreference(req);
-	}
-}
-
-void intel_logical_ring_stop(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	int ret;
-
-	if (!intel_engine_initialized(engine))
-		return;
-
-	ret = intel_engine_idle(engine);
-	if (ret)
-		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
-			  engine->name, ret);
-
-	/* TODO: Is this correct with Execlists enabled? */
-	I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
-	if (intel_wait_for_register(dev_priv,
-				    RING_MI_MODE(engine->mmio_base),
-				    MODE_IDLE, MODE_IDLE,
-				    1000)) {
-		DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
-		return;
-	}
-	I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
-}
-
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	int ret;
-
-	if (!engine->gpu_caches_dirty)
-		return 0;
-
-	ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
-	if (ret)
-		return ret;
-
-	engine->gpu_caches_dirty = false;
 	return 0;
 }
 
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
 				struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = ctx->i915;
 	struct intel_context *ce = &ctx->engine[engine->id];
 	void *vaddr;
-	u32 *lrc_reg_state;
 	int ret;
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
@@ -966,41 +724,42 @@
 	if (ce->pin_count++)
 		return 0;
 
-	ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
-				    PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+	ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN,
+			   PIN_OFFSET_BIAS | GUC_WOPCM_TOP | PIN_GLOBAL);
 	if (ret)
 		goto err;
 
-	vaddr = i915_gem_object_pin_map(ce->state);
+	vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
 		ret = PTR_ERR(vaddr);
-		goto unpin_ctx_obj;
+		goto unpin_vma;
 	}
 
-	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-
-	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
+	ret = intel_ring_pin(ce->ring);
 	if (ret)
 		goto unpin_map;
 
-	i915_gem_context_reference(ctx);
-	ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
 	intel_lr_context_descriptor_update(ctx, engine);
 
-	lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
-	ce->lrc_reg_state = lrc_reg_state;
-	ce->state->dirty = true;
+	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+	ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
+		i915_ggtt_offset(ce->ring->vma);
+
+	ce->state->obj->dirty = true;
 
 	/* Invalidate GuC TLB. */
-	if (i915.enable_guc_submission)
+	if (i915.enable_guc_submission) {
+		struct drm_i915_private *dev_priv = ctx->i915;
 		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+	}
 
+	i915_gem_context_get(ctx);
 	return 0;
 
 unpin_map:
-	i915_gem_object_unpin_map(ce->state);
-unpin_ctx_obj:
-	i915_gem_object_ggtt_unpin(ce->state);
+	i915_gem_object_unpin_map(ce->state->obj);
+unpin_vma:
+	__i915_vma_unpin(ce->state);
 err:
 	ce->pin_count = 0;
 	return ret;
@@ -1017,30 +776,24 @@
 	if (--ce->pin_count)
 		return;
 
-	intel_unpin_ringbuffer_obj(ce->ringbuf);
+	intel_ring_unpin(ce->ring);
 
-	i915_gem_object_unpin_map(ce->state);
-	i915_gem_object_ggtt_unpin(ce->state);
+	i915_gem_object_unpin_map(ce->state->obj);
+	i915_vma_unpin(ce->state);
 
-	ce->lrc_vma = NULL;
-	ce->lrc_desc = 0;
-	ce->lrc_reg_state = NULL;
-
-	i915_gem_context_unreference(ctx);
+	i915_gem_context_put(ctx);
 }
 
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
-	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
 
 	if (w->count == 0)
 		return 0;
 
-	engine->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(req);
+	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
@@ -1048,17 +801,16 @@
 	if (ret)
 		return ret;
 
-	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
 	for (i = 0; i < w->count; i++) {
-		intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
-		intel_logical_ring_emit(ringbuf, w->reg[i].value);
+		intel_ring_emit_reg(ring, w->reg[i].addr);
+		intel_ring_emit(ring, w->reg[i].value);
 	}
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_emit(ring, MI_NOOP);
 
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_advance(ring);
 
-	engine->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(req);
+	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
@@ -1094,7 +846,7 @@
  * code duplication.
  */
 static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
-						uint32_t *const batch,
+						uint32_t *batch,
 						uint32_t index)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
@@ -1113,7 +865,7 @@
 	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
 				   MI_SRM_LRM_GLOBAL_GTT));
 	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
-	wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
+	wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
 	wa_ctx_emit(batch, index, 0);
 
 	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
@@ -1131,7 +883,7 @@
 	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
 				   MI_SRM_LRM_GLOBAL_GTT));
 	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
-	wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
+	wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
 	wa_ctx_emit(batch, index, 0);
 
 	return index;
@@ -1156,37 +908,24 @@
 	return 0;
 }
 
-/**
- * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
+/*
+ * Typically we only have one indirect_ctx and per_ctx batch buffer which are
+ * initialized at the beginning and shared across all contexts but this field
+ * helps us to have multiple batches at different offsets and select them based
+ * on a criteria. At the moment this batch always start at the beginning of the page
+ * and at this point we don't have multiple wa_ctx batch buffers.
  *
- * @engine: only applicable for RCS
- * @wa_ctx: structure representing wa_ctx
- *  offset: specifies start of the batch, should be cache-aligned. This is updated
- *    with the offset value received as input.
- *  size: size of the batch in DWORDS but HW expects in terms of cachelines
- * @batch: page in which WA are loaded
- * @offset: This field specifies the start of the batch, it should be
- *  cache-aligned otherwise it is adjusted accordingly.
- *  Typically we only have one indirect_ctx and per_ctx batch buffer which are
- *  initialized at the beginning and shared across all contexts but this field
- *  helps us to have multiple batches at different offsets and select them based
- *  on a criteria. At the moment this batch always start at the beginning of the page
- *  and at this point we don't have multiple wa_ctx batch buffers.
+ * The number of WA applied are not known at the beginning; we use this field
+ * to return the no of DWORDS written.
  *
- *  The number of WA applied are not known at the beginning; we use this field
- *  to return the no of DWORDS written.
- *
- *  It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
- *  so it adds NOOPs as padding to make it cacheline aligned.
- *  MI_BATCH_BUFFER_END will be added to perctx batch and both of them together
- *  makes a complete batch buffer.
- *
- * Return: non-zero if we exceed the PAGE_SIZE limit.
+ * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
+ * so it adds NOOPs as padding to make it cacheline aligned.
+ * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together
+ * makes a complete batch buffer.
  */
-
 static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
 				    struct i915_wa_ctx_bb *wa_ctx,
-				    uint32_t *const batch,
+				    uint32_t *batch,
 				    uint32_t *offset)
 {
 	uint32_t scratch_addr;
@@ -1205,7 +944,7 @@
 
 	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
 	/* Actual scratch location is at 128 bytes offset */
-	scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
+	scratch_addr = i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
 
 	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
 	wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
@@ -1230,26 +969,18 @@
 	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
 }
 
-/**
- * gen8_init_perctx_bb() - initialize per ctx batch with WA
+/*
+ *  This batch is started immediately after indirect_ctx batch. Since we ensure
+ *  that indirect_ctx ends on a cacheline this batch is aligned automatically.
  *
- * @engine: only applicable for RCS
- * @wa_ctx: structure representing wa_ctx
- *  offset: specifies start of the batch, should be cache-aligned.
- *  size: size of the batch in DWORDS but HW expects in terms of cachelines
- * @batch: page in which WA are loaded
- * @offset: This field specifies the start of this batch.
- *   This batch is started immediately after indirect_ctx batch. Since we ensure
- *   that indirect_ctx ends on a cacheline this batch is aligned automatically.
- *
- *   The number of DWORDS written are returned using this field.
+ *  The number of DWORDS written are returned using this field.
  *
  *  This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
  *  to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
  */
 static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
 			       struct i915_wa_ctx_bb *wa_ctx,
-			       uint32_t *const batch,
+			       uint32_t *batch,
 			       uint32_t *offset)
 {
 	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
@@ -1264,7 +995,7 @@
 
 static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
 				    struct i915_wa_ctx_bb *wa_ctx,
-				    uint32_t *const batch,
+				    uint32_t *batch,
 				    uint32_t *offset)
 {
 	int ret;
@@ -1282,11 +1013,18 @@
 		return ret;
 	index = ret;
 
+	/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl */
+	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
+	wa_ctx_emit_reg(batch, index, COMMON_SLICE_CHICKEN2);
+	wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(
+			    GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE));
+	wa_ctx_emit(batch, index, MI_NOOP);
+
 	/* WaClearSlmSpaceAtContextSwitch:kbl */
 	/* Actual scratch location is at 128 bytes offset */
 	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
-		uint32_t scratch_addr
-			= engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
+		u32 scratch_addr =
+			i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
 
 		wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
 		wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
@@ -1332,7 +1070,7 @@
 
 static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
 			       struct i915_wa_ctx_bb *wa_ctx,
-			       uint32_t *const batch,
+			       uint32_t *batch,
 			       uint32_t *offset)
 {
 	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
@@ -1378,44 +1116,44 @@
 
 static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
 {
-	int ret;
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int err;
 
-	engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm,
-						    PAGE_ALIGN(size));
-	if (IS_ERR(engine->wa_ctx.obj)) {
-		DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
-		ret = PTR_ERR(engine->wa_ctx.obj);
-		engine->wa_ctx.obj = NULL;
-		return ret;
+	obj = i915_gem_object_create(&engine->i915->drm, PAGE_ALIGN(size));
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err;
 	}
 
-	ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
-	if (ret) {
-		DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
-				 ret);
-		drm_gem_object_unreference(&engine->wa_ctx.obj->base);
-		return ret;
-	}
+	err = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH);
+	if (err)
+		goto err;
 
+	engine->wa_ctx.vma = vma;
 	return 0;
+
+err:
+	i915_gem_object_put(obj);
+	return err;
 }
 
 static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
 {
-	if (engine->wa_ctx.obj) {
-		i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
-		drm_gem_object_unreference(&engine->wa_ctx.obj->base);
-		engine->wa_ctx.obj = NULL;
-	}
+	i915_vma_unpin_and_release(&engine->wa_ctx.vma);
 }
 
 static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 {
-	int ret;
+	struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
 	uint32_t *batch;
 	uint32_t offset;
 	struct page *page;
-	struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
+	int ret;
 
 	WARN_ON(engine->id != RCS);
 
@@ -1427,7 +1165,7 @@
 	}
 
 	/* some WA perform writes to scratch page, ensure it is valid */
-	if (engine->scratch.obj == NULL) {
+	if (!engine->scratch) {
 		DRM_ERROR("scratch page not allocated for %s\n", engine->name);
 		return -EINVAL;
 	}
@@ -1438,7 +1176,7 @@
 		return ret;
 	}
 
-	page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
+	page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
 	batch = kmap_atomic(page);
 	offset = 0;
 
@@ -1485,55 +1223,37 @@
 	struct drm_i915_private *dev_priv = engine->i915;
 
 	I915_WRITE(RING_HWS_PGA(engine->mmio_base),
-		   (u32)engine->status_page.gfx_addr);
+		   engine->status_page.ggtt_offset);
 	POSTING_READ(RING_HWS_PGA(engine->mmio_base));
 }
 
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned int next_context_status_buffer_hw;
+	int ret;
+
+	ret = intel_mocs_init_engine(engine);
+	if (ret)
+		return ret;
 
 	lrc_init_hws(engine);
 
-	I915_WRITE_IMR(engine,
-		       ~(engine->irq_enable_mask | engine->irq_keep_mask));
+	intel_engine_reset_breadcrumbs(engine);
+
 	I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
 
 	I915_WRITE(RING_MODE_GEN7(engine),
 		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
 		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
-	POSTING_READ(RING_MODE_GEN7(engine));
 
-	/*
-	 * Instead of resetting the Context Status Buffer (CSB) read pointer to
-	 * zero, we need to read the write pointer from hardware and use its
-	 * value because "this register is power context save restored".
-	 * Effectively, these states have been observed:
-	 *
-	 *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
-	 * BDW  | CSB regs not reset       | CSB regs reset       |
-	 * CHT  | CSB regs not reset       | CSB regs not reset   |
-	 * SKL  |         ?                |         ?            |
-	 * BXT  |         ?                |         ?            |
-	 */
-	next_context_status_buffer_hw =
-		GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
-
-	/*
-	 * When the CSB registers are reset (also after power-up / gpu reset),
-	 * CSB write pointer is set to all 1's, which is not valid, use '5' in
-	 * this special case, so the first element read is CSB[0].
-	 */
-	if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
-		next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
-
-	engine->next_context_status_buffer = next_context_status_buffer_hw;
 	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
 
 	intel_engine_init_hangcheck(engine);
 
-	return intel_mocs_init_engine(engine);
+	if (!execlists_elsp_idle(engine))
+		execlists_submit_ports(engine);
+
+	return 0;
 }
 
 static int gen8_init_render_ring(struct intel_engine_cs *engine)
@@ -1569,11 +1289,57 @@
 	return init_workarounds_ring(engine);
 }
 
+static void reset_common_ring(struct intel_engine_cs *engine,
+			      struct drm_i915_gem_request *request)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	struct execlist_port *port = engine->execlist_port;
+	struct intel_context *ce = &request->ctx->engine[engine->id];
+
+	/* We want a simple context + ring to execute the breadcrumb update.
+	 * We cannot rely on the context being intact across the GPU hang,
+	 * so clear it and rebuild just what we need for the breadcrumb.
+	 * All pending requests for this context will be zapped, and any
+	 * future request will be after userspace has had the opportunity
+	 * to recreate its own state.
+	 */
+	execlists_init_reg_state(ce->lrc_reg_state,
+				 request->ctx, engine, ce->ring);
+
+	/* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
+	ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
+		i915_ggtt_offset(ce->ring->vma);
+	ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
+
+	request->ring->head = request->postfix;
+	request->ring->last_retired_head = -1;
+	intel_ring_update_space(request->ring);
+
+	if (i915.enable_guc_submission)
+		return;
+
+	/* Catch up with any missed context-switch interrupts */
+	I915_WRITE(RING_CONTEXT_STATUS_PTR(engine), _MASKED_FIELD(0xffff, 0));
+	if (request->ctx != port[0].request->ctx) {
+		i915_gem_request_put(port[0].request);
+		port[0] = port[1];
+		memset(&port[1], 0, sizeof(port[1]));
+	}
+
+	/* CS is stopped, and we will resubmit both ports on resume */
+	GEM_BUG_ON(request->ctx != port[0].request->ctx);
+	port[0].count = 0;
+	port[1].count = 0;
+
+	/* Reset WaIdleLiteRestore:bdw,skl as well */
+	request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
+}
+
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
+	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
 	int i, ret;
 
@@ -1581,28 +1347,27 @@
 	if (ret)
 		return ret;
 
-	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-		intel_logical_ring_emit_reg(ringbuf,
-					    GEN8_RING_PDP_UDW(engine, i));
-		intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
-		intel_logical_ring_emit_reg(ringbuf,
-					    GEN8_RING_PDP_LDW(engine, i));
-		intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
+		intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
+		intel_ring_emit(ring, upper_32_bits(pd_daddr));
+		intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
+		intel_ring_emit(ring, lower_32_bits(pd_daddr));
 	}
 
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
-			      u64 offset, unsigned dispatch_flags)
+			      u64 offset, u32 len,
+			      unsigned int dispatch_flags)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ring *ring = req->ring;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
@@ -1629,14 +1394,14 @@
 		return ret;
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
-				(ppgtt<<8) |
-				(dispatch_flags & I915_DISPATCH_RS ?
-				 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
-	intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
+			(ppgtt<<8) |
+			(dispatch_flags & I915_DISPATCH_RS ?
+			 MI_BATCH_RESOURCE_STREAMER : 0));
+	intel_ring_emit(ring, lower_32_bits(offset));
+	intel_ring_emit(ring, upper_32_bits(offset));
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1655,14 +1420,10 @@
 	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 }
 
-static int gen8_emit_flush(struct drm_i915_gem_request *request,
-			   u32 invalidate_domains,
-			   u32 unused)
+static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
-	struct intel_engine_cs *engine = ringbuf->engine;
-	struct drm_i915_private *dev_priv = request->i915;
-	uint32_t cmd;
+	struct intel_ring *ring = request->ring;
+	u32 cmd;
 	int ret;
 
 	ret = intel_ring_begin(request, 4);
@@ -1678,30 +1439,30 @@
 	 */
 	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
 
-	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
+	if (mode & EMIT_INVALIDATE) {
 		cmd |= MI_INVALIDATE_TLB;
-		if (engine == &dev_priv->engine[VCS])
+		if (request->engine->id == VCS)
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
-	intel_logical_ring_emit(ringbuf, cmd);
-	intel_logical_ring_emit(ringbuf,
-				I915_GEM_HWS_SCRATCH_ADDR |
-				MI_FLUSH_DW_USE_GTT);
-	intel_logical_ring_emit(ringbuf, 0); /* upper addr */
-	intel_logical_ring_emit(ringbuf, 0); /* value */
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring,
+			I915_GEM_HWS_SCRATCH_ADDR |
+			MI_FLUSH_DW_USE_GTT);
+	intel_ring_emit(ring, 0); /* upper addr */
+	intel_ring_emit(ring, 0); /* value */
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
-				  u32 invalidate_domains,
-				  u32 flush_domains)
+				  u32 mode)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
-	struct intel_engine_cs *engine = ringbuf->engine;
-	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	struct intel_ring *ring = request->ring;
+	struct intel_engine_cs *engine = request->engine;
+	u32 scratch_addr =
+		i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
 	u32 flags = 0;
 	int ret;
@@ -1709,14 +1470,14 @@
 
 	flags |= PIPE_CONTROL_CS_STALL;
 
-	if (flush_domains) {
+	if (mode & EMIT_FLUSH) {
 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
 		flags |= PIPE_CONTROL_FLUSH_ENABLE;
 	}
 
-	if (invalidate_domains) {
+	if (mode & EMIT_INVALIDATE) {
 		flags |= PIPE_CONTROL_TLB_INVALIDATE;
 		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
 		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
@@ -1751,40 +1512,40 @@
 		return ret;
 
 	if (vf_flush_wa) {
-		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
+		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
 	}
 
 	if (dc_flush_wa) {
-		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-		intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
+		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+		intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
 	}
 
-	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-	intel_logical_ring_emit(ringbuf, flags);
-	intel_logical_ring_emit(ringbuf, scratch_addr);
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, 0);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
 
 	if (dc_flush_wa) {
-		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-		intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
+		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+		intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
 	}
 
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1809,11 +1570,10 @@
  * used as a workaround for not being allowed to do lite
  * restore with HEAD==TAIL (WaIdleLiteRestore).
  */
-#define WA_TAIL_DWORDS 2
 
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
+	struct intel_ring *ring = request->ring;
 	int ret;
 
 	ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
@@ -1823,21 +1583,20 @@
 	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
 	BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
 
-	intel_logical_ring_emit(ringbuf,
-				(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
-	intel_logical_ring_emit(ringbuf,
-				intel_hws_seqno_address(request->engine) |
-				MI_FLUSH_DW_USE_GTT);
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, request->seqno);
-	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	return intel_logical_ring_advance_and_submit(request);
+	intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
+	intel_ring_emit(ring,
+			intel_hws_seqno_address(request->engine) |
+			MI_FLUSH_DW_USE_GTT);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, request->fence.seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_emit(ring, MI_NOOP);
+	return intel_logical_ring_advance(request);
 }
 
 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
+	struct intel_ring *ring = request->ring;
 	int ret;
 
 	ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
@@ -1851,50 +1610,19 @@
 	 * need a prior CS_STALL, which is emitted by the flush
 	 * following the batch.
 	 */
-	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-	intel_logical_ring_emit(ringbuf,
-				(PIPE_CONTROL_GLOBAL_GTT_IVB |
-				 PIPE_CONTROL_CS_STALL |
-				 PIPE_CONTROL_QW_WRITE));
-	intel_logical_ring_emit(ringbuf,
-				intel_hws_seqno_address(request->engine));
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring,
+			(PIPE_CONTROL_GLOBAL_GTT_IVB |
+			 PIPE_CONTROL_CS_STALL |
+			 PIPE_CONTROL_QW_WRITE));
+	intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, i915_gem_request_get_seqno(request));
 	/* We're thrashing one dword of HWS. */
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	return intel_logical_ring_advance_and_submit(request);
-}
-
-static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
-{
-	struct render_state so;
-	int ret;
-
-	ret = i915_gem_render_state_prepare(req->engine, &so);
-	if (ret)
-		return ret;
-
-	if (so.rodata == NULL)
-		return 0;
-
-	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
-				       I915_DISPATCH_SECURE);
-	if (ret)
-		goto out;
-
-	ret = req->engine->emit_bb_start(req,
-				       (so.ggtt_offset + so.aux_batch_offset),
-				       I915_DISPATCH_SECURE);
-	if (ret)
-		goto out;
-
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
-
-out:
-	i915_gem_render_state_fini(&so);
-	return ret;
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_emit(ring, MI_NOOP);
+	return intel_logical_ring_advance(request);
 }
 
 static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
@@ -1913,14 +1641,12 @@
 	if (ret)
 		DRM_ERROR("MOCS failed to program: expect performance issues.\n");
 
-	return intel_lr_context_render_state_init(req);
+	return i915_gem_render_state_init(req);
 }
 
 /**
  * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
- *
  * @engine: Engine Command Streamer.
- *
  */
 void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 {
@@ -1939,39 +1665,42 @@
 	dev_priv = engine->i915;
 
 	if (engine->buffer) {
-		intel_logical_ring_stop(engine);
 		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
 	}
 
 	if (engine->cleanup)
 		engine->cleanup(engine);
 
-	i915_cmd_parser_fini_ring(engine);
-	i915_gem_batch_pool_fini(&engine->batch_pool);
+	intel_engine_cleanup_common(engine);
 
-	intel_engine_fini_breadcrumbs(engine);
-
-	if (engine->status_page.obj) {
-		i915_gem_object_unpin_map(engine->status_page.obj);
-		engine->status_page.obj = NULL;
+	if (engine->status_page.vma) {
+		i915_gem_object_unpin_map(engine->status_page.vma->obj);
+		engine->status_page.vma = NULL;
 	}
 	intel_lr_context_unpin(dev_priv->kernel_context, engine);
 
-	engine->idle_lite_restore_wa = 0;
-	engine->disable_lite_restore_wa = false;
-	engine->ctx_desc_template = 0;
-
 	lrc_destroy_wa_ctx_obj(engine);
 	engine->i915 = NULL;
 }
 
+void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+
+	for_each_engine(engine, dev_priv)
+		engine->submit_request = execlists_submit_request;
+}
+
 static void
 logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 {
 	/* Default vfuncs which can be overriden by each engine. */
 	engine->init_hw = gen8_init_common_ring;
-	engine->emit_request = gen8_emit_request;
+	engine->reset_hw = reset_common_ring;
 	engine->emit_flush = gen8_emit_flush;
+	engine->emit_request = gen8_emit_request;
+	engine->submit_request = execlists_submit_request;
+
 	engine->irq_enable = gen8_logical_ring_enable_irq;
 	engine->irq_disable = gen8_logical_ring_disable_irq;
 	engine->emit_bb_start = gen8_emit_bb_start;
@@ -1980,41 +1709,71 @@
 }
 
 static inline void
-logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
+logical_ring_default_irqs(struct intel_engine_cs *engine)
 {
+	unsigned shift = engine->irq_shift;
 	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
 	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
 }
 
 static int
-lrc_setup_hws(struct intel_engine_cs *engine,
-	      struct drm_i915_gem_object *dctx_obj)
+lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
 {
+	const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE;
 	void *hws;
 
 	/* The HWSP is part of the default context object in LRC mode. */
-	engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
-				       LRC_PPHWSP_PN * PAGE_SIZE;
-	hws = i915_gem_object_pin_map(dctx_obj);
+	hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
 	if (IS_ERR(hws))
 		return PTR_ERR(hws);
-	engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
-	engine->status_page.obj = dctx_obj;
+
+	engine->status_page.page_addr = hws + hws_offset;
+	engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset;
+	engine->status_page.vma = vma;
 
 	return 0;
 }
 
+static void
+logical_ring_setup(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	enum forcewake_domains fw_domains;
+
+	intel_engine_setup_common(engine);
+
+	/* Intentionally left blank. */
+	engine->buffer = NULL;
+
+	fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
+						    RING_ELSP(engine),
+						    FW_REG_WRITE);
+
+	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+						     RING_CONTEXT_STATUS_PTR(engine),
+						     FW_REG_READ | FW_REG_WRITE);
+
+	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+						     RING_CONTEXT_STATUS_BUF_BASE(engine),
+						     FW_REG_READ);
+
+	engine->fw_domains = fw_domains;
+
+	tasklet_init(&engine->irq_tasklet,
+		     intel_lrc_irq_handler, (unsigned long)engine);
+
+	logical_ring_init_platform_invariants(engine);
+	logical_ring_default_vfuncs(engine);
+	logical_ring_default_irqs(engine);
+}
+
 static int
 logical_ring_init(struct intel_engine_cs *engine)
 {
 	struct i915_gem_context *dctx = engine->i915->kernel_context;
 	int ret;
 
-	ret = intel_engine_init_breadcrumbs(engine);
-	if (ret)
-		goto error;
-
-	ret = i915_cmd_parser_init_ring(engine);
+	ret = intel_engine_init_common(engine);
 	if (ret)
 		goto error;
 
@@ -2044,11 +1803,13 @@
 	return ret;
 }
 
-static int logical_render_ring_init(struct intel_engine_cs *engine)
+int logical_render_ring_init(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 	int ret;
 
+	logical_ring_setup(engine);
+
 	if (HAS_L3_DPF(dev_priv))
 		engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
@@ -2058,11 +1819,10 @@
 	else
 		engine->init_hw = gen8_init_render_ring;
 	engine->init_context = gen8_init_rcs_context;
-	engine->cleanup = intel_fini_pipe_control;
 	engine->emit_flush = gen8_emit_flush_render;
 	engine->emit_request = gen8_emit_request_render;
 
-	ret = intel_init_pipe_control(engine, 4096);
+	ret = intel_engine_create_scratch(engine, 4096);
 	if (ret)
 		return ret;
 
@@ -2085,160 +1845,11 @@
 	return ret;
 }
 
-static const struct logical_ring_info {
-	const char *name;
-	unsigned exec_id;
-	unsigned guc_id;
-	u32 mmio_base;
-	unsigned irq_shift;
-	int (*init)(struct intel_engine_cs *engine);
-} logical_rings[] = {
-	[RCS] = {
-		.name = "render ring",
-		.exec_id = I915_EXEC_RENDER,
-		.guc_id = GUC_RENDER_ENGINE,
-		.mmio_base = RENDER_RING_BASE,
-		.irq_shift = GEN8_RCS_IRQ_SHIFT,
-		.init = logical_render_ring_init,
-	},
-	[BCS] = {
-		.name = "blitter ring",
-		.exec_id = I915_EXEC_BLT,
-		.guc_id = GUC_BLITTER_ENGINE,
-		.mmio_base = BLT_RING_BASE,
-		.irq_shift = GEN8_BCS_IRQ_SHIFT,
-		.init = logical_ring_init,
-	},
-	[VCS] = {
-		.name = "bsd ring",
-		.exec_id = I915_EXEC_BSD,
-		.guc_id = GUC_VIDEO_ENGINE,
-		.mmio_base = GEN6_BSD_RING_BASE,
-		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
-		.init = logical_ring_init,
-	},
-	[VCS2] = {
-		.name = "bsd2 ring",
-		.exec_id = I915_EXEC_BSD,
-		.guc_id = GUC_VIDEO_ENGINE2,
-		.mmio_base = GEN8_BSD2_RING_BASE,
-		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
-		.init = logical_ring_init,
-	},
-	[VECS] = {
-		.name = "video enhancement ring",
-		.exec_id = I915_EXEC_VEBOX,
-		.guc_id = GUC_VIDEOENHANCE_ENGINE,
-		.mmio_base = VEBOX_RING_BASE,
-		.irq_shift = GEN8_VECS_IRQ_SHIFT,
-		.init = logical_ring_init,
-	},
-};
-
-static struct intel_engine_cs *
-logical_ring_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id)
+int logical_xcs_ring_init(struct intel_engine_cs *engine)
 {
-	const struct logical_ring_info *info = &logical_rings[id];
-	struct intel_engine_cs *engine = &dev_priv->engine[id];
-	enum forcewake_domains fw_domains;
+	logical_ring_setup(engine);
 
-	engine->id = id;
-	engine->name = info->name;
-	engine->exec_id = info->exec_id;
-	engine->guc_id = info->guc_id;
-	engine->mmio_base = info->mmio_base;
-
-	engine->i915 = dev_priv;
-
-	/* Intentionally left blank. */
-	engine->buffer = NULL;
-
-	fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
-						    RING_ELSP(engine),
-						    FW_REG_WRITE);
-
-	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
-						     RING_CONTEXT_STATUS_PTR(engine),
-						     FW_REG_READ | FW_REG_WRITE);
-
-	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
-						     RING_CONTEXT_STATUS_BUF_BASE(engine),
-						     FW_REG_READ);
-
-	engine->fw_domains = fw_domains;
-
-	INIT_LIST_HEAD(&engine->active_list);
-	INIT_LIST_HEAD(&engine->request_list);
-	INIT_LIST_HEAD(&engine->buffers);
-	INIT_LIST_HEAD(&engine->execlist_queue);
-	spin_lock_init(&engine->execlist_lock);
-
-	tasklet_init(&engine->irq_tasklet,
-		     intel_lrc_irq_handler, (unsigned long)engine);
-
-	logical_ring_init_platform_invariants(engine);
-	logical_ring_default_vfuncs(engine);
-	logical_ring_default_irqs(engine, info->irq_shift);
-
-	intel_engine_init_hangcheck(engine);
-	i915_gem_batch_pool_init(&dev_priv->drm, &engine->batch_pool);
-
-	return engine;
-}
-
-/**
- * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
- * @dev: DRM device.
- *
- * This function inits the engines for an Execlists submission style (the
- * equivalent in the legacy ringbuffer submission world would be
- * i915_gem_init_engines). It does it only for those engines that are present in
- * the hardware.
- *
- * Return: non-zero if the initialization failed.
- */
-int intel_logical_rings_init(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	unsigned int mask = 0;
-	unsigned int i;
-	int ret;
-
-	WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
-		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
-
-	for (i = 0; i < ARRAY_SIZE(logical_rings); i++) {
-		if (!HAS_ENGINE(dev_priv, i))
-			continue;
-
-		if (!logical_rings[i].init)
-			continue;
-
-		ret = logical_rings[i].init(logical_ring_setup(dev_priv, i));
-		if (ret)
-			goto cleanup;
-
-		mask |= ENGINE_MASK(i);
-	}
-
-	/*
-	 * Catch failures to update logical_rings table when the new engines
-	 * are added to the driver by a warning and disabling the forgotten
-	 * engines.
-	 */
-	if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) {
-		struct intel_device_info *info =
-			(struct intel_device_info *)&dev_priv->info;
-		info->ring_mask = mask;
-	}
-
-	return 0;
-
-cleanup:
-	for (i = 0; i < I915_NUM_ENGINES; i++)
-		intel_logical_ring_cleanup(&dev_priv->engine[i]);
-
-	return ret;
+	return logical_ring_init(engine);
 }
 
 static u32
@@ -2259,24 +1870,24 @@
 	 * must make an explicit request through RPCS for full
 	 * enablement.
 	*/
-	if (INTEL_INFO(dev_priv)->has_slice_pg) {
+	if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
 		rpcs |= GEN8_RPCS_S_CNT_ENABLE;
-		rpcs |= INTEL_INFO(dev_priv)->slice_total <<
+		rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) <<
 			GEN8_RPCS_S_CNT_SHIFT;
 		rpcs |= GEN8_RPCS_ENABLE;
 	}
 
-	if (INTEL_INFO(dev_priv)->has_subslice_pg) {
+	if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
 		rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
-		rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
+		rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask) <<
 			GEN8_RPCS_SS_CNT_SHIFT;
 		rpcs |= GEN8_RPCS_ENABLE;
 	}
 
-	if (INTEL_INFO(dev_priv)->has_eu_pg) {
-		rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
+	if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
+		rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
 			GEN8_RPCS_EU_MIN_SHIFT;
-		rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
+		rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
 			GEN8_RPCS_EU_MAX_SHIFT;
 		rpcs |= GEN8_RPCS_ENABLE;
 	}
@@ -2305,38 +1916,13 @@
 	return indirect_ctx_offset;
 }
 
-static int
-populate_lr_context(struct i915_gem_context *ctx,
-		    struct drm_i915_gem_object *ctx_obj,
-		    struct intel_engine_cs *engine,
-		    struct intel_ringbuffer *ringbuf)
+static void execlists_init_reg_state(u32 *reg_state,
+				     struct i915_gem_context *ctx,
+				     struct intel_engine_cs *engine,
+				     struct intel_ring *ring)
 {
-	struct drm_i915_private *dev_priv = ctx->i915;
-	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
-	void *vaddr;
-	u32 *reg_state;
-	int ret;
-
-	if (!ppgtt)
-		ppgtt = dev_priv->mm.aliasing_ppgtt;
-
-	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
-	if (ret) {
-		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
-		return ret;
-	}
-
-	vaddr = i915_gem_object_pin_map(ctx_obj);
-	if (IS_ERR(vaddr)) {
-		ret = PTR_ERR(vaddr);
-		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
-		return ret;
-	}
-	ctx_obj->dirty = true;
-
-	/* The second page of the context object contains some fields which must
-	 * be set up prior to the first execution. */
-	reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+	struct drm_i915_private *dev_priv = engine->i915;
+	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
 
 	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
 	 * commands followed by (reg, value) pairs. The values we are setting here are
@@ -2350,19 +1936,16 @@
 		       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
 					  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
 					  (HAS_RESOURCE_STREAMER(dev_priv) ?
-					    CTX_CTRL_RS_CTX_ENABLE : 0)));
+					   CTX_CTRL_RS_CTX_ENABLE : 0)));
 	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
 		       0);
 	ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
 		       0);
-	/* Ring buffer start address is not known until the buffer is pinned.
-	 * It is written to the context image in execlists_update_context()
-	 */
 	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
 		       RING_START(engine->mmio_base), 0);
 	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
 		       RING_CTL(engine->mmio_base),
-		       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
+		       ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
 	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
 		       RING_BBADDR_UDW(engine->mmio_base), 0);
 	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
@@ -2383,9 +1966,9 @@
 			       RING_INDIRECT_CTX(engine->mmio_base), 0);
 		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
 			       RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
-		if (engine->wa_ctx.obj) {
+		if (engine->wa_ctx.vma) {
 			struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
-			uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
+			u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
 
 			reg_state[CTX_RCS_INDIRECT_CTX+1] =
 				(ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
@@ -2440,6 +2023,36 @@
 		ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
 			       make_rpcs(dev_priv));
 	}
+}
+
+static int
+populate_lr_context(struct i915_gem_context *ctx,
+		    struct drm_i915_gem_object *ctx_obj,
+		    struct intel_engine_cs *engine,
+		    struct intel_ring *ring)
+{
+	void *vaddr;
+	int ret;
+
+	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
+		return ret;
+	}
+
+	vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
+	if (IS_ERR(vaddr)) {
+		ret = PTR_ERR(vaddr);
+		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
+		return ret;
+	}
+	ctx_obj->dirty = true;
+
+	/* The second page of the context object contains some fields which must
+	 * be set up prior to the first execution. */
+
+	execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
+				 ctx, engine, ring);
 
 	i915_gem_object_unpin_map(ctx_obj);
 
@@ -2484,26 +2097,14 @@
 	return ret;
 }
 
-/**
- * execlists_context_deferred_alloc() - create the LRC specific bits of a context
- * @ctx: LR context to create.
- * @engine: engine to be used with the context.
- *
- * This function can be called more than once, with different engines, if we plan
- * to use the context with them. The context backing objects and the ringbuffers
- * (specially the ringbuffer backing objects) suck a lot of memory up, and that's why
- * the creation is a deferred call: it's better to make sure first that we need to use
- * a given ring with the context.
- *
- * Return: non-zero on error.
- */
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 					    struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_object *ctx_obj;
 	struct intel_context *ce = &ctx->engine[engine->id];
+	struct i915_vma *vma;
 	uint32_t context_size;
-	struct intel_ringbuffer *ringbuf;
+	struct intel_ring *ring;
 	int ret;
 
 	WARN_ON(ce->state);
@@ -2519,60 +2120,63 @@
 		return PTR_ERR(ctx_obj);
 	}
 
-	ringbuf = intel_engine_create_ringbuffer(engine, ctx->ring_size);
-	if (IS_ERR(ringbuf)) {
-		ret = PTR_ERR(ringbuf);
+	vma = i915_vma_create(ctx_obj, &ctx->i915->ggtt.base, NULL);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto error_deref_obj;
 	}
 
-	ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
-	if (ret) {
-		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
-		goto error_ringbuf;
+	ring = intel_engine_create_ring(engine, ctx->ring_size);
+	if (IS_ERR(ring)) {
+		ret = PTR_ERR(ring);
+		goto error_deref_obj;
 	}
 
-	ce->ringbuf = ringbuf;
-	ce->state = ctx_obj;
+	ret = populate_lr_context(ctx, ctx_obj, engine, ring);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
+		goto error_ring_free;
+	}
+
+	ce->ring = ring;
+	ce->state = vma;
 	ce->initialised = engine->init_context == NULL;
 
 	return 0;
 
-error_ringbuf:
-	intel_ringbuffer_free(ringbuf);
+error_ring_free:
+	intel_ring_free(ring);
 error_deref_obj:
-	drm_gem_object_unreference(&ctx_obj->base);
-	ce->ringbuf = NULL;
-	ce->state = NULL;
+	i915_gem_object_put(ctx_obj);
 	return ret;
 }
 
-void intel_lr_context_reset(struct drm_i915_private *dev_priv,
-			    struct i915_gem_context *ctx)
+void intel_lr_context_resume(struct drm_i915_private *dev_priv)
 {
+	struct i915_gem_context *ctx = dev_priv->kernel_context;
 	struct intel_engine_cs *engine;
 
 	for_each_engine(engine, dev_priv) {
 		struct intel_context *ce = &ctx->engine[engine->id];
-		struct drm_i915_gem_object *ctx_obj = ce->state;
 		void *vaddr;
 		uint32_t *reg_state;
 
-		if (!ctx_obj)
+		if (!ce->state)
 			continue;
 
-		vaddr = i915_gem_object_pin_map(ctx_obj);
+		vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
 		if (WARN_ON(IS_ERR(vaddr)))
 			continue;
 
 		reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-		ctx_obj->dirty = true;
 
 		reg_state[CTX_RING_HEAD+1] = 0;
 		reg_state[CTX_RING_TAIL+1] = 0;
 
-		i915_gem_object_unpin_map(ctx_obj);
+		ce->state->obj->dirty = true;
+		i915_gem_object_unpin_map(ce->state->obj);
 
-		ce->ringbuf->head = 0;
-		ce->ringbuf->tail = 0;
+		ce->ring->head = 0;
+		ce->ring->tail = 0;
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 2b8255c..4fed816 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -29,17 +29,17 @@
 #define GEN8_LR_CONTEXT_ALIGN 4096
 
 /* Execlists regs */
-#define RING_ELSP(ring)				_MMIO((ring)->mmio_base + 0x230)
-#define RING_EXECLIST_STATUS_LO(ring)		_MMIO((ring)->mmio_base + 0x234)
-#define RING_EXECLIST_STATUS_HI(ring)		_MMIO((ring)->mmio_base + 0x234 + 4)
-#define RING_CONTEXT_CONTROL(ring)		_MMIO((ring)->mmio_base + 0x244)
+#define RING_ELSP(engine)			_MMIO((engine)->mmio_base + 0x230)
+#define RING_EXECLIST_STATUS_LO(engine)		_MMIO((engine)->mmio_base + 0x234)
+#define RING_EXECLIST_STATUS_HI(engine)		_MMIO((engine)->mmio_base + 0x234 + 4)
+#define RING_CONTEXT_CONTROL(engine)		_MMIO((engine)->mmio_base + 0x244)
 #define	  CTX_CTRL_INHIBIT_SYN_CTX_SWITCH	(1 << 3)
 #define	  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)
 #define   CTX_CTRL_RS_CTX_ENABLE                (1 << 1)
-#define RING_CONTEXT_STATUS_BUF_BASE(ring)	_MMIO((ring)->mmio_base + 0x370)
-#define RING_CONTEXT_STATUS_BUF_LO(ring, i)	_MMIO((ring)->mmio_base + 0x370 + (i) * 8)
-#define RING_CONTEXT_STATUS_BUF_HI(ring, i)	_MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
-#define RING_CONTEXT_STATUS_PTR(ring)		_MMIO((ring)->mmio_base + 0x3a0)
+#define RING_CONTEXT_STATUS_BUF_BASE(engine)	_MMIO((engine)->mmio_base + 0x370)
+#define RING_CONTEXT_STATUS_BUF_LO(engine, i)	_MMIO((engine)->mmio_base + 0x370 + (i) * 8)
+#define RING_CONTEXT_STATUS_BUF_HI(engine, i)	_MMIO((engine)->mmio_base + 0x370 + (i) * 8 + 4)
+#define RING_CONTEXT_STATUS_PTR(engine)		_MMIO((engine)->mmio_base + 0x3a0)
 
 /* The docs specify that the write pointer wraps around after 5h, "After status
  * is written out to the last available status QW at offset 5h, this pointer
@@ -67,35 +67,10 @@
 int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
 void intel_logical_ring_stop(struct intel_engine_cs *engine);
 void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
-int intel_logical_rings_init(struct drm_device *dev);
+int logical_render_ring_init(struct intel_engine_cs *engine);
+int logical_xcs_ring_init(struct intel_engine_cs *engine);
 
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
-/**
- * intel_logical_ring_advance() - advance the ringbuffer tail
- * @ringbuf: Ringbuffer to advance.
- *
- * The tail is only updated in our logical ringbuffer struct.
- */
-static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
-{
-	ringbuf->tail &= ringbuf->size - 1;
-}
-/**
- * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
- * @ringbuf: Ringbuffer to write to.
- * @data: DWORD to write.
- */
-static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
-					   u32 data)
-{
-	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
-	ringbuf->tail += 4;
-}
-static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
-					       i915_reg_t reg)
-{
-	intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
-}
+int intel_engines_init(struct drm_device *dev);
 
 /* Logical Ring Contexts */
 
@@ -112,19 +87,13 @@
 
 struct drm_i915_private;
 
-void intel_lr_context_reset(struct drm_i915_private *dev_priv,
-			    struct i915_gem_context *ctx);
+void intel_lr_context_resume(struct drm_i915_private *dev_priv);
 uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
 				     struct intel_engine_cs *engine);
 
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
 				    int enable_execlists);
-struct i915_execbuffer_params;
-int intel_execlists_submission(struct i915_execbuffer_params *params,
-			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas);
-
-void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
+void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
 
 #endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 4955047..e1d47d5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -48,6 +48,20 @@
 	struct notifier_block lid_notifier;
 };
 
+struct intel_lvds_pps {
+	/* 100us units */
+	int t1_t2;
+	int t3;
+	int t4;
+	int t5;
+	int tx;
+
+	int divider;
+
+	int port;
+	bool powerdown_on_reset;
+};
+
 struct intel_lvds_encoder {
 	struct intel_encoder base;
 
@@ -55,6 +69,9 @@
 	i915_reg_t reg;
 	u32 a3_power;
 
+	struct intel_lvds_pps init_pps;
+	u32 init_lvds_val;
+
 	struct intel_lvds_connector *attached_connector;
 };
 
@@ -136,28 +153,108 @@
 	pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
 }
 
-static void intel_pre_enable_lvds(struct intel_encoder *encoder)
+static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
+					struct intel_lvds_pps *pps)
+{
+	u32 val;
+
+	pps->powerdown_on_reset = I915_READ(PP_CONTROL(0)) & PANEL_POWER_RESET;
+
+	val = I915_READ(PP_ON_DELAYS(0));
+	pps->port = (val & PANEL_PORT_SELECT_MASK) >>
+		    PANEL_PORT_SELECT_SHIFT;
+	pps->t1_t2 = (val & PANEL_POWER_UP_DELAY_MASK) >>
+		     PANEL_POWER_UP_DELAY_SHIFT;
+	pps->t5 = (val & PANEL_LIGHT_ON_DELAY_MASK) >>
+		  PANEL_LIGHT_ON_DELAY_SHIFT;
+
+	val = I915_READ(PP_OFF_DELAYS(0));
+	pps->t3 = (val & PANEL_POWER_DOWN_DELAY_MASK) >>
+		  PANEL_POWER_DOWN_DELAY_SHIFT;
+	pps->tx = (val & PANEL_LIGHT_OFF_DELAY_MASK) >>
+		  PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+	val = I915_READ(PP_DIVISOR(0));
+	pps->divider = (val & PP_REFERENCE_DIVIDER_MASK) >>
+		       PP_REFERENCE_DIVIDER_SHIFT;
+	val = (val & PANEL_POWER_CYCLE_DELAY_MASK) >>
+	      PANEL_POWER_CYCLE_DELAY_SHIFT;
+	/*
+	 * Remove the BSpec specified +1 (100ms) offset that accounts for a
+	 * too short power-cycle delay due to the asynchronous programming of
+	 * the register.
+	 */
+	if (val)
+		val--;
+	/* Convert from 100ms to 100us units */
+	pps->t4 = val * 1000;
+
+	if (INTEL_INFO(dev_priv)->gen <= 4 &&
+	    pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
+		DRM_DEBUG_KMS("Panel power timings uninitialized, "
+			      "setting defaults\n");
+		/* Set T2 to 40ms and T5 to 200ms in 100 usec units */
+		pps->t1_t2 = 40 * 10;
+		pps->t5 = 200 * 10;
+		/* Set T3 to 35ms and Tx to 200ms in 100 usec units */
+		pps->t3 = 35 * 10;
+		pps->tx = 200 * 10;
+	}
+
+	DRM_DEBUG_DRIVER("LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
+			 "divider %d port %d powerdown_on_reset %d\n",
+			 pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
+			 pps->divider, pps->port, pps->powerdown_on_reset);
+}
+
+static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
+				   struct intel_lvds_pps *pps)
+{
+	u32 val;
+
+	val = I915_READ(PP_CONTROL(0));
+	WARN_ON((val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS);
+	if (pps->powerdown_on_reset)
+		val |= PANEL_POWER_RESET;
+	I915_WRITE(PP_CONTROL(0), val);
+
+	I915_WRITE(PP_ON_DELAYS(0), (pps->port << PANEL_PORT_SELECT_SHIFT) |
+				    (pps->t1_t2 << PANEL_POWER_UP_DELAY_SHIFT) |
+				    (pps->t5 << PANEL_LIGHT_ON_DELAY_SHIFT));
+	I915_WRITE(PP_OFF_DELAYS(0), (pps->t3 << PANEL_POWER_DOWN_DELAY_SHIFT) |
+				     (pps->tx << PANEL_LIGHT_OFF_DELAY_SHIFT));
+
+	val = pps->divider << PP_REFERENCE_DIVIDER_SHIFT;
+	val |= (DIV_ROUND_UP(pps->t4, 1000) + 1) <<
+	       PANEL_POWER_CYCLE_DELAY_SHIFT;
+	I915_WRITE(PP_DIVISOR(0), val);
+}
+
+static void intel_pre_enable_lvds(struct intel_encoder *encoder,
+				  struct intel_crtc_state *pipe_config,
+				  struct drm_connector_state *conn_state)
 {
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	int pipe = crtc->pipe;
 	u32 temp;
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (HAS_PCH_SPLIT(dev_priv)) {
 		assert_fdi_rx_pll_disabled(dev_priv, pipe);
 		assert_shared_dpll_disabled(dev_priv,
-					    crtc->config->shared_dpll);
+					    pipe_config->shared_dpll);
 	} else {
 		assert_pll_disabled(dev_priv, pipe);
 	}
 
-	temp = I915_READ(lvds_encoder->reg);
+	intel_lvds_pps_init_hw(dev_priv, &lvds_encoder->init_pps);
+
+	temp = lvds_encoder->init_lvds_val;
 	temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
 
-	if (HAS_PCH_CPT(dev)) {
+	if (HAS_PCH_CPT(dev_priv)) {
 		temp &= ~PORT_TRANS_SEL_MASK;
 		temp |= PORT_TRANS_SEL_CPT(pipe);
 	} else {
@@ -170,7 +267,7 @@
 
 	/* set the corresponsding LVDS_BORDER bit */
 	temp &= ~LVDS_BORDER_ENABLE;
-	temp |= crtc->config->gmch_pfit.lvds_border_bits;
+	temp |= pipe_config->gmch_pfit.lvds_border_bits;
 	/* Set the B0-B3 data pairs corresponding to whether we're going to
 	 * set the DPLLs for dual-channel mode or not.
 	 */
@@ -193,7 +290,7 @@
 	if (IS_GEN4(dev_priv)) {
 		/* Bspec wording suggests that LVDS port dithering only exists
 		 * for 18bpp panels. */
-		if (crtc->config->dither && crtc->config->pipe_bpp == 18)
+		if (pipe_config->dither && pipe_config->pipe_bpp == 18)
 			temp |= LVDS_ENABLE_DITHER;
 		else
 			temp &= ~LVDS_ENABLE_DITHER;
@@ -210,57 +307,45 @@
 /**
  * Sets the power state for the panel.
  */
-static void intel_enable_lvds(struct intel_encoder *encoder)
+static void intel_enable_lvds(struct intel_encoder *encoder,
+			      struct intel_crtc_state *pipe_config,
+			      struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct intel_connector *intel_connector =
 		&lvds_encoder->attached_connector->base;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	i915_reg_t ctl_reg, stat_reg;
-
-	if (HAS_PCH_SPLIT(dev)) {
-		ctl_reg = PCH_PP_CONTROL;
-		stat_reg = PCH_PP_STATUS;
-	} else {
-		ctl_reg = PP_CONTROL;
-		stat_reg = PP_STATUS;
-	}
 
 	I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
 
-	I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+	I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
 	POSTING_READ(lvds_encoder->reg);
-	if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, PP_ON, 1000))
+	if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000))
 		DRM_ERROR("timed out waiting for panel to power on\n");
 
 	intel_panel_enable_backlight(intel_connector);
 }
 
-static void intel_disable_lvds(struct intel_encoder *encoder)
+static void intel_disable_lvds(struct intel_encoder *encoder,
+			       struct intel_crtc_state *old_crtc_state,
+			       struct drm_connector_state *old_conn_state)
 {
-	struct drm_device *dev = encoder->base.dev;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	i915_reg_t ctl_reg, stat_reg;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	if (HAS_PCH_SPLIT(dev)) {
-		ctl_reg = PCH_PP_CONTROL;
-		stat_reg = PCH_PP_STATUS;
-	} else {
-		ctl_reg = PP_CONTROL;
-		stat_reg = PP_STATUS;
-	}
-
-	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
-	if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, 0, 1000))
+	I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
+	if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, 0, 1000))
 		DRM_ERROR("timed out waiting for panel to power off\n");
 
 	I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
 	POSTING_READ(lvds_encoder->reg);
 }
 
-static void gmch_disable_lvds(struct intel_encoder *encoder)
+static void gmch_disable_lvds(struct intel_encoder *encoder,
+			      struct intel_crtc_state *old_crtc_state,
+			      struct drm_connector_state *old_conn_state)
+
 {
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct intel_connector *intel_connector =
@@ -268,10 +353,12 @@
 
 	intel_panel_disable_backlight(intel_connector);
 
-	intel_disable_lvds(encoder);
+	intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
 }
 
-static void pch_disable_lvds(struct intel_encoder *encoder)
+static void pch_disable_lvds(struct intel_encoder *encoder,
+			     struct intel_crtc_state *old_crtc_state,
+			     struct drm_connector_state *old_conn_state)
 {
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct intel_connector *intel_connector =
@@ -280,9 +367,11 @@
 	intel_panel_disable_backlight(intel_connector);
 }
 
-static void pch_post_disable_lvds(struct intel_encoder *encoder)
+static void pch_post_disable_lvds(struct intel_encoder *encoder,
+				  struct intel_crtc_state *old_crtc_state,
+				  struct drm_connector_state *old_conn_state)
 {
-	intel_disable_lvds(encoder);
+	intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
 }
 
 static enum drm_mode_status
@@ -304,7 +393,8 @@
 }
 
 static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
-				      struct intel_crtc_state *pipe_config)
+				      struct intel_crtc_state *pipe_config,
+				      struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct intel_lvds_encoder *lvds_encoder =
@@ -900,17 +990,6 @@
 	int pipe;
 	u8 pin;
 
-	/*
-	 * Unlock registers and just leave them unlocked. Do this before
-	 * checking quirk lists to avoid bogus WARNINGs.
-	 */
-	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(PCH_PP_CONTROL,
-			   I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
-	} else if (INTEL_INFO(dev_priv)->gen < 5) {
-		I915_WRITE(PP_CONTROL,
-			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
-	}
 	if (!intel_lvds_supported(dev))
 		return;
 
@@ -943,18 +1022,6 @@
 		DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
 	}
 
-	 /* Set the Panel Power On/Off timings if uninitialized. */
-	if (INTEL_INFO(dev_priv)->gen < 5 &&
-	    I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
-		/* Set T2 to 40ms and T5 to 200ms */
-		I915_WRITE(PP_ON_DELAYS, 0x019007d0);
-
-		/* Set T3 to 35ms and Tx to 200ms */
-		I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
-
-		DRM_DEBUG_KMS("Panel power timings uninitialized, setting defaults\n");
-	}
-
 	lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
 	if (!lvds_encoder)
 		return;
@@ -1020,6 +1087,10 @@
 				      dev->mode_config.scaling_mode_property,
 				      DRM_MODE_SCALE_ASPECT);
 	intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+
+	intel_lvds_pps_get_hw_state(dev_priv, &lvds_encoder->init_pps);
+	lvds_encoder->init_lvds_val = lvds;
+
 	/*
 	 * LVDS discovery:
 	 * 1) check for EDID on DDC
@@ -1054,17 +1125,6 @@
 	}
 	lvds_connector->base.edid = edid;
 
-	if (IS_ERR_OR_NULL(edid)) {
-		/* Didn't get an EDID, so
-		 * Set wide sync ranges so we get all modes
-		 * handed to valid_mode for checking
-		 */
-		connector->display_info.min_vfreq = 0;
-		connector->display_info.max_vfreq = 200;
-		connector->display_info.min_hfreq = 0;
-		connector->display_info.max_hfreq = 200;
-	}
-
 	list_for_each_entry(scan, &connector->probed_modes, head) {
 		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
 			DRM_DEBUG_KMS("using preferred mode from EDID: ");
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 927825f..80bb924 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -97,7 +97,8 @@
  *       end.
  */
 static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
-	{ /* 0x00000009 */
+	[I915_MOCS_UNCACHED] = {
+	  /* 0x00000009 */
 	  .control_value = LE_CACHEABILITY(LE_UC) |
 			   LE_TGT_CACHE(LE_TC_LLC_ELLC) |
 			   LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
@@ -106,7 +107,7 @@
 	  /* 0x0010 */
 	  .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
 	},
-	{
+	[I915_MOCS_PTE] = {
 	  /* 0x00000038 */
 	  .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
 			   LE_TGT_CACHE(LE_TC_LLC_ELLC) |
@@ -115,7 +116,7 @@
 	  /* 0x0030 */
 	  .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
 	},
-	{
+	[I915_MOCS_CACHED] = {
 	  /* 0x0000003b */
 	  .control_value = LE_CACHEABILITY(LE_WB) |
 			   LE_TGT_CACHE(LE_TC_LLC_ELLC) |
@@ -128,7 +129,7 @@
 
 /* NOTE: the LE_TGT_CACHE is not used on Broxton */
 static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
-	{
+	[I915_MOCS_UNCACHED] = {
 	  /* 0x00000009 */
 	  .control_value = LE_CACHEABILITY(LE_UC) |
 			   LE_TGT_CACHE(LE_TC_LLC_ELLC) |
@@ -138,7 +139,7 @@
 	  /* 0x0010 */
 	  .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
 	},
-	{
+	[I915_MOCS_PTE] = {
 	  /* 0x00000038 */
 	  .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
 			   LE_TGT_CACHE(LE_TC_LLC_ELLC) |
@@ -148,7 +149,7 @@
 	  /* 0x0030 */
 	  .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
 	},
-	{
+	[I915_MOCS_CACHED] = {
 	  /* 0x00000039 */
 	  .control_value = LE_CACHEABILITY(LE_UC) |
 			   LE_TGT_CACHE(LE_TC_LLC_ELLC) |
@@ -203,9 +204,9 @@
 	return result;
 }
 
-static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
+static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
 {
-	switch (ring) {
+	switch (engine_id) {
 	case RCS:
 		return GEN9_GFX_MOCS(index);
 	case VCS:
@@ -217,7 +218,7 @@
 	case VCS2:
 		return GEN9_MFX1_MOCS(index);
 	default:
-		MISSING_CASE(ring);
+		MISSING_CASE(engine_id);
 		return INVALID_MMIO_REG;
 	}
 }
@@ -275,7 +276,7 @@
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 				   const struct drm_i915_mocs_table *table)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ring *ring = req->ring;
 	enum intel_engine_id engine = req->engine->id;
 	unsigned int index;
 	int ret;
@@ -287,14 +288,11 @@
 	if (ret)
 		return ret;
 
-	intel_logical_ring_emit(ringbuf,
-				MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
 
 	for (index = 0; index < table->size; index++) {
-		intel_logical_ring_emit_reg(ringbuf,
-					    mocs_register(engine, index));
-		intel_logical_ring_emit(ringbuf,
-					table->table[index].control_value);
+		intel_ring_emit_reg(ring, mocs_register(engine, index));
+		intel_ring_emit(ring, table->table[index].control_value);
 	}
 
 	/*
@@ -306,14 +304,12 @@
 	 * that value to all the used entries.
 	 */
 	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-		intel_logical_ring_emit_reg(ringbuf,
-					    mocs_register(engine, index));
-		intel_logical_ring_emit(ringbuf,
-					table->table[0].control_value);
+		intel_ring_emit_reg(ring, mocs_register(engine, index));
+		intel_ring_emit(ring, table->table[0].control_value);
 	}
 
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -340,7 +336,7 @@
 static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 				const struct drm_i915_mocs_table *table)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ring *ring = req->ring;
 	unsigned int i;
 	int ret;
 
@@ -351,19 +347,18 @@
 	if (ret)
 		return ret;
 
-	intel_logical_ring_emit(ringbuf,
+	intel_ring_emit(ring,
 			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
 
 	for (i = 0; i < table->size/2; i++) {
-		intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_logical_ring_emit(ringbuf,
-					l3cc_combine(table, 2*i, 2*i+1));
+		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+		intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
 	}
 
 	if (table->size & 0x01) {
 		/* Odd table size - 1 left over */
-		intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_logical_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
+		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+		intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
 		i++;
 	}
 
@@ -373,12 +368,12 @@
 	 * they are reserved by the hardware.
 	 */
 	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-		intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_logical_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
+		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+		intel_ring_emit(ring, l3cc_combine(table, 0, 0));
 	}
 
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
index 4640299..a8bd9f7 100644
--- a/drivers/gpu/drm/i915/intel_mocs.h
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -54,6 +54,6 @@
 
 int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
 void intel_mocs_init_l3cc_table(struct drm_device *dev);
-int intel_mocs_init_engine(struct intel_engine_cs *ring);
+int intel_mocs_init_engine(struct intel_engine_cs *engine);
 
 #endif
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index f2584d0..951e834 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -25,7 +25,6 @@
 
 #include <linux/slab.h>
 #include <linux/i2c.h>
-#include <linux/fb.h>
 #include <drm/drm_edid.h>
 #include <drm/drmP.h>
 #include "intel_drv.h"
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 3212d88..a24bc8c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -30,6 +30,7 @@
 #include "i915_drv.h"
 #include "i915_reg.h"
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 
 /* Limits for overlay size. According to intel doc, the real limits are:
  * Y width: 4095, UV width (planar): 2047, Y height: 2047,
@@ -170,8 +171,8 @@
 struct intel_overlay {
 	struct drm_i915_private *i915;
 	struct intel_crtc *crtc;
-	struct drm_i915_gem_object *vid_bo;
-	struct drm_i915_gem_object *old_vid_bo;
+	struct i915_vma *vma;
+	struct i915_vma *old_vma;
 	bool active;
 	bool pfit_active;
 	u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
@@ -183,8 +184,7 @@
 	u32 flip_addr;
 	struct drm_i915_gem_object *reg_bo;
 	/* flip handling */
-	struct drm_i915_gem_request *last_flip_req;
-	void (*flip_tail)(struct intel_overlay *);
+	struct i915_gem_active last_flip;
 };
 
 static struct overlay_registers __iomem *
@@ -196,7 +196,7 @@
 	if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
 		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
 	else
-		regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
+		regs = io_mapping_map_wc(&dev_priv->ggtt.mappable,
 					 overlay->flip_addr,
 					 PAGE_SIZE);
 
@@ -210,37 +210,46 @@
 		io_mapping_unmap(regs);
 }
 
+static void intel_overlay_submit_request(struct intel_overlay *overlay,
+					 struct drm_i915_gem_request *req,
+					 i915_gem_retire_fn retire)
+{
+	GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
+					&overlay->i915->drm.struct_mutex));
+	overlay->last_flip.retire = retire;
+	i915_gem_active_set(&overlay->last_flip, req);
+	i915_add_request(req);
+}
+
 static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 					 struct drm_i915_gem_request *req,
-					 void (*tail)(struct intel_overlay *))
+					 i915_gem_retire_fn retire)
 {
-	int ret;
+	intel_overlay_submit_request(overlay, req, retire);
+	return i915_gem_active_retire(&overlay->last_flip,
+				      &overlay->i915->drm.struct_mutex);
+}
 
-	WARN_ON(overlay->last_flip_req);
-	i915_gem_request_assign(&overlay->last_flip_req, req);
-	i915_add_request(req);
+static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
+{
+	struct drm_i915_private *dev_priv = overlay->i915;
+	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 
-	overlay->flip_tail = tail;
-	ret = i915_wait_request(overlay->last_flip_req);
-	if (ret)
-		return ret;
-
-	i915_gem_request_assign(&overlay->last_flip_req, NULL);
-	return 0;
+	return i915_gem_request_alloc(engine, dev_priv->kernel_context);
 }
 
 /* overlay needs to be disable in OCMD reg */
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
-	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
+	struct intel_ring *ring;
 	int ret;
 
 	WARN_ON(overlay->active);
 	WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
-	req = i915_gem_request_alloc(engine, NULL);
+	req = alloc_request(overlay);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -252,11 +261,12 @@
 
 	overlay->active = true;
 
-	intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-	intel_ring_emit(engine, overlay->flip_addr | OFC_UPDATE);
-	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	ring = req->ring;
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return intel_overlay_do_wait_request(overlay, req, NULL);
 }
@@ -266,8 +276,8 @@
 				  bool load_polyphase_filter)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
-	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
+	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
 	int ret;
@@ -282,7 +292,7 @@
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	req = i915_gem_request_alloc(engine, NULL);
+	req = alloc_request(overlay);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -292,38 +302,48 @@
 		return ret;
 	}
 
-	intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(engine, flip_addr);
-	intel_ring_advance(engine);
+	ring = req->ring;
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_advance(ring);
 
-	WARN_ON(overlay->last_flip_req);
-	i915_gem_request_assign(&overlay->last_flip_req, req);
-	i915_add_request(req);
+	intel_overlay_submit_request(overlay, req, NULL);
 
 	return 0;
 }
 
-static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
+static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
+					       struct drm_i915_gem_request *req)
 {
-	struct drm_i915_gem_object *obj = overlay->old_vid_bo;
+	struct intel_overlay *overlay =
+		container_of(active, typeof(*overlay), last_flip);
+	struct i915_vma *vma;
 
-	i915_gem_object_ggtt_unpin(obj);
-	drm_gem_object_unreference(&obj->base);
-
-	overlay->old_vid_bo = NULL;
-}
-
-static void intel_overlay_off_tail(struct intel_overlay *overlay)
-{
-	struct drm_i915_gem_object *obj = overlay->vid_bo;
-
-	/* never have the overlay hw on without showing a frame */
-	if (WARN_ON(!obj))
+	vma = fetch_and_zero(&overlay->old_vma);
+	if (WARN_ON(!vma))
 		return;
 
-	i915_gem_object_ggtt_unpin(obj);
-	drm_gem_object_unreference(&obj->base);
-	overlay->vid_bo = NULL;
+	i915_gem_track_fb(vma->obj, NULL,
+			  INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+
+	i915_gem_object_unpin_from_display_plane(vma);
+	i915_vma_put(vma);
+}
+
+static void intel_overlay_off_tail(struct i915_gem_active *active,
+				   struct drm_i915_gem_request *req)
+{
+	struct intel_overlay *overlay =
+		container_of(active, typeof(*overlay), last_flip);
+	struct i915_vma *vma;
+
+	/* never have the overlay hw on without showing a frame */
+	vma = fetch_and_zero(&overlay->vma);
+	if (WARN_ON(!vma))
+		return;
+
+	i915_gem_object_unpin_from_display_plane(vma);
+	i915_vma_put(vma);
 
 	overlay->crtc->overlay = NULL;
 	overlay->crtc = NULL;
@@ -334,8 +354,8 @@
 static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
-	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
+	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
 	int ret;
 
@@ -347,7 +367,7 @@
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	req = i915_gem_request_alloc(engine, NULL);
+	req = alloc_request(overlay);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -357,46 +377,36 @@
 		return ret;
 	}
 
+	ring = req->ring;
 	/* wait for overlay to go idle */
-	intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(engine, flip_addr);
-	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	/* turn overlay off */
 	if (IS_I830(dev_priv)) {
 		/* Workaround: Don't disable the overlay fully, since otherwise
 		 * it dies on the next OVERLAY_ON cmd. */
-		intel_ring_emit(engine, MI_NOOP);
-		intel_ring_emit(engine, MI_NOOP);
-		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_NOOP);
 	} else {
-		intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-		intel_ring_emit(engine, flip_addr);
-		intel_ring_emit(engine,
+		intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+		intel_ring_emit(ring, flip_addr);
+		intel_ring_emit(ring,
 				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	}
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
-	return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
+	return intel_overlay_do_wait_request(overlay, req,
+					     intel_overlay_off_tail);
 }
 
 /* recover from an interruption due to a signal
  * We have to be careful not to repeat work forever an make forward progess. */
 static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 {
-	int ret;
-
-	if (overlay->last_flip_req == NULL)
-		return 0;
-
-	ret = i915_wait_request(overlay->last_flip_req);
-	if (ret)
-		return ret;
-
-	if (overlay->flip_tail)
-		overlay->flip_tail(overlay);
-
-	i915_gem_request_assign(&overlay->last_flip_req, NULL);
-	return 0;
+	return i915_gem_active_retire(&overlay->last_flip,
+				      &overlay->i915->drm.struct_mutex);
 }
 
 /* Wait for pending overlay flip and release old frame.
@@ -406,7 +416,6 @@
 static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
-	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -414,14 +423,15 @@
 	/* Only wait if there is actually an old frame to release to
 	 * guarantee forward progress.
 	 */
-	if (!overlay->old_vid_bo)
+	if (!overlay->old_vma)
 		return 0;
 
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
 		/* synchronous slowpath */
 		struct drm_i915_gem_request *req;
+		struct intel_ring *ring;
 
-		req = i915_gem_request_alloc(engine, NULL);
+		req = alloc_request(overlay);
 		if (IS_ERR(req))
 			return PTR_ERR(req);
 
@@ -431,22 +441,19 @@
 			return ret;
 		}
 
-		intel_ring_emit(engine,
+		ring = req->ring;
+		intel_ring_emit(ring,
 				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-		intel_ring_emit(engine, MI_NOOP);
-		intel_ring_advance(engine);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 
 		ret = intel_overlay_do_wait_request(overlay, req,
 						    intel_overlay_release_old_vid_tail);
 		if (ret)
 			return ret;
-	}
+	} else
+		intel_overlay_release_old_vid_tail(&overlay->last_flip, NULL);
 
-	intel_overlay_release_old_vid_tail(overlay);
-
-
-	i915_gem_track_fb(overlay->old_vid_bo, NULL,
-			  INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
 	return 0;
 }
 
@@ -459,7 +466,6 @@
 
 	intel_overlay_release_old_vid(overlay);
 
-	overlay->last_flip_req = NULL;
 	overlay->old_xscale = 0;
 	overlay->old_yscale = 0;
 	overlay->crtc = NULL;
@@ -740,6 +746,7 @@
 	struct drm_i915_private *dev_priv = overlay->i915;
 	u32 swidth, swidthsw, sheight, ostride;
 	enum pipe pipe = overlay->crtc->pipe;
+	struct i915_vma *vma;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
@@ -748,12 +755,12 @@
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_object_pin_to_display_plane(new_bo, 0,
+	vma = i915_gem_object_pin_to_display_plane(new_bo, 0,
 						   &i915_ggtt_view_normal);
-	if (ret != 0)
-		return ret;
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
 
-	ret = i915_gem_object_put_fence(new_bo);
+	ret = i915_vma_put_fence(vma);
 	if (ret)
 		goto out_unpin;
 
@@ -794,7 +801,7 @@
 	swidth = params->src_w;
 	swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
 	sheight = params->src_h;
-	iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
+	iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y);
 	ostride = params->stride_Y;
 
 	if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -808,8 +815,10 @@
 				      params->src_w/uv_hscale);
 		swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
 		sheight |= (params->src_h/uv_vscale) << 16;
-		iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
-		iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
+		iowrite32(i915_ggtt_offset(vma) + params->offset_U,
+			  &regs->OBUF_0U);
+		iowrite32(i915_ggtt_offset(vma) + params->offset_V,
+			  &regs->OBUF_0V);
 		ostride |= params->stride_UV << 16;
 	}
 
@@ -830,19 +839,18 @@
 	if (ret)
 		goto out_unpin;
 
-	i915_gem_track_fb(overlay->vid_bo, new_bo,
+	i915_gem_track_fb(overlay->vma->obj, new_bo,
 			  INTEL_FRONTBUFFER_OVERLAY(pipe));
 
-	overlay->old_vid_bo = overlay->vid_bo;
-	overlay->vid_bo = new_bo;
+	overlay->old_vma = overlay->vma;
+	overlay->vma = vma;
 
-	intel_frontbuffer_flip(&dev_priv->drm,
-			       INTEL_FRONTBUFFER_OVERLAY(pipe));
+	intel_frontbuffer_flip(dev_priv, INTEL_FRONTBUFFER_OVERLAY(pipe));
 
 	return 0;
 
 out_unpin:
-	i915_gem_object_ggtt_unpin(new_bo);
+	i915_gem_object_unpin_from_display_plane(vma);
 	return ret;
 }
 
@@ -870,12 +878,7 @@
 	iowrite32(0, &regs->OCMD);
 	intel_overlay_unmap_regs(overlay, regs);
 
-	ret = intel_overlay_off(overlay);
-	if (ret != 0)
-		return ret;
-
-	intel_overlay_off_tail(overlay);
-	return 0;
+	return intel_overlay_off(overlay);
 }
 
 static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
@@ -1122,9 +1125,8 @@
 	}
 	crtc = to_intel_crtc(drmmode_crtc);
 
-	new_bo = to_intel_bo(drm_gem_object_lookup(file_priv,
-						   put_image_rec->bo_handle));
-	if (&new_bo->base == NULL) {
+	new_bo = i915_gem_object_lookup(file_priv, put_image_rec->bo_handle);
+	if (!new_bo) {
 		ret = -ENOENT;
 		goto out_free;
 	}
@@ -1132,7 +1134,7 @@
 	drm_modeset_lock_all(dev);
 	mutex_lock(&dev->struct_mutex);
 
-	if (new_bo->tiling_mode) {
+	if (i915_gem_object_is_tiled(new_bo)) {
 		DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
 		ret = -EINVAL;
 		goto out_unlock;
@@ -1220,7 +1222,7 @@
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	drm_modeset_unlock_all(dev);
-	drm_gem_object_unreference_unlocked(&new_bo->base);
+	i915_gem_object_put_unlocked(new_bo);
 out_free:
 	kfree(params);
 
@@ -1371,6 +1373,7 @@
 	struct intel_overlay *overlay;
 	struct drm_i915_gem_object *reg_bo;
 	struct overlay_registers __iomem *regs;
+	struct i915_vma *vma = NULL;
 	int ret;
 
 	if (!HAS_OVERLAY(dev_priv))
@@ -1404,12 +1407,14 @@
 		}
 		overlay->flip_addr = reg_bo->phys_handle->busaddr;
 	} else {
-		ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
-		if (ret) {
+		vma = i915_gem_object_ggtt_pin(reg_bo, NULL,
+					       0, PAGE_SIZE, PIN_MAPPABLE);
+		if (IS_ERR(vma)) {
 			DRM_ERROR("failed to pin overlay register bo\n");
+			ret = PTR_ERR(vma);
 			goto out_free_bo;
 		}
-		overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
+		overlay->flip_addr = i915_ggtt_offset(vma);
 
 		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
 		if (ret) {
@@ -1441,10 +1446,10 @@
 	return;
 
 out_unpin_bo:
-	if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
-		i915_gem_object_ggtt_unpin(reg_bo);
+	if (vma)
+		i915_vma_unpin(vma);
 out_free_bo:
-	drm_gem_object_unreference(&reg_bo->base);
+	i915_gem_object_put(reg_bo);
 out_free:
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	kfree(overlay);
@@ -1461,7 +1466,7 @@
 	 * hardware should be off already */
 	WARN_ON(dev_priv->overlay->active);
 
-	drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
+	i915_gem_object_put_unlocked(dev_priv->overlay->reg_bo);
 	kfree(dev_priv->overlay);
 }
 
@@ -1484,7 +1489,7 @@
 		regs = (struct overlay_registers __iomem *)
 			overlay->reg_bo->phys_handle->vaddr;
 	else
-		regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+		regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.mappable,
 						overlay->flip_addr);
 
 	return regs;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 96c65d7..be4b4d5 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -841,7 +841,7 @@
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
-	u32 pch_ctl1, pch_ctl2;
+	u32 pch_ctl1, pch_ctl2, schicken;
 
 	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
 	if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
@@ -850,6 +850,22 @@
 		I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
 	}
 
+	if (HAS_PCH_LPT(dev_priv)) {
+		schicken = I915_READ(SOUTH_CHICKEN2);
+		if (panel->backlight.alternate_pwm_increment)
+			schicken |= LPT_PWM_GRANULARITY;
+		else
+			schicken &= ~LPT_PWM_GRANULARITY;
+		I915_WRITE(SOUTH_CHICKEN2, schicken);
+	} else {
+		schicken = I915_READ(SOUTH_CHICKEN1);
+		if (panel->backlight.alternate_pwm_increment)
+			schicken |= SPT_PWM_GRANULARITY;
+		else
+			schicken &= ~SPT_PWM_GRANULARITY;
+		I915_WRITE(SOUTH_CHICKEN1, schicken);
+	}
+
 	pch_ctl2 = panel->backlight.max << 16;
 	I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
 
@@ -1242,10 +1258,10 @@
  */
 static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct intel_panel *panel = &connector->panel;
 	u32 mul;
 
-	if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY)
+	if (panel->backlight.alternate_pwm_increment)
 		mul = 128;
 	else
 		mul = 16;
@@ -1261,9 +1277,10 @@
 static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct intel_panel *panel = &connector->panel;
 	u32 mul, clock;
 
-	if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY)
+	if (panel->backlight.alternate_pwm_increment)
 		mul = 16;
 	else
 		mul = 128;
@@ -1414,6 +1431,13 @@
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
 	u32 pch_ctl1, pch_ctl2, val;
+	bool alt;
+
+	if (HAS_PCH_LPT(dev_priv))
+		alt = I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
+	else
+		alt = I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
+	panel->backlight.alternate_pwm_increment = alt;
 
 	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
 	panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
@@ -1430,10 +1454,11 @@
 	panel->backlight.min = get_backlight_min_vbt(connector);
 
 	val = lpt_get_backlight(connector);
-	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+	val = intel_panel_compute_brightness(connector, val);
+	panel->backlight.level = clamp(val, panel->backlight.min,
+				       panel->backlight.max);
 
-	panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
-		panel->backlight.level != 0;
+	panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
 
 	return 0;
 }
@@ -1459,11 +1484,13 @@
 	panel->backlight.min = get_backlight_min_vbt(connector);
 
 	val = pch_get_backlight(connector);
-	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+	val = intel_panel_compute_brightness(connector, val);
+	panel->backlight.level = clamp(val, panel->backlight.min,
+				       panel->backlight.max);
 
 	cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
 	panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
-		(pch_ctl1 & BLM_PCH_PWM_ENABLE) && panel->backlight.level != 0;
+		(pch_ctl1 & BLM_PCH_PWM_ENABLE);
 
 	return 0;
 }
@@ -1498,9 +1525,11 @@
 	panel->backlight.min = get_backlight_min_vbt(connector);
 
 	val = i9xx_get_backlight(connector);
-	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+	val = intel_panel_compute_brightness(connector, val);
+	panel->backlight.level = clamp(val, panel->backlight.min,
+				       panel->backlight.max);
 
-	panel->backlight.enabled = panel->backlight.level != 0;
+	panel->backlight.enabled = val != 0;
 
 	return 0;
 }
@@ -1530,10 +1559,11 @@
 	panel->backlight.min = get_backlight_min_vbt(connector);
 
 	val = i9xx_get_backlight(connector);
-	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+	val = intel_panel_compute_brightness(connector, val);
+	panel->backlight.level = clamp(val, panel->backlight.min,
+				       panel->backlight.max);
 
-	panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
-		panel->backlight.level != 0;
+	panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
 
 	return 0;
 }
@@ -1562,10 +1592,11 @@
 	panel->backlight.min = get_backlight_min_vbt(connector);
 
 	val = _vlv_get_backlight(dev_priv, pipe);
-	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+	val = intel_panel_compute_brightness(connector, val);
+	panel->backlight.level = clamp(val, panel->backlight.min,
+				       panel->backlight.max);
 
-	panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
-		panel->backlight.level != 0;
+	panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
 
 	return 0;
 }
@@ -1607,10 +1638,11 @@
 		return -ENODEV;
 
 	val = bxt_get_backlight(connector);
-	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+	val = intel_panel_compute_brightness(connector, val);
+	panel->backlight.level = clamp(val, panel->backlight.min,
+				       panel->backlight.max);
 
-	panel->backlight.enabled = (pwm_ctl & BXT_BLC_PWM_ENABLE) &&
-		panel->backlight.level != 0;
+	panel->backlight.enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2d24813..a2f751c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -340,6 +340,11 @@
 		I915_WRITE(FW_BLC_SELF, val);
 		POSTING_READ(FW_BLC_SELF);
 	} else if (IS_I915GM(dev)) {
+		/*
+		 * FIXME can't find a bit like this for 915G, and
+		 * and yet it does have the related watermark in
+		 * FW_BLC_SELF. What's going on?
+		 */
 		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
 			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
 		I915_WRITE(INSTPM, val);
@@ -960,7 +965,7 @@
 	if (dev_priv->wm.pri_latency[level] == 0)
 		return USHRT_MAX;
 
-	if (!state->visible)
+	if (!state->base.visible)
 		return 0;
 
 	cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
@@ -1002,7 +1007,7 @@
 		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
 			continue;
 
-		if (state->visible) {
+		if (state->base.visible) {
 			wm_state->num_active_planes++;
 			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
 		}
@@ -1018,7 +1023,7 @@
 			continue;
 		}
 
-		if (!state->visible) {
+		if (!state->base.visible) {
 			plane->wm.fifo_size = 0;
 			continue;
 		}
@@ -1118,7 +1123,7 @@
 		struct intel_plane_state *state =
 			to_intel_plane_state(plane->base.state);
 
-		if (!state->visible)
+		if (!state->base.visible)
 			continue;
 
 		/* normal watermarks */
@@ -1580,7 +1585,7 @@
 		obj = intel_fb_obj(enabled->primary->state->fb);
 
 		/* self-refresh seems busted with untiled */
-		if (obj->tiling_mode == I915_TILING_NONE)
+		if (!i915_gem_object_is_tiled(obj))
 			enabled = NULL;
 	}
 
@@ -1604,6 +1609,9 @@
 		unsigned long line_time_us;
 		int entries;
 
+		if (IS_I915GM(dev) || IS_I945GM(dev))
+			cpp = 4;
+
 		line_time_us = max(htotal * 1000 / clock, 1);
 
 		/* Use ns/us then divide to preserve precision */
@@ -1618,7 +1626,7 @@
 		if (IS_I945G(dev) || IS_I945GM(dev))
 			I915_WRITE(FW_BLC_SELF,
 				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
-		else if (IS_I915GM(dev))
+		else
 			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
 	}
 
@@ -1767,7 +1775,7 @@
 		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
 	uint32_t method1, method2;
 
-	if (!cstate->base.active || !pstate->visible)
+	if (!cstate->base.active || !pstate->base.visible)
 		return 0;
 
 	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
@@ -1777,7 +1785,7 @@
 
 	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
 				 cstate->base.adjusted_mode.crtc_htotal,
-				 drm_rect_width(&pstate->dst),
+				 drm_rect_width(&pstate->base.dst),
 				 cpp, mem_value);
 
 	return min(method1, method2);
@@ -1795,13 +1803,13 @@
 		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
 	uint32_t method1, method2;
 
-	if (!cstate->base.active || !pstate->visible)
+	if (!cstate->base.active || !pstate->base.visible)
 		return 0;
 
 	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
 	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
 				 cstate->base.adjusted_mode.crtc_htotal,
-				 drm_rect_width(&pstate->dst),
+				 drm_rect_width(&pstate->base.dst),
 				 cpp, mem_value);
 	return min(method1, method2);
 }
@@ -1820,7 +1828,7 @@
 	 * this is necessary to avoid flickering.
 	 */
 	int cpp = 4;
-	int width = pstate->visible ? pstate->base.crtc_w : 64;
+	int width = pstate->base.visible ? pstate->base.crtc_w : 64;
 
 	if (!cstate->base.active)
 		return 0;
@@ -1838,10 +1846,10 @@
 	int cpp = pstate->base.fb ?
 		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
 
-	if (!cstate->base.active || !pstate->visible)
+	if (!cstate->base.active || !pstate->base.visible)
 		return 0;
 
-	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp);
+	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
 }
 
 static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
@@ -2119,32 +2127,34 @@
 				GEN9_MEM_LATENCY_LEVEL_MASK;
 
 		/*
+		 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
+		 * need to be disabled. We make sure to sanitize the values out
+		 * of the punit to satisfy this requirement.
+		 */
+		for (level = 1; level <= max_level; level++) {
+			if (wm[level] == 0) {
+				for (i = level + 1; i <= max_level; i++)
+					wm[i] = 0;
+				break;
+			}
+		}
+
+		/*
 		 * WaWmMemoryReadLatency:skl
 		 *
 		 * punit doesn't take into account the read latency so we need
-		 * to add 2us to the various latency levels we retrieve from
-		 * the punit.
-		 *   - W0 is a bit special in that it's the only level that
-		 *   can't be disabled if we want to have display working, so
-		 *   we always add 2us there.
-		 *   - For levels >=1, punit returns 0us latency when they are
-		 *   disabled, so we respect that and don't add 2us then
-		 *
-		 * Additionally, if a level n (n > 1) has a 0us latency, all
-		 * levels m (m >= n) need to be disabled. We make sure to
-		 * sanitize the values out of the punit to satisfy this
-		 * requirement.
+		 * to add 2us to the various latency levels we retrieve from the
+		 * punit when level 0 response data us 0us.
 		 */
-		wm[0] += 2;
-		for (level = 1; level <= max_level; level++)
-			if (wm[level] != 0)
+		if (wm[0] == 0) {
+			wm[0] += 2;
+			for (level = 1; level <= max_level; level++) {
+				if (wm[level] == 0)
+					break;
 				wm[level] += 2;
-			else {
-				for (i = level + 1; i <= max_level; i++)
-					wm[i] = 0;
-
-				break;
 			}
+		}
+
 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		uint64_t sskpd = I915_READ64(MCH_SSKPD);
 
@@ -2358,10 +2368,10 @@
 
 	pipe_wm->pipe_enabled = cstate->base.active;
 	if (sprstate) {
-		pipe_wm->sprites_enabled = sprstate->visible;
-		pipe_wm->sprites_scaled = sprstate->visible &&
-			(drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
-			 drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
+		pipe_wm->sprites_enabled = sprstate->base.visible;
+		pipe_wm->sprites_scaled = sprstate->base.visible &&
+			(drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
+			 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
 	}
 
 	usable_level = max_level;
@@ -2845,13 +2855,6 @@
 	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
 }
 
-/*
- * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
- * different active planes.
- */
-
-#define SKL_DDB_SIZE		896	/* in blocks */
-#define BXT_DDB_SIZE		512
 #define SKL_SAGV_BLOCK_TIME	30 /* µs */
 
 /*
@@ -2876,6 +2879,19 @@
 	}
 }
 
+static bool
+intel_has_sagv(struct drm_i915_private *dev_priv)
+{
+	if (IS_KABYLAKE(dev_priv))
+		return true;
+
+	if (IS_SKYLAKE(dev_priv) &&
+	    dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
+		return true;
+
+	return false;
+}
+
 /*
  * SAGV dynamically adjusts the system agent voltage and clock frequencies
  * depending on power and performance requirements. The display engine access
@@ -2888,12 +2904,14 @@
  *  - We're not using an interlaced display configuration
  */
 int
-skl_enable_sagv(struct drm_i915_private *dev_priv)
+intel_enable_sagv(struct drm_i915_private *dev_priv)
 {
 	int ret;
 
-	if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
-	    dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
+	if (!intel_has_sagv(dev_priv))
+		return 0;
+
+	if (dev_priv->sagv_status == I915_SAGV_ENABLED)
 		return 0;
 
 	DRM_DEBUG_KMS("Enabling the SAGV\n");
@@ -2909,21 +2927,21 @@
 	 * Some skl systems, pre-release machines in particular,
 	 * don't actually have an SAGV.
 	 */
-	if (ret == -ENXIO) {
+	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
 		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
-		dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
 		return 0;
 	} else if (ret < 0) {
 		DRM_ERROR("Failed to enable the SAGV\n");
 		return ret;
 	}
 
-	dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
+	dev_priv->sagv_status = I915_SAGV_ENABLED;
 	return 0;
 }
 
 static int
-skl_do_sagv_disable(struct drm_i915_private *dev_priv)
+intel_do_sagv_disable(struct drm_i915_private *dev_priv)
 {
 	int ret;
 	uint32_t temp = GEN9_SAGV_DISABLE;
@@ -2937,19 +2955,21 @@
 }
 
 int
-skl_disable_sagv(struct drm_i915_private *dev_priv)
+intel_disable_sagv(struct drm_i915_private *dev_priv)
 {
 	int ret, result;
 
-	if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
-	    dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
+	if (!intel_has_sagv(dev_priv))
+		return 0;
+
+	if (dev_priv->sagv_status == I915_SAGV_DISABLED)
 		return 0;
 
 	DRM_DEBUG_KMS("Disabling the SAGV\n");
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	/* bspec says to keep retrying for at least 1 ms */
-	ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
+	ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	if (ret == -ETIMEDOUT) {
@@ -2961,20 +2981,20 @@
 	 * Some skl systems, pre-release machines in particular,
 	 * don't actually have an SAGV.
 	 */
-	if (result == -ENXIO) {
+	if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
 		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
-		dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
 		return 0;
 	} else if (result < 0) {
 		DRM_ERROR("Failed to disable the SAGV\n");
 		return result;
 	}
 
-	dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
+	dev_priv->sagv_status = I915_SAGV_DISABLED;
 	return 0;
 }
 
-bool skl_can_enable_sagv(struct drm_atomic_state *state)
+bool intel_can_enable_sagv(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2983,6 +3003,9 @@
 	enum pipe pipe;
 	int level, plane;
 
+	if (!intel_has_sagv(dev_priv))
+		return false;
+
 	/*
 	 * SKL workaround: bspec recommends we disable the SAGV when we have
 	 * more then one pipe enabled
@@ -3049,10 +3072,8 @@
 	else
 		*num_active = hweight32(dev_priv->active_crtcs);
 
-	if (IS_BROXTON(dev))
-		ddb_size = BXT_DDB_SIZE;
-	else
-		ddb_size = SKL_DDB_SIZE;
+	ddb_size = INTEL_INFO(dev_priv)->ddb_size;
+	WARN_ON(ddb_size == 0);
 
 	ddb_size -= 4; /* 4 blocks for bypass path allocation */
 
@@ -3144,14 +3165,14 @@
 	uint32_t downscale_h, downscale_w;
 	uint32_t src_w, src_h, dst_w, dst_h;
 
-	if (WARN_ON(!pstate->visible))
+	if (WARN_ON(!pstate->base.visible))
 		return DRM_PLANE_HELPER_NO_SCALING;
 
 	/* n.b., src is 16.16 fixed point, dst is whole integer */
-	src_w = drm_rect_width(&pstate->src);
-	src_h = drm_rect_height(&pstate->src);
-	dst_w = drm_rect_width(&pstate->dst);
-	dst_h = drm_rect_height(&pstate->dst);
+	src_w = drm_rect_width(&pstate->base.src);
+	src_h = drm_rect_height(&pstate->base.src);
+	dst_w = drm_rect_width(&pstate->base.dst);
+	dst_h = drm_rect_height(&pstate->base.dst);
 	if (intel_rotation_90_or_270(pstate->base.rotation))
 		swap(dst_w, dst_h);
 
@@ -3173,15 +3194,15 @@
 	uint32_t width = 0, height = 0;
 	unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
 
-	if (!intel_pstate->visible)
+	if (!intel_pstate->base.visible)
 		return 0;
 	if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
 		return 0;
 	if (y && format != DRM_FORMAT_NV12)
 		return 0;
 
-	width = drm_rect_width(&intel_pstate->src) >> 16;
-	height = drm_rect_height(&intel_pstate->src) >> 16;
+	width = drm_rect_width(&intel_pstate->base.src) >> 16;
+	height = drm_rect_height(&intel_pstate->base.src) >> 16;
 
 	if (intel_rotation_90_or_270(pstate->rotation))
 		swap(width, height);
@@ -3280,8 +3301,8 @@
 	    fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
 		return 8;
 
-	src_w = drm_rect_width(&intel_pstate->src) >> 16;
-	src_h = drm_rect_height(&intel_pstate->src) >> 16;
+	src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
+	src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
 
 	if (intel_rotation_90_or_270(pstate->rotation))
 		swap(src_w, src_h);
@@ -3372,7 +3393,7 @@
 		if (intel_plane->pipe != pipe)
 			continue;
 
-		if (!to_intel_plane_state(pstate)->visible) {
+		if (!to_intel_plane_state(pstate)->base.visible) {
 			minimum[id] = 0;
 			y_minimum[id] = 0;
 			continue;
@@ -3473,29 +3494,14 @@
 }
 
 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
-			       uint32_t horiz_pixels, uint8_t cpp,
-			       uint64_t tiling, uint32_t latency)
+			       uint32_t latency, uint32_t plane_blocks_per_line)
 {
 	uint32_t ret;
-	uint32_t plane_bytes_per_line, plane_blocks_per_line;
 	uint32_t wm_intermediate_val;
 
 	if (latency == 0)
 		return UINT_MAX;
 
-	plane_bytes_per_line = horiz_pixels * cpp;
-
-	if (tiling == I915_FORMAT_MOD_Y_TILED ||
-	    tiling == I915_FORMAT_MOD_Yf_TILED) {
-		plane_bytes_per_line *= 4;
-		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
-		plane_blocks_per_line /= 4;
-	} else if (tiling == DRM_FORMAT_MOD_NONE) {
-		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
-	} else {
-		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
-	}
-
 	wm_intermediate_val = latency * pixel_rate;
 	ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
 				plane_blocks_per_line;
@@ -3511,7 +3517,7 @@
 	uint64_t pixel_rate;
 
 	/* Shouldn't reach here on disabled planes... */
-	if (WARN_ON(!pstate->visible))
+	if (WARN_ON(!pstate->base.visible))
 		return 0;
 
 	/*
@@ -3546,14 +3552,15 @@
 	uint8_t cpp;
 	uint32_t width = 0, height = 0;
 	uint32_t plane_pixel_rate;
+	uint32_t y_tile_minimum, y_min_scanlines;
 
-	if (latency == 0 || !cstate->base.active || !intel_pstate->visible) {
+	if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
 		*enabled = false;
 		return 0;
 	}
 
-	width = drm_rect_width(&intel_pstate->src) >> 16;
-	height = drm_rect_height(&intel_pstate->src) >> 16;
+	width = drm_rect_width(&intel_pstate->base.src) >> 16;
+	height = drm_rect_height(&intel_pstate->base.src) >> 16;
 
 	if (intel_rotation_90_or_270(pstate->rotation))
 		swap(width, height);
@@ -3561,38 +3568,51 @@
 	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 	plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
 
+	if (intel_rotation_90_or_270(pstate->rotation)) {
+		int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
+			drm_format_plane_cpp(fb->pixel_format, 1) :
+			drm_format_plane_cpp(fb->pixel_format, 0);
+
+		switch (cpp) {
+		case 1:
+			y_min_scanlines = 16;
+			break;
+		case 2:
+			y_min_scanlines = 8;
+			break;
+		default:
+			WARN(1, "Unsupported pixel depth for rotation");
+		case 4:
+			y_min_scanlines = 4;
+			break;
+		}
+	} else {
+		y_min_scanlines = 4;
+	}
+
+	plane_bytes_per_line = width * cpp;
+	if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
+	    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
+		plane_blocks_per_line =
+		      DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512);
+		plane_blocks_per_line /= y_min_scanlines;
+	} else if (fb->modifier[0] == DRM_FORMAT_MOD_NONE) {
+		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512)
+					+ 1;
+	} else {
+		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+	}
+
 	method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
 	method2 = skl_wm_method2(plane_pixel_rate,
 				 cstate->base.adjusted_mode.crtc_htotal,
-				 width,
-				 cpp,
-				 fb->modifier[0],
-				 latency);
+				 latency,
+				 plane_blocks_per_line);
 
-	plane_bytes_per_line = width * cpp;
-	plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+	y_tile_minimum = plane_blocks_per_line * y_min_scanlines;
 
 	if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
 	    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
-		uint32_t min_scanlines = 4;
-		uint32_t y_tile_minimum;
-		if (intel_rotation_90_or_270(pstate->rotation)) {
-			int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
-				drm_format_plane_cpp(fb->pixel_format, 1) :
-				drm_format_plane_cpp(fb->pixel_format, 0);
-
-			switch (cpp) {
-			case 1:
-				min_scanlines = 16;
-				break;
-			case 2:
-				min_scanlines = 8;
-				break;
-			case 8:
-				WARN(1, "Unsupported pixel depth for rotation");
-			}
-		}
-		y_tile_minimum = plane_blocks_per_line * min_scanlines;
 		selected_result = max(method2, y_tile_minimum);
 	} else {
 		if ((ddb_allocation / plane_blocks_per_line) >= 1)
@@ -3606,10 +3626,12 @@
 
 	if (level >= 1 && level <= 7) {
 		if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
-		    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
-			res_lines += 4;
-		else
+		    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
+			res_blocks += y_tile_minimum;
+			res_lines += y_min_scanlines;
+		} else {
 			res_blocks++;
+		}
 	}
 
 	if (res_blocks >= ddb_allocation || res_lines > 31) {
@@ -3828,183 +3850,82 @@
 		I915_WRITE(reg, 0);
 }
 
-static void skl_write_wm_values(struct drm_i915_private *dev_priv,
-				const struct skl_wm_values *new)
+void skl_write_plane_wm(struct intel_crtc *intel_crtc,
+			const struct skl_wm_values *wm,
+			int plane)
 {
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_crtc *crtc;
+	struct drm_crtc *crtc = &intel_crtc->base;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	int level, max_level = ilk_wm_max_level(dev);
+	enum pipe pipe = intel_crtc->pipe;
 
-	for_each_intel_crtc(dev, crtc) {
-		int i, level, max_level = ilk_wm_max_level(dev);
-		enum pipe pipe = crtc->pipe;
-
-		if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
-			continue;
-		if (!crtc->active)
-			continue;
-
-		I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
-
-		for (level = 0; level <= max_level; level++) {
-			for (i = 0; i < intel_num_planes(crtc); i++)
-				I915_WRITE(PLANE_WM(pipe, i, level),
-					   new->plane[pipe][i][level]);
-			I915_WRITE(CUR_WM(pipe, level),
-				   new->plane[pipe][PLANE_CURSOR][level]);
-		}
-		for (i = 0; i < intel_num_planes(crtc); i++)
-			I915_WRITE(PLANE_WM_TRANS(pipe, i),
-				   new->plane_trans[pipe][i]);
-		I915_WRITE(CUR_WM_TRANS(pipe),
-			   new->plane_trans[pipe][PLANE_CURSOR]);
-
-		for (i = 0; i < intel_num_planes(crtc); i++) {
-			skl_ddb_entry_write(dev_priv,
-					    PLANE_BUF_CFG(pipe, i),
-					    &new->ddb.plane[pipe][i]);
-			skl_ddb_entry_write(dev_priv,
-					    PLANE_NV12_BUF_CFG(pipe, i),
-					    &new->ddb.y_plane[pipe][i]);
-		}
-
-		skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
-				    &new->ddb.plane[pipe][PLANE_CURSOR]);
+	for (level = 0; level <= max_level; level++) {
+		I915_WRITE(PLANE_WM(pipe, plane, level),
+			   wm->plane[pipe][plane][level]);
 	}
+	I915_WRITE(PLANE_WM_TRANS(pipe, plane), wm->plane_trans[pipe][plane]);
+
+	skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane),
+			    &wm->ddb.plane[pipe][plane]);
+	skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane),
+			    &wm->ddb.y_plane[pipe][plane]);
 }
 
-/*
- * When setting up a new DDB allocation arrangement, we need to correctly
- * sequence the times at which the new allocations for the pipes are taken into
- * account or we'll have pipes fetching from space previously allocated to
- * another pipe.
- *
- * Roughly the sequence looks like:
- *  1. re-allocate the pipe(s) with the allocation being reduced and not
- *     overlapping with a previous light-up pipe (another way to put it is:
- *     pipes with their new allocation strickly included into their old ones).
- *  2. re-allocate the other pipes that get their allocation reduced
- *  3. allocate the pipes having their allocation increased
- *
- * Steps 1. and 2. are here to take care of the following case:
- * - Initially DDB looks like this:
- *     |   B    |   C    |
- * - enable pipe A.
- * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
- *   allocation
- *     |  A  |  B  |  C  |
- *
- * We need to sequence the re-allocation: C, B, A (and not B, C, A).
- */
-
-static void
-skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
+void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
+			 const struct skl_wm_values *wm)
 {
-	int plane;
+	struct drm_crtc *crtc = &intel_crtc->base;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	int level, max_level = ilk_wm_max_level(dev);
+	enum pipe pipe = intel_crtc->pipe;
 
-	DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
-
-	for_each_plane(dev_priv, pipe, plane) {
-		I915_WRITE(PLANE_SURF(pipe, plane),
-			   I915_READ(PLANE_SURF(pipe, plane)));
+	for (level = 0; level <= max_level; level++) {
+		I915_WRITE(CUR_WM(pipe, level),
+			   wm->plane[pipe][PLANE_CURSOR][level]);
 	}
-	I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+	I915_WRITE(CUR_WM_TRANS(pipe), wm->plane_trans[pipe][PLANE_CURSOR]);
+
+	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
+			    &wm->ddb.plane[pipe][PLANE_CURSOR]);
 }
 
-static bool
-skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
-			    const struct skl_ddb_allocation *new,
-			    enum pipe pipe)
+bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
+			       const struct skl_ddb_allocation *new,
+			       enum pipe pipe)
 {
-	uint16_t old_size, new_size;
-
-	old_size = skl_ddb_entry_size(&old->pipe[pipe]);
-	new_size = skl_ddb_entry_size(&new->pipe[pipe]);
-
-	return old_size != new_size &&
-	       new->pipe[pipe].start >= old->pipe[pipe].start &&
-	       new->pipe[pipe].end <= old->pipe[pipe].end;
+	return new->pipe[pipe].start == old->pipe[pipe].start &&
+	       new->pipe[pipe].end == old->pipe[pipe].end;
 }
 
-static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
-				struct skl_wm_values *new_values)
+static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
+					   const struct skl_ddb_entry *b)
 {
-	struct drm_device *dev = &dev_priv->drm;
-	struct skl_ddb_allocation *cur_ddb, *new_ddb;
-	bool reallocated[I915_MAX_PIPES] = {};
-	struct intel_crtc *crtc;
-	enum pipe pipe;
+	return a->start < b->end && b->start < a->end;
+}
 
-	new_ddb = &new_values->ddb;
-	cur_ddb = &dev_priv->wm.skl_hw.ddb;
+bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
+				 const struct skl_ddb_allocation *old,
+				 const struct skl_ddb_allocation *new,
+				 enum pipe pipe)
+{
+	struct drm_device *dev = state->dev;
+	struct intel_crtc *intel_crtc;
+	enum pipe otherp;
 
-	/*
-	 * First pass: flush the pipes with the new allocation contained into
-	 * the old space.
-	 *
-	 * We'll wait for the vblank on those pipes to ensure we can safely
-	 * re-allocate the freed space without this pipe fetching from it.
-	 */
-	for_each_intel_crtc(dev, crtc) {
-		if (!crtc->active)
+	for_each_intel_crtc(dev, intel_crtc) {
+		otherp = intel_crtc->pipe;
+
+		if (otherp == pipe)
 			continue;
 
-		pipe = crtc->pipe;
-
-		if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
-			continue;
-
-		skl_wm_flush_pipe(dev_priv, pipe, 1);
-		intel_wait_for_vblank(dev, pipe);
-
-		reallocated[pipe] = true;
+		if (skl_ddb_entries_overlap(&new->pipe[pipe],
+					    &old->pipe[otherp]))
+			return true;
 	}
 
-
-	/*
-	 * Second pass: flush the pipes that are having their allocation
-	 * reduced, but overlapping with a previous allocation.
-	 *
-	 * Here as well we need to wait for the vblank to make sure the freed
-	 * space is not used anymore.
-	 */
-	for_each_intel_crtc(dev, crtc) {
-		if (!crtc->active)
-			continue;
-
-		pipe = crtc->pipe;
-
-		if (reallocated[pipe])
-			continue;
-
-		if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
-		    skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
-			skl_wm_flush_pipe(dev_priv, pipe, 2);
-			intel_wait_for_vblank(dev, pipe);
-			reallocated[pipe] = true;
-		}
-	}
-
-	/*
-	 * Third pass: flush the pipes that got more space allocated.
-	 *
-	 * We don't need to actively wait for the update here, next vblank
-	 * will just get more DDB space with the correct WM values.
-	 */
-	for_each_intel_crtc(dev, crtc) {
-		if (!crtc->active)
-			continue;
-
-		pipe = crtc->pipe;
-
-		/*
-		 * At this point, only the pipes more space than before are
-		 * left to re-allocate.
-		 */
-		if (reallocated[pipe])
-			continue;
-
-		skl_wm_flush_pipe(dev_priv, pipe, 3);
-	}
+	return false;
 }
 
 static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
@@ -4041,6 +3962,41 @@
 	return ret;
 }
 
+int
+skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
+{
+	struct drm_atomic_state *state = cstate->base.state;
+	struct drm_device *dev = state->dev;
+	struct drm_crtc *crtc = cstate->base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+	struct drm_plane_state *plane_state;
+	struct drm_plane *plane;
+	enum pipe pipe = intel_crtc->pipe;
+	int id;
+
+	WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
+
+	drm_for_each_plane_mask(plane, dev, crtc->state->plane_mask) {
+		id = skl_wm_plane_id(to_intel_plane(plane));
+
+		if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id],
+					&new_ddb->plane[pipe][id]) &&
+		    skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][id],
+					&new_ddb->y_plane[pipe][id]))
+			continue;
+
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state))
+			return PTR_ERR(plane_state);
+	}
+
+	return 0;
+}
+
 static int
 skl_compute_ddb(struct drm_atomic_state *state)
 {
@@ -4105,7 +4061,7 @@
 		if (ret)
 			return ret;
 
-		ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
+		ret = skl_ddb_add_affected_planes(cstate);
 		if (ret)
 			return ret;
 	}
@@ -4206,7 +4162,7 @@
 	struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
-	int pipe;
+	enum pipe pipe = intel_crtc->pipe;
 
 	if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
 		return;
@@ -4215,15 +4171,22 @@
 
 	mutex_lock(&dev_priv->wm.wm_mutex);
 
-	skl_write_wm_values(dev_priv, results);
-	skl_flush_wm_values(dev_priv, results);
-
 	/*
-	 * Store the new configuration (but only for the pipes that have
-	 * changed; the other values weren't recomputed).
+	 * If this pipe isn't active already, we're going to be enabling it
+	 * very soon. Since it's safe to update a pipe's ddb allocation while
+	 * the pipe's shut off, just do so here. Already active pipes will have
+	 * their watermarks updated once we update their planes.
 	 */
-	for_each_pipe_masked(dev_priv, pipe, results->dirty_pipes)
-		skl_copy_wm_for_pipe(hw_vals, results, pipe);
+	if (crtc->state->active_changed) {
+		int plane;
+
+		for (plane = 0; plane < intel_num_planes(intel_crtc); plane++)
+			skl_write_plane_wm(intel_crtc, results, plane);
+
+		skl_write_cursor_wm(intel_crtc, results);
+	}
+
+	skl_copy_wm_for_pipe(hw_vals, results, pipe);
 
 	mutex_unlock(&dev_priv->wm.wm_mutex);
 }
@@ -5103,7 +5066,7 @@
 	 */
 	if (!(dev_priv->gt.awake &&
 	      dev_priv->rps.enabled &&
-	      dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
+	      dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
 		return;
 
 	/* Force a RPS boost (and don't count it against the client) if
@@ -5294,35 +5257,31 @@
 
 static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
 {
-	uint32_t rp_state_cap;
-	u32 ddcc_status = 0;
-	int ret;
-
 	/* All of these values are in units of 50MHz */
-	dev_priv->rps.cur_freq		= 0;
+
 	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
 	if (IS_BROXTON(dev_priv)) {
-		rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+		u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
 		dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
 		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
 		dev_priv->rps.min_freq = (rp_state_cap >>  0) & 0xff;
 	} else {
-		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 		dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
 		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
 		dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
 	}
-
 	/* hw_max = RP0 until we check for overclocking */
-	dev_priv->rps.max_freq		= dev_priv->rps.rp0_freq;
+	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
 
 	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
 	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-		ret = sandybridge_pcode_read(dev_priv,
-					HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
-					&ddcc_status);
-		if (0 == ret)
+		u32 ddcc_status = 0;
+
+		if (sandybridge_pcode_read(dev_priv,
+					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+					   &ddcc_status) == 0)
 			dev_priv->rps.efficient_freq =
 				clamp_t(u8,
 					((ddcc_status >> 8) & 0xff),
@@ -5332,29 +5291,26 @@
 
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 		/* Store the frequency values in 16.66 MHZ units, which is
-		   the natural hardware unit for SKL */
+		 * the natural hardware unit for SKL
+		 */
 		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
 		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
 		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
 		dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
 		dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
 	}
+}
 
-	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+static void reset_rps(struct drm_i915_private *dev_priv,
+		      void (*set)(struct drm_i915_private *, u8))
+{
+	u8 freq = dev_priv->rps.cur_freq;
 
-	/* Preserve min/max settings in case of re-init */
-	if (dev_priv->rps.max_freq_softlimit == 0)
-		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+	/* force a reset */
+	dev_priv->rps.power = -1;
+	dev_priv->rps.cur_freq = -1;
 
-	if (dev_priv->rps.min_freq_softlimit == 0) {
-		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-			dev_priv->rps.min_freq_softlimit =
-				max_t(int, dev_priv->rps.efficient_freq,
-				      intel_freq_opcode(dev_priv, 450));
-		else
-			dev_priv->rps.min_freq_softlimit =
-				dev_priv->rps.min_freq;
-	}
+	set(dev_priv, freq);
 }
 
 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
@@ -5362,8 +5318,6 @@
 {
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-	gen6_init_rps_frequencies(dev_priv);
-
 	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
 	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
 		/*
@@ -5393,8 +5347,7 @@
 	/* Leaning on the below call to gen6_set_rps to program/setup the
 	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
 	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
-	dev_priv->rps.power = HIGH_POWER; /* force a reset */
-	gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
+	reset_rps(dev_priv, gen6_set_rps);
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -5481,9 +5434,6 @@
 	/* 2a: Disable RC states. */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 
-	/* Initialize rps frequencies */
-	gen6_init_rps_frequencies(dev_priv);
-
 	/* 2b: Program RC6 thresholds.*/
 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
@@ -5540,8 +5490,7 @@
 
 	/* 6: Ring frequency + overclocking (our driver does this later */
 
-	dev_priv->rps.power = HIGH_POWER; /* force a reset */
-	gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
+	reset_rps(dev_priv, gen6_set_rps);
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -5549,7 +5498,7 @@
 static void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
-	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
+	u32 rc6vids, rc6_mask = 0;
 	u32 gtfifodbg;
 	int rc6_mode;
 	int ret;
@@ -5573,9 +5522,6 @@
 
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-	/* Initialize rps frequencies */
-	gen6_init_rps_frequencies(dev_priv);
-
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 
@@ -5626,16 +5572,7 @@
 	if (ret)
 		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
 
-	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
-	if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
-		DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
-				 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
-				 (pcu_mbox & 0xff) * 50);
-		dev_priv->rps.max_freq = pcu_mbox & 0xff;
-	}
-
-	dev_priv->rps.power = HIGH_POWER; /* force a reset */
-	gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
+	reset_rps(dev_priv, gen6_set_rps);
 
 	rc6vids = 0;
 	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
@@ -5654,7 +5591,7 @@
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
+static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 {
 	int min_freq = 15;
 	unsigned int gpu_freq;
@@ -5738,23 +5675,13 @@
 	}
 }
 
-void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
-{
-	if (!HAS_CORE_RING_FREQ(dev_priv))
-		return;
-
-	mutex_lock(&dev_priv->rps.hw_lock);
-	__gen6_update_ring_freq(dev_priv);
-	mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
 {
 	u32 val, rp0;
 
 	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
 
-	switch (INTEL_INFO(dev_priv)->eu_total) {
+	switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
 	case 8:
 		/* (2 * 4) config */
 		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -5892,8 +5819,6 @@
 	u32 pcbr;
 	int pctx_size = 24*1024;
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-
 	pcbr = I915_READ(VLV_PCBR);
 	if (pcbr) {
 		/* BIOS set it up already, grab the pre-alloc'd space */
@@ -5929,7 +5854,6 @@
 out:
 	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
 	dev_priv->vlv_pctx = pctx;
-	mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
@@ -5937,7 +5861,7 @@
 	if (WARN_ON(!dev_priv->vlv_pctx))
 		return;
 
-	drm_gem_object_unreference_unlocked(&dev_priv->vlv_pctx->base);
+	i915_gem_object_put_unlocked(dev_priv->vlv_pctx);
 	dev_priv->vlv_pctx = NULL;
 }
 
@@ -5960,8 +5884,6 @@
 
 	vlv_init_gpll_ref_freq(dev_priv);
 
-	mutex_lock(&dev_priv->rps.hw_lock);
-
 	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 	switch ((val >> 6) & 3) {
 	case 0:
@@ -5997,17 +5919,6 @@
 	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
 			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
 			 dev_priv->rps.min_freq);
-
-	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
-
-	/* Preserve min/max settings in case of re-init */
-	if (dev_priv->rps.max_freq_softlimit == 0)
-		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-
-	if (dev_priv->rps.min_freq_softlimit == 0)
-		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
-
-	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
 static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
@@ -6018,8 +5929,6 @@
 
 	vlv_init_gpll_ref_freq(dev_priv);
 
-	mutex_lock(&dev_priv->rps.hw_lock);
-
 	mutex_lock(&dev_priv->sb_lock);
 	val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
 	mutex_unlock(&dev_priv->sb_lock);
@@ -6061,17 +5970,6 @@
 		   dev_priv->rps.rp1_freq |
 		   dev_priv->rps.min_freq) & 1,
 		  "Odd GPU freq values\n");
-
-	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
-
-	/* Preserve min/max settings in case of re-init */
-	if (dev_priv->rps.max_freq_softlimit == 0)
-		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-
-	if (dev_priv->rps.min_freq_softlimit == 0)
-		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
-
-	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
 static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
@@ -6162,16 +6060,7 @@
 	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
-	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
-	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
-			 dev_priv->rps.cur_freq);
-
-	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
-			 dev_priv->rps.idle_freq);
-
-	valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
+	reset_rps(dev_priv, valleyview_set_rps);
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -6251,16 +6140,7 @@
 	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
-	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
-	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
-			 dev_priv->rps.cur_freq);
-
-	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
-			 dev_priv->rps.idle_freq);
-
-	valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
+	reset_rps(dev_priv, valleyview_set_rps);
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -6589,19 +6469,11 @@
  */
 bool i915_gpu_busy(void)
 {
-	struct drm_i915_private *dev_priv;
-	struct intel_engine_cs *engine;
 	bool ret = false;
 
 	spin_lock_irq(&mchdev_lock);
-	if (!i915_mch_dev)
-		goto out_unlock;
-	dev_priv = i915_mch_dev;
-
-	for_each_engine(engine, dev_priv)
-		ret |= !list_empty(&engine->request_list);
-
-out_unlock:
+	if (i915_mch_dev)
+		ret = i915_mch_dev->gt.awake;
 	spin_unlock_irq(&mchdev_lock);
 
 	return ret;
@@ -6757,10 +6629,51 @@
 		intel_runtime_pm_get(dev_priv);
 	}
 
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	/* Initialize RPS limits (for userspace) */
 	if (IS_CHERRYVIEW(dev_priv))
 		cherryview_init_gt_powersave(dev_priv);
 	else if (IS_VALLEYVIEW(dev_priv))
 		valleyview_init_gt_powersave(dev_priv);
+	else if (INTEL_GEN(dev_priv) >= 6)
+		gen6_init_rps_frequencies(dev_priv);
+
+	/* Derive initial user preferences/limits from the hardware limits */
+	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+	dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
+
+	dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+	dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		dev_priv->rps.min_freq_softlimit =
+			max_t(int,
+			      dev_priv->rps.efficient_freq,
+			      intel_freq_opcode(dev_priv, 450));
+
+	/* After setting max-softlimit, find the overclock max freq */
+	if (IS_GEN6(dev_priv) ||
+	    IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
+		u32 params = 0;
+
+		sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
+		if (params & BIT(31)) { /* OC supported */
+			DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
+					 (dev_priv->rps.max_freq & 0xff) * 50,
+					 (params & 0xff) * 50);
+			dev_priv->rps.max_freq = params & 0xff;
+		}
+	}
+
+	/* Finally allow us to boost to max by default */
+	dev_priv->rps.boost_freq = dev_priv->rps.max_freq;
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+
+	intel_autoenable_gt_powersave(dev_priv);
 }
 
 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
@@ -6772,13 +6685,6 @@
 		intel_runtime_pm_put(dev_priv);
 }
 
-static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
-{
-	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
-	gen6_disable_rps_interrupts(dev_priv);
-}
-
 /**
  * intel_suspend_gt_powersave - suspend PM work and helper threads
  * @dev_priv: i915 device
@@ -6792,60 +6698,76 @@
 	if (INTEL_GEN(dev_priv) < 6)
 		return;
 
-	gen6_suspend_rps(dev_priv);
+	if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
+		intel_runtime_pm_put(dev_priv);
 
-	/* Force GPU to min freq during suspend */
-	gen6_rps_idle(dev_priv);
+	/* gen6_rps_idle() will be called later to disable interrupts */
+}
+
+void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
+{
+	dev_priv->rps.enabled = true; /* force disabling */
+	intel_disable_gt_powersave(dev_priv);
+
+	gen6_reset_rps_interrupts(dev_priv);
 }
 
 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
 {
-	if (IS_IRONLAKE_M(dev_priv)) {
-		ironlake_disable_drps(dev_priv);
-	} else if (INTEL_INFO(dev_priv)->gen >= 6) {
-		intel_suspend_gt_powersave(dev_priv);
-
-		mutex_lock(&dev_priv->rps.hw_lock);
-		if (INTEL_INFO(dev_priv)->gen >= 9) {
-			gen9_disable_rc6(dev_priv);
-			gen9_disable_rps(dev_priv);
-		} else if (IS_CHERRYVIEW(dev_priv))
-			cherryview_disable_rps(dev_priv);
-		else if (IS_VALLEYVIEW(dev_priv))
-			valleyview_disable_rps(dev_priv);
-		else
-			gen6_disable_rps(dev_priv);
-
-		dev_priv->rps.enabled = false;
-		mutex_unlock(&dev_priv->rps.hw_lock);
-	}
-}
-
-static void intel_gen6_powersave_work(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, struct drm_i915_private,
-			     rps.delayed_resume_work.work);
+	if (!READ_ONCE(dev_priv->rps.enabled))
+		return;
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 
-	gen6_reset_rps_interrupts(dev_priv);
+	if (INTEL_GEN(dev_priv) >= 9) {
+		gen9_disable_rc6(dev_priv);
+		gen9_disable_rps(dev_priv);
+	} else if (IS_CHERRYVIEW(dev_priv)) {
+		cherryview_disable_rps(dev_priv);
+	} else if (IS_VALLEYVIEW(dev_priv)) {
+		valleyview_disable_rps(dev_priv);
+	} else if (INTEL_GEN(dev_priv) >= 6) {
+		gen6_disable_rps(dev_priv);
+	}  else if (IS_IRONLAKE_M(dev_priv)) {
+		ironlake_disable_drps(dev_priv);
+	}
+
+	dev_priv->rps.enabled = false;
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
+{
+	/* We shouldn't be disabling as we submit, so this should be less
+	 * racy than it appears!
+	 */
+	if (READ_ONCE(dev_priv->rps.enabled))
+		return;
+
+	/* Powersaving is controlled by the host when inside a VM */
+	if (intel_vgpu_active(dev_priv))
+		return;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_CHERRYVIEW(dev_priv)) {
 		cherryview_enable_rps(dev_priv);
 	} else if (IS_VALLEYVIEW(dev_priv)) {
 		valleyview_enable_rps(dev_priv);
-	} else if (INTEL_INFO(dev_priv)->gen >= 9) {
+	} else if (INTEL_GEN(dev_priv) >= 9) {
 		gen9_enable_rc6(dev_priv);
 		gen9_enable_rps(dev_priv);
 		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-			__gen6_update_ring_freq(dev_priv);
+			gen6_update_ring_freq(dev_priv);
 	} else if (IS_BROADWELL(dev_priv)) {
 		gen8_enable_rps(dev_priv);
-		__gen6_update_ring_freq(dev_priv);
-	} else {
+		gen6_update_ring_freq(dev_priv);
+	} else if (INTEL_GEN(dev_priv) >= 6) {
 		gen6_enable_rps(dev_priv);
-		__gen6_update_ring_freq(dev_priv);
+		gen6_update_ring_freq(dev_priv);
+	} else if (IS_IRONLAKE_M(dev_priv)) {
+		ironlake_enable_drps(dev_priv);
+		intel_init_emon(dev_priv);
 	}
 
 	WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
@@ -6855,25 +6777,52 @@
 	WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
 
 	dev_priv->rps.enabled = true;
-
-	gen6_enable_rps_interrupts(dev_priv);
-
 	mutex_unlock(&dev_priv->rps.hw_lock);
+}
 
+static void __intel_autoenable_gt_powersave(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
+	struct intel_engine_cs *rcs;
+	struct drm_i915_gem_request *req;
+
+	if (READ_ONCE(dev_priv->rps.enabled))
+		goto out;
+
+	rcs = &dev_priv->engine[RCS];
+	if (rcs->last_context)
+		goto out;
+
+	if (!rcs->init_context)
+		goto out;
+
+	mutex_lock(&dev_priv->drm.struct_mutex);
+
+	req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
+	if (IS_ERR(req))
+		goto unlock;
+
+	if (!i915.enable_execlists && i915_switch_context(req) == 0)
+		rcs->init_context(req);
+
+	/* Mark the device busy, calling intel_enable_gt_powersave() */
+	i915_add_request_no_flush(req);
+
+unlock:
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+out:
 	intel_runtime_pm_put(dev_priv);
 }
 
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
+void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
 {
-	/* Powersaving is controlled by the host when inside a VM */
-	if (intel_vgpu_active(dev_priv))
+	if (READ_ONCE(dev_priv->rps.enabled))
 		return;
 
 	if (IS_IRONLAKE_M(dev_priv)) {
 		ironlake_enable_drps(dev_priv);
-		mutex_lock(&dev_priv->drm.struct_mutex);
 		intel_init_emon(dev_priv);
-		mutex_unlock(&dev_priv->drm.struct_mutex);
 	} else if (INTEL_INFO(dev_priv)->gen >= 6) {
 		/*
 		 * PCU communication is slow and this doesn't need to be
@@ -6887,21 +6836,13 @@
 		 * paths, so the _noresume version is enough (and in case of
 		 * runtime resume it's necessary).
 		 */
-		if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
-					   round_jiffies_up_relative(HZ)))
+		if (queue_delayed_work(dev_priv->wq,
+				       &dev_priv->rps.autoenable_work,
+				       round_jiffies_up_relative(HZ)))
 			intel_runtime_pm_get_noresume(dev_priv);
 	}
 }
 
-void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
-{
-	if (INTEL_INFO(dev_priv)->gen < 6)
-		return;
-
-	gen6_suspend_rps(dev_priv);
-	dev_priv->rps.enabled = false;
-}
-
 static void ibx_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -8046,7 +7987,7 @@
 	if (!i915_gem_request_completed(req))
 		gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
 
-	i915_gem_request_unreference(req);
+	i915_gem_request_put(req);
 	kfree(boost);
 }
 
@@ -8064,8 +8005,7 @@
 	if (boost == NULL)
 		return;
 
-	i915_gem_request_reference(req);
-	boost->req = req;
+	boost->req = i915_gem_request_get(req);
 
 	INIT_WORK(&boost->work, __intel_rps_boost_work);
 	queue_work(req->i915->wq, &boost->work);
@@ -8078,11 +8018,9 @@
 	mutex_init(&dev_priv->rps.hw_lock);
 	spin_lock_init(&dev_priv->rps.client_lock);
 
-	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
-			  intel_gen6_powersave_work);
+	INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
+			  __intel_autoenable_gt_powersave);
 	INIT_LIST_HEAD(&dev_priv->rps.clients);
-	INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
-	INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);
 
 	dev_priv->pm.suspended = false;
 	atomic_set(&dev_priv->pm.wakeref_count, 0);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index cf171b4..108ba1e 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -645,9 +645,8 @@
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
-static void intel_psr_exit(struct drm_device *dev)
+static void intel_psr_exit(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_dp *intel_dp = dev_priv->psr.enabled;
 	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -656,7 +655,7 @@
 	if (!dev_priv->psr.active)
 		return;
 
-	if (HAS_DDI(dev)) {
+	if (HAS_DDI(dev_priv)) {
 		val = I915_READ(EDP_PSR_CTL);
 
 		WARN_ON(!(val & EDP_PSR_ENABLE));
@@ -691,7 +690,7 @@
 
 /**
  * intel_psr_single_frame_update - Single Frame Update
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * Some platforms support a single frame update feature that is used to
@@ -699,10 +698,9 @@
  * So far it is only implemented for Valleyview and Cherryview because
  * hardware requires this to be done before a page flip.
  */
-void intel_psr_single_frame_update(struct drm_device *dev,
+void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
 				   unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	enum pipe pipe;
 	u32 val;
@@ -711,7 +709,7 @@
 	 * Single frame update is already supported on BDW+ but it requires
 	 * many W/A and it isn't really needed.
 	 */
-	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
+	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
 		return;
 
 	mutex_lock(&dev_priv->psr.lock);
@@ -737,7 +735,7 @@
 
 /**
  * intel_psr_invalidate - Invalidade PSR
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * Since the hardware frontbuffer tracking has gaps we need to integrate
@@ -747,10 +745,9 @@
  *
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
  */
-void intel_psr_invalidate(struct drm_device *dev,
+void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	enum pipe pipe;
 
@@ -767,14 +764,14 @@
 	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
 
 	if (frontbuffer_bits)
-		intel_psr_exit(dev);
+		intel_psr_exit(dev_priv);
 
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
 /**
  * intel_psr_flush - Flush PSR
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  * @origin: which operation caused the flush
  *
@@ -785,10 +782,9 @@
  *
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
  */
-void intel_psr_flush(struct drm_device *dev,
+void intel_psr_flush(struct drm_i915_private *dev_priv,
 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	enum pipe pipe;
 
@@ -806,7 +802,7 @@
 
 	/* By definition flush = invalidate + flush */
 	if (frontbuffer_bits)
-		intel_psr_exit(dev);
+		intel_psr_exit(dev_priv);
 
 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
 		if (!work_busy(&dev_priv->psr.work.work))
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
index 5bd6985..08f6fea 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -24,12 +24,13 @@
 #ifndef _INTEL_RENDERSTATE_H
 #define _INTEL_RENDERSTATE_H
 
-#include "i915_drv.h"
+#include <linux/types.h>
 
-extern const struct intel_renderstate_rodata gen6_null_state;
-extern const struct intel_renderstate_rodata gen7_null_state;
-extern const struct intel_renderstate_rodata gen8_null_state;
-extern const struct intel_renderstate_rodata gen9_null_state;
+struct intel_renderstate_rodata {
+	const u32 *reloc;
+	const u32 *batch;
+	const u32 batch_items;
+};
 
 #define RO_RENDERSTATE(_g)						\
 	const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
@@ -38,4 +39,9 @@
 		.batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
 	}
 
+extern const struct intel_renderstate_rodata gen6_null_state;
+extern const struct intel_renderstate_rodata gen7_null_state;
+extern const struct intel_renderstate_rodata gen8_null_state;
+extern const struct intel_renderstate_rodata gen9_null_state;
+
 #endif /* INTEL_RENDERSTATE_H */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1d3161b..ed9955d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -47,57 +47,44 @@
 	return space - I915_RING_FREE_SPACE;
 }
 
-void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
+void intel_ring_update_space(struct intel_ring *ring)
 {
-	if (ringbuf->last_retired_head != -1) {
-		ringbuf->head = ringbuf->last_retired_head;
-		ringbuf->last_retired_head = -1;
+	if (ring->last_retired_head != -1) {
+		ring->head = ring->last_retired_head;
+		ring->last_retired_head = -1;
 	}
 
-	ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
-					    ringbuf->tail, ringbuf->size);
-}
-
-static void __intel_ring_advance(struct intel_engine_cs *engine)
-{
-	struct intel_ringbuffer *ringbuf = engine->buffer;
-	ringbuf->tail &= ringbuf->size - 1;
-	engine->write_tail(engine, ringbuf->tail);
+	ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
+					 ring->tail, ring->size);
 }
 
 static int
-gen2_render_ring_flush(struct drm_i915_gem_request *req,
-		       u32	invalidate_domains,
-		       u32	flush_domains)
+gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	u32 cmd;
 	int ret;
 
 	cmd = MI_FLUSH;
-	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
-		cmd |= MI_NO_WRITE_FLUSH;
 
-	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_READ_FLUSH;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, cmd);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
 static int
-gen4_render_ring_flush(struct drm_i915_gem_request *req,
-		       u32	invalidate_domains,
-		       u32	flush_domains)
+gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	u32 cmd;
 	int ret;
 
@@ -129,23 +116,20 @@
 	 * are flushed at any MI_FLUSH.
 	 */
 
-	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
-	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
-		cmd &= ~MI_NO_WRITE_FLUSH;
-	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+	cmd = MI_FLUSH;
+	if (mode & EMIT_INVALIDATE) {
 		cmd |= MI_EXE_FLUSH;
-
-	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
-	    (IS_G4X(req->i915) || IS_GEN5(req->i915)))
-		cmd |= MI_INVALIDATE_ISP;
+		if (IS_G4X(req->i915) || IS_GEN5(req->i915))
+			cmd |= MI_INVALIDATE_ISP;
+	}
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, cmd);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -190,45 +174,46 @@
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
-	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	struct intel_ring *ring = req->ring;
+	u32 scratch_addr =
+		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
 			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
-	intel_ring_emit(engine, 0); /* low dword */
-	intel_ring_emit(engine, 0); /* high dword */
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, 0); /* low dword */
+	intel_ring_emit(ring, 0); /* high dword */
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE);
-	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
 static int
-gen6_render_ring_flush(struct drm_i915_gem_request *req,
-		       u32 invalidate_domains, u32 flush_domains)
+gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
+	u32 scratch_addr =
+		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
-	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -240,7 +225,7 @@
 	 * number of bits based on the write domains has little performance
 	 * impact.
 	 */
-	if (flush_domains) {
+	if (mode & EMIT_FLUSH) {
 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
 		/*
@@ -249,7 +234,7 @@
 		 */
 		flags |= PIPE_CONTROL_CS_STALL;
 	}
-	if (invalidate_domains) {
+	if (mode & EMIT_INVALIDATE) {
 		flags |= PIPE_CONTROL_TLB_INVALIDATE;
 		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
 		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
@@ -266,11 +251,11 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(engine, flags);
-	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(engine, 0);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -278,30 +263,31 @@
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
-			      PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, 0);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring,
+			PIPE_CONTROL_CS_STALL |
+			PIPE_CONTROL_STALL_AT_SCOREBOARD);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
 static int
-gen7_render_ring_flush(struct drm_i915_gem_request *req,
-		       u32 invalidate_domains, u32 flush_domains)
+gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
+	u32 scratch_addr =
+		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
-	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/*
@@ -318,13 +304,13 @@
 	 * number of bits based on the write domains has little performance
 	 * impact.
 	 */
-	if (flush_domains) {
+	if (mode & EMIT_FLUSH) {
 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
 		flags |= PIPE_CONTROL_FLUSH_ENABLE;
 	}
-	if (invalidate_domains) {
+	if (mode & EMIT_INVALIDATE) {
 		flags |= PIPE_CONTROL_TLB_INVALIDATE;
 		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
 		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
@@ -350,11 +336,11 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(engine, flags);
-	intel_ring_emit(engine, scratch_addr);
-	intel_ring_emit(engine, 0);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -363,41 +349,41 @@
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(engine, flags);
-	intel_ring_emit(engine, scratch_addr);
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, 0);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
 static int
-gen8_render_ring_flush(struct drm_i915_gem_request *req,
-		       u32 invalidate_domains, u32 flush_domains)
+gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
+	u32 scratch_addr =
+		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
-	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	flags |= PIPE_CONTROL_CS_STALL;
 
-	if (flush_domains) {
+	if (mode & EMIT_FLUSH) {
 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
 		flags |= PIPE_CONTROL_FLUSH_ENABLE;
 	}
-	if (invalidate_domains) {
+	if (mode & EMIT_INVALIDATE) {
 		flags |= PIPE_CONTROL_TLB_INVALIDATE;
 		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
 		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
@@ -419,14 +405,7 @@
 	return gen8_emit_pipe_control(req, flags, scratch_addr);
 }
 
-static void ring_write_tail(struct intel_engine_cs *engine,
-			    u32 value)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	I915_WRITE_TAIL(engine, value);
-}
-
-u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 	u64 acthd;
@@ -488,7 +467,7 @@
 		mmio = RING_HWS_PGA(engine->mmio_base);
 	}
 
-	I915_WRITE(mmio, (u32)engine->status_page.gfx_addr);
+	I915_WRITE(mmio, engine->status_page.ggtt_offset);
 	POSTING_READ(mmio);
 
 	/*
@@ -519,7 +498,7 @@
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 
-	if (!IS_GEN2(dev_priv)) {
+	if (INTEL_GEN(dev_priv) > 2) {
 		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
 		if (intel_wait_for_register(dev_priv,
 					    RING_MI_MODE(engine->mmio_base),
@@ -539,9 +518,9 @@
 
 	I915_WRITE_CTL(engine, 0);
 	I915_WRITE_HEAD(engine, 0);
-	engine->write_tail(engine, 0);
+	I915_WRITE_TAIL(engine, 0);
 
-	if (!IS_GEN2(dev_priv)) {
+	if (INTEL_GEN(dev_priv) > 2) {
 		(void)I915_READ_CTL(engine);
 		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 	}
@@ -549,16 +528,10 @@
 	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
 }
 
-void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
-{
-	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
-}
-
 static int init_ring_common(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	struct intel_ringbuffer *ringbuf = engine->buffer;
-	struct drm_i915_gem_object *obj = ringbuf->obj;
+	struct intel_ring *ring = engine->buffer;
 	int ret = 0;
 
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -586,10 +559,12 @@
 		}
 	}
 
-	if (I915_NEED_GFX_HWS(dev_priv))
-		intel_ring_setup_status_page(engine);
-	else
+	if (HWS_NEEDS_PHYSICAL(dev_priv))
 		ring_setup_phys_status_page(engine);
+	else
+		intel_ring_setup_status_page(engine);
+
+	intel_engine_reset_breadcrumbs(engine);
 
 	/* Enforce ordering by reading HEAD register back */
 	I915_READ_HEAD(engine);
@@ -598,40 +573,39 @@
 	 * registers with the above sequence (the readback of the HEAD registers
 	 * also enforces ordering), otherwise the hw might lose the new ring
 	 * register values. */
-	I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj));
+	I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
 
 	/* WaClearRingBufHeadRegAtInit:ctg,elk */
 	if (I915_READ_HEAD(engine))
 		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
 			  engine->name, I915_READ_HEAD(engine));
-	I915_WRITE_HEAD(engine, 0);
-	(void)I915_READ_HEAD(engine);
+
+	intel_ring_update_space(ring);
+	I915_WRITE_HEAD(engine, ring->head);
+	I915_WRITE_TAIL(engine, ring->tail);
+	(void)I915_READ_TAIL(engine);
 
 	I915_WRITE_CTL(engine,
-			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
+			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
-	if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
-		     I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) &&
-		     (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
+	if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
+				       RING_VALID, RING_VALID,
+				       50)) {
 		DRM_ERROR("%s initialization failed "
-			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
+			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
 			  engine->name,
 			  I915_READ_CTL(engine),
 			  I915_READ_CTL(engine) & RING_VALID,
-			  I915_READ_HEAD(engine), I915_READ_TAIL(engine),
+			  I915_READ_HEAD(engine), ring->head,
+			  I915_READ_TAIL(engine), ring->tail,
 			  I915_READ_START(engine),
-			  (unsigned long)i915_gem_obj_ggtt_offset(obj));
+			  i915_ggtt_offset(ring->vma));
 		ret = -EIO;
 		goto out;
 	}
 
-	ringbuf->last_retired_head = -1;
-	ringbuf->head = I915_READ_HEAD(engine);
-	ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
-	intel_ring_update_space(ringbuf);
-
 	intel_engine_init_hangcheck(engine);
 
 out:
@@ -640,59 +614,25 @@
 	return ret;
 }
 
-void intel_fini_pipe_control(struct intel_engine_cs *engine)
+static void reset_ring_common(struct intel_engine_cs *engine,
+			      struct drm_i915_gem_request *request)
 {
-	if (engine->scratch.obj == NULL)
-		return;
+	struct intel_ring *ring = request->ring;
 
-	i915_gem_object_ggtt_unpin(engine->scratch.obj);
-	drm_gem_object_unreference(&engine->scratch.obj->base);
-	engine->scratch.obj = NULL;
-}
-
-int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
-{
-	struct drm_i915_gem_object *obj;
-	int ret;
-
-	WARN_ON(engine->scratch.obj);
-
-	obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
-	if (!obj)
-		obj = i915_gem_object_create(&engine->i915->drm, size);
-	if (IS_ERR(obj)) {
-		DRM_ERROR("Failed to allocate scratch page\n");
-		ret = PTR_ERR(obj);
-		goto err;
-	}
-
-	ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH);
-	if (ret)
-		goto err_unref;
-
-	engine->scratch.obj = obj;
-	engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
-	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
-			 engine->name, engine->scratch.gtt_offset);
-	return 0;
-
-err_unref:
-	drm_gem_object_unreference(&engine->scratch.obj->base);
-err:
-	return ret;
+	ring->head = request->postfix;
+	ring->last_retired_head = -1;
 }
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
 	int ret, i;
 
 	if (w->count == 0)
 		return 0;
 
-	engine->gpu_caches_dirty = true;
-	ret = intel_ring_flush_all_caches(req);
+	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
@@ -700,17 +640,16 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(engine, w->reg[i].addr);
-		intel_ring_emit(engine, w->reg[i].value);
+		intel_ring_emit_reg(ring, w->reg[i].addr);
+		intel_ring_emit(ring, w->reg[i].value);
 	}
-	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(ring, MI_NOOP);
 
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
-	engine->gpu_caches_dirty = true;
-	ret = intel_ring_flush_all_caches(req);
+	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
@@ -1022,7 +961,7 @@
 		 * Only consider slices where one, and only one, subslice has 7
 		 * EUs
 		 */
-		if (!is_power_of_2(dev_priv->info.subslice_7eu[i]))
+		if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
 			continue;
 
 		/*
@@ -1031,7 +970,7 @@
 		 *
 		 * ->    0 <= ss <= 3;
 		 */
-		ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
+		ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
 		vals[i] = 3 - ss;
 	}
 
@@ -1329,193 +1268,196 @@
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 
-	if (dev_priv->semaphore_obj) {
-		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
-		drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
-		dev_priv->semaphore_obj = NULL;
-	}
-
-	intel_fini_pipe_control(engine);
+	i915_vma_unpin_and_release(&dev_priv->semaphore);
 }
 
-static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
-			   unsigned int num_dwords)
+static int gen8_rcs_signal(struct drm_i915_gem_request *req)
 {
-#define MBOX_UPDATE_DWORDS 8
-	struct intel_engine_cs *signaller = signaller_req->engine;
-	struct drm_i915_private *dev_priv = signaller_req->i915;
+	struct intel_ring *ring = req->ring;
+	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
 	int ret, num_rings;
 
-	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
-	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
-#undef MBOX_UPDATE_DWORDS
-
-	ret = intel_ring_begin(signaller_req, num_dwords);
+	num_rings = INTEL_INFO(dev_priv)->num_rings;
+	ret = intel_ring_begin(req, (num_rings-1) * 8);
 	if (ret)
 		return ret;
 
 	for_each_engine_id(waiter, dev_priv, id) {
-		u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
+		u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
-					   PIPE_CONTROL_QW_WRITE |
-					   PIPE_CONTROL_CS_STALL);
-		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
-		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-		intel_ring_emit(signaller, signaller_req->seqno);
-		intel_ring_emit(signaller, 0);
-		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
-					   MI_SEMAPHORE_TARGET(waiter->hw_id));
-		intel_ring_emit(signaller, 0);
+		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+		intel_ring_emit(ring,
+				PIPE_CONTROL_GLOBAL_GTT_IVB |
+				PIPE_CONTROL_QW_WRITE |
+				PIPE_CONTROL_CS_STALL);
+		intel_ring_emit(ring, lower_32_bits(gtt_offset));
+		intel_ring_emit(ring, upper_32_bits(gtt_offset));
+		intel_ring_emit(ring, req->fence.seqno);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring,
+				MI_SEMAPHORE_SIGNAL |
+				MI_SEMAPHORE_TARGET(waiter->hw_id));
+		intel_ring_emit(ring, 0);
 	}
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
-static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
-			   unsigned int num_dwords)
+static int gen8_xcs_signal(struct drm_i915_gem_request *req)
 {
-#define MBOX_UPDATE_DWORDS 6
-	struct intel_engine_cs *signaller = signaller_req->engine;
-	struct drm_i915_private *dev_priv = signaller_req->i915;
+	struct intel_ring *ring = req->ring;
+	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
 	int ret, num_rings;
 
-	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
-	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
-#undef MBOX_UPDATE_DWORDS
-
-	ret = intel_ring_begin(signaller_req, num_dwords);
+	num_rings = INTEL_INFO(dev_priv)->num_rings;
+	ret = intel_ring_begin(req, (num_rings-1) * 6);
 	if (ret)
 		return ret;
 
 	for_each_engine_id(waiter, dev_priv, id) {
-		u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
+		u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
-					   MI_FLUSH_DW_OP_STOREDW);
-		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
-					   MI_FLUSH_DW_USE_GTT);
-		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-		intel_ring_emit(signaller, signaller_req->seqno);
-		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
-					   MI_SEMAPHORE_TARGET(waiter->hw_id));
-		intel_ring_emit(signaller, 0);
+		intel_ring_emit(ring,
+				(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
+		intel_ring_emit(ring,
+				lower_32_bits(gtt_offset) |
+				MI_FLUSH_DW_USE_GTT);
+		intel_ring_emit(ring, upper_32_bits(gtt_offset));
+		intel_ring_emit(ring, req->fence.seqno);
+		intel_ring_emit(ring,
+				MI_SEMAPHORE_SIGNAL |
+				MI_SEMAPHORE_TARGET(waiter->hw_id));
+		intel_ring_emit(ring, 0);
 	}
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
-static int gen6_signal(struct drm_i915_gem_request *signaller_req,
-		       unsigned int num_dwords)
+static int gen6_signal(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *signaller = signaller_req->engine;
-	struct drm_i915_private *dev_priv = signaller_req->i915;
-	struct intel_engine_cs *useless;
-	enum intel_engine_id id;
+	struct intel_ring *ring = req->ring;
+	struct drm_i915_private *dev_priv = req->i915;
+	struct intel_engine_cs *engine;
 	int ret, num_rings;
 
-#define MBOX_UPDATE_DWORDS 3
-	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
-	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
-#undef MBOX_UPDATE_DWORDS
-
-	ret = intel_ring_begin(signaller_req, num_dwords);
+	num_rings = INTEL_INFO(dev_priv)->num_rings;
+	ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2));
 	if (ret)
 		return ret;
 
-	for_each_engine_id(useless, dev_priv, id) {
-		i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
+	for_each_engine(engine, dev_priv) {
+		i915_reg_t mbox_reg;
 
+		if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
+			continue;
+
+		mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
 		if (i915_mmio_reg_valid(mbox_reg)) {
-			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
-			intel_ring_emit_reg(signaller, mbox_reg);
-			intel_ring_emit(signaller, signaller_req->seqno);
+			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+			intel_ring_emit_reg(ring, mbox_reg);
+			intel_ring_emit(ring, req->fence.seqno);
 		}
 	}
 
 	/* If num_dwords was rounded, make sure the tail pointer is correct */
 	if (num_rings % 2 == 0)
-		intel_ring_emit(signaller, MI_NOOP);
+		intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static void i9xx_submit_request(struct drm_i915_gem_request *request)
+{
+	struct drm_i915_private *dev_priv = request->i915;
+
+	I915_WRITE_TAIL(request->engine,
+			intel_ring_offset(request->ring, request->tail));
+}
+
+static int i9xx_emit_request(struct drm_i915_gem_request *req)
+{
+	struct intel_ring *ring = req->ring;
+	int ret;
+
+	ret = intel_ring_begin(req, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, req->fence.seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
+
+	req->tail = ring->tail;
 
 	return 0;
 }
 
 /**
- * gen6_add_request - Update the semaphore mailbox registers
+ * gen6_sema_emit_request - Update the semaphore mailbox registers
  *
  * @request - request to write to the ring
  *
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
-static int
-gen6_add_request(struct drm_i915_gem_request *req)
+static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
-	if (engine->semaphore.signal)
-		ret = engine->semaphore.signal(req, 4);
-	else
-		ret = intel_ring_begin(req, 4);
-
+	ret = req->engine->semaphore.signal(req);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(engine,
-			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(engine, req->seqno);
-	intel_ring_emit(engine, MI_USER_INTERRUPT);
-	__intel_ring_advance(engine);
-
-	return 0;
+	return i9xx_emit_request(req);
 }
 
-static int
-gen8_render_add_request(struct drm_i915_gem_request *req)
+static int gen8_render_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
-	if (engine->semaphore.signal)
-		ret = engine->semaphore.signal(req, 8);
-	else
-		ret = intel_ring_begin(req, 8);
+	if (engine->semaphore.signal) {
+		ret = engine->semaphore.signal(req);
+		if (ret)
+			return ret;
+	}
+
+	ret = intel_ring_begin(req, 8);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB |
-				 PIPE_CONTROL_CS_STALL |
-				 PIPE_CONTROL_QW_WRITE));
-	intel_ring_emit(engine, intel_hws_seqno_address(req->engine));
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB |
+			       PIPE_CONTROL_CS_STALL |
+			       PIPE_CONTROL_QW_WRITE));
+	intel_ring_emit(ring, intel_hws_seqno_address(engine));
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	/* We're thrashing one dword of HWS. */
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, MI_USER_INTERRUPT);
-	intel_ring_emit(engine, MI_NOOP);
-	__intel_ring_advance(engine);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	req->tail = ring->tail;
 
 	return 0;
 }
 
-static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
-					      u32 seqno)
-{
-	return dev_priv->last_seqno < seqno;
-}
-
 /**
  * intel_ring_sync - sync the waiter to the signaller on seqno
  *
@@ -1525,82 +1467,71 @@
  */
 
 static int
-gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
-	       struct intel_engine_cs *signaller,
-	       u32 seqno)
+gen8_ring_sync_to(struct drm_i915_gem_request *req,
+		  struct drm_i915_gem_request *signal)
 {
-	struct intel_engine_cs *waiter = waiter_req->engine;
-	struct drm_i915_private *dev_priv = waiter_req->i915;
-	u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id);
+	struct intel_ring *ring = req->ring;
+	struct drm_i915_private *dev_priv = req->i915;
+	u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
 	struct i915_hw_ppgtt *ppgtt;
 	int ret;
 
-	ret = intel_ring_begin(waiter_req, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
-				MI_SEMAPHORE_GLOBAL_GTT |
-				MI_SEMAPHORE_SAD_GTE_SDD);
-	intel_ring_emit(waiter, seqno);
-	intel_ring_emit(waiter, lower_32_bits(offset));
-	intel_ring_emit(waiter, upper_32_bits(offset));
-	intel_ring_advance(waiter);
+	intel_ring_emit(ring,
+			MI_SEMAPHORE_WAIT |
+			MI_SEMAPHORE_GLOBAL_GTT |
+			MI_SEMAPHORE_SAD_GTE_SDD);
+	intel_ring_emit(ring, signal->fence.seqno);
+	intel_ring_emit(ring, lower_32_bits(offset));
+	intel_ring_emit(ring, upper_32_bits(offset));
+	intel_ring_advance(ring);
 
 	/* When the !RCS engines idle waiting upon a semaphore, they lose their
 	 * pagetables and we must reload them before executing the batch.
 	 * We do this on the i915_switch_context() following the wait and
 	 * before the dispatch.
 	 */
-	ppgtt = waiter_req->ctx->ppgtt;
-	if (ppgtt && waiter_req->engine->id != RCS)
-		ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
+	ppgtt = req->ctx->ppgtt;
+	if (ppgtt && req->engine->id != RCS)
+		ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
 	return 0;
 }
 
 static int
-gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
-	       struct intel_engine_cs *signaller,
-	       u32 seqno)
+gen6_ring_sync_to(struct drm_i915_gem_request *req,
+		  struct drm_i915_gem_request *signal)
 {
-	struct intel_engine_cs *waiter = waiter_req->engine;
+	struct intel_ring *ring = req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
-	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
+	u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
 	int ret;
 
+	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
+
+	ret = intel_ring_begin(req, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, dw1 | wait_mbox);
 	/* Throughout all of the GEM code, seqno passed implies our current
 	 * seqno is >= the last seqno executed. However for hardware the
 	 * comparison is strictly greater than.
 	 */
-	seqno -= 1;
-
-	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
-
-	ret = intel_ring_begin(waiter_req, 4);
-	if (ret)
-		return ret;
-
-	/* If seqno wrap happened, omit the wait with no-ops */
-	if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
-		intel_ring_emit(waiter, dw1 | wait_mbox);
-		intel_ring_emit(waiter, seqno);
-		intel_ring_emit(waiter, 0);
-		intel_ring_emit(waiter, MI_NOOP);
-	} else {
-		intel_ring_emit(waiter, MI_NOOP);
-		intel_ring_emit(waiter, MI_NOOP);
-		intel_ring_emit(waiter, MI_NOOP);
-		intel_ring_emit(waiter, MI_NOOP);
-	}
-	intel_ring_advance(waiter);
+	intel_ring_emit(ring, signal->fence.seqno - 1);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
 static void
-gen5_seqno_barrier(struct intel_engine_cs *ring)
+gen5_seqno_barrier(struct intel_engine_cs *engine)
 {
 	/* MI_STORE are internally buffered by the GPU and not flushed
 	 * either by MI_FLUSH or SyncFlush or any other combination of
@@ -1693,40 +1624,18 @@
 }
 
 static int
-bsd_ring_flush(struct drm_i915_gem_request *req,
-	       u32     invalidate_domains,
-	       u32     flush_domains)
+bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_FLUSH);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
-	return 0;
-}
-
-static int
-i9xx_add_request(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	int ret;
-
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(engine,
-			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(engine, req->seqno);
-	intel_ring_emit(engine, MI_USER_INTERRUPT);
-	__intel_ring_advance(engine);
-
+	intel_ring_emit(ring, MI_FLUSH);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 	return 0;
 }
 
@@ -1788,24 +1697,24 @@
 }
 
 static int
-i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			 u64 offset, u32 length,
-			 unsigned dispatch_flags)
+i965_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 length,
+		   unsigned int dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine,
+	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			MI_BATCH_GTT |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_NON_SECURE_I965));
-	intel_ring_emit(engine, offset);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1815,12 +1724,12 @@
 #define I830_TLB_ENTRIES (2)
 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
-i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			 u64 offset, u32 len,
-			 unsigned dispatch_flags)
+i830_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 len,
+		   unsigned int dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
-	u32 cs_offset = engine->scratch.gtt_offset;
+	struct intel_ring *ring = req->ring;
+	u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -1828,13 +1737,13 @@
 		return ret;
 
 	/* Evict the invalid PTE TLBs */
-	intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA);
-	intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
-	intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */
-	intel_ring_emit(engine, cs_offset);
-	intel_ring_emit(engine, 0xdeadbeef);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+	intel_ring_emit(ring, cs_offset);
+	intel_ring_emit(ring, 0xdeadbeef);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
 		if (len > I830_BATCH_LIMIT)
@@ -1848,17 +1757,17 @@
 		 * stable batch scratch bo area (so that the CS never
 		 * stumbles over its tlb invalidation bug) ...
 		 */
-		intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
-		intel_ring_emit(engine,
+		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+		intel_ring_emit(ring,
 				BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
-		intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096);
-		intel_ring_emit(engine, cs_offset);
-		intel_ring_emit(engine, 4096);
-		intel_ring_emit(engine, offset);
+		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
+		intel_ring_emit(ring, cs_offset);
+		intel_ring_emit(ring, 4096);
+		intel_ring_emit(ring, offset);
 
-		intel_ring_emit(engine, MI_FLUSH);
-		intel_ring_emit(engine, MI_NOOP);
-		intel_ring_advance(engine);
+		intel_ring_emit(ring, MI_FLUSH);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 
 		/* ... and execute it. */
 		offset = cs_offset;
@@ -1868,30 +1777,30 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					  0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+					0 : MI_BATCH_NON_SECURE));
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
 static int
-i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			 u64 offset, u32 len,
-			 unsigned dispatch_flags)
+i915_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 len,
+		   unsigned int dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					  0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+					0 : MI_BATCH_NON_SECURE));
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1909,79 +1818,79 @@
 
 static void cleanup_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 
-	obj = engine->status_page.obj;
-	if (obj == NULL)
+	vma = fetch_and_zero(&engine->status_page.vma);
+	if (!vma)
 		return;
 
-	kunmap(sg_page(obj->pages->sgl));
-	i915_gem_object_ggtt_unpin(obj);
-	drm_gem_object_unreference(&obj->base);
-	engine->status_page.obj = NULL;
+	i915_vma_unpin(vma);
+	i915_gem_object_unpin_map(vma->obj);
+	i915_vma_put(vma);
 }
 
 static int init_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_gem_object *obj = engine->status_page.obj;
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	unsigned int flags;
+	int ret;
 
-	if (obj == NULL) {
-		unsigned flags;
-		int ret;
-
-		obj = i915_gem_object_create(&engine->i915->drm, 4096);
-		if (IS_ERR(obj)) {
-			DRM_ERROR("Failed to allocate status page\n");
-			return PTR_ERR(obj);
-		}
-
-		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-		if (ret)
-			goto err_unref;
-
-		flags = 0;
-		if (!HAS_LLC(engine->i915))
-			/* On g33, we cannot place HWS above 256MiB, so
-			 * restrict its pinning to the low mappable arena.
-			 * Though this restriction is not documented for
-			 * gen4, gen5, or byt, they also behave similarly
-			 * and hang if the HWS is placed at the top of the
-			 * GTT. To generalise, it appears that all !llc
-			 * platforms have issues with us placing the HWS
-			 * above the mappable region (even though we never
-			 * actualy map it).
-			 */
-			flags |= PIN_MAPPABLE;
-		ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
-		if (ret) {
-err_unref:
-			drm_gem_object_unreference(&obj->base);
-			return ret;
-		}
-
-		engine->status_page.obj = obj;
+	obj = i915_gem_object_create(&engine->i915->drm, 4096);
+	if (IS_ERR(obj)) {
+		DRM_ERROR("Failed to allocate status page\n");
+		return PTR_ERR(obj);
 	}
 
-	engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
-	engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
-	memset(engine->status_page.page_addr, 0, PAGE_SIZE);
+	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	if (ret)
+		goto err;
+
+	vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err;
+	}
+
+	flags = PIN_GLOBAL;
+	if (!HAS_LLC(engine->i915))
+		/* On g33, we cannot place HWS above 256MiB, so
+		 * restrict its pinning to the low mappable arena.
+		 * Though this restriction is not documented for
+		 * gen4, gen5, or byt, they also behave similarly
+		 * and hang if the HWS is placed at the top of the
+		 * GTT. To generalise, it appears that all !llc
+		 * platforms have issues with us placing the HWS
+		 * above the mappable region (even though we never
+		 * actualy map it).
+		 */
+		flags |= PIN_MAPPABLE;
+	ret = i915_vma_pin(vma, 0, 4096, flags);
+	if (ret)
+		goto err;
+
+	engine->status_page.vma = vma;
+	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
+	engine->status_page.page_addr =
+		i915_gem_object_pin_map(obj, I915_MAP_WB);
 
 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
-			engine->name, engine->status_page.gfx_addr);
-
+			 engine->name, i915_ggtt_offset(vma));
 	return 0;
+
+err:
+	i915_gem_object_put(obj);
+	return ret;
 }
 
 static int init_phys_status_page(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 
-	if (!dev_priv->status_page_dmah) {
-		dev_priv->status_page_dmah =
-			drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
-		if (!dev_priv->status_page_dmah)
-			return -ENOMEM;
-	}
+	dev_priv->status_page_dmah =
+		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
+	if (!dev_priv->status_page_dmah)
+		return -ENOMEM;
 
 	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
 	memset(engine->status_page.page_addr, 0, PAGE_SIZE);
@@ -1989,115 +1898,105 @@
 	return 0;
 }
 
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+int intel_ring_pin(struct intel_ring *ring)
 {
-	GEM_BUG_ON(ringbuf->vma == NULL);
-	GEM_BUG_ON(ringbuf->virtual_start == NULL);
-
-	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
-		i915_gem_object_unpin_map(ringbuf->obj);
-	else
-		i915_vma_unpin_iomap(ringbuf->vma);
-	ringbuf->virtual_start = NULL;
-
-	i915_gem_object_ggtt_unpin(ringbuf->obj);
-	ringbuf->vma = NULL;
-}
-
-int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
-				     struct intel_ringbuffer *ringbuf)
-{
-	struct drm_i915_gem_object *obj = ringbuf->obj;
 	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
-	unsigned flags = PIN_OFFSET_BIAS | 4096;
+	unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096;
+	enum i915_map_type map;
+	struct i915_vma *vma = ring->vma;
 	void *addr;
 	int ret;
 
-	if (HAS_LLC(dev_priv) && !obj->stolen) {
-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
-		if (ret)
+	GEM_BUG_ON(ring->vaddr);
+
+	map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;
+
+	if (vma->obj->stolen)
+		flags |= PIN_MAPPABLE;
+
+	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+		if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
+			ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+		else
+			ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
+		if (unlikely(ret))
 			return ret;
-
-		ret = i915_gem_object_set_to_cpu_domain(obj, true);
-		if (ret)
-			goto err_unpin;
-
-		addr = i915_gem_object_pin_map(obj);
-		if (IS_ERR(addr)) {
-			ret = PTR_ERR(addr);
-			goto err_unpin;
-		}
-	} else {
-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
-					    flags | PIN_MAPPABLE);
-		if (ret)
-			return ret;
-
-		ret = i915_gem_object_set_to_gtt_domain(obj, true);
-		if (ret)
-			goto err_unpin;
-
-		/* Access through the GTT requires the device to be awake. */
-		assert_rpm_wakelock_held(dev_priv);
-
-		addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
-		if (IS_ERR(addr)) {
-			ret = PTR_ERR(addr);
-			goto err_unpin;
-		}
 	}
 
-	ringbuf->virtual_start = addr;
-	ringbuf->vma = i915_gem_obj_to_ggtt(obj);
+	ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
+	if (unlikely(ret))
+		return ret;
+
+	if (i915_vma_is_map_and_fenceable(vma))
+		addr = (void __force *)i915_vma_pin_iomap(vma);
+	else
+		addr = i915_gem_object_pin_map(vma->obj, map);
+	if (IS_ERR(addr))
+		goto err;
+
+	ring->vaddr = addr;
 	return 0;
 
-err_unpin:
-	i915_gem_object_ggtt_unpin(obj);
-	return ret;
+err:
+	i915_vma_unpin(vma);
+	return PTR_ERR(addr);
 }
 
-static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_ring_unpin(struct intel_ring *ring)
 {
-	drm_gem_object_unreference(&ringbuf->obj->base);
-	ringbuf->obj = NULL;
+	GEM_BUG_ON(!ring->vma);
+	GEM_BUG_ON(!ring->vaddr);
+
+	if (i915_vma_is_map_and_fenceable(ring->vma))
+		i915_vma_unpin_iomap(ring->vma);
+	else
+		i915_gem_object_unpin_map(ring->vma->obj);
+	ring->vaddr = NULL;
+
+	i915_vma_unpin(ring->vma);
 }
 
-static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-				      struct intel_ringbuffer *ringbuf)
+static struct i915_vma *
+intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
 {
 	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 
-	obj = NULL;
-	if (!HAS_LLC(dev))
-		obj = i915_gem_object_create_stolen(dev, ringbuf->size);
-	if (obj == NULL)
-		obj = i915_gem_object_create(dev, ringbuf->size);
+	obj = i915_gem_object_create_stolen(&dev_priv->drm, size);
+	if (!obj)
+		obj = i915_gem_object_create(&dev_priv->drm, size);
 	if (IS_ERR(obj))
-		return PTR_ERR(obj);
+		return ERR_CAST(obj);
 
 	/* mark ring buffers as read-only from GPU side by default */
 	obj->gt_ro = 1;
 
-	ringbuf->obj = obj;
+	vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+	if (IS_ERR(vma))
+		goto err;
 
-	return 0;
+	return vma;
+
+err:
+	i915_gem_object_put(obj);
+	return vma;
 }
 
-struct intel_ringbuffer *
-intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 {
-	struct intel_ringbuffer *ring;
-	int ret;
+	struct intel_ring *ring;
+	struct i915_vma *vma;
+
+	GEM_BUG_ON(!is_power_of_2(size));
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-	if (ring == NULL) {
-		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
-				 engine->name);
+	if (!ring)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	ring->engine = engine;
-	list_add(&ring->link, &engine->buffers);
+
+	INIT_LIST_HEAD(&ring->request_list);
 
 	ring->size = size;
 	/* Workaround an erratum on the i830 which causes a hang if
@@ -2111,23 +2010,20 @@
 	ring->last_retired_head = -1;
 	intel_ring_update_space(ring);
 
-	ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring);
-	if (ret) {
-		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
-				 engine->name, ret);
-		list_del(&ring->link);
+	vma = intel_ring_create_vma(engine->i915, size);
+	if (IS_ERR(vma)) {
 		kfree(ring);
-		return ERR_PTR(ret);
+		return ERR_CAST(vma);
 	}
+	ring->vma = vma;
 
 	return ring;
 }
 
 void
-intel_ringbuffer_free(struct intel_ringbuffer *ring)
+intel_ring_free(struct intel_ring *ring)
 {
-	intel_destroy_ringbuffer_obj(ring);
-	list_del(&ring->link);
+	i915_vma_put(ring->vma);
 	kfree(ring);
 }
 
@@ -2143,7 +2039,12 @@
 		return 0;
 
 	if (ce->state) {
-		ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0);
+		ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false);
+		if (ret)
+			goto error;
+
+		ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
+				   PIN_GLOBAL | PIN_HIGH);
 		if (ret)
 			goto error;
 	}
@@ -2158,7 +2059,7 @@
 	if (ctx == ctx->i915->kernel_context)
 		ce->initialised = true;
 
-	i915_gem_context_reference(ctx);
+	i915_gem_context_get(ctx);
 	return 0;
 
 error:
@@ -2177,30 +2078,25 @@
 		return;
 
 	if (ce->state)
-		i915_gem_object_ggtt_unpin(ce->state);
+		i915_vma_unpin(ce->state);
 
-	i915_gem_context_unreference(ctx);
+	i915_gem_context_put(ctx);
 }
 
-static int intel_init_ring_buffer(struct drm_device *dev,
-				  struct intel_engine_cs *engine)
+static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ringbuffer *ringbuf;
+	struct drm_i915_private *dev_priv = engine->i915;
+	struct intel_ring *ring;
 	int ret;
 
 	WARN_ON(engine->buffer);
 
-	engine->i915 = dev_priv;
-	INIT_LIST_HEAD(&engine->active_list);
-	INIT_LIST_HEAD(&engine->request_list);
-	INIT_LIST_HEAD(&engine->execlist_queue);
-	INIT_LIST_HEAD(&engine->buffers);
-	i915_gem_batch_pool_init(dev, &engine->batch_pool);
+	intel_engine_setup_common(engine);
+
 	memset(engine->semaphore.sync_seqno, 0,
 	       sizeof(engine->semaphore.sync_seqno));
 
-	ret = intel_engine_init_breadcrumbs(engine);
+	ret = intel_engine_init_common(engine);
 	if (ret)
 		goto error;
 
@@ -2215,44 +2111,38 @@
 	if (ret)
 		goto error;
 
-	ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
-	if (IS_ERR(ringbuf)) {
-		ret = PTR_ERR(ringbuf);
+	ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
+	if (IS_ERR(ring)) {
+		ret = PTR_ERR(ring);
 		goto error;
 	}
-	engine->buffer = ringbuf;
 
-	if (I915_NEED_GFX_HWS(dev_priv)) {
-		ret = init_status_page(engine);
-		if (ret)
-			goto error;
-	} else {
+	if (HWS_NEEDS_PHYSICAL(dev_priv)) {
 		WARN_ON(engine->id != RCS);
 		ret = init_phys_status_page(engine);
 		if (ret)
 			goto error;
+	} else {
+		ret = init_status_page(engine);
+		if (ret)
+			goto error;
 	}
 
-	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
+	ret = intel_ring_pin(ring);
 	if (ret) {
-		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
-				engine->name, ret);
-		intel_destroy_ringbuffer_obj(ringbuf);
+		intel_ring_free(ring);
 		goto error;
 	}
-
-	ret = i915_cmd_parser_init_ring(engine);
-	if (ret)
-		goto error;
+	engine->buffer = ring;
 
 	return 0;
 
 error:
-	intel_cleanup_engine(engine);
+	intel_engine_cleanup(engine);
 	return ret;
 }
 
-void intel_cleanup_engine(struct intel_engine_cs *engine)
+void intel_engine_cleanup(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv;
 
@@ -2262,49 +2152,39 @@
 	dev_priv = engine->i915;
 
 	if (engine->buffer) {
-		intel_stop_engine(engine);
-		WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+		WARN_ON(INTEL_GEN(dev_priv) > 2 &&
+			(I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
-		intel_unpin_ringbuffer_obj(engine->buffer);
-		intel_ringbuffer_free(engine->buffer);
+		intel_ring_unpin(engine->buffer);
+		intel_ring_free(engine->buffer);
 		engine->buffer = NULL;
 	}
 
 	if (engine->cleanup)
 		engine->cleanup(engine);
 
-	if (I915_NEED_GFX_HWS(dev_priv)) {
-		cleanup_status_page(engine);
-	} else {
+	if (HWS_NEEDS_PHYSICAL(dev_priv)) {
 		WARN_ON(engine->id != RCS);
 		cleanup_phys_status_page(engine);
+	} else {
+		cleanup_status_page(engine);
 	}
 
-	i915_cmd_parser_fini_ring(engine);
-	i915_gem_batch_pool_fini(&engine->batch_pool);
-	intel_engine_fini_breadcrumbs(engine);
+	intel_engine_cleanup_common(engine);
 
 	intel_ring_context_unpin(dev_priv->kernel_context, engine);
 
 	engine->i915 = NULL;
 }
 
-int intel_engine_idle(struct intel_engine_cs *engine)
+void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_gem_request *req;
+	struct intel_engine_cs *engine;
 
-	/* Wait upon the last request to be completed */
-	if (list_empty(&engine->request_list))
-		return 0;
-
-	req = list_entry(engine->request_list.prev,
-			 struct drm_i915_gem_request,
-			 list);
-
-	/* Make sure we do not trigger any retires */
-	return __i915_wait_request(req,
-				   req->i915->mm.interruptible,
-				   NULL, NULL);
+	for_each_engine(engine, dev_priv) {
+		engine->buffer->head = engine->buffer->tail;
+		engine->buffer->last_retired_head = -1;
+	}
 }
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -2317,7 +2197,7 @@
 	 */
 	request->reserved_space += LEGACY_REQUEST_SIZE;
 
-	request->ringbuf = request->engine->buffer;
+	request->ring = request->engine->buffer;
 
 	ret = intel_ring_begin(request, 0);
 	if (ret)
@@ -2329,12 +2209,12 @@
 
 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	struct drm_i915_gem_request *target;
+	int ret;
 
-	intel_ring_update_space(ringbuf);
-	if (ringbuf->space >= bytes)
+	intel_ring_update_space(ring);
+	if (ring->space >= bytes)
 		return 0;
 
 	/*
@@ -2348,35 +2228,37 @@
 	 */
 	GEM_BUG_ON(!req->reserved_space);
 
-	list_for_each_entry(target, &engine->request_list, list) {
+	list_for_each_entry(target, &ring->request_list, ring_link) {
 		unsigned space;
 
-		/*
-		 * The request queue is per-engine, so can contain requests
-		 * from multiple ringbuffers. Here, we must ignore any that
-		 * aren't from the ringbuffer we're considering.
-		 */
-		if (target->ringbuf != ringbuf)
-			continue;
-
 		/* Would completion of this request free enough space? */
-		space = __intel_ring_space(target->postfix, ringbuf->tail,
-					   ringbuf->size);
+		space = __intel_ring_space(target->postfix, ring->tail,
+					   ring->size);
 		if (space >= bytes)
 			break;
 	}
 
-	if (WARN_ON(&target->list == &engine->request_list))
+	if (WARN_ON(&target->ring_link == &ring->request_list))
 		return -ENOSPC;
 
-	return i915_wait_request(target);
+	ret = i915_wait_request(target,
+				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+				NULL, NO_WAITBOOST);
+	if (ret)
+		return ret;
+
+	i915_gem_request_retire_upto(target);
+
+	intel_ring_update_space(ring);
+	GEM_BUG_ON(ring->space < bytes);
+	return 0;
 }
 
 int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	int remain_actual = ringbuf->size - ringbuf->tail;
-	int remain_usable = ringbuf->effective_size - ringbuf->tail;
+	struct intel_ring *ring = req->ring;
+	int remain_actual = ring->size - ring->tail;
+	int remain_usable = ring->effective_size - ring->tail;
 	int bytes = num_dwords * sizeof(u32);
 	int total_bytes, wait_bytes;
 	bool need_wrap = false;
@@ -2403,37 +2285,33 @@
 		wait_bytes = total_bytes;
 	}
 
-	if (wait_bytes > ringbuf->space) {
+	if (wait_bytes > ring->space) {
 		int ret = wait_for_space(req, wait_bytes);
 		if (unlikely(ret))
 			return ret;
-
-		intel_ring_update_space(ringbuf);
-		if (unlikely(ringbuf->space < wait_bytes))
-			return -EAGAIN;
 	}
 
 	if (unlikely(need_wrap)) {
-		GEM_BUG_ON(remain_actual > ringbuf->space);
-		GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
+		GEM_BUG_ON(remain_actual > ring->space);
+		GEM_BUG_ON(ring->tail + remain_actual > ring->size);
 
 		/* Fill the tail with MI_NOOP */
-		memset(ringbuf->virtual_start + ringbuf->tail,
-		       0, remain_actual);
-		ringbuf->tail = 0;
-		ringbuf->space -= remain_actual;
+		memset(ring->vaddr + ring->tail, 0, remain_actual);
+		ring->tail = 0;
+		ring->space -= remain_actual;
 	}
 
-	ringbuf->space -= bytes;
-	GEM_BUG_ON(ringbuf->space < 0);
+	ring->space -= bytes;
+	GEM_BUG_ON(ring->space < 0);
 	return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
-	int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	struct intel_ring *ring = req->ring;
+	int num_dwords =
+		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 
 	if (num_dwords == 0)
@@ -2445,61 +2323,16 @@
 		return ret;
 
 	while (num_dwords--)
-		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(ring, MI_NOOP);
 
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
-void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
+static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-
-	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
-	 * so long as the semaphore value in the register/page is greater
-	 * than the sync value), so whenever we reset the seqno,
-	 * so long as we reset the tracking semaphore value to 0, it will
-	 * always be before the next request's seqno. If we don't reset
-	 * the semaphore value, then when the seqno moves backwards all
-	 * future waits will complete instantly (causing rendering corruption).
-	 */
-	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
-		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
-		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
-		if (HAS_VEBOX(dev_priv))
-			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
-	}
-	if (dev_priv->semaphore_obj) {
-		struct drm_i915_gem_object *obj = dev_priv->semaphore_obj;
-		struct page *page = i915_gem_object_get_dirty_page(obj, 0);
-		void *semaphores = kmap(page);
-		memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
-		       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
-		kunmap(page);
-	}
-	memset(engine->semaphore.sync_seqno, 0,
-	       sizeof(engine->semaphore.sync_seqno));
-
-	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-	if (engine->irq_seqno_barrier)
-		engine->irq_seqno_barrier(engine);
-	engine->last_submitted_seqno = seqno;
-
-	engine->hangcheck.seqno = seqno;
-
-	/* After manually advancing the seqno, fake the interrupt in case
-	 * there are any waiters for that seqno.
-	 */
-	rcu_read_lock();
-	intel_engine_wakeup(engine);
-	rcu_read_unlock();
-}
-
-static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
-				     u32 value)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_private *dev_priv = request->i915;
 
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
@@ -2523,8 +2356,7 @@
 		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
 
 	/* Now that the ring is fully powered up, update the tail */
-	I915_WRITE_FW(RING_TAIL(engine->mmio_base), value);
-	POSTING_READ_FW(RING_TAIL(engine->mmio_base));
+	i9xx_submit_request(request);
 
 	/* Let the ring send IDLE messages to the GT again,
 	 * and so let it sleep to conserve power when idle.
@@ -2535,10 +2367,9 @@
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
-			       u32 invalidate, u32 flush)
+static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	uint32_t cmd;
 	int ret;
 
@@ -2563,30 +2394,29 @@
 	 * operation is complete. This bit is only valid when the
 	 * Post-Sync Operation field is a value of 1h or 3h."
 	 */
-	if (invalidate & I915_GEM_GPU_DOMAINS)
+	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 
-	intel_ring_emit(engine, cmd);
-	intel_ring_emit(engine,
-			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(engine, 0); /* upper addr */
-		intel_ring_emit(engine, 0); /* value */
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
 	} else  {
-		intel_ring_emit(engine, 0);
-		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
 	}
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 	return 0;
 }
 
 static int
-gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			      u64 offset, u32 len,
-			      unsigned dispatch_flags)
+gen8_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 len,
+		   unsigned int dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
-	bool ppgtt = USES_PPGTT(engine->dev) &&
+	struct intel_ring *ring = req->ring;
+	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
@@ -2595,71 +2425,70 @@
 		return ret;
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
 			(dispatch_flags & I915_DISPATCH_RS ?
 			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(engine, lower_32_bits(offset));
-	intel_ring_emit(engine, upper_32_bits(offset));
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, lower_32_bits(offset));
+	intel_ring_emit(ring, upper_32_bits(offset));
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
 static int
-hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			     u64 offset, u32 len,
-			     unsigned dispatch_flags)
+hsw_emit_bb_start(struct drm_i915_gem_request *req,
+		  u64 offset, u32 len,
+		  unsigned int dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine,
+	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
 			(dispatch_flags & I915_DISPATCH_RS ?
 			 MI_BATCH_RESOURCE_STREAMER : 0));
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(engine, offset);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
 static int
-gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			      u64 offset, u32 len,
-			      unsigned dispatch_flags)
+gen6_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 len,
+		   unsigned int dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine,
+	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_NON_SECURE_I965));
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(engine, offset);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
 /* Blitter support (SandyBridge+) */
 
-static int gen6_ring_flush(struct drm_i915_gem_request *req,
-			   u32 invalidate, u32 flush)
+static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ring *ring = req->ring;
 	uint32_t cmd;
 	int ret;
 
@@ -2684,19 +2513,19 @@
 	 * operation is complete. This bit is only valid when the
 	 * Post-Sync Operation field is a value of 1h or 3h."
 	 */
-	if (invalidate & I915_GEM_DOMAIN_RENDER)
+	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB;
-	intel_ring_emit(engine, cmd);
-	intel_ring_emit(engine,
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring,
 			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(engine, 0); /* upper addr */
-		intel_ring_emit(engine, 0); /* value */
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
 	} else  {
-		intel_ring_emit(engine, 0);
-		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
 	}
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -2707,38 +2536,39 @@
 	struct drm_i915_gem_object *obj;
 	int ret, i;
 
-	if (!i915_semaphore_is_enabled(dev_priv))
+	if (!i915.semaphores)
 		return;
 
-	if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) {
+	if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
+		struct i915_vma *vma;
+
 		obj = i915_gem_object_create(&dev_priv->drm, 4096);
-		if (IS_ERR(obj)) {
-			DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
-			i915.semaphores = 0;
-		} else {
-			i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-			ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
-			if (ret != 0) {
-				drm_gem_object_unreference(&obj->base);
-				DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
-				i915.semaphores = 0;
-			} else {
-				dev_priv->semaphore_obj = obj;
-			}
-		}
+		if (IS_ERR(obj))
+			goto err;
+
+		vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+		if (IS_ERR(vma))
+			goto err_obj;
+
+		ret = i915_gem_object_set_to_gtt_domain(obj, false);
+		if (ret)
+			goto err_obj;
+
+		ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+		if (ret)
+			goto err_obj;
+
+		dev_priv->semaphore = vma;
 	}
 
-	if (!i915_semaphore_is_enabled(dev_priv))
-		return;
-
 	if (INTEL_GEN(dev_priv) >= 8) {
-		u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);
+		u32 offset = i915_ggtt_offset(dev_priv->semaphore);
 
-		engine->semaphore.sync_to = gen8_ring_sync;
+		engine->semaphore.sync_to = gen8_ring_sync_to;
 		engine->semaphore.signal = gen8_xcs_signal;
 
 		for (i = 0; i < I915_NUM_ENGINES; i++) {
-			u64 ring_offset;
+			u32 ring_offset;
 
 			if (i != engine->id)
 				ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
@@ -2748,7 +2578,7 @@
 			engine->semaphore.signal_ggtt[i] = ring_offset;
 		}
 	} else if (INTEL_GEN(dev_priv) >= 6) {
-		engine->semaphore.sync_to = gen6_ring_sync;
+		engine->semaphore.sync_to = gen6_ring_sync_to;
 		engine->semaphore.signal = gen6_signal;
 
 		/*
@@ -2758,52 +2588,62 @@
 		 * initialized as INVALID.  Gen8 will initialize the
 		 * sema between VCS2 and RCS later.
 		 */
-		for (i = 0; i < I915_NUM_ENGINES; i++) {
+		for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
 			static const struct {
 				u32 wait_mbox;
 				i915_reg_t mbox_reg;
-			} sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = {
-				[RCS] = {
-					[VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
-					[BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
-					[VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
+			} sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
+				[RCS_HW] = {
+					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
+					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
+					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
 				},
-				[VCS] = {
-					[RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
-					[BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
-					[VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
+				[VCS_HW] = {
+					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
+					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
+					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
 				},
-				[BCS] = {
-					[RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
-					[VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
-					[VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
+				[BCS_HW] = {
+					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
+					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
+					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
 				},
-				[VECS] = {
-					[RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
-					[VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
-					[BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
+				[VECS_HW] = {
+					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
+					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
+					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
 				},
 			};
 			u32 wait_mbox;
 			i915_reg_t mbox_reg;
 
-			if (i == engine->id || i == VCS2) {
+			if (i == engine->hw_id) {
 				wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
 				mbox_reg = GEN6_NOSYNC;
 			} else {
-				wait_mbox = sem_data[engine->id][i].wait_mbox;
-				mbox_reg = sem_data[engine->id][i].mbox_reg;
+				wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
+				mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
 			}
 
 			engine->semaphore.mbox.wait[i] = wait_mbox;
 			engine->semaphore.mbox.signal[i] = mbox_reg;
 		}
 	}
+
+	return;
+
+err_obj:
+	i915_gem_object_put(obj);
+err:
+	DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
+	i915.semaphores = 0;
 }
 
 static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
 				struct intel_engine_cs *engine)
 {
+	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;
+
 	if (INTEL_GEN(dev_priv) >= 8) {
 		engine->irq_enable = gen8_irq_enable;
 		engine->irq_disable = gen8_irq_disable;
@@ -2828,83 +2668,76 @@
 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 				      struct intel_engine_cs *engine)
 {
-	engine->init_hw = init_ring_common;
-	engine->write_tail = ring_write_tail;
-
-	engine->add_request = i9xx_add_request;
-	if (INTEL_GEN(dev_priv) >= 6)
-		engine->add_request = gen6_add_request;
-
-	if (INTEL_GEN(dev_priv) >= 8)
-		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-	else if (INTEL_GEN(dev_priv) >= 6)
-		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-	else if (INTEL_GEN(dev_priv) >= 4)
-		engine->dispatch_execbuffer = i965_dispatch_execbuffer;
-	else if (IS_I830(dev_priv) || IS_845G(dev_priv))
-		engine->dispatch_execbuffer = i830_dispatch_execbuffer;
-	else
-		engine->dispatch_execbuffer = i915_dispatch_execbuffer;
-
 	intel_ring_init_irq(dev_priv, engine);
 	intel_ring_init_semaphores(dev_priv, engine);
+
+	engine->init_hw = init_ring_common;
+	engine->reset_hw = reset_ring_common;
+
+	engine->emit_request = i9xx_emit_request;
+	if (i915.semaphores)
+		engine->emit_request = gen6_sema_emit_request;
+	engine->submit_request = i9xx_submit_request;
+
+	if (INTEL_GEN(dev_priv) >= 8)
+		engine->emit_bb_start = gen8_emit_bb_start;
+	else if (INTEL_GEN(dev_priv) >= 6)
+		engine->emit_bb_start = gen6_emit_bb_start;
+	else if (INTEL_GEN(dev_priv) >= 4)
+		engine->emit_bb_start = i965_emit_bb_start;
+	else if (IS_I830(dev_priv) || IS_845G(dev_priv))
+		engine->emit_bb_start = i830_emit_bb_start;
+	else
+		engine->emit_bb_start = i915_emit_bb_start;
 }
 
-int intel_init_render_ring_buffer(struct drm_device *dev)
+int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+	struct drm_i915_private *dev_priv = engine->i915;
 	int ret;
 
-	engine->name = "render ring";
-	engine->id = RCS;
-	engine->exec_id = I915_EXEC_RENDER;
-	engine->hw_id = 0;
-	engine->mmio_base = RENDER_RING_BASE;
-
 	intel_ring_default_vfuncs(dev_priv, engine);
 
-	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 	if (HAS_L3_DPF(dev_priv))
 		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
 	if (INTEL_GEN(dev_priv) >= 8) {
 		engine->init_context = intel_rcs_ctx_init;
-		engine->add_request = gen8_render_add_request;
-		engine->flush = gen8_render_ring_flush;
-		if (i915_semaphore_is_enabled(dev_priv))
+		engine->emit_request = gen8_render_emit_request;
+		engine->emit_flush = gen8_render_ring_flush;
+		if (i915.semaphores)
 			engine->semaphore.signal = gen8_rcs_signal;
 	} else if (INTEL_GEN(dev_priv) >= 6) {
 		engine->init_context = intel_rcs_ctx_init;
-		engine->flush = gen7_render_ring_flush;
+		engine->emit_flush = gen7_render_ring_flush;
 		if (IS_GEN6(dev_priv))
-			engine->flush = gen6_render_ring_flush;
+			engine->emit_flush = gen6_render_ring_flush;
 	} else if (IS_GEN5(dev_priv)) {
-		engine->flush = gen4_render_ring_flush;
+		engine->emit_flush = gen4_render_ring_flush;
 	} else {
 		if (INTEL_GEN(dev_priv) < 4)
-			engine->flush = gen2_render_ring_flush;
+			engine->emit_flush = gen2_render_ring_flush;
 		else
-			engine->flush = gen4_render_ring_flush;
+			engine->emit_flush = gen4_render_ring_flush;
 		engine->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 
 	if (IS_HASWELL(dev_priv))
-		engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+		engine->emit_bb_start = hsw_emit_bb_start;
 
 	engine->init_hw = init_render_ring;
 	engine->cleanup = render_ring_cleanup;
 
-	ret = intel_init_ring_buffer(dev, engine);
+	ret = intel_init_ring_buffer(engine);
 	if (ret)
 		return ret;
 
 	if (INTEL_GEN(dev_priv) >= 6) {
-		ret = intel_init_pipe_control(engine, 4096);
+		ret = intel_engine_create_scratch(engine, 4096);
 		if (ret)
 			return ret;
 	} else if (HAS_BROKEN_CS_TLB(dev_priv)) {
-		ret = intel_init_pipe_control(engine, I830_WA_SIZE);
+		ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
 		if (ret)
 			return ret;
 	}
@@ -2912,166 +2745,71 @@
 	return 0;
 }
 
-int intel_init_bsd_ring_buffer(struct drm_device *dev)
+int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_engine_cs *engine = &dev_priv->engine[VCS];
-
-	engine->name = "bsd ring";
-	engine->id = VCS;
-	engine->exec_id = I915_EXEC_BSD;
-	engine->hw_id = 1;
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	intel_ring_default_vfuncs(dev_priv, engine);
 
 	if (INTEL_GEN(dev_priv) >= 6) {
-		engine->mmio_base = GEN6_BSD_RING_BASE;
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev_priv))
-			engine->write_tail = gen6_bsd_ring_write_tail;
-		engine->flush = gen6_bsd_ring_flush;
-		if (INTEL_GEN(dev_priv) >= 8)
-			engine->irq_enable_mask =
-				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
-		else
+			engine->submit_request = gen6_bsd_submit_request;
+		engine->emit_flush = gen6_bsd_ring_flush;
+		if (INTEL_GEN(dev_priv) < 8)
 			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 	} else {
 		engine->mmio_base = BSD_RING_BASE;
-		engine->flush = bsd_ring_flush;
+		engine->emit_flush = bsd_ring_flush;
 		if (IS_GEN5(dev_priv))
 			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
 		else
 			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
 	}
 
-	return intel_init_ring_buffer(dev, engine);
+	return intel_init_ring_buffer(engine);
 }
 
 /**
  * Initialize the second BSD ring (eg. Broadwell GT3, Skylake GT3)
  */
-int intel_init_bsd2_ring_buffer(struct drm_device *dev)
+int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
-
-	engine->name = "bsd2 ring";
-	engine->id = VCS2;
-	engine->exec_id = I915_EXEC_BSD;
-	engine->hw_id = 4;
-	engine->mmio_base = GEN8_BSD2_RING_BASE;
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	intel_ring_default_vfuncs(dev_priv, engine);
 
-	engine->flush = gen6_bsd_ring_flush;
-	engine->irq_enable_mask =
-			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
+	engine->emit_flush = gen6_bsd_ring_flush;
 
-	return intel_init_ring_buffer(dev, engine);
+	return intel_init_ring_buffer(engine);
 }
 
-int intel_init_blt_ring_buffer(struct drm_device *dev)
+int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_engine_cs *engine = &dev_priv->engine[BCS];
-
-	engine->name = "blitter ring";
-	engine->id = BCS;
-	engine->exec_id = I915_EXEC_BLT;
-	engine->hw_id = 2;
-	engine->mmio_base = BLT_RING_BASE;
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	intel_ring_default_vfuncs(dev_priv, engine);
 
-	engine->flush = gen6_ring_flush;
-	if (INTEL_GEN(dev_priv) >= 8)
-		engine->irq_enable_mask =
-			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-	else
+	engine->emit_flush = gen6_ring_flush;
+	if (INTEL_GEN(dev_priv) < 8)
 		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
 
-	return intel_init_ring_buffer(dev, engine);
+	return intel_init_ring_buffer(engine);
 }
 
-int intel_init_vebox_ring_buffer(struct drm_device *dev)
+int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_engine_cs *engine = &dev_priv->engine[VECS];
-
-	engine->name = "video enhancement ring";
-	engine->id = VECS;
-	engine->exec_id = I915_EXEC_VEBOX;
-	engine->hw_id = 3;
-	engine->mmio_base = VEBOX_RING_BASE;
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	intel_ring_default_vfuncs(dev_priv, engine);
 
-	engine->flush = gen6_ring_flush;
+	engine->emit_flush = gen6_ring_flush;
 
-	if (INTEL_GEN(dev_priv) >= 8) {
-		engine->irq_enable_mask =
-			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
-	} else {
+	if (INTEL_GEN(dev_priv) < 8) {
 		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
 		engine->irq_enable = hsw_vebox_irq_enable;
 		engine->irq_disable = hsw_vebox_irq_disable;
 	}
 
-	return intel_init_ring_buffer(dev, engine);
-}
-
-int
-intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	int ret;
-
-	if (!engine->gpu_caches_dirty)
-		return 0;
-
-	ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
-	if (ret)
-		return ret;
-
-	trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
-int
-intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	uint32_t flush_domains;
-	int ret;
-
-	flush_domains = 0;
-	if (engine->gpu_caches_dirty)
-		flush_domains = I915_GEM_GPU_DOMAINS;
-
-	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-	if (ret)
-		return ret;
-
-	trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
-void
-intel_stop_engine(struct intel_engine_cs *engine)
-{
-	int ret;
-
-	if (!intel_engine_initialized(engine))
-		return;
-
-	ret = intel_engine_idle(engine);
-	if (ret)
-		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
-			  engine->name, ret);
-
-	stop_ring(engine);
+	return intel_init_ring_buffer(engine);
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 12cb7ed..ec0b4a0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -3,6 +3,7 @@
 
 #include <linux/hashtable.h>
 #include "i915_gem_batch_pool.h"
+#include "i915_gem_request.h"
 
 #define I915_CMD_HASH_ORDER 9
 
@@ -25,29 +26,29 @@
  */
 #define I915_RING_FREE_SPACE 64
 
-struct  intel_hw_status_page {
-	u32		*page_addr;
-	unsigned int	gfx_addr;
-	struct		drm_i915_gem_object *obj;
+struct intel_hw_status_page {
+	struct i915_vma *vma;
+	u32 *page_addr;
+	u32 ggtt_offset;
 };
 
-#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
+#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)
 
-#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
+#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)
 
-#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+#define I915_READ_HEAD(engine)  I915_READ(RING_HEAD((engine)->mmio_base))
+#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)
 
-#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
+#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)
 
-#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
-#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
+#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
+#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)
 
-#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
-#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
+#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
+#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
 
 /* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
  * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
@@ -56,13 +57,13 @@
 #define GEN8_SEMAPHORE_OFFSET(__from, __to)			     \
 	(((__from) * I915_NUM_ENGINES  + (__to)) * gen8_semaphore_seqno_size)
 #define GEN8_SIGNAL_OFFSET(__ring, to)			     \
-	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+	(dev_priv->semaphore->node.start + \
 	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
 #define GEN8_WAIT_OFFSET(__ring, from)			     \
-	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+	(dev_priv->semaphore->node.start + \
 	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
 
-enum intel_ring_hangcheck_action {
+enum intel_engine_hangcheck_action {
 	HANGCHECK_IDLE = 0,
 	HANGCHECK_WAIT,
 	HANGCHECK_ACTIVE,
@@ -72,23 +73,22 @@
 
 #define HANGCHECK_SCORE_RING_HUNG 31
 
-struct intel_ring_hangcheck {
+struct intel_engine_hangcheck {
 	u64 acthd;
-	unsigned long user_interrupts;
 	u32 seqno;
 	int score;
-	enum intel_ring_hangcheck_action action;
+	enum intel_engine_hangcheck_action action;
 	int deadlock;
 	u32 instdone[I915_NUM_INSTDONE_REG];
 };
 
-struct intel_ringbuffer {
-	struct drm_i915_gem_object *obj;
-	void __iomem *virtual_start;
+struct intel_ring {
 	struct i915_vma *vma;
+	void *vaddr;
 
 	struct intel_engine_cs *engine;
-	struct list_head link;
+
+	struct list_head request_list;
 
 	u32 head;
 	u32 tail;
@@ -121,12 +121,12 @@
  *    an option for future use.
  *  size: size of the batch in DWORDS
  */
-struct  i915_ctx_workarounds {
+struct i915_ctx_workarounds {
 	struct i915_wa_ctx_bb {
 		u32 offset;
 		u32 size;
 	} indirect_ctx, per_ctx;
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 };
 
 struct drm_i915_gem_request;
@@ -144,11 +144,18 @@
 #define I915_NUM_ENGINES 5
 #define _VCS(n) (VCS + (n))
 	unsigned int exec_id;
-	unsigned int hw_id;
-	unsigned int guc_id; /* XXX same as hw_id? */
+	enum intel_engine_hw_id {
+		RCS_HW = 0,
+		VCS_HW,
+		BCS_HW,
+		VECS_HW,
+		VCS2_HW
+	} hw_id;
+	enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
+	u64 fence_context;
 	u32		mmio_base;
-	struct intel_ringbuffer *buffer;
-	struct list_head buffers;
+	unsigned int irq_shift;
+	struct intel_ring *buffer;
 
 	/* Rather than have every client wait upon all user interrupts,
 	 * with the herd waking after every interrupt and each doing the
@@ -167,8 +174,7 @@
 	 * the overhead of waking that client is much preferred.
 	 */
 	struct intel_breadcrumbs {
-		struct task_struct *irq_seqno_bh; /* bh for user interrupts */
-		unsigned long irq_wakeups;
+		struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
 		bool irq_posted;
 
 		spinlock_t lock; /* protects the lists of requests */
@@ -178,6 +184,9 @@
 		struct task_struct *signaler; /* used for fence signalling */
 		struct drm_i915_gem_request *first_signal;
 		struct timer_list fake_irq; /* used after a missed interrupt */
+		struct timer_list hangcheck; /* detect missed interrupts */
+
+		unsigned long timeout;
 
 		bool irq_enabled : 1;
 		bool rpm_wakelock : 1;
@@ -192,36 +201,48 @@
 
 	struct intel_hw_status_page status_page;
 	struct i915_ctx_workarounds wa_ctx;
+	struct i915_vma *scratch;
 
 	u32             irq_keep_mask; /* always keep these interrupts */
 	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
-	void		(*irq_enable)(struct intel_engine_cs *ring);
-	void		(*irq_disable)(struct intel_engine_cs *ring);
+	void		(*irq_enable)(struct intel_engine_cs *engine);
+	void		(*irq_disable)(struct intel_engine_cs *engine);
 
-	int		(*init_hw)(struct intel_engine_cs *ring);
+	int		(*init_hw)(struct intel_engine_cs *engine);
+	void		(*reset_hw)(struct intel_engine_cs *engine,
+				    struct drm_i915_gem_request *req);
 
 	int		(*init_context)(struct drm_i915_gem_request *req);
 
-	void		(*write_tail)(struct intel_engine_cs *ring,
-				      u32 value);
-	int __must_check (*flush)(struct drm_i915_gem_request *req,
-				  u32	invalidate_domains,
-				  u32	flush_domains);
-	int		(*add_request)(struct drm_i915_gem_request *req);
+	int		(*emit_flush)(struct drm_i915_gem_request *request,
+				      u32 mode);
+#define EMIT_INVALIDATE	BIT(0)
+#define EMIT_FLUSH	BIT(1)
+#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
+	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
+					 u64 offset, u32 length,
+					 unsigned int dispatch_flags);
+#define I915_DISPATCH_SECURE BIT(0)
+#define I915_DISPATCH_PINNED BIT(1)
+#define I915_DISPATCH_RS     BIT(2)
+	int		(*emit_request)(struct drm_i915_gem_request *req);
+
+	/* Pass the request to the hardware queue (e.g. directly into
+	 * the legacy ringbuffer or to the end of an execlist).
+	 *
+	 * This is called from an atomic context with irqs disabled; must
+	 * be irq safe.
+	 */
+	void		(*submit_request)(struct drm_i915_gem_request *req);
+
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
 	 * However, the up-to-date seqno is not always required and the last
 	 * seen value is good enough. Note that the seqno will always be
 	 * monotonic, even if not coherent.
 	 */
-	void		(*irq_seqno_barrier)(struct intel_engine_cs *ring);
-	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
-					       u64 offset, u32 length,
-					       unsigned dispatch_flags);
-#define I915_DISPATCH_SECURE 0x1
-#define I915_DISPATCH_PINNED 0x2
-#define I915_DISPATCH_RS     0x4
-	void		(*cleanup)(struct intel_engine_cs *ring);
+	void		(*irq_seqno_barrier)(struct intel_engine_cs *engine);
+	void		(*cleanup)(struct intel_engine_cs *engine);
 
 	/* GEN8 signal/wait table - never trust comments!
 	 *	  signal to	signal to    signal to   signal to      signal to
@@ -264,51 +285,36 @@
 		u32	sync_seqno[I915_NUM_ENGINES-1];
 
 		union {
+#define GEN6_SEMAPHORE_LAST	VECS_HW
+#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
+#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
 			struct {
 				/* our mbox written by others */
-				u32		wait[I915_NUM_ENGINES];
+				u32		wait[GEN6_NUM_SEMAPHORES];
 				/* mboxes this ring signals to */
-				i915_reg_t	signal[I915_NUM_ENGINES];
+				i915_reg_t	signal[GEN6_NUM_SEMAPHORES];
 			} mbox;
 			u64		signal_ggtt[I915_NUM_ENGINES];
 		};
 
 		/* AKA wait() */
-		int	(*sync_to)(struct drm_i915_gem_request *to_req,
-				   struct intel_engine_cs *from,
-				   u32 seqno);
-		int	(*signal)(struct drm_i915_gem_request *signaller_req,
-				  /* num_dwords needed by caller */
-				  unsigned int num_dwords);
+		int	(*sync_to)(struct drm_i915_gem_request *req,
+				   struct drm_i915_gem_request *signal);
+		int	(*signal)(struct drm_i915_gem_request *req);
 	} semaphore;
 
 	/* Execlists */
 	struct tasklet_struct irq_tasklet;
 	spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
+	struct execlist_port {
+		struct drm_i915_gem_request *request;
+		unsigned int count;
+	} execlist_port[2];
 	struct list_head execlist_queue;
 	unsigned int fw_domains;
-	unsigned int next_context_status_buffer;
-	unsigned int idle_lite_restore_wa;
 	bool disable_lite_restore_wa;
+	bool preempt_wa;
 	u32 ctx_desc_template;
-	int		(*emit_request)(struct drm_i915_gem_request *request);
-	int		(*emit_flush)(struct drm_i915_gem_request *request,
-				      u32 invalidate_domains,
-				      u32 flush_domains);
-	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
-					 u64 offset, unsigned dispatch_flags);
-
-	/**
-	 * List of objects currently involved in rendering from the
-	 * ringbuffer.
-	 *
-	 * Includes buffers having the contents of their GPU caches
-	 * flushed, not necessarily primitives.  last_read_req
-	 * represents when the rendering involved will be completed.
-	 *
-	 * A reference is held on the buffer while on this list.
-	 */
-	struct list_head active_list;
 
 	/**
 	 * List of breadcrumbs associated with GPU requests currently
@@ -322,23 +328,24 @@
 	 * inspecting request list.
 	 */
 	u32 last_submitted_seqno;
+	u32 last_pending_seqno;
 
-	bool gpu_caches_dirty;
+	/* An RCU guarded pointer to the last request. No reference is
+	 * held to the request, users must carefully acquire a reference to
+	 * the request using i915_gem_active_get_rcu(), or hold the
+	 * struct_mutex.
+	 */
+	struct i915_gem_active last_request;
 
 	struct i915_gem_context *last_context;
 
-	struct intel_ring_hangcheck hangcheck;
-
-	struct {
-		struct drm_i915_gem_object *obj;
-		u32 gtt_offset;
-	} scratch;
+	struct intel_engine_hangcheck hangcheck;
 
 	bool needs_cmd_parser;
 
 	/*
 	 * Table of commands the command parser needs to know about
-	 * for this ring.
+	 * for this engine.
 	 */
 	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
 
@@ -352,11 +359,11 @@
 	 * Returns the bitmask for the length field of the specified command.
 	 * Return 0 for an unrecognized/invalid command.
 	 *
-	 * If the command parser finds an entry for a command in the ring's
+	 * If the command parser finds an entry for a command in the engine's
 	 * cmd_tables, it gets the command's length based on the table entry.
-	 * If not, it calls this function to determine the per-ring length field
-	 * encoding for the command (i.e. certain opcode ranges use certain bits
-	 * to encode the command length in the header).
+	 * If not, it calls this function to determine the per-engine length
+	 * field encoding for the command (i.e. different opcode ranges use
+	 * certain bits to encode the command length in the header).
 	 */
 	u32 (*get_cmd_length_mask)(u32 cmd_header);
 };
@@ -374,8 +381,8 @@
 }
 
 static inline u32
-intel_ring_sync_index(struct intel_engine_cs *engine,
-		      struct intel_engine_cs *other)
+intel_engine_sync_index(struct intel_engine_cs *engine,
+			struct intel_engine_cs *other)
 {
 	int idx;
 
@@ -437,55 +444,76 @@
 #define I915_GEM_HWS_SCRATCH_INDEX	0x40
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
-struct intel_ringbuffer *
-intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
-				     struct intel_ringbuffer *ringbuf);
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
-void intel_ringbuffer_free(struct intel_ringbuffer *ring);
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size);
+int intel_ring_pin(struct intel_ring *ring);
+void intel_ring_unpin(struct intel_ring *ring);
+void intel_ring_free(struct intel_ring *ring);
 
-void intel_stop_engine(struct intel_engine_cs *engine);
-void intel_cleanup_engine(struct intel_engine_cs *engine);
+void intel_engine_stop(struct intel_engine_cs *engine);
+void intel_engine_cleanup(struct intel_engine_cs *engine);
+
+void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
 int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
-static inline void intel_ring_emit(struct intel_engine_cs *engine,
-				   u32 data)
+
+static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
 {
-	struct intel_ringbuffer *ringbuf = engine->buffer;
-	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
-	ringbuf->tail += 4;
+	*(uint32_t *)(ring->vaddr + ring->tail) = data;
+	ring->tail += 4;
 }
-static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
-				       i915_reg_t reg)
+
+static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
 {
-	intel_ring_emit(engine, i915_mmio_reg_offset(reg));
+	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
 }
-static inline void intel_ring_advance(struct intel_engine_cs *engine)
+
+static inline void intel_ring_advance(struct intel_ring *ring)
 {
-	struct intel_ringbuffer *ringbuf = engine->buffer;
-	ringbuf->tail &= ringbuf->size - 1;
+	/* Dummy function.
+	 *
+	 * This serves as a placeholder in the code so that the reader
+	 * can compare against the preceding intel_ring_begin() and
+	 * check that the number of dwords emitted matches the space
+	 * reserved for the command packet (i.e. the value passed to
+	 * intel_ring_begin()).
+	 */
 }
+
+static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
+{
+	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
+	return value & (ring->size - 1);
+}
+
 int __intel_ring_space(int head, int tail, int size);
-void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
+void intel_ring_update_space(struct intel_ring *ring);
 
-int __must_check intel_engine_idle(struct intel_engine_cs *engine);
-void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
-int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
-int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
+void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
 
-int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
-void intel_fini_pipe_control(struct intel_engine_cs *engine);
+void intel_engine_setup_common(struct intel_engine_cs *engine);
+int intel_engine_init_common(struct intel_engine_cs *engine);
+int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
+void intel_engine_cleanup_common(struct intel_engine_cs *engine);
 
-int intel_init_render_ring_buffer(struct drm_device *dev);
-int intel_init_bsd_ring_buffer(struct drm_device *dev);
-int intel_init_bsd2_ring_buffer(struct drm_device *dev);
-int intel_init_blt_ring_buffer(struct drm_device *dev);
-int intel_init_vebox_ring_buffer(struct drm_device *dev);
+static inline int intel_engine_idle(struct intel_engine_cs *engine,
+				    unsigned int flags)
+{
+	/* Wait upon the last request to be completed */
+	return i915_gem_active_wait_unlocked(&engine->last_request,
+					     flags, NULL, NULL);
+}
 
-u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
+int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
+int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
+int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
+int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
+int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
+
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 {
 	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
@@ -493,11 +521,6 @@
 
 int init_workarounds_ring(struct intel_engine_cs *engine);
 
-static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
-{
-	return ringbuf->tail;
-}
-
 /*
  * Arbitrary size for largest possible 'add request' sequence. The code paths
  * are complex and variable. Empirical measurement shows that the worst case
@@ -509,21 +532,10 @@
 
 static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
 {
-	return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
+	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
 }
 
 /* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
-struct intel_wait {
-	struct rb_node node;
-	struct task_struct *tsk;
-	u32 seqno;
-};
-
-struct intel_signal_node {
-	struct rb_node node;
-	struct intel_wait wait;
-};
-
 int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
 
 static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
@@ -543,31 +555,43 @@
 			      struct intel_wait *wait);
 void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
 
-static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine)
+static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
 {
-	return READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+	return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
 }
 
-static inline bool intel_engine_wakeup(struct intel_engine_cs *engine)
+static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
 {
 	bool wakeup = false;
-	struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+
 	/* Note that for this not to dangerously chase a dangling pointer,
-	 * the caller is responsible for ensure that the task remain valid for
-	 * wake_up_process() i.e. that the RCU grace period cannot expire.
+	 * we must hold the rcu_read_lock here.
 	 *
 	 * Also note that tsk is likely to be in !TASK_RUNNING state so an
 	 * early test for tsk->state != TASK_RUNNING before wake_up_process()
 	 * is unlikely to be beneficial.
 	 */
-	if (tsk)
-		wakeup = wake_up_process(tsk);
+	if (intel_engine_has_waiter(engine)) {
+		struct task_struct *tsk;
+
+		rcu_read_lock();
+		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
+		if (tsk)
+			wakeup = wake_up_process(tsk);
+		rcu_read_unlock();
+	}
+
 	return wakeup;
 }
 
-void intel_engine_enable_fake_irq(struct intel_engine_cs *engine);
+void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 unsigned int intel_kick_waiters(struct drm_i915_private *i915);
 unsigned int intel_kick_signalers(struct drm_i915_private *i915);
 
+static inline bool intel_engine_is_active(struct intel_engine_cs *engine)
+{
+	return i915_gem_active_isset(&engine->last_request);
+}
+
 #endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 1c603bb..6c11168 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -287,6 +287,7 @@
  */
 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
 {
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct drm_device *dev = &dev_priv->drm;
 
 	/*
@@ -299,9 +300,9 @@
 	 * sure vgacon can keep working normally without triggering interrupts
 	 * and error messages.
 	 */
-	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
 	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
-	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+	vga_put(pdev, VGA_RSRC_LEGACY_IO);
 
 	if (IS_BROADWELL(dev))
 		gen8_irq_power_well_post_enable(dev_priv,
@@ -318,7 +319,7 @@
 static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
 				       struct i915_power_well *power_well)
 {
-	struct drm_device *dev = &dev_priv->drm;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 
 	/*
 	 * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -331,9 +332,9 @@
 	 * and error messages.
 	 */
 	if (power_well->data == SKL_DISP_PW_2) {
-		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
 		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
-		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+		vga_put(pdev, VGA_RSRC_LEGACY_IO);
 
 		gen8_irq_power_well_post_enable(dev_priv,
 						1 << PIPE_C | 1 << PIPE_B);
@@ -592,6 +593,8 @@
 	DRM_DEBUG_KMS("Disabling DC9\n");
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+	intel_pps_unlock_regs_wa(dev_priv);
 }
 
 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
@@ -854,7 +857,7 @@
 					   struct i915_power_well *power_well)
 {
 	enum skl_disp_power_wells power_well_id = power_well->data;
-	struct i915_power_well *cmn_a_well;
+	struct i915_power_well *cmn_a_well = NULL;
 
 	if (power_well_id == BXT_DPIO_CMN_BC) {
 		/*
@@ -867,7 +870,7 @@
 
 	bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well));
 
-	if (power_well_id == BXT_DPIO_CMN_BC)
+	if (cmn_a_well)
 		intel_power_well_put(dev_priv, cmn_a_well);
 }
 
@@ -1121,6 +1124,8 @@
 	}
 
 	i915_redisable_vga_power_on(&dev_priv->drm);
+
+	intel_pps_unlock_regs_wa(dev_priv);
 }
 
 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
@@ -2284,7 +2289,7 @@
  */
 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
 {
-	struct device *device = &dev_priv->drm.pdev->dev;
+	struct device *kdev = &dev_priv->drm.pdev->dev;
 
 	/*
 	 * The i915.ko module is still not prepared to be loaded when
@@ -2306,7 +2311,7 @@
 	 * the platform doesn't support runtime PM.
 	 */
 	if (!HAS_RUNTIME_PM(dev_priv))
-		pm_runtime_put(device);
+		pm_runtime_put(kdev);
 }
 
 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
@@ -2647,10 +2652,10 @@
  */
 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = &dev_priv->drm;
-	struct device *device = &dev->pdev->dev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct device *kdev = &pdev->dev;
 
-	pm_runtime_get_sync(device);
+	pm_runtime_get_sync(kdev);
 
 	atomic_inc(&dev_priv->pm.wakeref_count);
 	assert_rpm_wakelock_held(dev_priv);
@@ -2668,11 +2673,11 @@
  */
 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = &dev_priv->drm;
-	struct device *device = &dev->pdev->dev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct device *kdev = &pdev->dev;
 
 	if (IS_ENABLED(CONFIG_PM)) {
-		int ret = pm_runtime_get_if_in_use(device);
+		int ret = pm_runtime_get_if_in_use(kdev);
 
 		/*
 		 * In cases runtime PM is disabled by the RPM core and we get
@@ -2710,11 +2715,11 @@
  */
 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = &dev_priv->drm;
-	struct device *device = &dev->pdev->dev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct device *kdev = &pdev->dev;
 
 	assert_rpm_wakelock_held(dev_priv);
-	pm_runtime_get_noresume(device);
+	pm_runtime_get_noresume(kdev);
 
 	atomic_inc(&dev_priv->pm.wakeref_count);
 }
@@ -2729,15 +2734,15 @@
  */
 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = &dev_priv->drm;
-	struct device *device = &dev->pdev->dev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct device *kdev = &pdev->dev;
 
 	assert_rpm_wakelock_held(dev_priv);
 	if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
 		atomic_inc(&dev_priv->pm.atomic_seq);
 
-	pm_runtime_mark_last_busy(device);
-	pm_runtime_put_autosuspend(device);
+	pm_runtime_mark_last_busy(kdev);
+	pm_runtime_put_autosuspend(kdev);
 }
 
 /**
@@ -2752,11 +2757,12 @@
  */
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 {
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct drm_device *dev = &dev_priv->drm;
-	struct device *device = &dev->pdev->dev;
+	struct device *kdev = &pdev->dev;
 
-	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
-	pm_runtime_mark_last_busy(device);
+	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
+	pm_runtime_mark_last_busy(kdev);
 
 	/*
 	 * Take a permanent reference to disable the RPM functionality and drop
@@ -2765,10 +2771,10 @@
 	 * platforms without RPM support.
 	 */
 	if (!HAS_RUNTIME_PM(dev)) {
-		pm_runtime_dont_use_autosuspend(device);
-		pm_runtime_get_sync(device);
+		pm_runtime_dont_use_autosuspend(kdev);
+		pm_runtime_get_sync(kdev);
 	} else {
-		pm_runtime_use_autosuspend(device);
+		pm_runtime_use_autosuspend(kdev);
 	}
 
 	/*
@@ -2776,6 +2782,5 @@
 	 * We drop that here and will reacquire it during unloading in
 	 * intel_power_domains_fini().
 	 */
-	pm_runtime_put_autosuspend(device);
+	pm_runtime_put_autosuspend(kdev);
 }
-
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e378f35..c551024 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1003,24 +1003,22 @@
 }
 
 static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
-					 const struct drm_display_mode *adjusted_mode)
+					 struct intel_crtc_state *pipe_config)
 {
 	uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
-	struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	union hdmi_infoframe frame;
 	int ret;
 	ssize_t len;
 
 	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
-						       adjusted_mode);
+						       &pipe_config->base.adjusted_mode);
 	if (ret < 0) {
 		DRM_ERROR("couldn't fill AVI infoframe\n");
 		return false;
 	}
 
 	if (intel_sdvo->rgb_quant_range_selectable) {
-		if (intel_crtc->config->limited_color_range)
+		if (pipe_config->limited_color_range)
 			frame.avi.quantization_range =
 				HDMI_QUANTIZATION_RANGE_LIMITED;
 		else
@@ -1125,7 +1123,8 @@
 }
 
 static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
-				      struct intel_crtc_state *pipe_config)
+				      struct intel_crtc_state *pipe_config,
+				      struct drm_connector_state *conn_state)
 {
 	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
@@ -1192,22 +1191,21 @@
 	return true;
 }
 
-static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
+static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
+				  struct intel_crtc_state *crtc_state,
+				  struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
-	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
-	struct drm_display_mode *mode = &crtc->config->base.mode;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+	struct drm_display_mode *mode = &crtc_state->base.mode;
 	struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
 	u32 sdvox;
 	struct intel_sdvo_in_out_map in_out;
 	struct intel_sdvo_dtd input_dtd, output_dtd;
 	int rate;
 
-	if (!mode)
-		return;
-
 	/* First, set the input mapping for the first input to our controlled
 	 * output. This is only correct if we're a single-input device, in
 	 * which case the first input is the output from the appropriate SDVO
@@ -1240,11 +1238,11 @@
 	if (!intel_sdvo_set_target_input(intel_sdvo))
 		return;
 
-	if (crtc->config->has_hdmi_sink) {
+	if (crtc_state->has_hdmi_sink) {
 		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
 		intel_sdvo_set_colorimetry(intel_sdvo,
 					   SDVO_COLORIMETRY_RGB256);
-		intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode);
+		intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state);
 	} else
 		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
 
@@ -1260,7 +1258,7 @@
 		DRM_INFO("Setting input timings on %s failed\n",
 			 SDVO_NAME(intel_sdvo));
 
-	switch (crtc->config->pixel_multiplier) {
+	switch (crtc_state->pixel_multiplier) {
 	default:
 		WARN(1, "unknown pixel multiplier specified\n");
 	case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
@@ -1275,7 +1273,7 @@
 		/* The real mode polarity is set by the SDVO commands, using
 		 * struct intel_sdvo_dtd. */
 		sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
-		if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
+		if (!HAS_PCH_SPLIT(dev) && crtc_state->limited_color_range)
 			sdvox |= HDMI_COLOR_RANGE_16_235;
 		if (INTEL_INFO(dev)->gen < 5)
 			sdvox |= SDVO_BORDER_ENABLE;
@@ -1301,7 +1299,7 @@
 	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
 		/* done in crtc_mode_set as it lives inside the dpll register */
 	} else {
-		sdvox |= (crtc->config->pixel_multiplier - 1)
+		sdvox |= (crtc_state->pixel_multiplier - 1)
 			<< SDVO_PORT_MULTIPLY_SHIFT;
 	}
 
@@ -1434,7 +1432,9 @@
 	     pipe_config->pixel_multiplier, encoder_pixel_multiplier);
 }
 
-static void intel_disable_sdvo(struct intel_encoder *encoder)
+static void intel_disable_sdvo(struct intel_encoder *encoder,
+			       struct intel_crtc_state *old_crtc_state,
+			       struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
@@ -1477,16 +1477,22 @@
 	}
 }
 
-static void pch_disable_sdvo(struct intel_encoder *encoder)
+static void pch_disable_sdvo(struct intel_encoder *encoder,
+			     struct intel_crtc_state *old_crtc_state,
+			     struct drm_connector_state *old_conn_state)
 {
 }
 
-static void pch_post_disable_sdvo(struct intel_encoder *encoder)
+static void pch_post_disable_sdvo(struct intel_encoder *encoder,
+				  struct intel_crtc_state *old_crtc_state,
+				  struct drm_connector_state *old_conn_state)
 {
-	intel_disable_sdvo(encoder);
+	intel_disable_sdvo(encoder, old_crtc_state, old_conn_state);
 }
 
-static void intel_enable_sdvo(struct intel_encoder *encoder)
+static void intel_enable_sdvo(struct intel_encoder *encoder,
+			      struct intel_crtc_state *pipe_config,
+			      struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2930,10 +2936,12 @@
 intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
 			  struct drm_device *dev)
 {
+	struct pci_dev *pdev = dev->pdev;
+
 	sdvo->ddc.owner = THIS_MODULE;
 	sdvo->ddc.class = I2C_CLASS_DDC;
 	snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
-	sdvo->ddc.dev.parent = &dev->pdev->dev;
+	sdvo->ddc.dev.parent = &pdev->dev;
 	sdvo->ddc.algo_data = sdvo;
 	sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
 
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 7c08e4f..73a521f 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -36,6 +36,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_plane_helper.h>
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
@@ -202,23 +203,24 @@
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane = to_intel_plane(drm_plane);
 	struct drm_framebuffer *fb = plane_state->base.fb;
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
+	struct drm_crtc *crtc = crtc_state->base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	const int pipe = intel_plane->pipe;
 	const int plane = intel_plane->plane + 1;
-	u32 plane_ctl, stride_div, stride;
+	u32 plane_ctl;
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-	u32 surf_addr;
-	u32 tile_height, plane_offset, plane_size;
+	u32 surf_addr = plane_state->main.offset;
 	unsigned int rotation = plane_state->base.rotation;
-	int x_offset, y_offset;
-	int crtc_x = plane_state->dst.x1;
-	int crtc_y = plane_state->dst.y1;
-	uint32_t crtc_w = drm_rect_width(&plane_state->dst);
-	uint32_t crtc_h = drm_rect_height(&plane_state->dst);
-	uint32_t x = plane_state->src.x1 >> 16;
-	uint32_t y = plane_state->src.y1 >> 16;
-	uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
-	uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
+	u32 stride = skl_plane_stride(fb, 0, rotation);
+	int crtc_x = plane_state->base.dst.x1;
+	int crtc_y = plane_state->base.dst.y1;
+	uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+	uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+	uint32_t x = plane_state->main.x;
+	uint32_t y = plane_state->main.y;
+	uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
+	uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
 
 	plane_ctl = PLANE_CTL_ENABLE |
 		PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -229,14 +231,8 @@
 
 	plane_ctl |= skl_plane_ctl_rotation(rotation);
 
-	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
-					       fb->pixel_format);
-
-	/* Sizes are 0 based */
-	src_w--;
-	src_h--;
-	crtc_w--;
-	crtc_h--;
+	if (wm->dirty_pipes & drm_crtc_mask(crtc))
+		skl_write_plane_wm(intel_crtc, wm, plane);
 
 	if (key->flags) {
 		I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
@@ -249,28 +245,15 @@
 	else if (key->flags & I915_SET_COLORKEY_SOURCE)
 		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
 
-	surf_addr = intel_plane_obj_offset(intel_plane, obj, 0);
+	/* Sizes are 0 based */
+	src_w--;
+	src_h--;
+	crtc_w--;
+	crtc_h--;
 
-	if (intel_rotation_90_or_270(rotation)) {
-		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
-
-		/* stride: Surface height in tiles */
-		tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
-		stride = DIV_ROUND_UP(fb->height, tile_height);
-		plane_size = (src_w << 16) | src_h;
-		x_offset = stride * tile_height - y - (src_h + 1);
-		y_offset = x;
-	} else {
-		stride = fb->pitches[0] / stride_div;
-		plane_size = (src_h << 16) | src_w;
-		x_offset = x;
-		y_offset = y;
-	}
-	plane_offset = y_offset << 16 | x_offset;
-
-	I915_WRITE(PLANE_OFFSET(pipe, plane), plane_offset);
+	I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
 	I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
-	I915_WRITE(PLANE_SIZE(pipe, plane), plane_size);
+	I915_WRITE(PLANE_SIZE(pipe, plane), (src_h << 16) | src_w);
 
 	/* program plane scaler */
 	if (plane_state->scaler_id >= 0) {
@@ -295,7 +278,8 @@
 	}
 
 	I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
-	I915_WRITE(PLANE_SURF(pipe, plane), surf_addr);
+	I915_WRITE(PLANE_SURF(pipe, plane),
+		   intel_fb_gtt_offset(fb, rotation) + surf_addr);
 	POSTING_READ(PLANE_SURF(pipe, plane));
 }
 
@@ -308,6 +292,14 @@
 	const int pipe = intel_plane->pipe;
 	const int plane = intel_plane->plane + 1;
 
+	/*
+	 * We only populate skl_results on watermark updates, and if the
+	 * plane's visiblity isn't actually changing neither is its watermarks.
+	 */
+	if (!dplane->state->visible)
+		skl_write_plane_wm(to_intel_crtc(crtc),
+				   &dev_priv->wm.skl_results, plane);
+
 	I915_WRITE(PLANE_CTL(pipe, plane), 0);
 
 	I915_WRITE(PLANE_SURF(pipe, plane), 0);
@@ -362,22 +354,20 @@
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane = to_intel_plane(dplane);
 	struct drm_framebuffer *fb = plane_state->base.fb;
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	int pipe = intel_plane->pipe;
 	int plane = intel_plane->plane;
 	u32 sprctl;
 	u32 sprsurf_offset, linear_offset;
 	unsigned int rotation = dplane->state->rotation;
-	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-	int crtc_x = plane_state->dst.x1;
-	int crtc_y = plane_state->dst.y1;
-	uint32_t crtc_w = drm_rect_width(&plane_state->dst);
-	uint32_t crtc_h = drm_rect_height(&plane_state->dst);
-	uint32_t x = plane_state->src.x1 >> 16;
-	uint32_t y = plane_state->src.y1 >> 16;
-	uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
-	uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
+	int crtc_x = plane_state->base.dst.x1;
+	int crtc_y = plane_state->base.dst.y1;
+	uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+	uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+	uint32_t x = plane_state->base.src.x1 >> 16;
+	uint32_t y = plane_state->base.src.y1 >> 16;
+	uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
+	uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
 
 	sprctl = SP_ENABLE;
 
@@ -430,7 +420,7 @@
 	 */
 	sprctl |= SP_GAMMA_ENABLE;
 
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
 		sprctl |= SP_TILED;
 
 	/* Sizes are 0 based */
@@ -439,19 +429,18 @@
 	crtc_w--;
 	crtc_h--;
 
-	linear_offset = y * fb->pitches[0] + x * cpp;
-	sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
-						   fb->pitches[0], rotation);
-	linear_offset -= sprsurf_offset;
+	intel_add_fb_offsets(&x, &y, plane_state, 0);
+	sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
 
-	if (rotation == BIT(DRM_ROTATE_180)) {
+	if (rotation == DRM_ROTATE_180) {
 		sprctl |= SP_ROTATE_180;
 
 		x += src_w;
 		y += src_h;
-		linear_offset += src_h * fb->pitches[0] + src_w * cpp;
 	}
 
+	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
 	if (key->flags) {
 		I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
 		I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
@@ -467,7 +456,7 @@
 	I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
 	I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
 
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
 		I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
 	else
 		I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
@@ -476,8 +465,8 @@
 
 	I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
 	I915_WRITE(SPCNTR(pipe, plane), sprctl);
-	I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
-		   sprsurf_offset);
+	I915_WRITE(SPSURF(pipe, plane),
+		   intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
 	POSTING_READ(SPSURF(pipe, plane));
 }
 
@@ -505,21 +494,19 @@
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct drm_framebuffer *fb = plane_state->base.fb;
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	enum pipe pipe = intel_plane->pipe;
 	u32 sprctl, sprscale = 0;
 	u32 sprsurf_offset, linear_offset;
 	unsigned int rotation = plane_state->base.rotation;
-	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-	int crtc_x = plane_state->dst.x1;
-	int crtc_y = plane_state->dst.y1;
-	uint32_t crtc_w = drm_rect_width(&plane_state->dst);
-	uint32_t crtc_h = drm_rect_height(&plane_state->dst);
-	uint32_t x = plane_state->src.x1 >> 16;
-	uint32_t y = plane_state->src.y1 >> 16;
-	uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
-	uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
+	int crtc_x = plane_state->base.dst.x1;
+	int crtc_y = plane_state->base.dst.y1;
+	uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+	uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+	uint32_t x = plane_state->base.src.x1 >> 16;
+	uint32_t y = plane_state->base.src.y1 >> 16;
+	uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
+	uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
 
 	sprctl = SPRITE_ENABLE;
 
@@ -552,7 +539,7 @@
 	 */
 	sprctl |= SPRITE_GAMMA_ENABLE;
 
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
 		sprctl |= SPRITE_TILED;
 
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -572,22 +559,21 @@
 	if (crtc_w != src_w || crtc_h != src_h)
 		sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
 
-	linear_offset = y * fb->pitches[0] + x * cpp;
-	sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
-						   fb->pitches[0], rotation);
-	linear_offset -= sprsurf_offset;
+	intel_add_fb_offsets(&x, &y, plane_state, 0);
+	sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
 
-	if (rotation == BIT(DRM_ROTATE_180)) {
+	if (rotation == DRM_ROTATE_180) {
 		sprctl |= SPRITE_ROTATE_180;
 
 		/* HSW and BDW does this automagically in hardware */
 		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
 			x += src_w;
 			y += src_h;
-			linear_offset += src_h * fb->pitches[0] + src_w * cpp;
 		}
 	}
 
+	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
 	if (key->flags) {
 		I915_WRITE(SPRKEYVAL(pipe), key->min_value);
 		I915_WRITE(SPRKEYMAX(pipe), key->max_value);
@@ -606,7 +592,7 @@
 	 * register */
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
-	else if (obj->tiling_mode != I915_TILING_NONE)
+	else if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
 		I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
 	else
 		I915_WRITE(SPRLINOFF(pipe), linear_offset);
@@ -616,7 +602,7 @@
 		I915_WRITE(SPRSCALE(pipe), sprscale);
 	I915_WRITE(SPRCTL(pipe), sprctl);
 	I915_WRITE(SPRSURF(pipe),
-		   i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
+		   intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
 	POSTING_READ(SPRSURF(pipe));
 }
 
@@ -646,21 +632,19 @@
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct drm_framebuffer *fb = plane_state->base.fb;
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	int pipe = intel_plane->pipe;
 	u32 dvscntr, dvsscale;
 	u32 dvssurf_offset, linear_offset;
 	unsigned int rotation = plane_state->base.rotation;
-	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-	int crtc_x = plane_state->dst.x1;
-	int crtc_y = plane_state->dst.y1;
-	uint32_t crtc_w = drm_rect_width(&plane_state->dst);
-	uint32_t crtc_h = drm_rect_height(&plane_state->dst);
-	uint32_t x = plane_state->src.x1 >> 16;
-	uint32_t y = plane_state->src.y1 >> 16;
-	uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
-	uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
+	int crtc_x = plane_state->base.dst.x1;
+	int crtc_y = plane_state->base.dst.y1;
+	uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+	uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+	uint32_t x = plane_state->base.src.x1 >> 16;
+	uint32_t y = plane_state->base.src.y1 >> 16;
+	uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
+	uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
 
 	dvscntr = DVS_ENABLE;
 
@@ -693,7 +677,7 @@
 	 */
 	dvscntr |= DVS_GAMMA_ENABLE;
 
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
 		dvscntr |= DVS_TILED;
 
 	if (IS_GEN6(dev))
@@ -709,19 +693,18 @@
 	if (crtc_w != src_w || crtc_h != src_h)
 		dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
 
-	linear_offset = y * fb->pitches[0] + x * cpp;
-	dvssurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
-						   fb->pitches[0], rotation);
-	linear_offset -= dvssurf_offset;
+	intel_add_fb_offsets(&x, &y, plane_state, 0);
+	dvssurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
 
-	if (rotation == BIT(DRM_ROTATE_180)) {
+	if (rotation == DRM_ROTATE_180) {
 		dvscntr |= DVS_ROTATE_180;
 
 		x += src_w;
 		y += src_h;
-		linear_offset += src_h * fb->pitches[0] + src_w * cpp;
 	}
 
+	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
 	if (key->flags) {
 		I915_WRITE(DVSKEYVAL(pipe), key->min_value);
 		I915_WRITE(DVSKEYMAX(pipe), key->max_value);
@@ -736,7 +719,7 @@
 	I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
 	I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
 
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
 		I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
 	else
 		I915_WRITE(DVSLINOFF(pipe), linear_offset);
@@ -745,7 +728,7 @@
 	I915_WRITE(DVSSCALE(pipe), dvsscale);
 	I915_WRITE(DVSCNTR(pipe), dvscntr);
 	I915_WRITE(DVSSURF(pipe),
-		   i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
+		   intel_fb_gtt_offset(fb, rotation) + dvssurf_offset);
 	POSTING_READ(DVSSURF(pipe));
 }
 
@@ -778,15 +761,26 @@
 	int crtc_x, crtc_y;
 	unsigned int crtc_w, crtc_h;
 	uint32_t src_x, src_y, src_w, src_h;
-	struct drm_rect *src = &state->src;
-	struct drm_rect *dst = &state->dst;
+	struct drm_rect *src = &state->base.src;
+	struct drm_rect *dst = &state->base.dst;
 	const struct drm_rect *clip = &state->clip;
 	int hscale, vscale;
 	int max_scale, min_scale;
 	bool can_scale;
+	int ret;
+
+	src->x1 = state->base.src_x;
+	src->y1 = state->base.src_y;
+	src->x2 = state->base.src_x + state->base.src_w;
+	src->y2 = state->base.src_y + state->base.src_h;
+
+	dst->x1 = state->base.crtc_x;
+	dst->y1 = state->base.crtc_y;
+	dst->x2 = state->base.crtc_x + state->base.crtc_w;
+	dst->y2 = state->base.crtc_y + state->base.crtc_h;
 
 	if (!fb) {
-		state->visible = false;
+		state->base.visible = false;
 		return 0;
 	}
 
@@ -834,14 +828,14 @@
 	vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
 	BUG_ON(vscale < 0);
 
-	state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
+	state->base.visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
 
 	crtc_x = dst->x1;
 	crtc_y = dst->y1;
 	crtc_w = drm_rect_width(dst);
 	crtc_h = drm_rect_height(dst);
 
-	if (state->visible) {
+	if (state->base.visible) {
 		/* check again in case clipping clamped the results */
 		hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
 		if (hscale < 0) {
@@ -898,12 +892,12 @@
 				crtc_w &= ~1;
 
 			if (crtc_w == 0)
-				state->visible = false;
+				state->base.visible = false;
 		}
 	}
 
 	/* Check size restrictions when scaling */
-	if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
+	if (state->base.visible && (src_w != crtc_w || src_h != crtc_h)) {
 		unsigned int width_bytes;
 		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 
@@ -912,10 +906,10 @@
 		/* FIXME interlacing min height is 6 */
 
 		if (crtc_w < 3 || crtc_h < 3)
-			state->visible = false;
+			state->base.visible = false;
 
 		if (src_w < 3 || src_h < 3)
-			state->visible = false;
+			state->base.visible = false;
 
 		width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
 
@@ -926,7 +920,7 @@
 		}
 	}
 
-	if (state->visible) {
+	if (state->base.visible) {
 		src->x1 = src_x << 16;
 		src->x2 = (src_x + src_w) << 16;
 		src->y1 = src_y << 16;
@@ -938,6 +932,12 @@
 	dst->y1 = crtc_y;
 	dst->y2 = crtc_y + crtc_h;
 
+	if (INTEL_GEN(dev) >= 9) {
+		ret = skl_check_plane_surface(state);
+		if (ret)
+			return ret;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 49136ad..d960e48 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -838,7 +838,9 @@
 }
 
 static void
-intel_enable_tv(struct intel_encoder *encoder)
+intel_enable_tv(struct intel_encoder *encoder,
+		struct intel_crtc_state *pipe_config,
+		struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -851,7 +853,9 @@
 }
 
 static void
-intel_disable_tv(struct intel_encoder *encoder)
+intel_disable_tv(struct intel_encoder *encoder,
+		 struct intel_crtc_state *old_crtc_state,
+		 struct drm_connector_state *old_conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -908,7 +912,8 @@
 
 static bool
 intel_tv_compute_config(struct intel_encoder *encoder,
-			struct intel_crtc_state *pipe_config)
+			struct intel_crtc_state *pipe_config,
+			struct drm_connector_state *conn_state)
 {
 	struct intel_tv *intel_tv = enc_to_tv(encoder);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -1010,7 +1015,9 @@
 		   color_conversion->av);
 }
 
-static void intel_tv_pre_enable(struct intel_encoder *encoder)
+static void intel_tv_pre_enable(struct intel_encoder *encoder,
+				struct intel_crtc_state *pipe_config,
+				struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index ff80a81..ee2306a 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -435,7 +435,7 @@
 	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
 
 	/* BIOS often leaves RC6 enabled, but disable it for hw init */
-	intel_disable_gt_powersave(dev_priv);
+	intel_sanitize_gt_powersave(dev_priv);
 }
 
 static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
@@ -796,10 +796,9 @@
 		      const bool read,
 		      const bool before)
 {
-	if (WARN(check_for_unclaimed_mmio(dev_priv),
-		 "Unclaimed register detected %s %s register 0x%x\n",
-		 before ? "before" : "after",
-		 read ? "reading" : "writing to",
+	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
+		 "Unclaimed %s register 0x%x\n",
+		 read ? "read from" : "write to",
 		 i915_mmio_reg_offset(reg)))
 		i915.mmio_debug--; /* Only report the first N failures */
 }
@@ -1018,11 +1017,9 @@
 __gen5_write(8)
 __gen5_write(16)
 __gen5_write(32)
-__gen5_write(64)
 __gen2_write(8)
 __gen2_write(16)
 __gen2_write(32)
-__gen2_write(64)
 
 #undef __gen5_write
 #undef __gen2_write
@@ -1112,23 +1109,18 @@
 __gen9_write(8)
 __gen9_write(16)
 __gen9_write(32)
-__gen9_write(64)
 __chv_write(8)
 __chv_write(16)
 __chv_write(32)
-__chv_write(64)
 __gen8_write(8)
 __gen8_write(16)
 __gen8_write(32)
-__gen8_write(64)
 __hsw_write(8)
 __hsw_write(16)
 __hsw_write(32)
-__hsw_write(64)
 __gen6_write(8)
 __gen6_write(16)
 __gen6_write(32)
-__gen6_write(64)
 
 #undef __gen9_write
 #undef __chv_write
@@ -1158,7 +1150,6 @@
 __vgpu_write(8)
 __vgpu_write(16)
 __vgpu_write(32)
-__vgpu_write(64)
 
 #undef __vgpu_write
 #undef VGPU_WRITE_FOOTER
@@ -1169,7 +1160,6 @@
 	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
 	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
 	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
-	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
 } while (0)
 
 #define ASSIGN_READ_MMIO_VFUNCS(x) \
@@ -1597,8 +1587,10 @@
 	if (engine_mask == ALL_ENGINES) {
 		hw_mask = GEN6_GRDOM_FULL;
 	} else {
+		unsigned int tmp;
+
 		hw_mask = 0;
-		for_each_engine_masked(engine, dev_priv, engine_mask)
+		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
 			hw_mask |= hw_engine_mask[engine->id];
 	}
 
@@ -1618,8 +1610,10 @@
  * @timeout_ms: timeout in millisecond
  *
  * This routine waits until the target register @reg contains the expected
- * @value after applying the @mask, i.e. it waits until
- *   (I915_READ_FW(@reg) & @mask) == @value
+ * @value after applying the @mask, i.e. it waits until ::
+ *
+ *     (I915_READ_FW(reg) & mask) == value
+ *
  * Otherwise, the wait will timeout after @timeout_ms milliseconds.
  *
  * Note that this routine assumes the caller holds forcewake asserted, it is
@@ -1652,8 +1646,10 @@
  * @timeout_ms: timeout in millisecond
  *
  * This routine waits until the target register @reg contains the expected
- * @value after applying the @mask, i.e. it waits until
- *   (I915_READ(@reg) & @mask) == @value
+ * @value after applying the @mask, i.e. it waits until ::
+ *
+ *     (I915_READ(reg) & mask) == value
+ *
  * Otherwise, the wait will timeout after @timeout_ms milliseconds.
  *
  * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
@@ -1710,15 +1706,16 @@
 			      unsigned engine_mask)
 {
 	struct intel_engine_cs *engine;
+	unsigned int tmp;
 
-	for_each_engine_masked(engine, dev_priv, engine_mask)
+	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
 		if (gen8_request_engine_reset(engine))
 			goto not_ready;
 
 	return gen6_reset_engines(dev_priv, engine_mask);
 
 not_ready:
-	for_each_engine_masked(engine, dev_priv, engine_mask)
+	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
 		gen8_unrequest_engine_reset(engine);
 
 	return -EIO;
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 7bf90e9..98df09c 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -16,7 +16,6 @@
 #include <linux/component.h>
 #include <linux/device.h>
 #include <linux/dma-buf.h>
-#include <linux/fb.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/reservation.h>
@@ -58,12 +57,6 @@
 module_param(legacyfb_depth, int, 0444);
 #endif
 
-unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc)
-{
-	return drm_crtc_index(crtc->crtc);
-}
-EXPORT_SYMBOL_GPL(imx_drm_crtc_id);
-
 static void imx_drm_driver_lastclose(struct drm_device *drm)
 {
 	struct imx_drm_device *imxdrm = drm->dev_private;
@@ -71,43 +64,6 @@
 	drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
 }
 
-static int imx_drm_driver_unload(struct drm_device *drm)
-{
-	struct imx_drm_device *imxdrm = drm->dev_private;
-
-	drm_kms_helper_poll_fini(drm);
-
-	if (imxdrm->fbhelper)
-		drm_fbdev_cma_fini(imxdrm->fbhelper);
-
-	component_unbind_all(drm->dev, drm);
-
-	drm_vblank_cleanup(drm);
-	drm_mode_config_cleanup(drm);
-
-	platform_set_drvdata(drm->platformdev, NULL);
-
-	return 0;
-}
-
-int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
-{
-	return drm_crtc_vblank_get(imx_drm_crtc->crtc);
-}
-EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get);
-
-void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc)
-{
-	drm_crtc_vblank_put(imx_drm_crtc->crtc);
-}
-EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put);
-
-void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc)
-{
-	drm_crtc_handle_vblank(imx_drm_crtc->crtc);
-}
-EXPORT_SYMBOL_GPL(imx_drm_handle_vblank);
-
 static int imx_drm_enable_vblank(struct drm_device *drm, unsigned int pipe)
 {
 	struct imx_drm_device *imxdrm = drm->dev_private;
@@ -195,54 +151,49 @@
 	return ret;
 }
 
+static int imx_drm_atomic_commit(struct drm_device *dev,
+				 struct drm_atomic_state *state,
+				 bool nonblock)
+{
+	struct drm_plane_state *plane_state;
+	struct drm_plane *plane;
+	struct dma_buf *dma_buf;
+	int i;
+
+	/*
+	 * If the plane fb has an dma-buf attached, fish out the exclusive
+	 * fence for the atomic helper to wait on.
+	 */
+	for_each_plane_in_state(state, plane, plane_state, i) {
+		if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
+			dma_buf = drm_fb_cma_get_gem_obj(plane_state->fb,
+							 0)->base.dma_buf;
+			if (!dma_buf)
+				continue;
+			plane_state->fence =
+				reservation_object_get_excl_rcu(dma_buf->resv);
+		}
+	}
+
+	return drm_atomic_helper_commit(dev, state, nonblock);
+}
+
 static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
 	.fb_create = drm_fb_cma_create,
 	.output_poll_changed = imx_drm_output_poll_changed,
 	.atomic_check = imx_drm_atomic_check,
-	.atomic_commit = drm_atomic_helper_commit,
+	.atomic_commit = imx_drm_atomic_commit,
 };
 
 static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
-	struct drm_plane_state *plane_state;
-	struct drm_gem_cma_object *cma_obj;
-	struct fence *excl;
-	unsigned shared_count;
-	struct fence **shared;
-	unsigned int i, j;
-	int ret;
-
-	/* Wait for fences. */
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		plane_state = crtc->primary->state;
-		if (plane_state->fb) {
-			cma_obj = drm_fb_cma_get_gem_obj(plane_state->fb, 0);
-			if (cma_obj->base.dma_buf) {
-				ret = reservation_object_get_fences_rcu(
-					cma_obj->base.dma_buf->resv, &excl,
-					&shared_count, &shared);
-				if (unlikely(ret))
-					DRM_ERROR("failed to get fences "
-						  "for buffer\n");
-
-				if (excl) {
-					fence_wait(excl, false);
-					fence_put(excl);
-				}
-				for (j = 0; j < shared_count; i++) {
-					fence_wait(shared[j], false);
-					fence_put(shared[j]);
-				}
-			}
-		}
-	}
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 
-	drm_atomic_helper_commit_planes(dev, state, true);
+	drm_atomic_helper_commit_planes(dev, state,
+				DRM_PLANE_COMMIT_ACTIVE_ONLY |
+				DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET);
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
@@ -258,111 +209,6 @@
 };
 
 /*
- * Main DRM initialisation. This binds, initialises and registers
- * with DRM the subcomponents of the driver.
- */
-static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
-{
-	struct imx_drm_device *imxdrm;
-	struct drm_connector *connector;
-	int ret;
-
-	imxdrm = devm_kzalloc(drm->dev, sizeof(*imxdrm), GFP_KERNEL);
-	if (!imxdrm)
-		return -ENOMEM;
-
-	imxdrm->drm = drm;
-
-	drm->dev_private = imxdrm;
-
-	/*
-	 * enable drm irq mode.
-	 * - with irq_enabled = true, we can use the vblank feature.
-	 *
-	 * P.S. note that we wouldn't use drm irq handler but
-	 *      just specific driver own one instead because
-	 *      drm framework supports only one irq handler and
-	 *      drivers can well take care of their interrupts
-	 */
-	drm->irq_enabled = true;
-
-	/*
-	 * set max width and height as default value(4096x4096).
-	 * this value would be used to check framebuffer size limitation
-	 * at drm_mode_addfb().
-	 */
-	drm->mode_config.min_width = 64;
-	drm->mode_config.min_height = 64;
-	drm->mode_config.max_width = 4096;
-	drm->mode_config.max_height = 4096;
-	drm->mode_config.funcs = &imx_drm_mode_config_funcs;
-	drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
-
-	drm_mode_config_init(drm);
-
-	ret = drm_vblank_init(drm, MAX_CRTC);
-	if (ret)
-		goto err_kms;
-
-	platform_set_drvdata(drm->platformdev, drm);
-
-	/* Now try and bind all our sub-components */
-	ret = component_bind_all(drm->dev, drm);
-	if (ret)
-		goto err_vblank;
-
-	/*
-	 * All components are now added, we can publish the connector sysfs
-	 * entries to userspace.  This will generate hotplug events and so
-	 * userspace will expect to be able to access DRM at this point.
-	 */
-	list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
-		ret = drm_connector_register(connector);
-		if (ret) {
-			dev_err(drm->dev,
-				"[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
-				connector->base.id,
-				connector->name, ret);
-			goto err_unbind;
-		}
-	}
-
-	drm_mode_config_reset(drm);
-
-	/*
-	 * All components are now initialised, so setup the fb helper.
-	 * The fb helper takes copies of key hardware information, so the
-	 * crtcs/connectors/encoders must not change after this point.
-	 */
-#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
-	if (legacyfb_depth != 16 && legacyfb_depth != 32) {
-		dev_warn(drm->dev, "Invalid legacyfb_depth.  Defaulting to 16bpp\n");
-		legacyfb_depth = 16;
-	}
-	imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
-				drm->mode_config.num_crtc, MAX_CRTC);
-	if (IS_ERR(imxdrm->fbhelper)) {
-		ret = PTR_ERR(imxdrm->fbhelper);
-		imxdrm->fbhelper = NULL;
-		goto err_unbind;
-	}
-#endif
-
-	drm_kms_helper_poll_init(drm);
-
-	return 0;
-
-err_unbind:
-	component_unbind_all(drm->dev, drm);
-err_vblank:
-	drm_vblank_cleanup(drm);
-err_kms:
-	drm_mode_config_cleanup(drm);
-
-	return ret;
-}
-
-/*
  * imx_drm_add_crtc - add a new crtc
  */
 int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
@@ -454,8 +300,6 @@
 static struct drm_driver imx_drm_driver = {
 	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
 				  DRIVER_ATOMIC,
-	.load			= imx_drm_driver_load,
-	.unload			= imx_drm_driver_unload,
 	.lastclose		= imx_drm_driver_lastclose,
 	.gem_free_object_unlocked = drm_gem_cma_free_object,
 	.gem_vm_ops		= &drm_gem_cma_vm_ops,
@@ -508,12 +352,122 @@
 
 static int imx_drm_bind(struct device *dev)
 {
-	return drm_platform_init(&imx_drm_driver, to_platform_device(dev));
+	struct drm_device *drm;
+	struct imx_drm_device *imxdrm;
+	int ret;
+
+	drm = drm_dev_alloc(&imx_drm_driver, dev);
+	if (!drm)
+		return -ENOMEM;
+
+	imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL);
+	if (!imxdrm) {
+		ret = -ENOMEM;
+		goto err_unref;
+	}
+
+	imxdrm->drm = drm;
+	drm->dev_private = imxdrm;
+
+	/*
+	 * enable drm irq mode.
+	 * - with irq_enabled = true, we can use the vblank feature.
+	 *
+	 * P.S. note that we wouldn't use drm irq handler but
+	 *      just specific driver own one instead because
+	 *      drm framework supports only one irq handler and
+	 *      drivers can well take care of their interrupts
+	 */
+	drm->irq_enabled = true;
+
+	/*
+	 * set max width and height as default value(4096x4096).
+	 * this value would be used to check framebuffer size limitation
+	 * at drm_mode_addfb().
+	 */
+	drm->mode_config.min_width = 64;
+	drm->mode_config.min_height = 64;
+	drm->mode_config.max_width = 4096;
+	drm->mode_config.max_height = 4096;
+	drm->mode_config.funcs = &imx_drm_mode_config_funcs;
+	drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
+
+	drm_mode_config_init(drm);
+
+	ret = drm_vblank_init(drm, MAX_CRTC);
+	if (ret)
+		goto err_kms;
+
+	dev_set_drvdata(dev, drm);
+
+	/* Now try and bind all our sub-components */
+	ret = component_bind_all(dev, drm);
+	if (ret)
+		goto err_vblank;
+
+	drm_mode_config_reset(drm);
+
+	/*
+	 * All components are now initialised, so setup the fb helper.
+	 * The fb helper takes copies of key hardware information, so the
+	 * crtcs/connectors/encoders must not change after this point.
+	 */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
+	if (legacyfb_depth != 16 && legacyfb_depth != 32) {
+		dev_warn(dev, "Invalid legacyfb_depth.  Defaulting to 16bpp\n");
+		legacyfb_depth = 16;
+	}
+	imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
+				drm->mode_config.num_crtc, MAX_CRTC);
+	if (IS_ERR(imxdrm->fbhelper)) {
+		ret = PTR_ERR(imxdrm->fbhelper);
+		imxdrm->fbhelper = NULL;
+		goto err_unbind;
+	}
+#endif
+
+	drm_kms_helper_poll_init(drm);
+
+	ret = drm_dev_register(drm, 0);
+	if (ret)
+		goto err_fbhelper;
+
+	return 0;
+
+err_fbhelper:
+	drm_kms_helper_poll_fini(drm);
+	if (imxdrm->fbhelper)
+		drm_fbdev_cma_fini(imxdrm->fbhelper);
+err_unbind:
+	component_unbind_all(drm->dev, drm);
+err_vblank:
+	drm_vblank_cleanup(drm);
+err_kms:
+	drm_mode_config_cleanup(drm);
+err_unref:
+	drm_dev_unref(drm);
+
+	return ret;
 }
 
 static void imx_drm_unbind(struct device *dev)
 {
-	drm_put_dev(dev_get_drvdata(dev));
+	struct drm_device *drm = dev_get_drvdata(dev);
+	struct imx_drm_device *imxdrm = drm->dev_private;
+
+	drm_dev_unregister(drm);
+
+	drm_kms_helper_poll_fini(drm);
+
+	if (imxdrm->fbhelper)
+		drm_fbdev_cma_fini(imxdrm->fbhelper);
+
+	drm_mode_config_cleanup(drm);
+
+	component_unbind_all(drm->dev, drm);
+	dev_set_drvdata(dev, NULL);
+
+	drm_dev_unref(drm);
 }
 
 static const struct component_master_ops imx_drm_ops = {
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 07d33e4..5a91cb1 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -13,8 +13,6 @@
 struct imx_drm_crtc;
 struct platform_device;
 
-unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc);
-
 struct imx_crtc_state {
 	struct drm_crtc_state			base;
 	u32					bus_format;
@@ -44,10 +42,6 @@
 		int preferred_bpp);
 int imx_drm_exit_drm(void);
 
-int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc);
-void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc);
-void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc);
-
 void imx_drm_mode_config_init(struct drm_device *drm);
 
 struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index b03919e..3ce391c2 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -57,7 +57,11 @@
 	struct imx_ldb *ldb;
 	struct drm_connector connector;
 	struct drm_encoder encoder;
+
+	/* Defines what is connected to the ldb, only one at a time */
 	struct drm_panel *panel;
+	struct drm_bridge *bridge;
+
 	struct device_node *child;
 	struct i2c_adapter *ddc;
 	int chno;
@@ -66,6 +70,7 @@
 	struct drm_display_mode mode;
 	int mode_valid;
 	u32 bus_format;
+	u32 bus_flags;
 };
 
 static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c)
@@ -251,11 +256,13 @@
 	drm_panel_enable(imx_ldb_ch->panel);
 }
 
-static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
-			 struct drm_display_mode *orig_mode,
-			 struct drm_display_mode *mode)
+static void
+imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder,
+				struct drm_crtc_state *crtc_state,
+				struct drm_connector_state *connector_state)
 {
 	struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+	struct drm_display_mode *mode = &crtc_state->adjusted_mode;
 	struct imx_ldb *ldb = imx_ldb_ch->ldb;
 	int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
 	unsigned long serial_clk;
@@ -297,17 +304,11 @@
 	}
 
 	if (!bus_format) {
-		struct drm_connector *connector;
+		struct drm_connector *connector = connector_state->connector;
+		struct drm_display_info *di = &connector->display_info;
 
-		drm_for_each_connector(connector, encoder->dev) {
-			struct drm_display_info *di = &connector->display_info;
-
-			if (connector->encoder == encoder &&
-			    di->num_bus_formats) {
-				bus_format = di->bus_formats[0];
-				break;
-			}
-		}
+		if (di->num_bus_formats)
+			bus_format = di->bus_formats[0];
 	}
 	imx_ldb_ch_set_bus_format(imx_ldb_ch, bus_format);
 }
@@ -379,8 +380,13 @@
 	u32 bus_format = imx_ldb_ch->bus_format;
 
 	/* Bus format description in DT overrides connector display info. */
-	if (!bus_format && di->num_bus_formats)
+	if (!bus_format && di->num_bus_formats) {
 		bus_format = di->bus_formats[0];
+		imx_crtc_state->bus_flags = di->bus_flags;
+	} else {
+		bus_format = imx_ldb_ch->bus_format;
+		imx_crtc_state->bus_flags = imx_ldb_ch->bus_flags;
+	}
 	switch (bus_format) {
 	case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
 		imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
@@ -420,7 +426,7 @@
 };
 
 static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
-	.mode_set = imx_ldb_encoder_mode_set,
+	.atomic_mode_set = imx_ldb_encoder_atomic_mode_set,
 	.enable = imx_ldb_encoder_enable,
 	.disable = imx_ldb_encoder_disable,
 	.atomic_check = imx_ldb_encoder_atomic_check,
@@ -466,10 +472,30 @@
 	drm_encoder_init(drm, encoder, &imx_ldb_encoder_funcs,
 			 DRM_MODE_ENCODER_LVDS, NULL);
 
-	drm_connector_helper_add(&imx_ldb_ch->connector,
-			&imx_ldb_connector_helper_funcs);
-	drm_connector_init(drm, &imx_ldb_ch->connector,
-			   &imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
+	if (imx_ldb_ch->bridge) {
+		imx_ldb_ch->bridge->encoder = encoder;
+
+		imx_ldb_ch->encoder.bridge = imx_ldb_ch->bridge;
+		ret = drm_bridge_attach(drm, imx_ldb_ch->bridge);
+		if (ret) {
+			DRM_ERROR("Failed to initialize bridge with drm\n");
+			return ret;
+		}
+	} else {
+		/*
+		 * We want to add the connector whenever there is no bridge
+		 * that brings its own, not only when there is a panel. For
+		 * historical reasons, the ldb driver can also work without
+		 * a panel.
+		 */
+		drm_connector_helper_add(&imx_ldb_ch->connector,
+				&imx_ldb_connector_helper_funcs);
+		drm_connector_init(drm, &imx_ldb_ch->connector,
+				&imx_ldb_connector_funcs,
+				DRM_MODE_CONNECTOR_LVDS);
+		drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
+				encoder);
+	}
 
 	if (imx_ldb_ch->panel) {
 		ret = drm_panel_attach(imx_ldb_ch->panel,
@@ -478,8 +504,6 @@
 			return ret;
 	}
 
-	drm_mode_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
-
 	return 0;
 }
 
@@ -548,6 +572,46 @@
 };
 MODULE_DEVICE_TABLE(of, imx_ldb_dt_ids);
 
+static int imx_ldb_panel_ddc(struct device *dev,
+		struct imx_ldb_channel *channel, struct device_node *child)
+{
+	struct device_node *ddc_node;
+	const u8 *edidp;
+	int ret;
+
+	ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0);
+	if (ddc_node) {
+		channel->ddc = of_find_i2c_adapter_by_node(ddc_node);
+		of_node_put(ddc_node);
+		if (!channel->ddc) {
+			dev_warn(dev, "failed to get ddc i2c adapter\n");
+			return -EPROBE_DEFER;
+		}
+	}
+
+	if (!channel->ddc) {
+		/* if no DDC available, fallback to hardcoded EDID */
+		dev_dbg(dev, "no ddc available\n");
+
+		edidp = of_get_property(child, "edid",
+					&channel->edid_len);
+		if (edidp) {
+			channel->edid = kmemdup(edidp,
+						channel->edid_len,
+						GFP_KERNEL);
+		} else if (!channel->panel) {
+			/* fallback to display-timings node */
+			ret = of_get_drm_display_mode(child,
+						      &channel->mode,
+						      &channel->bus_flags,
+						      OF_USE_NATIVE_MODE);
+			if (!ret)
+				channel->mode_valid = 1;
+		}
+	}
+	return 0;
+}
+
 static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 {
 	struct drm_device *drm = data;
@@ -555,7 +619,6 @@
 	const struct of_device_id *of_id =
 			of_match_device(imx_ldb_dt_ids, dev);
 	struct device_node *child;
-	const u8 *edidp;
 	struct imx_ldb *imx_ldb;
 	int dual;
 	int ret;
@@ -605,7 +668,6 @@
 
 	for_each_child_of_node(np, child) {
 		struct imx_ldb_channel *channel;
-		struct device_node *ddc_node;
 		struct device_node *ep;
 		int bus_format;
 
@@ -638,46 +700,25 @@
 
 			remote = of_graph_get_remote_port_parent(ep);
 			of_node_put(ep);
-			if (remote)
+			if (remote) {
 				channel->panel = of_drm_find_panel(remote);
-			else
+				channel->bridge = of_drm_find_bridge(remote);
+			} else
 				return -EPROBE_DEFER;
 			of_node_put(remote);
-			if (!channel->panel) {
-				dev_err(dev, "panel not found: %s\n",
+
+			if (!channel->panel && !channel->bridge) {
+				dev_err(dev, "panel/bridge not found: %s\n",
 					remote->full_name);
 				return -EPROBE_DEFER;
 			}
 		}
 
-		ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0);
-		if (ddc_node) {
-			channel->ddc = of_find_i2c_adapter_by_node(ddc_node);
-			of_node_put(ddc_node);
-			if (!channel->ddc) {
-				dev_warn(dev, "failed to get ddc i2c adapter\n");
-				return -EPROBE_DEFER;
-			}
-		}
-
-		if (!channel->ddc) {
-			/* if no DDC available, fallback to hardcoded EDID */
-			dev_dbg(dev, "no ddc available\n");
-
-			edidp = of_get_property(child, "edid",
-						&channel->edid_len);
-			if (edidp) {
-				channel->edid = kmemdup(edidp,
-							channel->edid_len,
-							GFP_KERNEL);
-			} else if (!channel->panel) {
-				/* fallback to display-timings node */
-				ret = of_get_drm_display_mode(child,
-							      &channel->mode,
-							      OF_USE_NATIVE_MODE);
-				if (!ret)
-					channel->mode_valid = 1;
-			}
+		/* panel ddc only if there is no bridge */
+		if (!channel->bridge) {
+			ret = imx_ldb_panel_ddc(dev, channel, child);
+			if (ret)
+				return ret;
 		}
 
 		bus_format = of_get_bus_format(dev, child);
@@ -716,11 +757,10 @@
 	for (i = 0; i < 2; i++) {
 		struct imx_ldb_channel *channel = &imx_ldb->channel[i];
 
-		if (!channel->connector.funcs)
-			continue;
-
-		channel->connector.funcs->destroy(&channel->connector);
-		channel->encoder.funcs->destroy(&channel->encoder);
+		if (channel->bridge)
+			drm_bridge_detach(channel->bridge);
+		if (channel->panel)
+			drm_panel_detach(channel->panel);
 
 		kfree(channel->edid);
 		i2c_put_adapter(channel->ddc);
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 5e87594..8fc0888 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -685,9 +685,6 @@
 {
 	struct imx_tve *tve = dev_get_drvdata(dev);
 
-	tve->connector.funcs->destroy(&tve->connector);
-	tve->encoder.funcs->destroy(&tve->encoder);
-
 	if (!IS_ERR(tve->dac_reg))
 		regulator_disable(tve->dac_reg);
 }
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 462056e..4e1ae3f 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -21,7 +21,6 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
-#include <linux/fb.h>
 #include <linux/clk.h>
 #include <linux/errno.h>
 #include <drm/drm_gem_cma_helper.h>
@@ -61,7 +60,8 @@
 	ipu_di_enable(ipu_crtc->di);
 }
 
-static void ipu_crtc_disable(struct drm_crtc *crtc)
+static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
+				    struct drm_crtc_state *old_crtc_state)
 {
 	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
 	struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
@@ -77,6 +77,9 @@
 	}
 	spin_unlock_irq(&crtc->dev->event_lock);
 
+	/* always disable planes on the CRTC */
+	drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);
+
 	drm_crtc_vblank_off(crtc);
 }
 
@@ -123,9 +126,14 @@
 	kfree(to_imx_crtc_state(state));
 }
 
+static void imx_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+	imx_drm_remove_crtc(to_ipu_crtc(crtc)->imx_crtc);
+}
+
 static const struct drm_crtc_funcs ipu_crtc_funcs = {
 	.set_config = drm_atomic_helper_set_config,
-	.destroy = drm_crtc_cleanup,
+	.destroy = imx_drm_crtc_destroy,
 	.page_flip = drm_atomic_helper_page_flip,
 	.reset = imx_drm_crtc_reset,
 	.atomic_duplicate_state = imx_drm_crtc_duplicate_state,
@@ -136,7 +144,7 @@
 {
 	struct ipu_crtc *ipu_crtc = dev_id;
 
-	imx_drm_handle_vblank(ipu_crtc->imx_crtc);
+	drm_crtc_handle_vblank(&ipu_crtc->base);
 
 	return IRQ_HANDLED;
 }
@@ -246,7 +254,7 @@
 	.mode_set_nofb = ipu_crtc_mode_set_nofb,
 	.atomic_check = ipu_crtc_atomic_check,
 	.atomic_begin = ipu_crtc_atomic_begin,
-	.disable = ipu_crtc_disable,
+	.atomic_disable = ipu_crtc_atomic_disable,
 	.enable = ipu_crtc_enable,
 };
 
@@ -414,8 +422,6 @@
 {
 	struct ipu_crtc *ipu_crtc = dev_get_drvdata(dev);
 
-	imx_drm_remove_crtc(ipu_crtc->imx_crtc);
-
 	ipu_put_resources(ipu_crtc);
 	if (ipu_crtc->plane[1])
 		ipu_plane_put_resources(ipu_crtc->plane[1]);
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 29423e75..ce22d0a 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -213,8 +213,12 @@
 		ipu_dp_enable_channel(ipu_plane->dp);
 }
 
-static void ipu_plane_disable(struct ipu_plane *ipu_plane)
+static int ipu_disable_plane(struct drm_plane *plane)
 {
+	struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
 	ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
 
 	if (ipu_plane->dp)
@@ -223,15 +227,6 @@
 	ipu_dmfc_disable_channel(ipu_plane->dmfc);
 	if (ipu_plane->dp)
 		ipu_dp_disable(ipu_plane->ipu);
-}
-
-static int ipu_disable_plane(struct drm_plane *plane)
-{
-	struct ipu_plane *ipu_plane = to_ipu_plane(plane);
-
-	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
-	ipu_plane_disable(ipu_plane);
 
 	return 0;
 }
@@ -242,7 +237,6 @@
 
 	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
-	ipu_disable_plane(plane);
 	drm_plane_cleanup(plane);
 	kfree(ipu_plane);
 }
@@ -320,8 +314,10 @@
 
 	/*
 	 * We support resizing active plane or changing its format by
-	 * forcing CRTC mode change and disabling-enabling plane in plane's
-	 * ->atomic_update callback.
+	 * forcing CRTC mode change in plane's ->atomic_check callback
+	 * and disabling all affected active planes in CRTC's ->atomic_disable
+	 * callback.  The planes will be reenabled in plane's ->atomic_update
+	 * callback.
 	 */
 	if (old_fb && (state->src_w != old_state->src_w ||
 			      state->src_h != old_state->src_h ||
@@ -395,12 +391,10 @@
 	if (old_state->fb) {
 		struct drm_crtc_state *crtc_state = state->crtc->state;
 
-		if (!crtc_state->mode_changed) {
+		if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
 			ipu_plane_atomic_set_base(ipu_plane, old_state);
 			return;
 		}
-
-		ipu_disable_plane(plane);
 	}
 
 	switch (ipu_plane->dp_flow) {
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 1dad297..d796ada 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -33,6 +33,7 @@
 	void *edid;
 	int edid_len;
 	u32 bus_format;
+	u32 bus_flags;
 	struct drm_display_mode mode;
 	struct drm_panel *panel;
 	struct drm_bridge *bridge;
@@ -80,6 +81,7 @@
 			return -EINVAL;
 
 		ret = of_get_drm_display_mode(np, &imxpd->mode,
+					      &imxpd->bus_flags,
 					      OF_USE_NATIVE_MODE);
 		if (ret)
 			return ret;
@@ -125,11 +127,13 @@
 	struct drm_display_info *di = &conn_state->connector->display_info;
 	struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
 
-	imx_crtc_state->bus_flags = di->bus_flags;
-	if (!imxpd->bus_format && di->num_bus_formats)
+	if (!imxpd->bus_format && di->num_bus_formats) {
+		imx_crtc_state->bus_flags = di->bus_flags;
 		imx_crtc_state->bus_format = di->bus_formats[0];
-	else
+	} else {
+		imx_crtc_state->bus_flags = imxpd->bus_flags;
 		imx_crtc_state->bus_format = imxpd->bus_format;
+	}
 	imx_crtc_state->di_hsync_pin = 2;
 	imx_crtc_state->di_vsync_pin = 3;
 
@@ -289,8 +293,10 @@
 {
 	struct imx_parallel_display *imxpd = dev_get_drvdata(dev);
 
-	imxpd->encoder.funcs->destroy(&imxpd->encoder);
-	imxpd->connector.funcs->destroy(&imxpd->connector);
+	if (imxpd->bridge)
+		drm_bridge_detach(imxpd->bridge);
+	if (imxpd->panel)
+		drm_panel_detach(imxpd->panel);
 
 	kfree(imxpd->edid);
 }
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 8f62671f..019b7ca 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -103,7 +103,8 @@
 }
 
 static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
-			   unsigned int h, unsigned int vrefresh)
+			   unsigned int h, unsigned int vrefresh,
+			   unsigned int bpc)
 {
 	if (w != 0 && h != 0)
 		writel_relaxed(h << 16 | w, comp->regs + DISP_REG_OVL_ROI_SIZE);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 5fb80cb..0df05f9 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -106,7 +106,8 @@
 }
 
 static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
-			    unsigned int height, unsigned int vrefresh)
+			    unsigned int height, unsigned int vrefresh,
+			    unsigned int bpc)
 {
 	unsigned int threshold;
 	unsigned int reg;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 24aa3ba..01a21dd 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -31,7 +31,7 @@
  * struct mtk_drm_crtc - MediaTek specific crtc structure.
  * @base: crtc object.
  * @enabled: records whether crtc_enable succeeded
- * @planes: array of 4 mtk_drm_plane structures, one for each overlay plane
+ * @planes: array of 4 drm_plane structures, one for each overlay plane
  * @pending_planes: whether any plane has pending changes to be applied
  * @config_regs: memory mapped mmsys configuration register space
  * @mutex: handle to one of the ten disp_mutex streams
@@ -45,7 +45,7 @@
 	bool				pending_needs_vblank;
 	struct drm_pending_vblank_event	*event;
 
-	struct mtk_drm_plane		planes[OVL_LAYER_NR];
+	struct drm_plane		planes[OVL_LAYER_NR];
 	bool				pending_planes;
 
 	void __iomem			*config_regs;
@@ -112,8 +112,7 @@
 	struct mtk_crtc_state *state;
 
 	if (crtc->state) {
-		if (crtc->state->mode_blob)
-			drm_property_unreference_blob(crtc->state->mode_blob);
+		__drm_atomic_helper_crtc_destroy_state(crtc->state);
 
 		state = to_mtk_crtc_state(crtc->state);
 		memset(state, 0, sizeof(*state));
@@ -222,7 +221,9 @@
 static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
 {
 	struct drm_crtc *crtc = &mtk_crtc->base;
-	unsigned int width, height, vrefresh;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC;
 	int ret;
 	int i;
 
@@ -234,6 +235,19 @@
 	height = crtc->state->adjusted_mode.vdisplay;
 	vrefresh = crtc->state->adjusted_mode.vrefresh;
 
+	drm_for_each_encoder(encoder, crtc->dev) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		drm_for_each_connector(connector, crtc->dev) {
+			if (connector->encoder != encoder)
+				continue;
+			if (connector->display_info.bpc != 0 &&
+			    bpc > connector->display_info.bpc)
+				bpc = connector->display_info.bpc;
+		}
+	}
+
 	ret = pm_runtime_get_sync(crtc->dev->dev);
 	if (ret < 0) {
 		DRM_ERROR("Failed to enable power domain: %d\n", ret);
@@ -266,13 +280,13 @@
 	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
 		struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];
 
-		mtk_ddp_comp_config(comp, width, height, vrefresh);
+		mtk_ddp_comp_config(comp, width, height, vrefresh, bpc);
 		mtk_ddp_comp_start(comp);
 	}
 
 	/* Initially configure all planes */
 	for (i = 0; i < OVL_LAYER_NR; i++) {
-		struct drm_plane *plane = &mtk_crtc->planes[i].base;
+		struct drm_plane *plane = &mtk_crtc->planes[i];
 		struct mtk_plane_state *plane_state;
 
 		plane_state = to_mtk_plane_state(plane->state);
@@ -351,7 +365,7 @@
 
 	/* Set all pending plane state to disabled */
 	for (i = 0; i < OVL_LAYER_NR; i++) {
-		struct drm_plane *plane = &mtk_crtc->planes[i].base;
+		struct drm_plane *plane = &mtk_crtc->planes[i];
 		struct mtk_plane_state *plane_state;
 
 		plane_state = to_mtk_plane_state(plane->state);
@@ -397,7 +411,7 @@
 	if (mtk_crtc->event)
 		mtk_crtc->pending_needs_vblank = true;
 	for (i = 0; i < OVL_LAYER_NR; i++) {
-		struct drm_plane *plane = &mtk_crtc->planes[i].base;
+		struct drm_plane *plane = &mtk_crtc->planes[i];
 		struct mtk_plane_state *plane_state;
 
 		plane_state = to_mtk_plane_state(plane->state);
@@ -409,6 +423,9 @@
 	}
 	if (pending_planes)
 		mtk_crtc->pending_planes = true;
+	if (crtc->state->color_mgmt_changed)
+		for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+			mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
 }
 
 static const struct drm_crtc_funcs mtk_crtc_funcs = {
@@ -418,6 +435,7 @@
 	.reset			= mtk_drm_crtc_reset,
 	.atomic_duplicate_state	= mtk_drm_crtc_duplicate_state,
 	.atomic_destroy_state	= mtk_drm_crtc_destroy_state,
+	.gamma_set		= drm_atomic_helper_legacy_gamma_set,
 };
 
 static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
@@ -464,14 +482,14 @@
 	if (state->pending_config) {
 		mtk_ddp_comp_config(ovl, state->pending_width,
 				    state->pending_height,
-				    state->pending_vrefresh);
+				    state->pending_vrefresh, 0);
 
 		state->pending_config = false;
 	}
 
 	if (mtk_crtc->pending_planes) {
 		for (i = 0; i < OVL_LAYER_NR; i++) {
-			struct drm_plane *plane = &mtk_crtc->planes[i].base;
+			struct drm_plane *plane = &mtk_crtc->planes[i];
 			struct mtk_plane_state *plane_state;
 
 			plane_state = to_mtk_plane_state(plane->state);
@@ -559,16 +577,17 @@
 				(zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
 						DRM_PLANE_TYPE_OVERLAY;
 		ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[zpos],
-				     BIT(pipe), type, zpos);
+				     BIT(pipe), type);
 		if (ret)
 			goto unprepare;
 	}
 
-	ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0].base,
-				&mtk_crtc->planes[1].base, pipe);
+	ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
+				&mtk_crtc->planes[1], pipe);
 	if (ret < 0)
 		goto unprepare;
-
+	drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
+	drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE);
 	priv->crtc[pipe] = &mtk_crtc->base;
 	priv->num_pipes++;
 
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index 81e5566..a1550fa 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -19,10 +19,12 @@
 #include "mtk_drm_plane.h"
 
 #define OVL_LAYER_NR	4
+#define MTK_LUT_SIZE	512
+#define MTK_MAX_BPC	10
+#define MTK_MIN_BPC	3
 
 int mtk_drm_crtc_enable_vblank(struct drm_device *drm, unsigned int pipe);
 void mtk_drm_crtc_disable_vblank(struct drm_device *drm, unsigned int pipe);
-void mtk_drm_crtc_check_flush(struct drm_crtc *crtc);
 void mtk_drm_crtc_commit(struct drm_crtc *crtc);
 void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl);
 int mtk_drm_crtc_create(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 3970fcf..df33b3c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -24,12 +24,17 @@
 #include "mtk_drm_drv.h"
 #include "mtk_drm_plane.h"
 #include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_crtc.h"
 
 #define DISP_OD_EN				0x0000
 #define DISP_OD_INTEN				0x0008
 #define DISP_OD_INTSTA				0x000c
 #define DISP_OD_CFG				0x0020
 #define DISP_OD_SIZE				0x0030
+#define DISP_DITHER_5				0x0114
+#define DISP_DITHER_7				0x011c
+#define DISP_DITHER_15				0x013c
+#define DISP_DITHER_16				0x0140
 
 #define DISP_REG_UFO_START			0x0000
 
@@ -38,15 +43,69 @@
 #define DISP_COLOR_WIDTH			0x0c50
 #define DISP_COLOR_HEIGHT			0x0c54
 
-#define	OD_RELAY_MODE		BIT(0)
+#define DISP_AAL_EN				0x0000
+#define DISP_AAL_SIZE				0x0030
 
-#define	UFO_BYPASS		BIT(2)
+#define DISP_GAMMA_EN				0x0000
+#define DISP_GAMMA_CFG				0x0020
+#define DISP_GAMMA_SIZE				0x0030
+#define DISP_GAMMA_LUT				0x0700
 
-#define	COLOR_BYPASS_ALL	BIT(7)
-#define	COLOR_SEQ_SEL		BIT(13)
+#define LUT_10BIT_MASK				0x03ff
+
+#define COLOR_BYPASS_ALL			BIT(7)
+#define COLOR_SEQ_SEL				BIT(13)
+
+#define OD_RELAYMODE				BIT(0)
+
+#define UFO_BYPASS				BIT(2)
+
+#define AAL_EN					BIT(0)
+
+#define GAMMA_EN				BIT(0)
+#define GAMMA_LUT_EN				BIT(1)
+
+#define DISP_DITHERING				BIT(2)
+#define DITHER_LSB_ERR_SHIFT_R(x)		(((x) & 0x7) << 28)
+#define DITHER_OVFLW_BIT_R(x)			(((x) & 0x7) << 24)
+#define DITHER_ADD_LSHIFT_R(x)			(((x) & 0x7) << 20)
+#define DITHER_ADD_RSHIFT_R(x)			(((x) & 0x7) << 16)
+#define DITHER_NEW_BIT_MODE			BIT(0)
+#define DITHER_LSB_ERR_SHIFT_B(x)		(((x) & 0x7) << 28)
+#define DITHER_OVFLW_BIT_B(x)			(((x) & 0x7) << 24)
+#define DITHER_ADD_LSHIFT_B(x)			(((x) & 0x7) << 20)
+#define DITHER_ADD_RSHIFT_B(x)			(((x) & 0x7) << 16)
+#define DITHER_LSB_ERR_SHIFT_G(x)		(((x) & 0x7) << 12)
+#define DITHER_OVFLW_BIT_G(x)			(((x) & 0x7) << 8)
+#define DITHER_ADD_LSHIFT_G(x)			(((x) & 0x7) << 4)
+#define DITHER_ADD_RSHIFT_G(x)			(((x) & 0x7) << 0)
+
+void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
+		    unsigned int CFG)
+{
+	/* If bpc equal to 0, the dithering function didn't be enabled */
+	if (bpc == 0)
+		return;
+
+	if (bpc >= MTK_MIN_BPC) {
+		writel(0, comp->regs + DISP_DITHER_5);
+		writel(0, comp->regs + DISP_DITHER_7);
+		writel(DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) |
+		       DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) |
+		       DITHER_NEW_BIT_MODE,
+		       comp->regs + DISP_DITHER_15);
+		writel(DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) |
+		       DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) |
+		       DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) |
+		       DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc),
+		       comp->regs + DISP_DITHER_16);
+		writel(DISP_DITHERING, comp->regs + CFG);
+	}
+}
 
 static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
-			     unsigned int h, unsigned int vrefresh)
+			     unsigned int h, unsigned int vrefresh,
+			     unsigned int bpc)
 {
 	writel(w, comp->regs + DISP_COLOR_WIDTH);
 	writel(h, comp->regs + DISP_COLOR_HEIGHT);
@@ -60,14 +119,16 @@
 }
 
 static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
-			  unsigned int h, unsigned int vrefresh)
+			  unsigned int h, unsigned int vrefresh,
+			  unsigned int bpc)
 {
 	writel(w << 16 | h, comp->regs + DISP_OD_SIZE);
+	writel(OD_RELAYMODE, comp->regs + OD_RELAYMODE);
+	mtk_dither_set(comp, bpc, DISP_OD_CFG);
 }
 
 static void mtk_od_start(struct mtk_ddp_comp *comp)
 {
-	writel(OD_RELAY_MODE, comp->regs + DISP_OD_CFG);
 	writel(1, comp->regs + DISP_OD_EN);
 }
 
@@ -76,6 +137,78 @@
 	writel(UFO_BYPASS, comp->regs + DISP_REG_UFO_START);
 }
 
+static void mtk_aal_config(struct mtk_ddp_comp *comp, unsigned int w,
+			   unsigned int h, unsigned int vrefresh,
+			   unsigned int bpc)
+{
+	writel(h << 16 | w, comp->regs + DISP_AAL_SIZE);
+}
+
+static void mtk_aal_start(struct mtk_ddp_comp *comp)
+{
+	writel(AAL_EN, comp->regs + DISP_AAL_EN);
+}
+
+static void mtk_aal_stop(struct mtk_ddp_comp *comp)
+{
+	writel_relaxed(0x0, comp->regs + DISP_AAL_EN);
+}
+
+static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w,
+			     unsigned int h, unsigned int vrefresh,
+			     unsigned int bpc)
+{
+	writel(h << 16 | w, comp->regs + DISP_GAMMA_SIZE);
+	mtk_dither_set(comp, bpc, DISP_GAMMA_CFG);
+}
+
+static void mtk_gamma_start(struct mtk_ddp_comp *comp)
+{
+	writel(GAMMA_EN, comp->regs  + DISP_GAMMA_EN);
+}
+
+static void mtk_gamma_stop(struct mtk_ddp_comp *comp)
+{
+	writel_relaxed(0x0, comp->regs  + DISP_GAMMA_EN);
+}
+
+static void mtk_gamma_set(struct mtk_ddp_comp *comp,
+			  struct drm_crtc_state *state)
+{
+	unsigned int i, reg;
+	struct drm_color_lut *lut;
+	void __iomem *lut_base;
+	u32 word;
+
+	if (state->gamma_lut) {
+		reg = readl(comp->regs + DISP_GAMMA_CFG);
+		reg = reg | GAMMA_LUT_EN;
+		writel(reg, comp->regs + DISP_GAMMA_CFG);
+		lut_base = comp->regs + DISP_GAMMA_LUT;
+		lut = (struct drm_color_lut *)state->gamma_lut->data;
+		for (i = 0; i < MTK_LUT_SIZE; i++) {
+			word = (((lut[i].red >> 6) & LUT_10BIT_MASK) << 20) +
+				(((lut[i].green >> 6) & LUT_10BIT_MASK) << 10) +
+				((lut[i].blue >> 6) & LUT_10BIT_MASK);
+			writel(word, (lut_base + i * 4));
+		}
+	}
+}
+
+static const struct mtk_ddp_comp_funcs ddp_aal = {
+	.gamma_set = mtk_gamma_set,
+	.config = mtk_aal_config,
+	.start = mtk_aal_start,
+	.stop = mtk_aal_stop,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_gamma = {
+	.gamma_set = mtk_gamma_set,
+	.config = mtk_gamma_config,
+	.start = mtk_gamma_start,
+	.stop = mtk_gamma_stop,
+};
+
 static const struct mtk_ddp_comp_funcs ddp_color = {
 	.config = mtk_color_config,
 	.start = mtk_color_start,
@@ -112,13 +245,13 @@
 };
 
 static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
-	[DDP_COMPONENT_AAL]	= { MTK_DISP_AAL,	0, NULL },
+	[DDP_COMPONENT_AAL]	= { MTK_DISP_AAL,	0, &ddp_aal },
 	[DDP_COMPONENT_COLOR0]	= { MTK_DISP_COLOR,	0, &ddp_color },
 	[DDP_COMPONENT_COLOR1]	= { MTK_DISP_COLOR,	1, &ddp_color },
 	[DDP_COMPONENT_DPI0]	= { MTK_DPI,		0, NULL },
 	[DDP_COMPONENT_DSI0]	= { MTK_DSI,		0, NULL },
 	[DDP_COMPONENT_DSI1]	= { MTK_DSI,		1, NULL },
-	[DDP_COMPONENT_GAMMA]	= { MTK_DISP_GAMMA,	0, NULL },
+	[DDP_COMPONENT_GAMMA]	= { MTK_DISP_GAMMA,	0, &ddp_gamma },
 	[DDP_COMPONENT_OD]	= { MTK_DISP_OD,	0, &ddp_od },
 	[DDP_COMPONENT_OVL0]	= { MTK_DISP_OVL,	0, NULL },
 	[DDP_COMPONENT_OVL1]	= { MTK_DISP_OVL,	1, NULL },
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 6b13ba9..22a33ee 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -21,6 +21,7 @@
 struct drm_crtc;
 struct drm_device;
 struct mtk_plane_state;
+struct drm_crtc_state;
 
 enum mtk_ddp_comp_type {
 	MTK_DISP_OVL,
@@ -64,7 +65,7 @@
 
 struct mtk_ddp_comp_funcs {
 	void (*config)(struct mtk_ddp_comp *comp, unsigned int w,
-		       unsigned int h, unsigned int vrefresh);
+		       unsigned int h, unsigned int vrefresh, unsigned int bpc);
 	void (*start)(struct mtk_ddp_comp *comp);
 	void (*stop)(struct mtk_ddp_comp *comp);
 	void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
@@ -73,6 +74,8 @@
 	void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
 	void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
 			     struct mtk_plane_state *state);
+	void (*gamma_set)(struct mtk_ddp_comp *comp,
+			  struct drm_crtc_state *state);
 };
 
 struct mtk_ddp_comp {
@@ -86,10 +89,10 @@
 
 static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp,
 				       unsigned int w, unsigned int h,
-				       unsigned int vrefresh)
+				       unsigned int vrefresh, unsigned int bpc)
 {
 	if (comp->funcs && comp->funcs->config)
-		comp->funcs->config(comp, w, h, vrefresh);
+		comp->funcs->config(comp, w, h, vrefresh, bpc);
 }
 
 static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp)
@@ -139,6 +142,13 @@
 		comp->funcs->layer_config(comp, idx, state);
 }
 
+static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp,
+				     struct drm_crtc_state *state)
+{
+	if (comp->funcs && comp->funcs->gamma_set)
+		comp->funcs->gamma_set(comp, state);
+}
+
 int mtk_ddp_comp_get_id(struct device_node *node,
 			enum mtk_ddp_comp_type comp_type);
 int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
@@ -146,5 +156,7 @@
 		      const struct mtk_ddp_comp_funcs *funcs);
 int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp);
 void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp);
+void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
+		    unsigned int CFG);
 
 #endif /* MTK_DRM_DDP_COMP_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index eebb7d8..cf83f65 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -61,10 +61,27 @@
 
 	mtk_atomic_wait_for_fences(state);
 
+	/*
+	 * Mediatek drm supports runtime PM, so plane registers cannot be
+	 * written when their crtc is disabled.
+	 *
+	 * The comment for drm_atomic_helper_commit states:
+	 *     For drivers supporting runtime PM the recommended sequence is
+	 *
+	 *     drm_atomic_helper_commit_modeset_disables(dev, state);
+	 *     drm_atomic_helper_commit_modeset_enables(dev, state);
+	 *     drm_atomic_helper_commit_planes(dev, state,
+	 *                                     DRM_PLANE_COMMIT_ACTIVE_ONLY);
+	 *
+	 * See the kerneldoc entries for these three functions for more details.
+	 */
 	drm_atomic_helper_commit_modeset_disables(drm, state);
-	drm_atomic_helper_commit_planes(drm, state, false);
 	drm_atomic_helper_commit_modeset_enables(drm, state);
+	drm_atomic_helper_commit_planes(drm, state,
+					DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
 	drm_atomic_helper_wait_for_vblanks(drm, state);
+
 	drm_atomic_helper_cleanup_planes(drm, state);
 	drm_atomic_state_free(state);
 }
@@ -277,8 +294,8 @@
 	int ret;
 
 	drm = drm_dev_alloc(&mtk_drm_driver, dev);
-	if (!drm)
-		return -ENOMEM;
+	if (IS_ERR(drm))
+		return PTR_ERR(drm);
 
 	drm->dev_private = private;
 	private->drm = drm;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 3995765..c461a23 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -30,57 +30,12 @@
 	DRM_FORMAT_RGB565,
 };
 
-static void mtk_plane_enable(struct mtk_drm_plane *mtk_plane, bool enable,
-			     dma_addr_t addr, struct drm_rect *dest)
-{
-	struct drm_plane *plane = &mtk_plane->base;
-	struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
-	unsigned int pitch, format;
-	int x, y;
-
-	if (WARN_ON(!plane->state || (enable && !plane->state->fb)))
-		return;
-
-	if (plane->state->fb) {
-		pitch = plane->state->fb->pitches[0];
-		format = plane->state->fb->pixel_format;
-	} else {
-		pitch = 0;
-		format = DRM_FORMAT_RGBA8888;
-	}
-
-	x = plane->state->crtc_x;
-	y = plane->state->crtc_y;
-
-	if (x < 0) {
-		addr -= x * 4;
-		x = 0;
-	}
-
-	if (y < 0) {
-		addr -= y * pitch;
-		y = 0;
-	}
-
-	state->pending.enable = enable;
-	state->pending.pitch = pitch;
-	state->pending.format = format;
-	state->pending.addr = addr;
-	state->pending.x = x;
-	state->pending.y = y;
-	state->pending.width = dest->x2 - dest->x1;
-	state->pending.height = dest->y2 - dest->y1;
-	wmb(); /* Make sure the above parameters are set before update */
-	state->pending.dirty = true;
-}
-
 static void mtk_plane_reset(struct drm_plane *plane)
 {
 	struct mtk_plane_state *state;
 
 	if (plane->state) {
-		if (plane->state->fb)
-			drm_framebuffer_unreference(plane->state->fb);
+		__drm_atomic_helper_plane_destroy_state(plane->state);
 
 		state = to_mtk_plane_state(plane->state);
 		memset(state, 0, sizeof(*state));
@@ -134,20 +89,6 @@
 {
 	struct drm_framebuffer *fb = state->fb;
 	struct drm_crtc_state *crtc_state;
-	bool visible;
-	struct drm_rect dest = {
-		.x1 = state->crtc_x,
-		.y1 = state->crtc_y,
-		.x2 = state->crtc_x + state->crtc_w,
-		.y2 = state->crtc_y + state->crtc_h,
-	};
-	struct drm_rect src = {
-		/* 16.16 fixed point */
-		.x1 = state->src_x,
-		.y1 = state->src_y,
-		.x2 = state->src_x + state->src_w,
-		.y2 = state->src_y + state->src_h,
-	};
 	struct drm_rect clip = { 0, };
 
 	if (!fb)
@@ -168,40 +109,45 @@
 	clip.x2 = crtc_state->mode.hdisplay;
 	clip.y2 = crtc_state->mode.vdisplay;
 
-	return drm_plane_helper_check_update(plane, state->crtc, fb,
-					     &src, &dest, &clip,
-					     state->rotation,
-					     DRM_PLANE_HELPER_NO_SCALING,
-					     DRM_PLANE_HELPER_NO_SCALING,
-					     true, true, &visible);
+	return drm_plane_helper_check_state(state, &clip,
+					    DRM_PLANE_HELPER_NO_SCALING,
+					    DRM_PLANE_HELPER_NO_SCALING,
+					    true, true);
 }
 
 static void mtk_plane_atomic_update(struct drm_plane *plane,
 				    struct drm_plane_state *old_state)
 {
 	struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
-	struct drm_crtc *crtc = state->base.crtc;
+	struct drm_crtc *crtc = plane->state->crtc;
+	struct drm_framebuffer *fb = plane->state->fb;
 	struct drm_gem_object *gem;
 	struct mtk_drm_gem_obj *mtk_gem;
-	struct mtk_drm_plane *mtk_plane = to_mtk_plane(plane);
-	struct drm_rect dest = {
-		.x1 = state->base.crtc_x,
-		.y1 = state->base.crtc_y,
-		.x2 = state->base.crtc_x + state->base.crtc_w,
-		.y2 = state->base.crtc_y + state->base.crtc_h,
-	};
-	struct drm_rect clip = { 0, };
+	unsigned int pitch, format;
+	dma_addr_t addr;
 
-	if (!crtc)
+	if (!crtc || WARN_ON(!fb))
 		return;
 
-	clip.x2 = state->base.crtc->state->mode.hdisplay;
-	clip.y2 = state->base.crtc->state->mode.vdisplay;
-	drm_rect_intersect(&dest, &clip);
-
-	gem = mtk_fb_get_gem_obj(state->base.fb);
+	gem = mtk_fb_get_gem_obj(fb);
 	mtk_gem = to_mtk_gem_obj(gem);
-	mtk_plane_enable(mtk_plane, true, mtk_gem->dma_addr, &dest);
+	addr = mtk_gem->dma_addr;
+	pitch = fb->pitches[0];
+	format = fb->pixel_format;
+
+	addr += (plane->state->src.x1 >> 16) * drm_format_plane_cpp(format, 0);
+	addr += (plane->state->src.y1 >> 16) * pitch;
+
+	state->pending.enable = true;
+	state->pending.pitch = pitch;
+	state->pending.format = format;
+	state->pending.addr = addr;
+	state->pending.x = plane->state->dst.x1;
+	state->pending.y = plane->state->dst.y1;
+	state->pending.width = drm_rect_width(&plane->state->dst);
+	state->pending.height = drm_rect_height(&plane->state->dst);
+	wmb(); /* Make sure the above parameters are set before update */
+	state->pending.dirty = true;
 }
 
 static void mtk_plane_atomic_disable(struct drm_plane *plane,
@@ -220,13 +166,12 @@
 	.atomic_disable = mtk_plane_atomic_disable,
 };
 
-int mtk_plane_init(struct drm_device *dev, struct mtk_drm_plane *mtk_plane,
-		   unsigned long possible_crtcs, enum drm_plane_type type,
-		   unsigned int zpos)
+int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
+		   unsigned long possible_crtcs, enum drm_plane_type type)
 {
 	int err;
 
-	err = drm_universal_plane_init(dev, &mtk_plane->base, possible_crtcs,
+	err = drm_universal_plane_init(dev, plane, possible_crtcs,
 				       &mtk_plane_funcs, formats,
 				       ARRAY_SIZE(formats), type, NULL);
 	if (err) {
@@ -234,8 +179,7 @@
 		return err;
 	}
 
-	drm_plane_helper_add(&mtk_plane->base, &mtk_plane_helper_funcs);
-	mtk_plane->idx = zpos;
+	drm_plane_helper_add(plane, &mtk_plane_helper_funcs);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
index 72a7b3e..6a20b49 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
@@ -18,11 +18,6 @@
 #include <drm/drm_crtc.h>
 #include <linux/types.h>
 
-struct mtk_drm_plane {
-	struct drm_plane		base;
-	unsigned int			idx;
-};
-
 struct mtk_plane_pending_state {
 	bool				config;
 	bool				enable;
@@ -41,19 +36,13 @@
 	struct mtk_plane_pending_state	pending;
 };
 
-static inline struct mtk_drm_plane *to_mtk_plane(struct drm_plane *plane)
-{
-	return container_of(plane, struct mtk_drm_plane, base);
-}
-
 static inline struct mtk_plane_state *
 to_mtk_plane_state(struct drm_plane_state *state)
 {
 	return container_of(state, struct mtk_plane_state, base);
 }
 
-int mtk_plane_init(struct drm_device *dev, struct mtk_drm_plane *mtk_plane,
-		   unsigned long possible_crtcs, enum drm_plane_type type,
-		   unsigned int zpos);
+int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
+		   unsigned long possible_crtcs, enum drm_plane_type type);
 
 #endif
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 334562d..71227de 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1086,20 +1086,20 @@
 	return 0;
 }
 
-void mtk_hdmi_audio_enable(struct mtk_hdmi *hdmi)
+static void mtk_hdmi_audio_enable(struct mtk_hdmi *hdmi)
 {
 	mtk_hdmi_aud_enable_packet(hdmi, true);
 	hdmi->audio_enable = true;
 }
 
-void mtk_hdmi_audio_disable(struct mtk_hdmi *hdmi)
+static void mtk_hdmi_audio_disable(struct mtk_hdmi *hdmi)
 {
 	mtk_hdmi_aud_enable_packet(hdmi, false);
 	hdmi->audio_enable = false;
 }
 
-int mtk_hdmi_audio_set_param(struct mtk_hdmi *hdmi,
-			     struct hdmi_audio_param *param)
+static int mtk_hdmi_audio_set_param(struct mtk_hdmi *hdmi,
+				    struct hdmi_audio_param *param)
 {
 	if (!hdmi->audio_enable) {
 		dev_err(hdmi->dev, "hdmi audio is in disable state!\n");
@@ -1624,7 +1624,8 @@
 	mtk_hdmi_audio_disable(hdmi);
 }
 
-int mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
+static int
+mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
 {
 	struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 5e2f131..25b2a1a 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -58,7 +58,7 @@
 
 static struct drm_driver driver = {
 	.driver_features =
-	    DRIVER_USE_AGP | DRIVER_PCI_DMA |
+	    DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_LEGACY |
 	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
 	.dev_priv_size = sizeof(drm_mga_buf_priv_t),
 	.load = mga_driver_load,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 2b4b125..1443b3a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -56,7 +56,7 @@
 #ifdef CONFIG_X86
 	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 #endif
-	remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
+	drm_fb_helper_remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
 	kfree(ap);
 }
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index d9b04b0..88dd221 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -15,8 +15,6 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 
-#include <linux/fb.h>
-
 #include "mgag200_drv.h"
 
 static void mga_dirty_update(struct mga_fbdev *mfbdev,
@@ -185,8 +183,10 @@
 	}
 
 	sysram = vmalloc(size);
-	if (!sysram)
+	if (!sysram) {
+		ret = -ENOMEM;
 		goto err_sysram;
+	}
 
 	info = drm_fb_helper_alloc_fbi(helper);
 	if (IS_ERR(info)) {
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 13798b3..e79cbc2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -135,7 +135,7 @@
 	aper->ranges[0].base = mdev->mc.vram_base;
 	aper->ranges[0].size = mdev->mc.vram_window;
 
-	remove_conflicting_framebuffers(aper, "mgafb", true);
+	drm_fb_helper_remove_conflicting_framebuffers(aper, "mgafb", true);
 	kfree(aper);
 
 	if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 68268e5..919b35f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -150,7 +150,8 @@
 {
 	struct mgag200_bo *mgabo = mgag200_bo(bo);
 
-	return drm_vma_node_verify_access(&mgabo->gem.vma_node, filp);
+	return drm_vma_node_verify_access(&mgabo->gem.vma_node,
+					  filp->private_data);
 }
 
 static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 7c7a031..d96b2b6 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -11,6 +11,7 @@
 	select TMPFS
 	select QCOM_SCM
 	select SND_SOC_HDMI_CODEC if SND_SOC
+	select SYNC_FILE
 	default y
 	help
 	  DRM/KMS driver for MSM/snapdragon.
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 9737207..a968cad 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -422,12 +422,29 @@
 
 static int msm_hdmi_get_gpio(struct device_node *of_node, const char *name)
 {
-	int gpio = of_get_named_gpio(of_node, name, 0);
+	int gpio;
+
+	/* try with the gpio names as in the table (downstream bindings) */
+	gpio = of_get_named_gpio(of_node, name, 0);
 	if (gpio < 0) {
 		char name2[32];
-		snprintf(name2, sizeof(name2), "%s-gpio", name);
+
+		/* try with the gpio names as in the upstream bindings */
+		snprintf(name2, sizeof(name2), "%s-gpios", name);
 		gpio = of_get_named_gpio(of_node, name2, 0);
 		if (gpio < 0) {
+			char name3[32];
+
+			/*
+			 * try again after stripping out the "qcom,hdmi-tx"
+			 * prefix. This is mainly to match "hpd-gpios" used
+			 * in the upstream bindings
+			 */
+			if (sscanf(name2, "qcom,hdmi-tx-%s", name3))
+				gpio = of_get_named_gpio(of_node, name3, 0);
+		}
+
+		if (gpio < 0) {
 			DBG("failed to get gpio: %s (%d)", name, gpio);
 			gpio = -1;
 		}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
index de9007e..73e2021 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -243,7 +243,6 @@
 
 struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi)
 {
-	struct drm_device *dev = hdmi->dev;
 	struct hdmi_i2c_adapter *hdmi_i2c;
 	struct i2c_adapter *i2c = NULL;
 	int ret;
@@ -267,10 +266,8 @@
 	i2c->algo = &msm_hdmi_i2c_algorithm;
 
 	ret = i2c_add_adapter(i2c);
-	if (ret) {
-		dev_err(dev->dev, "failed to register hdmi i2c: %d\n", ret);
+	if (ret)
 		goto fail;
-	}
 
 	return i2c;
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 7b39e89..571a91e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -228,18 +228,21 @@
 	struct device_node *endpoint, *panel_node;
 	struct device_node *np = dev->dev->of_node;
 
-	endpoint = of_graph_get_next_endpoint(np, NULL);
+	/*
+	 * LVDS/LCDC is the first port described in the list of ports in the
+	 * MDP4 DT node.
+	 */
+	endpoint = of_graph_get_endpoint_by_regs(np, 0, -1);
 	if (!endpoint) {
-		DBG("no endpoint in MDP4 to fetch LVDS panel\n");
+		DBG("no LVDS remote endpoint\n");
 		return NULL;
 	}
 
-	/* don't proceed if we have an endpoint but no panel_node tied to it */
 	panel_node = of_graph_get_remote_port_parent(endpoint);
 	if (!panel_node) {
-		dev_err(dev->dev, "no valid panel node\n");
+		DBG("no valid panel node in LVDS endpoint\n");
 		of_node_put(endpoint);
-		return ERR_PTR(-ENODEV);
+		return NULL;
 	}
 
 	of_node_put(endpoint);
@@ -262,14 +265,12 @@
 	switch (intf_type) {
 	case DRM_MODE_ENCODER_LVDS:
 		/*
-		 * bail out early if:
-		 * - there is no panel node (no need to initialize lcdc
-		 *   encoder and lvds connector), or
-		 * - panel node is a bad pointer
+		 * bail out early if there is no panel node (no need to
+		 * initialize LCDC encoder and LVDS connector)
 		 */
 		panel_node = mdp4_detect_lcdc_panel(dev);
-		if (IS_ERR_OR_NULL(panel_node))
-			return PTR_ERR(panel_node);
+		if (!panel_node)
+			return 0;
 
 		encoder = mdp4_lcdc_encoder_init(dev, panel_node);
 		if (IS_ERR(encoder)) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
index bc3d8e7..a06b064 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
@@ -93,7 +93,7 @@
 };
 
 /* this should probably be a helper: */
-struct drm_connector *get_connector(struct drm_encoder *encoder)
+static struct drm_connector *get_connector(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_connector *connector;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 9f96dfe..3903dbc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -81,7 +81,7 @@
 	// XXX
 }
 
-int mdp4_plane_set_property(struct drm_plane *plane,
+static int mdp4_plane_set_property(struct drm_plane *plane,
 		struct drm_property *property, uint64_t val)
 {
 	// XXX
@@ -99,7 +99,7 @@
 };
 
 static int mdp4_plane_prepare_fb(struct drm_plane *plane,
-		const struct drm_plane_state *new_state)
+				 struct drm_plane_state *new_state)
 {
 	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
 	struct mdp4_kms *mdp4_kms = get_kms(plane);
@@ -113,7 +113,7 @@
 }
 
 static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
-		const struct drm_plane_state *old_state)
+				  struct drm_plane_state *old_state)
 {
 	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
 	struct mdp4_kms *mdp4_kms = get_kms(plane);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 432c098..951c002 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -78,12 +78,12 @@
 	if (!dev->mode_config.rotation_property)
 		dev->mode_config.rotation_property =
 			drm_mode_create_rotation_property(dev,
-			BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y));
+				DRM_ROTATE_0 | DRM_REFLECT_X | DRM_REFLECT_Y);
 
 	if (dev->mode_config.rotation_property)
 		drm_object_attach_property(&plane->base,
 			dev->mode_config.rotation_property,
-			0);
+			DRM_ROTATE_0);
 }
 
 /* helper to install properties which are common to planes and crtcs */
@@ -250,7 +250,7 @@
 };
 
 static int mdp5_plane_prepare_fb(struct drm_plane *plane,
-		const struct drm_plane_state *new_state)
+				 struct drm_plane_state *new_state)
 {
 	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
 	struct mdp5_kms *mdp5_kms = get_kms(plane);
@@ -264,7 +264,7 @@
 }
 
 static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
-		const struct drm_plane_state *old_state)
+				  struct drm_plane_state *old_state)
 {
 	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
 	struct mdp5_kms *mdp5_kms = get_kms(plane);
@@ -309,8 +309,8 @@
 			return -EINVAL;
 		}
 
-		hflip = !!(state->rotation & BIT(DRM_REFLECT_X));
-		vflip = !!(state->rotation & BIT(DRM_REFLECT_Y));
+		hflip = !!(state->rotation & DRM_REFLECT_X);
+		vflip = !!(state->rotation & DRM_REFLECT_Y);
 		if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
 			(hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
 			dev_err(plane->dev->dev,
@@ -743,8 +743,8 @@
 	config |= get_scale_config(format, src_h, crtc_h, false);
 	DBG("scale config = %x", config);
 
-	hflip = !!(pstate->rotation & BIT(DRM_REFLECT_X));
-	vflip = !!(pstate->rotation & BIT(DRM_REFLECT_Y));
+	hflip = !!(pstate->rotation & DRM_REFLECT_X);
+	vflip = !!(pstate->rotation & DRM_REFLECT_Y);
 
 	spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
 
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 4a8a6f1..73bae38 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -112,13 +112,13 @@
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 
-	drm_atomic_helper_wait_for_fences(dev, state);
+	drm_atomic_helper_wait_for_fences(dev, state, false);
 
 	kms->funcs->prepare_commit(kms, state);
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 
-	drm_atomic_helper_commit_planes(dev, state, false);
+	drm_atomic_helper_commit_planes(dev, state, 0);
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 8a02370..fb5c0b0 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -26,9 +26,10 @@
  * MSM driver version:
  * - 1.0.0 - initial interface
  * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
+ * - 1.2.0 - adds explicit fence support for submit ioctl
  */
 #define MSM_VERSION_MAJOR	1
-#define MSM_VERSION_MINOR	1
+#define MSM_VERSION_MINOR	2
 #define MSM_VERSION_PATCHLEVEL	0
 
 static void msm_fb_output_poll_changed(struct drm_device *dev)
@@ -347,9 +348,9 @@
 	int ret;
 
 	ddev = drm_dev_alloc(drv, dev);
-	if (!ddev) {
+	if (IS_ERR(ddev)) {
 		dev_err(dev, "failed to allocate drm_device\n");
-		return -ENOMEM;
+		return PTR_ERR(ddev);
 	}
 
 	platform_set_drvdata(pdev, ddev);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 85f3047..b6ac27e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -593,18 +593,16 @@
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	bool write = !!(op & MSM_PREP_WRITE);
+	unsigned long remain =
+		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
+	long ret;
 
-	if (op & MSM_PREP_NOSYNC) {
-		if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
-			return -EBUSY;
-	} else {
-		int ret;
-
-		ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
-				true, timeout_to_jiffies(timeout));
-		if (ret <= 0)
-			return ret == 0 ? -ETIMEDOUT : ret;
-	}
+	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
+						  true,  remain);
+	if (ret == 0)
+		return remain == 0 ? -EBUSY : -ETIMEDOUT;
+	else if (ret < 0)
+		return ret;
 
 	/* TODO cache maintenance */
 
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 880d6a9..b6a0f37 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -15,6 +15,8 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/sync_file.h>
+
 #include "msm_drv.h"
 #include "msm_gpu.h"
 #include "msm_gem.h"
@@ -378,6 +380,9 @@
 	struct msm_file_private *ctx = file->driver_priv;
 	struct msm_gem_submit *submit;
 	struct msm_gpu *gpu = priv->gpu;
+	struct fence *in_fence = NULL;
+	struct sync_file *sync_file = NULL;
+	int out_fence_fd = -1;
 	unsigned i;
 	int ret;
 
@@ -387,13 +392,23 @@
 	/* for now, we just have 3d pipe.. eventually this would need to
 	 * be more clever to dispatch to appropriate gpu module:
 	 */
-	if (args->pipe != MSM_PIPE_3D0)
+	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
+		return -EINVAL;
+
+	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
 		return -EINVAL;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
+	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+		if (out_fence_fd < 0) {
+			ret = out_fence_fd;
+			goto out_unlock;
+		}
+	}
 	priv->struct_mutex_task = current;
 
 	submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
@@ -410,9 +425,32 @@
 	if (ret)
 		goto out;
 
-	ret = submit_fence_sync(submit);
-	if (ret)
-		goto out;
+	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
+		in_fence = sync_file_get_fence(args->fence_fd);
+
+		if (!in_fence) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		/* TODO if we get an array-fence due to userspace merging multiple
+		 * fences, we need a way to determine if all the backing fences
+		 * are from our own context..
+		 */
+
+		if (in_fence->context != gpu->fctx->context) {
+			ret = fence_wait(in_fence, true);
+			if (ret)
+				goto out;
+		}
+
+	}
+
+	if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
+		ret = submit_fence_sync(submit);
+		if (ret)
+			goto out;
+	}
 
 	ret = submit_pin_objects(submit);
 	if (ret)
@@ -478,15 +516,39 @@
 
 	submit->nr_cmds = i;
 
-	ret = msm_gpu_submit(gpu, submit, ctx);
+	submit->fence = msm_fence_alloc(gpu->fctx);
+	if (IS_ERR(submit->fence)) {
+		ret = PTR_ERR(submit->fence);
+		submit->fence = NULL;
+		goto out;
+	}
+
+	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+		sync_file = sync_file_create(submit->fence);
+		if (!sync_file) {
+			ret = -ENOMEM;
+			goto out;
+		}
+	}
+
+	msm_gpu_submit(gpu, submit, ctx);
 
 	args->fence = submit->fence->seqno;
 
+	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+		fd_install(out_fence_fd, sync_file->file);
+		args->fence_fd = out_fence_fd;
+	}
+
 out:
+	if (in_fence)
+		fence_put(in_fence);
 	submit_cleanup(submit);
 	if (ret)
 		msm_gem_submit_free(submit);
 out_unlock:
+	if (ret && (out_fence_fd >= 0))
+		put_unused_fd(out_fence_fd);
 	priv->struct_mutex_task = NULL;
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 36ed53e..5bb0983 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -509,22 +509,15 @@
 }
 
 /* add bo's to gpu's ring, and kick gpu: */
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		struct msm_file_private *ctx)
 {
 	struct drm_device *dev = gpu->dev;
 	struct msm_drm_private *priv = dev->dev_private;
-	int i, ret;
+	int i;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	submit->fence = msm_fence_alloc(gpu->fctx);
-	if (IS_ERR(submit->fence)) {
-		ret = PTR_ERR(submit->fence);
-		submit->fence = NULL;
-		return ret;
-	}
-
 	inactive_cancel(gpu);
 
 	list_add_tail(&submit->node, &gpu->submit_list);
@@ -557,8 +550,6 @@
 	priv->lastctx = ctx;
 
 	hangcheck_timer_reset(gpu);
-
-	return 0;
 }
 
 /*
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index c902283..d61d98a 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -163,7 +163,7 @@
 		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
 
 void msm_gpu_retire(struct msm_gpu *gpu);
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		struct msm_file_private *ctx);
 
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
index 17fe4e53..1627294 100644
--- a/drivers/gpu/drm/msm/msm_perf.c
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -229,8 +229,8 @@
 	perf->ent = debugfs_create_file("perf", S_IFREG | S_IRUGO,
 			minor->debugfs_root, perf, &perf_debugfs_fops);
 	if (!perf->ent) {
-		DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/perf\n",
-				minor->debugfs_root->d_name.name);
+		DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/perf\n",
+				minor->debugfs_root);
 		goto fail;
 	}
 
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 3a5fdfc..8487f46 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -243,8 +243,8 @@
 	rd->ent = debugfs_create_file("rd", S_IFREG | S_IRUGO,
 			minor->debugfs_root, rd, &rd_debugfs_fops);
 	if (!rd->ent) {
-		DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/rd\n",
-				minor->debugfs_root->d_name.name);
+		DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/rd\n",
+				minor->debugfs_root);
 		goto fail;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 864323b..343b865 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1152,7 +1152,7 @@
 	if (ret)
 		goto out;
 
-	ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, new_mem);
+	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_mem);
 out:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return ret;
@@ -1180,7 +1180,7 @@
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, &tmp_mem);
+	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_mem);
 	if (ret)
 		goto out;
 
@@ -1298,7 +1298,7 @@
 	/* Fallback to software copy. */
 	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
 	if (ret == 0)
-		ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem);
+		ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_mem);
 
 out:
 	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
@@ -1316,7 +1316,8 @@
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
+	return drm_vma_node_verify_access(&nvbo->gem.vma_node,
+					  filp->private_data);
 }
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 66c1280..3100fd88 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -351,7 +351,7 @@
 	boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 #endif
 	if (nouveau_modeset != 2)
-		remove_conflicting_framebuffers(aper, "nouveaufb", boot);
+		drm_fb_helper_remove_conflicting_framebuffers(aper, "nouveaufb", boot);
 	kfree(aper);
 
 	ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
@@ -1067,8 +1067,8 @@
 		goto err_free;
 
 	drm = drm_dev_alloc(&driver_platform, &pdev->dev);
-	if (!drm) {
-		err = -ENOMEM;
+	if (IS_ERR(drm)) {
+		err = PTR_ERR(drm);
 		goto err_free;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index d1f248f..9f56927 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -32,7 +32,6 @@
 #include <linux/tty.h>
 #include <linux/sysrq.h>
 #include <linux/delay.h>
-#include <linux/fb.h>
 #include <linux/init.h>
 #include <linux/screen_info.h>
 #include <linux/vga_switcheroo.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 0eae8af..b1f3b81 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -13,7 +13,6 @@
 
 #include <linux/backlight.h>
 #include <linux/delay.h>
-#include <linux/fb.h>
 #include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
 #include <linux/jiffies.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index fc4c238..9f3d6f4 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -14,7 +14,6 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
-#include <linux/fb.h>
 #include <linux/gpio/consumer.h>
 #include <linux/of_gpio.h>
 
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 157c512..3557a4c 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -28,7 +28,6 @@
 #include <linux/jiffies.h>
 #include <linux/sched.h>
 #include <linux/backlight.h>
-#include <linux/fb.h>
 #include <linux/gpio/consumer.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
index e256d87..dfd4e96 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c
@@ -125,16 +125,15 @@
 
 static struct device_node *omapdss_of_get_remote_port(const struct device_node *node)
 {
-	struct device_node *np, *np_parent;
+	struct device_node *np;
 
 	np = of_parse_phandle(node, "remote-endpoint", 0);
 	if (!np)
 		return NULL;
 
-	np_parent = of_get_next_parent(np);
-	of_node_put(np);
+	np = of_get_next_parent(np);
 
-	return np_parent;
+	return np;
 }
 
 struct device_node *
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 26c6134..e1cfba5 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -96,7 +96,7 @@
 	dispc_runtime_get();
 
 	drm_atomic_helper_commit_modeset_disables(dev, old_state);
-	drm_atomic_helper_commit_planes(dev, old_state, false);
+	drm_atomic_helper_commit_planes(dev, old_state, 0);
 	drm_atomic_helper_commit_modeset_enables(dev, old_state);
 
 	omap_atomic_wait_for_completion(dev, old_state);
@@ -295,9 +295,9 @@
 	if (priv->has_dmm) {
 		dev->mode_config.rotation_property =
 			drm_mode_create_rotation_property(dev,
-				BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_90) |
-				BIT(DRM_ROTATE_180) | BIT(DRM_ROTATE_270) |
-				BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y));
+				DRM_ROTATE_0 | DRM_ROTATE_90 |
+				DRM_ROTATE_180 | DRM_ROTATE_270 |
+				DRM_REFLECT_X | DRM_REFLECT_Y);
 		if (!dev->mode_config.rotation_property)
 			return -ENOMEM;
 	}
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 31f5178..5f3337f 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -179,24 +179,24 @@
 					(uint32_t)win->rotation);
 			/* fallthru to default to no rotation */
 		case 0:
-		case BIT(DRM_ROTATE_0):
+		case DRM_ROTATE_0:
 			orient = 0;
 			break;
-		case BIT(DRM_ROTATE_90):
+		case DRM_ROTATE_90:
 			orient = MASK_XY_FLIP | MASK_X_INVERT;
 			break;
-		case BIT(DRM_ROTATE_180):
+		case DRM_ROTATE_180:
 			orient = MASK_X_INVERT | MASK_Y_INVERT;
 			break;
-		case BIT(DRM_ROTATE_270):
+		case DRM_ROTATE_270:
 			orient = MASK_XY_FLIP | MASK_Y_INVERT;
 			break;
 		}
 
-		if (win->rotation & BIT(DRM_REFLECT_X))
+		if (win->rotation & DRM_REFLECT_X)
 			orient ^= MASK_X_INVERT;
 
-		if (win->rotation & BIT(DRM_REFLECT_Y))
+		if (win->rotation & DRM_REFLECT_Y)
 			orient ^= MASK_Y_INVERT;
 
 		/* adjust x,y offset for flip/invert: */
@@ -213,7 +213,7 @@
 	} else {
 		switch (win->rotation & DRM_ROTATE_MASK) {
 		case 0:
-		case BIT(DRM_ROTATE_0):
+		case DRM_ROTATE_0:
 			/* OK */
 			break;
 
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 5252ab7..66ac8c4 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -60,7 +60,7 @@
 }
 
 static int omap_plane_prepare_fb(struct drm_plane *plane,
-				 const struct drm_plane_state *new_state)
+				 struct drm_plane_state *new_state)
 {
 	if (!new_state->fb)
 		return 0;
@@ -69,7 +69,7 @@
 }
 
 static void omap_plane_cleanup_fb(struct drm_plane *plane,
-				  const struct drm_plane_state *old_state)
+				  struct drm_plane_state *old_state)
 {
 	if (old_state->fb)
 		omap_framebuffer_unpin(old_state->fb);
@@ -109,8 +109,8 @@
 	win.src_y = state->src_y >> 16;
 
 	switch (state->rotation & DRM_ROTATE_MASK) {
-	case BIT(DRM_ROTATE_90):
-	case BIT(DRM_ROTATE_270):
+	case DRM_ROTATE_90:
+	case DRM_ROTATE_270:
 		win.src_w = state->src_h >> 16;
 		win.src_h = state->src_w >> 16;
 		break;
@@ -149,7 +149,7 @@
 	struct omap_plane_state *omap_state = to_omap_plane_state(plane->state);
 	struct omap_plane *omap_plane = to_omap_plane(plane);
 
-	plane->state->rotation = BIT(DRM_ROTATE_0);
+	plane->state->rotation = DRM_ROTATE_0;
 	omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
 			   ? 0 : omap_plane->id;
 
@@ -178,7 +178,7 @@
 		return -EINVAL;
 
 	if (state->fb) {
-		if (state->rotation != BIT(DRM_ROTATE_0) &&
+		if (state->rotation != DRM_ROTATE_0 &&
 		    !omap_framebuffer_supports_rotation(state->fb))
 			return -EINVAL;
 	}
@@ -269,7 +269,7 @@
 	 */
 	omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
 			   ? 0 : omap_plane->id;
-	omap_state->base.rotation = BIT(DRM_ROTATE_0);
+	omap_state->base.rotation = DRM_ROTATE_0;
 
 	plane->state = &omap_state->base;
 	plane->state->plane = plane;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 1500ab9..62aba97 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -18,6 +18,17 @@
 	  that it can be automatically turned off when the panel goes into a
 	  low power state.
 
+config DRM_PANEL_JDI_LT070ME05000
+	tristate "JDI LT070ME05000 WUXGA DSI panel"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  Say Y here if you want to enable support for JDI DSI video mode
+	  panel as found in Google Nexus 7 (2013) devices.
+	  The panel has a 1200(RGB)×1920 (WUXGA) resolution and uses
+	  24 bit per pixel.
+
 config DRM_PANEL_SAMSUNG_LD9040
 	tristate "Samsung LD9040 RGB/SPI panel"
 	depends on OF && SPI
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index f277eed..a5c7ec0 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
 obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
 obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
 obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
new file mode 100644
index 0000000..5b2340e
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright (C) 2016 InforceComputing
+ * Author: Vinay Simha BN <simhavcs@gmail.com>
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Author: Sumit Semwal <sumit.semwal@linaro.org>
+ *
+ * From internet archives, the panel for Nexus 7 2nd Gen, 2013 model is a
+ * JDI model LT070ME05000, and its data sheet is at:
+ * http://panelone.net/en/7-0-inch/JDI_LT070ME05000_7.0_inch-datasheet
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+static const char * const regulator_names[] = {
+	"vddp",
+	"iovcc"
+};
+
+struct jdi_panel {
+	struct drm_panel base;
+	struct mipi_dsi_device *dsi;
+
+	struct regulator_bulk_data supplies[ARRAY_SIZE(regulator_names)];
+
+	struct gpio_desc *enable_gpio;
+	struct gpio_desc *reset_gpio;
+	struct gpio_desc *dcdc_en_gpio;
+	struct backlight_device *backlight;
+
+	bool prepared;
+	bool enabled;
+
+	const struct drm_display_mode *mode;
+};
+
+static inline struct jdi_panel *to_jdi_panel(struct drm_panel *panel)
+{
+	return container_of(panel, struct jdi_panel, base);
+}
+
+static int jdi_panel_init(struct jdi_panel *jdi)
+{
+	struct mipi_dsi_device *dsi = jdi->dsi;
+	struct device *dev = &jdi->dsi->dev;
+	int ret;
+
+	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+	ret = mipi_dsi_dcs_soft_reset(dsi);
+	if (ret < 0)
+		return ret;
+
+	usleep_range(10000, 20000);
+
+	ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT << 4);
+	if (ret < 0) {
+		dev_err(dev, "failed to set pixel format: %d\n", ret);
+		return ret;
+	}
+
+	ret = mipi_dsi_dcs_set_column_address(dsi, 0, jdi->mode->hdisplay - 1);
+	if (ret < 0) {
+		dev_err(dev, "failed to set column address: %d\n", ret);
+		return ret;
+	}
+
+	ret = mipi_dsi_dcs_set_page_address(dsi, 0, jdi->mode->vdisplay - 1);
+	if (ret < 0) {
+		dev_err(dev, "failed to set page address: %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * BIT(5) BCTRL = 1 Backlight Control Block On, Brightness registers
+	 *                  are active
+	 * BIT(3) BL = 1    Backlight Control On
+	 * BIT(2) DD = 0    Display Dimming is Off
+	 */
+	ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+				 (u8[]){ 0x24 }, 1);
+	if (ret < 0) {
+		dev_err(dev, "failed to write control display: %d\n", ret);
+		return ret;
+	}
+
+	/* CABC off */
+	ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_POWER_SAVE,
+				 (u8[]){ 0x00 }, 1);
+	if (ret < 0) {
+		dev_err(dev, "failed to set cabc off: %d\n", ret);
+		return ret;
+	}
+
+	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+	if (ret < 0) {
+		dev_err(dev, "failed to set exit sleep mode: %d\n", ret);
+		return ret;
+	}
+
+	msleep(120);
+
+	ret = mipi_dsi_generic_write(dsi, (u8[]){0xB0, 0x00}, 2);
+	if (ret < 0) {
+		dev_err(dev, "failed to set mcap: %d\n", ret);
+		return ret;
+	}
+
+	mdelay(10);
+
+	/* Interface setting, video mode */
+	ret = mipi_dsi_generic_write(dsi, (u8[])
+				     {0xB3, 0x26, 0x08, 0x00, 0x20, 0x00}, 6);
+	if (ret < 0) {
+		dev_err(dev, "failed to set display interface setting: %d\n"
+			, ret);
+		return ret;
+	}
+
+	mdelay(20);
+
+	ret = mipi_dsi_generic_write(dsi, (u8[]){0xB0, 0x03}, 2);
+	if (ret < 0) {
+		dev_err(dev, "failed to set default values for mcap: %d\n"
+			, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int jdi_panel_on(struct jdi_panel *jdi)
+{
+	struct mipi_dsi_device *dsi = jdi->dsi;
+	struct device *dev = &jdi->dsi->dev;
+	int ret;
+
+	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+	ret = mipi_dsi_dcs_set_display_on(dsi);
+	if (ret < 0)
+		dev_err(dev, "failed to set display on: %d\n", ret);
+
+	return ret;
+}
+
+static void jdi_panel_off(struct jdi_panel *jdi)
+{
+	struct mipi_dsi_device *dsi = jdi->dsi;
+	struct device *dev = &jdi->dsi->dev;
+	int ret;
+
+	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+	ret = mipi_dsi_dcs_set_display_off(dsi);
+	if (ret < 0)
+		dev_err(dev, "failed to set display off: %d\n", ret);
+
+	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+	if (ret < 0)
+		dev_err(dev, "failed to enter sleep mode: %d\n", ret);
+
+	msleep(100);
+}
+
+static int jdi_panel_disable(struct drm_panel *panel)
+{
+	struct jdi_panel *jdi = to_jdi_panel(panel);
+
+	if (!jdi->enabled)
+		return 0;
+
+	jdi->backlight->props.power = FB_BLANK_POWERDOWN;
+	backlight_update_status(jdi->backlight);
+
+	jdi->enabled = false;
+
+	return 0;
+}
+
+static int jdi_panel_unprepare(struct drm_panel *panel)
+{
+	struct jdi_panel *jdi = to_jdi_panel(panel);
+	struct device *dev = &jdi->dsi->dev;
+	int ret;
+
+	if (!jdi->prepared)
+		return 0;
+
+	jdi_panel_off(jdi);
+
+	ret = regulator_bulk_disable(ARRAY_SIZE(jdi->supplies), jdi->supplies);
+	if (ret < 0)
+		dev_err(dev, "regulator disable failed, %d\n", ret);
+
+	gpiod_set_value(jdi->enable_gpio, 0);
+
+	gpiod_set_value(jdi->reset_gpio, 1);
+
+	gpiod_set_value(jdi->dcdc_en_gpio, 0);
+
+	jdi->prepared = false;
+
+	return 0;
+}
+
+static int jdi_panel_prepare(struct drm_panel *panel)
+{
+	struct jdi_panel *jdi = to_jdi_panel(panel);
+	struct device *dev = &jdi->dsi->dev;
+	int ret;
+
+	if (jdi->prepared)
+		return 0;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(jdi->supplies), jdi->supplies);
+	if (ret < 0) {
+		dev_err(dev, "regulator enable failed, %d\n", ret);
+		return ret;
+	}
+
+	msleep(20);
+
+	gpiod_set_value(jdi->dcdc_en_gpio, 1);
+	usleep_range(10, 20);
+
+	gpiod_set_value(jdi->reset_gpio, 0);
+	usleep_range(10, 20);
+
+	gpiod_set_value(jdi->enable_gpio, 1);
+	usleep_range(10, 20);
+
+	ret = jdi_panel_init(jdi);
+	if (ret < 0) {
+		dev_err(dev, "failed to init panel: %d\n", ret);
+		goto poweroff;
+	}
+
+	ret = jdi_panel_on(jdi);
+	if (ret < 0) {
+		dev_err(dev, "failed to set panel on: %d\n", ret);
+		goto poweroff;
+	}
+
+	jdi->prepared = true;
+
+	return 0;
+
+poweroff:
+	ret = regulator_bulk_disable(ARRAY_SIZE(jdi->supplies), jdi->supplies);
+	if (ret < 0)
+		dev_err(dev, "regulator disable failed, %d\n", ret);
+
+	gpiod_set_value(jdi->enable_gpio, 0);
+
+	gpiod_set_value(jdi->reset_gpio, 1);
+
+	gpiod_set_value(jdi->dcdc_en_gpio, 0);
+
+	return ret;
+}
+
+static int jdi_panel_enable(struct drm_panel *panel)
+{
+	struct jdi_panel *jdi = to_jdi_panel(panel);
+
+	if (jdi->enabled)
+		return 0;
+
+	jdi->backlight->props.power = FB_BLANK_UNBLANK;
+	backlight_update_status(jdi->backlight);
+
+	jdi->enabled = true;
+
+	return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+		.clock = 155493,
+		.hdisplay = 1200,
+		.hsync_start = 1200 + 48,
+		.hsync_end = 1200 + 48 + 32,
+		.htotal = 1200 + 48 + 32 + 60,
+		.vdisplay = 1920,
+		.vsync_start = 1920 + 3,
+		.vsync_end = 1920 + 3 + 5,
+		.vtotal = 1920 + 3 + 5 + 6,
+		.vrefresh = 60,
+		.flags = 0,
+};
+
+static int jdi_panel_get_modes(struct drm_panel *panel)
+{
+	struct drm_display_mode *mode;
+	struct jdi_panel *jdi = to_jdi_panel(panel);
+	struct device *dev = &jdi->dsi->dev;
+
+	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	if (!mode) {
+		dev_err(dev, "failed to add mode %ux%ux@%u\n",
+			default_mode.hdisplay, default_mode.vdisplay,
+			default_mode.vrefresh);
+		return -ENOMEM;
+	}
+
+	drm_mode_set_name(mode);
+
+	drm_mode_probed_add(panel->connector, mode);
+
+	panel->connector->display_info.width_mm = 95;
+	panel->connector->display_info.height_mm = 151;
+
+	return 1;
+}
+
+static int dsi_dcs_bl_get_brightness(struct backlight_device *bl)
+{
+	struct mipi_dsi_device *dsi = bl_get_data(bl);
+	int ret;
+	u16 brightness = bl->props.brightness;
+
+	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+	ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
+	if (ret < 0)
+		return ret;
+
+	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+	return brightness & 0xff;
+}
+
+static int dsi_dcs_bl_update_status(struct backlight_device *bl)
+{
+	struct mipi_dsi_device *dsi = bl_get_data(bl);
+	int ret;
+
+	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+	ret = mipi_dsi_dcs_set_display_brightness(dsi, bl->props.brightness);
+	if (ret < 0)
+		return ret;
+
+	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+	return 0;
+}
+
+static const struct backlight_ops dsi_bl_ops = {
+	.update_status = dsi_dcs_bl_update_status,
+	.get_brightness = dsi_dcs_bl_get_brightness,
+};
+
+static struct backlight_device *
+drm_panel_create_dsi_backlight(struct mipi_dsi_device *dsi)
+{
+	struct device *dev = &dsi->dev;
+	struct backlight_properties props;
+
+	memset(&props, 0, sizeof(props));
+	props.type = BACKLIGHT_RAW;
+	props.brightness = 255;
+	props.max_brightness = 255;
+
+	return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+					      &dsi_bl_ops, &props);
+}
+
+static const struct drm_panel_funcs jdi_panel_funcs = {
+	.disable = jdi_panel_disable,
+	.unprepare = jdi_panel_unprepare,
+	.prepare = jdi_panel_prepare,
+	.enable = jdi_panel_enable,
+	.get_modes = jdi_panel_get_modes,
+};
+
+static const struct of_device_id jdi_of_match[] = {
+	{ .compatible = "jdi,lt070me05000", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, jdi_of_match);
+
+static int jdi_panel_add(struct jdi_panel *jdi)
+{
+	struct device *dev = &jdi->dsi->dev;
+	int ret;
+	unsigned int i;
+
+	jdi->mode = &default_mode;
+
+	for (i = 0; i < ARRAY_SIZE(jdi->supplies); i++)
+		jdi->supplies[i].supply = regulator_names[i];
+
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(jdi->supplies),
+				      jdi->supplies);
+	if (ret < 0) {
+		dev_err(dev, "failed to init regulator, ret=%d\n", ret);
+		return ret;
+	}
+
+	jdi->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+	if (IS_ERR(jdi->enable_gpio)) {
+		ret = PTR_ERR(jdi->enable_gpio);
+		dev_err(dev, "cannot get enable-gpio %d\n", ret);
+		return ret;
+	}
+
+	jdi->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(jdi->reset_gpio)) {
+		ret = PTR_ERR(jdi->reset_gpio);
+		dev_err(dev, "cannot get reset-gpios %d\n", ret);
+		return ret;
+	}
+
+	jdi->dcdc_en_gpio = devm_gpiod_get(dev, "dcdc-en", GPIOD_OUT_LOW);
+	if (IS_ERR(jdi->dcdc_en_gpio)) {
+		ret = PTR_ERR(jdi->dcdc_en_gpio);
+		dev_err(dev, "cannot get dcdc-en-gpio %d\n", ret);
+		return ret;
+	}
+
+	jdi->backlight = drm_panel_create_dsi_backlight(jdi->dsi);
+	if (IS_ERR(jdi->backlight)) {
+		ret = PTR_ERR(jdi->backlight);
+		dev_err(dev, "failed to register backlight %d\n", ret);
+		return ret;
+	}
+
+	drm_panel_init(&jdi->base);
+	jdi->base.funcs = &jdi_panel_funcs;
+	jdi->base.dev = &jdi->dsi->dev;
+
+	ret = drm_panel_add(&jdi->base);
+
+	return ret;
+}
+
+static void jdi_panel_del(struct jdi_panel *jdi)
+{
+	if (jdi->base.dev)
+		drm_panel_remove(&jdi->base);
+}
+
+static int jdi_panel_probe(struct mipi_dsi_device *dsi)
+{
+	struct jdi_panel *jdi;
+	int ret;
+
+	dsi->lanes = 4;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->mode_flags =  MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO |
+			   MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+	jdi = devm_kzalloc(&dsi->dev, sizeof(*jdi), GFP_KERNEL);
+	if (!jdi)
+		return -ENOMEM;
+
+	mipi_dsi_set_drvdata(dsi, jdi);
+
+	jdi->dsi = dsi;
+
+	ret = jdi_panel_add(jdi);
+	if (ret < 0)
+		return ret;
+
+	return mipi_dsi_attach(dsi);
+}
+
+static int jdi_panel_remove(struct mipi_dsi_device *dsi)
+{
+	struct jdi_panel *jdi = mipi_dsi_get_drvdata(dsi);
+	int ret;
+
+	ret = jdi_panel_disable(&jdi->base);
+	if (ret < 0)
+		dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
+
+	ret = mipi_dsi_detach(dsi);
+	if (ret < 0)
+		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n",
+			ret);
+
+	drm_panel_detach(&jdi->base);
+	jdi_panel_del(jdi);
+
+	return 0;
+}
+
+static void jdi_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+	struct jdi_panel *jdi = mipi_dsi_get_drvdata(dsi);
+
+	jdi_panel_disable(&jdi->base);
+}
+
+static struct mipi_dsi_driver jdi_panel_driver = {
+	.driver = {
+		.name = "panel-jdi-lt070me05000",
+		.of_match_table = jdi_of_match,
+	},
+	.probe = jdi_panel_probe,
+	.remove = jdi_panel_remove,
+	.shutdown = jdi_panel_shutdown,
+};
+module_mipi_dsi_driver(jdi_panel_driver);
+
+MODULE_AUTHOR("Sumit Semwal <sumit.semwal@linaro.org>");
+MODULE_AUTHOR("Vinay Simha BN <simhavcs@gmail.com>");
+MODULE_DESCRIPTION("JDI LT070ME05000 WUXGA");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 85143d1..113db3c 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -849,6 +849,34 @@
 	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
 };
 
+static const struct display_timing innolux_g101ice_l01_timing = {
+	.pixelclock = { 60400000, 71100000, 74700000 },
+	.hactive = { 1280, 1280, 1280 },
+	.hfront_porch = { 41, 80, 100 },
+	.hback_porch = { 40, 79, 99 },
+	.hsync_len = { 1, 1, 1 },
+	.vactive = { 800, 800, 800 },
+	.vfront_porch = { 5, 11, 14 },
+	.vback_porch = { 4, 11, 14 },
+	.vsync_len = { 1, 1, 1 },
+	.flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc innolux_g101ice_l01 = {
+	.timings = &innolux_g101ice_l01_timing,
+	.num_timings = 1,
+	.bpc = 8,
+	.size = {
+		.width = 217,
+		.height = 135,
+	},
+	.delay = {
+		.enable = 200,
+		.disable = 200,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
 static const struct drm_display_mode innolux_g121i1_l01_mode = {
 	.clock = 71000,
 	.hdisplay = 1280,
@@ -1186,7 +1214,7 @@
 		.width = 105,
 		.height = 67,
 	},
-	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
 };
 
 /*
@@ -1245,6 +1273,7 @@
 		.height = 93,
 	},
 	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
 };
 
 static const struct drm_display_mode qd43003c0_40_mode = {
@@ -1384,6 +1413,11 @@
 		.width = 259,
 		.height = 173,
 	},
+	.delay = {
+		.prepare = 110,
+		.enable = 50,
+		.unprepare = 550,
+	},
 };
 
 static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = {
@@ -1430,6 +1464,11 @@
 		.width = 263,
 		.height = 164,
 	},
+	.delay = {
+		.prepare = 10 + 200,
+		.enable = 50,
+		.unprepare = 10 + 500,
+	},
 };
 
 static const struct drm_display_mode tpk_f07a_0102_mode = {
@@ -1575,6 +1614,9 @@
 		.compatible = "innolux,at070tn92",
 		.data = &innolux_at070tn92,
 	}, {
+		.compatible ="innolux,g101ice-l01",
+		.data = &innolux_g101ice_l01
+	}, {
 		.compatible ="innolux,g121i1-l01",
 		.data = &innolux_g121i1_l01
 	}, {
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 3aef127..a61c0d4 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -211,6 +211,7 @@
 	struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);
 
 	drm_crtc_cleanup(crtc);
+	qxl_bo_unref(&qxl_crtc->cursor_bo);
 	kfree(qxl_crtc);
 }
 
@@ -296,6 +297,52 @@
 	return 0;
 }
 
+static int qxl_crtc_apply_cursor(struct drm_crtc *crtc)
+{
+	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	struct qxl_cursor_cmd *cmd;
+	struct qxl_release *release;
+	int ret = 0;
+
+	if (!qcrtc->cursor_bo)
+		return 0;
+
+	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
+					 QXL_RELEASE_CURSOR_CMD,
+					 &release, NULL);
+	if (ret)
+		return ret;
+
+	ret = qxl_release_list_add(release, qcrtc->cursor_bo);
+	if (ret)
+		goto out_free_release;
+
+	ret = qxl_release_reserve_list(release, false);
+	if (ret)
+		goto out_free_release;
+
+	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+	cmd->type = QXL_CURSOR_SET;
+	cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+	cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
+
+	cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0);
+
+	cmd->u.set.visible = 1;
+	qxl_release_unmap(qdev, release, &cmd->release_info);
+
+	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+	qxl_release_fence_buffer_objects(release);
+
+	return ret;
+
+out_free_release:
+	qxl_release_free(qdev, release);
+	return ret;
+}
+
 static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
 				struct drm_file *file_priv,
 				uint32_t handle,
@@ -400,7 +447,8 @@
 	}
 	drm_gem_object_unreference_unlocked(obj);
 
-	qxl_bo_unref(&cursor_bo);
+	qxl_bo_unref (&qcrtc->cursor_bo);
+	qcrtc->cursor_bo = cursor_bo;
 
 	return ret;
 
@@ -655,6 +703,12 @@
 			   bo->surf.stride, bo->surf.format);
 		qxl_io_create_primary(qdev, 0, bo);
 		bo->is_primary = true;
+
+		ret = qxl_crtc_apply_cursor(crtc);
+		if (ret) {
+			DRM_ERROR("could not set cursor after modeset");
+			ret = 0;
+		}
 	}
 
 	if (bo->is_primary) {
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index ffe8853..9b728ed 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -57,11 +57,8 @@
 static int
 alloc_drawable(struct qxl_device *qdev, struct qxl_release **release)
 {
-	int ret;
-	ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
-					 QXL_RELEASE_DRAWABLE, release,
-					 NULL);
-	return ret;
+	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
+					  QXL_RELEASE_DRAWABLE, release, NULL);
 }
 
 static void
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 8e633ca..5f3e5ad 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -137,6 +137,7 @@
 	int cur_y;
 	int hot_spot_x;
 	int hot_spot_y;
+	struct qxl_bo *cursor_bo;
 };
 
 struct qxl_output {
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 28c1423..2cd879a 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -24,7 +24,6 @@
  *     David Airlie
  */
 #include <linux/module.h>
-#include <linux/fb.h>
 
 #include "drmP.h"
 #include "drm/drm.h"
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 5e1d789..fa5440d 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -61,7 +61,7 @@
 	if (domain == QXL_GEM_DOMAIN_VRAM)
 		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
 	if (domain == QXL_GEM_DOMAIN_SURFACE)
-		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
+		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag;
 	if (domain == QXL_GEM_DOMAIN_CPU)
 		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
 	if (!c)
@@ -151,7 +151,7 @@
 
 	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
 		map = qdev->vram_mapping;
-	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
 		map = qdev->surface_mapping;
 	else
 		goto fallback;
@@ -191,7 +191,7 @@
 
 	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
 		map = qdev->vram_mapping;
-	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
 		map = qdev->surface_mapping;
 	else
 		goto fallback;
@@ -311,7 +311,7 @@
 
 int qxl_surf_evict(struct qxl_device *qdev)
 {
-	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
 }
 
 int qxl_vram_evict(struct qxl_device *qdev)
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index f599cd0..cd83f05 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -203,12 +203,9 @@
 static int qxl_release_bo_alloc(struct qxl_device *qdev,
 				struct qxl_bo **bo)
 {
-	int ret;
 	/* pin releases bo's they are too messy to evict */
-	ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
-			    QXL_GEM_DOMAIN_VRAM, NULL,
-			    bo);
-	return ret;
+	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
+			     QXL_GEM_DOMAIN_VRAM, NULL, bo);
 }
 
 int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index d50c967..e26c82d 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -168,7 +168,7 @@
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
-	case TTM_PL_PRIV0:
+	case TTM_PL_PRIV:
 		/* "On-card" video ram */
 		man->func = &ttm_bo_manager_func;
 		man->gpu_offset = 0;
@@ -210,7 +210,8 @@
 {
 	struct qxl_bo *qbo = to_qxl_bo(bo);
 
-	return drm_vma_node_verify_access(&qbo->gem_base.vma_node, filp);
+	return drm_vma_node_verify_access(&qbo->gem_base.vma_node,
+					  filp->private_data);
 }
 
 static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -235,7 +236,7 @@
 		mem->bus.base = qdev->vram_base;
 		mem->bus.offset = mem->start << PAGE_SHIFT;
 		break;
-	case TTM_PL_PRIV0:
+	case TTM_PL_PRIV:
 		mem->bus.is_iomem = true;
 		mem->bus.base = qdev->surfaceram_base;
 		mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -361,8 +362,8 @@
 		qxl_move_null(bo, new_mem);
 		return 0;
 	}
-	return ttm_bo_move_memcpy(bo, evict, interruptible,
-				  no_wait_gpu, new_mem);
+	return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu,
+				  new_mem);
 }
 
 static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
@@ -376,7 +377,7 @@
 	qbo = to_qxl_bo(bo);
 	qdev = qbo->gem_base.dev->dev_private;
 
-	if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id)
+	if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
 		qxl_surface_evict(qdev, qbo, new_mem ? true : false);
 }
 
@@ -422,7 +423,7 @@
 		DRM_ERROR("Failed initializing VRAM heap.\n");
 		return r;
 	}
-	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0,
+	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV,
 			   qdev->surfaceram_size / PAGE_SIZE);
 	if (r) {
 		DRM_ERROR("Failed initializing Surfaces heap.\n");
@@ -445,7 +446,7 @@
 void qxl_ttm_fini(struct qxl_device *qdev)
 {
 	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
-	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
 	ttm_bo_device_release(&qdev->mman.bdev);
 	qxl_ttm_global_fini(qdev);
 	DRM_INFO("qxl: ttm finalized\n");
@@ -489,7 +490,7 @@
 		if (i == 0)
 			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
 		else
-			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
+			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv;
 
 	}
 	return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index c57b4de..a982be5 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -56,7 +56,7 @@
 
 static struct drm_driver driver = {
 	.driver_features =
-	    DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
+	    DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_LEGACY |
 	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
 	.dev_priv_size = sizeof(drm_r128_buf_priv_t),
 	.load = r128_driver_load,
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 1dcf390..74f99ba 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1156,6 +1156,7 @@
 	u32 tmp, viewport_w, viewport_h;
 	int r;
 	bool bypass_lut = false;
+	char *format_name;
 
 	/* no fb bound */
 	if (!atomic && !crtc->primary->fb) {
@@ -1259,8 +1260,9 @@
 		bypass_lut = true;
 		break;
 	default:
-		DRM_ERROR("Unsupported screen format %s\n",
-			  drm_get_format_name(target_fb->pixel_format));
+		format_name = drm_get_format_name(target_fb->pixel_format);
+		DRM_ERROR("Unsupported screen format %s\n", format_name);
+		kfree(format_name);
 		return -EINVAL;
 	}
 
@@ -1435,8 +1437,8 @@
 	WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 	       (viewport_w << 16) | viewport_h);
 
-	/* set pageflip to happen only at start of vblank interval (front porch) */
-	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
@@ -1471,6 +1473,7 @@
 	u32 viewport_w, viewport_h;
 	int r;
 	bool bypass_lut = false;
+	char *format_name;
 
 	/* no fb bound */
 	if (!atomic && !crtc->primary->fb) {
@@ -1560,8 +1563,9 @@
 		bypass_lut = true;
 		break;
 	default:
-		DRM_ERROR("Unsupported screen format %s\n",
-			  drm_get_format_name(target_fb->pixel_format));
+		format_name = drm_get_format_name(target_fb->pixel_format);
+		DRM_ERROR("Unsupported screen format %s\n", format_name);
+		kfree(format_name);
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index cead089a..432cb46 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -389,22 +389,21 @@
 {
 	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
 	u8 msg[DP_DPCD_SIZE];
-	int ret, i;
+	int ret;
 
-	for (i = 0; i < 7; i++) {
-		ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
-				       DP_DPCD_SIZE);
-		if (ret == DP_DPCD_SIZE) {
-			memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
+	ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
+			       DP_DPCD_SIZE);
+	if (ret == DP_DPCD_SIZE) {
+		memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
 
-			DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
-				      dig_connector->dpcd);
+		DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+			      dig_connector->dpcd);
 
-			radeon_dp_probe_oui(radeon_connector);
+		radeon_dp_probe_oui(radeon_connector);
 
-			return true;
-		}
+		return true;
 	}
+
 	dig_connector->dpcd[0] = 0;
 	return false;
 }
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 0c1b9ff..f6ff41a 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1871,7 +1871,7 @@
 {
 	const __be32 *fw_data = NULL;
 	const __le32 *new_fw_data = NULL;
-	u32 running, blackout = 0, tmp;
+	u32 running, tmp;
 	u32 *io_mc_regs = NULL;
 	const __le32 *new_io_mc_regs = NULL;
 	int i, regs_size, ucode_size;
@@ -1912,11 +1912,6 @@
 	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
 
 	if (running == 0) {
-		if (running) {
-			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
-			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
-		}
-
 		/* reset the engine and set to writable */
 		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
 		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
@@ -1964,9 +1959,6 @@
 				break;
 			udelay(1);
 		}
-
-		if (running)
-			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
 	}
 
 	return 0;
@@ -4201,11 +4193,7 @@
 u32 cik_gfx_get_wptr(struct radeon_device *rdev,
 		     struct radeon_ring *ring)
 {
-	u32 wptr;
-
-	wptr = RREG32(CP_RB0_WPTR);
-
-	return wptr;
+	return RREG32(CP_RB0_WPTR);
 }
 
 void cik_gfx_set_wptr(struct radeon_device *rdev,
@@ -8215,7 +8203,7 @@
 		return;
 
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
 	if (r) {
 		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
 		return;
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index cead228..48db935 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -2069,6 +2069,7 @@
 #define UVD_UDEC_ADDR_CONFIG		0xef4c
 #define UVD_UDEC_DB_ADDR_CONFIG		0xef50
 #define UVD_UDEC_DBW_ADDR_CONFIG	0xef54
+#define UVD_NO_OP			0xeffc
 
 #define UVD_LMI_EXT40_ADDR		0xf498
 #define UVD_GP_SCRATCH4			0xf4e0
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index db275b7..0b6b576 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2878,9 +2878,8 @@
 	for (i = 0; i < rdev->num_crtc; i++) {
 		if (save->crtc_enabled[i]) {
 			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
-			if ((tmp & 0x7) != 3) {
+			if ((tmp & 0x7) != 0) {
 				tmp &= ~0x7;
-				tmp |= 0x3;
 				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
 			}
 			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
@@ -5580,7 +5579,7 @@
 		return;
 
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
 	if (r) {
 		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
 		return;
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index c8e3d39..f3d88ca 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1523,6 +1523,7 @@
 #define UVD_UDEC_ADDR_CONFIG				0xef4c
 #define UVD_UDEC_DB_ADDR_CONFIG				0xef50
 #define UVD_UDEC_DBW_ADDR_CONFIG			0xef54
+#define UVD_NO_OP					0xeffc
 #define UVD_RBC_RB_RPTR					0xf690
 #define UVD_RBC_RB_WPTR					0xf694
 #define UVD_STATUS					0xf6bc
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 4a3d7ca..103fc86 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2062,7 +2062,7 @@
 		return;
 
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
 	if (r) {
 		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
 		return;
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 47eb49b..3c9fec8 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -1137,6 +1137,7 @@
 #define UVD_UDEC_ADDR_CONFIG				0xEF4C
 #define UVD_UDEC_DB_ADDR_CONFIG				0xEF50
 #define UVD_UDEC_DBW_ADDR_CONFIG			0xEF54
+#define UVD_NO_OP					0xEFFC
 #define UVD_RBC_RB_RPTR					0xF690
 #define UVD_RBC_RB_WPTR					0xF694
 #define UVD_STATUS					0xf6bc
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index f25994b..f5e84f4 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1071,11 +1071,7 @@
 u32 r100_gfx_get_wptr(struct radeon_device *rdev,
 		      struct radeon_ring *ring)
 {
-	u32 wptr;
-
-	wptr = RREG32(RADEON_CP_RB_WPTR);
-
-	return wptr;
+	return RREG32(RADEON_CP_RB_WPTR);
 }
 
 void r100_gfx_set_wptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9247e7d..a951881 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2631,11 +2631,7 @@
 u32 r600_gfx_get_wptr(struct radeon_device *rdev,
 		      struct radeon_ring *ring)
 {
-	u32 wptr;
-
-	wptr = RREG32(R600_CP_RB_WPTR);
-
-	return wptr;
+	return RREG32(R600_CP_RB_WPTR);
 }
 
 void r600_gfx_set_wptr(struct radeon_device *rdev,
@@ -3097,7 +3093,7 @@
 		return;
 
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
 	if (r) {
 		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
 		return;
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 6a4b020..5a26eb4 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -156,19 +156,20 @@
 	struct drm_device *dev = rdev->ddev;
 	struct drm_crtc *crtc;
 	struct radeon_crtc *radeon_crtc;
-	u32 line_time_us, vblank_lines;
+	u32 vblank_in_pixels;
 	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
 
 	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 			radeon_crtc = to_radeon_crtc(crtc);
 			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
-				line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
-					radeon_crtc->hw_mode.clock;
-				vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
-					radeon_crtc->hw_mode.crtc_vdisplay +
-					(radeon_crtc->v_border * 2);
-				vblank_time_us = vblank_lines * line_time_us;
+				vblank_in_pixels =
+					radeon_crtc->hw_mode.crtc_htotal *
+					(radeon_crtc->hw_mode.crtc_vblank_end -
+					 radeon_crtc->hw_mode.crtc_vdisplay +
+					 (radeon_crtc->v_border * 2));
+
+				vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
 				break;
 			}
 		}
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 1e8495c..2e00a52 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1490,6 +1490,7 @@
 #define UVD_GPCOM_VCPU_DATA0				0xef10
 #define UVD_GPCOM_VCPU_DATA1				0xef14
 #define UVD_ENGINE_CNTL					0xef18
+#define UVD_NO_OP					0xeffc
 
 #define UVD_SEMA_CNTL					0xf400
 #define UVD_RB_ARB_CTRL					0xf480
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5633ee3..1b0dcad 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -742,6 +742,7 @@
 	struct work_struct		unpin_work;
 	struct radeon_device		*rdev;
 	int				crtc_id;
+	u32				target_vblank;
 	uint64_t			base;
 	struct drm_pending_vblank_event *event;
 	struct radeon_bo		*old_rbo;
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 31c9a92..6efbd65 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -25,6 +25,7 @@
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <linux/power_supply.h>
+#include <linux/pm_runtime.h>
 #include <acpi/video.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
@@ -32,6 +33,12 @@
 #include "radeon_acpi.h"
 #include "atom.h"
 
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_atpx_dgpu_req_power_for_displays(void);
+#else
+static inline bool radeon_atpx_dgpu_req_power_for_displays(void) { return false; }
+#endif
+
 #define ACPI_AC_CLASS           "ac_adapter"
 
 extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev);
@@ -394,6 +401,16 @@
 #endif
 		}
 	}
+	if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+		if ((rdev->flags & RADEON_IS_PX) &&
+		    radeon_atpx_dgpu_req_power_for_displays()) {
+			pm_runtime_get_sync(rdev->ddev->dev);
+			/* Just fire off a uevent and let userspace tell us what to do */
+			drm_helper_hpd_irq_event(rdev->ddev);
+			pm_runtime_mark_last_busy(rdev->ddev->dev);
+			pm_runtime_put_autosuspend(rdev->ddev->dev);
+		}
+	}
 	/* TODO: check other events */
 
 	/* We've handled the event, stop the notifier chain. The ACPI interface
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index ddef0d4..2fdcd04 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -29,6 +29,7 @@
 	acpi_handle handle;
 	struct radeon_atpx_functions functions;
 	bool is_hybrid;
+	bool dgpu_req_power_for_displays;
 };
 
 static struct radeon_atpx_priv {
@@ -72,6 +73,10 @@
 	return radeon_atpx_priv.atpx.is_hybrid;
 }
 
+bool radeon_atpx_dgpu_req_power_for_displays(void) {
+	return radeon_atpx_priv.atpx.dgpu_req_power_for_displays;
+}
+
 /**
  * radeon_atpx_call - call an ATPX method
  *
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index b79f3b0..e18839d 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -198,12 +198,12 @@
 		}
 
 		/* Any defined maximum tmds clock limit we must not exceed? */
-		if (connector->max_tmds_clock > 0) {
+		if (connector->display_info.max_tmds_clock > 0) {
 			/* mode_clock is clock in kHz for mode to be modeset on this connector */
 			mode_clock = radeon_connector->pixelclock_for_modeset;
 
 			/* Maximum allowable input clock in kHz */
-			max_tmds_clock = connector->max_tmds_clock * 1000;
+			max_tmds_clock = connector->display_info.max_tmds_clock;
 
 			DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n",
 					  connector->name, mode_clock, max_tmds_clock);
@@ -927,6 +927,16 @@
 	return ret;
 }
 
+static void radeon_connector_unregister(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	if (radeon_connector->ddc_bus->has_aux) {
+		drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux);
+		radeon_connector->ddc_bus->has_aux = false;
+	}
+}
+
 static void radeon_connector_destroy(struct drm_connector *connector)
 {
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -984,6 +994,7 @@
 	.dpms = drm_helper_connector_dpms,
 	.detect = radeon_lvds_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
+	.early_unregister = radeon_connector_unregister,
 	.destroy = radeon_connector_destroy,
 	.set_property = radeon_lvds_set_property,
 };
@@ -1111,6 +1122,7 @@
 	.dpms = drm_helper_connector_dpms,
 	.detect = radeon_vga_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
+	.early_unregister = radeon_connector_unregister,
 	.destroy = radeon_connector_destroy,
 	.set_property = radeon_connector_set_property,
 };
@@ -1188,6 +1200,7 @@
 	.dpms = drm_helper_connector_dpms,
 	.detect = radeon_tv_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
+	.early_unregister = radeon_connector_unregister,
 	.destroy = radeon_connector_destroy,
 	.set_property = radeon_connector_set_property,
 };
@@ -1519,6 +1532,7 @@
 	.detect = radeon_dvi_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = radeon_connector_set_property,
+	.early_unregister = radeon_connector_unregister,
 	.destroy = radeon_connector_destroy,
 	.force = radeon_dvi_force,
 };
@@ -1832,6 +1846,7 @@
 	.detect = radeon_dp_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = radeon_connector_set_property,
+	.early_unregister = radeon_connector_unregister,
 	.destroy = radeon_connector_destroy,
 	.force = radeon_dvi_force,
 };
@@ -1841,6 +1856,7 @@
 	.detect = radeon_dp_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = radeon_lvds_set_property,
+	.early_unregister = radeon_connector_unregister,
 	.destroy = radeon_connector_destroy,
 	.force = radeon_dvi_force,
 };
@@ -1850,6 +1866,7 @@
 	.detect = radeon_dp_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = radeon_lvds_set_property,
+	.early_unregister = radeon_connector_unregister,
 	.destroy = radeon_connector_destroy,
 	.force = radeon_dvi_force,
 };
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a00dd2f..eb92aef 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -639,7 +639,7 @@
  * Used at driver startup.
  * Returns true if virtual or false if not.
  */
-static bool radeon_device_is_virtual(void)
+bool radeon_device_is_virtual(void)
 {
 #ifdef CONFIG_X86
 	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
@@ -661,8 +661,9 @@
 {
 	uint32_t reg;
 
-	/* for pass through, always force asic_init */
-	if (radeon_device_is_virtual())
+	/* for pass through, always force asic_init for CI */
+	if (rdev->family >= CHIP_BONAIRE &&
+	    radeon_device_is_virtual())
 		return false;
 
 	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
@@ -1956,14 +1957,3 @@
 	}
 #endif
 }
-
-#if defined(CONFIG_DEBUG_FS)
-int radeon_debugfs_init(struct drm_minor *minor)
-{
-	return 0;
-}
-
-void radeon_debugfs_cleanup(struct drm_minor *minor)
-{
-}
-#endif
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index c3206fb..cdb8cb56 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -321,16 +321,30 @@
 	update_pending = radeon_page_flip_pending(rdev, crtc_id);
 
 	/* Has the pageflip already completed in crtc, or is it certain
-	 * to complete in this vblank?
+	 * to complete in this vblank? GET_DISTANCE_TO_VBLANKSTART provides
+	 * distance to start of "fudged earlier" vblank in vpos, distance to
+	 * start of real vblank in hpos. vpos >= 0 && hpos < 0 means we are in
+	 * the last few scanlines before start of real vblank, where the vblank
+	 * irq can fire, so we have sampled update_pending a bit too early and
+	 * know the flip will complete at leading edge of the upcoming real
+	 * vblank. On pre-AVIVO hardware, flips also complete inside the real
+	 * vblank, not only at leading edge, so if update_pending for hpos >= 0
+	 *  == inside real vblank, the flip will complete almost immediately.
+	 * Note that this method of completion handling is still not 100% race
+	 * free, as we could execute before the radeon_flip_work_func managed
+	 * to run and set the RADEON_FLIP_SUBMITTED status, thereby we no-op,
+	 * but the flip still gets programmed into hw and completed during
+	 * vblank, leading to a delayed emission of the flip completion event.
+	 * This applies at least to pre-AVIVO hardware, where flips are always
+	 * completing inside vblank, not only at leading edge of vblank.
 	 */
 	if (update_pending &&
-	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev,
-							       crtc_id,
-							       USE_REAL_VBLANKSTART,
-							       &vpos, &hpos, NULL, NULL,
-							       &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
-	    ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
-	     (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
+	    (DRM_SCANOUTPOS_VALID &
+	     radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+					GET_DISTANCE_TO_VBLANKSTART,
+					&vpos, &hpos, NULL, NULL,
+					&rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
+	    ((vpos >= 0 && hpos < 0) || (hpos >= 0 && !ASIC_IS_AVIVO(rdev)))) {
 		/* crtc didn't flip in this target vblank interval,
 		 * but flip is pending in crtc. Based on the current
 		 * scanout position we know that the current frame is
@@ -400,14 +414,13 @@
 	struct radeon_flip_work *work =
 		container_of(__work, struct radeon_flip_work, flip_work);
 	struct radeon_device *rdev = work->rdev;
+	struct drm_device *dev = rdev->ddev;
 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
 
 	struct drm_crtc *crtc = &radeon_crtc->base;
 	unsigned long flags;
 	int r;
-	int vpos, hpos, stat, min_udelay = 0;
-	unsigned repcnt = 4;
-	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
+	int vpos, hpos;
 
 	down_read(&rdev->exclusive_lock);
 	if (work->fence) {
@@ -438,59 +451,28 @@
 		work->fence = NULL;
 	}
 
+	/* Wait until we're out of the vertical blank period before the one
+	 * targeted by the flip. Always wait on pre DCE4 to avoid races with
+	 * flip completion handling from vblank irq, as these old asics don't
+	 * have reliable pageflip completion interrupts.
+	 */
+	while (radeon_crtc->enabled &&
+		(radeon_get_crtc_scanoutpos(dev, work->crtc_id, 0,
+					    &vpos, &hpos, NULL, NULL,
+					    &crtc->hwmode)
+		& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+		(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+		(!ASIC_IS_AVIVO(rdev) ||
+		((int) (work->target_vblank -
+		dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0)))
+		usleep_range(1000, 2000);
+
 	/* We borrow the event spin lock for protecting flip_status */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
 	/* set the proper interrupt */
 	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
 
-	/* If this happens to execute within the "virtually extended" vblank
-	 * interval before the start of the real vblank interval then it needs
-	 * to delay programming the mmio flip until the real vblank is entered.
-	 * This prevents completing a flip too early due to the way we fudge
-	 * our vblank counter and vblank timestamps in order to work around the
-	 * problem that the hw fires vblank interrupts before actual start of
-	 * vblank (when line buffer refilling is done for a frame). It
-	 * complements the fudging logic in radeon_get_crtc_scanoutpos() for
-	 * timestamping and radeon_get_vblank_counter_kms() for vblank counts.
-	 *
-	 * In practice this won't execute very often unless on very fast
-	 * machines because the time window for this to happen is very small.
-	 */
-	while (radeon_crtc->enabled && --repcnt) {
-		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
-		 * start in hpos, and to the "fudged earlier" vblank start in
-		 * vpos.
-		 */
-		stat = radeon_get_crtc_scanoutpos(rdev->ddev, work->crtc_id,
-						  GET_DISTANCE_TO_VBLANKSTART,
-						  &vpos, &hpos, NULL, NULL,
-						  &crtc->hwmode);
-
-		if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
-		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
-		    !(vpos >= 0 && hpos <= 0))
-			break;
-
-		/* Sleep at least until estimated real start of hw vblank */
-		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
-		if (min_udelay > vblank->framedur_ns / 2000) {
-			/* Don't wait ridiculously long - something is wrong */
-			repcnt = 0;
-			break;
-		}
-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-		usleep_range(min_udelay, 2 * min_udelay);
-		spin_lock_irqsave(&crtc->dev->event_lock, flags);
-	};
-
-	if (!repcnt)
-		DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
-				 "framedur %d, linedur %d, stat %d, vpos %d, "
-				 "hpos %d\n", work->crtc_id, min_udelay,
-				 vblank->framedur_ns / 1000,
-				 vblank->linedur_ns / 1000, stat, vpos, hpos);
-
 	/* do the flip (mmio) */
 	radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base, work->async);
 
@@ -499,10 +481,11 @@
 	up_read(&rdev->exclusive_lock);
 }
 
-static int radeon_crtc_page_flip(struct drm_crtc *crtc,
-				 struct drm_framebuffer *fb,
-				 struct drm_pending_vblank_event *event,
-				 uint32_t page_flip_flags)
+static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
+					struct drm_framebuffer *fb,
+					struct drm_pending_vblank_event *event,
+					uint32_t page_flip_flags,
+					uint32_t target)
 {
 	struct drm_device *dev = crtc->dev;
 	struct radeon_device *rdev = dev->dev_private;
@@ -599,12 +582,8 @@
 		base &= ~7;
 	}
 	work->base = base;
-
-	r = drm_crtc_vblank_get(crtc);
-	if (r) {
-		DRM_ERROR("failed to get vblank before flip\n");
-		goto pflip_cleanup;
-	}
+	work->target_vblank = target - drm_crtc_vblank_count(crtc) +
+		dev->driver->get_vblank_counter(dev, work->crtc_id);
 
 	/* We borrow the event spin lock for protecting flip_work */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -613,7 +592,7 @@
 		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 		r = -EBUSY;
-		goto vblank_cleanup;
+		goto pflip_cleanup;
 	}
 	radeon_crtc->flip_status = RADEON_FLIP_PENDING;
 	radeon_crtc->flip_work = work;
@@ -626,9 +605,6 @@
 	queue_work(radeon_crtc->flip_queue, &work->flip_work);
 	return 0;
 
-vblank_cleanup:
-	drm_crtc_vblank_put(crtc);
-
 pflip_cleanup:
 	if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
 		DRM_ERROR("failed to reserve new rbo in error path\n");
@@ -697,7 +673,7 @@
 	.gamma_set = radeon_crtc_gamma_set,
 	.set_config = radeon_crtc_set_config,
 	.destroy = radeon_crtc_destroy,
-	.page_flip = radeon_crtc_page_flip,
+	.page_flip_target = radeon_crtc_page_flip_target,
 };
 
 static void radeon_crtc_init(struct drm_device *dev, int index)
@@ -1699,20 +1675,20 @@
 
 void radeon_modeset_fini(struct radeon_device *rdev)
 {
-	radeon_fbdev_fini(rdev);
+	if (rdev->mode_info.mode_config_initialized) {
+		drm_kms_helper_poll_fini(rdev->ddev);
+		radeon_hpd_fini(rdev);
+		drm_crtc_force_disable_all(rdev->ddev);
+		radeon_fbdev_fini(rdev);
+		radeon_afmt_fini(rdev);
+		drm_mode_config_cleanup(rdev->ddev);
+		rdev->mode_info.mode_config_initialized = false;
+	}
+
 	kfree(rdev->mode_info.bios_hardcoded_edid);
 
 	/* free i2c buses */
 	radeon_i2c_fini(rdev);
-
-	if (rdev->mode_info.mode_config_initialized) {
-		radeon_afmt_fini(rdev);
-		drm_kms_helper_poll_fini(rdev->ddev);
-		radeon_hpd_fini(rdev);
-		drm_crtc_force_disable_all(rdev->ddev);
-		drm_mode_config_cleanup(rdev->ddev);
-		rdev->mode_info.mode_config_initialized = false;
-	}
 }
 
 static bool is_hdtv_mode(const struct drm_display_mode *mode)
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index db64e00..2d46564 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -164,7 +164,6 @@
 	}
 
 	if (tmp & AUX_SW_RX_TIMEOUT) {
-		DRM_DEBUG_KMS("dp_aux_ch timed out\n");
 		ret = -ETIMEDOUT;
 		goto done;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index c01a7c6..00ea000 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -39,6 +39,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/vga_switcheroo.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_fb_helper.h>
 
 #include "drm_crtc_helper.h"
 #include "radeon_kfd.h"
@@ -94,9 +95,11 @@
  *   2.44.0 - SET_APPEND_CNT packet3 support
  *   2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
  *   2.46.0 - Add PFP_SYNC_ME support on evergreen
+ *   2.47.0 - Add UVD_NO_OP register support
+ *   2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	46
+#define KMS_DRIVER_MINOR	48
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -154,11 +157,6 @@
 extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
 				    unsigned long arg);
 
-#if defined(CONFIG_DEBUG_FS)
-int radeon_debugfs_init(struct drm_minor *minor);
-void radeon_debugfs_cleanup(struct drm_minor *minor);
-#endif
-
 /* atpx handler */
 #if defined(CONFIG_VGA_SWITCHEROO)
 void radeon_register_atpx_handler(void);
@@ -309,6 +307,8 @@
 
 static struct drm_driver kms_driver;
 
+bool radeon_device_is_virtual(void);
+
 static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
 {
 	struct apertures_struct *ap;
@@ -324,7 +324,7 @@
 #ifdef CONFIG_X86
 	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 #endif
-	remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
+	drm_fb_helper_remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
 	kfree(ap);
 
 	return 0;
@@ -362,6 +362,17 @@
 	drm_put_dev(dev);
 }
 
+static void
+radeon_pci_shutdown(struct pci_dev *pdev)
+{
+	/* if we are running in a VM, make sure the device
+	 * torn down properly on reboot/shutdown.
+	 * unfortunately we can't detect certain
+	 * hypervisors so just do this all the time.
+	 */
+	radeon_pci_remove(pdev);
+}
+
 static int radeon_pmops_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -373,6 +384,14 @@
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	/* GPU comes up enabled by the bios on resume */
+	if (radeon_is_px(drm_dev)) {
+		pm_runtime_disable(dev);
+		pm_runtime_set_active(dev);
+		pm_runtime_enable(dev);
+	}
+
 	return radeon_resume_kms(drm_dev, true, true);
 }
 
@@ -529,10 +548,6 @@
 	.disable_vblank = radeon_disable_vblank_kms,
 	.get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
 	.get_scanout_position = radeon_get_crtc_scanoutpos,
-#if defined(CONFIG_DEBUG_FS)
-	.debugfs_init = radeon_debugfs_init,
-	.debugfs_cleanup = radeon_debugfs_cleanup,
-#endif
 	.irq_preinstall = radeon_driver_irq_preinstall_kms,
 	.irq_postinstall = radeon_driver_irq_postinstall_kms,
 	.irq_uninstall = radeon_driver_irq_uninstall_kms,
@@ -574,6 +589,7 @@
 	.id_table = pciidlist,
 	.probe = radeon_pci_probe,
 	.remove = radeon_pci_remove,
+	.shutdown = radeon_pci_shutdown,
 	.driver.pm = &radeon_pm_ops,
 };
 
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0e3143a..0daad44 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -25,7 +25,7 @@
  */
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/fb.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
@@ -47,8 +47,35 @@
 	struct radeon_device *rdev;
 };
 
+static int
+radeonfb_open(struct fb_info *info, int user)
+{
+	struct radeon_fbdev *rfbdev = info->par;
+	struct radeon_device *rdev = rfbdev->rdev;
+	int ret = pm_runtime_get_sync(rdev->ddev->dev);
+	if (ret < 0 && ret != -EACCES) {
+		pm_runtime_mark_last_busy(rdev->ddev->dev);
+		pm_runtime_put_autosuspend(rdev->ddev->dev);
+		return ret;
+	}
+	return 0;
+}
+
+static int
+radeonfb_release(struct fb_info *info, int user)
+{
+	struct radeon_fbdev *rfbdev = info->par;
+	struct radeon_device *rdev = rfbdev->rdev;
+
+	pm_runtime_mark_last_busy(rdev->ddev->dev);
+	pm_runtime_put_autosuspend(rdev->ddev->dev);
+	return 0;
+}
+
 static struct fb_ops radeonfb_ops = {
 	.owner = THIS_MODULE,
+	.fb_open = radeonfb_open,
+	.fb_release = radeonfb_release,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_fillrect = drm_fb_helper_cfb_fillrect,
@@ -383,7 +410,7 @@
 void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
 {
 	if (rdev->mode_info.rfbdev)
-		fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+		drm_fb_helper_set_suspend(&rdev->mode_info.rfbdev->helper, state);
 }
 
 bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 9590bcd..29f7817 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -938,10 +938,8 @@
 			 "Radeon i2c hw bus %s", name);
 		i2c->adapter.algo = &radeon_i2c_algo;
 		ret = i2c_add_adapter(&i2c->adapter);
-		if (ret) {
-			DRM_ERROR("Failed to register hw i2c %s\n", name);
+		if (ret)
 			goto out_free;
-		}
 	} else if (rec->hw_capable &&
 		   radeon_hw_i2c &&
 		   ASIC_IS_DCE3(rdev)) {
@@ -950,10 +948,8 @@
 			 "Radeon i2c hw bus %s", name);
 		i2c->adapter.algo = &radeon_atom_i2c_algo;
 		ret = i2c_add_adapter(&i2c->adapter);
-		if (ret) {
-			DRM_ERROR("Failed to register hw i2c %s\n", name);
+		if (ret)
 			goto out_free;
-		}
 	} else {
 		/* set the radeon bit adapter */
 		snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
@@ -986,9 +982,8 @@
 {
 	if (!i2c)
 		return;
+	WARN_ON(i2c->has_aux);
 	i2c_del_adapter(&i2c->adapter);
-	if (i2c->has_aux)
-		drm_dp_aux_unregister(&i2c->aux);
 	kfree(i2c);
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 835563c..4388dde 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -641,11 +641,11 @@
 	if (rdev->family >= CHIP_CAYMAN) {
 		struct radeon_fpriv *fpriv;
 		struct radeon_vm *vm;
-		int r;
 
 		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
 		if (unlikely(!fpriv)) {
-			return -ENOMEM;
+			r = -ENOMEM;
+			goto out_suspend;
 		}
 
 		if (rdev->accel_working) {
@@ -653,14 +653,14 @@
 			r = radeon_vm_init(rdev, vm);
 			if (r) {
 				kfree(fpriv);
-				return r;
+				goto out_suspend;
 			}
 
 			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
 			if (r) {
 				radeon_vm_fini(rdev, vm);
 				kfree(fpriv);
-				return r;
+				goto out_suspend;
 			}
 
 			/* map the ib pool buffer read only into
@@ -674,15 +674,16 @@
 			if (r) {
 				radeon_vm_fini(rdev, vm);
 				kfree(fpriv);
-				return r;
+				goto out_suspend;
 			}
 		}
 		file_priv->driver_priv = fpriv;
 	}
 
+out_suspend:
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
-	return 0;
+	return r;
 }
 
 /**
@@ -717,6 +718,8 @@
 		kfree(fpriv);
 		file_priv->driver_priv = NULL;
 	}
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
 }
 
 /**
@@ -733,6 +736,8 @@
 {
 	struct radeon_device *rdev = dev->dev_private;
 
+	pm_runtime_get_sync(dev->dev);
+
 	mutex_lock(&rdev->gem.mutex);
 	if (rdev->hyperz_filp == file_priv)
 		rdev->hyperz_filp = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index c2e0a1c..3de5e6e 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -237,7 +237,8 @@
 
 	if (radeon_ttm_tt_has_userptr(bo->ttm))
 		return -EPERM;
-	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+	return drm_vma_node_verify_access(&rbo->gem_base.vma_node,
+					  filp->private_data);
 }
 
 static void radeon_move_null(struct ttm_buffer_object *bo,
@@ -346,7 +347,7 @@
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
+	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
 out_cleanup:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return r;
@@ -379,7 +380,7 @@
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -444,8 +445,7 @@
 
 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, evict, interruptible,
-				       no_wait_gpu, new_mem);
+		r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
 		if (r) {
 			return r;
 		}
@@ -566,7 +566,8 @@
 		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
 		struct page **pages = ttm->pages + pinned;
 
-		r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
+		r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
+				   pages, NULL);
 		if (r < 0)
 			goto release_pages;
 
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 73dfe01..0cd0e7b 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -669,6 +669,7 @@
 				return r;
 			break;
 		case UVD_ENGINE_CNTL:
+		case UVD_NO_OP:
 			break;
 		default:
 			DRM_ERROR("Invalid reg 0x%X!\n",
@@ -753,8 +754,10 @@
 	ib.ptr[3] = addr >> 32;
 	ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
 	ib.ptr[5] = 0;
-	for (i = 6; i < 16; ++i)
-		ib.ptr[i] = PACKET2(0);
+	for (i = 6; i < 16; i += 2) {
+		ib.ptr[i] = PACKET0(UVD_NO_OP, 0);
+		ib.ptr[i+1] = 0;
+	}
 	ib.length_dw = 16;
 
 	r = radeon_ib_schedule(rdev, &ib, NULL, false);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 1c120a4..729ae58 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1738,7 +1738,7 @@
 		return;
 
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
 	if (r) {
 		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
 		return;
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9ef2064..0271f4c 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -387,6 +387,7 @@
 #define UVD_UDEC_TILING_CONFIG                          0xef40
 #define UVD_UDEC_DB_TILING_CONFIG                       0xef44
 #define UVD_UDEC_DBW_TILING_CONFIG                      0xef48
+#define UVD_NO_OP					0xeffc
 
 #define	GC_USER_SHADER_PIPE_CONFIG			0x8954
 #define		INACTIVE_QD_PIPES(x)				((x) << 8)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 2523ca9..e402be8 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1547,7 +1547,7 @@
 {
 	const __be32 *fw_data = NULL;
 	const __le32 *new_fw_data = NULL;
-	u32 running, blackout = 0;
+	u32 running;
 	u32 *io_mc_regs = NULL;
 	const __le32 *new_io_mc_regs = NULL;
 	int i, regs_size, ucode_size;
@@ -1598,11 +1598,6 @@
 	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
 
 	if (running == 0) {
-		if (running) {
-			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
-			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
-		}
-
 		/* reset the engine and set to writable */
 		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
 		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
@@ -1641,9 +1636,6 @@
 				break;
 			udelay(1);
 		}
-
-		if (running)
-			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
 	}
 
 	return 0;
@@ -4439,6 +4431,7 @@
 	case SPI_CONFIG_CNTL:
 	case SPI_CONFIG_CNTL_1:
 	case TA_CNTL_AUX:
+	case TA_CS_BC_BASE_ADDR:
 		return true;
 	default:
 		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
@@ -6928,7 +6921,7 @@
 		return;
 
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
 	if (r) {
 		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
 		return;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 1f78ec2..89bdf20 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -4112,7 +4112,7 @@
 							      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
 				si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
 
-				table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+				table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
 					cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
 
 				si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index d1a7b58..65a911d 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1145,6 +1145,7 @@
 #define	SPI_LB_CU_MASK					0x9354
 
 #define	TA_CNTL_AUX					0x9508
+#define	TA_CS_BC_BASE_ADDR				0x950C
 
 #define CC_RB_BACKEND_DISABLE				0x98F4
 #define		BACKEND_DISABLE(x)     			((x) << 16)
@@ -1559,6 +1560,7 @@
 #define UVD_UDEC_ADDR_CONFIG				0xEF4C
 #define UVD_UDEC_DB_ADDR_CONFIG				0xEF50
 #define UVD_UDEC_DBW_ADDR_CONFIG			0xEF54
+#define UVD_NO_OP					0xEFFC
 #define UVD_RBC_RB_RPTR					0xF690
 #define UVD_RBC_RB_WPTR					0xF694
 #define UVD_STATUS					0xf6bc
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
index 3c77983..966e3a5 100644
--- a/drivers/gpu/drm/radeon/sislands_smc.h
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -194,6 +194,7 @@
 #define SISLANDS_SMC_VOLTAGEMASK_VDDC  0
 #define SISLANDS_SMC_VOLTAGEMASK_MVDD  1
 #define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3
 #define SISLANDS_SMC_VOLTAGEMASK_MAX   4
 
 struct SISLANDS_SMC_VOLTAGEMASKTABLE
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 899ef7a..73c971e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -316,8 +316,8 @@
 	rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
 
 	ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
-	if (!ddev)
-		return -ENOMEM;
+	if (IS_ERR(ddev))
+		return PTR_ERR(ddev);
 
 	rcdu->ddev = ddev;
 	ddev->dev_private = rcdu;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index f03eb55..bd9c3bb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -257,7 +257,8 @@
 	/* Apply the atomic update. */
 	drm_atomic_helper_commit_modeset_disables(dev, old_state);
 	drm_atomic_helper_commit_modeset_enables(dev, old_state);
-	drm_atomic_helper_commit_planes(dev, old_state, true);
+	drm_atomic_helper_commit_planes(dev, old_state,
+					DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
 	drm_atomic_helper_wait_for_vblanks(dev, old_state);
 
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 05d0713..9746365 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -3,7 +3,7 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
-		rockchip_drm_gem.o rockchip_drm_vop.o
+		rockchip_drm_gem.o rockchip_drm_psr.o rockchip_drm_vop.o
 rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
 
 obj-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 89aadbf..8548e82 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -32,6 +32,7 @@
 #include <drm/bridge/analogix_dp.h>
 
 #include "rockchip_drm_drv.h"
+#include "rockchip_drm_psr.h"
 #include "rockchip_drm_vop.h"
 
 #define RK3288_GRF_SOC_CON6		0x25c
@@ -41,6 +42,8 @@
 
 #define HIWORD_UPDATE(val, mask)	(val | (mask) << 16)
 
+#define PSR_WAIT_LINE_FLAG_TIMEOUT_MS	100
+
 #define to_dp(nm)	container_of(nm, struct rockchip_dp_device, nm)
 
 /**
@@ -68,11 +71,65 @@
 	struct regmap            *grf;
 	struct reset_control     *rst;
 
+	struct work_struct	 psr_work;
+	spinlock_t		 psr_lock;
+	unsigned int             psr_state;
+
 	const struct rockchip_dp_chip_data *data;
 
 	struct analogix_dp_plat_data plat_data;
 };
 
+static void analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled)
+{
+	struct rockchip_dp_device *dp = to_dp(encoder);
+	unsigned long flags;
+
+	if (!analogix_dp_psr_supported(dp->dev))
+		return;
+
+	dev_dbg(dp->dev, "%s PSR...\n", enabled ? "Entry" : "Exit");
+
+	spin_lock_irqsave(&dp->psr_lock, flags);
+	if (enabled)
+		dp->psr_state = EDP_VSC_PSR_STATE_ACTIVE;
+	else
+		dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE;
+
+	schedule_work(&dp->psr_work);
+	spin_unlock_irqrestore(&dp->psr_lock, flags);
+}
+
+static void analogix_dp_psr_work(struct work_struct *work)
+{
+	struct rockchip_dp_device *dp =
+				container_of(work, typeof(*dp), psr_work);
+	struct drm_crtc *crtc = dp->encoder.crtc;
+	int psr_state = dp->psr_state;
+	int vact_end;
+	int ret;
+	unsigned long flags;
+
+	if (!crtc)
+		return;
+
+	vact_end = crtc->mode.vtotal - crtc->mode.vsync_start + crtc->mode.vdisplay;
+
+	ret = rockchip_drm_wait_line_flag(dp->encoder.crtc, vact_end,
+					  PSR_WAIT_LINE_FLAG_TIMEOUT_MS);
+	if (ret) {
+		dev_err(dp->dev, "line flag interrupt did not arrive\n");
+		return;
+	}
+
+	spin_lock_irqsave(&dp->psr_lock, flags);
+	if (psr_state == EDP_VSC_PSR_STATE_ACTIVE)
+		analogix_dp_enable_psr(dp->dev);
+	else
+		analogix_dp_disable_psr(dp->dev);
+	spin_unlock_irqrestore(&dp->psr_lock, flags);
+}
+
 static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
 {
 	reset_control_assert(dp->rst);
@@ -87,6 +144,8 @@
 	struct rockchip_dp_device *dp = to_dp(plat_data);
 	int ret;
 
+	cancel_work_sync(&dp->psr_work);
+
 	ret = clk_prepare_enable(dp->pclk);
 	if (ret < 0) {
 		dev_err(dp->dev, "failed to enable pclk %d\n", ret);
@@ -342,12 +401,22 @@
 	dp->plat_data.power_off = rockchip_dp_powerdown;
 	dp->plat_data.get_modes = rockchip_dp_get_modes;
 
+	spin_lock_init(&dp->psr_lock);
+	dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE;
+	INIT_WORK(&dp->psr_work, analogix_dp_psr_work);
+
+	rockchip_drm_psr_register(&dp->encoder, analogix_dp_psr_set);
+
 	return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
 }
 
 static void rockchip_dp_unbind(struct device *dev, struct device *master,
 			       void *data)
 {
+	struct rockchip_dp_device *dp = dev_get_drvdata(dev);
+
+	rockchip_drm_psr_unregister(&dp->encoder);
+
 	return analogix_dp_unbind(dev, master, data);
 }
 
@@ -381,10 +450,8 @@
 
 		panel = of_drm_find_panel(panel_node);
 		of_node_put(panel_node);
-		if (!panel) {
-			DRM_ERROR("failed to find panel\n");
+		if (!panel)
 			return -EPROBE_DEFER;
-		}
 	}
 
 	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
@@ -445,7 +512,6 @@
 	.remove = rockchip_dp_remove,
 	.driver = {
 		   .name = "rockchip-dp",
-		   .owner = THIS_MODULE,
 		   .pm = &rockchip_dp_pm_ops,
 		   .of_match_table = of_match_ptr(rockchip_dp_dt_ids),
 	},
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index a822d49..8c8cbe8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -143,8 +143,8 @@
 	int ret;
 
 	drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
-	if (!drm_dev)
-		return -ENOMEM;
+	if (IS_ERR(drm_dev))
+		return PTR_ERR(drm_dev);
 
 	dev_set_drvdata(dev, drm_dev);
 
@@ -156,6 +156,9 @@
 
 	drm_dev->dev_private = private;
 
+	INIT_LIST_HEAD(&private->psr_list);
+	spin_lock_init(&private->psr_list_lock);
+
 	drm_mode_config_init(drm_dev);
 
 	rockchip_drm_mode_config_init(drm_dev);
@@ -306,7 +309,7 @@
 };
 
 #ifdef CONFIG_PM_SLEEP
-void rockchip_drm_fb_suspend(struct drm_device *drm)
+static void rockchip_drm_fb_suspend(struct drm_device *drm)
 {
 	struct rockchip_drm_private *priv = drm->dev_private;
 
@@ -315,7 +318,7 @@
 	console_unlock();
 }
 
-void rockchip_drm_fb_resume(struct drm_device *drm)
+static void rockchip_drm_fb_resume(struct drm_device *drm)
 {
 	struct rockchip_drm_private *priv = drm->dev_private;
 
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index ea39329..fb6226c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -39,7 +39,6 @@
 struct rockchip_crtc_funcs {
 	int (*enable_vblank)(struct drm_crtc *crtc);
 	void (*disable_vblank)(struct drm_crtc *crtc);
-	void (*wait_for_update)(struct drm_crtc *crtc);
 };
 
 struct rockchip_crtc_state {
@@ -61,6 +60,9 @@
 	struct drm_gem_object *fbdev_bo;
 	const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
 	struct drm_atomic_state *state;
+
+	struct list_head psr_list;
+	spinlock_t psr_list_lock;
 };
 
 int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
@@ -70,4 +72,7 @@
 				   struct device *dev);
 void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
 				    struct device *dev);
+int rockchip_drm_wait_line_flag(struct drm_crtc *crtc, unsigned int line_num,
+				unsigned int mstimeout);
+
 #endif /* _ROCKCHIP_DRM_DRV_H_ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 55c5273..0f6eda0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -22,6 +22,7 @@
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_fb.h"
 #include "rockchip_drm_gem.h"
+#include "rockchip_drm_psr.h"
 
 #define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
 
@@ -63,9 +64,20 @@
 				     rockchip_fb->obj[0], handle);
 }
 
+static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
+				 struct drm_file *file,
+				 unsigned int flags, unsigned int color,
+				 struct drm_clip_rect *clips,
+				 unsigned int num_clips)
+{
+	rockchip_drm_psr_flush_all(fb->dev);
+	return 0;
+}
+
 static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
 	.destroy	= rockchip_drm_fb_destroy,
 	.create_handle	= rockchip_drm_fb_create_handle,
+	.dirty		= rockchip_drm_fb_dirty,
 };
 
 static struct rockchip_drm_fb *
@@ -162,68 +174,6 @@
 		drm_fb_helper_hotplug_event(fb_helper);
 }
 
-static void rockchip_crtc_wait_for_update(struct drm_crtc *crtc)
-{
-	struct rockchip_drm_private *priv = crtc->dev->dev_private;
-	int pipe = drm_crtc_index(crtc);
-	const struct rockchip_crtc_funcs *crtc_funcs = priv->crtc_funcs[pipe];
-
-	if (crtc_funcs && crtc_funcs->wait_for_update)
-		crtc_funcs->wait_for_update(crtc);
-}
-
-/*
- * We can't use drm_atomic_helper_wait_for_vblanks() because rk3288 and rk3066
- * have hardware counters for neither vblanks nor scanlines, which results in
- * a race where:
- *				| <-- HW vsync irq and reg take effect
- *	       plane_commit --> |
- *	get_vblank and wait --> |
- *				| <-- handle_vblank, vblank->count + 1
- *		 cleanup_fb --> |
- *		iommu crash --> |
- *				| <-- HW vsync irq and reg take effect
- *
- * This function is equivalent but uses rockchip_crtc_wait_for_update() instead
- * of waiting for vblank_count to change.
- */
-static void
-rockchip_atomic_wait_for_complete(struct drm_device *dev, struct drm_atomic_state *old_state)
-{
-	struct drm_crtc_state *old_crtc_state;
-	struct drm_crtc *crtc;
-	int i, ret;
-
-	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
-		/* No one cares about the old state, so abuse it for tracking
-		 * and store whether we hold a vblank reference (and should do a
-		 * vblank wait) in the ->enable boolean.
-		 */
-		old_crtc_state->enable = false;
-
-		if (!crtc->state->active)
-			continue;
-
-		if (!drm_atomic_helper_framebuffer_changed(dev,
-				old_state, crtc))
-			continue;
-
-		ret = drm_crtc_vblank_get(crtc);
-		if (ret != 0)
-			continue;
-
-		old_crtc_state->enable = true;
-	}
-
-	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
-		if (!old_crtc_state->enable)
-			continue;
-
-		rockchip_crtc_wait_for_update(crtc);
-		drm_crtc_vblank_put(crtc);
-	}
-}
-
 static void
 rockchip_atomic_commit_tail(struct drm_atomic_state *state)
 {
@@ -233,11 +183,12 @@
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
-	drm_atomic_helper_commit_planes(dev, state, true);
+	drm_atomic_helper_commit_planes(dev, state,
+					DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
 	drm_atomic_helper_commit_hw_done(state);
 
-	rockchip_atomic_wait_for_complete(dev, state);
+	drm_atomic_helper_wait_for_vblanks(dev, state);
 
 	drm_atomic_helper_cleanup_planes(dev, state);
 }
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 207e01d..a16c69f 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -20,6 +20,7 @@
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_gem.h"
 #include "rockchip_drm_fb.h"
+#include "rockchip_drm_fbdev.h"
 
 #define PREFERRED_BPP		32
 #define to_drm_private(x) \
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
new file mode 100644
index 0000000..a553e182
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Yakir Yang <ykk@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_psr.h"
+
+#define PSR_FLUSH_TIMEOUT	msecs_to_jiffies(100)
+
+enum psr_state {
+	PSR_FLUSH,
+	PSR_ENABLE,
+	PSR_DISABLE,
+};
+
+struct psr_drv {
+	struct list_head	list;
+	struct drm_encoder	*encoder;
+
+	spinlock_t		lock;
+	bool			active;
+	enum psr_state		state;
+
+	struct timer_list	flush_timer;
+
+	void (*set)(struct drm_encoder *encoder, bool enable);
+};
+
+static struct psr_drv *find_psr_by_crtc(struct drm_crtc *crtc)
+{
+	struct rockchip_drm_private *drm_drv = crtc->dev->dev_private;
+	struct psr_drv *psr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+	list_for_each_entry(psr, &drm_drv->psr_list, list) {
+		if (psr->encoder->crtc == crtc)
+			goto out;
+	}
+	psr = ERR_PTR(-ENODEV);
+
+out:
+	spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+	return psr;
+}
+
+static void psr_set_state_locked(struct psr_drv *psr, enum psr_state state)
+{
+	/*
+	 * Allowed finite state machine:
+	 *
+	 *   PSR_ENABLE  < = = = = = >  PSR_FLUSH
+	 *       | ^                        |
+	 *       | |                        |
+	 *       v |                        |
+	 *   PSR_DISABLE < - - - - - - - - -
+	 */
+	if (state == psr->state || !psr->active)
+		return;
+
+	/* Already disabled in flush, change the state, but not the hardware */
+	if (state == PSR_DISABLE && psr->state == PSR_FLUSH) {
+		psr->state = state;
+		return;
+	}
+
+	psr->state = state;
+
+	/* Actually commit the state change to hardware */
+	switch (psr->state) {
+	case PSR_ENABLE:
+		psr->set(psr->encoder, true);
+		break;
+
+	case PSR_DISABLE:
+	case PSR_FLUSH:
+		psr->set(psr->encoder, false);
+		break;
+	}
+}
+
+static void psr_set_state(struct psr_drv *psr, enum psr_state state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&psr->lock, flags);
+	psr_set_state_locked(psr, state);
+	spin_unlock_irqrestore(&psr->lock, flags);
+}
+
+static void psr_flush_handler(unsigned long data)
+{
+	struct psr_drv *psr = (struct psr_drv *)data;
+	unsigned long flags;
+
+	/* If the state has changed since we initiated the flush, do nothing */
+	spin_lock_irqsave(&psr->lock, flags);
+	if (psr->state == PSR_FLUSH)
+		psr_set_state_locked(psr, PSR_ENABLE);
+	spin_unlock_irqrestore(&psr->lock, flags);
+}
+
+/**
+ * rockchip_drm_psr_activate - activate PSR on the given pipe
+ * @crtc: CRTC to obtain the PSR encoder
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_psr_activate(struct drm_crtc *crtc)
+{
+	struct psr_drv *psr = find_psr_by_crtc(crtc);
+	unsigned long flags;
+
+	if (IS_ERR(psr))
+		return PTR_ERR(psr);
+
+	spin_lock_irqsave(&psr->lock, flags);
+	psr->active = true;
+	spin_unlock_irqrestore(&psr->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_psr_activate);
+
+/**
+ * rockchip_drm_psr_deactivate - deactivate PSR on the given pipe
+ * @crtc: CRTC to obtain the PSR encoder
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_psr_deactivate(struct drm_crtc *crtc)
+{
+	struct psr_drv *psr = find_psr_by_crtc(crtc);
+	unsigned long flags;
+
+	if (IS_ERR(psr))
+		return PTR_ERR(psr);
+
+	spin_lock_irqsave(&psr->lock, flags);
+	psr->active = false;
+	spin_unlock_irqrestore(&psr->lock, flags);
+	del_timer_sync(&psr->flush_timer);
+
+	return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_psr_deactivate);
+
+static void rockchip_drm_do_flush(struct psr_drv *psr)
+{
+	mod_timer(&psr->flush_timer,
+		  round_jiffies_up(jiffies + PSR_FLUSH_TIMEOUT));
+	psr_set_state(psr, PSR_FLUSH);
+}
+
+/**
+ * rockchip_drm_psr_flush - flush a single pipe
+ * @crtc: CRTC of the pipe to flush
+ *
+ * Returns:
+ * 0 on success, -errno on fail
+ */
+int rockchip_drm_psr_flush(struct drm_crtc *crtc)
+{
+	struct psr_drv *psr = find_psr_by_crtc(crtc);
+	if (IS_ERR(psr))
+		return PTR_ERR(psr);
+
+	rockchip_drm_do_flush(psr);
+	return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_psr_flush);
+
+/**
+ * rockchip_drm_psr_flush_all - force to flush all registered PSR encoders
+ * @dev: drm device
+ *
+ * Disable the PSR function for all registered encoders, and then enable the
+ * PSR function back after PSR_FLUSH_TIMEOUT. If encoder PSR state have been
+ * changed during flush time, then keep the state no change after flush
+ * timeout.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+void rockchip_drm_psr_flush_all(struct drm_device *dev)
+{
+	struct rockchip_drm_private *drm_drv = dev->dev_private;
+	struct psr_drv *psr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+	list_for_each_entry(psr, &drm_drv->psr_list, list)
+		rockchip_drm_do_flush(psr);
+	spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+}
+EXPORT_SYMBOL(rockchip_drm_psr_flush_all);
+
+/**
+ * rockchip_drm_psr_register - register encoder to psr driver
+ * @encoder: encoder that obtain the PSR function
+ * @psr_set: call back to set PSR state
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_psr_register(struct drm_encoder *encoder,
+			void (*psr_set)(struct drm_encoder *, bool enable))
+{
+	struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
+	struct psr_drv *psr;
+	unsigned long flags;
+
+	if (!encoder || !psr_set)
+		return -EINVAL;
+
+	psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL);
+	if (!psr)
+		return -ENOMEM;
+
+	setup_timer(&psr->flush_timer, psr_flush_handler, (unsigned long)psr);
+	spin_lock_init(&psr->lock);
+
+	psr->active = true;
+	psr->state = PSR_DISABLE;
+	psr->encoder = encoder;
+	psr->set = psr_set;
+
+	spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+	list_add_tail(&psr->list, &drm_drv->psr_list);
+	spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_psr_register);
+
+/**
+ * rockchip_drm_psr_unregister - unregister encoder to psr driver
+ * @encoder: encoder that obtain the PSR function
+ * @psr_set: call back to set PSR state
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+void rockchip_drm_psr_unregister(struct drm_encoder *encoder)
+{
+	struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
+	struct psr_drv *psr, *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+	list_for_each_entry_safe(psr, n, &drm_drv->psr_list, list) {
+		if (psr->encoder == encoder) {
+			del_timer(&psr->flush_timer);
+			list_del(&psr->list);
+			kfree(psr);
+		}
+	}
+	spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+}
+EXPORT_SYMBOL(rockchip_drm_psr_unregister);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
new file mode 100644
index 0000000..b420cf1
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Yakir Yang <ykk@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ROCKCHIP_DRM_PSR___
+#define __ROCKCHIP_DRM_PSR___
+
+void rockchip_drm_psr_flush_all(struct drm_device *dev);
+int rockchip_drm_psr_flush(struct drm_crtc *crtc);
+
+int rockchip_drm_psr_activate(struct drm_crtc *crtc);
+int rockchip_drm_psr_deactivate(struct drm_crtc *crtc);
+
+int rockchip_drm_psr_register(struct drm_encoder *encoder,
+			void (*psr_set)(struct drm_encoder *, bool enable));
+void rockchip_drm_psr_unregister(struct drm_encoder *encoder);
+
+#endif /* __ROCKCHIP_DRM_PSR__ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 91305eb..c7eba30 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -17,12 +17,14 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_flip_work.h>
 #include <drm/drm_plane_helper.h>
 
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/iopoll.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/pm_runtime.h>
@@ -34,17 +36,21 @@
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_gem.h"
 #include "rockchip_drm_fb.h"
+#include "rockchip_drm_psr.h"
 #include "rockchip_drm_vop.h"
 
-#define __REG_SET_RELAXED(x, off, mask, shift, v) \
-		vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift)
-#define __REG_SET_NORMAL(x, off, mask, shift, v) \
-		vop_mask_write(x, off, (mask) << shift, (v) << shift)
+#define __REG_SET_RELAXED(x, off, mask, shift, v, write_mask) \
+		vop_mask_write(x, off, mask, shift, v, write_mask, true)
+
+#define __REG_SET_NORMAL(x, off, mask, shift, v, write_mask) \
+		vop_mask_write(x, off, mask, shift, v, write_mask, false)
 
 #define REG_SET(x, base, reg, v, mode) \
-		__REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
+		__REG_SET_##mode(x, base + reg.offset, \
+				 reg.mask, reg.shift, v, reg.write_mask)
 #define REG_SET_MASK(x, base, reg, mask, v, mode) \
-		__REG_SET_##mode(x, base + reg.offset, mask, reg.shift, v)
+		__REG_SET_##mode(x, base + reg.offset, \
+				 mask, reg.shift, v, reg.write_mask)
 
 #define VOP_WIN_SET(x, win, name, v) \
 		REG_SET(x, win->base, win->phy->name, v, RELAXED)
@@ -82,25 +88,15 @@
 
 #define to_vop(x) container_of(x, struct vop, crtc)
 #define to_vop_win(x) container_of(x, struct vop_win, base)
-#define to_vop_plane_state(x) container_of(x, struct vop_plane_state, base)
 
-struct vop_plane_state {
-	struct drm_plane_state base;
-	int format;
-	struct drm_rect src;
-	struct drm_rect dest;
-	dma_addr_t yrgb_mst;
-	bool enable;
+enum vop_pending {
+	VOP_PENDING_FB_UNREF,
 };
 
 struct vop_win {
 	struct drm_plane base;
 	const struct vop_win_data *data;
 	struct vop *vop;
-
-	/* protected by dev->event_lock */
-	bool enable;
-	dma_addr_t yrgb_mst;
 };
 
 struct vop {
@@ -113,11 +109,15 @@
 	struct mutex vsync_mutex;
 	bool vsync_work_pending;
 	struct completion dsp_hold_completion;
-	struct completion wait_update_complete;
 
 	/* protected by dev->event_lock */
 	struct drm_pending_vblank_event *event;
 
+	struct drm_flip_work fb_unref_work;
+	unsigned long pending;
+
+	struct completion line_flag_completion;
+
 	const struct vop_data *data;
 
 	uint32_t *regsbak;
@@ -164,27 +164,25 @@
 }
 
 static inline void vop_mask_write(struct vop *vop, uint32_t offset,
-				  uint32_t mask, uint32_t v)
+				  uint32_t mask, uint32_t shift, uint32_t v,
+				  bool write_mask, bool relaxed)
 {
-	if (mask) {
+	if (!mask)
+		return;
+
+	if (write_mask) {
+		v = ((v << shift) & 0xffff) | (mask << (shift + 16));
+	} else {
 		uint32_t cached_val = vop->regsbak[offset >> 2];
 
-		cached_val = (cached_val & ~mask) | v;
-		writel(cached_val, vop->regs + offset);
-		vop->regsbak[offset >> 2] = cached_val;
+		v = (cached_val & ~(mask << shift)) | ((v & mask) << shift);
+		vop->regsbak[offset >> 2] = v;
 	}
-}
 
-static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
-					  uint32_t mask, uint32_t v)
-{
-	if (mask) {
-		uint32_t cached_val = vop->regsbak[offset >> 2];
-
-		cached_val = (cached_val & ~mask) | v;
-		writel_relaxed(cached_val, vop->regs + offset);
-		vop->regsbak[offset >> 2] = cached_val;
-	}
+	if (relaxed)
+		writel_relaxed(v, vop->regs + offset);
+	else
+		writel(v, vop->regs + offset);
 }
 
 static inline uint32_t vop_get_intr_type(struct vop *vop,
@@ -240,7 +238,7 @@
 	case DRM_FORMAT_NV24:
 		return VOP_FMT_YUV444SP;
 	default:
-		DRM_ERROR("unsupport format[%08x]\n", format);
+		DRM_ERROR("unsupported format[%08x]\n", format);
 		return -EINVAL;
 	}
 }
@@ -317,7 +315,7 @@
 	int vskiplines = 0;
 
 	if (dst_w > 3840) {
-		DRM_ERROR("Maximum destination width (3840) exceeded\n");
+		DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
 		return;
 	}
 
@@ -355,11 +353,11 @@
 	VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
 	if (lb_mode == LB_RGB_3840X2) {
 		if (yrgb_ver_scl_mode != SCALE_NONE) {
-			DRM_ERROR("ERROR : not allow yrgb ver scale\n");
+			DRM_DEV_ERROR(vop->dev, "not allow yrgb ver scale\n");
 			return;
 		}
 		if (cbcr_ver_scl_mode != SCALE_NONE) {
-			DRM_ERROR("ERROR : not allow cbcr ver scale\n");
+			DRM_DEV_ERROR(vop->dev, "not allow cbcr ver scale\n");
 			return;
 		}
 		vsu_mode = SCALE_UP_BIL;
@@ -411,6 +409,7 @@
 
 	spin_lock_irqsave(&vop->irq_lock, flags);
 
+	VOP_INTR_SET_TYPE(vop, clear, DSP_HOLD_VALID_INTR, 1);
 	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);
 
 	spin_unlock_irqrestore(&vop->irq_lock, flags);
@@ -430,7 +429,73 @@
 	spin_unlock_irqrestore(&vop->irq_lock, flags);
 }
 
-static void vop_enable(struct drm_crtc *crtc)
+/*
+ * (1) each frame starts at the start of the Vsync pulse which is signaled by
+ *     the "FRAME_SYNC" interrupt.
+ * (2) the active data region of each frame ends at dsp_vact_end
+ * (3) we should program this same number (dsp_vact_end) into dsp_line_frag_num,
+ *      to get "LINE_FLAG" interrupt at the end of the active on screen data.
+ *
+ * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end
+ * Interrupts
+ * LINE_FLAG -------------------------------+
+ * FRAME_SYNC ----+                         |
+ *                |                         |
+ *                v                         v
+ *                | Vsync | Vbp |  Vactive  | Vfp |
+ *                        ^     ^           ^     ^
+ *                        |     |           |     |
+ *                        |     |           |     |
+ * dsp_vs_end ------------+     |           |     |   VOP_DSP_VTOTAL_VS_END
+ * dsp_vact_start --------------+           |     |   VOP_DSP_VACT_ST_END
+ * dsp_vact_end ----------------------------+     |   VOP_DSP_VACT_ST_END
+ * dsp_total -------------------------------------+   VOP_DSP_VTOTAL_VS_END
+ */
+static bool vop_line_flag_irq_is_enabled(struct vop *vop)
+{
+	uint32_t line_flag_irq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vop->irq_lock, flags);
+
+	line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR);
+
+	spin_unlock_irqrestore(&vop->irq_lock, flags);
+
+	return !!line_flag_irq;
+}
+
+static void vop_line_flag_irq_enable(struct vop *vop, int line_num)
+{
+	unsigned long flags;
+
+	if (WARN_ON(!vop->is_enabled))
+		return;
+
+	spin_lock_irqsave(&vop->irq_lock, flags);
+
+	VOP_CTRL_SET(vop, line_flag_num[0], line_num);
+	VOP_INTR_SET_TYPE(vop, clear, LINE_FLAG_INTR, 1);
+	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1);
+
+	spin_unlock_irqrestore(&vop->irq_lock, flags);
+}
+
+static void vop_line_flag_irq_disable(struct vop *vop)
+{
+	unsigned long flags;
+
+	if (WARN_ON(!vop->is_enabled))
+		return;
+
+	spin_lock_irqsave(&vop->irq_lock, flags);
+
+	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0);
+
+	spin_unlock_irqrestore(&vop->irq_lock, flags);
+}
+
+static int vop_enable(struct drm_crtc *crtc)
 {
 	struct vop *vop = to_vop(crtc);
 	int ret;
@@ -438,26 +503,20 @@
 	ret = pm_runtime_get_sync(vop->dev);
 	if (ret < 0) {
 		dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
-		return;
+		goto err_put_pm_runtime;
 	}
 
 	ret = clk_enable(vop->hclk);
-	if (ret < 0) {
-		dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
-		return;
-	}
+	if (WARN_ON(ret < 0))
+		goto err_put_pm_runtime;
 
 	ret = clk_enable(vop->dclk);
-	if (ret < 0) {
-		dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
+	if (WARN_ON(ret < 0))
 		goto err_disable_hclk;
-	}
 
 	ret = clk_enable(vop->aclk);
-	if (ret < 0) {
-		dev_err(vop->dev, "failed to enable aclk - %d\n", ret);
+	if (WARN_ON(ret < 0))
 		goto err_disable_dclk;
-	}
 
 	/*
 	 * Slave iommu shares power, irq and clock with vop.  It was associated
@@ -487,7 +546,7 @@
 
 	drm_crtc_vblank_on(crtc);
 
-	return;
+	return 0;
 
 err_disable_aclk:
 	clk_disable(vop->aclk);
@@ -495,6 +554,9 @@
 	clk_disable(vop->dclk);
 err_disable_hclk:
 	clk_disable(vop->hclk);
+err_put_pm_runtime:
+	pm_runtime_put_sync(vop->dev);
+	return ret;
 }
 
 static void vop_crtc_disable(struct drm_crtc *crtc)
@@ -504,6 +566,8 @@
 
 	WARN_ON(vop->event);
 
+	rockchip_drm_psr_deactivate(&vop->crtc);
+
 	/*
 	 * We need to make sure that all windows are disabled before we
 	 * disable that crtc. Otherwise we might try to scan from a destroyed
@@ -568,22 +632,6 @@
 	drm_plane_cleanup(plane);
 }
 
-static int vop_plane_prepare_fb(struct drm_plane *plane,
-				const struct drm_plane_state *new_state)
-{
-	if (plane->state->fb)
-		drm_framebuffer_reference(plane->state->fb);
-
-	return 0;
-}
-
-static void vop_plane_cleanup_fb(struct drm_plane *plane,
-				 const struct drm_plane_state *old_state)
-{
-	if (old_state->fb)
-		drm_framebuffer_unreference(old_state->fb);
-}
-
 static int vop_plane_atomic_check(struct drm_plane *plane,
 			   struct drm_plane_state *state)
 {
@@ -591,12 +639,8 @@
 	struct drm_crtc_state *crtc_state;
 	struct drm_framebuffer *fb = state->fb;
 	struct vop_win *vop_win = to_vop_win(plane);
-	struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
 	const struct vop_win_data *win = vop_win->data;
-	bool visible;
 	int ret;
-	struct drm_rect *dest = &vop_plane_state->dest;
-	struct drm_rect *src = &vop_plane_state->src;
 	struct drm_rect clip;
 	int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
 					DRM_PLANE_HELPER_NO_SCALING;
@@ -604,62 +648,43 @@
 					DRM_PLANE_HELPER_NO_SCALING;
 
 	if (!crtc || !fb)
-		goto out_disable;
+		return 0;
 
 	crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
 	if (WARN_ON(!crtc_state))
 		return -EINVAL;
 
-	src->x1 = state->src_x;
-	src->y1 = state->src_y;
-	src->x2 = state->src_x + state->src_w;
-	src->y2 = state->src_y + state->src_h;
-	dest->x1 = state->crtc_x;
-	dest->y1 = state->crtc_y;
-	dest->x2 = state->crtc_x + state->crtc_w;
-	dest->y2 = state->crtc_y + state->crtc_h;
-
 	clip.x1 = 0;
 	clip.y1 = 0;
 	clip.x2 = crtc_state->adjusted_mode.hdisplay;
 	clip.y2 = crtc_state->adjusted_mode.vdisplay;
 
-	ret = drm_plane_helper_check_update(plane, crtc, state->fb,
-					    src, dest, &clip,
-					    state->rotation,
-					    min_scale,
-					    max_scale,
-					    true, true, &visible);
+	ret = drm_plane_helper_check_state(state, &clip,
+					   min_scale, max_scale,
+					   true, true);
 	if (ret)
 		return ret;
 
-	if (!visible)
-		goto out_disable;
+	if (!state->visible)
+		return 0;
 
-	vop_plane_state->format = vop_convert_format(fb->pixel_format);
-	if (vop_plane_state->format < 0)
-		return vop_plane_state->format;
+	ret = vop_convert_format(fb->pixel_format);
+	if (ret < 0)
+		return ret;
 
 	/*
 	 * Src.x1 can be odd when do clip, but yuv plane start point
 	 * need align with 2 pixel.
 	 */
-	if (is_yuv_support(fb->pixel_format) && ((src->x1 >> 16) % 2))
+	if (is_yuv_support(fb->pixel_format) && ((state->src.x1 >> 16) % 2))
 		return -EINVAL;
 
-	vop_plane_state->enable = true;
-
-	return 0;
-
-out_disable:
-	vop_plane_state->enable = false;
 	return 0;
 }
 
 static void vop_plane_atomic_disable(struct drm_plane *plane,
 				     struct drm_plane_state *old_state)
 {
-	struct vop_plane_state *vop_plane_state = to_vop_plane_state(old_state);
 	struct vop_win *vop_win = to_vop_win(plane);
 	const struct vop_win_data *win = vop_win->data;
 	struct vop *vop = to_vop(old_state->crtc);
@@ -667,18 +692,11 @@
 	if (!old_state->crtc)
 		return;
 
-	spin_lock_irq(&plane->dev->event_lock);
-	vop_win->enable = false;
-	vop_win->yrgb_mst = 0;
-	spin_unlock_irq(&plane->dev->event_lock);
-
 	spin_lock(&vop->reg_lock);
 
 	VOP_WIN_SET(vop, win, enable, 0);
 
 	spin_unlock(&vop->reg_lock);
-
-	vop_plane_state->enable = false;
 }
 
 static void vop_plane_atomic_update(struct drm_plane *plane,
@@ -687,21 +705,21 @@
 	struct drm_plane_state *state = plane->state;
 	struct drm_crtc *crtc = state->crtc;
 	struct vop_win *vop_win = to_vop_win(plane);
-	struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
 	const struct vop_win_data *win = vop_win->data;
 	struct vop *vop = to_vop(state->crtc);
 	struct drm_framebuffer *fb = state->fb;
 	unsigned int actual_w, actual_h;
 	unsigned int dsp_stx, dsp_sty;
 	uint32_t act_info, dsp_info, dsp_st;
-	struct drm_rect *src = &vop_plane_state->src;
-	struct drm_rect *dest = &vop_plane_state->dest;
+	struct drm_rect *src = &state->src;
+	struct drm_rect *dest = &state->dst;
 	struct drm_gem_object *obj, *uv_obj;
 	struct rockchip_gem_object *rk_obj, *rk_uv_obj;
 	unsigned long offset;
 	dma_addr_t dma_addr;
 	uint32_t val;
 	bool rb_swap;
+	int format;
 
 	/*
 	 * can't update plane when vop is disabled.
@@ -712,7 +730,7 @@
 	if (WARN_ON(!vop->is_enabled))
 		return;
 
-	if (!vop_plane_state->enable) {
+	if (!state->visible) {
 		vop_plane_atomic_disable(plane, old_state);
 		return;
 	}
@@ -733,18 +751,15 @@
 
 	offset = (src->x1 >> 16) * drm_format_plane_cpp(fb->pixel_format, 0);
 	offset += (src->y1 >> 16) * fb->pitches[0];
-	vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];
+	dma_addr = rk_obj->dma_addr + offset + fb->offsets[0];
 
-	spin_lock_irq(&plane->dev->event_lock);
-	vop_win->enable = true;
-	vop_win->yrgb_mst = vop_plane_state->yrgb_mst;
-	spin_unlock_irq(&plane->dev->event_lock);
+	format = vop_convert_format(fb->pixel_format);
 
 	spin_lock(&vop->reg_lock);
 
-	VOP_WIN_SET(vop, win, format, vop_plane_state->format);
+	VOP_WIN_SET(vop, win, format, format);
 	VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
-	VOP_WIN_SET(vop, win, yrgb_mst, vop_plane_state->yrgb_mst);
+	VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
 	if (is_yuv_support(fb->pixel_format)) {
 		int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
 		int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
@@ -791,68 +806,18 @@
 }
 
 static const struct drm_plane_helper_funcs plane_helper_funcs = {
-	.prepare_fb = vop_plane_prepare_fb,
-	.cleanup_fb = vop_plane_cleanup_fb,
 	.atomic_check = vop_plane_atomic_check,
 	.atomic_update = vop_plane_atomic_update,
 	.atomic_disable = vop_plane_atomic_disable,
 };
 
-static void vop_atomic_plane_reset(struct drm_plane *plane)
-{
-	struct vop_plane_state *vop_plane_state =
-					to_vop_plane_state(plane->state);
-
-	if (plane->state && plane->state->fb)
-		drm_framebuffer_unreference(plane->state->fb);
-
-	kfree(vop_plane_state);
-	vop_plane_state = kzalloc(sizeof(*vop_plane_state), GFP_KERNEL);
-	if (!vop_plane_state)
-		return;
-
-	plane->state = &vop_plane_state->base;
-	plane->state->plane = plane;
-}
-
-static struct drm_plane_state *
-vop_atomic_plane_duplicate_state(struct drm_plane *plane)
-{
-	struct vop_plane_state *old_vop_plane_state;
-	struct vop_plane_state *vop_plane_state;
-
-	if (WARN_ON(!plane->state))
-		return NULL;
-
-	old_vop_plane_state = to_vop_plane_state(plane->state);
-	vop_plane_state = kmemdup(old_vop_plane_state,
-				  sizeof(*vop_plane_state), GFP_KERNEL);
-	if (!vop_plane_state)
-		return NULL;
-
-	__drm_atomic_helper_plane_duplicate_state(plane,
-						  &vop_plane_state->base);
-
-	return &vop_plane_state->base;
-}
-
-static void vop_atomic_plane_destroy_state(struct drm_plane *plane,
-					   struct drm_plane_state *state)
-{
-	struct vop_plane_state *vop_state = to_vop_plane_state(state);
-
-	__drm_atomic_helper_plane_destroy_state(state);
-
-	kfree(vop_state);
-}
-
 static const struct drm_plane_funcs vop_plane_funcs = {
 	.update_plane	= drm_atomic_helper_update_plane,
 	.disable_plane	= drm_atomic_helper_disable_plane,
 	.destroy = vop_plane_destroy,
-	.reset = vop_atomic_plane_reset,
-	.atomic_duplicate_state = vop_atomic_plane_duplicate_state,
-	.atomic_destroy_state = vop_atomic_plane_destroy_state,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
 };
 
 static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
@@ -865,6 +830,7 @@
 
 	spin_lock_irqsave(&vop->irq_lock, flags);
 
+	VOP_INTR_SET_TYPE(vop, clear, FS_INTR, 1);
 	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);
 
 	spin_unlock_irqrestore(&vop->irq_lock, flags);
@@ -887,18 +853,9 @@
 	spin_unlock_irqrestore(&vop->irq_lock, flags);
 }
 
-static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
-{
-	struct vop *vop = to_vop(crtc);
-
-	reinit_completion(&vop->wait_update_complete);
-	WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
-}
-
 static const struct rockchip_crtc_funcs private_crtc_funcs = {
 	.enable_vblank = vop_crtc_enable_vblank,
 	.disable_vblank = vop_crtc_disable_vblank,
-	.wait_for_update = vop_crtc_wait_for_update,
 };
 
 static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -928,11 +885,17 @@
 	u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
 	u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
 	u16 vact_end = vact_st + vdisplay;
-	uint32_t val;
+	uint32_t pin_pol, val;
+	int ret;
 
 	WARN_ON(vop->event);
 
-	vop_enable(crtc);
+	ret = vop_enable(crtc);
+	if (ret) {
+		DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
+		return;
+	}
+
 	/*
 	 * If dclk rate is zero, mean that scanout is stop,
 	 * we don't need wait any more.
@@ -969,25 +932,31 @@
 		vop_dsp_hold_valid_irq_disable(vop);
 	}
 
-	val = 0x8;
-	val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
-	val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
-	VOP_CTRL_SET(vop, pin_pol, val);
+	pin_pol = 0x8;
+	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
+	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
+	VOP_CTRL_SET(vop, pin_pol, pin_pol);
+
 	switch (s->output_type) {
 	case DRM_MODE_CONNECTOR_LVDS:
 		VOP_CTRL_SET(vop, rgb_en, 1);
+		VOP_CTRL_SET(vop, rgb_pin_pol, pin_pol);
 		break;
 	case DRM_MODE_CONNECTOR_eDP:
+		VOP_CTRL_SET(vop, edp_pin_pol, pin_pol);
 		VOP_CTRL_SET(vop, edp_en, 1);
 		break;
 	case DRM_MODE_CONNECTOR_HDMIA:
+		VOP_CTRL_SET(vop, hdmi_pin_pol, pin_pol);
 		VOP_CTRL_SET(vop, hdmi_en, 1);
 		break;
 	case DRM_MODE_CONNECTOR_DSI:
+		VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol);
 		VOP_CTRL_SET(vop, mipi_en, 1);
 		break;
 	default:
-		DRM_ERROR("unsupport connector_type[%d]\n", s->output_type);
+		DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
+			      s->output_type);
 	}
 	VOP_CTRL_SET(vop, out_mode, s->output_mode);
 
@@ -1006,12 +975,44 @@
 	clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
 
 	VOP_CTRL_SET(vop, standby, 0);
+
+	rockchip_drm_psr_activate(&vop->crtc);
+}
+
+static bool vop_fs_irq_is_pending(struct vop *vop)
+{
+	return VOP_INTR_GET_TYPE(vop, status, FS_INTR);
+}
+
+static void vop_wait_for_irq_handler(struct vop *vop)
+{
+	bool pending;
+	int ret;
+
+	/*
+	 * Spin until frame start interrupt status bit goes low, which means
+	 * that interrupt handler was invoked and cleared it. The timeout of
+	 * 10 msecs is really too long, but it is just a safety measure if
+	 * something goes really wrong. The wait will only happen in the very
+	 * unlikely case of a vblank happening exactly at the same time and
+	 * shouldn't exceed microseconds range.
+	 */
+	ret = readx_poll_timeout_atomic(vop_fs_irq_is_pending, vop, pending,
+					!pending, 0, 10 * 1000);
+	if (ret)
+		DRM_DEV_ERROR(vop->dev, "VOP vblank IRQ stuck for 10 ms\n");
+
+	synchronize_irq(vop->irq);
 }
 
 static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
 				  struct drm_crtc_state *old_crtc_state)
 {
+	struct drm_atomic_state *old_state = old_crtc_state->state;
+	struct drm_plane_state *old_plane_state;
 	struct vop *vop = to_vop(crtc);
+	struct drm_plane *plane;
+	int i;
 
 	if (WARN_ON(!vop->is_enabled))
 		return;
@@ -1021,12 +1022,13 @@
 	vop_cfg_done(vop);
 
 	spin_unlock(&vop->reg_lock);
-}
 
-static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
-				  struct drm_crtc_state *old_crtc_state)
-{
-	struct vop *vop = to_vop(crtc);
+	/*
+	 * There is a (rather unlikely) possiblity that a vblank interrupt
+	 * fired before we set the cfg_done bit. To avoid spuriously
+	 * signalling flip completion we need to wait for it to finish.
+	 */
+	vop_wait_for_irq_handler(vop);
 
 	spin_lock_irq(&crtc->dev->event_lock);
 	if (crtc->state->event) {
@@ -1037,6 +1039,25 @@
 		crtc->state->event = NULL;
 	}
 	spin_unlock_irq(&crtc->dev->event_lock);
+
+	for_each_plane_in_state(old_state, plane, old_plane_state, i) {
+		if (!old_plane_state->fb)
+			continue;
+
+		if (old_plane_state->fb == plane->state->fb)
+			continue;
+
+		drm_framebuffer_reference(old_plane_state->fb);
+		drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
+		set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+	}
+}
+
+static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
+				  struct drm_crtc_state *old_crtc_state)
+{
+	rockchip_drm_psr_flush(crtc);
 }
 
 static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
@@ -1093,16 +1114,13 @@
 	.atomic_destroy_state = vop_crtc_destroy_state,
 };
 
-static bool vop_win_pending_is_complete(struct vop_win *vop_win)
+static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
 {
-	dma_addr_t yrgb_mst;
+	struct vop *vop = container_of(work, struct vop, fb_unref_work);
+	struct drm_framebuffer *fb = val;
 
-	if (!vop_win->enable)
-		return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0;
-
-	yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
-
-	return yrgb_mst == vop_win->yrgb_mst;
+	drm_crtc_vblank_put(&vop->crtc);
+	drm_framebuffer_unreference(fb);
 }
 
 static void vop_handle_vblank(struct vop *vop)
@@ -1110,25 +1128,17 @@
 	struct drm_device *drm = vop->drm_dev;
 	struct drm_crtc *crtc = &vop->crtc;
 	unsigned long flags;
-	int i;
-
-	for (i = 0; i < vop->data->win_size; i++) {
-		if (!vop_win_pending_is_complete(&vop->win[i]))
-			return;
-	}
 
 	spin_lock_irqsave(&drm->event_lock, flags);
 	if (vop->event) {
-
 		drm_crtc_send_vblank_event(crtc, vop->event);
 		drm_crtc_vblank_put(crtc);
 		vop->event = NULL;
-
 	}
 	spin_unlock_irqrestore(&drm->event_lock, flags);
 
-	if (!completion_done(&vop->wait_update_complete))
-		complete(&vop->wait_update_complete);
+	if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending))
+		drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq);
 }
 
 static irqreturn_t vop_isr(int irq, void *data)
@@ -1162,6 +1172,12 @@
 		ret = IRQ_HANDLED;
 	}
 
+	if (active_irqs & LINE_FLAG_INTR) {
+		complete(&vop->line_flag_completion);
+		active_irqs &= ~LINE_FLAG_INTR;
+		ret = IRQ_HANDLED;
+	}
+
 	if (active_irqs & FS_INTR) {
 		drm_crtc_handle_vblank(crtc);
 		vop_handle_vblank(vop);
@@ -1171,7 +1187,8 @@
 
 	/* Unhandled irqs are spurious. */
 	if (active_irqs)
-		DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);
+		DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
+			      active_irqs);
 
 	return ret;
 }
@@ -1206,7 +1223,8 @@
 					       win_data->phy->nformats,
 					       win_data->type, NULL);
 		if (ret) {
-			DRM_ERROR("failed to initialize plane\n");
+			DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
+				      ret);
 			goto err_cleanup_planes;
 		}
 
@@ -1244,7 +1262,8 @@
 					       win_data->phy->nformats,
 					       win_data->type, NULL);
 		if (ret) {
-			DRM_ERROR("failed to initialize overlay plane\n");
+			DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
+				      ret);
 			goto err_cleanup_crtc;
 		}
 		drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
@@ -1252,14 +1271,17 @@
 
 	port = of_get_child_by_name(dev->of_node, "port");
 	if (!port) {
-		DRM_ERROR("no port node found in %s\n",
-			  dev->of_node->full_name);
+		DRM_DEV_ERROR(vop->dev, "no port node found in %s\n",
+			      dev->of_node->full_name);
 		ret = -ENOENT;
 		goto err_cleanup_crtc;
 	}
 
+	drm_flip_work_init(&vop->fb_unref_work, "fb_unref",
+			   vop_fb_unref_worker);
+
 	init_completion(&vop->dsp_hold_completion);
-	init_completion(&vop->wait_update_complete);
+	init_completion(&vop->line_flag_completion);
 	crtc->port = port;
 	rockchip_register_crtc_funcs(crtc, &private_crtc_funcs);
 
@@ -1300,6 +1322,7 @@
 	 * references the CRTC.
 	 */
 	drm_crtc_cleanup(crtc);
+	drm_flip_work_cleanup(&vop->fb_unref_work);
 }
 
 static int vop_initial(struct vop *vop)
@@ -1416,6 +1439,49 @@
 	}
 }
 
+/**
+ * rockchip_drm_wait_line_flag - acqiure the give line flag event
+ * @crtc: CRTC to enable line flag
+ * @line_num: interested line number
+ * @mstimeout: millisecond for timeout
+ *
+ * Driver would hold here until the interested line flag interrupt have
+ * happened or timeout to wait.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_wait_line_flag(struct drm_crtc *crtc, unsigned int line_num,
+				unsigned int mstimeout)
+{
+	struct vop *vop = to_vop(crtc);
+	unsigned long jiffies_left;
+
+	if (!crtc || !vop->is_enabled)
+		return -ENODEV;
+
+	if (line_num > crtc->mode.vtotal || mstimeout <= 0)
+		return -EINVAL;
+
+	if (vop_line_flag_irq_is_enabled(vop))
+		return -EBUSY;
+
+	reinit_completion(&vop->line_flag_completion);
+	vop_line_flag_irq_enable(vop, line_num);
+
+	jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion,
+						   msecs_to_jiffies(mstimeout));
+	vop_line_flag_irq_disable(vop);
+
+	if (jiffies_left == 0) {
+		dev_err(vop->dev, "Timeout waiting for IRQ\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_wait_line_flag);
+
 static int vop_bind(struct device *dev, struct device *master, void *data)
 {
 	struct platform_device *pdev = to_platform_device(dev);
@@ -1481,10 +1547,15 @@
 
 	ret = vop_create_crtc(vop);
 	if (ret)
-		return ret;
+		goto err_enable_irq;
 
 	pm_runtime_enable(&pdev->dev);
+
 	return 0;
+
+err_enable_irq:
+	enable_irq(vop->irq); /* To balance out the disable_irq above */
+	return ret;
 }
 
 static void vop_unbind(struct device *dev, struct device *master, void *data)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 071ff0b..1dbc526 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -33,6 +33,7 @@
 	uint32_t offset;
 	uint32_t shift;
 	uint32_t mask;
+	bool write_mask;
 };
 
 struct vop_ctrl {
@@ -48,6 +49,10 @@
 	struct vop_reg dither_down;
 	struct vop_reg dither_up;
 	struct vop_reg pin_pol;
+	struct vop_reg rgb_pin_pol;
+	struct vop_reg hdmi_pin_pol;
+	struct vop_reg edp_pin_pol;
+	struct vop_reg mipi_pin_pol;
 
 	struct vop_reg htotal_pw;
 	struct vop_reg hact_st_end;
@@ -56,6 +61,8 @@
 	struct vop_reg hpost_st_end;
 	struct vop_reg vpost_st_end;
 
+	struct vop_reg line_flag_num[2];
+
 	struct vop_reg cfg_done;
 };
 
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 919992c..35c51f3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -23,7 +23,14 @@
 #define VOP_REG(off, _mask, s) \
 		{.offset = off, \
 		 .mask = _mask, \
-		 .shift = s,}
+		 .shift = s, \
+		 .write_mask = false,}
+
+#define VOP_REG_MASK(off, _mask, s) \
+		{.offset = off, \
+		 .mask = _mask, \
+		 .shift = s, \
+		 .write_mask = true,}
 
 static const uint32_t formats_win_full[] = {
 	DRM_FORMAT_XRGB8888,
@@ -50,6 +57,89 @@
 	DRM_FORMAT_BGR565,
 };
 
+static const struct vop_scl_regs rk3036_win_scl = {
+	.scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
+	.scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
+	.scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
+	.scale_cbcr_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
+};
+
+static const struct vop_win_phy rk3036_win0_data = {
+	.scl = &rk3036_win_scl,
+	.data_formats = formats_win_full,
+	.nformats = ARRAY_SIZE(formats_win_full),
+	.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
+	.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3),
+	.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15),
+	.act_info = VOP_REG(RK3036_WIN0_ACT_INFO, 0x1fff1fff, 0),
+	.dsp_info = VOP_REG(RK3036_WIN0_DSP_INFO, 0x0fff0fff, 0),
+	.dsp_st = VOP_REG(RK3036_WIN0_DSP_ST, 0x1fff1fff, 0),
+	.yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0),
+	.uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0),
+	.yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0),
+	.uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16),
+};
+
+static const struct vop_win_phy rk3036_win1_data = {
+	.data_formats = formats_win_lite,
+	.nformats = ARRAY_SIZE(formats_win_lite),
+	.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
+	.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
+	.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
+	.act_info = VOP_REG(RK3036_WIN1_ACT_INFO, 0x1fff1fff, 0),
+	.dsp_info = VOP_REG(RK3036_WIN1_DSP_INFO, 0x0fff0fff, 0),
+	.dsp_st = VOP_REG(RK3036_WIN1_DSP_ST, 0x1fff1fff, 0),
+	.yrgb_mst = VOP_REG(RK3036_WIN1_MST, 0xffffffff, 0),
+	.yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0),
+};
+
+static const struct vop_win_data rk3036_vop_win_data[] = {
+	{ .base = 0x00, .phy = &rk3036_win0_data,
+	  .type = DRM_PLANE_TYPE_PRIMARY },
+	{ .base = 0x00, .phy = &rk3036_win1_data,
+	  .type = DRM_PLANE_TYPE_CURSOR },
+};
+
+static const int rk3036_vop_intrs[] = {
+	DSP_HOLD_VALID_INTR,
+	FS_INTR,
+	LINE_FLAG_INTR,
+	BUS_ERROR_INTR,
+};
+
+static const struct vop_intr rk3036_intr = {
+	.intrs = rk3036_vop_intrs,
+	.nintrs = ARRAY_SIZE(rk3036_vop_intrs),
+	.status = VOP_REG(RK3036_INT_STATUS, 0xf, 0),
+	.enable = VOP_REG(RK3036_INT_STATUS, 0xf, 4),
+	.clear = VOP_REG(RK3036_INT_STATUS, 0xf, 8),
+};
+
+static const struct vop_ctrl rk3036_ctrl_data = {
+	.standby = VOP_REG(RK3036_SYS_CTRL, 0x1, 30),
+	.out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0),
+	.pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4),
+	.htotal_pw = VOP_REG(RK3036_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
+	.hact_st_end = VOP_REG(RK3036_DSP_HACT_ST_END, 0x1fff1fff, 0),
+	.vtotal_pw = VOP_REG(RK3036_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
+	.vact_st_end = VOP_REG(RK3036_DSP_VACT_ST_END, 0x1fff1fff, 0),
+	.line_flag_num[0] = VOP_REG(RK3036_INT_STATUS, 0xfff, 12),
+	.cfg_done = VOP_REG(RK3036_REG_CFG_DONE, 0x1, 0),
+};
+
+static const struct vop_reg_data rk3036_vop_init_reg_table[] = {
+	{RK3036_DSP_CTRL1, 0x00000000},
+};
+
+static const struct vop_data rk3036_vop = {
+	.init_table = rk3036_vop_init_reg_table,
+	.table_size = ARRAY_SIZE(rk3036_vop_init_reg_table),
+	.ctrl = &rk3036_ctrl_data,
+	.intr = &rk3036_intr,
+	.win = rk3036_vop_win_data,
+	.win_size = ARRAY_SIZE(rk3036_vop_win_data),
+};
+
 static const struct vop_scl_extension rk3288_win_full_scl_ext = {
 	.cbcr_vsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 31),
 	.cbcr_vsu_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 30),
@@ -133,6 +223,7 @@
 	.vact_st_end = VOP_REG(RK3288_DSP_VACT_ST_END, 0x1fff1fff, 0),
 	.hpost_st_end = VOP_REG(RK3288_POST_DSP_HACT_INFO, 0x1fff1fff, 0),
 	.vpost_st_end = VOP_REG(RK3288_POST_DSP_VACT_INFO, 0x1fff1fff, 0),
+	.line_flag_num[0] = VOP_REG(RK3288_INTR_CTRL0, 0x1fff, 12),
 	.cfg_done = VOP_REG(RK3288_REG_CFG_DONE, 0x1, 0),
 };
 
@@ -190,93 +281,104 @@
 	.win_size = ARRAY_SIZE(rk3288_vop_win_data),
 };
 
-static const struct vop_scl_regs rk3036_win_scl = {
-	.scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
-	.scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
-	.scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
-	.scale_cbcr_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
+static const struct vop_ctrl rk3399_ctrl_data = {
+	.standby = VOP_REG(RK3399_SYS_CTRL, 0x1, 22),
+	.gate_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 23),
+	.rgb_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 12),
+	.hdmi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 13),
+	.edp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 14),
+	.mipi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 15),
+	.dither_down = VOP_REG(RK3399_DSP_CTRL1, 0xf, 1),
+	.dither_up = VOP_REG(RK3399_DSP_CTRL1, 0x1, 6),
+	.data_blank = VOP_REG(RK3399_DSP_CTRL0, 0x1, 19),
+	.out_mode = VOP_REG(RK3399_DSP_CTRL0, 0xf, 0),
+	.rgb_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16),
+	.hdmi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 20),
+	.edp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 24),
+	.mipi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 28),
+	.htotal_pw = VOP_REG(RK3399_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
+	.hact_st_end = VOP_REG(RK3399_DSP_HACT_ST_END, 0x1fff1fff, 0),
+	.vtotal_pw = VOP_REG(RK3399_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
+	.vact_st_end = VOP_REG(RK3399_DSP_VACT_ST_END, 0x1fff1fff, 0),
+	.hpost_st_end = VOP_REG(RK3399_POST_DSP_HACT_INFO, 0x1fff1fff, 0),
+	.vpost_st_end = VOP_REG(RK3399_POST_DSP_VACT_INFO, 0x1fff1fff, 0),
+	.line_flag_num[0] = VOP_REG(RK3399_LINE_FLAG, 0xffff, 0),
+	.line_flag_num[1] = VOP_REG(RK3399_LINE_FLAG, 0xffff, 16),
+	.cfg_done = VOP_REG_MASK(RK3399_REG_CFG_DONE, 0x1, 0),
 };
 
-static const struct vop_win_phy rk3036_win0_data = {
-	.scl = &rk3036_win_scl,
-	.data_formats = formats_win_full,
-	.nformats = ARRAY_SIZE(formats_win_full),
-	.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
-	.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3),
-	.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15),
-	.act_info = VOP_REG(RK3036_WIN0_ACT_INFO, 0x1fff1fff, 0),
-	.dsp_info = VOP_REG(RK3036_WIN0_DSP_INFO, 0x0fff0fff, 0),
-	.dsp_st = VOP_REG(RK3036_WIN0_DSP_ST, 0x1fff1fff, 0),
-	.yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0),
-	.uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0),
-	.yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0),
-	.uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16),
-};
-
-static const struct vop_win_phy rk3036_win1_data = {
-	.data_formats = formats_win_lite,
-	.nformats = ARRAY_SIZE(formats_win_lite),
-	.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
-	.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
-	.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
-	.act_info = VOP_REG(RK3036_WIN1_ACT_INFO, 0x1fff1fff, 0),
-	.dsp_info = VOP_REG(RK3036_WIN1_DSP_INFO, 0x0fff0fff, 0),
-	.dsp_st = VOP_REG(RK3036_WIN1_DSP_ST, 0x1fff1fff, 0),
-	.yrgb_mst = VOP_REG(RK3036_WIN1_MST, 0xffffffff, 0),
-	.yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0),
-};
-
-static const struct vop_win_data rk3036_vop_win_data[] = {
-	{ .base = 0x00, .phy = &rk3036_win0_data,
-	  .type = DRM_PLANE_TYPE_PRIMARY },
-	{ .base = 0x00, .phy = &rk3036_win1_data,
-	  .type = DRM_PLANE_TYPE_CURSOR },
-};
-
-static const int rk3036_vop_intrs[] = {
-	DSP_HOLD_VALID_INTR,
+static const int rk3399_vop_intrs[] = {
 	FS_INTR,
+	0, 0,
 	LINE_FLAG_INTR,
+	0,
 	BUS_ERROR_INTR,
+	0, 0, 0, 0, 0, 0, 0,
+	DSP_HOLD_VALID_INTR,
 };
 
-static const struct vop_intr rk3036_intr = {
-	.intrs = rk3036_vop_intrs,
-	.nintrs = ARRAY_SIZE(rk3036_vop_intrs),
-	.status = VOP_REG(RK3036_INT_STATUS, 0xf, 0),
-	.enable = VOP_REG(RK3036_INT_STATUS, 0xf, 4),
-	.clear = VOP_REG(RK3036_INT_STATUS, 0xf, 8),
+static const struct vop_intr rk3399_vop_intr = {
+	.intrs = rk3399_vop_intrs,
+	.nintrs = ARRAY_SIZE(rk3399_vop_intrs),
+	.status = VOP_REG_MASK(RK3399_INTR_STATUS0, 0xffff, 0),
+	.enable = VOP_REG_MASK(RK3399_INTR_EN0, 0xffff, 0),
+	.clear = VOP_REG_MASK(RK3399_INTR_CLEAR0, 0xffff, 0),
 };
 
-static const struct vop_ctrl rk3036_ctrl_data = {
-	.standby = VOP_REG(RK3036_SYS_CTRL, 0x1, 30),
-	.out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0),
-	.pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4),
-	.htotal_pw = VOP_REG(RK3036_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
-	.hact_st_end = VOP_REG(RK3036_DSP_HACT_ST_END, 0x1fff1fff, 0),
-	.vtotal_pw = VOP_REG(RK3036_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
-	.vact_st_end = VOP_REG(RK3036_DSP_VACT_ST_END, 0x1fff1fff, 0),
-	.cfg_done = VOP_REG(RK3036_REG_CFG_DONE, 0x1, 0),
+static const struct vop_reg_data rk3399_init_reg_table[] = {
+	{RK3399_SYS_CTRL, 0x2000f800},
+	{RK3399_DSP_CTRL0, 0x00000000},
+	{RK3399_WIN0_CTRL0, 0x00000080},
+	{RK3399_WIN1_CTRL0, 0x00000080},
+	/* TODO: Win2/3 support multiple area function, but we haven't found
+	 * a suitable way to use it yet, so let's just use them as other windows
+	 * with only area 0 enabled.
+	 */
+	{RK3399_WIN2_CTRL0, 0x00000010},
+	{RK3399_WIN3_CTRL0, 0x00000010},
 };
 
-static const struct vop_reg_data rk3036_vop_init_reg_table[] = {
-	{RK3036_DSP_CTRL1, 0x00000000},
+static const struct vop_data rk3399_vop_big = {
+	.init_table = rk3399_init_reg_table,
+	.table_size = ARRAY_SIZE(rk3399_init_reg_table),
+	.intr = &rk3399_vop_intr,
+	.ctrl = &rk3399_ctrl_data,
+	/*
+	 * rk3399 vop big windows register layout is same as rk3288.
+	 */
+	.win = rk3288_vop_win_data,
+	.win_size = ARRAY_SIZE(rk3288_vop_win_data),
 };
 
-static const struct vop_data rk3036_vop = {
-	.init_table = rk3036_vop_init_reg_table,
-	.table_size = ARRAY_SIZE(rk3036_vop_init_reg_table),
-	.ctrl = &rk3036_ctrl_data,
-	.intr = &rk3036_intr,
-	.win = rk3036_vop_win_data,
-	.win_size = ARRAY_SIZE(rk3036_vop_win_data),
+static const struct vop_win_data rk3399_vop_lit_win_data[] = {
+	{ .base = 0x00, .phy = &rk3288_win01_data,
+	  .type = DRM_PLANE_TYPE_PRIMARY },
+	{ .base = 0x00, .phy = &rk3288_win23_data,
+	  .type = DRM_PLANE_TYPE_CURSOR},
+};
+
+static const struct vop_data rk3399_vop_lit = {
+	.init_table = rk3399_init_reg_table,
+	.table_size = ARRAY_SIZE(rk3399_init_reg_table),
+	.intr = &rk3399_vop_intr,
+	.ctrl = &rk3399_ctrl_data,
+	/*
+	 * rk3399 vop lit windows register layout is same as rk3288,
+	 * but cut off the win1 and win3 windows.
+	 */
+	.win = rk3399_vop_lit_win_data,
+	.win_size = ARRAY_SIZE(rk3399_vop_lit_win_data),
 };
 
 static const struct of_device_id vop_driver_dt_match[] = {
-	{ .compatible = "rockchip,rk3288-vop",
-	  .data = &rk3288_vop },
 	{ .compatible = "rockchip,rk3036-vop",
 	  .data = &rk3036_vop },
+	{ .compatible = "rockchip,rk3288-vop",
+	  .data = &rk3288_vop },
+	{ .compatible = "rockchip,rk3399-vop-big",
+	  .data = &rk3399_vop_big },
+	{ .compatible = "rockchip,rk3399-vop-lit",
+	  .data = &rk3399_vop_lit },
 	{},
 };
 MODULE_DEVICE_TABLE(of, vop_driver_dt_match);
@@ -305,7 +407,6 @@
 	.remove = vop_remove,
 	.driver = {
 		.name = "rockchip-vop",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(vop_driver_dt_match),
 	},
 };
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
index d4b46cb..cd19726 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
@@ -166,4 +166,197 @@
 #define RK3036_HWC_LUT_ADDR		0x800
 /* rk3036 register definition end */
 
+/* rk3399 register definition */
+#define RK3399_REG_CFG_DONE		0x00000
+#define RK3399_VERSION_INFO		0x00004
+#define RK3399_SYS_CTRL			0x00008
+#define RK3399_SYS_CTRL1		0x0000c
+#define RK3399_DSP_CTRL0		0x00010
+#define RK3399_DSP_CTRL1		0x00014
+#define RK3399_DSP_BG			0x00018
+#define RK3399_MCU_CTRL			0x0001c
+#define RK3399_WB_CTRL0			0x00020
+#define RK3399_WB_CTRL1			0x00024
+#define RK3399_WB_YRGB_MST		0x00028
+#define RK3399_WB_CBR_MST		0x0002c
+#define RK3399_WIN0_CTRL0		0x00030
+#define RK3399_WIN0_CTRL1		0x00034
+#define RK3399_WIN0_COLOR_KEY		0x00038
+#define RK3399_WIN0_VIR			0x0003c
+#define RK3399_WIN0_YRGB_MST		0x00040
+#define RK3399_WIN0_CBR_MST		0x00044
+#define RK3399_WIN0_ACT_INFO		0x00048
+#define RK3399_WIN0_DSP_INFO		0x0004c
+#define RK3399_WIN0_DSP_ST		0x00050
+#define RK3399_WIN0_SCL_FACTOR_YRGB	0x00054
+#define RK3399_WIN0_SCL_FACTOR_CBR	0x00058
+#define RK3399_WIN0_SCL_OFFSET		0x0005c
+#define RK3399_WIN0_SRC_ALPHA_CTRL	0x00060
+#define RK3399_WIN0_DST_ALPHA_CTRL	0x00064
+#define RK3399_WIN0_FADING_CTRL		0x00068
+#define RK3399_WIN0_CTRL2		0x0006c
+#define RK3399_WIN1_CTRL0		0x00070
+#define RK3399_WIN1_CTRL1		0x00074
+#define RK3399_WIN1_COLOR_KEY		0x00078
+#define RK3399_WIN1_VIR			0x0007c
+#define RK3399_WIN1_YRGB_MST		0x00080
+#define RK3399_WIN1_CBR_MST		0x00084
+#define RK3399_WIN1_ACT_INFO		0x00088
+#define RK3399_WIN1_DSP_INFO		0x0008c
+#define RK3399_WIN1_DSP_ST		0x00090
+#define RK3399_WIN1_SCL_FACTOR_YRGB	0x00094
+#define RK3399_WIN1_SCL_FACTOR_CBR	0x00098
+#define RK3399_WIN1_SCL_OFFSET		0x0009c
+#define RK3399_WIN1_SRC_ALPHA_CTRL	0x000a0
+#define RK3399_WIN1_DST_ALPHA_CTRL	0x000a4
+#define RK3399_WIN1_FADING_CTRL		0x000a8
+#define RK3399_WIN1_CTRL2		0x000ac
+#define RK3399_WIN2_CTRL0		0x000b0
+#define RK3399_WIN2_CTRL1		0x000b4
+#define RK3399_WIN2_VIR0_1		0x000b8
+#define RK3399_WIN2_VIR2_3		0x000bc
+#define RK3399_WIN2_MST0		0x000c0
+#define RK3399_WIN2_DSP_INFO0		0x000c4
+#define RK3399_WIN2_DSP_ST0		0x000c8
+#define RK3399_WIN2_COLOR_KEY		0x000cc
+#define RK3399_WIN2_MST1		0x000d0
+#define RK3399_WIN2_DSP_INFO1		0x000d4
+#define RK3399_WIN2_DSP_ST1		0x000d8
+#define RK3399_WIN2_SRC_ALPHA_CTRL	0x000dc
+#define RK3399_WIN2_MST2		0x000e0
+#define RK3399_WIN2_DSP_INFO2		0x000e4
+#define RK3399_WIN2_DSP_ST2		0x000e8
+#define RK3399_WIN2_DST_ALPHA_CTRL	0x000ec
+#define RK3399_WIN2_MST3		0x000f0
+#define RK3399_WIN2_DSP_INFO3		0x000f4
+#define RK3399_WIN2_DSP_ST3		0x000f8
+#define RK3399_WIN2_FADING_CTRL		0x000fc
+#define RK3399_WIN3_CTRL0		0x00100
+#define RK3399_WIN3_CTRL1		0x00104
+#define RK3399_WIN3_VIR0_1		0x00108
+#define RK3399_WIN3_VIR2_3		0x0010c
+#define RK3399_WIN3_MST0		0x00110
+#define RK3399_WIN3_DSP_INFO0		0x00114
+#define RK3399_WIN3_DSP_ST0		0x00118
+#define RK3399_WIN3_COLOR_KEY		0x0011c
+#define RK3399_WIN3_MST1		0x00120
+#define RK3399_WIN3_DSP_INFO1		0x00124
+#define RK3399_WIN3_DSP_ST1		0x00128
+#define RK3399_WIN3_SRC_ALPHA_CTRL	0x0012c
+#define RK3399_WIN3_MST2		0x00130
+#define RK3399_WIN3_DSP_INFO2		0x00134
+#define RK3399_WIN3_DSP_ST2		0x00138
+#define RK3399_WIN3_DST_ALPHA_CTRL	0x0013c
+#define RK3399_WIN3_MST3		0x00140
+#define RK3399_WIN3_DSP_INFO3		0x00144
+#define RK3399_WIN3_DSP_ST3		0x00148
+#define RK3399_WIN3_FADING_CTRL		0x0014c
+#define RK3399_HWC_CTRL0		0x00150
+#define RK3399_HWC_CTRL1		0x00154
+#define RK3399_HWC_MST			0x00158
+#define RK3399_HWC_DSP_ST		0x0015c
+#define RK3399_HWC_SRC_ALPHA_CTRL	0x00160
+#define RK3399_HWC_DST_ALPHA_CTRL	0x00164
+#define RK3399_HWC_FADING_CTRL		0x00168
+#define RK3399_HWC_RESERVED1		0x0016c
+#define RK3399_POST_DSP_HACT_INFO	0x00170
+#define RK3399_POST_DSP_VACT_INFO	0x00174
+#define RK3399_POST_SCL_FACTOR_YRGB	0x00178
+#define RK3399_POST_RESERVED		0x0017c
+#define RK3399_POST_SCL_CTRL		0x00180
+#define RK3399_POST_DSP_VACT_INFO_F1	0x00184
+#define RK3399_DSP_HTOTAL_HS_END	0x00188
+#define RK3399_DSP_HACT_ST_END		0x0018c
+#define RK3399_DSP_VTOTAL_VS_END	0x00190
+#define RK3399_DSP_VACT_ST_END		0x00194
+#define RK3399_DSP_VS_ST_END_F1		0x00198
+#define RK3399_DSP_VACT_ST_END_F1	0x0019c
+#define RK3399_PWM_CTRL			0x001a0
+#define RK3399_PWM_PERIOD_HPR		0x001a4
+#define RK3399_PWM_DUTY_LPR		0x001a8
+#define RK3399_PWM_CNT			0x001ac
+#define RK3399_BCSH_COLOR_BAR		0x001b0
+#define RK3399_BCSH_BCS			0x001b4
+#define RK3399_BCSH_H			0x001b8
+#define RK3399_BCSH_CTRL		0x001bc
+#define RK3399_CABC_CTRL0		0x001c0
+#define RK3399_CABC_CTRL1		0x001c4
+#define RK3399_CABC_CTRL2		0x001c8
+#define RK3399_CABC_CTRL3		0x001cc
+#define RK3399_CABC_GAUSS_LINE0_0	0x001d0
+#define RK3399_CABC_GAUSS_LINE0_1	0x001d4
+#define RK3399_CABC_GAUSS_LINE1_0	0x001d8
+#define RK3399_CABC_GAUSS_LINE1_1	0x001dc
+#define RK3399_CABC_GAUSS_LINE2_0	0x001e0
+#define RK3399_CABC_GAUSS_LINE2_1	0x001e4
+#define RK3399_FRC_LOWER01_0		0x001e8
+#define RK3399_FRC_LOWER01_1		0x001ec
+#define RK3399_FRC_LOWER10_0		0x001f0
+#define RK3399_FRC_LOWER10_1		0x001f4
+#define RK3399_FRC_LOWER11_0		0x001f8
+#define RK3399_FRC_LOWER11_1		0x001fc
+#define RK3399_AFBCD0_CTRL		0x00200
+#define RK3399_AFBCD0_HDR_PTR		0x00204
+#define RK3399_AFBCD0_PIC_SIZE		0x00208
+#define RK3399_AFBCD0_STATUS		0x0020c
+#define RK3399_AFBCD1_CTRL		0x00220
+#define RK3399_AFBCD1_HDR_PTR		0x00224
+#define RK3399_AFBCD1_PIC_SIZE		0x00228
+#define RK3399_AFBCD1_STATUS		0x0022c
+#define RK3399_AFBCD2_CTRL		0x00240
+#define RK3399_AFBCD2_HDR_PTR		0x00244
+#define RK3399_AFBCD2_PIC_SIZE		0x00248
+#define RK3399_AFBCD2_STATUS		0x0024c
+#define RK3399_AFBCD3_CTRL		0x00260
+#define RK3399_AFBCD3_HDR_PTR		0x00264
+#define RK3399_AFBCD3_PIC_SIZE		0x00268
+#define RK3399_AFBCD3_STATUS		0x0026c
+#define RK3399_INTR_EN0			0x00280
+#define RK3399_INTR_CLEAR0		0x00284
+#define RK3399_INTR_STATUS0		0x00288
+#define RK3399_INTR_RAW_STATUS0		0x0028c
+#define RK3399_INTR_EN1			0x00290
+#define RK3399_INTR_CLEAR1		0x00294
+#define RK3399_INTR_STATUS1		0x00298
+#define RK3399_INTR_RAW_STATUS1		0x0029c
+#define RK3399_LINE_FLAG		0x002a0
+#define RK3399_VOP_STATUS		0x002a4
+#define RK3399_BLANKING_VALUE		0x002a8
+#define RK3399_MCU_BYPASS_PORT		0x002ac
+#define RK3399_WIN0_DSP_BG		0x002b0
+#define RK3399_WIN1_DSP_BG		0x002b4
+#define RK3399_WIN2_DSP_BG		0x002b8
+#define RK3399_WIN3_DSP_BG		0x002bc
+#define RK3399_YUV2YUV_WIN		0x002c0
+#define RK3399_YUV2YUV_POST		0x002c4
+#define RK3399_AUTO_GATING_EN		0x002cc
+#define RK3399_WIN0_CSC_COE		0x003a0
+#define RK3399_WIN1_CSC_COE		0x003c0
+#define RK3399_WIN2_CSC_COE		0x003e0
+#define RK3399_WIN3_CSC_COE		0x00400
+#define RK3399_HWC_CSC_COE		0x00420
+#define RK3399_BCSH_R2Y_CSC_COE		0x00440
+#define RK3399_BCSH_Y2R_CSC_COE		0x00460
+#define RK3399_POST_YUV2YUV_Y2R_COE	0x00480
+#define RK3399_POST_YUV2YUV_3X3_COE	0x004a0
+#define RK3399_POST_YUV2YUV_R2Y_COE	0x004c0
+#define RK3399_WIN0_YUV2YUV_Y2R		0x004e0
+#define RK3399_WIN0_YUV2YUV_3X3		0x00500
+#define RK3399_WIN0_YUV2YUV_R2Y		0x00520
+#define RK3399_WIN1_YUV2YUV_Y2R		0x00540
+#define RK3399_WIN1_YUV2YUV_3X3		0x00560
+#define RK3399_WIN1_YUV2YUV_R2Y		0x00580
+#define RK3399_WIN2_YUV2YUV_Y2R		0x005a0
+#define RK3399_WIN2_YUV2YUV_3X3		0x005c0
+#define RK3399_WIN2_YUV2YUV_R2Y		0x005e0
+#define RK3399_WIN3_YUV2YUV_Y2R		0x00600
+#define RK3399_WIN3_YUV2YUV_3X3		0x00620
+#define RK3399_WIN3_YUV2YUV_R2Y		0x00640
+#define RK3399_WIN2_LUT_ADDR		0x01000
+#define RK3399_WIN3_LUT_ADDR		0x01400
+#define RK3399_HWC_LUT_ADDR		0x01800
+#define RK3399_CABC_GAMMA_LUT_ADDR	0x01c00
+#define RK3399_GAMMA_LUT_ADDR		0x02000
+/* rk3399 register definition end */
+
 #endif /* _ROCKCHIP_VOP_REG_H */
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 21aed1f..3b80713 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -50,7 +50,7 @@
 
 static struct drm_driver driver = {
 	.driver_features =
-	    DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
+	    DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_PCI_DMA | DRIVER_LEGACY,
 	.dev_priv_size = sizeof(drm_savage_buf_priv_t),
 	.load = savage_driver_load,
 	.firstopen = savage_driver_firstopen,
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index c01ad0a..3dc0d8f 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -1001,15 +1001,9 @@
 		cmdbuf->cmd_addr = kcmd_addr;
 	}
 	if (cmdbuf->vb_size) {
-		kvb_addr = kmalloc(cmdbuf->vb_size, GFP_KERNEL);
-		if (kvb_addr == NULL) {
-			ret = -ENOMEM;
-			goto done;
-		}
-
-		if (copy_from_user(kvb_addr, cmdbuf->vb_addr,
-				       cmdbuf->vb_size)) {
-			ret = -EFAULT;
+		kvb_addr = memdup_user(cmdbuf->vb_addr, cmdbuf->vb_size);
+		if (IS_ERR(kvb_addr)) {
+			ret = PTR_ERR(kvb_addr);
 			goto done;
 		}
 		cmdbuf->vb_addr = kvb_addr;
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 79bce76..ae98398 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -102,7 +102,7 @@
 }
 
 static struct drm_driver driver = {
-	.driver_features = DRIVER_USE_AGP,
+	.driver_features = DRIVER_USE_AGP | DRIVER_LEGACY,
 	.load = sis_driver_load,
 	.unload = sis_driver_unload,
 	.open = sis_driver_open,
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index 494ab25..acd7286 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -1,6 +1,6 @@
 config DRM_STI
-	tristate "DRM Support for STMicroelectronics SoC stiH41x Series"
-	depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM)
+	tristate "DRM Support for STMicroelectronics SoC stiH4xx Series"
+	depends on DRM && (ARCH_STI || ARCH_MULTIPLATFORM)
 	select RESET_CONTROLLER
 	select DRM_KMS_HELPER
 	select DRM_GEM_CMA_HELPER
@@ -9,4 +9,4 @@
 	select FW_LOADER
 	select SND_SOC_HDMI_CODEC if SND_SOC
 	help
-	  Choose this option to enable DRM on STM stiH41x chipset
+	  Choose this option to enable DRM on STM stiH4xx chipset
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index b805762..d20f7c0 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -9,7 +9,6 @@
 	sti_crtc.o \
 	sti_plane.o \
 	sti_hdmi.o \
-	sti_hdmi_tx3g0c55phy.o \
 	sti_hdmi_tx3g4c28phy.o \
 	sti_dvo.o \
 	sti_awg_utils.o \
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 134201e..f62041f 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -25,7 +25,7 @@
 /*
  * stiH407 compositor properties
  */
-struct sti_compositor_data stih407_compositor_data = {
+static const struct sti_compositor_data stih407_compositor_data = {
 	.nb_subdev = 8,
 	.subdev_desc = {
 			{STI_CURSOR_SUBDEV, (int)STI_CURSOR, 0x000},
@@ -39,38 +39,18 @@
 	},
 };
 
-/*
- * stiH416 compositor properties
- * Note:
- * on stih416 MIXER_AUX has a different base address from MIXER_MAIN
- * Moreover, GDPx is different for Main and Aux Mixer. So this subdev map does
- * not fit for stiH416 if we want to enable the MIXER_AUX.
- */
-struct sti_compositor_data stih416_compositor_data = {
-	.nb_subdev = 3,
-	.subdev_desc = {
-			{STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100},
-			{STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
-			{STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}
-	},
-};
-
-int sti_compositor_debufs_init(struct sti_compositor *compo,
-			       struct drm_minor *minor)
+int sti_compositor_debugfs_init(struct sti_compositor *compo,
+				struct drm_minor *minor)
 {
-	int ret = 0, i;
+	unsigned int i;
 
-	for (i = 0; compo->vid[i]; i++) {
-		ret = vid_debugfs_init(compo->vid[i], minor);
-		if (ret)
-			return ret;
-	}
+	for (i = 0; i < STI_MAX_VID; i++)
+		if (compo->vid[i])
+			vid_debugfs_init(compo->vid[i], minor);
 
-	for (i = 0; compo->mixer[i]; i++) {
-		ret = sti_mixer_debugfs_init(compo->mixer[i], minor);
-		if (ret)
-			return ret;
-	}
+	for (i = 0; i < STI_MAX_MIXER; i++)
+		if (compo->mixer[i])
+			sti_mixer_debugfs_init(compo->mixer[i], minor);
 
 	return 0;
 }
@@ -183,9 +163,6 @@
 
 static const struct of_device_id compositor_of_match[] = {
 	{
-		.compatible = "st,stih416-compositor",
-		.data = &stih416_compositor_data,
-	}, {
 		.compatible = "st,stih407-compositor",
 		.data = &stih407_compositor_data,
 	}, {
@@ -201,6 +178,7 @@
 	struct device_node *vtg_np;
 	struct sti_compositor *compo;
 	struct resource *res;
+	unsigned int i;
 
 	compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
 	if (!compo) {
@@ -208,7 +186,8 @@
 		return -ENOMEM;
 	}
 	compo->dev = dev;
-	compo->vtg_vblank_nb.notifier_call = sti_crtc_vblank_cb;
+	for (i = 0; i < STI_MAX_MIXER; i++)
+		compo->vtg_vblank_nb[i].notifier_call = sti_crtc_vblank_cb;
 
 	/* populate data structure depending on compatibility */
 	BUG_ON(!of_match_node(compositor_of_match, np)->data);
@@ -266,12 +245,12 @@
 
 	vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
 	if (vtg_np)
-		compo->vtg_main = of_vtg_find(vtg_np);
+		compo->vtg[STI_MIXER_MAIN] = of_vtg_find(vtg_np);
 	of_node_put(vtg_np);
 
 	vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1);
 	if (vtg_np)
-		compo->vtg_aux = of_vtg_find(vtg_np);
+		compo->vtg[STI_MIXER_AUX] = of_vtg_find(vtg_np);
 	of_node_put(vtg_np);
 
 	platform_set_drvdata(pdev, compo);
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 24444ef..2952a2d 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -60,9 +60,8 @@
  * @rst_aux: reset control of the aux path
  * @mixer: array of mixers
  * @vid: array of vids
- * @vtg_main: vtg for main data path
- * @vtg_aux: vtg for auxillary data path
- * @vtg_vblank_nb: callback for VTG VSYNC notification
+ * @vtg: array of vtgs
+ * @vtg_vblank_nb: array of callbacks for VTG VSYNC notification
  */
 struct sti_compositor {
 	struct device *dev;
@@ -76,12 +75,11 @@
 	struct reset_control *rst_aux;
 	struct sti_mixer *mixer[STI_MAX_MIXER];
 	struct sti_vid *vid[STI_MAX_VID];
-	struct sti_vtg *vtg_main;
-	struct sti_vtg *vtg_aux;
-	struct notifier_block vtg_vblank_nb;
+	struct sti_vtg *vtg[STI_MAX_MIXER];
+	struct notifier_block vtg_vblank_nb[STI_MAX_MIXER];
 };
 
-int sti_compositor_debufs_init(struct sti_compositor *compo,
-			       struct drm_minor *minor);
+int sti_compositor_debugfs_init(struct sti_compositor *compo,
+				struct drm_minor *minor);
 
 #endif
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index c7d734d..e992bed 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -86,8 +86,7 @@
 		goto pix_error;
 	}
 
-	sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
-			compo->vtg_main : compo->vtg_aux, &crtc->mode);
+	sti_vtg_set_config(compo->vtg[mixer->id], &crtc->mode);
 
 	if (sti_mixer_active_video_area(mixer, &crtc->mode)) {
 		DRM_ERROR("Can't set active video area\n");
@@ -166,6 +165,10 @@
 
 		switch (plane->status) {
 		case STI_PLANE_UPDATED:
+			/* ignore update for other CRTC */
+			if (p->state->crtc != crtc)
+				continue;
+
 			/* update planes tag as updated */
 			DRM_DEBUG_DRIVER("update plane %s\n",
 					 sti_plane_to_str(plane));
@@ -244,8 +247,7 @@
 int sti_crtc_vblank_cb(struct notifier_block *nb,
 		       unsigned long event, void *data)
 {
-	struct sti_compositor *compo =
-		container_of(nb, struct sti_compositor, vtg_vblank_nb);
+	struct sti_compositor *compo;
 	struct drm_crtc *crtc = data;
 	struct sti_mixer *mixer;
 	unsigned long flags;
@@ -254,6 +256,7 @@
 
 	priv = crtc->dev->dev_private;
 	pipe = drm_crtc_index(crtc);
+	compo = container_of(nb, struct sti_compositor, vtg_vblank_nb[pipe]);
 	mixer = compo->mixer[pipe];
 
 	if ((event != VTG_TOP_FIELD_EVENT) &&
@@ -295,14 +298,13 @@
 {
 	struct sti_private *dev_priv = dev->dev_private;
 	struct sti_compositor *compo = dev_priv->compo;
-	struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
+	struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
 	struct drm_crtc *crtc = &compo->mixer[pipe]->drm_crtc;
+	struct sti_vtg *vtg = compo->vtg[pipe];
 
 	DRM_DEBUG_DRIVER("\n");
 
-	if (sti_vtg_register_client(pipe == STI_MIXER_MAIN ?
-			compo->vtg_main : compo->vtg_aux,
-			vtg_vblank_nb, crtc)) {
+	if (sti_vtg_register_client(vtg, vtg_vblank_nb, crtc)) {
 		DRM_ERROR("Cannot register VTG notifier\n");
 		return -EINVAL;
 	}
@@ -314,13 +316,13 @@
 {
 	struct sti_private *priv = drm_dev->dev_private;
 	struct sti_compositor *compo = priv->compo;
-	struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
+	struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
 	struct drm_crtc *crtc = &compo->mixer[pipe]->drm_crtc;
+	struct sti_vtg *vtg = compo->vtg[pipe];
 
 	DRM_DEBUG_DRIVER("\n");
 
-	if (sti_vtg_unregister_client(pipe == STI_MIXER_MAIN ?
-			compo->vtg_main : compo->vtg_aux, vtg_vblank_nb))
+	if (sti_vtg_unregister_client(vtg, vtg_vblank_nb))
 		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
 
 	/* free the resources of the pending requests */
@@ -336,7 +338,7 @@
 	struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
 
 	if (drm_crtc_index(crtc) == 0)
-		return sti_compositor_debufs_init(compo, crtc->dev->primary);
+		return sti_compositor_debugfs_init(compo, crtc->dev->primary);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 3b53f7f..cca75bd 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -309,15 +309,15 @@
 {
 	struct sti_plane *plane = to_sti_plane(drm_plane);
 
-	if (!drm_plane->crtc) {
+	if (!oldstate->crtc) {
 		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
 				 drm_plane->base.id);
 		return;
 	}
 
 	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
-			 drm_plane->crtc->base.id,
-			 sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
+			 oldstate->crtc->base.id,
+			 sti_mixer_to_str(to_sti_mixer(oldstate->crtc)),
 			 drm_plane->base.id, sti_plane_to_str(plane));
 
 	plane->status = STI_PLANE_DISABLING;
@@ -345,7 +345,7 @@
 	return cursor_debugfs_init(cursor, drm_plane->dev->primary);
 }
 
-struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
+static const struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
 	.destroy = sti_cursor_destroy,
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 96bd3d0..2784919 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -140,7 +140,7 @@
 	return ret;
 }
 
-void sti_drm_dbg_cleanup(struct drm_minor *minor)
+static void sti_drm_dbg_cleanup(struct drm_minor *minor)
 {
 	drm_debugfs_remove_files(sti_drm_dbg_list,
 				 ARRAY_SIZE(sti_drm_dbg_list), minor);
@@ -178,7 +178,7 @@
 	 */
 
 	drm_atomic_helper_commit_modeset_disables(drm, state);
-	drm_atomic_helper_commit_planes(drm, state, false);
+	drm_atomic_helper_commit_planes(drm, state, 0);
 	drm_atomic_helper_commit_modeset_enables(drm, state);
 
 	drm_atomic_helper_wait_for_vblanks(drm, state);
@@ -282,7 +282,7 @@
 };
 
 static struct drm_driver sti_driver = {
-	.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
+	.driver_features = DRIVER_MODESET |
 	    DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
 	.gem_free_object_unlocked = drm_gem_cma_free_object,
 	.gem_vm_ops = &drm_gem_cma_vm_ops,
@@ -365,8 +365,8 @@
 	int ret;
 
 	ddev = drm_dev_alloc(&sti_driver, dev);
-	if (!ddev)
-		return -ENOMEM;
+	if (IS_ERR(ddev))
+		return PTR_ERR(ddev);
 
 	ddev->platformdev = to_platform_device(dev);
 
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 00881eb..e8c1ed0 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -17,6 +17,7 @@
 #include <drm/drm_panel.h>
 
 #include "sti_awg_utils.h"
+#include "sti_drv.h"
 #include "sti_mixer.h"
 
 /* DVO registers */
@@ -106,7 +107,7 @@
 	container_of(x, struct sti_dvo_connector, drm_connector)
 
 #define BLANKING_LEVEL 16
-int dvo_awg_generate_code(struct sti_dvo *dvo, u8 *ram_size, u32 *ram_code)
+static int dvo_awg_generate_code(struct sti_dvo *dvo, u8 *ram_size, u32 *ram_code)
 {
 	struct drm_display_mode *mode = &dvo->mode;
 	struct dvo_config *config = dvo->config;
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index b8d942c..81df309 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -460,6 +460,7 @@
 		clk_disable_unprepare(gdp->clk_pix);
 
 	gdp->plane.status = STI_PLANE_DISABLED;
+	gdp->vtg = NULL;
 }
 
 /**
@@ -473,8 +474,8 @@
  * RETURNS:
  * 0 on success.
  */
-int sti_gdp_field_cb(struct notifier_block *nb,
-		unsigned long event, void *data)
+static int sti_gdp_field_cb(struct notifier_block *nb,
+			    unsigned long event, void *data)
 {
 	struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
 
@@ -611,7 +612,6 @@
 	struct drm_crtc *crtc = state->crtc;
 	struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
 	struct drm_framebuffer *fb =  state->fb;
-	bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
 	struct drm_crtc_state *crtc_state;
 	struct sti_mixer *mixer;
 	struct drm_display_mode *mode;
@@ -628,8 +628,8 @@
 	mode = &crtc_state->mode;
 	dst_x = state->crtc_x;
 	dst_y = state->crtc_y;
-	dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
-	dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
+	dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
 	/* src_x are in 16.16 format */
 	src_x = state->src_x >> 16;
 	src_y = state->src_y >> 16;
@@ -648,10 +648,9 @@
 		return -EINVAL;
 	}
 
-	if (first_prepare) {
+	if (!gdp->vtg) {
 		/* Register gdp callback */
-		gdp->vtg = mixer->id == STI_MIXER_MAIN ?
-					compo->vtg_main : compo->vtg_aux;
+		gdp->vtg = compo->vtg[mixer->id];
 		if (sti_vtg_register_client(gdp->vtg,
 					    &gdp->vtg_field_nb, crtc)) {
 			DRM_ERROR("Cannot register VTG notifier\n");
@@ -719,7 +718,7 @@
 	u32 dma_updated_top;
 	u32 dma_updated_btm;
 	int format;
-	unsigned int depth, bpp;
+	unsigned int bpp;
 	u32 ydo, xdo, yds, xds;
 
 	if (!crtc || !fb)
@@ -728,8 +727,8 @@
 	mode = &crtc->mode;
 	dst_x = state->crtc_x;
 	dst_y = state->crtc_y;
-	dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
-	dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
+	dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
 	/* src_x are in 16.16 format */
 	src_x = state->src_x >> 16;
 	src_y = state->src_y >> 16;
@@ -758,9 +757,9 @@
 			 (unsigned long)cma_obj->paddr);
 
 	/* pixel memory location */
-	drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
+	bpp = drm_format_plane_cpp(fb->pixel_format, 0);
 	top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
-	top_field->gam_gdp_pml += src_x * (bpp >> 3);
+	top_field->gam_gdp_pml += src_x * bpp;
 	top_field->gam_gdp_pml += src_y * fb->pitches[0];
 
 	/* output parameters (clamped / cropped) */
@@ -810,7 +809,7 @@
 	if (!curr_list) {
 		/* First update or invalid node should directly write in the
 		 * hw register */
-		DRM_DEBUG_DRIVER("%s first update (or invalid node)",
+		DRM_DEBUG_DRIVER("%s first update (or invalid node)\n",
 				 sti_plane_to_str(plane));
 
 		writel(gdp->is_curr_top ?
@@ -846,15 +845,15 @@
 {
 	struct sti_plane *plane = to_sti_plane(drm_plane);
 
-	if (!drm_plane->crtc) {
+	if (!oldstate->crtc) {
 		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
 				 drm_plane->base.id);
 		return;
 	}
 
 	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
-			 drm_plane->crtc->base.id,
-			 sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
+			 oldstate->crtc->base.id,
+			 sti_mixer_to_str(to_sti_mixer(oldstate->crtc)),
 			 drm_plane->base.id, sti_plane_to_str(plane));
 
 	plane->status = STI_PLANE_DISABLING;
@@ -882,7 +881,7 @@
 	return gdp_debugfs_init(gdp, drm_plane->dev->primary);
 }
 
-struct drm_plane_funcs sti_gdp_plane_helpers_funcs = {
+static const struct drm_plane_funcs sti_gdp_plane_helpers_funcs = {
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
 	.destroy = sti_gdp_destroy,
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 8505569..e7c243f 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -62,14 +62,8 @@
 #define SCALE_CTRL_CR_DFLT              0x00DB0249
 
 /* Video DACs control */
-#define VIDEO_DACS_CONTROL_MASK         0x0FFF
-#define VIDEO_DACS_CONTROL_SYSCFG2535   0x085C /* for stih416 */
-#define DAC_CFG_HD_OFF_SHIFT            5
-#define DAC_CFG_HD_OFF_MASK             (0x7 << DAC_CFG_HD_OFF_SHIFT)
-#define VIDEO_DACS_CONTROL_SYSCFG5072   0x0120 /* for stih407 */
 #define DAC_CFG_HD_HZUVW_OFF_MASK       BIT(1)
 
-
 /* Upsampler values for the alternative 2X Filter */
 #define SAMPLER_COEF_NB                 8
 #define HDA_ANA_SRC_Y_CFG_ALT_2X        0x01130000
@@ -300,28 +294,14 @@
  */
 static void hda_enable_hd_dacs(struct sti_hda *hda, bool enable)
 {
-	u32 mask;
-
 	if (hda->video_dacs_ctrl) {
 		u32 val;
 
-		switch ((u32)hda->video_dacs_ctrl & VIDEO_DACS_CONTROL_MASK) {
-		case VIDEO_DACS_CONTROL_SYSCFG2535:
-			mask = DAC_CFG_HD_OFF_MASK;
-			break;
-		case VIDEO_DACS_CONTROL_SYSCFG5072:
-			mask = DAC_CFG_HD_HZUVW_OFF_MASK;
-			break;
-		default:
-			DRM_INFO("Video DACS control register not supported!");
-			return;
-		}
-
 		val = readl(hda->video_dacs_ctrl);
 		if (enable)
-			val &= ~mask;
+			val &= ~DAC_CFG_HD_HZUVW_OFF_MASK;
 		else
-			val |= mask;
+			val |= DAC_CFG_HD_HZUVW_OFF_MASK;
 
 		writel(val, hda->video_dacs_ctrl);
 	}
@@ -352,24 +332,11 @@
 static void hda_dbg_video_dacs_ctrl(struct seq_file *s, void __iomem *reg)
 {
 	u32 val = readl(reg);
-	u32 mask;
-
-	switch ((u32)reg & VIDEO_DACS_CONTROL_MASK) {
-	case VIDEO_DACS_CONTROL_SYSCFG2535:
-		mask = DAC_CFG_HD_OFF_MASK;
-		break;
-	case VIDEO_DACS_CONTROL_SYSCFG5072:
-		mask = DAC_CFG_HD_HZUVW_OFF_MASK;
-		break;
-	default:
-		DRM_DEBUG_DRIVER("Warning: DACS ctrl register not supported!");
-		return;
-	}
 
 	seq_puts(s, "\n");
 	seq_printf(s, "\n  %-25s 0x%08X", "VIDEO_DACS_CONTROL", val);
 	seq_puts(s, "\tHD DACs ");
-	seq_puts(s, val & mask ? "disabled" : "enabled");
+	seq_puts(s, val & DAC_CFG_HD_HZUVW_OFF_MASK ? "disabled" : "enabled");
 }
 
 static int hda_dbg_show(struct seq_file *s, void *data)
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index fedc17f..376b076 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -22,7 +22,6 @@
 
 #include "sti_hdmi.h"
 #include "sti_hdmi_tx3g4c28phy.h"
-#include "sti_hdmi_tx3g0c55phy.h"
 #include "sti_vtg.h"
 
 #define HDMI_CFG                        0x0000
@@ -203,7 +202,7 @@
 
 	/* Audio FIFO underrun IRQ */
 	if (hdmi->irq_status & HDMI_INT_AUDIO_FIFO_XRUN)
-		DRM_INFO("Warning: audio FIFO underrun occurs!");
+		DRM_INFO("Warning: audio FIFO underrun occurs!\n");
 
 	return IRQ_HANDLED;
 }
@@ -569,7 +568,7 @@
 
 	/* Wait reset completed */
 	wait_event_interruptible_timeout(hdmi->wait_event,
-					 hdmi->event_received == true,
+					 hdmi->event_received,
 					 msecs_to_jiffies
 					 (HDMI_TIMEOUT_SWRESET));
 
@@ -1054,6 +1053,7 @@
 }
 
 static const struct drm_connector_funcs sti_hdmi_connector_funcs = {
+	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = sti_hdmi_connector_detect,
 	.destroy = drm_connector_cleanup,
@@ -1181,7 +1181,7 @@
 		    HDMI_AUD_CFG_ONE_BIT_INVALID;
 	hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
 
-	hdmi->audio.enabled = 0;
+	hdmi->audio.enabled = false;
 	hdmi_audio_infoframe_config(hdmi);
 }
 
@@ -1213,7 +1213,7 @@
 		return -EINVAL;
 	}
 
-	audio.enabled = 1;
+	audio.enabled = true;
 
 	ret = hdmi_audio_configure(hdmi, &audio);
 	if (ret < 0)
@@ -1265,7 +1265,7 @@
 
 	DRM_DEBUG_DRIVER("\n");
 
-	hdmi->audio.enabled = 0;
+	hdmi->audio.enabled = false;
 
 	hdmi->audio_pdev = platform_device_register_data(
 		dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
@@ -1373,9 +1373,6 @@
 
 static const struct of_device_id hdmi_of_match[] = {
 	{
-		.compatible = "st,stih416-hdmi",
-		.data = &tx3g0c55phy_ops,
-	}, {
 		.compatible = "st,stih407-hdmi",
 		.data = &tx3g4c28phy_ops,
 	}, {
@@ -1422,22 +1419,6 @@
 		goto release_adapter;
 	}
 
-	if (of_device_is_compatible(np, "st,stih416-hdmi")) {
-		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-						   "syscfg");
-		if (!res) {
-			DRM_ERROR("Invalid syscfg resource\n");
-			ret = -ENOMEM;
-			goto release_adapter;
-		}
-		hdmi->syscfg = devm_ioremap_nocache(dev, res->start,
-						    resource_size(res));
-		if (!hdmi->syscfg) {
-			ret = -ENOMEM;
-			goto release_adapter;
-		}
-	}
-
 	hdmi->phy_ops = (struct hdmi_phy_ops *)
 		of_match_node(hdmi_of_match, np)->data;
 
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c
deleted file mode 100644
index 49ae8e4..0000000
--- a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#include "sti_hdmi_tx3g0c55phy.h"
-
-#define HDMI_SRZ_PLL_CFG                0x0504
-#define HDMI_SRZ_TAP_1                  0x0508
-#define HDMI_SRZ_TAP_2                  0x050C
-#define HDMI_SRZ_TAP_3                  0x0510
-#define HDMI_SRZ_CTRL                   0x0514
-
-#define HDMI_SRZ_PLL_CFG_POWER_DOWN     BIT(0)
-#define HDMI_SRZ_PLL_CFG_VCOR_SHIFT     1
-#define HDMI_SRZ_PLL_CFG_VCOR_425MHZ    0
-#define HDMI_SRZ_PLL_CFG_VCOR_850MHZ    1
-#define HDMI_SRZ_PLL_CFG_VCOR_1700MHZ   2
-#define HDMI_SRZ_PLL_CFG_VCOR_3000MHZ   3
-#define HDMI_SRZ_PLL_CFG_VCOR_MASK      3
-#define HDMI_SRZ_PLL_CFG_VCOR(x)        (x << HDMI_SRZ_PLL_CFG_VCOR_SHIFT)
-#define HDMI_SRZ_PLL_CFG_NDIV_SHIFT     8
-#define HDMI_SRZ_PLL_CFG_NDIV_MASK      (0x1F << HDMI_SRZ_PLL_CFG_NDIV_SHIFT)
-#define HDMI_SRZ_PLL_CFG_MODE_SHIFT     16
-#define HDMI_SRZ_PLL_CFG_MODE_13_5_MHZ  0x1
-#define HDMI_SRZ_PLL_CFG_MODE_25_2_MHZ  0x4
-#define HDMI_SRZ_PLL_CFG_MODE_27_MHZ    0x5
-#define HDMI_SRZ_PLL_CFG_MODE_33_75_MHZ 0x6
-#define HDMI_SRZ_PLL_CFG_MODE_40_5_MHZ  0x7
-#define HDMI_SRZ_PLL_CFG_MODE_54_MHZ    0x8
-#define HDMI_SRZ_PLL_CFG_MODE_67_5_MHZ  0x9
-#define HDMI_SRZ_PLL_CFG_MODE_74_25_MHZ 0xA
-#define HDMI_SRZ_PLL_CFG_MODE_81_MHZ    0xB
-#define HDMI_SRZ_PLL_CFG_MODE_82_5_MHZ  0xC
-#define HDMI_SRZ_PLL_CFG_MODE_108_MHZ   0xD
-#define HDMI_SRZ_PLL_CFG_MODE_148_5_MHZ 0xE
-#define HDMI_SRZ_PLL_CFG_MODE_165_MHZ   0xF
-#define HDMI_SRZ_PLL_CFG_MODE_MASK      0xF
-#define HDMI_SRZ_PLL_CFG_MODE(x)        (x << HDMI_SRZ_PLL_CFG_MODE_SHIFT)
-
-#define HDMI_SRZ_CTRL_POWER_DOWN        (1 << 0)
-#define HDMI_SRZ_CTRL_EXTERNAL_DATA_EN  (1 << 1)
-
-/* sysconf registers */
-#define HDMI_REJECTION_PLL_CONFIGURATION 0x0858	/* SYSTEM_CONFIG2534 */
-#define HDMI_REJECTION_PLL_STATUS        0x0948	/* SYSTEM_CONFIG2594 */
-
-#define REJECTION_PLL_HDMI_ENABLE_SHIFT 0
-#define REJECTION_PLL_HDMI_ENABLE_MASK  (0x1 << REJECTION_PLL_HDMI_ENABLE_SHIFT)
-#define REJECTION_PLL_HDMI_PDIV_SHIFT   24
-#define REJECTION_PLL_HDMI_PDIV_MASK    (0x7 << REJECTION_PLL_HDMI_PDIV_SHIFT)
-#define REJECTION_PLL_HDMI_NDIV_SHIFT   16
-#define REJECTION_PLL_HDMI_NDIV_MASK    (0xFF << REJECTION_PLL_HDMI_NDIV_SHIFT)
-#define REJECTION_PLL_HDMI_MDIV_SHIFT   8
-#define REJECTION_PLL_HDMI_MDIV_MASK    (0xFF << REJECTION_PLL_HDMI_MDIV_SHIFT)
-
-#define REJECTION_PLL_HDMI_REJ_PLL_LOCK BIT(0)
-
-#define HDMI_TIMEOUT_PLL_LOCK  50   /*milliseconds */
-
-/**
- * pll mode structure
- *
- * A pointer to an array of these structures is passed to a TMDS (HDMI) output
- * via the control interface to provide board and SoC specific
- * configurations of the HDMI PHY. Each entry in the array specifies a hardware
- * specific configuration for a given TMDS clock frequency range. The array
- * should be terminated with an entry that has all fields set to zero.
- *
- * @min: Lower bound of TMDS clock frequency this entry applies to
- * @max: Upper bound of TMDS clock frequency this entry applies to
- * @mode: SoC specific register configuration
- */
-struct pllmode {
-	u32 min;
-	u32 max;
-	u32 mode;
-};
-
-#define NB_PLL_MODE 7
-static struct pllmode pllmodes[NB_PLL_MODE] = {
-	{13500000, 13513500, HDMI_SRZ_PLL_CFG_MODE_13_5_MHZ},
-	{25174800, 25200000, HDMI_SRZ_PLL_CFG_MODE_25_2_MHZ},
-	{27000000, 27027000, HDMI_SRZ_PLL_CFG_MODE_27_MHZ},
-	{54000000, 54054000, HDMI_SRZ_PLL_CFG_MODE_54_MHZ},
-	{72000000, 74250000, HDMI_SRZ_PLL_CFG_MODE_74_25_MHZ},
-	{108000000, 108108000, HDMI_SRZ_PLL_CFG_MODE_108_MHZ},
-	{148351648, 297000000, HDMI_SRZ_PLL_CFG_MODE_148_5_MHZ}
-};
-
-#define NB_HDMI_PHY_CONFIG 5
-static struct hdmi_phy_config hdmiphy_config[NB_HDMI_PHY_CONFIG] = {
-	{0, 40000000, {0x00101010, 0x00101010, 0x00101010, 0x02} },
-	{40000000, 140000000, {0x00111111, 0x00111111, 0x00111111, 0x02} },
-	{140000000, 160000000, {0x00131313, 0x00101010, 0x00101010, 0x02} },
-	{160000000, 250000000, {0x00131313, 0x00111111, 0x00111111, 0x03FE} },
-	{250000000, 300000000, {0x00151515, 0x00101010, 0x00101010, 0x03FE} },
-};
-
-#define PLL_CHANGE_DELAY	1 /* ms */
-
-/**
- * Disable the pll rejection
- *
- * @hdmi: pointer on the hdmi internal structure
- *
- * return true if the pll has been disabled
- */
-static bool disable_pll_rejection(struct sti_hdmi *hdmi)
-{
-	u32 val;
-
-	DRM_DEBUG_DRIVER("\n");
-
-	val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
-	val &= ~REJECTION_PLL_HDMI_ENABLE_MASK;
-	writel(val, hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
-
-	msleep(PLL_CHANGE_DELAY);
-	val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_STATUS);
-
-	return !(val & REJECTION_PLL_HDMI_REJ_PLL_LOCK);
-}
-
-/**
- * Enable the old BCH/rejection PLL is now reused to provide the CLKPXPLL
- * clock input to the new PHY PLL that generates the serializer clock
- * (TMDS*10) and the TMDS clock which is now fed back into the HDMI
- * formatter instead of the TMDS clock line from ClockGenB.
- *
- * @hdmi: pointer on the hdmi internal structure
- *
- * return true if pll has been correctly set
- */
-static bool enable_pll_rejection(struct sti_hdmi *hdmi)
-{
-	unsigned int inputclock;
-	u32 mdiv, ndiv, pdiv, val;
-
-	DRM_DEBUG_DRIVER("\n");
-
-	if (!disable_pll_rejection(hdmi))
-		return false;
-
-	inputclock = hdmi->mode.clock * 1000;
-
-	DRM_DEBUG_DRIVER("hdmi rejection pll input clock = %dHz\n", inputclock);
-
-
-	/* Power up the HDMI rejection PLL
-	 * Note: On this SoC (stiH416) we are forced to have the input clock
-	 * be equal to the HDMI pixel clock.
-	 *
-	 * The values here have been suggested by validation however they are
-	 * still provisional and subject to change.
-	 *
-	 * PLLout = (Fin*Mdiv) / ((2 * Ndiv) / 2^Pdiv)
-	 */
-	if (inputclock < 50000000) {
-		/*
-		 * For slower clocks we need to multiply more to keep the
-		 * internal VCO frequency within the physical specification
-		 * of the PLL.
-		 */
-		pdiv = 4;
-		ndiv = 240;
-		mdiv = 30;
-	} else {
-		pdiv = 2;
-		ndiv = 60;
-		mdiv = 30;
-	}
-
-	val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
-
-	val &= ~(REJECTION_PLL_HDMI_PDIV_MASK |
-		REJECTION_PLL_HDMI_NDIV_MASK |
-		REJECTION_PLL_HDMI_MDIV_MASK |
-		REJECTION_PLL_HDMI_ENABLE_MASK);
-
-	val |=	(pdiv << REJECTION_PLL_HDMI_PDIV_SHIFT) |
-		(ndiv << REJECTION_PLL_HDMI_NDIV_SHIFT) |
-		(mdiv << REJECTION_PLL_HDMI_MDIV_SHIFT) |
-		(0x1 << REJECTION_PLL_HDMI_ENABLE_SHIFT);
-
-	writel(val, hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
-
-	msleep(PLL_CHANGE_DELAY);
-	val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_STATUS);
-
-	return (val & REJECTION_PLL_HDMI_REJ_PLL_LOCK);
-}
-
-/**
- * Start hdmi phy macro cell tx3g0c55
- *
- * @hdmi: pointer on the hdmi internal structure
- *
- * Return false if an error occur
- */
-static bool sti_hdmi_tx3g0c55phy_start(struct sti_hdmi *hdmi)
-{
-	u32 ckpxpll = hdmi->mode.clock * 1000;
-	u32 val, tmdsck, freqvco, pllctrl = 0;
-	unsigned int i;
-
-	if (!enable_pll_rejection(hdmi))
-		return false;
-
-	DRM_DEBUG_DRIVER("ckpxpll = %dHz\n", ckpxpll);
-
-	/* Assuming no pixel repetition and 24bits color */
-	tmdsck = ckpxpll;
-	pllctrl = 2 << HDMI_SRZ_PLL_CFG_NDIV_SHIFT;
-
-	/*
-	 * Setup the PLL mode parameter based on the ckpxpll. If we haven't got
-	 * a clock frequency supported by one of the specific PLL modes then we
-	 * will end up using the generic mode (0) which only supports a 10x
-	 * multiplier, hence only 24bit color.
-	 */
-	for (i = 0; i < NB_PLL_MODE; i++) {
-		if (ckpxpll >= pllmodes[i].min && ckpxpll <= pllmodes[i].max)
-			pllctrl |= HDMI_SRZ_PLL_CFG_MODE(pllmodes[i].mode);
-	}
-
-	freqvco = tmdsck * 10;
-	if (freqvco <= 425000000UL)
-		pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_425MHZ);
-	else if (freqvco <= 850000000UL)
-		pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_850MHZ);
-	else if (freqvco <= 1700000000UL)
-		pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_1700MHZ);
-	else if (freqvco <= 2970000000UL)
-		pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_3000MHZ);
-	else {
-		DRM_ERROR("PHY serializer clock out of range\n");
-		goto err;
-	}
-
-	/*
-	 * Configure and power up the PHY PLL
-	 */
-	hdmi->event_received = false;
-	DRM_DEBUG_DRIVER("pllctrl = 0x%x\n", pllctrl);
-	hdmi_write(hdmi, pllctrl, HDMI_SRZ_PLL_CFG);
-
-	/* wait PLL interrupt */
-	wait_event_interruptible_timeout(hdmi->wait_event,
-					 hdmi->event_received == true,
-					 msecs_to_jiffies
-					 (HDMI_TIMEOUT_PLL_LOCK));
-
-	if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK) == 0) {
-		DRM_ERROR("hdmi phy pll not locked\n");
-		goto err;
-	}
-
-	DRM_DEBUG_DRIVER("got PHY PLL Lock\n");
-
-	/*
-	 * To configure the source termination and pre-emphasis appropriately
-	 * for different high speed TMDS clock frequencies a phy configuration
-	 * table must be provided, tailored to the SoC and board combination.
-	 */
-	for (i = 0; i < NB_HDMI_PHY_CONFIG; i++) {
-		if ((hdmiphy_config[i].min_tmds_freq <= tmdsck) &&
-		    (hdmiphy_config[i].max_tmds_freq >= tmdsck)) {
-			val = hdmiphy_config[i].config[0];
-			hdmi_write(hdmi, val, HDMI_SRZ_TAP_1);
-			val = hdmiphy_config[i].config[1];
-			hdmi_write(hdmi, val, HDMI_SRZ_TAP_2);
-			val = hdmiphy_config[i].config[2];
-			hdmi_write(hdmi, val, HDMI_SRZ_TAP_3);
-			val = hdmiphy_config[i].config[3];
-			val |= HDMI_SRZ_CTRL_EXTERNAL_DATA_EN;
-			val &= ~HDMI_SRZ_CTRL_POWER_DOWN;
-			hdmi_write(hdmi, val, HDMI_SRZ_CTRL);
-
-			DRM_DEBUG_DRIVER("serializer cfg 0x%x 0x%x 0x%x 0x%x\n",
-					 hdmiphy_config[i].config[0],
-					 hdmiphy_config[i].config[1],
-					 hdmiphy_config[i].config[2],
-					 hdmiphy_config[i].config[3]);
-			return true;
-		}
-	}
-
-	/*
-	 * Default, power up the serializer with no pre-emphasis or source
-	 * termination.
-	 */
-	hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_1);
-	hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_2);
-	hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_3);
-	hdmi_write(hdmi, HDMI_SRZ_CTRL_EXTERNAL_DATA_EN, HDMI_SRZ_CTRL);
-
-	return true;
-
-err:
-	disable_pll_rejection(hdmi);
-
-	return false;
-}
-
-/**
- * Stop hdmi phy macro cell tx3g0c55
- *
- * @hdmi: pointer on the hdmi internal structure
- */
-static void sti_hdmi_tx3g0c55phy_stop(struct sti_hdmi *hdmi)
-{
-	DRM_DEBUG_DRIVER("\n");
-
-	hdmi->event_received = false;
-
-	hdmi_write(hdmi, HDMI_SRZ_CTRL_POWER_DOWN, HDMI_SRZ_CTRL);
-	hdmi_write(hdmi, HDMI_SRZ_PLL_CFG_POWER_DOWN, HDMI_SRZ_PLL_CFG);
-
-	/* wait PLL interrupt */
-	wait_event_interruptible_timeout(hdmi->wait_event,
-					 hdmi->event_received == true,
-					 msecs_to_jiffies
-					 (HDMI_TIMEOUT_PLL_LOCK));
-
-	if (hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK)
-		DRM_ERROR("hdmi phy pll not well disabled\n");
-
-	disable_pll_rejection(hdmi);
-}
-
-struct hdmi_phy_ops tx3g0c55phy_ops = {
-	.start = sti_hdmi_tx3g0c55phy_start,
-	.stop = sti_hdmi_tx3g0c55phy_stop,
-};
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h
deleted file mode 100644
index 068237b..0000000
--- a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_HDMI_TX3G0C55PHY_H_
-#define _STI_HDMI_TX3G0C55PHY_H_
-
-#include "sti_hdmi.h"
-
-extern struct hdmi_phy_ops tx3g0c55phy_ops;
-
-#endif
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index b5ee783..f88130f 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -17,6 +17,7 @@
 #include "sti_hqvdp_lut.h"
 #include "sti_plane.h"
 #include "sti_vtg.h"
+#include "sti_drv.h"
 
 /* Firmware name */
 #define HQVDP_FMW_NAME          "hqvdp-stih407.bin"
@@ -770,6 +771,7 @@
 		DRM_ERROR("XP70 could not revert to idle\n");
 
 	hqvdp->plane.status = STI_PLANE_DISABLED;
+	hqvdp->xp70_initialized = false;
 }
 
 /**
@@ -783,7 +785,7 @@
  * RETURNS:
  * 0 on success.
  */
-int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
+static int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
 {
 	struct sti_hqvdp *hqvdp = container_of(nb, struct sti_hqvdp, vtg_nb);
 	int btm_cmd_offset, top_cmd_offest;
@@ -1012,7 +1014,6 @@
 	struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
 	struct drm_crtc *crtc = state->crtc;
 	struct drm_framebuffer *fb = state->fb;
-	bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
 	struct drm_crtc_state *crtc_state;
 	struct drm_display_mode *mode;
 	int dst_x, dst_y, dst_w, dst_h;
@@ -1026,8 +1027,8 @@
 	mode = &crtc_state->mode;
 	dst_x = state->crtc_x;
 	dst_y = state->crtc_y;
-	dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
-	dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
+	dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
 	/* src_x are in 16.16 format */
 	src_x = state->src_x >> 16;
 	src_y = state->src_y >> 16;
@@ -1063,7 +1064,7 @@
 		return -EINVAL;
 	}
 
-	if (first_prepare) {
+	if (!hqvdp->xp70_initialized) {
 		/* Start HQVDP XP70 coprocessor */
 		sti_hqvdp_start_xp70(hqvdp);
 
@@ -1115,8 +1116,8 @@
 	mode = &crtc->mode;
 	dst_x = state->crtc_x;
 	dst_y = state->crtc_y;
-	dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
-	dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
+	dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
 	/* src_x are in 16.16 format */
 	src_x = state->src_x >> 16;
 	src_y = state->src_y >> 16;
@@ -1214,15 +1215,15 @@
 {
 	struct sti_plane *plane = to_sti_plane(drm_plane);
 
-	if (!drm_plane->crtc) {
+	if (!oldstate->crtc) {
 		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
 				 drm_plane->base.id);
 		return;
 	}
 
 	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
-			 drm_plane->crtc->base.id,
-			 sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
+			 oldstate->crtc->base.id,
+			 sti_mixer_to_str(to_sti_mixer(oldstate->crtc)),
 			 drm_plane->base.id, sti_plane_to_str(plane));
 
 	plane->status = STI_PLANE_DISABLING;
@@ -1250,7 +1251,7 @@
 	return hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
 }
 
-struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
+static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
 	.destroy = sti_hqvdp_destroy,
@@ -1289,7 +1290,7 @@
 	return &hqvdp->plane.drm_plane;
 }
 
-int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
+static int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
 {
 	struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
 	struct drm_device *drm_dev = data;
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 7d9aea8..4ddc58f 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -16,12 +16,6 @@
 MODULE_PARM_DESC(bkgcolor, "Value of the background color 0xRRGGBB");
 module_param_named(bkgcolor, bkg_color, int, 0644);
 
-/* Identity: G=Y , B=Cb , R=Cr */
-static const u32 mixerColorSpaceMatIdentity[] = {
-	0x10000000, 0x00000000, 0x10000000, 0x00001000,
-	0x00000000, 0x00000000, 0x00000000, 0x00000000
-};
-
 /* regs offset */
 #define GAM_MIXER_CTL      0x00
 #define GAM_MIXER_BKC      0x04
@@ -358,22 +352,12 @@
 	return 0;
 }
 
-void sti_mixer_set_matrix(struct sti_mixer *mixer)
-{
-	unsigned int i;
-
-	for (i = 0; i < ARRAY_SIZE(mixerColorSpaceMatIdentity); i++)
-		sti_mixer_reg_write(mixer, GAM_MIXER_MX0 + (i * 4),
-				    mixerColorSpaceMatIdentity[i]);
-}
-
 struct sti_mixer *sti_mixer_create(struct device *dev,
 				   struct drm_device *drm_dev,
 				   int id,
 				   void __iomem *baseaddr)
 {
 	struct sti_mixer *mixer = devm_kzalloc(dev, sizeof(*mixer), GFP_KERNEL);
-	struct device_node *np = dev->of_node;
 
 	dev_dbg(dev, "%s\n", __func__);
 	if (!mixer) {
@@ -384,9 +368,6 @@
 	mixer->dev = dev;
 	mixer->id = id;
 
-	if (of_device_is_compatible(np, "st,stih416-compositor"))
-		sti_mixer_set_matrix(mixer);
-
 	DRM_DEBUG_DRIVER("%s created. Regs=%p\n",
 			 sti_mixer_to_str(mixer), mixer->regs);
 
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index e25995b..ad46d35 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -18,6 +18,7 @@
 #include <drm/drm_crtc_helper.h>
 
 #include "sti_crtc.h"
+#include "sti_drv.h"
 #include "sti_vtg.h"
 
 /* glue registers */
@@ -209,13 +210,11 @@
  * @tvout: tvout structure
  * @reg: register to set
  * @main_path: main or auxiliary path
- * @sel_input_logic_inverted: need to invert the logic
  * @sel_input: selected_input (main/aux + conv)
  */
 static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
 				    int reg,
 				    bool main_path,
-				    bool sel_input_logic_inverted,
 				    enum sti_tvout_video_out_type video_out)
 {
 	u32 sel_input;
@@ -236,8 +235,7 @@
 	}
 
 	/* on stih407 chip the sel_input bypass mode logic is inverted */
-	if (sel_input_logic_inverted)
-		sel_input = sel_input ^ TVO_VIP_SEL_INPUT_BYPASS_MASK;
+	sel_input = sel_input ^ TVO_VIP_SEL_INPUT_BYPASS_MASK;
 
 	val &= ~TVO_VIP_SEL_INPUT_MASK;
 	val |= sel_input;
@@ -295,8 +293,6 @@
  */
 static void tvout_dvo_start(struct sti_tvout *tvout, bool main_path)
 {
-	struct device_node *node = tvout->dev->of_node;
-	bool sel_input_logic_inverted = false;
 	u32 tvo_in_vid_format;
 	int val, tmp;
 
@@ -334,16 +330,11 @@
 	/* Set round mode (rounded to 8-bit per component) */
 	tvout_vip_set_rnd(tvout, TVO_VIP_DVO, TVO_VIP_RND_8BIT_ROUNDED);
 
-	if (of_device_is_compatible(node, "st,stih407-tvout")) {
-		/* Set input video format */
-		tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format,
-					 TVO_IN_FMT_SIGNED);
-		sel_input_logic_inverted = true;
-	}
+	/* Set input video format */
+	tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format, TVO_IN_FMT_SIGNED);
 
 	/* Input selection */
 	tvout_vip_set_sel_input(tvout, TVO_VIP_DVO, main_path,
-				sel_input_logic_inverted,
 				STI_TVOUT_VIDEO_OUT_RGB);
 }
 
@@ -356,8 +347,6 @@
  */
 static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
 {
-	struct device_node *node = tvout->dev->of_node;
-	bool sel_input_logic_inverted = false;
 	u32 tvo_in_vid_format;
 
 	dev_dbg(tvout->dev, "%s\n", __func__);
@@ -390,16 +379,12 @@
 	/* set round mode (rounded to 8-bit per component) */
 	tvout_vip_set_rnd(tvout, TVO_VIP_HDMI, TVO_VIP_RND_8BIT_ROUNDED);
 
-	if (of_device_is_compatible(node, "st,stih407-tvout")) {
-		/* set input video format */
-		tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format,
-					TVO_IN_FMT_SIGNED);
-		sel_input_logic_inverted = true;
-	}
+	/* set input video format */
+	tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format, TVO_IN_FMT_SIGNED);
 
 	/* input selection */
 	tvout_vip_set_sel_input(tvout, TVO_VIP_HDMI, main_path,
-			sel_input_logic_inverted, STI_TVOUT_VIDEO_OUT_RGB);
+				STI_TVOUT_VIDEO_OUT_RGB);
 }
 
 /**
@@ -411,8 +396,6 @@
  */
 static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
 {
-	struct device_node *node = tvout->dev->of_node;
-	bool sel_input_logic_inverted = false;
 	u32 tvo_in_vid_format;
 	int val;
 
@@ -448,16 +431,11 @@
 	/* set round mode (rounded to 10-bit per component) */
 	tvout_vip_set_rnd(tvout, TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED);
 
-	if (of_device_is_compatible(node, "st,stih407-tvout")) {
-		/* set input video format */
-		tvout_vip_set_in_vid_fmt(tvout,
-			tvo_in_vid_format, TVO_IN_FMT_SIGNED);
-		sel_input_logic_inverted = true;
-	}
+	/* Set input video format */
+	tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format, TVO_IN_FMT_SIGNED);
 
 	/* Input selection */
 	tvout_vip_set_sel_input(tvout, TVO_VIP_HDF, main_path,
-				sel_input_logic_inverted,
 				STI_TVOUT_VIDEO_OUT_YUV);
 
 	/* power up HD DAC */
@@ -905,7 +883,6 @@
 }
 
 static const struct of_device_id tvout_of_match[] = {
-	{ .compatible = "st,stih416-tvout", },
 	{ .compatible = "st,stih407-tvout", },
 	{ /* end node */ }
 };
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 47634a0..2ad5989 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -142,8 +142,8 @@
 	struct drm_display_mode *mode = &crtc->mode;
 	int dst_x = state->crtc_x;
 	int dst_y = state->crtc_y;
-	int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
-	int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	int dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
+	int dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
 	int src_h = state->src_h >> 16;
 	u32 val, ydo, xdo, yds, xds;
 
diff --git a/drivers/gpu/drm/sti/sti_vtac.c b/drivers/gpu/drm/sti/sti_vtac.c
index b1eb0d7..cf7fe8a 100644
--- a/drivers/gpu/drm/sti/sti_vtac.c
+++ b/drivers/gpu/drm/sti/sti_vtac.c
@@ -12,6 +12,8 @@
 
 #include <drm/drmP.h>
 
+#include "sti_drv.h"
+
 /* registers offset */
 #define VTAC_CONFIG                     0x00
 #define VTAC_RX_FIFO_CONFIG             0x04
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 0bdc385..a8882bd 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -13,6 +13,7 @@
 
 #include <drm/drmP.h>
 
+#include "sti_drv.h"
 #include "sti_vtg.h"
 
 #define VTG_MODE_MASTER         0
@@ -72,7 +73,7 @@
 #define AWG_DELAY_ED        (-8)
 #define AWG_DELAY_SD        (-7)
 
-LIST_HEAD(vtg_lookup);
+static LIST_HEAD(vtg_lookup);
 
 /*
  * STI VTG register offset structure
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index 58cd551..d625a82 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -9,5 +9,5 @@
 
 obj-$(CONFIG_DRM_SUN4I)		+= sun4i-drm.o sun4i-tcon.o
 obj-$(CONFIG_DRM_SUN4I)		+= sun4i_backend.o
-
+obj-$(CONFIG_DRM_SUN4I)		+= sun6i_drc.o
 obj-$(CONFIG_DRM_SUN4I)		+= sun4i_tv.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 3ab5604..32c0584 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -83,8 +83,13 @@
 }
 EXPORT_SYMBOL(sun4i_backend_layer_enable);
 
-static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
+static int sun4i_backend_drm_format_to_layer(struct drm_plane *plane,
+					     u32 format, u32 *mode)
 {
+	if ((plane->type == DRM_PLANE_TYPE_PRIMARY) &&
+	    (format == DRM_FORMAT_ARGB8888))
+		format = DRM_FORMAT_XRGB8888;
+
 	switch (format) {
 	case DRM_FORMAT_ARGB8888:
 		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
@@ -164,7 +169,7 @@
 	DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
 			 interlaced ? "on" : "off");
 
-	ret = sun4i_backend_drm_format_to_layer(fb->pixel_format, &val);
+	ret = sun4i_backend_drm_format_to_layer(plane, fb->pixel_format, &val);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Invalid format\n");
 		return val;
@@ -217,6 +222,51 @@
 }
 EXPORT_SYMBOL(sun4i_backend_update_layer_buffer);
 
+static int sun4i_backend_init_sat(struct device *dev) {
+	struct sun4i_backend *backend = dev_get_drvdata(dev);
+	int ret;
+
+	backend->sat_reset = devm_reset_control_get(dev, "sat");
+	if (IS_ERR(backend->sat_reset)) {
+		dev_err(dev, "Couldn't get the SAT reset line\n");
+		return PTR_ERR(backend->sat_reset);
+	}
+
+	ret = reset_control_deassert(backend->sat_reset);
+	if (ret) {
+		dev_err(dev, "Couldn't deassert the SAT reset line\n");
+		return ret;
+	}
+
+	backend->sat_clk = devm_clk_get(dev, "sat");
+	if (IS_ERR(backend->sat_clk)) {
+		dev_err(dev, "Couldn't get our SAT clock\n");
+		ret = PTR_ERR(backend->sat_clk);
+		goto err_assert_reset;
+	}
+
+	ret = clk_prepare_enable(backend->sat_clk);
+	if (ret) {
+		dev_err(dev, "Couldn't enable the SAT clock\n");
+		return ret;
+	}
+
+	return 0;
+
+err_assert_reset:
+	reset_control_assert(backend->sat_reset);
+	return ret;
+}
+
+static int sun4i_backend_free_sat(struct device *dev) {
+	struct sun4i_backend *backend = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(backend->sat_clk);
+	reset_control_assert(backend->sat_reset);
+
+	return 0;
+}
+
 static struct regmap_config sun4i_backend_regmap_config = {
 	.reg_bits	= 32,
 	.val_bits	= 32,
@@ -243,10 +293,8 @@
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(regs)) {
-		dev_err(dev, "Couldn't map the backend registers\n");
+	if (IS_ERR(regs))
 		return PTR_ERR(regs);
-	}
 
 	backend->regs = devm_regmap_init_mmio(dev, regs,
 					      &sun4i_backend_regmap_config);
@@ -291,6 +339,15 @@
 	}
 	clk_prepare_enable(backend->ram_clk);
 
+	if (of_device_is_compatible(dev->of_node,
+				    "allwinner,sun8i-a33-display-backend")) {
+		ret = sun4i_backend_init_sat(dev);
+		if (ret) {
+			dev_err(dev, "Couldn't init SAT resources\n");
+			goto err_disable_ram_clk;
+		}
+	}
+
 	/* Reset the registers */
 	for (i = 0x800; i < 0x1000; i += 4)
 		regmap_write(backend->regs, i, 0);
@@ -306,6 +363,8 @@
 
 	return 0;
 
+err_disable_ram_clk:
+	clk_disable_unprepare(backend->ram_clk);
 err_disable_mod_clk:
 	clk_disable_unprepare(backend->mod_clk);
 err_disable_bus_clk:
@@ -320,6 +379,10 @@
 {
 	struct sun4i_backend *backend = dev_get_drvdata(dev);
 
+	if (of_device_is_compatible(dev->of_node,
+				    "allwinner,sun8i-a33-display-backend"))
+		sun4i_backend_free_sat(dev);
+
 	clk_disable_unprepare(backend->ram_clk);
 	clk_disable_unprepare(backend->mod_clk);
 	clk_disable_unprepare(backend->bus_clk);
@@ -345,6 +408,7 @@
 
 static const struct of_device_id sun4i_backend_of_table[] = {
 	{ .compatible = "allwinner,sun5i-a13-display-backend" },
+	{ .compatible = "allwinner,sun8i-a33-display-backend" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.h b/drivers/gpu/drm/sun4i/sun4i_backend.h
index 7070bb3..83e63cc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.h
@@ -52,8 +52,8 @@
 #define SUN4I_BACKEND_LAYFB_L32ADD_REG(l)	(0x850 + (0x4 * (l)))
 
 #define SUN4I_BACKEND_LAYFB_H4ADD_REG		0x860
-#define SUN4I_BACKEND_LAYFB_H4ADD_MSK(l)		GENMASK(3 + ((l) * 8), 0)
-#define SUN4I_BACKEND_LAYFB_H4ADD(l, val)			((val) << ((l) * 8))
+#define SUN4I_BACKEND_LAYFB_H4ADD_MSK(l)		GENMASK(3 + ((l) * 8), (l) * 8)
+#define SUN4I_BACKEND_LAYFB_H4ADD(l, val)		((val) << ((l) * 8))
 
 #define SUN4I_BACKEND_REGBUFFCTL_REG		0x870
 #define SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS		BIT(1)
@@ -146,6 +146,9 @@
 	struct clk		*bus_clk;
 	struct clk		*mod_clk;
 	struct clk		*ram_clk;
+
+	struct clk		*sat_clk;
+	struct reset_control	*sat_reset;
 };
 
 void sun4i_backend_apply_color_correction(struct sun4i_backend *backend);
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
index 5b34631..d401156 100644
--- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
@@ -14,6 +14,7 @@
 #include <linux/regmap.h>
 
 #include "sun4i_tcon.h"
+#include "sun4i_dotclock.h"
 
 struct sun4i_dclk {
 	struct clk_hw	hw;
@@ -61,7 +62,7 @@
 	regmap_read(dclk->regmap, SUN4I_TCON0_DCLK_REG, &val);
 
 	val >>= SUN4I_TCON0_DCLK_DIV_SHIFT;
-	val &= SUN4I_TCON0_DCLK_DIV_WIDTH;
+	val &= (1 << SUN4I_TCON0_DCLK_DIV_WIDTH) - 1;
 
 	if (!val)
 		val = 1;
@@ -76,7 +77,7 @@
 	u8 best_div = 1;
 	int i;
 
-	for (i = 6; i < 127; i++) {
+	for (i = 6; i <= 127; i++) {
 		unsigned long ideal = rate * i;
 		unsigned long rounded;
 
@@ -89,7 +90,8 @@
 			goto out;
 		}
 
-		if ((rounded < ideal) && (rounded > best_parent)) {
+		if (abs(rate - rounded / i) <
+		    abs(rate - best_parent / best_div)) {
 			best_parent = rounded;
 			best_div = i;
 		}
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 7092daa..0da9862 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -17,6 +17,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_helper.h>
 
 #include "sun4i_crtc.h"
 #include "sun4i_drv.h"
@@ -109,7 +110,7 @@
 	ap->ranges[0].base = 0;
 	ap->ranges[0].size = ~0;
 
-	remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false);
+	drm_fb_helper_remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false);
 	kfree(ap);
 }
 
@@ -120,8 +121,8 @@
 	int ret;
 
 	drm = drm_dev_alloc(&sun4i_drv_driver, dev);
-	if (!drm)
-		return -ENOMEM;
+	if (IS_ERR(drm))
+		return PTR_ERR(drm);
 
 	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
 	if (!drv) {
@@ -199,13 +200,14 @@
 
 static bool sun4i_drv_node_is_frontend(struct device_node *node)
 {
-	return of_device_is_compatible(node,
-				       "allwinner,sun5i-a13-display-frontend");
+	return of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
+		of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend");
 }
 
 static bool sun4i_drv_node_is_tcon(struct device_node *node)
 {
-	return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon");
+	return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") ||
+		of_device_is_compatible(node, "allwinner,sun8i-a33-tcon");
 }
 
 static int compare_of(struct device *dev, void *data)
@@ -257,8 +259,8 @@
 		}
 
 		/*
-		 * If the node is our TCON, the first port is used for our
-		 * panel, and will not be part of the
+		 * If the node is our TCON, the first port is used for
+		 * panel or bridges, and will not be part of the
 		 * component framework.
 		 */
 		if (sun4i_drv_node_is_tcon(node)) {
@@ -320,6 +322,7 @@
 
 static const struct of_device_id sun4i_drv_of_table[] = {
 	{ .compatible = "allwinner,sun5i-a13-display-engine" },
+	{ .compatible = "allwinner,sun8i-a33-display-engine" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index 70688fe..8b6ce619 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -15,6 +15,7 @@
 #include <drm/drmP.h>
 
 #include "sun4i_drv.h"
+#include "sun4i_framebuffer.h"
 
 static void sun4i_de_output_poll_changed(struct drm_device *drm)
 {
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index 068ab80..f0035bf 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -19,7 +19,12 @@
 #include "sun4i_drv.h"
 #include "sun4i_layer.h"
 
-#define SUN4I_NUM_LAYERS	2
+struct sun4i_plane_desc {
+	       enum drm_plane_type     type;
+	       u8                      pipe;
+	       const uint32_t          *formats;
+	       uint32_t                nformats;
+};
 
 static int sun4i_backend_layer_atomic_check(struct drm_plane *plane,
 					    struct drm_plane_state *state)
@@ -65,14 +70,35 @@
 	.update_plane		= drm_atomic_helper_update_plane,
 };
 
-static const uint32_t sun4i_backend_layer_formats[] = {
+static const uint32_t sun4i_backend_layer_formats_primary[] = {
 	DRM_FORMAT_ARGB8888,
-	DRM_FORMAT_XRGB8888,
 	DRM_FORMAT_RGB888,
+	DRM_FORMAT_XRGB8888,
+};
+
+static const uint32_t sun4i_backend_layer_formats_overlay[] = {
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_RGB888,
+	DRM_FORMAT_XRGB8888,
+};
+
+static const struct sun4i_plane_desc sun4i_backend_planes[] = {
+	{
+		.type = DRM_PLANE_TYPE_PRIMARY,
+		.pipe = 0,
+		.formats = sun4i_backend_layer_formats_primary,
+		.nformats = ARRAY_SIZE(sun4i_backend_layer_formats_primary),
+	},
+	{
+		.type = DRM_PLANE_TYPE_OVERLAY,
+		.pipe = 1,
+		.formats = sun4i_backend_layer_formats_overlay,
+		.nformats = ARRAY_SIZE(sun4i_backend_layer_formats_overlay),
+	},
 };
 
 static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
-						enum drm_plane_type type)
+						const struct sun4i_plane_desc *plane)
 {
 	struct sun4i_drv *drv = drm->dev_private;
 	struct sun4i_layer *layer;
@@ -84,10 +110,8 @@
 
 	ret = drm_universal_plane_init(drm, &layer->plane, BIT(0),
 				       &sun4i_backend_layer_funcs,
-				       sun4i_backend_layer_formats,
-				       ARRAY_SIZE(sun4i_backend_layer_formats),
-				       type,
-				       NULL);
+				       plane->formats, plane->nformats,
+				       plane->type, NULL);
 	if (ret) {
 		dev_err(drm->dev, "Couldn't initialize layer\n");
 		return ERR_PTR(ret);
@@ -97,7 +121,7 @@
 			     &sun4i_backend_layer_helper_funcs);
 	layer->drv = drv;
 
-	if (type == DRM_PLANE_TYPE_PRIMARY)
+	if (plane->type == DRM_PLANE_TYPE_PRIMARY)
 		drv->primary = &layer->plane;
 
 	return layer;
@@ -109,8 +133,8 @@
 	struct sun4i_layer **layers;
 	int i;
 
-	layers = devm_kcalloc(drm->dev, SUN4I_NUM_LAYERS, sizeof(**layers),
-			      GFP_KERNEL);
+	layers = devm_kcalloc(drm->dev, ARRAY_SIZE(sun4i_backend_planes),
+			      sizeof(**layers), GFP_KERNEL);
 	if (!layers)
 		return ERR_PTR(-ENOMEM);
 
@@ -135,13 +159,11 @@
 	 * SoCs that support it, sprites could fill the need for more
 	 * layers.
 	 */
-	for (i = 0; i < SUN4I_NUM_LAYERS; i++) {
-		enum drm_plane_type type = (i == 0)
-					 ? DRM_PLANE_TYPE_PRIMARY
-					 : DRM_PLANE_TYPE_OVERLAY;
+	for (i = 0; i < ARRAY_SIZE(sun4i_backend_planes); i++) {
+		const struct sun4i_plane_desc *plane = &sun4i_backend_planes[i];
 		struct sun4i_layer *layer = layers[i];
 
-		layer = sun4i_layer_init_one(drm, type);
+		layer = sun4i_layer_init_one(drm, plane);
 		if (IS_ERR(layer)) {
 			dev_err(drm->dev, "Couldn't initialize %s plane\n",
 				i ? "overlay" : "primary");
@@ -149,10 +171,10 @@
 		};
 
 		DRM_DEBUG_DRIVER("Assigning %s plane to pipe %d\n",
-				 i ? "overlay" : "primary", i);
+				 i ? "overlay" : "primary", plane->pipe);
 		regmap_update_bits(drv->backend->regs, SUN4I_BACKEND_ATTCTL_REG0(i),
 				   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK,
-				   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(i));
+				   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(plane->pipe));
 
 		layer->id = i;
 	};
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index f5bbac6..c3ff10f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -19,6 +19,7 @@
 
 #include "sun4i_drv.h"
 #include "sun4i_tcon.h"
+#include "sun4i_rgb.h"
 
 struct sun4i_rgb {
 	struct drm_connector	connector;
@@ -151,7 +152,14 @@
 
 	DRM_DEBUG_DRIVER("Enabling RGB output\n");
 
-	drm_panel_enable(tcon->panel);
+	if (!IS_ERR(tcon->panel)) {
+		drm_panel_prepare(tcon->panel);
+		drm_panel_enable(tcon->panel);
+	}
+
+	/* encoder->bridge can be NULL; drm_bridge_enable checks for it */
+	drm_bridge_enable(encoder->bridge);
+
 	sun4i_tcon_channel_enable(tcon, 0);
 }
 
@@ -164,7 +172,14 @@
 	DRM_DEBUG_DRIVER("Disabling RGB output\n");
 
 	sun4i_tcon_channel_disable(tcon, 0);
-	drm_panel_disable(tcon->panel);
+
+	/* encoder->bridge can be NULL; drm_bridge_disable checks for it */
+	drm_bridge_disable(encoder->bridge);
+
+	if (!IS_ERR(tcon->panel)) {
+		drm_panel_disable(tcon->panel);
+		drm_panel_unprepare(tcon->panel);
+	}
 }
 
 static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
@@ -203,17 +218,22 @@
 {
 	struct sun4i_drv *drv = drm->dev_private;
 	struct sun4i_tcon *tcon = drv->tcon;
+	struct drm_encoder *encoder;
 	struct sun4i_rgb *rgb;
 	int ret;
 
-	/* If we don't have a panel, there's no point in going on */
-	if (IS_ERR(tcon->panel))
-		return -ENODEV;
-
 	rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL);
 	if (!rgb)
 		return -ENOMEM;
 	rgb->drv = drv;
+	encoder = &rgb->encoder;
+
+	tcon->panel = sun4i_tcon_find_panel(tcon->dev->of_node);
+	encoder->bridge = sun4i_tcon_find_bridge(tcon->dev->of_node);
+	if (IS_ERR(tcon->panel) && IS_ERR(encoder->bridge)) {
+		dev_info(drm->dev, "No panel or bridge found... RGB output disabled\n");
+		return 0;
+	}
 
 	drm_encoder_helper_add(&rgb->encoder,
 			       &sun4i_rgb_enc_helper_funcs);
@@ -230,19 +250,38 @@
 	/* The RGB encoder can only work with the TCON channel 0 */
 	rgb->encoder.possible_crtcs = BIT(0);
 
-	drm_connector_helper_add(&rgb->connector,
-				 &sun4i_rgb_con_helper_funcs);
-	ret = drm_connector_init(drm, &rgb->connector,
-				 &sun4i_rgb_con_funcs,
-				 DRM_MODE_CONNECTOR_Unknown);
-	if (ret) {
-		dev_err(drm->dev, "Couldn't initialise the rgb connector\n");
-		goto err_cleanup_connector;
+	if (!IS_ERR(tcon->panel)) {
+		drm_connector_helper_add(&rgb->connector,
+					 &sun4i_rgb_con_helper_funcs);
+		ret = drm_connector_init(drm, &rgb->connector,
+					 &sun4i_rgb_con_funcs,
+					 DRM_MODE_CONNECTOR_Unknown);
+		if (ret) {
+			dev_err(drm->dev, "Couldn't initialise the rgb connector\n");
+			goto err_cleanup_connector;
+		}
+
+		drm_mode_connector_attach_encoder(&rgb->connector,
+						  &rgb->encoder);
+
+		ret = drm_panel_attach(tcon->panel, &rgb->connector);
+		if (ret) {
+			dev_err(drm->dev, "Couldn't attach our panel\n");
+			goto err_cleanup_connector;
+		}
 	}
 
-	drm_mode_connector_attach_encoder(&rgb->connector, &rgb->encoder);
+	if (!IS_ERR(encoder->bridge)) {
+		encoder->bridge->encoder = &rgb->encoder;
 
-	drm_panel_attach(tcon->panel, &rgb->connector);
+		ret = drm_bridge_attach(drm, encoder->bridge);
+		if (ret) {
+			dev_err(drm->dev, "Couldn't attach our bridge\n");
+			goto err_cleanup_connector;
+		}
+	} else {
+		encoder->bridge = NULL;
+	}
 
 	return 0;
 
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 652385f..cadacb5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -59,11 +59,13 @@
 		regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
 				   SUN4I_TCON0_CTL_TCON_ENABLE, 0);
 		clk_disable_unprepare(tcon->dclk);
-	} else if (channel == 1) {
-		regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
-				   SUN4I_TCON1_CTL_TCON_ENABLE, 0);
-		clk_disable_unprepare(tcon->sclk1);
+		return;
 	}
+
+	WARN_ON(!tcon->has_channel_1);
+	regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+			   SUN4I_TCON1_CTL_TCON_ENABLE, 0);
+	clk_disable_unprepare(tcon->sclk1);
 }
 EXPORT_SYMBOL(sun4i_tcon_channel_disable);
 
@@ -75,12 +77,14 @@
 				   SUN4I_TCON0_CTL_TCON_ENABLE,
 				   SUN4I_TCON0_CTL_TCON_ENABLE);
 		clk_prepare_enable(tcon->dclk);
-	} else if (channel == 1) {
-		regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
-				   SUN4I_TCON1_CTL_TCON_ENABLE,
-				   SUN4I_TCON1_CTL_TCON_ENABLE);
-		clk_prepare_enable(tcon->sclk1);
+		return;
 	}
+
+	WARN_ON(!tcon->has_channel_1);
+	regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+			   SUN4I_TCON1_CTL_TCON_ENABLE,
+			   SUN4I_TCON1_CTL_TCON_ENABLE);
+	clk_prepare_enable(tcon->sclk1);
 }
 EXPORT_SYMBOL(sun4i_tcon_channel_enable);
 
@@ -198,6 +202,8 @@
 	u8 clk_delay;
 	u32 val;
 
+	WARN_ON(!tcon->has_channel_1);
+
 	/* Adjust clock delay */
 	clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
 	regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
@@ -321,10 +327,12 @@
 		return PTR_ERR(tcon->sclk0);
 	}
 
-	tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
-	if (IS_ERR(tcon->sclk1)) {
-		dev_err(dev, "Couldn't get the TCON channel 1 clock\n");
-		return PTR_ERR(tcon->sclk1);
+	if (tcon->has_channel_1) {
+		tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
+		if (IS_ERR(tcon->sclk1)) {
+			dev_err(dev, "Couldn't get the TCON channel 1 clock\n");
+			return PTR_ERR(tcon->sclk1);
+		}
 	}
 
 	return sun4i_dclk_create(dev, tcon);
@@ -374,10 +382,8 @@
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(regs)) {
-		dev_err(dev, "Couldn't map the TCON registers\n");
+	if (IS_ERR(regs))
 		return PTR_ERR(regs);
-	}
 
 	tcon->regs = devm_regmap_init_mmio(dev, regs,
 					   &sun4i_tcon_regmap_config);
@@ -398,7 +404,7 @@
 	return 0;
 }
 
-static struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
+struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
 {
 	struct device_node *port, *remote, *child;
 	struct device_node *end_node = NULL;
@@ -432,6 +438,40 @@
 	return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER);
 }
 
+struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node)
+{
+	struct device_node *port, *remote, *child;
+	struct device_node *end_node = NULL;
+
+	/* Inputs are listed first, then outputs */
+	port = of_graph_get_port_by_id(node, 1);
+
+	/*
+	 * Our first output is the RGB interface where the panel will
+	 * be connected.
+	 */
+	for_each_child_of_node(port, child) {
+		u32 reg;
+
+		of_property_read_u32(child, "reg", &reg);
+		if (reg == 0)
+			end_node = child;
+	}
+
+	if (!end_node) {
+		DRM_DEBUG_DRIVER("Missing bridge endpoint\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	remote = of_graph_get_remote_port_parent(end_node);
+	if (!remote) {
+		DRM_DEBUG_DRIVER("Enable to parse remote node\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	return of_drm_find_bridge(remote) ?: ERR_PTR(-EPROBE_DEFER);
+}
+
 static int sun4i_tcon_bind(struct device *dev, struct device *master,
 			   void *data)
 {
@@ -446,9 +486,15 @@
 	dev_set_drvdata(dev, tcon);
 	drv->tcon = tcon;
 	tcon->drm = drm;
+	tcon->dev = dev;
 
-	if (of_device_is_compatible(dev->of_node, "allwinner,sun5i-a13-tcon"))
+	if (of_device_is_compatible(dev->of_node, "allwinner,sun5i-a13-tcon")) {
 		tcon->has_mux = true;
+		tcon->has_channel_1 = true;
+	} else {
+		tcon->has_mux = false;
+		tcon->has_channel_1 = false;
+	}
 
 	tcon->lcd_rst = devm_reset_control_get(dev, "lcd");
 	if (IS_ERR(tcon->lcd_rst)) {
@@ -484,12 +530,6 @@
 		goto err_free_clocks;
 	}
 
-	tcon->panel = sun4i_tcon_find_panel(dev->of_node);
-	if (IS_ERR(tcon->panel)) {
-		dev_info(dev, "No panel found... RGB output disabled\n");
-		return 0;
-	}
-
 	ret = sun4i_rgb_init(drm);
 	if (ret < 0)
 		goto err_free_clocks;
@@ -519,19 +559,22 @@
 static int sun4i_tcon_probe(struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node;
+	struct drm_bridge *bridge;
 	struct drm_panel *panel;
 
 	/*
-	 * The panel is not ready.
+	 * Neither the bridge or the panel is ready.
 	 * Defer the probe.
 	 */
 	panel = sun4i_tcon_find_panel(node);
+	bridge = sun4i_tcon_find_bridge(node);
 
 	/*
 	 * If we don't have a panel endpoint, just go on
 	 */
-	if (PTR_ERR(panel) == -EPROBE_DEFER) {
-		DRM_DEBUG_DRIVER("Still waiting for our panel. Deferring...\n");
+	if ((PTR_ERR(panel) == -EPROBE_DEFER) &&
+	    (PTR_ERR(bridge) == -EPROBE_DEFER)) {
+		DRM_DEBUG_DRIVER("Still waiting for our panel/bridge. Deferring...\n");
 		return -EPROBE_DEFER;
 	}
 
@@ -547,6 +590,7 @@
 
 static const struct of_device_id sun4i_tcon_of_table[] = {
 	{ .compatible = "allwinner,sun5i-a13-tcon" },
+	{ .compatible = "allwinner,sun8i-a33-tcon" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, sun4i_tcon_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 0e0b11d..12bd489 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -143,6 +143,7 @@
 #define SUN4I_TCON_MAX_CHANNELS		2
 
 struct sun4i_tcon {
+	struct device			*dev;
 	struct drm_device		*drm;
 	struct regmap			*regs;
 
@@ -163,8 +164,13 @@
 	bool				has_mux;
 
 	struct drm_panel		*panel;
+
+	bool				has_channel_1;
 };
 
+struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node);
+struct drm_panel *sun4i_tcon_find_panel(struct device_node *node);
+
 /* Global Control */
 void sun4i_tcon_disable(struct sun4i_tcon *tcon);
 void sun4i_tcon_enable(struct sun4i_tcon *tcon);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index b841478..1dd3d9e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -161,10 +161,10 @@
 	bool		dac3_en;
 	bool		dac_bit25_en;
 
-	struct color_gains		*color_gains;
-	struct burst_levels		*burst_levels;
-	struct video_levels		*video_levels;
-	struct resync_parameters	*resync_params;
+	const struct color_gains	*color_gains;
+	const struct burst_levels	*burst_levels;
+	const struct video_levels	*video_levels;
+	const struct resync_parameters	*resync_params;
 };
 
 struct sun4i_tv {
@@ -178,39 +178,39 @@
 	struct sun4i_drv	*drv;
 };
 
-struct video_levels ntsc_video_levels = {
+static const struct video_levels ntsc_video_levels = {
 	.black = 282,	.blank = 240,
 };
 
-struct video_levels pal_video_levels = {
+static const struct video_levels pal_video_levels = {
 	.black = 252,	.blank = 252,
 };
 
-struct burst_levels ntsc_burst_levels = {
+static const struct burst_levels ntsc_burst_levels = {
 	.cb = 79,	.cr = 0,
 };
 
-struct burst_levels pal_burst_levels = {
+static const struct burst_levels pal_burst_levels = {
 	.cb = 40,	.cr = 40,
 };
 
-struct color_gains ntsc_color_gains = {
+static const struct color_gains ntsc_color_gains = {
 	.cb = 160,	.cr = 160,
 };
 
-struct color_gains pal_color_gains = {
+static const struct color_gains pal_color_gains = {
 	.cb = 224,	.cr = 224,
 };
 
-struct resync_parameters ntsc_resync_parameters = {
+static const struct resync_parameters ntsc_resync_parameters = {
 	.field = false,	.line = 14,	.pixel = 12,
 };
 
-struct resync_parameters pal_resync_parameters = {
+static const struct resync_parameters pal_resync_parameters = {
 	.field = true,	.line = 13,	.pixel = 12,
 };
 
-struct tv_mode tv_modes[] = {
+static const struct tv_mode tv_modes[] = {
 	{
 		.name		= "NTSC",
 		.mode		= SUN4I_TVE_CFG0_RES_480i,
@@ -289,13 +289,13 @@
  * So far, it doesn't seem to be preserved when the mode is passed by
  * to mode_set for some reason.
  */
-static struct tv_mode *sun4i_tv_find_tv_by_mode(struct drm_display_mode *mode)
+static const struct tv_mode *sun4i_tv_find_tv_by_mode(const struct drm_display_mode *mode)
 {
 	int i;
 
 	/* First try to identify the mode by name */
 	for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
-		struct tv_mode *tv_mode = &tv_modes[i];
+		const struct tv_mode *tv_mode = &tv_modes[i];
 
 		DRM_DEBUG_DRIVER("Comparing mode %s vs %s",
 				 mode->name, tv_mode->name);
@@ -306,7 +306,7 @@
 
 	/* Then by number of lines */
 	for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
-		struct tv_mode *tv_mode = &tv_modes[i];
+		const struct tv_mode *tv_mode = &tv_modes[i];
 
 		DRM_DEBUG_DRIVER("Comparing mode %s vs %s (X: %d vs %d)",
 				 mode->name, tv_mode->name,
@@ -319,7 +319,7 @@
 	return NULL;
 }
 
-static void sun4i_tv_mode_to_drm_mode(struct tv_mode *tv_mode,
+static void sun4i_tv_mode_to_drm_mode(const struct tv_mode *tv_mode,
 				      struct drm_display_mode *mode)
 {
 	DRM_DEBUG_DRIVER("Creating mode %s\n", mode->name);
@@ -386,7 +386,7 @@
 	struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
 	struct sun4i_drv *drv = tv->drv;
 	struct sun4i_tcon *tcon = drv->tcon;
-	struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
+	const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
 
 	sun4i_tcon1_mode_set(tcon, mode);
 
@@ -507,8 +507,14 @@
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
-		struct drm_display_mode *mode = drm_mode_create(connector->dev);
-		struct tv_mode *tv_mode = &tv_modes[i];
+		struct drm_display_mode *mode;
+		const struct tv_mode *tv_mode = &tv_modes[i];
+
+		mode = drm_mode_create(connector->dev);
+		if (!mode) {
+			DRM_ERROR("Failed to create a new display mode\n");
+			return 0;
+		}
 
 		strcpy(mode->name, tv_mode->name);
 
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
new file mode 100644
index 0000000..bf6d846
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun6i_drc.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2016 Free Electrons
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+struct sun6i_drc {
+	struct clk		*bus_clk;
+	struct clk		*mod_clk;
+	struct reset_control	*reset;
+};
+
+static int sun6i_drc_bind(struct device *dev, struct device *master,
+			 void *data)
+{
+	struct sun6i_drc *drc;
+	int ret;
+
+	drc = devm_kzalloc(dev, sizeof(*drc), GFP_KERNEL);
+	if (!drc)
+		return -ENOMEM;
+	dev_set_drvdata(dev, drc);
+
+	drc->reset = devm_reset_control_get(dev, NULL);
+	if (IS_ERR(drc->reset)) {
+		dev_err(dev, "Couldn't get our reset line\n");
+		return PTR_ERR(drc->reset);
+	}
+
+	ret = reset_control_deassert(drc->reset);
+	if (ret) {
+		dev_err(dev, "Couldn't deassert our reset line\n");
+		return ret;
+	}
+
+	drc->bus_clk = devm_clk_get(dev, "ahb");
+	if (IS_ERR(drc->bus_clk)) {
+		dev_err(dev, "Couldn't get our bus clock\n");
+		ret = PTR_ERR(drc->bus_clk);
+		goto err_assert_reset;
+	}
+	clk_prepare_enable(drc->bus_clk);
+
+	drc->mod_clk = devm_clk_get(dev, "mod");
+	if (IS_ERR(drc->mod_clk)) {
+		dev_err(dev, "Couldn't get our mod clock\n");
+		ret = PTR_ERR(drc->mod_clk);
+		goto err_disable_bus_clk;
+	}
+	clk_prepare_enable(drc->mod_clk);
+
+	return 0;
+
+err_disable_bus_clk:
+	clk_disable_unprepare(drc->bus_clk);
+err_assert_reset:
+	reset_control_assert(drc->reset);
+	return ret;
+}
+
+static void sun6i_drc_unbind(struct device *dev, struct device *master,
+			    void *data)
+{
+	struct sun6i_drc *drc = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(drc->mod_clk);
+	clk_disable_unprepare(drc->bus_clk);
+	reset_control_assert(drc->reset);
+}
+
+static struct component_ops sun6i_drc_ops = {
+	.bind	= sun6i_drc_bind,
+	.unbind	= sun6i_drc_unbind,
+};
+
+static int sun6i_drc_probe(struct platform_device *pdev)
+{
+	return component_add(&pdev->dev, &sun6i_drc_ops);
+}
+
+static int sun6i_drc_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &sun6i_drc_ops);
+
+	return 0;
+}
+
+static const struct of_device_id sun6i_drc_of_table[] = {
+	{ .compatible = "allwinner,sun8i-a33-drc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sun6i_drc_of_table);
+
+static struct platform_driver sun6i_drc_platform_driver = {
+	.probe		= sun6i_drc_probe,
+	.remove		= sun6i_drc_remove,
+	.driver		= {
+		.name		= "sun6i-drc",
+		.of_match_table	= sun6i_drc_of_table,
+	},
+};
+module_platform_driver(sun6i_drc_platform_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A31 Dynamic Range Control (DRC) Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index fab5ebc..f418892 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -56,6 +56,7 @@
 };
 
 static struct drm_driver driver = {
+	.driver_features = DRIVER_LEGACY,
 	.set_busid = drm_pci_set_busid,
 	.fops = &tdfx_driver_fops,
 	.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 8495bd0..4010d69 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -480,17 +480,6 @@
 	.atomic_destroy_state = tegra_plane_atomic_destroy_state,
 };
 
-static int tegra_plane_prepare_fb(struct drm_plane *plane,
-				  const struct drm_plane_state *new_state)
-{
-	return 0;
-}
-
-static void tegra_plane_cleanup_fb(struct drm_plane *plane,
-				   const struct drm_plane_state *old_fb)
-{
-}
-
 static int tegra_plane_state_add(struct tegra_plane *plane,
 				 struct drm_plane_state *state)
 {
@@ -591,7 +580,14 @@
 		struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
 
 		window.base[i] = bo->paddr + fb->offsets[i];
-		window.stride[i] = fb->pitches[i];
+
+		/*
+		 * Tegra uses a shared stride for UV planes. Framebuffers are
+		 * already checked for this in the tegra_plane_atomic_check()
+		 * function, so it's safe to ignore the V-plane pitch here.
+		 */
+		if (i < 2)
+			window.stride[i] = fb->pitches[i];
 	}
 
 	tegra_dc_setup_window(dc, p->index, &window);
@@ -624,8 +620,6 @@
 }
 
 static const struct drm_plane_helper_funcs tegra_primary_plane_helper_funcs = {
-	.prepare_fb = tegra_plane_prepare_fb,
-	.cleanup_fb = tegra_plane_cleanup_fb,
 	.atomic_check = tegra_plane_atomic_check,
 	.atomic_update = tegra_plane_atomic_update,
 	.atomic_disable = tegra_plane_atomic_disable,
@@ -796,8 +790,6 @@
 };
 
 static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
-	.prepare_fb = tegra_plane_prepare_fb,
-	.cleanup_fb = tegra_plane_cleanup_fb,
 	.atomic_check = tegra_cursor_atomic_check,
 	.atomic_update = tegra_cursor_atomic_update,
 	.atomic_disable = tegra_cursor_atomic_disable,
@@ -866,8 +858,6 @@
 };
 
 static const struct drm_plane_helper_funcs tegra_overlay_plane_helper_funcs = {
-	.prepare_fb = tegra_plane_prepare_fb,
-	.cleanup_fb = tegra_plane_cleanup_fb,
 	.atomic_check = tegra_plane_atomic_check,
 	.atomic_update = tegra_plane_atomic_update,
 	.atomic_disable = tegra_plane_atomic_disable,
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 755264d..8ab47b5 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -57,7 +57,8 @@
 
 	drm_atomic_helper_commit_modeset_disables(drm, state);
 	drm_atomic_helper_commit_modeset_enables(drm, state);
-	drm_atomic_helper_commit_planes(drm, state, true);
+	drm_atomic_helper_commit_planes(drm, state,
+					DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
 	drm_atomic_helper_wait_for_vblanks(drm, state);
 
@@ -982,8 +983,8 @@
 	int err;
 
 	drm = drm_dev_alloc(driver, &dev->dev);
-	if (!drm)
-		return -ENOMEM;
+	if (IS_ERR(drm))
+		return PTR_ERR(drm);
 
 	dev_set_drvdata(&dev->dev, drm);
 
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index aa60d99..95e622e 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -613,7 +613,7 @@
 	exp_info.flags = flags;
 	exp_info.priv = gem;
 
-	return dma_buf_export(&exp_info);
+	return drm_gem_dmabuf_export(drm, &exp_info);
 }
 
 struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
index deeca48..6f67517 100644
--- a/drivers/gpu/drm/tilcdc/Makefile
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -7,6 +7,7 @@
 					 tilcdc_slave_compat.dtb.o
 
 tilcdc-y := \
+	tilcdc_plane.o \
 	tilcdc_crtc.o \
 	tilcdc_tfp410.o \
 	tilcdc_panel.o \
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 107c8bd..52ebe8f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -15,8 +15,12 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "drm_flip_work.h"
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_flip_work.h>
 #include <drm/drm_plane_helper.h>
+#include <linux/workqueue.h>
 
 #include "tilcdc_drv.h"
 #include "tilcdc_regs.h"
@@ -26,13 +30,16 @@
 struct tilcdc_crtc {
 	struct drm_crtc base;
 
+	struct drm_plane primary;
 	const struct tilcdc_panel_info *info;
 	struct drm_pending_vblank_event *event;
-	int dpms;
+	bool enabled;
 	wait_queue_head_t frame_done_wq;
 	bool frame_done;
 	spinlock_t irq_lock;
 
+	unsigned int lcd_fck_rate;
+
 	ktime_t last_vblank;
 
 	struct drm_framebuffer *curr_fb;
@@ -67,6 +74,7 @@
 	struct drm_gem_cma_object *gem;
 	unsigned int depth, bpp;
 	dma_addr_t start, end;
+	u64 dma_base_and_ceiling;
 
 	drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
 	gem = drm_fb_cma_get_gem_obj(fb, 0);
@@ -77,8 +85,13 @@
 
 	end = start + (crtc->mode.vdisplay * fb->pitches[0]);
 
-	tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, start);
-	tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG, end);
+	/* Write LCDC_DMA_FB_BASE_ADDR_0_REG and LCDC_DMA_FB_CEILING_ADDR_0_REG
+	 * with a single insruction, if available. This should make it more
+	 * unlikely that LCDC would fetch the DMA addresses in the middle of
+	 * an update.
+	 */
+	dma_base_and_ceiling = (u64)(end - 1) << 32 | start;
+	tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling);
 
 	if (tilcdc_crtc->curr_fb)
 		drm_flip_work_queue(&tilcdc_crtc->unref_work,
@@ -87,6 +100,43 @@
 	tilcdc_crtc->curr_fb = fb;
 }
 
+static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
+{
+	struct tilcdc_drm_private *priv = dev->dev_private;
+
+	tilcdc_clear_irqstatus(dev, 0xffffffff);
+
+	if (priv->rev == 1) {
+		tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
+			LCDC_V1_UNDERFLOW_INT_ENA);
+		tilcdc_set(dev, LCDC_DMA_CTRL_REG,
+			LCDC_V1_END_OF_FRAME_INT_ENA);
+	} else {
+		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
+			LCDC_V2_UNDERFLOW_INT_ENA |
+			LCDC_V2_END_OF_FRAME0_INT_ENA |
+			LCDC_FRAME_DONE | LCDC_SYNC_LOST);
+	}
+}
+
+static void tilcdc_crtc_disable_irqs(struct drm_device *dev)
+{
+	struct tilcdc_drm_private *priv = dev->dev_private;
+
+	/* disable irqs that we might have enabled: */
+	if (priv->rev == 1) {
+		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+			LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
+		tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
+			LCDC_V1_END_OF_FRAME_INT_ENA);
+	} else {
+		tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
+			LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
+			LCDC_V2_END_OF_FRAME0_INT_ENA |
+			LCDC_FRAME_DONE | LCDC_SYNC_LOST);
+	}
+}
+
 static void reset(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -100,66 +150,112 @@
 	tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
 }
 
-static void start(struct drm_crtc *crtc)
+static void tilcdc_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
+	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+
+	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+	if (tilcdc_crtc->enabled)
+		return;
+
+	pm_runtime_get_sync(dev->dev);
 
 	reset(crtc);
 
+	tilcdc_crtc_enable_irqs(dev);
+
 	tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
 	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_PALETTE_LOAD_MODE(DATA_ONLY));
 	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+
+	drm_crtc_vblank_on(crtc);
+
+	tilcdc_crtc->enabled = true;
 }
 
-static void stop(struct drm_crtc *crtc)
+void tilcdc_crtc_disable(struct drm_crtc *crtc)
 {
+	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
+	struct tilcdc_drm_private *priv = dev->dev_private;
 
+	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+	if (!tilcdc_crtc->enabled)
+		return;
+
+	tilcdc_crtc->frame_done = false;
 	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+
+	/*
+	 * if necessary wait for framedone irq which will still come
+	 * before putting things to sleep..
+	 */
+	if (priv->rev == 2) {
+		int ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
+					     tilcdc_crtc->frame_done,
+					     msecs_to_jiffies(500));
+		if (ret == 0)
+			dev_err(dev->dev, "%s: timeout waiting for framedone\n",
+				__func__);
+	}
+
+	drm_crtc_vblank_off(crtc);
+
+	tilcdc_crtc_disable_irqs(dev);
+
+	pm_runtime_put_sync(dev->dev);
+
+	if (tilcdc_crtc->next_fb) {
+		drm_flip_work_queue(&tilcdc_crtc->unref_work,
+				    tilcdc_crtc->next_fb);
+		tilcdc_crtc->next_fb = NULL;
+	}
+
+	if (tilcdc_crtc->curr_fb) {
+		drm_flip_work_queue(&tilcdc_crtc->unref_work,
+				    tilcdc_crtc->curr_fb);
+		tilcdc_crtc->curr_fb = NULL;
+	}
+
+	drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
+	tilcdc_crtc->last_vblank = ktime_set(0, 0);
+
+	tilcdc_crtc->enabled = false;
+}
+
+static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
+{
+	return crtc->state && crtc->state->enable && crtc->state->active;
 }
 
 static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+	struct tilcdc_drm_private *priv = crtc->dev->dev_private;
 
-	tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	drm_modeset_lock_crtc(crtc, NULL);
+	tilcdc_crtc_disable(crtc);
+	drm_modeset_unlock_crtc(crtc);
+
+	flush_workqueue(priv->wq);
 
 	of_node_put(crtc->port);
 	drm_crtc_cleanup(crtc);
 	drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
 }
 
-static int tilcdc_verify_fb(struct drm_crtc *crtc, struct drm_framebuffer *fb)
-{
-	struct drm_device *dev = crtc->dev;
-	unsigned int depth, bpp;
-
-	drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
-
-	if (fb->pitches[0] != crtc->mode.hdisplay * bpp / 8) {
-		dev_err(dev->dev,
-			"Invalid pitch: fb and crtc widths must be the same");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
+int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
 		struct drm_framebuffer *fb,
-		struct drm_pending_vblank_event *event,
-		uint32_t page_flip_flags)
+		struct drm_pending_vblank_event *event)
 {
 	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	int r;
 	unsigned long flags;
-	s64 tdiff;
-	ktime_t next_vblank;
 
-	r = tilcdc_verify_fb(crtc, fb);
-	if (r)
-		return r;
+	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
 	if (tilcdc_crtc->event) {
 		dev_err(dev->dev, "already pending page flip!\n");
@@ -170,82 +266,31 @@
 
 	crtc->primary->fb = fb;
 
-	pm_runtime_get_sync(dev->dev);
-
 	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
 
-	next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
-		1000000 / crtc->hwmode.vrefresh);
+	if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
+		ktime_t next_vblank;
+		s64 tdiff;
 
-	tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
+		next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
+			1000000 / crtc->hwmode.vrefresh);
 
-	if (tdiff >= TILCDC_VBLANK_SAFETY_THRESHOLD_US)
+		tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
+
+		if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
+			tilcdc_crtc->next_fb = fb;
+	}
+
+	if (tilcdc_crtc->next_fb != fb)
 		set_scanout(crtc, fb);
-	else
-		tilcdc_crtc->next_fb = fb;
 
 	tilcdc_crtc->event = event;
 
 	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
 
-	pm_runtime_put_sync(dev->dev);
-
 	return 0;
 }
 
-void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
-	struct drm_device *dev = crtc->dev;
-	struct tilcdc_drm_private *priv = dev->dev_private;
-
-	/* we really only care about on or off: */
-	if (mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
-
-	if (tilcdc_crtc->dpms == mode)
-		return;
-
-	tilcdc_crtc->dpms = mode;
-
-	if (mode == DRM_MODE_DPMS_ON) {
-		pm_runtime_get_sync(dev->dev);
-		start(crtc);
-	} else {
-		tilcdc_crtc->frame_done = false;
-		stop(crtc);
-
-		/*
-		 * if necessary wait for framedone irq which will still come
-		 * before putting things to sleep..
-		 */
-		if (priv->rev == 2) {
-			int ret = wait_event_timeout(
-					tilcdc_crtc->frame_done_wq,
-					tilcdc_crtc->frame_done,
-					msecs_to_jiffies(50));
-			if (ret == 0)
-				dev_err(dev->dev, "timeout waiting for framedone\n");
-		}
-
-		pm_runtime_put_sync(dev->dev);
-
-		if (tilcdc_crtc->next_fb) {
-			drm_flip_work_queue(&tilcdc_crtc->unref_work,
-					    tilcdc_crtc->next_fb);
-			tilcdc_crtc->next_fb = NULL;
-		}
-
-		if (tilcdc_crtc->curr_fb) {
-			drm_flip_work_queue(&tilcdc_crtc->unref_work,
-					    tilcdc_crtc->curr_fb);
-			tilcdc_crtc->curr_fb = NULL;
-		}
-
-		drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
-	}
-}
-
 static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
 		const struct drm_display_mode *mode,
 		struct drm_display_mode *adjusted_mode)
@@ -275,41 +320,54 @@
 	return true;
 }
 
-static void tilcdc_crtc_prepare(struct drm_crtc *crtc)
+static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
 {
-	tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	struct drm_device *dev = crtc->dev;
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+	const unsigned clkdiv = 2; /* using a fixed divider of 2 */
+	int ret;
+
+	/* mode.clock is in KHz, set_rate wants parameter in Hz */
+	ret = clk_set_rate(priv->clk, crtc->mode.clock * 1000 * clkdiv);
+	if (ret < 0) {
+		dev_err(dev->dev, "failed to set display clock rate to: %d\n",
+			crtc->mode.clock);
+		return;
+	}
+
+	tilcdc_crtc->lcd_fck_rate = clk_get_rate(priv->clk);
+
+	DBG("lcd_clk=%u, mode clock=%d, div=%u",
+	    tilcdc_crtc->lcd_fck_rate, crtc->mode.clock, clkdiv);
+
+	/* Configure the LCD clock divisor. */
+	tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) |
+		     LCDC_RASTER_MODE);
+
+	if (priv->rev == 2)
+		tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
+				LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
+				LCDC_V2_CORE_CLK_EN);
 }
 
-static void tilcdc_crtc_commit(struct drm_crtc *crtc)
-{
-	tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
-}
-
-static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
-		struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode,
-		int x, int y,
-		struct drm_framebuffer *old_fb)
+static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
 	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct tilcdc_drm_private *priv = dev->dev_private;
 	const struct tilcdc_panel_info *info = tilcdc_crtc->info;
 	uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw;
-	int ret;
+	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+	struct drm_framebuffer *fb = crtc->primary->state->fb;
 
-	ret = tilcdc_crtc_mode_valid(crtc, mode);
-	if (WARN_ON(ret))
-		return ret;
+	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
 	if (WARN_ON(!info))
-		return -EINVAL;
+		return;
 
-	ret = tilcdc_verify_fb(crtc, crtc->primary->fb);
-	if (ret)
-		return ret;
-
-	pm_runtime_get_sync(dev->dev);
+	if (WARN_ON(!fb))
+		return;
 
 	/* Configure the Burst Size and fifo threshold of DMA: */
 	reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770;
@@ -330,7 +388,8 @@
 		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16);
 		break;
 	default:
-		return -EINVAL;
+		dev_err(dev->dev, "invalid burst size\n");
+		return;
 	}
 	reg |= (info->fifo_th << 8);
 	tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg);
@@ -344,9 +403,9 @@
 	vsw = mode->vsync_end - mode->vsync_start;
 
 	DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u",
-			mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);
+	    mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);
 
-	/* Configure the AC Bias Period and Number of Transitions per Interrupt: */
+	/* Set AC Bias Period and Number of Transitions per Interrupt: */
 	reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
 	reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) |
 		LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt);
@@ -381,7 +440,7 @@
 	/*
 	 * be sure to set Bit 10 for the V2 LCDC controller,
 	 * otherwise limited to 1024 pixels width, stopping
-	 * 1920x1080 being suppoted.
+	 * 1920x1080 being supported.
 	 */
 	if (priv->rev == 2) {
 		if ((mode->vdisplay - 1) & 0x400) {
@@ -396,14 +455,15 @@
 	/* Configure display type: */
 	reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
 		~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
-			LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK | 0x000ff000);
+		  LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK |
+		  0x000ff000 /* Palette Loading Delay bits */);
 	reg |= LCDC_TFT_MODE; /* no monochrome/passive support */
 	if (info->tft_alt_mode)
 		reg |= LCDC_TFT_ALT_ENABLE;
 	if (priv->rev == 2) {
 		unsigned int depth, bpp;
 
-		drm_fb_get_bpp_depth(crtc->primary->fb->pixel_format, &depth, &bpp);
+		drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
 		switch (bpp) {
 		case 16:
 			break;
@@ -415,7 +475,7 @@
 			break;
 		default:
 			dev_err(dev->dev, "invalid pixel format\n");
-			return -EINVAL;
+			return;
 		}
 	}
 	reg |= info->fdd < 12;
@@ -436,12 +496,7 @@
 	else
 		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
 
-	/*
-	 * use value from adjusted_mode here as this might have been
-	 * changed as part of the fixup for slave encoders to solve the
-	 * issue where tilcdc timings are not VESA compliant
-	 */
-	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
 		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
 	else
 		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
@@ -456,51 +511,56 @@
 	else
 		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
 
-	drm_framebuffer_reference(crtc->primary->fb);
+	drm_framebuffer_reference(fb);
 
-	set_scanout(crtc, crtc->primary->fb);
+	set_scanout(crtc, fb);
 
-	tilcdc_crtc_update_clk(crtc);
+	tilcdc_crtc_set_clk(crtc);
 
-	pm_runtime_put_sync(dev->dev);
-
-	return 0;
+	crtc->hwmode = crtc->state->adjusted_mode;
 }
 
-static int tilcdc_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-		struct drm_framebuffer *old_fb)
+static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
+				    struct drm_crtc_state *state)
 {
-	struct drm_device *dev = crtc->dev;
-	int r;
+	struct drm_display_mode *mode = &state->mode;
+	int ret;
 
-	r = tilcdc_verify_fb(crtc, crtc->primary->fb);
-	if (r)
-		return r;
+	/* If we are not active we don't care */
+	if (!state->active)
+		return 0;
 
-	drm_framebuffer_reference(crtc->primary->fb);
+	if (state->state->planes[0].ptr != crtc->primary ||
+	    state->state->planes[0].state == NULL ||
+	    state->state->planes[0].state->crtc != crtc) {
+		dev_dbg(crtc->dev->dev, "CRTC primary plane must be present");
+		return -EINVAL;
+	}
 
-	pm_runtime_get_sync(dev->dev);
-
-	set_scanout(crtc, crtc->primary->fb);
-
-	pm_runtime_put_sync(dev->dev);
+	ret = tilcdc_crtc_mode_valid(crtc, mode);
+	if (ret) {
+		dev_dbg(crtc->dev->dev, "Mode \"%s\" not valid", mode->name);
+		return -EINVAL;
+	}
 
 	return 0;
 }
 
 static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
-		.destroy        = tilcdc_crtc_destroy,
-		.set_config     = drm_crtc_helper_set_config,
-		.page_flip      = tilcdc_crtc_page_flip,
+	.destroy        = tilcdc_crtc_destroy,
+	.set_config     = drm_atomic_helper_set_config,
+	.page_flip      = drm_atomic_helper_page_flip,
+	.reset		= drm_atomic_helper_crtc_reset,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 };
 
 static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
-		.dpms           = tilcdc_crtc_dpms,
 		.mode_fixup     = tilcdc_crtc_mode_fixup,
-		.prepare        = tilcdc_crtc_prepare,
-		.commit         = tilcdc_crtc_commit,
-		.mode_set       = tilcdc_crtc_mode_set,
-		.mode_set_base  = tilcdc_crtc_mode_set_base,
+		.enable		= tilcdc_crtc_enable,
+		.disable	= tilcdc_crtc_disable,
+		.atomic_check	= tilcdc_crtc_atomic_check,
+		.mode_set_nofb	= tilcdc_crtc_mode_set_nofb,
 };
 
 int tilcdc_crtc_max_width(struct drm_crtc *crtc)
@@ -622,46 +682,23 @@
 
 void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
 {
-	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct tilcdc_drm_private *priv = dev->dev_private;
-	int dpms = tilcdc_crtc->dpms;
-	unsigned long lcd_clk;
-	const unsigned clkdiv = 2; /* using a fixed divider of 2 */
-	int ret;
+	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
 
-	pm_runtime_get_sync(dev->dev);
+	drm_modeset_lock_crtc(crtc, NULL);
+	if (tilcdc_crtc->lcd_fck_rate != clk_get_rate(priv->clk)) {
+		if (tilcdc_crtc_is_on(crtc)) {
+			pm_runtime_get_sync(dev->dev);
+			tilcdc_crtc_disable(crtc);
 
-	if (dpms == DRM_MODE_DPMS_ON)
-		tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+			tilcdc_crtc_set_clk(crtc);
 
-	/* mode.clock is in KHz, set_rate wants parameter in Hz */
-	ret = clk_set_rate(priv->clk, crtc->mode.clock * 1000 * clkdiv);
-	if (ret < 0) {
-		dev_err(dev->dev, "failed to set display clock rate to: %d\n",
-				crtc->mode.clock);
-		goto out;
+			tilcdc_crtc_enable(crtc);
+			pm_runtime_put_sync(dev->dev);
+		}
 	}
-
-	lcd_clk = clk_get_rate(priv->clk);
-
-	DBG("lcd_clk=%lu, mode clock=%d, div=%u",
-		lcd_clk, crtc->mode.clock, clkdiv);
-
-	/* Configure the LCD clock divisor. */
-	tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) |
-			LCDC_RASTER_MODE);
-
-	if (priv->rev == 2)
-		tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
-				LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
-				LCDC_V2_CORE_CLK_EN);
-
-	if (dpms == DRM_MODE_DPMS_ON)
-		tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
-
-out:
-	pm_runtime_put_sync(dev->dev);
+	drm_modeset_unlock_crtc(crtc);
 }
 
 #define SYNC_LOST_COUNT_LIMIT 50
@@ -718,31 +755,35 @@
 			tilcdc_crtc->frame_intact = true;
 	}
 
+	if (stat & LCDC_FIFO_UNDERFLOW)
+		dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underfow",
+				    __func__, stat);
+
+	/* For revision 2 only */
 	if (priv->rev == 2) {
 		if (stat & LCDC_FRAME_DONE) {
 			tilcdc_crtc->frame_done = true;
 			wake_up(&tilcdc_crtc->frame_done_wq);
 		}
+
+		if (stat & LCDC_SYNC_LOST) {
+			dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
+					    __func__, stat);
+			tilcdc_crtc->frame_intact = false;
+			if (tilcdc_crtc->sync_lost_count++ >
+			    SYNC_LOST_COUNT_LIMIT) {
+				dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, disabling the interrupt", __func__, stat);
+				tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
+					     LCDC_SYNC_LOST);
+			}
+		}
+
+		/* Indicate to LCDC that the interrupt service routine has
+		 * completed, see 13.3.6.1.6 in AM335x TRM.
+		 */
 		tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
 	}
 
-	if (stat & LCDC_SYNC_LOST) {
-		dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
-				    __func__, stat);
-		tilcdc_crtc->frame_intact = false;
-		if (tilcdc_crtc->sync_lost_count++ > SYNC_LOST_COUNT_LIMIT) {
-			dev_err(dev->dev,
-				"%s(0x%08x): Sync lost flood detected, disabling the interrupt",
-				__func__, stat);
-			tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
-				     LCDC_SYNC_LOST);
-		}
-	}
-
-	if (stat & LCDC_FIFO_UNDERFLOW)
-		dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underfow",
-				    __func__, stat);
-
 	return IRQ_HANDLED;
 }
 
@@ -761,7 +802,10 @@
 
 	crtc = &tilcdc_crtc->base;
 
-	tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF;
+	ret = tilcdc_plane_init(dev, &tilcdc_crtc->primary);
+	if (ret < 0)
+		goto fail;
+
 	init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
 
 	drm_flip_work_init(&tilcdc_crtc->unref_work,
@@ -769,7 +813,11 @@
 
 	spin_lock_init(&tilcdc_crtc->irq_lock);
 
-	ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
+	ret = drm_crtc_init_with_planes(dev, crtc,
+					&tilcdc_crtc->primary,
+					NULL,
+					&tilcdc_crtc_funcs,
+					"tilcdc crtc");
 	if (ret < 0)
 		goto fail;
 
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index d278093..a694977 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -20,6 +20,8 @@
 #include <linux/component.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/suspend.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 
 #include "tilcdc_drv.h"
 #include "tilcdc_regs.h"
@@ -31,6 +33,20 @@
 
 static LIST_HEAD(module_list);
 
+static const u32 tilcdc_rev1_formats[] = { DRM_FORMAT_RGB565 };
+
+static const u32 tilcdc_straight_formats[] = { DRM_FORMAT_RGB565,
+					       DRM_FORMAT_BGR888,
+					       DRM_FORMAT_XBGR8888 };
+
+static const u32 tilcdc_crossed_formats[] = { DRM_FORMAT_BGR565,
+					      DRM_FORMAT_RGB888,
+					      DRM_FORMAT_XRGB8888 };
+
+static const u32 tilcdc_legacy_formats[] = { DRM_FORMAT_RGB565,
+					     DRM_FORMAT_RGB888,
+					     DRM_FORMAT_XRGB8888 };
+
 void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
 		const struct tilcdc_module_ops *funcs)
 {
@@ -59,9 +75,84 @@
 	drm_fbdev_cma_hotplug_event(priv->fbdev);
 }
 
+static int tilcdc_atomic_check(struct drm_device *dev,
+			       struct drm_atomic_state *state)
+{
+	int ret;
+
+	ret = drm_atomic_helper_check_modeset(dev, state);
+	if (ret)
+		return ret;
+
+	ret = drm_atomic_helper_check_planes(dev, state);
+	if (ret)
+		return ret;
+
+	/*
+	 * tilcdc ->atomic_check can update ->mode_changed if pixel format
+	 * changes, hence will we check modeset changes again.
+	 */
+	ret = drm_atomic_helper_check_modeset(dev, state);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+static int tilcdc_commit(struct drm_device *dev,
+		  struct drm_atomic_state *state,
+		  bool async)
+{
+	int ret;
+
+	ret = drm_atomic_helper_prepare_planes(dev, state);
+	if (ret)
+		return ret;
+
+	drm_atomic_helper_swap_state(state, true);
+
+	/*
+	 * Everything below can be run asynchronously without the need to grab
+	 * any modeset locks at all under one condition: It must be guaranteed
+	 * that the asynchronous work has either been cancelled (if the driver
+	 * supports it, which at least requires that the framebuffers get
+	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
+	 * before the new state gets committed on the software side with
+	 * drm_atomic_helper_swap_state().
+	 *
+	 * This scheme allows new atomic state updates to be prepared and
+	 * checked in parallel to the asynchronous completion of the previous
+	 * update. Which is important since compositors need to figure out the
+	 * composition of the next frame right after having submitted the
+	 * current layout.
+	 */
+
+	/* Keep HW on while we commit the state. */
+	pm_runtime_get_sync(dev->dev);
+
+	drm_atomic_helper_commit_modeset_disables(dev, state);
+
+	drm_atomic_helper_commit_planes(dev, state, 0);
+
+	drm_atomic_helper_commit_modeset_enables(dev, state);
+
+	/* Now HW should remain on if need becase the crtc is enabled */
+	pm_runtime_put_sync(dev->dev);
+
+	drm_atomic_helper_wait_for_vblanks(dev, state);
+
+	drm_atomic_helper_cleanup_planes(dev, state);
+
+	drm_atomic_state_free(state);
+
+	return 0;
+}
+
 static const struct drm_mode_config_funcs mode_config_funcs = {
 	.fb_create = tilcdc_fb_create,
 	.output_poll_changed = tilcdc_fb_output_poll_changed,
+	.atomic_check = tilcdc_atomic_check,
+	.atomic_commit = tilcdc_commit,
 };
 
 static int modeset_init(struct drm_device *dev)
@@ -93,12 +184,9 @@
 {
 	struct tilcdc_drm_private *priv = container_of(nb,
 			struct tilcdc_drm_private, freq_transition);
-	if (val == CPUFREQ_POSTCHANGE) {
-		if (priv->lcd_fck_rate != clk_get_rate(priv->clk)) {
-			priv->lcd_fck_rate = clk_get_rate(priv->clk);
-			tilcdc_crtc_update_clk(priv->crtc);
-		}
-	}
+
+	if (val == CPUFREQ_POSTCHANGE)
+		tilcdc_crtc_update_clk(priv->crtc);
 
 	return 0;
 }
@@ -112,8 +200,6 @@
 {
 	struct tilcdc_drm_private *priv = dev->dev_private;
 
-	tilcdc_crtc_dpms(priv->crtc, DRM_MODE_DPMS_OFF);
-
 	tilcdc_remove_external_encoders(dev);
 
 	drm_fbdev_cma_fini(priv->fbdev);
@@ -121,9 +207,7 @@
 	drm_mode_config_cleanup(dev);
 	drm_vblank_cleanup(dev);
 
-	pm_runtime_get_sync(dev->dev);
 	drm_irq_uninstall(dev);
-	pm_runtime_put_sync(dev->dev);
 
 #ifdef CONFIG_CPU_FREQ
 	cpufreq_unregister_notifier(&priv->freq_transition,
@@ -146,24 +230,17 @@
 	return 0;
 }
 
-static size_t tilcdc_num_regs(void);
-
 static int tilcdc_load(struct drm_device *dev, unsigned long flags)
 {
 	struct platform_device *pdev = dev->platformdev;
 	struct device_node *node = pdev->dev.of_node;
 	struct tilcdc_drm_private *priv;
-	struct tilcdc_module *mod;
 	struct resource *res;
 	u32 bpp = 0;
 	int ret;
 
 	priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
-	if (priv)
-		priv->saved_register =
-			devm_kcalloc(dev->dev, tilcdc_num_regs(),
-				     sizeof(*priv->saved_register), GFP_KERNEL);
-	if (!priv || !priv->saved_register) {
+	if (!priv) {
 		dev_err(dev->dev, "failed to allocate private data\n");
 		return -ENOMEM;
 	}
@@ -201,7 +278,6 @@
 	}
 
 #ifdef CONFIG_CPU_FREQ
-	priv->lcd_fck_rate = clk_get_rate(priv->clk);
 	priv->freq_transition.notifier_call = cpufreq_transition;
 	ret = cpufreq_register_notifier(&priv->freq_transition,
 			CPUFREQ_TRANSITION_NOTIFIER);
@@ -249,6 +325,37 @@
 
 	pm_runtime_put_sync(dev->dev);
 
+	if (priv->rev == 1) {
+		DBG("Revision 1 LCDC supports only RGB565 format");
+		priv->pixelformats = tilcdc_rev1_formats;
+		priv->num_pixelformats = ARRAY_SIZE(tilcdc_rev1_formats);
+		bpp = 16;
+	} else {
+		const char *str = "\0";
+
+		of_property_read_string(node, "blue-and-red-wiring", &str);
+		if (0 == strcmp(str, "crossed")) {
+			DBG("Configured for crossed blue and red wires");
+			priv->pixelformats = tilcdc_crossed_formats;
+			priv->num_pixelformats =
+				ARRAY_SIZE(tilcdc_crossed_formats);
+			bpp = 32; /* Choose bpp with RGB support for fbdef */
+		} else if (0 == strcmp(str, "straight")) {
+			DBG("Configured for straight blue and red wires");
+			priv->pixelformats = tilcdc_straight_formats;
+			priv->num_pixelformats =
+				ARRAY_SIZE(tilcdc_straight_formats);
+			bpp = 16; /* Choose bpp with RGB support for fbdef */
+		} else {
+			DBG("Blue and red wiring '%s' unknown, use legacy mode",
+			    str);
+			priv->pixelformats = tilcdc_legacy_formats;
+			priv->num_pixelformats =
+				ARRAY_SIZE(tilcdc_legacy_formats);
+			bpp = 16; /* This is just a guess */
+		}
+	}
+
 	ret = modeset_init(dev);
 	if (ret < 0) {
 		dev_err(dev->dev, "failed to initialize mode setting\n");
@@ -262,7 +369,7 @@
 		if (ret < 0)
 			goto fail_mode_config_cleanup;
 
-		ret = tilcdc_add_external_encoders(dev, &bpp);
+		ret = tilcdc_add_external_encoders(dev);
 		if (ret < 0)
 			goto fail_component_cleanup;
 	}
@@ -279,22 +386,14 @@
 		goto fail_external_cleanup;
 	}
 
-	pm_runtime_get_sync(dev->dev);
 	ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
-	pm_runtime_put_sync(dev->dev);
 	if (ret < 0) {
 		dev_err(dev->dev, "failed to install IRQ handler\n");
 		goto fail_vblank_cleanup;
 	}
 
-	list_for_each_entry(mod, &module_list, list) {
-		DBG("%s: preferred_bpp: %d", mod->name, mod->preferred_bpp);
-		bpp = mod->preferred_bpp;
-		if (bpp > 0)
-			break;
-	}
+	drm_mode_config_reset(dev);
 
-	drm_helper_disable_unused_functions(dev);
 	priv->fbdev = drm_fbdev_cma_init(dev, bpp,
 			dev->mode_config.num_crtc,
 			dev->mode_config.num_connector);
@@ -308,20 +407,18 @@
 	return 0;
 
 fail_irq_uninstall:
-	pm_runtime_get_sync(dev->dev);
 	drm_irq_uninstall(dev);
-	pm_runtime_put_sync(dev->dev);
 
 fail_vblank_cleanup:
 	drm_vblank_cleanup(dev);
 
-fail_mode_config_cleanup:
-	drm_mode_config_cleanup(dev);
-
 fail_component_cleanup:
 	if (priv->is_componentized)
 		component_unbind_all(dev->dev, dev);
 
+fail_mode_config_cleanup:
+	drm_mode_config_cleanup(dev);
+
 fail_external_cleanup:
 	tilcdc_remove_external_encoders(dev);
 
@@ -361,45 +458,6 @@
 	return tilcdc_crtc_irq(priv->crtc);
 }
 
-static void tilcdc_irq_preinstall(struct drm_device *dev)
-{
-	tilcdc_clear_irqstatus(dev, 0xffffffff);
-}
-
-static int tilcdc_irq_postinstall(struct drm_device *dev)
-{
-	struct tilcdc_drm_private *priv = dev->dev_private;
-
-	/* enable FIFO underflow irq: */
-	if (priv->rev == 1) {
-		tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_UNDERFLOW_INT_ENA);
-	} else {
-		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
-			   LCDC_V2_UNDERFLOW_INT_ENA |
-			   LCDC_V2_END_OF_FRAME0_INT_ENA |
-			   LCDC_FRAME_DONE | LCDC_SYNC_LOST);
-	}
-
-	return 0;
-}
-
-static void tilcdc_irq_uninstall(struct drm_device *dev)
-{
-	struct tilcdc_drm_private *priv = dev->dev_private;
-
-	/* disable irqs that we might have enabled: */
-	if (priv->rev == 1) {
-		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
-				LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
-		tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_V1_END_OF_FRAME_INT_ENA);
-	} else {
-		tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
-			LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
-			LCDC_V2_END_OF_FRAME0_INT_ENA |
-			LCDC_FRAME_DONE | LCDC_SYNC_LOST);
-	}
-}
-
 static int tilcdc_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
 	return 0;
@@ -410,7 +468,7 @@
 	return;
 }
 
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_PM_SLEEP)
+#if defined(CONFIG_DEBUG_FS)
 static const struct {
 	const char *name;
 	uint8_t  rev;
@@ -441,15 +499,6 @@
 #undef REG
 };
 
-static size_t tilcdc_num_regs(void)
-{
-	return ARRAY_SIZE(registers);
-}
-#else
-static size_t tilcdc_num_regs(void)
-{
-	return 0;
-}
 #endif
 
 #ifdef CONFIG_DEBUG_FS
@@ -537,14 +586,11 @@
 
 static struct drm_driver tilcdc_driver = {
 	.driver_features    = (DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET |
-			       DRIVER_PRIME),
+			       DRIVER_PRIME | DRIVER_ATOMIC),
 	.load               = tilcdc_load,
 	.unload             = tilcdc_unload,
 	.lastclose          = tilcdc_lastclose,
 	.irq_handler        = tilcdc_irq,
-	.irq_preinstall     = tilcdc_irq_preinstall,
-	.irq_postinstall    = tilcdc_irq_postinstall,
-	.irq_uninstall      = tilcdc_irq_uninstall,
 	.get_vblank_counter = drm_vblank_no_hw_counter,
 	.enable_vblank      = tilcdc_enable_vblank,
 	.disable_vblank     = tilcdc_disable_vblank,
@@ -584,28 +630,12 @@
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct tilcdc_drm_private *priv = ddev->dev_private;
-	unsigned i, n = 0;
 
-	drm_kms_helper_poll_disable(ddev);
+	priv->saved_state = drm_atomic_helper_suspend(ddev);
 
 	/* Select sleep pin state */
 	pinctrl_pm_select_sleep_state(dev);
 
-	if (pm_runtime_suspended(dev)) {
-		priv->ctx_valid = false;
-		return 0;
-	}
-
-	/* Disable the LCDC controller, to avoid locking up the PRCM */
-	tilcdc_crtc_dpms(priv->crtc, DRM_MODE_DPMS_OFF);
-
-	/* Save register state: */
-	for (i = 0; i < ARRAY_SIZE(registers); i++)
-		if (registers[i].save && (priv->rev >= registers[i].rev))
-			priv->saved_register[n++] = tilcdc_read(ddev, registers[i].reg);
-
-	priv->ctx_valid = true;
-
 	return 0;
 }
 
@@ -613,23 +643,15 @@
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct tilcdc_drm_private *priv = ddev->dev_private;
-	unsigned i, n = 0;
+	int ret = 0;
 
 	/* Select default pin state */
 	pinctrl_pm_select_default_state(dev);
 
-	if (priv->ctx_valid == true) {
-		/* Restore register state: */
-		for (i = 0; i < ARRAY_SIZE(registers); i++)
-			if (registers[i].save &&
-			    (priv->rev >= registers[i].rev))
-				tilcdc_write(ddev, registers[i].reg,
-					     priv->saved_register[n++]);
-	}
+	if (priv->saved_state)
+		ret = drm_atomic_helper_resume(ddev, priv->saved_state);
 
-	drm_kms_helper_poll_enable(ddev);
-
-	return 0;
+	return ret;
 }
 #endif
 
@@ -648,6 +670,12 @@
 
 static void tilcdc_unbind(struct device *dev)
 {
+	struct drm_device *ddev = dev_get_drvdata(dev);
+
+	/* Check if a subcomponent has already triggered the unloading. */
+	if (!ddev->dev_private)
+		return;
+
 	drm_put_dev(dev_get_drvdata(dev));
 }
 
@@ -680,17 +708,15 @@
 
 static int tilcdc_pdev_remove(struct platform_device *pdev)
 {
-	struct drm_device *ddev = dev_get_drvdata(&pdev->dev);
-	struct tilcdc_drm_private *priv = ddev->dev_private;
+	int ret;
 
-	/* Check if a subcomponent has already triggered the unloading. */
-	if (!priv)
-		return 0;
-
-	if (priv->is_componentized)
-		component_master_del(&pdev->dev, &tilcdc_comp_ops);
-	else
+	ret = tilcdc_get_external_components(&pdev->dev, NULL);
+	if (ret < 0)
+		return ret;
+	else if (ret == 0)
 		drm_put_dev(platform_get_drvdata(pdev));
+	else
+		component_master_del(&pdev->dev, &tilcdc_comp_ops);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index c1de18b..9780c37 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -65,13 +65,15 @@
 	 */
 	uint32_t max_width;
 
-	/* register contents saved across suspend/resume: */
-	u32 *saved_register;
-	bool ctx_valid;
+	/* Supported pixel formats */
+	const uint32_t *pixelformats;
+	uint32_t num_pixelformats;
+
+	/* The context for pm susped/resume cycle is stored here */
+	struct drm_atomic_state *saved_state;
 
 #ifdef CONFIG_CPU_FREQ
 	struct notifier_block freq_transition;
-	unsigned int lcd_fck_rate;
 #endif
 
 	struct workqueue_struct *wq;
@@ -113,7 +115,6 @@
 	const char *name;
 	struct list_head list;
 	const struct tilcdc_module_ops *funcs;
-	unsigned int preferred_bpp;
 };
 
 void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
@@ -171,6 +172,11 @@
 					bool simulate_vesa_sync);
 int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode);
 int tilcdc_crtc_max_width(struct drm_crtc *crtc);
-void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode);
+void tilcdc_crtc_disable(struct drm_crtc *crtc);
+int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
+		struct drm_framebuffer *fb,
+		struct drm_pending_vblank_event *event);
+
+int tilcdc_plane_init(struct drm_device *dev, struct drm_plane *plane);
 
 #endif /* __TILCDC_DRV_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 03acb4f..68e8950 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -52,7 +52,7 @@
 	return MODE_OK;
 }
 
-static int tilcdc_add_external_encoder(struct drm_device *dev, int *bpp,
+static int tilcdc_add_external_encoder(struct drm_device *dev,
 				       struct drm_connector *connector)
 {
 	struct tilcdc_drm_private *priv = dev->dev_private;
@@ -64,7 +64,6 @@
 	/* Only tda998x is supported at the moment. */
 	tilcdc_crtc_set_simulate_vesa_sync(priv->crtc, true);
 	tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_tda998x);
-	*bpp = panel_info_tda998x.bpp;
 
 	connector_funcs = devm_kzalloc(dev->dev, sizeof(*connector_funcs),
 				       GFP_KERNEL);
@@ -94,7 +93,7 @@
 	return 0;
 }
 
-int tilcdc_add_external_encoders(struct drm_device *dev, int *bpp)
+int tilcdc_add_external_encoders(struct drm_device *dev)
 {
 	struct tilcdc_drm_private *priv = dev->dev_private;
 	struct drm_connector *connector;
@@ -108,7 +107,7 @@
 			if (connector == priv->connectors[i])
 				found = true;
 		if (!found) {
-			ret = tilcdc_add_external_encoder(dev, bpp, connector);
+			ret = tilcdc_add_external_encoder(dev, connector);
 			if (ret)
 				return ret;
 		}
@@ -138,14 +137,23 @@
 int tilcdc_get_external_components(struct device *dev,
 				   struct component_match **match)
 {
+	struct device_node *node;
 	struct device_node *ep = NULL;
 	int count = 0;
 
-	while ((ep = of_graph_get_next_endpoint(dev->of_node, ep))) {
-		struct device_node *node;
+	/* Avoid error print by of_graph_get_next_endpoint() if there
+	 * is no ports present.
+	 */
+	node = of_get_child_by_name(dev->of_node, "ports");
+	if (!node)
+		node = of_get_child_by_name(dev->of_node, "port");
+	if (!node)
+		return 0;
+	of_node_put(node);
 
+	while ((ep = of_graph_get_next_endpoint(dev->of_node, ep))) {
 		node = of_graph_get_remote_port_parent(ep);
-		if (!node && !of_device_is_available(node)) {
+		if (!node || !of_device_is_available(node)) {
 			of_node_put(node);
 			continue;
 		}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.h b/drivers/gpu/drm/tilcdc/tilcdc_external.h
index 6aabe27..c700e0c 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.h
@@ -18,7 +18,7 @@
 #ifndef __TILCDC_EXTERNAL_H__
 #define __TILCDC_EXTERNAL_H__
 
-int tilcdc_add_external_encoders(struct drm_device *dev, int *bpp);
+int tilcdc_add_external_encoders(struct drm_device *dev);
 void tilcdc_remove_external_encoders(struct drm_device *dev);
 int tilcdc_get_external_components(struct device *dev,
 				   struct component_match **match);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index ff7774c..2134bb20 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -22,8 +22,10 @@
 #include <video/display_timing.h>
 #include <video/of_display_timing.h>
 #include <video/videomode.h>
+#include <drm/drm_atomic_helper.h>
 
 #include "tilcdc_drv.h"
+#include "tilcdc_panel.h"
 
 struct panel_module {
 	struct tilcdc_module base;
@@ -64,9 +66,7 @@
 
 static void panel_encoder_prepare(struct drm_encoder *encoder)
 {
-	struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
 	panel_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-	tilcdc_crtc_set_panel_info(encoder->crtc, panel_encoder->mod->info);
 }
 
 static void panel_encoder_commit(struct drm_encoder *encoder)
@@ -196,9 +196,12 @@
 
 static const struct drm_connector_funcs panel_connector_funcs = {
 	.destroy            = panel_connector_destroy,
-	.dpms               = drm_helper_connector_dpms,
+	.dpms               = drm_atomic_helper_connector_dpms,
 	.detect             = panel_connector_detect,
 	.fill_modes         = drm_helper_probe_single_connector_modes,
+	.reset              = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 static const struct drm_connector_helper_funcs panel_connector_helper_funcs = {
@@ -268,6 +271,9 @@
 	priv->encoders[priv->num_encoders++] = encoder;
 	priv->connectors[priv->num_connectors++] = connector;
 
+	tilcdc_crtc_set_panel_info(priv->crtc,
+				   to_panel_encoder(encoder)->mod->info);
+
 	return 0;
 }
 
@@ -392,8 +398,6 @@
 		goto fail_timings;
 	}
 
-	mod->preferred_bpp = panel_mod->info->bpp;
-
 	return 0;
 
 fail_timings:
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
new file mode 100644
index 0000000..74c65fa
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2015 Texas Instruments
+ * Author: Jyri Sarha <jsarha@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <drm/drmP.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <uapi/drm/drm_fourcc.h>
+
+#include "tilcdc_drv.h"
+
+static struct drm_plane_funcs tilcdc_plane_funcs = {
+	.update_plane	= drm_atomic_helper_update_plane,
+	.disable_plane	= drm_atomic_helper_disable_plane,
+	.destroy	= drm_plane_cleanup,
+	.set_property	= drm_atomic_helper_plane_set_property,
+	.reset		= drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static int tilcdc_plane_atomic_check(struct drm_plane *plane,
+				     struct drm_plane_state *state)
+{
+	struct drm_crtc_state *crtc_state;
+	struct drm_plane_state *old_state = plane->state;
+	unsigned int depth, bpp;
+
+	if (!state->crtc)
+		return 0;
+
+	if (WARN_ON(!state->fb))
+		return -EINVAL;
+
+	if (state->crtc_x || state->crtc_y) {
+		dev_err(plane->dev->dev, "%s: crtc position must be zero.",
+			__func__);
+		return -EINVAL;
+	}
+
+	crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+							state->crtc);
+	/* we should have a crtc state if the plane is attached to a crtc */
+	if (WARN_ON(!crtc_state))
+		return 0;
+
+	if (crtc_state->mode.hdisplay != state->crtc_w ||
+	    crtc_state->mode.vdisplay != state->crtc_h) {
+		dev_err(plane->dev->dev,
+			"%s: Size must match mode (%dx%d == %dx%d)", __func__,
+			crtc_state->mode.hdisplay, crtc_state->mode.vdisplay,
+			state->crtc_w, state->crtc_h);
+		return -EINVAL;
+	}
+
+	drm_fb_get_bpp_depth(state->fb->pixel_format, &depth, &bpp);
+	if (state->fb->pitches[0] != crtc_state->mode.hdisplay * bpp / 8) {
+		dev_err(plane->dev->dev,
+			"Invalid pitch: fb and crtc widths must be the same");
+		return -EINVAL;
+	}
+
+	if (state->fb && old_state->fb &&
+	    state->fb->pixel_format != old_state->fb->pixel_format) {
+		dev_dbg(plane->dev->dev,
+			"%s(): pixel format change requires mode_change\n",
+			__func__);
+		crtc_state->mode_changed = true;
+	}
+
+	return 0;
+}
+
+static void tilcdc_plane_atomic_update(struct drm_plane *plane,
+				       struct drm_plane_state *old_state)
+{
+	struct drm_plane_state *state = plane->state;
+
+	if (!state->crtc)
+		return;
+
+	if (WARN_ON(!state->fb || !state->crtc->state))
+		return;
+
+	tilcdc_crtc_update_fb(state->crtc,
+			      state->fb,
+			      state->crtc->state->event);
+}
+
+static const struct drm_plane_helper_funcs plane_helper_funcs = {
+	.atomic_check = tilcdc_plane_atomic_check,
+	.atomic_update = tilcdc_plane_atomic_update,
+};
+
+int tilcdc_plane_init(struct drm_device *dev,
+		      struct drm_plane *plane)
+{
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	int ret;
+
+	ret = drm_plane_init(dev, plane, 1,
+			     &tilcdc_plane_funcs,
+			     priv->pixelformats,
+			     priv->num_pixelformats,
+			     true);
+	if (ret) {
+		dev_err(dev->dev, "Failed to initialize plane: %d\n", ret);
+		return ret;
+	}
+
+	drm_plane_helper_add(plane, &plane_helper_funcs);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_regs.h b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
index 1bf5e25..f57c0d6 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_regs.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
@@ -119,6 +119,20 @@
 	iowrite32(data, priv->mmio + reg);
 }
 
+static inline void tilcdc_write64(struct drm_device *dev, u32 reg, u64 data)
+{
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	volatile void __iomem *addr = priv->mmio + reg;
+
+#ifdef iowrite64
+	iowrite64(data, addr);
+#else
+	__iowmb();
+	/* This compiles to strd (=64-bit write) on ARM7 */
+	*(volatile u64 __force *)addr = __cpu_to_le64(data);
+#endif
+}
+
 static inline u32 tilcdc_read(struct drm_device *dev, u32 reg)
 {
 	struct tilcdc_drm_private *priv = dev->dev_private;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
index f9c79da..623a914 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
@@ -139,7 +139,7 @@
 	of_update_property(node, prop);
 }
 
-struct device_node * __init tilcdc_get_overlay(struct kfree_table *kft)
+static struct device_node * __init tilcdc_get_overlay(struct kfree_table *kft)
 {
 	const int size = __dtb_tilcdc_slave_compat_end -
 		__dtb_tilcdc_slave_compat_begin;
@@ -195,7 +195,7 @@
 	NULL
 };
 
-void __init tilcdc_convert_slave_node(void)
+static void __init tilcdc_convert_slave_node(void)
 {
 	struct device_node *slave = NULL, *lcdc = NULL;
 	struct device_node *i2c = NULL, *fragment = NULL;
@@ -207,7 +207,7 @@
 	int ret;
 
 	if (kfree_table_init(&kft))
-		goto out;
+		return;
 
 	lcdc = of_find_matching_node(NULL, tilcdc_of_match);
 	slave = of_find_matching_node(NULL, tilcdc_slave_of_match);
@@ -261,7 +261,7 @@
 	of_node_put(fragment);
 }
 
-int __init tilcdc_slave_compat_init(void)
+static int __init tilcdc_slave_compat_init(void)
 {
 	tilcdc_convert_slave_node();
 	return 0;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 6b8c5b3..458043a 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -20,8 +20,10 @@
 #include <linux/of_gpio.h>
 #include <linux/pinctrl/pinmux.h>
 #include <linux/pinctrl/consumer.h>
+#include <drm/drm_atomic_helper.h>
 
 #include "tilcdc_drv.h"
+#include "tilcdc_tfp410.h"
 
 struct tfp410_module {
 	struct tilcdc_module base;
@@ -75,7 +77,6 @@
 static void tfp410_encoder_prepare(struct drm_encoder *encoder)
 {
 	tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-	tilcdc_crtc_set_panel_info(encoder->crtc, &dvi_info);
 }
 
 static void tfp410_encoder_commit(struct drm_encoder *encoder)
@@ -201,9 +202,12 @@
 
 static const struct drm_connector_funcs tfp410_connector_funcs = {
 	.destroy            = tfp410_connector_destroy,
-	.dpms               = drm_helper_connector_dpms,
+	.dpms               = drm_atomic_helper_connector_dpms,
 	.detect             = tfp410_connector_detect,
 	.fill_modes         = drm_helper_probe_single_connector_modes,
+	.reset              = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 static const struct drm_connector_helper_funcs tfp410_connector_helper_funcs = {
@@ -276,6 +280,7 @@
 	priv->encoders[priv->num_encoders++] = encoder;
 	priv->connectors[priv->num_connectors++] = connector;
 
+	tilcdc_crtc_set_panel_info(priv->crtc, &dvi_info);
 	return 0;
 }
 
@@ -323,8 +328,6 @@
 		goto fail;
 	}
 
-	mod->preferred_bpp = dvi_info.bpp;
-
 	i2c_node = of_find_node_by_phandle(i2c_phandle);
 	if (!i2c_node) {
 		dev_err(&pdev->dev, "could not get i2c bus node\n");
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 42c074a..fc6217d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -57,14 +57,14 @@
 static inline int ttm_mem_type_from_place(const struct ttm_place *place,
 					  uint32_t *mem_type)
 {
-	int i;
+	int pos;
 
-	for (i = 0; i <= TTM_PL_PRIV5; i++)
-		if (place->flags & (1 << i)) {
-			*mem_type = i;
-			return 0;
-		}
-	return -EINVAL;
+	pos = ffs(place->flags & TTM_PL_MASK_MEM);
+	if (unlikely(!pos))
+		return -EINVAL;
+
+	*mem_type = pos - 1;
+	return 0;
 }
 
 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
@@ -354,14 +354,12 @@
 
 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-		ret = ttm_bo_move_ttm(bo, evict, interruptible, no_wait_gpu,
-				      mem);
+		ret = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, mem);
 	else if (bdev->driver->move)
 		ret = bdev->driver->move(bo, evict, interruptible,
 					 no_wait_gpu, mem);
 	else
-		ret = ttm_bo_move_memcpy(bo, evict, interruptible,
-					 no_wait_gpu, mem);
+		ret = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, mem);
 
 	if (ret) {
 		if (bdev->driver->move_notify) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index f157a9e..bf6e216 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -45,8 +45,8 @@
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-		    bool evict, bool interruptible,
-		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+		    bool interruptible, bool no_wait_gpu,
+		    struct ttm_mem_reg *new_mem)
 {
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
@@ -329,8 +329,7 @@
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-		       bool evict, bool interruptible,
-		       bool no_wait_gpu,
+		       bool interruptible, bool no_wait_gpu,
 		       struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index a1803fb..29855be 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -600,3 +600,9 @@
 	return 0;
 }
 EXPORT_SYMBOL(ttm_round_pot);
+
+uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob)
+{
+	return glob->zone_kernel->max_mem;
+}
+EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index bef9f6f..cec4b4b 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -858,7 +858,6 @@
 	if (count) {
 		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
 		ttm->pages[index] = d_page->p;
-		ttm_dma->cpu_address[index] = d_page->vaddr;
 		ttm_dma->dma_address[index] = d_page->dma;
 		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
 		r = 0;
@@ -989,7 +988,6 @@
 	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	for (i = 0; i < ttm->num_pages; i++) {
 		ttm->pages[i] = NULL;
-		ttm_dma->cpu_address[i] = 0;
 		ttm_dma->dma_address[i] = 0;
 	}
 
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index bc5aa57..aee3c00 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -57,10 +57,8 @@
 {
 	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
 					  sizeof(*ttm->ttm.pages) +
-					  sizeof(*ttm->dma_address) +
-					  sizeof(*ttm->cpu_address));
-	ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
-	ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
+					  sizeof(*ttm->dma_address));
+	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
 }
 
 #ifdef CONFIG_X86
@@ -244,7 +242,6 @@
 
 	drm_free_large(ttm->pages);
 	ttm->pages = NULL;
-	ttm_dma->cpu_address = NULL;
 	ttm_dma->dma_address = NULL;
 }
 EXPORT_SYMBOL(ttm_dma_tt_fini);
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 4709b54..d2f57c5 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -150,8 +150,5 @@
 	drm_connector_register(connector);
 	drm_mode_connector_attach_encoder(connector, encoder);
 
-	drm_object_attach_property(&connector->base,
-				      dev->mode_config.dirty_info_property,
-				      1);
 	return 0;
 }
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
index e2243ed..ac90ffd 100644
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -209,7 +209,7 @@
 	exp_info.flags = flags;
 	exp_info.priv = obj;
 
-	return dma_buf_export(&exp_info);
+	return drm_gem_dmabuf_export(dev, &exp_info);
 }
 
 static int udl_prime_create(struct drm_device *dev,
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 17d34e0..cc45d98 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -16,6 +16,20 @@
 	return 0;
 }
 
+static int udl_usb_suspend(struct usb_interface *interface,
+			   pm_message_t message)
+{
+	return 0;
+}
+
+static int udl_usb_resume(struct usb_interface *interface)
+{
+	struct drm_device *dev = usb_get_intfdata(interface);
+
+	udl_modeset_restore(dev);
+	return 0;
+}
+
 static const struct vm_operations_struct udl_gem_vm_ops = {
 	.fault = udl_gem_fault,
 	.open = drm_gem_vm_open,
@@ -72,8 +86,8 @@
 	int r;
 
 	dev = drm_dev_alloc(&driver, &interface->dev);
-	if (!dev)
-		return -ENOMEM;
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
 
 	r = drm_dev_register(dev, (unsigned long)udev);
 	if (r)
@@ -122,6 +136,8 @@
 	.name = "udl",
 	.probe = udl_usb_probe,
 	.disconnect = udl_usb_disconnect,
+	.suspend = udl_usb_suspend,
+	.resume = udl_usb_resume,
 	.id_table = id_table,
 };
 module_usb_driver(udl_driver);
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 0b03d34..f338a57 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -52,6 +52,7 @@
 	struct device *dev;
 	struct drm_device *ddev;
 	struct usb_device *udev;
+	struct drm_crtc *crtc;
 
 	int sku_pixel_limit;
 
@@ -87,6 +88,7 @@
 
 /* modeset */
 int udl_modeset_init(struct drm_device *dev);
+void udl_modeset_restore(struct drm_device *dev);
 void udl_modeset_cleanup(struct drm_device *dev);
 int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder);
 
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 33dbfb2..29f0207 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -16,6 +16,8 @@
 /* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
 #define BULK_SIZE 512
 
+#define NR_USB_REQUEST_CHANNEL 0x12
+
 #define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
 #define WRITES_IN_FLIGHT (4)
 #define MAX_VENDOR_DESCRIPTOR_SIZE 256
@@ -90,6 +92,26 @@
 	return true;
 }
 
+/*
+ * Need to ensure a channel is selected before submitting URBs
+ */
+static int udl_select_std_channel(struct udl_device *udl)
+{
+	int ret;
+	u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
+			    0x1C, 0x88, 0x5E, 0x15,
+			    0x60, 0xFE, 0xC6, 0x97,
+			    0x16, 0x3D, 0x47, 0xF2};
+
+	ret = usb_control_msg(udl->udev,
+			      usb_sndctrlpipe(udl->udev, 0),
+			      NR_USB_REQUEST_CHANNEL,
+			      (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
+			      set_def_chn, sizeof(set_def_chn),
+			      USB_CTRL_SET_TIMEOUT);
+	return ret < 0 ? ret : 0;
+}
+
 static void udl_release_urb_work(struct work_struct *work)
 {
 	struct urb_node *unode = container_of(work, struct urb_node,
@@ -301,6 +323,9 @@
 		goto err;
 	}
 
+	if (udl_select_std_channel(udl))
+		DRM_ERROR("Selecting channel failed\n");
+
 	if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
 		DRM_ERROR("udl_alloc_urb_list failed\n");
 		goto err;
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index f92ea95..f2b2481 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -309,6 +309,8 @@
 	char *wrptr;
 	int color_depth = 0;
 
+	udl->crtc = crtc;
+
 	buf = (char *)udl->mode_buf;
 
 	/* for now we just clip 24 -> 16 - if we fix that fix this */
@@ -441,8 +443,6 @@
 
 	dev->mode_config.funcs = &udl_mode_funcs;
 
-	drm_mode_create_dirty_info_property(dev);
-
 	udl_crtc_init(dev);
 
 	encoder = udl_encoder_init(dev);
@@ -452,6 +452,18 @@
 	return 0;
 }
 
+void udl_modeset_restore(struct drm_device *dev)
+{
+	struct udl_device *udl = dev->dev_private;
+	struct udl_framebuffer *ufb;
+
+	if (!udl->crtc || !udl->crtc->primary->fb)
+		return;
+	udl_crtc_commit(udl->crtc);
+	ufb = to_udl_fb(udl->crtc->primary->fb);
+	udl_handle_damage(ufb, 0, 0, ufb->base.width, ufb->base.height);
+}
+
 void udl_modeset_cleanup(struct drm_device *dev)
 {
 	drm_mode_config_cleanup(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 8fc2b73..7f08d68 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -163,14 +163,6 @@
 	int vblank_lines;
 	int ret = 0;
 
-	/*
-	 * XXX Doesn't work well in interlaced mode yet, partially due
-	 * to problems in vc4 kms or drm core interlaced mode handling,
-	 * so disable for now in interlaced mode.
-	 */
-	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-		return ret;
-
 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
 
 	/* Get optional system timestamp before query. */
@@ -191,10 +183,15 @@
 
 	/* Vertical position of hvs composed scanline. */
 	*vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
+	*hpos = 0;
 
-	/* No hpos info available. */
-	if (hpos)
-		*hpos = 0;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		*vpos /= 2;
+
+		/* Use hpos to correct for field offset in interlaced mode. */
+		if (VC4_GET_FIELD(val, SCALER_DISPSTATX_FRAME_COUNT) % 2)
+			*hpos += mode->crtc_htotal / 2;
+	}
 
 	/* This is the offset we need for translating hvs -> pv scanout pos. */
 	fifo_lines = vc4_crtc->cob_size / mode->crtc_hdisplay;
@@ -217,8 +214,6 @@
 		 * position of the PV.
 		 */
 		*vpos -= fifo_lines + 1;
-		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-			*vpos /= 2;
 
 		ret |= DRM_SCANOUTPOS_ACCURATE;
 		return ret;
@@ -234,7 +229,7 @@
 	 * and need to make things up in a approximative but consistent way.
 	 */
 	ret |= DRM_SCANOUTPOS_IN_VBLANK;
-	vblank_lines = mode->crtc_vtotal - mode->crtc_vdisplay;
+	vblank_lines = mode->vtotal - mode->vdisplay;
 
 	if (flags & DRM_CALLED_FROM_VBLIRQ) {
 		/*
@@ -383,7 +378,7 @@
 	struct drm_crtc_state *state = crtc->state;
 	struct drm_display_mode *mode = &state->adjusted_mode;
 	bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
-	u32 vactive = (mode->vdisplay >> (interlace ? 1 : 0));
+	u32 pixel_rep = (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1;
 	u32 format = PV_CONTROL_FORMAT_24;
 	bool debug_dump_regs = false;
 	int clock_select = vc4_get_clock_select(crtc);
@@ -399,47 +394,65 @@
 	CRTC_WRITE(PV_CONTROL, 0);
 
 	CRTC_WRITE(PV_HORZA,
-		   VC4_SET_FIELD(mode->htotal - mode->hsync_end,
+		   VC4_SET_FIELD((mode->htotal -
+				  mode->hsync_end) * pixel_rep,
 				 PV_HORZA_HBP) |
-		   VC4_SET_FIELD(mode->hsync_end - mode->hsync_start,
+		   VC4_SET_FIELD((mode->hsync_end -
+				  mode->hsync_start) * pixel_rep,
 				 PV_HORZA_HSYNC));
 	CRTC_WRITE(PV_HORZB,
-		   VC4_SET_FIELD(mode->hsync_start - mode->hdisplay,
+		   VC4_SET_FIELD((mode->hsync_start -
+				  mode->hdisplay) * pixel_rep,
 				 PV_HORZB_HFP) |
-		   VC4_SET_FIELD(mode->hdisplay, PV_HORZB_HACTIVE));
+		   VC4_SET_FIELD(mode->hdisplay * pixel_rep, PV_HORZB_HACTIVE));
 
 	CRTC_WRITE(PV_VERTA,
-		   VC4_SET_FIELD(mode->vtotal - mode->vsync_end,
+		   VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end,
 				 PV_VERTA_VBP) |
-		   VC4_SET_FIELD(mode->vsync_end - mode->vsync_start,
+		   VC4_SET_FIELD(mode->crtc_vsync_end - mode->crtc_vsync_start,
 				 PV_VERTA_VSYNC));
 	CRTC_WRITE(PV_VERTB,
-		   VC4_SET_FIELD(mode->vsync_start - mode->vdisplay,
+		   VC4_SET_FIELD(mode->crtc_vsync_start - mode->crtc_vdisplay,
 				 PV_VERTB_VFP) |
-		   VC4_SET_FIELD(vactive, PV_VERTB_VACTIVE));
+		   VC4_SET_FIELD(mode->crtc_vdisplay, PV_VERTB_VACTIVE));
 
 	if (interlace) {
 		CRTC_WRITE(PV_VERTA_EVEN,
-			   VC4_SET_FIELD(mode->vtotal - mode->vsync_end - 1,
+			   VC4_SET_FIELD(mode->crtc_vtotal -
+					 mode->crtc_vsync_end - 1,
 					 PV_VERTA_VBP) |
-			   VC4_SET_FIELD(mode->vsync_end - mode->vsync_start,
+			   VC4_SET_FIELD(mode->crtc_vsync_end -
+					 mode->crtc_vsync_start,
 					 PV_VERTA_VSYNC));
 		CRTC_WRITE(PV_VERTB_EVEN,
-			   VC4_SET_FIELD(mode->vsync_start - mode->vdisplay,
+			   VC4_SET_FIELD(mode->crtc_vsync_start -
+					 mode->crtc_vdisplay,
 					 PV_VERTB_VFP) |
-			   VC4_SET_FIELD(vactive, PV_VERTB_VACTIVE));
+			   VC4_SET_FIELD(mode->crtc_vdisplay, PV_VERTB_VACTIVE));
+
+		/* We set up first field even mode for HDMI.  VEC's
+		 * NTSC mode would want first field odd instead, once
+		 * we support it (to do so, set ODD_FIRST and put the
+		 * delay in VSYNCD_EVEN instead).
+		 */
+		CRTC_WRITE(PV_V_CONTROL,
+			   PV_VCONTROL_CONTINUOUS |
+			   PV_VCONTROL_INTERLACE |
+			   VC4_SET_FIELD(mode->htotal * pixel_rep / 2,
+					 PV_VCONTROL_ODD_DELAY));
+		CRTC_WRITE(PV_VSYNCD_EVEN, 0);
+	} else {
+		CRTC_WRITE(PV_V_CONTROL, PV_VCONTROL_CONTINUOUS);
 	}
 
-	CRTC_WRITE(PV_HACT_ACT, mode->hdisplay);
+	CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
 
-	CRTC_WRITE(PV_V_CONTROL,
-		   PV_VCONTROL_CONTINUOUS |
-		   (interlace ? PV_VCONTROL_INTERLACE : 0));
 
 	CRTC_WRITE(PV_CONTROL,
 		   VC4_SET_FIELD(format, PV_CONTROL_FORMAT) |
 		   VC4_SET_FIELD(vc4_get_fifo_full_level(format),
 				 PV_CONTROL_FIFO_LEVEL) |
+		   VC4_SET_FIELD(pixel_rep - 1, PV_CONTROL_PIXEL_REP) |
 		   PV_CONTROL_CLR_AT_START |
 		   PV_CONTROL_TRIGGER_UNDERFLOW |
 		   PV_CONTROL_WAIT_HSTART |
@@ -480,6 +493,9 @@
 	int ret;
 	require_hvs_enabled(dev);
 
+	/* Disable vblank irq handling before crtc is disabled. */
+	drm_crtc_vblank_off(crtc);
+
 	CRTC_WRITE(PV_V_CONTROL,
 		   CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN);
 	ret = wait_for(!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN), 1);
@@ -530,6 +546,23 @@
 	/* Turn on the pixel valve, which will emit the vstart signal. */
 	CRTC_WRITE(PV_V_CONTROL,
 		   CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
+
+	/* Enable vblank irq handling after crtc is started. */
+	drm_crtc_vblank_on(crtc);
+}
+
+static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	/* Do not allow doublescan modes from user space */
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+		DRM_DEBUG_KMS("[CRTC:%d] Doublescan mode rejected.\n",
+			      crtc->base.id);
+		return false;
+	}
+
+	return true;
 }
 
 static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
@@ -819,6 +852,7 @@
 	.mode_set_nofb = vc4_crtc_mode_set_nofb,
 	.disable = vc4_crtc_disable,
 	.enable = vc4_crtc_enable,
+	.mode_fixup = vc4_crtc_mode_fixup,
 	.atomic_check = vc4_crtc_atomic_check,
 	.atomic_flush = vc4_crtc_atomic_flush,
 };
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 275fedb..1e1f6b8 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -340,9 +340,20 @@
 	}
 }
 
+static bool vc4_dpi_encoder_mode_fixup(struct drm_encoder *encoder,
+				       const struct drm_display_mode *mode,
+				       struct drm_display_mode *adjusted_mode)
+{
+	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+		return false;
+
+	return true;
+}
+
 static const struct drm_encoder_helper_funcs vc4_dpi_encoder_helper_funcs = {
 	.disable = vc4_dpi_encoder_disable,
 	.enable = vc4_dpi_encoder_enable,
+	.mode_fixup = vc4_dpi_encoder_mode_fixup,
 };
 
 static const struct of_device_id vc4_dpi_dt_match[] = {
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 9ecef93..8703f56 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include "drm_fb_cma_helper.h"
+#include <drm/drm_fb_helper.h>
 
 #include "uapi/drm/vc4_drm.h"
 #include "vc4_drv.h"
@@ -214,7 +215,7 @@
 	ap->ranges[0].base = 0;
 	ap->ranges[0].size = ~0;
 
-	remove_conflicting_framebuffers(ap, "vc4drmfb", false);
+	drm_fb_helper_remove_conflicting_framebuffers(ap, "vc4drmfb", false);
 	kfree(ap);
 }
 
@@ -232,8 +233,8 @@
 		return -ENOMEM;
 
 	drm = drm_dev_alloc(&vc4_drm_driver, dev);
-	if (!drm)
-		return -ENOMEM;
+	if (IS_ERR(drm))
+		return PTR_ERR(drm);
 	platform_set_drvdata(pdev, drm);
 	vc4->dev = drm;
 	drm->dev_private = vc4;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 428e249..7c1e4d9 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -122,9 +122,16 @@
 struct vc4_bo {
 	struct drm_gem_cma_object base;
 
-	/* seqno of the last job to render to this BO. */
+	/* seqno of the last job to render using this BO. */
 	uint64_t seqno;
 
+	/* seqno of the last job to use the RCL to write to this BO.
+	 *
+	 * Note that this doesn't include binner overflow memory
+	 * writes.
+	 */
+	uint64_t write_seqno;
+
 	/* List entry for the BO's position in either
 	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
 	 */
@@ -216,6 +223,9 @@
 	/* Sequence number for this bin/render job. */
 	uint64_t seqno;
 
+	/* Latest write_seqno of any BO that binning depends on. */
+	uint64_t bin_dep_seqno;
+
 	/* Last current addresses the hardware was processing when the
 	 * hangcheck timer checked on us.
 	 */
@@ -230,6 +240,13 @@
 	struct drm_gem_cma_object **bo;
 	uint32_t bo_count;
 
+	/* List of BOs that are being written by the RCL.  Other than
+	 * the binner temporary storage, this is all the BOs written
+	 * by the job.
+	 */
+	struct drm_gem_cma_object *rcl_write_bo[4];
+	uint32_t rcl_write_bo_count;
+
 	/* Pointers for our position in vc4->job_list */
 	struct list_head head;
 
@@ -307,18 +324,15 @@
 static inline struct vc4_exec_info *
 vc4_first_bin_job(struct vc4_dev *vc4)
 {
-	if (list_empty(&vc4->bin_job_list))
-		return NULL;
-	return list_first_entry(&vc4->bin_job_list, struct vc4_exec_info, head);
+	return list_first_entry_or_null(&vc4->bin_job_list,
+					struct vc4_exec_info, head);
 }
 
 static inline struct vc4_exec_info *
 vc4_first_render_job(struct vc4_dev *vc4)
 {
-	if (list_empty(&vc4->render_job_list))
-		return NULL;
-	return list_first_entry(&vc4->render_job_list,
-				struct vc4_exec_info, head);
+	return list_first_entry_or_null(&vc4->render_job_list,
+					struct vc4_exec_info, head);
 }
 
 static inline struct vc4_exec_info *
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index b262c5c..47a095f 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -419,10 +419,6 @@
 
 	vc4_flush_caches(dev);
 
-	/* Disable the binner's pre-loaded overflow memory address */
-	V3D_WRITE(V3D_BPOA, 0);
-	V3D_WRITE(V3D_BPOS, 0);
-
 	/* Either put the job in the binner if it uses the binner, or
 	 * immediately move it to the to-be-rendered queue.
 	 */
@@ -471,6 +467,11 @@
 	list_for_each_entry(bo, &exec->unref_list, unref_head) {
 		bo->seqno = seqno;
 	}
+
+	for (i = 0; i < exec->rcl_write_bo_count; i++) {
+		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
+		bo->write_seqno = seqno;
+	}
 }
 
 /* Queues a struct vc4_exec_info for execution.  If no job is
@@ -673,6 +674,14 @@
 		goto fail;
 
 	ret = vc4_validate_shader_recs(dev, exec);
+	if (ret)
+		goto fail;
+
+	/* Block waiting on any previous rendering into the CS's VBO,
+	 * IB, or textures, so that pixels are actually written by the
+	 * time we try to read them.
+	 */
+	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);
 
 fail:
 	drm_free_large(temp);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 4452f36..c4cb2e2 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -62,6 +62,8 @@
 struct vc4_hdmi_encoder {
 	struct vc4_encoder base;
 	bool hdmi_monitor;
+	bool limited_rgb_range;
+	bool rgb_range_selectable;
 };
 
 static inline struct vc4_hdmi_encoder *
@@ -174,6 +176,9 @@
 			return connector_status_disconnected;
 	}
 
+	if (drm_probe_ddc(vc4->hdmi->ddc))
+		return connector_status_connected;
+
 	if (HDMI_READ(VC4_HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED)
 		return connector_status_connected;
 	else
@@ -202,6 +207,12 @@
 		return -ENODEV;
 
 	vc4_encoder->hdmi_monitor = drm_detect_hdmi_monitor(edid);
+
+	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
+		vc4_encoder->rgb_range_selectable =
+			drm_rgb_quant_range_selectable(edid);
+	}
+
 	drm_mode_connector_update_edid_property(connector, edid);
 	ret = drm_add_edid_modes(connector, edid);
 
@@ -246,7 +257,7 @@
 	connector->polled = (DRM_CONNECTOR_POLL_CONNECT |
 			     DRM_CONNECTOR_POLL_DISCONNECT);
 
-	connector->interlace_allowed = 0;
+	connector->interlace_allowed = 1;
 	connector->doublescan_allowed = 0;
 
 	drm_mode_connector_attach_encoder(connector, encoder);
@@ -269,25 +280,143 @@
 	.destroy = vc4_hdmi_encoder_destroy,
 };
 
+static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
+				enum hdmi_infoframe_type type)
+{
+	struct drm_device *dev = encoder->dev;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	u32 packet_id = type - 0x80;
+
+	HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG,
+		   HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id));
+
+	return wait_for(!(HDMI_READ(VC4_HDMI_RAM_PACKET_STATUS) &
+			  BIT(packet_id)), 100);
+}
+
+static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
+				     union hdmi_infoframe *frame)
+{
+	struct drm_device *dev = encoder->dev;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	u32 packet_id = frame->any.type - 0x80;
+	u32 packet_reg = VC4_HDMI_GCP_0 + VC4_HDMI_PACKET_STRIDE * packet_id;
+	uint8_t buffer[VC4_HDMI_PACKET_STRIDE];
+	ssize_t len, i;
+	int ret;
+
+	WARN_ONCE(!(HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) &
+		    VC4_HDMI_RAM_PACKET_ENABLE),
+		  "Packet RAM has to be on to store the packet.");
+
+	len = hdmi_infoframe_pack(frame, buffer, sizeof(buffer));
+	if (len < 0)
+		return;
+
+	ret = vc4_hdmi_stop_packet(encoder, frame->any.type);
+	if (ret) {
+		DRM_ERROR("Failed to wait for infoframe to go idle: %d\n", ret);
+		return;
+	}
+
+	for (i = 0; i < len; i += 7) {
+		HDMI_WRITE(packet_reg,
+			   buffer[i + 0] << 0 |
+			   buffer[i + 1] << 8 |
+			   buffer[i + 2] << 16);
+		packet_reg += 4;
+
+		HDMI_WRITE(packet_reg,
+			   buffer[i + 3] << 0 |
+			   buffer[i + 4] << 8 |
+			   buffer[i + 5] << 16 |
+			   buffer[i + 6] << 24);
+		packet_reg += 4;
+	}
+
+	HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG,
+		   HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) | BIT(packet_id));
+	ret = wait_for((HDMI_READ(VC4_HDMI_RAM_PACKET_STATUS) &
+			BIT(packet_id)), 100);
+	if (ret)
+		DRM_ERROR("Failed to wait for infoframe to start: %d\n", ret);
+}
+
+static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+{
+	struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
+	struct drm_crtc *crtc = encoder->crtc;
+	const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+	union hdmi_infoframe frame;
+	int ret;
+
+	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode);
+	if (ret < 0) {
+		DRM_ERROR("couldn't fill AVI infoframe\n");
+		return;
+	}
+
+	if (vc4_encoder->rgb_range_selectable) {
+		if (vc4_encoder->limited_rgb_range) {
+			frame.avi.quantization_range =
+				HDMI_QUANTIZATION_RANGE_LIMITED;
+		} else {
+			frame.avi.quantization_range =
+				HDMI_QUANTIZATION_RANGE_FULL;
+		}
+	}
+
+	vc4_hdmi_write_infoframe(encoder, &frame);
+}
+
+static void vc4_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+{
+	union hdmi_infoframe frame;
+	int ret;
+
+	ret = hdmi_spd_infoframe_init(&frame.spd, "Broadcom", "Videocore");
+	if (ret < 0) {
+		DRM_ERROR("couldn't fill SPD infoframe\n");
+		return;
+	}
+
+	frame.spd.sdi = HDMI_SPD_SDI_PC;
+
+	vc4_hdmi_write_infoframe(encoder, &frame);
+}
+
+static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder)
+{
+	vc4_hdmi_set_avi_infoframe(encoder);
+	vc4_hdmi_set_spd_infoframe(encoder);
+}
+
 static void vc4_hdmi_encoder_mode_set(struct drm_encoder *encoder,
 				      struct drm_display_mode *unadjusted_mode,
 				      struct drm_display_mode *mode)
 {
+	struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
 	struct drm_device *dev = encoder->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	bool debug_dump_regs = false;
 	bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
 	bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC;
-	u32 vactive = (mode->vdisplay >>
-		       ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0));
-	u32 verta = (VC4_SET_FIELD(mode->vsync_end - mode->vsync_start,
+	bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+	u32 pixel_rep = (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1;
+	u32 verta = (VC4_SET_FIELD(mode->crtc_vsync_end - mode->crtc_vsync_start,
 				   VC4_HDMI_VERTA_VSP) |
-		     VC4_SET_FIELD(mode->vsync_start - mode->vdisplay,
+		     VC4_SET_FIELD(mode->crtc_vsync_start - mode->crtc_vdisplay,
 				   VC4_HDMI_VERTA_VFP) |
-		     VC4_SET_FIELD(vactive, VC4_HDMI_VERTA_VAL));
+		     VC4_SET_FIELD(mode->crtc_vdisplay, VC4_HDMI_VERTA_VAL));
 	u32 vertb = (VC4_SET_FIELD(0, VC4_HDMI_VERTB_VSPO) |
-		     VC4_SET_FIELD(mode->vtotal - mode->vsync_end,
+		     VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end,
 				   VC4_HDMI_VERTB_VBP));
+	u32 vertb_even = (VC4_SET_FIELD(0, VC4_HDMI_VERTB_VSPO) |
+			  VC4_SET_FIELD(mode->crtc_vtotal -
+					mode->crtc_vsync_end -
+					interlaced,
+					VC4_HDMI_VERTB_VBP));
+	u32 csc_ctl;
 
 	if (debug_dump_regs) {
 		DRM_INFO("HDMI regs before:\n");
@@ -296,7 +425,8 @@
 
 	HD_WRITE(VC4_HD_VID_CTL, 0);
 
-	clk_set_rate(vc4->hdmi->pixel_clock, mode->clock * 1000);
+	clk_set_rate(vc4->hdmi->pixel_clock, mode->clock * 1000 *
+		     ((mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1));
 
 	HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL,
 		   HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) |
@@ -306,29 +436,62 @@
 	HDMI_WRITE(VC4_HDMI_HORZA,
 		   (vsync_pos ? VC4_HDMI_HORZA_VPOS : 0) |
 		   (hsync_pos ? VC4_HDMI_HORZA_HPOS : 0) |
-		   VC4_SET_FIELD(mode->hdisplay, VC4_HDMI_HORZA_HAP));
+		   VC4_SET_FIELD(mode->hdisplay * pixel_rep,
+				 VC4_HDMI_HORZA_HAP));
 
 	HDMI_WRITE(VC4_HDMI_HORZB,
-		   VC4_SET_FIELD(mode->htotal - mode->hsync_end,
+		   VC4_SET_FIELD((mode->htotal -
+				  mode->hsync_end) * pixel_rep,
 				 VC4_HDMI_HORZB_HBP) |
-		   VC4_SET_FIELD(mode->hsync_end - mode->hsync_start,
+		   VC4_SET_FIELD((mode->hsync_end -
+				  mode->hsync_start) * pixel_rep,
 				 VC4_HDMI_HORZB_HSP) |
-		   VC4_SET_FIELD(mode->hsync_start - mode->hdisplay,
+		   VC4_SET_FIELD((mode->hsync_start -
+				  mode->hdisplay) * pixel_rep,
 				 VC4_HDMI_HORZB_HFP));
 
 	HDMI_WRITE(VC4_HDMI_VERTA0, verta);
 	HDMI_WRITE(VC4_HDMI_VERTA1, verta);
 
-	HDMI_WRITE(VC4_HDMI_VERTB0, vertb);
+	HDMI_WRITE(VC4_HDMI_VERTB0, vertb_even);
 	HDMI_WRITE(VC4_HDMI_VERTB1, vertb);
 
 	HD_WRITE(VC4_HD_VID_CTL,
 		 (vsync_pos ? 0 : VC4_HD_VID_CTL_VSYNC_LOW) |
 		 (hsync_pos ? 0 : VC4_HD_VID_CTL_HSYNC_LOW));
 
+	csc_ctl = VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR,
+				VC4_HD_CSC_CTL_ORDER);
+
+	if (vc4_encoder->hdmi_monitor && drm_match_cea_mode(mode) > 1) {
+		/* CEA VICs other than #1 requre limited range RGB
+		 * output unless overridden by an AVI infoframe.
+		 * Apply a colorspace conversion to squash 0-255 down
+		 * to 16-235.  The matrix here is:
+		 *
+		 * [ 0      0      0.8594 16]
+		 * [ 0      0.8594 0      16]
+		 * [ 0.8594 0      0      16]
+		 * [ 0      0      0       1]
+		 */
+		csc_ctl |= VC4_HD_CSC_CTL_ENABLE;
+		csc_ctl |= VC4_HD_CSC_CTL_RGB2YCC;
+		csc_ctl |= VC4_SET_FIELD(VC4_HD_CSC_CTL_MODE_CUSTOM,
+					 VC4_HD_CSC_CTL_MODE);
+
+		HD_WRITE(VC4_HD_CSC_12_11, (0x000 << 16) | 0x000);
+		HD_WRITE(VC4_HD_CSC_14_13, (0x100 << 16) | 0x6e0);
+		HD_WRITE(VC4_HD_CSC_22_21, (0x6e0 << 16) | 0x000);
+		HD_WRITE(VC4_HD_CSC_24_23, (0x100 << 16) | 0x000);
+		HD_WRITE(VC4_HD_CSC_32_31, (0x000 << 16) | 0x6e0);
+		HD_WRITE(VC4_HD_CSC_34_33, (0x100 << 16) | 0x000);
+		vc4_encoder->limited_rgb_range = true;
+	} else {
+		vc4_encoder->limited_rgb_range = false;
+	}
+
 	/* The RGB order applies even when CSC is disabled. */
-	HD_WRITE(VC4_HD_CSC_CTL, VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR,
-					       VC4_HD_CSC_CTL_ORDER));
+	HD_WRITE(VC4_HD_CSC_CTL, csc_ctl);
 
 	HDMI_WRITE(VC4_HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
 
@@ -343,6 +506,8 @@
 	struct drm_device *dev = encoder->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 
+	HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG, 0);
+
 	HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0xf << 16);
 	HD_WRITE(VC4_HD_VID_CTL,
 		 HD_READ(VC4_HD_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
@@ -369,7 +534,7 @@
 			   VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI);
 
 		ret = wait_for(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) &
-			       VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE, 1);
+			       VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE, 1000);
 		WARN_ONCE(ret, "Timeout waiting for "
 			  "VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE\n");
 	} else {
@@ -381,7 +546,7 @@
 			   ~VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI);
 
 		ret = wait_for(!(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) &
-				 VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE), 1);
+				 VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE), 1000);
 		WARN_ONCE(ret, "Timeout waiting for "
 			  "!VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE\n");
 	}
@@ -395,9 +560,10 @@
 			   HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) |
 			   VC4_HDMI_SCHEDULER_CONTROL_VERT_ALWAYS_KEEPOUT);
 
-		/* XXX: Set HDMI_RAM_PACKET_CONFIG (1 << 16) and set
-		 * up the infoframe.
-		 */
+		HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG,
+			   VC4_HDMI_RAM_PACKET_ENABLE);
+
+		vc4_hdmi_set_infoframes(encoder);
 
 		drift = HDMI_READ(VC4_HDMI_FIFO_CTL);
 		drift &= VC4_HDMI_FIFO_VALID_WRITE_MASK;
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 4ac894d..c1f65c6 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -44,7 +44,7 @@
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 
-	drm_atomic_helper_commit_planes(dev, state, false);
+	drm_atomic_helper_commit_planes(dev, state, 0);
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 29e4b40..881bf48 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -735,8 +735,6 @@
 }
 
 static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
-	.prepare_fb = NULL,
-	.cleanup_fb = NULL,
 	.atomic_check = vc4_plane_atomic_check,
 	.atomic_update = vc4_plane_atomic_update,
 };
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 160942a..1aa44c2 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -175,6 +175,8 @@
 # define PV_CONTROL_CLR_AT_START		BIT(14)
 # define PV_CONTROL_TRIGGER_UNDERFLOW		BIT(13)
 # define PV_CONTROL_WAIT_HSTART			BIT(12)
+# define PV_CONTROL_PIXEL_REP_MASK		VC4_MASK(5, 4)
+# define PV_CONTROL_PIXEL_REP_SHIFT		4
 # define PV_CONTROL_CLK_SELECT_DSI_VEC		0
 # define PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI	1
 # define PV_CONTROL_CLK_SELECT_MASK		VC4_MASK(3, 2)
@@ -183,6 +185,9 @@
 # define PV_CONTROL_EN				BIT(0)
 
 #define PV_V_CONTROL				0x04
+# define PV_VCONTROL_ODD_DELAY_MASK		VC4_MASK(22, 6)
+# define PV_VCONTROL_ODD_DELAY_SHIFT		6
+# define PV_VCONTROL_ODD_FIRST			BIT(5)
 # define PV_VCONTROL_INTERLACE			BIT(4)
 # define PV_VCONTROL_CONTINUOUS			BIT(1)
 # define PV_VCONTROL_VIDEN			BIT(0)
@@ -438,6 +443,8 @@
 #define VC4_HDMI_RAM_PACKET_CONFIG		0x0a0
 # define VC4_HDMI_RAM_PACKET_ENABLE		BIT(16)
 
+#define VC4_HDMI_RAM_PACKET_STATUS		0x0a4
+
 #define VC4_HDMI_HORZA				0x0c4
 # define VC4_HDMI_HORZA_VPOS			BIT(14)
 # define VC4_HDMI_HORZA_HPOS			BIT(13)
@@ -499,6 +506,9 @@
 
 #define VC4_HDMI_TX_PHY_RESET_CTL		0x2c0
 
+#define VC4_HDMI_GCP_0				0x400
+#define VC4_HDMI_PACKET_STRIDE			0x24
+
 #define VC4_HD_M_CTL				0x00c
 # define VC4_HD_M_REGISTER_FILE_STANDBY		(3 << 6)
 # define VC4_HD_M_RAM_STANDBY			(3 << 4)
@@ -528,10 +538,17 @@
 # define VC4_HD_CSC_CTL_MODE_SHIFT		2
 # define VC4_HD_CSC_CTL_MODE_RGB_TO_SD_YPRPB	0
 # define VC4_HD_CSC_CTL_MODE_RGB_TO_HD_YPRPB	1
-# define VC4_HD_CSC_CTL_MODE_CUSTOM		2
+# define VC4_HD_CSC_CTL_MODE_CUSTOM		3
 # define VC4_HD_CSC_CTL_RGB2YCC			BIT(1)
 # define VC4_HD_CSC_CTL_ENABLE			BIT(0)
 
+#define VC4_HD_CSC_12_11			0x044
+#define VC4_HD_CSC_14_13			0x048
+#define VC4_HD_CSC_22_21			0x04c
+#define VC4_HD_CSC_24_23			0x050
+#define VC4_HD_CSC_32_31			0x054
+#define VC4_HD_CSC_34_33			0x058
+
 #define VC4_HD_FRAME_COUNT			0x068
 
 /* HVS display list information. */
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 0f12418..08886a3 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -45,6 +45,8 @@
 
 	struct drm_gem_cma_object *rcl;
 	u32 next_offset;
+
+	u32 next_write_bo_index;
 };
 
 static inline void rcl_u8(struct vc4_rcl_setup *setup, u8 val)
@@ -407,6 +409,8 @@
 	if (!*obj)
 		return -EINVAL;
 
+	exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
+
 	if (surf->offset & 0xf) {
 		DRM_ERROR("MSAA write must be 16b aligned.\n");
 		return -EINVAL;
@@ -417,7 +421,8 @@
 
 static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
 				 struct drm_gem_cma_object **obj,
-				 struct drm_vc4_submit_rcl_surface *surf)
+				 struct drm_vc4_submit_rcl_surface *surf,
+				 bool is_write)
 {
 	uint8_t tiling = VC4_GET_FIELD(surf->bits,
 				       VC4_LOADSTORE_TILE_BUFFER_TILING);
@@ -440,6 +445,9 @@
 	if (!*obj)
 		return -EINVAL;
 
+	if (is_write)
+		exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
+
 	if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
 		if (surf == &exec->args->zs_write) {
 			DRM_ERROR("general zs write may not be a full-res.\n");
@@ -542,6 +550,8 @@
 	if (!*obj)
 		return -EINVAL;
 
+	exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
+
 	if (tiling > VC4_TILING_FORMAT_LT) {
 		DRM_ERROR("Bad tiling format\n");
 		return -EINVAL;
@@ -599,15 +609,18 @@
 	if (ret)
 		return ret;
 
-	ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read);
+	ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read,
+				    false);
 	if (ret)
 		return ret;
 
-	ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read);
+	ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read,
+				    false);
 	if (ret)
 		return ret;
 
-	ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write);
+	ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write,
+				    true);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 9ce1d0a..26503e3 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -267,6 +267,9 @@
 	if (!ib)
 		return -EINVAL;
 
+	exec->bin_dep_seqno = max(exec->bin_dep_seqno,
+				  to_vc4_bo(&ib->base)->write_seqno);
+
 	if (offset > ib->base.size ||
 	    (ib->base.size - offset) / index_size < length) {
 		DRM_ERROR("IB access overflow (%d + %d*%d > %zd)\n",
@@ -555,8 +558,7 @@
 reloc_tex(struct vc4_exec_info *exec,
 	  void *uniform_data_u,
 	  struct vc4_texture_sample_info *sample,
-	  uint32_t texture_handle_index)
-
+	  uint32_t texture_handle_index, bool is_cs)
 {
 	struct drm_gem_cma_object *tex;
 	uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
@@ -714,6 +716,11 @@
 
 	*validated_p0 = tex->paddr + p0;
 
+	if (is_cs) {
+		exec->bin_dep_seqno = max(exec->bin_dep_seqno,
+					  to_vc4_bo(&tex->base)->write_seqno);
+	}
+
 	return true;
  fail:
 	DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0);
@@ -835,7 +842,8 @@
 			if (!reloc_tex(exec,
 				       uniform_data_u,
 				       &validated_shader->texture_samples[tex],
-				       texture_handles_u[tex])) {
+				       texture_handles_u[tex],
+				       i == 2)) {
 				return -EINVAL;
 			}
 		}
@@ -867,6 +875,9 @@
 		uint32_t stride = *(uint8_t *)(pkt_u + o + 5);
 		uint32_t max_index;
 
+		exec->bin_dep_seqno = max(exec->bin_dep_seqno,
+					  to_vc4_bo(&vbo->base)->write_seqno);
+
 		if (state->addr & 0x8)
 			stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff;
 
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index c15bafb..f36c147 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -334,8 +334,8 @@
 	int ret;
 
 	vgem_device = drm_dev_alloc(&vgem_driver, NULL);
-	if (!vgem_device) {
-		ret = -ENOMEM;
+	if (IS_ERR(vgem_device)) {
+		ret = PTR_ERR(vgem_device);
 		goto out;
 	}
 
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 7e2a12c..1a3ad76 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -241,8 +241,8 @@
 	down_read(&current->mm->mmap_sem);
 	ret = get_user_pages((unsigned long)xfer->mem_addr,
 			     vsg->num_pages,
-			     (vsg->direction == DMA_FROM_DEVICE),
-			     0, vsg->pages, NULL);
+			     (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0,
+			     vsg->pages, NULL);
 
 	up_read(&current->mm->mmap_sem);
 	if (ret != vsg->num_pages) {
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index ed8aa8f..e5582ba 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -72,7 +72,7 @@
 
 static struct drm_driver driver = {
 	.driver_features =
-	    DRIVER_USE_AGP | DRIVER_HAVE_IRQ |
+	    DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_LEGACY |
 	    DRIVER_IRQ_SHARED,
 	.load = via_driver_load,
 	.unload = via_driver_unload,
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 4e192aa..7cf3678 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -338,7 +338,8 @@
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 	drm_atomic_helper_commit_modeset_enables(dev, state);
-	drm_atomic_helper_commit_planes(dev, state, true);
+	drm_atomic_helper_commit_planes(dev, state,
+					DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
 	drm_atomic_helper_commit_hw_done(state);
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
index 7f0e93f87..49e5996 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -24,9 +24,20 @@
  */
 
 #include <linux/pci.h>
+#include <drm/drm_fb_helper.h>
 
 #include "virtgpu_drv.h"
 
+int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
+{
+	struct pci_dev *pdev = dev->pdev;
+
+	if (pdev) {
+		return drm_pci_set_busid(dev, master);
+	}
+	return 0;
+}
+
 static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
 {
 	struct apertures_struct *ap;
@@ -42,7 +53,7 @@
 	primary = pci_dev->resource[PCI_ROM_RESOURCE].flags
 		& IORESOURCE_ROM_SHADOW;
 
-	remove_conflicting_framebuffers(ap, "virtiodrmfb", primary);
+	drm_fb_helper_remove_conflicting_framebuffers(ap, "virtiodrmfb", primary);
 
 	kfree(ap);
 }
@@ -53,8 +64,8 @@
 	int ret;
 
 	dev = drm_dev_alloc(driver, &vdev->dev);
-	if (!dev)
-		return -ENOMEM;
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
 	dev->virtdev = vdev;
 	vdev->priv = dev;
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index c13f70c..5820b702 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -117,6 +117,7 @@
 
 static struct drm_driver driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
+	.set_busid = drm_virtio_set_busid,
 	.load = virtio_gpu_driver_load,
 	.unload = virtio_gpu_driver_unload,
 	.open = virtio_gpu_driver_open,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index b18ef31..ae59080 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -49,6 +49,7 @@
 #define DRIVER_PATCHLEVEL 1
 
 /* virtgpu_drm_bus.c */
+int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master);
 int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
 
 struct virtio_gpu_object {
@@ -75,6 +76,7 @@
 struct virtio_gpu_fence_driver {
 	atomic64_t       last_seq;
 	uint64_t         sync_seq;
+	uint64_t         context;
 	struct list_head fences;
 	spinlock_t       lock;
 };
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index cf44187..f3f70fa 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -89,7 +89,7 @@
 	(*fence)->drv = drv;
 	(*fence)->seq = ++drv->sync_seq;
 	fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
-		   0, (*fence)->seq);
+		   drv->context, (*fence)->seq);
 	fence_get(&(*fence)->f);
 	list_add_tail(&(*fence)->node, &drv->fences);
 	spin_unlock_irqrestore(&drv->lock, irq_flags);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index c046903..818478b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -89,10 +89,16 @@
 	}
 }
 
-static int virtio_gpu_execbuffer(struct drm_device *dev,
-				 struct drm_virtgpu_execbuffer *exbuf,
+/*
+ * Usage of execbuffer:
+ * Relocations need to take into account the full VIRTIO_GPUDrawable size.
+ * However, the command as passed from user space must *not* contain the initial
+ * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
+ */
+static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 				 struct drm_file *drm_file)
 {
+	struct drm_virtgpu_execbuffer *exbuf = data;
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
 	struct drm_gem_object *gobj;
@@ -152,15 +158,10 @@
 	if (ret)
 		goto out_free;
 
-	buf = kmalloc(exbuf->size, GFP_KERNEL);
-	if (!buf) {
-		ret = -ENOMEM;
-		goto out_unresv;
-	}
-	if (copy_from_user(buf, (void __user *)(uintptr_t)exbuf->command,
-			   exbuf->size)) {
-		kfree(buf);
-		ret = -EFAULT;
+	buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
+			  exbuf->size);
+	if (IS_ERR(buf)) {
+		ret = PTR_ERR(buf);
 		goto out_unresv;
 	}
 	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
@@ -182,20 +183,6 @@
 	return ret;
 }
 
-/*
- * Usage of execbuffer:
- * Relocations need to take into account the full VIRTIO_GPUDrawable size.
- * However, the command as passed from user space must *not* contain the initial
- * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
- */
-static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
-				       struct drm_file *file_priv)
-{
-	struct drm_virtgpu_execbuffer *execbuffer = data;
-	return virtio_gpu_execbuffer(dev, execbuffer, file_priv);
-}
-
-
 static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv)
 {
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 4150873..036b0fb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -159,6 +159,7 @@
 	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
 	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
 
+	vgdev->fence_drv.context = fence_context_alloc(1);
 	spin_lock_init(&vgdev->fence_drv.lock);
 	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
 	INIT_LIST_HEAD(&vgdev->cap_cache);
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 925ca25..ba28c0f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -76,7 +76,8 @@
 		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
 	if (old_state->crtc)
 		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
-	WARN_ON(!output);
+	if (WARN_ON(!output))
+		return;
 
 	if (plane->state->fb) {
 		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
@@ -129,7 +130,8 @@
 		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
 	if (old_state->crtc)
 		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
-	WARN_ON(!output);
+	if (WARN_ON(!output))
+		return;
 
 	if (plane->state->fb) {
 		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index b49445d..fb7b82a 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -6,6 +6,7 @@
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
 	select DRM_TTM
+	select FB
 	# Only needed for the transitional use of drm_crtc_init - can be removed
 	# again once vmwgfx sets up the primary plane itself.
 	select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 74304b0..070d750 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -67,10 +67,10 @@
 			VMWGFX_NUM_GB_SURFACE +\
 			VMWGFX_NUM_GB_SCREEN_TARGET)
 
-#define VMW_PL_GMR TTM_PL_PRIV0
-#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
-#define VMW_PL_MOB TTM_PL_PRIV1
-#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1
+#define VMW_PL_GMR (TTM_PL_PRIV + 0)
+#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0)
+#define VMW_PL_MOB (TTM_PL_PRIV + 1)
+#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1)
 
 #define VMW_RES_CONTEXT ttm_driver_type0
 #define VMW_RES_SURFACE ttm_driver_type1
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 63ccd98..23ec673 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -377,9 +377,6 @@
 	drm_mode_crtc_set_gamma_size(crtc, 256);
 
 	drm_object_attach_property(&connector->base,
-				   dev->mode_config.dirty_info_property,
-				   1);
-	drm_object_attach_property(&connector->base,
 				   dev_priv->hotplug_mode_update_property, 1);
 	drm_object_attach_property(&connector->base,
 				   dev->mode_config.suggested_x_property, 0);
@@ -421,10 +418,6 @@
 	if (ret != 0)
 		goto err_free;
 
-	ret = drm_mode_create_dirty_info_property(dev);
-	if (ret != 0)
-		goto err_vblank_cleanup;
-
 	vmw_kms_create_implicit_placement_property(dev_priv, true);
 
 	if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
@@ -439,8 +432,6 @@
 
 	return 0;
 
-err_vblank_cleanup:
-	drm_vblank_cleanup(dev);
 err_free:
 	kfree(dev_priv->ldu_priv);
 	dev_priv->ldu_priv = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index b74eae2..f423590 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -538,9 +538,6 @@
 	drm_mode_crtc_set_gamma_size(crtc, 256);
 
 	drm_object_attach_property(&connector->base,
-				   dev->mode_config.dirty_info_property,
-				   1);
-	drm_object_attach_property(&connector->base,
 				   dev_priv->hotplug_mode_update_property, 1);
 	drm_object_attach_property(&connector->base,
 				   dev->mode_config.suggested_x_property, 0);
@@ -574,10 +571,6 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = drm_mode_create_dirty_info_property(dev);
-	if (unlikely(ret != 0))
-		goto err_vblank_cleanup;
-
 	vmw_kms_create_implicit_placement_property(dev_priv, false);
 
 	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
@@ -588,10 +581,6 @@
 	DRM_INFO("Screen Objects Display Unit initialized\n");
 
 	return 0;
-
-err_vblank_cleanup:
-	drm_vblank_cleanup(dev);
-	return ret;
 }
 
 int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 41932a7..94ad8d2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1131,9 +1131,6 @@
 	drm_mode_crtc_set_gamma_size(crtc, 256);
 
 	drm_object_attach_property(&connector->base,
-				   dev->mode_config.dirty_info_property,
-				   1);
-	drm_object_attach_property(&connector->base,
 				   dev_priv->hotplug_mode_update_property, 1);
 	drm_object_attach_property(&connector->base,
 				   dev->mode_config.suggested_x_property, 0);
@@ -1202,10 +1199,6 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = drm_mode_create_dirty_info_property(dev);
-	if (unlikely(ret != 0))
-		goto err_vblank_cleanup;
-
 	dev_priv->active_display_unit = vmw_du_screen_target;
 
 	vmw_kms_create_implicit_placement_property(dev_priv, false);
diff --git a/drivers/gpu/ipu-v3/Makefile b/drivers/gpu/ipu-v3/Makefile
index 107ec23..5f96141 100644
--- a/drivers/gpu/ipu-v3/Makefile
+++ b/drivers/gpu/ipu-v3/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
 
 imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \
-		ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-smfc.o
+		ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-image-convert.o \
+		ipu-smfc.o ipu-vdi.o
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 99dcacf..b9539f7 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -45,6 +45,12 @@
 	writel(value, ipu->cm_reg + offset);
 }
 
+int ipu_get_num(struct ipu_soc *ipu)
+{
+	return ipu->id;
+}
+EXPORT_SYMBOL_GPL(ipu_get_num);
+
 void ipu_srm_dp_sync_update(struct ipu_soc *ipu)
 {
 	u32 val;
@@ -724,6 +730,137 @@
 }
 EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);
 
+
+/* Frame Synchronization Unit Channel Linking */
+
+struct fsu_link_reg_info {
+	int chno;
+	u32 reg;
+	u32 mask;
+	u32 val;
+};
+
+struct fsu_link_info {
+	struct fsu_link_reg_info src;
+	struct fsu_link_reg_info sink;
+};
+
+static const struct fsu_link_info fsu_link_info[] = {
+	{
+		.src  = { IPUV3_CHANNEL_IC_PRP_ENC_MEM, IPU_FS_PROC_FLOW2,
+			  FS_PRP_ENC_DEST_SEL_MASK, FS_PRP_ENC_DEST_SEL_IRT_ENC },
+		.sink = { IPUV3_CHANNEL_MEM_ROT_ENC, IPU_FS_PROC_FLOW1,
+			  FS_PRPENC_ROT_SRC_SEL_MASK, FS_PRPENC_ROT_SRC_SEL_ENC },
+	}, {
+		.src =  { IPUV3_CHANNEL_IC_PRP_VF_MEM, IPU_FS_PROC_FLOW2,
+			  FS_PRPVF_DEST_SEL_MASK, FS_PRPVF_DEST_SEL_IRT_VF },
+		.sink = { IPUV3_CHANNEL_MEM_ROT_VF, IPU_FS_PROC_FLOW1,
+			  FS_PRPVF_ROT_SRC_SEL_MASK, FS_PRPVF_ROT_SRC_SEL_VF },
+	}, {
+		.src =  { IPUV3_CHANNEL_IC_PP_MEM, IPU_FS_PROC_FLOW2,
+			  FS_PP_DEST_SEL_MASK, FS_PP_DEST_SEL_IRT_PP },
+		.sink = { IPUV3_CHANNEL_MEM_ROT_PP, IPU_FS_PROC_FLOW1,
+			  FS_PP_ROT_SRC_SEL_MASK, FS_PP_ROT_SRC_SEL_PP },
+	}, {
+		.src =  { IPUV3_CHANNEL_CSI_DIRECT, 0 },
+		.sink = { IPUV3_CHANNEL_CSI_VDI_PREV, IPU_FS_PROC_FLOW1,
+			  FS_VDI_SRC_SEL_MASK, FS_VDI_SRC_SEL_CSI_DIRECT },
+	},
+};
+
+static const struct fsu_link_info *find_fsu_link_info(int src, int sink)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fsu_link_info); i++) {
+		if (src == fsu_link_info[i].src.chno &&
+		    sink == fsu_link_info[i].sink.chno)
+			return &fsu_link_info[i];
+	}
+
+	return NULL;
+}
+
+/*
+ * Links a source channel to a sink channel in the FSU.
+ */
+int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch)
+{
+	const struct fsu_link_info *link;
+	u32 src_reg, sink_reg;
+	unsigned long flags;
+
+	link = find_fsu_link_info(src_ch, sink_ch);
+	if (!link)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ipu->lock, flags);
+
+	if (link->src.mask) {
+		src_reg = ipu_cm_read(ipu, link->src.reg);
+		src_reg &= ~link->src.mask;
+		src_reg |= link->src.val;
+		ipu_cm_write(ipu, src_reg, link->src.reg);
+	}
+
+	if (link->sink.mask) {
+		sink_reg = ipu_cm_read(ipu, link->sink.reg);
+		sink_reg &= ~link->sink.mask;
+		sink_reg |= link->sink.val;
+		ipu_cm_write(ipu, sink_reg, link->sink.reg);
+	}
+
+	spin_unlock_irqrestore(&ipu->lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_fsu_link);
+
+/*
+ * Unlinks source and sink channels in the FSU.
+ */
+int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch)
+{
+	const struct fsu_link_info *link;
+	u32 src_reg, sink_reg;
+	unsigned long flags;
+
+	link = find_fsu_link_info(src_ch, sink_ch);
+	if (!link)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ipu->lock, flags);
+
+	if (link->src.mask) {
+		src_reg = ipu_cm_read(ipu, link->src.reg);
+		src_reg &= ~link->src.mask;
+		ipu_cm_write(ipu, src_reg, link->src.reg);
+	}
+
+	if (link->sink.mask) {
+		sink_reg = ipu_cm_read(ipu, link->sink.reg);
+		sink_reg &= ~link->sink.mask;
+		ipu_cm_write(ipu, sink_reg, link->sink.reg);
+	}
+
+	spin_unlock_irqrestore(&ipu->lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_fsu_unlink);
+
+/* Link IDMAC channels in the FSU */
+int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink)
+{
+	return ipu_fsu_link(src->ipu, src->num, sink->num);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_link);
+
+/* Unlink IDMAC channels in the FSU */
+int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink)
+{
+	return ipu_fsu_unlink(src->ipu, src->num, sink->num);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_unlink);
+
 struct ipu_devtype {
 	const char *name;
 	unsigned long cm_ofs;
@@ -833,6 +970,20 @@
 		goto err_ic;
 	}
 
+	ret = ipu_vdi_init(ipu, dev, ipu_base + devtype->vdi_ofs,
+			   IPU_CONF_VDI_EN | IPU_CONF_ISP_EN |
+			   IPU_CONF_IC_INPUT);
+	if (ret) {
+		unit = "vdi";
+		goto err_vdi;
+	}
+
+	ret = ipu_image_convert_init(ipu, dev);
+	if (ret) {
+		unit = "image_convert";
+		goto err_image_convert;
+	}
+
 	ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
 			  IPU_CONF_DI0_EN, ipu_clk);
 	if (ret) {
@@ -887,6 +1038,10 @@
 err_di_1:
 	ipu_di_exit(ipu, 0);
 err_di_0:
+	ipu_image_convert_exit(ipu);
+err_image_convert:
+	ipu_vdi_exit(ipu);
+err_vdi:
 	ipu_ic_exit(ipu);
 err_ic:
 	ipu_csi_exit(ipu, 1);
@@ -971,6 +1126,8 @@
 	ipu_dc_exit(ipu);
 	ipu_di_exit(ipu, 1);
 	ipu_di_exit(ipu, 0);
+	ipu_image_convert_exit(ipu);
+	ipu_vdi_exit(ipu);
 	ipu_ic_exit(ipu);
 	ipu_csi_exit(ipu, 1);
 	ipu_csi_exit(ipu, 0);
@@ -1004,14 +1161,14 @@
 			.dma[0] = IPUV3_CHANNEL_CSI0,
 			.dma[1] = -EINVAL,
 		},
-		.name = "imx-ipuv3-camera",
+		.name = "imx-ipuv3-csi",
 	}, {
 		.pdata = {
 			.csi = 1,
 			.dma[0] = IPUV3_CHANNEL_CSI1,
 			.dma[1] = -EINVAL,
 		},
-		.name = "imx-ipuv3-camera",
+		.name = "imx-ipuv3-csi",
 	}, {
 		.pdata = {
 			.di = 0,
@@ -1207,15 +1364,16 @@
 
 static int ipu_probe(struct platform_device *pdev)
 {
-	const struct of_device_id *of_id =
-			of_match_device(imx_ipu_dt_ids, &pdev->dev);
+	struct device_node *np = pdev->dev.of_node;
 	struct ipu_soc *ipu;
 	struct resource *res;
 	unsigned long ipu_base;
 	int i, ret, irq_sync, irq_err;
 	const struct ipu_devtype *devtype;
 
-	devtype = of_id->data;
+	devtype = of_device_get_match_data(&pdev->dev);
+	if (!devtype)
+		return -EINVAL;
 
 	irq_sync = platform_get_irq(pdev, 0);
 	irq_err = platform_get_irq(pdev, 1);
@@ -1237,6 +1395,7 @@
 		ipu->channel[i].ipu = ipu;
 	ipu->devtype = devtype;
 	ipu->ipu_type = devtype->type;
+	ipu->id = of_alias_get_id(np, "ipu");
 
 	spin_lock_init(&ipu->lock);
 	mutex_init(&ipu->channel_lock);
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 6494a4d..fcb7dc8 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -253,6 +253,13 @@
 }
 EXPORT_SYMBOL_GPL(ipu_cpmem_set_buffer);
 
+void ipu_cpmem_set_uv_offset(struct ipuv3_channel *ch, u32 u_off, u32 v_off)
+{
+	ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_off / 8);
+	ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_off / 8);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_uv_offset);
+
 void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride)
 {
 	ipu_ch_param_write_field(ch, IPU_FIELD_SO, 1);
@@ -268,6 +275,12 @@
 }
 EXPORT_SYMBOL_GPL(ipu_cpmem_set_axi_id);
 
+int ipu_cpmem_get_burstsize(struct ipuv3_channel *ch)
+{
+	return ipu_ch_param_read_field(ch, IPU_FIELD_NPB) + 1;
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_get_burstsize);
+
 void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize)
 {
 	ipu_ch_param_write_field(ch, IPU_FIELD_NPB, burstsize - 1);
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index 06631ac..d6e5ded 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -258,12 +258,8 @@
 		cfg->data_width = IPU_CSI_DATA_WIDTH_8;
 		break;
 	case MEDIA_BUS_FMT_UYVY8_1X16:
-		cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
-		cfg->mipi_dt = MIPI_DT_YUV422;
-		cfg->data_width = IPU_CSI_DATA_WIDTH_16;
-		break;
 	case MEDIA_BUS_FMT_YUYV8_1X16:
-		cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
+		cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
 		cfg->mipi_dt = MIPI_DT_YUV422;
 		cfg->data_width = IPU_CSI_DATA_WIDTH_16;
 		break;
@@ -365,10 +361,14 @@
 {
 	struct ipu_csi_bus_config cfg;
 	unsigned long flags;
-	u32 data = 0;
+	u32 width, height, data = 0;
 
 	fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt);
 
+	/* set default sensor frame width and height */
+	width = mbus_fmt->width;
+	height = mbus_fmt->height;
+
 	/* Set the CSI_SENS_CONF register remaining fields */
 	data |= cfg.data_width << CSI_SENS_CONF_DATA_WIDTH_SHIFT |
 		cfg.data_fmt << CSI_SENS_CONF_DATA_FMT_SHIFT |
@@ -386,11 +386,6 @@
 
 	ipu_csi_write(csi, data, CSI_SENS_CONF);
 
-	/* Setup sensor frame size */
-	ipu_csi_write(csi,
-		      (mbus_fmt->width - 1) | ((mbus_fmt->height - 1) << 16),
-		      CSI_SENS_FRM_SIZE);
-
 	/* Set CCIR registers */
 
 	switch (cfg.clk_mode) {
@@ -408,11 +403,12 @@
 			 * Field1BlankEnd = 0x7, Field1BlankStart = 0x3,
 			 * Field1ActiveEnd = 0x5, Field1ActiveStart = 0x1
 			 */
+			height = 625; /* framelines for PAL */
+
 			ipu_csi_write(csi, 0x40596 | CSI_CCIR_ERR_DET_EN,
 					  CSI_CCIR_CODE_1);
 			ipu_csi_write(csi, 0xD07DF, CSI_CCIR_CODE_2);
 			ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
-
 		} else if (mbus_fmt->width == 720 && mbus_fmt->height == 480) {
 			/*
 			 * NTSC case
@@ -422,6 +418,8 @@
 			 * Field1BlankEnd = 0x6, Field1BlankStart = 0x2,
 			 * Field1ActiveEnd = 0x4, Field1ActiveStart = 0
 			 */
+			height = 525; /* framelines for NTSC */
+
 			ipu_csi_write(csi, 0xD07DF | CSI_CCIR_ERR_DET_EN,
 					  CSI_CCIR_CODE_1);
 			ipu_csi_write(csi, 0x40596, CSI_CCIR_CODE_2);
@@ -447,6 +445,10 @@
 		break;
 	}
 
+	/* Setup sensor frame size */
+	ipu_csi_write(csi, (width - 1) | ((height - 1) << 16),
+		      CSI_SENS_FRM_SIZE);
+
 	dev_dbg(csi->ipu->dev, "CSI_SENS_CONF = 0x%08X\n",
 		ipu_csi_read(csi, CSI_SENS_CONF));
 	dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE = 0x%08X\n",
diff --git a/drivers/gpu/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c
index 42705bb..a40f211 100644
--- a/drivers/gpu/ipu-v3/ipu-dmfc.c
+++ b/drivers/gpu/ipu-v3/ipu-dmfc.c
@@ -123,20 +123,6 @@
 }
 EXPORT_SYMBOL_GPL(ipu_dmfc_enable_channel);
 
-static void ipu_dmfc_wait_fifos(struct ipu_dmfc_priv *priv)
-{
-	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-
-	while ((readl(priv->base + DMFC_STAT) & 0x02fff000) != 0x02fff000) {
-		if (time_after(jiffies, timeout)) {
-			dev_warn(priv->dev,
-				 "Timeout waiting for DMFC FIFOs to clear\n");
-			break;
-		}
-		cpu_relax();
-	}
-}
-
 void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
 {
 	struct ipu_dmfc_priv *priv = dmfc->priv;
@@ -145,10 +131,8 @@
 
 	priv->use_count--;
 
-	if (!priv->use_count) {
-		ipu_dmfc_wait_fifos(priv);
+	if (!priv->use_count)
 		ipu_module_disable(priv->ipu, IPU_CONF_DMFC_EN);
-	}
 
 	if (priv->use_count < 0)
 		priv->use_count = 0;
diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c
index 1dcb96c..321eb98 100644
--- a/drivers/gpu/ipu-v3/ipu-ic.c
+++ b/drivers/gpu/ipu-v3/ipu-ic.c
@@ -160,6 +160,7 @@
 	spinlock_t lock;
 	struct ipu_soc *ipu;
 	int use_count;
+	int irt_use_count;
 	struct ipu_ic task[IC_NUM_TASKS];
 };
 
@@ -379,8 +380,6 @@
 
 	ipu_ic_write(ic, ic_conf, IC_CONF);
 
-	ic->rotation = ic->graphics = false;
-
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 EXPORT_SYMBOL_GPL(ipu_ic_task_disable);
@@ -620,7 +619,7 @@
 	ipu_ic_write(ic, ic_idmac_2, IC_IDMAC_2);
 	ipu_ic_write(ic, ic_idmac_3, IC_IDMAC_3);
 
-	if (rot >= IPU_ROTATE_90_RIGHT)
+	if (ipu_rot_mode_is_irt(rot))
 		ic->rotation = true;
 
 unlock:
@@ -629,22 +628,41 @@
 }
 EXPORT_SYMBOL_GPL(ipu_ic_task_idma_init);
 
+static void ipu_irt_enable(struct ipu_ic *ic)
+{
+	struct ipu_ic_priv *priv = ic->priv;
+
+	if (!priv->irt_use_count)
+		ipu_module_enable(priv->ipu, IPU_CONF_ROT_EN);
+
+	priv->irt_use_count++;
+}
+
+static void ipu_irt_disable(struct ipu_ic *ic)
+{
+	struct ipu_ic_priv *priv = ic->priv;
+
+	if (priv->irt_use_count) {
+		if (!--priv->irt_use_count)
+			ipu_module_disable(priv->ipu, IPU_CONF_ROT_EN);
+	}
+}
+
 int ipu_ic_enable(struct ipu_ic *ic)
 {
 	struct ipu_ic_priv *priv = ic->priv;
 	unsigned long flags;
-	u32 module = IPU_CONF_IC_EN;
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	if (ic->rotation)
-		module |= IPU_CONF_ROT_EN;
-
 	if (!priv->use_count)
-		ipu_module_enable(priv->ipu, module);
+		ipu_module_enable(priv->ipu, IPU_CONF_IC_EN);
 
 	priv->use_count++;
 
+	if (ic->rotation)
+		ipu_irt_enable(ic);
+
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return 0;
@@ -655,18 +673,22 @@
 {
 	struct ipu_ic_priv *priv = ic->priv;
 	unsigned long flags;
-	u32 module = IPU_CONF_IC_EN | IPU_CONF_ROT_EN;
 
 	spin_lock_irqsave(&priv->lock, flags);
 
 	priv->use_count--;
 
 	if (!priv->use_count)
-		ipu_module_disable(priv->ipu, module);
+		ipu_module_disable(priv->ipu, IPU_CONF_IC_EN);
 
 	if (priv->use_count < 0)
 		priv->use_count = 0;
 
+	if (ic->rotation)
+		ipu_irt_disable(ic);
+
+	ic->rotation = ic->graphics = false;
+
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return 0;
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
new file mode 100644
index 0000000..2ba7d43
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -0,0 +1,1709 @@
+/*
+ * Copyright (C) 2012-2016 Mentor Graphics Inc.
+ *
+ * Queued image conversion support, with tiling and rotation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <video/imx-ipu-image-convert.h>
+#include "ipu-prv.h"
+
+/*
+ * The IC Resizer has a restriction that the output frame from the
+ * resizer must be 1024 or less in both width (pixels) and height
+ * (lines).
+ *
+ * The image converter attempts to split up a conversion when
+ * the desired output (converted) frame resolution exceeds the
+ * IC resizer limit of 1024 in either dimension.
+ *
+ * If either dimension of the output frame exceeds the limit, the
+ * dimension is split into 1, 2, or 4 equal stripes, for a maximum
+ * of 4*4 or 16 tiles. A conversion is then carried out for each
+ * tile (but taking care to pass the full frame stride length to
+ * the DMA channel's parameter memory!). IDMA double-buffering is used
+ * to convert each tile back-to-back when possible (see note below
+ * when double_buffering boolean is set).
+ *
+ * Note that the input frame must be split up into the same number
+ * of tiles as the output frame.
+ *
+ * FIXME: at this point there is no attempt to deal with visible seams
+ * at the tile boundaries when upscaling. The seams are caused by a reset
+ * of the bilinear upscale interpolation when starting a new tile. The
+ * seams are barely visible for small upscale factors, but become
+ * increasingly visible as the upscale factor gets larger, since more
+ * interpolated pixels get thrown out at the tile boundaries. A possilble
+ * fix might be to overlap tiles of different sizes, but this must be done
+ * while also maintaining the IDMAC dma buffer address alignment and 8x8 IRT
+ * alignment restrictions of each tile.
+ */
+
+#define MAX_STRIPES_W    4
+#define MAX_STRIPES_H    4
+#define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)
+
+#define MIN_W     16
+#define MIN_H     8
+#define MAX_W     4096
+#define MAX_H     4096
+
+enum ipu_image_convert_type {
+	IMAGE_CONVERT_IN = 0,
+	IMAGE_CONVERT_OUT,
+};
+
+struct ipu_image_convert_dma_buf {
+	void          *virt;
+	dma_addr_t    phys;
+	unsigned long len;
+};
+
+struct ipu_image_convert_dma_chan {
+	int in;
+	int out;
+	int rot_in;
+	int rot_out;
+	int vdi_in_p;
+	int vdi_in;
+	int vdi_in_n;
+};
+
+/* dimensions of one tile */
+struct ipu_image_tile {
+	u32 width;
+	u32 height;
+	/* size and strides are in bytes */
+	u32 size;
+	u32 stride;
+	u32 rot_stride;
+	/* start Y or packed offset of this tile */
+	u32 offset;
+	/* offset from start to tile in U plane, for planar formats */
+	u32 u_off;
+	/* offset from start to tile in V plane, for planar formats */
+	u32 v_off;
+};
+
+struct ipu_image_convert_image {
+	struct ipu_image base;
+	enum ipu_image_convert_type type;
+
+	const struct ipu_image_pixfmt *fmt;
+	unsigned int stride;
+
+	/* # of rows (horizontal stripes) if dest height is > 1024 */
+	unsigned int num_rows;
+	/* # of columns (vertical stripes) if dest width is > 1024 */
+	unsigned int num_cols;
+
+	struct ipu_image_tile tile[MAX_TILES];
+};
+
+struct ipu_image_pixfmt {
+	u32	fourcc;        /* V4L2 fourcc */
+	int     bpp;           /* total bpp */
+	int     uv_width_dec;  /* decimation in width for U/V planes */
+	int     uv_height_dec; /* decimation in height for U/V planes */
+	bool    planar;        /* planar format */
+	bool    uv_swapped;    /* U and V planes are swapped */
+	bool    uv_packed;     /* partial planar (U and V in same plane) */
+};
+
+struct ipu_image_convert_ctx;
+struct ipu_image_convert_chan;
+struct ipu_image_convert_priv;
+
+struct ipu_image_convert_ctx {
+	struct ipu_image_convert_chan *chan;
+
+	ipu_image_convert_cb_t complete;
+	void *complete_context;
+
+	/* Source/destination image data and rotation mode */
+	struct ipu_image_convert_image in;
+	struct ipu_image_convert_image out;
+	enum ipu_rotate_mode rot_mode;
+
+	/* intermediate buffer for rotation */
+	struct ipu_image_convert_dma_buf rot_intermediate[2];
+
+	/* current buffer number for double buffering */
+	int cur_buf_num;
+
+	bool aborting;
+	struct completion aborted;
+
+	/* can we use double-buffering for this conversion operation? */
+	bool double_buffering;
+	/* num_rows * num_cols */
+	unsigned int num_tiles;
+	/* next tile to process */
+	unsigned int next_tile;
+	/* where to place converted tile in dest image */
+	unsigned int out_tile_map[MAX_TILES];
+
+	struct list_head list;
+};
+
+struct ipu_image_convert_chan {
+	struct ipu_image_convert_priv *priv;
+
+	enum ipu_ic_task ic_task;
+	const struct ipu_image_convert_dma_chan *dma_ch;
+
+	struct ipu_ic *ic;
+	struct ipuv3_channel *in_chan;
+	struct ipuv3_channel *out_chan;
+	struct ipuv3_channel *rotation_in_chan;
+	struct ipuv3_channel *rotation_out_chan;
+
+	/* the IPU end-of-frame irqs */
+	int out_eof_irq;
+	int rot_out_eof_irq;
+
+	spinlock_t irqlock;
+
+	/* list of convert contexts */
+	struct list_head ctx_list;
+	/* queue of conversion runs */
+	struct list_head pending_q;
+	/* queue of completed runs */
+	struct list_head done_q;
+
+	/* the current conversion run */
+	struct ipu_image_convert_run *current_run;
+};
+
+struct ipu_image_convert_priv {
+	struct ipu_image_convert_chan chan[IC_NUM_TASKS];
+	struct ipu_soc *ipu;
+};
+
+static const struct ipu_image_convert_dma_chan
+image_convert_dma_chan[IC_NUM_TASKS] = {
+	[IC_TASK_VIEWFINDER] = {
+		.in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
+		.out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
+		.rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
+		.rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
+		.vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
+		.vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
+		.vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
+	},
+	[IC_TASK_POST_PROCESSOR] = {
+		.in = IPUV3_CHANNEL_MEM_IC_PP,
+		.out = IPUV3_CHANNEL_IC_PP_MEM,
+		.rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
+		.rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
+	},
+};
+
+static const struct ipu_image_pixfmt image_convert_formats[] = {
+	{
+		.fourcc	= V4L2_PIX_FMT_RGB565,
+		.bpp    = 16,
+	}, {
+		.fourcc	= V4L2_PIX_FMT_RGB24,
+		.bpp    = 24,
+	}, {
+		.fourcc	= V4L2_PIX_FMT_BGR24,
+		.bpp    = 24,
+	}, {
+		.fourcc	= V4L2_PIX_FMT_RGB32,
+		.bpp    = 32,
+	}, {
+		.fourcc	= V4L2_PIX_FMT_BGR32,
+		.bpp    = 32,
+	}, {
+		.fourcc	= V4L2_PIX_FMT_YUYV,
+		.bpp    = 16,
+		.uv_width_dec = 2,
+		.uv_height_dec = 1,
+	}, {
+		.fourcc	= V4L2_PIX_FMT_UYVY,
+		.bpp    = 16,
+		.uv_width_dec = 2,
+		.uv_height_dec = 1,
+	}, {
+		.fourcc	= V4L2_PIX_FMT_YUV420,
+		.bpp    = 12,
+		.planar = true,
+		.uv_width_dec = 2,
+		.uv_height_dec = 2,
+	}, {
+		.fourcc	= V4L2_PIX_FMT_YVU420,
+		.bpp    = 12,
+		.planar = true,
+		.uv_width_dec = 2,
+		.uv_height_dec = 2,
+		.uv_swapped = true,
+	}, {
+		.fourcc = V4L2_PIX_FMT_NV12,
+		.bpp    = 12,
+		.planar = true,
+		.uv_width_dec = 2,
+		.uv_height_dec = 2,
+		.uv_packed = true,
+	}, {
+		.fourcc = V4L2_PIX_FMT_YUV422P,
+		.bpp    = 16,
+		.planar = true,
+		.uv_width_dec = 2,
+		.uv_height_dec = 1,
+	}, {
+		.fourcc = V4L2_PIX_FMT_NV16,
+		.bpp    = 16,
+		.planar = true,
+		.uv_width_dec = 2,
+		.uv_height_dec = 1,
+		.uv_packed = true,
+	},
+};
+
+static const struct ipu_image_pixfmt *get_format(u32 fourcc)
+{
+	const struct ipu_image_pixfmt *ret = NULL;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
+		if (image_convert_formats[i].fourcc == fourcc) {
+			ret = &image_convert_formats[i];
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static void dump_format(struct ipu_image_convert_ctx *ctx,
+			struct ipu_image_convert_image *ic_image)
+{
+	struct ipu_image_convert_chan *chan = ctx->chan;
+	struct ipu_image_convert_priv *priv = chan->priv;
+
+	dev_dbg(priv->ipu->dev,
+		"task %u: ctx %p: %s format: %dx%d (%dx%d tiles of size %dx%d), %c%c%c%c\n",
+		chan->ic_task, ctx,
+		ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
+		ic_image->base.pix.width, ic_image->base.pix.height,
+		ic_image->num_cols, ic_image->num_rows,
+		ic_image->tile[0].width, ic_image->tile[0].height,
+		ic_image->fmt->fourcc & 0xff,
+		(ic_image->fmt->fourcc >> 8) & 0xff,
+		(ic_image->fmt->fourcc >> 16) & 0xff,
+		(ic_image->fmt->fourcc >> 24) & 0xff);
+}
+
+int ipu_image_convert_enum_format(int index, u32 *fourcc)
+{
+	const struct ipu_image_pixfmt *fmt;
+
+	if (index >= (int)ARRAY_SIZE(image_convert_formats))
+		return -EINVAL;
+
+	/* Format found */
+	fmt = &image_convert_formats[index];
+	*fourcc = fmt->fourcc;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);
+
+static void free_dma_buf(struct ipu_image_convert_priv *priv,
+			 struct ipu_image_convert_dma_buf *buf)
+{
+	if (buf->virt)
+		dma_free_coherent(priv->ipu->dev,
+				  buf->len, buf->virt, buf->phys);
+	buf->virt = NULL;
+	buf->phys = 0;
+}
+
+static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
+			 struct ipu_image_convert_dma_buf *buf,
+			 int size)
+{
+	buf->len = PAGE_ALIGN(size);
+	buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
+				       GFP_DMA | GFP_KERNEL);
+	if (!buf->virt) {
+		dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static inline int num_stripes(int dim)
+{
+	if (dim <= 1024)
+		return 1;
+	else if (dim <= 2048)
+		return 2;
+	else
+		return 4;
+}
+
+static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
+				 struct ipu_image_convert_image *image)
+{
+	int i;
+
+	for (i = 0; i < ctx->num_tiles; i++) {
+		struct ipu_image_tile *tile = &image->tile[i];
+
+		tile->height = image->base.pix.height / image->num_rows;
+		tile->width = image->base.pix.width / image->num_cols;
+		tile->size = ((tile->height * image->fmt->bpp) >> 3) *
+			tile->width;
+
+		if (image->fmt->planar) {
+			tile->stride = tile->width;
+			tile->rot_stride = tile->height;
+		} else {
+			tile->stride =
+				(image->fmt->bpp * tile->width) >> 3;
+			tile->rot_stride =
+				(image->fmt->bpp * tile->height) >> 3;
+		}
+	}
+}
+
+/*
+ * Use the rotation transformation to find the tile coordinates
+ * (row, col) of a tile in the destination frame that corresponds
+ * to the given tile coordinates of a source frame. The destination
+ * coordinate is then converted to a tile index.
+ */
+static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
+				int src_row, int src_col)
+{
+	struct ipu_image_convert_chan *chan = ctx->chan;
+	struct ipu_image_convert_priv *priv = chan->priv;
+	struct ipu_image_convert_image *s_image = &ctx->in;
+	struct ipu_image_convert_image *d_image = &ctx->out;
+	int dst_row, dst_col;
+
+	/* with no rotation it's a 1:1 mapping */
+	if (ctx->rot_mode == IPU_ROTATE_NONE)
+		return src_row * s_image->num_cols + src_col;
+
+	/*
+	 * before doing the transform, first we have to translate
+	 * source row,col for an origin in the center of s_image
+	 */
+	src_row = src_row * 2 - (s_image->num_rows - 1);
+	src_col = src_col * 2 - (s_image->num_cols - 1);
+
+	/* do the rotation transform */
+	if (ctx->rot_mode & IPU_ROT_BIT_90) {
+		dst_col = -src_row;
+		dst_row = src_col;
+	} else {
+		dst_col = src_col;
+		dst_row = src_row;
+	}
+
+	/* apply flip */
+	if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
+		dst_col = -dst_col;
+	if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
+		dst_row = -dst_row;
+
+	dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
+		chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);
+
+	/*
+	 * finally translate dest row,col using an origin in upper
+	 * left of d_image
+	 */
+	dst_row += d_image->num_rows - 1;
+	dst_col += d_image->num_cols - 1;
+	dst_row /= 2;
+	dst_col /= 2;
+
+	return dst_row * d_image->num_cols + dst_col;
+}
+
+/*
+ * Fill the out_tile_map[] with transformed destination tile indeces.
+ */
+static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
+{
+	struct ipu_image_convert_image *s_image = &ctx->in;
+	unsigned int row, col, tile = 0;
+
+	for (row = 0; row < s_image->num_rows; row++) {
+		for (col = 0; col < s_image->num_cols; col++) {
+			ctx->out_tile_map[tile] =
+				transform_tile_index(ctx, row, col);
+			tile++;
+		}
+	}
+}
+
+static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
+				     struct ipu_image_convert_image *image)
+{
+	struct ipu_image_convert_chan *chan = ctx->chan;
+	struct ipu_image_convert_priv *priv = chan->priv;
+	const struct ipu_image_pixfmt *fmt = image->fmt;
+	unsigned int row, col, tile = 0;
+	u32 H, w, h, y_stride, uv_stride;
+	u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
+	u32 y_row_off, y_col_off, y_off;
+	u32 y_size, uv_size;
+
+	/* setup some convenience vars */
+	H = image->base.pix.height;
+
+	y_stride = image->stride;
+	uv_stride = y_stride / fmt->uv_width_dec;
+	if (fmt->uv_packed)
+		uv_stride *= 2;
+
+	y_size = H * y_stride;
+	uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);
+
+	for (row = 0; row < image->num_rows; row++) {
+		w = image->tile[tile].width;
+		h = image->tile[tile].height;
+		y_row_off = row * h * y_stride;
+		uv_row_off = (row * h * uv_stride) / fmt->uv_height_dec;
+
+		for (col = 0; col < image->num_cols; col++) {
+			y_col_off = col * w;
+			uv_col_off = y_col_off / fmt->uv_width_dec;
+			if (fmt->uv_packed)
+				uv_col_off *= 2;
+
+			y_off = y_row_off + y_col_off;
+			uv_off = uv_row_off + uv_col_off;
+
+			u_off = y_size - y_off + uv_off;
+			v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
+			if (fmt->uv_swapped) {
+				tmp = u_off;
+				u_off = v_off;
+				v_off = tmp;
+			}
+
+			image->tile[tile].offset = y_off;
+			image->tile[tile].u_off = u_off;
+			image->tile[tile++].v_off = v_off;
+
+			dev_dbg(priv->ipu->dev,
+				"task %u: ctx %p: %s@[%d,%d]: y_off %08x, u_off %08x, v_off %08x\n",
+				chan->ic_task, ctx,
+				image->type == IMAGE_CONVERT_IN ?
+				"Input" : "Output", row, col,
+				y_off, u_off, v_off);
+		}
+	}
+}
+
+static void calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
+				     struct ipu_image_convert_image *image)
+{
+	struct ipu_image_convert_chan *chan = ctx->chan;
+	struct ipu_image_convert_priv *priv = chan->priv;
+	const struct ipu_image_pixfmt *fmt = image->fmt;
+	unsigned int row, col, tile = 0;
+	u32 w, h, bpp, stride;
+	u32 row_off, col_off;
+
+	/* setup some convenience vars */
+	stride = image->stride;
+	bpp = fmt->bpp;
+
+	for (row = 0; row < image->num_rows; row++) {
+		w = image->tile[tile].width;
+		h = image->tile[tile].height;
+		row_off = row * h * stride;
+
+		for (col = 0; col < image->num_cols; col++) {
+			col_off = (col * w * bpp) >> 3;
+
+			image->tile[tile].offset = row_off + col_off;
+			image->tile[tile].u_off = 0;
+			image->tile[tile++].v_off = 0;
+
+			dev_dbg(priv->ipu->dev,
+				"task %u: ctx %p: %s@[%d,%d]: phys %08x\n",
+				chan->ic_task, ctx,
+				image->type == IMAGE_CONVERT_IN ?
+				"Input" : "Output", row, col,
+				row_off + col_off);
+		}
+	}
+}
+
+static void calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
+			      struct ipu_image_convert_image *image)
+{
+	if (image->fmt->planar)
+		calc_tile_offsets_planar(ctx, image);
+	else
+		calc_tile_offsets_packed(ctx, image);
+}
+
+/*
+ * return the number of runs in given queue (pending_q or done_q)
+ * for this context. hold irqlock when calling.
+ */
+static int get_run_count(struct ipu_image_convert_ctx *ctx,
+			 struct list_head *q)
+{
+	struct ipu_image_convert_run *run;
+	int count = 0;
+
+	lockdep_assert_held(&ctx->chan->irqlock);
+
+	list_for_each_entry(run, q, list) {
+		if (run->ctx == ctx)
+			count++;
+	}
+
+	return count;
+}
+
+static void convert_stop(struct ipu_image_convert_run *run)
+{
+	struct ipu_image_convert_ctx *ctx = run->ctx;
+	struct ipu_image_convert_chan *chan = ctx->chan;
+	struct ipu_image_convert_priv *priv = chan->priv;
+
+	dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
+		__func__, chan->ic_task, ctx, run);
+
+	/* disable IC tasks and the channels */
+	ipu_ic_task_disable(chan->ic);
+	ipu_idmac_disable_channel(chan->in_chan);
+	ipu_idmac_disable_channel(chan->out_chan);
+
+	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+		ipu_idmac_disable_channel(chan->rotation_in_chan);
+		ipu_idmac_disable_channel(chan->rotation_out_chan);
+		ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
+	}
+
+	ipu_ic_disable(chan->ic);
+}
+
+static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
+			       struct ipuv3_channel *channel,
+			       struct ipu_image_convert_image *image,
+			       enum ipu_rotate_mode rot_mode,
+			       bool rot_swap_width_height)
+{
+	struct ipu_image_convert_chan *chan = ctx->chan;
+	unsigned int burst_size;
+	u32 width, height, stride;
+	dma_addr_t addr0, addr1 = 0;
+	struct ipu_image tile_image;
+	unsigned int tile_idx[2];
+
+	if (image->type == IMAGE_CONVERT_OUT) {
+		tile_idx[0] = ctx->out_tile_map[0];
+		tile_idx[1] = ctx->out_tile_map[1];
+	} else {
+		tile_idx[0] = 0;
+		tile_idx[1] = 1;
+	}
+
+	if (rot_swap_width_height) {
+		width = image->tile[0].height;
+		height = image->tile[0].width;
+		stride = image->tile[0].rot_stride;
+		addr0 = ctx->rot_intermediate[0].phys;
+		if (ctx->double_buffering)
+			addr1 = ctx->rot_intermediate[1].phys;
+	} else {
+		width = image->tile[0].width;
+		height = image->tile[0].height;
+		stride = image->stride;
+		addr0 = image->base.phys0 +
+			image->tile[tile_idx[0]].offset;
+		if (ctx->double_buffering)
+			addr1 = image->base.phys0 +
+				image->tile[tile_idx[1]].offset;
+	}
+
+	ipu_cpmem_zero(channel);
+
+	memset(&tile_image, 0, sizeof(tile_image));
+	tile_image.pix.width = tile_image.rect.width = width;
+	tile_image.pix.height = tile_image.rect.height = height;
+	tile_image.pix.bytesperline = stride;
+	tile_image.pix.pixelformat =  image->fmt->fourcc;
+	tile_image.phys0 = addr0;
+	tile_image.phys1 = addr1;
+	ipu_cpmem_set_image(channel, &tile_image);
+
+	if (image->fmt->planar && !rot_swap_width_height)
+		ipu_cpmem_set_uv_offset(channel,
+					image->tile[tile_idx[0]].u_off,
+					image->tile[tile_idx[0]].v_off);
+
+	if (rot_mode)
+		ipu_cpmem_set_rotation(channel, rot_mode);
+
+	if (channel == chan->rotation_in_chan ||
+	    channel == chan->rotation_out_chan) {
+		burst_size = 8;
+		ipu_cpmem_set_block_mode(channel);
+	} else
+		burst_size = (width % 16) ? 8 : 16;
+
+	ipu_cpmem_set_burstsize(channel, burst_size);
+
+	ipu_ic_task_idma_init(chan->ic, channel, width, height,
+			      burst_size, rot_mode);
+
+	ipu_cpmem_set_axi_id(channel, 1);
+
+	ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
+}
+
+static int convert_start(struct ipu_image_convert_run *run)
+{
+	struct ipu_image_convert_ctx *ctx = run->ctx;
+	struct ipu_image_convert_chan *chan = ctx->chan;
+	struct ipu_image_convert_priv *priv = chan->priv;
+	struct ipu_image_convert_image *s_image = &ctx->in;
+	struct ipu_image_convert_image *d_image = &ctx->out;
+	enum ipu_color_space src_cs, dest_cs;
+	unsigned int dest_width, dest_height;
+	int ret;
+
+	dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p\n",
+		__func__, chan->ic_task, ctx, run);
+
+	src_cs = ipu_pixelformat_to_colorspace(s_image->fmt->fourcc);
+	dest_cs = ipu_pixelformat_to_colorspace(d_image->fmt->fourcc);
+
+	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+		/* swap width/height for resizer */
+		dest_width = d_image->tile[0].height;
+		dest_height = d_image->tile[0].width;
+	} else {
+		dest_width = d_image->tile[0].width;
+		dest_height = d_image->tile[0].height;
+	}
+
+	/* setup the IC resizer and CSC */
+	ret = ipu_ic_task_init(chan->ic,
+			       s_image->tile[0].width,
+			       s_image->tile[0].height,
+			       dest_width,
+			       dest_height,
+			       src_cs, dest_cs);
+	if (ret) {
+		dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
+		return ret;
+	}
+
+	/* init the source MEM-->IC PP IDMAC channel */
+	init_idmac_channel(ctx, chan->in_chan, s_image,
+			   IPU_ROTATE_NONE, false);
+
+	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+		/* init the IC PP-->MEM IDMAC channel */
+		init_idmac_channel(ctx, chan->out_chan, d_image,
+				   IPU_ROTATE_NONE, true);
+
+		/* init the MEM-->IC PP ROT IDMAC channel */
+		init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
+				   ctx->rot_mode, true);
+
+		/* init the destination IC PP ROT-->MEM IDMAC channel */
+		init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
+				   IPU_ROTATE_NONE, false);
+
+		/* now link IC PP-->MEM to MEM-->IC PP ROT */
+		ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
+	} else {
+		/* init the destination IC PP-->MEM IDMAC channel */
+		init_idmac_channel(ctx, chan->out_chan, d_image,
+				   ctx->rot_mode, false);
+	}
+
+	/* enable the IC */
+	ipu_ic_enable(chan->ic);
+
+	/* set buffers ready */
+	ipu_idmac_select_buffer(chan->in_chan, 0);
+	ipu_idmac_select_buffer(chan->out_chan, 0);
+	if (ipu_rot_mode_is_irt(ctx->rot_mode))
+		ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
+	if (ctx->double_buffering) {
+		ipu_idmac_select_buffer(chan->in_chan, 1);
+		ipu_idmac_select_buffer(chan->out_chan, 1);
+		if (ipu_rot_mode_is_irt(ctx->rot_mode))
+			ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
+	}
+
+	/* enable the channels! */
+	ipu_idmac_enable_channel(chan->in_chan);
+	ipu_idmac_enable_channel(chan->out_chan);
+	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+		ipu_idmac_enable_channel(chan->rotation_in_chan);
+		ipu_idmac_enable_channel(chan->rotation_out_chan);
+	}
+
+	ipu_ic_task_enable(chan->ic);
+
+	ipu_cpmem_dump(chan->in_chan);
+	ipu_cpmem_dump(chan->out_chan);
+	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+		ipu_cpmem_dump(chan->rotation_in_chan);
+		ipu_cpmem_dump(chan->rotation_out_chan);
+	}
+
+	ipu_dump(priv->ipu);
+
+	return 0;
+}
+
+/* hold irqlock when calling */
+static int do_run(struct ipu_image_convert_run *run)
+{
+	struct ipu_image_convert_ctx *ctx = run->ctx;
+	struct ipu_image_convert_chan *chan = ctx->chan;
+
+	lockdep_assert_held(&chan->irqlock);
+
+	ctx->in.base.phys0 = run->in_phys;
+	ctx->out.base.phys0 = run->out_phys;
+
+	ctx->cur_buf_num = 0;
+	ctx->next_tile = 1;
+
+	/* remove run from pending_q and set as current */
+	list_del(&run->list);
+	chan->current_run = run;
+
+	return convert_start(run);
+}
+
+/* hold irqlock when calling */
+static void run_next(struct ipu_image_convert_chan *chan)
+{
+	struct ipu_image_convert_priv *priv = chan->priv;
+	struct ipu_image_convert_run *run, *tmp;
+	int ret;
+
+	lockdep_assert_held(&chan->irqlock);
+
+	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
+		/* skip contexts that are aborting */
+		if (run->ctx->aborting) {
+			dev_dbg(priv->ipu->dev,
+				"%s: task %u: skipping aborting ctx %p run %p\n",
+				__func__, chan->ic_task, run->ctx, run);
+			continue;
+		}
+
+		ret = do_run(run);
+		if (!ret)
+			break;
+
+		/*
+		 * something went wrong with start, add the run
+		 * to done q and continue to the next run in the
+		 * pending q.
+		 */
+		run->status = ret;
+		list_add_tail(&run->list, &chan->done_q);
+		chan->current_run = NULL;
+	}
+}
+
+static void empty_done_q(struct ipu_image_convert_chan *chan)
+{
+	struct ipu_image_convert_priv *priv = chan->priv;
+	struct ipu_image_convert_run *run;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->irqlock, flags);
+
+	while (!list_empty(&chan->done_q)) {
+		run = list_entry(chan->done_q.next,
+				 struct ipu_image_convert_run,
+				 list);
+
+		list_del(&run->list);
+
+		dev_dbg(priv->ipu->dev,
+			"%s: task %u: completing ctx %p run %p with %d\n",
+			__func__, chan->ic_task, run->ctx, run, run->status);
+
+		/* call the completion callback and free the run */
+		spin_unlock_irqrestore(&chan->irqlock, flags);
+		run->ctx->complete(run, run->ctx->complete_context);
+		spin_lock_irqsave(&chan->irqlock, flags);
+	}
+
+	spin_unlock_irqrestore(&chan->irqlock, flags);
+}
+
+/*
+ * the bottom half thread clears out the done_q, calling the
+ * completion handler for each.
+ */
+static irqreturn_t do_bh(int irq, void *dev_id)
+{
+	struct ipu_image_convert_chan *chan = dev_id;
+	struct ipu_image_convert_priv *priv = chan->priv;
+	struct ipu_image_convert_ctx *ctx;
+	unsigned long flags;
+
+	dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
+		chan->ic_task);
+
+	empty_done_q(chan);
+
+	spin_lock_irqsave(&chan->irqlock, flags);
+
+	/*
+	 * the done_q is cleared out, signal any contexts
+	 * that are aborting that abort can complete.
+	 */
+	list_for_each_entry(ctx, &chan->ctx_list, list) {
+		if (ctx->aborting) {
+			dev_dbg(priv->ipu->dev,
+				"%s: task %u: signaling abort for ctx %p\n",
+				__func__, chan->ic_task, ctx);
+			complete(&ctx->aborted);
+		}
+	}
+
+	spin_unlock_irqrestore(&chan->irqlock, flags);
+
+	dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
+		chan->ic_task);
+
+	return IRQ_HANDLED;
+}
+
+/* hold irqlock when calling */
+static irqreturn_t do_irq(struct ipu_image_convert_run *run)
+{
+	struct ipu_image_convert_ctx *ctx = run->ctx;
+	struct ipu_image_convert_chan *chan = ctx->chan;
+	struct ipu_image_tile *src_tile, *dst_tile;
+	struct ipu_image_convert_image *s_image = &ctx->in;
+	struct ipu_image_convert_image *d_image = &ctx->out;
+	struct ipuv3_channel *outch;
+	unsigned int dst_idx;
+
+	lockdep_assert_held(&chan->irqlock);
+
+	outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
+		chan->rotation_out_chan : chan->out_chan;
+
+	/*
+	 * It is difficult to stop the channel DMA before the channels
+	 * enter the paused state. Without double-buffering the channels
+	 * are always in a paused state when the EOF irq occurs, so it
+	 * is safe to stop the channels now. For double-buffering we
+	 * just ignore the abort until the operation completes, when it
+	 * is safe to shut down.
+	 */
+	if (ctx->aborting && !ctx->double_buffering) {
+		convert_stop(run);
+		run->status = -EIO;
+		goto done;
+	}
+
+	if (ctx->next_tile == ctx->num_tiles) {
+		/*
+		 * the conversion is complete
+		 */
+		convert_stop(run);
+		run->status = 0;
+		goto done;
+	}
+
+	/*
+	 * not done, place the next tile buffers.
+	 */
+	if (!ctx->double_buffering) {
+
+		src_tile = &s_image->tile[ctx->next_tile];
+		dst_idx = ctx->out_tile_map[ctx->next_tile];
+		dst_tile = &d_image->tile[dst_idx];
+
+		ipu_cpmem_set_buffer(chan->in_chan, 0,
+				     s_image->base.phys0 + src_tile->offset);
+		ipu_cpmem_set_buffer(outch, 0,
+				     d_image->base.phys0 + dst_tile->offset);
+		if (s_image->fmt->planar)
+			ipu_cpmem_set_uv_offset(chan->in_chan,
+						src_tile->u_off,
+						src_tile->v_off);
+		if (d_image->fmt->planar)
+			ipu_cpmem_set_uv_offset(outch,
+						dst_tile->u_off,
+						dst_tile->v_off);
+
+		ipu_idmac_select_buffer(chan->in_chan, 0);
+		ipu_idmac_select_buffer(outch, 0);
+
+	} else if (ctx->next_tile < ctx->num_tiles - 1) {
+
+		src_tile = &s_image->tile[ctx->next_tile + 1];
+		dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
+		dst_tile = &d_image->tile[dst_idx];
+
+		ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
+				     s_image->base.phys0 + src_tile->offset);
+		ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
+				     d_image->base.phys0 + dst_tile->offset);
+
+		ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
+		ipu_idmac_select_buffer(outch, ctx->cur_buf_num);
+
+		ctx->cur_buf_num ^= 1;
+	}
+
+	ctx->next_tile++;
+	return IRQ_HANDLED;
+done:
+	list_add_tail(&run->list, &chan->done_q);
+	chan->current_run = NULL;
+	run_next(chan);
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t norotate_irq(int irq, void *data)
+{
+	struct ipu_image_convert_chan *chan = data;
+	struct ipu_image_convert_ctx *ctx;
+	struct ipu_image_convert_run *run;
+	unsigned long flags;
+	irqreturn_t ret;
+
+	spin_lock_irqsave(&chan->irqlock, flags);
+
+	/* get current run and its context */
+	run = chan->current_run;
+	if (!run) {
+		ret = IRQ_NONE;
+		goto out;
+	}
+
+	ctx = run->ctx;
+
+	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+		/* this is a rotation operation, just ignore */
+		spin_unlock_irqrestore(&chan->irqlock, flags);
+		return IRQ_HANDLED;
+	}
+
+	ret = do_irq(run);
+out:
+	spin_unlock_irqrestore(&chan->irqlock, flags);
+	return ret;
+}
+
+static irqreturn_t rotate_irq(int irq, void *data)
+{
+	struct ipu_image_convert_chan *chan = data;
+	struct ipu_image_convert_priv *priv = chan->priv;
+	struct ipu_image_convert_ctx *ctx;
+	struct ipu_image_convert_run *run;
+	unsigned long flags;
+	irqreturn_t ret;
+
+	spin_lock_irqsave(&chan->irqlock, flags);
+
+	/* get current run and its context */
+	run = chan->current_run;
+	if (!run) {
+		ret = IRQ_NONE;
+		goto out;
+	}
+
+	ctx = run->ctx;
+
+	if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
+		/* this was NOT a rotation operation, shouldn't happen */
+		dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
+		spin_unlock_irqrestore(&chan->irqlock, flags);
+		return IRQ_HANDLED;
+	}
+
+	ret = do_irq(run);
+out:
+	spin_unlock_irqrestore(&chan->irqlock, flags);
+	return ret;
+}
+
+/*
+ * try to force the completion of runs for this ctx. Called when
+ * abort wait times out in ipu_image_convert_abort().
+ */
+static void force_abort(struct ipu_image_convert_ctx *ctx)
+{
+	struct ipu_image_convert_chan *chan = ctx->chan;
+	struct ipu_image_convert_run *run;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->irqlock, flags);
+
+	run = chan->current_run;
+	if (run && run->ctx == ctx) {
+		convert_stop(run);
+		run->status = -EIO;
+		list_add_tail(&run->list, &chan->done_q);
+		chan->current_run = NULL;
+		run_next(chan);
+	}
+
+	spin_unlock_irqrestore(&chan->irqlock, flags);
+
+	empty_done_q(chan);
+}
+
+static void release_ipu_resources(struct ipu_image_convert_chan *chan)
+{
+	if (chan->out_eof_irq >= 0)
+		free_irq(chan->out_eof_irq, chan);
+	if (chan->rot_out_eof_irq >= 0)
+		free_irq(chan->rot_out_eof_irq, chan);
+
+	if (!IS_ERR_OR_NULL(chan->in_chan))
+		ipu_idmac_put(chan->in_chan);
+	if (!IS_ERR_OR_NULL(chan->out_chan))
+		ipu_idmac_put(chan->out_chan);
+	if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
+		ipu_idmac_put(chan->rotation_in_chan);
+	if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
+		ipu_idmac_put(chan->rotation_out_chan);
+	if (!IS_ERR_OR_NULL(chan->ic))
+		ipu_ic_put(chan->ic);
+
+	chan->in_chan = chan->out_chan = chan->rotation_in_chan =
+		chan->rotation_out_chan = NULL;
+	chan->out_eof_irq = chan->rot_out_eof_irq = -1;
+}
+
+static int get_ipu_resources(struct ipu_image_convert_chan *chan)
+{
+	const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
+	struct ipu_image_convert_priv *priv = chan->priv;
+	int ret;
+
+	/* get IC */
+	chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
+	if (IS_ERR(chan->ic)) {
+		dev_err(priv->ipu->dev, "could not acquire IC\n");
+		ret = PTR_ERR(chan->ic);
+		goto err;
+	}
+
+	/* get IDMAC channels */
+	chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
+	chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
+	if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
+		dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
+		ret = -EBUSY;
+		goto err;
+	}
+
+	chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
+	chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
+	if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
+		dev_err(priv->ipu->dev,
+			"could not acquire idmac rotation channels\n");
+		ret = -EBUSY;
+		goto err;
+	}
+
+	/* acquire the EOF interrupts */
+	chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
+						  chan->out_chan,
+						  IPU_IRQ_EOF);
+
+	ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
+				   0, "ipu-ic", chan);
+	if (ret < 0) {
+		dev_err(priv->ipu->dev, "could not acquire irq %d\n",
+			 chan->out_eof_irq);
+		chan->out_eof_irq = -1;
+		goto err;
+	}
+
+	chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
+						     chan->rotation_out_chan,
+						     IPU_IRQ_EOF);
+
+	ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
+				   0, "ipu-ic", chan);
+	if (ret < 0) {
+		dev_err(priv->ipu->dev, "could not acquire irq %d\n",
+			chan->rot_out_eof_irq);
+		chan->rot_out_eof_irq = -1;
+		goto err;
+	}
+
+	return 0;
+err:
+	release_ipu_resources(chan);
+	return ret;
+}
+
+static int fill_image(struct ipu_image_convert_ctx *ctx,
+		      struct ipu_image_convert_image *ic_image,
+		      struct ipu_image *image,
+		      enum ipu_image_convert_type type)
+{
+	struct ipu_image_convert_priv *priv = ctx->chan->priv;
+
+	ic_image->base = *image;
+	ic_image->type = type;
+
+	ic_image->fmt = get_format(image->pix.pixelformat);
+	if (!ic_image->fmt) {
+		dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
+			type == IMAGE_CONVERT_OUT ? "Output" : "Input");
+		return -EINVAL;
+	}
+
+	if (ic_image->fmt->planar)
+		ic_image->stride = ic_image->base.pix.width;
+	else
+		ic_image->stride  = ic_image->base.pix.bytesperline;
+
+	calc_tile_dimensions(ctx, ic_image);
+	calc_tile_offsets(ctx, ic_image);
+
+	return 0;
+}
+
+/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
+static unsigned int clamp_align(unsigned int x, unsigned int min,
+				unsigned int max, unsigned int align)
+{
+	/* Bits that must be zero to be aligned */
+	unsigned int mask = ~((1 << align) - 1);
+
+	/* Clamp to aligned min and max */
+	x = clamp(x, (min + ~mask) & mask, max & mask);
+
+	/* Round to nearest aligned value */
+	if (align)
+		x = (x + (1 << (align - 1))) & mask;
+
+	return x;
+}
+
+/*
+ * We have to adjust the tile width such that the tile physaddrs and
+ * U and V plane offsets are multiples of 8 bytes as required by
+ * the IPU DMA Controller. For the planar formats, this corresponds
+ * to a pixel alignment of 16 (but use a more formal equation since
+ * the variables are available). For all the packed formats, 8 is
+ * good enough.
+ */
+static inline u32 tile_width_align(const struct ipu_image_pixfmt *fmt)
+{
+	return fmt->planar ? 8 * fmt->uv_width_dec : 8;
+}
+
+/*
+ * For tile height alignment, we have to ensure that the output tile
+ * heights are multiples of 8 lines if the IRT is required by the
+ * given rotation mode (the IRT performs rotations on 8x8 blocks
+ * at a time). If the IRT is not used, or for input image tiles,
+ * 2 lines are good enough.
+ */
+static inline u32 tile_height_align(enum ipu_image_convert_type type,
+				    enum ipu_rotate_mode rot_mode)
+{
+	return (type == IMAGE_CONVERT_OUT &&
+		ipu_rot_mode_is_irt(rot_mode)) ? 8 : 2;
+}
+
+/* Adjusts input/output images to IPU restrictions */
+void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
+			      enum ipu_rotate_mode rot_mode)
+{
+	const struct ipu_image_pixfmt *infmt, *outfmt;
+	unsigned int num_in_rows, num_in_cols;
+	unsigned int num_out_rows, num_out_cols;
+	u32 w_align, h_align;
+
+	infmt = get_format(in->pix.pixelformat);
+	outfmt = get_format(out->pix.pixelformat);
+
+	/* set some default pixel formats if needed */
+	if (!infmt) {
+		in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
+		infmt = get_format(V4L2_PIX_FMT_RGB24);
+	}
+	if (!outfmt) {
+		out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
+		outfmt = get_format(V4L2_PIX_FMT_RGB24);
+	}
+
+	/* image converter does not handle fields */
+	in->pix.field = out->pix.field = V4L2_FIELD_NONE;
+
+	/* resizer cannot downsize more than 4:1 */
+	if (ipu_rot_mode_is_irt(rot_mode)) {
+		out->pix.height = max_t(__u32, out->pix.height,
+					in->pix.width / 4);
+		out->pix.width = max_t(__u32, out->pix.width,
+				       in->pix.height / 4);
+	} else {
+		out->pix.width = max_t(__u32, out->pix.width,
+				       in->pix.width / 4);
+		out->pix.height = max_t(__u32, out->pix.height,
+					in->pix.height / 4);
+	}
+
+	/* get tiling rows/cols from output format */
+	num_out_rows = num_stripes(out->pix.height);
+	num_out_cols = num_stripes(out->pix.width);
+	if (ipu_rot_mode_is_irt(rot_mode)) {
+		num_in_rows = num_out_cols;
+		num_in_cols = num_out_rows;
+	} else {
+		num_in_rows = num_out_rows;
+		num_in_cols = num_out_cols;
+	}
+
+	/* align input width/height */
+	w_align = ilog2(tile_width_align(infmt) * num_in_cols);
+	h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, rot_mode) *
+			num_in_rows);
+	in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W, w_align);
+	in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H, h_align);
+
+	/* align output width/height */
+	w_align = ilog2(tile_width_align(outfmt) * num_out_cols);
+	h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, rot_mode) *
+			num_out_rows);
+	out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W, w_align);
+	out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H, h_align);
+
+	/* set input/output strides and image sizes */
+	in->pix.bytesperline = (in->pix.width * infmt->bpp) >> 3;
+	in->pix.sizeimage = in->pix.height * in->pix.bytesperline;
+	out->pix.bytesperline = (out->pix.width * outfmt->bpp) >> 3;
+	out->pix.sizeimage = out->pix.height * out->pix.bytesperline;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
+
+/*
+ * this is used by ipu_image_convert_prepare() to verify set input and
+ * output images are valid before starting the conversion. Clients can
+ * also call it before calling ipu_image_convert_prepare().
+ */
+int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
+			     enum ipu_rotate_mode rot_mode)
+{
+	struct ipu_image testin, testout;
+
+	testin = *in;
+	testout = *out;
+
+	ipu_image_convert_adjust(&testin, &testout, rot_mode);
+
+	if (testin.pix.width != in->pix.width ||
+	    testin.pix.height != in->pix.height ||
+	    testout.pix.width != out->pix.width ||
+	    testout.pix.height != out->pix.height)
+		return -EINVAL;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_verify);
+
+/*
+ * Call ipu_image_convert_prepare() to prepare for the conversion of
+ * given images and rotation mode. Returns a new conversion context.
+ */
+struct ipu_image_convert_ctx *
+ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+			  struct ipu_image *in, struct ipu_image *out,
+			  enum ipu_rotate_mode rot_mode,
+			  ipu_image_convert_cb_t complete,
+			  void *complete_context)
+{
+	struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
+	struct ipu_image_convert_image *s_image, *d_image;
+	struct ipu_image_convert_chan *chan;
+	struct ipu_image_convert_ctx *ctx;
+	unsigned long flags;
+	bool get_res;
+	int ret;
+
+	if (!in || !out || !complete ||
+	    (ic_task != IC_TASK_VIEWFINDER &&
+	     ic_task != IC_TASK_POST_PROCESSOR))
+		return ERR_PTR(-EINVAL);
+
+	/* verify the in/out images before continuing */
+	ret = ipu_image_convert_verify(in, out, rot_mode);
+	if (ret) {
+		dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
+			__func__);
+		return ERR_PTR(ret);
+	}
+
+	chan = &priv->chan[ic_task];
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
+		chan->ic_task, ctx);
+
+	ctx->chan = chan;
+	init_completion(&ctx->aborted);
+
+	s_image = &ctx->in;
+	d_image = &ctx->out;
+
+	/* set tiling and rotation */
+	d_image->num_rows = num_stripes(out->pix.height);
+	d_image->num_cols = num_stripes(out->pix.width);
+	if (ipu_rot_mode_is_irt(rot_mode)) {
+		s_image->num_rows = d_image->num_cols;
+		s_image->num_cols = d_image->num_rows;
+	} else {
+		s_image->num_rows = d_image->num_rows;
+		s_image->num_cols = d_image->num_cols;
+	}
+
+	ctx->num_tiles = d_image->num_cols * d_image->num_rows;
+	ctx->rot_mode = rot_mode;
+
+	ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
+	if (ret)
+		goto out_free;
+	ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
+	if (ret)
+		goto out_free;
+
+	calc_out_tile_map(ctx);
+
+	dump_format(ctx, s_image);
+	dump_format(ctx, d_image);
+
+	ctx->complete = complete;
+	ctx->complete_context = complete_context;
+
+	/*
+	 * Can we use double-buffering for this operation? If there is
+	 * only one tile (the whole image can be converted in a single
+	 * operation) there's no point in using double-buffering. Also,
+	 * the IPU's IDMAC channels allow only a single U and V plane
+	 * offset shared between both buffers, but these offsets change
+	 * for every tile, and therefore would have to be updated for
+	 * each buffer which is not possible. So double-buffering is
+	 * impossible when either the source or destination images are
+	 * a planar format (YUV420, YUV422P, etc.).
+	 */
+	ctx->double_buffering = (ctx->num_tiles > 1 &&
+				 !s_image->fmt->planar &&
+				 !d_image->fmt->planar);
+
+	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+		ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
+				    d_image->tile[0].size);
+		if (ret)
+			goto out_free;
+		if (ctx->double_buffering) {
+			ret = alloc_dma_buf(priv,
+					    &ctx->rot_intermediate[1],
+					    d_image->tile[0].size);
+			if (ret)
+				goto out_free_dmabuf0;
+		}
+	}
+
+	spin_lock_irqsave(&chan->irqlock, flags);
+
+	get_res = list_empty(&chan->ctx_list);
+
+	list_add_tail(&ctx->list, &chan->ctx_list);
+
+	spin_unlock_irqrestore(&chan->irqlock, flags);
+
+	if (get_res) {
+		ret = get_ipu_resources(chan);
+		if (ret)
+			goto out_free_dmabuf1;
+	}
+
+	return ctx;
+
+out_free_dmabuf1:
+	free_dma_buf(priv, &ctx->rot_intermediate[1]);
+	spin_lock_irqsave(&chan->irqlock, flags);
+	list_del(&ctx->list);
+	spin_unlock_irqrestore(&chan->irqlock, flags);
+out_free_dmabuf0:
+	free_dma_buf(priv, &ctx->rot_intermediate[0]);
+out_free:
+	kfree(ctx);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);
+
+/*
+ * Carry out a single image conversion run. Only the physaddr's of the input
+ * and output image buffers are needed. The conversion context must have
+ * been created previously with ipu_image_convert_prepare().
+ */
+int ipu_image_convert_queue(struct ipu_image_convert_run *run)
+{
+	struct ipu_image_convert_chan *chan;
+	struct ipu_image_convert_priv *priv;
+	struct ipu_image_convert_ctx *ctx;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!run || !run->ctx || !run->in_phys || !run->out_phys)
+		return -EINVAL;
+
+	ctx = run->ctx;
+	chan = ctx->chan;
+	priv = chan->priv;
+
+	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
+		chan->ic_task, ctx, run);
+
+	INIT_LIST_HEAD(&run->list);
+
+	spin_lock_irqsave(&chan->irqlock, flags);
+
+	if (ctx->aborting) {
+		ret = -EIO;
+		goto unlock;
+	}
+
+	list_add_tail(&run->list, &chan->pending_q);
+
+	if (!chan->current_run) {
+		ret = do_run(run);
+		if (ret)
+			chan->current_run = NULL;
+	}
+unlock:
+	spin_unlock_irqrestore(&chan->irqlock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
+
+/* Abort any active or pending conversions for this context */
+void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+{
+	struct ipu_image_convert_chan *chan = ctx->chan;
+	struct ipu_image_convert_priv *priv = chan->priv;
+	struct ipu_image_convert_run *run, *active_run, *tmp;
+	unsigned long flags;
+	int run_count, ret;
+	bool need_abort;
+
+	reinit_completion(&ctx->aborted);
+
+	spin_lock_irqsave(&chan->irqlock, flags);
+
+	/* move all remaining pending runs in this context to done_q */
+	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
+		if (run->ctx != ctx)
+			continue;
+		run->status = -EIO;
+		list_move_tail(&run->list, &chan->done_q);
+	}
+
+	run_count = get_run_count(ctx, &chan->done_q);
+	active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
+		chan->current_run : NULL;
+
+	need_abort = (run_count || active_run);
+
+	ctx->aborting = need_abort;
+
+	spin_unlock_irqrestore(&chan->irqlock, flags);
+
+	if (!need_abort) {
+		dev_dbg(priv->ipu->dev,
+			"%s: task %u: no abort needed for ctx %p\n",
+			__func__, chan->ic_task, ctx);
+		return;
+	}
+
+	dev_dbg(priv->ipu->dev,
+		"%s: task %u: wait for completion: %d runs, active run %p\n",
+		__func__, chan->ic_task, run_count, active_run);
+
+	ret = wait_for_completion_timeout(&ctx->aborted,
+					  msecs_to_jiffies(10000));
+	if (ret == 0) {
+		dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
+		force_abort(ctx);
+	}
+
+	ctx->aborting = false;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
+
+/* Unprepare image conversion context */
+void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
+{
+	struct ipu_image_convert_chan *chan = ctx->chan;
+	struct ipu_image_convert_priv *priv = chan->priv;
+	unsigned long flags;
+	bool put_res;
+
+	/* make sure no runs are hanging around */
+	ipu_image_convert_abort(ctx);
+
+	dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
+		chan->ic_task, ctx);
+
+	spin_lock_irqsave(&chan->irqlock, flags);
+
+	list_del(&ctx->list);
+
+	put_res = list_empty(&chan->ctx_list);
+
+	spin_unlock_irqrestore(&chan->irqlock, flags);
+
+	if (put_res)
+		release_ipu_resources(chan);
+
+	free_dma_buf(priv, &ctx->rot_intermediate[1]);
+	free_dma_buf(priv, &ctx->rot_intermediate[0]);
+
+	kfree(ctx);
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);
+
+/*
+ * "Canned" asynchronous single image conversion. Allocates and returns
+ * a new conversion run.  On successful return the caller must free the
+ * run and call ipu_image_convert_unprepare() after conversion completes.
+ */
+struct ipu_image_convert_run *
+ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+		  struct ipu_image *in, struct ipu_image *out,
+		  enum ipu_rotate_mode rot_mode,
+		  ipu_image_convert_cb_t complete,
+		  void *complete_context)
+{
+	struct ipu_image_convert_ctx *ctx;
+	struct ipu_image_convert_run *run;
+	int ret;
+
+	ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
+					complete, complete_context);
+	if (IS_ERR(ctx))
+		return ERR_PTR(PTR_ERR(ctx));
+
+	run = kzalloc(sizeof(*run), GFP_KERNEL);
+	if (!run) {
+		ipu_image_convert_unprepare(ctx);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	run->ctx = ctx;
+	run->in_phys = in->phys0;
+	run->out_phys = out->phys0;
+
+	ret = ipu_image_convert_queue(run);
+	if (ret) {
+		ipu_image_convert_unprepare(ctx);
+		kfree(run);
+		return ERR_PTR(ret);
+	}
+
+	return run;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert);
+
+/* "Canned" synchronous single image conversion */
+static void image_convert_sync_complete(struct ipu_image_convert_run *run,
+					void *data)
+{
+	struct completion *comp = data;
+
+	complete(comp);
+}
+
+int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+			   struct ipu_image *in, struct ipu_image *out,
+			   enum ipu_rotate_mode rot_mode)
+{
+	struct ipu_image_convert_run *run;
+	struct completion comp;
+	int ret;
+
+	init_completion(&comp);
+
+	run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
+				image_convert_sync_complete, &comp);
+	if (IS_ERR(run))
+		return PTR_ERR(run);
+
+	ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
+	ret = (ret == 0) ? -ETIMEDOUT : 0;
+
+	ipu_image_convert_unprepare(run->ctx);
+	kfree(run);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
+
+int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
+{
+	struct ipu_image_convert_priv *priv;
+	int i;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	ipu->image_convert_priv = priv;
+	priv->ipu = ipu;
+
+	for (i = 0; i < IC_NUM_TASKS; i++) {
+		struct ipu_image_convert_chan *chan = &priv->chan[i];
+
+		chan->ic_task = i;
+		chan->priv = priv;
+		chan->dma_ch = &image_convert_dma_chan[i];
+		chan->out_eof_irq = -1;
+		chan->rot_out_eof_irq = -1;
+
+		spin_lock_init(&chan->irqlock);
+		INIT_LIST_HEAD(&chan->ctx_list);
+		INIT_LIST_HEAD(&chan->pending_q);
+		INIT_LIST_HEAD(&chan->done_q);
+	}
+
+	return 0;
+}
+
+void ipu_image_convert_exit(struct ipu_soc *ipu)
+{
+}
diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h
index bfb1e8a..22e47b6 100644
--- a/drivers/gpu/ipu-v3/ipu-prv.h
+++ b/drivers/gpu/ipu-v3/ipu-prv.h
@@ -75,6 +75,33 @@
 #define IPU_INT_CTRL(n)		IPU_CM_REG(0x003C + 4 * (n))
 #define IPU_INT_STAT(n)		IPU_CM_REG(0x0200 + 4 * (n))
 
+/* FS_PROC_FLOW1 */
+#define FS_PRPENC_ROT_SRC_SEL_MASK	(0xf << 0)
+#define FS_PRPENC_ROT_SRC_SEL_ENC		(0x7 << 0)
+#define FS_PRPVF_ROT_SRC_SEL_MASK	(0xf << 8)
+#define FS_PRPVF_ROT_SRC_SEL_VF			(0x8 << 8)
+#define FS_PP_SRC_SEL_MASK		(0xf << 12)
+#define FS_PP_ROT_SRC_SEL_MASK		(0xf << 16)
+#define FS_PP_ROT_SRC_SEL_PP			(0x5 << 16)
+#define FS_VDI1_SRC_SEL_MASK		(0x3 << 20)
+#define FS_VDI3_SRC_SEL_MASK		(0x3 << 20)
+#define FS_PRP_SRC_SEL_MASK		(0xf << 24)
+#define FS_VDI_SRC_SEL_MASK		(0x3 << 28)
+#define FS_VDI_SRC_SEL_CSI_DIRECT		(0x1 << 28)
+#define FS_VDI_SRC_SEL_VDOA			(0x2 << 28)
+
+/* FS_PROC_FLOW2 */
+#define FS_PRP_ENC_DEST_SEL_MASK	(0xf << 0)
+#define FS_PRP_ENC_DEST_SEL_IRT_ENC		(0x1 << 0)
+#define FS_PRPVF_DEST_SEL_MASK		(0xf << 4)
+#define FS_PRPVF_DEST_SEL_IRT_VF		(0x1 << 4)
+#define FS_PRPVF_ROT_DEST_SEL_MASK	(0xf << 8)
+#define FS_PP_DEST_SEL_MASK		(0xf << 12)
+#define FS_PP_DEST_SEL_IRT_PP			(0x3 << 12)
+#define FS_PP_ROT_DEST_SEL_MASK		(0xf << 16)
+#define FS_PRPENC_ROT_DEST_SEL_MASK	(0xf << 20)
+#define FS_PRP_DEST_SEL_MASK		(0xf << 24)
+
 #define IPU_DI0_COUNTER_RELEASE			(1 << 24)
 #define IPU_DI1_COUNTER_RELEASE			(1 << 25)
 
@@ -138,6 +165,8 @@
 struct ipu_dmfc_priv;
 struct ipu_di;
 struct ipu_ic_priv;
+struct ipu_vdi;
+struct ipu_image_convert_priv;
 struct ipu_smfc_priv;
 
 struct ipu_devtype;
@@ -152,6 +181,7 @@
 	void __iomem		*cm_reg;
 	void __iomem		*idmac_reg;
 
+	int			id;
 	int			usecount;
 
 	struct clk		*clk;
@@ -169,6 +199,8 @@
 	struct ipu_di		*di_priv[2];
 	struct ipu_csi		*csi_priv[2];
 	struct ipu_ic_priv	*ic_priv;
+	struct ipu_vdi          *vdi_priv;
+	struct ipu_image_convert_priv *image_convert_priv;
 	struct ipu_smfc_priv	*smfc_priv;
 };
 
@@ -199,6 +231,13 @@
 		unsigned long base, unsigned long tpmem_base);
 void ipu_ic_exit(struct ipu_soc *ipu);
 
+int ipu_vdi_init(struct ipu_soc *ipu, struct device *dev,
+		 unsigned long base, u32 module);
+void ipu_vdi_exit(struct ipu_soc *ipu);
+
+int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev);
+void ipu_image_convert_exit(struct ipu_soc *ipu);
+
 int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
 		unsigned long base, u32 module, struct clk *ipu_clk);
 void ipu_di_exit(struct ipu_soc *ipu, int id);
diff --git a/drivers/gpu/ipu-v3/ipu-vdi.c b/drivers/gpu/ipu-v3/ipu-vdi.c
new file mode 100644
index 0000000..f27bf5a
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-vdi.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2012-2016 Mentor Graphics Inc.
+ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+#include <linux/io.h>
+#include "ipu-prv.h"
+
+struct ipu_vdi {
+	void __iomem *base;
+	u32 module;
+	spinlock_t lock;
+	int use_count;
+	struct ipu_soc *ipu;
+};
+
+
+/* VDI Register Offsets */
+#define VDI_FSIZE 0x0000
+#define VDI_C     0x0004
+
+/* VDI Register Fields */
+#define VDI_C_CH_420             (0 << 1)
+#define VDI_C_CH_422             (1 << 1)
+#define VDI_C_MOT_SEL_MASK       (0x3 << 2)
+#define VDI_C_MOT_SEL_FULL       (2 << 2)
+#define VDI_C_MOT_SEL_LOW        (1 << 2)
+#define VDI_C_MOT_SEL_MED        (0 << 2)
+#define VDI_C_BURST_SIZE1_4      (3 << 4)
+#define VDI_C_BURST_SIZE2_4      (3 << 8)
+#define VDI_C_BURST_SIZE3_4      (3 << 12)
+#define VDI_C_BURST_SIZE_MASK    0xF
+#define VDI_C_BURST_SIZE1_OFFSET 4
+#define VDI_C_BURST_SIZE2_OFFSET 8
+#define VDI_C_BURST_SIZE3_OFFSET 12
+#define VDI_C_VWM1_SET_1         (0 << 16)
+#define VDI_C_VWM1_SET_2         (1 << 16)
+#define VDI_C_VWM1_CLR_2         (1 << 19)
+#define VDI_C_VWM3_SET_1         (0 << 22)
+#define VDI_C_VWM3_SET_2         (1 << 22)
+#define VDI_C_VWM3_CLR_2         (1 << 25)
+#define VDI_C_TOP_FIELD_MAN_1    (1 << 30)
+#define VDI_C_TOP_FIELD_AUTO_1   (1 << 31)
+
+static inline u32 ipu_vdi_read(struct ipu_vdi *vdi, unsigned int offset)
+{
+	return readl(vdi->base + offset);
+}
+
+static inline void ipu_vdi_write(struct ipu_vdi *vdi, u32 value,
+				 unsigned int offset)
+{
+	writel(value, vdi->base + offset);
+}
+
+void ipu_vdi_set_field_order(struct ipu_vdi *vdi, v4l2_std_id std, u32 field)
+{
+	bool top_field_0 = false;
+	unsigned long flags;
+	u32 reg;
+
+	switch (field) {
+	case V4L2_FIELD_INTERLACED_TB:
+	case V4L2_FIELD_SEQ_TB:
+	case V4L2_FIELD_TOP:
+		top_field_0 = true;
+		break;
+	case V4L2_FIELD_INTERLACED_BT:
+	case V4L2_FIELD_SEQ_BT:
+	case V4L2_FIELD_BOTTOM:
+		top_field_0 = false;
+		break;
+	default:
+		top_field_0 = (std & V4L2_STD_525_60) ? true : false;
+		break;
+	}
+
+	spin_lock_irqsave(&vdi->lock, flags);
+
+	reg = ipu_vdi_read(vdi, VDI_C);
+	if (top_field_0)
+		reg &= ~VDI_C_TOP_FIELD_MAN_1;
+	else
+		reg |= VDI_C_TOP_FIELD_MAN_1;
+	ipu_vdi_write(vdi, reg, VDI_C);
+
+	spin_unlock_irqrestore(&vdi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_set_field_order);
+
+void ipu_vdi_set_motion(struct ipu_vdi *vdi, enum ipu_motion_sel motion_sel)
+{
+	unsigned long flags;
+	u32 reg;
+
+	spin_lock_irqsave(&vdi->lock, flags);
+
+	reg = ipu_vdi_read(vdi, VDI_C);
+
+	reg &= ~VDI_C_MOT_SEL_MASK;
+
+	switch (motion_sel) {
+	case MED_MOTION:
+		reg |= VDI_C_MOT_SEL_MED;
+		break;
+	case HIGH_MOTION:
+		reg |= VDI_C_MOT_SEL_FULL;
+		break;
+	default:
+		reg |= VDI_C_MOT_SEL_LOW;
+		break;
+	}
+
+	ipu_vdi_write(vdi, reg, VDI_C);
+
+	spin_unlock_irqrestore(&vdi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_set_motion);
+
+void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres)
+{
+	unsigned long flags;
+	u32 pixel_fmt, reg;
+
+	spin_lock_irqsave(&vdi->lock, flags);
+
+	reg = ((yres - 1) << 16) | (xres - 1);
+	ipu_vdi_write(vdi, reg, VDI_FSIZE);
+
+	/*
+	 * Full motion, only vertical filter is used.
+	 * Burst size is 4 accesses
+	 */
+	if (code == MEDIA_BUS_FMT_UYVY8_2X8 ||
+	    code == MEDIA_BUS_FMT_UYVY8_1X16 ||
+	    code == MEDIA_BUS_FMT_YUYV8_2X8 ||
+	    code == MEDIA_BUS_FMT_YUYV8_1X16)
+		pixel_fmt = VDI_C_CH_422;
+	else
+		pixel_fmt = VDI_C_CH_420;
+
+	reg = ipu_vdi_read(vdi, VDI_C);
+	reg |= pixel_fmt;
+	reg |= VDI_C_BURST_SIZE2_4;
+	reg |= VDI_C_BURST_SIZE1_4 | VDI_C_VWM1_CLR_2;
+	reg |= VDI_C_BURST_SIZE3_4 | VDI_C_VWM3_CLR_2;
+	ipu_vdi_write(vdi, reg, VDI_C);
+
+	spin_unlock_irqrestore(&vdi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_setup);
+
+void ipu_vdi_unsetup(struct ipu_vdi *vdi)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vdi->lock, flags);
+	ipu_vdi_write(vdi, 0, VDI_FSIZE);
+	ipu_vdi_write(vdi, 0, VDI_C);
+	spin_unlock_irqrestore(&vdi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_unsetup);
+
+int ipu_vdi_enable(struct ipu_vdi *vdi)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vdi->lock, flags);
+
+	if (!vdi->use_count)
+		ipu_module_enable(vdi->ipu, vdi->module);
+
+	vdi->use_count++;
+
+	spin_unlock_irqrestore(&vdi->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_enable);
+
+int ipu_vdi_disable(struct ipu_vdi *vdi)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vdi->lock, flags);
+
+	if (vdi->use_count) {
+		if (!--vdi->use_count)
+			ipu_module_disable(vdi->ipu, vdi->module);
+	}
+
+	spin_unlock_irqrestore(&vdi->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_disable);
+
+struct ipu_vdi *ipu_vdi_get(struct ipu_soc *ipu)
+{
+	return ipu->vdi_priv;
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_get);
+
+void ipu_vdi_put(struct ipu_vdi *vdi)
+{
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_put);
+
+int ipu_vdi_init(struct ipu_soc *ipu, struct device *dev,
+		 unsigned long base, u32 module)
+{
+	struct ipu_vdi *vdi;
+
+	vdi = devm_kzalloc(dev, sizeof(*vdi), GFP_KERNEL);
+	if (!vdi)
+		return -ENOMEM;
+
+	ipu->vdi_priv = vdi;
+
+	spin_lock_init(&vdi->lock);
+	vdi->module = module;
+	vdi->base = devm_ioremap(dev, base, PAGE_SIZE);
+	if (!vdi->base)
+		return -ENOMEM;
+
+	dev_dbg(dev, "VDI base: 0x%08lx remapped to %p\n", base, vdi->base);
+	vdi->ipu = ipu;
+
+	return 0;
+}
+
+void ipu_vdi_exit(struct ipu_soc *ipu)
+{
+}
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index f17cb04..1887f19 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -131,7 +131,24 @@
 	return NULL;
 }
 
-/* Returns the default VGA device (vgacon's babe) */
+/**
+ * vga_default_device - return the default VGA device, for vgacon
+ *
+ * This can be defined by the platform. The default implementation
+ * is rather dumb and will probably only work properly on single
+ * vga card setups and/or x86 platforms.
+ *
+ * If your VGA default device is not PCI, you'll have to return
+ * NULL here. In this case, I assume it will not conflict with
+ * any PCI card. If this is not true, I'll have to define two archs
+ * hooks for enabling/disabling the VGA default device if that is
+ * possible. This may be a problem with real _ISA_ VGA cards, in
+ * addition to a PCI one. I don't know at this point how to deal
+ * with that card. Can theirs IOs be disabled at all ? If not, then
+ * I suppose it's a matter of having the proper arch hook telling
+ * us about it, so we basically never allow anybody to succeed a
+ * vga_get()...
+ */
 struct pci_dev *vga_default_device(void)
 {
 	return vga_default;
@@ -356,6 +373,40 @@
 		wake_up_all(&vga_wait_queue);
 }
 
+/**
+ * vga_get - acquire & locks VGA resources
+ * @pdev: pci device of the VGA card or NULL for the system default
+ * @rsrc: bit mask of resources to acquire and lock
+ * @interruptible: blocking should be interruptible by signals ?
+ *
+ * This function acquires VGA resources for the given card and mark those
+ * resources locked. If the resource requested are "normal" (and not legacy)
+ * resources, the arbiter will first check whether the card is doing legacy
+ * decoding for that type of resource. If yes, the lock is "converted" into a
+ * legacy resource lock.
+ *
+ * The arbiter will first look for all VGA cards that might conflict and disable
+ * their IOs and/or Memory access, including VGA forwarding on P2P bridges if
+ * necessary, so that the requested resources can be used. Then, the card is
+ * marked as locking these resources and the IO and/or Memory accesses are
+ * enabled on the card (including VGA forwarding on parent P2P bridges if any).
+ *
+ * This function will block if some conflicting card is already locking one of
+ * the required resources (or any resource on a different bus segment, since P2P
+ * bridges don't differentiate VGA memory and IO afaik). You can indicate
+ * whether this blocking should be interruptible by a signal (for userland
+ * interface) or not.
+ *
+ * Must not be called at interrupt time or in atomic context.  If the card
+ * already owns the resources, the function succeeds.  Nested calls are
+ * supported (a per-resource counter is maintained)
+ *
+ * On success, release the VGA resource again with vga_put().
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
 int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
 {
 	struct vga_device *vgadev, *conflict;
@@ -408,6 +459,21 @@
 }
 EXPORT_SYMBOL(vga_get);
 
+/**
+ * vga_tryget - try to acquire & lock legacy VGA resources
+ * @pdev: pci devivce of VGA card or NULL for system default
+ * @rsrc: bit mask of resources to acquire and lock
+ *
+ * This function performs the same operation as vga_get(), but will return an
+ * error (-EBUSY) instead of blocking if the resources are already locked by
+ * another card. It can be called in any context
+ *
+ * On success, release the VGA resource again with vga_put().
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
 int vga_tryget(struct pci_dev *pdev, unsigned int rsrc)
 {
 	struct vga_device *vgadev;
@@ -435,6 +501,16 @@
 }
 EXPORT_SYMBOL(vga_tryget);
 
+/**
+ * vga_put - release lock on legacy VGA resources
+ * @pdev: pci device of VGA card or NULL for system default
+ * @rsrc: but mask of resource to release
+ *
+ * This fuction releases resources previously locked by vga_get() or
+ * vga_tryget(). The resources aren't disabled right away, so that a subsequence
+ * vga_get() on the same card will succeed immediately. Resources have a
+ * counter, so locks are only released if the counter reaches 0.
+ */
 void vga_put(struct pci_dev *pdev, unsigned int rsrc)
 {
 	struct vga_device *vgadev;
@@ -716,7 +792,37 @@
 }
 EXPORT_SYMBOL(vga_set_legacy_decoding);
 
-/* call with NULL to unregister */
+/**
+ * vga_client_register - register or unregister a VGA arbitration client
+ * @pdev: pci device of the VGA client
+ * @cookie: client cookie to be used in callbacks
+ * @irq_set_state: irq state change callback
+ * @set_vga_decode: vga decode change callback
+ *
+ * Clients have two callback mechanisms they can use.
+ *
+ * @irq_set_state callback: If a client can't disable its GPUs VGA
+ * resources, then we need to be able to ask it to turn off its irqs when we
+ * turn off its mem and io decoding.
+ *
+ * @set_vga_decode callback: If a client can disable its GPU VGA resource, it
+ * will get a callback from this to set the encode/decode state.
+ *
+ * Rationale: we cannot disable VGA decode resources unconditionally some single
+ * GPU laptops seem to require ACPI or BIOS access to the VGA registers to
+ * control things like backlights etc.  Hopefully newer multi-GPU laptops do
+ * something saner, and desktops won't have any special ACPI for this. The
+ * driver will get a callback when VGA arbitration is first used by userspace
+ * since some older X servers have issues.
+ *
+ * This function does not check whether a client for @pdev has been registered
+ * already.
+ *
+ * To unregister just call this function with @irq_set_state and @set_vga_decode
+ * both set to NULL for the same @pdev as originally used to register them.
+ *
+ * Returns: 0 on success, -1 on failure
+ */
 int vga_client_register(struct pci_dev *pdev, void *cookie,
 			void (*irq_set_state)(void *cookie, bool state),
 			unsigned int (*set_vga_decode)(void *cookie,
diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
index 8fd4bf7..818ea7d 100644
--- a/drivers/hid/hid-dr.c
+++ b/drivers/hid/hid-dr.c
@@ -234,58 +234,6 @@
 	0xC0                /*  End Collection                  */
 };
 
-static __u8 pid0006_rdesc_fixed[] = {
-	0x05, 0x01,        /* Usage Page (Generic Desktop)	*/
-	0x09, 0x04,        /* Usage (Joystick)			*/
-	0xA1, 0x01,        /* Collection (Application)		*/
-	0xA1, 0x02,        /*   Collection (Logical)		*/
-	0x75, 0x08,        /*     Report Size (8)		*/
-	0x95, 0x05,        /*     Report Count (5)		*/
-	0x15, 0x00,        /*     Logical Minimum (0)		*/
-	0x26, 0xFF, 0x00,  /*     Logical Maximum (255)		*/
-	0x35, 0x00,        /*     Physical Minimum (0)		*/
-	0x46, 0xFF, 0x00,  /*     Physical Maximum (255)	*/
-	0x09, 0x30,        /*     Usage (X)			*/
-	0x09, 0x33,        /*     Usage (Ry)			*/
-	0x09, 0x32,        /*     Usage (Z)			*/
-	0x09, 0x31,        /*     Usage (Y)			*/
-	0x09, 0x34,        /*     Usage (Ry)			*/
-	0x81, 0x02,        /*     Input (Variable)		*/
-	0x75, 0x04,        /*     Report Size (4)		*/
-	0x95, 0x01,        /*     Report Count (1)		*/
-	0x25, 0x07,        /*     Logical Maximum (7)		*/
-	0x46, 0x3B, 0x01,  /*     Physical Maximum (315)	*/
-	0x65, 0x14,        /*     Unit (Centimeter)		*/
-	0x09, 0x39,        /*     Usage (Hat switch)		*/
-	0x81, 0x42,        /*     Input (Variable)		*/
-	0x65, 0x00,        /*     Unit (None)			*/
-	0x75, 0x01,        /*     Report Size (1)		*/
-	0x95, 0x0C,        /*     Report Count (12)		*/
-	0x25, 0x01,        /*     Logical Maximum (1)		*/
-	0x45, 0x01,        /*     Physical Maximum (1)		*/
-	0x05, 0x09,        /*     Usage Page (Button)		*/
-	0x19, 0x01,        /*     Usage Minimum (0x01)		*/
-	0x29, 0x0C,        /*     Usage Maximum (0x0C)		*/
-	0x81, 0x02,        /*     Input (Variable)		*/
-	0x06, 0x00, 0xFF,  /*     Usage Page (Vendor Defined)	*/
-	0x75, 0x01,        /*     Report Size (1)		*/
-	0x95, 0x08,        /*     Report Count (8)		*/
-	0x25, 0x01,        /*     Logical Maximum (1)		*/
-	0x45, 0x01,        /*     Physical Maximum (1)		*/
-	0x09, 0x01,        /*     Usage (0x01)			*/
-	0x81, 0x02,        /*     Input (Variable)		*/
-	0xC0,              /*   End Collection			*/
-	0xA1, 0x02,        /*   Collection (Logical)		*/
-	0x75, 0x08,        /*     Report Size (8)		*/
-	0x95, 0x07,        /*     Report Count (7)		*/
-	0x46, 0xFF, 0x00,  /*     Physical Maximum (255)	*/
-	0x26, 0xFF, 0x00,  /*     Logical Maximum (255)		*/
-	0x09, 0x02,        /*     Usage (0x02)			*/
-	0x91, 0x02,        /*     Output (Variable)		*/
-	0xC0,              /*   End Collection			*/
-	0xC0               /* End Collection			*/
-};
-
 static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 				unsigned int *rsize)
 {
@@ -296,16 +244,34 @@
 			*rsize = sizeof(pid0011_rdesc_fixed);
 		}
 		break;
-	case 0x0006:
-		if (*rsize == sizeof(pid0006_rdesc_fixed)) {
-			rdesc = pid0006_rdesc_fixed;
-			*rsize = sizeof(pid0006_rdesc_fixed);
-		}
-		break;
 	}
 	return rdesc;
 }
 
+#define map_abs(c)      hid_map_usage(hi, usage, bit, max, EV_ABS, (c))
+#define map_rel(c)      hid_map_usage(hi, usage, bit, max, EV_REL, (c))
+
+static int dr_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+			    struct hid_field *field, struct hid_usage *usage,
+			    unsigned long **bit, int *max)
+{
+	switch (usage->hid) {
+	/*
+	 * revert to the old hid-input behavior where axes
+	 * can be randomly assigned when hid->usage is reused.
+	 */
+	case HID_GD_X: case HID_GD_Y: case HID_GD_Z:
+	case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ:
+		if (field->flags & HID_MAIN_ITEM_RELATIVE)
+			map_rel(usage->hid & 0xf);
+		else
+			map_abs(usage->hid & 0xf);
+		return 1;
+	}
+
+	return 0;
+}
+
 static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
 	int ret;
@@ -352,6 +318,7 @@
 	.id_table = dr_devices,
 	.report_fixup = dr_report_fixup,
 	.probe = dr_probe,
+	.input_mapping = dr_input_mapping,
 };
 module_hid_driver(dr_driver);
 
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index cd59c79..6cfb5ca 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -64,6 +64,9 @@
 #define USB_VENDOR_ID_AKAI		0x2011
 #define USB_DEVICE_ID_AKAI_MPKMINI2	0x0715
 
+#define USB_VENDOR_ID_AKAI_09E8		0x09E8
+#define USB_DEVICE_ID_AKAI_09E8_MIDIMIX	0x0031
+
 #define USB_VENDOR_ID_ALCOR		0x058f
 #define USB_DEVICE_ID_ALCOR_USBRS232	0x9720
 
diff --git a/drivers/hid/hid-led.c b/drivers/hid/hid-led.c
index d8d55f3..d3e1ab1 100644
--- a/drivers/hid/hid-led.c
+++ b/drivers/hid/hid-led.c
@@ -100,6 +100,7 @@
 	const struct hidled_config *config;
 	struct hid_device       *hdev;
 	struct hidled_rgb	*rgb;
+	u8			*buf;
 	struct mutex		lock;
 };
 
@@ -118,13 +119,19 @@
 
 	mutex_lock(&ldev->lock);
 
+	/*
+	 * buffer provided to hid_hw_raw_request must not be on the stack
+	 * and must not be part of a data structure
+	 */
+	memcpy(ldev->buf, buf, ldev->config->report_size);
+
 	if (ldev->config->report_type == RAW_REQUEST)
-		ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+		ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
 					 ldev->config->report_size,
 					 HID_FEATURE_REPORT,
 					 HID_REQ_SET_REPORT);
 	else if (ldev->config->report_type == OUTPUT_REPORT)
-		ret = hid_hw_output_report(ldev->hdev, buf,
+		ret = hid_hw_output_report(ldev->hdev, ldev->buf,
 					   ldev->config->report_size);
 	else
 		ret = -EINVAL;
@@ -147,17 +154,21 @@
 
 	mutex_lock(&ldev->lock);
 
-	ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+	memcpy(ldev->buf, buf, ldev->config->report_size);
+
+	ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
 				 ldev->config->report_size,
 				 HID_FEATURE_REPORT,
 				 HID_REQ_SET_REPORT);
 	if (ret < 0)
 		goto err;
 
-	ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+	ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
 				 ldev->config->report_size,
 				 HID_FEATURE_REPORT,
 				 HID_REQ_GET_REPORT);
+
+	memcpy(buf, ldev->buf, ldev->config->report_size);
 err:
 	mutex_unlock(&ldev->lock);
 
@@ -447,6 +458,10 @@
 	if (!ldev)
 		return -ENOMEM;
 
+	ldev->buf = devm_kmalloc(&hdev->dev, MAX_REPORT_SIZE, GFP_KERNEL);
+	if (!ldev->buf)
+		return -ENOMEM;
+
 	ret = hid_parse(hdev);
 	if (ret)
 		return ret;
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 0a0eca5..354d49e 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -56,6 +56,7 @@
 
 	{ USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
+	{ USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 98fffa3..5ab6721 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1680,7 +1680,7 @@
 
 static void of_i2c_register_devices(struct i2c_adapter *adap)
 {
-	struct device_node *node;
+	struct device_node *bus, *node;
 
 	/* Only register child devices if the adapter has a node pointer set */
 	if (!adap->dev.of_node)
@@ -1688,11 +1688,17 @@
 
 	dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
 
-	for_each_available_child_of_node(adap->dev.of_node, node) {
+	bus = of_get_child_by_name(adap->dev.of_node, "i2c-bus");
+	if (!bus)
+		bus = of_node_get(adap->dev.of_node);
+
+	for_each_available_child_of_node(bus, node) {
 		if (of_node_test_and_set_flag(node, OF_POPULATED))
 			continue;
 		of_i2c_register_device(adap, node);
 	}
+
+	of_node_put(bus);
 }
 
 static int of_dev_node_match(struct device *dev, void *data)
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 19a418a..fb3fb89 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -89,4 +89,6 @@
 
 source "drivers/infiniband/hw/hfi1/Kconfig"
 
+source "drivers/infiniband/hw/qedr/Kconfig"
+
 endif # INFINIBAND
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index c68746c..224ad27 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -94,6 +94,7 @@
 	unsigned long dma_attrs = 0;
 	struct scatterlist *sg, *sg_list_start;
 	int need_release = 0;
+	unsigned int gup_flags = FOLL_WRITE;
 
 	if (dmasync)
 		dma_attrs |= DMA_ATTR_WRITE_BARRIER;
@@ -183,6 +184,9 @@
 	if (ret)
 		goto out;
 
+	if (!umem->writable)
+		gup_flags |= FOLL_FORCE;
+
 	need_release = 1;
 	sg_list_start = umem->sg_head.sgl;
 
@@ -190,7 +194,7 @@
 		ret = get_user_pages(cur_base,
 				     min_t(unsigned long, npages,
 					   PAGE_SIZE / sizeof (struct page *)),
-				     1, !umem->writable, page_list, vma_list);
+				     gup_flags, page_list, vma_list);
 
 		if (ret < 0)
 			goto out;
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 75077a0..1f0fe32 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -527,6 +527,7 @@
 	u64 off;
 	int j, k, ret = 0, start_idx, npages = 0;
 	u64 base_virt_addr;
+	unsigned int flags = 0;
 
 	if (access_mask == 0)
 		return -EINVAL;
@@ -556,6 +557,9 @@
 		goto out_put_task;
 	}
 
+	if (access_mask & ODP_WRITE_ALLOWED_BIT)
+		flags |= FOLL_WRITE;
+
 	start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
 	k = start_idx;
 
@@ -574,8 +578,7 @@
 		 */
 		npages = get_user_pages_remote(owning_process, owning_mm,
 				user_virt, gup_num_pages,
-				access_mask & ODP_WRITE_ALLOWED_BIT,
-				0, local_page_list, NULL);
+				flags, local_page_list, NULL);
 		up_read(&owning_mm->mmap_sem);
 
 		if (npages < 0)
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index 21fe401..e7a5ed9 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -10,3 +10,4 @@
 obj-$(CONFIG_INFINIBAND_USNIC)		+= usnic/
 obj-$(CONFIG_INFINIBAND_HFI1)		+= hfi1/
 obj-$(CONFIG_INFINIBAND_HNS)		+= hns/
+obj-$(CONFIG_INFINIBAND_QEDR)		+= qedr/
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 875597b..0973659 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -83,8 +83,7 @@
 static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
 			     struct hns_roce_mtt *hr_mtt,
 			     struct hns_roce_uar *hr_uar,
-			     struct hns_roce_cq *hr_cq, int vector,
-			     int collapsed)
+			     struct hns_roce_cq *hr_cq, int vector)
 {
 	struct hns_roce_cmd_mailbox *mailbox = NULL;
 	struct hns_roce_cq_table *cq_table = NULL;
@@ -153,6 +152,9 @@
 	hr_cq->cons_index = 0;
 	hr_cq->uar = hr_uar;
 
+	atomic_set(&hr_cq->refcount, 1);
+	init_completion(&hr_cq->free);
+
 	return 0;
 
 err_radix:
@@ -192,6 +194,11 @@
 	/* Waiting interrupt process procedure carried out */
 	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
 
+	/* wait for all interrupt processed */
+	if (atomic_dec_and_test(&hr_cq->refcount))
+		complete(&hr_cq->free);
+	wait_for_completion(&hr_cq->free);
+
 	spin_lock_irq(&cq_table->lock);
 	radix_tree_delete(&cq_table->tree, hr_cq->cqn);
 	spin_unlock_irq(&cq_table->lock);
@@ -300,10 +307,7 @@
 
 	cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
 	hr_cq->ib_cq.cqe = cq_entries - 1;
-	mutex_init(&hr_cq->resize_mutex);
 	spin_lock_init(&hr_cq->lock);
-	hr_cq->hr_resize_buf = NULL;
-	hr_cq->resize_umem = NULL;
 
 	if (context) {
 		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
@@ -338,8 +342,8 @@
 	}
 
 	/* Allocate cq index, fill cq_context */
-	ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
-				uar, hr_cq, vector, 0);
+	ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar,
+				hr_cq, vector);
 	if (ret) {
 		dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
 		goto err_mtt;
@@ -353,12 +357,15 @@
 	if (context) {
 		if (ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64))) {
 			ret = -EFAULT;
-			goto err_mtt;
+			goto err_cqc;
 		}
 	}
 
 	return &hr_cq->ib_cq;
 
+err_cqc:
+	hns_roce_free_cq(hr_dev, hr_cq);
+
 err_mtt:
 	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
 	if (context)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index ea73580..3417315 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -62,7 +62,7 @@
 #define HNS_ROCE_AEQE_OF_VEC_NUM		1
 
 /* 4G/4K = 1M */
-#define HNS_ROCE_SL_SHIFT			29
+#define HNS_ROCE_SL_SHIFT			28
 #define HNS_ROCE_TCLASS_SHIFT			20
 #define HNS_ROCE_FLOW_LABLE_MASK		0xfffff
 
@@ -74,7 +74,9 @@
 #define MR_TYPE_DMA				0x03
 
 #define PKEY_ID					0xffff
+#define GUID_LEN				8
 #define NODE_DESC_SIZE				64
+#define DB_REG_OFFSET				0x1000
 
 #define SERV_TYPE_RC				0
 #define SERV_TYPE_RD				1
@@ -282,20 +284,11 @@
 	struct hns_roce_mtt hr_mtt;
 };
 
-struct hns_roce_cq_resize {
-	struct hns_roce_cq_buf	hr_buf;
-	int			cqe;
-};
-
 struct hns_roce_cq {
 	struct ib_cq			ib_cq;
 	struct hns_roce_cq_buf		hr_buf;
-	/* pointer to store information after resize*/
-	struct hns_roce_cq_resize	*hr_resize_buf;
 	spinlock_t			lock;
-	struct mutex			resize_mutex;
 	struct ib_umem			*umem;
-	struct ib_umem			*resize_umem;
 	void (*comp)(struct hns_roce_cq *);
 	void (*event)(struct hns_roce_cq *, enum hns_roce_event);
 
@@ -408,6 +401,7 @@
 	u32			buff_size;
 	struct mutex		mutex;
 	u8			port;
+	u8			phy_port;
 	u8			sl;
 	u8			resp_depth;
 	u8			state;
@@ -471,7 +465,6 @@
 	u32		max_rq_desc_sz;	/* 64 */
 	int		max_qp_init_rdma;
 	int		max_qp_dest_rdma;
-	int		sqp_start;
 	int		num_cqs;
 	int		max_cqes;
 	int		reserved_cqs;
@@ -512,6 +505,8 @@
 	void (*write_cqc)(struct hns_roce_dev *hr_dev,
 			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
 			  dma_addr_t dma_handle, int nent, u32 vector);
+	int (*clear_hem)(struct hns_roce_dev *hr_dev,
+			 struct hns_roce_hem_table *table, int obj);
 	int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
 	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
@@ -533,7 +528,6 @@
 	struct hns_roce_uar     priv_uar;
 	const char		*irq_names[HNS_ROCE_MAX_IRQ_NUM];
 	spinlock_t		sm_lock;
-	spinlock_t		cq_db_lock;
 	spinlock_t		bt_cmd_lock;
 	struct hns_roce_ib_iboe iboe;
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c
index 98af7fe..21e21b0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_eq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.c
@@ -66,9 +66,6 @@
 {
 	struct device *dev = &hr_dev->pdev->dev;
 
-	qpn = roce_get_field(aeqe->event.qp_event.qp,
-			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
 	dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
 	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
 			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
@@ -96,13 +93,6 @@
 	default:
 		break;
 	}
-
-	hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
-					HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-					HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
-			  roce_get_field(aeqe->asyn,
-					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
 }
 
 static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
@@ -111,9 +101,6 @@
 {
 	struct device *dev = &hr_dev->pdev->dev;
 
-	qpn = roce_get_field(aeqe->event.qp_event.qp,
-			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
 	dev_warn(dev, "Local Access Violation Work Queue Error.\n");
 	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
 			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
@@ -141,13 +128,69 @@
 	default:
 		break;
 	}
+}
 
-	hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
-					 HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-					 HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
-			  roce_get_field(aeqe->asyn,
-					 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-					 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+static void hns_roce_qp_err_handle(struct hns_roce_dev *hr_dev,
+				   struct hns_roce_aeqe *aeqe,
+				   int event_type)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	int phy_port;
+	int qpn;
+
+	qpn = roce_get_field(aeqe->event.qp_event.qp,
+			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
+	phy_port = roce_get_field(aeqe->event.qp_event.qp,
+			HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
+			HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
+	if (qpn <= 1)
+		qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
+
+	switch (event_type) {
+	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+		dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
+			      "QP %d, phy_port %d.\n", qpn, phy_port);
+		break;
+	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+		hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
+		break;
+	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+		hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+		break;
+	default:
+		break;
+	}
+
+	hns_roce_qp_event(hr_dev, qpn, event_type);
+}
+
+static void hns_roce_cq_err_handle(struct hns_roce_dev *hr_dev,
+				   struct hns_roce_aeqe *aeqe,
+				   int event_type)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	u32 cqn;
+
+	cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
+		    HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+		    HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
+
+	switch (event_type) {
+	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+		dev_warn(dev, "CQ 0x%x access err.\n", cqn);
+		break;
+	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+		dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+		break;
+	case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+		dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
+		break;
+	default:
+		break;
+	}
+
+	hns_roce_cq_event(hr_dev, cqn, event_type);
 }
 
 static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev,
@@ -185,7 +228,7 @@
 	struct device *dev = &hr_dev->pdev->dev;
 	struct hns_roce_aeqe *aeqe;
 	int aeqes_found = 0;
-	int qpn = 0;
+	int event_type;
 
 	while ((aeqe = next_aeqe_sw(eq))) {
 		dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
@@ -195,9 +238,10 @@
 		/* Memory barrier */
 		rmb();
 
-		switch (roce_get_field(aeqe->asyn,
-			HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-			HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)) {
+		event_type = roce_get_field(aeqe->asyn,
+				HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+				HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
+		switch (event_type) {
 		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
 			dev_warn(dev, "PATH MIG not supported\n");
 			break;
@@ -211,23 +255,9 @@
 			dev_warn(dev, "PATH MIG failed\n");
 			break;
 		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
-			dev_warn(dev, "qpn = 0x%lx\n",
-			roce_get_field(aeqe->event.qp_event.qp,
-				       HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-				       HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S));
-			hns_roce_qp_event(hr_dev,
-				roce_get_field(aeqe->event.qp_event.qp,
-					HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-					HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
-				roce_get_field(aeqe->asyn,
-					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
-			break;
 		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
-			hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
-			break;
 		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
-			hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+			hns_roce_qp_err_handle(hr_dev, aeqe, event_type);
 			break;
 		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
 		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
@@ -235,40 +265,9 @@
 			dev_warn(dev, "SRQ not support!\n");
 			break;
 		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
-			dev_warn(dev, "CQ 0x%lx access err.\n",
-			roce_get_field(aeqe->event.cq_event.cq,
-				       HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
-				       HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
-			hns_roce_cq_event(hr_dev,
-			le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
-				    HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
-				    HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
-			roce_get_field(aeqe->asyn,
-				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
-			break;
 		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
-			dev_warn(dev, "CQ 0x%lx overflow\n",
-			roce_get_field(aeqe->event.cq_event.cq,
-				       HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
-				       HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
-			hns_roce_cq_event(hr_dev,
-			le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
-				    HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
-				    HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
-			roce_get_field(aeqe->asyn,
-				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
-			break;
 		case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
-			dev_warn(dev, "CQ ID invalid.\n");
-			hns_roce_cq_event(hr_dev,
-			le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
-				    HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
-				    HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
-			roce_get_field(aeqe->asyn,
-				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+			hns_roce_cq_err_handle(hr_dev, aeqe, event_type);
 			break;
 		case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
 			dev_warn(dev, "port change.\n");
@@ -290,11 +289,8 @@
 				     HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
 			break;
 		default:
-			dev_warn(dev, "Unhandled event 0x%lx on EQ %d at index %u\n",
-				 roce_get_field(aeqe->asyn,
-					      HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-					      HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S),
-				 eq->eqn, eq->cons_index);
+			dev_warn(dev, "Unhandled event %d on EQ %d at index %u\n",
+				 event_type, eq->eqn, eq->cons_index);
 			break;
 		};
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.h b/drivers/infiniband/hw/hns/hns_roce_eq.h
index fe43881..c6d212d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_eq.h
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.h
@@ -107,6 +107,10 @@
 #define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M   \
 	(((1UL << 24) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S)
 
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M   \
+	(((1UL << 3) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S)
+
 #define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
 #define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M   \
 	(((1UL << 16) - 1) << HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index d53d643..250d8f2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -36,14 +36,10 @@
 #include "hns_roce_hem.h"
 #include "hns_roce_common.h"
 
-#define HW_SYNC_TIMEOUT_MSECS		500
-#define HW_SYNC_SLEEP_TIME_INTERVAL	20
-
 #define HNS_ROCE_HEM_ALLOC_SIZE		(1 << 17)
 #define HNS_ROCE_TABLE_CHUNK_SIZE	(1 << 17)
 
 #define DMA_ADDR_T_SHIFT		12
-#define BT_CMD_SYNC_SHIFT		31
 #define BT_BA_SHIFT			32
 
 struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
@@ -213,74 +209,6 @@
 	return ret;
 }
 
-static int hns_roce_clear_hem(struct hns_roce_dev *hr_dev,
-			      struct hns_roce_hem_table *table,
-			      unsigned long obj)
-{
-	struct device *dev = &hr_dev->pdev->dev;
-	unsigned long end = 0;
-	unsigned long flags;
-	void __iomem *bt_cmd;
-	uint32_t bt_cmd_val[2];
-	u32 bt_cmd_h_val = 0;
-	int ret = 0;
-
-	switch (table->type) {
-	case HEM_TYPE_QPC:
-		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
-			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
-		break;
-	case HEM_TYPE_MTPT:
-		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
-			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
-			       HEM_TYPE_MTPT);
-		break;
-	case HEM_TYPE_CQC:
-		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
-			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
-		break;
-	case HEM_TYPE_SRQC:
-		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
-			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
-			       HEM_TYPE_SRQC);
-		break;
-	default:
-		return ret;
-	}
-	roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
-		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
-	roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
-	roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
-	roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
-		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, 0);
-
-	spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
-
-	bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
-
-	end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
-	while (1) {
-		if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
-			if (!(time_before(jiffies, end))) {
-				dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
-				spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
-						       flags);
-				return -EBUSY;
-			}
-		} else {
-			break;
-		}
-		msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
-	}
-
-	bt_cmd_val[0] = 0;
-	bt_cmd_val[1] = bt_cmd_h_val;
-	hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
-	spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
-
-	return ret;
-}
-
 int hns_roce_table_get(struct hns_roce_dev *hr_dev,
 		       struct hns_roce_hem_table *table, unsigned long obj)
 {
@@ -333,7 +261,7 @@
 
 	if (--table->hem[i]->refcount == 0) {
 		/* Clear HEM base address */
-		if (hns_roce_clear_hem(hr_dev, table, obj))
+		if (hr_dev->hw->clear_hem(hr_dev, table, obj))
 			dev_warn(dev, "Clear HEM base address failed.\n");
 
 		hns_roce_free_hem(hr_dev, table->hem[i]);
@@ -456,7 +384,7 @@
 
 	for (i = 0; i < table->num_hem; ++i)
 		if (table->hem[i]) {
-			if (hns_roce_clear_hem(hr_dev, table,
+			if (hr_dev->hw->clear_hem(hr_dev, table,
 			    i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size))
 				dev_err(dev, "Clear HEM base address failed.\n");
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index ad66175..4357488 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -34,6 +34,10 @@
 #ifndef _HNS_ROCE_HEM_H
 #define _HNS_ROCE_HEM_H
 
+#define HW_SYNC_TIMEOUT_MSECS		500
+#define HW_SYNC_SLEEP_TIME_INTERVAL	20
+#define BT_CMD_SYNC_SHIFT		31
+
 enum {
 	/* MAP HEM(Hardware Entry Memory) */
 	HEM_TYPE_QPC = 0,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 399f5de..71232e5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -73,8 +73,14 @@
 	u32 ind = 0;
 	int ret = 0;
 
-	spin_lock_irqsave(&qp->sq.lock, flags);
+	if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
+		ibqp->qp_type != IB_QPT_RC)) {
+		dev_err(dev, "un-supported QP type\n");
+		*bad_wr = NULL;
+		return -EOPNOTSUPP;
+	}
 
+	spin_lock_irqsave(&qp->sq.lock, flags);
 	ind = qp->sq_next_wqe;
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
 		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
@@ -162,7 +168,7 @@
 			roce_set_field(ud_sq_wqe->u32_36,
 				       UD_SEND_WQE_U32_36_SGID_INDEX_M,
 				       UD_SEND_WQE_U32_36_SGID_INDEX_S,
-				       hns_get_gid_index(hr_dev, qp->port,
+				       hns_get_gid_index(hr_dev, qp->phy_port,
 							 ah->av.gid_index));
 
 			roce_set_field(ud_sq_wqe->u32_40,
@@ -205,8 +211,7 @@
 				      (wr->send_flags & IB_SEND_FENCE ?
 				      (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);
 
-			wqe = (struct hns_roce_wqe_ctrl_seg *)wqe +
-			       sizeof(struct hns_roce_wqe_ctrl_seg);
+			wqe += sizeof(struct hns_roce_wqe_ctrl_seg);
 
 			switch (wr->opcode) {
 			case IB_WR_RDMA_READ:
@@ -235,8 +240,7 @@
 				break;
 			}
 			ctrl->flag |= cpu_to_le32(ps_opcode);
-			wqe = (struct hns_roce_wqe_raddr_seg *)wqe +
-			       sizeof(struct hns_roce_wqe_raddr_seg);
+			wqe += sizeof(struct hns_roce_wqe_raddr_seg);
 
 			dseg = wqe;
 			if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
@@ -253,8 +257,7 @@
 					memcpy(wqe, ((void *) (uintptr_t)
 					       wr->sg_list[i].addr),
 					       wr->sg_list[i].length);
-					wqe = (struct hns_roce_wqe_raddr_seg *)
-					       wqe + wr->sg_list[i].length;
+					wqe += wr->sg_list[i].length;
 				}
 				ctrl->flag |= HNS_ROCE_WQE_INLINE;
 			} else {
@@ -266,9 +269,6 @@
 					      HNS_ROCE_WQE_SGE_NUM_BIT);
 			}
 			ind++;
-		} else {
-			dev_dbg(dev, "unSupported QP type\n");
-			break;
 		}
 	}
 
@@ -285,7 +285,7 @@
 			       SQ_DOORBELL_U32_4_SQ_HEAD_S,
 			      (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
 		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
-			       SQ_DOORBELL_U32_4_PORT_S, qp->port);
+			       SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
 		roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
 			       SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
 		roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
@@ -365,14 +365,14 @@
 			/* SW update GSI rq header */
 			reg_val = roce_read(to_hr_dev(ibqp->device),
 					    ROCEE_QP1C_CFG3_0_REG +
-					    QP1C_CFGN_OFFSET * hr_qp->port);
+					    QP1C_CFGN_OFFSET * hr_qp->phy_port);
 			roce_set_field(reg_val,
 				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
 				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
 				       hr_qp->rq.head);
 			roce_write(to_hr_dev(ibqp->device),
 				   ROCEE_QP1C_CFG3_0_REG +
-				   QP1C_CFGN_OFFSET * hr_qp->port, reg_val);
+				   QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
 		} else {
 			rq_db.u32_4 = 0;
 			rq_db.u32_8 = 0;
@@ -789,6 +789,66 @@
 	}
 }
 
+static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_v1_priv *priv;
+	int ret;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+	priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
+		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
+		GFP_KERNEL);
+	if (!priv->bt_table.qpc_buf.buf)
+		return -ENOMEM;
+
+	priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
+		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
+		GFP_KERNEL);
+	if (!priv->bt_table.mtpt_buf.buf) {
+		ret = -ENOMEM;
+		goto err_failed_alloc_mtpt_buf;
+	}
+
+	priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
+		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
+		GFP_KERNEL);
+	if (!priv->bt_table.cqc_buf.buf) {
+		ret = -ENOMEM;
+		goto err_failed_alloc_cqc_buf;
+	}
+
+	return 0;
+
+err_failed_alloc_cqc_buf:
+	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+		priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
+
+err_failed_alloc_mtpt_buf:
+	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+		priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
+
+	return ret;
+}
+
+static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_v1_priv *priv;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+		priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
+
+	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+		priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
+
+	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+		priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
+}
+
 /**
  * hns_roce_v1_reset - reset RoCE
  * @hr_dev: RoCE device struct pointer
@@ -879,7 +939,6 @@
 	caps->mtt_entry_sz	= HNS_ROCE_V1_MTT_ENTRY_SIZE;
 	caps->cq_entry_sz	= HNS_ROCE_V1_CQE_ENTRY_SIZE;
 	caps->page_size_cap	= HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
-	caps->sqp_start		= 0;
 	caps->reserved_lkey	= 0;
 	caps->reserved_pds	= 0;
 	caps->reserved_mrws	= 1;
@@ -944,8 +1003,18 @@
 
 	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
 
+	ret = hns_roce_bt_init(hr_dev);
+	if (ret) {
+		dev_err(dev, "bt init failed!\n");
+		goto error_failed_bt_init;
+	}
+
 	return 0;
 
+error_failed_bt_init:
+	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
+	hns_roce_raq_free(hr_dev);
+
 error_failed_raq_init:
 	hns_roce_db_free(hr_dev);
 	return ret;
@@ -953,6 +1022,7 @@
 
 void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
 {
+	hns_roce_bt_free(hr_dev);
 	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
 	hns_roce_raq_free(hr_dev);
 	hns_roce_db_free(hr_dev);
@@ -1192,9 +1262,7 @@
 	return get_sw_cqe(hr_cq, hr_cq->cons_index);
 }
 
-void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index,
-			   spinlock_t *doorbell_lock)
-
+void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
 {
 	u32 doorbell[2];
 
@@ -1254,8 +1322,7 @@
 		*/
 		wmb();
 
-		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index,
-				   &to_hr_dev(hr_cq->ib_cq.device)->cq_db_lock);
+		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
 	}
 }
 
@@ -1485,7 +1552,8 @@
 		/* SQ conrespond to CQE */
 		sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
 						CQE_BYTE_4_WQE_INDEX_M,
-						CQE_BYTE_4_WQE_INDEX_S));
+						CQE_BYTE_4_WQE_INDEX_S)&
+						((*cur_qp)->sq.wqe_cnt-1));
 		switch (sq_wqe->flag & HNS_ROCE_WQE_OPCODE_MASK) {
 		case HNS_ROCE_WQE_OPCODE_SEND:
 			wc->opcode = IB_WC_SEND;
@@ -1591,10 +1659,8 @@
 			break;
 	}
 
-	if (npolled) {
-		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index,
-				      &to_hr_dev(ibcq->device)->cq_db_lock);
-	}
+	if (npolled)
+		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
 
 	spin_unlock_irqrestore(&hr_cq->lock, flags);
 
@@ -1604,6 +1670,74 @@
 		return ret;
 }
 
+int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
+		struct hns_roce_hem_table *table, int obj)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_v1_priv *priv;
+	unsigned long end = 0, flags = 0;
+	uint32_t bt_cmd_val[2] = {0};
+	void __iomem *bt_cmd;
+	u64 bt_ba = 0;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+	switch (table->type) {
+	case HEM_TYPE_QPC:
+		roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+			ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
+		bt_ba = priv->bt_table.qpc_buf.map >> 12;
+		break;
+	case HEM_TYPE_MTPT:
+		roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+			ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT);
+		bt_ba = priv->bt_table.mtpt_buf.map >> 12;
+		break;
+	case HEM_TYPE_CQC:
+		roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+			ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
+		bt_ba = priv->bt_table.cqc_buf.map >> 12;
+		break;
+	case HEM_TYPE_SRQC:
+		dev_dbg(dev, "HEM_TYPE_SRQC not support.\n");
+		return -EINVAL;
+	default:
+		return 0;
+	}
+	roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
+		ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
+	roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
+	roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
+
+	spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
+
+	bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
+
+	end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
+	while (1) {
+		if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
+			if (!(time_before(jiffies, end))) {
+				dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
+				spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
+					flags);
+				return -EBUSY;
+			}
+		} else {
+			break;
+		}
+		msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
+	}
+
+	bt_cmd_val[0] = (uint32_t)bt_ba;
+	roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
+		ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
+	hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
+
+	spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
+
+	return 0;
+}
+
 static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
 				 struct hns_roce_mtt *mtt,
 				 enum hns_roce_qp_state cur_state,
@@ -1733,13 +1867,10 @@
 		roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
 			       QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
 		roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
-			       QP1C_BYTES_16_PORT_NUM_S, hr_qp->port);
+			       QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
 		roce_set_bit(context->qp1c_bytes_16,
 			     QP1C_BYTES_16_SIGNALING_TYPE_S,
 			     hr_qp->sq_signal_bits);
-		roce_set_bit(context->qp1c_bytes_16,
-			     QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S,
-			     hr_qp->sq_signal_bits);
 		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
 			     1);
 		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
@@ -1784,7 +1915,7 @@
 
 		/* Copy context to QP1C register */
 		addr = (u32 *)(hr_dev->reg_base + ROCEE_QP1C_CFG0_0_REG +
-			hr_qp->port * sizeof(*context));
+			hr_qp->phy_port * sizeof(*context));
 
 		writel(context->qp1c_bytes_4, addr);
 		writel(context->sq_rq_bt_l, addr + 1);
@@ -1795,15 +1926,16 @@
 		writel(context->qp1c_bytes_28, addr + 6);
 		writel(context->qp1c_bytes_32, addr + 7);
 		writel(context->cur_sq_wqe_ba_l, addr + 8);
+		writel(context->qp1c_bytes_40, addr + 9);
 	}
 
 	/* Modify QP1C status */
 	reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
-			    hr_qp->port * sizeof(*context));
+			    hr_qp->phy_port * sizeof(*context));
 	roce_set_field(reg_val, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
 		       ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
 	roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
-		    hr_qp->port * sizeof(*context), reg_val);
+		    hr_qp->phy_port * sizeof(*context), reg_val);
 
 	hr_qp->state = new_state;
 	if (new_state == IB_QPS_RESET) {
@@ -1836,12 +1968,10 @@
 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
 	struct device *dev = &hr_dev->pdev->dev;
 	struct hns_roce_qp_context *context;
-	struct hns_roce_rq_db rq_db;
 	dma_addr_t dma_handle_2 = 0;
 	dma_addr_t dma_handle = 0;
 	uint32_t doorbell[2] = {0};
 	int rq_pa_start = 0;
-	u32 reg_val = 0;
 	u64 *mtts_2 = NULL;
 	int ret = -EINVAL;
 	u64 *mtts = NULL;
@@ -2119,7 +2249,8 @@
 
 		roce_set_field(context->qpc_bytes_68,
 			       QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
-			       QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S, 0);
+			       QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
+			       hr_qp->rq.head);
 		roce_set_field(context->qpc_bytes_68,
 			       QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
 			       QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
@@ -2186,7 +2317,7 @@
 		roce_set_field(context->qpc_bytes_156,
 			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
 			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
-			       hr_qp->port);
+			       hr_qp->phy_port);
 		roce_set_field(context->qpc_bytes_156,
 			       QP_CONTEXT_QPC_BYTES_156_SL_M,
 			       QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl);
@@ -2257,20 +2388,17 @@
 		roce_set_bit(context->qpc_bytes_140,
 			     QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
 
-		roce_set_field(context->qpc_bytes_144,
-			       QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
-			       QP_CONTEXT_QPC_BYTES_144_QP_STATE_S,
-			       attr->qp_state);
-
 		roce_set_field(context->qpc_bytes_148,
 			       QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
 			       QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
 		roce_set_field(context->qpc_bytes_148,
 			       QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
-			       QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S, 0);
+			       QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
+			       attr->retry_cnt);
 		roce_set_field(context->qpc_bytes_148,
 			       QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
-			       QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S, 0);
+			       QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
+			       attr->rnr_retry);
 		roce_set_field(context->qpc_bytes_148,
 			       QP_CONTEXT_QPC_BYTES_148_LSN_M,
 			       QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
@@ -2281,10 +2409,19 @@
 			       QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
 			       QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
 			       attr->retry_cnt);
-		roce_set_field(context->qpc_bytes_156,
-			       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
-			       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
-			       attr->timeout);
+		if (attr->timeout < 0x12) {
+			dev_info(dev, "ack timeout value(0x%x) must bigger than 0x12.\n",
+				 attr->timeout);
+			roce_set_field(context->qpc_bytes_156,
+				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
+				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
+				       0x12);
+		} else {
+			roce_set_field(context->qpc_bytes_156,
+				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
+				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
+				       attr->timeout);
+		}
 		roce_set_field(context->qpc_bytes_156,
 			       QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
 			       QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
@@ -2292,7 +2429,7 @@
 		roce_set_field(context->qpc_bytes_156,
 			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
 			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
-			       hr_qp->port);
+			       hr_qp->phy_port);
 		roce_set_field(context->qpc_bytes_156,
 			       QP_CONTEXT_QPC_BYTES_156_SL_M,
 			       QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl);
@@ -2357,21 +2494,15 @@
 			       QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
 			       QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
 			       0);
-	} else if ((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
+	} else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
 		   (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
 		   (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
 		   (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
 		   (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
 		   (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
 		   (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
-		   (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
-		roce_set_field(context->qpc_bytes_144,
-			       QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
-			       QP_CONTEXT_QPC_BYTES_144_QP_STATE_S,
-			       attr->qp_state);
-
-	} else {
-		dev_err(dev, "not support this modify\n");
+		   (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
+		dev_err(dev, "not support this status migration\n");
 		goto out;
 	}
 
@@ -2397,43 +2528,32 @@
 	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
 		/* Memory barrier */
 		wmb();
-		if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
-			/* SW update GSI rq header */
-			reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG3_0_REG +
-					    QP1C_CFGN_OFFSET * hr_qp->port);
-			roce_set_field(reg_val,
-				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
-				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
-				       hr_qp->rq.head);
-			roce_write(hr_dev, ROCEE_QP1C_CFG3_0_REG +
-				    QP1C_CFGN_OFFSET * hr_qp->port, reg_val);
-		} else {
-			rq_db.u32_4 = 0;
-			rq_db.u32_8 = 0;
 
-			roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
-				       RQ_DOORBELL_U32_4_RQ_HEAD_S,
-				       hr_qp->rq.head);
-			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
-				       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
-			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
-				       RQ_DOORBELL_U32_8_CMD_S, 1);
-			roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
-				     1);
+		roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
+			       RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
+		roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
+			       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
+		roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
+			       RQ_DOORBELL_U32_8_CMD_S, 1);
+		roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
 
-			doorbell[0] = rq_db.u32_4;
-			doorbell[1] = rq_db.u32_8;
-
-			hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
+		if (ibqp->uobject) {
+			hr_qp->rq.db_reg_l = hr_dev->reg_base +
+				     ROCEE_DB_OTHERS_L_0_REG +
+				     DB_REG_OFFSET * hr_dev->priv_uar.index;
 		}
+
+		hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
 	}
 
 	hr_qp->state = new_state;
 
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
 		hr_qp->resp_depth = attr->max_dest_rd_atomic;
-	if (attr_mask & IB_QP_PORT)
-		hr_qp->port = (attr->port_num - 1);
+	if (attr_mask & IB_QP_PORT) {
+		hr_qp->port = attr->port_num - 1;
+		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+	}
 
 	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
 		hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
@@ -2789,6 +2909,7 @@
 	.set_mtu = hns_roce_v1_set_mtu,
 	.write_mtpt = hns_roce_v1_write_mtpt,
 	.write_cqc = hns_roce_v1_write_cqc,
+	.clear_hem = hns_roce_v1_clear_hem,
 	.modify_qp = hns_roce_v1_modify_qp,
 	.query_qp = hns_roce_v1_query_qp,
 	.destroy_qp = hns_roce_v1_destroy_qp,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 316b592..539b0a3b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -102,6 +102,8 @@
 #define HNS_ROCE_V1_EXT_ODB_ALFUL	\
 	(HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
 
+#define HNS_ROCE_BT_RSV_BUF_SIZE			(1 << 17)
+
 #define HNS_ROCE_ODB_POLL_MODE				0
 
 #define HNS_ROCE_SDB_NORMAL_MODE			0
@@ -971,9 +973,16 @@
 	struct hns_roce_ext_db *ext_db;
 };
 
+struct hns_roce_bt_table {
+	struct hns_roce_buf_list qpc_buf;
+	struct hns_roce_buf_list mtpt_buf;
+	struct hns_roce_buf_list cqc_buf;
+};
+
 struct hns_roce_v1_priv {
 	struct hns_roce_db_table  db_table;
 	struct hns_roce_raq_table raq_table;
+	struct hns_roce_bt_table  bt_table;
 };
 
 int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index f64f0dd..764e35a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -355,8 +355,7 @@
 	props->max_qp = hr_dev->caps.num_qps;
 	props->max_qp_wr = hr_dev->caps.max_wqes;
 	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
-				  IB_DEVICE_RC_RNR_NAK_GEN |
-				  IB_DEVICE_LOCAL_DMA_LKEY;
+				  IB_DEVICE_RC_RNR_NAK_GEN;
 	props->max_sge = hr_dev->caps.max_sq_sg;
 	props->max_sge_rd = 1;
 	props->max_cq = hr_dev->caps.num_cqs;
@@ -372,6 +371,25 @@
 	return 0;
 }
 
+static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev,
+					      u8 port_num)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+	struct net_device *ndev;
+
+	if (port_num < 1 || port_num > hr_dev->caps.num_ports)
+		return NULL;
+
+	rcu_read_lock();
+
+	ndev = hr_dev->iboe.netdevs[port_num - 1];
+	if (ndev)
+		dev_hold(ndev);
+
+	rcu_read_unlock();
+	return ndev;
+}
+
 static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
 			       struct ib_port_attr *props)
 {
@@ -584,6 +602,7 @@
 	struct device *dev = &hr_dev->pdev->dev;
 
 	iboe = &hr_dev->iboe;
+	spin_lock_init(&iboe->lock);
 
 	ib_dev = &hr_dev->ib_dev;
 	strlcpy(ib_dev->name, "hisi_%d", IB_DEVICE_NAME_MAX);
@@ -618,6 +637,7 @@
 	ib_dev->query_port		= hns_roce_query_port;
 	ib_dev->modify_port		= hns_roce_modify_port;
 	ib_dev->get_link_layer		= hns_roce_get_link_layer;
+	ib_dev->get_netdev		= hns_roce_get_netdev;
 	ib_dev->query_gid		= hns_roce_query_gid;
 	ib_dev->query_pkey		= hns_roce_query_pkey;
 	ib_dev->alloc_ucontext		= hns_roce_alloc_ucontext;
@@ -667,8 +687,6 @@
 		goto error_failed_setup_mtu_gids;
 	}
 
-	spin_lock_init(&iboe->lock);
-
 	iboe->nb.notifier_call = hns_roce_netdev_event;
 	ret = register_netdevice_notifier(&iboe->nb);
 	if (ret) {
@@ -777,6 +795,15 @@
 	if (IS_ERR(hr_dev->reg_base))
 		return PTR_ERR(hr_dev->reg_base);
 
+	/* read the node_guid of IB device from the DT or ACPI */
+	ret = device_property_read_u8_array(dev, "node-guid",
+					    (u8 *)&hr_dev->ib_dev.node_guid,
+					    GUID_LEN);
+	if (ret) {
+		dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
+		return ret;
+	}
+
 	/* get the RoCE associated ethernet ports or netdevices */
 	for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
 		if (dev_of_node(dev)) {
@@ -923,7 +950,6 @@
 	struct device *dev = &hr_dev->pdev->dev;
 
 	spin_lock_init(&hr_dev->sm_lock);
-	spin_lock_init(&hr_dev->cq_db_lock);
 	spin_lock_init(&hr_dev->bt_cmd_lock);
 
 	ret = hns_roce_init_uar_table(hr_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 59f5e2b..fb87883 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -564,11 +564,14 @@
 	if (mr->umem->page_size != HNS_ROCE_HEM_PAGE_SIZE) {
 		dev_err(dev, "Just support 4K page size but is 0x%x now!\n",
 			mr->umem->page_size);
+		ret = -EINVAL;
+		goto err_umem;
 	}
 
 	if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
 		dev_err(dev, " MR len %lld err. MR is limited to 4G at most!\n",
 			length);
+		ret = -EINVAL;
 		goto err_umem;
 	}
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 16271b5..05db7d5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -35,19 +35,7 @@
 
 static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
 {
-	struct device *dev = &hr_dev->pdev->dev;
-	unsigned long pd_number;
-	int ret = 0;
-
-	ret = hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, &pd_number);
-	if (ret == -1) {
-		dev_err(dev, "alloc pdn from pdbitmap failed\n");
-		return -ENOMEM;
-	}
-
-	*pdn = pd_number;
-
-	return 0;
+	return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn);
 }
 
 static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
@@ -117,9 +105,15 @@
 	if (ret == -1)
 		return -ENOMEM;
 
-	uar->index = (uar->index - 1) % hr_dev->caps.phy_num_uars + 1;
+	if (uar->index > 0)
+		uar->index = (uar->index - 1) %
+			     (hr_dev->caps.phy_num_uars - 1) + 1;
 
 	res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
+		return -EINVAL;
+	}
 	uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
 
 	return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 645c18d..e86dd8d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -32,14 +32,14 @@
  */
 
 #include <linux/platform_device.h>
+#include <rdma/ib_addr.h>
 #include <rdma/ib_umem.h>
 #include "hns_roce_common.h"
 #include "hns_roce_device.h"
 #include "hns_roce_hem.h"
 #include "hns_roce_user.h"
 
-#define DB_REG_OFFSET			0x1000
-#define SQP_NUM				12
+#define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)
 
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
 {
@@ -113,16 +113,8 @@
 				     int align, unsigned long *base)
 {
 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
-	int ret = 0;
-	unsigned long qpn;
 
-	ret = hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, &qpn);
-	if (ret == -1)
-		return -ENOMEM;
-
-	*base = qpn;
-
-	return 0;
+	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
 }
 
 enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
@@ -255,7 +247,7 @@
 {
 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
 
-	if (base_qpn < (hr_dev->caps.sqp_start + 2 * hr_dev->caps.num_ports))
+	if (base_qpn < SQP_NUM)
 		return;
 
 	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
@@ -345,12 +337,10 @@
 
 static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 				       struct ib_qp_cap *cap,
-				       enum ib_qp_type type,
 				       struct hns_roce_qp *hr_qp)
 {
 	struct device *dev = &hr_dev->pdev->dev;
 	u32 max_cnt;
-	(void)type;
 
 	if (cap->max_send_wr  > hr_dev->caps.max_wqes  ||
 	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
@@ -476,7 +466,7 @@
 
 		/* Set SQ size */
 		ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
-						  init_attr->qp_type, hr_qp);
+						  hr_qp);
 		if (ret) {
 			dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
 			goto err_out;
@@ -617,21 +607,19 @@
 			return ERR_PTR(-ENOMEM);
 
 		hr_qp = &hr_sqp->hr_qp;
+		hr_qp->port = init_attr->port_num - 1;
+		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+		hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
+				     hr_dev->iboe.phy_port[hr_qp->port];
 
 		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
-						hr_dev->caps.sqp_start +
-						hr_dev->caps.num_ports +
-						init_attr->port_num - 1, hr_qp);
+						hr_qp->ibqp.qp_num, hr_qp);
 		if (ret) {
 			dev_err(dev, "Create GSI QP failed!\n");
 			kfree(hr_sqp);
 			return ERR_PTR(ret);
 		}
 
-		hr_qp->port = (init_attr->port_num - 1);
-		hr_qp->ibqp.qp_num = hr_dev->caps.sqp_start +
-				     hr_dev->caps.num_ports +
-				     init_attr->port_num - 1;
 		break;
 	}
 	default:{
@@ -670,6 +658,7 @@
 	struct device *dev = &hr_dev->pdev->dev;
 	int ret = -EINVAL;
 	int p;
+	enum ib_mtu active_mtu;
 
 	mutex_lock(&hr_qp->mutex);
 
@@ -700,6 +689,19 @@
 		}
 	}
 
+	if (attr_mask & IB_QP_PATH_MTU) {
+		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
+		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
+
+		if (attr->path_mtu > IB_MTU_2048 ||
+		    attr->path_mtu < IB_MTU_256 ||
+		    attr->path_mtu > active_mtu) {
+			dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
+				attr->path_mtu);
+			goto out;
+		}
+	}
+
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
 	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
 		dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
@@ -782,29 +784,11 @@
 
 void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
 {
-	struct ib_qp *ibqp = &hr_qp->ibqp;
-	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
-
-	if ((n < 0) || (n > hr_qp->rq.wqe_cnt)) {
-		dev_err(&hr_dev->pdev->dev, "rq wqe index:%d,rq wqe cnt:%d\r\n",
-			n, hr_qp->rq.wqe_cnt);
-		return NULL;
-	}
-
 	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
 }
 
 void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
 {
-	struct ib_qp *ibqp = &hr_qp->ibqp;
-	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
-
-	if ((n < 0) || (n > hr_qp->sq.wqe_cnt)) {
-		dev_err(&hr_dev->pdev->dev, "sq wqe index:%d,sq wqe cnt:%d\r\n",
-			n, hr_qp->sq.wqe_cnt);
-		return NULL;
-	}
-
 	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
 }
 
@@ -837,8 +821,7 @@
 
 	/* A port include two SQP, six port total 12 */
 	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
-				   hr_dev->caps.num_qps - 1,
-				   hr_dev->caps.sqp_start + SQP_NUM,
+				   hr_dev->caps.num_qps - 1, SQP_NUM,
 				   reserved_from_top);
 	if (ret) {
 		dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n",
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 6c00d04..c6fe89d 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,7 @@
 		goto out;
 	}
 
-	ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL);
+	ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL);
 	if (ret < 0)
 		goto out;
 
diff --git a/drivers/infiniband/hw/qedr/Kconfig b/drivers/infiniband/hw/qedr/Kconfig
new file mode 100644
index 0000000..7c06d85
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/Kconfig
@@ -0,0 +1,7 @@
+config INFINIBAND_QEDR
+	tristate "QLogic RoCE driver"
+	depends on 64BIT && QEDE
+	select QED_LL2
+	---help---
+	  This driver provides low-level InfiniBand over Ethernet
+	  support for QLogic QED host channel adapters (HCAs).
diff --git a/drivers/infiniband/hw/qedr/Makefile b/drivers/infiniband/hw/qedr/Makefile
new file mode 100644
index 0000000..ba7067c
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_INFINIBAND_QEDR) := qedr.o
+
+qedr-y := main.o verbs.o qedr_cm.o
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
new file mode 100644
index 0000000..7b74d09
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -0,0 +1,914 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_user_verbs.h>
+#include <linux/netdevice.h>
+#include <linux/iommu.h>
+#include <net/addrconf.h>
+#include <linux/qed/qede_roce.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qedr.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+
+MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(QEDR_MODULE_VERSION);
+
+#define QEDR_WQ_MULTIPLIER_DFT	(3)
+
+void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
+			    enum ib_event_type type)
+{
+	struct ib_event ibev;
+
+	ibev.device = &dev->ibdev;
+	ibev.element.port_num = port_num;
+	ibev.event = type;
+
+	ib_dispatch_event(&ibev);
+}
+
+static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
+					    u8 port_num)
+{
+	return IB_LINK_LAYER_ETHERNET;
+}
+
+static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str,
+				size_t str_len)
+{
+	struct qedr_dev *qedr = get_qedr_dev(ibdev);
+	u32 fw_ver = (u32)qedr->attr.fw_ver;
+
+	snprintf(str, str_len, "%d. %d. %d. %d",
+		 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
+		 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
+}
+
+static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
+{
+	struct qedr_dev *qdev;
+
+	qdev = get_qedr_dev(dev);
+	dev_hold(qdev->ndev);
+
+	/* The HW vendor's device driver must guarantee
+	 * that this function returns NULL before the net device reaches
+	 * NETDEV_UNREGISTER_FINAL state.
+	 */
+	return qdev->ndev;
+}
+
+static int qedr_register_device(struct qedr_dev *dev)
+{
+	strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);
+
+	dev->ibdev.node_guid = dev->attr.node_guid;
+	memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
+	dev->ibdev.owner = THIS_MODULE;
+	dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION;
+
+	dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
+				     QEDR_UVERBS(QUERY_DEVICE) |
+				     QEDR_UVERBS(QUERY_PORT) |
+				     QEDR_UVERBS(ALLOC_PD) |
+				     QEDR_UVERBS(DEALLOC_PD) |
+				     QEDR_UVERBS(CREATE_COMP_CHANNEL) |
+				     QEDR_UVERBS(CREATE_CQ) |
+				     QEDR_UVERBS(RESIZE_CQ) |
+				     QEDR_UVERBS(DESTROY_CQ) |
+				     QEDR_UVERBS(REQ_NOTIFY_CQ) |
+				     QEDR_UVERBS(CREATE_QP) |
+				     QEDR_UVERBS(MODIFY_QP) |
+				     QEDR_UVERBS(QUERY_QP) |
+				     QEDR_UVERBS(DESTROY_QP) |
+				     QEDR_UVERBS(REG_MR) |
+				     QEDR_UVERBS(DEREG_MR) |
+				     QEDR_UVERBS(POLL_CQ) |
+				     QEDR_UVERBS(POST_SEND) |
+				     QEDR_UVERBS(POST_RECV);
+
+	dev->ibdev.phys_port_cnt = 1;
+	dev->ibdev.num_comp_vectors = dev->num_cnq;
+	dev->ibdev.node_type = RDMA_NODE_IB_CA;
+
+	dev->ibdev.query_device = qedr_query_device;
+	dev->ibdev.query_port = qedr_query_port;
+	dev->ibdev.modify_port = qedr_modify_port;
+
+	dev->ibdev.query_gid = qedr_query_gid;
+	dev->ibdev.add_gid = qedr_add_gid;
+	dev->ibdev.del_gid = qedr_del_gid;
+
+	dev->ibdev.alloc_ucontext = qedr_alloc_ucontext;
+	dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext;
+	dev->ibdev.mmap = qedr_mmap;
+
+	dev->ibdev.alloc_pd = qedr_alloc_pd;
+	dev->ibdev.dealloc_pd = qedr_dealloc_pd;
+
+	dev->ibdev.create_cq = qedr_create_cq;
+	dev->ibdev.destroy_cq = qedr_destroy_cq;
+	dev->ibdev.resize_cq = qedr_resize_cq;
+	dev->ibdev.req_notify_cq = qedr_arm_cq;
+
+	dev->ibdev.create_qp = qedr_create_qp;
+	dev->ibdev.modify_qp = qedr_modify_qp;
+	dev->ibdev.query_qp = qedr_query_qp;
+	dev->ibdev.destroy_qp = qedr_destroy_qp;
+
+	dev->ibdev.query_pkey = qedr_query_pkey;
+
+	dev->ibdev.create_ah = qedr_create_ah;
+	dev->ibdev.destroy_ah = qedr_destroy_ah;
+
+	dev->ibdev.get_dma_mr = qedr_get_dma_mr;
+	dev->ibdev.dereg_mr = qedr_dereg_mr;
+	dev->ibdev.reg_user_mr = qedr_reg_user_mr;
+	dev->ibdev.alloc_mr = qedr_alloc_mr;
+	dev->ibdev.map_mr_sg = qedr_map_mr_sg;
+
+	dev->ibdev.poll_cq = qedr_poll_cq;
+	dev->ibdev.post_send = qedr_post_send;
+	dev->ibdev.post_recv = qedr_post_recv;
+
+	dev->ibdev.process_mad = qedr_process_mad;
+	dev->ibdev.get_port_immutable = qedr_port_immutable;
+	dev->ibdev.get_netdev = qedr_get_netdev;
+
+	dev->ibdev.dma_device = &dev->pdev->dev;
+
+	dev->ibdev.get_link_layer = qedr_link_layer;
+	dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str;
+
+	return ib_register_device(&dev->ibdev, NULL);
+}
+
+/* This function allocates fast-path status block memory */
+static int qedr_alloc_mem_sb(struct qedr_dev *dev,
+			     struct qed_sb_info *sb_info, u16 sb_id)
+{
+	struct status_block *sb_virt;
+	dma_addr_t sb_phys;
+	int rc;
+
+	sb_virt = dma_alloc_coherent(&dev->pdev->dev,
+				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
+	if (!sb_virt)
+		return -ENOMEM;
+
+	rc = dev->ops->common->sb_init(dev->cdev, sb_info,
+				       sb_virt, sb_phys, sb_id,
+				       QED_SB_TYPE_CNQ);
+	if (rc) {
+		pr_err("Status block initialization failed\n");
+		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
+				  sb_virt, sb_phys);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void qedr_free_mem_sb(struct qedr_dev *dev,
+			     struct qed_sb_info *sb_info, int sb_id)
+{
+	if (sb_info->sb_virt) {
+		dev->ops->common->sb_release(dev->cdev, sb_info, sb_id);
+		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
+				  (void *)sb_info->sb_virt, sb_info->sb_phys);
+	}
+}
+
+static void qedr_free_resources(struct qedr_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->num_cnq; i++) {
+		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
+	}
+
+	kfree(dev->cnq_array);
+	kfree(dev->sb_array);
+	kfree(dev->sgid_tbl);
+}
+
+static int qedr_alloc_resources(struct qedr_dev *dev)
+{
+	struct qedr_cnq *cnq;
+	__le16 *cons_pi;
+	u16 n_entries;
+	int i, rc;
+
+	dev->sgid_tbl = kzalloc(sizeof(union ib_gid) *
+				QEDR_MAX_SGID, GFP_KERNEL);
+	if (!dev->sgid_tbl)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->sgid_lock);
+
+	/* Allocate Status blocks for CNQ */
+	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
+				GFP_KERNEL);
+	if (!dev->sb_array) {
+		rc = -ENOMEM;
+		goto err1;
+	}
+
+	dev->cnq_array = kcalloc(dev->num_cnq,
+				 sizeof(*dev->cnq_array), GFP_KERNEL);
+	if (!dev->cnq_array) {
+		rc = -ENOMEM;
+		goto err2;
+	}
+
+	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);
+
+	/* Allocate CNQ PBLs */
+	n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
+	for (i = 0; i < dev->num_cnq; i++) {
+		cnq = &dev->cnq_array[i];
+
+		rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
+				       dev->sb_start + i);
+		if (rc)
+			goto err3;
+
+		rc = dev->ops->common->chain_alloc(dev->cdev,
+						   QED_CHAIN_USE_TO_CONSUME,
+						   QED_CHAIN_MODE_PBL,
+						   QED_CHAIN_CNT_TYPE_U16,
+						   n_entries,
+						   sizeof(struct regpair *),
+						   &cnq->pbl);
+		if (rc)
+			goto err4;
+
+		cnq->dev = dev;
+		cnq->sb = &dev->sb_array[i];
+		cons_pi = dev->sb_array[i].sb_virt->pi_array;
+		cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
+		cnq->index = i;
+		sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));
+
+		DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
+			 i, qed_chain_get_cons_idx(&cnq->pbl));
+	}
+
+	return 0;
+err4:
+	qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+err3:
+	for (--i; i >= 0; i--) {
+		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
+		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+	}
+	kfree(dev->cnq_array);
+err2:
+	kfree(dev->sb_array);
+err1:
+	kfree(dev->sgid_tbl);
+	return rc;
+}
+
+/* QEDR sysfs interface */
+static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+			char *buf)
+{
+	struct qedr_dev *dev = dev_get_drvdata(device);
+
+	return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
+}
+
+static ssize_t show_hca_type(struct device *device,
+			     struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
+}
+
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
+
+static struct device_attribute *qedr_attributes[] = {
+	&dev_attr_hw_rev,
+	&dev_attr_hca_type
+};
+
+static void qedr_remove_sysfiles(struct qedr_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
+		device_remove_file(&dev->ibdev.dev, qedr_attributes[i]);
+}
+
+static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
+{
+	struct pci_dev *bridge;
+	u32 val;
+
+	dev->atomic_cap = IB_ATOMIC_NONE;
+
+	bridge = pdev->bus->self;
+	if (!bridge)
+		return;
+
+	/* Check whether we are connected directly or via a switch */
+	while (bridge && bridge->bus->parent) {
+		DP_DEBUG(dev, QEDR_MSG_INIT,
+			 "Device is not connected directly to root. bridge->bus->number=%d primary=%d\n",
+			 bridge->bus->number, bridge->bus->primary);
+		/* Need to check Atomic Op Routing Supported all the way to
+		 * root complex.
+		 */
+		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
+		if (!(val & PCI_EXP_DEVCAP2_ATOMIC_ROUTE)) {
+			pcie_capability_clear_word(pdev,
+						   PCI_EXP_DEVCTL2,
+						   PCI_EXP_DEVCTL2_ATOMIC_REQ);
+			return;
+		}
+		bridge = bridge->bus->parent->self;
+	}
+	bridge = pdev->bus->self;
+
+	/* according to bridge capability */
+	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
+	if (val & PCI_EXP_DEVCAP2_ATOMIC_COMP64) {
+		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+					 PCI_EXP_DEVCTL2_ATOMIC_REQ);
+		dev->atomic_cap = IB_ATOMIC_GLOB;
+	} else {
+		pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
+					   PCI_EXP_DEVCTL2_ATOMIC_REQ);
+	}
+}
+
+static const struct qed_rdma_ops *qed_ops;
+
+#define HILO_U64(hi, lo)		((((u64)(hi)) << 32) + (lo))
+
+static irqreturn_t qedr_irq_handler(int irq, void *handle)
+{
+	u16 hw_comp_cons, sw_comp_cons;
+	struct qedr_cnq *cnq = handle;
+	struct regpair *cq_handle;
+	struct qedr_cq *cq;
+
+	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);
+
+	qed_sb_update_sb_idx(cnq->sb);
+
+	hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
+	sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
+
+	/* Align protocol-index and chain reads */
+	rmb();
+
+	while (sw_comp_cons != hw_comp_cons) {
+		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
+		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
+				cq_handle->lo);
+
+		if (cq == NULL) {
+			DP_ERR(cnq->dev,
+			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
+			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
+			       hw_comp_cons);
+
+			break;
+		}
+
+		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
+			DP_ERR(cnq->dev,
+			       "Problem with cq signature, cq_handle->hi=%d ch_handle->lo=%d cq=%p\n",
+			       cq_handle->hi, cq_handle->lo, cq);
+			break;
+		}
+
+		cq->arm_flags = 0;
+
+		if (cq->ibcq.comp_handler)
+			(*cq->ibcq.comp_handler)
+				(&cq->ibcq, cq->ibcq.cq_context);
+
+		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
+
+		cnq->n_comp++;
+
+	}
+
+	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
+				      sw_comp_cons);
+
+	qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);
+
+	return IRQ_HANDLED;
+}
+
+static void qedr_sync_free_irqs(struct qedr_dev *dev)
+{
+	u32 vector;
+	int i;
+
+	for (i = 0; i < dev->int_info.used_cnt; i++) {
+		if (dev->int_info.msix_cnt) {
+			vector = dev->int_info.msix[i * dev->num_hwfns].vector;
+			synchronize_irq(vector);
+			free_irq(vector, &dev->cnq_array[i]);
+		}
+	}
+
+	dev->int_info.used_cnt = 0;
+}
+
+static int qedr_req_msix_irqs(struct qedr_dev *dev)
+{
+	int i, rc = 0;
+
+	if (dev->num_cnq > dev->int_info.msix_cnt) {
+		DP_ERR(dev,
+		       "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
+		       dev->num_cnq, dev->int_info.msix_cnt);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < dev->num_cnq; i++) {
+		rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector,
+				 qedr_irq_handler, 0, dev->cnq_array[i].name,
+				 &dev->cnq_array[i]);
+		if (rc) {
+			DP_ERR(dev, "Request cnq %d irq failed\n", i);
+			qedr_sync_free_irqs(dev);
+		} else {
+			DP_DEBUG(dev, QEDR_MSG_INIT,
+				 "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
+				 dev->cnq_array[i].name, i,
+				 &dev->cnq_array[i]);
+			dev->int_info.used_cnt++;
+		}
+	}
+
+	return rc;
+}
+
+static int qedr_setup_irqs(struct qedr_dev *dev)
+{
+	int rc;
+
+	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");
+
+	/* Learn Interrupt configuration */
+	rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
+	if (rc < 0)
+		return rc;
+
+	rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
+	if (rc) {
+		DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
+		return rc;
+	}
+
+	if (dev->int_info.msix_cnt) {
+		DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
+			 dev->int_info.msix_cnt);
+		rc = qedr_req_msix_irqs(dev);
+		if (rc)
+			return rc;
+	}
+
+	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");
+
+	return 0;
+}
+
+static int qedr_set_device_attr(struct qedr_dev *dev)
+{
+	struct qed_rdma_device *qed_attr;
+	struct qedr_device_attr *attr;
+	u32 page_size;
+
+	/* Part 1 - query core capabilities */
+	qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
+
+	/* Part 2 - check capabilities */
+	page_size = ~dev->attr.page_size_caps + 1;
+	if (page_size > PAGE_SIZE) {
+		DP_ERR(dev,
+		       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
+		       PAGE_SIZE, page_size);
+		return -ENODEV;
+	}
+
+	/* Part 3 - copy and update capabilities */
+	attr = &dev->attr;
+	attr->vendor_id = qed_attr->vendor_id;
+	attr->vendor_part_id = qed_attr->vendor_part_id;
+	attr->hw_ver = qed_attr->hw_ver;
+	attr->fw_ver = qed_attr->fw_ver;
+	attr->node_guid = qed_attr->node_guid;
+	attr->sys_image_guid = qed_attr->sys_image_guid;
+	attr->max_cnq = qed_attr->max_cnq;
+	attr->max_sge = qed_attr->max_sge;
+	attr->max_inline = qed_attr->max_inline;
+	attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
+	attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
+	attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
+	attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
+	attr->max_dev_resp_rd_atomic_resc =
+	    qed_attr->max_dev_resp_rd_atomic_resc;
+	attr->max_cq = qed_attr->max_cq;
+	attr->max_qp = qed_attr->max_qp;
+	attr->max_mr = qed_attr->max_mr;
+	attr->max_mr_size = qed_attr->max_mr_size;
+	attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
+	attr->max_mw = qed_attr->max_mw;
+	attr->max_fmr = qed_attr->max_fmr;
+	attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
+	attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
+	attr->max_pd = qed_attr->max_pd;
+	attr->max_ah = qed_attr->max_ah;
+	attr->max_pkey = qed_attr->max_pkey;
+	attr->max_srq = qed_attr->max_srq;
+	attr->max_srq_wr = qed_attr->max_srq_wr;
+	attr->dev_caps = qed_attr->dev_caps;
+	attr->page_size_caps = qed_attr->page_size_caps;
+	attr->dev_ack_delay = qed_attr->dev_ack_delay;
+	attr->reserved_lkey = qed_attr->reserved_lkey;
+	attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
+	attr->max_stats_queues = qed_attr->max_stats_queues;
+
+	return 0;
+}
+
+void qedr_unaffiliated_event(void *context,
+			     u8 event_code)
+{
+	pr_err("unaffiliated event not implemented yet\n");
+}
+
+void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
+{
+#define EVENT_TYPE_NOT_DEFINED	0
+#define EVENT_TYPE_CQ		1
+#define EVENT_TYPE_QP		2
+	struct qedr_dev *dev = (struct qedr_dev *)context;
+	union event_ring_data *data = fw_handle;
+	u64 roce_handle64 = ((u64)data->roce_handle.hi << 32) +
+			    data->roce_handle.lo;
+	u8 event_type = EVENT_TYPE_NOT_DEFINED;
+	struct ib_event event;
+	struct ib_cq *ibcq;
+	struct ib_qp *ibqp;
+	struct qedr_cq *cq;
+	struct qedr_qp *qp;
+
+	switch (e_code) {
+	case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
+		event.event = IB_EVENT_CQ_ERR;
+		event_type = EVENT_TYPE_CQ;
+		break;
+	case ROCE_ASYNC_EVENT_SQ_DRAINED:
+		event.event = IB_EVENT_SQ_DRAINED;
+		event_type = EVENT_TYPE_QP;
+		break;
+	case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
+		event.event = IB_EVENT_QP_FATAL;
+		event_type = EVENT_TYPE_QP;
+		break;
+	case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
+		event.event = IB_EVENT_QP_REQ_ERR;
+		event_type = EVENT_TYPE_QP;
+		break;
+	case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
+		event.event = IB_EVENT_QP_ACCESS_ERR;
+		event_type = EVENT_TYPE_QP;
+		break;
+	default:
+		DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code,
+		       roce_handle64);
+	}
+
+	switch (event_type) {
+	case EVENT_TYPE_CQ:
+		cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
+		if (cq) {
+			ibcq = &cq->ibcq;
+			if (ibcq->event_handler) {
+				event.device = ibcq->device;
+				event.element.cq = ibcq;
+				ibcq->event_handler(&event, ibcq->cq_context);
+			}
+		} else {
+			WARN(1,
+			     "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
+			     roce_handle64);
+		}
+		DP_ERR(dev, "CQ event %d on hanlde %p\n", e_code, cq);
+		break;
+	case EVENT_TYPE_QP:
+		qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
+		if (qp) {
+			ibqp = &qp->ibqp;
+			if (ibqp->event_handler) {
+				event.device = ibqp->device;
+				event.element.qp = ibqp;
+				ibqp->event_handler(&event, ibqp->qp_context);
+			}
+		} else {
+			WARN(1,
+			     "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
+			     roce_handle64);
+		}
+		DP_ERR(dev, "QP event %d on hanlde %p\n", e_code, qp);
+		break;
+	default:
+		break;
+	}
+}
+
+static int qedr_init_hw(struct qedr_dev *dev)
+{
+	struct qed_rdma_add_user_out_params out_params;
+	struct qed_rdma_start_in_params *in_params;
+	struct qed_rdma_cnq_params *cur_pbl;
+	struct qed_rdma_events events;
+	dma_addr_t p_phys_table;
+	u32 page_cnt;
+	int rc = 0;
+	int i;
+
+	in_params =  kzalloc(sizeof(*in_params), GFP_KERNEL);
+	if (!in_params) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	in_params->desired_cnq = dev->num_cnq;
+	for (i = 0; i < dev->num_cnq; i++) {
+		cur_pbl = &in_params->cnq_pbl_list[i];
+
+		page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
+		cur_pbl->num_pbl_pages = page_cnt;
+
+		p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
+		cur_pbl->pbl_ptr = (u64)p_phys_table;
+	}
+
+	events.affiliated_event = qedr_affiliated_event;
+	events.unaffiliated_event = qedr_unaffiliated_event;
+	events.context = dev;
+
+	in_params->events = &events;
+	in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
+	in_params->max_mtu = dev->ndev->mtu;
+	ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);
+
+	rc = dev->ops->rdma_init(dev->cdev, in_params);
+	if (rc)
+		goto out;
+
+	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
+	if (rc)
+		goto out;
+
+	dev->db_addr = (void *)(uintptr_t)out_params.dpi_addr;
+	dev->db_phys_addr = out_params.dpi_phys_addr;
+	dev->db_size = out_params.dpi_size;
+	dev->dpi = out_params.dpi;
+
+	rc = qedr_set_device_attr(dev);
+out:
+	kfree(in_params);
+	if (rc)
+		DP_ERR(dev, "Init HW Failed rc = %d\n", rc);
+
+	return rc;
+}
+
+void qedr_stop_hw(struct qedr_dev *dev)
+{
+	dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
+	dev->ops->rdma_stop(dev->rdma_ctx);
+}
+
+static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
+				 struct net_device *ndev)
+{
+	struct qed_dev_rdma_info dev_info;
+	struct qedr_dev *dev;
+	int rc = 0, i;
+
+	dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
+	if (!dev) {
+		pr_err("Unable to allocate ib device\n");
+		return NULL;
+	}
+
+	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");
+
+	dev->pdev = pdev;
+	dev->ndev = ndev;
+	dev->cdev = cdev;
+
+	qed_ops = qed_get_rdma_ops();
+	if (!qed_ops) {
+		DP_ERR(dev, "Failed to get qed roce operations\n");
+		goto init_err;
+	}
+
+	dev->ops = qed_ops;
+	rc = qed_ops->fill_dev_info(cdev, &dev_info);
+	if (rc)
+		goto init_err;
+
+	dev->num_hwfns = dev_info.common.num_hwfns;
+	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);
+
+	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
+	if (!dev->num_cnq) {
+		DP_ERR(dev, "not enough CNQ resources.\n");
+		goto init_err;
+	}
+
+	dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;
+
+	qedr_pci_set_atomic(dev, pdev);
+
+	rc = qedr_alloc_resources(dev);
+	if (rc)
+		goto init_err;
+
+	rc = qedr_init_hw(dev);
+	if (rc)
+		goto alloc_err;
+
+	rc = qedr_setup_irqs(dev);
+	if (rc)
+		goto irq_err;
+
+	rc = qedr_register_device(dev);
+	if (rc) {
+		DP_ERR(dev, "Unable to allocate register device\n");
+		goto reg_err;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
+		if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
+			goto sysfs_err;
+
+	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
+	return dev;
+
+sysfs_err:
+	ib_unregister_device(&dev->ibdev);
+reg_err:
+	qedr_sync_free_irqs(dev);
+irq_err:
+	qedr_stop_hw(dev);
+alloc_err:
+	qedr_free_resources(dev);
+init_err:
+	ib_dealloc_device(&dev->ibdev);
+	DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);
+
+	return NULL;
+}
+
+static void qedr_remove(struct qedr_dev *dev)
+{
+	/* First unregister with stack to stop all the active traffic
+	 * of the registered clients.
+	 */
+	qedr_remove_sysfiles(dev);
+	ib_unregister_device(&dev->ibdev);
+
+	qedr_stop_hw(dev);
+	qedr_sync_free_irqs(dev);
+	qedr_free_resources(dev);
+	ib_dealloc_device(&dev->ibdev);
+}
+
+static int qedr_close(struct qedr_dev *dev)
+{
+	qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
+
+	return 0;
+}
+
+static void qedr_shutdown(struct qedr_dev *dev)
+{
+	qedr_close(dev);
+	qedr_remove(dev);
+}
+
+static void qedr_mac_address_change(struct qedr_dev *dev)
+{
+	union ib_gid *sgid = &dev->sgid_tbl[0];
+	u8 guid[8], mac_addr[6];
+	int rc;
+
+	/* Update SGID */
+	ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
+	guid[0] = mac_addr[0] ^ 2;
+	guid[1] = mac_addr[1];
+	guid[2] = mac_addr[2];
+	guid[3] = 0xff;
+	guid[4] = 0xfe;
+	guid[5] = mac_addr[3];
+	guid[6] = mac_addr[4];
+	guid[7] = mac_addr[5];
+	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+	memcpy(&sgid->raw[8], guid, sizeof(guid));
+
+	/* Update LL2 */
+	rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev,
+					       dev->gsi_ll2_mac_address,
+					       dev->ndev->dev_addr);
+
+	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
+
+	qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+
+	if (rc)
+		DP_ERR(dev, "Error updating mac filter\n");
+}
+
+/* event handling via NIC driver ensures that all the NIC specific
+ * initialization done before RoCE driver notifies
+ * event to stack.
+ */
+static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
+{
+	switch (event) {
+	case QEDE_UP:
+		qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+		break;
+	case QEDE_DOWN:
+		qedr_close(dev);
+		break;
+	case QEDE_CLOSE:
+		qedr_shutdown(dev);
+		break;
+	case QEDE_CHANGE_ADDR:
+		qedr_mac_address_change(dev);
+		break;
+	default:
+		pr_err("Event not supported\n");
+	}
+}
+
+static struct qedr_driver qedr_drv = {
+	.name = "qedr_driver",
+	.add = qedr_add,
+	.remove = qedr_remove,
+	.notify = qedr_notify,
+};
+
+static int __init qedr_init_module(void)
+{
+	return qede_roce_register_driver(&qedr_drv);
+}
+
+static void __exit qedr_exit_module(void)
+{
+	qede_roce_unregister_driver(&qedr_drv);
+}
+
+module_init(qedr_init_module);
+module_exit(qedr_exit_module);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
new file mode 100644
index 0000000..620badd
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -0,0 +1,495 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QEDR_H__
+#define __QEDR_H__
+
+#include <linux/pci.h>
+#include <rdma/ib_addr.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_roce_if.h>
+#include <linux/qed/qede_roce.h>
+#include "qedr_hsi.h"
+
+#define QEDR_MODULE_VERSION	"8.10.10.0"
+#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
+#define DP_NAME(dev) ((dev)->ibdev.name)
+
+#define DP_DEBUG(dev, module, fmt, ...)					\
+	pr_debug("(%s) " module ": " fmt,				\
+		 DP_NAME(dev) ? DP_NAME(dev) : "", ## __VA_ARGS__)
+
+#define QEDR_MSG_INIT "INIT"
+#define QEDR_MSG_MISC "MISC"
+#define QEDR_MSG_CQ   "  CQ"
+#define QEDR_MSG_MR   "  MR"
+#define QEDR_MSG_RQ   "  RQ"
+#define QEDR_MSG_SQ   "  SQ"
+#define QEDR_MSG_QP   "  QP"
+#define QEDR_MSG_GSI  " GSI"
+
+#define QEDR_CQ_MAGIC_NUMBER   (0x11223344)
+
+struct qedr_dev;
+
+struct qedr_cnq {
+	struct qedr_dev		*dev;
+	struct qed_chain	pbl;
+	struct qed_sb_info	*sb;
+	char			name[32];
+	u64			n_comp;
+	__le16			*hw_cons_ptr;
+	u8			index;
+};
+
+#define QEDR_MAX_SGID 128
+
+struct qedr_device_attr {
+	u32	vendor_id;
+	u32	vendor_part_id;
+	u32	hw_ver;
+	u64	fw_ver;
+	u64	node_guid;
+	u64	sys_image_guid;
+	u8	max_cnq;
+	u8	max_sge;
+	u16	max_inline;
+	u32	max_sqe;
+	u32	max_rqe;
+	u8	max_qp_resp_rd_atomic_resc;
+	u8	max_qp_req_rd_atomic_resc;
+	u64	max_dev_resp_rd_atomic_resc;
+	u32	max_cq;
+	u32	max_qp;
+	u32	max_mr;
+	u64	max_mr_size;
+	u32	max_cqe;
+	u32	max_mw;
+	u32	max_fmr;
+	u32	max_mr_mw_fmr_pbl;
+	u64	max_mr_mw_fmr_size;
+	u32	max_pd;
+	u32	max_ah;
+	u8	max_pkey;
+	u32	max_srq;
+	u32	max_srq_wr;
+	u8	max_srq_sge;
+	u8	max_stats_queues;
+	u32	dev_caps;
+
+	u64	page_size_caps;
+	u8	dev_ack_delay;
+	u32	reserved_lkey;
+	u32	bad_pkey_counter;
+	struct qed_rdma_events events;
+};
+
+struct qedr_dev {
+	struct ib_device	ibdev;
+	struct qed_dev		*cdev;
+	struct pci_dev		*pdev;
+	struct net_device	*ndev;
+
+	enum ib_atomic_cap	atomic_cap;
+
+	void *rdma_ctx;
+	struct qedr_device_attr attr;
+
+	const struct qed_rdma_ops *ops;
+	struct qed_int_info	int_info;
+
+	struct qed_sb_info	*sb_array;
+	struct qedr_cnq		*cnq_array;
+	int			num_cnq;
+	int			sb_start;
+
+	void __iomem		*db_addr;
+	u64			db_phys_addr;
+	u32			db_size;
+	u16			dpi;
+
+	union ib_gid *sgid_tbl;
+
+	/* Lock for sgid table */
+	spinlock_t sgid_lock;
+
+	u64			guid;
+
+	u32			dp_module;
+	u8			dp_level;
+	u8			num_hwfns;
+	uint			wq_multiplier;
+	u8			gsi_ll2_mac_address[ETH_ALEN];
+	int			gsi_qp_created;
+	struct qedr_cq		*gsi_sqcq;
+	struct qedr_cq		*gsi_rqcq;
+	struct qedr_qp		*gsi_qp;
+};
+
+#define QEDR_MAX_SQ_PBL			(0x8000)
+#define QEDR_MAX_SQ_PBL_ENTRIES		(0x10000 / sizeof(void *))
+#define QEDR_SQE_ELEMENT_SIZE		(sizeof(struct rdma_sq_sge))
+#define QEDR_MAX_SQE_ELEMENTS_PER_SQE	(ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \
+					 QEDR_SQE_ELEMENT_SIZE)
+#define QEDR_MAX_SQE_ELEMENTS_PER_PAGE	((RDMA_RING_PAGE_SIZE) / \
+					 QEDR_SQE_ELEMENT_SIZE)
+#define QEDR_MAX_SQE			((QEDR_MAX_SQ_PBL_ENTRIES) *\
+					 (RDMA_RING_PAGE_SIZE) / \
+					 (QEDR_SQE_ELEMENT_SIZE) /\
+					 (QEDR_MAX_SQE_ELEMENTS_PER_SQE))
+/* RQ */
+#define QEDR_MAX_RQ_PBL			(0x2000)
+#define QEDR_MAX_RQ_PBL_ENTRIES		(0x10000 / sizeof(void *))
+#define QEDR_RQE_ELEMENT_SIZE		(sizeof(struct rdma_rq_sge))
+#define QEDR_MAX_RQE_ELEMENTS_PER_RQE	(RDMA_MAX_SGE_PER_RQ_WQE)
+#define QEDR_MAX_RQE_ELEMENTS_PER_PAGE	((RDMA_RING_PAGE_SIZE) / \
+					 QEDR_RQE_ELEMENT_SIZE)
+#define QEDR_MAX_RQE			((QEDR_MAX_RQ_PBL_ENTRIES) *\
+					 (RDMA_RING_PAGE_SIZE) / \
+					 (QEDR_RQE_ELEMENT_SIZE) /\
+					 (QEDR_MAX_RQE_ELEMENTS_PER_RQE))
+
+#define QEDR_CQE_SIZE	(sizeof(union rdma_cqe))
+#define QEDR_MAX_CQE_PBL_SIZE (512 * 1024)
+#define QEDR_MAX_CQE_PBL_ENTRIES (((QEDR_MAX_CQE_PBL_SIZE) / \
+				  sizeof(u64)) - 1)
+#define QEDR_MAX_CQES ((u32)((QEDR_MAX_CQE_PBL_ENTRIES) * \
+			     (QED_CHAIN_PAGE_SIZE) / QEDR_CQE_SIZE))
+
+#define QEDR_ROCE_MAX_CNQ_SIZE		(0x4000)
+
+#define QEDR_MAX_PORT			(1)
+
+#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
+
+#define QEDR_ROCE_PKEY_MAX 1
+#define QEDR_ROCE_PKEY_TABLE_LEN 1
+#define QEDR_ROCE_PKEY_DEFAULT 0xffff
+
+struct qedr_pbl {
+	struct list_head list_entry;
+	void *va;
+	dma_addr_t pa;
+};
+
+struct qedr_ucontext {
+	struct ib_ucontext ibucontext;
+	struct qedr_dev *dev;
+	struct qedr_pd *pd;
+	u64 dpi_addr;
+	u64 dpi_phys_addr;
+	u32 dpi_size;
+	u16 dpi;
+
+	struct list_head mm_head;
+
+	/* Lock to protect mm list */
+	struct mutex mm_list_lock;
+};
+
+union db_prod64 {
+	struct rdma_pwm_val32_data data;
+	u64 raw;
+};
+
+enum qedr_cq_type {
+	QEDR_CQ_TYPE_GSI,
+	QEDR_CQ_TYPE_KERNEL,
+	QEDR_CQ_TYPE_USER,
+};
+
+struct qedr_pbl_info {
+	u32 num_pbls;
+	u32 num_pbes;
+	u32 pbl_size;
+	u32 pbe_size;
+	bool two_layered;
+};
+
+struct qedr_userq {
+	struct ib_umem *umem;
+	struct qedr_pbl_info pbl_info;
+	struct qedr_pbl *pbl_tbl;
+	u64 buf_addr;
+	size_t buf_len;
+};
+
+struct qedr_cq {
+	struct ib_cq ibcq;
+
+	enum qedr_cq_type cq_type;
+	u32 sig;
+
+	u16 icid;
+
+	/* Lock to protect completion handler */
+	spinlock_t comp_handler_lock;
+
+	/* Lock to protect multiplem CQ's */
+	spinlock_t cq_lock;
+	u8 arm_flags;
+	struct qed_chain pbl;
+
+	void __iomem *db_addr;
+	union db_prod64 db;
+
+	u8 pbl_toggle;
+	union rdma_cqe *latest_cqe;
+	union rdma_cqe *toggle_cqe;
+
+	u32 cq_cons;
+
+	struct qedr_userq q;
+};
+
+struct qedr_pd {
+	struct ib_pd ibpd;
+	u32 pd_id;
+	struct qedr_ucontext *uctx;
+};
+
+struct qedr_mm {
+	struct {
+		u64 phy_addr;
+		unsigned long len;
+	} key;
+	struct list_head entry;
+};
+
+union db_prod32 {
+	struct rdma_pwm_val16_data data;
+	u32 raw;
+};
+
+struct qedr_qp_hwq_info {
+	/* WQE Elements */
+	struct qed_chain pbl;
+	u64 p_phys_addr_tbl;
+	u32 max_sges;
+
+	/* WQE */
+	u16 prod;
+	u16 cons;
+	u16 wqe_cons;
+	u16 gsi_cons;
+	u16 max_wr;
+
+	/* DB */
+	void __iomem *db;
+	union db_prod32 db_data;
+};
+
+#define QEDR_INC_SW_IDX(p_info, index)					\
+	do {								\
+		p_info->index = (p_info->index + 1) &			\
+				qed_chain_get_capacity(p_info->pbl)	\
+	} while (0)
+
+enum qedr_qp_err_bitmap {
+	QEDR_QP_ERR_SQ_FULL = 1,
+	QEDR_QP_ERR_RQ_FULL = 2,
+	QEDR_QP_ERR_BAD_SR = 4,
+	QEDR_QP_ERR_BAD_RR = 8,
+	QEDR_QP_ERR_SQ_PBL_FULL = 16,
+	QEDR_QP_ERR_RQ_PBL_FULL = 32,
+};
+
+struct qedr_qp {
+	struct ib_qp ibqp;	/* must be first */
+	struct qedr_dev *dev;
+
+	struct qedr_qp_hwq_info sq;
+	struct qedr_qp_hwq_info rq;
+
+	u32 max_inline_data;
+
+	/* Lock for QP's */
+	spinlock_t q_lock;
+	struct qedr_cq *sq_cq;
+	struct qedr_cq *rq_cq;
+	struct qedr_srq *srq;
+	enum qed_roce_qp_state state;
+	u32 id;
+	struct qedr_pd *pd;
+	enum ib_qp_type qp_type;
+	struct qed_rdma_qp *qed_qp;
+	u32 qp_id;
+	u16 icid;
+	u16 mtu;
+	int sgid_idx;
+	u32 rq_psn;
+	u32 sq_psn;
+	u32 qkey;
+	u32 dest_qp_num;
+
+	/* Relevant to qps created from kernel space only (ULPs) */
+	u8 prev_wqe_size;
+	u16 wqe_cons;
+	u32 err_bitmap;
+	bool signaled;
+
+	/* SQ shadow */
+	struct {
+		u64 wr_id;
+		enum ib_wc_opcode opcode;
+		u32 bytes_len;
+		u8 wqe_size;
+		bool signaled;
+		dma_addr_t icrc_mapping;
+		u32 *icrc;
+		struct qedr_mr *mr;
+	} *wqe_wr_id;
+
+	/* RQ shadow */
+	struct {
+		u64 wr_id;
+		struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE];
+		u8 wqe_size;
+
+		u8 smac[ETH_ALEN];
+		u16 vlan_id;
+		int rc;
+	} *rqe_wr_id;
+
+	/* Relevant to qps created from user space only (applications) */
+	struct qedr_userq usq;
+	struct qedr_userq urq;
+};
+
+struct qedr_ah {
+	struct ib_ah ibah;
+	struct ib_ah_attr attr;
+};
+
+enum qedr_mr_type {
+	QEDR_MR_USER,
+	QEDR_MR_KERNEL,
+	QEDR_MR_DMA,
+	QEDR_MR_FRMR,
+};
+
+struct mr_info {
+	struct qedr_pbl *pbl_table;
+	struct qedr_pbl_info pbl_info;
+	struct list_head free_pbl_list;
+	struct list_head inuse_pbl_list;
+	u32 completed;
+	u32 completed_handled;
+};
+
+struct qedr_mr {
+	struct ib_mr ibmr;
+	struct ib_umem *umem;
+
+	struct qed_rdma_register_tid_in_params hw_mr;
+	enum qedr_mr_type type;
+
+	struct qedr_dev *dev;
+	struct mr_info info;
+
+	u64 *pages;
+	u32 npages;
+};
+
+#define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT)))
+
+#define QEDR_RESP_IMM	(RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
+			 RDMA_CQE_RESPONDER_IMM_FLG_SHIFT)
+#define QEDR_RESP_RDMA	(RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \
+			 RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT)
+#define QEDR_RESP_RDMA_IMM (QEDR_RESP_IMM | QEDR_RESP_RDMA)
+
+static inline void qedr_inc_sw_cons(struct qedr_qp_hwq_info *info)
+{
+	info->cons = (info->cons + 1) % info->max_wr;
+	info->wqe_cons++;
+}
+
+static inline void qedr_inc_sw_prod(struct qedr_qp_hwq_info *info)
+{
+	info->prod = (info->prod + 1) % info->max_wr;
+}
+
+static inline int qedr_get_dmac(struct qedr_dev *dev,
+				struct ib_ah_attr *ah_attr, u8 *mac_addr)
+{
+	union ib_gid zero_sgid = { { 0 } };
+	struct in6_addr in6;
+
+	if (!memcmp(&ah_attr->grh.dgid, &zero_sgid, sizeof(union ib_gid))) {
+		DP_ERR(dev, "Local port GID not supported\n");
+		eth_zero_addr(mac_addr);
+		return -EINVAL;
+	}
+
+	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+	ether_addr_copy(mac_addr, ah_attr->dmac);
+
+	return 0;
+}
+
+static inline
+struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext)
+{
+	return container_of(ibucontext, struct qedr_ucontext, ibucontext);
+}
+
+static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev)
+{
+	return container_of(ibdev, struct qedr_dev, ibdev);
+}
+
+static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd)
+{
+	return container_of(ibpd, struct qedr_pd, ibpd);
+}
+
+static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq)
+{
+	return container_of(ibcq, struct qedr_cq, ibcq);
+}
+
+static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp)
+{
+	return container_of(ibqp, struct qedr_qp, ibqp);
+}
+
+static inline struct qedr_ah *get_qedr_ah(struct ib_ah *ibah)
+{
+	return container_of(ibah, struct qedr_ah, ibah);
+}
+
+static inline struct qedr_mr *get_qedr_mr(struct ib_mr *ibmr)
+{
+	return container_of(ibmr, struct qedr_mr, ibmr);
+}
+#endif
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
new file mode 100644
index 0000000..63890eb
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -0,0 +1,622 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
+#include "qedr_hsi.h"
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_roce_if.h>
+#include "qedr.h"
+#include "qedr_hsi.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+#include "qedr_hsi.h"
+#include "qedr_cm.h"
+
+void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
+{
+	info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
+}
+
+void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
+			  struct ib_qp_init_attr *attrs)
+{
+	dev->gsi_qp_created = 1;
+	dev->gsi_sqcq = get_qedr_cq(attrs->send_cq);
+	dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq);
+	dev->gsi_qp = qp;
+}
+
+void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
+{
+	struct qedr_dev *dev = (struct qedr_dev *)_qdev;
+	struct qedr_cq *cq = dev->gsi_sqcq;
+	struct qedr_qp *qp = dev->gsi_qp;
+	unsigned long flags;
+
+	DP_DEBUG(dev, QEDR_MSG_GSI,
+		 "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
+		 dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
+		 cq->ibcq.comp_handler ? "Yes" : "No");
+
+	dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
+			  pkt->header.baddr);
+	kfree(pkt);
+
+	spin_lock_irqsave(&qp->q_lock, flags);
+	qedr_inc_sw_gsi_cons(&qp->sq);
+	spin_unlock_irqrestore(&qp->q_lock, flags);
+
+	if (cq->ibcq.comp_handler) {
+		spin_lock_irqsave(&cq->comp_handler_lock, flags);
+		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+	}
+}
+
+void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
+		    struct qed_roce_ll2_rx_params *params)
+{
+	struct qedr_dev *dev = (struct qedr_dev *)_dev;
+	struct qedr_cq *cq = dev->gsi_rqcq;
+	struct qedr_qp *qp = dev->gsi_qp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->q_lock, flags);
+
+	qp->rqe_wr_id[qp->rq.gsi_cons].rc = params->rc;
+	qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = params->vlan_id;
+	qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len;
+	ether_addr_copy(qp->rqe_wr_id[qp->rq.gsi_cons].smac, params->smac);
+
+	qedr_inc_sw_gsi_cons(&qp->rq);
+
+	spin_unlock_irqrestore(&qp->q_lock, flags);
+
+	if (cq->ibcq.comp_handler) {
+		spin_lock_irqsave(&cq->comp_handler_lock, flags);
+		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+	}
+}
+
+static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
+				struct ib_qp_init_attr *attrs)
+{
+	struct qed_rdma_destroy_cq_in_params iparams;
+	struct qed_rdma_destroy_cq_out_params oparams;
+	struct qedr_cq *cq;
+
+	cq = get_qedr_cq(attrs->send_cq);
+	iparams.icid = cq->icid;
+	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+	dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+
+	cq = get_qedr_cq(attrs->recv_cq);
+	/* if a dedicated recv_cq was used, delete it too */
+	if (iparams.icid != cq->icid) {
+		iparams.icid = cq->icid;
+		dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+	}
+}
+
+static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
+					  struct ib_qp_init_attr *attrs)
+{
+	if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) {
+		DP_ERR(dev,
+		       " create gsi qp: failed. max_recv_sge is larger the max %d>%d\n",
+		       attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE);
+		return -EINVAL;
+	}
+
+	if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) {
+		DP_ERR(dev,
+		       " create gsi qp: failed. max_recv_wr is too large %d>%d\n",
+		       attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR);
+		return -EINVAL;
+	}
+
+	if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) {
+		DP_ERR(dev,
+		       " create gsi qp: failed. max_send_wr is too large %d>%d\n",
+		       attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
+				 struct ib_qp_init_attr *attrs,
+				 struct qedr_qp *qp)
+{
+	struct qed_roce_ll2_params ll2_params;
+	int rc;
+
+	rc = qedr_check_gsi_qp_attrs(dev, attrs);
+	if (rc)
+		return ERR_PTR(rc);
+
+	/* configure and start LL2 */
+	memset(&ll2_params, 0, sizeof(ll2_params));
+	ll2_params.max_tx_buffers = attrs->cap.max_send_wr;
+	ll2_params.max_rx_buffers = attrs->cap.max_recv_wr;
+	ll2_params.cbs.tx_cb = qedr_ll2_tx_cb;
+	ll2_params.cbs.rx_cb = qedr_ll2_rx_cb;
+	ll2_params.cb_cookie = (void *)dev;
+	ll2_params.mtu = dev->ndev->mtu;
+	ether_addr_copy(ll2_params.mac_address, dev->ndev->dev_addr);
+	rc = dev->ops->roce_ll2_start(dev->cdev, &ll2_params);
+	if (rc) {
+		DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
+		return ERR_PTR(rc);
+	}
+
+	/* create QP */
+	qp->ibqp.qp_num = 1;
+	qp->rq.max_wr = attrs->cap.max_recv_wr;
+	qp->sq.max_wr = attrs->cap.max_send_wr;
+
+	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
+				GFP_KERNEL);
+	if (!qp->rqe_wr_id)
+		goto err;
+	qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
+				GFP_KERNEL);
+	if (!qp->wqe_wr_id)
+		goto err;
+
+	qedr_store_gsi_qp_cq(dev, qp, attrs);
+	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
+
+	/* the GSI CQ is handled by the driver so remove it from the FW */
+	qedr_destroy_gsi_cq(dev, attrs);
+	dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
+	dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
+
+	DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);
+
+	return &qp->ibqp;
+
+err:
+	kfree(qp->rqe_wr_id);
+
+	rc = dev->ops->roce_ll2_stop(dev->cdev);
+	if (rc)
+		DP_ERR(dev, "create gsi qp: failed destroy on create\n");
+
+	return ERR_PTR(-ENOMEM);
+}
+
+int qedr_destroy_gsi_qp(struct qedr_dev *dev)
+{
+	int rc;
+
+	rc = dev->ops->roce_ll2_stop(dev->cdev);
+	if (rc)
+		DP_ERR(dev, "destroy gsi qp: failed (rc=%d)\n", rc);
+	else
+		DP_DEBUG(dev, QEDR_MSG_GSI, "destroy gsi qp: success\n");
+
+	return rc;
+}
+
+#define QEDR_MAX_UD_HEADER_SIZE	(100)
+#define QEDR_GSI_QPN		(1)
+static inline int qedr_gsi_build_header(struct qedr_dev *dev,
+					struct qedr_qp *qp,
+					struct ib_send_wr *swr,
+					struct ib_ud_header *udh,
+					int *roce_mode)
+{
+	bool has_vlan = false, has_grh_ipv6 = true;
+	struct ib_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
+	struct ib_global_route *grh = &ah_attr->grh;
+	union ib_gid sgid;
+	int send_size = 0;
+	u16 vlan_id = 0;
+	u16 ether_type;
+	struct ib_gid_attr sgid_attr;
+	int rc;
+	int ip_ver = 0;
+
+	bool has_udp = false;
+	int i;
+
+	send_size = 0;
+	for (i = 0; i < swr->num_sge; ++i)
+		send_size += swr->sg_list[i].length;
+
+	rc = ib_get_cached_gid(qp->ibqp.device, ah_attr->port_num,
+			       grh->sgid_index, &sgid, &sgid_attr);
+	if (rc) {
+		DP_ERR(dev,
+		       "gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
+		       ah_attr->port_num, grh->sgid_index);
+		return rc;
+	}
+
+	vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
+	if (vlan_id < VLAN_CFI_MASK)
+		has_vlan = true;
+	if (sgid_attr.ndev)
+		dev_put(sgid_attr.ndev);
+
+	if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
+		DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
+		       ah_attr->grh.sgid_index);
+		return -ENOENT;
+	}
+
+	has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
+	if (!has_udp) {
+		/* RoCE v1 */
+		ether_type = ETH_P_ROCE;
+		*roce_mode = ROCE_V1;
+	} else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
+		/* RoCE v2 IPv4 */
+		ip_ver = 4;
+		ether_type = ETH_P_IP;
+		has_grh_ipv6 = false;
+		*roce_mode = ROCE_V2_IPV4;
+	} else {
+		/* RoCE v2 IPv6 */
+		ip_ver = 6;
+		ether_type = ETH_P_IPV6;
+		*roce_mode = ROCE_V2_IPV6;
+	}
+
+	rc = ib_ud_header_init(send_size, false, true, has_vlan,
+			       has_grh_ipv6, ip_ver, has_udp, 0, udh);
+	if (rc) {
+		DP_ERR(dev, "gsi post send: failed to init header\n");
+		return rc;
+	}
+
+	/* ENET + VLAN headers */
+	ether_addr_copy(udh->eth.dmac_h, ah_attr->dmac);
+	ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
+	if (has_vlan) {
+		udh->eth.type = htons(ETH_P_8021Q);
+		udh->vlan.tag = htons(vlan_id);
+		udh->vlan.type = htons(ether_type);
+	} else {
+		udh->eth.type = htons(ether_type);
+	}
+
+	/* BTH */
+	udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
+	udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
+	udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
+	udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
+	udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+
+	/* DETH */
+	udh->deth.qkey = htonl(0x80010000);
+	udh->deth.source_qpn = htonl(QEDR_GSI_QPN);
+
+	if (has_grh_ipv6) {
+		/* GRH / IPv6 header */
+		udh->grh.traffic_class = grh->traffic_class;
+		udh->grh.flow_label = grh->flow_label;
+		udh->grh.hop_limit = grh->hop_limit;
+		udh->grh.destination_gid = grh->dgid;
+		memcpy(&udh->grh.source_gid.raw, &sgid.raw,
+		       sizeof(udh->grh.source_gid.raw));
+	} else {
+		/* IPv4 header */
+		u32 ipv4_addr;
+
+		udh->ip4.protocol = IPPROTO_UDP;
+		udh->ip4.tos = htonl(ah_attr->grh.flow_label);
+		udh->ip4.frag_off = htons(IP_DF);
+		udh->ip4.ttl = ah_attr->grh.hop_limit;
+
+		ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
+		udh->ip4.saddr = ipv4_addr;
+		ipv4_addr = qedr_get_ipv4_from_gid(ah_attr->grh.dgid.raw);
+		udh->ip4.daddr = ipv4_addr;
+		/* note: checksum is calculated by the device */
+	}
+
+	/* UDP */
+	if (has_udp) {
+		udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
+		udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
+		udh->udp.csum = 0;
+		/* UDP length is untouched hence is zero */
+	}
+	return 0;
+}
+
+static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
+					struct qedr_qp *qp,
+					struct ib_send_wr *swr,
+					struct qed_roce_ll2_packet **p_packet)
+{
+	u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
+	struct qed_roce_ll2_packet *packet;
+	struct pci_dev *pdev = dev->pdev;
+	int roce_mode, header_size;
+	struct ib_ud_header udh;
+	int i, rc;
+
+	*p_packet = NULL;
+
+	rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
+	if (rc)
+		return rc;
+
+	header_size = ib_ud_header_pack(&udh, &ud_header_buffer);
+
+	packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
+	if (!packet)
+		return -ENOMEM;
+
+	packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size,
+						  &packet->header.baddr,
+						  GFP_ATOMIC);
+	if (!packet->header.vaddr) {
+		kfree(packet);
+		return -ENOMEM;
+	}
+
+	if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
+		packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
+	else
+		packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+
+	packet->roce_mode = roce_mode;
+	memcpy(packet->header.vaddr, ud_header_buffer, header_size);
+	packet->header.len = header_size;
+	packet->n_seg = swr->num_sge;
+	for (i = 0; i < packet->n_seg; i++) {
+		packet->payload[i].baddr = swr->sg_list[i].addr;
+		packet->payload[i].len = swr->sg_list[i].length;
+	}
+
+	*p_packet = packet;
+
+	return 0;
+}
+
+int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+		       struct ib_send_wr **bad_wr)
+{
+	struct qed_roce_ll2_packet *pkt = NULL;
+	struct qedr_qp *qp = get_qedr_qp(ibqp);
+	struct qed_roce_ll2_tx_params params;
+	struct qedr_dev *dev = qp->dev;
+	unsigned long flags;
+	int rc;
+
+	if (qp->state != QED_ROCE_QP_STATE_RTS) {
+		*bad_wr = wr;
+		DP_ERR(dev,
+		       "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTS\n",
+		       qp->state);
+		return -EINVAL;
+	}
+
+	if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
+		DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n",
+		       wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE);
+		rc = -EINVAL;
+		goto err;
+	}
+
+	if (wr->opcode != IB_WR_SEND) {
+		DP_ERR(dev,
+		       "gsi post send: failed due to unsupported opcode %d\n",
+		       wr->opcode);
+		rc = -EINVAL;
+		goto err;
+	}
+
+	memset(&params, 0, sizeof(params));
+
+	spin_lock_irqsave(&qp->q_lock, flags);
+
+	rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
+	if (rc) {
+		spin_unlock_irqrestore(&qp->q_lock, flags);
+		goto err;
+	}
+
+	rc = dev->ops->roce_ll2_tx(dev->cdev, pkt, &params);
+	if (!rc) {
+		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
+		qedr_inc_sw_prod(&qp->sq);
+		DP_DEBUG(qp->dev, QEDR_MSG_GSI,
+			 "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
+			 wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
+	} else {
+		if (rc == QED_ROCE_TX_HEAD_FAILURE) {
+			/* TX failed while posting header - release resources */
+			dma_free_coherent(&dev->pdev->dev, pkt->header.len,
+					  pkt->header.vaddr, pkt->header.baddr);
+			kfree(pkt);
+		} else if (rc == QED_ROCE_TX_FRAG_FAILURE) {
+			/* NTD since TX failed while posting a fragment. We will
+			 * release the resources on TX callback
+			 */
+		}
+
+		DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
+		rc = -EAGAIN;
+		*bad_wr = wr;
+	}
+
+	spin_unlock_irqrestore(&qp->q_lock, flags);
+
+	if (wr->next) {
+		DP_ERR(dev,
+		       "gsi post send: failed second WR. Only one WR may be passed at a time\n");
+		*bad_wr = wr->next;
+		rc = -EINVAL;
+	}
+
+	return rc;
+
+err:
+	*bad_wr = wr;
+	return rc;
+}
+
+int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+		       struct ib_recv_wr **bad_wr)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+	struct qedr_qp *qp = get_qedr_qp(ibqp);
+	struct qed_roce_ll2_buffer buf;
+	unsigned long flags;
+	int status = 0;
+	int rc;
+
+	if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
+	    (qp->state != QED_ROCE_QP_STATE_RTS)) {
+		*bad_wr = wr;
+		DP_ERR(dev,
+		       "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
+		       qp->state);
+		return -EINVAL;
+	}
+
+	memset(&buf, 0, sizeof(buf));
+
+	spin_lock_irqsave(&qp->q_lock, flags);
+
+	while (wr) {
+		if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) {
+			DP_ERR(dev,
+			       "gsi post recv: failed to post rx buffer. too many sges %d>%d\n",
+			       wr->num_sge, QEDR_GSI_MAX_RECV_SGE);
+			goto err;
+		}
+
+		buf.baddr = wr->sg_list[0].addr;
+		buf.len = wr->sg_list[0].length;
+
+		rc = dev->ops->roce_ll2_post_rx_buffer(dev->cdev, &buf, 0, 1);
+		if (rc) {
+			DP_ERR(dev,
+			       "gsi post recv: failed to post rx buffer (rc=%d)\n",
+			       rc);
+			goto err;
+		}
+
+		memset(&qp->rqe_wr_id[qp->rq.prod], 0,
+		       sizeof(qp->rqe_wr_id[qp->rq.prod]));
+		qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
+		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
+
+		qedr_inc_sw_prod(&qp->rq);
+
+		wr = wr->next;
+	}
+
+	spin_unlock_irqrestore(&qp->q_lock, flags);
+
+	return status;
+err:
+	spin_unlock_irqrestore(&qp->q_lock, flags);
+	*bad_wr = wr;
+	return -ENOMEM;
+}
+
+int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+	struct qedr_cq *cq = get_qedr_cq(ibcq);
+	struct qedr_qp *qp = dev->gsi_qp;
+	unsigned long flags;
+	int i = 0;
+
+	spin_lock_irqsave(&cq->cq_lock, flags);
+
+	while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
+		memset(&wc[i], 0, sizeof(*wc));
+
+		wc[i].qp = &qp->ibqp;
+		wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+		wc[i].opcode = IB_WC_RECV;
+		wc[i].pkey_index = 0;
+		wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
+		    IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
+		/* 0 - currently only one recv sg is supported */
+		wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
+		wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
+		ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
+		wc[i].wc_flags |= IB_WC_WITH_SMAC;
+		if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
+			wc[i].wc_flags |= IB_WC_WITH_VLAN;
+			wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
+		}
+
+		qedr_inc_sw_cons(&qp->rq);
+		i++;
+	}
+
+	while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
+		memset(&wc[i], 0, sizeof(*wc));
+
+		wc[i].qp = &qp->ibqp;
+		wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
+		wc[i].opcode = IB_WC_SEND;
+		wc[i].status = IB_WC_SUCCESS;
+
+		qedr_inc_sw_cons(&qp->sq);
+		i++;
+	}
+
+	spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+	DP_DEBUG(dev, QEDR_MSG_GSI,
+		 "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
+		 num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
+		 qp->sq.gsi_cons, qp->ibqp.qp_num);
+
+	return i;
+}
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.h b/drivers/infiniband/hw/qedr/qedr_cm.h
new file mode 100644
index 0000000..9ba6e15
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_cm.h
@@ -0,0 +1,61 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef LINUX_QEDR_CM_H_
+#define LINUX_QEDR_CM_H_
+
+#define QEDR_GSI_MAX_RECV_WR	(4096)
+#define QEDR_GSI_MAX_SEND_WR	(4096)
+
+#define QEDR_GSI_MAX_RECV_SGE	(1)	/* LL2 FW limitation */
+
+#define ETH_P_ROCE		(0x8915)
+#define QEDR_ROCE_V2_UDP_SPORT	(0000)
+
+static inline u32 qedr_get_ipv4_from_gid(u8 *gid)
+{
+	return *(u32 *)(void *)&gid[12];
+}
+
+/* RDMA CM */
+int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+		       struct ib_recv_wr **bad_wr);
+int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+		       struct ib_send_wr **bad_wr);
+struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
+				 struct ib_qp_init_attr *attrs,
+				 struct qedr_qp *qp);
+void qedr_store_gsi_qp_cq(struct qedr_dev *dev,
+			  struct qedr_qp *qp, struct ib_qp_init_attr *attrs);
+int qedr_destroy_gsi_qp(struct qedr_dev *dev);
+void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info);
+#endif
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi.h b/drivers/infiniband/hw/qedr/qedr_hsi.h
new file mode 100644
index 0000000..66d2752
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_hsi.h
@@ -0,0 +1,56 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QED_HSI_ROCE__
+#define __QED_HSI_ROCE__
+
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/roce_common.h>
+#include "qedr_hsi_rdma.h"
+
+/* Affiliated asynchronous events / errors enumeration */
+enum roce_async_events_type {
+	ROCE_ASYNC_EVENT_NONE = 0,
+	ROCE_ASYNC_EVENT_COMM_EST = 1,
+	ROCE_ASYNC_EVENT_SQ_DRAINED,
+	ROCE_ASYNC_EVENT_SRQ_LIMIT,
+	ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
+	ROCE_ASYNC_EVENT_CQ_ERR,
+	ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
+	ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
+	ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
+	ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
+	ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
+	ROCE_ASYNC_EVENT_SRQ_EMPTY,
+	MAX_ROCE_ASYNC_EVENTS_TYPE
+};
+
+#endif /* __QED_HSI_ROCE__ */
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
new file mode 100644
index 0000000..5c98d20
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -0,0 +1,748 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QED_HSI_RDMA__
+#define __QED_HSI_RDMA__
+
+#include <linux/qed/rdma_common.h>
+
+/* rdma completion notification queue element */
+struct rdma_cnqe {
+	struct regpair	cq_handle;
+};
+
+struct rdma_cqe_responder {
+	struct regpair srq_wr_id;
+	struct regpair qp_handle;
+	__le32 imm_data_or_inv_r_Key;
+	__le32 length;
+	__le32 imm_data_hi;
+	__le16 rq_cons;
+	u8 flags;
+#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK  0x1
+#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_RESPONDER_TYPE_MASK        0x3
+#define RDMA_CQE_RESPONDER_TYPE_SHIFT       1
+#define RDMA_CQE_RESPONDER_INV_FLG_MASK     0x1
+#define RDMA_CQE_RESPONDER_INV_FLG_SHIFT    3
+#define RDMA_CQE_RESPONDER_IMM_FLG_MASK     0x1
+#define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT    4
+#define RDMA_CQE_RESPONDER_RDMA_FLG_MASK    0x1
+#define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT   5
+#define RDMA_CQE_RESPONDER_RESERVED2_MASK   0x3
+#define RDMA_CQE_RESPONDER_RESERVED2_SHIFT  6
+	u8 status;
+};
+
+struct rdma_cqe_requester {
+	__le16 sq_cons;
+	__le16 reserved0;
+	__le32 reserved1;
+	struct regpair qp_handle;
+	struct regpair reserved2;
+	__le32 reserved3;
+	__le16 reserved4;
+	u8 flags;
+#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK  0x1
+#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_REQUESTER_TYPE_MASK        0x3
+#define RDMA_CQE_REQUESTER_TYPE_SHIFT       1
+#define RDMA_CQE_REQUESTER_RESERVED5_MASK   0x1F
+#define RDMA_CQE_REQUESTER_RESERVED5_SHIFT  3
+	u8 status;
+};
+
+struct rdma_cqe_common {
+	struct regpair reserved0;
+	struct regpair qp_handle;
+	__le16 reserved1[7];
+	u8 flags;
+#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK  0x1
+#define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_COMMON_TYPE_MASK        0x3
+#define RDMA_CQE_COMMON_TYPE_SHIFT       1
+#define RDMA_CQE_COMMON_RESERVED2_MASK   0x1F
+#define RDMA_CQE_COMMON_RESERVED2_SHIFT  3
+	u8 status;
+};
+
+/* rdma completion queue element */
+union rdma_cqe {
+	struct rdma_cqe_responder resp;
+	struct rdma_cqe_requester req;
+	struct rdma_cqe_common cmn;
+};
+
+/* * CQE requester status enumeration */
+enum rdma_cqe_requester_status_enum {
+	RDMA_CQE_REQ_STS_OK,
+	RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR,
+	RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR,
+	RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR,
+	RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR,
+	RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR,
+	RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR,
+	RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR,
+	RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR,
+	RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
+	RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
+	RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
+	MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
+};
+
+/* CQE responder status enumeration */
+enum rdma_cqe_responder_status_enum {
+	RDMA_CQE_RESP_STS_OK,
+	RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR,
+	RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR,
+	RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR,
+	RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR,
+	RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR,
+	RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR,
+	RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR,
+	MAX_RDMA_CQE_RESPONDER_STATUS_ENUM
+};
+
+/* CQE type enumeration */
+enum rdma_cqe_type {
+	RDMA_CQE_TYPE_REQUESTER,
+	RDMA_CQE_TYPE_RESPONDER_RQ,
+	RDMA_CQE_TYPE_RESPONDER_SRQ,
+	RDMA_CQE_TYPE_INVALID,
+	MAX_RDMA_CQE_TYPE
+};
+
+struct rdma_sq_sge {
+	__le32 length;
+	struct regpair	addr;
+	__le32 l_key;
+};
+
+struct rdma_rq_sge {
+	struct regpair addr;
+	__le32 length;
+	__le32 flags;
+#define RDMA_RQ_SGE_L_KEY_MASK      0x3FFFFFF
+#define RDMA_RQ_SGE_L_KEY_SHIFT     0
+#define RDMA_RQ_SGE_NUM_SGES_MASK   0x7
+#define RDMA_RQ_SGE_NUM_SGES_SHIFT  26
+#define RDMA_RQ_SGE_RESERVED0_MASK  0x7
+#define RDMA_RQ_SGE_RESERVED0_SHIFT 29
+};
+
+struct rdma_srq_sge {
+	struct regpair addr;
+	__le32 length;
+	__le32 l_key;
+};
+
+/* Rdma doorbell data for SQ and RQ */
+struct rdma_pwm_val16_data {
+	__le16 icid;
+	__le16 value;
+};
+
+union rdma_pwm_val16_data_union {
+	struct rdma_pwm_val16_data as_struct;
+	__le32 as_dword;
+};
+
+/* Rdma doorbell data for CQ */
+struct rdma_pwm_val32_data {
+	__le16 icid;
+	u8 agg_flags;
+	u8 params;
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK    0x3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT   0
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK  0x1
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
+#define RDMA_PWM_VAL32_DATA_RESERVED_MASK   0x1F
+#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT  3
+	__le32 value;
+};
+
+/* DIF Block size options */
+enum rdma_dif_block_size {
+	RDMA_DIF_BLOCK_512 = 0,
+	RDMA_DIF_BLOCK_4096 = 1,
+	MAX_RDMA_DIF_BLOCK_SIZE
+};
+
+/* DIF CRC initial value */
+enum rdma_dif_crc_seed {
+	RDMA_DIF_CRC_SEED_0000 = 0,
+	RDMA_DIF_CRC_SEED_FFFF = 1,
+	MAX_RDMA_DIF_CRC_SEED
+};
+
+/* RDMA DIF Error Result Structure */
+struct rdma_dif_error_result {
+	__le32 error_intervals;
+	__le32 dif_error_1st_interval;
+	u8 flags;
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_MASK      0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_SHIFT     0
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_MASK  0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_SHIFT 1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_MASK  0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_SHIFT 2
+#define RDMA_DIF_ERROR_RESULT_RESERVED0_MASK               0xF
+#define RDMA_DIF_ERROR_RESULT_RESERVED0_SHIFT              3
+#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_MASK              0x1
+#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_SHIFT             7
+	u8 reserved1[55];
+};
+
+/* DIF IO direction */
+enum rdma_dif_io_direction_flg {
+	RDMA_DIF_DIR_RX = 0,
+	RDMA_DIF_DIR_TX = 1,
+	MAX_RDMA_DIF_IO_DIRECTION_FLG
+};
+
+/* RDMA DIF Runt Result Structure */
+struct rdma_dif_runt_result {
+	__le16 guard_tag;
+	__le16 reserved[3];
+};
+
+/* Memory window type enumeration */
+enum rdma_mw_type {
+	RDMA_MW_TYPE_1,
+	RDMA_MW_TYPE_2A,
+	MAX_RDMA_MW_TYPE
+};
+
+struct rdma_sq_atomic_wqe {
+	__le32 reserved1;
+	__le32 length;
+	__le32 xrc_srq;
+	u8 req_type;
+	u8 flags;
+#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_MASK         0x1
+#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_SHIFT        0
+#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_MASK     0x1
+#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_SHIFT    1
+#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_MASK    0x1
+#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_SHIFT   2
+#define RDMA_SQ_ATOMIC_WQE_SE_FLG_MASK           0x1
+#define RDMA_SQ_ATOMIC_WQE_SE_FLG_SHIFT          3
+#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_MASK       0x1
+#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_SHIFT      4
+#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_MASK  0x1
+#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_ATOMIC_WQE_RESERVED0_MASK        0x3
+#define RDMA_SQ_ATOMIC_WQE_RESERVED0_SHIFT       6
+	u8 wqe_size;
+	u8 prev_wqe_size;
+	struct regpair remote_va;
+	__le32 r_key;
+	__le32 reserved2;
+	struct regpair cmp_data;
+	struct regpair swap_data;
+};
+
+/* First element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_1st {
+	__le32 reserved1;
+	__le32 length;
+	__le32 xrc_srq;
+	u8 req_type;
+	u8 flags;
+#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_MASK       0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_SHIFT      0
+#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_MASK   0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_SHIFT  1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_MASK  0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_MASK         0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_SHIFT        3
+#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_MASK     0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_SHIFT    4
+#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_MASK      0x7
+#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_SHIFT     5
+	u8 wqe_size;
+	u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_2nd {
+	struct regpair remote_va;
+	__le32 r_key;
+	__le32 reserved2;
+};
+
+/* Third element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_3rd {
+	struct regpair cmp_data;
+	struct regpair swap_data;
+};
+
+struct rdma_sq_bind_wqe {
+	struct regpair addr;
+	__le32 l_key;
+	u8 req_type;
+	u8 flags;
+#define RDMA_SQ_BIND_WQE_COMP_FLG_MASK       0x1
+#define RDMA_SQ_BIND_WQE_COMP_FLG_SHIFT      0
+#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_MASK   0x1
+#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_SHIFT  1
+#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_MASK  0x1
+#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_BIND_WQE_SE_FLG_MASK         0x1
+#define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT        3
+#define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK     0x1
+#define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT    4
+#define RDMA_SQ_BIND_WQE_RESERVED0_MASK      0x7
+#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT     5
+	u8 wqe_size;
+	u8 prev_wqe_size;
+	u8 bind_ctrl;
+#define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK     0x1
+#define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT    0
+#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK        0x1
+#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT       1
+#define RDMA_SQ_BIND_WQE_RESERVED1_MASK      0x3F
+#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT     2
+	u8 access_ctrl;
+#define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK    0x1
+#define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT   0
+#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_MASK   0x1
+#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_SHIFT  1
+#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_MASK  0x1
+#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_BIND_WQE_LOCAL_READ_MASK     0x1
+#define RDMA_SQ_BIND_WQE_LOCAL_READ_SHIFT    3
+#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_MASK    0x1
+#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_SHIFT   4
+#define RDMA_SQ_BIND_WQE_RESERVED2_MASK      0x7
+#define RDMA_SQ_BIND_WQE_RESERVED2_SHIFT     5
+	u8 reserved3;
+	u8 length_hi;
+	__le32 length_lo;
+	__le32 parent_l_key;
+	__le32 reserved4;
+};
+
+/* First element (16 bytes) of bind wqe */
+struct rdma_sq_bind_wqe_1st {
+	struct regpair addr;
+	__le32 l_key;
+	u8 req_type;
+	u8 flags;
+#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_MASK       0x1
+#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_SHIFT      0
+#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_MASK   0x1
+#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_SHIFT  1
+#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_MASK  0x1
+#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_MASK         0x1
+#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_SHIFT        3
+#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_MASK     0x1
+#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_SHIFT    4
+#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_MASK      0x7
+#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_SHIFT     5
+	u8 wqe_size;
+	u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of bind wqe */
+struct rdma_sq_bind_wqe_2nd {
+	u8 bind_ctrl;
+#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK     0x1
+#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT    0
+#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK        0x1
+#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT       1
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK      0x3F
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT     2
+	u8 access_ctrl;
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK    0x1
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT   0
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_MASK   0x1
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_SHIFT  1
+#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_MASK  0x1
+#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_MASK     0x1
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_SHIFT    3
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_MASK    0x1
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_SHIFT   4
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_MASK      0x7
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_SHIFT     5
+	u8 reserved3;
+	u8 length_hi;
+	__le32 length_lo;
+	__le32 parent_l_key;
+	__le32 reserved4;
+};
+
+/* Structure with only the SQ WQE common
+ * fields. Size is of one SQ element (16B)
+ */
+struct rdma_sq_common_wqe {
+	__le32 reserved1[3];
+	u8 req_type;
+	u8 flags;
+#define RDMA_SQ_COMMON_WQE_COMP_FLG_MASK       0x1
+#define RDMA_SQ_COMMON_WQE_COMP_FLG_SHIFT      0
+#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_MASK   0x1
+#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_SHIFT  1
+#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_MASK  0x1
+#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_COMMON_WQE_SE_FLG_MASK         0x1
+#define RDMA_SQ_COMMON_WQE_SE_FLG_SHIFT        3
+#define RDMA_SQ_COMMON_WQE_INLINE_FLG_MASK     0x1
+#define RDMA_SQ_COMMON_WQE_INLINE_FLG_SHIFT    4
+#define RDMA_SQ_COMMON_WQE_RESERVED0_MASK      0x7
+#define RDMA_SQ_COMMON_WQE_RESERVED0_SHIFT     5
+	u8 wqe_size;
+	u8 prev_wqe_size;
+};
+
+struct rdma_sq_fmr_wqe {
+	struct regpair addr;
+	__le32 l_key;
+	u8 req_type;
+	u8 flags;
+#define RDMA_SQ_FMR_WQE_COMP_FLG_MASK                0x1
+#define RDMA_SQ_FMR_WQE_COMP_FLG_SHIFT               0
+#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_MASK            0x1
+#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_SHIFT           1
+#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_MASK           0x1
+#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_SHIFT          2
+#define RDMA_SQ_FMR_WQE_SE_FLG_MASK                  0x1
+#define RDMA_SQ_FMR_WQE_SE_FLG_SHIFT                 3
+#define RDMA_SQ_FMR_WQE_INLINE_FLG_MASK              0x1
+#define RDMA_SQ_FMR_WQE_INLINE_FLG_SHIFT             4
+#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_MASK         0x1
+#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_SHIFT        5
+#define RDMA_SQ_FMR_WQE_RESERVED0_MASK               0x3
+#define RDMA_SQ_FMR_WQE_RESERVED0_SHIFT              6
+	u8 wqe_size;
+	u8 prev_wqe_size;
+	u8 fmr_ctrl;
+#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_MASK           0x1F
+#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_SHIFT          0
+#define RDMA_SQ_FMR_WQE_ZERO_BASED_MASK              0x1
+#define RDMA_SQ_FMR_WQE_ZERO_BASED_SHIFT             5
+#define RDMA_SQ_FMR_WQE_BIND_EN_MASK                 0x1
+#define RDMA_SQ_FMR_WQE_BIND_EN_SHIFT                6
+#define RDMA_SQ_FMR_WQE_RESERVED1_MASK               0x1
+#define RDMA_SQ_FMR_WQE_RESERVED1_SHIFT              7
+	u8 access_ctrl;
+#define RDMA_SQ_FMR_WQE_REMOTE_READ_MASK             0x1
+#define RDMA_SQ_FMR_WQE_REMOTE_READ_SHIFT            0
+#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_MASK            0x1
+#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_SHIFT           1
+#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_MASK           0x1
+#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_SHIFT          2
+#define RDMA_SQ_FMR_WQE_LOCAL_READ_MASK              0x1
+#define RDMA_SQ_FMR_WQE_LOCAL_READ_SHIFT             3
+#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_MASK             0x1
+#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_SHIFT            4
+#define RDMA_SQ_FMR_WQE_RESERVED2_MASK               0x7
+#define RDMA_SQ_FMR_WQE_RESERVED2_SHIFT              5
+	u8 reserved3;
+	u8 length_hi;
+	__le32 length_lo;
+	struct regpair pbl_addr;
+	__le32 dif_base_ref_tag;
+	__le16 dif_app_tag;
+	__le16 dif_app_tag_mask;
+	__le16 dif_runt_crc_value;
+	__le16 dif_flags;
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK    0x1
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT   0
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK          0x1
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT         1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK      0x1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT     2
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK  0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK    0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT   4
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK    0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT   5
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK            0x1
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT           6
+#define RDMA_SQ_FMR_WQE_RESERVED4_MASK               0x1FF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT              7
+	__le32 Reserved5;
+};
+
+/* First element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_1st {
+	struct regpair addr;
+	__le32 l_key;
+	u8 req_type;
+	u8 flags;
+#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_MASK         0x1
+#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_SHIFT        0
+#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_MASK     0x1
+#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_SHIFT    1
+#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_MASK    0x1
+#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_SHIFT   2
+#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_MASK           0x1
+#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_SHIFT          3
+#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_MASK       0x1
+#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_SHIFT      4
+#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_MASK  0x1
+#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_MASK        0x3
+#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_SHIFT       6
+	u8 wqe_size;
+	u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_2nd {
+	u8 fmr_ctrl;
+#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_MASK  0x1F
+#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_MASK     0x1
+#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_SHIFT    5
+#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_MASK        0x1
+#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_SHIFT       6
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_MASK      0x1
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_SHIFT     7
+	u8 access_ctrl;
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_MASK    0x1
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_SHIFT   0
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_MASK   0x1
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_SHIFT  1
+#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_MASK  0x1
+#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_MASK     0x1
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_SHIFT    3
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_MASK    0x1
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_SHIFT   4
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_MASK      0x7
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_SHIFT     5
+	u8 reserved3;
+	u8 length_hi;
+	__le32 length_lo;
+	struct regpair pbl_addr;
+};
+
+/* Third element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_3rd {
+	__le32 dif_base_ref_tag;
+	__le16 dif_app_tag;
+	__le16 dif_app_tag_mask;
+	__le16 dif_runt_crc_value;
+	__le16 dif_flags;
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK    0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT   0
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK          0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT         1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK      0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT     2
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK  0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK    0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT   4
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK    0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT   5
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK            0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT           6
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK               0x1FF
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT              7
+	__le32 Reserved5;
+};
+
+struct rdma_sq_local_inv_wqe {
+	struct regpair reserved;
+	__le32 inv_l_key;
+	u8 req_type;
+	u8 flags;
+#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_MASK         0x1
+#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_SHIFT        0
+#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_MASK     0x1
+#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_SHIFT    1
+#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_MASK    0x1
+#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_SHIFT   2
+#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_MASK           0x1
+#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_SHIFT          3
+#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_MASK       0x1
+#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_SHIFT      4
+#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_MASK  0x1
+#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_MASK        0x3
+#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_SHIFT       6
+	u8 wqe_size;
+	u8 prev_wqe_size;
+};
+
+struct rdma_sq_rdma_wqe {
+	__le32 imm_data;
+	__le32 length;
+	__le32 xrc_srq;
+	u8 req_type;
+	u8 flags;
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK                  0x1
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT                 0
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK              0x1
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT             1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK             0x1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT            2
+#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK                    0x1
+#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT                   3
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK                0x1
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT               4
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK           0x1
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT          5
+#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK                 0x3
+#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT                6
+	u8 wqe_size;
+	u8 prev_wqe_size;
+	struct regpair remote_va;
+	__le32 r_key;
+	u8 dif_flags;
+#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK            0x1
+#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT           0
+#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK  0x1
+#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK   0x1
+#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT  2
+#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK                 0x1F
+#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT                3
+	u8 reserved2[3];
+};
+
+/* First element (16 bytes) of rdma wqe */
+struct rdma_sq_rdma_wqe_1st {
+	__le32 imm_data;
+	__le32 length;
+	__le32 xrc_srq;
+	u8 req_type;
+	u8 flags;
+#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_MASK         0x1
+#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT        0
+#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_MASK     0x1
+#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_SHIFT    1
+#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_MASK    0x1
+#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_SHIFT   2
+#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_MASK           0x1
+#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_SHIFT          3
+#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_MASK       0x1
+#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT      4
+#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK  0x1
+#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK        0x3
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT       6
+	u8 wqe_size;
+	u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of rdma wqe */
+struct rdma_sq_rdma_wqe_2nd {
+	struct regpair remote_va;
+	__le32 r_key;
+	u8 dif_flags;
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_MASK         0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_SHIFT        0
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_MASK  0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_MASK   0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_SHIFT  2
+#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_MASK              0x1F
+#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_SHIFT             3
+	u8 reserved2[3];
+};
+
+/* SQ WQE req type enumeration */
+enum rdma_sq_req_type {
+	RDMA_SQ_REQ_TYPE_SEND,
+	RDMA_SQ_REQ_TYPE_SEND_WITH_IMM,
+	RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE,
+	RDMA_SQ_REQ_TYPE_RDMA_WR,
+	RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM,
+	RDMA_SQ_REQ_TYPE_RDMA_RD,
+	RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP,
+	RDMA_SQ_REQ_TYPE_ATOMIC_ADD,
+	RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE,
+	RDMA_SQ_REQ_TYPE_FAST_MR,
+	RDMA_SQ_REQ_TYPE_BIND,
+	RDMA_SQ_REQ_TYPE_INVALID,
+	MAX_RDMA_SQ_REQ_TYPE
+};
+
+struct rdma_sq_send_wqe {
+	__le32 inv_key_or_imm_data;
+	__le32 length;
+	__le32 xrc_srq;
+	u8 req_type;
+	u8 flags;
+#define RDMA_SQ_SEND_WQE_COMP_FLG_MASK         0x1
+#define RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT        0
+#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_MASK     0x1
+#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_SHIFT    1
+#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_MASK    0x1
+#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_SHIFT   2
+#define RDMA_SQ_SEND_WQE_SE_FLG_MASK           0x1
+#define RDMA_SQ_SEND_WQE_SE_FLG_SHIFT          3
+#define RDMA_SQ_SEND_WQE_INLINE_FLG_MASK       0x1
+#define RDMA_SQ_SEND_WQE_INLINE_FLG_SHIFT      4
+#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_MASK  0x1
+#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_SEND_WQE_RESERVED0_MASK        0x3
+#define RDMA_SQ_SEND_WQE_RESERVED0_SHIFT       6
+	u8 wqe_size;
+	u8 prev_wqe_size;
+	__le32 reserved1[4];
+};
+
+struct rdma_sq_send_wqe_1st {
+	__le32 inv_key_or_imm_data;
+	__le32 length;
+	__le32 xrc_srq;
+	u8 req_type;
+	u8 flags;
+#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_MASK       0x1
+#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_SHIFT      0
+#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_MASK   0x1
+#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_SHIFT  1
+#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_MASK  0x1
+#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_MASK         0x1
+#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_SHIFT        3
+#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_MASK     0x1
+#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_SHIFT    4
+#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_MASK      0x7
+#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_SHIFT     5
+	u8 wqe_size;
+	u8 prev_wqe_size;
+};
+
+struct rdma_sq_send_wqe_2st {
+	__le32 reserved1[4];
+};
+
+#endif /* __QED_HSI_RDMA__ */
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
new file mode 100644
index 0000000..a615142
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -0,0 +1,3547 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+#include <linux/iommu.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
+#include "qedr_hsi.h"
+#include <linux/qed/qed_if.h>
+#include "qedr.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+#include "qedr_cm.h"
+
+#define DB_ADDR_SHIFT(addr)		((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
+
+int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+{
+	if (index > QEDR_ROCE_PKEY_TABLE_LEN)
+		return -EINVAL;
+
+	*pkey = QEDR_ROCE_PKEY_DEFAULT;
+	return 0;
+}
+
+int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
+		   union ib_gid *sgid)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibdev);
+	int rc = 0;
+
+	if (!rdma_cap_roce_gid_table(ibdev, port))
+		return -ENODEV;
+
+	rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
+	if (rc == -EAGAIN) {
+		memcpy(sgid, &zgid, sizeof(*sgid));
+		return 0;
+	}
+
+	DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
+		 sgid->global.interface_id, sgid->global.subnet_prefix);
+
+	return rc;
+}
+
+int qedr_add_gid(struct ib_device *device, u8 port_num,
+		 unsigned int index, const union ib_gid *gid,
+		 const struct ib_gid_attr *attr, void **context)
+{
+	if (!rdma_cap_roce_gid_table(device, port_num))
+		return -EINVAL;
+
+	if (port_num > QEDR_MAX_PORT)
+		return -EINVAL;
+
+	if (!context)
+		return -EINVAL;
+
+	return 0;
+}
+
+int qedr_del_gid(struct ib_device *device, u8 port_num,
+		 unsigned int index, void **context)
+{
+	if (!rdma_cap_roce_gid_table(device, port_num))
+		return -EINVAL;
+
+	if (port_num > QEDR_MAX_PORT)
+		return -EINVAL;
+
+	if (!context)
+		return -EINVAL;
+
+	return 0;
+}
+
+int qedr_query_device(struct ib_device *ibdev,
+		      struct ib_device_attr *attr, struct ib_udata *udata)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibdev);
+	struct qedr_device_attr *qattr = &dev->attr;
+
+	if (!dev->rdma_ctx) {
+		DP_ERR(dev,
+		       "qedr_query_device called with invalid params rdma_ctx=%p\n",
+		       dev->rdma_ctx);
+		return -EINVAL;
+	}
+
+	memset(attr, 0, sizeof(*attr));
+
+	attr->fw_ver = qattr->fw_ver;
+	attr->sys_image_guid = qattr->sys_image_guid;
+	attr->max_mr_size = qattr->max_mr_size;
+	attr->page_size_cap = qattr->page_size_caps;
+	attr->vendor_id = qattr->vendor_id;
+	attr->vendor_part_id = qattr->vendor_part_id;
+	attr->hw_ver = qattr->hw_ver;
+	attr->max_qp = qattr->max_qp;
+	attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
+	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
+	    IB_DEVICE_RC_RNR_NAK_GEN |
+	    IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
+
+	attr->max_sge = qattr->max_sge;
+	attr->max_sge_rd = qattr->max_sge;
+	attr->max_cq = qattr->max_cq;
+	attr->max_cqe = qattr->max_cqe;
+	attr->max_mr = qattr->max_mr;
+	attr->max_mw = qattr->max_mw;
+	attr->max_pd = qattr->max_pd;
+	attr->atomic_cap = dev->atomic_cap;
+	attr->max_fmr = qattr->max_fmr;
+	attr->max_map_per_fmr = 16;
+	attr->max_qp_init_rd_atom =
+	    1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
+	attr->max_qp_rd_atom =
+	    min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
+		attr->max_qp_init_rd_atom);
+
+	attr->max_srq = qattr->max_srq;
+	attr->max_srq_sge = qattr->max_srq_sge;
+	attr->max_srq_wr = qattr->max_srq_wr;
+
+	attr->local_ca_ack_delay = qattr->dev_ack_delay;
+	attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
+	attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
+	attr->max_ah = qattr->max_ah;
+
+	return 0;
+}
+
+#define QEDR_SPEED_SDR		(1)
+#define QEDR_SPEED_DDR		(2)
+#define QEDR_SPEED_QDR		(4)
+#define QEDR_SPEED_FDR10	(8)
+#define QEDR_SPEED_FDR		(16)
+#define QEDR_SPEED_EDR		(32)
+
+static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
+					    u8 *ib_width)
+{
+	switch (speed) {
+	case 1000:
+		*ib_speed = QEDR_SPEED_SDR;
+		*ib_width = IB_WIDTH_1X;
+		break;
+	case 10000:
+		*ib_speed = QEDR_SPEED_QDR;
+		*ib_width = IB_WIDTH_1X;
+		break;
+
+	case 20000:
+		*ib_speed = QEDR_SPEED_DDR;
+		*ib_width = IB_WIDTH_4X;
+		break;
+
+	case 25000:
+		*ib_speed = QEDR_SPEED_EDR;
+		*ib_width = IB_WIDTH_1X;
+		break;
+
+	case 40000:
+		*ib_speed = QEDR_SPEED_QDR;
+		*ib_width = IB_WIDTH_4X;
+		break;
+
+	case 50000:
+		*ib_speed = QEDR_SPEED_QDR;
+		*ib_width = IB_WIDTH_4X;
+		break;
+
+	case 100000:
+		*ib_speed = QEDR_SPEED_EDR;
+		*ib_width = IB_WIDTH_4X;
+		break;
+
+	default:
+		/* Unsupported */
+		*ib_speed = QEDR_SPEED_SDR;
+		*ib_width = IB_WIDTH_1X;
+	}
+}
+
+int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
+{
+	struct qedr_dev *dev;
+	struct qed_rdma_port *rdma_port;
+
+	dev = get_qedr_dev(ibdev);
+	if (port > 1) {
+		DP_ERR(dev, "invalid_port=0x%x\n", port);
+		return -EINVAL;
+	}
+
+	if (!dev->rdma_ctx) {
+		DP_ERR(dev, "rdma_ctx is NULL\n");
+		return -EINVAL;
+	}
+
+	rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
+	memset(attr, 0, sizeof(*attr));
+
+	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
+		attr->state = IB_PORT_ACTIVE;
+		attr->phys_state = 5;
+	} else {
+		attr->state = IB_PORT_DOWN;
+		attr->phys_state = 3;
+	}
+	attr->max_mtu = IB_MTU_4096;
+	attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
+	attr->lid = 0;
+	attr->lmc = 0;
+	attr->sm_lid = 0;
+	attr->sm_sl = 0;
+	attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
+	attr->gid_tbl_len = QEDR_MAX_SGID;
+	attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
+	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
+	attr->qkey_viol_cntr = 0;
+	get_link_speed_and_width(rdma_port->link_speed,
+				 &attr->active_speed, &attr->active_width);
+	attr->max_msg_sz = rdma_port->max_msg_size;
+	attr->max_vl_num = 4;
+
+	return 0;
+}
+
+int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
+		     struct ib_port_modify *props)
+{
+	struct qedr_dev *dev;
+
+	dev = get_qedr_dev(ibdev);
+	if (port > 1) {
+		DP_ERR(dev, "invalid_port=0x%x\n", port);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
+			 unsigned long len)
+{
+	struct qedr_mm *mm;
+
+	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+	if (!mm)
+		return -ENOMEM;
+
+	mm->key.phy_addr = phy_addr;
+	/* This function might be called with a length which is not a multiple
+	 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
+	 * forces this granularity by increasing the requested size if needed.
+	 * When qedr_mmap is called, it will search the list with the updated
+	 * length as a key. To prevent search failures, the length is rounded up
+	 * in advance to PAGE_SIZE.
+	 */
+	mm->key.len = roundup(len, PAGE_SIZE);
+	INIT_LIST_HEAD(&mm->entry);
+
+	mutex_lock(&uctx->mm_list_lock);
+	list_add(&mm->entry, &uctx->mm_head);
+	mutex_unlock(&uctx->mm_list_lock);
+
+	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+		 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
+		 (unsigned long long)mm->key.phy_addr,
+		 (unsigned long)mm->key.len, uctx);
+
+	return 0;
+}
+
+static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
+			     unsigned long len)
+{
+	bool found = false;
+	struct qedr_mm *mm;
+
+	mutex_lock(&uctx->mm_list_lock);
+	list_for_each_entry(mm, &uctx->mm_head, entry) {
+		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
+			continue;
+
+		found = true;
+		break;
+	}
+	mutex_unlock(&uctx->mm_list_lock);
+	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+		 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
+		 mm->key.phy_addr, mm->key.len, uctx, found);
+
+	return found;
+}
+
+struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
+					struct ib_udata *udata)
+{
+	int rc;
+	struct qedr_ucontext *ctx;
+	struct qedr_alloc_ucontext_resp uresp;
+	struct qedr_dev *dev = get_qedr_dev(ibdev);
+	struct qed_rdma_add_user_out_params oparams;
+
+	if (!udata)
+		return ERR_PTR(-EFAULT);
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
+	if (rc) {
+		DP_ERR(dev,
+		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
+		       rc);
+		goto err;
+	}
+
+	ctx->dpi = oparams.dpi;
+	ctx->dpi_addr = oparams.dpi_addr;
+	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
+	ctx->dpi_size = oparams.dpi_size;
+	INIT_LIST_HEAD(&ctx->mm_head);
+	mutex_init(&ctx->mm_list_lock);
+
+	memset(&uresp, 0, sizeof(uresp));
+
+	uresp.db_pa = ctx->dpi_phys_addr;
+	uresp.db_size = ctx->dpi_size;
+	uresp.max_send_wr = dev->attr.max_sqe;
+	uresp.max_recv_wr = dev->attr.max_rqe;
+	uresp.max_srq_wr = dev->attr.max_srq_wr;
+	uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+	uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
+	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
+	uresp.max_cqes = QEDR_MAX_CQES;
+
+	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+	if (rc)
+		goto err;
+
+	ctx->dev = dev;
+
+	rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
+	if (rc)
+		goto err;
+
+	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
+		 &ctx->ibucontext);
+	return &ctx->ibucontext;
+
+err:
+	kfree(ctx);
+	return ERR_PTR(rc);
+}
+
+int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
+{
+	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
+	struct qedr_mm *mm, *tmp;
+	int status = 0;
+
+	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
+		 uctx);
+	uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
+
+	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
+		DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+			 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
+			 mm->key.phy_addr, mm->key.len, uctx);
+		list_del(&mm->entry);
+		kfree(mm);
+	}
+
+	kfree(uctx);
+	return status;
+}
+
+int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+	struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
+	struct qedr_dev *dev = get_qedr_dev(context->device);
+	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
+	u64 unmapped_db = dev->db_phys_addr;
+	unsigned long len = (vma->vm_end - vma->vm_start);
+	int rc = 0;
+	bool found;
+
+	DP_DEBUG(dev, QEDR_MSG_INIT,
+		 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
+		 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
+	if (vma->vm_start & (PAGE_SIZE - 1)) {
+		DP_ERR(dev, "Vma_start not page aligned = %ld\n",
+		       vma->vm_start);
+		return -EINVAL;
+	}
+
+	found = qedr_search_mmap(ucontext, vm_page, len);
+	if (!found) {
+		DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
+		       vma->vm_pgoff);
+		return -EINVAL;
+	}
+
+	DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
+
+	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
+						     dev->db_size))) {
+		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
+		if (vma->vm_flags & VM_READ) {
+			DP_ERR(dev, "Trying to map doorbell bar for read\n");
+			return -EPERM;
+		}
+
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+		rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+					PAGE_SIZE, vma->vm_page_prot);
+	} else {
+		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
+		rc = remap_pfn_range(vma, vma->vm_start,
+				     vma->vm_pgoff, len, vma->vm_page_prot);
+	}
+	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
+	return rc;
+}
+
+struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
+			    struct ib_ucontext *context, struct ib_udata *udata)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibdev);
+	struct qedr_ucontext *uctx = NULL;
+	struct qedr_alloc_pd_uresp uresp;
+	struct qedr_pd *pd;
+	u16 pd_id;
+	int rc;
+
+	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
+		 (udata && context) ? "User Lib" : "Kernel");
+
+	if (!dev->rdma_ctx) {
+		DP_ERR(dev, "invlaid RDMA context\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return ERR_PTR(-ENOMEM);
+
+	dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+
+	uresp.pd_id = pd_id;
+	pd->pd_id = pd_id;
+
+	if (udata && context) {
+		rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+		if (rc)
+			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
+		uctx = get_qedr_ucontext(context);
+		uctx->pd = pd;
+		pd->uctx = uctx;
+	}
+
+	return &pd->ibpd;
+}
+
+int qedr_dealloc_pd(struct ib_pd *ibpd)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+	struct qedr_pd *pd = get_qedr_pd(ibpd);
+
+	if (!pd)
+		pr_err("Invalid PD received in dealloc_pd\n");
+
+	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
+	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
+
+	kfree(pd);
+
+	return 0;
+}
+
+static void qedr_free_pbl(struct qedr_dev *dev,
+			  struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
+{
+	struct pci_dev *pdev = dev->pdev;
+	int i;
+
+	for (i = 0; i < pbl_info->num_pbls; i++) {
+		if (!pbl[i].va)
+			continue;
+		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
+				  pbl[i].va, pbl[i].pa);
+	}
+
+	kfree(pbl);
+}
+
+#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
+#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
+
+#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
+#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
+#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
+
+static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
+					   struct qedr_pbl_info *pbl_info,
+					   gfp_t flags)
+{
+	struct pci_dev *pdev = dev->pdev;
+	struct qedr_pbl *pbl_table;
+	dma_addr_t *pbl_main_tbl;
+	dma_addr_t pa;
+	void *va;
+	int i;
+
+	pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
+	if (!pbl_table)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < pbl_info->num_pbls; i++) {
+		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
+					&pa, flags);
+		if (!va)
+			goto err;
+
+		memset(va, 0, pbl_info->pbl_size);
+		pbl_table[i].va = va;
+		pbl_table[i].pa = pa;
+	}
+
+	/* Two-Layer PBLs, if we have more than one pbl we need to initialize
+	 * the first one with physical pointers to all of the rest
+	 */
+	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
+	for (i = 0; i < pbl_info->num_pbls - 1; i++)
+		pbl_main_tbl[i] = pbl_table[i + 1].pa;
+
+	return pbl_table;
+
+err:
+	for (i--; i >= 0; i--)
+		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
+				  pbl_table[i].va, pbl_table[i].pa);
+
+	qedr_free_pbl(dev, pbl_info, pbl_table);
+
+	return ERR_PTR(-ENOMEM);
+}
+
+static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
+				struct qedr_pbl_info *pbl_info,
+				u32 num_pbes, int two_layer_capable)
+{
+	u32 pbl_capacity;
+	u32 pbl_size;
+	u32 num_pbls;
+
+	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
+		if (num_pbes > MAX_PBES_TWO_LAYER) {
+			DP_ERR(dev, "prepare pbl table: too many pages %d\n",
+			       num_pbes);
+			return -EINVAL;
+		}
+
+		/* calculate required pbl page size */
+		pbl_size = MIN_FW_PBL_PAGE_SIZE;
+		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
+			       NUM_PBES_ON_PAGE(pbl_size);
+
+		while (pbl_capacity < num_pbes) {
+			pbl_size *= 2;
+			pbl_capacity = pbl_size / sizeof(u64);
+			pbl_capacity = pbl_capacity * pbl_capacity;
+		}
+
+		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
+		num_pbls++;	/* One for the layer0 ( points to the pbls) */
+		pbl_info->two_layered = true;
+	} else {
+		/* One layered PBL */
+		num_pbls = 1;
+		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
+				 roundup_pow_of_two((num_pbes * sizeof(u64))));
+		pbl_info->two_layered = false;
+	}
+
+	pbl_info->num_pbls = num_pbls;
+	pbl_info->pbl_size = pbl_size;
+	pbl_info->num_pbes = num_pbes;
+
+	DP_DEBUG(dev, QEDR_MSG_MR,
+		 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
+		 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
+
+	return 0;
+}
+
+static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
+			       struct qedr_pbl *pbl,
+			       struct qedr_pbl_info *pbl_info)
+{
+	int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+	struct qedr_pbl *pbl_tbl;
+	struct scatterlist *sg;
+	struct regpair *pbe;
+	int entry;
+	u32 addr;
+
+	if (!pbl_info->num_pbes)
+		return;
+
+	/* If we have a two layered pbl, the first pbl points to the rest
+	 * of the pbls and the first entry lays on the second pbl in the table
+	 */
+	if (pbl_info->two_layered)
+		pbl_tbl = &pbl[1];
+	else
+		pbl_tbl = pbl;
+
+	pbe = (struct regpair *)pbl_tbl->va;
+	if (!pbe) {
+		DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
+		return;
+	}
+
+	pbe_cnt = 0;
+
+	shift = ilog2(umem->page_size);
+
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+		pages = sg_dma_len(sg) >> shift;
+		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
+			/* store the page address in pbe */
+			pbe->lo = cpu_to_le32(sg_dma_address(sg) +
+					      umem->page_size * pg_cnt);
+			addr = upper_32_bits(sg_dma_address(sg) +
+					     umem->page_size * pg_cnt);
+			pbe->hi = cpu_to_le32(addr);
+			pbe_cnt++;
+			total_num_pbes++;
+			pbe++;
+
+			if (total_num_pbes == pbl_info->num_pbes)
+				return;
+
+			/* If the given pbl is full storing the pbes,
+			 * move to next pbl.
+			 */
+			if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
+				pbl_tbl++;
+				pbe = (struct regpair *)pbl_tbl->va;
+				pbe_cnt = 0;
+			}
+		}
+	}
+}
+
+static int qedr_copy_cq_uresp(struct qedr_dev *dev,
+			      struct qedr_cq *cq, struct ib_udata *udata)
+{
+	struct qedr_create_cq_uresp uresp;
+	int rc;
+
+	memset(&uresp, 0, sizeof(uresp));
+
+	uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+	uresp.icid = cq->icid;
+
+	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+	if (rc)
+		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
+
+	return rc;
+}
+
+static void consume_cqe(struct qedr_cq *cq)
+{
+	if (cq->latest_cqe == cq->toggle_cqe)
+		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
+
+	cq->latest_cqe = qed_chain_consume(&cq->pbl);
+}
+
+static inline int qedr_align_cq_entries(int entries)
+{
+	u64 size, aligned_size;
+
+	/* We allocate an extra entry that we don't report to the FW. */
+	size = (entries + 1) * QEDR_CQE_SIZE;
+	aligned_size = ALIGN(size, PAGE_SIZE);
+
+	return aligned_size / QEDR_CQE_SIZE;
+}
+
+static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
+				       struct qedr_dev *dev,
+				       struct qedr_userq *q,
+				       u64 buf_addr, size_t buf_len,
+				       int access, int dmasync)
+{
+	int page_cnt;
+	int rc;
+
+	q->buf_addr = buf_addr;
+	q->buf_len = buf_len;
+	q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
+	if (IS_ERR(q->umem)) {
+		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
+		       PTR_ERR(q->umem));
+		return PTR_ERR(q->umem);
+	}
+
+	page_cnt = ib_umem_page_count(q->umem);
+	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
+	if (rc)
+		goto err0;
+
+	q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
+	if (IS_ERR_OR_NULL(q->pbl_tbl))
+		goto err0;
+
+	qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
+
+	return 0;
+
+err0:
+	ib_umem_release(q->umem);
+
+	return rc;
+}
+
+static inline void qedr_init_cq_params(struct qedr_cq *cq,
+				       struct qedr_ucontext *ctx,
+				       struct qedr_dev *dev, int vector,
+				       int chain_entries, int page_cnt,
+				       u64 pbl_ptr,
+				       struct qed_rdma_create_cq_in_params
+				       *params)
+{
+	memset(params, 0, sizeof(*params));
+	params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
+	params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
+	params->cnq_id = vector;
+	params->cq_size = chain_entries - 1;
+	params->dpi = (ctx) ? ctx->dpi : dev->dpi;
+	params->pbl_num_pages = page_cnt;
+	params->pbl_ptr = pbl_ptr;
+	params->pbl_two_level = 0;
+}
+
+static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
+{
+	/* Flush data before signalling doorbell */
+	wmb();
+	cq->db.data.agg_flags = flags;
+	cq->db.data.value = cpu_to_le32(cons);
+	writeq(cq->db.raw, cq->db_addr);
+
+	/* Make sure write would stick */
+	mmiowb();
+}
+
+int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+	struct qedr_cq *cq = get_qedr_cq(ibcq);
+	unsigned long sflags;
+
+	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+		return 0;
+
+	spin_lock_irqsave(&cq->cq_lock, sflags);
+
+	cq->arm_flags = 0;
+
+	if (flags & IB_CQ_SOLICITED)
+		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
+
+	if (flags & IB_CQ_NEXT_COMP)
+		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
+
+	doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
+
+	spin_unlock_irqrestore(&cq->cq_lock, sflags);
+
+	return 0;
+}
+
+struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
+			     const struct ib_cq_init_attr *attr,
+			     struct ib_ucontext *ib_ctx, struct ib_udata *udata)
+{
+	struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
+	struct qed_rdma_destroy_cq_out_params destroy_oparams;
+	struct qed_rdma_destroy_cq_in_params destroy_iparams;
+	struct qedr_dev *dev = get_qedr_dev(ibdev);
+	struct qed_rdma_create_cq_in_params params;
+	struct qedr_create_cq_ureq ureq;
+	int vector = attr->comp_vector;
+	int entries = attr->cqe;
+	struct qedr_cq *cq;
+	int chain_entries;
+	int page_cnt;
+	u64 pbl_ptr;
+	u16 icid;
+	int rc;
+
+	DP_DEBUG(dev, QEDR_MSG_INIT,
+		 "create_cq: called from %s. entries=%d, vector=%d\n",
+		 udata ? "User Lib" : "Kernel", entries, vector);
+
+	if (entries > QEDR_MAX_CQES) {
+		DP_ERR(dev,
+		       "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
+		       entries, QEDR_MAX_CQES);
+		return ERR_PTR(-EINVAL);
+	}
+
+	chain_entries = qedr_align_cq_entries(entries);
+	chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
+
+	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+	if (!cq)
+		return ERR_PTR(-ENOMEM);
+
+	if (udata) {
+		memset(&ureq, 0, sizeof(ureq));
+		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+			DP_ERR(dev,
+			       "create cq: problem copying data from user space\n");
+			goto err0;
+		}
+
+		if (!ureq.len) {
+			DP_ERR(dev,
+			       "create cq: cannot create a cq with 0 entries\n");
+			goto err0;
+		}
+
+		cq->cq_type = QEDR_CQ_TYPE_USER;
+
+		rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
+					  ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
+		if (rc)
+			goto err0;
+
+		pbl_ptr = cq->q.pbl_tbl->pa;
+		page_cnt = cq->q.pbl_info.num_pbes;
+	} else {
+		cq->cq_type = QEDR_CQ_TYPE_KERNEL;
+
+		rc = dev->ops->common->chain_alloc(dev->cdev,
+						   QED_CHAIN_USE_TO_CONSUME,
+						   QED_CHAIN_MODE_PBL,
+						   QED_CHAIN_CNT_TYPE_U32,
+						   chain_entries,
+						   sizeof(union rdma_cqe),
+						   &cq->pbl);
+		if (rc)
+			goto err1;
+
+		page_cnt = qed_chain_get_page_cnt(&cq->pbl);
+		pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
+	}
+
+	qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
+			    pbl_ptr, &params);
+
+	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
+	if (rc)
+		goto err2;
+
+	cq->icid = icid;
+	cq->sig = QEDR_CQ_MAGIC_NUMBER;
+	spin_lock_init(&cq->cq_lock);
+
+	if (ib_ctx) {
+		rc = qedr_copy_cq_uresp(dev, cq, udata);
+		if (rc)
+			goto err3;
+	} else {
+		/* Generate doorbell address. */
+		cq->db_addr = dev->db_addr +
+		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+		cq->db.data.icid = cq->icid;
+		cq->db.data.params = DB_AGG_CMD_SET <<
+		    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
+
+		/* point to the very last element, passing it we will toggle */
+		cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
+		cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
+		cq->latest_cqe = NULL;
+		consume_cqe(cq);
+		cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+	}
+
+	DP_DEBUG(dev, QEDR_MSG_CQ,
+		 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
+		 cq->icid, cq, params.cq_size);
+
+	return &cq->ibcq;
+
+err3:
+	destroy_iparams.icid = cq->icid;
+	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
+				  &destroy_oparams);
+err2:
+	if (udata)
+		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
+	else
+		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+err1:
+	if (udata)
+		ib_umem_release(cq->q.umem);
+err0:
+	kfree(cq);
+	return ERR_PTR(-EINVAL);
+}
+
+int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+	struct qedr_cq *cq = get_qedr_cq(ibcq);
+
+	DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
+
+	return 0;
+}
+
+int qedr_destroy_cq(struct ib_cq *ibcq)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+	struct qed_rdma_destroy_cq_out_params oparams;
+	struct qed_rdma_destroy_cq_in_params iparams;
+	struct qedr_cq *cq = get_qedr_cq(ibcq);
+
+	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d", cq->icid);
+
+	/* GSIs CQs are handled by driver, so they don't exist in the FW */
+	if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
+		iparams.icid = cq->icid;
+		dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+	}
+
+	if (ibcq->uobject && ibcq->uobject->context) {
+		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
+		ib_umem_release(cq->q.umem);
+	}
+
+	kfree(cq);
+
+	return 0;
+}
+
+static inline int get_gid_info_from_table(struct ib_qp *ibqp,
+					  struct ib_qp_attr *attr,
+					  int attr_mask,
+					  struct qed_rdma_modify_qp_in_params
+					  *qp_params)
+{
+	enum rdma_network_type nw_type;
+	struct ib_gid_attr gid_attr;
+	union ib_gid gid;
+	u32 ipv4_addr;
+	int rc = 0;
+	int i;
+
+	rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
+			       attr->ah_attr.grh.sgid_index, &gid, &gid_attr);
+	if (rc)
+		return rc;
+
+	if (!memcmp(&gid, &zgid, sizeof(gid)))
+		return -ENOENT;
+
+	if (gid_attr.ndev) {
+		qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
+
+		dev_put(gid_attr.ndev);
+		nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
+		switch (nw_type) {
+		case RDMA_NETWORK_IPV6:
+			memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
+			       sizeof(qp_params->sgid));
+			memcpy(&qp_params->dgid.bytes[0],
+			       &attr->ah_attr.grh.dgid,
+			       sizeof(qp_params->dgid));
+			qp_params->roce_mode = ROCE_V2_IPV6;
+			SET_FIELD(qp_params->modify_flags,
+				  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
+			break;
+		case RDMA_NETWORK_IB:
+			memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
+			       sizeof(qp_params->sgid));
+			memcpy(&qp_params->dgid.bytes[0],
+			       &attr->ah_attr.grh.dgid,
+			       sizeof(qp_params->dgid));
+			qp_params->roce_mode = ROCE_V1;
+			break;
+		case RDMA_NETWORK_IPV4:
+			memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
+			memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
+			ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
+			qp_params->sgid.ipv4_addr = ipv4_addr;
+			ipv4_addr =
+			    qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
+			qp_params->dgid.ipv4_addr = ipv4_addr;
+			SET_FIELD(qp_params->modify_flags,
+				  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
+			qp_params->roce_mode = ROCE_V2_IPV4;
+			break;
+		}
+	}
+
+	for (i = 0; i < 4; i++) {
+		qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
+		qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
+	}
+
+	if (qp_params->vlan_id >= VLAN_CFI_MASK)
+		qp_params->vlan_id = 0;
+
+	return 0;
+}
+
+static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+	qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
+	ib_umem_release(qp->usq.umem);
+}
+
+static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+	qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
+	ib_umem_release(qp->urq.umem);
+}
+
+static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+	dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
+	kfree(qp->wqe_wr_id);
+}
+
+static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+	dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
+	kfree(qp->rqe_wr_id);
+}
+
+static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
+			       struct ib_qp_init_attr *attrs)
+{
+	struct qedr_device_attr *qattr = &dev->attr;
+
+	/* QP0... attrs->qp_type == IB_QPT_GSI */
+	if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
+		DP_DEBUG(dev, QEDR_MSG_QP,
+			 "create qp: unsupported qp type=0x%x requested\n",
+			 attrs->qp_type);
+		return -EINVAL;
+	}
+
+	if (attrs->cap.max_send_wr > qattr->max_sqe) {
+		DP_ERR(dev,
+		       "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
+		       attrs->cap.max_send_wr, qattr->max_sqe);
+		return -EINVAL;
+	}
+
+	if (attrs->cap.max_inline_data > qattr->max_inline) {
+		DP_ERR(dev,
+		       "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
+		       attrs->cap.max_inline_data, qattr->max_inline);
+		return -EINVAL;
+	}
+
+	if (attrs->cap.max_send_sge > qattr->max_sge) {
+		DP_ERR(dev,
+		       "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
+		       attrs->cap.max_send_sge, qattr->max_sge);
+		return -EINVAL;
+	}
+
+	if (attrs->cap.max_recv_sge > qattr->max_sge) {
+		DP_ERR(dev,
+		       "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
+		       attrs->cap.max_recv_sge, qattr->max_sge);
+		return -EINVAL;
+	}
+
+	/* Unprivileged user space cannot create special QP */
+	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
+		DP_ERR(dev,
+		       "create qp: userspace can't create special QPs of type=0x%x\n",
+		       attrs->qp_type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
+			       struct qedr_qp *qp)
+{
+	uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+	uresp->rq_icid = qp->icid;
+}
+
+static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
+			       struct qedr_qp *qp)
+{
+	uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+	uresp->sq_icid = qp->icid + 1;
+}
+
+static int qedr_copy_qp_uresp(struct qedr_dev *dev,
+			      struct qedr_qp *qp, struct ib_udata *udata)
+{
+	struct qedr_create_qp_uresp uresp;
+	int rc;
+
+	memset(&uresp, 0, sizeof(uresp));
+	qedr_copy_sq_uresp(&uresp, qp);
+	qedr_copy_rq_uresp(&uresp, qp);
+
+	uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
+	uresp.qp_id = qp->qp_id;
+
+	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+	if (rc)
+		DP_ERR(dev,
+		       "create qp: failed a copy to user space with qp icid=0x%x.\n",
+		       qp->icid);
+
+	return rc;
+}
+
+static void qedr_set_qp_init_params(struct qedr_dev *dev,
+				    struct qedr_qp *qp,
+				    struct qedr_pd *pd,
+				    struct ib_qp_init_attr *attrs)
+{
+	qp->pd = pd;
+
+	spin_lock_init(&qp->q_lock);
+
+	qp->qp_type = attrs->qp_type;
+	qp->max_inline_data = attrs->cap.max_inline_data;
+	qp->sq.max_sges = attrs->cap.max_send_sge;
+	qp->state = QED_ROCE_QP_STATE_RESET;
+	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
+	qp->sq_cq = get_qedr_cq(attrs->send_cq);
+	qp->rq_cq = get_qedr_cq(attrs->recv_cq);
+	qp->dev = dev;
+
+	DP_DEBUG(dev, QEDR_MSG_QP,
+		 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
+		 pd->pd_id, qp->qp_type, qp->max_inline_data,
+		 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
+	DP_DEBUG(dev, QEDR_MSG_QP,
+		 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
+		 qp->sq.max_sges, qp->sq_cq->icid);
+	qp->rq.max_sges = attrs->cap.max_recv_sge;
+	DP_DEBUG(dev, QEDR_MSG_QP,
+		 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
+		 qp->rq.max_sges, qp->rq_cq->icid);
+}
+
+static inline void
+qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params,
+			 struct qedr_create_qp_ureq *ureq)
+{
+	/* QP handle to be written in CQE */
+	params->qp_handle_lo = ureq->qp_handle_lo;
+	params->qp_handle_hi = ureq->qp_handle_hi;
+}
+
+static inline void
+qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+	qp->sq.db = dev->db_addr +
+		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+	qp->sq.db_data.data.icid = qp->icid + 1;
+}
+
+static inline void
+qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+	qp->rq.db = dev->db_addr +
+		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+	qp->rq.db_data.data.icid = qp->icid;
+}
+
+static inline int
+qedr_init_qp_kernel_params_rq(struct qedr_dev *dev,
+			      struct qedr_qp *qp, struct ib_qp_init_attr *attrs)
+{
+	/* Allocate driver internal RQ array */
+	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
+				GFP_KERNEL);
+	if (!qp->rqe_wr_id)
+		return -ENOMEM;
+
+	DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr);
+
+	return 0;
+}
+
+static inline int
+qedr_init_qp_kernel_params_sq(struct qedr_dev *dev,
+			      struct qedr_qp *qp,
+			      struct ib_qp_init_attr *attrs,
+			      struct qed_rdma_create_qp_in_params *params)
+{
+	u32 temp_max_wr;
+
+	/* Allocate driver internal SQ array */
+	temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier;
+	temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe);
+
+	/* temp_max_wr < attr->max_sqe < u16 so the casting is safe */
+	qp->sq.max_wr = (u16)temp_max_wr;
+	qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
+				GFP_KERNEL);
+	if (!qp->wqe_wr_id)
+		return -ENOMEM;
+
+	DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr);
+
+	/* QP handle to be written in CQE */
+	params->qp_handle_lo = lower_32_bits((uintptr_t)qp);
+	params->qp_handle_hi = upper_32_bits((uintptr_t)qp);
+
+	return 0;
+}
+
+static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev,
+					 struct qedr_qp *qp,
+					 struct ib_qp_init_attr *attrs)
+{
+	u32 n_sq_elems, n_sq_entries;
+	int rc;
+
+	/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
+	 * the ring. The ring should allow at least a single WR, even if the
+	 * user requested none, due to allocation issues.
+	 */
+	n_sq_entries = attrs->cap.max_send_wr;
+	n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
+	n_sq_entries = max_t(u32, n_sq_entries, 1);
+	n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+	rc = dev->ops->common->chain_alloc(dev->cdev,
+					   QED_CHAIN_USE_TO_PRODUCE,
+					   QED_CHAIN_MODE_PBL,
+					   QED_CHAIN_CNT_TYPE_U32,
+					   n_sq_elems,
+					   QEDR_SQE_ELEMENT_SIZE,
+					   &qp->sq.pbl);
+	if (rc) {
+		DP_ERR(dev, "failed to allocate QP %p SQ\n", qp);
+		return rc;
+	}
+
+	DP_DEBUG(dev, QEDR_MSG_SQ,
+		 "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n",
+		 qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr,
+		 n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc);
+	return 0;
+}
+
+static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev,
+					 struct qedr_qp *qp,
+					 struct ib_qp_init_attr *attrs)
+{
+	u32 n_rq_elems, n_rq_entries;
+	int rc;
+
+	/* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
+	 * the ring. There ring should allow at least a single WR, even if the
+	 * user requested none, due to allocation issues.
+	 */
+	n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1);
+	n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
+	rc = dev->ops->common->chain_alloc(dev->cdev,
+					   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+					   QED_CHAIN_MODE_PBL,
+					   QED_CHAIN_CNT_TYPE_U32,
+					   n_rq_elems,
+					   QEDR_RQE_ELEMENT_SIZE,
+					   &qp->rq.pbl);
+
+	if (rc) {
+		DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp);
+		return -ENOMEM;
+	}
+
+	DP_DEBUG(dev, QEDR_MSG_RQ,
+		 "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n",
+		 qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr,
+		 n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc);
+
+	/* n_rq_entries < u16 so the casting is safe */
+	qp->rq.max_wr = (u16)n_rq_entries;
+
+	return 0;
+}
+
+static inline void
+qedr_init_qp_in_params_sq(struct qedr_dev *dev,
+			  struct qedr_pd *pd,
+			  struct qedr_qp *qp,
+			  struct ib_qp_init_attr *attrs,
+			  struct ib_udata *udata,
+			  struct qed_rdma_create_qp_in_params *params)
+{
+	/* QP handle to be written in an async event */
+	params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
+	params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
+
+	params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
+	params->fmr_and_reserved_lkey = !udata;
+	params->pd = pd->pd_id;
+	params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
+	params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
+	params->max_sq_sges = 0;
+	params->stats_queue = 0;
+
+	if (udata) {
+		params->sq_num_pages = qp->usq.pbl_info.num_pbes;
+		params->sq_pbl_ptr = qp->usq.pbl_tbl->pa;
+	} else {
+		params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
+		params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
+	}
+}
+
+static inline void
+qedr_init_qp_in_params_rq(struct qedr_qp *qp,
+			  struct ib_qp_init_attr *attrs,
+			  struct ib_udata *udata,
+			  struct qed_rdma_create_qp_in_params *params)
+{
+	params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
+	params->srq_id = 0;
+	params->use_srq = false;
+
+	if (udata) {
+		params->rq_num_pages = qp->urq.pbl_info.num_pbes;
+		params->rq_pbl_ptr = qp->urq.pbl_tbl->pa;
+	} else {
+		params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
+		params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
+	}
+}
+
+static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+	DP_DEBUG(dev, QEDR_MSG_QP,
+		 "create qp: successfully created user QP. qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n",
+		 qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
+		 qp->urq.buf_len);
+}
+
+static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx,
+				    struct qedr_dev *dev,
+				    struct qedr_qp *qp,
+				    struct qedr_create_qp_ureq *ureq)
+{
+	int rc;
+
+	/* SQ - read access only (0), dma sync not required (0) */
+	rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr,
+				  ureq->sq_len, 0, 0);
+	if (rc)
+		return rc;
+
+	/* RQ - read access only (0), dma sync not required (0) */
+	rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr,
+				  ureq->rq_len, 0, 0);
+
+	if (rc)
+		qedr_cleanup_user_sq(dev, qp);
+	return rc;
+}
+
+static inline int
+qedr_init_kernel_qp(struct qedr_dev *dev,
+		    struct qedr_qp *qp,
+		    struct ib_qp_init_attr *attrs,
+		    struct qed_rdma_create_qp_in_params *params)
+{
+	int rc;
+
+	rc = qedr_init_qp_kernel_sq(dev, qp, attrs);
+	if (rc) {
+		DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp);
+		return rc;
+	}
+
+	rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params);
+	if (rc) {
+		dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
+		DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp);
+		return rc;
+	}
+
+	rc = qedr_init_qp_kernel_rq(dev, qp, attrs);
+	if (rc) {
+		qedr_cleanup_kernel_sq(dev, qp);
+		DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp);
+		return rc;
+	}
+
+	rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs);
+	if (rc) {
+		DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp);
+		qedr_cleanup_kernel_sq(dev, qp);
+		dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
+		return rc;
+	}
+
+	return rc;
+}
+
+struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
+			     struct ib_qp_init_attr *attrs,
+			     struct ib_udata *udata)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+	struct qed_rdma_create_qp_out_params out_params;
+	struct qed_rdma_create_qp_in_params in_params;
+	struct qedr_pd *pd = get_qedr_pd(ibpd);
+	struct ib_ucontext *ib_ctx = NULL;
+	struct qedr_ucontext *ctx = NULL;
+	struct qedr_create_qp_ureq ureq;
+	struct qedr_qp *qp;
+	int rc = 0;
+
+	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
+		 udata ? "user library" : "kernel", pd);
+
+	rc = qedr_check_qp_attrs(ibpd, dev, attrs);
+	if (rc)
+		return ERR_PTR(rc);
+
+	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+	if (!qp)
+		return ERR_PTR(-ENOMEM);
+
+	if (attrs->srq)
+		return ERR_PTR(-EINVAL);
+
+	DP_DEBUG(dev, QEDR_MSG_QP,
+		 "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
+		 get_qedr_cq(attrs->send_cq),
+		 get_qedr_cq(attrs->send_cq)->icid,
+		 get_qedr_cq(attrs->recv_cq),
+		 get_qedr_cq(attrs->recv_cq)->icid);
+
+	qedr_set_qp_init_params(dev, qp, pd, attrs);
+
+	if (attrs->qp_type == IB_QPT_GSI) {
+		if (udata) {
+			DP_ERR(dev,
+			       "create qp: unexpected udata when creating GSI QP\n");
+			goto err0;
+		}
+		return qedr_create_gsi_qp(dev, attrs, qp);
+	}
+
+	memset(&in_params, 0, sizeof(in_params));
+
+	if (udata) {
+		if (!(udata && ibpd->uobject && ibpd->uobject->context))
+			goto err0;
+
+		ib_ctx = ibpd->uobject->context;
+		ctx = get_qedr_ucontext(ib_ctx);
+
+		memset(&ureq, 0, sizeof(ureq));
+		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+			DP_ERR(dev,
+			       "create qp: problem copying data from user space\n");
+			goto err0;
+		}
+
+		rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq);
+		if (rc)
+			goto err0;
+
+		qedr_init_qp_user_params(&in_params, &ureq);
+	} else {
+		rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params);
+		if (rc)
+			goto err0;
+	}
+
+	qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params);
+	qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params);
+
+	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
+					      &in_params, &out_params);
+
+	if (!qp->qed_qp)
+		goto err1;
+
+	qp->qp_id = out_params.qp_id;
+	qp->icid = out_params.icid;
+	qp->ibqp.qp_num = qp->qp_id;
+
+	if (udata) {
+		rc = qedr_copy_qp_uresp(dev, qp, udata);
+		if (rc)
+			goto err2;
+
+		qedr_qp_user_print(dev, qp);
+	} else {
+		qedr_init_qp_kernel_doorbell_sq(dev, qp);
+		qedr_init_qp_kernel_doorbell_rq(dev, qp);
+	}
+
+	DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n",
+		 udata ? "user" : "kernel", qp);
+
+	return &qp->ibqp;
+
+err2:
+	rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+	if (rc)
+		DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
+err1:
+	if (udata) {
+		qedr_cleanup_user_sq(dev, qp);
+		qedr_cleanup_user_rq(dev, qp);
+	} else {
+		qedr_cleanup_kernel_sq(dev, qp);
+		qedr_cleanup_kernel_rq(dev, qp);
+	}
+
+err0:
+	kfree(qp);
+
+	return ERR_PTR(-EFAULT);
+}
+
+enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
+{
+	switch (qp_state) {
+	case QED_ROCE_QP_STATE_RESET:
+		return IB_QPS_RESET;
+	case QED_ROCE_QP_STATE_INIT:
+		return IB_QPS_INIT;
+	case QED_ROCE_QP_STATE_RTR:
+		return IB_QPS_RTR;
+	case QED_ROCE_QP_STATE_RTS:
+		return IB_QPS_RTS;
+	case QED_ROCE_QP_STATE_SQD:
+		return IB_QPS_SQD;
+	case QED_ROCE_QP_STATE_ERR:
+		return IB_QPS_ERR;
+	case QED_ROCE_QP_STATE_SQE:
+		return IB_QPS_SQE;
+	}
+	return IB_QPS_ERR;
+}
+
+enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
+{
+	switch (qp_state) {
+	case IB_QPS_RESET:
+		return QED_ROCE_QP_STATE_RESET;
+	case IB_QPS_INIT:
+		return QED_ROCE_QP_STATE_INIT;
+	case IB_QPS_RTR:
+		return QED_ROCE_QP_STATE_RTR;
+	case IB_QPS_RTS:
+		return QED_ROCE_QP_STATE_RTS;
+	case IB_QPS_SQD:
+		return QED_ROCE_QP_STATE_SQD;
+	case IB_QPS_ERR:
+		return QED_ROCE_QP_STATE_ERR;
+	default:
+		return QED_ROCE_QP_STATE_ERR;
+	}
+}
+
+static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
+{
+	qed_chain_reset(&qph->pbl);
+	qph->prod = 0;
+	qph->cons = 0;
+	qph->wqe_cons = 0;
+	qph->db_data.data.value = cpu_to_le16(0);
+}
+
+static int qedr_update_qp_state(struct qedr_dev *dev,
+				struct qedr_qp *qp,
+				enum qed_roce_qp_state new_state)
+{
+	int status = 0;
+
+	if (new_state == qp->state)
+		return 1;
+
+	switch (qp->state) {
+	case QED_ROCE_QP_STATE_RESET:
+		switch (new_state) {
+		case QED_ROCE_QP_STATE_INIT:
+			qp->prev_wqe_size = 0;
+			qedr_reset_qp_hwq_info(&qp->sq);
+			qedr_reset_qp_hwq_info(&qp->rq);
+			break;
+		default:
+			status = -EINVAL;
+			break;
+		};
+		break;
+	case QED_ROCE_QP_STATE_INIT:
+		switch (new_state) {
+		case QED_ROCE_QP_STATE_RTR:
+			/* Update doorbell (in case post_recv was
+			 * done before move to RTR)
+			 */
+			wmb();
+			writel(qp->rq.db_data.raw, qp->rq.db);
+			/* Make sure write takes effect */
+			mmiowb();
+			break;
+		case QED_ROCE_QP_STATE_ERR:
+			break;
+		default:
+			/* Invalid state change. */
+			status = -EINVAL;
+			break;
+		};
+		break;
+	case QED_ROCE_QP_STATE_RTR:
+		/* RTR->XXX */
+		switch (new_state) {
+		case QED_ROCE_QP_STATE_RTS:
+			break;
+		case QED_ROCE_QP_STATE_ERR:
+			break;
+		default:
+			/* Invalid state change. */
+			status = -EINVAL;
+			break;
+		};
+		break;
+	case QED_ROCE_QP_STATE_RTS:
+		/* RTS->XXX */
+		switch (new_state) {
+		case QED_ROCE_QP_STATE_SQD:
+			break;
+		case QED_ROCE_QP_STATE_ERR:
+			break;
+		default:
+			/* Invalid state change. */
+			status = -EINVAL;
+			break;
+		};
+		break;
+	case QED_ROCE_QP_STATE_SQD:
+		/* SQD->XXX */
+		switch (new_state) {
+		case QED_ROCE_QP_STATE_RTS:
+		case QED_ROCE_QP_STATE_ERR:
+			break;
+		default:
+			/* Invalid state change. */
+			status = -EINVAL;
+			break;
+		};
+		break;
+	case QED_ROCE_QP_STATE_ERR:
+		/* ERR->XXX */
+		switch (new_state) {
+		case QED_ROCE_QP_STATE_RESET:
+			break;
+		default:
+			status = -EINVAL;
+			break;
+		};
+		break;
+	default:
+		status = -EINVAL;
+		break;
+	};
+
+	return status;
+}
+
+int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		   int attr_mask, struct ib_udata *udata)
+{
+	struct qedr_qp *qp = get_qedr_qp(ibqp);
+	struct qed_rdma_modify_qp_in_params qp_params = { 0 };
+	struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
+	enum ib_qp_state old_qp_state, new_qp_state;
+	int rc = 0;
+
+	DP_DEBUG(dev, QEDR_MSG_QP,
+		 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
+		 attr->qp_state);
+
+	old_qp_state = qedr_get_ibqp_state(qp->state);
+	if (attr_mask & IB_QP_STATE)
+		new_qp_state = attr->qp_state;
+	else
+		new_qp_state = old_qp_state;
+
+	if (!ib_modify_qp_is_ok
+	    (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
+	     IB_LINK_LAYER_ETHERNET)) {
+		DP_ERR(dev,
+		       "modify qp: invalid attribute mask=0x%x specified for\n"
+		       "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
+		       attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
+		       new_qp_state);
+		rc = -EINVAL;
+		goto err;
+	}
+
+	/* Translate the masks... */
+	if (attr_mask & IB_QP_STATE) {
+		SET_FIELD(qp_params.modify_flags,
+			  QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
+		qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
+	}
+
+	if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+		qp_params.sqd_async = true;
+
+	if (attr_mask & IB_QP_PKEY_INDEX) {
+		SET_FIELD(qp_params.modify_flags,
+			  QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
+		if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
+			rc = -EINVAL;
+			goto err;
+		}
+
+		qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
+	}
+
+	if (attr_mask & IB_QP_QKEY)
+		qp->qkey = attr->qkey;
+
+	if (attr_mask & IB_QP_ACCESS_FLAGS) {
+		SET_FIELD(qp_params.modify_flags,
+			  QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
+		qp_params.incoming_rdma_read_en = attr->qp_access_flags &
+						  IB_ACCESS_REMOTE_READ;
+		qp_params.incoming_rdma_write_en = attr->qp_access_flags &
+						   IB_ACCESS_REMOTE_WRITE;
+		qp_params.incoming_atomic_en = attr->qp_access_flags &
+					       IB_ACCESS_REMOTE_ATOMIC;
+	}
+
+	if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
+		if (attr_mask & IB_QP_PATH_MTU) {
+			if (attr->path_mtu < IB_MTU_256 ||
+			    attr->path_mtu > IB_MTU_4096) {
+				pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
+				rc = -EINVAL;
+				goto err;
+			}
+			qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
+				      ib_mtu_enum_to_int(iboe_get_mtu
+							 (dev->ndev->mtu)));
+		}
+
+		if (!qp->mtu) {
+			qp->mtu =
+			ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
+			pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
+		}
+
+		SET_FIELD(qp_params.modify_flags,
+			  QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
+
+		qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
+		qp_params.flow_label = attr->ah_attr.grh.flow_label;
+		qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
+
+		qp->sgid_idx = attr->ah_attr.grh.sgid_index;
+
+		rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
+		if (rc) {
+			DP_ERR(dev,
+			       "modify qp: problems with GID index %d (rc=%d)\n",
+			       attr->ah_attr.grh.sgid_index, rc);
+			return rc;
+		}
+
+		rc = qedr_get_dmac(dev, &attr->ah_attr,
+				   qp_params.remote_mac_addr);
+		if (rc)
+			return rc;
+
+		qp_params.use_local_mac = true;
+		ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
+
+		DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
+			 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
+			 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
+		DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
+			 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
+			 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
+		DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
+			 qp_params.remote_mac_addr);
+;
+
+		qp_params.mtu = qp->mtu;
+		qp_params.lb_indication = false;
+	}
+
+	if (!qp_params.mtu) {
+		/* Stay with current MTU */
+		if (qp->mtu)
+			qp_params.mtu = qp->mtu;
+		else
+			qp_params.mtu =
+			    ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
+	}
+
+	if (attr_mask & IB_QP_TIMEOUT) {
+		SET_FIELD(qp_params.modify_flags,
+			  QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
+
+		qp_params.ack_timeout = attr->timeout;
+		if (attr->timeout) {
+			u32 temp;
+
+			temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
+			/* FW requires [msec] */
+			qp_params.ack_timeout = temp;
+		} else {
+			/* Infinite */
+			qp_params.ack_timeout = 0;
+		}
+	}
+	if (attr_mask & IB_QP_RETRY_CNT) {
+		SET_FIELD(qp_params.modify_flags,
+			  QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
+		qp_params.retry_cnt = attr->retry_cnt;
+	}
+
+	if (attr_mask & IB_QP_RNR_RETRY) {
+		SET_FIELD(qp_params.modify_flags,
+			  QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
+		qp_params.rnr_retry_cnt = attr->rnr_retry;
+	}
+
+	if (attr_mask & IB_QP_RQ_PSN) {
+		SET_FIELD(qp_params.modify_flags,
+			  QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
+		qp_params.rq_psn = attr->rq_psn;
+		qp->rq_psn = attr->rq_psn;
+	}
+
+	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+		if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
+			rc = -EINVAL;
+			DP_ERR(dev,
+			       "unsupported max_rd_atomic=%d, supported=%d\n",
+			       attr->max_rd_atomic,
+			       dev->attr.max_qp_req_rd_atomic_resc);
+			goto err;
+		}
+
+		SET_FIELD(qp_params.modify_flags,
+			  QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
+		qp_params.max_rd_atomic_req = attr->max_rd_atomic;
+	}
+
+	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+		SET_FIELD(qp_params.modify_flags,
+			  QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
+		qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
+	}
+
+	if (attr_mask & IB_QP_SQ_PSN) {
+		SET_FIELD(qp_params.modify_flags,
+			  QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
+		qp_params.sq_psn = attr->sq_psn;
+		qp->sq_psn = attr->sq_psn;
+	}
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+		if (attr->max_dest_rd_atomic >
+		    dev->attr.max_qp_resp_rd_atomic_resc) {
+			DP_ERR(dev,
+			       "unsupported max_dest_rd_atomic=%d, supported=%d\n",
+			       attr->max_dest_rd_atomic,
+			       dev->attr.max_qp_resp_rd_atomic_resc);
+
+			rc = -EINVAL;
+			goto err;
+		}
+
+		SET_FIELD(qp_params.modify_flags,
+			  QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
+		qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
+	}
+
+	if (attr_mask & IB_QP_DEST_QPN) {
+		SET_FIELD(qp_params.modify_flags,
+			  QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
+
+		qp_params.dest_qp = attr->dest_qp_num;
+		qp->dest_qp_num = attr->dest_qp_num;
+	}
+
+	if (qp->qp_type != IB_QPT_GSI)
+		rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
+					      qp->qed_qp, &qp_params);
+
+	if (attr_mask & IB_QP_STATE) {
+		if ((qp->qp_type != IB_QPT_GSI) && (!udata))
+			qedr_update_qp_state(dev, qp, qp_params.new_state);
+		qp->state = qp_params.new_state;
+	}
+
+err:
+	return rc;
+}
+
+static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
+{
+	int ib_qp_acc_flags = 0;
+
+	if (params->incoming_rdma_write_en)
+		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
+	if (params->incoming_rdma_read_en)
+		ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
+	if (params->incoming_atomic_en)
+		ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
+	ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
+	return ib_qp_acc_flags;
+}
+
+int qedr_query_qp(struct ib_qp *ibqp,
+		  struct ib_qp_attr *qp_attr,
+		  int attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+	struct qed_rdma_query_qp_out_params params;
+	struct qedr_qp *qp = get_qedr_qp(ibqp);
+	struct qedr_dev *dev = qp->dev;
+	int rc = 0;
+
+	memset(&params, 0, sizeof(params));
+
+	rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
+	if (rc)
+		goto err;
+
+	memset(qp_attr, 0, sizeof(*qp_attr));
+	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+
+	qp_attr->qp_state = qedr_get_ibqp_state(params.state);
+	qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
+	qp_attr->path_mtu = iboe_get_mtu(params.mtu);
+	qp_attr->path_mig_state = IB_MIG_MIGRATED;
+	qp_attr->rq_psn = params.rq_psn;
+	qp_attr->sq_psn = params.sq_psn;
+	qp_attr->dest_qp_num = params.dest_qp;
+
+	qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
+
+	qp_attr->cap.max_send_wr = qp->sq.max_wr;
+	qp_attr->cap.max_recv_wr = qp->rq.max_wr;
+	qp_attr->cap.max_send_sge = qp->sq.max_sges;
+	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
+	qp_attr->cap.max_inline_data = qp->max_inline_data;
+	qp_init_attr->cap = qp_attr->cap;
+
+	memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
+	       sizeof(qp_attr->ah_attr.grh.dgid.raw));
+
+	qp_attr->ah_attr.grh.flow_label = params.flow_label;
+	qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
+	qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
+	qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
+
+	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
+	qp_attr->ah_attr.port_num = 1;
+	qp_attr->ah_attr.sl = 0;
+	qp_attr->timeout = params.timeout;
+	qp_attr->rnr_retry = params.rnr_retry;
+	qp_attr->retry_cnt = params.retry_cnt;
+	qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
+	qp_attr->pkey_index = params.pkey_index;
+	qp_attr->port_num = 1;
+	qp_attr->ah_attr.src_path_bits = 0;
+	qp_attr->ah_attr.static_rate = 0;
+	qp_attr->alt_pkey_index = 0;
+	qp_attr->alt_port_num = 0;
+	qp_attr->alt_timeout = 0;
+	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
+
+	qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
+	qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
+	qp_attr->max_rd_atomic = params.max_rd_atomic;
+	qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
+
+	DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
+		 qp_attr->cap.max_inline_data);
+
+err:
+	return rc;
+}
+
+int qedr_destroy_qp(struct ib_qp *ibqp)
+{
+	struct qedr_qp *qp = get_qedr_qp(ibqp);
+	struct qedr_dev *dev = qp->dev;
+	struct ib_qp_attr attr;
+	int attr_mask = 0;
+	int rc = 0;
+
+	DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
+		 qp, qp->qp_type);
+
+	if (qp->state != (QED_ROCE_QP_STATE_RESET | QED_ROCE_QP_STATE_ERR |
+			  QED_ROCE_QP_STATE_INIT)) {
+		attr.qp_state = IB_QPS_ERR;
+		attr_mask |= IB_QP_STATE;
+
+		/* Change the QP state to ERROR */
+		qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
+	}
+
+	if (qp->qp_type != IB_QPT_GSI) {
+		rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+		if (rc)
+			return rc;
+	} else {
+		qedr_destroy_gsi_qp(dev);
+	}
+
+	if (ibqp->uobject && ibqp->uobject->context) {
+		qedr_cleanup_user_sq(dev, qp);
+		qedr_cleanup_user_rq(dev, qp);
+	} else {
+		qedr_cleanup_kernel_sq(dev, qp);
+		qedr_cleanup_kernel_rq(dev, qp);
+	}
+
+	kfree(qp);
+
+	return rc;
+}
+
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+{
+	struct qedr_ah *ah;
+
+	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
+	if (!ah)
+		return ERR_PTR(-ENOMEM);
+
+	ah->attr = *attr;
+
+	return &ah->ibah;
+}
+
+int qedr_destroy_ah(struct ib_ah *ibah)
+{
+	struct qedr_ah *ah = get_qedr_ah(ibah);
+
+	kfree(ah);
+	return 0;
+}
+
+static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
+{
+	struct qedr_pbl *pbl, *tmp;
+
+	if (info->pbl_table)
+		list_add_tail(&info->pbl_table->list_entry,
+			      &info->free_pbl_list);
+
+	if (!list_empty(&info->inuse_pbl_list))
+		list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
+
+	list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
+		list_del(&pbl->list_entry);
+		qedr_free_pbl(dev, &info->pbl_info, pbl);
+	}
+}
+
+static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
+			size_t page_list_len, bool two_layered)
+{
+	struct qedr_pbl *tmp;
+	int rc;
+
+	INIT_LIST_HEAD(&info->free_pbl_list);
+	INIT_LIST_HEAD(&info->inuse_pbl_list);
+
+	rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
+				  page_list_len, two_layered);
+	if (rc)
+		goto done;
+
+	info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
+	if (!info->pbl_table) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
+		 &info->pbl_table->pa);
+
+	/* in usual case we use 2 PBLs, so we add one to free
+	 * list and allocating another one
+	 */
+	tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
+	if (!tmp) {
+		DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
+		goto done;
+	}
+
+	list_add_tail(&tmp->list_entry, &info->free_pbl_list);
+
+	DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
+
+done:
+	if (rc)
+		free_mr_info(dev, info);
+
+	return rc;
+}
+
+struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+			       u64 usr_addr, int acc, struct ib_udata *udata)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+	struct qedr_mr *mr;
+	struct qedr_pd *pd;
+	int rc = -ENOMEM;
+
+	pd = get_qedr_pd(ibpd);
+	DP_DEBUG(dev, QEDR_MSG_MR,
+		 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
+		 pd->pd_id, start, len, usr_addr, acc);
+
+	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
+		return ERR_PTR(-EINVAL);
+
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(rc);
+
+	mr->type = QEDR_MR_USER;
+
+	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
+	if (IS_ERR(mr->umem)) {
+		rc = -EFAULT;
+		goto err0;
+	}
+
+	rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
+	if (rc)
+		goto err1;
+
+	qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
+			   &mr->info.pbl_info);
+
+	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+	if (rc) {
+		DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+		goto err1;
+	}
+
+	/* Index only, 18 bit long, lkey = itid << 8 | key */
+	mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
+	mr->hw_mr.key = 0;
+	mr->hw_mr.pd = pd->pd_id;
+	mr->hw_mr.local_read = 1;
+	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+	mr->hw_mr.mw_bind = false;
+	mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
+	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
+	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
+	mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
+	mr->hw_mr.fbo = ib_umem_offset(mr->umem);
+	mr->hw_mr.length = len;
+	mr->hw_mr.vaddr = usr_addr;
+	mr->hw_mr.zbva = false;
+	mr->hw_mr.phy_mr = false;
+	mr->hw_mr.dma_mr = false;
+
+	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+	if (rc) {
+		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+		goto err2;
+	}
+
+	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
+	    mr->hw_mr.remote_atomic)
+		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+
+	DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
+		 mr->ibmr.lkey);
+	return &mr->ibmr;
+
+err2:
+	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+	qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+err0:
+	kfree(mr);
+	return ERR_PTR(rc);
+}
+
+int qedr_dereg_mr(struct ib_mr *ib_mr)
+{
+	struct qedr_mr *mr = get_qedr_mr(ib_mr);
+	struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
+	int rc = 0;
+
+	rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
+	if (rc)
+		return rc;
+
+	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+
+	if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
+		qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+
+	/* it could be user registered memory. */
+	if (mr->umem)
+		ib_umem_release(mr->umem);
+
+	kfree(mr);
+
+	return rc;
+}
+
+struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
+{
+	struct qedr_pd *pd = get_qedr_pd(ibpd);
+	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+	struct qedr_mr *mr;
+	int rc = -ENOMEM;
+
+	DP_DEBUG(dev, QEDR_MSG_MR,
+		 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
+		 max_page_list_len);
+
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(rc);
+
+	mr->dev = dev;
+	mr->type = QEDR_MR_FRMR;
+
+	rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
+	if (rc)
+		goto err0;
+
+	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+	if (rc) {
+		DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+		goto err0;
+	}
+
+	/* Index only, 18 bit long, lkey = itid << 8 | key */
+	mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
+	mr->hw_mr.key = 0;
+	mr->hw_mr.pd = pd->pd_id;
+	mr->hw_mr.local_read = 1;
+	mr->hw_mr.local_write = 0;
+	mr->hw_mr.remote_read = 0;
+	mr->hw_mr.remote_write = 0;
+	mr->hw_mr.remote_atomic = 0;
+	mr->hw_mr.mw_bind = false;
+	mr->hw_mr.pbl_ptr = 0;
+	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
+	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
+	mr->hw_mr.fbo = 0;
+	mr->hw_mr.length = 0;
+	mr->hw_mr.vaddr = 0;
+	mr->hw_mr.zbva = false;
+	mr->hw_mr.phy_mr = true;
+	mr->hw_mr.dma_mr = false;
+
+	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+	if (rc) {
+		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+		goto err1;
+	}
+
+	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+	mr->ibmr.rkey = mr->ibmr.lkey;
+
+	DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
+	return mr;
+
+err1:
+	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err0:
+	kfree(mr);
+	return ERR_PTR(rc);
+}
+
+struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
+			    enum ib_mr_type mr_type, u32 max_num_sg)
+{
+	struct qedr_dev *dev;
+	struct qedr_mr *mr;
+
+	if (mr_type != IB_MR_TYPE_MEM_REG)
+		return ERR_PTR(-EINVAL);
+
+	mr = __qedr_alloc_mr(ibpd, max_num_sg);
+
+	if (IS_ERR(mr))
+		return ERR_PTR(-EINVAL);
+
+	dev = mr->dev;
+
+	return &mr->ibmr;
+}
+
+static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
+{
+	struct qedr_mr *mr = get_qedr_mr(ibmr);
+	struct qedr_pbl *pbl_table;
+	struct regpair *pbe;
+	u32 pbes_in_page;
+
+	if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
+		DP_ERR(mr->dev, "qedr_set_page failes when %d\n", mr->npages);
+		return -ENOMEM;
+	}
+
+	DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
+		 mr->npages, addr);
+
+	pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
+	pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
+	pbe = (struct regpair *)pbl_table->va;
+	pbe +=  mr->npages % pbes_in_page;
+	pbe->lo = cpu_to_le32((u32)addr);
+	pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
+
+	mr->npages++;
+
+	return 0;
+}
+
+static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
+{
+	int work = info->completed - info->completed_handled - 1;
+
+	DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
+	while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
+		struct qedr_pbl *pbl;
+
+		/* Free all the page list that are possible to be freed
+		 * (all the ones that were invalidated), under the assumption
+		 * that if an FMR was completed successfully that means that
+		 * if there was an invalidate operation before it also ended
+		 */
+		pbl = list_first_entry(&info->inuse_pbl_list,
+				       struct qedr_pbl, list_entry);
+		list_del(&pbl->list_entry);
+		list_add_tail(&pbl->list_entry, &info->free_pbl_list);
+		info->completed_handled++;
+	}
+}
+
+int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+		   int sg_nents, unsigned int *sg_offset)
+{
+	struct qedr_mr *mr = get_qedr_mr(ibmr);
+
+	mr->npages = 0;
+
+	handle_completed_mrs(mr->dev, &mr->info);
+	return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
+}
+
+struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+	struct qedr_pd *pd = get_qedr_pd(ibpd);
+	struct qedr_mr *mr;
+	int rc;
+
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
+
+	mr->type = QEDR_MR_DMA;
+
+	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+	if (rc) {
+		DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+		goto err1;
+	}
+
+	/* index only, 18 bit long, lkey = itid << 8 | key */
+	mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
+	mr->hw_mr.pd = pd->pd_id;
+	mr->hw_mr.local_read = 1;
+	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+	mr->hw_mr.dma_mr = true;
+
+	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+	if (rc) {
+		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+		goto err2;
+	}
+
+	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
+	    mr->hw_mr.remote_atomic)
+		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+
+	DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
+	return &mr->ibmr;
+
+err2:
+	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+	kfree(mr);
+	return ERR_PTR(rc);
+}
+
+static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
+{
+	return (((wq->prod + 1) % wq->max_wr) == wq->cons);
+}
+
+static int sge_data_len(struct ib_sge *sg_list, int num_sge)
+{
+	int i, len = 0;
+
+	for (i = 0; i < num_sge; i++)
+		len += sg_list[i].length;
+
+	return len;
+}
+
+static void swap_wqe_data64(u64 *p)
+{
+	int i;
+
+	for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
+		*p = cpu_to_be64(cpu_to_le64(*p));
+}
+
+static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
+				       struct qedr_qp *qp, u8 *wqe_size,
+				       struct ib_send_wr *wr,
+				       struct ib_send_wr **bad_wr, u8 *bits,
+				       u8 bit)
+{
+	u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
+	char *seg_prt, *wqe;
+	int i, seg_siz;
+
+	if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
+		DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
+		*bad_wr = wr;
+		return 0;
+	}
+
+	if (!data_size)
+		return data_size;
+
+	*bits |= bit;
+
+	seg_prt = NULL;
+	wqe = NULL;
+	seg_siz = 0;
+
+	/* Copy data inline */
+	for (i = 0; i < wr->num_sge; i++) {
+		u32 len = wr->sg_list[i].length;
+		void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
+
+		while (len > 0) {
+			u32 cur;
+
+			/* New segment required */
+			if (!seg_siz) {
+				wqe = (char *)qed_chain_produce(&qp->sq.pbl);
+				seg_prt = wqe;
+				seg_siz = sizeof(struct rdma_sq_common_wqe);
+				(*wqe_size)++;
+			}
+
+			/* Calculate currently allowed length */
+			cur = min_t(u32, len, seg_siz);
+			memcpy(seg_prt, src, cur);
+
+			/* Update segment variables */
+			seg_prt += cur;
+			seg_siz -= cur;
+
+			/* Update sge variables */
+			src += cur;
+			len -= cur;
+
+			/* Swap fully-completed segments */
+			if (!seg_siz)
+				swap_wqe_data64((u64 *)wqe);
+		}
+	}
+
+	/* swap last not completed segment */
+	if (seg_siz)
+		swap_wqe_data64((u64 *)wqe);
+
+	return data_size;
+}
+
+#define RQ_SGE_SET(sge, vaddr, vlength, vflags)			\
+	do {							\
+		DMA_REGPAIR_LE(sge->addr, vaddr);		\
+		(sge)->length = cpu_to_le32(vlength);		\
+		(sge)->flags = cpu_to_le32(vflags);		\
+	} while (0)
+
+#define SRQ_HDR_SET(hdr, vwr_id, num_sge)			\
+	do {							\
+		DMA_REGPAIR_LE(hdr->wr_id, vwr_id);		\
+		(hdr)->num_sges = num_sge;			\
+	} while (0)
+
+#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)			\
+	do {							\
+		DMA_REGPAIR_LE(sge->addr, vaddr);		\
+		(sge)->length = cpu_to_le32(vlength);		\
+		(sge)->l_key = cpu_to_le32(vlkey);		\
+	} while (0)
+
+static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
+				struct ib_send_wr *wr)
+{
+	u32 data_size = 0;
+	int i;
+
+	for (i = 0; i < wr->num_sge; i++) {
+		struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
+
+		DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
+		sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
+		sge->length = cpu_to_le32(wr->sg_list[i].length);
+		data_size += wr->sg_list[i].length;
+	}
+
+	if (wqe_size)
+		*wqe_size += wr->num_sge;
+
+	return data_size;
+}
+
+static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
+				     struct qedr_qp *qp,
+				     struct rdma_sq_rdma_wqe_1st *rwqe,
+				     struct rdma_sq_rdma_wqe_2nd *rwqe2,
+				     struct ib_send_wr *wr,
+				     struct ib_send_wr **bad_wr)
+{
+	rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
+	DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
+
+	if (wr->send_flags & IB_SEND_INLINE) {
+		u8 flags = 0;
+
+		SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
+		return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
+						   bad_wr, &rwqe->flags, flags);
+	}
+
+	return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
+}
+
+static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
+				     struct qedr_qp *qp,
+				     struct rdma_sq_send_wqe_1st *swqe,
+				     struct rdma_sq_send_wqe_2st *swqe2,
+				     struct ib_send_wr *wr,
+				     struct ib_send_wr **bad_wr)
+{
+	memset(swqe2, 0, sizeof(*swqe2));
+	if (wr->send_flags & IB_SEND_INLINE) {
+		u8 flags = 0;
+
+		SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
+		return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
+						   bad_wr, &swqe->flags, flags);
+	}
+
+	return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
+}
+
+static int qedr_prepare_reg(struct qedr_qp *qp,
+			    struct rdma_sq_fmr_wqe_1st *fwqe1,
+			    struct ib_reg_wr *wr)
+{
+	struct qedr_mr *mr = get_qedr_mr(wr->mr);
+	struct rdma_sq_fmr_wqe_2nd *fwqe2;
+
+	fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
+	fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
+	fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
+	fwqe1->l_key = wr->key;
+
+	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
+		   !!(wr->access & IB_ACCESS_REMOTE_READ));
+	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
+		   !!(wr->access & IB_ACCESS_REMOTE_WRITE));
+	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
+		   !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
+	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
+	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
+		   !!(wr->access & IB_ACCESS_LOCAL_WRITE));
+	fwqe2->fmr_ctrl = 0;
+
+	SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
+		   ilog2(mr->ibmr.page_size) - 12);
+
+	fwqe2->length_hi = 0;
+	fwqe2->length_lo = mr->ibmr.length;
+	fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
+	fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
+
+	qp->wqe_wr_id[qp->sq.prod].mr = mr;
+
+	return 0;
+}
+
+enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
+{
+	switch (opcode) {
+	case IB_WR_RDMA_WRITE:
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		return IB_WC_RDMA_WRITE;
+	case IB_WR_SEND_WITH_IMM:
+	case IB_WR_SEND:
+	case IB_WR_SEND_WITH_INV:
+		return IB_WC_SEND;
+	case IB_WR_RDMA_READ:
+		return IB_WC_RDMA_READ;
+	case IB_WR_ATOMIC_CMP_AND_SWP:
+		return IB_WC_COMP_SWAP;
+	case IB_WR_ATOMIC_FETCH_AND_ADD:
+		return IB_WC_FETCH_ADD;
+	case IB_WR_REG_MR:
+		return IB_WC_REG_MR;
+	case IB_WR_LOCAL_INV:
+		return IB_WC_LOCAL_INV;
+	default:
+		return IB_WC_SEND;
+	}
+}
+
+inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+{
+	int wq_is_full, err_wr, pbl_is_full;
+	struct qedr_dev *dev = qp->dev;
+
+	/* prevent SQ overflow and/or processing of a bad WR */
+	err_wr = wr->num_sge > qp->sq.max_sges;
+	wq_is_full = qedr_wq_is_full(&qp->sq);
+	pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
+		      QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+	if (wq_is_full || err_wr || pbl_is_full) {
+		if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
+			DP_ERR(dev,
+			       "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
+			       qp);
+			qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
+		}
+
+		if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
+			DP_ERR(dev,
+			       "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
+			       qp);
+			qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
+		}
+
+		if (pbl_is_full &&
+		    !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
+			DP_ERR(dev,
+			       "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
+			       qp);
+			qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
+		}
+		return false;
+	}
+	return true;
+}
+
+int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+		     struct ib_send_wr **bad_wr)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+	struct qedr_qp *qp = get_qedr_qp(ibqp);
+	struct rdma_sq_atomic_wqe_1st *awqe1;
+	struct rdma_sq_atomic_wqe_2nd *awqe2;
+	struct rdma_sq_atomic_wqe_3rd *awqe3;
+	struct rdma_sq_send_wqe_2st *swqe2;
+	struct rdma_sq_local_inv_wqe *iwqe;
+	struct rdma_sq_rdma_wqe_2nd *rwqe2;
+	struct rdma_sq_send_wqe_1st *swqe;
+	struct rdma_sq_rdma_wqe_1st *rwqe;
+	struct rdma_sq_fmr_wqe_1st *fwqe1;
+	struct rdma_sq_common_wqe *wqe;
+	u32 length;
+	int rc = 0;
+	bool comp;
+
+	if (!qedr_can_post_send(qp, wr)) {
+		*bad_wr = wr;
+		return -ENOMEM;
+	}
+
+	wqe = qed_chain_produce(&qp->sq.pbl);
+	qp->wqe_wr_id[qp->sq.prod].signaled =
+		!!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
+
+	wqe->flags = 0;
+	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
+		   !!(wr->send_flags & IB_SEND_SOLICITED));
+	comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
+	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
+	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
+		   !!(wr->send_flags & IB_SEND_FENCE));
+	wqe->prev_wqe_size = qp->prev_wqe_size;
+
+	qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
+
+	switch (wr->opcode) {
+	case IB_WR_SEND_WITH_IMM:
+		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
+		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+		swqe->wqe_size = 2;
+		swqe2 = qed_chain_produce(&qp->sq.pbl);
+
+		swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
+		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+						   wr, bad_wr);
+		swqe->length = cpu_to_le32(length);
+		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+		qp->prev_wqe_size = swqe->wqe_size;
+		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+		break;
+	case IB_WR_SEND:
+		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
+		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+
+		swqe->wqe_size = 2;
+		swqe2 = qed_chain_produce(&qp->sq.pbl);
+		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+						   wr, bad_wr);
+		swqe->length = cpu_to_le32(length);
+		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+		qp->prev_wqe_size = swqe->wqe_size;
+		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+		break;
+	case IB_WR_SEND_WITH_INV:
+		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
+		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+		swqe2 = qed_chain_produce(&qp->sq.pbl);
+		swqe->wqe_size = 2;
+		swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
+		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+						   wr, bad_wr);
+		swqe->length = cpu_to_le32(length);
+		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+		qp->prev_wqe_size = swqe->wqe_size;
+		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+		break;
+
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
+		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+		rwqe->wqe_size = 2;
+		rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
+		rwqe2 = qed_chain_produce(&qp->sq.pbl);
+		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+						   wr, bad_wr);
+		rwqe->length = cpu_to_le32(length);
+		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+		qp->prev_wqe_size = rwqe->wqe_size;
+		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+		break;
+	case IB_WR_RDMA_WRITE:
+		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
+		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+		rwqe->wqe_size = 2;
+		rwqe2 = qed_chain_produce(&qp->sq.pbl);
+		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+						   wr, bad_wr);
+		rwqe->length = cpu_to_le32(length);
+		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+		qp->prev_wqe_size = rwqe->wqe_size;
+		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+		break;
+	case IB_WR_RDMA_READ_WITH_INV:
+		DP_ERR(dev,
+		       "RDMA READ WITH INVALIDATE not supported\n");
+		*bad_wr = wr;
+		rc = -EINVAL;
+		break;
+
+	case IB_WR_RDMA_READ:
+		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
+		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+		rwqe->wqe_size = 2;
+		rwqe2 = qed_chain_produce(&qp->sq.pbl);
+		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+						   wr, bad_wr);
+		rwqe->length = cpu_to_le32(length);
+		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+		qp->prev_wqe_size = rwqe->wqe_size;
+		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+		break;
+
+	case IB_WR_ATOMIC_CMP_AND_SWP:
+	case IB_WR_ATOMIC_FETCH_AND_ADD:
+		awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
+		awqe1->wqe_size = 4;
+
+		awqe2 = qed_chain_produce(&qp->sq.pbl);
+		DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
+		awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
+
+		awqe3 = qed_chain_produce(&qp->sq.pbl);
+
+		if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
+			DMA_REGPAIR_LE(awqe3->swap_data,
+				       atomic_wr(wr)->compare_add);
+		} else {
+			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
+			DMA_REGPAIR_LE(awqe3->swap_data,
+				       atomic_wr(wr)->swap);
+			DMA_REGPAIR_LE(awqe3->cmp_data,
+				       atomic_wr(wr)->compare_add);
+		}
+
+		qedr_prepare_sq_sges(qp, NULL, wr);
+
+		qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
+		qp->prev_wqe_size = awqe1->wqe_size;
+		break;
+
+	case IB_WR_LOCAL_INV:
+		iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
+		iwqe->wqe_size = 1;
+
+		iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
+		iwqe->inv_l_key = wr->ex.invalidate_rkey;
+		qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
+		qp->prev_wqe_size = iwqe->wqe_size;
+		break;
+	case IB_WR_REG_MR:
+		DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
+		wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
+		fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
+		fwqe1->wqe_size = 2;
+
+		rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
+		if (rc) {
+			DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
+			*bad_wr = wr;
+			break;
+		}
+
+		qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
+		qp->prev_wqe_size = fwqe1->wqe_size;
+		break;
+	default:
+		DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
+		rc = -EINVAL;
+		*bad_wr = wr;
+		break;
+	}
+
+	if (*bad_wr) {
+		u16 value;
+
+		/* Restore prod to its position before
+		 * this WR was processed
+		 */
+		value = le16_to_cpu(qp->sq.db_data.data.value);
+		qed_chain_set_prod(&qp->sq.pbl, value, wqe);
+
+		/* Restore prev_wqe_size */
+		qp->prev_wqe_size = wqe->prev_wqe_size;
+		rc = -EINVAL;
+		DP_ERR(dev, "POST SEND FAILED\n");
+	}
+
+	return rc;
+}
+
+int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+		   struct ib_send_wr **bad_wr)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+	struct qedr_qp *qp = get_qedr_qp(ibqp);
+	unsigned long flags;
+	int rc = 0;
+
+	*bad_wr = NULL;
+
+	if (qp->qp_type == IB_QPT_GSI)
+		return qedr_gsi_post_send(ibqp, wr, bad_wr);
+
+	spin_lock_irqsave(&qp->q_lock, flags);
+
+	if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
+	    (qp->state == QED_ROCE_QP_STATE_ERR)) {
+		spin_unlock_irqrestore(&qp->q_lock, flags);
+		*bad_wr = wr;
+		DP_DEBUG(dev, QEDR_MSG_CQ,
+			 "QP in wrong state! QP icid=0x%x state %d\n",
+			 qp->icid, qp->state);
+		return -EINVAL;
+	}
+
+	if (!wr) {
+		DP_ERR(dev, "Got an empty post send.\n");
+		return -EINVAL;
+	}
+
+	while (wr) {
+		rc = __qedr_post_send(ibqp, wr, bad_wr);
+		if (rc)
+			break;
+
+		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
+
+		qedr_inc_sw_prod(&qp->sq);
+
+		qp->sq.db_data.data.value++;
+
+		wr = wr->next;
+	}
+
+	/* Trigger doorbell
+	 * If there was a failure in the first WR then it will be triggered in
+	 * vane. However this is not harmful (as long as the producer value is
+	 * unchanged). For performance reasons we avoid checking for this
+	 * redundant doorbell.
+	 */
+	wmb();
+	writel(qp->sq.db_data.raw, qp->sq.db);
+
+	/* Make sure write sticks */
+	mmiowb();
+
+	spin_unlock_irqrestore(&qp->q_lock, flags);
+
+	return rc;
+}
+
+int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+		   struct ib_recv_wr **bad_wr)
+{
+	struct qedr_qp *qp = get_qedr_qp(ibqp);
+	struct qedr_dev *dev = qp->dev;
+	unsigned long flags;
+	int status = 0;
+
+	if (qp->qp_type == IB_QPT_GSI)
+		return qedr_gsi_post_recv(ibqp, wr, bad_wr);
+
+	spin_lock_irqsave(&qp->q_lock, flags);
+
+	if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
+	    (qp->state == QED_ROCE_QP_STATE_ERR)) {
+		spin_unlock_irqrestore(&qp->q_lock, flags);
+		*bad_wr = wr;
+		return -EINVAL;
+	}
+
+	while (wr) {
+		int i;
+
+		if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
+		    QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
+		    wr->num_sge > qp->rq.max_sges) {
+			DP_ERR(dev, "Can't post WR  (%d < %d) || (%d > %d)\n",
+			       qed_chain_get_elem_left_u32(&qp->rq.pbl),
+			       QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
+			       qp->rq.max_sges);
+			status = -ENOMEM;
+			*bad_wr = wr;
+			break;
+		}
+		for (i = 0; i < wr->num_sge; i++) {
+			u32 flags = 0;
+			struct rdma_rq_sge *rqe =
+			    qed_chain_produce(&qp->rq.pbl);
+
+			/* First one must include the number
+			 * of SGE in the list
+			 */
+			if (!i)
+				SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
+					  wr->num_sge);
+
+			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
+				  wr->sg_list[i].lkey);
+
+			RQ_SGE_SET(rqe, wr->sg_list[i].addr,
+				   wr->sg_list[i].length, flags);
+		}
+
+		/* Special case of no sges. FW requires between 1-4 sges...
+		 * in this case we need to post 1 sge with length zero. this is
+		 * because rdma write with immediate consumes an RQ.
+		 */
+		if (!wr->num_sge) {
+			u32 flags = 0;
+			struct rdma_rq_sge *rqe =
+			    qed_chain_produce(&qp->rq.pbl);
+
+			/* First one must include the number
+			 * of SGE in the list
+			 */
+			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
+			SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
+
+			RQ_SGE_SET(rqe, 0, 0, flags);
+			i = 1;
+		}
+
+		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
+		qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
+
+		qedr_inc_sw_prod(&qp->rq);
+
+		/* Flush all the writes before signalling doorbell */
+		wmb();
+
+		qp->rq.db_data.data.value++;
+
+		writel(qp->rq.db_data.raw, qp->rq.db);
+
+		/* Make sure write sticks */
+		mmiowb();
+
+		wr = wr->next;
+	}
+
+	spin_unlock_irqrestore(&qp->q_lock, flags);
+
+	return status;
+}
+
+static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
+{
+	struct rdma_cqe_requester *resp_cqe = &cqe->req;
+
+	return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
+		cq->pbl_toggle;
+}
+
+static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
+{
+	struct rdma_cqe_requester *resp_cqe = &cqe->req;
+	struct qedr_qp *qp;
+
+	qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
+						   resp_cqe->qp_handle.lo,
+						   u64);
+	return qp;
+}
+
+static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
+{
+	struct rdma_cqe_requester *resp_cqe = &cqe->req;
+
+	return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
+}
+
+/* Return latest CQE (needs processing) */
+static union rdma_cqe *get_cqe(struct qedr_cq *cq)
+{
+	return cq->latest_cqe;
+}
+
+/* In fmr we need to increase the number of fmr completed counter for the fmr
+ * algorithm determining whether we can free a pbl or not.
+ * we need to perform this whether the work request was signaled or not. for
+ * this purpose we call this function from the condition that checks if a wr
+ * should be skipped, to make sure we don't miss it ( possibly this fmr
+ * operation was not signalted)
+ */
+static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
+{
+	if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
+		qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
+}
+
+static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
+		       struct qedr_cq *cq, int num_entries,
+		       struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
+		       int force)
+{
+	u16 cnt = 0;
+
+	while (num_entries && qp->sq.wqe_cons != hw_cons) {
+		if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
+			qedr_chk_if_fmr(qp);
+			/* skip WC */
+			goto next_cqe;
+		}
+
+		/* fill WC */
+		wc->status = status;
+		wc->wc_flags = 0;
+		wc->src_qp = qp->id;
+		wc->qp = &qp->ibqp;
+
+		wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
+		wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
+
+		switch (wc->opcode) {
+		case IB_WC_RDMA_WRITE:
+			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
+			break;
+		case IB_WC_COMP_SWAP:
+		case IB_WC_FETCH_ADD:
+			wc->byte_len = 8;
+			break;
+		case IB_WC_REG_MR:
+			qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
+			break;
+		default:
+			break;
+		}
+
+		num_entries--;
+		wc++;
+		cnt++;
+next_cqe:
+		while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
+			qed_chain_consume(&qp->sq.pbl);
+		qedr_inc_sw_cons(&qp->sq);
+	}
+
+	return cnt;
+}
+
+static int qedr_poll_cq_req(struct qedr_dev *dev,
+			    struct qedr_qp *qp, struct qedr_cq *cq,
+			    int num_entries, struct ib_wc *wc,
+			    struct rdma_cqe_requester *req)
+{
+	int cnt = 0;
+
+	switch (req->status) {
+	case RDMA_CQE_REQ_STS_OK:
+		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
+				  IB_WC_SUCCESS, 0);
+		break;
+	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
+		DP_ERR(dev,
+		       "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+		       cq->icid, qp->icid);
+		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
+				  IB_WC_WR_FLUSH_ERR, 0);
+		break;
+	default:
+		/* process all WQE before the cosumer */
+		qp->state = QED_ROCE_QP_STATE_ERR;
+		cnt = process_req(dev, qp, cq, num_entries, wc,
+				  req->sq_cons - 1, IB_WC_SUCCESS, 0);
+		wc += cnt;
+		/* if we have extra WC fill it with actual error info */
+		if (cnt < num_entries) {
+			enum ib_wc_status wc_status;
+
+			switch (req->status) {
+			case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
+				DP_ERR(dev,
+				       "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+				       cq->icid, qp->icid);
+				wc_status = IB_WC_BAD_RESP_ERR;
+				break;
+			case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
+				DP_ERR(dev,
+				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+				       cq->icid, qp->icid);
+				wc_status = IB_WC_LOC_LEN_ERR;
+				break;
+			case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
+				DP_ERR(dev,
+				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+				       cq->icid, qp->icid);
+				wc_status = IB_WC_LOC_QP_OP_ERR;
+				break;
+			case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
+				DP_ERR(dev,
+				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+				       cq->icid, qp->icid);
+				wc_status = IB_WC_LOC_PROT_ERR;
+				break;
+			case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
+				DP_ERR(dev,
+				       "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+				       cq->icid, qp->icid);
+				wc_status = IB_WC_MW_BIND_ERR;
+				break;
+			case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
+				DP_ERR(dev,
+				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+				       cq->icid, qp->icid);
+				wc_status = IB_WC_REM_INV_REQ_ERR;
+				break;
+			case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
+				DP_ERR(dev,
+				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+				       cq->icid, qp->icid);
+				wc_status = IB_WC_REM_ACCESS_ERR;
+				break;
+			case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
+				DP_ERR(dev,
+				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+				       cq->icid, qp->icid);
+				wc_status = IB_WC_REM_OP_ERR;
+				break;
+			case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
+				DP_ERR(dev,
+				       "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+				       cq->icid, qp->icid);
+				wc_status = IB_WC_RNR_RETRY_EXC_ERR;
+				break;
+			case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
+				DP_ERR(dev,
+				       "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+				       cq->icid, qp->icid);
+				wc_status = IB_WC_RETRY_EXC_ERR;
+				break;
+			default:
+				DP_ERR(dev,
+				       "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+				       cq->icid, qp->icid);
+				wc_status = IB_WC_GENERAL_ERR;
+			}
+			cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
+					   wc_status, 1);
+		}
+	}
+
+	return cnt;
+}
+
+static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
+			       struct qedr_cq *cq, struct ib_wc *wc,
+			       struct rdma_cqe_responder *resp, u64 wr_id)
+{
+	enum ib_wc_status wc_status = IB_WC_SUCCESS;
+	u8 flags;
+
+	wc->opcode = IB_WC_RECV;
+	wc->wc_flags = 0;
+
+	switch (resp->status) {
+	case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
+		wc_status = IB_WC_LOC_ACCESS_ERR;
+		break;
+	case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
+		wc_status = IB_WC_LOC_LEN_ERR;
+		break;
+	case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
+		wc_status = IB_WC_LOC_QP_OP_ERR;
+		break;
+	case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
+		wc_status = IB_WC_LOC_PROT_ERR;
+		break;
+	case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
+		wc_status = IB_WC_MW_BIND_ERR;
+		break;
+	case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
+		wc_status = IB_WC_REM_INV_RD_REQ_ERR;
+		break;
+	case RDMA_CQE_RESP_STS_OK:
+		wc_status = IB_WC_SUCCESS;
+		wc->byte_len = le32_to_cpu(resp->length);
+
+		flags = resp->flags & QEDR_RESP_RDMA_IMM;
+
+		if (flags == QEDR_RESP_RDMA_IMM)
+			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+
+		if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) {
+			wc->ex.imm_data =
+				le32_to_cpu(resp->imm_data_or_inv_r_Key);
+			wc->wc_flags |= IB_WC_WITH_IMM;
+		}
+		break;
+	default:
+		wc->status = IB_WC_GENERAL_ERR;
+		DP_ERR(dev, "Invalid CQE status detected\n");
+	}
+
+	/* fill WC */
+	wc->status = wc_status;
+	wc->src_qp = qp->id;
+	wc->qp = &qp->ibqp;
+	wc->wr_id = wr_id;
+}
+
+static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
+			    struct qedr_cq *cq, struct ib_wc *wc,
+			    struct rdma_cqe_responder *resp)
+{
+	u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+
+	__process_resp_one(dev, qp, cq, wc, resp, wr_id);
+
+	while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
+		qed_chain_consume(&qp->rq.pbl);
+	qedr_inc_sw_cons(&qp->rq);
+
+	return 1;
+}
+
+static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
+			      int num_entries, struct ib_wc *wc, u16 hw_cons)
+{
+	u16 cnt = 0;
+
+	while (num_entries && qp->rq.wqe_cons != hw_cons) {
+		/* fill WC */
+		wc->status = IB_WC_WR_FLUSH_ERR;
+		wc->wc_flags = 0;
+		wc->src_qp = qp->id;
+		wc->byte_len = 0;
+		wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+		wc->qp = &qp->ibqp;
+		num_entries--;
+		wc++;
+		cnt++;
+		while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
+			qed_chain_consume(&qp->rq.pbl);
+		qedr_inc_sw_cons(&qp->rq);
+	}
+
+	return cnt;
+}
+
+static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
+				 struct rdma_cqe_responder *resp, int *update)
+{
+	if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
+		consume_cqe(cq);
+		*update |= 1;
+	}
+}
+
+static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
+			     struct qedr_cq *cq, int num_entries,
+			     struct ib_wc *wc, struct rdma_cqe_responder *resp,
+			     int *update)
+{
+	int cnt;
+
+	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
+		cnt = process_resp_flush(qp, cq, num_entries, wc,
+					 resp->rq_cons);
+		try_consume_resp_cqe(cq, qp, resp, update);
+	} else {
+		cnt = process_resp_one(dev, qp, cq, wc, resp);
+		consume_cqe(cq);
+		*update |= 1;
+	}
+
+	return cnt;
+}
+
+static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
+				struct rdma_cqe_requester *req, int *update)
+{
+	if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
+		consume_cqe(cq);
+		*update |= 1;
+	}
+}
+
+int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+	struct qedr_cq *cq = get_qedr_cq(ibcq);
+	union rdma_cqe *cqe = cq->latest_cqe;
+	u32 old_cons, new_cons;
+	unsigned long flags;
+	int update = 0;
+	int done = 0;
+
+	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+		return qedr_gsi_poll_cq(ibcq, num_entries, wc);
+
+	spin_lock_irqsave(&cq->cq_lock, flags);
+	old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+	while (num_entries && is_valid_cqe(cq, cqe)) {
+		struct qedr_qp *qp;
+		int cnt = 0;
+
+		/* prevent speculative reads of any field of CQE */
+		rmb();
+
+		qp = cqe_get_qp(cqe);
+		if (!qp) {
+			WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
+			break;
+		}
+
+		wc->qp = &qp->ibqp;
+
+		switch (cqe_get_type(cqe)) {
+		case RDMA_CQE_TYPE_REQUESTER:
+			cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
+					       &cqe->req);
+			try_consume_req_cqe(cq, qp, &cqe->req, &update);
+			break;
+		case RDMA_CQE_TYPE_RESPONDER_RQ:
+			cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
+						&cqe->resp, &update);
+			break;
+		case RDMA_CQE_TYPE_INVALID:
+		default:
+			DP_ERR(dev, "Error: invalid CQE type = %d\n",
+			       cqe_get_type(cqe));
+		}
+		num_entries -= cnt;
+		wc += cnt;
+		done += cnt;
+
+		cqe = get_cqe(cq);
+	}
+	new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+
+	cq->cq_cons += new_cons - old_cons;
+
+	if (update)
+		/* doorbell notifies abount latest VALID entry,
+		 * but chain already point to the next INVALID one
+		 */
+		doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
+
+	spin_unlock_irqrestore(&cq->cq_lock, flags);
+	return done;
+}
+
+int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
+		     u8 port_num,
+		     const struct ib_wc *in_wc,
+		     const struct ib_grh *in_grh,
+		     const struct ib_mad_hdr *mad_hdr,
+		     size_t in_mad_size, struct ib_mad_hdr *out_mad,
+		     size_t *out_mad_size, u16 *out_mad_pkey_index)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibdev);
+
+	DP_DEBUG(dev, QEDR_MSG_GSI,
+		 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
+		 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
+		 mad_hdr->class_specific, mad_hdr->class_version,
+		 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
+	return IB_MAD_RESULT_SUCCESS;
+}
+
+int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
+			struct ib_port_immutable *immutable)
+{
+	struct ib_port_attr attr;
+	int err;
+
+	err = qedr_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
+	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
+				    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+	return 0;
+}
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
new file mode 100644
index 0000000..a9b5e67
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -0,0 +1,101 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QEDR_VERBS_H__
+#define __QEDR_VERBS_H__
+
+int qedr_query_device(struct ib_device *ibdev,
+		      struct ib_device_attr *attr, struct ib_udata *udata);
+int qedr_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
+int qedr_modify_port(struct ib_device *, u8 port, int mask,
+		     struct ib_port_modify *props);
+
+int qedr_query_gid(struct ib_device *, u8 port, int index, union ib_gid *gid);
+
+int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
+
+struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *, struct ib_udata *);
+int qedr_dealloc_ucontext(struct ib_ucontext *);
+
+int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
+int qedr_del_gid(struct ib_device *device, u8 port_num,
+		 unsigned int index, void **context);
+int qedr_add_gid(struct ib_device *device, u8 port_num,
+		 unsigned int index, const union ib_gid *gid,
+		 const struct ib_gid_attr *attr, void **context);
+struct ib_pd *qedr_alloc_pd(struct ib_device *,
+			    struct ib_ucontext *, struct ib_udata *);
+int qedr_dealloc_pd(struct ib_pd *pd);
+
+struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
+			     const struct ib_cq_init_attr *attr,
+			     struct ib_ucontext *ib_ctx,
+			     struct ib_udata *udata);
+int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
+int qedr_destroy_cq(struct ib_cq *);
+int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs,
+			     struct ib_udata *);
+int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
+		   int attr_mask, struct ib_udata *udata);
+int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
+		  int qp_attr_mask, struct ib_qp_init_attr *);
+int qedr_destroy_qp(struct ib_qp *ibqp);
+
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr);
+int qedr_destroy_ah(struct ib_ah *ibah);
+
+int qedr_dereg_mr(struct ib_mr *);
+struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc);
+
+struct ib_mr *qedr_reg_user_mr(struct ib_pd *, u64 start, u64 length,
+			       u64 virt, int acc, struct ib_udata *);
+
+int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+		   int sg_nents, unsigned int *sg_offset);
+
+struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+			    u32 max_num_sg);
+int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
+int qedr_post_send(struct ib_qp *, struct ib_send_wr *,
+		   struct ib_send_wr **bad_wr);
+int qedr_post_recv(struct ib_qp *, struct ib_recv_wr *,
+		   struct ib_recv_wr **bad_wr);
+int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
+		     u8 port_num, const struct ib_wc *in_wc,
+		     const struct ib_grh *in_grh,
+		     const struct ib_mad_hdr *in_mad,
+		     size_t in_mad_size, struct ib_mad_hdr *out_mad,
+		     size_t *out_mad_size, u16 *out_mad_pkey_index);
+
+int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
+			struct ib_port_immutable *immutable);
+#endif
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index c3edc03..f1e66ef 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -64,7 +64,7 @@
 	inode->i_uid = GLOBAL_ROOT_UID;
 	inode->i_gid = GLOBAL_ROOT_GID;
 	inode->i_blocks = 0;
-	inode->i_atime = CURRENT_TIME;
+	inode->i_atime = current_time(inode);
 	inode->i_mtime = inode->i_atime;
 	inode->i_ctime = inode->i_atime;
 	inode->i_private = data;
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 2d2b94f..75f0862 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -67,7 +67,8 @@
 
 	for (got = 0; got < num_pages; got += ret) {
 		ret = get_user_pages(start_page + got * PAGE_SIZE,
-				     num_pages - got, 1, 1,
+				     num_pages - got,
+				     FOLL_WRITE | FOLL_FORCE,
 				     p + got, NULL);
 		if (ret < 0)
 			goto bail_release;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index a0b6ebe..1ccee6e 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -111,6 +111,7 @@
 	int i;
 	int flags;
 	dma_addr_t pa;
+	unsigned int gup_flags;
 
 	if (!can_do_mlock())
 		return -EPERM;
@@ -135,6 +136,8 @@
 
 	flags = IOMMU_READ | IOMMU_CACHE;
 	flags |= (writable) ? IOMMU_WRITE : 0;
+	gup_flags = FOLL_WRITE;
+	gup_flags |= (writable) ? 0 : FOLL_FORCE;
 	cur_base = addr & PAGE_MASK;
 	ret = 0;
 
@@ -142,7 +145,7 @@
 		ret = get_user_pages(cur_base,
 					min_t(unsigned long, npages,
 					PAGE_SIZE / sizeof(struct page *)),
-					1, !writable, page_list, NULL);
+					gup_flags, page_list, NULL);
 
 		if (ret < 0)
 			goto out;
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index f2f229e..6d9904a 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -129,7 +129,7 @@
 		if (likely(worker)) {
 			cq->notify = RVT_CQ_NONE;
 			cq->triggered++;
-			queue_kthread_work(worker, &cq->comptask);
+			kthread_queue_work(worker, &cq->comptask);
 		}
 	}
 
@@ -265,7 +265,7 @@
 	cq->ibcq.cqe = entries;
 	cq->notify = RVT_CQ_NONE;
 	spin_lock_init(&cq->lock);
-	init_kthread_work(&cq->comptask, send_complete);
+	kthread_init_work(&cq->comptask, send_complete);
 	cq->queue = wc;
 
 	ret = &cq->ibcq;
@@ -295,7 +295,7 @@
 	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
 	struct rvt_dev_info *rdi = cq->rdi;
 
-	flush_kthread_work(&cq->comptask);
+	kthread_flush_work(&cq->comptask);
 	spin_lock(&rdi->n_cqs_lock);
 	rdi->n_cqs_allocated--;
 	spin_unlock(&rdi->n_cqs_lock);
@@ -514,7 +514,7 @@
 	rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL);
 	if (!rdi->worker)
 		return -ENOMEM;
-	init_kthread_worker(rdi->worker);
+	kthread_init_worker(rdi->worker);
 	task = kthread_create_on_node(
 		kthread_worker_fn,
 		rdi->worker,
@@ -547,7 +547,7 @@
 	/* blocks future queuing from send_complete() */
 	rdi->worker = NULL;
 	smp_wmb(); /* See rdi_cq_enter */
-	flush_kthread_worker(worker);
+	kthread_flush_worker(worker);
 	kthread_stop(worker->task);
 	kfree(worker);
 }
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 936f07a..6d7de9b 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -103,6 +103,7 @@
 					   6-byte ALPS packet */
 #define ALPS_STICK_BITS		0x100	/* separate stick button bits */
 #define ALPS_BUTTONPAD		0x200	/* device is a clickpad */
+#define ALPS_DUALPOINT_WITH_PRESSURE	0x400	/* device can report trackpoint pressure */
 
 static const struct alps_model_info alps_model_data[] = {
 	{ { 0x32, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },	/* Toshiba Salellite Pro M10 */
@@ -1156,15 +1157,28 @@
 {
 	unsigned char pkt_id = SS4_PACKET_ID_IDLE;
 
-	if (byte[0] == 0x18 && byte[1] == 0x10 && byte[2] == 0x00 &&
-	    (byte[3] & 0x88) == 0x08 && byte[4] == 0x10 && byte[5] == 0x00) {
-		pkt_id = SS4_PACKET_ID_IDLE;
-	} else if (!(byte[3] & 0x10)) {
-		pkt_id = SS4_PACKET_ID_ONE;
-	} else if (!(byte[3] & 0x20)) {
+	switch (byte[3] & 0x30) {
+	case 0x00:
+		if (byte[0] == 0x18 && byte[1] == 0x10 && byte[2] == 0x00 &&
+		    (byte[3] & 0x88) == 0x08 && byte[4] == 0x10 &&
+		    byte[5] == 0x00) {
+			pkt_id = SS4_PACKET_ID_IDLE;
+		} else {
+			pkt_id = SS4_PACKET_ID_ONE;
+		}
+		break;
+	case 0x10:
+		/* two-finger finger positions */
 		pkt_id = SS4_PACKET_ID_TWO;
-	} else {
+		break;
+	case 0x20:
+		/* stick pointer */
+		pkt_id = SS4_PACKET_ID_STICK;
+		break;
+	case 0x30:
+		/* third and fourth finger positions */
 		pkt_id = SS4_PACKET_ID_MULTI;
+		break;
 	}
 
 	return pkt_id;
@@ -1185,7 +1199,13 @@
 		f->mt[0].x = SS4_1F_X_V2(p);
 		f->mt[0].y = SS4_1F_Y_V2(p);
 		f->pressure = ((SS4_1F_Z_V2(p)) * 2) & 0x7f;
-		f->fingers = 1;
+		/*
+		 * When a button is held the device will give us events
+		 * with x, y, and pressure of 0. This causes annoying jumps
+		 * if a touch is released while the button is held.
+		 * Handle this by claiming zero contacts.
+		 */
+		f->fingers = f->pressure > 0 ? 1 : 0;
 		f->first_mp = 0;
 		f->is_mp = 0;
 		break;
@@ -1246,16 +1266,40 @@
 		}
 		break;
 
+	case SS4_PACKET_ID_STICK:
+		if (!(priv->flags & ALPS_DUALPOINT)) {
+			psmouse_warn(psmouse,
+				     "Rejected trackstick packet from non DualPoint device");
+		} else {
+			int x = (s8)(((p[0] & 1) << 7) | (p[1] & 0x7f));
+			int y = (s8)(((p[3] & 1) << 7) | (p[2] & 0x7f));
+			int pressure = (s8)(p[4] & 0x7f);
+
+			input_report_rel(priv->dev2, REL_X, x);
+			input_report_rel(priv->dev2, REL_Y, -y);
+			input_report_abs(priv->dev2, ABS_PRESSURE, pressure);
+		}
+		break;
+
 	case SS4_PACKET_ID_IDLE:
 	default:
 		memset(f, 0, sizeof(struct alps_fields));
 		break;
 	}
 
-	f->left = !!(SS4_BTN_V2(p) & 0x01);
-	if (!(priv->flags & ALPS_BUTTONPAD)) {
-		f->right = !!(SS4_BTN_V2(p) & 0x02);
-		f->middle = !!(SS4_BTN_V2(p) & 0x04);
+	/* handle buttons */
+	if (pkt_id == SS4_PACKET_ID_STICK) {
+		f->ts_left = !!(SS4_BTN_V2(p) & 0x01);
+		if (!(priv->flags & ALPS_BUTTONPAD)) {
+			f->ts_right = !!(SS4_BTN_V2(p) & 0x02);
+			f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
+		}
+	} else {
+		f->left = !!(SS4_BTN_V2(p) & 0x01);
+		if (!(priv->flags & ALPS_BUTTONPAD)) {
+			f->right = !!(SS4_BTN_V2(p) & 0x02);
+			f->middle = !!(SS4_BTN_V2(p) & 0x04);
+		}
 	}
 
 	return 0;
@@ -1266,6 +1310,7 @@
 	struct alps_data *priv = psmouse->private;
 	unsigned char *packet = psmouse->packet;
 	struct input_dev *dev = psmouse->dev;
+	struct input_dev *dev2 = priv->dev2;
 	struct alps_fields *f = &priv->f;
 
 	memset(f, 0, sizeof(struct alps_fields));
@@ -1311,6 +1356,13 @@
 
 	input_report_abs(dev, ABS_PRESSURE, f->pressure);
 	input_sync(dev);
+
+	if (priv->flags & ALPS_DUALPOINT) {
+		input_report_key(dev2, BTN_LEFT, f->ts_left);
+		input_report_key(dev2, BTN_RIGHT, f->ts_right);
+		input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
+		input_sync(dev2);
+	}
 }
 
 static bool alps_is_valid_package_ss4_v2(struct psmouse *psmouse)
@@ -2695,6 +2747,10 @@
 		if (alps_set_defaults_ss4_v2(psmouse, priv))
 			return -EIO;
 
+		if (priv->fw_ver[1] == 0x1)
+			priv->flags |= ALPS_DUALPOINT |
+					ALPS_DUALPOINT_WITH_PRESSURE;
+
 		break;
 	}
 
@@ -2767,6 +2823,9 @@
 		} else if (e7[0] == 0x73 && e7[1] == 0x03 &&
 			   e7[2] == 0x14 && ec[1] == 0x02) {
 			protocol = &alps_v8_protocol_data;
+		} else if (e7[0] == 0x73 && e7[1] == 0x03 &&
+			   e7[2] == 0x28 && ec[1] == 0x01) {
+			protocol = &alps_v8_protocol_data;
 		} else {
 			psmouse_dbg(psmouse,
 				    "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
@@ -2949,6 +3008,10 @@
 
 		input_set_capability(dev2, EV_REL, REL_X);
 		input_set_capability(dev2, EV_REL, REL_Y);
+		if (priv->flags & ALPS_DUALPOINT_WITH_PRESSURE) {
+			input_set_capability(dev2, EV_ABS, ABS_PRESSURE);
+			input_set_abs_params(dev2, ABS_PRESSURE, 0, 127, 0, 0);
+		}
 		input_set_capability(dev2, EV_KEY, BTN_LEFT);
 		input_set_capability(dev2, EV_KEY, BTN_RIGHT);
 		input_set_capability(dev2, EV_KEY, BTN_MIDDLE);
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index d37f814..b9417e2 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -37,12 +37,14 @@
  *  or there's button activities.
  * SS4_PACKET_ID_TWO: There's two or more fingers on touchpad
  * SS4_PACKET_ID_MULTI: There's three or more fingers on touchpad
+ * SS4_PACKET_ID_STICK: A stick pointer packet
 */
 enum SS4_PACKET_ID {
 	SS4_PACKET_ID_IDLE = 0,
 	SS4_PACKET_ID_ONE,
 	SS4_PACKET_ID_TWO,
 	SS4_PACKET_ID_MULTI,
+	SS4_PACKET_ID_STICK,
 };
 
 #define SS4_COUNT_PER_ELECTRODE		256
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 08e252a..db7d1d6 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1134,7 +1134,7 @@
  * System76 Pangolin       0x250f01        ?               2 hw buttons
  * (*) + 3 trackpoint buttons
  * (**) + 0 trackpoint buttons
- * Note: Lenovo L430 and Lenovo L430 have the same fw_version/caps
+ * Note: Lenovo L430 and Lenovo L530 have the same fw_version/caps
  */
 static void elantech_set_buttonpad_prop(struct psmouse *psmouse)
 {
@@ -1159,6 +1159,13 @@
 			DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
 		},
 	},
+	{
+		/* Fujitsu H760 also has a middle button */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
+		},
+	},
 #endif
 	{ }
 };
@@ -1503,10 +1510,10 @@
 		},
 	},
 	{
-		/* Fujitsu LIFEBOOK E554  does not work with crc_enabled == 0 */
+		/* Fujitsu H760 does not work with crc_enabled == 0 */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
 		},
 	},
 	{
@@ -1517,6 +1524,20 @@
 		},
 	},
 	{
+		/* Fujitsu LIFEBOOK E554  does not work with crc_enabled == 0 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+		},
+	},
+	{
+		/* Fujitsu LIFEBOOK E556 does not work with crc_enabled == 0 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"),
+		},
+	},
+	{
 		/* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
index f73df24..4c8a558 100644
--- a/drivers/input/rmi4/Kconfig
+++ b/drivers/input/rmi4/Kconfig
@@ -61,3 +61,14 @@
 
 	  Function 30 provides GPIO and LED support for RMI4 devices. This
 	  includes support for buttons on TouchPads and ClickPads.
+
+config RMI4_F54
+	bool "RMI4 Function 54 (Analog diagnostics)"
+	depends on RMI4_CORE
+	depends on VIDEO_V4L2=y || (RMI4_CORE=m && VIDEO_V4L2=m)
+	select VIDEOBUF2_VMALLOC
+	help
+	  Say Y here if you want to add support for RMI4 function 54
+
+	  Function 54 provides access to various diagnostic features in certain
+	  RMI4 touch sensors.
diff --git a/drivers/input/rmi4/Makefile b/drivers/input/rmi4/Makefile
index 95c00a7..0bafc85 100644
--- a/drivers/input/rmi4/Makefile
+++ b/drivers/input/rmi4/Makefile
@@ -7,6 +7,7 @@
 rmi_core-$(CONFIG_RMI4_F11) += rmi_f11.o
 rmi_core-$(CONFIG_RMI4_F12) += rmi_f12.o
 rmi_core-$(CONFIG_RMI4_F30) += rmi_f30.o
+rmi_core-$(CONFIG_RMI4_F54) += rmi_f54.o
 
 # Transports
 obj-$(CONFIG_RMI4_I2C) += rmi_i2c.o
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index a735806..ef8c747 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -9,7 +9,6 @@
 
 #include <linux/kernel.h>
 #include <linux/device.h>
-#include <linux/kconfig.h>
 #include <linux/list.h>
 #include <linux/pm.h>
 #include <linux/rmi.h>
@@ -312,6 +311,9 @@
 #ifdef CONFIG_RMI4_F30
 	&rmi_f30_handler,
 #endif
+#ifdef CONFIG_RMI4_F54
+	&rmi_f54_handler,
+#endif
 };
 
 static void __rmi_unregister_function_handlers(int start_idx)
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index c83bce8..4a88312 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -17,7 +17,6 @@
 #include <linux/bitmap.h>
 #include <linux/delay.h>
 #include <linux/fs.h>
-#include <linux/kconfig.h>
 #include <linux/pm.h>
 #include <linux/slab.h>
 #include <linux/of.h>
diff --git a/drivers/input/rmi4/rmi_driver.h b/drivers/input/rmi4/rmi_driver.h
index 6e140fa..8dfbebe 100644
--- a/drivers/input/rmi4/rmi_driver.h
+++ b/drivers/input/rmi4/rmi_driver.h
@@ -102,4 +102,5 @@
 extern struct rmi_function_handler rmi_f11_handler;
 extern struct rmi_function_handler rmi_f12_handler;
 extern struct rmi_function_handler rmi_f30_handler;
+extern struct rmi_function_handler rmi_f54_handler;
 #endif
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
index fac81fc..b5d2dfc 100644
--- a/drivers/input/rmi4/rmi_f01.c
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -8,7 +8,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/kconfig.h>
 #include <linux/rmi.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index 20c7134..f798f42 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -12,7 +12,6 @@
 #include <linux/device.h>
 #include <linux/input.h>
 #include <linux/input/mt.h>
-#include <linux/kconfig.h>
 #include <linux/rmi.h>
 #include <linux/slab.h>
 #include <linux/of.h>
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
new file mode 100644
index 0000000..cf805b9
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -0,0 +1,757 @@
+/*
+ * Copyright (c) 2012-2015 Synaptics Incorporated
+ * Copyright (C) 2016 Zodiac Inflight Innovations
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/rmi.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-vmalloc.h>
+#include "rmi_driver.h"
+
+#define F54_NAME		"rmi4_f54"
+
+/* F54 data offsets */
+#define F54_REPORT_DATA_OFFSET  3
+#define F54_FIFO_OFFSET         1
+#define F54_NUM_TX_OFFSET       1
+#define F54_NUM_RX_OFFSET       0
+
+/* F54 commands */
+#define F54_GET_REPORT          1
+#define F54_FORCE_CAL           2
+
+/* Fixed sizes of reports */
+#define F54_QUERY_LEN			27
+
+/* F54 capabilities */
+#define F54_CAP_BASELINE	(1 << 2)
+#define F54_CAP_IMAGE8		(1 << 3)
+#define F54_CAP_IMAGE16		(1 << 6)
+
+/**
+ * enum rmi_f54_report_type - RMI4 F54 report types
+ *
+ * @F54_8BIT_IMAGE:	Normalized 8-Bit Image Report. The capacitance variance
+ *			from baseline for each pixel.
+ *
+ * @F54_16BIT_IMAGE:	Normalized 16-Bit Image Report. The capacitance variance
+ *			from baseline for each pixel.
+ *
+ * @F54_RAW_16BIT_IMAGE:
+ *			Raw 16-Bit Image Report. The raw capacitance for each
+ *			pixel.
+ *
+ * @F54_TRUE_BASELINE:	True Baseline Report. The baseline capacitance for each
+ *			pixel.
+ *
+ * @F54_FULL_RAW_CAP:   Full Raw Capacitance Report. The raw capacitance with
+ *			low reference set to its minimum value and high
+ *			reference set to its maximum value.
+ *
+ * @F54_FULL_RAW_CAP_RX_OFFSET_REMOVED:
+ *			Full Raw Capacitance with Receiver Offset Removed
+ *			Report. Set Low reference to its minimum value and high
+ *			references to its maximum value, then report the raw
+ *			capacitance for each pixel.
+ */
+enum rmi_f54_report_type {
+	F54_REPORT_NONE = 0,
+	F54_8BIT_IMAGE = 1,
+	F54_16BIT_IMAGE = 2,
+	F54_RAW_16BIT_IMAGE = 3,
+	F54_TRUE_BASELINE = 9,
+	F54_FULL_RAW_CAP = 19,
+	F54_FULL_RAW_CAP_RX_OFFSET_REMOVED = 20,
+	F54_MAX_REPORT_TYPE,
+};
+
+const char *rmi_f54_report_type_names[] = {
+	[F54_REPORT_NONE]		= "Unknown",
+	[F54_8BIT_IMAGE]		= "Normalized 8-Bit Image",
+	[F54_16BIT_IMAGE]		= "Normalized 16-Bit Image",
+	[F54_RAW_16BIT_IMAGE]		= "Raw 16-Bit Image",
+	[F54_TRUE_BASELINE]		= "True Baseline",
+	[F54_FULL_RAW_CAP]		= "Full Raw Capacitance",
+	[F54_FULL_RAW_CAP_RX_OFFSET_REMOVED]
+					= "Full Raw Capacitance RX Offset Removed",
+};
+
+struct rmi_f54_reports {
+	int start;
+	int size;
+};
+
+struct f54_data {
+	struct rmi_function *fn;
+
+	u8 qry[F54_QUERY_LEN];
+	u8 num_rx_electrodes;
+	u8 num_tx_electrodes;
+	u8 capabilities;
+	u16 clock_rate;
+	u8 family;
+
+	enum rmi_f54_report_type report_type;
+	u8 *report_data;
+	int report_size;
+	struct rmi_f54_reports standard_report[2];
+
+	bool is_busy;
+	struct mutex status_mutex;
+	struct mutex data_mutex;
+
+	struct workqueue_struct *workqueue;
+	struct delayed_work work;
+	unsigned long timeout;
+
+	struct completion cmd_done;
+
+	/* V4L2 support */
+	struct v4l2_device v4l2;
+	struct v4l2_pix_format format;
+	struct video_device vdev;
+	struct vb2_queue queue;
+	struct mutex lock;
+	int input;
+	enum rmi_f54_report_type inputs[F54_MAX_REPORT_TYPE];
+};
+
+/*
+ * Basic checks on report_type to ensure we write a valid type
+ * to the sensor.
+ */
+static bool is_f54_report_type_valid(struct f54_data *f54,
+				     enum rmi_f54_report_type reptype)
+{
+	switch (reptype) {
+	case F54_8BIT_IMAGE:
+		return f54->capabilities & F54_CAP_IMAGE8;
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+		return f54->capabilities & F54_CAP_IMAGE16;
+	case F54_TRUE_BASELINE:
+		return f54->capabilities & F54_CAP_IMAGE16;
+	case F54_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_RX_OFFSET_REMOVED:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static enum rmi_f54_report_type rmi_f54_get_reptype(struct f54_data *f54,
+						unsigned int i)
+{
+	if (i >= F54_MAX_REPORT_TYPE)
+		return F54_REPORT_NONE;
+
+	return f54->inputs[i];
+}
+
+static void rmi_f54_create_input_map(struct f54_data *f54)
+{
+	int i = 0;
+	enum rmi_f54_report_type reptype;
+
+	for (reptype = 1; reptype < F54_MAX_REPORT_TYPE; reptype++) {
+		if (!is_f54_report_type_valid(f54, reptype))
+			continue;
+
+		f54->inputs[i++] = reptype;
+	}
+
+	/* Remaining values are zero via kzalloc */
+}
+
+static int rmi_f54_request_report(struct rmi_function *fn, u8 report_type)
+{
+	struct f54_data *f54 = dev_get_drvdata(&fn->dev);
+	struct rmi_device *rmi_dev = fn->rmi_dev;
+	int error;
+
+	/* Write Report Type into F54_AD_Data0 */
+	if (f54->report_type != report_type) {
+		error = rmi_write(rmi_dev, f54->fn->fd.data_base_addr,
+				  report_type);
+		if (error)
+			return error;
+		f54->report_type = report_type;
+	}
+
+	/*
+	 * Small delay after disabling interrupts to avoid race condition
+	 * in firmare. This value is a bit higher than absolutely necessary.
+	 * Should be removed once issue is resolved in firmware.
+	 */
+	usleep_range(2000, 3000);
+
+	mutex_lock(&f54->data_mutex);
+
+	error = rmi_write(rmi_dev, fn->fd.command_base_addr, F54_GET_REPORT);
+	if (error < 0)
+		return error;
+
+	init_completion(&f54->cmd_done);
+
+	f54->is_busy = 1;
+	f54->timeout = jiffies + msecs_to_jiffies(100);
+
+	queue_delayed_work(f54->workqueue, &f54->work, 0);
+
+	mutex_unlock(&f54->data_mutex);
+
+	return 0;
+}
+
+static size_t rmi_f54_get_report_size(struct f54_data *f54)
+{
+	u8 rx = f54->num_rx_electrodes ? : f54->num_rx_electrodes;
+	u8 tx = f54->num_tx_electrodes ? : f54->num_tx_electrodes;
+	size_t size;
+
+	switch (rmi_f54_get_reptype(f54, f54->input)) {
+	case F54_8BIT_IMAGE:
+		size = rx * tx;
+		break;
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_TRUE_BASELINE:
+	case F54_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_RX_OFFSET_REMOVED:
+		size = sizeof(u16) * rx * tx;
+		break;
+	default:
+		size = 0;
+	}
+
+	return size;
+}
+
+static int rmi_f54_get_pixel_fmt(enum rmi_f54_report_type reptype, u32 *pixfmt)
+{
+	int ret = 0;
+
+	switch (reptype) {
+	case F54_8BIT_IMAGE:
+		*pixfmt = V4L2_TCH_FMT_DELTA_TD08;
+		break;
+
+	case F54_16BIT_IMAGE:
+		*pixfmt = V4L2_TCH_FMT_DELTA_TD16;
+		break;
+
+	case F54_RAW_16BIT_IMAGE:
+	case F54_TRUE_BASELINE:
+	case F54_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_RX_OFFSET_REMOVED:
+		*pixfmt = V4L2_TCH_FMT_TU16;
+		break;
+
+	case F54_REPORT_NONE:
+	case F54_MAX_REPORT_TYPE:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static const struct v4l2_file_operations rmi_f54_video_fops = {
+	.owner = THIS_MODULE,
+	.open = v4l2_fh_open,
+	.release = vb2_fop_release,
+	.unlocked_ioctl = video_ioctl2,
+	.read = vb2_fop_read,
+	.mmap = vb2_fop_mmap,
+	.poll = vb2_fop_poll,
+};
+
+static int rmi_f54_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
+			       unsigned int *nplanes, unsigned int sizes[],
+			       struct device *alloc_devs[])
+{
+	struct f54_data *f54 = q->drv_priv;
+
+	if (*nplanes)
+		return sizes[0] < rmi_f54_get_report_size(f54) ? -EINVAL : 0;
+
+	*nplanes = 1;
+	sizes[0] = rmi_f54_get_report_size(f54);
+
+	return 0;
+}
+
+static void rmi_f54_buffer_queue(struct vb2_buffer *vb)
+{
+	struct f54_data *f54 = vb2_get_drv_priv(vb->vb2_queue);
+	u16 *ptr;
+	enum vb2_buffer_state state;
+	enum rmi_f54_report_type reptype;
+	int ret;
+
+	mutex_lock(&f54->status_mutex);
+
+	reptype = rmi_f54_get_reptype(f54, f54->input);
+	if (reptype == F54_REPORT_NONE) {
+		state = VB2_BUF_STATE_ERROR;
+		goto done;
+	}
+
+	if (f54->is_busy) {
+		state = VB2_BUF_STATE_ERROR;
+		goto done;
+	}
+
+	ret = rmi_f54_request_report(f54->fn, reptype);
+	if (ret) {
+		dev_err(&f54->fn->dev, "Error requesting F54 report\n");
+		state = VB2_BUF_STATE_ERROR;
+		goto done;
+	}
+
+	/* get frame data */
+	mutex_lock(&f54->data_mutex);
+
+	while (f54->is_busy) {
+		mutex_unlock(&f54->data_mutex);
+		if (!wait_for_completion_timeout(&f54->cmd_done,
+						 msecs_to_jiffies(1000))) {
+			dev_err(&f54->fn->dev, "Timed out\n");
+			state = VB2_BUF_STATE_ERROR;
+			goto done;
+		}
+		mutex_lock(&f54->data_mutex);
+	}
+
+	ptr = vb2_plane_vaddr(vb, 0);
+	if (!ptr) {
+		dev_err(&f54->fn->dev, "Error acquiring frame ptr\n");
+		state = VB2_BUF_STATE_ERROR;
+		goto data_done;
+	}
+
+	memcpy(ptr, f54->report_data, f54->report_size);
+	vb2_set_plane_payload(vb, 0, rmi_f54_get_report_size(f54));
+	state = VB2_BUF_STATE_DONE;
+
+data_done:
+	mutex_unlock(&f54->data_mutex);
+done:
+	vb2_buffer_done(vb, state);
+	mutex_unlock(&f54->status_mutex);
+}
+
+/* V4L2 structures */
+static const struct vb2_ops rmi_f54_queue_ops = {
+	.queue_setup            = rmi_f54_queue_setup,
+	.buf_queue              = rmi_f54_buffer_queue,
+	.wait_prepare           = vb2_ops_wait_prepare,
+	.wait_finish            = vb2_ops_wait_finish,
+};
+
+static const struct vb2_queue rmi_f54_queue = {
+	.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+	.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ,
+	.buf_struct_size = sizeof(struct vb2_buffer),
+	.ops = &rmi_f54_queue_ops,
+	.mem_ops = &vb2_vmalloc_memops,
+	.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC,
+	.min_buffers_needed = 1,
+};
+
+static int rmi_f54_vidioc_querycap(struct file *file, void *priv,
+				   struct v4l2_capability *cap)
+{
+	struct f54_data *f54 = video_drvdata(file);
+
+	strlcpy(cap->driver, F54_NAME, sizeof(cap->driver));
+	strlcpy(cap->card, SYNAPTICS_INPUT_DEVICE_NAME, sizeof(cap->card));
+	snprintf(cap->bus_info, sizeof(cap->bus_info),
+		"rmi4:%s", dev_name(&f54->fn->dev));
+
+	return 0;
+}
+
+static int rmi_f54_vidioc_enum_input(struct file *file, void *priv,
+				     struct v4l2_input *i)
+{
+	struct f54_data *f54 = video_drvdata(file);
+	enum rmi_f54_report_type reptype;
+
+	reptype = rmi_f54_get_reptype(f54, i->index);
+	if (reptype == F54_REPORT_NONE)
+		return -EINVAL;
+
+	i->type = V4L2_INPUT_TYPE_TOUCH;
+
+	strlcpy(i->name, rmi_f54_report_type_names[reptype], sizeof(i->name));
+	return 0;
+}
+
+static int rmi_f54_set_input(struct f54_data *f54, unsigned int i)
+{
+	struct v4l2_pix_format *f = &f54->format;
+	enum rmi_f54_report_type reptype;
+	int ret;
+
+	reptype = rmi_f54_get_reptype(f54, i);
+	if (reptype == F54_REPORT_NONE)
+		return -EINVAL;
+
+	ret = rmi_f54_get_pixel_fmt(reptype, &f->pixelformat);
+	if (ret)
+		return ret;
+
+	f54->input = i;
+
+	f->width = f54->num_rx_electrodes;
+	f->height = f54->num_tx_electrodes;
+	f->field = V4L2_FIELD_NONE;
+	f->colorspace = V4L2_COLORSPACE_RAW;
+	f->bytesperline = f->width * sizeof(u16);
+	f->sizeimage = f->width * f->height * sizeof(u16);
+
+	return 0;
+}
+
+static int rmi_f54_vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+	return rmi_f54_set_input(video_drvdata(file), i);
+}
+
+static int rmi_f54_vidioc_g_input(struct file *file, void *priv,
+				  unsigned int *i)
+{
+	struct f54_data *f54 = video_drvdata(file);
+
+	*i = f54->input;
+
+	return 0;
+}
+
+static int rmi_f54_vidioc_fmt(struct file *file, void *priv,
+			      struct v4l2_format *f)
+{
+	struct f54_data *f54 = video_drvdata(file);
+
+	f->fmt.pix = f54->format;
+
+	return 0;
+}
+
+static int rmi_f54_vidioc_enum_fmt(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *fmt)
+{
+	if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	switch (fmt->index) {
+	case 0:
+		fmt->pixelformat = V4L2_TCH_FMT_DELTA_TD16;
+		break;
+
+	case 1:
+		fmt->pixelformat = V4L2_TCH_FMT_DELTA_TD08;
+		break;
+
+	case 2:
+		fmt->pixelformat = V4L2_TCH_FMT_TU16;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rmi_f54_vidioc_g_parm(struct file *file, void *fh,
+				 struct v4l2_streamparm *a)
+{
+	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	a->parm.capture.readbuffers = 1;
+	a->parm.capture.timeperframe.numerator = 1;
+	a->parm.capture.timeperframe.denominator = 10;
+	return 0;
+}
+
+static const struct v4l2_ioctl_ops rmi_f54_video_ioctl_ops = {
+	.vidioc_querycap	= rmi_f54_vidioc_querycap,
+
+	.vidioc_enum_fmt_vid_cap = rmi_f54_vidioc_enum_fmt,
+	.vidioc_s_fmt_vid_cap	= rmi_f54_vidioc_fmt,
+	.vidioc_g_fmt_vid_cap	= rmi_f54_vidioc_fmt,
+	.vidioc_try_fmt_vid_cap	= rmi_f54_vidioc_fmt,
+	.vidioc_g_parm		= rmi_f54_vidioc_g_parm,
+
+	.vidioc_enum_input	= rmi_f54_vidioc_enum_input,
+	.vidioc_g_input		= rmi_f54_vidioc_g_input,
+	.vidioc_s_input		= rmi_f54_vidioc_s_input,
+
+	.vidioc_reqbufs		= vb2_ioctl_reqbufs,
+	.vidioc_create_bufs	= vb2_ioctl_create_bufs,
+	.vidioc_querybuf	= vb2_ioctl_querybuf,
+	.vidioc_qbuf		= vb2_ioctl_qbuf,
+	.vidioc_dqbuf		= vb2_ioctl_dqbuf,
+	.vidioc_expbuf		= vb2_ioctl_expbuf,
+
+	.vidioc_streamon	= vb2_ioctl_streamon,
+	.vidioc_streamoff	= vb2_ioctl_streamoff,
+};
+
+static const struct video_device rmi_f54_video_device = {
+	.name = "Synaptics RMI4",
+	.fops = &rmi_f54_video_fops,
+	.ioctl_ops = &rmi_f54_video_ioctl_ops,
+	.release = video_device_release_empty,
+	.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TOUCH |
+		       V4L2_CAP_READWRITE | V4L2_CAP_STREAMING,
+};
+
+static void rmi_f54_work(struct work_struct *work)
+{
+	struct f54_data *f54 = container_of(work, struct f54_data, work.work);
+	struct rmi_function *fn = f54->fn;
+	u8 fifo[2];
+	struct rmi_f54_reports *report;
+	int report_size;
+	u8 command;
+	u8 *data;
+	int error;
+
+	data = f54->report_data;
+	report_size = rmi_f54_get_report_size(f54);
+	if (report_size == 0) {
+		dev_err(&fn->dev, "Bad report size, report type=%d\n",
+				f54->report_type);
+		error = -EINVAL;
+		goto error;     /* retry won't help */
+	}
+	f54->standard_report[0].size = report_size;
+	report = f54->standard_report;
+
+	mutex_lock(&f54->data_mutex);
+
+	/*
+	 * Need to check if command has completed.
+	 * If not try again later.
+	 */
+	error = rmi_read(fn->rmi_dev, f54->fn->fd.command_base_addr,
+			 &command);
+	if (error) {
+		dev_err(&fn->dev, "Failed to read back command\n");
+		goto error;
+	}
+	if (command & F54_GET_REPORT) {
+		if (time_after(jiffies, f54->timeout)) {
+			dev_err(&fn->dev, "Get report command timed out\n");
+			error = -ETIMEDOUT;
+		}
+		report_size = 0;
+		goto error;
+	}
+
+	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Get report command completed, reading data\n");
+
+	report_size = 0;
+	for (; report->size; report++) {
+		fifo[0] = report->start & 0xff;
+		fifo[1] = (report->start >> 8) & 0xff;
+		error = rmi_write_block(fn->rmi_dev,
+					fn->fd.data_base_addr + F54_FIFO_OFFSET,
+					fifo, sizeof(fifo));
+		if (error) {
+			dev_err(&fn->dev, "Failed to set fifo start offset\n");
+			goto abort;
+		}
+
+		error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
+				       F54_REPORT_DATA_OFFSET, data,
+				       report->size);
+		if (error) {
+			dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
+				__func__, report->size, error);
+			goto abort;
+		}
+		data += report->size;
+		report_size += report->size;
+	}
+
+abort:
+	f54->report_size = error ? 0 : report_size;
+error:
+	if (error)
+		report_size = 0;
+
+	if (report_size == 0 && !error) {
+		queue_delayed_work(f54->workqueue, &f54->work,
+				   msecs_to_jiffies(1));
+	} else {
+		f54->is_busy = false;
+		complete(&f54->cmd_done);
+	}
+
+	mutex_unlock(&f54->data_mutex);
+}
+
+static int rmi_f54_attention(struct rmi_function *fn, unsigned long *irqbits)
+{
+	return 0;
+}
+
+static int rmi_f54_config(struct rmi_function *fn)
+{
+	struct rmi_driver *drv = fn->rmi_dev->driver;
+
+	drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+
+	return 0;
+}
+
+static int rmi_f54_detect(struct rmi_function *fn)
+{
+	int error;
+	struct f54_data *f54;
+
+	f54 = dev_get_drvdata(&fn->dev);
+
+	error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
+			       &f54->qry, sizeof(f54->qry));
+	if (error) {
+		dev_err(&fn->dev, "%s: Failed to query F54 properties\n",
+			__func__);
+		return error;
+	}
+
+	f54->num_rx_electrodes = f54->qry[0];
+	f54->num_tx_electrodes = f54->qry[1];
+	f54->capabilities = f54->qry[2];
+	f54->clock_rate = f54->qry[3] | (f54->qry[4] << 8);
+	f54->family = f54->qry[5];
+
+	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 num_rx_electrodes: %d\n",
+		f54->num_rx_electrodes);
+	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 num_tx_electrodes: %d\n",
+		f54->num_tx_electrodes);
+	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 capabilities: 0x%x\n",
+		f54->capabilities);
+	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 clock rate: 0x%x\n",
+		f54->clock_rate);
+	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 family: 0x%x\n",
+		f54->family);
+
+	f54->is_busy = false;
+
+	return 0;
+}
+
+static int rmi_f54_probe(struct rmi_function *fn)
+{
+	struct f54_data *f54;
+	int ret;
+	u8 rx, tx;
+
+	f54 = devm_kzalloc(&fn->dev, sizeof(struct f54_data), GFP_KERNEL);
+	if (!f54)
+		return -ENOMEM;
+
+	f54->fn = fn;
+	dev_set_drvdata(&fn->dev, f54);
+
+	ret = rmi_f54_detect(fn);
+	if (ret)
+		return ret;
+
+	mutex_init(&f54->data_mutex);
+	mutex_init(&f54->status_mutex);
+
+	rx = f54->num_rx_electrodes;
+	tx = f54->num_tx_electrodes;
+	f54->report_data = devm_kzalloc(&fn->dev,
+					sizeof(u16) * tx * rx,
+					GFP_KERNEL);
+	if (f54->report_data == NULL)
+		return -ENOMEM;
+
+	INIT_DELAYED_WORK(&f54->work, rmi_f54_work);
+
+	f54->workqueue = create_singlethread_workqueue("rmi4-poller");
+	if (!f54->workqueue)
+		return -ENOMEM;
+
+	rmi_f54_create_input_map(f54);
+
+	/* register video device */
+	strlcpy(f54->v4l2.name, F54_NAME, sizeof(f54->v4l2.name));
+	ret = v4l2_device_register(&fn->dev, &f54->v4l2);
+	if (ret) {
+		dev_err(&fn->dev, "Unable to register video dev.\n");
+		goto remove_wq;
+	}
+
+	/* initialize the queue */
+	mutex_init(&f54->lock);
+	f54->queue = rmi_f54_queue;
+	f54->queue.drv_priv = f54;
+	f54->queue.lock = &f54->lock;
+	f54->queue.dev = &fn->dev;
+
+	ret = vb2_queue_init(&f54->queue);
+	if (ret)
+		goto remove_v4l2;
+
+	f54->vdev = rmi_f54_video_device;
+	f54->vdev.v4l2_dev = &f54->v4l2;
+	f54->vdev.lock = &f54->lock;
+	f54->vdev.vfl_dir = VFL_DIR_RX;
+	f54->vdev.queue = &f54->queue;
+	video_set_drvdata(&f54->vdev, f54);
+
+	ret = video_register_device(&f54->vdev, VFL_TYPE_TOUCH, -1);
+	if (ret) {
+		dev_err(&fn->dev, "Unable to register video subdevice.");
+		goto remove_v4l2;
+	}
+
+	return 0;
+
+remove_v4l2:
+	v4l2_device_unregister(&f54->v4l2);
+remove_wq:
+	cancel_delayed_work_sync(&f54->work);
+	flush_workqueue(f54->workqueue);
+	destroy_workqueue(f54->workqueue);
+	return ret;
+}
+
+static void rmi_f54_remove(struct rmi_function *fn)
+{
+	struct f54_data *f54 = dev_get_drvdata(&fn->dev);
+
+	video_unregister_device(&f54->vdev);
+	v4l2_device_unregister(&f54->v4l2);
+}
+
+struct rmi_function_handler rmi_f54_handler = {
+	.driver = {
+		.name = F54_NAME,
+	},
+	.func = 0x54,
+	.probe = rmi_f54_probe,
+	.config = rmi_f54_config,
+	.attention = rmi_f54_attention,
+	.remove = rmi_f54_remove,
+};
diff --git a/drivers/input/rmi4/rmi_i2c.c b/drivers/input/rmi4/rmi_i2c.c
index 6f2e0e4..1ebc2c1 100644
--- a/drivers/input/rmi4/rmi_i2c.c
+++ b/drivers/input/rmi4/rmi_i2c.c
@@ -221,6 +221,21 @@
 MODULE_DEVICE_TABLE(of, rmi_i2c_of_match);
 #endif
 
+static void rmi_i2c_regulator_bulk_disable(void *data)
+{
+	struct rmi_i2c_xport *rmi_i2c = data;
+
+	regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
+			       rmi_i2c->supplies);
+}
+
+static void rmi_i2c_unregister_transport(void *data)
+{
+	struct rmi_i2c_xport *rmi_i2c = data;
+
+	rmi_unregister_transport_device(&rmi_i2c->xport);
+}
+
 static int rmi_i2c_probe(struct i2c_client *client,
 			 const struct i2c_device_id *id)
 {
@@ -264,6 +279,12 @@
 	if (retval < 0)
 		return retval;
 
+	retval = devm_add_action_or_reset(&client->dev,
+					  rmi_i2c_regulator_bulk_disable,
+					  rmi_i2c);
+	if (retval)
+		return retval;
+
 	of_property_read_u32(client->dev.of_node, "syna,startup-delay-ms",
 			     &rmi_i2c->startup_delay);
 
@@ -294,6 +315,11 @@
 			client->addr);
 		return retval;
 	}
+	retval = devm_add_action_or_reset(&client->dev,
+					  rmi_i2c_unregister_transport,
+					  rmi_i2c);
+	if (retval)
+		return retval;
 
 	retval = rmi_i2c_init_irq(client);
 	if (retval < 0)
@@ -304,17 +330,6 @@
 	return 0;
 }
 
-static int rmi_i2c_remove(struct i2c_client *client)
-{
-	struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
-
-	rmi_unregister_transport_device(&rmi_i2c->xport);
-	regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
-			       rmi_i2c->supplies);
-
-	return 0;
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int rmi_i2c_suspend(struct device *dev)
 {
@@ -431,7 +446,6 @@
 	},
 	.id_table	= rmi_id,
 	.probe		= rmi_i2c_probe,
-	.remove		= rmi_i2c_remove,
 };
 
 module_i2c_driver(rmi_i2c_driver);
diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
index 55bd1b3..4ebef60 100644
--- a/drivers/input/rmi4/rmi_spi.c
+++ b/drivers/input/rmi4/rmi_spi.c
@@ -396,6 +396,13 @@
 }
 #endif
 
+static void rmi_spi_unregister_transport(void *data)
+{
+	struct rmi_spi_xport *rmi_spi = data;
+
+	rmi_unregister_transport_device(&rmi_spi->xport);
+}
+
 static int rmi_spi_probe(struct spi_device *spi)
 {
 	struct rmi_spi_xport *rmi_spi;
@@ -464,6 +471,11 @@
 		dev_err(&spi->dev, "failed to register transport.\n");
 		return retval;
 	}
+	retval = devm_add_action_or_reset(&spi->dev,
+					  rmi_spi_unregister_transport,
+					  rmi_spi);
+	if (retval)
+		return retval;
 
 	retval = rmi_spi_init_irq(spi);
 	if (retval < 0)
@@ -473,15 +485,6 @@
 	return 0;
 }
 
-static int rmi_spi_remove(struct spi_device *spi)
-{
-	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
-
-	rmi_unregister_transport_device(&rmi_spi->xport);
-
-	return 0;
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int rmi_spi_suspend(struct device *dev)
 {
@@ -577,7 +580,6 @@
 	},
 	.id_table	= rmi_id,
 	.probe		= rmi_spi_probe,
-	.remove		= rmi_spi_remove,
 };
 
 module_spi_driver(rmi_spi_driver);
diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h
index a5eed2a..34da81c 100644
--- a/drivers/input/serio/i8042-io.h
+++ b/drivers/input/serio/i8042-io.h
@@ -81,7 +81,7 @@
 		return -EBUSY;
 #endif
 
-	i8042_reset = 1;
+	i8042_reset = I8042_RESET_ALWAYS;
 	return 0;
 }
 
diff --git a/drivers/input/serio/i8042-ip22io.h b/drivers/input/serio/i8042-ip22io.h
index ee1ad27..08a1c10 100644
--- a/drivers/input/serio/i8042-ip22io.h
+++ b/drivers/input/serio/i8042-ip22io.h
@@ -61,7 +61,7 @@
 		return -EBUSY;
 #endif
 
-	i8042_reset = 1;
+	i8042_reset = I8042_RESET_ALWAYS;
 
 	return 0;
 }
diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h
index f708c75..1aabea4 100644
--- a/drivers/input/serio/i8042-ppcio.h
+++ b/drivers/input/serio/i8042-ppcio.h
@@ -44,7 +44,7 @@
 
 static inline int i8042_platform_init(void)
 {
-	i8042_reset = 1;
+	i8042_reset = I8042_RESET_ALWAYS;
 	return 0;
 }
 
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index afcd1c1..6231d63 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -130,7 +130,7 @@
 		}
 	}
 
-	i8042_reset = 1;
+	i8042_reset = I8042_RESET_ALWAYS;
 
 	return 0;
 }
diff --git a/drivers/input/serio/i8042-unicore32io.h b/drivers/input/serio/i8042-unicore32io.h
index 73f5cc1..4557475 100644
--- a/drivers/input/serio/i8042-unicore32io.h
+++ b/drivers/input/serio/i8042-unicore32io.h
@@ -61,7 +61,7 @@
 	if (!request_mem_region(I8042_REGION_START, I8042_REGION_SIZE, "i8042"))
 		return -EBUSY;
 
-	i8042_reset = 1;
+	i8042_reset = I8042_RESET_ALWAYS;
 	return 0;
 }
 
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 68f5f4a..f4bfb4b 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -510,6 +510,90 @@
 	{ }
 };
 
+/*
+ * On some Asus laptops, just running self tests cause problems.
+ */
+static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "A455LD"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "K401LB"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "K501LB"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "K501LX"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "R409L"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "V502LX"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "X302LA"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "X450LD"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "X455LAB"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "X455LDB"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "X455LF"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Z450LA"),
+		},
+	},
+	{ }
+};
 static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
 	{
 		/* MSI Wind U-100 */
@@ -1072,12 +1156,18 @@
 		return retval;
 
 #if defined(__ia64__)
-        i8042_reset = true;
+        i8042_reset = I8042_RESET_ALWAYS;
 #endif
 
 #ifdef CONFIG_X86
-	if (dmi_check_system(i8042_dmi_reset_table))
-		i8042_reset = true;
+	/* Honor module parameter when value is not default */
+	if (i8042_reset == I8042_RESET_DEFAULT) {
+		if (dmi_check_system(i8042_dmi_reset_table))
+			i8042_reset = I8042_RESET_ALWAYS;
+
+		if (dmi_check_system(i8042_dmi_noselftest_table))
+			i8042_reset = I8042_RESET_NEVER;
+	}
 
 	if (dmi_check_system(i8042_dmi_noloop_table))
 		i8042_noloop = true;
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 405252a..89abfdb 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -48,9 +48,39 @@
 module_param_named(unlock, i8042_unlock, bool, 0);
 MODULE_PARM_DESC(unlock, "Ignore keyboard lock.");
 
-static bool i8042_reset;
-module_param_named(reset, i8042_reset, bool, 0);
-MODULE_PARM_DESC(reset, "Reset controller during init and cleanup.");
+enum i8042_controller_reset_mode {
+	I8042_RESET_NEVER,
+	I8042_RESET_ALWAYS,
+	I8042_RESET_ON_S2RAM,
+#define I8042_RESET_DEFAULT	I8042_RESET_ON_S2RAM
+};
+static enum i8042_controller_reset_mode i8042_reset = I8042_RESET_DEFAULT;
+static int i8042_set_reset(const char *val, const struct kernel_param *kp)
+{
+	enum i8042_controller_reset_mode *arg = kp->arg;
+	int error;
+	bool reset;
+
+	if (val) {
+		error = kstrtobool(val, &reset);
+		if (error)
+			return error;
+	} else {
+		reset = true;
+	}
+
+	*arg = reset ? I8042_RESET_ALWAYS : I8042_RESET_NEVER;
+	return 0;
+}
+
+static const struct kernel_param_ops param_ops_reset_param = {
+	.flags = KERNEL_PARAM_OPS_FL_NOARG,
+	.set = i8042_set_reset,
+};
+#define param_check_reset_param(name, p)	\
+	__param_check(name, p, enum i8042_controller_reset_mode)
+module_param_named(reset, i8042_reset, reset_param, 0);
+MODULE_PARM_DESC(reset, "Reset controller on resume, cleanup or both");
 
 static bool i8042_direct;
 module_param_named(direct, i8042_direct, bool, 0);
@@ -1019,7 +1049,7 @@
  * Reset the controller and reset CRT to the original value set by BIOS.
  */
 
-static void i8042_controller_reset(bool force_reset)
+static void i8042_controller_reset(bool s2r_wants_reset)
 {
 	i8042_flush();
 
@@ -1044,8 +1074,10 @@
  * Reset the controller if requested.
  */
 
-	if (i8042_reset || force_reset)
+	if (i8042_reset == I8042_RESET_ALWAYS ||
+	    (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) {
 		i8042_controller_selftest();
+	}
 
 /*
  * Restore the original control register setting.
@@ -1110,7 +1142,7 @@
  * before suspending.
  */
 
-static int i8042_controller_resume(bool force_reset)
+static int i8042_controller_resume(bool s2r_wants_reset)
 {
 	int error;
 
@@ -1118,7 +1150,8 @@
 	if (error)
 		return error;
 
-	if (i8042_reset || force_reset) {
+	if (i8042_reset == I8042_RESET_ALWAYS ||
+	    (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) {
 		error = i8042_controller_selftest();
 		if (error)
 			return error;
@@ -1195,7 +1228,7 @@
 
 static int i8042_pm_resume(struct device *dev)
 {
-	bool force_reset;
+	bool want_reset;
 	int i;
 
 	for (i = 0; i < I8042_NUM_PORTS; i++) {
@@ -1218,9 +1251,9 @@
 	 * off control to the platform firmware, otherwise we can simply restore
 	 * the mode.
 	 */
-	force_reset = pm_resume_via_firmware();
+	want_reset = pm_resume_via_firmware();
 
-	return i8042_controller_resume(force_reset);
+	return i8042_controller_resume(want_reset);
 }
 
 static int i8042_pm_thaw(struct device *dev)
@@ -1482,7 +1515,7 @@
 
 	i8042_platform_device = dev;
 
-	if (i8042_reset) {
+	if (i8042_reset == I8042_RESET_ALWAYS) {
 		error = i8042_controller_selftest();
 		if (error)
 			return error;
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 5079813..efca013 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -115,6 +115,15 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called atmel_mxt_ts.
 
+config TOUCHSCREEN_ATMEL_MXT_T37
+	bool "Support T37 Diagnostic Data"
+	depends on TOUCHSCREEN_ATMEL_MXT
+	depends on VIDEO_V4L2=y || (TOUCHSCREEN_ATMEL_MXT=m && VIDEO_V4L2=m)
+	select VIDEOBUF2_VMALLOC
+	help
+	  Say Y here if you want support to output data from the T37
+	  Diagnostic Data object using a V4L device.
+
 config TOUCHSCREEN_AUO_PIXCIR
 	tristate "AUO in-cell touchscreen using Pixcir ICs"
 	depends on I2C
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 5af7907..e5d185f 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2010 Samsung Electronics Co.Ltd
  * Copyright (C) 2011-2014 Atmel Corporation
  * Copyright (C) 2012 Google, Inc.
+ * Copyright (C) 2016 Zodiac Inflight Innovations
  *
  * Author: Joonyoung Shim <jy0922.shim@samsung.com>
  *
@@ -28,6 +29,10 @@
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <asm/unaligned.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-vmalloc.h>
 
 /* Firmware files */
 #define MXT_FW_NAME		"maxtouch.fw"
@@ -99,6 +104,8 @@
 
 /* MXT_TOUCH_MULTI_T9 field */
 #define MXT_T9_CTRL		0
+#define MXT_T9_XSIZE		3
+#define MXT_T9_YSIZE		4
 #define MXT_T9_ORIENT		9
 #define MXT_T9_RANGE		18
 
@@ -119,11 +126,31 @@
 
 /* MXT_TOUCH_MULTI_T9 orient */
 #define MXT_T9_ORIENT_SWITCH	(1 << 0)
+#define MXT_T9_ORIENT_INVERTX	(1 << 1)
+#define MXT_T9_ORIENT_INVERTY	(1 << 2)
 
 /* MXT_SPT_COMMSCONFIG_T18 */
 #define MXT_COMMS_CTRL		0
 #define MXT_COMMS_CMD		1
 
+/* MXT_DEBUG_DIAGNOSTIC_T37 */
+#define MXT_DIAGNOSTIC_PAGEUP	0x01
+#define MXT_DIAGNOSTIC_DELTAS	0x10
+#define MXT_DIAGNOSTIC_REFS	0x11
+#define MXT_DIAGNOSTIC_SIZE	128
+
+#define MXT_FAMILY_1386			160
+#define MXT1386_COLUMNS			3
+#define MXT1386_PAGES_PER_COLUMN	8
+
+struct t37_debug {
+#ifdef CONFIG_TOUCHSCREEN_ATMEL_MXT_T37
+	u8 mode;
+	u8 page;
+	u8 data[MXT_DIAGNOSTIC_SIZE];
+#endif
+};
+
 /* Define for MXT_GEN_COMMAND_T6 */
 #define MXT_BOOT_VALUE		0xa5
 #define MXT_RESET_VALUE		0x01
@@ -133,10 +160,14 @@
 #define MXT_T100_CTRL		0
 #define MXT_T100_CFG1		1
 #define MXT_T100_TCHAUX		3
+#define MXT_T100_XSIZE		9
 #define MXT_T100_XRANGE		13
+#define MXT_T100_YSIZE		20
 #define MXT_T100_YRANGE		24
 
 #define MXT_T100_CFG_SWITCHXY	BIT(5)
+#define MXT_T100_CFG_INVERTY	BIT(6)
+#define MXT_T100_CFG_INVERTX	BIT(7)
 
 #define MXT_T100_TCHAUX_VECT	BIT(0)
 #define MXT_T100_TCHAUX_AMPL	BIT(1)
@@ -205,6 +236,37 @@
 	u8 num_report_ids;
 } __packed;
 
+struct mxt_dbg {
+	u16 t37_address;
+	u16 diag_cmd_address;
+	struct t37_debug *t37_buf;
+	unsigned int t37_pages;
+	unsigned int t37_nodes;
+
+	struct v4l2_device v4l2;
+	struct v4l2_pix_format format;
+	struct video_device vdev;
+	struct vb2_queue queue;
+	struct mutex lock;
+	int input;
+};
+
+enum v4l_dbg_inputs {
+	MXT_V4L_INPUT_DELTAS,
+	MXT_V4L_INPUT_REFS,
+	MXT_V4L_INPUT_MAX,
+};
+
+static const struct v4l2_file_operations mxt_video_fops = {
+	.owner = THIS_MODULE,
+	.open = v4l2_fh_open,
+	.release = vb2_fop_release,
+	.unlocked_ioctl = video_ioctl2,
+	.read = vb2_fop_read,
+	.mmap = vb2_fop_mmap,
+	.poll = vb2_fop_poll,
+};
+
 /* Each client has this additional data */
 struct mxt_data {
 	struct i2c_client *client;
@@ -216,7 +278,11 @@
 	unsigned int irq;
 	unsigned int max_x;
 	unsigned int max_y;
+	bool invertx;
+	bool inverty;
 	bool xy_switch;
+	u8 xsize;
+	u8 ysize;
 	bool in_bootloader;
 	u16 mem_size;
 	u8 t100_aux_ampl;
@@ -233,6 +299,7 @@
 	u8 num_touchids;
 	u8 multitouch;
 	struct t7_config t7_cfg;
+	struct mxt_dbg dbg;
 
 	/* Cached parameters from object table */
 	u16 T5_address;
@@ -257,6 +324,11 @@
 	struct completion crc_completion;
 };
 
+struct mxt_vb2_buffer {
+	struct vb2_buffer	vb;
+	struct list_head	list;
+};
+
 static size_t mxt_obj_size(const struct mxt_object *obj)
 {
 	return obj->size_minus_one + 1;
@@ -1503,6 +1575,11 @@
 
 static void mxt_free_object_table(struct mxt_data *data)
 {
+#ifdef CONFIG_TOUCHSCREEN_ATMEL_MXT_T37
+	video_unregister_device(&data->dbg.vdev);
+	v4l2_device_unregister(&data->dbg.v4l2);
+#endif
+
 	kfree(data->object_table);
 	data->object_table = NULL;
 	kfree(data->msg_buf);
@@ -1661,6 +1738,18 @@
 		return -EINVAL;
 
 	error = __mxt_read_reg(client,
+			       object->start_address + MXT_T9_XSIZE,
+			       sizeof(data->xsize), &data->xsize);
+	if (error)
+		return error;
+
+	error = __mxt_read_reg(client,
+			       object->start_address + MXT_T9_YSIZE,
+			       sizeof(data->ysize), &data->ysize);
+	if (error)
+		return error;
+
+	error = __mxt_read_reg(client,
 			       object->start_address + MXT_T9_RANGE,
 			       sizeof(range), &range);
 	if (error)
@@ -1676,6 +1765,8 @@
 		return error;
 
 	data->xy_switch = orient & MXT_T9_ORIENT_SWITCH;
+	data->invertx = orient & MXT_T9_ORIENT_INVERTX;
+	data->inverty = orient & MXT_T9_ORIENT_INVERTY;
 
 	return 0;
 }
@@ -1710,6 +1801,18 @@
 
 	data->max_y = get_unaligned_le16(&range_y);
 
+	error = __mxt_read_reg(client,
+			       object->start_address + MXT_T100_XSIZE,
+			       sizeof(data->xsize), &data->xsize);
+	if (error)
+		return error;
+
+	error = __mxt_read_reg(client,
+			       object->start_address + MXT_T100_YSIZE,
+			       sizeof(data->ysize), &data->ysize);
+	if (error)
+		return error;
+
 	/* read orientation config */
 	error =  __mxt_read_reg(client,
 				object->start_address + MXT_T100_CFG1,
@@ -1718,6 +1821,8 @@
 		return error;
 
 	data->xy_switch = cfg & MXT_T100_CFG_SWITCHXY;
+	data->invertx = cfg & MXT_T100_CFG_INVERTX;
+	data->inverty = cfg & MXT_T100_CFG_INVERTY;
 
 	/* allocate aux bytes */
 	error =  __mxt_read_reg(client,
@@ -2043,6 +2148,420 @@
 	return 0;
 }
 
+#ifdef CONFIG_TOUCHSCREEN_ATMEL_MXT_T37
+static u16 mxt_get_debug_value(struct mxt_data *data, unsigned int x,
+			       unsigned int y)
+{
+	struct mxt_info *info = &data->info;
+	struct mxt_dbg *dbg = &data->dbg;
+	unsigned int ofs, page;
+	unsigned int col = 0;
+	unsigned int col_width;
+
+	if (info->family_id == MXT_FAMILY_1386) {
+		col_width = info->matrix_ysize / MXT1386_COLUMNS;
+		col = y / col_width;
+		y = y % col_width;
+	} else {
+		col_width = info->matrix_ysize;
+	}
+
+	ofs = (y + (x * col_width)) * sizeof(u16);
+	page = ofs / MXT_DIAGNOSTIC_SIZE;
+	ofs %= MXT_DIAGNOSTIC_SIZE;
+
+	if (info->family_id == MXT_FAMILY_1386)
+		page += col * MXT1386_PAGES_PER_COLUMN;
+
+	return get_unaligned_le16(&dbg->t37_buf[page].data[ofs]);
+}
+
+static int mxt_convert_debug_pages(struct mxt_data *data, u16 *outbuf)
+{
+	struct mxt_dbg *dbg = &data->dbg;
+	unsigned int x = 0;
+	unsigned int y = 0;
+	unsigned int i, rx, ry;
+
+	for (i = 0; i < dbg->t37_nodes; i++) {
+		/* Handle orientation */
+		rx = data->xy_switch ? y : x;
+		ry = data->xy_switch ? x : y;
+		rx = data->invertx ? (data->xsize - 1 - rx) : rx;
+		ry = data->inverty ? (data->ysize - 1 - ry) : ry;
+
+		outbuf[i] = mxt_get_debug_value(data, rx, ry);
+
+		/* Next value */
+		if (++x >= (data->xy_switch ? data->ysize : data->xsize)) {
+			x = 0;
+			y++;
+		}
+	}
+
+	return 0;
+}
+
+static int mxt_read_diagnostic_debug(struct mxt_data *data, u8 mode,
+				     u16 *outbuf)
+{
+	struct mxt_dbg *dbg = &data->dbg;
+	int retries = 0;
+	int page;
+	int ret;
+	u8 cmd = mode;
+	struct t37_debug *p;
+	u8 cmd_poll;
+
+	for (page = 0; page < dbg->t37_pages; page++) {
+		p = dbg->t37_buf + page;
+
+		ret = mxt_write_reg(data->client, dbg->diag_cmd_address,
+				    cmd);
+		if (ret)
+			return ret;
+
+		retries = 0;
+		msleep(20);
+wait_cmd:
+		/* Read back command byte */
+		ret = __mxt_read_reg(data->client, dbg->diag_cmd_address,
+				     sizeof(cmd_poll), &cmd_poll);
+		if (ret)
+			return ret;
+
+		/* Field is cleared once the command has been processed */
+		if (cmd_poll) {
+			if (retries++ > 100)
+				return -EINVAL;
+
+			msleep(20);
+			goto wait_cmd;
+		}
+
+		/* Read T37 page */
+		ret = __mxt_read_reg(data->client, dbg->t37_address,
+				     sizeof(struct t37_debug), p);
+		if (ret)
+			return ret;
+
+		if (p->mode != mode || p->page != page) {
+			dev_err(&data->client->dev, "T37 page mismatch\n");
+			return -EINVAL;
+		}
+
+		dev_dbg(&data->client->dev, "%s page:%d retries:%d\n",
+			__func__, page, retries);
+
+		/* For remaining pages, write PAGEUP rather than mode */
+		cmd = MXT_DIAGNOSTIC_PAGEUP;
+	}
+
+	return mxt_convert_debug_pages(data, outbuf);
+}
+
+static int mxt_queue_setup(struct vb2_queue *q,
+		       unsigned int *nbuffers, unsigned int *nplanes,
+		       unsigned int sizes[], struct device *alloc_devs[])
+{
+	struct mxt_data *data = q->drv_priv;
+	size_t size = data->dbg.t37_nodes * sizeof(u16);
+
+	if (*nplanes)
+		return sizes[0] < size ? -EINVAL : 0;
+
+	*nplanes = 1;
+	sizes[0] = size;
+
+	return 0;
+}
+
+static void mxt_buffer_queue(struct vb2_buffer *vb)
+{
+	struct mxt_data *data = vb2_get_drv_priv(vb->vb2_queue);
+	u16 *ptr;
+	int ret;
+	u8 mode;
+
+	ptr = vb2_plane_vaddr(vb, 0);
+	if (!ptr) {
+		dev_err(&data->client->dev, "Error acquiring frame ptr\n");
+		goto fault;
+	}
+
+	switch (data->dbg.input) {
+	case MXT_V4L_INPUT_DELTAS:
+	default:
+		mode = MXT_DIAGNOSTIC_DELTAS;
+		break;
+
+	case MXT_V4L_INPUT_REFS:
+		mode = MXT_DIAGNOSTIC_REFS;
+		break;
+	}
+
+	ret = mxt_read_diagnostic_debug(data, mode, ptr);
+	if (ret)
+		goto fault;
+
+	vb2_set_plane_payload(vb, 0, data->dbg.t37_nodes * sizeof(u16));
+	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+	return;
+
+fault:
+	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+}
+
+/* V4L2 structures */
+static const struct vb2_ops mxt_queue_ops = {
+	.queue_setup		= mxt_queue_setup,
+	.buf_queue		= mxt_buffer_queue,
+	.wait_prepare		= vb2_ops_wait_prepare,
+	.wait_finish		= vb2_ops_wait_finish,
+};
+
+static const struct vb2_queue mxt_queue = {
+	.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+	.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ,
+	.buf_struct_size = sizeof(struct mxt_vb2_buffer),
+	.ops = &mxt_queue_ops,
+	.mem_ops = &vb2_vmalloc_memops,
+	.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC,
+	.min_buffers_needed = 1,
+};
+
+static int mxt_vidioc_querycap(struct file *file, void *priv,
+				 struct v4l2_capability *cap)
+{
+	struct mxt_data *data = video_drvdata(file);
+
+	strlcpy(cap->driver, "atmel_mxt_ts", sizeof(cap->driver));
+	strlcpy(cap->card, "atmel_mxt_ts touch", sizeof(cap->card));
+	snprintf(cap->bus_info, sizeof(cap->bus_info),
+		 "I2C:%s", dev_name(&data->client->dev));
+	return 0;
+}
+
+static int mxt_vidioc_enum_input(struct file *file, void *priv,
+				   struct v4l2_input *i)
+{
+	if (i->index >= MXT_V4L_INPUT_MAX)
+		return -EINVAL;
+
+	i->type = V4L2_INPUT_TYPE_TOUCH;
+
+	switch (i->index) {
+	case MXT_V4L_INPUT_REFS:
+		strlcpy(i->name, "Mutual Capacitance References",
+			sizeof(i->name));
+		break;
+	case MXT_V4L_INPUT_DELTAS:
+		strlcpy(i->name, "Mutual Capacitance Deltas", sizeof(i->name));
+		break;
+	}
+
+	return 0;
+}
+
+static int mxt_set_input(struct mxt_data *data, unsigned int i)
+{
+	struct v4l2_pix_format *f = &data->dbg.format;
+
+	if (i >= MXT_V4L_INPUT_MAX)
+		return -EINVAL;
+
+	if (i == MXT_V4L_INPUT_DELTAS)
+		f->pixelformat = V4L2_TCH_FMT_DELTA_TD16;
+	else
+		f->pixelformat = V4L2_TCH_FMT_TU16;
+
+	f->width = data->xy_switch ? data->ysize : data->xsize;
+	f->height = data->xy_switch ? data->xsize : data->ysize;
+	f->field = V4L2_FIELD_NONE;
+	f->colorspace = V4L2_COLORSPACE_RAW;
+	f->bytesperline = f->width * sizeof(u16);
+	f->sizeimage = f->width * f->height * sizeof(u16);
+
+	data->dbg.input = i;
+
+	return 0;
+}
+
+static int mxt_vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+	return mxt_set_input(video_drvdata(file), i);
+}
+
+static int mxt_vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+	struct mxt_data *data = video_drvdata(file);
+
+	*i = data->dbg.input;
+
+	return 0;
+}
+
+static int mxt_vidioc_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+	struct mxt_data *data = video_drvdata(file);
+
+	f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	f->fmt.pix = data->dbg.format;
+
+	return 0;
+}
+
+static int mxt_vidioc_enum_fmt(struct file *file, void *priv,
+				 struct v4l2_fmtdesc *fmt)
+{
+	if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	switch (fmt->index) {
+	case 0:
+		fmt->pixelformat = V4L2_TCH_FMT_TU16;
+		break;
+
+	case 1:
+		fmt->pixelformat = V4L2_TCH_FMT_DELTA_TD16;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mxt_vidioc_g_parm(struct file *file, void *fh,
+			     struct v4l2_streamparm *a)
+{
+	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	a->parm.capture.readbuffers = 1;
+	a->parm.capture.timeperframe.numerator = 1;
+	a->parm.capture.timeperframe.denominator = 10;
+	return 0;
+}
+
+static const struct v4l2_ioctl_ops mxt_video_ioctl_ops = {
+	.vidioc_querycap        = mxt_vidioc_querycap,
+
+	.vidioc_enum_fmt_vid_cap = mxt_vidioc_enum_fmt,
+	.vidioc_s_fmt_vid_cap   = mxt_vidioc_fmt,
+	.vidioc_g_fmt_vid_cap   = mxt_vidioc_fmt,
+	.vidioc_try_fmt_vid_cap	= mxt_vidioc_fmt,
+	.vidioc_g_parm		= mxt_vidioc_g_parm,
+
+	.vidioc_enum_input      = mxt_vidioc_enum_input,
+	.vidioc_g_input         = mxt_vidioc_g_input,
+	.vidioc_s_input         = mxt_vidioc_s_input,
+
+	.vidioc_reqbufs         = vb2_ioctl_reqbufs,
+	.vidioc_create_bufs     = vb2_ioctl_create_bufs,
+	.vidioc_querybuf        = vb2_ioctl_querybuf,
+	.vidioc_qbuf            = vb2_ioctl_qbuf,
+	.vidioc_dqbuf           = vb2_ioctl_dqbuf,
+	.vidioc_expbuf          = vb2_ioctl_expbuf,
+
+	.vidioc_streamon        = vb2_ioctl_streamon,
+	.vidioc_streamoff       = vb2_ioctl_streamoff,
+};
+
+static const struct video_device mxt_video_device = {
+	.name = "Atmel maxTouch",
+	.fops = &mxt_video_fops,
+	.ioctl_ops = &mxt_video_ioctl_ops,
+	.release = video_device_release_empty,
+	.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TOUCH |
+		       V4L2_CAP_READWRITE | V4L2_CAP_STREAMING,
+};
+
+static void mxt_debug_init(struct mxt_data *data)
+{
+	struct mxt_info *info = &data->info;
+	struct mxt_dbg *dbg = &data->dbg;
+	struct mxt_object *object;
+	int error;
+
+	object = mxt_get_object(data, MXT_GEN_COMMAND_T6);
+	if (!object)
+		goto error;
+
+	dbg->diag_cmd_address = object->start_address + MXT_COMMAND_DIAGNOSTIC;
+
+	object = mxt_get_object(data, MXT_DEBUG_DIAGNOSTIC_T37);
+	if (!object)
+		goto error;
+
+	if (mxt_obj_size(object) != sizeof(struct t37_debug)) {
+		dev_warn(&data->client->dev, "Bad T37 size");
+		goto error;
+	}
+
+	dbg->t37_address = object->start_address;
+
+	/* Calculate size of data and allocate buffer */
+	dbg->t37_nodes = data->xsize * data->ysize;
+
+	if (info->family_id == MXT_FAMILY_1386)
+		dbg->t37_pages = MXT1386_COLUMNS * MXT1386_PAGES_PER_COLUMN;
+	else
+		dbg->t37_pages = DIV_ROUND_UP(data->xsize *
+					      data->info.matrix_ysize *
+					      sizeof(u16),
+					      sizeof(dbg->t37_buf->data));
+
+	dbg->t37_buf = devm_kmalloc_array(&data->client->dev, dbg->t37_pages,
+					  sizeof(struct t37_debug), GFP_KERNEL);
+	if (!dbg->t37_buf)
+		goto error;
+
+	/* init channel to zero */
+	mxt_set_input(data, 0);
+
+	/* register video device */
+	snprintf(dbg->v4l2.name, sizeof(dbg->v4l2.name), "%s", "atmel_mxt_ts");
+	error = v4l2_device_register(&data->client->dev, &dbg->v4l2);
+	if (error)
+		goto error;
+
+	/* initialize the queue */
+	mutex_init(&dbg->lock);
+	dbg->queue = mxt_queue;
+	dbg->queue.drv_priv = data;
+	dbg->queue.lock = &dbg->lock;
+	dbg->queue.dev = &data->client->dev;
+
+	error = vb2_queue_init(&dbg->queue);
+	if (error)
+		goto error_unreg_v4l2;
+
+	dbg->vdev = mxt_video_device;
+	dbg->vdev.v4l2_dev = &dbg->v4l2;
+	dbg->vdev.lock = &dbg->lock;
+	dbg->vdev.vfl_dir = VFL_DIR_RX;
+	dbg->vdev.queue = &dbg->queue;
+	video_set_drvdata(&dbg->vdev, data);
+
+	error = video_register_device(&dbg->vdev, VFL_TYPE_TOUCH, -1);
+	if (error)
+		goto error_unreg_v4l2;
+
+	return;
+
+error_unreg_v4l2:
+	v4l2_device_unregister(&dbg->v4l2);
+error:
+	dev_warn(&data->client->dev, "Error initializing T37\n");
+}
+#else
+static void mxt_debug_init(struct mxt_data *data)
+{
+}
+#endif
+
 static int mxt_configure_objects(struct mxt_data *data,
 				 const struct firmware *cfg)
 {
@@ -2070,6 +2589,8 @@
 		dev_warn(dev, "No touch object detected\n");
 	}
 
+	mxt_debug_init(data);
+
 	dev_info(dev,
 		 "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
 		 info->family_id, info->variant_id, info->version >> 4,
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index fb5fb91..552a377 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -157,6 +157,7 @@
 
 	char phys[32];
 	char product_name[16];
+	char ic_name[4];
 
 	unsigned int max_x;
 	unsigned int max_y;
@@ -263,6 +264,18 @@
 		dev_dbg(&ts->client->dev, "product name: %.*s\n",
 			(int)sizeof(ts->product_name), ts->product_name);
 
+	/* IC name */
+	cmd[0] = MIP4_R0_INFO;
+	cmd[1] = MIP4_R1_INFO_IC_NAME;
+	error = mip4_i2c_xfer(ts, cmd, sizeof(cmd),
+			      ts->ic_name, sizeof(ts->ic_name));
+	if (error)
+		dev_warn(&ts->client->dev,
+			 "Failed to retrieve IC name: %d\n", error);
+	else
+		dev_dbg(&ts->client->dev, "IC name: %.*s\n",
+			(int)sizeof(ts->ic_name), ts->ic_name);
+
 	/* Firmware version */
 	error = mip4_get_fw_version(ts);
 	if (error)
@@ -1326,7 +1339,7 @@
 	 * paired with current firmware in the chip.
 	 */
 	count = snprintf(buf, PAGE_SIZE, "%.*s\n",
-		(int)sizeof(ts->product_name), ts->product_name);
+			 (int)sizeof(ts->product_name), ts->product_name);
 
 	mutex_unlock(&ts->input->mutex);
 
@@ -1335,9 +1348,30 @@
 
 static DEVICE_ATTR(hw_version, S_IRUGO, mip4_sysfs_read_hw_version, NULL);
 
+static ssize_t mip4_sysfs_read_ic_name(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct mip4_ts *ts = i2c_get_clientdata(client);
+	size_t count;
+
+	mutex_lock(&ts->input->mutex);
+
+	count = snprintf(buf, PAGE_SIZE, "%.*s\n",
+			 (int)sizeof(ts->ic_name), ts->ic_name);
+
+	mutex_unlock(&ts->input->mutex);
+
+	return count;
+}
+
+static DEVICE_ATTR(ic_name, S_IRUGO, mip4_sysfs_read_ic_name, NULL);
+
 static struct attribute *mip4_attrs[] = {
 	&dev_attr_fw_version.attr,
 	&dev_attr_hw_version.attr,
+	&dev_attr_ic_name.attr,
 	&dev_attr_update_fw.attr,
 	NULL,
 };
@@ -1538,6 +1572,6 @@
 module_i2c_driver(mip4_driver);
 
 MODULE_DESCRIPTION("MELFAS MIP4 Touchscreen");
-MODULE_VERSION("2016.03.12");
+MODULE_VERSION("2016.09.28");
 MODULE_AUTHOR("Sangwon Jee <jeesw@melfas.com>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 4ea4757..aefb6e1 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -139,6 +139,27 @@
 #define SUR40_GET_STATE   0xc5 /*  4 bytes state (?) */
 #define SUR40_GET_SENSORS 0xb1 /*  8 bytes sensors   */
 
+static const struct v4l2_pix_format sur40_pix_format[] = {
+	{
+		.pixelformat = V4L2_TCH_FMT_TU08,
+		.width  = SENSOR_RES_X / 2,
+		.height = SENSOR_RES_Y / 2,
+		.field = V4L2_FIELD_NONE,
+		.colorspace = V4L2_COLORSPACE_SRGB,
+		.bytesperline = SENSOR_RES_X / 2,
+		.sizeimage = (SENSOR_RES_X/2) * (SENSOR_RES_Y/2),
+	},
+	{
+		.pixelformat = V4L2_PIX_FMT_GREY,
+		.width  = SENSOR_RES_X / 2,
+		.height = SENSOR_RES_Y / 2,
+		.field = V4L2_FIELD_NONE,
+		.colorspace = V4L2_COLORSPACE_SRGB,
+		.bytesperline = SENSOR_RES_X / 2,
+		.sizeimage = (SENSOR_RES_X/2) * (SENSOR_RES_Y/2),
+	}
+};
+
 /* master device state */
 struct sur40_state {
 
@@ -149,6 +170,7 @@
 	struct v4l2_device v4l2;
 	struct video_device vdev;
 	struct mutex lock;
+	struct v4l2_pix_format pix_fmt;
 
 	struct vb2_queue queue;
 	struct list_head buf_list;
@@ -169,7 +191,6 @@
 
 /* forward declarations */
 static const struct video_device sur40_video_device;
-static const struct v4l2_pix_format sur40_video_format;
 static const struct vb2_queue sur40_queue;
 static void sur40_process_video(struct sur40_state *sur40);
 
@@ -420,7 +441,7 @@
 		goto err_poll;
 	}
 
-	if (le32_to_cpu(img->size) != sur40_video_format.sizeimage) {
+	if (le32_to_cpu(img->size) != sur40->pix_fmt.sizeimage) {
 		dev_err(sur40->dev, "image size mismatch\n");
 		goto err_poll;
 	}
@@ -431,7 +452,7 @@
 
 	result = usb_sg_init(&sgr, sur40->usbdev,
 		usb_rcvbulkpipe(sur40->usbdev, VIDEO_ENDPOINT), 0,
-		sgt->sgl, sgt->nents, sur40_video_format.sizeimage, 0);
+		sgt->sgl, sgt->nents, sur40->pix_fmt.sizeimage, 0);
 	if (result < 0) {
 		dev_err(sur40->dev, "error %d in usb_sg_init\n", result);
 		goto err_poll;
@@ -586,13 +607,14 @@
 	if (error)
 		goto err_unreg_v4l2;
 
+	sur40->pix_fmt = sur40_pix_format[0];
 	sur40->vdev = sur40_video_device;
 	sur40->vdev.v4l2_dev = &sur40->v4l2;
 	sur40->vdev.lock = &sur40->lock;
 	sur40->vdev.queue = &sur40->queue;
 	video_set_drvdata(&sur40->vdev, sur40);
 
-	error = video_register_device(&sur40->vdev, VFL_TYPE_GRABBER, -1);
+	error = video_register_device(&sur40->vdev, VFL_TYPE_TOUCH, -1);
 	if (error) {
 		dev_err(&interface->dev,
 			"Unable to register video subdevice.");
@@ -647,14 +669,16 @@
 		       unsigned int *nbuffers, unsigned int *nplanes,
 		       unsigned int sizes[], struct device *alloc_devs[])
 {
+	struct sur40_state *sur40 = vb2_get_drv_priv(q);
+
 	if (q->num_buffers + *nbuffers < 3)
 		*nbuffers = 3 - q->num_buffers;
 
 	if (*nplanes)
-		return sizes[0] < sur40_video_format.sizeimage ? -EINVAL : 0;
+		return sizes[0] < sur40->pix_fmt.sizeimage ? -EINVAL : 0;
 
 	*nplanes = 1;
-	sizes[0] = sur40_video_format.sizeimage;
+	sizes[0] = sur40->pix_fmt.sizeimage;
 
 	return 0;
 }
@@ -666,7 +690,7 @@
 static int sur40_buffer_prepare(struct vb2_buffer *vb)
 {
 	struct sur40_state *sur40 = vb2_get_drv_priv(vb->vb2_queue);
-	unsigned long size = sur40_video_format.sizeimage;
+	unsigned long size = sur40->pix_fmt.sizeimage;
 
 	if (vb2_plane_size(vb, 0) < size) {
 		dev_err(&sur40->usbdev->dev, "buffer too small (%lu < %lu)\n",
@@ -741,7 +765,7 @@
 	strlcpy(cap->driver, DRIVER_SHORT, sizeof(cap->driver));
 	strlcpy(cap->card, DRIVER_LONG, sizeof(cap->card));
 	usb_make_path(sur40->usbdev, cap->bus_info, sizeof(cap->bus_info));
-	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
+	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TOUCH |
 		V4L2_CAP_READWRITE |
 		V4L2_CAP_STREAMING;
 	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
@@ -753,7 +777,7 @@
 {
 	if (i->index != 0)
 		return -EINVAL;
-	i->type = V4L2_INPUT_TYPE_CAMERA;
+	i->type = V4L2_INPUT_TYPE_TOUCH;
 	i->std = V4L2_STD_UNKNOWN;
 	strlcpy(i->name, "In-Cell Sensor", sizeof(i->name));
 	i->capabilities = 0;
@@ -771,19 +795,70 @@
 	return 0;
 }
 
-static int sur40_vidioc_fmt(struct file *file, void *priv,
+static int sur40_vidioc_try_fmt(struct file *file, void *priv,
 			    struct v4l2_format *f)
 {
-	f->fmt.pix = sur40_video_format;
+	switch (f->fmt.pix.pixelformat) {
+	case V4L2_PIX_FMT_GREY:
+		f->fmt.pix = sur40_pix_format[1];
+		break;
+
+	default:
+		f->fmt.pix = sur40_pix_format[0];
+		break;
+	}
+
+	return 0;
+}
+
+static int sur40_vidioc_s_fmt(struct file *file, void *priv,
+			    struct v4l2_format *f)
+{
+	struct sur40_state *sur40 = video_drvdata(file);
+
+	switch (f->fmt.pix.pixelformat) {
+	case V4L2_PIX_FMT_GREY:
+		sur40->pix_fmt = sur40_pix_format[1];
+		break;
+
+	default:
+		sur40->pix_fmt = sur40_pix_format[0];
+		break;
+	}
+
+	f->fmt.pix = sur40->pix_fmt;
+	return 0;
+}
+
+static int sur40_vidioc_g_fmt(struct file *file, void *priv,
+			    struct v4l2_format *f)
+{
+	struct sur40_state *sur40 = video_drvdata(file);
+
+	f->fmt.pix = sur40->pix_fmt;
+	return 0;
+}
+
+static int sur40_ioctl_parm(struct file *file, void *priv,
+			    struct v4l2_streamparm *p)
+{
+	if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	p->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+	p->parm.capture.timeperframe.numerator = 1;
+	p->parm.capture.timeperframe.denominator = 60;
+	p->parm.capture.readbuffers = 3;
 	return 0;
 }
 
 static int sur40_vidioc_enum_fmt(struct file *file, void *priv,
 				 struct v4l2_fmtdesc *f)
 {
-	if (f->index != 0)
+	if (f->index >= ARRAY_SIZE(sur40_pix_format))
 		return -EINVAL;
-	f->pixelformat = V4L2_PIX_FMT_GREY;
+
+	f->pixelformat = sur40_pix_format[f->index].pixelformat;
 	f->flags = 0;
 	return 0;
 }
@@ -791,25 +866,31 @@
 static int sur40_vidioc_enum_framesizes(struct file *file, void *priv,
 					struct v4l2_frmsizeenum *f)
 {
-	if ((f->index != 0) || (f->pixel_format != V4L2_PIX_FMT_GREY))
+	struct sur40_state *sur40 = video_drvdata(file);
+
+	if ((f->index != 0) || ((f->pixel_format != V4L2_TCH_FMT_TU08)
+		&& (f->pixel_format != V4L2_PIX_FMT_GREY)))
 		return -EINVAL;
 
 	f->type = V4L2_FRMSIZE_TYPE_DISCRETE;
-	f->discrete.width  = sur40_video_format.width;
-	f->discrete.height = sur40_video_format.height;
+	f->discrete.width  = sur40->pix_fmt.width;
+	f->discrete.height = sur40->pix_fmt.height;
 	return 0;
 }
 
 static int sur40_vidioc_enum_frameintervals(struct file *file, void *priv,
 					    struct v4l2_frmivalenum *f)
 {
-	if ((f->index > 1) || (f->pixel_format != V4L2_PIX_FMT_GREY)
-		|| (f->width  != sur40_video_format.width)
-		|| (f->height != sur40_video_format.height))
-			return -EINVAL;
+	struct sur40_state *sur40 = video_drvdata(file);
+
+	if ((f->index > 0) || ((f->pixel_format != V4L2_TCH_FMT_TU08)
+		&& (f->pixel_format != V4L2_PIX_FMT_GREY))
+		|| (f->width  != sur40->pix_fmt.width)
+		|| (f->height != sur40->pix_fmt.height))
+		return -EINVAL;
 
 	f->type = V4L2_FRMIVAL_TYPE_DISCRETE;
-	f->discrete.denominator  = 60/(f->index+1);
+	f->discrete.denominator  = 60;
 	f->discrete.numerator = 1;
 	return 0;
 }
@@ -862,13 +943,16 @@
 	.vidioc_querycap	= sur40_vidioc_querycap,
 
 	.vidioc_enum_fmt_vid_cap = sur40_vidioc_enum_fmt,
-	.vidioc_try_fmt_vid_cap	= sur40_vidioc_fmt,
-	.vidioc_s_fmt_vid_cap	= sur40_vidioc_fmt,
-	.vidioc_g_fmt_vid_cap	= sur40_vidioc_fmt,
+	.vidioc_try_fmt_vid_cap	= sur40_vidioc_try_fmt,
+	.vidioc_s_fmt_vid_cap	= sur40_vidioc_s_fmt,
+	.vidioc_g_fmt_vid_cap	= sur40_vidioc_g_fmt,
 
 	.vidioc_enum_framesizes = sur40_vidioc_enum_framesizes,
 	.vidioc_enum_frameintervals = sur40_vidioc_enum_frameintervals,
 
+	.vidioc_g_parm = sur40_ioctl_parm,
+	.vidioc_s_parm = sur40_ioctl_parm,
+
 	.vidioc_enum_input	= sur40_vidioc_enum_input,
 	.vidioc_g_input		= sur40_vidioc_g_input,
 	.vidioc_s_input		= sur40_vidioc_s_input,
@@ -891,16 +975,6 @@
 	.release = video_device_release_empty,
 };
 
-static const struct v4l2_pix_format sur40_video_format = {
-	.pixelformat = V4L2_PIX_FMT_GREY,
-	.width  = SENSOR_RES_X / 2,
-	.height = SENSOR_RES_Y / 2,
-	.field = V4L2_FIELD_NONE,
-	.colorspace = V4L2_COLORSPACE_SRGB,
-	.bytesperline = SENSOR_RES_X / 2,
-	.sizeimage = (SENSOR_RES_X/2) * (SENSOR_RES_Y/2),
-};
-
 /* USB-specific object needed to register this driver with the USB subsystem. */
 static struct usb_driver sur40_driver = {
 	.name = DRIVER_SHORT,
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index d432ca8..8ee54d7 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -309,7 +309,7 @@
 
 config ARM_SMMU_V3
 	bool "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
-	depends on ARM64 && PCI
+	depends on ARM64
 	select IOMMU_API
 	select IOMMU_IO_PGTABLE_LPAE
 	select GENERIC_MSI_IRQ_DOMAIN
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 58fa8cc..754595e 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -103,7 +103,7 @@
 	struct flush_queue_entry *entries;
 };
 
-DEFINE_PER_CPU(struct flush_queue, flush_queue);
+static DEFINE_PER_CPU(struct flush_queue, flush_queue);
 
 static atomic_t queue_timer_on;
 static struct timer_list queue_timer;
@@ -1361,7 +1361,8 @@
 
 			__npte = PM_LEVEL_PDE(level, virt_to_phys(page));
 
-			if (cmpxchg64(pte, __pte, __npte)) {
+			/* pte could have been changed somewhere. */
+			if (cmpxchg64(pte, __pte, __npte) != __pte) {
 				free_page((unsigned long)page);
 				continue;
 			}
@@ -1741,6 +1742,9 @@
 
 	free_pagetable(&dom->domain);
 
+	if (dom->domain.id)
+		domain_id_free(dom->domain.id);
+
 	kfree(dom);
 }
 
@@ -3649,7 +3653,7 @@
 
 	table = irq_lookup_table[devid];
 	if (table)
-		goto out;
+		goto out_unlock;
 
 	alias = amd_iommu_alias_table[devid];
 	table = irq_lookup_table[alias];
@@ -3663,7 +3667,7 @@
 	/* Nothing there yet, allocate new irq remapping table */
 	table = kzalloc(sizeof(*table), GFP_ATOMIC);
 	if (!table)
-		goto out;
+		goto out_unlock;
 
 	/* Initialize table spin-lock */
 	spin_lock_init(&table->lock);
@@ -3676,7 +3680,7 @@
 	if (!table->table) {
 		kfree(table);
 		table = NULL;
-		goto out;
+		goto out_unlock;
 	}
 
 	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
@@ -4153,6 +4157,7 @@
 	}
 	if (index < 0) {
 		pr_warn("Failed to allocate IRTE\n");
+		ret = index;
 		goto out_free_parent;
 	}
 
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index cd17136..157e934 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -20,6 +20,7 @@
 #include <linux/pci.h>
 #include <linux/acpi.h>
 #include <linux/list.h>
+#include <linux/bitmap.h>
 #include <linux/slab.h>
 #include <linux/syscore_ops.h>
 #include <linux/interrupt.h>
@@ -2285,7 +2286,7 @@
 	 * never allocate domain 0 because its used as the non-allocated and
 	 * error value placeholder
 	 */
-	amd_iommu_pd_alloc_bitmap[0] = 1;
+	__set_bit(0, amd_iommu_pd_alloc_bitmap);
 
 	spin_lock_init(&amd_iommu_pd_lock);
 
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index faa3b48..7eb60c1 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -79,12 +79,6 @@
 extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
 				  int status, int tag);
 
-#ifndef CONFIG_AMD_IOMMU_STATS
-
-static inline void amd_iommu_stats_init(void) { }
-
-#endif /* !CONFIG_AMD_IOMMU_STATS */
-
 static inline bool is_rd890_iommu(struct pci_dev *pdev)
 {
 	return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 641e887..15c01c3 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -30,10 +30,13 @@
 #include <linux/msi.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_iommu.h>
 #include <linux/of_platform.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 
+#include <linux/amba/bus.h>
+
 #include "io-pgtable.h"
 
 /* MMIO registers */
@@ -123,6 +126,10 @@
 #define CR2_RECINVSID			(1 << 1)
 #define CR2_E2H				(1 << 0)
 
+#define ARM_SMMU_GBPA			0x44
+#define GBPA_ABORT			(1 << 20)
+#define GBPA_UPDATE			(1 << 31)
+
 #define ARM_SMMU_IRQ_CTRL		0x50
 #define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
 #define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
@@ -260,6 +267,9 @@
 #define STRTAB_STE_1_SHCFG_INCOMING	1UL
 #define STRTAB_STE_1_SHCFG_SHIFT	44
 
+#define STRTAB_STE_1_PRIVCFG_UNPRIV	2UL
+#define STRTAB_STE_1_PRIVCFG_SHIFT	48
+
 #define STRTAB_STE_2_S2VMID_SHIFT	0
 #define STRTAB_STE_2_S2VMID_MASK	0xffffUL
 #define STRTAB_STE_2_VTCR_SHIFT		32
@@ -606,12 +616,9 @@
 	struct arm_smmu_strtab_cfg	strtab_cfg;
 };
 
-/* SMMU private data for an IOMMU group */
-struct arm_smmu_group {
+/* SMMU private data for each master */
+struct arm_smmu_master_data {
 	struct arm_smmu_device		*smmu;
-	struct arm_smmu_domain		*domain;
-	int				num_sids;
-	u32				*sids;
 	struct arm_smmu_strtab_ent	ste;
 };
 
@@ -713,19 +720,15 @@
 	writel(q->prod, q->prod_reg);
 }
 
-static bool __queue_cons_before(struct arm_smmu_queue *q, u32 until)
-{
-	if (Q_WRP(q, q->cons) == Q_WRP(q, until))
-		return Q_IDX(q, q->cons) < Q_IDX(q, until);
-
-	return Q_IDX(q, q->cons) >= Q_IDX(q, until);
-}
-
-static int queue_poll_cons(struct arm_smmu_queue *q, u32 until, bool wfe)
+/*
+ * Wait for the SMMU to consume items. If drain is true, wait until the queue
+ * is empty. Otherwise, wait until there is at least one free slot.
+ */
+static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe)
 {
 	ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);
 
-	while (queue_sync_cons(q), __queue_cons_before(q, until)) {
+	while (queue_sync_cons(q), (drain ? !queue_empty(q) : queue_full(q))) {
 		if (ktime_compare(ktime_get(), timeout) > 0)
 			return -ETIMEDOUT;
 
@@ -896,8 +899,8 @@
 static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
 				    struct arm_smmu_cmdq_ent *ent)
 {
-	u32 until;
 	u64 cmd[CMDQ_ENT_DWORDS];
+	unsigned long flags;
 	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
 	struct arm_smmu_queue *q = &smmu->cmdq.q;
 
@@ -907,20 +910,15 @@
 		return;
 	}
 
-	spin_lock(&smmu->cmdq.lock);
-	while (until = q->prod + 1, queue_insert_raw(q, cmd) == -ENOSPC) {
-		/*
-		 * Keep the queue locked, otherwise the producer could wrap
-		 * twice and we could see a future consumer pointer that looks
-		 * like it's behind us.
-		 */
-		if (queue_poll_cons(q, until, wfe))
+	spin_lock_irqsave(&smmu->cmdq.lock, flags);
+	while (queue_insert_raw(q, cmd) == -ENOSPC) {
+		if (queue_poll_cons(q, false, wfe))
 			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
 	}
 
-	if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, until, wfe))
+	if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe))
 		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
-	spin_unlock(&smmu->cmdq.lock);
+	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
 }
 
 /* Context descriptor manipulation functions */
@@ -1073,7 +1071,9 @@
 #ifdef CONFIG_PCI_ATS
 			 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
 #endif
-			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);
+			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT |
+			 STRTAB_STE_1_PRIVCFG_UNPRIV <<
+			 STRTAB_STE_1_PRIVCFG_SHIFT);
 
 		if (smmu->features & ARM_SMMU_FEAT_STALLS)
 			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
@@ -1161,36 +1161,66 @@
 	struct arm_smmu_queue *q = &smmu->evtq.q;
 	u64 evt[EVTQ_ENT_DWORDS];
 
-	while (!queue_remove_raw(q, evt)) {
-		u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;
+	do {
+		while (!queue_remove_raw(q, evt)) {
+			u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;
 
-		dev_info(smmu->dev, "event 0x%02x received:\n", id);
-		for (i = 0; i < ARRAY_SIZE(evt); ++i)
-			dev_info(smmu->dev, "\t0x%016llx\n",
-				 (unsigned long long)evt[i]);
-	}
+			dev_info(smmu->dev, "event 0x%02x received:\n", id);
+			for (i = 0; i < ARRAY_SIZE(evt); ++i)
+				dev_info(smmu->dev, "\t0x%016llx\n",
+					 (unsigned long long)evt[i]);
+
+		}
+
+		/*
+		 * Not much we can do on overflow, so scream and pretend we're
+		 * trying harder.
+		 */
+		if (queue_sync_prod(q) == -EOVERFLOW)
+			dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
+	} while (!queue_empty(q));
 
 	/* Sync our overflow flag, as we believe we're up to speed */
 	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t arm_smmu_evtq_handler(int irq, void *dev)
+static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
 {
-	irqreturn_t ret = IRQ_WAKE_THREAD;
-	struct arm_smmu_device *smmu = dev;
-	struct arm_smmu_queue *q = &smmu->evtq.q;
+	u32 sid, ssid;
+	u16 grpid;
+	bool ssv, last;
 
-	/*
-	 * Not much we can do on overflow, so scream and pretend we're
-	 * trying harder.
-	 */
-	if (queue_sync_prod(q) == -EOVERFLOW)
-		dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
-	else if (queue_empty(q))
-		ret = IRQ_NONE;
+	sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
+	ssv = evt[0] & PRIQ_0_SSID_V;
+	ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
+	last = evt[0] & PRIQ_0_PRG_LAST;
+	grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;
 
-	return ret;
+	dev_info(smmu->dev, "unexpected PRI request received:\n");
+	dev_info(smmu->dev,
+		 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
+		 sid, ssid, grpid, last ? "L" : "",
+		 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
+		 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
+		 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
+		 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
+		 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);
+
+	if (last) {
+		struct arm_smmu_cmdq_ent cmd = {
+			.opcode			= CMDQ_OP_PRI_RESP,
+			.substream_valid	= ssv,
+			.pri			= {
+				.sid	= sid,
+				.ssid	= ssid,
+				.grpid	= grpid,
+				.resp	= PRI_RESP_DENY,
+			},
+		};
+
+		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+	}
 }
 
 static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
@@ -1199,63 +1229,19 @@
 	struct arm_smmu_queue *q = &smmu->priq.q;
 	u64 evt[PRIQ_ENT_DWORDS];
 
-	while (!queue_remove_raw(q, evt)) {
-		u32 sid, ssid;
-		u16 grpid;
-		bool ssv, last;
+	do {
+		while (!queue_remove_raw(q, evt))
+			arm_smmu_handle_ppr(smmu, evt);
 
-		sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
-		ssv = evt[0] & PRIQ_0_SSID_V;
-		ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
-		last = evt[0] & PRIQ_0_PRG_LAST;
-		grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;
-
-		dev_info(smmu->dev, "unexpected PRI request received:\n");
-		dev_info(smmu->dev,
-			 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
-			 sid, ssid, grpid, last ? "L" : "",
-			 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
-			 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
-			 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
-			 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
-			 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);
-
-		if (last) {
-			struct arm_smmu_cmdq_ent cmd = {
-				.opcode			= CMDQ_OP_PRI_RESP,
-				.substream_valid	= ssv,
-				.pri			= {
-					.sid	= sid,
-					.ssid	= ssid,
-					.grpid	= grpid,
-					.resp	= PRI_RESP_DENY,
-				},
-			};
-
-			arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-		}
-	}
+		if (queue_sync_prod(q) == -EOVERFLOW)
+			dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
+	} while (!queue_empty(q));
 
 	/* Sync our overflow flag, as we believe we're up to speed */
 	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t arm_smmu_priq_handler(int irq, void *dev)
-{
-	irqreturn_t ret = IRQ_WAKE_THREAD;
-	struct arm_smmu_device *smmu = dev;
-	struct arm_smmu_queue *q = &smmu->priq.q;
-
-	/* PRIQ overflow indicates a programming error */
-	if (queue_sync_prod(q) == -EOVERFLOW)
-		dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
-	else if (queue_empty(q))
-		ret = IRQ_NONE;
-
-	return ret;
-}
-
 static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
 {
 	/* We don't actually use CMD_SYNC interrupts for anything */
@@ -1288,15 +1274,11 @@
 	if (active & GERROR_MSI_GERROR_ABT_ERR)
 		dev_warn(smmu->dev, "GERROR MSI write aborted\n");
 
-	if (active & GERROR_MSI_PRIQ_ABT_ERR) {
+	if (active & GERROR_MSI_PRIQ_ABT_ERR)
 		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
-		arm_smmu_priq_handler(irq, smmu->dev);
-	}
 
-	if (active & GERROR_MSI_EVTQ_ABT_ERR) {
+	if (active & GERROR_MSI_EVTQ_ABT_ERR)
 		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
-		arm_smmu_evtq_handler(irq, smmu->dev);
-	}
 
 	if (active & GERROR_MSI_CMDQ_ABT_ERR) {
 		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
@@ -1569,6 +1551,8 @@
 		return -ENOMEM;
 
 	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+	domain->geometry.aperture_end = (1UL << ias) - 1;
+	domain->geometry.force_aperture = true;
 	smmu_domain->pgtbl_ops = pgtbl_ops;
 
 	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
@@ -1578,20 +1562,6 @@
 	return ret;
 }
 
-static struct arm_smmu_group *arm_smmu_group_get(struct device *dev)
-{
-	struct iommu_group *group;
-	struct arm_smmu_group *smmu_group;
-
-	group = iommu_group_get(dev);
-	if (!group)
-		return NULL;
-
-	smmu_group = iommu_group_get_iommudata(group);
-	iommu_group_put(group);
-	return smmu_group;
-}
-
 static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
 {
 	__le64 *step;
@@ -1614,27 +1584,17 @@
 	return step;
 }
 
-static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group)
+static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
 {
 	int i;
-	struct arm_smmu_domain *smmu_domain = smmu_group->domain;
-	struct arm_smmu_strtab_ent *ste = &smmu_group->ste;
-	struct arm_smmu_device *smmu = smmu_group->smmu;
+	struct arm_smmu_master_data *master = fwspec->iommu_priv;
+	struct arm_smmu_device *smmu = master->smmu;
 
-	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
-		ste->s1_cfg = &smmu_domain->s1_cfg;
-		ste->s2_cfg = NULL;
-		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
-	} else {
-		ste->s1_cfg = NULL;
-		ste->s2_cfg = &smmu_domain->s2_cfg;
-	}
-
-	for (i = 0; i < smmu_group->num_sids; ++i) {
-		u32 sid = smmu_group->sids[i];
+	for (i = 0; i < fwspec->num_ids; ++i) {
+		u32 sid = fwspec->ids[i];
 		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
 
-		arm_smmu_write_strtab_ent(smmu, sid, step, ste);
+		arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
 	}
 
 	return 0;
@@ -1642,13 +1602,11 @@
 
 static void arm_smmu_detach_dev(struct device *dev)
 {
-	struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);
+	struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;
 
-	smmu_group->ste.bypass = true;
-	if (arm_smmu_install_ste_for_group(smmu_group) < 0)
+	master->ste.bypass = true;
+	if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0)
 		dev_warn(dev, "failed to install bypass STE\n");
-
-	smmu_group->domain = NULL;
 }
 
 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1656,16 +1614,20 @@
 	int ret = 0;
 	struct arm_smmu_device *smmu;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);
+	struct arm_smmu_master_data *master;
+	struct arm_smmu_strtab_ent *ste;
 
-	if (!smmu_group)
+	if (!dev->iommu_fwspec)
 		return -ENOENT;
 
+	master = dev->iommu_fwspec->iommu_priv;
+	smmu = master->smmu;
+	ste = &master->ste;
+
 	/* Already attached to a different domain? */
-	if (smmu_group->domain && smmu_group->domain != smmu_domain)
+	if (!ste->bypass)
 		arm_smmu_detach_dev(dev);
 
-	smmu = smmu_group->smmu;
 	mutex_lock(&smmu_domain->init_mutex);
 
 	if (!smmu_domain->smmu) {
@@ -1684,21 +1646,21 @@
 		goto out_unlock;
 	}
 
-	/* Group already attached to this domain? */
-	if (smmu_group->domain)
-		goto out_unlock;
+	ste->bypass = false;
+	ste->valid = true;
 
-	smmu_group->domain	= smmu_domain;
+	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+		ste->s1_cfg = &smmu_domain->s1_cfg;
+		ste->s2_cfg = NULL;
+		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
+	} else {
+		ste->s1_cfg = NULL;
+		ste->s2_cfg = &smmu_domain->s2_cfg;
+	}
 
-	/*
-	 * FIXME: This should always be "false" once we have IOMMU-backed
-	 * DMA ops for all devices behind the SMMU.
-	 */
-	smmu_group->ste.bypass	= domain->type == IOMMU_DOMAIN_DMA;
-
-	ret = arm_smmu_install_ste_for_group(smmu_group);
+	ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
 	if (ret < 0)
-		smmu_group->domain = NULL;
+		ste->valid = false;
 
 out_unlock:
 	mutex_unlock(&smmu_domain->init_mutex);
@@ -1757,40 +1719,19 @@
 	return ret;
 }
 
-static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *sidp)
+static struct platform_driver arm_smmu_driver;
+
+static int arm_smmu_match_node(struct device *dev, void *data)
 {
-	*(u32 *)sidp = alias;
-	return 0; /* Continue walking */
+	return dev->of_node == data;
 }
 
-static void __arm_smmu_release_pci_iommudata(void *data)
+static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
 {
-	kfree(data);
-}
-
-static struct arm_smmu_device *arm_smmu_get_for_pci_dev(struct pci_dev *pdev)
-{
-	struct device_node *of_node;
-	struct platform_device *smmu_pdev;
-	struct arm_smmu_device *smmu = NULL;
-	struct pci_bus *bus = pdev->bus;
-
-	/* Walk up to the root bus */
-	while (!pci_is_root_bus(bus))
-		bus = bus->parent;
-
-	/* Follow the "iommus" phandle from the host controller */
-	of_node = of_parse_phandle(bus->bridge->parent->of_node, "iommus", 0);
-	if (!of_node)
-		return NULL;
-
-	/* See if we can find an SMMU corresponding to the phandle */
-	smmu_pdev = of_find_device_by_node(of_node);
-	if (smmu_pdev)
-		smmu = platform_get_drvdata(smmu_pdev);
-
-	of_node_put(of_node);
-	return smmu;
+	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
+						np, arm_smmu_match_node);
+	put_device(dev);
+	return dev ? dev_get_drvdata(dev) : NULL;
 }
 
 static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
@@ -1803,94 +1744,91 @@
 	return sid < limit;
 }
 
+static struct iommu_ops arm_smmu_ops;
+
 static int arm_smmu_add_device(struct device *dev)
 {
 	int i, ret;
-	u32 sid, *sids;
-	struct pci_dev *pdev;
-	struct iommu_group *group;
-	struct arm_smmu_group *smmu_group;
 	struct arm_smmu_device *smmu;
+	struct arm_smmu_master_data *master;
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct iommu_group *group;
 
-	/* We only support PCI, for now */
-	if (!dev_is_pci(dev))
+	if (!fwspec || fwspec->ops != &arm_smmu_ops)
 		return -ENODEV;
-
-	pdev = to_pci_dev(dev);
-	group = iommu_group_get_for_dev(dev);
-	if (IS_ERR(group))
-		return PTR_ERR(group);
-
-	smmu_group = iommu_group_get_iommudata(group);
-	if (!smmu_group) {
-		smmu = arm_smmu_get_for_pci_dev(pdev);
-		if (!smmu) {
-			ret = -ENOENT;
-			goto out_remove_dev;
-		}
-
-		smmu_group = kzalloc(sizeof(*smmu_group), GFP_KERNEL);
-		if (!smmu_group) {
-			ret = -ENOMEM;
-			goto out_remove_dev;
-		}
-
-		smmu_group->ste.valid	= true;
-		smmu_group->smmu	= smmu;
-		iommu_group_set_iommudata(group, smmu_group,
-					  __arm_smmu_release_pci_iommudata);
+	/*
+	 * We _can_ actually withstand dodgy bus code re-calling add_device()
+	 * without an intervening remove_device()/of_xlate() sequence, but
+	 * we're not going to do so quietly...
+	 */
+	if (WARN_ON_ONCE(fwspec->iommu_priv)) {
+		master = fwspec->iommu_priv;
+		smmu = master->smmu;
 	} else {
-		smmu = smmu_group->smmu;
+		smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
+		if (!smmu)
+			return -ENODEV;
+		master = kzalloc(sizeof(*master), GFP_KERNEL);
+		if (!master)
+			return -ENOMEM;
+
+		master->smmu = smmu;
+		fwspec->iommu_priv = master;
 	}
 
-	/* Assume SID == RID until firmware tells us otherwise */
-	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
-	for (i = 0; i < smmu_group->num_sids; ++i) {
-		/* If we already know about this SID, then we're done */
-		if (smmu_group->sids[i] == sid)
-			goto out_put_group;
+	/* Check the SIDs are in range of the SMMU and our stream table */
+	for (i = 0; i < fwspec->num_ids; i++) {
+		u32 sid = fwspec->ids[i];
+
+		if (!arm_smmu_sid_in_range(smmu, sid))
+			return -ERANGE;
+
+		/* Ensure l2 strtab is initialised */
+		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
+			ret = arm_smmu_init_l2_strtab(smmu, sid);
+			if (ret)
+				return ret;
+		}
 	}
 
-	/* Check the SID is in range of the SMMU and our stream table */
-	if (!arm_smmu_sid_in_range(smmu, sid)) {
-		ret = -ERANGE;
-		goto out_remove_dev;
-	}
+	group = iommu_group_get_for_dev(dev);
+	if (!IS_ERR(group))
+		iommu_group_put(group);
 
-	/* Ensure l2 strtab is initialised */
-	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
-		ret = arm_smmu_init_l2_strtab(smmu, sid);
-		if (ret)
-			goto out_remove_dev;
-	}
-
-	/* Resize the SID array for the group */
-	smmu_group->num_sids++;
-	sids = krealloc(smmu_group->sids, smmu_group->num_sids * sizeof(*sids),
-			GFP_KERNEL);
-	if (!sids) {
-		smmu_group->num_sids--;
-		ret = -ENOMEM;
-		goto out_remove_dev;
-	}
-
-	/* Add the new SID */
-	sids[smmu_group->num_sids - 1] = sid;
-	smmu_group->sids = sids;
-
-out_put_group:
-	iommu_group_put(group);
-	return 0;
-
-out_remove_dev:
-	iommu_group_remove_device(dev);
-	iommu_group_put(group);
-	return ret;
+	return PTR_ERR_OR_ZERO(group);
 }
 
 static void arm_smmu_remove_device(struct device *dev)
 {
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct arm_smmu_master_data *master;
+
+	if (!fwspec || fwspec->ops != &arm_smmu_ops)
+		return;
+
+	master = fwspec->iommu_priv;
+	if (master && master->ste.valid)
+		arm_smmu_detach_dev(dev);
 	iommu_group_remove_device(dev);
+	kfree(master);
+	iommu_fwspec_free(dev);
+}
+
+static struct iommu_group *arm_smmu_device_group(struct device *dev)
+{
+	struct iommu_group *group;
+
+	/*
+	 * We don't support devices sharing stream IDs other than PCI RID
+	 * aliases, since the necessary ID-to-device lookup becomes rather
+	 * impractical given a potential sparse 32-bit stream ID space.
+	 */
+	if (dev_is_pci(dev))
+		group = pci_device_group(dev);
+	else
+		group = generic_device_group(dev);
+
+	return group;
 }
 
 static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
@@ -1937,6 +1875,11 @@
 	return ret;
 }
 
+static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+{
+	return iommu_fwspec_add_ids(dev, args->args, 1);
+}
+
 static struct iommu_ops arm_smmu_ops = {
 	.capable		= arm_smmu_capable,
 	.domain_alloc		= arm_smmu_domain_alloc,
@@ -1948,9 +1891,10 @@
 	.iova_to_phys		= arm_smmu_iova_to_phys,
 	.add_device		= arm_smmu_add_device,
 	.remove_device		= arm_smmu_remove_device,
-	.device_group		= pci_device_group,
+	.device_group		= arm_smmu_device_group,
 	.domain_get_attr	= arm_smmu_domain_get_attr,
 	.domain_set_attr	= arm_smmu_domain_set_attr,
+	.of_xlate		= arm_smmu_of_xlate,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 };
 
@@ -2151,6 +2095,24 @@
 					  1, ARM_SMMU_POLL_TIMEOUT_US);
 }
 
+/* GBPA is "special" */
+static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
+{
+	int ret;
+	u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;
+
+	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
+					 1, ARM_SMMU_POLL_TIMEOUT_US);
+	if (ret)
+		return ret;
+
+	reg &= ~clr;
+	reg |= set;
+	writel_relaxed(reg | GBPA_UPDATE, gbpa);
+	return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
+					  1, ARM_SMMU_POLL_TIMEOUT_US);
+}
+
 static void arm_smmu_free_msis(void *data)
 {
 	struct device *dev = data;
@@ -2235,10 +2197,10 @@
 	/* Request interrupt lines */
 	irq = smmu->evtq.q.irq;
 	if (irq) {
-		ret = devm_request_threaded_irq(smmu->dev, irq,
-						arm_smmu_evtq_handler,
+		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
 						arm_smmu_evtq_thread,
-						0, "arm-smmu-v3-evtq", smmu);
+						IRQF_ONESHOT,
+						"arm-smmu-v3-evtq", smmu);
 		if (ret < 0)
 			dev_warn(smmu->dev, "failed to enable evtq irq\n");
 	}
@@ -2263,10 +2225,10 @@
 	if (smmu->features & ARM_SMMU_FEAT_PRI) {
 		irq = smmu->priq.q.irq;
 		if (irq) {
-			ret = devm_request_threaded_irq(smmu->dev, irq,
-							arm_smmu_priq_handler,
+			ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
 							arm_smmu_priq_thread,
-							0, "arm-smmu-v3-priq",
+							IRQF_ONESHOT,
+							"arm-smmu-v3-priq",
 							smmu);
 			if (ret < 0)
 				dev_warn(smmu->dev,
@@ -2296,7 +2258,7 @@
 	return ret;
 }
 
-static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
+static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 {
 	int ret;
 	u32 reg, enables;
@@ -2397,8 +2359,17 @@
 		return ret;
 	}
 
-	/* Enable the SMMU interface */
-	enables |= CR0_SMMUEN;
+
+	/* Enable the SMMU interface, or ensure bypass */
+	if (!bypass || disable_bypass) {
+		enables |= CR0_SMMUEN;
+	} else {
+		ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
+		if (ret) {
+			dev_err(smmu->dev, "GBPA not responding to update\n");
+			return ret;
+		}
+	}
 	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
 				      ARM_SMMU_CR0ACK);
 	if (ret) {
@@ -2597,6 +2568,15 @@
 	struct resource *res;
 	struct arm_smmu_device *smmu;
 	struct device *dev = &pdev->dev;
+	bool bypass = true;
+	u32 cells;
+
+	if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
+		dev_err(dev, "missing #iommu-cells property\n");
+	else if (cells != 1)
+		dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
+	else
+		bypass = false;
 
 	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
 	if (!smmu) {
@@ -2649,7 +2629,24 @@
 	platform_set_drvdata(pdev, smmu);
 
 	/* Reset the device */
-	return arm_smmu_device_reset(smmu);
+	ret = arm_smmu_device_reset(smmu, bypass);
+	if (ret)
+		return ret;
+
+	/* And we're up. Go go go! */
+	of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
+#ifdef CONFIG_PCI
+	pci_request_acs();
+	ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+	if (ret)
+		return ret;
+#endif
+#ifdef CONFIG_ARM_AMBA
+	ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+	if (ret)
+		return ret;
+#endif
+	return bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
 }
 
 static int arm_smmu_device_remove(struct platform_device *pdev)
@@ -2677,22 +2674,14 @@
 
 static int __init arm_smmu_init(void)
 {
-	struct device_node *np;
-	int ret;
+	static bool registered;
+	int ret = 0;
 
-	np = of_find_matching_node(NULL, arm_smmu_of_match);
-	if (!np)
-		return 0;
-
-	of_node_put(np);
-
-	ret = platform_driver_register(&arm_smmu_driver);
-	if (ret)
-		return ret;
-
-	pci_request_acs();
-
-	return bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+	if (!registered) {
+		ret = platform_driver_register(&arm_smmu_driver);
+		registered = !ret;
+	}
+	return ret;
 }
 
 static void __exit arm_smmu_exit(void)
@@ -2703,6 +2692,20 @@
 subsys_initcall(arm_smmu_init);
 module_exit(arm_smmu_exit);
 
+static int __init arm_smmu_of_init(struct device_node *np)
+{
+	int ret = arm_smmu_init();
+
+	if (ret)
+		return ret;
+
+	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
+		return -ENODEV;
+
+	return 0;
+}
+IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init);
+
 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
 MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 2db74eb..c841eb7 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -28,6 +28,7 @@
 
 #define pr_fmt(fmt) "arm-smmu: " fmt
 
+#include <linux/atomic.h>
 #include <linux/delay.h>
 #include <linux/dma-iommu.h>
 #include <linux/dma-mapping.h>
@@ -40,6 +41,8 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_iommu.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -49,15 +52,9 @@
 
 #include "io-pgtable.h"
 
-/* Maximum number of stream IDs assigned to a single device */
-#define MAX_MASTER_STREAMIDS		128
-
 /* Maximum number of context banks per SMMU */
 #define ARM_SMMU_MAX_CBS		128
 
-/* Maximum number of mapping groups per SMMU */
-#define ARM_SMMU_MAX_SMRS		128
-
 /* SMMU global address space */
 #define ARM_SMMU_GR0(smmu)		((smmu)->base)
 #define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))
@@ -165,21 +162,27 @@
 #define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
 #define SMR_VALID			(1 << 31)
 #define SMR_MASK_SHIFT			16
-#define SMR_MASK_MASK			0x7fff
 #define SMR_ID_SHIFT			0
-#define SMR_ID_MASK			0x7fff
 
 #define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
 #define S2CR_CBNDX_SHIFT		0
 #define S2CR_CBNDX_MASK			0xff
 #define S2CR_TYPE_SHIFT			16
 #define S2CR_TYPE_MASK			0x3
-#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
-#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
-#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)
+enum arm_smmu_s2cr_type {
+	S2CR_TYPE_TRANS,
+	S2CR_TYPE_BYPASS,
+	S2CR_TYPE_FAULT,
+};
 
 #define S2CR_PRIVCFG_SHIFT		24
-#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)
+#define S2CR_PRIVCFG_MASK		0x3
+enum arm_smmu_s2cr_privcfg {
+	S2CR_PRIVCFG_DEFAULT,
+	S2CR_PRIVCFG_DIPAN,
+	S2CR_PRIVCFG_UNPRIV,
+	S2CR_PRIVCFG_PRIV,
+};
 
 /* Context bank attribute registers */
 #define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
@@ -217,6 +220,7 @@
 #define ARM_SMMU_CB_TTBR0		0x20
 #define ARM_SMMU_CB_TTBR1		0x28
 #define ARM_SMMU_CB_TTBCR		0x30
+#define ARM_SMMU_CB_CONTEXTIDR		0x34
 #define ARM_SMMU_CB_S1_MAIR0		0x38
 #define ARM_SMMU_CB_S1_MAIR1		0x3c
 #define ARM_SMMU_CB_PAR			0x50
@@ -239,7 +243,6 @@
 #define SCTLR_AFE			(1 << 2)
 #define SCTLR_TRE			(1 << 1)
 #define SCTLR_M				(1 << 0)
-#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)
 
 #define ARM_MMU500_ACTLR_CPRE		(1 << 1)
 
@@ -296,23 +299,33 @@
 	CAVIUM_SMMUV2,
 };
 
+struct arm_smmu_s2cr {
+	struct iommu_group		*group;
+	int				count;
+	enum arm_smmu_s2cr_type		type;
+	enum arm_smmu_s2cr_privcfg	privcfg;
+	u8				cbndx;
+};
+
+#define s2cr_init_val (struct arm_smmu_s2cr){				\
+	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
+}
+
 struct arm_smmu_smr {
-	u8				idx;
 	u16				mask;
 	u16				id;
+	bool				valid;
 };
 
 struct arm_smmu_master_cfg {
-	int				num_streamids;
-	u16				streamids[MAX_MASTER_STREAMIDS];
-	struct arm_smmu_smr		*smrs;
+	struct arm_smmu_device		*smmu;
+	s16				smendx[];
 };
-
-struct arm_smmu_master {
-	struct device_node		*of_node;
-	struct rb_node			node;
-	struct arm_smmu_master_cfg	cfg;
-};
+#define INVALID_SMENDX			-1
+#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
+#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
+#define for_each_cfg_sme(fw, i, idx) \
+	for (i = 0; idx = __fwspec_cfg(fw)->smendx[i], i < fw->num_ids; ++i)
 
 struct arm_smmu_device {
 	struct device			*dev;
@@ -346,7 +359,11 @@
 	atomic_t			irptndx;
 
 	u32				num_mapping_groups;
-	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
+	u16				streamid_mask;
+	u16				smr_mask_mask;
+	struct arm_smmu_smr		*smrs;
+	struct arm_smmu_s2cr		*s2crs;
+	struct mutex			stream_map_mutex;
 
 	unsigned long			va_size;
 	unsigned long			ipa_size;
@@ -357,9 +374,6 @@
 	u32				num_context_irqs;
 	unsigned int			*irqs;
 
-	struct list_head		list;
-	struct rb_root			masters;
-
 	u32				cavium_id_base; /* Specific to Cavium */
 };
 
@@ -397,15 +411,6 @@
 	struct iommu_domain		domain;
 };
 
-struct arm_smmu_phandle_args {
-	struct device_node *np;
-	int args_count;
-	uint32_t args[MAX_MASTER_STREAMIDS];
-};
-
-static DEFINE_SPINLOCK(arm_smmu_devices_lock);
-static LIST_HEAD(arm_smmu_devices);
-
 struct arm_smmu_option_prop {
 	u32 opt;
 	const char *prop;
@@ -413,6 +418,8 @@
 
 static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
 
+static bool using_legacy_binding, using_generic_binding;
+
 static struct arm_smmu_option_prop arm_smmu_options[] = {
 	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
 	{ 0, NULL},
@@ -444,131 +451,86 @@
 
 		while (!pci_is_root_bus(bus))
 			bus = bus->parent;
-		return bus->bridge->parent->of_node;
+		return of_node_get(bus->bridge->parent->of_node);
 	}
 
-	return dev->of_node;
+	return of_node_get(dev->of_node);
 }
 
-static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
-						struct device_node *dev_node)
+static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
 {
-	struct rb_node *node = smmu->masters.rb_node;
-
-	while (node) {
-		struct arm_smmu_master *master;
-
-		master = container_of(node, struct arm_smmu_master, node);
-
-		if (dev_node < master->of_node)
-			node = node->rb_left;
-		else if (dev_node > master->of_node)
-			node = node->rb_right;
-		else
-			return master;
-	}
-
-	return NULL;
+	*((__be32 *)data) = cpu_to_be32(alias);
+	return 0; /* Continue walking */
 }
 
-static struct arm_smmu_master_cfg *
-find_smmu_master_cfg(struct device *dev)
+static int __find_legacy_master_phandle(struct device *dev, void *data)
 {
-	struct arm_smmu_master_cfg *cfg = NULL;
-	struct iommu_group *group = iommu_group_get(dev);
+	struct of_phandle_iterator *it = *(void **)data;
+	struct device_node *np = it->node;
+	int err;
 
-	if (group) {
-		cfg = iommu_group_get_iommudata(group);
-		iommu_group_put(group);
-	}
-
-	return cfg;
+	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
+			    "#stream-id-cells", 0)
+		if (it->node == np) {
+			*(void **)data = dev;
+			return 1;
+		}
+	it->node = np;
+	return err == -ENOENT ? 0 : err;
 }
 
-static int insert_smmu_master(struct arm_smmu_device *smmu,
-			      struct arm_smmu_master *master)
+static struct platform_driver arm_smmu_driver;
+static struct iommu_ops arm_smmu_ops;
+
+static int arm_smmu_register_legacy_master(struct device *dev,
+					   struct arm_smmu_device **smmu)
 {
-	struct rb_node **new, *parent;
+	struct device *smmu_dev;
+	struct device_node *np;
+	struct of_phandle_iterator it;
+	void *data = &it;
+	u32 *sids;
+	__be32 pci_sid;
+	int err;
 
-	new = &smmu->masters.rb_node;
-	parent = NULL;
-	while (*new) {
-		struct arm_smmu_master *this
-			= container_of(*new, struct arm_smmu_master, node);
-
-		parent = *new;
-		if (master->of_node < this->of_node)
-			new = &((*new)->rb_left);
-		else if (master->of_node > this->of_node)
-			new = &((*new)->rb_right);
-		else
-			return -EEXIST;
+	np = dev_get_dev_node(dev);
+	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
+		of_node_put(np);
+		return -ENODEV;
 	}
 
-	rb_link_node(&master->node, parent, new);
-	rb_insert_color(&master->node, &smmu->masters);
-	return 0;
-}
+	it.node = np;
+	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
+				     __find_legacy_master_phandle);
+	smmu_dev = data;
+	of_node_put(np);
+	if (err == 0)
+		return -ENODEV;
+	if (err < 0)
+		return err;
 
-static int register_smmu_master(struct arm_smmu_device *smmu,
-				struct device *dev,
-				struct arm_smmu_phandle_args *masterspec)
-{
-	int i;
-	struct arm_smmu_master *master;
-
-	master = find_smmu_master(smmu, masterspec->np);
-	if (master) {
-		dev_err(dev,
-			"rejecting multiple registrations for master device %s\n",
-			masterspec->np->name);
-		return -EBUSY;
+	if (dev_is_pci(dev)) {
+		/* "mmu-masters" assumes Stream ID == Requester ID */
+		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
+				       &pci_sid);
+		it.cur = &pci_sid;
+		it.cur_count = 1;
 	}
 
-	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
-		dev_err(dev,
-			"reached maximum number (%d) of stream IDs for master device %s\n",
-			MAX_MASTER_STREAMIDS, masterspec->np->name);
-		return -ENOSPC;
-	}
+	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
+				&arm_smmu_ops);
+	if (err)
+		return err;
 
-	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
-	if (!master)
+	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
+	if (!sids)
 		return -ENOMEM;
 
-	master->of_node			= masterspec->np;
-	master->cfg.num_streamids	= masterspec->args_count;
-
-	for (i = 0; i < master->cfg.num_streamids; ++i) {
-		u16 streamid = masterspec->args[i];
-
-		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
-		     (streamid >= smmu->num_mapping_groups)) {
-			dev_err(dev,
-				"stream ID for master device %s greater than maximum allowed (%d)\n",
-				masterspec->np->name, smmu->num_mapping_groups);
-			return -ERANGE;
-		}
-		master->cfg.streamids[i] = streamid;
-	}
-	return insert_smmu_master(smmu, master);
-}
-
-static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
-{
-	struct arm_smmu_device *smmu;
-	struct arm_smmu_master *master = NULL;
-	struct device_node *dev_node = dev_get_dev_node(dev);
-
-	spin_lock(&arm_smmu_devices_lock);
-	list_for_each_entry(smmu, &arm_smmu_devices, list) {
-		master = find_smmu_master(smmu, dev_node);
-		if (master)
-			break;
-	}
-	spin_unlock(&arm_smmu_devices_lock);
-
-	return master ? smmu : NULL;
+	*smmu = dev_get_drvdata(smmu_dev);
+	of_phandle_iterator_args(&it, sids, it.cur_count);
+	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
+	kfree(sids);
+	return err;
 }
 
 static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
@@ -738,7 +700,7 @@
 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 				       struct io_pgtable_cfg *pgtbl_cfg)
 {
-	u32 reg;
+	u32 reg, reg2;
 	u64 reg64;
 	bool stage1;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
@@ -781,14 +743,22 @@
 
 	/* TTBRs */
 	if (stage1) {
-		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
+		u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
 
-		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
-		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
-
-		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
-		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
-		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
+		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
+			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
+			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
+			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
+			writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
+		} else {
+			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
+			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
+			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
+			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
+			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
+			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
+		}
 	} else {
 		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
 		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
@@ -796,28 +766,36 @@
 
 	/* TTBCR */
 	if (stage1) {
-		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
-		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
-		if (smmu->version > ARM_SMMU_V1) {
-			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
-			reg |= TTBCR2_SEP_UPSTREAM;
-			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
+			reg2 = 0;
+		} else {
+			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
+			reg2 |= TTBCR2_SEP_UPSTREAM;
 		}
+		if (smmu->version > ARM_SMMU_V1)
+			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
 	} else {
 		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
-		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
 	}
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
 
 	/* MAIRs (stage-1 only) */
 	if (stage1) {
-		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
+		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
+			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
+		} else {
+			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
+			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
+		}
 		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
-		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
-		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
+		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
 	}
 
 	/* SCTLR */
-	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
 	if (stage1)
 		reg |= SCTLR_S1_ASIDPNE;
 #ifdef __BIG_ENDIAN
@@ -841,12 +819,6 @@
 	if (smmu_domain->smmu)
 		goto out_unlock;
 
-	/* We're bypassing these SIDs, so don't allocate an actual context */
-	if (domain->type == IOMMU_DOMAIN_DMA) {
-		smmu_domain->smmu = smmu;
-		goto out_unlock;
-	}
-
 	/*
 	 * Mapping the requested stage onto what we support is surprisingly
 	 * complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -880,6 +852,11 @@
 	 */
 	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
 		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
+	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
+	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
+	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
+	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
+		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
 	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
 	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
 			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
@@ -899,10 +876,14 @@
 		oas = smmu->ipa_size;
 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
 			fmt = ARM_64_LPAE_S1;
-		} else {
+		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
 			fmt = ARM_32_LPAE_S1;
 			ias = min(ias, 32UL);
 			oas = min(oas, 40UL);
+		} else {
+			fmt = ARM_V7S;
+			ias = min(ias, 32UL);
+			oas = min(oas, 32UL);
 		}
 		break;
 	case ARM_SMMU_DOMAIN_NESTED:
@@ -958,6 +939,8 @@
 
 	/* Update the domain's page sizes to reflect the page table format */
 	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+	domain->geometry.aperture_end = (1UL << ias) - 1;
+	domain->geometry.force_aperture = true;
 
 	/* Initialise the context bank with our page table cfg */
 	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
@@ -996,7 +979,7 @@
 	void __iomem *cb_base;
 	int irq;
 
-	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
+	if (!smmu)
 		return;
 
 	/*
@@ -1030,8 +1013,8 @@
 	if (!smmu_domain)
 		return NULL;
 
-	if (type == IOMMU_DOMAIN_DMA &&
-	    iommu_get_dma_cookie(&smmu_domain->domain)) {
+	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
+	    iommu_get_dma_cookie(&smmu_domain->domain))) {
 		kfree(smmu_domain);
 		return NULL;
 	}
@@ -1055,162 +1038,197 @@
 	kfree(smmu_domain);
 }
 
-static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
-					  struct arm_smmu_master_cfg *cfg)
+static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
 {
-	int i;
-	struct arm_smmu_smr *smrs;
-	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+	struct arm_smmu_smr *smr = smmu->smrs + idx;
+	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
 
-	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
-		return 0;
-
-	if (cfg->smrs)
-		return -EEXIST;
-
-	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
-	if (!smrs) {
-		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
-			cfg->num_streamids);
-		return -ENOMEM;
-	}
-
-	/* Allocate the SMRs on the SMMU */
-	for (i = 0; i < cfg->num_streamids; ++i) {
-		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
-						  smmu->num_mapping_groups);
-		if (idx < 0) {
-			dev_err(smmu->dev, "failed to allocate free SMR\n");
-			goto err_free_smrs;
-		}
-
-		smrs[i] = (struct arm_smmu_smr) {
-			.idx	= idx,
-			.mask	= 0, /* We don't currently share SMRs */
-			.id	= cfg->streamids[i],
-		};
-	}
-
-	/* It worked! Now, poke the actual hardware */
-	for (i = 0; i < cfg->num_streamids; ++i) {
-		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
-			  smrs[i].mask << SMR_MASK_SHIFT;
-		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
-	}
-
-	cfg->smrs = smrs;
-	return 0;
-
-err_free_smrs:
-	while (--i >= 0)
-		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
-	kfree(smrs);
-	return -ENOSPC;
+	if (smr->valid)
+		reg |= SMR_VALID;
+	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
 }
 
-static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
-				      struct arm_smmu_master_cfg *cfg)
+static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
 {
-	int i;
-	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-	struct arm_smmu_smr *smrs = cfg->smrs;
+	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
+	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
+		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
+		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
 
+	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
+}
+
+static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
+{
+	arm_smmu_write_s2cr(smmu, idx);
+	if (smmu->smrs)
+		arm_smmu_write_smr(smmu, idx);
+}
+
+static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
+{
+	struct arm_smmu_smr *smrs = smmu->smrs;
+	int i, free_idx = -ENOSPC;
+
+	/* Stream indexing is blissfully easy */
 	if (!smrs)
-		return;
+		return id;
 
-	/* Invalidate the SMRs before freeing back to the allocator */
-	for (i = 0; i < cfg->num_streamids; ++i) {
-		u8 idx = smrs[i].idx;
-
-		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
-		__arm_smmu_free_bitmap(smmu->smr_map, idx);
+	/* Validating SMRs is... less so */
+	for (i = 0; i < smmu->num_mapping_groups; ++i) {
+		if (!smrs[i].valid) {
+			/*
+			 * Note the first free entry we come across, which
+			 * we'll claim in the end if nothing else matches.
+			 */
+			if (free_idx < 0)
+				free_idx = i;
+			continue;
+		}
+		/*
+		 * If the new entry is _entirely_ matched by an existing entry,
+		 * then reuse that, with the guarantee that there also cannot
+		 * be any subsequent conflicting entries. In normal use we'd
+		 * expect simply identical entries for this case, but there's
+		 * no harm in accommodating the generalisation.
+		 */
+		if ((mask & smrs[i].mask) == mask &&
+		    !((id ^ smrs[i].id) & ~smrs[i].mask))
+			return i;
+		/*
+		 * If the new entry has any other overlap with an existing one,
+		 * though, then there always exists at least one stream ID
+		 * which would cause a conflict, and we can't allow that risk.
+		 */
+		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
+			return -EINVAL;
 	}
 
-	cfg->smrs = NULL;
-	kfree(smrs);
+	return free_idx;
+}
+
+static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
+{
+	if (--smmu->s2crs[idx].count)
+		return false;
+
+	smmu->s2crs[idx] = s2cr_init_val;
+	if (smmu->smrs)
+		smmu->smrs[idx].valid = false;
+
+	return true;
+}
+
+static int arm_smmu_master_alloc_smes(struct device *dev)
+{
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
+	struct arm_smmu_device *smmu = cfg->smmu;
+	struct arm_smmu_smr *smrs = smmu->smrs;
+	struct iommu_group *group;
+	int i, idx, ret;
+
+	mutex_lock(&smmu->stream_map_mutex);
+	/* Figure out a viable stream map entry allocation */
+	for_each_cfg_sme(fwspec, i, idx) {
+		u16 sid = fwspec->ids[i];
+		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
+
+		if (idx != INVALID_SMENDX) {
+			ret = -EEXIST;
+			goto out_err;
+		}
+
+		ret = arm_smmu_find_sme(smmu, sid, mask);
+		if (ret < 0)
+			goto out_err;
+
+		idx = ret;
+		if (smrs && smmu->s2crs[idx].count == 0) {
+			smrs[idx].id = sid;
+			smrs[idx].mask = mask;
+			smrs[idx].valid = true;
+		}
+		smmu->s2crs[idx].count++;
+		cfg->smendx[i] = (s16)idx;
+	}
+
+	group = iommu_group_get_for_dev(dev);
+	if (!group)
+		group = ERR_PTR(-ENOMEM);
+	if (IS_ERR(group)) {
+		ret = PTR_ERR(group);
+		goto out_err;
+	}
+	iommu_group_put(group);
+
+	/* It worked! Now, poke the actual hardware */
+	for_each_cfg_sme(fwspec, i, idx) {
+		arm_smmu_write_sme(smmu, idx);
+		smmu->s2crs[idx].group = group;
+	}
+
+	mutex_unlock(&smmu->stream_map_mutex);
+	return 0;
+
+out_err:
+	while (i--) {
+		arm_smmu_free_sme(smmu, cfg->smendx[i]);
+		cfg->smendx[i] = INVALID_SMENDX;
+	}
+	mutex_unlock(&smmu->stream_map_mutex);
+	return ret;
+}
+
+static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
+{
+	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
+	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
+	int i, idx;
+
+	mutex_lock(&smmu->stream_map_mutex);
+	for_each_cfg_sme(fwspec, i, idx) {
+		if (arm_smmu_free_sme(smmu, idx))
+			arm_smmu_write_sme(smmu, idx);
+		cfg->smendx[i] = INVALID_SMENDX;
+	}
+	mutex_unlock(&smmu->stream_map_mutex);
 }
 
 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
-				      struct arm_smmu_master_cfg *cfg)
+				      struct iommu_fwspec *fwspec)
 {
-	int i, ret;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
+	enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
+	u8 cbndx = smmu_domain->cfg.cbndx;
+	int i, idx;
 
-	/*
-	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
-	 * for all devices behind the SMMU. Note that we need to take
-	 * care configuring SMRs for devices both a platform_device and
-	 * and a PCI device (i.e. a PCI host controller)
-	 */
-	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
-		return 0;
+	for_each_cfg_sme(fwspec, i, idx) {
+		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
+			continue;
 
-	/* Devices in an IOMMU group may already be configured */
-	ret = arm_smmu_master_configure_smrs(smmu, cfg);
-	if (ret)
-		return ret == -EEXIST ? 0 : ret;
-
-	for (i = 0; i < cfg->num_streamids; ++i) {
-		u32 idx, s2cr;
-
-		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
-		s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
-		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
-		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
+		s2cr[idx].type = type;
+		s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
+		s2cr[idx].cbndx = cbndx;
+		arm_smmu_write_s2cr(smmu, idx);
 	}
-
 	return 0;
 }
 
-static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
-					  struct arm_smmu_master_cfg *cfg)
-{
-	int i;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-
-	/* An IOMMU group is torn down by the first device to be removed */
-	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
-		return;
-
-	/*
-	 * We *must* clear the S2CR first, because freeing the SMR means
-	 * that it can be re-allocated immediately.
-	 */
-	for (i = 0; i < cfg->num_streamids; ++i) {
-		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
-		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
-
-		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
-	}
-
-	arm_smmu_master_free_smrs(smmu, cfg);
-}
-
-static void arm_smmu_detach_dev(struct device *dev,
-				struct arm_smmu_master_cfg *cfg)
-{
-	struct iommu_domain *domain = dev->archdata.iommu;
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-
-	dev->archdata.iommu = NULL;
-	arm_smmu_domain_remove_master(smmu_domain, cfg);
-}
-
 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
 	int ret;
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
 	struct arm_smmu_device *smmu;
-	struct arm_smmu_master_cfg *cfg;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
-	smmu = find_smmu_for_device(dev);
-	if (!smmu) {
+	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
 		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
 		return -ENXIO;
 	}
 
+	smmu = fwspec_smmu(fwspec);
 	/* Ensure that the domain is finalised */
 	ret = arm_smmu_init_domain_context(domain, smmu);
 	if (ret < 0)
@@ -1228,18 +1246,7 @@
 	}
 
 	/* Looks ok, so add the device to the domain */
-	cfg = find_smmu_master_cfg(dev);
-	if (!cfg)
-		return -ENODEV;
-
-	/* Detach the dev from its current domain */
-	if (dev->archdata.iommu)
-		arm_smmu_detach_dev(dev, cfg);
-
-	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
-	if (!ret)
-		dev->archdata.iommu = domain;
-	return ret;
+	return arm_smmu_domain_add_master(smmu_domain, fwspec);
 }
 
 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
@@ -1358,111 +1365,114 @@
 	}
 }
 
-static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
+static int arm_smmu_match_node(struct device *dev, void *data)
 {
-	*((u16 *)data) = alias;
-	return 0; /* Continue walking */
+	return dev->of_node == data;
 }
 
-static void __arm_smmu_release_pci_iommudata(void *data)
+static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
 {
-	kfree(data);
-}
-
-static int arm_smmu_init_pci_device(struct pci_dev *pdev,
-				    struct iommu_group *group)
-{
-	struct arm_smmu_master_cfg *cfg;
-	u16 sid;
-	int i;
-
-	cfg = iommu_group_get_iommudata(group);
-	if (!cfg) {
-		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
-		if (!cfg)
-			return -ENOMEM;
-
-		iommu_group_set_iommudata(group, cfg,
-					  __arm_smmu_release_pci_iommudata);
-	}
-
-	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
-		return -ENOSPC;
-
-	/*
-	 * Assume Stream ID == Requester ID for now.
-	 * We need a way to describe the ID mappings in FDT.
-	 */
-	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
-	for (i = 0; i < cfg->num_streamids; ++i)
-		if (cfg->streamids[i] == sid)
-			break;
-
-	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
-	if (i == cfg->num_streamids)
-		cfg->streamids[cfg->num_streamids++] = sid;
-
-	return 0;
-}
-
-static int arm_smmu_init_platform_device(struct device *dev,
-					 struct iommu_group *group)
-{
-	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
-	struct arm_smmu_master *master;
-
-	if (!smmu)
-		return -ENODEV;
-
-	master = find_smmu_master(smmu, dev->of_node);
-	if (!master)
-		return -ENODEV;
-
-	iommu_group_set_iommudata(group, &master->cfg, NULL);
-
-	return 0;
+	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
+						np, arm_smmu_match_node);
+	put_device(dev);
+	return dev ? dev_get_drvdata(dev) : NULL;
 }
 
 static int arm_smmu_add_device(struct device *dev)
 {
-	struct iommu_group *group;
+	struct arm_smmu_device *smmu;
+	struct arm_smmu_master_cfg *cfg;
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	int i, ret;
 
-	group = iommu_group_get_for_dev(dev);
-	if (IS_ERR(group))
-		return PTR_ERR(group);
+	if (using_legacy_binding) {
+		ret = arm_smmu_register_legacy_master(dev, &smmu);
+		fwspec = dev->iommu_fwspec;
+		if (ret)
+			goto out_free;
+	} else if (fwspec) {
+		smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
+	} else {
+		return -ENODEV;
+	}
 
-	iommu_group_put(group);
+	ret = -EINVAL;
+	for (i = 0; i < fwspec->num_ids; i++) {
+		u16 sid = fwspec->ids[i];
+		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
+
+		if (sid & ~smmu->streamid_mask) {
+			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
+				sid, smmu->streamid_mask);
+			goto out_free;
+		}
+		if (mask & ~smmu->smr_mask_mask) {
+			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
+				sid, smmu->smr_mask_mask);
+			goto out_free;
+		}
+	}
+
+	ret = -ENOMEM;
+	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
+		      GFP_KERNEL);
+	if (!cfg)
+		goto out_free;
+
+	cfg->smmu = smmu;
+	fwspec->iommu_priv = cfg;
+	while (i--)
+		cfg->smendx[i] = INVALID_SMENDX;
+
+	ret = arm_smmu_master_alloc_smes(dev);
+	if (ret)
+		goto out_free;
+
 	return 0;
+
+out_free:
+	if (fwspec)
+		kfree(fwspec->iommu_priv);
+	iommu_fwspec_free(dev);
+	return ret;
 }
 
 static void arm_smmu_remove_device(struct device *dev)
 {
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+	if (!fwspec || fwspec->ops != &arm_smmu_ops)
+		return;
+
+	arm_smmu_master_free_smes(fwspec);
 	iommu_group_remove_device(dev);
+	kfree(fwspec->iommu_priv);
+	iommu_fwspec_free(dev);
 }
 
 static struct iommu_group *arm_smmu_device_group(struct device *dev)
 {
-	struct iommu_group *group;
-	int ret;
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
+	struct iommu_group *group = NULL;
+	int i, idx;
+
+	for_each_cfg_sme(fwspec, i, idx) {
+		if (group && smmu->s2crs[idx].group &&
+		    group != smmu->s2crs[idx].group)
+			return ERR_PTR(-EINVAL);
+
+		group = smmu->s2crs[idx].group;
+	}
+
+	if (group)
+		return group;
 
 	if (dev_is_pci(dev))
 		group = pci_device_group(dev);
 	else
 		group = generic_device_group(dev);
 
-	if (IS_ERR(group))
-		return group;
-
-	if (dev_is_pci(dev))
-		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
-	else
-		ret = arm_smmu_init_platform_device(dev, group);
-
-	if (ret) {
-		iommu_group_put(group);
-		group = ERR_PTR(ret);
-	}
-
 	return group;
 }
 
@@ -1510,6 +1520,19 @@
 	return ret;
 }
 
+static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+{
+	u32 fwid = 0;
+
+	if (args->args_count > 0)
+		fwid |= (u16)args->args[0];
+
+	if (args->args_count > 1)
+		fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
+
+	return iommu_fwspec_add_ids(dev, &fwid, 1);
+}
+
 static struct iommu_ops arm_smmu_ops = {
 	.capable		= arm_smmu_capable,
 	.domain_alloc		= arm_smmu_domain_alloc,
@@ -1524,6 +1547,7 @@
 	.device_group		= arm_smmu_device_group,
 	.domain_get_attr	= arm_smmu_domain_get_attr,
 	.domain_set_attr	= arm_smmu_domain_set_attr,
+	.of_xlate		= arm_smmu_of_xlate,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 };
 
@@ -1531,19 +1555,19 @@
 {
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 	void __iomem *cb_base;
-	int i = 0;
+	int i;
 	u32 reg, major;
 
 	/* clear global FSR */
 	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
 	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
 
-	/* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
-	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
-	for (i = 0; i < smmu->num_mapping_groups; ++i) {
-		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
-		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
-	}
+	/*
+	 * Reset stream mapping groups: Initial values mark all SMRn as
+	 * invalid and all S2CRn as bypass unless overridden.
+	 */
+	for (i = 0; i < smmu->num_mapping_groups; ++i)
+		arm_smmu_write_sme(smmu, i);
 
 	/*
 	 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
@@ -1632,6 +1656,7 @@
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 	u32 id;
 	bool cttw_dt, cttw_reg;
+	int i;
 
 	dev_notice(smmu->dev, "probing hardware configuration...\n");
 	dev_notice(smmu->dev, "SMMUv%d with:\n",
@@ -1690,39 +1715,55 @@
 		dev_notice(smmu->dev,
 			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");
 
+	/* Max. number of entries we have for stream matching/indexing */
+	size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+	smmu->streamid_mask = size - 1;
 	if (id & ID0_SMS) {
-		u32 smr, sid, mask;
+		u32 smr;
 
 		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
-		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
-					   ID0_NUMSMRG_MASK;
-		if (smmu->num_mapping_groups == 0) {
+		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
+		if (size == 0) {
 			dev_err(smmu->dev,
 				"stream-matching supported, but no SMRs present!\n");
 			return -ENODEV;
 		}
 
-		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
-		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
+		/*
+		 * SMR.ID bits may not be preserved if the corresponding MASK
+		 * bits are set, so check each one separately. We can reject
+		 * masters later if they try to claim IDs outside these masks.
+		 */
+		smr = smmu->streamid_mask << SMR_ID_SHIFT;
 		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
 		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+		smmu->streamid_mask = smr >> SMR_ID_SHIFT;
 
-		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
-		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
-		if ((mask & sid) != sid) {
-			dev_err(smmu->dev,
-				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
-				mask, sid);
-			return -ENODEV;
-		}
+		smr = smmu->streamid_mask << SMR_MASK_SHIFT;
+		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+		smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
+
+		/* Zero-initialised to mark as invalid */
+		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
+					  GFP_KERNEL);
+		if (!smmu->smrs)
+			return -ENOMEM;
 
 		dev_notice(smmu->dev,
-			   "\tstream matching with %u register groups, mask 0x%x",
-			   smmu->num_mapping_groups, mask);
-	} else {
-		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
-					   ID0_NUMSIDB_MASK;
+			   "\tstream matching with %lu register groups, mask 0x%x",
+			   size, smmu->smr_mask_mask);
 	}
+	/* s2cr->type == 0 means translation, so initialise explicitly */
+	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
+					 GFP_KERNEL);
+	if (!smmu->s2crs)
+		return -ENOMEM;
+	for (i = 0; i < size; i++)
+		smmu->s2crs[i] = s2cr_init_val;
+
+	smmu->num_mapping_groups = size;
+	mutex_init(&smmu->stream_map_mutex);
 
 	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
 		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
@@ -1855,15 +1896,24 @@
 
 static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 {
-	const struct of_device_id *of_id;
 	const struct arm_smmu_match_data *data;
 	struct resource *res;
 	struct arm_smmu_device *smmu;
 	struct device *dev = &pdev->dev;
-	struct rb_node *node;
-	struct of_phandle_iterator it;
-	struct arm_smmu_phandle_args *masterspec;
 	int num_irqs, i, err;
+	bool legacy_binding;
+
+	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
+	if (legacy_binding && !using_generic_binding) {
+		if (!using_legacy_binding)
+			pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
+		using_legacy_binding = true;
+	} else if (!legacy_binding && !using_legacy_binding) {
+		using_generic_binding = true;
+	} else {
+		dev_err(dev, "not probing due to mismatched DT properties\n");
+		return -ENODEV;
+	}
 
 	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
 	if (!smmu) {
@@ -1872,8 +1922,7 @@
 	}
 	smmu->dev = dev;
 
-	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
-	data = of_id->data;
+	data = of_device_get_match_data(dev);
 	smmu->version = data->version;
 	smmu->model = data->model;
 
@@ -1923,37 +1972,6 @@
 	if (err)
 		return err;
 
-	i = 0;
-	smmu->masters = RB_ROOT;
-
-	err = -ENOMEM;
-	/* No need to zero the memory for masterspec */
-	masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
-	if (!masterspec)
-		goto out_put_masters;
-
-	of_for_each_phandle(&it, err, dev->of_node,
-			    "mmu-masters", "#stream-id-cells", 0) {
-		int count = of_phandle_iterator_args(&it, masterspec->args,
-						     MAX_MASTER_STREAMIDS);
-		masterspec->np		= of_node_get(it.node);
-		masterspec->args_count	= count;
-
-		err = register_smmu_master(smmu, dev, masterspec);
-		if (err) {
-			dev_err(dev, "failed to add master %s\n",
-				masterspec->np->name);
-			kfree(masterspec);
-			goto out_put_masters;
-		}
-
-		i++;
-	}
-
-	dev_notice(dev, "registered %d master devices\n", i);
-
-	kfree(masterspec);
-
 	parse_driver_options(smmu);
 
 	if (smmu->version == ARM_SMMU_V2 &&
@@ -1961,8 +1979,7 @@
 		dev_err(dev,
 			"found only %d context interrupt(s) but %d required\n",
 			smmu->num_context_irqs, smmu->num_context_banks);
-		err = -ENODEV;
-		goto out_put_masters;
+		return -ENODEV;
 	}
 
 	for (i = 0; i < smmu->num_global_irqs; ++i) {
@@ -1974,59 +1991,39 @@
 		if (err) {
 			dev_err(dev, "failed to request global IRQ %d (%u)\n",
 				i, smmu->irqs[i]);
-			goto out_put_masters;
+			return err;
 		}
 	}
 
-	INIT_LIST_HEAD(&smmu->list);
-	spin_lock(&arm_smmu_devices_lock);
-	list_add(&smmu->list, &arm_smmu_devices);
-	spin_unlock(&arm_smmu_devices_lock);
-
+	of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
+	platform_set_drvdata(pdev, smmu);
 	arm_smmu_device_reset(smmu);
-	return 0;
 
-out_put_masters:
-	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
-		struct arm_smmu_master *master
-			= container_of(node, struct arm_smmu_master, node);
-		of_node_put(master->of_node);
+	/* Oh, for a proper bus abstraction */
+	if (!iommu_present(&platform_bus_type))
+		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+#ifdef CONFIG_ARM_AMBA
+	if (!iommu_present(&amba_bustype))
+		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+#endif
+#ifdef CONFIG_PCI
+	if (!iommu_present(&pci_bus_type)) {
+		pci_request_acs();
+		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
 	}
-
-	return err;
+#endif
+	return 0;
 }
 
 static int arm_smmu_device_remove(struct platform_device *pdev)
 {
-	int i;
-	struct device *dev = &pdev->dev;
-	struct arm_smmu_device *curr, *smmu = NULL;
-	struct rb_node *node;
-
-	spin_lock(&arm_smmu_devices_lock);
-	list_for_each_entry(curr, &arm_smmu_devices, list) {
-		if (curr->dev == dev) {
-			smmu = curr;
-			list_del(&smmu->list);
-			break;
-		}
-	}
-	spin_unlock(&arm_smmu_devices_lock);
+	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
 
 	if (!smmu)
 		return -ENODEV;
 
-	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
-		struct arm_smmu_master *master
-			= container_of(node, struct arm_smmu_master, node);
-		of_node_put(master->of_node);
-	}
-
 	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
-		dev_err(dev, "removing device with active domains!\n");
-
-	for (i = 0; i < smmu->num_global_irqs; ++i)
-		devm_free_irq(smmu->dev, smmu->irqs[i], smmu);
+		dev_err(&pdev->dev, "removing device with active domains!\n");
 
 	/* Turn the thing off */
 	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
@@ -2044,41 +2041,14 @@
 
 static int __init arm_smmu_init(void)
 {
-	struct device_node *np;
-	int ret;
+	static bool registered;
+	int ret = 0;
 
-	/*
-	 * Play nice with systems that don't have an ARM SMMU by checking that
-	 * an ARM SMMU exists in the system before proceeding with the driver
-	 * and IOMMU bus operation registration.
-	 */
-	np = of_find_matching_node(NULL, arm_smmu_of_match);
-	if (!np)
-		return 0;
-
-	of_node_put(np);
-
-	ret = platform_driver_register(&arm_smmu_driver);
-	if (ret)
-		return ret;
-
-	/* Oh, for a proper bus abstraction */
-	if (!iommu_present(&platform_bus_type))
-		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
-
-#ifdef CONFIG_ARM_AMBA
-	if (!iommu_present(&amba_bustype))
-		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
-#endif
-
-#ifdef CONFIG_PCI
-	if (!iommu_present(&pci_bus_type)) {
-		pci_request_acs();
-		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+	if (!registered) {
+		ret = platform_driver_register(&arm_smmu_driver);
+		registered = !ret;
 	}
-#endif
-
-	return 0;
+	return ret;
 }
 
 static void __exit arm_smmu_exit(void)
@@ -2089,6 +2059,25 @@
 subsys_initcall(arm_smmu_init);
 module_exit(arm_smmu_exit);
 
+static int __init arm_smmu_of_init(struct device_node *np)
+{
+	int ret = arm_smmu_init();
+
+	if (ret)
+		return ret;
+
+	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
+		return -ENODEV;
+
+	return 0;
+}
+IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
+IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
+
 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
 MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 00c8a08..c5ab866 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -25,10 +25,29 @@
 #include <linux/huge_mm.h>
 #include <linux/iommu.h>
 #include <linux/iova.h>
+#include <linux/irq.h>
 #include <linux/mm.h>
+#include <linux/pci.h>
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
 
+struct iommu_dma_msi_page {
+	struct list_head	list;
+	dma_addr_t		iova;
+	phys_addr_t		phys;
+};
+
+struct iommu_dma_cookie {
+	struct iova_domain	iovad;
+	struct list_head	msi_page_list;
+	spinlock_t		msi_lock;
+};
+
+static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
+{
+	return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
+}
+
 int iommu_dma_init(void)
 {
 	return iova_cache_get();
@@ -43,15 +62,19 @@
  */
 int iommu_get_dma_cookie(struct iommu_domain *domain)
 {
-	struct iova_domain *iovad;
+	struct iommu_dma_cookie *cookie;
 
 	if (domain->iova_cookie)
 		return -EEXIST;
 
-	iovad = kzalloc(sizeof(*iovad), GFP_KERNEL);
-	domain->iova_cookie = iovad;
+	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	if (!cookie)
+		return -ENOMEM;
 
-	return iovad ? 0 : -ENOMEM;
+	spin_lock_init(&cookie->msi_lock);
+	INIT_LIST_HEAD(&cookie->msi_page_list);
+	domain->iova_cookie = cookie;
+	return 0;
 }
 EXPORT_SYMBOL(iommu_get_dma_cookie);
 
@@ -63,32 +86,58 @@
  */
 void iommu_put_dma_cookie(struct iommu_domain *domain)
 {
-	struct iova_domain *iovad = domain->iova_cookie;
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iommu_dma_msi_page *msi, *tmp;
 
-	if (!iovad)
+	if (!cookie)
 		return;
 
-	if (iovad->granule)
-		put_iova_domain(iovad);
-	kfree(iovad);
+	if (cookie->iovad.granule)
+		put_iova_domain(&cookie->iovad);
+
+	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
+		list_del(&msi->list);
+		kfree(msi);
+	}
+	kfree(cookie);
 	domain->iova_cookie = NULL;
 }
 EXPORT_SYMBOL(iommu_put_dma_cookie);
 
+static void iova_reserve_pci_windows(struct pci_dev *dev,
+		struct iova_domain *iovad)
+{
+	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
+	struct resource_entry *window;
+	unsigned long lo, hi;
+
+	resource_list_for_each_entry(window, &bridge->windows) {
+		if (resource_type(window->res) != IORESOURCE_MEM &&
+		    resource_type(window->res) != IORESOURCE_IO)
+			continue;
+
+		lo = iova_pfn(iovad, window->res->start - window->offset);
+		hi = iova_pfn(iovad, window->res->end - window->offset);
+		reserve_iova(iovad, lo, hi);
+	}
+}
+
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
  * @base: IOVA at which the mappable address space starts
  * @size: Size of IOVA space
+ * @dev: Device the domain is being initialised for
  *
  * @base and @size should be exact multiples of IOMMU page granularity to
  * avoid rounding surprises. If necessary, we reserve the page at address 0
  * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
  * any change which could make prior IOVAs invalid will fail.
  */
-int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size)
+int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
+		u64 size, struct device *dev)
 {
-	struct iova_domain *iovad = domain->iova_cookie;
+	struct iova_domain *iovad = cookie_iovad(domain);
 	unsigned long order, base_pfn, end_pfn;
 
 	if (!iovad)
@@ -124,6 +173,8 @@
 		iovad->dma_32bit_pfn = end_pfn;
 	} else {
 		init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
+		if (dev && dev_is_pci(dev))
+			iova_reserve_pci_windows(to_pci_dev(dev), iovad);
 	}
 	return 0;
 }
@@ -155,7 +206,7 @@
 static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
 		dma_addr_t dma_limit)
 {
-	struct iova_domain *iovad = domain->iova_cookie;
+	struct iova_domain *iovad = cookie_iovad(domain);
 	unsigned long shift = iova_shift(iovad);
 	unsigned long length = iova_align(iovad, size) >> shift;
 
@@ -171,7 +222,7 @@
 /* The IOVA allocator knows what we mapped, so just unmap whatever that was */
 static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
 {
-	struct iova_domain *iovad = domain->iova_cookie;
+	struct iova_domain *iovad = cookie_iovad(domain);
 	unsigned long shift = iova_shift(iovad);
 	unsigned long pfn = dma_addr >> shift;
 	struct iova *iova = find_iova(iovad, pfn);
@@ -294,7 +345,7 @@
 		void (*flush_page)(struct device *, const void *, phys_addr_t))
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-	struct iova_domain *iovad = domain->iova_cookie;
+	struct iova_domain *iovad = cookie_iovad(domain);
 	struct iova *iova;
 	struct page **pages;
 	struct sg_table sgt;
@@ -386,7 +437,7 @@
 {
 	dma_addr_t dma_addr;
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-	struct iova_domain *iovad = domain->iova_cookie;
+	struct iova_domain *iovad = cookie_iovad(domain);
 	phys_addr_t phys = page_to_phys(page) + offset;
 	size_t iova_off = iova_offset(iovad, phys);
 	size_t len = iova_align(iovad, size + iova_off);
@@ -495,7 +546,7 @@
 		int nents, int prot)
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-	struct iova_domain *iovad = domain->iova_cookie;
+	struct iova_domain *iovad = cookie_iovad(domain);
 	struct iova *iova;
 	struct scatterlist *s, *prev = NULL;
 	dma_addr_t dma_addr;
@@ -587,3 +638,81 @@
 {
 	return dma_addr == DMA_ERROR_CODE;
 }
+
+static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
+		phys_addr_t msi_addr, struct iommu_domain *domain)
+{
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iommu_dma_msi_page *msi_page;
+	struct iova_domain *iovad = &cookie->iovad;
+	struct iova *iova;
+	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+	msi_addr &= ~(phys_addr_t)iova_mask(iovad);
+	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
+		if (msi_page->phys == msi_addr)
+			return msi_page;
+
+	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
+	if (!msi_page)
+		return NULL;
+
+	iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
+	if (!iova)
+		goto out_free_page;
+
+	msi_page->phys = msi_addr;
+	msi_page->iova = iova_dma_addr(iovad, iova);
+	if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
+		goto out_free_iova;
+
+	INIT_LIST_HEAD(&msi_page->list);
+	list_add(&msi_page->list, &cookie->msi_page_list);
+	return msi_page;
+
+out_free_iova:
+	__free_iova(iovad, iova);
+out_free_page:
+	kfree(msi_page);
+	return NULL;
+}
+
+void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
+{
+	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
+	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+	struct iommu_dma_cookie *cookie;
+	struct iommu_dma_msi_page *msi_page;
+	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
+	unsigned long flags;
+
+	if (!domain || !domain->iova_cookie)
+		return;
+
+	cookie = domain->iova_cookie;
+
+	/*
+	 * We disable IRQs to rule out a possible inversion against
+	 * irq_desc_lock if, say, someone tries to retarget the affinity
+	 * of an MSI from within an IPI handler.
+	 */
+	spin_lock_irqsave(&cookie->msi_lock, flags);
+	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
+	spin_unlock_irqrestore(&cookie->msi_lock, flags);
+
+	if (WARN_ON(!msi_page)) {
+		/*
+		 * We're called from a void callback, so the best we can do is
+		 * 'fail' by filling the message with obviously bogus values.
+		 * Since we got this far due to an IOMMU being present, it's
+		 * not like the existing address would have worked anyway...
+		 */
+		msg->address_hi = ~0U;
+		msg->address_lo = ~0U;
+		msg->data = ~0U;
+	} else {
+		msg->address_hi = upper_32_bits(msi_page->iova);
+		msg->address_lo &= iova_mask(&cookie->iovad);
+		msg->address_lo += lower_32_bits(msi_page->iova);
+	}
+}
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 33dcc29..30808e9 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1345,8 +1345,8 @@
 		exynos_iommu_init();
 
 	pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
-	if (IS_ERR(pdev))
-		return PTR_ERR(pdev);
+	if (!pdev)
+		return -ENODEV;
 
 	/*
 	 * use the first registered sysmmu device for performing
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index ebb5bf3..a4407ea 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2452,20 +2452,15 @@
 	return 0;
 }
 
-/* domain is initialized */
-static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
+static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
 {
 	struct device_domain_info *info = NULL;
-	struct dmar_domain *domain, *tmp;
+	struct dmar_domain *domain = NULL;
 	struct intel_iommu *iommu;
 	u16 req_id, dma_alias;
 	unsigned long flags;
 	u8 bus, devfn;
 
-	domain = find_domain(dev);
-	if (domain)
-		return domain;
-
 	iommu = device_to_iommu(dev, &bus, &devfn);
 	if (!iommu)
 		return NULL;
@@ -2487,9 +2482,9 @@
 		}
 		spin_unlock_irqrestore(&device_domain_lock, flags);
 
-		/* DMA alias already has a domain, uses it */
+		/* DMA alias already has a domain, use it */
 		if (info)
-			goto found_domain;
+			goto out;
 	}
 
 	/* Allocate and initialize new domain for the device */
@@ -2501,28 +2496,67 @@
 		return NULL;
 	}
 
-	/* register PCI DMA alias device */
-	if (dev_is_pci(dev) && req_id != dma_alias) {
-		tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
-					       dma_alias & 0xff, NULL, domain);
+out:
 
-		if (!tmp || tmp != domain) {
-			domain_exit(domain);
-			domain = tmp;
+	return domain;
+}
+
+static struct dmar_domain *set_domain_for_dev(struct device *dev,
+					      struct dmar_domain *domain)
+{
+	struct intel_iommu *iommu;
+	struct dmar_domain *tmp;
+	u16 req_id, dma_alias;
+	u8 bus, devfn;
+
+	iommu = device_to_iommu(dev, &bus, &devfn);
+	if (!iommu)
+		return NULL;
+
+	req_id = ((u16)bus << 8) | devfn;
+
+	if (dev_is_pci(dev)) {
+		struct pci_dev *pdev = to_pci_dev(dev);
+
+		pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
+
+		/* register PCI DMA alias device */
+		if (req_id != dma_alias) {
+			tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
+					dma_alias & 0xff, NULL, domain);
+
+			if (!tmp || tmp != domain)
+				return tmp;
 		}
-
-		if (!domain)
-			return NULL;
 	}
 
-found_domain:
 	tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
+	if (!tmp || tmp != domain)
+		return tmp;
 
-	if (!tmp || tmp != domain) {
+	return domain;
+}
+
+static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
+{
+	struct dmar_domain *domain, *tmp;
+
+	domain = find_domain(dev);
+	if (domain)
+		goto out;
+
+	domain = find_or_alloc_domain(dev, gaw);
+	if (!domain)
+		goto out;
+
+	tmp = set_domain_for_dev(dev, domain);
+	if (!tmp || domain != tmp) {
 		domain_exit(domain);
 		domain = tmp;
 	}
 
+out:
+
 	return domain;
 }
 
@@ -3394,17 +3428,18 @@
 
 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
 {
+	struct dmar_domain *domain, *tmp;
 	struct dmar_rmrr_unit *rmrr;
-	struct dmar_domain *domain;
 	struct device *i_dev;
 	int i, ret;
 
-	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-	if (!domain) {
-		pr_err("Allocating domain for %s failed\n",
-		       dev_name(dev));
-		return NULL;
-	}
+	domain = find_domain(dev);
+	if (domain)
+		goto out;
+
+	domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+	if (!domain)
+		goto out;
 
 	/* We have a new domain - setup possible RMRRs for the device */
 	rcu_read_lock();
@@ -3423,6 +3458,18 @@
 	}
 	rcu_read_unlock();
 
+	tmp = set_domain_for_dev(dev, domain);
+	if (!tmp || domain != tmp) {
+		domain_exit(domain);
+		domain = tmp;
+	}
+
+out:
+
+	if (!domain)
+		pr_err("Allocating domain for %s failed\n", dev_name(dev));
+
+
 	return domain;
 }
 
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index def8ca1..f50e51c 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -633,6 +633,10 @@
 {
 	struct arm_v7s_io_pgtable *data;
 
+#ifdef PHYS_OFFSET
+	if (upper_32_bits(PHYS_OFFSET))
+		return NULL;
+#endif
 	if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS)
 		return NULL;
 
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index b06d935..9a2f196 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -31,6 +31,7 @@
 #include <linux/err.h>
 #include <linux/pci.h>
 #include <linux/bitops.h>
+#include <linux/property.h>
 #include <trace/events/iommu.h>
 
 static struct kset *iommu_group_kset;
@@ -1613,3 +1614,60 @@
 
 	return ret;
 }
+
+int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
+		      const struct iommu_ops *ops)
+{
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+	if (fwspec)
+		return ops == fwspec->ops ? 0 : -EINVAL;
+
+	fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
+	if (!fwspec)
+		return -ENOMEM;
+
+	of_node_get(to_of_node(iommu_fwnode));
+	fwspec->iommu_fwnode = iommu_fwnode;
+	fwspec->ops = ops;
+	dev->iommu_fwspec = fwspec;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_fwspec_init);
+
+void iommu_fwspec_free(struct device *dev)
+{
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+	if (fwspec) {
+		fwnode_handle_put(fwspec->iommu_fwnode);
+		kfree(fwspec);
+		dev->iommu_fwspec = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(iommu_fwspec_free);
+
+int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
+{
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	size_t size;
+	int i;
+
+	if (!fwspec)
+		return -EINVAL;
+
+	size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
+	if (size > sizeof(*fwspec)) {
+		fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
+		if (!fwspec)
+			return -ENOMEM;
+	}
+
+	for (i = 0; i < num_ids; i++)
+		fwspec->ids[fwspec->num_ids + i] = ids[i];
+
+	fwspec->num_ids += num_ids;
+	dev->iommu_fwspec = fwspec;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 2fdbac6..ace331d 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -636,7 +636,7 @@
 	spin_unlock(&ipmmu_devices_lock);
 
 	if (ret < 0)
-		return -ENODEV;
+		goto error;
 
 	for (i = 0; i < num_utlbs; ++i) {
 		if (utlbs[i] >= mmu->num_utlbs) {
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 57f23ea..5b82862 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -22,6 +22,7 @@
 #include <linux/limits.h>
 #include <linux/of.h>
 #include <linux/of_iommu.h>
+#include <linux/of_pci.h>
 #include <linux/slab.h>
 
 static const struct of_device_id __iommu_of_table_sentinel
@@ -134,6 +135,47 @@
 	return ops;
 }
 
+static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
+{
+	struct of_phandle_args *iommu_spec = data;
+
+	iommu_spec->args[0] = alias;
+	return iommu_spec->np == pdev->bus->dev.of_node;
+}
+
+static const struct iommu_ops
+*of_pci_iommu_configure(struct pci_dev *pdev, struct device_node *bridge_np)
+{
+	const struct iommu_ops *ops;
+	struct of_phandle_args iommu_spec;
+
+	/*
+	 * Start by tracing the RID alias down the PCI topology as
+	 * far as the host bridge whose OF node we have...
+	 * (we're not even attempting to handle multi-alias devices yet)
+	 */
+	iommu_spec.args_count = 1;
+	iommu_spec.np = bridge_np;
+	pci_for_each_dma_alias(pdev, __get_pci_rid, &iommu_spec);
+	/*
+	 * ...then find out what that becomes once it escapes the PCI
+	 * bus into the system beyond, and which IOMMU it ends up at.
+	 */
+	iommu_spec.np = NULL;
+	if (of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map",
+			   "iommu-map-mask", &iommu_spec.np, iommu_spec.args))
+		return NULL;
+
+	ops = of_iommu_get_ops(iommu_spec.np);
+	if (!ops || !ops->of_xlate ||
+	    iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) ||
+	    ops->of_xlate(&pdev->dev, &iommu_spec))
+		ops = NULL;
+
+	of_node_put(iommu_spec.np);
+	return ops;
+}
+
 const struct iommu_ops *of_iommu_configure(struct device *dev,
 					   struct device_node *master_np)
 {
@@ -142,12 +184,8 @@
 	const struct iommu_ops *ops = NULL;
 	int idx = 0;
 
-	/*
-	 * We can't do much for PCI devices without knowing how
-	 * device IDs are wired up from the PCI bus to the IOMMU.
-	 */
 	if (dev_is_pci(dev))
-		return NULL;
+		return of_pci_iommu_configure(to_pci_dev(dev), master_np);
 
 	/*
 	 * We don't currently walk up the tree looking for a parent IOMMU.
@@ -160,7 +198,9 @@
 		np = iommu_spec.np;
 		ops = of_iommu_get_ops(np);
 
-		if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec))
+		if (!ops || !ops->of_xlate ||
+		    iommu_fwspec_init(dev, &np->fwnode, ops) ||
+		    ops->of_xlate(dev, &iommu_spec))
 			goto err_put_node;
 
 		of_node_put(np);
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index b844c89..daa4ae8 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -52,7 +52,6 @@
 
 #include <linux/bitops.h>
 #include <linux/cpumask.h>
-#include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 0fea985..353c549 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -12,7 +12,6 @@
 #define pr_fmt(fmt)	KBUILD_MODNAME	": " fmt
 
 #include <linux/bitops.h>
-#include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 0ec9263..64c2692 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -13,7 +13,6 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 1d4a5b4..bddf169 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -18,7 +18,6 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <linux/kconfig.h>
 #include <linux/platform_device.h>
 #include <linux/spinlock.h>
 #include <linux/of.h>
diff --git a/drivers/irqchip/irq-eznps.c b/drivers/irqchip/irq-eznps.c
index efbf0e4..ebc2b0b 100644
--- a/drivers/irqchip/irq-eznps.c
+++ b/drivers/irqchip/irq-eznps.c
@@ -85,7 +85,7 @@
 	nps_ack_gic();
 }
 
-static void nps400_irq_eoi(struct irq_data *irqd)
+static void nps400_irq_ack(struct irq_data *irqd)
 {
 	unsigned int __maybe_unused irq = irqd_to_hwirq(irqd);
 
@@ -103,7 +103,7 @@
 	.name		= "NPS400 IC",
 	.irq_mask	= nps400_irq_mask,
 	.irq_unmask	= nps400_irq_unmask,
-	.irq_eoi	= nps400_irq_eoi,
+	.irq_ack	= nps400_irq_ack,
 };
 
 static int nps400_irq_map(struct irq_domain *d, unsigned int virq,
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 35eb7ac..863e073 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -16,6 +16,7 @@
 #define pr_fmt(fmt) "GICv2m: " fmt
 
 #include <linux/acpi.h>
+#include <linux/dma-iommu.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>
@@ -108,6 +109,8 @@
 
 	if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
 		msg->data -= v2m->spi_offset;
+
+	iommu_dma_map_msi_msg(data->irq, msg);
 }
 
 static struct irq_chip gicv2m_irq_chip = {
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 35c851c..003495d 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -19,6 +19,7 @@
 #include <linux/bitmap.h>
 #include <linux/cpu.h>
 #include <linux/delay.h>
+#include <linux/dma-iommu.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/acpi_iort.h>
@@ -659,6 +660,8 @@
 	msg->address_lo		= addr & ((1UL << 32) - 1);
 	msg->address_hi		= addr >> 32;
 	msg->data		= its_get_event_id(d);
+
+	iommu_dma_map_msi_msg(d->irq, msg);
 }
 
 static struct irq_chip its_irq_chip = {
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 9b81bd8..19d642e 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -153,7 +153,7 @@
 			return;	/* No PM support in this redistributor */
 	}
 
-	while (count--) {
+	while (--count) {
 		val = readl_relaxed(rbase + GICR_WAKER);
 		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
 			break;
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index 6b304eb..1aec12c 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -38,6 +38,7 @@
 static void enable_8259A_irq(struct irq_data *d);
 static void mask_and_ack_8259A(struct irq_data *d);
 static void init_8259A(int auto_eoi);
+static int (*i8259_poll)(void) = i8259_irq;
 
 static struct irq_chip i8259A_chip = {
 	.name			= "XT-PIC",
@@ -51,6 +52,11 @@
  * 8259A PIC functions to handle ISA devices:
  */
 
+void i8259_set_poll(int (*poll)(void))
+{
+	i8259_poll = poll;
+}
+
 /*
  * This contains the irq mask for both 8259A irq controllers,
  */
@@ -89,24 +95,6 @@
 	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
-int i8259A_irq_pending(unsigned int irq)
-{
-	unsigned int mask;
-	unsigned long flags;
-	int ret;
-
-	irq -= I8259A_IRQ_BASE;
-	mask = 1 << irq;
-	raw_spin_lock_irqsave(&i8259A_lock, flags);
-	if (irq < 8)
-		ret = inb(PIC_MASTER_CMD) & mask;
-	else
-		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
-	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-
-	return ret;
-}
-
 void make_8259A_irq(unsigned int irq)
 {
 	disable_irq_nosync(irq);
@@ -355,7 +343,7 @@
 static void i8259_irq_dispatch(struct irq_desc *desc)
 {
 	struct irq_domain *domain = irq_desc_get_handler_data(desc);
-	int hwirq = i8259_irq();
+	int hwirq = i8259_poll();
 	unsigned int irq;
 
 	if (hwirq < 0)
@@ -370,13 +358,15 @@
 	struct irq_domain *domain;
 	unsigned int parent_irq;
 
+	domain = __init_i8259_irqs(node);
+
 	parent_irq = irq_of_parse_and_map(node, 0);
 	if (!parent_irq) {
 		pr_err("Failed to map i8259 parent IRQ\n");
+		irq_domain_remove(domain);
 		return -ENODEV;
 	}
 
-	domain = __init_i8259_irqs(node);
 	irq_set_chained_handler_and_data(parent_irq, i8259_irq_dispatch,
 					 domain);
 	return 0;
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
index 84b01de..033bccb 100644
--- a/drivers/irqchip/irq-jcore-aic.c
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -25,12 +25,30 @@
 
 static struct irq_chip jcore_aic;
 
+/*
+ * The J-Core AIC1 and AIC2 are cpu-local interrupt controllers and do
+ * not distinguish or use distinct irq number ranges for per-cpu event
+ * interrupts (timer, IPI). Since information to determine whether a
+ * particular irq number should be treated as per-cpu is not available
+ * at mapping time, we use a wrapper handler function which chooses
+ * the right handler at runtime based on whether IRQF_PERCPU was used
+ * when requesting the irq.
+ */
+
+static void handle_jcore_irq(struct irq_desc *desc)
+{
+	if (irqd_is_per_cpu(irq_desc_get_irq_data(desc)))
+		handle_percpu_irq(desc);
+	else
+		handle_simple_irq(desc);
+}
+
 static int jcore_aic_irqdomain_map(struct irq_domain *d, unsigned int irq,
 				   irq_hw_number_t hwirq)
 {
 	struct irq_chip *aic = d->host_data;
 
-	irq_set_chip_and_handler(irq, aic, handle_simple_irq);
+	irq_set_chip_and_handler(irq, aic, handle_jcore_irq);
 
 	return 0;
 }
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
index 8c38b3d..0cdd923 100644
--- a/drivers/irqchip/irq-metag-ext.c
+++ b/drivers/irqchip/irq-metag-ext.c
@@ -436,7 +436,6 @@
 
 /**
  * meta_intc_irq_demux() - external irq de-multiplexer
- * @irq:	the virtual interrupt number
  * @desc:	the interrupt description structure for this irq
  *
  * The cpu receives an interrupt on TR2 when a SoC interrupt has occurred. It is
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index f811a7d..74192f6 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -498,7 +498,6 @@
  * vic_init_cascaded() - initialise a cascaded vectored interrupt controller
  * @base: iomem base address
  * @parent_irq: the parent IRQ we're cascaded off
- * @irq_start: starting interrupt number, must be muliple of 32
  * @vic_sources: bitmask of interrupt sources to allow
  * @resume_sources: bitmask of interrupt sources to allow for resume
  *
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 5eacce1..dc75bea 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -581,7 +581,7 @@
 	if (!md->init_tio_pdu)
 		memset(&tio->info, 0, sizeof(tio->info));
 	if (md->kworker_task)
-		init_kthread_work(&tio->work, map_tio_request);
+		kthread_init_work(&tio->work, map_tio_request);
 }
 
 static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
@@ -831,7 +831,7 @@
 		tio = tio_from_request(rq);
 		/* Establish tio->ti before queuing work (map_tio_request) */
 		tio->ti = ti;
-		queue_kthread_work(&md->kworker, &tio->work);
+		kthread_queue_work(&md->kworker, &tio->work);
 		BUG_ON(!irqs_disabled());
 	}
 }
@@ -853,7 +853,7 @@
 	blk_queue_prep_rq(md->queue, dm_old_prep_fn);
 
 	/* Initialize the request-based DM worker thread */
-	init_kthread_worker(&md->kworker);
+	kthread_init_worker(&md->kworker);
 	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
 				       "kdmwork-%s", dm_device_name(md));
 	if (IS_ERR(md->kworker_task))
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index be35258..147af95 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1891,7 +1891,7 @@
 	spin_unlock_irq(q->queue_lock);
 
 	if (dm_request_based(md) && md->kworker_task)
-		flush_kthread_worker(&md->kworker);
+		kthread_flush_worker(&md->kworker);
 
 	/*
 	 * Take suspend_lock so that presuspend and postsuspend methods
@@ -2147,7 +2147,7 @@
 	if (dm_request_based(md)) {
 		dm_stop_queue(md->queue);
 		if (md->kworker_task)
-			flush_kthread_worker(&md->kworker);
+			kthread_flush_worker(&md->kworker);
 	}
 
 	flush_workqueue(md->wq);
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 962f2a9a..7b85402 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -180,14 +180,14 @@
 # Common driver options
 source "drivers/media/common/Kconfig"
 
-comment "Media ancillary drivers (tuners, sensors, i2c, frontends)"
+comment "Media ancillary drivers (tuners, sensors, i2c, spi, frontends)"
 
 #
-# Ancillary drivers (tuners, i2c, frontends)
+# Ancillary drivers (tuners, i2c, spi, frontends)
 #
 
 config MEDIA_SUBDRV_AUTOSELECT
-	bool "Autoselect ancillary drivers (tuners, sensors, i2c, frontends)"
+	bool "Autoselect ancillary drivers (tuners, sensors, i2c, spi, frontends)"
 	depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_CAMERA_SUPPORT || MEDIA_SDR_SUPPORT
 	depends on HAS_IOMEM
 	select I2C
@@ -216,6 +216,7 @@
 	default MODULES
 
 source "drivers/media/i2c/Kconfig"
+source "drivers/media/spi/Kconfig"
 source "drivers/media/tuners/Kconfig"
 source "drivers/media/dvb-frontends/Kconfig"
 
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 081a786..0deaa93 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -32,6 +32,6 @@
 # Finally, merge the drivers that require the core
 #
 
-obj-y += common/ platform/ pci/ usb/ mmc/ firewire/
+obj-y += common/ platform/ pci/ usb/ mmc/ firewire/ spi/
 obj-$(CONFIG_VIDEO_DEV) += radio/
 
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index 3ec3ceb..1684810 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -504,14 +504,14 @@
 #define COEFF(v, r) ((int)(0.5 + (v) * (r) * 256.0))
 
 	static const int bt601[3][3] = {
-		{ COEFF(0.299, 219),  COEFF(0.587, 219),  COEFF(0.114, 219)  },
-		{ COEFF(-0.169, 224), COEFF(-0.331, 224), COEFF(0.5, 224)    },
-		{ COEFF(0.5, 224),    COEFF(-0.419, 224), COEFF(-0.081, 224) },
+		{ COEFF(0.299, 219),   COEFF(0.587, 219),   COEFF(0.114, 219)   },
+		{ COEFF(-0.1687, 224), COEFF(-0.3313, 224), COEFF(0.5, 224)     },
+		{ COEFF(0.5, 224),     COEFF(-0.4187, 224), COEFF(-0.0813, 224) },
 	};
 	static const int bt601_full[3][3] = {
-		{ COEFF(0.299, 255),  COEFF(0.587, 255),  COEFF(0.114, 255)  },
-		{ COEFF(-0.169, 255), COEFF(-0.331, 255), COEFF(0.5, 255)    },
-		{ COEFF(0.5, 255),    COEFF(-0.419, 255), COEFF(-0.081, 255) },
+		{ COEFF(0.299, 255),   COEFF(0.587, 255),   COEFF(0.114, 255)   },
+		{ COEFF(-0.1687, 255), COEFF(-0.3313, 255), COEFF(0.5, 255)     },
+		{ COEFF(0.5, 255),     COEFF(-0.4187, 255), COEFF(-0.0813, 255) },
 	};
 	static const int rec709[3][3] = {
 		{ COEFF(0.2126, 219),  COEFF(0.7152, 219),  COEFF(0.0722, 219)  },
@@ -558,7 +558,6 @@
 
 	switch (tpg->real_ycbcr_enc) {
 	case V4L2_YCBCR_ENC_601:
-	case V4L2_YCBCR_ENC_SYCC:
 		rgb2ycbcr(full ? bt601_full : bt601, r, g, b, y_offset, y, cb, cr);
 		break;
 	case V4L2_YCBCR_ENC_XV601:
@@ -674,7 +673,6 @@
 
 	switch (tpg->real_ycbcr_enc) {
 	case V4L2_YCBCR_ENC_601:
-	case V4L2_YCBCR_ENC_SYCC:
 		ycbcr2rgb(full ? bt601_full : bt601, y, cb, cr, y_offset, r, g, b);
 		break;
 	case V4L2_YCBCR_ENC_XV601:
diff --git a/drivers/media/dvb-core/demux.h b/drivers/media/dvb-core/demux.h
index 4b4c1da..aeda2b6 100644
--- a/drivers/media/dvb-core/demux.h
+++ b/drivers/media/dvb-core/demux.h
@@ -202,7 +202,7 @@
  *
  * This function callback prototype, provided by the client of the demux API,
  * is called from the demux code. The function is only called when filtering
- * on ae TS feed has been enabled using the start_filtering() function at
+ * on a TS feed has been enabled using the start_filtering\(\) function at
  * the &dmx_demux.
  * Any TS packets that match the filter settings are copied to a circular
  * buffer. The filtered TS packets are delivered to the client using this
@@ -243,8 +243,10 @@
  * will also be sent to the hardware MPEG decoder.
  *
  * Return:
- * 	0, on success;
- * 	-EOVERFLOW, on buffer overflow.
+ *
+ * - 0, on success;
+ *
+ * - -EOVERFLOW, on buffer overflow.
  */
 typedef int (*dmx_ts_cb)(const u8 *buffer1,
 			 size_t buffer1_length,
@@ -293,9 +295,9 @@
 			      size_t buffer2_len,
 			      struct dmx_section_filter *source);
 
-/*--------------------------------------------------------------------------*/
-/* DVB Front-End */
-/*--------------------------------------------------------------------------*/
+/*
+ * DVB Front-End
+ */
 
 /**
  * enum dmx_frontend_source - Used to identify the type of frontend
@@ -349,15 +351,15 @@
 
 /*
  * Demux resource type identifier.
-*/
+ */
 
-/*
- * DMX_FE_ENTRY(): Casts elements in the list of registered
- * front-ends from the generic type struct list_head
- * to the type * struct dmx_frontend
- *.
-*/
-
+/**
+ * DMX_FE_ENTRY - Casts elements in the list of registered
+ *		  front-ends from the generic type struct list_head
+ *		  to the type * struct dmx_frontend
+ *
+ * @list: list of struct dmx_frontend
+ */
 #define DMX_FE_ENTRY(list) \
 	list_entry(list, struct dmx_frontend, connectivity_list)
 
@@ -551,7 +553,6 @@
  *	0 on success;
  *	-EINVAL on bad parameter.
  */
-
 struct dmx_demux {
 	enum dmx_demux_caps capabilities;
 	struct dmx_frontend *frontend;
@@ -581,15 +582,12 @@
 
 	int (*get_pes_pids)(struct dmx_demux *demux, u16 *pids);
 
-	/* private: Not used upstream and never documented */
-#if 0
-	int (*get_caps)(struct dmx_demux *demux, struct dmx_caps *caps);
-	int (*set_source)(struct dmx_demux *demux, const dmx_source_t *src);
-#endif
+	/* private: */
+
 	/*
-	 * private: Only used at av7110, to read some data from firmware.
-	 *	As this was never documented, we have no clue about what's
-	 *	there, and its usage on other drivers aren't encouraged.
+	 * Only used at av7110, to read some data from firmware.
+	 * As this was never documented, we have no clue about what's
+	 * there, and its usage on other drivers aren't encouraged.
 	 */
 	int (*get_stc)(struct dmx_demux *demux, unsigned int num,
 		       u64 *stc, unsigned int *base);
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index be99c8d..01511e5 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -1969,17 +1969,9 @@
 		if ((tvps->num == 0) || (tvps->num > DTV_IOCTL_MAX_MSGS))
 			return -EINVAL;
 
-		tvp = kmalloc(tvps->num * sizeof(struct dtv_property), GFP_KERNEL);
-		if (!tvp) {
-			err = -ENOMEM;
-			goto out;
-		}
-
-		if (copy_from_user(tvp, (void __user *)tvps->props,
-				   tvps->num * sizeof(struct dtv_property))) {
-			err = -EFAULT;
-			goto out;
-		}
+		tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp));
+		if (IS_ERR(tvp))
+			return PTR_ERR(tvp);
 
 		for (i = 0; i < tvps->num; i++) {
 			err = dtv_property_process_set(fe, tvp + i, file);
@@ -2002,17 +1994,9 @@
 		if ((tvps->num == 0) || (tvps->num > DTV_IOCTL_MAX_MSGS))
 			return -EINVAL;
 
-		tvp = kmalloc(tvps->num * sizeof(struct dtv_property), GFP_KERNEL);
-		if (!tvp) {
-			err = -ENOMEM;
-			goto out;
-		}
-
-		if (copy_from_user(tvp, (void __user *)tvps->props,
-				   tvps->num * sizeof(struct dtv_property))) {
-			err = -EFAULT;
-			goto out;
-		}
+		tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp));
+		if (IS_ERR(tvp))
+			return PTR_ERR(tvp);
 
 		/*
 		 * Let's use our own copy of property cache, in order to
diff --git a/drivers/media/dvb-core/dvb_math.h b/drivers/media/dvb-core/dvb_math.h
index 2f03266..4d11d35 100644
--- a/drivers/media/dvb-core/dvb_math.h
+++ b/drivers/media/dvb-core/dvb_math.h
@@ -25,7 +25,7 @@
 #include <linux/types.h>
 
 /**
- * cintlog2 - computes log2 of a value; the result is shifted left by 24 bits
+ * intlog2 - computes log2 of a value; the result is shifted left by 24 bits
  *
  * @value: The value (must be != 0)
  *
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.h b/drivers/media/dvb-core/dvb_ringbuffer.h
index 8af6423..bbe9487 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.h
+++ b/drivers/media/dvb-core/dvb_ringbuffer.h
@@ -18,10 +18,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
 #ifndef _DVB_RINGBUFFER_H_
@@ -30,6 +26,18 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 
+/**
+ * struct dvb_ringbuffer - Describes a ring buffer used at DVB framework
+ *
+ * @data: Area were the ringbuffer data is written
+ * @size: size of the ringbuffer
+ * @pread: next position to read
+ * @pwrite: next position to write
+ * @error: used by ringbuffer clients to indicate that an error happened.
+ * @queue: Wait queue used by ringbuffer clients to indicate when buffer
+ *         was filled
+ * @lock: Spinlock used to protect the ringbuffer
+ */
 struct dvb_ringbuffer {
 	u8               *data;
 	ssize_t           size;
@@ -43,99 +51,161 @@
 
 #define DVB_RINGBUFFER_PKTHDRSIZE 3
 
-
-/*
- * Notes:
- * ------
- * (1) For performance reasons read and write routines don't check buffer sizes
- *     and/or number of bytes free/available. This has to be done before these
- *     routines are called. For example:
+/**
+ * dvb_ringbuffer_init - initialize ring buffer, lock and queue
  *
- *     *** write @buflen: bytes ***
- *     free = dvb_ringbuffer_free(rbuf);
- *     if (free >= buflen)
- *         count = dvb_ringbuffer_write(rbuf, buffer, buflen);
- *     else
- *         ...
- *
- *     *** read min. 1000, max. @bufsize: bytes ***
- *     avail = dvb_ringbuffer_avail(rbuf);
- *     if (avail >= 1000)
- *         count = dvb_ringbuffer_read(rbuf, buffer, min(avail, bufsize));
- *     else
- *         ...
- *
- * (2) If there is exactly one reader and one writer, there is no need
- *     to lock read or write operations.
- *     Two or more readers must be locked against each other.
- *     Flushing the buffer counts as a read operation.
- *     Resetting the buffer counts as a read and write operation.
- *     Two or more writers must be locked against each other.
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @data: pointer to the buffer where the data will be stored
+ * @len: bytes from ring buffer into @buf
  */
+extern void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data,
+				size_t len);
 
-/* initialize ring buffer, lock and queue */
-extern void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len);
-
-/* test whether buffer is empty */
+/**
+ * dvb_ringbuffer_empty - test whether buffer is empty
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ */
 extern int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf);
 
-/* return the number of free bytes in the buffer */
+/**
+ * dvb_ringbuffer_free - returns the number of free bytes in the buffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ *
+ * Return: number of free bytes in the buffer
+ */
 extern ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf);
 
-/* return the number of bytes waiting in the buffer */
+/**
+ * dvb_ringbuffer_avail - returns the number of bytes waiting in the buffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ *
+ * Return: number of bytes waiting in the buffer
+ */
 extern ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf);
 
-
-/*
- * Reset the read and write pointers to zero and flush the buffer
+/**
+ * dvb_ringbuffer_reset - resets the ringbuffer to initial state
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ *
+ * Resets the read and write pointers to zero and flush the buffer.
+ *
  * This counts as a read and write operation
  */
 extern void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf);
 
+/*
+ * read routines & macros
+ */
 
-/* read routines & macros */
-/* ---------------------- */
-/* flush buffer */
+/**
+ * dvb_ringbuffer_flush - flush buffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ */
 extern void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf);
 
-/* flush buffer protected by spinlock and wake-up waiting task(s) */
+/**
+ * dvb_ringbuffer_flush_spinlock_wakeup- flush buffer protected by spinlock
+ *      and wake-up waiting task(s)
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ */
 extern void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf);
 
-/* peek at byte @offs: in the buffer */
-#define DVB_RINGBUFFER_PEEK(rbuf,offs)	\
-			(rbuf)->data[((rbuf)->pread+(offs))%(rbuf)->size]
+/**
+ * DVB_RINGBUFFER_PEEK - peek at byte @offs in the buffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @offs: offset inside the ringbuffer
+ */
+#define DVB_RINGBUFFER_PEEK(rbuf, offs)	\
+			((rbuf)->data[((rbuf)->pread + (offs)) % (rbuf)->size])
 
-/* advance read ptr by @num: bytes */
-#define DVB_RINGBUFFER_SKIP(rbuf,num)	\
-			(rbuf)->pread=((rbuf)->pread+(num))%(rbuf)->size
+/**
+ * DVB_RINGBUFFER_SKIP - advance read ptr by @num bytes
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @num: number of bytes to advance
+ */
+#define DVB_RINGBUFFER_SKIP(rbuf, num)	{\
+			(rbuf)->pread = ((rbuf)->pread + (num)) % (rbuf)->size;\
+}
 
-/*
- * read @len: bytes from ring buffer into @buf:
- * @usermem: specifies whether @buf: resides in user space
- * returns number of bytes transferred or -EFAULT
+/**
+ * dvb_ringbuffer_read_user - Reads a buffer into an user pointer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @buf: pointer to the buffer where the data will be stored
+ * @len: bytes from ring buffer into @buf
+ *
+ * This variant assumes that the buffer is a memory at the userspace. So,
+ * it will internally call copy_to_user().
+ *
+ * Return: number of bytes transferred or -EFAULT
  */
 extern ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf,
 				   u8 __user *buf, size_t len);
+
+/**
+ * dvb_ringbuffer_read - Reads a buffer into a pointer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @buf: pointer to the buffer where the data will be stored
+ * @len: bytes from ring buffer into @buf
+ *
+ * This variant assumes that the buffer is a memory at the Kernel space
+ *
+ * Return: number of bytes transferred or -EFAULT
+ */
 extern void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf,
 				   u8 *buf, size_t len);
 
-
-/* write routines & macros */
-/* ----------------------- */
-/* write single byte to ring buffer */
-#define DVB_RINGBUFFER_WRITE_BYTE(rbuf,byte)	\
-			{ (rbuf)->data[(rbuf)->pwrite]=(byte); \
-			(rbuf)->pwrite=((rbuf)->pwrite+1)%(rbuf)->size; }
 /*
- * write @len: bytes to ring buffer
- * @usermem: specifies whether @buf: resides in user space
- * returns number of bytes transferred or -EFAULT
-*/
+ * write routines & macros
+ */
+
+/**
+ * DVB_RINGBUFFER_WRITE_BYTE - write single byte to ring buffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @byte: byte to write
+ */
+#define DVB_RINGBUFFER_WRITE_BYTE(rbuf, byte)	\
+			{ (rbuf)->data[(rbuf)->pwrite] = (byte); \
+			(rbuf)->pwrite = ((rbuf)->pwrite + 1) % (rbuf)->size; }
+
+/**
+ * dvb_ringbuffer_write - Writes a buffer into the ringbuffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @buf: pointer to the buffer where the data will be read
+ * @len: bytes from ring buffer into @buf
+ *
+ * This variant assumes that the buffer is a memory at the Kernel space
+ *
+ * return: number of bytes transferred or -EFAULT
+ */
 extern ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf,
 				    size_t len);
-extern ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
-				         const u8 __user *buf, size_t len);
 
+/**
+ * dvb_ringbuffer_write_user - Writes a buffer received via an user pointer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @buf: pointer to the buffer where the data will be read
+ * @len: bytes from ring buffer into @buf
+ *
+ * This variant assumes that the buffer is a memory at the userspace. So,
+ * it will internally call copy_from_user().
+ *
+ * Return: number of bytes transferred or -EFAULT
+ */
+extern ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
+					 const u8 __user *buf, size_t len);
 
 /**
  * dvb_ringbuffer_pkt_write - Write a packet into the ringbuffer.
@@ -143,9 +213,10 @@
  * @rbuf: Ringbuffer to write to.
  * @buf: Buffer to write.
  * @len: Length of buffer (currently limited to 65535 bytes max).
- * returns Number of bytes written, or -EFAULT, -ENOMEM, -EVINAL.
+ *
+ * Return: Number of bytes written, or -EFAULT, -ENOMEM, -EVINAL.
  */
-extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf,
+extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8 *buf,
 					size_t len);
 
 /**
@@ -157,7 +228,7 @@
  * @buf: Destination buffer for data.
  * @len: Size of destination buffer.
  *
- * returns Number of bytes read, or -EFAULT.
+ * Return: Number of bytes read, or -EFAULT.
  *
  * .. note::
  *
@@ -167,7 +238,7 @@
  */
 extern ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf,
 					    size_t idx,
-				            int offset, u8 __user *buf,
+					    int offset, u8 __user *buf,
 					    size_t len);
 
 /**
@@ -181,7 +252,7 @@
  * @buf: Destination buffer for data.
  * @len: Size of destination buffer.
  *
- * returns Number of bytes read, or -EFAULT.
+ * Return: Number of bytes read, or -EFAULT.
  */
 extern ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
 				       int offset, u8 *buf, size_t len);
@@ -199,10 +270,11 @@
  *
  * @rbuf: Ringbuffer concerned.
  * @idx: Previous packet index, or -1 to return the first packet index.
- * @pktlen: On success, will be updated to contain the length of the packet in bytes.
+ * @pktlen: On success, will be updated to contain the length of the packet
+ *          in bytes.
  * returns Packet index (if >=0), or -1 if no packets available.
  */
-extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t* pktlen);
-
+extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf,
+				       size_t idx, size_t *pktlen);
 
 #endif /* _DVB_RINGBUFFER_H_ */
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index c645aa8..0122255 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -67,6 +67,7 @@
 config DVB_SI2165
 	tristate "Silicon Labs si2165 based"
 	depends on DVB_CORE && I2C
+	select REGMAP_I2C
 	default m if !MEDIA_SUBDRV_AUTOSELECT
 	help
 	  A DVB-C/T demodulator.
@@ -463,6 +464,7 @@
 config DVB_CXD2820R
 	tristate "Sony CXD2820R"
 	depends on DVB_CORE && I2C
+	select REGMAP_I2C
 	default m if !MEDIA_SUBDRV_AUTOSELECT
 	help
 	  Say Y when you want to support this frontend.
diff --git a/drivers/media/dvb-frontends/af9013.h b/drivers/media/dvb-frontends/af9013.h
index 1dcc936..dcdd163 100644
--- a/drivers/media/dvb-frontends/af9013.h
+++ b/drivers/media/dvb-frontends/af9013.h
@@ -25,7 +25,6 @@
 #ifndef AF9013_H
 #define AF9013_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 /* AF9013/5 GPIOs (mostly guessed)
diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
index 6ad22b6..5b83e4f 100644
--- a/drivers/media/dvb-frontends/af9033.h
+++ b/drivers/media/dvb-frontends/af9033.h
@@ -22,8 +22,6 @@
 #ifndef AF9033_H
 #define AF9033_H
 
-#include <linux/kconfig.h>
-
 /*
  * I2C address (TODO: are these in 8-bit format?)
  * 0x38, 0x3a, 0x3c, 0x3e
diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c
index 8cc8c45..ad304ee 100644
--- a/drivers/media/dvb-frontends/ascot2e.c
+++ b/drivers/media/dvb-frontends/ascot2e.c
@@ -464,7 +464,7 @@
 	return 0;
 }
 
-static struct dvb_tuner_ops ascot2e_tuner_ops = {
+static const struct dvb_tuner_ops ascot2e_tuner_ops = {
 	.info = {
 		.name = "Sony ASCOT2E",
 		.frequency_min = 1000000,
diff --git a/drivers/media/dvb-frontends/ascot2e.h b/drivers/media/dvb-frontends/ascot2e.h
index 6da4ae6..dc61bf7 100644
--- a/drivers/media/dvb-frontends/ascot2e.h
+++ b/drivers/media/dvb-frontends/ascot2e.h
@@ -22,7 +22,6 @@
 #ifndef __DVB_ASCOT2E_H__
 #define __DVB_ASCOT2E_H__
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include <linux/i2c.h>
 
diff --git a/drivers/media/dvb-frontends/atbm8830.h b/drivers/media/dvb-frontends/atbm8830.h
index 5446d13..bb86238 100644
--- a/drivers/media/dvb-frontends/atbm8830.h
+++ b/drivers/media/dvb-frontends/atbm8830.h
@@ -22,7 +22,6 @@
 #ifndef __ATBM8830_H__
 #define __ATBM8830_H__
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include <linux/i2c.h>
 
diff --git a/drivers/media/dvb-frontends/au8522.h b/drivers/media/dvb-frontends/au8522.h
index 78bf3f7..21c51a4 100644
--- a/drivers/media/dvb-frontends/au8522.h
+++ b/drivers/media/dvb-frontends/au8522.h
@@ -22,7 +22,6 @@
 #ifndef __AU8522_H__
 #define __AU8522_H__
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 enum au8522_if_freq {
diff --git a/drivers/media/dvb-frontends/cx22702.h b/drivers/media/dvb-frontends/cx22702.h
index 68b69a7..a1956a9 100644
--- a/drivers/media/dvb-frontends/cx22702.h
+++ b/drivers/media/dvb-frontends/cx22702.h
@@ -28,7 +28,6 @@
 #ifndef CX22702_H
 #define CX22702_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct cx22702_config {
diff --git a/drivers/media/dvb-frontends/cx24113.h b/drivers/media/dvb-frontends/cx24113.h
index 962919b..194c703 100644
--- a/drivers/media/dvb-frontends/cx24113.h
+++ b/drivers/media/dvb-frontends/cx24113.h
@@ -22,8 +22,6 @@
 #ifndef CX24113_H
 #define CX24113_H
 
-#include <linux/kconfig.h>
-
 struct dvb_frontend;
 
 struct cx24113_config {
diff --git a/drivers/media/dvb-frontends/cx24116.h b/drivers/media/dvb-frontends/cx24116.h
index f6dbabc..9ff8df8d 100644
--- a/drivers/media/dvb-frontends/cx24116.h
+++ b/drivers/media/dvb-frontends/cx24116.h
@@ -21,7 +21,6 @@
 #ifndef CX24116_H
 #define CX24116_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct cx24116_config {
diff --git a/drivers/media/dvb-frontends/cx24117.h b/drivers/media/dvb-frontends/cx24117.h
index 1648ab4..445f13f 100644
--- a/drivers/media/dvb-frontends/cx24117.h
+++ b/drivers/media/dvb-frontends/cx24117.h
@@ -22,7 +22,6 @@
 #ifndef CX24117_H
 #define CX24117_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct cx24117_config {
diff --git a/drivers/media/dvb-frontends/cx24120.h b/drivers/media/dvb-frontends/cx24120.h
index f097042..de4ca9a 100644
--- a/drivers/media/dvb-frontends/cx24120.h
+++ b/drivers/media/dvb-frontends/cx24120.h
@@ -20,7 +20,6 @@
 #ifndef CX24120_H
 #define CX24120_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include <linux/firmware.h>
 
diff --git a/drivers/media/dvb-frontends/cx24123.h b/drivers/media/dvb-frontends/cx24123.h
index 975f3c9..aac2344 100644
--- a/drivers/media/dvb-frontends/cx24123.h
+++ b/drivers/media/dvb-frontends/cx24123.h
@@ -21,7 +21,6 @@
 #ifndef CX24123_H
 #define CX24123_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct cx24123_config {
diff --git a/drivers/media/dvb-frontends/cxd2820r.h b/drivers/media/dvb-frontends/cxd2820r.h
index 56d4276..f3ff8f6 100644
--- a/drivers/media/dvb-frontends/cxd2820r.h
+++ b/drivers/media/dvb-frontends/cxd2820r.h
@@ -22,7 +22,6 @@
 #ifndef CXD2820R_H
 #define CXD2820R_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 #define CXD2820R_GPIO_D (0 << 0) /* disable */
@@ -37,6 +36,32 @@
 #define CXD2820R_TS_PARALLEL      0x30
 #define CXD2820R_TS_PARALLEL_MSB  0x70
 
+/*
+ * I2C address: 0x6c, 0x6d
+ */
+
+/**
+ * struct cxd2820r_platform_data - Platform data for the cxd2820r driver
+ * @ts_mode: TS mode.
+ * @ts_clk_inv: TS clock inverted.
+ * @if_agc_polarity: IF AGC polarity.
+ * @spec_inv: Input spectrum inverted.
+ * @gpio_chip_base: GPIO.
+ * @get_dvb_frontend: Get DVB frontend.
+ */
+
+struct cxd2820r_platform_data {
+	u8 ts_mode;
+	bool ts_clk_inv;
+	bool if_agc_polarity;
+	bool spec_inv;
+	int **gpio_chip_base;
+
+	struct dvb_frontend* (*get_dvb_frontend)(struct i2c_client *);
+/* private: For legacy media attach wrapper. Do not set value. */
+	bool attach_in_use;
+};
+
 struct cxd2820r_config {
 	/* Demodulator I2C address.
 	 * Driver determines DVB-C slave I2C address automatically from master
diff --git a/drivers/media/dvb-frontends/cxd2820r_c.c b/drivers/media/dvb-frontends/cxd2820r_c.c
index a674a63..d75b077 100644
--- a/drivers/media/dvb-frontends/cxd2820r_c.c
+++ b/drivers/media/dvb-frontends/cxd2820r_c.c
@@ -24,12 +24,12 @@
 int cxd2820r_set_frontend_c(struct dvb_frontend *fe)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct i2c_client *client = priv->client[0];
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
-	int ret, i;
+	int ret;
+	unsigned int utmp;
 	u8 buf[2];
-	u32 if_freq;
-	u16 if_ctl;
-	u64 num;
+	u32 if_frequency;
 	struct reg_val_mask tab[] = {
 		{ 0x00080, 0x01, 0xff },
 		{ 0x00081, 0x05, 0xff },
@@ -43,25 +43,24 @@
 		{ 0x10059, 0x50, 0xff },
 		{ 0x10087, 0x0c, 0x3c },
 		{ 0x1008b, 0x07, 0xff },
-		{ 0x1001f, priv->cfg.if_agc_polarity << 7, 0x80 },
-		{ 0x10070, priv->cfg.ts_mode, 0xff },
-		{ 0x10071, !priv->cfg.ts_clock_inv << 4, 0x10 },
+		{ 0x1001f, priv->if_agc_polarity << 7, 0x80 },
+		{ 0x10070, priv->ts_mode, 0xff },
+		{ 0x10071, !priv->ts_clk_inv << 4, 0x10 },
 	};
 
-	dev_dbg(&priv->i2c->dev, "%s: frequency=%d symbol_rate=%d\n", __func__,
-			c->frequency, c->symbol_rate);
+	dev_dbg(&client->dev,
+		"delivery_system=%d modulation=%d frequency=%u symbol_rate=%u inversion=%d\n",
+		c->delivery_system, c->modulation, c->frequency,
+		c->symbol_rate, c->inversion);
 
 	/* program tuner */
 	if (fe->ops.tuner_ops.set_params)
 		fe->ops.tuner_ops.set_params(fe);
 
 	if (priv->delivery_system !=  SYS_DVBC_ANNEX_A) {
-		for (i = 0; i < ARRAY_SIZE(tab); i++) {
-			ret = cxd2820r_wr_reg_mask(priv, tab[i].reg,
-				tab[i].val, tab[i].mask);
-			if (ret)
-				goto error;
-		}
+		ret = cxd2820r_wr_reg_val_mask_tab(priv, tab, ARRAY_SIZE(tab));
+		if (ret)
+			goto error;
 	}
 
 	priv->delivery_system = SYS_DVBC_ANNEX_A;
@@ -69,35 +68,33 @@
 
 	/* program IF frequency */
 	if (fe->ops.tuner_ops.get_if_frequency) {
-		ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_freq);
+		ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
 		if (ret)
 			goto error;
-	} else
-		if_freq = 0;
+		dev_dbg(&client->dev, "if_frequency=%u\n", if_frequency);
+	} else {
+		ret = -EINVAL;
+		goto error;
+	}
 
-	dev_dbg(&priv->i2c->dev, "%s: if_freq=%d\n", __func__, if_freq);
-
-	num = if_freq / 1000; /* Hz => kHz */
-	num *= 0x4000;
-	if_ctl = 0x4000 - DIV_ROUND_CLOSEST_ULL(num, 41000);
-	buf[0] = (if_ctl >> 8) & 0x3f;
-	buf[1] = (if_ctl >> 0) & 0xff;
-
-	ret = cxd2820r_wr_regs(priv, 0x10042, buf, 2);
+	utmp = 0x4000 - DIV_ROUND_CLOSEST_ULL((u64)if_frequency * 0x4000, CXD2820R_CLK);
+	buf[0] = (utmp >> 8) & 0xff;
+	buf[1] = (utmp >> 0) & 0xff;
+	ret = regmap_bulk_write(priv->regmap[1], 0x0042, buf, 2);
 	if (ret)
 		goto error;
 
-	ret = cxd2820r_wr_reg(priv, 0x000ff, 0x08);
+	ret = regmap_write(priv->regmap[0], 0x00ff, 0x08);
 	if (ret)
 		goto error;
 
-	ret = cxd2820r_wr_reg(priv, 0x000fe, 0x01);
+	ret = regmap_write(priv->regmap[0], 0x00fe, 0x01);
 	if (ret)
 		goto error;
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
@@ -105,20 +102,24 @@
 			    struct dtv_frontend_properties *c)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct i2c_client *client = priv->client[0];
 	int ret;
+	unsigned int utmp;
 	u8 buf[2];
 
-	ret = cxd2820r_rd_regs(priv, 0x1001a, buf, 2);
+	dev_dbg(&client->dev, "\n");
+
+	ret = regmap_bulk_read(priv->regmap[1], 0x001a, buf, 2);
 	if (ret)
 		goto error;
 
 	c->symbol_rate = 2500 * ((buf[0] & 0x0f) << 8 | buf[1]);
 
-	ret = cxd2820r_rd_reg(priv, 0x10019, &buf[0]);
+	ret = regmap_read(priv->regmap[1], 0x0019, &utmp);
 	if (ret)
 		goto error;
 
-	switch ((buf[0] >> 0) & 0x07) {
+	switch ((utmp >> 0) & 0x07) {
 	case 0:
 		c->modulation = QAM_16;
 		break;
@@ -136,7 +137,7 @@
 		break;
 	}
 
-	switch ((buf[0] >> 7) & 0x01) {
+	switch ((utmp >> 7) & 0x01) {
 	case 0:
 		c->inversion = INVERSION_OFF;
 		break;
@@ -147,167 +148,169 @@
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
-int cxd2820r_read_ber_c(struct dvb_frontend *fe, u32 *ber)
-{
-	struct cxd2820r_priv *priv = fe->demodulator_priv;
-	int ret;
-	u8 buf[3], start_ber = 0;
-	*ber = 0;
-
-	if (priv->ber_running) {
-		ret = cxd2820r_rd_regs(priv, 0x10076, buf, sizeof(buf));
-		if (ret)
-			goto error;
-
-		if ((buf[2] >> 7) & 0x01 || (buf[2] >> 4) & 0x01) {
-			*ber = (buf[2] & 0x0f) << 16 | buf[1] << 8 | buf[0];
-			start_ber = 1;
-		}
-	} else {
-		priv->ber_running = true;
-		start_ber = 1;
-	}
-
-	if (start_ber) {
-		/* (re)start BER */
-		ret = cxd2820r_wr_reg(priv, 0x10079, 0x01);
-		if (ret)
-			goto error;
-	}
-
-	return ret;
-error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
-	return ret;
-}
-
-int cxd2820r_read_signal_strength_c(struct dvb_frontend *fe,
-	u16 *strength)
-{
-	struct cxd2820r_priv *priv = fe->demodulator_priv;
-	int ret;
-	u8 buf[2];
-	u16 tmp;
-
-	ret = cxd2820r_rd_regs(priv, 0x10049, buf, sizeof(buf));
-	if (ret)
-		goto error;
-
-	tmp = (buf[0] & 0x03) << 8 | buf[1];
-	tmp = (~tmp & 0x03ff);
-
-	if (tmp == 512)
-		/* ~no signal */
-		tmp = 0;
-	else if (tmp > 350)
-		tmp = 350;
-
-	/* scale value to 0x0000-0xffff */
-	*strength = tmp * 0xffff / (350-0);
-
-	return ret;
-error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
-	return ret;
-}
-
-int cxd2820r_read_snr_c(struct dvb_frontend *fe, u16 *snr)
-{
-	struct cxd2820r_priv *priv = fe->demodulator_priv;
-	int ret;
-	u8 tmp;
-	unsigned int A, B;
-	/* report SNR in dB * 10 */
-
-	ret = cxd2820r_rd_reg(priv, 0x10019, &tmp);
-	if (ret)
-		goto error;
-
-	if (((tmp >> 0) & 0x03) % 2) {
-		A = 875;
-		B = 650;
-	} else {
-		A = 950;
-		B = 760;
-	}
-
-	ret = cxd2820r_rd_reg(priv, 0x1004d, &tmp);
-	if (ret)
-		goto error;
-
-	#define CXD2820R_LOG2_E_24 24204406 /* log2(e) << 24 */
-	if (tmp)
-		*snr = A * (intlog2(B / tmp) >> 5) / (CXD2820R_LOG2_E_24 >> 5)
-			/ 10;
-	else
-		*snr = 0;
-
-	return ret;
-error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
-	return ret;
-}
-
-int cxd2820r_read_ucblocks_c(struct dvb_frontend *fe, u32 *ucblocks)
-{
-	*ucblocks = 0;
-	/* no way to read ? */
-	return 0;
-}
-
 int cxd2820r_read_status_c(struct dvb_frontend *fe, enum fe_status *status)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct i2c_client *client = priv->client[0];
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	int ret;
-	u8 buf[2];
-	*status = 0;
+	unsigned int utmp, utmp1, utmp2;
+	u8 buf[3];
 
-	ret = cxd2820r_rd_regs(priv, 0x10088, buf, sizeof(buf));
+	/* Lock detection */
+	ret = regmap_bulk_read(priv->regmap[1], 0x0088, &buf[0], 1);
+	if (ret)
+		goto error;
+	ret = regmap_bulk_read(priv->regmap[1], 0x0073, &buf[1], 1);
 	if (ret)
 		goto error;
 
-	if (((buf[0] >> 0) & 0x01) == 1) {
-		*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
-			FE_HAS_VITERBI | FE_HAS_SYNC;
+	utmp1 = (buf[0] >> 0) & 0x01;
+	utmp2 = (buf[1] >> 3) & 0x01;
 
-		if (((buf[1] >> 3) & 0x01) == 1) {
-			*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
-				FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
-		}
+	if (utmp1 == 1 && utmp2 == 1) {
+		*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+			  FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+	} else if (utmp1 == 1 || utmp2 == 1) {
+		*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+			  FE_HAS_VITERBI | FE_HAS_SYNC;
+	} else {
+		*status = 0;
 	}
 
-	dev_dbg(&priv->i2c->dev, "%s: lock=%02x %02x\n", __func__, buf[0],
-			buf[1]);
+	dev_dbg(&client->dev, "status=%02x raw=%*ph sync=%u ts=%u\n",
+		*status, 2, buf, utmp1, utmp2);
+
+	/* Signal strength */
+	if (*status & FE_HAS_SIGNAL) {
+		unsigned int strength;
+
+		ret = regmap_bulk_read(priv->regmap[1], 0x0049, buf, 2);
+		if (ret)
+			goto error;
+
+		utmp = buf[0] << 8 | buf[1] << 0;
+		utmp = 511 - sign_extend32(utmp, 9);
+		/* Scale value to 0x0000-0xffff */
+		strength = utmp << 6 | utmp >> 4;
+
+		c->strength.len = 1;
+		c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+		c->strength.stat[0].uvalue = strength;
+	} else {
+		c->strength.len = 1;
+		c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	}
+
+	/* CNR */
+	if (*status & FE_HAS_VITERBI) {
+		unsigned int cnr, const_a, const_b;
+
+		ret = regmap_read(priv->regmap[1], 0x0019, &utmp);
+		if (ret)
+			goto error;
+
+		if (((utmp >> 0) & 0x03) % 2) {
+			const_a = 8750;
+			const_b = 650;
+		} else {
+			const_a = 9500;
+			const_b = 760;
+		}
+
+		ret = regmap_read(priv->regmap[1], 0x004d, &utmp);
+		if (ret)
+			goto error;
+
+		#define CXD2820R_LOG2_E_24 24204406 /* log2(e) << 24 */
+		if (utmp)
+			cnr = div_u64((u64)(intlog2(const_b) - intlog2(utmp))
+				      * const_a, CXD2820R_LOG2_E_24);
+		else
+			cnr = 0;
+
+		c->cnr.len = 1;
+		c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+		c->cnr.stat[0].svalue = cnr;
+	} else {
+		c->cnr.len = 1;
+		c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	}
+
+	/* BER */
+	if (*status & FE_HAS_SYNC) {
+		unsigned int post_bit_error;
+		bool start_ber;
+
+		if (priv->ber_running) {
+			ret = regmap_bulk_read(priv->regmap[1], 0x0076, buf, 3);
+			if (ret)
+				goto error;
+
+			if ((buf[2] >> 7) & 0x01) {
+				post_bit_error = buf[2] << 16 | buf[1] << 8 |
+						 buf[0] << 0;
+				post_bit_error &= 0x0fffff;
+				start_ber = true;
+			} else {
+				post_bit_error = 0;
+				start_ber = false;
+			}
+		} else {
+			post_bit_error = 0;
+			start_ber = true;
+		}
+
+		if (start_ber) {
+			ret = regmap_write(priv->regmap[1], 0x0079, 0x01);
+			if (ret)
+				goto error;
+			priv->ber_running = true;
+		}
+
+		priv->post_bit_error += post_bit_error;
+
+		c->post_bit_error.len = 1;
+		c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+		c->post_bit_error.stat[0].uvalue = priv->post_bit_error;
+	} else {
+		c->post_bit_error.len = 1;
+		c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	}
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
 int cxd2820r_init_c(struct dvb_frontend *fe)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct i2c_client *client = priv->client[0];
 	int ret;
 
-	ret = cxd2820r_wr_reg(priv, 0x00085, 0x07);
+	dev_dbg(&client->dev, "\n");
+
+	ret = regmap_write(priv->regmap[0], 0x0085, 0x07);
 	if (ret)
 		goto error;
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
 int cxd2820r_sleep_c(struct dvb_frontend *fe)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
-	int ret, i;
+	struct i2c_client *client = priv->client[0];
+	int ret;
 	struct reg_val_mask tab[] = {
 		{ 0x000ff, 0x1f, 0xff },
 		{ 0x00085, 0x00, 0xff },
@@ -316,20 +319,17 @@
 		{ 0x00080, 0x00, 0xff },
 	};
 
-	dev_dbg(&priv->i2c->dev, "%s\n", __func__);
+	dev_dbg(&client->dev, "\n");
 
 	priv->delivery_system = SYS_UNDEFINED;
 
-	for (i = 0; i < ARRAY_SIZE(tab); i++) {
-		ret = cxd2820r_wr_reg_mask(priv, tab[i].reg, tab[i].val,
-			tab[i].mask);
-		if (ret)
-			goto error;
-	}
+	ret = cxd2820r_wr_reg_val_mask_tab(priv, tab, ARRAY_SIZE(tab));
+	if (ret)
+		goto error;
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index 314d3b8..95267c6 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -21,178 +21,50 @@
 
 #include "cxd2820r_priv.h"
 
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE  64
-
-/* write multiple registers */
-static int cxd2820r_wr_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
-	u8 *val, int len)
+/* Write register table */
+int cxd2820r_wr_reg_val_mask_tab(struct cxd2820r_priv *priv,
+				 const struct reg_val_mask *tab, int tab_len)
 {
+	struct i2c_client *client = priv->client[0];
 	int ret;
-	u8 buf[MAX_XFER_SIZE];
-	struct i2c_msg msg[1] = {
-		{
-			.addr = i2c,
-			.flags = 0,
-			.len = len + 1,
-			.buf = buf,
-		}
-	};
+	unsigned int i, reg, mask, val;
+	struct regmap *regmap;
 
-	if (1 + len > sizeof(buf)) {
-		dev_warn(&priv->i2c->dev,
-			 "%s: i2c wr reg=%04x: len=%d is too big!\n",
-			 KBUILD_MODNAME, reg, len);
-		return -EINVAL;
+	dev_dbg(&client->dev, "tab_len=%d\n", tab_len);
+
+	for (i = 0; i < tab_len; i++) {
+		if ((tab[i].reg >> 16) & 0x1)
+			regmap = priv->regmap[1];
+		else
+			regmap = priv->regmap[0];
+
+		reg = (tab[i].reg >> 0) & 0xffff;
+		val = tab[i].val;
+		mask = tab[i].mask;
+
+		if (mask == 0xff)
+			ret = regmap_write(regmap, reg, val);
+		else
+			ret = regmap_write_bits(regmap, reg, mask, val);
+		if (ret)
+			goto error;
 	}
 
-	buf[0] = reg;
-	memcpy(&buf[1], val, len);
-
-	ret = i2c_transfer(priv->i2c, msg, 1);
-	if (ret == 1) {
-		ret = 0;
-	} else {
-		dev_warn(&priv->i2c->dev, "%s: i2c wr failed=%d reg=%02x " \
-				"len=%d\n", KBUILD_MODNAME, ret, reg, len);
-		ret = -EREMOTEIO;
-	}
+	return 0;
+error:
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
-/* read multiple registers */
-static int cxd2820r_rd_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
-	u8 *val, int len)
-{
-	int ret;
-	u8 buf[MAX_XFER_SIZE];
-	struct i2c_msg msg[2] = {
-		{
-			.addr = i2c,
-			.flags = 0,
-			.len = 1,
-			.buf = &reg,
-		}, {
-			.addr = i2c,
-			.flags = I2C_M_RD,
-			.len = len,
-			.buf = buf,
-		}
-	};
-
-	if (len > sizeof(buf)) {
-		dev_warn(&priv->i2c->dev,
-			 "%s: i2c wr reg=%04x: len=%d is too big!\n",
-			 KBUILD_MODNAME, reg, len);
-		return -EINVAL;
-	}
-
-	ret = i2c_transfer(priv->i2c, msg, 2);
-	if (ret == 2) {
-		memcpy(val, buf, len);
-		ret = 0;
-	} else {
-		dev_warn(&priv->i2c->dev, "%s: i2c rd failed=%d reg=%02x " \
-				"len=%d\n", KBUILD_MODNAME, ret, reg, len);
-		ret = -EREMOTEIO;
-	}
-
-	return ret;
-}
-
-/* write multiple registers */
-int cxd2820r_wr_regs(struct cxd2820r_priv *priv, u32 reginfo, u8 *val,
-	int len)
-{
-	int ret;
-	u8 i2c_addr;
-	u8 reg = (reginfo >> 0) & 0xff;
-	u8 bank = (reginfo >> 8) & 0xff;
-	u8 i2c = (reginfo >> 16) & 0x01;
-
-	/* select I2C */
-	if (i2c)
-		i2c_addr = priv->cfg.i2c_address | (1 << 1); /* DVB-C */
-	else
-		i2c_addr = priv->cfg.i2c_address; /* DVB-T/T2 */
-
-	/* switch bank if needed */
-	if (bank != priv->bank[i2c]) {
-		ret = cxd2820r_wr_regs_i2c(priv, i2c_addr, 0x00, &bank, 1);
-		if (ret)
-			return ret;
-		priv->bank[i2c] = bank;
-	}
-	return cxd2820r_wr_regs_i2c(priv, i2c_addr, reg, val, len);
-}
-
-/* read multiple registers */
-int cxd2820r_rd_regs(struct cxd2820r_priv *priv, u32 reginfo, u8 *val,
-	int len)
-{
-	int ret;
-	u8 i2c_addr;
-	u8 reg = (reginfo >> 0) & 0xff;
-	u8 bank = (reginfo >> 8) & 0xff;
-	u8 i2c = (reginfo >> 16) & 0x01;
-
-	/* select I2C */
-	if (i2c)
-		i2c_addr = priv->cfg.i2c_address | (1 << 1); /* DVB-C */
-	else
-		i2c_addr = priv->cfg.i2c_address; /* DVB-T/T2 */
-
-	/* switch bank if needed */
-	if (bank != priv->bank[i2c]) {
-		ret = cxd2820r_wr_regs_i2c(priv, i2c_addr, 0x00, &bank, 1);
-		if (ret)
-			return ret;
-		priv->bank[i2c] = bank;
-	}
-	return cxd2820r_rd_regs_i2c(priv, i2c_addr, reg, val, len);
-}
-
-/* write single register */
-int cxd2820r_wr_reg(struct cxd2820r_priv *priv, u32 reg, u8 val)
-{
-	return cxd2820r_wr_regs(priv, reg, &val, 1);
-}
-
-/* read single register */
-int cxd2820r_rd_reg(struct cxd2820r_priv *priv, u32 reg, u8 *val)
-{
-	return cxd2820r_rd_regs(priv, reg, val, 1);
-}
-
-/* write single register with mask */
-int cxd2820r_wr_reg_mask(struct cxd2820r_priv *priv, u32 reg, u8 val,
-	u8 mask)
-{
-	int ret;
-	u8 tmp;
-
-	/* no need for read if whole reg is written */
-	if (mask != 0xff) {
-		ret = cxd2820r_rd_reg(priv, reg, &tmp);
-		if (ret)
-			return ret;
-
-		val &= mask;
-		tmp &= ~mask;
-		val |= tmp;
-	}
-
-	return cxd2820r_wr_reg(priv, reg, val);
-}
-
 int cxd2820r_gpio(struct dvb_frontend *fe, u8 *gpio)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct i2c_client *client = priv->client[0];
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	int ret, i;
 	u8 tmp0, tmp1;
 
-	dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
-			fe->dtv_property_cache.delivery_system);
+	dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
 
 	/* update GPIOs only when needed */
 	if (!memcmp(gpio, priv->gpio, sizeof(priv->gpio)))
@@ -219,20 +91,18 @@
 		else
 			tmp1 |= (0 << (0 + i));
 
-		dev_dbg(&priv->i2c->dev, "%s: gpio i=%d %02x %02x\n", __func__,
-				i, tmp0, tmp1);
+		dev_dbg(&client->dev, "gpio i=%d %02x %02x\n", i, tmp0, tmp1);
 	}
 
-	dev_dbg(&priv->i2c->dev, "%s: wr gpio=%02x %02x\n", __func__, tmp0,
-			tmp1);
+	dev_dbg(&client->dev, "wr gpio=%02x %02x\n", tmp0, tmp1);
 
 	/* write bits [7:2] */
-	ret = cxd2820r_wr_reg_mask(priv, 0x00089, tmp0, 0xfc);
+	ret = regmap_update_bits(priv->regmap[0], 0x0089, 0xfc, tmp0);
 	if (ret)
 		goto error;
 
 	/* write bits [5:0] */
-	ret = cxd2820r_wr_reg_mask(priv, 0x0008e, tmp1, 0x3f);
+	ret = regmap_update_bits(priv->regmap[0], 0x008e, 0x3f, tmp1);
 	if (ret)
 		goto error;
 
@@ -240,18 +110,18 @@
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
 static int cxd2820r_set_frontend(struct dvb_frontend *fe)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct i2c_client *client = priv->client[0];
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	int ret;
 
-	dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
-			fe->dtv_property_cache.delivery_system);
+	dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
 
 	switch (c->delivery_system) {
 	case SYS_DVBT:
@@ -279,8 +149,7 @@
 			goto err;
 		break;
 	default:
-		dev_dbg(&priv->i2c->dev, "%s: error state=%d\n", __func__,
-				fe->dtv_property_cache.delivery_system);
+		dev_dbg(&client->dev, "invalid delivery_system\n");
 		ret = -EINVAL;
 		break;
 	}
@@ -291,12 +160,13 @@
 static int cxd2820r_read_status(struct dvb_frontend *fe, enum fe_status *status)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct i2c_client *client = priv->client[0];
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	int ret;
 
-	dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
-			fe->dtv_property_cache.delivery_system);
+	dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
 
-	switch (fe->dtv_property_cache.delivery_system) {
+	switch (c->delivery_system) {
 	case SYS_DVBT:
 		ret = cxd2820r_read_status_t(fe, status);
 		break;
@@ -317,15 +187,16 @@
 				 struct dtv_frontend_properties *p)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct i2c_client *client = priv->client[0];
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	int ret;
 
-	dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
-			fe->dtv_property_cache.delivery_system);
+	dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
 
 	if (priv->delivery_system == SYS_UNDEFINED)
 		return 0;
 
-	switch (fe->dtv_property_cache.delivery_system) {
+	switch (c->delivery_system) {
 	case SYS_DVBT:
 		ret = cxd2820r_get_frontend_t(fe, p);
 		break;
@@ -345,101 +216,60 @@
 static int cxd2820r_read_ber(struct dvb_frontend *fe, u32 *ber)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
-	int ret;
+	struct i2c_client *client = priv->client[0];
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 
-	dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
-			fe->dtv_property_cache.delivery_system);
+	dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
 
-	switch (fe->dtv_property_cache.delivery_system) {
-	case SYS_DVBT:
-		ret = cxd2820r_read_ber_t(fe, ber);
-		break;
-	case SYS_DVBT2:
-		ret = cxd2820r_read_ber_t2(fe, ber);
-		break;
-	case SYS_DVBC_ANNEX_A:
-		ret = cxd2820r_read_ber_c(fe, ber);
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-	return ret;
+	*ber = (priv->post_bit_error - priv->post_bit_error_prev_dvbv3);
+	priv->post_bit_error_prev_dvbv3 = priv->post_bit_error;
+
+	return 0;
 }
 
 static int cxd2820r_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
-	int ret;
+	struct i2c_client *client = priv->client[0];
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 
-	dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
-			fe->dtv_property_cache.delivery_system);
+	dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
 
-	switch (fe->dtv_property_cache.delivery_system) {
-	case SYS_DVBT:
-		ret = cxd2820r_read_signal_strength_t(fe, strength);
-		break;
-	case SYS_DVBT2:
-		ret = cxd2820r_read_signal_strength_t2(fe, strength);
-		break;
-	case SYS_DVBC_ANNEX_A:
-		ret = cxd2820r_read_signal_strength_c(fe, strength);
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-	return ret;
+	if (c->strength.stat[0].scale == FE_SCALE_RELATIVE)
+		*strength = c->strength.stat[0].uvalue;
+	else
+		*strength = 0;
+
+	return 0;
 }
 
 static int cxd2820r_read_snr(struct dvb_frontend *fe, u16 *snr)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
-	int ret;
+	struct i2c_client *client = priv->client[0];
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 
-	dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
-			fe->dtv_property_cache.delivery_system);
+	dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
 
-	switch (fe->dtv_property_cache.delivery_system) {
-	case SYS_DVBT:
-		ret = cxd2820r_read_snr_t(fe, snr);
-		break;
-	case SYS_DVBT2:
-		ret = cxd2820r_read_snr_t2(fe, snr);
-		break;
-	case SYS_DVBC_ANNEX_A:
-		ret = cxd2820r_read_snr_c(fe, snr);
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-	return ret;
+	if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL)
+		*snr = div_s64(c->cnr.stat[0].svalue, 100);
+	else
+		*snr = 0;
+
+	return 0;
 }
 
 static int cxd2820r_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
-	int ret;
+	struct i2c_client *client = priv->client[0];
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 
-	dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
-			fe->dtv_property_cache.delivery_system);
+	dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
 
-	switch (fe->dtv_property_cache.delivery_system) {
-	case SYS_DVBT:
-		ret = cxd2820r_read_ucblocks_t(fe, ucblocks);
-		break;
-	case SYS_DVBT2:
-		ret = cxd2820r_read_ucblocks_t2(fe, ucblocks);
-		break;
-	case SYS_DVBC_ANNEX_A:
-		ret = cxd2820r_read_ucblocks_c(fe, ucblocks);
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-	return ret;
+	*ucblocks = 0;
+
+	return 0;
 }
 
 static int cxd2820r_init(struct dvb_frontend *fe)
@@ -450,12 +280,13 @@
 static int cxd2820r_sleep(struct dvb_frontend *fe)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct i2c_client *client = priv->client[0];
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	int ret;
 
-	dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
-			fe->dtv_property_cache.delivery_system);
+	dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
 
-	switch (fe->dtv_property_cache.delivery_system) {
+	switch (c->delivery_system) {
 	case SYS_DVBT:
 		ret = cxd2820r_sleep_t(fe);
 		break;
@@ -476,12 +307,13 @@
 				      struct dvb_frontend_tune_settings *s)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct i2c_client *client = priv->client[0];
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	int ret;
 
-	dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
-			fe->dtv_property_cache.delivery_system);
+	dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
 
-	switch (fe->dtv_property_cache.delivery_system) {
+	switch (c->delivery_system) {
 	case SYS_DVBT:
 		ret = cxd2820r_get_tune_settings_t(fe, s);
 		break;
@@ -501,12 +333,12 @@
 static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct i2c_client *client = priv->client[0];
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	int ret, i;
 	enum fe_status status = 0;
 
-	dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
-			fe->dtv_property_cache.delivery_system);
+	dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
 
 	/* switch between DVB-T and DVB-T2 when tune fails */
 	if (priv->last_tune_failed) {
@@ -530,7 +362,6 @@
 	if (ret)
 		goto error;
 
-
 	/* frontend lock wait loop count */
 	switch (priv->delivery_system) {
 	case SYS_DVBT:
@@ -548,7 +379,7 @@
 
 	/* wait frontend lock */
 	for (; i > 0; i--) {
-		dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
+		dev_dbg(&client->dev, "loop=%d\n", i);
 		msleep(50);
 		ret = cxd2820r_read_status(fe, &status);
 		if (ret)
@@ -568,7 +399,7 @@
 	}
 
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return DVBFE_ALGO_SEARCH_ERROR;
 }
 
@@ -580,27 +411,23 @@
 static void cxd2820r_release(struct dvb_frontend *fe)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct i2c_client *client = priv->client[0];
 
-	dev_dbg(&priv->i2c->dev, "%s\n", __func__);
+	dev_dbg(&client->dev, "\n");
 
-#ifdef CONFIG_GPIOLIB
-	/* remove GPIOs */
-	if (priv->gpio_chip.label)
-		gpiochip_remove(&priv->gpio_chip);
+	i2c_unregister_device(client);
 
-#endif
-	kfree(priv);
 	return;
 }
 
 static int cxd2820r_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct i2c_client *client = priv->client[0];
 
-	dev_dbg(&priv->i2c->dev, "%s: %d\n", __func__, enable);
+	dev_dbg_ratelimited(&client->dev, "enable=%d\n", enable);
 
-	/* Bit 0 of reg 0xdb in bank 0x00 controls I2C repeater */
-	return cxd2820r_wr_reg_mask(priv, 0xdb, enable ? 1 : 0, 0x1);
+	return regmap_update_bits(priv->regmap[0], 0x00db, 0x01, enable ? 1 : 0);
 }
 
 #ifdef CONFIG_GPIOLIB
@@ -608,9 +435,10 @@
 		int val)
 {
 	struct cxd2820r_priv *priv = gpiochip_get_data(chip);
+	struct i2c_client *client = priv->client[0];
 	u8 gpio[GPIO_COUNT];
 
-	dev_dbg(&priv->i2c->dev, "%s: nr=%d val=%d\n", __func__, nr, val);
+	dev_dbg(&client->dev, "nr=%u val=%d\n", nr, val);
 
 	memcpy(gpio, priv->gpio, sizeof(gpio));
 	gpio[nr] = CXD2820R_GPIO_E | CXD2820R_GPIO_O | (val << 2);
@@ -621,9 +449,10 @@
 static void cxd2820r_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
 {
 	struct cxd2820r_priv *priv = gpiochip_get_data(chip);
+	struct i2c_client *client = priv->client[0];
 	u8 gpio[GPIO_COUNT];
 
-	dev_dbg(&priv->i2c->dev, "%s: nr=%d val=%d\n", __func__, nr, val);
+	dev_dbg(&client->dev, "nr=%u val=%d\n", nr, val);
 
 	memcpy(gpio, priv->gpio, sizeof(gpio));
 	gpio[nr] = CXD2820R_GPIO_E | CXD2820R_GPIO_O | (val << 2);
@@ -636,8 +465,9 @@
 static int cxd2820r_gpio_get(struct gpio_chip *chip, unsigned nr)
 {
 	struct cxd2820r_priv *priv = gpiochip_get_data(chip);
+	struct i2c_client *client = priv->client[0];
 
-	dev_dbg(&priv->i2c->dev, "%s: nr=%d\n", __func__, nr);
+	dev_dbg(&client->dev, "nr=%u\n", nr);
 
 	return (priv->gpio[nr] >> 2) & 0x01;
 }
@@ -689,52 +519,163 @@
 	.read_signal_strength	= cxd2820r_read_signal_strength,
 };
 
-struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *cfg,
-		struct i2c_adapter *i2c, int *gpio_chip_base
-)
+/*
+ * XXX: That is wrapper to cxd2820r_probe() via driver core in order to provide
+ * proper I2C client for legacy media attach binding.
+ * New users must use I2C client binding directly!
+ */
+struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *config,
+				     struct i2c_adapter *adapter,
+				     int *gpio_chip_base)
 {
-	struct cxd2820r_priv *priv;
-	int ret;
-	u8 tmp;
+	struct i2c_client *client;
+	struct i2c_board_info board_info;
+	struct cxd2820r_platform_data pdata;
 
-	priv = kzalloc(sizeof(struct cxd2820r_priv), GFP_KERNEL);
+	pdata.ts_mode = config->ts_mode;
+	pdata.ts_clk_inv = config->ts_clock_inv;
+	pdata.if_agc_polarity = config->if_agc_polarity;
+	pdata.spec_inv = config->spec_inv;
+	pdata.gpio_chip_base = &gpio_chip_base;
+	pdata.attach_in_use = true;
+
+	memset(&board_info, 0, sizeof(board_info));
+	strlcpy(board_info.type, "cxd2820r", I2C_NAME_SIZE);
+	board_info.addr = config->i2c_address;
+	board_info.platform_data = &pdata;
+	client = i2c_new_device(adapter, &board_info);
+	if (!client || !client->dev.driver)
+		return NULL;
+
+	return pdata.get_dvb_frontend(client);
+}
+EXPORT_SYMBOL(cxd2820r_attach);
+
+static struct dvb_frontend *cxd2820r_get_dvb_frontend(struct i2c_client *client)
+{
+	struct cxd2820r_priv *priv = i2c_get_clientdata(client);
+
+	dev_dbg(&client->dev, "\n");
+
+	return &priv->fe;
+}
+
+static int cxd2820r_probe(struct i2c_client *client,
+			  const struct i2c_device_id *id)
+{
+	struct cxd2820r_platform_data *pdata = client->dev.platform_data;
+	struct cxd2820r_priv *priv;
+	int ret, *gpio_chip_base;
+	unsigned int utmp;
+	static const struct regmap_range_cfg regmap_range_cfg0[] = {
+		{
+			.range_min        = 0x0000,
+			.range_max        = 0x3fff,
+			.selector_reg     = 0x00,
+			.selector_mask    = 0xff,
+			.selector_shift   = 0,
+			.window_start     = 0x00,
+			.window_len       = 0x100,
+		},
+	};
+	static const struct regmap_range_cfg regmap_range_cfg1[] = {
+		{
+			.range_min        = 0x0000,
+			.range_max        = 0x01ff,
+			.selector_reg     = 0x00,
+			.selector_mask    = 0xff,
+			.selector_shift   = 0,
+			.window_start     = 0x00,
+			.window_len       = 0x100,
+		},
+	};
+	static const struct regmap_config regmap_config0 = {
+		.reg_bits = 8,
+		.val_bits = 8,
+		.max_register = 0x3fff,
+		.ranges = regmap_range_cfg0,
+		.num_ranges = ARRAY_SIZE(regmap_range_cfg0),
+		.cache_type = REGCACHE_NONE,
+	};
+	static const struct regmap_config regmap_config1 = {
+		.reg_bits = 8,
+		.val_bits = 8,
+		.max_register = 0x01ff,
+		.ranges = regmap_range_cfg1,
+		.num_ranges = ARRAY_SIZE(regmap_range_cfg1),
+		.cache_type = REGCACHE_NONE,
+	};
+
+	dev_dbg(&client->dev, "\n");
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
 		ret = -ENOMEM;
-		dev_err(&i2c->dev, "%s: kzalloc() failed\n",
-				KBUILD_MODNAME);
-		goto error;
+		goto err;
 	}
 
-	priv->i2c = i2c;
-	memcpy(&priv->cfg, cfg, sizeof(struct cxd2820r_config));
-	memcpy(&priv->fe.ops, &cxd2820r_ops, sizeof(struct dvb_frontend_ops));
-	priv->fe.demodulator_priv = priv;
+	priv->client[0] = client;
+	priv->i2c = client->adapter;
+	priv->ts_mode = pdata->ts_mode;
+	priv->ts_clk_inv = pdata->ts_clk_inv;
+	priv->if_agc_polarity = pdata->if_agc_polarity;
+	priv->spec_inv = pdata->spec_inv;
+	gpio_chip_base = *pdata->gpio_chip_base;
+	priv->regmap[0] = regmap_init_i2c(priv->client[0], &regmap_config0);
+	if (IS_ERR(priv->regmap[0])) {
+		ret = PTR_ERR(priv->regmap[0]);
+		goto err_kfree;
+	}
 
-	priv->bank[0] = priv->bank[1] = 0xff;
-	ret = cxd2820r_rd_reg(priv, 0x000fd, &tmp);
-	dev_dbg(&priv->i2c->dev, "%s: chip id=%02x\n", __func__, tmp);
-	if (ret || tmp != 0xe1)
-		goto error;
+	/* Check demod answers with correct chip id */
+	ret = regmap_read(priv->regmap[0], 0x00fd, &utmp);
+	if (ret)
+		goto err_regmap_0_regmap_exit;
+
+	dev_dbg(&client->dev, "chip_id=%02x\n", utmp);
+
+	if (utmp != 0xe1) {
+		ret = -ENODEV;
+		goto err_regmap_0_regmap_exit;
+	}
+
+	/*
+	 * Chip has two I2C addresses for different register banks. We register
+	 * one dummy I2C client in in order to get own I2C client for each
+	 * register bank.
+	 */
+	priv->client[1] = i2c_new_dummy(client->adapter, client->addr | (1 << 1));
+	if (!priv->client[1]) {
+		ret = -ENODEV;
+		dev_err(&client->dev, "I2C registration failed\n");
+		if (ret)
+			goto err_regmap_0_regmap_exit;
+	}
+
+	priv->regmap[1] = regmap_init_i2c(priv->client[1], &regmap_config1);
+	if (IS_ERR(priv->regmap[1])) {
+		ret = PTR_ERR(priv->regmap[1]);
+		goto err_client_1_i2c_unregister_device;
+	}
 
 	if (gpio_chip_base) {
 #ifdef CONFIG_GPIOLIB
-		/* add GPIOs */
+		/* Add GPIOs */
 		priv->gpio_chip.label = KBUILD_MODNAME;
-		priv->gpio_chip.parent = &priv->i2c->dev;
+		priv->gpio_chip.parent = &client->dev;
 		priv->gpio_chip.owner = THIS_MODULE;
-		priv->gpio_chip.direction_output =
-				cxd2820r_gpio_direction_output;
+		priv->gpio_chip.direction_output = cxd2820r_gpio_direction_output;
 		priv->gpio_chip.set = cxd2820r_gpio_set;
 		priv->gpio_chip.get = cxd2820r_gpio_get;
-		priv->gpio_chip.base = -1; /* dynamic allocation */
+		priv->gpio_chip.base = -1; /* Dynamic allocation */
 		priv->gpio_chip.ngpio = GPIO_COUNT;
 		priv->gpio_chip.can_sleep = 1;
 		ret = gpiochip_add_data(&priv->gpio_chip, priv);
 		if (ret)
-			goto error;
+			goto err_regmap_1_regmap_exit;
 
-		dev_dbg(&priv->i2c->dev, "%s: gpio_chip.base=%d\n", __func__,
-				priv->gpio_chip.base);
+		dev_dbg(&client->dev, "gpio_chip.base=%d\n",
+			priv->gpio_chip.base);
 
 		*gpio_chip_base = priv->gpio_chip.base;
 #else
@@ -748,17 +689,73 @@
 		gpio[2] = 0;
 		ret = cxd2820r_gpio(&priv->fe, gpio);
 		if (ret)
-			goto error;
+			goto err_regmap_1_regmap_exit;
 #endif
 	}
 
-	return &priv->fe;
-error:
-	dev_dbg(&i2c->dev, "%s: failed=%d\n", __func__, ret);
+	/* Create dvb frontend */
+	memcpy(&priv->fe.ops, &cxd2820r_ops, sizeof(priv->fe.ops));
+	if (!pdata->attach_in_use)
+		priv->fe.ops.release = NULL;
+	priv->fe.demodulator_priv = priv;
+	i2c_set_clientdata(client, priv);
+
+	/* Setup callbacks */
+	pdata->get_dvb_frontend = cxd2820r_get_dvb_frontend;
+
+	dev_info(&client->dev, "Sony CXD2820R successfully identified\n");
+
+	return 0;
+err_regmap_1_regmap_exit:
+	regmap_exit(priv->regmap[1]);
+err_client_1_i2c_unregister_device:
+	i2c_unregister_device(priv->client[1]);
+err_regmap_0_regmap_exit:
+	regmap_exit(priv->regmap[0]);
+err_kfree:
 	kfree(priv);
-	return NULL;
+err:
+	dev_dbg(&client->dev, "failed=%d\n", ret);
+	return ret;
 }
-EXPORT_SYMBOL(cxd2820r_attach);
+
+static int cxd2820r_remove(struct i2c_client *client)
+{
+	struct cxd2820r_priv *priv = i2c_get_clientdata(client);
+
+	dev_dbg(&client->dev, "\n");
+
+#ifdef CONFIG_GPIOLIB
+	if (priv->gpio_chip.label)
+		gpiochip_remove(&priv->gpio_chip);
+#endif
+	regmap_exit(priv->regmap[1]);
+	i2c_unregister_device(priv->client[1]);
+
+	regmap_exit(priv->regmap[0]);
+
+	kfree(priv);
+
+	return 0;
+}
+
+static const struct i2c_device_id cxd2820r_id_table[] = {
+	{"cxd2820r", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, cxd2820r_id_table);
+
+static struct i2c_driver cxd2820r_driver = {
+	.driver = {
+		.name                = "cxd2820r",
+		.suppress_bind_attrs = true,
+	},
+	.probe    = cxd2820r_probe,
+	.remove   = cxd2820r_remove,
+	.id_table = cxd2820r_id_table,
+};
+
+module_i2c_driver(cxd2820r_driver);
 
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
 MODULE_DESCRIPTION("Sony CXD2820R demodulator driver");
diff --git a/drivers/media/dvb-frontends/cxd2820r_priv.h b/drivers/media/dvb-frontends/cxd2820r_priv.h
index e31c48e..0d09620 100644
--- a/drivers/media/dvb-frontends/cxd2820r_priv.h
+++ b/drivers/media/dvb-frontends/cxd2820r_priv.h
@@ -27,6 +27,8 @@
 #include "dvb_math.h"
 #include "cxd2820r.h"
 #include <linux/gpio.h>
+#include <linux/math64.h>
+#include <linux/regmap.h>
 
 struct reg_val_mask {
 	u32 reg;
@@ -34,14 +36,23 @@
 	u8  mask;
 };
 
+#define CXD2820R_CLK 41000000
+
 struct cxd2820r_priv {
+	struct i2c_client *client[2];
+	struct regmap *regmap[2];
 	struct i2c_adapter *i2c;
 	struct dvb_frontend fe;
-	struct cxd2820r_config cfg;
+	u8 ts_mode;
+	bool ts_clk_inv;
+	bool if_agc_polarity;
+	bool spec_inv;
+
+	u64 post_bit_error_prev_dvbv3;
+	u64 post_bit_error;
 
 	bool ber_running;
 
-	u8 bank[2];
 #define GPIO_COUNT 3
 	u8 gpio[GPIO_COUNT];
 #ifdef CONFIG_GPIOLIB
@@ -58,6 +69,9 @@
 
 int cxd2820r_gpio(struct dvb_frontend *fe, u8 *gpio);
 
+int cxd2820r_wr_reg_val_mask_tab(struct cxd2820r_priv *priv,
+				 const struct reg_val_mask *tab, int tab_len);
+
 int cxd2820r_wr_reg_mask(struct cxd2820r_priv *priv, u32 reg, u8 val,
 	u8 mask);
 
@@ -83,14 +97,6 @@
 
 int cxd2820r_read_status_c(struct dvb_frontend *fe, enum fe_status *status);
 
-int cxd2820r_read_ber_c(struct dvb_frontend *fe, u32 *ber);
-
-int cxd2820r_read_signal_strength_c(struct dvb_frontend *fe, u16 *strength);
-
-int cxd2820r_read_snr_c(struct dvb_frontend *fe, u16 *snr);
-
-int cxd2820r_read_ucblocks_c(struct dvb_frontend *fe, u32 *ucblocks);
-
 int cxd2820r_init_c(struct dvb_frontend *fe);
 
 int cxd2820r_sleep_c(struct dvb_frontend *fe);
@@ -107,14 +113,6 @@
 
 int cxd2820r_read_status_t(struct dvb_frontend *fe, enum fe_status *status);
 
-int cxd2820r_read_ber_t(struct dvb_frontend *fe, u32 *ber);
-
-int cxd2820r_read_signal_strength_t(struct dvb_frontend *fe, u16 *strength);
-
-int cxd2820r_read_snr_t(struct dvb_frontend *fe, u16 *snr);
-
-int cxd2820r_read_ucblocks_t(struct dvb_frontend *fe, u32 *ucblocks);
-
 int cxd2820r_init_t(struct dvb_frontend *fe);
 
 int cxd2820r_sleep_t(struct dvb_frontend *fe);
@@ -131,14 +129,6 @@
 
 int cxd2820r_read_status_t2(struct dvb_frontend *fe, enum fe_status *status);
 
-int cxd2820r_read_ber_t2(struct dvb_frontend *fe, u32 *ber);
-
-int cxd2820r_read_signal_strength_t2(struct dvb_frontend *fe, u16 *strength);
-
-int cxd2820r_read_snr_t2(struct dvb_frontend *fe, u16 *snr);
-
-int cxd2820r_read_ucblocks_t2(struct dvb_frontend *fe, u32 *ucblocks);
-
 int cxd2820r_init_t2(struct dvb_frontend *fe);
 
 int cxd2820r_sleep_t2(struct dvb_frontend *fe);
diff --git a/drivers/media/dvb-frontends/cxd2820r_t.c b/drivers/media/dvb-frontends/cxd2820r_t.c
index 75ce7d8..c2e7caf 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t.c
@@ -24,10 +24,11 @@
 int cxd2820r_set_frontend_t(struct dvb_frontend *fe)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct i2c_client *client = priv->client[0];
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
-	int ret, i, bw_i;
-	u32 if_freq, if_ctl;
-	u64 num;
+	int ret, bw_i;
+	unsigned int utmp;
+	u32 if_frequency;
 	u8 buf[3], bw_param;
 	u8 bw_params1[][5] = {
 		{ 0x17, 0xea, 0xaa, 0xaa, 0xaa }, /* 6 MHz */
@@ -45,9 +46,9 @@
 		{ 0x00085, 0x07, 0xff },
 		{ 0x00088, 0x01, 0xff },
 
-		{ 0x00070, priv->cfg.ts_mode, 0xff },
-		{ 0x00071, !priv->cfg.ts_clock_inv << 4, 0x10 },
-		{ 0x000cb, priv->cfg.if_agc_polarity << 6, 0x40 },
+		{ 0x00070, priv->ts_mode, 0xff },
+		{ 0x00071, !priv->ts_clk_inv << 4, 0x10 },
+		{ 0x000cb, priv->if_agc_polarity << 6, 0x40 },
 		{ 0x000a5, 0x00, 0x01 },
 		{ 0x00082, 0x20, 0x60 },
 		{ 0x000c2, 0xc3, 0xff },
@@ -55,8 +56,10 @@
 		{ 0x00427, 0x41, 0xff },
 	};
 
-	dev_dbg(&priv->i2c->dev, "%s: frequency=%d bandwidth_hz=%d\n", __func__,
-			c->frequency, c->bandwidth_hz);
+	dev_dbg(&client->dev,
+		"delivery_system=%d modulation=%d frequency=%u bandwidth_hz=%u inversion=%d\n",
+		c->delivery_system, c->modulation, c->frequency,
+		c->bandwidth_hz, c->inversion);
 
 	switch (c->bandwidth_hz) {
 	case 6000000:
@@ -80,12 +83,9 @@
 		fe->ops.tuner_ops.set_params(fe);
 
 	if (priv->delivery_system != SYS_DVBT) {
-		for (i = 0; i < ARRAY_SIZE(tab); i++) {
-			ret = cxd2820r_wr_reg_mask(priv, tab[i].reg,
-				tab[i].val, tab[i].mask);
-			if (ret)
-				goto error;
-		}
+		ret = cxd2820r_wr_reg_val_mask_tab(priv, tab, ARRAY_SIZE(tab));
+		if (ret)
+			goto error;
 	}
 
 	priv->delivery_system = SYS_DVBT;
@@ -93,48 +93,46 @@
 
 	/* program IF frequency */
 	if (fe->ops.tuner_ops.get_if_frequency) {
-		ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_freq);
+		ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
 		if (ret)
 			goto error;
-	} else
-		if_freq = 0;
+		dev_dbg(&client->dev, "if_frequency=%u\n", if_frequency);
+	} else {
+		ret = -EINVAL;
+		goto error;
+	}
 
-	dev_dbg(&priv->i2c->dev, "%s: if_freq=%d\n", __func__, if_freq);
-
-	num = if_freq / 1000; /* Hz => kHz */
-	num *= 0x1000000;
-	if_ctl = DIV_ROUND_CLOSEST_ULL(num, 41000);
-	buf[0] = ((if_ctl >> 16) & 0xff);
-	buf[1] = ((if_ctl >>  8) & 0xff);
-	buf[2] = ((if_ctl >>  0) & 0xff);
-
-	ret = cxd2820r_wr_regs(priv, 0x000b6, buf, 3);
+	utmp = DIV_ROUND_CLOSEST_ULL((u64)if_frequency * 0x1000000, CXD2820R_CLK);
+	buf[0] = (utmp >> 16) & 0xff;
+	buf[1] = (utmp >>  8) & 0xff;
+	buf[2] = (utmp >>  0) & 0xff;
+	ret = regmap_bulk_write(priv->regmap[0], 0x00b6, buf, 3);
 	if (ret)
 		goto error;
 
-	ret = cxd2820r_wr_regs(priv, 0x0009f, bw_params1[bw_i], 5);
+	ret = regmap_bulk_write(priv->regmap[0], 0x009f, bw_params1[bw_i], 5);
 	if (ret)
 		goto error;
 
-	ret = cxd2820r_wr_reg_mask(priv, 0x000d7, bw_param << 6, 0xc0);
+	ret = regmap_update_bits(priv->regmap[0], 0x00d7, 0xc0, bw_param << 6);
 	if (ret)
 		goto error;
 
-	ret = cxd2820r_wr_regs(priv, 0x000d9, bw_params2[bw_i], 2);
+	ret = regmap_bulk_write(priv->regmap[0], 0x00d9, bw_params2[bw_i], 2);
 	if (ret)
 		goto error;
 
-	ret = cxd2820r_wr_reg(priv, 0x000ff, 0x08);
+	ret = regmap_write(priv->regmap[0], 0x00ff, 0x08);
 	if (ret)
 		goto error;
 
-	ret = cxd2820r_wr_reg(priv, 0x000fe, 0x01);
+	ret = regmap_write(priv->regmap[0], 0x00fe, 0x01);
 	if (ret)
 		goto error;
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
@@ -142,10 +140,14 @@
 			    struct dtv_frontend_properties *c)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct i2c_client *client = priv->client[0];
 	int ret;
+	unsigned int utmp;
 	u8 buf[2];
 
-	ret = cxd2820r_rd_regs(priv, 0x0002f, buf, sizeof(buf));
+	dev_dbg(&client->dev, "\n");
+
+	ret = regmap_bulk_read(priv->regmap[0], 0x002f, buf, sizeof(buf));
 	if (ret)
 		goto error;
 
@@ -236,11 +238,11 @@
 		break;
 	}
 
-	ret = cxd2820r_rd_reg(priv, 0x007c6, &buf[0]);
+	ret = regmap_read(priv->regmap[0], 0x07c6, &utmp);
 	if (ret)
 		goto error;
 
-	switch ((buf[0] >> 0) & 0x01) {
+	switch ((utmp >> 0) & 0x01) {
 	case 0:
 		c->inversion = INVERSION_OFF;
 		break;
@@ -251,169 +253,158 @@
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
-int cxd2820r_read_ber_t(struct dvb_frontend *fe, u32 *ber)
-{
-	struct cxd2820r_priv *priv = fe->demodulator_priv;
-	int ret;
-	u8 buf[3], start_ber = 0;
-	*ber = 0;
-
-	if (priv->ber_running) {
-		ret = cxd2820r_rd_regs(priv, 0x00076, buf, sizeof(buf));
-		if (ret)
-			goto error;
-
-		if ((buf[2] >> 7) & 0x01 || (buf[2] >> 4) & 0x01) {
-			*ber = (buf[2] & 0x0f) << 16 | buf[1] << 8 | buf[0];
-			start_ber = 1;
-		}
-	} else {
-		priv->ber_running = true;
-		start_ber = 1;
-	}
-
-	if (start_ber) {
-		/* (re)start BER */
-		ret = cxd2820r_wr_reg(priv, 0x00079, 0x01);
-		if (ret)
-			goto error;
-	}
-
-	return ret;
-error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
-	return ret;
-}
-
-int cxd2820r_read_signal_strength_t(struct dvb_frontend *fe,
-	u16 *strength)
-{
-	struct cxd2820r_priv *priv = fe->demodulator_priv;
-	int ret;
-	u8 buf[2];
-	u16 tmp;
-
-	ret = cxd2820r_rd_regs(priv, 0x00026, buf, sizeof(buf));
-	if (ret)
-		goto error;
-
-	tmp = (buf[0] & 0x0f) << 8 | buf[1];
-	tmp = ~tmp & 0x0fff;
-
-	/* scale value to 0x0000-0xffff from 0x0000-0x0fff */
-	*strength = tmp * 0xffff / 0x0fff;
-
-	return ret;
-error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
-	return ret;
-}
-
-int cxd2820r_read_snr_t(struct dvb_frontend *fe, u16 *snr)
-{
-	struct cxd2820r_priv *priv = fe->demodulator_priv;
-	int ret;
-	u8 buf[2];
-	u16 tmp;
-	/* report SNR in dB * 10 */
-
-	ret = cxd2820r_rd_regs(priv, 0x00028, buf, sizeof(buf));
-	if (ret)
-		goto error;
-
-	tmp = (buf[0] & 0x1f) << 8 | buf[1];
-	#define CXD2820R_LOG10_8_24 15151336 /* log10(8) << 24 */
-	if (tmp)
-		*snr = (intlog10(tmp) - CXD2820R_LOG10_8_24) / ((1 << 24)
-			/ 100);
-	else
-		*snr = 0;
-
-	dev_dbg(&priv->i2c->dev, "%s: dBx10=%d val=%04x\n", __func__, *snr,
-			tmp);
-
-	return ret;
-error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
-	return ret;
-}
-
-int cxd2820r_read_ucblocks_t(struct dvb_frontend *fe, u32 *ucblocks)
-{
-	*ucblocks = 0;
-	/* no way to read ? */
-	return 0;
-}
-
 int cxd2820r_read_status_t(struct dvb_frontend *fe, enum fe_status *status)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct i2c_client *client = priv->client[0];
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	int ret;
-	u8 buf[4];
-	*status = 0;
+	unsigned int utmp, utmp1, utmp2;
+	u8 buf[3];
 
-	ret = cxd2820r_rd_reg(priv, 0x00010, &buf[0]);
+	/* Lock detection */
+	ret = regmap_bulk_read(priv->regmap[0], 0x0010, &buf[0], 1);
+	if (ret)
+		goto error;
+	ret = regmap_bulk_read(priv->regmap[0], 0x0073, &buf[1], 1);
 	if (ret)
 		goto error;
 
-	if ((buf[0] & 0x07) == 6) {
-		ret = cxd2820r_rd_reg(priv, 0x00073, &buf[1]);
-		if (ret)
-			goto error;
+	utmp1 = (buf[0] >> 0) & 0x07;
+	utmp2 = (buf[1] >> 3) & 0x01;
 
-		if (((buf[1] >> 3) & 0x01) == 1) {
-			*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
-				FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
-		} else {
-			*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
-				FE_HAS_VITERBI | FE_HAS_SYNC;
-		}
+	if (utmp1 == 6 && utmp2 == 1) {
+		*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+			  FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+	} else if (utmp1 == 6 || utmp2 == 1) {
+		*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+			  FE_HAS_VITERBI | FE_HAS_SYNC;
 	} else {
-		ret = cxd2820r_rd_reg(priv, 0x00014, &buf[2]);
+		*status = 0;
+	}
+
+	dev_dbg(&client->dev, "status=%02x raw=%*ph sync=%u ts=%u\n",
+		*status, 2, buf, utmp1, utmp2);
+
+	/* Signal strength */
+	if (*status & FE_HAS_SIGNAL) {
+		unsigned int strength;
+
+		ret = regmap_bulk_read(priv->regmap[0], 0x0026, buf, 2);
 		if (ret)
 			goto error;
 
-		if ((buf[2] & 0x0f) >= 4) {
-			ret = cxd2820r_rd_reg(priv, 0x00a14, &buf[3]);
+		utmp = buf[0] << 8 | buf[1] << 0;
+		utmp = ~utmp & 0x0fff;
+		/* Scale value to 0x0000-0xffff */
+		strength = utmp << 4 | utmp >> 8;
+
+		c->strength.len = 1;
+		c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+		c->strength.stat[0].uvalue = strength;
+	} else {
+		c->strength.len = 1;
+		c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	}
+
+	/* CNR */
+	if (*status & FE_HAS_VITERBI) {
+		unsigned int cnr;
+
+		ret = regmap_bulk_read(priv->regmap[0], 0x002c, buf, 2);
+		if (ret)
+			goto error;
+
+		utmp = buf[0] << 8 | buf[1] << 0;
+		if (utmp)
+			cnr = div_u64((u64)(intlog10(utmp)
+				      - intlog10(32000 - utmp) + 55532585)
+				      * 10000, (1 << 24));
+		else
+			cnr = 0;
+
+		c->cnr.len = 1;
+		c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+		c->cnr.stat[0].svalue = cnr;
+	} else {
+		c->cnr.len = 1;
+		c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	}
+
+	/* BER */
+	if (*status & FE_HAS_SYNC) {
+		unsigned int post_bit_error;
+		bool start_ber;
+
+		if (priv->ber_running) {
+			ret = regmap_bulk_read(priv->regmap[0], 0x0076, buf, 3);
 			if (ret)
 				goto error;
 
-			if (((buf[3] >> 4) & 0x01) == 1)
-				*status |= FE_HAS_SIGNAL;
+			if ((buf[2] >> 7) & 0x01) {
+				post_bit_error = buf[2] << 16 | buf[1] << 8 |
+						 buf[0] << 0;
+				post_bit_error &= 0x0fffff;
+				start_ber = true;
+			} else {
+				post_bit_error = 0;
+				start_ber = false;
+			}
+		} else {
+			post_bit_error = 0;
+			start_ber = true;
 		}
-	}
 
-	dev_dbg(&priv->i2c->dev, "%s: lock=%*ph\n", __func__, 4, buf);
+		if (start_ber) {
+			ret = regmap_write(priv->regmap[0], 0x0079, 0x01);
+			if (ret)
+				goto error;
+			priv->ber_running = true;
+		}
+
+		priv->post_bit_error += post_bit_error;
+
+		c->post_bit_error.len = 1;
+		c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+		c->post_bit_error.stat[0].uvalue = priv->post_bit_error;
+	} else {
+		c->post_bit_error.len = 1;
+		c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	}
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
 int cxd2820r_init_t(struct dvb_frontend *fe)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct i2c_client *client = priv->client[0];
 	int ret;
 
-	ret = cxd2820r_wr_reg(priv, 0x00085, 0x07);
+	dev_dbg(&client->dev, "\n");
+
+	ret = regmap_write(priv->regmap[0], 0x0085, 0x07);
 	if (ret)
 		goto error;
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
 int cxd2820r_sleep_t(struct dvb_frontend *fe)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
-	int ret, i;
+	struct i2c_client *client = priv->client[0];
+	int ret;
 	struct reg_val_mask tab[] = {
 		{ 0x000ff, 0x1f, 0xff },
 		{ 0x00085, 0x00, 0xff },
@@ -422,20 +413,17 @@
 		{ 0x00080, 0x00, 0xff },
 	};
 
-	dev_dbg(&priv->i2c->dev, "%s\n", __func__);
+	dev_dbg(&client->dev, "\n");
 
 	priv->delivery_system = SYS_UNDEFINED;
 
-	for (i = 0; i < ARRAY_SIZE(tab); i++) {
-		ret = cxd2820r_wr_reg_mask(priv, tab[i].reg, tab[i].val,
-			tab[i].mask);
-		if (ret)
-			goto error;
-	}
+	ret = cxd2820r_wr_reg_val_mask_tab(priv, tab, ARRAY_SIZE(tab));
+	if (ret)
+		goto error;
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
diff --git a/drivers/media/dvb-frontends/cxd2820r_t2.c b/drivers/media/dvb-frontends/cxd2820r_t2.c
index 704475676..e641fde 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t2.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t2.c
@@ -23,11 +23,12 @@
 
 int cxd2820r_set_frontend_t2(struct dvb_frontend *fe)
 {
-	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
-	int ret, i, bw_i;
-	u32 if_freq, if_ctl;
-	u64 num;
+	struct i2c_client *client = priv->client[0];
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+	int ret, bw_i;
+	unsigned int utmp;
+	u32 if_frequency;
 	u8 buf[3], bw_param;
 	u8 bw_params1[][5] = {
 		{ 0x1c, 0xb3, 0x33, 0x33, 0x33 }, /* 5 MHz */
@@ -45,10 +46,10 @@
 		{ 0x0207f, 0x2a, 0xff },
 		{ 0x02082, 0x0a, 0xff },
 		{ 0x02083, 0x0a, 0xff },
-		{ 0x020cb, priv->cfg.if_agc_polarity << 6, 0x40 },
-		{ 0x02070, priv->cfg.ts_mode, 0xff },
-		{ 0x02071, !priv->cfg.ts_clock_inv << 6, 0x40 },
-		{ 0x020b5, priv->cfg.spec_inv << 4, 0x10 },
+		{ 0x020cb, priv->if_agc_polarity << 6, 0x40 },
+		{ 0x02070, priv->ts_mode, 0xff },
+		{ 0x02071, !priv->ts_clk_inv << 6, 0x40 },
+		{ 0x020b5, priv->spec_inv << 4, 0x10 },
 		{ 0x02567, 0x07, 0x0f },
 		{ 0x02569, 0x03, 0x03 },
 		{ 0x02595, 0x1a, 0xff },
@@ -69,8 +70,10 @@
 		{ 0x027ef, 0x10, 0x18 },
 	};
 
-	dev_dbg(&priv->i2c->dev, "%s: frequency=%d bandwidth_hz=%d\n", __func__,
-			c->frequency, c->bandwidth_hz);
+	dev_dbg(&client->dev,
+		"delivery_system=%d modulation=%d frequency=%u bandwidth_hz=%u inversion=%d stream_id=%u\n",
+		c->delivery_system, c->modulation, c->frequency,
+		c->bandwidth_hz, c->inversion, c->stream_id);
 
 	switch (c->bandwidth_hz) {
 	case 5000000:
@@ -98,73 +101,67 @@
 		fe->ops.tuner_ops.set_params(fe);
 
 	if (priv->delivery_system != SYS_DVBT2) {
-		for (i = 0; i < ARRAY_SIZE(tab); i++) {
-			ret = cxd2820r_wr_reg_mask(priv, tab[i].reg,
-				tab[i].val, tab[i].mask);
-			if (ret)
-				goto error;
-		}
+		ret = cxd2820r_wr_reg_val_mask_tab(priv, tab, ARRAY_SIZE(tab));
+		if (ret)
+			goto error;
 	}
 
 	priv->delivery_system = SYS_DVBT2;
 
 	/* program IF frequency */
 	if (fe->ops.tuner_ops.get_if_frequency) {
-		ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_freq);
+		ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
 		if (ret)
 			goto error;
-	} else
-		if_freq = 0;
+		dev_dbg(&client->dev, "if_frequency=%u\n", if_frequency);
+	} else {
+		ret = -EINVAL;
+		goto error;
+	}
 
-	dev_dbg(&priv->i2c->dev, "%s: if_freq=%d\n", __func__, if_freq);
-
-	num = if_freq / 1000; /* Hz => kHz */
-	num *= 0x1000000;
-	if_ctl = DIV_ROUND_CLOSEST_ULL(num, 41000);
-	buf[0] = ((if_ctl >> 16) & 0xff);
-	buf[1] = ((if_ctl >>  8) & 0xff);
-	buf[2] = ((if_ctl >>  0) & 0xff);
+	utmp = DIV_ROUND_CLOSEST_ULL((u64)if_frequency * 0x1000000, CXD2820R_CLK);
+	buf[0] = (utmp >> 16) & 0xff;
+	buf[1] = (utmp >>  8) & 0xff;
+	buf[2] = (utmp >>  0) & 0xff;
+	ret = regmap_bulk_write(priv->regmap[0], 0x20b6, buf, 3);
+	if (ret)
+		goto error;
 
 	/* PLP filtering */
 	if (c->stream_id > 255) {
-		dev_dbg(&priv->i2c->dev, "%s: Disable PLP filtering\n", __func__);
-		ret = cxd2820r_wr_reg(priv, 0x023ad , 0);
+		dev_dbg(&client->dev, "disable PLP filtering\n");
+		ret = regmap_write(priv->regmap[0], 0x23ad, 0x00);
 		if (ret)
 			goto error;
 	} else {
-		dev_dbg(&priv->i2c->dev, "%s: Enable PLP filtering = %d\n", __func__,
-				c->stream_id);
-		ret = cxd2820r_wr_reg(priv, 0x023af , c->stream_id & 0xFF);
+		dev_dbg(&client->dev, "enable PLP filtering\n");
+		ret = regmap_write(priv->regmap[0], 0x23af, c->stream_id & 0xff);
 		if (ret)
 			goto error;
-		ret = cxd2820r_wr_reg(priv, 0x023ad , 1);
+		ret = regmap_write(priv->regmap[0], 0x23ad, 0x01);
 		if (ret)
 			goto error;
 	}
 
-	ret = cxd2820r_wr_regs(priv, 0x020b6, buf, 3);
+	ret = regmap_bulk_write(priv->regmap[0], 0x209f, bw_params1[bw_i], 5);
 	if (ret)
 		goto error;
 
-	ret = cxd2820r_wr_regs(priv, 0x0209f, bw_params1[bw_i], 5);
+	ret = regmap_update_bits(priv->regmap[0], 0x20d7, 0xc0, bw_param << 6);
 	if (ret)
 		goto error;
 
-	ret = cxd2820r_wr_reg_mask(priv, 0x020d7, bw_param << 6, 0xc0);
+	ret = regmap_write(priv->regmap[0], 0x00ff, 0x08);
 	if (ret)
 		goto error;
 
-	ret = cxd2820r_wr_reg(priv, 0x000ff, 0x08);
-	if (ret)
-		goto error;
-
-	ret = cxd2820r_wr_reg(priv, 0x000fe, 0x01);
+	ret = regmap_write(priv->regmap[0], 0x00fe, 0x01);
 	if (ret)
 		goto error;
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 
 }
@@ -173,10 +170,14 @@
 			     struct dtv_frontend_properties *c)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct i2c_client *client = priv->client[0];
 	int ret;
+	unsigned int utmp;
 	u8 buf[2];
 
-	ret = cxd2820r_rd_regs(priv, 0x0205c, buf, 2);
+	dev_dbg(&client->dev, "\n");
+
+	ret = regmap_bulk_read(priv->regmap[0], 0x205c, buf, 2);
 	if (ret)
 		goto error;
 
@@ -225,7 +226,7 @@
 		break;
 	}
 
-	ret = cxd2820r_rd_regs(priv, 0x0225b, buf, 2);
+	ret = regmap_bulk_read(priv->regmap[0], 0x225b, buf, 2);
 	if (ret)
 		goto error;
 
@@ -265,11 +266,11 @@
 		break;
 	}
 
-	ret = cxd2820r_rd_reg(priv, 0x020b5, &buf[0]);
+	ret = regmap_read(priv->regmap[0], 0x20b5, &utmp);
 	if (ret)
 		goto error;
 
-	switch ((buf[0] >> 4) & 0x01) {
+	switch ((utmp >> 4) & 0x01) {
 	case 0:
 		c->inversion = INVERSION_OFF;
 		break;
@@ -280,130 +281,124 @@
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
 int cxd2820r_read_status_t2(struct dvb_frontend *fe, enum fe_status *status)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+	struct i2c_client *client = priv->client[0];
 	int ret;
-	u8 buf[1];
-	*status = 0;
-
-	ret = cxd2820r_rd_reg(priv, 0x02010 , &buf[0]);
-	if (ret)
-		goto error;
-
-	if ((buf[0] & 0x07) == 6) {
-		if (((buf[0] >> 5) & 0x01) == 1) {
-			*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
-				FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
-		} else {
-			*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
-				FE_HAS_VITERBI | FE_HAS_SYNC;
-		}
-	}
-
-	dev_dbg(&priv->i2c->dev, "%s: lock=%02x\n", __func__, buf[0]);
-
-	return ret;
-error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
-	return ret;
-}
-
-int cxd2820r_read_ber_t2(struct dvb_frontend *fe, u32 *ber)
-{
-	struct cxd2820r_priv *priv = fe->demodulator_priv;
-	int ret;
+	unsigned int utmp, utmp1, utmp2;
 	u8 buf[4];
-	unsigned int errbits;
-	*ber = 0;
-	/* FIXME: correct calculation */
 
-	ret = cxd2820r_rd_regs(priv, 0x02039, buf, sizeof(buf));
+	/* Lock detection */
+	ret = regmap_bulk_read(priv->regmap[0], 0x2010, &buf[0], 1);
 	if (ret)
 		goto error;
 
-	if ((buf[0] >> 4) & 0x01) {
-		errbits = (buf[0] & 0x0f) << 24 | buf[1] << 16 |
-			buf[2] << 8 | buf[3];
+	utmp1 = (buf[0] >> 0) & 0x07;
+	utmp2 = (buf[0] >> 5) & 0x01;
 
-		if (errbits)
-			*ber = errbits * 64 / 16588800;
+	if (utmp1 == 6 && utmp2 == 1) {
+		*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+			  FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+	} else if (utmp1 == 6 || utmp2 == 1) {
+		*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+			  FE_HAS_VITERBI | FE_HAS_SYNC;
+	} else {
+		*status = 0;
+	}
+
+	dev_dbg(&client->dev, "status=%02x raw=%*ph sync=%u ts=%u\n",
+		*status, 1, buf, utmp1, utmp2);
+
+	/* Signal strength */
+	if (*status & FE_HAS_SIGNAL) {
+		unsigned int strength;
+
+		ret = regmap_bulk_read(priv->regmap[0], 0x2026, buf, 2);
+		if (ret)
+			goto error;
+
+		utmp = buf[0] << 8 | buf[1] << 0;
+		utmp = ~utmp & 0x0fff;
+		/* Scale value to 0x0000-0xffff */
+		strength = utmp << 4 | utmp >> 8;
+
+		c->strength.len = 1;
+		c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+		c->strength.stat[0].uvalue = strength;
+	} else {
+		c->strength.len = 1;
+		c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	}
+
+	/* CNR */
+	if (*status & FE_HAS_VITERBI) {
+		unsigned int cnr;
+
+		ret = regmap_bulk_read(priv->regmap[0], 0x2028, buf, 2);
+		if (ret)
+			goto error;
+
+		utmp = buf[0] << 8 | buf[1] << 0;
+		utmp = utmp & 0x0fff;
+		#define CXD2820R_LOG10_8_24 15151336 /* log10(8) << 24 */
+		if (utmp)
+			cnr = div_u64((u64)(intlog10(utmp)
+				      - CXD2820R_LOG10_8_24) * 10000,
+				      (1 << 24));
+		else
+			cnr = 0;
+
+		c->cnr.len = 1;
+		c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+		c->cnr.stat[0].svalue = cnr;
+	} else {
+		c->cnr.len = 1;
+		c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	}
+
+	/* BER */
+	if (*status & FE_HAS_SYNC) {
+		unsigned int post_bit_error;
+
+		ret = regmap_bulk_read(priv->regmap[0], 0x2039, buf, 4);
+		if (ret)
+			goto error;
+
+		if ((buf[0] >> 4) & 0x01) {
+			post_bit_error = buf[0] << 24 | buf[1] << 16 |
+					 buf[2] << 8 | buf[3] << 0;
+			post_bit_error &= 0x0fffffff;
+		} else {
+			post_bit_error = 0;
+		}
+
+		priv->post_bit_error += post_bit_error;
+
+		c->post_bit_error.len = 1;
+		c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+		c->post_bit_error.stat[0].uvalue = priv->post_bit_error;
+	} else {
+		c->post_bit_error.len = 1;
+		c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
 	}
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
-int cxd2820r_read_signal_strength_t2(struct dvb_frontend *fe,
-	u16 *strength)
-{
-	struct cxd2820r_priv *priv = fe->demodulator_priv;
-	int ret;
-	u8 buf[2];
-	u16 tmp;
-
-	ret = cxd2820r_rd_regs(priv, 0x02026, buf, sizeof(buf));
-	if (ret)
-		goto error;
-
-	tmp = (buf[0] & 0x0f) << 8 | buf[1];
-	tmp = ~tmp & 0x0fff;
-
-	/* scale value to 0x0000-0xffff from 0x0000-0x0fff */
-	*strength = tmp * 0xffff / 0x0fff;
-
-	return ret;
-error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
-	return ret;
-}
-
-int cxd2820r_read_snr_t2(struct dvb_frontend *fe, u16 *snr)
-{
-	struct cxd2820r_priv *priv = fe->demodulator_priv;
-	int ret;
-	u8 buf[2];
-	u16 tmp;
-	/* report SNR in dB * 10 */
-
-	ret = cxd2820r_rd_regs(priv, 0x02028, buf, sizeof(buf));
-	if (ret)
-		goto error;
-
-	tmp = (buf[0] & 0x0f) << 8 | buf[1];
-	#define CXD2820R_LOG10_8_24 15151336 /* log10(8) << 24 */
-	if (tmp)
-		*snr = (intlog10(tmp) - CXD2820R_LOG10_8_24) / ((1 << 24)
-			/ 100);
-	else
-		*snr = 0;
-
-	dev_dbg(&priv->i2c->dev, "%s: dBx10=%d val=%04x\n", __func__, *snr,
-			tmp);
-
-	return ret;
-error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
-	return ret;
-}
-
-int cxd2820r_read_ucblocks_t2(struct dvb_frontend *fe, u32 *ucblocks)
-{
-	*ucblocks = 0;
-	/* no way to read ? */
-	return 0;
-}
-
 int cxd2820r_sleep_t2(struct dvb_frontend *fe)
 {
 	struct cxd2820r_priv *priv = fe->demodulator_priv;
-	int ret, i;
+	struct i2c_client *client = priv->client[0];
+	int ret;
 	struct reg_val_mask tab[] = {
 		{ 0x000ff, 0x1f, 0xff },
 		{ 0x00085, 0x00, 0xff },
@@ -413,20 +408,17 @@
 		{ 0x00080, 0x00, 0xff },
 	};
 
-	dev_dbg(&priv->i2c->dev, "%s\n", __func__);
+	dev_dbg(&client->dev, "\n");
 
-	for (i = 0; i < ARRAY_SIZE(tab); i++) {
-		ret = cxd2820r_wr_reg_mask(priv, tab[i].reg, tab[i].val,
-			tab[i].mask);
-		if (ret)
-			goto error;
-	}
+	ret = cxd2820r_wr_reg_val_mask_tab(priv, tab, ARRAY_SIZE(tab));
+	if (ret)
+		goto error;
 
 	priv->delivery_system = SYS_UNDEFINED;
 
 	return ret;
 error:
-	dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index ffe88bc..5afb9c5 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -206,6 +206,9 @@
 		(u32)(((iffreq)/48.0)*16777216.0 + 0.5) : \
 		(u32)(((iffreq)/41.0)*16777216.0 + 0.5))
 
+static int cxd2841er_freeze_regs(struct cxd2841er_priv *priv);
+static int cxd2841er_unfreeze_regs(struct cxd2841er_priv *priv);
+
 static void cxd2841er_i2c_debug(struct cxd2841er_priv *priv,
 				u8 addr, u8 reg, u8 write,
 				const u8 *data, u32 len)
@@ -1401,6 +1404,41 @@
 	return 0;
 }
 
+static int cxd2841er_read_ber_i(struct cxd2841er_priv *priv,
+		u32 *bit_error, u32 *bit_count)
+{
+	u8 data[3];
+	u8 pktnum[2];
+
+	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	if (priv->state != STATE_ACTIVE_TC) {
+		dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+				__func__, priv->state);
+		return -EINVAL;
+	}
+
+	cxd2841er_freeze_regs(priv);
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x60);
+	cxd2841er_read_regs(priv, I2C_SLVT, 0x5B, pktnum, sizeof(pktnum));
+	cxd2841er_read_regs(priv, I2C_SLVT, 0x16, data, sizeof(data));
+
+	if (!pktnum[0] && !pktnum[1]) {
+		dev_dbg(&priv->i2c->dev,
+				"%s(): no valid BER data\n", __func__);
+		cxd2841er_unfreeze_regs(priv);
+		return -EINVAL;
+	}
+
+	*bit_error = ((u32)(data[0] & 0x7F) << 16) |
+		((u32)data[1] << 8) | data[2];
+	*bit_count = ((((u32)pktnum[0] << 8) | pktnum[1]) * 204 * 8);
+	dev_dbg(&priv->i2c->dev, "%s(): bit_error=%u bit_count=%u\n",
+			__func__, *bit_error, *bit_count);
+
+	cxd2841er_unfreeze_regs(priv);
+	return 0;
+}
+
 static int cxd2841er_mon_read_ber_s(struct cxd2841er_priv *priv,
 				    u32 *bit_error, u32 *bit_count)
 {
@@ -1570,6 +1608,25 @@
 	return 0;
 }
 
+static int cxd2841er_freeze_regs(struct cxd2841er_priv *priv)
+{
+	/*
+	 * Freeze registers: ensure multiple separate register reads
+	 * are from the same snapshot
+	 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x01, 0x01);
+	return 0;
+}
+
+static int cxd2841er_unfreeze_regs(struct cxd2841er_priv *priv)
+{
+	/*
+	 * un-freeze registers
+	 */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x01, 0x00);
+	return 0;
+}
+
 static u32 cxd2841er_dvbs_read_snr(struct cxd2841er_priv *priv,
 		u8 delsys, u32 *snr)
 {
@@ -1578,6 +1635,7 @@
 	int min_index, max_index, index;
 	static const struct cxd2841er_cnr_data *cn_data;
 
+	cxd2841er_freeze_regs(priv);
 	/* Set SLV-T Bank : 0xA1 */
 	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa1);
 	/*
@@ -1629,9 +1687,11 @@
 	} else {
 		dev_dbg(&priv->i2c->dev,
 			"%s(): no data available\n", __func__);
+		cxd2841er_unfreeze_regs(priv);
 		return -EINVAL;
 	}
 done:
+	cxd2841er_unfreeze_regs(priv);
 	*snr = res;
 	return 0;
 }
@@ -1655,12 +1715,7 @@
 		return -EINVAL;
 	}
 
-	/*
-	 * Freeze registers: ensure multiple separate register reads
-	 * are from the same snapshot
-	 */
-	cxd2841er_write_reg(priv, I2C_SLVT, 0x01, 0x01);
-
+	cxd2841er_freeze_regs(priv);
 	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x40);
 	cxd2841er_read_regs(priv, I2C_SLVT, 0x19, data, 1);
 	qam = (enum sony_dvbc_constellation_t) (data[0] & 0x07);
@@ -1670,6 +1725,7 @@
 	if (reg == 0) {
 		dev_dbg(&priv->i2c->dev,
 				"%s(): reg value out of range\n", __func__);
+		cxd2841er_unfreeze_regs(priv);
 		return 0;
 	}
 
@@ -1690,9 +1746,11 @@
 		*snr = -88 * (int32_t)sony_log(reg) + 86999;
 		break;
 	default:
+		cxd2841er_unfreeze_regs(priv);
 		return -EINVAL;
 	}
 
+	cxd2841er_unfreeze_regs(priv);
 	return 0;
 }
 
@@ -1707,17 +1765,21 @@
 			"%s(): invalid state %d\n", __func__, priv->state);
 		return -EINVAL;
 	}
+
+	cxd2841er_freeze_regs(priv);
 	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
 	cxd2841er_read_regs(priv, I2C_SLVT, 0x28, data, sizeof(data));
 	reg = ((u32)data[0] << 8) | (u32)data[1];
 	if (reg == 0) {
 		dev_dbg(&priv->i2c->dev,
 			"%s(): reg value out of range\n", __func__);
+		cxd2841er_unfreeze_regs(priv);
 		return 0;
 	}
 	if (reg > 4996)
 		reg = 4996;
 	*snr = 10000 * ((intlog10(reg) - intlog10(5350 - reg)) >> 24) + 28500;
+	cxd2841er_unfreeze_regs(priv);
 	return 0;
 }
 
@@ -1732,18 +1794,22 @@
 			"%s(): invalid state %d\n", __func__, priv->state);
 		return -EINVAL;
 	}
+
+	cxd2841er_freeze_regs(priv);
 	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x20);
 	cxd2841er_read_regs(priv, I2C_SLVT, 0x28, data, sizeof(data));
 	reg = ((u32)data[0] << 8) | (u32)data[1];
 	if (reg == 0) {
 		dev_dbg(&priv->i2c->dev,
 			"%s(): reg value out of range\n", __func__);
+		cxd2841er_unfreeze_regs(priv);
 		return 0;
 	}
 	if (reg > 10876)
 		reg = 10876;
 	*snr = 10000 * ((intlog10(reg) -
 		intlog10(12600 - reg)) >> 24) + 32000;
+	cxd2841er_unfreeze_regs(priv);
 	return 0;
 }
 
@@ -1760,21 +1826,18 @@
 		return -EINVAL;
 	}
 
-	/* Freeze all registers */
-	cxd2841er_write_reg(priv, I2C_SLVT, 0x01, 0x01);
-
-
+	cxd2841er_freeze_regs(priv);
 	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x60);
 	cxd2841er_read_regs(priv, I2C_SLVT, 0x28, data, sizeof(data));
 	reg = ((u32)data[0] << 8) | (u32)data[1];
 	if (reg == 0) {
 		dev_dbg(&priv->i2c->dev,
 				"%s(): reg value out of range\n", __func__);
+		cxd2841er_unfreeze_regs(priv);
 		return 0;
 	}
-	if (reg > 4996)
-		reg = 4996;
-	*snr = 100 * intlog10(reg) - 9031;
+	*snr = 10000 * (intlog10(reg) >> 24) - 9031;
+	cxd2841er_unfreeze_regs(priv);
 	return 0;
 }
 
@@ -1852,6 +1915,9 @@
 	case SYS_DVBC_ANNEX_C:
 		ret = cxd2841er_read_ber_c(priv, &bit_error, &bit_count);
 		break;
+	case SYS_ISDBT:
+		ret = cxd2841er_read_ber_i(priv, &bit_error, &bit_count);
+		break;
 	case SYS_DVBS:
 		ret = cxd2841er_mon_read_ber_s(priv, &bit_error, &bit_count);
 		break;
@@ -1965,6 +2031,9 @@
 		return;
 	}
 
+	dev_dbg(&priv->i2c->dev, "%s(): snr=%d\n",
+			__func__, (int32_t)tmp);
+
 	if (!ret) {
 		p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
 		p->cnr.stat[0].svalue = tmp;
@@ -1977,7 +2046,7 @@
 {
 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
 	struct cxd2841er_priv *priv = fe->demodulator_priv;
-	u32 ucblocks;
+	u32 ucblocks = 0;
 
 	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
 	switch (p->delivery_system) {
@@ -1999,7 +2068,7 @@
 		p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
 		return;
 	}
-	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+	dev_dbg(&priv->i2c->dev, "%s() ucblocks=%u\n", __func__, ucblocks);
 
 	p->block_error.stat[0].scale = FE_SCALE_COUNTER;
 	p->block_error.stat[0].uvalue = ucblocks;
@@ -2694,6 +2763,14 @@
 	u8 b10_b6[3];
 	u32 iffreq;
 
+	if (bandwidth != 6000000 &&
+			bandwidth != 7000000 &&
+			bandwidth != 8000000) {
+		dev_info(&priv->i2c->dev, "%s(): unsupported bandwidth %d. Forcing 8Mhz!\n",
+				__func__, bandwidth);
+		bandwidth = 8000000;
+	}
+
 	dev_dbg(&priv->i2c->dev, "%s() bw=%d\n", __func__, bandwidth);
 	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
 	switch (bandwidth) {
@@ -3076,6 +3153,7 @@
 	/* Enable demod clock */
 	cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x01);
 	/* Disable RF level monitor */
+	cxd2841er_write_reg(priv, I2C_SLVT, 0x59, 0x00);
 	cxd2841er_write_reg(priv, I2C_SLVT, 0x2f, 0x00);
 	/* Enable ADC clock */
 	cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
diff --git a/drivers/media/dvb-frontends/cxd2841er.h b/drivers/media/dvb-frontends/cxd2841er.h
index 62ad5f0..7f1acfb 100644
--- a/drivers/media/dvb-frontends/cxd2841er.h
+++ b/drivers/media/dvb-frontends/cxd2841er.h
@@ -22,7 +22,6 @@
 #ifndef CXD2841ER_H
 #define CXD2841ER_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 enum cxd2841er_xtal {
diff --git a/drivers/media/dvb-frontends/dib3000mc.h b/drivers/media/dvb-frontends/dib3000mc.h
index b37e69e..67a6d50 100644
--- a/drivers/media/dvb-frontends/dib3000mc.h
+++ b/drivers/media/dvb-frontends/dib3000mc.h
@@ -13,8 +13,6 @@
 #ifndef DIB3000MC_H
 #define DIB3000MC_H
 
-#include <linux/kconfig.h>
-
 #include "dibx000_common.h"
 
 struct dib3000mc_config {
diff --git a/drivers/media/dvb-frontends/dib7000m.h b/drivers/media/dvb-frontends/dib7000m.h
index 6468c27..8f84dfa 100644
--- a/drivers/media/dvb-frontends/dib7000m.h
+++ b/drivers/media/dvb-frontends/dib7000m.h
@@ -1,8 +1,6 @@
 #ifndef DIB7000M_H
 #define DIB7000M_H
 
-#include <linux/kconfig.h>
-
 #include "dibx000_common.h"
 
 struct dib7000m_config {
diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
index baa2789..205fbbf 100644
--- a/drivers/media/dvb-frontends/dib7000p.h
+++ b/drivers/media/dvb-frontends/dib7000p.h
@@ -1,8 +1,6 @@
 #ifndef DIB7000P_H
 #define DIB7000P_H
 
-#include <linux/kconfig.h>
-
 #include "dibx000_common.h"
 
 struct dib7000p_config {
diff --git a/drivers/media/dvb-frontends/drxd.h b/drivers/media/dvb-frontends/drxd.h
index a47c22d..f0507cd 100644
--- a/drivers/media/dvb-frontends/drxd.h
+++ b/drivers/media/dvb-frontends/drxd.h
@@ -24,7 +24,6 @@
 #ifndef _DRXD_H_
 #define _DRXD_H_
 
-#include <linux/kconfig.h>
 #include <linux/types.h>
 #include <linux/i2c.h>
 
diff --git a/drivers/media/dvb-frontends/drxk.h b/drivers/media/dvb-frontends/drxk.h
index 8f0b9ee..a629897 100644
--- a/drivers/media/dvb-frontends/drxk.h
+++ b/drivers/media/dvb-frontends/drxk.h
@@ -1,7 +1,6 @@
 #ifndef _DRXK_H_
 #define _DRXK_H_
 
-#include <linux/kconfig.h>
 #include <linux/types.h>
 #include <linux/i2c.h>
 
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index b975da0..c595adc 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -6448,7 +6448,7 @@
 			return status;
 
 		/* SCU c.o.c. */
-		read16(state, SCU_RAM_AGC_RF_IACCU_HI_CO__A, &scu_coc);
+		status = read16(state, SCU_RAM_AGC_RF_IACCU_HI_CO__A, &scu_coc);
 		if (status < 0)
 			return status;
 
diff --git a/drivers/media/dvb-frontends/ds3000.h b/drivers/media/dvb-frontends/ds3000.h
index 153169d..82e8c25 100644
--- a/drivers/media/dvb-frontends/ds3000.h
+++ b/drivers/media/dvb-frontends/ds3000.h
@@ -22,7 +22,6 @@
 #ifndef DS3000_H
 #define DS3000_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct ds3000_config {
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index 53089e1..735a966 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -739,7 +739,7 @@
 	return -EINVAL;
 }
 
-static struct dvb_tuner_ops dvb_pll_tuner_ops = {
+static const struct dvb_tuner_ops dvb_pll_tuner_ops = {
 	.release = dvb_pll_release,
 	.sleep = dvb_pll_sleep,
 	.init = dvb_pll_init,
diff --git a/drivers/media/dvb-frontends/dvb_dummy_fe.h b/drivers/media/dvb-frontends/dvb_dummy_fe.h
index 15e4cea..50f1af5 100644
--- a/drivers/media/dvb-frontends/dvb_dummy_fe.h
+++ b/drivers/media/dvb-frontends/dvb_dummy_fe.h
@@ -22,7 +22,6 @@
 #ifndef DVB_DUMMY_FE_H
 #define DVB_DUMMY_FE_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include "dvb_frontend.h"
 
diff --git a/drivers/media/dvb-frontends/ec100.h b/drivers/media/dvb-frontends/ec100.h
index 9544bab..e894bdc 100644
--- a/drivers/media/dvb-frontends/ec100.h
+++ b/drivers/media/dvb-frontends/ec100.h
@@ -22,7 +22,6 @@
 #ifndef EC100_H
 #define EC100_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct ec100_config {
diff --git a/drivers/media/dvb-frontends/hd29l2.h b/drivers/media/dvb-frontends/hd29l2.h
index 48e9ab7..a14d6f3 100644
--- a/drivers/media/dvb-frontends/hd29l2.h
+++ b/drivers/media/dvb-frontends/hd29l2.h
@@ -23,7 +23,6 @@
 #ifndef HD29L2_H
 #define HD29L2_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct hd29l2_config {
diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
index 97a8982..dc43c5f 100644
--- a/drivers/media/dvb-frontends/helene.c
+++ b/drivers/media/dvb-frontends/helene.c
@@ -842,7 +842,7 @@
 	return 0;
 }
 
-static struct dvb_tuner_ops helene_tuner_ops = {
+static const struct dvb_tuner_ops helene_tuner_ops = {
 	.info = {
 		.name = "Sony HELENE Ter tuner",
 		.frequency_min = 1000000,
@@ -856,7 +856,7 @@
 	.get_frequency = helene_get_frequency,
 };
 
-static struct dvb_tuner_ops helene_tuner_ops_s = {
+static const struct dvb_tuner_ops helene_tuner_ops_s = {
 	.info = {
 		.name = "Sony HELENE Sat tuner",
 		.frequency_min = 500000,
@@ -987,8 +987,10 @@
 	if (fe->ops.i2c_gate_ctrl)
 		fe->ops.i2c_gate_ctrl(fe, 1);
 
-	if (helene_x_pon(priv) != 0)
+	if (helene_x_pon(priv) != 0) {
+		kfree(priv);
 		return NULL;
+	}
 
 	if (fe->ops.i2c_gate_ctrl)
 		fe->ops.i2c_gate_ctrl(fe, 0);
@@ -1021,8 +1023,10 @@
 	if (fe->ops.i2c_gate_ctrl)
 		fe->ops.i2c_gate_ctrl(fe, 1);
 
-	if (helene_x_pon(priv) != 0)
+	if (helene_x_pon(priv) != 0) {
+		kfree(priv);
 		return NULL;
+	}
 
 	if (fe->ops.i2c_gate_ctrl)
 		fe->ops.i2c_gate_ctrl(fe, 0);
diff --git a/drivers/media/dvb-frontends/helene.h b/drivers/media/dvb-frontends/helene.h
index e1b9224..3336154 100644
--- a/drivers/media/dvb-frontends/helene.h
+++ b/drivers/media/dvb-frontends/helene.h
@@ -21,7 +21,6 @@
 #ifndef __DVB_HELENE_H__
 #define __DVB_HELENE_H__
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include <linux/i2c.h>
 
diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c
index a98bca5..0c089b5 100644
--- a/drivers/media/dvb-frontends/horus3a.c
+++ b/drivers/media/dvb-frontends/horus3a.c
@@ -326,7 +326,7 @@
 	return 0;
 }
 
-static struct dvb_tuner_ops horus3a_tuner_ops = {
+static const struct dvb_tuner_ops horus3a_tuner_ops = {
 	.info = {
 		.name = "Sony Horus3a",
 		.frequency_min = 950000,
diff --git a/drivers/media/dvb-frontends/horus3a.h b/drivers/media/dvb-frontends/horus3a.h
index c1e2d18..672a556 100644
--- a/drivers/media/dvb-frontends/horus3a.h
+++ b/drivers/media/dvb-frontends/horus3a.h
@@ -22,7 +22,6 @@
 #ifndef __DVB_HORUS3A_H__
 #define __DVB_HORUS3A_H__
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include <linux/i2c.h>
 
diff --git a/drivers/media/dvb-frontends/ix2505v.c b/drivers/media/dvb-frontends/ix2505v.c
index 0e3387e..2826bbb 100644
--- a/drivers/media/dvb-frontends/ix2505v.c
+++ b/drivers/media/dvb-frontends/ix2505v.c
@@ -258,7 +258,7 @@
 	return 0;
 }
 
-static struct dvb_tuner_ops ix2505v_tuner_ops = {
+static const struct dvb_tuner_ops ix2505v_tuner_ops = {
 	.info = {
 		.name = "Sharp IX2505V (B0017)",
 		.frequency_min = 950000,
diff --git a/drivers/media/dvb-frontends/ix2505v.h b/drivers/media/dvb-frontends/ix2505v.h
index af107a2..5eab397 100644
--- a/drivers/media/dvb-frontends/ix2505v.h
+++ b/drivers/media/dvb-frontends/ix2505v.h
@@ -20,7 +20,6 @@
 #ifndef DVB_IX2505V_H
 #define DVB_IX2505V_H
 
-#include <linux/kconfig.h>
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
diff --git a/drivers/media/dvb-frontends/lg2160.h b/drivers/media/dvb-frontends/lg2160.h
index d20bd90..8c74ddc 100644
--- a/drivers/media/dvb-frontends/lg2160.h
+++ b/drivers/media/dvb-frontends/lg2160.h
@@ -22,7 +22,6 @@
 #ifndef _LG2160_H_
 #define _LG2160_H_
 
-#include <linux/kconfig.h>
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
diff --git a/drivers/media/dvb-frontends/lgdt3305.h b/drivers/media/dvb-frontends/lgdt3305.h
index f91a1b4..e7dceb6 100644
--- a/drivers/media/dvb-frontends/lgdt3305.h
+++ b/drivers/media/dvb-frontends/lgdt3305.h
@@ -22,7 +22,6 @@
 #ifndef _LGDT3305_H_
 #define _LGDT3305_H_
 
-#include <linux/kconfig.h>
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 179c26e..0ca4e81 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -731,7 +731,7 @@
 
 	switch (if_freq_khz) {
 	default:
-		pr_warn("IF=%d KHz is not supportted, 3250 assumed\n",
+		pr_warn("IF=%d KHz is not supported, 3250 assumed\n",
 			if_freq_khz);
 		/* fallthrough */
 	case 3250: /* 3.25Mhz */
@@ -1737,24 +1737,16 @@
 static int lgdt3306a_search(struct dvb_frontend *fe)
 {
 	enum fe_status status = 0;
-	int i, ret;
+	int ret;
 
 	/* set frontend */
 	ret = lgdt3306a_set_parameters(fe);
 	if (ret)
 		goto error;
 
-	/* wait frontend lock */
-	for (i = 20; i > 0; i--) {
-		dbg_info(": loop=%d\n", i);
-		msleep(50);
-		ret = lgdt3306a_read_status(fe, &status);
-		if (ret)
-			goto error;
-
-		if (status & FE_HAS_LOCK)
-			break;
-	}
+	ret = lgdt3306a_read_status(fe, &status);
+	if (ret)
+		goto error;
 
 	/* check if we have a valid signal */
 	if (status & FE_HAS_LOCK)
diff --git a/drivers/media/dvb-frontends/lgs8gl5.h b/drivers/media/dvb-frontends/lgs8gl5.h
index a5b3faf..f36a7fd 100644
--- a/drivers/media/dvb-frontends/lgs8gl5.h
+++ b/drivers/media/dvb-frontends/lgs8gl5.h
@@ -23,7 +23,6 @@
 #ifndef LGS8GL5_H
 #define LGS8GL5_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct lgs8gl5_config {
diff --git a/drivers/media/dvb-frontends/lgs8gxx.h b/drivers/media/dvb-frontends/lgs8gxx.h
index 368c992..7519c02 100644
--- a/drivers/media/dvb-frontends/lgs8gxx.h
+++ b/drivers/media/dvb-frontends/lgs8gxx.h
@@ -26,7 +26,6 @@
 #ifndef __LGS8GXX_H__
 #define __LGS8GXX_H__
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include <linux/i2c.h>
 
diff --git a/drivers/media/dvb-frontends/lnbh24.h b/drivers/media/dvb-frontends/lnbh24.h
index a088b8e..24431df 100644
--- a/drivers/media/dvb-frontends/lnbh24.h
+++ b/drivers/media/dvb-frontends/lnbh24.h
@@ -23,8 +23,6 @@
 #ifndef _LNBH24_H
 #define _LNBH24_H
 
-#include <linux/kconfig.h>
-
 /* system register bits */
 #define LNBH24_OLF	0x01
 #define LNBH24_OTF	0x02
diff --git a/drivers/media/dvb-frontends/lnbh25.h b/drivers/media/dvb-frontends/lnbh25.h
index 1f329ef..f13fd03 100644
--- a/drivers/media/dvb-frontends/lnbh25.h
+++ b/drivers/media/dvb-frontends/lnbh25.h
@@ -22,7 +22,6 @@
 #define LNBH25_H
 
 #include <linux/i2c.h>
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 /* 22 kHz tone enabled. Tone output controlled by DSQIN pin */
diff --git a/drivers/media/dvb-frontends/lnbp21.h b/drivers/media/dvb-frontends/lnbp21.h
index cd9101f..4bb6439 100644
--- a/drivers/media/dvb-frontends/lnbp21.h
+++ b/drivers/media/dvb-frontends/lnbp21.h
@@ -27,8 +27,6 @@
 #ifndef _LNBP21_H
 #define _LNBP21_H
 
-#include <linux/kconfig.h>
-
 /* system register bits */
 /* [RO] 0=OK; 1=over current limit flag */
 #define LNBP21_OLF	0x01
diff --git a/drivers/media/dvb-frontends/lnbp22.h b/drivers/media/dvb-frontends/lnbp22.h
index 5d01d92..0cb7212 100644
--- a/drivers/media/dvb-frontends/lnbp22.h
+++ b/drivers/media/dvb-frontends/lnbp22.h
@@ -28,8 +28,6 @@
 #ifndef _LNBP22_H
 #define _LNBP22_H
 
-#include <linux/kconfig.h>
-
 /* Enable */
 #define LNBP22_EN	  0x10
 /* Voltage selection */
diff --git a/drivers/media/dvb-frontends/m88rs2000.h b/drivers/media/dvb-frontends/m88rs2000.h
index de74301..1a313b0 100644
--- a/drivers/media/dvb-frontends/m88rs2000.h
+++ b/drivers/media/dvb-frontends/m88rs2000.h
@@ -20,7 +20,6 @@
 #ifndef M88RS2000_H
 #define M88RS2000_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include "dvb_frontend.h"
 
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index 4132532..fe79358 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -71,25 +71,27 @@
 };
 
 static struct regdata mb86a20s_init2[] = {
-	{ 0x28, 0x22 }, { 0x29, 0x00 }, { 0x2a, 0x1f }, { 0x2b, 0xf0 },
+	{ 0x50, 0xd1 }, { 0x51, 0x22 },
+	{ 0x39, 0x01 },
+	{ 0x71, 0x00 },
 	{ 0x3b, 0x21 },
-	{ 0x3c, 0x38 },
+	{ 0x3c, 0x3a },
 	{ 0x01, 0x0d },
-	{ 0x04, 0x08 }, { 0x05, 0x03 },
+	{ 0x04, 0x08 }, { 0x05, 0x05 },
 	{ 0x04, 0x0e }, { 0x05, 0x00 },
-	{ 0x04, 0x0f }, { 0x05, 0x37 },
-	{ 0x04, 0x0b }, { 0x05, 0x78 },
+	{ 0x04, 0x0f }, { 0x05, 0x14 },
+	{ 0x04, 0x0b }, { 0x05, 0x8c },
 	{ 0x04, 0x00 }, { 0x05, 0x00 },
-	{ 0x04, 0x01 }, { 0x05, 0x1e },
-	{ 0x04, 0x02 }, { 0x05, 0x07 },
-	{ 0x04, 0x03 }, { 0x05, 0xd0 },
+	{ 0x04, 0x01 }, { 0x05, 0x07 },
+	{ 0x04, 0x02 }, { 0x05, 0x0f },
+	{ 0x04, 0x03 }, { 0x05, 0xa0 },
 	{ 0x04, 0x09 }, { 0x05, 0x00 },
 	{ 0x04, 0x0a }, { 0x05, 0xff },
-	{ 0x04, 0x27 }, { 0x05, 0x00 },
+	{ 0x04, 0x27 }, { 0x05, 0x64 },
 	{ 0x04, 0x28 }, { 0x05, 0x00 },
-	{ 0x04, 0x1e }, { 0x05, 0x00 },
-	{ 0x04, 0x29 }, { 0x05, 0x64 },
-	{ 0x04, 0x32 }, { 0x05, 0x02 },
+	{ 0x04, 0x1e }, { 0x05, 0xff },
+	{ 0x04, 0x29 }, { 0x05, 0x0a },
+	{ 0x04, 0x32 }, { 0x05, 0x0a },
 	{ 0x04, 0x14 }, { 0x05, 0x02 },
 	{ 0x04, 0x04 }, { 0x05, 0x00 },
 	{ 0x04, 0x05 }, { 0x05, 0x22 },
@@ -97,8 +99,6 @@
 	{ 0x04, 0x07 }, { 0x05, 0xd8 },
 	{ 0x04, 0x12 }, { 0x05, 0x00 },
 	{ 0x04, 0x13 }, { 0x05, 0xff },
-	{ 0x04, 0x15 }, { 0x05, 0x4e },
-	{ 0x04, 0x16 }, { 0x05, 0x20 },
 
 	/*
 	 * On this demod, when the bit count reaches the count below,
@@ -152,42 +152,36 @@
 	{ 0x50, 0x51 }, { 0x51, 0x04 },		/* MER symbol 4 */
 	{ 0x45, 0x04 },				/* CN symbol 4 */
 	{ 0x48, 0x04 },				/* CN manual mode */
-
+	{ 0x50, 0xd5 }, { 0x51, 0x01 },
 	{ 0x50, 0xd6 }, { 0x51, 0x1f },
 	{ 0x50, 0xd2 }, { 0x51, 0x03 },
-	{ 0x50, 0xd7 }, { 0x51, 0xbf },
-	{ 0x28, 0x74 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xff },
-	{ 0x28, 0x46 }, { 0x29, 0x00 }, { 0x2a, 0x1a }, { 0x2b, 0x0c },
-
-	{ 0x04, 0x40 }, { 0x05, 0x00 },
-	{ 0x28, 0x00 }, { 0x2b, 0x08 },
-	{ 0x28, 0x05 }, { 0x2b, 0x00 },
+	{ 0x50, 0xd7 }, { 0x51, 0x3f },
 	{ 0x1c, 0x01 },
-	{ 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x1f },
-	{ 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x18 },
-	{ 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x12 },
-	{ 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x30 },
-	{ 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x37 },
-	{ 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 },
-	{ 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x09 },
-	{ 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x06 },
-	{ 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7b },
-	{ 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x76 },
-	{ 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7d },
-	{ 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x08 },
-	{ 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0b },
-	{ 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 },
-	{ 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf2 },
-	{ 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf3 },
-	{ 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x05 },
-	{ 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 },
-	{ 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f },
-	{ 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xef },
-	{ 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xd8 },
-	{ 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xf1 },
-	{ 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x3d },
-	{ 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x94 },
-	{ 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xba },
+	{ 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x03 },
+	{ 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0d },
+	{ 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 },
+	{ 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x01 },
+	{ 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x21 },
+	{ 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x29 },
+	{ 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 },
+	{ 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x31 },
+	{ 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0e },
+	{ 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x4e },
+	{ 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x46 },
+	{ 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f },
+	{ 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x56 },
+	{ 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x35 },
+	{ 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbe },
+	{ 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0x84 },
+	{ 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x03 }, { 0x2b, 0xee },
+	{ 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x98 },
+	{ 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x9f },
+	{ 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xb2 },
+	{ 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0xc2 },
+	{ 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0x4a },
+	{ 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbc },
+	{ 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x04 }, { 0x2b, 0xba },
+	{ 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0x14 },
 	{ 0x50, 0x1e }, { 0x51, 0x5d },
 	{ 0x50, 0x22 }, { 0x51, 0x00 },
 	{ 0x50, 0x23 }, { 0x51, 0xc8 },
@@ -196,9 +190,7 @@
 	{ 0x50, 0x26 }, { 0x51, 0x00 },
 	{ 0x50, 0x27 }, { 0x51, 0xc3 },
 	{ 0x50, 0x39 }, { 0x51, 0x02 },
-	{ 0xec, 0x0f },
-	{ 0xeb, 0x1f },
-	{ 0x28, 0x6a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 },
+	{ 0x50, 0xd5 }, { 0x51, 0x01 },
 	{ 0xd0, 0x00 },
 };
 
@@ -318,7 +310,11 @@
 	if (val >= 7)
 		*status |= FE_HAS_SYNC;
 
-	if (val >= 8)				/* Maybe 9? */
+	/*
+	 * Actually, on state S8, it starts receiving TS, but the TS
+	 * output is only on normal state after the transition to S9.
+	 */
+	if (val >= 9)
 		*status |= FE_HAS_LOCK;
 
 	dev_dbg(&state->i2c->dev, "%s: Status = 0x%02x (state = %d)\n",
@@ -2058,6 +2054,11 @@
 	kfree(state);
 }
 
+static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
+{
+        return DVBFE_ALGO_HW;
+}
+
 static struct dvb_frontend_ops mb86a20s_ops;
 
 struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
@@ -2130,6 +2131,7 @@
 	.read_status = mb86a20s_read_status_and_stats,
 	.read_signal_strength = mb86a20s_read_signal_strength_from_cache,
 	.tune = mb86a20s_tune,
+	.get_frontend_algo = mb86a20s_get_frontend_algo,
 };
 
 MODULE_DESCRIPTION("DVB Frontend module for Fujitsu mb86A20s hardware");
diff --git a/drivers/media/dvb-frontends/mb86a20s.h b/drivers/media/dvb-frontends/mb86a20s.h
index a113282..dfb02db 100644
--- a/drivers/media/dvb-frontends/mb86a20s.h
+++ b/drivers/media/dvb-frontends/mb86a20s.h
@@ -16,7 +16,6 @@
 #ifndef MB86A20S_H
 #define MB86A20S_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 /**
diff --git a/drivers/media/dvb-frontends/s5h1409.h b/drivers/media/dvb-frontends/s5h1409.h
index f58b9ca..b38557c 100644
--- a/drivers/media/dvb-frontends/s5h1409.h
+++ b/drivers/media/dvb-frontends/s5h1409.h
@@ -22,7 +22,6 @@
 #ifndef __S5H1409_H__
 #define __S5H1409_H__
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct s5h1409_config {
diff --git a/drivers/media/dvb-frontends/s5h1411.h b/drivers/media/dvb-frontends/s5h1411.h
index f3a87f7..791bab0 100644
--- a/drivers/media/dvb-frontends/s5h1411.h
+++ b/drivers/media/dvb-frontends/s5h1411.h
@@ -22,7 +22,6 @@
 #ifndef __S5H1411_H__
 #define __S5H1411_H__
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 #define S5H1411_I2C_TOP_ADDR (0x32 >> 1)
diff --git a/drivers/media/dvb-frontends/s5h1432.h b/drivers/media/dvb-frontends/s5h1432.h
index f490c5e..b81c9bd 100644
--- a/drivers/media/dvb-frontends/s5h1432.h
+++ b/drivers/media/dvb-frontends/s5h1432.h
@@ -22,7 +22,6 @@
 #ifndef __S5H1432_H__
 #define __S5H1432_H__
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 #define S5H1432_I2C_TOP_ADDR (0x02 >> 1)
diff --git a/drivers/media/dvb-frontends/s921.h b/drivers/media/dvb-frontends/s921.h
index f5b722d..a47ed89 100644
--- a/drivers/media/dvb-frontends/s921.h
+++ b/drivers/media/dvb-frontends/s921.h
@@ -17,7 +17,6 @@
 #ifndef S921_H
 #define S921_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct s921_config {
diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
index 8bf716a..78669ea 100644
--- a/drivers/media/dvb-frontends/si2165.c
+++ b/drivers/media/dvb-frontends/si2165.c
@@ -25,6 +25,7 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/firmware.h>
+#include <linux/regmap.h>
 
 #include "dvb_frontend.h"
 #include "dvb_math.h"
@@ -40,7 +41,9 @@
  */
 
 struct si2165_state {
-	struct i2c_adapter *i2c;
+	struct i2c_client *client;
+
+	struct regmap *regmap;
 
 	struct dvb_frontend fe;
 
@@ -108,61 +111,27 @@
 		       const u8 *src, const int count)
 {
 	int ret;
-	struct i2c_msg msg;
-	u8 buf[2 + 4]; /* write a maximum of 4 bytes of data */
-
-	if (count + 2 > sizeof(buf)) {
-		dev_warn(&state->i2c->dev,
-			  "%s: i2c wr reg=%04x: count=%d is too big!\n",
-			  KBUILD_MODNAME, reg, count);
-		return -EINVAL;
-	}
-	buf[0] = reg >> 8;
-	buf[1] = reg & 0xff;
-	memcpy(buf + 2, src, count);
-
-	msg.addr = state->config.i2c_addr;
-	msg.flags = 0;
-	msg.buf = buf;
-	msg.len = count + 2;
 
 	if (debug & DEBUG_I2C_WRITE)
 		deb_i2c_write("reg: 0x%04x, data: %*ph\n", reg, count, src);
 
-	ret = i2c_transfer(state->i2c, &msg, 1);
+	ret = regmap_bulk_write(state->regmap, reg, src, count);
 
-	if (ret != 1) {
-		dev_err(&state->i2c->dev, "%s: ret == %d\n", __func__, ret);
-		if (ret < 0)
-			return ret;
-		else
-			return -EREMOTEIO;
-	}
+	if (ret)
+		dev_err(&state->client->dev, "%s: ret == %d\n", __func__, ret);
 
-	return 0;
+	return ret;
 }
 
 static int si2165_read(struct si2165_state *state,
 		       const u16 reg, u8 *val, const int count)
 {
-	int ret;
-	u8 reg_buf[] = { reg >> 8, reg & 0xff };
-	struct i2c_msg msg[] = {
-		{ .addr = state->config.i2c_addr,
-		  .flags = 0, .buf = reg_buf, .len = 2 },
-		{ .addr = state->config.i2c_addr,
-		  .flags = I2C_M_RD, .buf = val, .len = count },
-	};
+	int ret = regmap_bulk_read(state->regmap, reg, val, count);
 
-	ret = i2c_transfer(state->i2c, msg, 2);
-
-	if (ret != 2) {
-		dev_err(&state->i2c->dev, "%s: error (addr %02x reg %04x error (ret == %i)\n",
+	if (ret) {
+		dev_err(&state->client->dev, "%s: error (addr %02x reg %04x error (ret == %i)\n",
 			__func__, state->config.i2c_addr, reg, ret);
-		if (ret < 0)
-			return ret;
-		else
-			return -EREMOTEIO;
+		return ret;
 	}
 
 	if (debug & DEBUG_I2C_READ)
@@ -174,9 +143,9 @@
 static int si2165_readreg8(struct si2165_state *state,
 		       const u16 reg, u8 *val)
 {
-	int ret;
-
-	ret = si2165_read(state, reg, val, 1);
+	unsigned int val_tmp;
+	int ret = regmap_read(state->regmap, reg, &val_tmp);
+	*val = (u8)val_tmp;
 	deb_readreg("R(0x%04x)=0x%02x\n", reg, *val);
 	return ret;
 }
@@ -194,7 +163,7 @@
 
 static int si2165_writereg8(struct si2165_state *state, const u16 reg, u8 val)
 {
-	return si2165_write(state, reg, &val, 1);
+	return regmap_write(state->regmap, reg, val);
 }
 
 static int si2165_writereg16(struct si2165_state *state, const u16 reg, u16 val)
@@ -345,7 +314,7 @@
 			return 0;
 		usleep_range(1000, 50000);
 	}
-	dev_err(&state->i2c->dev, "%s: init_done was not set\n",
+	dev_err(&state->client->dev, "%s: init_done was not set\n",
 		KBUILD_MODNAME);
 	return ret;
 }
@@ -374,14 +343,14 @@
 		wordcount = data[offset];
 		if (wordcount < 1 || data[offset+1] ||
 		    data[offset+2] || data[offset+3]) {
-			dev_warn(&state->i2c->dev,
+			dev_warn(&state->client->dev,
 				 "%s: bad fw data[0..3] = %*ph\n",
 				KBUILD_MODNAME, 4, data);
 			return -EINVAL;
 		}
 
 		if (offset + 8 + wordcount * 4 > len) {
-			dev_warn(&state->i2c->dev,
+			dev_warn(&state->client->dev,
 				 "%s: len is too small for block len=%d, wordcount=%d\n",
 				KBUILD_MODNAME, len, wordcount);
 			return -EINVAL;
@@ -444,15 +413,15 @@
 		fw_file = SI2165_FIRMWARE_REV_D;
 		break;
 	default:
-		dev_info(&state->i2c->dev, "%s: no firmware file for revision=%d\n",
+		dev_info(&state->client->dev, "%s: no firmware file for revision=%d\n",
 			KBUILD_MODNAME, state->chip_revcode);
 		return 0;
 	}
 
 	/* request the firmware, this will block and timeout */
-	ret = request_firmware(&fw, fw_file, state->i2c->dev.parent);
+	ret = request_firmware(&fw, fw_file, &state->client->dev);
 	if (ret) {
-		dev_warn(&state->i2c->dev, "%s: firmware file '%s' not found\n",
+		dev_warn(&state->client->dev, "%s: firmware file '%s' not found\n",
 				KBUILD_MODNAME, fw_file);
 		goto error;
 	}
@@ -460,11 +429,11 @@
 	data = fw->data;
 	len = fw->size;
 
-	dev_info(&state->i2c->dev, "%s: downloading firmware from file '%s' size=%d\n",
+	dev_info(&state->client->dev, "%s: downloading firmware from file '%s' size=%d\n",
 			KBUILD_MODNAME, fw_file, len);
 
 	if (len % 4 != 0) {
-		dev_warn(&state->i2c->dev, "%s: firmware size is not multiple of 4\n",
+		dev_warn(&state->client->dev, "%s: firmware size is not multiple of 4\n",
 				KBUILD_MODNAME);
 		ret = -EINVAL;
 		goto error;
@@ -472,14 +441,14 @@
 
 	/* check header (8 bytes) */
 	if (len < 8) {
-		dev_warn(&state->i2c->dev, "%s: firmware header is missing\n",
+		dev_warn(&state->client->dev, "%s: firmware header is missing\n",
 				KBUILD_MODNAME);
 		ret = -EINVAL;
 		goto error;
 	}
 
 	if (data[0] != 1 || data[1] != 0) {
-		dev_warn(&state->i2c->dev, "%s: firmware file version is wrong\n",
+		dev_warn(&state->client->dev, "%s: firmware file version is wrong\n",
 				KBUILD_MODNAME);
 		ret = -EINVAL;
 		goto error;
@@ -517,7 +486,7 @@
 	/* start right after the header */
 	offset = 8;
 
-	dev_info(&state->i2c->dev, "%s: si2165_upload_firmware extracted patch_version=0x%02x, block_count=0x%02x, crc_expected=0x%04x\n",
+	dev_info(&state->client->dev, "%s: si2165_upload_firmware extracted patch_version=0x%02x, block_count=0x%02x, crc_expected=0x%04x\n",
 		KBUILD_MODNAME, patch_version, block_count, crc_expected);
 
 	ret = si2165_upload_firmware_block(state, data, len, &offset, 1);
@@ -536,7 +505,7 @@
 	ret = si2165_upload_firmware_block(state, data, len,
 					   &offset, block_count);
 	if (ret < 0) {
-		dev_err(&state->i2c->dev,
+		dev_err(&state->client->dev,
 			"%s: firmware could not be uploaded\n",
 			KBUILD_MODNAME);
 		goto error;
@@ -548,7 +517,7 @@
 		goto error;
 
 	if (val16 != crc_expected) {
-		dev_err(&state->i2c->dev,
+		dev_err(&state->client->dev,
 			"%s: firmware crc mismatch %04x != %04x\n",
 			KBUILD_MODNAME, val16, crc_expected);
 		ret = -EINVAL;
@@ -560,7 +529,7 @@
 		goto error;
 
 	if (len != offset) {
-		dev_err(&state->i2c->dev,
+		dev_err(&state->client->dev,
 			"%s: firmware len mismatch %04x != %04x\n",
 			KBUILD_MODNAME, len, offset);
 		ret = -EINVAL;
@@ -577,7 +546,7 @@
 	if (ret < 0)
 		goto error;
 
-	dev_info(&state->i2c->dev, "%s: fw load finished\n", KBUILD_MODNAME);
+	dev_info(&state->client->dev, "%s: fw load finished\n", KBUILD_MODNAME);
 
 	ret = 0;
 	state->firmware_loaded = true;
@@ -611,7 +580,7 @@
 	if (ret < 0)
 		goto error;
 	if (val != state->config.chip_mode) {
-		dev_err(&state->i2c->dev, "%s: could not set chip_mode\n",
+		dev_err(&state->client->dev, "%s: could not set chip_mode\n",
 			KBUILD_MODNAME);
 		return -EINVAL;
 	}
@@ -751,6 +720,9 @@
 	u64 oversamp;
 	u32 reg_value;
 
+	if (!dvb_rate)
+		return -EINVAL;
+
 	oversamp = si2165_get_fe_clk(state);
 	oversamp <<= 23;
 	do_div(oversamp, dvb_rate);
@@ -769,12 +741,15 @@
 	u32 IF = 0;
 
 	if (!fe->ops.tuner_ops.get_if_frequency) {
-		dev_err(&state->i2c->dev,
+		dev_err(&state->client->dev,
 			"%s: Error: get_if_frequency() not defined at tuner. Can't work without it!\n",
 			KBUILD_MODNAME);
 		return -EINVAL;
 	}
 
+	if (!fe_clk)
+		return -EINVAL;
+
 	fe->ops.tuner_ops.get_if_frequency(fe, &IF);
 	if_freq_shift = IF;
 	if_freq_shift <<= 29;
@@ -1003,14 +978,6 @@
 	return 0;
 }
 
-static void si2165_release(struct dvb_frontend *fe)
-{
-	struct si2165_state *state = fe->demodulator_priv;
-
-	dprintk("%s: called\n", __func__);
-	kfree(state);
-}
-
 static struct dvb_frontend_ops si2165_ops = {
 	.info = {
 		.name = "Silicon Labs ",
@@ -1046,67 +1013,83 @@
 
 	.set_frontend      = si2165_set_frontend,
 	.read_status       = si2165_read_status,
-
-	.release = si2165_release,
 };
 
-struct dvb_frontend *si2165_attach(const struct si2165_config *config,
-				   struct i2c_adapter *i2c)
+static int si2165_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
 {
 	struct si2165_state *state = NULL;
+	struct si2165_platform_data *pdata = client->dev.platform_data;
 	int n;
-	int io_ret;
+	int ret = 0;
 	u8 val;
 	char rev_char;
 	const char *chip_name;
-
-	if (config == NULL || i2c == NULL)
-		goto error;
+	static const struct regmap_config regmap_config = {
+		.reg_bits = 16,
+		.val_bits = 8,
+		.max_register = 0x08ff,
+	};
 
 	/* allocate memory for the internal state */
 	state = kzalloc(sizeof(struct si2165_state), GFP_KERNEL);
-	if (state == NULL)
+	if (state == NULL) {
+		ret = -ENOMEM;
 		goto error;
+	}
+
+	/* create regmap */
+	state->regmap = devm_regmap_init_i2c(client, &regmap_config);
+	if (IS_ERR(state->regmap)) {
+		ret = PTR_ERR(state->regmap);
+		goto error;
+	}
 
 	/* setup the state */
-	state->i2c = i2c;
-	state->config = *config;
+	state->client = client;
+	state->config.i2c_addr = client->addr;
+	state->config.chip_mode = pdata->chip_mode;
+	state->config.ref_freq_Hz = pdata->ref_freq_Hz;
+	state->config.inversion = pdata->inversion;
 
 	if (state->config.ref_freq_Hz < 4000000
 	    || state->config.ref_freq_Hz > 27000000) {
-		dev_err(&state->i2c->dev, "%s: ref_freq of %d Hz not supported by this driver\n",
+		dev_err(&state->client->dev, "%s: ref_freq of %d Hz not supported by this driver\n",
 			 KBUILD_MODNAME, state->config.ref_freq_Hz);
+		ret = -EINVAL;
 		goto error;
 	}
 
 	/* create dvb_frontend */
 	memcpy(&state->fe.ops, &si2165_ops,
 		sizeof(struct dvb_frontend_ops));
+	state->fe.ops.release = NULL;
 	state->fe.demodulator_priv = state;
+	i2c_set_clientdata(client, state);
 
 	/* powerup */
-	io_ret = si2165_writereg8(state, 0x0000, state->config.chip_mode);
-	if (io_ret < 0)
-		goto error;
+	ret = si2165_writereg8(state, 0x0000, state->config.chip_mode);
+	if (ret < 0)
+		goto nodev_error;
 
-	io_ret = si2165_readreg8(state, 0x0000, &val);
-	if (io_ret < 0)
-		goto error;
+	ret = si2165_readreg8(state, 0x0000, &val);
+	if (ret < 0)
+		goto nodev_error;
 	if (val != state->config.chip_mode)
-		goto error;
+		goto nodev_error;
 
-	io_ret = si2165_readreg8(state, 0x0023, &state->chip_revcode);
-	if (io_ret < 0)
-		goto error;
+	ret = si2165_readreg8(state, 0x0023, &state->chip_revcode);
+	if (ret < 0)
+		goto nodev_error;
 
-	io_ret = si2165_readreg8(state, 0x0118, &state->chip_type);
-	if (io_ret < 0)
-		goto error;
+	ret = si2165_readreg8(state, 0x0118, &state->chip_type);
+	if (ret < 0)
+		goto nodev_error;
 
 	/* powerdown */
-	io_ret = si2165_writereg8(state, 0x0000, SI2165_MODE_OFF);
-	if (io_ret < 0)
-		goto error;
+	ret = si2165_writereg8(state, 0x0000, SI2165_MODE_OFF);
+	if (ret < 0)
+		goto nodev_error;
 
 	if (state->chip_revcode < 26)
 		rev_char = 'A' + state->chip_revcode;
@@ -1124,12 +1107,12 @@
 		state->has_dvbc = true;
 		break;
 	default:
-		dev_err(&state->i2c->dev, "%s: Unsupported Silicon Labs chip (type %d, rev %d)\n",
+		dev_err(&state->client->dev, "%s: Unsupported Silicon Labs chip (type %d, rev %d)\n",
 			KBUILD_MODNAME, state->chip_type, state->chip_revcode);
-		goto error;
+		goto nodev_error;
 	}
 
-	dev_info(&state->i2c->dev,
+	dev_info(&state->client->dev,
 		"%s: Detected Silicon Labs %s-%c (type %d, rev %d)\n",
 		KBUILD_MODNAME, chip_name, rev_char, state->chip_type,
 		state->chip_revcode);
@@ -1149,13 +1132,46 @@
 			sizeof(state->fe.ops.info.name));
 	}
 
-	return &state->fe;
+	/* return fe pointer */
+	*pdata->fe = &state->fe;
 
+	return 0;
+
+nodev_error:
+	ret = -ENODEV;
 error:
 	kfree(state);
-	return NULL;
+	dev_dbg(&client->dev, "failed=%d\n", ret);
+	return ret;
 }
-EXPORT_SYMBOL(si2165_attach);
+
+static int si2165_remove(struct i2c_client *client)
+{
+	struct si2165_state *state = i2c_get_clientdata(client);
+
+	dev_dbg(&client->dev, "\n");
+
+	kfree(state);
+	return 0;
+}
+
+static const struct i2c_device_id si2165_id_table[] = {
+	{"si2165", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, si2165_id_table);
+
+static struct i2c_driver si2165_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "si2165",
+	},
+	.probe		= si2165_probe,
+	.remove		= si2165_remove,
+	.id_table	= si2165_id_table,
+};
+
+module_i2c_driver(si2165_driver);
 
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/si2165.h b/drivers/media/dvb-frontends/si2165.h
index 8a15d6a..76c2ca7 100644
--- a/drivers/media/dvb-frontends/si2165.h
+++ b/drivers/media/dvb-frontends/si2165.h
@@ -28,10 +28,15 @@
 	SI2165_MODE_PLL_XTAL = 0x21
 };
 
-struct si2165_config {
-	/* i2c addr
-	 * possible values: 0x64,0x65,0x66,0x67 */
-	u8 i2c_addr;
+/* I2C addresses
+ * possible values: 0x64,0x65,0x66,0x67
+ */
+struct si2165_platform_data {
+	/*
+	 * frontend
+	 * returned by driver
+	 */
+	struct dvb_frontend **fe;
 
 	/* external clock or XTAL */
 	u8 chip_mode;
@@ -45,18 +50,4 @@
 	bool inversion;
 };
 
-#if IS_REACHABLE(CONFIG_DVB_SI2165)
-struct dvb_frontend *si2165_attach(
-	const struct si2165_config *config,
-	struct i2c_adapter *i2c);
-#else
-static inline struct dvb_frontend *si2165_attach(
-	const struct si2165_config *config,
-	struct i2c_adapter *i2c)
-{
-	pr_warn("%s: driver disabled by Kconfig\n", __func__);
-	return NULL;
-}
-#endif /* CONFIG_DVB_SI2165 */
-
 #endif /* _DVB_SI2165_H */
diff --git a/drivers/media/dvb-frontends/si2165_priv.h b/drivers/media/dvb-frontends/si2165_priv.h
index 2b70cf1..e593211 100644
--- a/drivers/media/dvb-frontends/si2165_priv.h
+++ b/drivers/media/dvb-frontends/si2165_priv.h
@@ -20,4 +20,21 @@
 
 #define SI2165_FIRMWARE_REV_D "dvb-demod-si2165.fw"
 
+struct si2165_config {
+	/* i2c addr
+	 * possible values: 0x64,0x65,0x66,0x67 */
+	u8 i2c_addr;
+
+	/* external clock or XTAL */
+	u8 chip_mode;
+
+	/* frequency of external clock or xtal in Hz
+	 * possible values: 4000000, 16000000, 20000000, 240000000, 27000000
+	 */
+	u32 ref_freq_Hz;
+
+	/* invert the spectrum */
+	bool inversion;
+};
+
 #endif /* _DVB_SI2165_PRIV */
diff --git a/drivers/media/dvb-frontends/si21xx.h b/drivers/media/dvb-frontends/si21xx.h
index ef5f351..b1be62f 100644
--- a/drivers/media/dvb-frontends/si21xx.h
+++ b/drivers/media/dvb-frontends/si21xx.h
@@ -1,7 +1,6 @@
 #ifndef SI21XX_H
 #define SI21XX_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include "dvb_frontend.h"
 
diff --git a/drivers/media/dvb-frontends/sp2.h b/drivers/media/dvb-frontends/sp2.h
index 6cceea0..3901cd7 100644
--- a/drivers/media/dvb-frontends/sp2.h
+++ b/drivers/media/dvb-frontends/sp2.h
@@ -17,7 +17,6 @@
 #ifndef SP2_H
 #define SP2_H
 
-#include <linux/kconfig.h>
 #include "dvb_ca_en50221.h"
 
 /*
diff --git a/drivers/media/dvb-frontends/stb6000.c b/drivers/media/dvb-frontends/stb6000.c
index a0c3c52..73347d5 100644
--- a/drivers/media/dvb-frontends/stb6000.c
+++ b/drivers/media/dvb-frontends/stb6000.c
@@ -186,7 +186,7 @@
 	return 0;
 }
 
-static struct dvb_tuner_ops stb6000_tuner_ops = {
+static const struct dvb_tuner_ops stb6000_tuner_ops = {
 	.info = {
 		.name = "ST STB6000",
 		.frequency_min = 950000,
diff --git a/drivers/media/dvb-frontends/stb6000.h b/drivers/media/dvb-frontends/stb6000.h
index da581b6..78e75df 100644
--- a/drivers/media/dvb-frontends/stb6000.h
+++ b/drivers/media/dvb-frontends/stb6000.h
@@ -23,7 +23,6 @@
 #ifndef __DVB_STB6000_H__
 #define __DVB_STB6000_H__
 
-#include <linux/kconfig.h>
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
index b9c2511..5add118 100644
--- a/drivers/media/dvb-frontends/stb6100.c
+++ b/drivers/media/dvb-frontends/stb6100.c
@@ -522,7 +522,7 @@
 	return 0;
 }
 
-static struct dvb_tuner_ops stb6100_ops = {
+static const struct dvb_tuner_ops stb6100_ops = {
 	.info = {
 		.name			= "STB6100 Silicon Tuner",
 		.frequency_min		= 950000,
diff --git a/drivers/media/dvb-frontends/stv0288.h b/drivers/media/dvb-frontends/stv0288.h
index b58603c..803acb9 100644
--- a/drivers/media/dvb-frontends/stv0288.h
+++ b/drivers/media/dvb-frontends/stv0288.h
@@ -27,7 +27,6 @@
 #ifndef STV0288_H
 #define STV0288_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include "dvb_frontend.h"
 
diff --git a/drivers/media/dvb-frontends/stv0367.h b/drivers/media/dvb-frontends/stv0367.h
index 92b3e85..b88166a 100644
--- a/drivers/media/dvb-frontends/stv0367.h
+++ b/drivers/media/dvb-frontends/stv0367.h
@@ -26,7 +26,6 @@
 #ifndef STV0367_H
 #define STV0367_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include "dvb_frontend.h"
 
diff --git a/drivers/media/dvb-frontends/stv0900.h b/drivers/media/dvb-frontends/stv0900.h
index c90bf00..9ca2da9 100644
--- a/drivers/media/dvb-frontends/stv0900.h
+++ b/drivers/media/dvb-frontends/stv0900.h
@@ -26,7 +26,6 @@
 #ifndef STV0900_H
 #define STV0900_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include "dvb_frontend.h"
 
diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
index 91c6dcf..66a5a7f 100644
--- a/drivers/media/dvb-frontends/stv6110.c
+++ b/drivers/media/dvb-frontends/stv6110.c
@@ -382,7 +382,7 @@
 	return 0;
 }
 
-static struct dvb_tuner_ops stv6110_tuner_ops = {
+static const struct dvb_tuner_ops stv6110_tuner_ops = {
 	.info = {
 		.name = "ST STV6110",
 		.frequency_min = 950000,
diff --git a/drivers/media/dvb-frontends/stv6110.h b/drivers/media/dvb-frontends/stv6110.h
index f3c8a5c..4604f79 100644
--- a/drivers/media/dvb-frontends/stv6110.h
+++ b/drivers/media/dvb-frontends/stv6110.h
@@ -25,7 +25,6 @@
 #ifndef __DVB_STV6110_H__
 #define __DVB_STV6110_H__
 
-#include <linux/kconfig.h>
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index a62c01e..c611ad2 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -345,7 +345,7 @@
 	return 0;
 }
 
-static struct dvb_tuner_ops stv6110x_ops = {
+static const struct dvb_tuner_ops stv6110x_ops = {
 	.info = {
 		.name		= "STV6110(A) Silicon Tuner",
 		.frequency_min	=  950000,
diff --git a/drivers/media/dvb-frontends/tda10048.h b/drivers/media/dvb-frontends/tda10048.h
index bc77a73..a2cebb0 100644
--- a/drivers/media/dvb-frontends/tda10048.h
+++ b/drivers/media/dvb-frontends/tda10048.h
@@ -22,7 +22,6 @@
 #ifndef TDA10048_H
 #define TDA10048_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include <linux/firmware.h>
 
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
index de0a1c1..bc247f9 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.c
+++ b/drivers/media/dvb-frontends/tda18271c2dd.c
@@ -1217,7 +1217,7 @@
 }
 
 
-static struct dvb_tuner_ops tuner_ops = {
+static const struct dvb_tuner_ops tuner_ops = {
 	.info = {
 		.name = "NXP TDA18271C2D",
 		.frequency_min  =  47125000,
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.h b/drivers/media/dvb-frontends/tda18271c2dd.h
index 7ebd8ea..e6ccf24 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.h
+++ b/drivers/media/dvb-frontends/tda18271c2dd.h
@@ -1,8 +1,6 @@
 #ifndef _TDA18271C2DD_H_
 #define _TDA18271C2DD_H_
 
-#include <linux/kconfig.h>
-
 #if IS_REACHABLE(CONFIG_DVB_TDA18271C2DD)
 struct dvb_frontend *tda18271c2dd_attach(struct dvb_frontend *fe,
 					 struct i2c_adapter *i2c, u8 adr);
diff --git a/drivers/media/dvb-frontends/tda665x.c b/drivers/media/dvb-frontends/tda665x.c
index 82f8cc5..7ca9659 100644
--- a/drivers/media/dvb-frontends/tda665x.c
+++ b/drivers/media/dvb-frontends/tda665x.c
@@ -206,7 +206,7 @@
 	return 0;
 }
 
-static struct dvb_tuner_ops tda665x_ops = {
+static const struct dvb_tuner_ops tda665x_ops = {
 	.get_status	= tda665x_get_status,
 	.set_params	= tda665x_set_params,
 	.get_frequency	= tda665x_get_frequency,
diff --git a/drivers/media/dvb-frontends/tda8261.c b/drivers/media/dvb-frontends/tda8261.c
index 3285b1b..e0df931 100644
--- a/drivers/media/dvb-frontends/tda8261.c
+++ b/drivers/media/dvb-frontends/tda8261.c
@@ -161,7 +161,7 @@
 	return 0;
 }
 
-static struct dvb_tuner_ops tda8261_ops = {
+static const struct dvb_tuner_ops tda8261_ops = {
 
 	.info = {
 		.name		= "TDA8261",
diff --git a/drivers/media/dvb-frontends/tda826x.c b/drivers/media/dvb-frontends/tda826x.c
index 04bbcc2..2ec671d 100644
--- a/drivers/media/dvb-frontends/tda826x.c
+++ b/drivers/media/dvb-frontends/tda826x.c
@@ -129,7 +129,7 @@
 	return 0;
 }
 
-static struct dvb_tuner_ops tda826x_tuner_ops = {
+static const struct dvb_tuner_ops tda826x_tuner_ops = {
 	.info = {
 		.name = "Philips TDA826X",
 		.frequency_min = 950000,
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index 14b410f..a9f6bbe 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -496,7 +496,7 @@
 	return 0;
 }
 
-static struct dvb_tuner_ops ts2020_tuner_ops = {
+static const struct dvb_tuner_ops ts2020_tuner_ops = {
 	.info = {
 		.name = "TS2020",
 		.frequency_min = 950000,
diff --git a/drivers/media/dvb-frontends/ts2020.h b/drivers/media/dvb-frontends/ts2020.h
index 9220e5c..facc54f 100644
--- a/drivers/media/dvb-frontends/ts2020.h
+++ b/drivers/media/dvb-frontends/ts2020.h
@@ -22,7 +22,6 @@
 #ifndef TS2020_H
 #define TS2020_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct ts2020_config {
diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
index 029384d..6da12b9 100644
--- a/drivers/media/dvb-frontends/tua6100.c
+++ b/drivers/media/dvb-frontends/tua6100.c
@@ -157,7 +157,7 @@
 	return 0;
 }
 
-static struct dvb_tuner_ops tua6100_tuner_ops = {
+static const struct dvb_tuner_ops tua6100_tuner_ops = {
 	.info = {
 		.name = "Infineon TUA6100",
 		.frequency_min = 950000,
diff --git a/drivers/media/dvb-frontends/zl10036.c b/drivers/media/dvb-frontends/zl10036.c
index 0903d46..7ed8131 100644
--- a/drivers/media/dvb-frontends/zl10036.c
+++ b/drivers/media/dvb-frontends/zl10036.c
@@ -446,7 +446,7 @@
 	return ret;
 }
 
-static struct dvb_tuner_ops zl10036_tuner_ops = {
+static const struct dvb_tuner_ops zl10036_tuner_ops = {
 	.info = {
 		.name = "Zarlink ZL10036",
 		.frequency_min = 950000,
diff --git a/drivers/media/dvb-frontends/zl10036.h b/drivers/media/dvb-frontends/zl10036.h
index 670e76a..c568d8d 100644
--- a/drivers/media/dvb-frontends/zl10036.h
+++ b/drivers/media/dvb-frontends/zl10036.h
@@ -21,7 +21,6 @@
 #ifndef DVB_ZL10036_H
 #define DVB_ZL10036_H
 
-#include <linux/kconfig.h>
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c
index ee09ec2..f8c271b 100644
--- a/drivers/media/dvb-frontends/zl10039.c
+++ b/drivers/media/dvb-frontends/zl10039.c
@@ -255,7 +255,7 @@
 	return 0;
 }
 
-static struct dvb_tuner_ops zl10039_ops = {
+static const struct dvb_tuner_ops zl10039_ops = {
 	.release = zl10039_release,
 	.init = zl10039_init,
 	.sleep = zl10039_sleep,
diff --git a/drivers/media/dvb-frontends/zl10039.h b/drivers/media/dvb-frontends/zl10039.h
index 0709294..66e7085 100644
--- a/drivers/media/dvb-frontends/zl10039.h
+++ b/drivers/media/dvb-frontends/zl10039.h
@@ -22,8 +22,6 @@
 #ifndef ZL10039_H
 #define ZL10039_H
 
-#include <linux/kconfig.h>
-
 #if IS_REACHABLE(CONFIG_DVB_ZL10039)
 struct dvb_frontend *zl10039_attach(struct dvb_frontend *fe,
 					u8 i2c_addr,
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index ce9006e..2669b4b 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -21,7 +21,7 @@
 # Encoder / Decoder module configuration
 #
 
-menu "Encoders, decoders, sensors and other helper chips"
+menu "I2C Encoders, decoders, sensors and other helper chips"
 	visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST
 
 comment "Audio decoders, processors and mixers"
@@ -187,7 +187,7 @@
 
 config VIDEO_ADV7180
 	tristate "Analog Devices ADV7180 decoder"
-	depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+	depends on GPIOLIB && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
 	---help---
 	  Support for the Analog Devices ADV7180 video decoder.
 
@@ -295,6 +295,13 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called ml86v7667.
 
+config VIDEO_AD5820
+	tristate "AD5820 lens voice coil support"
+	depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+	---help---
+	  This is a driver for the AD5820 camera lens voice coil.
+	  It is used for example in Nokia N900 (RX-51).
+
 config VIDEO_SAA7110
 	tristate "Philips SAA7110 video decoder"
 	depends on VIDEO_V4L2 && I2C
@@ -571,6 +578,13 @@
 	  This driver supports MT9M032 camera sensors from Aptina, monochrome
 	  models only.
 
+config VIDEO_MT9M111
+	tristate "mt9m111, mt9m112 and mt9m131 support"
+	depends on I2C && VIDEO_V4L2
+	help
+	  This driver supports MT9M111, MT9M112 and MT9M131 cameras from
+	  Micron/Aptina
+
 config VIDEO_MT9P031
 	tristate "Aptina MT9P031 support"
 	depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 94f2c99..92773b2 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -19,6 +19,7 @@
 obj-$(CONFIG_VIDEO_SAA7127) += saa7127.o
 obj-$(CONFIG_VIDEO_SAA7185) += saa7185.o
 obj-$(CONFIG_VIDEO_SAA6752HS) += saa6752hs.o
+obj-$(CONFIG_VIDEO_AD5820)  += ad5820.o
 obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o
 obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
 obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o
@@ -59,6 +60,7 @@
 obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
 obj-$(CONFIG_VIDEO_OV9650) += ov9650.o
 obj-$(CONFIG_VIDEO_MT9M032) += mt9m032.o
+obj-$(CONFIG_VIDEO_MT9M111) += mt9m111.o
 obj-$(CONFIG_VIDEO_MT9P031) += mt9p031.o
 obj-$(CONFIG_VIDEO_MT9T001) += mt9t001.o
 obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
new file mode 100644
index 0000000..beab2f3
--- /dev/null
+++ b/drivers/media/i2c/ad5820.c
@@ -0,0 +1,372 @@
+/*
+ * drivers/media/i2c/ad5820.c
+ *
+ * AD5820 DAC driver for camera voice coil focus.
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2007 Texas Instruments
+ * Copyright (C) 2016 Pavel Machek <pavel@ucw.cz>
+ *
+ * Contact: Tuukka Toivonen <tuukkat76@gmail.com>
+ *	    Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * Based on af_d88.c by Texas Instruments.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#define AD5820_NAME		"ad5820"
+
+/* Register definitions */
+#define AD5820_POWER_DOWN		(1 << 15)
+#define AD5820_DAC_SHIFT		4
+#define AD5820_RAMP_MODE_LINEAR		(0 << 3)
+#define AD5820_RAMP_MODE_64_16		(1 << 3)
+
+#define CODE_TO_RAMP_US(s)	((s) == 0 ? 0 : (1 << ((s) - 1)) * 50)
+#define RAMP_US_TO_CODE(c)	fls(((c) + ((c)>>1)) / 50)
+
+#define to_ad5820_device(sd)	container_of(sd, struct ad5820_device, subdev)
+
+struct ad5820_device {
+	struct v4l2_subdev subdev;
+	struct ad5820_platform_data *platform_data;
+	struct regulator *vana;
+
+	struct v4l2_ctrl_handler ctrls;
+	u32 focus_absolute;
+	u32 focus_ramp_time;
+	u32 focus_ramp_mode;
+
+	struct mutex power_lock;
+	int power_count;
+
+	bool standby;
+};
+
+static int ad5820_write(struct ad5820_device *coil, u16 data)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&coil->subdev);
+	struct i2c_msg msg;
+	int r;
+
+	if (!client->adapter)
+		return -ENODEV;
+
+	data = cpu_to_be16(data);
+	msg.addr  = client->addr;
+	msg.flags = 0;
+	msg.len   = 2;
+	msg.buf   = (u8 *)&data;
+
+	r = i2c_transfer(client->adapter, &msg, 1);
+	if (r < 0) {
+		dev_err(&client->dev, "write failed, error %d\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+/*
+ * Calculate status word and write it to the device based on current
+ * values of V4L2 controls. It is assumed that the stored V4L2 control
+ * values are properly limited and rounded.
+ */
+static int ad5820_update_hw(struct ad5820_device *coil)
+{
+	u16 status;
+
+	status = RAMP_US_TO_CODE(coil->focus_ramp_time);
+	status |= coil->focus_ramp_mode
+		? AD5820_RAMP_MODE_64_16 : AD5820_RAMP_MODE_LINEAR;
+	status |= coil->focus_absolute << AD5820_DAC_SHIFT;
+
+	if (coil->standby)
+		status |= AD5820_POWER_DOWN;
+
+	return ad5820_write(coil, status);
+}
+
+/*
+ * Power handling
+ */
+static int ad5820_power_off(struct ad5820_device *coil, bool standby)
+{
+	int ret = 0, ret2;
+
+	/*
+	 * Go to standby first as real power off my be denied by the hardware
+	 * (single power line control for both coil and sensor).
+	 */
+	if (standby) {
+		coil->standby = true;
+		ret = ad5820_update_hw(coil);
+	}
+
+	ret2 = regulator_disable(coil->vana);
+	if (ret)
+		return ret;
+	return ret2;
+}
+
+static int ad5820_power_on(struct ad5820_device *coil, bool restore)
+{
+	int ret;
+
+	ret = regulator_enable(coil->vana);
+	if (ret < 0)
+		return ret;
+
+	if (restore) {
+		/* Restore the hardware settings. */
+		coil->standby = false;
+		ret = ad5820_update_hw(coil);
+		if (ret)
+			goto fail;
+	}
+	return 0;
+
+fail:
+	coil->standby = true;
+	regulator_disable(coil->vana);
+
+	return ret;
+}
+
+/*
+ * V4L2 controls
+ */
+static int ad5820_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct ad5820_device *coil =
+		container_of(ctrl->handler, struct ad5820_device, ctrls);
+
+	switch (ctrl->id) {
+	case V4L2_CID_FOCUS_ABSOLUTE:
+		coil->focus_absolute = ctrl->val;
+		return ad5820_update_hw(coil);
+	}
+
+	return 0;
+}
+
+static const struct v4l2_ctrl_ops ad5820_ctrl_ops = {
+	.s_ctrl = ad5820_set_ctrl,
+};
+
+
+static int ad5820_init_controls(struct ad5820_device *coil)
+{
+	v4l2_ctrl_handler_init(&coil->ctrls, 1);
+
+	/*
+	 * V4L2_CID_FOCUS_ABSOLUTE
+	 *
+	 * Minimum current is 0 mA, maximum is 100 mA. Thus, 1 code is
+	 * equivalent to 100/1023 = 0.0978 mA. Nevertheless, we do not use [mA]
+	 * for focus position, because it is meaningless for user. Meaningful
+	 * would be to use focus distance or even its inverse, but since the
+	 * driver doesn't have sufficiently knowledge to do the conversion, we
+	 * will just use abstract codes here. In any case, smaller value = focus
+	 * position farther from camera. The default zero value means focus at
+	 * infinity, and also least current consumption.
+	 */
+	v4l2_ctrl_new_std(&coil->ctrls, &ad5820_ctrl_ops,
+			  V4L2_CID_FOCUS_ABSOLUTE, 0, 1023, 1, 0);
+
+	if (coil->ctrls.error)
+		return coil->ctrls.error;
+
+	coil->focus_absolute = 0;
+	coil->focus_ramp_time = 0;
+	coil->focus_ramp_mode = 0;
+
+	coil->subdev.ctrl_handler = &coil->ctrls;
+
+	return 0;
+}
+
+/*
+ * V4L2 subdev operations
+ */
+static int ad5820_registered(struct v4l2_subdev *subdev)
+{
+	struct ad5820_device *coil = to_ad5820_device(subdev);
+
+	return ad5820_init_controls(coil);
+}
+
+static int
+ad5820_set_power(struct v4l2_subdev *subdev, int on)
+{
+	struct ad5820_device *coil = to_ad5820_device(subdev);
+	int ret = 0;
+
+	mutex_lock(&coil->power_lock);
+
+	/*
+	 * If the power count is modified from 0 to != 0 or from != 0 to 0,
+	 * update the power state.
+	 */
+	if (coil->power_count == !on) {
+		ret = on ? ad5820_power_on(coil, true) :
+			ad5820_power_off(coil, true);
+		if (ret < 0)
+			goto done;
+	}
+
+	/* Update the power count. */
+	coil->power_count += on ? 1 : -1;
+	WARN_ON(coil->power_count < 0);
+
+done:
+	mutex_unlock(&coil->power_lock);
+	return ret;
+}
+
+static int ad5820_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	return ad5820_set_power(sd, 1);
+}
+
+static int ad5820_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	return ad5820_set_power(sd, 0);
+}
+
+static const struct v4l2_subdev_core_ops ad5820_core_ops = {
+	.s_power = ad5820_set_power,
+};
+
+static const struct v4l2_subdev_ops ad5820_ops = {
+	.core = &ad5820_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops ad5820_internal_ops = {
+	.registered = ad5820_registered,
+	.open = ad5820_open,
+	.close = ad5820_close,
+};
+
+/*
+ * I2C driver
+ */
+static int __maybe_unused ad5820_suspend(struct device *dev)
+{
+	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+	struct ad5820_device *coil = to_ad5820_device(subdev);
+
+	if (!coil->power_count)
+		return 0;
+
+	return ad5820_power_off(coil, false);
+}
+
+static int __maybe_unused ad5820_resume(struct device *dev)
+{
+	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+	struct ad5820_device *coil = to_ad5820_device(subdev);
+
+	if (!coil->power_count)
+		return 0;
+
+	return ad5820_power_on(coil, true);
+}
+
+static int ad5820_probe(struct i2c_client *client,
+			const struct i2c_device_id *devid)
+{
+	struct ad5820_device *coil;
+	int ret;
+
+	coil = devm_kzalloc(&client->dev, sizeof(*coil), GFP_KERNEL);
+	if (!coil)
+		return -ENOMEM;
+
+	coil->vana = devm_regulator_get(&client->dev, "VANA");
+	if (IS_ERR(coil->vana)) {
+		ret = PTR_ERR(coil->vana);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&client->dev, "could not get regulator for vana\n");
+		return ret;
+	}
+
+	mutex_init(&coil->power_lock);
+
+	v4l2_i2c_subdev_init(&coil->subdev, client, &ad5820_ops);
+	coil->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	coil->subdev.internal_ops = &ad5820_internal_ops;
+	strcpy(coil->subdev.name, "ad5820 focus");
+
+	ret = media_entity_pads_init(&coil->subdev.entity, 0, NULL);
+	if (ret < 0)
+		goto cleanup2;
+
+	ret = v4l2_async_register_subdev(&coil->subdev);
+	if (ret < 0)
+		goto cleanup;
+
+	return ret;
+
+cleanup2:
+	mutex_destroy(&coil->power_lock);
+cleanup:
+	media_entity_cleanup(&coil->subdev.entity);
+	return ret;
+}
+
+static int __exit ad5820_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+	struct ad5820_device *coil = to_ad5820_device(subdev);
+
+	v4l2_device_unregister_subdev(&coil->subdev);
+	v4l2_ctrl_handler_free(&coil->ctrls);
+	media_entity_cleanup(&coil->subdev.entity);
+	mutex_destroy(&coil->power_lock);
+	return 0;
+}
+
+static const struct i2c_device_id ad5820_id_table[] = {
+	{ AD5820_NAME, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, ad5820_id_table);
+
+static SIMPLE_DEV_PM_OPS(ad5820_pm, ad5820_suspend, ad5820_resume);
+
+static struct i2c_driver ad5820_i2c_driver = {
+	.driver		= {
+		.name	= AD5820_NAME,
+		.pm	= &ad5820_pm,
+	},
+	.probe		= ad5820_probe,
+	.remove		= __exit_p(ad5820_remove),
+	.id_table	= ad5820_id_table,
+};
+
+module_i2c_driver(ad5820_i2c_driver);
+
+MODULE_AUTHOR("Tuukka Toivonen");
+MODULE_DESCRIPTION("AD5820 camera lens driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index 0462f46..50f3541 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -98,7 +98,6 @@
 	struct ad9389b_state_edid edid;
 	/* Running counter of the number of detected EDIDs (for debugging) */
 	unsigned edid_detect_counter;
-	struct workqueue_struct *work_queue;
 	struct delayed_work edid_handler; /* work entry */
 };
 
@@ -843,8 +842,7 @@
 			v4l2_dbg(1, debug, sd, "%s: edid read failed\n", __func__);
 			ad9389b_s_power(sd, false);
 			ad9389b_s_power(sd, true);
-			queue_delayed_work(state->work_queue,
-					   &state->edid_handler, EDID_DELAY);
+			schedule_delayed_work(&state->edid_handler, EDID_DELAY);
 			return;
 		}
 	}
@@ -933,8 +931,7 @@
 		ad9389b_setup(sd);
 		ad9389b_notify_monitor_detect(sd);
 		state->edid.read_retries = EDID_MAX_RETRIES;
-		queue_delayed_work(state->work_queue,
-				   &state->edid_handler, EDID_DELAY);
+		schedule_delayed_work(&state->edid_handler, EDID_DELAY);
 	} else if (!(status & MASK_AD9389B_HPD_DETECT)) {
 		v4l2_dbg(1, debug, sd, "%s: hotplug not detected\n", __func__);
 		state->have_monitor = false;
@@ -1065,8 +1062,7 @@
 		ad9389b_wr(sd, 0xc9, 0xf);
 		ad9389b_wr(sd, 0xc4, state->edid.segments);
 		state->edid.read_retries = EDID_MAX_RETRIES;
-		queue_delayed_work(state->work_queue,
-				   &state->edid_handler, EDID_DELAY);
+		schedule_delayed_work(&state->edid_handler, EDID_DELAY);
 		return false;
 	}
 
@@ -1170,13 +1166,6 @@
 		goto err_entity;
 	}
 
-	state->work_queue = create_singlethread_workqueue(sd->name);
-	if (state->work_queue == NULL) {
-		v4l2_err(sd, "could not create workqueue\n");
-		err = -ENOMEM;
-		goto err_unreg;
-	}
-
 	INIT_DELAYED_WORK(&state->edid_handler, ad9389b_edid_handler);
 	state->dv_timings = dv1080p60;
 
@@ -1187,8 +1176,6 @@
 		  client->addr << 1, client->adapter->name);
 	return 0;
 
-err_unreg:
-	i2c_unregister_device(state->edid_i2c_client);
 err_entity:
 	media_entity_cleanup(&sd->entity);
 err_hdl:
@@ -1211,9 +1198,8 @@
 	ad9389b_s_stream(sd, false);
 	ad9389b_s_audio_stream(sd, false);
 	ad9389b_init_setup(sd);
-	cancel_delayed_work(&state->edid_handler);
+	cancel_delayed_work_sync(&state->edid_handler);
 	i2c_unregister_device(state->edid_i2c_client);
-	destroy_workqueue(state->work_queue);
 	v4l2_device_unregister_subdev(sd);
 	media_entity_cleanup(&sd->entity);
 	v4l2_ctrl_handler_free(sd->ctrl_handler);
@@ -1231,7 +1217,6 @@
 
 static struct i2c_driver ad9389b_driver = {
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "ad9389b",
 	},
 	.probe = ad9389b_probe,
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 95cbc85..cbed2bc 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -26,6 +26,7 @@
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/gpio/consumer.h>
 #include <linux/videodev2.h>
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-event.h>
@@ -56,10 +57,11 @@
 
 #define ADV7182_REG_INPUT_VIDSEL			0x0002
 
+#define ADV7180_REG_OUTPUT_CONTROL			0x0003
 #define ADV7180_REG_EXTENDED_OUTPUT_CONTROL		0x0004
 #define ADV7180_EXTENDED_OUTPUT_CONTROL_NTSCDIS		0xC5
 
-#define ADV7180_REG_AUTODETECT_ENABLE			0x07
+#define ADV7180_REG_AUTODETECT_ENABLE			0x0007
 #define ADV7180_AUTODETECT_DEFAULT			0x7f
 /* Contrast */
 #define ADV7180_REG_CON		0x0008	/*Unsigned */
@@ -100,6 +102,20 @@
 #define ADV7180_REG_IDENT 0x0011
 #define ADV7180_ID_7180 0x18
 
+#define ADV7180_REG_STATUS3		0x0013
+#define ADV7180_REG_ANALOG_CLAMP_CTL	0x0014
+#define ADV7180_REG_SHAP_FILTER_CTL_1	0x0017
+#define ADV7180_REG_CTRL_2		0x001d
+#define ADV7180_REG_VSYNC_FIELD_CTL_1	0x0031
+#define ADV7180_REG_MANUAL_WIN_CTL_1	0x003d
+#define ADV7180_REG_MANUAL_WIN_CTL_2	0x003e
+#define ADV7180_REG_MANUAL_WIN_CTL_3	0x003f
+#define ADV7180_REG_LOCK_CNT		0x0051
+#define ADV7180_REG_CVBS_TRIM		0x0052
+#define ADV7180_REG_CLAMP_ADJ		0x005a
+#define ADV7180_REG_RES_CIR		0x005f
+#define ADV7180_REG_DIFF_MODE		0x0060
+
 #define ADV7180_REG_ICONF1		0x2040
 #define ADV7180_ICONF1_ACTIVE_LOW	0x01
 #define ADV7180_ICONF1_PSYNC_ONLY	0x10
@@ -129,9 +145,15 @@
 #define ADV7180_REG_VPP_SLAVE_ADDR	0xFD
 #define ADV7180_REG_CSI_SLAVE_ADDR	0xFE
 
-#define ADV7180_REG_FLCONTROL 0x40e0
+#define ADV7180_REG_ACE_CTRL1		0x4080
+#define ADV7180_REG_ACE_CTRL5		0x4084
+#define ADV7180_REG_FLCONTROL		0x40e0
 #define ADV7180_FLCONTROL_FL_ENABLE 0x1
 
+#define ADV7180_REG_RST_CLAMP	0x809c
+#define ADV7180_REG_AGC_ADJ1	0x80b6
+#define ADV7180_REG_AGC_ADJ2	0x80c0
+
 #define ADV7180_CSI_REG_PWRDN	0x00
 #define ADV7180_CSI_PWRDN	0x80
 
@@ -192,6 +214,7 @@
 	struct media_pad	pad;
 	struct mutex		mutex; /* mutual excl. when accessing chip */
 	int			irq;
+	struct gpio_desc	*pwdn_gpio;
 	v4l2_std_id		curr_norm;
 	bool			powered;
 	bool			streaming;
@@ -442,6 +465,19 @@
 	return 0;
 }
 
+static void adv7180_set_power_pin(struct adv7180_state *state, bool on)
+{
+	if (!state->pwdn_gpio)
+		return;
+
+	if (on) {
+		gpiod_set_value_cansleep(state->pwdn_gpio, 0);
+		usleep_range(5000, 10000);
+	} else {
+		gpiod_set_value_cansleep(state->pwdn_gpio, 1);
+	}
+}
+
 static int adv7180_set_power(struct adv7180_state *state, bool on)
 {
 	u8 val;
@@ -597,7 +633,7 @@
 	if (code->index != 0)
 		return -EINVAL;
 
-	code->code = MEDIA_BUS_FMT_YUYV8_2X8;
+	code->code = MEDIA_BUS_FMT_UYVY8_2X8;
 
 	return 0;
 }
@@ -607,7 +643,7 @@
 {
 	struct adv7180_state *state = to_state(sd);
 
-	fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
+	fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
 	fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
 	fmt->width = 720;
 	fmt->height = state->curr_norm & V4L2_STD_525_60 ? 480 : 576;
@@ -675,6 +711,7 @@
 {
 	struct adv7180_state *state = to_state(sd);
 	struct v4l2_mbus_framefmt *framefmt;
+	int ret;
 
 	switch (format->format.field) {
 	case V4L2_FIELD_NONE:
@@ -686,8 +723,9 @@
 		break;
 	}
 
+	ret = adv7180_mbus_fmt(sd,  &format->format);
+
 	if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
-		framefmt = &format->format;
 		if (state->field != format->format.field) {
 			state->field = format->format.field;
 			adv7180_set_power(state, false);
@@ -699,7 +737,7 @@
 		*framefmt = format->format;
 	}
 
-	return adv7180_mbus_fmt(sd, framefmt);
+	return ret;
 }
 
 static int adv7180_g_mbus_config(struct v4l2_subdev *sd,
@@ -725,16 +763,16 @@
 	return 0;
 }
 
-static int adv7180_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *cropcap)
+static int adv7180_g_pixelaspect(struct v4l2_subdev *sd, struct v4l2_fract *aspect)
 {
 	struct adv7180_state *state = to_state(sd);
 
 	if (state->curr_norm & V4L2_STD_525_60) {
-		cropcap->pixelaspect.numerator = 11;
-		cropcap->pixelaspect.denominator = 10;
+		aspect->numerator = 11;
+		aspect->denominator = 10;
 	} else {
-		cropcap->pixelaspect.numerator = 54;
-		cropcap->pixelaspect.denominator = 59;
+		aspect->numerator = 54;
+		aspect->denominator = 59;
 	}
 
 	return 0;
@@ -787,7 +825,7 @@
 	.g_input_status = adv7180_g_input_status,
 	.s_routing = adv7180_s_routing,
 	.g_mbus_config = adv7180_g_mbus_config,
-	.cropcap = adv7180_cropcap,
+	.g_pixelaspect = adv7180_g_pixelaspect,
 	.g_tvnorms = adv7180_g_tvnorms,
 	.s_stream = adv7180_s_stream,
 };
@@ -886,16 +924,20 @@
 
 	/* ADI required writes */
 	if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2) {
-		adv7180_write(state, 0x0003, 0x4e);
-		adv7180_write(state, 0x0004, 0x57);
-		adv7180_write(state, 0x001d, 0xc0);
+		adv7180_write(state, ADV7180_REG_OUTPUT_CONTROL, 0x4e);
+		adv7180_write(state, ADV7180_REG_EXTENDED_OUTPUT_CONTROL, 0x57);
+		adv7180_write(state, ADV7180_REG_CTRL_2, 0xc0);
 	} else {
 		if (state->chip_info->flags & ADV7180_FLAG_V2)
-			adv7180_write(state, 0x0004, 0x17);
+			adv7180_write(state,
+				      ADV7180_REG_EXTENDED_OUTPUT_CONTROL,
+				      0x17);
 		else
-			adv7180_write(state, 0x0004, 0x07);
-		adv7180_write(state, 0x0003, 0x0c);
-		adv7180_write(state, 0x001d, 0x40);
+			adv7180_write(state,
+				      ADV7180_REG_EXTENDED_OUTPUT_CONTROL,
+				      0x07);
+		adv7180_write(state, ADV7180_REG_OUTPUT_CONTROL, 0x0c);
+		adv7180_write(state, ADV7180_REG_CTRL_2, 0x40);
 	}
 
 	adv7180_write(state, 0x0013, 0x00);
@@ -972,8 +1014,8 @@
 		return ret;
 
 	/* Reset clamp circuitry - ADI recommended writes */
-	adv7180_write(state, 0x809c, 0x00);
-	adv7180_write(state, 0x809c, 0xff);
+	adv7180_write(state, ADV7180_REG_RST_CLAMP, 0x00);
+	adv7180_write(state, ADV7180_REG_RST_CLAMP, 0xff);
 
 	input_type = adv7182_get_input_type(input);
 
@@ -981,10 +1023,10 @@
 	case ADV7182_INPUT_TYPE_CVBS:
 	case ADV7182_INPUT_TYPE_DIFF_CVBS:
 		/* ADI recommends to use the SH1 filter */
-		adv7180_write(state, 0x0017, 0x41);
+		adv7180_write(state, ADV7180_REG_SHAP_FILTER_CTL_1, 0x41);
 		break;
 	default:
-		adv7180_write(state, 0x0017, 0x01);
+		adv7180_write(state, ADV7180_REG_SHAP_FILTER_CTL_1, 0x01);
 		break;
 	}
 
@@ -994,21 +1036,21 @@
 		lbias = adv7182_lbias_settings[input_type];
 
 	for (i = 0; i < ARRAY_SIZE(adv7182_lbias_settings[0]); i++)
-		adv7180_write(state, 0x0052 + i, lbias[i]);
+		adv7180_write(state, ADV7180_REG_CVBS_TRIM + i, lbias[i]);
 
 	if (input_type == ADV7182_INPUT_TYPE_DIFF_CVBS) {
 		/* ADI required writes to make differential CVBS work */
-		adv7180_write(state, 0x005f, 0xa8);
-		adv7180_write(state, 0x005a, 0x90);
-		adv7180_write(state, 0x0060, 0xb0);
-		adv7180_write(state, 0x80b6, 0x08);
-		adv7180_write(state, 0x80c0, 0xa0);
+		adv7180_write(state, ADV7180_REG_RES_CIR, 0xa8);
+		adv7180_write(state, ADV7180_REG_CLAMP_ADJ, 0x90);
+		adv7180_write(state, ADV7180_REG_DIFF_MODE, 0xb0);
+		adv7180_write(state, ADV7180_REG_AGC_ADJ1, 0x08);
+		adv7180_write(state, ADV7180_REG_AGC_ADJ2, 0xa0);
 	} else {
-		adv7180_write(state, 0x005f, 0xf0);
-		adv7180_write(state, 0x005a, 0xd0);
-		adv7180_write(state, 0x0060, 0x10);
-		adv7180_write(state, 0x80b6, 0x9c);
-		adv7180_write(state, 0x80c0, 0x00);
+		adv7180_write(state, ADV7180_REG_RES_CIR, 0xf0);
+		adv7180_write(state, ADV7180_REG_CLAMP_ADJ, 0xd0);
+		adv7180_write(state, ADV7180_REG_DIFF_MODE, 0x10);
+		adv7180_write(state, ADV7180_REG_AGC_ADJ1, 0x9c);
+		adv7180_write(state, ADV7180_REG_AGC_ADJ2, 0x00);
 	}
 
 	return 0;
@@ -1185,6 +1227,8 @@
 
 	mutex_lock(&state->mutex);
 
+	adv7180_set_power_pin(state, true);
+
 	adv7180_write(state, ADV7180_REG_PWR_MAN, ADV7180_PWR_MAN_RES);
 	usleep_range(5000, 10000);
 
@@ -1254,6 +1298,14 @@
 	state->field = V4L2_FIELD_INTERLACED;
 	state->chip_info = (struct adv7180_chip_info *)id->driver_data;
 
+	state->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "powerdown",
+						   GPIOD_OUT_HIGH);
+	if (IS_ERR(state->pwdn_gpio)) {
+		ret = PTR_ERR(state->pwdn_gpio);
+		v4l_err(client, "request for power pin failed: %d\n", ret);
+		return ret;
+	}
+
 	if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2) {
 		state->csi_client = i2c_new_dummy(client->adapter,
 				ADV7180_DEFAULT_CSI_I2C_ADDR);
@@ -1345,6 +1397,8 @@
 	if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2)
 		i2c_unregister_device(state->csi_client);
 
+	adv7180_set_power_pin(state, false);
+
 	mutex_destroy(&state->mutex);
 
 	return 0;
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index 2bec737..04eecda 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -644,7 +644,6 @@
 
 static struct i2c_driver adv7183_driver = {
 	.driver = {
-		.owner  = THIS_MODULE,
 		.name   = "adv7183",
 	},
 	.probe          = adv7183_probe,
diff --git a/drivers/media/i2c/adv7393.c b/drivers/media/i2c/adv7393.c
index 76d9874..f19ad4e 100644
--- a/drivers/media/i2c/adv7393.c
+++ b/drivers/media/i2c/adv7393.c
@@ -456,7 +456,6 @@
 
 static struct i2c_driver adv7393_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "adv7393",
 	},
 	.probe		= adv7393_probe,
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 53030d6..5ba0f21 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -1898,6 +1898,7 @@
 					       state->i2c_cec_addr >> 1);
 		if (state->i2c_cec == NULL) {
 			v4l2_err(sd, "failed to register cec i2c client\n");
+			err = -ENOMEM;
 			goto err_unreg_edid;
 		}
 		adv7511_wr(sd, 0xe2, 0x00); /* power up cec section */
diff --git a/drivers/media/i2c/ak881x.c b/drivers/media/i2c/ak881x.c
index d9f2b6b..3a795dc 100644
--- a/drivers/media/i2c/ak881x.c
+++ b/drivers/media/i2c/ak881x.c
@@ -124,21 +124,27 @@
 	return 0;
 }
 
-static int ak881x_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+static int ak881x_get_selection(struct v4l2_subdev *sd,
+				struct v4l2_subdev_pad_config *cfg,
+				struct v4l2_subdev_selection *sel)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct ak881x *ak881x = to_ak881x(client);
 
-	a->bounds.left			= 0;
-	a->bounds.top			= 0;
-	a->bounds.width			= 720;
-	a->bounds.height		= ak881x->lines;
-	a->defrect			= a->bounds;
-	a->type				= V4L2_BUF_TYPE_VIDEO_OUTPUT;
-	a->pixelaspect.numerator	= 1;
-	a->pixelaspect.denominator	= 1;
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+		return -EINVAL;
 
-	return 0;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = 720;
+		sel->r.height = ak881x->lines;
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 static int ak881x_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
@@ -207,13 +213,13 @@
 };
 
 static struct v4l2_subdev_video_ops ak881x_subdev_video_ops = {
-	.cropcap	= ak881x_cropcap,
 	.s_std_output	= ak881x_s_std_output,
 	.s_stream	= ak881x_s_stream,
 };
 
 static const struct v4l2_subdev_pad_ops ak881x_subdev_pad_ops = {
 	.enum_mbus_code = ak881x_enum_mbus_code,
+	.get_selection	= ak881x_get_selection,
 	.set_fmt	= ak881x_fill_fmt,
 	.get_fmt	= ak881x_fill_fmt,
 };
diff --git a/drivers/media/i2c/cs3308.c b/drivers/media/i2c/cs3308.c
index d28b4f3..7da5f69 100644
--- a/drivers/media/i2c/cs3308.c
+++ b/drivers/media/i2c/cs3308.c
@@ -127,7 +127,6 @@
 
 static struct i2c_driver cs3308_driver = {
 	.driver = {
-		.owner  = THIS_MODULE,
 		.name   = "cs3308",
 	},
 	.probe          = cs3308_probe,
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index bf82726..f95a6bc 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -35,6 +35,7 @@
  *
  */
 
+#include <asm/unaligned.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -63,51 +64,80 @@
 /* ----------------------------------------------------------------------- */
 
 static int get_key_haup_common(struct IR_i2c *ir, enum rc_type *protocol,
-			       u32 *scancode, u8 *ptoggle, int size, int offset)
+					u32 *scancode, u8 *ptoggle, int size)
 {
 	unsigned char buf[6];
-	int start, range, toggle, dev, code, ircode;
+	int start, range, toggle, dev, code, ircode, vendor;
 
 	/* poll IR chip */
 	if (size != i2c_master_recv(ir->c, buf, size))
 		return -EIO;
 
-	/* split rc5 data block ... */
-	start  = (buf[offset] >> 7) &    1;
-	range  = (buf[offset] >> 6) &    1;
-	toggle = (buf[offset] >> 5) &    1;
-	dev    =  buf[offset]       & 0x1f;
-	code   = (buf[offset+1] >> 2) & 0x3f;
+	if (buf[0] & 0x80) {
+		int offset = (size == 6) ? 3 : 0;
 
-	/* rc5 has two start bits
-	 * the first bit must be one
-	 * the second bit defines the command range (1 = 0-63, 0 = 64 - 127)
-	 */
-	if (!start)
-		/* no key pressed */
-		return 0;
+		/* split rc5 data block ... */
+		start  = (buf[offset] >> 7) &    1;
+		range  = (buf[offset] >> 6) &    1;
+		toggle = (buf[offset] >> 5) &    1;
+		dev    =  buf[offset]       & 0x1f;
+		code   = (buf[offset+1] >> 2) & 0x3f;
 
-	/* filter out invalid key presses */
-	ircode = (start << 12) | (toggle << 11) | (dev << 6) | code;
-	if ((ircode & 0x1fff) == 0x1fff)
-		return 0;
+		/* rc5 has two start bits
+		 * the first bit must be one
+		 * the second bit defines the command range:
+		 * 1 = 0-63, 0 = 64 - 127
+		 */
+		if (!start)
+			/* no key pressed */
+			return 0;
 
-	if (!range)
-		code += 64;
+		/* filter out invalid key presses */
+		ircode = (start << 12) | (toggle << 11) | (dev << 6) | code;
+		if ((ircode & 0x1fff) == 0x1fff)
+			return 0;
 
-	dprintk(1,"ir hauppauge (rc5): s%d r%d t%d dev=%d code=%d\n",
-		start, range, toggle, dev, code);
+		if (!range)
+			code += 64;
 
-	*protocol = RC_TYPE_RC5;
-	*scancode = RC_SCANCODE_RC5(dev, code);
-	*ptoggle = toggle;
-	return 1;
+		dprintk(1, "ir hauppauge (rc5): s%d r%d t%d dev=%d code=%d\n",
+			start, range, toggle, dev, code);
+
+		*protocol = RC_TYPE_RC5;
+		*scancode = RC_SCANCODE_RC5(dev, code);
+		*ptoggle = toggle;
+
+		return 1;
+	} else if (size == 6 && (buf[0] & 0x40)) {
+		code = buf[4];
+		dev = buf[3];
+		vendor = get_unaligned_be16(buf + 1);
+
+		if (vendor == 0x800f) {
+			*ptoggle = (dev & 0x80) != 0;
+			*protocol = RC_TYPE_RC6_MCE;
+			dev &= 0x7f;
+			dprintk(1, "ir hauppauge (rc6-mce): t%d vendor=%d dev=%d code=%d\n",
+						toggle, vendor, dev, code);
+		} else {
+			*ptoggle = 0;
+			*protocol = RC_TYPE_RC6_6A_32;
+			dprintk(1, "ir hauppauge (rc6-6a-32): vendor=%d dev=%d code=%d\n",
+							vendor, dev, code);
+		}
+
+		*scancode = RC_SCANCODE_RC6_6A(vendor, dev, code);
+
+		return 1;
+	}
+
+	return 0;
 }
 
 static int get_key_haup(struct IR_i2c *ir, enum rc_type *protocol,
 			u32 *scancode, u8 *toggle)
 {
-	return get_key_haup_common (ir, protocol, scancode, toggle, 3, 0);
+	return get_key_haup_common(ir, protocol, scancode, toggle, 3);
 }
 
 static int get_key_haup_xvr(struct IR_i2c *ir, enum rc_type *protocol,
@@ -126,7 +156,7 @@
 	if (ret != 1)
 		return (ret < 0) ? ret : -EINVAL;
 
-	return get_key_haup_common(ir, protocol, scancode, toggle, 6, 3);
+	return get_key_haup_common(ir, protocol, scancode, toggle, 6);
 }
 
 static int get_key_pixelview(struct IR_i2c *ir, enum rc_type *protocol,
@@ -347,7 +377,7 @@
 	case 0x71:
 		name        = "Hauppauge/Zilog Z8";
 		ir->get_key = get_key_haup_xvr;
-		rc_type     = RC_BIT_RC5;
+		rc_type     = RC_BIT_RC5 | RC_BIT_RC6_MCE | RC_BIT_RC6_6A_32;
 		ir_codes    = RC_MAP_HAUPPAUGE;
 		break;
 	}
diff --git a/drivers/media/i2c/soc_camera/mt9m111.c b/drivers/media/i2c/mt9m111.c
similarity index 91%
rename from drivers/media/i2c/soc_camera/mt9m111.c
rename to drivers/media/i2c/mt9m111.c
index 6dfaead..72e71b7 100644
--- a/drivers/media/i2c/soc_camera/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -16,10 +16,11 @@
 #include <linux/v4l2-mediabus.h>
 #include <linux/module.h>
 
-#include <media/soc_camera.h>
+#include <media/v4l2-async.h>
 #include <media/v4l2-clk.h>
 #include <media/v4l2-common.h>
 #include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
 
 /*
  * MT9M111, MT9M112 and MT9M131:
@@ -187,10 +188,10 @@
 };
 
 static const struct mt9m111_datafmt mt9m111_colour_fmts[] = {
-	{MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG},
-	{MEDIA_BUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG},
-	{MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG},
-	{MEDIA_BUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG},
+	{MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_SRGB},
+	{MEDIA_BUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_SRGB},
+	{MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_SRGB},
+	{MEDIA_BUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_SRGB},
 	{MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
 	{MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB},
 	{MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
@@ -383,30 +384,36 @@
 	return ret;
 }
 
-static int mt9m111_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+static int mt9m111_set_selection(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_pad_config *cfg,
+				 struct v4l2_subdev_selection *sel)
 {
-	struct v4l2_rect rect = a->c;
-	struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	struct mt9m111 *mt9m111 = to_mt9m111(client);
+	struct v4l2_rect rect = sel->r;
 	int width, height;
-	int ret;
+	int ret, align = 0;
 
-	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+	    sel->target != V4L2_SEL_TGT_CROP)
 		return -EINVAL;
 
 	if (mt9m111->fmt->code == MEDIA_BUS_FMT_SBGGR8_1X8 ||
 	    mt9m111->fmt->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE) {
 		/* Bayer format - even size lengths */
-		rect.width	= ALIGN(rect.width, 2);
-		rect.height	= ALIGN(rect.height, 2);
+		align = 1;
 		/* Let the user play with the starting pixel */
 	}
 
 	/* FIXME: the datasheet doesn't specify minimum sizes */
-	soc_camera_limit_side(&rect.left, &rect.width,
-		     MT9M111_MIN_DARK_COLS, 2, MT9M111_MAX_WIDTH);
-
-	soc_camera_limit_side(&rect.top, &rect.height,
-		     MT9M111_MIN_DARK_ROWS, 2, MT9M111_MAX_HEIGHT);
+	v4l_bound_align_image(&rect.width, 2, MT9M111_MAX_WIDTH, align,
+			      &rect.height, 2, MT9M111_MAX_HEIGHT, align, 0);
+	rect.left = clamp(rect.left, MT9M111_MIN_DARK_COLS,
+			  MT9M111_MIN_DARK_COLS + MT9M111_MAX_WIDTH -
+			  (__s32)rect.width);
+	rect.top = clamp(rect.top, MT9M111_MIN_DARK_ROWS,
+			 MT9M111_MIN_DARK_ROWS + MT9M111_MAX_HEIGHT -
+			 (__s32)rect.height);
 
 	width = min(mt9m111->width, rect.width);
 	height = min(mt9m111->height, rect.height);
@@ -421,30 +428,30 @@
 	return ret;
 }
 
-static int mt9m111_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int mt9m111_get_selection(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_pad_config *cfg,
+				 struct v4l2_subdev_selection *sel)
 {
-	struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	struct mt9m111 *mt9m111 = to_mt9m111(client);
 
-	a->c	= mt9m111->rect;
-	a->type	= V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-	return 0;
-}
-
-static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
-	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
 		return -EINVAL;
 
-	a->bounds.left			= MT9M111_MIN_DARK_COLS;
-	a->bounds.top			= MT9M111_MIN_DARK_ROWS;
-	a->bounds.width			= MT9M111_MAX_WIDTH;
-	a->bounds.height		= MT9M111_MAX_HEIGHT;
-	a->defrect			= a->bounds;
-	a->pixelaspect.numerator	= 1;
-	a->pixelaspect.denominator	= 1;
-
-	return 0;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		sel->r.left = MT9M111_MIN_DARK_COLS;
+		sel->r.top = MT9M111_MIN_DARK_ROWS;
+		sel->r.width = MT9M111_MAX_WIDTH;
+		sel->r.height = MT9M111_MAX_HEIGHT;
+		return 0;
+	case V4L2_SEL_TGT_CROP:
+		sel->r = mt9m111->rect;
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 static int mt9m111_get_fmt(struct v4l2_subdev *sd,
@@ -775,17 +782,16 @@
 static int mt9m111_power_on(struct mt9m111 *mt9m111)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
-	struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
 	int ret;
 
-	ret = soc_camera_power_on(&client->dev, ssdd, mt9m111->clk);
+	ret = v4l2_clk_enable(mt9m111->clk);
 	if (ret < 0)
 		return ret;
 
 	ret = mt9m111_resume(mt9m111);
 	if (ret < 0) {
 		dev_err(&client->dev, "Failed to resume the sensor: %d\n", ret);
-		soc_camera_power_off(&client->dev, ssdd, mt9m111->clk);
+		v4l2_clk_disable(mt9m111->clk);
 	}
 
 	return ret;
@@ -793,11 +799,8 @@
 
 static void mt9m111_power_off(struct mt9m111 *mt9m111)
 {
-	struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
-	struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
-
 	mt9m111_suspend(mt9m111);
-	soc_camera_power_off(&client->dev, ssdd, mt9m111->clk);
+	v4l2_clk_disable(mt9m111->clk);
 }
 
 static int mt9m111_s_power(struct v4l2_subdev *sd, int on)
@@ -854,27 +857,22 @@
 static int mt9m111_g_mbus_config(struct v4l2_subdev *sd,
 				struct v4l2_mbus_config *cfg)
 {
-	struct i2c_client *client = v4l2_get_subdevdata(sd);
-	struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
-
 	cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING |
 		V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
 		V4L2_MBUS_DATA_ACTIVE_HIGH;
 	cfg->type = V4L2_MBUS_PARALLEL;
-	cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
 
 	return 0;
 }
 
 static struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = {
-	.s_crop		= mt9m111_s_crop,
-	.g_crop		= mt9m111_g_crop,
-	.cropcap	= mt9m111_cropcap,
 	.g_mbus_config	= mt9m111_g_mbus_config,
 };
 
 static const struct v4l2_subdev_pad_ops mt9m111_subdev_pad_ops = {
 	.enum_mbus_code = mt9m111_enum_mbus_code,
+	.get_selection	= mt9m111_get_selection,
+	.set_selection	= mt9m111_set_selection,
 	.get_fmt	= mt9m111_get_fmt,
 	.set_fmt	= mt9m111_set_fmt,
 };
@@ -933,20 +931,8 @@
 {
 	struct mt9m111 *mt9m111;
 	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
-	struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
 	int ret;
 
-	if (client->dev.of_node) {
-		ssdd = devm_kzalloc(&client->dev, sizeof(*ssdd), GFP_KERNEL);
-		if (!ssdd)
-			return -ENOMEM;
-		client->dev.platform_data = ssdd;
-	}
-	if (!ssdd) {
-		dev_err(&client->dev, "mt9m111: driver needs platform data\n");
-		return -EINVAL;
-	}
-
 	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
 		dev_warn(&adapter->dev,
 			 "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n");
@@ -992,10 +978,6 @@
 	mt9m111->lastpage	= -1;
 	mutex_init(&mt9m111->power_lock);
 
-	ret = soc_camera_power_init(&client->dev, ssdd);
-	if (ret < 0)
-		goto out_hdlfree;
-
 	ret = mt9m111_video_probe(client);
 	if (ret < 0)
 		goto out_hdlfree;
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index be5a7fd..502c722 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -23,6 +23,7 @@
 #include <linux/videodev2.h>
 
 #include <media/media-entity.h>
+#include <media/v4l2-async.h>
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-event.h>
@@ -1520,6 +1521,10 @@
 	/* Update exposure time min/max to match frame format */
 	ov965x_update_exposure_ctrl(ov965x);
 
+	ret = v4l2_async_register_subdev(sd);
+	if (ret < 0)
+		goto err_ctrls;
+
 	return 0;
 err_ctrls:
 	v4l2_ctrl_handler_free(sd->ctrl_handler);
@@ -1532,7 +1537,7 @@
 {
 	struct v4l2_subdev *sd = i2c_get_clientdata(client);
 
-	v4l2_device_unregister_subdev(sd);
+	v4l2_async_unregister_subdev(sd);
 	v4l2_ctrl_handler_free(sd->ctrl_handler);
 	media_entity_cleanup(&sd->entity);
 
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index 08af58f..3844853 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1706,7 +1706,7 @@
 	state->oif_pads[OIF_ISP_PAD].flags = MEDIA_PAD_FL_SINK;
 	state->oif_pads[OIF_JPEG_PAD].flags = MEDIA_PAD_FL_SINK;
 	state->oif_pads[OIF_SOURCE_PAD].flags = MEDIA_PAD_FL_SOURCE;
-	oif_sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+	oif_sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
 
 	ret = media_entity_pads_init(&oif_sd->entity, OIF_NUM_PADS,
 							state->oif_pads);
diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c
index 8a0f22d..6ebcf25 100644
--- a/drivers/media/i2c/s5k4ecgx.c
+++ b/drivers/media/i2c/s5k4ecgx.c
@@ -1019,7 +1019,6 @@
 
 static struct i2c_driver v4l2_i2c_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name = S5K4ECGX_DRIVER_NAME,
 	},
 	.probe = s5k4ecgx_probe,
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index cbe4711..7699640 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -331,6 +331,7 @@
 	sensor->format.width = S5K6A3_DEFAULT_WIDTH;
 	sensor->format.height = S5K6A3_DEFAULT_HEIGHT;
 
+	sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
 	sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
 	ret = media_entity_pads_init(&sd->entity, 1, &sensor->pad);
 	if (ret < 0)
@@ -376,7 +377,6 @@
 	.driver = {
 		.of_match_table	= of_match_ptr(s5k6a3_of_match),
 		.name		= S5K6A3_DRV_NAME,
-		.owner		= THIS_MODULE,
 	},
 	.probe		= s5k6a3_probe,
 	.remove		= s5k6a3_remove,
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index d08ab6c..44f8c7e 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -24,8 +24,8 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
-#include <linux/of_gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <linux/smiapp.h>
@@ -328,6 +328,14 @@
  *    orders must be defined.
  */
 static const struct smiapp_csi_data_format smiapp_csi_data_formats[] = {
+	{ MEDIA_BUS_FMT_SGRBG16_1X16, 16, 16, SMIAPP_PIXEL_ORDER_GRBG, },
+	{ MEDIA_BUS_FMT_SRGGB16_1X16, 16, 16, SMIAPP_PIXEL_ORDER_RGGB, },
+	{ MEDIA_BUS_FMT_SBGGR16_1X16, 16, 16, SMIAPP_PIXEL_ORDER_BGGR, },
+	{ MEDIA_BUS_FMT_SGBRG16_1X16, 16, 16, SMIAPP_PIXEL_ORDER_GBRG, },
+	{ MEDIA_BUS_FMT_SGRBG14_1X14, 14, 14, SMIAPP_PIXEL_ORDER_GRBG, },
+	{ MEDIA_BUS_FMT_SRGGB14_1X14, 14, 14, SMIAPP_PIXEL_ORDER_RGGB, },
+	{ MEDIA_BUS_FMT_SBGGR14_1X14, 14, 14, SMIAPP_PIXEL_ORDER_BGGR, },
+	{ MEDIA_BUS_FMT_SGBRG14_1X14, 14, 14, SMIAPP_PIXEL_ORDER_GBRG, },
 	{ MEDIA_BUS_FMT_SGRBG12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_GRBG, },
 	{ MEDIA_BUS_FMT_SRGGB12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_RGGB, },
 	{ MEDIA_BUS_FMT_SBGGR12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_BGGR, },
@@ -625,12 +633,12 @@
 				0, max_value, 1, max_value);
 	}
 
-	for (max = 0; sensor->platform_data->op_sys_clock[max + 1]; max++);
+	for (max = 0; sensor->hwcfg->op_sys_clock[max + 1]; max++);
 
 	sensor->link_freq = v4l2_ctrl_new_int_menu(
 		&sensor->src->ctrl_handler, &smiapp_ctrl_ops,
 		V4L2_CID_LINK_FREQ, __fls(*valid_link_freqs),
-		__ffs(*valid_link_freqs), sensor->platform_data->op_sys_clock);
+		__ffs(*valid_link_freqs), sensor->hwcfg->op_sys_clock);
 
 	return sensor->src->ctrl_handler.error;
 }
@@ -833,8 +841,8 @@
 
 		pll->bits_per_pixel = f->compressed;
 
-		for (j = 0; sensor->platform_data->op_sys_clock[j]; j++) {
-			pll->link_freq = sensor->platform_data->op_sys_clock[j];
+		for (j = 0; sensor->hwcfg->op_sys_clock[j]; j++) {
+			pll->link_freq = sensor->hwcfg->op_sys_clock[j];
 
 			rval = smiapp_pll_try(sensor, pll);
 			dev_dbg(&client->dev, "link freq %u Hz, bpp %u %s\n",
@@ -1032,22 +1040,22 @@
 	int rval;
 	u32 val;
 
-	client->addr = sensor->platform_data->i2c_addr_dfl;
+	client->addr = sensor->hwcfg->i2c_addr_dfl;
 
 	rval = smiapp_write(sensor,
 			    SMIAPP_REG_U8_CCI_ADDRESS_CONTROL,
-			    sensor->platform_data->i2c_addr_alt << 1);
+			    sensor->hwcfg->i2c_addr_alt << 1);
 	if (rval)
 		return rval;
 
-	client->addr = sensor->platform_data->i2c_addr_alt;
+	client->addr = sensor->hwcfg->i2c_addr_alt;
 
 	/* verify addr change went ok */
 	rval = smiapp_read(sensor, SMIAPP_REG_U8_CCI_ADDRESS_CONTROL, &val);
 	if (rval)
 		return rval;
 
-	if (val != sensor->platform_data->i2c_addr_alt << 1)
+	if (val != sensor->hwcfg->i2c_addr_alt << 1)
 		return -ENODEV;
 
 	return 0;
@@ -1061,13 +1069,13 @@
 static int smiapp_setup_flash_strobe(struct smiapp_sensor *sensor)
 {
 	struct smiapp_flash_strobe_parms *strobe_setup;
-	unsigned int ext_freq = sensor->platform_data->ext_clk;
+	unsigned int ext_freq = sensor->hwcfg->ext_clk;
 	u32 tmp;
 	u32 strobe_adjustment;
 	u32 strobe_width_high_rs;
 	int rval;
 
-	strobe_setup = sensor->platform_data->strobe_setup;
+	strobe_setup = sensor->hwcfg->strobe_setup;
 
 	/*
 	 * How to calculate registers related to strobe length. Please
@@ -1179,7 +1187,7 @@
 			    strobe_setup->trigger);
 
 out:
-	sensor->platform_data->strobe_setup->trigger = 0;
+	sensor->hwcfg->strobe_setup->trigger = 0;
 
 	return rval;
 }
@@ -1201,21 +1209,16 @@
 	}
 	usleep_range(1000, 1000);
 
-	if (sensor->platform_data->set_xclk)
-		rval = sensor->platform_data->set_xclk(
-			&sensor->src->sd, sensor->platform_data->ext_clk);
-	else
-		rval = clk_prepare_enable(sensor->ext_clk);
+	rval = clk_prepare_enable(sensor->ext_clk);
 	if (rval < 0) {
 		dev_dbg(&client->dev, "failed to enable xclk\n");
 		goto out_xclk_fail;
 	}
 	usleep_range(1000, 1000);
 
-	if (gpio_is_valid(sensor->platform_data->xshutdown))
-		gpio_set_value(sensor->platform_data->xshutdown, 1);
+	gpiod_set_value(sensor->xshutdown, 1);
 
-	sleep = SMIAPP_RESET_DELAY(sensor->platform_data->ext_clk);
+	sleep = SMIAPP_RESET_DELAY(sensor->hwcfg->ext_clk);
 	usleep_range(sleep, sleep);
 
 	/*
@@ -1229,7 +1232,7 @@
 	 * is found.
 	 */
 
-	if (sensor->platform_data->i2c_addr_alt) {
+	if (sensor->hwcfg->i2c_addr_alt) {
 		rval = smiapp_change_cci_addr(sensor);
 		if (rval) {
 			dev_err(&client->dev, "cci address change error\n");
@@ -1244,7 +1247,7 @@
 		goto out_cci_addr_fail;
 	}
 
-	if (sensor->platform_data->i2c_addr_alt) {
+	if (sensor->hwcfg->i2c_addr_alt) {
 		rval = smiapp_change_cci_addr(sensor);
 		if (rval) {
 			dev_err(&client->dev, "cci address change error\n");
@@ -1261,14 +1264,14 @@
 
 	rval = smiapp_write(
 		sensor, SMIAPP_REG_U16_EXTCLK_FREQUENCY_MHZ,
-		sensor->platform_data->ext_clk / (1000000 / (1 << 8)));
+		sensor->hwcfg->ext_clk / (1000000 / (1 << 8)));
 	if (rval) {
 		dev_err(&client->dev, "extclk frequency set failed\n");
 		goto out_cci_addr_fail;
 	}
 
 	rval = smiapp_write(sensor, SMIAPP_REG_U8_CSI_LANE_MODE,
-			    sensor->platform_data->lanes - 1);
+			    sensor->hwcfg->lanes - 1);
 	if (rval) {
 		dev_err(&client->dev, "csi lane mode set failed\n");
 		goto out_cci_addr_fail;
@@ -1282,7 +1285,7 @@
 	}
 
 	rval = smiapp_write(sensor, SMIAPP_REG_U8_CSI_SIGNALLING_MODE,
-			    sensor->platform_data->csi_signalling_mode);
+			    sensor->hwcfg->csi_signalling_mode);
 	if (rval) {
 		dev_err(&client->dev, "csi signalling mode set failed\n");
 		goto out_cci_addr_fail;
@@ -1322,12 +1325,8 @@
 	return 0;
 
 out_cci_addr_fail:
-	if (gpio_is_valid(sensor->platform_data->xshutdown))
-		gpio_set_value(sensor->platform_data->xshutdown, 0);
-	if (sensor->platform_data->set_xclk)
-		sensor->platform_data->set_xclk(&sensor->src->sd, 0);
-	else
-		clk_disable_unprepare(sensor->ext_clk);
+	gpiod_set_value(sensor->xshutdown, 0);
+	clk_disable_unprepare(sensor->ext_clk);
 
 out_xclk_fail:
 	regulator_disable(sensor->vana);
@@ -1343,17 +1342,13 @@
 	 * really see a power off and next time the cci address change
 	 * will fail. So do a soft reset explicitly here.
 	 */
-	if (sensor->platform_data->i2c_addr_alt)
+	if (sensor->hwcfg->i2c_addr_alt)
 		smiapp_write(sensor,
 			     SMIAPP_REG_U8_SOFTWARE_RESET,
 			     SMIAPP_SOFTWARE_RESET);
 
-	if (gpio_is_valid(sensor->platform_data->xshutdown))
-		gpio_set_value(sensor->platform_data->xshutdown, 0);
-	if (sensor->platform_data->set_xclk)
-		sensor->platform_data->set_xclk(&sensor->src->sd, 0);
-	else
-		clk_disable_unprepare(sensor->ext_clk);
+	gpiod_set_value(sensor->xshutdown, 0);
+	clk_disable_unprepare(sensor->ext_clk);
 	usleep_range(5000, 5000);
 	regulator_disable(sensor->vana);
 	sensor->streaming = false;
@@ -1491,8 +1486,8 @@
 	if ((sensor->limits[SMIAPP_LIMIT_FLASH_MODE_CAPABILITY] &
 	     (SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE |
 	      SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE)) &&
-	    sensor->platform_data->strobe_setup != NULL &&
-	    sensor->platform_data->strobe_setup->trigger != 0) {
+	    sensor->hwcfg->strobe_setup != NULL &&
+	    sensor->hwcfg->strobe_setup->trigger != 0) {
 		rval = smiapp_setup_flash_strobe(sensor);
 		if (rval)
 			goto out;
@@ -2309,7 +2304,7 @@
 
 	if (!sensor->nvm_size) {
 		/* NVM not read yet - read it now */
-		sensor->nvm_size = sensor->platform_data->nvm_size;
+		sensor->nvm_size = sensor->hwcfg->nvm_size;
 		if (smiapp_set_power(subdev, 1) < 0)
 			return -ENODEV;
 		if (smiapp_read_nvm(sensor, sensor->nvm)) {
@@ -2554,35 +2549,27 @@
 		return PTR_ERR(sensor->vana);
 	}
 
-	if (!sensor->platform_data->set_xclk) {
-		sensor->ext_clk = devm_clk_get(&client->dev, NULL);
-		if (IS_ERR(sensor->ext_clk)) {
-			dev_err(&client->dev, "could not get clock\n");
-			return PTR_ERR(sensor->ext_clk);
-		}
-
-		rval = clk_set_rate(sensor->ext_clk,
-				    sensor->platform_data->ext_clk);
-		if (rval < 0) {
-			dev_err(&client->dev,
-				"unable to set clock freq to %u\n",
-				sensor->platform_data->ext_clk);
-			return rval;
-		}
+	sensor->ext_clk = devm_clk_get(&client->dev, NULL);
+	if (IS_ERR(sensor->ext_clk)) {
+		dev_err(&client->dev, "could not get clock (%ld)\n",
+			PTR_ERR(sensor->ext_clk));
+		return -EPROBE_DEFER;
 	}
 
-	if (gpio_is_valid(sensor->platform_data->xshutdown)) {
-		rval = devm_gpio_request_one(
-			&client->dev, sensor->platform_data->xshutdown, 0,
-			"SMIA++ xshutdown");
-		if (rval < 0) {
-			dev_err(&client->dev,
-				"unable to acquire reset gpio %d\n",
-				sensor->platform_data->xshutdown);
-			return rval;
-		}
+	rval = clk_set_rate(sensor->ext_clk,
+			    sensor->hwcfg->ext_clk);
+	if (rval < 0) {
+		dev_err(&client->dev,
+			"unable to set clock freq to %u\n",
+			sensor->hwcfg->ext_clk);
+		return rval;
 	}
 
+	sensor->xshutdown = devm_gpiod_get_optional(&client->dev, "xshutdown",
+						    GPIOD_OUT_LOW);
+	if (IS_ERR(sensor->xshutdown))
+		return PTR_ERR(sensor->xshutdown);
+
 	rval = smiapp_power_on(sensor);
 	if (rval)
 		return -ENODEV;
@@ -2612,7 +2599,7 @@
 	 *
 	 * Rotation also changes the bayer pattern.
 	 */
-	if (sensor->platform_data->module_board_orient ==
+	if (sensor->hwcfg->module_board_orient ==
 	    SMIAPP_MODULE_BOARD_ORIENT_180)
 		sensor->hvflip_inv_mask = SMIAPP_IMAGE_ORIENTATION_HFLIP |
 					  SMIAPP_IMAGE_ORIENTATION_VFLIP;
@@ -2661,9 +2648,9 @@
 	/* SMIA++ NVM initialization - it will be read from the sensor
 	 * when it is first requested by userspace.
 	 */
-	if (sensor->minfo.smiapp_version && sensor->platform_data->nvm_size) {
+	if (sensor->minfo.smiapp_version && sensor->hwcfg->nvm_size) {
 		sensor->nvm = devm_kzalloc(&client->dev,
-				sensor->platform_data->nvm_size, GFP_KERNEL);
+				sensor->hwcfg->nvm_size, GFP_KERNEL);
 		if (sensor->nvm == NULL) {
 			dev_err(&client->dev, "nvm buf allocation failed\n");
 			rval = -ENOMEM;
@@ -2706,8 +2693,8 @@
 
 	/* prepare PLL configuration input values */
 	pll->bus_type = SMIAPP_PLL_BUS_TYPE_CSI2;
-	pll->csi2.lanes = sensor->platform_data->lanes;
-	pll->ext_clk_freq_hz = sensor->platform_data->ext_clk;
+	pll->csi2.lanes = sensor->hwcfg->lanes;
+	pll->ext_clk_freq_hz = sensor->hwcfg->ext_clk;
 	pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
 	/* Profile 0 sensors have no separate OP clock branch. */
 	if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
@@ -2984,9 +2971,9 @@
 
 #endif /* CONFIG_PM */
 
-static struct smiapp_platform_data *smiapp_get_pdata(struct device *dev)
+static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
 {
-	struct smiapp_platform_data *pdata;
+	struct smiapp_hwconfig *hwcfg;
 	struct v4l2_of_endpoint *bus_cfg;
 	struct device_node *ep;
 	int i;
@@ -3003,58 +2990,55 @@
 	if (IS_ERR(bus_cfg))
 		goto out_err;
 
-	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
-	if (!pdata)
+	hwcfg = devm_kzalloc(dev, sizeof(*hwcfg), GFP_KERNEL);
+	if (!hwcfg)
 		goto out_err;
 
 	switch (bus_cfg->bus_type) {
 	case V4L2_MBUS_CSI2:
-		pdata->csi_signalling_mode = SMIAPP_CSI_SIGNALLING_MODE_CSI2;
+		hwcfg->csi_signalling_mode = SMIAPP_CSI_SIGNALLING_MODE_CSI2;
 		break;
 		/* FIXME: add CCP2 support. */
 	default:
 		goto out_err;
 	}
 
-	pdata->lanes = bus_cfg->bus.mipi_csi2.num_data_lanes;
-	dev_dbg(dev, "lanes %u\n", pdata->lanes);
-
-	/* xshutdown GPIO is optional */
-	pdata->xshutdown = of_get_named_gpio(dev->of_node, "reset-gpios", 0);
+	hwcfg->lanes = bus_cfg->bus.mipi_csi2.num_data_lanes;
+	dev_dbg(dev, "lanes %u\n", hwcfg->lanes);
 
 	/* NVM size is not mandatory */
 	of_property_read_u32(dev->of_node, "nokia,nvm-size",
-				    &pdata->nvm_size);
+				    &hwcfg->nvm_size);
 
 	rval = of_property_read_u32(dev->of_node, "clock-frequency",
-				    &pdata->ext_clk);
+				    &hwcfg->ext_clk);
 	if (rval) {
 		dev_warn(dev, "can't get clock-frequency\n");
 		goto out_err;
 	}
 
-	dev_dbg(dev, "reset %d, nvm %d, clk %d, csi %d\n", pdata->xshutdown,
-		pdata->nvm_size, pdata->ext_clk, pdata->csi_signalling_mode);
+	dev_dbg(dev, "nvm %d, clk %d, csi %d\n", hwcfg->nvm_size,
+		hwcfg->ext_clk, hwcfg->csi_signalling_mode);
 
 	if (!bus_cfg->nr_of_link_frequencies) {
 		dev_warn(dev, "no link frequencies defined\n");
 		goto out_err;
 	}
 
-	pdata->op_sys_clock = devm_kcalloc(
+	hwcfg->op_sys_clock = devm_kcalloc(
 		dev, bus_cfg->nr_of_link_frequencies + 1 /* guardian */,
-		sizeof(*pdata->op_sys_clock), GFP_KERNEL);
-	if (!pdata->op_sys_clock)
+		sizeof(*hwcfg->op_sys_clock), GFP_KERNEL);
+	if (!hwcfg->op_sys_clock)
 		goto out_err;
 
 	for (i = 0; i < bus_cfg->nr_of_link_frequencies; i++) {
-		pdata->op_sys_clock[i] = bus_cfg->link_frequencies[i];
-		dev_dbg(dev, "freq %d: %lld\n", i, pdata->op_sys_clock[i]);
+		hwcfg->op_sys_clock[i] = bus_cfg->link_frequencies[i];
+		dev_dbg(dev, "freq %d: %lld\n", i, hwcfg->op_sys_clock[i]);
 	}
 
 	v4l2_of_free_endpoint(bus_cfg);
 	of_node_put(ep);
-	return pdata;
+	return hwcfg;
 
 out_err:
 	v4l2_of_free_endpoint(bus_cfg);
@@ -3066,17 +3050,17 @@
 			const struct i2c_device_id *devid)
 {
 	struct smiapp_sensor *sensor;
-	struct smiapp_platform_data *pdata = smiapp_get_pdata(&client->dev);
+	struct smiapp_hwconfig *hwcfg = smiapp_get_hwconfig(&client->dev);
 	int rval;
 
-	if (pdata == NULL)
+	if (hwcfg == NULL)
 		return -ENODEV;
 
 	sensor = devm_kzalloc(&client->dev, sizeof(*sensor), GFP_KERNEL);
 	if (sensor == NULL)
 		return -ENOMEM;
 
-	sensor->platform_data = pdata;
+	sensor->hwcfg = hwcfg;
 	mutex_init(&sensor->mutex);
 	mutex_init(&sensor->power_mutex);
 	sensor->src = &sensor->ssds[sensor->ssds_used];
@@ -3119,12 +3103,8 @@
 	v4l2_async_unregister_subdev(subdev);
 
 	if (sensor->power_count) {
-		if (gpio_is_valid(sensor->platform_data->xshutdown))
-			gpio_set_value(sensor->platform_data->xshutdown, 0);
-		if (sensor->platform_data->set_xclk)
-			sensor->platform_data->set_xclk(&sensor->src->sd, 0);
-		else
-			clk_disable_unprepare(sensor->ext_clk);
+		gpiod_set_value(sensor->xshutdown, 0);
+		clk_disable_unprepare(sensor->ext_clk);
 		sensor->power_count = 0;
 	}
 
diff --git a/drivers/media/i2c/smiapp/smiapp-quirk.c b/drivers/media/i2c/smiapp/smiapp-quirk.c
index abf9ea7..cb128ea 100644
--- a/drivers/media/i2c/smiapp/smiapp-quirk.c
+++ b/drivers/media/i2c/smiapp/smiapp-quirk.c
@@ -26,7 +26,7 @@
 }
 
 static int smiapp_write_8s(struct smiapp_sensor *sensor,
-			   struct smiapp_reg_8 *regs, int len)
+			   const struct smiapp_reg_8 *regs, int len)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
 	int rval;
@@ -71,7 +71,7 @@
 
 static int jt8ew9_post_poweron(struct smiapp_sensor *sensor)
 {
-	struct smiapp_reg_8 regs[] = {
+	const struct smiapp_reg_8 regs[] = {
 		{ 0x30a3, 0xd8 }, /* Output port control : LVDS ports only */
 		{ 0x30ae, 0x00 }, /* 0x0307 pll_multiplier maximum value on PLL input 9.6MHz ( 19.2MHz is divided on pre_pll_div) */
 		{ 0x30af, 0xd0 }, /* 0x0307 pll_multiplier maximum value on PLL input 9.6MHz ( 19.2MHz is divided on pre_pll_div) */
@@ -115,7 +115,7 @@
 static int imx125es_post_poweron(struct smiapp_sensor *sensor)
 {
 	/* Taken from v02. No idea what the other two are. */
-	struct smiapp_reg_8 regs[] = {
+	const struct smiapp_reg_8 regs[] = {
 		/*
 		 * 0x3302: clk during frame blanking:
 		 * 0x00 - HS mode, 0x01 - LP11
@@ -145,8 +145,7 @@
 {
 	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
 	int rval;
-
-	struct smiapp_reg_8 regs[] = {
+	const struct smiapp_reg_8 regs[] = {
 		{ 0x3031, 0xcd }, /* For digital binning (EQ_MONI) */
 		{ 0x30a3, 0xd0 }, /* FLASH STROBE enable */
 		{ 0x3237, 0x00 }, /* For control of pulse timing for ADC */
@@ -167,8 +166,7 @@
 		{ 0x33cf, 0xec }, /* For Black sun */
 		{ 0x3328, 0x80 }, /* Ugh. No idea what's this. */
 	};
-
-	struct smiapp_reg_8 regs_96[] = {
+	const struct smiapp_reg_8 regs_96[] = {
 		{ 0x30ae, 0x00 }, /* For control of ADC clock */
 		{ 0x30af, 0xd0 },
 		{ 0x30b0, 0x01 },
@@ -178,13 +176,13 @@
 	if (rval < 0)
 		return rval;
 
-	switch (sensor->platform_data->ext_clk) {
+	switch (sensor->hwcfg->ext_clk) {
 	case 9600000:
 		return smiapp_write_8s(sensor, regs_96,
 				       ARRAY_SIZE(regs_96));
 	default:
 		dev_warn(&client->dev, "no MSRs for %d Hz ext_clk\n",
-			 sensor->platform_data->ext_clk);
+			 sensor->hwcfg->ext_clk);
 		return 0;
 	}
 }
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.c b/drivers/media/i2c/smiapp/smiapp-regs.c
index 6b6c20b..1e501c0 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.c
+++ b/drivers/media/i2c/smiapp/smiapp-regs.c
@@ -188,7 +188,8 @@
 				   SMIAPP_QUIRK_FLAG_8BIT_READ_ONLY));
 }
 
-int smiapp_read(struct smiapp_sensor *sensor, u32 reg, u32 *val)
+static int smiapp_read_quirk(struct smiapp_sensor *sensor, u32 reg, u32 *val,
+			     bool force8)
 {
 	int rval;
 
@@ -199,21 +200,20 @@
 	if (rval < 0)
 		return rval;
 
+	if (force8)
+		return __smiapp_read(sensor, reg, val, true);
+
 	return smiapp_read_no_quirk(sensor, reg, val);
 }
 
+int smiapp_read(struct smiapp_sensor *sensor, u32 reg, u32 *val)
+{
+	return smiapp_read_quirk(sensor, reg, val, false);
+}
+
 int smiapp_read_8only(struct smiapp_sensor *sensor, u32 reg, u32 *val)
 {
-	int rval;
-
-	*val = 0;
-	rval = smiapp_call_quirk(sensor, reg_access, false, &reg, val);
-	if (rval == -ENOIOCTLCMD)
-		return 0;
-	if (rval < 0)
-		return rval;
-
-	return __smiapp_read(sensor, reg, val, true);
+	return smiapp_read_quirk(sensor, reg, val, true);
 }
 
 int smiapp_write_no_quirk(struct smiapp_sensor *sensor, u32 reg, u32 val)
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index 2174f89..aae72bc 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -151,7 +151,7 @@
 #define SMIAPP_PADS			2
 
 #define SMIAPP_COMPRESSED_BASE		8
-#define SMIAPP_COMPRESSED_MAX		12
+#define SMIAPP_COMPRESSED_MAX		16
 #define SMIAPP_NR_OF_COMPRESSED		(SMIAPP_COMPRESSED_MAX - \
 					 SMIAPP_COMPRESSED_BASE + 1)
 
@@ -197,9 +197,10 @@
 	struct smiapp_subdev *binner;
 	struct smiapp_subdev *scaler;
 	struct smiapp_subdev *pixel_array;
-	struct smiapp_platform_data *platform_data;
+	struct smiapp_hwconfig *hwcfg;
 	struct regulator *vana;
 	struct clk *ext_clk;
+	struct gpio_desc *xshutdown;
 	u32 limits[SMIAPP_LIMIT_LAST];
 	u8 nbinning_subtypes;
 	struct smiapp_binning_subtype binning_subtypes[SMIAPP_BINNING_SUBTYPES];
diff --git a/drivers/media/i2c/soc_camera/Kconfig b/drivers/media/i2c/soc_camera/Kconfig
index 23d352f..7704bcf 100644
--- a/drivers/media/i2c/soc_camera/Kconfig
+++ b/drivers/media/i2c/soc_camera/Kconfig
@@ -14,11 +14,14 @@
 	  and colour models.
 
 config SOC_CAMERA_MT9M111
-	tristate "mt9m111, mt9m112 and mt9m131 support"
+	tristate "legacy soc_camera mt9m111, mt9m112 and mt9m131 support"
 	depends on SOC_CAMERA && I2C
+	select VIDEO_MT9M111
 	help
 	  This driver supports MT9M111, MT9M112 and MT9M131 cameras from
-	  Micron/Aptina
+	  Micron/Aptina.
+	  This is the legacy configuration which shouldn't be used anymore,
+	  while VIDEO_MT9M111 should be used instead.
 
 config SOC_CAMERA_MT9T031
 	tristate "mt9t031 support"
diff --git a/drivers/media/i2c/soc_camera/Makefile b/drivers/media/i2c/soc_camera/Makefile
index d0421fe..6f994f9 100644
--- a/drivers/media/i2c/soc_camera/Makefile
+++ b/drivers/media/i2c/soc_camera/Makefile
@@ -1,6 +1,5 @@
 obj-$(CONFIG_SOC_CAMERA_IMX074)		+= imx074.o
 obj-$(CONFIG_SOC_CAMERA_MT9M001)	+= mt9m001.o
-obj-$(CONFIG_SOC_CAMERA_MT9M111)	+= mt9m111.o
 obj-$(CONFIG_SOC_CAMERA_MT9T031)	+= mt9t031.o
 obj-$(CONFIG_SOC_CAMERA_MT9T112)	+= mt9t112.o
 obj-$(CONFIG_SOC_CAMERA_MT9V022)	+= mt9v022.o
diff --git a/drivers/media/i2c/soc_camera/imx074.c b/drivers/media/i2c/soc_camera/imx074.c
index f68c235..05b55cf 100644
--- a/drivers/media/i2c/soc_camera/imx074.c
+++ b/drivers/media/i2c/soc_camera/imx074.c
@@ -209,31 +209,26 @@
 	return 0;
 }
 
-static int imx074_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int imx074_get_selection(struct v4l2_subdev *sd,
+				struct v4l2_subdev_pad_config *cfg,
+				struct v4l2_subdev_selection *sel)
 {
-	struct v4l2_rect *rect = &a->c;
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+		return -EINVAL;
 
-	a->type		= V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	rect->top	= 0;
-	rect->left	= 0;
-	rect->width	= IMX074_WIDTH;
-	rect->height	= IMX074_HEIGHT;
+	sel->r.left = 0;
+	sel->r.top = 0;
+	sel->r.width = IMX074_WIDTH;
+	sel->r.height = IMX074_HEIGHT;
 
-	return 0;
-}
-
-static int imx074_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
-	a->bounds.left			= 0;
-	a->bounds.top			= 0;
-	a->bounds.width			= IMX074_WIDTH;
-	a->bounds.height		= IMX074_HEIGHT;
-	a->defrect			= a->bounds;
-	a->type				= V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	a->pixelaspect.numerator	= 1;
-	a->pixelaspect.denominator	= 1;
-
-	return 0;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+	case V4L2_SEL_TGT_CROP:
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 static int imx074_enum_mbus_code(struct v4l2_subdev *sd,
@@ -278,8 +273,6 @@
 
 static struct v4l2_subdev_video_ops imx074_subdev_video_ops = {
 	.s_stream	= imx074_s_stream,
-	.g_crop		= imx074_g_crop,
-	.cropcap	= imx074_cropcap,
 	.g_mbus_config	= imx074_g_mbus_config,
 };
 
@@ -289,6 +282,7 @@
 
 static const struct v4l2_subdev_pad_ops imx074_subdev_pad_ops = {
 	.enum_mbus_code = imx074_enum_mbus_code,
+	.get_selection	= imx074_get_selection,
 	.get_fmt	= imx074_get_fmt,
 	.set_fmt	= imx074_set_fmt,
 };
diff --git a/drivers/media/i2c/soc_camera/mt9m001.c b/drivers/media/i2c/soc_camera/mt9m001.c
index 69becc3..3d6378d 100644
--- a/drivers/media/i2c/soc_camera/mt9m001.c
+++ b/drivers/media/i2c/soc_camera/mt9m001.c
@@ -171,13 +171,19 @@
 	return 0;
 }
 
-static int mt9m001_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+static int mt9m001_set_selection(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_selection *sel)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct mt9m001 *mt9m001 = to_mt9m001(client);
-	struct v4l2_rect rect = a->c;
-	int ret;
+	struct v4l2_rect rect = sel->r;
 	const u16 hblank = 9, vblank = 25;
+	int ret;
+
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+	    sel->target != V4L2_SEL_TGT_CROP)
+		return -EINVAL;
 
 	if (mt9m001->fmts == mt9m001_colour_fmts)
 		/*
@@ -225,29 +231,30 @@
 	return ret;
 }
 
-static int mt9m001_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int mt9m001_get_selection(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_selection *sel)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct mt9m001 *mt9m001 = to_mt9m001(client);
 
-	a->c	= mt9m001->rect;
-	a->type	= V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+		return -EINVAL;
 
-	return 0;
-}
-
-static int mt9m001_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
-	a->bounds.left			= MT9M001_COLUMN_SKIP;
-	a->bounds.top			= MT9M001_ROW_SKIP;
-	a->bounds.width			= MT9M001_MAX_WIDTH;
-	a->bounds.height		= MT9M001_MAX_HEIGHT;
-	a->defrect			= a->bounds;
-	a->type				= V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	a->pixelaspect.numerator	= 1;
-	a->pixelaspect.denominator	= 1;
-
-	return 0;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		sel->r.left = MT9M001_COLUMN_SKIP;
+		sel->r.top = MT9M001_ROW_SKIP;
+		sel->r.width = MT9M001_MAX_WIDTH;
+		sel->r.height = MT9M001_MAX_HEIGHT;
+		return 0;
+	case V4L2_SEL_TGT_CROP:
+		sel->r = mt9m001->rect;
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 static int mt9m001_get_fmt(struct v4l2_subdev *sd,
@@ -275,18 +282,18 @@
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct mt9m001 *mt9m001 = to_mt9m001(client);
-	struct v4l2_crop a = {
-		.c = {
-			.left	= mt9m001->rect.left,
-			.top	= mt9m001->rect.top,
-			.width	= mf->width,
-			.height	= mf->height,
-		},
+	struct v4l2_subdev_selection sel = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.target = V4L2_SEL_TGT_CROP,
+		.r.left = mt9m001->rect.left,
+		.r.top = mt9m001->rect.top,
+		.r.width = mf->width,
+		.r.height = mf->height,
 	};
 	int ret;
 
 	/* No support for scaling so far, just crop. TODO: use skipping */
-	ret = mt9m001_s_crop(sd, &a);
+	ret = mt9m001_set_selection(sd, NULL, &sel);
 	if (!ret) {
 		mf->width	= mt9m001->rect.width;
 		mf->height	= mt9m001->rect.height;
@@ -625,9 +632,6 @@
 
 static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = {
 	.s_stream	= mt9m001_s_stream,
-	.s_crop		= mt9m001_s_crop,
-	.g_crop		= mt9m001_g_crop,
-	.cropcap	= mt9m001_cropcap,
 	.g_mbus_config	= mt9m001_g_mbus_config,
 	.s_mbus_config	= mt9m001_s_mbus_config,
 };
@@ -638,6 +642,8 @@
 
 static const struct v4l2_subdev_pad_ops mt9m001_subdev_pad_ops = {
 	.enum_mbus_code = mt9m001_enum_mbus_code,
+	.get_selection	= mt9m001_get_selection,
+	.set_selection	= mt9m001_set_selection,
 	.get_fmt	= mt9m001_get_fmt,
 	.set_fmt	= mt9m001_set_fmt,
 };
diff --git a/drivers/media/i2c/soc_camera/mt9t031.c b/drivers/media/i2c/soc_camera/mt9t031.c
index 5c8e3ff..3aa5569 100644
--- a/drivers/media/i2c/soc_camera/mt9t031.c
+++ b/drivers/media/i2c/soc_camera/mt9t031.c
@@ -264,7 +264,7 @@
 
 	/*
 	 * The caller provides a supported format, as guaranteed by
-	 * .set_fmt(FORMAT_TRY), soc_camera_s_crop() and soc_camera_cropcap()
+	 * .set_fmt(FORMAT_TRY), soc_camera_s_selection() and soc_camera_cropcap()
 	 */
 	if (ret >= 0)
 		ret = reg_write(client, MT9T031_COLUMN_START, rect->left);
@@ -294,11 +294,17 @@
 	return ret < 0 ? ret : 0;
 }
 
-static int mt9t031_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+static int mt9t031_set_selection(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_selection *sel)
 {
-	struct v4l2_rect rect = a->c;
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct mt9t031 *mt9t031 = to_mt9t031(client);
+	struct v4l2_rect rect = sel->r;
+
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+	    sel->target != V4L2_SEL_TGT_CROP)
+		return -EINVAL;
 
 	rect.width = ALIGN(rect.width, 2);
 	rect.height = ALIGN(rect.height, 2);
@@ -312,29 +318,30 @@
 	return mt9t031_set_params(client, &rect, mt9t031->xskip, mt9t031->yskip);
 }
 
-static int mt9t031_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int mt9t031_get_selection(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_selection *sel)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct mt9t031 *mt9t031 = to_mt9t031(client);
 
-	a->c	= mt9t031->rect;
-	a->type	= V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+		return -EINVAL;
 
-	return 0;
-}
-
-static int mt9t031_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
-	a->bounds.left			= MT9T031_COLUMN_SKIP;
-	a->bounds.top			= MT9T031_ROW_SKIP;
-	a->bounds.width			= MT9T031_MAX_WIDTH;
-	a->bounds.height		= MT9T031_MAX_HEIGHT;
-	a->defrect			= a->bounds;
-	a->type				= V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	a->pixelaspect.numerator	= 1;
-	a->pixelaspect.denominator	= 1;
-
-	return 0;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		sel->r.left = MT9T031_COLUMN_SKIP;
+		sel->r.top = MT9T031_ROW_SKIP;
+		sel->r.width = MT9T031_MAX_WIDTH;
+		sel->r.height = MT9T031_MAX_HEIGHT;
+		return 0;
+	case V4L2_SEL_TGT_CROP:
+		sel->r = mt9t031->rect;
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 static int mt9t031_get_fmt(struct v4l2_subdev *sd,
@@ -721,9 +728,6 @@
 
 static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = {
 	.s_stream	= mt9t031_s_stream,
-	.s_crop		= mt9t031_s_crop,
-	.g_crop		= mt9t031_g_crop,
-	.cropcap	= mt9t031_cropcap,
 	.g_mbus_config	= mt9t031_g_mbus_config,
 	.s_mbus_config	= mt9t031_s_mbus_config,
 };
@@ -734,6 +738,8 @@
 
 static const struct v4l2_subdev_pad_ops mt9t031_subdev_pad_ops = {
 	.enum_mbus_code = mt9t031_enum_mbus_code,
+	.get_selection	= mt9t031_get_selection,
+	.set_selection	= mt9t031_set_selection,
 	.get_fmt	= mt9t031_get_fmt,
 	.set_fmt	= mt9t031_set_fmt,
 };
diff --git a/drivers/media/i2c/soc_camera/mt9t112.c b/drivers/media/i2c/soc_camera/mt9t112.c
index 6a1b2a9..2ef2224 100644
--- a/drivers/media/i2c/soc_camera/mt9t112.c
+++ b/drivers/media/i2c/soc_camera/mt9t112.c
@@ -867,39 +867,48 @@
 	return 0;
 }
 
-static int mt9t112_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
-	a->bounds.left			= 0;
-	a->bounds.top			= 0;
-	a->bounds.width			= MAX_WIDTH;
-	a->bounds.height		= MAX_HEIGHT;
-	a->defrect.left			= 0;
-	a->defrect.top			= 0;
-	a->defrect.width		= VGA_WIDTH;
-	a->defrect.height		= VGA_HEIGHT;
-	a->type				= V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	a->pixelaspect.numerator	= 1;
-	a->pixelaspect.denominator	= 1;
-
-	return 0;
-}
-
-static int mt9t112_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int mt9t112_get_selection(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_selection *sel)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct mt9t112_priv *priv = to_mt9t112(client);
 
-	a->c	= priv->frame;
-	a->type	= V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+		return -EINVAL;
 
-	return 0;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = MAX_WIDTH;
+		sel->r.height = MAX_HEIGHT;
+		return 0;
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = VGA_WIDTH;
+		sel->r.height = VGA_HEIGHT;
+		return 0;
+	case V4L2_SEL_TGT_CROP:
+		sel->r = priv->frame;
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
-static int mt9t112_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+static int mt9t112_set_selection(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_selection *sel)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct mt9t112_priv *priv = to_mt9t112(client);
-	const struct v4l2_rect *rect = &a->c;
+	const struct v4l2_rect *rect = &sel->r;
+
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+	    sel->target != V4L2_SEL_TGT_CROP)
+		return -EINVAL;
 
 	return mt9t112_set_params(priv, rect, priv->format->code);
 }
@@ -1024,15 +1033,14 @@
 
 static struct v4l2_subdev_video_ops mt9t112_subdev_video_ops = {
 	.s_stream	= mt9t112_s_stream,
-	.cropcap	= mt9t112_cropcap,
-	.g_crop		= mt9t112_g_crop,
-	.s_crop		= mt9t112_s_crop,
 	.g_mbus_config	= mt9t112_g_mbus_config,
 	.s_mbus_config	= mt9t112_s_mbus_config,
 };
 
 static const struct v4l2_subdev_pad_ops mt9t112_subdev_pad_ops = {
 	.enum_mbus_code = mt9t112_enum_mbus_code,
+	.get_selection	= mt9t112_get_selection,
+	.set_selection	= mt9t112_set_selection,
 	.get_fmt	= mt9t112_get_fmt,
 	.set_fmt	= mt9t112_set_fmt,
 };
diff --git a/drivers/media/i2c/soc_camera/mt9v022.c b/drivers/media/i2c/soc_camera/mt9v022.c
index 2721e58..6a14ab5 100644
--- a/drivers/media/i2c/soc_camera/mt9v022.c
+++ b/drivers/media/i2c/soc_camera/mt9v022.c
@@ -276,14 +276,20 @@
 	return 0;
 }
 
-static int mt9v022_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+static int mt9v022_set_selection(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_selection *sel)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct mt9v022 *mt9v022 = to_mt9v022(client);
-	struct v4l2_rect rect = a->c;
+	struct v4l2_rect rect = sel->r;
 	int min_row, min_blank;
 	int ret;
 
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+	    sel->target != V4L2_SEL_TGT_CROP)
+		return -EINVAL;
+
 	/* Bayer format - even size lengths */
 	if (mt9v022->fmts == mt9v022_colour_fmts) {
 		rect.width	= ALIGN(rect.width, 2);
@@ -350,29 +356,30 @@
 	return 0;
 }
 
-static int mt9v022_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int mt9v022_get_selection(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_selection *sel)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct mt9v022 *mt9v022 = to_mt9v022(client);
 
-	a->c	= mt9v022->rect;
-	a->type	= V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+		return -EINVAL;
 
-	return 0;
-}
-
-static int mt9v022_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
-	a->bounds.left			= MT9V022_COLUMN_SKIP;
-	a->bounds.top			= MT9V022_ROW_SKIP;
-	a->bounds.width			= MT9V022_MAX_WIDTH;
-	a->bounds.height		= MT9V022_MAX_HEIGHT;
-	a->defrect			= a->bounds;
-	a->type				= V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	a->pixelaspect.numerator	= 1;
-	a->pixelaspect.denominator	= 1;
-
-	return 0;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		sel->r.left = MT9V022_COLUMN_SKIP;
+		sel->r.top = MT9V022_ROW_SKIP;
+		sel->r.width = MT9V022_MAX_WIDTH;
+		sel->r.height = MT9V022_MAX_HEIGHT;
+		return 0;
+	case V4L2_SEL_TGT_CROP:
+		sel->r = mt9v022->rect;
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 static int mt9v022_get_fmt(struct v4l2_subdev *sd,
@@ -400,13 +407,13 @@
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct mt9v022 *mt9v022 = to_mt9v022(client);
-	struct v4l2_crop a = {
-		.c = {
-			.left	= mt9v022->rect.left,
-			.top	= mt9v022->rect.top,
-			.width	= mf->width,
-			.height	= mf->height,
-		},
+	struct v4l2_subdev_selection sel = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.target = V4L2_SEL_TGT_CROP,
+		.r.left = mt9v022->rect.left,
+		.r.top = mt9v022->rect.top,
+		.r.width = mf->width,
+		.r.height = mf->height,
 	};
 	int ret;
 
@@ -430,7 +437,7 @@
 	}
 
 	/* No support for scaling on this camera, just crop. */
-	ret = mt9v022_s_crop(sd, &a);
+	ret = mt9v022_set_selection(sd, NULL, &sel);
 	if (!ret) {
 		mf->width	= mt9v022->rect.width;
 		mf->height	= mt9v022->rect.height;
@@ -853,9 +860,6 @@
 
 static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = {
 	.s_stream	= mt9v022_s_stream,
-	.s_crop		= mt9v022_s_crop,
-	.g_crop		= mt9v022_g_crop,
-	.cropcap	= mt9v022_cropcap,
 	.g_mbus_config	= mt9v022_g_mbus_config,
 	.s_mbus_config	= mt9v022_s_mbus_config,
 };
@@ -866,6 +870,8 @@
 
 static const struct v4l2_subdev_pad_ops mt9v022_subdev_pad_ops = {
 	.enum_mbus_code = mt9v022_enum_mbus_code,
+	.get_selection	= mt9v022_get_selection,
+	.set_selection	= mt9v022_set_selection,
 	.get_fmt	= mt9v022_get_fmt,
 	.set_fmt	= mt9v022_set_fmt,
 };
diff --git a/drivers/media/i2c/soc_camera/ov2640.c b/drivers/media/i2c/soc_camera/ov2640.c
index 9b4f5de..56de182 100644
--- a/drivers/media/i2c/soc_camera/ov2640.c
+++ b/drivers/media/i2c/soc_camera/ov2640.c
@@ -928,29 +928,25 @@
 	return 0;
 }
 
-static int ov2640_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int ov2640_get_selection(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_selection *sel)
 {
-	a->c.left	= 0;
-	a->c.top	= 0;
-	a->c.width	= UXGA_WIDTH;
-	a->c.height	= UXGA_HEIGHT;
-	a->type		= V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+		return -EINVAL;
 
-	return 0;
-}
-
-static int ov2640_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
-	a->bounds.left			= 0;
-	a->bounds.top			= 0;
-	a->bounds.width			= UXGA_WIDTH;
-	a->bounds.height		= UXGA_HEIGHT;
-	a->defrect			= a->bounds;
-	a->type				= V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	a->pixelaspect.numerator	= 1;
-	a->pixelaspect.denominator	= 1;
-
-	return 0;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+	case V4L2_SEL_TGT_CROP:
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = UXGA_WIDTH;
+		sel->r.height = UXGA_HEIGHT;
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 static int ov2640_video_probe(struct i2c_client *client)
@@ -1024,13 +1020,12 @@
 
 static struct v4l2_subdev_video_ops ov2640_subdev_video_ops = {
 	.s_stream	= ov2640_s_stream,
-	.cropcap	= ov2640_cropcap,
-	.g_crop		= ov2640_g_crop,
 	.g_mbus_config	= ov2640_g_mbus_config,
 };
 
 static const struct v4l2_subdev_pad_ops ov2640_subdev_pad_ops = {
 	.enum_mbus_code = ov2640_enum_mbus_code,
+	.get_selection	= ov2640_get_selection,
 	.get_fmt	= ov2640_get_fmt,
 	.set_fmt	= ov2640_set_fmt,
 };
diff --git a/drivers/media/i2c/soc_camera/ov5642.c b/drivers/media/i2c/soc_camera/ov5642.c
index bab9ac0..3d185bd 100644
--- a/drivers/media/i2c/soc_camera/ov5642.c
+++ b/drivers/media/i2c/soc_camera/ov5642.c
@@ -850,13 +850,19 @@
 	return 0;
 }
 
-static int ov5642_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+static int ov5642_set_selection(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_selection *sel)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct ov5642 *priv = to_ov5642(client);
-	struct v4l2_rect rect = a->c;
+	struct v4l2_rect rect = sel->r;
 	int ret;
 
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+	    sel->target != V4L2_SEL_TGT_CROP)
+		return -EINVAL;
+
 	v4l_bound_align_image(&rect.width, 48, OV5642_MAX_WIDTH, 1,
 			      &rect.height, 32, OV5642_MAX_HEIGHT, 1, 0);
 
@@ -878,32 +884,30 @@
 	return ret;
 }
 
-static int ov5642_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int ov5642_get_selection(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_selection *sel)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct ov5642 *priv = to_ov5642(client);
-	struct v4l2_rect *rect = &a->c;
 
-	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
 		return -EINVAL;
 
-	*rect = priv->crop_rect;
-
-	return 0;
-}
-
-static int ov5642_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
-	a->bounds.left			= 0;
-	a->bounds.top			= 0;
-	a->bounds.width			= OV5642_MAX_WIDTH;
-	a->bounds.height		= OV5642_MAX_HEIGHT;
-	a->defrect			= a->bounds;
-	a->type				= V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	a->pixelaspect.numerator	= 1;
-	a->pixelaspect.denominator	= 1;
-
-	return 0;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = OV5642_MAX_WIDTH;
+		sel->r.height = OV5642_MAX_HEIGHT;
+		return 0;
+	case V4L2_SEL_TGT_CROP:
+		sel->r = priv->crop_rect;
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 static int ov5642_g_mbus_config(struct v4l2_subdev *sd,
@@ -940,14 +944,13 @@
 }
 
 static struct v4l2_subdev_video_ops ov5642_subdev_video_ops = {
-	.s_crop		= ov5642_s_crop,
-	.g_crop		= ov5642_g_crop,
-	.cropcap	= ov5642_cropcap,
 	.g_mbus_config	= ov5642_g_mbus_config,
 };
 
 static const struct v4l2_subdev_pad_ops ov5642_subdev_pad_ops = {
 	.enum_mbus_code = ov5642_enum_mbus_code,
+	.get_selection	= ov5642_get_selection,
+	.set_selection	= ov5642_set_selection,
 	.get_fmt	= ov5642_get_fmt,
 	.set_fmt	= ov5642_set_fmt,
 };
diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/soc_camera/ov6650.c
index 1f8af1e..4bf2995 100644
--- a/drivers/media/i2c/soc_camera/ov6650.c
+++ b/drivers/media/i2c/soc_camera/ov6650.c
@@ -432,25 +432,43 @@
 	return soc_camera_set_power(&client->dev, ssdd, priv->clk, on);
 }
 
-static int ov6650_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int ov6650_get_selection(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_selection *sel)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct ov6650 *priv = to_ov6650(client);
 
-	a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	a->c = priv->rect;
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+		return -EINVAL;
 
-	return 0;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		sel->r.left = DEF_HSTRT << 1;
+		sel->r.top = DEF_VSTRT << 1;
+		sel->r.width = W_CIF;
+		sel->r.height = H_CIF;
+		return 0;
+	case V4L2_SEL_TGT_CROP:
+		sel->r = priv->rect;
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
-static int ov6650_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+static int ov6650_set_selection(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_selection *sel)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct ov6650 *priv = to_ov6650(client);
-	struct v4l2_rect rect = a->c;
+	struct v4l2_rect rect = sel->r;
 	int ret;
 
-	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+	    sel->target != V4L2_SEL_TGT_CROP)
 		return -EINVAL;
 
 	rect.left   = ALIGN(rect.left,   2);
@@ -483,22 +501,6 @@
 	return ret;
 }
 
-static int ov6650_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
-	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
-		return -EINVAL;
-
-	a->bounds.left			= DEF_HSTRT << 1;
-	a->bounds.top			= DEF_VSTRT << 1;
-	a->bounds.width			= W_CIF;
-	a->bounds.height		= H_CIF;
-	a->defrect			= a->bounds;
-	a->pixelaspect.numerator	= 1;
-	a->pixelaspect.denominator	= 1;
-
-	return 0;
-}
-
 static int ov6650_get_fmt(struct v4l2_subdev *sd,
 		struct v4l2_subdev_pad_config *cfg,
 		struct v4l2_subdev_format *format)
@@ -549,16 +551,15 @@
 	struct soc_camera_sense *sense = icd->sense;
 	struct ov6650 *priv = to_ov6650(client);
 	bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
-	struct v4l2_crop a = {
-		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
-		.c = {
-			.left	= priv->rect.left + (priv->rect.width >> 1) -
-					(mf->width >> (1 - half_scale)),
-			.top	= priv->rect.top + (priv->rect.height >> 1) -
-					(mf->height >> (1 - half_scale)),
-			.width	= mf->width << half_scale,
-			.height	= mf->height << half_scale,
-		},
+	struct v4l2_subdev_selection sel = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.target = V4L2_SEL_TGT_CROP,
+		.r.left = priv->rect.left + (priv->rect.width >> 1) -
+			(mf->width >> (1 - half_scale)),
+		.r.top = priv->rect.top + (priv->rect.height >> 1) -
+			(mf->height >> (1 - half_scale)),
+		.r.width = mf->width << half_scale,
+		.r.height = mf->height << half_scale,
 	};
 	u32 code = mf->code;
 	unsigned long mclk, pclk;
@@ -672,7 +673,7 @@
 	dev_dbg(&client->dev, "pixel clock divider: %ld.%ld\n",
 			mclk / pclk, 10 * mclk % pclk / pclk);
 
-	ret = ov6650_s_crop(sd, &a);
+	ret = ov6650_set_selection(sd, NULL, &sel);
 	if (!ret)
 		ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
 	if (!ret)
@@ -943,9 +944,6 @@
 
 static struct v4l2_subdev_video_ops ov6650_video_ops = {
 	.s_stream	= ov6650_s_stream,
-	.cropcap	= ov6650_cropcap,
-	.g_crop		= ov6650_g_crop,
-	.s_crop		= ov6650_s_crop,
 	.g_parm		= ov6650_g_parm,
 	.s_parm		= ov6650_s_parm,
 	.g_mbus_config	= ov6650_g_mbus_config,
@@ -954,6 +952,8 @@
 
 static const struct v4l2_subdev_pad_ops ov6650_pad_ops = {
 	.enum_mbus_code = ov6650_enum_mbus_code,
+	.get_selection	= ov6650_get_selection,
+	.set_selection	= ov6650_set_selection,
 	.get_fmt	= ov6650_get_fmt,
 	.set_fmt	= ov6650_set_fmt,
 };
diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c
index a43410c..7e68762 100644
--- a/drivers/media/i2c/soc_camera/ov772x.c
+++ b/drivers/media/i2c/soc_camera/ov772x.c
@@ -851,29 +851,28 @@
 	return ret;
 }
 
-static int ov772x_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int ov772x_get_selection(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_selection *sel)
 {
-	a->c.left	= 0;
-	a->c.top	= 0;
-	a->c.width	= VGA_WIDTH;
-	a->c.height	= VGA_HEIGHT;
-	a->type		= V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+		return -EINVAL;
 
-	return 0;
-}
-
-static int ov772x_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
-	a->bounds.left			= 0;
-	a->bounds.top			= 0;
-	a->bounds.width			= OV772X_MAX_WIDTH;
-	a->bounds.height		= OV772X_MAX_HEIGHT;
-	a->defrect			= a->bounds;
-	a->type				= V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	a->pixelaspect.numerator	= 1;
-	a->pixelaspect.denominator	= 1;
-
-	return 0;
+	sel->r.left = 0;
+	sel->r.top = 0;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		sel->r.width = OV772X_MAX_WIDTH;
+		sel->r.height = OV772X_MAX_HEIGHT;
+		return 0;
+	case V4L2_SEL_TGT_CROP:
+		sel->r.width = VGA_WIDTH;
+		sel->r.height = VGA_HEIGHT;
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 static int ov772x_get_fmt(struct v4l2_subdev *sd,
@@ -1030,13 +1029,12 @@
 
 static struct v4l2_subdev_video_ops ov772x_subdev_video_ops = {
 	.s_stream	= ov772x_s_stream,
-	.cropcap	= ov772x_cropcap,
-	.g_crop		= ov772x_g_crop,
 	.g_mbus_config	= ov772x_g_mbus_config,
 };
 
 static const struct v4l2_subdev_pad_ops ov772x_subdev_pad_ops = {
 	.enum_mbus_code = ov772x_enum_mbus_code,
+	.get_selection	= ov772x_get_selection,
 	.get_fmt	= ov772x_get_fmt,
 	.set_fmt	= ov772x_set_fmt,
 };
diff --git a/drivers/media/i2c/soc_camera/ov9640.c b/drivers/media/i2c/soc_camera/ov9640.c
index 8caae1c..8c93c57 100644
--- a/drivers/media/i2c/soc_camera/ov9640.c
+++ b/drivers/media/i2c/soc_camera/ov9640.c
@@ -561,29 +561,25 @@
 	return 0;
 }
 
-static int ov9640_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int ov9640_get_selection(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_selection *sel)
 {
-	a->c.left	= 0;
-	a->c.top	= 0;
-	a->c.width	= W_SXGA;
-	a->c.height	= H_SXGA;
-	a->type		= V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+		return -EINVAL;
 
-	return 0;
-}
-
-static int ov9640_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
-	a->bounds.left			= 0;
-	a->bounds.top			= 0;
-	a->bounds.width			= W_SXGA;
-	a->bounds.height		= H_SXGA;
-	a->defrect			= a->bounds;
-	a->type				= V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	a->pixelaspect.numerator	= 1;
-	a->pixelaspect.denominator	= 1;
-
-	return 0;
+	sel->r.left = 0;
+	sel->r.top = 0;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+	case V4L2_SEL_TGT_CROP:
+		sel->r.width = W_SXGA;
+		sel->r.height = H_SXGA;
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 static int ov9640_video_probe(struct i2c_client *client)
@@ -667,13 +663,12 @@
 
 static struct v4l2_subdev_video_ops ov9640_video_ops = {
 	.s_stream	= ov9640_s_stream,
-	.cropcap	= ov9640_cropcap,
-	.g_crop		= ov9640_g_crop,
 	.g_mbus_config	= ov9640_g_mbus_config,
 };
 
 static const struct v4l2_subdev_pad_ops ov9640_pad_ops = {
 	.enum_mbus_code = ov9640_enum_mbus_code,
+	.get_selection	= ov9640_get_selection,
 	.set_fmt	= ov9640_set_fmt,
 };
 
diff --git a/drivers/media/i2c/soc_camera/ov9740.c b/drivers/media/i2c/soc_camera/ov9740.c
index 03a7fc7..0da632d 100644
--- a/drivers/media/i2c/soc_camera/ov9740.c
+++ b/drivers/media/i2c/soc_camera/ov9740.c
@@ -737,29 +737,25 @@
 	return 0;
 }
 
-static int ov9740_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+static int ov9740_get_selection(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_selection *sel)
 {
-	a->bounds.left		= 0;
-	a->bounds.top		= 0;
-	a->bounds.width		= OV9740_MAX_WIDTH;
-	a->bounds.height	= OV9740_MAX_HEIGHT;
-	a->defrect		= a->bounds;
-	a->type			= V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	a->pixelaspect.numerator	= 1;
-	a->pixelaspect.denominator	= 1;
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+		return -EINVAL;
 
-	return 0;
-}
-
-static int ov9740_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
-{
-	a->c.left		= 0;
-	a->c.top		= 0;
-	a->c.width		= OV9740_MAX_WIDTH;
-	a->c.height		= OV9740_MAX_HEIGHT;
-	a->type			= V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-	return 0;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+	case V4L2_SEL_TGT_CROP:
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = OV9740_MAX_WIDTH;
+		sel->r.height = OV9740_MAX_HEIGHT;
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 /* Set status of additional camera capabilities */
@@ -914,8 +910,6 @@
 
 static struct v4l2_subdev_video_ops ov9740_video_ops = {
 	.s_stream	= ov9740_s_stream,
-	.cropcap	= ov9740_cropcap,
-	.g_crop		= ov9740_g_crop,
 	.g_mbus_config	= ov9740_g_mbus_config,
 };
 
@@ -929,6 +923,7 @@
 
 static const struct v4l2_subdev_pad_ops ov9740_pad_ops = {
 	.enum_mbus_code = ov9740_enum_mbus_code,
+	.get_selection	= ov9740_get_selection,
 	.set_fmt	= ov9740_set_fmt,
 };
 
diff --git a/drivers/media/i2c/soc_camera/rj54n1cb0c.c b/drivers/media/i2c/soc_camera/rj54n1cb0c.c
index aa7bfbb..bc8ec59 100644
--- a/drivers/media/i2c/soc_camera/rj54n1cb0c.c
+++ b/drivers/media/i2c/soc_camera/rj54n1cb0c.c
@@ -538,15 +538,21 @@
 static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
 			       s32 *out_w, s32 *out_h);
 
-static int rj54n1_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+static int rj54n1_set_selection(struct v4l2_subdev *sd,
+				struct v4l2_subdev_pad_config *cfg,
+				struct v4l2_subdev_selection *sel)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct rj54n1 *rj54n1 = to_rj54n1(client);
-	const struct v4l2_rect *rect = &a->c;
+	const struct v4l2_rect *rect = &sel->r;
 	int dummy = 0, output_w, output_h,
 		input_w = rect->width, input_h = rect->height;
 	int ret;
 
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+	    sel->target != V4L2_SEL_TGT_CROP)
+		return -EINVAL;
+
 	/* arbitrary minimum width and height, edges unimportant */
 	soc_camera_limit_side(&dummy, &input_w,
 		     RJ54N1_COLUMN_SKIP, 8, RJ54N1_MAX_WIDTH);
@@ -573,29 +579,30 @@
 	return 0;
 }
 
-static int rj54n1_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int rj54n1_get_selection(struct v4l2_subdev *sd,
+				struct v4l2_subdev_pad_config *cfg,
+				struct v4l2_subdev_selection *sel)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct rj54n1 *rj54n1 = to_rj54n1(client);
 
-	a->c	= rj54n1->rect;
-	a->type	= V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+		return -EINVAL;
 
-	return 0;
-}
-
-static int rj54n1_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
-	a->bounds.left			= RJ54N1_COLUMN_SKIP;
-	a->bounds.top			= RJ54N1_ROW_SKIP;
-	a->bounds.width			= RJ54N1_MAX_WIDTH;
-	a->bounds.height		= RJ54N1_MAX_HEIGHT;
-	a->defrect			= a->bounds;
-	a->type				= V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	a->pixelaspect.numerator	= 1;
-	a->pixelaspect.denominator	= 1;
-
-	return 0;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		sel->r.left = RJ54N1_COLUMN_SKIP;
+		sel->r.top = RJ54N1_ROW_SKIP;
+		sel->r.width = RJ54N1_MAX_WIDTH;
+		sel->r.height = RJ54N1_MAX_HEIGHT;
+		return 0;
+	case V4L2_SEL_TGT_CROP:
+		sel->r = rj54n1->rect;
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 static int rj54n1_get_fmt(struct v4l2_subdev *sd,
@@ -1246,15 +1253,14 @@
 
 static struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = {
 	.s_stream	= rj54n1_s_stream,
-	.g_crop		= rj54n1_g_crop,
-	.s_crop		= rj54n1_s_crop,
-	.cropcap	= rj54n1_cropcap,
 	.g_mbus_config	= rj54n1_g_mbus_config,
 	.s_mbus_config	= rj54n1_s_mbus_config,
 };
 
 static const struct v4l2_subdev_pad_ops rj54n1_subdev_pad_ops = {
 	.enum_mbus_code = rj54n1_enum_mbus_code,
+	.get_selection	= rj54n1_get_selection,
+	.set_selection	= rj54n1_set_selection,
 	.get_fmt	= rj54n1_get_fmt,
 	.set_fmt	= rj54n1_set_fmt,
 };
diff --git a/drivers/media/i2c/soc_camera/tw9910.c b/drivers/media/i2c/soc_camera/tw9910.c
index 06aff81..4002c07 100644
--- a/drivers/media/i2c/soc_camera/tw9910.c
+++ b/drivers/media/i2c/soc_camera/tw9910.c
@@ -676,44 +676,28 @@
 	return ret;
 }
 
-static int tw9910_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int tw9910_get_selection(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_selection *sel)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	struct tw9910_priv *priv = to_tw9910(client);
 
-	a->c.left	= 0;
-	a->c.top	= 0;
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+		return -EINVAL;
+	/* Only CROP, CROP_DEFAULT and CROP_BOUNDS are supported */
+	if (sel->target > V4L2_SEL_TGT_CROP_BOUNDS)
+		return -EINVAL;
+
+	sel->r.left	= 0;
+	sel->r.top	= 0;
 	if (priv->norm & V4L2_STD_NTSC) {
-		a->c.width	= 640;
-		a->c.height	= 480;
+		sel->r.width	= 640;
+		sel->r.height	= 480;
 	} else {
-		a->c.width	= 768;
-		a->c.height	= 576;
+		sel->r.width	= 768;
+		sel->r.height	= 576;
 	}
-	a->type		= V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-	return 0;
-}
-
-static int tw9910_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
-	struct i2c_client *client = v4l2_get_subdevdata(sd);
-	struct tw9910_priv *priv = to_tw9910(client);
-
-	a->bounds.left			= 0;
-	a->bounds.top			= 0;
-	if (priv->norm & V4L2_STD_NTSC) {
-		a->bounds.width		= 640;
-		a->bounds.height	= 480;
-	} else {
-		a->bounds.width		= 768;
-		a->bounds.height	= 576;
-	}
-	a->defrect			= a->bounds;
-	a->type				= V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	a->pixelaspect.numerator	= 1;
-	a->pixelaspect.denominator	= 1;
-
 	return 0;
 }
 
@@ -921,8 +905,6 @@
 	.s_std		= tw9910_s_std,
 	.g_std		= tw9910_g_std,
 	.s_stream	= tw9910_s_stream,
-	.cropcap	= tw9910_cropcap,
-	.g_crop		= tw9910_g_crop,
 	.g_mbus_config	= tw9910_g_mbus_config,
 	.s_mbus_config	= tw9910_s_mbus_config,
 	.g_tvnorms	= tw9910_g_tvnorms,
@@ -930,6 +912,7 @@
 
 static const struct v4l2_subdev_pad_ops tw9910_subdev_pad_ops = {
 	.enum_mbus_code = tw9910_enum_mbus_code,
+	.get_selection	= tw9910_get_selection,
 	.get_fmt	= tw9910_get_fmt,
 	.set_fmt	= tw9910_set_fmt,
 };
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index 73fc42b..42340e3 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -499,7 +499,6 @@
 
 static struct i2c_driver ths8200_driver = {
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "ths8200",
 		.of_match_table = of_match_ptr(ths8200_of_match),
 	},
diff --git a/drivers/media/i2c/tlv320aic23b.c b/drivers/media/i2c/tlv320aic23b.c
index 0370dd8..2e06c06 100644
--- a/drivers/media/i2c/tlv320aic23b.c
+++ b/drivers/media/i2c/tlv320aic23b.c
@@ -210,7 +210,6 @@
 
 static struct i2c_driver tlv320aic23b_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "tlv320aic23b",
 	},
 	.probe		= tlv320aic23b_probe,
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index 7cdd948..d5c9347 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -977,7 +977,7 @@
 	.pad = &tvp514x_pad_ops,
 };
 
-static struct tvp514x_decoder tvp514x_dev = {
+static const struct tvp514x_decoder tvp514x_dev = {
 	.streaming = 0,
 	.fmt_list = tvp514x_fmt_list,
 	.num_fmts = ARRAY_SIZE(tvp514x_fmt_list),
@@ -1233,7 +1233,6 @@
 static struct i2c_driver tvp514x_driver = {
 	.driver = {
 		.of_match_table = of_match_ptr(tvp514x_of_match),
-		.owner = THIS_MODULE,
 		.name = TVP514X_MODULE_NAME,
 	},
 	.probe = tvp514x_probe,
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 0b6d46c..4740da3 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -871,19 +871,22 @@
 	return 0;
 }
 
-static int tvp5150_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+static int tvp5150_set_selection(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_pad_config *cfg,
+				 struct v4l2_subdev_selection *sel)
 {
-	struct v4l2_rect rect = a->c;
 	struct tvp5150 *decoder = to_tvp5150(sd);
+	struct v4l2_rect rect = sel->r;
 	v4l2_std_id std;
-	unsigned int hmax;
+	int hmax;
+
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+	    sel->target != V4L2_SEL_TGT_CROP)
+		return -EINVAL;
 
 	v4l2_dbg(1, debug, sd, "%s left=%d, top=%d, width=%d, height=%d\n",
 		__func__, rect.left, rect.top, rect.width, rect.height);
 
-	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
-		return -EINVAL;
-
 	/* tvp5150 has some special limits */
 	rect.left = clamp(rect.left, 0, TVP5150_MAX_CROP_LEFT);
 	rect.width = clamp_t(unsigned int, rect.width,
@@ -924,44 +927,39 @@
 	return 0;
 }
 
-static int tvp5150_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int tvp5150_get_selection(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_pad_config *cfg,
+				 struct v4l2_subdev_selection *sel)
 {
-	struct tvp5150 *decoder = to_tvp5150(sd);
-
-	a->c	= decoder->rect;
-	a->type	= V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-	return 0;
-}
-
-static int tvp5150_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
-	struct tvp5150 *decoder = to_tvp5150(sd);
+	struct tvp5150 *decoder = container_of(sd, struct tvp5150, sd);
 	v4l2_std_id std;
 
-	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
 		return -EINVAL;
 
-	a->bounds.left			= 0;
-	a->bounds.top			= 0;
-	a->bounds.width			= TVP5150_H_MAX;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = TVP5150_H_MAX;
 
-	/* Calculate height based on current standard */
-	if (decoder->norm == V4L2_STD_ALL)
-		std = tvp5150_read_std(sd);
-	else
-		std = decoder->norm;
-
-	if (std & V4L2_STD_525_60)
-		a->bounds.height = TVP5150_V_MAX_525_60;
-	else
-		a->bounds.height = TVP5150_V_MAX_OTHERS;
-
-	a->defrect			= a->bounds;
-	a->pixelaspect.numerator	= 1;
-	a->pixelaspect.denominator	= 1;
-
-	return 0;
+		/* Calculate height based on current standard */
+		if (decoder->norm == V4L2_STD_ALL)
+			std = tvp5150_read_std(sd);
+		else
+			std = decoder->norm;
+		if (std & V4L2_STD_525_60)
+			sel->r.height = TVP5150_V_MAX_525_60;
+		else
+			sel->r.height = TVP5150_V_MAX_OTHERS;
+		return 0;
+	case V4L2_SEL_TGT_CROP:
+		sel->r = decoder->rect;
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 static int tvp5150_g_mbus_config(struct v4l2_subdev *sd,
@@ -1173,7 +1171,7 @@
 	return 0;
 }
 
-static int tvp5150_registered_async(struct v4l2_subdev *sd)
+static int tvp5150_registered(struct v4l2_subdev *sd)
 {
 #ifdef CONFIG_MEDIA_CONTROLLER
 	struct tvp5150 *decoder = to_tvp5150(sd);
@@ -1222,7 +1220,6 @@
 	.g_register = tvp5150_g_register,
 	.s_register = tvp5150_s_register,
 #endif
-	.registered_async = tvp5150_registered_async,
 };
 
 static const struct v4l2_subdev_tuner_ops tvp5150_tuner_ops = {
@@ -1233,9 +1230,6 @@
 	.s_std = tvp5150_s_std,
 	.s_stream = tvp5150_s_stream,
 	.s_routing = tvp5150_s_routing,
-	.s_crop = tvp5150_s_crop,
-	.g_crop = tvp5150_g_crop,
-	.cropcap = tvp5150_cropcap,
 	.g_mbus_config = tvp5150_g_mbus_config,
 };
 
@@ -1251,6 +1245,8 @@
 	.enum_frame_size = tvp5150_enum_frame_size,
 	.set_fmt = tvp5150_fill_fmt,
 	.get_fmt = tvp5150_fill_fmt,
+	.get_selection = tvp5150_get_selection,
+	.set_selection = tvp5150_set_selection,
 };
 
 static const struct v4l2_subdev_ops tvp5150_ops = {
@@ -1261,6 +1257,10 @@
 	.pad = &tvp5150_pad_ops,
 };
 
+static const struct v4l2_subdev_internal_ops tvp5150_internal_ops = {
+	.registered = tvp5150_registered,
+};
+
 
 /****************************************************************************
 			I2C Client & Driver
@@ -1474,6 +1474,7 @@
 	}
 
 	v4l2_i2c_subdev_init(sd, c, &tvp5150_ops);
+	sd->internal_ops = &tvp5150_internal_ops;
 	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
 
 #if defined(CONFIG_MEDIA_CONTROLLER)
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 4df640c..3dc3341 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -1086,7 +1086,6 @@
 static struct i2c_driver tvp7002_driver = {
 	.driver = {
 		.of_match_table = of_match_ptr(tvp7002_of_match),
-		.owner = THIS_MODULE,
 		.name = TVP7002_MODULE_NAME,
 	},
 	.probe = tvp7002_probe,
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index 4c72a18..be4cb7a 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -863,7 +863,6 @@
 
 static struct i2c_driver vs6624_driver = {
 	.driver = {
-		.owner  = THIS_MODULE,
 		.name   = "vs6624",
 	},
 	.probe          = vs6624_probe,
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 1795abe..2783531 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -59,27 +59,24 @@
 }
 
 static int media_device_get_info(struct media_device *dev,
-				 struct media_device_info __user *__info)
+				 struct media_device_info *info)
 {
-	struct media_device_info info;
-
-	memset(&info, 0, sizeof(info));
+	memset(info, 0, sizeof(*info));
 
 	if (dev->driver_name[0])
-		strlcpy(info.driver, dev->driver_name, sizeof(info.driver));
+		strlcpy(info->driver, dev->driver_name, sizeof(info->driver));
 	else
-		strlcpy(info.driver, dev->dev->driver->name, sizeof(info.driver));
+		strlcpy(info->driver, dev->dev->driver->name,
+			sizeof(info->driver));
 
-	strlcpy(info.model, dev->model, sizeof(info.model));
-	strlcpy(info.serial, dev->serial, sizeof(info.serial));
-	strlcpy(info.bus_info, dev->bus_info, sizeof(info.bus_info));
+	strlcpy(info->model, dev->model, sizeof(info->model));
+	strlcpy(info->serial, dev->serial, sizeof(info->serial));
+	strlcpy(info->bus_info, dev->bus_info, sizeof(info->bus_info));
 
-	info.media_version = MEDIA_API_VERSION;
-	info.hw_revision = dev->hw_revision;
-	info.driver_version = dev->driver_version;
+	info->media_version = MEDIA_API_VERSION;
+	info->hw_revision = dev->hw_revision;
+	info->driver_version = dev->driver_version;
 
-	if (copy_to_user(__info, &info, sizeof(*__info)))
-		return -EFAULT;
 	return 0;
 }
 
@@ -101,29 +98,25 @@
 }
 
 static long media_device_enum_entities(struct media_device *mdev,
-				       struct media_entity_desc __user *uent)
+				       struct media_entity_desc *entd)
 {
 	struct media_entity *ent;
-	struct media_entity_desc u_ent;
 
-	memset(&u_ent, 0, sizeof(u_ent));
-	if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
-		return -EFAULT;
-
-	ent = find_entity(mdev, u_ent.id);
-
+	ent = find_entity(mdev, entd->id);
 	if (ent == NULL)
 		return -EINVAL;
 
-	u_ent.id = media_entity_id(ent);
+	memset(entd, 0, sizeof(*entd));
+
+	entd->id = media_entity_id(ent);
 	if (ent->name)
-		strlcpy(u_ent.name, ent->name, sizeof(u_ent.name));
-	u_ent.type = ent->function;
-	u_ent.revision = 0;		/* Unused */
-	u_ent.flags = ent->flags;
-	u_ent.group_id = 0;		/* Unused */
-	u_ent.pads = ent->num_pads;
-	u_ent.links = ent->num_links - ent->num_backlinks;
+		strlcpy(entd->name, ent->name, sizeof(entd->name));
+	entd->type = ent->function;
+	entd->revision = 0;		/* Unused */
+	entd->flags = ent->flags;
+	entd->group_id = 0;		/* Unused */
+	entd->pads = ent->num_pads;
+	entd->links = ent->num_links - ent->num_backlinks;
 
 	/*
 	 * Workaround for a bug at media-ctl <= v1.10 that makes it to
@@ -139,14 +132,13 @@
 	if (ent->function < MEDIA_ENT_F_OLD_BASE ||
 	    ent->function > MEDIA_ENT_T_DEVNODE_UNKNOWN) {
 		if (is_media_entity_v4l2_subdev(ent))
-			u_ent.type = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+			entd->type = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
 		else if (ent->function != MEDIA_ENT_F_IO_V4L)
-			u_ent.type = MEDIA_ENT_T_DEVNODE_UNKNOWN;
+			entd->type = MEDIA_ENT_T_DEVNODE_UNKNOWN;
 	}
 
-	memcpy(&u_ent.raw, &ent->info, sizeof(ent->info));
-	if (copy_to_user(uent, &u_ent, sizeof(u_ent)))
-		return -EFAULT;
+	memcpy(&entd->raw, &ent->info, sizeof(ent->info));
+
 	return 0;
 }
 
@@ -158,8 +150,8 @@
 	upad->flags = kpad->flags;
 }
 
-static long __media_device_enum_links(struct media_device *mdev,
-				      struct media_links_enum *links)
+static long media_device_enum_links(struct media_device *mdev,
+				    struct media_links_enum *links)
 {
 	struct media_entity *entity;
 
@@ -206,64 +198,35 @@
 	return 0;
 }
 
-static long media_device_enum_links(struct media_device *mdev,
-				    struct media_links_enum __user *ulinks)
-{
-	struct media_links_enum links;
-	int rval;
-
-	if (copy_from_user(&links, ulinks, sizeof(links)))
-		return -EFAULT;
-
-	rval = __media_device_enum_links(mdev, &links);
-	if (rval < 0)
-		return rval;
-
-	if (copy_to_user(ulinks, &links, sizeof(*ulinks)))
-		return -EFAULT;
-
-	return 0;
-}
-
 static long media_device_setup_link(struct media_device *mdev,
-				    struct media_link_desc __user *_ulink)
+				    struct media_link_desc *linkd)
 {
 	struct media_link *link = NULL;
-	struct media_link_desc ulink;
 	struct media_entity *source;
 	struct media_entity *sink;
-	int ret;
-
-	if (copy_from_user(&ulink, _ulink, sizeof(ulink)))
-		return -EFAULT;
 
 	/* Find the source and sink entities and link.
 	 */
-	source = find_entity(mdev, ulink.source.entity);
-	sink = find_entity(mdev, ulink.sink.entity);
+	source = find_entity(mdev, linkd->source.entity);
+	sink = find_entity(mdev, linkd->sink.entity);
 
 	if (source == NULL || sink == NULL)
 		return -EINVAL;
 
-	if (ulink.source.index >= source->num_pads ||
-	    ulink.sink.index >= sink->num_pads)
+	if (linkd->source.index >= source->num_pads ||
+	    linkd->sink.index >= sink->num_pads)
 		return -EINVAL;
 
-	link = media_entity_find_link(&source->pads[ulink.source.index],
-				      &sink->pads[ulink.sink.index]);
+	link = media_entity_find_link(&source->pads[linkd->source.index],
+				      &sink->pads[linkd->sink.index]);
 	if (link == NULL)
 		return -EINVAL;
 
 	/* Setup the link on both entities. */
-	ret = __media_entity_setup_link(link, ulink.flags);
-
-	if (copy_to_user(_ulink, &ulink, sizeof(ulink)))
-		return -EFAULT;
-
-	return ret;
+	return __media_entity_setup_link(link, linkd->flags);
 }
 
-static long __media_device_get_topology(struct media_device *mdev,
+static long media_device_get_topology(struct media_device *mdev,
 				      struct media_v2_topology *topo)
 {
 	struct media_entity *entity;
@@ -400,63 +363,98 @@
 	return ret;
 }
 
-static long media_device_get_topology(struct media_device *mdev,
-				      struct media_v2_topology __user *utopo)
+static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd)
 {
-	struct media_v2_topology ktopo;
-	int ret;
-
-	if (copy_from_user(&ktopo, utopo, sizeof(ktopo)))
-		return -EFAULT;
-
-	ret = __media_device_get_topology(mdev, &ktopo);
-	if (ret < 0)
-		return ret;
-
-	if (copy_to_user(utopo, &ktopo, sizeof(*utopo)))
+	/* All media IOCTLs are _IOWR() */
+	if (copy_from_user(karg, uarg, _IOC_SIZE(cmd)))
 		return -EFAULT;
 
 	return 0;
 }
 
+static long copy_arg_to_user(void __user *uarg, void *karg, unsigned int cmd)
+{
+	/* All media IOCTLs are _IOWR() */
+	if (copy_to_user(uarg, karg, _IOC_SIZE(cmd)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/* Do acquire the graph mutex */
+#define MEDIA_IOC_FL_GRAPH_MUTEX	BIT(0)
+
+#define MEDIA_IOC_ARG(__cmd, func, fl, from_user, to_user)		\
+	[_IOC_NR(MEDIA_IOC_##__cmd)] = {				\
+		.cmd = MEDIA_IOC_##__cmd,				\
+		.fn = (long (*)(struct media_device *, void *))func,	\
+		.flags = fl,						\
+		.arg_from_user = from_user,				\
+		.arg_to_user = to_user,					\
+	}
+
+#define MEDIA_IOC(__cmd, func, fl)					\
+	MEDIA_IOC_ARG(__cmd, func, fl, copy_arg_from_user, copy_arg_to_user)
+
+/* the table is indexed by _IOC_NR(cmd) */
+struct media_ioctl_info {
+	unsigned int cmd;
+	unsigned short flags;
+	long (*fn)(struct media_device *dev, void *arg);
+	long (*arg_from_user)(void *karg, void __user *uarg, unsigned int cmd);
+	long (*arg_to_user)(void __user *uarg, void *karg, unsigned int cmd);
+};
+
+static const struct media_ioctl_info ioctl_info[] = {
+	MEDIA_IOC(DEVICE_INFO, media_device_get_info, MEDIA_IOC_FL_GRAPH_MUTEX),
+	MEDIA_IOC(ENUM_ENTITIES, media_device_enum_entities, MEDIA_IOC_FL_GRAPH_MUTEX),
+	MEDIA_IOC(ENUM_LINKS, media_device_enum_links, MEDIA_IOC_FL_GRAPH_MUTEX),
+	MEDIA_IOC(SETUP_LINK, media_device_setup_link, MEDIA_IOC_FL_GRAPH_MUTEX),
+	MEDIA_IOC(G_TOPOLOGY, media_device_get_topology, MEDIA_IOC_FL_GRAPH_MUTEX),
+};
+
 static long media_device_ioctl(struct file *filp, unsigned int cmd,
-			       unsigned long arg)
+			       unsigned long __arg)
 {
 	struct media_devnode *devnode = media_devnode_data(filp);
 	struct media_device *dev = devnode->media_dev;
+	const struct media_ioctl_info *info;
+	void __user *arg = (void __user *)__arg;
+	char __karg[256], *karg = __karg;
 	long ret;
 
-	mutex_lock(&dev->graph_mutex);
-	switch (cmd) {
-	case MEDIA_IOC_DEVICE_INFO:
-		ret = media_device_get_info(dev,
-				(struct media_device_info __user *)arg);
-		break;
+	if (_IOC_NR(cmd) >= ARRAY_SIZE(ioctl_info)
+	    || ioctl_info[_IOC_NR(cmd)].cmd != cmd)
+		return -ENOIOCTLCMD;
 
-	case MEDIA_IOC_ENUM_ENTITIES:
-		ret = media_device_enum_entities(dev,
-				(struct media_entity_desc __user *)arg);
-		break;
+	info = &ioctl_info[_IOC_NR(cmd)];
 
-	case MEDIA_IOC_ENUM_LINKS:
-		ret = media_device_enum_links(dev,
-				(struct media_links_enum __user *)arg);
-		break;
-
-	case MEDIA_IOC_SETUP_LINK:
-		ret = media_device_setup_link(dev,
-				(struct media_link_desc __user *)arg);
-		break;
-
-	case MEDIA_IOC_G_TOPOLOGY:
-		ret = media_device_get_topology(dev,
-				(struct media_v2_topology __user *)arg);
-		break;
-
-	default:
-		ret = -ENOIOCTLCMD;
+	if (_IOC_SIZE(info->cmd) > sizeof(__karg)) {
+		karg = kmalloc(_IOC_SIZE(info->cmd), GFP_KERNEL);
+		if (!karg)
+			return -ENOMEM;
 	}
-	mutex_unlock(&dev->graph_mutex);
+
+	if (info->arg_from_user) {
+		ret = info->arg_from_user(karg, arg, cmd);
+		if (ret)
+			goto out_free;
+	}
+
+	if (info->flags & MEDIA_IOC_FL_GRAPH_MUTEX)
+		mutex_lock(&dev->graph_mutex);
+
+	ret = info->fn(dev, karg);
+
+	if (info->flags & MEDIA_IOC_FL_GRAPH_MUTEX)
+		mutex_unlock(&dev->graph_mutex);
+
+	if (!ret && info->arg_to_user)
+		ret = info->arg_to_user(arg, karg, cmd);
+
+out_free:
+	if (karg != __karg)
+		kfree(karg);
 
 	return ret;
 }
@@ -486,7 +484,7 @@
 	links.pads = compat_ptr(pads_ptr);
 	links.links = compat_ptr(links_ptr);
 
-	return __media_device_enum_links(mdev, &links);
+	return media_device_enum_links(mdev, &links);
 }
 
 #define MEDIA_IOC_ENUM_LINKS32		_IOWR('|', 0x02, struct media_links_enum32)
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index d8a2299..c68239e 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -65,6 +65,8 @@
 		return "v4l-subdev";
 	case MEDIA_INTF_T_V4L_SWRADIO:
 		return "v4l-swradio";
+	case MEDIA_INTF_T_V4L_TOUCH:
+		return "v4l-touch";
 	case MEDIA_INTF_T_ALSA_PCM_CAPTURE:
 		return "alsa-pcm-capture";
 	case MEDIA_INTF_T_ALSA_PCM_PLAYBACK:
@@ -806,17 +808,18 @@
 
 	mdev = source->graph_obj.mdev;
 
-	if (mdev->link_notify) {
-		ret = mdev->link_notify(link, flags,
-					MEDIA_DEV_NOTIFY_PRE_LINK_CH);
+	if (mdev->ops && mdev->ops->link_notify) {
+		ret = mdev->ops->link_notify(link, flags,
+					     MEDIA_DEV_NOTIFY_PRE_LINK_CH);
 		if (ret < 0)
 			return ret;
 	}
 
 	ret = __media_entity_setup_link_notify(link, flags);
 
-	if (mdev->link_notify)
-		mdev->link_notify(link, flags, MEDIA_DEV_NOTIFY_POST_LINK_CH);
+	if (mdev->ops && mdev->ops->link_notify)
+		mdev->ops->link_notify(link, flags,
+				       MEDIA_DEV_NOTIFY_POST_LINK_CH);
 
 	return ret;
 }
diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index 4f6467f..da28e68 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -13,6 +13,7 @@
 source "drivers/media/pci/meye/Kconfig"
 source "drivers/media/pci/solo6x10/Kconfig"
 source "drivers/media/pci/sta2x11/Kconfig"
+source "drivers/media/pci/tw5864/Kconfig"
 source "drivers/media/pci/tw68/Kconfig"
 source "drivers/media/pci/tw686x/Kconfig"
 source "drivers/media/pci/zoran/Kconfig"
diff --git a/drivers/media/pci/Makefile b/drivers/media/pci/Makefile
index 2e54c36..a7e8af0 100644
--- a/drivers/media/pci/Makefile
+++ b/drivers/media/pci/Makefile
@@ -31,3 +31,4 @@
 obj-$(CONFIG_STA2X11_VIP) += sta2x11/
 obj-$(CONFIG_VIDEO_SOLO6X10) += solo6x10/
 obj-$(CONFIG_VIDEO_COBALT) += cobalt/
+obj-$(CONFIG_VIDEO_TW5864) += tw5864/
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index df54e17..97b91a9 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -2804,30 +2804,44 @@
 	    cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
 		return -EINVAL;
 
-	*cap = bttv_tvnorms[btv->tvnorm].cropcap;
+	/* defrect and bounds are set via g_selection */
+	cap->pixelaspect = bttv_tvnorms[btv->tvnorm].cropcap.pixelaspect;
 
 	return 0;
 }
 
-static int bttv_g_crop(struct file *file, void *f, struct v4l2_crop *crop)
+static int bttv_g_selection(struct file *file, void *f, struct v4l2_selection *sel)
 {
 	struct bttv_fh *fh = f;
 	struct bttv *btv = fh->btv;
 
-	if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
-	    crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
+	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+	    sel->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
 		return -EINVAL;
 
-	/* No fh->do_crop = 1; because btv->crop[1] may be
-	   inconsistent with fh->width or fh->height and apps
-	   do not expect a change here. */
-
-	crop->c = btv->crop[!!fh->do_crop].rect;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP:
+		/*
+		 * No fh->do_crop = 1; because btv->crop[1] may be
+		 * inconsistent with fh->width or fh->height and apps
+		 * do not expect a change here.
+		 */
+		sel->r = btv->crop[!!fh->do_crop].rect;
+		break;
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		sel->r = bttv_tvnorms[btv->tvnorm].cropcap.defrect;
+		break;
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		sel->r = bttv_tvnorms[btv->tvnorm].cropcap.bounds;
+		break;
+	default:
+		return -EINVAL;
+	}
 
 	return 0;
 }
 
-static int bttv_s_crop(struct file *file, void *f, const struct v4l2_crop *crop)
+static int bttv_s_selection(struct file *file, void *f, struct v4l2_selection *sel)
 {
 	struct bttv_fh *fh = f;
 	struct bttv *btv = fh->btv;
@@ -2839,8 +2853,11 @@
 	__s32 b_right;
 	__s32 b_bottom;
 
-	if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
-	    crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
+	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+	    sel->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
+		return -EINVAL;
+
+	if (sel->target != V4L2_SEL_TGT_CROP)
 		return -EINVAL;
 
 	/* Make sure tvnorm, vbi_end and the current cropping
@@ -2864,22 +2881,24 @@
 	}
 
 	/* Min. scaled size 48 x 32. */
-	c.rect.left = clamp_t(s32, crop->c.left, b_left, b_right - 48);
+	c.rect.left = clamp_t(s32, sel->r.left, b_left, b_right - 48);
 	c.rect.left = min(c.rect.left, (__s32) MAX_HDELAY);
 
-	c.rect.width = clamp_t(s32, crop->c.width,
+	c.rect.width = clamp_t(s32, sel->r.width,
 			     48, b_right - c.rect.left);
 
-	c.rect.top = clamp_t(s32, crop->c.top, b_top, b_bottom - 32);
+	c.rect.top = clamp_t(s32, sel->r.top, b_top, b_bottom - 32);
 	/* Top and height must be a multiple of two. */
 	c.rect.top = (c.rect.top + 1) & ~1;
 
-	c.rect.height = clamp_t(s32, crop->c.height,
+	c.rect.height = clamp_t(s32, sel->r.height,
 			      32, b_bottom - c.rect.top);
 	c.rect.height = (c.rect.height + 1) & ~1;
 
 	bttv_crop_calc_limits(&c);
 
+	sel->r = c.rect;
+
 	btv->crop[1] = c;
 
 	fh->do_crop = 1;
@@ -3047,10 +3066,10 @@
 	   which only change on request. These are stored in btv->crop[1].
 	   However for compatibility with V4L apps and cropping unaware
 	   V4L2 apps we now reset the cropping parameters as seen through
-	   this fh, which is to say VIDIOC_G_CROP and scaling limit checks
+	   this fh, which is to say VIDIOC_G_SELECTION and scaling limit checks
 	   will use btv->crop[0], the default cropping parameters for the
 	   current video standard, and VIDIOC_S_FMT will not implicitely
-	   change the cropping parameters until VIDIOC_S_CROP has been
+	   change the cropping parameters until VIDIOC_S_SELECTION has been
 	   called. */
 	fh->do_crop = !reset_crop; /* module parameter */
 
@@ -3159,8 +3178,8 @@
 	.vidioc_streamoff               = bttv_streamoff,
 	.vidioc_g_tuner                 = bttv_g_tuner,
 	.vidioc_s_tuner                 = bttv_s_tuner,
-	.vidioc_g_crop                  = bttv_g_crop,
-	.vidioc_s_crop                  = bttv_s_crop,
+	.vidioc_g_selection             = bttv_g_selection,
+	.vidioc_s_selection             = bttv_s_selection,
 	.vidioc_g_fbuf                  = bttv_g_fbuf,
 	.vidioc_s_fbuf                  = bttv_s_fbuf,
 	.vidioc_overlay                 = bttv_overlay,
diff --git a/drivers/media/pci/bt8xx/bttvp.h b/drivers/media/pci/bt8xx/bttvp.h
index b1e0023..9efc455 100644
--- a/drivers/media/pci/bt8xx/bttvp.h
+++ b/drivers/media/pci/bt8xx/bttvp.h
@@ -232,7 +232,7 @@
 	const struct bttv_format *ovfmt;
 	struct bttv_overlay      ov;
 
-	/* Application called VIDIOC_S_CROP. */
+	/* Application called VIDIOC_S_SELECTION. */
 	int                      do_crop;
 
 	/* vbi capture */
diff --git a/drivers/media/pci/cobalt/cobalt-alsa-pcm.c b/drivers/media/pci/cobalt/cobalt-alsa-pcm.c
index f0bdf10..49013c6 100644
--- a/drivers/media/pci/cobalt/cobalt-alsa-pcm.c
+++ b/drivers/media/pci/cobalt/cobalt-alsa-pcm.c
@@ -510,7 +510,7 @@
 	return vmalloc_to_page(pageptr);
 }
 
-static struct snd_pcm_ops snd_cobalt_pcm_capture_ops = {
+static const struct snd_pcm_ops snd_cobalt_pcm_capture_ops = {
 	.open		= snd_cobalt_pcm_capture_open,
 	.close		= snd_cobalt_pcm_capture_close,
 	.ioctl		= snd_cobalt_pcm_ioctl,
@@ -522,7 +522,7 @@
 	.page		= snd_pcm_get_vmalloc_page,
 };
 
-static struct snd_pcm_ops snd_cobalt_pcm_playback_ops = {
+static const struct snd_pcm_ops snd_cobalt_pcm_playback_ops = {
 	.open		= snd_cobalt_pcm_playback_open,
 	.close		= snd_cobalt_pcm_playback_close,
 	.ioctl		= snd_cobalt_pcm_ioctl,
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 476f7f0..9796340 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -60,30 +60,27 @@
 MODULE_LICENSE("GPL");
 
 static u8 edid[256] = {
-	0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00,
-	0x50, 0x21, 0x9C, 0x27, 0x00, 0x00, 0x00, 0x00,
-	0x19, 0x12, 0x01, 0x03, 0x80, 0x00, 0x00, 0x78,
-	0x0E, 0x00, 0xB2, 0xA0, 0x57, 0x49, 0x9B, 0x26,
-	0x10, 0x48, 0x4F, 0x2F, 0xCF, 0x00, 0x31, 0x59,
+	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+	0x50, 0x21, 0x32, 0x27, 0x00, 0x00, 0x00, 0x00,
+	0x22, 0x1a, 0x01, 0x03, 0x80, 0x30, 0x1b, 0x78,
+	0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26,
+	0x0f, 0x50, 0x54, 0x2f, 0xcf, 0x00, 0x31, 0x59,
 	0x45, 0x59, 0x61, 0x59, 0x81, 0x99, 0x01, 0x01,
-	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3A,
-	0x80, 0x18, 0x71, 0x38, 0x2D, 0x40, 0x58, 0x2C,
-	0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E,
-	0x00, 0x00, 0x00, 0xFD, 0x00, 0x31, 0x55, 0x18,
-	0x5E, 0x11, 0x00, 0x0A, 0x20, 0x20, 0x20, 0x20,
-	0x20, 0x20, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x43,
-	0x20, 0x39, 0x30, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A,
-	0x0A, 0x0A, 0x0A, 0x0A, 0x00, 0x00, 0x00, 0x10,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a,
+	0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
+	0x46, 0x00, 0xe0, 0x0e, 0x11, 0x00, 0x00, 0x1e,
+	0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18,
+	0x5e, 0x11, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20,
+	0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x63,
+	0x6f, 0x62, 0x61, 0x6c, 0x74, 0x0a, 0x20, 0x20,
+	0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x10,
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x68,
-	0x02, 0x03, 0x1a, 0xc0, 0x48, 0xa2, 0x10, 0x04,
-	0x02, 0x01, 0x21, 0x14, 0x13, 0x23, 0x09, 0x07,
-	0x07, 0x65, 0x03, 0x0c, 0x00, 0x10, 0x00, 0xe2,
-	0x00, 0x2a, 0x01, 0x1d, 0x00, 0x80, 0x51, 0xd0,
-	0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x1e, 0x8c, 0x0a, 0xd0, 0x8a,
-	0x20, 0xe0, 0x2d, 0x10, 0x10, 0x3e, 0x96, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x9c,
+
+	0x02, 0x03, 0x1f, 0xf0, 0x4a, 0x90, 0x1f, 0x04,
+	0x13, 0x22, 0x21, 0x20, 0x02, 0x11, 0x01, 0x23,
+	0x09, 0x07, 0x07, 0x68, 0x03, 0x0c, 0x00, 0x10,
+	0x00, 0x00, 0x22, 0x0f, 0xe2, 0x00, 0xea, 0x00,
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -91,7 +88,11 @@
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd7
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa7,
 };
 
 static void cobalt_set_interrupt(struct cobalt *cobalt, bool enable)
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index d05672f..5c76637 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -161,8 +161,11 @@
 	struct v4l2_subdev_format sd_fmt = {
 		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
 	};
+	u64 clk = bt->pixelclock;
 
-	if (!cobalt_cpld_set_freq(cobalt, bt->pixelclock)) {
+	if (bt->flags & V4L2_DV_FL_REDUCED_FPS)
+		clk = div_u64(clk * 1000ULL, 1001);
+	if (!cobalt_cpld_set_freq(cobalt, clk)) {
 		cobalt_err("pixelclock out of range\n");
 		return;
 	}
@@ -644,7 +647,7 @@
 		return 0;
 	}
 
-	if (v4l2_match_dv_timings(timings, &s->timings, 0, false))
+	if (v4l2_match_dv_timings(timings, &s->timings, 0, true))
 		return 0;
 
 	if (vb2_is_busy(&s->q))
diff --git a/drivers/media/pci/cx18/cx18-alsa-pcm.c b/drivers/media/pci/cx18/cx18-alsa-pcm.c
index ffb6acd..5344510 100644
--- a/drivers/media/pci/cx18/cx18-alsa-pcm.c
+++ b/drivers/media/pci/cx18/cx18-alsa-pcm.c
@@ -311,7 +311,7 @@
 	return vmalloc_to_page(pageptr);
 }
 
-static struct snd_pcm_ops snd_cx18_pcm_capture_ops = {
+static const struct snd_pcm_ops snd_cx18_pcm_capture_ops = {
 	.open		= snd_cx18_pcm_capture_open,
 	.close		= snd_cx18_pcm_capture_close,
 	.ioctl		= snd_cx18_pcm_ioctl,
diff --git a/drivers/media/pci/cx18/cx18-i2c.c b/drivers/media/pci/cx18/cx18-i2c.c
index 4af8cd6..c932937 100644
--- a/drivers/media/pci/cx18/cx18-i2c.c
+++ b/drivers/media/pci/cx18/cx18-i2c.c
@@ -98,7 +98,8 @@
 	case CX18_HW_Z8F0811_IR_RX_HAUP:
 		init_data->ir_codes = RC_MAP_HAUPPAUGE;
 		init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
-		init_data->type = RC_BIT_RC5;
+		init_data->type = RC_BIT_RC5 | RC_BIT_RC6_MCE |
+							RC_BIT_RC6_6A_32;
 		init_data->name = cx->card_name;
 		info.platform_data = init_data;
 		break;
diff --git a/drivers/media/pci/cx23885/altera-ci.h b/drivers/media/pci/cx23885/altera-ci.h
index 6c51172..57a40c8 100644
--- a/drivers/media/pci/cx23885/altera-ci.h
+++ b/drivers/media/pci/cx23885/altera-ci.h
@@ -20,8 +20,6 @@
 #ifndef __ALTERA_CI_H
 #define __ALTERA_CI_H
 
-#include <linux/kconfig.h>
-
 #define ALT_DATA	0x000000ff
 #define ALT_TDI		0x00008000
 #define ALT_TDO		0x00004000
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index 4d080da..da892f3 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1223,7 +1223,7 @@
 	cx23885_cancel_buffers(&dev->ts1);
 }
 
-static struct vb2_ops cx23885_qops = {
+static const struct vb2_ops cx23885_qops = {
 	.queue_setup    = queue_setup,
 	.buf_prepare  = buffer_prepare,
 	.buf_finish = buffer_finish,
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index ae7c2e8..6115d4e 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -506,7 +506,7 @@
 /*
  * operators
  */
-static struct snd_pcm_ops snd_cx23885_pcm_ops = {
+static const struct snd_pcm_ops snd_cx23885_pcm_ops = {
 	.open = snd_cx23885_pcm_open,
 	.close = snd_cx23885_close,
 	.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 4abf50f..99ba8d6 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -770,6 +770,11 @@
 		.portb        = CX23885_MPEG_DVB,
 		.portc        = CX23885_MPEG_DVB,
 	},
+	[CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC] = {
+		.name        = "Hauppauge WinTV-QuadHD-ATSC",
+		.portb        = CX23885_MPEG_DVB,
+		.portc        = CX23885_MPEG_DVB,
+	},
 };
 const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
 
@@ -1073,6 +1078,14 @@
 		.subvendor = 0x0070,
 		.subdevice = 0x6b28,
 		.card      = CX23885_BOARD_HAUPPAUGE_QUADHD_DVB, /* Tuner Pair 2 */
+	}, {
+		.subvendor = 0x0070,
+		.subdevice = 0x6a18,
+		.card      = CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC, /* Tuner Pair 1 */
+	}, {
+		.subvendor = 0x0070,
+		.subdevice = 0x6b18,
+		.card      = CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC, /* Tuner Pair 2 */
 	},
 };
 const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -1278,6 +1291,18 @@
 		/* WinTV-QuadHD (DVB) Tuner Pair 2 (PCIe, IR, half height,
 		   DVB-T/T2/C, DVB-T/T2/C */
 		break;
+	case 165100:
+		/*
+		 * WinTV-QuadHD (ATSC) Tuner Pair 1 (PCIe, IR, half height,
+		 * ATSC, ATSC
+		 */
+		break;
+	case 165101:
+		/*
+		 * WinTV-QuadHD (DVB) Tuner Pair 2 (PCIe, IR, half height,
+		 * ATSC, ATSC
+		 */
+		break;
 	default:
 		printk(KERN_WARNING "%s: warning: "
 			"unknown hauppauge model #%d\n",
@@ -1751,6 +1776,7 @@
 		break;
 	case CX23885_BOARD_HAUPPAUGE_HVR5525:
 	case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
+	case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
 		/*
 		 * HVR5525 GPIO Details:
 		 *  GPIO-00 IR_WIDE
@@ -1826,6 +1852,7 @@
 	case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
 	case CX23885_BOARD_HAUPPAUGE_HVR1210:
 	case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
+	case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
 		/* FIXME: Implement me */
 		break;
 	case CX23885_BOARD_HAUPPAUGE_HVR1270:
@@ -2025,6 +2052,7 @@
 	case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE:
 	case CX23885_BOARD_HAUPPAUGE_HVR5525:
 	case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
+	case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
 		if (dev->i2c_bus[0].i2c_rc == 0)
 			hauppauge_eeprom(dev, eeprom+0xc0);
 		break;
@@ -2171,6 +2199,7 @@
 		ts2->src_sel_val   = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
 		break;
 	case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
+	case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
 		ts1->gen_ctrl_val  = 0xc; /* Serial bus + punctured clock */
 		ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
 		ts1->src_sel_val   = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index e5748a9..818f3c2 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -74,6 +74,7 @@
 #include "sp2.h"
 #include "m88ds3103.h"
 #include "m88rs6000t.h"
+#include "lgdt3306a.h"
 
 static unsigned int debug;
 
@@ -172,7 +173,7 @@
 	cx23885_cancel_buffers(port);
 }
 
-static struct vb2_ops dvb_qops = {
+static const struct vb2_ops dvb_qops = {
 	.queue_setup    = queue_setup,
 	.buf_prepare  = buffer_prepare,
 	.buf_finish = buffer_finish,
@@ -574,6 +575,30 @@
 	.refclock = 27000000,
 };
 
+static struct lgdt3306a_config hauppauge_quadHD_ATSC_a_config = {
+	.i2c_addr               = 0x59,
+	.qam_if_khz             = 4000,
+	.vsb_if_khz             = 3250,
+	.deny_i2c_rptr          = 1, /* Disabled */
+	.spectral_inversion     = 0, /* Disabled */
+	.mpeg_mode              = LGDT3306A_MPEG_SERIAL,
+	.tpclk_edge             = LGDT3306A_TPCLK_RISING_EDGE,
+	.tpvalid_polarity       = LGDT3306A_TP_VALID_HIGH,
+	.xtalMHz                = 25, /* 24 or 25 */
+};
+
+static struct lgdt3306a_config hauppauge_quadHD_ATSC_b_config = {
+	.i2c_addr               = 0x0e,
+	.qam_if_khz             = 4000,
+	.vsb_if_khz             = 3250,
+	.deny_i2c_rptr          = 1, /* Disabled */
+	.spectral_inversion     = 0, /* Disabled */
+	.mpeg_mode              = LGDT3306A_MPEG_SERIAL,
+	.tpclk_edge             = LGDT3306A_TPCLK_RISING_EDGE,
+	.tpvalid_polarity       = LGDT3306A_TP_VALID_HIGH,
+	.xtalMHz                = 25, /* 24 or 25 */
+};
+
 static int p8000_set_voltage(struct dvb_frontend *fe,
 			     enum fe_sec_voltage voltage)
 {
@@ -867,12 +892,6 @@
 	.tuner_i2c_addr = 0x54,
 };
 
-static const struct si2165_config hauppauge_hvr4400_si2165_config = {
-	.i2c_addr	= 0x64,
-	.chip_mode	= SI2165_MODE_PLL_XTAL,
-	.ref_freq_Hz	= 16000000,
-};
-
 static const struct m88ds3103_config dvbsky_t9580_m88ds3103_config = {
 	.i2c_addr = 0x68,
 	.clock = 27000000,
@@ -1182,6 +1201,7 @@
 	struct cx23885_i2c *i2c_bus = NULL, *i2c_bus2 = NULL;
 	struct vb2_dvb_frontend *fe0, *fe1 = NULL;
 	struct si2168_config si2168_config;
+	struct si2165_platform_data si2165_pdata;
 	struct si2157_config si2157_config;
 	struct ts2020_config ts2020_config;
 	struct i2c_board_info info;
@@ -1700,6 +1720,9 @@
 		}
 		break;
 	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
+		if (port->nr > 2)
+			return 0;
+
 		i2c_bus = &dev->i2c_bus[0];
 		mfe_shared = 1;/* MFE */
 		port->frontends.gate = 0;/* not clear for me yet */
@@ -1839,9 +1862,26 @@
 			break;
 		/* port c */
 		case 2:
-			fe0->dvb.frontend = dvb_attach(si2165_attach,
-					&hauppauge_hvr4400_si2165_config,
-					&i2c_bus->i2c_adap);
+			/* attach frontend */
+			memset(&si2165_pdata, 0, sizeof(si2165_pdata));
+			si2165_pdata.fe = &fe0->dvb.frontend;
+			si2165_pdata.chip_mode = SI2165_MODE_PLL_XTAL,
+			si2165_pdata.ref_freq_Hz = 16000000,
+			memset(&info, 0, sizeof(struct i2c_board_info));
+			strlcpy(info.type, "si2165", I2C_NAME_SIZE);
+			info.addr = 0x64;
+			info.platform_data = &si2165_pdata;
+			request_module(info.type);
+			client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
+			if (client_demod == NULL ||
+					client_demod->dev.driver == NULL)
+				goto frontend_detach;
+			if (!try_module_get(client_demod->dev.driver->owner)) {
+				i2c_unregister_device(client_demod);
+				goto frontend_detach;
+			}
+			port->i2c_client_demod = client_demod;
+
 			if (fe0->dvb.frontend == NULL)
 				break;
 			fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
@@ -2365,6 +2405,81 @@
 			break;
 		}
 		break;
+	case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
+		switch (port->nr) {
+		/* port b - Terrestrial/cable */
+		case 1:
+			/* attach frontend */
+			i2c_bus = &dev->i2c_bus[0];
+			fe0->dvb.frontend = dvb_attach(lgdt3306a_attach,
+				&hauppauge_quadHD_ATSC_a_config, &i2c_bus->i2c_adap);
+			if (fe0->dvb.frontend == NULL)
+				break;
+
+			/* attach tuner */
+			memset(&si2157_config, 0, sizeof(si2157_config));
+			si2157_config.fe = fe0->dvb.frontend;
+			si2157_config.if_port = 1;
+			si2157_config.inversion = 1;
+			memset(&info, 0, sizeof(struct i2c_board_info));
+			strlcpy(info.type, "si2157", I2C_NAME_SIZE);
+			info.addr = 0x60;
+			info.platform_data = &si2157_config;
+			request_module("%s", info.type);
+			client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info);
+			if (!client_tuner || !client_tuner->dev.driver) {
+				module_put(client_demod->dev.driver->owner);
+				i2c_unregister_device(client_demod);
+				port->i2c_client_demod = NULL;
+				goto frontend_detach;
+			}
+			if (!try_module_get(client_tuner->dev.driver->owner)) {
+				i2c_unregister_device(client_tuner);
+				module_put(client_demod->dev.driver->owner);
+				i2c_unregister_device(client_demod);
+				port->i2c_client_demod = NULL;
+				goto frontend_detach;
+			}
+			port->i2c_client_tuner = client_tuner;
+			break;
+
+		/* port c - terrestrial/cable */
+		case 2:
+			/* attach frontend */
+			i2c_bus = &dev->i2c_bus[0];
+			fe0->dvb.frontend = dvb_attach(lgdt3306a_attach,
+				&hauppauge_quadHD_ATSC_b_config, &i2c_bus->i2c_adap);
+			if (fe0->dvb.frontend == NULL)
+				break;
+
+			/* attach tuner */
+			memset(&si2157_config, 0, sizeof(si2157_config));
+			si2157_config.fe = fe0->dvb.frontend;
+			si2157_config.if_port = 1;
+			si2157_config.inversion = 1;
+			memset(&info, 0, sizeof(struct i2c_board_info));
+			strlcpy(info.type, "si2157", I2C_NAME_SIZE);
+			info.addr = 0x62;
+			info.platform_data = &si2157_config;
+			request_module("%s", info.type);
+			client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info);
+			if (!client_tuner || !client_tuner->dev.driver) {
+				module_put(client_demod->dev.driver->owner);
+				i2c_unregister_device(client_demod);
+				port->i2c_client_demod = NULL;
+				goto frontend_detach;
+			}
+			if (!try_module_get(client_tuner->dev.driver->owner)) {
+				i2c_unregister_device(client_tuner);
+				module_put(client_demod->dev.driver->owner);
+				i2c_unregister_device(client_demod);
+				port->i2c_client_demod = NULL;
+				goto frontend_detach;
+			}
+			port->i2c_client_tuner = client_tuner;
+			break;
+		}
+		break;
 
 	default:
 		printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
diff --git a/drivers/media/pci/cx23885/cx23885-i2c.c b/drivers/media/pci/cx23885/cx23885-i2c.c
index ae061b3..6159122 100644
--- a/drivers/media/pci/cx23885/cx23885-i2c.c
+++ b/drivers/media/pci/cx23885/cx23885-i2c.c
@@ -258,7 +258,7 @@
 	return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C;
 }
 
-static struct i2c_algorithm cx23885_i2c_algo_template = {
+static const struct i2c_algorithm cx23885_i2c_algo_template = {
 	.master_xfer	= i2c_xfer,
 	.functionality	= cx23885_functionality,
 };
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 64328d0..410c314 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -293,7 +293,7 @@
 	case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
 		/* Integrated CX23885 IR controller */
 		driver_type = RC_DRIVER_IR_RAW;
-		allowed_protos = RC_BIT_NEC;
+		allowed_protos = RC_BIT_ALL;
 		/* The grey Terratec remote with orange buttons */
 		rc_map = RC_MAP_NEC_TERRATEC_CINERGY_XS;
 		break;
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 6d73522..33d168e 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -517,7 +517,7 @@
 	spin_unlock_irqrestore(&dev->slock, flags);
 }
 
-static struct vb2_ops cx23885_video_qops = {
+static const struct vb2_ops cx23885_video_qops = {
 	.queue_setup    = queue_setup,
 	.buf_prepare  = buffer_prepare,
 	.buf_finish = buffer_finish,
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index 24a0a6c..a6735af 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -103,7 +103,8 @@
 #define CX23885_BOARD_HAUPPAUGE_STARBURST      53
 #define CX23885_BOARD_VIEWCAST_260E            54
 #define CX23885_BOARD_VIEWCAST_460E            55
-#define CX23885_BOARD_HAUPPAUGE_QUADHD_DVB    56
+#define CX23885_BOARD_HAUPPAUGE_QUADHD_DVB     56
+#define CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC    57
 
 #define GPIO_0 0x00000001
 #define GPIO_1 0x00000002
@@ -256,7 +257,7 @@
 struct cx23885_tsport {
 	struct cx23885_dev *dev;
 
-	int                        nr;
+	unsigned                   nr;
 	int                        sram_chno;
 
 	struct vb2_dvb_frontends   frontends;
diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c
index df189b1..4711583 100644
--- a/drivers/media/pci/cx25821/cx25821-alsa.c
+++ b/drivers/media/pci/cx25821/cx25821-alsa.c
@@ -649,7 +649,7 @@
 /*
  * operators
  */
-static struct snd_pcm_ops snd_cx25821_pcm_ops = {
+static const struct snd_pcm_ops snd_cx25821_pcm_ops = {
 	.open = snd_cx25821_pcm_open,
 	.close = snd_cx25821_close,
 	.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/pci/cx25821/cx25821-audio-upstream.c b/drivers/media/pci/cx25821/cx25821-audio-upstream.c
index 68dbc2d..7c8edb6 100644
--- a/drivers/media/pci/cx25821/cx25821-audio-upstream.c
+++ b/drivers/media/pci/cx25821/cx25821-audio-upstream.c
@@ -242,8 +242,7 @@
 	dev->_audioframe_count = 0;
 	dev->_audiofile_status = END_OF_FILE;
 
-	kfree(dev->_irq_audio_queues);
-	dev->_irq_audio_queues = NULL;
+	flush_work(&dev->_audio_work_entry);
 
 	kfree(dev->_audiofilename);
 }
@@ -446,8 +445,7 @@
 
 			dev->_audioframe_index = dev->_last_index_irq;
 
-			queue_work(dev->_irq_audio_queues,
-				   &dev->_audio_work_entry);
+			schedule_work(&dev->_audio_work_entry);
 		}
 
 		if (dev->_is_first_audio_frame) {
@@ -639,14 +637,6 @@
 
 	/* Work queue */
 	INIT_WORK(&dev->_audio_work_entry, cx25821_audioups_handler);
-	dev->_irq_audio_queues =
-	    create_singlethread_workqueue("cx25821_audioworkqueue");
-
-	if (!dev->_irq_audio_queues) {
-		printk(KERN_DEBUG
-			pr_fmt("ERROR: create_singlethread_workqueue() for Audio FAILED!\n"));
-		return -ENOMEM;
-	}
 
 	dev->_last_index_irq = 0;
 	dev->_audio_is_running = 0;
diff --git a/drivers/media/pci/cx25821/cx25821-i2c.c b/drivers/media/pci/cx25821/cx25821-i2c.c
index dca37c7..63ba25b 100644
--- a/drivers/media/pci/cx25821/cx25821-i2c.c
+++ b/drivers/media/pci/cx25821/cx25821-i2c.c
@@ -281,7 +281,7 @@
 		I2C_FUNC_SMBUS_READ_WORD_DATA | I2C_FUNC_SMBUS_WRITE_WORD_DATA;
 }
 
-static struct i2c_algorithm cx25821_i2c_algo_template = {
+static const struct i2c_algorithm cx25821_i2c_algo_template = {
 	.master_xfer = i2c_xfer,
 	.functionality = cx25821_functionality,
 #ifdef NEED_ALGO_CONTROL
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c
index adcd09b..7ce352a 100644
--- a/drivers/media/pci/cx25821/cx25821-video.c
+++ b/drivers/media/pci/cx25821/cx25821-video.c
@@ -307,7 +307,7 @@
 	spin_unlock_irqrestore(&dev->slock, flags);
 }
 
-static struct vb2_ops cx25821_video_qops = {
+static const struct vb2_ops cx25821_video_qops = {
 	.queue_setup    = cx25821_queue_setup,
 	.buf_prepare  = cx25821_buffer_prepare,
 	.buf_finish = cx25821_buffer_finish,
diff --git a/drivers/media/pci/cx25821/cx25821.h b/drivers/media/pci/cx25821/cx25821.h
index 35c7375..ef61dea 100644
--- a/drivers/media/pci/cx25821/cx25821.h
+++ b/drivers/media/pci/cx25821/cx25821.h
@@ -293,7 +293,6 @@
 	u32 audio_upstream_riscbuf_size;
 	u32 audio_upstream_databuf_size;
 	int _audioframe_index;
-	struct workqueue_struct *_irq_audio_queues;
 	struct work_struct _audio_work_entry;
 	char *input_audiofilename;
 
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index f3f13eb..723f064 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -599,7 +599,7 @@
 /*
  * operators
  */
-static struct snd_pcm_ops snd_cx88_pcm_ops = {
+static const struct snd_pcm_ops snd_cx88_pcm_ops = {
 	.open = snd_cx88_pcm_open,
 	.close = snd_cx88_close,
 	.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index 04fe9af..b532e49 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -756,7 +756,7 @@
 	spin_unlock_irqrestore(&dev->slock, flags);
 }
 
-static struct vb2_ops blackbird_qops = {
+static const struct vb2_ops blackbird_qops = {
 	.queue_setup    = queue_setup,
 	.buf_prepare  = buffer_prepare,
 	.buf_finish = buffer_finish,
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index 5bb63e7..ac2392d 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -156,7 +156,7 @@
 	spin_unlock_irqrestore(&dev->slock, flags);
 }
 
-static struct vb2_ops dvb_qops = {
+static const struct vb2_ops dvb_qops = {
 	.queue_setup    = queue_setup,
 	.buf_prepare  = buffer_prepare,
 	.buf_finish = buffer_finish,
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index 3f1342c..cd76871 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -144,7 +144,8 @@
 		scancode = RC_SCANCODE_NECX(addr, cmd);
 
 		if (0 == (gpio & ir->mask_keyup))
-			rc_keydown_notimeout(ir->dev, RC_TYPE_NEC, scancode, 0);
+			rc_keydown_notimeout(ir->dev, RC_TYPE_NECX, scancode,
+									0);
 		else
 			rc_keyup(ir->dev);
 
@@ -345,7 +346,7 @@
 		 * 002-T mini RC, provided with newer PV hardware
 		 */
 		ir_codes = RC_MAP_PIXELVIEW_MK12;
-		rc_type = RC_BIT_NEC;
+		rc_type = RC_BIT_NECX;
 		ir->gpio_addr = MO_GP1_IO;
 		ir->mask_keyup = 0x80;
 		ir->polling = 10; /* ms */
@@ -631,7 +632,8 @@
 			/* Hauppauge XVR */
 			core->init_data.name = "cx88 Hauppauge XVR remote";
 			core->init_data.ir_codes = RC_MAP_HAUPPAUGE;
-			core->init_data.type = RC_BIT_RC5;
+			core->init_data.type = RC_BIT_RC5 | RC_BIT_RC6_MCE |
+							RC_BIT_RC6_6A_32;
 			core->init_data.internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
 
 			info.platform_data = &core->init_data;
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 5dc1e3f..d83eb3b 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -567,7 +567,7 @@
 	spin_unlock_irqrestore(&dev->slock, flags);
 }
 
-static struct vb2_ops cx8800_video_qops = {
+static const struct vb2_ops cx8800_video_qops = {
 	.queue_setup    = queue_setup,
 	.buf_prepare  = buffer_prepare,
 	.buf_finish = buffer_finish,
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index 47def73..18e3a4d 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -1643,53 +1643,53 @@
 /******************************************************************************/
 /******************************************************************************/
 
-static struct ddb_info ddb_none = {
+static const struct ddb_info ddb_none = {
 	.type     = DDB_NONE,
 	.name     = "Digital Devices PCIe bridge",
 };
 
-static struct ddb_info ddb_octopus = {
+static const struct ddb_info ddb_octopus = {
 	.type     = DDB_OCTOPUS,
 	.name     = "Digital Devices Octopus DVB adapter",
 	.port_num = 4,
 };
 
-static struct ddb_info ddb_octopus_le = {
+static const struct ddb_info ddb_octopus_le = {
 	.type     = DDB_OCTOPUS,
 	.name     = "Digital Devices Octopus LE DVB adapter",
 	.port_num = 2,
 };
 
-static struct ddb_info ddb_octopus_mini = {
+static const struct ddb_info ddb_octopus_mini = {
 	.type     = DDB_OCTOPUS,
 	.name     = "Digital Devices Octopus Mini",
 	.port_num = 4,
 };
 
-static struct ddb_info ddb_v6 = {
+static const struct ddb_info ddb_v6 = {
 	.type     = DDB_OCTOPUS,
 	.name     = "Digital Devices Cine S2 V6 DVB adapter",
 	.port_num = 3,
 };
-static struct ddb_info ddb_v6_5 = {
+static const struct ddb_info ddb_v6_5 = {
 	.type     = DDB_OCTOPUS,
 	.name     = "Digital Devices Cine S2 V6.5 DVB adapter",
 	.port_num = 4,
 };
 
-static struct ddb_info ddb_dvbct = {
+static const struct ddb_info ddb_dvbct = {
 	.type     = DDB_OCTOPUS,
 	.name     = "Digital Devices DVBCT V6.1 DVB adapter",
 	.port_num = 3,
 };
 
-static struct ddb_info ddb_satixS2v3 = {
+static const struct ddb_info ddb_satixS2v3 = {
 	.type     = DDB_OCTOPUS,
 	.name     = "Mystique SaTiX-S2 V3 DVB adapter",
 	.port_num = 3,
 };
 
-static struct ddb_info ddb_octopusv3 = {
+static const struct ddb_info ddb_octopusv3 = {
 	.type     = DDB_OCTOPUS,
 	.name     = "Digital Devices Octopus V3 DVB adapter",
 	.port_num = 4,
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
index f198b98..a26f980 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
@@ -318,7 +318,7 @@
 	return vmalloc_to_page(pageptr);
 }
 
-static struct snd_pcm_ops snd_ivtv_pcm_capture_ops = {
+static const struct snd_pcm_ops snd_ivtv_pcm_capture_ops = {
 	.open		= snd_ivtv_pcm_capture_open,
 	.close		= snd_ivtv_pcm_capture_close,
 	.ioctl		= snd_ivtv_pcm_ioctl,
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 374033a..ee48c3e 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -750,7 +750,7 @@
 	spin_lock_init(&itv->lock);
 	spin_lock_init(&itv->dma_reg_lock);
 
-	init_kthread_worker(&itv->irq_worker);
+	kthread_init_worker(&itv->irq_worker);
 	itv->irq_worker_task = kthread_run(kthread_worker_fn, &itv->irq_worker,
 					   "%s", itv->v4l2_dev.name);
 	if (IS_ERR(itv->irq_worker_task)) {
@@ -760,7 +760,7 @@
 	/* must use the FIFO scheduler as it is realtime sensitive */
 	sched_setscheduler(itv->irq_worker_task, SCHED_FIFO, &param);
 
-	init_kthread_work(&itv->irq_work, ivtv_irq_work_handler);
+	kthread_init_work(&itv->irq_work, ivtv_irq_work_handler);
 
 	/* Initial settings */
 	itv->cxhdl.port = CX2341X_PORT_MEMORY;
@@ -1441,7 +1441,7 @@
 	del_timer_sync(&itv->dma_timer);
 
 	/* Kill irq worker */
-	flush_kthread_worker(&itv->irq_worker);
+	kthread_flush_worker(&itv->irq_worker);
 	kthread_stop(itv->irq_worker_task);
 
 	ivtv_streams_cleanup(itv);
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c
index bccbf2d..dea80ef 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.c
+++ b/drivers/media/pci/ivtv/ivtv-i2c.c
@@ -215,7 +215,8 @@
 		/* Default to grey remote */
 		init_data->ir_codes = RC_MAP_HAUPPAUGE;
 		init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
-		init_data->type = RC_BIT_RC5;
+		init_data->type = RC_BIT_RC5 | RC_BIT_RC6_MCE |
+							RC_BIT_RC6_6A_32;
 		init_data->name = itv->card_name;
 		break;
 	case IVTV_HW_I2C_IR_RX_ADAPTEC:
@@ -625,7 +626,7 @@
 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
-static struct i2c_algorithm ivtv_algo = {
+static const struct i2c_algorithm ivtv_algo = {
 	.master_xfer   = ivtv_xfer,
 	.functionality = ivtv_functionality,
 };
diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c
index 36ca2d6..6efe1f7 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.c
+++ b/drivers/media/pci/ivtv/ivtv-irq.c
@@ -1062,7 +1062,7 @@
 	}
 
 	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
-		queue_kthread_work(&itv->irq_worker, &itv->irq_work);
+		kthread_queue_work(&itv->irq_worker, &itv->irq_work);
 	}
 
 	spin_unlock(&itv->dma_reg_lock);
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index 4769469..2c9232e 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -124,8 +124,8 @@
 	}
 
 	/* Get user pages for DMA Xfer */
-	err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, 0,
-			1, dma->map);
+	err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
+			dma->map, FOLL_FORCE);
 
 	if (user_dma.page_count != err) {
 		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index b094054..f7299d3 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -76,11 +76,12 @@
 
 	/* Get user pages for DMA Xfer */
 	y_pages = get_user_pages_unlocked(y_dma.uaddr,
-			y_dma.page_count, 0, 1, &dma->map[0]);
+			y_dma.page_count, &dma->map[0], FOLL_FORCE);
 	uv_pages = 0; /* silence gcc. value is set and consumed only if: */
 	if (y_pages == y_dma.page_count) {
 		uv_pages = get_user_pages_unlocked(uv_dma.uaddr,
-				uv_dma.page_count, 0, 1, &dma->map[y_pages]);
+				uv_dma.page_count, &dma->map[y_pages],
+				FOLL_FORCE);
 	}
 
 	if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index ac547cb..b078ac2 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -353,7 +353,7 @@
 	netup_unidvb_queue_cleanup(dma);
 }
 
-static struct vb2_ops dvb_qops = {
+static const struct vb2_ops dvb_qops = {
 	.queue_setup		= netup_unidvb_queue_setup,
 	.buf_prepare		= netup_unidvb_buf_prepare,
 	.buf_queue		= netup_unidvb_buf_queue,
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
index c09c52b..b49e4f9 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
@@ -327,11 +327,8 @@
 	i2c->adap.dev.parent = &ndev->pci_dev->dev;
 	i2c_set_adapdata(&i2c->adap, i2c);
 	ret = i2c_add_adapter(&i2c->adap);
-	if (ret) {
-		dev_err(&ndev->pci_dev->dev,
-			"%s(): failed to add I2C adapter\n", __func__);
+	if (ret)
 		return ret;
-	}
 	dev_info(&ndev->pci_dev->dev,
 		"%s(): registered I2C bus %d at 0x%x\n",
 		__func__,
diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c
index 4e783a6..423e8c8 100644
--- a/drivers/media/pci/ngene/ngene-cards.c
+++ b/drivers/media/pci/ngene/ngene-cards.c
@@ -613,7 +613,7 @@
 	.clk_div = 1,
 };
 
-static struct ngene_info ngene_info_cineS2 = {
+static const struct ngene_info ngene_info_cineS2 = {
 	.type		= NGENE_SIDEWINDER,
 	.name		= "Linux4Media cineS2 DVB-S2 Twin Tuner",
 	.io_type	= {NGENE_IO_TSIN, NGENE_IO_TSIN},
@@ -627,7 +627,7 @@
 	.msi_supported	= true,
 };
 
-static struct ngene_info ngene_info_satixS2 = {
+static const struct ngene_info ngene_info_satixS2 = {
 	.type		= NGENE_SIDEWINDER,
 	.name		= "Mystique SaTiX-S2 Dual",
 	.io_type	= {NGENE_IO_TSIN, NGENE_IO_TSIN},
@@ -641,7 +641,7 @@
 	.msi_supported	= true,
 };
 
-static struct ngene_info ngene_info_satixS2v2 = {
+static const struct ngene_info ngene_info_satixS2v2 = {
 	.type		= NGENE_SIDEWINDER,
 	.name		= "Mystique SaTiX-S2 Dual (v2)",
 	.io_type	= {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
@@ -656,7 +656,7 @@
 	.msi_supported	= true,
 };
 
-static struct ngene_info ngene_info_cineS2v5 = {
+static const struct ngene_info ngene_info_cineS2v5 = {
 	.type		= NGENE_SIDEWINDER,
 	.name		= "Linux4Media cineS2 DVB-S2 Twin Tuner (v5)",
 	.io_type	= {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
@@ -672,7 +672,7 @@
 };
 
 
-static struct ngene_info ngene_info_duoFlex = {
+static const struct ngene_info ngene_info_duoFlex = {
 	.type           = NGENE_SIDEWINDER,
 	.name           = "Digital Devices DuoFlex PCIe or miniPCIe",
 	.io_type        = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
@@ -687,7 +687,7 @@
 	.msi_supported	= true,
 };
 
-static struct ngene_info ngene_info_m780 = {
+static const struct ngene_info ngene_info_m780 = {
 	.type           = NGENE_APP,
 	.name           = "Aver M780 ATSC/QAM-B",
 
@@ -727,7 +727,7 @@
 	.osc_deviation  = osc_deviation,
 };
 
-static struct ngene_info ngene_info_terratec = {
+static const struct ngene_info ngene_info_terratec = {
 	.type           = NGENE_TERRATEC,
 	.name           = "Terratec Integra/Cinergy2400i Dual DVB-T",
 	.io_type        = {NGENE_IO_TSIN, NGENE_IO_TSIN},
diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c
index eff5e9f..7fb649e 100644
--- a/drivers/media/pci/pt3/pt3.c
+++ b/drivers/media/pci/pt3/pt3.c
@@ -798,10 +798,8 @@
 	strlcpy(i2c->name, DRV_NAME, sizeof(i2c->name));
 	i2c_set_adapdata(i2c, pt3);
 	ret = i2c_add_adapter(i2c);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "Failed to add i2c adapter\n");
+	if (ret < 0)
 		goto err_i2cbuf;
-	}
 
 	for (i = 0; i < PT3_NUM_FE; i++) {
 		ret = pt3_alloc_adapter(pt3, i);
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index 94f8162..dc0e2fc 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -877,7 +877,7 @@
  * ALSA capture callbacks definition
  */
 
-static struct snd_pcm_ops snd_card_saa7134_capture_ops = {
+static const struct snd_pcm_ops snd_card_saa7134_capture_ops = {
 	.open =			snd_card_saa7134_capture_open,
 	.close =		snd_card_saa7134_capture_close,
 	.ioctl =		snd_pcm_lib_ioctl,
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index 791a516..f0fe252 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -85,7 +85,7 @@
 	dev->empress_started = 0;
 }
 
-static struct vb2_ops saa7134_empress_qops = {
+static const struct vb2_ops saa7134_empress_qops = {
 	.queue_setup	= saa7134_ts_queue_setup,
 	.buf_init	= saa7134_ts_buffer_init,
 	.buf_prepare	= saa7134_ts_buffer_prepare,
diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
index 8ef6399..2dac48f 100644
--- a/drivers/media/pci/saa7134/saa7134-i2c.c
+++ b/drivers/media/pci/saa7134/saa7134-i2c.c
@@ -338,7 +338,7 @@
 	return I2C_FUNC_SMBUS_EMUL;
 }
 
-static struct i2c_algorithm saa7134_algo = {
+static const struct i2c_algorithm saa7134_algo = {
 	.master_xfer   = saa7134_i2c_xfer,
 	.functionality = functionality,
 };
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index c8042c3..eff52bb 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -345,7 +345,7 @@
 	if (data[9] != (unsigned char)(~data[8]))
 		return 0;
 
-	*protocol = RC_TYPE_NEC;
+	*protocol = RC_TYPE_NECX;
 	*scancode = RC_SCANCODE_NECX(data[11] << 8 | data[10], data[9]);
 	*toggle = 0;
 	return 1;
@@ -1035,7 +1035,7 @@
 		dev->init_data.name = "BeholdTV";
 		dev->init_data.get_key = get_key_beholdm6xx;
 		dev->init_data.ir_codes = RC_MAP_BEHOLD;
-		dev->init_data.type = RC_BIT_NEC;
+		dev->init_data.type = RC_BIT_NECX;
 		info.addr = 0x2d;
 		break;
 	case SAA7134_BOARD_AVERMEDIA_CARDBUS_501:
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 8a6ebd0..cbb173d 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1054,7 +1054,7 @@
 		pm_qos_remove_request(&dev->qos_request);
 }
 
-static struct vb2_ops vb2_qops = {
+static const struct vb2_ops vb2_qops = {
 	.queue_setup	= queue_setup,
 	.buf_init	= buffer_init,
 	.buf_prepare	= buffer_prepare,
@@ -1659,8 +1659,6 @@
 	if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
 	    cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
 		return -EINVAL;
-	cap->bounds  = dev->crop_bounds;
-	cap->defrect = dev->crop_defrect;
 	cap->pixelaspect.numerator   = 1;
 	cap->pixelaspect.denominator = 1;
 	if (dev->tvnorm->id & V4L2_STD_525_60) {
@@ -1674,25 +1672,41 @@
 	return 0;
 }
 
-static int saa7134_g_crop(struct file *file, void *f, struct v4l2_crop *crop)
+static int saa7134_g_selection(struct file *file, void *f, struct v4l2_selection *sel)
 {
 	struct saa7134_dev *dev = video_drvdata(file);
 
-	if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
-	    crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
+	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+	    sel->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
 		return -EINVAL;
-	crop->c = dev->crop_current;
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP:
+		sel->r = dev->crop_current;
+		break;
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		sel->r = dev->crop_defrect;
+		break;
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		sel->r  = dev->crop_bounds;
+		break;
+	default:
+		return -EINVAL;
+	}
 	return 0;
 }
 
-static int saa7134_s_crop(struct file *file, void *f, const struct v4l2_crop *crop)
+static int saa7134_s_selection(struct file *file, void *f, struct v4l2_selection *sel)
 {
 	struct saa7134_dev *dev = video_drvdata(file);
 	struct v4l2_rect *b = &dev->crop_bounds;
 	struct v4l2_rect *c = &dev->crop_current;
 
-	if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
-	    crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
+	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+	    sel->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
+		return -EINVAL;
+
+	if (sel->target != V4L2_SEL_TGT_CROP)
 		return -EINVAL;
 
 	if (dev->overlay_owner)
@@ -1700,7 +1714,7 @@
 	if (vb2_is_streaming(&dev->video_vbq))
 		return -EBUSY;
 
-	*c = crop->c;
+	*c = sel->r;
 	if (c->top < b->top)
 		c->top = b->top;
 	if (c->top > b->top + b->height)
@@ -1714,6 +1728,7 @@
 		c->left = b->left + b->width;
 	if (c->width > b->left - c->left + b->width)
 		c->width = b->left - c->left + b->width;
+	sel->r = *c;
 	return 0;
 }
 
@@ -1989,8 +2004,8 @@
 	.vidioc_streamoff		= vb2_ioctl_streamoff,
 	.vidioc_g_tuner			= saa7134_g_tuner,
 	.vidioc_s_tuner			= saa7134_s_tuner,
-	.vidioc_g_crop			= saa7134_g_crop,
-	.vidioc_s_crop			= saa7134_s_crop,
+	.vidioc_g_selection		= saa7134_g_selection,
+	.vidioc_s_selection		= saa7134_s_selection,
 	.vidioc_g_fbuf			= saa7134_g_fbuf,
 	.vidioc_s_fbuf			= saa7134_s_fbuf,
 	.vidioc_overlay			= saa7134_overlay,
diff --git a/drivers/media/pci/saa7164/saa7164-i2c.c b/drivers/media/pci/saa7164/saa7164-i2c.c
index 0342d84..024f4e2 100644
--- a/drivers/media/pci/saa7164/saa7164-i2c.c
+++ b/drivers/media/pci/saa7164/saa7164-i2c.c
@@ -75,7 +75,7 @@
 	return I2C_FUNC_I2C;
 }
 
-static struct i2c_algorithm saa7164_i2c_algo_template = {
+static const struct i2c_algorithm saa7164_i2c_algo_template = {
 	.master_xfer	= i2c_xfer,
 	.functionality	= saa7164_functionality,
 };
diff --git a/drivers/media/pci/smipcie/smipcie-main.c b/drivers/media/pci/smipcie/smipcie-main.c
index 83981d61..6dbe3b4 100644
--- a/drivers/media/pci/smipcie/smipcie-main.c
+++ b/drivers/media/pci/smipcie/smipcie-main.c
@@ -1060,7 +1060,7 @@
 }
 
 /* DVBSky cards */
-static struct smi_cfg_info dvbsky_s950_cfg = {
+static const struct smi_cfg_info dvbsky_s950_cfg = {
 	.type = SMI_DVBSKY_S950,
 	.name = "DVBSky S950 V3",
 	.ts_0 = SMI_TS_NULL,
@@ -1070,7 +1070,7 @@
 	.rc_map = RC_MAP_DVBSKY,
 };
 
-static struct smi_cfg_info dvbsky_s952_cfg = {
+static const struct smi_cfg_info dvbsky_s952_cfg = {
 	.type = SMI_DVBSKY_S952,
 	.name = "DVBSky S952 V3",
 	.ts_0 = SMI_TS_DMA_BOTH,
@@ -1080,7 +1080,7 @@
 	.rc_map = RC_MAP_DVBSKY,
 };
 
-static struct smi_cfg_info dvbsky_t9580_cfg = {
+static const struct smi_cfg_info dvbsky_t9580_cfg = {
 	.type = SMI_DVBSKY_T9580,
 	.name = "DVBSky T9580 V3",
 	.ts_0 = SMI_TS_DMA_BOTH,
@@ -1090,7 +1090,7 @@
 	.rc_map = RC_MAP_DVBSKY,
 };
 
-static struct smi_cfg_info technotrend_s2_4200_cfg = {
+static const struct smi_cfg_info technotrend_s2_4200_cfg = {
 	.type = SMI_TECHNOTREND_S2_4200,
 	.name = "TechnoTrend TT-budget S2-4200 Twin",
 	.ts_0 = SMI_TS_DMA_BOTH,
diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
index 4a37a1c..6a35107 100644
--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
@@ -252,7 +252,7 @@
 	return 0;
 }
 
-static struct snd_pcm_ops snd_solo_pcm_ops = {
+static const struct snd_pcm_ops snd_solo_pcm_ops = {
 	.open = snd_solo_pcm_open,
 	.close = snd_solo_pcm_close,
 	.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
index 3991643..25a2137 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
@@ -759,7 +759,7 @@
 	}
 }
 
-static struct vb2_ops solo_enc_video_qops = {
+static const struct vb2_ops solo_enc_video_qops = {
 	.queue_setup	= solo_enc_queue_setup,
 	.buf_queue	= solo_enc_buf_queue,
 	.buf_finish	= solo_enc_buf_finish,
diff --git a/drivers/media/pci/tw5864/Kconfig b/drivers/media/pci/tw5864/Kconfig
new file mode 100644
index 0000000..87c8f32
--- /dev/null
+++ b/drivers/media/pci/tw5864/Kconfig
@@ -0,0 +1,12 @@
+config VIDEO_TW5864
+	tristate "Techwell TW5864 video/audio grabber and encoder"
+	depends on VIDEO_DEV && PCI && VIDEO_V4L2
+	depends on HAS_DMA
+	select VIDEOBUF2_DMA_CONTIG
+	---help---
+	  Support for boards based on Techwell TW5864 chip which provides
+	  multichannel video & audio grabbing and encoding (H.264, MJPEG,
+	  ADPCM G.726).
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called tw5864.
diff --git a/drivers/media/pci/tw5864/Makefile b/drivers/media/pci/tw5864/Makefile
new file mode 100644
index 0000000..4fc8b3b
--- /dev/null
+++ b/drivers/media/pci/tw5864/Makefile
@@ -0,0 +1,3 @@
+tw5864-objs := tw5864-core.o tw5864-video.o tw5864-h264.o tw5864-util.o
+
+obj-$(CONFIG_VIDEO_TW5864) += tw5864.o
diff --git a/drivers/media/pci/tw5864/tw5864-core.c b/drivers/media/pci/tw5864/tw5864-core.c
new file mode 100644
index 0000000..1d43b96
--- /dev/null
+++ b/drivers/media/pci/tw5864/tw5864-core.c
@@ -0,0 +1,359 @@
+/*
+ *  TW5864 driver - core functions
+ *
+ *  Copyright (C) 2016 Bluecherry, LLC <maintainers@bluecherrydvr.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/kmod.h>
+#include <linux/sound.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm.h>
+#include <linux/pci_ids.h>
+#include <linux/jiffies.h>
+#include <asm/dma.h>
+#include <media/v4l2-dev.h>
+
+#include "tw5864.h"
+#include "tw5864-reg.h"
+
+MODULE_DESCRIPTION("V4L2 driver module for tw5864-based multimedia capture & encoding devices");
+MODULE_AUTHOR("Bluecherry Maintainers <maintainers@bluecherrydvr.com>");
+MODULE_AUTHOR("Andrey Utkin <andrey.utkin@corp.bluecherry.net>");
+MODULE_LICENSE("GPL");
+
+/*
+ * BEWARE OF KNOWN ISSUES WITH VIDEO QUALITY
+ *
+ * This driver was developed by Bluecherry LLC by deducing behaviour of
+ * original manufacturer's driver, from both source code and execution traces.
+ * It is known that there are some artifacts on output video with this driver:
+ *  - on all known hardware samples: random pixels of wrong color (mostly
+ *    white, red or blue) appearing and disappearing on sequences of P-frames;
+ *  - on some hardware samples (known with H.264 core version e006:2800):
+ *    total madness on P-frames: blocks of wrong luminance; blocks of wrong
+ *    colors "creeping" across the picture.
+ * There is a workaround for both issues: avoid P-frames by setting GOP size
+ * to 1. To do that, run this command on device files created by this driver:
+ *
+ * v4l2-ctl --device /dev/videoX --set-ctrl=video_gop_size=1
+ *
+ * These issues are not decoding errors; all produced H.264 streams are decoded
+ * properly. Streams without P-frames don't have these artifacts so it's not
+ * analog-to-digital conversion issues nor internal memory errors; we conclude
+ * it's internal H.264 encoder issues.
+ * We cannot even check the original driver's behaviour because it has never
+ * worked properly at all in our development environment. So these issues may
+ * be actually related to firmware or hardware. However it may be that there's
+ * just some more register settings missing in the driver which would please
+ * the hardware.
+ * Manufacturer didn't help much on our inquiries, but feel free to disturb
+ * again the support of Intersil (owner of former Techwell).
+ */
+
+/* take first free /dev/videoX indexes by default */
+static unsigned int video_nr[] = {[0 ... (TW5864_INPUTS - 1)] = -1 };
+
+module_param_array(video_nr, int, NULL, 0444);
+MODULE_PARM_DESC(video_nr, "video devices numbers array");
+
+/*
+ * Please add any new PCI IDs to: http://pci-ids.ucw.cz.  This keeps
+ * the PCI ID database up to date.  Note that the entries must be
+ * added under vendor 0x1797 (Techwell Inc.) as subsystem IDs.
+ */
+static const struct pci_device_id tw5864_pci_tbl[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_5864)},
+	{0,}
+};
+
+void tw5864_irqmask_apply(struct tw5864_dev *dev)
+{
+	tw_writel(TW5864_INTR_ENABLE_L, dev->irqmask & 0xffff);
+	tw_writel(TW5864_INTR_ENABLE_H, (dev->irqmask >> 16));
+}
+
+static void tw5864_interrupts_disable(struct tw5864_dev *dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->slock, flags);
+	dev->irqmask = 0;
+	tw5864_irqmask_apply(dev);
+	spin_unlock_irqrestore(&dev->slock, flags);
+}
+
+static void tw5864_timer_isr(struct tw5864_dev *dev);
+static void tw5864_h264_isr(struct tw5864_dev *dev);
+
+static irqreturn_t tw5864_isr(int irq, void *dev_id)
+{
+	struct tw5864_dev *dev = dev_id;
+	u32 status;
+
+	status = tw_readl(TW5864_INTR_STATUS_L) |
+		tw_readl(TW5864_INTR_STATUS_H) << 16;
+	if (!status)
+		return IRQ_NONE;
+
+	tw_writel(TW5864_INTR_CLR_L, 0xffff);
+	tw_writel(TW5864_INTR_CLR_H, 0xffff);
+
+	if (status & TW5864_INTR_VLC_DONE)
+		tw5864_h264_isr(dev);
+
+	if (status & TW5864_INTR_TIMER)
+		tw5864_timer_isr(dev);
+
+	if (!(status & (TW5864_INTR_TIMER | TW5864_INTR_VLC_DONE))) {
+		dev_dbg(&dev->pci->dev, "Unknown interrupt, status 0x%08X\n",
+			status);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void tw5864_h264_isr(struct tw5864_dev *dev)
+{
+	int channel = tw_readl(TW5864_DSP) & TW5864_DSP_ENC_CHN;
+	struct tw5864_input *input = &dev->inputs[channel];
+	int cur_frame_index, next_frame_index;
+	struct tw5864_h264_frame *cur_frame, *next_frame;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->slock, flags);
+
+	cur_frame_index = dev->h264_buf_w_index;
+	next_frame_index = (cur_frame_index + 1) % H264_BUF_CNT;
+	cur_frame = &dev->h264_buf[cur_frame_index];
+	next_frame = &dev->h264_buf[next_frame_index];
+
+	if (next_frame_index != dev->h264_buf_r_index) {
+		cur_frame->vlc_len = tw_readl(TW5864_VLC_LENGTH) << 2;
+		cur_frame->checksum = tw_readl(TW5864_VLC_CRC_REG);
+		cur_frame->input = input;
+		cur_frame->timestamp = ktime_get_ns();
+		cur_frame->seqno = input->frame_seqno;
+		cur_frame->gop_seqno = input->frame_gop_seqno;
+
+		dev->h264_buf_w_index = next_frame_index;
+		tasklet_schedule(&dev->tasklet);
+
+		cur_frame = next_frame;
+
+		spin_lock(&input->slock);
+		input->frame_seqno++;
+		input->frame_gop_seqno++;
+		if (input->frame_gop_seqno >= input->gop)
+			input->frame_gop_seqno = 0;
+		spin_unlock(&input->slock);
+	} else {
+		dev_err(&dev->pci->dev,
+			"Skipped frame on input %d because all buffers busy\n",
+			channel);
+	}
+
+	dev->encoder_busy = 0;
+
+	spin_unlock_irqrestore(&dev->slock, flags);
+
+	tw_writel(TW5864_VLC_STREAM_BASE_ADDR, cur_frame->vlc.dma_addr);
+	tw_writel(TW5864_MV_STREAM_BASE_ADDR, cur_frame->mv.dma_addr);
+
+	/* Additional ack for this interrupt */
+	tw_writel(TW5864_VLC_DSP_INTR, 0x00000001);
+	tw_writel(TW5864_PCI_INTR_STATUS, TW5864_VLC_DONE_INTR);
+}
+
+static void tw5864_input_deadline_update(struct tw5864_input *input)
+{
+	input->new_frame_deadline = jiffies + msecs_to_jiffies(1000);
+}
+
+static void tw5864_timer_isr(struct tw5864_dev *dev)
+{
+	unsigned long flags;
+	int i;
+	int encoder_busy;
+
+	/* Additional ack for this interrupt */
+	tw_writel(TW5864_PCI_INTR_STATUS, TW5864_TIMER_INTR);
+
+	spin_lock_irqsave(&dev->slock, flags);
+	encoder_busy = dev->encoder_busy;
+	spin_unlock_irqrestore(&dev->slock, flags);
+
+	if (encoder_busy)
+		return;
+
+	/*
+	 * Traversing inputs in round-robin fashion, starting from next to the
+	 * last processed one
+	 */
+	for (i = 0; i < TW5864_INPUTS; i++) {
+		int next_input = (i + dev->next_input) % TW5864_INPUTS;
+		struct tw5864_input *input = &dev->inputs[next_input];
+		int raw_buf_id; /* id of internal buf with last raw frame */
+
+		spin_lock_irqsave(&input->slock, flags);
+		if (!input->enabled)
+			goto next;
+
+		/* Check if new raw frame is available */
+		raw_buf_id = tw_mask_shift_readl(TW5864_SENIF_ORG_FRM_PTR1, 0x3,
+						 2 * input->nr);
+
+		if (input->buf_id != raw_buf_id) {
+			input->buf_id = raw_buf_id;
+			tw5864_input_deadline_update(input);
+			spin_unlock_irqrestore(&input->slock, flags);
+
+			spin_lock_irqsave(&dev->slock, flags);
+			dev->encoder_busy = 1;
+			dev->next_input = (next_input + 1) % TW5864_INPUTS;
+			spin_unlock_irqrestore(&dev->slock, flags);
+
+			tw5864_request_encoded_frame(input);
+			break;
+		}
+
+		/* No new raw frame; check if channel is stuck */
+		if (time_is_after_jiffies(input->new_frame_deadline)) {
+			/* If stuck, request new raw frames again */
+			tw_mask_shift_writel(TW5864_ENC_BUF_PTR_REC1, 0x3,
+					     2 * input->nr, input->buf_id + 3);
+			tw5864_input_deadline_update(input);
+		}
+next:
+		spin_unlock_irqrestore(&input->slock, flags);
+	}
+}
+
+static int tw5864_initdev(struct pci_dev *pci_dev,
+			  const struct pci_device_id *pci_id)
+{
+	struct tw5864_dev *dev;
+	int err;
+
+	dev = devm_kzalloc(&pci_dev->dev, sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	snprintf(dev->name, sizeof(dev->name), "tw5864:%s", pci_name(pci_dev));
+
+	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
+	if (err)
+		return err;
+
+	/* pci init */
+	dev->pci = pci_dev;
+	err = pci_enable_device(pci_dev);
+	if (err) {
+		dev_err(&dev->pci->dev, "pci_enable_device() failed\n");
+		goto unreg_v4l2;
+	}
+
+	pci_set_master(pci_dev);
+
+	err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+	if (err) {
+		dev_err(&dev->pci->dev, "32 bit PCI DMA is not supported\n");
+		goto disable_pci;
+	}
+
+	/* get mmio */
+	err = pci_request_regions(pci_dev, dev->name);
+	if (err) {
+		dev_err(&dev->pci->dev, "Cannot request regions for MMIO\n");
+		goto disable_pci;
+	}
+	dev->mmio = pci_ioremap_bar(pci_dev, 0);
+	if (!dev->mmio) {
+		err = -EIO;
+		dev_err(&dev->pci->dev, "can't ioremap() MMIO memory\n");
+		goto release_mmio;
+	}
+
+	spin_lock_init(&dev->slock);
+
+	dev_info(&pci_dev->dev, "TW5864 hardware version: %04x\n",
+		 tw_readl(TW5864_HW_VERSION));
+	dev_info(&pci_dev->dev, "TW5864 H.264 core version: %04x:%04x\n",
+		 tw_readl(TW5864_H264REV),
+		 tw_readl(TW5864_UNDECLARED_H264REV_PART2));
+
+	err = tw5864_video_init(dev, video_nr);
+	if (err)
+		goto unmap_mmio;
+
+	/* get irq */
+	err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw5864_isr,
+			       IRQF_SHARED, "tw5864", dev);
+	if (err < 0) {
+		dev_err(&dev->pci->dev, "can't get IRQ %d\n", pci_dev->irq);
+		goto fini_video;
+	}
+
+	dev_info(&pci_dev->dev, "Note: there are known video quality issues. For details\n");
+	dev_info(&pci_dev->dev, "see the comment in drivers/media/pci/tw5864/tw5864-core.c.\n");
+
+	return 0;
+
+fini_video:
+	tw5864_video_fini(dev);
+unmap_mmio:
+	iounmap(dev->mmio);
+release_mmio:
+	pci_release_regions(pci_dev);
+disable_pci:
+	pci_disable_device(pci_dev);
+unreg_v4l2:
+	v4l2_device_unregister(&dev->v4l2_dev);
+	return err;
+}
+
+static void tw5864_finidev(struct pci_dev *pci_dev)
+{
+	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
+	struct tw5864_dev *dev =
+		container_of(v4l2_dev, struct tw5864_dev, v4l2_dev);
+
+	/* shutdown subsystems */
+	tw5864_interrupts_disable(dev);
+
+	/* unregister */
+	tw5864_video_fini(dev);
+
+	/* release resources */
+	iounmap(dev->mmio);
+	release_mem_region(pci_resource_start(pci_dev, 0),
+			   pci_resource_len(pci_dev, 0));
+
+	v4l2_device_unregister(&dev->v4l2_dev);
+	devm_kfree(&pci_dev->dev, dev);
+}
+
+static struct pci_driver tw5864_pci_driver = {
+	.name = "tw5864",
+	.id_table = tw5864_pci_tbl,
+	.probe = tw5864_initdev,
+	.remove = tw5864_finidev,
+};
+
+module_pci_driver(tw5864_pci_driver);
diff --git a/drivers/media/pci/tw5864/tw5864-h264.c b/drivers/media/pci/tw5864/tw5864-h264.c
new file mode 100644
index 0000000..330d200
--- /dev/null
+++ b/drivers/media/pci/tw5864/tw5864-h264.c
@@ -0,0 +1,259 @@
+/*
+ *  TW5864 driver - H.264 headers generation functions
+ *
+ *  Copyright (C) 2016 Bluecherry, LLC <maintainers@bluecherrydvr.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/log2.h>
+
+#include "tw5864.h"
+
+static u8 marker[] = { 0x00, 0x00, 0x00, 0x01 };
+
+/*
+ * Exponential-Golomb coding functions
+ *
+ * These functions are used for generation of H.264 bitstream headers.
+ *
+ * This code is derived from tw5864 reference driver by manufacturers, which
+ * itself apparently was derived from x264 project.
+ */
+
+/* Bitstream writing context */
+struct bs {
+	u8 *buf; /* pointer to buffer beginning */
+	u8 *buf_end; /* pointer to buffer end */
+	u8 *ptr; /* pointer to current byte in buffer */
+	unsigned int bits_left; /* number of available bits in current byte */
+};
+
+static void bs_init(struct bs *s, void *buf, int size)
+{
+	s->buf = buf;
+	s->ptr = buf;
+	s->buf_end = s->ptr + size;
+	s->bits_left = 8;
+}
+
+static int bs_len(struct bs *s)
+{
+	return s->ptr - s->buf;
+}
+
+static void bs_write(struct bs *s, int count, u32 bits)
+{
+	if (s->ptr >= s->buf_end - 4)
+		return;
+	while (count > 0) {
+		if (count < 32)
+			bits &= (1 << count) - 1;
+		if (count < s->bits_left) {
+			*s->ptr = (*s->ptr << count) | bits;
+			s->bits_left -= count;
+			break;
+		}
+		*s->ptr = (*s->ptr << s->bits_left) |
+			(bits >> (count - s->bits_left));
+		count -= s->bits_left;
+		s->ptr++;
+		s->bits_left = 8;
+	}
+}
+
+static void bs_write1(struct bs *s, u32 bit)
+{
+	if (s->ptr < s->buf_end) {
+		*s->ptr <<= 1;
+		*s->ptr |= bit;
+		s->bits_left--;
+		if (s->bits_left == 0) {
+			s->ptr++;
+			s->bits_left = 8;
+		}
+	}
+}
+
+static void bs_write_ue(struct bs *s, u32 val)
+{
+	if (val == 0) {
+		bs_write1(s, 1);
+	} else {
+		val++;
+		bs_write(s, 2 * fls(val) - 1, val);
+	}
+}
+
+static void bs_write_se(struct bs *s, int val)
+{
+	bs_write_ue(s, val <= 0 ? -val * 2 : val * 2 - 1);
+}
+
+static void bs_rbsp_trailing(struct bs *s)
+{
+	bs_write1(s, 1);
+	if (s->bits_left != 8)
+		bs_write(s, s->bits_left, 0x00);
+}
+
+/* H.264 headers generation functions */
+
+static int tw5864_h264_gen_sps_rbsp(u8 *buf, size_t size, int width, int height)
+{
+	struct bs bs, *s;
+
+	s = &bs;
+	bs_init(s, buf, size);
+	bs_write(s, 8, 0x42); /* profile_idc, baseline */
+	bs_write(s, 1, 1); /* constraint_set0_flag */
+	bs_write(s, 1, 1); /* constraint_set1_flag */
+	bs_write(s, 1, 0); /* constraint_set2_flag */
+	bs_write(s, 5, 0); /* reserved_zero_5bits */
+	bs_write(s, 8, 0x1e); /* level_idc */
+	bs_write_ue(s, 0); /* seq_parameter_set_id */
+	bs_write_ue(s, ilog2(MAX_GOP_SIZE) - 4); /* log2_max_frame_num_minus4 */
+	bs_write_ue(s, 0); /* pic_order_cnt_type */
+	/* log2_max_pic_order_cnt_lsb_minus4 */
+	bs_write_ue(s, ilog2(MAX_GOP_SIZE) - 4);
+	bs_write_ue(s, 1); /* num_ref_frames */
+	bs_write(s, 1, 0); /* gaps_in_frame_num_value_allowed_flag */
+	bs_write_ue(s, width / 16 - 1); /* pic_width_in_mbs_minus1 */
+	bs_write_ue(s, height / 16 - 1); /* pic_height_in_map_units_minus1 */
+	bs_write(s, 1, 1); /* frame_mbs_only_flag */
+	bs_write(s, 1, 0); /* direct_8x8_inference_flag */
+	bs_write(s, 1, 0); /* frame_cropping_flag */
+	bs_write(s, 1, 0); /* vui_parameters_present_flag */
+	bs_rbsp_trailing(s);
+	return bs_len(s);
+}
+
+static int tw5864_h264_gen_pps_rbsp(u8 *buf, size_t size, int qp)
+{
+	struct bs bs, *s;
+
+	s = &bs;
+	bs_init(s, buf, size);
+	bs_write_ue(s, 0); /* pic_parameter_set_id */
+	bs_write_ue(s, 0); /* seq_parameter_set_id */
+	bs_write(s, 1, 0); /* entropy_coding_mode_flag */
+	bs_write(s, 1, 0); /* pic_order_present_flag */
+	bs_write_ue(s, 0); /* num_slice_groups_minus1 */
+	bs_write_ue(s, 0); /* i_num_ref_idx_l0_active_minus1 */
+	bs_write_ue(s, 0); /* i_num_ref_idx_l1_active_minus1 */
+	bs_write(s, 1, 0); /* weighted_pred_flag */
+	bs_write(s, 2, 0); /* weighted_bipred_idc */
+	bs_write_se(s, qp - 26); /* pic_init_qp_minus26 */
+	bs_write_se(s, qp - 26); /* pic_init_qs_minus26 */
+	bs_write_se(s, 0); /* chroma_qp_index_offset */
+	bs_write(s, 1, 0); /* deblocking_filter_control_present_flag */
+	bs_write(s, 1, 0); /* constrained_intra_pred_flag */
+	bs_write(s, 1, 0); /* redundant_pic_cnt_present_flag */
+	bs_rbsp_trailing(s);
+	return bs_len(s);
+}
+
+static int tw5864_h264_gen_slice_head(u8 *buf, size_t size,
+				      unsigned int idr_pic_id,
+				      unsigned int frame_gop_seqno,
+				      int *tail_nb_bits, u8 *tail)
+{
+	struct bs bs, *s;
+	int is_i_frame = frame_gop_seqno == 0;
+
+	s = &bs;
+	bs_init(s, buf, size);
+	bs_write_ue(s, 0); /* first_mb_in_slice */
+	bs_write_ue(s, is_i_frame ? 2 : 5); /* slice_type - I or P */
+	bs_write_ue(s, 0); /* pic_parameter_set_id */
+	bs_write(s, ilog2(MAX_GOP_SIZE), frame_gop_seqno); /* frame_num */
+	if (is_i_frame)
+		bs_write_ue(s, idr_pic_id);
+
+	/* pic_order_cnt_lsb */
+	bs_write(s, ilog2(MAX_GOP_SIZE), frame_gop_seqno);
+
+	if (is_i_frame) {
+		bs_write1(s, 0); /* no_output_of_prior_pics_flag */
+		bs_write1(s, 0); /* long_term_reference_flag */
+	} else {
+		bs_write1(s, 0); /* num_ref_idx_active_override_flag */
+		bs_write1(s, 0); /* ref_pic_list_reordering_flag_l0 */
+		bs_write1(s, 0); /* adaptive_ref_pic_marking_mode_flag */
+	}
+
+	bs_write_se(s, 0); /* slice_qp_delta */
+
+	if (s->bits_left != 8) {
+		*tail = ((s->ptr[0]) << s->bits_left);
+		*tail_nb_bits = 8 - s->bits_left;
+	} else {
+		*tail = 0;
+		*tail_nb_bits = 0;
+	}
+
+	return bs_len(s);
+}
+
+void tw5864_h264_put_stream_header(u8 **buf, size_t *space_left, int qp,
+				   int width, int height)
+{
+	int nal_len;
+
+	/* SPS */
+	memcpy(*buf, marker, sizeof(marker));
+	*buf += 4;
+	*space_left -= 4;
+
+	**buf = 0x67; /* SPS NAL header */
+	*buf += 1;
+	*space_left -= 1;
+
+	nal_len = tw5864_h264_gen_sps_rbsp(*buf, *space_left, width, height);
+	*buf += nal_len;
+	*space_left -= nal_len;
+
+	/* PPS */
+	memcpy(*buf, marker, sizeof(marker));
+	*buf += 4;
+	*space_left -= 4;
+
+	**buf = 0x68; /* PPS NAL header */
+	*buf += 1;
+	*space_left -= 1;
+
+	nal_len = tw5864_h264_gen_pps_rbsp(*buf, *space_left, qp);
+	*buf += nal_len;
+	*space_left -= nal_len;
+}
+
+void tw5864_h264_put_slice_header(u8 **buf, size_t *space_left,
+				  unsigned int idr_pic_id,
+				  unsigned int frame_gop_seqno,
+				  int *tail_nb_bits, u8 *tail)
+{
+	int nal_len;
+
+	memcpy(*buf, marker, sizeof(marker));
+	*buf += 4;
+	*space_left -= 4;
+
+	/* Frame NAL header */
+	**buf = (frame_gop_seqno == 0) ? 0x25 : 0x21;
+	*buf += 1;
+	*space_left -= 1;
+
+	nal_len = tw5864_h264_gen_slice_head(*buf, *space_left, idr_pic_id,
+					     frame_gop_seqno, tail_nb_bits,
+					     tail);
+	*buf += nal_len;
+	*space_left -= nal_len;
+}
diff --git a/drivers/media/pci/tw5864/tw5864-reg.h b/drivers/media/pci/tw5864/tw5864-reg.h
new file mode 100644
index 0000000..92a1b07
--- /dev/null
+++ b/drivers/media/pci/tw5864/tw5864-reg.h
@@ -0,0 +1,2133 @@
+/*
+ *  TW5864 driver - registers description
+ *
+ *  Copyright (C) 2016 Bluecherry, LLC <maintainers@bluecherrydvr.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+/* According to TW5864_datasheet_0.6d.pdf, tw5864b1-ds.pdf */
+
+/* Register Description - Direct Map Space */
+/* 0x0000 ~ 0x1ffc - H264 Register Map */
+/* [15:0] The Version register for H264 core (Read Only) */
+#define TW5864_H264REV 0x0000
+
+#define TW5864_EMU 0x0004
+/* Define controls in register TW5864_EMU */
+/* DDR controller enabled */
+#define TW5864_EMU_EN_DDR BIT(0)
+/* Enable bit for Inter module */
+#define TW5864_EMU_EN_ME BIT(1)
+/* Enable bit for Sensor Interface module */
+#define TW5864_EMU_EN_SEN BIT(2)
+/* Enable bit for Host Burst Access */
+#define TW5864_EMU_EN_BHOST BIT(3)
+/* Enable bit for Loop Filter module */
+#define TW5864_EMU_EN_LPF BIT(4)
+/* Enable bit for PLBK module */
+#define TW5864_EMU_EN_PLBK BIT(5)
+/*
+ * Video Frame mapping in DDR
+ * 00 CIF
+ * 01 D1
+ * 10 Reserved
+ * 11 Reserved
+ *
+ */
+#define TW5864_DSP_FRAME_TYPE (3 << 6)
+#define TW5864_DSP_FRAME_TYPE_D1 BIT(6)
+
+#define TW5864_UNDECLARED_H264REV_PART2 0x0008
+
+#define TW5864_SLICE 0x000c
+/* Define controls in register TW5864_SLICE */
+/* VLC Slice end flag */
+#define TW5864_VLC_SLICE_END BIT(0)
+/* Master Slice End Flag */
+#define TW5864_MAS_SLICE_END BIT(4)
+/* Host to start a new slice Address */
+#define TW5864_START_NSLICE BIT(15)
+
+/*
+ * [15:0] Two bit for each channel (channel 0 ~ 7). Each two bits are the buffer
+ * pointer for the last encoded frame of the corresponding channel.
+ */
+#define TW5864_ENC_BUF_PTR_REC1 0x0010
+
+/* [5:0] DSP_MB_QP and [15:10] DSP_LPF_OFFSET */
+#define TW5864_DSP_QP 0x0018
+/* Define controls in register TW5864_DSP_QP */
+/* [5:0] H264 QP Value for codec */
+#define TW5864_DSP_MB_QP 0x003f
+/*
+ * [15:10] H264 LPF_OFFSET Address
+ * (Default 0)
+ */
+#define TW5864_DSP_LPF_OFFSET 0xfc00
+
+#define TW5864_DSP_CODEC 0x001c
+/* Define controls in register TW5864_DSP_CODEC */
+/*
+ * 0: Encode (TW5864 Default)
+ * 1: Decode
+ */
+#define TW5864_DSP_CODEC_MODE BIT(0)
+/*
+ * 0->3 4 VLC data buffer in DDR (1M each)
+ * 0->7 8 VLC data buffer in DDR (512k each)
+ */
+#define TW5864_VLC_BUF_ID (7 << 2)
+/*
+ * 0 4CIF in 1 MB
+ * 1 1CIF in 1 MB
+ */
+#define TW5864_CIF_MAP_MD BIT(6)
+/*
+ * 0 2 falf D1 in 1 MB
+ * 1 1 half D1 in 1 MB
+ */
+#define TW5864_HD1_MAP_MD BIT(7)
+/* VLC Stream valid */
+#define TW5864_VLC_VLD BIT(8)
+/* MV Vector Valid */
+#define TW5864_MV_VECT_VLD BIT(9)
+/* MV Flag Valid */
+#define TW5864_MV_FLAG_VLD BIT(10)
+
+#define TW5864_DSP_SEN 0x0020
+/* Define controls in register TW5864_DSP_SEN */
+/* Org Buffer Base for Luma (default 0) */
+#define TW5864_DSP_SEN_PIC_LU 0x000f
+/* Org Buffer Base for Chroma (default 4) */
+#define TW5864_DSP_SEN_PIC_CHM 0x00f0
+/* Maximum Number of Buffers (default 4) */
+#define TW5864_DSP_SEN_PIC_MAX 0x0700
+/*
+ * Original Frame D1 or HD1 switch
+ * (Default 0)
+ */
+#define TW5864_DSP_SEN_HFULL 0x1000
+
+#define TW5864_DSP_REF_PIC 0x0024
+/* Define controls in register TW5864_DSP_REF_PIC */
+/* Ref Buffer Base for Luma (default 0) */
+#define TW5864_DSP_REF_PIC_LU 0x000f
+/* Ref Buffer Base for Chroma (default 4) */
+#define TW5864_DSP_REF_PIC_CHM 0x00f0
+/* Maximum Number of Buffers (default 4) */
+#define TW5864_DSP_REF_PIC_MAX 0x0700
+
+/* [15:0] SEN_EN_CH[n] SENIF original frame capture enable for each channel */
+#define TW5864_SEN_EN_CH 0x0028
+
+#define TW5864_DSP 0x002c
+/* Define controls in register TW5864_DSP */
+/* The ID for channel selected for encoding operation */
+#define TW5864_DSP_ENC_CHN 0x000f
+/* See DSP_MB_DELAY below */
+#define TW5864_DSP_MB_WAIT 0x0010
+/*
+ * DSP Chroma Switch
+ * 0 DDRB
+ * 1 DDRA
+ */
+#define TW5864_DSP_CHROM_SW 0x0020
+/* VLC Flow Control: 1 for enable */
+#define TW5864_DSP_FLW_CNTL 0x0040
+/*
+ * If DSP_MB_WAIT == 0, MB delay is DSP_MB_DELAY * 16
+ * If DSP_MB_DELAY == 1, MB delay is DSP_MB_DELAY * 128
+ */
+#define TW5864_DSP_MB_DELAY 0x0f00
+
+#define TW5864_DDR 0x0030
+/* Define controls in register TW5864_DDR */
+/* DDR Single Access Page Number */
+#define TW5864_DDR_PAGE_CNTL 0x00ff
+/* DDR-DPR Burst Read Enable */
+#define TW5864_DDR_BRST_EN BIT(13)
+/*
+ * DDR A/B Select as HOST access
+ * 0 Select DDRA
+ * 1 Select DDRB
+ */
+#define TW5864_DDR_AB_SEL BIT(14)
+/*
+ * DDR Access Mode Select
+ * 0 Single R/W Access (Host <-> DDR)
+ * 1 Burst R/W Access (Host <-> DPR)
+ */
+#define TW5864_DDR_MODE BIT(15)
+
+/* The original frame capture pointer. Two bits for each channel */
+/* SENIF_ORG_FRM_PTR [15:0] */
+#define TW5864_SENIF_ORG_FRM_PTR1 0x0038
+/* SENIF_ORG_FRM_PTR [31:16] */
+#define TW5864_SENIF_ORG_FRM_PTR2 0x003c
+
+#define TW5864_DSP_SEN_MODE 0x0040
+/* Define controls in register TW5864_DSP_SEN_MODE */
+#define TW5864_DSP_SEN_MODE_CH0 0x000f
+#define TW5864_DSP_SEN_MODE_CH1 0x00f0
+
+/*
+ * [15:0]: ENC_BUF_PTR_REC[31:16] Two bit for each channel (channel 8 ~ 15).
+ * Each two bits are the buffer pointer for the last encoded frame of a channel
+ */
+#define TW5864_ENC_BUF_PTR_REC2 0x004c
+
+/* Current MV Flag Status Pointer for Channel n. (Read only) */
+/*
+ * [1:0] CH0_MV_PTR, ..., [15:14] CH7_MV_PTR
+ */
+#define TW5864_CH_MV_PTR1 0x0060
+/*
+ * [1:0] CH8_MV_PTR, ..., [15:14] CH15_MV_PTR
+ */
+#define TW5864_CH_MV_PTR2 0x0064
+
+/*
+ * [15:0] Reset Current MV Flag Status Pointer for Channel n (one bit each)
+ */
+#define TW5864_RST_MV_PTR 0x0068
+#define TW5864_INTERLACING 0x0200
+/* Define controls in register TW5864_INTERLACING */
+/*
+ * Inter_Mode Start. 2-nd bit? A guess. Missing in datasheet. Without this bit
+ * set, the output video is interlaced (stripy).
+ */
+#define TW5864_DSP_INTER_ST BIT(1)
+/* Deinterlacer Enable */
+#define TW5864_DI_EN BIT(2)
+/*
+ * De-interlacer Mode
+ * 1 Shuffled frame
+ * 0 Normal Un-Shuffled Frame
+ */
+#define TW5864_DI_MD BIT(3)
+/*
+ * Down scale original frame in X direction
+ * 11: Un-used
+ * 10: down-sample to 1/4
+ * 01: down-sample to 1/2
+ * 00: down-sample disabled
+ */
+#define TW5864_DSP_DWN_X (3 << 4)
+/*
+ * Down scale original frame in Y direction
+ * 11: Un-used
+ * 10: down-sample to 1/4
+ * 01: down-sample to 1/2
+ * 00: down-sample disabled
+ */
+#define TW5864_DSP_DWN_Y (3 << 6)
+/*
+ * 1 Dual Stream
+ * 0 Single Stream
+ */
+#define TW5864_DUAL_STR BIT(8)
+
+#define TW5864_DSP_REF 0x0204
+/* Define controls in register TW5864_DSP_REF */
+/* Number of reference frame (Default 1 for TW5864B) */
+#define TW5864_DSP_REF_FRM 0x000f
+/* Window size */
+#define TW5864_DSP_WIN_SIZE 0x02f0
+
+#define TW5864_DSP_SKIP 0x0208
+/* Define controls in register TW5864_DSP_SKIP */
+/*
+ * Skip Offset Enable bit
+ * 0 DSP_SKIP_OFFSET value is not used (default 8)
+ * 1 DSP_SKIP_OFFSET value is used in HW
+ */
+#define TW5864_DSP_SKIP_OFEN 0x0080
+/* Skip mode cost offset (default 8) */
+#define TW5864_DSP_SKIP_OFFSET 0x007f
+
+#define TW5864_MOTION_SEARCH_ETC 0x020c
+/* Define controls in register TW5864_MOTION_SEARCH_ETC */
+/* Enable quarter pel search mode */
+#define TW5864_QPEL_EN BIT(0)
+/* Enable half pel search mode */
+#define TW5864_HPEL_EN BIT(1)
+/* Enable motion search mode */
+#define TW5864_ME_EN BIT(2)
+/* Enable Intra mode */
+#define TW5864_INTRA_EN BIT(3)
+/* Enable Skip Mode */
+#define TW5864_SKIP_EN BIT(4)
+/* Search Option (Default 2"b01) */
+#define TW5864_SRCH_OPT (3 << 5)
+
+#define TW5864_DSP_ENC_REC 0x0210
+/* Define controls in register TW5864_DSP_ENC_REC */
+/* Reference Buffer Pointer for encoding */
+#define TW5864_DSP_ENC_REF_PTR 0x0007
+/* Reconstruct Buffer pointer */
+#define TW5864_DSP_REC_BUF_PTR 0x7000
+
+/* [15:0] Lambda Value for H264 */
+#define TW5864_DSP_REF_MVP_LAMBDA 0x0214
+
+#define TW5864_DSP_PIC_MAX_MB 0x0218
+/* Define controls in register TW5864_DSP_PIC_MAX_MB */
+/* The MB number in Y direction for a frame */
+#define TW5864_DSP_PIC_MAX_MB_Y 0x007f
+/* The MB number in X direction for a frame */
+#define TW5864_DSP_PIC_MAX_MB_X 0x7f00
+
+/* The original frame pointer for encoding */
+#define TW5864_DSP_ENC_ORG_PTR_REG 0x021c
+/* Mask to use with TW5864_DSP_ENC_ORG_PTR */
+#define TW5864_DSP_ENC_ORG_PTR_MASK 0x7000
+/* Number of bits to shift with TW5864_DSP_ENC_ORG_PTR */
+#define TW5864_DSP_ENC_ORG_PTR_SHIFT 12
+
+/* DDR base address of OSD rectangle attribute data */
+#define TW5864_DSP_OSD_ATTRI_BASE 0x0220
+/* OSD enable bit for each channel */
+#define TW5864_DSP_OSD_ENABLE 0x0228
+
+/* 0x0280 ~ 0x029c – Motion Vector for 1st 4x4 Block, e.g., 80 (X), 84 (Y) */
+#define TW5864_ME_MV_VEC1 0x0280
+/* 0x02a0 ~ 0x02bc – Motion Vector for 2nd 4x4 Block, e.g., A0 (X), A4 (Y) */
+#define TW5864_ME_MV_VEC2 0x02a0
+/* 0x02c0 ~ 0x02dc – Motion Vector for 3rd 4x4 Block, e.g., C0 (X), C4 (Y) */
+#define TW5864_ME_MV_VEC3 0x02c0
+/* 0x02e0 ~ 0x02fc – Motion Vector for 4th 4x4 Block, e.g., E0 (X), E4 (Y) */
+#define TW5864_ME_MV_VEC4 0x02e0
+
+/*
+ * [5:0]
+ * if (intra16x16_cost < (intra4x4_cost+dsp_i4x4_offset))
+ * Intra_mode = intra16x16_mode
+ * Else
+ * Intra_mode = intra4x4_mode
+ */
+#define TW5864_DSP_I4x4_OFFSET 0x040c
+
+/*
+ * [6:4]
+ * 0x5 Only 4x4
+ * 0x6 Only 16x16
+ * 0x7 16x16 & 4x4
+ */
+#define TW5864_DSP_INTRA_MODE 0x0410
+#define TW5864_DSP_INTRA_MODE_SHIFT 4
+#define TW5864_DSP_INTRA_MODE_MASK (7 << 4)
+#define TW5864_DSP_INTRA_MODE_4x4 0x5
+#define TW5864_DSP_INTRA_MODE_16x16 0x6
+#define TW5864_DSP_INTRA_MODE_4x4_AND_16x16 0x7
+/*
+ * [5:0] WEIGHT Factor for I4x4 cost calculation (QP dependent)
+ */
+#define TW5864_DSP_I4x4_WEIGHT 0x0414
+
+/*
+ * [7:0] Offset used to affect Intra/ME model decision
+ * If (me_cost < intra_cost + dsp_resid_mode_offset)
+ * Pred_Mode = me_mode
+ * Else
+ * Pred_mode = intra_mode
+ */
+#define TW5864_DSP_RESID_MODE_OFFSET 0x0604
+
+/* 0x0800 ~ 0x09ff - Quantization TABLE Values */
+#define TW5864_QUAN_TAB 0x0800
+
+/* Valid channel value [0; f], frame value [0; 3] */
+#define TW5864_RT_CNTR_CH_FRM(channel, frame) \
+	(0x0c00 | (channel << 4) | (frame << 2))
+
+#define TW5864_FRAME_BUS1 0x0d00
+/*
+ * 1 Progressive in part A in bus n
+ * 0 Interlaced in part A in bus n
+ */
+#define TW5864_PROG_A BIT(0)
+/*
+ * 1 Progressive in part B in bus n
+ * 0 Interlaced in part B in bus n
+ */
+#define TW5864_PROG_B BIT(1)
+/*
+ * 1 Frame Mode in bus n
+ * 0 Field Mode in bus n
+ */
+#define TW5864_FRAME BIT(2)
+/*
+ * 0 4CIF in bus n
+ * 1 1D1 + 4 CIF in bus n
+ * 2 2D1 in bus n
+ */
+#define TW5864_BUS_D1 (3 << 3)
+/* Bus 1 goes in TW5864_FRAME_BUS1 in [4:0] */
+/* Bus 2 goes in TW5864_FRAME_BUS1 in [12:8] */
+#define TW5864_FRAME_BUS2 0x0d04
+/* Bus 3 goes in TW5864_FRAME_BUS2 in [4:0] */
+/* Bus 4 goes in TW5864_FRAME_BUS2 in [12:8] */
+
+/* [15:0] Horizontal Mirror for channel n */
+#define TW5864_SENIF_HOR_MIR 0x0d08
+/* [15:0] Vertical Mirror for channel n */
+#define TW5864_SENIF_VER_MIR 0x0d0c
+
+/*
+ * FRAME_WIDTH_BUSn_A
+ * 0x15f: 4 CIF
+ * 0x2cf: 1 D1 + 3 CIF
+ * 0x2cf: 2 D1
+ * FRAME_WIDTH_BUSn_B
+ * 0x15f: 4 CIF
+ * 0x2cf: 1 D1 + 3 CIF
+ * 0x2cf: 2 D1
+ * FRAME_HEIGHT_BUSn_A
+ * 0x11f: 4CIF (PAL)
+ * 0x23f: 1D1 + 3CIF (PAL)
+ * 0x23f: 2 D1 (PAL)
+ * 0x0ef: 4CIF (NTSC)
+ * 0x1df: 1D1 + 3CIF (NTSC)
+ * 0x1df: 2 D1 (NTSC)
+ * FRAME_HEIGHT_BUSn_B
+ * 0x11f: 4CIF (PAL)
+ * 0x23f: 1D1 + 3CIF (PAL)
+ * 0x23f: 2 D1 (PAL)
+ * 0x0ef: 4CIF (NTSC)
+ * 0x1df: 1D1 + 3CIF (NTSC)
+ * 0x1df: 2 D1 (NTSC)
+ */
+#define TW5864_FRAME_WIDTH_BUS_A(bus) (0x0d10 + 0x0010 * bus)
+#define TW5864_FRAME_WIDTH_BUS_B(bus) (0x0d14 + 0x0010 * bus)
+#define TW5864_FRAME_HEIGHT_BUS_A(bus) (0x0d18 + 0x0010 * bus)
+#define TW5864_FRAME_HEIGHT_BUS_B(bus) (0x0d1c + 0x0010 * bus)
+
+/*
+ * 1: the bus mapped Channel n Full D1
+ * 0: the bus mapped Channel n Half D1
+ */
+#define TW5864_FULL_HALF_FLAG 0x0d50
+
+/*
+ * 0 The bus mapped Channel select partA Mode
+ * 1 The bus mapped Channel select partB Mode
+ */
+#define TW5864_FULL_HALF_MODE_SEL 0x0d54
+
+#define TW5864_VLC 0x1000
+/* Define controls in register TW5864_VLC */
+/* QP Value used by H264 CAVLC */
+#define TW5864_VLC_SLICE_QP 0x003f
+/*
+ * Swap byte order of VLC stream in d-word.
+ * 1 Normal (VLC output= [31:0])
+ * 0 Swap (VLC output={[23:16],[31:24],[7:0], [15:8]})
+ */
+#define TW5864_VLC_BYTE_SWP BIT(6)
+/* Enable Adding 03 circuit for VLC stream */
+#define TW5864_VLC_ADD03_EN BIT(7)
+/* Number of bit for VLC bit Align */
+#define TW5864_VLC_BIT_ALIGN_SHIFT 8
+#define TW5864_VLC_BIT_ALIGN_MASK (0x1f << 8)
+/*
+ * Synchronous Interface select for VLC Stream
+ * 1 CDC_VLCS_MAS read VLC stream
+ * 0 CPU read VLC stream
+ */
+#define TW5864_VLC_INF_SEL BIT(13)
+/* Enable VLC overflow control */
+#define TW5864_VLC_OVFL_CNTL BIT(14)
+/*
+ * 1 PCI Master Mode
+ * 0 Non PCI Master Mode
+ */
+#define TW5864_VLC_PCI_SEL BIT(15)
+/*
+ * 0 Enable Adding 03 to VLC header and stream
+ * 1 Disable Adding 03 to VLC header of "00000001"
+ */
+#define TW5864_VLC_A03_DISAB BIT(16)
+/*
+ * Status of VLC stream in DDR (one bit for each buffer)
+ * 1 VLC is ready in buffer n (HW set)
+ * 0 VLC is not ready in buffer n (SW clear)
+ */
+#define TW5864_VLC_BUF_RDY_SHIFT 24
+#define TW5864_VLC_BUF_RDY_MASK (0xff << 24)
+
+/* Total number of bit in the slice */
+#define TW5864_SLICE_TOTAL_BIT 0x1004
+/* Total number of bit in the residue */
+#define TW5864_RES_TOTAL_BIT 0x1008
+
+#define TW5864_VLC_BUF 0x100c
+/* Define controls in register TW5864_VLC_BUF */
+/* VLC BK0 full status, write ‘1’ to clear */
+#define TW5864_VLC_BK0_FULL BIT(0)
+/* VLC BK1 full status, write ‘1’ to clear */
+#define TW5864_VLC_BK1_FULL BIT(1)
+/* VLC end slice status, write ‘1’ to clear */
+#define TW5864_VLC_END_SLICE BIT(2)
+/* VLC Buffer overflow status, write ‘1’ to clear */
+#define TW5864_DSP_RD_OF BIT(3)
+/* VLC string length in either buffer 0 or 1 at end of frame */
+#define TW5864_VLC_STREAM_LEN_SHIFT 4
+#define TW5864_VLC_STREAM_LEN_MASK (0x1ff << 4)
+
+/* [15:0] Total coefficient number in a frame */
+#define TW5864_TOTAL_COEF_NO 0x1010
+/* [0] VLC Encoder Interrupt. Write ‘1’ to clear */
+#define TW5864_VLC_DSP_INTR 0x1014
+/* [31:0] VLC stream CRC checksum */
+#define TW5864_VLC_STREAM_CRC 0x1018
+
+#define TW5864_VLC_RD 0x101c
+/* Define controls in register TW5864_VLC_RD */
+/*
+ * 1 Read VLC lookup Memory
+ * 0 Read VLC Stream Memory
+ */
+#define TW5864_VLC_RD_MEM BIT(0)
+/*
+ * 1 Read VLC Stream Memory in burst mode
+ * 0 Read VLC Stream Memory in single mode
+ */
+#define TW5864_VLC_RD_BRST BIT(1)
+
+/* 0x2000 ~ 0x2ffc -- H264 Stream Memory Map */
+/*
+ * A word is 4 bytes. I.e.,
+ * VLC_STREAM_MEM[0] address: 0x2000
+ * VLC_STREAM_MEM[1] address: 0x2004
+ * ...
+ * VLC_STREAM_MEM[3FF] address: 0x2ffc
+ */
+#define TW5864_VLC_STREAM_MEM_START 0x2000
+#define TW5864_VLC_STREAM_MEM_MAX_OFFSET 0x3ff
+#define TW5864_VLC_STREAM_MEM(offset) (TW5864_VLC_STREAM_MEM_START + 4 * offset)
+
+/* 0x4000 ~ 0x4ffc -- Audio Register Map */
+/* [31:0] config 1ms cnt = Realtime clk/1000 */
+#define TW5864_CFG_1MS_CNT 0x4000
+
+#define TW5864_ADPCM 0x4004
+/* Define controls in register TW5864_ADPCM */
+/* ADPCM decoder enable */
+#define TW5864_ADPCM_DEC BIT(0)
+/* ADPCM input data enable */
+#define TW5864_ADPCM_IN_DATA BIT(1)
+/* ADPCM encoder enable */
+#define TW5864_ADPCM_ENC BIT(2)
+
+#define TW5864_AUD 0x4008
+/* Define controls in register TW5864_AUD */
+/* Record path PCM Audio enable bit for each channel */
+#define TW5864_AUD_ORG_CH_EN 0x00ff
+/* Speaker path PCM Audio Enable */
+#define TW5864_SPK_ORG_EN BIT(16)
+/*
+ * 0 16bit
+ * 1 8bit
+ */
+#define TW5864_AD_BIT_MODE BIT(17)
+#define TW5864_AUD_TYPE_SHIFT 18
+/*
+ * 0 PCM
+ * 3 ADPCM
+ */
+#define TW5864_AUD_TYPE (0xf << 18)
+#define TW5864_AUD_SAMPLE_RATE_SHIFT 22
+/*
+ * 0 8K
+ * 1 16K
+ */
+#define TW5864_AUD_SAMPLE_RATE (3 << 22)
+/* Channel ID used to select audio channel (0 to 16) for loopback */
+#define TW5864_TESTLOOP_CHID_SHIFT 24
+#define TW5864_TESTLOOP_CHID (0x1f << 24)
+/* Enable AD Loopback Test */
+#define TW5864_TEST_ADLOOP_EN BIT(30)
+/*
+ * 0 Asynchronous Mode or PCI target mode
+ * 1 PCI Initiator Mode
+ */
+#define TW5864_AUD_MODE BIT(31)
+
+#define TW5864_AUD_ADPCM 0x400c
+/* Define controls in register TW5864_AUD_ADPCM */
+/* Record path ADPCM audio channel enable, one bit for each */
+#define TW5864_AUD_ADPCM_CH_EN 0x00ff
+/* Speaker path ADPCM audio channel enable */
+#define TW5864_SPK_ADPCM_EN BIT(16)
+
+#define TW5864_PC_BLOCK_ADPCM_RD_NO 0x4018
+#define TW5864_PC_BLOCK_ADPCM_RD_NO_MASK 0x1f
+
+/*
+ * For ADPCM_ENC_WR_PTR, ADPCM_ENC_RD_PTR (see below):
+ * Bit[2:0] ch0
+ * Bit[5:3] ch1
+ * Bit[8:6] ch2
+ * Bit[11:9] ch3
+ * Bit[14:12] ch4
+ * Bit[17:15] ch5
+ * Bit[20:18] ch6
+ * Bit[23:21] ch7
+ * Bit[26:24] ch8
+ * Bit[29:27] ch9
+ * Bit[32:30] ch10
+ * Bit[35:33] ch11
+ * Bit[38:36] ch12
+ * Bit[41:39] ch13
+ * Bit[44:42] ch14
+ * Bit[47:45] ch15
+ * Bit[50:48] ch16
+ */
+#define TW5864_ADPCM_ENC_XX_MASK 0x3fff
+#define TW5864_ADPCM_ENC_XX_PTR2_SHIFT 30
+/* ADPCM_ENC_WR_PTR[29:0] */
+#define TW5864_ADPCM_ENC_WR_PTR1 0x401c
+/* ADPCM_ENC_WR_PTR[50:30] */
+#define TW5864_ADPCM_ENC_WR_PTR2 0x4020
+
+/* ADPCM_ENC_RD_PTR[29:0] */
+#define TW5864_ADPCM_ENC_RD_PTR1 0x4024
+/* ADPCM_ENC_RD_PTR[50:30] */
+#define TW5864_ADPCM_ENC_RD_PTR2 0x4028
+
+/* [3:0] rd ch0, [7:4] rd ch1, [11:8] wr ch0, [15:12] wr ch1 */
+#define TW5864_ADPCM_DEC_RD_WR_PTR 0x402c
+
+/*
+ * For TW5864_AD_ORIG_WR_PTR, TW5864_AD_ORIG_RD_PTR:
+ * Bit[3:0] ch0
+ * Bit[7:4] ch1
+ * Bit[11:8] ch2
+ * Bit[15:12] ch3
+ * Bit[19:16] ch4
+ * Bit[23:20] ch5
+ * Bit[27:24] ch6
+ * Bit[31:28] ch7
+ * Bit[35:32] ch8
+ * Bit[39:36] ch9
+ * Bit[43:40] ch10
+ * Bit[47:44] ch11
+ * Bit[51:48] ch12
+ * Bit[55:52] ch13
+ * Bit[59:56] ch14
+ * Bit[63:60] ch15
+ * Bit[67:64] ch16
+ */
+/* AD_ORIG_WR_PTR[31:0] */
+#define TW5864_AD_ORIG_WR_PTR1 0x4030
+/* AD_ORIG_WR_PTR[63:32] */
+#define TW5864_AD_ORIG_WR_PTR2 0x4034
+/* AD_ORIG_WR_PTR[67:64] */
+#define TW5864_AD_ORIG_WR_PTR3 0x4038
+
+/* AD_ORIG_RD_PTR[31:0] */
+#define TW5864_AD_ORIG_RD_PTR1 0x403c
+/* AD_ORIG_RD_PTR[63:32] */
+#define TW5864_AD_ORIG_RD_PTR2 0x4040
+/* AD_ORIG_RD_PTR[67:64] */
+#define TW5864_AD_ORIG_RD_PTR3 0x4044
+
+#define TW5864_PC_BLOCK_ORIG_RD_NO 0x4048
+#define TW5864_PC_BLOCK_ORIG_RD_NO_MASK 0x1f
+
+#define TW5864_PCI_AUD 0x404c
+/* Define controls in register TW5864_PCI_AUD */
+/*
+ * The register is applicable to PCI initiator mode only. Used to select PCM(0)
+ * or ADPCM(1) audio data sent to PC. One bit for each channel
+ */
+#define TW5864_PCI_DATA_SEL 0xffff
+/*
+ * Audio flow control mode selection bit.
+ * 0 Flow control disabled. TW5864 continuously sends audio frame to PC
+ * (initiator mode)
+ * 1 Flow control enabled
+ */
+#define TW5864_PCI_FLOW_EN BIT(16)
+/*
+ * When PCI_FLOW_EN is set, PCI need to toggle this bit to send an audio frame
+ * to PC. One toggle to send one frame.
+ */
+#define TW5864_PCI_AUD_FRM_EN BIT(17)
+
+/* [1:0] CS valid to data valid CLK cycles when writing operation */
+#define TW5864_CS2DAT_CNT 0x8000
+/* [2:0] Data valid signal width by system clock cycles */
+#define TW5864_DATA_VLD_WIDTH 0x8004
+
+#define TW5864_SYNC 0x8008
+/* Define controls in register TW5864_SYNC */
+/*
+ * 0 vlc stream to syncrous port
+ * 1 vlc stream to ddr buffers
+ */
+#define TW5864_SYNC_CFG BIT(7)
+/*
+ * 0 SYNC Address sampled on Rising edge
+ * 1 SYNC Address sampled on Falling edge
+ */
+#define TW5864_SYNC_ADR_EDGE BIT(0)
+#define TW5864_VLC_STR_DELAY_SHIFT 1
+/*
+ * 0 No system delay
+ * 1 One system clock delay
+ * 2 Two system clock delay
+ * 3 Three system clock delay
+ */
+#define TW5864_VLC_STR_DELAY (3 << 1)
+/*
+ * 0 Rising edge output
+ * 1 Falling edge output
+ */
+#define TW5864_VLC_OUT_EDGE BIT(3)
+
+/*
+ * [1:0]
+ * 2’b00 phase set to 180 degree
+ * 2’b01 phase set to 270 degree
+ * 2’b10 phase set to 0 degree
+ * 2’b11 phase set to 90 degree
+ */
+#define TW5864_I2C_PHASE_CFG 0x800c
+
+/*
+ * The system / DDR clock (166 MHz) is generated with an on-chip system clock
+ * PLL (SYSPLL) using input crystal clock of 27 MHz. The system clock PLL
+ * frequency is controlled with the following equation.
+ * CLK_OUT = CLK_IN * (M+1) / ((N+1) * P)
+ * SYSPLL_M M parameter
+ * SYSPLL_N N parameter
+ * SYSPLL_P P parameter
+ */
+/* SYSPLL_M[7:0] */
+#define TW5864_SYSPLL1 0x8018
+/* Define controls in register TW5864_SYSPLL1 */
+#define TW5864_SYSPLL_M_LOW 0x00ff
+
+/* [2:0]: SYSPLL_M[10:8], [7:3]: SYSPLL_N[4:0] */
+#define TW5864_SYSPLL2 0x8019
+/* Define controls in register TW5864_SYSPLL2 */
+#define TW5864_SYSPLL_M_HI 0x07
+#define TW5864_SYSPLL_N_LOW_SHIFT 3
+#define TW5864_SYSPLL_N_LOW (0x1f << 3)
+
+/*
+ * [1:0]: SYSPLL_N[6:5], [3:2]: SYSPLL_P, [4]: SYSPLL_IREF, [7:5]: SYSPLL_CP_SEL
+ */
+#define TW5864_SYSPLL3 0x8020
+/* Define controls in register TW5864_SYSPLL3 */
+#define TW5864_SYSPLL_N_HI 0x03
+#define TW5864_SYSPLL_P_SHIFT 2
+#define TW5864_SYSPLL_P (0x03 << 2)
+/*
+ * SYSPLL bias current control
+ * 0 Lower current (default)
+ * 1 30% higher current
+ */
+#define TW5864_SYSPLL_IREF BIT(4)
+/*
+ * SYSPLL charge pump current selection
+ * 0 1,5 uA
+ * 1 4 uA
+ * 2 9 uA
+ * 3 19 uA
+ * 4 39 uA
+ * 5 79 uA
+ * 6 159 uA
+ * 7 319 uA
+ */
+#define TW5864_SYSPLL_CP_SEL_SHIFT 5
+#define TW5864_SYSPLL_CP_SEL (0x07 << 5)
+
+/*
+ * [1:0]: SYSPLL_VCO, [3:2]: SYSPLL_LP_X8, [5:4]: SYSPLL_ICP_SEL,
+ * [6]: SYSPLL_LPF_5PF, [7]: SYSPLL_ED_SEL
+ */
+#define TW5864_SYSPLL4 0x8021
+/* Define controls in register TW5864_SYSPLL4 */
+/*
+ * SYSPLL_VCO VCO Range selection
+ * 00 5 ~ 75 MHz
+ * 01 50 ~ 140 MHz
+ * 10 110 ~ 320 MHz
+ * 11 270 ~ 700 MHz
+ */
+#define TW5864_SYSPLL_VCO 0x03
+#define TW5864_SYSPLL_LP_X8_SHIFT 2
+/*
+ * Loop resister
+ * 0 38.5K ohms
+ * 1 6.6K ohms (default)
+ * 2 2.2K ohms
+ * 3 1.1K ohms
+ */
+#define TW5864_SYSPLL_LP_X8 (0x03 << 2)
+#define TW5864_SYSPLL_ICP_SEL_SHIFT 4
+/*
+ * PLL charge pump fine tune
+ * 00 x1 (default)
+ * 01 x1/2
+ * 10 x1/7
+ * 11 x1/8
+ */
+#define TW5864_SYSPLL_ICP_SEL (0x03 << 4)
+/*
+ * PLL low pass filter phase margin adjustment
+ * 0 no 5pF (default)
+ * 1 5pF added
+ */
+#define TW5864_SYSPLL_LPF_5PF BIT(6)
+/*
+ * PFD select edge for detection
+ * 0 Falling edge (default)
+ * 1 Rising edge
+ */
+#define TW5864_SYSPLL_ED_SEL BIT(7)
+
+/* [0]: SYSPLL_RST, [4]: SYSPLL_PD */
+#define TW5864_SYSPLL5 0x8024
+/* Define controls in register TW5864_SYSPLL5 */
+/* Reset SYSPLL */
+#define TW5864_SYSPLL_RST BIT(0)
+/* Power down SYSPLL */
+#define TW5864_SYSPLL_PD BIT(4)
+
+#define TW5864_PLL_CFG 0x801c
+/* Define controls in register TW5864_PLL_CFG */
+/*
+ * Issue Soft Reset from Async Host Interface / PCI Interface clock domain.
+ * Become valid after sync to the xtal clock domain. This bit is set only if
+ * LOAD register bit is also set to 1.
+ */
+#define TW5864_SRST BIT(0)
+/*
+ * Issue SYSPLL (166 MHz) configuration latch from Async host interface / PCI
+ * Interface clock domain. The configuration setting becomes effective only if
+ * LOAD register bit is also set to 1.
+ */
+#define TW5864_SYSPLL_CFG BIT(2)
+/*
+ * Issue SPLL (108 MHz) configuration load from Async host interface / PCI
+ * Interface clock domain. The configuration setting becomes effective only if
+ * the LOAD register bit is also set to 1.
+ */
+#define TW5864_SPLL_CFG BIT(4)
+/*
+ * Set this bit to latch the SRST, SYSPLL_CFG, SPLL_CFG setting into the xtal
+ * clock domain to restart the PLL. This bit is self cleared.
+ */
+#define TW5864_LOAD BIT(3)
+
+/* SPLL_IREF, SPLL_LPX4, SPLL_CPX4, SPLL_PD, SPLL_DBG */
+#define TW5864_SPLL 0x8028
+
+/* 0x8800 ~ 0x88fc -- Interrupt Register Map */
+/*
+ * Trigger mode of interrupt source 0 ~ 15
+ * 1 Edge trigger mode
+ * 0 Level trigger mode
+ */
+#define TW5864_TRIGGER_MODE_L 0x8800
+/* Trigger mode of interrupt source 16 ~ 31 */
+#define TW5864_TRIGGER_MODE_H 0x8804
+/* Enable of interrupt source 0 ~ 15 */
+#define TW5864_INTR_ENABLE_L 0x8808
+/* Enable of interrupt source 16 ~ 31 */
+#define TW5864_INTR_ENABLE_H 0x880c
+/* Clear interrupt command of interrupt source 0 ~ 15 */
+#define TW5864_INTR_CLR_L 0x8810
+/* Clear interrupt command of interrupt source 16 ~ 31 */
+#define TW5864_INTR_CLR_H 0x8814
+/*
+ * Assertion of interrupt source 0 ~ 15
+ * 1 High level or pos-edge is assertion
+ * 0 Low level or neg-edge is assertion
+ */
+#define TW5864_INTR_ASSERT_L 0x8818
+/* Assertion of interrupt source 16 ~ 31 */
+#define TW5864_INTR_ASSERT_H 0x881c
+/*
+ * Output level of interrupt
+ * 1 Interrupt output is high assertion
+ * 0 Interrupt output is low assertion
+ */
+#define TW5864_INTR_OUT_LEVEL 0x8820
+/*
+ * Status of interrupt source 0 ~ 15
+ * Bit[0]: VLC 4k RAM interrupt
+ * Bit[1]: BURST DDR RAM interrupt
+ * Bit[2]: MV DSP interrupt
+ * Bit[3]: video lost interrupt
+ * Bit[4]: gpio 0 interrupt
+ * Bit[5]: gpio 1 interrupt
+ * Bit[6]: gpio 2 interrupt
+ * Bit[7]: gpio 3 interrupt
+ * Bit[8]: gpio 4 interrupt
+ * Bit[9]: gpio 5 interrupt
+ * Bit[10]: gpio 6 interrupt
+ * Bit[11]: gpio 7 interrupt
+ * Bit[12]: JPEG interrupt
+ * Bit[13:15]: Reserved
+ */
+#define TW5864_INTR_STATUS_L 0x8838
+/*
+ * Status of interrupt source 16 ~ 31
+ * Bit[0]: Reserved
+ * Bit[1]: VLC done interrupt
+ * Bit[2]: Reserved
+ * Bit[3]: AD Vsync interrupt
+ * Bit[4]: Preview eof interrupt
+ * Bit[5]: Preview overflow interrupt
+ * Bit[6]: Timer interrupt
+ * Bit[7]: Reserved
+ * Bit[8]: Audio eof interrupt
+ * Bit[9]: I2C done interrupt
+ * Bit[10]: AD interrupt
+ * Bit[11:15]: Reserved
+ */
+#define TW5864_INTR_STATUS_H 0x883c
+
+/* Defines of interrupt bits, united for both low and high word registers */
+#define TW5864_INTR_VLC_RAM BIT(0)
+#define TW5864_INTR_BURST BIT(1)
+#define TW5864_INTR_MV_DSP BIT(2)
+#define TW5864_INTR_VIN_LOST BIT(3)
+/* n belongs to [0; 7] */
+#define TW5864_INTR_GPIO(n) (1 << (4 + n))
+#define TW5864_INTR_JPEG BIT(12)
+#define TW5864_INTR_VLC_DONE BIT(17)
+#define TW5864_INTR_AD_VSYNC BIT(19)
+#define TW5864_INTR_PV_EOF BIT(20)
+#define TW5864_INTR_PV_OVERFLOW BIT(21)
+#define TW5864_INTR_TIMER BIT(22)
+#define TW5864_INTR_AUD_EOF BIT(24)
+#define TW5864_INTR_I2C_DONE BIT(25)
+#define TW5864_INTR_AD BIT(26)
+
+/* 0x9000 ~ 0x920c -- Video Capture (VIF) Register Map */
+/*
+ * H264EN_CH_STATUS[n] Status of Vsync synchronized H264EN_CH_EN (Read Only)
+ * 1 Channel Enabled
+ * 0 Channel Disabled
+ */
+#define TW5864_H264EN_CH_STATUS 0x9000
+/*
+ * [15:0] H264EN_CH_EN[n] H264 Encoding Path Enable for channel
+ * 1 Channel Enabled
+ * 0 Channel Disabled
+ */
+#define TW5864_H264EN_CH_EN 0x9004
+/*
+ * H264EN_CH_DNS[n] H264 Encoding Path Downscale Video Decoder Input for
+ * channel n
+ * 1 Downscale Y to 1/2
+ * 0 Does not downscale
+ */
+#define TW5864_H264EN_CH_DNS 0x9008
+/*
+ * H264EN_CH_PROG[n] H264 Encoding Path channel n is progressive
+ * 1 Progressive (Not valid for TW5864)
+ * 0 Interlaced (TW5864 default)
+ */
+#define TW5864_H264EN_CH_PROG 0x900c
+/*
+ * [3:0] H264EN_BUS_MAX_CH[n]
+ * H264 Encoding Path maximum number of channel on BUS n
+ * 0 Max 4 channels
+ * 1 Max 2 channels
+ */
+#define TW5864_H264EN_BUS_MAX_CH 0x9010
+
+/*
+ * H264EN_RATE_MAX_LINE_n H264 Encoding path Rate Mapping Maximum Line Number
+ * on Bus n
+ */
+#define TW5864_H264EN_RATE_MAX_LINE_EVEN 0x1f
+#define TW5864_H264EN_RATE_MAX_LINE_ODD_SHIFT 5
+#define TW5864_H264EN_RATE_MAX_LINE_ODD (0x1f << 5)
+/*
+ * [4:0] H264EN_RATE_MAX_LINE_0
+ * [9:5] H264EN_RATE_MAX_LINE_1
+ */
+#define TW5864_H264EN_RATE_MAX_LINE_REG1 0x9014
+/*
+ * [4:0] H264EN_RATE_MAX_LINE_2
+ * [9:5] H264EN_RATE_MAX_LINE_3
+ */
+#define TW5864_H264EN_RATE_MAX_LINE_REG2 0x9018
+
+/*
+ * H264EN_CHn_FMT H264 Encoding Path Format configuration of Channel n
+ * 00 D1 (For D1 and hD1 frame)
+ * 01 (Reserved)
+ * 10 (Reserved)
+ * 11 D1 with 1/2 size in X (for CIF frame)
+ * Note: To be used with 0x9008 register to configure the frame size
+ */
+/*
+ * [1:0]: H264EN_CH0_FMT,
+ * ..., [15:14]: H264EN_CH7_FMT
+ */
+#define TW5864_H264EN_CH_FMT_REG1 0x9020
+/*
+ * [1:0]: H264EN_CH8_FMT (?),
+ * ..., [15:14]: H264EN_CH15_FMT (?)
+ */
+#define TW5864_H264EN_CH_FMT_REG2 0x9024
+
+/*
+ * H264EN_RATE_CNTL_BUSm_CHn H264 Encoding Path BUS m Rate Control for Channel n
+ */
+#define TW5864_H264EN_RATE_CNTL_LO_WORD(bus, channel) \
+	(0x9100 + bus * 0x20 + channel * 0x08)
+#define TW5864_H264EN_RATE_CNTL_HI_WORD(bus, channel) \
+	(0x9104 + bus * 0x20 + channel * 0x08)
+
+/*
+ * H264EN_BUSm_MAP_CHn The 16-to-1 MUX configuration register for each encoding
+ * channel (total of 16 channels). Four bits for each channel.
+ */
+#define TW5864_H264EN_BUS0_MAP 0x9200
+#define TW5864_H264EN_BUS1_MAP 0x9204
+#define TW5864_H264EN_BUS2_MAP 0x9208
+#define TW5864_H264EN_BUS3_MAP 0x920c
+
+/* This register is not defined in datasheet, but used in reference driver */
+#define TW5864_UNDECLARED_ERROR_FLAGS_0x9218 0x9218
+
+#define TW5864_GPIO1 0x9800
+#define TW5864_GPIO2 0x9804
+/* Define controls in registers TW5864_GPIO1, TW5864_GPIO2 */
+/* GPIO DATA of Group n */
+#define TW5864_GPIO_DATA 0x00ff
+#define TW5864_GPIO_OEN_SHIFT 8
+/* GPIO Output Enable of Group n */
+#define TW5864_GPIO_OEN (0xff << 8)
+
+/* 0xa000 ~ 0xa8ff – DDR Controller Register Map */
+/* DDR Controller A */
+/*
+ * [2:0] Data valid counter after read command to DDR. This is the delay value
+ * to show how many cycles the data will be back from DDR after we issue a read
+ * command.
+ */
+#define TW5864_RD_ACK_VLD_MUX 0xa000
+
+#define TW5864_DDR_PERIODS 0xa004
+/* Define controls in register TW5864_DDR_PERIODS */
+/*
+ * Tras value, the minimum cycle of active to precharge command period,
+ * default is 7
+ */
+#define TW5864_TRAS_CNT_MAX 0x000f
+/*
+ * Trfc value, the minimum cycle of refresh to active or refresh command period,
+ * default is 4"hf
+ */
+#define TW5864_RFC_CNT_MAX_SHIFT 8
+#define TW5864_RFC_CNT_MAX (0x0f << 8)
+/*
+ * Trcd value, the minimum cycle of active to internal read/write command
+ * period, default is 4"h2
+ */
+#define TW5864_TCD_CNT_MAX_SHIFT 4
+#define TW5864_TCD_CNT_MAX (0x0f << 4)
+/* Twr value, write recovery time, default is 4"h3 */
+#define TW5864_TWR_CNT_MAX_SHIFT 12
+#define TW5864_TWR_CNT_MAX (0x0f << 12)
+
+/*
+ * [2:0] CAS latency, the delay cycle between internal read command and the
+ * availability of the first bit of output data, default is 3
+ */
+#define TW5864_CAS_LATENCY 0xa008
+/*
+ * [15:0] Maximum average periodic refresh, the value is based on the current
+ * frequency to match 7.8mcs
+ */
+#define TW5864_DDR_REF_CNTR_MAX 0xa00c
+/*
+ * DDR_ON_CHIP_MAP [1:0]
+ * 0 256M DDR on board
+ * 1 512M DDR on board
+ * 2 1G DDR on board
+ * DDR_ON_CHIP_MAP [2]
+ * 0 Only one DDR chip
+ * 1 Two DDR chips
+ */
+#define TW5864_DDR_ON_CHIP_MAP 0xa01c
+#define TW5864_DDR_SELFTEST_MODE 0xa020
+/* Define controls in register TW5864_DDR_SELFTEST_MODE */
+/*
+ * 0 Common read/write mode
+ * 1 DDR self-test mode
+ */
+#define TW5864_MASTER_MODE BIT(0)
+/*
+ * 0 DDR self-test single read/write
+ * 1 DDR self-test burst read/write
+ */
+#define TW5864_SINGLE_PROC BIT(1)
+/*
+ * 0 DDR self-test write command
+ * 1 DDR self-test read command
+ */
+#define TW5864_WRITE_FLAG BIT(2)
+#define TW5864_DATA_MODE_SHIFT 4
+/*
+ * 0 write 32'haaaa5555 to DDR
+ * 1 write 32'hffffffff to DDR
+ * 2 write 32'hha5a55a5a to DDR
+ * 3 write increasing data to DDR
+ */
+#define TW5864_DATA_MODE (0x3 << 4)
+
+/* [7:0] The maximum data of one burst in DDR self-test mode */
+#define TW5864_BURST_CNTR_MAX 0xa024
+/* [15:0] The maximum burst counter (bit 15~0) in DDR self-test mode */
+#define TW5864_DDR_PROC_CNTR_MAX_L 0xa028
+/* The maximum burst counter (bit 31~16) in DDR self-test mode */
+#define TW5864_DDR_PROC_CNTR_MAX_H 0xa02c
+/* [0]: Start one DDR self-test */
+#define TW5864_DDR_SELF_TEST_CMD 0xa030
+/* The maximum error counter (bit 15 ~ 0) in DDR self-test */
+#define TW5864_ERR_CNTR_L 0xa034
+
+#define TW5864_ERR_CNTR_H_AND_FLAG 0xa038
+/* Define controls in register TW5864_ERR_CNTR_H_AND_FLAG */
+/* The maximum error counter (bit 30 ~ 16) in DDR self-test */
+#define TW5864_ERR_CNTR_H_MASK 0x3fff
+/* DDR self-test end flag */
+#define TW5864_END_FLAG 0x8000
+
+/*
+ * DDR Controller B: same as 0xa000 ~ 0xa038, but add TW5864_DDR_B_OFFSET to all
+ * addresses
+ */
+#define TW5864_DDR_B_OFFSET 0x0800
+
+/* 0xb004 ~ 0xb018 – HW version/ARB12 Register Map */
+/* [15:0] Default is C013 */
+#define TW5864_HW_VERSION 0xb004
+
+#define TW5864_REQS_ENABLE 0xb010
+/* Define controls in register TW5864_REQS_ENABLE */
+/* Audio data in to DDR enable (default 1) */
+#define TW5864_AUD_DATA_IN_ENB BIT(0)
+/* Audio encode request to DDR enable (default 1) */
+#define TW5864_AUD_ENC_REQ_ENB BIT(1)
+/* Audio decode request0 to DDR enable (default 1) */
+#define TW5864_AUD_DEC_REQ0_ENB BIT(2)
+/* Audio decode request1 to DDR enable (default 1) */
+#define TW5864_AUD_DEC_REQ1_ENB BIT(3)
+/* VLC stream request to DDR enable (default 1) */
+#define TW5864_VLC_STRM_REQ_ENB BIT(4)
+/* H264 MV request to DDR enable (default 1) */
+#define TW5864_DVM_MV_REQ_ENB BIT(5)
+/* mux_core MVD request to DDR enable (default 1) */
+#define TW5864_MVD_REQ_ENB BIT(6)
+/* mux_core MVD temp data request to DDR enable (default 1) */
+#define TW5864_MVD_TMP_REQ_ENB BIT(7)
+/* JPEG request to DDR enable (default 1) */
+#define TW5864_JPEG_REQ_ENB BIT(8)
+/* mv_flag request to DDR enable (default 1) */
+#define TW5864_MV_FLAG_REQ_ENB BIT(9)
+
+#define TW5864_ARB12 0xb018
+/* Define controls in register TW5864_ARB12 */
+/* ARB12 Enable (default 1) */
+#define TW5864_ARB12_ENB BIT(15)
+/* ARB12 maximum value of time out counter (default 15"h1FF) */
+#define TW5864_ARB12_TIME_OUT_CNT 0x7fff
+
+/* 0xb800 ~ 0xb80c -- Indirect Access Register Map */
+/*
+ * Spec says:
+ * In order to access the indirect register space, the following procedure is
+ * followed.
+ * But reference driver implementation, and current driver, too, does it
+ * differently.
+ *
+ * Write Registers:
+ * (1) Write IND_DATA at 0xb804 ~ 0xb807
+ * (2) Read BUSY flag from 0xb803. Wait until BUSY signal is 0.
+ * (3) Write IND_ADDR at 0xb800 ~ 0xb801. Set R/W to "1", ENABLE to "1"
+ * Read Registers:
+ * (1) Read BUSY flag from 0xb803. Wait until BUSY signal is 0.
+ * (2) Write IND_ADDR at 0xb800 ~ 0xb801. Set R/W to "0", ENABLE to "1"
+ * (3) Read BUSY flag from 0xb803. Wait until BUSY signal is 0.
+ * (4) Read IND_DATA from 0xb804 ~ 0xb807
+ */
+#define TW5864_IND_CTL 0xb800
+/* Define controls in register TW5864_IND_CTL */
+/* Address used to access indirect register space */
+#define TW5864_IND_ADDR 0x0000ffff
+/* Wait until this bit is "0" before using indirect access */
+#define TW5864_BUSY BIT(31)
+/* Activate the indirect access. This bit is self cleared */
+#define TW5864_ENABLE BIT(25)
+/* Read/Write command */
+#define TW5864_RW BIT(24)
+
+/* [31:0] Data used to read/write indirect register space */
+#define TW5864_IND_DATA 0xb804
+
+/* 0xc000 ~ 0xc7fc -- Preview Register Map */
+/* Mostly skipped this section. */
+/*
+ * [15:0] Status of Vsync Synchronized PCI_PV_CH_EN (Read Only)
+ * 1 Channel Enabled
+ * 0 Channel Disabled
+ */
+#define TW5864_PCI_PV_CH_STATUS 0xc000
+/*
+ * [15:0] PCI Preview Path Enable for channel n
+ * 1 Channel Enable
+ * 0 Channel Disable
+ */
+#define TW5864_PCI_PV_CH_EN 0xc004
+
+/* 0xc800 ~ 0xc804 -- JPEG Capture Register Map */
+/* Skipped. */
+/* 0xd000 ~ 0xd0fc -- JPEG Control Register Map */
+/* Skipped. */
+
+/* 0xe000 ~ 0xfc04 – Motion Vector Register Map */
+
+/* ME Motion Vector data (Four Byte Each) 0xe000 ~ 0xe7fc */
+#define TW5864_ME_MV_VEC_START 0xe000
+#define TW5864_ME_MV_VEC_MAX_OFFSET 0x1ff
+#define TW5864_ME_MV_VEC(offset) (TW5864_ME_MV_VEC_START + 4 * offset)
+
+#define TW5864_MV 0xfc00
+/* Define controls in register TW5864_MV */
+/* mv bank0 full status , write "1" to clear */
+#define TW5864_MV_BK0_FULL BIT(0)
+/* mv bank1 full status , write "1" to clear */
+#define TW5864_MV_BK1_FULL BIT(1)
+/* slice end status; write "1" to clear */
+#define TW5864_MV_EOF BIT(2)
+/* mv encode interrupt status; write "1" to clear */
+#define TW5864_MV_DSP_INTR BIT(3)
+/* mv write memory overflow, write "1" to clear */
+#define TW5864_DSP_WR_OF BIT(4)
+#define TW5864_MV_LEN_SHIFT 5
+/* mv stream length */
+#define TW5864_MV_LEN (0xff << 5)
+/* The configured status bit written into bit 15 of 0xfc04 */
+#define TW5864_MPI_DDR_SEL BIT(13)
+
+#define TW5864_MPI_DDR_SEL_REG 0xfc04
+/* Define controls in register TW5864_MPI_DDR_SEL_REG */
+/*
+ * SW configure register
+ * 0 MV is saved in internal DPR
+ * 1 MV is saved in DDR
+ */
+#define TW5864_MPI_DDR_SEL2 BIT(15)
+
+/* 0x18000 ~ 0x181fc – PCI Master/Slave Control Map */
+#define TW5864_PCI_INTR_STATUS 0x18000
+/* Define controls in register TW5864_PCI_INTR_STATUS */
+/* vlc done */
+#define TW5864_VLC_DONE_INTR BIT(1)
+/* ad vsync */
+#define TW5864_AD_VSYNC_INTR BIT(3)
+/* preview eof */
+#define TW5864_PREV_EOF_INTR BIT(4)
+/* preview overflow interrupt */
+#define TW5864_PREV_OVERFLOW_INTR BIT(5)
+/* timer interrupt */
+#define TW5864_TIMER_INTR BIT(6)
+/* audio eof */
+#define TW5864_AUDIO_EOF_INTR BIT(8)
+/* IIC done */
+#define TW5864_IIC_DONE_INTR BIT(24)
+/* ad interrupt (e.g.: video lost, video format changed) */
+#define TW5864_AD_INTR_REG BIT(25)
+
+#define TW5864_PCI_INTR_CTL 0x18004
+/* Define controls in register TW5864_PCI_INTR_CTL */
+/* master enable */
+#define TW5864_PCI_MAST_ENB BIT(0)
+/* mvd&vlc master enable */
+#define TW5864_MVD_VLC_MAST_ENB 0x06
+/* (Need to set 0 in TW5864A) */
+#define TW5864_AD_MAST_ENB BIT(3)
+/* preview master enable */
+#define TW5864_PREV_MAST_ENB BIT(4)
+/* preview overflow enable */
+#define TW5864_PREV_OVERFLOW_ENB BIT(5)
+/* timer interrupt enable */
+#define TW5864_TIMER_INTR_ENB BIT(6)
+/* JPEG master (push mode) enable */
+#define TW5864_JPEG_MAST_ENB BIT(7)
+#define TW5864_AU_MAST_ENB_CHN_SHIFT 8
+/* audio master channel enable */
+#define TW5864_AU_MAST_ENB_CHN (0xffff << 8)
+/* IIC interrupt enable */
+#define TW5864_IIC_INTR_ENB BIT(24)
+/* ad interrupt enable */
+#define TW5864_AD_INTR_ENB BIT(25)
+/* target burst enable */
+#define TW5864_PCI_TAR_BURST_ENB BIT(26)
+/* vlc stream burst enable */
+#define TW5864_PCI_VLC_BURST_ENB BIT(27)
+/* ddr burst enable (1 enable, and must set DDR_BRST_EN) */
+#define TW5864_PCI_DDR_BURST_ENB BIT(28)
+
+/*
+ * Because preview and audio have 16 channels separately, so using this
+ * registers to indicate interrupt status for every channels. This is secondary
+ * interrupt status register. OR operating of the PREV_INTR_REG is
+ * PREV_EOF_INTR, OR operating of the AU_INTR_REG bits is AUDIO_EOF_INTR
+ */
+#define TW5864_PREV_AND_AU_INTR 0x18008
+/* Define controls in register TW5864_PREV_AND_AU_INTR */
+/* preview eof interrupt flag */
+#define TW5864_PREV_INTR_REG 0x0000ffff
+#define TW5864_AU_INTR_REG_SHIFT 16
+/* audio eof interrupt flag */
+#define TW5864_AU_INTR_REG (0xffff << 16)
+
+#define TW5864_MASTER_ENB_REG 0x1800c
+/* Define controls in register TW5864_MASTER_ENB_REG */
+/* master enable */
+#define TW5864_PCI_VLC_INTR_ENB BIT(1)
+/* mvd and vlc master enable */
+#define TW5864_PCI_PREV_INTR_ENB BIT(4)
+/* ad vsync master enable */
+#define TW5864_PCI_PREV_OF_INTR_ENB BIT(5)
+/* jpeg master enable */
+#define TW5864_PCI_JPEG_INTR_ENB BIT(7)
+/* preview master enable */
+#define TW5864_PCI_AUD_INTR_ENB BIT(8)
+
+/*
+ * Every channel of preview and audio have ping-pong buffers in system memory,
+ * this register is the buffer flag to notify software which buffer is been
+ * operated.
+ */
+#define TW5864_PREV_AND_AU_BUF_FLAG 0x18010
+/* Define controls in register TW5864_PREV_AND_AU_BUF_FLAG */
+/* preview buffer A/B flag */
+#define TW5864_PREV_BUF_FLAG 0xffff
+#define TW5864_AUDIO_BUF_FLAG_SHIFT 16
+/* audio buffer A/B flag */
+#define TW5864_AUDIO_BUF_FLAG (0xffff << 16)
+
+#define TW5864_IIC 0x18014
+/* Define controls in register TW5864_IIC */
+/* register data */
+#define TW5864_IIC_DATA 0x00ff
+#define TW5864_IIC_REG_ADDR_SHIFT 8
+/* register addr */
+#define TW5864_IIC_REG_ADDR (0xff << 8)
+/* rd/wr flag rd=1,wr=0 */
+#define TW5864_IIC_RW BIT(16)
+#define TW5864_IIC_DEV_ADDR_SHIFT 17
+/* device addr */
+#define TW5864_IIC_DEV_ADDR (0x7f << 17)
+/*
+ * iic done, software kick off one time iic transaction through setting this
+ * bit to 1. Then poll this bit, value 1 indicate iic transaction have
+ * completed, if read, valid data have been stored in iic_data
+ */
+#define TW5864_IIC_DONE BIT(24)
+
+#define TW5864_RST_AND_IF_INFO 0x18018
+/* Define controls in register TW5864_RST_AND_IF_INFO */
+/* application software soft reset */
+#define TW5864_APP_SOFT_RST BIT(0)
+#define TW5864_PCI_INF_VERSION_SHIFT 16
+/* PCI interface version, read only */
+#define TW5864_PCI_INF_VERSION (0xffff << 16)
+
+/* vlc stream crc value, it is calculated in pci module */
+#define TW5864_VLC_CRC_REG 0x1801c
+/*
+ * vlc max length, it is defined by software based on software assign memory
+ * space for vlc
+ */
+#define TW5864_VLC_MAX_LENGTH 0x18020
+/* vlc length of one frame */
+#define TW5864_VLC_LENGTH 0x18024
+/* vlc original crc value */
+#define TW5864_VLC_INTRA_CRC_I_REG 0x18028
+/* vlc original crc value */
+#define TW5864_VLC_INTRA_CRC_O_REG 0x1802c
+/* mv stream crc value, it is calculated in pci module */
+#define TW5864_VLC_PAR_CRC_REG 0x18030
+/* mv length */
+#define TW5864_VLC_PAR_LENGTH_REG 0x18034
+/* mv original crc value */
+#define TW5864_VLC_PAR_I_REG 0x18038
+/* mv original crc value */
+#define TW5864_VLC_PAR_O_REG 0x1803c
+
+/*
+ * Configuration register for 9[or 10] CIFs or 1D1+15QCIF Preview mode.
+ * PREV_PCI_ENB_CHN[0] Enable 9th preview channel (9CIF prev) or 1D1 channel in
+ * (1D1+15QCIF prev)
+ * PREV_PCI_ENB_CHN[1] Enable 10th preview channel
+ */
+#define TW5864_PREV_PCI_ENB_CHN 0x18040
+/* Description skipped. */
+#define TW5864_PREV_FRAME_FORMAT_IN 0x18044
+/* IIC enable */
+#define TW5864_IIC_ENB 0x18048
+/*
+ * Timer interrupt interval
+ * 0 1ms
+ * 1 2ms
+ * 2 4ms
+ * 3 8ms
+ */
+#define TW5864_PCI_INTTM_SCALE 0x1804c
+
+/*
+ * The above register is pci base address registers. Application software will
+ * initialize them to tell chip where the corresponding stream will be dumped
+ * to. Application software will select appropriate base address interval based
+ * on the stream length.
+ */
+/* VLC stream base address */
+#define TW5864_VLC_STREAM_BASE_ADDR 0x18080
+/* MV stream base address */
+#define TW5864_MV_STREAM_BASE_ADDR 0x18084
+/* 0x180a0 – 0x180bc: audio burst base address. Skipped. */
+/* 0x180c0 ~ 0x180dc – JPEG Push Mode Buffer Base Address. Skipped. */
+/* 0x18100 – 0x1817c: preview burst base address. Skipped. */
+
+/* 0x80000 ~ 0x87fff -- DDR Burst RW Register Map */
+#define TW5864_DDR_CTL 0x80000
+/* Define controls in register TW5864_DDR_CTL */
+#define TW5864_BRST_LENGTH_SHIFT 2
+/* Length of 32-bit data burst */
+#define TW5864_BRST_LENGTH (0x3fff << 2)
+/*
+ * Burst Read/Write
+ * 0 Read Burst from DDR
+ * 1 Write Burst to DDR
+ */
+#define TW5864_BRST_RW BIT(16)
+/* Begin a new DDR Burst. This bit is self cleared */
+#define TW5864_NEW_BRST_CMD BIT(17)
+/* DDR Burst End Flag */
+#define TW5864_BRST_END BIT(24)
+/* Enable Error Interrupt for Single DDR Access */
+#define TW5864_SING_ERR_INTR BIT(25)
+/* Enable Error Interrupt for Burst DDR Access */
+#define TW5864_BRST_ERR_INTR BIT(26)
+/* Enable Interrupt for End of DDR Burst Access */
+#define TW5864_BRST_END_INTR BIT(27)
+/* DDR Single Access Error Flag */
+#define TW5864_SINGLE_ERR BIT(28)
+/* DDR Single Access Busy Flag */
+#define TW5864_SINGLE_BUSY BIT(29)
+/* DDR Burst Access Error Flag */
+#define TW5864_BRST_ERR BIT(30)
+/* DDR Burst Access Busy Flag */
+#define TW5864_BRST_BUSY BIT(31)
+
+/* [27:0] DDR Access Address. Bit [1:0] has to be 0 */
+#define TW5864_DDR_ADDR 0x80004
+/* DDR Access Internal Buffer Address. Bit [1:0] has to be 0 */
+#define TW5864_DPR_BUF_ADDR 0x80008
+/* SRAM Buffer MPI Access Space. Totally 16 KB */
+#define TW5864_DPR_BUF_START 0x84000
+/* 0x84000 - 0x87ffc */
+#define TW5864_DPR_BUF_SIZE 0x4000
+
+/* Indirect Map Space */
+/*
+ * The indirect space is accessed through 0xb800 ~ 0xb807 registers in direct
+ * access space
+ */
+/* Analog Video / Audio Decoder / Encoder */
+/* Allowed channel values: [0; 3] */
+/* Read-only register */
+#define TW5864_INDIR_VIN_0(channel) (0x000 + channel * 0x010)
+/* Define controls in register TW5864_INDIR_VIN_0 */
+/*
+ * 1 Video not present. (sync is not detected in number of consecutive line
+ * periods specified by MISSCNT register)
+ * 0 Video detected.
+ */
+#define TW5864_INDIR_VIN_0_VDLOSS BIT(7)
+/*
+ * 1 Horizontal sync PLL is locked to the incoming video source.
+ * 0 Horizontal sync PLL is not locked.
+ */
+#define TW5864_INDIR_VIN_0_HLOCK BIT(6)
+/*
+ * 1 Sub-carrier PLL is locked to the incoming video source.
+ * 0 Sub-carrier PLL is not locked.
+ */
+#define TW5864_INDIR_VIN_0_SLOCK BIT(5)
+/*
+ * 1 Even field is being decoded.
+ * 0 Odd field is being decoded.
+ */
+#define TW5864_INDIR_VIN_0_FLD BIT(4)
+/*
+ * 1 Vertical logic is locked to the incoming video source.
+ * 0 Vertical logic is not locked.
+ */
+#define TW5864_INDIR_VIN_0_VLOCK BIT(3)
+/*
+ * 1 No color burst signal detected.
+ * 0 Color burst signal detected.
+ */
+#define TW5864_INDIR_VIN_0_MONO BIT(1)
+/*
+ * 0 60Hz source detected
+ * 1 50Hz source detected
+ * The actual vertical scanning frequency depends on the current standard
+ * invoked.
+ */
+#define TW5864_INDIR_VIN_0_DET50 BIT(0)
+
+#define TW5864_INDIR_VIN_1(channel) (0x001 + channel * 0x010)
+/* VCR signal indicator. Read-only. */
+#define TW5864_INDIR_VIN_1_VCR BIT(7)
+/* Weak signal indicator 2. Read-only. */
+#define TW5864_INDIR_VIN_1_WKAIR BIT(6)
+/* Weak signal indicator controlled by WKTH. Read-only. */
+#define TW5864_INDIR_VIN_1_WKAIR1 BIT(5)
+/*
+ * 1 = Standard signal
+ * 0 = Non-standard signal
+ * Read-only
+ */
+#define TW5864_INDIR_VIN_1_VSTD BIT(4)
+/*
+ * 1 = Non-interlaced signal
+ * 0 = interlaced signal
+ * Read-only
+ */
+#define TW5864_INDIR_VIN_1_NINTL BIT(3)
+/*
+ * Vertical Sharpness Control. Writable.
+ * 0 = None (default)
+ * 7 = Highest
+ * **Note: VSHP must be set to ‘0’ if COMB = 0
+ */
+#define TW5864_INDIR_VIN_1_VSHP 0x07
+
+/* HDELAY_XY[7:0] */
+#define TW5864_INDIR_VIN_2_HDELAY_XY_LO(channel) (0x002 + channel * 0x010)
+/* HACTIVE_XY[7:0] */
+#define TW5864_INDIR_VIN_3_HACTIVE_XY_LO(channel) (0x003 + channel * 0x010)
+/* VDELAY_XY[7:0] */
+#define TW5864_INDIR_VIN_4_VDELAY_XY_LO(channel) (0x004 + channel * 0x010)
+/* VACTIVE_XY[7:0] */
+#define TW5864_INDIR_VIN_5_VACTIVE_XY_LO(channel) (0x005 + channel * 0x010)
+
+#define TW5864_INDIR_VIN_6(channel) (0x006 + channel * 0x010)
+/* Define controls in register TW5864_INDIR_VIN_6 */
+#define TW5864_INDIR_VIN_6_HDELAY_XY_HI 0x03
+#define TW5864_INDIR_VIN_6_HACTIVE_XY_HI_SHIFT 2
+#define TW5864_INDIR_VIN_6_HACTIVE_XY_HI (0x03 << 2)
+#define TW5864_INDIR_VIN_6_VDELAY_XY_HI BIT(4)
+#define TW5864_INDIR_VIN_6_VACTIVE_XY_HI BIT(5)
+
+/*
+ * HDELAY_XY This 10bit register defines the starting location of horizontal
+ * active pixel for display / record path. A unit is 1 pixel. The default value
+ * is 0x00f for NTSC and 0x00a for PAL.
+ *
+ * HACTIVE_XY This 10bit register defines the number of horizontal active pixel
+ * for display / record path. A unit is 1 pixel. The default value is decimal
+ * 720.
+ *
+ * VDELAY_XY This 9bit register defines the starting location of vertical
+ * active for display / record path. A unit is 1 line. The default value is
+ * decimal 6.
+ *
+ * VACTIVE_XY This 9bit register defines the number of vertical active lines
+ * for display / record path. A unit is 1 line. The default value is decimal
+ * 240.
+ */
+
+/* HUE These bits control the color hue as 2's complement number. They have
+ * value from +36o (7Fh) to -36o (80h) with an increment of 2.8o. The 2 LSB has
+ * no effect. The positive value gives greenish tone and negative value gives
+ * purplish tone. The default value is 0o (00h). This is effective only on NTSC
+ * system. The default is 00h.
+ */
+#define TW5864_INDIR_VIN_7_HUE(channel) (0x007 + channel * 0x010)
+
+#define TW5864_INDIR_VIN_8(channel) (0x008 + channel * 0x010)
+/* Define controls in register TW5864_INDIR_VIN_8 */
+/*
+ * This bit controls the center frequency of the peaking filter.
+ * The corresponding gain adjustment is HFLT.
+ * 0 Low
+ * 1 center
+ */
+#define TW5864_INDIR_VIN_8_SCURVE BIT(7)
+/* CTI level selection. The default is 1.
+ * 0 None
+ * 3 Highest
+ */
+#define TW5864_INDIR_VIN_8_CTI_SHIFT 4
+#define TW5864_INDIR_VIN_8_CTI (0x03 << 4)
+
+/*
+ * These bits control the amount of sharpness enhancement on the luminance
+ * signals. There are 16 levels of control with "0" having no effect on the
+ * output image. 1 through 15 provides sharpness enhancement with "F" being the
+ * strongest. The default is 1.
+ */
+#define TW5864_INDIR_VIN_8_SHARPNESS 0x0f
+
+/*
+ * These bits control the luminance contrast gain. A value of 100 (64h) has a
+ * gain of 1. The range adjustment is from 0% to 255% at 1% per step. The
+ * default is 64h.
+ */
+#define TW5864_INDIR_VIN_9_CNTRST(channel) (0x009 + channel * 0x010)
+
+/*
+ * These bits control the brightness. They have value of –128 to 127 in 2's
+ * complement form. Positive value increases brightness. A value 0 has no
+ * effect on the data. The default is 00h.
+ */
+#define TW5864_INDIR_VIN_A_BRIGHT(channel) (0x00a + channel * 0x010)
+
+/*
+ * These bits control the digital gain adjustment to the U (or Cb) component of
+ * the digital video signal. The color saturation can be adjusted by adjusting
+ * the U and V color gain components by the same amount in the normal
+ * situation. The U and V can also be adjusted independently to provide greater
+ * flexibility. The range of adjustment is 0 to 200%. A value of 128 (80h) has
+ * gain of 100%. The default is 80h.
+ */
+#define TW5864_INDIR_VIN_B_SAT_U(channel) (0x00b + channel * 0x010)
+
+/*
+ * These bits control the digital gain adjustment to the V (or Cr) component of
+ * the digital video signal. The color saturation can be adjusted by adjusting
+ * the U and V color gain components by the same amount in the normal
+ * situation. The U and V can also be adjusted independently to provide greater
+ * flexibility. The range of adjustment is 0 to 200%. A value of 128 (80h) has
+ * gain of 100%. The default is 80h.
+ */
+#define TW5864_INDIR_VIN_C_SAT_V(channel) (0x00c + channel * 0x010)
+
+/* Read-only */
+#define TW5864_INDIR_VIN_D(channel) (0x00d + channel * 0x010)
+/* Define controls in register TW5864_INDIR_VIN_D */
+/* Macrovision color stripe detection may be un-reliable */
+#define TW5864_INDIR_VIN_D_CSBAD BIT(3)
+/* Macrovision AGC pulse detected */
+#define TW5864_INDIR_VIN_D_MCVSN BIT(2)
+/* Macrovision color stripe protection burst detected */
+#define TW5864_INDIR_VIN_D_CSTRIPE BIT(1)
+/*
+ * This bit is valid only when color stripe protection is detected, i.e. if
+ * CSTRIPE=1,
+ * 1 Type 2 color stripe protection
+ * 0 Type 3 color stripe protection
+ */
+#define TW5864_INDIR_VIN_D_CTYPE2 BIT(0)
+
+/* Read-only */
+#define TW5864_INDIR_VIN_E(channel) (0x00e + channel * 0x010)
+/* Define controls in register TW5864_INDIR_VIN_E */
+/*
+ * Read-only.
+ * 0 Idle
+ * 1 Detection in progress
+ */
+#define TW5864_INDIR_VIN_E_DETSTUS BIT(7)
+/*
+ * STDNOW Current standard invoked
+ * 0 NTSC (M)
+ * 1 PAL (B, D, G, H, I)
+ * 2 SECAM
+ * 3 NTSC4.43
+ * 4 PAL (M)
+ * 5 PAL (CN)
+ * 6 PAL 60
+ * 7 Not valid
+ */
+#define TW5864_INDIR_VIN_E_STDNOW_SHIFT 4
+#define TW5864_INDIR_VIN_E_STDNOW (0x07 << 4)
+
+/*
+ * 1 Disable the shadow registers
+ * 0 Enable VACTIVE and HDELAY shadow registers value depending on STANDARD.
+ * (Default)
+ */
+#define TW5864_INDIR_VIN_E_ATREG BIT(3)
+/*
+ * STANDARD Standard selection
+ * 0 NTSC (M)
+ * 1 PAL (B, D, G, H, I)
+ * 2 SECAM
+ * 3 NTSC4.43
+ * 4 PAL (M)
+ * 5 PAL (CN)
+ * 6 PAL 60
+ * 7 Auto detection (Default)
+ */
+#define TW5864_INDIR_VIN_E_STANDARD 0x07
+
+#define TW5864_INDIR_VIN_F(channel) (0x00f + channel * 0x010)
+/* Define controls in register TW5864_INDIR_VIN_F */
+/*
+ * 1 Writing 1 to this bit will manually initiate the auto format detection
+ * process. This bit is a self-clearing bit
+ * 0 Manual initiation of auto format detection is done. (Default)
+ */
+#define TW5864_INDIR_VIN_F_ATSTART BIT(7)
+/* Enable recognition of PAL60 (Default) */
+#define TW5864_INDIR_VIN_F_PAL60EN BIT(6)
+/* Enable recognition of PAL (CN). (Default) */
+#define TW5864_INDIR_VIN_F_PALCNEN BIT(5)
+/* Enable recognition of PAL (M). (Default) */
+#define TW5864_INDIR_VIN_F_PALMEN BIT(4)
+/* Enable recognition of NTSC 4.43. (Default) */
+#define TW5864_INDIR_VIN_F_NTSC44EN BIT(3)
+/* Enable recognition of SECAM. (Default) */
+#define TW5864_INDIR_VIN_F_SECAMEN BIT(2)
+/* Enable recognition of PAL (B, D, G, H, I). (Default) */
+#define TW5864_INDIR_VIN_F_PALBEN BIT(1)
+/* Enable recognition of NTSC (M). (Default) */
+#define TW5864_INDIR_VIN_F_NTSCEN BIT(0)
+
+/* Some registers skipped. */
+
+/* Use falling edge to sample VD1-VD4 from 54 MHz to 108 MHz */
+#define TW5864_INDIR_VD_108_POL 0x041
+#define TW5864_INDIR_VD_108_POL_VD12 BIT(0)
+#define TW5864_INDIR_VD_108_POL_VD34 BIT(1)
+#define TW5864_INDIR_VD_108_POL_BOTH \
+	(TW5864_INDIR_VD_108_POL_VD12 | TW5864_INDIR_VD_108_POL_VD34)
+
+/* Some registers skipped. */
+
+/*
+ * Audio Input ADC gain control
+ * 0 0.25
+ * 1 0.31
+ * 2 0.38
+ * 3 0.44
+ * 4 0.50
+ * 5 0.63
+ * 6 0.75
+ * 7 0.88
+ * 8 1.00 (default)
+ * 9 1.25
+ * 10 1.50
+ * 11 1.75
+ * 12 2.00
+ * 13 2.25
+ * 14 2.50
+ * 15 2.75
+ */
+/* [3:0] channel 0, [7:4] channel 1 */
+#define TW5864_INDIR_AIGAIN1 0x060
+/* [3:0] channel 2, [7:4] channel 3 */
+#define TW5864_INDIR_AIGAIN2 0x061
+
+/* Some registers skipped */
+
+#define TW5864_INDIR_AIN_0x06D 0x06d
+/* Define controls in register TW5864_INDIR_AIN_0x06D */
+/*
+ * LAWMD Select u-Law/A-Law/PCM/SB data output format on ADATR and ADATM pin.
+ * 0 PCM output (default)
+ * 1 SB (Signed MSB bit in PCM data is inverted) output
+ * 2 u-Law output
+ * 3 A-Law output
+ */
+#define TW5864_INDIR_AIN_LAWMD_SHIFT 6
+#define TW5864_INDIR_AIN_LAWMD (0x03 << 6)
+/*
+ * Disable the mixing ratio value for all audio.
+ * 0 Apply individual mixing ratio value for each audio (default)
+ * 1 Apply nominal value for all audio commonly
+ */
+#define TW5864_INDIR_AIN_MIX_DERATIO BIT(5)
+/*
+ * Enable the mute function for audio channel AINn when n is 0 to 3. It effects
+ * only for mixing. When n = 4, it enable the mute function of the playback
+ * audio input. It effects only for single chip or the last stage chip
+ * 0 Normal
+ * 1 Muted (default)
+ */
+#define TW5864_INDIR_AIN_MIX_MUTE 0x1f
+
+/* Some registers skipped */
+
+#define TW5864_INDIR_AIN_0x0E3 0x0e3
+/* Define controls in register TW5864_INDIR_AIN_0x0E3 */
+/*
+ * ADATP signal is coming from external ADPCM decoder, instead of on-chip ADPCM
+ * decoder
+ */
+#define TW5864_INDIR_AIN_0x0E3_EXT_ADATP BIT(7)
+/* ACLKP output signal polarity inverse */
+#define TW5864_INDIR_AIN_0x0E3_ACLKPPOLO BIT(6)
+/*
+ * ACLKR input signal polarity inverse.
+ * 0 Not inversed (Default)
+ * 1 Inversed
+ */
+#define TW5864_INDIR_AIN_0x0E3_ACLKRPOL BIT(5)
+/*
+ * ACLKP input signal polarity inverse.
+ * 0 Not inversed (Default)
+ * 1 Inversed
+ */
+#define TW5864_INDIR_AIN_0x0E3_ACLKPPOLI BIT(4)
+/*
+ * ACKI [21:0] control automatic set up with AFMD registers
+ * This mode is only effective when ACLKRMASTER=1
+ * 0 ACKI [21:0] registers set up ACKI control
+ * 1 ACKI control is automatically set up by AFMD register values
+ */
+#define TW5864_INDIR_AIN_0x0E3_AFAUTO BIT(3)
+/*
+ * AFAUTO control mode
+ * 0 8kHz setting (Default)
+ * 1 16kHz setting
+ * 2 32kHz setting
+ * 3 44.1kHz setting
+ * 4 48kHz setting
+ */
+#define TW5864_INDIR_AIN_0x0E3_AFMD 0x07
+
+#define TW5864_INDIR_AIN_0x0E4 0x0e4
+/* Define controls in register TW5864_INDIR_AIN_0x0ED */
+/*
+ * 8bit I2S Record output mode.
+ * 0 L/R half length separated output (Default).
+ * 1 One continuous packed output equal to DSP output format.
+ */
+#define TW5864_INDIR_AIN_0x0E4_I2S8MODE BIT(7)
+/*
+ * Audio Clock Master ACLKR output wave format.
+ * 0 High periods is one 27MHz clock period (default).
+ * 1 Almost duty 50-50% clock output on ACLKR pin. If this mode is selected, two
+ * times bigger number value need to be set up on the ACKI register. If
+ * AFAUTO=1, ACKI control is automatically set up even if MASCKMD=1.
+ */
+#define TW5864_INDIR_AIN_0x0E4_MASCKMD BIT(6)
+/* Playback ACLKP/ASYNP/ADATP input data MSB-LSB swapping */
+#define TW5864_INDIR_AIN_0x0E4_PBINSWAP BIT(5)
+/*
+ * ASYNR input signal delay.
+ * 0 No delay
+ * 1 Add one 27MHz period delay in ASYNR signal input
+ */
+#define TW5864_INDIR_AIN_0x0E4_ASYNRDLY BIT(4)
+/*
+ * ASYNP input signal delay.
+ * 0 no delay
+ * 1 add one 27MHz period delay in ASYNP signal input
+ */
+#define TW5864_INDIR_AIN_0x0E4_ASYNPDLY BIT(3)
+/*
+ * ADATP input data delay by one ACLKP clock.
+ * 0 No delay (Default). This is for I2S type 1T delay input interface.
+ * 1 Add 1 ACLKP clock delay in ADATP input data. This is for left-justified
+ * type 0T delay input interface.
+ */
+#define TW5864_INDIR_AIN_0x0E4_ADATPDLY BIT(2)
+/*
+ * Select u-Law/A-Law/PCM/SB data input format on ADATP pin.
+ * 0 PCM input (Default)
+ * 1 SB (Signed MSB bit in PCM data is inverted) input
+ * 2 u-Law input
+ * 3 A-Law input
+ */
+#define TW5864_INDIR_AIN_0x0E4_INLAWMD 0x03
+
+/*
+ * Enable state register updating and interrupt request of audio AIN5 detection
+ * for each input
+ */
+#define TW5864_INDIR_AIN_A5DETENA 0x0e5
+
+/* Some registers skipped */
+
+/*
+ * [7:3]: DEV_ID The TW5864 product ID code is 01000
+ * [2:0]: REV_ID The revision number is 0h
+ */
+#define TW5864_INDIR_ID 0x0fe
+
+#define TW5864_INDIR_IN_PIC_WIDTH(channel) (0x200 + 4 * channel)
+#define TW5864_INDIR_IN_PIC_HEIGHT(channel) (0x201 + 4 * channel)
+#define TW5864_INDIR_OUT_PIC_WIDTH(channel) (0x202 + 4 * channel)
+#define TW5864_INDIR_OUT_PIC_HEIGHT(channel) (0x203 + 4 * channel)
+/*
+ * Interrupt status register from the front-end. Write "1" to each bit to clear
+ * the interrupt
+ * 15:0 Motion detection interrupt for channel 0 ~ 15
+ * 31:16 Night detection interrupt for channel 0 ~ 15
+ * 47:32 Blind detection interrupt for channel 0 ~ 15
+ * 63:48 No video interrupt for channel 0 ~ 15
+ * 79:64 Line mode underflow interrupt for channel 0 ~ 15
+ * 95:80 Line mode overflow interrupt for channel 0 ~ 15
+ */
+/* 0x2d0~0x2d7: [63:0] bits */
+#define TW5864_INDIR_INTERRUPT1 0x2d0
+/* 0x2e0~0x2e3: [95:64] bits */
+#define TW5864_INDIR_INTERRUPT2 0x2e0
+
+/*
+ * Interrupt mask register for interrupts in 0x2d0 ~ 0x2d7
+ * 15:0 Motion detection interrupt for channel 0 ~ 15
+ * 31:16 Night detection interrupt for channel 0 ~ 15
+ * 47:32 Blind detection interrupt for channel 0 ~ 15
+ * 63:48 No video interrupt for channel 0 ~ 15
+ * 79:64 Line mode underflow interrupt for channel 0 ~ 15
+ * 95:80 Line mode overflow interrupt for channel 0 ~ 15
+ */
+/* 0x2d8~0x2df: [63:0] bits */
+#define TW5864_INDIR_INTERRUPT_MASK1 0x2d8
+/* 0x2e8~0x2eb: [95:64] bits */
+#define TW5864_INDIR_INTERRUPT_MASK2 0x2e8
+
+/* [11:0]: Interrupt summary register for interrupts & interrupt mask from in
+ * 0x2d0 ~ 0x2d7 and 0x2d8 ~ 0x2df
+ * bit 0: interrupt occurs in 0x2d0 & 0x2d8
+ * bit 1: interrupt occurs in 0x2d1 & 0x2d9
+ * bit 2: interrupt occurs in 0x2d2 & 0x2da
+ * bit 3: interrupt occurs in 0x2d3 & 0x2db
+ * bit 4: interrupt occurs in 0x2d4 & 0x2dc
+ * bit 5: interrupt occurs in 0x2d5 & 0x2dd
+ * bit 6: interrupt occurs in 0x2d6 & 0x2de
+ * bit 7: interrupt occurs in 0x2d7 & 0x2df
+ * bit 8: interrupt occurs in 0x2e0 & 0x2e8
+ * bit 9: interrupt occurs in 0x2e1 & 0x2e9
+ * bit 10: interrupt occurs in 0x2e2 & 0x2ea
+ * bit 11: interrupt occurs in 0x2e3 & 0x2eb
+ */
+#define TW5864_INDIR_INTERRUPT_SUMMARY 0x2f0
+
+/* Motion / Blind / Night Detection */
+/* valid value for channel is [0:15] */
+#define TW5864_INDIR_DETECTION_CTL0(channel) (0x300 + channel * 0x08)
+/* Define controls in register TW5864_INDIR_DETECTION_CTL0 */
+/*
+ * Disable the motion and blind detection.
+ * 0 Enable motion and blind detection (default)
+ * 1 Disable motion and blind detection
+ */
+#define TW5864_INDIR_DETECTION_CTL0_MD_DIS BIT(5)
+/*
+ * Request to start motion detection on manual trigger mode
+ * 0 None Operation (default)
+ * 1 Request to start motion detection
+ */
+#define TW5864_INDIR_DETECTION_CTL0_MD_STRB BIT(3)
+/*
+ * Select the trigger mode of motion detection
+ * 0 Automatic trigger mode of motion detection (default)
+ * 1 Manual trigger mode for motion detection
+ */
+#define TW5864_INDIR_DETECTION_CTL0_MD_STRB_EN BIT(2)
+/*
+ * Define the threshold of cell for blind detection.
+ * 0 Low threshold (More sensitive) (default)
+ * : :
+ * 3 High threshold (Less sensitive)
+ */
+#define TW5864_INDIR_DETECTION_CTL0_BD_CELSENS 0x03
+
+#define TW5864_INDIR_DETECTION_CTL1(channel) (0x301 + channel * 0x08)
+/* Define controls in register TW5864_INDIR_DETECTION_CTL1 */
+/*
+ * Control the temporal sensitivity of motion detector.
+ * 0 More Sensitive (default)
+ * : :
+ * 15 Less Sensitive
+ */
+#define TW5864_INDIR_DETECTION_CTL1_MD_TMPSENS_SHIFT 4
+#define TW5864_INDIR_DETECTION_CTL1_MD_TMPSENS (0x0f << 4)
+/*
+ * Adjust the horizontal starting position for motion detection
+ * 0 0 pixel (default)
+ * : :
+ * 15 15 pixels
+ */
+#define TW5864_INDIR_DETECTION_CTL1_MD_PIXEL_OS 0x0f
+
+#define TW5864_INDIR_DETECTION_CTL2(channel) (0x302 + channel * 0x08)
+/* Define controls in register TW5864_INDIR_DETECTION_CTL2 */
+/*
+ * Control the updating time of reference field for motion detection.
+ * 0 Update reference field every field (default)
+ * 1 Update reference field according to MD_SPEED
+ */
+#define TW5864_INDIR_DETECTION_CTL2_MD_REFFLD BIT(7)
+/*
+ * Select the field for motion detection.
+ * 0 Detecting motion for only odd field (default)
+ * 1 Detecting motion for only even field
+ * 2 Detecting motion for any field
+ * 3 Detecting motion for both odd and even field
+ */
+#define TW5864_INDIR_DETECTION_CTL2_MD_FIELD_SHIFT 5
+#define TW5864_INDIR_DETECTION_CTL2_MD_FIELD (0x03 << 5)
+/*
+ * Control the level sensitivity of motion detector.
+ * 0 More sensitive (default)
+ * : :
+ * 15 Less sensitive
+ */
+#define TW5864_INDIR_DETECTION_CTL2_MD_LVSENS 0x1f
+
+#define TW5864_INDIR_DETECTION_CTL3(channel) (0x303 + channel * 0x08)
+/* Define controls in register TW5864_INDIR_DETECTION_CTL3 */
+/*
+ * Define the threshold of sub-cell number for motion detection.
+ * 0 Motion is detected if 1 sub-cell has motion (More sensitive) (default)
+ * 1 Motion is detected if 2 sub-cells have motion
+ * 2 Motion is detected if 3 sub-cells have motion
+ * 3 Motion is detected if 4 sub-cells have motion (Less sensitive)
+ */
+#define TW5864_INDIR_DETECTION_CTL3_MD_CELSENS_SHIFT 6
+#define TW5864_INDIR_DETECTION_CTL3_MD_CELSENS (0x03 << 6)
+/*
+ * Control the velocity of motion detector.
+ * Large value is suitable for slow motion detection.
+ * In MD_DUAL_EN = 1, MD_SPEED should be limited to 0 ~ 31.
+ * 0 1 field intervals (default)
+ * 1 2 field intervals
+ * : :
+ * 61 62 field intervals
+ * 62 63 field intervals
+ * 63 Not supported
+ */
+#define TW5864_INDIR_DETECTION_CTL3_MD_SPEED 0x3f
+
+#define TW5864_INDIR_DETECTION_CTL4(channel) (0x304 + channel * 0x08)
+/* Define controls in register TW5864_INDIR_DETECTION_CTL4 */
+/*
+ * Control the spatial sensitivity of motion detector.
+ * 0 More Sensitive (default)
+ * : :
+ * 15 Less Sensitive
+ */
+#define TW5864_INDIR_DETECTION_CTL4_MD_SPSENS_SHIFT 4
+#define TW5864_INDIR_DETECTION_CTL4_MD_SPSENS (0x0f << 4)
+/*
+ * Define the threshold of level for blind detection.
+ * 0 Low threshold (More sensitive) (default)
+ * : :
+ * 15 High threshold (Less sensitive)
+ */
+#define TW5864_INDIR_DETECTION_CTL4_BD_LVSENS 0x0f
+
+#define TW5864_INDIR_DETECTION_CTL5(channel) (0x305 + channel * 0x08)
+/*
+ * Define the threshold of temporal sensitivity for night detection.
+ * 0 Low threshold (More sensitive) (default)
+ * : :
+ * 15 High threshold (Less sensitive)
+ */
+#define TW5864_INDIR_DETECTION_CTL5_ND_TMPSENS_SHIFT 4
+#define TW5864_INDIR_DETECTION_CTL5_ND_TMPSENS (0x0f << 4)
+/*
+ * Define the threshold of level for night detection.
+ * 0 Low threshold (More sensitive) (default)
+ * : :
+ * 3 High threshold (Less sensitive)
+ */
+#define TW5864_INDIR_DETECTION_CTL5_ND_LVSENS 0x0f
+
+/*
+ * [11:0] The base address of the motion detection buffer. This address is in
+ * unit of 64K bytes. The generated DDR address will be {MD_BASE_ADDR,
+ * 16"h0000}. The default value should be 12"h000
+ */
+#define TW5864_INDIR_MD_BASE_ADDR 0x380
+
+/*
+ * This controls the channel of the motion detection result shown in register
+ * 0x3a0 ~ 0x3b7. Before reading back motion result, always set this first.
+ */
+#define TW5864_INDIR_RGR_MOTION_SEL 0x382
+
+/* [15:0] MD strobe has been performed at channel n (read only) */
+#define TW5864_INDIR_MD_STRB 0x386
+/* NO_VIDEO Detected from channel n (read only) */
+#define TW5864_INDIR_NOVID_DET 0x388
+/* Motion Detected from channel n (read only) */
+#define TW5864_INDIR_MD_DET 0x38a
+/* Blind Detected from channel n (read only) */
+#define TW5864_INDIR_BD_DET 0x38c
+/* Night Detected from channel n (read only) */
+#define TW5864_INDIR_ND_DET 0x38e
+
+/* 192 bit motion flag of the channel specified by RGR_MOTION_SEL in 0x382 */
+#define TW5864_INDIR_MOTION_FLAG 0x3a0
+#define TW5864_INDIR_MOTION_FLAG_BYTE_COUNT 24
+
+/*
+ * [9:0] The motion cell count of a specific channel selected by 0x382. This is
+ * for DI purpose
+ */
+#define TW5864_INDIR_MD_DI_CNT 0x3b8
+/* The motion detection cell sensitivity for DI purpose */
+#define TW5864_INDIR_MD_DI_CELLSENS 0x3ba
+/* The motion detection threshold level for DI purpose */
+#define TW5864_INDIR_MD_DI_LVSENS 0x3bb
+
+/* 192 bit motion mask of the channel specified by MASK_CH_SEL in 0x3fe */
+#define TW5864_INDIR_MOTION_MASK 0x3e0
+#define TW5864_INDIR_MOTION_MASK_BYTE_COUNT 24
+
+/* [4:0] The channel selection to access masks in 0x3e0 ~ 0x3f7 */
+#define TW5864_INDIR_MASK_CH_SEL 0x3fe
+
+/* Clock PLL / Analog IP Control */
+/* Some registers skipped */
+
+#define TW5864_INDIR_DDRA_DLL_DQS_SEL0 0xee6
+#define TW5864_INDIR_DDRA_DLL_DQS_SEL1 0xee7
+#define TW5864_INDIR_DDRA_DLL_CLK90_SEL 0xee8
+#define TW5864_INDIR_DDRA_DLL_TEST_SEL_AND_TAP_S 0xee9
+
+#define TW5864_INDIR_DDRB_DLL_DQS_SEL0 0xeeb
+#define TW5864_INDIR_DDRB_DLL_DQS_SEL1 0xeec
+#define TW5864_INDIR_DDRB_DLL_CLK90_SEL 0xeed
+#define TW5864_INDIR_DDRB_DLL_TEST_SEL_AND_TAP_S 0xeee
+
+#define TW5864_INDIR_RESET 0xef0
+#define TW5864_INDIR_RESET_VD BIT(7)
+#define TW5864_INDIR_RESET_DLL BIT(6)
+#define TW5864_INDIR_RESET_MUX_CORE BIT(5)
+
+#define TW5864_INDIR_PV_VD_CK_POL 0xefd
+#define TW5864_INDIR_PV_VD_CK_POL_PV(channel) BIT(channel)
+#define TW5864_INDIR_PV_VD_CK_POL_VD(channel) BIT(channel + 4)
+
+#define TW5864_INDIR_CLK0_SEL 0xefe
+#define TW5864_INDIR_CLK0_SEL_VD_SHIFT 0
+#define TW5864_INDIR_CLK0_SEL_VD_MASK 0x3
+#define TW5864_INDIR_CLK0_SEL_PV_SHIFT 2
+#define TW5864_INDIR_CLK0_SEL_PV_MASK (0x3 << 2)
+#define TW5864_INDIR_CLK0_SEL_PV2_SHIFT 4
+#define TW5864_INDIR_CLK0_SEL_PV2_MASK (0x3 << 4)
diff --git a/drivers/media/pci/tw5864/tw5864-util.c b/drivers/media/pci/tw5864/tw5864-util.c
new file mode 100644
index 0000000..771eef2
--- /dev/null
+++ b/drivers/media/pci/tw5864/tw5864-util.c
@@ -0,0 +1,37 @@
+#include "tw5864.h"
+
+void tw5864_indir_writeb(struct tw5864_dev *dev, u16 addr, u8 data)
+{
+	int retries = 30000;
+
+	while (tw_readl(TW5864_IND_CTL) & BIT(31) && --retries)
+		;
+	if (!retries)
+		dev_err(&dev->pci->dev,
+			"tw_indir_writel() retries exhausted before writing\n");
+
+	tw_writel(TW5864_IND_DATA, data);
+	tw_writel(TW5864_IND_CTL, addr << 2 | TW5864_RW | TW5864_ENABLE);
+}
+
+u8 tw5864_indir_readb(struct tw5864_dev *dev, u16 addr)
+{
+	int retries = 30000;
+
+	while (tw_readl(TW5864_IND_CTL) & BIT(31) && --retries)
+		;
+	if (!retries)
+		dev_err(&dev->pci->dev,
+			"tw_indir_readl() retries exhausted before reading\n");
+
+	tw_writel(TW5864_IND_CTL, addr << 2 | TW5864_ENABLE);
+
+	retries = 30000;
+	while (tw_readl(TW5864_IND_CTL) & BIT(31) && --retries)
+		;
+	if (!retries)
+		dev_err(&dev->pci->dev,
+			"tw_indir_readl() retries exhausted at reading\n");
+
+	return tw_readl(TW5864_IND_DATA);
+}
diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
new file mode 100644
index 0000000..652a059
--- /dev/null
+++ b/drivers/media/pci/tw5864/tw5864-video.c
@@ -0,0 +1,1510 @@
+/*
+ *  TW5864 driver - video encoding functions
+ *
+ *  Copyright (C) 2016 Bluecherry, LLC <maintainers@bluecherrydvr.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "tw5864.h"
+#include "tw5864-reg.h"
+
+#define QUANTIZATION_TABLE_LEN 96
+#define VLC_LOOKUP_TABLE_LEN 1024
+
+static const u16 forward_quantization_table[QUANTIZATION_TABLE_LEN] = {
+	0x3333, 0x1f82, 0x3333, 0x1f82, 0x1f82, 0x147b, 0x1f82, 0x147b,
+	0x3333, 0x1f82, 0x3333, 0x1f82, 0x1f82, 0x147b, 0x1f82, 0x147b,
+	0x2e8c, 0x1d42, 0x2e8c, 0x1d42, 0x1d42, 0x1234, 0x1d42, 0x1234,
+	0x2e8c, 0x1d42, 0x2e8c, 0x1d42, 0x1d42, 0x1234, 0x1d42, 0x1234,
+	0x2762, 0x199a, 0x2762, 0x199a, 0x199a, 0x1062, 0x199a, 0x1062,
+	0x2762, 0x199a, 0x2762, 0x199a, 0x199a, 0x1062, 0x199a, 0x1062,
+	0x2492, 0x16c1, 0x2492, 0x16c1, 0x16c1, 0x0e3f, 0x16c1, 0x0e3f,
+	0x2492, 0x16c1, 0x2492, 0x16c1, 0x16c1, 0x0e3f, 0x16c1, 0x0e3f,
+	0x2000, 0x147b, 0x2000, 0x147b, 0x147b, 0x0d1b, 0x147b, 0x0d1b,
+	0x2000, 0x147b, 0x2000, 0x147b, 0x147b, 0x0d1b, 0x147b, 0x0d1b,
+	0x1c72, 0x11cf, 0x1c72, 0x11cf, 0x11cf, 0x0b4d, 0x11cf, 0x0b4d,
+	0x1c72, 0x11cf, 0x1c72, 0x11cf, 0x11cf, 0x0b4d, 0x11cf, 0x0b4d
+};
+
+static const u16 inverse_quantization_table[QUANTIZATION_TABLE_LEN] = {
+	0x800a, 0x800d, 0x800a, 0x800d, 0x800d, 0x8010, 0x800d, 0x8010,
+	0x800a, 0x800d, 0x800a, 0x800d, 0x800d, 0x8010, 0x800d, 0x8010,
+	0x800b, 0x800e, 0x800b, 0x800e, 0x800e, 0x8012, 0x800e, 0x8012,
+	0x800b, 0x800e, 0x800b, 0x800e, 0x800e, 0x8012, 0x800e, 0x8012,
+	0x800d, 0x8010, 0x800d, 0x8010, 0x8010, 0x8014, 0x8010, 0x8014,
+	0x800d, 0x8010, 0x800d, 0x8010, 0x8010, 0x8014, 0x8010, 0x8014,
+	0x800e, 0x8012, 0x800e, 0x8012, 0x8012, 0x8017, 0x8012, 0x8017,
+	0x800e, 0x8012, 0x800e, 0x8012, 0x8012, 0x8017, 0x8012, 0x8017,
+	0x8010, 0x8014, 0x8010, 0x8014, 0x8014, 0x8019, 0x8014, 0x8019,
+	0x8010, 0x8014, 0x8010, 0x8014, 0x8014, 0x8019, 0x8014, 0x8019,
+	0x8012, 0x8017, 0x8012, 0x8017, 0x8017, 0x801d, 0x8017, 0x801d,
+	0x8012, 0x8017, 0x8012, 0x8017, 0x8017, 0x801d, 0x8017, 0x801d
+};
+
+static const u16 encoder_vlc_lookup_table[VLC_LOOKUP_TABLE_LEN] = {
+	0x011, 0x000, 0x000, 0x000, 0x065, 0x021, 0x000, 0x000, 0x087, 0x064,
+	0x031, 0x000, 0x097, 0x086, 0x075, 0x053, 0x0a7, 0x096, 0x085, 0x063,
+	0x0b7, 0x0a6, 0x095, 0x074, 0x0df, 0x0b6, 0x0a5, 0x084, 0x0db, 0x0de,
+	0x0b5, 0x094, 0x0d8, 0x0da, 0x0dd, 0x0a4, 0x0ef, 0x0ee, 0x0d9, 0x0b4,
+	0x0eb, 0x0ea, 0x0ed, 0x0dc, 0x0ff, 0x0fe, 0x0e9, 0x0ec, 0x0fb, 0x0fa,
+	0x0fd, 0x0e8, 0x10f, 0x0f1, 0x0f9, 0x0fc, 0x10b, 0x10e, 0x10d, 0x0f8,
+	0x107, 0x10a, 0x109, 0x10c, 0x104, 0x106, 0x105, 0x108, 0x023, 0x000,
+	0x000, 0x000, 0x06b, 0x022, 0x000, 0x000, 0x067, 0x057, 0x033, 0x000,
+	0x077, 0x06a, 0x069, 0x045, 0x087, 0x066, 0x065, 0x044, 0x084, 0x076,
+	0x075, 0x056, 0x097, 0x086, 0x085, 0x068, 0x0bf, 0x096, 0x095, 0x064,
+	0x0bb, 0x0be, 0x0bd, 0x074, 0x0cf, 0x0ba, 0x0b9, 0x094, 0x0cb, 0x0ce,
+	0x0cd, 0x0bc, 0x0c8, 0x0ca, 0x0c9, 0x0b8, 0x0df, 0x0de, 0x0dd, 0x0cc,
+	0x0db, 0x0da, 0x0d9, 0x0dc, 0x0d7, 0x0eb, 0x0d6, 0x0d8, 0x0e9, 0x0e8,
+	0x0ea, 0x0d1, 0x0e7, 0x0e6, 0x0e5, 0x0e4, 0x04f, 0x000, 0x000, 0x000,
+	0x06f, 0x04e, 0x000, 0x000, 0x06b, 0x05f, 0x04d, 0x000, 0x068, 0x05c,
+	0x05e, 0x04c, 0x07f, 0x05a, 0x05b, 0x04b, 0x07b, 0x058, 0x059, 0x04a,
+	0x079, 0x06e, 0x06d, 0x049, 0x078, 0x06a, 0x069, 0x048, 0x08f, 0x07e,
+	0x07d, 0x05d, 0x08b, 0x08e, 0x07a, 0x06c, 0x09f, 0x08a, 0x08d, 0x07c,
+	0x09b, 0x09e, 0x089, 0x08c, 0x098, 0x09a, 0x09d, 0x088, 0x0ad, 0x097,
+	0x099, 0x09c, 0x0a9, 0x0ac, 0x0ab, 0x0aa, 0x0a5, 0x0a8, 0x0a7, 0x0a6,
+	0x0a1, 0x0a4, 0x0a3, 0x0a2, 0x021, 0x000, 0x000, 0x000, 0x067, 0x011,
+	0x000, 0x000, 0x064, 0x066, 0x031, 0x000, 0x063, 0x073, 0x072, 0x065,
+	0x062, 0x083, 0x082, 0x070, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x011, 0x010,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x011, 0x021, 0x020, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x023, 0x022, 0x021, 0x020, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x023, 0x022, 0x021, 0x031,
+	0x030, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x023, 0x022, 0x033, 0x032, 0x031, 0x030, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x023, 0x030,
+	0x031, 0x033, 0x032, 0x035, 0x034, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x037, 0x036, 0x035, 0x034, 0x033, 0x032,
+	0x031, 0x041, 0x051, 0x061, 0x071, 0x081, 0x091, 0x0a1, 0x0b1, 0x000,
+	0x002, 0x000, 0x0e4, 0x011, 0x0f4, 0x002, 0x024, 0x003, 0x005, 0x012,
+	0x034, 0x013, 0x065, 0x024, 0x013, 0x063, 0x015, 0x022, 0x075, 0x034,
+	0x044, 0x023, 0x023, 0x073, 0x054, 0x033, 0x033, 0x004, 0x043, 0x014,
+	0x011, 0x043, 0x014, 0x001, 0x025, 0x015, 0x035, 0x025, 0x064, 0x055,
+	0x045, 0x035, 0x074, 0x065, 0x085, 0x0d5, 0x012, 0x095, 0x055, 0x045,
+	0x095, 0x0e5, 0x084, 0x075, 0x022, 0x0a5, 0x094, 0x085, 0x032, 0x0b5,
+	0x003, 0x0c5, 0x001, 0x044, 0x0a5, 0x032, 0x0b5, 0x094, 0x0c5, 0x0a4,
+	0x0a4, 0x054, 0x0d5, 0x0b4, 0x0b4, 0x064, 0x0f5, 0x0f5, 0x053, 0x0d4,
+	0x0e5, 0x0c4, 0x105, 0x105, 0x0c4, 0x074, 0x063, 0x0e4, 0x0d4, 0x084,
+	0x073, 0x0f4, 0x004, 0x005, 0x000, 0x053, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x011, 0x021, 0x031, 0x030, 0x011, 0x021, 0x020, 0x000,
+	0x011, 0x010, 0x000, 0x000, 0x011, 0x033, 0x032, 0x043, 0x042, 0x053,
+	0x052, 0x063, 0x062, 0x073, 0x072, 0x083, 0x082, 0x093, 0x092, 0x091,
+	0x037, 0x036, 0x035, 0x034, 0x033, 0x045, 0x044, 0x043, 0x042, 0x053,
+	0x052, 0x063, 0x062, 0x061, 0x060, 0x000, 0x045, 0x037, 0x036, 0x035,
+	0x044, 0x043, 0x034, 0x033, 0x042, 0x053, 0x052, 0x061, 0x051, 0x060,
+	0x000, 0x000, 0x053, 0x037, 0x045, 0x044, 0x036, 0x035, 0x034, 0x043,
+	0x033, 0x042, 0x052, 0x051, 0x050, 0x000, 0x000, 0x000, 0x045, 0x044,
+	0x043, 0x037, 0x036, 0x035, 0x034, 0x033, 0x042, 0x051, 0x041, 0x050,
+	0x000, 0x000, 0x000, 0x000, 0x061, 0x051, 0x037, 0x036, 0x035, 0x034,
+	0x033, 0x032, 0x041, 0x031, 0x060, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x061, 0x051, 0x035, 0x034, 0x033, 0x023, 0x032, 0x041, 0x031, 0x060,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x061, 0x041, 0x051, 0x033,
+	0x023, 0x022, 0x032, 0x031, 0x060, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x061, 0x060, 0x041, 0x023, 0x022, 0x031, 0x021, 0x051,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x051, 0x050,
+	0x031, 0x023, 0x022, 0x021, 0x041, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x040, 0x041, 0x031, 0x032, 0x011, 0x033,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x040, 0x041, 0x021, 0x011, 0x031, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x030, 0x031, 0x011, 0x021,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x020, 0x021, 0x011, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x010, 0x011,
+	0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+	0x000, 0x000, 0x000, 0x000
+};
+
+static const unsigned int lambda_lookup_table[] = {
+	0x0020, 0x0020, 0x0020, 0x0020, 0x0020, 0x0020, 0x0020, 0x0020,
+	0x0020, 0x0020, 0x0020, 0x0020, 0x0020, 0x0020, 0x0020, 0x0020,
+	0x0040, 0x0040, 0x0040, 0x0040, 0x0060, 0x0060, 0x0060, 0x0080,
+	0x0080, 0x0080, 0x00a0, 0x00c0, 0x00c0, 0x00e0, 0x0100, 0x0120,
+	0x0140, 0x0160, 0x01a0, 0x01c0, 0x0200, 0x0240, 0x0280, 0x02e0,
+	0x0320, 0x03a0, 0x0400, 0x0480, 0x0500, 0x05a0, 0x0660, 0x0720,
+	0x0800, 0x0900, 0x0a20, 0x0b60
+};
+
+static const unsigned int intra4x4_lambda3[] = {
+	1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1,
+	2, 2, 2, 2, 3, 3, 3, 4,
+	4, 4, 5, 6, 6, 7, 8, 9,
+	10, 11, 13, 14, 16, 18, 20, 23,
+	25, 29, 32, 36, 40, 45, 51, 57,
+	64, 72, 81, 91
+};
+
+static v4l2_std_id tw5864_get_v4l2_std(enum tw5864_vid_std std);
+static enum tw5864_vid_std tw5864_from_v4l2_std(v4l2_std_id v4l2_std);
+
+static void tw5864_handle_frame_task(unsigned long data);
+static void tw5864_handle_frame(struct tw5864_h264_frame *frame);
+static void tw5864_frame_interval_set(struct tw5864_input *input);
+
+static int tw5864_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+			      unsigned int *num_planes, unsigned int sizes[],
+			      struct device *alloc_ctxs[])
+{
+	if (*num_planes)
+		return sizes[0] < H264_VLC_BUF_SIZE ? -EINVAL : 0;
+
+	sizes[0] = H264_VLC_BUF_SIZE;
+	*num_planes = 1;
+
+	return 0;
+}
+
+static void tw5864_buf_queue(struct vb2_buffer *vb)
+{
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct vb2_queue *vq = vb->vb2_queue;
+	struct tw5864_input *dev = vb2_get_drv_priv(vq);
+	struct tw5864_buf *buf = container_of(vbuf, struct tw5864_buf, vb);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->slock, flags);
+	list_add_tail(&buf->list, &dev->active);
+	spin_unlock_irqrestore(&dev->slock, flags);
+}
+
+static int tw5864_input_std_get(struct tw5864_input *input,
+				enum tw5864_vid_std *std)
+{
+	struct tw5864_dev *dev = input->root;
+	u8 std_reg = tw_indir_readb(TW5864_INDIR_VIN_E(input->nr));
+
+	*std = (std_reg & 0x70) >> 4;
+
+	if (std_reg & 0x80) {
+		dev_dbg(&dev->pci->dev,
+			"Video format detection is in progress, please wait\n");
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static int tw5864_enable_input(struct tw5864_input *input)
+{
+	struct tw5864_dev *dev = input->root;
+	int nr = input->nr;
+	unsigned long flags;
+	int d1_width = 720;
+	int d1_height;
+	int frame_width_bus_value = 0;
+	int frame_height_bus_value = 0;
+	int reg_frame_bus = 0x1c;
+	int fmt_reg_value = 0;
+	int downscale_enabled = 0;
+
+	dev_dbg(&dev->pci->dev, "Enabling channel %d\n", nr);
+
+	input->frame_seqno = 0;
+	input->frame_gop_seqno = 0;
+	input->h264_idr_pic_id = 0;
+
+	input->reg_dsp_qp = input->qp;
+	input->reg_dsp_ref_mvp_lambda = lambda_lookup_table[input->qp];
+	input->reg_dsp_i4x4_weight = intra4x4_lambda3[input->qp];
+	input->reg_emu = TW5864_EMU_EN_LPF | TW5864_EMU_EN_BHOST
+		| TW5864_EMU_EN_SEN | TW5864_EMU_EN_ME | TW5864_EMU_EN_DDR;
+	input->reg_dsp = nr /* channel id */
+		| TW5864_DSP_CHROM_SW
+		| ((0xa << 8) & TW5864_DSP_MB_DELAY)
+		;
+
+	input->resolution = D1;
+
+	d1_height = (input->std == STD_NTSC) ? 480 : 576;
+
+	input->width = d1_width;
+	input->height = d1_height;
+
+	input->reg_interlacing = 0x4;
+
+	switch (input->resolution) {
+	case D1:
+		frame_width_bus_value = 0x2cf;
+		frame_height_bus_value = input->height - 1;
+		reg_frame_bus = 0x1c;
+		fmt_reg_value = 0;
+		downscale_enabled = 0;
+		input->reg_dsp_codec |= TW5864_CIF_MAP_MD | TW5864_HD1_MAP_MD;
+		input->reg_emu |= TW5864_DSP_FRAME_TYPE_D1;
+		input->reg_interlacing = TW5864_DI_EN | TW5864_DSP_INTER_ST;
+
+		tw_setl(TW5864_FULL_HALF_FLAG, 1 << nr);
+		break;
+	case HD1:
+		input->height /= 2;
+		input->width /= 2;
+		frame_width_bus_value = 0x2cf;
+		frame_height_bus_value = input->height * 2 - 1;
+		reg_frame_bus = 0x1c;
+		fmt_reg_value = 0;
+		downscale_enabled = 0;
+		input->reg_dsp_codec |= TW5864_HD1_MAP_MD;
+		input->reg_emu |= TW5864_DSP_FRAME_TYPE_D1;
+
+		tw_clearl(TW5864_FULL_HALF_FLAG, 1 << nr);
+
+		break;
+	case CIF:
+		input->height /= 4;
+		input->width /= 2;
+		frame_width_bus_value = 0x15f;
+		frame_height_bus_value = input->height * 2 - 1;
+		reg_frame_bus = 0x07;
+		fmt_reg_value = 1;
+		downscale_enabled = 1;
+		input->reg_dsp_codec |= TW5864_CIF_MAP_MD;
+
+		tw_clearl(TW5864_FULL_HALF_FLAG, 1 << nr);
+		break;
+	case QCIF:
+		input->height /= 4;
+		input->width /= 4;
+		frame_width_bus_value = 0x15f;
+		frame_height_bus_value = input->height * 2 - 1;
+		reg_frame_bus = 0x07;
+		fmt_reg_value = 1;
+		downscale_enabled = 1;
+		input->reg_dsp_codec |= TW5864_CIF_MAP_MD;
+
+		tw_clearl(TW5864_FULL_HALF_FLAG, 1 << nr);
+		break;
+	}
+
+	/* analog input width / 4 */
+	tw_indir_writeb(TW5864_INDIR_IN_PIC_WIDTH(nr), d1_width / 4);
+	tw_indir_writeb(TW5864_INDIR_IN_PIC_HEIGHT(nr), d1_height / 4);
+
+	/* output width / 4 */
+	tw_indir_writeb(TW5864_INDIR_OUT_PIC_WIDTH(nr), input->width / 4);
+	tw_indir_writeb(TW5864_INDIR_OUT_PIC_HEIGHT(nr), input->height / 4);
+
+	tw_writel(TW5864_DSP_PIC_MAX_MB,
+		  ((input->width / 16) << 8) | (input->height / 16));
+
+	tw_writel(TW5864_FRAME_WIDTH_BUS_A(nr),
+		  frame_width_bus_value);
+	tw_writel(TW5864_FRAME_WIDTH_BUS_B(nr),
+		  frame_width_bus_value);
+	tw_writel(TW5864_FRAME_HEIGHT_BUS_A(nr),
+		  frame_height_bus_value);
+	tw_writel(TW5864_FRAME_HEIGHT_BUS_B(nr),
+		  (frame_height_bus_value + 1) / 2 - 1);
+
+	tw5864_frame_interval_set(input);
+
+	if (downscale_enabled)
+		tw_setl(TW5864_H264EN_CH_DNS, 1 << nr);
+
+	tw_mask_shift_writel(TW5864_H264EN_CH_FMT_REG1, 0x3, 2 * nr,
+			     fmt_reg_value);
+
+	tw_mask_shift_writel((nr < 2
+			      ? TW5864_H264EN_RATE_MAX_LINE_REG1
+			      : TW5864_H264EN_RATE_MAX_LINE_REG2),
+			     0x1f, 5 * (nr % 2),
+			     input->std == STD_NTSC ? 29 : 24);
+
+	tw_mask_shift_writel((nr < 2) ? TW5864_FRAME_BUS1 :
+			     TW5864_FRAME_BUS2, 0xff, (nr % 2) * 8,
+			     reg_frame_bus);
+
+	spin_lock_irqsave(&dev->slock, flags);
+	input->enabled = 1;
+	spin_unlock_irqrestore(&dev->slock, flags);
+
+	return 0;
+}
+
+void tw5864_request_encoded_frame(struct tw5864_input *input)
+{
+	struct tw5864_dev *dev = input->root;
+	u32 enc_buf_id_new;
+
+	tw_setl(TW5864_DSP_CODEC, TW5864_CIF_MAP_MD | TW5864_HD1_MAP_MD);
+	tw_writel(TW5864_EMU, input->reg_emu);
+	tw_writel(TW5864_INTERLACING, input->reg_interlacing);
+	tw_writel(TW5864_DSP, input->reg_dsp);
+
+	tw_writel(TW5864_DSP_QP, input->reg_dsp_qp);
+	tw_writel(TW5864_DSP_REF_MVP_LAMBDA, input->reg_dsp_ref_mvp_lambda);
+	tw_writel(TW5864_DSP_I4x4_WEIGHT, input->reg_dsp_i4x4_weight);
+	tw_mask_shift_writel(TW5864_DSP_INTRA_MODE, TW5864_DSP_INTRA_MODE_MASK,
+			     TW5864_DSP_INTRA_MODE_SHIFT,
+			     TW5864_DSP_INTRA_MODE_16x16);
+
+	if (input->frame_gop_seqno == 0) {
+		/* Produce I-frame */
+		tw_writel(TW5864_MOTION_SEARCH_ETC, TW5864_INTRA_EN);
+		input->h264_idr_pic_id++;
+		input->h264_idr_pic_id &= TW5864_DSP_REF_FRM;
+	} else {
+		/* Produce P-frame */
+		tw_writel(TW5864_MOTION_SEARCH_ETC, TW5864_INTRA_EN |
+			  TW5864_ME_EN | BIT(5) /* SRCH_OPT default */);
+	}
+	tw5864_prepare_frame_headers(input);
+	tw_writel(TW5864_VLC,
+		  TW5864_VLC_PCI_SEL |
+		  ((input->tail_nb_bits + 24) << TW5864_VLC_BIT_ALIGN_SHIFT) |
+		  input->reg_dsp_qp);
+
+	enc_buf_id_new = tw_mask_shift_readl(TW5864_ENC_BUF_PTR_REC1, 0x3,
+					     2 * input->nr);
+	tw_writel(TW5864_DSP_ENC_ORG_PTR_REG,
+		  enc_buf_id_new << TW5864_DSP_ENC_ORG_PTR_SHIFT);
+	tw_writel(TW5864_DSP_ENC_REC,
+		  enc_buf_id_new << 12 | ((enc_buf_id_new + 3) & 3));
+
+	tw_writel(TW5864_SLICE, TW5864_START_NSLICE);
+	tw_writel(TW5864_SLICE, 0);
+}
+
+static int tw5864_disable_input(struct tw5864_input *input)
+{
+	struct tw5864_dev *dev = input->root;
+	unsigned long flags;
+
+	dev_dbg(&dev->pci->dev, "Disabling channel %d\n", input->nr);
+
+	spin_lock_irqsave(&dev->slock, flags);
+	input->enabled = 0;
+	spin_unlock_irqrestore(&dev->slock, flags);
+	return 0;
+}
+
+static int tw5864_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct tw5864_input *input = vb2_get_drv_priv(q);
+	int ret;
+
+	ret = tw5864_enable_input(input);
+	if (!ret)
+		return 0;
+
+	while (!list_empty(&input->active)) {
+		struct tw5864_buf *buf = list_entry(input->active.next,
+						    struct tw5864_buf, list);
+
+		list_del(&buf->list);
+		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+	}
+	return ret;
+}
+
+static void tw5864_stop_streaming(struct vb2_queue *q)
+{
+	unsigned long flags;
+	struct tw5864_input *input = vb2_get_drv_priv(q);
+
+	tw5864_disable_input(input);
+
+	spin_lock_irqsave(&input->slock, flags);
+	if (input->vb) {
+		vb2_buffer_done(&input->vb->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+		input->vb = NULL;
+	}
+	while (!list_empty(&input->active)) {
+		struct tw5864_buf *buf = list_entry(input->active.next,
+						    struct tw5864_buf, list);
+
+		list_del(&buf->list);
+		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+	}
+	spin_unlock_irqrestore(&input->slock, flags);
+}
+
+static const struct vb2_ops tw5864_video_qops = {
+	.queue_setup = tw5864_queue_setup,
+	.buf_queue = tw5864_buf_queue,
+	.start_streaming = tw5864_start_streaming,
+	.stop_streaming = tw5864_stop_streaming,
+	.wait_prepare = vb2_ops_wait_prepare,
+	.wait_finish = vb2_ops_wait_finish,
+};
+
+static int tw5864_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct tw5864_input *input =
+		container_of(ctrl->handler, struct tw5864_input, hdl);
+	struct tw5864_dev *dev = input->root;
+	unsigned long flags;
+
+	switch (ctrl->id) {
+	case V4L2_CID_BRIGHTNESS:
+		tw_indir_writeb(TW5864_INDIR_VIN_A_BRIGHT(input->nr),
+				(u8)ctrl->val);
+		break;
+	case V4L2_CID_HUE:
+		tw_indir_writeb(TW5864_INDIR_VIN_7_HUE(input->nr),
+				(u8)ctrl->val);
+		break;
+	case V4L2_CID_CONTRAST:
+		tw_indir_writeb(TW5864_INDIR_VIN_9_CNTRST(input->nr),
+				(u8)ctrl->val);
+		break;
+	case V4L2_CID_SATURATION:
+		tw_indir_writeb(TW5864_INDIR_VIN_B_SAT_U(input->nr),
+				(u8)ctrl->val);
+		tw_indir_writeb(TW5864_INDIR_VIN_C_SAT_V(input->nr),
+				(u8)ctrl->val);
+		break;
+	case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+		input->gop = ctrl->val;
+		return 0;
+	case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
+		spin_lock_irqsave(&input->slock, flags);
+		input->qp = ctrl->val;
+		input->reg_dsp_qp = input->qp;
+		input->reg_dsp_ref_mvp_lambda = lambda_lookup_table[input->qp];
+		input->reg_dsp_i4x4_weight = intra4x4_lambda3[input->qp];
+		spin_unlock_irqrestore(&input->slock, flags);
+		return 0;
+	case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD:
+		memset(input->md_threshold_grid_values, ctrl->val,
+		       sizeof(input->md_threshold_grid_values));
+		return 0;
+	case V4L2_CID_DETECT_MD_MODE:
+		return 0;
+	case V4L2_CID_DETECT_MD_THRESHOLD_GRID:
+		/* input->md_threshold_grid_ctrl->p_new.p_u16 contains data */
+		memcpy(input->md_threshold_grid_values,
+		       input->md_threshold_grid_ctrl->p_new.p_u16,
+		       sizeof(input->md_threshold_grid_values));
+		return 0;
+	}
+	return 0;
+}
+
+static int tw5864_fmt_vid_cap(struct file *file, void *priv,
+			      struct v4l2_format *f)
+{
+	struct tw5864_input *input = video_drvdata(file);
+
+	f->fmt.pix.width = 720;
+	switch (input->std) {
+	default:
+		WARN_ON_ONCE(1);
+	case STD_NTSC:
+		f->fmt.pix.height = 480;
+		break;
+	case STD_PAL:
+	case STD_SECAM:
+		f->fmt.pix.height = 576;
+		break;
+	}
+	f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+	f->fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
+	f->fmt.pix.sizeimage = H264_VLC_BUF_SIZE;
+	f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+	return 0;
+}
+
+static int tw5864_enum_input(struct file *file, void *priv,
+			     struct v4l2_input *i)
+{
+	struct tw5864_input *input = video_drvdata(file);
+	struct tw5864_dev *dev = input->root;
+
+	u8 indir_0x000 = tw_indir_readb(TW5864_INDIR_VIN_0(input->nr));
+	u8 indir_0x00d = tw_indir_readb(TW5864_INDIR_VIN_D(input->nr));
+	u8 v1 = indir_0x000;
+	u8 v2 = indir_0x00d;
+
+	if (i->index)
+		return -EINVAL;
+
+	i->type = V4L2_INPUT_TYPE_CAMERA;
+	snprintf(i->name, sizeof(i->name), "Encoder %d", input->nr);
+	i->std = TW5864_NORMS;
+	if (v1 & (1 << 7))
+		i->status |= V4L2_IN_ST_NO_SYNC;
+	if (!(v1 & (1 << 6)))
+		i->status |= V4L2_IN_ST_NO_H_LOCK;
+	if (v1 & (1 << 2))
+		i->status |= V4L2_IN_ST_NO_SIGNAL;
+	if (v1 & (1 << 1))
+		i->status |= V4L2_IN_ST_NO_COLOR;
+	if (v2 & (1 << 2))
+		i->status |= V4L2_IN_ST_MACROVISION;
+
+	return 0;
+}
+
+static int tw5864_g_input(struct file *file, void *priv, unsigned int *i)
+{
+	*i = 0;
+	return 0;
+}
+
+static int tw5864_s_input(struct file *file, void *priv, unsigned int i)
+{
+	if (i)
+		return -EINVAL;
+	return 0;
+}
+
+static int tw5864_querycap(struct file *file, void *priv,
+			   struct v4l2_capability *cap)
+{
+	struct tw5864_input *input = video_drvdata(file);
+
+	strcpy(cap->driver, "tw5864");
+	snprintf(cap->card, sizeof(cap->card), "TW5864 Encoder %d",
+		 input->nr);
+	sprintf(cap->bus_info, "PCI:%s", pci_name(input->root->pci));
+	return 0;
+}
+
+static int tw5864_querystd(struct file *file, void *priv, v4l2_std_id *std)
+{
+	struct tw5864_input *input = video_drvdata(file);
+	enum tw5864_vid_std tw_std;
+	int ret;
+
+	ret = tw5864_input_std_get(input, &tw_std);
+	if (ret)
+		return ret;
+	*std = tw5864_get_v4l2_std(tw_std);
+
+	return 0;
+}
+
+static int tw5864_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+	struct tw5864_input *input = video_drvdata(file);
+
+	*std = input->v4l2_std;
+	return 0;
+}
+
+static int tw5864_s_std(struct file *file, void *priv, v4l2_std_id std)
+{
+	struct tw5864_input *input = video_drvdata(file);
+	struct tw5864_dev *dev = input->root;
+
+	input->v4l2_std = std;
+	input->std = tw5864_from_v4l2_std(std);
+	tw_indir_writeb(TW5864_INDIR_VIN_E(input->nr), input->std);
+	return 0;
+}
+
+static int tw5864_enum_fmt_vid_cap(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	if (f->index)
+		return -EINVAL;
+
+	f->pixelformat = V4L2_PIX_FMT_H264;
+
+	return 0;
+}
+
+static int tw5864_subscribe_event(struct v4l2_fh *fh,
+				  const struct v4l2_event_subscription *sub)
+{
+	switch (sub->type) {
+	case V4L2_EVENT_CTRL:
+		return v4l2_ctrl_subscribe_event(fh, sub);
+	case V4L2_EVENT_MOTION_DET:
+		/*
+		 * Allow for up to 30 events (1 second for NTSC) to be stored.
+		 */
+		return v4l2_event_subscribe(fh, sub, 30, NULL);
+	}
+	return -EINVAL;
+}
+
+static void tw5864_frame_interval_set(struct tw5864_input *input)
+{
+	/*
+	 * This register value seems to follow such approach: In each second
+	 * interval, when processing Nth frame, it checks Nth bit of register
+	 * value and, if the bit is 1, it processes the frame, otherwise the
+	 * frame is discarded.
+	 * So unary representation would work, but more or less equal gaps
+	 * between the frames should be preserved.
+	 *
+	 * For 1 FPS - 0x00000001
+	 * 00000000 00000000 00000000 00000001
+	 *
+	 * For max FPS - set all 25/30 lower bits:
+	 * 00111111 11111111 11111111 11111111 (NTSC)
+	 * 00000001 11111111 11111111 11111111 (PAL)
+	 *
+	 * For half of max FPS - use such pattern:
+	 * 00010101 01010101 01010101 01010101 (NTSC)
+	 * 00000001 01010101 01010101 01010101 (PAL)
+	 *
+	 * Et cetera.
+	 *
+	 * The value supplied to hardware is capped by mask of 25/30 lower bits.
+	 */
+	struct tw5864_dev *dev = input->root;
+	u32 unary_framerate = 0;
+	int shift = 0;
+	int std_max_fps = input->std == STD_NTSC ? 30 : 25;
+
+	for (shift = 0; shift < std_max_fps; shift += input->frame_interval)
+		unary_framerate |= 0x00000001 << shift;
+
+	tw_writel(TW5864_H264EN_RATE_CNTL_LO_WORD(input->nr, 0),
+		  unary_framerate >> 16);
+	tw_writel(TW5864_H264EN_RATE_CNTL_HI_WORD(input->nr, 0),
+		  unary_framerate & 0xffff);
+}
+
+static int tw5864_frameinterval_get(struct tw5864_input *input,
+				    struct v4l2_fract *frameinterval)
+{
+	switch (input->std) {
+	case STD_NTSC:
+		frameinterval->numerator = 1001;
+		frameinterval->denominator = 30000;
+		break;
+	case STD_PAL:
+	case STD_SECAM:
+		frameinterval->numerator = 1;
+		frameinterval->denominator = 25;
+		break;
+	default:
+		WARN(1, "tw5864_frameinterval_get requested for unknown std %d\n",
+		     input->std);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int tw5864_enum_framesizes(struct file *file, void *priv,
+				  struct v4l2_frmsizeenum *fsize)
+{
+	struct tw5864_input *input = video_drvdata(file);
+
+	if (fsize->index > 0)
+		return -EINVAL;
+	if (fsize->pixel_format != V4L2_PIX_FMT_H264)
+		return -EINVAL;
+
+	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+	fsize->discrete.width = 720;
+	fsize->discrete.height = input->std == STD_NTSC ? 480 : 576;
+
+	return 0;
+}
+
+static int tw5864_enum_frameintervals(struct file *file, void *priv,
+				      struct v4l2_frmivalenum *fintv)
+{
+	struct tw5864_input *input = video_drvdata(file);
+	struct v4l2_fract frameinterval;
+	int std_max_fps = input->std == STD_NTSC ? 30 : 25;
+	struct v4l2_frmsizeenum fsize = { .index = fintv->index,
+		.pixel_format = fintv->pixel_format };
+	int ret;
+
+	ret = tw5864_enum_framesizes(file, priv, &fsize);
+	if (ret)
+		return ret;
+
+	if (fintv->width != fsize.discrete.width ||
+	    fintv->height != fsize.discrete.height)
+		return -EINVAL;
+
+	fintv->type = V4L2_FRMIVAL_TYPE_STEPWISE;
+
+	ret = tw5864_frameinterval_get(input, &frameinterval);
+	fintv->stepwise.step = frameinterval;
+	fintv->stepwise.min = frameinterval;
+	fintv->stepwise.max = frameinterval;
+	fintv->stepwise.max.numerator *= std_max_fps;
+
+	return ret;
+}
+
+static int tw5864_g_parm(struct file *file, void *priv,
+			 struct v4l2_streamparm *sp)
+{
+	struct tw5864_input *input = video_drvdata(file);
+	struct v4l2_captureparm *cp = &sp->parm.capture;
+	int ret;
+
+	cp->capability = V4L2_CAP_TIMEPERFRAME;
+
+	ret = tw5864_frameinterval_get(input, &cp->timeperframe);
+	cp->timeperframe.numerator *= input->frame_interval;
+	cp->capturemode = 0;
+	cp->readbuffers = 2;
+
+	return ret;
+}
+
+static int tw5864_s_parm(struct file *file, void *priv,
+			 struct v4l2_streamparm *sp)
+{
+	struct tw5864_input *input = video_drvdata(file);
+	struct v4l2_fract *t = &sp->parm.capture.timeperframe;
+	struct v4l2_fract time_base;
+	int ret;
+
+	ret = tw5864_frameinterval_get(input, &time_base);
+	if (ret)
+		return ret;
+
+	if (!t->numerator || !t->denominator) {
+		t->numerator = time_base.numerator * input->frame_interval;
+		t->denominator = time_base.denominator;
+	} else if (t->denominator != time_base.denominator) {
+		t->numerator = t->numerator * time_base.denominator /
+			t->denominator;
+		t->denominator = time_base.denominator;
+	}
+
+	input->frame_interval = t->numerator / time_base.numerator;
+	if (input->frame_interval < 1)
+		input->frame_interval = 1;
+	tw5864_frame_interval_set(input);
+	return tw5864_g_parm(file, priv, sp);
+}
+
+static const struct v4l2_ctrl_ops tw5864_ctrl_ops = {
+	.s_ctrl = tw5864_s_ctrl,
+};
+
+static const struct v4l2_file_operations video_fops = {
+	.owner = THIS_MODULE,
+	.open = v4l2_fh_open,
+	.release = vb2_fop_release,
+	.read = vb2_fop_read,
+	.poll = vb2_fop_poll,
+	.mmap = vb2_fop_mmap,
+	.unlocked_ioctl = video_ioctl2,
+};
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+
+#define INDIR_SPACE_MAP_SHIFT 0x100000
+
+static int tw5864_g_reg(struct file *file, void *fh,
+			struct v4l2_dbg_register *reg)
+{
+	struct tw5864_input *input = video_drvdata(file);
+	struct tw5864_dev *dev = input->root;
+
+	if (reg->reg < INDIR_SPACE_MAP_SHIFT) {
+		if (reg->reg > 0x87fff)
+			return -EINVAL;
+		reg->size = 4;
+		reg->val = tw_readl(reg->reg);
+	} else {
+		__u64 indir_addr = reg->reg - INDIR_SPACE_MAP_SHIFT;
+
+		if (indir_addr > 0xefe)
+			return -EINVAL;
+		reg->size = 1;
+		reg->val = tw_indir_readb(reg->reg);
+	}
+	return 0;
+}
+
+static int tw5864_s_reg(struct file *file, void *fh,
+			const struct v4l2_dbg_register *reg)
+{
+	struct tw5864_input *input = video_drvdata(file);
+	struct tw5864_dev *dev = input->root;
+
+	if (reg->reg < INDIR_SPACE_MAP_SHIFT) {
+		if (reg->reg > 0x87fff)
+			return -EINVAL;
+		tw_writel(reg->reg, reg->val);
+	} else {
+		__u64 indir_addr = reg->reg - INDIR_SPACE_MAP_SHIFT;
+
+		if (indir_addr > 0xefe)
+			return -EINVAL;
+		tw_indir_writeb(reg->reg, reg->val);
+	}
+	return 0;
+}
+#endif
+
+static const struct v4l2_ioctl_ops video_ioctl_ops = {
+	.vidioc_querycap = tw5864_querycap,
+	.vidioc_enum_fmt_vid_cap = tw5864_enum_fmt_vid_cap,
+	.vidioc_reqbufs = vb2_ioctl_reqbufs,
+	.vidioc_create_bufs = vb2_ioctl_create_bufs,
+	.vidioc_querybuf = vb2_ioctl_querybuf,
+	.vidioc_qbuf = vb2_ioctl_qbuf,
+	.vidioc_dqbuf = vb2_ioctl_dqbuf,
+	.vidioc_expbuf = vb2_ioctl_expbuf,
+	.vidioc_querystd = tw5864_querystd,
+	.vidioc_s_std = tw5864_s_std,
+	.vidioc_g_std = tw5864_g_std,
+	.vidioc_enum_input = tw5864_enum_input,
+	.vidioc_g_input = tw5864_g_input,
+	.vidioc_s_input = tw5864_s_input,
+	.vidioc_streamon = vb2_ioctl_streamon,
+	.vidioc_streamoff = vb2_ioctl_streamoff,
+	.vidioc_try_fmt_vid_cap = tw5864_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap = tw5864_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap = tw5864_fmt_vid_cap,
+	.vidioc_log_status = v4l2_ctrl_log_status,
+	.vidioc_subscribe_event = tw5864_subscribe_event,
+	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+	.vidioc_enum_framesizes = tw5864_enum_framesizes,
+	.vidioc_enum_frameintervals = tw5864_enum_frameintervals,
+	.vidioc_s_parm = tw5864_s_parm,
+	.vidioc_g_parm = tw5864_g_parm,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.vidioc_g_register = tw5864_g_reg,
+	.vidioc_s_register = tw5864_s_reg,
+#endif
+};
+
+static const struct video_device tw5864_video_template = {
+	.name = "tw5864_video",
+	.fops = &video_fops,
+	.ioctl_ops = &video_ioctl_ops,
+	.release = video_device_release_empty,
+	.tvnorms = TW5864_NORMS,
+	.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+		V4L2_CAP_STREAMING,
+};
+
+/* Motion Detection Threshold matrix */
+static const struct v4l2_ctrl_config tw5864_md_thresholds = {
+	.ops = &tw5864_ctrl_ops,
+	.id = V4L2_CID_DETECT_MD_THRESHOLD_GRID,
+	.dims = {MD_CELLS_HOR, MD_CELLS_VERT},
+	.def = 14,
+	/* See tw5864_md_metric_from_mvd() */
+	.max = 2 * 0x0f,
+	.step = 1,
+};
+
+static int tw5864_video_input_init(struct tw5864_input *dev, int video_nr);
+static void tw5864_video_input_fini(struct tw5864_input *dev);
+static void tw5864_encoder_tables_upload(struct tw5864_dev *dev);
+
+int tw5864_video_init(struct tw5864_dev *dev, int *video_nr)
+{
+	int i;
+	int ret;
+	unsigned long flags;
+	int last_dma_allocated = -1;
+	int last_input_nr_registered = -1;
+
+	for (i = 0; i < H264_BUF_CNT; i++) {
+		struct tw5864_h264_frame *frame = &dev->h264_buf[i];
+
+		frame->vlc.addr = dma_alloc_coherent(&dev->pci->dev,
+						     H264_VLC_BUF_SIZE,
+						     &frame->vlc.dma_addr,
+						     GFP_KERNEL | GFP_DMA32);
+		if (!frame->vlc.addr) {
+			dev_err(&dev->pci->dev, "dma alloc fail\n");
+			ret = -ENOMEM;
+			goto free_dma;
+		}
+		frame->mv.addr = dma_alloc_coherent(&dev->pci->dev,
+						    H264_MV_BUF_SIZE,
+						    &frame->mv.dma_addr,
+						    GFP_KERNEL | GFP_DMA32);
+		if (!frame->mv.addr) {
+			dev_err(&dev->pci->dev, "dma alloc fail\n");
+			ret = -ENOMEM;
+			dma_free_coherent(&dev->pci->dev, H264_VLC_BUF_SIZE,
+					  frame->vlc.addr, frame->vlc.dma_addr);
+			goto free_dma;
+		}
+		last_dma_allocated = i;
+	}
+
+	tw5864_encoder_tables_upload(dev);
+
+	/* Picture is distorted without this block */
+	/* use falling edge to sample 54M to 108M */
+	tw_indir_writeb(TW5864_INDIR_VD_108_POL, TW5864_INDIR_VD_108_POL_BOTH);
+	tw_indir_writeb(TW5864_INDIR_CLK0_SEL, 0x00);
+
+	tw_indir_writeb(TW5864_INDIR_DDRA_DLL_DQS_SEL0, 0x02);
+	tw_indir_writeb(TW5864_INDIR_DDRA_DLL_DQS_SEL1, 0x02);
+	tw_indir_writeb(TW5864_INDIR_DDRA_DLL_CLK90_SEL, 0x02);
+	tw_indir_writeb(TW5864_INDIR_DDRB_DLL_DQS_SEL0, 0x02);
+	tw_indir_writeb(TW5864_INDIR_DDRB_DLL_DQS_SEL1, 0x02);
+	tw_indir_writeb(TW5864_INDIR_DDRB_DLL_CLK90_SEL, 0x02);
+
+	/* video input reset */
+	tw_indir_writeb(TW5864_INDIR_RESET, 0);
+	tw_indir_writeb(TW5864_INDIR_RESET, TW5864_INDIR_RESET_VD |
+			TW5864_INDIR_RESET_DLL | TW5864_INDIR_RESET_MUX_CORE);
+	msleep(20);
+
+	/*
+	 * Select Part A mode for all channels.
+	 * tw_setl instead of tw_clearl for Part B mode.
+	 *
+	 * I guess "Part B" is primarily for downscaled version of same channel
+	 * which goes in Part A of same bus
+	 */
+	tw_writel(TW5864_FULL_HALF_MODE_SEL, 0);
+
+	tw_indir_writeb(TW5864_INDIR_PV_VD_CK_POL,
+			TW5864_INDIR_PV_VD_CK_POL_VD(0) |
+			TW5864_INDIR_PV_VD_CK_POL_VD(1) |
+			TW5864_INDIR_PV_VD_CK_POL_VD(2) |
+			TW5864_INDIR_PV_VD_CK_POL_VD(3));
+
+	spin_lock_irqsave(&dev->slock, flags);
+	dev->encoder_busy = 0;
+	dev->h264_buf_r_index = 0;
+	dev->h264_buf_w_index = 0;
+	tw_writel(TW5864_VLC_STREAM_BASE_ADDR,
+		  dev->h264_buf[dev->h264_buf_w_index].vlc.dma_addr);
+	tw_writel(TW5864_MV_STREAM_BASE_ADDR,
+		  dev->h264_buf[dev->h264_buf_w_index].mv.dma_addr);
+	spin_unlock_irqrestore(&dev->slock, flags);
+
+	tw_writel(TW5864_SEN_EN_CH, 0x000f);
+	tw_writel(TW5864_H264EN_CH_EN, 0x000f);
+
+	tw_writel(TW5864_H264EN_BUS0_MAP, 0x00000000);
+	tw_writel(TW5864_H264EN_BUS1_MAP, 0x00001111);
+	tw_writel(TW5864_H264EN_BUS2_MAP, 0x00002222);
+	tw_writel(TW5864_H264EN_BUS3_MAP, 0x00003333);
+
+	/*
+	 * Quote from Intersil (manufacturer):
+	 * 0x0038 is managed by HW, and by default it won't pass the pointer set
+	 * at 0x0010. So if you don't do encoding, 0x0038 should stay at '3'
+	 * (with 4 frames in buffer). If you encode one frame and then move
+	 * 0x0010 to '1' for example, HW will take one more frame and set it to
+	 * buffer #0, and then you should see 0x0038 is set to '0'.  There is
+	 * only one HW encoder engine, so 4 channels cannot get encoded
+	 * simultaneously. But each channel does have its own buffer (for
+	 * original frames and reconstructed frames). So there is no problem to
+	 * manage encoding for 4 channels at same time and no need to force
+	 * I-frames in switching channels.
+	 * End of quote.
+	 *
+	 * If we set 0x0010 (TW5864_ENC_BUF_PTR_REC1) to 0 (for any channel), we
+	 * have no "rolling" (until we change this value).
+	 * If we set 0x0010 (TW5864_ENC_BUF_PTR_REC1) to 0x3, it starts to roll
+	 * continuously together with 0x0038.
+	 */
+	tw_writel(TW5864_ENC_BUF_PTR_REC1, 0x00ff);
+	tw_writel(TW5864_PCI_INTTM_SCALE, 0);
+
+	tw_writel(TW5864_INTERLACING, TW5864_DI_EN);
+	tw_writel(TW5864_MASTER_ENB_REG, TW5864_PCI_VLC_INTR_ENB);
+	tw_writel(TW5864_PCI_INTR_CTL,
+		  TW5864_TIMER_INTR_ENB | TW5864_PCI_MAST_ENB |
+		  TW5864_MVD_VLC_MAST_ENB);
+
+	dev->irqmask |= TW5864_INTR_VLC_DONE | TW5864_INTR_TIMER;
+	tw5864_irqmask_apply(dev);
+
+	tasklet_init(&dev->tasklet, tw5864_handle_frame_task,
+		     (unsigned long)dev);
+
+	for (i = 0; i < TW5864_INPUTS; i++) {
+		dev->inputs[i].root = dev;
+		dev->inputs[i].nr = i;
+		ret = tw5864_video_input_init(&dev->inputs[i], video_nr[i]);
+		if (ret)
+			goto fini_video_inputs;
+		last_input_nr_registered = i;
+	}
+
+	return 0;
+
+fini_video_inputs:
+	for (i = last_input_nr_registered; i >= 0; i--)
+		tw5864_video_input_fini(&dev->inputs[i]);
+
+	tasklet_kill(&dev->tasklet);
+
+free_dma:
+	for (i = last_dma_allocated; i >= 0; i--) {
+		dma_free_coherent(&dev->pci->dev, H264_VLC_BUF_SIZE,
+				  dev->h264_buf[i].vlc.addr,
+				  dev->h264_buf[i].vlc.dma_addr);
+		dma_free_coherent(&dev->pci->dev, H264_MV_BUF_SIZE,
+				  dev->h264_buf[i].mv.addr,
+				  dev->h264_buf[i].mv.dma_addr);
+	}
+
+	return ret;
+}
+
+static int tw5864_video_input_init(struct tw5864_input *input, int video_nr)
+{
+	struct tw5864_dev *dev = input->root;
+	int ret;
+	struct v4l2_ctrl_handler *hdl = &input->hdl;
+
+	mutex_init(&input->lock);
+	spin_lock_init(&input->slock);
+
+	/* setup video buffers queue */
+	INIT_LIST_HEAD(&input->active);
+	input->vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	input->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+	input->vidq.io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
+	input->vidq.ops = &tw5864_video_qops;
+	input->vidq.mem_ops = &vb2_dma_contig_memops;
+	input->vidq.drv_priv = input;
+	input->vidq.gfp_flags = 0;
+	input->vidq.buf_struct_size = sizeof(struct tw5864_buf);
+	input->vidq.lock = &input->lock;
+	input->vidq.min_buffers_needed = 2;
+	input->vidq.dev = &input->root->pci->dev;
+	ret = vb2_queue_init(&input->vidq);
+	if (ret)
+		goto free_mutex;
+
+	input->vdev = tw5864_video_template;
+	input->vdev.v4l2_dev = &input->root->v4l2_dev;
+	input->vdev.lock = &input->lock;
+	input->vdev.queue = &input->vidq;
+	video_set_drvdata(&input->vdev, input);
+
+	/* Initialize the device control structures */
+	v4l2_ctrl_handler_init(hdl, 6);
+	v4l2_ctrl_new_std(hdl, &tw5864_ctrl_ops,
+			  V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
+	v4l2_ctrl_new_std(hdl, &tw5864_ctrl_ops,
+			  V4L2_CID_CONTRAST, 0, 255, 1, 100);
+	v4l2_ctrl_new_std(hdl, &tw5864_ctrl_ops,
+			  V4L2_CID_SATURATION, 0, 255, 1, 128);
+	v4l2_ctrl_new_std(hdl, &tw5864_ctrl_ops, V4L2_CID_HUE, -128, 127, 1, 0);
+	v4l2_ctrl_new_std(hdl, &tw5864_ctrl_ops, V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+			  1, MAX_GOP_SIZE, 1, GOP_SIZE);
+	v4l2_ctrl_new_std(hdl, &tw5864_ctrl_ops,
+			  V4L2_CID_MPEG_VIDEO_H264_MIN_QP, 28, 51, 1, QP_VALUE);
+	v4l2_ctrl_new_std_menu(hdl, &tw5864_ctrl_ops,
+			       V4L2_CID_DETECT_MD_MODE,
+			       V4L2_DETECT_MD_MODE_THRESHOLD_GRID, 0,
+			       V4L2_DETECT_MD_MODE_DISABLED);
+	v4l2_ctrl_new_std(hdl, &tw5864_ctrl_ops,
+			  V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD,
+			  tw5864_md_thresholds.min, tw5864_md_thresholds.max,
+			  tw5864_md_thresholds.step, tw5864_md_thresholds.def);
+	input->md_threshold_grid_ctrl =
+		v4l2_ctrl_new_custom(hdl, &tw5864_md_thresholds, NULL);
+	if (hdl->error) {
+		ret = hdl->error;
+		goto free_v4l2_hdl;
+	}
+	input->vdev.ctrl_handler = hdl;
+	v4l2_ctrl_handler_setup(hdl);
+
+	input->qp = QP_VALUE;
+	input->gop = GOP_SIZE;
+	input->frame_interval = 1;
+
+	ret = video_register_device(&input->vdev, VFL_TYPE_GRABBER, video_nr);
+	if (ret)
+		goto free_v4l2_hdl;
+
+	dev_info(&input->root->pci->dev, "Registered video device %s\n",
+		 video_device_node_name(&input->vdev));
+
+	/*
+	 * Set default video standard. Doesn't matter which, the detected value
+	 * will be found out by VIDIOC_QUERYSTD handler.
+	 */
+	input->v4l2_std = V4L2_STD_NTSC_M;
+	input->std = STD_NTSC;
+
+	tw_indir_writeb(TW5864_INDIR_VIN_E(video_nr), 0x07);
+	/* to initiate auto format recognition */
+	tw_indir_writeb(TW5864_INDIR_VIN_F(video_nr), 0xff);
+
+	return 0;
+
+free_v4l2_hdl:
+	v4l2_ctrl_handler_free(hdl);
+	vb2_queue_release(&input->vidq);
+free_mutex:
+	mutex_destroy(&input->lock);
+
+	return ret;
+}
+
+static void tw5864_video_input_fini(struct tw5864_input *dev)
+{
+	video_unregister_device(&dev->vdev);
+	v4l2_ctrl_handler_free(&dev->hdl);
+	vb2_queue_release(&dev->vidq);
+}
+
+void tw5864_video_fini(struct tw5864_dev *dev)
+{
+	int i;
+
+	tasklet_kill(&dev->tasklet);
+
+	for (i = 0; i < TW5864_INPUTS; i++)
+		tw5864_video_input_fini(&dev->inputs[i]);
+
+	for (i = 0; i < H264_BUF_CNT; i++) {
+		dma_free_coherent(&dev->pci->dev, H264_VLC_BUF_SIZE,
+				  dev->h264_buf[i].vlc.addr,
+				  dev->h264_buf[i].vlc.dma_addr);
+		dma_free_coherent(&dev->pci->dev, H264_MV_BUF_SIZE,
+				  dev->h264_buf[i].mv.addr,
+				  dev->h264_buf[i].mv.dma_addr);
+	}
+}
+
+void tw5864_prepare_frame_headers(struct tw5864_input *input)
+{
+	struct tw5864_buf *vb = input->vb;
+	u8 *dst;
+	size_t dst_space;
+	unsigned long flags;
+
+	if (!vb) {
+		spin_lock_irqsave(&input->slock, flags);
+		if (list_empty(&input->active)) {
+			spin_unlock_irqrestore(&input->slock, flags);
+			input->vb = NULL;
+			return;
+		}
+		vb = list_first_entry(&input->active, struct tw5864_buf, list);
+		list_del(&vb->list);
+		spin_unlock_irqrestore(&input->slock, flags);
+	}
+
+	dst = vb2_plane_vaddr(&vb->vb.vb2_buf, 0);
+	dst_space = vb2_plane_size(&vb->vb.vb2_buf, 0);
+
+	/*
+	 * Low-level bitstream writing functions don't have a fine way to say
+	 * correctly that supplied buffer is too small. So we just check there
+	 * and warn, and don't care at lower level.
+	 * Currently all headers take below 32 bytes.
+	 * The buffer is supposed to have plenty of free space at this point,
+	 * anyway.
+	 */
+	if (WARN_ON_ONCE(dst_space < 128))
+		return;
+
+	/*
+	 * Generate H264 headers:
+	 * If this is first frame, put SPS and PPS
+	 */
+	if (input->frame_gop_seqno == 0)
+		tw5864_h264_put_stream_header(&dst, &dst_space, input->qp,
+					      input->width, input->height);
+
+	/* Put slice header */
+	tw5864_h264_put_slice_header(&dst, &dst_space, input->h264_idr_pic_id,
+				     input->frame_gop_seqno,
+				     &input->tail_nb_bits, &input->tail);
+	input->vb = vb;
+	input->buf_cur_ptr = dst;
+	input->buf_cur_space_left = dst_space;
+}
+
+/*
+ * Returns heuristic motion detection metric value from known components of
+ * hardware-provided Motion Vector Data.
+ */
+static unsigned int tw5864_md_metric_from_mvd(u32 mvd)
+{
+	/*
+	 * Format of motion vector data exposed by tw5864, according to
+	 * manufacturer:
+	 * mv_x 10 bits
+	 * mv_y 10 bits
+	 * non_zero_members 8 bits
+	 * mb_type 3 bits
+	 * reserved 1 bit
+	 *
+	 * non_zero_members: number of non-zero residuals in each macro block
+	 * after quantization
+	 *
+	 * unsigned int reserved = mvd >> 31;
+	 * unsigned int mb_type = (mvd >> 28) & 0x7;
+	 * unsigned int non_zero_members = (mvd >> 20) & 0xff;
+	 */
+	unsigned int mv_y = (mvd >> 10) & 0x3ff;
+	unsigned int mv_x = mvd & 0x3ff;
+
+	/* heuristic: */
+	mv_x &= 0x0f;
+	mv_y &= 0x0f;
+
+	return mv_y + mv_x;
+}
+
+static int tw5864_is_motion_triggered(struct tw5864_h264_frame *frame)
+{
+	struct tw5864_input *input = frame->input;
+	u32 *mv = (u32 *)frame->mv.addr;
+	int i;
+	int detected = 0;
+
+	for (i = 0; i < MD_CELLS; i++) {
+		const u16 thresh = input->md_threshold_grid_values[i];
+		const unsigned int metric = tw5864_md_metric_from_mvd(mv[i]);
+
+		if (metric > thresh)
+			detected = 1;
+
+		if (detected)
+			break;
+	}
+	return detected;
+}
+
+static void tw5864_handle_frame_task(unsigned long data)
+{
+	struct tw5864_dev *dev = (struct tw5864_dev *)data;
+	unsigned long flags;
+	int batch_size = H264_BUF_CNT;
+
+	spin_lock_irqsave(&dev->slock, flags);
+	while (dev->h264_buf_r_index != dev->h264_buf_w_index && batch_size--) {
+		struct tw5864_h264_frame *frame =
+			&dev->h264_buf[dev->h264_buf_r_index];
+
+		spin_unlock_irqrestore(&dev->slock, flags);
+		dma_sync_single_for_cpu(&dev->pci->dev, frame->vlc.dma_addr,
+					H264_VLC_BUF_SIZE, DMA_FROM_DEVICE);
+		dma_sync_single_for_cpu(&dev->pci->dev, frame->mv.dma_addr,
+					H264_MV_BUF_SIZE, DMA_FROM_DEVICE);
+		tw5864_handle_frame(frame);
+		dma_sync_single_for_device(&dev->pci->dev, frame->vlc.dma_addr,
+					   H264_VLC_BUF_SIZE, DMA_FROM_DEVICE);
+		dma_sync_single_for_device(&dev->pci->dev, frame->mv.dma_addr,
+					   H264_MV_BUF_SIZE, DMA_FROM_DEVICE);
+		spin_lock_irqsave(&dev->slock, flags);
+
+		dev->h264_buf_r_index++;
+		dev->h264_buf_r_index %= H264_BUF_CNT;
+	}
+	spin_unlock_irqrestore(&dev->slock, flags);
+}
+
+#ifdef DEBUG
+static u32 tw5864_vlc_checksum(u32 *data, int len)
+{
+	u32 val, count_len = len;
+
+	val = *data++;
+	while (((count_len >> 2) - 1) > 0) {
+		val ^= *data++;
+		count_len -= 4;
+	}
+	val ^= htonl((len >> 2));
+	return val;
+}
+#endif
+
+static void tw5864_handle_frame(struct tw5864_h264_frame *frame)
+{
+#define SKIP_VLCBUF_BYTES 3
+	struct tw5864_input *input = frame->input;
+	struct tw5864_dev *dev = input->root;
+	struct tw5864_buf *vb;
+	struct vb2_v4l2_buffer *v4l2_buf;
+	int frame_len = frame->vlc_len - SKIP_VLCBUF_BYTES;
+	u8 *dst = input->buf_cur_ptr;
+	u8 tail_mask, vlc_mask = 0;
+	int i;
+	u8 vlc_first_byte = ((u8 *)(frame->vlc.addr + SKIP_VLCBUF_BYTES))[0];
+	unsigned long flags;
+	int zero_run;
+	u8 *src;
+	u8 *src_end;
+
+#ifdef DEBUG
+	if (frame->checksum !=
+	    tw5864_vlc_checksum((u32 *)frame->vlc.addr, frame_len))
+		dev_err(&dev->pci->dev,
+			"Checksum of encoded frame doesn't match!\n");
+#endif
+
+	spin_lock_irqsave(&input->slock, flags);
+	vb = input->vb;
+	input->vb = NULL;
+	spin_unlock_irqrestore(&input->slock, flags);
+
+	v4l2_buf = to_vb2_v4l2_buffer(&vb->vb.vb2_buf);
+
+	if (!vb) { /* Gone because of disabling */
+		dev_dbg(&dev->pci->dev, "vb is empty, dropping frame\n");
+		return;
+	}
+
+	/*
+	 * Check for space.
+	 * Mind the overhead of startcode emulation prevention.
+	 */
+	if (input->buf_cur_space_left < frame_len * 5 / 4) {
+		dev_err_once(&dev->pci->dev,
+			     "Left space in vb2 buffer, %d bytes, is less than considered safely enough to put frame of length %d. Dropping this frame.\n",
+			     input->buf_cur_space_left, frame_len);
+		return;
+	}
+
+	for (i = 0; i < 8 - input->tail_nb_bits; i++)
+		vlc_mask |= 1 << i;
+	tail_mask = (~vlc_mask) & 0xff;
+
+	dst[0] = (input->tail & tail_mask) | (vlc_first_byte & vlc_mask);
+	frame_len--;
+	dst++;
+
+	/* H.264 startcode emulation prevention */
+	src = frame->vlc.addr + SKIP_VLCBUF_BYTES + 1;
+	src_end = src + frame_len;
+	zero_run = 0;
+	for (; src < src_end; src++) {
+		if (zero_run < 2) {
+			if (*src == 0)
+				++zero_run;
+			else
+				zero_run = 0;
+		} else {
+			if ((*src & ~0x03) == 0)
+				*dst++ = 0x03;
+			zero_run = *src == 0;
+		}
+		*dst++ = *src;
+	}
+
+	vb2_set_plane_payload(&vb->vb.vb2_buf, 0,
+			      dst - (u8 *)vb2_plane_vaddr(&vb->vb.vb2_buf, 0));
+
+	vb->vb.vb2_buf.timestamp = frame->timestamp;
+	v4l2_buf->field = V4L2_FIELD_INTERLACED;
+	v4l2_buf->sequence = frame->seqno;
+
+	/* Check for motion flags */
+	if (frame->gop_seqno /* P-frame */ &&
+	    tw5864_is_motion_triggered(frame)) {
+		struct v4l2_event ev = {
+			.type = V4L2_EVENT_MOTION_DET,
+			.u.motion_det = {
+				.flags = V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ,
+				.frame_sequence = v4l2_buf->sequence,
+			},
+		};
+
+		v4l2_event_queue(&input->vdev, &ev);
+	}
+
+	vb2_buffer_done(&vb->vb.vb2_buf, VB2_BUF_STATE_DONE);
+}
+
+static v4l2_std_id tw5864_get_v4l2_std(enum tw5864_vid_std std)
+{
+	switch (std) {
+	case STD_NTSC:    return V4L2_STD_NTSC_M;
+	case STD_PAL:     return V4L2_STD_PAL_B;
+	case STD_SECAM:   return V4L2_STD_SECAM_B;
+	case STD_NTSC443: return V4L2_STD_NTSC_443;
+	case STD_PAL_M:   return V4L2_STD_PAL_M;
+	case STD_PAL_CN:  return V4L2_STD_PAL_Nc;
+	case STD_PAL_60:  return V4L2_STD_PAL_60;
+	case STD_INVALID: return V4L2_STD_UNKNOWN;
+	}
+	return 0;
+}
+
+static enum tw5864_vid_std tw5864_from_v4l2_std(v4l2_std_id v4l2_std)
+{
+	if (v4l2_std & V4L2_STD_NTSC_M)
+		return STD_NTSC;
+	if (v4l2_std & V4L2_STD_PAL_B)
+		return STD_PAL;
+	if (v4l2_std & V4L2_STD_SECAM_B)
+		return STD_SECAM;
+	if (v4l2_std & V4L2_STD_NTSC_443)
+		return STD_NTSC443;
+	if (v4l2_std & V4L2_STD_PAL_M)
+		return STD_PAL_M;
+	if (v4l2_std & V4L2_STD_PAL_Nc)
+		return STD_PAL_CN;
+	if (v4l2_std & V4L2_STD_PAL_60)
+		return STD_PAL_60;
+
+	return STD_INVALID;
+}
+
+static void tw5864_encoder_tables_upload(struct tw5864_dev *dev)
+{
+	int i;
+
+	tw_writel(TW5864_VLC_RD, 0x1);
+	for (i = 0; i < VLC_LOOKUP_TABLE_LEN; i++) {
+		tw_writel((TW5864_VLC_STREAM_MEM_START + i * 4),
+			  encoder_vlc_lookup_table[i]);
+	}
+	tw_writel(TW5864_VLC_RD, 0x0);
+
+	for (i = 0; i < QUANTIZATION_TABLE_LEN; i++) {
+		tw_writel((TW5864_QUAN_TAB + i * 4),
+			  forward_quantization_table[i]);
+	}
+
+	for (i = 0; i < QUANTIZATION_TABLE_LEN; i++) {
+		tw_writel((TW5864_QUAN_TAB + i * 4),
+			  inverse_quantization_table[i]);
+	}
+}
diff --git a/drivers/media/pci/tw5864/tw5864.h b/drivers/media/pci/tw5864/tw5864.h
new file mode 100644
index 0000000..f5de9f6
--- /dev/null
+++ b/drivers/media/pci/tw5864/tw5864.h
@@ -0,0 +1,205 @@
+/*
+ *  TW5864 driver  - common header file
+ *
+ *  Copyright (C) 2016 Bluecherry, LLC <maintainers@bluecherrydvr.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/pci.h>
+#include <linux/videodev2.h>
+#include <linux/notifier.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "tw5864-reg.h"
+
+#define PCI_DEVICE_ID_TECHWELL_5864 0x5864
+
+#define TW5864_NORMS V4L2_STD_ALL
+
+/* ----------------------------------------------------------- */
+/* card configuration   */
+
+#define TW5864_INPUTS 4
+
+/* The TW5864 uses 192 (16x12) detection cells in full screen for motion
+ * detection. Each detection cell is composed of 44 pixels and 20 lines for
+ * NTSC and 24 lines for PAL.
+ */
+#define MD_CELLS_HOR 16
+#define MD_CELLS_VERT 12
+#define MD_CELLS (MD_CELLS_HOR * MD_CELLS_VERT)
+
+#define H264_VLC_BUF_SIZE 0x80000
+#define H264_MV_BUF_SIZE 0x2000 /* device writes 5396 bytes */
+#define QP_VALUE 28
+#define MAX_GOP_SIZE 255
+#define GOP_SIZE MAX_GOP_SIZE
+
+enum resolution {
+	D1 = 1,
+	HD1 = 2, /* half d1 - 360x(240|288) */
+	CIF = 3,
+	QCIF = 4,
+};
+
+/* ----------------------------------------------------------- */
+/* device / file handle status                                 */
+
+struct tw5864_dev; /* forward delclaration */
+
+/* buffer for one video/vbi/ts frame */
+struct tw5864_buf {
+	struct vb2_v4l2_buffer vb;
+	struct list_head list;
+
+	unsigned int size;
+};
+
+struct tw5864_dma_buf {
+	void *addr;
+	dma_addr_t dma_addr;
+};
+
+enum tw5864_vid_std {
+	STD_NTSC = 0, /* NTSC (M) */
+	STD_PAL = 1, /* PAL (B, D, G, H, I) */
+	STD_SECAM = 2, /* SECAM */
+	STD_NTSC443 = 3, /* NTSC4.43 */
+	STD_PAL_M = 4, /* PAL (M) */
+	STD_PAL_CN = 5, /* PAL (CN) */
+	STD_PAL_60 = 6, /* PAL 60 */
+	STD_INVALID = 7,
+	STD_AUTO = 7,
+};
+
+struct tw5864_input {
+	int nr; /* input number */
+	struct tw5864_dev *root;
+	struct mutex lock; /* used for vidq and vdev */
+	spinlock_t slock; /* used for sync between ISR, tasklet & V4L2 API */
+	struct video_device vdev;
+	struct v4l2_ctrl_handler hdl;
+	struct vb2_queue vidq;
+	struct list_head active;
+	enum resolution resolution;
+	unsigned int width, height;
+	unsigned int frame_seqno;
+	unsigned int frame_gop_seqno;
+	unsigned int h264_idr_pic_id;
+	int enabled;
+	enum tw5864_vid_std std;
+	v4l2_std_id v4l2_std;
+	int tail_nb_bits;
+	u8 tail;
+	u8 *buf_cur_ptr;
+	int buf_cur_space_left;
+
+	u32 reg_interlacing;
+	u32 reg_vlc;
+	u32 reg_dsp_codec;
+	u32 reg_dsp;
+	u32 reg_emu;
+	u32 reg_dsp_qp;
+	u32 reg_dsp_ref_mvp_lambda;
+	u32 reg_dsp_i4x4_weight;
+	u32 buf_id;
+
+	struct tw5864_buf *vb;
+
+	struct v4l2_ctrl *md_threshold_grid_ctrl;
+	u16 md_threshold_grid_values[12 * 16];
+	int qp;
+	int gop;
+
+	/*
+	 * In (1/MAX_FPS) units.
+	 * For max FPS (default), set to 1.
+	 * For 1 FPS, set to e.g. 32.
+	 */
+	int frame_interval;
+	unsigned long new_frame_deadline;
+};
+
+struct tw5864_h264_frame {
+	struct tw5864_dma_buf vlc;
+	struct tw5864_dma_buf mv;
+	int vlc_len;
+	u32 checksum;
+	struct tw5864_input *input;
+	u64 timestamp;
+	unsigned int seqno;
+	unsigned int gop_seqno;
+};
+
+/* global device status */
+struct tw5864_dev {
+	spinlock_t slock; /* used for sync between ISR, tasklet & V4L2 API */
+	struct v4l2_device v4l2_dev;
+	struct tw5864_input inputs[TW5864_INPUTS];
+#define H264_BUF_CNT 4
+	struct tw5864_h264_frame h264_buf[H264_BUF_CNT];
+	int h264_buf_r_index;
+	int h264_buf_w_index;
+
+	struct tasklet_struct tasklet;
+
+	int encoder_busy;
+	/* Input number to check next for ready raw picture (in RR fashion) */
+	int next_input;
+
+	/* pci i/o */
+	char name[64];
+	struct pci_dev *pci;
+	void __iomem *mmio;
+	u32 irqmask;
+};
+
+#define tw_readl(reg) readl(dev->mmio + reg)
+#define tw_mask_readl(reg, mask) \
+	(tw_readl(reg) & (mask))
+#define tw_mask_shift_readl(reg, mask, shift) \
+	(tw_mask_readl((reg), ((mask) << (shift))) >> (shift))
+
+#define tw_writel(reg, value) writel((value), dev->mmio + reg)
+#define tw_mask_writel(reg, mask, value) \
+	tw_writel(reg, (tw_readl(reg) & ~(mask)) | ((value) & (mask)))
+#define tw_mask_shift_writel(reg, mask, shift, value) \
+	tw_mask_writel((reg), ((mask) << (shift)), ((value) << (shift)))
+
+#define tw_setl(reg, bit) tw_writel((reg), tw_readl(reg) | (bit))
+#define tw_clearl(reg, bit) tw_writel((reg), tw_readl(reg) & ~(bit))
+
+u8 tw5864_indir_readb(struct tw5864_dev *dev, u16 addr);
+#define tw_indir_readb(addr) tw5864_indir_readb(dev, addr)
+void tw5864_indir_writeb(struct tw5864_dev *dev, u16 addr, u8 data);
+#define tw_indir_writeb(addr, data) tw5864_indir_writeb(dev, addr, data)
+
+void tw5864_irqmask_apply(struct tw5864_dev *dev);
+int tw5864_video_init(struct tw5864_dev *dev, int *video_nr);
+void tw5864_video_fini(struct tw5864_dev *dev);
+void tw5864_prepare_frame_headers(struct tw5864_input *input);
+void tw5864_h264_put_stream_header(u8 **buf, size_t *space_left, int qp,
+				   int width, int height);
+void tw5864_h264_put_slice_header(u8 **buf, size_t *space_left,
+				  unsigned int idr_pic_id,
+				  unsigned int frame_gop_seqno,
+				  int *tail_nb_bits, u8 *tail);
+void tw5864_request_encoded_frame(struct tw5864_input *input);
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index 5e82128..a45e023 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -535,7 +535,7 @@
 	}
 }
 
-static struct vb2_ops tw68_video_qops = {
+static const struct vb2_ops tw68_video_qops = {
 	.queue_setup	= tw68_queue_setup,
 	.buf_queue	= tw68_buf_queue,
 	.buf_prepare	= tw68_buf_prepare,
diff --git a/drivers/media/pci/tw686x/tw686x-audio.c b/drivers/media/pci/tw686x/tw686x-audio.c
index 96e444c..7719076 100644
--- a/drivers/media/pci/tw686x/tw686x-audio.c
+++ b/drivers/media/pci/tw686x/tw686x-audio.c
@@ -269,7 +269,7 @@
 	return bytes_to_frames(ss->runtime, ac->ptr);
 }
 
-static struct snd_pcm_ops tw686x_pcm_ops = {
+static const struct snd_pcm_ops tw686x_pcm_ops = {
 	.open = tw686x_pcm_open,
 	.close = tw686x_pcm_close,
 	.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c
index cdb16de..c3fafa9 100644
--- a/drivers/media/pci/tw686x/tw686x-video.c
+++ b/drivers/media/pci/tw686x/tw686x-video.c
@@ -577,7 +577,7 @@
 	return 0;
 }
 
-static struct vb2_ops tw686x_video_qops = {
+static const struct vb2_ops tw686x_video_qops = {
 	.queue_setup		= tw686x_queue_setup,
 	.buf_queue		= tw686x_buf_queue,
 	.buf_prepare		= tw686x_buf_prepare,
@@ -672,30 +672,20 @@
 	return 0;
 }
 
-static int tw686x_s_fmt_vid_cap(struct file *file, void *priv,
-				struct v4l2_format *f)
+static int tw686x_set_format(struct tw686x_video_channel *vc,
+			     unsigned int pixelformat, unsigned int width,
+			     unsigned int height, bool realloc)
 {
-	struct tw686x_video_channel *vc = video_drvdata(file);
 	struct tw686x_dev *dev = vc->dev;
-	u32 val, width, line_width, height;
-	unsigned long bitsperframe;
+	u32 val, dma_width, dma_height, dma_line_width;
 	int err, pb;
 
-	if (vb2_is_busy(&vc->vidq))
-		return -EBUSY;
-
-	bitsperframe = vc->width * vc->height * vc->format->depth;
-	err = tw686x_try_fmt_vid_cap(file, priv, f);
-	if (err)
-		return err;
-
-	vc->format = format_by_fourcc(f->fmt.pix.pixelformat);
-	vc->width = f->fmt.pix.width;
-	vc->height = f->fmt.pix.height;
+	vc->format = format_by_fourcc(pixelformat);
+	vc->width = width;
+	vc->height = height;
 
 	/* We need new DMA buffers if the framesize has changed */
-	if (dev->dma_ops->alloc &&
-	    bitsperframe != vc->width * vc->height * vc->format->depth) {
+	if (dev->dma_ops->alloc && realloc) {
 		for (pb = 0; pb < 2; pb++)
 			dev->dma_ops->free(vc, pb);
 
@@ -739,14 +729,36 @@
 	reg_write(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch], val);
 
 	/* Program the DMA frame size */
-	width = (vc->width * 2) & 0x7ff;
-	height = vc->height / 2;
-	line_width = (vc->width * 2) & 0x7ff;
-	val = (height << 22) | (line_width << 11)  | width;
+	dma_width = (vc->width * 2) & 0x7ff;
+	dma_height = vc->height / 2;
+	dma_line_width = (vc->width * 2) & 0x7ff;
+	val = (dma_height << 22) | (dma_line_width << 11)  | dma_width;
 	reg_write(vc->dev, VDMA_WHP[vc->ch], val);
 	return 0;
 }
 
+static int tw686x_s_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+	unsigned long area;
+	bool realloc;
+	int err;
+
+	if (vb2_is_busy(&vc->vidq))
+		return -EBUSY;
+
+	area = vc->width * vc->height;
+	err = tw686x_try_fmt_vid_cap(file, priv, f);
+	if (err)
+		return err;
+
+	realloc = area != (f->fmt.pix.width * f->fmt.pix.height);
+	return tw686x_set_format(vc, f->fmt.pix.pixelformat,
+				 f->fmt.pix.width, f->fmt.pix.height,
+				 realloc);
+}
+
 static int tw686x_querycap(struct file *file, void *priv,
 			   struct v4l2_capability *cap)
 {
@@ -763,17 +775,9 @@
 	return 0;
 }
 
-static int tw686x_s_std(struct file *file, void *priv, v4l2_std_id id)
+static int tw686x_set_standard(struct tw686x_video_channel *vc, v4l2_std_id id)
 {
-	struct tw686x_video_channel *vc = video_drvdata(file);
-	struct v4l2_format f;
-	u32 val, ret;
-
-	if (vc->video_standard == id)
-		return 0;
-
-	if (vb2_is_busy(&vc->vidq))
-		return -EBUSY;
+	u32 val;
 
 	if (id & V4L2_STD_NTSC)
 		val = 0;
@@ -802,14 +806,31 @@
 		val |= (1 << (SYS_MODE_DMA_SHIFT + vc->ch));
 	reg_write(vc->dev, VIDEO_CONTROL1, val);
 
+	return 0;
+}
+
+static int tw686x_s_std(struct file *file, void *priv, v4l2_std_id id)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+	struct v4l2_format f;
+	int ret;
+
+	if (vc->video_standard == id)
+		return 0;
+
+	if (vb2_is_busy(&vc->vidq))
+		return -EBUSY;
+
+	ret = tw686x_set_standard(vc, id);
+	if (ret)
+		return ret;
 	/*
 	 * Adjust format after V4L2_STD_525_60/V4L2_STD_625_50 change,
 	 * calling g_fmt and s_fmt will sanitize the height
 	 * according to the standard.
 	 */
-	ret = tw686x_g_fmt_vid_cap(file, priv, &f);
-	if (!ret)
-		tw686x_s_fmt_vid_cap(file, priv, &f);
+	tw686x_g_fmt_vid_cap(file, priv, &f);
+	tw686x_s_fmt_vid_cap(file, priv, &f);
 
 	/*
 	 * Frame decimation depends on the chosen standard,
@@ -885,6 +906,42 @@
 	return 0;
 }
 
+static int tw686x_enum_framesizes(struct file *file, void *priv,
+				  struct v4l2_frmsizeenum *fsize)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+
+	if (fsize->index)
+		return -EINVAL;
+	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+	fsize->stepwise.max_width = TW686X_VIDEO_WIDTH;
+	fsize->stepwise.min_width = fsize->stepwise.max_width / 2;
+	fsize->stepwise.step_width = fsize->stepwise.min_width;
+	fsize->stepwise.max_height = TW686X_VIDEO_HEIGHT(vc->video_standard);
+	fsize->stepwise.min_height = fsize->stepwise.max_height / 2;
+	fsize->stepwise.step_height = fsize->stepwise.min_height;
+	return 0;
+}
+
+static int tw686x_enum_frameintervals(struct file *file, void *priv,
+				      struct v4l2_frmivalenum *ival)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+	int max_fps = TW686X_MAX_FPS(vc->video_standard);
+	int max_rates = DIV_ROUND_UP(max_fps, 2);
+
+	if (ival->index >= max_rates)
+		return -EINVAL;
+
+	ival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+	ival->discrete.numerator = 1;
+	if (ival->index < (max_rates - 1))
+		ival->discrete.denominator = (ival->index + 1) * 2;
+	else
+		ival->discrete.denominator = max_fps;
+	return 0;
+}
+
 static int tw686x_g_parm(struct file *file, void *priv,
 			 struct v4l2_streamparm *sp)
 {
@@ -928,10 +985,21 @@
 	return 0;
 }
 
+static void tw686x_set_input(struct tw686x_video_channel *vc, unsigned int i)
+{
+	u32 val;
+
+	vc->input = i;
+
+	val = reg_read(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch]);
+	val &= ~(0x3 << 30);
+	val |= i << 30;
+	reg_write(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch], val);
+}
+
 static int tw686x_s_input(struct file *file, void *priv, unsigned int i)
 {
 	struct tw686x_video_channel *vc = video_drvdata(file);
-	u32 val;
 
 	if (i >= TW686X_INPUTS_PER_CH)
 		return -EINVAL;
@@ -943,12 +1011,7 @@
 	if (vb2_is_busy(&vc->vidq))
 		return -EBUSY;
 
-	vc->input = i;
-
-	val = reg_read(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch]);
-	val &= ~(0x3 << 30);
-	val |= i << 30;
-	reg_write(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch], val);
+	tw686x_set_input(vc, i);
 	return 0;
 }
 
@@ -1007,6 +1070,8 @@
 
 	.vidioc_g_parm			= tw686x_g_parm,
 	.vidioc_s_parm			= tw686x_s_parm,
+	.vidioc_enum_framesizes		= tw686x_enum_framesizes,
+	.vidioc_enum_frameintervals	= tw686x_enum_frameintervals,
 
 	.vidioc_enum_input		= tw686x_enum_input,
 	.vidioc_g_input			= tw686x_g_input,
@@ -1093,8 +1158,7 @@
 	for (ch = 0; ch < max_channels(dev); ch++) {
 		struct tw686x_video_channel *vc = &dev->video_channels[ch];
 
-		if (vc->device)
-			video_unregister_device(vc->device);
+		video_unregister_device(vc->device);
 
 		if (dev->dma_ops->free)
 			for (pb = 0; pb < 2; pb++)
@@ -1104,7 +1168,7 @@
 
 int tw686x_video_init(struct tw686x_dev *dev)
 {
-	unsigned int ch, val, pb;
+	unsigned int ch, val;
 	int err;
 
 	if (dev->dma_mode == TW686X_DMA_MODE_MEMCPY)
@@ -1138,27 +1202,23 @@
 		vc->ch = ch;
 
 		/* default settings */
-		vc->format = &formats[0];
-		vc->video_standard = V4L2_STD_NTSC;
-		vc->width = TW686X_VIDEO_WIDTH;
-		vc->height = TW686X_VIDEO_HEIGHT(vc->video_standard);
-		vc->input = 0;
+		err = tw686x_set_standard(vc, V4L2_STD_NTSC);
+		if (err)
+			goto error;
 
-		reg_write(vc->dev, SDT[ch], 0);
+		err = tw686x_set_format(vc, formats[0].fourcc,
+				TW686X_VIDEO_WIDTH,
+				TW686X_VIDEO_HEIGHT(vc->video_standard),
+				true);
+		if (err)
+			goto error;
+
+		tw686x_set_input(vc, 0);
 		tw686x_set_framerate(vc, 30);
-
 		reg_write(dev, VDELAY_LO[ch], 0x14);
 		reg_write(dev, HACTIVE_LO[ch], 0xd0);
 		reg_write(dev, VIDEO_SIZE[ch], 0);
 
-		if (dev->dma_ops->alloc) {
-			for (pb = 0; pb < 2; pb++) {
-				err = dev->dma_ops->alloc(vc, pb);
-				if (err)
-					goto error;
-			}
-		}
-
 		vc->vidq.io_modes = VB2_READ | VB2_MMAP | VB2_DMABUF;
 		vc->vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 		vc->vidq.drv_priv = vc;
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index 80caa70..d6b631a 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -2365,94 +2365,80 @@
 }
 
 /* cropping (sub-frame capture) */
-static int zoran_cropcap(struct file *file, void *__fh,
-					struct v4l2_cropcap *cropcap)
+static int zoran_g_selection(struct file *file, void *__fh, struct v4l2_selection *sel)
 {
 	struct zoran_fh *fh = __fh;
 	struct zoran *zr = fh->zr;
-	int type = cropcap->type, res = 0;
 
-	memset(cropcap, 0, sizeof(*cropcap));
-	cropcap->type = type;
+	if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+	    sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
 
-	if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
-	    (cropcap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
-	     fh->map_mode == ZORAN_MAP_MODE_RAW)) {
+	if (fh->map_mode == ZORAN_MAP_MODE_RAW) {
 		dprintk(1, KERN_ERR
-			"%s: VIDIOC_CROPCAP - subcapture only supported for compressed capture\n",
+			"%s: VIDIOC_G_SELECTION - subcapture only supported for compressed capture\n",
 			ZR_DEVNAME(zr));
-		res = -EINVAL;
-		return res;
+		return -EINVAL;
 	}
 
-	cropcap->bounds.top = cropcap->bounds.left = 0;
-	cropcap->bounds.width = BUZ_MAX_WIDTH;
-	cropcap->bounds.height = BUZ_MAX_HEIGHT;
-	cropcap->defrect.top = cropcap->defrect.left = 0;
-	cropcap->defrect.width = BUZ_MIN_WIDTH;
-	cropcap->defrect.height = BUZ_MIN_HEIGHT;
-	return res;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP:
+		sel->r.top = fh->jpg_settings.img_y;
+		sel->r.left = fh->jpg_settings.img_x;
+		sel->r.width = fh->jpg_settings.img_width;
+		sel->r.height = fh->jpg_settings.img_height;
+		break;
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		sel->r.top = sel->r.left = 0;
+		sel->r.width = BUZ_MIN_WIDTH;
+		sel->r.height = BUZ_MIN_HEIGHT;
+		break;
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		sel->r.top = sel->r.left = 0;
+		sel->r.width = BUZ_MAX_WIDTH;
+		sel->r.height = BUZ_MAX_HEIGHT;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
 }
 
-static int zoran_g_crop(struct file *file, void *__fh, struct v4l2_crop *crop)
+static int zoran_s_selection(struct file *file, void *__fh, struct v4l2_selection *sel)
 {
 	struct zoran_fh *fh = __fh;
 	struct zoran *zr = fh->zr;
-	int type = crop->type, res = 0;
-
-	memset(crop, 0, sizeof(*crop));
-	crop->type = type;
-
-	if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
-	    (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
-	     fh->map_mode == ZORAN_MAP_MODE_RAW)) {
-		dprintk(1,
-			KERN_ERR
-			"%s: VIDIOC_G_CROP - subcapture only supported for compressed capture\n",
-			ZR_DEVNAME(zr));
-		res = -EINVAL;
-		return res;
-	}
-
-	crop->c.top = fh->jpg_settings.img_y;
-	crop->c.left = fh->jpg_settings.img_x;
-	crop->c.width = fh->jpg_settings.img_width;
-	crop->c.height = fh->jpg_settings.img_height;
-	return res;
-}
-
-static int zoran_s_crop(struct file *file, void *__fh, const struct v4l2_crop *crop)
-{
-	struct zoran_fh *fh = __fh;
-	struct zoran *zr = fh->zr;
-	int res = 0;
 	struct zoran_jpg_settings settings;
+	int res;
+
+	if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+	    sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	if (sel->target != V4L2_SEL_TGT_CROP)
+		return -EINVAL;
+
+	if (fh->map_mode == ZORAN_MAP_MODE_RAW) {
+		dprintk(1, KERN_ERR
+			"%s: VIDIOC_S_SELECTION - subcapture only supported for compressed capture\n",
+			ZR_DEVNAME(zr));
+		return -EINVAL;
+	}
 
 	settings = fh->jpg_settings;
 
 	if (fh->buffers.allocated) {
 		dprintk(1, KERN_ERR
-			"%s: VIDIOC_S_CROP - cannot change settings while active\n",
+			"%s: VIDIOC_S_SELECTION - cannot change settings while active\n",
 			ZR_DEVNAME(zr));
-		res = -EBUSY;
-		return res;
-	}
-
-	if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
-	    (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
-	     fh->map_mode == ZORAN_MAP_MODE_RAW)) {
-		dprintk(1, KERN_ERR
-			"%s: VIDIOC_G_CROP - subcapture only supported for compressed capture\n",
-			ZR_DEVNAME(zr));
-		res = -EINVAL;
-		return res;
+		return -EBUSY;
 	}
 
 	/* move into a form that we understand */
-	settings.img_x = crop->c.left;
-	settings.img_y = crop->c.top;
-	settings.img_width = crop->c.width;
-	settings.img_height = crop->c.height;
+	settings.img_x = sel->r.left;
+	settings.img_y = sel->r.top;
+	settings.img_width = sel->r.width;
+	settings.img_height = sel->r.height;
 
 	/* check validity */
 	res = zoran_check_jpg_settings(zr, &settings, 0);
@@ -2808,9 +2794,8 @@
 
 static const struct v4l2_ioctl_ops zoran_ioctl_ops = {
 	.vidioc_querycap    		    = zoran_querycap,
-	.vidioc_cropcap       		    = zoran_cropcap,
-	.vidioc_s_crop       		    = zoran_s_crop,
-	.vidioc_g_crop       		    = zoran_g_crop,
+	.vidioc_s_selection		    = zoran_s_selection,
+	.vidioc_g_selection		    = zoran_g_selection,
 	.vidioc_enum_input     		    = zoran_enum_input,
 	.vidioc_g_input      		    = zoran_g_input,
 	.vidioc_s_input      		    = zoran_s_input,
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 552b635..ce4a96f 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -91,6 +91,15 @@
 	---help---
 	  Enable debug messages on OMAP 3 camera controller driver.
 
+config VIDEO_PXA27x
+	tristate "PXA27x Quick Capture Interface driver"
+	depends on VIDEO_DEV && HAS_DMA
+	depends on PXA27x || COMPILE_TEST
+	select VIDEOBUF2_DMA_SG
+	select SG_SPLIT
+	---help---
+	  This is a v4l2 driver for the PXA27x Quick Capture Interface
+
 config VIDEO_S3C_CAMIF
 	tristate "Samsung S3C24XX/S3C64XX SoC Camera Interface driver"
 	depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
@@ -107,10 +116,10 @@
 
 source "drivers/media/platform/soc_camera/Kconfig"
 source "drivers/media/platform/exynos4-is/Kconfig"
-source "drivers/media/platform/s5p-tv/Kconfig"
 source "drivers/media/platform/am437x/Kconfig"
 source "drivers/media/platform/xilinx/Kconfig"
 source "drivers/media/platform/rcar-vin/Kconfig"
+source "drivers/media/platform/atmel/Kconfig"
 
 config VIDEO_TI_CAL
 	tristate "TI CAL (Camera Adaptation Layer) driver"
@@ -155,7 +164,7 @@
 
 config VIDEO_MEDIATEK_VPU
 	tristate "Mediatek Video Processor Unit"
-	depends on VIDEO_DEV && VIDEO_V4L2
+	depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
 	depends on ARCH_MEDIATEK || COMPILE_TEST
 	---help---
 	    This driver provides downloading VPU firmware and
@@ -257,6 +266,21 @@
 	help
 	  This v4l2 mem2mem driver is a 2D blitter for STMicroelectronics SoC.
 
+config VIDEO_STI_HVA
+	tristate "STMicroelectronics HVA multi-format video encoder V4L2 driver"
+	depends on VIDEO_DEV && VIDEO_V4L2
+	depends on HAS_DMA
+	depends on ARCH_STI || COMPILE_TEST
+	select VIDEOBUF2_DMA_CONTIG
+	select V4L2_MEM2MEM_DEV
+	help
+	  This V4L2 driver enables HVA (Hardware Video Accelerator) multi-format
+	  video encoder of STMicroelectronics SoC, allowing hardware encoding of
+	  raw uncompressed formats in various compressed video bitstreams format.
+
+	  To compile this driver as a module, choose M here:
+	  the module will be called st-hva.
+
 config VIDEO_SH_VEU
 	tristate "SuperH VEU mem2mem video processing driver"
 	depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 21771c1..40b18d1 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -9,6 +9,7 @@
 obj-$(CONFIG_VIDEO_MMP_CAMERA) += marvell-ccic/
 
 obj-$(CONFIG_VIDEO_OMAP3)	+= omap3isp/
+obj-$(CONFIG_VIDEO_PXA27x)	+= pxa_camera.o
 
 obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
 
@@ -30,12 +31,12 @@
 obj-$(CONFIG_VIDEO_SAMSUNG_EXYNOS4_IS) 	+= exynos4-is/
 obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG)	+= s5p-jpeg/
 obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC)	+= s5p-mfc/
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_TV)	+= s5p-tv/
 
 obj-$(CONFIG_VIDEO_SAMSUNG_S5P_G2D)	+= s5p-g2d/
 obj-$(CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC)	+= exynos-gsc/
 
 obj-$(CONFIG_VIDEO_STI_BDISP)		+= sti/bdisp/
+obj-$(CONFIG_VIDEO_STI_HVA)		+= sti/hva/
 obj-$(CONFIG_DVB_C8SECTPFE)		+= sti/c8sectpfe/
 
 obj-$(CONFIG_BLACKFIN)                  += blackfin/
@@ -58,6 +59,8 @@
 
 obj-$(CONFIG_VIDEO_RCAR_VIN)		+= rcar-vin/
 
+obj-$(CONFIG_VIDEO_ATMEL_ISC)		+= atmel/
+
 ccflags-y += -I$(srctree)/drivers/media/i2c
 
 obj-$(CONFIG_VIDEO_MEDIATEK_VPU)	+= mtk-vpu/
diff --git a/drivers/media/platform/atmel/Kconfig b/drivers/media/platform/atmel/Kconfig
new file mode 100644
index 0000000..867dca2
--- /dev/null
+++ b/drivers/media/platform/atmel/Kconfig
@@ -0,0 +1,9 @@
+config VIDEO_ATMEL_ISC
+	tristate "ATMEL Image Sensor Controller (ISC) support"
+	depends on VIDEO_V4L2 && COMMON_CLK && VIDEO_V4L2_SUBDEV_API && HAS_DMA
+	depends on ARCH_AT91 || COMPILE_TEST
+	select VIDEOBUF2_DMA_CONTIG
+	select REGMAP_MMIO
+	help
+	   This module makes the ATMEL Image Sensor Controller available
+	   as a v4l2 device.
\ No newline at end of file
diff --git a/drivers/media/platform/atmel/Makefile b/drivers/media/platform/atmel/Makefile
new file mode 100644
index 0000000..9d7c999
--- /dev/null
+++ b/drivers/media/platform/atmel/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel-isc.o
diff --git a/drivers/media/platform/atmel/atmel-isc-regs.h b/drivers/media/platform/atmel/atmel-isc-regs.h
new file mode 100644
index 0000000..00c4497
--- /dev/null
+++ b/drivers/media/platform/atmel/atmel-isc-regs.h
@@ -0,0 +1,165 @@
+#ifndef __ATMEL_ISC_REGS_H
+#define __ATMEL_ISC_REGS_H
+
+#include <linux/bitops.h>
+
+/* ISC Control Enable Register 0 */
+#define ISC_CTRLEN      0x00000000
+
+/* ISC Control Disable Register 0 */
+#define ISC_CTRLDIS     0x00000004
+
+/* ISC Control Status Register 0 */
+#define ISC_CTRLSR      0x00000008
+
+#define ISC_CTRL_CAPTURE	BIT(0)
+#define ISC_CTRL_UPPRO		BIT(1)
+#define ISC_CTRL_HISREQ		BIT(2)
+#define ISC_CTRL_HISCLR		BIT(3)
+
+/* ISC Parallel Front End Configuration 0 Register */
+#define ISC_PFE_CFG0    0x0000000c
+
+#define ISC_PFE_CFG0_HPOL_LOW   BIT(0)
+#define ISC_PFE_CFG0_VPOL_LOW   BIT(1)
+#define ISC_PFE_CFG0_PPOL_LOW   BIT(2)
+
+#define ISC_PFE_CFG0_MODE_PROGRESSIVE   (0x0 << 4)
+#define ISC_PFE_CFG0_MODE_MASK          GENMASK(6, 4)
+
+#define ISC_PFE_CFG0_BPS_EIGHT  (0x4 << 28)
+#define ISC_PFG_CFG0_BPS_NINE   (0x3 << 28)
+#define ISC_PFG_CFG0_BPS_TEN    (0x2 << 28)
+#define ISC_PFG_CFG0_BPS_ELEVEN (0x1 << 28)
+#define ISC_PFG_CFG0_BPS_TWELVE (0x0 << 28)
+#define ISC_PFE_CFG0_BPS_MASK   GENMASK(30, 28)
+
+/* ISC Clock Enable Register */
+#define ISC_CLKEN               0x00000018
+
+/* ISC Clock Disable Register */
+#define ISC_CLKDIS              0x0000001c
+
+/* ISC Clock Status Register */
+#define ISC_CLKSR               0x00000020
+
+#define ISC_CLK(n)		BIT(n)
+
+/* ISC Clock Configuration Register */
+#define ISC_CLKCFG              0x00000024
+#define ISC_CLKCFG_DIV_SHIFT(n) ((n)*16)
+#define ISC_CLKCFG_DIV_MASK(n)  GENMASK(((n)*16 + 7), (n)*16)
+#define ISC_CLKCFG_SEL_SHIFT(n) ((n)*16 + 8)
+#define ISC_CLKCFG_SEL_MASK(n)  GENMASK(((n)*17 + 8), ((n)*16 + 8))
+
+/* ISC Interrupt Enable Register */
+#define ISC_INTEN       0x00000028
+
+/* ISC Interrupt Disable Register */
+#define ISC_INTDIS      0x0000002c
+
+/* ISC Interrupt Mask Register */
+#define ISC_INTMASK     0x00000030
+
+/* ISC Interrupt Status Register */
+#define ISC_INTSR       0x00000034
+
+#define ISC_INT_DDONE		BIT(8)
+
+/* ISC White Balance Control Register */
+#define ISC_WB_CTRL     0x00000058
+
+/* ISC White Balance Configuration Register */
+#define ISC_WB_CFG      0x0000005c
+
+/* ISC Color Filter Array Control Register */
+#define ISC_CFA_CTRL    0x00000070
+
+/* ISC Color Filter Array Configuration Register */
+#define ISC_CFA_CFG     0x00000074
+
+#define ISC_BAY_CFG_GRGR	0x0
+#define ISC_BAY_CFG_RGRG	0x1
+#define ISC_BAY_CFG_GBGB	0x2
+#define ISC_BAY_CFG_BGBG	0x3
+#define ISC_BAY_CFG_MASK	GENMASK(1, 0)
+
+/* ISC Color Correction Control Register */
+#define ISC_CC_CTRL     0x00000078
+
+/* ISC Gamma Correction Control Register */
+#define ISC_GAM_CTRL    0x00000094
+
+/* Color Space Conversion Control Register */
+#define ISC_CSC_CTRL    0x00000398
+
+/* Contrast And Brightness Control Register */
+#define ISC_CBC_CTRL    0x000003b4
+
+/* Subsampling 4:4:4 to 4:2:2 Control Register */
+#define ISC_SUB422_CTRL 0x000003c4
+
+/* Subsampling 4:2:2 to 4:2:0 Control Register */
+#define ISC_SUB420_CTRL 0x000003cc
+
+/* Rounding, Limiting and Packing Configuration Register */
+#define ISC_RLP_CFG     0x000003d0
+
+#define ISC_RLP_CFG_MODE_DAT8           0x0
+#define ISC_RLP_CFG_MODE_DAT9           0x1
+#define ISC_RLP_CFG_MODE_DAT10          0x2
+#define ISC_RLP_CFG_MODE_DAT11          0x3
+#define ISC_RLP_CFG_MODE_DAT12          0x4
+#define ISC_RLP_CFG_MODE_DATY8          0x5
+#define ISC_RLP_CFG_MODE_DATY10         0x6
+#define ISC_RLP_CFG_MODE_ARGB444        0x7
+#define ISC_RLP_CFG_MODE_ARGB555        0x8
+#define ISC_RLP_CFG_MODE_RGB565         0x9
+#define ISC_RLP_CFG_MODE_ARGB32         0xa
+#define ISC_RLP_CFG_MODE_YYCC           0xb
+#define ISC_RLP_CFG_MODE_YYCC_LIMITED   0xc
+#define ISC_RLP_CFG_MODE_MASK           GENMASK(3, 0)
+
+/* DMA Configuration Register */
+#define ISC_DCFG        0x000003e0
+#define ISC_DCFG_IMODE_PACKED8          0x0
+#define ISC_DCFG_IMODE_PACKED16         0x1
+#define ISC_DCFG_IMODE_PACKED32         0x2
+#define ISC_DCFG_IMODE_YC422SP          0x3
+#define ISC_DCFG_IMODE_YC422P           0x4
+#define ISC_DCFG_IMODE_YC420SP          0x5
+#define ISC_DCFG_IMODE_YC420P           0x6
+#define ISC_DCFG_IMODE_MASK             GENMASK(2, 0)
+
+#define ISC_DCFG_YMBSIZE_SINGLE         (0x0 << 4)
+#define ISC_DCFG_YMBSIZE_BEATS4         (0x1 << 4)
+#define ISC_DCFG_YMBSIZE_BEATS8         (0x2 << 4)
+#define ISC_DCFG_YMBSIZE_BEATS16        (0x3 << 4)
+#define ISC_DCFG_YMBSIZE_MASK           GENMASK(5, 4)
+
+#define ISC_DCFG_CMBSIZE_SINGLE         (0x0 << 8)
+#define ISC_DCFG_CMBSIZE_BEATS4         (0x1 << 8)
+#define ISC_DCFG_CMBSIZE_BEATS8         (0x2 << 8)
+#define ISC_DCFG_CMBSIZE_BEATS16        (0x3 << 8)
+#define ISC_DCFG_CMBSIZE_MASK           GENMASK(9, 8)
+
+/* DMA Control Register */
+#define ISC_DCTRL       0x000003e4
+
+#define ISC_DCTRL_DVIEW_PACKED          (0x0 << 1)
+#define ISC_DCTRL_DVIEW_SEMIPLANAR      (0x1 << 1)
+#define ISC_DCTRL_DVIEW_PLANAR          (0x2 << 1)
+#define ISC_DCTRL_DVIEW_MASK            GENMASK(2, 1)
+
+#define ISC_DCTRL_IE_IS			(0x0 << 4)
+
+/* DMA Descriptor Address Register */
+#define ISC_DNDA        0x000003e8
+
+/* DMA Address 0 Register */
+#define ISC_DAD0        0x000003ec
+
+/* DMA Stride 0 Register */
+#define ISC_DST0        0x000003f0
+
+#endif
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c
new file mode 100644
index 0000000..ccfe13b
--- /dev/null
+++ b/drivers/media/platform/atmel/atmel-isc.c
@@ -0,0 +1,1520 @@
+/*
+ * Atmel Image Sensor Controller (ISC) driver
+ *
+ * Copyright (C) 2016 Atmel
+ *
+ * Author: Songjun Wu <songjun.wu@microchip.com>
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * Sensor-->PFE-->WB-->CFA-->CC-->GAM-->CSC-->CBC-->SUB-->RLP-->DMA
+ *
+ * ISC video pipeline integrates the following submodules:
+ * PFE: Parallel Front End to sample the camera sensor input stream
+ *  WB: Programmable white balance in the Bayer domain
+ * CFA: Color filter array interpolation module
+ *  CC: Programmable color correction
+ * GAM: Gamma correction
+ * CSC: Programmable color space conversion
+ * CBC: Contrast and Brightness control
+ * SUB: This module performs YCbCr444 to YCbCr420 chrominance subsampling
+ * RLP: This module performs rounding, range limiting
+ *      and packing of the incoming data
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-of.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "atmel-isc-regs.h"
+
+#define ATMEL_ISC_NAME		"atmel_isc"
+
+#define ISC_MAX_SUPPORT_WIDTH   2592
+#define ISC_MAX_SUPPORT_HEIGHT  1944
+
+#define ISC_CLK_MAX_DIV		255
+
+enum isc_clk_id {
+	ISC_ISPCK = 0,
+	ISC_MCK = 1,
+};
+
+struct isc_clk {
+	struct clk_hw   hw;
+	struct clk      *clk;
+	struct regmap   *regmap;
+	u8		id;
+	u8		parent_id;
+	u32		div;
+	struct device	*dev;
+};
+
+#define to_isc_clk(hw) container_of(hw, struct isc_clk, hw)
+
+struct isc_buffer {
+	struct vb2_v4l2_buffer  vb;
+	struct list_head	list;
+};
+
+struct isc_subdev_entity {
+	struct v4l2_subdev		*sd;
+	struct v4l2_async_subdev	*asd;
+	struct v4l2_async_notifier      notifier;
+	struct v4l2_subdev_pad_config	*config;
+
+	u32 pfe_cfg0;
+
+	struct list_head list;
+};
+
+/*
+ * struct isc_format - ISC media bus format information
+ * @fourcc:		Fourcc code for this format
+ * @mbus_code:		V4L2 media bus format code.
+ * @bpp:		Bytes per pixel (when stored in memory)
+ * @reg_bps:		reg value for bits per sample
+ *			(when transferred over a bus)
+ * @support:		Indicates format supported by subdev
+ */
+struct isc_format {
+	u32	fourcc;
+	u32	mbus_code;
+	u8	bpp;
+
+	u32	reg_bps;
+	u32	reg_rlp_mode;
+	u32	reg_dcfg_imode;
+	u32	reg_dctrl_dview;
+
+	bool	support;
+};
+
+#define ISC_PIPE_LINE_NODE_NUM	11
+
+struct isc_device {
+	struct regmap		*regmap;
+	struct clk		*hclock;
+	struct clk		*ispck;
+	struct isc_clk		isc_clks[2];
+
+	struct device		*dev;
+	struct v4l2_device	v4l2_dev;
+	struct video_device	video_dev;
+
+	struct vb2_queue	vb2_vidq;
+	spinlock_t		dma_queue_lock;
+	struct list_head	dma_queue;
+	struct isc_buffer	*cur_frm;
+	unsigned int		sequence;
+	bool			stop;
+	struct completion	comp;
+
+	struct v4l2_format	fmt;
+	struct isc_format	**user_formats;
+	unsigned int		num_user_formats;
+	const struct isc_format	*current_fmt;
+
+	struct mutex		lock;
+
+	struct regmap_field	*pipeline[ISC_PIPE_LINE_NODE_NUM];
+
+	struct isc_subdev_entity	*current_subdev;
+	struct list_head		subdev_entities;
+};
+
+static struct isc_format isc_formats[] = {
+	{ V4L2_PIX_FMT_SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8,
+	  1, ISC_PFE_CFG0_BPS_EIGHT, ISC_RLP_CFG_MODE_DAT8,
+	  ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, false },
+	{ V4L2_PIX_FMT_SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8,
+	  1, ISC_PFE_CFG0_BPS_EIGHT, ISC_RLP_CFG_MODE_DAT8,
+	  ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, false },
+	{ V4L2_PIX_FMT_SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8,
+	  1, ISC_PFE_CFG0_BPS_EIGHT, ISC_RLP_CFG_MODE_DAT8,
+	  ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, false },
+	{ V4L2_PIX_FMT_SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8,
+	  1, ISC_PFE_CFG0_BPS_EIGHT, ISC_RLP_CFG_MODE_DAT8,
+	  ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, false },
+
+	{ V4L2_PIX_FMT_SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10,
+	  2, ISC_PFG_CFG0_BPS_TEN, ISC_RLP_CFG_MODE_DAT10,
+	  ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
+	{ V4L2_PIX_FMT_SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10,
+	  2, ISC_PFG_CFG0_BPS_TEN, ISC_RLP_CFG_MODE_DAT10,
+	  ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
+	{ V4L2_PIX_FMT_SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10,
+	  2, ISC_PFG_CFG0_BPS_TEN, ISC_RLP_CFG_MODE_DAT10,
+	  ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
+	{ V4L2_PIX_FMT_SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10,
+	  2, ISC_PFG_CFG0_BPS_TEN, ISC_RLP_CFG_MODE_DAT10,
+	  ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
+
+	{ V4L2_PIX_FMT_SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12,
+	  2, ISC_PFG_CFG0_BPS_TWELVE, ISC_RLP_CFG_MODE_DAT12,
+	  ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
+	{ V4L2_PIX_FMT_SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12,
+	  2, ISC_PFG_CFG0_BPS_TWELVE, ISC_RLP_CFG_MODE_DAT12,
+	  ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
+	{ V4L2_PIX_FMT_SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12,
+	  2, ISC_PFG_CFG0_BPS_TWELVE, ISC_RLP_CFG_MODE_DAT12,
+	  ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
+	{ V4L2_PIX_FMT_SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12,
+	  2, ISC_PFG_CFG0_BPS_TWELVE, ISC_RLP_CFG_MODE_DAT12,
+	  ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
+
+	{ V4L2_PIX_FMT_YUYV, MEDIA_BUS_FMT_YUYV8_2X8,
+	  2, ISC_PFE_CFG0_BPS_EIGHT, ISC_RLP_CFG_MODE_DAT8,
+	  ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, false },
+};
+
+static int isc_clk_enable(struct clk_hw *hw)
+{
+	struct isc_clk *isc_clk = to_isc_clk(hw);
+	u32 id = isc_clk->id;
+	struct regmap *regmap = isc_clk->regmap;
+
+	dev_dbg(isc_clk->dev, "ISC CLK: %s, div = %d, parent id = %d\n",
+		__func__, isc_clk->div, isc_clk->parent_id);
+
+	regmap_update_bits(regmap, ISC_CLKCFG,
+			   ISC_CLKCFG_DIV_MASK(id) | ISC_CLKCFG_SEL_MASK(id),
+			   (isc_clk->div << ISC_CLKCFG_DIV_SHIFT(id)) |
+			   (isc_clk->parent_id << ISC_CLKCFG_SEL_SHIFT(id)));
+
+	regmap_write(regmap, ISC_CLKEN, ISC_CLK(id));
+
+	return 0;
+}
+
+static void isc_clk_disable(struct clk_hw *hw)
+{
+	struct isc_clk *isc_clk = to_isc_clk(hw);
+	u32 id = isc_clk->id;
+
+	regmap_write(isc_clk->regmap, ISC_CLKDIS, ISC_CLK(id));
+}
+
+static int isc_clk_is_enabled(struct clk_hw *hw)
+{
+	struct isc_clk *isc_clk = to_isc_clk(hw);
+	u32 status;
+
+	regmap_read(isc_clk->regmap, ISC_CLKSR, &status);
+
+	return status & ISC_CLK(isc_clk->id) ? 1 : 0;
+}
+
+static unsigned long
+isc_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+	struct isc_clk *isc_clk = to_isc_clk(hw);
+
+	return DIV_ROUND_CLOSEST(parent_rate, isc_clk->div + 1);
+}
+
+static int isc_clk_determine_rate(struct clk_hw *hw,
+				   struct clk_rate_request *req)
+{
+	struct isc_clk *isc_clk = to_isc_clk(hw);
+	long best_rate = -EINVAL;
+	int best_diff = -1;
+	unsigned int i, div;
+
+	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+		struct clk_hw *parent;
+		unsigned long parent_rate;
+
+		parent = clk_hw_get_parent_by_index(hw, i);
+		if (!parent)
+			continue;
+
+		parent_rate = clk_hw_get_rate(parent);
+		if (!parent_rate)
+			continue;
+
+		for (div = 1; div < ISC_CLK_MAX_DIV + 2; div++) {
+			unsigned long rate;
+			int diff;
+
+			rate = DIV_ROUND_CLOSEST(parent_rate, div);
+			diff = abs(req->rate - rate);
+
+			if (best_diff < 0 || best_diff > diff) {
+				best_rate = rate;
+				best_diff = diff;
+				req->best_parent_rate = parent_rate;
+				req->best_parent_hw = parent;
+			}
+
+			if (!best_diff || rate < req->rate)
+				break;
+		}
+
+		if (!best_diff)
+			break;
+	}
+
+	dev_dbg(isc_clk->dev,
+		"ISC CLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
+		__func__, best_rate,
+		__clk_get_name((req->best_parent_hw)->clk),
+		req->best_parent_rate);
+
+	if (best_rate < 0)
+		return best_rate;
+
+	req->rate = best_rate;
+
+	return 0;
+}
+
+static int isc_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct isc_clk *isc_clk = to_isc_clk(hw);
+
+	if (index >= clk_hw_get_num_parents(hw))
+		return -EINVAL;
+
+	isc_clk->parent_id = index;
+
+	return 0;
+}
+
+static u8 isc_clk_get_parent(struct clk_hw *hw)
+{
+	struct isc_clk *isc_clk = to_isc_clk(hw);
+
+	return isc_clk->parent_id;
+}
+
+static int isc_clk_set_rate(struct clk_hw *hw,
+			     unsigned long rate,
+			     unsigned long parent_rate)
+{
+	struct isc_clk *isc_clk = to_isc_clk(hw);
+	u32 div;
+
+	if (!rate)
+		return -EINVAL;
+
+	div = DIV_ROUND_CLOSEST(parent_rate, rate);
+	if (div > (ISC_CLK_MAX_DIV + 1) || !div)
+		return -EINVAL;
+
+	isc_clk->div = div - 1;
+
+	return 0;
+}
+
+static const struct clk_ops isc_clk_ops = {
+	.enable		= isc_clk_enable,
+	.disable	= isc_clk_disable,
+	.is_enabled	= isc_clk_is_enabled,
+	.recalc_rate	= isc_clk_recalc_rate,
+	.determine_rate	= isc_clk_determine_rate,
+	.set_parent	= isc_clk_set_parent,
+	.get_parent	= isc_clk_get_parent,
+	.set_rate	= isc_clk_set_rate,
+};
+
+static int isc_clk_register(struct isc_device *isc, unsigned int id)
+{
+	struct regmap *regmap = isc->regmap;
+	struct device_node *np = isc->dev->of_node;
+	struct isc_clk *isc_clk;
+	struct clk_init_data init;
+	const char *clk_name = np->name;
+	const char *parent_names[3];
+	int num_parents;
+
+	num_parents = of_clk_get_parent_count(np);
+	if (num_parents < 1 || num_parents > 3)
+		return -EINVAL;
+
+	if (num_parents > 2 && id == ISC_ISPCK)
+		num_parents = 2;
+
+	of_clk_parent_fill(np, parent_names, num_parents);
+
+	if (id == ISC_MCK)
+		of_property_read_string(np, "clock-output-names", &clk_name);
+	else
+		clk_name = "isc-ispck";
+
+	init.parent_names	= parent_names;
+	init.num_parents	= num_parents;
+	init.name		= clk_name;
+	init.ops		= &isc_clk_ops;
+	init.flags		= CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+
+	isc_clk = &isc->isc_clks[id];
+	isc_clk->hw.init	= &init;
+	isc_clk->regmap		= regmap;
+	isc_clk->id		= id;
+	isc_clk->dev		= isc->dev;
+
+	isc_clk->clk = clk_register(isc->dev, &isc_clk->hw);
+	if (IS_ERR(isc_clk->clk)) {
+		dev_err(isc->dev, "%s: clock register fail\n", clk_name);
+		return PTR_ERR(isc_clk->clk);
+	} else if (id == ISC_MCK)
+		of_clk_add_provider(np, of_clk_src_simple_get, isc_clk->clk);
+
+	return 0;
+}
+
+static int isc_clk_init(struct isc_device *isc)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++)
+		isc->isc_clks[i].clk = ERR_PTR(-EINVAL);
+
+	for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++) {
+		ret = isc_clk_register(isc, i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void isc_clk_cleanup(struct isc_device *isc)
+{
+	unsigned int i;
+
+	of_clk_del_provider(isc->dev->of_node);
+
+	for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++) {
+		struct isc_clk *isc_clk = &isc->isc_clks[i];
+
+		if (!IS_ERR(isc_clk->clk))
+			clk_unregister(isc_clk->clk);
+	}
+}
+
+static int isc_queue_setup(struct vb2_queue *vq,
+			    unsigned int *nbuffers, unsigned int *nplanes,
+			    unsigned int sizes[], struct device *alloc_devs[])
+{
+	struct isc_device *isc = vb2_get_drv_priv(vq);
+	unsigned int size = isc->fmt.fmt.pix.sizeimage;
+
+	if (*nplanes)
+		return sizes[0] < size ? -EINVAL : 0;
+
+	*nplanes = 1;
+	sizes[0] = size;
+
+	return 0;
+}
+
+static int isc_buffer_prepare(struct vb2_buffer *vb)
+{
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct isc_device *isc = vb2_get_drv_priv(vb->vb2_queue);
+	unsigned long size = isc->fmt.fmt.pix.sizeimage;
+
+	if (vb2_plane_size(vb, 0) < size) {
+		v4l2_err(&isc->v4l2_dev, "buffer too small (%lu < %lu)\n",
+			 vb2_plane_size(vb, 0), size);
+		return -EINVAL;
+	}
+
+	vb2_set_plane_payload(vb, 0, size);
+
+	vbuf->field = isc->fmt.fmt.pix.field;
+
+	return 0;
+}
+
+static inline void isc_start_dma(struct regmap *regmap,
+				  struct isc_buffer *frm, u32 dview)
+{
+	dma_addr_t addr;
+
+	addr = vb2_dma_contig_plane_dma_addr(&frm->vb.vb2_buf, 0);
+
+	regmap_write(regmap, ISC_DCTRL, dview | ISC_DCTRL_IE_IS);
+	regmap_write(regmap, ISC_DAD0, addr);
+	regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_CAPTURE);
+}
+
+static void isc_set_pipeline(struct isc_device *isc, u32 pipeline)
+{
+	u32 val;
+	unsigned int i;
+
+	for (i = 0; i < ISC_PIPE_LINE_NODE_NUM; i++) {
+		val = pipeline & BIT(i) ? 1 : 0;
+		regmap_field_write(isc->pipeline[i], val);
+	}
+}
+
+static int isc_configure(struct isc_device *isc)
+{
+	struct regmap *regmap = isc->regmap;
+	const struct isc_format *current_fmt = isc->current_fmt;
+	struct isc_subdev_entity *subdev = isc->current_subdev;
+	u32 val, mask;
+	int counter = 10;
+
+	val = current_fmt->reg_bps | subdev->pfe_cfg0 |
+	      ISC_PFE_CFG0_MODE_PROGRESSIVE;
+	mask = ISC_PFE_CFG0_BPS_MASK | ISC_PFE_CFG0_HPOL_LOW |
+	       ISC_PFE_CFG0_VPOL_LOW | ISC_PFE_CFG0_PPOL_LOW |
+	       ISC_PFE_CFG0_MODE_MASK;
+
+	regmap_update_bits(regmap, ISC_PFE_CFG0, mask, val);
+
+	regmap_update_bits(regmap, ISC_RLP_CFG, ISC_RLP_CFG_MODE_MASK,
+			   current_fmt->reg_rlp_mode);
+
+	regmap_update_bits(regmap, ISC_DCFG, ISC_DCFG_IMODE_MASK,
+			   current_fmt->reg_dcfg_imode);
+
+	/* Disable the pipeline */
+	isc_set_pipeline(isc, 0x0);
+
+	/* Update profile */
+	regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_UPPRO);
+
+	regmap_read(regmap, ISC_CTRLSR, &val);
+	while ((val & ISC_CTRL_UPPRO) && counter--) {
+		usleep_range(1000, 2000);
+		regmap_read(regmap, ISC_CTRLSR, &val);
+	}
+
+	if (counter < 0)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int isc_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+	struct isc_device *isc = vb2_get_drv_priv(vq);
+	struct regmap *regmap = isc->regmap;
+	struct isc_buffer *buf;
+	unsigned long flags;
+	int ret;
+	u32 val;
+
+	/* Enable stream on the sub device */
+	ret = v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 1);
+	if (ret && ret != -ENOIOCTLCMD) {
+		v4l2_err(&isc->v4l2_dev, "stream on failed in subdev\n");
+		goto err_start_stream;
+	}
+
+	pm_runtime_get_sync(isc->dev);
+
+	/* Disable all the interrupts */
+	regmap_write(isc->regmap, ISC_INTDIS, (u32)~0UL);
+
+	/* Clean the interrupt status register */
+	regmap_read(regmap, ISC_INTSR, &val);
+
+	ret = isc_configure(isc);
+	if (unlikely(ret))
+		goto err_configure;
+
+	/* Enable DMA interrupt */
+	regmap_write(regmap, ISC_INTEN, ISC_INT_DDONE);
+
+	spin_lock_irqsave(&isc->dma_queue_lock, flags);
+
+	isc->sequence = 0;
+	isc->stop = false;
+	reinit_completion(&isc->comp);
+
+	isc->cur_frm = list_first_entry(&isc->dma_queue,
+					struct isc_buffer, list);
+	list_del(&isc->cur_frm->list);
+
+	isc_start_dma(regmap, isc->cur_frm, isc->current_fmt->reg_dctrl_dview);
+
+	spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
+
+	return 0;
+
+err_configure:
+	pm_runtime_put_sync(isc->dev);
+
+	v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 0);
+
+err_start_stream:
+	spin_lock_irqsave(&isc->dma_queue_lock, flags);
+	list_for_each_entry(buf, &isc->dma_queue, list)
+		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+	INIT_LIST_HEAD(&isc->dma_queue);
+	spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
+
+	return ret;
+}
+
+static void isc_stop_streaming(struct vb2_queue *vq)
+{
+	struct isc_device *isc = vb2_get_drv_priv(vq);
+	unsigned long flags;
+	struct isc_buffer *buf;
+	int ret;
+
+	isc->stop = true;
+
+	/* Wait until the end of the current frame */
+	if (isc->cur_frm && !wait_for_completion_timeout(&isc->comp, 5 * HZ))
+		v4l2_err(&isc->v4l2_dev,
+			 "Timeout waiting for end of the capture\n");
+
+	/* Disable DMA interrupt */
+	regmap_write(isc->regmap, ISC_INTDIS, ISC_INT_DDONE);
+
+	pm_runtime_put_sync(isc->dev);
+
+	/* Disable stream on the sub device */
+	ret = v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 0);
+	if (ret && ret != -ENOIOCTLCMD)
+		v4l2_err(&isc->v4l2_dev, "stream off failed in subdev\n");
+
+	/* Release all active buffers */
+	spin_lock_irqsave(&isc->dma_queue_lock, flags);
+	if (unlikely(isc->cur_frm)) {
+		vb2_buffer_done(&isc->cur_frm->vb.vb2_buf,
+				VB2_BUF_STATE_ERROR);
+		isc->cur_frm = NULL;
+	}
+	list_for_each_entry(buf, &isc->dma_queue, list)
+		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+	INIT_LIST_HEAD(&isc->dma_queue);
+	spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
+}
+
+static void isc_buffer_queue(struct vb2_buffer *vb)
+{
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct isc_buffer *buf = container_of(vbuf, struct isc_buffer, vb);
+	struct isc_device *isc = vb2_get_drv_priv(vb->vb2_queue);
+	unsigned long flags;
+
+	spin_lock_irqsave(&isc->dma_queue_lock, flags);
+	list_add_tail(&buf->list, &isc->dma_queue);
+	spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
+}
+
+static struct vb2_ops isc_vb2_ops = {
+	.queue_setup		= isc_queue_setup,
+	.wait_prepare		= vb2_ops_wait_prepare,
+	.wait_finish		= vb2_ops_wait_finish,
+	.buf_prepare		= isc_buffer_prepare,
+	.start_streaming	= isc_start_streaming,
+	.stop_streaming		= isc_stop_streaming,
+	.buf_queue		= isc_buffer_queue,
+};
+
+static int isc_querycap(struct file *file, void *priv,
+			 struct v4l2_capability *cap)
+{
+	struct isc_device *isc = video_drvdata(file);
+
+	strcpy(cap->driver, ATMEL_ISC_NAME);
+	strcpy(cap->card, "Atmel Image Sensor Controller");
+	snprintf(cap->bus_info, sizeof(cap->bus_info),
+		 "platform:%s", isc->v4l2_dev.name);
+
+	return 0;
+}
+
+static int isc_enum_fmt_vid_cap(struct file *file, void *priv,
+				 struct v4l2_fmtdesc *f)
+{
+	struct isc_device *isc = video_drvdata(file);
+	u32 index = f->index;
+
+	if (index >= isc->num_user_formats)
+		return -EINVAL;
+
+	f->pixelformat = isc->user_formats[index]->fourcc;
+
+	return 0;
+}
+
+static int isc_g_fmt_vid_cap(struct file *file, void *priv,
+			      struct v4l2_format *fmt)
+{
+	struct isc_device *isc = video_drvdata(file);
+
+	*fmt = isc->fmt;
+
+	return 0;
+}
+
+static struct isc_format *find_format_by_fourcc(struct isc_device *isc,
+						 unsigned int fourcc)
+{
+	unsigned int num_formats = isc->num_user_formats;
+	struct isc_format *fmt;
+	unsigned int i;
+
+	for (i = 0; i < num_formats; i++) {
+		fmt = isc->user_formats[i];
+		if (fmt->fourcc == fourcc)
+			return fmt;
+	}
+
+	return NULL;
+}
+
+static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
+			struct isc_format **current_fmt)
+{
+	struct isc_format *isc_fmt;
+	struct v4l2_pix_format *pixfmt = &f->fmt.pix;
+	struct v4l2_subdev_format format = {
+		.which = V4L2_SUBDEV_FORMAT_TRY,
+	};
+	int ret;
+
+	if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	isc_fmt = find_format_by_fourcc(isc, pixfmt->pixelformat);
+	if (!isc_fmt) {
+		v4l2_warn(&isc->v4l2_dev, "Format 0x%x not found\n",
+			  pixfmt->pixelformat);
+		isc_fmt = isc->user_formats[isc->num_user_formats - 1];
+		pixfmt->pixelformat = isc_fmt->fourcc;
+	}
+
+	/* Limit to Atmel ISC hardware capabilities */
+	if (pixfmt->width > ISC_MAX_SUPPORT_WIDTH)
+		pixfmt->width = ISC_MAX_SUPPORT_WIDTH;
+	if (pixfmt->height > ISC_MAX_SUPPORT_HEIGHT)
+		pixfmt->height = ISC_MAX_SUPPORT_HEIGHT;
+
+	v4l2_fill_mbus_format(&format.format, pixfmt, isc_fmt->mbus_code);
+	ret = v4l2_subdev_call(isc->current_subdev->sd, pad, set_fmt,
+			       isc->current_subdev->config, &format);
+	if (ret < 0)
+		return ret;
+
+	v4l2_fill_pix_format(pixfmt, &format.format);
+
+	pixfmt->field = V4L2_FIELD_NONE;
+	pixfmt->bytesperline = pixfmt->width * isc_fmt->bpp;
+	pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+
+	if (current_fmt)
+		*current_fmt = isc_fmt;
+
+	return 0;
+}
+
+static int isc_set_fmt(struct isc_device *isc, struct v4l2_format *f)
+{
+	struct v4l2_subdev_format format = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+	};
+	struct isc_format *current_fmt;
+	int ret;
+
+	ret = isc_try_fmt(isc, f, &current_fmt);
+	if (ret)
+		return ret;
+
+	v4l2_fill_mbus_format(&format.format, &f->fmt.pix,
+			      current_fmt->mbus_code);
+	ret = v4l2_subdev_call(isc->current_subdev->sd, pad,
+			       set_fmt, NULL, &format);
+	if (ret < 0)
+		return ret;
+
+	isc->fmt = *f;
+	isc->current_fmt = current_fmt;
+
+	return 0;
+}
+
+static int isc_s_fmt_vid_cap(struct file *file, void *priv,
+			      struct v4l2_format *f)
+{
+	struct isc_device *isc = video_drvdata(file);
+
+	if (vb2_is_streaming(&isc->vb2_vidq))
+		return -EBUSY;
+
+	return isc_set_fmt(isc, f);
+}
+
+static int isc_try_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct isc_device *isc = video_drvdata(file);
+
+	return isc_try_fmt(isc, f, NULL);
+}
+
+static int isc_enum_input(struct file *file, void *priv,
+			   struct v4l2_input *inp)
+{
+	if (inp->index != 0)
+		return -EINVAL;
+
+	inp->type = V4L2_INPUT_TYPE_CAMERA;
+	inp->std = 0;
+	strcpy(inp->name, "Camera");
+
+	return 0;
+}
+
+static int isc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+	*i = 0;
+
+	return 0;
+}
+
+static int isc_s_input(struct file *file, void *priv, unsigned int i)
+{
+	if (i > 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int isc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+	struct isc_device *isc = video_drvdata(file);
+
+	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	return v4l2_subdev_call(isc->current_subdev->sd, video, g_parm, a);
+}
+
+static int isc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+	struct isc_device *isc = video_drvdata(file);
+
+	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	return v4l2_subdev_call(isc->current_subdev->sd, video, s_parm, a);
+}
+
+static int isc_enum_framesizes(struct file *file, void *fh,
+			       struct v4l2_frmsizeenum *fsize)
+{
+	struct isc_device *isc = video_drvdata(file);
+	const struct isc_format *isc_fmt;
+	struct v4l2_subdev_frame_size_enum fse = {
+		.index = fsize->index,
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+	};
+	int ret;
+
+	isc_fmt = find_format_by_fourcc(isc, fsize->pixel_format);
+	if (!isc_fmt)
+		return -EINVAL;
+
+	fse.code = isc_fmt->mbus_code;
+
+	ret = v4l2_subdev_call(isc->current_subdev->sd, pad, enum_frame_size,
+			       NULL, &fse);
+	if (ret)
+		return ret;
+
+	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+	fsize->discrete.width = fse.max_width;
+	fsize->discrete.height = fse.max_height;
+
+	return 0;
+}
+
+static int isc_enum_frameintervals(struct file *file, void *fh,
+				    struct v4l2_frmivalenum *fival)
+{
+	struct isc_device *isc = video_drvdata(file);
+	const struct isc_format *isc_fmt;
+	struct v4l2_subdev_frame_interval_enum fie = {
+		.index = fival->index,
+		.width = fival->width,
+		.height = fival->height,
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+	};
+	int ret;
+
+	isc_fmt = find_format_by_fourcc(isc, fival->pixel_format);
+	if (!isc_fmt)
+		return -EINVAL;
+
+	fie.code = isc_fmt->mbus_code;
+
+	ret = v4l2_subdev_call(isc->current_subdev->sd, pad,
+			       enum_frame_interval, NULL, &fie);
+	if (ret)
+		return ret;
+
+	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+	fival->discrete = fie.interval;
+
+	return 0;
+}
+
+static const struct v4l2_ioctl_ops isc_ioctl_ops = {
+	.vidioc_querycap		= isc_querycap,
+	.vidioc_enum_fmt_vid_cap	= isc_enum_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap		= isc_g_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap		= isc_s_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap		= isc_try_fmt_vid_cap,
+
+	.vidioc_enum_input		= isc_enum_input,
+	.vidioc_g_input			= isc_g_input,
+	.vidioc_s_input			= isc_s_input,
+
+	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
+	.vidioc_querybuf		= vb2_ioctl_querybuf,
+	.vidioc_qbuf			= vb2_ioctl_qbuf,
+	.vidioc_expbuf			= vb2_ioctl_expbuf,
+	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
+	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
+	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
+	.vidioc_streamon		= vb2_ioctl_streamon,
+	.vidioc_streamoff		= vb2_ioctl_streamoff,
+
+	.vidioc_g_parm			= isc_g_parm,
+	.vidioc_s_parm			= isc_s_parm,
+	.vidioc_enum_framesizes		= isc_enum_framesizes,
+	.vidioc_enum_frameintervals	= isc_enum_frameintervals,
+};
+
+static int isc_open(struct file *file)
+{
+	struct isc_device *isc = video_drvdata(file);
+	struct v4l2_subdev *sd = isc->current_subdev->sd;
+	int ret;
+
+	if (mutex_lock_interruptible(&isc->lock))
+		return -ERESTARTSYS;
+
+	ret = v4l2_fh_open(file);
+	if (ret < 0)
+		goto unlock;
+
+	if (!v4l2_fh_is_singular_file(file))
+		goto unlock;
+
+	ret = v4l2_subdev_call(sd, core, s_power, 1);
+	if (ret < 0 && ret != -ENOIOCTLCMD) {
+		v4l2_fh_release(file);
+		goto unlock;
+	}
+
+	ret = isc_set_fmt(isc, &isc->fmt);
+	if (ret) {
+		v4l2_subdev_call(sd, core, s_power, 0);
+		v4l2_fh_release(file);
+	}
+
+unlock:
+	mutex_unlock(&isc->lock);
+	return ret;
+}
+
+static int isc_release(struct file *file)
+{
+	struct isc_device *isc = video_drvdata(file);
+	struct v4l2_subdev *sd = isc->current_subdev->sd;
+	bool fh_singular;
+	int ret;
+
+	mutex_lock(&isc->lock);
+
+	fh_singular = v4l2_fh_is_singular_file(file);
+
+	ret = _vb2_fop_release(file, NULL);
+
+	if (fh_singular)
+		v4l2_subdev_call(sd, core, s_power, 0);
+
+	mutex_unlock(&isc->lock);
+
+	return ret;
+}
+
+static const struct v4l2_file_operations isc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= isc_open,
+	.release	= isc_release,
+	.unlocked_ioctl	= video_ioctl2,
+	.read		= vb2_fop_read,
+	.mmap		= vb2_fop_mmap,
+	.poll		= vb2_fop_poll,
+};
+
+static irqreturn_t isc_interrupt(int irq, void *dev_id)
+{
+	struct isc_device *isc = (struct isc_device *)dev_id;
+	struct regmap *regmap = isc->regmap;
+	u32 isc_intsr, isc_intmask, pending;
+	irqreturn_t ret = IRQ_NONE;
+
+	spin_lock(&isc->dma_queue_lock);
+
+	regmap_read(regmap, ISC_INTSR, &isc_intsr);
+	regmap_read(regmap, ISC_INTMASK, &isc_intmask);
+
+	pending = isc_intsr & isc_intmask;
+
+	if (likely(pending & ISC_INT_DDONE)) {
+		if (isc->cur_frm) {
+			struct vb2_v4l2_buffer *vbuf = &isc->cur_frm->vb;
+			struct vb2_buffer *vb = &vbuf->vb2_buf;
+
+			vb->timestamp = ktime_get_ns();
+			vbuf->sequence = isc->sequence++;
+			vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+			isc->cur_frm = NULL;
+		}
+
+		if (!list_empty(&isc->dma_queue) && !isc->stop) {
+			isc->cur_frm = list_first_entry(&isc->dma_queue,
+						     struct isc_buffer, list);
+			list_del(&isc->cur_frm->list);
+
+			isc_start_dma(regmap, isc->cur_frm,
+				      isc->current_fmt->reg_dctrl_dview);
+		}
+
+		if (isc->stop)
+			complete(&isc->comp);
+
+		ret = IRQ_HANDLED;
+	}
+
+	spin_unlock(&isc->dma_queue_lock);
+
+	return ret;
+}
+
+static int isc_async_bound(struct v4l2_async_notifier *notifier,
+			    struct v4l2_subdev *subdev,
+			    struct v4l2_async_subdev *asd)
+{
+	struct isc_device *isc = container_of(notifier->v4l2_dev,
+					      struct isc_device, v4l2_dev);
+	struct isc_subdev_entity *subdev_entity =
+		container_of(notifier, struct isc_subdev_entity, notifier);
+
+	if (video_is_registered(&isc->video_dev)) {
+		v4l2_err(&isc->v4l2_dev, "only supports one sub-device.\n");
+		return -EBUSY;
+	}
+
+	subdev_entity->sd = subdev;
+
+	return 0;
+}
+
+static void isc_async_unbind(struct v4l2_async_notifier *notifier,
+			      struct v4l2_subdev *subdev,
+			      struct v4l2_async_subdev *asd)
+{
+	struct isc_device *isc = container_of(notifier->v4l2_dev,
+					      struct isc_device, v4l2_dev);
+
+	video_unregister_device(&isc->video_dev);
+	if (isc->current_subdev->config)
+		v4l2_subdev_free_pad_config(isc->current_subdev->config);
+}
+
+static struct isc_format *find_format_by_code(unsigned int code, int *index)
+{
+	struct isc_format *fmt = &isc_formats[0];
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(isc_formats); i++) {
+		if (fmt->mbus_code == code) {
+			*index = i;
+			return fmt;
+		}
+
+		fmt++;
+	}
+
+	return NULL;
+}
+
+static int isc_formats_init(struct isc_device *isc)
+{
+	struct isc_format *fmt;
+	struct v4l2_subdev *subdev = isc->current_subdev->sd;
+	int num_fmts = 0, i, j;
+	struct v4l2_subdev_mbus_code_enum mbus_code = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+	};
+
+	fmt = &isc_formats[0];
+	for (i = 0; i < ARRAY_SIZE(isc_formats); i++) {
+		fmt->support = false;
+		fmt++;
+	}
+
+	while (!v4l2_subdev_call(subdev, pad, enum_mbus_code,
+	       NULL, &mbus_code)) {
+		mbus_code.index++;
+		fmt = find_format_by_code(mbus_code.code, &i);
+		if (!fmt)
+			continue;
+
+		fmt->support = true;
+		num_fmts++;
+	}
+
+	if (!num_fmts)
+		return -ENXIO;
+
+	isc->num_user_formats = num_fmts;
+	isc->user_formats = devm_kcalloc(isc->dev,
+					 num_fmts, sizeof(struct isc_format *),
+					 GFP_KERNEL);
+	if (!isc->user_formats) {
+		v4l2_err(&isc->v4l2_dev, "could not allocate memory\n");
+		return -ENOMEM;
+	}
+
+	fmt = &isc_formats[0];
+	for (i = 0, j = 0; i < ARRAY_SIZE(isc_formats); i++) {
+		if (fmt->support)
+			isc->user_formats[j++] = fmt;
+
+		fmt++;
+	}
+
+	return 0;
+}
+
+static int isc_set_default_fmt(struct isc_device *isc)
+{
+	struct v4l2_format f = {
+		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+		.fmt.pix = {
+			.width		= VGA_WIDTH,
+			.height		= VGA_HEIGHT,
+			.field		= V4L2_FIELD_NONE,
+			.pixelformat	= isc->user_formats[0]->fourcc,
+		},
+	};
+	int ret;
+
+	ret = isc_try_fmt(isc, &f, NULL);
+	if (ret)
+		return ret;
+
+	isc->current_fmt = isc->user_formats[0];
+	isc->fmt = f;
+
+	return 0;
+}
+
+static int isc_async_complete(struct v4l2_async_notifier *notifier)
+{
+	struct isc_device *isc = container_of(notifier->v4l2_dev,
+					      struct isc_device, v4l2_dev);
+	struct isc_subdev_entity *sd_entity;
+	struct video_device *vdev = &isc->video_dev;
+	struct vb2_queue *q = &isc->vb2_vidq;
+	int ret;
+
+	isc->current_subdev = container_of(notifier,
+					   struct isc_subdev_entity, notifier);
+	sd_entity = isc->current_subdev;
+
+	mutex_init(&isc->lock);
+	init_completion(&isc->comp);
+
+	/* Initialize videobuf2 queue */
+	q->type			= V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	q->io_modes		= VB2_MMAP | VB2_DMABUF | VB2_READ;
+	q->drv_priv		= isc;
+	q->buf_struct_size	= sizeof(struct isc_buffer);
+	q->ops			= &isc_vb2_ops;
+	q->mem_ops		= &vb2_dma_contig_memops;
+	q->timestamp_flags	= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+	q->lock			= &isc->lock;
+	q->min_buffers_needed	= 1;
+	q->dev			= isc->dev;
+
+	ret = vb2_queue_init(q);
+	if (ret < 0) {
+		v4l2_err(&isc->v4l2_dev,
+			 "vb2_queue_init() failed: %d\n", ret);
+		return ret;
+	}
+
+	/* Init video dma queues */
+	INIT_LIST_HEAD(&isc->dma_queue);
+	spin_lock_init(&isc->dma_queue_lock);
+
+	sd_entity->config = v4l2_subdev_alloc_pad_config(sd_entity->sd);
+	if (sd_entity->config == NULL)
+		return -ENOMEM;
+
+	ret = isc_formats_init(isc);
+	if (ret < 0) {
+		v4l2_err(&isc->v4l2_dev,
+			 "Init format failed: %d\n", ret);
+		return ret;
+	}
+
+	ret = isc_set_default_fmt(isc);
+	if (ret) {
+		v4l2_err(&isc->v4l2_dev, "Could not set default format\n");
+		return ret;
+	}
+
+	/* Register video device */
+	strlcpy(vdev->name, ATMEL_ISC_NAME, sizeof(vdev->name));
+	vdev->release		= video_device_release_empty;
+	vdev->fops		= &isc_fops;
+	vdev->ioctl_ops		= &isc_ioctl_ops;
+	vdev->v4l2_dev		= &isc->v4l2_dev;
+	vdev->vfl_dir		= VFL_DIR_RX;
+	vdev->queue		= q;
+	vdev->lock		= &isc->lock;
+	vdev->ctrl_handler	= isc->current_subdev->sd->ctrl_handler;
+	vdev->device_caps	= V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
+	video_set_drvdata(vdev, isc);
+
+	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+	if (ret < 0) {
+		v4l2_err(&isc->v4l2_dev,
+			 "video_register_device failed: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void isc_subdev_cleanup(struct isc_device *isc)
+{
+	struct isc_subdev_entity *subdev_entity;
+
+	list_for_each_entry(subdev_entity, &isc->subdev_entities, list)
+		v4l2_async_notifier_unregister(&subdev_entity->notifier);
+
+	INIT_LIST_HEAD(&isc->subdev_entities);
+}
+
+static int isc_pipeline_init(struct isc_device *isc)
+{
+	struct device *dev = isc->dev;
+	struct regmap *regmap = isc->regmap;
+	struct regmap_field *regs;
+	unsigned int i;
+
+	/* WB-->CFA-->CC-->GAM-->CSC-->CBC-->SUB422-->SUB420 */
+	const struct reg_field regfields[ISC_PIPE_LINE_NODE_NUM] = {
+		REG_FIELD(ISC_WB_CTRL, 0, 0),
+		REG_FIELD(ISC_CFA_CTRL, 0, 0),
+		REG_FIELD(ISC_CC_CTRL, 0, 0),
+		REG_FIELD(ISC_GAM_CTRL, 0, 0),
+		REG_FIELD(ISC_GAM_CTRL, 1, 1),
+		REG_FIELD(ISC_GAM_CTRL, 2, 2),
+		REG_FIELD(ISC_GAM_CTRL, 3, 3),
+		REG_FIELD(ISC_CSC_CTRL, 0, 0),
+		REG_FIELD(ISC_CBC_CTRL, 0, 0),
+		REG_FIELD(ISC_SUB422_CTRL, 0, 0),
+		REG_FIELD(ISC_SUB420_CTRL, 0, 0),
+	};
+
+	for (i = 0; i < ISC_PIPE_LINE_NODE_NUM; i++) {
+		regs = devm_regmap_field_alloc(dev, regmap, regfields[i]);
+		if (IS_ERR(regs))
+			return PTR_ERR(regs);
+
+		isc->pipeline[i] =  regs;
+	}
+
+	return 0;
+}
+
+static int isc_parse_dt(struct device *dev, struct isc_device *isc)
+{
+	struct device_node *np = dev->of_node;
+	struct device_node *epn = NULL, *rem;
+	struct v4l2_of_endpoint v4l2_epn;
+	struct isc_subdev_entity *subdev_entity;
+	unsigned int flags;
+	int ret;
+
+	INIT_LIST_HEAD(&isc->subdev_entities);
+
+	for (; ;) {
+		epn = of_graph_get_next_endpoint(np, epn);
+		if (!epn)
+			break;
+
+		rem = of_graph_get_remote_port_parent(epn);
+		if (!rem) {
+			dev_notice(dev, "Remote device at %s not found\n",
+				   of_node_full_name(epn));
+			continue;
+		}
+
+		ret = v4l2_of_parse_endpoint(epn, &v4l2_epn);
+		if (ret) {
+			of_node_put(rem);
+			ret = -EINVAL;
+			dev_err(dev, "Could not parse the endpoint\n");
+			break;
+		}
+
+		subdev_entity = devm_kzalloc(dev,
+					  sizeof(*subdev_entity), GFP_KERNEL);
+		if (subdev_entity == NULL) {
+			of_node_put(rem);
+			ret = -ENOMEM;
+			break;
+		}
+
+		subdev_entity->asd = devm_kzalloc(dev,
+				     sizeof(*subdev_entity->asd), GFP_KERNEL);
+		if (subdev_entity->asd == NULL) {
+			of_node_put(rem);
+			ret = -ENOMEM;
+			break;
+		}
+
+		flags = v4l2_epn.bus.parallel.flags;
+
+		if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+			subdev_entity->pfe_cfg0 = ISC_PFE_CFG0_HPOL_LOW;
+
+		if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+			subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_VPOL_LOW;
+
+		if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+			subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_PPOL_LOW;
+
+		subdev_entity->asd->match_type = V4L2_ASYNC_MATCH_OF;
+		subdev_entity->asd->match.of.node = rem;
+		list_add_tail(&subdev_entity->list, &isc->subdev_entities);
+	}
+
+	of_node_put(epn);
+	return ret;
+}
+
+/* regmap configuration */
+#define ATMEL_ISC_REG_MAX    0xbfc
+static const struct regmap_config isc_regmap_config = {
+	.reg_bits       = 32,
+	.reg_stride     = 4,
+	.val_bits       = 32,
+	.max_register	= ATMEL_ISC_REG_MAX,
+};
+
+static int atmel_isc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct isc_device *isc;
+	struct resource *res;
+	void __iomem *io_base;
+	struct isc_subdev_entity *subdev_entity;
+	int irq;
+	int ret;
+
+	isc = devm_kzalloc(dev, sizeof(*isc), GFP_KERNEL);
+	if (!isc)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, isc);
+	isc->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	io_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(io_base))
+		return PTR_ERR(io_base);
+
+	isc->regmap = devm_regmap_init_mmio(dev, io_base, &isc_regmap_config);
+	if (IS_ERR(isc->regmap)) {
+		ret = PTR_ERR(isc->regmap);
+		dev_err(dev, "failed to init register map: %d\n", ret);
+		return ret;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		ret = irq;
+		dev_err(dev, "failed to get irq: %d\n", ret);
+		return ret;
+	}
+
+	ret = devm_request_irq(dev, irq, isc_interrupt, 0,
+			       ATMEL_ISC_NAME, isc);
+	if (ret < 0) {
+		dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
+			irq, ret);
+		return ret;
+	}
+
+	ret = isc_pipeline_init(isc);
+	if (ret)
+		return ret;
+
+	isc->hclock = devm_clk_get(dev, "hclock");
+	if (IS_ERR(isc->hclock)) {
+		ret = PTR_ERR(isc->hclock);
+		dev_err(dev, "failed to get hclock: %d\n", ret);
+		return ret;
+	}
+
+	ret = isc_clk_init(isc);
+	if (ret) {
+		dev_err(dev, "failed to init isc clock: %d\n", ret);
+		goto clean_isc_clk;
+	}
+
+	isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
+
+	/* ispck should be greater or equal to hclock */
+	ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
+	if (ret) {
+		dev_err(dev, "failed to set ispck rate: %d\n", ret);
+		goto clean_isc_clk;
+	}
+
+	ret = v4l2_device_register(dev, &isc->v4l2_dev);
+	if (ret) {
+		dev_err(dev, "unable to register v4l2 device.\n");
+		goto clean_isc_clk;
+	}
+
+	ret = isc_parse_dt(dev, isc);
+	if (ret) {
+		dev_err(dev, "fail to parse device tree\n");
+		goto unregister_v4l2_device;
+	}
+
+	if (list_empty(&isc->subdev_entities)) {
+		dev_err(dev, "no subdev found\n");
+		goto unregister_v4l2_device;
+	}
+
+	list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
+		subdev_entity->notifier.subdevs = &subdev_entity->asd;
+		subdev_entity->notifier.num_subdevs = 1;
+		subdev_entity->notifier.bound = isc_async_bound;
+		subdev_entity->notifier.unbind = isc_async_unbind;
+		subdev_entity->notifier.complete = isc_async_complete;
+
+		ret = v4l2_async_notifier_register(&isc->v4l2_dev,
+						   &subdev_entity->notifier);
+		if (ret) {
+			dev_err(dev, "fail to register async notifier\n");
+			goto cleanup_subdev;
+		}
+
+		if (video_is_registered(&isc->video_dev))
+			break;
+	}
+
+	pm_runtime_enable(dev);
+
+	return 0;
+
+cleanup_subdev:
+	isc_subdev_cleanup(isc);
+
+unregister_v4l2_device:
+	v4l2_device_unregister(&isc->v4l2_dev);
+
+clean_isc_clk:
+	isc_clk_cleanup(isc);
+
+	return ret;
+}
+
+static int atmel_isc_remove(struct platform_device *pdev)
+{
+	struct isc_device *isc = platform_get_drvdata(pdev);
+
+	pm_runtime_disable(&pdev->dev);
+
+	isc_subdev_cleanup(isc);
+
+	v4l2_device_unregister(&isc->v4l2_dev);
+
+	isc_clk_cleanup(isc);
+
+	return 0;
+}
+
+static int __maybe_unused isc_runtime_suspend(struct device *dev)
+{
+	struct isc_device *isc = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(isc->ispck);
+	clk_disable_unprepare(isc->hclock);
+
+	return 0;
+}
+
+static int __maybe_unused isc_runtime_resume(struct device *dev)
+{
+	struct isc_device *isc = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(isc->hclock);
+	if (ret)
+		return ret;
+
+	return clk_prepare_enable(isc->ispck);
+}
+
+static const struct dev_pm_ops atmel_isc_dev_pm_ops = {
+	SET_RUNTIME_PM_OPS(isc_runtime_suspend, isc_runtime_resume, NULL)
+};
+
+static const struct of_device_id atmel_isc_of_match[] = {
+	{ .compatible = "atmel,sama5d2-isc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, atmel_isc_of_match);
+
+static struct platform_driver atmel_isc_driver = {
+	.probe	= atmel_isc_probe,
+	.remove	= atmel_isc_remove,
+	.driver	= {
+		.name		= ATMEL_ISC_NAME,
+		.pm		= &atmel_isc_dev_pm_ops,
+		.of_match_table = of_match_ptr(atmel_isc_of_match),
+	},
+};
+
+module_platform_driver(atmel_isc_driver);
+
+MODULE_AUTHOR("Songjun Wu <songjun.wu@microchip.com>");
+MODULE_DESCRIPTION("The V4L2 driver for Atmel-ISC");
+MODULE_LICENSE("GPL v2");
+MODULE_SUPPORTED_DEVICE("video");
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 0b1709e..a9bc017 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -440,7 +440,7 @@
 	/*
 	 * Application initially set the image format. Current display
 	 * size is obtained from the vpbe display controller. expected_xsize
-	 * and expected_ysize are set through S_CROP ioctl. Based on this,
+	 * and expected_ysize are set through S_SELECTION ioctl. Based on this,
 	 * driver will calculate the scale factors for vertical and
 	 * horizontal direction so that the image is displayed scaled
 	 * and expanded. Application uses expansion to display the image
@@ -649,24 +649,23 @@
 	return 0;
 }
 
-static int vpbe_display_s_crop(struct file *file, void *priv,
-			     const struct v4l2_crop *crop)
+static int vpbe_display_s_selection(struct file *file, void *priv,
+			     struct v4l2_selection *sel)
 {
 	struct vpbe_layer *layer = video_drvdata(file);
 	struct vpbe_display *disp_dev = layer->disp_dev;
 	struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
 	struct osd_layer_config *cfg = &layer->layer_info.config;
 	struct osd_state *osd_device = disp_dev->osd_device;
-	struct v4l2_rect rect = crop->c;
+	struct v4l2_rect rect = sel->r;
 	int ret;
 
 	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
-		"VIDIOC_S_CROP, layer id = %d\n", layer->device_id);
+		"VIDIOC_S_SELECTION, layer id = %d\n", layer->device_id);
 
-	if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
-		v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buf type\n");
+	if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+	    sel->target != V4L2_SEL_TGT_CROP)
 		return -EINVAL;
-	}
 
 	if (rect.top < 0)
 		rect.top = 0;
@@ -714,32 +713,45 @@
 	else
 		osd_device->ops.set_interpolation_filter(osd_device, 0);
 
+	sel->r = rect;
 	return 0;
 }
 
-static int vpbe_display_g_crop(struct file *file, void *priv,
-			     struct v4l2_crop *crop)
+static int vpbe_display_g_selection(struct file *file, void *priv,
+				    struct v4l2_selection *sel)
 {
 	struct vpbe_layer *layer = video_drvdata(file);
 	struct osd_layer_config *cfg = &layer->layer_info.config;
 	struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
 	struct osd_state *osd_device = layer->disp_dev->osd_device;
-	struct v4l2_rect *rect = &crop->c;
+	struct v4l2_rect *rect = &sel->r;
 
 	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
-			"VIDIOC_G_CROP, layer id = %d\n",
+			"VIDIOC_G_SELECTION, layer id = %d\n",
 			layer->device_id);
 
-	if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
-		v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buf type\n");
+	if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return -EINVAL;
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP:
+		osd_device->ops.get_layer_config(osd_device,
+						 layer->layer_info.id, cfg);
+		rect->top = cfg->ypos;
+		rect->left = cfg->xpos;
+		rect->width = cfg->xsize;
+		rect->height = cfg->ysize;
+		break;
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		rect->left = 0;
+		rect->top = 0;
+		rect->width = vpbe_dev->current_timings.xres;
+		rect->height = vpbe_dev->current_timings.yres;
+		break;
+	default:
 		return -EINVAL;
 	}
-	osd_device->ops.get_layer_config(osd_device,
-				layer->layer_info.id, cfg);
-	rect->top = cfg->ypos;
-	rect->left = cfg->xpos;
-	rect->width = cfg->xsize;
-	rect->height = cfg->ysize;
 
 	return 0;
 }
@@ -752,13 +764,10 @@
 
 	v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_CROPCAP ioctl\n");
 
-	cropcap->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
-	cropcap->bounds.left = 0;
-	cropcap->bounds.top = 0;
-	cropcap->bounds.width = vpbe_dev->current_timings.xres;
-	cropcap->bounds.height = vpbe_dev->current_timings.yres;
+	if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return -EINVAL;
+
 	cropcap->pixelaspect = vpbe_dev->current_timings.aspect;
-	cropcap->defrect = cropcap->bounds;
 	return 0;
 }
 
@@ -1251,8 +1260,8 @@
 	.vidioc_expbuf		 = vb2_ioctl_expbuf,
 
 	.vidioc_cropcap		 = vpbe_display_cropcap,
-	.vidioc_g_crop		 = vpbe_display_g_crop,
-	.vidioc_s_crop		 = vpbe_display_s_crop,
+	.vidioc_g_selection	 = vpbe_display_g_selection,
+	.vidioc_s_selection	 = vpbe_display_s_selection,
 
 	.vidioc_s_std		 = vpbe_display_s_std,
 	.vidioc_g_std		 = vpbe_display_g_std,
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 7767e07..6efb2f1 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -1610,38 +1610,53 @@
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_cropcap\n");
 
-	if (vpfe_dev->std_index >= ARRAY_SIZE(vpfe_standards))
+	if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
 		return -EINVAL;
+	/* If std_index is invalid, then just return (== 1:1 aspect) */
+	if (vpfe_dev->std_index >= ARRAY_SIZE(vpfe_standards))
+		return 0;
 
-	memset(crop, 0, sizeof(struct v4l2_cropcap));
-	crop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	crop->bounds.width = crop->defrect.width =
-		vpfe_standards[vpfe_dev->std_index].width;
-	crop->bounds.height = crop->defrect.height =
-		vpfe_standards[vpfe_dev->std_index].height;
 	crop->pixelaspect = vpfe_standards[vpfe_dev->std_index].pixelaspect;
 	return 0;
 }
 
-static int vpfe_g_crop(struct file *file, void *priv,
-			     struct v4l2_crop *crop)
+static int vpfe_g_selection(struct file *file, void *priv,
+			    struct v4l2_selection *sel)
 {
 	struct vpfe_device *vpfe_dev = video_drvdata(file);
 
-	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_crop\n");
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_selection\n");
 
-	crop->c = vpfe_dev->crop;
+	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP:
+		sel->r = vpfe_dev->crop;
+		break;
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		sel->r.width = vpfe_standards[vpfe_dev->std_index].width;
+		sel->r.height = vpfe_standards[vpfe_dev->std_index].height;
+		break;
+	default:
+		return -EINVAL;
+	}
 	return 0;
 }
 
-static int vpfe_s_crop(struct file *file, void *priv,
-			     const struct v4l2_crop *crop)
+static int vpfe_s_selection(struct file *file, void *priv,
+			    struct v4l2_selection *sel)
 {
 	struct vpfe_device *vpfe_dev = video_drvdata(file);
-	struct v4l2_rect rect = crop->c;
+	struct v4l2_rect rect = sel->r;
 	int ret = 0;
 
-	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_crop\n");
+	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_selection\n");
+
+	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+	    sel->target != V4L2_SEL_TGT_CROP)
+		return -EINVAL;
 
 	if (vpfe_dev->started) {
 		/* make sure streaming is not started */
@@ -1669,7 +1684,7 @@
 		vpfe_dev->std_info.active_pixels) ||
 	    (rect.top + rect.height >
 		vpfe_dev->std_info.active_lines)) {
-		v4l2_err(&vpfe_dev->v4l2_dev, "Error in S_CROP params\n");
+		v4l2_err(&vpfe_dev->v4l2_dev, "Error in S_SELECTION params\n");
 		ret = -EINVAL;
 		goto unlock_out;
 	}
@@ -1682,6 +1697,7 @@
 		vpfe_dev->fmt.fmt.pix.bytesperline *
 		vpfe_dev->fmt.fmt.pix.height;
 	vpfe_dev->crop = rect;
+	sel->r = rect;
 unlock_out:
 	mutex_unlock(&vpfe_dev->lock);
 	return ret;
@@ -1760,8 +1776,8 @@
 	.vidioc_streamon	 = vpfe_streamon,
 	.vidioc_streamoff	 = vpfe_streamoff,
 	.vidioc_cropcap		 = vpfe_cropcap,
-	.vidioc_g_crop		 = vpfe_g_crop,
-	.vidioc_s_crop		 = vpfe_s_crop,
+	.vidioc_g_selection	 = vpfe_g_selection,
+	.vidioc_s_selection	 = vpfe_s_selection,
 	.vidioc_default		 = vpfe_param_handler,
 };
 
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index ec6494c..9f03b79 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -261,7 +261,7 @@
 		v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
 }
 
-static struct vb2_ops gsc_m2m_qops = {
+static const struct vb2_ops gsc_m2m_qops = {
 	.queue_setup	 = gsc_m2m_queue_setup,
 	.buf_prepare	 = gsc_m2m_buf_prepare,
 	.buf_queue	 = gsc_m2m_buf_queue,
@@ -277,9 +277,10 @@
 	struct gsc_ctx *ctx = fh_to_ctx(fh);
 	struct gsc_dev *gsc = ctx->gsc_dev;
 
-	strlcpy(cap->driver, gsc->pdev->name, sizeof(cap->driver));
-	strlcpy(cap->card, gsc->pdev->name, sizeof(cap->card));
-	strlcpy(cap->bus_info, "platform", sizeof(cap->bus_info));
+	strlcpy(cap->driver, GSC_MODULE_NAME, sizeof(cap->driver));
+	strlcpy(cap->card, GSC_MODULE_NAME " gscaler", sizeof(cap->card));
+	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+		 dev_name(&gsc->pdev->dev));
 	cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE |
 		V4L2_CAP_VIDEO_CAPTURE_MPLANE |	V4L2_CAP_VIDEO_OUTPUT_MPLANE;
 
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index fdec499..964f4a6 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -452,7 +452,7 @@
 	spin_unlock_irqrestore(&fimc->slock, flags);
 }
 
-static struct vb2_ops fimc_capture_qops = {
+static const struct vb2_ops fimc_capture_qops = {
 	.queue_setup		= queue_setup,
 	.buf_prepare		= buffer_prepare,
 	.buf_queue		= buffer_queue,
@@ -1796,6 +1796,7 @@
 	vid_cap->wb_fmt.code = fmt->mbus_code;
 
 	vid_cap->vd_pad.flags = MEDIA_PAD_FL_SINK;
+	vfd->entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
 	ret = media_entity_pads_init(&vfd->entity, 1, &vid_cap->vd_pad);
 	if (ret)
 		goto err_free_ctx;
diff --git a/drivers/media/platform/exynos4-is/fimc-is-i2c.c b/drivers/media/platform/exynos4-is/fimc-is-i2c.c
index 7521aa5..6bba4ca 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-i2c.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-i2c.c
@@ -55,26 +55,33 @@
 	i2c_adap->algo = &fimc_is_i2c_algorithm;
 	i2c_adap->class = I2C_CLASS_SPD;
 
-	ret = i2c_add_adapter(i2c_adap);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "failed to add I2C bus %s\n",
-						node->full_name);
-		return ret;
-	}
-
 	platform_set_drvdata(pdev, isp_i2c);
-
 	pm_runtime_enable(&pdev->dev);
-	pm_runtime_enable(&i2c_adap->dev);
 
+	ret = i2c_add_adapter(i2c_adap);
+	if (ret < 0)
+		goto err_pm_dis;
+	/*
+	 * Client drivers of this adapter don't do any I2C transfers as that
+	 * is handled by the ISP firmware.  But we rely on the runtime PM
+	 * state propagation from the clients up to the adapter driver so
+	 * clear the ignore_children flags here.  PM rutnime calls are not
+	 * used in probe() handler of clients of this adapter so there is
+	 * no issues with clearing the flag right after registering the I2C
+	 * adapter.
+	 */
+	pm_suspend_ignore_children(&i2c_adap->dev, false);
 	return 0;
+
+err_pm_dis:
+	pm_runtime_disable(&pdev->dev);
+	return ret;
 }
 
 static int fimc_is_i2c_remove(struct platform_device *pdev)
 {
 	struct fimc_is_i2c *isp_i2c = platform_get_drvdata(pdev);
 
-	pm_runtime_disable(&isp_i2c->adapter.dev);
 	pm_runtime_disable(&pdev->dev);
 	i2c_del_adapter(&isp_i2c->adapter);
 
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 32ca55f..518ad34 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -52,6 +52,9 @@
 	[ISS_CLK_DRC]			= "drc",
 	[ISS_CLK_FD]			= "fd",
 	[ISS_CLK_MCUISP]		= "mcuisp",
+	[ISS_CLK_GICISP]		= "gicisp",
+	[ISS_CLK_PWM_ISP]		= "pwm_isp",
+	[ISS_CLK_MCUCTL_ISP]		= "mcuctl_isp",
 	[ISS_CLK_UART]			= "uart",
 	[ISS_CLK_ISP_DIV0]		= "ispdiv0",
 	[ISS_CLK_ISP_DIV1]		= "ispdiv1",
@@ -165,6 +168,7 @@
 						struct device_node *node)
 {
 	struct fimc_is_sensor *sensor = &is->sensor[index];
+	struct device_node *ep, *port;
 	u32 tmp = 0;
 	int ret;
 
@@ -175,22 +179,25 @@
 		return -EINVAL;
 	}
 
-	node = of_graph_get_next_endpoint(node, NULL);
-	if (!node)
+	ep = of_graph_get_next_endpoint(node, NULL);
+	if (!ep)
 		return -ENXIO;
 
-	node = of_graph_get_remote_port(node);
-	if (!node)
+	port = of_graph_get_remote_port(ep);
+	of_node_put(ep);
+	if (!port)
 		return -ENXIO;
 
 	/* Use MIPI-CSIS channel id to determine the ISP I2C bus index. */
-	ret = of_property_read_u32(node, "reg", &tmp);
+	ret = of_property_read_u32(port, "reg", &tmp);
 	if (ret < 0) {
 		dev_err(&is->pdev->dev, "reg property not found at: %s\n",
-							 node->full_name);
+							 port->full_name);
+		of_node_put(port);
 		return ret;
 	}
 
+	of_node_put(port);
 	sensor->i2c_bus = tmp - FIMC_INPUT_MIPI_CSI2_0;
 	return 0;
 }
@@ -845,13 +852,18 @@
 		goto err_pm;
 
 	vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+	if (ret < 0)
+		goto err_pm;
+
 	/*
 	 * Register FIMC-IS V4L2 subdevs to this driver. The video nodes
 	 * will be created within the subdev's registered() callback.
 	 */
 	ret = fimc_is_register_subdevs(is);
 	if (ret < 0)
-		goto err_pm;
+		goto err_of_dep;
 
 	ret = fimc_is_debugfs_create(is);
 	if (ret < 0)
@@ -870,6 +882,8 @@
 	fimc_is_debugfs_remove(is);
 err_sd:
 	fimc_is_unregister_subdevs(is);
+err_of_dep:
+	of_platform_depopulate(dev);
 err_pm:
 	if (!pm_runtime_enabled(dev))
 		fimc_is_runtime_suspend(dev);
@@ -929,6 +943,7 @@
 	if (!pm_runtime_status_suspended(dev))
 		fimc_is_runtime_suspend(dev);
 	free_irq(is->irq, is);
+	of_platform_depopulate(dev);
 	fimc_is_unregister_subdevs(is);
 	vb2_dma_contig_clear_max_seg_size(dev);
 	fimc_is_put_clocks(is);
diff --git a/drivers/media/platform/exynos4-is/fimc-is.h b/drivers/media/platform/exynos4-is/fimc-is.h
index 3a82c6a..ee05da0 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.h
+++ b/drivers/media/platform/exynos4-is/fimc-is.h
@@ -77,6 +77,9 @@
 	ISS_CLK_DRC,
 	ISS_CLK_FD,
 	ISS_CLK_MCUISP,
+	ISS_CLK_GICISP,
+	ISS_CLK_PWM_ISP,
+	ISS_CLK_MCUCTL_ISP,
 	ISS_CLK_UART,
 	ISS_GATE_CLKS_MAX,
 	ISS_CLK_ISP_DIV0 = ISS_GATE_CLKS_MAX,
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index 293b807..8efe916 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -705,6 +705,7 @@
 	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
 	snprintf(sd->name, sizeof(sd->name), "FIMC-IS-ISP");
 
+	sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
 	isp->subdev_pads[FIMC_ISP_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
 	isp->subdev_pads[FIMC_ISP_SD_PAD_SRC_FIFO].flags = MEDIA_PAD_FL_SOURCE;
 	isp->subdev_pads[FIMC_ISP_SD_PAD_SRC_DMA].flags = MEDIA_PAD_FL_SOURCE;
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index a0f149f..b91abf1 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -1432,6 +1432,7 @@
 
 	sd->ctrl_handler = handler;
 	sd->internal_ops = &fimc_lite_subdev_internal_ops;
+	sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
 	sd->entity.ops = &fimc_lite_subdev_media_ops;
 	sd->owner = THIS_MODULE;
 	v4l2_set_subdevdata(sd, fimc);
@@ -1454,25 +1455,17 @@
 	if (IS_ERR(fimc->clock))
 		return;
 
-	clk_unprepare(fimc->clock);
 	clk_put(fimc->clock);
 	fimc->clock = ERR_PTR(-EINVAL);
 }
 
 static int fimc_lite_clk_get(struct fimc_lite *fimc)
 {
-	int ret;
-
 	fimc->clock = clk_get(&fimc->pdev->dev, FLITE_CLK_NAME);
 	if (IS_ERR(fimc->clock))
 		return PTR_ERR(fimc->clock);
 
-	ret = clk_prepare(fimc->clock);
-	if (ret < 0) {
-		clk_put(fimc->clock);
-		fimc->clock = ERR_PTR(-EINVAL);
-	}
-	return ret;
+	return 0;
 }
 
 static const struct of_device_id flite_of_match[];
@@ -1543,7 +1536,7 @@
 	pm_runtime_enable(dev);
 
 	if (!pm_runtime_enabled(dev)) {
-		ret = clk_enable(fimc->clock);
+		ret = clk_prepare_enable(fimc->clock);
 		if (ret < 0)
 			goto err_sd;
 	}
@@ -1568,7 +1561,7 @@
 {
 	struct fimc_lite *fimc = dev_get_drvdata(dev);
 
-	clk_enable(fimc->clock);
+	clk_prepare_enable(fimc->clock);
 	return 0;
 }
 
@@ -1576,7 +1569,7 @@
 {
 	struct fimc_lite *fimc = dev_get_drvdata(dev);
 
-	clk_disable(fimc->clock);
+	clk_disable_unprepare(fimc->clock);
 	return 0;
 }
 #endif
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index b1309e1..6028e4f 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -219,7 +219,7 @@
 	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 }
 
-static struct vb2_ops fimc_qops = {
+static const struct vb2_ops fimc_qops = {
 	.queue_setup	 = fimc_queue_setup,
 	.buf_prepare	 = fimc_buf_prepare,
 	.buf_queue	 = fimc_buf_queue,
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 891625e..1a1154a 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1190,6 +1190,10 @@
 	return ret ? -EPIPE : 0;
 }
 
+static const struct media_device_ops fimc_md_ops = {
+	.link_notify = fimc_md_link_notify,
+};
+
 static ssize_t fimc_md_sysfs_show(struct device *dev,
 				  struct device_attribute *attr, char *buf)
 {
@@ -1416,7 +1420,7 @@
 
 	strlcpy(fmd->media_dev.model, "SAMSUNG S5P FIMC",
 		sizeof(fmd->media_dev.model));
-	fmd->media_dev.link_notify = fimc_md_link_notify;
+	fmd->media_dev.ops = &fimc_md_ops;
 	fmd->media_dev.dev = dev;
 
 	v4l2_dev = &fmd->v4l2_dev;
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index 86e681d..befd9fc 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -853,6 +853,7 @@
 	state->format.width = S5PCSIS_DEF_PIX_WIDTH;
 	state->format.height = S5PCSIS_DEF_PIX_HEIGHT;
 
+	state->sd.entity.function = MEDIA_ENT_F_IO_V4L;
 	state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
 	state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
 	ret = media_entity_pads_init(&state->sd.entity,
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 0fcb5c78..bedc7cc 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -852,7 +852,7 @@
 	v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
 }
 
-static struct vb2_ops deinterlace_qops = {
+static const struct vb2_ops deinterlace_qops = {
 	.queue_setup	 = deinterlace_queue_setup,
 	.buf_prepare	 = deinterlace_buf_prepare,
 	.buf_queue	 = deinterlace_buf_queue,
@@ -1016,7 +1016,7 @@
 		return -ENODEV;
 
 	if (!dma_has_cap(DMA_INTERLEAVE, pcdev->dma_chan->device->cap_mask)) {
-		v4l2_err(&pcdev->v4l2_dev, "DMA does not support INTERLEAVE\n");
+		dev_err(&pdev->dev, "DMA does not support INTERLEAVE\n");
 		goto rel_dma;
 	}
 
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index 3a8e695..c8eaa41 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -239,7 +239,7 @@
 	enum mtk_encode_param param_change;
 	struct mtk_enc_params enc_params;
 
-	struct venc_common_if *enc_if;
+	const struct venc_common_if *enc_if;
 	unsigned long drv_handle;
 
 	int int_cond;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index 2c5719a..1b1a28a 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -243,6 +243,8 @@
 			a->parm.output.timeperframe.numerator;
 	ctx->param_change |= MTK_ENCODE_PARAM_FRAMERATE;
 
+	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+
 	return 0;
 }
 
@@ -254,6 +256,7 @@
 	if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
 		return -EINVAL;
 
+	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
 	a->parm.output.timeperframe.denominator =
 			ctx->enc_params.framerate_num;
 	a->parm.output.timeperframe.numerator =
@@ -621,6 +624,69 @@
 	return vidioc_try_fmt(f, fmt);
 }
 
+static int vidioc_venc_g_selection(struct file *file, void *priv,
+				     struct v4l2_selection *s)
+{
+	struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+	struct mtk_q_data *q_data;
+
+	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return -EINVAL;
+
+	q_data = mtk_venc_get_q_data(ctx, s->type);
+	if (!q_data)
+		return -EINVAL;
+
+	switch (s->target) {
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		s->r.top = 0;
+		s->r.left = 0;
+		s->r.width = q_data->coded_width;
+		s->r.height = q_data->coded_height;
+		break;
+	case V4L2_SEL_TGT_CROP:
+		s->r.top = 0;
+		s->r.left = 0;
+		s->r.width = q_data->visible_width;
+		s->r.height = q_data->visible_height;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vidioc_venc_s_selection(struct file *file, void *priv,
+				     struct v4l2_selection *s)
+{
+	struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+	struct mtk_q_data *q_data;
+
+	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return -EINVAL;
+
+	q_data = mtk_venc_get_q_data(ctx, s->type);
+	if (!q_data)
+		return -EINVAL;
+
+	switch (s->target) {
+	case V4L2_SEL_TGT_CROP:
+		/* Only support crop from (0,0) */
+		s->r.top = 0;
+		s->r.left = 0;
+		s->r.width = min(s->r.width, q_data->coded_width);
+		s->r.height = min(s->r.height, q_data->coded_height);
+		q_data->visible_width = s->r.width;
+		q_data->visible_height = s->r.height;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int vidioc_venc_qbuf(struct file *file, void *priv,
 			    struct v4l2_buffer *buf)
 {
@@ -679,6 +745,9 @@
 
 	.vidioc_create_bufs		= v4l2_m2m_ioctl_create_bufs,
 	.vidioc_prepare_buf		= v4l2_m2m_ioctl_prepare_buf,
+
+	.vidioc_g_selection		= vidioc_venc_g_selection,
+	.vidioc_s_selection		= vidioc_venc_s_selection,
 };
 
 static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
@@ -854,7 +923,7 @@
 	ctx->state = MTK_STATE_FREE;
 }
 
-static struct vb2_ops mtk_venc_vb2_ops = {
+static const struct vb2_ops mtk_venc_vb2_ops = {
 	.queue_setup		= vb2ops_venc_queue_setup,
 	.buf_prepare		= vb2ops_venc_buf_prepare,
 	.buf_queue		= vb2ops_venc_buf_queue,
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
index 63d4be4..b76c80b 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
@@ -664,16 +664,16 @@
 	return ret;
 }
 
-static struct venc_common_if venc_h264_if = {
+static const struct venc_common_if venc_h264_if = {
 	h264_enc_init,
 	h264_enc_encode,
 	h264_enc_set_param,
 	h264_enc_deinit,
 };
 
-struct venc_common_if *get_h264_enc_comm_if(void);
+const struct venc_common_if *get_h264_enc_comm_if(void);
 
-struct venc_common_if *get_h264_enc_comm_if(void)
+const struct venc_common_if *get_h264_enc_comm_if(void)
 {
 	return &venc_h264_if;
 }
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
index 6d97584..544f571 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
@@ -469,16 +469,16 @@
 	return ret;
 }
 
-static struct venc_common_if venc_vp8_if = {
+static const struct venc_common_if venc_vp8_if = {
 	vp8_enc_init,
 	vp8_enc_encode,
 	vp8_enc_set_param,
 	vp8_enc_deinit,
 };
 
-struct venc_common_if *get_vp8_enc_comm_if(void);
+const struct venc_common_if *get_vp8_enc_comm_if(void);
 
-struct venc_common_if *get_vp8_enc_comm_if(void)
+const struct venc_common_if *get_vp8_enc_comm_if(void)
 {
 	return &venc_vp8_if;
 }
diff --git a/drivers/media/platform/mtk-vcodec/venc_drv_if.c b/drivers/media/platform/mtk-vcodec/venc_drv_if.c
index c4c83e7..d02d5f1 100644
--- a/drivers/media/platform/mtk-vcodec/venc_drv_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc_drv_if.c
@@ -26,8 +26,8 @@
 #include "mtk_vcodec_enc_pm.h"
 #include "mtk_vpu.h"
 
-struct venc_common_if *get_h264_enc_comm_if(void);
-struct venc_common_if *get_vp8_enc_comm_if(void);
+const struct venc_common_if *get_h264_enc_comm_if(void);
+const struct venc_common_if *get_vp8_enc_comm_if(void);
 
 int venc_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
 {
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index c639406..e68d271 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -743,7 +743,7 @@
 	v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
 }
 
-static struct vb2_ops emmaprp_qops = {
+static const struct vb2_ops emmaprp_qops = {
 	.queue_setup	 = emmaprp_queue_setup,
 	.buf_prepare	 = emmaprp_buf_prepare,
 	.buf_queue	 = emmaprp_buf_queue,
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 6b01e12..a31b95c 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -214,7 +214,7 @@
 	if (!vec)
 		return -ENOMEM;
 
-	ret = get_vaddr_frames(virtp, 1, true, false, vec);
+	ret = get_vaddr_frames(virtp, 1, FOLL_WRITE, vec);
 	if (ret != 1) {
 		frame_vector_destroy(vec);
 		return -EINVAL;
@@ -1247,36 +1247,33 @@
 	return 0;
 }
 
-static int vidioc_cropcap(struct file *file, void *fh,
-		struct v4l2_cropcap *cropcap)
+static int vidioc_g_selection(struct file *file, void *fh, struct v4l2_selection *sel)
 {
 	struct omap_vout_device *vout = fh;
 	struct v4l2_pix_format *pix = &vout->pix;
 
-	if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+	if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
 		return -EINVAL;
 
-	/* Width and height are always even */
-	cropcap->bounds.width = pix->width & ~1;
-	cropcap->bounds.height = pix->height & ~1;
-
-	omap_vout_default_crop(&vout->pix, &vout->fbuf, &cropcap->defrect);
-	cropcap->pixelaspect.numerator = 1;
-	cropcap->pixelaspect.denominator = 1;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP:
+		sel->r = vout->crop;
+		break;
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		omap_vout_default_crop(&vout->pix, &vout->fbuf, &sel->r);
+		break;
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		/* Width and height are always even */
+		sel->r.width = pix->width & ~1;
+		sel->r.height = pix->height & ~1;
+		break;
+	default:
+		return -EINVAL;
+	}
 	return 0;
 }
 
-static int vidioc_g_crop(struct file *file, void *fh, struct v4l2_crop *crop)
-{
-	struct omap_vout_device *vout = fh;
-
-	if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
-		return -EINVAL;
-	crop->c = vout->crop;
-	return 0;
-}
-
-static int vidioc_s_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
+static int vidioc_s_selection(struct file *file, void *fh, struct v4l2_selection *sel)
 {
 	int ret = -EINVAL;
 	struct omap_vout_device *vout = fh;
@@ -1285,6 +1282,12 @@
 	struct omap_video_timings *timing;
 	struct omap_dss_device *dssdev;
 
+	if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return -EINVAL;
+
+	if (sel->target != V4L2_SEL_TGT_CROP)
+		return -EINVAL;
+
 	if (vout->streaming)
 		return -EBUSY;
 
@@ -1309,9 +1312,8 @@
 		vout->fbuf.fmt.width = timing->x_res;
 	}
 
-	if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
-		ret = omap_vout_new_crop(&vout->pix, &vout->crop, &vout->win,
-				&vout->fbuf, &crop->c);
+	ret = omap_vout_new_crop(&vout->pix, &vout->crop, &vout->win,
+				 &vout->fbuf, &sel->r);
 
 s_crop_err:
 	mutex_unlock(&vout->lock);
@@ -1780,9 +1782,8 @@
 	.vidioc_try_fmt_vid_out_overlay		= vidioc_try_fmt_vid_overlay,
 	.vidioc_s_fmt_vid_out_overlay		= vidioc_s_fmt_vid_overlay,
 	.vidioc_g_fmt_vid_out_overlay		= vidioc_g_fmt_vid_overlay,
-	.vidioc_cropcap				= vidioc_cropcap,
-	.vidioc_g_crop				= vidioc_g_crop,
-	.vidioc_s_crop				= vidioc_s_crop,
+	.vidioc_g_selection			= vidioc_g_selection,
+	.vidioc_s_selection			= vidioc_s_selection,
 	.vidioc_reqbufs				= vidioc_reqbufs,
 	.vidioc_querybuf			= vidioc_querybuf,
 	.vidioc_qbuf				= vidioc_qbuf,
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 5d54e2c..0321d84 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -657,6 +657,10 @@
 	return IRQ_HANDLED;
 }
 
+static const struct media_device_ops isp_media_ops = {
+	.link_notify = v4l2_pipeline_link_notify,
+};
+
 /* -----------------------------------------------------------------------------
  * Pipeline stream management
  */
@@ -1680,7 +1684,7 @@
 	strlcpy(isp->media_dev.model, "TI OMAP3 ISP",
 		sizeof(isp->media_dev.model));
 	isp->media_dev.hw_revision = isp->revision;
-	isp->media_dev.link_notify = v4l2_pipeline_link_notify;
+	isp->media_dev.ops = &isp_media_ops;
 	media_device_init(&isp->media_dev);
 
 	isp->v4l2_dev.mdev = &isp->media_dev;
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 7d9f359..7354469 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -772,40 +772,45 @@
 }
 
 static int
-isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
-{
-	struct isp_video *video = video_drvdata(file);
-	struct v4l2_subdev *subdev;
-	int ret;
-
-	subdev = isp_video_remote_subdev(video, NULL);
-	if (subdev == NULL)
-		return -EINVAL;
-
-	mutex_lock(&video->mutex);
-	ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
-	mutex_unlock(&video->mutex);
-
-	return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
-}
-
-static int
-isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+isp_video_get_selection(struct file *file, void *fh, struct v4l2_selection *sel)
 {
 	struct isp_video *video = video_drvdata(file);
 	struct v4l2_subdev_format format;
 	struct v4l2_subdev *subdev;
+	struct v4l2_subdev_selection sdsel = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.target = sel->target,
+	};
 	u32 pad;
 	int ret;
 
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP:
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+			return -EINVAL;
+		break;
+	case V4L2_SEL_TGT_COMPOSE:
+	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
 	subdev = isp_video_remote_subdev(video, &pad);
 	if (subdev == NULL)
 		return -EINVAL;
 
-	/* Try the get crop operation first and fallback to get format if not
+	/* Try the get selection operation first and fallback to get format if not
 	 * implemented.
 	 */
-	ret = v4l2_subdev_call(subdev, video, g_crop, crop);
+	sdsel.pad = pad;
+	ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel);
+	if (!ret)
+		sel->r = sdsel.r;
 	if (ret != -ENOIOCTLCMD)
 		return ret;
 
@@ -815,28 +820,50 @@
 	if (ret < 0)
 		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
 
-	crop->c.left = 0;
-	crop->c.top = 0;
-	crop->c.width = format.format.width;
-	crop->c.height = format.format.height;
+	sel->r.left = 0;
+	sel->r.top = 0;
+	sel->r.width = format.format.width;
+	sel->r.height = format.format.height;
 
 	return 0;
 }
 
 static int
-isp_video_set_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
+isp_video_set_selection(struct file *file, void *fh, struct v4l2_selection *sel)
 {
 	struct isp_video *video = video_drvdata(file);
 	struct v4l2_subdev *subdev;
+	struct v4l2_subdev_selection sdsel = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.target = sel->target,
+		.flags = sel->flags,
+		.r = sel->r,
+	};
+	u32 pad;
 	int ret;
 
-	subdev = isp_video_remote_subdev(video, NULL);
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP:
+		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+			return -EINVAL;
+		break;
+	case V4L2_SEL_TGT_COMPOSE:
+		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+	subdev = isp_video_remote_subdev(video, &pad);
 	if (subdev == NULL)
 		return -EINVAL;
 
+	sdsel.pad = pad;
 	mutex_lock(&video->mutex);
-	ret = v4l2_subdev_call(subdev, video, s_crop, crop);
+	ret = v4l2_subdev_call(subdev, pad, set_selection, NULL, &sdsel);
 	mutex_unlock(&video->mutex);
+	if (!ret)
+		sel->r = sdsel.r;
 
 	return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
 }
@@ -1252,9 +1279,8 @@
 	.vidioc_g_fmt_vid_out		= isp_video_get_format,
 	.vidioc_s_fmt_vid_out		= isp_video_set_format,
 	.vidioc_try_fmt_vid_out		= isp_video_try_format,
-	.vidioc_cropcap			= isp_video_cropcap,
-	.vidioc_g_crop			= isp_video_get_crop,
-	.vidioc_s_crop			= isp_video_set_crop,
+	.vidioc_g_selection		= isp_video_get_selection,
+	.vidioc_s_selection		= isp_video_set_selection,
 	.vidioc_g_parm			= isp_video_get_param,
 	.vidioc_s_parm			= isp_video_set_param,
 	.vidioc_reqbufs			= isp_video_reqbufs,
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
new file mode 100644
index 0000000..c12209c
--- /dev/null
+++ b/drivers/media/platform/pxa_camera.c
@@ -0,0 +1,2533 @@
+/*
+ * V4L2 Driver for PXA camera host
+ *
+ * Copyright (C) 2006, Sascha Hauer, Pengutronix
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ * Copyright (C) 2016, Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/pxa-dma.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-clk.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-of.h>
+
+#include <media/videobuf2-dma-sg.h>
+
+#include <linux/videodev2.h>
+
+#include <linux/platform_data/media/camera-pxa.h>
+
+#define PXA_CAM_VERSION "0.0.6"
+#define PXA_CAM_DRV_NAME "pxa27x-camera"
+
+#define DEFAULT_WIDTH	640
+#define DEFAULT_HEIGHT	480
+
+/* Camera Interface */
+#define CICR0		0x0000
+#define CICR1		0x0004
+#define CICR2		0x0008
+#define CICR3		0x000C
+#define CICR4		0x0010
+#define CISR		0x0014
+#define CIFR		0x0018
+#define CITOR		0x001C
+#define CIBR0		0x0028
+#define CIBR1		0x0030
+#define CIBR2		0x0038
+
+#define CICR0_DMAEN	(1 << 31)	/* DMA request enable */
+#define CICR0_PAR_EN	(1 << 30)	/* Parity enable */
+#define CICR0_SL_CAP_EN	(1 << 29)	/* Capture enable for slave mode */
+#define CICR0_ENB	(1 << 28)	/* Camera interface enable */
+#define CICR0_DIS	(1 << 27)	/* Camera interface disable */
+#define CICR0_SIM	(0x7 << 24)	/* Sensor interface mode mask */
+#define CICR0_TOM	(1 << 9)	/* Time-out mask */
+#define CICR0_RDAVM	(1 << 8)	/* Receive-data-available mask */
+#define CICR0_FEM	(1 << 7)	/* FIFO-empty mask */
+#define CICR0_EOLM	(1 << 6)	/* End-of-line mask */
+#define CICR0_PERRM	(1 << 5)	/* Parity-error mask */
+#define CICR0_QDM	(1 << 4)	/* Quick-disable mask */
+#define CICR0_CDM	(1 << 3)	/* Disable-done mask */
+#define CICR0_SOFM	(1 << 2)	/* Start-of-frame mask */
+#define CICR0_EOFM	(1 << 1)	/* End-of-frame mask */
+#define CICR0_FOM	(1 << 0)	/* FIFO-overrun mask */
+
+#define CICR1_TBIT	(1 << 31)	/* Transparency bit */
+#define CICR1_RGBT_CONV	(0x3 << 29)	/* RGBT conversion mask */
+#define CICR1_PPL	(0x7ff << 15)	/* Pixels per line mask */
+#define CICR1_RGB_CONV	(0x7 << 12)	/* RGB conversion mask */
+#define CICR1_RGB_F	(1 << 11)	/* RGB format */
+#define CICR1_YCBCR_F	(1 << 10)	/* YCbCr format */
+#define CICR1_RGB_BPP	(0x7 << 7)	/* RGB bis per pixel mask */
+#define CICR1_RAW_BPP	(0x3 << 5)	/* Raw bis per pixel mask */
+#define CICR1_COLOR_SP	(0x3 << 3)	/* Color space mask */
+#define CICR1_DW	(0x7 << 0)	/* Data width mask */
+
+#define CICR2_BLW	(0xff << 24)	/* Beginning-of-line pixel clock
+					   wait count mask */
+#define CICR2_ELW	(0xff << 16)	/* End-of-line pixel clock
+					   wait count mask */
+#define CICR2_HSW	(0x3f << 10)	/* Horizontal sync pulse width mask */
+#define CICR2_BFPW	(0x3f << 3)	/* Beginning-of-frame pixel clock
+					   wait count mask */
+#define CICR2_FSW	(0x7 << 0)	/* Frame stabilization
+					   wait count mask */
+
+#define CICR3_BFW	(0xff << 24)	/* Beginning-of-frame line clock
+					   wait count mask */
+#define CICR3_EFW	(0xff << 16)	/* End-of-frame line clock
+					   wait count mask */
+#define CICR3_VSW	(0x3f << 10)	/* Vertical sync pulse width mask */
+#define CICR3_BFPW	(0x3f << 3)	/* Beginning-of-frame pixel clock
+					   wait count mask */
+#define CICR3_LPF	(0x7ff << 0)	/* Lines per frame mask */
+
+#define CICR4_MCLK_DLY	(0x3 << 24)	/* MCLK Data Capture Delay mask */
+#define CICR4_PCLK_EN	(1 << 23)	/* Pixel clock enable */
+#define CICR4_PCP	(1 << 22)	/* Pixel clock polarity */
+#define CICR4_HSP	(1 << 21)	/* Horizontal sync polarity */
+#define CICR4_VSP	(1 << 20)	/* Vertical sync polarity */
+#define CICR4_MCLK_EN	(1 << 19)	/* MCLK enable */
+#define CICR4_FR_RATE	(0x7 << 8)	/* Frame rate mask */
+#define CICR4_DIV	(0xff << 0)	/* Clock divisor mask */
+
+#define CISR_FTO	(1 << 15)	/* FIFO time-out */
+#define CISR_RDAV_2	(1 << 14)	/* Channel 2 receive data available */
+#define CISR_RDAV_1	(1 << 13)	/* Channel 1 receive data available */
+#define CISR_RDAV_0	(1 << 12)	/* Channel 0 receive data available */
+#define CISR_FEMPTY_2	(1 << 11)	/* Channel 2 FIFO empty */
+#define CISR_FEMPTY_1	(1 << 10)	/* Channel 1 FIFO empty */
+#define CISR_FEMPTY_0	(1 << 9)	/* Channel 0 FIFO empty */
+#define CISR_EOL	(1 << 8)	/* End of line */
+#define CISR_PAR_ERR	(1 << 7)	/* Parity error */
+#define CISR_CQD	(1 << 6)	/* Camera interface quick disable */
+#define CISR_CDD	(1 << 5)	/* Camera interface disable done */
+#define CISR_SOF	(1 << 4)	/* Start of frame */
+#define CISR_EOF	(1 << 3)	/* End of frame */
+#define CISR_IFO_2	(1 << 2)	/* FIFO overrun for Channel 2 */
+#define CISR_IFO_1	(1 << 1)	/* FIFO overrun for Channel 1 */
+#define CISR_IFO_0	(1 << 0)	/* FIFO overrun for Channel 0 */
+
+#define CIFR_FLVL2	(0x7f << 23)	/* FIFO 2 level mask */
+#define CIFR_FLVL1	(0x7f << 16)	/* FIFO 1 level mask */
+#define CIFR_FLVL0	(0xff << 8)	/* FIFO 0 level mask */
+#define CIFR_THL_0	(0x3 << 4)	/* Threshold Level for Channel 0 FIFO */
+#define CIFR_RESET_F	(1 << 3)	/* Reset input FIFOs */
+#define CIFR_FEN2	(1 << 2)	/* FIFO enable for channel 2 */
+#define CIFR_FEN1	(1 << 1)	/* FIFO enable for channel 1 */
+#define CIFR_FEN0	(1 << 0)	/* FIFO enable for channel 0 */
+
+#define CICR0_SIM_MP	(0 << 24)
+#define CICR0_SIM_SP	(1 << 24)
+#define CICR0_SIM_MS	(2 << 24)
+#define CICR0_SIM_EP	(3 << 24)
+#define CICR0_SIM_ES	(4 << 24)
+
+#define CICR1_DW_VAL(x)   ((x) & CICR1_DW)	    /* Data bus width */
+#define CICR1_PPL_VAL(x)  (((x) << 15) & CICR1_PPL) /* Pixels per line */
+#define CICR1_COLOR_SP_VAL(x)	(((x) << 3) & CICR1_COLOR_SP)	/* color space */
+#define CICR1_RGB_BPP_VAL(x)	(((x) << 7) & CICR1_RGB_BPP)	/* bpp for rgb */
+#define CICR1_RGBT_CONV_VAL(x)	(((x) << 29) & CICR1_RGBT_CONV)	/* rgbt conv */
+
+#define CICR2_BLW_VAL(x)  (((x) << 24) & CICR2_BLW) /* Beginning-of-line pixel clock wait count */
+#define CICR2_ELW_VAL(x)  (((x) << 16) & CICR2_ELW) /* End-of-line pixel clock wait count */
+#define CICR2_HSW_VAL(x)  (((x) << 10) & CICR2_HSW) /* Horizontal sync pulse width */
+#define CICR2_BFPW_VAL(x) (((x) << 3) & CICR2_BFPW) /* Beginning-of-frame pixel clock wait count */
+#define CICR2_FSW_VAL(x)  (((x) << 0) & CICR2_FSW)  /* Frame stabilization wait count */
+
+#define CICR3_BFW_VAL(x)  (((x) << 24) & CICR3_BFW) /* Beginning-of-frame line clock wait count  */
+#define CICR3_EFW_VAL(x)  (((x) << 16) & CICR3_EFW) /* End-of-frame line clock wait count */
+#define CICR3_VSW_VAL(x)  (((x) << 11) & CICR3_VSW) /* Vertical sync pulse width */
+#define CICR3_LPF_VAL(x)  (((x) << 0) & CICR3_LPF)  /* Lines per frame */
+
+#define CICR0_IRQ_MASK (CICR0_TOM | CICR0_RDAVM | CICR0_FEM | CICR0_EOLM | \
+			CICR0_PERRM | CICR0_QDM | CICR0_CDM | CICR0_SOFM | \
+			CICR0_EOFM | CICR0_FOM)
+
+#define sensor_call(cam, o, f, args...) \
+	v4l2_subdev_call(cam->sensor, o, f, ##args)
+
+/*
+ * Format handling
+ */
+
+/**
+ * enum pxa_mbus_packing - data packing types on the media-bus
+ * @PXA_MBUS_PACKING_NONE:	no packing, bit-for-bit transfer to RAM, one
+ *				sample represents one pixel
+ * @PXA_MBUS_PACKING_2X8_PADHI:	16 bits transferred in 2 8-bit samples, in the
+ *				possibly incomplete byte high bits are padding
+ * @PXA_MBUS_PACKING_EXTEND16:	sample width (e.g., 10 bits) has to be extended
+ *				to 16 bits
+ */
+enum pxa_mbus_packing {
+	PXA_MBUS_PACKING_NONE,
+	PXA_MBUS_PACKING_2X8_PADHI,
+	PXA_MBUS_PACKING_EXTEND16,
+};
+
+/**
+ * enum pxa_mbus_order - sample order on the media bus
+ * @PXA_MBUS_ORDER_LE:		least significant sample first
+ * @PXA_MBUS_ORDER_BE:		most significant sample first
+ */
+enum pxa_mbus_order {
+	PXA_MBUS_ORDER_LE,
+	PXA_MBUS_ORDER_BE,
+};
+
+/**
+ * enum pxa_mbus_layout - planes layout in memory
+ * @PXA_MBUS_LAYOUT_PACKED:		color components packed
+ * @PXA_MBUS_LAYOUT_PLANAR_2Y_U_V:	YUV components stored in 3 planes (4:2:2)
+ * @PXA_MBUS_LAYOUT_PLANAR_2Y_C:	YUV components stored in a luma and a
+ *					chroma plane (C plane is half the size
+ *					of Y plane)
+ * @PXA_MBUS_LAYOUT_PLANAR_Y_C:		YUV components stored in a luma and a
+ *					chroma plane (C plane is the same size
+ *					as Y plane)
+ */
+enum pxa_mbus_layout {
+	PXA_MBUS_LAYOUT_PACKED = 0,
+	PXA_MBUS_LAYOUT_PLANAR_2Y_U_V,
+	PXA_MBUS_LAYOUT_PLANAR_2Y_C,
+	PXA_MBUS_LAYOUT_PLANAR_Y_C,
+};
+
+/**
+ * struct pxa_mbus_pixelfmt - Data format on the media bus
+ * @name:		Name of the format
+ * @fourcc:		Fourcc code, that will be obtained if the data is
+ *			stored in memory in the following way:
+ * @packing:		Type of sample-packing, that has to be used
+ * @order:		Sample order when storing in memory
+ * @bits_per_sample:	How many bits the bridge has to sample
+ */
+struct pxa_mbus_pixelfmt {
+	const char		*name;
+	u32			fourcc;
+	enum pxa_mbus_packing	packing;
+	enum pxa_mbus_order	order;
+	enum pxa_mbus_layout	layout;
+	u8			bits_per_sample;
+};
+
+/**
+ * struct pxa_mbus_lookup - Lookup FOURCC IDs by mediabus codes for pass-through
+ * @code:	mediabus pixel-code
+ * @fmt:	pixel format description
+ */
+struct pxa_mbus_lookup {
+	u32	code;
+	struct pxa_mbus_pixelfmt	fmt;
+};
+
+static const struct pxa_mbus_lookup mbus_fmt[] = {
+{
+	.code = MEDIA_BUS_FMT_YUYV8_2X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_YUYV,
+		.name			= "YUYV",
+		.bits_per_sample	= 8,
+		.packing		= PXA_MBUS_PACKING_2X8_PADHI,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_YVYU8_2X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_YVYU,
+		.name			= "YVYU",
+		.bits_per_sample	= 8,
+		.packing		= PXA_MBUS_PACKING_2X8_PADHI,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_UYVY8_2X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_UYVY,
+		.name			= "UYVY",
+		.bits_per_sample	= 8,
+		.packing		= PXA_MBUS_PACKING_2X8_PADHI,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_VYUY8_2X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_VYUY,
+		.name			= "VYUY",
+		.bits_per_sample	= 8,
+		.packing		= PXA_MBUS_PACKING_2X8_PADHI,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_RGB555,
+		.name			= "RGB555",
+		.bits_per_sample	= 8,
+		.packing		= PXA_MBUS_PACKING_2X8_PADHI,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_RGB555X,
+		.name			= "RGB555X",
+		.bits_per_sample	= 8,
+		.packing		= PXA_MBUS_PACKING_2X8_PADHI,
+		.order			= PXA_MBUS_ORDER_BE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_RGB565,
+		.name			= "RGB565",
+		.bits_per_sample	= 8,
+		.packing		= PXA_MBUS_PACKING_2X8_PADHI,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_RGB565X,
+		.name			= "RGB565X",
+		.bits_per_sample	= 8,
+		.packing		= PXA_MBUS_PACKING_2X8_PADHI,
+		.order			= PXA_MBUS_ORDER_BE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_SBGGR8_1X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SBGGR8,
+		.name			= "Bayer 8 BGGR",
+		.bits_per_sample	= 8,
+		.packing		= PXA_MBUS_PACKING_NONE,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_SBGGR10_1X10,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SBGGR10,
+		.name			= "Bayer 10 BGGR",
+		.bits_per_sample	= 10,
+		.packing		= PXA_MBUS_PACKING_EXTEND16,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_Y8_1X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_GREY,
+		.name			= "Grey",
+		.bits_per_sample	= 8,
+		.packing		= PXA_MBUS_PACKING_NONE,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_Y10_1X10,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_Y10,
+		.name			= "Grey 10bit",
+		.bits_per_sample	= 10,
+		.packing		= PXA_MBUS_PACKING_EXTEND16,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SBGGR10,
+		.name			= "Bayer 10 BGGR",
+		.bits_per_sample	= 8,
+		.packing		= PXA_MBUS_PACKING_2X8_PADHI,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SBGGR10,
+		.name			= "Bayer 10 BGGR",
+		.bits_per_sample	= 8,
+		.packing		= PXA_MBUS_PACKING_2X8_PADHI,
+		.order			= PXA_MBUS_ORDER_BE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_RGB444,
+		.name			= "RGB444",
+		.bits_per_sample	= 8,
+		.packing		= PXA_MBUS_PACKING_2X8_PADHI,
+		.order			= PXA_MBUS_ORDER_BE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_UYVY8_1X16,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_UYVY,
+		.name			= "UYVY 16bit",
+		.bits_per_sample	= 16,
+		.packing		= PXA_MBUS_PACKING_EXTEND16,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_VYUY8_1X16,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_VYUY,
+		.name			= "VYUY 16bit",
+		.bits_per_sample	= 16,
+		.packing		= PXA_MBUS_PACKING_EXTEND16,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_YUYV8_1X16,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_YUYV,
+		.name			= "YUYV 16bit",
+		.bits_per_sample	= 16,
+		.packing		= PXA_MBUS_PACKING_EXTEND16,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_YVYU8_1X16,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_YVYU,
+		.name			= "YVYU 16bit",
+		.bits_per_sample	= 16,
+		.packing		= PXA_MBUS_PACKING_EXTEND16,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_SGRBG8_1X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SGRBG8,
+		.name			= "Bayer 8 GRBG",
+		.bits_per_sample	= 8,
+		.packing		= PXA_MBUS_PACKING_NONE,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SGRBG10DPCM8,
+		.name			= "Bayer 10 BGGR DPCM 8",
+		.bits_per_sample	= 8,
+		.packing		= PXA_MBUS_PACKING_NONE,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_SGBRG10_1X10,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SGBRG10,
+		.name			= "Bayer 10 GBRG",
+		.bits_per_sample	= 10,
+		.packing		= PXA_MBUS_PACKING_EXTEND16,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_SGRBG10_1X10,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SGRBG10,
+		.name			= "Bayer 10 GRBG",
+		.bits_per_sample	= 10,
+		.packing		= PXA_MBUS_PACKING_EXTEND16,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_SRGGB10_1X10,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SRGGB10,
+		.name			= "Bayer 10 RGGB",
+		.bits_per_sample	= 10,
+		.packing		= PXA_MBUS_PACKING_EXTEND16,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_SBGGR12_1X12,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SBGGR12,
+		.name			= "Bayer 12 BGGR",
+		.bits_per_sample	= 12,
+		.packing		= PXA_MBUS_PACKING_EXTEND16,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_SGBRG12_1X12,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SGBRG12,
+		.name			= "Bayer 12 GBRG",
+		.bits_per_sample	= 12,
+		.packing		= PXA_MBUS_PACKING_EXTEND16,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_SGRBG12_1X12,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SGRBG12,
+		.name			= "Bayer 12 GRBG",
+		.bits_per_sample	= 12,
+		.packing		= PXA_MBUS_PACKING_EXTEND16,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+}, {
+	.code = MEDIA_BUS_FMT_SRGGB12_1X12,
+	.fmt = {
+		.fourcc			= V4L2_PIX_FMT_SRGGB12,
+		.name			= "Bayer 12 RGGB",
+		.bits_per_sample	= 12,
+		.packing		= PXA_MBUS_PACKING_EXTEND16,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PACKED,
+	},
+},
+};
+
+static s32 pxa_mbus_bytes_per_line(u32 width, const struct pxa_mbus_pixelfmt *mf)
+{
+	if (mf->layout != PXA_MBUS_LAYOUT_PACKED)
+		return width * mf->bits_per_sample / 8;
+
+	switch (mf->packing) {
+	case PXA_MBUS_PACKING_NONE:
+		return width * mf->bits_per_sample / 8;
+	case PXA_MBUS_PACKING_2X8_PADHI:
+	case PXA_MBUS_PACKING_EXTEND16:
+		return width * 2;
+	}
+	return -EINVAL;
+}
+
+static s32 pxa_mbus_image_size(const struct pxa_mbus_pixelfmt *mf,
+			u32 bytes_per_line, u32 height)
+{
+	switch (mf->packing) {
+	case PXA_MBUS_PACKING_2X8_PADHI:
+		return bytes_per_line * height * 2;
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct pxa_mbus_pixelfmt *pxa_mbus_find_fmtdesc(
+	u32 code,
+	const struct pxa_mbus_lookup *lookup,
+	int n)
+{
+	int i;
+
+	for (i = 0; i < n; i++)
+		if (lookup[i].code == code)
+			return &lookup[i].fmt;
+
+	return NULL;
+}
+
+static const struct pxa_mbus_pixelfmt *pxa_mbus_get_fmtdesc(
+	u32 code)
+{
+	return pxa_mbus_find_fmtdesc(code, mbus_fmt, ARRAY_SIZE(mbus_fmt));
+}
+
+static unsigned int pxa_mbus_config_compatible(const struct v4l2_mbus_config *cfg,
+					unsigned int flags)
+{
+	unsigned long common_flags;
+	bool hsync = true, vsync = true, pclk, data, mode;
+	bool mipi_lanes, mipi_clock;
+
+	common_flags = cfg->flags & flags;
+
+	switch (cfg->type) {
+	case V4L2_MBUS_PARALLEL:
+		hsync = common_flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+					V4L2_MBUS_HSYNC_ACTIVE_LOW);
+		vsync = common_flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+					V4L2_MBUS_VSYNC_ACTIVE_LOW);
+		/* fall through */
+	case V4L2_MBUS_BT656:
+		pclk = common_flags & (V4L2_MBUS_PCLK_SAMPLE_RISING |
+				       V4L2_MBUS_PCLK_SAMPLE_FALLING);
+		data = common_flags & (V4L2_MBUS_DATA_ACTIVE_HIGH |
+				       V4L2_MBUS_DATA_ACTIVE_LOW);
+		mode = common_flags & (V4L2_MBUS_MASTER | V4L2_MBUS_SLAVE);
+		return (!hsync || !vsync || !pclk || !data || !mode) ?
+			0 : common_flags;
+	case V4L2_MBUS_CSI2:
+		mipi_lanes = common_flags & V4L2_MBUS_CSI2_LANES;
+		mipi_clock = common_flags & (V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK |
+					     V4L2_MBUS_CSI2_CONTINUOUS_CLOCK);
+		return (!mipi_lanes || !mipi_clock) ? 0 : common_flags;
+	}
+	return 0;
+}
+
+/**
+ * struct soc_camera_format_xlate - match between host and sensor formats
+ * @code: code of a sensor provided format
+ * @host_fmt: host format after host translation from code
+ *
+ * Host and sensor translation structure. Used in table of host and sensor
+ * formats matchings in soc_camera_device. A host can override the generic list
+ * generation by implementing get_formats(), and use it for format checks and
+ * format setup.
+ */
+struct soc_camera_format_xlate {
+	u32 code;
+	const struct pxa_mbus_pixelfmt *host_fmt;
+};
+
+/*
+ * Structures
+ */
+enum pxa_camera_active_dma {
+	DMA_Y = 0x1,
+	DMA_U = 0x2,
+	DMA_V = 0x4,
+};
+
+/* buffer for one video frame */
+struct pxa_buffer {
+	/* common v4l buffer stuff -- must be first */
+	struct vb2_v4l2_buffer		vbuf;
+	struct list_head		queue;
+	u32	code;
+	int				nb_planes;
+	/* our descriptor lists for Y, U and V channels */
+	struct dma_async_tx_descriptor	*descs[3];
+	dma_cookie_t			cookie[3];
+	struct scatterlist		*sg[3];
+	int				sg_len[3];
+	size_t				plane_sizes[3];
+	int				inwork;
+	enum pxa_camera_active_dma	active_dma;
+};
+
+struct pxa_camera_dev {
+	struct v4l2_device	v4l2_dev;
+	struct video_device	vdev;
+	struct v4l2_async_notifier notifier;
+	struct vb2_queue	vb2_vq;
+	struct v4l2_subdev	*sensor;
+	struct soc_camera_format_xlate *user_formats;
+	const struct soc_camera_format_xlate *current_fmt;
+	struct v4l2_pix_format	current_pix;
+
+	struct v4l2_async_subdev asd;
+	struct v4l2_async_subdev *asds[1];
+
+	/*
+	 * PXA27x is only supposed to handle one camera on its Quick Capture
+	 * interface. If anyone ever builds hardware to enable more than
+	 * one camera, they will have to modify this driver too
+	 */
+	struct clk		*clk;
+
+	unsigned int		irq;
+	void __iomem		*base;
+
+	int			channels;
+	struct dma_chan		*dma_chans[3];
+
+	struct pxacamera_platform_data *pdata;
+	struct resource		*res;
+	unsigned long		platform_flags;
+	unsigned long		ciclk;
+	unsigned long		mclk;
+	u32			mclk_divisor;
+	struct v4l2_clk		*mclk_clk;
+	u16			width_flags;	/* max 10 bits */
+
+	struct list_head	capture;
+
+	spinlock_t		lock;
+	struct mutex		mlock;
+	unsigned int		buf_sequence;
+
+	struct pxa_buffer	*active;
+	struct tasklet_struct	task_eof;
+
+	u32			save_cicr[5];
+};
+
+struct pxa_cam {
+	unsigned long flags;
+};
+
+static const char *pxa_cam_driver_description = "PXA_Camera";
+
+/*
+ * Format translation functions
+ */
+static const struct soc_camera_format_xlate
+*pxa_mbus_xlate_by_fourcc(struct soc_camera_format_xlate *user_formats,
+			  unsigned int fourcc)
+{
+	unsigned int i;
+
+	for (i = 0; user_formats[i].code; i++)
+		if (user_formats[i].host_fmt->fourcc == fourcc)
+			return user_formats + i;
+	return NULL;
+}
+
+static struct soc_camera_format_xlate *pxa_mbus_build_fmts_xlate(
+	struct v4l2_device *v4l2_dev, struct v4l2_subdev *subdev,
+	int (*get_formats)(struct v4l2_device *, unsigned int,
+			   struct soc_camera_format_xlate *xlate))
+{
+	unsigned int i, fmts = 0, raw_fmts = 0;
+	int ret;
+	struct v4l2_subdev_mbus_code_enum code = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+	};
+	struct soc_camera_format_xlate *user_formats;
+
+	while (!v4l2_subdev_call(subdev, pad, enum_mbus_code, NULL, &code)) {
+		raw_fmts++;
+		code.index++;
+	}
+
+	/*
+	 * First pass - only count formats this host-sensor
+	 * configuration can provide
+	 */
+	for (i = 0; i < raw_fmts; i++) {
+		ret = get_formats(v4l2_dev, i, NULL);
+		if (ret < 0)
+			return ERR_PTR(ret);
+		fmts += ret;
+	}
+
+	if (!fmts)
+		return ERR_PTR(-ENXIO);
+
+	user_formats = kcalloc(fmts + 1, sizeof(*user_formats), GFP_KERNEL);
+	if (!user_formats)
+		return ERR_PTR(-ENOMEM);
+
+	/* Second pass - actually fill data formats */
+	fmts = 0;
+	for (i = 0; i < raw_fmts; i++) {
+		ret = get_formats(v4l2_dev, i, user_formats + fmts);
+		if (ret < 0)
+			goto egfmt;
+		fmts += ret;
+	}
+	user_formats[fmts].code = 0;
+
+	return user_formats;
+egfmt:
+	kfree(user_formats);
+	return ERR_PTR(ret);
+}
+
+/*
+ *  Videobuf operations
+ */
+static struct pxa_buffer *vb2_to_pxa_buffer(struct vb2_buffer *vb)
+{
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+	return container_of(vbuf, struct pxa_buffer, vbuf);
+}
+
+static struct device *pcdev_to_dev(struct pxa_camera_dev *pcdev)
+{
+	return pcdev->v4l2_dev.dev;
+}
+
+static struct pxa_camera_dev *v4l2_dev_to_pcdev(struct v4l2_device *v4l2_dev)
+{
+	return container_of(v4l2_dev, struct pxa_camera_dev, v4l2_dev);
+}
+
+static void pxa_camera_dma_irq(struct pxa_camera_dev *pcdev,
+			       enum pxa_camera_active_dma act_dma);
+
+static void pxa_camera_dma_irq_y(void *data)
+{
+	struct pxa_camera_dev *pcdev = data;
+
+	pxa_camera_dma_irq(pcdev, DMA_Y);
+}
+
+static void pxa_camera_dma_irq_u(void *data)
+{
+	struct pxa_camera_dev *pcdev = data;
+
+	pxa_camera_dma_irq(pcdev, DMA_U);
+}
+
+static void pxa_camera_dma_irq_v(void *data)
+{
+	struct pxa_camera_dev *pcdev = data;
+
+	pxa_camera_dma_irq(pcdev, DMA_V);
+}
+
+/**
+ * pxa_init_dma_channel - init dma descriptors
+ * @pcdev: pxa camera device
+ * @vb: videobuffer2 buffer
+ * @dma: dma video buffer
+ * @channel: dma channel (0 => 'Y', 1 => 'U', 2 => 'V')
+ * @cibr: camera Receive Buffer Register
+ *
+ * Prepares the pxa dma descriptors to transfer one camera channel.
+ *
+ * Returns 0 if success or -ENOMEM if no memory is available
+ */
+static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev,
+				struct pxa_buffer *buf, int channel,
+				struct scatterlist *sg, int sglen)
+{
+	struct dma_chan *dma_chan = pcdev->dma_chans[channel];
+	struct dma_async_tx_descriptor *tx;
+
+	tx = dmaengine_prep_slave_sg(dma_chan, sg, sglen, DMA_DEV_TO_MEM,
+				     DMA_PREP_INTERRUPT | DMA_CTRL_REUSE);
+	if (!tx) {
+		dev_err(pcdev_to_dev(pcdev),
+			"dmaengine_prep_slave_sg failed\n");
+		goto fail;
+	}
+
+	tx->callback_param = pcdev;
+	switch (channel) {
+	case 0:
+		tx->callback = pxa_camera_dma_irq_y;
+		break;
+	case 1:
+		tx->callback = pxa_camera_dma_irq_u;
+		break;
+	case 2:
+		tx->callback = pxa_camera_dma_irq_v;
+		break;
+	}
+
+	buf->descs[channel] = tx;
+	return 0;
+fail:
+	dev_dbg(pcdev_to_dev(pcdev),
+		"%s (vb=%p) dma_tx=%p\n",
+		__func__, buf, tx);
+
+	return -ENOMEM;
+}
+
+static void pxa_videobuf_set_actdma(struct pxa_camera_dev *pcdev,
+				    struct pxa_buffer *buf)
+{
+	buf->active_dma = DMA_Y;
+	if (buf->nb_planes == 3)
+		buf->active_dma |= DMA_U | DMA_V;
+}
+
+/**
+ * pxa_dma_start_channels - start DMA channel for active buffer
+ * @pcdev: pxa camera device
+ *
+ * Initialize DMA channels to the beginning of the active video buffer, and
+ * start these channels.
+ */
+static void pxa_dma_start_channels(struct pxa_camera_dev *pcdev)
+{
+	int i;
+
+	for (i = 0; i < pcdev->channels; i++) {
+		dev_dbg(pcdev_to_dev(pcdev),
+			"%s (channel=%d)\n", __func__, i);
+		dma_async_issue_pending(pcdev->dma_chans[i]);
+	}
+}
+
+static void pxa_dma_stop_channels(struct pxa_camera_dev *pcdev)
+{
+	int i;
+
+	for (i = 0; i < pcdev->channels; i++) {
+		dev_dbg(pcdev_to_dev(pcdev),
+			"%s (channel=%d)\n", __func__, i);
+		dmaengine_terminate_all(pcdev->dma_chans[i]);
+	}
+}
+
+static void pxa_dma_add_tail_buf(struct pxa_camera_dev *pcdev,
+				 struct pxa_buffer *buf)
+{
+	int i;
+
+	for (i = 0; i < pcdev->channels; i++) {
+		buf->cookie[i] = dmaengine_submit(buf->descs[i]);
+		dev_dbg(pcdev_to_dev(pcdev),
+			"%s (channel=%d) : submit vb=%p cookie=%d\n",
+			__func__, i, buf, buf->descs[i]->cookie);
+	}
+}
+
+/**
+ * pxa_camera_start_capture - start video capturing
+ * @pcdev: camera device
+ *
+ * Launch capturing. DMA channels should not be active yet. They should get
+ * activated at the end of frame interrupt, to capture only whole frames, and
+ * never begin the capture of a partial frame.
+ */
+static void pxa_camera_start_capture(struct pxa_camera_dev *pcdev)
+{
+	unsigned long cicr0;
+
+	dev_dbg(pcdev_to_dev(pcdev), "%s\n", __func__);
+	__raw_writel(__raw_readl(pcdev->base + CISR), pcdev->base + CISR);
+	/* Enable End-Of-Frame Interrupt */
+	cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_ENB;
+	cicr0 &= ~CICR0_EOFM;
+	__raw_writel(cicr0, pcdev->base + CICR0);
+}
+
+static void pxa_camera_stop_capture(struct pxa_camera_dev *pcdev)
+{
+	unsigned long cicr0;
+
+	pxa_dma_stop_channels(pcdev);
+
+	cicr0 = __raw_readl(pcdev->base + CICR0) & ~CICR0_ENB;
+	__raw_writel(cicr0, pcdev->base + CICR0);
+
+	pcdev->active = NULL;
+	dev_dbg(pcdev_to_dev(pcdev), "%s\n", __func__);
+}
+
+static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
+			      struct pxa_buffer *buf,
+			      enum vb2_buffer_state state)
+{
+	struct vb2_buffer *vb = &buf->vbuf.vb2_buf;
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+	/* _init is used to debug races, see comment in pxa_camera_reqbufs() */
+	list_del_init(&buf->queue);
+	vb->timestamp = ktime_get_ns();
+	vbuf->sequence = pcdev->buf_sequence++;
+	vbuf->field = V4L2_FIELD_NONE;
+	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+	dev_dbg(pcdev_to_dev(pcdev), "%s dequeued buffer (buf=0x%p)\n",
+		__func__, buf);
+
+	if (list_empty(&pcdev->capture)) {
+		pxa_camera_stop_capture(pcdev);
+		return;
+	}
+
+	pcdev->active = list_entry(pcdev->capture.next,
+				   struct pxa_buffer, queue);
+}
+
+/**
+ * pxa_camera_check_link_miss - check missed DMA linking
+ * @pcdev: camera device
+ *
+ * The DMA chaining is done with DMA running. This means a tiny temporal window
+ * remains, where a buffer is queued on the chain, while the chain is already
+ * stopped. This means the tailed buffer would never be transferred by DMA.
+ * This function restarts the capture for this corner case, where :
+ *  - DADR() == DADDR_STOP
+ *  - a videobuffer is queued on the pcdev->capture list
+ *
+ * Please check the "DMA hot chaining timeslice issue" in
+ *   Documentation/video4linux/pxa_camera.txt
+ *
+ * Context: should only be called within the dma irq handler
+ */
+static void pxa_camera_check_link_miss(struct pxa_camera_dev *pcdev,
+				       dma_cookie_t last_submitted,
+				       dma_cookie_t last_issued)
+{
+	bool is_dma_stopped = last_submitted != last_issued;
+
+	dev_dbg(pcdev_to_dev(pcdev),
+		"%s : top queued buffer=%p, is_dma_stopped=%d\n",
+		__func__, pcdev->active, is_dma_stopped);
+
+	if (pcdev->active && is_dma_stopped)
+		pxa_camera_start_capture(pcdev);
+}
+
+static void pxa_camera_dma_irq(struct pxa_camera_dev *pcdev,
+			       enum pxa_camera_active_dma act_dma)
+{
+	struct pxa_buffer *buf, *last_buf;
+	unsigned long flags;
+	u32 camera_status, overrun;
+	int chan;
+	enum dma_status last_status;
+	dma_cookie_t last_issued;
+
+	spin_lock_irqsave(&pcdev->lock, flags);
+
+	camera_status = __raw_readl(pcdev->base + CISR);
+	dev_dbg(pcdev_to_dev(pcdev), "camera dma irq, cisr=0x%x dma=%d\n",
+		camera_status, act_dma);
+	overrun = CISR_IFO_0;
+	if (pcdev->channels == 3)
+		overrun |= CISR_IFO_1 | CISR_IFO_2;
+
+	/*
+	 * pcdev->active should not be NULL in DMA irq handler.
+	 *
+	 * But there is one corner case : if capture was stopped due to an
+	 * overrun of channel 1, and at that same channel 2 was completed.
+	 *
+	 * When handling the overrun in DMA irq for channel 1, we'll stop the
+	 * capture and restart it (and thus set pcdev->active to NULL). But the
+	 * DMA irq handler will already be pending for channel 2. So on entering
+	 * the DMA irq handler for channel 2 there will be no active buffer, yet
+	 * that is normal.
+	 */
+	if (!pcdev->active)
+		goto out;
+
+	buf = pcdev->active;
+	WARN_ON(buf->inwork || list_empty(&buf->queue));
+
+	/*
+	 * It's normal if the last frame creates an overrun, as there
+	 * are no more DMA descriptors to fetch from QCI fifos
+	 */
+	switch (act_dma) {
+	case DMA_U:
+		chan = 1;
+		break;
+	case DMA_V:
+		chan = 2;
+		break;
+	default:
+		chan = 0;
+		break;
+	}
+	last_buf = list_entry(pcdev->capture.prev,
+			      struct pxa_buffer, queue);
+	last_status = dma_async_is_tx_complete(pcdev->dma_chans[chan],
+					       last_buf->cookie[chan],
+					       NULL, &last_issued);
+	if (camera_status & overrun &&
+	    last_status != DMA_COMPLETE) {
+		dev_dbg(pcdev_to_dev(pcdev), "FIFO overrun! CISR: %x\n",
+			camera_status);
+		pxa_camera_stop_capture(pcdev);
+		list_for_each_entry(buf, &pcdev->capture, queue)
+			pxa_dma_add_tail_buf(pcdev, buf);
+		pxa_camera_start_capture(pcdev);
+		goto out;
+	}
+	buf->active_dma &= ~act_dma;
+	if (!buf->active_dma) {
+		pxa_camera_wakeup(pcdev, buf, VB2_BUF_STATE_DONE);
+		pxa_camera_check_link_miss(pcdev, last_buf->cookie[chan],
+					   last_issued);
+	}
+
+out:
+	spin_unlock_irqrestore(&pcdev->lock, flags);
+}
+
+static u32 mclk_get_divisor(struct platform_device *pdev,
+			    struct pxa_camera_dev *pcdev)
+{
+	unsigned long mclk = pcdev->mclk;
+	u32 div;
+	unsigned long lcdclk;
+
+	lcdclk = clk_get_rate(pcdev->clk);
+	pcdev->ciclk = lcdclk;
+
+	/* mclk <= ciclk / 4 (27.4.2) */
+	if (mclk > lcdclk / 4) {
+		mclk = lcdclk / 4;
+		dev_warn(pcdev_to_dev(pcdev),
+			 "Limiting master clock to %lu\n", mclk);
+	}
+
+	/* We verify mclk != 0, so if anyone breaks it, here comes their Oops */
+	div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1;
+
+	/* If we're not supplying MCLK, leave it at 0 */
+	if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
+		pcdev->mclk = lcdclk / (2 * (div + 1));
+
+	dev_dbg(pcdev_to_dev(pcdev), "LCD clock %luHz, target freq %luHz, divisor %u\n",
+		lcdclk, mclk, div);
+
+	return div;
+}
+
+static void recalculate_fifo_timeout(struct pxa_camera_dev *pcdev,
+				     unsigned long pclk)
+{
+	/* We want a timeout > 1 pixel time, not ">=" */
+	u32 ciclk_per_pixel = pcdev->ciclk / pclk + 1;
+
+	__raw_writel(ciclk_per_pixel, pcdev->base + CITOR);
+}
+
+static void pxa_camera_activate(struct pxa_camera_dev *pcdev)
+{
+	u32 cicr4 = 0;
+
+	/* disable all interrupts */
+	__raw_writel(0x3ff, pcdev->base + CICR0);
+
+	if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
+		cicr4 |= CICR4_PCLK_EN;
+	if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
+		cicr4 |= CICR4_MCLK_EN;
+	if (pcdev->platform_flags & PXA_CAMERA_PCP)
+		cicr4 |= CICR4_PCP;
+	if (pcdev->platform_flags & PXA_CAMERA_HSP)
+		cicr4 |= CICR4_HSP;
+	if (pcdev->platform_flags & PXA_CAMERA_VSP)
+		cicr4 |= CICR4_VSP;
+
+	__raw_writel(pcdev->mclk_divisor | cicr4, pcdev->base + CICR4);
+
+	if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
+		/* Initialise the timeout under the assumption pclk = mclk */
+		recalculate_fifo_timeout(pcdev, pcdev->mclk);
+	else
+		/* "Safe default" - 13MHz */
+		recalculate_fifo_timeout(pcdev, 13000000);
+
+	clk_prepare_enable(pcdev->clk);
+}
+
+static void pxa_camera_deactivate(struct pxa_camera_dev *pcdev)
+{
+	clk_disable_unprepare(pcdev->clk);
+}
+
+static void pxa_camera_eof(unsigned long arg)
+{
+	struct pxa_camera_dev *pcdev = (struct pxa_camera_dev *)arg;
+	unsigned long cifr;
+	struct pxa_buffer *buf;
+
+	dev_dbg(pcdev_to_dev(pcdev),
+		"Camera interrupt status 0x%x\n",
+		__raw_readl(pcdev->base + CISR));
+
+	/* Reset the FIFOs */
+	cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F;
+	__raw_writel(cifr, pcdev->base + CIFR);
+
+	pcdev->active = list_first_entry(&pcdev->capture,
+					 struct pxa_buffer, queue);
+	buf = pcdev->active;
+	pxa_videobuf_set_actdma(pcdev, buf);
+
+	pxa_dma_start_channels(pcdev);
+}
+
+static irqreturn_t pxa_camera_irq(int irq, void *data)
+{
+	struct pxa_camera_dev *pcdev = data;
+	unsigned long status, cicr0;
+
+	status = __raw_readl(pcdev->base + CISR);
+	dev_dbg(pcdev_to_dev(pcdev),
+		"Camera interrupt status 0x%lx\n", status);
+
+	if (!status)
+		return IRQ_NONE;
+
+	__raw_writel(status, pcdev->base + CISR);
+
+	if (status & CISR_EOF) {
+		cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_EOFM;
+		__raw_writel(cicr0, pcdev->base + CICR0);
+		tasklet_schedule(&pcdev->task_eof);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int test_platform_param(struct pxa_camera_dev *pcdev,
+			       unsigned char buswidth, unsigned long *flags)
+{
+	/*
+	 * Platform specified synchronization and pixel clock polarities are
+	 * only a recommendation and are only used during probing. The PXA270
+	 * quick capture interface supports both.
+	 */
+	*flags = (pcdev->platform_flags & PXA_CAMERA_MASTER ?
+		  V4L2_MBUS_MASTER : V4L2_MBUS_SLAVE) |
+		V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+		V4L2_MBUS_HSYNC_ACTIVE_LOW |
+		V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+		V4L2_MBUS_VSYNC_ACTIVE_LOW |
+		V4L2_MBUS_DATA_ACTIVE_HIGH |
+		V4L2_MBUS_PCLK_SAMPLE_RISING |
+		V4L2_MBUS_PCLK_SAMPLE_FALLING;
+
+	/* If requested data width is supported by the platform, use it */
+	if ((1 << (buswidth - 1)) & pcdev->width_flags)
+		return 0;
+
+	return -EINVAL;
+}
+
+static void pxa_camera_setup_cicr(struct pxa_camera_dev *pcdev,
+				  unsigned long flags, __u32 pixfmt)
+{
+	unsigned long dw, bpp;
+	u32 cicr0, cicr1, cicr2, cicr3, cicr4 = 0, y_skip_top;
+	int ret = sensor_call(pcdev, sensor, g_skip_top_lines, &y_skip_top);
+
+	if (ret < 0)
+		y_skip_top = 0;
+
+	/*
+	 * Datawidth is now guaranteed to be equal to one of the three values.
+	 * We fix bit-per-pixel equal to data-width...
+	 */
+	switch (pcdev->current_fmt->host_fmt->bits_per_sample) {
+	case 10:
+		dw = 4;
+		bpp = 0x40;
+		break;
+	case 9:
+		dw = 3;
+		bpp = 0x20;
+		break;
+	default:
+		/*
+		 * Actually it can only be 8 now,
+		 * default is just to silence compiler warnings
+		 */
+	case 8:
+		dw = 2;
+		bpp = 0;
+	}
+
+	if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
+		cicr4 |= CICR4_PCLK_EN;
+	if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
+		cicr4 |= CICR4_MCLK_EN;
+	if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+		cicr4 |= CICR4_PCP;
+	if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+		cicr4 |= CICR4_HSP;
+	if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+		cicr4 |= CICR4_VSP;
+
+	cicr0 = __raw_readl(pcdev->base + CICR0);
+	if (cicr0 & CICR0_ENB)
+		__raw_writel(cicr0 & ~CICR0_ENB, pcdev->base + CICR0);
+
+	cicr1 = CICR1_PPL_VAL(pcdev->current_pix.width - 1) | bpp | dw;
+
+	switch (pixfmt) {
+	case V4L2_PIX_FMT_YUV422P:
+		pcdev->channels = 3;
+		cicr1 |= CICR1_YCBCR_F;
+		/*
+		 * Normally, pxa bus wants as input UYVY format. We allow all
+		 * reorderings of the YUV422 format, as no processing is done,
+		 * and the YUV stream is just passed through without any
+		 * transformation. Note that UYVY is the only format that
+		 * should be used if pxa framebuffer Overlay2 is used.
+		 */
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_VYUY:
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_YVYU:
+		cicr1 |= CICR1_COLOR_SP_VAL(2);
+		break;
+	case V4L2_PIX_FMT_RGB555:
+		cicr1 |= CICR1_RGB_BPP_VAL(1) | CICR1_RGBT_CONV_VAL(2) |
+			CICR1_TBIT | CICR1_COLOR_SP_VAL(1);
+		break;
+	case V4L2_PIX_FMT_RGB565:
+		cicr1 |= CICR1_COLOR_SP_VAL(1) | CICR1_RGB_BPP_VAL(2);
+		break;
+	}
+
+	cicr2 = 0;
+	cicr3 = CICR3_LPF_VAL(pcdev->current_pix.height - 1) |
+		CICR3_BFW_VAL(min((u32)255, y_skip_top));
+	cicr4 |= pcdev->mclk_divisor;
+
+	__raw_writel(cicr1, pcdev->base + CICR1);
+	__raw_writel(cicr2, pcdev->base + CICR2);
+	__raw_writel(cicr3, pcdev->base + CICR3);
+	__raw_writel(cicr4, pcdev->base + CICR4);
+
+	/* CIF interrupts are not used, only DMA */
+	cicr0 = (cicr0 & CICR0_ENB) | (pcdev->platform_flags & PXA_CAMERA_MASTER ?
+		CICR0_SIM_MP : (CICR0_SL_CAP_EN | CICR0_SIM_SP));
+	cicr0 |= CICR0_DMAEN | CICR0_IRQ_MASK;
+	__raw_writel(cicr0, pcdev->base + CICR0);
+}
+
+/*
+ * Videobuf2 section
+ */
+static void pxa_buffer_cleanup(struct pxa_buffer *buf)
+{
+	int i;
+
+	for (i = 0; i < 3 && buf->descs[i]; i++) {
+		dmaengine_desc_free(buf->descs[i]);
+		kfree(buf->sg[i]);
+		buf->descs[i] = NULL;
+		buf->sg[i] = NULL;
+		buf->sg_len[i] = 0;
+		buf->plane_sizes[i] = 0;
+	}
+	buf->nb_planes = 0;
+}
+
+static int pxa_buffer_init(struct pxa_camera_dev *pcdev,
+			   struct pxa_buffer *buf)
+{
+	struct vb2_buffer *vb = &buf->vbuf.vb2_buf;
+	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
+	int nb_channels = pcdev->channels;
+	int i, ret = 0;
+	unsigned long size = vb2_plane_size(vb, 0);
+
+	switch (nb_channels) {
+	case 1:
+		buf->plane_sizes[0] = size;
+		break;
+	case 3:
+		buf->plane_sizes[0] = size / 2;
+		buf->plane_sizes[1] = size / 4;
+		buf->plane_sizes[2] = size / 4;
+		break;
+	default:
+		return -EINVAL;
+	};
+	buf->nb_planes = nb_channels;
+
+	ret = sg_split(sgt->sgl, sgt->nents, 0, nb_channels,
+		       buf->plane_sizes, buf->sg, buf->sg_len, GFP_KERNEL);
+	if (ret < 0) {
+		dev_err(pcdev_to_dev(pcdev),
+			"sg_split failed: %d\n", ret);
+		return ret;
+	}
+	for (i = 0; i < nb_channels; i++) {
+		ret = pxa_init_dma_channel(pcdev, buf, i,
+					   buf->sg[i], buf->sg_len[i]);
+		if (ret) {
+			pxa_buffer_cleanup(buf);
+			return ret;
+		}
+	}
+	INIT_LIST_HEAD(&buf->queue);
+
+	return ret;
+}
+
+static void pxac_vb2_cleanup(struct vb2_buffer *vb)
+{
+	struct pxa_buffer *buf = vb2_to_pxa_buffer(vb);
+	struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue);
+
+	dev_dbg(pcdev_to_dev(pcdev),
+		 "%s(vb=%p)\n", __func__, vb);
+	pxa_buffer_cleanup(buf);
+}
+
+static void pxac_vb2_queue(struct vb2_buffer *vb)
+{
+	struct pxa_buffer *buf = vb2_to_pxa_buffer(vb);
+	struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue);
+
+	dev_dbg(pcdev_to_dev(pcdev),
+		 "%s(vb=%p) nb_channels=%d size=%lu active=%p\n",
+		__func__, vb, pcdev->channels, vb2_get_plane_payload(vb, 0),
+		pcdev->active);
+
+	list_add_tail(&buf->queue, &pcdev->capture);
+
+	pxa_dma_add_tail_buf(pcdev, buf);
+}
+
+/*
+ * Please check the DMA prepared buffer structure in :
+ *   Documentation/video4linux/pxa_camera.txt
+ * Please check also in pxa_camera_check_link_miss() to understand why DMA chain
+ * modification while DMA chain is running will work anyway.
+ */
+static int pxac_vb2_prepare(struct vb2_buffer *vb)
+{
+	struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue);
+	struct pxa_buffer *buf = vb2_to_pxa_buffer(vb);
+	int ret = 0;
+
+	switch (pcdev->channels) {
+	case 1:
+	case 3:
+		vb2_set_plane_payload(vb, 0, pcdev->current_pix.sizeimage);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	dev_dbg(pcdev_to_dev(pcdev),
+		 "%s (vb=%p) nb_channels=%d size=%lu\n",
+		__func__, vb, pcdev->channels, vb2_get_plane_payload(vb, 0));
+
+	WARN_ON(!pcdev->current_fmt);
+
+#ifdef DEBUG
+	/*
+	 * This can be useful if you want to see if we actually fill
+	 * the buffer with something
+	 */
+	for (i = 0; i < vb->num_planes; i++)
+		memset((void *)vb2_plane_vaddr(vb, i),
+		       0xaa, vb2_get_plane_payload(vb, i));
+#endif
+
+	/*
+	 * I think, in buf_prepare you only have to protect global data,
+	 * the actual buffer is yours
+	 */
+	buf->inwork = 0;
+	pxa_videobuf_set_actdma(pcdev, buf);
+
+	return ret;
+}
+
+static int pxac_vb2_init(struct vb2_buffer *vb)
+{
+	struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue);
+	struct pxa_buffer *buf = vb2_to_pxa_buffer(vb);
+
+	dev_dbg(pcdev_to_dev(pcdev),
+		 "%s(nb_channels=%d)\n",
+		__func__, pcdev->channels);
+
+	return pxa_buffer_init(pcdev, buf);
+}
+
+static int pxac_vb2_queue_setup(struct vb2_queue *vq,
+				unsigned int *nbufs,
+				unsigned int *num_planes, unsigned int sizes[],
+				struct device *alloc_devs[])
+{
+	struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vq);
+	int size = pcdev->current_pix.sizeimage;
+
+	dev_dbg(pcdev_to_dev(pcdev),
+		 "%s(vq=%p nbufs=%d num_planes=%d size=%d)\n",
+		__func__, vq, *nbufs, *num_planes, size);
+	/*
+	 * Called from VIDIOC_REQBUFS or in compatibility mode For YUV422P
+	 * format, even if there are 3 planes Y, U and V, we reply there is only
+	 * one plane, containing Y, U and V data, one after the other.
+	 */
+	if (*num_planes)
+		return sizes[0] < size ? -EINVAL : 0;
+
+	*num_planes = 1;
+	switch (pcdev->channels) {
+	case 1:
+	case 3:
+		sizes[0] = size;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (!*nbufs)
+		*nbufs = 1;
+
+	return 0;
+}
+
+static int pxac_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+	struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vq);
+
+	dev_dbg(pcdev_to_dev(pcdev), "%s(count=%d) active=%p\n",
+		__func__, count, pcdev->active);
+
+	pcdev->buf_sequence = 0;
+	if (!pcdev->active)
+		pxa_camera_start_capture(pcdev);
+
+	return 0;
+}
+
+static void pxac_vb2_stop_streaming(struct vb2_queue *vq)
+{
+	struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vq);
+	struct pxa_buffer *buf, *tmp;
+
+	dev_dbg(pcdev_to_dev(pcdev), "%s active=%p\n",
+		__func__, pcdev->active);
+	pxa_camera_stop_capture(pcdev);
+
+	list_for_each_entry_safe(buf, tmp, &pcdev->capture, queue)
+		pxa_camera_wakeup(pcdev, buf, VB2_BUF_STATE_ERROR);
+}
+
+static struct vb2_ops pxac_vb2_ops = {
+	.queue_setup		= pxac_vb2_queue_setup,
+	.buf_init		= pxac_vb2_init,
+	.buf_prepare		= pxac_vb2_prepare,
+	.buf_queue		= pxac_vb2_queue,
+	.buf_cleanup		= pxac_vb2_cleanup,
+	.start_streaming	= pxac_vb2_start_streaming,
+	.stop_streaming		= pxac_vb2_stop_streaming,
+	.wait_prepare		= vb2_ops_wait_prepare,
+	.wait_finish		= vb2_ops_wait_finish,
+};
+
+static int pxa_camera_init_videobuf2(struct pxa_camera_dev *pcdev)
+{
+	int ret;
+	struct vb2_queue *vq = &pcdev->vb2_vq;
+
+	memset(vq, 0, sizeof(*vq));
+	vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+	vq->drv_priv = pcdev;
+	vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+	vq->buf_struct_size = sizeof(struct pxa_buffer);
+	vq->dev = pcdev->v4l2_dev.dev;
+
+	vq->ops = &pxac_vb2_ops;
+	vq->mem_ops = &vb2_dma_sg_memops;
+	vq->lock = &pcdev->mlock;
+
+	ret = vb2_queue_init(vq);
+	dev_dbg(pcdev_to_dev(pcdev),
+		 "vb2_queue_init(vq=%p): %d\n", vq, ret);
+
+	return ret;
+}
+
+/*
+ * Video ioctls section
+ */
+static int pxa_camera_set_bus_param(struct pxa_camera_dev *pcdev)
+{
+	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+	u32 pixfmt = pcdev->current_fmt->host_fmt->fourcc;
+	unsigned long bus_flags, common_flags;
+	int ret;
+
+	ret = test_platform_param(pcdev,
+				  pcdev->current_fmt->host_fmt->bits_per_sample,
+				  &bus_flags);
+	if (ret < 0)
+		return ret;
+
+	ret = sensor_call(pcdev, video, g_mbus_config, &cfg);
+	if (!ret) {
+		common_flags = pxa_mbus_config_compatible(&cfg,
+							  bus_flags);
+		if (!common_flags) {
+			dev_warn(pcdev_to_dev(pcdev),
+				 "Flags incompatible: camera 0x%x, host 0x%lx\n",
+				 cfg.flags, bus_flags);
+			return -EINVAL;
+		}
+	} else if (ret != -ENOIOCTLCMD) {
+		return ret;
+	} else {
+		common_flags = bus_flags;
+	}
+
+	pcdev->channels = 1;
+
+	/* Make choises, based on platform preferences */
+	if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+	    (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
+		if (pcdev->platform_flags & PXA_CAMERA_HSP)
+			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
+		else
+			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
+	}
+
+	if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+	    (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
+		if (pcdev->platform_flags & PXA_CAMERA_VSP)
+			common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
+		else
+			common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
+	}
+
+	if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+	    (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
+		if (pcdev->platform_flags & PXA_CAMERA_PCP)
+			common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
+		else
+			common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
+	}
+
+	cfg.flags = common_flags;
+	ret = sensor_call(pcdev, video, s_mbus_config, &cfg);
+	if (ret < 0 && ret != -ENOIOCTLCMD) {
+		dev_dbg(pcdev_to_dev(pcdev),
+			"camera s_mbus_config(0x%lx) returned %d\n",
+			common_flags, ret);
+		return ret;
+	}
+
+	pxa_camera_setup_cicr(pcdev, common_flags, pixfmt);
+
+	return 0;
+}
+
+static int pxa_camera_try_bus_param(struct pxa_camera_dev *pcdev,
+				    unsigned char buswidth)
+{
+	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+	unsigned long bus_flags, common_flags;
+	int ret = test_platform_param(pcdev, buswidth, &bus_flags);
+
+	if (ret < 0)
+		return ret;
+
+	ret = sensor_call(pcdev, video, g_mbus_config, &cfg);
+	if (!ret) {
+		common_flags = pxa_mbus_config_compatible(&cfg,
+							  bus_flags);
+		if (!common_flags) {
+			dev_warn(pcdev_to_dev(pcdev),
+				 "Flags incompatible: camera 0x%x, host 0x%lx\n",
+				 cfg.flags, bus_flags);
+			return -EINVAL;
+		}
+	} else if (ret == -ENOIOCTLCMD) {
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static const struct pxa_mbus_pixelfmt pxa_camera_formats[] = {
+	{
+		.fourcc			= V4L2_PIX_FMT_YUV422P,
+		.name			= "Planar YUV422 16 bit",
+		.bits_per_sample	= 8,
+		.packing		= PXA_MBUS_PACKING_2X8_PADHI,
+		.order			= PXA_MBUS_ORDER_LE,
+		.layout			= PXA_MBUS_LAYOUT_PLANAR_2Y_U_V,
+	},
+};
+
+/* This will be corrected as we get more formats */
+static bool pxa_camera_packing_supported(const struct pxa_mbus_pixelfmt *fmt)
+{
+	return	fmt->packing == PXA_MBUS_PACKING_NONE ||
+		(fmt->bits_per_sample == 8 &&
+		 fmt->packing == PXA_MBUS_PACKING_2X8_PADHI) ||
+		(fmt->bits_per_sample > 8 &&
+		 fmt->packing == PXA_MBUS_PACKING_EXTEND16);
+}
+
+static int pxa_camera_get_formats(struct v4l2_device *v4l2_dev,
+				  unsigned int idx,
+				  struct soc_camera_format_xlate *xlate)
+{
+	struct pxa_camera_dev *pcdev = v4l2_dev_to_pcdev(v4l2_dev);
+	int formats = 0, ret;
+	struct v4l2_subdev_mbus_code_enum code = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.index = idx,
+	};
+	const struct pxa_mbus_pixelfmt *fmt;
+
+	ret = sensor_call(pcdev, pad, enum_mbus_code, NULL, &code);
+	if (ret < 0)
+		/* No more formats */
+		return 0;
+
+	fmt = pxa_mbus_get_fmtdesc(code.code);
+	if (!fmt) {
+		dev_err(pcdev_to_dev(pcdev),
+			"Invalid format code #%u: %d\n", idx, code.code);
+		return 0;
+	}
+
+	/* This also checks support for the requested bits-per-sample */
+	ret = pxa_camera_try_bus_param(pcdev, fmt->bits_per_sample);
+	if (ret < 0)
+		return 0;
+
+	switch (code.code) {
+	case MEDIA_BUS_FMT_UYVY8_2X8:
+		formats++;
+		if (xlate) {
+			xlate->host_fmt	= &pxa_camera_formats[0];
+			xlate->code	= code.code;
+			xlate++;
+			dev_dbg(pcdev_to_dev(pcdev),
+				"Providing format %s using code %d\n",
+				pxa_camera_formats[0].name, code.code);
+		}
+	/* fall through */
+	case MEDIA_BUS_FMT_VYUY8_2X8:
+	case MEDIA_BUS_FMT_YUYV8_2X8:
+	case MEDIA_BUS_FMT_YVYU8_2X8:
+	case MEDIA_BUS_FMT_RGB565_2X8_LE:
+	case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
+		if (xlate)
+			dev_dbg(pcdev_to_dev(pcdev),
+				"Providing format %s packed\n",
+				fmt->name);
+		break;
+	default:
+		if (!pxa_camera_packing_supported(fmt))
+			return 0;
+		if (xlate)
+			dev_dbg(pcdev_to_dev(pcdev),
+				"Providing format %s in pass-through mode\n",
+				fmt->name);
+		break;
+	}
+
+	/* Generic pass-through */
+	formats++;
+	if (xlate) {
+		xlate->host_fmt	= fmt;
+		xlate->code	= code.code;
+		xlate++;
+	}
+
+	return formats;
+}
+
+static int pxa_camera_build_formats(struct pxa_camera_dev *pcdev)
+{
+	struct soc_camera_format_xlate *xlate;
+
+	xlate = pxa_mbus_build_fmts_xlate(&pcdev->v4l2_dev, pcdev->sensor,
+					  pxa_camera_get_formats);
+	if (IS_ERR(xlate))
+		return PTR_ERR(xlate);
+
+	pcdev->user_formats = xlate;
+	return 0;
+}
+
+static void pxa_camera_destroy_formats(struct pxa_camera_dev *pcdev)
+{
+	kfree(pcdev->user_formats);
+}
+
+static int pxa_camera_check_frame(u32 width, u32 height)
+{
+	/* limit to pxa hardware capabilities */
+	return height < 32 || height > 2048 || width < 48 || width > 2048 ||
+		(width & 0x01);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int pxac_vidioc_g_register(struct file *file, void *priv,
+				  struct v4l2_dbg_register *reg)
+{
+	struct pxa_camera_dev *pcdev = video_drvdata(file);
+
+	if (reg->reg > CIBR2)
+		return -ERANGE;
+
+	reg->val = __raw_readl(pcdev->base + reg->reg);
+	reg->size = sizeof(__u32);
+	return 0;
+}
+
+static int pxac_vidioc_s_register(struct file *file, void *priv,
+				  const struct v4l2_dbg_register *reg)
+{
+	struct pxa_camera_dev *pcdev = video_drvdata(file);
+
+	if (reg->reg > CIBR2)
+		return -ERANGE;
+	if (reg->size != sizeof(__u32))
+		return -EINVAL;
+	__raw_writel(reg->val, pcdev->base + reg->reg);
+	return 0;
+}
+#endif
+
+static int pxac_vidioc_enum_fmt_vid_cap(struct file *filp, void  *priv,
+					struct v4l2_fmtdesc *f)
+{
+	struct pxa_camera_dev *pcdev = video_drvdata(filp);
+	const struct pxa_mbus_pixelfmt *format;
+	unsigned int idx;
+
+	for (idx = 0; pcdev->user_formats[idx].code; idx++);
+	if (f->index >= idx)
+		return -EINVAL;
+
+	format = pcdev->user_formats[f->index].host_fmt;
+	f->pixelformat = format->fourcc;
+	return 0;
+}
+
+static int pxac_vidioc_g_fmt_vid_cap(struct file *filp, void *priv,
+				    struct v4l2_format *f)
+{
+	struct pxa_camera_dev *pcdev = video_drvdata(filp);
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+
+	pix->width		= pcdev->current_pix.width;
+	pix->height		= pcdev->current_pix.height;
+	pix->bytesperline	= pcdev->current_pix.bytesperline;
+	pix->sizeimage		= pcdev->current_pix.sizeimage;
+	pix->field		= pcdev->current_pix.field;
+	pix->pixelformat	= pcdev->current_fmt->host_fmt->fourcc;
+	pix->colorspace		= pcdev->current_pix.colorspace;
+	dev_dbg(pcdev_to_dev(pcdev), "current_fmt->fourcc: 0x%08x\n",
+		pcdev->current_fmt->host_fmt->fourcc);
+	return 0;
+}
+
+static int pxac_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
+				      struct v4l2_format *f)
+{
+	struct pxa_camera_dev *pcdev = video_drvdata(filp);
+	const struct soc_camera_format_xlate *xlate;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct v4l2_subdev_pad_config pad_cfg;
+	struct v4l2_subdev_format format = {
+		.which = V4L2_SUBDEV_FORMAT_TRY,
+	};
+	struct v4l2_mbus_framefmt *mf = &format.format;
+	__u32 pixfmt = pix->pixelformat;
+	int ret;
+
+	xlate = pxa_mbus_xlate_by_fourcc(pcdev->user_formats, pixfmt);
+	if (!xlate) {
+		dev_warn(pcdev_to_dev(pcdev), "Format %x not found\n", pixfmt);
+		return -EINVAL;
+	}
+
+	/*
+	 * Limit to pxa hardware capabilities.  YUV422P planar format requires
+	 * images size to be a multiple of 16 bytes.  If not, zeros will be
+	 * inserted between Y and U planes, and U and V planes, which violates
+	 * the YUV422P standard.
+	 */
+	v4l_bound_align_image(&pix->width, 48, 2048, 1,
+			      &pix->height, 32, 2048, 0,
+			      pixfmt == V4L2_PIX_FMT_YUV422P ? 4 : 0);
+
+	v4l2_fill_mbus_format(mf, pix, xlate->code);
+	ret = sensor_call(pcdev, pad, set_fmt, &pad_cfg, &format);
+	if (ret < 0)
+		return ret;
+
+	v4l2_fill_pix_format(pix, mf);
+
+	/* Only progressive video supported so far */
+	switch (mf->field) {
+	case V4L2_FIELD_ANY:
+	case V4L2_FIELD_NONE:
+		pix->field = V4L2_FIELD_NONE;
+		break;
+	default:
+		/* TODO: support interlaced at least in pass-through mode */
+		dev_err(pcdev_to_dev(pcdev), "Field type %d unsupported.\n",
+			mf->field);
+		return -EINVAL;
+	}
+
+	ret = pxa_mbus_bytes_per_line(pix->width, xlate->host_fmt);
+	if (ret < 0)
+		return ret;
+
+	pix->bytesperline = ret;
+	ret = pxa_mbus_image_size(xlate->host_fmt, pix->bytesperline,
+				  pix->height);
+	if (ret < 0)
+		return ret;
+
+	pix->sizeimage = ret;
+	return 0;
+}
+
+static int pxac_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
+				    struct v4l2_format *f)
+{
+	struct pxa_camera_dev *pcdev = video_drvdata(filp);
+	const struct soc_camera_format_xlate *xlate;
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct v4l2_subdev_format format = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+	};
+	unsigned long flags;
+	int ret, is_busy;
+
+	dev_dbg(pcdev_to_dev(pcdev),
+		"s_fmt_vid_cap(pix=%dx%d:%x)\n",
+		pix->width, pix->height, pix->pixelformat);
+
+	spin_lock_irqsave(&pcdev->lock, flags);
+	is_busy = pcdev->active || vb2_is_busy(&pcdev->vb2_vq);
+	spin_unlock_irqrestore(&pcdev->lock, flags);
+
+	if (is_busy)
+		return -EBUSY;
+
+	ret = pxac_vidioc_try_fmt_vid_cap(filp, priv, f);
+	if (ret)
+		return ret;
+
+	xlate = pxa_mbus_xlate_by_fourcc(pcdev->user_formats,
+					 pix->pixelformat);
+	v4l2_fill_mbus_format(&format.format, pix, xlate->code);
+	ret = sensor_call(pcdev, pad, set_fmt, NULL, &format);
+	if (ret < 0) {
+		dev_warn(pcdev_to_dev(pcdev),
+			 "Failed to configure for format %x\n",
+			 pix->pixelformat);
+	} else if (pxa_camera_check_frame(pix->width, pix->height)) {
+		dev_warn(pcdev_to_dev(pcdev),
+			 "Camera driver produced an unsupported frame %dx%d\n",
+			 pix->width, pix->height);
+		return -EINVAL;
+	}
+
+	pcdev->current_fmt = xlate;
+	pcdev->current_pix = *pix;
+
+	ret = pxa_camera_set_bus_param(pcdev);
+	return ret;
+}
+
+static int pxac_vidioc_querycap(struct file *file, void *priv,
+				struct v4l2_capability *cap)
+{
+	strlcpy(cap->bus_info, "platform:pxa-camera", sizeof(cap->bus_info));
+	strlcpy(cap->driver, PXA_CAM_DRV_NAME, sizeof(cap->driver));
+	strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card));
+	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+	return 0;
+}
+
+static int pxac_vidioc_enum_input(struct file *file, void *priv,
+				  struct v4l2_input *i)
+{
+	if (i->index > 0)
+		return -EINVAL;
+
+	i->type = V4L2_INPUT_TYPE_CAMERA;
+	strlcpy(i->name, "Camera", sizeof(i->name));
+
+	return 0;
+}
+
+static int pxac_vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+	*i = 0;
+
+	return 0;
+}
+
+static int pxac_vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+	if (i > 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int pxac_fops_camera_open(struct file *filp)
+{
+	struct pxa_camera_dev *pcdev = video_drvdata(filp);
+	int ret;
+
+	mutex_lock(&pcdev->mlock);
+	ret = v4l2_fh_open(filp);
+	if (ret < 0)
+		goto out;
+
+	ret = sensor_call(pcdev, core, s_power, 1);
+	if (ret)
+		v4l2_fh_release(filp);
+out:
+	mutex_unlock(&pcdev->mlock);
+	return ret;
+}
+
+static int pxac_fops_camera_release(struct file *filp)
+{
+	struct pxa_camera_dev *pcdev = video_drvdata(filp);
+	int ret;
+
+	ret = vb2_fop_release(filp);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&pcdev->mlock);
+	ret = sensor_call(pcdev, core, s_power, 0);
+	mutex_unlock(&pcdev->mlock);
+
+	return ret;
+}
+
+static const struct v4l2_file_operations pxa_camera_fops = {
+	.owner		= THIS_MODULE,
+	.open		= pxac_fops_camera_open,
+	.release	= pxac_fops_camera_release,
+	.read		= vb2_fop_read,
+	.poll		= vb2_fop_poll,
+	.mmap		= vb2_fop_mmap,
+	.unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops pxa_camera_ioctl_ops = {
+	.vidioc_querycap		= pxac_vidioc_querycap,
+
+	.vidioc_enum_input		= pxac_vidioc_enum_input,
+	.vidioc_g_input			= pxac_vidioc_g_input,
+	.vidioc_s_input			= pxac_vidioc_s_input,
+
+	.vidioc_enum_fmt_vid_cap	= pxac_vidioc_enum_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap		= pxac_vidioc_g_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap		= pxac_vidioc_s_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap		= pxac_vidioc_try_fmt_vid_cap,
+
+	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
+	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
+	.vidioc_querybuf		= vb2_ioctl_querybuf,
+	.vidioc_qbuf			= vb2_ioctl_qbuf,
+	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
+	.vidioc_expbuf			= vb2_ioctl_expbuf,
+	.vidioc_streamon		= vb2_ioctl_streamon,
+	.vidioc_streamoff		= vb2_ioctl_streamoff,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.vidioc_g_register		= pxac_vidioc_g_register,
+	.vidioc_s_register		= pxac_vidioc_s_register,
+#endif
+};
+
+static struct v4l2_clk_ops pxa_camera_mclk_ops = {
+};
+
+static const struct video_device pxa_camera_videodev_template = {
+	.name = "pxa-camera",
+	.minor = -1,
+	.fops = &pxa_camera_fops,
+	.ioctl_ops = &pxa_camera_ioctl_ops,
+	.release = video_device_release_empty,
+	.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING,
+};
+
+static int pxa_camera_sensor_bound(struct v4l2_async_notifier *notifier,
+		     struct v4l2_subdev *subdev,
+		     struct v4l2_async_subdev *asd)
+{
+	int err;
+	struct v4l2_device *v4l2_dev = notifier->v4l2_dev;
+	struct pxa_camera_dev *pcdev = v4l2_dev_to_pcdev(v4l2_dev);
+	struct video_device *vdev = &pcdev->vdev;
+	struct v4l2_pix_format *pix = &pcdev->current_pix;
+	struct v4l2_subdev_format format = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+	};
+	struct v4l2_mbus_framefmt *mf = &format.format;
+
+	dev_info(pcdev_to_dev(pcdev), "%s(): trying to bind a device\n",
+		 __func__);
+	mutex_lock(&pcdev->mlock);
+	*vdev = pxa_camera_videodev_template;
+	vdev->v4l2_dev = v4l2_dev;
+	vdev->lock = &pcdev->mlock;
+	pcdev->sensor = subdev;
+	pcdev->vdev.queue = &pcdev->vb2_vq;
+	pcdev->vdev.v4l2_dev = &pcdev->v4l2_dev;
+	pcdev->vdev.ctrl_handler = subdev->ctrl_handler;
+	video_set_drvdata(&pcdev->vdev, pcdev);
+
+	err = pxa_camera_build_formats(pcdev);
+	if (err) {
+		dev_err(pcdev_to_dev(pcdev), "building formats failed: %d\n",
+			err);
+		goto out;
+	}
+
+	pcdev->current_fmt = pcdev->user_formats;
+	pix->field = V4L2_FIELD_NONE;
+	pix->width = DEFAULT_WIDTH;
+	pix->height = DEFAULT_HEIGHT;
+	pix->bytesperline =
+		pxa_mbus_bytes_per_line(pix->width,
+					pcdev->current_fmt->host_fmt);
+	pix->sizeimage =
+		pxa_mbus_image_size(pcdev->current_fmt->host_fmt,
+				    pix->bytesperline, pix->height);
+	pix->pixelformat = pcdev->current_fmt->host_fmt->fourcc;
+	v4l2_fill_mbus_format(mf, pix, pcdev->current_fmt->code);
+	err = sensor_call(pcdev, pad, set_fmt, NULL, &format);
+	if (err)
+		goto out;
+
+	v4l2_fill_pix_format(pix, mf);
+	pr_info("%s(): colorspace=0x%x pixfmt=0x%x\n",
+		__func__, pix->colorspace, pix->pixelformat);
+
+	err = pxa_camera_init_videobuf2(pcdev);
+	if (err)
+		goto out;
+
+	err = video_register_device(&pcdev->vdev, VFL_TYPE_GRABBER, -1);
+	if (err) {
+		v4l2_err(v4l2_dev, "register video device failed: %d\n", err);
+		pcdev->sensor = NULL;
+	} else {
+		dev_info(pcdev_to_dev(pcdev),
+			 "PXA Camera driver attached to camera %s\n",
+			 subdev->name);
+	}
+out:
+	mutex_unlock(&pcdev->mlock);
+	return err;
+}
+
+static void pxa_camera_sensor_unbind(struct v4l2_async_notifier *notifier,
+		     struct v4l2_subdev *subdev,
+		     struct v4l2_async_subdev *asd)
+{
+	struct pxa_camera_dev *pcdev = v4l2_dev_to_pcdev(notifier->v4l2_dev);
+
+	mutex_lock(&pcdev->mlock);
+	dev_info(pcdev_to_dev(pcdev),
+		 "PXA Camera driver detached from camera %s\n",
+		 subdev->name);
+
+	/* disable capture, disable interrupts */
+	__raw_writel(0x3ff, pcdev->base + CICR0);
+
+	/* Stop DMA engine */
+	pxa_dma_stop_channels(pcdev);
+
+	pxa_camera_destroy_formats(pcdev);
+	video_unregister_device(&pcdev->vdev);
+	pcdev->sensor = NULL;
+
+	mutex_unlock(&pcdev->mlock);
+}
+
+/*
+ * Driver probe, remove, suspend and resume operations
+ */
+static int pxa_camera_suspend(struct device *dev)
+{
+	struct pxa_camera_dev *pcdev = dev_get_drvdata(dev);
+	int i = 0, ret = 0;
+
+	pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR0);
+	pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR1);
+	pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR2);
+	pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR3);
+	pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR4);
+
+	if (pcdev->sensor) {
+		ret = sensor_call(pcdev, core, s_power, 0);
+		if (ret == -ENOIOCTLCMD)
+			ret = 0;
+	}
+
+	return ret;
+}
+
+static int pxa_camera_resume(struct device *dev)
+{
+	struct pxa_camera_dev *pcdev = dev_get_drvdata(dev);
+	int i = 0, ret = 0;
+
+	__raw_writel(pcdev->save_cicr[i++] & ~CICR0_ENB, pcdev->base + CICR0);
+	__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR1);
+	__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR2);
+	__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR3);
+	__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR4);
+
+	if (pcdev->sensor) {
+		ret = sensor_call(pcdev, core, s_power, 1);
+		if (ret == -ENOIOCTLCMD)
+			ret = 0;
+	}
+
+	/* Restart frame capture if active buffer exists */
+	if (!ret && pcdev->active)
+		pxa_camera_start_capture(pcdev);
+
+	return ret;
+}
+
+static int pxa_camera_pdata_from_dt(struct device *dev,
+				    struct pxa_camera_dev *pcdev,
+				    struct v4l2_async_subdev *asd)
+{
+	u32 mclk_rate;
+	struct device_node *remote, *np = dev->of_node;
+	struct v4l2_of_endpoint ep;
+	int err = of_property_read_u32(np, "clock-frequency",
+				       &mclk_rate);
+	if (!err) {
+		pcdev->platform_flags |= PXA_CAMERA_MCLK_EN;
+		pcdev->mclk = mclk_rate;
+	}
+
+	np = of_graph_get_next_endpoint(np, NULL);
+	if (!np) {
+		dev_err(dev, "could not find endpoint\n");
+		return -EINVAL;
+	}
+
+	err = v4l2_of_parse_endpoint(np, &ep);
+	if (err) {
+		dev_err(dev, "could not parse endpoint\n");
+		goto out;
+	}
+
+	switch (ep.bus.parallel.bus_width) {
+	case 4:
+		pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_4;
+		break;
+	case 5:
+		pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_5;
+		break;
+	case 8:
+		pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_8;
+		break;
+	case 9:
+		pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_9;
+		break;
+	case 10:
+		pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10;
+		break;
+	default:
+		break;
+	}
+
+	if (ep.bus.parallel.flags & V4L2_MBUS_MASTER)
+		pcdev->platform_flags |= PXA_CAMERA_MASTER;
+	if (ep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+		pcdev->platform_flags |= PXA_CAMERA_HSP;
+	if (ep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+		pcdev->platform_flags |= PXA_CAMERA_VSP;
+	if (ep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+		pcdev->platform_flags |= PXA_CAMERA_PCLK_EN | PXA_CAMERA_PCP;
+	if (ep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+		pcdev->platform_flags |= PXA_CAMERA_PCLK_EN;
+
+	asd->match_type = V4L2_ASYNC_MATCH_OF;
+	remote = of_graph_get_remote_port(np);
+	if (remote) {
+		asd->match.of.node = remote;
+		of_node_put(remote);
+	} else {
+		dev_notice(dev, "no remote for %s\n", of_node_full_name(np));
+	}
+
+out:
+	of_node_put(np);
+
+	return err;
+}
+
+static int pxa_camera_probe(struct platform_device *pdev)
+{
+	struct pxa_camera_dev *pcdev;
+	struct resource *res;
+	void __iomem *base;
+	struct dma_slave_config config = {
+		.src_addr_width = 0,
+		.src_maxburst = 8,
+		.direction = DMA_DEV_TO_MEM,
+	};
+	dma_cap_mask_t mask;
+	struct pxad_param params;
+	char clk_name[V4L2_CLK_NAME_SIZE];
+	int irq;
+	int err = 0, i;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_irq(pdev, 0);
+	if (!res || irq < 0)
+		return -ENODEV;
+
+	pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
+	if (!pcdev) {
+		dev_err(&pdev->dev, "Could not allocate pcdev\n");
+		return -ENOMEM;
+	}
+
+	pcdev->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(pcdev->clk))
+		return PTR_ERR(pcdev->clk);
+
+	pcdev->res = res;
+
+	pcdev->pdata = pdev->dev.platform_data;
+	if (&pdev->dev.of_node && !pcdev->pdata) {
+		err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev, &pcdev->asd);
+	} else {
+		pcdev->platform_flags = pcdev->pdata->flags;
+		pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;
+		pcdev->asd.match_type = V4L2_ASYNC_MATCH_I2C;
+		pcdev->asd.match.i2c.adapter_id =
+			pcdev->pdata->sensor_i2c_adapter_id;
+		pcdev->asd.match.i2c.address = pcdev->pdata->sensor_i2c_address;
+	}
+	if (err < 0)
+		return err;
+
+	if (!(pcdev->platform_flags & (PXA_CAMERA_DATAWIDTH_8 |
+			PXA_CAMERA_DATAWIDTH_9 | PXA_CAMERA_DATAWIDTH_10))) {
+		/*
+		 * Platform hasn't set available data widths. This is bad.
+		 * Warn and use a default.
+		 */
+		dev_warn(&pdev->dev, "WARNING! Platform hasn't set available "
+			 "data widths, using default 10 bit\n");
+		pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10;
+	}
+	if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_8)
+		pcdev->width_flags = 1 << 7;
+	if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_9)
+		pcdev->width_flags |= 1 << 8;
+	if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_10)
+		pcdev->width_flags |= 1 << 9;
+	if (!pcdev->mclk) {
+		dev_warn(&pdev->dev,
+			 "mclk == 0! Please, fix your platform data. "
+			 "Using default 20MHz\n");
+		pcdev->mclk = 20000000;
+	}
+
+	pcdev->mclk_divisor = mclk_get_divisor(pdev, pcdev);
+
+	INIT_LIST_HEAD(&pcdev->capture);
+	spin_lock_init(&pcdev->lock);
+	mutex_init(&pcdev->mlock);
+
+	/*
+	 * Request the regions.
+	 */
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	pcdev->irq = irq;
+	pcdev->base = base;
+
+	/* request dma */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_cap_set(DMA_PRIVATE, mask);
+
+	params.prio = 0;
+	params.drcmr = 68;
+	pcdev->dma_chans[0] =
+		dma_request_slave_channel_compat(mask, pxad_filter_fn,
+						 &params, &pdev->dev, "CI_Y");
+	if (!pcdev->dma_chans[0]) {
+		dev_err(&pdev->dev, "Can't request DMA for Y\n");
+		return -ENODEV;
+	}
+
+	params.drcmr = 69;
+	pcdev->dma_chans[1] =
+		dma_request_slave_channel_compat(mask, pxad_filter_fn,
+						 &params, &pdev->dev, "CI_U");
+	if (!pcdev->dma_chans[1]) {
+		dev_err(&pdev->dev, "Can't request DMA for Y\n");
+		err = -ENODEV;
+		goto exit_free_dma_y;
+	}
+
+	params.drcmr = 70;
+	pcdev->dma_chans[2] =
+		dma_request_slave_channel_compat(mask, pxad_filter_fn,
+						 &params, &pdev->dev, "CI_V");
+	if (!pcdev->dma_chans[2]) {
+		dev_err(&pdev->dev, "Can't request DMA for V\n");
+		err = -ENODEV;
+		goto exit_free_dma_u;
+	}
+
+	for (i = 0; i < 3; i++) {
+		config.src_addr = pcdev->res->start + CIBR0 + i * 8;
+		err = dmaengine_slave_config(pcdev->dma_chans[i], &config);
+		if (err < 0) {
+			dev_err(&pdev->dev, "dma slave config failed: %d\n",
+				err);
+			goto exit_free_dma;
+		}
+	}
+
+	/* request irq */
+	err = devm_request_irq(&pdev->dev, pcdev->irq, pxa_camera_irq, 0,
+			       PXA_CAM_DRV_NAME, pcdev);
+	if (err) {
+		dev_err(&pdev->dev, "Camera interrupt register failed\n");
+		goto exit_free_dma;
+	}
+
+	tasklet_init(&pcdev->task_eof, pxa_camera_eof, (unsigned long)pcdev);
+
+	pxa_camera_activate(pcdev);
+
+	dev_set_drvdata(&pdev->dev, pcdev);
+	err = v4l2_device_register(&pdev->dev, &pcdev->v4l2_dev);
+	if (err)
+		goto exit_free_dma;
+
+	pcdev->asds[0] = &pcdev->asd;
+	pcdev->notifier.subdevs = pcdev->asds;
+	pcdev->notifier.num_subdevs = 1;
+	pcdev->notifier.bound = pxa_camera_sensor_bound;
+	pcdev->notifier.unbind = pxa_camera_sensor_unbind;
+
+	if (!of_have_populated_dt())
+		pcdev->asd.match_type = V4L2_ASYNC_MATCH_I2C;
+
+	err = pxa_camera_init_videobuf2(pcdev);
+	if (err)
+		goto exit_free_v4l2dev;
+
+	if (pcdev->mclk) {
+		v4l2_clk_name_i2c(clk_name, sizeof(clk_name),
+				  pcdev->asd.match.i2c.adapter_id,
+				  pcdev->asd.match.i2c.address);
+
+		pcdev->mclk_clk = v4l2_clk_register(&pxa_camera_mclk_ops,
+						    clk_name, NULL);
+		if (IS_ERR(pcdev->mclk_clk)) {
+			err = PTR_ERR(pcdev->mclk_clk);
+			goto exit_free_v4l2dev;
+		}
+	}
+
+	err = v4l2_async_notifier_register(&pcdev->v4l2_dev, &pcdev->notifier);
+	if (err)
+		goto exit_free_clk;
+
+	return 0;
+exit_free_clk:
+	v4l2_clk_unregister(pcdev->mclk_clk);
+exit_free_v4l2dev:
+	v4l2_device_unregister(&pcdev->v4l2_dev);
+exit_free_dma:
+	dma_release_channel(pcdev->dma_chans[2]);
+exit_free_dma_u:
+	dma_release_channel(pcdev->dma_chans[1]);
+exit_free_dma_y:
+	dma_release_channel(pcdev->dma_chans[0]);
+	return err;
+}
+
+static int pxa_camera_remove(struct platform_device *pdev)
+{
+	struct pxa_camera_dev *pcdev = dev_get_drvdata(&pdev->dev);
+
+	pxa_camera_deactivate(pcdev);
+	dma_release_channel(pcdev->dma_chans[0]);
+	dma_release_channel(pcdev->dma_chans[1]);
+	dma_release_channel(pcdev->dma_chans[2]);
+
+	v4l2_clk_unregister(pcdev->mclk_clk);
+	v4l2_device_unregister(&pcdev->v4l2_dev);
+
+	dev_info(&pdev->dev, "PXA Camera driver unloaded\n");
+
+	return 0;
+}
+
+static const struct dev_pm_ops pxa_camera_pm = {
+	.suspend	= pxa_camera_suspend,
+	.resume		= pxa_camera_resume,
+};
+
+static const struct of_device_id pxa_camera_of_match[] = {
+	{ .compatible = "marvell,pxa270-qci", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, pxa_camera_of_match);
+
+static struct platform_driver pxa_camera_driver = {
+	.driver		= {
+		.name	= PXA_CAM_DRV_NAME,
+		.pm	= &pxa_camera_pm,
+		.of_match_table = of_match_ptr(pxa_camera_of_match),
+	},
+	.probe		= pxa_camera_probe,
+	.remove		= pxa_camera_remove,
+};
+
+module_platform_driver(pxa_camera_driver);
+
+MODULE_DESCRIPTION("PXA27x SoC Camera Host driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PXA_CAM_VERSION);
+MODULE_ALIAS("platform:" PXA_CAM_DRV_NAME);
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
index bc50c69..f3a3f31 100644
--- a/drivers/media/platform/rcar-fcp.c
+++ b/drivers/media/platform/rcar-fcp.c
@@ -99,14 +99,14 @@
  */
 int rcar_fcp_enable(struct rcar_fcp_device *fcp)
 {
-	int error;
+	int ret;
 
 	if (!fcp)
 		return 0;
 
-	error = pm_runtime_get_sync(fcp->dev);
-	if (error < 0)
-		return error;
+	ret = pm_runtime_get_sync(fcp->dev);
+	if (ret < 0)
+		return ret;
 
 	return 0;
 }
@@ -165,6 +165,7 @@
 }
 
 static const struct of_device_id rcar_fcp_of_match[] = {
+	{ .compatible = "renesas,fcpf" },
 	{ .compatible = "renesas,fcpv" },
 	{ },
 };
diff --git a/drivers/media/platform/rcar-vin/Kconfig b/drivers/media/platform/rcar-vin/Kconfig
index b2ff2d4..111d2a1 100644
--- a/drivers/media/platform/rcar-vin/Kconfig
+++ b/drivers/media/platform/rcar-vin/Kconfig
@@ -1,6 +1,6 @@
 config VIDEO_RCAR_VIN
 	tristate "R-Car Video Input (VIN) Driver"
-	depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF && HAS_DMA
+	depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF && HAS_DMA && MEDIA_CONTROLLER
 	depends on ARCH_RENESAS || COMPILE_TEST
 	select VIDEOBUF2_DMA_CONTIG
 	---help---
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 4b2007b..098a0b1 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -31,26 +31,22 @@
 
 #define notifier_to_vin(n) container_of(n, struct rvin_dev, notifier)
 
-static int rvin_mbus_supported(struct rvin_dev *vin)
+static bool rvin_mbus_supported(struct rvin_graph_entity *entity)
 {
-	struct v4l2_subdev *sd;
+	struct v4l2_subdev *sd = entity->subdev;
 	struct v4l2_subdev_mbus_code_enum code = {
 		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
 	};
 
-	sd = vin_to_source(vin);
-
 	code.index = 0;
 	while (!v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code)) {
 		code.index++;
 		switch (code.code) {
 		case MEDIA_BUS_FMT_YUYV8_1X16:
-		case MEDIA_BUS_FMT_YUYV8_2X8:
-		case MEDIA_BUS_FMT_YUYV10_2X10:
+		case MEDIA_BUS_FMT_UYVY8_2X8:
+		case MEDIA_BUS_FMT_UYVY10_2X10:
 		case MEDIA_BUS_FMT_RGB888_1X24:
-			vin->source.code = code.code;
-			vin_dbg(vin, "Found supported media bus format: %d\n",
-				vin->source.code);
+			entity->code = code.code;
 			return true;
 		default:
 			break;
@@ -60,142 +56,168 @@
 	return false;
 }
 
-static int rvin_graph_notify_complete(struct v4l2_async_notifier *notifier)
+static int rvin_digital_notify_complete(struct v4l2_async_notifier *notifier)
 {
 	struct rvin_dev *vin = notifier_to_vin(notifier);
 	int ret;
 
+	/* Verify subdevices mbus format */
+	if (!rvin_mbus_supported(&vin->digital)) {
+		vin_err(vin, "Unsupported media bus format for %s\n",
+			vin->digital.subdev->name);
+		return -EINVAL;
+	}
+
+	vin_dbg(vin, "Found media bus format for %s: %d\n",
+		vin->digital.subdev->name, vin->digital.code);
+
 	ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
 	if (ret < 0) {
 		vin_err(vin, "Failed to register subdev nodes\n");
 		return ret;
 	}
 
-	if (!rvin_mbus_supported(vin)) {
-		vin_err(vin, "No supported mediabus format found\n");
-		return -EINVAL;
-	}
-
 	return rvin_v4l2_probe(vin);
 }
 
-static void rvin_graph_notify_unbind(struct v4l2_async_notifier *notifier,
-				     struct v4l2_subdev *sd,
+static void rvin_digital_notify_unbind(struct v4l2_async_notifier *notifier,
+				       struct v4l2_subdev *subdev,
+				       struct v4l2_async_subdev *asd)
+{
+	struct rvin_dev *vin = notifier_to_vin(notifier);
+
+	if (vin->digital.subdev == subdev) {
+		vin_dbg(vin, "unbind digital subdev %s\n", subdev->name);
+		rvin_v4l2_remove(vin);
+		vin->digital.subdev = NULL;
+		return;
+	}
+
+	vin_err(vin, "no entity for subdev %s to unbind\n", subdev->name);
+}
+
+static int rvin_digital_notify_bound(struct v4l2_async_notifier *notifier,
+				     struct v4l2_subdev *subdev,
 				     struct v4l2_async_subdev *asd)
 {
 	struct rvin_dev *vin = notifier_to_vin(notifier);
 
-	rvin_v4l2_remove(vin);
+	v4l2_set_subdev_hostdata(subdev, vin);
+
+	if (vin->digital.asd.match.of.node == subdev->dev->of_node) {
+		vin_dbg(vin, "bound digital subdev %s\n", subdev->name);
+		vin->digital.subdev = subdev;
+		return 0;
+	}
+
+	vin_err(vin, "no entity for subdev %s to bind\n", subdev->name);
+	return -EINVAL;
 }
 
-static int rvin_graph_notify_bound(struct v4l2_async_notifier *notifier,
-				   struct v4l2_subdev *subdev,
-				   struct v4l2_async_subdev *asd)
+static int rvin_digitial_parse_v4l2(struct rvin_dev *vin,
+				    struct device_node *ep,
+				    struct v4l2_mbus_config *mbus_cfg)
 {
-	struct rvin_dev *vin = notifier_to_vin(notifier);
+	struct v4l2_of_endpoint v4l2_ep;
+	int ret;
 
-	vin_dbg(vin, "subdev %s bound\n", subdev->name);
+	ret = v4l2_of_parse_endpoint(ep, &v4l2_ep);
+	if (ret) {
+		vin_err(vin, "Could not parse v4l2 endpoint\n");
+		return -EINVAL;
+	}
 
-	vin->entity.entity = &subdev->entity;
-	vin->entity.subdev = subdev;
+	mbus_cfg->type = v4l2_ep.bus_type;
+
+	switch (mbus_cfg->type) {
+	case V4L2_MBUS_PARALLEL:
+		vin_dbg(vin, "Found PARALLEL media bus\n");
+		mbus_cfg->flags = v4l2_ep.bus.parallel.flags;
+		break;
+	case V4L2_MBUS_BT656:
+		vin_dbg(vin, "Found BT656 media bus\n");
+		mbus_cfg->flags = 0;
+		break;
+	default:
+		vin_err(vin, "Unknown media bus type\n");
+		return -EINVAL;
+	}
 
 	return 0;
 }
 
-static int rvin_graph_parse(struct rvin_dev *vin,
-			    struct device_node *node)
+static int rvin_digital_graph_parse(struct rvin_dev *vin)
 {
-	struct device_node *remote;
-	struct device_node *ep = NULL;
-	struct device_node *next;
-	int ret = 0;
+	struct device_node *ep, *np;
+	int ret;
 
-	while (1) {
-		next = of_graph_get_next_endpoint(node, ep);
-		if (!next)
-			break;
+	vin->digital.asd.match.of.node = NULL;
+	vin->digital.subdev = NULL;
 
+	/*
+	 * Port 0 id 0 is local digital input, try to get it.
+	 * Not all instances can or will have this, that is OK
+	 */
+	ep = of_graph_get_endpoint_by_regs(vin->dev->of_node, 0, 0);
+	if (!ep)
+		return 0;
+
+	np = of_graph_get_remote_port_parent(ep);
+	if (!np) {
+		vin_err(vin, "No remote parent for digital input\n");
 		of_node_put(ep);
-		ep = next;
-
-		remote = of_graph_get_remote_port_parent(ep);
-		if (!remote) {
-			ret = -EINVAL;
-			break;
-		}
-
-		/* Skip entities that we have already processed. */
-		if (remote == vin->dev->of_node) {
-			of_node_put(remote);
-			continue;
-		}
-
-		/* Remote node to connect */
-		if (!vin->entity.node) {
-			vin->entity.node = remote;
-			vin->entity.asd.match_type = V4L2_ASYNC_MATCH_OF;
-			vin->entity.asd.match.of.node = remote;
-			ret++;
-		}
+		return -EINVAL;
 	}
+	of_node_put(np);
 
+	ret = rvin_digitial_parse_v4l2(vin, ep, &vin->digital.mbus_cfg);
 	of_node_put(ep);
+	if (ret)
+		return ret;
 
-	return ret;
+	vin->digital.asd.match.of.node = np;
+	vin->digital.asd.match_type = V4L2_ASYNC_MATCH_OF;
+
+	return 0;
 }
 
-static int rvin_graph_init(struct rvin_dev *vin)
+static int rvin_digital_graph_init(struct rvin_dev *vin)
 {
 	struct v4l2_async_subdev **subdevs = NULL;
 	int ret;
 
-	/* Parse the graph to extract a list of subdevice DT nodes. */
-	ret = rvin_graph_parse(vin, vin->dev->of_node);
-	if (ret < 0) {
-		vin_err(vin, "Graph parsing failed\n");
-		goto done;
-	}
+	ret = rvin_digital_graph_parse(vin);
+	if (ret)
+		return ret;
 
-	if (!ret) {
-		vin_err(vin, "No subdev found in graph\n");
-		goto done;
-	}
-
-	if (ret != 1) {
-		vin_err(vin, "More then one subdev found in graph\n");
-		goto done;
+	if (!vin->digital.asd.match.of.node) {
+		vin_dbg(vin, "No digital subdevice found\n");
+		return -ENODEV;
 	}
 
 	/* Register the subdevices notifier. */
 	subdevs = devm_kzalloc(vin->dev, sizeof(*subdevs), GFP_KERNEL);
-	if (subdevs == NULL) {
-		ret = -ENOMEM;
-		goto done;
-	}
+	if (subdevs == NULL)
+		return -ENOMEM;
 
-	subdevs[0] = &vin->entity.asd;
+	subdevs[0] = &vin->digital.asd;
 
-	vin->notifier.subdevs = subdevs;
+	vin_dbg(vin, "Found digital subdevice %s\n",
+		of_node_full_name(subdevs[0]->match.of.node));
+
 	vin->notifier.num_subdevs = 1;
-	vin->notifier.bound = rvin_graph_notify_bound;
-	vin->notifier.unbind = rvin_graph_notify_unbind;
-	vin->notifier.complete = rvin_graph_notify_complete;
+	vin->notifier.subdevs = subdevs;
+	vin->notifier.bound = rvin_digital_notify_bound;
+	vin->notifier.unbind = rvin_digital_notify_unbind;
+	vin->notifier.complete = rvin_digital_notify_complete;
 
 	ret = v4l2_async_notifier_register(&vin->v4l2_dev, &vin->notifier);
 	if (ret < 0) {
 		vin_err(vin, "Notifier registration failed\n");
-		goto done;
+		return ret;
 	}
 
-	ret = 0;
-
-done:
-	if (ret < 0) {
-		v4l2_async_notifier_unregister(&vin->notifier);
-		of_node_put(vin->entity.node);
-	}
-
-	return ret;
+	return 0;
 }
 
 /* -----------------------------------------------------------------------------
@@ -209,56 +231,14 @@
 	{ .compatible = "renesas,vin-r8a7790", .data = (void *)RCAR_GEN2 },
 	{ .compatible = "renesas,vin-r8a7779", .data = (void *)RCAR_H1 },
 	{ .compatible = "renesas,vin-r8a7778", .data = (void *)RCAR_M1 },
+	{ .compatible = "renesas,rcar-gen2-vin", .data = (void *)RCAR_GEN2 },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, rvin_of_id_table);
 
-static int rvin_parse_dt(struct rvin_dev *vin)
-{
-	const struct of_device_id *match;
-	struct v4l2_of_endpoint ep;
-	struct device_node *np;
-	int ret;
-
-	match = of_match_device(of_match_ptr(rvin_of_id_table), vin->dev);
-	if (!match)
-		return -ENODEV;
-
-	vin->chip = (enum chip_id)match->data;
-
-	np = of_graph_get_next_endpoint(vin->dev->of_node, NULL);
-	if (!np) {
-		vin_err(vin, "Could not find endpoint\n");
-		return -EINVAL;
-	}
-
-	ret = v4l2_of_parse_endpoint(np, &ep);
-	if (ret) {
-		vin_err(vin, "Could not parse endpoint\n");
-		return ret;
-	}
-
-	of_node_put(np);
-
-	vin->mbus_cfg.type = ep.bus_type;
-
-	switch (vin->mbus_cfg.type) {
-	case V4L2_MBUS_PARALLEL:
-		vin->mbus_cfg.flags = ep.bus.parallel.flags;
-		break;
-	case V4L2_MBUS_BT656:
-		vin->mbus_cfg.flags = 0;
-		break;
-	default:
-		vin_err(vin, "Unknown media bus type\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int rcar_vin_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *match;
 	struct rvin_dev *vin;
 	struct resource *mem;
 	int irq, ret;
@@ -267,11 +247,12 @@
 	if (!vin)
 		return -ENOMEM;
 
-	vin->dev = &pdev->dev;
+	match = of_match_device(of_match_ptr(rvin_of_id_table), &pdev->dev);
+	if (!match)
+		return -ENODEV;
 
-	ret = rvin_parse_dt(vin);
-	if (ret)
-		return ret;
+	vin->dev = &pdev->dev;
+	vin->chip = (enum chip_id)match->data;
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (mem == NULL)
@@ -282,14 +263,14 @@
 		return PTR_ERR(vin->base);
 
 	irq = platform_get_irq(pdev, 0);
-	if (irq <= 0)
-		return ret;
+	if (irq < 0)
+		return irq;
 
 	ret = rvin_dma_probe(vin, irq);
 	if (ret)
 		return ret;
 
-	ret = rvin_graph_init(vin);
+	ret = rvin_digital_graph_init(vin);
 	if (ret < 0)
 		goto error;
 
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index 496aa97..9ccd5ff 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -95,6 +95,7 @@
 /* Video n Module Status Register bits */
 #define VNMS_FBS_MASK		(3 << 3)
 #define VNMS_FBS_SHIFT		3
+#define VNMS_FS			(1 << 2)
 #define VNMS_AV			(1 << 1)
 #define VNMS_CA			(1 << 0)
 
@@ -131,6 +132,7 @@
 static int rvin_setup(struct rvin_dev *vin)
 {
 	u32 vnmc, dmr, dmr2, interrupts;
+	v4l2_std_id std;
 	bool progressive = false, output_is_yuv = false, input_is_yuv = false;
 
 	switch (vin->format.field) {
@@ -141,12 +143,21 @@
 		vnmc = VNMC_IM_EVEN;
 		break;
 	case V4L2_FIELD_INTERLACED:
+		/* Default to TB */
+		vnmc = VNMC_IM_FULL;
+		/* Use BT if video standard can be read and is 60 Hz format */
+		if (!v4l2_subdev_call(vin_to_source(vin), video, g_std, &std)) {
+			if (std & V4L2_STD_525_60)
+				vnmc = VNMC_IM_FULL | VNMC_FOC;
+		}
+		break;
 	case V4L2_FIELD_INTERLACED_TB:
 		vnmc = VNMC_IM_FULL;
 		break;
 	case V4L2_FIELD_INTERLACED_BT:
 		vnmc = VNMC_IM_FULL | VNMC_FOC;
 		break;
+	case V4L2_FIELD_ALTERNATE:
 	case V4L2_FIELD_NONE:
 		if (vin->continuous) {
 			vnmc = VNMC_IM_ODD_EVEN;
@@ -163,24 +174,24 @@
 	/*
 	 * Input interface
 	 */
-	switch (vin->source.code) {
+	switch (vin->digital.code) {
 	case MEDIA_BUS_FMT_YUYV8_1X16:
 		/* BT.601/BT.1358 16bit YCbCr422 */
 		vnmc |= VNMC_INF_YUV16;
 		input_is_yuv = true;
 		break;
-	case MEDIA_BUS_FMT_YUYV8_2X8:
+	case MEDIA_BUS_FMT_UYVY8_2X8:
 		/* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
-		vnmc |= vin->mbus_cfg.type == V4L2_MBUS_BT656 ?
+		vnmc |= vin->digital.mbus_cfg.type == V4L2_MBUS_BT656 ?
 			VNMC_INF_YUV8_BT656 : VNMC_INF_YUV8_BT601;
 		input_is_yuv = true;
 		break;
 	case MEDIA_BUS_FMT_RGB888_1X24:
 		vnmc |= VNMC_INF_RGB888;
 		break;
-	case MEDIA_BUS_FMT_YUYV10_2X10:
+	case MEDIA_BUS_FMT_UYVY10_2X10:
 		/* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
-		vnmc |= vin->mbus_cfg.type == V4L2_MBUS_BT656 ?
+		vnmc |= vin->digital.mbus_cfg.type == V4L2_MBUS_BT656 ?
 			VNMC_INF_YUV10_BT656 : VNMC_INF_YUV10_BT601;
 		input_is_yuv = true;
 		break;
@@ -192,11 +203,11 @@
 	dmr2 = VNDMR2_FTEV | VNDMR2_VLV(1);
 
 	/* Hsync Signal Polarity Select */
-	if (!(vin->mbus_cfg.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
+	if (!(vin->digital.mbus_cfg.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
 		dmr2 |= VNDMR2_HPS;
 
 	/* Vsync Signal Polarity Select */
-	if (!(vin->mbus_cfg.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
+	if (!(vin->digital.mbus_cfg.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
 		dmr2 |= VNDMR2_VPS;
 
 	/*
@@ -225,11 +236,9 @@
 		dmr = 0;
 		break;
 	case V4L2_PIX_FMT_XBGR32:
-		if (vin->chip == RCAR_GEN2 || vin->chip == RCAR_H1) {
-			dmr = VNDMR_EXRGB;
-			break;
-		}
-		/* fall through */
+		/* Note: not supported on M1 */
+		dmr = VNDMR_EXRGB;
+		break;
 	default:
 		vin_err(vin, "Invalid pixelformat (0x%x)\n",
 			vin->format.pixelformat);
@@ -322,15 +331,26 @@
 	return rvin_read(vin, VNMS_REG) & VNMS_CA;
 }
 
-static int rvin_get_active_slot(struct rvin_dev *vin)
+static int rvin_get_active_slot(struct rvin_dev *vin, u32 vnms)
 {
 	if (vin->continuous)
-		return (rvin_read(vin, VNMS_REG) & VNMS_FBS_MASK)
-			>> VNMS_FBS_SHIFT;
+		return (vnms & VNMS_FBS_MASK) >> VNMS_FBS_SHIFT;
 
 	return 0;
 }
 
+static enum v4l2_field rvin_get_active_field(struct rvin_dev *vin, u32 vnms)
+{
+	if (vin->format.field == V4L2_FIELD_ALTERNATE) {
+		/* If FS is set it's a Even field */
+		if (vnms & VNMS_FS)
+			return V4L2_FIELD_BOTTOM;
+		return V4L2_FIELD_TOP;
+	}
+
+	return vin->format.field;
+}
+
 static void rvin_set_slot_addr(struct rvin_dev *vin, int slot, dma_addr_t addr)
 {
 	const struct rvin_video_format *fmt;
@@ -871,7 +891,7 @@
 static irqreturn_t rvin_irq(int irq, void *data)
 {
 	struct rvin_dev *vin = data;
-	u32 int_status;
+	u32 int_status, vnms;
 	int slot;
 	unsigned int sequence, handled = 0;
 	unsigned long flags;
@@ -898,7 +918,8 @@
 	}
 
 	/* Prepare for capture and update state */
-	slot = rvin_get_active_slot(vin);
+	vnms = rvin_read(vin, VNMS_REG);
+	slot = rvin_get_active_slot(vin, vnms);
 	sequence = vin->sequence++;
 
 	vin_dbg(vin, "IRQ %02d: %d\tbuf0: %c buf1: %c buf2: %c\tmore: %d\n",
@@ -913,7 +934,7 @@
 		goto done;
 
 	/* Capture frame */
-	vin->queue_buf[slot]->field = vin->format.field;
+	vin->queue_buf[slot]->field = rvin_get_active_field(vin, vnms);
 	vin->queue_buf[slot]->sequence = sequence;
 	vin->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns();
 	vb2_buffer_done(&vin->queue_buf[slot]->vb2_buf, VB2_BUF_STATE_DONE);
@@ -1116,7 +1137,7 @@
 	rvin_disable_interrupts(vin);
 }
 
-static struct vb2_ops rvin_qops = {
+static const struct vb2_ops rvin_qops = {
 	.queue_setup		= rvin_queue_setup,
 	.buf_prepare		= rvin_buffer_prepare,
 	.buf_queue		= rvin_buffer_queue,
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index 10a5c10..2bbe6d4 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -92,21 +92,84 @@
  * V4L2
  */
 
+static void rvin_reset_crop_compose(struct rvin_dev *vin)
+{
+	vin->crop.top = vin->crop.left = 0;
+	vin->crop.width = vin->source.width;
+	vin->crop.height = vin->source.height;
+
+	vin->compose.top = vin->compose.left = 0;
+	vin->compose.width = vin->format.width;
+	vin->compose.height = vin->format.height;
+}
+
+static int rvin_reset_format(struct rvin_dev *vin)
+{
+	struct v4l2_subdev_format fmt = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+	};
+	struct v4l2_mbus_framefmt *mf = &fmt.format;
+	int ret;
+
+	fmt.pad = vin->src_pad_idx;
+
+	ret = v4l2_subdev_call(vin_to_source(vin), pad, get_fmt, NULL, &fmt);
+	if (ret)
+		return ret;
+
+	vin->format.width	= mf->width;
+	vin->format.height	= mf->height;
+	vin->format.colorspace	= mf->colorspace;
+	vin->format.field	= mf->field;
+
+	/*
+	 * If the subdevice uses ALTERNATE field mode and G_STD is
+	 * implemented use the VIN HW to combine the two fields to
+	 * one INTERLACED frame. The ALTERNATE field mode can still
+	 * be requested in S_FMT and be respected, this is just the
+	 * default which is applied at probing or when S_STD is called.
+	 */
+	if (vin->format.field == V4L2_FIELD_ALTERNATE &&
+	    v4l2_subdev_has_op(vin_to_source(vin), video, g_std))
+		vin->format.field = V4L2_FIELD_INTERLACED;
+
+	switch (vin->format.field) {
+	case V4L2_FIELD_TOP:
+	case V4L2_FIELD_BOTTOM:
+	case V4L2_FIELD_ALTERNATE:
+		vin->format.height /= 2;
+		break;
+	case V4L2_FIELD_NONE:
+	case V4L2_FIELD_INTERLACED_TB:
+	case V4L2_FIELD_INTERLACED_BT:
+	case V4L2_FIELD_INTERLACED:
+		break;
+	default:
+		vin->format.field = V4L2_FIELD_NONE;
+		break;
+	}
+
+	rvin_reset_crop_compose(vin);
+
+	return 0;
+}
+
 static int __rvin_try_format_source(struct rvin_dev *vin,
-					u32 which,
-					struct v4l2_pix_format *pix,
-					struct rvin_source_fmt *source)
+				    u32 which,
+				    struct v4l2_pix_format *pix,
+				    struct rvin_source_fmt *source)
 {
 	struct v4l2_subdev *sd;
 	struct v4l2_subdev_pad_config *pad_cfg;
 	struct v4l2_subdev_format format = {
 		.which = which,
 	};
+	enum v4l2_field field;
 	int ret;
 
 	sd = vin_to_source(vin);
 
-	v4l2_fill_mbus_format(&format.format, pix, vin->source.code);
+	v4l2_fill_mbus_format(&format.format, pix, vin->digital.code);
 
 	pad_cfg = v4l2_subdev_alloc_pad_config(sd);
 	if (pad_cfg == NULL)
@@ -114,28 +177,31 @@
 
 	format.pad = vin->src_pad_idx;
 
-	ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, pad, set_fmt,
-					 pad_cfg, &format);
-	if (ret < 0)
-		goto cleanup;
+	field = pix->field;
+
+	ret = v4l2_subdev_call(sd, pad, set_fmt, pad_cfg, &format);
+	if (ret < 0 && ret != -ENOIOCTLCMD)
+		goto done;
 
 	v4l2_fill_pix_format(pix, &format.format);
 
+	pix->field = field;
+
 	source->width = pix->width;
 	source->height = pix->height;
 
 	vin_dbg(vin, "Source resolution: %ux%u\n", source->width,
 		source->height);
 
-cleanup:
+done:
 	v4l2_subdev_free_pad_config(pad_cfg);
-	return 0;
+	return ret;
 }
 
 static int __rvin_try_format(struct rvin_dev *vin,
-				 u32 which,
-				 struct v4l2_pix_format *pix,
-				 struct rvin_source_fmt *source)
+			     u32 which,
+			     struct v4l2_pix_format *pix,
+			     struct rvin_source_fmt *source)
 {
 	const struct rvin_video_format *info;
 	u32 rwidth, rheight, walign;
@@ -144,6 +210,10 @@
 	rwidth = pix->width;
 	rheight = pix->height;
 
+	/* Keep current field if no specific one is asked for */
+	if (pix->field == V4L2_FIELD_ANY)
+		pix->field = vin->format.field;
+
 	/*
 	 * Retrieve format information and select the current format if the
 	 * requested format isn't supported.
@@ -164,6 +234,23 @@
 	/* Limit to source capabilities */
 	__rvin_try_format_source(vin, which, pix, source);
 
+	switch (pix->field) {
+	case V4L2_FIELD_TOP:
+	case V4L2_FIELD_BOTTOM:
+	case V4L2_FIELD_ALTERNATE:
+		pix->height /= 2;
+		source->height /= 2;
+		break;
+	case V4L2_FIELD_NONE:
+	case V4L2_FIELD_INTERLACED_TB:
+	case V4L2_FIELD_INTERLACED_BT:
+	case V4L2_FIELD_INTERLACED:
+		break;
+	default:
+		pix->field = V4L2_FIELD_NONE;
+		break;
+	}
+
 	/* If source can't match format try if VIN can scale */
 	if (source->width != rwidth || source->height != rheight)
 		rvin_scale_try(vin, pix, rwidth, rheight);
@@ -175,24 +262,16 @@
 	v4l_bound_align_image(&pix->width, 2, RVIN_MAX_WIDTH, walign,
 			      &pix->height, 4, RVIN_MAX_HEIGHT, 2, 0);
 
-	switch (pix->field) {
-	case V4L2_FIELD_NONE:
-	case V4L2_FIELD_TOP:
-	case V4L2_FIELD_BOTTOM:
-	case V4L2_FIELD_INTERLACED_TB:
-	case V4L2_FIELD_INTERLACED_BT:
-	case V4L2_FIELD_INTERLACED:
-		break;
-	default:
-		pix->field = V4L2_FIELD_NONE;
-		break;
-	}
-
 	pix->bytesperline = max_t(u32, pix->bytesperline,
 				  rvin_format_bytesperline(pix));
 	pix->sizeimage = max_t(u32, pix->sizeimage,
 			       rvin_format_sizeimage(pix));
 
+	if (vin->chip == RCAR_M1 && pix->pixelformat == V4L2_PIX_FMT_XBGR32) {
+		vin_err(vin, "pixel format XBGR32 not supported on M1\n");
+		return -EINVAL;
+	}
+
 	vin_dbg(vin, "Requested %ux%u Got %ux%u bpl: %d size: %d\n",
 		rwidth, rheight, pix->width, pix->height,
 		pix->bytesperline, pix->sizeimage);
@@ -219,7 +298,7 @@
 	struct rvin_source_fmt source;
 
 	return __rvin_try_format(vin, V4L2_SUBDEV_FORMAT_TRY, &f->fmt.pix,
-				     &source);
+				 &source);
 }
 
 static int rvin_s_fmt_vid_cap(struct file *file, void *priv,
@@ -233,7 +312,7 @@
 		return -EBUSY;
 
 	ret = __rvin_try_format(vin, V4L2_SUBDEV_FORMAT_ACTIVE, &f->fmt.pix,
-				    &source);
+				&source);
 	if (ret)
 		return ret;
 
@@ -242,6 +321,8 @@
 
 	vin->format = f->fmt.pix;
 
+	rvin_reset_crop_compose(vin);
+
 	return 0;
 }
 
@@ -334,8 +415,8 @@
 		vin->crop = s->r = r;
 
 		vin_dbg(vin, "Cropped %dx%d@%d:%d of %dx%d\n",
-			 r.width, r.height, r.left, r.top,
-			 vin->source.width, vin->source.height);
+			r.width, r.height, r.left, r.top,
+			vin->source.width, vin->source.height);
 		break;
 	case V4L2_SEL_TGT_COMPOSE:
 		/* Make sure compose rect fits inside output format */
@@ -359,8 +440,8 @@
 		vin->compose = s->r = r;
 
 		vin_dbg(vin, "Compose %dx%d@%d:%d in %dx%d\n",
-			 r.width, r.height, r.left, r.top,
-			 vin->format.width, vin->format.height);
+			r.width, r.height, r.left, r.top,
+			vin->format.width, vin->format.height);
 		break;
 	default:
 		return -EINVAL;
@@ -381,7 +462,7 @@
 	if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
 		return -EINVAL;
 
-	return v4l2_subdev_call(sd, video, cropcap, crop);
+	return v4l2_subdev_call(sd, video, g_pixelaspect, &crop->pixelaspect);
 }
 
 static int rvin_enum_input(struct file *file, void *priv,
@@ -433,35 +514,14 @@
 static int rvin_s_std(struct file *file, void *priv, v4l2_std_id a)
 {
 	struct rvin_dev *vin = video_drvdata(file);
-	struct v4l2_subdev *sd = vin_to_source(vin);
-	struct v4l2_subdev_format fmt = {
-		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
-	};
-	struct v4l2_mbus_framefmt *mf = &fmt.format;
-	int ret = v4l2_subdev_call(sd, video, s_std, a);
+	int ret;
 
+	ret = v4l2_subdev_call(vin_to_source(vin), video, s_std, a);
 	if (ret < 0)
 		return ret;
 
 	/* Changing the standard will change the width/height */
-	ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
-	if (ret) {
-		vin_err(vin, "Failed to get initial format\n");
-		return ret;
-	}
-
-	vin->format.width = mf->width;
-	vin->format.height = mf->height;
-
-	vin->crop.top = vin->crop.left = 0;
-	vin->crop.width = mf->width;
-	vin->crop.height = mf->height;
-
-	vin->compose.top = vin->compose.left = 0;
-	vin->compose.width = mf->width;
-	vin->compose.height = mf->height;
-
-	return 0;
+	return rvin_reset_format(vin);
 }
 
 static int rvin_g_std(struct file *file, void *priv, v4l2_std_id *a)
@@ -483,14 +543,14 @@
 }
 
 static int rvin_enum_dv_timings(struct file *file, void *priv_fh,
-				    struct v4l2_enum_dv_timings *timings)
+				struct v4l2_enum_dv_timings *timings)
 {
 	struct rvin_dev *vin = video_drvdata(file);
 	struct v4l2_subdev *sd = vin_to_source(vin);
 	int pad, ret;
 
 	pad = timings->pad;
-	timings->pad = vin->src_pad_idx;
+	timings->pad = vin->sink_pad_idx;
 
 	ret = v4l2_subdev_call(sd, pad, enum_dv_timings, timings);
 
@@ -500,52 +560,51 @@
 }
 
 static int rvin_s_dv_timings(struct file *file, void *priv_fh,
-				    struct v4l2_dv_timings *timings)
+			     struct v4l2_dv_timings *timings)
 {
 	struct rvin_dev *vin = video_drvdata(file);
 	struct v4l2_subdev *sd = vin_to_source(vin);
-	int err;
+	int ret;
 
-	err = v4l2_subdev_call(sd,
-			video, s_dv_timings, timings);
-	if (!err) {
-		vin->source.width = timings->bt.width;
-		vin->source.height = timings->bt.height;
-		vin->format.width = timings->bt.width;
-		vin->format.height = timings->bt.height;
-	}
-	return err;
+	ret = v4l2_subdev_call(sd, video, s_dv_timings, timings);
+	if (ret)
+		return ret;
+
+	vin->source.width = timings->bt.width;
+	vin->source.height = timings->bt.height;
+	vin->format.width = timings->bt.width;
+	vin->format.height = timings->bt.height;
+
+	return 0;
 }
 
 static int rvin_g_dv_timings(struct file *file, void *priv_fh,
-				    struct v4l2_dv_timings *timings)
+			     struct v4l2_dv_timings *timings)
 {
 	struct rvin_dev *vin = video_drvdata(file);
 	struct v4l2_subdev *sd = vin_to_source(vin);
 
-	return v4l2_subdev_call(sd,
-			video, g_dv_timings, timings);
+	return v4l2_subdev_call(sd, video, g_dv_timings, timings);
 }
 
 static int rvin_query_dv_timings(struct file *file, void *priv_fh,
-				    struct v4l2_dv_timings *timings)
+				 struct v4l2_dv_timings *timings)
 {
 	struct rvin_dev *vin = video_drvdata(file);
 	struct v4l2_subdev *sd = vin_to_source(vin);
 
-	return v4l2_subdev_call(sd,
-			video, query_dv_timings, timings);
+	return v4l2_subdev_call(sd, video, query_dv_timings, timings);
 }
 
 static int rvin_dv_timings_cap(struct file *file, void *priv_fh,
-				    struct v4l2_dv_timings_cap *cap)
+			       struct v4l2_dv_timings_cap *cap)
 {
 	struct rvin_dev *vin = video_drvdata(file);
 	struct v4l2_subdev *sd = vin_to_source(vin);
 	int pad, ret;
 
 	pad = cap->pad;
-	cap->pad = vin->src_pad_idx;
+	cap->pad = vin->sink_pad_idx;
 
 	ret = v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
 
@@ -554,6 +613,44 @@
 	return ret;
 }
 
+static int rvin_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
+{
+	struct rvin_dev *vin = video_drvdata(file);
+	struct v4l2_subdev *sd = vin_to_source(vin);
+	int input, ret;
+
+	if (edid->pad)
+		return -EINVAL;
+
+	input = edid->pad;
+	edid->pad = vin->sink_pad_idx;
+
+	ret = v4l2_subdev_call(sd, pad, get_edid, edid);
+
+	edid->pad = input;
+
+	return ret;
+}
+
+static int rvin_s_edid(struct file *file, void *fh, struct v4l2_edid *edid)
+{
+	struct rvin_dev *vin = video_drvdata(file);
+	struct v4l2_subdev *sd = vin_to_source(vin);
+	int input, ret;
+
+	if (edid->pad)
+		return -EINVAL;
+
+	input = edid->pad;
+	edid->pad = vin->sink_pad_idx;
+
+	ret = v4l2_subdev_call(sd, pad, set_edid, edid);
+
+	edid->pad = input;
+
+	return ret;
+}
+
 static const struct v4l2_ioctl_ops rvin_ioctl_ops = {
 	.vidioc_querycap		= rvin_querycap,
 	.vidioc_try_fmt_vid_cap		= rvin_try_fmt_vid_cap,
@@ -576,6 +673,9 @@
 	.vidioc_s_dv_timings		= rvin_s_dv_timings,
 	.vidioc_query_dv_timings	= rvin_query_dv_timings,
 
+	.vidioc_g_edid			= rvin_g_edid,
+	.vidioc_s_edid			= rvin_s_edid,
+
 	.vidioc_querystd		= rvin_querystd,
 	.vidioc_g_std			= rvin_g_std,
 	.vidioc_s_std			= rvin_s_std,
@@ -767,16 +867,9 @@
 
 int rvin_v4l2_probe(struct rvin_dev *vin)
 {
-	struct v4l2_subdev_format fmt = {
-		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
-	};
-	struct v4l2_mbus_framefmt *mf = &fmt.format;
 	struct video_device *vdev = &vin->vdev;
 	struct v4l2_subdev *sd = vin_to_source(vin);
-#if defined(CONFIG_MEDIA_CONTROLLER)
-	int pad_idx;
-#endif
-	int ret;
+	int pad_idx, ret;
 
 	v4l2_set_subdev_hostdata(sd, vin);
 
@@ -823,41 +916,23 @@
 		V4L2_CAP_READWRITE;
 
 	vin->src_pad_idx = 0;
-#if defined(CONFIG_MEDIA_CONTROLLER)
 	for (pad_idx = 0; pad_idx < sd->entity.num_pads; pad_idx++)
-		if (sd->entity.pads[pad_idx].flags
-				== MEDIA_PAD_FL_SOURCE)
+		if (sd->entity.pads[pad_idx].flags == MEDIA_PAD_FL_SOURCE)
 			break;
 	if (pad_idx >= sd->entity.num_pads)
 		return -EINVAL;
 
 	vin->src_pad_idx = pad_idx;
-#endif
-	fmt.pad = vin->src_pad_idx;
 
-	/* Try to improve our guess of a reasonable window format */
-	ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
-	if (ret) {
-		vin_err(vin, "Failed to get initial format\n");
-		return ret;
-	}
+	vin->sink_pad_idx = 0;
+	for (pad_idx = 0; pad_idx < sd->entity.num_pads; pad_idx++)
+		if (sd->entity.pads[pad_idx].flags == MEDIA_PAD_FL_SINK) {
+			vin->sink_pad_idx = pad_idx;
+			break;
+		}
 
-	/* Set default format */
-	vin->format.width	= mf->width;
-	vin->format.height	= mf->height;
-	vin->format.colorspace	= mf->colorspace;
-	vin->format.field	= mf->field;
 	vin->format.pixelformat	= RVIN_DEFAULT_FORMAT;
-
-
-	/* Set initial crop and compose */
-	vin->crop.top = vin->crop.left = 0;
-	vin->crop.width = mf->width;
-	vin->crop.height = mf->height;
-
-	vin->compose.top = vin->compose.left = 0;
-	vin->compose.width = mf->width;
-	vin->compose.height = mf->height;
+	rvin_reset_format(vin);
 
 	ret = video_register_device(&vin->vdev, VFL_TYPE_GRABBER, -1);
 	if (ret) {
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
index 31ad39a..727e215 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -30,9 +30,9 @@
 #define HW_BUFFER_MASK 0x7f
 
 enum chip_id {
-	RCAR_GEN2,
 	RCAR_H1,
 	RCAR_M1,
+	RCAR_GEN2,
 };
 
 /**
@@ -50,12 +50,10 @@
 
 /**
  * struct rvin_source_fmt - Source information
- * @code:	Media bus format from source
  * @width:	Width from source
  * @height:	Height from source
  */
 struct rvin_source_fmt {
-	u32 code;
 	u32 width;
 	u32 height;
 };
@@ -70,12 +68,19 @@
 	u8 bpp;
 };
 
+/**
+ * struct rvin_graph_entity - Video endpoint from async framework
+ * @asd:	sub-device descriptor for async framework
+ * @subdev:	subdevice matched using async framework
+ * @code:	Media bus format from source
+ * @mbus_cfg:	Media bus format from DT
+ */
 struct rvin_graph_entity {
-	struct device_node *node;
-	struct media_entity *entity;
-
 	struct v4l2_async_subdev asd;
 	struct v4l2_subdev *subdev;
+
+	u32 code;
+	struct v4l2_mbus_config mbus_cfg;
 };
 
 /**
@@ -83,14 +88,14 @@
  * @dev:		(OF) device
  * @base:		device I/O register space remapped to virtual memory
  * @chip:		type of VIN chip
- * @mbus_cfg		media bus configuration
  *
  * @vdev:		V4L2 video device associated with VIN
  * @v4l2_dev:		V4L2 device
  * @src_pad_idx:	source pad index for media controller drivers
+ * @sink_pad_idx:	sink pad index for media controller drivers
  * @ctrl_handler:	V4L2 control handler
  * @notifier:		V4L2 asynchronous subdevs notifier
- * @entity:		entity in the DT for subdevice
+ * @digital:		entity in the DT for local digital subdevice
  *
  * @lock:		protects @queue
  * @queue:		vb2 buffers queue
@@ -113,14 +118,14 @@
 	struct device *dev;
 	void __iomem *base;
 	enum chip_id chip;
-	struct v4l2_mbus_config mbus_cfg;
 
 	struct video_device vdev;
 	struct v4l2_device v4l2_dev;
 	int src_pad_idx;
+	int sink_pad_idx;
 	struct v4l2_ctrl_handler ctrl_handler;
 	struct v4l2_async_notifier notifier;
-	struct rvin_graph_entity entity;
+	struct rvin_graph_entity digital;
 
 	struct mutex lock;
 	struct vb2_queue queue;
@@ -139,7 +144,7 @@
 	struct v4l2_rect compose;
 };
 
-#define vin_to_source(vin)		vin->entity.subdev
+#define vin_to_source(vin)		vin->digital.subdev
 
 /* Debug */
 #define vin_dbg(d, fmt, arg...)		dev_dbg(d->dev, fmt, ##arg)
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index 16782ce..d1746ec 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -1183,7 +1183,7 @@
 	}
 }
 
-static struct vb2_ops jpu_qops = {
+static const struct vb2_ops jpu_qops = {
 	.queue_setup		= jpu_queue_setup,
 	.buf_prepare		= jpu_buf_prepare,
 	.buf_queue		= jpu_buf_queue,
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 391dd7a..62c0dec 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -138,7 +138,7 @@
 	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 }
 
-static struct vb2_ops g2d_qops = {
+static const struct vb2_ops g2d_qops = {
 	.queue_setup	= g2d_queue_setup,
 	.buf_prepare	= g2d_buf_prepare,
 	.buf_queue	= g2d_buf_queue,
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 785e693..52dc794 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -537,6 +537,7 @@
 static int s5p_jpeg_get_dwngrd_sch_id_by_fourcc(u32 fourcc)
 {
 	int i;
+
 	for (i = 0; i < ARRAY_SIZE(fourcc_to_dwngrd_schema_id); ++i) {
 		if (fourcc_to_dwngrd_schema_id[i] == fourcc)
 			return i;
@@ -1246,17 +1247,18 @@
 	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
 
 	if (ctx->mode == S5P_JPEG_ENCODE) {
-		strlcpy(cap->driver, S5P_JPEG_M2M_NAME " encoder",
+		strlcpy(cap->driver, S5P_JPEG_M2M_NAME,
 			sizeof(cap->driver));
 		strlcpy(cap->card, S5P_JPEG_M2M_NAME " encoder",
 			sizeof(cap->card));
 	} else {
-		strlcpy(cap->driver, S5P_JPEG_M2M_NAME " decoder",
+		strlcpy(cap->driver, S5P_JPEG_M2M_NAME,
 			sizeof(cap->driver));
 		strlcpy(cap->card, S5P_JPEG_M2M_NAME " decoder",
 			sizeof(cap->card));
 	}
-	cap->bus_info[0] = 0;
+	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+		 dev_name(ctx->jpeg->dev));
 	cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
 	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 	return 0;
@@ -1273,7 +1275,8 @@
 			if (num == f->index)
 				break;
 			/* Correct type but haven't reached our index yet,
-			 * just increment per-type index */
+			 * just increment per-type index
+			 */
 			++num;
 		}
 	}
@@ -1349,6 +1352,7 @@
 	pix->bytesperline = 0;
 	if (q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG) {
 		u32 bpl = q_data->w;
+
 		if (q_data->fmt->colplanes == 1)
 			bpl = (bpl * q_data->fmt->depth) >> 3;
 		pix->bytesperline = bpl;
@@ -1374,6 +1378,7 @@
 
 	for (k = 0; k < ARRAY_SIZE(sjpeg_formats); k++) {
 		struct s5p_jpeg_fmt *fmt = &sjpeg_formats[k];
+
 		if (fmt->fourcc == pixelformat &&
 		    fmt->flags & fmt_flag &&
 		    fmt->flags & ctx->jpeg->variant->fmt_ver_flag) {
@@ -1431,7 +1436,8 @@
 		return -EINVAL;
 
 	/* V4L2 specification suggests the driver corrects the format struct
-	 * if any of the dimensions is unsupported */
+	 * if any of the dimensions is unsupported
+	 */
 	if (q_type == FMT_TYPE_OUTPUT)
 		jpeg_bound_align_image(ctx, &pix->width, S5P_JPEG_MIN_WIDTH,
 				       S5P_JPEG_MAX_WIDTH, 0,
@@ -2489,6 +2495,7 @@
 	if (ctx->mode == S5P_JPEG_DECODE &&
 	    vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
 		struct s5p_jpeg_q_data tmp, *q_data;
+
 		ctx->hdr_parsed = s5p_jpeg_parse_hdr(&tmp,
 		     (unsigned long)vb2_plane_vaddr(vb, 0),
 		     min((unsigned long)ctx->out_q.size,
@@ -2538,7 +2545,7 @@
 	pm_runtime_put(ctx->jpeg->dev);
 }
 
-static struct vb2_ops s5p_jpeg_qops = {
+static const struct vb2_ops s5p_jpeg_qops = {
 	.queue_setup		= s5p_jpeg_queue_setup,
 	.buf_prepare		= s5p_jpeg_buf_prepare,
 	.buf_queue		= s5p_jpeg_buf_queue,
@@ -2996,27 +3003,11 @@
 }
 #endif /* CONFIG_PM */
 
-#ifdef CONFIG_PM_SLEEP
-static int s5p_jpeg_suspend(struct device *dev)
-{
-	if (pm_runtime_suspended(dev))
-		return 0;
-
-	return s5p_jpeg_runtime_suspend(dev);
-}
-
-static int s5p_jpeg_resume(struct device *dev)
-{
-	if (pm_runtime_suspended(dev))
-		return 0;
-
-	return s5p_jpeg_runtime_resume(dev);
-}
-#endif
-
 static const struct dev_pm_ops s5p_jpeg_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(s5p_jpeg_suspend, s5p_jpeg_resume)
-	SET_RUNTIME_PM_OPS(s5p_jpeg_runtime_suspend, s5p_jpeg_runtime_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(s5p_jpeg_runtime_suspend, s5p_jpeg_runtime_resume,
+			   NULL)
 };
 
 static struct s5p_jpeg_variant s5p_jpeg_drvdata = {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index e3f104f..0a5b8f5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -153,7 +153,7 @@
 		 * error. Now it is time to kill all instances and
 		 * reset the MFC. */
 		mfc_err("Time out during waiting for HW\n");
-		queue_work(dev->watchdog_workqueue, &dev->watchdog_work);
+		schedule_work(&dev->watchdog_work);
 	}
 	dev->watchdog_timer.expires = jiffies +
 					msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
@@ -494,7 +494,6 @@
 	s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
 	s5p_mfc_clock_off();
 	wake_up_dev(dev, reason, err);
-	return;
 }
 
 /* Header parsing interrupt handling */
@@ -759,7 +758,6 @@
 	/* Allocate memory for context */
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx) {
-		mfc_err("Not enough memory\n");
 		ret = -ENOMEM;
 		goto err_alloc;
 	}
@@ -776,7 +774,7 @@
 	while (dev->ctx[ctx->num]) {
 		ctx->num++;
 		if (ctx->num >= MFC_NUM_CONTEXTS) {
-			mfc_err("Too many open contexts\n");
+			mfc_debug(2, "Too many open contexts\n");
 			ret = -EBUSY;
 			goto err_no_ctx;
 		}
@@ -924,39 +922,50 @@
 	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
 	struct s5p_mfc_dev *dev = ctx->dev;
 
+	/* if dev is null, do cleanup that doesn't need dev */
 	mfc_debug_enter();
-	mutex_lock(&dev->mfc_mutex);
+	if (dev)
+		mutex_lock(&dev->mfc_mutex);
 	s5p_mfc_clock_on();
 	vb2_queue_release(&ctx->vq_src);
 	vb2_queue_release(&ctx->vq_dst);
-	/* Mark context as idle */
-	clear_work_bit_irqsave(ctx);
-	/* If instance was initialised and not yet freed,
-	 * return instance and free resources */
-	if (ctx->state != MFCINST_FREE && ctx->state != MFCINST_INIT) {
-		mfc_debug(2, "Has to free instance\n");
-		s5p_mfc_close_mfc_inst(dev, ctx);
-	}
-	/* hardware locking scheme */
-	if (dev->curr_ctx == ctx->num)
-		clear_bit(0, &dev->hw_lock);
-	dev->num_inst--;
-	if (dev->num_inst == 0) {
-		mfc_debug(2, "Last instance\n");
-		s5p_mfc_deinit_hw(dev);
-		del_timer_sync(&dev->watchdog_timer);
-		if (s5p_mfc_power_off() < 0)
-			mfc_err("Power off failed\n");
+	if (dev) {
+		/* Mark context as idle */
+		clear_work_bit_irqsave(ctx);
+		/*
+		 * If instance was initialised and not yet freed,
+		 * return instance and free resources
+		*/
+		if (ctx->state != MFCINST_FREE && ctx->state != MFCINST_INIT) {
+			mfc_debug(2, "Has to free instance\n");
+			s5p_mfc_close_mfc_inst(dev, ctx);
+		}
+		/* hardware locking scheme */
+		if (dev->curr_ctx == ctx->num)
+			clear_bit(0, &dev->hw_lock);
+		dev->num_inst--;
+		if (dev->num_inst == 0) {
+			mfc_debug(2, "Last instance\n");
+			s5p_mfc_deinit_hw(dev);
+			del_timer_sync(&dev->watchdog_timer);
+			if (s5p_mfc_power_off() < 0)
+				mfc_err("Power off failed\n");
+		}
 	}
 	mfc_debug(2, "Shutting down clock\n");
 	s5p_mfc_clock_off();
-	dev->ctx[ctx->num] = NULL;
+	if (dev)
+		dev->ctx[ctx->num] = NULL;
 	s5p_mfc_dec_ctrls_delete(ctx);
 	v4l2_fh_del(&ctx->fh);
-	v4l2_fh_exit(&ctx->fh);
+	/* vdev is gone if dev is null */
+	if (dev)
+		v4l2_fh_exit(&ctx->fh);
 	kfree(ctx);
 	mfc_debug_leave();
-	mutex_unlock(&dev->mfc_mutex);
+	if (dev)
+		mutex_unlock(&dev->mfc_mutex);
+
 	return 0;
 }
 
@@ -1158,10 +1167,6 @@
 	dev->variant = mfc_get_drv_data(pdev);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res == NULL) {
-		dev_err(&pdev->dev, "failed to get io resource\n");
-		return -ENOENT;
-	}
 	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(dev->regs_base))
 		return PTR_ERR(dev->regs_base);
@@ -1241,7 +1246,6 @@
 	platform_set_drvdata(pdev, dev);
 
 	dev->hw_lock = 0;
-	dev->watchdog_workqueue = create_singlethread_workqueue(S5P_MFC_NAME);
 	INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
 	atomic_set(&dev->watchdog_cnt, 0);
 	init_timer(&dev->watchdog_timer);
@@ -1298,12 +1302,28 @@
 static int s5p_mfc_remove(struct platform_device *pdev)
 {
 	struct s5p_mfc_dev *dev = platform_get_drvdata(pdev);
+	struct s5p_mfc_ctx *ctx;
+	int i;
 
 	v4l2_info(&dev->v4l2_dev, "Removing %s\n", pdev->name);
 
+	/*
+	 * Clear ctx dev pointer to avoid races between s5p_mfc_remove()
+	 * and s5p_mfc_release() and s5p_mfc_release() accessing ctx->dev
+	 * after s5p_mfc_remove() is run during unbind.
+	*/
+	mutex_lock(&dev->mfc_mutex);
+	for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
+		ctx = dev->ctx[i];
+		if (!ctx)
+			continue;
+		/* clear ctx->dev */
+		ctx->dev = NULL;
+	}
+	mutex_unlock(&dev->mfc_mutex);
+
 	del_timer_sync(&dev->watchdog_timer);
-	flush_workqueue(dev->watchdog_workqueue);
-	destroy_workqueue(dev->watchdog_workqueue);
+	flush_work(&dev->watchdog_work);
 
 	video_unregister_device(dev->vfd_enc);
 	video_unregister_device(dev->vfd_dec);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 373e346..46b99f2 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -292,7 +292,9 @@
  * @warn_start:		hardware error code from which warnings start
  * @mfc_ops:		ops structure holding HW operation function pointers
  * @mfc_cmds:		cmd structure holding HW commands function pointers
+ * @mfc_regs:		structure holding MFC registers
  * @fw_ver:		loaded firmware sub-version
+ * risc_on:		flag indicates RISC is on or off
  *
  */
 struct s5p_mfc_dev {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 47c997d..52081dd 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -776,11 +776,12 @@
 	u32 left, right, top, bottom;
 
 	if (ctx->state != MFCINST_HEAD_PARSED &&
-	ctx->state != MFCINST_RUNNING && ctx->state != MFCINST_FINISHING
-					&& ctx->state != MFCINST_FINISHED) {
-			mfc_err("Cannont set crop\n");
-			return -EINVAL;
-		}
+	    ctx->state != MFCINST_RUNNING &&
+	    ctx->state != MFCINST_FINISHING &&
+	    ctx->state != MFCINST_FINISHED) {
+		mfc_err("Can not get crop information\n");
+		return -EINVAL;
+	}
 	if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_H264) {
 		left = s5p_mfc_hw_call(dev->mfc_ops, get_crop_info_h, ctx);
 		right = left >> S5P_FIMV_SHARED_CROP_RIGHT_SHIFT;
diff --git a/drivers/media/platform/s5p-tv/Kconfig b/drivers/media/platform/s5p-tv/Kconfig
deleted file mode 100644
index 697aaed..0000000
--- a/drivers/media/platform/s5p-tv/Kconfig
+++ /dev/null
@@ -1,88 +0,0 @@
-# drivers/media/platform/s5p-tv/Kconfig
-#
-# Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
-#	http://www.samsung.com/
-# Tomasz Stanislawski <t.stanislaws@samsung.com>
-#
-# Licensed under GPL
-
-config VIDEO_SAMSUNG_S5P_TV
-	bool "Samsung TV driver for S5P platform"
-	depends on PM
-	depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
-	default n
-	---help---
-	  Say Y here to enable selecting the TV output devices for
-	  Samsung S5P platform.
-
-if VIDEO_SAMSUNG_S5P_TV
-
-config VIDEO_SAMSUNG_S5P_HDMI
-	tristate "Samsung HDMI Driver"
-	depends on VIDEO_V4L2
-	depends on I2C
-	depends on VIDEO_SAMSUNG_S5P_TV
-	select VIDEO_SAMSUNG_S5P_HDMIPHY
-	help
-	  Say Y here if you want support for the HDMI output
-	  interface in S5P Samsung SoC. The driver can be compiled
-	  as module. It is an auxiliary driver, that exposes a V4L2
-	  subdev for use by other drivers. This driver requires
-	  hdmiphy driver to work correctly.
-
-config VIDEO_SAMSUNG_S5P_HDMI_DEBUG
-	bool "Enable debug for HDMI Driver"
-	depends on VIDEO_SAMSUNG_S5P_HDMI
-	default n
-	help
-	  Enables debugging for HDMI driver.
-
-config VIDEO_SAMSUNG_S5P_HDMIPHY
-	tristate "Samsung HDMIPHY Driver"
-	depends on VIDEO_DEV && VIDEO_V4L2 && I2C
-	depends on VIDEO_SAMSUNG_S5P_TV
-	help
-	  Say Y here if you want support for the physical HDMI
-	  interface in S5P Samsung SoC. The driver can be compiled
-	  as module. It is an I2C driver, that exposes a V4L2
-	  subdev for use by other drivers.
-
-config VIDEO_SAMSUNG_S5P_SII9234
-	tristate "Samsung SII9234 Driver"
-	depends on VIDEO_DEV && VIDEO_V4L2 && I2C
-	depends on VIDEO_SAMSUNG_S5P_TV
-	help
-	  Say Y here if you want support for the MHL interface
-	  in S5P Samsung SoC. The driver can be compiled
-	  as module. It is an I2C driver, that exposes a V4L2
-	  subdev for use by other drivers.
-
-config VIDEO_SAMSUNG_S5P_SDO
-	tristate "Samsung Analog TV Driver"
-	depends on VIDEO_DEV && VIDEO_V4L2
-	depends on VIDEO_SAMSUNG_S5P_TV
-	help
-	  Say Y here if you want support for the analog TV output
-	  interface in S5P Samsung SoC. The driver can be compiled
-	  as module. It is an auxiliary driver, that exposes a V4L2
-	  subdev for use by other drivers. This driver requires
-	  hdmiphy driver to work correctly.
-
-config VIDEO_SAMSUNG_S5P_MIXER
-	tristate "Samsung Mixer and Video Processor Driver"
-	depends on VIDEO_DEV && VIDEO_V4L2
-	depends on VIDEO_SAMSUNG_S5P_TV
-	depends on HAS_DMA
-	select VIDEOBUF2_DMA_CONTIG
-	help
-	  Say Y here if you want support for the Mixer in Samsung S5P SoCs.
-	  This device produce image data to one of output interfaces.
-
-config VIDEO_SAMSUNG_S5P_MIXER_DEBUG
-	bool "Enable debug for Mixer Driver"
-	depends on VIDEO_SAMSUNG_S5P_MIXER
-	default n
-	help
-	  Enables debugging for Mixer driver.
-
-endif # VIDEO_SAMSUNG_S5P_TV
diff --git a/drivers/media/platform/s5p-tv/Makefile b/drivers/media/platform/s5p-tv/Makefile
deleted file mode 100644
index 7cd4790..0000000
--- a/drivers/media/platform/s5p-tv/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-# drivers/media/platform/samsung/tvout/Makefile
-#
-# Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
-#	http://www.samsung.com/
-# Tomasz Stanislawski <t.stanislaws@samsung.com>
-#
-# Licensed under GPL
-
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_HDMIPHY) += s5p-hdmiphy.o
-s5p-hdmiphy-y += hdmiphy_drv.o
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_SII9234) += s5p-sii9234.o
-s5p-sii9234-y += sii9234_drv.o
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_HDMI) += s5p-hdmi.o
-s5p-hdmi-y += hdmi_drv.o
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_SDO) += s5p-sdo.o
-s5p-sdo-y += sdo_drv.o
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MIXER) += s5p-mixer.o
-s5p-mixer-y += mixer_drv.o mixer_video.o mixer_reg.o mixer_grp_layer.o mixer_vp_layer.o
-
diff --git a/drivers/media/platform/s5p-tv/hdmi_drv.c b/drivers/media/platform/s5p-tv/hdmi_drv.c
deleted file mode 100644
index e71b13e..0000000
--- a/drivers/media/platform/s5p-tv/hdmi_drv.c
+++ /dev/null
@@ -1,1059 +0,0 @@
-/*
- * Samsung HDMI interface driver
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *
- * Tomasz Stanislawski, <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundiation. either version 2 of the License,
- * or (at your option) any later version
- */
-
-#define pr_fmt(fmt) "s5p-tv (hdmi_drv): " fmt
-
-#ifdef CONFIG_VIDEO_SAMSUNG_S5P_HDMI_DEBUG
-#define DEBUG
-#endif
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/i2c.h>
-#include <linux/platform_device.h>
-#include <media/v4l2-subdev.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/bug.h>
-#include <linux/pm_runtime.h>
-#include <linux/clk.h>
-#include <linux/regulator/consumer.h>
-#include <linux/v4l2-dv-timings.h>
-
-#include <linux/platform_data/media/s5p_hdmi.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-dev.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-dv-timings.h>
-
-#include "regs-hdmi.h"
-
-MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
-MODULE_DESCRIPTION("Samsung HDMI");
-MODULE_LICENSE("GPL");
-
-struct hdmi_pulse {
-	u32 beg;
-	u32 end;
-};
-
-struct hdmi_timings {
-	struct hdmi_pulse hact;
-	u32 hsyn_pol; /* 0 - high, 1 - low */
-	struct hdmi_pulse hsyn;
-	u32 interlaced;
-	struct hdmi_pulse vact[2];
-	u32 vsyn_pol; /* 0 - high, 1 - low */
-	u32 vsyn_off;
-	struct hdmi_pulse vsyn[2];
-};
-
-struct hdmi_resources {
-	struct clk *hdmi;
-	struct clk *sclk_hdmi;
-	struct clk *sclk_pixel;
-	struct clk *sclk_hdmiphy;
-	struct clk *hdmiphy;
-	struct regulator_bulk_data *regul_bulk;
-	int regul_count;
-};
-
-struct hdmi_device {
-	/** base address of HDMI registers */
-	void __iomem *regs;
-	/** HDMI interrupt */
-	unsigned int irq;
-	/** pointer to device parent */
-	struct device *dev;
-	/** subdev generated by HDMI device */
-	struct v4l2_subdev sd;
-	/** V4L2 device structure */
-	struct v4l2_device v4l2_dev;
-	/** subdev of HDMIPHY interface */
-	struct v4l2_subdev *phy_sd;
-	/** subdev of MHL interface */
-	struct v4l2_subdev *mhl_sd;
-	/** configuration of current graphic mode */
-	const struct hdmi_timings *cur_conf;
-	/** flag indicating that timings are dirty */
-	int cur_conf_dirty;
-	/** current timings */
-	struct v4l2_dv_timings cur_timings;
-	/** other resources */
-	struct hdmi_resources res;
-};
-
-static const struct platform_device_id hdmi_driver_types[] = {
-	{
-		.name		= "s5pv210-hdmi",
-	}, {
-		.name		= "exynos4-hdmi",
-	}, {
-		/* end node */
-	}
-};
-
-static const struct v4l2_subdev_ops hdmi_sd_ops;
-
-static struct hdmi_device *sd_to_hdmi_dev(struct v4l2_subdev *sd)
-{
-	return container_of(sd, struct hdmi_device, sd);
-}
-
-static inline
-void hdmi_write(struct hdmi_device *hdev, u32 reg_id, u32 value)
-{
-	writel(value, hdev->regs + reg_id);
-}
-
-static inline
-void hdmi_write_mask(struct hdmi_device *hdev, u32 reg_id, u32 value, u32 mask)
-{
-	u32 old = readl(hdev->regs + reg_id);
-	value = (value & mask) | (old & ~mask);
-	writel(value, hdev->regs + reg_id);
-}
-
-static inline
-void hdmi_writeb(struct hdmi_device *hdev, u32 reg_id, u8 value)
-{
-	writeb(value, hdev->regs + reg_id);
-}
-
-static inline
-void hdmi_writebn(struct hdmi_device *hdev, u32 reg_id, int n, u32 value)
-{
-	switch (n) {
-	default:
-		writeb(value >> 24, hdev->regs + reg_id + 12);
-	case 3:
-		writeb(value >> 16, hdev->regs + reg_id + 8);
-	case 2:
-		writeb(value >>  8, hdev->regs + reg_id + 4);
-	case 1:
-		writeb(value >>  0, hdev->regs + reg_id + 0);
-	}
-}
-
-static inline u32 hdmi_read(struct hdmi_device *hdev, u32 reg_id)
-{
-	return readl(hdev->regs + reg_id);
-}
-
-static irqreturn_t hdmi_irq_handler(int irq, void *dev_data)
-{
-	struct hdmi_device *hdev = dev_data;
-	u32 intc_flag;
-
-	(void)irq;
-	intc_flag = hdmi_read(hdev, HDMI_INTC_FLAG);
-	/* clearing flags for HPD plug/unplug */
-	if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
-		pr_info("unplugged\n");
-		hdmi_write_mask(hdev, HDMI_INTC_FLAG, ~0,
-			HDMI_INTC_FLAG_HPD_UNPLUG);
-	}
-	if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
-		pr_info("plugged\n");
-		hdmi_write_mask(hdev, HDMI_INTC_FLAG, ~0,
-			HDMI_INTC_FLAG_HPD_PLUG);
-	}
-
-	return IRQ_HANDLED;
-}
-
-static void hdmi_reg_init(struct hdmi_device *hdev)
-{
-	/* enable HPD interrupts */
-	hdmi_write_mask(hdev, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL |
-		HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
-	/* choose DVI mode */
-	hdmi_write_mask(hdev, HDMI_MODE_SEL,
-		HDMI_MODE_DVI_EN, HDMI_MODE_MASK);
-	hdmi_write_mask(hdev, HDMI_CON_2, ~0,
-		HDMI_DVI_PERAMBLE_EN | HDMI_DVI_BAND_EN);
-	/* disable bluescreen */
-	hdmi_write_mask(hdev, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
-	/* choose bluescreen (fecal) color */
-	hdmi_writeb(hdev, HDMI_BLUE_SCREEN_0, 0x12);
-	hdmi_writeb(hdev, HDMI_BLUE_SCREEN_1, 0x34);
-	hdmi_writeb(hdev, HDMI_BLUE_SCREEN_2, 0x56);
-}
-
-static void hdmi_timing_apply(struct hdmi_device *hdev,
-	const struct hdmi_timings *t)
-{
-	/* setting core registers */
-	hdmi_writebn(hdev, HDMI_H_BLANK_0, 2, t->hact.beg);
-	hdmi_writebn(hdev, HDMI_H_SYNC_GEN_0, 3,
-		(t->hsyn_pol << 20) | (t->hsyn.end << 10) | t->hsyn.beg);
-	hdmi_writeb(hdev, HDMI_VSYNC_POL, t->vsyn_pol);
-	hdmi_writebn(hdev, HDMI_V_BLANK_0, 3,
-		(t->vact[0].beg << 11) | t->vact[0].end);
-	hdmi_writebn(hdev, HDMI_V_SYNC_GEN_1_0, 3,
-		(t->vsyn[0].beg << 12) | t->vsyn[0].end);
-	if (t->interlaced) {
-		u32 vsyn_trans = t->hsyn.beg + t->vsyn_off;
-
-		hdmi_writeb(hdev, HDMI_INT_PRO_MODE, 1);
-		hdmi_writebn(hdev, HDMI_H_V_LINE_0, 3,
-			(t->hact.end << 12) | t->vact[1].end);
-		hdmi_writebn(hdev, HDMI_V_BLANK_F_0, 3,
-			(t->vact[1].end << 11) | t->vact[1].beg);
-		hdmi_writebn(hdev, HDMI_V_SYNC_GEN_2_0, 3,
-			(t->vsyn[1].beg << 12) | t->vsyn[1].end);
-		hdmi_writebn(hdev, HDMI_V_SYNC_GEN_3_0, 3,
-			(vsyn_trans << 12) | vsyn_trans);
-	} else {
-		hdmi_writeb(hdev, HDMI_INT_PRO_MODE, 0);
-		hdmi_writebn(hdev, HDMI_H_V_LINE_0, 3,
-			(t->hact.end << 12) | t->vact[0].end);
-	}
-
-	/* Timing generator registers */
-	hdmi_writebn(hdev, HDMI_TG_H_FSZ_L, 2, t->hact.end);
-	hdmi_writebn(hdev, HDMI_TG_HACT_ST_L, 2, t->hact.beg);
-	hdmi_writebn(hdev, HDMI_TG_HACT_SZ_L, 2, t->hact.end - t->hact.beg);
-	hdmi_writebn(hdev, HDMI_TG_VSYNC_L, 2, t->vsyn[0].beg);
-	hdmi_writebn(hdev, HDMI_TG_VACT_ST_L, 2, t->vact[0].beg);
-	hdmi_writebn(hdev, HDMI_TG_VACT_SZ_L, 2,
-		t->vact[0].end - t->vact[0].beg);
-	hdmi_writebn(hdev, HDMI_TG_VSYNC_TOP_HDMI_L, 2, t->vsyn[0].beg);
-	hdmi_writebn(hdev, HDMI_TG_FIELD_TOP_HDMI_L, 2, t->vsyn[0].beg);
-	if (t->interlaced) {
-		hdmi_write_mask(hdev, HDMI_TG_CMD, ~0, HDMI_TG_FIELD_EN);
-		hdmi_writebn(hdev, HDMI_TG_V_FSZ_L, 2, t->vact[1].end);
-		hdmi_writebn(hdev, HDMI_TG_VSYNC2_L, 2, t->vsyn[1].beg);
-		hdmi_writebn(hdev, HDMI_TG_FIELD_CHG_L, 2, t->vact[0].end);
-		hdmi_writebn(hdev, HDMI_TG_VACT_ST2_L, 2, t->vact[1].beg);
-		hdmi_writebn(hdev, HDMI_TG_VSYNC_BOT_HDMI_L, 2, t->vsyn[1].beg);
-		hdmi_writebn(hdev, HDMI_TG_FIELD_BOT_HDMI_L, 2, t->vsyn[1].beg);
-	} else {
-		hdmi_write_mask(hdev, HDMI_TG_CMD, 0, HDMI_TG_FIELD_EN);
-		hdmi_writebn(hdev, HDMI_TG_V_FSZ_L, 2, t->vact[0].end);
-	}
-}
-
-static int hdmi_conf_apply(struct hdmi_device *hdmi_dev)
-{
-	struct device *dev = hdmi_dev->dev;
-	const struct hdmi_timings *conf = hdmi_dev->cur_conf;
-	int ret;
-
-	dev_dbg(dev, "%s\n", __func__);
-
-	/* skip if conf is already synchronized with HW */
-	if (!hdmi_dev->cur_conf_dirty)
-		return 0;
-
-	/* reset hdmiphy */
-	hdmi_write_mask(hdmi_dev, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
-	mdelay(10);
-	hdmi_write_mask(hdmi_dev, HDMI_PHY_RSTOUT,  0, HDMI_PHY_SW_RSTOUT);
-	mdelay(10);
-
-	/* configure timings */
-	ret = v4l2_subdev_call(hdmi_dev->phy_sd, video, s_dv_timings,
-				&hdmi_dev->cur_timings);
-	if (ret) {
-		dev_err(dev, "failed to set timings\n");
-		return ret;
-	}
-
-	/* resetting HDMI core */
-	hdmi_write_mask(hdmi_dev, HDMI_CORE_RSTOUT,  0, HDMI_CORE_SW_RSTOUT);
-	mdelay(10);
-	hdmi_write_mask(hdmi_dev, HDMI_CORE_RSTOUT, ~0, HDMI_CORE_SW_RSTOUT);
-	mdelay(10);
-
-	hdmi_reg_init(hdmi_dev);
-
-	/* setting core registers */
-	hdmi_timing_apply(hdmi_dev, conf);
-
-	hdmi_dev->cur_conf_dirty = 0;
-
-	return 0;
-}
-
-static void hdmi_dumpregs(struct hdmi_device *hdev, char *prefix)
-{
-#define DUMPREG(reg_id) \
-	dev_dbg(hdev->dev, "%s:" #reg_id " = %08x\n", prefix, \
-		readl(hdev->regs + reg_id))
-
-	dev_dbg(hdev->dev, "%s: ---- CONTROL REGISTERS ----\n", prefix);
-	DUMPREG(HDMI_INTC_FLAG);
-	DUMPREG(HDMI_INTC_CON);
-	DUMPREG(HDMI_HPD_STATUS);
-	DUMPREG(HDMI_PHY_RSTOUT);
-	DUMPREG(HDMI_PHY_VPLL);
-	DUMPREG(HDMI_PHY_CMU);
-	DUMPREG(HDMI_CORE_RSTOUT);
-
-	dev_dbg(hdev->dev, "%s: ---- CORE REGISTERS ----\n", prefix);
-	DUMPREG(HDMI_CON_0);
-	DUMPREG(HDMI_CON_1);
-	DUMPREG(HDMI_CON_2);
-	DUMPREG(HDMI_SYS_STATUS);
-	DUMPREG(HDMI_PHY_STATUS);
-	DUMPREG(HDMI_STATUS_EN);
-	DUMPREG(HDMI_HPD);
-	DUMPREG(HDMI_MODE_SEL);
-	DUMPREG(HDMI_HPD_GEN);
-	DUMPREG(HDMI_DC_CONTROL);
-	DUMPREG(HDMI_VIDEO_PATTERN_GEN);
-
-	dev_dbg(hdev->dev, "%s: ---- CORE SYNC REGISTERS ----\n", prefix);
-	DUMPREG(HDMI_H_BLANK_0);
-	DUMPREG(HDMI_H_BLANK_1);
-	DUMPREG(HDMI_V_BLANK_0);
-	DUMPREG(HDMI_V_BLANK_1);
-	DUMPREG(HDMI_V_BLANK_2);
-	DUMPREG(HDMI_H_V_LINE_0);
-	DUMPREG(HDMI_H_V_LINE_1);
-	DUMPREG(HDMI_H_V_LINE_2);
-	DUMPREG(HDMI_VSYNC_POL);
-	DUMPREG(HDMI_INT_PRO_MODE);
-	DUMPREG(HDMI_V_BLANK_F_0);
-	DUMPREG(HDMI_V_BLANK_F_1);
-	DUMPREG(HDMI_V_BLANK_F_2);
-	DUMPREG(HDMI_H_SYNC_GEN_0);
-	DUMPREG(HDMI_H_SYNC_GEN_1);
-	DUMPREG(HDMI_H_SYNC_GEN_2);
-	DUMPREG(HDMI_V_SYNC_GEN_1_0);
-	DUMPREG(HDMI_V_SYNC_GEN_1_1);
-	DUMPREG(HDMI_V_SYNC_GEN_1_2);
-	DUMPREG(HDMI_V_SYNC_GEN_2_0);
-	DUMPREG(HDMI_V_SYNC_GEN_2_1);
-	DUMPREG(HDMI_V_SYNC_GEN_2_2);
-	DUMPREG(HDMI_V_SYNC_GEN_3_0);
-	DUMPREG(HDMI_V_SYNC_GEN_3_1);
-	DUMPREG(HDMI_V_SYNC_GEN_3_2);
-
-	dev_dbg(hdev->dev, "%s: ---- TG REGISTERS ----\n", prefix);
-	DUMPREG(HDMI_TG_CMD);
-	DUMPREG(HDMI_TG_H_FSZ_L);
-	DUMPREG(HDMI_TG_H_FSZ_H);
-	DUMPREG(HDMI_TG_HACT_ST_L);
-	DUMPREG(HDMI_TG_HACT_ST_H);
-	DUMPREG(HDMI_TG_HACT_SZ_L);
-	DUMPREG(HDMI_TG_HACT_SZ_H);
-	DUMPREG(HDMI_TG_V_FSZ_L);
-	DUMPREG(HDMI_TG_V_FSZ_H);
-	DUMPREG(HDMI_TG_VSYNC_L);
-	DUMPREG(HDMI_TG_VSYNC_H);
-	DUMPREG(HDMI_TG_VSYNC2_L);
-	DUMPREG(HDMI_TG_VSYNC2_H);
-	DUMPREG(HDMI_TG_VACT_ST_L);
-	DUMPREG(HDMI_TG_VACT_ST_H);
-	DUMPREG(HDMI_TG_VACT_SZ_L);
-	DUMPREG(HDMI_TG_VACT_SZ_H);
-	DUMPREG(HDMI_TG_FIELD_CHG_L);
-	DUMPREG(HDMI_TG_FIELD_CHG_H);
-	DUMPREG(HDMI_TG_VACT_ST2_L);
-	DUMPREG(HDMI_TG_VACT_ST2_H);
-	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
-	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
-	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
-	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
-	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
-	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
-	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
-	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
-#undef DUMPREG
-}
-
-static const struct hdmi_timings hdmi_timings_480p = {
-	.hact = { .beg = 138, .end = 858 },
-	.hsyn_pol = 1,
-	.hsyn = { .beg = 16, .end = 16 + 62 },
-	.interlaced = 0,
-	.vact[0] = { .beg = 42 + 3, .end = 522 + 3 },
-	.vsyn_pol = 1,
-	.vsyn[0] = { .beg = 6 + 3, .end = 12 + 3},
-};
-
-static const struct hdmi_timings hdmi_timings_576p50 = {
-	.hact = { .beg = 144, .end = 864 },
-	.hsyn_pol = 1,
-	.hsyn = { .beg = 12, .end = 12 + 64 },
-	.interlaced = 0,
-	.vact[0] = { .beg = 44 + 5, .end = 620 + 5 },
-	.vsyn_pol = 1,
-	.vsyn[0] = { .beg = 0 + 5, .end = 5 + 5},
-};
-
-static const struct hdmi_timings hdmi_timings_720p60 = {
-	.hact = { .beg = 370, .end = 1650 },
-	.hsyn_pol = 0,
-	.hsyn = { .beg = 110, .end = 110 + 40 },
-	.interlaced = 0,
-	.vact[0] = { .beg = 25 + 5, .end = 745 + 5 },
-	.vsyn_pol = 0,
-	.vsyn[0] = { .beg = 0 + 5, .end = 5 + 5},
-};
-
-static const struct hdmi_timings hdmi_timings_720p50 = {
-	.hact = { .beg = 700, .end = 1980 },
-	.hsyn_pol = 0,
-	.hsyn = { .beg = 440, .end = 440 + 40 },
-	.interlaced = 0,
-	.vact[0] = { .beg = 25 + 5, .end = 745 + 5 },
-	.vsyn_pol = 0,
-	.vsyn[0] = { .beg = 0 + 5, .end = 5 + 5},
-};
-
-static const struct hdmi_timings hdmi_timings_1080p24 = {
-	.hact = { .beg = 830, .end = 2750 },
-	.hsyn_pol = 0,
-	.hsyn = { .beg = 638, .end = 638 + 44 },
-	.interlaced = 0,
-	.vact[0] = { .beg = 41 + 4, .end = 1121 + 4 },
-	.vsyn_pol = 0,
-	.vsyn[0] = { .beg = 0 + 4, .end = 5 + 4},
-};
-
-static const struct hdmi_timings hdmi_timings_1080p60 = {
-	.hact = { .beg = 280, .end = 2200 },
-	.hsyn_pol = 0,
-	.hsyn = { .beg = 88, .end = 88 + 44 },
-	.interlaced = 0,
-	.vact[0] = { .beg = 41 + 4, .end = 1121 + 4 },
-	.vsyn_pol = 0,
-	.vsyn[0] = { .beg = 0 + 4, .end = 5 + 4},
-};
-
-static const struct hdmi_timings hdmi_timings_1080i60 = {
-	.hact = { .beg = 280, .end = 2200 },
-	.hsyn_pol = 0,
-	.hsyn = { .beg = 88, .end = 88 + 44 },
-	.interlaced = 1,
-	.vact[0] = { .beg = 20 + 2, .end = 560 + 2 },
-	.vact[1] = { .beg = 583 + 2, .end = 1123 + 2 },
-	.vsyn_pol = 0,
-	.vsyn_off = 1100,
-	.vsyn[0] = { .beg = 0 + 2, .end = 5 + 2},
-	.vsyn[1] = { .beg = 562 + 2, .end = 567 + 2},
-};
-
-static const struct hdmi_timings hdmi_timings_1080i50 = {
-	.hact = { .beg = 720, .end = 2640 },
-	.hsyn_pol = 0,
-	.hsyn = { .beg = 528, .end = 528 + 44 },
-	.interlaced = 1,
-	.vact[0] = { .beg = 20 + 2, .end = 560 + 2 },
-	.vact[1] = { .beg = 583 + 2, .end = 1123 + 2 },
-	.vsyn_pol = 0,
-	.vsyn_off = 1320,
-	.vsyn[0] = { .beg = 0 + 2, .end = 5 + 2},
-	.vsyn[1] = { .beg = 562 + 2, .end = 567 + 2},
-};
-
-static const struct hdmi_timings hdmi_timings_1080p50 = {
-	.hact = { .beg = 720, .end = 2640 },
-	.hsyn_pol = 0,
-	.hsyn = { .beg = 528, .end = 528 + 44 },
-	.interlaced = 0,
-	.vact[0] = { .beg = 41 + 4, .end = 1121 + 4 },
-	.vsyn_pol = 0,
-	.vsyn[0] = { .beg = 0 + 4, .end = 5 + 4},
-};
-
-/* default hdmi_timings index of the timings configured on probe */
-#define HDMI_DEFAULT_TIMINGS_IDX (0)
-
-static const struct {
-	bool reduced_fps;
-	const struct v4l2_dv_timings dv_timings;
-	const struct hdmi_timings *hdmi_timings;
-} hdmi_timings[] = {
-	{ false, V4L2_DV_BT_CEA_720X480P59_94, &hdmi_timings_480p    },
-	{ false, V4L2_DV_BT_CEA_720X576P50,    &hdmi_timings_576p50  },
-	{ false, V4L2_DV_BT_CEA_1280X720P50,   &hdmi_timings_720p50  },
-	{ true,  V4L2_DV_BT_CEA_1280X720P60,   &hdmi_timings_720p60  },
-	{ false, V4L2_DV_BT_CEA_1920X1080P24,  &hdmi_timings_1080p24 },
-	{ false, V4L2_DV_BT_CEA_1920X1080P30,  &hdmi_timings_1080p60 },
-	{ false, V4L2_DV_BT_CEA_1920X1080P50,  &hdmi_timings_1080p50 },
-	{ false, V4L2_DV_BT_CEA_1920X1080I50,  &hdmi_timings_1080i50 },
-	{ false, V4L2_DV_BT_CEA_1920X1080I60,  &hdmi_timings_1080i60 },
-	{ false, V4L2_DV_BT_CEA_1920X1080P60,  &hdmi_timings_1080p60 },
-};
-
-static int hdmi_streamon(struct hdmi_device *hdev)
-{
-	struct device *dev = hdev->dev;
-	struct hdmi_resources *res = &hdev->res;
-	int ret, tries;
-
-	dev_dbg(dev, "%s\n", __func__);
-
-	ret = hdmi_conf_apply(hdev);
-	if (ret)
-		return ret;
-
-	ret = v4l2_subdev_call(hdev->phy_sd, video, s_stream, 1);
-	if (ret)
-		return ret;
-
-	/* waiting for HDMIPHY's PLL to get to steady state */
-	for (tries = 100; tries; --tries) {
-		u32 val = hdmi_read(hdev, HDMI_PHY_STATUS);
-		if (val & HDMI_PHY_STATUS_READY)
-			break;
-		mdelay(1);
-	}
-	/* steady state not achieved */
-	if (tries == 0) {
-		dev_err(dev, "hdmiphy's pll could not reach steady state.\n");
-		v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
-		hdmi_dumpregs(hdev, "hdmiphy - s_stream");
-		return -EIO;
-	}
-
-	/* starting MHL */
-	ret = v4l2_subdev_call(hdev->mhl_sd, video, s_stream, 1);
-	if (hdev->mhl_sd && ret) {
-		v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
-		hdmi_dumpregs(hdev, "mhl - s_stream");
-		return -EIO;
-	}
-
-	/* hdmiphy clock is used for HDMI in streaming mode */
-	clk_disable(res->sclk_hdmi);
-	clk_set_parent(res->sclk_hdmi, res->sclk_hdmiphy);
-	clk_enable(res->sclk_hdmi);
-
-	/* enable HDMI and timing generator */
-	hdmi_write_mask(hdev, HDMI_CON_0, ~0, HDMI_EN);
-	hdmi_write_mask(hdev, HDMI_TG_CMD, ~0, HDMI_TG_EN);
-	hdmi_dumpregs(hdev, "streamon");
-	return 0;
-}
-
-static int hdmi_streamoff(struct hdmi_device *hdev)
-{
-	struct device *dev = hdev->dev;
-	struct hdmi_resources *res = &hdev->res;
-
-	dev_dbg(dev, "%s\n", __func__);
-
-	hdmi_write_mask(hdev, HDMI_CON_0, 0, HDMI_EN);
-	hdmi_write_mask(hdev, HDMI_TG_CMD, 0, HDMI_TG_EN);
-
-	/* pixel(vpll) clock is used for HDMI in config mode */
-	clk_disable(res->sclk_hdmi);
-	clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
-	clk_enable(res->sclk_hdmi);
-
-	v4l2_subdev_call(hdev->mhl_sd, video, s_stream, 0);
-	v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
-
-	hdmi_dumpregs(hdev, "streamoff");
-	return 0;
-}
-
-static int hdmi_s_stream(struct v4l2_subdev *sd, int enable)
-{
-	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
-	struct device *dev = hdev->dev;
-
-	dev_dbg(dev, "%s(%d)\n", __func__, enable);
-	if (enable)
-		return hdmi_streamon(hdev);
-	return hdmi_streamoff(hdev);
-}
-
-static int hdmi_resource_poweron(struct hdmi_resources *res)
-{
-	int ret;
-
-	/* turn HDMI power on */
-	ret = regulator_bulk_enable(res->regul_count, res->regul_bulk);
-	if (ret < 0)
-		return ret;
-	/* power-on hdmi physical interface */
-	clk_enable(res->hdmiphy);
-	/* use VPP as parent clock; HDMIPHY is not working yet */
-	clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
-	/* turn clocks on */
-	clk_enable(res->sclk_hdmi);
-
-	return 0;
-}
-
-static void hdmi_resource_poweroff(struct hdmi_resources *res)
-{
-	/* turn clocks off */
-	clk_disable(res->sclk_hdmi);
-	/* power-off hdmiphy */
-	clk_disable(res->hdmiphy);
-	/* turn HDMI power off */
-	regulator_bulk_disable(res->regul_count, res->regul_bulk);
-}
-
-static int hdmi_s_power(struct v4l2_subdev *sd, int on)
-{
-	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
-	int ret;
-
-	if (on)
-		ret = pm_runtime_get_sync(hdev->dev);
-	else
-		ret = pm_runtime_put_sync(hdev->dev);
-	/* only values < 0 indicate errors */
-	return ret < 0 ? ret : 0;
-}
-
-static int hdmi_s_dv_timings(struct v4l2_subdev *sd,
-	struct v4l2_dv_timings *timings)
-{
-	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
-	struct device *dev = hdev->dev;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(hdmi_timings); i++)
-		if (v4l2_match_dv_timings(&hdmi_timings[i].dv_timings,
-					timings, 0, false))
-			break;
-	if (i == ARRAY_SIZE(hdmi_timings)) {
-		dev_err(dev, "timings not supported\n");
-		return -EINVAL;
-	}
-	hdev->cur_conf = hdmi_timings[i].hdmi_timings;
-	hdev->cur_conf_dirty = 1;
-	hdev->cur_timings = *timings;
-	if (!hdmi_timings[i].reduced_fps)
-		hdev->cur_timings.bt.flags &= ~V4L2_DV_FL_CAN_REDUCE_FPS;
-	return 0;
-}
-
-static int hdmi_g_dv_timings(struct v4l2_subdev *sd,
-	struct v4l2_dv_timings *timings)
-{
-	*timings = sd_to_hdmi_dev(sd)->cur_timings;
-	return 0;
-}
-
-static int hdmi_get_fmt(struct v4l2_subdev *sd,
-		struct v4l2_subdev_pad_config *cfg,
-		struct v4l2_subdev_format *format)
-{
-	struct v4l2_mbus_framefmt *fmt = &format->format;
-	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
-	const struct hdmi_timings *t = hdev->cur_conf;
-
-	dev_dbg(hdev->dev, "%s\n", __func__);
-	if (!hdev->cur_conf)
-		return -EINVAL;
-	if (format->pad)
-		return -EINVAL;
-
-	memset(fmt, 0, sizeof(*fmt));
-	fmt->width = t->hact.end - t->hact.beg;
-	fmt->height = t->vact[0].end - t->vact[0].beg;
-	fmt->code = MEDIA_BUS_FMT_FIXED; /* means RGB888 */
-	fmt->colorspace = V4L2_COLORSPACE_SRGB;
-	if (t->interlaced) {
-		fmt->field = V4L2_FIELD_INTERLACED;
-		fmt->height *= 2;
-	} else {
-		fmt->field = V4L2_FIELD_NONE;
-	}
-	return 0;
-}
-
-static int hdmi_enum_dv_timings(struct v4l2_subdev *sd,
-	struct v4l2_enum_dv_timings *timings)
-{
-	if (timings->pad != 0)
-		return -EINVAL;
-	if (timings->index >= ARRAY_SIZE(hdmi_timings))
-		return -EINVAL;
-	timings->timings = hdmi_timings[timings->index].dv_timings;
-	if (!hdmi_timings[timings->index].reduced_fps)
-		timings->timings.bt.flags &= ~V4L2_DV_FL_CAN_REDUCE_FPS;
-	return 0;
-}
-
-static int hdmi_dv_timings_cap(struct v4l2_subdev *sd,
-	struct v4l2_dv_timings_cap *cap)
-{
-	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
-
-	if (cap->pad != 0)
-		return -EINVAL;
-
-	/* Let the phy fill in the pixelclock range */
-	v4l2_subdev_call(hdev->phy_sd, pad, dv_timings_cap, cap);
-	cap->type = V4L2_DV_BT_656_1120;
-	cap->bt.min_width = 720;
-	cap->bt.max_width = 1920;
-	cap->bt.min_height = 480;
-	cap->bt.max_height = 1080;
-	cap->bt.standards = V4L2_DV_BT_STD_CEA861;
-	cap->bt.capabilities = V4L2_DV_BT_CAP_INTERLACED |
-			       V4L2_DV_BT_CAP_PROGRESSIVE;
-	return 0;
-}
-
-static const struct v4l2_subdev_core_ops hdmi_sd_core_ops = {
-	.s_power = hdmi_s_power,
-};
-
-static const struct v4l2_subdev_video_ops hdmi_sd_video_ops = {
-	.s_dv_timings = hdmi_s_dv_timings,
-	.g_dv_timings = hdmi_g_dv_timings,
-	.s_stream = hdmi_s_stream,
-};
-
-static const struct v4l2_subdev_pad_ops hdmi_sd_pad_ops = {
-	.enum_dv_timings = hdmi_enum_dv_timings,
-	.dv_timings_cap = hdmi_dv_timings_cap,
-	.get_fmt = hdmi_get_fmt,
-};
-
-static const struct v4l2_subdev_ops hdmi_sd_ops = {
-	.core = &hdmi_sd_core_ops,
-	.video = &hdmi_sd_video_ops,
-	.pad = &hdmi_sd_pad_ops,
-};
-
-static int hdmi_runtime_suspend(struct device *dev)
-{
-	struct v4l2_subdev *sd = dev_get_drvdata(dev);
-	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
-
-	dev_dbg(dev, "%s\n", __func__);
-	v4l2_subdev_call(hdev->mhl_sd, core, s_power, 0);
-	hdmi_resource_poweroff(&hdev->res);
-	/* flag that device context is lost */
-	hdev->cur_conf_dirty = 1;
-	return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
-	struct v4l2_subdev *sd = dev_get_drvdata(dev);
-	struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
-	int ret;
-
-	dev_dbg(dev, "%s\n", __func__);
-
-	ret = hdmi_resource_poweron(&hdev->res);
-	if (ret < 0)
-		return ret;
-
-	/* starting MHL */
-	ret = v4l2_subdev_call(hdev->mhl_sd, core, s_power, 1);
-	if (hdev->mhl_sd && ret)
-		goto fail;
-
-	dev_dbg(dev, "poweron succeed\n");
-
-	return 0;
-
-fail:
-	hdmi_resource_poweroff(&hdev->res);
-	dev_err(dev, "poweron failed\n");
-
-	return ret;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
-	.runtime_suspend = hdmi_runtime_suspend,
-	.runtime_resume	 = hdmi_runtime_resume,
-};
-
-static void hdmi_resource_clear_clocks(struct hdmi_resources *res)
-{
-	res->hdmi	 = ERR_PTR(-EINVAL);
-	res->sclk_hdmi	 = ERR_PTR(-EINVAL);
-	res->sclk_pixel	 = ERR_PTR(-EINVAL);
-	res->sclk_hdmiphy = ERR_PTR(-EINVAL);
-	res->hdmiphy	 = ERR_PTR(-EINVAL);
-}
-
-static void hdmi_resources_cleanup(struct hdmi_device *hdev)
-{
-	struct hdmi_resources *res = &hdev->res;
-
-	dev_dbg(hdev->dev, "HDMI resource cleanup\n");
-	/* put clocks, power */
-	if (res->regul_count)
-		regulator_bulk_free(res->regul_count, res->regul_bulk);
-	/* kfree is NULL-safe */
-	kfree(res->regul_bulk);
-	if (!IS_ERR(res->hdmiphy))
-		clk_put(res->hdmiphy);
-	if (!IS_ERR(res->sclk_hdmiphy))
-		clk_put(res->sclk_hdmiphy);
-	if (!IS_ERR(res->sclk_pixel))
-		clk_put(res->sclk_pixel);
-	if (!IS_ERR(res->sclk_hdmi))
-		clk_put(res->sclk_hdmi);
-	if (!IS_ERR(res->hdmi))
-		clk_put(res->hdmi);
-	memset(res, 0, sizeof(*res));
-	hdmi_resource_clear_clocks(res);
-}
-
-static int hdmi_resources_init(struct hdmi_device *hdev)
-{
-	struct device *dev = hdev->dev;
-	struct hdmi_resources *res = &hdev->res;
-	static char *supply[] = {
-		"hdmi-en",
-		"vdd",
-		"vdd_osc",
-		"vdd_pll",
-	};
-	int i, ret;
-
-	dev_dbg(dev, "HDMI resource init\n");
-
-	memset(res, 0, sizeof(*res));
-	hdmi_resource_clear_clocks(res);
-
-	/* get clocks, power */
-	res->hdmi = clk_get(dev, "hdmi");
-	if (IS_ERR(res->hdmi)) {
-		dev_err(dev, "failed to get clock 'hdmi'\n");
-		goto fail;
-	}
-	res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
-	if (IS_ERR(res->sclk_hdmi)) {
-		dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
-		goto fail;
-	}
-	res->sclk_pixel = clk_get(dev, "sclk_pixel");
-	if (IS_ERR(res->sclk_pixel)) {
-		dev_err(dev, "failed to get clock 'sclk_pixel'\n");
-		goto fail;
-	}
-	res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
-	if (IS_ERR(res->sclk_hdmiphy)) {
-		dev_err(dev, "failed to get clock 'sclk_hdmiphy'\n");
-		goto fail;
-	}
-	res->hdmiphy = clk_get(dev, "hdmiphy");
-	if (IS_ERR(res->hdmiphy)) {
-		dev_err(dev, "failed to get clock 'hdmiphy'\n");
-		goto fail;
-	}
-	res->regul_bulk = kcalloc(ARRAY_SIZE(supply),
-				  sizeof(res->regul_bulk[0]), GFP_KERNEL);
-	if (!res->regul_bulk) {
-		dev_err(dev, "failed to get memory for regulators\n");
-		goto fail;
-	}
-	for (i = 0; i < ARRAY_SIZE(supply); ++i) {
-		res->regul_bulk[i].supply = supply[i];
-		res->regul_bulk[i].consumer = NULL;
-	}
-
-	ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
-	if (ret) {
-		dev_err(dev, "failed to get regulators\n");
-		goto fail;
-	}
-	res->regul_count = ARRAY_SIZE(supply);
-
-	return 0;
-fail:
-	dev_err(dev, "HDMI resource init - failed\n");
-	hdmi_resources_cleanup(hdev);
-	return -ENODEV;
-}
-
-static int hdmi_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct resource *res;
-	struct i2c_adapter *adapter;
-	struct v4l2_subdev *sd;
-	struct hdmi_device *hdmi_dev = NULL;
-	struct s5p_hdmi_platform_data *pdata = dev->platform_data;
-	int ret;
-
-	dev_dbg(dev, "probe start\n");
-
-	if (!pdata) {
-		dev_err(dev, "platform data is missing\n");
-		ret = -ENODEV;
-		goto fail;
-	}
-
-	hdmi_dev = devm_kzalloc(&pdev->dev, sizeof(*hdmi_dev), GFP_KERNEL);
-	if (!hdmi_dev) {
-		dev_err(dev, "out of memory\n");
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	hdmi_dev->dev = dev;
-
-	ret = hdmi_resources_init(hdmi_dev);
-	if (ret)
-		goto fail;
-
-	/* mapping HDMI registers */
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res == NULL) {
-		dev_err(dev, "get memory resource failed.\n");
-		ret = -ENXIO;
-		goto fail_init;
-	}
-
-	hdmi_dev->regs = devm_ioremap(&pdev->dev, res->start,
-				      resource_size(res));
-	if (hdmi_dev->regs == NULL) {
-		dev_err(dev, "register mapping failed.\n");
-		ret = -ENXIO;
-		goto fail_init;
-	}
-
-	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (res == NULL) {
-		dev_err(dev, "get interrupt resource failed.\n");
-		ret = -ENXIO;
-		goto fail_init;
-	}
-
-	ret = devm_request_irq(&pdev->dev, res->start, hdmi_irq_handler, 0,
-			       "hdmi", hdmi_dev);
-	if (ret) {
-		dev_err(dev, "request interrupt failed.\n");
-		goto fail_init;
-	}
-	hdmi_dev->irq = res->start;
-
-	/* setting v4l2 name to prevent WARN_ON in v4l2_device_register */
-	strlcpy(hdmi_dev->v4l2_dev.name, dev_name(dev),
-		sizeof(hdmi_dev->v4l2_dev.name));
-	/* passing NULL owner prevents driver from erasing drvdata */
-	ret = v4l2_device_register(NULL, &hdmi_dev->v4l2_dev);
-	if (ret) {
-		dev_err(dev, "could not register v4l2 device.\n");
-		goto fail_init;
-	}
-
-	/* testing if hdmiphy info is present */
-	if (!pdata->hdmiphy_info) {
-		dev_err(dev, "hdmiphy info is missing in platform data\n");
-		ret = -ENXIO;
-		goto fail_vdev;
-	}
-
-	adapter = i2c_get_adapter(pdata->hdmiphy_bus);
-	if (adapter == NULL) {
-		dev_err(dev, "hdmiphy adapter request failed\n");
-		ret = -ENXIO;
-		goto fail_vdev;
-	}
-
-	hdmi_dev->phy_sd = v4l2_i2c_new_subdev_board(&hdmi_dev->v4l2_dev,
-		adapter, pdata->hdmiphy_info, NULL);
-	/* on failure or not adapter is no longer useful */
-	i2c_put_adapter(adapter);
-	if (hdmi_dev->phy_sd == NULL) {
-		dev_err(dev, "missing subdev for hdmiphy\n");
-		ret = -ENODEV;
-		goto fail_vdev;
-	}
-
-	/* initialization of MHL interface if present */
-	if (pdata->mhl_info) {
-		adapter = i2c_get_adapter(pdata->mhl_bus);
-		if (adapter == NULL) {
-			dev_err(dev, "MHL adapter request failed\n");
-			ret = -ENXIO;
-			goto fail_vdev;
-		}
-
-		hdmi_dev->mhl_sd = v4l2_i2c_new_subdev_board(
-			&hdmi_dev->v4l2_dev, adapter,
-			pdata->mhl_info, NULL);
-		/* on failure or not adapter is no longer useful */
-		i2c_put_adapter(adapter);
-		if (hdmi_dev->mhl_sd == NULL) {
-			dev_err(dev, "missing subdev for MHL\n");
-			ret = -ENODEV;
-			goto fail_vdev;
-		}
-	}
-
-	clk_enable(hdmi_dev->res.hdmi);
-
-	pm_runtime_enable(dev);
-
-	sd = &hdmi_dev->sd;
-	v4l2_subdev_init(sd, &hdmi_sd_ops);
-	sd->owner = THIS_MODULE;
-
-	strlcpy(sd->name, "s5p-hdmi", sizeof(sd->name));
-	hdmi_dev->cur_timings =
-		hdmi_timings[HDMI_DEFAULT_TIMINGS_IDX].dv_timings;
-	/* FIXME: missing fail timings is not supported */
-	hdmi_dev->cur_conf =
-		hdmi_timings[HDMI_DEFAULT_TIMINGS_IDX].hdmi_timings;
-	hdmi_dev->cur_conf_dirty = 1;
-
-	/* storing subdev for call that have only access to struct device */
-	dev_set_drvdata(dev, sd);
-
-	dev_info(dev, "probe successful\n");
-
-	return 0;
-
-fail_vdev:
-	v4l2_device_unregister(&hdmi_dev->v4l2_dev);
-
-fail_init:
-	hdmi_resources_cleanup(hdmi_dev);
-
-fail:
-	dev_err(dev, "probe failed\n");
-	return ret;
-}
-
-static int hdmi_remove(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct v4l2_subdev *sd = dev_get_drvdata(dev);
-	struct hdmi_device *hdmi_dev = sd_to_hdmi_dev(sd);
-
-	pm_runtime_disable(dev);
-	clk_disable(hdmi_dev->res.hdmi);
-	v4l2_device_unregister(&hdmi_dev->v4l2_dev);
-	disable_irq(hdmi_dev->irq);
-	hdmi_resources_cleanup(hdmi_dev);
-	dev_info(dev, "remove successful\n");
-
-	return 0;
-}
-
-static struct platform_driver hdmi_driver __refdata = {
-	.probe = hdmi_probe,
-	.remove = hdmi_remove,
-	.id_table = hdmi_driver_types,
-	.driver = {
-		.name = "s5p-hdmi",
-		.pm = &hdmi_pm_ops,
-	}
-};
-
-module_platform_driver(hdmi_driver);
diff --git a/drivers/media/platform/s5p-tv/hdmiphy_drv.c b/drivers/media/platform/s5p-tv/hdmiphy_drv.c
deleted file mode 100644
index aae6523..0000000
--- a/drivers/media/platform/s5p-tv/hdmiphy_drv.c
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * Samsung HDMI Physical interface driver
- *
- * Copyright (C) 2010-2011 Samsung Electronics Co.Ltd
- * Author: Tomasz Stanislawski <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/err.h>
-
-#include <media/v4l2-subdev.h>
-
-MODULE_AUTHOR("Tomasz Stanislawski <t.stanislaws@samsung.com>");
-MODULE_DESCRIPTION("Samsung HDMI Physical interface driver");
-MODULE_LICENSE("GPL");
-
-struct hdmiphy_conf {
-	unsigned long pixclk;
-	const u8 *data;
-};
-
-struct hdmiphy_ctx {
-	struct v4l2_subdev sd;
-	const struct hdmiphy_conf *conf_tab;
-};
-
-static const struct hdmiphy_conf hdmiphy_conf_s5pv210[] = {
-	{ .pixclk = 27000000, .data = (u8 [32]) {
-		0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
-		0x6B, 0x10, 0x02, 0x52, 0xDF, 0xF2, 0x54, 0x87,
-		0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
-		0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, }
-	},
-	{ .pixclk = 27027000, .data = (u8 [32]) {
-		0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
-		0x6B, 0x10, 0x02, 0x52, 0xDF, 0xF2, 0x54, 0x87,
-		0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
-		0x22, 0x40, 0xE2, 0x26, 0x00, 0x00, 0x00, 0x00, }
-	},
-	{ .pixclk = 74176000, .data = (u8 [32]) {
-		0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xEF, 0x5B,
-		0x6D, 0x10, 0x01, 0x52, 0xEF, 0xF3, 0x54, 0xB9,
-		0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
-		0x22, 0x40, 0xA5, 0x26, 0x01, 0x00, 0x00, 0x00, }
-	},
-	{ .pixclk = 74250000, .data = (u8 [32]) {
-		0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xF8, 0x40,
-		0x6A, 0x10, 0x01, 0x52, 0xFF, 0xF1, 0x54, 0xBA,
-		0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
-		0x22, 0x40, 0xA4, 0x26, 0x01, 0x00, 0x00, 0x00, }
-	},
-	{ /* end marker */ }
-};
-
-static const struct hdmiphy_conf hdmiphy_conf_exynos4210[] = {
-	{ .pixclk = 27000000, .data = (u8 [32]) {
-		0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
-		0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
-		0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
-		0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, }
-	},
-	{ .pixclk = 27027000, .data = (u8 [32]) {
-		0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
-		0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
-		0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
-		0x22, 0x40, 0xE2, 0x26, 0x00, 0x00, 0x00, 0x00, }
-	},
-	{ .pixclk = 74176000, .data = (u8 [32]) {
-		0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xEF, 0x5B,
-		0x6D, 0x10, 0x01, 0x51, 0xEF, 0xF3, 0x54, 0xB9,
-		0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
-		0x22, 0x40, 0xA5, 0x26, 0x01, 0x00, 0x00, 0x00, }
-	},
-	{ .pixclk = 74250000, .data = (u8 [32]) {
-		0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xF8, 0x40,
-		0x6A, 0x10, 0x01, 0x51, 0xFF, 0xF1, 0x54, 0xBA,
-		0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
-		0x22, 0x40, 0xA4, 0x26, 0x01, 0x00, 0x00, 0x00, }
-	},
-	{ .pixclk = 148352000, .data = (u8 [32]) {
-		0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xEF, 0x5B,
-		0x6D, 0x18, 0x00, 0x51, 0xEF, 0xF3, 0x54, 0xB9,
-		0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
-		0x11, 0x40, 0xA5, 0x26, 0x02, 0x00, 0x00, 0x00, }
-	},
-	{ .pixclk = 148500000, .data = (u8 [32]) {
-		0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xF8, 0x40,
-		0x6A, 0x18, 0x00, 0x51, 0xFF, 0xF1, 0x54, 0xBA,
-		0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
-		0x11, 0x40, 0xA4, 0x26, 0x02, 0x00, 0x00, 0x00, }
-	},
-	{ /* end marker */ }
-};
-
-static const struct hdmiphy_conf hdmiphy_conf_exynos4212[] = {
-	{ .pixclk = 27000000, .data = (u8 [32]) {
-		0x01, 0x11, 0x2D, 0x75, 0x00, 0x01, 0x00, 0x08,
-		0x82, 0x00, 0x0E, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
-		0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x71,
-		0x54, 0xE3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
-	},
-	{ .pixclk = 27027000, .data = (u8 [32]) {
-		0x01, 0x91, 0x2D, 0x72, 0x00, 0x64, 0x12, 0x08,
-		0x43, 0x20, 0x0E, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
-		0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x71,
-		0x54, 0xE2, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
-	},
-	{ .pixclk = 74176000, .data = (u8 [32]) {
-		0x01, 0x91, 0x3E, 0x35, 0x00, 0x5B, 0xDE, 0x08,
-		0x82, 0x20, 0x73, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
-		0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x52,
-		0x54, 0xA5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
-	},
-	{ .pixclk = 74250000, .data = (u8 [32]) {
-		0x01, 0x91, 0x3E, 0x35, 0x00, 0x40, 0xF0, 0x08,
-		0x82, 0x20, 0x73, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
-		0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x52,
-		0x54, 0xA4, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
-	},
-	{ .pixclk = 148500000, .data = (u8 [32]) {
-		0x01, 0x91, 0x3E, 0x15, 0x00, 0x40, 0xF0, 0x08,
-		0x82, 0x20, 0x73, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
-		0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0xA4,
-		0x54, 0x4A, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00, }
-	},
-	{ /* end marker */ }
-};
-
-static const struct hdmiphy_conf hdmiphy_conf_exynos4412[] = {
-	{ .pixclk = 27000000, .data = (u8 [32]) {
-		0x01, 0x11, 0x2D, 0x75, 0x40, 0x01, 0x00, 0x08,
-		0x82, 0x00, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
-		0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
-		0x54, 0xE4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
-	},
-	{ .pixclk = 27027000, .data = (u8 [32]) {
-		0x01, 0x91, 0x2D, 0x72, 0x40, 0x64, 0x12, 0x08,
-		0x43, 0x20, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
-		0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
-		0x54, 0xE3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
-	},
-	{ .pixclk = 74176000, .data = (u8 [32]) {
-		0x01, 0x91, 0x1F, 0x10, 0x40, 0x5B, 0xEF, 0x08,
-		0x81, 0x20, 0xB9, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
-		0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
-		0x54, 0xA6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
-	},
-	{ .pixclk = 74250000, .data = (u8 [32]) {
-		0x01, 0x91, 0x1F, 0x10, 0x40, 0x40, 0xF8, 0x08,
-		0x81, 0x20, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
-		0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
-		0x54, 0xA5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
-	},
-	{ .pixclk = 148500000, .data = (u8 [32]) {
-		0x01, 0x91, 0x1F, 0x00, 0x40, 0x40, 0xF8, 0x08,
-		0x81, 0x20, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
-		0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
-		0x54, 0x4B, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00, }
-	},
-	{ /* end marker */ }
-};
-
-static inline struct hdmiphy_ctx *sd_to_ctx(struct v4l2_subdev *sd)
-{
-	return container_of(sd, struct hdmiphy_ctx, sd);
-}
-
-static const u8 *hdmiphy_find_conf(unsigned long pixclk,
-		const struct hdmiphy_conf *conf)
-{
-	for (; conf->pixclk; ++conf)
-		if (conf->pixclk == pixclk)
-			return conf->data;
-	return NULL;
-}
-
-static int hdmiphy_s_power(struct v4l2_subdev *sd, int on)
-{
-	/* to be implemented */
-	return 0;
-}
-
-static int hdmiphy_s_dv_timings(struct v4l2_subdev *sd,
-	struct v4l2_dv_timings *timings)
-{
-	const u8 *data;
-	u8 buffer[32];
-	int ret;
-	struct hdmiphy_ctx *ctx = sd_to_ctx(sd);
-	struct i2c_client *client = v4l2_get_subdevdata(sd);
-	struct device *dev = &client->dev;
-	unsigned long pixclk = timings->bt.pixelclock;
-
-	dev_info(dev, "s_dv_timings\n");
-	if ((timings->bt.flags & V4L2_DV_FL_REDUCED_FPS) && pixclk == 74250000)
-		pixclk = 74176000;
-	data = hdmiphy_find_conf(pixclk, ctx->conf_tab);
-	if (!data) {
-		dev_err(dev, "format not supported\n");
-		return -EINVAL;
-	}
-
-	/* storing configuration to the device */
-	memcpy(buffer, data, 32);
-	ret = i2c_master_send(client, buffer, 32);
-	if (ret != 32) {
-		dev_err(dev, "failed to configure HDMIPHY via I2C\n");
-		return -EIO;
-	}
-
-	return 0;
-}
-
-static int hdmiphy_dv_timings_cap(struct v4l2_subdev *sd,
-	struct v4l2_dv_timings_cap *cap)
-{
-	if (cap->pad != 0)
-		return -EINVAL;
-
-	cap->type = V4L2_DV_BT_656_1120;
-	/* The phy only determines the pixelclock, leave the other values
-	 * at 0 to signify that we have no information for them. */
-	cap->bt.min_pixelclock = 27000000;
-	cap->bt.max_pixelclock = 148500000;
-	return 0;
-}
-
-static int hdmiphy_s_stream(struct v4l2_subdev *sd, int enable)
-{
-	struct i2c_client *client = v4l2_get_subdevdata(sd);
-	struct device *dev = &client->dev;
-	u8 buffer[2];
-	int ret;
-
-	dev_info(dev, "s_stream(%d)\n", enable);
-	/* going to/from configuration from/to operation mode */
-	buffer[0] = 0x1f;
-	buffer[1] = enable ? 0x80 : 0x00;
-
-	ret = i2c_master_send(client, buffer, 2);
-	if (ret != 2) {
-		dev_err(dev, "stream (%d) failed\n", enable);
-		return -EIO;
-	}
-	return 0;
-}
-
-static const struct v4l2_subdev_core_ops hdmiphy_core_ops = {
-	.s_power =  hdmiphy_s_power,
-};
-
-static const struct v4l2_subdev_video_ops hdmiphy_video_ops = {
-	.s_dv_timings = hdmiphy_s_dv_timings,
-	.s_stream =  hdmiphy_s_stream,
-};
-
-static const struct v4l2_subdev_pad_ops hdmiphy_pad_ops = {
-	.dv_timings_cap = hdmiphy_dv_timings_cap,
-};
-
-static const struct v4l2_subdev_ops hdmiphy_ops = {
-	.core = &hdmiphy_core_ops,
-	.video = &hdmiphy_video_ops,
-	.pad = &hdmiphy_pad_ops,
-};
-
-static int hdmiphy_probe(struct i2c_client *client,
-			 const struct i2c_device_id *id)
-{
-	struct hdmiphy_ctx *ctx;
-
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (!ctx)
-		return -ENOMEM;
-
-	ctx->conf_tab = (struct hdmiphy_conf *)id->driver_data;
-	v4l2_i2c_subdev_init(&ctx->sd, client, &hdmiphy_ops);
-
-	dev_info(&client->dev, "probe successful\n");
-	return 0;
-}
-
-static int hdmiphy_remove(struct i2c_client *client)
-{
-	struct v4l2_subdev *sd = i2c_get_clientdata(client);
-	struct hdmiphy_ctx *ctx = sd_to_ctx(sd);
-
-	kfree(ctx);
-	dev_info(&client->dev, "remove successful\n");
-
-	return 0;
-}
-
-static const struct i2c_device_id hdmiphy_id[] = {
-	{ "hdmiphy", (unsigned long)hdmiphy_conf_exynos4210 },
-	{ "hdmiphy-s5pv210", (unsigned long)hdmiphy_conf_s5pv210 },
-	{ "hdmiphy-exynos4210", (unsigned long)hdmiphy_conf_exynos4210 },
-	{ "hdmiphy-exynos4212", (unsigned long)hdmiphy_conf_exynos4212 },
-	{ "hdmiphy-exynos4412", (unsigned long)hdmiphy_conf_exynos4412 },
-	{ },
-};
-MODULE_DEVICE_TABLE(i2c, hdmiphy_id);
-
-static struct i2c_driver hdmiphy_driver = {
-	.driver = {
-		.name	= "s5p-hdmiphy",
-	},
-	.probe		= hdmiphy_probe,
-	.remove		= hdmiphy_remove,
-	.id_table = hdmiphy_id,
-};
-
-module_i2c_driver(hdmiphy_driver);
diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
deleted file mode 100644
index 869f0ce..0000000
--- a/drivers/media/platform/s5p-tv/mixer.h
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * Samsung TV Mixer driver
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *
- * Tomasz Stanislawski, <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundiation. either version 2 of the License,
- * or (at your option) any later version
- */
-
-#ifndef SAMSUNG_MIXER_H
-#define SAMSUNG_MIXER_H
-
-#ifdef CONFIG_VIDEO_SAMSUNG_S5P_MIXER_DEBUG
-	#define DEBUG
-#endif
-
-#include <linux/fb.h>
-#include <linux/irqreturn.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <media/v4l2-device.h>
-#include <media/videobuf2-v4l2.h>
-
-#include "regs-mixer.h"
-
-/** maximum number of output interfaces */
-#define MXR_MAX_OUTPUTS 2
-/** maximum number of input interfaces (layers) */
-#define MXR_MAX_LAYERS 3
-#define MXR_DRIVER_NAME "s5p-mixer"
-/** maximal number of planes for every layer */
-#define MXR_MAX_PLANES	2
-
-#define MXR_ENABLE 1
-#define MXR_DISABLE 0
-
-/** description of a macroblock for packed formats */
-struct mxr_block {
-	/** vertical number of pixels in macroblock */
-	unsigned int width;
-	/** horizontal number of pixels in macroblock */
-	unsigned int height;
-	/** size of block in bytes */
-	unsigned int size;
-};
-
-/** description of supported format */
-struct mxr_format {
-	/** format name/mnemonic */
-	const char *name;
-	/** fourcc identifier */
-	u32 fourcc;
-	/** colorspace identifier */
-	enum v4l2_colorspace colorspace;
-	/** number of planes in image data */
-	int num_planes;
-	/** description of block for each plane */
-	struct mxr_block plane[MXR_MAX_PLANES];
-	/** number of subframes in image data */
-	int num_subframes;
-	/** specifies to which subframe belong given plane */
-	int plane2subframe[MXR_MAX_PLANES];
-	/** internal code, driver dependent */
-	unsigned long cookie;
-};
-
-/** description of crop configuration for image */
-struct mxr_crop {
-	/** width of layer in pixels */
-	unsigned int full_width;
-	/** height of layer in pixels */
-	unsigned int full_height;
-	/** horizontal offset of first pixel to be displayed */
-	unsigned int x_offset;
-	/** vertical offset of first pixel to be displayed */
-	unsigned int y_offset;
-	/** width of displayed data in pixels */
-	unsigned int width;
-	/** height of displayed data in pixels */
-	unsigned int height;
-	/** indicate which fields are present in buffer */
-	unsigned int field;
-};
-
-/** stages of geometry operations */
-enum mxr_geometry_stage {
-	MXR_GEOMETRY_SINK,
-	MXR_GEOMETRY_COMPOSE,
-	MXR_GEOMETRY_CROP,
-	MXR_GEOMETRY_SOURCE,
-};
-
-/* flag indicating that offset should be 0 */
-#define MXR_NO_OFFSET	0x80000000
-
-/** description of transformation from source to destination image */
-struct mxr_geometry {
-	/** cropping for source image */
-	struct mxr_crop src;
-	/** cropping for destination image */
-	struct mxr_crop dst;
-	/** layer-dependant description of horizontal scaling */
-	unsigned int x_ratio;
-	/** layer-dependant description of vertical scaling */
-	unsigned int y_ratio;
-};
-
-/** instance of a buffer */
-struct mxr_buffer {
-	/** common v4l buffer stuff -- must be first */
-	struct vb2_v4l2_buffer vb;
-	/** node for layer's lists */
-	struct list_head	list;
-};
-
-
-/** internal states of layer */
-enum mxr_layer_state {
-	/** layers is not shown */
-	MXR_LAYER_IDLE = 0,
-	/** layer is shown */
-	MXR_LAYER_STREAMING,
-	/** state before STREAMOFF is finished */
-	MXR_LAYER_STREAMING_FINISH,
-};
-
-/** forward declarations */
-struct mxr_device;
-struct mxr_layer;
-
-/** callback for layers operation */
-struct mxr_layer_ops {
-	/* TODO: try to port it to subdev API */
-	/** handler for resource release function */
-	void (*release)(struct mxr_layer *);
-	/** setting buffer to HW */
-	void (*buffer_set)(struct mxr_layer *, struct mxr_buffer *);
-	/** setting format and geometry in HW */
-	void (*format_set)(struct mxr_layer *);
-	/** streaming stop/start */
-	void (*stream_set)(struct mxr_layer *, int);
-	/** adjusting geometry */
-	void (*fix_geometry)(struct mxr_layer *,
-		enum mxr_geometry_stage, unsigned long);
-};
-
-/** layer instance, a single window and content displayed on output */
-struct mxr_layer {
-	/** parent mixer device */
-	struct mxr_device *mdev;
-	/** layer index (unique identifier) */
-	int idx;
-	/** callbacks for layer methods */
-	struct mxr_layer_ops ops;
-	/** format array */
-	const struct mxr_format **fmt_array;
-	/** size of format array */
-	unsigned long fmt_array_size;
-
-	/** lock for protection of list and state fields */
-	spinlock_t enq_slock;
-	/** list for enqueued buffers */
-	struct list_head enq_list;
-	/** buffer currently owned by hardware in temporary registers */
-	struct mxr_buffer *update_buf;
-	/** buffer currently owned by hardware in shadow registers */
-	struct mxr_buffer *shadow_buf;
-	/** state of layer IDLE/STREAMING */
-	enum mxr_layer_state state;
-
-	/** mutex for protection of fields below */
-	struct mutex mutex;
-	/** handler for video node */
-	struct video_device vfd;
-	/** queue for output buffers */
-	struct vb2_queue vb_queue;
-	/** current image format */
-	const struct mxr_format *fmt;
-	/** current geometry of image */
-	struct mxr_geometry geo;
-};
-
-/** description of mixers output interface */
-struct mxr_output {
-	/** name of output */
-	char name[32];
-	/** output subdev */
-	struct v4l2_subdev *sd;
-	/** cookie used for configuration of registers */
-	int cookie;
-};
-
-/** specify source of output subdevs */
-struct mxr_output_conf {
-	/** name of output (connector) */
-	char *output_name;
-	/** name of module that generates output subdev */
-	char *module_name;
-	/** cookie need for mixer HW */
-	int cookie;
-};
-
-struct clk;
-struct regulator;
-
-/** auxiliary resources used my mixer */
-struct mxr_resources {
-	/** interrupt index */
-	int irq;
-	/** pointer to Mixer registers */
-	void __iomem *mxr_regs;
-	/** pointer to Video Processor registers */
-	void __iomem *vp_regs;
-	/** other resources, should used under mxr_device.mutex */
-	struct clk *mixer;
-	struct clk *vp;
-	struct clk *sclk_mixer;
-	struct clk *sclk_hdmi;
-	struct clk *sclk_dac;
-};
-
-/* event flags used  */
-enum mxr_devide_flags {
-	MXR_EVENT_VSYNC = 0,
-	MXR_EVENT_TOP = 1,
-};
-
-/** drivers instance */
-struct mxr_device {
-	/** master device */
-	struct device *dev;
-	/** state of each layer */
-	struct mxr_layer *layer[MXR_MAX_LAYERS];
-	/** state of each output */
-	struct mxr_output *output[MXR_MAX_OUTPUTS];
-	/** number of registered outputs */
-	int output_cnt;
-
-	/* video resources */
-
-	/** V4L2 device */
-	struct v4l2_device v4l2_dev;
-	/** event wait queue */
-	wait_queue_head_t event_queue;
-	/** state flags */
-	unsigned long event_flags;
-
-	/** spinlock for protection of registers */
-	spinlock_t reg_slock;
-
-	/** mutex for protection of fields below */
-	struct mutex mutex;
-	/** number of entities depndant on output configuration */
-	int n_output;
-	/** number of users that do streaming */
-	int n_streamer;
-	/** index of current output */
-	int current_output;
-	/** auxiliary resources used my mixer */
-	struct mxr_resources res;
-};
-
-/** transform device structure into mixer device */
-static inline struct mxr_device *to_mdev(struct device *dev)
-{
-	struct v4l2_device *vdev = dev_get_drvdata(dev);
-	return container_of(vdev, struct mxr_device, v4l2_dev);
-}
-
-/** get current output data, should be called under mdev's mutex */
-static inline struct mxr_output *to_output(struct mxr_device *mdev)
-{
-	return mdev->output[mdev->current_output];
-}
-
-/** get current output subdev, should be called under mdev's mutex */
-static inline struct v4l2_subdev *to_outsd(struct mxr_device *mdev)
-{
-	struct mxr_output *out = to_output(mdev);
-	return out ? out->sd : NULL;
-}
-
-/** forward declaration for mixer platform data */
-struct mxr_platform_data;
-
-/** acquiring common video resources */
-int mxr_acquire_video(struct mxr_device *mdev,
-	struct mxr_output_conf *output_cont, int output_count);
-
-/** releasing common video resources */
-void mxr_release_video(struct mxr_device *mdev);
-
-struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx);
-struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx);
-struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
-	int idx, char *name, const struct mxr_layer_ops *ops);
-
-void mxr_base_layer_release(struct mxr_layer *layer);
-void mxr_layer_release(struct mxr_layer *layer);
-
-int mxr_base_layer_register(struct mxr_layer *layer);
-void mxr_base_layer_unregister(struct mxr_layer *layer);
-
-unsigned long mxr_get_plane_size(const struct mxr_block *blk,
-	unsigned int width, unsigned int height);
-
-/** adds new consumer for mixer's power */
-int __must_check mxr_power_get(struct mxr_device *mdev);
-/** removes consumer for mixer's power */
-void mxr_power_put(struct mxr_device *mdev);
-/** add new client for output configuration */
-void mxr_output_get(struct mxr_device *mdev);
-/** removes new client for output configuration */
-void mxr_output_put(struct mxr_device *mdev);
-/** add new client for streaming */
-void mxr_streamer_get(struct mxr_device *mdev);
-/** removes new client for streaming */
-void mxr_streamer_put(struct mxr_device *mdev);
-/** returns format of data delivared to current output */
-void mxr_get_mbus_fmt(struct mxr_device *mdev,
-	struct v4l2_mbus_framefmt *mbus_fmt);
-
-/* Debug */
-
-#define mxr_err(mdev, fmt, ...)  dev_err(mdev->dev, fmt, ##__VA_ARGS__)
-#define mxr_warn(mdev, fmt, ...) dev_warn(mdev->dev, fmt, ##__VA_ARGS__)
-#define mxr_info(mdev, fmt, ...) dev_info(mdev->dev, fmt, ##__VA_ARGS__)
-
-#ifdef CONFIG_VIDEO_SAMSUNG_S5P_MIXER_DEBUG
-	#define mxr_dbg(mdev, fmt, ...)  dev_dbg(mdev->dev, fmt, ##__VA_ARGS__)
-#else
-	#define mxr_dbg(mdev, fmt, ...)  do { (void) mdev; } while (0)
-#endif
-
-/* accessing Mixer's and Video Processor's registers */
-
-void mxr_vsync_set_update(struct mxr_device *mdev, int en);
-void mxr_reg_reset(struct mxr_device *mdev);
-irqreturn_t mxr_irq_handler(int irq, void *dev_data);
-void mxr_reg_s_output(struct mxr_device *mdev, int cookie);
-void mxr_reg_streamon(struct mxr_device *mdev);
-void mxr_reg_streamoff(struct mxr_device *mdev);
-int mxr_reg_wait4vsync(struct mxr_device *mdev);
-void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
-	struct v4l2_mbus_framefmt *fmt);
-void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en);
-void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr);
-void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
-	const struct mxr_format *fmt, const struct mxr_geometry *geo);
-
-void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en);
-void mxr_reg_vp_buffer(struct mxr_device *mdev,
-	dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2]);
-void mxr_reg_vp_format(struct mxr_device *mdev,
-	const struct mxr_format *fmt, const struct mxr_geometry *geo);
-void mxr_reg_dump(struct mxr_device *mdev);
-
-#endif /* SAMSUNG_MIXER_H */
-
diff --git a/drivers/media/platform/s5p-tv/mixer_drv.c b/drivers/media/platform/s5p-tv/mixer_drv.c
deleted file mode 100644
index 8a5d194..0000000
--- a/drivers/media/platform/s5p-tv/mixer_drv.c
+++ /dev/null
@@ -1,527 +0,0 @@
-/*
- * Samsung TV Mixer driver
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *
- * Tomasz Stanislawski, <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundiation. either version 2 of the License,
- * or (at your option) any later version
- */
-
-#include "mixer.h"
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/fb.h>
-#include <linux/delay.h>
-#include <linux/pm_runtime.h>
-#include <linux/clk.h>
-
-MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
-MODULE_DESCRIPTION("Samsung MIXER");
-MODULE_LICENSE("GPL");
-
-/* --------- DRIVER PARAMETERS ---------- */
-
-static struct mxr_output_conf mxr_output_conf[] = {
-	{
-		.output_name = "S5P HDMI connector",
-		.module_name = "s5p-hdmi",
-		.cookie = 1,
-	},
-	{
-		.output_name = "S5P SDO connector",
-		.module_name = "s5p-sdo",
-		.cookie = 0,
-	},
-};
-
-void mxr_get_mbus_fmt(struct mxr_device *mdev,
-	struct v4l2_mbus_framefmt *mbus_fmt)
-{
-	struct v4l2_subdev *sd;
-	struct v4l2_subdev_format fmt = {
-		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
-	};
-	int ret;
-
-	mutex_lock(&mdev->mutex);
-	sd = to_outsd(mdev);
-	ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
-	*mbus_fmt = fmt.format;
-	WARN(ret, "failed to get mbus_fmt for output %s\n", sd->name);
-	mutex_unlock(&mdev->mutex);
-}
-
-void mxr_streamer_get(struct mxr_device *mdev)
-{
-	mutex_lock(&mdev->mutex);
-	++mdev->n_streamer;
-	mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_streamer);
-	if (mdev->n_streamer == 1) {
-		struct v4l2_subdev *sd = to_outsd(mdev);
-		struct v4l2_subdev_format fmt = {
-			.which = V4L2_SUBDEV_FORMAT_ACTIVE,
-		};
-		struct v4l2_mbus_framefmt *mbus_fmt = &fmt.format;
-		struct mxr_resources *res = &mdev->res;
-		int ret;
-
-		if (to_output(mdev)->cookie == 0)
-			clk_set_parent(res->sclk_mixer, res->sclk_dac);
-		else
-			clk_set_parent(res->sclk_mixer, res->sclk_hdmi);
-		mxr_reg_s_output(mdev, to_output(mdev)->cookie);
-
-		ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
-		WARN(ret, "failed to get mbus_fmt for output %s\n", sd->name);
-		ret = v4l2_subdev_call(sd, video, s_stream, 1);
-		WARN(ret, "starting stream failed for output %s\n", sd->name);
-
-		mxr_reg_set_mbus_fmt(mdev, mbus_fmt);
-		mxr_reg_streamon(mdev);
-		ret = mxr_reg_wait4vsync(mdev);
-		WARN(ret, "failed to get vsync (%d) from output\n", ret);
-	}
-	mutex_unlock(&mdev->mutex);
-	mxr_reg_dump(mdev);
-	/* FIXME: what to do when streaming fails? */
-}
-
-void mxr_streamer_put(struct mxr_device *mdev)
-{
-	mutex_lock(&mdev->mutex);
-	--mdev->n_streamer;
-	mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_streamer);
-	if (mdev->n_streamer == 0) {
-		int ret;
-		struct v4l2_subdev *sd = to_outsd(mdev);
-
-		mxr_reg_streamoff(mdev);
-		/* vsync applies Mixer setup */
-		ret = mxr_reg_wait4vsync(mdev);
-		WARN(ret, "failed to get vsync (%d) from output\n", ret);
-		ret = v4l2_subdev_call(sd, video, s_stream, 0);
-		WARN(ret, "stopping stream failed for output %s\n", sd->name);
-	}
-	WARN(mdev->n_streamer < 0, "negative number of streamers (%d)\n",
-		mdev->n_streamer);
-	mutex_unlock(&mdev->mutex);
-	mxr_reg_dump(mdev);
-}
-
-void mxr_output_get(struct mxr_device *mdev)
-{
-	mutex_lock(&mdev->mutex);
-	++mdev->n_output;
-	mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_output);
-	/* turn on auxiliary driver */
-	if (mdev->n_output == 1)
-		v4l2_subdev_call(to_outsd(mdev), core, s_power, 1);
-	mutex_unlock(&mdev->mutex);
-}
-
-void mxr_output_put(struct mxr_device *mdev)
-{
-	mutex_lock(&mdev->mutex);
-	--mdev->n_output;
-	mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_output);
-	/* turn on auxiliary driver */
-	if (mdev->n_output == 0)
-		v4l2_subdev_call(to_outsd(mdev), core, s_power, 0);
-	WARN(mdev->n_output < 0, "negative number of output users (%d)\n",
-		mdev->n_output);
-	mutex_unlock(&mdev->mutex);
-}
-
-int mxr_power_get(struct mxr_device *mdev)
-{
-	int ret = pm_runtime_get_sync(mdev->dev);
-
-	/* returning 1 means that power is already enabled,
-	 * so zero success be returned */
-	if (ret < 0)
-		return ret;
-	return 0;
-}
-
-void mxr_power_put(struct mxr_device *mdev)
-{
-	pm_runtime_put_sync(mdev->dev);
-}
-
-/* --------- RESOURCE MANAGEMENT -------------*/
-
-static int mxr_acquire_plat_resources(struct mxr_device *mdev,
-				      struct platform_device *pdev)
-{
-	struct resource *res;
-	int ret;
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mxr");
-	if (res == NULL) {
-		mxr_err(mdev, "get memory resource failed.\n");
-		ret = -ENXIO;
-		goto fail;
-	}
-
-	mdev->res.mxr_regs = ioremap(res->start, resource_size(res));
-	if (mdev->res.mxr_regs == NULL) {
-		mxr_err(mdev, "register mapping failed.\n");
-		ret = -ENXIO;
-		goto fail;
-	}
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vp");
-	if (res == NULL) {
-		mxr_err(mdev, "get memory resource failed.\n");
-		ret = -ENXIO;
-		goto fail_mxr_regs;
-	}
-
-	mdev->res.vp_regs = ioremap(res->start, resource_size(res));
-	if (mdev->res.vp_regs == NULL) {
-		mxr_err(mdev, "register mapping failed.\n");
-		ret = -ENXIO;
-		goto fail_mxr_regs;
-	}
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq");
-	if (res == NULL) {
-		mxr_err(mdev, "get interrupt resource failed.\n");
-		ret = -ENXIO;
-		goto fail_vp_regs;
-	}
-
-	ret = request_irq(res->start, mxr_irq_handler, 0, "s5p-mixer", mdev);
-	if (ret) {
-		mxr_err(mdev, "request interrupt failed.\n");
-		goto fail_vp_regs;
-	}
-	mdev->res.irq = res->start;
-
-	return 0;
-
-fail_vp_regs:
-	iounmap(mdev->res.vp_regs);
-
-fail_mxr_regs:
-	iounmap(mdev->res.mxr_regs);
-
-fail:
-	return ret;
-}
-
-static void mxr_resource_clear_clocks(struct mxr_resources *res)
-{
-	res->mixer	= ERR_PTR(-EINVAL);
-	res->vp		= ERR_PTR(-EINVAL);
-	res->sclk_mixer	= ERR_PTR(-EINVAL);
-	res->sclk_hdmi	= ERR_PTR(-EINVAL);
-	res->sclk_dac	= ERR_PTR(-EINVAL);
-}
-
-static void mxr_release_plat_resources(struct mxr_device *mdev)
-{
-	free_irq(mdev->res.irq, mdev);
-	iounmap(mdev->res.vp_regs);
-	iounmap(mdev->res.mxr_regs);
-}
-
-static void mxr_release_clocks(struct mxr_device *mdev)
-{
-	struct mxr_resources *res = &mdev->res;
-
-	if (!IS_ERR(res->sclk_dac))
-		clk_put(res->sclk_dac);
-	if (!IS_ERR(res->sclk_hdmi))
-		clk_put(res->sclk_hdmi);
-	if (!IS_ERR(res->sclk_mixer))
-		clk_put(res->sclk_mixer);
-	if (!IS_ERR(res->vp))
-		clk_put(res->vp);
-	if (!IS_ERR(res->mixer))
-		clk_put(res->mixer);
-}
-
-static int mxr_acquire_clocks(struct mxr_device *mdev)
-{
-	struct mxr_resources *res = &mdev->res;
-	struct device *dev = mdev->dev;
-
-	mxr_resource_clear_clocks(res);
-
-	res->mixer = clk_get(dev, "mixer");
-	if (IS_ERR(res->mixer)) {
-		mxr_err(mdev, "failed to get clock 'mixer'\n");
-		goto fail;
-	}
-	res->vp = clk_get(dev, "vp");
-	if (IS_ERR(res->vp)) {
-		mxr_err(mdev, "failed to get clock 'vp'\n");
-		goto fail;
-	}
-	res->sclk_mixer = clk_get(dev, "sclk_mixer");
-	if (IS_ERR(res->sclk_mixer)) {
-		mxr_err(mdev, "failed to get clock 'sclk_mixer'\n");
-		goto fail;
-	}
-	res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
-	if (IS_ERR(res->sclk_hdmi)) {
-		mxr_err(mdev, "failed to get clock 'sclk_hdmi'\n");
-		goto fail;
-	}
-	res->sclk_dac = clk_get(dev, "sclk_dac");
-	if (IS_ERR(res->sclk_dac)) {
-		mxr_err(mdev, "failed to get clock 'sclk_dac'\n");
-		goto fail;
-	}
-
-	return 0;
-fail:
-	mxr_release_clocks(mdev);
-	return -ENODEV;
-}
-
-static int mxr_acquire_resources(struct mxr_device *mdev,
-				 struct platform_device *pdev)
-{
-	int ret;
-	ret = mxr_acquire_plat_resources(mdev, pdev);
-
-	if (ret)
-		goto fail;
-
-	ret = mxr_acquire_clocks(mdev);
-	if (ret)
-		goto fail_plat;
-
-	mxr_info(mdev, "resources acquired\n");
-	return 0;
-
-fail_plat:
-	mxr_release_plat_resources(mdev);
-fail:
-	mxr_err(mdev, "resources acquire failed\n");
-	return ret;
-}
-
-static void mxr_release_resources(struct mxr_device *mdev)
-{
-	mxr_release_clocks(mdev);
-	mxr_release_plat_resources(mdev);
-	memset(&mdev->res, 0, sizeof(mdev->res));
-	mxr_resource_clear_clocks(&mdev->res);
-}
-
-static void mxr_release_layers(struct mxr_device *mdev)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(mdev->layer); ++i)
-		if (mdev->layer[i])
-			mxr_layer_release(mdev->layer[i]);
-}
-
-static int mxr_acquire_layers(struct mxr_device *mdev,
-			      struct mxr_platform_data *pdata)
-{
-	mdev->layer[0] = mxr_graph_layer_create(mdev, 0);
-	mdev->layer[1] = mxr_graph_layer_create(mdev, 1);
-	mdev->layer[2] = mxr_vp_layer_create(mdev, 0);
-
-	if (!mdev->layer[0] || !mdev->layer[1] || !mdev->layer[2]) {
-		mxr_err(mdev, "failed to acquire layers\n");
-		goto fail;
-	}
-
-	return 0;
-
-fail:
-	mxr_release_layers(mdev);
-	return -ENODEV;
-}
-
-/* ---------- POWER MANAGEMENT ----------- */
-
-static int mxr_runtime_resume(struct device *dev)
-{
-	struct mxr_device *mdev = to_mdev(dev);
-	struct mxr_resources *res = &mdev->res;
-	int ret;
-
-	mxr_dbg(mdev, "resume - start\n");
-	mutex_lock(&mdev->mutex);
-	/* turn clocks on */
-	ret = clk_prepare_enable(res->mixer);
-	if (ret < 0) {
-		dev_err(mdev->dev, "clk_prepare_enable(mixer) failed\n");
-		goto fail;
-	}
-	ret = clk_prepare_enable(res->vp);
-	if (ret < 0) {
-		dev_err(mdev->dev, "clk_prepare_enable(vp) failed\n");
-		goto fail_mixer;
-	}
-	ret = clk_prepare_enable(res->sclk_mixer);
-	if (ret < 0) {
-		dev_err(mdev->dev, "clk_prepare_enable(sclk_mixer) failed\n");
-		goto fail_vp;
-	}
-	/* apply default configuration */
-	mxr_reg_reset(mdev);
-	mxr_dbg(mdev, "resume - finished\n");
-
-	mutex_unlock(&mdev->mutex);
-	return 0;
-
-fail_vp:
-	clk_disable_unprepare(res->vp);
-fail_mixer:
-	clk_disable_unprepare(res->mixer);
-fail:
-	mutex_unlock(&mdev->mutex);
-	dev_err(mdev->dev, "resume failed\n");
-	return ret;
-}
-
-static int mxr_runtime_suspend(struct device *dev)
-{
-	struct mxr_device *mdev = to_mdev(dev);
-	struct mxr_resources *res = &mdev->res;
-	mxr_dbg(mdev, "suspend - start\n");
-	mutex_lock(&mdev->mutex);
-	/* turn clocks off */
-	clk_disable_unprepare(res->sclk_mixer);
-	clk_disable_unprepare(res->vp);
-	clk_disable_unprepare(res->mixer);
-	mutex_unlock(&mdev->mutex);
-	mxr_dbg(mdev, "suspend - finished\n");
-	return 0;
-}
-
-static const struct dev_pm_ops mxr_pm_ops = {
-	.runtime_suspend = mxr_runtime_suspend,
-	.runtime_resume	 = mxr_runtime_resume,
-};
-
-/* --------- DRIVER INITIALIZATION ---------- */
-
-static int mxr_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct mxr_platform_data *pdata = dev->platform_data;
-	struct mxr_device *mdev;
-	int ret;
-
-	/* mdev does not exist yet so no mxr_dbg is used */
-	dev_info(dev, "probe start\n");
-
-	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
-	if (!mdev) {
-		dev_err(dev, "not enough memory.\n");
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	/* setup pointer to master device */
-	mdev->dev = dev;
-
-	mutex_init(&mdev->mutex);
-	spin_lock_init(&mdev->reg_slock);
-	init_waitqueue_head(&mdev->event_queue);
-
-	/* acquire resources: regs, irqs, clocks, regulators */
-	ret = mxr_acquire_resources(mdev, pdev);
-	if (ret)
-		goto fail_mem;
-
-	/* configure resources for video output */
-	ret = mxr_acquire_video(mdev, mxr_output_conf,
-		ARRAY_SIZE(mxr_output_conf));
-	if (ret)
-		goto fail_resources;
-
-	/* configure layers */
-	ret = mxr_acquire_layers(mdev, pdata);
-	if (ret)
-		goto fail_video;
-
-	pm_runtime_enable(dev);
-
-	mxr_info(mdev, "probe successful\n");
-	return 0;
-
-fail_video:
-	mxr_release_video(mdev);
-
-fail_resources:
-	mxr_release_resources(mdev);
-
-fail_mem:
-	kfree(mdev);
-
-fail:
-	dev_info(dev, "probe failed\n");
-	return ret;
-}
-
-static int mxr_remove(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct mxr_device *mdev = to_mdev(dev);
-
-	pm_runtime_disable(dev);
-
-	mxr_release_layers(mdev);
-	mxr_release_video(mdev);
-	mxr_release_resources(mdev);
-
-	kfree(mdev);
-
-	dev_info(dev, "remove successful\n");
-	return 0;
-}
-
-static struct platform_driver mxr_driver __refdata = {
-	.probe = mxr_probe,
-	.remove = mxr_remove,
-	.driver = {
-		.name = MXR_DRIVER_NAME,
-		.pm = &mxr_pm_ops,
-	}
-};
-
-static int __init mxr_init(void)
-{
-	int i, ret;
-	static const char banner[] __initconst =
-		"Samsung TV Mixer driver, "
-		"(c) 2010-2011 Samsung Electronics Co., Ltd.\n";
-	pr_info("%s\n", banner);
-
-	/* Loading auxiliary modules */
-	for (i = 0; i < ARRAY_SIZE(mxr_output_conf); ++i)
-		request_module(mxr_output_conf[i].module_name);
-
-	ret = platform_driver_register(&mxr_driver);
-	if (ret != 0) {
-		pr_err("s5p-tv: registration of MIXER driver failed\n");
-		return -ENXIO;
-	}
-
-	return 0;
-}
-module_init(mxr_init);
-
-static void __exit mxr_exit(void)
-{
-	platform_driver_unregister(&mxr_driver);
-}
-module_exit(mxr_exit);
diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
deleted file mode 100644
index d4d2564..0000000
--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Samsung TV Mixer driver
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *
- * Tomasz Stanislawski, <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundiation. either version 2 of the License,
- * or (at your option) any later version
- */
-
-#include "mixer.h"
-
-#include <media/videobuf2-dma-contig.h>
-
-/* FORMAT DEFINITIONS */
-
-static const struct mxr_format mxr_fb_fmt_rgb565 = {
-	.name = "RGB565",
-	.fourcc = V4L2_PIX_FMT_RGB565,
-	.colorspace = V4L2_COLORSPACE_SRGB,
-	.num_planes = 1,
-	.plane = {
-		{ .width = 1, .height = 1, .size = 2 },
-	},
-	.num_subframes = 1,
-	.cookie = 4,
-};
-
-static const struct mxr_format mxr_fb_fmt_argb1555 = {
-	.name = "ARGB1555",
-	.num_planes = 1,
-	.fourcc = V4L2_PIX_FMT_RGB555,
-	.colorspace = V4L2_COLORSPACE_SRGB,
-	.plane = {
-		{ .width = 1, .height = 1, .size = 2 },
-	},
-	.num_subframes = 1,
-	.cookie = 5,
-};
-
-static const struct mxr_format mxr_fb_fmt_argb4444 = {
-	.name = "ARGB4444",
-	.num_planes = 1,
-	.fourcc = V4L2_PIX_FMT_RGB444,
-	.colorspace = V4L2_COLORSPACE_SRGB,
-	.plane = {
-		{ .width = 1, .height = 1, .size = 2 },
-	},
-	.num_subframes = 1,
-	.cookie = 6,
-};
-
-static const struct mxr_format mxr_fb_fmt_argb8888 = {
-	.name = "ARGB8888",
-	.fourcc = V4L2_PIX_FMT_BGR32,
-	.colorspace = V4L2_COLORSPACE_SRGB,
-	.num_planes = 1,
-	.plane = {
-		{ .width = 1, .height = 1, .size = 4 },
-	},
-	.num_subframes = 1,
-	.cookie = 7,
-};
-
-static const struct mxr_format *mxr_graph_format[] = {
-	&mxr_fb_fmt_rgb565,
-	&mxr_fb_fmt_argb1555,
-	&mxr_fb_fmt_argb4444,
-	&mxr_fb_fmt_argb8888,
-};
-
-/* AUXILIARY CALLBACKS */
-
-static void mxr_graph_layer_release(struct mxr_layer *layer)
-{
-	mxr_base_layer_unregister(layer);
-	mxr_base_layer_release(layer);
-}
-
-static void mxr_graph_buffer_set(struct mxr_layer *layer,
-	struct mxr_buffer *buf)
-{
-	dma_addr_t addr = 0;
-
-	if (buf)
-		addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
-	mxr_reg_graph_buffer(layer->mdev, layer->idx, addr);
-}
-
-static void mxr_graph_stream_set(struct mxr_layer *layer, int en)
-{
-	mxr_reg_graph_layer_stream(layer->mdev, layer->idx, en);
-}
-
-static void mxr_graph_format_set(struct mxr_layer *layer)
-{
-	mxr_reg_graph_format(layer->mdev, layer->idx,
-		layer->fmt, &layer->geo);
-}
-
-static inline unsigned int closest(unsigned int x, unsigned int a,
-	unsigned int b, unsigned long flags)
-{
-	unsigned int mid = (a + b) / 2;
-
-	/* choosing closest value with constraints according to table:
-	 * -------------+-----+-----+-----+-------+
-	 * flags	|  0  |  LE |  GE | LE|GE |
-	 * -------------+-----+-----+-----+-------+
-	 * x <= a	|  a  |  a  |  a  |   a   |
-	 * a < x <= mid	|  a  |  a  |  b  |   a   |
-	 * mid < x < b	|  b  |  a  |  b  |   b   |
-	 * b <= x	|  b  |  b  |  b  |   b   |
-	 * -------------+-----+-----+-----+-------+
-	 */
-
-	/* remove all non-constraint flags */
-	flags &= V4L2_SEL_FLAG_LE | V4L2_SEL_FLAG_GE;
-
-	if (x <= a)
-		return  a;
-	if (x >= b)
-		return b;
-	if (flags == V4L2_SEL_FLAG_LE)
-		return a;
-	if (flags == V4L2_SEL_FLAG_GE)
-		return b;
-	if (x <= mid)
-		return a;
-	return b;
-}
-
-static inline unsigned int do_center(unsigned int center,
-	unsigned int size, unsigned int upper, unsigned int flags)
-{
-	unsigned int lower;
-
-	if (flags & MXR_NO_OFFSET)
-		return 0;
-
-	lower = center - min(center, size / 2);
-	return min(lower, upper - size);
-}
-
-static void mxr_graph_fix_geometry(struct mxr_layer *layer,
-	enum mxr_geometry_stage stage, unsigned long flags)
-{
-	struct mxr_geometry *geo = &layer->geo;
-	struct mxr_crop *src = &geo->src;
-	struct mxr_crop *dst = &geo->dst;
-	unsigned int x_center, y_center;
-
-	switch (stage) {
-
-	case MXR_GEOMETRY_SINK: /* nothing to be fixed here */
-		flags = 0;
-		/* fall through */
-
-	case MXR_GEOMETRY_COMPOSE:
-		/* remember center of the area */
-		x_center = dst->x_offset + dst->width / 2;
-		y_center = dst->y_offset + dst->height / 2;
-		/* round up/down to 2 multiple depending on flags */
-		if (flags & V4L2_SEL_FLAG_LE) {
-			dst->width = round_down(dst->width, 2);
-			dst->height = round_down(dst->height, 2);
-		} else {
-			dst->width = round_up(dst->width, 2);
-			dst->height = round_up(dst->height, 2);
-		}
-		/* assure that compose rect is inside display area */
-		dst->width = min(dst->width, dst->full_width);
-		dst->height = min(dst->height, dst->full_height);
-
-		/* ensure that compose is reachable using 2x scaling */
-		dst->width = min(dst->width, 2 * src->full_width);
-		dst->height = min(dst->height, 2 * src->full_height);
-
-		/* setup offsets */
-		dst->x_offset = do_center(x_center, dst->width,
-			dst->full_width, flags);
-		dst->y_offset = do_center(y_center, dst->height,
-			dst->full_height, flags);
-		flags = 0;
-		/* fall through */
-
-	case MXR_GEOMETRY_CROP:
-		/* remember center of the area */
-		x_center = src->x_offset + src->width / 2;
-		y_center = src->y_offset + src->height / 2;
-		/* ensure that cropping area lies inside the buffer */
-		if (src->full_width < dst->width)
-			src->width = dst->width / 2;
-		else
-			src->width = closest(src->width, dst->width / 2,
-				dst->width, flags);
-
-		if (src->width == dst->width)
-			geo->x_ratio = 0;
-		else
-			geo->x_ratio = 1;
-
-		if (src->full_height < dst->height)
-			src->height = dst->height / 2;
-		else
-			src->height = closest(src->height, dst->height / 2,
-				dst->height, flags);
-
-		if (src->height == dst->height)
-			geo->y_ratio = 0;
-		else
-			geo->y_ratio = 1;
-
-		/* setup offsets */
-		src->x_offset = do_center(x_center, src->width,
-			src->full_width, flags);
-		src->y_offset = do_center(y_center, src->height,
-			src->full_height, flags);
-		flags = 0;
-		/* fall through */
-	case MXR_GEOMETRY_SOURCE:
-		src->full_width = clamp_val(src->full_width,
-			src->width + src->x_offset, 32767);
-		src->full_height = clamp_val(src->full_height,
-			src->height + src->y_offset, 2047);
-	}
-}
-
-/* PUBLIC API */
-
-struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
-{
-	struct mxr_layer *layer;
-	int ret;
-	const struct mxr_layer_ops ops = {
-		.release = mxr_graph_layer_release,
-		.buffer_set = mxr_graph_buffer_set,
-		.stream_set = mxr_graph_stream_set,
-		.format_set = mxr_graph_format_set,
-		.fix_geometry = mxr_graph_fix_geometry,
-	};
-	char name[32];
-
-	sprintf(name, "graph%d", idx);
-
-	layer = mxr_base_layer_create(mdev, idx, name, &ops);
-	if (layer == NULL) {
-		mxr_err(mdev, "failed to initialize layer(%d) base\n", idx);
-		goto fail;
-	}
-
-	layer->fmt_array = mxr_graph_format;
-	layer->fmt_array_size = ARRAY_SIZE(mxr_graph_format);
-
-	ret = mxr_base_layer_register(layer);
-	if (ret)
-		goto fail_layer;
-
-	return layer;
-
-fail_layer:
-	mxr_base_layer_release(layer);
-
-fail:
-	return NULL;
-}
-
diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
deleted file mode 100644
index a0ec14a..0000000
--- a/drivers/media/platform/s5p-tv/mixer_reg.c
+++ /dev/null
@@ -1,551 +0,0 @@
-/*
- * Samsung TV Mixer driver
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *
- * Tomasz Stanislawski, <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundiation. either version 2 of the License,
- * or (at your option) any later version
- */
-
-#include "mixer.h"
-#include "regs-mixer.h"
-#include "regs-vp.h"
-
-#include <linux/delay.h>
-
-/* Register access subroutines */
-
-static inline u32 vp_read(struct mxr_device *mdev, u32 reg_id)
-{
-	return readl(mdev->res.vp_regs + reg_id);
-}
-
-static inline void vp_write(struct mxr_device *mdev, u32 reg_id, u32 val)
-{
-	writel(val, mdev->res.vp_regs + reg_id);
-}
-
-static inline void vp_write_mask(struct mxr_device *mdev, u32 reg_id,
-	u32 val, u32 mask)
-{
-	u32 old = vp_read(mdev, reg_id);
-
-	val = (val & mask) | (old & ~mask);
-	writel(val, mdev->res.vp_regs + reg_id);
-}
-
-static inline u32 mxr_read(struct mxr_device *mdev, u32 reg_id)
-{
-	return readl(mdev->res.mxr_regs + reg_id);
-}
-
-static inline void mxr_write(struct mxr_device *mdev, u32 reg_id, u32 val)
-{
-	writel(val, mdev->res.mxr_regs + reg_id);
-}
-
-static inline void mxr_write_mask(struct mxr_device *mdev, u32 reg_id,
-	u32 val, u32 mask)
-{
-	u32 old = mxr_read(mdev, reg_id);
-
-	val = (val & mask) | (old & ~mask);
-	writel(val, mdev->res.mxr_regs + reg_id);
-}
-
-void mxr_vsync_set_update(struct mxr_device *mdev, int en)
-{
-	/* block update on vsync */
-	mxr_write_mask(mdev, MXR_STATUS, en ? MXR_STATUS_SYNC_ENABLE : 0,
-		MXR_STATUS_SYNC_ENABLE);
-	vp_write(mdev, VP_SHADOW_UPDATE, en ? VP_SHADOW_UPDATE_ENABLE : 0);
-}
-
-static void __mxr_reg_vp_reset(struct mxr_device *mdev)
-{
-	int tries = 100;
-
-	vp_write(mdev, VP_SRESET, VP_SRESET_PROCESSING);
-	for (tries = 100; tries; --tries) {
-		/* waiting until VP_SRESET_PROCESSING is 0 */
-		if (~vp_read(mdev, VP_SRESET) & VP_SRESET_PROCESSING)
-			break;
-		mdelay(10);
-	}
-	WARN(tries == 0, "failed to reset Video Processor\n");
-}
-
-static void mxr_reg_vp_default_filter(struct mxr_device *mdev);
-
-void mxr_reg_reset(struct mxr_device *mdev)
-{
-	unsigned long flags;
-	u32 val; /* value stored to register */
-
-	spin_lock_irqsave(&mdev->reg_slock, flags);
-	mxr_vsync_set_update(mdev, MXR_DISABLE);
-
-	/* set output in RGB888 mode */
-	mxr_write(mdev, MXR_CFG, MXR_CFG_OUT_RGB888);
-
-	/* 16 beat burst in DMA */
-	mxr_write_mask(mdev, MXR_STATUS, MXR_STATUS_16_BURST,
-		MXR_STATUS_BURST_MASK);
-
-	/* setting default layer priority: layer1 > video > layer0
-	 * because typical usage scenario would be
-	 * layer0 - framebuffer
-	 * video - video overlay
-	 * layer1 - OSD
-	 */
-	val  = MXR_LAYER_CFG_GRP0_VAL(1);
-	val |= MXR_LAYER_CFG_VP_VAL(2);
-	val |= MXR_LAYER_CFG_GRP1_VAL(3);
-	mxr_write(mdev, MXR_LAYER_CFG, val);
-
-	/* use dark gray background color */
-	mxr_write(mdev, MXR_BG_COLOR0, 0x808080);
-	mxr_write(mdev, MXR_BG_COLOR1, 0x808080);
-	mxr_write(mdev, MXR_BG_COLOR2, 0x808080);
-
-	/* setting graphical layers */
-
-	val  = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
-	val |= MXR_GRP_CFG_BLEND_PRE_MUL; /* premul mode */
-	val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
-
-	/* the same configuration for both layers */
-	mxr_write(mdev, MXR_GRAPHIC_CFG(0), val);
-	mxr_write(mdev, MXR_GRAPHIC_CFG(1), val);
-
-	/* configuration of Video Processor Registers */
-	__mxr_reg_vp_reset(mdev);
-	mxr_reg_vp_default_filter(mdev);
-
-	/* enable all interrupts */
-	mxr_write_mask(mdev, MXR_INT_EN, ~0, MXR_INT_EN_ALL);
-
-	mxr_vsync_set_update(mdev, MXR_ENABLE);
-	spin_unlock_irqrestore(&mdev->reg_slock, flags);
-}
-
-void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
-	const struct mxr_format *fmt, const struct mxr_geometry *geo)
-{
-	u32 val;
-	unsigned long flags;
-
-	spin_lock_irqsave(&mdev->reg_slock, flags);
-	mxr_vsync_set_update(mdev, MXR_DISABLE);
-
-	/* setup format */
-	mxr_write_mask(mdev, MXR_GRAPHIC_CFG(idx),
-		MXR_GRP_CFG_FORMAT_VAL(fmt->cookie), MXR_GRP_CFG_FORMAT_MASK);
-
-	/* setup geometry */
-	mxr_write(mdev, MXR_GRAPHIC_SPAN(idx), geo->src.full_width);
-	val  = MXR_GRP_WH_WIDTH(geo->src.width);
-	val |= MXR_GRP_WH_HEIGHT(geo->src.height);
-	val |= MXR_GRP_WH_H_SCALE(geo->x_ratio);
-	val |= MXR_GRP_WH_V_SCALE(geo->y_ratio);
-	mxr_write(mdev, MXR_GRAPHIC_WH(idx), val);
-
-	/* setup offsets in source image */
-	val  = MXR_GRP_SXY_SX(geo->src.x_offset);
-	val |= MXR_GRP_SXY_SY(geo->src.y_offset);
-	mxr_write(mdev, MXR_GRAPHIC_SXY(idx), val);
-
-	/* setup offsets in display image */
-	val  = MXR_GRP_DXY_DX(geo->dst.x_offset);
-	val |= MXR_GRP_DXY_DY(geo->dst.y_offset);
-	mxr_write(mdev, MXR_GRAPHIC_DXY(idx), val);
-
-	mxr_vsync_set_update(mdev, MXR_ENABLE);
-	spin_unlock_irqrestore(&mdev->reg_slock, flags);
-}
-
-void mxr_reg_vp_format(struct mxr_device *mdev,
-	const struct mxr_format *fmt, const struct mxr_geometry *geo)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&mdev->reg_slock, flags);
-	mxr_vsync_set_update(mdev, MXR_DISABLE);
-
-	vp_write_mask(mdev, VP_MODE, fmt->cookie, VP_MODE_FMT_MASK);
-
-	/* setting size of input image */
-	vp_write(mdev, VP_IMG_SIZE_Y, VP_IMG_HSIZE(geo->src.full_width) |
-		VP_IMG_VSIZE(geo->src.full_height));
-	/* chroma height has to reduced by 2 to avoid chroma distorions */
-	vp_write(mdev, VP_IMG_SIZE_C, VP_IMG_HSIZE(geo->src.full_width) |
-		VP_IMG_VSIZE(geo->src.full_height / 2));
-
-	vp_write(mdev, VP_SRC_WIDTH, geo->src.width);
-	vp_write(mdev, VP_SRC_HEIGHT, geo->src.height);
-	vp_write(mdev, VP_SRC_H_POSITION,
-		VP_SRC_H_POSITION_VAL(geo->src.x_offset));
-	vp_write(mdev, VP_SRC_V_POSITION, geo->src.y_offset);
-
-	vp_write(mdev, VP_DST_WIDTH, geo->dst.width);
-	vp_write(mdev, VP_DST_H_POSITION, geo->dst.x_offset);
-	if (geo->dst.field == V4L2_FIELD_INTERLACED) {
-		vp_write(mdev, VP_DST_HEIGHT, geo->dst.height / 2);
-		vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset / 2);
-	} else {
-		vp_write(mdev, VP_DST_HEIGHT, geo->dst.height);
-		vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset);
-	}
-
-	vp_write(mdev, VP_H_RATIO, geo->x_ratio);
-	vp_write(mdev, VP_V_RATIO, geo->y_ratio);
-
-	vp_write(mdev, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
-
-	mxr_vsync_set_update(mdev, MXR_ENABLE);
-	spin_unlock_irqrestore(&mdev->reg_slock, flags);
-
-}
-
-void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr)
-{
-	u32 val = addr ? ~0 : 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&mdev->reg_slock, flags);
-	mxr_vsync_set_update(mdev, MXR_DISABLE);
-
-	if (idx == 0)
-		mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
-	else
-		mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
-	mxr_write(mdev, MXR_GRAPHIC_BASE(idx), addr);
-
-	mxr_vsync_set_update(mdev, MXR_ENABLE);
-	spin_unlock_irqrestore(&mdev->reg_slock, flags);
-}
-
-void mxr_reg_vp_buffer(struct mxr_device *mdev,
-	dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2])
-{
-	u32 val = luma_addr[0] ? ~0 : 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&mdev->reg_slock, flags);
-	mxr_vsync_set_update(mdev, MXR_DISABLE);
-
-	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_VP_ENABLE);
-	vp_write_mask(mdev, VP_ENABLE, val, VP_ENABLE_ON);
-	/* TODO: fix tiled mode */
-	vp_write(mdev, VP_TOP_Y_PTR, luma_addr[0]);
-	vp_write(mdev, VP_TOP_C_PTR, chroma_addr[0]);
-	vp_write(mdev, VP_BOT_Y_PTR, luma_addr[1]);
-	vp_write(mdev, VP_BOT_C_PTR, chroma_addr[1]);
-
-	mxr_vsync_set_update(mdev, MXR_ENABLE);
-	spin_unlock_irqrestore(&mdev->reg_slock, flags);
-}
-
-static void mxr_irq_layer_handle(struct mxr_layer *layer)
-{
-	struct list_head *head = &layer->enq_list;
-	struct mxr_buffer *done;
-
-	/* skip non-existing layer */
-	if (layer == NULL)
-		return;
-
-	spin_lock(&layer->enq_slock);
-	if (layer->state == MXR_LAYER_IDLE)
-		goto done;
-
-	done = layer->shadow_buf;
-	layer->shadow_buf = layer->update_buf;
-
-	if (list_empty(head)) {
-		if (layer->state != MXR_LAYER_STREAMING)
-			layer->update_buf = NULL;
-	} else {
-		struct mxr_buffer *next;
-		next = list_first_entry(head, struct mxr_buffer, list);
-		list_del(&next->list);
-		layer->update_buf = next;
-	}
-
-	layer->ops.buffer_set(layer, layer->update_buf);
-
-	if (done && done != layer->shadow_buf)
-		vb2_buffer_done(&done->vb.vb2_buf, VB2_BUF_STATE_DONE);
-
-done:
-	spin_unlock(&layer->enq_slock);
-}
-
-irqreturn_t mxr_irq_handler(int irq, void *dev_data)
-{
-	struct mxr_device *mdev = dev_data;
-	u32 i, val;
-
-	spin_lock(&mdev->reg_slock);
-	val = mxr_read(mdev, MXR_INT_STATUS);
-
-	/* wake up process waiting for VSYNC */
-	if (val & MXR_INT_STATUS_VSYNC) {
-		set_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
-		/* toggle TOP field event if working in interlaced mode */
-		if (~mxr_read(mdev, MXR_CFG) & MXR_CFG_SCAN_PROGRASSIVE)
-			change_bit(MXR_EVENT_TOP, &mdev->event_flags);
-		wake_up(&mdev->event_queue);
-		/* vsync interrupt use different bit for read and clear */
-		val &= ~MXR_INT_STATUS_VSYNC;
-		val |= MXR_INT_CLEAR_VSYNC;
-	}
-
-	/* clear interrupts */
-	mxr_write(mdev, MXR_INT_STATUS, val);
-
-	spin_unlock(&mdev->reg_slock);
-	/* leave on non-vsync event */
-	if (~val & MXR_INT_CLEAR_VSYNC)
-		return IRQ_HANDLED;
-	/* skip layer update on bottom field */
-	if (!test_bit(MXR_EVENT_TOP, &mdev->event_flags))
-		return IRQ_HANDLED;
-	for (i = 0; i < MXR_MAX_LAYERS; ++i)
-		mxr_irq_layer_handle(mdev->layer[i]);
-	return IRQ_HANDLED;
-}
-
-void mxr_reg_s_output(struct mxr_device *mdev, int cookie)
-{
-	u32 val;
-
-	val = cookie == 0 ? MXR_CFG_DST_SDO : MXR_CFG_DST_HDMI;
-	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_DST_MASK);
-}
-
-void mxr_reg_streamon(struct mxr_device *mdev)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&mdev->reg_slock, flags);
-	/* single write -> no need to block vsync update */
-
-	/* start MIXER */
-	mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
-	set_bit(MXR_EVENT_TOP, &mdev->event_flags);
-
-	spin_unlock_irqrestore(&mdev->reg_slock, flags);
-}
-
-void mxr_reg_streamoff(struct mxr_device *mdev)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&mdev->reg_slock, flags);
-	/* single write -> no need to block vsync update */
-
-	/* stop MIXER */
-	mxr_write_mask(mdev, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
-
-	spin_unlock_irqrestore(&mdev->reg_slock, flags);
-}
-
-int mxr_reg_wait4vsync(struct mxr_device *mdev)
-{
-	long time_left;
-
-	clear_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
-	/* TODO: consider adding interruptible */
-	time_left = wait_event_timeout(mdev->event_queue,
-			test_bit(MXR_EVENT_VSYNC, &mdev->event_flags),
-				 msecs_to_jiffies(1000));
-	if (time_left > 0)
-		return 0;
-	mxr_warn(mdev, "no vsync detected - timeout\n");
-	return -ETIME;
-}
-
-void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
-	struct v4l2_mbus_framefmt *fmt)
-{
-	u32 val = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&mdev->reg_slock, flags);
-	mxr_vsync_set_update(mdev, MXR_DISABLE);
-
-	/* selecting colorspace accepted by output */
-	if (fmt->colorspace == V4L2_COLORSPACE_JPEG)
-		val |= MXR_CFG_OUT_YUV444;
-	else
-		val |= MXR_CFG_OUT_RGB888;
-
-	/* choosing between interlace and progressive mode */
-	if (fmt->field == V4L2_FIELD_INTERLACED)
-		val |= MXR_CFG_SCAN_INTERLACE;
-	else
-		val |= MXR_CFG_SCAN_PROGRASSIVE;
-
-	/* choosing between porper HD and SD mode */
-	if (fmt->height == 480)
-		val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
-	else if (fmt->height == 576)
-		val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
-	else if (fmt->height == 720)
-		val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
-	else if (fmt->height == 1080)
-		val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
-	else
-		WARN(1, "unrecognized mbus height %u!\n", fmt->height);
-
-	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_SCAN_MASK |
-		MXR_CFG_OUT_MASK);
-
-	val = (fmt->field == V4L2_FIELD_INTERLACED) ? ~0 : 0;
-	vp_write_mask(mdev, VP_MODE, val,
-		VP_MODE_LINE_SKIP | VP_MODE_FIELD_ID_AUTO_TOGGLING);
-
-	mxr_vsync_set_update(mdev, MXR_ENABLE);
-	spin_unlock_irqrestore(&mdev->reg_slock, flags);
-}
-
-void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en)
-{
-	/* no extra actions need to be done */
-}
-
-void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en)
-{
-	/* no extra actions need to be done */
-}
-
-static const u8 filter_y_horiz_tap8[] = {
-	0,	-1,	-1,	-1,	-1,	-1,	-1,	-1,
-	-1,	-1,	-1,	-1,	-1,	0,	0,	0,
-	0,	2,	4,	5,	6,	6,	6,	6,
-	6,	5,	5,	4,	3,	2,	1,	1,
-	0,	-6,	-12,	-16,	-18,	-20,	-21,	-20,
-	-20,	-18,	-16,	-13,	-10,	-8,	-5,	-2,
-	127,	126,	125,	121,	114,	107,	99,	89,
-	79,	68,	57,	46,	35,	25,	16,	8,
-};
-
-static const u8 filter_y_vert_tap4[] = {
-	0,	-3,	-6,	-8,	-8,	-8,	-8,	-7,
-	-6,	-5,	-4,	-3,	-2,	-1,	-1,	0,
-	127,	126,	124,	118,	111,	102,	92,	81,
-	70,	59,	48,	37,	27,	19,	11,	5,
-	0,	5,	11,	19,	27,	37,	48,	59,
-	70,	81,	92,	102,	111,	118,	124,	126,
-	0,	0,	-1,	-1,	-2,	-3,	-4,	-5,
-	-6,	-7,	-8,	-8,	-8,	-8,	-6,	-3,
-};
-
-static const u8 filter_cr_horiz_tap4[] = {
-	0,	-3,	-6,	-8,	-8,	-8,	-8,	-7,
-	-6,	-5,	-4,	-3,	-2,	-1,	-1,	0,
-	127,	126,	124,	118,	111,	102,	92,	81,
-	70,	59,	48,	37,	27,	19,	11,	5,
-};
-
-static inline void mxr_reg_vp_filter_set(struct mxr_device *mdev,
-	int reg_id, const u8 *data, unsigned int size)
-{
-	/* assure 4-byte align */
-	BUG_ON(size & 3);
-	for (; size; size -= 4, reg_id += 4, data += 4) {
-		u32 val = (data[0] << 24) |  (data[1] << 16) |
-			(data[2] << 8) | data[3];
-		vp_write(mdev, reg_id, val);
-	}
-}
-
-static void mxr_reg_vp_default_filter(struct mxr_device *mdev)
-{
-	mxr_reg_vp_filter_set(mdev, VP_POLY8_Y0_LL,
-		filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
-	mxr_reg_vp_filter_set(mdev, VP_POLY4_Y0_LL,
-		filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
-	mxr_reg_vp_filter_set(mdev, VP_POLY4_C0_LL,
-		filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
-}
-
-static void mxr_reg_mxr_dump(struct mxr_device *mdev)
-{
-#define DUMPREG(reg_id) \
-do { \
-	mxr_dbg(mdev, #reg_id " = %08x\n", \
-		(u32)readl(mdev->res.mxr_regs + reg_id)); \
-} while (0)
-
-	DUMPREG(MXR_STATUS);
-	DUMPREG(MXR_CFG);
-	DUMPREG(MXR_INT_EN);
-	DUMPREG(MXR_INT_STATUS);
-
-	DUMPREG(MXR_LAYER_CFG);
-	DUMPREG(MXR_VIDEO_CFG);
-
-	DUMPREG(MXR_GRAPHIC0_CFG);
-	DUMPREG(MXR_GRAPHIC0_BASE);
-	DUMPREG(MXR_GRAPHIC0_SPAN);
-	DUMPREG(MXR_GRAPHIC0_WH);
-	DUMPREG(MXR_GRAPHIC0_SXY);
-	DUMPREG(MXR_GRAPHIC0_DXY);
-
-	DUMPREG(MXR_GRAPHIC1_CFG);
-	DUMPREG(MXR_GRAPHIC1_BASE);
-	DUMPREG(MXR_GRAPHIC1_SPAN);
-	DUMPREG(MXR_GRAPHIC1_WH);
-	DUMPREG(MXR_GRAPHIC1_SXY);
-	DUMPREG(MXR_GRAPHIC1_DXY);
-#undef DUMPREG
-}
-
-static void mxr_reg_vp_dump(struct mxr_device *mdev)
-{
-#define DUMPREG(reg_id) \
-do { \
-	mxr_dbg(mdev, #reg_id " = %08x\n", \
-		(u32) readl(mdev->res.vp_regs + reg_id)); \
-} while (0)
-
-
-	DUMPREG(VP_ENABLE);
-	DUMPREG(VP_SRESET);
-	DUMPREG(VP_SHADOW_UPDATE);
-	DUMPREG(VP_FIELD_ID);
-	DUMPREG(VP_MODE);
-	DUMPREG(VP_IMG_SIZE_Y);
-	DUMPREG(VP_IMG_SIZE_C);
-	DUMPREG(VP_PER_RATE_CTRL);
-	DUMPREG(VP_TOP_Y_PTR);
-	DUMPREG(VP_BOT_Y_PTR);
-	DUMPREG(VP_TOP_C_PTR);
-	DUMPREG(VP_BOT_C_PTR);
-	DUMPREG(VP_ENDIAN_MODE);
-	DUMPREG(VP_SRC_H_POSITION);
-	DUMPREG(VP_SRC_V_POSITION);
-	DUMPREG(VP_SRC_WIDTH);
-	DUMPREG(VP_SRC_HEIGHT);
-	DUMPREG(VP_DST_H_POSITION);
-	DUMPREG(VP_DST_V_POSITION);
-	DUMPREG(VP_DST_WIDTH);
-	DUMPREG(VP_DST_HEIGHT);
-	DUMPREG(VP_H_RATIO);
-	DUMPREG(VP_V_RATIO);
-
-#undef DUMPREG
-}
-
-void mxr_reg_dump(struct mxr_device *mdev)
-{
-	mxr_reg_mxr_dump(mdev);
-	mxr_reg_vp_dump(mdev);
-}
-
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
deleted file mode 100644
index ee74e2b..0000000
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ /dev/null
@@ -1,1130 +0,0 @@
-/*
- * Samsung TV Mixer driver
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *
- * Tomasz Stanislawski, <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundation. either version 2 of the License,
- * or (at your option) any later version
- */
-
-#define pr_fmt(fmt) "s5p-tv (mixer): " fmt
-
-#include "mixer.h"
-
-#include <media/v4l2-ioctl.h>
-#include <linux/videodev2.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/timer.h>
-#include <media/videobuf2-dma-contig.h>
-
-static int find_reg_callback(struct device *dev, void *p)
-{
-	struct v4l2_subdev **sd = p;
-
-	*sd = dev_get_drvdata(dev);
-	/* non-zero value stops iteration */
-	return 1;
-}
-
-static struct v4l2_subdev *find_and_register_subdev(
-	struct mxr_device *mdev, char *module_name)
-{
-	struct device_driver *drv;
-	struct v4l2_subdev *sd = NULL;
-	int ret;
-
-	/* TODO: add waiting until probe is finished */
-	drv = driver_find(module_name, &platform_bus_type);
-	if (!drv) {
-		mxr_warn(mdev, "module %s is missing\n", module_name);
-		return NULL;
-	}
-	/* driver refcnt is increased, it is safe to iterate over devices */
-	ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
-	/* ret == 0 means that find_reg_callback was never executed */
-	if (sd == NULL) {
-		mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
-		goto done;
-	}
-	/* v4l2_device_register_subdev detects if sd is NULL */
-	ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
-	if (ret) {
-		mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
-		sd = NULL;
-	}
-
-done:
-	return sd;
-}
-
-int mxr_acquire_video(struct mxr_device *mdev,
-		      struct mxr_output_conf *output_conf, int output_count)
-{
-	struct device *dev = mdev->dev;
-	struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
-	int i;
-	int ret = 0;
-	struct v4l2_subdev *sd;
-
-	strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
-	/* prepare context for V4L2 device */
-	ret = v4l2_device_register(dev, v4l2_dev);
-	if (ret) {
-		mxr_err(mdev, "could not register v4l2 device.\n");
-		goto fail;
-	}
-
-	vb2_dma_contig_set_max_seg_size(mdev->dev, DMA_BIT_MASK(32));
-
-	/* registering outputs */
-	mdev->output_cnt = 0;
-	for (i = 0; i < output_count; ++i) {
-		struct mxr_output_conf *conf = &output_conf[i];
-		struct mxr_output *out;
-
-		sd = find_and_register_subdev(mdev, conf->module_name);
-		/* trying to register next output */
-		if (sd == NULL)
-			continue;
-		out = kzalloc(sizeof(*out), GFP_KERNEL);
-		if (out == NULL) {
-			mxr_err(mdev, "no memory for '%s'\n",
-				conf->output_name);
-			ret = -ENOMEM;
-			/* registered subdevs are removed in fail_v4l2_dev */
-			goto fail_output;
-		}
-		strlcpy(out->name, conf->output_name, sizeof(out->name));
-		out->sd = sd;
-		out->cookie = conf->cookie;
-		mdev->output[mdev->output_cnt++] = out;
-		mxr_info(mdev, "added output '%s' from module '%s'\n",
-			conf->output_name, conf->module_name);
-		/* checking if maximal number of outputs is reached */
-		if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
-			break;
-	}
-
-	if (mdev->output_cnt == 0) {
-		mxr_err(mdev, "failed to register any output\n");
-		ret = -ENODEV;
-		/* skipping fail_output because there is nothing to free */
-		goto fail_v4l2_dev;
-	}
-
-	return 0;
-
-fail_output:
-	/* kfree is NULL-safe */
-	for (i = 0; i < mdev->output_cnt; ++i)
-		kfree(mdev->output[i]);
-	memset(mdev->output, 0, sizeof(mdev->output));
-
-fail_v4l2_dev:
-	/* NOTE: automatically unregister all subdevs */
-	v4l2_device_unregister(v4l2_dev);
-
-fail:
-	return ret;
-}
-
-void mxr_release_video(struct mxr_device *mdev)
-{
-	int i;
-
-	/* kfree is NULL-safe */
-	for (i = 0; i < mdev->output_cnt; ++i)
-		kfree(mdev->output[i]);
-
-	vb2_dma_contig_clear_max_seg_size(mdev->dev);
-	v4l2_device_unregister(&mdev->v4l2_dev);
-}
-
-static int mxr_querycap(struct file *file, void *priv,
-	struct v4l2_capability *cap)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-
-	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
-
-	strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof(cap->driver));
-	strlcpy(cap->card, layer->vfd.name, sizeof(cap->card));
-	sprintf(cap->bus_info, "%d", layer->idx);
-	cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
-	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
-	return 0;
-}
-
-static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
-{
-	mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
-		geo->src.full_width, geo->src.full_height);
-	mxr_dbg(mdev, "src.size = (%u, %u)\n",
-		geo->src.width, geo->src.height);
-	mxr_dbg(mdev, "src.offset = (%u, %u)\n",
-		geo->src.x_offset, geo->src.y_offset);
-	mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
-		geo->dst.full_width, geo->dst.full_height);
-	mxr_dbg(mdev, "dst.size = (%u, %u)\n",
-		geo->dst.width, geo->dst.height);
-	mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
-		geo->dst.x_offset, geo->dst.y_offset);
-	mxr_dbg(mdev, "ratio = (%u, %u)\n",
-		geo->x_ratio, geo->y_ratio);
-}
-
-static void mxr_layer_default_geo(struct mxr_layer *layer)
-{
-	struct mxr_device *mdev = layer->mdev;
-	struct v4l2_mbus_framefmt mbus_fmt;
-
-	memset(&layer->geo, 0, sizeof(layer->geo));
-
-	mxr_get_mbus_fmt(mdev, &mbus_fmt);
-
-	layer->geo.dst.full_width = mbus_fmt.width;
-	layer->geo.dst.full_height = mbus_fmt.height;
-	layer->geo.dst.width = layer->geo.dst.full_width;
-	layer->geo.dst.height = layer->geo.dst.full_height;
-	layer->geo.dst.field = mbus_fmt.field;
-
-	layer->geo.src.full_width = mbus_fmt.width;
-	layer->geo.src.full_height = mbus_fmt.height;
-	layer->geo.src.width = layer->geo.src.full_width;
-	layer->geo.src.height = layer->geo.src.full_height;
-
-	mxr_geometry_dump(mdev, &layer->geo);
-	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
-	mxr_geometry_dump(mdev, &layer->geo);
-}
-
-static void mxr_layer_update_output(struct mxr_layer *layer)
-{
-	struct mxr_device *mdev = layer->mdev;
-	struct v4l2_mbus_framefmt mbus_fmt;
-
-	mxr_get_mbus_fmt(mdev, &mbus_fmt);
-	/* checking if update is needed */
-	if (layer->geo.dst.full_width == mbus_fmt.width &&
-		layer->geo.dst.full_height == mbus_fmt.width)
-		return;
-
-	layer->geo.dst.full_width = mbus_fmt.width;
-	layer->geo.dst.full_height = mbus_fmt.height;
-	layer->geo.dst.field = mbus_fmt.field;
-	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
-
-	mxr_geometry_dump(mdev, &layer->geo);
-}
-
-static const struct mxr_format *find_format_by_fourcc(
-	struct mxr_layer *layer, unsigned long fourcc);
-static const struct mxr_format *find_format_by_index(
-	struct mxr_layer *layer, unsigned long index);
-
-static int mxr_enum_fmt(struct file *file, void  *priv,
-	struct v4l2_fmtdesc *f)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-	struct mxr_device *mdev = layer->mdev;
-	const struct mxr_format *fmt;
-
-	mxr_dbg(mdev, "%s\n", __func__);
-	fmt = find_format_by_index(layer, f->index);
-	if (fmt == NULL)
-		return -EINVAL;
-
-	strlcpy(f->description, fmt->name, sizeof(f->description));
-	f->pixelformat = fmt->fourcc;
-
-	return 0;
-}
-
-static unsigned int divup(unsigned int divident, unsigned int divisor)
-{
-	return (divident + divisor - 1) / divisor;
-}
-
-unsigned long mxr_get_plane_size(const struct mxr_block *blk,
-	unsigned int width, unsigned int height)
-{
-	unsigned int bl_width = divup(width, blk->width);
-	unsigned int bl_height = divup(height, blk->height);
-
-	return bl_width * bl_height * blk->size;
-}
-
-static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
-	const struct mxr_format *fmt, u32 width, u32 height)
-{
-	int i;
-
-	/* checking if nothing to fill */
-	if (!planes)
-		return;
-
-	memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
-	for (i = 0; i < fmt->num_planes; ++i) {
-		struct v4l2_plane_pix_format *plane = planes
-			+ fmt->plane2subframe[i];
-		const struct mxr_block *blk = &fmt->plane[i];
-		u32 bl_width = divup(width, blk->width);
-		u32 bl_height = divup(height, blk->height);
-		u32 sizeimage = bl_width * bl_height * blk->size;
-		u32 bytesperline = bl_width * blk->size / blk->height;
-
-		plane->sizeimage += sizeimage;
-		plane->bytesperline = max(plane->bytesperline, bytesperline);
-	}
-}
-
-static int mxr_g_fmt(struct file *file, void *priv,
-			     struct v4l2_format *f)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
-
-	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
-
-	pix->width = layer->geo.src.full_width;
-	pix->height = layer->geo.src.full_height;
-	pix->field = V4L2_FIELD_NONE;
-	pix->pixelformat = layer->fmt->fourcc;
-	pix->colorspace = layer->fmt->colorspace;
-	mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);
-
-	return 0;
-}
-
-static int mxr_s_fmt(struct file *file, void *priv,
-	struct v4l2_format *f)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-	const struct mxr_format *fmt;
-	struct v4l2_pix_format_mplane *pix;
-	struct mxr_device *mdev = layer->mdev;
-	struct mxr_geometry *geo = &layer->geo;
-
-	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
-
-	pix = &f->fmt.pix_mp;
-	fmt = find_format_by_fourcc(layer, pix->pixelformat);
-	if (fmt == NULL) {
-		mxr_warn(mdev, "not recognized fourcc: %08x\n",
-			pix->pixelformat);
-		return -EINVAL;
-	}
-	layer->fmt = fmt;
-	/* set source size to highest accepted value */
-	geo->src.full_width = max(geo->dst.full_width, pix->width);
-	geo->src.full_height = max(geo->dst.full_height, pix->height);
-	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
-	mxr_geometry_dump(mdev, &layer->geo);
-	/* set cropping to total visible screen */
-	geo->src.width = pix->width;
-	geo->src.height = pix->height;
-	geo->src.x_offset = 0;
-	geo->src.y_offset = 0;
-	/* assure consistency of geometry */
-	layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
-	mxr_geometry_dump(mdev, &layer->geo);
-	/* set full size to lowest possible value */
-	geo->src.full_width = 0;
-	geo->src.full_height = 0;
-	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
-	mxr_geometry_dump(mdev, &layer->geo);
-
-	/* returning results */
-	mxr_g_fmt(file, priv, f);
-
-	return 0;
-}
-
-static int mxr_g_selection(struct file *file, void *fh,
-	struct v4l2_selection *s)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-	struct mxr_geometry *geo = &layer->geo;
-
-	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
-
-	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
-		s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
-		return -EINVAL;
-
-	switch (s->target) {
-	case V4L2_SEL_TGT_CROP:
-		s->r.left = geo->src.x_offset;
-		s->r.top = geo->src.y_offset;
-		s->r.width = geo->src.width;
-		s->r.height = geo->src.height;
-		break;
-	case V4L2_SEL_TGT_CROP_DEFAULT:
-	case V4L2_SEL_TGT_CROP_BOUNDS:
-		s->r.left = 0;
-		s->r.top = 0;
-		s->r.width = geo->src.full_width;
-		s->r.height = geo->src.full_height;
-		break;
-	case V4L2_SEL_TGT_COMPOSE:
-	case V4L2_SEL_TGT_COMPOSE_PADDED:
-		s->r.left = geo->dst.x_offset;
-		s->r.top = geo->dst.y_offset;
-		s->r.width = geo->dst.width;
-		s->r.height = geo->dst.height;
-		break;
-	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
-	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
-		s->r.left = 0;
-		s->r.top = 0;
-		s->r.width = geo->dst.full_width;
-		s->r.height = geo->dst.full_height;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-/* returns 1 if rectangle 'a' is inside 'b' */
-static int mxr_is_rect_inside(struct v4l2_rect *a, struct v4l2_rect *b)
-{
-	if (a->left < b->left)
-		return 0;
-	if (a->top < b->top)
-		return 0;
-	if (a->left + a->width > b->left + b->width)
-		return 0;
-	if (a->top + a->height > b->top + b->height)
-		return 0;
-	return 1;
-}
-
-static int mxr_s_selection(struct file *file, void *fh,
-	struct v4l2_selection *s)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-	struct mxr_geometry *geo = &layer->geo;
-	struct mxr_crop *target = NULL;
-	enum mxr_geometry_stage stage;
-	struct mxr_geometry tmp;
-	struct v4l2_rect res;
-
-	memset(&res, 0, sizeof(res));
-
-	mxr_dbg(layer->mdev, "%s: rect: %dx%d@%d,%d\n", __func__,
-		s->r.width, s->r.height, s->r.left, s->r.top);
-
-	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
-		s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
-		return -EINVAL;
-
-	switch (s->target) {
-	/* ignore read-only targets */
-	case V4L2_SEL_TGT_CROP_DEFAULT:
-	case V4L2_SEL_TGT_CROP_BOUNDS:
-		res.width = geo->src.full_width;
-		res.height = geo->src.full_height;
-		break;
-
-	/* ignore read-only targets */
-	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
-	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
-		res.width = geo->dst.full_width;
-		res.height = geo->dst.full_height;
-		break;
-
-	case V4L2_SEL_TGT_CROP:
-		target = &geo->src;
-		stage = MXR_GEOMETRY_CROP;
-		break;
-	case V4L2_SEL_TGT_COMPOSE:
-	case V4L2_SEL_TGT_COMPOSE_PADDED:
-		target = &geo->dst;
-		stage = MXR_GEOMETRY_COMPOSE;
-		break;
-	default:
-		return -EINVAL;
-	}
-	/* apply change and update geometry if needed */
-	if (target) {
-		/* backup current geometry if setup fails */
-		memcpy(&tmp, geo, sizeof(tmp));
-
-		/* apply requested selection */
-		target->x_offset = s->r.left;
-		target->y_offset = s->r.top;
-		target->width = s->r.width;
-		target->height = s->r.height;
-
-		layer->ops.fix_geometry(layer, stage, s->flags);
-
-		/* retrieve update selection rectangle */
-		res.left = target->x_offset;
-		res.top = target->y_offset;
-		res.width = target->width;
-		res.height = target->height;
-
-		mxr_geometry_dump(layer->mdev, &layer->geo);
-	}
-
-	/* checking if the rectangle satisfies constraints */
-	if ((s->flags & V4L2_SEL_FLAG_LE) && !mxr_is_rect_inside(&res, &s->r))
-		goto fail;
-	if ((s->flags & V4L2_SEL_FLAG_GE) && !mxr_is_rect_inside(&s->r, &res))
-		goto fail;
-
-	/* return result rectangle */
-	s->r = res;
-
-	return 0;
-fail:
-	/* restore old geometry, which is not touched if target is NULL */
-	if (target)
-		memcpy(geo, &tmp, sizeof(tmp));
-	return -ERANGE;
-}
-
-static int mxr_enum_dv_timings(struct file *file, void *fh,
-	struct v4l2_enum_dv_timings *timings)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-	struct mxr_device *mdev = layer->mdev;
-	int ret;
-
-	timings->pad = 0;
-
-	/* lock protects from changing sd_out */
-	mutex_lock(&mdev->mutex);
-	ret = v4l2_subdev_call(to_outsd(mdev), pad, enum_dv_timings, timings);
-	mutex_unlock(&mdev->mutex);
-
-	return ret ? -EINVAL : 0;
-}
-
-static int mxr_s_dv_timings(struct file *file, void *fh,
-	struct v4l2_dv_timings *timings)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-	struct mxr_device *mdev = layer->mdev;
-	int ret;
-
-	/* lock protects from changing sd_out */
-	mutex_lock(&mdev->mutex);
-
-	/* timings change cannot be done while there is an entity
-	 * dependent on output configuration
-	 */
-	if (mdev->n_output > 0) {
-		mutex_unlock(&mdev->mutex);
-		return -EBUSY;
-	}
-
-	ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_timings, timings);
-
-	mutex_unlock(&mdev->mutex);
-
-	mxr_layer_update_output(layer);
-
-	/* any failure should return EINVAL according to V4L2 doc */
-	return ret ? -EINVAL : 0;
-}
-
-static int mxr_g_dv_timings(struct file *file, void *fh,
-	struct v4l2_dv_timings *timings)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-	struct mxr_device *mdev = layer->mdev;
-	int ret;
-
-	/* lock protects from changing sd_out */
-	mutex_lock(&mdev->mutex);
-	ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_timings, timings);
-	mutex_unlock(&mdev->mutex);
-
-	return ret ? -EINVAL : 0;
-}
-
-static int mxr_dv_timings_cap(struct file *file, void *fh,
-	struct v4l2_dv_timings_cap *cap)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-	struct mxr_device *mdev = layer->mdev;
-	int ret;
-
-	cap->pad = 0;
-
-	/* lock protects from changing sd_out */
-	mutex_lock(&mdev->mutex);
-	ret = v4l2_subdev_call(to_outsd(mdev), pad, dv_timings_cap, cap);
-	mutex_unlock(&mdev->mutex);
-
-	return ret ? -EINVAL : 0;
-}
-
-static int mxr_s_std(struct file *file, void *fh, v4l2_std_id norm)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-	struct mxr_device *mdev = layer->mdev;
-	int ret;
-
-	/* lock protects from changing sd_out */
-	mutex_lock(&mdev->mutex);
-
-	/* standard change cannot be done while there is an entity
-	 * dependent on output configuration
-	 */
-	if (mdev->n_output > 0) {
-		mutex_unlock(&mdev->mutex);
-		return -EBUSY;
-	}
-
-	ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, norm);
-
-	mutex_unlock(&mdev->mutex);
-
-	mxr_layer_update_output(layer);
-
-	return ret ? -EINVAL : 0;
-}
-
-static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-	struct mxr_device *mdev = layer->mdev;
-	int ret;
-
-	/* lock protects from changing sd_out */
-	mutex_lock(&mdev->mutex);
-	ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
-	mutex_unlock(&mdev->mutex);
-
-	return ret ? -EINVAL : 0;
-}
-
-static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-	struct mxr_device *mdev = layer->mdev;
-	struct mxr_output *out;
-	struct v4l2_subdev *sd;
-
-	if (a->index >= mdev->output_cnt)
-		return -EINVAL;
-	out = mdev->output[a->index];
-	BUG_ON(out == NULL);
-	sd = out->sd;
-	strlcpy(a->name, out->name, sizeof(a->name));
-
-	/* try to obtain supported tv norms */
-	v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
-	a->capabilities = 0;
-	if (sd->ops->video && sd->ops->video->s_dv_timings)
-		a->capabilities |= V4L2_OUT_CAP_DV_TIMINGS;
-	if (sd->ops->video && sd->ops->video->s_std_output)
-		a->capabilities |= V4L2_OUT_CAP_STD;
-	a->type = V4L2_OUTPUT_TYPE_ANALOG;
-
-	return 0;
-}
-
-static int mxr_s_output(struct file *file, void *fh, unsigned int i)
-{
-	struct video_device *vfd = video_devdata(file);
-	struct mxr_layer *layer = video_drvdata(file);
-	struct mxr_device *mdev = layer->mdev;
-
-	if (i >= mdev->output_cnt || mdev->output[i] == NULL)
-		return -EINVAL;
-
-	mutex_lock(&mdev->mutex);
-	if (mdev->n_output > 0) {
-		mutex_unlock(&mdev->mutex);
-		return -EBUSY;
-	}
-	mdev->current_output = i;
-	vfd->tvnorms = 0;
-	v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
-		&vfd->tvnorms);
-	mutex_unlock(&mdev->mutex);
-
-	/* update layers geometry */
-	mxr_layer_update_output(layer);
-
-	mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);
-
-	return 0;
-}
-
-static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-	struct mxr_device *mdev = layer->mdev;
-
-	mutex_lock(&mdev->mutex);
-	*p = mdev->current_output;
-	mutex_unlock(&mdev->mutex);
-
-	return 0;
-}
-
-static int mxr_reqbufs(struct file *file, void *priv,
-			  struct v4l2_requestbuffers *p)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-
-	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
-	return vb2_reqbufs(&layer->vb_queue, p);
-}
-
-static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-
-	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
-	return vb2_querybuf(&layer->vb_queue, p);
-}
-
-static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-
-	mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
-	return vb2_qbuf(&layer->vb_queue, p);
-}
-
-static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-
-	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
-	return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
-}
-
-static int mxr_expbuf(struct file *file, void *priv,
-	struct v4l2_exportbuffer *eb)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-
-	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
-	return vb2_expbuf(&layer->vb_queue, eb);
-}
-
-static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-
-	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
-	return vb2_streamon(&layer->vb_queue, i);
-}
-
-static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-
-	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
-	return vb2_streamoff(&layer->vb_queue, i);
-}
-
-static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
-	.vidioc_querycap = mxr_querycap,
-	/* format handling */
-	.vidioc_enum_fmt_vid_out_mplane = mxr_enum_fmt,
-	.vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
-	.vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
-	/* buffer control */
-	.vidioc_reqbufs = mxr_reqbufs,
-	.vidioc_querybuf = mxr_querybuf,
-	.vidioc_qbuf = mxr_qbuf,
-	.vidioc_dqbuf = mxr_dqbuf,
-	.vidioc_expbuf = mxr_expbuf,
-	/* Streaming control */
-	.vidioc_streamon = mxr_streamon,
-	.vidioc_streamoff = mxr_streamoff,
-	/* DV Timings functions */
-	.vidioc_enum_dv_timings = mxr_enum_dv_timings,
-	.vidioc_s_dv_timings = mxr_s_dv_timings,
-	.vidioc_g_dv_timings = mxr_g_dv_timings,
-	.vidioc_dv_timings_cap = mxr_dv_timings_cap,
-	/* analog TV standard functions */
-	.vidioc_s_std = mxr_s_std,
-	.vidioc_g_std = mxr_g_std,
-	/* Output handling */
-	.vidioc_enum_output = mxr_enum_output,
-	.vidioc_s_output = mxr_s_output,
-	.vidioc_g_output = mxr_g_output,
-	/* selection ioctls */
-	.vidioc_g_selection = mxr_g_selection,
-	.vidioc_s_selection = mxr_s_selection,
-};
-
-static int mxr_video_open(struct file *file)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-	struct mxr_device *mdev = layer->mdev;
-	int ret = 0;
-
-	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
-	if (mutex_lock_interruptible(&layer->mutex))
-		return -ERESTARTSYS;
-	/* assure device probe is finished */
-	wait_for_device_probe();
-	/* creating context for file descriptor */
-	ret = v4l2_fh_open(file);
-	if (ret) {
-		mxr_err(mdev, "v4l2_fh_open failed\n");
-		goto unlock;
-	}
-
-	/* leaving if layer is already initialized */
-	if (!v4l2_fh_is_singular_file(file))
-		goto unlock;
-
-	/* FIXME: should power be enabled on open? */
-	ret = mxr_power_get(mdev);
-	if (ret) {
-		mxr_err(mdev, "power on failed\n");
-		goto fail_fh_open;
-	}
-
-	ret = vb2_queue_init(&layer->vb_queue);
-	if (ret != 0) {
-		mxr_err(mdev, "failed to initialize vb2 queue\n");
-		goto fail_power;
-	}
-	/* set default format, first on the list */
-	layer->fmt = layer->fmt_array[0];
-	/* setup default geometry */
-	mxr_layer_default_geo(layer);
-	mutex_unlock(&layer->mutex);
-
-	return 0;
-
-fail_power:
-	mxr_power_put(mdev);
-
-fail_fh_open:
-	v4l2_fh_release(file);
-
-unlock:
-	mutex_unlock(&layer->mutex);
-
-	return ret;
-}
-
-static unsigned int
-mxr_video_poll(struct file *file, struct poll_table_struct *wait)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-	unsigned int res;
-
-	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
-
-	mutex_lock(&layer->mutex);
-	res = vb2_poll(&layer->vb_queue, file, wait);
-	mutex_unlock(&layer->mutex);
-	return res;
-}
-
-static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-	int ret;
-
-	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
-
-	if (mutex_lock_interruptible(&layer->mutex))
-		return -ERESTARTSYS;
-	ret = vb2_mmap(&layer->vb_queue, vma);
-	mutex_unlock(&layer->mutex);
-	return ret;
-}
-
-static int mxr_video_release(struct file *file)
-{
-	struct mxr_layer *layer = video_drvdata(file);
-
-	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
-	mutex_lock(&layer->mutex);
-	if (v4l2_fh_is_singular_file(file)) {
-		vb2_queue_release(&layer->vb_queue);
-		mxr_power_put(layer->mdev);
-	}
-	v4l2_fh_release(file);
-	mutex_unlock(&layer->mutex);
-	return 0;
-}
-
-static const struct v4l2_file_operations mxr_fops = {
-	.owner = THIS_MODULE,
-	.open = mxr_video_open,
-	.poll = mxr_video_poll,
-	.mmap = mxr_video_mmap,
-	.release = mxr_video_release,
-	.unlocked_ioctl = video_ioctl2,
-};
-
-static int queue_setup(struct vb2_queue *vq,
-	unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[],
-	struct device *alloc_devs[])
-{
-	struct mxr_layer *layer = vb2_get_drv_priv(vq);
-	const struct mxr_format *fmt = layer->fmt;
-	int i;
-	struct mxr_device *mdev = layer->mdev;
-	struct v4l2_plane_pix_format planes[3];
-
-	mxr_dbg(mdev, "%s\n", __func__);
-	/* checking if format was configured */
-	if (fmt == NULL)
-		return -EINVAL;
-	mxr_dbg(mdev, "fmt = %s\n", fmt->name);
-	mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
-		layer->geo.src.full_height);
-
-	*nplanes = fmt->num_subframes;
-	for (i = 0; i < fmt->num_subframes; ++i) {
-		sizes[i] = planes[i].sizeimage;
-		mxr_dbg(mdev, "size[%d] = %08x\n", i, sizes[i]);
-	}
-
-	if (*nbuffers == 0)
-		*nbuffers = 1;
-
-	return 0;
-}
-
-static void buf_queue(struct vb2_buffer *vb)
-{
-	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
-	struct mxr_buffer *buffer = container_of(vbuf, struct mxr_buffer, vb);
-	struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
-	struct mxr_device *mdev = layer->mdev;
-	unsigned long flags;
-
-	spin_lock_irqsave(&layer->enq_slock, flags);
-	list_add_tail(&buffer->list, &layer->enq_list);
-	spin_unlock_irqrestore(&layer->enq_slock, flags);
-
-	mxr_dbg(mdev, "queuing buffer\n");
-}
-
-static int start_streaming(struct vb2_queue *vq, unsigned int count)
-{
-	struct mxr_layer *layer = vb2_get_drv_priv(vq);
-	struct mxr_device *mdev = layer->mdev;
-	unsigned long flags;
-
-	mxr_dbg(mdev, "%s\n", __func__);
-
-	/* block any changes in output configuration */
-	mxr_output_get(mdev);
-
-	mxr_layer_update_output(layer);
-	layer->ops.format_set(layer);
-	/* enabling layer in hardware */
-	spin_lock_irqsave(&layer->enq_slock, flags);
-	layer->state = MXR_LAYER_STREAMING;
-	spin_unlock_irqrestore(&layer->enq_slock, flags);
-
-	layer->ops.stream_set(layer, MXR_ENABLE);
-	mxr_streamer_get(mdev);
-
-	return 0;
-}
-
-static void mxr_watchdog(unsigned long arg)
-{
-	struct mxr_layer *layer = (struct mxr_layer *) arg;
-	struct mxr_device *mdev = layer->mdev;
-	unsigned long flags;
-
-	mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);
-
-	spin_lock_irqsave(&layer->enq_slock, flags);
-
-	if (layer->update_buf == layer->shadow_buf)
-		layer->update_buf = NULL;
-	if (layer->update_buf) {
-		vb2_buffer_done(&layer->update_buf->vb.vb2_buf,
-				VB2_BUF_STATE_ERROR);
-		layer->update_buf = NULL;
-	}
-	if (layer->shadow_buf) {
-		vb2_buffer_done(&layer->shadow_buf->vb.vb2_buf,
-				VB2_BUF_STATE_ERROR);
-		layer->shadow_buf = NULL;
-	}
-	spin_unlock_irqrestore(&layer->enq_slock, flags);
-}
-
-static void stop_streaming(struct vb2_queue *vq)
-{
-	struct mxr_layer *layer = vb2_get_drv_priv(vq);
-	struct mxr_device *mdev = layer->mdev;
-	unsigned long flags;
-	struct timer_list watchdog;
-	struct mxr_buffer *buf, *buf_tmp;
-
-	mxr_dbg(mdev, "%s\n", __func__);
-
-	spin_lock_irqsave(&layer->enq_slock, flags);
-
-	/* reset list */
-	layer->state = MXR_LAYER_STREAMING_FINISH;
-
-	/* set all buffer to be done */
-	list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
-		list_del(&buf->list);
-		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
-	}
-
-	spin_unlock_irqrestore(&layer->enq_slock, flags);
-
-	/* give 1 seconds to complete to complete last buffers */
-	setup_timer_on_stack(&watchdog, mxr_watchdog,
-		(unsigned long)layer);
-	mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));
-
-	/* wait until all buffers are goes to done state */
-	vb2_wait_for_all_buffers(vq);
-
-	/* stop timer if all synchronization is done */
-	del_timer_sync(&watchdog);
-	destroy_timer_on_stack(&watchdog);
-
-	/* stopping hardware */
-	spin_lock_irqsave(&layer->enq_slock, flags);
-	layer->state = MXR_LAYER_IDLE;
-	spin_unlock_irqrestore(&layer->enq_slock, flags);
-
-	/* disabling layer in hardware */
-	layer->ops.stream_set(layer, MXR_DISABLE);
-	/* remove one streamer */
-	mxr_streamer_put(mdev);
-	/* allow changes in output configuration */
-	mxr_output_put(mdev);
-}
-
-static struct vb2_ops mxr_video_qops = {
-	.queue_setup = queue_setup,
-	.buf_queue = buf_queue,
-	.wait_prepare = vb2_ops_wait_prepare,
-	.wait_finish = vb2_ops_wait_finish,
-	.start_streaming = start_streaming,
-	.stop_streaming = stop_streaming,
-};
-
-/* FIXME: try to put this functions to mxr_base_layer_create */
-int mxr_base_layer_register(struct mxr_layer *layer)
-{
-	struct mxr_device *mdev = layer->mdev;
-	int ret;
-
-	ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
-	if (ret)
-		mxr_err(mdev, "failed to register video device\n");
-	else
-		mxr_info(mdev, "registered layer %s as /dev/video%d\n",
-			layer->vfd.name, layer->vfd.num);
-	return ret;
-}
-
-void mxr_base_layer_unregister(struct mxr_layer *layer)
-{
-	video_unregister_device(&layer->vfd);
-}
-
-void mxr_layer_release(struct mxr_layer *layer)
-{
-	if (layer->ops.release)
-		layer->ops.release(layer);
-}
-
-void mxr_base_layer_release(struct mxr_layer *layer)
-{
-	kfree(layer);
-}
-
-static void mxr_vfd_release(struct video_device *vdev)
-{
-	pr_info("video device release\n");
-}
-
-struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
-	int idx, char *name, const struct mxr_layer_ops *ops)
-{
-	struct mxr_layer *layer;
-
-	layer = kzalloc(sizeof(*layer), GFP_KERNEL);
-	if (layer == NULL) {
-		mxr_err(mdev, "not enough memory for layer.\n");
-		goto fail;
-	}
-
-	layer->mdev = mdev;
-	layer->idx = idx;
-	layer->ops = *ops;
-
-	spin_lock_init(&layer->enq_slock);
-	INIT_LIST_HEAD(&layer->enq_list);
-	mutex_init(&layer->mutex);
-
-	layer->vfd = (struct video_device) {
-		.minor = -1,
-		.release = mxr_vfd_release,
-		.fops = &mxr_fops,
-		.vfl_dir = VFL_DIR_TX,
-		.ioctl_ops = &mxr_ioctl_ops,
-	};
-	strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));
-
-	video_set_drvdata(&layer->vfd, layer);
-	layer->vfd.lock = &layer->mutex;
-	layer->vfd.v4l2_dev = &mdev->v4l2_dev;
-
-	layer->vb_queue = (struct vb2_queue) {
-		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
-		.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF,
-		.drv_priv = layer,
-		.buf_struct_size = sizeof(struct mxr_buffer),
-		.ops = &mxr_video_qops,
-		.min_buffers_needed = 1,
-		.mem_ops = &vb2_dma_contig_memops,
-		.lock = &layer->mutex,
-		.dev = mdev->dev,
-	};
-
-	return layer;
-
-fail:
-	return NULL;
-}
-
-static const struct mxr_format *find_format_by_fourcc(
-	struct mxr_layer *layer, unsigned long fourcc)
-{
-	int i;
-
-	for (i = 0; i < layer->fmt_array_size; ++i)
-		if (layer->fmt_array[i]->fourcc == fourcc)
-			return layer->fmt_array[i];
-	return NULL;
-}
-
-static const struct mxr_format *find_format_by_index(
-	struct mxr_layer *layer, unsigned long index)
-{
-	if (index >= layer->fmt_array_size)
-		return NULL;
-	return layer->fmt_array[index];
-}
-
diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
deleted file mode 100644
index 6fa6f67..0000000
--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Samsung TV Mixer driver
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *
- * Tomasz Stanislawski, <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundiation. either version 2 of the License,
- * or (at your option) any later version
- */
-
-#include "mixer.h"
-
-#include "regs-vp.h"
-
-#include <media/videobuf2-dma-contig.h>
-
-/* FORMAT DEFINITIONS */
-static const struct mxr_format mxr_fmt_nv12 = {
-	.name = "NV12",
-	.fourcc = V4L2_PIX_FMT_NV12,
-	.colorspace = V4L2_COLORSPACE_JPEG,
-	.num_planes = 2,
-	.plane = {
-		{ .width = 1, .height = 1, .size = 1 },
-		{ .width = 2, .height = 2, .size = 2 },
-	},
-	.num_subframes = 1,
-	.cookie = VP_MODE_NV12 | VP_MODE_MEM_LINEAR,
-};
-
-static const struct mxr_format mxr_fmt_nv21 = {
-	.name = "NV21",
-	.fourcc = V4L2_PIX_FMT_NV21,
-	.colorspace = V4L2_COLORSPACE_JPEG,
-	.num_planes = 2,
-	.plane = {
-		{ .width = 1, .height = 1, .size = 1 },
-		{ .width = 2, .height = 2, .size = 2 },
-	},
-	.num_subframes = 1,
-	.cookie = VP_MODE_NV21 | VP_MODE_MEM_LINEAR,
-};
-
-static const struct mxr_format mxr_fmt_nv12m = {
-	.name = "NV12 (mplane)",
-	.fourcc = V4L2_PIX_FMT_NV12M,
-	.colorspace = V4L2_COLORSPACE_JPEG,
-	.num_planes = 2,
-	.plane = {
-		{ .width = 1, .height = 1, .size = 1 },
-		{ .width = 2, .height = 2, .size = 2 },
-	},
-	.num_subframes = 2,
-	.plane2subframe = {0, 1},
-	.cookie = VP_MODE_NV12 | VP_MODE_MEM_LINEAR,
-};
-
-static const struct mxr_format mxr_fmt_nv12mt = {
-	.name = "NV12 tiled (mplane)",
-	.fourcc = V4L2_PIX_FMT_NV12MT,
-	.colorspace = V4L2_COLORSPACE_JPEG,
-	.num_planes = 2,
-	.plane = {
-		{ .width = 128, .height = 32, .size = 4096 },
-		{ .width = 128, .height = 32, .size = 2048 },
-	},
-	.num_subframes = 2,
-	.plane2subframe = {0, 1},
-	.cookie = VP_MODE_NV12 | VP_MODE_MEM_TILED,
-};
-
-static const struct mxr_format *mxr_video_format[] = {
-	&mxr_fmt_nv12,
-	&mxr_fmt_nv21,
-	&mxr_fmt_nv12m,
-	&mxr_fmt_nv12mt,
-};
-
-/* AUXILIARY CALLBACKS */
-
-static void mxr_vp_layer_release(struct mxr_layer *layer)
-{
-	mxr_base_layer_unregister(layer);
-	mxr_base_layer_release(layer);
-}
-
-static void mxr_vp_buffer_set(struct mxr_layer *layer,
-	struct mxr_buffer *buf)
-{
-	dma_addr_t luma_addr[2] = {0, 0};
-	dma_addr_t chroma_addr[2] = {0, 0};
-
-	if (buf == NULL) {
-		mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
-		return;
-	}
-	luma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
-	if (layer->fmt->num_subframes == 2) {
-		chroma_addr[0] =
-			vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 1);
-	} else {
-		/* FIXME: mxr_get_plane_size compute integer division,
-		 * which is slow and should not be performed in interrupt */
-		chroma_addr[0] = luma_addr[0] + mxr_get_plane_size(
-			&layer->fmt->plane[0], layer->geo.src.full_width,
-			layer->geo.src.full_height);
-	}
-	if (layer->fmt->cookie & VP_MODE_MEM_TILED) {
-		luma_addr[1] = luma_addr[0] + 0x40;
-		chroma_addr[1] = chroma_addr[0] + 0x40;
-	} else {
-		luma_addr[1] = luma_addr[0] + layer->geo.src.full_width;
-		chroma_addr[1] = chroma_addr[0];
-	}
-	mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
-}
-
-static void mxr_vp_stream_set(struct mxr_layer *layer, int en)
-{
-	mxr_reg_vp_layer_stream(layer->mdev, en);
-}
-
-static void mxr_vp_format_set(struct mxr_layer *layer)
-{
-	mxr_reg_vp_format(layer->mdev, layer->fmt, &layer->geo);
-}
-
-static inline unsigned int do_center(unsigned int center,
-	unsigned int size, unsigned int upper, unsigned int flags)
-{
-	unsigned int lower;
-
-	if (flags & MXR_NO_OFFSET)
-		return 0;
-
-	lower = center - min(center, size / 2);
-	return min(lower, upper - size);
-}
-
-static void mxr_vp_fix_geometry(struct mxr_layer *layer,
-	enum mxr_geometry_stage stage, unsigned long flags)
-{
-	struct mxr_geometry *geo = &layer->geo;
-	struct mxr_crop *src = &geo->src;
-	struct mxr_crop *dst = &geo->dst;
-	unsigned long x_center, y_center;
-
-	switch (stage) {
-
-	case MXR_GEOMETRY_SINK: /* nothing to be fixed here */
-	case MXR_GEOMETRY_COMPOSE:
-		/* remember center of the area */
-		x_center = dst->x_offset + dst->width / 2;
-		y_center = dst->y_offset + dst->height / 2;
-
-		/* ensure that compose is reachable using 16x scaling */
-		dst->width = clamp(dst->width, 8U, 16 * src->full_width);
-		dst->height = clamp(dst->height, 1U, 16 * src->full_height);
-
-		/* setup offsets */
-		dst->x_offset = do_center(x_center, dst->width,
-			dst->full_width, flags);
-		dst->y_offset = do_center(y_center, dst->height,
-			dst->full_height, flags);
-		flags = 0; /* remove possible MXR_NO_OFFSET flag */
-		/* fall through */
-	case MXR_GEOMETRY_CROP:
-		/* remember center of the area */
-		x_center = src->x_offset + src->width / 2;
-		y_center = src->y_offset + src->height / 2;
-
-		/* ensure scaling is between 0.25x .. 16x */
-		src->width = clamp(src->width, round_up(dst->width / 16, 4),
-			dst->width * 4);
-		src->height = clamp(src->height, round_up(dst->height / 16, 4),
-			dst->height * 4);
-
-		/* hardware limits */
-		src->width = clamp(src->width, 32U, 2047U);
-		src->height = clamp(src->height, 4U, 2047U);
-
-		/* setup offsets */
-		src->x_offset = do_center(x_center, src->width,
-			src->full_width, flags);
-		src->y_offset = do_center(y_center, src->height,
-			src->full_height, flags);
-
-		/* setting scaling ratio */
-		geo->x_ratio = (src->width << 16) / dst->width;
-		geo->y_ratio = (src->height << 16) / dst->height;
-		/* fall through */
-
-	case MXR_GEOMETRY_SOURCE:
-		src->full_width = clamp(src->full_width,
-			ALIGN(src->width + src->x_offset, 8), 8192U);
-		src->full_height = clamp(src->full_height,
-			src->height + src->y_offset, 8192U);
-	}
-}
-
-/* PUBLIC API */
-
-struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
-{
-	struct mxr_layer *layer;
-	int ret;
-	const struct mxr_layer_ops ops = {
-		.release = mxr_vp_layer_release,
-		.buffer_set = mxr_vp_buffer_set,
-		.stream_set = mxr_vp_stream_set,
-		.format_set = mxr_vp_format_set,
-		.fix_geometry = mxr_vp_fix_geometry,
-	};
-	char name[32];
-
-	sprintf(name, "video%d", idx);
-
-	layer = mxr_base_layer_create(mdev, idx, name, &ops);
-	if (layer == NULL) {
-		mxr_err(mdev, "failed to initialize layer(%d) base\n", idx);
-		goto fail;
-	}
-
-	layer->fmt_array = mxr_video_format;
-	layer->fmt_array_size = ARRAY_SIZE(mxr_video_format);
-
-	ret = mxr_base_layer_register(layer);
-	if (ret)
-		goto fail_layer;
-
-	return layer;
-
-fail_layer:
-	mxr_base_layer_release(layer);
-
-fail:
-	return NULL;
-}
-
diff --git a/drivers/media/platform/s5p-tv/regs-hdmi.h b/drivers/media/platform/s5p-tv/regs-hdmi.h
deleted file mode 100644
index a889d1f..0000000
--- a/drivers/media/platform/s5p-tv/regs-hdmi.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/* linux/arch/arm/mach-exynos4/include/mach/regs-hdmi.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * HDMI register header file for Samsung TVOUT driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef SAMSUNG_REGS_HDMI_H
-#define SAMSUNG_REGS_HDMI_H
-
-/*
- * Register part
-*/
-
-#define HDMI_CTRL_BASE(x)		((x) + 0x00000000)
-#define HDMI_CORE_BASE(x)		((x) + 0x00010000)
-#define HDMI_TG_BASE(x)			((x) + 0x00050000)
-
-/* Control registers */
-#define HDMI_INTC_CON			HDMI_CTRL_BASE(0x0000)
-#define HDMI_INTC_FLAG			HDMI_CTRL_BASE(0x0004)
-#define HDMI_HPD_STATUS			HDMI_CTRL_BASE(0x000C)
-#define HDMI_PHY_RSTOUT			HDMI_CTRL_BASE(0x0014)
-#define HDMI_PHY_VPLL			HDMI_CTRL_BASE(0x0018)
-#define HDMI_PHY_CMU			HDMI_CTRL_BASE(0x001C)
-#define HDMI_CORE_RSTOUT		HDMI_CTRL_BASE(0x0020)
-
-/* Core registers */
-#define HDMI_CON_0			HDMI_CORE_BASE(0x0000)
-#define HDMI_CON_1			HDMI_CORE_BASE(0x0004)
-#define HDMI_CON_2			HDMI_CORE_BASE(0x0008)
-#define HDMI_SYS_STATUS			HDMI_CORE_BASE(0x0010)
-#define HDMI_PHY_STATUS			HDMI_CORE_BASE(0x0014)
-#define HDMI_STATUS_EN			HDMI_CORE_BASE(0x0020)
-#define HDMI_HPD			HDMI_CORE_BASE(0x0030)
-#define HDMI_MODE_SEL			HDMI_CORE_BASE(0x0040)
-#define HDMI_BLUE_SCREEN_0		HDMI_CORE_BASE(0x0050)
-#define HDMI_BLUE_SCREEN_1		HDMI_CORE_BASE(0x0054)
-#define HDMI_BLUE_SCREEN_2		HDMI_CORE_BASE(0x0058)
-#define HDMI_H_BLANK_0			HDMI_CORE_BASE(0x00A0)
-#define HDMI_H_BLANK_1			HDMI_CORE_BASE(0x00A4)
-#define HDMI_V_BLANK_0			HDMI_CORE_BASE(0x00B0)
-#define HDMI_V_BLANK_1			HDMI_CORE_BASE(0x00B4)
-#define HDMI_V_BLANK_2			HDMI_CORE_BASE(0x00B8)
-#define HDMI_H_V_LINE_0			HDMI_CORE_BASE(0x00C0)
-#define HDMI_H_V_LINE_1			HDMI_CORE_BASE(0x00C4)
-#define HDMI_H_V_LINE_2			HDMI_CORE_BASE(0x00C8)
-#define HDMI_VSYNC_POL			HDMI_CORE_BASE(0x00E4)
-#define HDMI_INT_PRO_MODE		HDMI_CORE_BASE(0x00E8)
-#define HDMI_V_BLANK_F_0		HDMI_CORE_BASE(0x0110)
-#define HDMI_V_BLANK_F_1		HDMI_CORE_BASE(0x0114)
-#define HDMI_V_BLANK_F_2		HDMI_CORE_BASE(0x0118)
-#define HDMI_H_SYNC_GEN_0		HDMI_CORE_BASE(0x0120)
-#define HDMI_H_SYNC_GEN_1		HDMI_CORE_BASE(0x0124)
-#define HDMI_H_SYNC_GEN_2		HDMI_CORE_BASE(0x0128)
-#define HDMI_V_SYNC_GEN_1_0		HDMI_CORE_BASE(0x0130)
-#define HDMI_V_SYNC_GEN_1_1		HDMI_CORE_BASE(0x0134)
-#define HDMI_V_SYNC_GEN_1_2		HDMI_CORE_BASE(0x0138)
-#define HDMI_V_SYNC_GEN_2_0		HDMI_CORE_BASE(0x0140)
-#define HDMI_V_SYNC_GEN_2_1		HDMI_CORE_BASE(0x0144)
-#define HDMI_V_SYNC_GEN_2_2		HDMI_CORE_BASE(0x0148)
-#define HDMI_V_SYNC_GEN_3_0		HDMI_CORE_BASE(0x0150)
-#define HDMI_V_SYNC_GEN_3_1		HDMI_CORE_BASE(0x0154)
-#define HDMI_V_SYNC_GEN_3_2		HDMI_CORE_BASE(0x0158)
-#define HDMI_AVI_CON			HDMI_CORE_BASE(0x0300)
-#define HDMI_AVI_BYTE(n)		HDMI_CORE_BASE(0x0320 + 4 * (n))
-#define	HDMI_DC_CONTROL			HDMI_CORE_BASE(0x05C0)
-#define HDMI_VIDEO_PATTERN_GEN		HDMI_CORE_BASE(0x05C4)
-#define HDMI_HPD_GEN			HDMI_CORE_BASE(0x05C8)
-
-/* Timing generator registers */
-#define HDMI_TG_CMD			HDMI_TG_BASE(0x0000)
-#define HDMI_TG_H_FSZ_L			HDMI_TG_BASE(0x0018)
-#define HDMI_TG_H_FSZ_H			HDMI_TG_BASE(0x001C)
-#define HDMI_TG_HACT_ST_L		HDMI_TG_BASE(0x0020)
-#define HDMI_TG_HACT_ST_H		HDMI_TG_BASE(0x0024)
-#define HDMI_TG_HACT_SZ_L		HDMI_TG_BASE(0x0028)
-#define HDMI_TG_HACT_SZ_H		HDMI_TG_BASE(0x002C)
-#define HDMI_TG_V_FSZ_L			HDMI_TG_BASE(0x0030)
-#define HDMI_TG_V_FSZ_H			HDMI_TG_BASE(0x0034)
-#define HDMI_TG_VSYNC_L			HDMI_TG_BASE(0x0038)
-#define HDMI_TG_VSYNC_H			HDMI_TG_BASE(0x003C)
-#define HDMI_TG_VSYNC2_L		HDMI_TG_BASE(0x0040)
-#define HDMI_TG_VSYNC2_H		HDMI_TG_BASE(0x0044)
-#define HDMI_TG_VACT_ST_L		HDMI_TG_BASE(0x0048)
-#define HDMI_TG_VACT_ST_H		HDMI_TG_BASE(0x004C)
-#define HDMI_TG_VACT_SZ_L		HDMI_TG_BASE(0x0050)
-#define HDMI_TG_VACT_SZ_H		HDMI_TG_BASE(0x0054)
-#define HDMI_TG_FIELD_CHG_L		HDMI_TG_BASE(0x0058)
-#define HDMI_TG_FIELD_CHG_H		HDMI_TG_BASE(0x005C)
-#define HDMI_TG_VACT_ST2_L		HDMI_TG_BASE(0x0060)
-#define HDMI_TG_VACT_ST2_H		HDMI_TG_BASE(0x0064)
-#define HDMI_TG_VSYNC_TOP_HDMI_L	HDMI_TG_BASE(0x0078)
-#define HDMI_TG_VSYNC_TOP_HDMI_H	HDMI_TG_BASE(0x007C)
-#define HDMI_TG_VSYNC_BOT_HDMI_L	HDMI_TG_BASE(0x0080)
-#define HDMI_TG_VSYNC_BOT_HDMI_H	HDMI_TG_BASE(0x0084)
-#define HDMI_TG_FIELD_TOP_HDMI_L	HDMI_TG_BASE(0x0088)
-#define HDMI_TG_FIELD_TOP_HDMI_H	HDMI_TG_BASE(0x008C)
-#define HDMI_TG_FIELD_BOT_HDMI_L	HDMI_TG_BASE(0x0090)
-#define HDMI_TG_FIELD_BOT_HDMI_H	HDMI_TG_BASE(0x0094)
-
-/*
- * Bit definition part
- */
-
-/* HDMI_INTC_CON */
-#define HDMI_INTC_EN_GLOBAL		(1 << 6)
-#define HDMI_INTC_EN_HPD_PLUG		(1 << 3)
-#define HDMI_INTC_EN_HPD_UNPLUG		(1 << 2)
-
-/* HDMI_INTC_FLAG */
-#define HDMI_INTC_FLAG_HPD_PLUG		(1 << 3)
-#define HDMI_INTC_FLAG_HPD_UNPLUG	(1 << 2)
-
-/* HDMI_PHY_RSTOUT */
-#define HDMI_PHY_SW_RSTOUT		(1 << 0)
-
-/* HDMI_CORE_RSTOUT */
-#define HDMI_CORE_SW_RSTOUT		(1 << 0)
-
-/* HDMI_CON_0 */
-#define HDMI_BLUE_SCR_EN		(1 << 5)
-#define HDMI_EN				(1 << 0)
-
-/* HDMI_CON_2 */
-#define HDMI_DVI_PERAMBLE_EN		(1 << 5)
-#define HDMI_DVI_BAND_EN		(1 << 1)
-
-/* HDMI_PHY_STATUS */
-#define HDMI_PHY_STATUS_READY		(1 << 0)
-
-/* HDMI_MODE_SEL */
-#define HDMI_MODE_HDMI_EN		(1 << 1)
-#define HDMI_MODE_DVI_EN		(1 << 0)
-#define HDMI_MODE_MASK			(3 << 0)
-
-/* HDMI_TG_CMD */
-#define HDMI_TG_FIELD_EN		(1 << 1)
-#define HDMI_TG_EN			(1 << 0)
-
-#endif /* SAMSUNG_REGS_HDMI_H */
diff --git a/drivers/media/platform/s5p-tv/regs-mixer.h b/drivers/media/platform/s5p-tv/regs-mixer.h
deleted file mode 100644
index 158abb4..0000000
--- a/drivers/media/platform/s5p-tv/regs-mixer.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Mixer register header file for Samsung Mixer driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-#ifndef SAMSUNG_REGS_MIXER_H
-#define SAMSUNG_REGS_MIXER_H
-
-/*
- * Register part
- */
-#define MXR_STATUS			0x0000
-#define MXR_CFG				0x0004
-#define MXR_INT_EN			0x0008
-#define MXR_INT_STATUS			0x000C
-#define MXR_LAYER_CFG			0x0010
-#define MXR_VIDEO_CFG			0x0014
-#define MXR_GRAPHIC0_CFG		0x0020
-#define MXR_GRAPHIC0_BASE		0x0024
-#define MXR_GRAPHIC0_SPAN		0x0028
-#define MXR_GRAPHIC0_SXY		0x002C
-#define MXR_GRAPHIC0_WH			0x0030
-#define MXR_GRAPHIC0_DXY		0x0034
-#define MXR_GRAPHIC0_BLANK		0x0038
-#define MXR_GRAPHIC1_CFG		0x0040
-#define MXR_GRAPHIC1_BASE		0x0044
-#define MXR_GRAPHIC1_SPAN		0x0048
-#define MXR_GRAPHIC1_SXY		0x004C
-#define MXR_GRAPHIC1_WH			0x0050
-#define MXR_GRAPHIC1_DXY		0x0054
-#define MXR_GRAPHIC1_BLANK		0x0058
-#define MXR_BG_CFG			0x0060
-#define MXR_BG_COLOR0			0x0064
-#define MXR_BG_COLOR1			0x0068
-#define MXR_BG_COLOR2			0x006C
-
-/* for parametrized access to layer registers */
-#define MXR_GRAPHIC_CFG(i)		(0x0020 + (i) * 0x20)
-#define MXR_GRAPHIC_BASE(i)		(0x0024 + (i) * 0x20)
-#define MXR_GRAPHIC_SPAN(i)		(0x0028 + (i) * 0x20)
-#define MXR_GRAPHIC_SXY(i)		(0x002C + (i) * 0x20)
-#define MXR_GRAPHIC_WH(i)		(0x0030 + (i) * 0x20)
-#define MXR_GRAPHIC_DXY(i)		(0x0034 + (i) * 0x20)
-
-/*
- * Bit definition part
- */
-
-/* generates mask for range of bits */
-#define MXR_MASK(high_bit, low_bit) \
-	(((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
-
-#define MXR_MASK_VAL(val, high_bit, low_bit) \
-	(((val) << (low_bit)) & MXR_MASK(high_bit, low_bit))
-
-/* bits for MXR_STATUS */
-#define MXR_STATUS_16_BURST		(1 << 7)
-#define MXR_STATUS_BURST_MASK		(1 << 7)
-#define MXR_STATUS_SYNC_ENABLE		(1 << 2)
-#define MXR_STATUS_REG_RUN		(1 << 0)
-
-/* bits for MXR_CFG */
-#define MXR_CFG_OUT_YUV444		(0 << 8)
-#define MXR_CFG_OUT_RGB888		(1 << 8)
-#define MXR_CFG_OUT_MASK		(1 << 8)
-#define MXR_CFG_DST_SDO			(0 << 7)
-#define MXR_CFG_DST_HDMI		(1 << 7)
-#define MXR_CFG_DST_MASK		(1 << 7)
-#define MXR_CFG_SCAN_HD_720		(0 << 6)
-#define MXR_CFG_SCAN_HD_1080		(1 << 6)
-#define MXR_CFG_GRP1_ENABLE		(1 << 5)
-#define MXR_CFG_GRP0_ENABLE		(1 << 4)
-#define MXR_CFG_VP_ENABLE		(1 << 3)
-#define MXR_CFG_SCAN_INTERLACE		(0 << 2)
-#define MXR_CFG_SCAN_PROGRASSIVE	(1 << 2)
-#define MXR_CFG_SCAN_NTSC		(0 << 1)
-#define MXR_CFG_SCAN_PAL		(1 << 1)
-#define MXR_CFG_SCAN_SD			(0 << 0)
-#define MXR_CFG_SCAN_HD			(1 << 0)
-#define MXR_CFG_SCAN_MASK		0x47
-
-/* bits for MXR_GRAPHICn_CFG */
-#define MXR_GRP_CFG_COLOR_KEY_DISABLE	(1 << 21)
-#define MXR_GRP_CFG_BLEND_PRE_MUL	(1 << 20)
-#define MXR_GRP_CFG_FORMAT_VAL(x)	MXR_MASK_VAL(x, 11, 8)
-#define MXR_GRP_CFG_FORMAT_MASK		MXR_GRP_CFG_FORMAT_VAL(~0)
-#define MXR_GRP_CFG_ALPHA_VAL(x)	MXR_MASK_VAL(x, 7, 0)
-
-/* bits for MXR_GRAPHICn_WH */
-#define MXR_GRP_WH_H_SCALE(x)		MXR_MASK_VAL(x, 28, 28)
-#define MXR_GRP_WH_V_SCALE(x)		MXR_MASK_VAL(x, 12, 12)
-#define MXR_GRP_WH_WIDTH(x)		MXR_MASK_VAL(x, 26, 16)
-#define MXR_GRP_WH_HEIGHT(x)		MXR_MASK_VAL(x, 10, 0)
-
-/* bits for MXR_GRAPHICn_SXY */
-#define MXR_GRP_SXY_SX(x)		MXR_MASK_VAL(x, 26, 16)
-#define MXR_GRP_SXY_SY(x)		MXR_MASK_VAL(x, 10, 0)
-
-/* bits for MXR_GRAPHICn_DXY */
-#define MXR_GRP_DXY_DX(x)		MXR_MASK_VAL(x, 26, 16)
-#define MXR_GRP_DXY_DY(x)		MXR_MASK_VAL(x, 10, 0)
-
-/* bits for MXR_INT_EN */
-#define MXR_INT_EN_VSYNC		(1 << 11)
-#define MXR_INT_EN_ALL			(0x0f << 8)
-
-/* bit for MXR_INT_STATUS */
-#define MXR_INT_CLEAR_VSYNC		(1 << 11)
-#define MXR_INT_STATUS_VSYNC		(1 << 0)
-
-/* bit for MXR_LAYER_CFG */
-#define MXR_LAYER_CFG_GRP1_VAL(x)	MXR_MASK_VAL(x, 11, 8)
-#define MXR_LAYER_CFG_GRP0_VAL(x)	MXR_MASK_VAL(x, 7, 4)
-#define MXR_LAYER_CFG_VP_VAL(x)		MXR_MASK_VAL(x, 3, 0)
-
-#endif /* SAMSUNG_REGS_MIXER_H */
-
diff --git a/drivers/media/platform/s5p-tv/regs-sdo.h b/drivers/media/platform/s5p-tv/regs-sdo.h
deleted file mode 100644
index 6f22fbf..0000000
--- a/drivers/media/platform/s5p-tv/regs-sdo.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* drivers/media/platform/s5p-tv/regs-sdo.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com/
- *
- * SDO register description file
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef SAMSUNG_REGS_SDO_H
-#define SAMSUNG_REGS_SDO_H
-
-/*
- * Register part
- */
-
-#define SDO_CLKCON			0x0000
-#define SDO_CONFIG			0x0008
-#define SDO_VBI				0x0014
-#define SDO_DAC				0x003C
-#define SDO_CCCON			0x0180
-#define SDO_IRQ				0x0280
-#define SDO_IRQMASK			0x0284
-#define SDO_VERSION			0x03D8
-
-/*
- * Bit definition part
- */
-
-/* SDO Clock Control Register (SDO_CLKCON) */
-#define SDO_TVOUT_SW_RESET		(1 << 4)
-#define SDO_TVOUT_CLOCK_READY		(1 << 1)
-#define SDO_TVOUT_CLOCK_ON		(1 << 0)
-
-/* SDO Video Standard Configuration Register (SDO_CONFIG) */
-#define SDO_PROGRESSIVE			(1 << 4)
-#define SDO_NTSC_M			0
-#define SDO_PAL_M			1
-#define SDO_PAL_BGHID			2
-#define SDO_PAL_N			3
-#define SDO_PAL_NC			4
-#define SDO_NTSC_443			8
-#define SDO_PAL_60			9
-#define SDO_STANDARD_MASK		0xf
-
-/* SDO VBI Configuration Register (SDO_VBI) */
-#define SDO_CVBS_WSS_INS		(1 << 14)
-#define SDO_CVBS_CLOSED_CAPTION_MASK	(3 << 12)
-
-/* SDO DAC Configuration Register (SDO_DAC) */
-#define SDO_POWER_ON_DAC		(1 << 0)
-
-/* SDO Color Compensation On/Off Control (SDO_CCCON) */
-#define SDO_COMPENSATION_BHS_ADJ_OFF	(1 << 4)
-#define SDO_COMPENSATION_CVBS_COMP_OFF	(1 << 0)
-
-/* SDO Interrupt Request Register (SDO_IRQ) */
-#define SDO_VSYNC_IRQ_PEND		(1 << 0)
-
-#endif /* SAMSUNG_REGS_SDO_H */
diff --git a/drivers/media/platform/s5p-tv/regs-vp.h b/drivers/media/platform/s5p-tv/regs-vp.h
deleted file mode 100644
index 6c63984..0000000
--- a/drivers/media/platform/s5p-tv/regs-vp.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com/
- *
- * Video processor register header file for Samsung Mixer driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef SAMSUNG_REGS_VP_H
-#define SAMSUNG_REGS_VP_H
-
-/*
- * Register part
- */
-
-#define VP_ENABLE			0x0000
-#define VP_SRESET			0x0004
-#define VP_SHADOW_UPDATE		0x0008
-#define VP_FIELD_ID			0x000C
-#define VP_MODE				0x0010
-#define VP_IMG_SIZE_Y			0x0014
-#define VP_IMG_SIZE_C			0x0018
-#define VP_PER_RATE_CTRL		0x001C
-#define VP_TOP_Y_PTR			0x0028
-#define VP_BOT_Y_PTR			0x002C
-#define VP_TOP_C_PTR			0x0030
-#define VP_BOT_C_PTR			0x0034
-#define VP_ENDIAN_MODE			0x03CC
-#define VP_SRC_H_POSITION		0x0044
-#define VP_SRC_V_POSITION		0x0048
-#define VP_SRC_WIDTH			0x004C
-#define VP_SRC_HEIGHT			0x0050
-#define VP_DST_H_POSITION		0x0054
-#define VP_DST_V_POSITION		0x0058
-#define VP_DST_WIDTH			0x005C
-#define VP_DST_HEIGHT			0x0060
-#define VP_H_RATIO			0x0064
-#define VP_V_RATIO			0x0068
-#define VP_POLY8_Y0_LL			0x006C
-#define VP_POLY4_Y0_LL			0x00EC
-#define VP_POLY4_C0_LL			0x012C
-
-/*
- * Bit definition part
- */
-
-/* generates mask for range of bits */
-
-#define VP_MASK(high_bit, low_bit) \
-	(((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
-
-#define VP_MASK_VAL(val, high_bit, low_bit) \
-	(((val) << (low_bit)) & VP_MASK(high_bit, low_bit))
-
- /* VP_ENABLE */
-#define VP_ENABLE_ON			(1 << 0)
-
-/* VP_SRESET */
-#define VP_SRESET_PROCESSING		(1 << 0)
-
-/* VP_SHADOW_UPDATE */
-#define VP_SHADOW_UPDATE_ENABLE		(1 << 0)
-
-/* VP_MODE */
-#define VP_MODE_NV12			(0 << 6)
-#define VP_MODE_NV21			(1 << 6)
-#define VP_MODE_LINE_SKIP		(1 << 5)
-#define VP_MODE_MEM_LINEAR		(0 << 4)
-#define VP_MODE_MEM_TILED		(1 << 4)
-#define VP_MODE_FMT_MASK		(5 << 4)
-#define VP_MODE_FIELD_ID_AUTO_TOGGLING	(1 << 2)
-#define VP_MODE_2D_IPC			(1 << 1)
-
-/* VP_IMG_SIZE_Y */
-/* VP_IMG_SIZE_C */
-#define VP_IMG_HSIZE(x)			VP_MASK_VAL(x, 29, 16)
-#define VP_IMG_VSIZE(x)			VP_MASK_VAL(x, 13, 0)
-
-/* VP_SRC_H_POSITION */
-#define VP_SRC_H_POSITION_VAL(x)	VP_MASK_VAL(x, 14, 4)
-
-/* VP_ENDIAN_MODE */
-#define VP_ENDIAN_MODE_LITTLE		(1 << 0)
-
-#endif /* SAMSUNG_REGS_VP_H */
diff --git a/drivers/media/platform/s5p-tv/sdo_drv.c b/drivers/media/platform/s5p-tv/sdo_drv.c
deleted file mode 100644
index c75d435..0000000
--- a/drivers/media/platform/s5p-tv/sdo_drv.c
+++ /dev/null
@@ -1,497 +0,0 @@
-/*
- * Samsung Standard Definition Output (SDO) driver
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *
- * Tomasz Stanislawski, <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundiation. either version 2 of the License,
- * or (at your option) any later version
- */
-
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
-
-#include <media/v4l2-subdev.h>
-
-#include "regs-sdo.h"
-
-MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
-MODULE_DESCRIPTION("Samsung Standard Definition Output (SDO)");
-MODULE_LICENSE("GPL");
-
-#define SDO_DEFAULT_STD	V4L2_STD_PAL
-
-struct sdo_format {
-	v4l2_std_id id;
-	/* all modes are 720 pixels wide */
-	unsigned int height;
-	unsigned int cookie;
-};
-
-struct sdo_device {
-	/** pointer to device parent */
-	struct device *dev;
-	/** base address of SDO registers */
-	void __iomem *regs;
-	/** SDO interrupt */
-	unsigned int irq;
-	/** DAC source clock */
-	struct clk *sclk_dac;
-	/** DAC clock */
-	struct clk *dac;
-	/** DAC physical interface */
-	struct clk *dacphy;
-	/** clock for control of VPLL */
-	struct clk *fout_vpll;
-	/** vpll rate before sdo stream was on */
-	unsigned long vpll_rate;
-	/** regulator for SDO IP power */
-	struct regulator *vdac;
-	/** regulator for SDO plug detection */
-	struct regulator *vdet;
-	/** subdev used as device interface */
-	struct v4l2_subdev sd;
-	/** current format */
-	const struct sdo_format *fmt;
-};
-
-static inline struct sdo_device *sd_to_sdev(struct v4l2_subdev *sd)
-{
-	return container_of(sd, struct sdo_device, sd);
-}
-
-static inline
-void sdo_write_mask(struct sdo_device *sdev, u32 reg_id, u32 value, u32 mask)
-{
-	u32 old = readl(sdev->regs + reg_id);
-	value = (value & mask) | (old & ~mask);
-	writel(value, sdev->regs + reg_id);
-}
-
-static inline
-void sdo_write(struct sdo_device *sdev, u32 reg_id, u32 value)
-{
-	writel(value, sdev->regs + reg_id);
-}
-
-static inline
-u32 sdo_read(struct sdo_device *sdev, u32 reg_id)
-{
-	return readl(sdev->regs + reg_id);
-}
-
-static irqreturn_t sdo_irq_handler(int irq, void *dev_data)
-{
-	struct sdo_device *sdev = dev_data;
-
-	/* clear interrupt */
-	sdo_write_mask(sdev, SDO_IRQ, ~0, SDO_VSYNC_IRQ_PEND);
-	return IRQ_HANDLED;
-}
-
-static void sdo_reg_debug(struct sdo_device *sdev)
-{
-#define DBGREG(reg_id) \
-	dev_info(sdev->dev, #reg_id " = %08x\n", \
-		sdo_read(sdev, reg_id))
-
-	DBGREG(SDO_CLKCON);
-	DBGREG(SDO_CONFIG);
-	DBGREG(SDO_VBI);
-	DBGREG(SDO_DAC);
-	DBGREG(SDO_IRQ);
-	DBGREG(SDO_IRQMASK);
-	DBGREG(SDO_VERSION);
-}
-
-static const struct sdo_format sdo_format[] = {
-	{ V4L2_STD_PAL_N,	.height = 576, .cookie = SDO_PAL_N },
-	{ V4L2_STD_PAL_Nc,	.height = 576, .cookie = SDO_PAL_NC },
-	{ V4L2_STD_PAL_M,	.height = 480, .cookie = SDO_PAL_M },
-	{ V4L2_STD_PAL_60,	.height = 480, .cookie = SDO_PAL_60 },
-	{ V4L2_STD_NTSC_443,	.height = 480, .cookie = SDO_NTSC_443 },
-	{ V4L2_STD_PAL,		.height = 576, .cookie = SDO_PAL_BGHID },
-	{ V4L2_STD_NTSC_M,	.height = 480, .cookie = SDO_NTSC_M },
-};
-
-static const struct sdo_format *sdo_find_format(v4l2_std_id id)
-{
-	int i;
-	for (i = 0; i < ARRAY_SIZE(sdo_format); ++i)
-		if (sdo_format[i].id & id)
-			return &sdo_format[i];
-	return NULL;
-}
-
-static int sdo_g_tvnorms_output(struct v4l2_subdev *sd, v4l2_std_id *std)
-{
-	*std = V4L2_STD_NTSC_M | V4L2_STD_PAL_M | V4L2_STD_PAL |
-		V4L2_STD_PAL_N | V4L2_STD_PAL_Nc |
-		V4L2_STD_NTSC_443 | V4L2_STD_PAL_60;
-	return 0;
-}
-
-static int sdo_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
-{
-	struct sdo_device *sdev = sd_to_sdev(sd);
-	const struct sdo_format *fmt;
-	fmt = sdo_find_format(std);
-	if (fmt == NULL)
-		return -EINVAL;
-	sdev->fmt = fmt;
-	return 0;
-}
-
-static int sdo_g_std_output(struct v4l2_subdev *sd, v4l2_std_id *std)
-{
-	*std = sd_to_sdev(sd)->fmt->id;
-	return 0;
-}
-
-static int sdo_get_fmt(struct v4l2_subdev *sd,
-		struct v4l2_subdev_pad_config *cfg,
-		struct v4l2_subdev_format *format)
-{
-	struct v4l2_mbus_framefmt *fmt = &format->format;
-	struct sdo_device *sdev = sd_to_sdev(sd);
-
-	if (!sdev->fmt)
-		return -ENXIO;
-	if (format->pad)
-		return -EINVAL;
-	/* all modes are 720 pixels wide */
-	fmt->width = 720;
-	fmt->height = sdev->fmt->height;
-	fmt->code = MEDIA_BUS_FMT_FIXED;
-	fmt->field = V4L2_FIELD_INTERLACED;
-	fmt->colorspace = V4L2_COLORSPACE_JPEG;
-	return 0;
-}
-
-static int sdo_s_power(struct v4l2_subdev *sd, int on)
-{
-	struct sdo_device *sdev = sd_to_sdev(sd);
-	struct device *dev = sdev->dev;
-	int ret;
-
-	dev_info(dev, "sdo_s_power(%d)\n", on);
-
-	if (on)
-		ret = pm_runtime_get_sync(dev);
-	else
-		ret = pm_runtime_put_sync(dev);
-
-	/* only values < 0 indicate errors */
-	return ret < 0 ? ret : 0;
-}
-
-static int sdo_streamon(struct sdo_device *sdev)
-{
-	int ret;
-
-	/* set proper clock for Timing Generator */
-	sdev->vpll_rate = clk_get_rate(sdev->fout_vpll);
-	ret = clk_set_rate(sdev->fout_vpll, 54000000);
-	if (ret < 0) {
-		dev_err(sdev->dev, "Failed to set vpll rate\n");
-		return ret;
-	}
-	dev_info(sdev->dev, "fout_vpll.rate = %lu\n",
-	clk_get_rate(sdev->fout_vpll));
-	/* enable clock in SDO */
-	sdo_write_mask(sdev, SDO_CLKCON, ~0, SDO_TVOUT_CLOCK_ON);
-	ret = clk_prepare_enable(sdev->dacphy);
-	if (ret < 0) {
-		dev_err(sdev->dev, "clk_prepare_enable(dacphy) failed\n");
-		goto fail;
-	}
-	/* enable DAC */
-	sdo_write_mask(sdev, SDO_DAC, ~0, SDO_POWER_ON_DAC);
-	sdo_reg_debug(sdev);
-	return 0;
-
-fail:
-	sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_CLOCK_ON);
-	clk_set_rate(sdev->fout_vpll, sdev->vpll_rate);
-	return ret;
-}
-
-static int sdo_streamoff(struct sdo_device *sdev)
-{
-	int tries;
-
-	sdo_write_mask(sdev, SDO_DAC, 0, SDO_POWER_ON_DAC);
-	clk_disable_unprepare(sdev->dacphy);
-	sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_CLOCK_ON);
-	for (tries = 100; tries; --tries) {
-		if (sdo_read(sdev, SDO_CLKCON) & SDO_TVOUT_CLOCK_READY)
-			break;
-		mdelay(1);
-	}
-	if (tries == 0)
-		dev_err(sdev->dev, "failed to stop streaming\n");
-	clk_set_rate(sdev->fout_vpll, sdev->vpll_rate);
-	return tries ? 0 : -EIO;
-}
-
-static int sdo_s_stream(struct v4l2_subdev *sd, int on)
-{
-	struct sdo_device *sdev = sd_to_sdev(sd);
-	return on ? sdo_streamon(sdev) : sdo_streamoff(sdev);
-}
-
-static const struct v4l2_subdev_core_ops sdo_sd_core_ops = {
-	.s_power = sdo_s_power,
-};
-
-static const struct v4l2_subdev_video_ops sdo_sd_video_ops = {
-	.s_std_output = sdo_s_std_output,
-	.g_std_output = sdo_g_std_output,
-	.g_tvnorms_output = sdo_g_tvnorms_output,
-	.s_stream = sdo_s_stream,
-};
-
-static const struct v4l2_subdev_pad_ops sdo_sd_pad_ops = {
-	.get_fmt = sdo_get_fmt,
-};
-
-static const struct v4l2_subdev_ops sdo_sd_ops = {
-	.core = &sdo_sd_core_ops,
-	.video = &sdo_sd_video_ops,
-	.pad = &sdo_sd_pad_ops,
-};
-
-static int sdo_runtime_suspend(struct device *dev)
-{
-	struct v4l2_subdev *sd = dev_get_drvdata(dev);
-	struct sdo_device *sdev = sd_to_sdev(sd);
-
-	dev_info(dev, "suspend\n");
-	regulator_disable(sdev->vdet);
-	regulator_disable(sdev->vdac);
-	clk_disable_unprepare(sdev->sclk_dac);
-	return 0;
-}
-
-static int sdo_runtime_resume(struct device *dev)
-{
-	struct v4l2_subdev *sd = dev_get_drvdata(dev);
-	struct sdo_device *sdev = sd_to_sdev(sd);
-	int ret;
-
-	dev_info(dev, "resume\n");
-
-	ret = clk_prepare_enable(sdev->sclk_dac);
-	if (ret < 0)
-		return ret;
-
-	ret = regulator_enable(sdev->vdac);
-	if (ret < 0)
-		goto dac_clk_dis;
-
-	ret = regulator_enable(sdev->vdet);
-	if (ret < 0)
-		goto vdac_r_dis;
-
-	/* software reset */
-	sdo_write_mask(sdev, SDO_CLKCON, ~0, SDO_TVOUT_SW_RESET);
-	mdelay(10);
-	sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_SW_RESET);
-
-	/* setting TV mode */
-	sdo_write_mask(sdev, SDO_CONFIG, sdev->fmt->cookie, SDO_STANDARD_MASK);
-	/* XXX: forcing interlaced mode using undocumented bit */
-	sdo_write_mask(sdev, SDO_CONFIG, 0, SDO_PROGRESSIVE);
-	/* turn all VBI off */
-	sdo_write_mask(sdev, SDO_VBI, 0, SDO_CVBS_WSS_INS |
-		SDO_CVBS_CLOSED_CAPTION_MASK);
-	/* turn all post processing off */
-	sdo_write_mask(sdev, SDO_CCCON, ~0, SDO_COMPENSATION_BHS_ADJ_OFF |
-		SDO_COMPENSATION_CVBS_COMP_OFF);
-	sdo_reg_debug(sdev);
-	return 0;
-
-vdac_r_dis:
-	regulator_disable(sdev->vdac);
-dac_clk_dis:
-	clk_disable_unprepare(sdev->sclk_dac);
-	return ret;
-}
-
-static const struct dev_pm_ops sdo_pm_ops = {
-	.runtime_suspend = sdo_runtime_suspend,
-	.runtime_resume	 = sdo_runtime_resume,
-};
-
-static int sdo_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct sdo_device *sdev;
-	struct resource *res;
-	int ret = 0;
-	struct clk *sclk_vpll;
-
-	dev_info(dev, "probe start\n");
-	sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL);
-	if (!sdev) {
-		dev_err(dev, "not enough memory.\n");
-		ret = -ENOMEM;
-		goto fail;
-	}
-	sdev->dev = dev;
-
-	/* mapping registers */
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res == NULL) {
-		dev_err(dev, "get memory resource failed.\n");
-		ret = -ENXIO;
-		goto fail;
-	}
-
-	sdev->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
-	if (sdev->regs == NULL) {
-		dev_err(dev, "register mapping failed.\n");
-		ret = -ENXIO;
-		goto fail;
-	}
-
-	/* acquiring interrupt */
-	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (res == NULL) {
-		dev_err(dev, "get interrupt resource failed.\n");
-		ret = -ENXIO;
-		goto fail;
-	}
-	ret = devm_request_irq(&pdev->dev, res->start, sdo_irq_handler, 0,
-			       "s5p-sdo", sdev);
-	if (ret) {
-		dev_err(dev, "request interrupt failed.\n");
-		goto fail;
-	}
-	sdev->irq = res->start;
-
-	/* acquire clocks */
-	sdev->sclk_dac = clk_get(dev, "sclk_dac");
-	if (IS_ERR(sdev->sclk_dac)) {
-		dev_err(dev, "failed to get clock 'sclk_dac'\n");
-		ret = PTR_ERR(sdev->sclk_dac);
-		goto fail;
-	}
-	sdev->dac = clk_get(dev, "dac");
-	if (IS_ERR(sdev->dac)) {
-		dev_err(dev, "failed to get clock 'dac'\n");
-		ret = PTR_ERR(sdev->dac);
-		goto fail_sclk_dac;
-	}
-	sdev->dacphy = clk_get(dev, "dacphy");
-	if (IS_ERR(sdev->dacphy)) {
-		dev_err(dev, "failed to get clock 'dacphy'\n");
-		ret = PTR_ERR(sdev->dacphy);
-		goto fail_dac;
-	}
-	sclk_vpll = clk_get(dev, "sclk_vpll");
-	if (IS_ERR(sclk_vpll)) {
-		dev_err(dev, "failed to get clock 'sclk_vpll'\n");
-		ret = PTR_ERR(sclk_vpll);
-		goto fail_dacphy;
-	}
-	clk_set_parent(sdev->sclk_dac, sclk_vpll);
-	clk_put(sclk_vpll);
-	sdev->fout_vpll = clk_get(dev, "fout_vpll");
-	if (IS_ERR(sdev->fout_vpll)) {
-		dev_err(dev, "failed to get clock 'fout_vpll'\n");
-		ret = PTR_ERR(sdev->fout_vpll);
-		goto fail_dacphy;
-	}
-	dev_info(dev, "fout_vpll.rate = %lu\n", clk_get_rate(sclk_vpll));
-
-	/* acquire regulator */
-	sdev->vdac = devm_regulator_get(dev, "vdd33a_dac");
-	if (IS_ERR(sdev->vdac)) {
-		dev_err(dev, "failed to get regulator 'vdac'\n");
-		ret = PTR_ERR(sdev->vdac);
-		goto fail_fout_vpll;
-	}
-	sdev->vdet = devm_regulator_get(dev, "vdet");
-	if (IS_ERR(sdev->vdet)) {
-		dev_err(dev, "failed to get regulator 'vdet'\n");
-		ret = PTR_ERR(sdev->vdet);
-		goto fail_fout_vpll;
-	}
-
-	/* enable gate for dac clock, because mixer uses it */
-	ret = clk_prepare_enable(sdev->dac);
-	if (ret < 0) {
-		dev_err(dev, "clk_prepare_enable(dac) failed\n");
-		goto fail_fout_vpll;
-	}
-
-	/* configure power management */
-	pm_runtime_enable(dev);
-
-	/* configuration of interface subdevice */
-	v4l2_subdev_init(&sdev->sd, &sdo_sd_ops);
-	sdev->sd.owner = THIS_MODULE;
-	strlcpy(sdev->sd.name, "s5p-sdo", sizeof(sdev->sd.name));
-
-	/* set default format */
-	sdev->fmt = sdo_find_format(SDO_DEFAULT_STD);
-	BUG_ON(sdev->fmt == NULL);
-
-	/* keeping subdev in device's private for use by other drivers */
-	dev_set_drvdata(dev, &sdev->sd);
-
-	dev_info(dev, "probe succeeded\n");
-	return 0;
-
-fail_fout_vpll:
-	clk_put(sdev->fout_vpll);
-fail_dacphy:
-	clk_put(sdev->dacphy);
-fail_dac:
-	clk_put(sdev->dac);
-fail_sclk_dac:
-	clk_put(sdev->sclk_dac);
-fail:
-	dev_info(dev, "probe failed\n");
-	return ret;
-}
-
-static int sdo_remove(struct platform_device *pdev)
-{
-	struct v4l2_subdev *sd = dev_get_drvdata(&pdev->dev);
-	struct sdo_device *sdev = sd_to_sdev(sd);
-
-	pm_runtime_disable(&pdev->dev);
-	clk_disable_unprepare(sdev->dac);
-	clk_put(sdev->fout_vpll);
-	clk_put(sdev->dacphy);
-	clk_put(sdev->dac);
-	clk_put(sdev->sclk_dac);
-
-	dev_info(&pdev->dev, "remove successful\n");
-	return 0;
-}
-
-static struct platform_driver sdo_driver __refdata = {
-	.probe = sdo_probe,
-	.remove = sdo_remove,
-	.driver = {
-		.name = "s5p-sdo",
-		.pm = &sdo_pm_ops,
-	}
-};
-
-module_platform_driver(sdo_driver);
diff --git a/drivers/media/platform/s5p-tv/sii9234_drv.c b/drivers/media/platform/s5p-tv/sii9234_drv.c
deleted file mode 100644
index 0a97f9a..0000000
--- a/drivers/media/platform/s5p-tv/sii9234_drv.c
+++ /dev/null
@@ -1,407 +0,0 @@
-/*
- * Samsung MHL interface driver
- *
- * Copyright (C) 2011 Samsung Electronics Co.Ltd
- * Author: Tomasz Stanislawski <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/freezer.h>
-#include <linux/gpio.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kthread.h>
-#include <linux/module.h>
-#include <linux/pm_runtime.h>
-#include <linux/regulator/machine.h>
-#include <linux/slab.h>
-
-#include <linux/platform_data/media/sii9234.h>
-#include <media/v4l2-subdev.h>
-
-MODULE_AUTHOR("Tomasz Stanislawski <t.stanislaws@samsung.com>");
-MODULE_DESCRIPTION("Samsung MHL interface driver");
-MODULE_LICENSE("GPL");
-
-struct sii9234_context {
-	struct i2c_client *client;
-	struct regulator *power;
-	int gpio_n_reset;
-	struct v4l2_subdev sd;
-};
-
-static inline struct sii9234_context *sd_to_context(struct v4l2_subdev *sd)
-{
-	return container_of(sd, struct sii9234_context, sd);
-}
-
-static inline int sii9234_readb(struct i2c_client *client, int addr)
-{
-	return i2c_smbus_read_byte_data(client, addr);
-}
-
-static inline int sii9234_writeb(struct i2c_client *client, int addr, int value)
-{
-	return i2c_smbus_write_byte_data(client, addr, value);
-}
-
-static inline int sii9234_writeb_mask(struct i2c_client *client, int addr,
-	int value, int mask)
-{
-	int ret;
-
-	ret = i2c_smbus_read_byte_data(client, addr);
-	if (ret < 0)
-		return ret;
-	ret = (ret & ~mask) | (value & mask);
-	return i2c_smbus_write_byte_data(client, addr, ret);
-}
-
-static inline int sii9234_readb_idx(struct i2c_client *client, int addr)
-{
-	int ret;
-	ret = i2c_smbus_write_byte_data(client, 0xbc, addr >> 8);
-	if (ret < 0)
-		return ret;
-	ret = i2c_smbus_write_byte_data(client, 0xbd, addr & 0xff);
-	if (ret < 0)
-		return ret;
-	return i2c_smbus_read_byte_data(client, 0xbe);
-}
-
-static inline int sii9234_writeb_idx(struct i2c_client *client, int addr,
-	int value)
-{
-	int ret;
-	ret = i2c_smbus_write_byte_data(client, 0xbc, addr >> 8);
-	if (ret < 0)
-		return ret;
-	ret = i2c_smbus_write_byte_data(client, 0xbd, addr & 0xff);
-	if (ret < 0)
-		return ret;
-	ret = i2c_smbus_write_byte_data(client, 0xbe, value);
-	return ret;
-}
-
-static inline int sii9234_writeb_idx_mask(struct i2c_client *client, int addr,
-	int value, int mask)
-{
-	int ret;
-
-	ret = sii9234_readb_idx(client, addr);
-	if (ret < 0)
-		return ret;
-	ret = (ret & ~mask) | (value & mask);
-	return sii9234_writeb_idx(client, addr, ret);
-}
-
-static int sii9234_reset(struct sii9234_context *ctx)
-{
-	struct i2c_client *client = ctx->client;
-	struct device *dev = &client->dev;
-	int ret, tries;
-
-	gpio_direction_output(ctx->gpio_n_reset, 1);
-	mdelay(1);
-	gpio_direction_output(ctx->gpio_n_reset, 0);
-	mdelay(1);
-	gpio_direction_output(ctx->gpio_n_reset, 1);
-	mdelay(1);
-
-	/* going to TTPI mode */
-	ret = sii9234_writeb(client, 0xc7, 0);
-	if (ret < 0) {
-		dev_err(dev, "failed to set TTPI mode\n");
-		return ret;
-	}
-	for (tries = 0; tries < 100 ; ++tries) {
-		ret = sii9234_readb(client, 0x1b);
-		if (ret > 0)
-			break;
-		if (ret < 0) {
-			dev_err(dev, "failed to reset device\n");
-			return -EIO;
-		}
-		mdelay(1);
-	}
-	if (tries == 100) {
-		dev_err(dev, "maximal number of tries reached\n");
-		return -EIO;
-	}
-
-	return 0;
-}
-
-static int sii9234_verify_version(struct i2c_client *client)
-{
-	struct device *dev = &client->dev;
-	int family, rev, tpi_rev, dev_id, sub_id, hdcp, id;
-
-	family = sii9234_readb(client, 0x1b);
-	rev = sii9234_readb(client, 0x1c) & 0x0f;
-	tpi_rev = sii9234_readb(client, 0x1d) & 0x7f;
-	dev_id = sii9234_readb_idx(client, 0x0103);
-	sub_id = sii9234_readb_idx(client, 0x0102);
-	hdcp = sii9234_readb(client, 0x30);
-
-	if (family < 0 || rev < 0 || tpi_rev < 0 || dev_id < 0 ||
-		sub_id < 0 || hdcp < 0) {
-		dev_err(dev, "failed to read chip's version\n");
-		return -EIO;
-	}
-
-	id = (dev_id << 8) | sub_id;
-
-	dev_info(dev, "chip: SiL%02x family: %02x, rev: %02x\n",
-		id, family, rev);
-	dev_info(dev, "tpi_rev:%02x, hdcp: %02x\n", tpi_rev, hdcp);
-	if (id != 0x9234) {
-		dev_err(dev, "not supported chip\n");
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-static u8 data[][3] = {
-/* setup from driver created by doonsoo45.kim */
-	{ 0x01, 0x05, 0x04 }, /* Enable Auto soft reset on SCDT = 0 */
-	{ 0x01, 0x08, 0x35 }, /* Power Up TMDS Tx Core */
-	{ 0x01, 0x0d, 0x1c }, /* HDMI Transcode mode enable */
-	{ 0x01, 0x2b, 0x01 }, /* Enable HDCP Compliance workaround */
-	{ 0x01, 0x79, 0x40 }, /* daniel test...MHL_INT */
-	{ 0x01, 0x80, 0x34 }, /* Enable Rx PLL Clock Value */
-	{ 0x01, 0x90, 0x27 }, /* Enable CBUS discovery */
-	{ 0x01, 0x91, 0xe5 }, /* Skip RGND detection */
-	{ 0x01, 0x92, 0x46 }, /* Force MHD mode */
-	{ 0x01, 0x93, 0xdc }, /* Disable CBUS pull-up during RGND measurement */
-	{ 0x01, 0x94, 0x66 }, /* 1.8V CBUS VTH & GND threshold */
-	{ 0x01, 0x95, 0x31 }, /* RGND block & single discovery attempt */
-	{ 0x01, 0x96, 0x22 }, /* use 1K and 2K setting */
-	{ 0x01, 0xa0, 0x10 }, /* SIMG: Term mode */
-	{ 0x01, 0xa1, 0xfc }, /* Disable internal Mobile HD driver */
-	{ 0x01, 0xa3, 0xfa }, /* SIMG: Output Swing  default EB, 3x Clk Mult */
-	{ 0x01, 0xa5, 0x80 }, /* SIMG: RGND Hysterisis, 3x mode for Beast */
-	{ 0x01, 0xa6, 0x0c }, /* SIMG: Swing Offset */
-	{ 0x02, 0x3d, 0x3f }, /* Power up CVCC 1.2V core */
-	{ 0x03, 0x00, 0x00 }, /* SIMG: correcting HW default */
-	{ 0x03, 0x11, 0x01 }, /* Enable TxPLL Clock */
-	{ 0x03, 0x12, 0x15 }, /* Enable Tx Clock Path & Equalizer */
-	{ 0x03, 0x13, 0x60 }, /* SIMG: Set termination value */
-	{ 0x03, 0x14, 0xf0 }, /* SIMG: Change CKDT level */
-	{ 0x03, 0x17, 0x07 }, /* SIMG: PLL Calrefsel */
-	{ 0x03, 0x1a, 0x20 }, /* VCO Cal */
-	{ 0x03, 0x22, 0xe0 }, /* SIMG: Auto EQ */
-	{ 0x03, 0x23, 0xc0 }, /* SIMG: Auto EQ */
-	{ 0x03, 0x24, 0xa0 }, /* SIMG: Auto EQ */
-	{ 0x03, 0x25, 0x80 }, /* SIMG: Auto EQ */
-	{ 0x03, 0x26, 0x60 }, /* SIMG: Auto EQ */
-	{ 0x03, 0x27, 0x40 }, /* SIMG: Auto EQ */
-	{ 0x03, 0x28, 0x20 }, /* SIMG: Auto EQ */
-	{ 0x03, 0x29, 0x00 }, /* SIMG: Auto EQ */
-	{ 0x03, 0x31, 0x0b }, /* SIMG: Rx PLL BW value from I2C BW ~ 4MHz */
-	{ 0x03, 0x45, 0x06 }, /* SIMG: DPLL Mode */
-	{ 0x03, 0x4b, 0x06 }, /* SIMG: Correcting HW default */
-	{ 0x03, 0x4c, 0xa0 }, /* Manual zone control */
-	{ 0x03, 0x4d, 0x02 }, /* SIMG: PLL Mode Value (order is important) */
-};
-
-static int sii9234_set_internal(struct sii9234_context *ctx)
-{
-	struct i2c_client *client = ctx->client;
-	int i, ret;
-
-	for (i = 0; i < ARRAY_SIZE(data); ++i) {
-		int addr = (data[i][0] << 8) | data[i][1];
-		ret = sii9234_writeb_idx(client, addr, data[i][2]);
-		if (ret < 0)
-			return ret;
-	}
-	return 0;
-}
-
-static int sii9234_runtime_suspend(struct device *dev)
-{
-	struct v4l2_subdev *sd = dev_get_drvdata(dev);
-	struct sii9234_context *ctx = sd_to_context(sd);
-	struct i2c_client *client = ctx->client;
-
-	dev_info(dev, "suspend start\n");
-
-	sii9234_writeb_mask(client, 0x1e, 3, 3);
-	regulator_disable(ctx->power);
-
-	return 0;
-}
-
-static int sii9234_runtime_resume(struct device *dev)
-{
-	struct v4l2_subdev *sd = dev_get_drvdata(dev);
-	struct sii9234_context *ctx = sd_to_context(sd);
-	struct i2c_client *client = ctx->client;
-	int ret;
-
-	dev_info(dev, "resume start\n");
-	ret = regulator_enable(ctx->power);
-	if (ret < 0)
-		return ret;
-
-	ret = sii9234_reset(ctx);
-	if (ret)
-		goto fail;
-
-	/* enable tpi */
-	ret = sii9234_writeb_mask(client, 0x1e, 1, 0);
-	if (ret < 0)
-		goto fail;
-	ret = sii9234_set_internal(ctx);
-	if (ret < 0)
-		goto fail;
-
-	return 0;
-
-fail:
-	dev_err(dev, "failed to resume\n");
-	regulator_disable(ctx->power);
-
-	return ret;
-}
-
-static const struct dev_pm_ops sii9234_pm_ops = {
-	.runtime_suspend = sii9234_runtime_suspend,
-	.runtime_resume	 = sii9234_runtime_resume,
-};
-
-static int sii9234_s_power(struct v4l2_subdev *sd, int on)
-{
-	struct sii9234_context *ctx = sd_to_context(sd);
-	int ret;
-
-	if (on)
-		ret = pm_runtime_get_sync(&ctx->client->dev);
-	else
-		ret = pm_runtime_put(&ctx->client->dev);
-	/* only values < 0 indicate errors */
-	return ret < 0 ? ret : 0;
-}
-
-static int sii9234_s_stream(struct v4l2_subdev *sd, int enable)
-{
-	struct sii9234_context *ctx = sd_to_context(sd);
-
-	/* (dis/en)able TDMS output */
-	sii9234_writeb_mask(ctx->client, 0x1a, enable ? 0 : ~0 , 1 << 4);
-	return 0;
-}
-
-static const struct v4l2_subdev_core_ops sii9234_core_ops = {
-	.s_power =  sii9234_s_power,
-};
-
-static const struct v4l2_subdev_video_ops sii9234_video_ops = {
-	.s_stream =  sii9234_s_stream,
-};
-
-static const struct v4l2_subdev_ops sii9234_ops = {
-	.core = &sii9234_core_ops,
-	.video = &sii9234_video_ops,
-};
-
-static int sii9234_probe(struct i2c_client *client,
-			 const struct i2c_device_id *id)
-{
-	struct device *dev = &client->dev;
-	struct sii9234_platform_data *pdata = dev->platform_data;
-	struct sii9234_context *ctx;
-	int ret;
-
-	ctx = devm_kzalloc(&client->dev, sizeof(*ctx), GFP_KERNEL);
-	if (!ctx) {
-		dev_err(dev, "out of memory\n");
-		ret = -ENOMEM;
-		goto fail;
-	}
-	ctx->client = client;
-
-	ctx->power = devm_regulator_get(dev, "hdmi-en");
-	if (IS_ERR(ctx->power)) {
-		dev_err(dev, "failed to acquire regulator hdmi-en\n");
-		return PTR_ERR(ctx->power);
-	}
-
-	ctx->gpio_n_reset = pdata->gpio_n_reset;
-	ret = devm_gpio_request(dev, ctx->gpio_n_reset, "MHL_RST");
-	if (ret) {
-		dev_err(dev, "failed to acquire MHL_RST gpio\n");
-		return ret;
-	}
-
-	v4l2_i2c_subdev_init(&ctx->sd, client, &sii9234_ops);
-
-	pm_runtime_enable(dev);
-
-	/* enable device */
-	ret = pm_runtime_get_sync(dev);
-	if (ret)
-		goto fail_pm;
-
-	/* verify chip version */
-	ret = sii9234_verify_version(client);
-	if (ret)
-		goto fail_pm_get;
-
-	/* stop processing */
-	pm_runtime_put(dev);
-
-	dev_info(dev, "probe successful\n");
-
-	return 0;
-
-fail_pm_get:
-	pm_runtime_put_sync(dev);
-
-fail_pm:
-	pm_runtime_disable(dev);
-
-fail:
-	dev_err(dev, "probe failed\n");
-
-	return ret;
-}
-
-static int sii9234_remove(struct i2c_client *client)
-{
-	struct device *dev = &client->dev;
-
-	pm_runtime_disable(dev);
-
-	dev_info(dev, "remove successful\n");
-
-	return 0;
-}
-
-
-static const struct i2c_device_id sii9234_id[] = {
-	{ "SII9234", 0 },
-	{ },
-};
-
-MODULE_DEVICE_TABLE(i2c, sii9234_id);
-static struct i2c_driver sii9234_driver = {
-	.driver = {
-		.name	= "sii9234",
-		.pm = &sii9234_pm_ops,
-	},
-	.probe		= sii9234_probe,
-	.remove		= sii9234_remove,
-	.id_table = sii9234_id,
-};
-
-module_i2c_driver(sii9234_driver);
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index e1f39b4..ef2a519 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -362,7 +362,7 @@
 	spin_unlock_irqrestore(&vou_dev->lock, flags);
 }
 
-static struct vb2_ops sh_vou_qops = {
+static const struct vb2_ops sh_vou_qops = {
 	.queue_setup		= sh_vou_queue_setup,
 	.buf_prepare		= sh_vou_buf_prepare,
 	.buf_queue		= sh_vou_buf_queue,
@@ -937,7 +937,10 @@
 {
 	struct v4l2_rect *rect = &sel->r;
 	struct sh_vou_device *vou_dev = video_drvdata(file);
-	struct v4l2_crop sd_crop = {.type = V4L2_BUF_TYPE_VIDEO_OUTPUT};
+	struct v4l2_subdev_selection sd_sel = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.target = V4L2_SEL_TGT_COMPOSE,
+	};
 	struct v4l2_pix_format *pix = &vou_dev->pix;
 	struct sh_vou_geometry geo;
 	struct v4l2_subdev_format format = {
@@ -978,14 +981,14 @@
 	geo.in_height = pix->height;
 
 	/* Configure the encoder one-to-one, position at 0, ignore errors */
-	sd_crop.c.width = geo.output.width;
-	sd_crop.c.height = geo.output.height;
+	sd_sel.r.width = geo.output.width;
+	sd_sel.r.height = geo.output.height;
 	/*
-	 * We first issue a S_CROP, so that the subsequent S_FMT delivers the
+	 * We first issue a S_SELECTION, so that the subsequent S_FMT delivers the
 	 * final encoder configuration.
 	 */
-	v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
-				   s_crop, &sd_crop);
+	v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, pad,
+				   set_selection, NULL, &sd_sel);
 	format.format.width = geo.output.width;
 	format.format.height = geo.output.height;
 	ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, pad,
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
index 39f6641..86d7478 100644
--- a/drivers/media/platform/soc_camera/Kconfig
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -17,31 +17,6 @@
 	help
 	  This is a generic SoC camera platform driver, useful for testing
 
-config VIDEO_PXA27x
-	tristate "PXA27x Quick Capture Interface driver"
-	depends on VIDEO_DEV && PXA27x && SOC_CAMERA
-	select VIDEOBUF_DMA_SG
-	select SG_SPLIT
-	---help---
-	  This is a v4l2 driver for the PXA27x Quick Capture Interface
-
-config VIDEO_RCAR_VIN_OLD
-	tristate "R-Car Video Input (VIN) support (DEPRECATED)"
-	depends on VIDEO_DEV && SOC_CAMERA
-	depends on ARCH_RENESAS || COMPILE_TEST
-	depends on HAS_DMA
-	select VIDEOBUF2_DMA_CONTIG
-	select SOC_CAMERA_SCALE_CROP
-	---help---
-	  This is a v4l2 driver for the R-Car VIN Interface
-
-config VIDEO_SH_MOBILE_CSI2
-	tristate "SuperH Mobile MIPI CSI-2 Interface driver"
-	depends on VIDEO_DEV && SOC_CAMERA && HAVE_CLK
-	depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
-	---help---
-	  This is a v4l2 driver for the SuperH MIPI CSI-2 Interface
-
 config VIDEO_SH_MOBILE_CEU
 	tristate "SuperH Mobile CEU Interface driver"
 	depends on VIDEO_DEV && SOC_CAMERA && HAS_DMA && HAVE_CLK
diff --git a/drivers/media/platform/soc_camera/Makefile b/drivers/media/platform/soc_camera/Makefile
index 7703cb7..7633a0f 100644
--- a/drivers/media/platform/soc_camera/Makefile
+++ b/drivers/media/platform/soc_camera/Makefile
@@ -7,7 +7,4 @@
 
 # soc-camera host drivers have to be linked after camera drivers
 obj-$(CONFIG_VIDEO_ATMEL_ISI)		+= atmel-isi.o
-obj-$(CONFIG_VIDEO_PXA27x)		+= pxa_camera.o
 obj-$(CONFIG_VIDEO_SH_MOBILE_CEU)	+= sh_mobile_ceu_camera.o
-obj-$(CONFIG_VIDEO_SH_MOBILE_CSI2)	+= sh_mobile_csi2.o
-obj-$(CONFIG_VIDEO_RCAR_VIN_OLD)	+= rcar_vin.o
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 30211f6..46de657 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -536,7 +536,7 @@
 	pm_runtime_put(ici->v4l2_dev.dev);
 }
 
-static struct vb2_ops isi_video_qops = {
+static const struct vb2_ops isi_video_qops = {
 	.queue_setup		= queue_setup,
 	.buf_init		= buffer_init,
 	.buf_prepare		= buffer_prepare,
diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c
deleted file mode 100644
index 2aaf4a8..0000000
--- a/drivers/media/platform/soc_camera/pxa_camera.c
+++ /dev/null
@@ -1,1866 +0,0 @@
-/*
- * V4L2 Driver for PXA camera host
- *
- * Copyright (C) 2006, Sascha Hauer, Pengutronix
- * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/moduleparam.h>
-#include <linux/time.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/dma/pxa-dma.h>
-
-#include <media/v4l2-common.h>
-#include <media/v4l2-dev.h>
-#include <media/videobuf-dma-sg.h>
-#include <media/soc_camera.h>
-#include <media/drv-intf/soc_mediabus.h>
-#include <media/v4l2-of.h>
-
-#include <linux/videodev2.h>
-
-#include <linux/platform_data/media/camera-pxa.h>
-
-#define PXA_CAM_VERSION "0.0.6"
-#define PXA_CAM_DRV_NAME "pxa27x-camera"
-
-/* Camera Interface */
-#define CICR0		0x0000
-#define CICR1		0x0004
-#define CICR2		0x0008
-#define CICR3		0x000C
-#define CICR4		0x0010
-#define CISR		0x0014
-#define CIFR		0x0018
-#define CITOR		0x001C
-#define CIBR0		0x0028
-#define CIBR1		0x0030
-#define CIBR2		0x0038
-
-#define CICR0_DMAEN	(1 << 31)	/* DMA request enable */
-#define CICR0_PAR_EN	(1 << 30)	/* Parity enable */
-#define CICR0_SL_CAP_EN	(1 << 29)	/* Capture enable for slave mode */
-#define CICR0_ENB	(1 << 28)	/* Camera interface enable */
-#define CICR0_DIS	(1 << 27)	/* Camera interface disable */
-#define CICR0_SIM	(0x7 << 24)	/* Sensor interface mode mask */
-#define CICR0_TOM	(1 << 9)	/* Time-out mask */
-#define CICR0_RDAVM	(1 << 8)	/* Receive-data-available mask */
-#define CICR0_FEM	(1 << 7)	/* FIFO-empty mask */
-#define CICR0_EOLM	(1 << 6)	/* End-of-line mask */
-#define CICR0_PERRM	(1 << 5)	/* Parity-error mask */
-#define CICR0_QDM	(1 << 4)	/* Quick-disable mask */
-#define CICR0_CDM	(1 << 3)	/* Disable-done mask */
-#define CICR0_SOFM	(1 << 2)	/* Start-of-frame mask */
-#define CICR0_EOFM	(1 << 1)	/* End-of-frame mask */
-#define CICR0_FOM	(1 << 0)	/* FIFO-overrun mask */
-
-#define CICR1_TBIT	(1 << 31)	/* Transparency bit */
-#define CICR1_RGBT_CONV	(0x3 << 29)	/* RGBT conversion mask */
-#define CICR1_PPL	(0x7ff << 15)	/* Pixels per line mask */
-#define CICR1_RGB_CONV	(0x7 << 12)	/* RGB conversion mask */
-#define CICR1_RGB_F	(1 << 11)	/* RGB format */
-#define CICR1_YCBCR_F	(1 << 10)	/* YCbCr format */
-#define CICR1_RGB_BPP	(0x7 << 7)	/* RGB bis per pixel mask */
-#define CICR1_RAW_BPP	(0x3 << 5)	/* Raw bis per pixel mask */
-#define CICR1_COLOR_SP	(0x3 << 3)	/* Color space mask */
-#define CICR1_DW	(0x7 << 0)	/* Data width mask */
-
-#define CICR2_BLW	(0xff << 24)	/* Beginning-of-line pixel clock
-					   wait count mask */
-#define CICR2_ELW	(0xff << 16)	/* End-of-line pixel clock
-					   wait count mask */
-#define CICR2_HSW	(0x3f << 10)	/* Horizontal sync pulse width mask */
-#define CICR2_BFPW	(0x3f << 3)	/* Beginning-of-frame pixel clock
-					   wait count mask */
-#define CICR2_FSW	(0x7 << 0)	/* Frame stabilization
-					   wait count mask */
-
-#define CICR3_BFW	(0xff << 24)	/* Beginning-of-frame line clock
-					   wait count mask */
-#define CICR3_EFW	(0xff << 16)	/* End-of-frame line clock
-					   wait count mask */
-#define CICR3_VSW	(0x3f << 10)	/* Vertical sync pulse width mask */
-#define CICR3_BFPW	(0x3f << 3)	/* Beginning-of-frame pixel clock
-					   wait count mask */
-#define CICR3_LPF	(0x7ff << 0)	/* Lines per frame mask */
-
-#define CICR4_MCLK_DLY	(0x3 << 24)	/* MCLK Data Capture Delay mask */
-#define CICR4_PCLK_EN	(1 << 23)	/* Pixel clock enable */
-#define CICR4_PCP	(1 << 22)	/* Pixel clock polarity */
-#define CICR4_HSP	(1 << 21)	/* Horizontal sync polarity */
-#define CICR4_VSP	(1 << 20)	/* Vertical sync polarity */
-#define CICR4_MCLK_EN	(1 << 19)	/* MCLK enable */
-#define CICR4_FR_RATE	(0x7 << 8)	/* Frame rate mask */
-#define CICR4_DIV	(0xff << 0)	/* Clock divisor mask */
-
-#define CISR_FTO	(1 << 15)	/* FIFO time-out */
-#define CISR_RDAV_2	(1 << 14)	/* Channel 2 receive data available */
-#define CISR_RDAV_1	(1 << 13)	/* Channel 1 receive data available */
-#define CISR_RDAV_0	(1 << 12)	/* Channel 0 receive data available */
-#define CISR_FEMPTY_2	(1 << 11)	/* Channel 2 FIFO empty */
-#define CISR_FEMPTY_1	(1 << 10)	/* Channel 1 FIFO empty */
-#define CISR_FEMPTY_0	(1 << 9)	/* Channel 0 FIFO empty */
-#define CISR_EOL	(1 << 8)	/* End of line */
-#define CISR_PAR_ERR	(1 << 7)	/* Parity error */
-#define CISR_CQD	(1 << 6)	/* Camera interface quick disable */
-#define CISR_CDD	(1 << 5)	/* Camera interface disable done */
-#define CISR_SOF	(1 << 4)	/* Start of frame */
-#define CISR_EOF	(1 << 3)	/* End of frame */
-#define CISR_IFO_2	(1 << 2)	/* FIFO overrun for Channel 2 */
-#define CISR_IFO_1	(1 << 1)	/* FIFO overrun for Channel 1 */
-#define CISR_IFO_0	(1 << 0)	/* FIFO overrun for Channel 0 */
-
-#define CIFR_FLVL2	(0x7f << 23)	/* FIFO 2 level mask */
-#define CIFR_FLVL1	(0x7f << 16)	/* FIFO 1 level mask */
-#define CIFR_FLVL0	(0xff << 8)	/* FIFO 0 level mask */
-#define CIFR_THL_0	(0x3 << 4)	/* Threshold Level for Channel 0 FIFO */
-#define CIFR_RESET_F	(1 << 3)	/* Reset input FIFOs */
-#define CIFR_FEN2	(1 << 2)	/* FIFO enable for channel 2 */
-#define CIFR_FEN1	(1 << 1)	/* FIFO enable for channel 1 */
-#define CIFR_FEN0	(1 << 0)	/* FIFO enable for channel 0 */
-
-#define CICR0_SIM_MP	(0 << 24)
-#define CICR0_SIM_SP	(1 << 24)
-#define CICR0_SIM_MS	(2 << 24)
-#define CICR0_SIM_EP	(3 << 24)
-#define CICR0_SIM_ES	(4 << 24)
-
-#define CICR1_DW_VAL(x)   ((x) & CICR1_DW)	    /* Data bus width */
-#define CICR1_PPL_VAL(x)  (((x) << 15) & CICR1_PPL) /* Pixels per line */
-#define CICR1_COLOR_SP_VAL(x)	(((x) << 3) & CICR1_COLOR_SP)	/* color space */
-#define CICR1_RGB_BPP_VAL(x)	(((x) << 7) & CICR1_RGB_BPP)	/* bpp for rgb */
-#define CICR1_RGBT_CONV_VAL(x)	(((x) << 29) & CICR1_RGBT_CONV)	/* rgbt conv */
-
-#define CICR2_BLW_VAL(x)  (((x) << 24) & CICR2_BLW) /* Beginning-of-line pixel clock wait count */
-#define CICR2_ELW_VAL(x)  (((x) << 16) & CICR2_ELW) /* End-of-line pixel clock wait count */
-#define CICR2_HSW_VAL(x)  (((x) << 10) & CICR2_HSW) /* Horizontal sync pulse width */
-#define CICR2_BFPW_VAL(x) (((x) << 3) & CICR2_BFPW) /* Beginning-of-frame pixel clock wait count */
-#define CICR2_FSW_VAL(x)  (((x) << 0) & CICR2_FSW)  /* Frame stabilization wait count */
-
-#define CICR3_BFW_VAL(x)  (((x) << 24) & CICR3_BFW) /* Beginning-of-frame line clock wait count  */
-#define CICR3_EFW_VAL(x)  (((x) << 16) & CICR3_EFW) /* End-of-frame line clock wait count */
-#define CICR3_VSW_VAL(x)  (((x) << 11) & CICR3_VSW) /* Vertical sync pulse width */
-#define CICR3_LPF_VAL(x)  (((x) << 0) & CICR3_LPF)  /* Lines per frame */
-
-#define CICR0_IRQ_MASK (CICR0_TOM | CICR0_RDAVM | CICR0_FEM | CICR0_EOLM | \
-			CICR0_PERRM | CICR0_QDM | CICR0_CDM | CICR0_SOFM | \
-			CICR0_EOFM | CICR0_FOM)
-
-/*
- * Structures
- */
-enum pxa_camera_active_dma {
-	DMA_Y = 0x1,
-	DMA_U = 0x2,
-	DMA_V = 0x4,
-};
-
-/* buffer for one video frame */
-struct pxa_buffer {
-	/* common v4l buffer stuff -- must be first */
-	struct videobuf_buffer		vb;
-	u32	code;
-	/* our descriptor lists for Y, U and V channels */
-	struct dma_async_tx_descriptor	*descs[3];
-	dma_cookie_t			cookie[3];
-	struct scatterlist		*sg[3];
-	int				sg_len[3];
-	int				inwork;
-	enum pxa_camera_active_dma	active_dma;
-};
-
-struct pxa_camera_dev {
-	struct soc_camera_host	soc_host;
-	/*
-	 * PXA27x is only supposed to handle one camera on its Quick Capture
-	 * interface. If anyone ever builds hardware to enable more than
-	 * one camera, they will have to modify this driver too
-	 */
-	struct clk		*clk;
-
-	unsigned int		irq;
-	void __iomem		*base;
-
-	int			channels;
-	struct dma_chan		*dma_chans[3];
-
-	struct pxacamera_platform_data *pdata;
-	struct resource		*res;
-	unsigned long		platform_flags;
-	unsigned long		ciclk;
-	unsigned long		mclk;
-	u32			mclk_divisor;
-	u16			width_flags;	/* max 10 bits */
-
-	struct list_head	capture;
-
-	spinlock_t		lock;
-
-	struct pxa_buffer	*active;
-	struct tasklet_struct	task_eof;
-
-	u32			save_cicr[5];
-};
-
-struct pxa_cam {
-	unsigned long flags;
-};
-
-static const char *pxa_cam_driver_description = "PXA_Camera";
-
-static unsigned int vid_limit = 16;	/* Video memory limit, in Mb */
-
-/*
- *  Videobuf operations
- */
-static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
-			      unsigned int *size)
-{
-	struct soc_camera_device *icd = vq->priv_data;
-
-	dev_dbg(icd->parent, "count=%d, size=%d\n", *count, *size);
-
-	*size = icd->sizeimage;
-
-	if (0 == *count)
-		*count = 32;
-	if (*size * *count > vid_limit * 1024 * 1024)
-		*count = (vid_limit * 1024 * 1024) / *size;
-
-	return 0;
-}
-
-static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
-{
-	struct soc_camera_device *icd = vq->priv_data;
-	struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
-	int i;
-
-	BUG_ON(in_interrupt());
-
-	dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
-		&buf->vb, buf->vb.baddr, buf->vb.bsize);
-
-	/*
-	 * This waits until this buffer is out of danger, i.e., until it is no
-	 * longer in STATE_QUEUED or STATE_ACTIVE
-	 */
-	videobuf_waiton(vq, &buf->vb, 0, 0);
-
-	for (i = 0; i < 3 && buf->descs[i]; i++) {
-		dmaengine_desc_free(buf->descs[i]);
-		kfree(buf->sg[i]);
-		buf->descs[i] = NULL;
-		buf->sg[i] = NULL;
-		buf->sg_len[i] = 0;
-	}
-	videobuf_dma_unmap(vq->dev, dma);
-	videobuf_dma_free(dma);
-
-	buf->vb.state = VIDEOBUF_NEEDS_INIT;
-
-	dev_dbg(icd->parent, "%s end (vb=0x%p) 0x%08lx %d\n", __func__,
-		&buf->vb, buf->vb.baddr, buf->vb.bsize);
-}
-
-static void pxa_camera_dma_irq(struct pxa_camera_dev *pcdev,
-			       enum pxa_camera_active_dma act_dma);
-
-static void pxa_camera_dma_irq_y(void *data)
-{
-	struct pxa_camera_dev *pcdev = data;
-
-	pxa_camera_dma_irq(pcdev, DMA_Y);
-}
-
-static void pxa_camera_dma_irq_u(void *data)
-{
-	struct pxa_camera_dev *pcdev = data;
-
-	pxa_camera_dma_irq(pcdev, DMA_U);
-}
-
-static void pxa_camera_dma_irq_v(void *data)
-{
-	struct pxa_camera_dev *pcdev = data;
-
-	pxa_camera_dma_irq(pcdev, DMA_V);
-}
-
-/**
- * pxa_init_dma_channel - init dma descriptors
- * @pcdev: pxa camera device
- * @buf: pxa buffer to find pxa dma channel
- * @dma: dma video buffer
- * @channel: dma channel (0 => 'Y', 1 => 'U', 2 => 'V')
- * @cibr: camera Receive Buffer Register
- * @size: bytes to transfer
- * @offset: offset in videobuffer of the first byte to transfer
- *
- * Prepares the pxa dma descriptors to transfer one camera channel.
- *
- * Returns 0 if success or -ENOMEM if no memory is available
- */
-static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev,
-				struct pxa_buffer *buf,
-				struct videobuf_dmabuf *dma, int channel,
-				int cibr, int size, int offset)
-{
-	struct dma_chan *dma_chan = pcdev->dma_chans[channel];
-	struct scatterlist *sg = buf->sg[channel];
-	int sglen = buf->sg_len[channel];
-	struct dma_async_tx_descriptor *tx;
-
-	tx = dmaengine_prep_slave_sg(dma_chan, sg, sglen, DMA_DEV_TO_MEM,
-				     DMA_PREP_INTERRUPT | DMA_CTRL_REUSE);
-	if (!tx) {
-		dev_err(pcdev->soc_host.v4l2_dev.dev,
-			"dmaengine_prep_slave_sg failed\n");
-		goto fail;
-	}
-
-	tx->callback_param = pcdev;
-	switch (channel) {
-	case 0:
-		tx->callback = pxa_camera_dma_irq_y;
-		break;
-	case 1:
-		tx->callback = pxa_camera_dma_irq_u;
-		break;
-	case 2:
-		tx->callback = pxa_camera_dma_irq_v;
-		break;
-	}
-
-	buf->descs[channel] = tx;
-	return 0;
-fail:
-	kfree(sg);
-
-	dev_dbg(pcdev->soc_host.v4l2_dev.dev,
-		"%s (vb=0x%p) dma_tx=%p\n",
-		__func__, &buf->vb, tx);
-
-	return -ENOMEM;
-}
-
-static void pxa_videobuf_set_actdma(struct pxa_camera_dev *pcdev,
-				    struct pxa_buffer *buf)
-{
-	buf->active_dma = DMA_Y;
-	if (pcdev->channels == 3)
-		buf->active_dma |= DMA_U | DMA_V;
-}
-
-/*
- * Please check the DMA prepared buffer structure in :
- *   Documentation/video4linux/pxa_camera.txt
- * Please check also in pxa_camera_check_link_miss() to understand why DMA chain
- * modification while DMA chain is running will work anyway.
- */
-static int pxa_videobuf_prepare(struct videobuf_queue *vq,
-		struct videobuf_buffer *vb, enum v4l2_field field)
-{
-	struct soc_camera_device *icd = vq->priv_data;
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	struct pxa_camera_dev *pcdev = ici->priv;
-	struct device *dev = pcdev->soc_host.v4l2_dev.dev;
-	struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
-	int ret;
-	int size_y, size_u = 0, size_v = 0;
-	size_t sizes[3];
-
-	dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
-		vb, vb->baddr, vb->bsize);
-
-	/* Added list head initialization on alloc */
-	WARN_ON(!list_empty(&vb->queue));
-
-#ifdef DEBUG
-	/*
-	 * This can be useful if you want to see if we actually fill
-	 * the buffer with something
-	 */
-	memset((void *)vb->baddr, 0xaa, vb->bsize);
-#endif
-
-	BUG_ON(NULL == icd->current_fmt);
-
-	/*
-	 * I think, in buf_prepare you only have to protect global data,
-	 * the actual buffer is yours
-	 */
-	buf->inwork = 1;
-
-	if (buf->code	!= icd->current_fmt->code ||
-	    vb->width	!= icd->user_width ||
-	    vb->height	!= icd->user_height ||
-	    vb->field	!= field) {
-		buf->code	= icd->current_fmt->code;
-		vb->width	= icd->user_width;
-		vb->height	= icd->user_height;
-		vb->field	= field;
-		vb->state	= VIDEOBUF_NEEDS_INIT;
-	}
-
-	vb->size = icd->sizeimage;
-	if (0 != vb->baddr && vb->bsize < vb->size) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	if (vb->state == VIDEOBUF_NEEDS_INIT) {
-		int size = vb->size;
-		struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
-
-		ret = videobuf_iolock(vq, vb, NULL);
-		if (ret)
-			goto out;
-
-		if (pcdev->channels == 3) {
-			size_y = size / 2;
-			size_u = size_v = size / 4;
-		} else {
-			size_y = size;
-		}
-
-		sizes[0] = size_y;
-		sizes[1] = size_u;
-		sizes[2] = size_v;
-		ret = sg_split(dma->sglist, dma->sglen, 0, pcdev->channels,
-			       sizes, buf->sg, buf->sg_len, GFP_KERNEL);
-		if (ret < 0) {
-			dev_err(dev, "sg_split failed: %d\n", ret);
-			goto fail;
-		}
-
-		/* init DMA for Y channel */
-		ret = pxa_init_dma_channel(pcdev, buf, dma, 0, CIBR0,
-					   size_y, 0);
-		if (ret) {
-			dev_err(dev, "DMA initialization for Y/RGB failed\n");
-			goto fail;
-		}
-
-		/* init DMA for U channel */
-		if (size_u)
-			ret = pxa_init_dma_channel(pcdev, buf, dma, 1, CIBR1,
-						   size_u, size_y);
-		if (ret) {
-			dev_err(dev, "DMA initialization for U failed\n");
-			goto fail;
-		}
-
-		/* init DMA for V channel */
-		if (size_v)
-			ret = pxa_init_dma_channel(pcdev, buf, dma, 2, CIBR2,
-						   size_v, size_y + size_u);
-		if (ret) {
-			dev_err(dev, "DMA initialization for V failed\n");
-			goto fail;
-		}
-
-		vb->state = VIDEOBUF_PREPARED;
-	}
-
-	buf->inwork = 0;
-	pxa_videobuf_set_actdma(pcdev, buf);
-
-	return 0;
-
-fail:
-	free_buffer(vq, buf);
-out:
-	buf->inwork = 0;
-	return ret;
-}
-
-/**
- * pxa_dma_start_channels - start DMA channel for active buffer
- * @pcdev: pxa camera device
- *
- * Initialize DMA channels to the beginning of the active video buffer, and
- * start these channels.
- */
-static void pxa_dma_start_channels(struct pxa_camera_dev *pcdev)
-{
-	int i;
-	struct pxa_buffer *active;
-
-	active = pcdev->active;
-
-	for (i = 0; i < pcdev->channels; i++) {
-		dev_dbg(pcdev->soc_host.v4l2_dev.dev,
-			"%s (channel=%d)\n", __func__, i);
-		dma_async_issue_pending(pcdev->dma_chans[i]);
-	}
-}
-
-static void pxa_dma_stop_channels(struct pxa_camera_dev *pcdev)
-{
-	int i;
-
-	for (i = 0; i < pcdev->channels; i++) {
-		dev_dbg(pcdev->soc_host.v4l2_dev.dev,
-			"%s (channel=%d)\n", __func__, i);
-		dmaengine_terminate_all(pcdev->dma_chans[i]);
-	}
-}
-
-static void pxa_dma_add_tail_buf(struct pxa_camera_dev *pcdev,
-				 struct pxa_buffer *buf)
-{
-	int i;
-
-	for (i = 0; i < pcdev->channels; i++) {
-		buf->cookie[i] = dmaengine_submit(buf->descs[i]);
-		dev_dbg(pcdev->soc_host.v4l2_dev.dev,
-			"%s (channel=%d) : submit vb=%p cookie=%d\n",
-			__func__, i, buf, buf->descs[i]->cookie);
-	}
-}
-
-/**
- * pxa_camera_start_capture - start video capturing
- * @pcdev: camera device
- *
- * Launch capturing. DMA channels should not be active yet. They should get
- * activated at the end of frame interrupt, to capture only whole frames, and
- * never begin the capture of a partial frame.
- */
-static void pxa_camera_start_capture(struct pxa_camera_dev *pcdev)
-{
-	unsigned long cicr0;
-
-	dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__);
-	__raw_writel(__raw_readl(pcdev->base + CISR), pcdev->base + CISR);
-	/* Enable End-Of-Frame Interrupt */
-	cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_ENB;
-	cicr0 &= ~CICR0_EOFM;
-	__raw_writel(cicr0, pcdev->base + CICR0);
-}
-
-static void pxa_camera_stop_capture(struct pxa_camera_dev *pcdev)
-{
-	unsigned long cicr0;
-
-	pxa_dma_stop_channels(pcdev);
-
-	cicr0 = __raw_readl(pcdev->base + CICR0) & ~CICR0_ENB;
-	__raw_writel(cicr0, pcdev->base + CICR0);
-
-	pcdev->active = NULL;
-	dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__);
-}
-
-/* Called under spinlock_irqsave(&pcdev->lock, ...) */
-static void pxa_videobuf_queue(struct videobuf_queue *vq,
-			       struct videobuf_buffer *vb)
-{
-	struct soc_camera_device *icd = vq->priv_data;
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	struct pxa_camera_dev *pcdev = ici->priv;
-	struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
-
-	dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d active=%p\n",
-		__func__, vb, vb->baddr, vb->bsize, pcdev->active);
-
-	list_add_tail(&vb->queue, &pcdev->capture);
-
-	vb->state = VIDEOBUF_ACTIVE;
-	pxa_dma_add_tail_buf(pcdev, buf);
-
-	if (!pcdev->active)
-		pxa_camera_start_capture(pcdev);
-}
-
-static void pxa_videobuf_release(struct videobuf_queue *vq,
-				 struct videobuf_buffer *vb)
-{
-	struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
-#ifdef DEBUG
-	struct soc_camera_device *icd = vq->priv_data;
-	struct device *dev = icd->parent;
-
-	dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
-		vb, vb->baddr, vb->bsize);
-
-	switch (vb->state) {
-	case VIDEOBUF_ACTIVE:
-		dev_dbg(dev, "%s (active)\n", __func__);
-		break;
-	case VIDEOBUF_QUEUED:
-		dev_dbg(dev, "%s (queued)\n", __func__);
-		break;
-	case VIDEOBUF_PREPARED:
-		dev_dbg(dev, "%s (prepared)\n", __func__);
-		break;
-	default:
-		dev_dbg(dev, "%s (unknown)\n", __func__);
-		break;
-	}
-#endif
-
-	free_buffer(vq, buf);
-}
-
-static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
-			      struct videobuf_buffer *vb,
-			      struct pxa_buffer *buf)
-{
-	/* _init is used to debug races, see comment in pxa_camera_reqbufs() */
-	list_del_init(&vb->queue);
-	vb->state = VIDEOBUF_DONE;
-	v4l2_get_timestamp(&vb->ts);
-	vb->field_count++;
-	wake_up(&vb->done);
-	dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s dequeud buffer (vb=0x%p)\n",
-		__func__, vb);
-
-	if (list_empty(&pcdev->capture)) {
-		pxa_camera_stop_capture(pcdev);
-		return;
-	}
-
-	pcdev->active = list_entry(pcdev->capture.next,
-				   struct pxa_buffer, vb.queue);
-}
-
-/**
- * pxa_camera_check_link_miss - check missed DMA linking
- * @pcdev: camera device
- *
- * The DMA chaining is done with DMA running. This means a tiny temporal window
- * remains, where a buffer is queued on the chain, while the chain is already
- * stopped. This means the tailed buffer would never be transferred by DMA.
- * This function restarts the capture for this corner case, where :
- *  - DADR() == DADDR_STOP
- *  - a videobuffer is queued on the pcdev->capture list
- *
- * Please check the "DMA hot chaining timeslice issue" in
- *   Documentation/video4linux/pxa_camera.txt
- *
- * Context: should only be called within the dma irq handler
- */
-static void pxa_camera_check_link_miss(struct pxa_camera_dev *pcdev,
-				       dma_cookie_t last_submitted,
-				       dma_cookie_t last_issued)
-{
-	bool is_dma_stopped = last_submitted != last_issued;
-
-	dev_dbg(pcdev->soc_host.v4l2_dev.dev,
-		"%s : top queued buffer=%p, is_dma_stopped=%d\n",
-		__func__, pcdev->active, is_dma_stopped);
-
-	if (pcdev->active && is_dma_stopped)
-		pxa_camera_start_capture(pcdev);
-}
-
-static void pxa_camera_dma_irq(struct pxa_camera_dev *pcdev,
-			       enum pxa_camera_active_dma act_dma)
-{
-	struct device *dev = pcdev->soc_host.v4l2_dev.dev;
-	struct pxa_buffer *buf, *last_buf;
-	unsigned long flags;
-	u32 camera_status, overrun;
-	int chan;
-	struct videobuf_buffer *vb;
-	enum dma_status last_status;
-	dma_cookie_t last_issued;
-
-	spin_lock_irqsave(&pcdev->lock, flags);
-
-	camera_status = __raw_readl(pcdev->base + CISR);
-	dev_dbg(dev, "camera dma irq, cisr=0x%x dma=%d\n",
-		camera_status, act_dma);
-	overrun = CISR_IFO_0;
-	if (pcdev->channels == 3)
-		overrun |= CISR_IFO_1 | CISR_IFO_2;
-
-	/*
-	 * pcdev->active should not be NULL in DMA irq handler.
-	 *
-	 * But there is one corner case : if capture was stopped due to an
-	 * overrun of channel 1, and at that same channel 2 was completed.
-	 *
-	 * When handling the overrun in DMA irq for channel 1, we'll stop the
-	 * capture and restart it (and thus set pcdev->active to NULL). But the
-	 * DMA irq handler will already be pending for channel 2. So on entering
-	 * the DMA irq handler for channel 2 there will be no active buffer, yet
-	 * that is normal.
-	 */
-	if (!pcdev->active)
-		goto out;
-
-	vb = &pcdev->active->vb;
-	buf = container_of(vb, struct pxa_buffer, vb);
-	WARN_ON(buf->inwork || list_empty(&vb->queue));
-
-	/*
-	 * It's normal if the last frame creates an overrun, as there
-	 * are no more DMA descriptors to fetch from QCI fifos
-	 */
-	switch (act_dma) {
-	case DMA_U:
-		chan = 1;
-		break;
-	case DMA_V:
-		chan = 2;
-		break;
-	default:
-		chan = 0;
-		break;
-	}
-	last_buf = list_entry(pcdev->capture.prev,
-			      struct pxa_buffer, vb.queue);
-	last_status = dma_async_is_tx_complete(pcdev->dma_chans[chan],
-					       last_buf->cookie[chan],
-					       NULL, &last_issued);
-	if (camera_status & overrun &&
-	    last_status != DMA_COMPLETE) {
-		dev_dbg(dev, "FIFO overrun! CISR: %x\n",
-			camera_status);
-		pxa_camera_stop_capture(pcdev);
-		list_for_each_entry(buf, &pcdev->capture, vb.queue)
-			pxa_dma_add_tail_buf(pcdev, buf);
-		pxa_camera_start_capture(pcdev);
-		goto out;
-	}
-	buf->active_dma &= ~act_dma;
-	if (!buf->active_dma) {
-		pxa_camera_wakeup(pcdev, vb, buf);
-		pxa_camera_check_link_miss(pcdev, last_buf->cookie[chan],
-					   last_issued);
-	}
-
-out:
-	spin_unlock_irqrestore(&pcdev->lock, flags);
-}
-
-static struct videobuf_queue_ops pxa_videobuf_ops = {
-	.buf_setup      = pxa_videobuf_setup,
-	.buf_prepare    = pxa_videobuf_prepare,
-	.buf_queue      = pxa_videobuf_queue,
-	.buf_release    = pxa_videobuf_release,
-};
-
-static void pxa_camera_init_videobuf(struct videobuf_queue *q,
-			      struct soc_camera_device *icd)
-{
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	struct pxa_camera_dev *pcdev = ici->priv;
-
-	/*
-	 * We must pass NULL as dev pointer, then all pci_* dma operations
-	 * transform to normal dma_* ones.
-	 */
-	videobuf_queue_sg_init(q, &pxa_videobuf_ops, NULL, &pcdev->lock,
-				V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
-				sizeof(struct pxa_buffer), icd, &ici->host_lock);
-}
-
-static u32 mclk_get_divisor(struct platform_device *pdev,
-			    struct pxa_camera_dev *pcdev)
-{
-	unsigned long mclk = pcdev->mclk;
-	struct device *dev = &pdev->dev;
-	u32 div;
-	unsigned long lcdclk;
-
-	lcdclk = clk_get_rate(pcdev->clk);
-	pcdev->ciclk = lcdclk;
-
-	/* mclk <= ciclk / 4 (27.4.2) */
-	if (mclk > lcdclk / 4) {
-		mclk = lcdclk / 4;
-		dev_warn(dev, "Limiting master clock to %lu\n", mclk);
-	}
-
-	/* We verify mclk != 0, so if anyone breaks it, here comes their Oops */
-	div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1;
-
-	/* If we're not supplying MCLK, leave it at 0 */
-	if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
-		pcdev->mclk = lcdclk / (2 * (div + 1));
-
-	dev_dbg(dev, "LCD clock %luHz, target freq %luHz, divisor %u\n",
-		lcdclk, mclk, div);
-
-	return div;
-}
-
-static void recalculate_fifo_timeout(struct pxa_camera_dev *pcdev,
-				     unsigned long pclk)
-{
-	/* We want a timeout > 1 pixel time, not ">=" */
-	u32 ciclk_per_pixel = pcdev->ciclk / pclk + 1;
-
-	__raw_writel(ciclk_per_pixel, pcdev->base + CITOR);
-}
-
-static void pxa_camera_activate(struct pxa_camera_dev *pcdev)
-{
-	u32 cicr4 = 0;
-
-	/* disable all interrupts */
-	__raw_writel(0x3ff, pcdev->base + CICR0);
-
-	if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
-		cicr4 |= CICR4_PCLK_EN;
-	if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
-		cicr4 |= CICR4_MCLK_EN;
-	if (pcdev->platform_flags & PXA_CAMERA_PCP)
-		cicr4 |= CICR4_PCP;
-	if (pcdev->platform_flags & PXA_CAMERA_HSP)
-		cicr4 |= CICR4_HSP;
-	if (pcdev->platform_flags & PXA_CAMERA_VSP)
-		cicr4 |= CICR4_VSP;
-
-	__raw_writel(pcdev->mclk_divisor | cicr4, pcdev->base + CICR4);
-
-	if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
-		/* Initialise the timeout under the assumption pclk = mclk */
-		recalculate_fifo_timeout(pcdev, pcdev->mclk);
-	else
-		/* "Safe default" - 13MHz */
-		recalculate_fifo_timeout(pcdev, 13000000);
-
-	clk_prepare_enable(pcdev->clk);
-}
-
-static void pxa_camera_deactivate(struct pxa_camera_dev *pcdev)
-{
-	clk_disable_unprepare(pcdev->clk);
-}
-
-static void pxa_camera_eof(unsigned long arg)
-{
-	struct pxa_camera_dev *pcdev = (struct pxa_camera_dev *)arg;
-	unsigned long cifr;
-	struct pxa_buffer *buf;
-	struct videobuf_buffer *vb;
-
-	dev_dbg(pcdev->soc_host.v4l2_dev.dev,
-		"Camera interrupt status 0x%x\n",
-		__raw_readl(pcdev->base + CISR));
-
-	/* Reset the FIFOs */
-	cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F;
-	__raw_writel(cifr, pcdev->base + CIFR);
-
-	pcdev->active = list_first_entry(&pcdev->capture,
-					 struct pxa_buffer, vb.queue);
-	vb = &pcdev->active->vb;
-	buf = container_of(vb, struct pxa_buffer, vb);
-	pxa_videobuf_set_actdma(pcdev, buf);
-
-	pxa_dma_start_channels(pcdev);
-}
-
-static irqreturn_t pxa_camera_irq(int irq, void *data)
-{
-	struct pxa_camera_dev *pcdev = data;
-	unsigned long status, cicr0;
-
-	status = __raw_readl(pcdev->base + CISR);
-	dev_dbg(pcdev->soc_host.v4l2_dev.dev,
-		"Camera interrupt status 0x%lx\n", status);
-
-	if (!status)
-		return IRQ_NONE;
-
-	__raw_writel(status, pcdev->base + CISR);
-
-	if (status & CISR_EOF) {
-		cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_EOFM;
-		__raw_writel(cicr0, pcdev->base + CICR0);
-		tasklet_schedule(&pcdev->task_eof);
-	}
-
-	return IRQ_HANDLED;
-}
-
-static int pxa_camera_add_device(struct soc_camera_device *icd)
-{
-	dev_info(icd->parent, "PXA Camera driver attached to camera %d\n",
-		 icd->devnum);
-
-	return 0;
-}
-
-static void pxa_camera_remove_device(struct soc_camera_device *icd)
-{
-	dev_info(icd->parent, "PXA Camera driver detached from camera %d\n",
-		 icd->devnum);
-}
-
-/*
- * The following two functions absolutely depend on the fact, that
- * there can be only one camera on PXA quick capture interface
- * Called with .host_lock held
- */
-static int pxa_camera_clock_start(struct soc_camera_host *ici)
-{
-	struct pxa_camera_dev *pcdev = ici->priv;
-
-	pxa_camera_activate(pcdev);
-
-	return 0;
-}
-
-/* Called with .host_lock held */
-static void pxa_camera_clock_stop(struct soc_camera_host *ici)
-{
-	struct pxa_camera_dev *pcdev = ici->priv;
-
-	/* disable capture, disable interrupts */
-	__raw_writel(0x3ff, pcdev->base + CICR0);
-
-	/* Stop DMA engine */
-	pxa_dma_stop_channels(pcdev);
-	pxa_camera_deactivate(pcdev);
-}
-
-static int test_platform_param(struct pxa_camera_dev *pcdev,
-			       unsigned char buswidth, unsigned long *flags)
-{
-	/*
-	 * Platform specified synchronization and pixel clock polarities are
-	 * only a recommendation and are only used during probing. The PXA270
-	 * quick capture interface supports both.
-	 */
-	*flags = (pcdev->platform_flags & PXA_CAMERA_MASTER ?
-		  V4L2_MBUS_MASTER : V4L2_MBUS_SLAVE) |
-		V4L2_MBUS_HSYNC_ACTIVE_HIGH |
-		V4L2_MBUS_HSYNC_ACTIVE_LOW |
-		V4L2_MBUS_VSYNC_ACTIVE_HIGH |
-		V4L2_MBUS_VSYNC_ACTIVE_LOW |
-		V4L2_MBUS_DATA_ACTIVE_HIGH |
-		V4L2_MBUS_PCLK_SAMPLE_RISING |
-		V4L2_MBUS_PCLK_SAMPLE_FALLING;
-
-	/* If requested data width is supported by the platform, use it */
-	if ((1 << (buswidth - 1)) & pcdev->width_flags)
-		return 0;
-
-	return -EINVAL;
-}
-
-static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
-				  unsigned long flags, __u32 pixfmt)
-{
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	struct pxa_camera_dev *pcdev = ici->priv;
-	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-	unsigned long dw, bpp;
-	u32 cicr0, cicr1, cicr2, cicr3, cicr4 = 0, y_skip_top;
-	int ret = v4l2_subdev_call(sd, sensor, g_skip_top_lines, &y_skip_top);
-
-	if (ret < 0)
-		y_skip_top = 0;
-
-	/*
-	 * Datawidth is now guaranteed to be equal to one of the three values.
-	 * We fix bit-per-pixel equal to data-width...
-	 */
-	switch (icd->current_fmt->host_fmt->bits_per_sample) {
-	case 10:
-		dw = 4;
-		bpp = 0x40;
-		break;
-	case 9:
-		dw = 3;
-		bpp = 0x20;
-		break;
-	default:
-		/*
-		 * Actually it can only be 8 now,
-		 * default is just to silence compiler warnings
-		 */
-	case 8:
-		dw = 2;
-		bpp = 0;
-	}
-
-	if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
-		cicr4 |= CICR4_PCLK_EN;
-	if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
-		cicr4 |= CICR4_MCLK_EN;
-	if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
-		cicr4 |= CICR4_PCP;
-	if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
-		cicr4 |= CICR4_HSP;
-	if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
-		cicr4 |= CICR4_VSP;
-
-	cicr0 = __raw_readl(pcdev->base + CICR0);
-	if (cicr0 & CICR0_ENB)
-		__raw_writel(cicr0 & ~CICR0_ENB, pcdev->base + CICR0);
-
-	cicr1 = CICR1_PPL_VAL(icd->user_width - 1) | bpp | dw;
-
-	switch (pixfmt) {
-	case V4L2_PIX_FMT_YUV422P:
-		pcdev->channels = 3;
-		cicr1 |= CICR1_YCBCR_F;
-		/*
-		 * Normally, pxa bus wants as input UYVY format. We allow all
-		 * reorderings of the YUV422 format, as no processing is done,
-		 * and the YUV stream is just passed through without any
-		 * transformation. Note that UYVY is the only format that
-		 * should be used if pxa framebuffer Overlay2 is used.
-		 */
-	case V4L2_PIX_FMT_UYVY:
-	case V4L2_PIX_FMT_VYUY:
-	case V4L2_PIX_FMT_YUYV:
-	case V4L2_PIX_FMT_YVYU:
-		cicr1 |= CICR1_COLOR_SP_VAL(2);
-		break;
-	case V4L2_PIX_FMT_RGB555:
-		cicr1 |= CICR1_RGB_BPP_VAL(1) | CICR1_RGBT_CONV_VAL(2) |
-			CICR1_TBIT | CICR1_COLOR_SP_VAL(1);
-		break;
-	case V4L2_PIX_FMT_RGB565:
-		cicr1 |= CICR1_COLOR_SP_VAL(1) | CICR1_RGB_BPP_VAL(2);
-		break;
-	}
-
-	cicr2 = 0;
-	cicr3 = CICR3_LPF_VAL(icd->user_height - 1) |
-		CICR3_BFW_VAL(min((u32)255, y_skip_top));
-	cicr4 |= pcdev->mclk_divisor;
-
-	__raw_writel(cicr1, pcdev->base + CICR1);
-	__raw_writel(cicr2, pcdev->base + CICR2);
-	__raw_writel(cicr3, pcdev->base + CICR3);
-	__raw_writel(cicr4, pcdev->base + CICR4);
-
-	/* CIF interrupts are not used, only DMA */
-	cicr0 = (cicr0 & CICR0_ENB) | (pcdev->platform_flags & PXA_CAMERA_MASTER ?
-		CICR0_SIM_MP : (CICR0_SL_CAP_EN | CICR0_SIM_SP));
-	cicr0 |= CICR0_DMAEN | CICR0_IRQ_MASK;
-	__raw_writel(cicr0, pcdev->base + CICR0);
-}
-
-static int pxa_camera_set_bus_param(struct soc_camera_device *icd)
-{
-	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	struct pxa_camera_dev *pcdev = ici->priv;
-	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
-	u32 pixfmt = icd->current_fmt->host_fmt->fourcc;
-	unsigned long bus_flags, common_flags;
-	int ret;
-	struct pxa_cam *cam = icd->host_priv;
-
-	ret = test_platform_param(pcdev, icd->current_fmt->host_fmt->bits_per_sample,
-				  &bus_flags);
-	if (ret < 0)
-		return ret;
-
-	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
-	if (!ret) {
-		common_flags = soc_mbus_config_compatible(&cfg,
-							  bus_flags);
-		if (!common_flags) {
-			dev_warn(icd->parent,
-				 "Flags incompatible: camera 0x%x, host 0x%lx\n",
-				 cfg.flags, bus_flags);
-			return -EINVAL;
-		}
-	} else if (ret != -ENOIOCTLCMD) {
-		return ret;
-	} else {
-		common_flags = bus_flags;
-	}
-
-	pcdev->channels = 1;
-
-	/* Make choises, based on platform preferences */
-	if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
-	    (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
-		if (pcdev->platform_flags & PXA_CAMERA_HSP)
-			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
-		else
-			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
-	}
-
-	if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
-	    (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
-		if (pcdev->platform_flags & PXA_CAMERA_VSP)
-			common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
-		else
-			common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
-	}
-
-	if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
-	    (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
-		if (pcdev->platform_flags & PXA_CAMERA_PCP)
-			common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
-		else
-			common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
-	}
-
-	cfg.flags = common_flags;
-	ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
-	if (ret < 0 && ret != -ENOIOCTLCMD) {
-		dev_dbg(icd->parent, "camera s_mbus_config(0x%lx) returned %d\n",
-			common_flags, ret);
-		return ret;
-	}
-
-	cam->flags = common_flags;
-
-	pxa_camera_setup_cicr(icd, common_flags, pixfmt);
-
-	return 0;
-}
-
-static int pxa_camera_try_bus_param(struct soc_camera_device *icd,
-				    unsigned char buswidth)
-{
-	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	struct pxa_camera_dev *pcdev = ici->priv;
-	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
-	unsigned long bus_flags, common_flags;
-	int ret = test_platform_param(pcdev, buswidth, &bus_flags);
-
-	if (ret < 0)
-		return ret;
-
-	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
-	if (!ret) {
-		common_flags = soc_mbus_config_compatible(&cfg,
-							  bus_flags);
-		if (!common_flags) {
-			dev_warn(icd->parent,
-				 "Flags incompatible: camera 0x%x, host 0x%lx\n",
-				 cfg.flags, bus_flags);
-			return -EINVAL;
-		}
-	} else if (ret == -ENOIOCTLCMD) {
-		ret = 0;
-	}
-
-	return ret;
-}
-
-static const struct soc_mbus_pixelfmt pxa_camera_formats[] = {
-	{
-		.fourcc			= V4L2_PIX_FMT_YUV422P,
-		.name			= "Planar YUV422 16 bit",
-		.bits_per_sample	= 8,
-		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
-		.order			= SOC_MBUS_ORDER_LE,
-		.layout			= SOC_MBUS_LAYOUT_PLANAR_2Y_U_V,
-	},
-};
-
-/* This will be corrected as we get more formats */
-static bool pxa_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt)
-{
-	return	fmt->packing == SOC_MBUS_PACKING_NONE ||
-		(fmt->bits_per_sample == 8 &&
-		 fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
-		(fmt->bits_per_sample > 8 &&
-		 fmt->packing == SOC_MBUS_PACKING_EXTEND16);
-}
-
-static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int idx,
-				  struct soc_camera_format_xlate *xlate)
-{
-	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-	struct device *dev = icd->parent;
-	int formats = 0, ret;
-	struct pxa_cam *cam;
-	struct v4l2_subdev_mbus_code_enum code = {
-		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
-		.index = idx,
-	};
-	const struct soc_mbus_pixelfmt *fmt;
-
-	ret = v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code);
-	if (ret < 0)
-		/* No more formats */
-		return 0;
-
-	fmt = soc_mbus_get_fmtdesc(code.code);
-	if (!fmt) {
-		dev_err(dev, "Invalid format code #%u: %d\n", idx, code.code);
-		return 0;
-	}
-
-	/* This also checks support for the requested bits-per-sample */
-	ret = pxa_camera_try_bus_param(icd, fmt->bits_per_sample);
-	if (ret < 0)
-		return 0;
-
-	if (!icd->host_priv) {
-		cam = kzalloc(sizeof(*cam), GFP_KERNEL);
-		if (!cam)
-			return -ENOMEM;
-
-		icd->host_priv = cam;
-	} else {
-		cam = icd->host_priv;
-	}
-
-	switch (code.code) {
-	case MEDIA_BUS_FMT_UYVY8_2X8:
-		formats++;
-		if (xlate) {
-			xlate->host_fmt	= &pxa_camera_formats[0];
-			xlate->code	= code.code;
-			xlate++;
-			dev_dbg(dev, "Providing format %s using code %d\n",
-				pxa_camera_formats[0].name, code.code);
-		}
-	case MEDIA_BUS_FMT_VYUY8_2X8:
-	case MEDIA_BUS_FMT_YUYV8_2X8:
-	case MEDIA_BUS_FMT_YVYU8_2X8:
-	case MEDIA_BUS_FMT_RGB565_2X8_LE:
-	case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
-		if (xlate)
-			dev_dbg(dev, "Providing format %s packed\n",
-				fmt->name);
-		break;
-	default:
-		if (!pxa_camera_packing_supported(fmt))
-			return 0;
-		if (xlate)
-			dev_dbg(dev,
-				"Providing format %s in pass-through mode\n",
-				fmt->name);
-	}
-
-	/* Generic pass-through */
-	formats++;
-	if (xlate) {
-		xlate->host_fmt	= fmt;
-		xlate->code	= code.code;
-		xlate++;
-	}
-
-	return formats;
-}
-
-static void pxa_camera_put_formats(struct soc_camera_device *icd)
-{
-	kfree(icd->host_priv);
-	icd->host_priv = NULL;
-}
-
-static int pxa_camera_check_frame(u32 width, u32 height)
-{
-	/* limit to pxa hardware capabilities */
-	return height < 32 || height > 2048 || width < 48 || width > 2048 ||
-		(width & 0x01);
-}
-
-static int pxa_camera_set_crop(struct soc_camera_device *icd,
-			       const struct v4l2_crop *a)
-{
-	const struct v4l2_rect *rect = &a->c;
-	struct device *dev = icd->parent;
-	struct soc_camera_host *ici = to_soc_camera_host(dev);
-	struct pxa_camera_dev *pcdev = ici->priv;
-	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-	struct soc_camera_sense sense = {
-		.master_clock = pcdev->mclk,
-		.pixel_clock_max = pcdev->ciclk / 4,
-	};
-	struct v4l2_subdev_format fmt = {
-		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
-	};
-	struct v4l2_mbus_framefmt *mf = &fmt.format;
-	struct pxa_cam *cam = icd->host_priv;
-	u32 fourcc = icd->current_fmt->host_fmt->fourcc;
-	int ret;
-
-	/* If PCLK is used to latch data from the sensor, check sense */
-	if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
-		icd->sense = &sense;
-
-	ret = v4l2_subdev_call(sd, video, s_crop, a);
-
-	icd->sense = NULL;
-
-	if (ret < 0) {
-		dev_warn(dev, "Failed to crop to %ux%u@%u:%u\n",
-			 rect->width, rect->height, rect->left, rect->top);
-		return ret;
-	}
-
-	ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
-	if (ret < 0)
-		return ret;
-
-	if (pxa_camera_check_frame(mf->width, mf->height)) {
-		/*
-		 * Camera cropping produced a frame beyond our capabilities.
-		 * FIXME: just extract a subframe, that we can process.
-		 */
-		v4l_bound_align_image(&mf->width, 48, 2048, 1,
-			&mf->height, 32, 2048, 0,
-			fourcc == V4L2_PIX_FMT_YUV422P ? 4 : 0);
-		ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &fmt);
-		if (ret < 0)
-			return ret;
-
-		if (pxa_camera_check_frame(mf->width, mf->height)) {
-			dev_warn(icd->parent,
-				 "Inconsistent state. Use S_FMT to repair\n");
-			return -EINVAL;
-		}
-	}
-
-	if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) {
-		if (sense.pixel_clock > sense.pixel_clock_max) {
-			dev_err(dev,
-				"pixel clock %lu set by the camera too high!",
-				sense.pixel_clock);
-			return -EIO;
-		}
-		recalculate_fifo_timeout(pcdev, sense.pixel_clock);
-	}
-
-	icd->user_width		= mf->width;
-	icd->user_height	= mf->height;
-
-	pxa_camera_setup_cicr(icd, cam->flags, fourcc);
-
-	return ret;
-}
-
-static int pxa_camera_set_fmt(struct soc_camera_device *icd,
-			      struct v4l2_format *f)
-{
-	struct device *dev = icd->parent;
-	struct soc_camera_host *ici = to_soc_camera_host(dev);
-	struct pxa_camera_dev *pcdev = ici->priv;
-	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-	const struct soc_camera_format_xlate *xlate = NULL;
-	struct soc_camera_sense sense = {
-		.master_clock = pcdev->mclk,
-		.pixel_clock_max = pcdev->ciclk / 4,
-	};
-	struct v4l2_pix_format *pix = &f->fmt.pix;
-	struct v4l2_subdev_format format = {
-		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
-	};
-	struct v4l2_mbus_framefmt *mf = &format.format;
-	int ret;
-
-	xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
-	if (!xlate) {
-		dev_warn(dev, "Format %x not found\n", pix->pixelformat);
-		return -EINVAL;
-	}
-
-	/* If PCLK is used to latch data from the sensor, check sense */
-	if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
-		/* The caller holds a mutex. */
-		icd->sense = &sense;
-
-	mf->width	= pix->width;
-	mf->height	= pix->height;
-	mf->field	= pix->field;
-	mf->colorspace	= pix->colorspace;
-	mf->code	= xlate->code;
-
-	ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &format);
-
-	if (mf->code != xlate->code)
-		return -EINVAL;
-
-	icd->sense = NULL;
-
-	if (ret < 0) {
-		dev_warn(dev, "Failed to configure for format %x\n",
-			 pix->pixelformat);
-	} else if (pxa_camera_check_frame(mf->width, mf->height)) {
-		dev_warn(dev,
-			 "Camera driver produced an unsupported frame %dx%d\n",
-			 mf->width, mf->height);
-		ret = -EINVAL;
-	} else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) {
-		if (sense.pixel_clock > sense.pixel_clock_max) {
-			dev_err(dev,
-				"pixel clock %lu set by the camera too high!",
-				sense.pixel_clock);
-			return -EIO;
-		}
-		recalculate_fifo_timeout(pcdev, sense.pixel_clock);
-	}
-
-	if (ret < 0)
-		return ret;
-
-	pix->width		= mf->width;
-	pix->height		= mf->height;
-	pix->field		= mf->field;
-	pix->colorspace		= mf->colorspace;
-	icd->current_fmt	= xlate;
-
-	return ret;
-}
-
-static int pxa_camera_try_fmt(struct soc_camera_device *icd,
-			      struct v4l2_format *f)
-{
-	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-	const struct soc_camera_format_xlate *xlate;
-	struct v4l2_pix_format *pix = &f->fmt.pix;
-	struct v4l2_subdev_pad_config pad_cfg;
-	struct v4l2_subdev_format format = {
-		.which = V4L2_SUBDEV_FORMAT_TRY,
-	};
-	struct v4l2_mbus_framefmt *mf = &format.format;
-	__u32 pixfmt = pix->pixelformat;
-	int ret;
-
-	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
-	if (!xlate) {
-		dev_warn(icd->parent, "Format %x not found\n", pixfmt);
-		return -EINVAL;
-	}
-
-	/*
-	 * Limit to pxa hardware capabilities.  YUV422P planar format requires
-	 * images size to be a multiple of 16 bytes.  If not, zeros will be
-	 * inserted between Y and U planes, and U and V planes, which violates
-	 * the YUV422P standard.
-	 */
-	v4l_bound_align_image(&pix->width, 48, 2048, 1,
-			      &pix->height, 32, 2048, 0,
-			      pixfmt == V4L2_PIX_FMT_YUV422P ? 4 : 0);
-
-	/* limit to sensor capabilities */
-	mf->width	= pix->width;
-	mf->height	= pix->height;
-	/* Only progressive video supported so far */
-	mf->field	= V4L2_FIELD_NONE;
-	mf->colorspace	= pix->colorspace;
-	mf->code	= xlate->code;
-
-	ret = v4l2_subdev_call(sd, pad, set_fmt, &pad_cfg, &format);
-	if (ret < 0)
-		return ret;
-
-	pix->width	= mf->width;
-	pix->height	= mf->height;
-	pix->colorspace	= mf->colorspace;
-
-	switch (mf->field) {
-	case V4L2_FIELD_ANY:
-	case V4L2_FIELD_NONE:
-		pix->field	= V4L2_FIELD_NONE;
-		break;
-	default:
-		/* TODO: support interlaced at least in pass-through mode */
-		dev_err(icd->parent, "Field type %d unsupported.\n",
-			mf->field);
-		return -EINVAL;
-	}
-
-	return ret;
-}
-
-static int pxa_camera_reqbufs(struct soc_camera_device *icd,
-			      struct v4l2_requestbuffers *p)
-{
-	int i;
-
-	/*
-	 * This is for locking debugging only. I removed spinlocks and now I
-	 * check whether .prepare is ever called on a linked buffer, or whether
-	 * a dma IRQ can occur for an in-work or unlinked buffer. Until now
-	 * it hadn't triggered
-	 */
-	for (i = 0; i < p->count; i++) {
-		struct pxa_buffer *buf = container_of(icd->vb_vidq.bufs[i],
-						      struct pxa_buffer, vb);
-		buf->inwork = 0;
-		INIT_LIST_HEAD(&buf->vb.queue);
-	}
-
-	return 0;
-}
-
-static unsigned int pxa_camera_poll(struct file *file, poll_table *pt)
-{
-	struct soc_camera_device *icd = file->private_data;
-	struct pxa_buffer *buf;
-
-	buf = list_entry(icd->vb_vidq.stream.next, struct pxa_buffer,
-			 vb.stream);
-
-	poll_wait(file, &buf->vb.done, pt);
-
-	if (buf->vb.state == VIDEOBUF_DONE ||
-	    buf->vb.state == VIDEOBUF_ERROR)
-		return POLLIN|POLLRDNORM;
-
-	return 0;
-}
-
-static int pxa_camera_querycap(struct soc_camera_host *ici,
-			       struct v4l2_capability *cap)
-{
-	/* cap->name is set by the firendly caller:-> */
-	strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card));
-	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
-	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
-	return 0;
-}
-
-static int pxa_camera_suspend(struct device *dev)
-{
-	struct soc_camera_host *ici = to_soc_camera_host(dev);
-	struct pxa_camera_dev *pcdev = ici->priv;
-	int i = 0, ret = 0;
-
-	pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR0);
-	pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR1);
-	pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR2);
-	pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR3);
-	pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR4);
-
-	if (pcdev->soc_host.icd) {
-		struct v4l2_subdev *sd = soc_camera_to_subdev(pcdev->soc_host.icd);
-		ret = v4l2_subdev_call(sd, core, s_power, 0);
-		if (ret == -ENOIOCTLCMD)
-			ret = 0;
-	}
-
-	return ret;
-}
-
-static int pxa_camera_resume(struct device *dev)
-{
-	struct soc_camera_host *ici = to_soc_camera_host(dev);
-	struct pxa_camera_dev *pcdev = ici->priv;
-	int i = 0, ret = 0;
-
-	__raw_writel(pcdev->save_cicr[i++] & ~CICR0_ENB, pcdev->base + CICR0);
-	__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR1);
-	__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR2);
-	__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR3);
-	__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR4);
-
-	if (pcdev->soc_host.icd) {
-		struct v4l2_subdev *sd = soc_camera_to_subdev(pcdev->soc_host.icd);
-		ret = v4l2_subdev_call(sd, core, s_power, 1);
-		if (ret == -ENOIOCTLCMD)
-			ret = 0;
-	}
-
-	/* Restart frame capture if active buffer exists */
-	if (!ret && pcdev->active)
-		pxa_camera_start_capture(pcdev);
-
-	return ret;
-}
-
-static struct soc_camera_host_ops pxa_soc_camera_host_ops = {
-	.owner		= THIS_MODULE,
-	.add		= pxa_camera_add_device,
-	.remove		= pxa_camera_remove_device,
-	.clock_start	= pxa_camera_clock_start,
-	.clock_stop	= pxa_camera_clock_stop,
-	.set_crop	= pxa_camera_set_crop,
-	.get_formats	= pxa_camera_get_formats,
-	.put_formats	= pxa_camera_put_formats,
-	.set_fmt	= pxa_camera_set_fmt,
-	.try_fmt	= pxa_camera_try_fmt,
-	.init_videobuf	= pxa_camera_init_videobuf,
-	.reqbufs	= pxa_camera_reqbufs,
-	.poll		= pxa_camera_poll,
-	.querycap	= pxa_camera_querycap,
-	.set_bus_param	= pxa_camera_set_bus_param,
-};
-
-static int pxa_camera_pdata_from_dt(struct device *dev,
-				    struct pxa_camera_dev *pcdev)
-{
-	u32 mclk_rate;
-	struct device_node *np = dev->of_node;
-	struct v4l2_of_endpoint ep;
-	int err = of_property_read_u32(np, "clock-frequency",
-				       &mclk_rate);
-	if (!err) {
-		pcdev->platform_flags |= PXA_CAMERA_MCLK_EN;
-		pcdev->mclk = mclk_rate;
-	}
-
-	np = of_graph_get_next_endpoint(np, NULL);
-	if (!np) {
-		dev_err(dev, "could not find endpoint\n");
-		return -EINVAL;
-	}
-
-	err = v4l2_of_parse_endpoint(np, &ep);
-	if (err) {
-		dev_err(dev, "could not parse endpoint\n");
-		goto out;
-	}
-
-	switch (ep.bus.parallel.bus_width) {
-	case 4:
-		pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_4;
-		break;
-	case 5:
-		pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_5;
-		break;
-	case 8:
-		pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_8;
-		break;
-	case 9:
-		pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_9;
-		break;
-	case 10:
-		pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10;
-		break;
-	default:
-		break;
-	}
-
-	if (ep.bus.parallel.flags & V4L2_MBUS_MASTER)
-		pcdev->platform_flags |= PXA_CAMERA_MASTER;
-	if (ep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
-		pcdev->platform_flags |= PXA_CAMERA_HSP;
-	if (ep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
-		pcdev->platform_flags |= PXA_CAMERA_VSP;
-	if (ep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
-		pcdev->platform_flags |= PXA_CAMERA_PCLK_EN | PXA_CAMERA_PCP;
-	if (ep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
-		pcdev->platform_flags |= PXA_CAMERA_PCLK_EN;
-
-out:
-	of_node_put(np);
-
-	return err;
-}
-
-static int pxa_camera_probe(struct platform_device *pdev)
-{
-	struct pxa_camera_dev *pcdev;
-	struct resource *res;
-	void __iomem *base;
-	struct dma_slave_config config = {
-		.src_addr_width = 0,
-		.src_maxburst = 8,
-		.direction = DMA_DEV_TO_MEM,
-	};
-	dma_cap_mask_t mask;
-	struct pxad_param params;
-	int irq;
-	int err = 0, i;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	irq = platform_get_irq(pdev, 0);
-	if (!res || irq < 0)
-		return -ENODEV;
-
-	pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
-	if (!pcdev) {
-		dev_err(&pdev->dev, "Could not allocate pcdev\n");
-		return -ENOMEM;
-	}
-
-	pcdev->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(pcdev->clk))
-		return PTR_ERR(pcdev->clk);
-
-	pcdev->res = res;
-
-	pcdev->pdata = pdev->dev.platform_data;
-	if (&pdev->dev.of_node && !pcdev->pdata) {
-		err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev);
-	} else {
-		pcdev->platform_flags = pcdev->pdata->flags;
-		pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;
-	}
-	if (err < 0)
-		return err;
-
-	if (!(pcdev->platform_flags & (PXA_CAMERA_DATAWIDTH_8 |
-			PXA_CAMERA_DATAWIDTH_9 | PXA_CAMERA_DATAWIDTH_10))) {
-		/*
-		 * Platform hasn't set available data widths. This is bad.
-		 * Warn and use a default.
-		 */
-		dev_warn(&pdev->dev, "WARNING! Platform hasn't set available "
-			 "data widths, using default 10 bit\n");
-		pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10;
-	}
-	if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_8)
-		pcdev->width_flags = 1 << 7;
-	if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_9)
-		pcdev->width_flags |= 1 << 8;
-	if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_10)
-		pcdev->width_flags |= 1 << 9;
-	if (!pcdev->mclk) {
-		dev_warn(&pdev->dev,
-			 "mclk == 0! Please, fix your platform data. "
-			 "Using default 20MHz\n");
-		pcdev->mclk = 20000000;
-	}
-
-	pcdev->mclk_divisor = mclk_get_divisor(pdev, pcdev);
-
-	INIT_LIST_HEAD(&pcdev->capture);
-	spin_lock_init(&pcdev->lock);
-
-	/*
-	 * Request the regions.
-	 */
-	base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(base))
-		return PTR_ERR(base);
-
-	pcdev->irq = irq;
-	pcdev->base = base;
-
-	/* request dma */
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-	dma_cap_set(DMA_PRIVATE, mask);
-
-	params.prio = 0;
-	params.drcmr = 68;
-	pcdev->dma_chans[0] =
-		dma_request_slave_channel_compat(mask, pxad_filter_fn,
-						 &params, &pdev->dev, "CI_Y");
-	if (!pcdev->dma_chans[0]) {
-		dev_err(&pdev->dev, "Can't request DMA for Y\n");
-		return -ENODEV;
-	}
-
-	params.drcmr = 69;
-	pcdev->dma_chans[1] =
-		dma_request_slave_channel_compat(mask, pxad_filter_fn,
-						 &params, &pdev->dev, "CI_U");
-	if (!pcdev->dma_chans[1]) {
-		dev_err(&pdev->dev, "Can't request DMA for Y\n");
-		goto exit_free_dma_y;
-	}
-
-	params.drcmr = 70;
-	pcdev->dma_chans[2] =
-		dma_request_slave_channel_compat(mask, pxad_filter_fn,
-						 &params, &pdev->dev, "CI_V");
-	if (!pcdev->dma_chans[2]) {
-		dev_err(&pdev->dev, "Can't request DMA for V\n");
-		goto exit_free_dma_u;
-	}
-
-	for (i = 0; i < 3; i++) {
-		config.src_addr = pcdev->res->start + CIBR0 + i * 8;
-		err = dmaengine_slave_config(pcdev->dma_chans[i], &config);
-		if (err < 0) {
-			dev_err(&pdev->dev, "dma slave config failed: %d\n",
-				err);
-			goto exit_free_dma;
-		}
-	}
-
-	/* request irq */
-	err = devm_request_irq(&pdev->dev, pcdev->irq, pxa_camera_irq, 0,
-			       PXA_CAM_DRV_NAME, pcdev);
-	if (err) {
-		dev_err(&pdev->dev, "Camera interrupt register failed\n");
-		goto exit_free_dma;
-	}
-
-	pcdev->soc_host.drv_name	= PXA_CAM_DRV_NAME;
-	pcdev->soc_host.ops		= &pxa_soc_camera_host_ops;
-	pcdev->soc_host.priv		= pcdev;
-	pcdev->soc_host.v4l2_dev.dev	= &pdev->dev;
-	pcdev->soc_host.nr		= pdev->id;
-	tasklet_init(&pcdev->task_eof, pxa_camera_eof, (unsigned long)pcdev);
-
-	err = soc_camera_host_register(&pcdev->soc_host);
-	if (err)
-		goto exit_free_dma;
-
-	return 0;
-
-exit_free_dma:
-	dma_release_channel(pcdev->dma_chans[2]);
-exit_free_dma_u:
-	dma_release_channel(pcdev->dma_chans[1]);
-exit_free_dma_y:
-	dma_release_channel(pcdev->dma_chans[0]);
-	return err;
-}
-
-static int pxa_camera_remove(struct platform_device *pdev)
-{
-	struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
-	struct pxa_camera_dev *pcdev = container_of(soc_host,
-					struct pxa_camera_dev, soc_host);
-
-	dma_release_channel(pcdev->dma_chans[0]);
-	dma_release_channel(pcdev->dma_chans[1]);
-	dma_release_channel(pcdev->dma_chans[2]);
-
-	soc_camera_host_unregister(soc_host);
-
-	dev_info(&pdev->dev, "PXA Camera driver unloaded\n");
-
-	return 0;
-}
-
-static const struct dev_pm_ops pxa_camera_pm = {
-	.suspend	= pxa_camera_suspend,
-	.resume		= pxa_camera_resume,
-};
-
-static const struct of_device_id pxa_camera_of_match[] = {
-	{ .compatible = "marvell,pxa270-qci", },
-	{},
-};
-MODULE_DEVICE_TABLE(of, pxa_camera_of_match);
-
-static struct platform_driver pxa_camera_driver = {
-	.driver		= {
-		.name	= PXA_CAM_DRV_NAME,
-		.pm	= &pxa_camera_pm,
-		.of_match_table = of_match_ptr(pxa_camera_of_match),
-	},
-	.probe		= pxa_camera_probe,
-	.remove		= pxa_camera_remove,
-};
-
-module_platform_driver(pxa_camera_driver);
-
-MODULE_DESCRIPTION("PXA27x SoC Camera Host driver");
-MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(PXA_CAM_VERSION);
-MODULE_ALIAS("platform:" PXA_CAM_DRV_NAME);
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
deleted file mode 100644
index 9c13752..0000000
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ /dev/null
@@ -1,1970 +0,0 @@
-/*
- * SoC-camera host driver for Renesas R-Car VIN unit
- *
- * Copyright (C) 2011-2013 Renesas Solutions Corp.
- * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
- *
- * Based on V4L2 Driver for SuperH Mobile CEU interface "sh_mobile_ceu_camera.c"
- *
- * Copyright (C) 2008 Magnus Damm
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/videodev2.h>
-
-#include <media/soc_camera.h>
-#include <media/drv-intf/soc_mediabus.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-dev.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-mediabus.h>
-#include <media/v4l2-of.h>
-#include <media/v4l2-subdev.h>
-#include <media/videobuf2-dma-contig.h>
-
-#include "soc_scale_crop.h"
-
-#define DRV_NAME "rcar_vin"
-
-/* Register offsets for R-Car VIN */
-#define VNMC_REG	0x00	/* Video n Main Control Register */
-#define VNMS_REG	0x04	/* Video n Module Status Register */
-#define VNFC_REG	0x08	/* Video n Frame Capture Register */
-#define VNSLPRC_REG	0x0C	/* Video n Start Line Pre-Clip Register */
-#define VNELPRC_REG	0x10	/* Video n End Line Pre-Clip Register */
-#define VNSPPRC_REG	0x14	/* Video n Start Pixel Pre-Clip Register */
-#define VNEPPRC_REG	0x18	/* Video n End Pixel Pre-Clip Register */
-#define VNSLPOC_REG	0x1C	/* Video n Start Line Post-Clip Register */
-#define VNELPOC_REG	0x20	/* Video n End Line Post-Clip Register */
-#define VNSPPOC_REG	0x24	/* Video n Start Pixel Post-Clip Register */
-#define VNEPPOC_REG	0x28	/* Video n End Pixel Post-Clip Register */
-#define VNIS_REG	0x2C	/* Video n Image Stride Register */
-#define VNMB_REG(m)	(0x30 + ((m) << 2)) /* Video n Memory Base m Register */
-#define VNIE_REG	0x40	/* Video n Interrupt Enable Register */
-#define VNINTS_REG	0x44	/* Video n Interrupt Status Register */
-#define VNSI_REG	0x48	/* Video n Scanline Interrupt Register */
-#define VNMTC_REG	0x4C	/* Video n Memory Transfer Control Register */
-#define VNYS_REG	0x50	/* Video n Y Scale Register */
-#define VNXS_REG	0x54	/* Video n X Scale Register */
-#define VNDMR_REG	0x58	/* Video n Data Mode Register */
-#define VNDMR2_REG	0x5C	/* Video n Data Mode Register 2 */
-#define VNUVAOF_REG	0x60	/* Video n UV Address Offset Register */
-#define VNC1A_REG	0x80	/* Video n Coefficient Set C1A Register */
-#define VNC1B_REG	0x84	/* Video n Coefficient Set C1B Register */
-#define VNC1C_REG	0x88	/* Video n Coefficient Set C1C Register */
-#define VNC2A_REG	0x90	/* Video n Coefficient Set C2A Register */
-#define VNC2B_REG	0x94	/* Video n Coefficient Set C2B Register */
-#define VNC2C_REG	0x98	/* Video n Coefficient Set C2C Register */
-#define VNC3A_REG	0xA0	/* Video n Coefficient Set C3A Register */
-#define VNC3B_REG	0xA4	/* Video n Coefficient Set C3B Register */
-#define VNC3C_REG	0xA8	/* Video n Coefficient Set C3C Register */
-#define VNC4A_REG	0xB0	/* Video n Coefficient Set C4A Register */
-#define VNC4B_REG	0xB4	/* Video n Coefficient Set C4B Register */
-#define VNC4C_REG	0xB8	/* Video n Coefficient Set C4C Register */
-#define VNC5A_REG	0xC0	/* Video n Coefficient Set C5A Register */
-#define VNC5B_REG	0xC4	/* Video n Coefficient Set C5B Register */
-#define VNC5C_REG	0xC8	/* Video n Coefficient Set C5C Register */
-#define VNC6A_REG	0xD0	/* Video n Coefficient Set C6A Register */
-#define VNC6B_REG	0xD4	/* Video n Coefficient Set C6B Register */
-#define VNC6C_REG	0xD8	/* Video n Coefficient Set C6C Register */
-#define VNC7A_REG	0xE0	/* Video n Coefficient Set C7A Register */
-#define VNC7B_REG	0xE4	/* Video n Coefficient Set C7B Register */
-#define VNC7C_REG	0xE8	/* Video n Coefficient Set C7C Register */
-#define VNC8A_REG	0xF0	/* Video n Coefficient Set C8A Register */
-#define VNC8B_REG	0xF4	/* Video n Coefficient Set C8B Register */
-#define VNC8C_REG	0xF8	/* Video n Coefficient Set C8C Register */
-
-/* Register bit fields for R-Car VIN */
-/* Video n Main Control Register bits */
-#define VNMC_FOC		(1 << 21)
-#define VNMC_YCAL		(1 << 19)
-#define VNMC_INF_YUV8_BT656	(0 << 16)
-#define VNMC_INF_YUV8_BT601	(1 << 16)
-#define VNMC_INF_YUV10_BT656	(2 << 16)
-#define VNMC_INF_YUV10_BT601	(3 << 16)
-#define VNMC_INF_YUV16		(5 << 16)
-#define VNMC_INF_RGB888		(6 << 16)
-#define VNMC_VUP		(1 << 10)
-#define VNMC_IM_ODD		(0 << 3)
-#define VNMC_IM_ODD_EVEN	(1 << 3)
-#define VNMC_IM_EVEN		(2 << 3)
-#define VNMC_IM_FULL		(3 << 3)
-#define VNMC_BPS		(1 << 1)
-#define VNMC_ME			(1 << 0)
-
-/* Video n Module Status Register bits */
-#define VNMS_FBS_MASK		(3 << 3)
-#define VNMS_FBS_SHIFT		3
-#define VNMS_AV			(1 << 1)
-#define VNMS_CA			(1 << 0)
-
-/* Video n Frame Capture Register bits */
-#define VNFC_C_FRAME		(1 << 1)
-#define VNFC_S_FRAME		(1 << 0)
-
-/* Video n Interrupt Enable Register bits */
-#define VNIE_FIE		(1 << 4)
-#define VNIE_EFE		(1 << 1)
-
-/* Video n Data Mode Register bits */
-#define VNDMR_EXRGB		(1 << 8)
-#define VNDMR_BPSM		(1 << 4)
-#define VNDMR_DTMD_YCSEP	(1 << 1)
-#define VNDMR_DTMD_ARGB		(1 << 0)
-
-/* Video n Data Mode Register 2 bits */
-#define VNDMR2_VPS		(1 << 30)
-#define VNDMR2_HPS		(1 << 29)
-#define VNDMR2_FTEV		(1 << 17)
-#define VNDMR2_VLV(n)		((n & 0xf) << 12)
-
-#define VIN_MAX_WIDTH		2048
-#define VIN_MAX_HEIGHT		2048
-
-#define TIMEOUT_MS		100
-
-#define RCAR_VIN_HSYNC_ACTIVE_LOW	(1 << 0)
-#define RCAR_VIN_VSYNC_ACTIVE_LOW	(1 << 1)
-#define RCAR_VIN_BT601			(1 << 2)
-#define RCAR_VIN_BT656			(1 << 3)
-
-enum chip_id {
-	RCAR_GEN3,
-	RCAR_GEN2,
-	RCAR_H1,
-	RCAR_M1,
-	RCAR_E1,
-};
-
-struct vin_coeff {
-	unsigned short xs_value;
-	u32 coeff_set[24];
-};
-
-static const struct vin_coeff vin_coeff_set[] = {
-	{ 0x0000, {
-		0x00000000,		0x00000000,		0x00000000,
-		0x00000000,		0x00000000,		0x00000000,
-		0x00000000,		0x00000000,		0x00000000,
-		0x00000000,		0x00000000,		0x00000000,
-		0x00000000,		0x00000000,		0x00000000,
-		0x00000000,		0x00000000,		0x00000000,
-		0x00000000,		0x00000000,		0x00000000,
-		0x00000000,		0x00000000,		0x00000000 },
-	},
-	{ 0x1000, {
-		0x000fa400,		0x000fa400,		0x09625902,
-		0x000003f8,		0x00000403,		0x3de0d9f0,
-		0x001fffed,		0x00000804,		0x3cc1f9c3,
-		0x001003de,		0x00000c01,		0x3cb34d7f,
-		0x002003d2,		0x00000c00,		0x3d24a92d,
-		0x00200bca,		0x00000bff,		0x3df600d2,
-		0x002013cc,		0x000007ff,		0x3ed70c7e,
-		0x00100fde,		0x00000000,		0x3f87c036 },
-	},
-	{ 0x1200, {
-		0x002ffff1,		0x002ffff1,		0x02a0a9c8,
-		0x002003e7,		0x001ffffa,		0x000185bc,
-		0x002007dc,		0x000003ff,		0x3e52859c,
-		0x00200bd4,		0x00000002,		0x3d53996b,
-		0x00100fd0,		0x00000403,		0x3d04ad2d,
-		0x00000bd5,		0x00000403,		0x3d35ace7,
-		0x3ff003e4,		0x00000801,		0x3dc674a1,
-		0x3fffe800,		0x00000800,		0x3e76f461 },
-	},
-	{ 0x1400, {
-		0x00100be3,		0x00100be3,		0x04d1359a,
-		0x00000fdb,		0x002003ed,		0x0211fd93,
-		0x00000fd6,		0x002003f4,		0x0002d97b,
-		0x000007d6,		0x002ffffb,		0x3e93b956,
-		0x3ff003da,		0x001003ff,		0x3db49926,
-		0x3fffefe9,		0x00100001,		0x3d655cee,
-		0x3fffd400,		0x00000003,		0x3d65f4b6,
-		0x000fb421,		0x00000402,		0x3dc6547e },
-	},
-	{ 0x1600, {
-		0x00000bdd,		0x00000bdd,		0x06519578,
-		0x3ff007da,		0x00000be3,		0x03c24973,
-		0x3ff003d9,		0x00000be9,		0x01b30d5f,
-		0x3ffff7df,		0x001003f1,		0x0003c542,
-		0x000fdfec,		0x001003f7,		0x3ec4711d,
-		0x000fc400,		0x002ffffd,		0x3df504f1,
-		0x001fa81a,		0x002ffc00,		0x3d957cc2,
-		0x002f8c3c,		0x00100000,		0x3db5c891 },
-	},
-	{ 0x1800, {
-		0x3ff003dc,		0x3ff003dc,		0x0791e558,
-		0x000ff7dd,		0x3ff007de,		0x05328554,
-		0x000fe7e3,		0x3ff00be2,		0x03232546,
-		0x000fd7ee,		0x000007e9,		0x0143bd30,
-		0x001fb800,		0x000007ee,		0x00044511,
-		0x002fa015,		0x000007f4,		0x3ef4bcee,
-		0x002f8832,		0x001003f9,		0x3e4514c7,
-		0x001f7853,		0x001003fd,		0x3de54c9f },
-	},
-	{ 0x1a00, {
-		0x000fefe0,		0x000fefe0,		0x08721d3c,
-		0x001fdbe7,		0x000ffbde,		0x0652a139,
-		0x001fcbf0,		0x000003df,		0x0463292e,
-		0x002fb3ff,		0x3ff007e3,		0x0293a91d,
-		0x002f9c12,		0x3ff00be7,		0x01241905,
-		0x001f8c29,		0x000007ed,		0x3fe470eb,
-		0x000f7c46,		0x000007f2,		0x3f04b8ca,
-		0x3fef7865,		0x000007f6,		0x3e74e4a8 },
-	},
-	{ 0x1c00, {
-		0x001fd3e9,		0x001fd3e9,		0x08f23d26,
-		0x002fbff3,		0x001fe3e4,		0x0712ad23,
-		0x002fa800,		0x000ff3e0,		0x05631d1b,
-		0x001f9810,		0x000ffbe1,		0x03b3890d,
-		0x000f8c23,		0x000003e3,		0x0233e8fa,
-		0x3fef843b,		0x000003e7,		0x00f430e4,
-		0x3fbf8456,		0x3ff00bea,		0x00046cc8,
-		0x3f8f8c72,		0x3ff00bef,		0x3f3490ac },
-	},
-	{ 0x1e00, {
-		0x001fbbf4,		0x001fbbf4,		0x09425112,
-		0x001fa800,		0x002fc7ed,		0x0792b110,
-		0x000f980e,		0x001fdbe6,		0x0613110a,
-		0x3fff8c20,		0x001fe7e3,		0x04a368fd,
-		0x3fcf8c33,		0x000ff7e2,		0x0343b8ed,
-		0x3f9f8c4a,		0x000fffe3,		0x0203f8da,
-		0x3f5f9c61,		0x000003e6,		0x00e428c5,
-		0x3f1fb07b,		0x000003eb,		0x3fe440af },
-	},
-	{ 0x2000, {
-		0x000fa400,		0x000fa400,		0x09625902,
-		0x3fff980c,		0x001fb7f5,		0x0812b0ff,
-		0x3fdf901c,		0x001fc7ed,		0x06b2fcfa,
-		0x3faf902d,		0x001fd3e8,		0x055348f1,
-		0x3f7f983f,		0x001fe3e5,		0x04038ce3,
-		0x3f3fa454,		0x001fefe3,		0x02e3c8d1,
-		0x3f0fb86a,		0x001ff7e4,		0x01c3e8c0,
-		0x3ecfd880,		0x000fffe6,		0x00c404ac },
-	},
-	{ 0x2200, {
-		0x3fdf9c0b,		0x3fdf9c0b,		0x09725cf4,
-		0x3fbf9818,		0x3fffa400,		0x0842a8f1,
-		0x3f8f9827,		0x000fb3f7,		0x0702f0ec,
-		0x3f5fa037,		0x000fc3ef,		0x05d330e4,
-		0x3f2fac49,		0x001fcfea,		0x04a364d9,
-		0x3effc05c,		0x001fdbe7,		0x038394ca,
-		0x3ecfdc6f,		0x001fe7e6,		0x0273b0bb,
-		0x3ea00083,		0x001fefe6,		0x0183c0a9 },
-	},
-	{ 0x2400, {
-		0x3f9fa014,		0x3f9fa014,		0x098260e6,
-		0x3f7f9c23,		0x3fcf9c0a,		0x08629ce5,
-		0x3f4fa431,		0x3fefa400,		0x0742d8e1,
-		0x3f1fb440,		0x3fffb3f8,		0x062310d9,
-		0x3eefc850,		0x000fbbf2,		0x050340d0,
-		0x3ecfe062,		0x000fcbec,		0x041364c2,
-		0x3ea00073,		0x001fd3ea,		0x03037cb5,
-		0x3e902086,		0x001fdfe8,		0x022388a5 },
-	},
-	{ 0x2600, {
-		0x3f5fa81e,		0x3f5fa81e,		0x096258da,
-		0x3f3fac2b,		0x3f8fa412,		0x088290d8,
-		0x3f0fbc38,		0x3fafa408,		0x0772c8d5,
-		0x3eefcc47,		0x3fcfa800,		0x0672f4ce,
-		0x3ecfe456,		0x3fefaffa,		0x05531cc6,
-		0x3eb00066,		0x3fffbbf3,		0x047334bb,
-		0x3ea01c77,		0x000fc7ee,		0x039348ae,
-		0x3ea04486,		0x000fd3eb,		0x02b350a1 },
-	},
-	{ 0x2800, {
-		0x3f2fb426,		0x3f2fb426,		0x094250ce,
-		0x3f0fc032,		0x3f4fac1b,		0x086284cd,
-		0x3eefd040,		0x3f7fa811,		0x0782acc9,
-		0x3ecfe84c,		0x3f9fa807,		0x06a2d8c4,
-		0x3eb0005b,		0x3fbfac00,		0x05b2f4bc,
-		0x3eb0186a,		0x3fdfb3fa,		0x04c308b4,
-		0x3eb04077,		0x3fefbbf4,		0x03f31ca8,
-		0x3ec06884,		0x000fbff2,		0x03031c9e },
-	},
-	{ 0x2a00, {
-		0x3f0fc42d,		0x3f0fc42d,		0x090240c4,
-		0x3eefd439,		0x3f2fb822,		0x08526cc2,
-		0x3edfe845,		0x3f4fb018,		0x078294bf,
-		0x3ec00051,		0x3f6fac0f,		0x06b2b4bb,
-		0x3ec0185f,		0x3f8fac07,		0x05e2ccb4,
-		0x3ec0386b,		0x3fafac00,		0x0502e8ac,
-		0x3ed05c77,		0x3fcfb3fb,		0x0432f0a3,
-		0x3ef08482,		0x3fdfbbf6,		0x0372f898 },
-	},
-	{ 0x2c00, {
-		0x3eefdc31,		0x3eefdc31,		0x08e238b8,
-		0x3edfec3d,		0x3f0fc828,		0x082258b9,
-		0x3ed00049,		0x3f1fc01e,		0x077278b6,
-		0x3ed01455,		0x3f3fb815,		0x06c294b2,
-		0x3ed03460,		0x3f5fb40d,		0x0602acac,
-		0x3ef0506c,		0x3f7fb006,		0x0542c0a4,
-		0x3f107476,		0x3f9fb400,		0x0472c89d,
-		0x3f309c80,		0x3fbfb7fc,		0x03b2cc94 },
-	},
-	{ 0x2e00, {
-		0x3eefec37,		0x3eefec37,		0x088220b0,
-		0x3ee00041,		0x3effdc2d,		0x07f244ae,
-		0x3ee0144c,		0x3f0fd023,		0x07625cad,
-		0x3ef02c57,		0x3f1fc81a,		0x06c274a9,
-		0x3f004861,		0x3f3fbc13,		0x060288a6,
-		0x3f20686b,		0x3f5fb80c,		0x05529c9e,
-		0x3f408c74,		0x3f6fb805,		0x04b2ac96,
-		0x3f80ac7e,		0x3f8fb800,		0x0402ac8e },
-	},
-	{ 0x3000, {
-		0x3ef0003a,		0x3ef0003a,		0x084210a6,
-		0x3ef01045,		0x3effec32,		0x07b228a7,
-		0x3f00284e,		0x3f0fdc29,		0x073244a4,
-		0x3f104058,		0x3f0fd420,		0x06a258a2,
-		0x3f305c62,		0x3f2fc818,		0x0612689d,
-		0x3f508069,		0x3f3fc011,		0x05728496,
-		0x3f80a072,		0x3f4fc00a,		0x04d28c90,
-		0x3fc0c07b,		0x3f6fbc04,		0x04429088 },
-	},
-	{ 0x3200, {
-		0x3f00103e,		0x3f00103e,		0x07f1fc9e,
-		0x3f102447,		0x3f000035,		0x0782149d,
-		0x3f203c4f,		0x3f0ff02c,		0x07122c9c,
-		0x3f405458,		0x3f0fe424,		0x06924099,
-		0x3f607061,		0x3f1fd41d,		0x06024c97,
-		0x3f909068,		0x3f2fcc16,		0x05726490,
-		0x3fc0b070,		0x3f3fc80f,		0x04f26c8a,
-		0x0000d077,		0x3f4fc409,		0x04627484 },
-	},
-	{ 0x3400, {
-		0x3f202040,		0x3f202040,		0x07a1e898,
-		0x3f303449,		0x3f100c38,		0x0741fc98,
-		0x3f504c50,		0x3f10002f,		0x06e21495,
-		0x3f706459,		0x3f1ff028,		0x06722492,
-		0x3fa08060,		0x3f1fe421,		0x05f2348f,
-		0x3fd09c67,		0x3f1fdc19,		0x05824c89,
-		0x0000bc6e,		0x3f2fd014,		0x04f25086,
-		0x0040dc74,		0x3f3fcc0d,		0x04825c7f },
-	},
-	{ 0x3600, {
-		0x3f403042,		0x3f403042,		0x0761d890,
-		0x3f504848,		0x3f301c3b,		0x0701f090,
-		0x3f805c50,		0x3f200c33,		0x06a2008f,
-		0x3fa07458,		0x3f10002b,		0x06520c8d,
-		0x3fd0905e,		0x3f1ff424,		0x05e22089,
-		0x0000ac65,		0x3f1fe81d,		0x05823483,
-		0x0030cc6a,		0x3f2fdc18,		0x04f23c81,
-		0x0080e871,		0x3f2fd412,		0x0482407c },
-	},
-	{ 0x3800, {
-		0x3f604043,		0x3f604043,		0x0721c88a,
-		0x3f80544a,		0x3f502c3c,		0x06d1d88a,
-		0x3fb06851,		0x3f301c35,		0x0681e889,
-		0x3fd08456,		0x3f30082f,		0x0611fc88,
-		0x00009c5d,		0x3f200027,		0x05d20884,
-		0x0030b863,		0x3f2ff421,		0x05621880,
-		0x0070d468,		0x3f2fe81b,		0x0502247c,
-		0x00c0ec6f,		0x3f2fe015,		0x04a22877 },
-	},
-	{ 0x3a00, {
-		0x3f904c44,		0x3f904c44,		0x06e1b884,
-		0x3fb0604a,		0x3f70383e,		0x0691c885,
-		0x3fe07451,		0x3f502c36,		0x0661d483,
-		0x00009055,		0x3f401831,		0x0601ec81,
-		0x0030a85b,		0x3f300c2a,		0x05b1f480,
-		0x0070c061,		0x3f300024,		0x0562047a,
-		0x00b0d867,		0x3f3ff41e,		0x05020c77,
-		0x00f0f46b,		0x3f2fec19,		0x04a21474 },
-	},
-	{ 0x3c00, {
-		0x3fb05c43,		0x3fb05c43,		0x06c1b07e,
-		0x3fe06c4b,		0x3f902c3f,		0x0681c081,
-		0x0000844f,		0x3f703838,		0x0631cc7d,
-		0x00309855,		0x3f602433,		0x05d1d47e,
-		0x0060b459,		0x3f50142e,		0x0581e47b,
-		0x00a0c85f,		0x3f400828,		0x0531f078,
-		0x00e0e064,		0x3f300021,		0x0501fc73,
-		0x00b0fc6a,		0x3f3ff41d,		0x04a20873 },
-	},
-	{ 0x3e00, {
-		0x3fe06444,		0x3fe06444,		0x0681a07a,
-		0x00007849,		0x3fc0503f,		0x0641b07a,
-		0x0020904d,		0x3fa0403a,		0x05f1c07a,
-		0x0060a453,		0x3f803034,		0x05c1c878,
-		0x0090b858,		0x3f70202f,		0x0571d477,
-		0x00d0d05d,		0x3f501829,		0x0531e073,
-		0x0110e462,		0x3f500825,		0x04e1e471,
-		0x01510065,		0x3f40001f,		0x04a1f06d },
-	},
-	{ 0x4000, {
-		0x00007044,		0x00007044,		0x06519476,
-		0x00208448,		0x3fe05c3f,		0x0621a476,
-		0x0050984d,		0x3fc04c3a,		0x05e1b075,
-		0x0080ac52,		0x3fa03c35,		0x05a1b875,
-		0x00c0c056,		0x3f803030,		0x0561c473,
-		0x0100d45b,		0x3f70202b,		0x0521d46f,
-		0x0140e860,		0x3f601427,		0x04d1d46e,
-		0x01810064,		0x3f500822,		0x0491dc6b },
-	},
-	{ 0x5000, {
-		0x0110a442,		0x0110a442,		0x0551545e,
-		0x0140b045,		0x00e0983f,		0x0531585f,
-		0x0160c047,		0x00c08c3c,		0x0511645e,
-		0x0190cc4a,		0x00908039,		0x04f1685f,
-		0x01c0dc4c,		0x00707436,		0x04d1705e,
-		0x0200e850,		0x00506833,		0x04b1785b,
-		0x0230f453,		0x00305c30,		0x0491805a,
-		0x02710056,		0x0010542d,		0x04718059 },
-	},
-	{ 0x6000, {
-		0x01c0bc40,		0x01c0bc40,		0x04c13052,
-		0x01e0c841,		0x01a0b43d,		0x04c13851,
-		0x0210cc44,		0x0180a83c,		0x04a13453,
-		0x0230d845,		0x0160a03a,		0x04913c52,
-		0x0260e047,		0x01409838,		0x04714052,
-		0x0280ec49,		0x01208c37,		0x04514c50,
-		0x02b0f44b,		0x01008435,		0x04414c50,
-		0x02d1004c,		0x00e07c33,		0x0431544f },
-	},
-	{ 0x7000, {
-		0x0230c83e,		0x0230c83e,		0x04711c4c,
-		0x0250d03f,		0x0210c43c,		0x0471204b,
-		0x0270d840,		0x0200b83c,		0x0451244b,
-		0x0290dc42,		0x01e0b43a,		0x0441244c,
-		0x02b0e443,		0x01c0b038,		0x0441284b,
-		0x02d0ec44,		0x01b0a438,		0x0421304a,
-		0x02f0f445,		0x0190a036,		0x04213449,
-		0x0310f847,		0x01709c34,		0x04213848 },
-	},
-	{ 0x8000, {
-		0x0280d03d,		0x0280d03d,		0x04310c48,
-		0x02a0d43e,		0x0270c83c,		0x04311047,
-		0x02b0dc3e,		0x0250c83a,		0x04311447,
-		0x02d0e040,		0x0240c03a,		0x04211446,
-		0x02e0e840,		0x0220bc39,		0x04111847,
-		0x0300e842,		0x0210b438,		0x04012445,
-		0x0310f043,		0x0200b037,		0x04012045,
-		0x0330f444,		0x01e0ac36,		0x03f12445 },
-	},
-	{ 0xefff, {
-		0x0340dc3a,		0x0340dc3a,		0x03b0ec40,
-		0x0340e03a,		0x0330e039,		0x03c0f03e,
-		0x0350e03b,		0x0330dc39,		0x03c0ec3e,
-		0x0350e43a,		0x0320dc38,		0x03c0f43e,
-		0x0360e43b,		0x0320d839,		0x03b0f03e,
-		0x0360e83b,		0x0310d838,		0x03c0fc3b,
-		0x0370e83b,		0x0310d439,		0x03a0f83d,
-		0x0370e83c,		0x0300d438,		0x03b0fc3c },
-	}
-};
-
-enum rcar_vin_state {
-	STOPPED = 0,
-	RUNNING,
-	STOPPING,
-};
-
-struct rcar_vin_priv {
-	void __iomem			*base;
-	spinlock_t			lock;
-	int				sequence;
-	/* State of the VIN module in capturing mode */
-	enum rcar_vin_state		state;
-	struct soc_camera_host		ici;
-	struct list_head		capture;
-#define MAX_BUFFER_NUM			3
-	struct vb2_v4l2_buffer		*queue_buf[MAX_BUFFER_NUM];
-	enum v4l2_field			field;
-	unsigned int			pdata_flags;
-	unsigned int			vb_count;
-	unsigned int			nr_hw_slots;
-	bool				request_to_stop;
-	struct completion		capture_stop;
-	enum chip_id			chip;
-};
-
-#define is_continuous_transfer(priv)	(priv->vb_count > MAX_BUFFER_NUM)
-
-struct rcar_vin_buffer {
-	struct vb2_v4l2_buffer vb;
-	struct list_head		list;
-};
-
-#define to_buf_list(vb2_buffer)	(&container_of(vb2_buffer, \
-						       struct rcar_vin_buffer, \
-						       vb)->list)
-
-struct rcar_vin_cam {
-	/* VIN offsets within the camera output, before the VIN scaler */
-	unsigned int			vin_left;
-	unsigned int			vin_top;
-	/* Client output, as seen by the VIN */
-	unsigned int			width;
-	unsigned int			height;
-	/* User window from S_FMT */
-	unsigned int out_width;
-	unsigned int out_height;
-	/*
-	 * User window from S_CROP / G_CROP, produced by client cropping and
-	 * scaling, VIN scaling and VIN cropping, mapped back onto the client
-	 * input window
-	 */
-	struct v4l2_rect		subrect;
-	/* Camera cropping rectangle */
-	struct v4l2_rect		rect;
-	const struct soc_mbus_pixelfmt	*extra_fmt;
-};
-
-/*
- * .queue_setup() is called to check whether the driver can accept the requested
- * number of buffers and to fill in plane sizes for the current frame format if
- * required
- */
-static int rcar_vin_videobuf_setup(struct vb2_queue *vq,
-				   unsigned int *count,
-				   unsigned int *num_planes,
-				   unsigned int sizes[], struct device *alloc_devs[])
-{
-	struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	struct rcar_vin_priv *priv = ici->priv;
-
-	if (!vq->num_buffers)
-		priv->sequence = 0;
-
-	if (!*count)
-		*count = 2;
-	priv->vb_count = *count;
-
-	/* Number of hardware slots */
-	if (is_continuous_transfer(priv))
-		priv->nr_hw_slots = MAX_BUFFER_NUM;
-	else
-		priv->nr_hw_slots = 1;
-
-	if (*num_planes)
-		return sizes[0] < icd->sizeimage ? -EINVAL : 0;
-
-	sizes[0] = icd->sizeimage;
-	*num_planes = 1;
-
-	dev_dbg(icd->parent, "count=%d, size=%u\n", *count, sizes[0]);
-
-	return 0;
-}
-
-static int rcar_vin_setup(struct rcar_vin_priv *priv)
-{
-	struct soc_camera_device *icd = priv->ici.icd;
-	struct rcar_vin_cam *cam = icd->host_priv;
-	u32 vnmc, dmr, interrupts;
-	bool progressive = false, output_is_yuv = false, input_is_yuv = false;
-
-	switch (priv->field) {
-	case V4L2_FIELD_TOP:
-		vnmc = VNMC_IM_ODD;
-		break;
-	case V4L2_FIELD_BOTTOM:
-		vnmc = VNMC_IM_EVEN;
-		break;
-	case V4L2_FIELD_INTERLACED:
-	case V4L2_FIELD_INTERLACED_TB:
-		vnmc = VNMC_IM_FULL;
-		break;
-	case V4L2_FIELD_INTERLACED_BT:
-		vnmc = VNMC_IM_FULL | VNMC_FOC;
-		break;
-	case V4L2_FIELD_NONE:
-		if (is_continuous_transfer(priv)) {
-			vnmc = VNMC_IM_ODD_EVEN;
-			progressive = true;
-		} else {
-			vnmc = VNMC_IM_ODD;
-		}
-		break;
-	default:
-		vnmc = VNMC_IM_ODD;
-		break;
-	}
-
-	/* input interface */
-	switch (icd->current_fmt->code) {
-	case MEDIA_BUS_FMT_YUYV8_1X16:
-		/* BT.601/BT.1358 16bit YCbCr422 */
-		vnmc |= VNMC_INF_YUV16;
-		input_is_yuv = true;
-		break;
-	case MEDIA_BUS_FMT_YUYV8_2X8:
-		/* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
-		vnmc |= priv->pdata_flags & RCAR_VIN_BT656 ?
-			VNMC_INF_YUV8_BT656 : VNMC_INF_YUV8_BT601;
-		input_is_yuv = true;
-		break;
-	case MEDIA_BUS_FMT_RGB888_1X24:
-		vnmc |= VNMC_INF_RGB888;
-		break;
-	case MEDIA_BUS_FMT_YUYV10_2X10:
-		/* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
-		vnmc |= priv->pdata_flags & RCAR_VIN_BT656 ?
-			VNMC_INF_YUV10_BT656 : VNMC_INF_YUV10_BT601;
-		input_is_yuv = true;
-		break;
-	default:
-		break;
-	}
-
-	/* output format */
-	switch (icd->current_fmt->host_fmt->fourcc) {
-	case V4L2_PIX_FMT_NV16:
-		iowrite32(ALIGN(cam->width * cam->height, 0x80),
-			  priv->base + VNUVAOF_REG);
-		dmr = VNDMR_DTMD_YCSEP;
-		output_is_yuv = true;
-		break;
-	case V4L2_PIX_FMT_YUYV:
-		dmr = VNDMR_BPSM;
-		output_is_yuv = true;
-		break;
-	case V4L2_PIX_FMT_UYVY:
-		dmr = 0;
-		output_is_yuv = true;
-		break;
-	case V4L2_PIX_FMT_RGB555X:
-		dmr = VNDMR_DTMD_ARGB;
-		break;
-	case V4L2_PIX_FMT_RGB565:
-		dmr = 0;
-		break;
-	case V4L2_PIX_FMT_RGB32:
-		if (priv->chip != RCAR_GEN2 && priv->chip != RCAR_H1 &&
-		    priv->chip != RCAR_E1)
-			goto e_format;
-
-		dmr = VNDMR_EXRGB;
-		break;
-	case V4L2_PIX_FMT_ARGB32:
-		if (priv->chip != RCAR_GEN3)
-			goto e_format;
-
-		dmr = VNDMR_EXRGB | VNDMR_DTMD_ARGB;
-		break;
-	default:
-		goto e_format;
-	}
-
-	/* Always update on field change */
-	vnmc |= VNMC_VUP;
-
-	/* If input and output use the same colorspace, use bypass mode */
-	if (input_is_yuv == output_is_yuv)
-		vnmc |= VNMC_BPS;
-
-	/* progressive or interlaced mode */
-	interrupts = progressive ? VNIE_FIE : VNIE_EFE;
-
-	/* ack interrupts */
-	iowrite32(interrupts, priv->base + VNINTS_REG);
-	/* enable interrupts */
-	iowrite32(interrupts, priv->base + VNIE_REG);
-	/* start capturing */
-	iowrite32(dmr, priv->base + VNDMR_REG);
-	iowrite32(vnmc | VNMC_ME, priv->base + VNMC_REG);
-
-	return 0;
-
-e_format:
-	dev_warn(icd->parent, "Invalid fourcc format (0x%x)\n",
-		 icd->current_fmt->host_fmt->fourcc);
-	return -EINVAL;
-}
-
-static void rcar_vin_capture(struct rcar_vin_priv *priv)
-{
-	if (is_continuous_transfer(priv))
-		/* Continuous Frame Capture Mode */
-		iowrite32(VNFC_C_FRAME, priv->base + VNFC_REG);
-	else
-		/* Single Frame Capture Mode */
-		iowrite32(VNFC_S_FRAME, priv->base + VNFC_REG);
-}
-
-static void rcar_vin_request_capture_stop(struct rcar_vin_priv *priv)
-{
-	priv->state = STOPPING;
-
-	/* set continuous & single transfer off */
-	iowrite32(0, priv->base + VNFC_REG);
-	/* disable capture (release DMA buffer), reset */
-	iowrite32(ioread32(priv->base + VNMC_REG) & ~VNMC_ME,
-		  priv->base + VNMC_REG);
-
-	/* update the status if stopped already */
-	if (!(ioread32(priv->base + VNMS_REG) & VNMS_CA))
-		priv->state = STOPPED;
-}
-
-static int rcar_vin_get_free_hw_slot(struct rcar_vin_priv *priv)
-{
-	int slot;
-
-	for (slot = 0; slot < priv->nr_hw_slots; slot++)
-		if (priv->queue_buf[slot] == NULL)
-			return slot;
-
-	return -1;
-}
-
-static int rcar_vin_hw_ready(struct rcar_vin_priv *priv)
-{
-	/* Ensure all HW slots are filled */
-	return rcar_vin_get_free_hw_slot(priv) < 0 ? 1 : 0;
-}
-
-/* Moves a buffer from the queue to the HW slots */
-static int rcar_vin_fill_hw_slot(struct rcar_vin_priv *priv)
-{
-	struct vb2_v4l2_buffer *vbuf;
-	dma_addr_t phys_addr_top;
-	int slot;
-
-	if (list_empty(&priv->capture))
-		return 0;
-
-	/* Find a free HW slot */
-	slot = rcar_vin_get_free_hw_slot(priv);
-	if (slot < 0)
-		return 0;
-
-	vbuf = &list_entry(priv->capture.next,
-			struct rcar_vin_buffer, list)->vb;
-	list_del_init(to_buf_list(vbuf));
-	priv->queue_buf[slot] = vbuf;
-	phys_addr_top = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
-	iowrite32(phys_addr_top, priv->base + VNMB_REG(slot));
-
-	return 1;
-}
-
-static void rcar_vin_videobuf_queue(struct vb2_buffer *vb)
-{
-	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
-	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	struct rcar_vin_priv *priv = ici->priv;
-	unsigned long size;
-
-	size = icd->sizeimage;
-
-	if (vb2_plane_size(vb, 0) < size) {
-		dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
-			vb->index, vb2_plane_size(vb, 0), size);
-		goto error;
-	}
-
-	vb2_set_plane_payload(vb, 0, size);
-
-	dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
-		vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
-
-	spin_lock_irq(&priv->lock);
-
-	list_add_tail(to_buf_list(vbuf), &priv->capture);
-	rcar_vin_fill_hw_slot(priv);
-
-	/* If we weren't running, and have enough buffers, start capturing! */
-	if (priv->state != RUNNING && rcar_vin_hw_ready(priv)) {
-		if (rcar_vin_setup(priv)) {
-			/* Submit error */
-			list_del_init(to_buf_list(vbuf));
-			spin_unlock_irq(&priv->lock);
-			goto error;
-		}
-		priv->request_to_stop = false;
-		init_completion(&priv->capture_stop);
-		priv->state = RUNNING;
-		rcar_vin_capture(priv);
-	}
-
-	spin_unlock_irq(&priv->lock);
-
-	return;
-
-error:
-	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
-}
-
-/*
- * Wait for capture to stop and all in-flight buffers to be finished with by
- * the video hardware. This must be called under &priv->lock
- *
- */
-static void rcar_vin_wait_stop_streaming(struct rcar_vin_priv *priv)
-{
-	while (priv->state != STOPPED) {
-		/* issue stop if running */
-		if (priv->state == RUNNING)
-			rcar_vin_request_capture_stop(priv);
-
-		/* wait until capturing has been stopped */
-		if (priv->state == STOPPING) {
-			priv->request_to_stop = true;
-			spin_unlock_irq(&priv->lock);
-			if (!wait_for_completion_timeout(
-					&priv->capture_stop,
-					msecs_to_jiffies(TIMEOUT_MS)))
-				priv->state = STOPPED;
-			spin_lock_irq(&priv->lock);
-		}
-	}
-}
-
-static void rcar_vin_stop_streaming(struct vb2_queue *vq)
-{
-	struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	struct rcar_vin_priv *priv = ici->priv;
-	struct list_head *buf_head, *tmp;
-	int i;
-
-	spin_lock_irq(&priv->lock);
-	rcar_vin_wait_stop_streaming(priv);
-
-	for (i = 0; i < MAX_BUFFER_NUM; i++) {
-		if (priv->queue_buf[i]) {
-			vb2_buffer_done(&priv->queue_buf[i]->vb2_buf,
-					VB2_BUF_STATE_ERROR);
-			priv->queue_buf[i] = NULL;
-		}
-	}
-
-	list_for_each_safe(buf_head, tmp, &priv->capture) {
-		vb2_buffer_done(&list_entry(buf_head,
-				struct rcar_vin_buffer, list)->vb.vb2_buf,
-				VB2_BUF_STATE_ERROR);
-		list_del_init(buf_head);
-	}
-	spin_unlock_irq(&priv->lock);
-}
-
-static struct vb2_ops rcar_vin_vb2_ops = {
-	.queue_setup	= rcar_vin_videobuf_setup,
-	.buf_queue	= rcar_vin_videobuf_queue,
-	.stop_streaming	= rcar_vin_stop_streaming,
-	.wait_prepare	= vb2_ops_wait_prepare,
-	.wait_finish	= vb2_ops_wait_finish,
-};
-
-static irqreturn_t rcar_vin_irq(int irq, void *data)
-{
-	struct rcar_vin_priv *priv = data;
-	u32 int_status;
-	bool can_run = false, hw_stopped;
-	int slot;
-	unsigned int handled = 0;
-
-	spin_lock(&priv->lock);
-
-	int_status = ioread32(priv->base + VNINTS_REG);
-	if (!int_status)
-		goto done;
-	/* ack interrupts */
-	iowrite32(int_status, priv->base + VNINTS_REG);
-	handled = 1;
-
-	/* nothing to do if capture status is 'STOPPED' */
-	if (priv->state == STOPPED)
-		goto done;
-
-	hw_stopped = !(ioread32(priv->base + VNMS_REG) & VNMS_CA);
-
-	if (!priv->request_to_stop) {
-		if (is_continuous_transfer(priv))
-			slot = (ioread32(priv->base + VNMS_REG) &
-				VNMS_FBS_MASK) >> VNMS_FBS_SHIFT;
-		else
-			slot = 0;
-
-		priv->queue_buf[slot]->field = priv->field;
-		priv->queue_buf[slot]->sequence = priv->sequence++;
-		priv->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns();
-		vb2_buffer_done(&priv->queue_buf[slot]->vb2_buf,
-				VB2_BUF_STATE_DONE);
-		priv->queue_buf[slot] = NULL;
-
-		if (priv->state != STOPPING)
-			can_run = rcar_vin_fill_hw_slot(priv);
-
-		if (hw_stopped || !can_run) {
-			priv->state = STOPPED;
-		} else if (is_continuous_transfer(priv) &&
-			   list_empty(&priv->capture) &&
-			   priv->state == RUNNING) {
-			/*
-			 * The continuous capturing requires an explicit stop
-			 * operation when there is no buffer to be set into
-			 * the VnMBm registers.
-			 */
-			rcar_vin_request_capture_stop(priv);
-		} else {
-			rcar_vin_capture(priv);
-		}
-
-	} else if (hw_stopped) {
-		priv->state = STOPPED;
-		priv->request_to_stop = false;
-		complete(&priv->capture_stop);
-	}
-
-done:
-	spin_unlock(&priv->lock);
-
-	return IRQ_RETVAL(handled);
-}
-
-static int rcar_vin_add_device(struct soc_camera_device *icd)
-{
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	struct rcar_vin_priv *priv = ici->priv;
-	int i;
-
-	for (i = 0; i < MAX_BUFFER_NUM; i++)
-		priv->queue_buf[i] = NULL;
-
-	pm_runtime_get_sync(ici->v4l2_dev.dev);
-
-	dev_dbg(icd->parent, "R-Car VIN driver attached to camera %d\n",
-		icd->devnum);
-
-	return 0;
-}
-
-static void rcar_vin_remove_device(struct soc_camera_device *icd)
-{
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	struct rcar_vin_priv *priv = ici->priv;
-	struct vb2_v4l2_buffer *vbuf;
-	int i;
-
-	/* disable capture, disable interrupts */
-	iowrite32(ioread32(priv->base + VNMC_REG) & ~VNMC_ME,
-		  priv->base + VNMC_REG);
-	iowrite32(0, priv->base + VNIE_REG);
-
-	priv->state = STOPPED;
-	priv->request_to_stop = false;
-
-	/* make sure active buffer is cancelled */
-	spin_lock_irq(&priv->lock);
-	for (i = 0; i < MAX_BUFFER_NUM; i++) {
-		vbuf = priv->queue_buf[i];
-		if (vbuf) {
-			list_del_init(to_buf_list(vbuf));
-			vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_ERROR);
-		}
-	}
-	spin_unlock_irq(&priv->lock);
-
-	pm_runtime_put(ici->v4l2_dev.dev);
-
-	dev_dbg(icd->parent, "R-Car VIN driver detached from camera %d\n",
-		icd->devnum);
-}
-
-static void set_coeff(struct rcar_vin_priv *priv, unsigned short xs)
-{
-	int i;
-	const struct vin_coeff *p_prev_set = NULL;
-	const struct vin_coeff *p_set = NULL;
-
-	/* Look for suitable coefficient values */
-	for (i = 0; i < ARRAY_SIZE(vin_coeff_set); i++) {
-		p_prev_set = p_set;
-		p_set = &vin_coeff_set[i];
-
-		if (xs < p_set->xs_value)
-			break;
-	}
-
-	/* Use previous value if its XS value is closer */
-	if (p_prev_set && p_set &&
-	    xs - p_prev_set->xs_value < p_set->xs_value - xs)
-		p_set = p_prev_set;
-
-	/* Set coefficient registers */
-	iowrite32(p_set->coeff_set[0], priv->base + VNC1A_REG);
-	iowrite32(p_set->coeff_set[1], priv->base + VNC1B_REG);
-	iowrite32(p_set->coeff_set[2], priv->base + VNC1C_REG);
-
-	iowrite32(p_set->coeff_set[3], priv->base + VNC2A_REG);
-	iowrite32(p_set->coeff_set[4], priv->base + VNC2B_REG);
-	iowrite32(p_set->coeff_set[5], priv->base + VNC2C_REG);
-
-	iowrite32(p_set->coeff_set[6], priv->base + VNC3A_REG);
-	iowrite32(p_set->coeff_set[7], priv->base + VNC3B_REG);
-	iowrite32(p_set->coeff_set[8], priv->base + VNC3C_REG);
-
-	iowrite32(p_set->coeff_set[9], priv->base + VNC4A_REG);
-	iowrite32(p_set->coeff_set[10], priv->base + VNC4B_REG);
-	iowrite32(p_set->coeff_set[11], priv->base + VNC4C_REG);
-
-	iowrite32(p_set->coeff_set[12], priv->base + VNC5A_REG);
-	iowrite32(p_set->coeff_set[13], priv->base + VNC5B_REG);
-	iowrite32(p_set->coeff_set[14], priv->base + VNC5C_REG);
-
-	iowrite32(p_set->coeff_set[15], priv->base + VNC6A_REG);
-	iowrite32(p_set->coeff_set[16], priv->base + VNC6B_REG);
-	iowrite32(p_set->coeff_set[17], priv->base + VNC6C_REG);
-
-	iowrite32(p_set->coeff_set[18], priv->base + VNC7A_REG);
-	iowrite32(p_set->coeff_set[19], priv->base + VNC7B_REG);
-	iowrite32(p_set->coeff_set[20], priv->base + VNC7C_REG);
-
-	iowrite32(p_set->coeff_set[21], priv->base + VNC8A_REG);
-	iowrite32(p_set->coeff_set[22], priv->base + VNC8B_REG);
-	iowrite32(p_set->coeff_set[23], priv->base + VNC8C_REG);
-}
-
-/* rect is guaranteed to not exceed the scaled camera rectangle */
-static int rcar_vin_set_rect(struct soc_camera_device *icd)
-{
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	struct rcar_vin_cam *cam = icd->host_priv;
-	struct rcar_vin_priv *priv = ici->priv;
-	unsigned int left_offset, top_offset;
-	unsigned char dsize = 0;
-	struct v4l2_rect *cam_subrect = &cam->subrect;
-	u32 value;
-
-	dev_dbg(icd->parent, "Crop %ux%u@%u:%u\n",
-		icd->user_width, icd->user_height, cam->vin_left, cam->vin_top);
-
-	left_offset = cam->vin_left;
-	top_offset = cam->vin_top;
-
-	if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_RGB32 &&
-	    priv->chip == RCAR_E1)
-		dsize = 1;
-
-	dev_dbg(icd->parent, "Cam %ux%u@%u:%u\n",
-		cam->width, cam->height, cam->vin_left, cam->vin_top);
-	dev_dbg(icd->parent, "Cam subrect %ux%u@%u:%u\n",
-		cam_subrect->width, cam_subrect->height,
-		cam_subrect->left, cam_subrect->top);
-
-	/* Set Start/End Pixel/Line Pre-Clip */
-	iowrite32(left_offset << dsize, priv->base + VNSPPRC_REG);
-	iowrite32((left_offset + cam_subrect->width - 1) << dsize,
-		  priv->base + VNEPPRC_REG);
-	switch (priv->field) {
-	case V4L2_FIELD_INTERLACED:
-	case V4L2_FIELD_INTERLACED_TB:
-	case V4L2_FIELD_INTERLACED_BT:
-		iowrite32(top_offset / 2, priv->base + VNSLPRC_REG);
-		iowrite32((top_offset + cam_subrect->height) / 2 - 1,
-			  priv->base + VNELPRC_REG);
-		break;
-	default:
-		iowrite32(top_offset, priv->base + VNSLPRC_REG);
-		iowrite32(top_offset + cam_subrect->height - 1,
-			  priv->base + VNELPRC_REG);
-		break;
-	}
-
-	/* Set scaling coefficient */
-	value = 0;
-	if (cam_subrect->height != cam->out_height)
-		value = (4096 * cam_subrect->height) / cam->out_height;
-	dev_dbg(icd->parent, "YS Value: %x\n", value);
-	iowrite32(value, priv->base + VNYS_REG);
-
-	value = 0;
-	if (cam_subrect->width != cam->out_width)
-		value = (4096 * cam_subrect->width) / cam->out_width;
-
-	/* Horizontal upscaling is up to double size */
-	if (0 < value && value < 2048)
-		value = 2048;
-
-	dev_dbg(icd->parent, "XS Value: %x\n", value);
-	iowrite32(value, priv->base + VNXS_REG);
-
-	/* Horizontal upscaling is carried out by scaling down from double size */
-	if (value < 4096)
-		value *= 2;
-
-	set_coeff(priv, value);
-
-	/* Set Start/End Pixel/Line Post-Clip */
-	iowrite32(0, priv->base + VNSPPOC_REG);
-	iowrite32(0, priv->base + VNSLPOC_REG);
-	iowrite32((cam->out_width - 1) << dsize, priv->base + VNEPPOC_REG);
-	switch (priv->field) {
-	case V4L2_FIELD_INTERLACED:
-	case V4L2_FIELD_INTERLACED_TB:
-	case V4L2_FIELD_INTERLACED_BT:
-		iowrite32(cam->out_height / 2 - 1,
-			  priv->base + VNELPOC_REG);
-		break;
-	default:
-		iowrite32(cam->out_height - 1, priv->base + VNELPOC_REG);
-		break;
-	}
-
-	iowrite32(ALIGN(cam->out_width, 0x10), priv->base + VNIS_REG);
-
-	return 0;
-}
-
-static void capture_stop_preserve(struct rcar_vin_priv *priv, u32 *vnmc)
-{
-	*vnmc = ioread32(priv->base + VNMC_REG);
-	/* module disable */
-	iowrite32(*vnmc & ~VNMC_ME, priv->base + VNMC_REG);
-}
-
-static void capture_restore(struct rcar_vin_priv *priv, u32 vnmc)
-{
-	unsigned long timeout = jiffies + 10 * HZ;
-
-	/*
-	 * Wait until the end of the current frame. It can take a long time,
-	 * but if it has been aborted by a MRST1 reset, it should exit sooner.
-	 */
-	while ((ioread32(priv->base + VNMS_REG) & VNMS_AV) &&
-		time_before(jiffies, timeout))
-		msleep(1);
-
-	if (time_after(jiffies, timeout)) {
-		dev_err(priv->ici.v4l2_dev.dev,
-			"Timeout waiting for frame end! Interface problem?\n");
-		return;
-	}
-
-	iowrite32(vnmc, priv->base + VNMC_REG);
-}
-
-#define VIN_MBUS_FLAGS	(V4L2_MBUS_MASTER |		\
-			 V4L2_MBUS_PCLK_SAMPLE_RISING |	\
-			 V4L2_MBUS_HSYNC_ACTIVE_HIGH |	\
-			 V4L2_MBUS_HSYNC_ACTIVE_LOW |	\
-			 V4L2_MBUS_VSYNC_ACTIVE_HIGH |	\
-			 V4L2_MBUS_VSYNC_ACTIVE_LOW |	\
-			 V4L2_MBUS_DATA_ACTIVE_HIGH)
-
-static int rcar_vin_set_bus_param(struct soc_camera_device *icd)
-{
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	struct rcar_vin_priv *priv = ici->priv;
-	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-	struct v4l2_mbus_config cfg;
-	unsigned long common_flags;
-	u32 vnmc;
-	u32 val;
-	int ret;
-
-	capture_stop_preserve(priv, &vnmc);
-
-	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
-	if (!ret) {
-		common_flags = soc_mbus_config_compatible(&cfg, VIN_MBUS_FLAGS);
-		if (!common_flags) {
-			dev_warn(icd->parent,
-				 "MBUS flags incompatible: camera 0x%x, host 0x%x\n",
-				 cfg.flags, VIN_MBUS_FLAGS);
-			return -EINVAL;
-		}
-	} else if (ret != -ENOIOCTLCMD) {
-		return ret;
-	} else {
-		common_flags = VIN_MBUS_FLAGS;
-	}
-
-	/* Make choises, based on platform preferences */
-	if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
-	    (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
-		if (priv->pdata_flags & RCAR_VIN_HSYNC_ACTIVE_LOW)
-			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
-		else
-			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
-	}
-
-	if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
-	    (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
-		if (priv->pdata_flags & RCAR_VIN_VSYNC_ACTIVE_LOW)
-			common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
-		else
-			common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
-	}
-
-	cfg.flags = common_flags;
-	ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
-	if (ret < 0 && ret != -ENOIOCTLCMD)
-		return ret;
-
-	val = VNDMR2_FTEV | VNDMR2_VLV(1);
-	if (!(common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
-		val |= VNDMR2_VPS;
-	if (!(common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
-		val |= VNDMR2_HPS;
-	iowrite32(val, priv->base + VNDMR2_REG);
-
-	ret = rcar_vin_set_rect(icd);
-	if (ret < 0)
-		return ret;
-
-	capture_restore(priv, vnmc);
-
-	return 0;
-}
-
-static int rcar_vin_try_bus_param(struct soc_camera_device *icd,
-				  unsigned char buswidth)
-{
-	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-	struct v4l2_mbus_config cfg;
-	int ret;
-
-	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
-	if (ret == -ENOIOCTLCMD)
-		return 0;
-	else if (ret)
-		return ret;
-
-	if (buswidth > 24)
-		return -EINVAL;
-
-	/* check is there common mbus flags */
-	ret = soc_mbus_config_compatible(&cfg, VIN_MBUS_FLAGS);
-	if (ret)
-		return 0;
-
-	dev_warn(icd->parent,
-		"MBUS flags incompatible: camera 0x%x, host 0x%x\n",
-		 cfg.flags, VIN_MBUS_FLAGS);
-
-	return -EINVAL;
-}
-
-static bool rcar_vin_packing_supported(const struct soc_mbus_pixelfmt *fmt)
-{
-	return	fmt->packing == SOC_MBUS_PACKING_NONE ||
-		(fmt->bits_per_sample > 8 &&
-		 fmt->packing == SOC_MBUS_PACKING_EXTEND16);
-}
-
-static const struct soc_mbus_pixelfmt rcar_vin_formats[] = {
-	{
-		.fourcc			= V4L2_PIX_FMT_NV16,
-		.name			= "NV16",
-		.bits_per_sample	= 8,
-		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
-		.order			= SOC_MBUS_ORDER_LE,
-		.layout			= SOC_MBUS_LAYOUT_PLANAR_Y_C,
-	},
-	{
-		.fourcc			= V4L2_PIX_FMT_YUYV,
-		.name			= "YUYV",
-		.bits_per_sample	= 16,
-		.packing		= SOC_MBUS_PACKING_NONE,
-		.order			= SOC_MBUS_ORDER_LE,
-		.layout			= SOC_MBUS_LAYOUT_PACKED,
-	},
-	{
-		.fourcc			= V4L2_PIX_FMT_UYVY,
-		.name			= "UYVY",
-		.bits_per_sample	= 16,
-		.packing		= SOC_MBUS_PACKING_NONE,
-		.order			= SOC_MBUS_ORDER_LE,
-		.layout			= SOC_MBUS_LAYOUT_PACKED,
-	},
-	{
-		.fourcc			= V4L2_PIX_FMT_RGB565,
-		.name			= "RGB565",
-		.bits_per_sample	= 16,
-		.packing		= SOC_MBUS_PACKING_NONE,
-		.order			= SOC_MBUS_ORDER_LE,
-		.layout			= SOC_MBUS_LAYOUT_PACKED,
-	},
-	{
-		.fourcc			= V4L2_PIX_FMT_RGB555X,
-		.name			= "ARGB1555",
-		.bits_per_sample	= 16,
-		.packing		= SOC_MBUS_PACKING_NONE,
-		.order			= SOC_MBUS_ORDER_LE,
-		.layout			= SOC_MBUS_LAYOUT_PACKED,
-	},
-	{
-		.fourcc			= V4L2_PIX_FMT_RGB32,
-		.name			= "RGB888",
-		.bits_per_sample	= 32,
-		.packing		= SOC_MBUS_PACKING_NONE,
-		.order			= SOC_MBUS_ORDER_LE,
-		.layout			= SOC_MBUS_LAYOUT_PACKED,
-	},
-	{
-		.fourcc			= V4L2_PIX_FMT_ARGB32,
-		.name			= "ARGB8888",
-		.bits_per_sample	= 32,
-		.packing		= SOC_MBUS_PACKING_NONE,
-		.order			= SOC_MBUS_ORDER_LE,
-		.layout			= SOC_MBUS_LAYOUT_PACKED,
-	},
-};
-
-static int rcar_vin_get_formats(struct soc_camera_device *icd, unsigned int idx,
-				struct soc_camera_format_xlate *xlate)
-{
-	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-	struct device *dev = icd->parent;
-	int ret, k, n;
-	int formats = 0;
-	struct rcar_vin_cam *cam;
-	struct v4l2_subdev_mbus_code_enum code = {
-		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
-		.index = idx,
-	};
-	const struct soc_mbus_pixelfmt *fmt;
-
-	ret = v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code);
-	if (ret < 0)
-		return 0;
-
-	fmt = soc_mbus_get_fmtdesc(code.code);
-	if (!fmt) {
-		dev_warn(dev, "unsupported format code #%u: %d\n", idx, code.code);
-		return 0;
-	}
-
-	ret = rcar_vin_try_bus_param(icd, fmt->bits_per_sample);
-	if (ret < 0)
-		return 0;
-
-	if (!icd->host_priv) {
-		struct v4l2_subdev_format fmt = {
-			.which = V4L2_SUBDEV_FORMAT_ACTIVE,
-		};
-		struct v4l2_mbus_framefmt *mf = &fmt.format;
-		struct v4l2_rect rect;
-		struct device *dev = icd->parent;
-		int shift;
-
-		ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
-		if (ret < 0)
-			return ret;
-
-		/* Cache current client geometry */
-		ret = soc_camera_client_g_rect(sd, &rect);
-		if (ret == -ENOIOCTLCMD) {
-			/* Sensor driver doesn't support cropping */
-			rect.left = 0;
-			rect.top = 0;
-			rect.width = mf->width;
-			rect.height = mf->height;
-		} else if (ret < 0) {
-			return ret;
-		}
-
-		/*
-		 * If sensor proposes too large format then try smaller ones:
-		 * 1280x960, 640x480, 320x240
-		 */
-		for (shift = 0; shift < 3; shift++) {
-			if (mf->width <= VIN_MAX_WIDTH &&
-			    mf->height <= VIN_MAX_HEIGHT)
-				break;
-
-			mf->width = 1280 >> shift;
-			mf->height = 960 >> shift;
-			ret = v4l2_device_call_until_err(sd->v4l2_dev,
-							 soc_camera_grp_id(icd),
-							 pad, set_fmt, NULL,
-							 &fmt);
-			if (ret < 0)
-				return ret;
-		}
-
-		if (shift == 3) {
-			dev_err(dev,
-				"Failed to configure the client below %ux%u\n",
-				mf->width, mf->height);
-			return -EIO;
-		}
-
-		dev_dbg(dev, "camera fmt %ux%u\n", mf->width, mf->height);
-
-		cam = kzalloc(sizeof(*cam), GFP_KERNEL);
-		if (!cam)
-			return -ENOMEM;
-		/*
-		 * We are called with current camera crop,
-		 * initialise subrect with it
-		 */
-		cam->rect = rect;
-		cam->subrect = rect;
-		cam->width = mf->width;
-		cam->height = mf->height;
-		cam->out_width	= mf->width;
-		cam->out_height	= mf->height;
-
-		icd->host_priv = cam;
-	} else {
-		cam = icd->host_priv;
-	}
-
-	/* Beginning of a pass */
-	if (!idx)
-		cam->extra_fmt = NULL;
-
-	switch (code.code) {
-	case MEDIA_BUS_FMT_YUYV8_1X16:
-	case MEDIA_BUS_FMT_YUYV8_2X8:
-	case MEDIA_BUS_FMT_YUYV10_2X10:
-	case MEDIA_BUS_FMT_RGB888_1X24:
-		if (cam->extra_fmt)
-			break;
-
-		/* Add all our formats that can be generated by VIN */
-		cam->extra_fmt = rcar_vin_formats;
-
-		n = ARRAY_SIZE(rcar_vin_formats);
-		formats += n;
-		for (k = 0; xlate && k < n; k++, xlate++) {
-			xlate->host_fmt = &rcar_vin_formats[k];
-			xlate->code = code.code;
-			dev_dbg(dev, "Providing format %s using code %d\n",
-				rcar_vin_formats[k].name, code.code);
-		}
-		break;
-	default:
-		if (!rcar_vin_packing_supported(fmt))
-			return 0;
-
-		dev_dbg(dev, "Providing format %s in pass-through mode\n",
-			fmt->name);
-		break;
-	}
-
-	/* Generic pass-through */
-	formats++;
-	if (xlate) {
-		xlate->host_fmt = fmt;
-		xlate->code = code.code;
-		xlate++;
-	}
-
-	return formats;
-}
-
-static void rcar_vin_put_formats(struct soc_camera_device *icd)
-{
-	kfree(icd->host_priv);
-	icd->host_priv = NULL;
-}
-
-static int rcar_vin_set_crop(struct soc_camera_device *icd,
-			     const struct v4l2_crop *a)
-{
-	struct v4l2_crop a_writable = *a;
-	const struct v4l2_rect *rect = &a_writable.c;
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	struct rcar_vin_priv *priv = ici->priv;
-	struct v4l2_crop cam_crop;
-	struct rcar_vin_cam *cam = icd->host_priv;
-	struct v4l2_rect *cam_rect = &cam_crop.c;
-	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-	struct device *dev = icd->parent;
-	struct v4l2_subdev_format fmt = {
-		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
-	};
-	struct v4l2_mbus_framefmt *mf = &fmt.format;
-	u32 vnmc;
-	int ret, i;
-
-	dev_dbg(dev, "S_CROP(%ux%u@%u:%u)\n", rect->width, rect->height,
-		rect->left, rect->top);
-
-	/* During camera cropping its output window can change too, stop VIN */
-	capture_stop_preserve(priv, &vnmc);
-	dev_dbg(dev, "VNMC_REG 0x%x\n", vnmc);
-
-	/* Apply iterative camera S_CROP for new input window. */
-	ret = soc_camera_client_s_crop(sd, &a_writable, &cam_crop,
-				       &cam->rect, &cam->subrect);
-	if (ret < 0)
-		return ret;
-
-	dev_dbg(dev, "camera cropped to %ux%u@%u:%u\n",
-		cam_rect->width, cam_rect->height,
-		cam_rect->left, cam_rect->top);
-
-	/* On success cam_crop contains current camera crop */
-
-	/* Retrieve camera output window */
-	ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
-	if (ret < 0)
-		return ret;
-
-	if (mf->width > VIN_MAX_WIDTH || mf->height > VIN_MAX_HEIGHT)
-		return -EINVAL;
-
-	/* Cache camera output window */
-	cam->width = mf->width;
-	cam->height = mf->height;
-
-	icd->user_width  = cam->width;
-	icd->user_height = cam->height;
-
-	cam->vin_left = rect->left & ~1;
-	cam->vin_top = rect->top & ~1;
-
-	/* Use VIN cropping to crop to the new window. */
-	ret = rcar_vin_set_rect(icd);
-	if (ret < 0)
-		return ret;
-
-	cam->subrect = *rect;
-
-	dev_dbg(dev, "VIN cropped to %ux%u@%u:%u\n",
-		icd->user_width, icd->user_height,
-		cam->vin_left, cam->vin_top);
-
-	/* Restore capture */
-	for (i = 0; i < MAX_BUFFER_NUM; i++) {
-		if (priv->queue_buf[i] && priv->state == STOPPED) {
-			vnmc |= VNMC_ME;
-			break;
-		}
-	}
-	capture_restore(priv, vnmc);
-
-	/* Even if only camera cropping succeeded */
-	return ret;
-}
-
-static int rcar_vin_get_crop(struct soc_camera_device *icd,
-			     struct v4l2_crop *a)
-{
-	struct rcar_vin_cam *cam = icd->host_priv;
-
-	a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	a->c = cam->subrect;
-
-	return 0;
-}
-
-/* Similar to set_crop multistage iterative algorithm */
-static int rcar_vin_set_fmt(struct soc_camera_device *icd,
-			    struct v4l2_format *f)
-{
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	struct rcar_vin_priv *priv = ici->priv;
-	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-	struct rcar_vin_cam *cam = icd->host_priv;
-	struct v4l2_pix_format *pix = &f->fmt.pix;
-	struct v4l2_mbus_framefmt mf;
-	struct device *dev = icd->parent;
-	__u32 pixfmt = pix->pixelformat;
-	const struct soc_camera_format_xlate *xlate;
-	unsigned int vin_sub_width = 0, vin_sub_height = 0;
-	int ret;
-	bool can_scale;
-	enum v4l2_field field;
-	v4l2_std_id std;
-
-	dev_dbg(dev, "S_FMT(pix=0x%x, %ux%u)\n",
-		pixfmt, pix->width, pix->height);
-
-	switch (pix->field) {
-	default:
-		pix->field = V4L2_FIELD_NONE;
-		/* fall-through */
-	case V4L2_FIELD_NONE:
-	case V4L2_FIELD_TOP:
-	case V4L2_FIELD_BOTTOM:
-	case V4L2_FIELD_INTERLACED_TB:
-	case V4L2_FIELD_INTERLACED_BT:
-		field = pix->field;
-		break;
-	case V4L2_FIELD_INTERLACED:
-		/* Query for standard if not explicitly mentioned _TB/_BT */
-		ret = v4l2_subdev_call(sd, video, querystd, &std);
-		if (ret == -ENOIOCTLCMD) {
-			field = V4L2_FIELD_NONE;
-		} else if (ret < 0) {
-			return ret;
-		} else {
-			field = std & V4L2_STD_625_50 ?
-				V4L2_FIELD_INTERLACED_TB :
-				V4L2_FIELD_INTERLACED_BT;
-		}
-		break;
-	}
-
-	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
-	if (!xlate) {
-		dev_warn(dev, "Format %x not found\n", pixfmt);
-		return -EINVAL;
-	}
-	/* Calculate client output geometry */
-	soc_camera_calc_client_output(icd, &cam->rect, &cam->subrect, pix, &mf,
-				      12);
-	mf.field = pix->field;
-	mf.colorspace = pix->colorspace;
-	mf.code	 = xlate->code;
-
-	switch (pixfmt) {
-	case V4L2_PIX_FMT_RGB32:
-		can_scale = priv->chip != RCAR_E1;
-		break;
-	case V4L2_PIX_FMT_ARGB32:
-	case V4L2_PIX_FMT_UYVY:
-	case V4L2_PIX_FMT_YUYV:
-	case V4L2_PIX_FMT_RGB565:
-	case V4L2_PIX_FMT_RGB555X:
-		can_scale = true;
-		break;
-	default:
-		can_scale = false;
-		break;
-	}
-
-	dev_dbg(dev, "request camera output %ux%u\n", mf.width, mf.height);
-
-	ret = soc_camera_client_scale(icd, &cam->rect, &cam->subrect,
-				      &mf, &vin_sub_width, &vin_sub_height,
-				      can_scale, 12);
-
-	/* Done with the camera. Now see if we can improve the result */
-	dev_dbg(dev, "Camera %d fmt %ux%u, requested %ux%u\n",
-		ret, mf.width, mf.height, pix->width, pix->height);
-
-	if (ret == -ENOIOCTLCMD)
-		dev_dbg(dev, "Sensor doesn't support scaling\n");
-	else if (ret < 0)
-		return ret;
-
-	if (mf.code != xlate->code)
-		return -EINVAL;
-
-	/* Prepare VIN crop */
-	cam->width = mf.width;
-	cam->height = mf.height;
-
-	/* Use VIN scaling to scale to the requested user window. */
-
-	/* We cannot scale up */
-	if (pix->width > vin_sub_width)
-		vin_sub_width = pix->width;
-
-	if (pix->height > vin_sub_height)
-		vin_sub_height = pix->height;
-
-	pix->colorspace = mf.colorspace;
-
-	if (!can_scale) {
-		pix->width = vin_sub_width;
-		pix->height = vin_sub_height;
-	}
-
-	/*
-	 * We have calculated CFLCR, the actual configuration will be performed
-	 * in rcar_vin_set_bus_param()
-	 */
-
-	dev_dbg(dev, "W: %u : %u, H: %u : %u\n",
-		vin_sub_width, pix->width, vin_sub_height, pix->height);
-
-	cam->out_width = pix->width;
-	cam->out_height = pix->height;
-
-	icd->current_fmt = xlate;
-
-	priv->field = field;
-
-	return 0;
-}
-
-static int rcar_vin_try_fmt(struct soc_camera_device *icd,
-			    struct v4l2_format *f)
-{
-	const struct soc_camera_format_xlate *xlate;
-	struct v4l2_pix_format *pix = &f->fmt.pix;
-	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-	struct v4l2_subdev_pad_config pad_cfg;
-	struct v4l2_subdev_format format = {
-		.which = V4L2_SUBDEV_FORMAT_TRY,
-	};
-	struct v4l2_mbus_framefmt *mf = &format.format;
-	__u32 pixfmt = pix->pixelformat;
-	int width, height;
-	int ret;
-
-	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
-	if (!xlate) {
-		xlate = icd->current_fmt;
-		dev_dbg(icd->parent, "Format %x not found, keeping %x\n",
-			pixfmt, xlate->host_fmt->fourcc);
-		pixfmt = xlate->host_fmt->fourcc;
-		pix->pixelformat = pixfmt;
-		pix->colorspace = icd->colorspace;
-	}
-
-	/* FIXME: calculate using depth and bus width */
-	v4l_bound_align_image(&pix->width, 2, VIN_MAX_WIDTH, 1,
-			      &pix->height, 4, VIN_MAX_HEIGHT, 2, 0);
-
-	width = pix->width;
-	height = pix->height;
-
-	/* let soc-camera calculate these values */
-	pix->bytesperline = 0;
-	pix->sizeimage = 0;
-
-	/* limit to sensor capabilities */
-	mf->width = pix->width;
-	mf->height = pix->height;
-	mf->field = pix->field;
-	mf->code = xlate->code;
-	mf->colorspace = pix->colorspace;
-
-	ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
-					 pad, set_fmt, &pad_cfg, &format);
-	if (ret < 0)
-		return ret;
-
-	/* Adjust only if VIN cannot scale */
-	if (pix->width > mf->width * 2)
-		pix->width = mf->width * 2;
-	if (pix->height > mf->height * 3)
-		pix->height = mf->height * 3;
-
-	pix->field = mf->field;
-	pix->colorspace = mf->colorspace;
-
-	if (pixfmt == V4L2_PIX_FMT_NV16) {
-		/* FIXME: check against rect_max after converting soc-camera */
-		/* We can scale precisely, need a bigger image from camera */
-		if (pix->width < width || pix->height < height) {
-			/*
-			 * We presume, the sensor behaves sanely, i.e. if
-			 * requested a bigger rectangle, it will not return a
-			 * smaller one.
-			 */
-			mf->width = VIN_MAX_WIDTH;
-			mf->height = VIN_MAX_HEIGHT;
-			ret = v4l2_device_call_until_err(sd->v4l2_dev,
-							 soc_camera_grp_id(icd),
-							 pad, set_fmt, &pad_cfg,
-							 &format);
-			if (ret < 0) {
-				dev_err(icd->parent,
-					"client try_fmt() = %d\n", ret);
-				return ret;
-			}
-		}
-		/* We will scale exactly */
-		if (mf->width > width)
-			pix->width = width;
-		if (mf->height > height)
-			pix->height = height;
-	}
-
-	return ret;
-}
-
-static unsigned int rcar_vin_poll(struct file *file, poll_table *pt)
-{
-	struct soc_camera_device *icd = file->private_data;
-
-	return vb2_poll(&icd->vb2_vidq, file, pt);
-}
-
-static int rcar_vin_querycap(struct soc_camera_host *ici,
-			     struct v4l2_capability *cap)
-{
-	strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card));
-	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
-	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s%d", DRV_NAME, ici->nr);
-
-	return 0;
-}
-
-static int rcar_vin_init_videobuf2(struct vb2_queue *vq,
-				   struct soc_camera_device *icd)
-{
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-
-	vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	vq->io_modes = VB2_MMAP | VB2_USERPTR;
-	vq->drv_priv = icd;
-	vq->ops = &rcar_vin_vb2_ops;
-	vq->mem_ops = &vb2_dma_contig_memops;
-	vq->buf_struct_size = sizeof(struct rcar_vin_buffer);
-	vq->timestamp_flags  = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
-	vq->lock = &ici->host_lock;
-	vq->dev = ici->v4l2_dev.dev;
-
-	return vb2_queue_init(vq);
-}
-
-static struct soc_camera_host_ops rcar_vin_host_ops = {
-	.owner		= THIS_MODULE,
-	.add		= rcar_vin_add_device,
-	.remove		= rcar_vin_remove_device,
-	.get_formats	= rcar_vin_get_formats,
-	.put_formats	= rcar_vin_put_formats,
-	.get_crop	= rcar_vin_get_crop,
-	.set_crop	= rcar_vin_set_crop,
-	.try_fmt	= rcar_vin_try_fmt,
-	.set_fmt	= rcar_vin_set_fmt,
-	.poll		= rcar_vin_poll,
-	.querycap	= rcar_vin_querycap,
-	.set_bus_param	= rcar_vin_set_bus_param,
-	.init_videobuf2	= rcar_vin_init_videobuf2,
-};
-
-#ifdef CONFIG_OF
-static const struct of_device_id rcar_vin_of_table[] = {
-	{ .compatible = "renesas,vin-r8a7795", .data = (void *)RCAR_GEN3 },
-	{ .compatible = "renesas,vin-r8a7794", .data = (void *)RCAR_GEN2 },
-	{ .compatible = "renesas,vin-r8a7793", .data = (void *)RCAR_GEN2 },
-	{ .compatible = "renesas,vin-r8a7791", .data = (void *)RCAR_GEN2 },
-	{ .compatible = "renesas,vin-r8a7790", .data = (void *)RCAR_GEN2 },
-	{ .compatible = "renesas,vin-r8a7779", .data = (void *)RCAR_H1 },
-	{ .compatible = "renesas,vin-r8a7778", .data = (void *)RCAR_M1 },
-	{ .compatible = "renesas,rcar-gen3-vin", .data = (void *)RCAR_GEN3 },
-	{ .compatible = "renesas,rcar-gen2-vin", .data = (void *)RCAR_GEN2 },
-	{ },
-};
-MODULE_DEVICE_TABLE(of, rcar_vin_of_table);
-#endif
-
-static int rcar_vin_probe(struct platform_device *pdev)
-{
-	const struct of_device_id *match = NULL;
-	struct rcar_vin_priv *priv;
-	struct v4l2_of_endpoint ep;
-	struct device_node *np;
-	struct resource *mem;
-	unsigned int pdata_flags;
-	int irq, ret;
-
-	match = of_match_device(of_match_ptr(rcar_vin_of_table), &pdev->dev);
-
-	np = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
-	if (!np) {
-		dev_err(&pdev->dev, "could not find endpoint\n");
-		return -EINVAL;
-	}
-
-	ret = v4l2_of_parse_endpoint(np, &ep);
-	if (ret) {
-		dev_err(&pdev->dev, "could not parse endpoint\n");
-		return ret;
-	}
-
-	if (ep.bus_type == V4L2_MBUS_BT656)
-		pdata_flags = RCAR_VIN_BT656;
-	else {
-		pdata_flags = 0;
-		if (ep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
-			pdata_flags |= RCAR_VIN_HSYNC_ACTIVE_LOW;
-		if (ep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
-			pdata_flags |= RCAR_VIN_VSYNC_ACTIVE_LOW;
-	}
-
-	of_node_put(np);
-
-	dev_dbg(&pdev->dev, "pdata_flags = %08x\n", pdata_flags);
-
-	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (mem == NULL)
-		return -EINVAL;
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq <= 0)
-		return -EINVAL;
-
-	priv = devm_kzalloc(&pdev->dev, sizeof(struct rcar_vin_priv),
-			    GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	priv->base = devm_ioremap_resource(&pdev->dev, mem);
-	if (IS_ERR(priv->base))
-		return PTR_ERR(priv->base);
-
-	ret = devm_request_irq(&pdev->dev, irq, rcar_vin_irq, IRQF_SHARED,
-			       dev_name(&pdev->dev), priv);
-	if (ret)
-		return ret;
-
-	priv->ici.priv = priv;
-	priv->ici.v4l2_dev.dev = &pdev->dev;
-	priv->ici.drv_name = dev_name(&pdev->dev);
-	priv->ici.ops = &rcar_vin_host_ops;
-
-	priv->pdata_flags = pdata_flags;
-	if (!match) {
-		priv->ici.nr = pdev->id;
-		priv->chip = pdev->id_entry->driver_data;
-	} else {
-		priv->ici.nr = of_alias_get_id(pdev->dev.of_node, "vin");
-		priv->chip = (enum chip_id)match->data;
-	}
-
-	spin_lock_init(&priv->lock);
-	INIT_LIST_HEAD(&priv->capture);
-
-	priv->state = STOPPED;
-
-	pm_suspend_ignore_children(&pdev->dev, true);
-	pm_runtime_enable(&pdev->dev);
-
-	ret = soc_camera_host_register(&priv->ici);
-	if (ret)
-		goto cleanup;
-
-	return 0;
-
-cleanup:
-	pm_runtime_disable(&pdev->dev);
-
-	return ret;
-}
-
-static int rcar_vin_remove(struct platform_device *pdev)
-{
-	struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
-
-	soc_camera_host_unregister(soc_host);
-	pm_runtime_disable(&pdev->dev);
-
-	return 0;
-}
-
-static struct platform_driver rcar_vin_driver = {
-	.probe		= rcar_vin_probe,
-	.remove		= rcar_vin_remove,
-	.driver		= {
-		.name		= DRV_NAME,
-		.of_match_table	= of_match_ptr(rcar_vin_of_table),
-	},
-};
-
-module_platform_driver(rcar_vin_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:rcar_vin");
-MODULE_DESCRIPTION("Renesas R-Car VIN camera host driver");
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 02b519d..a15bfb5 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -41,7 +41,6 @@
 #include <media/v4l2-dev.h>
 #include <media/soc_camera.h>
 #include <media/drv-intf/sh_mobile_ceu.h>
-#include <media/drv-intf/sh_mobile_csi2.h>
 #include <media/videobuf2-dma-contig.h>
 #include <media/v4l2-mediabus.h>
 #include <media/drv-intf/soc_mediabus.h>
@@ -99,11 +98,6 @@
 
 struct sh_mobile_ceu_dev {
 	struct soc_camera_host ici;
-	/* Asynchronous CSI2 linking */
-	struct v4l2_async_subdev *csi2_asd;
-	struct v4l2_subdev *csi2_sd;
-	/* Synchronous probing compatibility */
-	struct platform_device *csi2_pdev;
 
 	unsigned int irq;
 	void __iomem *base;
@@ -140,7 +134,7 @@
 	unsigned int width;
 	unsigned int height;
 	/*
-	 * User window from S_CROP / G_CROP, produced by client cropping and
+	 * User window from S_SELECTION / G_SELECTION, produced by client cropping and
 	 * scaling, CEU scaling and CEU cropping, mapped back onto the client
 	 * input window
 	 */
@@ -470,7 +464,7 @@
 	sh_mobile_ceu_soft_reset(pcdev);
 }
 
-static struct vb2_ops sh_mobile_ceu_videobuf_ops = {
+static const struct vb2_ops sh_mobile_ceu_videobuf_ops = {
 	.queue_setup	= sh_mobile_ceu_videobuf_setup,
 	.buf_prepare	= sh_mobile_ceu_videobuf_prepare,
 	.buf_queue	= sh_mobile_ceu_videobuf_queue,
@@ -517,74 +511,20 @@
 	return IRQ_HANDLED;
 }
 
-static struct v4l2_subdev *find_csi2(struct sh_mobile_ceu_dev *pcdev)
-{
-	struct v4l2_subdev *sd;
-
-	if (pcdev->csi2_sd)
-		return pcdev->csi2_sd;
-
-	if (pcdev->csi2_asd) {
-		char name[] = "sh-mobile-csi2";
-		v4l2_device_for_each_subdev(sd, &pcdev->ici.v4l2_dev)
-			if (!strncmp(name, sd->name, sizeof(name) - 1)) {
-				pcdev->csi2_sd = sd;
-				return sd;
-			}
-	}
-
-	return NULL;
-}
-
-static struct v4l2_subdev *csi2_subdev(struct sh_mobile_ceu_dev *pcdev,
-				       struct soc_camera_device *icd)
-{
-	struct v4l2_subdev *sd = pcdev->csi2_sd;
-
-	return sd && sd->grp_id == soc_camera_grp_id(icd) ? sd : NULL;
-}
-
 static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
 {
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	struct sh_mobile_ceu_dev *pcdev = ici->priv;
-	struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
-	int ret;
-
-	if (csi2_sd) {
-		csi2_sd->grp_id = soc_camera_grp_id(icd);
-		v4l2_set_subdev_hostdata(csi2_sd, icd);
-	}
-
-	ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
-	if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
-		return ret;
-
-	/*
-	 * -ENODEV is special: either csi2_sd == NULL or the CSI-2 driver
-	 * has not found this soc-camera device among its clients
-	 */
-	if (csi2_sd && ret == -ENODEV)
-		csi2_sd->grp_id = 0;
-
 	dev_info(icd->parent,
-		 "SuperH Mobile CEU%s driver attached to camera %d\n",
-		 csi2_sd && csi2_sd->grp_id ? "/CSI-2" : "", icd->devnum);
+		 "SuperH Mobile CEU driver attached to camera %d\n",
+		 icd->devnum);
 
 	return 0;
 }
 
 static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
 {
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	struct sh_mobile_ceu_dev *pcdev = ici->priv;
-	struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
-
 	dev_info(icd->parent,
 		 "SuperH Mobile CEU driver detached from camera %d\n",
 		 icd->devnum);
-
-	v4l2_subdev_call(csi2_sd, core, s_power, 0);
 }
 
 /* Called with .host_lock held */
@@ -704,12 +644,6 @@
 		cdwdr_width *= 2;
 	}
 
-	/* CSI2 special configuration */
-	if (csi2_subdev(pcdev, icd)) {
-		in_width = ((in_width - 2) * 2);
-		left_offset *= 2;
-	}
-
 	/* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */
 	camor = left_offset | (top_offset << 16);
 
@@ -758,13 +692,6 @@
 		ceu_write(pcdev, CAPSR, capsr);
 }
 
-/* Find the bus subdevice driver, e.g., CSI2 */
-static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev,
-					   struct soc_camera_device *icd)
-{
-	return csi2_subdev(pcdev, icd) ? : soc_camera_to_subdev(icd);
-}
-
 #define CEU_BUS_FLAGS (V4L2_MBUS_MASTER |	\
 		V4L2_MBUS_PCLK_SAMPLE_RISING |	\
 		V4L2_MBUS_HSYNC_ACTIVE_HIGH |	\
@@ -778,7 +705,7 @@
 {
 	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
 	struct sh_mobile_ceu_dev *pcdev = ici->priv;
-	struct v4l2_subdev *sd = find_bus_subdev(pcdev, icd);
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
 	struct sh_mobile_ceu_cam *cam = icd->host_priv;
 	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
 	unsigned long value, common_flags = CEU_BUS_FLAGS;
@@ -866,9 +793,7 @@
 	value |= common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
 	value |= common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;
 
-	if (csi2_subdev(pcdev, icd)) /* CSI2 mode */
-		value |= 3 << 12;
-	else if (pcdev->is_16bit)
+	if (pcdev->is_16bit)
 		value |= 1 << 12;
 	else if (pcdev->flags & SH_CEU_FLAG_LOWER_8BIT)
 		value |= 2 << 12;
@@ -923,9 +848,7 @@
 static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
 				       unsigned char buswidth)
 {
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	struct sh_mobile_ceu_dev *pcdev = ici->priv;
-	struct v4l2_subdev *sd = find_bus_subdev(pcdev, icd);
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
 	unsigned long common_flags = CEU_BUS_FLAGS;
 	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
 	int ret;
@@ -1046,12 +969,9 @@
 		return 0;
 	}
 
-	if (!csi2_subdev(pcdev, icd)) {
-		/* Are there any restrictions in the CSI-2 case? */
-		ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample);
-		if (ret < 0)
-			return 0;
-	}
+	ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample);
+	if (ret < 0)
+		return 0;
 
 	if (!icd->host_priv) {
 		struct v4l2_subdev_format fmt = {
@@ -1189,17 +1109,16 @@
  * Documentation/video4linux/sh_mobile_ceu_camera.txt for a description of
  * scaling and cropping algorithms and for the meaning of referenced here steps.
  */
-static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
-				  const struct v4l2_crop *a)
+static int sh_mobile_ceu_set_selection(struct soc_camera_device *icd,
+				       struct v4l2_selection *sel)
 {
-	struct v4l2_crop a_writable = *a;
-	const struct v4l2_rect *rect = &a_writable.c;
+	struct v4l2_rect *rect = &sel->r;
 	struct device *dev = icd->parent;
 	struct soc_camera_host *ici = to_soc_camera_host(dev);
 	struct sh_mobile_ceu_dev *pcdev = ici->priv;
-	struct v4l2_crop cam_crop;
+	struct v4l2_selection cam_sel;
 	struct sh_mobile_ceu_cam *cam = icd->host_priv;
-	struct v4l2_rect *cam_rect = &cam_crop.c;
+	struct v4l2_rect *cam_rect = &cam_sel.r;
 	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
 	struct v4l2_subdev_format fmt = {
 		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
@@ -1211,7 +1130,7 @@
 	u32 capsr, cflcr;
 	int ret;
 
-	dev_geo(dev, "S_CROP(%ux%u@%u:%u)\n", rect->width, rect->height,
+	dev_geo(dev, "S_SELECTION(%ux%u@%u:%u)\n", rect->width, rect->height,
 		rect->left, rect->top);
 
 	/* During camera cropping its output window can change too, stop CEU */
@@ -1219,10 +1138,10 @@
 	dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);
 
 	/*
-	 * 1. - 2. Apply iterative camera S_CROP for new input window, read back
+	 * 1. - 2. Apply iterative camera S_SELECTION for new input window, read back
 	 * actual camera rectangle.
 	 */
-	ret = soc_camera_client_s_crop(sd, &a_writable, &cam_crop,
+	ret = soc_camera_client_s_selection(sd, sel, &cam_sel,
 				       &cam->rect, &cam->subrect);
 	if (ret < 0)
 		return ret;
@@ -1331,13 +1250,12 @@
 	return ret;
 }
 
-static int sh_mobile_ceu_get_crop(struct soc_camera_device *icd,
-				  struct v4l2_crop *a)
+static int sh_mobile_ceu_get_selection(struct soc_camera_device *icd,
+				       struct v4l2_selection *sel)
 {
 	struct sh_mobile_ceu_cam *cam = icd->host_priv;
 
-	a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	a->c = cam->subrect;
+	sel->r = cam->subrect;
 
 	return 0;
 }
@@ -1579,8 +1497,8 @@
 	return ret;
 }
 
-static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd,
-				      const struct v4l2_crop *a)
+static int sh_mobile_ceu_set_liveselection(struct soc_camera_device *icd,
+					   struct v4l2_selection *sel)
 {
 	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
 	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -1599,7 +1517,7 @@
 			 "Client failed to stop the stream: %d\n", ret);
 	else
 		/* Do the crop, if it fails, there's nothing more we can do */
-		sh_mobile_ceu_set_crop(icd, a);
+		sh_mobile_ceu_set_selection(icd, sel);
 
 	dev_geo(icd->parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height);
 
@@ -1680,9 +1598,9 @@
 	.clock_stop	= sh_mobile_ceu_clock_stop,
 	.get_formats	= sh_mobile_ceu_get_formats,
 	.put_formats	= sh_mobile_ceu_put_formats,
-	.get_crop	= sh_mobile_ceu_get_crop,
-	.set_crop	= sh_mobile_ceu_set_crop,
-	.set_livecrop	= sh_mobile_ceu_set_livecrop,
+	.get_selection	= sh_mobile_ceu_get_selection,
+	.set_selection	= sh_mobile_ceu_set_selection,
+	.set_liveselection	= sh_mobile_ceu_set_liveselection,
 	.set_fmt	= sh_mobile_ceu_set_fmt,
 	.try_fmt	= sh_mobile_ceu_try_fmt,
 	.poll		= sh_mobile_ceu_poll,
@@ -1721,12 +1639,11 @@
 	struct resource *res;
 	void __iomem *base;
 	unsigned int irq;
-	int err, i;
+	int err;
 	struct bus_wait wait = {
 		.completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion),
 		.notifier.notifier_call = bus_notify,
 	};
-	struct sh_mobile_ceu_companion *csi2;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_irq(pdev, 0);
@@ -1821,132 +1738,16 @@
 	pcdev->ici.capabilities = SOCAM_HOST_CAP_STRIDE;
 
 	if (pcdev->pdata && pcdev->pdata->asd_sizes) {
-		struct v4l2_async_subdev **asd;
-		char name[] = "sh-mobile-csi2";
-		int j;
-
-		/*
-		 * CSI2 interfacing: several groups can use CSI2, pick up the
-		 * first one
-		 */
-		asd = pcdev->pdata->asd;
-		for (j = 0; pcdev->pdata->asd_sizes[j]; j++) {
-			for (i = 0; i < pcdev->pdata->asd_sizes[j]; i++, asd++) {
-				dev_dbg(&pdev->dev, "%s(): subdev #%d, type %u\n",
-					__func__, i, (*asd)->match_type);
-				if ((*asd)->match_type == V4L2_ASYNC_MATCH_DEVNAME &&
-				    !strncmp(name, (*asd)->match.device_name.name,
-					     sizeof(name) - 1)) {
-					pcdev->csi2_asd = *asd;
-					break;
-				}
-			}
-			if (pcdev->csi2_asd)
-				break;
-		}
-
 		pcdev->ici.asd = pcdev->pdata->asd;
 		pcdev->ici.asd_sizes = pcdev->pdata->asd_sizes;
 	}
 
-	/* Legacy CSI2 interfacing */
-	csi2 = pcdev->pdata ? pcdev->pdata->csi2 : NULL;
-	if (csi2) {
-		/*
-		 * TODO: remove this once all users are converted to
-		 * asynchronous CSI2 probing. If it has to be kept, csi2
-		 * platform device resources have to be added, using
-		 * platform_device_add_resources()
-		 */
-		struct platform_device *csi2_pdev =
-			platform_device_alloc("sh-mobile-csi2", csi2->id);
-		struct sh_csi2_pdata *csi2_pdata = csi2->platform_data;
-
-		if (!csi2_pdev) {
-			err = -ENOMEM;
-			goto exit_free_clk;
-		}
-
-		pcdev->csi2_pdev		= csi2_pdev;
-
-		err = platform_device_add_data(csi2_pdev, csi2_pdata,
-					       sizeof(*csi2_pdata));
-		if (err < 0)
-			goto exit_pdev_put;
-
-		csi2_pdev->resource		= csi2->resource;
-		csi2_pdev->num_resources	= csi2->num_resources;
-
-		err = platform_device_add(csi2_pdev);
-		if (err < 0)
-			goto exit_pdev_put;
-
-		wait.dev = &csi2_pdev->dev;
-
-		err = bus_register_notifier(&platform_bus_type, &wait.notifier);
-		if (err < 0)
-			goto exit_pdev_unregister;
-
-		/*
-		 * From this point the driver module will not unload, until
-		 * we complete the completion.
-		 */
-
-		if (!csi2_pdev->dev.driver) {
-			complete(&wait.completion);
-			/* Either too late, or probing failed */
-			bus_unregister_notifier(&platform_bus_type, &wait.notifier);
-			err = -ENXIO;
-			goto exit_pdev_unregister;
-		}
-
-		/*
-		 * The module is still loaded, in the worst case it is hanging
-		 * in device release on our completion. So, _now_ dereferencing
-		 * the "owner" is safe!
-		 */
-
-		err = try_module_get(csi2_pdev->dev.driver->owner);
-
-		/* Let notifier complete, if it has been locked */
-		complete(&wait.completion);
-		bus_unregister_notifier(&platform_bus_type, &wait.notifier);
-		if (!err) {
-			err = -ENODEV;
-			goto exit_pdev_unregister;
-		}
-
-		pcdev->csi2_sd = platform_get_drvdata(csi2_pdev);
-	}
-
 	err = soc_camera_host_register(&pcdev->ici);
 	if (err)
-		goto exit_csi2_unregister;
-
-	if (csi2) {
-		err = v4l2_device_register_subdev(&pcdev->ici.v4l2_dev,
-						  pcdev->csi2_sd);
-		dev_dbg(&pdev->dev, "%s(): ret(register_subdev) = %d\n",
-			__func__, err);
-		if (err < 0)
-			goto exit_host_unregister;
-		/* v4l2_device_register_subdev() took a reference too */
-		module_put(pcdev->csi2_sd->owner);
-	}
+		goto exit_free_clk;
 
 	return 0;
 
-exit_host_unregister:
-	soc_camera_host_unregister(&pcdev->ici);
-exit_csi2_unregister:
-	if (csi2) {
-		module_put(pcdev->csi2_pdev->dev.driver->owner);
-exit_pdev_unregister:
-		platform_device_del(pcdev->csi2_pdev);
-exit_pdev_put:
-		pcdev->csi2_pdev->resource = NULL;
-		platform_device_put(pcdev->csi2_pdev);
-	}
 exit_free_clk:
 	pm_runtime_disable(&pdev->dev);
 exit_release_mem:
@@ -1958,21 +1759,11 @@
 static int sh_mobile_ceu_remove(struct platform_device *pdev)
 {
 	struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
-	struct sh_mobile_ceu_dev *pcdev = container_of(soc_host,
-					struct sh_mobile_ceu_dev, ici);
-	struct platform_device *csi2_pdev = pcdev->csi2_pdev;
 
 	soc_camera_host_unregister(soc_host);
 	pm_runtime_disable(&pdev->dev);
 	if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
 		dma_release_declared_memory(&pdev->dev);
-	if (csi2_pdev && csi2_pdev->dev.driver) {
-		struct module *csi2_drv = csi2_pdev->dev.driver->owner;
-		platform_device_del(csi2_pdev);
-		csi2_pdev->resource = NULL;
-		platform_device_put(csi2_pdev);
-		module_put(csi2_drv);
-	}
 
 	return 0;
 }
@@ -2012,8 +1803,6 @@
 
 static int __init sh_mobile_ceu_init(void)
 {
-	/* Whatever return code */
-	request_module("sh_mobile_csi2");
 	return platform_driver_register(&sh_mobile_ceu_driver);
 }
 
diff --git a/drivers/media/platform/soc_camera/sh_mobile_csi2.c b/drivers/media/platform/soc_camera/sh_mobile_csi2.c
deleted file mode 100644
index 09b1836..0000000
--- a/drivers/media/platform/soc_camera/sh_mobile_csi2.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * Driver for the SH-Mobile MIPI CSI-2 unit
- *
- * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/videodev2.h>
-#include <linux/module.h>
-
-#include <media/drv-intf/sh_mobile_ceu.h>
-#include <media/drv-intf/sh_mobile_csi2.h>
-#include <media/soc_camera.h>
-#include <media/drv-intf/soc_mediabus.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-dev.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-mediabus.h>
-#include <media/v4l2-subdev.h>
-
-#define SH_CSI2_TREF	0x00
-#define SH_CSI2_SRST	0x04
-#define SH_CSI2_PHYCNT	0x08
-#define SH_CSI2_CHKSUM	0x0C
-#define SH_CSI2_VCDT	0x10
-
-struct sh_csi2 {
-	struct v4l2_subdev		subdev;
-	unsigned int			irq;
-	unsigned long			mipi_flags;
-	void __iomem			*base;
-	struct platform_device		*pdev;
-	struct sh_csi2_client_config	*client;
-};
-
-static void sh_csi2_hwinit(struct sh_csi2 *priv);
-
-static int sh_csi2_set_fmt(struct v4l2_subdev *sd,
-		struct v4l2_subdev_pad_config *cfg,
-		struct v4l2_subdev_format *format)
-{
-	struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
-	struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
-	struct v4l2_mbus_framefmt *mf = &format->format;
-	u32 tmp = (priv->client->channel & 3) << 8;
-
-	if (format->pad)
-		return -EINVAL;
-
-	if (mf->width > 8188)
-		mf->width = 8188;
-	else if (mf->width & 1)
-		mf->width &= ~1;
-
-	switch (pdata->type) {
-	case SH_CSI2C:
-		switch (mf->code) {
-		case MEDIA_BUS_FMT_UYVY8_2X8:		/* YUV422 */
-		case MEDIA_BUS_FMT_YUYV8_1_5X8:		/* YUV420 */
-		case MEDIA_BUS_FMT_Y8_1X8:		/* RAW8 */
-		case MEDIA_BUS_FMT_SBGGR8_1X8:
-		case MEDIA_BUS_FMT_SGRBG8_1X8:
-			break;
-		default:
-			/* All MIPI CSI-2 devices must support one of primary formats */
-			mf->code = MEDIA_BUS_FMT_YUYV8_2X8;
-		}
-		break;
-	case SH_CSI2I:
-		switch (mf->code) {
-		case MEDIA_BUS_FMT_Y8_1X8:		/* RAW8 */
-		case MEDIA_BUS_FMT_SBGGR8_1X8:
-		case MEDIA_BUS_FMT_SGRBG8_1X8:
-		case MEDIA_BUS_FMT_SBGGR10_1X10:	/* RAW10 */
-		case MEDIA_BUS_FMT_SBGGR12_1X12:	/* RAW12 */
-			break;
-		default:
-			/* All MIPI CSI-2 devices must support one of primary formats */
-			mf->code = MEDIA_BUS_FMT_SBGGR8_1X8;
-		}
-		break;
-	}
-
-	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
-		cfg->try_fmt = *mf;
-		return 0;
-	}
-
-	if (mf->width > 8188 || mf->width & 1)
-		return -EINVAL;
-
-	switch (mf->code) {
-	case MEDIA_BUS_FMT_UYVY8_2X8:
-		tmp |= 0x1e;	/* YUV422 8 bit */
-		break;
-	case MEDIA_BUS_FMT_YUYV8_1_5X8:
-		tmp |= 0x18;	/* YUV420 8 bit */
-		break;
-	case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
-		tmp |= 0x21;	/* RGB555 */
-		break;
-	case MEDIA_BUS_FMT_RGB565_2X8_BE:
-		tmp |= 0x22;	/* RGB565 */
-		break;
-	case MEDIA_BUS_FMT_Y8_1X8:
-	case MEDIA_BUS_FMT_SBGGR8_1X8:
-	case MEDIA_BUS_FMT_SGRBG8_1X8:
-		tmp |= 0x2a;	/* RAW8 */
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	iowrite32(tmp, priv->base + SH_CSI2_VCDT);
-
-	return 0;
-}
-
-static int sh_csi2_g_mbus_config(struct v4l2_subdev *sd,
-				 struct v4l2_mbus_config *cfg)
-{
-	struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
-
-	if (!priv->mipi_flags) {
-		struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
-		struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
-		struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
-		unsigned long common_flags, csi2_flags;
-		struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2,};
-		int ret;
-
-		/* Check if we can support this camera */
-		csi2_flags = V4L2_MBUS_CSI2_CONTINUOUS_CLOCK |
-			V4L2_MBUS_CSI2_1_LANE;
-
-		switch (pdata->type) {
-		case SH_CSI2C:
-			if (priv->client->lanes != 1)
-				csi2_flags |= V4L2_MBUS_CSI2_2_LANE;
-			break;
-		case SH_CSI2I:
-			switch (priv->client->lanes) {
-			default:
-				csi2_flags |= V4L2_MBUS_CSI2_4_LANE;
-			case 3:
-				csi2_flags |= V4L2_MBUS_CSI2_3_LANE;
-			case 2:
-				csi2_flags |= V4L2_MBUS_CSI2_2_LANE;
-			}
-		}
-
-		ret = v4l2_subdev_call(client_sd, video, g_mbus_config, &client_cfg);
-		if (ret == -ENOIOCTLCMD)
-			common_flags = csi2_flags;
-		else if (!ret)
-			common_flags = soc_mbus_config_compatible(&client_cfg,
-								  csi2_flags);
-		else
-			common_flags = 0;
-
-		if (!common_flags)
-			return -EINVAL;
-
-		/* All good: camera MIPI configuration supported */
-		priv->mipi_flags = common_flags;
-	}
-
-	if (cfg) {
-		cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING |
-			V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
-			V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH;
-		cfg->type = V4L2_MBUS_PARALLEL;
-	}
-
-	return 0;
-}
-
-static int sh_csi2_s_mbus_config(struct v4l2_subdev *sd,
-				 const struct v4l2_mbus_config *cfg)
-{
-	struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
-	struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
-	struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
-	struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2,};
-	int ret = sh_csi2_g_mbus_config(sd, NULL);
-
-	if (ret < 0)
-		return ret;
-
-	pm_runtime_get_sync(&priv->pdev->dev);
-
-	sh_csi2_hwinit(priv);
-
-	client_cfg.flags = priv->mipi_flags;
-
-	return v4l2_subdev_call(client_sd, video, s_mbus_config, &client_cfg);
-}
-
-static struct v4l2_subdev_video_ops sh_csi2_subdev_video_ops = {
-	.g_mbus_config	= sh_csi2_g_mbus_config,
-	.s_mbus_config	= sh_csi2_s_mbus_config,
-};
-
-static struct v4l2_subdev_pad_ops sh_csi2_subdev_pad_ops = {
-	.set_fmt	= sh_csi2_set_fmt,
-};
-
-static void sh_csi2_hwinit(struct sh_csi2 *priv)
-{
-	struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
-	__u32 tmp = 0x10; /* Enable MIPI CSI clock lane */
-
-	/* Reflect registers immediately */
-	iowrite32(0x00000001, priv->base + SH_CSI2_TREF);
-	/* reset CSI2 harware */
-	iowrite32(0x00000001, priv->base + SH_CSI2_SRST);
-	udelay(5);
-	iowrite32(0x00000000, priv->base + SH_CSI2_SRST);
-
-	switch (pdata->type) {
-	case SH_CSI2C:
-		if (priv->client->lanes == 1)
-			tmp |= 1;
-		else
-			/* Default - both lanes */
-			tmp |= 3;
-		break;
-	case SH_CSI2I:
-		if (!priv->client->lanes || priv->client->lanes > 4)
-			/* Default - all 4 lanes */
-			tmp |= 0xf;
-		else
-			tmp |= (1 << priv->client->lanes) - 1;
-	}
-
-	if (priv->client->phy == SH_CSI2_PHY_MAIN)
-		tmp |= 0x8000;
-
-	iowrite32(tmp, priv->base + SH_CSI2_PHYCNT);
-
-	tmp = 0;
-	if (pdata->flags & SH_CSI2_ECC)
-		tmp |= 2;
-	if (pdata->flags & SH_CSI2_CRC)
-		tmp |= 1;
-	iowrite32(tmp, priv->base + SH_CSI2_CHKSUM);
-}
-
-static int sh_csi2_client_connect(struct sh_csi2 *priv)
-{
-	struct device *dev = v4l2_get_subdevdata(&priv->subdev);
-	struct sh_csi2_pdata *pdata = dev->platform_data;
-	struct soc_camera_device *icd = v4l2_get_subdev_hostdata(&priv->subdev);
-	int i;
-
-	if (priv->client)
-		return -EBUSY;
-
-	for (i = 0; i < pdata->num_clients; i++)
-		if ((pdata->clients[i].pdev &&
-		     &pdata->clients[i].pdev->dev == icd->pdev) ||
-		    (icd->control &&
-		     strcmp(pdata->clients[i].name, dev_name(icd->control))))
-			break;
-
-	dev_dbg(dev, "%s(%p): found #%d\n", __func__, dev, i);
-
-	if (i == pdata->num_clients)
-		return -ENODEV;
-
-	priv->client = pdata->clients + i;
-
-	return 0;
-}
-
-static void sh_csi2_client_disconnect(struct sh_csi2 *priv)
-{
-	if (!priv->client)
-		return;
-
-	priv->client = NULL;
-
-	pm_runtime_put(v4l2_get_subdevdata(&priv->subdev));
-}
-
-static int sh_csi2_s_power(struct v4l2_subdev *sd, int on)
-{
-	struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
-
-	if (on)
-		return sh_csi2_client_connect(priv);
-
-	sh_csi2_client_disconnect(priv);
-	return 0;
-}
-
-static struct v4l2_subdev_core_ops sh_csi2_subdev_core_ops = {
-	.s_power	= sh_csi2_s_power,
-};
-
-static struct v4l2_subdev_ops sh_csi2_subdev_ops = {
-	.core	= &sh_csi2_subdev_core_ops,
-	.video	= &sh_csi2_subdev_video_ops,
-	.pad	= &sh_csi2_subdev_pad_ops,
-};
-
-static int sh_csi2_probe(struct platform_device *pdev)
-{
-	struct resource *res;
-	unsigned int irq;
-	int ret;
-	struct sh_csi2 *priv;
-	/* Platform data specify the PHY, lanes, ECC, CRC */
-	struct sh_csi2_pdata *pdata = pdev->dev.platform_data;
-
-	if (!pdata)
-		return -EINVAL;
-
-	priv = devm_kzalloc(&pdev->dev, sizeof(struct sh_csi2), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	/* Interrupt unused so far */
-	irq = platform_get_irq(pdev, 0);
-
-	if (!res || (int)irq <= 0) {
-		dev_err(&pdev->dev, "Not enough CSI2 platform resources.\n");
-		return -ENODEV;
-	}
-
-	/* TODO: Add support for CSI2I. Careful: different register layout! */
-	if (pdata->type != SH_CSI2C) {
-		dev_err(&pdev->dev, "Only CSI2C supported ATM.\n");
-		return -EINVAL;
-	}
-
-	priv->irq = irq;
-
-	priv->base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(priv->base))
-		return PTR_ERR(priv->base);
-
-	priv->pdev = pdev;
-	priv->subdev.owner = THIS_MODULE;
-	priv->subdev.dev = &pdev->dev;
-	platform_set_drvdata(pdev, &priv->subdev);
-
-	v4l2_subdev_init(&priv->subdev, &sh_csi2_subdev_ops);
-	v4l2_set_subdevdata(&priv->subdev, &pdev->dev);
-
-	snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.mipi-csi",
-		 dev_name(&pdev->dev));
-
-	ret = v4l2_async_register_subdev(&priv->subdev);
-	if (ret < 0)
-		return ret;
-
-	pm_runtime_enable(&pdev->dev);
-
-	dev_dbg(&pdev->dev, "CSI2 probed.\n");
-
-	return 0;
-}
-
-static int sh_csi2_remove(struct platform_device *pdev)
-{
-	struct v4l2_subdev *subdev = platform_get_drvdata(pdev);
-	struct sh_csi2 *priv = container_of(subdev, struct sh_csi2, subdev);
-
-	v4l2_async_unregister_subdev(&priv->subdev);
-	pm_runtime_disable(&pdev->dev);
-
-	return 0;
-}
-
-static struct platform_driver __refdata sh_csi2_pdrv = {
-	.remove	= sh_csi2_remove,
-	.probe	= sh_csi2_probe,
-	.driver	= {
-		.name	= "sh-mobile-csi2",
-	},
-};
-
-module_platform_driver(sh_csi2_pdrv);
-
-MODULE_DESCRIPTION("SH-Mobile MIPI CSI-2 driver");
-MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:sh-mobile-csi2");
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 46c7186..edd1c1d 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -581,7 +581,7 @@
 	dev_dbg(icd->pdev, "S_FMT(%c%c%c%c, %ux%u)\n",
 		pixfmtstr(pix->pixelformat), pix->width, pix->height);
 
-	/* We always call try_fmt() before set_fmt() or set_crop() */
+	/* We always call try_fmt() before set_fmt() or set_selection() */
 	ret = soc_camera_try_fmt(icd, f);
 	if (ret < 0)
 		return ret;
@@ -1025,72 +1025,6 @@
 	return ret;
 }
 
-static int soc_camera_cropcap(struct file *file, void *fh,
-			      struct v4l2_cropcap *a)
-{
-	struct soc_camera_device *icd = file->private_data;
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-
-	return ici->ops->cropcap(icd, a);
-}
-
-static int soc_camera_g_crop(struct file *file, void *fh,
-			     struct v4l2_crop *a)
-{
-	struct soc_camera_device *icd = file->private_data;
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	int ret;
-
-	ret = ici->ops->get_crop(icd, a);
-
-	return ret;
-}
-
-/*
- * According to the V4L2 API, drivers shall not update the struct v4l2_crop
- * argument with the actual geometry, instead, the user shall use G_CROP to
- * retrieve it.
- */
-static int soc_camera_s_crop(struct file *file, void *fh,
-			     const struct v4l2_crop *a)
-{
-	struct soc_camera_device *icd = file->private_data;
-	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-	const struct v4l2_rect *rect = &a->c;
-	struct v4l2_crop current_crop;
-	int ret;
-
-	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
-		return -EINVAL;
-
-	dev_dbg(icd->pdev, "S_CROP(%ux%u@%u:%u)\n",
-		rect->width, rect->height, rect->left, rect->top);
-
-	current_crop.type = a->type;
-
-	/* If get_crop fails, we'll let host and / or client drivers decide */
-	ret = ici->ops->get_crop(icd, &current_crop);
-
-	/* Prohibit window size change with initialised buffers */
-	if (ret < 0) {
-		dev_err(icd->pdev,
-			"S_CROP denied: getting current crop failed\n");
-	} else if ((a->c.width == current_crop.c.width &&
-		    a->c.height == current_crop.c.height) ||
-		   !is_streaming(ici, icd)) {
-		/* same size or not streaming - use .set_crop() */
-		ret = ici->ops->set_crop(icd, a);
-	} else if (ici->ops->set_livecrop) {
-		ret = ici->ops->set_livecrop(icd, a);
-	} else {
-		dev_err(icd->pdev,
-			"S_CROP denied: queue initialised and sizes differ\n");
-		ret = -EBUSY;
-	}
-
-	return ret;
-}
-
 static int soc_camera_g_selection(struct file *file, void *fh,
 				  struct v4l2_selection *s)
 {
@@ -1101,9 +1035,6 @@
 	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
 		return -EINVAL;
 
-	if (!ici->ops->get_selection)
-		return -ENOTTY;
-
 	return ici->ops->get_selection(icd, s);
 }
 
@@ -1135,10 +1066,11 @@
 			return -EBUSY;
 	}
 
-	if (!ici->ops->set_selection)
-		return -ENOTTY;
-
-	ret = ici->ops->set_selection(icd, s);
+	if (s->target == V4L2_SEL_TGT_CROP && is_streaming(ici, icd) &&
+	    ici->ops->set_liveselection)
+		ret = ici->ops->set_liveselection(icd, s);
+	else
+		ret = ici->ops->set_selection(icd, s);
 	if (!ret &&
 	    s->target == V4L2_SEL_TGT_COMPOSE) {
 		icd->user_width = s->r.width;
@@ -1881,23 +1813,40 @@
 	return 0;
 }
 
-static int default_cropcap(struct soc_camera_device *icd,
-			   struct v4l2_cropcap *a)
+static int default_g_selection(struct soc_camera_device *icd,
+			       struct v4l2_selection *sel)
 {
 	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-	return v4l2_subdev_call(sd, video, cropcap, a);
+	struct v4l2_subdev_selection sdsel = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.target = sel->target,
+	};
+	int ret;
+
+	ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
+	if (ret)
+		return ret;
+	sel->r = sdsel.r;
+	return 0;
 }
 
-static int default_g_crop(struct soc_camera_device *icd, struct v4l2_crop *a)
+static int default_s_selection(struct soc_camera_device *icd,
+			       struct v4l2_selection *sel)
 {
 	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-	return v4l2_subdev_call(sd, video, g_crop, a);
-}
+	struct v4l2_subdev_selection sdsel = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.target = sel->target,
+		.flags = sel->flags,
+		.r = sel->r,
+	};
+	int ret;
 
-static int default_s_crop(struct soc_camera_device *icd, const struct v4l2_crop *a)
-{
-	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-	return v4l2_subdev_call(sd, video, s_crop, a);
+	ret = v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
+	if (ret)
+		return ret;
+	sel->r = sdsel.r;
+	return 0;
 }
 
 static int default_g_parm(struct soc_camera_device *icd,
@@ -1968,12 +1917,10 @@
 	    !ici->v4l2_dev.dev)
 		return -EINVAL;
 
-	if (!ici->ops->set_crop)
-		ici->ops->set_crop = default_s_crop;
-	if (!ici->ops->get_crop)
-		ici->ops->get_crop = default_g_crop;
-	if (!ici->ops->cropcap)
-		ici->ops->cropcap = default_cropcap;
+	if (!ici->ops->set_selection)
+		ici->ops->set_selection = default_s_selection;
+	if (!ici->ops->get_selection)
+		ici->ops->get_selection = default_g_selection;
 	if (!ici->ops->set_parm)
 		ici->ops->set_parm = default_s_parm;
 	if (!ici->ops->get_parm)
@@ -2126,9 +2073,6 @@
 	.vidioc_expbuf		 = soc_camera_expbuf,
 	.vidioc_streamon	 = soc_camera_streamon,
 	.vidioc_streamoff	 = soc_camera_streamoff,
-	.vidioc_cropcap		 = soc_camera_cropcap,
-	.vidioc_g_crop		 = soc_camera_g_crop,
-	.vidioc_s_crop		 = soc_camera_s_crop,
 	.vidioc_g_selection	 = soc_camera_g_selection,
 	.vidioc_s_selection	 = soc_camera_s_selection,
 	.vidioc_g_parm		 = soc_camera_g_parm,
diff --git a/drivers/media/platform/soc_camera/soc_camera_platform.c b/drivers/media/platform/soc_camera/soc_camera_platform.c
index a51d2a4..534d6c3 100644
--- a/drivers/media/platform/soc_camera/soc_camera_platform.c
+++ b/drivers/media/platform/soc_camera/soc_camera_platform.c
@@ -76,35 +76,27 @@
 	return 0;
 }
 
-static int soc_camera_platform_g_crop(struct v4l2_subdev *sd,
-				      struct v4l2_crop *a)
+static int soc_camera_platform_get_selection(struct v4l2_subdev *sd,
+		struct v4l2_subdev_pad_config *cfg,
+		struct v4l2_subdev_selection *sel)
 {
 	struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
 
-	a->c.left	= 0;
-	a->c.top	= 0;
-	a->c.width	= p->format.width;
-	a->c.height	= p->format.height;
-	a->type		= V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+		return -EINVAL;
 
-	return 0;
-}
-
-static int soc_camera_platform_cropcap(struct v4l2_subdev *sd,
-				       struct v4l2_cropcap *a)
-{
-	struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
-
-	a->bounds.left			= 0;
-	a->bounds.top			= 0;
-	a->bounds.width			= p->format.width;
-	a->bounds.height		= p->format.height;
-	a->defrect			= a->bounds;
-	a->type				= V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	a->pixelaspect.numerator	= 1;
-	a->pixelaspect.denominator	= 1;
-
-	return 0;
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+	case V4L2_SEL_TGT_CROP:
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = p->format.width;
+		sel->r.height = p->format.height;
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 static int soc_camera_platform_g_mbus_config(struct v4l2_subdev *sd,
@@ -120,13 +112,12 @@
 
 static struct v4l2_subdev_video_ops platform_subdev_video_ops = {
 	.s_stream	= soc_camera_platform_s_stream,
-	.cropcap	= soc_camera_platform_cropcap,
-	.g_crop		= soc_camera_platform_g_crop,
 	.g_mbus_config	= soc_camera_platform_g_mbus_config,
 };
 
 static const struct v4l2_subdev_pad_ops platform_subdev_pad_ops = {
 	.enum_mbus_code = soc_camera_platform_enum_mbus_code,
+	.get_selection	= soc_camera_platform_get_selection,
 	.get_fmt	= soc_camera_platform_fill_fmt,
 	.set_fmt	= soc_camera_platform_fill_fmt,
 };
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c
index bda29bc..f77252d 100644
--- a/drivers/media/platform/soc_camera/soc_scale_crop.c
+++ b/drivers/media/platform/soc_camera/soc_scale_crop.c
@@ -40,24 +40,22 @@
 /* Get and store current client crop */
 int soc_camera_client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect)
 {
-	struct v4l2_crop crop;
-	struct v4l2_cropcap cap;
+	struct v4l2_subdev_selection sdsel = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.target = V4L2_SEL_TGT_CROP,
+	};
 	int ret;
 
-	crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-	ret = v4l2_subdev_call(sd, video, g_crop, &crop);
+	ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
 	if (!ret) {
-		*rect = crop.c;
+		*rect = sdsel.r;
 		return ret;
 	}
 
-	/* Camera driver doesn't support .g_crop(), assume default rectangle */
-	cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-	ret = v4l2_subdev_call(sd, video, cropcap, &cap);
+	sdsel.target = V4L2_SEL_TGT_CROP_DEFAULT;
+	ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
 	if (!ret)
-		*rect = cap.defrect;
+		*rect = sdsel.r;
 
 	return ret;
 }
@@ -93,17 +91,27 @@
  * 2. if (1) failed, try to double the client image until we get one big enough
  * 3. if (2) failed, try to request the maximum image
  */
-int soc_camera_client_s_crop(struct v4l2_subdev *sd,
-			struct v4l2_crop *crop, struct v4l2_crop *cam_crop,
+int soc_camera_client_s_selection(struct v4l2_subdev *sd,
+			struct v4l2_selection *sel, struct v4l2_selection *cam_sel,
 			struct v4l2_rect *target_rect, struct v4l2_rect *subrect)
 {
-	struct v4l2_rect *rect = &crop->c, *cam_rect = &cam_crop->c;
+	struct v4l2_subdev_selection sdsel = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.target = sel->target,
+		.flags = sel->flags,
+		.r = sel->r,
+	};
+	struct v4l2_subdev_selection bounds = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.target = V4L2_SEL_TGT_CROP_BOUNDS,
+	};
+	struct v4l2_rect *rect = &sel->r, *cam_rect = &cam_sel->r;
 	struct device *dev = sd->v4l2_dev->dev;
-	struct v4l2_cropcap cap;
 	int ret;
 	unsigned int width, height;
 
-	v4l2_subdev_call(sd, video, s_crop, crop);
+	v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
+	sel->r = sdsel.r;
 	ret = soc_camera_client_g_rect(sd, cam_rect);
 	if (ret < 0)
 		return ret;
@@ -113,29 +121,29 @@
 	 * be within camera cropcap bounds
 	 */
 	if (!memcmp(rect, cam_rect, sizeof(*rect))) {
-		/* Even if camera S_CROP failed, but camera rectangle matches */
-		dev_dbg(dev, "Camera S_CROP successful for %dx%d@%d:%d\n",
+		/* Even if camera S_SELECTION failed, but camera rectangle matches */
+		dev_dbg(dev, "Camera S_SELECTION successful for %dx%d@%d:%d\n",
 			rect->width, rect->height, rect->left, rect->top);
 		*target_rect = *cam_rect;
 		return 0;
 	}
 
 	/* Try to fix cropping, that camera hasn't managed to set */
-	dev_geo(dev, "Fix camera S_CROP for %dx%d@%d:%d to %dx%d@%d:%d\n",
+	dev_geo(dev, "Fix camera S_SELECTION for %dx%d@%d:%d to %dx%d@%d:%d\n",
 		cam_rect->width, cam_rect->height,
 		cam_rect->left, cam_rect->top,
 		rect->width, rect->height, rect->left, rect->top);
 
 	/* We need sensor maximum rectangle */
-	ret = v4l2_subdev_call(sd, video, cropcap, &cap);
+	ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &bounds);
 	if (ret < 0)
 		return ret;
 
 	/* Put user requested rectangle within sensor bounds */
-	soc_camera_limit_side(&rect->left, &rect->width, cap.bounds.left, 2,
-			      cap.bounds.width);
-	soc_camera_limit_side(&rect->top, &rect->height, cap.bounds.top, 4,
-			      cap.bounds.height);
+	soc_camera_limit_side(&rect->left, &rect->width, sdsel.r.left, 2,
+			      bounds.r.width);
+	soc_camera_limit_side(&rect->top, &rect->height, sdsel.r.top, 4,
+			      bounds.r.height);
 
 	/*
 	 * Popular special case - some cameras can only handle fixed sizes like
@@ -150,7 +158,7 @@
 	 */
 	while (!ret && (is_smaller(cam_rect, rect) ||
 			is_inside(cam_rect, rect)) &&
-	       (cap.bounds.width > width || cap.bounds.height > height)) {
+	       (bounds.r.width > width || bounds.r.height > height)) {
 
 		width *= 2;
 		height *= 2;
@@ -168,36 +176,40 @@
 		 * Instead we just drop to the left and top bounds.
 		 */
 		if (cam_rect->left > rect->left)
-			cam_rect->left = cap.bounds.left;
+			cam_rect->left = bounds.r.left;
 
 		if (cam_rect->left + cam_rect->width < rect->left + rect->width)
 			cam_rect->width = rect->left + rect->width -
 				cam_rect->left;
 
 		if (cam_rect->top > rect->top)
-			cam_rect->top = cap.bounds.top;
+			cam_rect->top = bounds.r.top;
 
 		if (cam_rect->top + cam_rect->height < rect->top + rect->height)
 			cam_rect->height = rect->top + rect->height -
 				cam_rect->top;
 
-		v4l2_subdev_call(sd, video, s_crop, cam_crop);
+		sdsel.r = *cam_rect;
+		v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
+		*cam_rect = sdsel.r;
 		ret = soc_camera_client_g_rect(sd, cam_rect);
-		dev_geo(dev, "Camera S_CROP %d for %dx%d@%d:%d\n", ret,
+		dev_geo(dev, "Camera S_SELECTION %d for %dx%d@%d:%d\n", ret,
 			cam_rect->width, cam_rect->height,
 			cam_rect->left, cam_rect->top);
 	}
 
-	/* S_CROP must not modify the rectangle */
+	/* S_SELECTION must not modify the rectangle */
 	if (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) {
 		/*
 		 * The camera failed to configure a suitable cropping,
 		 * we cannot use the current rectangle, set to max
 		 */
-		*cam_rect = cap.bounds;
-		v4l2_subdev_call(sd, video, s_crop, cam_crop);
+		sdsel.r = bounds.r;
+		v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
+		*cam_rect = sdsel.r;
+
 		ret = soc_camera_client_g_rect(sd, cam_rect);
-		dev_geo(dev, "Camera S_CROP %d for max %dx%d@%d:%d\n", ret,
+		dev_geo(dev, "Camera S_SELECTION %d for max %dx%d@%d:%d\n", ret,
 			cam_rect->width, cam_rect->height,
 			cam_rect->left, cam_rect->top);
 	}
@@ -209,7 +221,7 @@
 
 	return ret;
 }
-EXPORT_SYMBOL(soc_camera_client_s_crop);
+EXPORT_SYMBOL(soc_camera_client_s_selection);
 
 /* Iterative set_fmt, also updates cached client crop on success */
 static int client_set_fmt(struct soc_camera_device *icd,
@@ -221,7 +233,10 @@
 	struct device *dev = icd->parent;
 	struct v4l2_mbus_framefmt *mf = &format->format;
 	unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
-	struct v4l2_cropcap cap;
+	struct v4l2_subdev_selection sdsel = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.target = V4L2_SEL_TGT_CROP_BOUNDS,
+	};
 	bool host_1to1;
 	int ret;
 
@@ -243,16 +258,14 @@
 	if (!host_can_scale)
 		goto update_cache;
 
-	cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-	ret = v4l2_subdev_call(sd, video, cropcap, &cap);
+	ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
 	if (ret < 0)
 		return ret;
 
-	if (max_width > cap.bounds.width)
-		max_width = cap.bounds.width;
-	if (max_height > cap.bounds.height)
-		max_height = cap.bounds.height;
+	if (max_width > sdsel.r.width)
+		max_width = sdsel.r.width;
+	if (max_height > sdsel.r.height)
+		max_height = sdsel.r.height;
 
 	/* Camera set a format, but geometry is not precise, try to improve */
 	tmp_w = mf->width;
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.h b/drivers/media/platform/soc_camera/soc_scale_crop.h
index 184a30d..9ca4693 100644
--- a/drivers/media/platform/soc_camera/soc_scale_crop.h
+++ b/drivers/media/platform/soc_camera/soc_scale_crop.h
@@ -16,7 +16,7 @@
 
 struct soc_camera_device;
 
-struct v4l2_crop;
+struct v4l2_selection;
 struct v4l2_mbus_framefmt;
 struct v4l2_pix_format;
 struct v4l2_rect;
@@ -31,8 +31,8 @@
 #define soc_camera_calc_scale(in, shift, out) soc_camera_shift_scale(in, shift, out)
 
 int soc_camera_client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect);
-int soc_camera_client_s_crop(struct v4l2_subdev *sd,
-			struct v4l2_crop *crop, struct v4l2_crop *cam_crop,
+int soc_camera_client_s_selection(struct v4l2_subdev *sd,
+			struct v4l2_selection *sel, struct v4l2_selection *cam_sel,
 			struct v4l2_rect *target_rect, struct v4l2_rect *subrect);
 int soc_camera_client_scale(struct soc_camera_device *icd,
 			struct v4l2_rect *rect, struct v4l2_rect *subrect,
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 3b1ac68..45f82b5 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -527,7 +527,7 @@
 	pm_runtime_put(ctx->bdisp_dev->dev);
 }
 
-static struct vb2_ops bdisp_qops = {
+static const struct vb2_ops bdisp_qops = {
 	.queue_setup     = bdisp_queue_setup,
 	.buf_prepare     = bdisp_buf_prepare,
 	.buf_queue       = bdisp_buf_queue,
diff --git a/drivers/media/platform/sti/hva/Makefile b/drivers/media/platform/sti/hva/Makefile
new file mode 100644
index 0000000..ffb69ce
--- /dev/null
+++ b/drivers/media/platform/sti/hva/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_VIDEO_STI_HVA) := st-hva.o
+st-hva-y := hva-v4l2.o hva-hw.o hva-mem.o hva-h264.o
diff --git a/drivers/media/platform/sti/hva/hva-h264.c b/drivers/media/platform/sti/hva/hva-h264.c
new file mode 100644
index 0000000..8cc8467
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-h264.c
@@ -0,0 +1,1050 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ *          Hugues Fruchet <hugues.fruchet@st.com>
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include "hva.h"
+#include "hva-hw.h"
+
+#define MAX_SPS_PPS_SIZE 128
+
+#define BITSTREAM_OFFSET_MASK 0x7F
+
+/* video max size*/
+#define H264_MAX_SIZE_W 1920
+#define H264_MAX_SIZE_H 1920
+
+/* macroBlocs number (width & height) */
+#define MB_W(w) ((w + 0xF)  / 0x10)
+#define MB_H(h) ((h + 0xF)  / 0x10)
+
+/* formula to get temporal or spatial data size */
+#define DATA_SIZE(w, h) (MB_W(w) * MB_H(h) * 16)
+
+#define SEARCH_WINDOW_BUFFER_MAX_SIZE(w) ((4 * MB_W(w) + 42) * 256 * 3 / 2)
+#define CABAC_CONTEXT_BUFFER_MAX_SIZE(w) (MB_W(w) * 16)
+#define CTX_MB_BUFFER_MAX_SIZE(w) (MB_W(w) * 16 * 8)
+#define SLICE_HEADER_SIZE (4 * 16)
+#define BRC_DATA_SIZE (5 * 16)
+
+/* source buffer copy in YUV 420 MB-tiled format with size=16*256*3/2 */
+#define CURRENT_WINDOW_BUFFER_MAX_SIZE (16 * 256 * 3 / 2)
+
+/*
+ * 4 lines of pixels (in Luma, Chroma blue and Chroma red) of top MB
+ * for deblocking with size=4*16*MBx*2
+ */
+#define LOCAL_RECONSTRUCTED_BUFFER_MAX_SIZE(w) (4 * 16 * MB_W(w) * 2)
+
+/* factor for bitrate and cpb buffer size max values if profile >= high */
+#define H264_FACTOR_HIGH 1200
+
+/* factor for bitrate and cpb buffer size max values if profile < high */
+#define H264_FACTOR_BASELINE 1000
+
+/* number of bytes for NALU_TYPE_FILLER_DATA header and footer */
+#define H264_FILLER_DATA_SIZE 6
+
+struct h264_profile {
+	enum v4l2_mpeg_video_h264_level level;
+	u32 max_mb_per_seconds;
+	u32 max_frame_size;
+	u32 max_bitrate;
+	u32 max_cpb_size;
+	u32 min_comp_ratio;
+};
+
+static const struct h264_profile h264_infos_list[] = {
+	{V4L2_MPEG_VIDEO_H264_LEVEL_1_0, 1485, 99, 64, 175, 2},
+	{V4L2_MPEG_VIDEO_H264_LEVEL_1B, 1485, 99, 128, 350, 2},
+	{V4L2_MPEG_VIDEO_H264_LEVEL_1_1, 3000, 396, 192, 500, 2},
+	{V4L2_MPEG_VIDEO_H264_LEVEL_1_2, 6000, 396, 384, 1000, 2},
+	{V4L2_MPEG_VIDEO_H264_LEVEL_1_3, 11880, 396, 768, 2000, 2},
+	{V4L2_MPEG_VIDEO_H264_LEVEL_2_0, 11880, 396, 2000, 2000, 2},
+	{V4L2_MPEG_VIDEO_H264_LEVEL_2_1, 19800, 792, 4000, 4000, 2},
+	{V4L2_MPEG_VIDEO_H264_LEVEL_2_2, 20250, 1620, 4000, 4000, 2},
+	{V4L2_MPEG_VIDEO_H264_LEVEL_3_0, 40500, 1620, 10000, 10000, 2},
+	{V4L2_MPEG_VIDEO_H264_LEVEL_3_1, 108000, 3600, 14000, 14000, 4},
+	{V4L2_MPEG_VIDEO_H264_LEVEL_3_2, 216000, 5120, 20000, 20000, 4},
+	{V4L2_MPEG_VIDEO_H264_LEVEL_4_0, 245760, 8192, 20000, 25000, 4},
+	{V4L2_MPEG_VIDEO_H264_LEVEL_4_1, 245760, 8192, 50000, 62500, 2},
+	{V4L2_MPEG_VIDEO_H264_LEVEL_4_2, 522240, 8704, 50000, 62500, 2},
+	{V4L2_MPEG_VIDEO_H264_LEVEL_5_0, 589824, 22080, 135000, 135000, 2},
+	{V4L2_MPEG_VIDEO_H264_LEVEL_5_1, 983040, 36864, 240000, 240000, 2}
+};
+
+enum hva_brc_type {
+	BRC_TYPE_NONE = 0,
+	BRC_TYPE_CBR = 1,
+	BRC_TYPE_VBR = 2,
+	BRC_TYPE_VBR_LOW_DELAY = 3
+};
+
+enum hva_entropy_coding_mode {
+	CAVLC = 0,
+	CABAC = 1
+};
+
+enum hva_picture_coding_type {
+	PICTURE_CODING_TYPE_I = 0,
+	PICTURE_CODING_TYPE_P = 1,
+	PICTURE_CODING_TYPE_B = 2
+};
+
+enum hva_h264_sampling_mode {
+	SAMPLING_MODE_NV12 = 0,
+	SAMPLING_MODE_UYVY = 1,
+	SAMPLING_MODE_RGB3 = 3,
+	SAMPLING_MODE_XRGB4 = 4,
+	SAMPLING_MODE_NV21 = 8,
+	SAMPLING_MODE_VYUY = 9,
+	SAMPLING_MODE_BGR3 = 11,
+	SAMPLING_MODE_XBGR4 = 12,
+	SAMPLING_MODE_RGBX4 = 20,
+	SAMPLING_MODE_BGRX4 = 28
+};
+
+enum hva_h264_nalu_type {
+	NALU_TYPE_UNKNOWN = 0,
+	NALU_TYPE_SLICE = 1,
+	NALU_TYPE_SLICE_DPA = 2,
+	NALU_TYPE_SLICE_DPB = 3,
+	NALU_TYPE_SLICE_DPC = 4,
+	NALU_TYPE_SLICE_IDR = 5,
+	NALU_TYPE_SEI = 6,
+	NALU_TYPE_SPS = 7,
+	NALU_TYPE_PPS = 8,
+	NALU_TYPE_AU_DELIMITER = 9,
+	NALU_TYPE_SEQ_END = 10,
+	NALU_TYPE_STREAM_END = 11,
+	NALU_TYPE_FILLER_DATA = 12,
+	NALU_TYPE_SPS_EXT = 13,
+	NALU_TYPE_PREFIX_UNIT = 14,
+	NALU_TYPE_SUBSET_SPS = 15,
+	NALU_TYPE_SLICE_AUX = 19,
+	NALU_TYPE_SLICE_EXT = 20
+};
+
+enum hva_h264_sei_payload_type {
+	SEI_BUFFERING_PERIOD = 0,
+	SEI_PICTURE_TIMING = 1,
+	SEI_STEREO_VIDEO_INFO = 21,
+	SEI_FRAME_PACKING_ARRANGEMENT = 45
+};
+
+/**
+ * stereo Video Info struct
+ */
+struct hva_h264_stereo_video_sei {
+	u8 field_views_flag;
+	u8 top_field_is_left_view_flag;
+	u8 current_frame_is_left_view_flag;
+	u8 next_frame_is_second_view_flag;
+	u8 left_view_self_contained_flag;
+	u8 right_view_self_contained_flag;
+};
+
+/**
+ * @frame_width: width in pixels of the buffer containing the input frame
+ * @frame_height: height in pixels of the buffer containing the input frame
+ * @frame_num: the parameter to be written in the slice header
+ * @picture_coding_type: type I, P or B
+ * @pic_order_cnt_type: POC mode, as defined in H264 std : can be 0,1,2
+ * @first_picture_in_sequence: flag telling to encoder that this is the
+ *			       first picture in a video sequence.
+ *			       Used for VBR
+ * @slice_size_type: 0 = no constraint to close the slice
+ *		     1= a slice is closed as soon as the slice_mb_size limit
+ *			is reached
+ *		     2= a slice is closed as soon as the slice_byte_size limit
+ *			is reached
+ *		     3= a slice is closed as soon as either the slice_byte_size
+ *			limit or the slice_mb_size limit is reached
+ * @slice_mb_size: defines the slice size in number of macroblocks
+ *		   (used when slice_size_type=1 or slice_size_type=3)
+ * @ir_param_option: defines the number of macroblocks per frame to be
+ *		     refreshed by AIR algorithm OR the refresh period
+ *		     by CIR algorithm
+ * @intra_refresh_type: enables the adaptive intra refresh algorithm.
+ *			Disable=0 / Adaptative=1 and Cycle=2 as intra refresh
+ * @use_constrained_intra_flag: constrained_intra_pred_flag from PPS
+ * @transform_mode: controls the use of 4x4/8x8 transform mode
+ * @disable_deblocking_filter_idc:
+ *		     0: specifies that all luma and chroma block edges of
+ *			the slice are filtered.
+ *		     1: specifies that deblocking is disabled for all block
+ *			edges of the slice.
+ *		     2: specifies that all luma and chroma block edges of
+ *			the slice are filtered with exception of the block edges
+ *			that coincide with slice boundaries
+ * @slice_alpha_c0_offset_div2: to be written in slice header,
+ *				controls deblocking
+ * @slice_beta_offset_div2: to be written in slice header,
+ *			    controls deblocking
+ * @encoder_complexity: encoder complexity control (IME).
+ *		     0 = I_16x16, P_16x16, Full ME Complexity
+ *		     1 = I_16x16, I_NxN, P_16x16, Full ME Complexity
+ *		     2 = I_16x16, I_NXN, P_16x16, P_WxH, Full ME Complexity
+ *		     4 = I_16x16, P_16x16, Reduced ME Complexity
+ *		     5 = I_16x16, I_NxN, P_16x16, Reduced ME Complexity
+ *		     6 = I_16x16, I_NXN, P_16x16, P_WxH, Reduced ME Complexity
+ *  @chroma_qp_index_offset: coming from picture parameter set
+ *			     (PPS see [H.264 STD] 7.4.2.2)
+ *  @entropy_coding_mode: entropy coding mode.
+ *			  0 = CAVLC
+ *			  1 = CABAC
+ * @brc_type: selects the bit-rate control algorithm
+ *		     0 = constant Qp, (no BRC)
+ *		     1 = CBR
+ *		     2 = VBR
+ * @quant: Quantization param used in case of fix QP encoding (no BRC)
+ * @non_VCL_NALU_Size: size of non-VCL NALUs (SPS, PPS, filler),
+ *		       used by BRC
+ * @cpb_buffer_size: size of Coded Picture Buffer, used by BRC
+ * @bit_rate: target bitrate, for BRC
+ * @qp_min: min QP threshold
+ * @qp_max: max QP threshold
+ * @framerate_num: target framerate numerator , used by BRC
+ * @framerate_den: target framerate denomurator , used by BRC
+ * @delay: End-to-End Initial Delay
+ * @strict_HRD_compliancy: flag for HDR compliancy (1)
+ *			   May impact quality encoding
+ * @addr_source_buffer: address of input frame buffer for current frame
+ * @addr_fwd_Ref_Buffer: address of reference frame buffer
+ * @addr_rec_buffer: address of reconstructed frame buffer
+ * @addr_output_bitstream_start: output bitstream start address
+ * @addr_output_bitstream_end: output bitstream end address
+ * @addr_external_sw : address of external search window
+ * @addr_lctx : address of context picture buffer
+ * @addr_local_rec_buffer: address of local reconstructed buffer
+ * @addr_spatial_context: address of spatial context buffer
+ * @bitstream_offset: offset in bits between aligned bitstream start
+ *		      address and first bit to be written by HVA.
+ *		      Range value is [0..63]
+ * @sampling_mode: Input picture format .
+ *		     0: YUV420 semi_planar Interleaved
+ *		     1: YUV422 raster Interleaved
+ * @addr_param_out: address of output parameters structure
+ * @addr_scaling_matrix: address to the coefficient of
+ *			 the inverse scaling matrix
+ * @addr_scaling_matrix_dir: address to the coefficient of
+ *			     the direct scaling matrix
+ * @addr_cabac_context_buffer: address of cabac context buffer
+ * @GmvX: Input information about the horizontal global displacement of
+ *	  the encoded frame versus the previous one
+ * @GmvY: Input information about the vertical global displacement of
+ *	  the encoded frame versus the previous one
+ * @window_width: width in pixels of the window to be encoded inside
+ *		  the input frame
+ * @window_height: width in pixels of the window to be encoded inside
+ *		   the input frame
+ * @window_horizontal_offset: horizontal offset in pels for input window
+ *			      within input frame
+ * @window_vertical_offset: vertical offset in pels for input window
+ *			    within input frame
+ * @addr_roi: Map of QP offset for the Region of Interest algorithm and
+ *	      also used for Error map.
+ *	      Bit 0-6 used for qp offset (value -64 to 63).
+ *	      Bit 7 used to force intra
+ * @addr_slice_header: address to slice header
+ * @slice_header_size_in_bits: size in bits of the Slice header
+ * @slice_header_offset0: Slice header offset where to insert
+ *			  first_Mb_in_slice
+ * @slice_header_offset1: Slice header offset where to insert
+ *			  slice_qp_delta
+ * @slice_header_offset2: Slice header offset where to insert
+ *			  num_MBs_in_slice
+ * @slice_synchro_enable: enable "slice ready" interrupt after each slice
+ * @max_slice_number: Maximum number of slice in a frame
+ *		      (0 is strictly forbidden)
+ * @rgb2_yuv_y_coeff: Four coefficients (C0C1C2C3) to convert from RGB to
+ *		      YUV for the Y component.
+ *		      Y = C0*R + C1*G + C2*B + C3 (C0 is on byte 0)
+ * @rgb2_yuv_u_coeff: four coefficients (C0C1C2C3) to convert from RGB to
+ *		      YUV for the Y component.
+ *		      Y = C0*R + C1*G + C2*B + C3 (C0 is on byte 0)
+ * @rgb2_yuv_v_coeff: Four coefficients (C0C1C2C3) to convert from RGB to
+ *		      YUV for the U (Cb) component.
+ *		      U = C0*R + C1*G + C2*B + C3 (C0 is on byte 0)
+ * @slice_byte_size: maximum slice size in bytes
+ *		     (used when slice_size_type=2 or slice_size_type=3)
+ * @max_air_intra_mb_nb: Maximum number of intra macroblock in a frame
+ *			 for the AIR algorithm
+ * @brc_no_skip: Disable skipping in the Bitrate Controller
+ * @addr_brc_in_out_parameter: address of static buffer for BRC parameters
+ */
+struct hva_h264_td {
+	u16 frame_width;
+	u16 frame_height;
+	u32 frame_num;
+	u16 picture_coding_type;
+	u16 reserved1;
+	u16 pic_order_cnt_type;
+	u16 first_picture_in_sequence;
+	u16 slice_size_type;
+	u16 reserved2;
+	u32 slice_mb_size;
+	u16 ir_param_option;
+	u16 intra_refresh_type;
+	u16 use_constrained_intra_flag;
+	u16 transform_mode;
+	u16 disable_deblocking_filter_idc;
+	s16 slice_alpha_c0_offset_div2;
+	s16 slice_beta_offset_div2;
+	u16 encoder_complexity;
+	s16 chroma_qp_index_offset;
+	u16 entropy_coding_mode;
+	u16 brc_type;
+	u16 quant;
+	u32 non_vcl_nalu_size;
+	u32 cpb_buffer_size;
+	u32 bit_rate;
+	u16 qp_min;
+	u16 qp_max;
+	u16 framerate_num;
+	u16 framerate_den;
+	u16 delay;
+	u16 strict_hrd_compliancy;
+	u32 addr_source_buffer;
+	u32 addr_fwd_ref_buffer;
+	u32 addr_rec_buffer;
+	u32 addr_output_bitstream_start;
+	u32 addr_output_bitstream_end;
+	u32 addr_external_sw;
+	u32 addr_lctx;
+	u32 addr_local_rec_buffer;
+	u32 addr_spatial_context;
+	u16 bitstream_offset;
+	u16 sampling_mode;
+	u32 addr_param_out;
+	u32 addr_scaling_matrix;
+	u32 addr_scaling_matrix_dir;
+	u32 addr_cabac_context_buffer;
+	u32 reserved3;
+	u32 reserved4;
+	s16 gmv_x;
+	s16 gmv_y;
+	u16 window_width;
+	u16 window_height;
+	u16 window_horizontal_offset;
+	u16 window_vertical_offset;
+	u32 addr_roi;
+	u32 addr_slice_header;
+	u16 slice_header_size_in_bits;
+	u16 slice_header_offset0;
+	u16 slice_header_offset1;
+	u16 slice_header_offset2;
+	u32 reserved5;
+	u32 reserved6;
+	u16 reserved7;
+	u16 reserved8;
+	u16 slice_synchro_enable;
+	u16 max_slice_number;
+	u32 rgb2_yuv_y_coeff;
+	u32 rgb2_yuv_u_coeff;
+	u32 rgb2_yuv_v_coeff;
+	u32 slice_byte_size;
+	u16 max_air_intra_mb_nb;
+	u16 brc_no_skip;
+	u32 addr_temporal_context;
+	u32 addr_brc_in_out_parameter;
+};
+
+/**
+ * @ slice_size: slice size
+ * @ slice_start_time: start time
+ * @ slice_stop_time: stop time
+ * @ slice_num: slice number
+ */
+struct hva_h264_slice_po {
+	u32 slice_size;
+	u32 slice_start_time;
+	u32 slice_end_time;
+	u32 slice_num;
+};
+
+/**
+ * @ bitstream_size: bitstream size
+ * @ dct_bitstream_size: dtc bitstream size
+ * @ stuffing_bits: number of stuffing bits inserted by the encoder
+ * @ removal_time: removal time of current frame (nb of ticks 1/framerate)
+ * @ hvc_start_time: hvc start time
+ * @ hvc_stop_time: hvc stop time
+ * @ slice_count: slice count
+ */
+struct hva_h264_po {
+	u32 bitstream_size;
+	u32 dct_bitstream_size;
+	u32 stuffing_bits;
+	u32 removal_time;
+	u32 hvc_start_time;
+	u32 hvc_stop_time;
+	u32 slice_count;
+	u32 reserved0;
+	struct hva_h264_slice_po slice_params[16];
+};
+
+struct hva_h264_task {
+	struct hva_h264_td td;
+	struct hva_h264_po po;
+};
+
+/**
+ * @seq_info:  sequence information buffer
+ * @ref_frame: reference frame buffer
+ * @rec_frame: reconstructed frame buffer
+ * @task:      task descriptor
+ */
+struct hva_h264_ctx {
+	struct hva_buffer *seq_info;
+	struct hva_buffer *ref_frame;
+	struct hva_buffer *rec_frame;
+	struct hva_buffer *task;
+};
+
+static int hva_h264_fill_slice_header(struct hva_ctx *pctx,
+				      u8 *slice_header_addr,
+				      struct hva_controls *ctrls,
+				      int frame_num,
+				      u16 *header_size,
+				      u16 *header_offset0,
+				      u16 *header_offset1,
+				      u16 *header_offset2)
+{
+	/*
+	 * with this HVA hardware version, part of the slice header is computed
+	 * on host and part by hardware.
+	 * The part of host is precomputed and available through this array.
+	 */
+	struct device *dev = ctx_to_dev(pctx);
+	int  cabac = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC;
+	const unsigned char slice_header[] = { 0x00, 0x00, 0x00, 0x01,
+					       0x41, 0x34, 0x07, 0x00};
+	int idr_pic_id = frame_num % 2;
+	enum hva_picture_coding_type type;
+	u32 frame_order = frame_num % ctrls->gop_size;
+
+	if (!(frame_num % ctrls->gop_size))
+		type = PICTURE_CODING_TYPE_I;
+	else
+		type = PICTURE_CODING_TYPE_P;
+
+	memcpy(slice_header_addr, slice_header, sizeof(slice_header));
+
+	*header_size = 56;
+	*header_offset0 = 40;
+	*header_offset1 = 13;
+	*header_offset2 = 0;
+
+	if (type == PICTURE_CODING_TYPE_I) {
+		slice_header_addr[4] = 0x65;
+		slice_header_addr[5] = 0x11;
+
+		/* toggle the I frame */
+		if ((frame_num / ctrls->gop_size) % 2) {
+			*header_size += 4;
+			*header_offset1 += 4;
+			slice_header_addr[6] = 0x04;
+			slice_header_addr[7] = 0x70;
+
+		} else {
+			*header_size += 2;
+			*header_offset1 += 2;
+			slice_header_addr[6] = 0x09;
+			slice_header_addr[7] = 0xC0;
+		}
+	} else {
+		if (ctrls->entropy_mode == cabac) {
+			*header_size += 1;
+			*header_offset1 += 1;
+			slice_header_addr[7] = 0x80;
+		}
+		/*
+		 * update slice header with P frame order
+		 * frame order is limited to 16 (coded on 4bits only)
+		 */
+		slice_header_addr[5] += ((frame_order & 0x0C) >> 2);
+		slice_header_addr[6] += ((frame_order & 0x03) << 6);
+	}
+
+	dev_dbg(dev,
+		"%s   %s slice header order %d idrPicId %d header size %d\n",
+		pctx->name, __func__, frame_order, idr_pic_id, *header_size);
+	return 0;
+}
+
+static int hva_h264_fill_data_nal(struct hva_ctx *pctx,
+				  unsigned int stuffing_bytes, u8 *addr,
+				  unsigned int stream_size, unsigned int *size)
+{
+	struct device *dev = ctx_to_dev(pctx);
+	const u8 start[] = { 0x00, 0x00, 0x00, 0x01 };
+
+	dev_dbg(dev, "%s   %s stuffing bytes %d\n", pctx->name, __func__,
+		stuffing_bytes);
+
+	if ((*size + stuffing_bytes + H264_FILLER_DATA_SIZE) > stream_size) {
+		dev_dbg(dev, "%s   %s too many stuffing bytes %d\n",
+			pctx->name, __func__, stuffing_bytes);
+		return 0;
+	}
+
+	/* start code */
+	memcpy(addr + *size, start, sizeof(start));
+	*size += sizeof(start);
+
+	/* nal_unit_type */
+	addr[*size] = NALU_TYPE_FILLER_DATA;
+	*size += 1;
+
+	memset(addr + *size, 0xff, stuffing_bytes);
+	*size += stuffing_bytes;
+
+	addr[*size] = 0x80;
+	*size += 1;
+
+	return 0;
+}
+
+static int hva_h264_fill_sei_nal(struct hva_ctx *pctx,
+				 enum hva_h264_sei_payload_type type,
+				 u8 *addr, u32 *size)
+{
+	struct device *dev = ctx_to_dev(pctx);
+	const u8 start[] = { 0x00, 0x00, 0x00, 0x01 };
+	struct hva_h264_stereo_video_sei info;
+	u8 offset = 7;
+	u8 msg = 0;
+
+	/* start code */
+	memcpy(addr + *size, start, sizeof(start));
+	*size += sizeof(start);
+
+	/* nal_unit_type */
+	addr[*size] = NALU_TYPE_SEI;
+	*size += 1;
+
+	/* payload type */
+	addr[*size] = type;
+	*size += 1;
+
+	switch (type) {
+	case SEI_STEREO_VIDEO_INFO:
+		memset(&info, 0, sizeof(info));
+
+		/* set to top/bottom frame packing arrangement */
+		info.field_views_flag = 1;
+		info.top_field_is_left_view_flag = 1;
+
+		/* payload size */
+		addr[*size] = 1;
+		*size += 1;
+
+		/* payload */
+		msg = info.field_views_flag << offset--;
+
+		if (info.field_views_flag) {
+			msg |= info.top_field_is_left_view_flag <<
+			       offset--;
+		} else {
+			msg |= info.current_frame_is_left_view_flag <<
+			       offset--;
+			msg |= info.next_frame_is_second_view_flag <<
+			       offset--;
+		}
+		msg |= info.left_view_self_contained_flag << offset--;
+		msg |= info.right_view_self_contained_flag << offset--;
+
+		addr[*size] = msg;
+		*size += 1;
+
+		addr[*size] = 0x80;
+		*size += 1;
+
+		return 0;
+	case SEI_BUFFERING_PERIOD:
+	case SEI_PICTURE_TIMING:
+	case SEI_FRAME_PACKING_ARRANGEMENT:
+	default:
+		dev_err(dev, "%s   sei nal type not supported %d\n",
+			pctx->name, type);
+		return -EINVAL;
+	}
+}
+
+static int hva_h264_prepare_task(struct hva_ctx *pctx,
+				 struct hva_h264_task *task,
+				 struct hva_frame *frame,
+				 struct hva_stream *stream)
+{
+	struct hva_dev *hva = ctx_to_hdev(pctx);
+	struct device *dev = ctx_to_dev(pctx);
+	struct hva_h264_ctx *ctx = (struct hva_h264_ctx *)pctx->priv;
+	struct hva_buffer *seq_info = ctx->seq_info;
+	struct hva_buffer *fwd_ref_frame = ctx->ref_frame;
+	struct hva_buffer *loc_rec_frame = ctx->rec_frame;
+	struct hva_h264_td *td = &task->td;
+	struct hva_controls *ctrls = &pctx->ctrls;
+	struct v4l2_fract *time_per_frame = &pctx->ctrls.time_per_frame;
+	int cavlc =  V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC;
+	u32 frame_num = pctx->stream_num;
+	u32 addr_esram = hva->esram_addr;
+	enum v4l2_mpeg_video_h264_level level;
+	dma_addr_t paddr = 0;
+	u8 *slice_header_vaddr;
+	u32 frame_width = frame->info.aligned_width;
+	u32 frame_height = frame->info.aligned_height;
+	u32 max_cpb_buffer_size;
+	unsigned int payload = stream->bytesused;
+	u32 max_bitrate;
+
+	/* check width and height parameters */
+	if ((frame_width > max(H264_MAX_SIZE_W, H264_MAX_SIZE_H)) ||
+	    (frame_height > max(H264_MAX_SIZE_W, H264_MAX_SIZE_H))) {
+		dev_err(dev,
+			"%s   width(%d) or height(%d) exceeds limits (%dx%d)\n",
+			pctx->name, frame_width, frame_height,
+			H264_MAX_SIZE_W, H264_MAX_SIZE_H);
+		return -EINVAL;
+	}
+
+	level = ctrls->level;
+
+	memset(td, 0, sizeof(struct hva_h264_td));
+
+	td->frame_width = frame_width;
+	td->frame_height = frame_height;
+
+	/* set frame alignement */
+	td->window_width =  frame_width;
+	td->window_height = frame_height;
+	td->window_horizontal_offset = 0;
+	td->window_vertical_offset = 0;
+
+	td->first_picture_in_sequence = (!frame_num) ? 1 : 0;
+
+	/* pic_order_cnt_type hard coded to '2' as only I & P frames */
+	td->pic_order_cnt_type = 2;
+
+	/* useConstrainedIntraFlag set to false for better coding efficiency */
+	td->use_constrained_intra_flag = false;
+	td->brc_type = (ctrls->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
+			? BRC_TYPE_CBR : BRC_TYPE_VBR;
+
+	td->entropy_coding_mode = (ctrls->entropy_mode == cavlc) ? CAVLC :
+				  CABAC;
+
+	td->bit_rate = ctrls->bitrate;
+
+	/* set framerate, framerate = 1 n/ time per frame */
+	if (time_per_frame->numerator >= 536) {
+		/*
+		 * due to a hardware bug, framerate denominator can't exceed
+		 * 536 (BRC overflow). Compute nearest framerate
+		 */
+		td->framerate_den = 1;
+		td->framerate_num = (time_per_frame->denominator +
+				    (time_per_frame->numerator >> 1) - 1) /
+				    time_per_frame->numerator;
+
+		/*
+		 * update bitrate to introduce a correction due to
+		 * the new framerate
+		 * new bitrate = (old bitrate * new framerate) / old framerate
+		 */
+		td->bit_rate /= time_per_frame->numerator;
+		td->bit_rate *= time_per_frame->denominator;
+		td->bit_rate /= td->framerate_num;
+	} else {
+		td->framerate_den = time_per_frame->numerator;
+		td->framerate_num = time_per_frame->denominator;
+	}
+
+	/* compute maximum bitrate depending on profile */
+	if (ctrls->profile >= V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)
+		max_bitrate = h264_infos_list[level].max_bitrate *
+			      H264_FACTOR_HIGH;
+	else
+		max_bitrate = h264_infos_list[level].max_bitrate *
+			      H264_FACTOR_BASELINE;
+
+	/* check if bitrate doesn't exceed max size */
+	if (td->bit_rate > max_bitrate) {
+		dev_dbg(dev,
+			"%s   bitrate (%d) larger than level and profile allow, clip to %d\n",
+			pctx->name, td->bit_rate, max_bitrate);
+		td->bit_rate = max_bitrate;
+	}
+
+	/* convert cpb_buffer_size in bits */
+	td->cpb_buffer_size = ctrls->cpb_size * 8000;
+
+	/* compute maximum cpb buffer size depending on profile */
+	if (ctrls->profile >= V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)
+		max_cpb_buffer_size =
+		    h264_infos_list[level].max_cpb_size * H264_FACTOR_HIGH;
+	else
+		max_cpb_buffer_size =
+		    h264_infos_list[level].max_cpb_size * H264_FACTOR_BASELINE;
+
+	/* check if cpb buffer size doesn't exceed max size */
+	if (td->cpb_buffer_size > max_cpb_buffer_size) {
+		dev_dbg(dev,
+			"%s   cpb size larger than level %d allows, clip to %d\n",
+			pctx->name, td->cpb_buffer_size, max_cpb_buffer_size);
+		td->cpb_buffer_size = max_cpb_buffer_size;
+	}
+
+	/* enable skipping in the Bitrate Controller */
+	td->brc_no_skip = 0;
+
+	/* initial delay */
+	if ((ctrls->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) &&
+	    td->bit_rate)
+		td->delay = 1000 * (td->cpb_buffer_size / td->bit_rate);
+	else
+		td->delay = 0;
+
+	switch (frame->info.pixelformat) {
+	case V4L2_PIX_FMT_NV12:
+		td->sampling_mode = SAMPLING_MODE_NV12;
+		break;
+	case V4L2_PIX_FMT_NV21:
+		td->sampling_mode = SAMPLING_MODE_NV21;
+		break;
+	default:
+		dev_err(dev, "%s   invalid source pixel format\n",
+			pctx->name);
+		return -EINVAL;
+	}
+
+	/*
+	 * fill matrix color converter (RGB to YUV)
+	 * Y = 0,299 R + 0,587 G + 0,114 B
+	 * Cb = -0,1687 R -0,3313 G + 0,5 B + 128
+	 * Cr = 0,5 R - 0,4187 G - 0,0813 B + 128
+	 */
+	td->rgb2_yuv_y_coeff = 0x12031008;
+	td->rgb2_yuv_u_coeff = 0x800EF7FB;
+	td->rgb2_yuv_v_coeff = 0x80FEF40E;
+
+	/* enable/disable transform mode */
+	td->transform_mode = ctrls->dct8x8;
+
+	/* encoder complexity fix to 2, ENCODE_I_16x16_I_NxN_P_16x16_P_WxH */
+	td->encoder_complexity = 2;
+
+	/* quant fix to 28, default VBR value */
+	td->quant = 28;
+
+	if (td->framerate_den == 0) {
+		dev_err(dev, "%s   invalid framerate\n", pctx->name);
+		return -EINVAL;
+	}
+
+	/* if automatic framerate, deactivate bitrate controller */
+	if (td->framerate_num == 0)
+		td->brc_type = 0;
+
+	/* compliancy fix to true */
+	td->strict_hrd_compliancy = 1;
+
+	/* set minimum & maximum quantizers */
+	td->qp_min = clamp_val(ctrls->qpmin, 0, 51);
+	td->qp_max = clamp_val(ctrls->qpmax, 0, 51);
+
+	td->addr_source_buffer = frame->paddr;
+	td->addr_fwd_ref_buffer = fwd_ref_frame->paddr;
+	td->addr_rec_buffer = loc_rec_frame->paddr;
+
+	td->addr_output_bitstream_end = (u32)stream->paddr + stream->size;
+
+	td->addr_output_bitstream_start = (u32)stream->paddr;
+	td->bitstream_offset = (((u32)stream->paddr & 0xF) << 3) &
+			       BITSTREAM_OFFSET_MASK;
+
+	td->addr_param_out = (u32)ctx->task->paddr +
+			     offsetof(struct hva_h264_task, po);
+
+	/* swap spatial and temporal context */
+	if (frame_num % 2) {
+		paddr = seq_info->paddr;
+		td->addr_spatial_context =  ALIGN(paddr, 0x100);
+		paddr = seq_info->paddr + DATA_SIZE(frame_width,
+							frame_height);
+		td->addr_temporal_context = ALIGN(paddr, 0x100);
+	} else {
+		paddr = seq_info->paddr;
+		td->addr_temporal_context = ALIGN(paddr, 0x100);
+		paddr = seq_info->paddr + DATA_SIZE(frame_width,
+							frame_height);
+		td->addr_spatial_context =  ALIGN(paddr, 0x100);
+	}
+
+	paddr = seq_info->paddr + 2 * DATA_SIZE(frame_width, frame_height);
+
+	td->addr_brc_in_out_parameter =  ALIGN(paddr, 0x100);
+
+	paddr = td->addr_brc_in_out_parameter + BRC_DATA_SIZE;
+	td->addr_slice_header =  ALIGN(paddr, 0x100);
+	td->addr_external_sw =  ALIGN(addr_esram, 0x100);
+
+	addr_esram += SEARCH_WINDOW_BUFFER_MAX_SIZE(frame_width);
+	td->addr_local_rec_buffer = ALIGN(addr_esram, 0x100);
+
+	addr_esram += LOCAL_RECONSTRUCTED_BUFFER_MAX_SIZE(frame_width);
+	td->addr_lctx = ALIGN(addr_esram, 0x100);
+
+	addr_esram += CTX_MB_BUFFER_MAX_SIZE(max(frame_width, frame_height));
+	td->addr_cabac_context_buffer = ALIGN(addr_esram, 0x100);
+
+	if (!(frame_num % ctrls->gop_size)) {
+		td->picture_coding_type = PICTURE_CODING_TYPE_I;
+		stream->vbuf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+	} else {
+		td->picture_coding_type = PICTURE_CODING_TYPE_P;
+		stream->vbuf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+	}
+
+	/* fill the slice header part */
+	slice_header_vaddr = seq_info->vaddr + (td->addr_slice_header -
+			     seq_info->paddr);
+
+	hva_h264_fill_slice_header(pctx, slice_header_vaddr, ctrls, frame_num,
+				   &td->slice_header_size_in_bits,
+				   &td->slice_header_offset0,
+				   &td->slice_header_offset1,
+				   &td->slice_header_offset2);
+
+	td->chroma_qp_index_offset = 2;
+	td->slice_synchro_enable = 0;
+	td->max_slice_number = 1;
+
+	/*
+	 * check the sps/pps header size for key frame only
+	 * sps/pps header was previously fill by libv4l
+	 * during qbuf of stream buffer
+	 */
+	if ((stream->vbuf.flags == V4L2_BUF_FLAG_KEYFRAME) &&
+	    (payload > MAX_SPS_PPS_SIZE)) {
+		dev_err(dev, "%s   invalid sps/pps size %d\n", pctx->name,
+			payload);
+		return -EINVAL;
+	}
+
+	if (stream->vbuf.flags != V4L2_BUF_FLAG_KEYFRAME)
+		payload = 0;
+
+	/* add SEI nal (video stereo info) */
+	if (ctrls->sei_fp && hva_h264_fill_sei_nal(pctx, SEI_STEREO_VIDEO_INFO,
+						   (u8 *)stream->vaddr,
+						   &payload)) {
+		dev_err(dev, "%s   fail to get SEI nal\n", pctx->name);
+		return -EINVAL;
+	}
+
+	/* fill size of non-VCL NAL units (SPS, PPS, filler and SEI) */
+	td->non_vcl_nalu_size = payload * 8;
+
+	/* compute bitstream offset & new start address of bitstream */
+	td->addr_output_bitstream_start += ((payload >> 4) << 4);
+	td->bitstream_offset += (payload - ((payload >> 4) << 4)) * 8;
+
+	stream->bytesused = payload;
+
+	return 0;
+}
+
+static unsigned int hva_h264_get_stream_size(struct hva_h264_task *task)
+{
+	struct hva_h264_po *po = &task->po;
+
+	return po->bitstream_size;
+}
+
+static u32 hva_h264_get_stuffing_bytes(struct hva_h264_task *task)
+{
+	struct hva_h264_po *po = &task->po;
+
+	return po->stuffing_bits >> 3;
+}
+
+static int hva_h264_open(struct hva_ctx *pctx)
+{
+	struct device *dev = ctx_to_dev(pctx);
+	struct hva_h264_ctx *ctx;
+	struct hva_dev *hva = ctx_to_hdev(pctx);
+	u32 frame_width = pctx->frameinfo.aligned_width;
+	u32 frame_height = pctx->frameinfo.aligned_height;
+	u32 size;
+	int ret;
+
+	/* check esram size necessary to encode a frame */
+	size = SEARCH_WINDOW_BUFFER_MAX_SIZE(frame_width) +
+	       LOCAL_RECONSTRUCTED_BUFFER_MAX_SIZE(frame_width) +
+	       CTX_MB_BUFFER_MAX_SIZE(max(frame_width, frame_height)) +
+	       CABAC_CONTEXT_BUFFER_MAX_SIZE(frame_width);
+
+	if (hva->esram_size < size) {
+		dev_err(dev, "%s   not enough esram (max:%d request:%d)\n",
+			pctx->name, hva->esram_size, size);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* allocate context for codec */
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* allocate sequence info buffer */
+	ret = hva_mem_alloc(pctx,
+			    2 * DATA_SIZE(frame_width, frame_height) +
+			    SLICE_HEADER_SIZE +
+			    BRC_DATA_SIZE,
+			    "hva sequence info",
+			    &ctx->seq_info);
+	if (ret) {
+		dev_err(dev,
+			"%s   failed to allocate sequence info buffer\n",
+			pctx->name);
+		goto err_ctx;
+	}
+
+	/* allocate reference frame buffer */
+	ret = hva_mem_alloc(pctx,
+			    frame_width * frame_height * 3 / 2,
+			    "hva reference frame",
+			    &ctx->ref_frame);
+	if (ret) {
+		dev_err(dev, "%s   failed to allocate reference frame buffer\n",
+			pctx->name);
+		goto err_seq_info;
+	}
+
+	/* allocate reconstructed frame buffer */
+	ret = hva_mem_alloc(pctx,
+			    frame_width * frame_height * 3 / 2,
+			    "hva reconstructed frame",
+			    &ctx->rec_frame);
+	if (ret) {
+		dev_err(dev,
+			"%s   failed to allocate reconstructed frame buffer\n",
+			pctx->name);
+		goto err_ref_frame;
+	}
+
+	/* allocate task descriptor */
+	ret = hva_mem_alloc(pctx,
+			    sizeof(struct hva_h264_task),
+			    "hva task descriptor",
+			    &ctx->task);
+	if (ret) {
+		dev_err(dev,
+			"%s   failed to allocate task descriptor\n",
+			pctx->name);
+		goto err_rec_frame;
+	}
+
+	pctx->priv = (void *)ctx;
+
+	return 0;
+
+err_rec_frame:
+	hva_mem_free(pctx, ctx->rec_frame);
+err_ref_frame:
+	hva_mem_free(pctx, ctx->ref_frame);
+err_seq_info:
+	hva_mem_free(pctx, ctx->seq_info);
+err_ctx:
+	devm_kfree(dev, ctx);
+err:
+	return ret;
+}
+
+static int hva_h264_close(struct hva_ctx *pctx)
+{
+	struct hva_h264_ctx *ctx = (struct hva_h264_ctx *)pctx->priv;
+	struct device *dev = ctx_to_dev(pctx);
+
+	if (ctx->seq_info)
+		hva_mem_free(pctx, ctx->seq_info);
+
+	if (ctx->ref_frame)
+		hva_mem_free(pctx, ctx->ref_frame);
+
+	if (ctx->rec_frame)
+		hva_mem_free(pctx, ctx->rec_frame);
+
+	if (ctx->task)
+		hva_mem_free(pctx, ctx->task);
+
+	devm_kfree(dev, ctx);
+
+	return 0;
+}
+
+static int hva_h264_encode(struct hva_ctx *pctx, struct hva_frame *frame,
+			   struct hva_stream *stream)
+{
+	struct hva_h264_ctx *ctx = (struct hva_h264_ctx *)pctx->priv;
+	struct hva_h264_task *task = (struct hva_h264_task *)ctx->task->vaddr;
+	struct hva_buffer *tmp_frame;
+	u32 stuffing_bytes = 0;
+	int ret = 0;
+
+	ret = hva_h264_prepare_task(pctx, task, frame, stream);
+	if (ret)
+		goto err;
+
+	ret = hva_hw_execute_task(pctx, H264_ENC, ctx->task);
+	if (ret)
+		goto err;
+
+	pctx->stream_num++;
+	stream->bytesused += hva_h264_get_stream_size(task);
+
+	stuffing_bytes = hva_h264_get_stuffing_bytes(task);
+
+	if (stuffing_bytes)
+		hva_h264_fill_data_nal(pctx, stuffing_bytes,
+				       (u8 *)stream->vaddr,
+				       stream->size,
+				       &stream->bytesused);
+
+	/* switch reference & reconstructed frame */
+	tmp_frame = ctx->ref_frame;
+	ctx->ref_frame = ctx->rec_frame;
+	ctx->rec_frame = tmp_frame;
+
+	return 0;
+err:
+	stream->bytesused = 0;
+	return ret;
+}
+
+const struct hva_enc nv12h264enc = {
+	.name = "H264(NV12)",
+	.pixelformat = V4L2_PIX_FMT_NV12,
+	.streamformat = V4L2_PIX_FMT_H264,
+	.max_width = H264_MAX_SIZE_W,
+	.max_height = H264_MAX_SIZE_H,
+	.open = hva_h264_open,
+	.close = hva_h264_close,
+	.encode = hva_h264_encode,
+};
+
+const struct hva_enc nv21h264enc = {
+	.name = "H264(NV21)",
+	.pixelformat = V4L2_PIX_FMT_NV21,
+	.streamformat = V4L2_PIX_FMT_H264,
+	.max_width = H264_MAX_SIZE_W,
+	.max_height = H264_MAX_SIZE_H,
+	.open = hva_h264_open,
+	.close = hva_h264_close,
+	.encode = hva_h264_encode,
+};
diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
new file mode 100644
index 0000000..d341d49
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-hw.c
@@ -0,0 +1,538 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ *          Hugues Fruchet <hugues.fruchet@st.com>
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "hva.h"
+#include "hva-hw.h"
+
+/* HVA register offsets */
+#define HVA_HIF_REG_RST                 0x0100U
+#define HVA_HIF_REG_RST_ACK             0x0104U
+#define HVA_HIF_REG_MIF_CFG             0x0108U
+#define HVA_HIF_REG_HEC_MIF_CFG         0x010CU
+#define HVA_HIF_REG_CFL                 0x0110U
+#define HVA_HIF_FIFO_CMD                0x0114U
+#define HVA_HIF_FIFO_STS                0x0118U
+#define HVA_HIF_REG_SFL                 0x011CU
+#define HVA_HIF_REG_IT_ACK              0x0120U
+#define HVA_HIF_REG_ERR_IT_ACK          0x0124U
+#define HVA_HIF_REG_LMI_ERR             0x0128U
+#define HVA_HIF_REG_EMI_ERR             0x012CU
+#define HVA_HIF_REG_HEC_MIF_ERR         0x0130U
+#define HVA_HIF_REG_HEC_STS             0x0134U
+#define HVA_HIF_REG_HVC_STS             0x0138U
+#define HVA_HIF_REG_HJE_STS             0x013CU
+#define HVA_HIF_REG_CNT                 0x0140U
+#define HVA_HIF_REG_HEC_CHKSYN_DIS      0x0144U
+#define HVA_HIF_REG_CLK_GATING          0x0148U
+#define HVA_HIF_REG_VERSION             0x014CU
+#define HVA_HIF_REG_BSM                 0x0150U
+
+/* define value for version id register (HVA_HIF_REG_VERSION) */
+#define VERSION_ID_MASK	0x0000FFFF
+
+/* define values for BSM register (HVA_HIF_REG_BSM) */
+#define BSM_CFG_VAL1	0x0003F000
+#define BSM_CFG_VAL2	0x003F0000
+
+/* define values for memory interface register (HVA_HIF_REG_MIF_CFG) */
+#define MIF_CFG_VAL1	0x04460446
+#define MIF_CFG_VAL2	0x04460806
+#define MIF_CFG_VAL3	0x00000000
+
+/* define value for HEC memory interface register (HVA_HIF_REG_MIF_CFG) */
+#define HEC_MIF_CFG_VAL	0x000000C4
+
+/*  Bits definition for clock gating register (HVA_HIF_REG_CLK_GATING) */
+#define CLK_GATING_HVC	BIT(0)
+#define CLK_GATING_HEC	BIT(1)
+#define CLK_GATING_HJE	BIT(2)
+
+/* fix hva clock rate */
+#define CLK_RATE		300000000
+
+/* fix delay for pmruntime */
+#define AUTOSUSPEND_DELAY_MS	3
+
+/*
+ * hw encode error values
+ * NO_ERROR: Success, Task OK
+ * H264_BITSTREAM_OVERSIZE: VECH264 Bitstream size > bitstream buffer
+ * H264_FRAME_SKIPPED: VECH264 Frame skipped (refers to CPB Buffer Size)
+ * H264_SLICE_LIMIT_SIZE: VECH264 MB > slice limit size
+ * H264_MAX_SLICE_NUMBER: VECH264 max slice number reached
+ * H264_SLICE_READY: VECH264 Slice ready
+ * TASK_LIST_FULL: HVA/FPC task list full
+		   (discard latest transform command)
+ * UNKNOWN_COMMAND: Transform command not known by HVA/FPC
+ * WRONG_CODEC_OR_RESOLUTION: Wrong Codec or Resolution Selection
+ * NO_INT_COMPLETION: Time-out on interrupt completion
+ * LMI_ERR: Local Memory Interface Error
+ * EMI_ERR: External Memory Interface Error
+ * HECMI_ERR: HEC Memory Interface Error
+ */
+enum hva_hw_error {
+	NO_ERROR = 0x0,
+	H264_BITSTREAM_OVERSIZE = 0x2,
+	H264_FRAME_SKIPPED = 0x4,
+	H264_SLICE_LIMIT_SIZE = 0x5,
+	H264_MAX_SLICE_NUMBER = 0x7,
+	H264_SLICE_READY = 0x8,
+	TASK_LIST_FULL = 0xF0,
+	UNKNOWN_COMMAND = 0xF1,
+	WRONG_CODEC_OR_RESOLUTION = 0xF4,
+	NO_INT_COMPLETION = 0x100,
+	LMI_ERR = 0x101,
+	EMI_ERR = 0x102,
+	HECMI_ERR = 0x103,
+};
+
+static irqreturn_t hva_hw_its_interrupt(int irq, void *data)
+{
+	struct hva_dev *hva = data;
+
+	/* read status registers */
+	hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
+	hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);
+
+	/* acknowledge interruption */
+	writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);
+
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t hva_hw_its_irq_thread(int irq, void *arg)
+{
+	struct hva_dev *hva = arg;
+	struct device *dev = hva_to_dev(hva);
+	u32 status = hva->sts_reg & 0xFF;
+	u8 ctx_id = 0;
+	struct hva_ctx *ctx = NULL;
+
+	dev_dbg(dev, "%s     %s: status: 0x%02x fifo level: 0x%02x\n",
+		HVA_PREFIX, __func__, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);
+
+	/*
+	 * status: task_id[31:16] client_id[15:8] status[7:0]
+	 * the context identifier is retrieved from the client identifier
+	 */
+	ctx_id = (hva->sts_reg & 0xFF00) >> 8;
+	if (ctx_id >= HVA_MAX_INSTANCES) {
+		dev_err(dev, "%s     %s: bad context identifier: %d\n",
+			ctx->name, __func__, ctx_id);
+		ctx->hw_err = true;
+		goto out;
+	}
+
+	ctx = hva->instances[ctx_id];
+	if (!ctx)
+		goto out;
+
+	switch (status) {
+	case NO_ERROR:
+		dev_dbg(dev, "%s     %s: no error\n",
+			ctx->name, __func__);
+		ctx->hw_err = false;
+		break;
+	case H264_SLICE_READY:
+		dev_dbg(dev, "%s     %s: h264 slice ready\n",
+			ctx->name, __func__);
+		ctx->hw_err = false;
+		break;
+	case H264_FRAME_SKIPPED:
+		dev_dbg(dev, "%s     %s: h264 frame skipped\n",
+			ctx->name, __func__);
+		ctx->hw_err = false;
+		break;
+	case H264_BITSTREAM_OVERSIZE:
+		dev_err(dev, "%s     %s:h264 bitstream oversize\n",
+			ctx->name, __func__);
+		ctx->hw_err = true;
+		break;
+	case H264_SLICE_LIMIT_SIZE:
+		dev_err(dev, "%s     %s: h264 slice limit size is reached\n",
+			ctx->name, __func__);
+		ctx->hw_err = true;
+		break;
+	case H264_MAX_SLICE_NUMBER:
+		dev_err(dev, "%s     %s: h264 max slice number is reached\n",
+			ctx->name, __func__);
+		ctx->hw_err = true;
+		break;
+	case TASK_LIST_FULL:
+		dev_err(dev, "%s     %s:task list full\n",
+			ctx->name, __func__);
+		ctx->hw_err = true;
+		break;
+	case UNKNOWN_COMMAND:
+		dev_err(dev, "%s     %s: command not known\n",
+			ctx->name, __func__);
+		ctx->hw_err = true;
+		break;
+	case WRONG_CODEC_OR_RESOLUTION:
+		dev_err(dev, "%s     %s: wrong codec or resolution\n",
+			ctx->name, __func__);
+		ctx->hw_err = true;
+		break;
+	default:
+		dev_err(dev, "%s     %s: status not recognized\n",
+			ctx->name, __func__);
+		ctx->hw_err = true;
+		break;
+	}
+out:
+	complete(&hva->interrupt);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t hva_hw_err_interrupt(int irq, void *data)
+{
+	struct hva_dev *hva = data;
+
+	/* read status registers */
+	hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
+	hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);
+
+	/* read error registers */
+	hva->lmi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_LMI_ERR);
+	hva->emi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_EMI_ERR);
+	hva->hec_mif_err_reg = readl_relaxed(hva->regs +
+					     HVA_HIF_REG_HEC_MIF_ERR);
+
+	/* acknowledge interruption */
+	writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);
+
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t hva_hw_err_irq_thread(int irq, void *arg)
+{
+	struct hva_dev *hva = arg;
+	struct device *dev = hva_to_dev(hva);
+	u8 ctx_id = 0;
+	struct hva_ctx *ctx;
+
+	dev_dbg(dev, "%s     status: 0x%02x fifo level: 0x%02x\n",
+		HVA_PREFIX, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);
+
+	/*
+	 * status: task_id[31:16] client_id[15:8] status[7:0]
+	 * the context identifier is retrieved from the client identifier
+	 */
+	ctx_id = (hva->sts_reg & 0xFF00) >> 8;
+	if (ctx_id >= HVA_MAX_INSTANCES) {
+		dev_err(dev, "%s     bad context identifier: %d\n", HVA_PREFIX,
+			ctx_id);
+		goto out;
+	}
+
+	ctx = hva->instances[ctx_id];
+	if (!ctx)
+		goto out;
+
+	if (hva->lmi_err_reg) {
+		dev_err(dev, "%s     local memory interface error: 0x%08x\n",
+			ctx->name, hva->lmi_err_reg);
+		ctx->hw_err = true;
+	}
+
+	if (hva->lmi_err_reg) {
+		dev_err(dev, "%s     external memory interface error: 0x%08x\n",
+			ctx->name, hva->emi_err_reg);
+		ctx->hw_err = true;
+	}
+
+	if (hva->hec_mif_err_reg) {
+		dev_err(dev, "%s     hec memory interface error: 0x%08x\n",
+			ctx->name, hva->hec_mif_err_reg);
+		ctx->hw_err = true;
+	}
+out:
+	complete(&hva->interrupt);
+
+	return IRQ_HANDLED;
+}
+
+static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
+{
+	struct device *dev = hva_to_dev(hva);
+	unsigned long int version;
+
+	if (pm_runtime_get_sync(dev) < 0) {
+		dev_err(dev, "%s     failed to get pm_runtime\n", HVA_PREFIX);
+		mutex_unlock(&hva->protect_mutex);
+		return -EFAULT;
+	}
+
+	version = readl_relaxed(hva->regs + HVA_HIF_REG_VERSION) &
+				VERSION_ID_MASK;
+
+	pm_runtime_put_autosuspend(dev);
+
+	switch (version) {
+	case HVA_VERSION_V400:
+		dev_dbg(dev, "%s     IP hardware version 0x%lx\n",
+			HVA_PREFIX, version);
+		break;
+	default:
+		dev_err(dev, "%s     unknown IP hardware version 0x%lx\n",
+			HVA_PREFIX, version);
+		version = HVA_VERSION_UNKNOWN;
+		break;
+	}
+
+	return version;
+}
+
+int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *regs;
+	struct resource *esram;
+	int ret;
+
+	WARN_ON(!hva);
+
+	/* get memory for registers */
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hva->regs = devm_ioremap_resource(dev, regs);
+	if (IS_ERR_OR_NULL(hva->regs)) {
+		dev_err(dev, "%s     failed to get regs\n", HVA_PREFIX);
+		return PTR_ERR(hva->regs);
+	}
+
+	/* get memory for esram */
+	esram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (IS_ERR_OR_NULL(esram)) {
+		dev_err(dev, "%s     failed to get esram\n", HVA_PREFIX);
+		return PTR_ERR(esram);
+	}
+	hva->esram_addr = esram->start;
+	hva->esram_size = resource_size(esram);
+
+	dev_info(dev, "%s     esram reserved for address: 0x%x size:%d\n",
+		 HVA_PREFIX, hva->esram_addr, hva->esram_size);
+
+	/* get clock resource */
+	hva->clk = devm_clk_get(dev, "clk_hva");
+	if (IS_ERR(hva->clk)) {
+		dev_err(dev, "%s     failed to get clock\n", HVA_PREFIX);
+		return PTR_ERR(hva->clk);
+	}
+
+	ret = clk_prepare(hva->clk);
+	if (ret < 0) {
+		dev_err(dev, "%s     failed to prepare clock\n", HVA_PREFIX);
+		hva->clk = ERR_PTR(-EINVAL);
+		return ret;
+	}
+
+	/* get status interruption resource */
+	ret  = platform_get_irq(pdev, 0);
+	if (ret < 0) {
+		dev_err(dev, "%s     failed to get status IRQ\n", HVA_PREFIX);
+		goto err_clk;
+	}
+	hva->irq_its = ret;
+
+	ret = devm_request_threaded_irq(dev, hva->irq_its, hva_hw_its_interrupt,
+					hva_hw_its_irq_thread,
+					IRQF_ONESHOT,
+					"hva_its_irq", hva);
+	if (ret) {
+		dev_err(dev, "%s     failed to install status IRQ 0x%x\n",
+			HVA_PREFIX, hva->irq_its);
+		goto err_clk;
+	}
+	disable_irq(hva->irq_its);
+
+	/* get error interruption resource */
+	ret = platform_get_irq(pdev, 1);
+	if (ret < 0) {
+		dev_err(dev, "%s     failed to get error IRQ\n", HVA_PREFIX);
+		goto err_clk;
+	}
+	hva->irq_err = ret;
+
+	ret = devm_request_threaded_irq(dev, hva->irq_err, hva_hw_err_interrupt,
+					hva_hw_err_irq_thread,
+					IRQF_ONESHOT,
+					"hva_err_irq", hva);
+	if (ret) {
+		dev_err(dev, "%s     failed to install error IRQ 0x%x\n",
+			HVA_PREFIX, hva->irq_err);
+		goto err_clk;
+	}
+	disable_irq(hva->irq_err);
+
+	/* initialise protection mutex */
+	mutex_init(&hva->protect_mutex);
+
+	/* initialise completion signal */
+	init_completion(&hva->interrupt);
+
+	/* initialise runtime power management */
+	pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_DELAY_MS);
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_suspended(dev);
+	pm_runtime_enable(dev);
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		dev_err(dev, "%s     failed to set PM\n", HVA_PREFIX);
+		goto err_clk;
+	}
+
+	/* check IP hardware version */
+	hva->ip_version = hva_hw_get_ip_version(hva);
+
+	if (hva->ip_version == HVA_VERSION_UNKNOWN) {
+		ret = -EINVAL;
+		goto err_pm;
+	}
+
+	dev_info(dev, "%s     found hva device (version 0x%lx)\n", HVA_PREFIX,
+		 hva->ip_version);
+
+	return 0;
+
+err_pm:
+	pm_runtime_put(dev);
+err_clk:
+	if (hva->clk)
+		clk_unprepare(hva->clk);
+
+	return ret;
+}
+
+void hva_hw_remove(struct hva_dev *hva)
+{
+	struct device *dev = hva_to_dev(hva);
+
+	disable_irq(hva->irq_its);
+	disable_irq(hva->irq_err);
+
+	pm_runtime_put_autosuspend(dev);
+	pm_runtime_disable(dev);
+}
+
+int hva_hw_runtime_suspend(struct device *dev)
+{
+	struct hva_dev *hva = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(hva->clk);
+
+	return 0;
+}
+
+int hva_hw_runtime_resume(struct device *dev)
+{
+	struct hva_dev *hva = dev_get_drvdata(dev);
+
+	if (clk_prepare_enable(hva->clk)) {
+		dev_err(hva->dev, "%s     failed to prepare hva clk\n",
+			HVA_PREFIX);
+		return -EINVAL;
+	}
+
+	if (clk_set_rate(hva->clk, CLK_RATE)) {
+		dev_err(dev, "%s     failed to set clock frequency\n",
+			HVA_PREFIX);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
+			struct hva_buffer *task)
+{
+	struct hva_dev *hva = ctx_to_hdev(ctx);
+	struct device *dev = hva_to_dev(hva);
+	u8 client_id = ctx->id;
+	int ret;
+	u32 reg = 0;
+
+	mutex_lock(&hva->protect_mutex);
+
+	/* enable irqs */
+	enable_irq(hva->irq_its);
+	enable_irq(hva->irq_err);
+
+	if (pm_runtime_get_sync(dev) < 0) {
+		dev_err(dev, "%s     failed to get pm_runtime\n", ctx->name);
+		ret = -EFAULT;
+		goto out;
+	}
+
+	reg = readl_relaxed(hva->regs + HVA_HIF_REG_CLK_GATING);
+	switch (cmd) {
+	case H264_ENC:
+		reg |= CLK_GATING_HVC;
+		break;
+	default:
+		dev_dbg(dev, "%s     unknown command 0x%x\n", ctx->name, cmd);
+		ret = -EFAULT;
+		goto out;
+	}
+	writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);
+
+	dev_dbg(dev, "%s     %s: write configuration registers\n", ctx->name,
+		__func__);
+
+	/* byte swap config */
+	writel_relaxed(BSM_CFG_VAL1, hva->regs + HVA_HIF_REG_BSM);
+
+	/* define Max Opcode Size and Max Message Size for LMI and EMI */
+	writel_relaxed(MIF_CFG_VAL3, hva->regs + HVA_HIF_REG_MIF_CFG);
+	writel_relaxed(HEC_MIF_CFG_VAL, hva->regs + HVA_HIF_REG_HEC_MIF_CFG);
+
+	/*
+	 * command FIFO: task_id[31:16] client_id[15:8] command_type[7:0]
+	 * the context identifier is provided as client identifier to the
+	 * hardware, and is retrieved in the interrupt functions from the
+	 * status register
+	 */
+	dev_dbg(dev, "%s     %s: send task (cmd: %d, task_desc: %pad)\n",
+		ctx->name, __func__, cmd + (client_id << 8), &task->paddr);
+	writel_relaxed(cmd + (client_id << 8), hva->regs + HVA_HIF_FIFO_CMD);
+	writel_relaxed(task->paddr, hva->regs + HVA_HIF_FIFO_CMD);
+
+	if (!wait_for_completion_timeout(&hva->interrupt,
+					 msecs_to_jiffies(2000))) {
+		dev_err(dev, "%s     %s: time out on completion\n", ctx->name,
+			__func__);
+		ret = -EFAULT;
+		goto out;
+	}
+
+	/* get encoding status */
+	ret = ctx->hw_err ? -EFAULT : 0;
+
+out:
+	disable_irq(hva->irq_its);
+	disable_irq(hva->irq_err);
+
+	switch (cmd) {
+	case H264_ENC:
+		reg &= ~CLK_GATING_HVC;
+		writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);
+		break;
+	default:
+		dev_dbg(dev, "%s     unknown command 0x%x\n", ctx->name, cmd);
+	}
+
+	pm_runtime_put_autosuspend(dev);
+	mutex_unlock(&hva->protect_mutex);
+
+	return ret;
+}
diff --git a/drivers/media/platform/sti/hva/hva-hw.h b/drivers/media/platform/sti/hva/hva-hw.h
new file mode 100644
index 0000000..efb45b9
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-hw.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ *          Hugues Fruchet <hugues.fruchet@st.com>
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#ifndef HVA_HW_H
+#define HVA_HW_H
+
+#include "hva-mem.h"
+
+/* HVA Versions */
+#define HVA_VERSION_UNKNOWN    0x000
+#define HVA_VERSION_V400       0x400
+
+/* HVA command types */
+enum hva_hw_cmd_type {
+	/* RESERVED = 0x00 */
+	/* RESERVED = 0x01 */
+	H264_ENC = 0x02,
+	/* RESERVED = 0x03 */
+	/* RESERVED = 0x04 */
+	/* RESERVED = 0x05 */
+	/* RESERVED = 0x06 */
+	/* RESERVED = 0x07 */
+	REMOVE_CLIENT = 0x08,
+	FREEZE_CLIENT = 0x09,
+	START_CLIENT = 0x0A,
+	FREEZE_ALL = 0x0B,
+	START_ALL = 0x0C,
+	REMOVE_ALL = 0x0D
+};
+
+int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva);
+void hva_hw_remove(struct hva_dev *hva);
+int hva_hw_runtime_suspend(struct device *dev);
+int hva_hw_runtime_resume(struct device *dev);
+int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
+			struct hva_buffer *task);
+
+#endif /* HVA_HW_H */
diff --git a/drivers/media/platform/sti/hva/hva-mem.c b/drivers/media/platform/sti/hva/hva-mem.c
new file mode 100644
index 0000000..649f660
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-mem.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ *          Hugues Fruchet <hugues.fruchet@st.com>
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include "hva.h"
+#include "hva-mem.h"
+
+int hva_mem_alloc(struct hva_ctx *ctx, u32 size, const char *name,
+		  struct hva_buffer **buf)
+{
+	struct device *dev = ctx_to_dev(ctx);
+	struct hva_buffer *b;
+	dma_addr_t paddr;
+	void *base;
+
+	b = devm_kzalloc(dev, sizeof(*b), GFP_KERNEL);
+	if (!b)
+		return -ENOMEM;
+
+	base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA,
+			       DMA_ATTR_WRITE_COMBINE);
+	if (!base) {
+		dev_err(dev, "%s %s : dma_alloc_attrs failed for %s (size=%d)\n",
+			ctx->name, __func__, name, size);
+		devm_kfree(dev, b);
+		return -ENOMEM;
+	}
+
+	b->size = size;
+	b->paddr = paddr;
+	b->vaddr = base;
+	b->name = name;
+
+	dev_dbg(dev,
+		"%s allocate %d bytes of HW memory @(virt=%p, phy=%pad): %s\n",
+		ctx->name, size, b->vaddr, &b->paddr, b->name);
+
+	/* return  hva buffer to user */
+	*buf = b;
+
+	return 0;
+}
+
+void hva_mem_free(struct hva_ctx *ctx, struct hva_buffer *buf)
+{
+	struct device *dev = ctx_to_dev(ctx);
+
+	dev_dbg(dev,
+		"%s free %d bytes of HW memory @(virt=%p, phy=%pad): %s\n",
+		ctx->name, buf->size, buf->vaddr, &buf->paddr, buf->name);
+
+	dma_free_attrs(dev, buf->size, buf->vaddr, buf->paddr,
+		       DMA_ATTR_WRITE_COMBINE);
+
+	devm_kfree(dev, buf);
+}
diff --git a/drivers/media/platform/sti/hva/hva-mem.h b/drivers/media/platform/sti/hva/hva-mem.h
new file mode 100644
index 0000000..a95c728
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-mem.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ *          Hugues Fruchet <hugues.fruchet@st.com>
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#ifndef HVA_MEM_H
+#define HVA_MEM_H
+
+/**
+ * struct hva_buffer - hva buffer
+ *
+ * @name:  name of requester
+ * @paddr: physical address (for hardware)
+ * @vaddr: virtual address (kernel can read/write)
+ * @size:  size of buffer
+ */
+struct hva_buffer {
+	const char		*name;
+	dma_addr_t		paddr;
+	void			*vaddr;
+	u32			size;
+};
+
+int hva_mem_alloc(struct hva_ctx *ctx,
+		  __u32 size,
+		  const char *name,
+		  struct hva_buffer **buf);
+
+void hva_mem_free(struct hva_ctx *ctx,
+		  struct hva_buffer *buf);
+
+#endif /* HVA_MEM_H */
diff --git a/drivers/media/platform/sti/hva/hva-v4l2.c b/drivers/media/platform/sti/hva/hva-v4l2.c
new file mode 100644
index 0000000..6bf3c858
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-v4l2.c
@@ -0,0 +1,1415 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ *          Hugues Fruchet <hugues.fruchet@st.com>
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "hva.h"
+#include "hva-hw.h"
+
+#define HVA_NAME "st-hva"
+
+#define MIN_FRAMES	1
+#define MIN_STREAMS	1
+
+#define HVA_MIN_WIDTH	32
+#define HVA_MAX_WIDTH	1920
+#define HVA_MIN_HEIGHT	32
+#define HVA_MAX_HEIGHT	1920
+
+/* HVA requires a 16x16 pixels alignment for frames */
+#define HVA_WIDTH_ALIGNMENT	16
+#define HVA_HEIGHT_ALIGNMENT	16
+
+#define HVA_DEFAULT_WIDTH	HVA_MIN_WIDTH
+#define	HVA_DEFAULT_HEIGHT	HVA_MIN_HEIGHT
+#define HVA_DEFAULT_FRAME_NUM	1
+#define HVA_DEFAULT_FRAME_DEN	30
+
+#define to_type_str(type) (type == V4L2_BUF_TYPE_VIDEO_OUTPUT ? \
+			   "frame" : "stream")
+
+#define fh_to_ctx(f)    (container_of(f, struct hva_ctx, fh))
+
+/* registry of available encoders */
+static const struct hva_enc *hva_encoders[] = {
+	&nv12h264enc,
+	&nv21h264enc,
+};
+
+static inline int frame_size(u32 w, u32 h, u32 fmt)
+{
+	switch (fmt) {
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+		return (w * h * 3) / 2;
+	default:
+		return 0;
+	}
+}
+
+static inline int frame_stride(u32 w, u32 fmt)
+{
+	switch (fmt) {
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+		return w;
+	default:
+		return 0;
+	}
+}
+
+static inline int frame_alignment(u32 fmt)
+{
+	switch (fmt) {
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+		/* multiple of 2 */
+		return 2;
+	default:
+		return 1;
+	}
+}
+
+static inline int estimated_stream_size(u32 w, u32 h)
+{
+	/*
+	 * HVA only encodes in YUV420 format, whatever the frame format.
+	 * A compression ratio of 2 is assumed: thus, the maximum size
+	 * of a stream is estimated to ((width x height x 3 / 2) / 2)
+	 */
+	return (w * h * 3) / 4;
+}
+
+static void set_default_params(struct hva_ctx *ctx)
+{
+	struct hva_frameinfo *frameinfo = &ctx->frameinfo;
+	struct hva_streaminfo *streaminfo = &ctx->streaminfo;
+
+	frameinfo->pixelformat = V4L2_PIX_FMT_NV12;
+	frameinfo->width = HVA_DEFAULT_WIDTH;
+	frameinfo->height = HVA_DEFAULT_HEIGHT;
+	frameinfo->aligned_width = ALIGN(frameinfo->width,
+					 HVA_WIDTH_ALIGNMENT);
+	frameinfo->aligned_height = ALIGN(frameinfo->height,
+					  HVA_HEIGHT_ALIGNMENT);
+	frameinfo->size = frame_size(frameinfo->aligned_width,
+				     frameinfo->aligned_height,
+				     frameinfo->pixelformat);
+
+	streaminfo->streamformat = V4L2_PIX_FMT_H264;
+	streaminfo->width = HVA_DEFAULT_WIDTH;
+	streaminfo->height = HVA_DEFAULT_HEIGHT;
+
+	ctx->colorspace = V4L2_COLORSPACE_REC709;
+	ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+	ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+	ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
+
+	ctx->max_stream_size = estimated_stream_size(streaminfo->width,
+						     streaminfo->height);
+}
+
+static const struct hva_enc *hva_find_encoder(struct hva_ctx *ctx,
+					      u32 pixelformat,
+					      u32 streamformat)
+{
+	struct hva_dev *hva = ctx_to_hdev(ctx);
+	const struct hva_enc *enc;
+	unsigned int i;
+
+	for (i = 0; i < hva->nb_of_encoders; i++) {
+		enc = hva->encoders[i];
+		if ((enc->pixelformat == pixelformat) &&
+		    (enc->streamformat == streamformat))
+			return enc;
+	}
+
+	return NULL;
+}
+
+static void register_format(u32 format, u32 formats[], u32 *nb_of_formats)
+{
+	u32 i;
+	bool found = false;
+
+	for (i = 0; i < *nb_of_formats; i++) {
+		if (format == formats[i]) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		formats[(*nb_of_formats)++] = format;
+}
+
+static void register_formats(struct hva_dev *hva)
+{
+	unsigned int i;
+
+	for (i = 0; i < hva->nb_of_encoders; i++) {
+		register_format(hva->encoders[i]->pixelformat,
+				hva->pixelformats,
+				&hva->nb_of_pixelformats);
+
+		register_format(hva->encoders[i]->streamformat,
+				hva->streamformats,
+				&hva->nb_of_streamformats);
+	}
+}
+
+static void register_encoders(struct hva_dev *hva)
+{
+	struct device *dev = hva_to_dev(hva);
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(hva_encoders); i++) {
+		if (hva->nb_of_encoders >= HVA_MAX_ENCODERS) {
+			dev_dbg(dev,
+				"%s failed to register %s encoder (%d maximum reached)\n",
+				HVA_PREFIX, hva_encoders[i]->name,
+				HVA_MAX_ENCODERS);
+			return;
+		}
+
+		hva->encoders[hva->nb_of_encoders++] = hva_encoders[i];
+		dev_info(dev, "%s %s encoder registered\n", HVA_PREFIX,
+			 hva_encoders[i]->name);
+	}
+}
+
+static int hva_open_encoder(struct hva_ctx *ctx, u32 streamformat,
+			    u32 pixelformat, struct hva_enc **penc)
+{
+	struct hva_dev *hva = ctx_to_hdev(ctx);
+	struct device *dev = ctx_to_dev(ctx);
+	struct hva_enc *enc;
+	int ret;
+
+	/* find an encoder which can deal with these formats */
+	enc = (struct hva_enc *)hva_find_encoder(ctx, pixelformat,
+						 streamformat);
+	if (!enc) {
+		dev_err(dev, "%s no encoder found matching %4.4s => %4.4s\n",
+			ctx->name, (char *)&pixelformat, (char *)&streamformat);
+		return -EINVAL;
+	}
+
+	dev_dbg(dev, "%s one encoder matching %4.4s => %4.4s\n",
+		ctx->name, (char *)&pixelformat, (char *)&streamformat);
+
+	/* update instance name */
+	snprintf(ctx->name, sizeof(ctx->name), "[%3d:%4.4s]",
+		 hva->instance_id, (char *)&streamformat);
+
+	/* open encoder instance */
+	ret = enc->open(ctx);
+	if (ret) {
+		dev_err(dev, "%s failed to open encoder instance (%d)\n",
+			ctx->name, ret);
+		return ret;
+	}
+
+	dev_dbg(dev, "%s %s encoder opened\n", ctx->name, enc->name);
+
+	*penc = enc;
+
+	return ret;
+}
+
+/*
+ * V4L2 ioctl operations
+ */
+
+static int hva_querycap(struct file *file, void *priv,
+			struct v4l2_capability *cap)
+{
+	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+	struct hva_dev *hva = ctx_to_hdev(ctx);
+
+	strlcpy(cap->driver, HVA_NAME, sizeof(cap->driver));
+	strlcpy(cap->card, hva->vdev->name, sizeof(cap->card));
+	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+		 hva->pdev->name);
+
+	return 0;
+}
+
+static int hva_enum_fmt_stream(struct file *file, void *priv,
+			       struct v4l2_fmtdesc *f)
+{
+	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+	struct hva_dev *hva = ctx_to_hdev(ctx);
+
+	if (unlikely(f->index >= hva->nb_of_streamformats))
+		return -EINVAL;
+
+	f->pixelformat = hva->streamformats[f->index];
+
+	return 0;
+}
+
+static int hva_enum_fmt_frame(struct file *file, void *priv,
+			      struct v4l2_fmtdesc *f)
+{
+	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+	struct hva_dev *hva = ctx_to_hdev(ctx);
+
+	if (unlikely(f->index >= hva->nb_of_pixelformats))
+		return -EINVAL;
+
+	f->pixelformat = hva->pixelformats[f->index];
+
+	return 0;
+}
+
+static int hva_g_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
+{
+	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+	struct hva_streaminfo *streaminfo = &ctx->streaminfo;
+
+	f->fmt.pix.width = streaminfo->width;
+	f->fmt.pix.height = streaminfo->height;
+	f->fmt.pix.field = V4L2_FIELD_NONE;
+	f->fmt.pix.colorspace = ctx->colorspace;
+	f->fmt.pix.xfer_func = ctx->xfer_func;
+	f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+	f->fmt.pix.quantization = ctx->quantization;
+	f->fmt.pix.pixelformat = streaminfo->streamformat;
+	f->fmt.pix.bytesperline = 0;
+	f->fmt.pix.sizeimage = ctx->max_stream_size;
+
+	return 0;
+}
+
+static int hva_g_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
+{
+	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+	struct hva_frameinfo *frameinfo = &ctx->frameinfo;
+
+	f->fmt.pix.width = frameinfo->width;
+	f->fmt.pix.height = frameinfo->height;
+	f->fmt.pix.field = V4L2_FIELD_NONE;
+	f->fmt.pix.colorspace = ctx->colorspace;
+	f->fmt.pix.xfer_func = ctx->xfer_func;
+	f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+	f->fmt.pix.quantization = ctx->quantization;
+	f->fmt.pix.pixelformat = frameinfo->pixelformat;
+	f->fmt.pix.bytesperline = frame_stride(frameinfo->aligned_width,
+					       frameinfo->pixelformat);
+	f->fmt.pix.sizeimage = frameinfo->size;
+
+	return 0;
+}
+
+static int hva_try_fmt_stream(struct file *file, void *priv,
+			      struct v4l2_format *f)
+{
+	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+	struct device *dev = ctx_to_dev(ctx);
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	u32 streamformat = pix->pixelformat;
+	const struct hva_enc *enc;
+	u32 width, height;
+	u32 stream_size;
+
+	enc = hva_find_encoder(ctx, ctx->frameinfo.pixelformat, streamformat);
+	if (!enc) {
+		dev_dbg(dev,
+			"%s V4L2 TRY_FMT (CAPTURE): unsupported format %.4s\n",
+			ctx->name, (char *)&pix->pixelformat);
+		return -EINVAL;
+	}
+
+	width = pix->width;
+	height = pix->height;
+	if (ctx->flags & HVA_FLAG_FRAMEINFO) {
+		/*
+		 * if the frame resolution is already fixed, only allow the
+		 * same stream resolution
+		 */
+		pix->width = ctx->frameinfo.width;
+		pix->height = ctx->frameinfo.height;
+		if ((pix->width != width) || (pix->height != height))
+			dev_dbg(dev,
+				"%s V4L2 TRY_FMT (CAPTURE): resolution updated %dx%d -> %dx%d to fit frame resolution\n",
+				ctx->name, width, height,
+				pix->width, pix->height);
+	} else {
+		/* adjust width & height */
+		v4l_bound_align_image(&pix->width,
+				      HVA_MIN_WIDTH, enc->max_width,
+				      0,
+				      &pix->height,
+				      HVA_MIN_HEIGHT, enc->max_height,
+				      0,
+				      0);
+
+		if ((pix->width != width) || (pix->height != height))
+			dev_dbg(dev,
+				"%s V4L2 TRY_FMT (CAPTURE): resolution updated %dx%d -> %dx%d to fit min/max/alignment\n",
+				ctx->name, width, height,
+				pix->width, pix->height);
+	}
+
+	stream_size = estimated_stream_size(pix->width, pix->height);
+	if (pix->sizeimage < stream_size)
+		pix->sizeimage = stream_size;
+
+	pix->bytesperline = 0;
+	pix->colorspace = ctx->colorspace;
+	pix->xfer_func = ctx->xfer_func;
+	pix->ycbcr_enc = ctx->ycbcr_enc;
+	pix->quantization = ctx->quantization;
+	pix->field = V4L2_FIELD_NONE;
+
+	return 0;
+}
+
+static int hva_try_fmt_frame(struct file *file, void *priv,
+			     struct v4l2_format *f)
+{
+	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+	struct device *dev = ctx_to_dev(ctx);
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	u32 pixelformat = pix->pixelformat;
+	const struct hva_enc *enc;
+	u32 width, height;
+
+	enc = hva_find_encoder(ctx, pixelformat, ctx->streaminfo.streamformat);
+	if (!enc) {
+		dev_dbg(dev,
+			"%s V4L2 TRY_FMT (OUTPUT): unsupported format %.4s\n",
+			ctx->name, (char *)&pixelformat);
+		return -EINVAL;
+	}
+
+	/* adjust width & height */
+	width = pix->width;
+	height = pix->height;
+	v4l_bound_align_image(&pix->width,
+			      HVA_MIN_WIDTH, HVA_MAX_WIDTH,
+			      frame_alignment(pixelformat) - 1,
+			      &pix->height,
+			      HVA_MIN_HEIGHT, HVA_MAX_HEIGHT,
+			      frame_alignment(pixelformat) - 1,
+			      0);
+
+	if ((pix->width != width) || (pix->height != height))
+		dev_dbg(dev,
+			"%s V4L2 TRY_FMT (OUTPUT): resolution updated %dx%d -> %dx%d to fit min/max/alignment\n",
+			ctx->name, width, height, pix->width, pix->height);
+
+	width = ALIGN(pix->width, HVA_WIDTH_ALIGNMENT);
+	height = ALIGN(pix->height, HVA_HEIGHT_ALIGNMENT);
+
+	if (!pix->colorspace) {
+		pix->colorspace = V4L2_COLORSPACE_REC709;
+		pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+		pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+		pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+	}
+
+	pix->bytesperline = frame_stride(width, pixelformat);
+	pix->sizeimage = frame_size(width, height, pixelformat);
+	pix->field = V4L2_FIELD_NONE;
+
+	return 0;
+}
+
+static int hva_s_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
+{
+	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+	struct device *dev = ctx_to_dev(ctx);
+	struct vb2_queue *vq;
+	int ret;
+
+	ret = hva_try_fmt_stream(file, fh, f);
+	if (ret) {
+		dev_dbg(dev, "%s V4L2 S_FMT (CAPTURE): unsupported format %.4s\n",
+			ctx->name, (char *)&f->fmt.pix.pixelformat);
+		return ret;
+	}
+
+	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+	if (vb2_is_streaming(vq)) {
+		dev_dbg(dev, "%s V4L2 S_FMT (CAPTURE): queue busy\n",
+			ctx->name);
+		return -EBUSY;
+	}
+
+	ctx->max_stream_size = f->fmt.pix.sizeimage;
+	ctx->streaminfo.width = f->fmt.pix.width;
+	ctx->streaminfo.height = f->fmt.pix.height;
+	ctx->streaminfo.streamformat = f->fmt.pix.pixelformat;
+	ctx->flags |= HVA_FLAG_STREAMINFO;
+
+	return 0;
+}
+
+static int hva_s_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
+{
+	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+	struct device *dev = ctx_to_dev(ctx);
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	struct vb2_queue *vq;
+	int ret;
+
+	ret = hva_try_fmt_frame(file, fh, f);
+	if (ret) {
+		dev_dbg(dev, "%s V4L2 S_FMT (OUTPUT): unsupported format %.4s\n",
+			ctx->name, (char *)&pix->pixelformat);
+		return ret;
+	}
+
+	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+	if (vb2_is_streaming(vq)) {
+		dev_dbg(dev, "%s V4L2 S_FMT (OUTPUT): queue busy\n", ctx->name);
+		return -EBUSY;
+	}
+
+	ctx->colorspace = pix->colorspace;
+	ctx->xfer_func = pix->xfer_func;
+	ctx->ycbcr_enc = pix->ycbcr_enc;
+	ctx->quantization = pix->quantization;
+
+	ctx->frameinfo.aligned_width = ALIGN(pix->width, HVA_WIDTH_ALIGNMENT);
+	ctx->frameinfo.aligned_height = ALIGN(pix->height,
+					      HVA_HEIGHT_ALIGNMENT);
+	ctx->frameinfo.size = pix->sizeimage;
+	ctx->frameinfo.pixelformat = pix->pixelformat;
+	ctx->frameinfo.width = pix->width;
+	ctx->frameinfo.height = pix->height;
+	ctx->flags |= HVA_FLAG_FRAMEINFO;
+
+	return 0;
+}
+
+static int hva_g_parm(struct file *file, void *fh, struct v4l2_streamparm *sp)
+{
+	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+	struct v4l2_fract *time_per_frame = &ctx->ctrls.time_per_frame;
+
+	if (sp->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return -EINVAL;
+
+	sp->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+	sp->parm.output.timeperframe.numerator = time_per_frame->numerator;
+	sp->parm.output.timeperframe.denominator =
+		time_per_frame->denominator;
+
+	return 0;
+}
+
+static int hva_s_parm(struct file *file, void *fh, struct v4l2_streamparm *sp)
+{
+	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+	struct v4l2_fract *time_per_frame = &ctx->ctrls.time_per_frame;
+
+	if (sp->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return -EINVAL;
+
+	if (!sp->parm.output.timeperframe.numerator ||
+	    !sp->parm.output.timeperframe.denominator)
+		return hva_g_parm(file, fh, sp);
+
+	sp->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+	time_per_frame->numerator = sp->parm.output.timeperframe.numerator;
+	time_per_frame->denominator =
+		sp->parm.output.timeperframe.denominator;
+
+	return 0;
+}
+
+static int hva_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+	struct device *dev = ctx_to_dev(ctx);
+
+	if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		/*
+		 * depending on the targeted compressed video format, the
+		 * capture buffer might contain headers (e.g. H.264 SPS/PPS)
+		 * filled in by the driver client; the size of these data is
+		 * copied from the bytesused field of the V4L2 buffer in the
+		 * payload field of the hva stream buffer
+		 */
+		struct vb2_queue *vq;
+		struct hva_stream *stream;
+
+		vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, buf->type);
+
+		if (buf->index >= vq->num_buffers) {
+			dev_dbg(dev, "%s buffer index %d out of range (%d)\n",
+				ctx->name, buf->index, vq->num_buffers);
+			return -EINVAL;
+		}
+
+		stream = (struct hva_stream *)vq->bufs[buf->index];
+		stream->bytesused = buf->bytesused;
+	}
+
+	return v4l2_m2m_qbuf(file, ctx->fh.m2m_ctx, buf);
+}
+
+/* V4L2 ioctl ops */
+static const struct v4l2_ioctl_ops hva_ioctl_ops = {
+	.vidioc_querycap		= hva_querycap,
+	.vidioc_enum_fmt_vid_cap	= hva_enum_fmt_stream,
+	.vidioc_enum_fmt_vid_out	= hva_enum_fmt_frame,
+	.vidioc_g_fmt_vid_cap		= hva_g_fmt_stream,
+	.vidioc_g_fmt_vid_out		= hva_g_fmt_frame,
+	.vidioc_try_fmt_vid_cap		= hva_try_fmt_stream,
+	.vidioc_try_fmt_vid_out		= hva_try_fmt_frame,
+	.vidioc_s_fmt_vid_cap		= hva_s_fmt_stream,
+	.vidioc_s_fmt_vid_out		= hva_s_fmt_frame,
+	.vidioc_g_parm			= hva_g_parm,
+	.vidioc_s_parm			= hva_s_parm,
+	.vidioc_reqbufs			= v4l2_m2m_ioctl_reqbufs,
+	.vidioc_create_bufs             = v4l2_m2m_ioctl_create_bufs,
+	.vidioc_querybuf		= v4l2_m2m_ioctl_querybuf,
+	.vidioc_expbuf			= v4l2_m2m_ioctl_expbuf,
+	.vidioc_qbuf			= hva_qbuf,
+	.vidioc_dqbuf			= v4l2_m2m_ioctl_dqbuf,
+	.vidioc_streamon		= v4l2_m2m_ioctl_streamon,
+	.vidioc_streamoff		= v4l2_m2m_ioctl_streamoff,
+	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
+};
+
+/*
+ * V4L2 control operations
+ */
+
+static int hva_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct hva_ctx *ctx = container_of(ctrl->handler, struct hva_ctx,
+					   ctrl_handler);
+	struct device *dev = ctx_to_dev(ctx);
+
+	dev_dbg(dev, "%s S_CTRL: id = %d, val = %d\n", ctx->name,
+		ctrl->id, ctrl->val);
+
+	switch (ctrl->id) {
+	case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+		ctx->ctrls.bitrate_mode = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+		ctx->ctrls.gop_size = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_BITRATE:
+		ctx->ctrls.bitrate = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_ASPECT:
+		ctx->ctrls.aspect = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+		ctx->ctrls.profile = ctrl->val;
+		if (ctx->flags & HVA_FLAG_STREAMINFO)
+			snprintf(ctx->streaminfo.profile,
+				 sizeof(ctx->streaminfo.profile),
+				 "%s profile",
+				 v4l2_ctrl_get_menu(ctrl->id)[ctrl->val]);
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+		ctx->ctrls.level = ctrl->val;
+		if (ctx->flags & HVA_FLAG_STREAMINFO)
+			snprintf(ctx->streaminfo.level,
+				 sizeof(ctx->streaminfo.level),
+				 "level %s",
+				 v4l2_ctrl_get_menu(ctrl->id)[ctrl->val]);
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+		ctx->ctrls.entropy_mode = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:
+		ctx->ctrls.cpb_size = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
+		ctx->ctrls.dct8x8 = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
+		ctx->ctrls.qpmin = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
+		ctx->ctrls.qpmax = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
+		ctx->ctrls.vui_sar = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+		ctx->ctrls.vui_sar_idc = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING:
+		ctx->ctrls.sei_fp = ctrl->val;
+		break;
+	case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
+		ctx->ctrls.sei_fp_type = ctrl->val;
+		break;
+	default:
+		dev_dbg(dev, "%s S_CTRL: invalid control (id = %d)\n",
+			ctx->name, ctrl->id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* V4L2 control ops */
+static const struct v4l2_ctrl_ops hva_ctrl_ops = {
+	.s_ctrl = hva_s_ctrl,
+};
+
+static int hva_ctrls_setup(struct hva_ctx *ctx)
+{
+	struct device *dev = ctx_to_dev(ctx);
+	u64 mask;
+	enum v4l2_mpeg_video_h264_sei_fp_arrangement_type sei_fp_type =
+		V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM;
+
+	v4l2_ctrl_handler_init(&ctx->ctrl_handler, 15);
+
+	v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+			       V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+			       V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+			       0,
+			       V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);
+
+	v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+			  V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+			  1, 60, 1, 16);
+
+	v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+			  V4L2_CID_MPEG_VIDEO_BITRATE,
+			  1000, 60000000, 1000, 20000000);
+
+	mask = ~(1 << V4L2_MPEG_VIDEO_ASPECT_1x1);
+	v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+			       V4L2_CID_MPEG_VIDEO_ASPECT,
+			       V4L2_MPEG_VIDEO_ASPECT_1x1,
+			       mask,
+			       V4L2_MPEG_VIDEO_ASPECT_1x1);
+
+	mask = ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+		 (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+		 (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
+		 (1 << V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH));
+	v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+			       V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+			       V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH,
+			       mask,
+			       V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
+
+	v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+			       V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+			       V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
+			       0,
+			       V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
+
+	v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+			       V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
+			       V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+			       0,
+			       V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC);
+
+	v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+			  V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE,
+			  1, 10000, 1, 3000);
+
+	v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+			  V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM,
+			  0, 1, 1, 0);
+
+	v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+			  V4L2_CID_MPEG_VIDEO_H264_MIN_QP,
+			  0, 51, 1, 5);
+
+	v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+			  V4L2_CID_MPEG_VIDEO_H264_MAX_QP,
+			  0, 51, 1, 51);
+
+	v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+			  V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE,
+			  0, 1, 1, 1);
+
+	mask = ~(1 << V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1);
+	v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+			       V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC,
+			       V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1,
+			       mask,
+			       V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1);
+
+	v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+			  V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING,
+			  0, 1, 1, 0);
+
+	mask = ~(1 << sei_fp_type);
+	v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+			       V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE,
+			       sei_fp_type,
+			       mask,
+			       sei_fp_type);
+
+	if (ctx->ctrl_handler.error) {
+		int err = ctx->ctrl_handler.error;
+
+		dev_dbg(dev, "%s controls setup failed (%d)\n",
+			ctx->name, err);
+		v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+		return err;
+	}
+
+	v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+
+	/* set default time per frame */
+	ctx->ctrls.time_per_frame.numerator = HVA_DEFAULT_FRAME_NUM;
+	ctx->ctrls.time_per_frame.denominator = HVA_DEFAULT_FRAME_DEN;
+
+	return 0;
+}
+
+/*
+ * mem-to-mem operations
+ */
+
+static void hva_run_work(struct work_struct *work)
+{
+	struct hva_ctx *ctx = container_of(work, struct hva_ctx, run_work);
+	struct vb2_v4l2_buffer *src_buf, *dst_buf;
+	const struct hva_enc *enc = ctx->enc;
+	struct hva_frame *frame;
+	struct hva_stream *stream;
+	int ret;
+
+	/* protect instance against reentrancy */
+	mutex_lock(&ctx->lock);
+
+	src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+	dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+	frame = to_hva_frame(src_buf);
+	stream = to_hva_stream(dst_buf);
+	frame->vbuf.sequence = ctx->frame_num++;
+
+	ret = enc->encode(ctx, frame, stream);
+
+	vb2_set_plane_payload(&dst_buf->vb2_buf, 0, stream->bytesused);
+	if (ret) {
+		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+	} else {
+		/* propagate frame timestamp */
+		dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+		dst_buf->field = V4L2_FIELD_NONE;
+		dst_buf->sequence = ctx->stream_num - 1;
+
+		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+	}
+
+	mutex_unlock(&ctx->lock);
+
+	v4l2_m2m_job_finish(ctx->hva_dev->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static void hva_device_run(void *priv)
+{
+	struct hva_ctx *ctx = priv;
+	struct hva_dev *hva = ctx_to_hdev(ctx);
+
+	queue_work(hva->work_queue, &ctx->run_work);
+}
+
+static void hva_job_abort(void *priv)
+{
+	struct hva_ctx *ctx = priv;
+	struct device *dev = ctx_to_dev(ctx);
+
+	dev_dbg(dev, "%s aborting job\n", ctx->name);
+
+	ctx->aborting = true;
+}
+
+static int hva_job_ready(void *priv)
+{
+	struct hva_ctx *ctx = priv;
+	struct device *dev = ctx_to_dev(ctx);
+
+	if (!v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx)) {
+		dev_dbg(dev, "%s job not ready: no frame buffers\n",
+			ctx->name);
+		return 0;
+	}
+
+	if (!v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx)) {
+		dev_dbg(dev, "%s job not ready: no stream buffers\n",
+			ctx->name);
+		return 0;
+	}
+
+	if (ctx->aborting) {
+		dev_dbg(dev, "%s job not ready: aborting\n", ctx->name);
+		return 0;
+	}
+
+	return 1;
+}
+
+/* mem-to-mem ops */
+static const struct v4l2_m2m_ops hva_m2m_ops = {
+	.device_run	= hva_device_run,
+	.job_abort	= hva_job_abort,
+	.job_ready	= hva_job_ready,
+};
+
+/*
+ * VB2 queue operations
+ */
+
+static int hva_queue_setup(struct vb2_queue *vq,
+			   unsigned int *num_buffers, unsigned int *num_planes,
+			   unsigned int sizes[], struct device *alloc_devs[])
+{
+	struct hva_ctx *ctx = vb2_get_drv_priv(vq);
+	struct device *dev = ctx_to_dev(ctx);
+	unsigned int size;
+
+	dev_dbg(dev, "%s %s queue setup: num_buffers %d\n", ctx->name,
+		to_type_str(vq->type), *num_buffers);
+
+	size = vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ?
+		ctx->frameinfo.size : ctx->max_stream_size;
+
+	if (*num_planes)
+		return sizes[0] < size ? -EINVAL : 0;
+
+	/* only one plane supported */
+	*num_planes = 1;
+	sizes[0] = size;
+
+	return 0;
+}
+
+static int hva_buf_prepare(struct vb2_buffer *vb)
+{
+	struct hva_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct device *dev = ctx_to_dev(ctx);
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+	if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		struct hva_frame *frame = to_hva_frame(vbuf);
+
+		if (vbuf->field == V4L2_FIELD_ANY)
+			vbuf->field = V4L2_FIELD_NONE;
+		if (vbuf->field != V4L2_FIELD_NONE) {
+			dev_dbg(dev,
+				"%s frame[%d] prepare: %d field not supported\n",
+				ctx->name, vb->index, vbuf->field);
+			return -EINVAL;
+		}
+
+		if (!frame->prepared) {
+			/* get memory addresses */
+			frame->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+			frame->paddr = vb2_dma_contig_plane_dma_addr(
+					&vbuf->vb2_buf, 0);
+			frame->info = ctx->frameinfo;
+			frame->prepared = true;
+
+			dev_dbg(dev,
+				"%s frame[%d] prepared; virt=%p, phy=%pad\n",
+				ctx->name, vb->index,
+				frame->vaddr, &frame->paddr);
+		}
+	} else {
+		struct hva_stream *stream = to_hva_stream(vbuf);
+
+		if (!stream->prepared) {
+			/* get memory addresses */
+			stream->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+			stream->paddr = vb2_dma_contig_plane_dma_addr(
+					&vbuf->vb2_buf, 0);
+			stream->size = vb2_plane_size(&vbuf->vb2_buf, 0);
+			stream->prepared = true;
+
+			dev_dbg(dev,
+				"%s stream[%d] prepared; virt=%p, phy=%pad\n",
+				ctx->name, vb->index,
+				stream->vaddr, &stream->paddr);
+		}
+	}
+
+	return 0;
+}
+
+static void hva_buf_queue(struct vb2_buffer *vb)
+{
+	struct hva_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+	if (ctx->fh.m2m_ctx)
+		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int hva_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+	struct hva_ctx *ctx = vb2_get_drv_priv(vq);
+	struct hva_dev *hva = ctx_to_hdev(ctx);
+	struct device *dev = ctx_to_dev(ctx);
+	struct vb2_v4l2_buffer *vbuf;
+	int ret;
+	unsigned int i;
+	bool found = false;
+
+	dev_dbg(dev, "%s %s start streaming\n", ctx->name,
+		to_type_str(vq->type));
+
+	/* open encoder when both start_streaming have been called */
+	if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+		if (!vb2_start_streaming_called(&ctx->fh.m2m_ctx->cap_q_ctx.q))
+			return 0;
+	} else {
+		if (!vb2_start_streaming_called(&ctx->fh.m2m_ctx->out_q_ctx.q))
+			return 0;
+	}
+
+	/* store the instance context in the instances array */
+	for (i = 0; i < HVA_MAX_INSTANCES; i++) {
+		if (!hva->instances[i]) {
+			hva->instances[i] = ctx;
+			/* save the context identifier in the context */
+			ctx->id = i;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		dev_err(dev, "%s maximum instances reached\n", ctx->name);
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	hva->nb_of_instances++;
+
+	if (!ctx->enc) {
+		ret = hva_open_encoder(ctx,
+				       ctx->streaminfo.streamformat,
+				       ctx->frameinfo.pixelformat,
+				       &ctx->enc);
+		if (ret < 0)
+			goto err_ctx;
+	}
+
+	return 0;
+
+err_ctx:
+	hva->instances[ctx->id] = NULL;
+	hva->nb_of_instances--;
+err:
+	if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		/* return of all pending buffers to vb2 (in queued state) */
+		while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
+	} else {
+		/* return of all pending buffers to vb2 (in queued state) */
+		while ((vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
+	}
+
+	return ret;
+}
+
+static void hva_stop_streaming(struct vb2_queue *vq)
+{
+	struct hva_ctx *ctx = vb2_get_drv_priv(vq);
+	struct hva_dev *hva = ctx_to_hdev(ctx);
+	struct device *dev = ctx_to_dev(ctx);
+	const struct hva_enc *enc = ctx->enc;
+	struct vb2_v4l2_buffer *vbuf;
+
+	dev_dbg(dev, "%s %s stop streaming\n", ctx->name,
+		to_type_str(vq->type));
+
+	if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		/* return of all pending buffers to vb2 (in error state) */
+		ctx->frame_num = 0;
+		while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+	} else {
+		/* return of all pending buffers to vb2 (in error state) */
+		ctx->stream_num = 0;
+		while ((vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+	}
+
+	if ((V4L2_TYPE_IS_OUTPUT(vq->type) &&
+	     vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q)) ||
+	    (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
+	     vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))) {
+		dev_dbg(dev, "%s %s out=%d cap=%d\n",
+			ctx->name, to_type_str(vq->type),
+			vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q),
+			vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q));
+		return;
+	}
+
+	/* close encoder when both stop_streaming have been called */
+	if (enc) {
+		dev_dbg(dev, "%s %s encoder closed\n", ctx->name, enc->name);
+		enc->close(ctx);
+		ctx->enc = NULL;
+
+		/* clear instance context in instances array */
+		hva->instances[ctx->id] = NULL;
+		hva->nb_of_instances--;
+	}
+
+	ctx->aborting = false;
+}
+
+/* VB2 queue ops */
+static const struct vb2_ops hva_qops = {
+	.queue_setup		= hva_queue_setup,
+	.buf_prepare		= hva_buf_prepare,
+	.buf_queue		= hva_buf_queue,
+	.start_streaming	= hva_start_streaming,
+	.stop_streaming		= hva_stop_streaming,
+	.wait_prepare		= vb2_ops_wait_prepare,
+	.wait_finish		= vb2_ops_wait_finish,
+};
+
+/*
+ * V4L2 file operations
+ */
+
+static int queue_init(struct hva_ctx *ctx, struct vb2_queue *vq)
+{
+	vq->io_modes = VB2_MMAP | VB2_DMABUF;
+	vq->drv_priv = ctx;
+	vq->ops = &hva_qops;
+	vq->mem_ops = &vb2_dma_contig_memops;
+	vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	vq->lock = &ctx->hva_dev->lock;
+
+	return vb2_queue_init(vq);
+}
+
+static int hva_queue_init(void *priv, struct vb2_queue *src_vq,
+			  struct vb2_queue *dst_vq)
+{
+	struct hva_ctx *ctx = priv;
+	int ret;
+
+	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	src_vq->buf_struct_size = sizeof(struct hva_frame);
+	src_vq->min_buffers_needed = MIN_FRAMES;
+	src_vq->dev = ctx->hva_dev->dev;
+
+	ret = queue_init(ctx, src_vq);
+	if (ret)
+		return ret;
+
+	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	dst_vq->buf_struct_size = sizeof(struct hva_stream);
+	dst_vq->min_buffers_needed = MIN_STREAMS;
+	dst_vq->dev = ctx->hva_dev->dev;
+
+	return queue_init(ctx, dst_vq);
+}
+
+static int hva_open(struct file *file)
+{
+	struct hva_dev *hva = video_drvdata(file);
+	struct device *dev = hva_to_dev(hva);
+	struct hva_ctx *ctx;
+	int ret;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	ctx->hva_dev = hva;
+
+	INIT_WORK(&ctx->run_work, hva_run_work);
+	v4l2_fh_init(&ctx->fh, video_devdata(file));
+	file->private_data = &ctx->fh;
+	v4l2_fh_add(&ctx->fh);
+
+	ret = hva_ctrls_setup(ctx);
+	if (ret) {
+		dev_err(dev, "%s [x:x] failed to setup controls\n",
+			HVA_PREFIX);
+		goto err_fh;
+	}
+	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+
+	mutex_init(&ctx->lock);
+
+	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(hva->m2m_dev, ctx,
+					    &hva_queue_init);
+	if (IS_ERR(ctx->fh.m2m_ctx)) {
+		ret = PTR_ERR(ctx->fh.m2m_ctx);
+		dev_err(dev, "%s failed to initialize m2m context (%d)\n",
+			HVA_PREFIX, ret);
+		goto err_ctrls;
+	}
+
+	/* set the instance name */
+	mutex_lock(&hva->lock);
+	hva->instance_id++;
+	snprintf(ctx->name, sizeof(ctx->name), "[%3d:----]",
+		 hva->instance_id);
+	mutex_unlock(&hva->lock);
+
+	/* default parameters for frame and stream */
+	set_default_params(ctx);
+
+	dev_info(dev, "%s encoder instance created\n", ctx->name);
+
+	return 0;
+
+err_ctrls:
+	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+err_fh:
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	kfree(ctx);
+out:
+	return ret;
+}
+
+static int hva_release(struct file *file)
+{
+	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+	struct hva_dev *hva = ctx_to_hdev(ctx);
+	struct device *dev = ctx_to_dev(ctx);
+	const struct hva_enc *enc = ctx->enc;
+
+	if (enc) {
+		dev_dbg(dev, "%s %s encoder closed\n", ctx->name, enc->name);
+		enc->close(ctx);
+		ctx->enc = NULL;
+
+		/* clear instance context in instances array */
+		hva->instances[ctx->id] = NULL;
+		hva->nb_of_instances--;
+	}
+
+	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+
+	dev_info(dev, "%s encoder instance released\n", ctx->name);
+
+	kfree(ctx);
+
+	return 0;
+}
+
+/* V4L2 file ops */
+static const struct v4l2_file_operations hva_fops = {
+	.owner			= THIS_MODULE,
+	.open			= hva_open,
+	.release		= hva_release,
+	.unlocked_ioctl		= video_ioctl2,
+	.mmap			= v4l2_m2m_fop_mmap,
+	.poll			= v4l2_m2m_fop_poll,
+};
+
+/*
+ * Platform device operations
+ */
+
+static int hva_register_device(struct hva_dev *hva)
+{
+	int ret;
+	struct video_device *vdev;
+	struct device *dev;
+
+	if (!hva)
+		return -ENODEV;
+	dev = hva_to_dev(hva);
+
+	hva->m2m_dev = v4l2_m2m_init(&hva_m2m_ops);
+	if (IS_ERR(hva->m2m_dev)) {
+		dev_err(dev, "%s failed to initialize v4l2-m2m device\n",
+			HVA_PREFIX);
+		ret = PTR_ERR(hva->m2m_dev);
+		goto err;
+	}
+
+	vdev = video_device_alloc();
+	if (!vdev) {
+		dev_err(dev, "%s failed to allocate video device\n",
+			HVA_PREFIX);
+		ret = -ENOMEM;
+		goto err_m2m_release;
+	}
+
+	vdev->fops = &hva_fops;
+	vdev->ioctl_ops = &hva_ioctl_ops;
+	vdev->release = video_device_release;
+	vdev->lock = &hva->lock;
+	vdev->vfl_dir = VFL_DIR_M2M;
+	vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
+	vdev->v4l2_dev = &hva->v4l2_dev;
+	snprintf(vdev->name, sizeof(vdev->name), "%s%lx", HVA_NAME,
+		 hva->ip_version);
+
+	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+	if (ret) {
+		dev_err(dev, "%s failed to register video device\n",
+			HVA_PREFIX);
+		goto err_vdev_release;
+	}
+
+	hva->vdev = vdev;
+	video_set_drvdata(vdev, hva);
+	return 0;
+
+err_vdev_release:
+	video_device_release(vdev);
+err_m2m_release:
+	v4l2_m2m_release(hva->m2m_dev);
+err:
+	return ret;
+}
+
+static void hva_unregister_device(struct hva_dev *hva)
+{
+	if (!hva)
+		return;
+
+	if (hva->m2m_dev)
+		v4l2_m2m_release(hva->m2m_dev);
+
+	video_unregister_device(hva->vdev);
+}
+
+static int hva_probe(struct platform_device *pdev)
+{
+	struct hva_dev *hva;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	hva = devm_kzalloc(dev, sizeof(*hva), GFP_KERNEL);
+	if (!hva) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	hva->dev = dev;
+	hva->pdev = pdev;
+	platform_set_drvdata(pdev, hva);
+
+	mutex_init(&hva->lock);
+
+	/* probe hardware */
+	ret = hva_hw_probe(pdev, hva);
+	if (ret)
+		goto err;
+
+	/* register all available encoders */
+	register_encoders(hva);
+
+	/* register all supported formats */
+	register_formats(hva);
+
+	/* register on V4L2 */
+	ret = v4l2_device_register(dev, &hva->v4l2_dev);
+	if (ret) {
+		dev_err(dev, "%s %s failed to register V4L2 device\n",
+			HVA_PREFIX, HVA_NAME);
+		goto err_hw;
+	}
+
+	hva->work_queue = create_workqueue(HVA_NAME);
+	if (!hva->work_queue) {
+		dev_err(dev, "%s %s failed to allocate work queue\n",
+			HVA_PREFIX, HVA_NAME);
+		ret = -ENOMEM;
+		goto err_v4l2;
+	}
+
+	/* register device */
+	ret = hva_register_device(hva);
+	if (ret)
+		goto err_work_queue;
+
+	dev_info(dev, "%s %s registered as /dev/video%d\n", HVA_PREFIX,
+		 HVA_NAME, hva->vdev->num);
+
+	return 0;
+
+err_work_queue:
+	destroy_workqueue(hva->work_queue);
+err_v4l2:
+	v4l2_device_unregister(&hva->v4l2_dev);
+err_hw:
+	hva_hw_remove(hva);
+err:
+	return ret;
+}
+
+static int hva_remove(struct platform_device *pdev)
+{
+	struct hva_dev *hva = platform_get_drvdata(pdev);
+	struct device *dev = hva_to_dev(hva);
+
+	hva_unregister_device(hva);
+
+	destroy_workqueue(hva->work_queue);
+
+	hva_hw_remove(hva);
+
+	v4l2_device_unregister(&hva->v4l2_dev);
+
+	dev_info(dev, "%s %s removed\n", HVA_PREFIX, pdev->name);
+
+	return 0;
+}
+
+/* PM ops */
+static const struct dev_pm_ops hva_pm_ops = {
+	.runtime_suspend	= hva_hw_runtime_suspend,
+	.runtime_resume		= hva_hw_runtime_resume,
+};
+
+static const struct of_device_id hva_match_types[] = {
+	{
+	 .compatible = "st,st-hva",
+	},
+	{ /* end node */ }
+};
+
+MODULE_DEVICE_TABLE(of, hva_match_types);
+
+static struct platform_driver hva_driver = {
+	.probe  = hva_probe,
+	.remove = hva_remove,
+	.driver = {
+		.name		= HVA_NAME,
+		.of_match_table	= hva_match_types,
+		.pm		= &hva_pm_ops,
+		},
+};
+
+module_platform_driver(hva_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics HVA video encoder V4L2 driver");
diff --git a/drivers/media/platform/sti/hva/hva.h b/drivers/media/platform/sti/hva/hva.h
new file mode 100644
index 0000000..caa5808
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva.h
@@ -0,0 +1,315 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ *          Hugues Fruchet <hugues.fruchet@st.com>
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#ifndef HVA_H
+#define HVA_H
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-mem2mem.h>
+
+#define fh_to_ctx(f)    (container_of(f, struct hva_ctx, fh))
+
+#define hva_to_dev(h)   (h->dev)
+
+#define ctx_to_dev(c)   (c->hva_dev->dev)
+
+#define ctx_to_hdev(c)  (c->hva_dev)
+
+#define HVA_PREFIX "[---:----]"
+
+extern const struct hva_enc nv12h264enc;
+extern const struct hva_enc nv21h264enc;
+
+/**
+ * struct hva_frameinfo - information about hva frame
+ *
+ * @pixelformat:    fourcc code for uncompressed video format
+ * @width:          width of frame
+ * @height:         height of frame
+ * @aligned_width:  width of frame (with encoder alignment constraint)
+ * @aligned_height: height of frame (with encoder alignment constraint)
+ * @size:           maximum size in bytes required for data
+*/
+struct hva_frameinfo {
+	u32	pixelformat;
+	u32	width;
+	u32	height;
+	u32	aligned_width;
+	u32	aligned_height;
+	u32	size;
+};
+
+/**
+ * struct hva_streaminfo - information about hva stream
+ *
+ * @streamformat: fourcc code of compressed video format (H.264...)
+ * @width:        width of stream
+ * @height:       height of stream
+ * @profile:      profile string
+ * @level:        level string
+ */
+struct hva_streaminfo {
+	u32	streamformat;
+	u32	width;
+	u32	height;
+	u8	profile[32];
+	u8	level[32];
+};
+
+/**
+ * struct hva_controls - hva controls set
+ *
+ * @time_per_frame: time per frame in seconds
+ * @bitrate_mode:   bitrate mode (constant bitrate or variable bitrate)
+ * @gop_size:       groupe of picture size
+ * @bitrate:        bitrate (in bps)
+ * @aspect:         video aspect
+ * @profile:        H.264 profile
+ * @level:          H.264 level
+ * @entropy_mode:   H.264 entropy mode (CABAC or CVLC)
+ * @cpb_size:       coded picture buffer size (in kB)
+ * @dct8x8:         transform mode 8x8 enable
+ * @qpmin:          minimum quantizer
+ * @qpmax:          maximum quantizer
+ * @vui_sar:        pixel aspect ratio enable
+ * @vui_sar_idc:    pixel aspect ratio identifier
+ * @sei_fp:         sei frame packing arrangement enable
+ * @sei_fp_type:    sei frame packing arrangement type
+ */
+struct hva_controls {
+	struct v4l2_fract					time_per_frame;
+	enum v4l2_mpeg_video_bitrate_mode			bitrate_mode;
+	u32							gop_size;
+	u32							bitrate;
+	enum v4l2_mpeg_video_aspect				aspect;
+	enum v4l2_mpeg_video_h264_profile			profile;
+	enum v4l2_mpeg_video_h264_level				level;
+	enum v4l2_mpeg_video_h264_entropy_mode			entropy_mode;
+	u32							cpb_size;
+	bool							dct8x8;
+	u32							qpmin;
+	u32							qpmax;
+	bool							vui_sar;
+	enum v4l2_mpeg_video_h264_vui_sar_idc			vui_sar_idc;
+	bool							sei_fp;
+	enum v4l2_mpeg_video_h264_sei_fp_arrangement_type	sei_fp_type;
+};
+
+/**
+ * struct hva_frame - hva frame buffer (output)
+ *
+ * @vbuf:     video buffer information for V4L2
+ * @list:     V4L2 m2m list that the frame belongs to
+ * @info:     frame information (width, height, format, alignment...)
+ * @paddr:    physical address (for hardware)
+ * @vaddr:    virtual address (kernel can read/write)
+ * @prepared: true if vaddr/paddr are resolved
+ */
+struct hva_frame {
+	struct vb2_v4l2_buffer	vbuf;
+	struct list_head	list;
+	struct hva_frameinfo	info;
+	dma_addr_t		paddr;
+	void			*vaddr;
+	bool			prepared;
+};
+
+/*
+ * to_hva_frame() - cast struct vb2_v4l2_buffer * to struct hva_frame *
+ */
+#define to_hva_frame(vb) \
+	container_of(vb, struct hva_frame, vbuf)
+
+/**
+ * struct hva_stream - hva stream buffer (capture)
+ *
+ * @v4l2:       video buffer information for V4L2
+ * @list:       V4L2 m2m list that the frame belongs to
+ * @paddr:      physical address (for hardware)
+ * @vaddr:      virtual address (kernel can read/write)
+ * @prepared:   true if vaddr/paddr are resolved
+ * @size:       size of the buffer in bytes
+ * @bytesused:  number of bytes occupied by data in the buffer
+ */
+struct hva_stream {
+	struct vb2_v4l2_buffer	vbuf;
+	struct list_head	list;
+	dma_addr_t		paddr;
+	void			*vaddr;
+	bool			prepared;
+	unsigned int		size;
+	unsigned int		bytesused;
+};
+
+/*
+ * to_hva_stream() - cast struct vb2_v4l2_buffer * to struct hva_stream *
+ */
+#define to_hva_stream(vb) \
+	container_of(vb, struct hva_stream, vbuf)
+
+struct hva_dev;
+struct hva_enc;
+
+/**
+ * struct hva_ctx - context of hva instance
+ *
+ * @hva_dev:         the device that this instance is associated with
+ * @fh:              V4L2 file handle
+ * @ctrl_handler:    V4L2 controls handler
+ * @ctrls:           hva controls set
+ * @id:              instance identifier
+ * @aborting:        true if current job aborted
+ * @name:            instance name (debug purpose)
+ * @run_work:        encode work
+ * @lock:            mutex used to lock access of this context
+ * @flags:           validity of streaminfo and frameinfo fields
+ * @frame_num:       frame number
+ * @stream_num:      stream number
+ * @max_stream_size: maximum size in bytes required for stream data
+ * @colorspace:      colorspace identifier
+ * @xfer_func:       transfer function identifier
+ * @ycbcr_enc:       Y'CbCr encoding identifier
+ * @quantization:    quantization identifier
+ * @streaminfo:      stream properties
+ * @frameinfo:       frame properties
+ * @enc:             current encoder
+ * @priv:            private codec data for this instance, allocated
+ *                   by encoder @open time
+ * @hw_err:          true if hardware error detected
+ */
+struct hva_ctx {
+	struct hva_dev		        *hva_dev;
+	struct v4l2_fh			fh;
+	struct v4l2_ctrl_handler	ctrl_handler;
+	struct hva_controls		ctrls;
+	u8				id;
+	bool				aborting;
+	char				name[100];
+	struct work_struct		run_work;
+	/* mutex protecting this data structure */
+	struct mutex			lock;
+	u32				flags;
+	u32				frame_num;
+	u32				stream_num;
+	u32				max_stream_size;
+	enum v4l2_colorspace		colorspace;
+	enum v4l2_xfer_func		xfer_func;
+	enum v4l2_ycbcr_encoding	ycbcr_enc;
+	enum v4l2_quantization		quantization;
+	struct hva_streaminfo		streaminfo;
+	struct hva_frameinfo		frameinfo;
+	struct hva_enc			*enc;
+	void				*priv;
+	bool				hw_err;
+};
+
+#define HVA_FLAG_STREAMINFO	0x0001
+#define HVA_FLAG_FRAMEINFO	0x0002
+
+#define HVA_MAX_INSTANCES	16
+#define HVA_MAX_ENCODERS	10
+#define HVA_MAX_FORMATS		HVA_MAX_ENCODERS
+
+/**
+ * struct hva_dev - abstraction for hva entity
+ *
+ * @v4l2_dev:            V4L2 device
+ * @vdev:                video device
+ * @pdev:                platform device
+ * @dev:                 device
+ * @lock:                mutex used for critical sections & V4L2 ops
+ *                       serialization
+ * @m2m_dev:             memory-to-memory V4L2 device information
+ * @instances:           opened instances
+ * @nb_of_instances:     number of opened instances
+ * @instance_id:         rolling counter identifying an instance (debug purpose)
+ * @regs:                register io memory access
+ * @esram_addr:          esram address
+ * @esram_size:          esram size
+ * @clk:                 hva clock
+ * @irq_its:             status interruption
+ * @irq_err:             error interruption
+ * @work_queue:          work queue to handle the encode jobs
+ * @protect_mutex:       mutex used to lock access of hardware
+ * @interrupt:           completion interrupt
+ * @ip_version:          IP hardware version
+ * @encoders:            registered encoders
+ * @nb_of_encoders:      number of registered encoders
+ * @pixelformats:        supported uncompressed video formats
+ * @nb_of_pixelformats:  number of supported umcompressed video formats
+ * @streamformats:       supported compressed video formats
+ * @nb_of_streamformats: number of supported compressed video formats
+ * @sfl_reg:             status fifo level register value
+ * @sts_reg:             status register value
+ * @lmi_err_reg:         local memory interface error register value
+ * @emi_err_reg:         external memory interface error register value
+ * @hec_mif_err_reg:     HEC memory interface error register value
+ */
+struct hva_dev {
+	struct v4l2_device	v4l2_dev;
+	struct video_device	*vdev;
+	struct platform_device	*pdev;
+	struct device		*dev;
+	/* mutex protecting vb2_queue structure */
+	struct mutex		lock;
+	struct v4l2_m2m_dev	*m2m_dev;
+	struct hva_ctx		*instances[HVA_MAX_INSTANCES];
+	unsigned int		nb_of_instances;
+	unsigned int		instance_id;
+	void __iomem		*regs;
+	u32			esram_addr;
+	u32			esram_size;
+	struct clk		*clk;
+	int			irq_its;
+	int			irq_err;
+	struct workqueue_struct *work_queue;
+	/* mutex protecting hardware access */
+	struct mutex		protect_mutex;
+	struct completion	interrupt;
+	unsigned long int	ip_version;
+	const struct hva_enc	*encoders[HVA_MAX_ENCODERS];
+	u32			nb_of_encoders;
+	u32			pixelformats[HVA_MAX_FORMATS];
+	u32			nb_of_pixelformats;
+	u32			streamformats[HVA_MAX_FORMATS];
+	u32			nb_of_streamformats;
+	u32			sfl_reg;
+	u32			sts_reg;
+	u32			lmi_err_reg;
+	u32			emi_err_reg;
+	u32			hec_mif_err_reg;
+};
+
+/**
+ * struct hva_enc - hva encoder
+ *
+ * @name:         encoder name
+ * @streamformat: fourcc code for compressed video format (H.264...)
+ * @pixelformat:  fourcc code for uncompressed video format
+ * @max_width:    maximum width of frame for this encoder
+ * @max_height:   maximum height of frame for this encoder
+ * @open:         open encoder
+ * @close:        close encoder
+ * @encode:       encode a frame (struct hva_frame) in a stream
+ *                (struct hva_stream)
+ */
+
+struct hva_enc {
+	const char	*name;
+	u32		streamformat;
+	u32		pixelformat;
+	u32		max_width;
+	u32		max_height;
+	int		(*open)(struct hva_ctx *ctx);
+	int		(*close)(struct hva_ctx *ctx);
+	int		(*encode)(struct hva_ctx *ctx, struct hva_frame *frame,
+				  struct hva_stream *stream);
+};
+
+#endif /* HVA_H */
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index e967fcf..44323cb 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -1379,7 +1379,7 @@
 	cal_runtime_put(ctx->dev);
 }
 
-static struct vb2_ops cal_video_qops = {
+static const struct vb2_ops cal_video_qops = {
 	.queue_setup		= cal_queue_setup,
 	.buf_prepare		= cal_buffer_prepare,
 	.buf_queue		= cal_buffer_queue,
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 55a1458..0189f7f 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -1878,7 +1878,7 @@
 	vpdma_dump_regs(ctx->dev->vpdma);
 }
 
-static struct vb2_ops vpe_qops = {
+static const struct vb2_ops vpe_qops = {
 	.queue_setup	 = vpe_queue_setup,
 	.buf_prepare	 = vpe_buf_prepare,
 	.buf_queue	 = vpe_buf_queue,
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index cd0ff4a..a98f679 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -815,7 +815,7 @@
 	}
 }
 
-static struct vb2_ops vim2m_qops = {
+static const struct vb2_ops vim2m_qops = {
 	.queue_setup	 = vim2m_queue_setup,
 	.buf_prepare	 = vim2m_buf_prepare,
 	.buf_queue	 = vim2m_buf_queue,
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 7f93713..5464fef 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -163,38 +163,38 @@
 
 static const u8 vivid_hdmi_edid[256] = {
 	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
-	0x63, 0x3a, 0xaa, 0x55, 0x00, 0x00, 0x00, 0x00,
-	0x0a, 0x18, 0x01, 0x03, 0x80, 0x10, 0x09, 0x78,
-	0x0e, 0x00, 0xb2, 0xa0, 0x57, 0x49, 0x9b, 0x26,
-	0x10, 0x48, 0x4f, 0x2f, 0xcf, 0x00, 0x31, 0x59,
+	0x31, 0xd8, 0x34, 0x12, 0x00, 0x00, 0x00, 0x00,
+	0x22, 0x1a, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+	0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26,
+	0x0f, 0x50, 0x54, 0x2f, 0xcf, 0x00, 0x31, 0x59,
 	0x45, 0x59, 0x81, 0x80, 0x81, 0x40, 0x90, 0x40,
-	0x95, 0x00, 0xa9, 0x40, 0xb3, 0x00, 0x02, 0x3a,
-	0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
-	0x46, 0x00, 0x10, 0x09, 0x00, 0x00, 0x00, 0x1e,
+	0x95, 0x00, 0xa9, 0x40, 0xb3, 0x00, 0x08, 0xe8,
+	0x00, 0x30, 0xf2, 0x70, 0x5a, 0x80, 0xb0, 0x58,
+	0x8a, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e,
 	0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18,
-	0x5e, 0x11, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20,
-	0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00,  'v',
-	'4',   'l',  '2',  '-',  'h',  'd',  'm',  'i',
-	0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x10,
+	0x87, 0x3c, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20,
+	0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x76,
+	0x69, 0x76, 0x69, 0x64, 0x0a, 0x20, 0x20, 0x20,
+	0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x10,
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xf0,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7b,
 
-	0x02, 0x03, 0x1a, 0xc0, 0x48, 0xa2, 0x10, 0x04,
-	0x02, 0x01, 0x21, 0x14, 0x13, 0x23, 0x09, 0x07,
-	0x07, 0x65, 0x03, 0x0c, 0x00, 0x10, 0x00, 0xe2,
-	0x00, 0x2a, 0x01, 0x1d, 0x00, 0x80, 0x51, 0xd0,
-	0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x1e, 0x8c, 0x0a, 0xd0, 0x8a,
-	0x20, 0xe0, 0x2d, 0x10, 0x10, 0x3e, 0x96, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd7
+	0x02, 0x03, 0x3f, 0xf0, 0x51, 0x61, 0x60, 0x5f,
+	0x5e, 0x5d, 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21,
+	0x20, 0x05, 0x14, 0x02, 0x11, 0x01, 0x23, 0x09,
+	0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03,
+	0x0c, 0x00, 0x10, 0x00, 0x00, 0x78, 0x21, 0x00,
+	0x60, 0x01, 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4,
+	0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xea, 0xe3,
+	0x05, 0x00, 0x00, 0xe3, 0x06, 0x01, 0x00, 0x4d,
+	0xd0, 0x00, 0xa0, 0xf0, 0x70, 0x3e, 0x80, 0x30,
+	0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00,
+	0x1e, 0x1a, 0x36, 0x80, 0xa0, 0x70, 0x38, 0x1f,
+	0x40, 0x30, 0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32,
+	0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80, 0x51,
+	0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0xc0,
+	0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x27,
 };
 
 static int vidioc_querycap(struct file *file, void  *priv,
@@ -839,6 +839,7 @@
 		dev->radio_tx_caps = V4L2_CAP_RDS_OUTPUT | V4L2_CAP_MODULATOR |
 				     V4L2_CAP_READWRITE;
 
+	ret = -ENOMEM;
 	/* initialize the test pattern generator */
 	tpg_init(&dev->tpg, 640, 360);
 	if (tpg_alloc(&dev->tpg, MAX_ZOOM * MAX_WIDTH))
@@ -1033,8 +1034,10 @@
 	 */
 	dev->cec_workqueue =
 		alloc_ordered_workqueue("vivid-%03d-cec", WQ_MEM_RECLAIM, inst);
-	if (!dev->cec_workqueue)
+	if (!dev->cec_workqueue) {
+		ret = -ENOMEM;
 		goto unreg_dev;
+	}
 
 	/* start creating the vb2 queues */
 	if (dev->has_vid_cap) {
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index b98089c..aceb38d 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -761,7 +761,7 @@
 	"Rec. 709",
 	"xvYCC 601",
 	"xvYCC 709",
-	"sYCC",
+	"",
 	"BT.2020",
 	"BT.2020 Constant Luminance",
 	"SMPTE 240M",
@@ -773,6 +773,7 @@
 	.id = VIVID_CID_YCBCR_ENC,
 	.name = "Y'CbCr Encoding",
 	.type = V4L2_CTRL_TYPE_MENU,
+	.menu_skip_mask = 1 << 5,
 	.max = ARRAY_SIZE(vivid_ctrl_ycbcr_enc_strings) - 2,
 	.qmenu = vivid_ctrl_ycbcr_enc_strings,
 };
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index d404a7c..d5c84ec 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -823,7 +823,7 @@
 	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
 		return -EINVAL;
 	if (vivid_is_webcam(dev))
-		return -EINVAL;
+		return -ENODATA;
 
 	sel->r.left = sel->r.top = 0;
 	switch (sel->target) {
@@ -872,7 +872,7 @@
 	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
 		return -EINVAL;
 	if (vivid_is_webcam(dev))
-		return -EINVAL;
+		return -ENODATA;
 
 	switch (s->target) {
 	case V4L2_SEL_TGT_CROP:
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h
index 06a2ec7..b23fa87 100644
--- a/drivers/media/platform/vsp1/vsp1.h
+++ b/drivers/media/platform/vsp1/vsp1.h
@@ -53,6 +53,7 @@
 
 struct vsp1_device_info {
 	u32 version;
+	const char *model;
 	unsigned int gen;
 	unsigned int features;
 	unsigned int rpf_count;
@@ -65,6 +66,7 @@
 struct vsp1_device {
 	struct device *dev;
 	const struct vsp1_device_info *info;
+	u32 version;
 
 	void __iomem *mmio;
 	struct rcar_fcp_device *fcp;
diff --git a/drivers/media/platform/vsp1/vsp1_bru.c b/drivers/media/platform/vsp1/vsp1_bru.c
index 8268b87..ee8355c 100644
--- a/drivers/media/platform/vsp1/vsp1_bru.c
+++ b/drivers/media/platform/vsp1/vsp1_bru.c
@@ -142,10 +142,15 @@
 	struct vsp1_bru *bru = to_bru(subdev);
 	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
+	int ret = 0;
+
+	mutex_lock(&bru->entity.lock);
 
 	config = vsp1_entity_get_pad_config(&bru->entity, cfg, fmt->which);
-	if (!config)
-		return -EINVAL;
+	if (!config) {
+		ret = -EINVAL;
+		goto done;
+	}
 
 	bru_try_format(bru, config, fmt->pad, &fmt->format);
 
@@ -174,7 +179,9 @@
 		}
 	}
 
-	return 0;
+done:
+	mutex_unlock(&bru->entity.lock);
+	return ret;
 }
 
 static int bru_get_selection(struct v4l2_subdev *subdev,
@@ -201,7 +208,9 @@
 		if (!config)
 			return -EINVAL;
 
+		mutex_lock(&bru->entity.lock);
 		sel->r = *bru_get_compose(bru, config, sel->pad);
+		mutex_unlock(&bru->entity.lock);
 		return 0;
 
 	default:
@@ -217,6 +226,7 @@
 	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
 	struct v4l2_rect *compose;
+	int ret = 0;
 
 	if (sel->pad == bru->entity.source_pad)
 		return -EINVAL;
@@ -224,11 +234,16 @@
 	if (sel->target != V4L2_SEL_TGT_COMPOSE)
 		return -EINVAL;
 
-	config = vsp1_entity_get_pad_config(&bru->entity, cfg, sel->which);
-	if (!config)
-		return -EINVAL;
+	mutex_lock(&bru->entity.lock);
 
-	/* The compose rectangle top left corner must be inside the output
+	config = vsp1_entity_get_pad_config(&bru->entity, cfg, sel->which);
+	if (!config) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/*
+	 * The compose rectangle top left corner must be inside the output
 	 * frame.
 	 */
 	format = vsp1_entity_get_pad_format(&bru->entity, config,
@@ -246,7 +261,9 @@
 	compose = bru_get_compose(bru, config, sel->pad);
 	*compose = sel->r;
 
-	return 0;
+done:
+	mutex_unlock(&bru->entity.lock);
+	return ret;
 }
 
 static const struct v4l2_subdev_pad_ops bru_pad_ops = {
@@ -269,14 +286,15 @@
 
 static void bru_configure(struct vsp1_entity *entity,
 			  struct vsp1_pipeline *pipe,
-			  struct vsp1_dl_list *dl, bool full)
+			  struct vsp1_dl_list *dl,
+			  enum vsp1_entity_params params)
 {
 	struct vsp1_bru *bru = to_bru(&entity->subdev);
 	struct v4l2_mbus_framefmt *format;
 	unsigned int flags;
 	unsigned int i;
 
-	if (!full)
+	if (params != VSP1_ENTITY_PARAMS_INIT)
 		return;
 
 	format = vsp1_entity_get_pad_format(&bru->entity, bru->entity.config,
diff --git a/drivers/media/platform/vsp1/vsp1_clu.c b/drivers/media/platform/vsp1/vsp1_clu.c
index b63d2db..f2fb26e5 100644
--- a/drivers/media/platform/vsp1/vsp1_clu.c
+++ b/drivers/media/platform/vsp1/vsp1_clu.c
@@ -148,10 +148,15 @@
 	struct vsp1_clu *clu = to_clu(subdev);
 	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
+	int ret = 0;
+
+	mutex_lock(&clu->entity.lock);
 
 	config = vsp1_entity_get_pad_config(&clu->entity, cfg, fmt->which);
-	if (!config)
-		return -EINVAL;
+	if (!config) {
+		ret = -EINVAL;
+		goto done;
+	}
 
 	/* Default to YUV if the requested format is not supported. */
 	if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
@@ -164,7 +169,7 @@
 	if (fmt->pad == CLU_PAD_SOURCE) {
 		/* The CLU output format can't be modified. */
 		fmt->format = *format;
-		return 0;
+		goto done;
 	}
 
 	format->code = fmt->format.code;
@@ -182,7 +187,9 @@
 					    CLU_PAD_SOURCE);
 	*format = fmt->format;
 
-	return 0;
+done:
+	mutex_unlock(&clu->entity.lock);
+	return ret;
 }
 
 /* -----------------------------------------------------------------------------
@@ -207,42 +214,51 @@
 
 static void clu_configure(struct vsp1_entity *entity,
 			  struct vsp1_pipeline *pipe,
-			  struct vsp1_dl_list *dl, bool full)
+			  struct vsp1_dl_list *dl,
+			  enum vsp1_entity_params params)
 {
 	struct vsp1_clu *clu = to_clu(&entity->subdev);
 	struct vsp1_dl_body *dlb;
 	unsigned long flags;
 	u32 ctrl = VI6_CLU_CTRL_AAI | VI6_CLU_CTRL_MVS | VI6_CLU_CTRL_EN;
 
-	/* The format can't be changed during streaming, only verify it at
-	 * stream start and store the information internally for future partial
-	 * reconfiguration calls.
-	 */
-	if (full) {
+	switch (params) {
+	case VSP1_ENTITY_PARAMS_INIT: {
+		/*
+		 * The format can't be changed during streaming, only verify it
+		 * at setup time and store the information internally for future
+		 * runtime configuration calls.
+		 */
 		struct v4l2_mbus_framefmt *format;
 
 		format = vsp1_entity_get_pad_format(&clu->entity,
 						    clu->entity.config,
 						    CLU_PAD_SINK);
 		clu->yuv_mode = format->code == MEDIA_BUS_FMT_AYUV8_1X32;
-		return;
+		break;
 	}
 
-	/* 2D mode can only be used with the YCbCr pixel encoding. */
-	if (clu->mode == V4L2_CID_VSP1_CLU_MODE_2D && clu->yuv_mode)
-		ctrl |= VI6_CLU_CTRL_AX1I_2D | VI6_CLU_CTRL_AX2I_2D
-		     |  VI6_CLU_CTRL_OS0_2D | VI6_CLU_CTRL_OS1_2D
-		     |  VI6_CLU_CTRL_OS2_2D | VI6_CLU_CTRL_M2D;
+	case VSP1_ENTITY_PARAMS_PARTITION:
+		break;
 
-	vsp1_clu_write(clu, dl, VI6_CLU_CTRL, ctrl);
+	case VSP1_ENTITY_PARAMS_RUNTIME:
+		/* 2D mode can only be used with the YCbCr pixel encoding. */
+		if (clu->mode == V4L2_CID_VSP1_CLU_MODE_2D && clu->yuv_mode)
+			ctrl |= VI6_CLU_CTRL_AX1I_2D | VI6_CLU_CTRL_AX2I_2D
+			     |  VI6_CLU_CTRL_OS0_2D | VI6_CLU_CTRL_OS1_2D
+			     |  VI6_CLU_CTRL_OS2_2D | VI6_CLU_CTRL_M2D;
 
-	spin_lock_irqsave(&clu->lock, flags);
-	dlb = clu->clu;
-	clu->clu = NULL;
-	spin_unlock_irqrestore(&clu->lock, flags);
+		vsp1_clu_write(clu, dl, VI6_CLU_CTRL, ctrl);
 
-	if (dlb)
-		vsp1_dl_list_add_fragment(dl, dlb);
+		spin_lock_irqsave(&clu->lock, flags);
+		dlb = clu->clu;
+		clu->clu = NULL;
+		spin_unlock_irqrestore(&clu->lock, flags);
+
+		if (dlb)
+			vsp1_dl_list_add_fragment(dl, dlb);
+		break;
+	}
 }
 
 static const struct vsp1_entity_operations clu_entity_ops = {
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
index 37c3518..ad545af 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -21,7 +21,6 @@
 #include "vsp1_dl.h"
 
 #define VSP1_DL_NUM_ENTRIES		256
-#define VSP1_DL_NUM_LISTS		3
 
 #define VSP1_DLH_INT_ENABLE		(1 << 1)
 #define VSP1_DLH_AUTO_START		(1 << 0)
@@ -71,6 +70,7 @@
  * @dma: DMA address for the header
  * @body0: first display list body
  * @fragments: list of extra display list bodies
+ * @chain: entry in the display list partition chain
  */
 struct vsp1_dl_list {
 	struct list_head list;
@@ -81,6 +81,9 @@
 
 	struct vsp1_dl_body body0;
 	struct list_head fragments;
+
+	bool has_chain;
+	struct list_head chain;
 };
 
 enum vsp1_dl_mode {
@@ -262,7 +265,6 @@
 
 		memset(dl->header, 0, sizeof(*dl->header));
 		dl->header->lists[0].addr = dl->body0.dma;
-		dl->header->flags = VSP1_DLH_INT_ENABLE;
 	}
 
 	return dl;
@@ -293,6 +295,12 @@
 	if (!list_empty(&dlm->free)) {
 		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
 		list_del(&dl->list);
+
+		/*
+		 * The display list chain must be initialised to ensure every
+		 * display list can assert list_empty() if it is not in a chain.
+		 */
+		INIT_LIST_HEAD(&dl->chain);
 	}
 
 	spin_unlock_irqrestore(&dlm->lock, flags);
@@ -303,10 +311,24 @@
 /* This function must be called with the display list manager lock held.*/
 static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
 {
+	struct vsp1_dl_list *dl_child;
+
 	if (!dl)
 		return;
 
-	/* We can't free fragments here as DMA memory can only be freed in
+	/*
+	 * Release any linked display-lists which were chained for a single
+	 * hardware operation.
+	 */
+	if (dl->has_chain) {
+		list_for_each_entry(dl_child, &dl->chain, chain)
+			__vsp1_dl_list_put(dl_child);
+	}
+
+	dl->has_chain = false;
+
+	/*
+	 * We can't free fragments here as DMA memory can only be freed in
 	 * interruptible context. Move all fragments to the display list
 	 * manager's list of fragments to be freed, they will be
 	 * garbage-collected by the work queue.
@@ -383,6 +405,76 @@
 	return 0;
 }
 
+/**
+ * vsp1_dl_list_add_chain - Add a display list to a chain
+ * @head: The head display list
+ * @dl: The new display list
+ *
+ * Add a display list to an existing display list chain. The chained lists
+ * will be automatically processed by the hardware without intervention from
+ * the CPU. A display list end interrupt will only complete after the last
+ * display list in the chain has completed processing.
+ *
+ * Adding a display list to a chain passes ownership of the display list to
+ * the head display list item. The chain is released when the head dl item is
+ * put back with __vsp1_dl_list_put().
+ *
+ * Chained display lists are only usable in header mode. Attempts to add a
+ * display list to a chain in header-less mode will return an error.
+ */
+int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
+			   struct vsp1_dl_list *dl)
+{
+	/* Chained lists are only available in header mode. */
+	if (head->dlm->mode != VSP1_DL_MODE_HEADER)
+		return -EINVAL;
+
+	head->has_chain = true;
+	list_add_tail(&dl->chain, &head->chain);
+	return 0;
+}
+
+static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
+{
+	struct vsp1_dl_header_list *hdr = dl->header->lists;
+	struct vsp1_dl_body *dlb;
+	unsigned int num_lists = 0;
+
+	/*
+	 * Fill the header with the display list bodies addresses and sizes. The
+	 * address of the first body has already been filled when the display
+	 * list was allocated.
+	 */
+
+	hdr->num_bytes = dl->body0.num_entries
+		       * sizeof(*dl->header->lists);
+
+	list_for_each_entry(dlb, &dl->fragments, list) {
+		num_lists++;
+		hdr++;
+
+		hdr->addr = dlb->dma;
+		hdr->num_bytes = dlb->num_entries
+			       * sizeof(*dl->header->lists);
+	}
+
+	dl->header->num_lists = num_lists;
+
+	/*
+	 * If this display list's chain is not empty, we are on a list, where
+	 * the next item in the list is the display list entity which should be
+	 * automatically queued by the hardware.
+	 */
+	if (!list_empty(&dl->chain) && !is_last) {
+		struct vsp1_dl_list *next = list_next_entry(dl, chain);
+
+		dl->header->next_header = next->dma;
+		dl->header->flags = VSP1_DLH_AUTO_START;
+	} else {
+		dl->header->flags = VSP1_DLH_INT_ENABLE;
+	}
+}
+
 void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
 {
 	struct vsp1_dl_manager *dlm = dl->dlm;
@@ -393,30 +485,26 @@
 	spin_lock_irqsave(&dlm->lock, flags);
 
 	if (dl->dlm->mode == VSP1_DL_MODE_HEADER) {
-		struct vsp1_dl_header_list *hdr = dl->header->lists;
-		struct vsp1_dl_body *dlb;
-		unsigned int num_lists = 0;
+		struct vsp1_dl_list *dl_child;
 
-		/* Fill the header with the display list bodies addresses and
-		 * sizes. The address of the first body has already been filled
-		 * when the display list was allocated.
-		 *
+		/*
 		 * In header mode the caller guarantees that the hardware is
 		 * idle at this point.
 		 */
-		hdr->num_bytes = dl->body0.num_entries
-			       * sizeof(*dl->header->lists);
 
-		list_for_each_entry(dlb, &dl->fragments, list) {
-			num_lists++;
-			hdr++;
+		/* Fill the header for the head and chained display lists. */
+		vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));
 
-			hdr->addr = dlb->dma;
-			hdr->num_bytes = dlb->num_entries
-				       * sizeof(*dl->header->lists);
+		list_for_each_entry(dl_child, &dl->chain, chain) {
+			bool last = list_is_last(&dl_child->chain, &dl->chain);
+
+			vsp1_dl_list_fill_header(dl_child, last);
 		}
 
-		dl->header->num_lists = num_lists;
+		/*
+		 * Commit the head display list to hardware. Chained headers
+		 * will auto-start.
+		 */
 		vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
 
 		dlm->active = dl;
diff --git a/drivers/media/platform/vsp1/vsp1_dl.h b/drivers/media/platform/vsp1/vsp1_dl.h
index de387cd..7131aa3 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.h
+++ b/drivers/media/platform/vsp1/vsp1_dl.h
@@ -41,5 +41,6 @@
 void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data);
 int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
 			      struct vsp1_dl_body *dlb);
+int vsp1_dl_list_add_chain(struct vsp1_dl_list *head, struct vsp1_dl_list *dl);
 
 #endif /* __VSP1_DL_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index fe9665e..cd209dc 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -276,17 +276,18 @@
 	}
 
 	dev_dbg(vsp1->dev,
-		"%s: RPF%u: (%u,%u)/%ux%u -> (%u,%u)/%ux%u (%08x), pitch %u dma { %pad, %pad } zpos %u\n",
+		"%s: RPF%u: (%u,%u)/%ux%u -> (%u,%u)/%ux%u (%08x), pitch %u dma { %pad, %pad, %pad } zpos %u\n",
 		__func__, rpf_index,
 		cfg->src.left, cfg->src.top, cfg->src.width, cfg->src.height,
 		cfg->dst.left, cfg->dst.top, cfg->dst.width, cfg->dst.height,
 		cfg->pixelformat, cfg->pitch, &cfg->mem[0], &cfg->mem[1],
-		cfg->zpos);
+		&cfg->mem[2], cfg->zpos);
 
-	/* Store the format, stride, memory buffer address, crop and compose
+	/*
+	 * Store the format, stride, memory buffer address, crop and compose
 	 * rectangles and Z-order position and for the input.
 	 */
-	fmtinfo = vsp1_get_format_info(cfg->pixelformat);
+	fmtinfo = vsp1_get_format_info(vsp1, cfg->pixelformat);
 	if (!fmtinfo) {
 		dev_dbg(vsp1->dev, "Unsupport pixel format %08x for RPF\n",
 			cfg->pixelformat);
@@ -301,7 +302,7 @@
 
 	rpf->mem.addr[0] = cfg->mem[0];
 	rpf->mem.addr[1] = cfg->mem[1];
-	rpf->mem.addr[2] = 0;
+	rpf->mem.addr[2] = cfg->mem[2];
 
 	vsp1->drm->inputs[rpf_index].crop = cfg->src;
 	vsp1->drm->inputs[rpf_index].compose = cfg->dst;
@@ -492,16 +493,13 @@
 		vsp1_entity_route_setup(entity, pipe->dl);
 
 		if (entity->ops->configure) {
-			entity->ops->configure(entity, pipe, pipe->dl, true);
-			entity->ops->configure(entity, pipe, pipe->dl, false);
+			entity->ops->configure(entity, pipe, pipe->dl,
+					       VSP1_ENTITY_PARAMS_INIT);
+			entity->ops->configure(entity, pipe, pipe->dl,
+					       VSP1_ENTITY_PARAMS_RUNTIME);
+			entity->ops->configure(entity, pipe, pipe->dl,
+					       VSP1_ENTITY_PARAMS_PARTITION);
 		}
-
-		/* The memory buffer address must be applied after configuring
-		 * the RPF to make sure the crop offset are computed.
-		 */
-		if (entity->type == VSP1_ENTITY_RPF)
-			vsp1_rwpf_set_memory(to_rwpf(&entity->subdev),
-					     pipe->dl);
 	}
 
 	vsp1_dl_list_commit(pipe->dl);
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index cc316d2..57c713a 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -60,7 +60,7 @@
 		status = vsp1_read(vsp1, VI6_WPF_IRQ_STA(i));
 		vsp1_write(vsp1, VI6_WPF_IRQ_STA(i), ~status & mask);
 
-		if (status & VI6_WFP_IRQ_STA_FRE) {
+		if (status & VI6_WFP_IRQ_STA_DFE) {
 			vsp1_pipeline_frame_end(wpf->pipe);
 			ret = IRQ_HANDLED;
 		}
@@ -220,7 +220,8 @@
 	int ret;
 
 	mdev->dev = vsp1->dev;
-	strlcpy(mdev->model, "VSP1", sizeof(mdev->model));
+	mdev->hw_revision = vsp1->version;
+	strlcpy(mdev->model, vsp1->info->model, sizeof(mdev->model));
 	snprintf(mdev->bus_info, sizeof(mdev->bus_info), "platform:%s",
 		 dev_name(mdev->dev));
 	media_device_init(mdev);
@@ -559,6 +560,7 @@
 static const struct vsp1_device_info vsp1_device_infos[] = {
 	{
 		.version = VI6_IP_VERSION_MODEL_VSPS_H2,
+		.model = "VSP1-S",
 		.gen = 2,
 		.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
 			  | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
@@ -569,6 +571,7 @@
 		.uapi = true,
 	}, {
 		.version = VI6_IP_VERSION_MODEL_VSPR_H2,
+		.model = "VSP1-R",
 		.gen = 2,
 		.features = VSP1_HAS_BRU | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
 		.rpf_count = 5,
@@ -578,6 +581,7 @@
 		.uapi = true,
 	}, {
 		.version = VI6_IP_VERSION_MODEL_VSPD_GEN2,
+		.model = "VSP1-D",
 		.gen = 2,
 		.features = VSP1_HAS_BRU | VSP1_HAS_LIF | VSP1_HAS_LUT,
 		.rpf_count = 4,
@@ -587,6 +591,7 @@
 		.uapi = true,
 	}, {
 		.version = VI6_IP_VERSION_MODEL_VSPS_M2,
+		.model = "VSP1-S",
 		.gen = 2,
 		.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
 			  | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
@@ -596,7 +601,30 @@
 		.num_bru_inputs = 4,
 		.uapi = true,
 	}, {
+		.version = VI6_IP_VERSION_MODEL_VSPS_V2H,
+		.model = "VSP1V-S",
+		.gen = 2,
+		.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
+			  | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
+		.rpf_count = 4,
+		.uds_count = 1,
+		.wpf_count = 4,
+		.num_bru_inputs = 4,
+		.uapi = true,
+	}, {
+		.version = VI6_IP_VERSION_MODEL_VSPD_V2H,
+		.model = "VSP1V-D",
+		.gen = 2,
+		.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
+			  | VSP1_HAS_LIF,
+		.rpf_count = 4,
+		.uds_count = 1,
+		.wpf_count = 1,
+		.num_bru_inputs = 4,
+		.uapi = true,
+	}, {
 		.version = VI6_IP_VERSION_MODEL_VSPI_GEN3,
+		.model = "VSP2-I",
 		.gen = 3,
 		.features = VSP1_HAS_CLU | VSP1_HAS_LUT | VSP1_HAS_SRU
 			  | VSP1_HAS_WPF_HFLIP | VSP1_HAS_WPF_VFLIP,
@@ -606,6 +634,7 @@
 		.uapi = true,
 	}, {
 		.version = VI6_IP_VERSION_MODEL_VSPBD_GEN3,
+		.model = "VSP2-BD",
 		.gen = 3,
 		.features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP,
 		.rpf_count = 5,
@@ -614,6 +643,7 @@
 		.uapi = true,
 	}, {
 		.version = VI6_IP_VERSION_MODEL_VSPBC_GEN3,
+		.model = "VSP2-BC",
 		.gen = 3,
 		.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
 			  | VSP1_HAS_WPF_VFLIP,
@@ -623,6 +653,7 @@
 		.uapi = true,
 	}, {
 		.version = VI6_IP_VERSION_MODEL_VSPD_GEN3,
+		.model = "VSP2-D",
 		.gen = 3,
 		.features = VSP1_HAS_BRU | VSP1_HAS_LIF | VSP1_HAS_WPF_VFLIP,
 		.rpf_count = 5,
@@ -638,7 +669,6 @@
 	struct resource *irq;
 	struct resource *io;
 	unsigned int i;
-	u32 version;
 	int ret;
 
 	vsp1 = devm_kzalloc(&pdev->dev, sizeof(*vsp1), GFP_KERNEL);
@@ -689,11 +719,11 @@
 	if (ret < 0)
 		goto done;
 
-	version = vsp1_read(vsp1, VI6_IP_VERSION);
+	vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
 	pm_runtime_put_sync(&pdev->dev);
 
 	for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
-		if ((version & VI6_IP_VERSION_MODEL_MASK) ==
+		if ((vsp1->version & VI6_IP_VERSION_MODEL_MASK) ==
 		    vsp1_device_infos[i].version) {
 			vsp1->info = &vsp1_device_infos[i];
 			break;
@@ -701,12 +731,13 @@
 	}
 
 	if (!vsp1->info) {
-		dev_err(&pdev->dev, "unsupported IP version 0x%08x\n", version);
+		dev_err(&pdev->dev, "unsupported IP version 0x%08x\n",
+			vsp1->version);
 		ret = -ENXIO;
 		goto done;
 	}
 
-	dev_dbg(&pdev->dev, "IP version 0x%08x\n", version);
+	dev_dbg(&pdev->dev, "IP version 0x%08x\n", vsp1->version);
 
 	/* Instanciate entities */
 	ret = vsp1_create_entities(vsp1);
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
index 4cf6cc7..da67349 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -51,6 +51,9 @@
  * @cfg: the TRY pad configuration
  * @which: configuration selector (ACTIVE or TRY)
  *
+ * When called with which set to V4L2_SUBDEV_FORMAT_ACTIVE the caller must hold
+ * the entity lock to access the returned configuration.
+ *
  * Return the pad configuration requested by the which argument. The TRY
  * configuration is passed explicitly to the function through the cfg argument
  * and simply returned when requested. The ACTIVE configuration comes from the
@@ -160,7 +163,9 @@
 	if (!config)
 		return -EINVAL;
 
+	mutex_lock(&entity->lock);
 	fmt->format = *vsp1_entity_get_pad_format(entity, config, fmt->pad);
+	mutex_unlock(&entity->lock);
 
 	return 0;
 }
@@ -204,8 +209,10 @@
 		if (!config)
 			return -EINVAL;
 
+		mutex_lock(&entity->lock);
 		format = vsp1_entity_get_pad_format(entity, config, 0);
 		code->code = format->code;
+		mutex_unlock(&entity->lock);
 	}
 
 	return 0;
@@ -235,6 +242,7 @@
 	struct vsp1_entity *entity = to_vsp1_entity(subdev);
 	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
+	int ret = 0;
 
 	config = vsp1_entity_get_pad_config(entity, cfg, fse->which);
 	if (!config)
@@ -242,8 +250,12 @@
 
 	format = vsp1_entity_get_pad_format(entity, config, fse->pad);
 
-	if (fse->index || fse->code != format->code)
-		return -EINVAL;
+	mutex_lock(&entity->lock);
+
+	if (fse->index || fse->code != format->code) {
+		ret = -EINVAL;
+		goto done;
+	}
 
 	if (fse->pad == 0) {
 		fse->min_width = min_width;
@@ -260,7 +272,9 @@
 		fse->max_height = format->height;
 	}
 
-	return 0;
+done:
+	mutex_unlock(&entity->lock);
+	return ret;
 }
 
 /* -----------------------------------------------------------------------------
@@ -358,6 +372,8 @@
 	if (i == ARRAY_SIZE(vsp1_routes))
 		return -EINVAL;
 
+	mutex_init(&entity->lock);
+
 	entity->vsp1 = vsp1;
 	entity->source_pad = num_pads - 1;
 
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
index b43457f..901146f 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -14,7 +14,7 @@
 #define __VSP1_ENTITY_H__
 
 #include <linux/list.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 
 #include <media/v4l2-subdev.h>
 
@@ -35,6 +35,18 @@
 	VSP1_ENTITY_WPF,
 };
 
+/**
+ * enum vsp1_entity_params - Entity configuration parameters class
+ * @VSP1_ENTITY_PARAMS_INIT - Initial parameters
+ * @VSP1_ENTITY_PARAMS_PARTITION - Per-image partition parameters
+ * @VSP1_ENTITY_PARAMS_RUNTIME - Runtime-configurable parameters
+ */
+enum vsp1_entity_params {
+	VSP1_ENTITY_PARAMS_INIT,
+	VSP1_ENTITY_PARAMS_PARTITION,
+	VSP1_ENTITY_PARAMS_RUNTIME,
+};
+
 #define VSP1_ENTITY_MAX_INPUTS		5	/* For the BRU */
 
 /*
@@ -63,17 +75,16 @@
 /**
  * struct vsp1_entity_operations - Entity operations
  * @destroy:	Destroy the entity.
- * @set_memory:	Setup memory buffer access. This operation applies the settings
- *		stored in the rwpf mem field to the display list. Valid for RPF
- *		and WPF only.
  * @configure:	Setup the hardware based on the entity state (pipeline, formats,
  *		selection rectangles, ...)
+ * @max_width:	Return the max supported width of data that the entity can
+ *		process in a single operation.
  */
 struct vsp1_entity_operations {
 	void (*destroy)(struct vsp1_entity *);
-	void (*set_memory)(struct vsp1_entity *, struct vsp1_dl_list *dl);
 	void (*configure)(struct vsp1_entity *, struct vsp1_pipeline *,
-			  struct vsp1_dl_list *, bool);
+			  struct vsp1_dl_list *, enum vsp1_entity_params);
+	unsigned int (*max_width)(struct vsp1_entity *, struct vsp1_pipeline *);
 };
 
 struct vsp1_entity {
@@ -96,6 +107,8 @@
 
 	struct v4l2_subdev subdev;
 	struct v4l2_subdev_pad_config *config;
+
+	struct mutex lock;	/* Protects the pad config */
 };
 
 static inline struct vsp1_entity *to_vsp1_entity(struct v4l2_subdev *subdev)
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
index 6e5077b..94316af 100644
--- a/drivers/media/platform/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -71,10 +71,15 @@
 	struct vsp1_hsit *hsit = to_hsit(subdev);
 	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
+	int ret = 0;
+
+	mutex_lock(&hsit->entity.lock);
 
 	config = vsp1_entity_get_pad_config(&hsit->entity, cfg, fmt->which);
-	if (!config)
-		return -EINVAL;
+	if (!config) {
+		ret = -EINVAL;
+		goto done;
+	}
 
 	format = vsp1_entity_get_pad_format(&hsit->entity, config, fmt->pad);
 
@@ -83,7 +88,7 @@
 		 * modified.
 		 */
 		fmt->format = *format;
-		return 0;
+		goto done;
 	}
 
 	format->code = hsit->inverse ? MEDIA_BUS_FMT_AHSV8888_1X32
@@ -104,7 +109,9 @@
 	format->code = hsit->inverse ? MEDIA_BUS_FMT_ARGB8888_1X32
 		     : MEDIA_BUS_FMT_AHSV8888_1X32;
 
-	return 0;
+done:
+	mutex_unlock(&hsit->entity.lock);
+	return ret;
 }
 
 static const struct v4l2_subdev_pad_ops hsit_pad_ops = {
@@ -125,11 +132,12 @@
 
 static void hsit_configure(struct vsp1_entity *entity,
 			   struct vsp1_pipeline *pipe,
-			   struct vsp1_dl_list *dl, bool full)
+			   struct vsp1_dl_list *dl,
+			   enum vsp1_entity_params params)
 {
 	struct vsp1_hsit *hsit = to_hsit(&entity->subdev);
 
-	if (!full)
+	if (params != VSP1_ENTITY_PARAMS_INIT)
 		return;
 
 	if (hsit->inverse)
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
index a720063..e32acae 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -66,10 +66,15 @@
 	struct vsp1_lif *lif = to_lif(subdev);
 	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
+	int ret = 0;
+
+	mutex_lock(&lif->entity.lock);
 
 	config = vsp1_entity_get_pad_config(&lif->entity, cfg, fmt->which);
-	if (!config)
-		return -EINVAL;
+	if (!config) {
+		ret = -EINVAL;
+		goto done;
+	}
 
 	/* Default to YUV if the requested format is not supported. */
 	if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
@@ -83,7 +88,7 @@
 		 * format.
 		 */
 		fmt->format = *format;
-		return 0;
+		goto done;
 	}
 
 	format->code = fmt->format.code;
@@ -101,7 +106,9 @@
 					    LIF_PAD_SOURCE);
 	*format = fmt->format;
 
-	return 0;
+done:
+	mutex_unlock(&lif->entity.lock);
+	return ret;
 }
 
 static const struct v4l2_subdev_pad_ops lif_pad_ops = {
@@ -122,7 +129,8 @@
 
 static void lif_configure(struct vsp1_entity *entity,
 			  struct vsp1_pipeline *pipe,
-			  struct vsp1_dl_list *dl, bool full)
+			  struct vsp1_dl_list *dl,
+			  enum vsp1_entity_params params)
 {
 	const struct v4l2_mbus_framefmt *format;
 	struct vsp1_lif *lif = to_lif(&entity->subdev);
@@ -130,7 +138,7 @@
 	unsigned int obth = 400;
 	unsigned int lbth = 200;
 
-	if (!full)
+	if (params != VSP1_ENTITY_PARAMS_INIT)
 		return;
 
 	format = vsp1_entity_get_pad_format(&lif->entity, lif->entity.config,
diff --git a/drivers/media/platform/vsp1/vsp1_lut.c b/drivers/media/platform/vsp1/vsp1_lut.c
index dc31de9..c67cc60d 100644
--- a/drivers/media/platform/vsp1/vsp1_lut.c
+++ b/drivers/media/platform/vsp1/vsp1_lut.c
@@ -124,10 +124,15 @@
 	struct vsp1_lut *lut = to_lut(subdev);
 	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
+	int ret = 0;
+
+	mutex_lock(&lut->entity.lock);
 
 	config = vsp1_entity_get_pad_config(&lut->entity, cfg, fmt->which);
-	if (!config)
-		return -EINVAL;
+	if (!config) {
+		ret = -EINVAL;
+		goto done;
+	}
 
 	/* Default to YUV if the requested format is not supported. */
 	if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
@@ -140,7 +145,7 @@
 	if (fmt->pad == LUT_PAD_SOURCE) {
 		/* The LUT output format can't be modified. */
 		fmt->format = *format;
-		return 0;
+		goto done;
 	}
 
 	format->code = fmt->format.code;
@@ -158,7 +163,9 @@
 					    LUT_PAD_SOURCE);
 	*format = fmt->format;
 
-	return 0;
+done:
+	mutex_unlock(&lut->entity.lock);
+	return ret;
 }
 
 /* -----------------------------------------------------------------------------
@@ -183,24 +190,31 @@
 
 static void lut_configure(struct vsp1_entity *entity,
 			  struct vsp1_pipeline *pipe,
-			  struct vsp1_dl_list *dl, bool full)
+			  struct vsp1_dl_list *dl,
+			  enum vsp1_entity_params params)
 {
 	struct vsp1_lut *lut = to_lut(&entity->subdev);
 	struct vsp1_dl_body *dlb;
 	unsigned long flags;
 
-	if (full) {
+	switch (params) {
+	case VSP1_ENTITY_PARAMS_INIT:
 		vsp1_lut_write(lut, dl, VI6_LUT_CTRL, VI6_LUT_CTRL_EN);
-		return;
+		break;
+
+	case VSP1_ENTITY_PARAMS_PARTITION:
+		break;
+
+	case VSP1_ENTITY_PARAMS_RUNTIME:
+		spin_lock_irqsave(&lut->lock, flags);
+		dlb = lut->lut;
+		lut->lut = NULL;
+		spin_unlock_irqrestore(&lut->lock, flags);
+
+		if (dlb)
+			vsp1_dl_list_add_fragment(dl, dlb);
+		break;
 	}
-
-	spin_lock_irqsave(&lut->lock, flags);
-	dlb = lut->lut;
-	lut->lut = NULL;
-	spin_unlock_irqrestore(&lut->lock, flags);
-
-	if (dlb)
-		vsp1_dl_list_add_fragment(dl, dlb);
 }
 
 static const struct vsp1_entity_operations lut_entity_ops = {
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.c b/drivers/media/platform/vsp1/vsp1_pipe.c
index 3e75fb3..756ca4e 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.c
+++ b/drivers/media/platform/vsp1/vsp1_pipe.c
@@ -136,17 +136,23 @@
 	  3, { 8, 8, 8 }, false, true, 1, 1, false },
 };
 
-/*
+/**
  * vsp1_get_format_info - Retrieve format information for a 4CC
+ * @vsp1: the VSP1 device
  * @fourcc: the format 4CC
  *
  * Return a pointer to the format information structure corresponding to the
  * given V4L2 format 4CC, or NULL if no corresponding format can be found.
  */
-const struct vsp1_format_info *vsp1_get_format_info(u32 fourcc)
+const struct vsp1_format_info *vsp1_get_format_info(struct vsp1_device *vsp1,
+						    u32 fourcc)
 {
 	unsigned int i;
 
+	/* Special case, the VYUY format is supported on Gen2 only. */
+	if (vsp1->info->gen != 2 && fourcc == V4L2_PIX_FMT_VYUY)
+		return NULL;
+
 	for (i = 0; i < ARRAY_SIZE(vsp1_video_formats); ++i) {
 		const struct vsp1_format_info *info = &vsp1_video_formats[i];
 
@@ -365,6 +371,7 @@
 
 void vsp1_pipelines_resume(struct vsp1_device *vsp1)
 {
+	unsigned long flags;
 	unsigned int i;
 
 	/* Resume all running pipelines. */
@@ -379,7 +386,9 @@
 		if (pipe == NULL)
 			continue;
 
+		spin_lock_irqsave(&pipe->irqlock, flags);
 		if (vsp1_pipeline_ready(pipe))
 			vsp1_pipeline_run(pipe);
+		spin_unlock_irqrestore(&pipe->irqlock, flags);
 	}
 }
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.h b/drivers/media/platform/vsp1/vsp1_pipe.h
index d20d997..ac4ad26 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.h
+++ b/drivers/media/platform/vsp1/vsp1_pipe.h
@@ -77,6 +77,9 @@
  * @uds_input: entity at the input of the UDS, if the UDS is present
  * @entities: list of entities in the pipeline
  * @dl: display list associated with the pipeline
+ * @div_size: The maximum allowed partition size for the pipeline
+ * @partitions: The number of partitions used to process one frame
+ * @current_partition: The partition number currently being configured
  */
 struct vsp1_pipeline {
 	struct media_pipeline pipe;
@@ -104,6 +107,11 @@
 	struct list_head entities;
 
 	struct vsp1_dl_list *dl;
+
+	unsigned int div_size;
+	unsigned int partitions;
+	struct v4l2_rect partition;
+	unsigned int current_partition;
 };
 
 void vsp1_pipeline_reset(struct vsp1_pipeline *pipe);
@@ -122,6 +130,7 @@
 void vsp1_pipelines_suspend(struct vsp1_device *vsp1);
 void vsp1_pipelines_resume(struct vsp1_device *vsp1);
 
-const struct vsp1_format_info *vsp1_get_format_info(u32 fourcc);
+const struct vsp1_format_info *vsp1_get_format_info(struct vsp1_device *vsp1,
+						    u32 fourcc);
 
 #endif /* __VSP1_PIPE_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index 3b03007..47b1dee 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -660,6 +660,8 @@
 #define VI6_IP_VERSION_MODEL_VSPR_H2	(0x0a << 8)
 #define VI6_IP_VERSION_MODEL_VSPD_GEN2	(0x0b << 8)
 #define VI6_IP_VERSION_MODEL_VSPS_M2	(0x0c << 8)
+#define VI6_IP_VERSION_MODEL_VSPS_V2H	(0x12 << 8)
+#define VI6_IP_VERSION_MODEL_VSPD_V2H	(0x13 << 8)
 #define VI6_IP_VERSION_MODEL_VSPI_GEN3	(0x14 << 8)
 #define VI6_IP_VERSION_MODEL_VSPBD_GEN3	(0x15 << 8)
 #define VI6_IP_VERSION_MODEL_VSPBC_GEN3	(0x16 << 8)
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index 3888389..b2e34a8 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -46,34 +46,22 @@
  * VSP1 Entity Operations
  */
 
-static void rpf_set_memory(struct vsp1_entity *entity, struct vsp1_dl_list *dl)
-{
-	struct vsp1_rwpf *rpf = entity_to_rwpf(entity);
-
-	vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_Y,
-		       rpf->mem.addr[0] + rpf->offsets[0]);
-	vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_C0,
-		       rpf->mem.addr[1] + rpf->offsets[1]);
-	vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_C1,
-		       rpf->mem.addr[2] + rpf->offsets[1]);
-}
-
 static void rpf_configure(struct vsp1_entity *entity,
 			  struct vsp1_pipeline *pipe,
-			  struct vsp1_dl_list *dl, bool full)
+			  struct vsp1_dl_list *dl,
+			  enum vsp1_entity_params params)
 {
 	struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
 	const struct vsp1_format_info *fmtinfo = rpf->fmtinfo;
 	const struct v4l2_pix_format_mplane *format = &rpf->format;
 	const struct v4l2_mbus_framefmt *source_format;
 	const struct v4l2_mbus_framefmt *sink_format;
-	const struct v4l2_rect *crop;
 	unsigned int left = 0;
 	unsigned int top = 0;
 	u32 pstride;
 	u32 infmt;
 
-	if (!full) {
+	if (params == VSP1_ENTITY_PARAMS_RUNTIME) {
 		vsp1_rpf_write(rpf, dl, VI6_RPF_VRTCOL_SET,
 			       rpf->alpha << VI6_RPF_VRTCOL_SET_LAYA_SHIFT);
 		vsp1_rpf_write(rpf, dl, VI6_RPF_MULT_ALPHA, rpf->mult_alpha |
@@ -83,34 +71,80 @@
 		return;
 	}
 
-	/* Source size, stride and crop offsets.
-	 *
-	 * The crop offsets correspond to the location of the crop rectangle top
-	 * left corner in the plane buffer. Only two offsets are needed, as
-	 * planes 2 and 3 always have identical strides.
-	 */
-	crop = vsp1_rwpf_get_crop(rpf, rpf->entity.config);
+	if (params == VSP1_ENTITY_PARAMS_PARTITION) {
+		unsigned int offsets[2];
+		struct v4l2_rect crop;
 
-	vsp1_rpf_write(rpf, dl, VI6_RPF_SRC_BSIZE,
-		       (crop->width << VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT) |
-		       (crop->height << VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT));
-	vsp1_rpf_write(rpf, dl, VI6_RPF_SRC_ESIZE,
-		       (crop->width << VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT) |
-		       (crop->height << VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT));
+		/*
+		 * Source size and crop offsets.
+		 *
+		 * The crop offsets correspond to the location of the crop
+		 * rectangle top left corner in the plane buffer. Only two
+		 * offsets are needed, as planes 2 and 3 always have identical
+		 * strides.
+		 */
+		crop = *vsp1_rwpf_get_crop(rpf, rpf->entity.config);
 
-	rpf->offsets[0] = crop->top * format->plane_fmt[0].bytesperline
-			+ crop->left * fmtinfo->bpp[0] / 8;
+		/*
+		 * Partition Algorithm Control
+		 *
+		 * The partition algorithm can split this frame into multiple
+		 * slices. We must scale our partition window based on the pipe
+		 * configuration to match the destination partition window.
+		 * To achieve this, we adjust our crop to provide a 'sub-crop'
+		 * matching the expected partition window. Only 'left' and
+		 * 'width' need to be adjusted.
+		 */
+		if (pipe->partitions > 1) {
+			const struct v4l2_mbus_framefmt *output;
+			struct vsp1_entity *wpf = &pipe->output->entity;
+			unsigned int input_width = crop.width;
+
+			/*
+			 * Scale the partition window based on the configuration
+			 * of the pipeline.
+			 */
+			output = vsp1_entity_get_pad_format(wpf, wpf->config,
+							    RWPF_PAD_SOURCE);
+
+			crop.width = pipe->partition.width * input_width
+				   / output->width;
+			crop.left += pipe->partition.left * input_width
+				   / output->width;
+		}
+
+		vsp1_rpf_write(rpf, dl, VI6_RPF_SRC_BSIZE,
+			       (crop.width << VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT) |
+			       (crop.height << VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT));
+		vsp1_rpf_write(rpf, dl, VI6_RPF_SRC_ESIZE,
+			       (crop.width << VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT) |
+			       (crop.height << VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT));
+
+		offsets[0] = crop.top * format->plane_fmt[0].bytesperline
+			   + crop.left * fmtinfo->bpp[0] / 8;
+
+		if (format->num_planes > 1)
+			offsets[1] = crop.top * format->plane_fmt[1].bytesperline
+				   + crop.left / fmtinfo->hsub
+				   * fmtinfo->bpp[1] / 8;
+		else
+			offsets[1] = 0;
+
+		vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_Y,
+			       rpf->mem.addr[0] + offsets[0]);
+		vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_C0,
+			       rpf->mem.addr[1] + offsets[1]);
+		vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_C1,
+			       rpf->mem.addr[2] + offsets[1]);
+		return;
+	}
+
+	/* Stride */
 	pstride = format->plane_fmt[0].bytesperline
 		<< VI6_RPF_SRCM_PSTRIDE_Y_SHIFT;
-
-	if (format->num_planes > 1) {
-		rpf->offsets[1] = crop->top * format->plane_fmt[1].bytesperline
-				+ crop->left * fmtinfo->bpp[1] / 8;
+	if (format->num_planes > 1)
 		pstride |= format->plane_fmt[1].bytesperline
 			<< VI6_RPF_SRCM_PSTRIDE_C_SHIFT;
-	} else {
-		rpf->offsets[1] = 0;
-	}
 
 	vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_PSTRIDE, pstride);
 
@@ -215,7 +249,6 @@
 }
 
 static const struct vsp1_entity_operations rpf_entity_ops = {
-	.set_memory = rpf_set_memory,
 	.configure = rpf_configure,
 };
 
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index 8d461b3..66e4d7e 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -66,11 +66,15 @@
 	struct vsp1_rwpf *rwpf = to_rwpf(subdev);
 	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
-	struct v4l2_rect *crop;
+	int ret = 0;
+
+	mutex_lock(&rwpf->entity.lock);
 
 	config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, fmt->which);
-	if (!config)
-		return -EINVAL;
+	if (!config) {
+		ret = -EINVAL;
+		goto done;
+	}
 
 	/* Default to YUV if the requested format is not supported. */
 	if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
@@ -85,7 +89,7 @@
 		 */
 		format->code = fmt->format.code;
 		fmt->format = *format;
-		return 0;
+		goto done;
 	}
 
 	format->code = fmt->format.code;
@@ -98,19 +102,25 @@
 
 	fmt->format = *format;
 
-	/* Update the sink crop rectangle. */
-	crop = vsp1_rwpf_get_crop(rwpf, config);
-	crop->left = 0;
-	crop->top = 0;
-	crop->width = fmt->format.width;
-	crop->height = fmt->format.height;
+	if (rwpf->entity.type == VSP1_ENTITY_RPF) {
+		struct v4l2_rect *crop;
+
+		/* Update the sink crop rectangle. */
+		crop = vsp1_rwpf_get_crop(rwpf, config);
+		crop->left = 0;
+		crop->top = 0;
+		crop->width = fmt->format.width;
+		crop->height = fmt->format.height;
+	}
 
 	/* Propagate the format to the source pad. */
 	format = vsp1_entity_get_pad_format(&rwpf->entity, config,
 					    RWPF_PAD_SOURCE);
 	*format = fmt->format;
 
-	return 0;
+done:
+	mutex_unlock(&rwpf->entity.lock);
+	return ret;
 }
 
 static int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
@@ -120,14 +130,22 @@
 	struct vsp1_rwpf *rwpf = to_rwpf(subdev);
 	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
+	int ret = 0;
 
-	/* Cropping is implemented on the sink pad. */
-	if (sel->pad != RWPF_PAD_SINK)
+	/*
+	 * Cropping is only supported on the RPF and is implemented on the sink
+	 * pad.
+	 */
+	if (rwpf->entity.type == VSP1_ENTITY_WPF || sel->pad != RWPF_PAD_SINK)
 		return -EINVAL;
 
+	mutex_lock(&rwpf->entity.lock);
+
 	config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, sel->which);
-	if (!config)
-		return -EINVAL;
+	if (!config) {
+		ret = -EINVAL;
+		goto done;
+	}
 
 	switch (sel->target) {
 	case V4L2_SEL_TGT_CROP:
@@ -144,10 +162,13 @@
 		break;
 
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
+		break;
 	}
 
-	return 0;
+done:
+	mutex_unlock(&rwpf->entity.lock);
+	return ret;
 }
 
 static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
@@ -158,21 +179,27 @@
 	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
 	struct v4l2_rect *crop;
+	int ret = 0;
 
-	/* Cropping is implemented on the sink pad. */
-	if (sel->pad != RWPF_PAD_SINK)
+	/*
+	 * Cropping is only supported on the RPF and is implemented on the sink
+	 * pad.
+	 */
+	if (rwpf->entity.type == VSP1_ENTITY_WPF || sel->pad != RWPF_PAD_SINK)
 		return -EINVAL;
 
 	if (sel->target != V4L2_SEL_TGT_CROP)
 		return -EINVAL;
 
-	config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, sel->which);
-	if (!config)
-		return -EINVAL;
+	mutex_lock(&rwpf->entity.lock);
 
-	/* Make sure the crop rectangle is entirely contained in the image. The
-	 * WPF top and left offsets are limited to 255.
-	 */
+	config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, sel->which);
+	if (!config) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Make sure the crop rectangle is entirely contained in the image. */
 	format = vsp1_entity_get_pad_format(&rwpf->entity, config,
 					    RWPF_PAD_SINK);
 
@@ -188,10 +215,6 @@
 
 	sel->r.left = min_t(unsigned int, sel->r.left, format->width - 2);
 	sel->r.top = min_t(unsigned int, sel->r.top, format->height - 2);
-	if (rwpf->entity.type == VSP1_ENTITY_WPF) {
-		sel->r.left = min_t(unsigned int, sel->r.left, 255);
-		sel->r.top = min_t(unsigned int, sel->r.top, 255);
-	}
 	sel->r.width = min_t(unsigned int, sel->r.width,
 			     format->width - sel->r.left);
 	sel->r.height = min_t(unsigned int, sel->r.height,
@@ -206,7 +229,9 @@
 	format->width = crop->width;
 	format->height = crop->height;
 
-	return 0;
+done:
+	mutex_unlock(&rwpf->entity.lock);
+	return ret;
 }
 
 const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
index cb20484..1c98aff 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -61,7 +61,6 @@
 		unsigned int active;
 	} flip;
 
-	unsigned int offsets[2];
 	struct vsp1_rwpf_memory mem;
 
 	struct vsp1_dl_manager *dlm;
@@ -86,17 +85,5 @@
 
 struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
 				     struct v4l2_subdev_pad_config *config);
-/**
- * vsp1_rwpf_set_memory - Configure DMA addresses for a [RW]PF
- * @rwpf: the [RW]PF instance
- * @dl: the display list
- *
- * This function applies the cached memory buffer address to the display list.
- */
-static inline void vsp1_rwpf_set_memory(struct vsp1_rwpf *rwpf,
-					struct vsp1_dl_list *dl)
-{
-	rwpf->entity.ops->set_memory(&rwpf->entity, dl);
-}
 
 #endif /* __VSP1_RWPF_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
index 47f5e0c..b4e568a 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -128,6 +128,7 @@
 	struct vsp1_sru *sru = to_sru(subdev);
 	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
+	int ret = 0;
 
 	config = vsp1_entity_get_pad_config(&sru->entity, cfg, fse->which);
 	if (!config)
@@ -135,8 +136,12 @@
 
 	format = vsp1_entity_get_pad_format(&sru->entity, config, SRU_PAD_SINK);
 
-	if (fse->index || fse->code != format->code)
-		return -EINVAL;
+	mutex_lock(&sru->entity.lock);
+
+	if (fse->index || fse->code != format->code) {
+		ret = -EINVAL;
+		goto done;
+	}
 
 	if (fse->pad == SRU_PAD_SINK) {
 		fse->min_width = SRU_MIN_SIZE;
@@ -156,7 +161,9 @@
 		}
 	}
 
-	return 0;
+done:
+	mutex_unlock(&sru->entity.lock);
+	return ret;
 }
 
 static void sru_try_format(struct vsp1_sru *sru,
@@ -217,10 +224,15 @@
 	struct vsp1_sru *sru = to_sru(subdev);
 	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
+	int ret = 0;
+
+	mutex_lock(&sru->entity.lock);
 
 	config = vsp1_entity_get_pad_config(&sru->entity, cfg, fmt->which);
-	if (!config)
-		return -EINVAL;
+	if (!config) {
+		ret = -EINVAL;
+		goto done;
+	}
 
 	sru_try_format(sru, config, fmt->pad, &fmt->format);
 
@@ -236,7 +248,9 @@
 		sru_try_format(sru, config, SRU_PAD_SOURCE, format);
 	}
 
-	return 0;
+done:
+	mutex_unlock(&sru->entity.lock);
+	return ret;
 }
 
 static const struct v4l2_subdev_pad_ops sru_pad_ops = {
@@ -257,7 +271,8 @@
 
 static void sru_configure(struct vsp1_entity *entity,
 			  struct vsp1_pipeline *pipe,
-			  struct vsp1_dl_list *dl, bool full)
+			  struct vsp1_dl_list *dl,
+			  enum vsp1_entity_params params)
 {
 	const struct vsp1_sru_param *param;
 	struct vsp1_sru *sru = to_sru(&entity->subdev);
@@ -265,7 +280,7 @@
 	struct v4l2_mbus_framefmt *output;
 	u32 ctrl0;
 
-	if (!full)
+	if (params != VSP1_ENTITY_PARAMS_INIT)
 		return;
 
 	input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
@@ -291,8 +306,27 @@
 	vsp1_sru_write(sru, dl, VI6_SRU_CTRL2, param->ctrl2);
 }
 
+static unsigned int sru_max_width(struct vsp1_entity *entity,
+				  struct vsp1_pipeline *pipe)
+{
+	struct vsp1_sru *sru = to_sru(&entity->subdev);
+	struct v4l2_mbus_framefmt *input;
+	struct v4l2_mbus_framefmt *output;
+
+	input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+					   SRU_PAD_SINK);
+	output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+					    SRU_PAD_SOURCE);
+
+	if (input->width != output->width)
+		return 512;
+	else
+		return 256;
+}
+
 static const struct vsp1_entity_operations sru_entity_ops = {
 	.configure = sru_configure,
+	.max_width = sru_max_width,
 };
 
 /* -----------------------------------------------------------------------------
diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/vsp1/vsp1_uds.c
index 652dcd8..da8f89a 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.c
+++ b/drivers/media/platform/vsp1/vsp1_uds.c
@@ -18,6 +18,7 @@
 
 #include "vsp1.h"
 #include "vsp1_dl.h"
+#include "vsp1_pipe.h"
 #include "vsp1_uds.h"
 
 #define UDS_MIN_SIZE				4U
@@ -133,6 +134,7 @@
 	struct vsp1_uds *uds = to_uds(subdev);
 	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
+	int ret = 0;
 
 	config = vsp1_entity_get_pad_config(&uds->entity, cfg, fse->which);
 	if (!config)
@@ -141,8 +143,12 @@
 	format = vsp1_entity_get_pad_format(&uds->entity, config,
 					    UDS_PAD_SINK);
 
-	if (fse->index || fse->code != format->code)
-		return -EINVAL;
+	mutex_lock(&uds->entity.lock);
+
+	if (fse->index || fse->code != format->code) {
+		ret = -EINVAL;
+		goto done;
+	}
 
 	if (fse->pad == UDS_PAD_SINK) {
 		fse->min_width = UDS_MIN_SIZE;
@@ -156,7 +162,9 @@
 				  &fse->max_height);
 	}
 
-	return 0;
+done:
+	mutex_unlock(&uds->entity.lock);
+	return ret;
 }
 
 static void uds_try_format(struct vsp1_uds *uds,
@@ -202,10 +210,15 @@
 	struct vsp1_uds *uds = to_uds(subdev);
 	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
+	int ret = 0;
+
+	mutex_lock(&uds->entity.lock);
 
 	config = vsp1_entity_get_pad_config(&uds->entity, cfg, fmt->which);
-	if (!config)
-		return -EINVAL;
+	if (!config) {
+		ret = -EINVAL;
+		goto done;
+	}
 
 	uds_try_format(uds, config, fmt->pad, &fmt->format);
 
@@ -221,7 +234,9 @@
 		uds_try_format(uds, config, UDS_PAD_SOURCE, format);
 	}
 
-	return 0;
+done:
+	mutex_unlock(&uds->entity.lock);
+	return ret;
 }
 
 /* -----------------------------------------------------------------------------
@@ -246,7 +261,8 @@
 
 static void uds_configure(struct vsp1_entity *entity,
 			  struct vsp1_pipeline *pipe,
-			  struct vsp1_dl_list *dl, bool full)
+			  struct vsp1_dl_list *dl,
+			  enum vsp1_entity_params params)
 {
 	struct vsp1_uds *uds = to_uds(&entity->subdev);
 	const struct v4l2_mbus_framefmt *output;
@@ -255,7 +271,16 @@
 	unsigned int vscale;
 	bool multitap;
 
-	if (!full)
+	if (params == VSP1_ENTITY_PARAMS_PARTITION) {
+		const struct v4l2_rect *clip = &pipe->partition;
+
+		vsp1_uds_write(uds, dl, VI6_UDS_CLIP_SIZE,
+			       (clip->width << VI6_UDS_CLIP_SIZE_HSIZE_SHIFT) |
+			       (clip->height << VI6_UDS_CLIP_SIZE_VSIZE_SHIFT));
+		return;
+	}
+
+	if (params != VSP1_ENTITY_PARAMS_INIT)
 		return;
 
 	input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
@@ -287,17 +312,39 @@
 		       (uds_passband_width(vscale)
 				<< VI6_UDS_PASS_BWIDTH_V_SHIFT));
 
-	/* Set the scaling ratios and the output size. */
+	/* Set the scaling ratios. */
 	vsp1_uds_write(uds, dl, VI6_UDS_SCALE,
 		       (hscale << VI6_UDS_SCALE_HFRAC_SHIFT) |
 		       (vscale << VI6_UDS_SCALE_VFRAC_SHIFT));
-	vsp1_uds_write(uds, dl, VI6_UDS_CLIP_SIZE,
-		       (output->width << VI6_UDS_CLIP_SIZE_HSIZE_SHIFT) |
-		       (output->height << VI6_UDS_CLIP_SIZE_VSIZE_SHIFT));
+}
+
+static unsigned int uds_max_width(struct vsp1_entity *entity,
+				  struct vsp1_pipeline *pipe)
+{
+	struct vsp1_uds *uds = to_uds(&entity->subdev);
+	const struct v4l2_mbus_framefmt *output;
+	const struct v4l2_mbus_framefmt *input;
+	unsigned int hscale;
+
+	input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+					   UDS_PAD_SINK);
+	output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+					    UDS_PAD_SOURCE);
+	hscale = output->width / input->width;
+
+	if (hscale <= 2)
+		return 256;
+	else if (hscale <= 4)
+		return 512;
+	else if (hscale <= 8)
+		return 1024;
+	else
+		return 2048;
 }
 
 static const struct vsp1_entity_operations uds_entity_ops = {
 	.configure = uds_configure,
+	.max_width = uds_max_width,
 };
 
 /* -----------------------------------------------------------------------------
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 9fb4fc2..d351b9c 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -117,9 +117,9 @@
 	/* Retrieve format information and select the default format if the
 	 * requested format isn't supported.
 	 */
-	info = vsp1_get_format_info(pix->pixelformat);
+	info = vsp1_get_format_info(video->vsp1, pix->pixelformat);
 	if (info == NULL)
-		info = vsp1_get_format_info(VSP1_VIDEO_DEF_FORMAT);
+		info = vsp1_get_format_info(video->vsp1, VSP1_VIDEO_DEF_FORMAT);
 
 	pix->pixelformat = info->fourcc;
 	pix->colorspace = V4L2_COLORSPACE_SRGB;
@@ -169,6 +169,113 @@
 }
 
 /* -----------------------------------------------------------------------------
+ * VSP1 Partition Algorithm support
+ */
+
+static void vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
+{
+	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+	const struct v4l2_mbus_framefmt *format;
+	struct vsp1_entity *entity;
+	unsigned int div_size;
+
+	format = vsp1_entity_get_pad_format(&pipe->output->entity,
+					    pipe->output->entity.config,
+					    RWPF_PAD_SOURCE);
+	div_size = format->width;
+
+	/* Gen2 hardware doesn't require image partitioning. */
+	if (vsp1->info->gen == 2) {
+		pipe->div_size = div_size;
+		pipe->partitions = 1;
+		return;
+	}
+
+	list_for_each_entry(entity, &pipe->entities, list_pipe) {
+		unsigned int entity_max = VSP1_VIDEO_MAX_WIDTH;
+
+		if (entity->ops->max_width) {
+			entity_max = entity->ops->max_width(entity, pipe);
+			if (entity_max)
+				div_size = min(div_size, entity_max);
+		}
+	}
+
+	pipe->div_size = div_size;
+	pipe->partitions = DIV_ROUND_UP(format->width, div_size);
+}
+
+/**
+ * vsp1_video_partition - Calculate the active partition output window
+ *
+ * @div_size: pre-determined maximum partition division size
+ * @index: partition index
+ *
+ * Returns a v4l2_rect describing the partition window.
+ */
+static struct v4l2_rect vsp1_video_partition(struct vsp1_pipeline *pipe,
+					     unsigned int div_size,
+					     unsigned int index)
+{
+	const struct v4l2_mbus_framefmt *format;
+	struct v4l2_rect partition;
+	unsigned int modulus;
+
+	format = vsp1_entity_get_pad_format(&pipe->output->entity,
+					    pipe->output->entity.config,
+					    RWPF_PAD_SOURCE);
+
+	/* A single partition simply processes the output size in full. */
+	if (pipe->partitions <= 1) {
+		partition.left = 0;
+		partition.top = 0;
+		partition.width = format->width;
+		partition.height = format->height;
+		return partition;
+	}
+
+	/* Initialise the partition with sane starting conditions. */
+	partition.left = index * div_size;
+	partition.top = 0;
+	partition.width = div_size;
+	partition.height = format->height;
+
+	modulus = format->width % div_size;
+
+	/*
+	 * We need to prevent the last partition from being smaller than the
+	 * *minimum* width of the hardware capabilities.
+	 *
+	 * If the modulus is less than half of the partition size,
+	 * the penultimate partition is reduced to half, which is added
+	 * to the final partition: |1234|1234|1234|12|341|
+	 * to prevents this:       |1234|1234|1234|1234|1|.
+	 */
+	if (modulus) {
+		/*
+		 * pipe->partitions is 1 based, whilst index is a 0 based index.
+		 * Normalise this locally.
+		 */
+		unsigned int partitions = pipe->partitions - 1;
+
+		if (modulus < div_size / 2) {
+			if (index == partitions - 1) {
+				/* Halve the penultimate partition. */
+				partition.width = div_size / 2;
+			} else if (index == partitions) {
+				/* Increase the final partition. */
+				partition.width = (div_size / 2) + modulus;
+				partition.left -= div_size / 2;
+			}
+		} else if (index == partitions) {
+			partition.width = modulus;
+		}
+	}
+
+	return partition;
+}
+
+/* -----------------------------------------------------------------------------
  * Pipeline Management
  */
 
@@ -234,44 +341,81 @@
 {
 	struct vsp1_video *video = rwpf->video;
 	struct vsp1_vb2_buffer *buf;
-	unsigned long flags;
 
 	buf = vsp1_video_complete_buffer(video);
 	if (buf == NULL)
 		return;
 
-	spin_lock_irqsave(&pipe->irqlock, flags);
-
 	video->rwpf->mem = buf->mem;
 	pipe->buffers_ready |= 1 << video->pipe_index;
+}
 
-	spin_unlock_irqrestore(&pipe->irqlock, flags);
+static void vsp1_video_pipeline_run_partition(struct vsp1_pipeline *pipe,
+					      struct vsp1_dl_list *dl)
+{
+	struct vsp1_entity *entity;
+
+	pipe->partition = vsp1_video_partition(pipe, pipe->div_size,
+					       pipe->current_partition);
+
+	list_for_each_entry(entity, &pipe->entities, list_pipe) {
+		if (entity->ops->configure)
+			entity->ops->configure(entity, pipe, dl,
+					       VSP1_ENTITY_PARAMS_PARTITION);
+	}
 }
 
 static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
 {
 	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
 	struct vsp1_entity *entity;
-	unsigned int i;
 
 	if (!pipe->dl)
 		pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
 
+	/*
+	 * Start with the runtime parameters as the configure operation can
+	 * compute/cache information needed when configuring partitions. This
+	 * is the case with flipping in the WPF.
+	 */
 	list_for_each_entry(entity, &pipe->entities, list_pipe) {
 		if (entity->ops->configure)
-			entity->ops->configure(entity, pipe, pipe->dl, false);
+			entity->ops->configure(entity, pipe, pipe->dl,
+					       VSP1_ENTITY_PARAMS_RUNTIME);
 	}
 
-	for (i = 0; i < vsp1->info->rpf_count; ++i) {
-		struct vsp1_rwpf *rwpf = pipe->inputs[i];
+	/* Run the first partition */
+	pipe->current_partition = 0;
+	vsp1_video_pipeline_run_partition(pipe, pipe->dl);
 
-		if (rwpf)
-			vsp1_rwpf_set_memory(rwpf, pipe->dl);
+	/* Process consecutive partitions as necessary */
+	for (pipe->current_partition = 1;
+	     pipe->current_partition < pipe->partitions;
+	     pipe->current_partition++) {
+		struct vsp1_dl_list *dl;
+
+		/*
+		 * Partition configuration operations will utilise
+		 * the pipe->current_partition variable to determine
+		 * the work they should complete.
+		 */
+		dl = vsp1_dl_list_get(pipe->output->dlm);
+
+		/*
+		 * An incomplete chain will still function, but output only
+		 * the partitions that had a dl available. The frame end
+		 * interrupt will be marked on the last dl in the chain.
+		 */
+		if (!dl) {
+			dev_err(vsp1->dev, "Failed to obtain a dl list. Frame will be incomplete\n");
+			break;
+		}
+
+		vsp1_video_pipeline_run_partition(pipe, dl);
+		vsp1_dl_list_add_chain(pipe->dl, dl);
 	}
 
-	if (!pipe->lif)
-		vsp1_rwpf_set_memory(pipe->output, pipe->dl);
-
+	/* Complete, and commit the head display list. */
 	vsp1_dl_list_commit(pipe->dl);
 	pipe->dl = NULL;
 
@@ -285,6 +429,8 @@
 	unsigned long flags;
 	unsigned int i;
 
+	spin_lock_irqsave(&pipe->irqlock, flags);
+
 	/* Complete buffers on all video nodes. */
 	for (i = 0; i < vsp1->info->rpf_count; ++i) {
 		if (!pipe->inputs[i])
@@ -295,8 +441,6 @@
 
 	vsp1_video_frame_end(pipe, pipe->output);
 
-	spin_lock_irqsave(&pipe->irqlock, flags);
-
 	state = pipe->state;
 	pipe->state = VSP1_PIPELINE_STOPPED;
 
@@ -607,6 +751,9 @@
 {
 	struct vsp1_entity *entity;
 
+	/* Determine this pipelines sizes for image partitioning support. */
+	vsp1_video_pipeline_setup_partitions(pipe);
+
 	/* Prepare the display list. */
 	pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
 	if (!pipe->dl)
@@ -634,7 +781,8 @@
 		vsp1_entity_route_setup(entity, pipe->dl);
 
 		if (entity->ops->configure)
-			entity->ops->configure(entity, pipe, pipe->dl, true);
+			entity->ops->configure(entity, pipe, pipe->dl,
+					       VSP1_ENTITY_PARAMS_INIT);
 	}
 
 	return 0;
@@ -675,6 +823,14 @@
 	unsigned long flags;
 	int ret;
 
+	/*
+	 * Clear the buffers ready flag to make sure the device won't be started
+	 * by a QBUF on the video node on the other side of the pipeline.
+	 */
+	spin_lock_irqsave(&video->irqlock, flags);
+	pipe->buffers_ready &= ~(1 << video->pipe_index);
+	spin_unlock_irqrestore(&video->irqlock, flags);
+
 	mutex_lock(&pipe->lock);
 	if (--pipe->stream_count == pipe->num_inputs) {
 		/* Stop the pipeline. */
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index 3198316..7c48f81 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -173,58 +173,28 @@
 	vsp1_dlm_destroy(wpf->dlm);
 }
 
-static void wpf_set_memory(struct vsp1_entity *entity, struct vsp1_dl_list *dl)
-{
-	struct vsp1_rwpf *wpf = entity_to_rwpf(entity);
-	const struct v4l2_pix_format_mplane *format = &wpf->format;
-	struct vsp1_rwpf_memory mem = wpf->mem;
-	unsigned int flip = wpf->flip.active;
-	unsigned int offset;
-
-	/* Update the memory offsets based on flipping configuration. The
-	 * destination addresses point to the locations where the VSP starts
-	 * writing to memory, which can be different corners of the image
-	 * depending on vertical flipping. Horizontal flipping is handled
-	 * through a line buffer and doesn't modify the start address.
-	 */
-	if (flip & BIT(WPF_CTRL_VFLIP)) {
-		mem.addr[0] += (format->height - 1)
-			     * format->plane_fmt[0].bytesperline;
-
-		if (format->num_planes > 1) {
-			offset = (format->height / wpf->fmtinfo->vsub - 1)
-			       * format->plane_fmt[1].bytesperline;
-			mem.addr[1] += offset;
-			mem.addr[2] += offset;
-		}
-	}
-
-	vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_Y, mem.addr[0]);
-	vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_C0, mem.addr[1]);
-	vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_C1, mem.addr[2]);
-}
-
 static void wpf_configure(struct vsp1_entity *entity,
 			  struct vsp1_pipeline *pipe,
-			  struct vsp1_dl_list *dl, bool full)
+			  struct vsp1_dl_list *dl,
+			  enum vsp1_entity_params params)
 {
 	struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
 	struct vsp1_device *vsp1 = wpf->entity.vsp1;
 	const struct v4l2_mbus_framefmt *source_format;
 	const struct v4l2_mbus_framefmt *sink_format;
-	const struct v4l2_rect *crop;
 	unsigned int i;
 	u32 outfmt = 0;
 	u32 srcrpf = 0;
 
-	if (!full) {
+	if (params == VSP1_ENTITY_PARAMS_RUNTIME) {
 		const unsigned int mask = BIT(WPF_CTRL_VFLIP)
 					| BIT(WPF_CTRL_HFLIP);
+		unsigned long flags;
 
-		spin_lock(&wpf->flip.lock);
+		spin_lock_irqsave(&wpf->flip.lock, flags);
 		wpf->flip.active = (wpf->flip.active & ~mask)
 				 | (wpf->flip.pending & mask);
-		spin_unlock(&wpf->flip.lock);
+		spin_unlock_irqrestore(&wpf->flip.lock, flags);
 
 		outfmt = (wpf->alpha << VI6_WPF_OUTFMT_PDV_SHIFT) | wpf->outfmt;
 
@@ -237,17 +207,6 @@
 		return;
 	}
 
-	/* Cropping */
-	crop = vsp1_rwpf_get_crop(wpf, wpf->entity.config);
-
-	vsp1_wpf_write(wpf, dl, VI6_WPF_HSZCLIP, VI6_WPF_SZCLIP_EN |
-		       (crop->left << VI6_WPF_SZCLIP_OFST_SHIFT) |
-		       (crop->width << VI6_WPF_SZCLIP_SIZE_SHIFT));
-	vsp1_wpf_write(wpf, dl, VI6_WPF_VSZCLIP, VI6_WPF_SZCLIP_EN |
-		       (crop->top << VI6_WPF_SZCLIP_OFST_SHIFT) |
-		       (crop->height << VI6_WPF_SZCLIP_SIZE_SHIFT));
-
-	/* Format */
 	sink_format = vsp1_entity_get_pad_format(&wpf->entity,
 						 wpf->entity.config,
 						 RWPF_PAD_SINK);
@@ -255,6 +214,80 @@
 						   wpf->entity.config,
 						   RWPF_PAD_SOURCE);
 
+	if (params == VSP1_ENTITY_PARAMS_PARTITION) {
+		const struct v4l2_pix_format_mplane *format = &wpf->format;
+		struct vsp1_rwpf_memory mem = wpf->mem;
+		unsigned int flip = wpf->flip.active;
+		unsigned int width = source_format->width;
+		unsigned int height = source_format->height;
+		unsigned int offset;
+
+		/*
+		 * Cropping. The partition algorithm can split the image into
+		 * multiple slices.
+		 */
+		if (pipe->partitions > 1)
+			width = pipe->partition.width;
+
+		vsp1_wpf_write(wpf, dl, VI6_WPF_HSZCLIP, VI6_WPF_SZCLIP_EN |
+			       (0 << VI6_WPF_SZCLIP_OFST_SHIFT) |
+			       (width << VI6_WPF_SZCLIP_SIZE_SHIFT));
+		vsp1_wpf_write(wpf, dl, VI6_WPF_VSZCLIP, VI6_WPF_SZCLIP_EN |
+			       (0 << VI6_WPF_SZCLIP_OFST_SHIFT) |
+			       (height << VI6_WPF_SZCLIP_SIZE_SHIFT));
+
+		if (pipe->lif)
+			return;
+
+		/*
+		 * Update the memory offsets based on flipping configuration.
+		 * The destination addresses point to the locations where the
+		 * VSP starts writing to memory, which can be different corners
+		 * of the image depending on vertical flipping.
+		 */
+		if (pipe->partitions > 1) {
+			const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
+
+			/*
+			 * Horizontal flipping is handled through a line buffer
+			 * and doesn't modify the start address, but still needs
+			 * to be handled when image partitioning is in effect to
+			 * order the partitions correctly.
+			 */
+			if (flip & BIT(WPF_CTRL_HFLIP))
+				offset = format->width - pipe->partition.left
+					- pipe->partition.width;
+			else
+				offset = pipe->partition.left;
+
+			mem.addr[0] += offset * fmtinfo->bpp[0] / 8;
+			if (format->num_planes > 1) {
+				mem.addr[1] += offset / fmtinfo->hsub
+					     * fmtinfo->bpp[1] / 8;
+				mem.addr[2] += offset / fmtinfo->hsub
+					     * fmtinfo->bpp[2] / 8;
+			}
+		}
+
+		if (flip & BIT(WPF_CTRL_VFLIP)) {
+			mem.addr[0] += (format->height - 1)
+				     * format->plane_fmt[0].bytesperline;
+
+			if (format->num_planes > 1) {
+				offset = (format->height / wpf->fmtinfo->vsub - 1)
+				       * format->plane_fmt[1].bytesperline;
+				mem.addr[1] += offset;
+				mem.addr[2] += offset;
+			}
+		}
+
+		vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_Y, mem.addr[0]);
+		vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_C0, mem.addr[1]);
+		vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_C1, mem.addr[2]);
+		return;
+	}
+
+	/* Format */
 	if (!pipe->lif) {
 		const struct v4l2_pix_format_mplane *format = &wpf->format;
 		const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
@@ -318,12 +351,11 @@
 	/* Enable interrupts */
 	vsp1_dl_list_write(dl, VI6_WPF_IRQ_STA(wpf->entity.index), 0);
 	vsp1_dl_list_write(dl, VI6_WPF_IRQ_ENB(wpf->entity.index),
-			   VI6_WFP_IRQ_ENB_FREE);
+			   VI6_WFP_IRQ_ENB_DFEE);
 }
 
 static const struct vsp1_entity_operations wpf_entity_ops = {
 	.destroy = vsp1_wpf_destroy,
-	.set_memory = wpf_set_memory,
 	.configure = wpf_configure,
 };
 
@@ -360,7 +392,7 @@
 		return ERR_PTR(ret);
 
 	/* Initialize the display list manager. */
-	wpf->dlm = vsp1_dlm_create(vsp1, index, 4);
+	wpf->dlm = vsp1_dlm_create(vsp1, index, 64);
 	if (!wpf->dlm) {
 		ret = -ENOMEM;
 		goto error;
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index 7ae1a13..1d5836c 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -474,7 +474,7 @@
 	spin_unlock_irq(&dma->queued_lock);
 }
 
-static struct vb2_ops xvip_dma_queue_qops = {
+static const struct vb2_ops xvip_dma_queue_qops = {
 	.queue_setup = xvip_dma_queue_setup,
 	.buf_prepare = xvip_dma_buffer_prepare,
 	.buf_queue = xvip_dma_buffer_queue,
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 471d6a8..ee0470a 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -509,7 +509,6 @@
 static struct i2c_driver si470x_i2c_driver = {
 	.driver = {
 		.name		= "si470x",
-		.owner		= THIS_MODULE,
 #ifdef CONFIG_PM_SLEEP
 		.pm		= &si470x_i2c_pm,
 #endif
diff --git a/drivers/media/radio/si4713/radio-usb-si4713.c b/drivers/media/radio/si4713/radio-usb-si4713.c
index 5146be2..e5e5a16 100644
--- a/drivers/media/radio/si4713/radio-usb-si4713.c
+++ b/drivers/media/radio/si4713/radio-usb-si4713.c
@@ -402,7 +402,7 @@
 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
-static struct i2c_algorithm si4713_algo = {
+static const struct i2c_algorithm si4713_algo = {
 	.master_xfer   = si4713_transfer,
 	.functionality = si4713_functionality,
 };
diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c
index e0c531f..5cf983b 100644
--- a/drivers/media/rc/igorplugusb.c
+++ b/drivers/media/rc/igorplugusb.c
@@ -203,7 +203,8 @@
 	 * This device can only store 36 pulses + spaces, which is not enough
 	 * for the NEC protocol and many others.
 	 */
-	rc->allowed_protocols = RC_BIT_ALL & ~(RC_BIT_NEC | RC_BIT_RC6_6A_20 |
+	rc->allowed_protocols = RC_BIT_ALL & ~(RC_BIT_NEC | RC_BIT_NECX |
+			RC_BIT_NEC32 | RC_BIT_RC6_6A_20 |
 			RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE |
 			RC_BIT_SONY20 | RC_BIT_MCE_KBD | RC_BIT_SANYO);
 
diff --git a/drivers/media/rc/img-ir/img-ir-nec.c b/drivers/media/rc/img-ir/img-ir-nec.c
index 27a7ea8..0931493 100644
--- a/drivers/media/rc/img-ir/img-ir-nec.c
+++ b/drivers/media/rc/img-ir/img-ir-nec.c
@@ -34,19 +34,21 @@
 				bitrev8(addr_inv) << 16 |
 				bitrev8(data)     <<  8 |
 				bitrev8(data_inv);
+		request->protocol = RC_TYPE_NEC32;
 	} else if ((addr_inv ^ addr) != 0xff) {
 		/* Extended NEC */
 		/* scan encoding: AAaaDD */
 		request->scancode = addr     << 16 |
 				addr_inv <<  8 |
 				data;
+		request->protocol = RC_TYPE_NECX;
 	} else {
 		/* Normal NEC */
 		/* scan encoding: AADD */
 		request->scancode = addr << 8 |
 				data;
+		request->protocol = RC_TYPE_NEC;
 	}
-	request->protocol = RC_TYPE_NEC;
 	return IMG_IR_SCANCODE;
 }
 
@@ -109,7 +111,7 @@
  *        http://wiki.altium.com/display/ADOH/NEC+Infrared+Transmission+Protocol
  */
 struct img_ir_decoder img_ir_nec = {
-	.type = RC_BIT_NEC,
+	.type = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32,
 	.control = {
 		.decoden = 1,
 		.code_type = IMG_IR_CODETYPE_PULSEDIST,
diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
index bea0d1e..2a9d155 100644
--- a/drivers/media/rc/ir-nec-decoder.c
+++ b/drivers/media/rc/ir-nec-decoder.c
@@ -49,6 +49,7 @@
 {
 	struct nec_dec *data = &dev->raw->nec;
 	u32 scancode;
+	enum rc_type rc_type;
 	u8 address, not_address, command, not_command;
 	bool send_32bits = false;
 
@@ -171,22 +172,25 @@
 			 * least Apple and TiVo remotes */
 			scancode = data->bits;
 			IR_dprintk(1, "NEC (modified) scancode 0x%08x\n", scancode);
+			rc_type = RC_TYPE_NEC32;
 		} else if ((address ^ not_address) != 0xff) {
 			/* Extended NEC */
 			scancode = address     << 16 |
 				   not_address <<  8 |
 				   command;
 			IR_dprintk(1, "NEC (Ext) scancode 0x%06x\n", scancode);
+			rc_type = RC_TYPE_NECX;
 		} else {
 			/* Normal NEC */
 			scancode = address << 8 | command;
 			IR_dprintk(1, "NEC scancode 0x%04x\n", scancode);
+			rc_type = RC_TYPE_NEC;
 		}
 
 		if (data->is_nec_x)
 			data->necx_repeat = true;
 
-		rc_keydown(dev, RC_TYPE_NEC, scancode, 0);
+		rc_keydown(dev, rc_type, scancode, 0);
 		data->state = STATE_INACTIVE;
 		return 0;
 	}
@@ -198,7 +202,7 @@
 }
 
 static struct ir_raw_handler nec_handler = {
-	.protocols	= RC_BIT_NEC,
+	.protocols	= RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32,
 	.decode		= ir_nec_decode,
 };
 
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index e0e2ede..5cc54c9 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -248,7 +248,7 @@
 				toggle = 0;
 				break;
 			case 24:
-				protocol = RC_BIT_RC6_6A_24;
+				protocol = RC_TYPE_RC6_6A_24;
 				toggle = 0;
 				break;
 			case 32:
@@ -257,7 +257,7 @@
 					toggle = !!(scancode & RC6_6A_MCE_TOGGLE_MASK);
 					scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
 				} else {
-					protocol = RC_BIT_RC6_6A_32;
+					protocol = RC_TYPE_RC6_6A_32;
 					toggle = 0;
 				}
 				break;
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 00215f3..04fedaa 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -769,21 +769,11 @@
 			rawir.pulse ? "pulse" : "space", rawir.duration);
 
 		ir_raw_event_store_with_filter(nvt->rdev, &rawir);
-
-		/*
-		 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
-		 * indicates end of IR signal, but new data incoming. In both
-		 * cases, it means we're ready to call ir_raw_event_handle
-		 */
-		if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) {
-			nvt_dbg("Calling ir_raw_event_handle (signal end)\n");
-			ir_raw_event_handle(nvt->rdev);
-		}
 	}
 
 	nvt->pkts = 0;
 
-	nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");
+	nvt_dbg("Calling ir_raw_event_handle\n");
 	ir_raw_event_handle(nvt->rdev);
 
 	nvt_dbg_verbose("%s done", __func__);
@@ -801,8 +791,7 @@
 /* copy data from hardware rx fifo into driver buffer */
 static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
 {
-	u8 fifocount, val;
-	unsigned int b_idx;
+	u8 fifocount;
 	int i;
 
 	/* Get count of how many bytes to read from RX FIFO */
@@ -810,21 +799,11 @@
 
 	nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);
 
-	b_idx = nvt->pkts;
-
-	/* This should never happen, but lets check anyway... */
-	if (b_idx + fifocount > RX_BUF_LEN) {
-		nvt_process_rx_ir_data(nvt);
-		b_idx = 0;
-	}
-
 	/* Read fifocount bytes from CIR Sample RX FIFO register */
-	for (i = 0; i < fifocount; i++) {
-		val = nvt_cir_reg_read(nvt, CIR_SRXFIFO);
-		nvt->buf[b_idx + i] = val;
-	}
+	for (i = 0; i < fifocount; i++)
+		nvt->buf[i] = nvt_cir_reg_read(nvt, CIR_SRXFIFO);
 
-	nvt->pkts += fifocount;
+	nvt->pkts = fifocount;
 	nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);
 
 	nvt_process_rx_ir_data(nvt);
@@ -886,6 +865,15 @@
 	status = nvt_cir_reg_read(nvt, CIR_IRSTS);
 	iren = nvt_cir_reg_read(nvt, CIR_IREN);
 
+	/* At least NCT6779D creates a spurious interrupt when the
+	 * logical device is being disabled.
+	 */
+	if (status == 0xff && iren == 0xff) {
+		spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+		nvt_dbg_verbose("Spurious interrupt detected");
+		return IRQ_HANDLED;
+	}
+
 	/* IRQ may be shared with CIR WAKE, therefore check for each
 	 * status bit whether the related interrupt source is enabled
 	 */
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 144304c..205ecc6 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -26,6 +26,7 @@
 /* Used to handle IR raw handler extensions */
 static DEFINE_MUTEX(ir_raw_handler_lock);
 static LIST_HEAD(ir_raw_handler_list);
+static DEFINE_MUTEX(available_protocols_lock);
 static u64 available_protocols;
 
 static int ir_raw_event_thread(void *data)
@@ -234,9 +235,9 @@
 ir_raw_get_allowed_protocols(void)
 {
 	u64 protocols;
-	mutex_lock(&ir_raw_handler_lock);
+	mutex_lock(&available_protocols_lock);
 	protocols = available_protocols;
-	mutex_unlock(&ir_raw_handler_lock);
+	mutex_unlock(&available_protocols_lock);
 	return protocols;
 }
 
@@ -330,7 +331,9 @@
 	if (ir_raw_handler->raw_register)
 		list_for_each_entry(raw, &ir_raw_client_list, list)
 			ir_raw_handler->raw_register(raw->dev);
+	mutex_lock(&available_protocols_lock);
 	available_protocols |= ir_raw_handler->protocols;
+	mutex_unlock(&available_protocols_lock);
 	mutex_unlock(&ir_raw_handler_lock);
 
 	return 0;
@@ -349,7 +352,9 @@
 		if (ir_raw_handler->raw_unregister)
 			ir_raw_handler->raw_unregister(raw->dev);
 	}
+	mutex_lock(&available_protocols_lock);
 	available_protocols &= ~protocols;
+	mutex_unlock(&available_protocols_lock);
 	mutex_unlock(&ir_raw_handler_lock);
 }
 EXPORT_SYMBOL(ir_raw_handler_unregister);
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 8e7f292..d9c1f2f 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -795,7 +795,9 @@
 	{ RC_BIT_UNKNOWN,	"unknown",	NULL			},
 	{ RC_BIT_RC5 |
 	  RC_BIT_RC5X,		"rc-5",		"ir-rc5-decoder"	},
-	{ RC_BIT_NEC,		"nec",		"ir-nec-decoder"	},
+	{ RC_BIT_NEC |
+	  RC_BIT_NECX |
+	  RC_BIT_NEC32,		"nec",		"ir-nec-decoder"	},
 	{ RC_BIT_RC6_0 |
 	  RC_BIT_RC6_6A_20 |
 	  RC_BIT_RC6_6A_24 |
@@ -1460,6 +1462,10 @@
 	dev->input_dev->phys = dev->input_phys;
 	dev->input_dev->name = dev->input_name;
 
+	rc = input_register_device(dev->input_dev);
+	if (rc)
+		goto out_table;
+
 	/*
 	 * Default delay of 250ms is too short for some protocols, especially
 	 * since the timeout is currently set to 250ms. Increase it to 500ms,
@@ -1475,11 +1481,6 @@
 	 */
 	dev->input_dev->rep[REP_PERIOD] = 125;
 
-	/* rc_open will be called here */
-	rc = input_register_device(dev->input_dev);
-	if (rc)
-		goto out_table;
-
 	path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
 	dev_info(&dev->dev, "%s as %s\n",
 		dev->input_name ?: "Unspecified device", path ?: "N/A");
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index ec8016d..05ba47b 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -124,6 +124,41 @@
 #define USB_RR3USB_PRODUCT_ID	0x0001
 #define USB_RR3IIUSB_PRODUCT_ID	0x0005
 
+
+/*
+ * The redrat3 encodes an IR signal as set of different lengths and a set
+ * of indices into those lengths. This sets how much two lengths must
+ * differ before they are considered distinct, the value is specified
+ * in microseconds.
+ * Default 5, value 0 to 127.
+ */
+static int length_fuzz = 5;
+module_param(length_fuzz, uint, 0644);
+MODULE_PARM_DESC(length_fuzz, "Length Fuzz (0-127)");
+
+/*
+ * When receiving a continuous ir stream (for example when a user is
+ * holding a button down on a remote), this specifies the minimum size
+ * of a space when the redrat3 sends a irdata packet to the host. Specified
+ * in miliseconds. Default value 18ms.
+ * The value can be between 2 and 30 inclusive.
+ */
+static int minimum_pause = 18;
+module_param(minimum_pause, uint, 0644);
+MODULE_PARM_DESC(minimum_pause, "Minimum Pause in ms (2-30)");
+
+/*
+ * The carrier frequency is measured during the first pulse of the IR
+ * signal. The larger the number of periods used To measure, the more
+ * accurate the result is likely to be, however some signals have short
+ * initial pulses, so in some case it may be necessary to reduce this value.
+ * Default 8, value 1 to 255.
+ */
+static int periods_measure_carrier = 8;
+module_param(periods_measure_carrier, uint, 0644);
+MODULE_PARM_DESC(periods_measure_carrier, "Number of Periods to Measure Carrier (1-255)");
+
+
 struct redrat3_header {
 	__be16 length;
 	__be16 transfer_type;
@@ -188,9 +223,6 @@
 	/* usb dma */
 	dma_addr_t dma_in;
 
-	/* rx signal timeout */
-	u32 hw_timeout;
-
 	/* Is the device currently transmitting?*/
 	bool transmitting;
 
@@ -372,7 +404,7 @@
 	/* add a trailing space */
 	rawir.pulse = false;
 	rawir.timeout = true;
-	rawir.duration = US_TO_NS(rr3->hw_timeout);
+	rawir.duration = rr3->rc->timeout;
 	dev_dbg(dev, "storing trailing timeout with duration %d\n",
 							rawir.duration);
 	ir_raw_event_store_with_filter(rr3->rc, &rawir);
@@ -480,7 +512,7 @@
 	struct redrat3_dev *rr3 = rc_dev->priv;
 	struct usb_device *udev = rr3->udev;
 	struct device *dev = rr3->dev;
-	u32 *timeout;
+	__be32 *timeout;
 	int ret;
 
 	timeout = kmalloc(sizeof(*timeout), GFP_KERNEL);
@@ -495,10 +527,9 @@
 	dev_dbg(dev, "set ir parm timeout %d ret 0x%02x\n",
 						be32_to_cpu(*timeout), ret);
 
-	if (ret == sizeof(*timeout)) {
-		rr3->hw_timeout = timeoutns / 1000;
+	if (ret == sizeof(*timeout))
 		ret = 0;
-	} else if (ret >= 0)
+	else if (ret >= 0)
 		ret = -EIO;
 
 	kfree(timeout);
@@ -529,12 +560,25 @@
 			     RR3_CPUCS_REG_ADDR, 0, val, len, HZ * 25);
 	dev_dbg(dev, "reset returned 0x%02x\n", rc);
 
-	*val = 5;
+	*val = length_fuzz;
 	rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM,
 			     USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
 			     RR3_IR_IO_LENGTH_FUZZ, 0, val, len, HZ * 25);
 	dev_dbg(dev, "set ir parm len fuzz %d rc 0x%02x\n", *val, rc);
 
+	*val = (65536 - (minimum_pause * 2000)) / 256;
+	rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM,
+			     USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+			     RR3_IR_IO_MIN_PAUSE, 0, val, len, HZ * 25);
+	dev_dbg(dev, "set ir parm min pause %d rc 0x%02x\n", *val, rc);
+
+	*val = periods_measure_carrier;
+	rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM,
+			     USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+			     RR3_IR_IO_PERIODS_MF, 0, val, len, HZ * 25);
+	dev_dbg(dev, "set ir parm periods measure carrier %d rc 0x%02x", *val,
+									rc);
+
 	*val = RR3_DRIVER_MAXLENS;
 	rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM,
 			     USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
@@ -889,7 +933,7 @@
 	rc->allowed_protocols = RC_BIT_ALL;
 	rc->min_timeout = MS_TO_NS(RR3_RX_MIN_TIMEOUT);
 	rc->max_timeout = MS_TO_NS(RR3_RX_MAX_TIMEOUT);
-	rc->timeout = US_TO_NS(rr3->hw_timeout);
+	rc->timeout = US_TO_NS(redrat3_get_timeout(rr3));
 	rc->s_timeout = redrat3_set_timeout;
 	rc->tx_ir = redrat3_transmit_ir;
 	rc->s_tx_carrier = redrat3_set_tx_carrier;
@@ -998,9 +1042,6 @@
 	if (retval < 0)
 		goto error;
 
-	/* store current hardware timeout, in µs */
-	rr3->hw_timeout = redrat3_get_timeout(rr3);
-
 	/* default.. will get overridden by any sends with a freq defined */
 	rr3->carrier = 38000;
 
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 815243c..4004260 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -499,7 +499,7 @@
 	struct streamzap_ir *sz = usb_get_intfdata(intf);
 
 	if (usb_submit_urb(sz->urb_in, GFP_ATOMIC)) {
-		dev_err(sz->dev, "Error sumbiting urb\n");
+		dev_err(sz->dev, "Error submitting urb\n");
 		return -EIO;
 	}
 
diff --git a/drivers/media/spi/Kconfig b/drivers/media/spi/Kconfig
new file mode 100644
index 0000000..a21f5a3
--- /dev/null
+++ b/drivers/media/spi/Kconfig
@@ -0,0 +1,14 @@
+if VIDEO_V4L2
+
+menu "SPI helper chips"
+	visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST
+
+config VIDEO_GS1662
+	tristate "Gennum Serializers video"
+	depends on SPI && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+	---help---
+	  Enable the GS1662 driver which serializes video streams.
+
+endmenu
+
+endif
diff --git a/drivers/media/spi/Makefile b/drivers/media/spi/Makefile
new file mode 100644
index 0000000..ea64013
--- /dev/null
+++ b/drivers/media/spi/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_GS1662) += gs1662.o
diff --git a/drivers/media/spi/gs1662.c b/drivers/media/spi/gs1662.c
new file mode 100644
index 0000000..d76f362
--- /dev/null
+++ b/drivers/media/spi/gs1662.c
@@ -0,0 +1,478 @@
+/*
+ * GS1662 device registration.
+ *
+ * Copyright (C) 2015-2016 Nexvision
+ * Author: Charles-Antoine Couret <charles-antoine.couret@nexvision.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+#include <linux/platform_device.h>
+#include <linux/ctype.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-dv-timings.h>
+#include <linux/v4l2-dv-timings.h>
+
+#define REG_STATUS			0x04
+#define REG_FORCE_FMT			0x06
+#define REG_LINES_PER_FRAME		0x12
+#define REG_WORDS_PER_LINE		0x13
+#define REG_WORDS_PER_ACT_LINE		0x14
+#define REG_ACT_LINES_PER_FRAME	0x15
+
+#define MASK_H_LOCK			0x001
+#define MASK_V_LOCK			0x002
+#define MASK_STD_LOCK			0x004
+#define MASK_FORCE_STD			0x020
+#define MASK_STD_STATUS		0x3E0
+
+#define GS_WIDTH_MIN			720
+#define GS_WIDTH_MAX			2048
+#define GS_HEIGHT_MIN			487
+#define GS_HEIGHT_MAX			1080
+#define GS_PIXELCLOCK_MIN		10519200
+#define GS_PIXELCLOCK_MAX		74250000
+
+struct gs {
+	struct spi_device *pdev;
+	struct v4l2_subdev sd;
+	struct v4l2_dv_timings current_timings;
+	int enabled;
+};
+
+struct gs_reg_fmt {
+	u16 reg_value;
+	struct v4l2_dv_timings format;
+};
+
+struct gs_reg_fmt_custom {
+	u16 reg_value;
+	__u32 width;
+	__u32 height;
+	__u64 pixelclock;
+	__u32 interlaced;
+};
+
+static const struct spi_device_id gs_id[] = {
+	{ "gs1662", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(spi, gs_id);
+
+static const struct v4l2_dv_timings fmt_cap[] = {
+	V4L2_DV_BT_SDI_720X487I60,
+	V4L2_DV_BT_CEA_720X576P50,
+	V4L2_DV_BT_CEA_1280X720P24,
+	V4L2_DV_BT_CEA_1280X720P25,
+	V4L2_DV_BT_CEA_1280X720P30,
+	V4L2_DV_BT_CEA_1280X720P50,
+	V4L2_DV_BT_CEA_1280X720P60,
+	V4L2_DV_BT_CEA_1920X1080P24,
+	V4L2_DV_BT_CEA_1920X1080P25,
+	V4L2_DV_BT_CEA_1920X1080P30,
+	V4L2_DV_BT_CEA_1920X1080I50,
+	V4L2_DV_BT_CEA_1920X1080I60,
+};
+
+static const struct gs_reg_fmt reg_fmt[] = {
+	{ 0x00, V4L2_DV_BT_CEA_1280X720P60 },
+	{ 0x01, V4L2_DV_BT_CEA_1280X720P60 },
+	{ 0x02, V4L2_DV_BT_CEA_1280X720P30 },
+	{ 0x03, V4L2_DV_BT_CEA_1280X720P30 },
+	{ 0x04, V4L2_DV_BT_CEA_1280X720P50 },
+	{ 0x05, V4L2_DV_BT_CEA_1280X720P50 },
+	{ 0x06, V4L2_DV_BT_CEA_1280X720P25 },
+	{ 0x07, V4L2_DV_BT_CEA_1280X720P25 },
+	{ 0x08, V4L2_DV_BT_CEA_1280X720P24 },
+	{ 0x09, V4L2_DV_BT_CEA_1280X720P24 },
+	{ 0x0A, V4L2_DV_BT_CEA_1920X1080I60 },
+	{ 0x0B, V4L2_DV_BT_CEA_1920X1080P30 },
+
+	/* Default value: keep this field before 0xC */
+	{ 0x14, V4L2_DV_BT_CEA_1920X1080I50 },
+	{ 0x0C, V4L2_DV_BT_CEA_1920X1080I50 },
+	{ 0x0D, V4L2_DV_BT_CEA_1920X1080P25 },
+	{ 0x0E, V4L2_DV_BT_CEA_1920X1080P25 },
+	{ 0x10, V4L2_DV_BT_CEA_1920X1080P24 },
+	{ 0x12, V4L2_DV_BT_CEA_1920X1080P24 },
+	{ 0x16, V4L2_DV_BT_SDI_720X487I60 },
+	{ 0x19, V4L2_DV_BT_SDI_720X487I60 },
+	{ 0x18, V4L2_DV_BT_CEA_720X576P50 },
+	{ 0x1A, V4L2_DV_BT_CEA_720X576P50 },
+
+	/* Implement following timings before enable it.
+	 * Because of we don't have access to these theoretical timings yet.
+	 * Workaround: use functions to get and set registers for these formats.
+	 */
+#if 0
+	{ 0x0F, V4L2_DV_BT_XXX_1920X1080I25 }, /* SMPTE 274M */
+	{ 0x11, V4L2_DV_BT_XXX_1920X1080I24 }, /* SMPTE 274M */
+	{ 0x13, V4L2_DV_BT_XXX_1920X1080I25 }, /* SMPTE 274M */
+	{ 0x15, V4L2_DV_BT_XXX_1920X1035I60 }, /* SMPTE 260M */
+	{ 0x17, V4L2_DV_BT_SDI_720X507I60 }, /* SMPTE 125M */
+	{ 0x1B, V4L2_DV_BT_SDI_720X507I60 }, /* SMPTE 125M */
+	{ 0x1C, V4L2_DV_BT_XXX_2048X1080P25 }, /* SMPTE 428.1M */
+#endif
+};
+
+static const struct v4l2_dv_timings_cap gs_timings_cap = {
+	.type = V4L2_DV_BT_656_1120,
+	/* keep this initialization for compatibility with GCC < 4.4.6 */
+	.reserved = { 0 },
+	V4L2_INIT_BT_TIMINGS(GS_WIDTH_MIN, GS_WIDTH_MAX, GS_HEIGHT_MIN,
+			     GS_HEIGHT_MAX, GS_PIXELCLOCK_MIN,
+			     GS_PIXELCLOCK_MAX,
+			     V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_SDI,
+			     V4L2_DV_BT_CAP_PROGRESSIVE
+			     | V4L2_DV_BT_CAP_INTERLACED)
+};
+
+static int gs_read_register(struct spi_device *spi, u16 addr, u16 *value)
+{
+	int ret;
+	u16 buf_addr = (0x8000 | (0x0FFF & addr));
+	u16 buf_value = 0;
+	struct spi_message msg;
+	struct spi_transfer tx[] = {
+		{
+			.tx_buf = &buf_addr,
+			.len = 2,
+			.delay_usecs = 1,
+		}, {
+			.rx_buf = &buf_value,
+			.len = 2,
+			.delay_usecs = 1,
+		},
+	};
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&tx[0], &msg);
+	spi_message_add_tail(&tx[1], &msg);
+	ret = spi_sync(spi, &msg);
+
+	*value = buf_value;
+
+	return ret;
+}
+
+static int gs_write_register(struct spi_device *spi, u16 addr, u16 value)
+{
+	int ret;
+	u16 buf_addr = addr;
+	u16 buf_value = value;
+	struct spi_message msg;
+	struct spi_transfer tx[] = {
+		{
+			.tx_buf = &buf_addr,
+			.len = 2,
+			.delay_usecs = 1,
+		}, {
+			.tx_buf = &buf_value,
+			.len = 2,
+			.delay_usecs = 1,
+		},
+	};
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&tx[0], &msg);
+	spi_message_add_tail(&tx[1], &msg);
+	ret = spi_sync(spi, &msg);
+
+	return ret;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int gs_g_register(struct v4l2_subdev *sd,
+		  struct v4l2_dbg_register *reg)
+{
+	struct spi_device *spi = v4l2_get_subdevdata(sd);
+	u16 val;
+	int ret;
+
+	ret = gs_read_register(spi, reg->reg & 0xFFFF, &val);
+	reg->val = val;
+	reg->size = 2;
+	return ret;
+}
+
+static int gs_s_register(struct v4l2_subdev *sd,
+		  const struct v4l2_dbg_register *reg)
+{
+	struct spi_device *spi = v4l2_get_subdevdata(sd);
+
+	return gs_write_register(spi, reg->reg & 0xFFFF, reg->val & 0xFFFF);
+}
+#endif
+
+static int gs_status_format(u16 status, struct v4l2_dv_timings *timings)
+{
+	int std = (status & MASK_STD_STATUS) >> 5;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(reg_fmt); i++) {
+		if (reg_fmt[i].reg_value == std) {
+			*timings = reg_fmt[i].format;
+			return 0;
+		}
+	}
+
+	return -ERANGE;
+}
+
+static u16 get_register_timings(struct v4l2_dv_timings *timings)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(reg_fmt); i++) {
+		if (v4l2_match_dv_timings(timings, &reg_fmt[i].format, 0,
+					  false))
+			return reg_fmt[i].reg_value | MASK_FORCE_STD;
+	}
+
+	return 0x0;
+}
+
+static inline struct gs *to_gs(struct v4l2_subdev *sd)
+{
+	return container_of(sd, struct gs, sd);
+}
+
+static int gs_s_dv_timings(struct v4l2_subdev *sd,
+		    struct v4l2_dv_timings *timings)
+{
+	struct gs *gs = to_gs(sd);
+	int reg_value;
+
+	reg_value = get_register_timings(timings);
+	if (reg_value == 0x0)
+		return -EINVAL;
+
+	gs->current_timings = *timings;
+	return 0;
+}
+
+static int gs_g_dv_timings(struct v4l2_subdev *sd,
+		    struct v4l2_dv_timings *timings)
+{
+	struct gs *gs = to_gs(sd);
+
+	*timings = gs->current_timings;
+	return 0;
+}
+
+static int gs_query_dv_timings(struct v4l2_subdev *sd,
+			struct v4l2_dv_timings *timings)
+{
+	struct gs *gs = to_gs(sd);
+	struct v4l2_dv_timings fmt;
+	u16 reg_value, i;
+	int ret;
+
+	if (gs->enabled)
+		return -EBUSY;
+
+	/*
+	 * Check if the component detect a line, a frame or something else
+	 * which looks like a video signal activity.
+	 */
+	for (i = 0; i < 4; i++) {
+		gs_read_register(gs->pdev, REG_LINES_PER_FRAME + i, &reg_value);
+		if (reg_value)
+			break;
+	}
+
+	/* If no register reports a video signal */
+	if (i >= 4)
+		return -ENOLINK;
+
+	gs_read_register(gs->pdev, REG_STATUS, &reg_value);
+	if (!(reg_value & MASK_H_LOCK) || !(reg_value & MASK_V_LOCK))
+		return -ENOLCK;
+	if (!(reg_value & MASK_STD_LOCK))
+		return -ERANGE;
+
+	ret = gs_status_format(reg_value, &fmt);
+
+	if (ret < 0)
+		return ret;
+
+	*timings = fmt;
+	return 0;
+}
+
+static int gs_enum_dv_timings(struct v4l2_subdev *sd,
+		       struct v4l2_enum_dv_timings *timings)
+{
+	if (timings->index >= ARRAY_SIZE(fmt_cap))
+		return -EINVAL;
+
+	if (timings->pad != 0)
+		return -EINVAL;
+
+	timings->timings = fmt_cap[timings->index];
+	return 0;
+}
+
+static int gs_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct gs *gs = to_gs(sd);
+	int reg_value;
+
+	if (gs->enabled == enable)
+		return 0;
+
+	gs->enabled = enable;
+
+	if (enable) {
+		/* To force the specific format */
+		reg_value = get_register_timings(&gs->current_timings);
+		return gs_write_register(gs->pdev, REG_FORCE_FMT, reg_value);
+	}
+
+	/* To renable auto-detection mode */
+	return gs_write_register(gs->pdev, REG_FORCE_FMT, 0x0);
+}
+
+static int gs_g_input_status(struct v4l2_subdev *sd, u32 *status)
+{
+	struct gs *gs = to_gs(sd);
+	u16 reg_value, i;
+	int ret;
+
+	/*
+	 * Check if the component detect a line, a frame or something else
+	 * which looks like a video signal activity.
+	 */
+	for (i = 0; i < 4; i++) {
+		ret = gs_read_register(gs->pdev,
+				       REG_LINES_PER_FRAME + i, &reg_value);
+		if (reg_value)
+			break;
+		if (ret) {
+			*status = V4L2_IN_ST_NO_POWER;
+			return ret;
+		}
+	}
+
+	/* If no register reports a video signal */
+	if (i >= 4)
+		*status |= V4L2_IN_ST_NO_SIGNAL;
+
+	ret = gs_read_register(gs->pdev, REG_STATUS, &reg_value);
+	if (!(reg_value & MASK_H_LOCK))
+		*status |=  V4L2_IN_ST_NO_H_LOCK;
+	if (!(reg_value & MASK_V_LOCK))
+		*status |=  V4L2_IN_ST_NO_V_LOCK;
+	if (!(reg_value & MASK_STD_LOCK))
+		*status |=  V4L2_IN_ST_NO_STD_LOCK;
+
+	return ret;
+}
+
+static int gs_dv_timings_cap(struct v4l2_subdev *sd,
+			     struct v4l2_dv_timings_cap *cap)
+{
+	if (cap->pad != 0)
+		return -EINVAL;
+
+	*cap = gs_timings_cap;
+	return 0;
+}
+
+/* V4L2 core operation handlers */
+static const struct v4l2_subdev_core_ops gs_core_ops = {
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.g_register = gs_g_register,
+	.s_register = gs_s_register,
+#endif
+};
+
+static const struct v4l2_subdev_video_ops gs_video_ops = {
+	.s_dv_timings = gs_s_dv_timings,
+	.g_dv_timings = gs_g_dv_timings,
+	.s_stream = gs_s_stream,
+	.g_input_status = gs_g_input_status,
+	.query_dv_timings = gs_query_dv_timings,
+};
+
+static const struct v4l2_subdev_pad_ops gs_pad_ops = {
+	.enum_dv_timings = gs_enum_dv_timings,
+	.dv_timings_cap = gs_dv_timings_cap,
+};
+
+/* V4L2 top level operation handlers */
+static const struct v4l2_subdev_ops gs_ops = {
+	.core = &gs_core_ops,
+	.video = &gs_video_ops,
+	.pad = &gs_pad_ops,
+};
+
+static int gs_probe(struct spi_device *spi)
+{
+	int ret;
+	struct gs *gs;
+	struct v4l2_subdev *sd;
+
+	gs = devm_kzalloc(&spi->dev, sizeof(struct gs), GFP_KERNEL);
+	if (!gs)
+		return -ENOMEM;
+
+	gs->pdev = spi;
+	sd = &gs->sd;
+
+	spi->mode = SPI_MODE_0;
+	spi->irq = -1;
+	spi->max_speed_hz = 10000000;
+	spi->bits_per_word = 16;
+	ret = spi_setup(spi);
+	v4l2_spi_subdev_init(sd, spi, &gs_ops);
+
+	gs->current_timings = reg_fmt[0].format;
+	gs->enabled = 0;
+
+	/* Set H_CONFIG to SMPTE timings */
+	gs_write_register(spi, 0x0, 0x300);
+
+	return ret;
+}
+
+static int gs_remove(struct spi_device *spi)
+{
+	struct v4l2_subdev *sd = spi_get_drvdata(spi);
+	struct gs *gs = to_gs(sd);
+
+	v4l2_device_unregister_subdev(sd);
+	kfree(gs);
+	return 0;
+}
+
+static struct spi_driver gs_driver = {
+	.driver = {
+		.name		= "gs1662",
+		.owner		= THIS_MODULE,
+	},
+
+	.probe		= gs_probe,
+	.remove		= gs_remove,
+	.id_table	= gs_id,
+};
+
+module_spi_driver(gs_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Charles-Antoine Couret <charles-antoine.couret@nexvision.fr>");
+MODULE_DESCRIPTION("Gennum GS1662 HD/SD-SDI Serializer driver");
diff --git a/drivers/media/tuners/fc0011.h b/drivers/media/tuners/fc0011.h
index 81bb568..438cf89 100644
--- a/drivers/media/tuners/fc0011.h
+++ b/drivers/media/tuners/fc0011.h
@@ -1,7 +1,6 @@
 #ifndef LINUX_FC0011_H_
 #define LINUX_FC0011_H_
 
-#include <linux/kconfig.h>
 #include "dvb_frontend.h"
 
 
diff --git a/drivers/media/tuners/fc0012.h b/drivers/media/tuners/fc0012.h
index 9ad3285..4a23e41 100644
--- a/drivers/media/tuners/fc0012.h
+++ b/drivers/media/tuners/fc0012.h
@@ -21,7 +21,6 @@
 #ifndef _FC0012_H_
 #define _FC0012_H_
 
-#include <linux/kconfig.h>
 #include "dvb_frontend.h"
 #include "fc001x-common.h"
 
diff --git a/drivers/media/tuners/fc0013.h b/drivers/media/tuners/fc0013.h
index e130bd7..8c34105 100644
--- a/drivers/media/tuners/fc0013.h
+++ b/drivers/media/tuners/fc0013.h
@@ -22,7 +22,6 @@
 #ifndef _FC0013_H_
 #define _FC0013_H_
 
-#include <linux/kconfig.h>
 #include "dvb_frontend.h"
 #include "fc001x-common.h"
 
diff --git a/drivers/media/tuners/max2165.h b/drivers/media/tuners/max2165.h
index 5054f01..aadd9fe 100644
--- a/drivers/media/tuners/max2165.h
+++ b/drivers/media/tuners/max2165.h
@@ -22,8 +22,6 @@
 #ifndef __MAX2165_H__
 #define __MAX2165_H__
 
-#include <linux/kconfig.h>
-
 struct dvb_frontend;
 struct i2c_adapter;
 
diff --git a/drivers/media/tuners/mc44s803.h b/drivers/media/tuners/mc44s803.h
index b3e614b..6b40df3 100644
--- a/drivers/media/tuners/mc44s803.h
+++ b/drivers/media/tuners/mc44s803.h
@@ -22,8 +22,6 @@
 #ifndef MC44S803_H
 #define MC44S803_H
 
-#include <linux/kconfig.h>
-
 struct dvb_frontend;
 struct i2c_adapter;
 
diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c
index 7f0b9d5..dfec237 100644
--- a/drivers/media/tuners/mt2063.c
+++ b/drivers/media/tuners/mt2063.c
@@ -2201,7 +2201,7 @@
 	return 0;
 }
 
-static struct dvb_tuner_ops mt2063_ops = {
+static const struct dvb_tuner_ops mt2063_ops = {
 	.info = {
 		 .name = "MT2063 Silicon Tuner",
 		 .frequency_min = 45000000,
diff --git a/drivers/media/tuners/mt20xx.c b/drivers/media/tuners/mt20xx.c
index 9e03104..52da467 100644
--- a/drivers/media/tuners/mt20xx.c
+++ b/drivers/media/tuners/mt20xx.c
@@ -363,7 +363,7 @@
 	return ret;
 }
 
-static struct dvb_tuner_ops mt2032_tuner_ops = {
+static const struct dvb_tuner_ops mt2032_tuner_ops = {
 	.set_analog_params = mt2032_set_params,
 	.release           = microtune_release,
 	.get_frequency     = microtune_get_frequency,
@@ -563,7 +563,7 @@
 	return ret;
 }
 
-static struct dvb_tuner_ops mt2050_tuner_ops = {
+static const struct dvb_tuner_ops mt2050_tuner_ops = {
 	.set_analog_params = mt2050_set_params,
 	.release           = microtune_release,
 	.get_frequency     = microtune_get_frequency,
diff --git a/drivers/media/tuners/mxl5005s.h b/drivers/media/tuners/mxl5005s.h
index 5764b12..d842734 100644
--- a/drivers/media/tuners/mxl5005s.h
+++ b/drivers/media/tuners/mxl5005s.h
@@ -23,8 +23,6 @@
 #ifndef __MXL5005S_H
 #define __MXL5005S_H
 
-#include <linux/kconfig.h>
-
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
diff --git a/drivers/media/tuners/mxl5007t.c b/drivers/media/tuners/mxl5007t.c
index f4ae04c..42569c6 100644
--- a/drivers/media/tuners/mxl5007t.c
+++ b/drivers/media/tuners/mxl5007t.c
@@ -794,7 +794,7 @@
 
 /* ------------------------------------------------------------------------- */
 
-static struct dvb_tuner_ops mxl5007t_tuner_ops = {
+static const struct dvb_tuner_ops mxl5007t_tuner_ops = {
 	.info = {
 		.name = "MaxLinear MxL5007T",
 	},
diff --git a/drivers/media/tuners/r820t.h b/drivers/media/tuners/r820t.h
index b1e5661..fdcab91 100644
--- a/drivers/media/tuners/r820t.h
+++ b/drivers/media/tuners/r820t.h
@@ -21,7 +21,6 @@
 #ifndef R820T_H
 #define R820T_H
 
-#include <linux/kconfig.h>
 #include "dvb_frontend.h"
 
 enum r820t_chip {
diff --git a/drivers/media/tuners/si2157.h b/drivers/media/tuners/si2157.h
index 5f1a60b..76807f5 100644
--- a/drivers/media/tuners/si2157.h
+++ b/drivers/media/tuners/si2157.h
@@ -17,7 +17,6 @@
 #ifndef SI2157_H
 #define SI2157_H
 
-#include <linux/kconfig.h>
 #include <media/media-device.h>
 #include "dvb_frontend.h"
 
diff --git a/drivers/media/tuners/tda18212.h b/drivers/media/tuners/tda18212.h
index e58c909..6391daf 100644
--- a/drivers/media/tuners/tda18212.h
+++ b/drivers/media/tuners/tda18212.h
@@ -21,7 +21,6 @@
 #ifndef TDA18212_H
 #define TDA18212_H
 
-#include <linux/kconfig.h>
 #include "dvb_frontend.h"
 
 struct tda18212_config {
diff --git a/drivers/media/tuners/tda18218.h b/drivers/media/tuners/tda18218.h
index 1eacb4f..076b5f2 100644
--- a/drivers/media/tuners/tda18218.h
+++ b/drivers/media/tuners/tda18218.h
@@ -21,7 +21,6 @@
 #ifndef TDA18218_H
 #define TDA18218_H
 
-#include <linux/kconfig.h>
 #include "dvb_frontend.h"
 
 struct tda18218_config {
diff --git a/drivers/media/tuners/tda18271-fe.c b/drivers/media/tuners/tda18271-fe.c
index f862074..2d50e8b 100644
--- a/drivers/media/tuners/tda18271-fe.c
+++ b/drivers/media/tuners/tda18271-fe.c
@@ -18,11 +18,12 @@
     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
-#include <linux/delay.h>
-#include <linux/videodev2.h>
 #include "tda18271-priv.h"
 #include "tda8290.h"
 
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+
 int tda18271_debug;
 module_param_named(debug, tda18271_debug, int, 0644);
 MODULE_PARM_DESC(debug, "set debug level "
@@ -646,7 +647,7 @@
 	unsigned int i;
 	int ret;
 
-	tda_info("tda18271: performing RF tracking filter calibration\n");
+	tda_info("performing RF tracking filter calibration\n");
 
 	/* wait for die temperature stabilization */
 	msleep(200);
@@ -692,12 +693,12 @@
 	if (tda_fail(ret))
 		goto fail;
 
-	tda_info("tda18271: RF tracking filter calibration complete\n");
+	tda_info("RF tracking filter calibration complete\n");
 
 	priv->cal_initialized = true;
 	goto end;
 fail:
-	tda_info("tda18271: RF tracking filter calibration failed!\n");
+	tda_info("RF tracking filter calibration failed!\n");
 end:
 	return ret;
 }
diff --git a/drivers/media/tuners/tda18271-priv.h b/drivers/media/tuners/tda18271-priv.h
index cc80f54..0bcc735 100644
--- a/drivers/media/tuners/tda18271-priv.h
+++ b/drivers/media/tuners/tda18271-priv.h
@@ -21,6 +21,8 @@
 #ifndef __TDA18271_PRIV_H__
 #define __TDA18271_PRIV_H__
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/mutex.h>
diff --git a/drivers/media/tuners/tda827x.c b/drivers/media/tuners/tda827x.c
index edcb4a7..5050ce9 100644
--- a/drivers/media/tuners/tda827x.c
+++ b/drivers/media/tuners/tda827x.c
@@ -818,7 +818,7 @@
 	return fe->ops.tuner_ops.sleep(fe);
 }
 
-static struct dvb_tuner_ops tda827xo_tuner_ops = {
+static const struct dvb_tuner_ops tda827xo_tuner_ops = {
 	.info = {
 		.name = "Philips TDA827X",
 		.frequency_min  =  55000000,
@@ -834,7 +834,7 @@
 	.get_bandwidth = tda827x_get_bandwidth,
 };
 
-static struct dvb_tuner_ops tda827xa_tuner_ops = {
+static const struct dvb_tuner_ops tda827xa_tuner_ops = {
 	.info = {
 		.name = "Philips TDA827XA",
 		.frequency_min  =  44000000,
diff --git a/drivers/media/tuners/tea5761.c b/drivers/media/tuners/tea5761.c
index bf78cb9..36b0b1e 100644
--- a/drivers/media/tuners/tea5761.c
+++ b/drivers/media/tuners/tea5761.c
@@ -301,7 +301,7 @@
 	return 0;
 }
 
-static struct dvb_tuner_ops tea5761_tuner_ops = {
+static const struct dvb_tuner_ops tea5761_tuner_ops = {
 	.info = {
 		.name           = "tea5761", // Philips TEA5761HN FM Radio
 	},
diff --git a/drivers/media/tuners/tea5767.c b/drivers/media/tuners/tea5767.c
index 36e85d8..d62a6d6 100644
--- a/drivers/media/tuners/tea5767.c
+++ b/drivers/media/tuners/tea5767.c
@@ -10,6 +10,8 @@
  * from their contributions on DScaler.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
@@ -370,17 +372,18 @@
 {
 	struct tuner_i2c_props i2c = { .adap = i2c_adap, .addr = i2c_addr };
 	unsigned char buffer[7] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
 	int rc;
 
 	if ((rc = tuner_i2c_xfer_recv(&i2c, buffer, 7))< 5) {
-		printk(KERN_WARNING "It is not a TEA5767. Received %i bytes.\n", rc);
+		pr_warn("It is not a TEA5767. Received %i bytes.\n", rc);
 		return -EINVAL;
 	}
 
 	/* If all bytes are the same then it's a TV tuner and not a tea5767 */
 	if (buffer[0] == buffer[1] && buffer[0] == buffer[2] &&
 	    buffer[0] == buffer[3] && buffer[0] == buffer[4]) {
-		printk(KERN_WARNING "All bytes are equal. It is not a TEA5767\n");
+		pr_warn("All bytes are equal. It is not a TEA5767\n");
 		return -EINVAL;
 	}
 
@@ -390,7 +393,7 @@
 	 *  Byte 5: bit 7:0 : == 0
 	 */
 	if (((buffer[3] & 0x0f) != 0x00) || (buffer[4] != 0x00)) {
-		printk(KERN_WARNING "Chip ID is not zero. It is not a TEA5767\n");
+		pr_warn("Chip ID is not zero. It is not a TEA5767\n");
 		return -EINVAL;
 	}
 
@@ -423,7 +426,7 @@
 	return 0;
 }
 
-static struct dvb_tuner_ops tea5767_tuner_ops = {
+static const struct dvb_tuner_ops tea5767_tuner_ops = {
 	.info = {
 		.name           = "tea5767", // Philips TEA5767HN FM Radio
 	},
diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
index 8e9ce14..9ba9582 100644
--- a/drivers/media/tuners/tuner-simple.c
+++ b/drivers/media/tuners/tuner-simple.c
@@ -1035,7 +1035,7 @@
 	return 0;
 }
 
-static struct dvb_tuner_ops simple_tuner_ops = {
+static const struct dvb_tuner_ops simple_tuner_ops = {
 	.init              = simple_init,
 	.sleep             = simple_sleep,
 	.set_analog_params = simple_set_params,
diff --git a/drivers/media/tuners/xc5000.h b/drivers/media/tuners/xc5000.h
index 00ba29e..336bd49 100644
--- a/drivers/media/tuners/xc5000.h
+++ b/drivers/media/tuners/xc5000.h
@@ -22,7 +22,6 @@
 #ifndef __XC5000_H__
 #define __XC5000_H__
 
-#include <linux/kconfig.h>
 #include <linux/firmware.h>
 
 struct dvb_frontend;
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 3c556ee..8251942 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -605,7 +605,7 @@
 	mutex_unlock(&s->v4l2_lock);
 }
 
-static struct vb2_ops airspy_vb2_ops = {
+static const struct vb2_ops airspy_vb2_ops = {
 	.queue_setup            = airspy_queue_setup,
 	.buf_queue              = airspy_buf_queue,
 	.start_streaming        = airspy_start_streaming,
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index 3d6687f..1e66e78 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -344,7 +344,8 @@
 	rc->dev.parent = &dev->usbdev->dev;
 	rc->driver_name = "au0828-input";
 	rc->driver_type = RC_DRIVER_IR_RAW;
-	rc->allowed_protocols = RC_BIT_NEC | RC_BIT_RC5;
+	rc->allowed_protocols = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32 |
+								RC_BIT_RC5;
 
 	/* all done */
 	err = rc_register_device(rc);
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 13b8387..85dd9a8 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -928,7 +928,7 @@
 	del_timer_sync(&dev->vbi_timeout);
 }
 
-static struct vb2_ops au0828_video_qops = {
+static const struct vb2_ops au0828_video_qops = {
 	.queue_setup     = queue_setup,
 	.buf_prepare     = buffer_prepare,
 	.buf_queue       = buffer_queue,
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index 4cd5fa9..8263c4b 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -635,7 +635,7 @@
 	return vmalloc_to_page(pageptr);
 }
 
-static struct snd_pcm_ops snd_cx231xx_pcm_capture = {
+static const struct snd_pcm_ops snd_cx231xx_pcm_capture = {
 	.open = snd_cx231xx_capture_open,
 	.close = snd_cx231xx_pcm_close,
 	.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
index 4919137..2f52d66 100644
--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
@@ -1264,7 +1264,10 @@
 				   dev->board.agc_analog_digital_select_gpio,
 				   analog_or_digital);
 
-	return status;
+	if (status < 0)
+		return status;
+
+	return 0;
 }
 
 int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3)
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index c63248a..36bc254 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -486,7 +486,7 @@
 		.output_mode = OUT_MODE_VIP11,
 		.demod_xfer_mode = 0,
 		.ctl_pin_status_mask = 0xFFFFFFC4,
-		.agc_analog_digital_select_gpio = 0x00,	/* According with PV cxPolaris.inf file */
+		.agc_analog_digital_select_gpio = 0x1c,
 		.tuner_sif_gpio = -1,
 		.tuner_scl_gpio = -1,
 		.tuner_sda_gpio = -1,
@@ -1186,12 +1186,12 @@
 */
 void cx231xx_release_resources(struct cx231xx *dev)
 {
+	cx231xx_ir_exit(dev);
+
 	cx231xx_release_analog_resources(dev);
 
 	cx231xx_remove_from_devlist(dev);
 
-	cx231xx_ir_exit(dev);
-
 	/* Release I2C buses */
 	cx231xx_dev_uninit(dev);
 
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 8ec05cb..8b099fe 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -712,6 +712,7 @@
 			break;
 		case CX231XX_BOARD_CNXT_RDE_253S:
 		case CX231XX_BOARD_CNXT_RDU_253S:
+		case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
 			errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1);
 			break;
 		case CX231XX_BOARD_HAUPPAUGE_EXETER:
@@ -738,14 +739,21 @@
 		case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
 		case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
 		case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
-		errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
+			errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
 			break;
 		default:
 			break;
 		}
 	}
 
-	return errCode ? -EINVAL : 0;
+	if (errCode < 0) {
+		dev_err(dev->dev, "Failed to set devmode to %s: error: %i",
+			dev->mode == CX231XX_DIGITAL_MODE ? "digital" : "analog",
+			errCode);
+		return errCode;
+	}
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(cx231xx_set_mode);
 
@@ -799,7 +807,7 @@
 	case -ESHUTDOWN:
 		return;
 	default:		/* error */
-		cx231xx_isocdbg("urb completition error %d.\n", urb->status);
+		cx231xx_isocdbg("urb completion error %d.\n", urb->status);
 		break;
 	}
 
@@ -842,8 +850,11 @@
 	case -ENOENT:
 	case -ESHUTDOWN:
 		return;
+	case -EPIPE:		/* stall */
+		cx231xx_isocdbg("urb completion error - device is stalled.\n");
+		return;
 	default:		/* error */
-		cx231xx_isocdbg("urb completition error %d.\n", urb->status);
+		cx231xx_isocdbg("urb completion error %d.\n", urb->status);
 		break;
 	}
 
@@ -867,6 +878,7 @@
 	struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq;
 	struct urb *urb;
 	int i;
+	bool broken_pipe = false;
 
 	cx231xx_isocdbg("cx231xx: called cx231xx_uninit_isoc\n");
 
@@ -886,12 +898,19 @@
 						  transfer_buffer[i],
 						  urb->transfer_dma);
 			}
+			if (urb->status == -EPIPE) {
+				broken_pipe = true;
+			}
 			usb_free_urb(urb);
 			dev->video_mode.isoc_ctl.urb[i] = NULL;
 		}
 		dev->video_mode.isoc_ctl.transfer_buffer[i] = NULL;
 	}
 
+	if (broken_pipe) {
+		cx231xx_isocdbg("Reset endpoint to recover broken pipe.");
+		usb_reset_endpoint(dev->udev, dev->video_mode.end_point_addr);
+	}
 	kfree(dev->video_mode.isoc_ctl.urb);
 	kfree(dev->video_mode.isoc_ctl.transfer_buffer);
 	kfree(dma_q->p_left_data);
@@ -918,6 +937,7 @@
 	struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq;
 	struct urb *urb;
 	int i;
+	bool broken_pipe = false;
 
 	cx231xx_isocdbg("cx231xx: called cx231xx_uninit_bulk\n");
 
@@ -937,12 +957,19 @@
 						transfer_buffer[i],
 						urb->transfer_dma);
 			}
+			if (urb->status == -EPIPE) {
+				broken_pipe = true;
+			}
 			usb_free_urb(urb);
 			dev->video_mode.bulk_ctl.urb[i] = NULL;
 		}
 		dev->video_mode.bulk_ctl.transfer_buffer[i] = NULL;
 	}
 
+	if (broken_pipe) {
+		cx231xx_isocdbg("Reset endpoint to recover broken pipe.");
+		usb_reset_endpoint(dev->udev, dev->video_mode.end_point_addr);
+	}
 	kfree(dev->video_mode.bulk_ctl.urb);
 	kfree(dev->video_mode.bulk_ctl.transfer_buffer);
 	kfree(dma_q->p_left_data);
@@ -1297,15 +1324,29 @@
 	dev->i2c_bus[2].i2c_reserve = 0;
 
 	/* register I2C buses */
-	cx231xx_i2c_register(&dev->i2c_bus[0]);
-	cx231xx_i2c_register(&dev->i2c_bus[1]);
-	cx231xx_i2c_register(&dev->i2c_bus[2]);
-
-	errCode = cx231xx_i2c_mux_create(dev);
+	errCode = cx231xx_i2c_register(&dev->i2c_bus[0]);
 	if (errCode < 0)
 		return errCode;
-	cx231xx_i2c_mux_register(dev, 0);
-	cx231xx_i2c_mux_register(dev, 1);
+	errCode = cx231xx_i2c_register(&dev->i2c_bus[1]);
+	if (errCode < 0)
+		return errCode;
+	errCode = cx231xx_i2c_register(&dev->i2c_bus[2]);
+	if (errCode < 0)
+		return errCode;
+
+	errCode = cx231xx_i2c_mux_create(dev);
+	if (errCode < 0) {
+		dev_err(dev->dev,
+			"%s: Failed to create I2C mux\n", __func__);
+		return errCode;
+	}
+	errCode = cx231xx_i2c_mux_register(dev, 0);
+	if (errCode < 0)
+		return errCode;
+
+	errCode = cx231xx_i2c_mux_register(dev, 1);
+	if (errCode < 0)
+		return errCode;
 
 	/* scan the real bus segments in the order of physical port numbers */
 	cx231xx_do_i2c_scan(dev, I2C_0);
@@ -1448,14 +1489,14 @@
 	/* set request */
 	if (!request) {
 		if (direction)
-			ven_req.bRequest = VRT_GET_GPIO;	/* 0x8 gpio */
+			ven_req.bRequest = VRT_GET_GPIO;	/* 0x9 gpio */
 		else
-			ven_req.bRequest = VRT_SET_GPIO;	/* 0x9 gpio */
+			ven_req.bRequest = VRT_SET_GPIO;	/* 0x8 gpio */
 	} else {
 		if (direction)
-			ven_req.bRequest = VRT_GET_GPIE;	/* 0xa gpie */
+			ven_req.bRequest = VRT_GET_GPIE;	/* 0xb gpie */
 		else
-			ven_req.bRequest = VRT_SET_GPIE;	/* 0xb gpie */
+			ven_req.bRequest = VRT_SET_GPIE;	/* 0xa gpie */
 	}
 
 	/* set index value */
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index ab2fb9f..1417515 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -65,6 +65,7 @@
 	struct dmx_frontend fe_hw;
 	struct dmx_frontend fe_mem;
 	struct dvb_net net;
+	struct i2c_client *i2c_client_demod;
 	struct i2c_client *i2c_client_tuner;
 };
 
@@ -150,18 +151,6 @@
 	.small_i2c = TDA18271_03_BYTE_CHUNK_INIT,
 };
 
-static const struct si2165_config hauppauge_930C_HD_1113xx_si2165_config = {
-	.i2c_addr	= 0x64,
-	.chip_mode	= SI2165_MODE_PLL_XTAL,
-	.ref_freq_Hz	= 16000000,
-};
-
-static const struct si2165_config pctv_quatro_stick_1114xx_si2165_config = {
-	.i2c_addr	= 0x64,
-	.chip_mode	= SI2165_MODE_PLL_EXT,
-	.ref_freq_Hz	= 24000000,
-};
-
 static struct lgdt3306a_config hauppauge_955q_lgdt3306a_config = {
 	.i2c_addr           = 0x59,
 	.qam_if_khz         = 4000,
@@ -586,8 +575,14 @@
 	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
 	dvb_dmxdev_release(&dvb->dmxdev);
 	dvb_dmx_release(&dvb->demux);
-	client = dvb->i2c_client_tuner;
 	/* remove I2C tuner */
+	client = dvb->i2c_client_tuner;
+	if (client) {
+		module_put(client->dev.driver->owner);
+		i2c_unregister_device(client);
+	}
+	/* remove I2C demod */
+	client = dvb->i2c_client_demod;
 	if (client) {
 		module_put(client->dev.driver->owner);
 		i2c_unregister_device(client);
@@ -749,19 +744,38 @@
 		break;
 
 	case CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx:
+	{
+		struct i2c_client *client;
+		struct i2c_board_info info;
+		struct si2165_platform_data si2165_pdata;
 
-		dev->dvb->frontend = dvb_attach(si2165_attach,
-			&hauppauge_930C_HD_1113xx_si2165_config,
-			demod_i2c
-			);
+		/* attach demod */
+		memset(&si2165_pdata, 0, sizeof(si2165_pdata));
+		si2165_pdata.fe = &dev->dvb->frontend;
+		si2165_pdata.chip_mode = SI2165_MODE_PLL_XTAL,
+		si2165_pdata.ref_freq_Hz = 16000000,
 
-		if (dev->dvb->frontend == NULL) {
+		memset(&info, 0, sizeof(struct i2c_board_info));
+		strlcpy(info.type, "si2165", I2C_NAME_SIZE);
+		info.addr = 0x64;
+		info.platform_data = &si2165_pdata;
+		request_module(info.type);
+		client = i2c_new_device(demod_i2c, &info);
+		if (client == NULL || client->dev.driver == NULL || dev->dvb->frontend == NULL) {
 			dev_err(dev->dev,
 				"Failed to attach SI2165 front end\n");
 			result = -EINVAL;
 			goto out_free;
 		}
 
+		if (!try_module_get(client->dev.driver->owner)) {
+			i2c_unregister_device(client);
+			result = -ENODEV;
+			goto out_free;
+		}
+
+		dvb->i2c_client_demod = client;
+
 		dev->dvb->frontend->ops.i2c_gate_ctrl = NULL;
 
 		/* define general-purpose callback pointer */
@@ -774,27 +788,43 @@
 
 		dev->cx231xx_reset_analog_tuner = NULL;
 		break;
-
+	}
 	case CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx:
 	{
 		struct i2c_client *client;
 		struct i2c_board_info info;
+		struct si2165_platform_data si2165_pdata;
 		struct si2157_config si2157_config;
 
+		/* attach demod */
+		memset(&si2165_pdata, 0, sizeof(si2165_pdata));
+		si2165_pdata.fe = &dev->dvb->frontend;
+		si2165_pdata.chip_mode = SI2165_MODE_PLL_EXT,
+		si2165_pdata.ref_freq_Hz = 24000000,
+
 		memset(&info, 0, sizeof(struct i2c_board_info));
-
-		dev->dvb->frontend = dvb_attach(si2165_attach,
-			&pctv_quatro_stick_1114xx_si2165_config,
-			demod_i2c
-			);
-
-		if (dev->dvb->frontend == NULL) {
+		strlcpy(info.type, "si2165", I2C_NAME_SIZE);
+		info.addr = 0x64;
+		info.platform_data = &si2165_pdata;
+		request_module(info.type);
+		client = i2c_new_device(demod_i2c, &info);
+		if (client == NULL || client->dev.driver == NULL || dev->dvb->frontend == NULL) {
 			dev_err(dev->dev,
 				"Failed to attach SI2165 front end\n");
 			result = -EINVAL;
 			goto out_free;
 		}
 
+		if (!try_module_get(client->dev.driver->owner)) {
+			i2c_unregister_device(client);
+			result = -ENODEV;
+			goto out_free;
+		}
+
+		dvb->i2c_client_demod = client;
+
+		memset(&info, 0, sizeof(struct i2c_board_info));
+
 		dev->dvb->frontend->ops.i2c_gate_ctrl = NULL;
 
 		/* define general-purpose callback pointer */
diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c
index 473cd34..35e9acf 100644
--- a/drivers/media/usb/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c
@@ -454,7 +454,7 @@
 	return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C;
 }
 
-static struct i2c_algorithm cx231xx_algo = {
+static const struct i2c_algorithm cx231xx_algo = {
 	.master_xfer = cx231xx_i2c_xfer,
 	.functionality = functionality,
 };
@@ -608,7 +608,7 @@
 	case I2C_1_MUX_3:
 		return dev->muxc->adapter[1];
 	default:
-		return NULL;
+		BUG();
 	}
 }
 EXPORT_SYMBOL_GPL(cx231xx_get_i2c_adap);
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 09e0f58..941ceff 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -1222,6 +1222,7 @@
 
 	/* Only process key if canary killed */
 	if (buf[16] != 0xff && buf[0] != 0x01) {
+		enum rc_type proto;
 		dev_dbg(&d->udev->dev, "%s: key pressed %*ph\n",
 				__func__, 4, buf + 12);
 
@@ -1237,11 +1238,13 @@
 				/* NEC */
 				state->rc_keycode = RC_SCANCODE_NEC(buf[12],
 								    buf[14]);
+				proto = RC_TYPE_NEC;
 			} else {
 				/* NEC extended*/
 				state->rc_keycode = RC_SCANCODE_NECX(buf[12] << 8 |
 								     buf[13],
 								     buf[14]);
+				proto = RC_TYPE_NECX;
 			}
 		} else {
 			/* 32 bit NEC */
@@ -1249,8 +1252,9 @@
 							      buf[13] << 16 |
 							      buf[14] << 8  |
 							      buf[15]);
+			proto = RC_TYPE_NEC32;
 		}
-		rc_keydown(d->rc_dev, RC_TYPE_NEC, state->rc_keycode, 0);
+		rc_keydown(d->rc_dev, proto, state->rc_keycode, 0);
 	} else {
 		dev_dbg(&d->udev->dev, "%s: no key press\n", __func__);
 		/* Invalidate last keypress */
@@ -1317,7 +1321,7 @@
 	if (!rc->map_name)
 		rc->map_name = RC_MAP_EMPTY;
 
-	rc->allowed_protos = RC_BIT_NEC;
+	rc->allowed_protos = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32;
 	rc->query = af9015_rc_query;
 	rc->interval = 500;
 
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index ca018cd..8961dd7 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -1828,6 +1828,7 @@
 {
 	struct usb_interface *intf = d->intf;
 	int ret;
+	enum rc_type proto;
 	u32 key;
 	u8 buf[4];
 	struct usb_req req = { CMD_IR_GET, 0, 0, NULL, 4, buf };
@@ -1842,19 +1843,22 @@
 		if ((buf[0] + buf[1]) == 0xff) {
 			/* NEC standard 16bit */
 			key = RC_SCANCODE_NEC(buf[0], buf[2]);
+			proto = RC_TYPE_NEC;
 		} else {
 			/* NEC extended 24bit */
 			key = RC_SCANCODE_NECX(buf[0] << 8 | buf[1], buf[2]);
+			proto = RC_TYPE_NECX;
 		}
 	} else {
 		/* NEC full code 32bit */
 		key = RC_SCANCODE_NEC32(buf[0] << 24 | buf[1] << 16 |
 					buf[2] << 8  | buf[3]);
+		proto = RC_TYPE_NEC32;
 	}
 
 	dev_dbg(&intf->dev, "%*ph\n", 4, buf);
 
-	rc_keydown(d->rc_dev, RC_TYPE_NEC, key, 0);
+	rc_keydown(d->rc_dev, proto, key, 0);
 
 	return 0;
 
@@ -1889,7 +1893,8 @@
 		switch (tmp) {
 		case 0: /* NEC */
 		default:
-			rc->allowed_protos = RC_BIT_NEC;
+			rc->allowed_protos = RC_BIT_NEC | RC_BIT_NECX |
+								RC_BIT_NEC32;
 			break;
 		case 1: /* RC6 */
 			rc->allowed_protos = RC_BIT_RC6_MCE;
diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
index 935dbaa..50c07fe 100644
--- a/drivers/media/usb/dvb-usb-v2/az6007.c
+++ b/drivers/media/usb/dvb-usb-v2/az6007.c
@@ -208,6 +208,7 @@
 {
 	struct az6007_device_state *st = d_to_priv(d);
 	unsigned code;
+	enum rc_type proto;
 
 	az6007_read(d, AZ6007_READ_IR, 0, 0, st->data, 10);
 
@@ -215,19 +216,23 @@
 		return 0;
 
 	if ((st->data[3] ^ st->data[4]) == 0xff) {
-		if ((st->data[1] ^ st->data[2]) == 0xff)
+		if ((st->data[1] ^ st->data[2]) == 0xff) {
 			code = RC_SCANCODE_NEC(st->data[1], st->data[3]);
-		else
+			proto = RC_TYPE_NEC;
+		} else {
 			code = RC_SCANCODE_NECX(st->data[1] << 8 | st->data[2],
 						st->data[3]);
+			proto = RC_TYPE_NECX;
+		}
 	} else {
 		code = RC_SCANCODE_NEC32(st->data[1] << 24 |
 					 st->data[2] << 16 |
 					 st->data[3] << 8  |
 					 st->data[4]);
+		proto = RC_TYPE_NEC32;
 	}
 
-	rc_keydown(d->rc_dev, RC_TYPE_NEC, code, st->data[5]);
+	rc_keydown(d->rc_dev, proto, code, st->data[5]);
 
 	return 0;
 }
@@ -236,7 +241,7 @@
 {
 	pr_debug("Getting az6007 Remote Control properties\n");
 
-	rc->allowed_protos = RC_BIT_NEC;
+	rc->allowed_protos = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32;
 	rc->query          = az6007_rc_query;
 	rc->interval       = 400;
 
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index 3fbb2cd..a8e6624 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -82,8 +82,6 @@
 	ret = i2c_add_adapter(&d->i2c_adap);
 	if (ret < 0) {
 		d->i2c_adap.algo = NULL;
-		dev_err(&d->udev->dev, "%s: i2c_add_adapter() failed=%d\n",
-				KBUILD_MODNAME, ret);
 		goto err;
 	}
 
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 3721ee6..0e8fb89 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -357,7 +357,8 @@
 						ibuf[5]);
 
 			deb_info(1, "INT Key = 0x%08x", key);
-			rc_keydown(adap_to_d(adap)->rc_dev, RC_TYPE_NEC, key, 0);
+			rc_keydown(adap_to_d(adap)->rc_dev, RC_TYPE_NEC32, key,
+									0);
 			break;
 		case 0xbb:
 			switch (st->tuner_config) {
@@ -1242,7 +1243,7 @@
 static int lme2510_get_rc_config(struct dvb_usb_device *d,
 	struct dvb_usb_rc *rc)
 {
-	rc->allowed_protos = RC_BIT_NEC;
+	rc->allowed_protos = RC_BIT_NEC32;
 	return 0;
 }
 
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
index 7065aca..e6eae9d 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
@@ -21,7 +21,6 @@
 #ifndef __MXL111SF_DEMOD_H__
 #define __MXL111SF_DEMOD_H__
 
-#include <linux/kconfig.h>
 #include "dvb_frontend.h"
 #include "mxl111sf.h"
 
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
index 7d16252..f141dcc 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
@@ -466,7 +466,7 @@
 
 /* ------------------------------------------------------------------------- */
 
-static struct dvb_tuner_ops mxl111sf_tuner_tuner_ops = {
+static const struct dvb_tuner_ops mxl111sf_tuner_tuner_ops = {
 	.info = {
 		.name = "MaxLinear MxL111SF",
 #if 0
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
index 509b550..e96d9a4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
@@ -21,7 +21,6 @@
 #ifndef __MXL111SF_TUNER_H__
 #define __MXL111SF_TUNER_H__
 
-#include <linux/kconfig.h>
 #include "dvb_frontend.h"
 #include "mxl111sf.h"
 
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 6643762..c583c63 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1631,22 +1631,27 @@
 		goto err;
 
 	if (buf[4] & 0x01) {
+		enum rc_type proto;
+
 		if (buf[2] == (u8) ~buf[3]) {
 			if (buf[0] == (u8) ~buf[1]) {
 				/* NEC standard (16 bit) */
 				rc_code = RC_SCANCODE_NEC(buf[0], buf[2]);
+				proto = RC_TYPE_NEC;
 			} else {
 				/* NEC extended (24 bit) */
 				rc_code = RC_SCANCODE_NECX(buf[0] << 8 | buf[1],
 							   buf[2]);
+				proto = RC_TYPE_NECX;
 			}
 		} else {
 			/* NEC full (32 bit) */
 			rc_code = RC_SCANCODE_NEC32(buf[0] << 24 | buf[1] << 16 |
 						    buf[2] << 8  | buf[3]);
+			proto = RC_TYPE_NEC32;
 		}
 
-		rc_keydown(d->rc_dev, RC_TYPE_NEC, rc_code, 0);
+		rc_keydown(d->rc_dev, proto, rc_code, 0);
 
 		ret = rtl28xxu_wr_reg(d, SYS_IRRC_SR, 1);
 		if (ret)
@@ -1668,7 +1673,7 @@
 		struct dvb_usb_rc *rc)
 {
 	rc->map_name = RC_MAP_EMPTY;
-	rc->allowed_protos = RC_BIT_NEC;
+	rc->allowed_protos = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32;
 	rc->query = rtl2831u_rc_query;
 	rc->interval = 400;
 
diff --git a/drivers/media/usb/dvb-usb/Kconfig b/drivers/media/usb/dvb-usb/Kconfig
index f03b0b7..959fa09 100644
--- a/drivers/media/usb/dvb-usb/Kconfig
+++ b/drivers/media/usb/dvb-usb/Kconfig
@@ -20,10 +20,20 @@
 	  Say Y if you want to enable debugging. See modinfo dvb-usb (and the
 	  appropriate drivers) for debug levels.
 
+config DVB_USB_DIB3000MC
+	tristate
+	depends on DVB_USB
+	select DVB_DIB3000MC
+	help
+	  This is a module with helper functions for accessing the
+	  DIB3000MC from USB DVB devices. It must be a separate module
+	  in case DVB_USB is built-in and DVB_DIB3000MC is a module,
+	  and gets selected automatically when needed.
+
 config DVB_USB_A800
 	tristate "AVerMedia AverTV DVB-T USB 2.0 (A800)"
 	depends on DVB_USB
-	select DVB_DIB3000MC
+	select DVB_USB_DIB3000MC
 	select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
 	select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
 	help
@@ -34,6 +44,7 @@
 	depends on DVB_USB
 	select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
 	select DVB_DIB3000MB
+	depends on DVB_DIB3000MC || !DVB_DIB3000MC
 	select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
 	help
 	  Support for USB 1.1 and 2.0 DVB-T receivers based on reference designs made by
@@ -54,7 +65,7 @@
 config DVB_USB_DIBUSB_MC
 	tristate "DiBcom USB DVB-T devices (based on the DiB3000M-C/P) (see help for device list)"
 	depends on DVB_USB
-	select DVB_DIB3000MC
+	select DVB_USB_DIB3000MC
 	select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
 	help
 	  Support for USB2.0 DVB-T receivers based on reference designs made by
@@ -72,7 +83,7 @@
 	select DVB_DIB7000P if MEDIA_SUBDRV_AUTOSELECT
 	select DVB_DIB7000M if MEDIA_SUBDRV_AUTOSELECT
 	select DVB_DIB8000 if MEDIA_SUBDRV_AUTOSELECT
-	select DVB_DIB3000MC if MEDIA_SUBDRV_AUTOSELECT
+	select DVB_USB_DIB3000MC if MEDIA_SUBDRV_AUTOSELECT
 	select DVB_S5H1411 if MEDIA_SUBDRV_AUTOSELECT
 	select DVB_LGDT3305 if MEDIA_SUBDRV_AUTOSELECT
 	select DVB_TUNER_DIB0070 if MEDIA_SUBDRV_AUTOSELECT
@@ -99,7 +110,7 @@
 	tristate "HanfTek UMT-010 DVB-T USB2.0 support"
 	depends on DVB_USB
 	select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
-	select DVB_DIB3000MC
+	select DVB_USB_DIB3000MC
 	select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
 	select DVB_MT352 if MEDIA_SUBDRV_AUTOSELECT
 	help
@@ -192,7 +203,7 @@
 config DVB_USB_NOVA_T_USB2
 	tristate "Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 support"
 	depends on DVB_USB
-	select DVB_DIB3000MC
+	select DVB_USB_DIB3000MC
 	select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
 	select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
 	help
diff --git a/drivers/media/usb/dvb-usb/Makefile b/drivers/media/usb/dvb-usb/Makefile
index acdd1ef..2a7b5a9 100644
--- a/drivers/media/usb/dvb-usb/Makefile
+++ b/drivers/media/usb/dvb-usb/Makefile
@@ -16,20 +16,23 @@
 
 dvb-usb-dibusb-common-objs := dibusb-common.o
 
+dvb-usb-dibusb-mc-common-objs := dibusb-mc-common.o
+obj-$(CONFIG_DVB_USB_DIB3000MC)	+= dvb-usb-dibusb-common.o dvb-usb-dibusb-mc-common.o
+
 dvb-usb-a800-objs := a800.o
-obj-$(CONFIG_DVB_USB_A800) += dvb-usb-dibusb-common.o dvb-usb-a800.o
+obj-$(CONFIG_DVB_USB_A800) += dvb-usb-a800.o
 
 dvb-usb-dibusb-mb-objs := dibusb-mb.o
 obj-$(CONFIG_DVB_USB_DIBUSB_MB) += dvb-usb-dibusb-common.o dvb-usb-dibusb-mb.o
 
 dvb-usb-dibusb-mc-objs := dibusb-mc.o
-obj-$(CONFIG_DVB_USB_DIBUSB_MC) += dvb-usb-dibusb-common.o dvb-usb-dibusb-mc.o
+obj-$(CONFIG_DVB_USB_DIBUSB_MC) += dvb-usb-dibusb-mc.o
 
 dvb-usb-nova-t-usb2-objs := nova-t-usb2.o
-obj-$(CONFIG_DVB_USB_NOVA_T_USB2) += dvb-usb-dibusb-common.o dvb-usb-nova-t-usb2.o
+obj-$(CONFIG_DVB_USB_NOVA_T_USB2) += dvb-usb-nova-t-usb2.o
 
 dvb-usb-umt-010-objs := umt-010.o
-obj-$(CONFIG_DVB_USB_UMT_010) += dvb-usb-dibusb-common.o dvb-usb-umt-010.o
+obj-$(CONFIG_DVB_USB_UMT_010) += dvb-usb-umt-010.o
 
 dvb-usb-m920x-objs := m920x.o
 obj-$(CONFIG_DVB_USB_M920X) += dvb-usb-m920x.o
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index 2679797..f319665 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -710,7 +710,6 @@
 
 	switch (d->props.rc.core.protocol) {
 	case RC_BIT_NEC:
-		protocol = RC_TYPE_NEC;
 		toggle = 0;
 
 		/* NEC protocol sends repeat code as 0 0 0 FF */
@@ -728,16 +727,19 @@
 						     poll_reply->nec.not_system << 16 |
 						     poll_reply->nec.data       << 8  |
 						     poll_reply->nec.not_data);
+			protocol = RC_TYPE_NEC32;
 		} else if ((poll_reply->nec.system ^ poll_reply->nec.not_system) != 0xff) {
 			deb_data("NEC extended protocol\n");
 			keycode = RC_SCANCODE_NECX(poll_reply->nec.system << 8 |
 						    poll_reply->nec.not_system,
 						    poll_reply->nec.data);
 
+			protocol = RC_TYPE_NECX;
 		} else {
 			deb_data("NEC normal protocol\n");
 			keycode = RC_SCANCODE_NEC(poll_reply->nec.system,
 						   poll_reply->nec.data);
+			protocol = RC_TYPE_NEC;
 		}
 
 		break;
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 6eea4e6..18ed3bf 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -9,7 +9,6 @@
  * see Documentation/dvb/README.dvb-usb for more information
  */
 
-#include <linux/kconfig.h>
 #include "dibusb.h"
 
 /* Max transfer size done by I2C transfer functions */
@@ -184,164 +183,6 @@
 }
 EXPORT_SYMBOL(dibusb_read_eeprom_byte);
 
-#if IS_ENABLED(CONFIG_DVB_DIB3000MC)
-
-/* 3000MC/P stuff */
-// Config Adjacent channels  Perf -cal22
-static struct dibx000_agc_config dib3000p_mt2060_agc_config = {
-	.band_caps = BAND_VHF | BAND_UHF,
-	.setup     = (1 << 8) | (5 << 5) | (1 << 4) | (1 << 3) | (0 << 2) | (2 << 0),
-
-	.agc1_max = 48497,
-	.agc1_min = 23593,
-	.agc2_max = 46531,
-	.agc2_min = 24904,
-
-	.agc1_pt1 = 0x65,
-	.agc1_pt2 = 0x69,
-
-	.agc1_slope1 = 0x51,
-	.agc1_slope2 = 0x27,
-
-	.agc2_pt1 = 0,
-	.agc2_pt2 = 0x33,
-
-	.agc2_slope1 = 0x35,
-	.agc2_slope2 = 0x37,
-};
-
-static struct dib3000mc_config stk3000p_dib3000p_config = {
-	&dib3000p_mt2060_agc_config,
-
-	.max_time     = 0x196,
-	.ln_adc_level = 0x1cc7,
-
-	.output_mpeg2_in_188_bytes = 1,
-
-	.agc_command1 = 1,
-	.agc_command2 = 1,
-};
-
-static struct dibx000_agc_config dib3000p_panasonic_agc_config = {
-	.band_caps = BAND_VHF | BAND_UHF,
-	.setup     = (1 << 8) | (5 << 5) | (1 << 4) | (1 << 3) | (0 << 2) | (2 << 0),
-
-	.agc1_max = 56361,
-	.agc1_min = 22282,
-	.agc2_max = 47841,
-	.agc2_min = 36045,
-
-	.agc1_pt1 = 0x3b,
-	.agc1_pt2 = 0x6b,
-
-	.agc1_slope1 = 0x55,
-	.agc1_slope2 = 0x1d,
-
-	.agc2_pt1 = 0,
-	.agc2_pt2 = 0x0a,
-
-	.agc2_slope1 = 0x95,
-	.agc2_slope2 = 0x1e,
-};
-
-static struct dib3000mc_config mod3000p_dib3000p_config = {
-	&dib3000p_panasonic_agc_config,
-
-	.max_time     = 0x51,
-	.ln_adc_level = 0x1cc7,
-
-	.output_mpeg2_in_188_bytes = 1,
-
-	.agc_command1 = 1,
-	.agc_command2 = 1,
-};
-
-int dibusb_dib3000mc_frontend_attach(struct dvb_usb_adapter *adap)
-{
-	if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_LITEON &&
-	    le16_to_cpu(adap->dev->udev->descriptor.idProduct) ==
-			USB_PID_LITEON_DVB_T_WARM) {
-		msleep(1000);
-	}
-
-	adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach,
-					 &adap->dev->i2c_adap,
-					 DEFAULT_DIB3000P_I2C_ADDRESS,
-					 &mod3000p_dib3000p_config);
-	if ((adap->fe_adap[0].fe) == NULL)
-		adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach,
-						 &adap->dev->i2c_adap,
-						 DEFAULT_DIB3000MC_I2C_ADDRESS,
-						 &mod3000p_dib3000p_config);
-	if ((adap->fe_adap[0].fe) != NULL) {
-		if (adap->priv != NULL) {
-			struct dibusb_state *st = adap->priv;
-			st->ops.pid_parse = dib3000mc_pid_parse;
-			st->ops.pid_ctrl  = dib3000mc_pid_control;
-		}
-		return 0;
-	}
-	return -ENODEV;
-}
-EXPORT_SYMBOL(dibusb_dib3000mc_frontend_attach);
-
-static struct mt2060_config stk3000p_mt2060_config = {
-	0x60
-};
-
-int dibusb_dib3000mc_tuner_attach(struct dvb_usb_adapter *adap)
-{
-	struct dibusb_state *st = adap->priv;
-	u8 a,b;
-	u16 if1 = 1220;
-	struct i2c_adapter *tun_i2c;
-
-	// First IF calibration for Liteon Sticks
-	if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_LITEON &&
-	    le16_to_cpu(adap->dev->udev->descriptor.idProduct) == USB_PID_LITEON_DVB_T_WARM) {
-
-		dibusb_read_eeprom_byte(adap->dev,0x7E,&a);
-		dibusb_read_eeprom_byte(adap->dev,0x7F,&b);
-
-		if (a == 0x00)
-			if1 += b;
-		else if (a == 0x80)
-			if1 -= b;
-		else
-			warn("LITE-ON DVB-T: Strange IF1 calibration :%2X %2X\n", a, b);
-
-	} else if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_DIBCOM &&
-		   le16_to_cpu(adap->dev->udev->descriptor.idProduct) == USB_PID_DIBCOM_MOD3001_WARM) {
-		u8 desc;
-		dibusb_read_eeprom_byte(adap->dev, 7, &desc);
-		if (desc == 2) {
-			a = 127;
-			do {
-				dibusb_read_eeprom_byte(adap->dev, a, &desc);
-				a--;
-			} while (a > 7 && (desc == 0xff || desc == 0x00));
-			if (desc & 0x80)
-				if1 -= (0xff - desc);
-			else
-				if1 += desc;
-		}
-	}
-
-	tun_i2c = dib3000mc_get_tuner_i2c_master(adap->fe_adap[0].fe, 1);
-	if (dvb_attach(mt2060_attach, adap->fe_adap[0].fe, tun_i2c, &stk3000p_mt2060_config, if1) == NULL) {
-		/* not found - use panasonic pll parameters */
-		if (dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60, tun_i2c, DVB_PLL_ENV57H1XD5) == NULL)
-			return -ENOMEM;
-	} else {
-		st->mt2060_present = 1;
-		/* set the correct parameters for the dib3000p */
-		dib3000mc_set_config(adap->fe_adap[0].fe, &stk3000p_dib3000p_config);
-	}
-	return 0;
-}
-EXPORT_SYMBOL(dibusb_dib3000mc_tuner_attach);
-#endif
-
 /*
  * common remote control stuff
  */
diff --git a/drivers/media/usb/dvb-usb/dibusb-mc-common.c b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
new file mode 100644
index 0000000..d66f56c
--- /dev/null
+++ b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
@@ -0,0 +1,168 @@
+/* Common methods for dibusb-based-receivers.
+ *
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ *
+ *	This program is free software; you can redistribute it and/or modify it
+ *	under the terms of the GNU General Public License as published by the Free
+ *	Software Foundation, version 2.
+ *
+ * see Documentation/dvb/README.dvb-usb for more information
+ */
+
+#include <linux/kconfig.h>
+#include "dibusb.h"
+
+/* 3000MC/P stuff */
+// Config Adjacent channels  Perf -cal22
+static struct dibx000_agc_config dib3000p_mt2060_agc_config = {
+	.band_caps = BAND_VHF | BAND_UHF,
+	.setup     = (1 << 8) | (5 << 5) | (1 << 4) | (1 << 3) | (0 << 2) | (2 << 0),
+
+	.agc1_max = 48497,
+	.agc1_min = 23593,
+	.agc2_max = 46531,
+	.agc2_min = 24904,
+
+	.agc1_pt1 = 0x65,
+	.agc1_pt2 = 0x69,
+
+	.agc1_slope1 = 0x51,
+	.agc1_slope2 = 0x27,
+
+	.agc2_pt1 = 0,
+	.agc2_pt2 = 0x33,
+
+	.agc2_slope1 = 0x35,
+	.agc2_slope2 = 0x37,
+};
+
+static struct dib3000mc_config stk3000p_dib3000p_config = {
+	&dib3000p_mt2060_agc_config,
+
+	.max_time     = 0x196,
+	.ln_adc_level = 0x1cc7,
+
+	.output_mpeg2_in_188_bytes = 1,
+
+	.agc_command1 = 1,
+	.agc_command2 = 1,
+};
+
+static struct dibx000_agc_config dib3000p_panasonic_agc_config = {
+	.band_caps = BAND_VHF | BAND_UHF,
+	.setup     = (1 << 8) | (5 << 5) | (1 << 4) | (1 << 3) | (0 << 2) | (2 << 0),
+
+	.agc1_max = 56361,
+	.agc1_min = 22282,
+	.agc2_max = 47841,
+	.agc2_min = 36045,
+
+	.agc1_pt1 = 0x3b,
+	.agc1_pt2 = 0x6b,
+
+	.agc1_slope1 = 0x55,
+	.agc1_slope2 = 0x1d,
+
+	.agc2_pt1 = 0,
+	.agc2_pt2 = 0x0a,
+
+	.agc2_slope1 = 0x95,
+	.agc2_slope2 = 0x1e,
+};
+
+static struct dib3000mc_config mod3000p_dib3000p_config = {
+	&dib3000p_panasonic_agc_config,
+
+	.max_time     = 0x51,
+	.ln_adc_level = 0x1cc7,
+
+	.output_mpeg2_in_188_bytes = 1,
+
+	.agc_command1 = 1,
+	.agc_command2 = 1,
+};
+
+int dibusb_dib3000mc_frontend_attach(struct dvb_usb_adapter *adap)
+{
+	if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_LITEON &&
+	    le16_to_cpu(adap->dev->udev->descriptor.idProduct) ==
+			USB_PID_LITEON_DVB_T_WARM) {
+		msleep(1000);
+	}
+
+	adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach,
+					 &adap->dev->i2c_adap,
+					 DEFAULT_DIB3000P_I2C_ADDRESS,
+					 &mod3000p_dib3000p_config);
+	if ((adap->fe_adap[0].fe) == NULL)
+		adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach,
+						 &adap->dev->i2c_adap,
+						 DEFAULT_DIB3000MC_I2C_ADDRESS,
+						 &mod3000p_dib3000p_config);
+	if ((adap->fe_adap[0].fe) != NULL) {
+		if (adap->priv != NULL) {
+			struct dibusb_state *st = adap->priv;
+			st->ops.pid_parse = dib3000mc_pid_parse;
+			st->ops.pid_ctrl  = dib3000mc_pid_control;
+		}
+		return 0;
+	}
+	return -ENODEV;
+}
+EXPORT_SYMBOL(dibusb_dib3000mc_frontend_attach);
+
+static struct mt2060_config stk3000p_mt2060_config = {
+	0x60
+};
+
+int dibusb_dib3000mc_tuner_attach(struct dvb_usb_adapter *adap)
+{
+	struct dibusb_state *st = adap->priv;
+	u8 a,b;
+	u16 if1 = 1220;
+	struct i2c_adapter *tun_i2c;
+
+	// First IF calibration for Liteon Sticks
+	if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_LITEON &&
+	    le16_to_cpu(adap->dev->udev->descriptor.idProduct) == USB_PID_LITEON_DVB_T_WARM) {
+
+		dibusb_read_eeprom_byte(adap->dev,0x7E,&a);
+		dibusb_read_eeprom_byte(adap->dev,0x7F,&b);
+
+		if (a == 0x00)
+			if1 += b;
+		else if (a == 0x80)
+			if1 -= b;
+		else
+			warn("LITE-ON DVB-T: Strange IF1 calibration :%2X %2X\n", a, b);
+
+	} else if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_DIBCOM &&
+		   le16_to_cpu(adap->dev->udev->descriptor.idProduct) == USB_PID_DIBCOM_MOD3001_WARM) {
+		u8 desc;
+		dibusb_read_eeprom_byte(adap->dev, 7, &desc);
+		if (desc == 2) {
+			a = 127;
+			do {
+				dibusb_read_eeprom_byte(adap->dev, a, &desc);
+				a--;
+			} while (a > 7 && (desc == 0xff || desc == 0x00));
+			if (desc & 0x80)
+				if1 -= (0xff - desc);
+			else
+				if1 += desc;
+		}
+	}
+
+	tun_i2c = dib3000mc_get_tuner_i2c_master(adap->fe_adap[0].fe, 1);
+	if (dvb_attach(mt2060_attach, adap->fe_adap[0].fe, tun_i2c, &stk3000p_mt2060_config, if1) == NULL) {
+		/* not found - use panasonic pll parameters */
+		if (dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60, tun_i2c, DVB_PLL_ENV57H1XD5) == NULL)
+			return -ENOMEM;
+	} else {
+		st->mt2060_present = 1;
+		/* set the correct parameters for the dib3000p */
+		dib3000mc_set_config(adap->fe_adap[0].fe, &stk3000p_dib3000p_config);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(dibusb_dib3000mc_tuner_attach);
diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c
index be633ec..d2a01b5 100644
--- a/drivers/media/usb/dvb-usb/dtt200u.c
+++ b/drivers/media/usb/dvb-usb/dtt200u.c
@@ -62,18 +62,21 @@
 
 	dvb_usb_generic_rw(d,&cmd,1,key,5,0);
 	if (key[0] == 1) {
+		enum rc_type proto = RC_TYPE_NEC;
+
 		scancode = key[1];
 		if ((u8) ~key[1] != key[2]) {
 			/* Extended NEC */
 			scancode = scancode << 8;
 			scancode |= key[2];
+			proto = RC_TYPE_NECX;
 		}
 		scancode = scancode << 8;
 		scancode |= key[3];
 
 		/* Check command checksum is ok */
 		if ((u8) ~key[3] == key[4])
-			rc_keydown(d->rc_dev, RC_TYPE_NEC, scancode, 0);
+			rc_keydown(d->rc_dev, proto, scancode, 0);
 		else
 			rc_keyup(d->rc_dev);
 	} else if (key[0] == 2) {
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 78f3687..e11fe46 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -695,7 +695,7 @@
 /*
  * register/unregister code and data
  */
-static struct snd_pcm_ops snd_em28xx_pcm_capture = {
+static const struct snd_pcm_ops snd_em28xx_pcm_capture = {
 	.open      = snd_em28xx_capture_open,
 	.close     = snd_em28xx_pcm_close,
 	.ioctl     = snd_pcm_lib_ioctl,
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 1a9e1e5..8b690ac 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -855,7 +855,7 @@
 	return 0;
 }
 
-static struct i2c_algorithm em28xx_algo = {
+static const struct i2c_algorithm em28xx_algo = {
 	.master_xfer   = em28xx_i2c_xfer,
 	.functionality = functionality,
 };
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 7968695..1f7fa05 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -1204,7 +1204,7 @@
 	spin_unlock_irqrestore(&dev->slock, flags);
 }
 
-static struct vb2_ops em28xx_video_qops = {
+static const struct vb2_ops em28xx_video_qops = {
 	.queue_setup    = queue_setup,
 	.buf_prepare    = buffer_prepare,
 	.buf_queue      = buffer_queue,
diff --git a/drivers/media/usb/go7007/go7007-i2c.c b/drivers/media/usb/go7007/go7007-i2c.c
index 55addfa..c084bf7 100644
--- a/drivers/media/usb/go7007/go7007-i2c.c
+++ b/drivers/media/usb/go7007/go7007-i2c.c
@@ -191,7 +191,7 @@
 	return I2C_FUNC_SMBUS_BYTE_DATA;
 }
 
-static struct i2c_algorithm go7007_algo = {
+static const struct i2c_algorithm go7007_algo = {
 	.smbus_xfer	= go7007_smbus_xfer,
 	.master_xfer	= go7007_i2c_master_xfer,
 	.functionality	= go7007_functionality,
diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
index 14d3f8c..ed9bcaf 100644
--- a/drivers/media/usb/go7007/go7007-usb.c
+++ b/drivers/media/usb/go7007/go7007-usb.c
@@ -1032,7 +1032,7 @@
 	return (I2C_FUNC_SMBUS_EMUL) & ~I2C_FUNC_SMBUS_QUICK;
 }
 
-static struct i2c_algorithm go7007_usb_algo = {
+static const struct i2c_algorithm go7007_usb_algo = {
 	.master_xfer	= go7007_usb_i2c_master_xfer,
 	.functionality	= go7007_usb_functionality,
 };
diff --git a/drivers/media/usb/go7007/go7007-v4l2.c b/drivers/media/usb/go7007/go7007-v4l2.c
index af84589..4eaba0c 100644
--- a/drivers/media/usb/go7007/go7007-v4l2.c
+++ b/drivers/media/usb/go7007/go7007-v4l2.c
@@ -477,7 +477,7 @@
 		go7007_write_addr(go, 0x3c82, 0x000d);
 }
 
-static struct vb2_ops go7007_video_qops = {
+static const struct vb2_ops go7007_video_qops = {
 	.queue_setup    = go7007_queue_setup,
 	.buf_queue      = go7007_buf_queue,
 	.buf_prepare    = go7007_buf_prepare,
diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c
index d22d7d5..070871f 100644
--- a/drivers/media/usb/go7007/snd-go7007.c
+++ b/drivers/media/usb/go7007/snd-go7007.c
@@ -198,7 +198,7 @@
 	return vmalloc_to_page(substream->runtime->dma_area + offset);
 }
 
-static struct snd_pcm_ops go7007_snd_capture_ops = {
+static const struct snd_pcm_ops go7007_snd_capture_ops = {
 	.open		= go7007_snd_capture_open,
 	.close		= go7007_snd_capture_close,
 	.ioctl		= snd_pcm_lib_ioctl,
diff --git a/drivers/media/usb/gspca/finepix.c b/drivers/media/usb/gspca/finepix.c
index 52bdb56..ae9a55d 100644
--- a/drivers/media/usb/gspca/finepix.c
+++ b/drivers/media/usb/gspca/finepix.c
@@ -41,7 +41,6 @@
 	struct gspca_dev gspca_dev;	/* !! must be the first item */
 
 	struct work_struct work_struct;
-	struct workqueue_struct *work_thread;
 };
 
 /* Delay after which claim the next frame. If the delay is too small,
@@ -226,9 +225,7 @@
 	/* Again, reset bulk in endpoint */
 	usb_clear_halt(gspca_dev->dev, gspca_dev->urb[0]->pipe);
 
-	/* Start the workqueue function to do the streaming */
-	dev->work_thread = create_singlethread_workqueue(MODULE_NAME);
-	queue_work(dev->work_thread, &dev->work_struct);
+	schedule_work(&dev->work_struct);
 
 	return 0;
 }
@@ -241,9 +238,8 @@
 
 	/* wait for the work queue to terminate */
 	mutex_unlock(&gspca_dev->usb_lock);
-	destroy_workqueue(dev->work_thread);
+	flush_work(&dev->work_struct);
 	mutex_lock(&gspca_dev->usb_lock);
-	dev->work_thread = NULL;
 }
 
 /* Table of supported USB devices */
diff --git a/drivers/media/usb/gspca/jl2005bcd.c b/drivers/media/usb/gspca/jl2005bcd.c
index 5b481fa..ac295f0 100644
--- a/drivers/media/usb/gspca/jl2005bcd.c
+++ b/drivers/media/usb/gspca/jl2005bcd.c
@@ -45,7 +45,6 @@
 	const struct v4l2_pix_format *cap_mode;
 	/* Driver stuff */
 	struct work_struct work_struct;
-	struct workqueue_struct *work_thread;
 	u8 frame_brightness;
 	int block_size;	/* block size of camera */
 	int vga;	/* 1 if vga cam, 0 if cif cam */
@@ -477,9 +476,7 @@
 		return -1;
 	}
 
-	/* Start the workqueue function to do the streaming */
-	sd->work_thread = create_singlethread_workqueue(MODULE_NAME);
-	queue_work(sd->work_thread, &sd->work_struct);
+	schedule_work(&sd->work_struct);
 
 	return 0;
 }
@@ -493,8 +490,7 @@
 	/* wait for the work queue to terminate */
 	mutex_unlock(&gspca_dev->usb_lock);
 	/* This waits for sq905c_dostream to finish */
-	destroy_workqueue(dev->work_thread);
-	dev->work_thread = NULL;
+	flush_work(&dev->work_struct);
 	mutex_lock(&gspca_dev->usb_lock);
 }
 
diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c
index fd1c870..d49d76e 100644
--- a/drivers/media/usb/gspca/sonixj.c
+++ b/drivers/media/usb/gspca/sonixj.c
@@ -54,7 +54,6 @@
 	u32 exposure;
 
 	struct work_struct work;
-	struct workqueue_struct *work_thread;
 
 	u32 pktsz;			/* (used by pkt_scan) */
 	u16 npkt;
@@ -2485,7 +2484,6 @@
 
 	sd->pktsz = sd->npkt = 0;
 	sd->nchg = sd->short_mark = 0;
-	sd->work_thread = create_singlethread_workqueue(MODULE_NAME);
 
 	return gspca_dev->usb_err;
 }
@@ -2569,12 +2567,9 @@
 {
 	struct sd *sd = (struct sd *) gspca_dev;
 
-	if (sd->work_thread != NULL) {
-		mutex_unlock(&gspca_dev->usb_lock);
-		destroy_workqueue(sd->work_thread);
-		mutex_lock(&gspca_dev->usb_lock);
-		sd->work_thread = NULL;
-	}
+	mutex_unlock(&gspca_dev->usb_lock);
+	flush_work(&sd->work);
+	mutex_lock(&gspca_dev->usb_lock);
 }
 
 static void do_autogain(struct gspca_dev *gspca_dev)
@@ -2785,7 +2780,7 @@
 				new_qual = QUALITY_MAX;
 			if (new_qual != sd->quality) {
 				sd->quality = new_qual;
-				queue_work(sd->work_thread, &sd->work);
+				schedule_work(&sd->work);
 			}
 		}
 	} else {
diff --git a/drivers/media/usb/gspca/vicam.c b/drivers/media/usb/gspca/vicam.c
index 103f6c4..8860510 100644
--- a/drivers/media/usb/gspca/vicam.c
+++ b/drivers/media/usb/gspca/vicam.c
@@ -47,7 +47,6 @@
 struct sd {
 	struct gspca_dev gspca_dev;	/* !! must be the first item */
 	struct work_struct work_struct;
-	struct workqueue_struct *work_thread;
 };
 
 /* The vicam sensor has a resolution of 512 x 244, with I believe square
@@ -278,9 +277,7 @@
 	if (ret < 0)
 		return ret;
 
-	/* Start the workqueue function to do the streaming */
-	sd->work_thread = create_singlethread_workqueue(MODULE_NAME);
-	queue_work(sd->work_thread, &sd->work_struct);
+	schedule_work(&sd->work_struct);
 
 	return 0;
 }
@@ -294,8 +291,7 @@
 	/* wait for the work queue to terminate */
 	mutex_unlock(&gspca_dev->usb_lock);
 	/* This waits for vicam_dostream to finish */
-	destroy_workqueue(dev->work_thread);
-	dev->work_thread = NULL;
+	flush_work(&dev->work_struct);
 	mutex_lock(&gspca_dev->usb_lock);
 
 	if (gspca_dev->present)
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c
index c2c8d12..d9a5252 100644
--- a/drivers/media/usb/hackrf/hackrf.c
+++ b/drivers/media/usb/hackrf/hackrf.c
@@ -129,7 +129,7 @@
 	struct list_head rx_buffer_list;
 	struct list_head tx_buffer_list;
 	spinlock_t buffer_list_lock; /* Protects buffer_list */
-	unsigned sequence;	     /* Buffer sequence counter */
+	unsigned int sequence;	     /* Buffer sequence counter */
 	unsigned int vb_full;        /* vb is full and packets dropped */
 	unsigned int vb_empty;       /* vb is empty and packets dropped */
 
@@ -891,7 +891,7 @@
 	mutex_unlock(&dev->v4l2_lock);
 }
 
-static struct vb2_ops hackrf_vb2_ops = {
+static const struct vb2_ops hackrf_vb2_ops = {
 	.queue_setup            = hackrf_queue_setup,
 	.buf_queue              = hackrf_buf_queue,
 	.start_streaming        = hackrf_start_streaming,
diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c
index a38f58c..9b641c4 100644
--- a/drivers/media/usb/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c
@@ -55,7 +55,7 @@
 	/* Our default information for ir-kbd-i2c.c to use */
 	init_data->ir_codes = RC_MAP_HAUPPAUGE;
 	init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
-	init_data->type = RC_BIT_RC5;
+	init_data->type = RC_BIT_RC5 | RC_BIT_RC6_MCE | RC_BIT_RC6_6A_32;
 	init_data->name = "HD-PVR";
 	init_data->polling_interval = 405; /* ms, duplicated from Windows */
 	hdpvr_ir_rx_i2c_board_info.platform_data = init_data;
@@ -180,7 +180,7 @@
 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
-static struct i2c_algorithm hdpvr_algo = {
+static const struct i2c_algorithm hdpvr_algo = {
 	.master_xfer   = hdpvr_transfer,
 	.functionality = hdpvr_functionality,
 };
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 6d43d75..474c11e 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -10,7 +10,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/kconfig.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/slab.h>
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index 367eb7e..bb3d31e 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -897,7 +897,7 @@
 	mutex_unlock(&dev->v4l2_lock);
 }
 
-static struct vb2_ops msi2500_vb2_ops = {
+static const struct vb2_ops msi2500_vb2_ops = {
 	.queue_setup            = msi2500_queue_setup,
 	.buf_queue              = msi2500_buf_queue,
 	.start_streaming        = msi2500_start_streaming,
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h
index 60141b1..23473a2 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h
@@ -170,7 +170,6 @@
 	const struct pvr2_device_desc *hdw_desc;
 
 	/* Kernel worker thread handling */
-	struct workqueue_struct *workqueue;
 	struct work_struct workpoll;     /* Update driver state */
 
 	/* Video spigot */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index fe20fe4..1eb4f7b 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -2624,7 +2624,6 @@
 	if (cnt1 >= sizeof(hdw->name)) cnt1 = sizeof(hdw->name)-1;
 	hdw->name[cnt1] = 0;
 
-	hdw->workqueue = create_singlethread_workqueue(hdw->name);
 	INIT_WORK(&hdw->workpoll,pvr2_hdw_worker_poll);
 
 	pvr2_trace(PVR2_TRACE_INIT,"Driver unit number is %d, name is %s",
@@ -2651,11 +2650,7 @@
 		del_timer_sync(&hdw->decoder_stabilization_timer);
 		del_timer_sync(&hdw->encoder_run_timer);
 		del_timer_sync(&hdw->encoder_wait_timer);
-		if (hdw->workqueue) {
-			flush_workqueue(hdw->workqueue);
-			destroy_workqueue(hdw->workqueue);
-			hdw->workqueue = NULL;
-		}
+		flush_work(&hdw->workpoll);
 		usb_free_urb(hdw->ctl_read_urb);
 		usb_free_urb(hdw->ctl_write_urb);
 		kfree(hdw->ctl_read_buffer);
@@ -2712,11 +2707,7 @@
 {
 	if (!hdw) return;
 	pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_destroy: hdw=%p",hdw);
-	if (hdw->workqueue) {
-		flush_workqueue(hdw->workqueue);
-		destroy_workqueue(hdw->workqueue);
-		hdw->workqueue = NULL;
-	}
+	flush_work(&hdw->workpoll);
 	del_timer_sync(&hdw->quiescent_timer);
 	del_timer_sync(&hdw->decoder_stabilization_timer);
 	del_timer_sync(&hdw->encoder_run_timer);
@@ -4443,7 +4434,7 @@
 	hdw->state_decoder_quiescent = !0;
 	trace_stbit("state_decoder_quiescent",hdw->state_decoder_quiescent);
 	hdw->state_stale = !0;
-	queue_work(hdw->workqueue,&hdw->workpoll);
+	schedule_work(&hdw->workpoll);
 }
 
 
@@ -4454,7 +4445,7 @@
 	hdw->state_decoder_ready = !0;
 	trace_stbit("state_decoder_ready", hdw->state_decoder_ready);
 	hdw->state_stale = !0;
-	queue_work(hdw->workqueue, &hdw->workpoll);
+	schedule_work(&hdw->workpoll);
 }
 
 
@@ -4465,7 +4456,7 @@
 	hdw->state_encoder_waitok = !0;
 	trace_stbit("state_encoder_waitok",hdw->state_encoder_waitok);
 	hdw->state_stale = !0;
-	queue_work(hdw->workqueue,&hdw->workpoll);
+	schedule_work(&hdw->workpoll);
 }
 
 
@@ -4477,7 +4468,7 @@
 		hdw->state_encoder_runok = !0;
 		trace_stbit("state_encoder_runok",hdw->state_encoder_runok);
 		hdw->state_stale = !0;
-		queue_work(hdw->workqueue,&hdw->workpoll);
+		schedule_work(&hdw->workpoll);
 	}
 }
 
@@ -4991,7 +4982,7 @@
 	if (hdw->state_stale) return;
 	hdw->state_stale = !0;
 	trace_stbit("state_stale",hdw->state_stale);
-	queue_work(hdw->workqueue,&hdw->workpoll);
+	schedule_work(&hdw->workpoll);
 }
 
 
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
index 14321d0..6da5fb5 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
@@ -596,7 +596,8 @@
 	case PVR2_IR_SCHEME_24XXX_MCE: /* 24xxx MCE device */
 		init_data->ir_codes              = RC_MAP_HAUPPAUGE;
 		init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
-		init_data->type                  = RC_BIT_RC5;
+		init_data->type                  = RC_BIT_RC5 | RC_BIT_RC6_MCE |
+							RC_BIT_RC6_6A_32;
 		init_data->name                  = hdw->hdw_desc->description;
 		/* IR Receiver */
 		info.addr          = 0x71;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 81f788b..2cc4d2b 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -719,64 +719,85 @@
 	return ret;
 }
 
-static int pvr2_g_crop(struct file *file, void *priv, struct v4l2_crop *crop)
+static int pvr2_g_selection(struct file *file, void *priv,
+			    struct v4l2_selection *sel)
 {
 	struct pvr2_v4l2_fh *fh = file->private_data;
 	struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+	struct v4l2_cropcap cap;
 	int val = 0;
 	int ret;
 
-	if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
 		return -EINVAL;
-	ret = pvr2_ctrl_get_value(
-			pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPL), &val);
-	if (ret != 0)
+
+	cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP:
+		ret = pvr2_ctrl_get_value(
+			  pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPL), &val);
+		if (ret != 0)
+			return -EINVAL;
+		sel->r.left = val;
+		ret = pvr2_ctrl_get_value(
+			  pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPT), &val);
+		if (ret != 0)
+			return -EINVAL;
+		sel->r.top = val;
+		ret = pvr2_ctrl_get_value(
+			  pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPW), &val);
+		if (ret != 0)
+			return -EINVAL;
+		sel->r.width = val;
+		ret = pvr2_ctrl_get_value(
+			  pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPH), &val);
+		if (ret != 0)
+			return -EINVAL;
+		sel->r.height = val;
+		break;
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		ret = pvr2_hdw_get_cropcap(hdw, &cap);
+		sel->r = cap.defrect;
+		break;
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		ret = pvr2_hdw_get_cropcap(hdw, &cap);
+		sel->r = cap.bounds;
+		break;
+	default:
 		return -EINVAL;
-	crop->c.left = val;
-	ret = pvr2_ctrl_get_value(
-			pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPT), &val);
-	if (ret != 0)
-		return -EINVAL;
-	crop->c.top = val;
-	ret = pvr2_ctrl_get_value(
-			pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPW), &val);
-	if (ret != 0)
-		return -EINVAL;
-	crop->c.width = val;
-	ret = pvr2_ctrl_get_value(
-			pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPH), &val);
-	if (ret != 0)
-		return -EINVAL;
-	crop->c.height = val;
-	return 0;
+	}
+	return ret;
 }
 
-static int pvr2_s_crop(struct file *file, void *priv, const struct v4l2_crop *crop)
+static int pvr2_s_selection(struct file *file, void *priv,
+			    struct v4l2_selection *sel)
 {
 	struct pvr2_v4l2_fh *fh = file->private_data;
 	struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
 	int ret;
 
-	if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+	    sel->target != V4L2_SEL_TGT_CROP)
 		return -EINVAL;
 	ret = pvr2_ctrl_set_value(
 			pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPL),
-			crop->c.left);
+			sel->r.left);
 	if (ret != 0)
 		return -EINVAL;
 	ret = pvr2_ctrl_set_value(
 			pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPT),
-			crop->c.top);
+			sel->r.top);
 	if (ret != 0)
 		return -EINVAL;
 	ret = pvr2_ctrl_set_value(
 			pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPW),
-			crop->c.width);
+			sel->r.width);
 	if (ret != 0)
 		return -EINVAL;
 	ret = pvr2_ctrl_set_value(
 			pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPH),
-			crop->c.height);
+			sel->r.height);
 	if (ret != 0)
 		return -EINVAL;
 	return 0;
@@ -798,8 +819,8 @@
 	.vidioc_enumaudio		    = pvr2_enumaudio,
 	.vidioc_enum_input		    = pvr2_enum_input,
 	.vidioc_cropcap			    = pvr2_cropcap,
-	.vidioc_s_crop			    = pvr2_s_crop,
-	.vidioc_g_crop			    = pvr2_g_crop,
+	.vidioc_s_selection		    = pvr2_s_selection,
+	.vidioc_g_selection		    = pvr2_g_selection,
 	.vidioc_g_input			    = pvr2_g_input,
 	.vidioc_s_input			    = pvr2_s_input,
 	.vidioc_g_frequency		    = pvr2_g_frequency,
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index c4454c9..ff65764 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -707,7 +707,7 @@
 	mutex_unlock(&pdev->v4l2_lock);
 }
 
-static struct vb2_ops pwc_vb_queue_ops = {
+static const struct vb2_ops pwc_vb_queue_ops = {
 	.queue_setup		= queue_setup,
 	.buf_init		= buffer_init,
 	.buf_prepare		= buffer_prepare,
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 9458eb0..c3a0e87 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -717,7 +717,7 @@
 static int start_streaming(struct vb2_queue *vq, unsigned int count);
 static void stop_streaming(struct vb2_queue *vq);
 
-static struct vb2_ops s2255_video_qops = {
+static const struct vb2_ops s2255_video_qops = {
 	.queue_setup = queue_setup,
 	.buf_prepare = buffer_prepare,
 	.buf_queue = buffer_queue,
diff --git a/drivers/media/usb/stk1160/stk1160-i2c.c b/drivers/media/usb/stk1160/stk1160-i2c.c
index 850cf28..3f2517b 100644
--- a/drivers/media/usb/stk1160/stk1160-i2c.c
+++ b/drivers/media/usb/stk1160/stk1160-i2c.c
@@ -235,7 +235,7 @@
 	return I2C_FUNC_SMBUS_EMUL;
 }
 
-static struct i2c_algorithm algo = {
+static const struct i2c_algorithm algo = {
 	.master_xfer   = stk1160_i2c_xfer,
 	.functionality = functionality,
 };
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index 5fab3be..a005d26 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -742,7 +742,7 @@
 	stk1160_stop_streaming(dev);
 }
 
-static struct vb2_ops stk1160_video_qops = {
+static const struct vb2_ops stk1160_video_qops = {
 	.queue_setup		= queue_setup,
 	.buf_queue		= buffer_queue,
 	.start_streaming	= start_streaming,
diff --git a/drivers/media/usb/tm6000/tm6000-alsa.c b/drivers/media/usb/tm6000/tm6000-alsa.c
index e21c7aa..f16fbd1 100644
--- a/drivers/media/usb/tm6000/tm6000-alsa.c
+++ b/drivers/media/usb/tm6000/tm6000-alsa.c
@@ -388,7 +388,7 @@
 /*
  * operators
  */
-static struct snd_pcm_ops snd_tm6000_pcm_ops = {
+static const struct snd_pcm_ops snd_tm6000_pcm_ops = {
 	.open = snd_tm6000_pcm_open,
 	.close = snd_tm6000_close,
 	.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index 4e36e24..4e7671a 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -206,7 +206,7 @@
 
 static void ttusb_dec_handle_irq( struct urb *urb)
 {
-	struct ttusb_dec * dec = urb->context;
+	struct ttusb_dec *dec = urb->context;
 	char *buffer = dec->irq_buffer;
 	int retval;
 
@@ -227,25 +227,31 @@
 			goto exit;
 	}
 
-	if( (buffer[0] == 0x1) && (buffer[2] == 0x15) )  {
-		/* IR - Event */
-		/* this is an fact a bit too simple implementation;
+	if ((buffer[0] == 0x1) && (buffer[2] == 0x15))  {
+		/*
+		 * IR - Event
+		 *
+		 * this is an fact a bit too simple implementation;
 		 * the box also reports a keyrepeat signal
 		 * (with buffer[3] == 0x40) in an intervall of ~100ms.
 		 * But to handle this correctly we had to imlemenent some
 		 * kind of timer which signals a 'key up' event if no
 		 * keyrepeat signal is received for lets say 200ms.
 		 * this should/could be added later ...
-		 * for now lets report each signal as a key down and up*/
-		dprintk("%s:rc signal:%d\n", __func__, buffer[4]);
-		input_report_key(dec->rc_input_dev, rc_keys[buffer[4] - 1], 1);
-		input_sync(dec->rc_input_dev);
-		input_report_key(dec->rc_input_dev, rc_keys[buffer[4] - 1], 0);
-		input_sync(dec->rc_input_dev);
+		 * for now lets report each signal as a key down and up
+		 */
+		if (buffer[4] - 1 < ARRAY_SIZE(rc_keys)) {
+			dprintk("%s:rc signal:%d\n", __func__, buffer[4]);
+			input_report_key(dec->rc_input_dev, rc_keys[buffer[4] - 1], 1);
+			input_sync(dec->rc_input_dev);
+			input_report_key(dec->rc_input_dev, rc_keys[buffer[4] - 1], 0);
+			input_sync(dec->rc_input_dev);
+		}
 	}
 
-exit:	retval = usb_submit_urb(urb, GFP_ATOMIC);
-	if(retval)
+exit:
+	retval = usb_submit_urb(urb, GFP_ATOMIC);
+	if (retval)
 		printk("%s - usb_commit_urb failed with result: %d\n",
 			__func__, retval);
 }
diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
index 1965ff1..9db31db 100644
--- a/drivers/media/usb/usbtv/usbtv-audio.c
+++ b/drivers/media/usb/usbtv/usbtv-audio.c
@@ -332,7 +332,7 @@
 	return chip->snd_buffer_pos;
 }
 
-static struct snd_pcm_ops snd_usbtv_pcm_ops = {
+static const struct snd_pcm_ops snd_usbtv_pcm_ops = {
 	.open = snd_usbtv_pcm_open,
 	.close = snd_usbtv_pcm_close,
 	.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index 2a08975..6cbe4a2 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -689,7 +689,7 @@
 		usbtv_stop(usbtv);
 }
 
-static struct vb2_ops usbtv_vb2_ops = {
+static const struct vb2_ops usbtv_vb2_ops = {
 	.queue_setup = usbtv_queue_setup,
 	.buf_queue = usbtv_buf_queue,
 	.start_streaming = usbtv_start_streaming,
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index 773fefb..77edd20 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -177,7 +177,7 @@
 	spin_unlock_irqrestore(&queue->irqlock, flags);
 }
 
-static struct vb2_ops uvc_queue_qops = {
+static const struct vb2_ops uvc_queue_qops = {
 	.queue_setup = uvc_queue_setup,
 	.buf_prepare = uvc_buffer_prepare,
 	.buf_queue = uvc_buffer_queue,
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 29b3436..367523a 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -27,7 +27,7 @@
 
 config VIDEO_PCI_SKELETON
 	tristate "Skeleton PCI V4L2 driver"
-	depends on PCI && BUILD_DOCSRC
+	depends on PCI
 	depends on VIDEO_V4L2 && VIDEOBUF2_CORE
 	depends on VIDEOBUF2_MEMOPS && VIDEOBUF2_DMA_CONTIG
 	---help---
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index a4b224d..5bada20 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -119,13 +119,6 @@
 		return ret;
 	}
 
-	ret = v4l2_subdev_call(sd, core, registered_async);
-	if (ret < 0 && ret != -ENOIOCTLCMD) {
-		if (notifier->unbind)
-			notifier->unbind(notifier, sd, asd);
-		return ret;
-	}
-
 	if (list_empty(&notifier->waiting) && notifier->complete)
 		return notifier->complete(notifier);
 
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 5b80850..57cfe26a 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -291,7 +291,7 @@
 error:
 	/* If we have a client but no subdev, then something went wrong and
 	   we must unregister the client. */
-	if (spi && sd == NULL)
+	if (!sd)
 		spi_unregister_device(spi);
 
 	return sd;
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index f7abfad..adc2147 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -361,6 +361,7 @@
 		"Scalable Baseline",
 		"Scalable High",
 		"Scalable High Intra",
+		"Stereo High",
 		"Multiview High",
 		NULL,
 	};
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index e6da353..8be561a 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -527,6 +527,7 @@
 	bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI;
 	bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO;
 	bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR;
+	bool is_tch = vdev->vfl_type == VFL_TYPE_TOUCH;
 	bool is_rx = vdev->vfl_dir != VFL_DIR_TX;
 	bool is_tx = vdev->vfl_dir != VFL_DIR_RX;
 
@@ -573,7 +574,7 @@
 	if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator)
 		set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
 
-	if (is_vid) {
+	if (is_vid || is_tch) {
 		/* video specific ioctls */
 		if ((is_rx && (ops->vidioc_enum_fmt_vid_cap ||
 			       ops->vidioc_enum_fmt_vid_cap_mplane ||
@@ -662,7 +663,7 @@
 			set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
 	}
 
-	if (is_vid || is_vbi || is_sdr) {
+	if (is_vid || is_vbi || is_sdr || is_tch) {
 		/* ioctls valid for video, vbi or sdr */
 		SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
 		SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
@@ -675,7 +676,7 @@
 		SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
 	}
 
-	if (is_vid || is_vbi) {
+	if (is_vid || is_vbi || is_tch) {
 		/* ioctls valid for video or vbi */
 		if (ops->vidioc_s_std)
 			set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
@@ -751,6 +752,10 @@
 		intf_type = MEDIA_INTF_T_V4L_SWRADIO;
 		vdev->entity.function = MEDIA_ENT_F_IO_SWRADIO;
 		break;
+	case VFL_TYPE_TOUCH:
+		intf_type = MEDIA_INTF_T_V4L_TOUCH;
+		vdev->entity.function = MEDIA_ENT_F_IO_V4L;
+		break;
 	case VFL_TYPE_RADIO:
 		intf_type = MEDIA_INTF_T_V4L_RADIO;
 		/*
@@ -854,6 +859,9 @@
 		/* Use device name 'swradio' because 'sdr' was already taken. */
 		name_base = "swradio";
 		break;
+	case VFL_TYPE_TOUCH:
+		name_base = "v4l-touch";
+		break;
 	default:
 		printk(KERN_ERR "%s called with unknown type: %d\n",
 		       __func__, type);
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index 06fa5f1..62bbed7 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -160,12 +160,9 @@
 	int err;
 
 	/* Check for valid input */
-	if (v4l2_dev == NULL || sd == NULL || !sd->name[0])
+	if (!v4l2_dev || !sd || sd->v4l2_dev || !sd->name[0])
 		return -EINVAL;
 
-	/* Warn if we apparently re-register a subdev */
-	WARN_ON(sd->v4l2_dev != NULL);
-
 	/*
 	 * The reason to acquire the module here is to avoid unloading
 	 * a module of sub-device which is registered to a media
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 889de0a..730a7c3 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -306,7 +306,7 @@
 			(bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
 			bt->il_vsync, bt->il_vbackporch);
 	pr_info("%s: pixelclock: %llu\n", dev_prefix, bt->pixelclock);
-	pr_info("%s: flags (0x%x):%s%s%s%s%s%s\n", dev_prefix, bt->flags,
+	pr_info("%s: flags (0x%x):%s%s%s%s%s%s%s\n", dev_prefix, bt->flags,
 			(bt->flags & V4L2_DV_FL_REDUCED_BLANKING) ?
 			" REDUCED_BLANKING" : "",
 			((bt->flags & V4L2_DV_FL_REDUCED_BLANKING) &&
@@ -318,12 +318,15 @@
 			(bt->flags & V4L2_DV_FL_HALF_LINE) ?
 			" HALF_LINE" : "",
 			(bt->flags & V4L2_DV_FL_IS_CE_VIDEO) ?
-			" CE_VIDEO" : "");
-	pr_info("%s: standards (0x%x):%s%s%s%s\n", dev_prefix, bt->standards,
+			" CE_VIDEO" : "",
+			(bt->flags & V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE) ?
+			" FIRST_FIELD_EXTRA_LINE" : "");
+	pr_info("%s: standards (0x%x):%s%s%s%s%s\n", dev_prefix, bt->standards,
 			(bt->standards & V4L2_DV_BT_STD_CEA861) ?  " CEA" : "",
 			(bt->standards & V4L2_DV_BT_STD_DMT) ?  " DMT" : "",
 			(bt->standards & V4L2_DV_BT_STD_CVT) ?  " CVT" : "",
-			(bt->standards & V4L2_DV_BT_STD_GTF) ?  " GTF" : "");
+			(bt->standards & V4L2_DV_BT_STD_GTF) ?  " GTF" : "",
+			(bt->standards & V4L2_DV_BT_STD_SDI) ?  " SDI" : "");
 }
 EXPORT_SYMBOL_GPL(v4l2_print_dv_timings);
 
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 51a0fa1..c52d94c 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -924,6 +924,7 @@
 	bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
 	bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI;
 	bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
+	bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
 	bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
 	bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
 
@@ -932,7 +933,7 @@
 
 	switch (type) {
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
-		if (is_vid && is_rx &&
+		if ((is_vid || is_tch) && is_rx &&
 		    (ops->vidioc_g_fmt_vid_cap || ops->vidioc_g_fmt_vid_cap_mplane))
 			return 0;
 		break;
@@ -1243,6 +1244,10 @@
 	case V4L2_SDR_FMT_CS8:		descr = "Complex S8"; break;
 	case V4L2_SDR_FMT_CS14LE:	descr = "Complex S14LE"; break;
 	case V4L2_SDR_FMT_RU12LE:	descr = "Real U12LE"; break;
+	case V4L2_TCH_FMT_DELTA_TD16:	descr = "16-bit signed deltas"; break;
+	case V4L2_TCH_FMT_DELTA_TD08:	descr = "8-bit signed deltas"; break;
+	case V4L2_TCH_FMT_TU16:		descr = "16-bit unsigned touch data"; break;
+	case V4L2_TCH_FMT_TU08:		descr = "8-bit unsigned touch data"; break;
 
 	default:
 		/* Compressed formats */
@@ -1309,13 +1314,14 @@
 	struct video_device *vfd = video_devdata(file);
 	bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
 	bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
+	bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
 	bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
 	bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
 	int ret = -EINVAL;
 
 	switch (p->type) {
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
-		if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_vid_cap))
+		if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_enum_fmt_vid_cap))
 			break;
 		ret = ops->vidioc_enum_fmt_vid_cap(file, fh, arg);
 		break;
@@ -1362,6 +1368,7 @@
 	struct video_device *vfd = video_devdata(file);
 	bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
 	bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
+	bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
 	bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
 	bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
 	int ret;
@@ -1392,7 +1399,7 @@
 
 	switch (p->type) {
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
-		if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_cap))
+		if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_g_fmt_vid_cap))
 			break;
 		p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
 		ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg);
@@ -1451,6 +1458,21 @@
 	return -EINVAL;
 }
 
+static void v4l_pix_format_touch(struct v4l2_pix_format *p)
+{
+	/*
+	 * The v4l2_pix_format structure contains fields that make no sense for
+	 * touch. Set them to default values in this case.
+	 */
+
+	p->field = V4L2_FIELD_NONE;
+	p->colorspace = V4L2_COLORSPACE_RAW;
+	p->flags = 0;
+	p->ycbcr_enc = 0;
+	p->quantization = 0;
+	p->xfer_func = 0;
+}
+
 static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
 				struct file *file, void *fh, void *arg)
 {
@@ -1458,6 +1480,7 @@
 	struct video_device *vfd = video_devdata(file);
 	bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
 	bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
+	bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
 	bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
 	bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
 	int ret;
@@ -1469,17 +1492,19 @@
 
 	switch (p->type) {
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
-		if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_cap))
+		if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_s_fmt_vid_cap))
 			break;
 		CLEAR_AFTER_FIELD(p, fmt.pix);
 		ret = ops->vidioc_s_fmt_vid_cap(file, fh, arg);
 		/* just in case the driver zeroed it again */
 		p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+		if (is_tch)
+			v4l_pix_format_touch(&p->fmt.pix);
 		return ret;
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
 		if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_cap_mplane))
 			break;
-		CLEAR_AFTER_FIELD(p, fmt.pix_mp);
+		CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
 		return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
 	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
 		if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_overlay))
@@ -1507,7 +1532,7 @@
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
 		if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_mplane))
 			break;
-		CLEAR_AFTER_FIELD(p, fmt.pix_mp);
+		CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
 		return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
 		if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_overlay))
@@ -1545,6 +1570,7 @@
 	struct video_device *vfd = video_devdata(file);
 	bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
 	bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
+	bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
 	bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
 	bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
 	int ret;
@@ -1553,7 +1579,7 @@
 
 	switch (p->type) {
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
-		if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_cap))
+		if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_try_fmt_vid_cap))
 			break;
 		CLEAR_AFTER_FIELD(p, fmt.pix);
 		ret = ops->vidioc_try_fmt_vid_cap(file, fh, arg);
@@ -1563,7 +1589,7 @@
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
 		if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_cap_mplane))
 			break;
-		CLEAR_AFTER_FIELD(p, fmt.pix_mp);
+		CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
 		return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
 	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
 		if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_overlay))
@@ -1591,7 +1617,7 @@
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
 		if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_mplane))
 			break;
-		CLEAR_AFTER_FIELD(p, fmt.pix_mp);
+		CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
 		return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
 		if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_overlay))
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 61d56c9..6bc27e7 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -76,9 +76,6 @@
 		return &m2m_ctx->cap_q_ctx;
 }
 
-/**
- * v4l2_m2m_get_vq() - return vb2_queue for the given type
- */
 struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
 				       enum v4l2_buf_type type)
 {
@@ -92,9 +89,6 @@
 }
 EXPORT_SYMBOL(v4l2_m2m_get_vq);
 
-/**
- * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
- */
 void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
 {
 	struct v4l2_m2m_buffer *b;
@@ -113,10 +107,6 @@
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
 
-/**
- * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
- * return it
- */
 void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
 {
 	struct v4l2_m2m_buffer *b;
@@ -140,10 +130,6 @@
  * Scheduling handlers
  */
 
-/**
- * v4l2_m2m_get_curr_priv() - return driver private data for the currently
- * running instance or NULL if no instance is running
- */
 void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
 {
 	unsigned long flags;
@@ -188,26 +174,6 @@
 	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
 }
 
-/**
- * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
- * the pending job queue and add it if so.
- * @m2m_ctx:	m2m context assigned to the instance to be checked
- *
- * There are three basic requirements an instance has to meet to be able to run:
- * 1) at least one source buffer has to be queued,
- * 2) at least one destination buffer has to be queued,
- * 3) streaming has to be on.
- *
- * If a queue is buffered (for example a decoder hardware ringbuffer that has
- * to be drained before doing streamoff), allow scheduling without v4l2 buffers
- * on that queue.
- *
- * There may also be additional, custom requirements. In such case the driver
- * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
- * return 1 if the instance is ready.
- * An example of the above could be an instance that requires more than one
- * src/dst buffer per transaction.
- */
 void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
 {
 	struct v4l2_m2m_dev *m2m_dev;
@@ -311,18 +277,6 @@
 	}
 }
 
-/**
- * v4l2_m2m_job_finish() - inform the framework that a job has been finished
- * and have it clean up
- *
- * Called by a driver to yield back the device after it has finished with it.
- * Should be called as soon as possible after reaching a state which allows
- * other instances to take control of the device.
- *
- * This function has to be called only after device_run() callback has been
- * called on the driver. To prevent recursion, it should not be called directly
- * from the device_run() callback though.
- */
 void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
 			 struct v4l2_m2m_ctx *m2m_ctx)
 {
@@ -350,9 +304,6 @@
 }
 EXPORT_SYMBOL(v4l2_m2m_job_finish);
 
-/**
- * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
- */
 int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		     struct v4l2_requestbuffers *reqbufs)
 {
@@ -370,11 +321,6 @@
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
 
-/**
- * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
- *
- * See v4l2_m2m_mmap() documentation for details.
- */
 int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		      struct v4l2_buffer *buf)
 {
@@ -400,10 +346,6 @@
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
 
-/**
- * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
- * the type
- */
 int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		  struct v4l2_buffer *buf)
 {
@@ -419,10 +361,6 @@
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
 
-/**
- * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
- * the type
- */
 int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		   struct v4l2_buffer *buf)
 {
@@ -433,10 +371,6 @@
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
 
-/**
- * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
- * the type
- */
 int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 			 struct v4l2_buffer *buf)
 {
@@ -452,10 +386,6 @@
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
 
-/**
- * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
- * on the type
- */
 int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 			 struct v4l2_create_buffers *create)
 {
@@ -466,10 +396,6 @@
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
 
-/**
- * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
- * the type
- */
 int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		  struct v4l2_exportbuffer *eb)
 {
@@ -479,9 +405,7 @@
 	return vb2_expbuf(vq, eb);
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
-/**
- * v4l2_m2m_streamon() - turn on streaming for a video queue
- */
+
 int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		      enum v4l2_buf_type type)
 {
@@ -497,9 +421,6 @@
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
 
-/**
- * v4l2_m2m_streamoff() - turn off streaming for a video queue
- */
 int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		       enum v4l2_buf_type type)
 {
@@ -540,14 +461,6 @@
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
 
-/**
- * v4l2_m2m_poll() - poll replacement, for destination buffers only
- *
- * Call from the driver's poll() function. Will poll both queues. If a buffer
- * is available to dequeue (with dqbuf) from the source queue, this will
- * indicate that a non-blocking write can be performed, while read will be
- * returned in case of the destination queue.
- */
 unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 			   struct poll_table_struct *wait)
 {
@@ -626,16 +539,6 @@
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
 
-/**
- * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
- *
- * Call from driver's mmap() function. Will handle mmap() for both queues
- * seamlessly for videobuffer, which will receive normal per-queue offsets and
- * proper videobuf queue pointers. The differentiation is made outside videobuf
- * by adding a predefined offset to buffers from one of the queues and
- * subtracting it before passing it back to videobuf. Only drivers (and
- * thus applications) receive modified offsets.
- */
 int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 			 struct vm_area_struct *vma)
 {
@@ -653,11 +556,6 @@
 }
 EXPORT_SYMBOL(v4l2_m2m_mmap);
 
-/**
- * v4l2_m2m_init() - initialize per-driver m2m data
- *
- * Usually called from driver's probe() function.
- */
 struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
 {
 	struct v4l2_m2m_dev *m2m_dev;
@@ -679,26 +577,12 @@
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_init);
 
-/**
- * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
- *
- * Usually called from driver's remove() function.
- */
 void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
 {
 	kfree(m2m_dev);
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_release);
 
-/**
- * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
- * @priv - driver's instance private data
- * @m2m_dev - a previously initialized m2m_dev struct
- * @vq_init - a callback for queue type-specific initialization function to be
- * used for initializing videobuf_queues
- *
- * Usually called from driver's open() function.
- */
 struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
 		void *drv_priv,
 		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
@@ -744,11 +628,6 @@
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
 
-/**
- * v4l2_m2m_ctx_release() - release m2m context
- *
- * Usually called from driver's release() function.
- */
 void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
 {
 	/* wait until the current context is dequeued from job_queue */
@@ -761,11 +640,6 @@
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
 
-/**
- * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
- *
- * Call from buf_queue(), videobuf_queue_ops callback.
- */
 void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
 		struct vb2_v4l2_buffer *vbuf)
 {
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index f300f06..1db0af6 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -156,6 +156,7 @@
 {
 	unsigned long first, last;
 	int err, rw = 0;
+	unsigned int flags = FOLL_FORCE;
 
 	dma->direction = direction;
 	switch (dma->direction) {
@@ -178,12 +179,14 @@
 	if (NULL == dma->pages)
 		return -ENOMEM;
 
+	if (rw == READ)
+		flags |= FOLL_WRITE;
+
 	dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
 		data, size, dma->nr_pages);
 
 	err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
-			     rw == READ, 1, /* force */
-			     dma->pages, NULL);
+			     flags, dma->pages, NULL);
 
 	if (err != dma->nr_pages) {
 		dma->nr_pages = (err >= 0) ? err : 0;
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index ca8ffeb..21900202 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -198,6 +198,7 @@
 		q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 	void *mem_priv;
 	int plane;
+	int ret = -ENOMEM;
 
 	/*
 	 * Allocate memory for all planes in this buffer
@@ -209,8 +210,11 @@
 		mem_priv = call_ptr_memop(vb, alloc,
 				q->alloc_devs[plane] ? : q->dev,
 				q->dma_attrs, size, dma_dir, q->gfp_flags);
-		if (IS_ERR_OR_NULL(mem_priv))
+		if (IS_ERR(mem_priv)) {
+			if (mem_priv)
+				ret = PTR_ERR(mem_priv);
 			goto free;
+		}
 
 		/* Associate allocator private data with this plane */
 		vb->planes[plane].mem_priv = mem_priv;
@@ -224,7 +228,7 @@
 		vb->planes[plane - 1].mem_priv = NULL;
 	}
 
-	return -ENOMEM;
+	return ret;
 }
 
 /**
@@ -524,10 +528,6 @@
 	return 0;
 }
 
-/**
- * vb2_buffer_in_use() - return true if the buffer is in use and
- * the queue cannot be freed (by the means of REQBUFS(0)) call
- */
 bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
 {
 	unsigned int plane;
@@ -560,16 +560,6 @@
 	return false;
 }
 
-/**
- * vb2_core_querybuf() - query video buffer information
- * @q:		videobuf queue
- * @index:	id number of the buffer
- * @pb:		buffer struct passed from userspace
- *
- * Should be called from vidioc_querybuf ioctl handler in driver.
- * The passed buffer should have been verified.
- * This function fills the relevant information for the userspace.
- */
 void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
 {
 	call_void_bufop(q, fill_user_buffer, q->bufs[index], pb);
@@ -616,10 +606,6 @@
 	return 0;
 }
 
-/**
- * vb2_verify_memory_type() - Check whether the memory type and buffer type
- * passed to a buffer operation are compatible with the queue.
- */
 int vb2_verify_memory_type(struct vb2_queue *q,
 		enum vb2_memory memory, unsigned int type)
 {
@@ -666,30 +652,6 @@
 }
 EXPORT_SYMBOL(vb2_verify_memory_type);
 
-/**
- * vb2_core_reqbufs() - Initiate streaming
- * @q:		videobuf2 queue
- * @memory: memory type
- * @count: requested buffer count
- *
- * Should be called from vidioc_reqbufs ioctl handler of a driver.
- * This function:
- * 1) verifies streaming parameters passed from the userspace,
- * 2) sets up the queue,
- * 3) negotiates number of buffers and planes per buffer with the driver
- *    to be used during streaming,
- * 4) allocates internal buffer structures (struct vb2_buffer), according to
- *    the agreed parameters,
- * 5) for MMAP memory type, allocates actual video memory, using the
- *    memory handling/allocation routines provided during queue initialization
- *
- * If req->count is 0, all the memory will be freed instead.
- * If the queue has been allocated previously (by a previous vb2_reqbufs) call
- * and the queue is not busy, memory will be reallocated.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_reqbufs handler in driver.
- */
 int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
 		unsigned int *count)
 {
@@ -815,22 +777,6 @@
 }
 EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
 
-/**
- * vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs
- * @q:		videobuf2 queue
- * @memory: memory type
- * @count: requested buffer count
- * @parg: parameter passed to device driver
- *
- * Should be called from vidioc_create_bufs ioctl handler of a driver.
- * This function:
- * 1) verifies parameter sanity
- * 2) calls the .queue_setup() queue operation
- * 3) performs any necessary memory allocations
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_create_bufs handler in driver.
- */
 int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
 		unsigned int *count, unsigned requested_planes,
 		const unsigned requested_sizes[])
@@ -920,14 +866,6 @@
 }
 EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
 
-/**
- * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
- * @vb:		vb2_buffer to which the plane in question belongs to
- * @plane_no:	plane number for which the address is to be returned
- *
- * This function returns a kernel virtual address of a given plane if
- * such a mapping exist, NULL otherwise.
- */
 void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
 {
 	if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
@@ -938,17 +876,6 @@
 }
 EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
 
-/**
- * vb2_plane_cookie() - Return allocator specific cookie for the given plane
- * @vb:		vb2_buffer to which the plane in question belongs to
- * @plane_no:	plane number for which the cookie is to be returned
- *
- * This function returns an allocator specific cookie for a given plane if
- * available, NULL otherwise. The allocator should provide some simple static
- * inline function, which would convert this cookie to the allocator specific
- * type that can be used directly by the driver to access the buffer. This can
- * be for example physical address, pointer to scatter list or IOMMU mapping.
- */
 void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
 {
 	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
@@ -958,26 +885,6 @@
 }
 EXPORT_SYMBOL_GPL(vb2_plane_cookie);
 
-/**
- * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
- * @vb:		vb2_buffer returned from the driver
- * @state:	either VB2_BUF_STATE_DONE if the operation finished successfully,
- *		VB2_BUF_STATE_ERROR if the operation finished with an error or
- *		VB2_BUF_STATE_QUEUED if the driver wants to requeue buffers.
- *		If start_streaming fails then it should return buffers with state
- *		VB2_BUF_STATE_QUEUED to put them back into the queue.
- *
- * This function should be called by the driver after a hardware operation on
- * a buffer is finished and the buffer may be returned to userspace. The driver
- * cannot use this buffer anymore until it is queued back to it by videobuf
- * by the means of buf_queue callback. Only buffers previously queued to the
- * driver by buf_queue can be passed to this function.
- *
- * While streaming a buffer can only be returned in state DONE or ERROR.
- * The start_streaming op can also return them in case the DMA engine cannot
- * be started for some reason. In that case the buffers should be returned with
- * state QUEUED.
- */
 void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
 {
 	struct vb2_queue *q = vb->vb2_queue;
@@ -1036,18 +943,6 @@
 }
 EXPORT_SYMBOL_GPL(vb2_buffer_done);
 
-/**
- * vb2_discard_done() - discard all buffers marked as DONE
- * @q:		videobuf2 queue
- *
- * This function is intended to be used with suspend/resume operations. It
- * discards all 'done' buffers as they would be too old to be requested after
- * resume.
- *
- * Drivers must stop the hardware and synchronize with interrupt handlers and/or
- * delayed works before calling this function to make sure no buffer will be
- * touched by the driver and/or hardware.
- */
 void vb2_discard_done(struct vb2_queue *q)
 {
 	struct vb2_buffer *vb;
@@ -1136,10 +1031,10 @@
 				q->alloc_devs[plane] ? : q->dev,
 				planes[plane].m.userptr,
 				planes[plane].length, dma_dir);
-		if (IS_ERR_OR_NULL(mem_priv)) {
+		if (IS_ERR(mem_priv)) {
 			dprintk(1, "failed acquiring userspace "
 						"memory for plane %d\n", plane);
-			ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
+			ret = PTR_ERR(mem_priv);
 			goto err;
 		}
 		vb->planes[plane].mem_priv = mem_priv;
@@ -1228,8 +1123,10 @@
 			planes[plane].length = dbuf->size;
 
 		if (planes[plane].length < vb->planes[plane].min_length) {
-			dprintk(1, "invalid dmabuf length for plane %d\n",
-				plane);
+			dprintk(1, "invalid dmabuf length %u for plane %d, "
+				"minimum length %u\n",
+				planes[plane].length, plane,
+				vb->planes[plane].min_length);
 			dma_buf_put(dbuf);
 			ret = -EINVAL;
 			goto err;
@@ -1271,9 +1168,10 @@
 		vb->planes[plane].mem_priv = mem_priv;
 	}
 
-	/* TODO: This pins the buffer(s) with  dma_buf_map_attachment()).. but
-	 * really we want to do this just before the DMA, not while queueing
-	 * the buffer(s)..
+	/*
+	 * This pins the buffer(s) with dma_buf_map_attachment()). It's done
+	 * here instead just before the DMA, while queueing the buffer(s) so
+	 * userspace knows sooner rather than later if the dma-buf map fails.
 	 */
 	for (plane = 0; plane < vb->num_planes; ++plane) {
 		ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
@@ -1377,22 +1275,6 @@
 	return ret;
 }
 
-/**
- * vb2_core_prepare_buf() - Pass ownership of a buffer from userspace
- *			to the kernel
- * @q:		videobuf2 queue
- * @index:	id number of the buffer
- * @pb:		buffer structure passed from userspace to vidioc_prepare_buf
- *		handler in driver
- *
- * Should be called from vidioc_prepare_buf ioctl handler of a driver.
- * The passed buffer should have been verified.
- * This function calls buf_prepare callback in the driver (if provided),
- * in which driver-specific buffer initialization can be performed,
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_prepare_buf handler in driver.
- */
 int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
 {
 	struct vb2_buffer *vb;
@@ -1481,24 +1363,6 @@
 	return ret;
 }
 
-/**
- * vb2_core_qbuf() - Queue a buffer from userspace
- * @q:		videobuf2 queue
- * @index:	id number of the buffer
- * @pb:		buffer structure passed from userspace to vidioc_qbuf handler
- *		in driver
- *
- * Should be called from vidioc_qbuf ioctl handler of a driver.
- * The passed buffer should have been verified.
- * This function:
- * 1) if necessary, calls buf_prepare callback in the driver (if provided), in
- *    which driver-specific buffer initialization can be performed,
- * 2) if streaming is on, queues the buffer in driver by the means of buf_queue
- *    callback for processing.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_qbuf handler in driver.
- */
 int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
 {
 	struct vb2_buffer *vb;
@@ -1679,15 +1543,6 @@
 	return ret;
 }
 
-/**
- * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
- * @q:		videobuf2 queue
- *
- * This function will wait until all buffers that have been given to the driver
- * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call
- * wait_prepare, wait_finish pair. It is intended to be called with all locks
- * taken, for example from stop_streaming() callback.
- */
 int vb2_wait_for_all_buffers(struct vb2_queue *q)
 {
 	if (!q->streaming) {
@@ -1725,27 +1580,6 @@
 		}
 }
 
-/**
- * vb2_dqbuf() - Dequeue a buffer to the userspace
- * @q:		videobuf2 queue
- * @pb:		buffer structure passed from userspace to vidioc_dqbuf handler
- *		in driver
- * @nonblocking: if true, this call will not sleep waiting for a buffer if no
- *		 buffers ready for dequeuing are present. Normally the driver
- *		 would be passing (file->f_flags & O_NONBLOCK) here
- *
- * Should be called from vidioc_dqbuf ioctl handler of a driver.
- * The passed buffer should have been verified.
- * This function:
- * 1) calls buf_finish callback in the driver (if provided), in which
- *    driver can perform any additional operations that may be required before
- *    returning the buffer to userspace, such as cache sync,
- * 2) the buffer struct members are filled with relevant information for
- *    the userspace.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_dqbuf handler in driver.
- */
 int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
 		   bool nonblocking)
 {
@@ -1909,19 +1743,6 @@
 }
 EXPORT_SYMBOL_GPL(vb2_core_streamon);
 
-/**
- * vb2_queue_error() - signal a fatal error on the queue
- * @q:		videobuf2 queue
- *
- * Flag that a fatal unrecoverable error has occurred and wake up all processes
- * waiting on the queue. Polling will now set POLLERR and queuing and dequeuing
- * buffers will return -EIO.
- *
- * The error flag will be cleared when cancelling the queue, either from
- * vb2_streamoff or vb2_queue_release. Drivers should thus not call this
- * function before starting the stream, otherwise the error flag will remain set
- * until the queue is released when closing the device node.
- */
 void vb2_queue_error(struct vb2_queue *q)
 {
 	q->error = 1;
@@ -1984,19 +1805,6 @@
 	return -EINVAL;
 }
 
-/**
- * vb2_core_expbuf() - Export a buffer as a file descriptor
- * @q:		videobuf2 queue
- * @fd:		file descriptor associated with DMABUF (set by driver) *
- * @type:	buffer type
- * @index:	id number of the buffer
- * @plane:	index of the plane to be exported, 0 for single plane queues
- * @flags:	flags for newly created file, currently only O_CLOEXEC is
- *		supported, refer to manual of open syscall for more details
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_expbuf handler in driver.
- */
 int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
 		unsigned int index, unsigned int plane, unsigned int flags)
 {
@@ -2068,25 +1876,6 @@
 }
 EXPORT_SYMBOL_GPL(vb2_core_expbuf);
 
-/**
- * vb2_mmap() - map video buffers into application address space
- * @q:		videobuf2 queue
- * @vma:	vma passed to the mmap file operation handler in the driver
- *
- * Should be called from mmap file operation handler of a driver.
- * This function maps one plane of one of the available video buffers to
- * userspace. To map whole video memory allocated on reqbufs, this function
- * has to be called once per each plane per each buffer previously allocated.
- *
- * When the userspace application calls mmap, it passes to it an offset returned
- * to it earlier by the means of vidioc_querybuf handler. That offset acts as
- * a "cookie", which is then used to identify the plane to be mapped.
- * This function finds a plane with a matching offset and a mapping is performed
- * by the means of a provided memory operation.
- *
- * The return values from this function are intended to be directly returned
- * from the mmap handler in driver.
- */
 int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
 {
 	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
@@ -2188,17 +1977,6 @@
 EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
 #endif
 
-/**
- * vb2_core_queue_init() - initialize a videobuf2 queue
- * @q:		videobuf2 queue; this structure should be allocated in driver
- *
- * The vb2_queue structure should be allocated by the driver. The driver is
- * responsible of clearing it's content and setting initial values for some
- * required entries before calling this function.
- * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
- * to the struct vb2_queue description in include/media/videobuf2-core.h
- * for more information.
- */
 int vb2_core_queue_init(struct vb2_queue *q)
 {
 	/*
@@ -2228,14 +2006,6 @@
 
 static int __vb2_init_fileio(struct vb2_queue *q, int read);
 static int __vb2_cleanup_fileio(struct vb2_queue *q);
-/**
- * vb2_core_queue_release() - stop streaming, release the queue and free memory
- * @q:		videobuf2 queue
- *
- * This function stops streaming and performs necessary clean ups, including
- * freeing video buffer memory. The driver is responsible for freeing
- * the vb2_queue structure itself.
- */
 void vb2_core_queue_release(struct vb2_queue *q)
 {
 	__vb2_cleanup_fileio(q);
@@ -2246,22 +2016,6 @@
 }
 EXPORT_SYMBOL_GPL(vb2_core_queue_release);
 
-/**
- * vb2_core_poll() - implements poll userspace operation
- * @q:		videobuf2 queue
- * @file:	file argument passed to the poll file operation handler
- * @wait:	wait argument passed to the poll file operation handler
- *
- * This function implements poll file operation handler for a driver.
- * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
- * be informed that the file descriptor of a video device is available for
- * reading.
- * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
- * will be reported as available for writing.
- *
- * The return values from this function are intended to be directly returned
- * from poll handler in driver.
- */
 unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
 		poll_table *wait)
 {
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 59fa204..fb6a177 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -141,6 +141,9 @@
 {
 	struct vb2_dc_buf *buf;
 
+	if (WARN_ON(!dev))
+		return ERR_PTR(-EINVAL);
+
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
 		return ERR_PTR(-ENOMEM);
@@ -493,6 +496,9 @@
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (WARN_ON(!dev))
+		return ERR_PTR(-EINVAL);
+
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
 		return ERR_PTR(-ENOMEM);
@@ -673,6 +679,9 @@
 	if (dbuf->size < size)
 		return ERR_PTR(-EFAULT);
 
+	if (WARN_ON(!dev))
+		return ERR_PTR(-EINVAL);
+
 	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 	if (!buf)
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index bd82d70..ecff8f4 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -104,11 +104,12 @@
 	int ret;
 	int num_pages;
 
-	if (WARN_ON(dev == NULL))
-		return NULL;
+	if (WARN_ON(!dev))
+		return ERR_PTR(-EINVAL);
+
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	buf->vaddr = NULL;
 	buf->dma_dir = dma_dir;
@@ -166,7 +167,7 @@
 	kfree(buf->pages);
 fail_pages_array_alloc:
 	kfree(buf);
-	return NULL;
+	return ERR_PTR(-ENOMEM);
 }
 
 static void vb2_dma_sg_put(void *buf_priv)
@@ -224,9 +225,12 @@
 	struct sg_table *sgt;
 	struct frame_vector *vec;
 
+	if (WARN_ON(!dev))
+		return ERR_PTR(-EINVAL);
+
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	buf->vaddr = NULL;
 	buf->dev = dev;
@@ -266,7 +270,7 @@
 	vb2_destroy_framevec(vec);
 userptr_fail_pfnvec:
 	kfree(buf);
-	return NULL;
+	return ERR_PTR(-ENOMEM);
 }
 
 /*
@@ -606,6 +610,9 @@
 	struct vb2_dma_sg_buf *buf;
 	struct dma_buf_attachment *dba;
 
+	if (WARN_ON(!dev))
+		return ERR_PTR(-EINVAL);
+
 	if (dbuf->size < size)
 		return ERR_PTR(-EFAULT);
 
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 3c3b517..1cd322e 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -42,6 +42,10 @@
 	unsigned long first, last;
 	unsigned long nr;
 	struct frame_vector *vec;
+	unsigned int flags = FOLL_FORCE;
+
+	if (write)
+		flags |= FOLL_WRITE;
 
 	first = start >> PAGE_SHIFT;
 	last = (start + length - 1) >> PAGE_SHIFT;
@@ -49,7 +53,7 @@
 	vec = frame_vector_create(nr);
 	if (!vec)
 		return ERR_PTR(-ENOMEM);
-	ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
+	ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec);
 	if (ret < 0)
 		goto out_destroy;
 	/* We accept only complete set of PFNs */
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 9cfbb6e..52ef883 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -483,13 +483,6 @@
 }
 EXPORT_SYMBOL(vb2_querybuf);
 
-/**
- * vb2_reqbufs() - Wrapper for vb2_core_reqbufs() that also verifies
- * the memory and type values.
- * @q:		videobuf2 queue
- * @req:	struct passed from userspace to vidioc_reqbufs handler
- *		in driver
- */
 int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
 {
 	int ret = vb2_verify_memory_type(q, req->memory, req->type);
@@ -498,21 +491,6 @@
 }
 EXPORT_SYMBOL_GPL(vb2_reqbufs);
 
-/**
- * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
- * @q:		videobuf2 queue
- * @b:		buffer structure passed from userspace to vidioc_prepare_buf
- *		handler in driver
- *
- * Should be called from vidioc_prepare_buf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) calls buf_prepare callback in the driver (if provided), in which
- *    driver-specific buffer initialization can be performed,
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_prepare_buf handler in driver.
- */
 int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
 {
 	int ret;
@@ -528,13 +506,6 @@
 }
 EXPORT_SYMBOL_GPL(vb2_prepare_buf);
 
-/**
- * vb2_create_bufs() - Wrapper for vb2_core_create_bufs() that also verifies
- * the memory and type values.
- * @q:		videobuf2 queue
- * @create:	creation parameters, passed from userspace to vidioc_create_bufs
- *		handler in driver
- */
 int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
 {
 	unsigned requested_planes = 1;
@@ -586,23 +557,6 @@
 }
 EXPORT_SYMBOL_GPL(vb2_create_bufs);
 
-/**
- * vb2_qbuf() - Queue a buffer from userspace
- * @q:		videobuf2 queue
- * @b:		buffer structure passed from userspace to vidioc_qbuf handler
- *		in driver
- *
- * Should be called from vidioc_qbuf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
- *    which driver-specific buffer initialization can be performed,
- * 3) if streaming is on, queues the buffer in driver by the means of buf_queue
- *    callback for processing.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_qbuf handler in driver.
- */
 int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
 {
 	int ret;
@@ -617,27 +571,6 @@
 }
 EXPORT_SYMBOL_GPL(vb2_qbuf);
 
-/**
- * vb2_dqbuf() - Dequeue a buffer to the userspace
- * @q:		videobuf2 queue
- * @b:		buffer structure passed from userspace to vidioc_dqbuf handler
- *		in driver
- * @nonblocking: if true, this call will not sleep waiting for a buffer if no
- *		 buffers ready for dequeuing are present. Normally the driver
- *		 would be passing (file->f_flags & O_NONBLOCK) here
- *
- * Should be called from vidioc_dqbuf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) calls buf_finish callback in the driver (if provided), in which
- *    driver can perform any additional operations that may be required before
- *    returning the buffer to userspace, such as cache sync,
- * 3) the buffer struct members are filled with relevant information for
- *    the userspace.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_dqbuf handler in driver.
- */
 int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
 {
 	int ret;
@@ -664,19 +597,6 @@
 }
 EXPORT_SYMBOL_GPL(vb2_dqbuf);
 
-/**
- * vb2_streamon - start streaming
- * @q:		videobuf2 queue
- * @type:	type argument passed from userspace to vidioc_streamon handler
- *
- * Should be called from vidioc_streamon handler of a driver.
- * This function:
- * 1) verifies current state
- * 2) passes any previously queued buffers to the driver and starts streaming
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_streamon handler in the driver.
- */
 int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
 {
 	if (vb2_fileio_is_active(q)) {
@@ -687,21 +607,6 @@
 }
 EXPORT_SYMBOL_GPL(vb2_streamon);
 
-/**
- * vb2_streamoff - stop streaming
- * @q:		videobuf2 queue
- * @type:	type argument passed from userspace to vidioc_streamoff handler
- *
- * Should be called from vidioc_streamoff handler of a driver.
- * This function:
- * 1) verifies current state,
- * 2) stop streaming and dequeues any queued buffers, including those previously
- *    passed to the driver (after waiting for the driver to finish).
- *
- * This call can be used for pausing playback.
- * The return values from this function are intended to be directly returned
- * from vidioc_streamoff handler in the driver
- */
 int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
 {
 	if (vb2_fileio_is_active(q)) {
@@ -712,15 +617,6 @@
 }
 EXPORT_SYMBOL_GPL(vb2_streamoff);
 
-/**
- * vb2_expbuf() - Export a buffer as a file descriptor
- * @q:		videobuf2 queue
- * @eb:		export buffer structure passed from userspace to vidioc_expbuf
- *		handler in driver
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_expbuf handler in driver.
- */
 int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
 {
 	return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index,
@@ -728,17 +624,6 @@
 }
 EXPORT_SYMBOL_GPL(vb2_expbuf);
 
-/**
- * vb2_queue_init() - initialize a videobuf2 queue
- * @q:		videobuf2 queue; this structure should be allocated in driver
- *
- * The vb2_queue structure should be allocated by the driver. The driver is
- * responsible of clearing it's content and setting initial values for some
- * required entries before calling this function.
- * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
- * to the struct vb2_queue description in include/media/videobuf2-core.h
- * for more information.
- */
 int vb2_queue_init(struct vb2_queue *q)
 {
 	/*
@@ -779,39 +664,12 @@
 }
 EXPORT_SYMBOL_GPL(vb2_queue_init);
 
-/**
- * vb2_queue_release() - stop streaming, release the queue and free memory
- * @q:		videobuf2 queue
- *
- * This function stops streaming and performs necessary clean ups, including
- * freeing video buffer memory. The driver is responsible for freeing
- * the vb2_queue structure itself.
- */
 void vb2_queue_release(struct vb2_queue *q)
 {
 	vb2_core_queue_release(q);
 }
 EXPORT_SYMBOL_GPL(vb2_queue_release);
 
-/**
- * vb2_poll() - implements poll userspace operation
- * @q:		videobuf2 queue
- * @file:	file argument passed to the poll file operation handler
- * @wait:	wait argument passed to the poll file operation handler
- *
- * This function implements poll file operation handler for a driver.
- * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
- * be informed that the file descriptor of a video device is available for
- * reading.
- * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
- * will be reported as available for writing.
- *
- * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
- * pending events.
- *
- * The return values from this function are intended to be directly returned
- * from poll handler in driver.
- */
 unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
 {
 	struct video_device *vfd = video_devdata(file);
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index c2820a6..ab3227b 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -41,7 +41,7 @@
 
 	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
 	if (!buf)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	buf->size = size;
 	buf->vaddr = vmalloc_user(buf->size);
@@ -53,7 +53,7 @@
 	if (!buf->vaddr) {
 		pr_debug("vmalloc of size %ld failed\n", buf->size);
 		kfree(buf);
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	atomic_inc(&buf->refcount);
@@ -77,17 +77,20 @@
 	struct vb2_vmalloc_buf *buf;
 	struct frame_vector *vec;
 	int n_pages, offset, i;
+	int ret = -ENOMEM;
 
 	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 	if (!buf)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	buf->dma_dir = dma_dir;
 	offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
 	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
-	if (IS_ERR(vec))
+	if (IS_ERR(vec)) {
+		ret = PTR_ERR(vec);
 		goto fail_pfnvec_create;
+	}
 	buf->vec = vec;
 	n_pages = frame_vector_count(vec);
 	if (frame_vector_to_pages(vec) < 0) {
@@ -117,7 +120,7 @@
 fail_pfnvec_create:
 	kfree(buf);
 
-	return NULL;
+	return ERR_PTR(ret);
 }
 
 static void vb2_vmalloc_put_userptr(void *buf_priv)
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 9c677f3..520f584 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -144,7 +144,7 @@
 	if (ret) {
 		ret->i_ino = get_next_ino();
 		ret->i_mode = mode;
-		ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
+		ret->i_atime = ret->i_mtime = ret->i_ctime = current_time(ret);
 	}
 	return ret;
 }
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index e0203b1..f806a44 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -1396,8 +1396,7 @@
 		pinned_pages->nr_pages = get_user_pages(
 				(u64)addr,
 				nr_pages,
-				!!(prot & SCIF_PROT_WRITE),
-				0,
+				(prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
 				pinned_pages->pages,
 				NULL);
 		up_write(&mm->mmap_sem);
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index a2d97b9..6fb773d 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -198,7 +198,7 @@
 #else
 	*pageshift = PAGE_SHIFT;
 #endif
-	if (get_user_pages(vaddr, 1, write, 0, &page, NULL) <= 0)
+	if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
 		return -EFAULT;
 	*paddr = page_to_phys(page);
 	put_page(page);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index e3936b8..d46e4ad 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -39,7 +39,6 @@
 #include <linux/gfp.h>
 #include <linux/slab.h>
 #include <linux/reboot.h>
-#include <linux/kconfig.h>
 #include <linux/leds.h>
 
 #include <linux/mtd/mtd.h>
@@ -376,6 +375,110 @@
 }
 
 /**
+ * mtd_wunit_to_pairing_info - get pairing information of a wunit
+ * @mtd: pointer to new MTD device info structure
+ * @wunit: write unit we are interested in
+ * @info: returned pairing information
+ *
+ * Retrieve pairing information associated to the wunit.
+ * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
+ * paired together, and where programming a page may influence the page it is
+ * paired with.
+ * The notion of page is replaced by the term wunit (write-unit) to stay
+ * consistent with the ->writesize field.
+ *
+ * The @wunit argument can be extracted from an absolute offset using
+ * mtd_offset_to_wunit(). @info is filled with the pairing information attached
+ * to @wunit.
+ *
+ * From the pairing info the MTD user can find all the wunits paired with
+ * @wunit using the following loop:
+ *
+ * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
+ *	info.pair = i;
+ *	mtd_pairing_info_to_wunit(mtd, &info);
+ *	...
+ * }
+ */
+int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
+			      struct mtd_pairing_info *info)
+{
+	int npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
+
+	if (wunit < 0 || wunit >= npairs)
+		return -EINVAL;
+
+	if (mtd->pairing && mtd->pairing->get_info)
+		return mtd->pairing->get_info(mtd, wunit, info);
+
+	info->group = 0;
+	info->pair = wunit;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
+
+/**
+ * mtd_wunit_to_pairing_info - get wunit from pairing information
+ * @mtd: pointer to new MTD device info structure
+ * @info: pairing information struct
+ *
+ * Returns a positive number representing the wunit associated to the info
+ * struct, or a negative error code.
+ *
+ * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
+ * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
+ * doc).
+ *
+ * It can also be used to only program the first page of each pair (i.e.
+ * page attached to group 0), which allows one to use an MLC NAND in
+ * software-emulated SLC mode:
+ *
+ * info.group = 0;
+ * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
+ * for (info.pair = 0; info.pair < npairs; info.pair++) {
+ *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
+ *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
+ *		  mtd->writesize, &retlen, buf + (i * mtd->writesize));
+ * }
+ */
+int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
+			      const struct mtd_pairing_info *info)
+{
+	int ngroups = mtd_pairing_groups(mtd);
+	int npairs = mtd_wunit_per_eb(mtd) / ngroups;
+
+	if (!info || info->pair < 0 || info->pair >= npairs ||
+	    info->group < 0 || info->group >= ngroups)
+		return -EINVAL;
+
+	if (mtd->pairing && mtd->pairing->get_wunit)
+		return mtd->pairing->get_wunit(mtd, info);
+
+	return info->pair;
+}
+EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
+
+/**
+ * mtd_pairing_groups - get the number of pairing groups
+ * @mtd: pointer to new MTD device info structure
+ *
+ * Returns the number of pairing groups.
+ *
+ * This number is usually equal to the number of bits exposed by a single
+ * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
+ * to iterate over all pages of a given pair.
+ */
+int mtd_pairing_groups(struct mtd_info *mtd)
+{
+	if (!mtd->pairing || !mtd->pairing->ngroups)
+		return 1;
+
+	return mtd->pairing->ngroups;
+}
+EXPORT_SYMBOL_GPL(mtd_pairing_groups);
+
+/**
  *	add_mtd_device - register an MTD device
  *	@mtd: pointer to new MTD device info structure
  *
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 1f13e32..fccdd49 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -30,7 +30,6 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/err.h>
-#include <linux/kconfig.h>
 
 #include "mtdcore.h"
 
@@ -317,6 +316,18 @@
 	return res;
 }
 
+static int part_get_device(struct mtd_info *mtd)
+{
+	struct mtd_part *part = mtd_to_part(mtd);
+	return part->master->_get_device(part->master);
+}
+
+static void part_put_device(struct mtd_info *mtd)
+{
+	struct mtd_part *part = mtd_to_part(mtd);
+	part->master->_put_device(part->master);
+}
+
 static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
 			      struct mtd_oob_region *oobregion)
 {
@@ -397,6 +408,7 @@
 	slave->mtd.oobsize = master->oobsize;
 	slave->mtd.oobavail = master->oobavail;
 	slave->mtd.subpage_sft = master->subpage_sft;
+	slave->mtd.pairing = master->pairing;
 
 	slave->mtd.name = name;
 	slave->mtd.owner = master->owner;
@@ -463,6 +475,12 @@
 		slave->mtd._block_isbad = part_block_isbad;
 	if (master->_block_markbad)
 		slave->mtd._block_markbad = part_block_markbad;
+
+	if (master->_get_device)
+		slave->mtd._get_device = part_get_device;
+	if (master->_put_device)
+		slave->mtd._put_device = part_put_device;
+
 	slave->mtd._erase = part_erase;
 	slave->master = master;
 	slave->offset = part->offset;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 21ff580..7b7a887 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -88,11 +88,11 @@
 	  Support for NAND flash on Amstrad E3 (Delta).
 
 config MTD_NAND_OMAP2
-	tristate "NAND Flash device on OMAP2, OMAP3 and OMAP4"
-	depends on ARCH_OMAP2PLUS
+	tristate "NAND Flash device on OMAP2, OMAP3, OMAP4 and Keystone"
+	depends on (ARCH_OMAP2PLUS || ARCH_KEYSTONE)
 	help
-          Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
-	  platforms.
+          Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
+	  and Keystone platforms.
 
 config MTD_NAND_OMAP_BCH
 	depends on MTD_NAND_OMAP2
@@ -428,7 +428,7 @@
 
 config MTD_NAND_FSL_ELBC
 	tristate "NAND support for Freescale eLBC controllers"
-	depends on PPC
+	depends on FSL_SOC
 	select FSL_LBC
 	help
 	  Various Freescale chips, including the 8313, include a NAND Flash
@@ -438,7 +438,7 @@
 
 config MTD_NAND_FSL_IFC
 	tristate "NAND support for Freescale IFC controller"
-	depends on MTD_NAND && (FSL_SOC || ARCH_LAYERSCAPE)
+	depends on FSL_SOC || ARCH_LAYERSCAPE
 	select FSL_IFC
 	select MEMORY
 	help
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 37da423..3962f55 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -761,8 +761,7 @@
 
 	platform_set_drvdata(pdev, info);
 
-	spin_lock_init(&info->controller.lock);
-	init_waitqueue_head(&info->controller.wq);
+	nand_hw_control_init(&info->controller);
 
 	info->device     = &pdev->dev;
 	info->platform   = plat;
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index 8eb2c64..9d2424b 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -1336,7 +1336,7 @@
 		u32 *flash_cache = (u32 *)ctrl->flash_cache;
 		int i;
 
-		brcmnand_soc_data_bus_prepare(ctrl->soc);
+		brcmnand_soc_data_bus_prepare(ctrl->soc, true);
 
 		/*
 		 * Must cache the FLASH_CACHE now, since changes in
@@ -1349,7 +1349,7 @@
 			 */
 			flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));
 
-		brcmnand_soc_data_bus_unprepare(ctrl->soc);
+		brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
 
 		/* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
 		if (host->hwcfg.sector_size_1k)
@@ -1565,12 +1565,12 @@
 		brcmnand_waitfunc(mtd, chip);
 
 		if (likely(buf)) {
-			brcmnand_soc_data_bus_prepare(ctrl->soc);
+			brcmnand_soc_data_bus_prepare(ctrl->soc, false);
 
 			for (j = 0; j < FC_WORDS; j++, buf++)
 				*buf = brcmnand_read_fc(ctrl, j);
 
-			brcmnand_soc_data_bus_unprepare(ctrl->soc);
+			brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
 		}
 
 		if (oob)
@@ -1815,12 +1815,12 @@
 		(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
 
 		if (buf) {
-			brcmnand_soc_data_bus_prepare(ctrl->soc);
+			brcmnand_soc_data_bus_prepare(ctrl->soc, false);
 
 			for (j = 0; j < FC_WORDS; j++, buf++)
 				brcmnand_write_fc(ctrl, j, *buf);
 
-			brcmnand_soc_data_bus_unprepare(ctrl->soc);
+			brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
 		} else if (oob) {
 			for (j = 0; j < FC_WORDS; j++)
 				brcmnand_write_fc(ctrl, j, 0xffffffff);
@@ -2370,8 +2370,7 @@
 
 	init_completion(&ctrl->done);
 	init_completion(&ctrl->dma_done);
-	spin_lock_init(&ctrl->controller.lock);
-	init_waitqueue_head(&ctrl->controller.wq);
+	nand_hw_control_init(&ctrl->controller);
 	INIT_LIST_HEAD(&ctrl->host_list);
 
 	/* NAND register range */
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.h b/drivers/mtd/nand/brcmnand/brcmnand.h
index ef5eabb..5c44cd4 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.h
+++ b/drivers/mtd/nand/brcmnand/brcmnand.h
@@ -23,19 +23,22 @@
 struct brcmnand_soc {
 	bool (*ctlrdy_ack)(struct brcmnand_soc *soc);
 	void (*ctlrdy_set_enabled)(struct brcmnand_soc *soc, bool en);
-	void (*prepare_data_bus)(struct brcmnand_soc *soc, bool prepare);
+	void (*prepare_data_bus)(struct brcmnand_soc *soc, bool prepare,
+				 bool is_param);
 };
 
-static inline void brcmnand_soc_data_bus_prepare(struct brcmnand_soc *soc)
+static inline void brcmnand_soc_data_bus_prepare(struct brcmnand_soc *soc,
+						 bool is_param)
 {
 	if (soc && soc->prepare_data_bus)
-		soc->prepare_data_bus(soc, true);
+		soc->prepare_data_bus(soc, true, is_param);
 }
 
-static inline void brcmnand_soc_data_bus_unprepare(struct brcmnand_soc *soc)
+static inline void brcmnand_soc_data_bus_unprepare(struct brcmnand_soc *soc,
+						   bool is_param)
 {
 	if (soc && soc->prepare_data_bus)
-		soc->prepare_data_bus(soc, false);
+		soc->prepare_data_bus(soc, false, is_param);
 }
 
 static inline u32 brcmnand_readl(void __iomem *addr)
diff --git a/drivers/mtd/nand/brcmnand/iproc_nand.c b/drivers/mtd/nand/brcmnand/iproc_nand.c
index 585596c..4c6ae11 100644
--- a/drivers/mtd/nand/brcmnand/iproc_nand.c
+++ b/drivers/mtd/nand/brcmnand/iproc_nand.c
@@ -74,7 +74,8 @@
 	spin_unlock_irqrestore(&priv->idm_lock, flags);
 }
 
-static void iproc_nand_apb_access(struct brcmnand_soc *soc, bool prepare)
+static void iproc_nand_apb_access(struct brcmnand_soc *soc, bool prepare,
+				  bool is_param)
 {
 	struct iproc_nand_soc *priv =
 			container_of(soc, struct iproc_nand_soc, soc);
@@ -86,10 +87,19 @@
 
 	val = brcmnand_readl(mmio);
 
-	if (prepare)
-		val |= IPROC_NAND_APB_LE_MODE;
-	else
+	/*
+	 * In the case of BE or when dealing with NAND data, alway configure
+	 * the APB bus to LE mode before accessing the FIFO and back to BE mode
+	 * after the access is done
+	 */
+	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) || !is_param) {
+		if (prepare)
+			val |= IPROC_NAND_APB_LE_MODE;
+		else
+			val &= ~IPROC_NAND_APB_LE_MODE;
+	} else { /* when in LE accessing the parameter page, keep APB in BE */
 		val &= ~IPROC_NAND_APB_LE_MODE;
+	}
 
 	brcmnand_writel(val, mmio);
 
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 4731699..7af2a3c 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -1249,8 +1249,7 @@
 	nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE;
 	nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
 	nand->controller = &nand->hwcontrol;
-	spin_lock_init(&nand->controller->lock);
-	init_waitqueue_head(&nand->controller->wq);
+	nand_hw_control_init(nand->controller);
 
 	/* methods */
 	nand->cmdfunc = docg4_command;
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 60a88f2..113f76e 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -879,8 +879,7 @@
 		}
 		elbc_fcm_ctrl->counter++;
 
-		spin_lock_init(&elbc_fcm_ctrl->controller.lock);
-		init_waitqueue_head(&elbc_fcm_ctrl->controller.wq);
+		nand_hw_control_init(&elbc_fcm_ctrl->controller);
 		fsl_lbc_ctrl_dev->nand = elbc_fcm_ctrl;
 	} else {
 		elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 4e9e5fd..0a177b1 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -987,8 +987,7 @@
 		ifc_nand_ctrl->addr = NULL;
 		fsl_ifc_ctrl_dev->nand = ifc_nand_ctrl;
 
-		spin_lock_init(&ifc_nand_ctrl->controller.lock);
-		init_waitqueue_head(&ifc_nand_ctrl->controller.wq);
+		nand_hw_control_init(&ifc_nand_ctrl->controller);
 	} else {
 		ifc_nand_ctrl = fsl_ifc_ctrl_dev->nand;
 	}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 6e46156..6c062b8 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -318,7 +318,8 @@
 		return -EINVAL;
 	}
 
-	geo->page_size = mtd->writesize + mtd->oobsize;
+	geo->page_size = mtd->writesize + geo->metadata_size +
+		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
 	geo->payload_size = mtd->writesize;
 
 	/*
diff --git a/drivers/mtd/nand/jz4780_nand.c b/drivers/mtd/nand/jz4780_nand.c
index 175f67d..a39bb70 100644
--- a/drivers/mtd/nand/jz4780_nand.c
+++ b/drivers/mtd/nand/jz4780_nand.c
@@ -368,9 +368,8 @@
 	nfc->dev = dev;
 	nfc->num_banks = num_banks;
 
-	spin_lock_init(&nfc->controller.lock);
+	nand_hw_control_init(&nfc->controller);
 	INIT_LIST_HEAD(&nfc->chips);
-	init_waitqueue_head(&nfc->controller.wq);
 
 	ret = jz4780_nand_init_chips(nfc, pdev);
 	if (ret) {
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 57cbe2b..d7f724b 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -152,6 +152,9 @@
 	void (*select_chip)(struct mtd_info *mtd, int chip);
 	int (*correct_data)(struct mtd_info *mtd, u_char *dat,
 			u_char *read_ecc, u_char *calc_ecc);
+	int (*setup_data_interface)(struct mtd_info *mtd,
+				    const struct nand_data_interface *conf,
+				    bool check_only);
 
 	/*
 	 * On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked
@@ -1012,6 +1015,82 @@
 	writew(0x4, NFC_V1_V2_WRPROT);
 }
 
+static int mxc_nand_v2_setup_data_interface(struct mtd_info *mtd,
+					const struct nand_data_interface *conf,
+					bool check_only)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+	int tRC_min_ns, tRC_ps, ret;
+	unsigned long rate, rate_round;
+	const struct nand_sdr_timings *timings;
+	u16 config1;
+
+	timings = nand_get_sdr_timings(conf);
+	if (IS_ERR(timings))
+		return -ENOTSUPP;
+
+	config1 = readw(NFC_V1_V2_CONFIG1);
+
+	tRC_min_ns = timings->tRC_min / 1000;
+	rate = 1000000000 / tRC_min_ns;
+
+	/*
+	 * For tRC < 30ns we have to use EDO mode. In this case the controller
+	 * does one access per clock cycle. Otherwise the controller does one
+	 * access in two clock cycles, thus we have to double the rate to the
+	 * controller.
+	 */
+	if (tRC_min_ns < 30) {
+		rate_round = clk_round_rate(host->clk, rate);
+		config1 |= NFC_V2_CONFIG1_ONE_CYCLE;
+		tRC_ps = 1000000000 / (rate_round / 1000);
+	} else {
+		rate *= 2;
+		rate_round = clk_round_rate(host->clk, rate);
+		config1 &= ~NFC_V2_CONFIG1_ONE_CYCLE;
+		tRC_ps = 1000000000 / (rate_round / 1000 / 2);
+	}
+
+	/*
+	 * The timing values compared against are from the i.MX25 Automotive
+	 * datasheet, Table 50. NFC Timing Parameters
+	 */
+	if (timings->tCLS_min > tRC_ps - 1000 ||
+	    timings->tCLH_min > tRC_ps - 2000 ||
+	    timings->tCS_min > tRC_ps - 1000 ||
+	    timings->tCH_min > tRC_ps - 2000 ||
+	    timings->tWP_min > tRC_ps - 1500 ||
+	    timings->tALS_min > tRC_ps ||
+	    timings->tALH_min > tRC_ps - 3000 ||
+	    timings->tDS_min > tRC_ps ||
+	    timings->tDH_min > tRC_ps - 5000 ||
+	    timings->tWC_min > 2 * tRC_ps ||
+	    timings->tWH_min > tRC_ps - 2500 ||
+	    timings->tRR_min > 6 * tRC_ps ||
+	    timings->tRP_min > 3 * tRC_ps / 2 ||
+	    timings->tRC_min > 2 * tRC_ps ||
+	    timings->tREH_min > (tRC_ps / 2) - 2500) {
+		dev_dbg(host->dev, "Timing out of bounds\n");
+		return -EINVAL;
+	}
+
+	if (check_only)
+		return 0;
+
+	ret = clk_set_rate(host->clk, rate);
+	if (ret)
+		return ret;
+
+	writew(config1, NFC_V1_V2_CONFIG1);
+
+	dev_dbg(host->dev, "Setting rate to %ldHz, %s mode\n", rate_round,
+		config1 & NFC_V2_CONFIG1_ONE_CYCLE ? "One cycle (EDO)" :
+		"normal");
+
+	return 0;
+}
+
 static void preset_v2(struct mtd_info *mtd)
 {
 	struct nand_chip *nand_chip = mtd_to_nand(mtd);
@@ -1239,6 +1318,57 @@
 	}
 }
 
+static int mxc_nand_onfi_set_features(struct mtd_info *mtd,
+				      struct nand_chip *chip, int addr,
+				      u8 *subfeature_param)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+	int i;
+
+	if (!chip->onfi_version ||
+	    !(le16_to_cpu(chip->onfi_params.opt_cmd)
+	      & ONFI_OPT_CMD_SET_GET_FEATURES))
+		return -EINVAL;
+
+	host->buf_start = 0;
+
+	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+		chip->write_byte(mtd, subfeature_param[i]);
+
+	memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
+	host->devtype_data->send_cmd(host, NAND_CMD_SET_FEATURES, false);
+	mxc_do_addr_cycle(mtd, addr, -1);
+	host->devtype_data->send_page(mtd, NFC_INPUT);
+
+	return 0;
+}
+
+static int mxc_nand_onfi_get_features(struct mtd_info *mtd,
+				      struct nand_chip *chip, int addr,
+				      u8 *subfeature_param)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+	int i;
+
+	if (!chip->onfi_version ||
+	    !(le16_to_cpu(chip->onfi_params.opt_cmd)
+	      & ONFI_OPT_CMD_SET_GET_FEATURES))
+		return -EINVAL;
+
+	host->devtype_data->send_cmd(host, NAND_CMD_GET_FEATURES, false);
+	mxc_do_addr_cycle(mtd, addr, -1);
+	host->devtype_data->send_page(mtd, NFC_OUTPUT);
+	memcpy32_fromio(host->data_buf, host->main_area0, 512);
+	host->buf_start = 0;
+
+	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+		*subfeature_param++ = chip->read_byte(mtd);
+
+	return 0;
+}
+
 /*
  * The generic flash bbt decriptors overlap with our ecc
  * hardware, so define some i.MX specific ones.
@@ -1327,6 +1457,7 @@
 	.ooblayout = &mxc_v2_ooblayout_ops,
 	.select_chip = mxc_nand_select_chip_v2,
 	.correct_data = mxc_nand_correct_data_v2_v3,
+	.setup_data_interface = mxc_nand_v2_setup_data_interface,
 	.irqpending_quirk = 0,
 	.needs_ip = 0,
 	.regs_offset = 0x1e00,
@@ -1434,7 +1565,7 @@
 };
 MODULE_DEVICE_TABLE(platform, mxcnd_devtype);
 
-#ifdef CONFIG_OF_MTD
+#ifdef CONFIG_OF
 static const struct of_device_id mxcnd_dt_ids[] = {
 	{
 		.compatible = "fsl,imx21-nand",
@@ -1513,6 +1644,8 @@
 	this->read_word = mxc_nand_read_word;
 	this->write_buf = mxc_nand_write_buf;
 	this->read_buf = mxc_nand_read_buf;
+	this->onfi_set_features = mxc_nand_onfi_set_features;
+	this->onfi_get_features = mxc_nand_onfi_get_features;
 
 	host->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(host->clk))
@@ -1533,6 +1666,8 @@
 	if (err < 0)
 		return err;
 
+	this->setup_data_interface = host->devtype_data->setup_data_interface;
+
 	if (host->devtype_data->needs_ip) {
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 		host->regs_ip = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 77533f7..e5718e5 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -745,7 +745,10 @@
 				column >>= 1;
 			chip->cmd_ctrl(mtd, column, ctrl);
 			ctrl &= ~NAND_CTRL_CHANGE;
-			chip->cmd_ctrl(mtd, column >> 8, ctrl);
+
+			/* Only output a single addr cycle for 8bits opcodes. */
+			if (!nand_opcode_8bits(command))
+				chip->cmd_ctrl(mtd, column >> 8, ctrl);
 		}
 		if (page_addr != -1) {
 			chip->cmd_ctrl(mtd, page_addr, ctrl);
@@ -948,6 +951,172 @@
 }
 
 /**
+ * nand_reset_data_interface - Reset data interface and timings
+ * @chip: The NAND chip
+ *
+ * Reset the Data interface and timings to ONFI mode 0.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_reset_data_interface(struct nand_chip *chip)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	const struct nand_data_interface *conf;
+	int ret;
+
+	if (!chip->setup_data_interface)
+		return 0;
+
+	/*
+	 * The ONFI specification says:
+	 * "
+	 * To transition from NV-DDR or NV-DDR2 to the SDR data
+	 * interface, the host shall use the Reset (FFh) command
+	 * using SDR timing mode 0. A device in any timing mode is
+	 * required to recognize Reset (FFh) command issued in SDR
+	 * timing mode 0.
+	 * "
+	 *
+	 * Configure the data interface in SDR mode and set the
+	 * timings to timing mode 0.
+	 */
+
+	conf = nand_get_default_data_interface();
+	ret = chip->setup_data_interface(mtd, conf, false);
+	if (ret)
+		pr_err("Failed to configure data interface to SDR timing mode 0\n");
+
+	return ret;
+}
+
+/**
+ * nand_setup_data_interface - Setup the best data interface and timings
+ * @chip: The NAND chip
+ *
+ * Find and configure the best data interface and NAND timings supported by
+ * the chip and the driver.
+ * First tries to retrieve supported timing modes from ONFI information,
+ * and if the NAND chip does not support ONFI, relies on the
+ * ->onfi_timing_mode_default specified in the nand_ids table.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_setup_data_interface(struct nand_chip *chip)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	int ret;
+
+	if (!chip->setup_data_interface || !chip->data_interface)
+		return 0;
+
+	/*
+	 * Ensure the timing mode has been changed on the chip side
+	 * before changing timings on the controller side.
+	 */
+	if (chip->onfi_version) {
+		u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
+			chip->onfi_timing_mode_default,
+		};
+
+		ret = chip->onfi_set_features(mtd, chip,
+				ONFI_FEATURE_ADDR_TIMING_MODE,
+				tmode_param);
+		if (ret)
+			goto err;
+	}
+
+	ret = chip->setup_data_interface(mtd, chip->data_interface, false);
+err:
+	return ret;
+}
+
+/**
+ * nand_init_data_interface - find the best data interface and timings
+ * @chip: The NAND chip
+ *
+ * Find the best data interface and NAND timings supported by the chip
+ * and the driver.
+ * First tries to retrieve supported timing modes from ONFI information,
+ * and if the NAND chip does not support ONFI, relies on the
+ * ->onfi_timing_mode_default specified in the nand_ids table. After this
+ * function nand_chip->data_interface is initialized with the best timing mode
+ * available.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_init_data_interface(struct nand_chip *chip)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	int modes, mode, ret;
+
+	if (!chip->setup_data_interface)
+		return 0;
+
+	/*
+	 * First try to identify the best timings from ONFI parameters and
+	 * if the NAND does not support ONFI, fallback to the default ONFI
+	 * timing mode.
+	 */
+	modes = onfi_get_async_timing_mode(chip);
+	if (modes == ONFI_TIMING_MODE_UNKNOWN) {
+		if (!chip->onfi_timing_mode_default)
+			return 0;
+
+		modes = GENMASK(chip->onfi_timing_mode_default, 0);
+	}
+
+	chip->data_interface = kzalloc(sizeof(*chip->data_interface),
+				       GFP_KERNEL);
+	if (!chip->data_interface)
+		return -ENOMEM;
+
+	for (mode = fls(modes) - 1; mode >= 0; mode--) {
+		ret = onfi_init_data_interface(chip, chip->data_interface,
+					       NAND_SDR_IFACE, mode);
+		if (ret)
+			continue;
+
+		ret = chip->setup_data_interface(mtd, chip->data_interface,
+						 true);
+		if (!ret) {
+			chip->onfi_timing_mode_default = mode;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static void nand_release_data_interface(struct nand_chip *chip)
+{
+	kfree(chip->data_interface);
+}
+
+/**
+ * nand_reset - Reset and initialize a NAND device
+ * @chip: The NAND chip
+ *
+ * Returns 0 for success or negative error code otherwise
+ */
+int nand_reset(struct nand_chip *chip)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	int ret;
+
+	ret = nand_reset_data_interface(chip);
+	if (ret)
+		return ret;
+
+	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+	ret = nand_setup_data_interface(chip);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/**
  * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
  * @mtd: mtd info
  * @ofs: offset to start unlock from
@@ -1025,7 +1194,7 @@
 	 * some operation can also clear the bit 7 of status register
 	 * eg. erase/program a locked block
 	 */
-	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+	nand_reset(chip);
 
 	/* Check, if it is write protected */
 	if (nand_check_wp(mtd)) {
@@ -1084,7 +1253,7 @@
 	 * some operation can also clear the bit 7 of status register
 	 * eg. erase/program a locked block
 	 */
-	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+	nand_reset(chip);
 
 	/* Check, if it is write protected */
 	if (nand_check_wp(mtd)) {
@@ -2162,7 +2331,7 @@
 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
 			 struct mtd_oob_ops *ops)
 {
-	int ret = -ENOTSUPP;
+	int ret;
 
 	ops->retlen = 0;
 
@@ -2173,24 +2342,18 @@
 		return -EINVAL;
 	}
 
+	if (ops->mode != MTD_OPS_PLACE_OOB &&
+	    ops->mode != MTD_OPS_AUTO_OOB &&
+	    ops->mode != MTD_OPS_RAW)
+		return -ENOTSUPP;
+
 	nand_get_device(mtd, FL_READING);
 
-	switch (ops->mode) {
-	case MTD_OPS_PLACE_OOB:
-	case MTD_OPS_AUTO_OOB:
-	case MTD_OPS_RAW:
-		break;
-
-	default:
-		goto out;
-	}
-
 	if (!ops->datbuf)
 		ret = nand_do_read_oob(mtd, from, ops);
 	else
 		ret = nand_do_read_ops(mtd, from, ops);
 
-out:
 	nand_release_device(mtd);
 	return ret;
 }
@@ -2788,7 +2951,7 @@
 	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
 	 * it in the doc2000 driver in August 1999.  dwmw2.
 	 */
-	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+	nand_reset(chip);
 
 	/* Check, if it is write protected */
 	if (nand_check_wp(mtd)) {
@@ -3191,8 +3354,7 @@
 
 	if (!chip->controller) {
 		chip->controller = &chip->hwcontrol;
-		spin_lock_init(&chip->controller->lock);
-		init_waitqueue_head(&chip->controller->wq);
+		nand_hw_control_init(chip->controller);
 	}
 
 }
@@ -3829,7 +3991,7 @@
 	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
 	 * after power-up.
 	 */
-	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+	nand_reset(chip);
 
 	/* Send the command for reading device ID */
 	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
@@ -4113,6 +4275,9 @@
 	if (ecc_step > 0)
 		chip->ecc.size = ecc_step;
 
+	if (of_property_read_bool(dn, "nand-ecc-maximize"))
+		chip->ecc.options |= NAND_ECC_MAXIMIZE;
+
 	return 0;
 }
 
@@ -4141,6 +4306,15 @@
 	if (!mtd->name && mtd->dev.parent)
 		mtd->name = dev_name(mtd->dev.parent);
 
+	if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
+		/*
+		 * Default functions assigned for chip_select() and
+		 * cmdfunc() both expect cmd_ctrl() to be populated,
+		 * so we need to check that that's the case
+		 */
+		pr_err("chip.cmd_ctrl() callback is not provided");
+		return -EINVAL;
+	}
 	/* Set the default functions */
 	nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
 
@@ -4155,13 +4329,17 @@
 		return PTR_ERR(type);
 	}
 
+	ret = nand_init_data_interface(chip);
+	if (ret)
+		return ret;
+
 	chip->select_chip(mtd, -1);
 
 	/* Check for a chip array */
 	for (i = 1; i < maxchips; i++) {
 		chip->select_chip(mtd, i);
 		/* See comment in nand_get_flash_type for reset */
-		chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+		nand_reset(chip);
 		/* Send the command for reading device ID */
 		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
 		/* Read manufacturer and device IDs */
@@ -4221,6 +4399,7 @@
 		ecc->write_page_raw = nand_write_page_raw;
 		ecc->read_oob = nand_read_oob_std;
 		ecc->write_oob = nand_write_oob_std;
+
 		/*
 		* Board driver should supply ecc.size and ecc.strength
 		* values to select how many bits are correctable.
@@ -4243,6 +4422,25 @@
 			}
 
 			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+
+		}
+
+		/*
+		 * We can only maximize ECC config when the default layout is
+		 * used, otherwise we don't know how many bytes can really be
+		 * used.
+		 */
+		if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
+		    ecc->options & NAND_ECC_MAXIMIZE) {
+			int steps, bytes;
+
+			/* Always prefer 1k blocks over 512bytes ones */
+			ecc->size = 1024;
+			steps = mtd->writesize / ecc->size;
+
+			/* Reserve 2 bytes for the BBM */
+			bytes = (mtd->oobsize - 2) / steps;
+			ecc->strength = bytes * 8 / fls(8 * ecc->size);
 		}
 
 		/* See nand_bch_init() for details. */
@@ -4601,18 +4799,16 @@
 EXPORT_SYMBOL(nand_scan);
 
 /**
- * nand_release - [NAND Interface] Free resources held by the NAND device
- * @mtd: MTD device structure
+ * nand_cleanup - [NAND Interface] Free resources held by the NAND device
+ * @chip: NAND chip object
  */
-void nand_release(struct mtd_info *mtd)
+void nand_cleanup(struct nand_chip *chip)
 {
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
 	if (chip->ecc.mode == NAND_ECC_SOFT &&
 	    chip->ecc.algo == NAND_ECC_BCH)
 		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
 
-	mtd_device_unregister(mtd);
+	nand_release_data_interface(chip);
 
 	/* Free bad block table memory */
 	kfree(chip->bbt);
@@ -4624,6 +4820,18 @@
 			& NAND_BBT_DYNAMICSTRUCT)
 		kfree(chip->badblock_pattern);
 }
+EXPORT_SYMBOL_GPL(nand_cleanup);
+
+/**
+ * nand_release - [NAND Interface] Unregister the MTD device and free resources
+ *		  held by the NAND device
+ * @mtd: MTD device structure
+ */
+void nand_release(struct mtd_info *mtd)
+{
+	mtd_device_unregister(mtd);
+	nand_cleanup(mtd_to_nand(mtd));
+}
 EXPORT_SYMBOL_GPL(nand_release);
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 2fbb523..7695efe 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -605,6 +605,100 @@
 }
 
 /**
+ * get_bbt_block - Get the first valid eraseblock suitable to store a BBT
+ * @this: the NAND device
+ * @td: the BBT description
+ * @md: the mirror BBT descriptor
+ * @chip: the CHIP selector
+ *
+ * This functions returns a positive block number pointing a valid eraseblock
+ * suitable to store a BBT (i.e. in the range reserved for BBT), or -ENOSPC if
+ * all blocks are already used of marked bad. If td->pages[chip] was already
+ * pointing to a valid block we re-use it, otherwise we search for the next
+ * valid one.
+ */
+static int get_bbt_block(struct nand_chip *this, struct nand_bbt_descr *td,
+			 struct nand_bbt_descr *md, int chip)
+{
+	int startblock, dir, page, numblocks, i;
+
+	/*
+	 * There was already a version of the table, reuse the page. This
+	 * applies for absolute placement too, as we have the page number in
+	 * td->pages.
+	 */
+	if (td->pages[chip] != -1)
+		return td->pages[chip] >>
+				(this->bbt_erase_shift - this->page_shift);
+
+	numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+	if (!(td->options & NAND_BBT_PERCHIP))
+		numblocks *= this->numchips;
+
+	/*
+	 * Automatic placement of the bad block table. Search direction
+	 * top -> down?
+	 */
+	if (td->options & NAND_BBT_LASTBLOCK) {
+		startblock = numblocks * (chip + 1) - 1;
+		dir = -1;
+	} else {
+		startblock = chip * numblocks;
+		dir = 1;
+	}
+
+	for (i = 0; i < td->maxblocks; i++) {
+		int block = startblock + dir * i;
+
+		/* Check, if the block is bad */
+		switch (bbt_get_entry(this, block)) {
+		case BBT_BLOCK_WORN:
+		case BBT_BLOCK_FACTORY_BAD:
+			continue;
+		}
+
+		page = block << (this->bbt_erase_shift - this->page_shift);
+
+		/* Check, if the block is used by the mirror table */
+		if (!md || md->pages[chip] != page)
+			return block;
+	}
+
+	return -ENOSPC;
+}
+
+/**
+ * mark_bbt_block_bad - Mark one of the block reserved for BBT bad
+ * @this: the NAND device
+ * @td: the BBT description
+ * @chip: the CHIP selector
+ * @block: the BBT block to mark
+ *
+ * Blocks reserved for BBT can become bad. This functions is an helper to mark
+ * such blocks as bad. It takes care of updating the in-memory BBT, marking the
+ * block as bad using a bad block marker and invalidating the associated
+ * td->pages[] entry.
+ */
+static void mark_bbt_block_bad(struct nand_chip *this,
+			       struct nand_bbt_descr *td,
+			       int chip, int block)
+{
+	struct mtd_info *mtd = nand_to_mtd(this);
+	loff_t to;
+	int res;
+
+	bbt_mark_entry(this, block, BBT_BLOCK_WORN);
+
+	to = (loff_t)block << this->bbt_erase_shift;
+	res = this->block_markbad(mtd, to);
+	if (res)
+		pr_warn("nand_bbt: error %d while marking block %d bad\n",
+			res, block);
+
+	td->pages[chip] = -1;
+}
+
+/**
  * write_bbt - [GENERIC] (Re)write the bad block table
  * @mtd: MTD device structure
  * @buf: temporary buffer
@@ -621,7 +715,7 @@
 	struct nand_chip *this = mtd_to_nand(mtd);
 	struct erase_info einfo;
 	int i, res, chip = 0;
-	int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
+	int bits, page, offs, numblocks, sft, sftmsk;
 	int nrchips, pageoffs, ooboffs;
 	uint8_t msk[4];
 	uint8_t rcode = td->reserved_block_code;
@@ -652,46 +746,21 @@
 	}
 
 	/* Loop through the chips */
-	for (; chip < nrchips; chip++) {
-		/*
-		 * There was already a version of the table, reuse the page
-		 * This applies for absolute placement too, as we have the
-		 * page nr. in td->pages.
-		 */
-		if (td->pages[chip] != -1) {
-			page = td->pages[chip];
-			goto write;
+	while (chip < nrchips) {
+		int block;
+
+		block = get_bbt_block(this, td, md, chip);
+		if (block < 0) {
+			pr_err("No space left to write bad block table\n");
+			res = block;
+			goto outerr;
 		}
 
 		/*
-		 * Automatic placement of the bad block table. Search direction
-		 * top -> down?
+		 * get_bbt_block() returns a block number, shift the value to
+		 * get a page number.
 		 */
-		if (td->options & NAND_BBT_LASTBLOCK) {
-			startblock = numblocks * (chip + 1) - 1;
-			dir = -1;
-		} else {
-			startblock = chip * numblocks;
-			dir = 1;
-		}
-
-		for (i = 0; i < td->maxblocks; i++) {
-			int block = startblock + dir * i;
-			/* Check, if the block is bad */
-			switch (bbt_get_entry(this, block)) {
-			case BBT_BLOCK_WORN:
-			case BBT_BLOCK_FACTORY_BAD:
-				continue;
-			}
-			page = block <<
-				(this->bbt_erase_shift - this->page_shift);
-			/* Check, if the block is used by the mirror table */
-			if (!md || md->pages[chip] != page)
-				goto write;
-		}
-		pr_err("No space left to write bad block table\n");
-		return -ENOSPC;
-	write:
+		page = block << (this->bbt_erase_shift - this->page_shift);
 
 		/* Set up shift count and masks for the flash table */
 		bits = td->options & NAND_BBT_NRBITS_MSK;
@@ -787,20 +856,28 @@
 		einfo.addr = to;
 		einfo.len = 1 << this->bbt_erase_shift;
 		res = nand_erase_nand(mtd, &einfo, 1);
-		if (res < 0)
-			goto outerr;
+		if (res < 0) {
+			pr_warn("nand_bbt: error while erasing BBT block %d\n",
+				res);
+			mark_bbt_block_bad(this, td, chip, block);
+			continue;
+		}
 
 		res = scan_write_bbt(mtd, to, len, buf,
 				td->options & NAND_BBT_NO_OOB ? NULL :
 				&buf[len]);
-		if (res < 0)
-			goto outerr;
+		if (res < 0) {
+			pr_warn("nand_bbt: error while writing BBT block %d\n",
+				res);
+			mark_bbt_block_bad(this, td, chip, block);
+			continue;
+		}
 
 		pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
 			 (unsigned long long)to, td->version[chip]);
 
 		/* Mark it as used */
-		td->pages[chip] = page;
+		td->pages[chip++] = page;
 	}
 	return 0;
 
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
index e81470a..13a5874 100644
--- a/drivers/mtd/nand/nand_timings.c
+++ b/drivers/mtd/nand/nand_timings.c
@@ -13,228 +13,246 @@
 #include <linux/export.h>
 #include <linux/mtd/nand.h>
 
-static const struct nand_sdr_timings onfi_sdr_timings[] = {
+static const struct nand_data_interface onfi_sdr_timings[] = {
 	/* Mode 0 */
 	{
-		.tADL_min = 200000,
-		.tALH_min = 20000,
-		.tALS_min = 50000,
-		.tAR_min = 25000,
-		.tCEA_max = 100000,
-		.tCEH_min = 20000,
-		.tCH_min = 20000,
-		.tCHZ_max = 100000,
-		.tCLH_min = 20000,
-		.tCLR_min = 20000,
-		.tCLS_min = 50000,
-		.tCOH_min = 0,
-		.tCS_min = 70000,
-		.tDH_min = 20000,
-		.tDS_min = 40000,
-		.tFEAT_max = 1000000,
-		.tIR_min = 10000,
-		.tITC_max = 1000000,
-		.tRC_min = 100000,
-		.tREA_max = 40000,
-		.tREH_min = 30000,
-		.tRHOH_min = 0,
-		.tRHW_min = 200000,
-		.tRHZ_max = 200000,
-		.tRLOH_min = 0,
-		.tRP_min = 50000,
-		.tRST_max = 250000000000ULL,
-		.tWB_max = 200000,
-		.tRR_min = 40000,
-		.tWC_min = 100000,
-		.tWH_min = 30000,
-		.tWHR_min = 120000,
-		.tWP_min = 50000,
-		.tWW_min = 100000,
+		.type = NAND_SDR_IFACE,
+		.timings.sdr = {
+			.tADL_min = 400000,
+			.tALH_min = 20000,
+			.tALS_min = 50000,
+			.tAR_min = 25000,
+			.tCEA_max = 100000,
+			.tCEH_min = 20000,
+			.tCH_min = 20000,
+			.tCHZ_max = 100000,
+			.tCLH_min = 20000,
+			.tCLR_min = 20000,
+			.tCLS_min = 50000,
+			.tCOH_min = 0,
+			.tCS_min = 70000,
+			.tDH_min = 20000,
+			.tDS_min = 40000,
+			.tFEAT_max = 1000000,
+			.tIR_min = 10000,
+			.tITC_max = 1000000,
+			.tRC_min = 100000,
+			.tREA_max = 40000,
+			.tREH_min = 30000,
+			.tRHOH_min = 0,
+			.tRHW_min = 200000,
+			.tRHZ_max = 200000,
+			.tRLOH_min = 0,
+			.tRP_min = 50000,
+			.tRR_min = 40000,
+			.tRST_max = 250000000000ULL,
+			.tWB_max = 200000,
+			.tWC_min = 100000,
+			.tWH_min = 30000,
+			.tWHR_min = 120000,
+			.tWP_min = 50000,
+			.tWW_min = 100000,
+		},
 	},
 	/* Mode 1 */
 	{
-		.tADL_min = 100000,
-		.tALH_min = 10000,
-		.tALS_min = 25000,
-		.tAR_min = 10000,
-		.tCEA_max = 45000,
-		.tCEH_min = 20000,
-		.tCH_min = 10000,
-		.tCHZ_max = 50000,
-		.tCLH_min = 10000,
-		.tCLR_min = 10000,
-		.tCLS_min = 25000,
-		.tCOH_min = 15000,
-		.tCS_min = 35000,
-		.tDH_min = 10000,
-		.tDS_min = 20000,
-		.tFEAT_max = 1000000,
-		.tIR_min = 0,
-		.tITC_max = 1000000,
-		.tRC_min = 50000,
-		.tREA_max = 30000,
-		.tREH_min = 15000,
-		.tRHOH_min = 15000,
-		.tRHW_min = 100000,
-		.tRHZ_max = 100000,
-		.tRLOH_min = 0,
-		.tRP_min = 25000,
-		.tRR_min = 20000,
-		.tRST_max = 500000000,
-		.tWB_max = 100000,
-		.tWC_min = 45000,
-		.tWH_min = 15000,
-		.tWHR_min = 80000,
-		.tWP_min = 25000,
-		.tWW_min = 100000,
+		.type = NAND_SDR_IFACE,
+		.timings.sdr = {
+			.tADL_min = 400000,
+			.tALH_min = 10000,
+			.tALS_min = 25000,
+			.tAR_min = 10000,
+			.tCEA_max = 45000,
+			.tCEH_min = 20000,
+			.tCH_min = 10000,
+			.tCHZ_max = 50000,
+			.tCLH_min = 10000,
+			.tCLR_min = 10000,
+			.tCLS_min = 25000,
+			.tCOH_min = 15000,
+			.tCS_min = 35000,
+			.tDH_min = 10000,
+			.tDS_min = 20000,
+			.tFEAT_max = 1000000,
+			.tIR_min = 0,
+			.tITC_max = 1000000,
+			.tRC_min = 50000,
+			.tREA_max = 30000,
+			.tREH_min = 15000,
+			.tRHOH_min = 15000,
+			.tRHW_min = 100000,
+			.tRHZ_max = 100000,
+			.tRLOH_min = 0,
+			.tRP_min = 25000,
+			.tRR_min = 20000,
+			.tRST_max = 500000000,
+			.tWB_max = 100000,
+			.tWC_min = 45000,
+			.tWH_min = 15000,
+			.tWHR_min = 80000,
+			.tWP_min = 25000,
+			.tWW_min = 100000,
+		},
 	},
 	/* Mode 2 */
 	{
-		.tADL_min = 100000,
-		.tALH_min = 10000,
-		.tALS_min = 15000,
-		.tAR_min = 10000,
-		.tCEA_max = 30000,
-		.tCEH_min = 20000,
-		.tCH_min = 10000,
-		.tCHZ_max = 50000,
-		.tCLH_min = 10000,
-		.tCLR_min = 10000,
-		.tCLS_min = 15000,
-		.tCOH_min = 15000,
-		.tCS_min = 25000,
-		.tDH_min = 5000,
-		.tDS_min = 15000,
-		.tFEAT_max = 1000000,
-		.tIR_min = 0,
-		.tITC_max = 1000000,
-		.tRC_min = 35000,
-		.tREA_max = 25000,
-		.tREH_min = 15000,
-		.tRHOH_min = 15000,
-		.tRHW_min = 100000,
-		.tRHZ_max = 100000,
-		.tRLOH_min = 0,
-		.tRR_min = 20000,
-		.tRST_max = 500000000,
-		.tWB_max = 100000,
-		.tRP_min = 17000,
-		.tWC_min = 35000,
-		.tWH_min = 15000,
-		.tWHR_min = 80000,
-		.tWP_min = 17000,
-		.tWW_min = 100000,
+		.type = NAND_SDR_IFACE,
+		.timings.sdr = {
+			.tADL_min = 400000,
+			.tALH_min = 10000,
+			.tALS_min = 15000,
+			.tAR_min = 10000,
+			.tCEA_max = 30000,
+			.tCEH_min = 20000,
+			.tCH_min = 10000,
+			.tCHZ_max = 50000,
+			.tCLH_min = 10000,
+			.tCLR_min = 10000,
+			.tCLS_min = 15000,
+			.tCOH_min = 15000,
+			.tCS_min = 25000,
+			.tDH_min = 5000,
+			.tDS_min = 15000,
+			.tFEAT_max = 1000000,
+			.tIR_min = 0,
+			.tITC_max = 1000000,
+			.tRC_min = 35000,
+			.tREA_max = 25000,
+			.tREH_min = 15000,
+			.tRHOH_min = 15000,
+			.tRHW_min = 100000,
+			.tRHZ_max = 100000,
+			.tRLOH_min = 0,
+			.tRR_min = 20000,
+			.tRST_max = 500000000,
+			.tWB_max = 100000,
+			.tRP_min = 17000,
+			.tWC_min = 35000,
+			.tWH_min = 15000,
+			.tWHR_min = 80000,
+			.tWP_min = 17000,
+			.tWW_min = 100000,
+		},
 	},
 	/* Mode 3 */
 	{
-		.tADL_min = 100000,
-		.tALH_min = 5000,
-		.tALS_min = 10000,
-		.tAR_min = 10000,
-		.tCEA_max = 25000,
-		.tCEH_min = 20000,
-		.tCH_min = 5000,
-		.tCHZ_max = 50000,
-		.tCLH_min = 5000,
-		.tCLR_min = 10000,
-		.tCLS_min = 10000,
-		.tCOH_min = 15000,
-		.tCS_min = 25000,
-		.tDH_min = 5000,
-		.tDS_min = 10000,
-		.tFEAT_max = 1000000,
-		.tIR_min = 0,
-		.tITC_max = 1000000,
-		.tRC_min = 30000,
-		.tREA_max = 20000,
-		.tREH_min = 10000,
-		.tRHOH_min = 15000,
-		.tRHW_min = 100000,
-		.tRHZ_max = 100000,
-		.tRLOH_min = 0,
-		.tRP_min = 15000,
-		.tRR_min = 20000,
-		.tRST_max = 500000000,
-		.tWB_max = 100000,
-		.tWC_min = 30000,
-		.tWH_min = 10000,
-		.tWHR_min = 80000,
-		.tWP_min = 15000,
-		.tWW_min = 100000,
+		.type = NAND_SDR_IFACE,
+		.timings.sdr = {
+			.tADL_min = 400000,
+			.tALH_min = 5000,
+			.tALS_min = 10000,
+			.tAR_min = 10000,
+			.tCEA_max = 25000,
+			.tCEH_min = 20000,
+			.tCH_min = 5000,
+			.tCHZ_max = 50000,
+			.tCLH_min = 5000,
+			.tCLR_min = 10000,
+			.tCLS_min = 10000,
+			.tCOH_min = 15000,
+			.tCS_min = 25000,
+			.tDH_min = 5000,
+			.tDS_min = 10000,
+			.tFEAT_max = 1000000,
+			.tIR_min = 0,
+			.tITC_max = 1000000,
+			.tRC_min = 30000,
+			.tREA_max = 20000,
+			.tREH_min = 10000,
+			.tRHOH_min = 15000,
+			.tRHW_min = 100000,
+			.tRHZ_max = 100000,
+			.tRLOH_min = 0,
+			.tRP_min = 15000,
+			.tRR_min = 20000,
+			.tRST_max = 500000000,
+			.tWB_max = 100000,
+			.tWC_min = 30000,
+			.tWH_min = 10000,
+			.tWHR_min = 80000,
+			.tWP_min = 15000,
+			.tWW_min = 100000,
+		},
 	},
 	/* Mode 4 */
 	{
-		.tADL_min = 70000,
-		.tALH_min = 5000,
-		.tALS_min = 10000,
-		.tAR_min = 10000,
-		.tCEA_max = 25000,
-		.tCEH_min = 20000,
-		.tCH_min = 5000,
-		.tCHZ_max = 30000,
-		.tCLH_min = 5000,
-		.tCLR_min = 10000,
-		.tCLS_min = 10000,
-		.tCOH_min = 15000,
-		.tCS_min = 20000,
-		.tDH_min = 5000,
-		.tDS_min = 10000,
-		.tFEAT_max = 1000000,
-		.tIR_min = 0,
-		.tITC_max = 1000000,
-		.tRC_min = 25000,
-		.tREA_max = 20000,
-		.tREH_min = 10000,
-		.tRHOH_min = 15000,
-		.tRHW_min = 100000,
-		.tRHZ_max = 100000,
-		.tRLOH_min = 5000,
-		.tRP_min = 12000,
-		.tRR_min = 20000,
-		.tRST_max = 500000000,
-		.tWB_max = 100000,
-		.tWC_min = 25000,
-		.tWH_min = 10000,
-		.tWHR_min = 80000,
-		.tWP_min = 12000,
-		.tWW_min = 100000,
+		.type = NAND_SDR_IFACE,
+		.timings.sdr = {
+			.tADL_min = 400000,
+			.tALH_min = 5000,
+			.tALS_min = 10000,
+			.tAR_min = 10000,
+			.tCEA_max = 25000,
+			.tCEH_min = 20000,
+			.tCH_min = 5000,
+			.tCHZ_max = 30000,
+			.tCLH_min = 5000,
+			.tCLR_min = 10000,
+			.tCLS_min = 10000,
+			.tCOH_min = 15000,
+			.tCS_min = 20000,
+			.tDH_min = 5000,
+			.tDS_min = 10000,
+			.tFEAT_max = 1000000,
+			.tIR_min = 0,
+			.tITC_max = 1000000,
+			.tRC_min = 25000,
+			.tREA_max = 20000,
+			.tREH_min = 10000,
+			.tRHOH_min = 15000,
+			.tRHW_min = 100000,
+			.tRHZ_max = 100000,
+			.tRLOH_min = 5000,
+			.tRP_min = 12000,
+			.tRR_min = 20000,
+			.tRST_max = 500000000,
+			.tWB_max = 100000,
+			.tWC_min = 25000,
+			.tWH_min = 10000,
+			.tWHR_min = 80000,
+			.tWP_min = 12000,
+			.tWW_min = 100000,
+		},
 	},
 	/* Mode 5 */
 	{
-		.tADL_min = 70000,
-		.tALH_min = 5000,
-		.tALS_min = 10000,
-		.tAR_min = 10000,
-		.tCEA_max = 25000,
-		.tCEH_min = 20000,
-		.tCH_min = 5000,
-		.tCHZ_max = 30000,
-		.tCLH_min = 5000,
-		.tCLR_min = 10000,
-		.tCLS_min = 10000,
-		.tCOH_min = 15000,
-		.tCS_min = 15000,
-		.tDH_min = 5000,
-		.tDS_min = 7000,
-		.tFEAT_max = 1000000,
-		.tIR_min = 0,
-		.tITC_max = 1000000,
-		.tRC_min = 20000,
-		.tREA_max = 16000,
-		.tREH_min = 7000,
-		.tRHOH_min = 15000,
-		.tRHW_min = 100000,
-		.tRHZ_max = 100000,
-		.tRLOH_min = 5000,
-		.tRP_min = 10000,
-		.tRR_min = 20000,
-		.tRST_max = 500000000,
-		.tWB_max = 100000,
-		.tWC_min = 20000,
-		.tWH_min = 7000,
-		.tWHR_min = 80000,
-		.tWP_min = 10000,
-		.tWW_min = 100000,
+		.type = NAND_SDR_IFACE,
+		.timings.sdr = {
+			.tADL_min = 400000,
+			.tALH_min = 5000,
+			.tALS_min = 10000,
+			.tAR_min = 10000,
+			.tCEA_max = 25000,
+			.tCEH_min = 20000,
+			.tCH_min = 5000,
+			.tCHZ_max = 30000,
+			.tCLH_min = 5000,
+			.tCLR_min = 10000,
+			.tCLS_min = 10000,
+			.tCOH_min = 15000,
+			.tCS_min = 15000,
+			.tDH_min = 5000,
+			.tDS_min = 7000,
+			.tFEAT_max = 1000000,
+			.tIR_min = 0,
+			.tITC_max = 1000000,
+			.tRC_min = 20000,
+			.tREA_max = 16000,
+			.tREH_min = 7000,
+			.tRHOH_min = 15000,
+			.tRHW_min = 100000,
+			.tRHZ_max = 100000,
+			.tRLOH_min = 5000,
+			.tRP_min = 10000,
+			.tRR_min = 20000,
+			.tRST_max = 500000000,
+			.tWB_max = 100000,
+			.tWC_min = 20000,
+			.tWH_min = 7000,
+			.tWHR_min = 80000,
+			.tWP_min = 10000,
+			.tWW_min = 100000,
+		},
 	},
 };
 
@@ -248,6 +266,46 @@
 	if (mode < 0 || mode >= ARRAY_SIZE(onfi_sdr_timings))
 		return ERR_PTR(-EINVAL);
 
-	return &onfi_sdr_timings[mode];
+	return &onfi_sdr_timings[mode].timings.sdr;
 }
 EXPORT_SYMBOL(onfi_async_timing_mode_to_sdr_timings);
+
+/**
+ * onfi_init_data_interface - [NAND Interface] Initialize a data interface from
+ * given ONFI mode
+ * @iface: The data interface to be initialized
+ * @mode: The ONFI timing mode
+ */
+int onfi_init_data_interface(struct nand_chip *chip,
+			     struct nand_data_interface *iface,
+			     enum nand_data_interface_type type,
+			     int timing_mode)
+{
+	if (type != NAND_SDR_IFACE)
+		return -EINVAL;
+
+	if (timing_mode < 0 || timing_mode >= ARRAY_SIZE(onfi_sdr_timings))
+		return -EINVAL;
+
+	*iface = onfi_sdr_timings[timing_mode];
+
+	/*
+	 * TODO: initialize timings that cannot be deduced from timing mode:
+	 * tR, tPROG, tCCS, ...
+	 * These information are part of the ONFI parameter page.
+	 */
+
+	return 0;
+}
+EXPORT_SYMBOL(onfi_init_data_interface);
+
+/**
+ * nand_get_default_data_interface - [NAND Interface] Retrieve NAND
+ * data interface for mode 0. This is used as default timing after
+ * reset.
+ */
+const struct nand_data_interface *nand_get_default_data_interface(void)
+{
+	return &onfi_sdr_timings[0];
+}
+EXPORT_SYMBOL(nand_get_default_data_interface);
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 218c789..28e6118 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -218,8 +218,7 @@
 	ndfc = &ndfc_ctrl[cs];
 	ndfc->chip_select = cs;
 
-	spin_lock_init(&ndfc->ndfc_control.lock);
-	init_waitqueue_head(&ndfc->ndfc_control.wq);
+	nand_hw_control_init(&ndfc->ndfc_control);
 	ndfc->ofdev = ofdev;
 	dev_set_drvdata(&ofdev->dev, ndfc);
 
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 436dd6d..b121bf4 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1810,8 +1810,7 @@
 		chip->cmdfunc		= nand_cmdfunc;
 	}
 
-	spin_lock_init(&chip->controller->lock);
-	init_waitqueue_head(&chip->controller->wq);
+	nand_hw_control_init(chip->controller);
 	info->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(info->clk)) {
 		dev_err(&pdev->dev, "failed to get nand clock\n");
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index de7d28e..57d483a 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -1957,8 +1957,7 @@
 	INIT_LIST_HEAD(&nandc->desc_list);
 	INIT_LIST_HEAD(&nandc->host_list);
 
-	spin_lock_init(&nandc->controller.lock);
-	init_waitqueue_head(&nandc->controller.wq);
+	nand_hw_control_init(&nandc->controller);
 
 	return 0;
 }
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index d9309cf..d459c19d 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -180,7 +180,7 @@
 
 	enum s3c_cpu_type		cpu_type;
 
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
 	struct notifier_block	freq_transition;
 #endif
 };
@@ -701,7 +701,7 @@
 
 /* cpufreq driver support */
 
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
 
 static int s3c2410_nand_cpufreq_transition(struct notifier_block *nb,
 					  unsigned long val, void *data)
@@ -977,8 +977,7 @@
 
 	platform_set_drvdata(pdev, info);
 
-	spin_lock_init(&info->controller.lock);
-	init_waitqueue_head(&info->controller.wq);
+	nand_hw_control_init(&info->controller);
 
 	/* get the clock source and enable it */
 
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 6fa3bcd..442ce61 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -397,7 +397,7 @@
 	struct dma_chan *chan;
 	enum dma_transfer_direction tr_dir;
 	dma_addr_t dma_addr;
-	dma_cookie_t cookie = -EINVAL;
+	dma_cookie_t cookie;
 	uint32_t reg;
 	int ret;
 
@@ -423,6 +423,12 @@
 		desc->callback = flctl_dma_complete;
 		desc->callback_param = flctl;
 		cookie = dmaengine_submit(desc);
+		if (dma_submit_error(cookie)) {
+			ret = dma_submit_error(cookie);
+			dev_warn(&flctl->pdev->dev,
+				 "DMA submit failed, falling back to PIO\n");
+			goto out;
+		}
 
 		dma_async_issue_pending(chan);
 	} else {
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index e414b31..8b8470c 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -1572,14 +1572,22 @@
 #define sunxi_nand_lookup_timing(l, p, c) \
 			_sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
 
-static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip,
-				       const struct nand_sdr_timings *timings)
+static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd,
+					const struct nand_data_interface *conf,
+					bool check_only)
 {
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nand_chip *chip = to_sunxi_nand(nand);
 	struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller);
+	const struct nand_sdr_timings *timings;
 	u32 min_clk_period = 0;
 	s32 tWB, tADL, tWHR, tRHW, tCAD;
 	long real_clk_rate;
 
+	timings = nand_get_sdr_timings(conf);
+	if (IS_ERR(timings))
+		return -ENOTSUPP;
+
 	/* T1 <=> tCLS */
 	if (timings->tCLS_min > min_clk_period)
 		min_clk_period = timings->tCLS_min;
@@ -1679,6 +1687,9 @@
 		return tRHW;
 	}
 
+	if (check_only)
+		return 0;
+
 	/*
 	 * TODO: according to ONFI specs this value only applies for DDR NAND,
 	 * but Allwinner seems to set this to 0x7. Mimic them for now.
@@ -1712,44 +1723,6 @@
 	return 0;
 }
 
-static int sunxi_nand_chip_init_timings(struct sunxi_nand_chip *chip,
-					struct device_node *np)
-{
-	struct mtd_info *mtd = nand_to_mtd(&chip->nand);
-	const struct nand_sdr_timings *timings;
-	int ret;
-	int mode;
-
-	mode = onfi_get_async_timing_mode(&chip->nand);
-	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
-		mode = chip->nand.onfi_timing_mode_default;
-	} else {
-		uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {};
-		int i;
-
-		mode = fls(mode) - 1;
-		if (mode < 0)
-			mode = 0;
-
-		feature[0] = mode;
-		for (i = 0; i < chip->nsels; i++) {
-			chip->nand.select_chip(mtd, i);
-			ret = chip->nand.onfi_set_features(mtd,	&chip->nand,
-						ONFI_FEATURE_ADDR_TIMING_MODE,
-						feature);
-			chip->nand.select_chip(mtd, -1);
-			if (ret)
-				return ret;
-		}
-	}
-
-	timings = onfi_async_timing_mode_to_sdr_timings(mode);
-	if (IS_ERR(timings))
-		return PTR_ERR(timings);
-
-	return sunxi_nand_chip_set_timings(chip, timings);
-}
-
 static int sunxi_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
 				    struct mtd_oob_region *oobregion)
 {
@@ -1814,6 +1787,35 @@
 	int ret;
 	int i;
 
+	if (ecc->options & NAND_ECC_MAXIMIZE) {
+		int bytes;
+
+		ecc->size = 1024;
+		nsectors = mtd->writesize / ecc->size;
+
+		/* Reserve 2 bytes for the BBM */
+		bytes = (mtd->oobsize - 2) / nsectors;
+
+		/* 4 non-ECC bytes are added before each ECC bytes section */
+		bytes -= 4;
+
+		/* and bytes has to be even. */
+		if (bytes % 2)
+			bytes--;
+
+		ecc->strength = bytes * 8 / fls(8 * ecc->size);
+
+		for (i = 0; i < ARRAY_SIZE(strengths); i++) {
+			if (strengths[i] > ecc->strength)
+				break;
+		}
+
+		if (!i)
+			ecc->strength = 0;
+		else
+			ecc->strength = strengths[i - 1];
+	}
+
 	if (ecc->size != 512 && ecc->size != 1024)
 		return -EINVAL;
 
@@ -1975,7 +1977,6 @@
 static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
 				struct device_node *np)
 {
-	const struct nand_sdr_timings *timings;
 	struct sunxi_nand_chip *chip;
 	struct mtd_info *mtd;
 	struct nand_chip *nand;
@@ -2065,25 +2066,11 @@
 	nand->read_buf = sunxi_nfc_read_buf;
 	nand->write_buf = sunxi_nfc_write_buf;
 	nand->read_byte = sunxi_nfc_read_byte;
+	nand->setup_data_interface = sunxi_nfc_setup_data_interface;
 
 	mtd = nand_to_mtd(nand);
 	mtd->dev.parent = dev;
 
-	timings = onfi_async_timing_mode_to_sdr_timings(0);
-	if (IS_ERR(timings)) {
-		ret = PTR_ERR(timings);
-		dev_err(dev,
-			"could not retrieve timings for ONFI mode 0: %d\n",
-			ret);
-		return ret;
-	}
-
-	ret = sunxi_nand_chip_set_timings(chip, timings);
-	if (ret) {
-		dev_err(dev, "could not configure chip timings: %d\n", ret);
-		return ret;
-	}
-
 	ret = nand_scan_ident(mtd, nsels, NULL);
 	if (ret)
 		return ret;
@@ -2096,12 +2083,6 @@
 
 	nand->options |= NAND_SUBPAGE_READ;
 
-	ret = sunxi_nand_chip_init_timings(chip, np);
-	if (ret) {
-		dev_err(dev, "could not configure chip timings: %d\n", ret);
-		return ret;
-	}
-
 	ret = sunxi_nand_ecc_init(mtd, &nand->ecc, np);
 	if (ret) {
 		dev_err(dev, "ECC init failed: %d\n", ret);
@@ -2175,8 +2156,7 @@
 		return -ENOMEM;
 
 	nfc->dev = dev;
-	spin_lock_init(&nfc->controller.lock);
-	init_waitqueue_head(&nfc->controller.wq);
+	nand_hw_control_init(&nfc->controller);
 	INIT_LIST_HEAD(&nfc->chips);
 
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 04d63f5..0a14fda 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -303,8 +303,7 @@
 	dev_info(&dev->dev, "CLK:%ldMHz HOLD:%d SPW:%d\n",
 		 (gbusclk + 500000) / 1000000, hold, spw);
 
-	spin_lock_init(&drvdata->hw_control.lock);
-	init_waitqueue_head(&drvdata->hw_control.wq);
+	nand_hw_control_init(&drvdata->hw_control);
 
 	platform_set_drvdata(dev, drvdata);
 	txx9ndfmc_initialize(dev);
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 903becd..93ceea4 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -91,9 +91,132 @@
 
 static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);
 
-/* Temporary variables used during scanning */
-static struct ubi_ec_hdr *ech;
-static struct ubi_vid_hdr *vidh;
+#define AV_FIND		BIT(0)
+#define AV_ADD		BIT(1)
+#define AV_FIND_OR_ADD	(AV_FIND | AV_ADD)
+
+/**
+ * find_or_add_av - internal function to find a volume, add a volume or do
+ *		    both (find and add if missing).
+ * @ai: attaching information
+ * @vol_id: the requested volume ID
+ * @flags: a combination of the %AV_FIND and %AV_ADD flags describing the
+ *	   expected operation. If only %AV_ADD is set, -EEXIST is returned
+ *	   if the volume already exists. If only %AV_FIND is set, NULL is
+ *	   returned if the volume does not exist. And if both flags are
+ *	   set, the helper first tries to find an existing volume, and if
+ *	   it does not exist it creates a new one.
+ * @created: in value used to inform the caller whether it"s a newly created
+ *	     volume or not.
+ *
+ * This function returns a pointer to a volume description or an ERR_PTR if
+ * the operation failed. It can also return NULL if only %AV_FIND is set and
+ * the volume does not exist.
+ */
+static struct ubi_ainf_volume *find_or_add_av(struct ubi_attach_info *ai,
+					      int vol_id, unsigned int flags,
+					      bool *created)
+{
+	struct ubi_ainf_volume *av;
+	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+	/* Walk the volume RB-tree to look if this volume is already present */
+	while (*p) {
+		parent = *p;
+		av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+		if (vol_id == av->vol_id) {
+			*created = false;
+
+			if (!(flags & AV_FIND))
+				return ERR_PTR(-EEXIST);
+
+			return av;
+		}
+
+		if (vol_id > av->vol_id)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
+	}
+
+	if (!(flags & AV_ADD))
+		return NULL;
+
+	/* The volume is absent - add it */
+	av = kzalloc(sizeof(*av), GFP_KERNEL);
+	if (!av)
+		return ERR_PTR(-ENOMEM);
+
+	av->vol_id = vol_id;
+
+	if (vol_id > ai->highest_vol_id)
+		ai->highest_vol_id = vol_id;
+
+	rb_link_node(&av->rb, parent, p);
+	rb_insert_color(&av->rb, &ai->volumes);
+	ai->vols_found += 1;
+	*created = true;
+	dbg_bld("added volume %d", vol_id);
+	return av;
+}
+
+/**
+ * ubi_find_or_add_av - search for a volume in the attaching information and
+ *			add one if it does not exist.
+ * @ai: attaching information
+ * @vol_id: the requested volume ID
+ * @created: whether the volume has been created or not
+ *
+ * This function returns a pointer to the new volume description or an
+ * ERR_PTR if the operation failed.
+ */
+static struct ubi_ainf_volume *ubi_find_or_add_av(struct ubi_attach_info *ai,
+						  int vol_id, bool *created)
+{
+	return find_or_add_av(ai, vol_id, AV_FIND_OR_ADD, created);
+}
+
+/**
+ * ubi_alloc_aeb - allocate an aeb element
+ * @ai: attaching information
+ * @pnum: physical eraseblock number
+ * @ec: erase counter of the physical eraseblock
+ *
+ * Allocate an aeb object and initialize the pnum and ec information.
+ * vol_id and lnum are set to UBI_UNKNOWN, and the other fields are
+ * initialized to zero.
+ * Note that the element is not added in any list or RB tree.
+ */
+struct ubi_ainf_peb *ubi_alloc_aeb(struct ubi_attach_info *ai, int pnum,
+				   int ec)
+{
+	struct ubi_ainf_peb *aeb;
+
+	aeb = kmem_cache_zalloc(ai->aeb_slab_cache, GFP_KERNEL);
+	if (!aeb)
+		return NULL;
+
+	aeb->pnum = pnum;
+	aeb->ec = ec;
+	aeb->vol_id = UBI_UNKNOWN;
+	aeb->lnum = UBI_UNKNOWN;
+
+	return aeb;
+}
+
+/**
+ * ubi_free_aeb - free an aeb element
+ * @ai: attaching information
+ * @aeb: the element to free
+ *
+ * Free an aeb object. The caller must have removed the element from any list
+ * or RB tree.
+ */
+void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb)
+{
+	kmem_cache_free(ai->aeb_slab_cache, aeb);
+}
 
 /**
  * add_to_list - add physical eraseblock to a list.
@@ -131,14 +254,12 @@
 	} else
 		BUG();
 
-	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+	aeb = ubi_alloc_aeb(ai, pnum, ec);
 	if (!aeb)
 		return -ENOMEM;
 
-	aeb->pnum = pnum;
 	aeb->vol_id = vol_id;
 	aeb->lnum = lnum;
-	aeb->ec = ec;
 	if (to_head)
 		list_add(&aeb->u.list, list);
 	else
@@ -163,13 +284,11 @@
 
 	dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
 
-	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+	aeb = ubi_alloc_aeb(ai, pnum, ec);
 	if (!aeb)
 		return -ENOMEM;
 
 	ai->corr_peb_count += 1;
-	aeb->pnum = pnum;
-	aeb->ec = ec;
 	list_add(&aeb->u.list, &ai->corr);
 	return 0;
 }
@@ -192,14 +311,12 @@
 {
 	struct ubi_ainf_peb *aeb;
 
-	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+	aeb = ubi_alloc_aeb(ai, pnum, ec);
 	if (!aeb)
 		return -ENOMEM;
 
-	aeb->pnum = pnum;
-	aeb->vol_id = be32_to_cpu(vidh->vol_id);
-	aeb->sqnum = be64_to_cpu(vidh->sqnum);
-	aeb->ec = ec;
+	aeb->vol_id = be32_to_cpu(vid_hdr->vol_id);
+	aeb->sqnum = be64_to_cpu(vid_hdr->sqnum);
 	list_add(&aeb->u.list, &ai->fastmap);
 
 	dbg_bld("add to fastmap list: PEB %d, vol_id %d, sqnum: %llu", pnum,
@@ -294,44 +411,20 @@
 					  const struct ubi_vid_hdr *vid_hdr)
 {
 	struct ubi_ainf_volume *av;
-	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+	bool created;
 
 	ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
 
-	/* Walk the volume RB-tree to look if this volume is already present */
-	while (*p) {
-		parent = *p;
-		av = rb_entry(parent, struct ubi_ainf_volume, rb);
+	av = ubi_find_or_add_av(ai, vol_id, &created);
+	if (IS_ERR(av) || !created)
+		return av;
 
-		if (vol_id == av->vol_id)
-			return av;
-
-		if (vol_id > av->vol_id)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-
-	/* The volume is absent - add it */
-	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
-	if (!av)
-		return ERR_PTR(-ENOMEM);
-
-	av->highest_lnum = av->leb_count = 0;
-	av->vol_id = vol_id;
-	av->root = RB_ROOT;
 	av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
 	av->data_pad = be32_to_cpu(vid_hdr->data_pad);
 	av->compat = vid_hdr->compat;
 	av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
 							    : UBI_STATIC_VOLUME;
-	if (vol_id > ai->highest_vol_id)
-		ai->highest_vol_id = vol_id;
 
-	rb_link_node(&av->rb, parent, p);
-	rb_insert_color(&av->rb, &ai->volumes);
-	ai->vols_found += 1;
-	dbg_bld("added volume %d", vol_id);
 	return av;
 }
 
@@ -360,7 +453,7 @@
 {
 	int len, err, second_is_newer, bitflips = 0, corrupted = 0;
 	uint32_t data_crc, crc;
-	struct ubi_vid_hdr *vh = NULL;
+	struct ubi_vid_io_buf *vidb = NULL;
 	unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
 
 	if (sqnum2 == aeb->sqnum) {
@@ -403,12 +496,12 @@
 			return bitflips << 1;
 		}
 
-		vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
-		if (!vh)
+		vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+		if (!vidb)
 			return -ENOMEM;
 
 		pnum = aeb->pnum;
-		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+		err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);
 		if (err) {
 			if (err == UBI_IO_BITFLIPS)
 				bitflips = 1;
@@ -422,7 +515,7 @@
 			}
 		}
 
-		vid_hdr = vh;
+		vid_hdr = ubi_get_vid_hdr(vidb);
 	}
 
 	/* Read the data of the copy and check the CRC */
@@ -448,7 +541,7 @@
 	}
 	mutex_unlock(&ubi->buf_mutex);
 
-	ubi_free_vid_hdr(ubi, vh);
+	ubi_free_vid_buf(vidb);
 
 	if (second_is_newer)
 		dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
@@ -460,7 +553,7 @@
 out_unlock:
 	mutex_unlock(&ubi->buf_mutex);
 out_free_vidh:
-	ubi_free_vid_hdr(ubi, vh);
+	ubi_free_vid_buf(vidb);
 	return err;
 }
 
@@ -605,12 +698,10 @@
 	if (err)
 		return err;
 
-	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+	aeb = ubi_alloc_aeb(ai, pnum, ec);
 	if (!aeb)
 		return -ENOMEM;
 
-	aeb->ec = ec;
-	aeb->pnum = pnum;
 	aeb->vol_id = vol_id;
 	aeb->lnum = lnum;
 	aeb->scrub = bitflips;
@@ -629,6 +720,21 @@
 }
 
 /**
+ * ubi_add_av - add volume to the attaching information.
+ * @ai: attaching information
+ * @vol_id: the requested volume ID
+ *
+ * This function returns a pointer to the new volume description or an
+ * ERR_PTR if the operation failed.
+ */
+struct ubi_ainf_volume *ubi_add_av(struct ubi_attach_info *ai, int vol_id)
+{
+	bool created;
+
+	return find_or_add_av(ai, vol_id, AV_ADD, &created);
+}
+
+/**
  * ubi_find_av - find volume in the attaching information.
  * @ai: attaching information
  * @vol_id: the requested volume ID
@@ -639,24 +745,15 @@
 struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
 				    int vol_id)
 {
-	struct ubi_ainf_volume *av;
-	struct rb_node *p = ai->volumes.rb_node;
+	bool created;
 
-	while (p) {
-		av = rb_entry(p, struct ubi_ainf_volume, rb);
-
-		if (vol_id == av->vol_id)
-			return av;
-
-		if (vol_id > av->vol_id)
-			p = p->rb_left;
-		else
-			p = p->rb_right;
-	}
-
-	return NULL;
+	return find_or_add_av((struct ubi_attach_info *)ai, vol_id, AV_FIND,
+			      &created);
 }
 
+static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av,
+		       struct list_head *list);
+
 /**
  * ubi_remove_av - delete attaching information about a volume.
  * @ai: attaching information
@@ -664,19 +761,10 @@
  */
 void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
 {
-	struct rb_node *rb;
-	struct ubi_ainf_peb *aeb;
-
 	dbg_bld("remove attaching information about volume %d", av->vol_id);
 
-	while ((rb = rb_first(&av->root))) {
-		aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb);
-		rb_erase(&aeb->u.rb, &av->root);
-		list_add_tail(&aeb->u.list, &ai->erase);
-	}
-
 	rb_erase(&av->rb, &ai->volumes);
-	kfree(av);
+	destroy_av(ai, av, &ai->erase);
 	ai->vols_found -= 1;
 }
 
@@ -866,6 +954,9 @@
 static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
 		    int pnum, bool fast)
 {
+	struct ubi_ec_hdr *ech = ai->ech;
+	struct ubi_vid_io_buf *vidb = ai->vidb;
+	struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb);
 	long long ec;
 	int err, bitflips = 0, vol_id = -1, ec_err = 0;
 
@@ -963,7 +1054,7 @@
 
 	/* OK, we've done with the EC header, let's look at the VID header */
 
-	err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
+	err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);
 	if (err < 0)
 		return err;
 	switch (err) {
@@ -1191,10 +1282,12 @@
  * destroy_av - free volume attaching information.
  * @av: volume attaching information
  * @ai: attaching information
+ * @list: put the aeb elements in there if !NULL, otherwise free them
  *
  * This function destroys the volume attaching information.
  */
-static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
+static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av,
+		       struct list_head *list)
 {
 	struct ubi_ainf_peb *aeb;
 	struct rb_node *this = av->root.rb_node;
@@ -1214,7 +1307,10 @@
 					this->rb_right = NULL;
 			}
 
-			kmem_cache_free(ai->aeb_slab_cache, aeb);
+			if (list)
+				list_add_tail(&aeb->u.list, list);
+			else
+				ubi_free_aeb(ai, aeb);
 		}
 	}
 	kfree(av);
@@ -1232,23 +1328,23 @@
 
 	list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
 		list_del(&aeb->u.list);
-		kmem_cache_free(ai->aeb_slab_cache, aeb);
+		ubi_free_aeb(ai, aeb);
 	}
 	list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
 		list_del(&aeb->u.list);
-		kmem_cache_free(ai->aeb_slab_cache, aeb);
+		ubi_free_aeb(ai, aeb);
 	}
 	list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
 		list_del(&aeb->u.list);
-		kmem_cache_free(ai->aeb_slab_cache, aeb);
+		ubi_free_aeb(ai, aeb);
 	}
 	list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
 		list_del(&aeb->u.list);
-		kmem_cache_free(ai->aeb_slab_cache, aeb);
+		ubi_free_aeb(ai, aeb);
 	}
 	list_for_each_entry_safe(aeb, aeb_tmp, &ai->fastmap, u.list) {
 		list_del(&aeb->u.list);
-		kmem_cache_free(ai->aeb_slab_cache, aeb);
+		ubi_free_aeb(ai, aeb);
 	}
 
 	/* Destroy the volume RB-tree */
@@ -1269,7 +1365,7 @@
 					rb->rb_right = NULL;
 			}
 
-			destroy_av(ai, av);
+			destroy_av(ai, av, NULL);
 		}
 	}
 
@@ -1297,12 +1393,12 @@
 
 	err = -ENOMEM;
 
-	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
-	if (!ech)
+	ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+	if (!ai->ech)
 		return err;
 
-	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
-	if (!vidh)
+	ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+	if (!ai->vidb)
 		goto out_ech;
 
 	for (pnum = start; pnum < ubi->peb_count; pnum++) {
@@ -1351,15 +1447,15 @@
 	if (err)
 		goto out_vidh;
 
-	ubi_free_vid_hdr(ubi, vidh);
-	kfree(ech);
+	ubi_free_vid_buf(ai->vidb);
+	kfree(ai->ech);
 
 	return 0;
 
 out_vidh:
-	ubi_free_vid_hdr(ubi, vidh);
+	ubi_free_vid_buf(ai->vidb);
 out_ech:
-	kfree(ech);
+	kfree(ai->ech);
 	return err;
 }
 
@@ -1411,12 +1507,12 @@
 	if (!scan_ai)
 		goto out;
 
-	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
-	if (!ech)
+	scan_ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+	if (!scan_ai->ech)
 		goto out_ai;
 
-	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
-	if (!vidh)
+	scan_ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+	if (!scan_ai->vidb)
 		goto out_ech;
 
 	for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
@@ -1428,8 +1524,8 @@
 			goto out_vidh;
 	}
 
-	ubi_free_vid_hdr(ubi, vidh);
-	kfree(ech);
+	ubi_free_vid_buf(scan_ai->vidb);
+	kfree(scan_ai->ech);
 
 	if (scan_ai->force_full_scan)
 		err = UBI_NO_FASTMAP;
@@ -1449,9 +1545,9 @@
 	return err;
 
 out_vidh:
-	ubi_free_vid_hdr(ubi, vidh);
+	ubi_free_vid_buf(scan_ai->vidb);
 out_ech:
-	kfree(ech);
+	kfree(scan_ai->ech);
 out_ai:
 	destroy_ai(scan_ai);
 out:
@@ -1573,6 +1669,8 @@
  */
 static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
 {
+	struct ubi_vid_io_buf *vidb = ai->vidb;
+	struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb);
 	int pnum, err, vols_found = 0;
 	struct rb_node *rb1, *rb2;
 	struct ubi_ainf_volume *av;
@@ -1708,7 +1806,7 @@
 
 			last_aeb = aeb;
 
-			err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
+			err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidb, 1);
 			if (err && err != UBI_IO_BITFLIPS) {
 				ubi_err(ubi, "VID header is not OK (%d)",
 					err);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 0680516..85d54f3 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -574,7 +574,7 @@
 
 	for (i = ubi->vtbl_slots;
 	     i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
-		kfree(ubi->volumes[i]->eba_tbl);
+		ubi_eba_replace_table(ubi->volumes[i], NULL);
 		kfree(ubi->volumes[i]);
 	}
 }
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index ee2b74d..45c3296 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -416,7 +416,7 @@
 		}
 
 		rsvd_bytes = (long long)vol->reserved_pebs *
-					ubi->leb_size-vol->data_pad;
+					vol->usable_leb_size;
 		if (bytes < 0 || bytes > rsvd_bytes) {
 			err = -EINVAL;
 			break;
@@ -454,7 +454,7 @@
 
 		/* Validate the request */
 		err = -EINVAL;
-		if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
+		if (!ubi_leb_valid(vol, req.lnum) ||
 		    req.bytes < 0 || req.bytes > vol->usable_leb_size)
 			break;
 
@@ -485,7 +485,7 @@
 			break;
 		}
 
-		if (lnum < 0 || lnum >= vol->reserved_pebs) {
+		if (!ubi_leb_valid(vol, lnum)) {
 			err = -EINVAL;
 			break;
 		}
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index ebf5172..95c4048 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -50,6 +50,30 @@
 #define EBA_RESERVED_PEBS 1
 
 /**
+ * struct ubi_eba_entry - structure encoding a single LEB -> PEB association
+ * @pnum: the physical eraseblock number attached to the LEB
+ *
+ * This structure is encoding a LEB -> PEB association. Note that the LEB
+ * number is not stored here, because it is the index used to access the
+ * entries table.
+ */
+struct ubi_eba_entry {
+	int pnum;
+};
+
+/**
+ * struct ubi_eba_table - LEB -> PEB association information
+ * @entries: the LEB to PEB mapping (one entry per LEB).
+ *
+ * This structure is private to the EBA logic and should be kept here.
+ * It is encoding the LEB to PEB association table, and is subject to
+ * changes.
+ */
+struct ubi_eba_table {
+	struct ubi_eba_entry *entries;
+};
+
+/**
  * next_sqnum - get next sequence number.
  * @ubi: UBI device description object
  *
@@ -84,6 +108,110 @@
 }
 
 /**
+ * ubi_eba_get_ldesc - get information about a LEB
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @ldesc: the LEB descriptor to fill
+ *
+ * Used to query information about a specific LEB.
+ * It is currently only returning the physical position of the LEB, but will be
+ * extended to provide more information.
+ */
+void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum,
+		       struct ubi_eba_leb_desc *ldesc)
+{
+	ldesc->lnum = lnum;
+	ldesc->pnum = vol->eba_tbl->entries[lnum].pnum;
+}
+
+/**
+ * ubi_eba_create_table - allocate a new EBA table and initialize it with all
+ *			  LEBs unmapped
+ * @vol: volume containing the EBA table to copy
+ * @nentries: number of entries in the table
+ *
+ * Allocate a new EBA table and initialize it with all LEBs unmapped.
+ * Returns a valid pointer if it succeed, an ERR_PTR() otherwise.
+ */
+struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol,
+					   int nentries)
+{
+	struct ubi_eba_table *tbl;
+	int err = -ENOMEM;
+	int i;
+
+	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+	if (!tbl)
+		return ERR_PTR(-ENOMEM);
+
+	tbl->entries = kmalloc_array(nentries, sizeof(*tbl->entries),
+				     GFP_KERNEL);
+	if (!tbl->entries)
+		goto err;
+
+	for (i = 0; i < nentries; i++)
+		tbl->entries[i].pnum = UBI_LEB_UNMAPPED;
+
+	return tbl;
+
+err:
+	kfree(tbl->entries);
+	kfree(tbl);
+
+	return ERR_PTR(err);
+}
+
+/**
+ * ubi_eba_destroy_table - destroy an EBA table
+ * @tbl: the table to destroy
+ *
+ * Destroy an EBA table.
+ */
+void ubi_eba_destroy_table(struct ubi_eba_table *tbl)
+{
+	if (!tbl)
+		return;
+
+	kfree(tbl->entries);
+	kfree(tbl);
+}
+
+/**
+ * ubi_eba_copy_table - copy the EBA table attached to vol into another table
+ * @vol: volume containing the EBA table to copy
+ * @dst: destination
+ * @nentries: number of entries to copy
+ *
+ * Copy the EBA table stored in vol into the one pointed by dst.
+ */
+void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst,
+			int nentries)
+{
+	struct ubi_eba_table *src;
+	int i;
+
+	ubi_assert(dst && vol && vol->eba_tbl);
+
+	src = vol->eba_tbl;
+
+	for (i = 0; i < nentries; i++)
+		dst->entries[i].pnum = src->entries[i].pnum;
+}
+
+/**
+ * ubi_eba_replace_table - assign a new EBA table to a volume
+ * @vol: volume containing the EBA table to copy
+ * @tbl: new EBA table
+ *
+ * Assign a new EBA table to the volume and release the old one.
+ */
+void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl)
+{
+	ubi_eba_destroy_table(vol->eba_tbl);
+	vol->eba_tbl = tbl;
+}
+
+/**
  * ltree_lookup - look up the lock tree.
  * @ubi: UBI device description object
  * @vol_id: volume ID
@@ -312,6 +440,18 @@
 }
 
 /**
+ * ubi_eba_is_mapped - check if a LEB is mapped.
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ *
+ * This function returns true if the LEB is mapped, false otherwise.
+ */
+bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum)
+{
+	return vol->eba_tbl->entries[lnum].pnum >= 0;
+}
+
+/**
  * ubi_eba_unmap_leb - un-map logical eraseblock.
  * @ubi: UBI device description object
  * @vol: volume description object
@@ -333,7 +473,7 @@
 	if (err)
 		return err;
 
-	pnum = vol->eba_tbl[lnum];
+	pnum = vol->eba_tbl->entries[lnum].pnum;
 	if (pnum < 0)
 		/* This logical eraseblock is already unmapped */
 		goto out_unlock;
@@ -341,7 +481,7 @@
 	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
 
 	down_read(&ubi->fm_eba_sem);
-	vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
+	vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
 	up_read(&ubi->fm_eba_sem);
 	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
 
@@ -373,6 +513,7 @@
 		     void *buf, int offset, int len, int check)
 {
 	int err, pnum, scrub = 0, vol_id = vol->vol_id;
+	struct ubi_vid_io_buf *vidb;
 	struct ubi_vid_hdr *vid_hdr;
 	uint32_t uninitialized_var(crc);
 
@@ -380,7 +521,7 @@
 	if (err)
 		return err;
 
-	pnum = vol->eba_tbl[lnum];
+	pnum = vol->eba_tbl->entries[lnum].pnum;
 	if (pnum < 0) {
 		/*
 		 * The logical eraseblock is not mapped, fill the whole buffer
@@ -403,13 +544,15 @@
 
 retry:
 	if (check) {
-		vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
-		if (!vid_hdr) {
+		vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+		if (!vidb) {
 			err = -ENOMEM;
 			goto out_unlock;
 		}
 
-		err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
+		vid_hdr = ubi_get_vid_hdr(vidb);
+
+		err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
 		if (err && err != UBI_IO_BITFLIPS) {
 			if (err > 0) {
 				/*
@@ -455,7 +598,7 @@
 		ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
 
 		crc = be32_to_cpu(vid_hdr->data_crc);
-		ubi_free_vid_hdr(ubi, vid_hdr);
+		ubi_free_vid_buf(vidb);
 	}
 
 	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
@@ -492,7 +635,7 @@
 	return err;
 
 out_free:
-	ubi_free_vid_hdr(ubi, vid_hdr);
+	ubi_free_vid_buf(vidb);
 out_unlock:
 	leb_read_unlock(ubi, vol_id, lnum);
 	return err;
@@ -554,6 +697,103 @@
 }
 
 /**
+ * try_recover_peb - try to recover from write failure.
+ * @vol: volume description object
+ * @pnum: the physical eraseblock to recover
+ * @lnum: logical eraseblock number
+ * @buf: data which was not written because of the write failure
+ * @offset: offset of the failed write
+ * @len: how many bytes should have been written
+ * @vidb: VID buffer
+ * @retry: whether the caller should retry in case of failure
+ *
+ * This function is called in case of a write failure and moves all good data
+ * from the potentially bad physical eraseblock to a good physical eraseblock.
+ * This function also writes the data which was not written due to the failure.
+ * Returns 0 in case of success, and a negative error code in case of failure.
+ * In case of failure, the %retry parameter is set to false if this is a fatal
+ * error (retrying won't help), and true otherwise.
+ */
+static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum,
+			   const void *buf, int offset, int len,
+			   struct ubi_vid_io_buf *vidb, bool *retry)
+{
+	struct ubi_device *ubi = vol->ubi;
+	struct ubi_vid_hdr *vid_hdr;
+	int new_pnum, err, vol_id = vol->vol_id, data_size;
+	uint32_t crc;
+
+	*retry = false;
+
+	new_pnum = ubi_wl_get_peb(ubi);
+	if (new_pnum < 0) {
+		err = new_pnum;
+		goto out_put;
+	}
+
+	ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
+		pnum, new_pnum);
+
+	err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
+	if (err && err != UBI_IO_BITFLIPS) {
+		if (err > 0)
+			err = -EIO;
+		goto out_put;
+	}
+
+	ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
+
+	mutex_lock(&ubi->buf_mutex);
+	memset(ubi->peb_buf + offset, 0xFF, len);
+
+	/* Read everything before the area where the write failure happened */
+	if (offset > 0) {
+		err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
+		if (err && err != UBI_IO_BITFLIPS)
+			goto out_unlock;
+	}
+
+	*retry = true;
+
+	memcpy(ubi->peb_buf + offset, buf, len);
+
+	data_size = offset + len;
+	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+	vid_hdr->copy_flag = 1;
+	vid_hdr->data_size = cpu_to_be32(data_size);
+	vid_hdr->data_crc = cpu_to_be32(crc);
+	err = ubi_io_write_vid_hdr(ubi, new_pnum, vidb);
+	if (err)
+		goto out_unlock;
+
+	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
+
+out_unlock:
+	mutex_unlock(&ubi->buf_mutex);
+
+	if (!err)
+		vol->eba_tbl->entries[lnum].pnum = new_pnum;
+
+out_put:
+	up_read(&ubi->fm_eba_sem);
+
+	if (!err) {
+		ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+		ubi_msg(ubi, "data was successfully recovered");
+	} else if (new_pnum >= 0) {
+		/*
+		 * Bad luck? This physical eraseblock is bad too? Crud. Let's
+		 * try to get another one.
+		 */
+		ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
+		ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
+	}
+
+	return err;
+}
+
+/**
  * recover_peb - recover from write failure.
  * @ubi: UBI device description object
  * @pnum: the physical eraseblock to recover
@@ -566,106 +806,97 @@
  * This function is called in case of a write failure and moves all good data
  * from the potentially bad physical eraseblock to a good physical eraseblock.
  * This function also writes the data which was not written due to the failure.
- * Returns new physical eraseblock number in case of success, and a negative
- * error code in case of failure.
+ * Returns 0 in case of success, and a negative error code in case of failure.
+ * This function tries %UBI_IO_RETRIES before giving up.
  */
 static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
 		       const void *buf, int offset, int len)
 {
-	int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
+	int err, idx = vol_id2idx(ubi, vol_id), tries;
 	struct ubi_volume *vol = ubi->volumes[idx];
-	struct ubi_vid_hdr *vid_hdr;
-	uint32_t crc;
+	struct ubi_vid_io_buf *vidb;
 
-	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
-	if (!vid_hdr)
+	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+	if (!vidb)
 		return -ENOMEM;
 
-retry:
-	new_pnum = ubi_wl_get_peb(ubi);
-	if (new_pnum < 0) {
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		up_read(&ubi->fm_eba_sem);
-		return new_pnum;
+	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
+		bool retry;
+
+		err = try_recover_peb(vol, pnum, lnum, buf, offset, len, vidb,
+				      &retry);
+		if (!err || !retry)
+			break;
+
+		ubi_msg(ubi, "try again");
 	}
 
-	ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
-		pnum, new_pnum);
+	ubi_free_vid_buf(vidb);
 
-	err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
-	if (err && err != UBI_IO_BITFLIPS) {
-		if (err > 0)
-			err = -EIO;
-		up_read(&ubi->fm_eba_sem);
+	return err;
+}
+
+/**
+ * try_write_vid_and_data - try to write VID header and data to a new PEB.
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @vidb: the VID buffer to write
+ * @buf: buffer containing the data
+ * @offset: where to start writing data
+ * @len: how many bytes should be written
+ *
+ * This function tries to write VID header and data belonging to logical
+ * eraseblock @lnum of volume @vol to a new physical eraseblock. Returns zero
+ * in case of success and a negative error code in case of failure.
+ * In case of error, it is possible that something was still written to the
+ * flash media, but may be some garbage.
+ */
+static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
+				  struct ubi_vid_io_buf *vidb, const void *buf,
+				  int offset, int len)
+{
+	struct ubi_device *ubi = vol->ubi;
+	int pnum, opnum, err, vol_id = vol->vol_id;
+
+	pnum = ubi_wl_get_peb(ubi);
+	if (pnum < 0) {
+		err = pnum;
 		goto out_put;
 	}
 
-	ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
+	opnum = vol->eba_tbl->entries[lnum].pnum;
 
-	mutex_lock(&ubi->buf_mutex);
-	memset(ubi->peb_buf + offset, 0xFF, len);
+	dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
+		len, offset, vol_id, lnum, pnum);
 
-	/* Read everything before the area where the write failure happened */
-	if (offset > 0) {
-		err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
-		if (err && err != UBI_IO_BITFLIPS) {
-			up_read(&ubi->fm_eba_sem);
-			goto out_unlock;
+	err = ubi_io_write_vid_hdr(ubi, pnum, vidb);
+	if (err) {
+		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
+			 vol_id, lnum, pnum);
+		goto out_put;
+	}
+
+	if (len) {
+		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
+		if (err) {
+			ubi_warn(ubi,
+				 "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
+				 len, offset, vol_id, lnum, pnum);
+			goto out_put;
 		}
 	}
 
-	memcpy(ubi->peb_buf + offset, buf, len);
+	vol->eba_tbl->entries[lnum].pnum = pnum;
 
-	data_size = offset + len;
-	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
-	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
-	vid_hdr->copy_flag = 1;
-	vid_hdr->data_size = cpu_to_be32(data_size);
-	vid_hdr->data_crc = cpu_to_be32(crc);
-	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
-	if (err) {
-		mutex_unlock(&ubi->buf_mutex);
-		up_read(&ubi->fm_eba_sem);
-		goto write_error;
-	}
-
-	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
-	if (err) {
-		mutex_unlock(&ubi->buf_mutex);
-		up_read(&ubi->fm_eba_sem);
-		goto write_error;
-	}
-
-	mutex_unlock(&ubi->buf_mutex);
-	ubi_free_vid_hdr(ubi, vid_hdr);
-
-	vol->eba_tbl[lnum] = new_pnum;
-	up_read(&ubi->fm_eba_sem);
-	ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
-
-	ubi_msg(ubi, "data was successfully recovered");
-	return 0;
-
-out_unlock:
-	mutex_unlock(&ubi->buf_mutex);
 out_put:
-	ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
-	ubi_free_vid_hdr(ubi, vid_hdr);
-	return err;
+	up_read(&ubi->fm_eba_sem);
 
-write_error:
-	/*
-	 * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
-	 * get another one.
-	 */
-	ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
-	ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
-	if (++tries > UBI_IO_RETRIES) {
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		return err;
-	}
-	ubi_msg(ubi, "try again");
-	goto retry;
+	if (err && pnum >= 0)
+		err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+	else if (!err && opnum >= 0)
+		err = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
+
+	return err;
 }
 
 /**
@@ -681,11 +912,13 @@
  * @vol. Returns zero in case of success and a negative error code in case
  * of failure. In case of error, it is possible that something was still
  * written to the flash media, but may be some garbage.
+ * This function retries %UBI_IO_RETRIES times before giving up.
  */
 int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
 		      const void *buf, int offset, int len)
 {
-	int err, pnum, tries = 0, vol_id = vol->vol_id;
+	int err, pnum, tries, vol_id = vol->vol_id;
+	struct ubi_vid_io_buf *vidb;
 	struct ubi_vid_hdr *vid_hdr;
 
 	if (ubi->ro_mode)
@@ -695,7 +928,7 @@
 	if (err)
 		return err;
 
-	pnum = vol->eba_tbl[lnum];
+	pnum = vol->eba_tbl->entries[lnum].pnum;
 	if (pnum >= 0) {
 		dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
 			len, offset, vol_id, lnum, pnum);
@@ -706,23 +939,23 @@
 			if (err == -EIO && ubi->bad_allowed)
 				err = recover_peb(ubi, pnum, vol_id, lnum, buf,
 						  offset, len);
-			if (err)
-				ubi_ro_mode(ubi);
 		}
-		leb_write_unlock(ubi, vol_id, lnum);
-		return err;
+
+		goto out;
 	}
 
 	/*
 	 * The logical eraseblock is not mapped. We have to get a free physical
 	 * eraseblock and write the volume identifier header there first.
 	 */
-	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
-	if (!vid_hdr) {
+	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+	if (!vidb) {
 		leb_write_unlock(ubi, vol_id, lnum);
 		return -ENOMEM;
 	}
 
+	vid_hdr = ubi_get_vid_hdr(vidb);
+
 	vid_hdr->vol_type = UBI_VID_DYNAMIC;
 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	vid_hdr->vol_id = cpu_to_be32(vol_id);
@@ -730,67 +963,30 @@
 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
 	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
 
-retry:
-	pnum = ubi_wl_get_peb(ubi);
-	if (pnum < 0) {
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		leb_write_unlock(ubi, vol_id, lnum);
-		up_read(&ubi->fm_eba_sem);
-		return pnum;
+	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
+		err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len);
+		if (err != -EIO || !ubi->bad_allowed)
+			break;
+
+		/*
+		 * Fortunately, this is the first write operation to this
+		 * physical eraseblock, so just put it and request a new one.
+		 * We assume that if this physical eraseblock went bad, the
+		 * erase code will handle that.
+		 */
+		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+		ubi_msg(ubi, "try another PEB");
 	}
 
-	dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
-		len, offset, vol_id, lnum, pnum);
+	ubi_free_vid_buf(vidb);
 
-	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
-	if (err) {
-		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
-			 vol_id, lnum, pnum);
-		up_read(&ubi->fm_eba_sem);
-		goto write_error;
-	}
-
-	if (len) {
-		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
-		if (err) {
-			ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
-				 len, offset, vol_id, lnum, pnum);
-			up_read(&ubi->fm_eba_sem);
-			goto write_error;
-		}
-	}
-
-	vol->eba_tbl[lnum] = pnum;
-	up_read(&ubi->fm_eba_sem);
+out:
+	if (err)
+		ubi_ro_mode(ubi);
 
 	leb_write_unlock(ubi, vol_id, lnum);
-	ubi_free_vid_hdr(ubi, vid_hdr);
-	return 0;
 
-write_error:
-	if (err != -EIO || !ubi->bad_allowed) {
-		ubi_ro_mode(ubi);
-		leb_write_unlock(ubi, vol_id, lnum);
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		return err;
-	}
-
-	/*
-	 * Fortunately, this is the first write operation to this physical
-	 * eraseblock, so just put it and request a new one. We assume that if
-	 * this physical eraseblock went bad, the erase code will handle that.
-	 */
-	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
-	if (err || ++tries > UBI_IO_RETRIES) {
-		ubi_ro_mode(ubi);
-		leb_write_unlock(ubi, vol_id, lnum);
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		return err;
-	}
-
-	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
-	ubi_msg(ubi, "try another PEB");
-	goto retry;
+	return err;
 }
 
 /**
@@ -818,7 +1014,8 @@
 int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
 			 int lnum, const void *buf, int len, int used_ebs)
 {
-	int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
+	int err, tries, data_size = len, vol_id = vol->vol_id;
+	struct ubi_vid_io_buf *vidb;
 	struct ubi_vid_hdr *vid_hdr;
 	uint32_t crc;
 
@@ -831,15 +1028,15 @@
 	else
 		ubi_assert(!(len & (ubi->min_io_size - 1)));
 
-	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
-	if (!vid_hdr)
+	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+	if (!vidb)
 		return -ENOMEM;
 
+	vid_hdr = ubi_get_vid_hdr(vidb);
+
 	err = leb_write_lock(ubi, vol_id, lnum);
-	if (err) {
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		return err;
-	}
+	if (err)
+		goto out;
 
 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	vid_hdr->vol_id = cpu_to_be32(vol_id);
@@ -853,66 +1050,26 @@
 	vid_hdr->used_ebs = cpu_to_be32(used_ebs);
 	vid_hdr->data_crc = cpu_to_be32(crc);
 
-retry:
-	pnum = ubi_wl_get_peb(ubi);
-	if (pnum < 0) {
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		leb_write_unlock(ubi, vol_id, lnum);
-		up_read(&ubi->fm_eba_sem);
-		return pnum;
+	ubi_assert(vol->eba_tbl->entries[lnum].pnum < 0);
+
+	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
+		err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
+		if (err != -EIO || !ubi->bad_allowed)
+			break;
+
+		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+		ubi_msg(ubi, "try another PEB");
 	}
 
-	dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
-		len, vol_id, lnum, pnum, used_ebs);
-
-	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
-	if (err) {
-		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
-			 vol_id, lnum, pnum);
-		up_read(&ubi->fm_eba_sem);
-		goto write_error;
-	}
-
-	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
-	if (err) {
-		ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
-			 len, pnum);
-		up_read(&ubi->fm_eba_sem);
-		goto write_error;
-	}
-
-	ubi_assert(vol->eba_tbl[lnum] < 0);
-	vol->eba_tbl[lnum] = pnum;
-	up_read(&ubi->fm_eba_sem);
+	if (err)
+		ubi_ro_mode(ubi);
 
 	leb_write_unlock(ubi, vol_id, lnum);
-	ubi_free_vid_hdr(ubi, vid_hdr);
-	return 0;
 
-write_error:
-	if (err != -EIO || !ubi->bad_allowed) {
-		/*
-		 * This flash device does not admit of bad eraseblocks or
-		 * something nasty and unexpected happened. Switch to read-only
-		 * mode just in case.
-		 */
-		ubi_ro_mode(ubi);
-		leb_write_unlock(ubi, vol_id, lnum);
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		return err;
-	}
+out:
+	ubi_free_vid_buf(vidb);
 
-	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
-	if (err || ++tries > UBI_IO_RETRIES) {
-		ubi_ro_mode(ubi);
-		leb_write_unlock(ubi, vol_id, lnum);
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		return err;
-	}
-
-	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
-	ubi_msg(ubi, "try another PEB");
-	goto retry;
+	return err;
 }
 
 /*
@@ -935,7 +1092,8 @@
 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
 			      int lnum, const void *buf, int len)
 {
-	int err, pnum, old_pnum, tries = 0, vol_id = vol->vol_id;
+	int err, tries, vol_id = vol->vol_id;
+	struct ubi_vid_io_buf *vidb;
 	struct ubi_vid_hdr *vid_hdr;
 	uint32_t crc;
 
@@ -953,10 +1111,12 @@
 		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
 	}
 
-	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
-	if (!vid_hdr)
+	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+	if (!vidb)
 		return -ENOMEM;
 
+	vid_hdr = ubi_get_vid_hdr(vidb);
+
 	mutex_lock(&ubi->alc_mutex);
 	err = leb_write_lock(ubi, vol_id, lnum);
 	if (err)
@@ -974,70 +1134,31 @@
 	vid_hdr->copy_flag = 1;
 	vid_hdr->data_crc = cpu_to_be32(crc);
 
-retry:
-	pnum = ubi_wl_get_peb(ubi);
-	if (pnum < 0) {
-		err = pnum;
-		up_read(&ubi->fm_eba_sem);
-		goto out_leb_unlock;
+	dbg_eba("change LEB %d:%d", vol_id, lnum);
+
+	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
+		err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
+		if (err != -EIO || !ubi->bad_allowed)
+			break;
+
+		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+		ubi_msg(ubi, "try another PEB");
 	}
 
-	dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
-		vol_id, lnum, vol->eba_tbl[lnum], pnum);
+	/*
+	 * This flash device does not admit of bad eraseblocks or
+	 * something nasty and unexpected happened. Switch to read-only
+	 * mode just in case.
+	 */
+	if (err)
+		ubi_ro_mode(ubi);
 
-	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
-	if (err) {
-		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
-			 vol_id, lnum, pnum);
-		up_read(&ubi->fm_eba_sem);
-		goto write_error;
-	}
-
-	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
-	if (err) {
-		ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
-			 len, pnum);
-		up_read(&ubi->fm_eba_sem);
-		goto write_error;
-	}
-
-	old_pnum = vol->eba_tbl[lnum];
-	vol->eba_tbl[lnum] = pnum;
-	up_read(&ubi->fm_eba_sem);
-
-	if (old_pnum >= 0) {
-		err = ubi_wl_put_peb(ubi, vol_id, lnum, old_pnum, 0);
-		if (err)
-			goto out_leb_unlock;
-	}
-
-out_leb_unlock:
 	leb_write_unlock(ubi, vol_id, lnum);
+
 out_mutex:
 	mutex_unlock(&ubi->alc_mutex);
-	ubi_free_vid_hdr(ubi, vid_hdr);
+	ubi_free_vid_buf(vidb);
 	return err;
-
-write_error:
-	if (err != -EIO || !ubi->bad_allowed) {
-		/*
-		 * This flash device does not admit of bad eraseblocks or
-		 * something nasty and unexpected happened. Switch to read-only
-		 * mode just in case.
-		 */
-		ubi_ro_mode(ubi);
-		goto out_leb_unlock;
-	}
-
-	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
-	if (err || ++tries > UBI_IO_RETRIES) {
-		ubi_ro_mode(ubi);
-		goto out_leb_unlock;
-	}
-
-	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
-	ubi_msg(ubi, "try another PEB");
-	goto retry;
 }
 
 /**
@@ -1082,12 +1203,15 @@
  *   o a negative error code in case of failure.
  */
 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
-		     struct ubi_vid_hdr *vid_hdr)
+		     struct ubi_vid_io_buf *vidb)
 {
 	int err, vol_id, lnum, data_size, aldata_size, idx;
+	struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
 	struct ubi_volume *vol;
 	uint32_t crc;
 
+	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
+
 	vol_id = be32_to_cpu(vid_hdr->vol_id);
 	lnum = be32_to_cpu(vid_hdr->lnum);
 
@@ -1142,9 +1266,9 @@
 	 * probably waiting on @ubi->move_mutex. No need to continue the work,
 	 * cancel it.
 	 */
-	if (vol->eba_tbl[lnum] != from) {
+	if (vol->eba_tbl->entries[lnum].pnum != from) {
 		dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
-		       vol_id, lnum, from, vol->eba_tbl[lnum]);
+		       vol_id, lnum, from, vol->eba_tbl->entries[lnum].pnum);
 		err = MOVE_CANCEL_RACE;
 		goto out_unlock_leb;
 	}
@@ -1196,7 +1320,7 @@
 	}
 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 
-	err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
+	err = ubi_io_write_vid_hdr(ubi, to, vidb);
 	if (err) {
 		if (err == -EIO)
 			err = MOVE_TARGET_WR_ERR;
@@ -1206,7 +1330,7 @@
 	cond_resched();
 
 	/* Read the VID header back and check if it was written correctly */
-	err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
+	err = ubi_io_read_vid_hdr(ubi, to, vidb, 1);
 	if (err) {
 		if (err != UBI_IO_BITFLIPS) {
 			ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
@@ -1229,10 +1353,8 @@
 		cond_resched();
 	}
 
-	ubi_assert(vol->eba_tbl[lnum] == from);
-	down_read(&ubi->fm_eba_sem);
-	vol->eba_tbl[lnum] = to;
-	up_read(&ubi->fm_eba_sem);
+	ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
+	vol->eba_tbl->entries[lnum].pnum = to;
 
 out_unlock_buf:
 	mutex_unlock(&ubi->buf_mutex);
@@ -1388,7 +1510,7 @@
  */
 int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
 {
-	int i, j, err, num_volumes;
+	int i, err, num_volumes;
 	struct ubi_ainf_volume *av;
 	struct ubi_volume *vol;
 	struct ubi_ainf_peb *aeb;
@@ -1404,35 +1526,39 @@
 	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
 
 	for (i = 0; i < num_volumes; i++) {
+		struct ubi_eba_table *tbl;
+
 		vol = ubi->volumes[i];
 		if (!vol)
 			continue;
 
 		cond_resched();
 
-		vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
-				       GFP_KERNEL);
-		if (!vol->eba_tbl) {
-			err = -ENOMEM;
+		tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
+		if (IS_ERR(tbl)) {
+			err = PTR_ERR(tbl);
 			goto out_free;
 		}
 
-		for (j = 0; j < vol->reserved_pebs; j++)
-			vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
+		ubi_eba_replace_table(vol, tbl);
 
 		av = ubi_find_av(ai, idx2vol_id(ubi, i));
 		if (!av)
 			continue;
 
 		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
-			if (aeb->lnum >= vol->reserved_pebs)
+			if (aeb->lnum >= vol->reserved_pebs) {
 				/*
 				 * This may happen in case of an unclean reboot
 				 * during re-size.
 				 */
 				ubi_move_aeb_to_list(av, aeb, &ai->erase);
-			else
-				vol->eba_tbl[aeb->lnum] = aeb->pnum;
+			} else {
+				struct ubi_eba_entry *entry;
+
+				entry = &vol->eba_tbl->entries[aeb->lnum];
+				entry->pnum = aeb->pnum;
+			}
 		}
 	}
 
@@ -1469,8 +1595,7 @@
 	for (i = 0; i < num_volumes; i++) {
 		if (!ubi->volumes[i])
 			continue;
-		kfree(ubi->volumes[i]->eba_tbl);
-		ubi->volumes[i]->eba_tbl = NULL;
+		ubi_eba_replace_table(ubi->volumes[i], NULL);
 	}
 	return err;
 }
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 30d3999..4f0bd6b 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -262,6 +262,8 @@
 	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
 	int pnum;
 
+	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
+
 	if (pool->used == pool->size) {
 		/* We cannot update the fastmap here because this
 		 * function is called in atomic context.
@@ -303,7 +305,7 @@
 
 	wrk->anchor = 1;
 	wrk->func = &wear_leveling_worker;
-	schedule_ubi_work(ubi, wrk);
+	__schedule_ubi_work(ubi, wrk);
 	return 0;
 }
 
@@ -344,7 +346,7 @@
 	spin_unlock(&ubi->wl_lock);
 
 	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
-	return schedule_erase(ubi, e, vol_id, lnum, torture);
+	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
 }
 
 /**
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 48eb55f..d6384d9 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -110,21 +110,23 @@
  * Returns a new struct ubi_vid_hdr on success.
  * NULL indicates out of memory.
  */
-static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
+static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
 {
-	struct ubi_vid_hdr *new;
+	struct ubi_vid_io_buf *new;
+	struct ubi_vid_hdr *vh;
 
-	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+	new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
 	if (!new)
 		goto out;
 
-	new->vol_type = UBI_VID_DYNAMIC;
-	new->vol_id = cpu_to_be32(vol_id);
+	vh = ubi_get_vid_hdr(new);
+	vh->vol_type = UBI_VID_DYNAMIC;
+	vh->vol_id = cpu_to_be32(vol_id);
 
 	/* UBI implementations without fastmap support have to delete the
 	 * fastmap.
 	 */
-	new->compat = UBI_COMPAT_DELETE;
+	vh->compat = UBI_COMPAT_DELETE;
 
 out:
 	return new;
@@ -145,12 +147,10 @@
 {
 	struct ubi_ainf_peb *aeb;
 
-	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+	aeb = ubi_alloc_aeb(ai, pnum, ec);
 	if (!aeb)
 		return -ENOMEM;
 
-	aeb->pnum = pnum;
-	aeb->ec = ec;
 	aeb->lnum = -1;
 	aeb->scrub = scrub;
 	aeb->copy_flag = aeb->sqnum = 0;
@@ -186,40 +186,19 @@
 				       int last_eb_bytes)
 {
 	struct ubi_ainf_volume *av;
-	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
 
-	while (*p) {
-		parent = *p;
-		av = rb_entry(parent, struct ubi_ainf_volume, rb);
+	av = ubi_add_av(ai, vol_id);
+	if (IS_ERR(av))
+		return av;
 
-		if (vol_id > av->vol_id)
-			p = &(*p)->rb_left;
-		else if (vol_id < av->vol_id)
-			p = &(*p)->rb_right;
-		else
-			return ERR_PTR(-EINVAL);
-	}
-
-	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
-	if (!av)
-		goto out;
-
-	av->highest_lnum = av->leb_count = av->used_ebs = 0;
-	av->vol_id = vol_id;
 	av->data_pad = data_pad;
 	av->last_data_size = last_eb_bytes;
 	av->compat = 0;
 	av->vol_type = vol_type;
-	av->root = RB_ROOT;
 	if (av->vol_type == UBI_STATIC_VOLUME)
 		av->used_ebs = used_ebs;
 
 	dbg_bld("found volume (ID %i)", vol_id);
-
-	rb_link_node(&av->rb, parent, p);
-	rb_insert_color(&av->rb, &ai->volumes);
-
-out:
 	return av;
 }
 
@@ -297,7 +276,7 @@
 		 */
 		if (aeb->pnum == new_aeb->pnum) {
 			ubi_assert(aeb->lnum == new_aeb->lnum);
-			kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+			ubi_free_aeb(ai, new_aeb);
 
 			return 0;
 		}
@@ -308,13 +287,10 @@
 
 		/* new_aeb is newer */
 		if (cmp_res & 1) {
-			victim = kmem_cache_alloc(ai->aeb_slab_cache,
-				GFP_KERNEL);
+			victim = ubi_alloc_aeb(ai, aeb->ec, aeb->pnum);
 			if (!victim)
 				return -ENOMEM;
 
-			victim->ec = aeb->ec;
-			victim->pnum = aeb->pnum;
 			list_add_tail(&victim->u.list, &ai->erase);
 
 			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
@@ -328,7 +304,8 @@
 			aeb->pnum = new_aeb->pnum;
 			aeb->copy_flag = new_vh->copy_flag;
 			aeb->scrub = new_aeb->scrub;
-			kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+			aeb->sqnum = new_aeb->sqnum;
+			ubi_free_aeb(ai, new_aeb);
 
 		/* new_aeb is older */
 		} else {
@@ -370,41 +347,24 @@
 			    struct ubi_vid_hdr *new_vh,
 			    struct ubi_ainf_peb *new_aeb)
 {
-	struct ubi_ainf_volume *av, *tmp_av = NULL;
-	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
-	int found = 0;
+	int vol_id = be32_to_cpu(new_vh->vol_id);
+	struct ubi_ainf_volume *av;
 
-	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
-		be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
-		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+	if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
+		ubi_free_aeb(ai, new_aeb);
 
 		return 0;
 	}
 
 	/* Find the volume this SEB belongs to */
-	while (*p) {
-		parent = *p;
-		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
-
-		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
-			p = &(*p)->rb_left;
-		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
-			p = &(*p)->rb_right;
-		else {
-			found = 1;
-			break;
-		}
-	}
-
-	if (found)
-		av = tmp_av;
-	else {
+	av = ubi_find_av(ai, vol_id);
+	if (!av) {
 		ubi_err(ubi, "orphaned volume in fastmap pool!");
-		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+		ubi_free_aeb(ai, new_aeb);
 		return UBI_BAD_FASTMAP;
 	}
 
-	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
+	ubi_assert(vol_id == av->vol_id);
 
 	return update_vol(ubi, ai, av, new_vh, new_aeb);
 }
@@ -423,16 +383,12 @@
 	struct rb_node *node, *node2;
 	struct ubi_ainf_peb *aeb;
 
-	for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
-		av = rb_entry(node, struct ubi_ainf_volume, rb);
-
-		for (node2 = rb_first(&av->root); node2;
-		     node2 = rb_next(node2)) {
-			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
+	ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
+		ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
 			if (aeb->pnum == pnum) {
 				rb_erase(&aeb->u.rb, &av->root);
 				av->leb_count--;
-				kmem_cache_free(ai->aeb_slab_cache, aeb);
+				ubi_free_aeb(ai, aeb);
 				return;
 			}
 		}
@@ -455,6 +411,7 @@
 		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
 		     struct list_head *free)
 {
+	struct ubi_vid_io_buf *vb;
 	struct ubi_vid_hdr *vh;
 	struct ubi_ec_hdr *ech;
 	struct ubi_ainf_peb *new_aeb;
@@ -464,12 +421,14 @@
 	if (!ech)
 		return -ENOMEM;
 
-	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
-	if (!vh) {
+	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+	if (!vb) {
 		kfree(ech);
 		return -ENOMEM;
 	}
 
+	vh = ubi_get_vid_hdr(vb);
+
 	dbg_bld("scanning fastmap pool: size = %i", pool_size);
 
 	/*
@@ -510,15 +469,16 @@
 			goto out;
 		}
 
-		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+		err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
 		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
 			unsigned long long ec = be64_to_cpu(ech->ec);
 			unmap_peb(ai, pnum);
 			dbg_bld("Adding PEB to free: %i", pnum);
+
 			if (err == UBI_IO_FF_BITFLIPS)
-				add_aeb(ai, free, pnum, ec, 1);
-			else
-				add_aeb(ai, free, pnum, ec, 0);
+				scrub = 1;
+
+			add_aeb(ai, free, pnum, ec, scrub);
 			continue;
 		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
 			dbg_bld("Found non empty PEB:%i in pool", pnum);
@@ -526,15 +486,12 @@
 			if (err == UBI_IO_BITFLIPS)
 				scrub = 1;
 
-			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
-						   GFP_KERNEL);
+			new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
 			if (!new_aeb) {
 				ret = -ENOMEM;
 				goto out;
 			}
 
-			new_aeb->ec = be64_to_cpu(ech->ec);
-			new_aeb->pnum = pnum;
 			new_aeb->lnum = be32_to_cpu(vh->lnum);
 			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
 			new_aeb->copy_flag = vh->copy_flag;
@@ -558,7 +515,7 @@
 	}
 
 out:
-	ubi_free_vid_hdr(ubi, vh);
+	ubi_free_vid_buf(vb);
 	kfree(ech);
 	return ret;
 }
@@ -841,11 +798,11 @@
 fail:
 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
 		list_del(&tmp_aeb->u.list);
-		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+		ubi_free_aeb(ai, tmp_aeb);
 	}
 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
 		list_del(&tmp_aeb->u.list);
-		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+		ubi_free_aeb(ai, tmp_aeb);
 	}
 
 	return ret;
@@ -886,6 +843,7 @@
 		     struct ubi_attach_info *scan_ai)
 {
 	struct ubi_fm_sb *fmsb, *fmsb2;
+	struct ubi_vid_io_buf *vb;
 	struct ubi_vid_hdr *vh;
 	struct ubi_ec_hdr *ech;
 	struct ubi_fastmap_layout *fm;
@@ -919,7 +877,7 @@
 		goto out;
 	}
 
-	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
+	ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
 	if (ret && ret != UBI_IO_BITFLIPS)
 		goto free_fm_sb;
 	else if (ret == UBI_IO_BITFLIPS)
@@ -961,12 +919,14 @@
 		goto free_fm_sb;
 	}
 
-	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
-	if (!vh) {
+	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+	if (!vb) {
 		ret = -ENOMEM;
 		goto free_hdr;
 	}
 
+	vh = ubi_get_vid_hdr(vb);
+
 	for (i = 0; i < used_blocks; i++) {
 		int image_seq;
 
@@ -1009,7 +969,7 @@
 			goto free_hdr;
 		}
 
-		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+		ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
 		if (ret && ret != UBI_IO_BITFLIPS) {
 			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
 				i, pnum);
@@ -1037,8 +997,8 @@
 		if (sqnum < be64_to_cpu(vh->sqnum))
 			sqnum = be64_to_cpu(vh->sqnum);
 
-		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
-				  ubi->leb_start, ubi->leb_size);
+		ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
+				       pnum, 0, ubi->leb_size);
 		if (ret && ret != UBI_IO_BITFLIPS) {
 			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
 				"err: %i)", i, pnum, ret);
@@ -1099,7 +1059,7 @@
 	ubi->fm_disabled = 0;
 	ubi->fast_attach = 1;
 
-	ubi_free_vid_hdr(ubi, vh);
+	ubi_free_vid_buf(vb);
 	kfree(ech);
 out:
 	up_write(&ubi->fm_protect);
@@ -1108,7 +1068,7 @@
 	return ret;
 
 free_hdr:
-	ubi_free_vid_hdr(ubi, vh);
+	ubi_free_vid_buf(vb);
 	kfree(ech);
 free_fm_sb:
 	kfree(fmsb);
@@ -1136,6 +1096,7 @@
 	struct ubi_fm_eba *feba;
 	struct ubi_wl_entry *wl_e;
 	struct ubi_volume *vol;
+	struct ubi_vid_io_buf *avbuf, *dvbuf;
 	struct ubi_vid_hdr *avhdr, *dvhdr;
 	struct ubi_work *ubi_wrk;
 	struct rb_node *tmp_rb;
@@ -1146,18 +1107,21 @@
 	fm_raw = ubi->fm_buf;
 	memset(ubi->fm_buf, 0, ubi->fm_size);
 
-	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
-	if (!avhdr) {
+	avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
+	if (!avbuf) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
-	if (!dvhdr) {
+	dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
+	if (!dvbuf) {
 		ret = -ENOMEM;
 		goto out_kfree;
 	}
 
+	avhdr = ubi_get_vid_hdr(avbuf);
+	dvhdr = ubi_get_vid_hdr(dvbuf);
+
 	seen_pebs = init_seen(ubi);
 	if (IS_ERR(seen_pebs)) {
 		ret = PTR_ERR(seen_pebs);
@@ -1306,8 +1270,12 @@
 		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
 		ubi_assert(fm_pos <= ubi->fm_size);
 
-		for (j = 0; j < vol->reserved_pebs; j++)
-			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
+		for (j = 0; j < vol->reserved_pebs; j++) {
+			struct ubi_eba_leb_desc ldesc;
+
+			ubi_eba_get_ldesc(vol, j, &ldesc);
+			feba->pnum[j] = cpu_to_be32(ldesc.pnum);
+		}
 
 		feba->reserved_pebs = cpu_to_be32(j);
 		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
@@ -1322,7 +1290,7 @@
 	spin_unlock(&ubi->volumes_lock);
 
 	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
-	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
+	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
 	if (ret) {
 		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
 		goto out_kfree;
@@ -1343,7 +1311,7 @@
 		dvhdr->lnum = cpu_to_be32(i);
 		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
 			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
-		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
+		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
 		if (ret) {
 			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
 				new_fm->e[i]->pnum);
@@ -1352,8 +1320,8 @@
 	}
 
 	for (i = 0; i < new_fm->used_blocks; i++) {
-		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
-			new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
+		ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
+					new_fm->e[i]->pnum, 0, ubi->leb_size);
 		if (ret) {
 			ubi_err(ubi, "unable to write fastmap to PEB %i!",
 				new_fm->e[i]->pnum);
@@ -1368,8 +1336,8 @@
 	dbg_bld("fastmap written!");
 
 out_kfree:
-	ubi_free_vid_hdr(ubi, avhdr);
-	ubi_free_vid_hdr(ubi, dvhdr);
+	ubi_free_vid_buf(avbuf);
+	ubi_free_vid_buf(dvbuf);
 	free_seen(seen_pebs);
 out:
 	return ret;
@@ -1439,7 +1407,8 @@
 	int ret;
 	struct ubi_fastmap_layout *fm;
 	struct ubi_wl_entry *e;
-	struct ubi_vid_hdr *vh = NULL;
+	struct ubi_vid_io_buf *vb = NULL;
+	struct ubi_vid_hdr *vh;
 
 	if (!ubi->fm)
 		return 0;
@@ -1451,10 +1420,12 @@
 	if (!fm)
 		goto out;
 
-	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
-	if (!vh)
+	vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
+	if (!vb)
 		goto out_free_fm;
 
+	vh = ubi_get_vid_hdr(vb);
+
 	ret = -ENOSPC;
 	e = ubi_wl_get_fm_peb(ubi, 1);
 	if (!e)
@@ -1465,7 +1436,7 @@
 	 * to scanning mode.
 	 */
 	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
-	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
+	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
 	if (ret < 0) {
 		ubi_wl_put_fm_peb(ubi, e, 0, 0);
 		goto out_free_fm;
@@ -1477,7 +1448,7 @@
 	ubi->fm = fm;
 
 out:
-	ubi_free_vid_hdr(ubi, vh);
+	ubi_free_vid_buf(vb);
 	return ret;
 
 out_free_fm:
@@ -1522,22 +1493,30 @@
 	struct ubi_wl_entry *tmp_e;
 
 	down_write(&ubi->fm_protect);
+	down_write(&ubi->work_sem);
+	down_write(&ubi->fm_eba_sem);
 
 	ubi_refill_pools(ubi);
 
 	if (ubi->ro_mode || ubi->fm_disabled) {
+		up_write(&ubi->fm_eba_sem);
+		up_write(&ubi->work_sem);
 		up_write(&ubi->fm_protect);
 		return 0;
 	}
 
 	ret = ubi_ensure_anchor_pebs(ubi);
 	if (ret) {
+		up_write(&ubi->fm_eba_sem);
+		up_write(&ubi->work_sem);
 		up_write(&ubi->fm_protect);
 		return ret;
 	}
 
 	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
 	if (!new_fm) {
+		up_write(&ubi->fm_eba_sem);
+		up_write(&ubi->work_sem);
 		up_write(&ubi->fm_protect);
 		return -ENOMEM;
 	}
@@ -1646,16 +1625,14 @@
 		new_fm->e[0] = tmp_e;
 	}
 
-	down_write(&ubi->work_sem);
-	down_write(&ubi->fm_eba_sem);
 	ret = ubi_write_fastmap(ubi, new_fm);
-	up_write(&ubi->fm_eba_sem);
-	up_write(&ubi->work_sem);
 
 	if (ret)
 		goto err;
 
 out_unlock:
+	up_write(&ubi->fm_eba_sem);
+	up_write(&ubi->work_sem);
 	up_write(&ubi->fm_protect);
 	kfree(old_fm);
 	return ret;
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index ff8cafe..b6fb8f9 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -502,6 +502,7 @@
 	loff_t addr;
 	uint32_t data = 0;
 	struct ubi_ec_hdr ec_hdr;
+	struct ubi_vid_io_buf vidb;
 
 	/*
 	 * Note, we cannot generally define VID header buffers on stack,
@@ -528,7 +529,10 @@
 			goto error;
 	}
 
-	err = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
+	ubi_init_vid_buf(ubi, &vidb, &vid_hdr);
+	ubi_assert(&vid_hdr == ubi_get_vid_hdr(&vidb));
+
+	err = ubi_io_read_vid_hdr(ubi, pnum, &vidb, 0);
 	if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
 	    err != UBI_IO_FF){
 		addr += ubi->vid_hdr_aloffset;
@@ -995,12 +999,11 @@
  * ubi_io_read_vid_hdr - read and check a volume identifier header.
  * @ubi: UBI device description object
  * @pnum: physical eraseblock number to read from
- * @vid_hdr: &struct ubi_vid_hdr object where to store the read volume
- * identifier header
+ * @vidb: the volume identifier buffer to store data in
  * @verbose: be verbose if the header is corrupted or wasn't found
  *
  * This function reads the volume identifier header from physical eraseblock
- * @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read
+ * @pnum and stores it in @vidb. It also checks CRC checksum of the read
  * volume identifier header. The error codes are the same as in
  * 'ubi_io_read_ec_hdr()'.
  *
@@ -1008,16 +1011,16 @@
  * 'ubi_io_read_ec_hdr()', so refer commentaries in 'ubi_io_read_ec_hdr()'.
  */
 int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
-			struct ubi_vid_hdr *vid_hdr, int verbose)
+			struct ubi_vid_io_buf *vidb, int verbose)
 {
 	int err, read_err;
 	uint32_t crc, magic, hdr_crc;
-	void *p;
+	struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
+	void *p = vidb->buffer;
 
 	dbg_io("read VID header from PEB %d", pnum);
 	ubi_assert(pnum >= 0 &&  pnum < ubi->peb_count);
 
-	p = (char *)vid_hdr - ubi->vid_hdr_shift;
 	read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
 			  ubi->vid_hdr_shift + UBI_VID_HDR_SIZE);
 	if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
@@ -1080,23 +1083,24 @@
  * ubi_io_write_vid_hdr - write a volume identifier header.
  * @ubi: UBI device description object
  * @pnum: the physical eraseblock number to write to
- * @vid_hdr: the volume identifier header to write
+ * @vidb: the volume identifier buffer to write
  *
  * This function writes the volume identifier header described by @vid_hdr to
  * physical eraseblock @pnum. This function automatically fills the
- * @vid_hdr->magic and the @vid_hdr->version fields, as well as calculates
- * header CRC checksum and stores it at vid_hdr->hdr_crc.
+ * @vidb->hdr->magic and the @vidb->hdr->version fields, as well as calculates
+ * header CRC checksum and stores it at vidb->hdr->hdr_crc.
  *
  * This function returns zero in case of success and a negative error code in
  * case of failure. If %-EIO is returned, the physical eraseblock probably went
  * bad.
  */
 int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
-			 struct ubi_vid_hdr *vid_hdr)
+			 struct ubi_vid_io_buf *vidb)
 {
+	struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
 	int err;
 	uint32_t crc;
-	void *p;
+	void *p = vidb->buffer;
 
 	dbg_io("write VID header to PEB %d", pnum);
 	ubi_assert(pnum >= 0 &&  pnum < ubi->peb_count);
@@ -1117,7 +1121,6 @@
 	if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
 		return -EROFS;
 
-	p = (char *)vid_hdr - ubi->vid_hdr_shift;
 	err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
 			   ubi->vid_hdr_alsize);
 	return err;
@@ -1283,17 +1286,19 @@
 {
 	int err;
 	uint32_t crc, hdr_crc;
+	struct ubi_vid_io_buf *vidb;
 	struct ubi_vid_hdr *vid_hdr;
 	void *p;
 
 	if (!ubi_dbg_chk_io(ubi))
 		return 0;
 
-	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
-	if (!vid_hdr)
+	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+	if (!vidb)
 		return -ENOMEM;
 
-	p = (char *)vid_hdr - ubi->vid_hdr_shift;
+	vid_hdr = ubi_get_vid_hdr(vidb);
+	p = vidb->buffer;
 	err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
 			  ubi->vid_hdr_alsize);
 	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
@@ -1314,7 +1319,7 @@
 	err = self_check_vid_hdr(ubi, pnum, vid_hdr);
 
 exit:
-	ubi_free_vid_hdr(ubi, vid_hdr);
+	ubi_free_vid_buf(vidb);
 	return err;
 }
 
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index a9e2cef..88b1897 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -538,7 +538,7 @@
 	if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
 		return -EROFS;
 
-	if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 ||
+	if (!ubi_leb_valid(vol, lnum) || offset < 0 || len < 0 ||
 	    offset + len > vol->usable_leb_size ||
 	    offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
 		return -EINVAL;
@@ -583,7 +583,7 @@
 	if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
 		return -EROFS;
 
-	if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 ||
+	if (!ubi_leb_valid(vol, lnum) || len < 0 ||
 	    len > vol->usable_leb_size || len & (ubi->min_io_size - 1))
 		return -EINVAL;
 
@@ -620,7 +620,7 @@
 	if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
 		return -EROFS;
 
-	if (lnum < 0 || lnum >= vol->reserved_pebs)
+	if (!ubi_leb_valid(vol, lnum))
 		return -EINVAL;
 
 	if (vol->upd_marker)
@@ -680,7 +680,7 @@
 	if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
 		return -EROFS;
 
-	if (lnum < 0 || lnum >= vol->reserved_pebs)
+	if (!ubi_leb_valid(vol, lnum))
 		return -EINVAL;
 
 	if (vol->upd_marker)
@@ -716,13 +716,13 @@
 	if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
 		return -EROFS;
 
-	if (lnum < 0 || lnum >= vol->reserved_pebs)
+	if (!ubi_leb_valid(vol, lnum))
 		return -EINVAL;
 
 	if (vol->upd_marker)
 		return -EBADF;
 
-	if (vol->eba_tbl[lnum] >= 0)
+	if (ubi_eba_is_mapped(vol, lnum))
 		return -EBADMSG;
 
 	return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
@@ -751,13 +751,13 @@
 
 	dbg_gen("test LEB %d:%d", vol->vol_id, lnum);
 
-	if (lnum < 0 || lnum >= vol->reserved_pebs)
+	if (!ubi_leb_valid(vol, lnum))
 		return -EINVAL;
 
 	if (vol->upd_marker)
 		return -EBADF;
 
-	return vol->eba_tbl[lnum] >= 0;
+	return ubi_eba_is_mapped(vol, lnum);
 }
 EXPORT_SYMBOL_GPL(ubi_is_mapped);
 
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index b616a115..697dbcb 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -167,6 +167,17 @@
 };
 
 /**
+ * struct ubi_vid_io_buf - VID buffer used to read/write VID info to/from the
+ *			   flash.
+ * @hdr: a pointer to the VID header stored in buffer
+ * @buffer: underlying buffer
+ */
+struct ubi_vid_io_buf {
+	struct ubi_vid_hdr *hdr;
+	void *buffer;
+};
+
+/**
  * struct ubi_wl_entry - wear-leveling entry.
  * @u.rb: link in the corresponding (free/used) RB-tree
  * @u.list: link in the protection queue
@@ -267,6 +278,21 @@
 };
 
 /**
+ * struct ubi_eba_leb_desc - EBA logical eraseblock descriptor
+ * @lnum: the logical eraseblock number
+ * @pnum: the physical eraseblock where the LEB can be found
+ *
+ * This structure is here to hide EBA's internal from other part of the
+ * UBI implementation.
+ *
+ * One can query the position of a LEB by calling ubi_eba_get_ldesc().
+ */
+struct ubi_eba_leb_desc {
+	int lnum;
+	int pnum;
+};
+
+/**
  * struct ubi_volume - UBI volume description data structure.
  * @dev: device object to make use of the the Linux device model
  * @cdev: character device object to create character device
@@ -344,7 +370,7 @@
 	long long upd_received;
 	void *upd_buf;
 
-	int *eba_tbl;
+	struct ubi_eba_table *eba_tbl;
 	unsigned int checked:1;
 	unsigned int corrupted:1;
 	unsigned int upd_marker:1;
@@ -724,6 +750,8 @@
  * @ec_sum: a temporary variable used when calculating @mean_ec
  * @ec_count: a temporary variable used when calculating @mean_ec
  * @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects
+ * @ech: temporary EC header. Only available during scan
+ * @vidh: temporary VID buffer. Only available during scan
  *
  * This data structure contains the result of attaching an MTD device and may
  * be used by other UBI sub-systems to build final UBI data structures, further
@@ -752,6 +780,8 @@
 	uint64_t ec_sum;
 	int ec_count;
 	struct kmem_cache *aeb_slab_cache;
+	struct ubi_ec_hdr *ech;
+	struct ubi_vid_io_buf *vidb;
 };
 
 /**
@@ -792,8 +822,12 @@
 extern struct blocking_notifier_head ubi_notifiers;
 
 /* attach.c */
+struct ubi_ainf_peb *ubi_alloc_aeb(struct ubi_attach_info *ai, int pnum,
+				   int ec);
+void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb);
 int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
 		  int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
+struct ubi_ainf_volume *ubi_add_av(struct ubi_attach_info *ai, int vol_id);
 struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
 				    int vol_id);
 void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
@@ -835,7 +869,21 @@
 void ubi_calculate_reserved(struct ubi_device *ubi);
 int ubi_check_pattern(const void *buf, uint8_t patt, int size);
 
+static inline bool ubi_leb_valid(struct ubi_volume *vol, int lnum)
+{
+	return lnum >= 0 && lnum < vol->reserved_pebs;
+}
+
 /* eba.c */
+struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol,
+					   int nentries);
+void ubi_eba_destroy_table(struct ubi_eba_table *tbl);
+void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst,
+			int nentries);
+void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl);
+void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum,
+		       struct ubi_eba_leb_desc *ldesc);
+bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum);
 int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
 		      int lnum);
 int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
@@ -850,7 +898,7 @@
 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
 			      int lnum, const void *buf, int len);
 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
-		     struct ubi_vid_hdr *vid_hdr);
+		     struct ubi_vid_io_buf *vidb);
 int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
 unsigned long long ubi_next_sqnum(struct ubi_device *ubi);
 int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
@@ -885,9 +933,9 @@
 int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
 			struct ubi_ec_hdr *ec_hdr);
 int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
-			struct ubi_vid_hdr *vid_hdr, int verbose);
+			struct ubi_vid_io_buf *vidb, int verbose);
 int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
-			 struct ubi_vid_hdr *vid_hdr);
+			 struct ubi_vid_io_buf *vidb);
 
 /* build.c */
 int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
@@ -1008,44 +1056,68 @@
 }
 
 /**
- * ubi_zalloc_vid_hdr - allocate a volume identifier header object.
- * @ubi: UBI device description object
- * @gfp_flags: GFP flags to allocate with
- *
- * This function returns a pointer to the newly allocated and zero-filled
- * volume identifier header object in case of success and %NULL in case of
- * failure.
+ * ubi_init_vid_buf - Initialize a VID buffer
+ * @ubi: the UBI device
+ * @vidb: the VID buffer to initialize
+ * @buf: the underlying buffer
  */
-static inline struct ubi_vid_hdr *
-ubi_zalloc_vid_hdr(const struct ubi_device *ubi, gfp_t gfp_flags)
+static inline void ubi_init_vid_buf(const struct ubi_device *ubi,
+				    struct ubi_vid_io_buf *vidb,
+				    void *buf)
 {
-	void *vid_hdr;
+	if (buf)
+		memset(buf, 0, ubi->vid_hdr_alsize);
 
-	vid_hdr = kzalloc(ubi->vid_hdr_alsize, gfp_flags);
-	if (!vid_hdr)
-		return NULL;
-
-	/*
-	 * VID headers may be stored at un-aligned flash offsets, so we shift
-	 * the pointer.
-	 */
-	return vid_hdr + ubi->vid_hdr_shift;
+	vidb->buffer = buf;
+	vidb->hdr = buf + ubi->vid_hdr_shift;
 }
 
 /**
- * ubi_free_vid_hdr - free a volume identifier header object.
- * @ubi: UBI device description object
- * @vid_hdr: the object to free
+ * ubi_init_vid_buf - Allocate a VID buffer
+ * @ubi: the UBI device
+ * @gfp_flags: GFP flags to use for the allocation
  */
-static inline void ubi_free_vid_hdr(const struct ubi_device *ubi,
-				    struct ubi_vid_hdr *vid_hdr)
+static inline struct ubi_vid_io_buf *
+ubi_alloc_vid_buf(const struct ubi_device *ubi, gfp_t gfp_flags)
 {
-	void *p = vid_hdr;
+	struct ubi_vid_io_buf *vidb;
+	void *buf;
 
-	if (!p)
+	vidb = kzalloc(sizeof(*vidb), gfp_flags);
+	if (!vidb)
+		return NULL;
+
+	buf = kmalloc(ubi->vid_hdr_alsize, gfp_flags);
+	if (!buf) {
+		kfree(vidb);
+		return NULL;
+	}
+
+	ubi_init_vid_buf(ubi, vidb, buf);
+
+	return vidb;
+}
+
+/**
+ * ubi_free_vid_buf - Free a VID buffer
+ * @vidb: the VID buffer to free
+ */
+static inline void ubi_free_vid_buf(struct ubi_vid_io_buf *vidb)
+{
+	if (!vidb)
 		return;
 
-	kfree(p - ubi->vid_hdr_shift);
+	kfree(vidb->buffer);
+	kfree(vidb);
+}
+
+/**
+ * ubi_get_vid_hdr - Get the VID header attached to a VID buffer
+ * @vidb: VID buffer
+ */
+static inline struct ubi_vid_hdr *ubi_get_vid_hdr(struct ubi_vid_io_buf *vidb)
+{
+	return vidb->hdr;
 }
 
 /*
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 0138f52..7ac78c1 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -138,7 +138,7 @@
 {
 	struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
 
-	kfree(vol->eba_tbl);
+	ubi_eba_replace_table(vol, NULL);
 	kfree(vol);
 }
 
@@ -158,6 +158,7 @@
 	int i, err, vol_id = req->vol_id, do_free = 1;
 	struct ubi_volume *vol;
 	struct ubi_vtbl_record vtbl_rec;
+	struct ubi_eba_table *eba_tbl = NULL;
 	dev_t dev;
 
 	if (ubi->ro_mode)
@@ -241,14 +242,13 @@
 	if (err)
 		goto out_acc;
 
-	vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), GFP_KERNEL);
-	if (!vol->eba_tbl) {
-		err = -ENOMEM;
+	eba_tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
+	if (IS_ERR(eba_tbl)) {
+		err = PTR_ERR(eba_tbl);
 		goto out_acc;
 	}
 
-	for (i = 0; i < vol->reserved_pebs; i++)
-		vol->eba_tbl[i] = UBI_LEB_UNMAPPED;
+	ubi_eba_replace_table(vol, eba_tbl);
 
 	if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
 		vol->used_ebs = vol->reserved_pebs;
@@ -329,7 +329,7 @@
 	cdev_del(&vol->cdev);
 out_mapping:
 	if (do_free)
-		kfree(vol->eba_tbl);
+		ubi_eba_destroy_table(eba_tbl);
 out_acc:
 	spin_lock(&ubi->volumes_lock);
 	ubi->rsvd_pebs -= vol->reserved_pebs;
@@ -427,10 +427,11 @@
  */
 int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
 {
-	int i, err, pebs, *new_mapping;
+	int i, err, pebs;
 	struct ubi_volume *vol = desc->vol;
 	struct ubi_device *ubi = vol->ubi;
 	struct ubi_vtbl_record vtbl_rec;
+	struct ubi_eba_table *new_eba_tbl = NULL;
 	int vol_id = vol->vol_id;
 
 	if (ubi->ro_mode)
@@ -450,12 +451,9 @@
 	if (reserved_pebs == vol->reserved_pebs)
 		return 0;
 
-	new_mapping = kmalloc(reserved_pebs * sizeof(int), GFP_KERNEL);
-	if (!new_mapping)
-		return -ENOMEM;
-
-	for (i = 0; i < reserved_pebs; i++)
-		new_mapping[i] = UBI_LEB_UNMAPPED;
+	new_eba_tbl = ubi_eba_create_table(vol, reserved_pebs);
+	if (IS_ERR(new_eba_tbl))
+		return PTR_ERR(new_eba_tbl);
 
 	spin_lock(&ubi->volumes_lock);
 	if (vol->ref_count > 1) {
@@ -481,10 +479,8 @@
 		}
 		ubi->avail_pebs -= pebs;
 		ubi->rsvd_pebs += pebs;
-		for (i = 0; i < vol->reserved_pebs; i++)
-			new_mapping[i] = vol->eba_tbl[i];
-		kfree(vol->eba_tbl);
-		vol->eba_tbl = new_mapping;
+		ubi_eba_copy_table(vol, new_eba_tbl, vol->reserved_pebs);
+		ubi_eba_replace_table(vol, new_eba_tbl);
 		spin_unlock(&ubi->volumes_lock);
 	}
 
@@ -498,10 +494,8 @@
 		ubi->rsvd_pebs += pebs;
 		ubi->avail_pebs -= pebs;
 		ubi_update_reserved(ubi);
-		for (i = 0; i < reserved_pebs; i++)
-			new_mapping[i] = vol->eba_tbl[i];
-		kfree(vol->eba_tbl);
-		vol->eba_tbl = new_mapping;
+		ubi_eba_copy_table(vol, new_eba_tbl, reserved_pebs);
+		ubi_eba_replace_table(vol, new_eba_tbl);
 		spin_unlock(&ubi->volumes_lock);
 	}
 
@@ -543,7 +537,7 @@
 		spin_unlock(&ubi->volumes_lock);
 	}
 out_free:
-	kfree(new_mapping);
+	kfree(new_eba_tbl);
 	return err;
 }
 
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index d85c197..263743e 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -299,15 +299,18 @@
 		       int copy, void *vtbl)
 {
 	int err, tries = 0;
+	struct ubi_vid_io_buf *vidb;
 	struct ubi_vid_hdr *vid_hdr;
 	struct ubi_ainf_peb *new_aeb;
 
 	dbg_gen("create volume table (copy #%d)", copy + 1);
 
-	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
-	if (!vid_hdr)
+	vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+	if (!vidb)
 		return -ENOMEM;
 
+	vid_hdr = ubi_get_vid_hdr(vidb);
+
 retry:
 	new_aeb = ubi_early_get_peb(ubi, ai);
 	if (IS_ERR(new_aeb)) {
@@ -324,7 +327,7 @@
 	vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum);
 
 	/* The EC header is already there, write the VID header */
-	err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vid_hdr);
+	err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vidb);
 	if (err)
 		goto write_error;
 
@@ -338,8 +341,8 @@
 	 * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
 	 */
 	err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
-	kmem_cache_free(ai->aeb_slab_cache, new_aeb);
-	ubi_free_vid_hdr(ubi, vid_hdr);
+	ubi_free_aeb(ai, new_aeb);
+	ubi_free_vid_buf(vidb);
 	return err;
 
 write_error:
@@ -351,9 +354,9 @@
 		list_add(&new_aeb->u.list, &ai->erase);
 		goto retry;
 	}
-	kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+	ubi_free_aeb(ai, new_aeb);
 out_free:
-	ubi_free_vid_hdr(ubi, vid_hdr);
+	ubi_free_vid_buf(vidb);
 	return err;
 
 }
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index f453326..b5b8cd6 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -580,7 +580,7 @@
  * failure.
  */
 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
-			  int vol_id, int lnum, int torture)
+			  int vol_id, int lnum, int torture, bool nested)
 {
 	struct ubi_work *wl_wrk;
 
@@ -599,7 +599,10 @@
 	wl_wrk->lnum = lnum;
 	wl_wrk->torture = torture;
 
-	schedule_ubi_work(ubi, wl_wrk);
+	if (nested)
+		__schedule_ubi_work(ubi, wl_wrk);
+	else
+		schedule_ubi_work(ubi, wl_wrk);
 	return 0;
 }
 
@@ -644,11 +647,12 @@
 				int shutdown)
 {
 	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
-	int vol_id = -1, lnum = -1;
+	int erase = 0, keep = 0, vol_id = -1, lnum = -1;
 #ifdef CONFIG_MTD_UBI_FASTMAP
 	int anchor = wrk->anchor;
 #endif
 	struct ubi_wl_entry *e1, *e2;
+	struct ubi_vid_io_buf *vidb;
 	struct ubi_vid_hdr *vid_hdr;
 	int dst_leb_clean = 0;
 
@@ -656,10 +660,13 @@
 	if (shutdown)
 		return 0;
 
-	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
-	if (!vid_hdr)
+	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+	if (!vidb)
 		return -ENOMEM;
 
+	vid_hdr = ubi_get_vid_hdr(vidb);
+
+	down_read(&ubi->fm_eba_sem);
 	mutex_lock(&ubi->move_mutex);
 	spin_lock(&ubi->wl_lock);
 	ubi_assert(!ubi->move_from && !ubi->move_to);
@@ -753,7 +760,7 @@
 	 * which is being moved was unmapped.
 	 */
 
-	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
+	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
 	if (err && err != UBI_IO_BITFLIPS) {
 		dst_leb_clean = 1;
 		if (err == UBI_IO_FF) {
@@ -780,6 +787,16 @@
 			       e1->pnum);
 			scrubbing = 1;
 			goto out_not_moved;
+		} else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
+			/*
+			 * While a full scan would detect interrupted erasures
+			 * at attach time we can face them here when attached from
+			 * Fastmap.
+			 */
+			dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
+			       e1->pnum);
+			erase = 1;
+			goto out_not_moved;
 		}
 
 		ubi_err(ubi, "error %d while reading VID header from PEB %d",
@@ -790,7 +807,7 @@
 	vol_id = be32_to_cpu(vid_hdr->vol_id);
 	lnum = be32_to_cpu(vid_hdr->lnum);
 
-	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
+	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
 	if (err) {
 		if (err == MOVE_CANCEL_RACE) {
 			/*
@@ -815,6 +832,7 @@
 			 * Target PEB had bit-flips or write error - torture it.
 			 */
 			torture = 1;
+			keep = 1;
 			goto out_not_moved;
 		}
 
@@ -847,7 +865,7 @@
 	if (scrubbing)
 		ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
 			e1->pnum, vol_id, lnum, e2->pnum);
-	ubi_free_vid_hdr(ubi, vid_hdr);
+	ubi_free_vid_buf(vidb);
 
 	spin_lock(&ubi->wl_lock);
 	if (!ubi->move_to_put) {
@@ -879,6 +897,7 @@
 
 	dbg_wl("done");
 	mutex_unlock(&ubi->move_mutex);
+	up_read(&ubi->fm_eba_sem);
 	return 0;
 
 	/*
@@ -901,7 +920,7 @@
 		ubi->erroneous_peb_count += 1;
 	} else if (scrubbing)
 		wl_tree_add(e1, &ubi->scrub);
-	else
+	else if (keep)
 		wl_tree_add(e1, &ubi->used);
 	if (dst_leb_clean) {
 		wl_tree_add(e2, &ubi->free);
@@ -913,7 +932,7 @@
 	ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
-	ubi_free_vid_hdr(ubi, vid_hdr);
+	ubi_free_vid_buf(vidb);
 	if (dst_leb_clean) {
 		ensure_wear_leveling(ubi, 1);
 	} else {
@@ -922,7 +941,14 @@
 			goto out_ro;
 	}
 
+	if (erase) {
+		err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
+		if (err)
+			goto out_ro;
+	}
+
 	mutex_unlock(&ubi->move_mutex);
+	up_read(&ubi->fm_eba_sem);
 	return 0;
 
 out_error:
@@ -937,13 +963,14 @@
 	ubi->move_to_put = ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
-	ubi_free_vid_hdr(ubi, vid_hdr);
+	ubi_free_vid_buf(vidb);
 	wl_entry_destroy(ubi, e1);
 	wl_entry_destroy(ubi, e2);
 
 out_ro:
 	ubi_ro_mode(ubi);
 	mutex_unlock(&ubi->move_mutex);
+	up_read(&ubi->fm_eba_sem);
 	ubi_assert(err != 0);
 	return err < 0 ? err : -EIO;
 
@@ -951,7 +978,8 @@
 	ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 	mutex_unlock(&ubi->move_mutex);
-	ubi_free_vid_hdr(ubi, vid_hdr);
+	up_read(&ubi->fm_eba_sem);
+	ubi_free_vid_buf(vidb);
 	return 0;
 }
 
@@ -1073,7 +1101,7 @@
 		int err1;
 
 		/* Re-schedule the LEB for erasure */
-		err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
+		err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
 		if (err1) {
 			wl_entry_destroy(ubi, e);
 			err = err1;
@@ -1254,7 +1282,7 @@
 	}
 	spin_unlock(&ubi->wl_lock);
 
-	err = schedule_erase(ubi, e, vol_id, lnum, torture);
+	err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
 	if (err) {
 		spin_lock(&ubi->wl_lock);
 		wl_tree_add(e, &ubi->used);
@@ -1545,7 +1573,7 @@
 		e->pnum = aeb->pnum;
 		e->ec = aeb->ec;
 		ubi->lookuptbl[e->pnum] = e;
-		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
+		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
 			wl_entry_destroy(ubi, e);
 			goto out_free;
 		}
@@ -1624,7 +1652,7 @@
 			e->ec = aeb->ec;
 			ubi_assert(!ubi->lookuptbl[e->pnum]);
 			ubi->lookuptbl[e->pnum] = e;
-			if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
+			if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
 				wl_entry_destroy(ubi, e);
 				goto out_free;
 			}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3f31ca3..5fa36eb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -471,9 +471,9 @@
 		/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
 		strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
 		mii = if_mii(&ifr);
-		if (IOCTL(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
+		if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
 			mii->reg_num = MII_BMSR;
-			if (IOCTL(slave_dev, &ifr, SIOCGMIIREG) == 0)
+			if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
 				return mii->val_out & BMSR_LSTATUS;
 		}
 	}
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index cc9e6bd..76fb855 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -17,7 +17,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/kconfig.h>
 #include <linux/module.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 6ea0e5f..856379c 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1046,7 +1046,7 @@
 
 	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
 		BGMAC_DS_MM_SHIFT;
-	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
+	if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) || mode != 0)
 		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
 	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2)
 		bgmac_cco_ctl_maskset(bgmac, 1, ~0,
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 63144bb..b32444a 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -3117,6 +3117,7 @@
 		if (dev->phydev)
 			phy_disconnect(dev->phydev);
 		mdiobus_unregister(bp->mii_bus);
+		dev->phydev = NULL;
 		mdiobus_free(bp->mii_bus);
 
 		/* Shutdown the PHY if there is a GPIO reset */
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index bddb198..380a641 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -693,7 +693,7 @@
 				while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
 				       !(reg_val &
 					 CN23XX_PKT_INPUT_CTL_QUIET) &&
-				       loop--) {
+				       --loop) {
 					reg_val = octeon_read_csr64(
 					    oct,
 					    CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 9cffe48..1fb5d72 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -2728,6 +2728,26 @@
 	return 0;
 }
 
+#define NCSI_UPDATE_LOG	"NCSI section update is not supported in FW ver %s\n"
+static bool be_fw_ncsi_supported(char *ver)
+{
+	int v1[4] = {3, 102, 148, 0}; /* Min ver that supports NCSI FW */
+	int v2[4];
+	int i;
+
+	if (sscanf(ver, "%d.%d.%d.%d", &v2[0], &v2[1], &v2[2], &v2[3]) != 4)
+		return false;
+
+	for (i = 0; i < 4; i++) {
+		if (v1[i] < v2[i])
+			return true;
+		else if (v1[i] > v2[i])
+			return false;
+	}
+
+	return true;
+}
+
 /* For BE2, BE3 and BE3-R */
 static int be_flash_BEx(struct be_adapter *adapter,
 			const struct firmware *fw,
@@ -2805,8 +2825,10 @@
 			continue;
 
 		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
-		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
+		    !be_fw_ncsi_supported(adapter->fw_ver)) {
+			dev_info(dev, NCSI_UPDATE_LOG, adapter->fw_ver);
 			continue;
+		}
 
 		if (pflashcomp[i].optype == OPTYPE_PHY_FW  &&
 		    !phy_flashing_required(adapter))
@@ -3527,6 +3549,11 @@
 		for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
 			adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
 				(BIT_MASK(16) - 1);
+		/* For BEx, since GET_FUNC_CONFIG command is not
+		 * supported, we read funcnum here as a workaround.
+		 */
+		if (BEx_chip(adapter))
+			adapter->pf_num = attribs->hba_attribs.pci_funcnum;
 	}
 
 err:
@@ -4950,7 +4977,7 @@
 {
 	int status;
 
-	if (BEx_chip(adapter))
+	if (BE2_chip(adapter))
 		return -EOPNOTSUPP;
 
 	status = __be_cmd_set_logical_link_config(adapter, link_state,
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 1bd82bc..09da2d8 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1720,7 +1720,11 @@
 	u32 rsvd2[55];
 	u8 rsvd3[3];
 	u8 phy_port;
-	u32 rsvd4[13];
+	u32 rsvd4[15];
+	u8 rsvd5[2];
+	u8 pci_funcnum;
+	u8 rsvd6;
+	u32 rsvd7[6];
 } __packed;
 
 struct mgmt_controller_attrib {
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 92942c8..36e4232 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005-2016 Broadcom.
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index dcb930a..cece8a0 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -724,14 +724,24 @@
 	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
 }
 
+static int be_gso_hdr_len(struct sk_buff *skb)
+{
+	if (skb->encapsulation)
+		return skb_inner_transport_offset(skb) +
+		       inner_tcp_hdrlen(skb);
+	return skb_transport_offset(skb) + tcp_hdrlen(skb);
+}
+
 static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
 {
 	struct be_tx_stats *stats = tx_stats(txo);
-	u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
+	u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
+	/* Account for headers which get duplicated in TSO pkt */
+	u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;
 
 	u64_stats_update_begin(&stats->sync);
 	stats->tx_reqs++;
-	stats->tx_bytes += skb->len;
+	stats->tx_bytes += skb->len + dup_hdr_len;
 	stats->tx_pkts += tx_pkts;
 	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
 		stats->tx_vxlan_offload_pkts += tx_pkts;
diff --git a/drivers/net/ethernet/freescale/fman/Makefile b/drivers/net/ethernet/freescale/fman/Makefile
index 51fd2e6..6049177 100644
--- a/drivers/net/ethernet/freescale/fman/Makefile
+++ b/drivers/net/ethernet/freescale/fman/Makefile
@@ -1,7 +1,9 @@
 subdir-ccflags-y +=  -I$(srctree)/drivers/net/ethernet/freescale/fman
 
-obj-y		+= fsl_fman.o fsl_fman_mac.o fsl_mac.o
+obj-$(CONFIG_FSL_FMAN) += fsl_fman.o
+obj-$(CONFIG_FSL_FMAN) += fsl_fman_port.o
+obj-$(CONFIG_FSL_FMAN) += fsl_mac.o
 
-fsl_fman-objs	:= fman_muram.o fman.o fman_sp.o fman_port.o
-fsl_fman_mac-objs := fman_dtsec.o fman_memac.o fman_tgec.o
-fsl_mac-objs += mac.o
+fsl_fman-objs	:= fman_muram.o fman.o fman_sp.o
+fsl_fman_port-objs := fman_port.o
+fsl_mac-objs:= mac.o fman_dtsec.o fman_memac.o fman_tgec.o
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 1de2e1e..dafd9e1 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -618,7 +618,7 @@
 	unsigned long cam_offset;
 	size_t cam_size;
 	/* Fifo in MURAM */
-	int fifo_offset;
+	unsigned long fifo_offset;
 	size_t fifo_size;
 
 	u32 liodn_base[64];
@@ -2036,7 +2036,7 @@
 	/* allocate MURAM for FIFO according to total size */
 	fman->fifo_offset = fman_muram_alloc(fman->muram,
 					     fman->state->total_fifo_size);
-	if (IS_ERR_VALUE(fman->cam_offset)) {
+	if (IS_ERR_VALUE(fman->fifo_offset)) {
 		free_init_resources(fman);
 		dev_err(fman->dev, "%s: MURAM alloc for BMI FIFO failed\n",
 			__func__);
@@ -2115,6 +2115,7 @@
 	fman->intr_mng[event].isr_cb = isr_cb;
 	fman->intr_mng[event].src_handle = src_arg;
 }
+EXPORT_SYMBOL(fman_register_intr);
 
 /**
  * fman_unregister_intr
@@ -2138,6 +2139,7 @@
 	fman->intr_mng[event].isr_cb = NULL;
 	fman->intr_mng[event].src_handle = NULL;
 }
+EXPORT_SYMBOL(fman_unregister_intr);
 
 /**
  * fman_set_port_params
@@ -2241,6 +2243,7 @@
 	spin_unlock_irqrestore(&fman->spinlock, flags);
 	return err;
 }
+EXPORT_SYMBOL(fman_set_port_params);
 
 /**
  * fman_reset_mac
@@ -2310,6 +2313,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(fman_reset_mac);
 
 /**
  * fman_set_mac_max_frame
@@ -2327,8 +2331,7 @@
 	 * or equal to the port's max
 	 */
 	if ((!fman->state->port_mfl[mac_id]) ||
-	    (fman->state->port_mfl[mac_id] &&
-	    (mfl <= fman->state->port_mfl[mac_id]))) {
+	    (mfl <= fman->state->port_mfl[mac_id])) {
 		fman->state->mac_mfl[mac_id] = mfl;
 	} else {
 		dev_warn(fman->dev, "%s: MAC max_frame_length is larger than Port max_frame_length\n",
@@ -2337,6 +2340,7 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL(fman_set_mac_max_frame);
 
 /**
  * fman_get_clock_freq
@@ -2363,6 +2367,7 @@
 {
 	return fman->state->bmi_max_fifo_size;
 }
+EXPORT_SYMBOL(fman_get_bmi_max_fifo_size);
 
 /**
  * fman_get_revision
@@ -2384,6 +2389,7 @@
 				FPM_REV1_MAJOR_SHIFT);
 	rev_info->minor = tmp & FPM_REV1_MINOR_MASK;
 }
+EXPORT_SYMBOL(fman_get_revision);
 
 /**
  * fman_get_qman_channel_id
@@ -2419,6 +2425,7 @@
 
 	return fman->state->qman_channel_base + i;
 }
+EXPORT_SYMBOL(fman_get_qman_channel_id);
 
 /**
  * fman_get_mem_region
@@ -2432,6 +2439,7 @@
 {
 	return fman->state->res;
 }
+EXPORT_SYMBOL(fman_get_mem_region);
 
 /* Bootargs defines */
 /* Extra headroom for RX buffers - Default, min and max */
@@ -2453,7 +2461,7 @@
  * particular forwarding scenarios that add extra headers to the
  * forwarded frame.
  */
-int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
+static int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
 module_param(fsl_fm_rx_extra_headroom, int, 0);
 MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
 
@@ -2466,7 +2474,7 @@
  * Could be overridden once, at boot-time, via the
  * fm_set_max_frm() callback.
  */
-int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
+static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
 module_param(fsl_fm_max_frm, int, 0);
 MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces");
 
@@ -2538,6 +2546,7 @@
 {
 	return (struct fman *)(dev_get_drvdata(get_device(fm_dev)));
 }
+EXPORT_SYMBOL(fman_bind);
 
 static irqreturn_t fman_err_irq(int irq, void *handle)
 {
@@ -2727,8 +2736,8 @@
 	struct fman *fman;
 	struct device_node *fm_node, *muram_node;
 	struct resource *res;
-	const u32 *u32_prop;
-	int lenp, err, irq;
+	u32 val, range[2];
+	int err, irq;
 	struct clk *clk;
 	u32 clk_rate;
 	phys_addr_t phys_base_addr;
@@ -2740,16 +2749,13 @@
 
 	fm_node = of_node_get(of_dev->dev.of_node);
 
-	u32_prop = (const u32 *)of_get_property(fm_node, "cell-index", &lenp);
-	if (!u32_prop) {
-		dev_err(&of_dev->dev, "%s: of_get_property(%s, cell-index) failed\n",
+	err = of_property_read_u32(fm_node, "cell-index", &val);
+	if (err) {
+		dev_err(&of_dev->dev, "%s: failed to read cell-index for %s\n",
 			__func__, fm_node->full_name);
 		goto fman_node_put;
 	}
-	if (WARN_ON(lenp != sizeof(u32)))
-		goto fman_node_put;
-
-	fman->dts_params.id = (u8)fdt32_to_cpu(u32_prop[0]);
+	fman->dts_params.id = (u8)val;
 
 	/* Get the FM interrupt */
 	res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
@@ -2796,18 +2802,15 @@
 	/* Rounding to MHz */
 	fman->dts_params.clk_freq = DIV_ROUND_UP(clk_rate, 1000000);
 
-	u32_prop = (const u32 *)of_get_property(fm_node,
-						"fsl,qman-channel-range",
-						&lenp);
-	if (!u32_prop) {
-		dev_err(&of_dev->dev, "%s: of_get_property(%s, fsl,qman-channel-range) failed\n",
+	err = of_property_read_u32_array(fm_node, "fsl,qman-channel-range",
+					 &range[0], 2);
+	if (err) {
+		dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %s\n",
 			__func__, fm_node->full_name);
 		goto fman_node_put;
 	}
-	if (WARN_ON(lenp != sizeof(u32) * 2))
-		goto fman_node_put;
-	fman->dts_params.qman_channel_base = fdt32_to_cpu(u32_prop[0]);
-	fman->dts_params.num_of_qman_channels = fdt32_to_cpu(u32_prop[1]);
+	fman->dts_params.qman_channel_base = range[0];
+	fman->dts_params.num_of_qman_channels = range[1];
 
 	/* Get the MURAM base address and size */
 	muram_node = of_find_matching_node(fm_node, fman_muram_match);
@@ -2858,7 +2861,7 @@
 
 	fman->dts_params.base_addr =
 		devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
-	if (fman->dts_params.base_addr == 0) {
+	if (!fman->dts_params.base_addr) {
 		dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
 		goto fman_free;
 	}
@@ -2930,7 +2933,7 @@
 	{}
 };
 
-MODULE_DEVICE_TABLE(of, fm_match);
+MODULE_DEVICE_TABLE(of, fman_match);
 
 static struct platform_driver fman_driver = {
 	.driver = {
@@ -2940,4 +2943,25 @@
 	.probe = fman_probe,
 };
 
-builtin_platform_driver(fman_driver);
+static int __init fman_load(void)
+{
+	int err;
+
+	pr_debug("FSL DPAA FMan driver\n");
+
+	err = platform_driver_register(&fman_driver);
+	if (err < 0)
+		pr_err("Error, platform_driver_register() = %d\n", err);
+
+	return err;
+}
+module_init(fman_load);
+
+static void __exit fman_unload(void)
+{
+	platform_driver_unregister(&fman_driver);
+}
+module_exit(fman_unload);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Freescale DPAA Frame Manager driver");
diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
index ddf0260..dd6d052 100644
--- a/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -191,10 +191,6 @@
 	u16 max_speed;
 	/* A handle to the FM object this port related to */
 	void *fm;
-	/* MDIO exceptions interrupt source - not valid for all
-	 * MACs; MUST be set to 0 for MACs that don't have
-	 * mdio-irq, or for polling
-	 */
 	void *dev_id; /* device cookie used by the exception cbs */
 	fman_mac_exception_cb *event_cb;    /* MDIO Events Callback Routine */
 	fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 45e98fd..53ef51e 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -507,6 +507,9 @@
 {
 	u16 tmp_reg16;
 
+	if (WARN_ON(!memac->pcsphy))
+		return;
+
 	/* SGMII mode */
 	tmp_reg16 = IF_MODE_SGMII_EN;
 	if (!fixed_link)
@@ -1151,7 +1154,8 @@
 	/* Save FMan revision */
 	fman_get_revision(memac->fm, &memac->fm_rev_info);
 
-	if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
+	if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
+	    memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
 		if (!params->internal_phy_node) {
 			pr_err("PCS PHY node is not available\n");
 			memac_free(memac);
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 47394c4..5ec94d2 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -150,7 +150,8 @@
  *
  * Free an allocated memory from FM-MURAM partition.
  */
-void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size)
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
+			 size_t size)
 {
 	unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
 
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
index 889649a..453bf84 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -46,6 +46,7 @@
 
 unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);
 
-void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size);
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
+			 size_t size);
 
 #endif /* __FM_MURAM_EXT */
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 70c198d..9f3bb50 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1477,7 +1477,8 @@
  */
 int fman_port_disable(struct fman_port *port)
 {
-	u32 __iomem *bmi_cfg_reg, *bmi_status_reg, tmp;
+	u32 __iomem *bmi_cfg_reg, *bmi_status_reg;
+	u32 tmp;
 	bool rx_port, failure = false;
 	int count;
 
@@ -1553,7 +1554,8 @@
  */
 int fman_port_enable(struct fman_port *port)
 {
-	u32 __iomem *bmi_cfg_reg, tmp;
+	u32 __iomem *bmi_cfg_reg;
+	u32 tmp;
 	bool rx_port;
 
 	if (!is_init_done(port->cfg))
@@ -1623,7 +1625,7 @@
 	struct device_node *fm_node, *port_node;
 	struct resource res;
 	struct resource *dev_res;
-	const u32 *u32_prop;
+	u32 val;
 	int err = 0, lenp;
 	enum fman_port_type port_type;
 	u16 port_speed;
@@ -1652,28 +1654,20 @@
 		goto return_err;
 	}
 
-	u32_prop = (const u32 *)of_get_property(port_node, "cell-index", &lenp);
-	if (!u32_prop) {
-		dev_err(port->dev, "%s: of_get_property(%s, cell-index) failed\n",
+	err = of_property_read_u32(port_node, "cell-index", &val);
+	if (err) {
+		dev_err(port->dev, "%s: reading cell-index for %s failed\n",
 			__func__, port_node->full_name);
 		err = -EINVAL;
 		goto return_err;
 	}
-	if (WARN_ON(lenp != sizeof(u32))) {
-		err = -EINVAL;
-		goto return_err;
-	}
-	port_id = (u8)fdt32_to_cpu(u32_prop[0]);
-
+	port_id = (u8)val;
 	port->dts_params.id = port_id;
 
 	if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
 		port_type = FMAN_PORT_TYPE_TX;
 		port_speed = 1000;
-		u32_prop = (const u32 *)of_get_property(port_node,
-							"fsl,fman-10g-port",
-							&lenp);
-		if (u32_prop)
+		if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
 			port_speed = 10000;
 
 	} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
@@ -1686,9 +1680,7 @@
 	} else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
 		port_type = FMAN_PORT_TYPE_RX;
 		port_speed = 1000;
-		u32_prop = (const u32 *)of_get_property(port_node,
-						  "fsl,fman-10g-port", &lenp);
-		if (u32_prop)
+		if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
 			port_speed = 10000;
 
 	} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
@@ -1743,7 +1735,7 @@
 
 	port->dts_params.base_addr = devm_ioremap(port->dev, res.start,
 						  resource_size(&res));
-	if (port->dts_params.base_addr == 0)
+	if (!port->dts_params.base_addr)
 		dev_err(port->dev, "%s: devm_ioremap() failed\n", __func__);
 
 	dev_set_drvdata(&of_dev->dev, port);
@@ -1775,4 +1767,25 @@
 	.probe = fman_port_probe,
 };
 
-builtin_platform_driver(fman_port_driver);
+static int __init fman_port_load(void)
+{
+	int err;
+
+	pr_debug("FSL DPAA FMan driver\n");
+
+	err = platform_driver_register(&fman_port_driver);
+	if (err < 0)
+		pr_err("Error, platform_driver_register() = %d\n", err);
+
+	return err;
+}
+module_init(fman_port_load);
+
+static void __exit fman_port_unload(void)
+{
+	platform_driver_unregister(&fman_port_driver);
+}
+module_exit(fman_port_unload);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Freescale DPAA Frame Manager Port driver");
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.c b/drivers/net/ethernet/freescale/fman/fman_sp.c
index f9e7aa3..248f5bc 100644
--- a/drivers/net/ethernet/freescale/fman/fman_sp.c
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.c
@@ -80,6 +80,7 @@
 		}
 	}
 }
+EXPORT_SYMBOL(fman_sp_set_buf_pools_in_asc_order_of_buf_sizes);
 
 int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
 				int_context_data_copy,
@@ -164,3 +165,5 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(fman_sp_build_buffer_struct);
+
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index e33d9d2..8fe6b3e 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -469,9 +469,9 @@
 /* Initializes driver's PHY state, and attaches to the PHY.
  * Returns 0 on success.
  */
-static int init_phy(struct net_device *net_dev,
-		    struct mac_device *mac_dev,
-		    void (*adj_lnk)(struct net_device *))
+static struct phy_device *init_phy(struct net_device *net_dev,
+				   struct mac_device *mac_dev,
+				   void (*adj_lnk)(struct net_device *))
 {
 	struct phy_device	*phy_dev;
 	struct mac_priv_s	*priv = mac_dev->priv;
@@ -480,7 +480,7 @@
 				 priv->phy_if);
 	if (!phy_dev) {
 		netdev_err(net_dev, "Could not connect to PHY\n");
-		return -ENODEV;
+		return NULL;
 	}
 
 	/* Remove any features not supported by the controller */
@@ -493,23 +493,23 @@
 
 	mac_dev->phy_dev = phy_dev;
 
-	return 0;
+	return phy_dev;
 }
 
-static int dtsec_init_phy(struct net_device *net_dev,
-			  struct mac_device *mac_dev)
+static struct phy_device *dtsec_init_phy(struct net_device *net_dev,
+					 struct mac_device *mac_dev)
 {
 	return init_phy(net_dev, mac_dev, &adjust_link_dtsec);
 }
 
-static int tgec_init_phy(struct net_device *net_dev,
-			 struct mac_device *mac_dev)
+static struct phy_device *tgec_init_phy(struct net_device *net_dev,
+					struct mac_device *mac_dev)
 {
 	return init_phy(net_dev, mac_dev, adjust_link_void);
 }
 
-static int memac_init_phy(struct net_device *net_dev,
-			  struct mac_device *mac_dev)
+static struct phy_device *memac_init_phy(struct net_device *net_dev,
+					 struct mac_device *mac_dev)
 {
 	return init_phy(net_dev, mac_dev, &adjust_link_memac);
 }
@@ -583,31 +583,6 @@
 
 static DEFINE_MUTEX(eth_lock);
 
-static const char phy_str[][11] = {
-	[PHY_INTERFACE_MODE_MII]		= "mii",
-	[PHY_INTERFACE_MODE_GMII]		= "gmii",
-	[PHY_INTERFACE_MODE_SGMII]		= "sgmii",
-	[PHY_INTERFACE_MODE_TBI]		= "tbi",
-	[PHY_INTERFACE_MODE_RMII]		= "rmii",
-	[PHY_INTERFACE_MODE_RGMII]		= "rgmii",
-	[PHY_INTERFACE_MODE_RGMII_ID]		= "rgmii-id",
-	[PHY_INTERFACE_MODE_RGMII_RXID]	= "rgmii-rxid",
-	[PHY_INTERFACE_MODE_RGMII_TXID]	= "rgmii-txid",
-	[PHY_INTERFACE_MODE_RTBI]		= "rtbi",
-	[PHY_INTERFACE_MODE_XGMII]		= "xgmii"
-};
-
-static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(phy_str); i++)
-		if (strcmp(str, phy_str[i]) == 0)
-			return (phy_interface_t)i;
-
-	return PHY_INTERFACE_MODE_MII;
-}
-
 static const u16 phy2speed[] = {
 	[PHY_INTERFACE_MODE_MII]		= SPEED_100,
 	[PHY_INTERFACE_MODE_GMII]		= SPEED_1000,
@@ -678,7 +653,7 @@
 
 static int mac_probe(struct platform_device *_of_dev)
 {
-	int			 err, i, lenp, nph;
+	int			 err, i, nph;
 	struct device		*dev;
 	struct device_node	*mac_node, *dev_node;
 	struct mac_device	*mac_dev;
@@ -686,9 +661,9 @@
 	struct resource		 res;
 	struct mac_priv_s	*priv;
 	const u8		*mac_addr;
-	const char		*char_prop;
-	const u32		*u32_prop;
+	u32			 val;
 	u8			fman_id;
+	int			phy_if;
 
 	dev = &_of_dev->dev;
 	mac_node = dev->of_node;
@@ -749,16 +724,15 @@
 	}
 
 	/* Get the FMan cell-index */
-	u32_prop = of_get_property(dev_node, "cell-index", &lenp);
-	if (!u32_prop) {
-		dev_err(dev, "of_get_property(%s, cell-index) failed\n",
+	err = of_property_read_u32(dev_node, "cell-index", &val);
+	if (err) {
+		dev_err(dev, "failed to read cell-index for %s\n",
 			dev_node->full_name);
 		err = -EINVAL;
 		goto _return_of_node_put;
 	}
-	WARN_ON(lenp != sizeof(u32));
 	/* cell-index 0 => FMan id 1 */
-	fman_id = (u8)(fdt32_to_cpu(u32_prop[0]) + 1);
+	fman_id = (u8)(val + 1);
 
 	priv->fman = fman_bind(&of_dev->dev);
 	if (!priv->fman) {
@@ -805,15 +779,14 @@
 	}
 
 	/* Get the cell-index */
-	u32_prop = of_get_property(mac_node, "cell-index", &lenp);
-	if (!u32_prop) {
-		dev_err(dev, "of_get_property(%s, cell-index) failed\n",
+	err = of_property_read_u32(mac_node, "cell-index", &val);
+	if (err) {
+		dev_err(dev, "failed to read cell-index for %s\n",
 			mac_node->full_name);
 		err = -EINVAL;
 		goto _return_dev_set_drvdata;
 	}
-	WARN_ON(lenp != sizeof(u32));
-	priv->cell_index = (u8)fdt32_to_cpu(u32_prop[0]);
+	priv->cell_index = (u8)val;
 
 	/* Get the MAC address */
 	mac_addr = of_get_mac_address(mac_node);
@@ -870,16 +843,14 @@
 	}
 
 	/* Get the PHY connection type */
-	char_prop = (const char *)of_get_property(mac_node,
-						  "phy-connection-type", NULL);
-	if (!char_prop) {
+	phy_if = of_get_phy_mode(mac_node);
+	if (phy_if < 0) {
 		dev_warn(dev,
-			 "of_get_property(%s, phy-connection-type) failed. Defaulting to MII\n",
+			 "of_get_phy_mode() for %s failed. Defaulting to SGMII\n",
 			 mac_node->full_name);
-		priv->phy_if = PHY_INTERFACE_MODE_MII;
-	} else {
-		priv->phy_if = str2phy(char_prop);
+		phy_if = PHY_INTERFACE_MODE_SGMII;
 	}
+	priv->phy_if = phy_if;
 
 	priv->speed		= phy2speed[priv->phy_if];
 	priv->max_speed		= priv->speed;
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index 0211cc9..d7313f0 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -58,7 +58,8 @@
 	bool tx_pause_active;
 	bool promisc;
 
-	int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
+	struct phy_device *(*init_phy)(struct net_device *net_dev,
+				       struct mac_device *mac_dev);
 	int (*init)(struct mac_device *mac_dev);
 	int (*start)(struct mac_device *mac_dev);
 	int (*stop)(struct mac_device *mac_dev);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index e28d960..2d0cb60 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -207,6 +207,7 @@
 	int ret;
 	char *mac_addr = (char *)addr;
 	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+	u8 port_num;
 
 	assert(mac_cb);
 
@@ -221,8 +222,11 @@
 		return ret;
 	}
 
-	ret = hns_mac_set_multi(mac_cb, DSAF_BASE_INNER_PORT_NUM,
-				mac_addr, true);
+	ret = hns_mac_get_inner_port_num(mac_cb, handle->vf_id, &port_num);
+	if (ret)
+		return ret;
+
+	ret = hns_mac_set_multi(mac_cb, port_num, mac_addr, true);
 	if (ret)
 		dev_err(handle->owner_dev,
 			"mac add mul_mac:%pM port%d  fail, ret = %#x!\n",
@@ -678,9 +682,6 @@
 		ret = -EINVAL;
 	}
 
-	if (!ret)
-		hns_dsaf_set_inner_lb(mac_cb->dsaf_dev, mac_cb->mac_id, en);
-
 	return ret;
 }
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index a68eef0..ec8c738 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -126,7 +126,7 @@
 			(enum mac_speed)speed, duplex);
 		if (ret) {
 			dev_err(mac_cb->dev,
-				"adjust_link failed,%s mac%d ret = %#x!\n",
+				"adjust_link failed, %s mac%d ret = %#x!\n",
 				mac_cb->dsaf_dev->ae_dev.name,
 				mac_cb->mac_id, ret);
 			return;
@@ -141,15 +141,16 @@
  *@port_num:port number
  *
  */
-static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
-				      u8 vmid, u8 *port_num)
+int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, u8 vmid, u8 *port_num)
 {
+	int q_num_per_vf, vf_num_per_port;
+	int vm_queue_id;
 	u8 tmp_port;
 
 	if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) {
 		if (mac_cb->mac_id != DSAF_MAX_PORT_NUM) {
 			dev_err(mac_cb->dev,
-				"input invalid,%s mac%d vmid%d !\n",
+				"input invalid, %s mac%d vmid%d !\n",
 				mac_cb->dsaf_dev->ae_dev.name,
 				mac_cb->mac_id, vmid);
 			return -EINVAL;
@@ -157,23 +158,29 @@
 	} else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) {
 		if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM) {
 			dev_err(mac_cb->dev,
-				"input invalid,%s mac%d vmid%d!\n",
+				"input invalid, %s mac%d vmid%d!\n",
 				mac_cb->dsaf_dev->ae_dev.name,
 				mac_cb->mac_id, vmid);
 			return -EINVAL;
 		}
 	} else {
-		dev_err(mac_cb->dev, "dsaf mode invalid,%s mac%d!\n",
+		dev_err(mac_cb->dev, "dsaf mode invalid, %s mac%d!\n",
 			mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id);
 		return -EINVAL;
 	}
 
 	if (vmid >= mac_cb->dsaf_dev->rcb_common[0]->max_vfn) {
-		dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d !\n",
+		dev_err(mac_cb->dev, "input invalid, %s mac%d vmid%d !\n",
 			mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vmid);
 		return -EINVAL;
 	}
 
+	q_num_per_vf = mac_cb->dsaf_dev->rcb_common[0]->max_q_per_vf;
+	vf_num_per_port = mac_cb->dsaf_dev->rcb_common[0]->max_vfn;
+
+	vm_queue_id = vmid * q_num_per_vf +
+			vf_num_per_port * q_num_per_vf * mac_cb->mac_id;
+
 	switch (mac_cb->dsaf_dev->dsaf_mode) {
 	case DSAF_MODE_ENABLE_FIX:
 		tmp_port = 0;
@@ -193,10 +200,10 @@
 	case DSAF_MODE_DISABLE_6PORT_2VM:
 	case DSAF_MODE_DISABLE_6PORT_4VM:
 	case DSAF_MODE_DISABLE_6PORT_16VM:
-		tmp_port = vmid;
+		tmp_port = vm_queue_id;
 		break;
 	default:
-		dev_err(mac_cb->dev, "dsaf mode invalid,%s mac%d!\n",
+		dev_err(mac_cb->dev, "dsaf mode invalid, %s mac%d!\n",
 			mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id);
 		return -EINVAL;
 	}
@@ -275,7 +282,7 @@
 			ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
 		if (ret) {
 			dev_err(dsaf_dev->dev,
-				"set mac mc port failed,%s mac%d ret = %#x!\n",
+				"set mac mc port failed, %s mac%d ret = %#x!\n",
 				mac_cb->dsaf_dev->ae_dev.name,
 				mac_cb->mac_id, ret);
 			return ret;
@@ -305,7 +312,7 @@
 		old_mac = &mac_cb->addr_entry_idx[vfn];
 	} else {
 		dev_err(mac_cb->dev,
-			"vf queue is too large,%s mac%d queue = %#x!\n",
+			"vf queue is too large, %s mac%d queue = %#x!\n",
 			mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vfn);
 		return -EINVAL;
 	}
@@ -547,7 +554,7 @@
 	struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
 
 	if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII && enable) {
-		dev_err(mac_cb->dev, "enable autoneg is not allowed!");
+		dev_err(mac_cb->dev, "enabling autoneg is not allowed!\n");
 		return -ENOTSUPP;
 	}
 
@@ -571,7 +578,7 @@
 
 	if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
 		if (is_ver1 && (tx_en || rx_en)) {
-			dev_err(mac_cb->dev, "macv1 cann't enable tx/rx_pause!");
+			dev_err(mac_cb->dev, "macv1 can't enable tx/rx_pause!\n");
 			return -EINVAL;
 		}
 	}
@@ -926,7 +933,7 @@
 	ret = hns_mac_get_mode(mac_cb->phy_if);
 	if (ret < 0) {
 		dev_err(dsaf_dev->dev,
-			"hns_mac_get_mode failed,mac%d ret = %#x!\n",
+			"hns_mac_get_mode failed, mac%d ret = %#x!\n",
 			mac_cb->mac_id, ret);
 		return ret;
 	}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 4cbdf14..d3a1f72 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -461,5 +461,7 @@
 int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
 			enum hnae_led_state status);
 void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en);
+int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
+			       u8 vmid, u8 *port_num);
 
 #endif /* _HNS_DSAF_MAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 8e5b3f5..8d70377 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -760,16 +760,6 @@
 				 DSAF_CFG_MIX_MODE_S, !!en);
 }
 
-void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en)
-{
-	if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
-	    dsaf_dev->mac_cb[mac_id]->mac_type == HNAE_PORT_DEBUG)
-		return;
-
-	dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
-			 DSAFV2_SERDES_LBK_EN_B, !!en);
-}
-
 /**
  * hns_dsaf_tbl_stat_en - tbl
  * @dsaf_id: dsa fabric id
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 35df187..c494fc5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -466,6 +466,5 @@
 				  u32 *en);
 int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
 				 u32 en);
-void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en);
 
 #endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index ef11077..f0ed80d6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -543,6 +543,22 @@
 			"error: coalesce_usecs setting supports 0~1023us\n");
 		return -EINVAL;
 	}
+
+	if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
+		if (timeout == 0)
+			/* set timeout to 0, Disable gap time */
+			dsaf_set_reg_field(rcb_common->io_base,
+					   RCB_INT_GAP_TIME_REG + port_idx * 4,
+					   PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
+					   0);
+		else
+			/* set timeout non 0, restore gap time to 1 */
+			dsaf_set_reg_field(rcb_common->io_base,
+					   RCB_INT_GAP_TIME_REG + port_idx * 4,
+					   PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
+					   1);
+	}
+
 	hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
 	return 0;
 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 4b8b803..878950a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -417,6 +417,7 @@
 #define RCB_CFG_OVERTIME_REG			0x9300
 #define RCB_CFG_PKTLINE_INT_NUM_REG		0x9304
 #define RCB_CFG_OVERTIME_INT_NUM_REG		0x9308
+#define RCB_INT_GAP_TIME_REG			0x9400
 #define RCB_PORT_CFG_OVERTIME_REG		0x9430
 
 #define RCB_RING_RX_RING_BASEADDR_L_REG		0x00000
@@ -898,6 +899,9 @@
 #define PPE_CNT_CLR_CE_B	0
 #define PPE_CNT_CLR_SNAP_EN_B	1
 
+#define PPE_INT_GAPTIME_B	0
+#define PPE_INT_GAPTIME_M	0x3ff
+
 #define PPE_COMMON_CNT_CLR_CE_B	0
 #define PPE_COMMON_CNT_CLR_SNAP_EN_B	1
 #define RCB_COM_TSO_MODE_B	0
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 059aaed..dff7b60 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -574,7 +574,6 @@
 	struct sk_buff *skb;
 	struct hnae_desc *desc;
 	struct hnae_desc_cb *desc_cb;
-	struct ethhdr *eh;
 	unsigned char *va;
 	int bnum, length, i;
 	int pull_len;
@@ -600,7 +599,6 @@
 		ring->stats.sw_err_cnt++;
 		return -ENOMEM;
 	}
-	skb_reset_mac_header(skb);
 
 	prefetchw(skb->data);
 	length = le16_to_cpu(desc->rx.pkt_len);
@@ -682,14 +680,6 @@
 		return -EFAULT;
 	}
 
-	/* filter out multicast pkt with the same src mac as this port */
-	eh = eth_hdr(skb);
-	if (unlikely(is_multicast_ether_addr(eh->h_dest) &&
-		     ether_addr_equal(ndev->dev_addr, eh->h_source))) {
-		dev_kfree_skb_any(skb);
-		return -EFAULT;
-	}
-
 	ring->stats.rx_pkts++;
 	ring->stats.rx_bytes += skb->len;
 
@@ -747,25 +737,37 @@
 	ndev->last_rx = jiffies;
 }
 
+static int hns_desc_unused(struct hnae_ring *ring)
+{
+	int ntc = ring->next_to_clean;
+	int ntu = ring->next_to_use;
+
+	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
+}
+
 static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
 			       int budget, void *v)
 {
 	struct hnae_ring *ring = ring_data->ring;
 	struct sk_buff *skb;
-	int num, bnum, ex_num;
+	int num, bnum;
 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
 	int recv_pkts, recv_bds, clean_count, err;
+	int unused_count = hns_desc_unused(ring);
 
 	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
 	rmb(); /* make sure num taken effect before the other data is touched */
 
 	recv_pkts = 0, recv_bds = 0, clean_count = 0;
-recv:
+	num -= unused_count;
+
 	while (recv_pkts < budget && recv_bds < num) {
 		/* reuse or realloc buffers */
-		if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
-			hns_nic_alloc_rx_buffers(ring_data, clean_count);
+		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+			hns_nic_alloc_rx_buffers(ring_data,
+						 clean_count + unused_count);
 			clean_count = 0;
+			unused_count = hns_desc_unused(ring);
 		}
 
 		/* poll one pkt */
@@ -786,21 +788,11 @@
 		recv_pkts++;
 	}
 
-	/* make all data has been write before submit */
-	if (recv_pkts < budget) {
-		ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
-
-		if (ex_num > clean_count) {
-			num += ex_num - clean_count;
-			rmb(); /*complete read rx ring bd number*/
-			goto recv;
-		}
-	}
-
 out:
 	/* make all data has been write before submit */
-	if (clean_count > 0)
-		hns_nic_alloc_rx_buffers(ring_data, clean_count);
+	if (clean_count + unused_count > 0)
+		hns_nic_alloc_rx_buffers(ring_data,
+					 clean_count + unused_count);
 
 	return recv_pkts;
 }
@@ -810,6 +802,8 @@
 	struct hnae_ring *ring = ring_data->ring;
 	int num = 0;
 
+	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+
 	/* for hardware bug fixed */
 	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
 
@@ -821,6 +815,20 @@
 	}
 }
 
+static void hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+{
+	struct hnae_ring *ring = ring_data->ring;
+	int num = 0;
+
+	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+
+	if (num == 0)
+		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+			ring, 0);
+	else
+		napi_schedule(&ring_data->napi);
+}
+
 static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
 					    int *bytes, int *pkts)
 {
@@ -922,7 +930,11 @@
 static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
 {
 	struct hnae_ring *ring = ring_data->ring;
-	int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+	int head;
+
+	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+
+	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
 
 	if (head != ring->next_to_clean) {
 		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
@@ -932,6 +944,18 @@
 	}
 }
 
+static void hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+{
+	struct hnae_ring *ring = ring_data->ring;
+	int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+
+	if (head == ring->next_to_clean)
+		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+			ring, 0);
+	else
+		napi_schedule(&ring_data->napi);
+}
+
 static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
 {
 	struct hnae_ring *ring = ring_data->ring;
@@ -963,10 +987,7 @@
 
 	if (clean_complete >= 0 && clean_complete < budget) {
 		napi_complete(napi);
-		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
-			ring_data->ring, 0);
-		if (ring_data->fini_process)
-			ring_data->fini_process(ring_data);
+		ring_data->fini_process(ring_data);
 		return 0;
 	}
 
@@ -1562,6 +1583,21 @@
 	return stats;
 }
 
+static u16
+hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
+		     void *accel_priv, select_queue_fallback_t fallback)
+{
+	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+
+	/* fix hardware broadcast/multicast packets queue loopback */
+	if (!AE_IS_VER1(priv->enet_ver) &&
+	    is_multicast_ether_addr(eth_hdr->h_dest))
+		return 0;
+	else
+		return fallback(ndev, skb);
+}
+
 static const struct net_device_ops hns_nic_netdev_ops = {
 	.ndo_open = hns_nic_net_open,
 	.ndo_stop = hns_nic_net_stop,
@@ -1577,6 +1613,7 @@
 	.ndo_poll_controller = hns_nic_poll_controller,
 #endif
 	.ndo_set_rx_mode = hns_nic_set_rx_mode,
+	.ndo_select_queue = hns_nic_select_queue,
 };
 
 static void hns_nic_update_link_status(struct net_device *netdev)
@@ -1738,7 +1775,8 @@
 		rd->queue_index = i;
 		rd->ring = &h->qs[i]->tx_ring;
 		rd->poll_one = hns_nic_tx_poll_one;
-		rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL;
+		rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
+			hns_nic_tx_fini_pro_v2;
 
 		netif_napi_add(priv->netdev, &rd->napi,
 			       hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
@@ -1750,7 +1788,8 @@
 		rd->ring = &h->qs[i - h->q_num]->rx_ring;
 		rd->poll_one = hns_nic_rx_poll_one;
 		rd->ex_process = hns_nic_rx_up_pro;
-		rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL;
+		rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
+			hns_nic_rx_fini_pro_v2;
 
 		netif_napi_add(priv->netdev, &rd->napi,
 			       hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 47e59bb..87d5c94 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -352,6 +352,13 @@
 		break;
 	}
 
+	if (!ret) {
+		if (loop == MAC_LOOP_NONE)
+			h->dev->ops->set_promisc_mode(
+				h, ndev->flags & IFF_PROMISC);
+		else
+			h->dev->ops->set_promisc_mode(h, 1);
+	}
 	return ret;
 }
 
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index ad4ab97..4a62ffd 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2323,6 +2323,41 @@
 	return err;
 }
 
+static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id)
+{
+	u32 val[2], id[4];
+
+	regmap_read(eth->ethsys, ETHSYS_CHIPID0_3, &val[0]);
+	regmap_read(eth->ethsys, ETHSYS_CHIPID4_7, &val[1]);
+
+	id[3] = ((val[0] >> 16) & 0xff) - '0';
+	id[2] = ((val[0] >> 24) & 0xff) - '0';
+	id[1] = (val[1] & 0xff) - '0';
+	id[0] = ((val[1] >> 8) & 0xff) - '0';
+
+	*chip_id = (id[3] * 1000) + (id[2] * 100) +
+		   (id[1] * 10) + id[0];
+
+	if (!(*chip_id)) {
+		dev_err(eth->dev, "failed to get chip id\n");
+		return -ENODEV;
+	}
+
+	dev_info(eth->dev, "chip id = %d\n", *chip_id);
+
+	return 0;
+}
+
+static bool mtk_is_hwlro_supported(struct mtk_eth *eth)
+{
+	switch (eth->chip_id) {
+	case MT7623_ETH:
+		return true;
+	}
+
+	return false;
+}
+
 static int mtk_probe(struct platform_device *pdev)
 {
 	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2362,8 +2397,6 @@
 		return PTR_ERR(eth->pctl);
 	}
 
-	eth->hwlro = of_property_read_bool(pdev->dev.of_node, "mediatek,hwlro");
-
 	for (i = 0; i < 3; i++) {
 		eth->irq[i] = platform_get_irq(pdev, i);
 		if (eth->irq[i] < 0) {
@@ -2388,6 +2421,12 @@
 	if (err)
 		return err;
 
+	err = mtk_get_chip_id(eth, &eth->chip_id);
+	if (err)
+		return err;
+
+	eth->hwlro = mtk_is_hwlro_supported(eth);
+
 	for_each_child_of_node(pdev->dev.of_node, mac_np) {
 		if (!of_device_is_compatible(mac_np,
 					     "mediatek,eth-mac"))
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 3003195..99b1c8e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -342,6 +342,11 @@
 #define GPIO_BIAS_CTRL		0xed0
 #define GPIO_DRV_SEL10		0xf00
 
+/* ethernet subsystem chip id register */
+#define ETHSYS_CHIPID0_3	0x0
+#define ETHSYS_CHIPID4_7	0x4
+#define MT7623_ETH		7623
+
 /* ethernet subsystem config register */
 #define ETHSYS_SYSCFG0		0x14
 #define SYSCFG0_GE_MASK		0x3
@@ -534,6 +539,7 @@
 	unsigned long			sysclk;
 	struct regmap			*ethsys;
 	struct regmap			*pctl;
+	u32				chip_id;
 	bool				hwlro;
 	atomic_t			dma_refcnt;
 	struct mtk_tx_ring		tx_ring;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index d458515..cc4fd61 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -287,7 +287,7 @@
 
 			goto retry;
 		}
-		MLX5_SET64(manage_pages_in, in, pas[i], addr);
+		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
 	}
 
 	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
@@ -344,7 +344,7 @@
 		if (fwp->func_id != func_id)
 			continue;
 
-		MLX5_SET64(manage_pages_out, out, pas[i], fwp->addr);
+		MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
 		i++;
 	}
 
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 42e3407..b14f030 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -821,7 +821,7 @@
 	}
 
 	if (oldfilter != priv->rxfilter)
-		queue_kthread_work(&priv->kworker, &priv->setrx_work);
+		kthread_queue_work(&priv->kworker, &priv->setrx_work);
 }
 
 static void encx24j600_hw_tx(struct encx24j600_priv *priv)
@@ -879,7 +879,7 @@
 	/* Remember the skb for deferred processing */
 	priv->tx_skb = skb;
 
-	queue_kthread_work(&priv->kworker, &priv->tx_work);
+	kthread_queue_work(&priv->kworker, &priv->tx_work);
 
 	return NETDEV_TX_OK;
 }
@@ -1037,9 +1037,9 @@
 		goto out_free;
 	}
 
-	init_kthread_worker(&priv->kworker);
-	init_kthread_work(&priv->tx_work, encx24j600_tx_proc);
-	init_kthread_work(&priv->setrx_work, encx24j600_setrx_proc);
+	kthread_init_worker(&priv->kworker);
+	kthread_init_work(&priv->tx_work, encx24j600_tx_proc);
+	kthread_init_work(&priv->setrx_work, encx24j600_setrx_proc);
 
 	priv->kworker_task = kthread_run(kthread_worker_fn, &priv->kworker,
 					 "encx24j600");
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 0df1391f9..1e8339a 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -107,15 +107,4 @@
 	---help---
 	  This enables the support for ...
 
-config INFINIBAND_QEDR
-	tristate "QLogic qede RoCE sources [debug]"
-	depends on QEDE && 64BIT
-	select QED_LL2
-	default n
-	---help---
-	  This provides a temporary node that allows the compilation
-	  and logical testing of the InfiniBand over Ethernet support
-	  for QLogic QED. This would be replaced by the 'real' option
-	  once the QEDR driver is added [+relocated].
-
 endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index a6db107..02a8be2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1517,7 +1517,7 @@
 static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 {
 	struct qed_ll2_info ll2_info;
-	struct qed_ll2_buffer *buffer;
+	struct qed_ll2_buffer *buffer, *tmp_buffer;
 	enum qed_ll2_conn_type conn_type;
 	struct qed_ptt *p_ptt;
 	int rc, i;
@@ -1587,7 +1587,7 @@
 
 	/* Post all Rx buffers to FW */
 	spin_lock_bh(&cdev->ll2->lock);
-	list_for_each_entry(buffer, &cdev->ll2->list, list) {
+	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
 		rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
 					    cdev->ll2->handle,
 					    buffer->phys_addr, 0, buffer, 1);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 2343005..76831a3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -2947,7 +2947,7 @@
 	.roce_ll2_stats = &qed_roce_ll2_stats,
 };
 
-const struct qed_rdma_ops *qed_get_rdma_ops()
+const struct qed_rdma_ops *qed_get_rdma_ops(void)
 {
 	return &qed_rdma_ops_pass;
 }
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index 9ba568d..d7720bf 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -26,6 +26,7 @@
 
 config QCOM_EMAC
 	tristate "Qualcomm Technologies, Inc. EMAC Gigabit Ethernet support"
+	depends on HAS_DMA && HAS_IOMEM
 	select CRC32
 	select PHYLIB
 	---help---
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4c8c60a..6c85b61 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -650,20 +650,27 @@
 	if (IS_ERR(priv->clk_ptp_ref)) {
 		priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
 		priv->clk_ptp_ref = NULL;
+		netdev_dbg(priv->dev, "PTP uses main clock\n");
 	} else {
 		clk_prepare_enable(priv->clk_ptp_ref);
 		priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
+		netdev_dbg(priv->dev, "PTP rate %d\n", priv->clk_ptp_rate);
 	}
 
 	priv->adv_ts = 0;
-	if (priv->dma_cap.atime_stamp && priv->extend_desc)
+	/* Check if adv_ts can be enabled for dwmac 4.x core */
+	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
+		priv->adv_ts = 1;
+	/* Dwmac 3.x core with extend_desc can support adv_ts */
+	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
 		priv->adv_ts = 1;
 
-	if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
-		pr_debug("IEEE 1588-2002 Time Stamp supported\n");
+	if (priv->dma_cap.time_stamp)
+		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
 
-	if (netif_msg_hw(priv) && priv->adv_ts)
-		pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
+	if (priv->adv_ts)
+		netdev_info(priv->dev,
+			    "IEEE 1588-2008 Advanced Timestamp supported\n");
 
 	priv->hw->ptp = &stmmac_ptp;
 	priv->hwts_tx_en = 0;
@@ -1702,8 +1709,8 @@
 
 	if (init_ptp) {
 		ret = stmmac_init_ptp(priv);
-		if (ret && ret != -EOPNOTSUPP)
-			pr_warn("%s: failed PTP initialisation\n", __func__);
+		if (ret)
+			netdev_warn(priv->dev, "PTP support cannot init.\n");
 	}
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 6e3b829..289d527 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -186,10 +186,12 @@
 					     priv->device);
 	if (IS_ERR(priv->ptp_clock)) {
 		priv->ptp_clock = NULL;
-		pr_err("ptp_clock_register() failed on %s\n", priv->dev->name);
-	} else if (priv->ptp_clock)
-		pr_debug("Added PTP HW clock successfully on %s\n",
-			 priv->dev->name);
+		return PTR_ERR(priv->ptp_clock);
+	}
+
+	spin_lock_init(&priv->ptp_lock);
+
+	netdev_dbg(priv->dev, "Added PTP HW clock successfully\n");
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index e15bf84..0ac449a 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -11,7 +11,6 @@
 #include <linux/highmem.h>
 #include <linux/if_vlan.h>
 #include <linux/init.h>
-#include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index c3e85ac..054a8dd 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -30,6 +30,8 @@
 
 #define AM33XX_GMII_SEL_RMII2_IO_CLK_EN	BIT(7)
 #define AM33XX_GMII_SEL_RMII1_IO_CLK_EN	BIT(6)
+#define AM33XX_GMII_SEL_RGMII2_IDMODE	BIT(5)
+#define AM33XX_GMII_SEL_RGMII1_IDMODE	BIT(4)
 
 #define GMII_SEL_MODE_MASK		0x3
 
@@ -48,6 +50,7 @@
 	u32 reg;
 	u32 mask;
 	u32 mode = 0;
+	bool rgmii_id = false;
 
 	reg = readl(priv->gmii_sel);
 
@@ -57,10 +60,14 @@
 		break;
 
 	case PHY_INTERFACE_MODE_RGMII:
+		mode = AM33XX_GMII_SEL_MODE_RGMII;
+		break;
+
 	case PHY_INTERFACE_MODE_RGMII_ID:
 	case PHY_INTERFACE_MODE_RGMII_RXID:
 	case PHY_INTERFACE_MODE_RGMII_TXID:
 		mode = AM33XX_GMII_SEL_MODE_RGMII;
+		rgmii_id = true;
 		break;
 
 	default:
@@ -83,6 +90,13 @@
 			mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
 	}
 
+	if (rgmii_id) {
+		if (slave == 0)
+			mode |= AM33XX_GMII_SEL_RGMII1_IDMODE;
+		else
+			mode |= AM33XX_GMII_SEL_RGMII2_IDMODE;
+	}
+
 	reg &= ~mask;
 	reg |= mode;
 
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index ece0ea0..6c7ec1d 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -610,8 +610,8 @@
 #ifdef CONFIG_PCI
 	if (pdev)
 		pci_release_regions(pdev);
-#endif
 err_out:
+#endif
 	if (pdev)
 		pci_disable_device(pdev);
 	return rc;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index bc258d7..272f2b1 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1769,7 +1769,7 @@
 	gelic_ether_setup_netdev_ops(netdev, &card->napi);
 	result = gelic_net_setup_netdev(netdev, card);
 	if (result) {
-		dev_dbg(&dev->core, "%s: setup_netdev failed %d",
+		dev_dbg(&dev->core, "%s: setup_netdev failed %d\n",
 			__func__, result);
 		goto fail_setup_netdev;
 	}
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 37ab46c..d2349a1 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -9,7 +9,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/kconfig.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 0b37ce9..ca31a57 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -10,7 +10,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/kconfig.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 69e2a83..c688d68 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -431,8 +431,7 @@
 	lp->options |= options;
 }
 
-static void __axienet_device_reset(struct axienet_local *lp,
-				   struct device *dev, off_t offset)
+static void __axienet_device_reset(struct axienet_local *lp, off_t offset)
 {
 	u32 timeout;
 	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
@@ -468,8 +467,8 @@
 	u32 axienet_status;
 	struct axienet_local *lp = netdev_priv(ndev);
 
-	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
-	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
+	__axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
+	__axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
 
 	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
 	lp->options |= XAE_OPTION_VLAN;
@@ -818,7 +817,7 @@
 		goto out;
 	}
 	if (!(status & XAXIDMA_IRQ_ALL_MASK))
-		dev_err(&ndev->dev, "No interrupts asserted in Tx path");
+		dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
 	if (status & XAXIDMA_IRQ_ERROR_MASK) {
 		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
 		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
@@ -867,7 +866,7 @@
 		goto out;
 	}
 	if (!(status & XAXIDMA_IRQ_ALL_MASK))
-		dev_err(&ndev->dev, "No interrupts asserted in Rx path");
+		dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
 	if (status & XAXIDMA_IRQ_ERROR_MASK) {
 		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
 		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
@@ -1338,8 +1337,8 @@
 	axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
 		    ~XAE_MDIO_MC_MDIOEN_MASK));
 
-	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
-	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
+	__axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
+	__axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
 
 	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
 	axienet_mdio_wait_until_ready(lp);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 52eeb2f..f0919bd 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -442,8 +442,6 @@
 	}
 
 	net_trans_info = get_net_transport_info(skb, &hdr_offset);
-	if (net_trans_info == TRANSPORT_INFO_NOT_IP)
-		goto do_send;
 
 	/*
 	 * Setup the sendside checksum offload only if this is not a
@@ -478,56 +476,29 @@
 		}
 		lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
 		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
-		goto do_send;
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		if (net_trans_info & INFO_TCP) {
+			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
+			ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
+					    TCPIP_CHKSUM_PKTINFO);
+
+			csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
+									 ppi->ppi_offset);
+
+			if (net_trans_info & (INFO_IPV4 << 16))
+				csum_info->transmit.is_ipv4 = 1;
+			else
+				csum_info->transmit.is_ipv6 = 1;
+
+			csum_info->transmit.tcp_checksum = 1;
+			csum_info->transmit.tcp_header_offset = hdr_offset;
+		} else {
+			/* UDP checksum (and other) offload is not supported. */
+			if (skb_checksum_help(skb))
+				goto drop;
+		}
 	}
 
-	if ((skb->ip_summed == CHECKSUM_NONE) ||
-	    (skb->ip_summed == CHECKSUM_UNNECESSARY))
-		goto do_send;
-
-	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
-	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
-			    TCPIP_CHKSUM_PKTINFO);
-
-	csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
-			ppi->ppi_offset);
-
-	if (net_trans_info & (INFO_IPV4 << 16))
-		csum_info->transmit.is_ipv4 = 1;
-	else
-		csum_info->transmit.is_ipv6 = 1;
-
-	if (net_trans_info & INFO_TCP) {
-		csum_info->transmit.tcp_checksum = 1;
-		csum_info->transmit.tcp_header_offset = hdr_offset;
-	} else if (net_trans_info & INFO_UDP) {
-		/* UDP checksum offload is not supported on ws2008r2.
-		 * Furthermore, on ws2012 and ws2012r2, there are some
-		 * issues with udp checksum offload from Linux guests.
-		 * (these are host issues).
-		 * For now compute the checksum here.
-		 */
-		struct udphdr *uh;
-		u16 udp_len;
-
-		ret = skb_cow_head(skb, 0);
-		if (ret)
-			goto no_memory;
-
-		uh = udp_hdr(skb);
-		udp_len = ntohs(uh->len);
-		uh->check = 0;
-		uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
-					      ip_hdr(skb)->daddr,
-					      udp_len, IPPROTO_UDP,
-					      csum_partial(uh, udp_len, 0));
-		if (uh->check == 0)
-			uh->check = CSUM_MANGLED_0;
-
-		csum_info->transmit.udp_checksum = 0;
-	}
-
-do_send:
 	/* Start filling in the page buffers with the rndis hdr */
 	rndis_msg->msg_len += rndis_msg_size;
 	packet->total_data_buflen = rndis_msg->msg_len;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 5078a0d..2651c8d 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -142,6 +142,7 @@
 
 config MDIO_XGENE
 	tristate "APM X-Gene SoC MDIO bus controller"
+	depends on ARCH_XGENE || COMPILE_TEST
 	help
 	  This module provides a driver for the MDIO busses found in the
 	  APM X-Gene SoC's.
@@ -320,13 +321,6 @@
          the Reduced Gigabit Media Independent Interface(RGMII) between
          Ethernet physical media devices and the Gigabit Ethernet controller.
 
-config MDIO_XGENE
-	tristate "APM X-Gene SoC MDIO bus controller"
-	depends on ARCH_XGENE || COMPILE_TEST
-	help
-	  This module provides a driver for the MDIO busses found in the
-	  APM X-Gene SoC's.
-
 endif # PHYLIB
 
 config MICREL_KS8995MA
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index a17573e..77a6671 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -13,6 +13,7 @@
 #include <linux/phy.h>
 #include <linux/of.h>
 #include <dt-bindings/net/mscc-phy-vsc8531.h>
+#include <linux/netdevice.h>
 
 enum rgmii_rx_clock_delay {
 	RGMII_RX_CLK_DELAY_0_2_NS = 0,
@@ -37,6 +38,7 @@
 
 #define MII_VSC85XX_INT_MASK		  25
 #define MII_VSC85XX_INT_MASK_MASK	  0xa000
+#define MII_VSC85XX_INT_MASK_WOL	  0x0040
 #define MII_VSC85XX_INT_STATUS		  26
 
 #define MSCC_PHY_WOL_MAC_CONTROL          27
@@ -52,6 +54,17 @@
 #define RGMII_RX_CLK_DELAY_MASK		  0x0070
 #define RGMII_RX_CLK_DELAY_POS		  4
 
+#define MSCC_PHY_WOL_LOWER_MAC_ADDR	  21
+#define MSCC_PHY_WOL_MID_MAC_ADDR	  22
+#define MSCC_PHY_WOL_UPPER_MAC_ADDR	  23
+#define MSCC_PHY_WOL_LOWER_PASSWD	  24
+#define MSCC_PHY_WOL_MID_PASSWD		  25
+#define MSCC_PHY_WOL_UPPER_PASSWD	  26
+
+#define MSCC_PHY_WOL_MAC_CONTROL	  27
+#define SECURE_ON_ENABLE		  0x8000
+#define SECURE_ON_PASSWD_LEN_4		  0x4000
+
 /* Microsemi PHY ID's */
 #define PHY_ID_VSC8531			  0x00070570
 #define PHY_ID_VSC8541			  0x00070770
@@ -81,6 +94,117 @@
 	return rc;
 }
 
+static int vsc85xx_wol_set(struct phy_device *phydev,
+			   struct ethtool_wolinfo *wol)
+{
+	int rc;
+	u16 reg_val;
+	u8  i;
+	u16 pwd[3] = {0, 0, 0};
+	struct ethtool_wolinfo *wol_conf = wol;
+	u8 *mac_addr = phydev->attached_dev->dev_addr;
+
+	mutex_lock(&phydev->lock);
+	rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
+	if (rc != 0)
+		goto out_unlock;
+
+	if (wol->wolopts & WAKE_MAGIC) {
+		/* Store the device address for the magic packet */
+		for (i = 0; i < ARRAY_SIZE(pwd); i++)
+			pwd[i] = mac_addr[5 - (i * 2 + 1)] << 8 |
+				 mac_addr[5 - i * 2];
+		phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, pwd[0]);
+		phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, pwd[1]);
+		phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, pwd[2]);
+	} else {
+		phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, 0);
+		phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, 0);
+		phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, 0);
+	}
+
+	if (wol_conf->wolopts & WAKE_MAGICSECURE) {
+		for (i = 0; i < ARRAY_SIZE(pwd); i++)
+			pwd[i] = wol_conf->sopass[5 - (i * 2 + 1)] << 8 |
+				 wol_conf->sopass[5 - i * 2];
+		phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, pwd[0]);
+		phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, pwd[1]);
+		phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, pwd[2]);
+	} else {
+		phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, 0);
+		phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, 0);
+		phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, 0);
+	}
+
+	reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
+	if (wol_conf->wolopts & WAKE_MAGICSECURE)
+		reg_val |= SECURE_ON_ENABLE;
+	else
+		reg_val &= ~SECURE_ON_ENABLE;
+	phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val);
+
+	rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
+	if (rc != 0)
+		goto out_unlock;
+
+	if (wol->wolopts & WAKE_MAGIC) {
+		/* Enable the WOL interrupt */
+		reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK);
+		reg_val |= MII_VSC85XX_INT_MASK_WOL;
+		rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
+		if (rc != 0)
+			goto out_unlock;
+	} else {
+		/* Disable the WOL interrupt */
+		reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK);
+		reg_val &= (~MII_VSC85XX_INT_MASK_WOL);
+		rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
+		if (rc != 0)
+			goto out_unlock;
+	}
+	/* Clear WOL iterrupt status */
+	reg_val = phy_read(phydev, MII_VSC85XX_INT_STATUS);
+
+out_unlock:
+	mutex_unlock(&phydev->lock);
+
+	return rc;
+}
+
+static void vsc85xx_wol_get(struct phy_device *phydev,
+			    struct ethtool_wolinfo *wol)
+{
+	int rc;
+	u16 reg_val;
+	u8  i;
+	u16 pwd[3] = {0, 0, 0};
+	struct ethtool_wolinfo *wol_conf = wol;
+
+	mutex_lock(&phydev->lock);
+	rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
+	if (rc != 0)
+		goto out_unlock;
+
+	reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
+	if (reg_val & SECURE_ON_ENABLE)
+		wol_conf->wolopts |= WAKE_MAGICSECURE;
+	if (wol_conf->wolopts & WAKE_MAGICSECURE) {
+		pwd[0] = phy_read(phydev, MSCC_PHY_WOL_LOWER_PASSWD);
+		pwd[1] = phy_read(phydev, MSCC_PHY_WOL_MID_PASSWD);
+		pwd[2] = phy_read(phydev, MSCC_PHY_WOL_UPPER_PASSWD);
+		for (i = 0; i < ARRAY_SIZE(pwd); i++) {
+			wol_conf->sopass[5 - i * 2] = pwd[i] & 0x00ff;
+			wol_conf->sopass[5 - (i * 2 + 1)] = (pwd[i] & 0xff00)
+							    >> 8;
+		}
+	}
+
+	rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
+
+out_unlock:
+	mutex_unlock(&phydev->lock);
+}
+
 static u8 edge_rate_magic_get(u16 vddmac,
 			      int slowdown)
 {
@@ -301,6 +425,8 @@
 	.suspend	= &genphy_suspend,
 	.resume		= &genphy_resume,
 	.probe          = &vsc85xx_probe,
+	.set_wol        = &vsc85xx_wol_set,
+	.get_wol        = &vsc85xx_wol_get,
 },
 {
 	.phy_id		= PHY_ID_VSC8541,
@@ -318,6 +444,8 @@
 	.suspend	= &genphy_suspend,
 	.resume		= &genphy_resume,
 	.probe          = &vsc85xx_probe,
+	.set_wol        = &vsc85xx_wol_set,
+	.get_wol        = &vsc85xx_wol_get,
 }
 
 };
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index c6f6683..f424b86 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -608,6 +608,21 @@
 }
 
 /**
+ * phy_trigger_machine - trigger the state machine to run
+ *
+ * @phydev: the phy_device struct
+ *
+ * Description: There has been a change in state which requires that the
+ *   state machine runs.
+ */
+
+static void phy_trigger_machine(struct phy_device *phydev)
+{
+	cancel_delayed_work_sync(&phydev->state_queue);
+	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
+}
+
+/**
  * phy_stop_machine - stop the PHY state machine tracking
  * @phydev: target phy_device struct
  *
@@ -639,6 +654,8 @@
 	mutex_lock(&phydev->lock);
 	phydev->state = PHY_HALTED;
 	mutex_unlock(&phydev->lock);
+
+	phy_trigger_machine(phydev);
 }
 
 /**
@@ -800,8 +817,7 @@
 	}
 
 	/* reschedule state queue work to run as soon as possible */
-	cancel_delayed_work_sync(&phydev->state_queue);
-	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
+	phy_trigger_machine(phydev);
 	return;
 
 ignore:
@@ -890,6 +906,8 @@
 	/* if phy was suspended, bring the physical link up again */
 	if (do_resume)
 		phy_resume(phydev);
+
+	phy_trigger_machine(phydev);
 }
 EXPORT_SYMBOL(phy_start);
 
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 9d1fce8..3ff76c6 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -59,6 +59,10 @@
 	QMI_WWAN_FLAG_RAWIP = 1 << 0,
 };
 
+enum qmi_wwan_quirks {
+	QMI_WWAN_QUIRK_DTR = 1 << 0,	/* needs "set DTR" request */
+};
+
 static void qmi_wwan_netdev_setup(struct net_device *net)
 {
 	struct usbnet *dev = netdev_priv(net);
@@ -411,9 +415,14 @@
 	 * clearing out state the clients might need.
 	 *
 	 * MDM9x30 is the first QMI chipset with USB3 support. Abuse
-	 * this fact to enable the quirk.
+	 * this fact to enable the quirk for all USB3 devices.
+	 *
+	 * There are also chipsets with the same "set DTR" requirement
+	 * but without USB3 support.  Devices based on these chips
+	 * need a quirk flag in the device ID table.
 	 */
-	if (le16_to_cpu(dev->udev->descriptor.bcdUSB) >= 0x0201) {
+	if (dev->driver_info->data & QMI_WWAN_QUIRK_DTR ||
+	    le16_to_cpu(dev->udev->descriptor.bcdUSB) >= 0x0201) {
 		qmi_wwan_manage_power(dev, 1);
 		qmi_wwan_change_dtr(dev, true);
 	}
@@ -526,6 +535,16 @@
 	.rx_fixup       = qmi_wwan_rx_fixup,
 };
 
+static const struct driver_info	qmi_wwan_info_quirk_dtr = {
+	.description	= "WWAN/QMI device",
+	.flags		= FLAG_WWAN,
+	.bind		= qmi_wwan_bind,
+	.unbind		= qmi_wwan_unbind,
+	.manage_power	= qmi_wwan_manage_power,
+	.rx_fixup       = qmi_wwan_rx_fixup,
+	.data           = QMI_WWAN_QUIRK_DTR,
+};
+
 #define HUAWEI_VENDOR_ID	0x12D1
 
 /* map QMI/wwan function by a fixed interface number */
@@ -533,6 +552,11 @@
 	USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
 	.driver_info = (unsigned long)&qmi_wwan_info
 
+/* devices requiring "set DTR" quirk */
+#define QMI_QUIRK_SET_DTR(vend, prod, num) \
+	USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
+	.driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr
+
 /* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
 #define QMI_GOBI1K_DEVICE(vend, prod) \
 	QMI_FIXED_INTF(vend, prod, 3)
@@ -895,6 +919,8 @@
 	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
 	{QMI_FIXED_INTF(0x22de, 0x9061, 3)},	/* WeTelecom WPD-600N */
 	{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},	/* SIMCom 7230E */
+	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)},	/* Quectel EC25, EC20 R2.0  Mini PCIe */
+	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)},	/* Quectel EC21 Mini PCIe */
 
 	/* 4. Gobi 1000 devices */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 5fbf83d..6564753 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -295,11 +295,11 @@
 	qe_muram_free(priv->ucc_pram_offset);
 free_tx_bd:
 	dma_free_coherent(priv->dev,
-			  TX_BD_RING_LEN * sizeof(struct qe_bd),
+			  TX_BD_RING_LEN * sizeof(struct qe_bd *),
 			  priv->tx_bd_base, priv->dma_tx_bd);
 free_rx_bd:
 	dma_free_coherent(priv->dev,
-			  RX_BD_RING_LEN * sizeof(struct qe_bd),
+			  RX_BD_RING_LEN * sizeof(struct qe_bd *),
 			  priv->rx_bd_base, priv->dma_rx_bd);
 free_uccf:
 	ucc_fast_free(priv->uccf);
@@ -688,7 +688,7 @@
 
 	if (priv->rx_bd_base) {
 		dma_free_coherent(priv->dev,
-				  RX_BD_RING_LEN * sizeof(struct qe_bd),
+				  RX_BD_RING_LEN * sizeof(struct qe_bd *),
 				  priv->rx_bd_base, priv->dma_rx_bd);
 
 		priv->rx_bd_base = NULL;
@@ -697,7 +697,7 @@
 
 	if (priv->tx_bd_base) {
 		dma_free_coherent(priv->dev,
-				  TX_BD_RING_LEN * sizeof(struct qe_bd),
+				  TX_BD_RING_LEN * sizeof(struct qe_bd *),
 				  priv->tx_bd_base, priv->dma_tx_bd);
 
 		priv->tx_bd_base = NULL;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
index f6591c8..affe760 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
@@ -2422,14 +2422,12 @@
 	 */
 	if (priv->mac80211_registered) {
 		char buf[100];
-		struct dentry *mac80211_dir, *dev_dir, *root_dir;
+		struct dentry *mac80211_dir, *dev_dir;
 
 		dev_dir = dbgfs_dir->d_parent;
-		root_dir = dev_dir->d_parent;
 		mac80211_dir = priv->hw->wiphy->debugfsdir;
 
-		snprintf(buf, 100, "../../%s/%s", root_dir->d_name.name,
-			 dev_dir->d_name.name);
+		snprintf(buf, 100, "../../%pd2", dev_dir);
 
 		if (!debugfs_create_symlink("iwlwifi", mac80211_dir, buf))
 			goto err;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 2d6f44f..f4d75ff 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -1571,8 +1571,8 @@
 	mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
 
 	if (!mvmvif->dbgfs_dir) {
-		IWL_ERR(mvm, "Failed to create debugfs directory under %s\n",
-			dbgfs_dir->d_name.name);
+		IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n",
+			dbgfs_dir);
 		return;
 	}
 
@@ -1627,17 +1627,15 @@
 	 * find
 	 * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
 	 */
-	snprintf(buf, 100, "../../../%s/%s/%s/%s",
-		 dbgfs_dir->d_parent->d_parent->d_name.name,
-		 dbgfs_dir->d_parent->d_name.name,
-		 dbgfs_dir->d_name.name,
-		 mvmvif->dbgfs_dir->d_name.name);
+	snprintf(buf, 100, "../../../%pd3/%pd",
+		 dbgfs_dir,
+		 mvmvif->dbgfs_dir);
 
 	mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
 						     mvm->debugfs_dir, buf);
 	if (!mvmvif->dbgfs_slink)
-		IWL_ERR(mvm, "Can't create debugfs symbolic link under %s\n",
-			dbgfs_dir->d_name.name);
+		IWL_ERR(mvm, "Can't create debugfs symbolic link under %pd\n",
+			dbgfs_dir);
 	return;
 err:
 	IWL_ERR(mvm, "Can't create debugfs entity\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 539d718..07da4ef 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1748,9 +1748,7 @@
 	 * Create a symlink with mac80211. It will be removed when mac80211
 	 * exists (before the opmode exists which removes the target.)
 	 */
-	snprintf(buf, 100, "../../%s/%s",
-		 dbgfs_dir->d_parent->d_parent->d_name.name,
-		 dbgfs_dir->d_parent->d_name.name);
+	snprintf(buf, 100, "../../%pd2", dbgfs_dir->d_parent);
 	if (!debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf))
 		goto err;
 
diff --git a/drivers/net/xen-netback/Makefile b/drivers/net/xen-netback/Makefile
index 11e02be..d49798a 100644
--- a/drivers/net/xen-netback/Makefile
+++ b/drivers/net/xen-netback/Makefile
@@ -1,3 +1,3 @@
 obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o
 
-xen-netback-y := netback.o xenbus.o interface.o hash.o
+xen-netback-y := netback.o xenbus.o interface.o hash.o rx.o
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index b38fb2c..3ce1f7d 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -91,13 +91,6 @@
  */
 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
 
-/* It's possible for an skb to have a maximal number of frags
- * but still be less than MAX_BUFFER_OFFSET in size. Thus the
- * worst-case number of copy operations is MAX_XEN_SKB_FRAGS per
- * ring slot.
- */
-#define MAX_GRANT_COPY_OPS (MAX_XEN_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
-
 #define NETBACK_INVALID_HANDLE -1
 
 /* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
@@ -133,6 +126,15 @@
 	unsigned long tx_frag_overflow;
 };
 
+#define COPY_BATCH_SIZE 64
+
+struct xenvif_copy_state {
+	struct gnttab_copy op[COPY_BATCH_SIZE];
+	RING_IDX idx[COPY_BATCH_SIZE];
+	unsigned int num;
+	struct sk_buff_head *completed;
+};
+
 struct xenvif_queue { /* Per-queue data for xenvif */
 	unsigned int id; /* Queue ID, 0-based */
 	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
@@ -189,12 +191,7 @@
 	unsigned long last_rx_time;
 	bool stalled;
 
-	struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
-
-	/* We create one meta structure per ring request we consume, so
-	 * the maximum number is the same as the ring size.
-	 */
-	struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
+	struct xenvif_copy_state rx_copy;
 
 	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
 	unsigned long   credit_bytes;
@@ -260,7 +257,6 @@
 
 	/* Frontend feature information. */
 	int gso_mask;
-	int gso_prefix_mask;
 
 	u8 can_sg:1;
 	u8 ip_csum:1;
@@ -359,6 +355,7 @@
 
 irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
 
+void xenvif_rx_action(struct xenvif_queue *queue);
 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
 
 void xenvif_carrier_on(struct xenvif *vif);
@@ -410,4 +407,8 @@
 
 void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);
 
+#ifdef CONFIG_DEBUG_FS
+void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m);
+#endif
+
 #endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 613bac0..e8c5ddd 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -360,6 +360,74 @@
 	return XEN_NETIF_CTRL_STATUS_SUCCESS;
 }
 
+#ifdef CONFIG_DEBUG_FS
+void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
+{
+	unsigned int i;
+
+	switch (vif->hash.alg) {
+	case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
+		seq_puts(m, "Hash Algorithm: TOEPLITZ\n");
+		break;
+
+	case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
+		seq_puts(m, "Hash Algorithm: NONE\n");
+		/* FALLTHRU */
+	default:
+		return;
+	}
+
+	if (vif->hash.flags) {
+		seq_puts(m, "\nHash Flags:\n");
+
+		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4)
+			seq_puts(m, "- IPv4\n");
+		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
+			seq_puts(m, "- IPv4 + TCP\n");
+		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6)
+			seq_puts(m, "- IPv6\n");
+		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
+			seq_puts(m, "- IPv6 + TCP\n");
+	}
+
+	seq_puts(m, "\nHash Key:\n");
+
+	for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) {
+		unsigned int j, n;
+
+		n = 8;
+		if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE)
+			n = XEN_NETBK_MAX_HASH_KEY_SIZE - i;
+
+		seq_printf(m, "[%2u - %2u]: ", i, i + n - 1);
+
+		for (j = 0; j < n; j++, i++)
+			seq_printf(m, "%02x ", vif->hash.key[i]);
+
+		seq_puts(m, "\n");
+	}
+
+	if (vif->hash.size != 0) {
+		seq_puts(m, "\nHash Mapping:\n");
+
+		for (i = 0; i < vif->hash.size; ) {
+			unsigned int j, n;
+
+			n = 8;
+			if (i + n >= vif->hash.size)
+				n = vif->hash.size - i;
+
+			seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);
+
+			for (j = 0; j < n; j++, i++)
+				seq_printf(m, "%4u ", vif->hash.mapping[i]);
+
+			seq_puts(m, "\n");
+		}
+	}
+}
+#endif /* CONFIG_DEBUG_FS */
+
 void xenvif_init_hash(struct xenvif *vif)
 {
 	if (xenvif_hash_cache_size == 0)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index fb50c6d..74dc2bf 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -149,17 +149,8 @@
 	struct xenvif *vif = netdev_priv(dev);
 	unsigned int size = vif->hash.size;
 
-	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) {
-		u16 index = fallback(dev, skb) % dev->real_num_tx_queues;
-
-		/* Make sure there is no hash information in the socket
-		 * buffer otherwise it would be incorrectly forwarded
-		 * to the frontend.
-		 */
-		skb_clear_hash(skb);
-
-		return index;
-	}
+	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
+		return fallback(dev, skb) % dev->real_num_tx_queues;
 
 	xenvif_set_skb_hash(vif, skb);
 
@@ -208,6 +199,13 @@
 	cb = XENVIF_RX_CB(skb);
 	cb->expires = jiffies + vif->drain_timeout;
 
+	/* If there is no hash algorithm configured then make sure there
+	 * is no hash information in the socket buffer otherwise it
+	 * would be incorrectly forwarded to the frontend.
+	 */
+	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
+		skb_clear_hash(skb);
+
 	xenvif_rx_queue_tail(queue, skb);
 	xenvif_kick_thread(queue);
 
@@ -319,9 +317,9 @@
 
 	if (!vif->can_sg)
 		features &= ~NETIF_F_SG;
-	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
+	if (~(vif->gso_mask) & GSO_BIT(TCPV4))
 		features &= ~NETIF_F_TSO;
-	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
+	if (~(vif->gso_mask) & GSO_BIT(TCPV6))
 		features &= ~NETIF_F_TSO6;
 	if (!vif->ip_csum)
 		features &= ~NETIF_F_IP_CSUM;
@@ -467,7 +465,7 @@
 	dev->netdev_ops	= &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-		NETIF_F_TSO | NETIF_F_TSO6;
+		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
 	dev->features = dev->hw_features | NETIF_F_RXCSUM;
 	dev->ethtool_ops = &xenvif_ethtool_ops;
 
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 3d0c989..47b4810 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -106,13 +106,6 @@
 
 static inline int tx_work_todo(struct xenvif_queue *queue);
 
-static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
-					     u16      id,
-					     s8       st,
-					     u16      offset,
-					     u16      size,
-					     u16      flags);
-
 static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
 				       u16 idx)
 {
@@ -155,571 +148,11 @@
 	return i & (MAX_PENDING_REQS-1);
 }
 
-static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
-{
-	RING_IDX prod, cons;
-	struct sk_buff *skb;
-	int needed;
-
-	skb = skb_peek(&queue->rx_queue);
-	if (!skb)
-		return false;
-
-	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
-	if (skb_is_gso(skb))
-		needed++;
-	if (skb->sw_hash)
-		needed++;
-
-	do {
-		prod = queue->rx.sring->req_prod;
-		cons = queue->rx.req_cons;
-
-		if (prod - cons >= needed)
-			return true;
-
-		queue->rx.sring->req_event = prod + 1;
-
-		/* Make sure event is visible before we check prod
-		 * again.
-		 */
-		mb();
-	} while (queue->rx.sring->req_prod != prod);
-
-	return false;
-}
-
-void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&queue->rx_queue.lock, flags);
-
-	__skb_queue_tail(&queue->rx_queue, skb);
-
-	queue->rx_queue_len += skb->len;
-	if (queue->rx_queue_len > queue->rx_queue_max)
-		netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
-
-	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
-}
-
-static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
-{
-	struct sk_buff *skb;
-
-	spin_lock_irq(&queue->rx_queue.lock);
-
-	skb = __skb_dequeue(&queue->rx_queue);
-	if (skb)
-		queue->rx_queue_len -= skb->len;
-
-	spin_unlock_irq(&queue->rx_queue.lock);
-
-	return skb;
-}
-
-static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
-{
-	spin_lock_irq(&queue->rx_queue.lock);
-
-	if (queue->rx_queue_len < queue->rx_queue_max)
-		netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
-
-	spin_unlock_irq(&queue->rx_queue.lock);
-}
-
-
-static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
-{
-	struct sk_buff *skb;
-	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
-		kfree_skb(skb);
-}
-
-static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
-{
-	struct sk_buff *skb;
-
-	for(;;) {
-		skb = skb_peek(&queue->rx_queue);
-		if (!skb)
-			break;
-		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
-			break;
-		xenvif_rx_dequeue(queue);
-		kfree_skb(skb);
-	}
-}
-
-struct netrx_pending_operations {
-	unsigned copy_prod, copy_cons;
-	unsigned meta_prod, meta_cons;
-	struct gnttab_copy *copy;
-	struct xenvif_rx_meta *meta;
-	int copy_off;
-	grant_ref_t copy_gref;
-};
-
-static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
-						 struct netrx_pending_operations *npo)
-{
-	struct xenvif_rx_meta *meta;
-	struct xen_netif_rx_request req;
-
-	RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
-
-	meta = npo->meta + npo->meta_prod++;
-	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
-	meta->gso_size = 0;
-	meta->size = 0;
-	meta->id = req.id;
-
-	npo->copy_off = 0;
-	npo->copy_gref = req.gref;
-
-	return meta;
-}
-
-struct gop_frag_copy {
-	struct xenvif_queue *queue;
-	struct netrx_pending_operations *npo;
-	struct xenvif_rx_meta *meta;
-	int head;
-	int gso_type;
-	int protocol;
-	int hash_present;
-
-	struct page *page;
-};
-
-static void xenvif_setup_copy_gop(unsigned long gfn,
-				  unsigned int offset,
-				  unsigned int *len,
-				  struct gop_frag_copy *info)
-{
-	struct gnttab_copy *copy_gop;
-	struct xen_page_foreign *foreign;
-	/* Convenient aliases */
-	struct xenvif_queue *queue = info->queue;
-	struct netrx_pending_operations *npo = info->npo;
-	struct page *page = info->page;
-
-	BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
-
-	if (npo->copy_off == MAX_BUFFER_OFFSET)
-		info->meta = get_next_rx_buffer(queue, npo);
-
-	if (npo->copy_off + *len > MAX_BUFFER_OFFSET)
-		*len = MAX_BUFFER_OFFSET - npo->copy_off;
-
-	copy_gop = npo->copy + npo->copy_prod++;
-	copy_gop->flags = GNTCOPY_dest_gref;
-	copy_gop->len = *len;
-
-	foreign = xen_page_foreign(page);
-	if (foreign) {
-		copy_gop->source.domid = foreign->domid;
-		copy_gop->source.u.ref = foreign->gref;
-		copy_gop->flags |= GNTCOPY_source_gref;
-	} else {
-		copy_gop->source.domid = DOMID_SELF;
-		copy_gop->source.u.gmfn = gfn;
-	}
-	copy_gop->source.offset = offset;
-
-	copy_gop->dest.domid = queue->vif->domid;
-	copy_gop->dest.offset = npo->copy_off;
-	copy_gop->dest.u.ref = npo->copy_gref;
-
-	npo->copy_off += *len;
-	info->meta->size += *len;
-
-	if (!info->head)
-		return;
-
-	/* Leave a gap for the GSO descriptor. */
-	if ((1 << info->gso_type) & queue->vif->gso_mask)
-		queue->rx.req_cons++;
-
-	/* Leave a gap for the hash extra segment. */
-	if (info->hash_present)
-		queue->rx.req_cons++;
-
-	info->head = 0; /* There must be something in this buffer now */
-}
-
-static void xenvif_gop_frag_copy_grant(unsigned long gfn,
-				       unsigned offset,
-				       unsigned int len,
-				       void *data)
-{
-	unsigned int bytes;
-
-	while (len) {
-		bytes = len;
-		xenvif_setup_copy_gop(gfn, offset, &bytes, data);
-		offset += bytes;
-		len -= bytes;
-	}
-}
-
-/*
- * Set up the grant operations for this fragment. If it's a flipping
- * interface, we also set up the unmap request from here.
- */
-static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
-				 struct netrx_pending_operations *npo,
-				 struct page *page, unsigned long size,
-				 unsigned long offset, int *head)
-{
-	struct gop_frag_copy info = {
-		.queue = queue,
-		.npo = npo,
-		.head = *head,
-		.gso_type = XEN_NETIF_GSO_TYPE_NONE,
-		/* xenvif_set_skb_hash() will have either set a s/w
-		 * hash or cleared the hash depending on
-		 * whether the the frontend wants a hash for this skb.
-		 */
-		.hash_present = skb->sw_hash,
-	};
-	unsigned long bytes;
-
-	if (skb_is_gso(skb)) {
-		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-			info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
-		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-			info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
-	}
-
-	/* Data must not cross a page boundary. */
-	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
-
-	info.meta = npo->meta + npo->meta_prod - 1;
-
-	/* Skip unused frames from start of page */
-	page += offset >> PAGE_SHIFT;
-	offset &= ~PAGE_MASK;
-
-	while (size > 0) {
-		BUG_ON(offset >= PAGE_SIZE);
-
-		bytes = PAGE_SIZE - offset;
-		if (bytes > size)
-			bytes = size;
-
-		info.page = page;
-		gnttab_foreach_grant_in_range(page, offset, bytes,
-					      xenvif_gop_frag_copy_grant,
-					      &info);
-		size -= bytes;
-		offset = 0;
-
-		/* Next page */
-		if (size) {
-			BUG_ON(!PageCompound(page));
-			page++;
-		}
-	}
-
-	*head = info.head;
-}
-
-/*
- * Prepare an SKB to be transmitted to the frontend.
- *
- * This function is responsible for allocating grant operations, meta
- * structures, etc.
- *
- * It returns the number of meta structures consumed. The number of
- * ring slots used is always equal to the number of meta slots used
- * plus the number of GSO descriptors used. Currently, we use either
- * zero GSO descriptors (for non-GSO packets) or one descriptor (for
- * frontend-side LRO).
- */
-static int xenvif_gop_skb(struct sk_buff *skb,
-			  struct netrx_pending_operations *npo,
-			  struct xenvif_queue *queue)
-{
-	struct xenvif *vif = netdev_priv(skb->dev);
-	int nr_frags = skb_shinfo(skb)->nr_frags;
-	int i;
-	struct xen_netif_rx_request req;
-	struct xenvif_rx_meta *meta;
-	unsigned char *data;
-	int head = 1;
-	int old_meta_prod;
-	int gso_type;
-
-	old_meta_prod = npo->meta_prod;
-
-	gso_type = XEN_NETIF_GSO_TYPE_NONE;
-	if (skb_is_gso(skb)) {
-		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
-		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
-	}
-
-	/* Set up a GSO prefix descriptor, if necessary */
-	if ((1 << gso_type) & vif->gso_prefix_mask) {
-		RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
-		meta = npo->meta + npo->meta_prod++;
-		meta->gso_type = gso_type;
-		meta->gso_size = skb_shinfo(skb)->gso_size;
-		meta->size = 0;
-		meta->id = req.id;
-	}
-
-	RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
-	meta = npo->meta + npo->meta_prod++;
-
-	if ((1 << gso_type) & vif->gso_mask) {
-		meta->gso_type = gso_type;
-		meta->gso_size = skb_shinfo(skb)->gso_size;
-	} else {
-		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
-		meta->gso_size = 0;
-	}
-
-	meta->size = 0;
-	meta->id = req.id;
-	npo->copy_off = 0;
-	npo->copy_gref = req.gref;
-
-	data = skb->data;
-	while (data < skb_tail_pointer(skb)) {
-		unsigned int offset = offset_in_page(data);
-		unsigned int len = PAGE_SIZE - offset;
-
-		if (data + len > skb_tail_pointer(skb))
-			len = skb_tail_pointer(skb) - data;
-
-		xenvif_gop_frag_copy(queue, skb, npo,
-				     virt_to_page(data), len, offset, &head);
-		data += len;
-	}
-
-	for (i = 0; i < nr_frags; i++) {
-		xenvif_gop_frag_copy(queue, skb, npo,
-				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
-				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
-				     skb_shinfo(skb)->frags[i].page_offset,
-				     &head);
-	}
-
-	return npo->meta_prod - old_meta_prod;
-}
-
-/*
- * This is a twin to xenvif_gop_skb.  Assume that xenvif_gop_skb was
- * used to set up the operations on the top of
- * netrx_pending_operations, which have since been done.  Check that
- * they didn't give any errors and advance over them.
- */
-static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
-			    struct netrx_pending_operations *npo)
-{
-	struct gnttab_copy     *copy_op;
-	int status = XEN_NETIF_RSP_OKAY;
-	int i;
-
-	for (i = 0; i < nr_meta_slots; i++) {
-		copy_op = npo->copy + npo->copy_cons++;
-		if (copy_op->status != GNTST_okay) {
-			netdev_dbg(vif->dev,
-				   "Bad status %d from copy to DOM%d.\n",
-				   copy_op->status, vif->domid);
-			status = XEN_NETIF_RSP_ERROR;
-		}
-	}
-
-	return status;
-}
-
-static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
-				      struct xenvif_rx_meta *meta,
-				      int nr_meta_slots)
-{
-	int i;
-	unsigned long offset;
-
-	/* No fragments used */
-	if (nr_meta_slots <= 1)
-		return;
-
-	nr_meta_slots--;
-
-	for (i = 0; i < nr_meta_slots; i++) {
-		int flags;
-		if (i == nr_meta_slots - 1)
-			flags = 0;
-		else
-			flags = XEN_NETRXF_more_data;
-
-		offset = 0;
-		make_rx_response(queue, meta[i].id, status, offset,
-				 meta[i].size, flags);
-	}
-}
-
 void xenvif_kick_thread(struct xenvif_queue *queue)
 {
 	wake_up(&queue->wq);
 }
 
-static void xenvif_rx_action(struct xenvif_queue *queue)
-{
-	struct xenvif *vif = queue->vif;
-	s8 status;
-	u16 flags;
-	struct xen_netif_rx_response *resp;
-	struct sk_buff_head rxq;
-	struct sk_buff *skb;
-	LIST_HEAD(notify);
-	int ret;
-	unsigned long offset;
-	bool need_to_notify = false;
-
-	struct netrx_pending_operations npo = {
-		.copy  = queue->grant_copy_op,
-		.meta  = queue->meta,
-	};
-
-	skb_queue_head_init(&rxq);
-
-	while (xenvif_rx_ring_slots_available(queue)
-	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
-		queue->last_rx_time = jiffies;
-
-		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
-
-		__skb_queue_tail(&rxq, skb);
-	}
-
-	BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
-
-	if (!npo.copy_prod)
-		goto done;
-
-	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
-	gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
-
-	while ((skb = __skb_dequeue(&rxq)) != NULL) {
-		struct xen_netif_extra_info *extra = NULL;
-
-		if ((1 << queue->meta[npo.meta_cons].gso_type) &
-		    vif->gso_prefix_mask) {
-			resp = RING_GET_RESPONSE(&queue->rx,
-						 queue->rx.rsp_prod_pvt++);
-
-			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
-
-			resp->offset = queue->meta[npo.meta_cons].gso_size;
-			resp->id = queue->meta[npo.meta_cons].id;
-			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
-
-			npo.meta_cons++;
-			XENVIF_RX_CB(skb)->meta_slots_used--;
-		}
-
-
-		queue->stats.tx_bytes += skb->len;
-		queue->stats.tx_packets++;
-
-		status = xenvif_check_gop(vif,
-					  XENVIF_RX_CB(skb)->meta_slots_used,
-					  &npo);
-
-		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
-			flags = 0;
-		else
-			flags = XEN_NETRXF_more_data;
-
-		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
-			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
-		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
-			/* remote but checksummed. */
-			flags |= XEN_NETRXF_data_validated;
-
-		offset = 0;
-		resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
-					status, offset,
-					queue->meta[npo.meta_cons].size,
-					flags);
-
-		if ((1 << queue->meta[npo.meta_cons].gso_type) &
-		    vif->gso_mask) {
-			extra = (struct xen_netif_extra_info *)
-				RING_GET_RESPONSE(&queue->rx,
-						  queue->rx.rsp_prod_pvt++);
-
-			resp->flags |= XEN_NETRXF_extra_info;
-
-			extra->u.gso.type = queue->meta[npo.meta_cons].gso_type;
-			extra->u.gso.size = queue->meta[npo.meta_cons].gso_size;
-			extra->u.gso.pad = 0;
-			extra->u.gso.features = 0;
-
-			extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
-			extra->flags = 0;
-		}
-
-		if (skb->sw_hash) {
-			/* Since the skb got here via xenvif_select_queue()
-			 * we know that the hash has been re-calculated
-			 * according to a configuration set by the frontend
-			 * and therefore we know that it is legitimate to
-			 * pass it to the frontend.
-			 */
-			if (resp->flags & XEN_NETRXF_extra_info)
-				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
-			else
-				resp->flags |= XEN_NETRXF_extra_info;
-
-			extra = (struct xen_netif_extra_info *)
-				RING_GET_RESPONSE(&queue->rx,
-						  queue->rx.rsp_prod_pvt++);
-
-			extra->u.hash.algorithm =
-				XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
-
-			if (skb->l4_hash)
-				extra->u.hash.type =
-					skb->protocol == htons(ETH_P_IP) ?
-					_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
-					_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
-			else
-				extra->u.hash.type =
-					skb->protocol == htons(ETH_P_IP) ?
-					_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
-					_XEN_NETIF_CTRL_HASH_TYPE_IPV6;
-
-			*(uint32_t *)extra->u.hash.value =
-				skb_get_hash_raw(skb);
-
-			extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
-			extra->flags = 0;
-		}
-
-		xenvif_add_frag_responses(queue, status,
-					  queue->meta + npo.meta_cons + 1,
-					  XENVIF_RX_CB(skb)->meta_slots_used);
-
-		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
-
-		need_to_notify |= !!ret;
-
-		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
-		dev_kfree_skb(skb);
-	}
-
-done:
-	if (need_to_notify)
-		notify_remote_via_irq(queue->rx_irq);
-}
-
 void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
 {
 	int more_to_do;
@@ -1951,29 +1384,6 @@
 		notify_remote_via_irq(queue->tx_irq);
 }
 
-static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
-					     u16      id,
-					     s8       st,
-					     u16      offset,
-					     u16      size,
-					     u16      flags)
-{
-	RING_IDX i = queue->rx.rsp_prod_pvt;
-	struct xen_netif_rx_response *resp;
-
-	resp = RING_GET_RESPONSE(&queue->rx, i);
-	resp->offset     = offset;
-	resp->flags      = flags;
-	resp->id         = id;
-	resp->status     = (s16)size;
-	if (st < 0)
-		resp->status = (s16)st;
-
-	queue->rx.rsp_prod_pvt = ++i;
-
-	return resp;
-}
-
 void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
 {
 	int ret;
@@ -2055,170 +1465,6 @@
 	return err;
 }
 
-static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
-{
-	struct xenvif *vif = queue->vif;
-
-	queue->stalled = true;
-
-	/* At least one queue has stalled? Disable the carrier. */
-	spin_lock(&vif->lock);
-	if (vif->stalled_queues++ == 0) {
-		netdev_info(vif->dev, "Guest Rx stalled");
-		netif_carrier_off(vif->dev);
-	}
-	spin_unlock(&vif->lock);
-}
-
-static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
-{
-	struct xenvif *vif = queue->vif;
-
-	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
-	queue->stalled = false;
-
-	/* All queues are ready? Enable the carrier. */
-	spin_lock(&vif->lock);
-	if (--vif->stalled_queues == 0) {
-		netdev_info(vif->dev, "Guest Rx ready");
-		netif_carrier_on(vif->dev);
-	}
-	spin_unlock(&vif->lock);
-}
-
-static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
-{
-	RING_IDX prod, cons;
-
-	prod = queue->rx.sring->req_prod;
-	cons = queue->rx.req_cons;
-
-	return !queue->stalled && prod - cons < 1
-		&& time_after(jiffies,
-			      queue->last_rx_time + queue->vif->stall_timeout);
-}
-
-static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
-{
-	RING_IDX prod, cons;
-
-	prod = queue->rx.sring->req_prod;
-	cons = queue->rx.req_cons;
-
-	return queue->stalled && prod - cons >= 1;
-}
-
-static bool xenvif_have_rx_work(struct xenvif_queue *queue)
-{
-	return xenvif_rx_ring_slots_available(queue)
-		|| (queue->vif->stall_timeout &&
-		    (xenvif_rx_queue_stalled(queue)
-		     || xenvif_rx_queue_ready(queue)))
-		|| kthread_should_stop()
-		|| queue->vif->disabled;
-}
-
-static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
-{
-	struct sk_buff *skb;
-	long timeout;
-
-	skb = skb_peek(&queue->rx_queue);
-	if (!skb)
-		return MAX_SCHEDULE_TIMEOUT;
-
-	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
-	return timeout < 0 ? 0 : timeout;
-}
-
-/* Wait until the guest Rx thread has work.
- *
- * The timeout needs to be adjusted based on the current head of the
- * queue (and not just the head at the beginning).  In particular, if
- * the queue is initially empty an infinite timeout is used and this
- * needs to be reduced when a skb is queued.
- *
- * This cannot be done with wait_event_timeout() because it only
- * calculates the timeout once.
- */
-static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
-{
-	DEFINE_WAIT(wait);
-
-	if (xenvif_have_rx_work(queue))
-		return;
-
-	for (;;) {
-		long ret;
-
-		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
-		if (xenvif_have_rx_work(queue))
-			break;
-		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
-		if (!ret)
-			break;
-	}
-	finish_wait(&queue->wq, &wait);
-}
-
-int xenvif_kthread_guest_rx(void *data)
-{
-	struct xenvif_queue *queue = data;
-	struct xenvif *vif = queue->vif;
-
-	if (!vif->stall_timeout)
-		xenvif_queue_carrier_on(queue);
-
-	for (;;) {
-		xenvif_wait_for_rx_work(queue);
-
-		if (kthread_should_stop())
-			break;
-
-		/* This frontend is found to be rogue, disable it in
-		 * kthread context. Currently this is only set when
-		 * netback finds out frontend sends malformed packet,
-		 * but we cannot disable the interface in softirq
-		 * context so we defer it here, if this thread is
-		 * associated with queue 0.
-		 */
-		if (unlikely(vif->disabled && queue->id == 0)) {
-			xenvif_carrier_off(vif);
-			break;
-		}
-
-		if (!skb_queue_empty(&queue->rx_queue))
-			xenvif_rx_action(queue);
-
-		/* If the guest hasn't provided any Rx slots for a
-		 * while it's probably not responsive, drop the
-		 * carrier so packets are dropped earlier.
-		 */
-		if (vif->stall_timeout) {
-			if (xenvif_rx_queue_stalled(queue))
-				xenvif_queue_carrier_off(queue);
-			else if (xenvif_rx_queue_ready(queue))
-				xenvif_queue_carrier_on(queue);
-		}
-
-		/* Queued packets may have foreign pages from other
-		 * domains.  These cannot be queued indefinitely as
-		 * this would starve guests of grant refs and transmit
-		 * slots.
-		 */
-		xenvif_rx_queue_drop_expired(queue);
-
-		xenvif_rx_queue_maybe_wake(queue);
-
-		cond_resched();
-	}
-
-	/* Bin any remaining skbs */
-	xenvif_rx_queue_purge(queue);
-
-	return 0;
-}
-
 static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
 {
 	/* Dealloc thread must remain running until all inflight
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
new file mode 100644
index 0000000..b1cf7c6
--- /dev/null
+++ b/drivers/net/xen-netback/rx.c
@@ -0,0 +1,631 @@
+/*
+ * Copyright (c) 2016 Citrix Systems Inc.
+ * Copyright (c) 2002-2005, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include "common.h"
+
+#include <linux/kthread.h>
+
+#include <xen/xen.h>
+#include <xen/events.h>
+
+static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
+{
+	RING_IDX prod, cons;
+	struct sk_buff *skb;
+	int needed;
+
+	skb = skb_peek(&queue->rx_queue);
+	if (!skb)
+		return false;
+
+	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
+	if (skb_is_gso(skb))
+		needed++;
+	if (skb->sw_hash)
+		needed++;
+
+	do {
+		prod = queue->rx.sring->req_prod;
+		cons = queue->rx.req_cons;
+
+		if (prod - cons >= needed)
+			return true;
+
+		queue->rx.sring->req_event = prod + 1;
+
+		/* Make sure event is visible before we check prod
+		 * again.
+		 */
+		mb();
+	} while (queue->rx.sring->req_prod != prod);
+
+	return false;
+}
+
+void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->rx_queue.lock, flags);
+
+	__skb_queue_tail(&queue->rx_queue, skb);
+
+	queue->rx_queue_len += skb->len;
+	if (queue->rx_queue_len > queue->rx_queue_max) {
+		struct net_device *dev = queue->vif->dev;
+
+		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+	}
+
+	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+}
+
+static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
+{
+	struct sk_buff *skb;
+
+	spin_lock_irq(&queue->rx_queue.lock);
+
+	skb = __skb_dequeue(&queue->rx_queue);
+	if (skb) {
+		queue->rx_queue_len -= skb->len;
+		if (queue->rx_queue_len < queue->rx_queue_max) {
+			struct netdev_queue *txq;
+
+			txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
+			netif_tx_wake_queue(txq);
+		}
+	}
+
+	spin_unlock_irq(&queue->rx_queue.lock);
+
+	return skb;
+}
+
+static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
+{
+	struct sk_buff *skb;
+
+	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
+		kfree_skb(skb);
+}
+
+static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
+{
+	struct sk_buff *skb;
+
+	for (;;) {
+		skb = skb_peek(&queue->rx_queue);
+		if (!skb)
+			break;
+		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
+			break;
+		xenvif_rx_dequeue(queue);
+		kfree_skb(skb);
+	}
+}
+
+static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
+{
+	unsigned int i;
+	int notify;
+
+	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);
+
+	for (i = 0; i < queue->rx_copy.num; i++) {
+		struct gnttab_copy *op;
+
+		op = &queue->rx_copy.op[i];
+
+		/* If the copy failed, overwrite the status field in
+		 * the corresponding response.
+		 */
+		if (unlikely(op->status != GNTST_okay)) {
+			struct xen_netif_rx_response *rsp;
+
+			rsp = RING_GET_RESPONSE(&queue->rx,
+						queue->rx_copy.idx[i]);
+			rsp->status = op->status;
+		}
+	}
+
+	queue->rx_copy.num = 0;
+
+	/* Push responses for all completed packets. */
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
+	if (notify)
+		notify_remote_via_irq(queue->rx_irq);
+
+	__skb_queue_purge(queue->rx_copy.completed);
+}
+
+static void xenvif_rx_copy_add(struct xenvif_queue *queue,
+			       struct xen_netif_rx_request *req,
+			       unsigned int offset, void *data, size_t len)
+{
+	struct gnttab_copy *op;
+	struct page *page;
+	struct xen_page_foreign *foreign;
+
+	if (queue->rx_copy.num == COPY_BATCH_SIZE)
+		xenvif_rx_copy_flush(queue);
+
+	op = &queue->rx_copy.op[queue->rx_copy.num];
+
+	page = virt_to_page(data);
+
+	op->flags = GNTCOPY_dest_gref;
+
+	foreign = xen_page_foreign(page);
+	if (foreign) {
+		op->source.domid = foreign->domid;
+		op->source.u.ref = foreign->gref;
+		op->flags |= GNTCOPY_source_gref;
+	} else {
+		op->source.u.gmfn = virt_to_gfn(data);
+		op->source.domid  = DOMID_SELF;
+	}
+
+	op->source.offset = xen_offset_in_page(data);
+	op->dest.u.ref    = req->gref;
+	op->dest.domid    = queue->vif->domid;
+	op->dest.offset   = offset;
+	op->len           = len;
+
+	queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
+	queue->rx_copy.num++;
+}
+
+static unsigned int xenvif_gso_type(struct sk_buff *skb)
+{
+	if (skb_is_gso(skb)) {
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+			return XEN_NETIF_GSO_TYPE_TCPV4;
+		else
+			return XEN_NETIF_GSO_TYPE_TCPV6;
+	}
+	return XEN_NETIF_GSO_TYPE_NONE;
+}
+
+struct xenvif_pkt_state {
+	struct sk_buff *skb;
+	size_t remaining_len;
+	struct sk_buff *frag_iter;
+	int frag; /* frag == -1 => frag_iter->head */
+	unsigned int frag_offset;
+	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
+	unsigned int extra_count;
+	unsigned int slot;
+};
+
+static void xenvif_rx_next_skb(struct xenvif_queue *queue,
+			       struct xenvif_pkt_state *pkt)
+{
+	struct sk_buff *skb;
+	unsigned int gso_type;
+
+	skb = xenvif_rx_dequeue(queue);
+
+	queue->stats.tx_bytes += skb->len;
+	queue->stats.tx_packets++;
+
+	/* Reset packet state. */
+	memset(pkt, 0, sizeof(struct xenvif_pkt_state));
+
+	pkt->skb = skb;
+	pkt->frag_iter = skb;
+	pkt->remaining_len = skb->len;
+	pkt->frag = -1;
+
+	gso_type = xenvif_gso_type(skb);
+	if ((1 << gso_type) & queue->vif->gso_mask) {
+		struct xen_netif_extra_info *extra;
+
+		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
+
+		extra->u.gso.type = gso_type;
+		extra->u.gso.size = skb_shinfo(skb)->gso_size;
+		extra->u.gso.pad = 0;
+		extra->u.gso.features = 0;
+		extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
+		extra->flags = 0;
+
+		pkt->extra_count++;
+	}
+
+	if (skb->sw_hash) {
+		struct xen_netif_extra_info *extra;
+
+		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
+
+		extra->u.hash.algorithm =
+			XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
+
+		if (skb->l4_hash)
+			extra->u.hash.type =
+				skb->protocol == htons(ETH_P_IP) ?
+				_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
+				_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
+		else
+			extra->u.hash.type =
+				skb->protocol == htons(ETH_P_IP) ?
+				_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
+				_XEN_NETIF_CTRL_HASH_TYPE_IPV6;
+
+		*(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);
+
+		extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
+		extra->flags = 0;
+
+		pkt->extra_count++;
+	}
+}
+
+static void xenvif_rx_complete(struct xenvif_queue *queue,
+			       struct xenvif_pkt_state *pkt)
+{
+	/* All responses are ready to be pushed. */
+	queue->rx.rsp_prod_pvt = queue->rx.req_cons;
+
+	__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
+}
+
+static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
+{
+	struct sk_buff *frag_iter = pkt->frag_iter;
+	unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;
+
+	pkt->frag++;
+	pkt->frag_offset = 0;
+
+	if (pkt->frag >= nr_frags) {
+		if (frag_iter == pkt->skb)
+			pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
+		else
+			pkt->frag_iter = frag_iter->next;
+
+		pkt->frag = -1;
+	}
+}
+
+static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
+				 struct xenvif_pkt_state *pkt,
+				 unsigned int offset, void **data,
+				 size_t *len)
+{
+	struct sk_buff *frag_iter = pkt->frag_iter;
+	void *frag_data;
+	size_t frag_len, chunk_len;
+
+	BUG_ON(!frag_iter);
+
+	if (pkt->frag == -1) {
+		frag_data = frag_iter->data;
+		frag_len = skb_headlen(frag_iter);
+	} else {
+		skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];
+
+		frag_data = skb_frag_address(frag);
+		frag_len = skb_frag_size(frag);
+	}
+
+	frag_data += pkt->frag_offset;
+	frag_len -= pkt->frag_offset;
+
+	chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
+	chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
+					     xen_offset_in_page(frag_data));
+
+	pkt->frag_offset += chunk_len;
+
+	/* Advance to next frag? */
+	if (frag_len == chunk_len)
+		xenvif_rx_next_frag(pkt);
+
+	*data = frag_data;
+	*len = chunk_len;
+}
+
+static void xenvif_rx_data_slot(struct xenvif_queue *queue,
+				struct xenvif_pkt_state *pkt,
+				struct xen_netif_rx_request *req,
+				struct xen_netif_rx_response *rsp)
+{
+	unsigned int offset = 0;
+	unsigned int flags;
+
+	do {
+		size_t len;
+		void *data;
+
+		xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
+		xenvif_rx_copy_add(queue, req, offset, data, len);
+
+		offset += len;
+		pkt->remaining_len -= len;
+
+	} while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);
+
+	if (pkt->remaining_len > 0)
+		flags = XEN_NETRXF_more_data;
+	else
+		flags = 0;
+
+	if (pkt->slot == 0) {
+		struct sk_buff *skb = pkt->skb;
+
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			flags |= XEN_NETRXF_csum_blank |
+				 XEN_NETRXF_data_validated;
+		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+			flags |= XEN_NETRXF_data_validated;
+
+		if (pkt->extra_count != 0)
+			flags |= XEN_NETRXF_extra_info;
+	}
+
+	rsp->offset = 0;
+	rsp->flags = flags;
+	rsp->id = req->id;
+	rsp->status = (s16)offset;
+}
+
+static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
+				 struct xenvif_pkt_state *pkt,
+				 struct xen_netif_rx_request *req,
+				 struct xen_netif_rx_response *rsp)
+{
+	struct xen_netif_extra_info *extra = (void *)rsp;
+	unsigned int i;
+
+	pkt->extra_count--;
+
+	for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
+		if (pkt->extras[i].type) {
+			*extra = pkt->extras[i];
+
+			if (pkt->extra_count != 0)
+				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+
+			pkt->extras[i].type = 0;
+			return;
+		}
+	}
+	BUG();
+}
+
+void xenvif_rx_skb(struct xenvif_queue *queue)
+{
+	struct xenvif_pkt_state pkt;
+
+	xenvif_rx_next_skb(queue, &pkt);
+
+	queue->last_rx_time = jiffies;
+
+	do {
+		struct xen_netif_rx_request *req;
+		struct xen_netif_rx_response *rsp;
+
+		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
+		rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);
+
+		/* Extras must go after the first data slot */
+		if (pkt.slot != 0 && pkt.extra_count != 0)
+			xenvif_rx_extra_slot(queue, &pkt, req, rsp);
+		else
+			xenvif_rx_data_slot(queue, &pkt, req, rsp);
+
+		queue->rx.req_cons++;
+		pkt.slot++;
+	} while (pkt.remaining_len > 0 || pkt.extra_count != 0);
+
+	xenvif_rx_complete(queue, &pkt);
+}
+
+#define RX_BATCH_SIZE 64
+
+void xenvif_rx_action(struct xenvif_queue *queue)
+{
+	struct sk_buff_head completed_skbs;
+	unsigned int work_done = 0;
+
+	__skb_queue_head_init(&completed_skbs);
+	queue->rx_copy.completed = &completed_skbs;
+
+	while (xenvif_rx_ring_slots_available(queue) &&
+	       work_done < RX_BATCH_SIZE) {
+		xenvif_rx_skb(queue);
+		work_done++;
+	}
+
+	/* Flush any pending copies and complete all skbs. */
+	xenvif_rx_copy_flush(queue);
+}
+
+static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
+{
+	RING_IDX prod, cons;
+
+	prod = queue->rx.sring->req_prod;
+	cons = queue->rx.req_cons;
+
+	return !queue->stalled &&
+		prod - cons < 1 &&
+		time_after(jiffies,
+			   queue->last_rx_time + queue->vif->stall_timeout);
+}
+
+static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
+{
+	RING_IDX prod, cons;
+
+	prod = queue->rx.sring->req_prod;
+	cons = queue->rx.req_cons;
+
+	return queue->stalled && prod - cons >= 1;
+}
+
+static bool xenvif_have_rx_work(struct xenvif_queue *queue)
+{
+	return xenvif_rx_ring_slots_available(queue) ||
+		(queue->vif->stall_timeout &&
+		 (xenvif_rx_queue_stalled(queue) ||
+		  xenvif_rx_queue_ready(queue))) ||
+		kthread_should_stop() ||
+		queue->vif->disabled;
+}
+
+static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
+{
+	struct sk_buff *skb;
+	long timeout;
+
+	skb = skb_peek(&queue->rx_queue);
+	if (!skb)
+		return MAX_SCHEDULE_TIMEOUT;
+
+	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
+	return timeout < 0 ? 0 : timeout;
+}
+
+/* Wait until the guest Rx thread has work.
+ *
+ * The timeout needs to be adjusted based on the current head of the
+ * queue (and not just the head at the beginning).  In particular, if
+ * the queue is initially empty an infinite timeout is used and this
+ * needs to be reduced when a skb is queued.
+ *
+ * This cannot be done with wait_event_timeout() because it only
+ * calculates the timeout once.
+ */
+static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
+{
+	DEFINE_WAIT(wait);
+
+	if (xenvif_have_rx_work(queue))
+		return;
+
+	for (;;) {
+		long ret;
+
+		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
+		if (xenvif_have_rx_work(queue))
+			break;
+		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
+		if (!ret)
+			break;
+	}
+	finish_wait(&queue->wq, &wait);
+}
+
+static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
+{
+	struct xenvif *vif = queue->vif;
+
+	queue->stalled = true;
+
+	/* At least one queue has stalled? Disable the carrier. */
+	spin_lock(&vif->lock);
+	if (vif->stalled_queues++ == 0) {
+		netdev_info(vif->dev, "Guest Rx stalled");
+		netif_carrier_off(vif->dev);
+	}
+	spin_unlock(&vif->lock);
+}
+
+static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
+{
+	struct xenvif *vif = queue->vif;
+
+	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
+	queue->stalled = false;
+
+	/* All queues are ready? Enable the carrier. */
+	spin_lock(&vif->lock);
+	if (--vif->stalled_queues == 0) {
+		netdev_info(vif->dev, "Guest Rx ready");
+		netif_carrier_on(vif->dev);
+	}
+	spin_unlock(&vif->lock);
+}
+
+int xenvif_kthread_guest_rx(void *data)
+{
+	struct xenvif_queue *queue = data;
+	struct xenvif *vif = queue->vif;
+
+	if (!vif->stall_timeout)
+		xenvif_queue_carrier_on(queue);
+
+	for (;;) {
+		xenvif_wait_for_rx_work(queue);
+
+		if (kthread_should_stop())
+			break;
+
+		/* This frontend is found to be rogue, disable it in
+		 * kthread context. Currently this is only set when
+		 * netback finds out frontend sends malformed packet,
+		 * but we cannot disable the interface in softirq
+		 * context so we defer it here, if this thread is
+		 * associated with queue 0.
+		 */
+		if (unlikely(vif->disabled && queue->id == 0)) {
+			xenvif_carrier_off(vif);
+			break;
+		}
+
+		if (!skb_queue_empty(&queue->rx_queue))
+			xenvif_rx_action(queue);
+
+		/* If the guest hasn't provided any Rx slots for a
+		 * while it's probably not responsive, drop the
+		 * carrier so packets are dropped earlier.
+		 */
+		if (vif->stall_timeout) {
+			if (xenvif_rx_queue_stalled(queue))
+				xenvif_queue_carrier_off(queue);
+			else if (xenvif_rx_queue_ready(queue))
+				xenvif_queue_carrier_on(queue);
+		}
+
+		/* Queued packets may have foreign pages from other
+		 * domains.  These cannot be queued indefinitely as
+		 * this would starve guests of grant refs and transmit
+		 * slots.
+		 */
+		xenvif_rx_queue_drop_expired(queue);
+
+		cond_resched();
+	}
+
+	/* Bin any remaining skbs */
+	xenvif_rx_queue_purge(queue);
+
+	return 0;
+}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index daf4c78..8674e18 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -165,7 +165,7 @@
 	return count;
 }
 
-static int xenvif_dump_open(struct inode *inode, struct file *filp)
+static int xenvif_io_ring_open(struct inode *inode, struct file *filp)
 {
 	int ret;
 	void *queue = NULL;
@@ -179,13 +179,35 @@
 
 static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
 	.owner = THIS_MODULE,
-	.open = xenvif_dump_open,
+	.open = xenvif_io_ring_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
 	.release = single_release,
 	.write = xenvif_write_io_ring,
 };
 
+static int xenvif_read_ctrl(struct seq_file *m, void *v)
+{
+	struct xenvif *vif = m->private;
+
+	xenvif_dump_hash_info(vif, m);
+
+	return 0;
+}
+
+static int xenvif_ctrl_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, xenvif_read_ctrl, inode->i_private);
+}
+
+static const struct file_operations xenvif_dbg_ctrl_ops_fops = {
+	.owner = THIS_MODULE,
+	.open = xenvif_ctrl_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
 static void xenvif_debugfs_addif(struct xenvif *vif)
 {
 	struct dentry *pfile;
@@ -210,6 +232,17 @@
 				pr_warn("Creation of io_ring file returned %ld!\n",
 					PTR_ERR(pfile));
 		}
+
+		if (vif->ctrl_irq) {
+			pfile = debugfs_create_file("ctrl",
+						    S_IRUSR,
+						    vif->xenvif_dbg_root,
+						    vif,
+						    &xenvif_dbg_ctrl_ops_fops);
+			if (IS_ERR_OR_NULL(pfile))
+				pr_warn("Creation of ctrl file returned %ld!\n",
+					PTR_ERR(pfile));
+		}
 	} else
 		netdev_warn(vif->dev,
 			    "Creation of vif debugfs dir returned %ld!\n",
@@ -1135,7 +1168,6 @@
 	vif->can_sg = !!val;
 
 	vif->gso_mask = 0;
-	vif->gso_prefix_mask = 0;
 
 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
 			 "%d", &val) < 0)
@@ -1143,32 +1175,12 @@
 	if (val)
 		vif->gso_mask |= GSO_BIT(TCPV4);
 
-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
-			 "%d", &val) < 0)
-		val = 0;
-	if (val)
-		vif->gso_prefix_mask |= GSO_BIT(TCPV4);
-
 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
 			 "%d", &val) < 0)
 		val = 0;
 	if (val)
 		vif->gso_mask |= GSO_BIT(TCPV6);
 
-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
-			 "%d", &val) < 0)
-		val = 0;
-	if (val)
-		vif->gso_prefix_mask |= GSO_BIT(TCPV6);
-
-	if (vif->gso_mask & vif->gso_prefix_mask) {
-		xenbus_dev_fatal(dev, err,
-				 "%s: gso and gso prefix flags are not "
-				 "mutually exclusive",
-				 dev->otherend);
-		return -EOPNOTSUPP;
-	}
-
 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
 			 "%d", &val) < 0)
 		val = 0;
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 124c243..8b2b740 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -89,7 +89,7 @@
 	  Select Y if unsure
 
 config NVDIMM_DAX
-	bool "NVDIMM DAX: Raw access to persistent memory"
+	tristate "NVDIMM DAX: Raw access to persistent memory"
 	default LIBNVDIMM
 	depends on NVDIMM_PFN
 	help
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 935866f..a8b6949 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -217,6 +217,8 @@
 		return rc;
 	if (cmd_rc < 0)
 		return cmd_rc;
+
+	nvdimm_clear_from_poison_list(nvdimm_bus, phys, len);
 	return clear_err.cleared;
 }
 EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 4d7bbd2..7ceba08 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -547,11 +547,12 @@
 }
 EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
 
-static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
+static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length,
+			gfp_t flags)
 {
 	struct nd_poison *pl;
 
-	pl = kzalloc(sizeof(*pl), GFP_KERNEL);
+	pl = kzalloc(sizeof(*pl), flags);
 	if (!pl)
 		return -ENOMEM;
 
@@ -567,7 +568,7 @@
 	struct nd_poison *pl;
 
 	if (list_empty(&nvdimm_bus->poison_list))
-		return add_poison(nvdimm_bus, addr, length);
+		return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
 
 	/*
 	 * There is a chance this is a duplicate, check for those first.
@@ -587,7 +588,7 @@
 	 * as any overlapping ranges will get resolved when the list is consumed
 	 * and converted to badblocks
 	 */
-	return add_poison(nvdimm_bus, addr, length);
+	return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
 }
 
 int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
@@ -602,6 +603,70 @@
 }
 EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);
 
+void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
+		phys_addr_t start, unsigned int len)
+{
+	struct list_head *poison_list = &nvdimm_bus->poison_list;
+	u64 clr_end = start + len - 1;
+	struct nd_poison *pl, *next;
+
+	nvdimm_bus_lock(&nvdimm_bus->dev);
+	WARN_ON_ONCE(list_empty(poison_list));
+
+	/*
+	 * [start, clr_end] is the poison interval being cleared.
+	 * [pl->start, pl_end] is the poison_list entry we're comparing
+	 * the above interval against. The poison list entry may need
+	 * to be modified (update either start or length), deleted, or
+	 * split into two based on the overlap characteristics
+	 */
+
+	list_for_each_entry_safe(pl, next, poison_list, list) {
+		u64 pl_end = pl->start + pl->length - 1;
+
+		/* Skip intervals with no intersection */
+		if (pl_end < start)
+			continue;
+		if (pl->start >  clr_end)
+			continue;
+		/* Delete completely overlapped poison entries */
+		if ((pl->start >= start) && (pl_end <= clr_end)) {
+			list_del(&pl->list);
+			kfree(pl);
+			continue;
+		}
+		/* Adjust start point of partially cleared entries */
+		if ((start <= pl->start) && (clr_end > pl->start)) {
+			pl->length -= clr_end - pl->start + 1;
+			pl->start = clr_end + 1;
+			continue;
+		}
+		/* Adjust pl->length for partial clearing at the tail end */
+		if ((pl->start < start) && (pl_end <= clr_end)) {
+			/* pl->start remains the same */
+			pl->length = start - pl->start;
+			continue;
+		}
+		/*
+		 * If clearing in the middle of an entry, we split it into
+		 * two by modifying the current entry to represent one half of
+		 * the split, and adding a new entry for the second half.
+		 */
+		if ((pl->start < start) && (pl_end > clr_end)) {
+			u64 new_start = clr_end + 1;
+			u64 new_len = pl_end - new_start + 1;
+
+			/* Add new entry covering the right half */
+			add_poison(nvdimm_bus, new_start, new_len, GFP_NOIO);
+			/* Adjust this entry to cover the left half */
+			pl->length = start - pl->start;
+			continue;
+		}
+	}
+	nvdimm_bus_unlock(&nvdimm_bus->dev);
+}
+EXPORT_SYMBOL_GPL(nvdimm_clear_from_poison_list);
+
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
 {
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 71d12bb..619834e 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -26,6 +26,14 @@
 	struct nvdimm_drvdata *ndd;
 	int rc;
 
+	rc = nvdimm_check_config_data(dev);
+	if (rc) {
+		/* not required for non-aliased nvdimm, ex. NVDIMM-N */
+		if (rc == -ENOTTY)
+			rc = 0;
+		return rc;
+	}
+
 	ndd = kzalloc(sizeof(*ndd), GFP_KERNEL);
 	if (!ndd)
 		return -ENOMEM;
@@ -72,6 +80,9 @@
 {
 	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
 
+	if (!ndd)
+		return 0;
+
 	nvdimm_bus_lock(dev);
 	dev_set_drvdata(dev, NULL);
 	nvdimm_bus_unlock(dev);
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index d9bba5e..d614493 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -28,28 +28,30 @@
  * Retrieve bus and dimm handle and return if this bus supports
  * get_config_data commands
  */
-static int __validate_dimm(struct nvdimm_drvdata *ndd)
+int nvdimm_check_config_data(struct device *dev)
 {
-	struct nvdimm *nvdimm;
+	struct nvdimm *nvdimm = to_nvdimm(dev);
 
-	if (!ndd)
-		return -EINVAL;
-
-	nvdimm = to_nvdimm(ndd->dev);
-
-	if (!nvdimm->cmd_mask)
-		return -ENXIO;
-	if (!test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask))
-		return -ENXIO;
+	if (!nvdimm->cmd_mask ||
+	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
+		if (nvdimm->flags & NDD_ALIASING)
+			return -ENXIO;
+		else
+			return -ENOTTY;
+	}
 
 	return 0;
 }
 
 static int validate_dimm(struct nvdimm_drvdata *ndd)
 {
-	int rc = __validate_dimm(ndd);
+	int rc;
 
-	if (rc && ndd)
+	if (!ndd)
+		return -EINVAL;
+
+	rc = nvdimm_check_config_data(ndd->dev);
+	if (rc)
 		dev_dbg(ndd->dev, "%pf: %s error: %d\n",
 				__builtin_return_address(0), __func__, rc);
 	return rc;
@@ -263,6 +265,12 @@
 }
 EXPORT_SYMBOL_GPL(nvdimm_name);
 
+struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
+{
+	return &nvdimm->dev.kobj;
+}
+EXPORT_SYMBOL_GPL(nvdimm_kobj);
+
 unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
 {
 	return nvdimm->cmd_mask;
@@ -378,40 +386,166 @@
 }
 EXPORT_SYMBOL_GPL(nvdimm_create);
 
+int alias_dpa_busy(struct device *dev, void *data)
+{
+	resource_size_t map_end, blk_start, new, busy;
+	struct blk_alloc_info *info = data;
+	struct nd_mapping *nd_mapping;
+	struct nd_region *nd_region;
+	struct nvdimm_drvdata *ndd;
+	struct resource *res;
+	int i;
+
+	if (!is_nd_pmem(dev))
+		return 0;
+
+	nd_region = to_nd_region(dev);
+	for (i = 0; i < nd_region->ndr_mappings; i++) {
+		nd_mapping  = &nd_region->mapping[i];
+		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
+			break;
+	}
+
+	if (i >= nd_region->ndr_mappings)
+		return 0;
+
+	ndd = to_ndd(nd_mapping);
+	map_end = nd_mapping->start + nd_mapping->size - 1;
+	blk_start = nd_mapping->start;
+
+	/*
+	 * In the allocation case ->res is set to free space that we are
+	 * looking to validate against PMEM aliasing collision rules
+	 * (i.e. BLK is allocated after all aliased PMEM).
+	 */
+	if (info->res) {
+		if (info->res->start >= nd_mapping->start
+				&& info->res->start < map_end)
+			/* pass */;
+		else
+			return 0;
+	}
+
+ retry:
+	/*
+	 * Find the free dpa from the end of the last pmem allocation to
+	 * the end of the interleave-set mapping that is not already
+	 * covered by a blk allocation.
+	 */
+	busy = 0;
+	for_each_dpa_resource(ndd, res) {
+		if ((res->start >= blk_start && res->start < map_end)
+				|| (res->end >= blk_start
+					&& res->end <= map_end)) {
+			if (strncmp(res->name, "pmem", 4) == 0) {
+				new = max(blk_start, min(map_end + 1,
+							res->end + 1));
+				if (new != blk_start) {
+					blk_start = new;
+					goto retry;
+				}
+			} else
+				busy += min(map_end, res->end)
+					- max(nd_mapping->start, res->start) + 1;
+		} else if (nd_mapping->start > res->start
+				&& map_end < res->end) {
+			/* total eclipse of the PMEM region mapping */
+			busy += nd_mapping->size;
+			break;
+		}
+	}
+
+	/* update the free space range with the probed blk_start */
+	if (info->res && blk_start > info->res->start) {
+		info->res->start = max(info->res->start, blk_start);
+		if (info->res->start > info->res->end)
+			info->res->end = info->res->start - 1;
+		return 1;
+	}
+
+	info->available -= blk_start - nd_mapping->start + busy;
+
+	return 0;
+}
+
+static int blk_dpa_busy(struct device *dev, void *data)
+{
+	struct blk_alloc_info *info = data;
+	struct nd_mapping *nd_mapping;
+	struct nd_region *nd_region;
+	resource_size_t map_end;
+	int i;
+
+	if (!is_nd_pmem(dev))
+		return 0;
+
+	nd_region = to_nd_region(dev);
+	for (i = 0; i < nd_region->ndr_mappings; i++) {
+		nd_mapping  = &nd_region->mapping[i];
+		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
+			break;
+	}
+
+	if (i >= nd_region->ndr_mappings)
+		return 0;
+
+	map_end = nd_mapping->start + nd_mapping->size - 1;
+	if (info->res->start >= nd_mapping->start
+			&& info->res->start < map_end) {
+		if (info->res->end <= map_end) {
+			info->busy = 0;
+			return 1;
+		} else {
+			info->busy -= info->res->end - map_end;
+			return 0;
+		}
+	} else if (info->res->end >= nd_mapping->start
+			&& info->res->end <= map_end) {
+		info->busy -= nd_mapping->start - info->res->start;
+		return 0;
+	} else {
+		info->busy -= nd_mapping->size;
+		return 0;
+	}
+}
+
 /**
  * nd_blk_available_dpa - account the unused dpa of BLK region
  * @nd_mapping: container of dpa-resource-root + labels
  *
- * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges.
+ * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
+ * we arrange for them to never start at an lower dpa than the last
+ * PMEM allocation in an aliased region.
  */
-resource_size_t nd_blk_available_dpa(struct nd_mapping *nd_mapping)
+resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
 {
+	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
+	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
-	resource_size_t map_end, busy = 0, available;
+	struct blk_alloc_info info = {
+		.nd_mapping = nd_mapping,
+		.available = nd_mapping->size,
+		.res = NULL,
+	};
 	struct resource *res;
 
 	if (!ndd)
 		return 0;
 
-	map_end = nd_mapping->start + nd_mapping->size - 1;
-	for_each_dpa_resource(ndd, res)
-		if (res->start >= nd_mapping->start && res->start < map_end) {
-			resource_size_t end = min(map_end, res->end);
+	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
 
-			busy += end - res->start + 1;
-		} else if (res->end >= nd_mapping->start
-				&& res->end <= map_end) {
-			busy += res->end - nd_mapping->start;
-		} else if (nd_mapping->start > res->start
-				&& nd_mapping->start < res->end) {
-			/* total eclipse of the BLK region mapping */
-			busy += nd_mapping->size;
-		}
+	/* now account for busy blk allocations in unaliased dpa */
+	for_each_dpa_resource(ndd, res) {
+		if (strncmp(res->name, "blk", 3) != 0)
+			continue;
 
-	available = map_end - nd_mapping->start + 1;
-	if (busy < available)
-		return available - busy;
-	return 0;
+		info.res = res;
+		info.busy = resource_size(res);
+		device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
+		info.available -= info.busy;
+	}
+
+	return info.available;
 }
 
 /**
@@ -443,21 +577,16 @@
 	map_start = nd_mapping->start;
 	map_end = map_start + nd_mapping->size - 1;
 	blk_start = max(map_start, map_end + 1 - *overlap);
-	for_each_dpa_resource(ndd, res)
+	for_each_dpa_resource(ndd, res) {
 		if (res->start >= map_start && res->start < map_end) {
 			if (strncmp(res->name, "blk", 3) == 0)
-				blk_start = min(blk_start, res->start);
-			else if (res->start != map_start) {
+				blk_start = min(blk_start,
+						max(map_start, res->start));
+			else if (res->end > map_end) {
 				reason = "misaligned to iset";
 				goto err;
-			} else {
-				if (busy) {
-					reason = "duplicate overlapping PMEM reservations?";
-					goto err;
-				}
+			} else
 				busy += resource_size(res);
-				continue;
-			}
 		} else if (res->end >= map_start && res->end <= map_end) {
 			if (strncmp(res->name, "blk", 3) == 0) {
 				/*
@@ -466,15 +595,14 @@
 				 * be used for BLK.
 				 */
 				blk_start = map_start;
-			} else {
-				reason = "misaligned to iset";
-				goto err;
-			}
+			} else
+				busy += resource_size(res);
 		} else if (map_start > res->start && map_start < res->end) {
 			/* total eclipse of the mapping */
 			busy += nd_mapping->size;
 			blk_start = map_start;
 		}
+	}
 
 	*overlap = map_end + 1 - blk_start;
 	available = blk_start - map_start;
@@ -483,10 +611,6 @@
 	return 0;
 
  err:
-	/*
-	 * Something is wrong, PMEM must align with the start of the
-	 * interleave set, and there can only be one allocation per set.
-	 */
 	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
 	return 0;
 }
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 96526dc..fac7cabe 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -494,11 +494,13 @@
 		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
 		int pos)
 {
-	u64 cookie = nd_region_interleave_set_cookie(nd_region), rawsize;
+	u64 cookie = nd_region_interleave_set_cookie(nd_region);
 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
-	struct nd_namespace_label *victim_label;
+	struct nd_label_ent *label_ent, *victim = NULL;
 	struct nd_namespace_label *nd_label;
 	struct nd_namespace_index *nsindex;
+	struct nd_label_id label_id;
+	struct resource *res;
 	unsigned long *free;
 	u32 nslot, slot;
 	size_t offset;
@@ -507,6 +509,16 @@
 	if (!preamble_next(ndd, &nsindex, &free, &nslot))
 		return -ENXIO;
 
+	nd_label_gen_id(&label_id, nspm->uuid, 0);
+	for_each_dpa_resource(ndd, res)
+		if (strcmp(res->name, label_id.id) == 0)
+			break;
+
+	if (!res) {
+		WARN_ON_ONCE(1);
+		return -ENXIO;
+	}
+
 	/* allocate and write the label to the staging (next) index */
 	slot = nd_label_alloc_slot(ndd);
 	if (slot == UINT_MAX)
@@ -522,11 +534,10 @@
 	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
 	nd_label->position = __cpu_to_le16(pos);
 	nd_label->isetcookie = __cpu_to_le64(cookie);
-	rawsize = div_u64(resource_size(&nspm->nsio.res),
-			nd_region->ndr_mappings);
-	nd_label->rawsize = __cpu_to_le64(rawsize);
-	nd_label->dpa = __cpu_to_le64(nd_mapping->start);
+	nd_label->rawsize = __cpu_to_le64(resource_size(res));
+	nd_label->dpa = __cpu_to_le64(res->start);
 	nd_label->slot = __cpu_to_le32(slot);
+	nd_dbg_dpa(nd_region, ndd, res, "%s\n", __func__);
 
 	/* update label */
 	offset = nd_label_offset(ndd, nd_label);
@@ -536,38 +547,43 @@
 		return rc;
 
 	/* Garbage collect the previous label */
-	victim_label = nd_mapping->labels[0];
-	if (victim_label) {
-		slot = to_slot(ndd, victim_label);
-		nd_label_free_slot(ndd, slot);
+	mutex_lock(&nd_mapping->lock);
+	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
+		if (!label_ent->label)
+			continue;
+		if (memcmp(nspm->uuid, label_ent->label->uuid,
+					NSLABEL_UUID_LEN) != 0)
+			continue;
+		victim = label_ent;
+		list_move_tail(&victim->list, &nd_mapping->labels);
+		break;
+	}
+	if (victim) {
 		dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
+		slot = to_slot(ndd, victim->label);
+		nd_label_free_slot(ndd, slot);
+		victim->label = NULL;
 	}
 
 	/* update index */
 	rc = nd_label_write_index(ndd, ndd->ns_next,
 			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
-	if (rc < 0)
-		return rc;
+	if (rc == 0) {
+		list_for_each_entry(label_ent, &nd_mapping->labels, list)
+			if (!label_ent->label) {
+				label_ent->label = nd_label;
+				nd_label = NULL;
+				break;
+			}
+		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
+				"failed to track label: %d\n",
+				to_slot(ndd, nd_label));
+		if (nd_label)
+			rc = -ENXIO;
+	}
+	mutex_unlock(&nd_mapping->lock);
 
-	nd_mapping->labels[0] = nd_label;
-
-	return 0;
-}
-
-static void del_label(struct nd_mapping *nd_mapping, int l)
-{
-	struct nd_namespace_label *next_label, *nd_label;
-	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
-	unsigned int slot;
-	int j;
-
-	nd_label = nd_mapping->labels[l];
-	slot = to_slot(ndd, nd_label);
-	dev_vdbg(ndd->dev, "%s: clear: %d\n", __func__, slot);
-
-	for (j = l; (next_label = nd_mapping->labels[j + 1]); j++)
-		nd_mapping->labels[j] = next_label;
-	nd_mapping->labels[j] = NULL;
+	return rc;
 }
 
 static bool is_old_resource(struct resource *res, struct resource **list, int n)
@@ -607,14 +623,16 @@
 		struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
 		int num_labels)
 {
-	int i, l, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
+	int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
 	struct nd_namespace_label *nd_label;
+	struct nd_label_ent *label_ent, *e;
 	struct nd_namespace_index *nsindex;
 	unsigned long *free, *victim_map = NULL;
 	struct resource *res, **old_res_list;
 	struct nd_label_id label_id;
 	u8 uuid[NSLABEL_UUID_LEN];
+	LIST_HEAD(list);
 	u32 nslot, slot;
 
 	if (!preamble_next(ndd, &nsindex, &free, &nslot))
@@ -736,15 +754,22 @@
 	 * entries in nd_mapping->labels
 	 */
 	nlabel = 0;
-	for_each_label(l, nd_label, nd_mapping->labels) {
+	mutex_lock(&nd_mapping->lock);
+	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
+		nd_label = label_ent->label;
+		if (!nd_label)
+			continue;
 		nlabel++;
 		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
 		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
 			continue;
 		nlabel--;
-		del_label(nd_mapping, l);
-		l--; /* retry with the new label at this index */
+		list_move(&label_ent->list, &list);
+		label_ent->label = NULL;
 	}
+	list_splice_tail_init(&list, &nd_mapping->labels);
+	mutex_unlock(&nd_mapping->lock);
+
 	if (nlabel + nsblk->num_resources > num_labels) {
 		/*
 		 * Bug, we can't end up with more resources than
@@ -755,6 +780,15 @@
 		goto out;
 	}
 
+	mutex_lock(&nd_mapping->lock);
+	label_ent = list_first_entry_or_null(&nd_mapping->labels,
+			typeof(*label_ent), list);
+	if (!label_ent) {
+		WARN_ON(1);
+		mutex_unlock(&nd_mapping->lock);
+		rc = -ENXIO;
+		goto out;
+	}
 	for_each_clear_bit_le(slot, free, nslot) {
 		nd_label = nd_label_base(ndd) + slot;
 		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
@@ -762,11 +796,19 @@
 			continue;
 		res = to_resource(ndd, nd_label);
 		res->flags &= ~DPA_RESOURCE_ADJUSTED;
-		dev_vdbg(&nsblk->common.dev, "assign label[%d] slot: %d\n",
-				l, slot);
-		nd_mapping->labels[l++] = nd_label;
+		dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
+		list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
+			if (label_ent->label)
+				continue;
+			label_ent->label = nd_label;
+			nd_label = NULL;
+			break;
+		}
+		if (nd_label)
+			dev_WARN(&nsblk->common.dev,
+					"failed to track label slot%d\n", slot);
 	}
-	nd_mapping->labels[l] = NULL;
+	mutex_unlock(&nd_mapping->lock);
 
  out:
 	kfree(old_res_list);
@@ -788,32 +830,28 @@
 
 static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
 {
-	int i, l, old_num_labels = 0;
+	int i, old_num_labels = 0;
+	struct nd_label_ent *label_ent;
 	struct nd_namespace_index *nsindex;
-	struct nd_namespace_label *nd_label;
 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
-	size_t size = (num_labels + 1) * sizeof(struct nd_namespace_label *);
 
-	for_each_label(l, nd_label, nd_mapping->labels)
+	mutex_lock(&nd_mapping->lock);
+	list_for_each_entry(label_ent, &nd_mapping->labels, list)
 		old_num_labels++;
+	mutex_unlock(&nd_mapping->lock);
 
 	/*
 	 * We need to preserve all the old labels for the mapping so
 	 * they can be garbage collected after writing the new labels.
 	 */
-	if (num_labels > old_num_labels) {
-		struct nd_namespace_label **labels;
-
-		labels = krealloc(nd_mapping->labels, size, GFP_KERNEL);
-		if (!labels)
+	for (i = old_num_labels; i < num_labels; i++) {
+		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
+		if (!label_ent)
 			return -ENOMEM;
-		nd_mapping->labels = labels;
+		mutex_lock(&nd_mapping->lock);
+		list_add_tail(&label_ent->list, &nd_mapping->labels);
+		mutex_unlock(&nd_mapping->lock);
 	}
-	if (!nd_mapping->labels)
-		return -ENOMEM;
-
-	for (i = old_num_labels; i <= num_labels; i++)
-		nd_mapping->labels[i] = NULL;
 
 	if (ndd->ns_current == -1 || ndd->ns_next == -1)
 		/* pass */;
@@ -837,42 +875,45 @@
 static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
 {
 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
-	struct nd_namespace_label *nd_label;
+	struct nd_label_ent *label_ent, *e;
 	struct nd_namespace_index *nsindex;
 	u8 label_uuid[NSLABEL_UUID_LEN];
-	int l, num_freed = 0;
 	unsigned long *free;
+	LIST_HEAD(list);
 	u32 nslot, slot;
+	int active = 0;
 
 	if (!uuid)
 		return 0;
 
 	/* no index || no labels == nothing to delete */
-	if (!preamble_next(ndd, &nsindex, &free, &nslot)
-			|| !nd_mapping->labels)
+	if (!preamble_next(ndd, &nsindex, &free, &nslot))
 		return 0;
 
-	for_each_label(l, nd_label, nd_mapping->labels) {
+	mutex_lock(&nd_mapping->lock);
+	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
+		struct nd_namespace_label *nd_label = label_ent->label;
+
+		if (!nd_label)
+			continue;
+		active++;
 		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
 		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
 			continue;
+		active--;
 		slot = to_slot(ndd, nd_label);
 		nd_label_free_slot(ndd, slot);
 		dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
-		del_label(nd_mapping, l);
-		num_freed++;
-		l--; /* retry with new label at this index */
+		list_move_tail(&label_ent->list, &list);
+		label_ent->label = NULL;
 	}
+	list_splice_tail_init(&list, &nd_mapping->labels);
 
-	if (num_freed > l) {
-		/*
-		 * num_freed will only ever be > l when we delete the last
-		 * label
-		 */
-		kfree(nd_mapping->labels);
-		nd_mapping->labels = NULL;
-		dev_dbg(ndd->dev, "%s: no more labels\n", __func__);
+	if (active == 0) {
+		nd_mapping_free_labels(nd_mapping);
+		dev_dbg(ndd->dev, "%s: no more active labels\n", __func__);
 	}
+	mutex_unlock(&nd_mapping->lock);
 
 	return nd_label_write_index(ndd, ndd->ns_next,
 			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
@@ -885,7 +926,9 @@
 
 	for (i = 0; i < nd_region->ndr_mappings; i++) {
 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
-		int rc;
+		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+		struct resource *res;
+		int rc, count = 0;
 
 		if (size == 0) {
 			rc = del_labels(nd_mapping, nspm->uuid);
@@ -894,7 +937,12 @@
 			continue;
 		}
 
-		rc = init_labels(nd_mapping, 1);
+		for_each_dpa_resource(ndd, res)
+			if (strncmp(res->name, "pmem", 3) == 0)
+				count++;
+		WARN_ON_ONCE(!count);
+
+		rc = init_labels(nd_mapping, count);
 		if (rc < 0)
 			return rc;
 
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index c5e3196..3509cff 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -12,8 +12,10 @@
  */
 #include <linux/module.h>
 #include <linux/device.h>
+#include <linux/sort.h>
 #include <linux/slab.h>
 #include <linux/pmem.h>
+#include <linux/list.h>
 #include <linux/nd.h>
 #include "nd-core.h"
 #include "nd.h"
@@ -28,7 +30,10 @@
 static void namespace_pmem_release(struct device *dev)
 {
 	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
+	struct nd_region *nd_region = to_nd_region(dev->parent);
 
+	if (nspm->id >= 0)
+		ida_simple_remove(&nd_region->ns_ida, nspm->id);
 	kfree(nspm->alt_name);
 	kfree(nspm->uuid);
 	kfree(nspm);
@@ -62,17 +67,17 @@
 	.release = namespace_blk_release,
 };
 
-static bool is_namespace_pmem(struct device *dev)
+static bool is_namespace_pmem(const struct device *dev)
 {
 	return dev ? dev->type == &namespace_pmem_device_type : false;
 }
 
-static bool is_namespace_blk(struct device *dev)
+static bool is_namespace_blk(const struct device *dev)
 {
 	return dev ? dev->type == &namespace_blk_device_type : false;
 }
 
-static bool is_namespace_io(struct device *dev)
+static bool is_namespace_io(const struct device *dev)
 {
 	return dev ? dev->type == &namespace_io_device_type : false;
 }
@@ -168,7 +173,21 @@
 		suffix = "s";
 
 	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
-		sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : "");
+		int nsidx = 0;
+
+		if (is_namespace_pmem(&ndns->dev)) {
+			struct nd_namespace_pmem *nspm;
+
+			nspm = to_nd_namespace_pmem(&ndns->dev);
+			nsidx = nspm->id;
+		}
+
+		if (nsidx)
+			sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
+					suffix ? suffix : "");
+		else
+			sprintf(name, "pmem%d%s", nd_region->id,
+					suffix ? suffix : "");
 	} else if (is_namespace_blk(&ndns->dev)) {
 		struct nd_namespace_blk *nsblk;
 
@@ -294,7 +313,7 @@
 		if (strcmp(res->name, label_id.id) != 0)
 			continue;
 		/*
-		 * Resources with unacknoweldged adjustments indicate a
+		 * Resources with unacknowledged adjustments indicate a
 		 * failure to update labels
 		 */
 		if (res->flags & DPA_RESOURCE_ADJUSTED)
@@ -510,19 +529,68 @@
 	return rc ? n : 0;
 }
 
-static bool space_valid(bool is_pmem, bool is_reserve,
-		struct nd_label_id *label_id, struct resource *res)
+
+/**
+ * space_valid() - validate free dpa space against constraints
+ * @nd_region: hosting region of the free space
+ * @ndd: dimm device data for debug
+ * @label_id: namespace id to allocate space
+ * @prev: potential allocation that precedes free space
+ * @next: allocation that follows the given free space range
+ * @exist: first allocation with same id in the mapping
+ * @n: range that must satisfied for pmem allocations
+ * @valid: free space range to validate
+ *
+ * BLK-space is valid as long as it does not precede a PMEM
+ * allocation in a given region. PMEM-space must be contiguous
+ * and adjacent to an existing existing allocation (if one
+ * exists).  If reserving PMEM any space is valid.
+ */
+static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
+		struct nd_label_id *label_id, struct resource *prev,
+		struct resource *next, struct resource *exist,
+		resource_size_t n, struct resource *valid)
 {
-	/*
-	 * For BLK-space any space is valid, for PMEM-space, it must be
-	 * contiguous with an existing allocation unless we are
-	 * reserving pmem.
-	 */
-	if (is_reserve || !is_pmem)
-		return true;
-	if (!res || strcmp(res->name, label_id->id) == 0)
-		return true;
-	return false;
+	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
+	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
+
+	if (valid->start >= valid->end)
+		goto invalid;
+
+	if (is_reserve)
+		return;
+
+	if (!is_pmem) {
+		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+		struct nvdimm_bus *nvdimm_bus;
+		struct blk_alloc_info info = {
+			.nd_mapping = nd_mapping,
+			.available = nd_mapping->size,
+			.res = valid,
+		};
+
+		WARN_ON(!is_nd_blk(&nd_region->dev));
+		nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
+		device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
+		return;
+	}
+
+	/* allocation needs to be contiguous, so this is all or nothing */
+	if (resource_size(valid) < n)
+		goto invalid;
+
+	/* we've got all the space we need and no existing allocation */
+	if (!exist)
+		return;
+
+	/* allocation needs to be contiguous with the existing namespace */
+	if (valid->start == exist->end + 1
+			|| valid->end == exist->start - 1)
+		return;
+
+ invalid:
+	/* truncate @valid size to 0 */
+	valid->end = valid->start - 1;
 }
 
 enum alloc_loc {
@@ -534,18 +602,24 @@
 		resource_size_t n)
 {
 	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
-	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
 	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+	struct resource *res, *exist = NULL, valid;
 	const resource_size_t to_allocate = n;
-	struct resource *res;
 	int first;
 
+	for_each_dpa_resource(ndd, res)
+		if (strcmp(label_id->id, res->name) == 0)
+			exist = res;
+
+	valid.start = nd_mapping->start;
+	valid.end = mapping_end;
+	valid.name = "free space";
  retry:
 	first = 0;
 	for_each_dpa_resource(ndd, res) {
-		resource_size_t allocate, available = 0, free_start, free_end;
 		struct resource *next = res->sibling, *new_res = NULL;
+		resource_size_t allocate, available = 0;
 		enum alloc_loc loc = ALLOC_ERR;
 		const char *action;
 		int rc = 0;
@@ -558,32 +632,35 @@
 
 		/* space at the beginning of the mapping */
 		if (!first++ && res->start > nd_mapping->start) {
-			free_start = nd_mapping->start;
-			available = res->start - free_start;
-			if (space_valid(is_pmem, is_reserve, label_id, NULL))
+			valid.start = nd_mapping->start;
+			valid.end = res->start - 1;
+			space_valid(nd_region, ndd, label_id, NULL, next, exist,
+					to_allocate, &valid);
+			available = resource_size(&valid);
+			if (available)
 				loc = ALLOC_BEFORE;
 		}
 
 		/* space between allocations */
 		if (!loc && next) {
-			free_start = res->start + resource_size(res);
-			free_end = min(mapping_end, next->start - 1);
-			if (space_valid(is_pmem, is_reserve, label_id, res)
-					&& free_start < free_end) {
-				available = free_end + 1 - free_start;
+			valid.start = res->start + resource_size(res);
+			valid.end = min(mapping_end, next->start - 1);
+			space_valid(nd_region, ndd, label_id, res, next, exist,
+					to_allocate, &valid);
+			available = resource_size(&valid);
+			if (available)
 				loc = ALLOC_MID;
-			}
 		}
 
 		/* space at the end of the mapping */
 		if (!loc && !next) {
-			free_start = res->start + resource_size(res);
-			free_end = mapping_end;
-			if (space_valid(is_pmem, is_reserve, label_id, res)
-					&& free_start < free_end) {
-				available = free_end + 1 - free_start;
+			valid.start = res->start + resource_size(res);
+			valid.end = mapping_end;
+			space_valid(nd_region, ndd, label_id, res, next, exist,
+					to_allocate, &valid);
+			available = resource_size(&valid);
+			if (available)
 				loc = ALLOC_AFTER;
-			}
 		}
 
 		if (!loc || !available)
@@ -593,8 +670,6 @@
 		case ALLOC_BEFORE:
 			if (strcmp(res->name, label_id->id) == 0) {
 				/* adjust current resource up */
-				if (is_pmem && !is_reserve)
-					return n;
 				rc = adjust_resource(res, res->start - allocate,
 						resource_size(res) + allocate);
 				action = "cur grow up";
@@ -604,8 +679,6 @@
 		case ALLOC_MID:
 			if (strcmp(next->name, label_id->id) == 0) {
 				/* adjust next resource up */
-				if (is_pmem && !is_reserve)
-					return n;
 				rc = adjust_resource(next, next->start
 						- allocate, resource_size(next)
 						+ allocate);
@@ -629,12 +702,10 @@
 		if (strcmp(action, "allocate") == 0) {
 			/* BLK allocate bottom up */
 			if (!is_pmem)
-				free_start += available - allocate;
-			else if (!is_reserve && free_start != nd_mapping->start)
-				return n;
+				valid.start += available - allocate;
 
 			new_res = nvdimm_allocate_dpa(ndd, label_id,
-					free_start, allocate);
+					valid.start, allocate);
 			if (!new_res)
 				rc = -EBUSY;
 		} else if (strcmp(action, "grow down") == 0) {
@@ -832,13 +903,45 @@
 	return 0;
 }
 
-static void nd_namespace_pmem_set_size(struct nd_region *nd_region,
+static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
 		struct nd_namespace_pmem *nspm, resource_size_t size)
 {
 	struct resource *res = &nspm->nsio.res;
+	resource_size_t offset = 0;
 
-	res->start = nd_region->ndr_start;
-	res->end = nd_region->ndr_start + size - 1;
+	if (size && !nspm->uuid) {
+		WARN_ON_ONCE(1);
+		size = 0;
+	}
+
+	if (size && nspm->uuid) {
+		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+		struct nd_label_id label_id;
+		struct resource *res;
+
+		if (!ndd) {
+			size = 0;
+			goto out;
+		}
+
+		nd_label_gen_id(&label_id, nspm->uuid, 0);
+
+		/* calculate a spa offset from the dpa allocation offset */
+		for_each_dpa_resource(ndd, res)
+			if (strcmp(res->name, label_id.id) == 0) {
+				offset = (res->start - nd_mapping->start)
+					* nd_region->ndr_mappings;
+				goto out;
+			}
+
+		WARN_ON_ONCE(1);
+		size = 0;
+	}
+
+ out:
+	res->start = nd_region->ndr_start + offset;
+	res->end = res->start + size - 1;
 }
 
 static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
@@ -929,7 +1032,7 @@
 	if (is_namespace_pmem(dev)) {
 		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 
-		nd_namespace_pmem_set_size(nd_region, nspm,
+		nd_namespace_pmem_set_resource(nd_region, nspm,
 				val * nd_region->ndr_mappings);
 	} else if (is_namespace_blk(dev)) {
 		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
@@ -1031,22 +1134,27 @@
 }
 static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);
 
-static ssize_t uuid_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
+static u8 *namespace_to_uuid(struct device *dev)
 {
-	u8 *uuid;
-
 	if (is_namespace_pmem(dev)) {
 		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 
-		uuid = nspm->uuid;
+		return nspm->uuid;
 	} else if (is_namespace_blk(dev)) {
 		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
 
-		uuid = nsblk->uuid;
+		return nsblk->uuid;
 	} else
-		return -ENXIO;
+		return ERR_PTR(-ENXIO);
+}
 
+static ssize_t uuid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	u8 *uuid = namespace_to_uuid(dev);
+
+	if (IS_ERR(uuid))
+		return PTR_ERR(uuid);
 	if (uuid)
 		return sprintf(buf, "%pUb\n", uuid);
 	return sprintf(buf, "\n");
@@ -1089,7 +1197,7 @@
 		 *
 		 * FIXME: can we delete uuid with zero dpa allocated?
 		 */
-		if (nd_mapping->labels)
+		if (list_empty(&nd_mapping->labels))
 			return -EBUSY;
 	}
 
@@ -1491,14 +1599,19 @@
 
 	for (i = 0; i < nd_region->ndr_mappings; i++) {
 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
-		struct nd_namespace_label *nd_label;
+		struct nd_label_ent *label_ent;
 		bool found_uuid = false;
-		int l;
 
-		for_each_label(l, nd_label, nd_mapping->labels) {
-			u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
-			u16 position = __le16_to_cpu(nd_label->position);
-			u16 nlabel = __le16_to_cpu(nd_label->nlabel);
+		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
+			struct nd_namespace_label *nd_label = label_ent->label;
+			u16 position, nlabel;
+			u64 isetcookie;
+
+			if (!nd_label)
+				continue;
+			isetcookie = __le64_to_cpu(nd_label->isetcookie);
+			position = __le16_to_cpu(nd_label->position);
+			nlabel = __le16_to_cpu(nd_label->nlabel);
 
 			if (isetcookie != cookie)
 				continue;
@@ -1528,7 +1641,6 @@
 
 static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
 {
-	struct nd_namespace_label *select = NULL;
 	int i;
 
 	if (!pmem_id)
@@ -1536,90 +1648,106 @@
 
 	for (i = 0; i < nd_region->ndr_mappings; i++) {
 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
-		struct nd_namespace_label *nd_label;
+		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+		struct nd_namespace_label *nd_label = NULL;
 		u64 hw_start, hw_end, pmem_start, pmem_end;
-		int l;
+		struct nd_label_ent *label_ent;
 
-		for_each_label(l, nd_label, nd_mapping->labels)
+		WARN_ON(!mutex_is_locked(&nd_mapping->lock));
+		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
+			nd_label = label_ent->label;
+			if (!nd_label)
+				continue;
 			if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
 				break;
+			nd_label = NULL;
+		}
 
 		if (!nd_label) {
 			WARN_ON(1);
 			return -EINVAL;
 		}
 
-		select = nd_label;
 		/*
 		 * Check that this label is compliant with the dpa
 		 * range published in NFIT
 		 */
 		hw_start = nd_mapping->start;
 		hw_end = hw_start + nd_mapping->size;
-		pmem_start = __le64_to_cpu(select->dpa);
-		pmem_end = pmem_start + __le64_to_cpu(select->rawsize);
-		if (pmem_start == hw_start && pmem_end <= hw_end)
+		pmem_start = __le64_to_cpu(nd_label->dpa);
+		pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
+		if (pmem_start >= hw_start && pmem_start < hw_end
+				&& pmem_end <= hw_end && pmem_end > hw_start)
 			/* pass */;
-		else
+		else {
+			dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
+					dev_name(ndd->dev), nd_label->uuid);
 			return -EINVAL;
+		}
 
-		nd_mapping->labels[0] = select;
-		nd_mapping->labels[1] = NULL;
+		/* move recently validated label to the front of the list */
+		list_move(&label_ent->list, &nd_mapping->labels);
 	}
 	return 0;
 }
 
 /**
- * find_pmem_label_set - validate interleave set labelling, retrieve label0
+ * create_namespace_pmem - validate interleave set labelling, retrieve label0
  * @nd_region: region with mappings to validate
+ * @nspm: target namespace to create
+ * @nd_label: target pmem namespace label to evaluate
  */
-static int find_pmem_label_set(struct nd_region *nd_region,
-		struct nd_namespace_pmem *nspm)
+struct device *create_namespace_pmem(struct nd_region *nd_region,
+		struct nd_namespace_label *nd_label)
 {
 	u64 cookie = nd_region_interleave_set_cookie(nd_region);
-	struct nd_namespace_label *nd_label;
-	u8 select_id[NSLABEL_UUID_LEN];
+	struct nd_label_ent *label_ent;
+	struct nd_namespace_pmem *nspm;
+	struct nd_mapping *nd_mapping;
 	resource_size_t size = 0;
-	u8 *pmem_id = NULL;
-	int rc = -ENODEV, l;
+	struct resource *res;
+	struct device *dev;
+	int rc = 0;
 	u16 i;
 
-	if (cookie == 0)
-		return -ENXIO;
+	if (cookie == 0) {
+		dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
+		return ERR_PTR(-ENXIO);
+	}
 
-	/*
-	 * Find a complete set of labels by uuid.  By definition we can start
-	 * with any mapping as the reference label
-	 */
-	for_each_label(l, nd_label, nd_region->mapping[0].labels) {
-		u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
+	if (__le64_to_cpu(nd_label->isetcookie) != cookie) {
+		dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
+				nd_label->uuid);
+		return ERR_PTR(-EAGAIN);
+	}
 
-		if (isetcookie != cookie)
-			continue;
+	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
+	if (!nspm)
+		return ERR_PTR(-ENOMEM);
 
-		for (i = 0; nd_region->ndr_mappings; i++)
-			if (!has_uuid_at_pos(nd_region, nd_label->uuid,
-						cookie, i))
-				break;
-		if (i < nd_region->ndr_mappings) {
-			/*
-			 * Give up if we don't find an instance of a
-			 * uuid at each position (from 0 to
-			 * nd_region->ndr_mappings - 1), or if we find a
-			 * dimm with two instances of the same uuid.
-			 */
-			rc = -EINVAL;
-			goto err;
-		} else if (pmem_id) {
-			/*
-			 * If there is more than one valid uuid set, we
-			 * need userspace to clean this up.
-			 */
-			rc = -EBUSY;
-			goto err;
-		}
-		memcpy(select_id, nd_label->uuid, NSLABEL_UUID_LEN);
-		pmem_id = select_id;
+	nspm->id = -1;
+	dev = &nspm->nsio.common.dev;
+	dev->type = &namespace_pmem_device_type;
+	dev->parent = &nd_region->dev;
+	res = &nspm->nsio.res;
+	res->name = dev_name(&nd_region->dev);
+	res->flags = IORESOURCE_MEM;
+
+	for (i = 0; i < nd_region->ndr_mappings; i++)
+		if (!has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
+			break;
+	if (i < nd_region->ndr_mappings) {
+		struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]);
+
+		/*
+		 * Give up if we don't find an instance of a uuid at each
+		 * position (from 0 to nd_region->ndr_mappings - 1), or if we
+		 * find a dimm with two instances of the same uuid.
+		 */
+		dev_err(&nd_region->dev, "%s missing label for %pUb\n",
+				dev_name(ndd->dev), nd_label->uuid);
+		rc = -EINVAL;
+		goto err;
 	}
 
 	/*
@@ -1630,14 +1758,23 @@
 	 * the dimm being enabled (i.e. nd_label_reserve_dpa()
 	 * succeeded).
 	 */
-	rc = select_pmem_id(nd_region, pmem_id);
+	rc = select_pmem_id(nd_region, nd_label->uuid);
 	if (rc)
 		goto err;
 
 	/* Calculate total size and populate namespace properties from label0 */
 	for (i = 0; i < nd_region->ndr_mappings; i++) {
-		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
-		struct nd_namespace_label *label0 = nd_mapping->labels[0];
+		struct nd_namespace_label *label0;
+
+		nd_mapping = &nd_region->mapping[i];
+		label_ent = list_first_entry_or_null(&nd_mapping->labels,
+				typeof(*label_ent), list);
+		label0 = label_ent ? label_ent->label : 0;
+
+		if (!label0) {
+			WARN_ON(1);
+			continue;
+		}
 
 		size += __le64_to_cpu(label0->rawsize);
 		if (__le16_to_cpu(label0->position) != 0)
@@ -1654,10 +1791,11 @@
 		goto err;
 	}
 
-	nd_namespace_pmem_set_size(nd_region, nspm, size);
+	nd_namespace_pmem_set_resource(nd_region, nspm, size);
 
-	return 0;
+	return dev;
  err:
+	namespace_pmem_release(dev);
 	switch (rc) {
 	case -EINVAL:
 		dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
@@ -1670,55 +1808,7 @@
 				__func__, rc);
 		break;
 	}
-	return rc;
-}
-
-static struct device **create_namespace_pmem(struct nd_region *nd_region)
-{
-	struct nd_namespace_pmem *nspm;
-	struct device *dev, **devs;
-	struct resource *res;
-	int rc;
-
-	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
-	if (!nspm)
-		return NULL;
-
-	dev = &nspm->nsio.common.dev;
-	dev->type = &namespace_pmem_device_type;
-	dev->parent = &nd_region->dev;
-	res = &nspm->nsio.res;
-	res->name = dev_name(&nd_region->dev);
-	res->flags = IORESOURCE_MEM;
-	rc = find_pmem_label_set(nd_region, nspm);
-	if (rc == -ENODEV) {
-		int i;
-
-		/* Pass, try to permit namespace creation... */
-		for (i = 0; i < nd_region->ndr_mappings; i++) {
-			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
-
-			kfree(nd_mapping->labels);
-			nd_mapping->labels = NULL;
-		}
-
-		/* Publish a zero-sized namespace for userspace to configure. */
-		nd_namespace_pmem_set_size(nd_region, nspm, 0);
-
-		rc = 0;
-	} else if (rc)
-		goto err;
-
-	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
-	if (!devs)
-		goto err;
-
-	devs[0] = dev;
-	return devs;
-
- err:
-	namespace_pmem_release(&nspm->nsio.common.dev);
-	return NULL;
+	return ERR_PTR(rc);
 }
 
 struct resource *nsblk_add_resource(struct nd_region *nd_region,
@@ -1770,16 +1860,58 @@
 	return &nsblk->common.dev;
 }
 
-void nd_region_create_blk_seed(struct nd_region *nd_region)
+static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
+{
+	struct nd_namespace_pmem *nspm;
+	struct resource *res;
+	struct device *dev;
+
+	if (!is_nd_pmem(&nd_region->dev))
+		return NULL;
+
+	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
+	if (!nspm)
+		return NULL;
+
+	dev = &nspm->nsio.common.dev;
+	dev->type = &namespace_pmem_device_type;
+	dev->parent = &nd_region->dev;
+	res = &nspm->nsio.res;
+	res->name = dev_name(&nd_region->dev);
+	res->flags = IORESOURCE_MEM;
+
+	nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
+	if (nspm->id < 0) {
+		kfree(nspm);
+		return NULL;
+	}
+	dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
+	dev->parent = &nd_region->dev;
+	dev->groups = nd_namespace_attribute_groups;
+	nd_namespace_pmem_set_resource(nd_region, nspm, 0);
+
+	return dev;
+}
+
+void nd_region_create_ns_seed(struct nd_region *nd_region)
 {
 	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
-	nd_region->ns_seed = nd_namespace_blk_create(nd_region);
+
+	if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
+		return;
+
+	if (is_nd_blk(&nd_region->dev))
+		nd_region->ns_seed = nd_namespace_blk_create(nd_region);
+	else
+		nd_region->ns_seed = nd_namespace_pmem_create(nd_region);
+
 	/*
 	 * Seed creation failures are not fatal, provisioning is simply
 	 * disabled until memory becomes available
 	 */
 	if (!nd_region->ns_seed)
-		dev_err(&nd_region->dev, "failed to create blk namespace\n");
+		dev_err(&nd_region->dev, "failed to create %s namespace\n",
+				is_nd_blk(&nd_region->dev) ? "blk" : "pmem");
 	else
 		nd_device_register(nd_region->ns_seed);
 }
@@ -1820,43 +1952,137 @@
 		dev_err(&nd_region->dev, "failed to create btt namespace\n");
 }
 
-static struct device **create_namespace_blk(struct nd_region *nd_region)
+static int add_namespace_resource(struct nd_region *nd_region,
+		struct nd_namespace_label *nd_label, struct device **devs,
+		int count)
 {
 	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
-	struct nd_namespace_label *nd_label;
-	struct device *dev, **devs = NULL;
+	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+	int i;
+
+	for (i = 0; i < count; i++) {
+		u8 *uuid = namespace_to_uuid(devs[i]);
+		struct resource *res;
+
+		if (IS_ERR_OR_NULL(uuid)) {
+			WARN_ON(1);
+			continue;
+		}
+
+		if (memcmp(uuid, nd_label->uuid, NSLABEL_UUID_LEN) != 0)
+			continue;
+		if (is_namespace_blk(devs[i])) {
+			res = nsblk_add_resource(nd_region, ndd,
+					to_nd_namespace_blk(devs[i]),
+					__le64_to_cpu(nd_label->dpa));
+			if (!res)
+				return -ENXIO;
+			nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
+		} else {
+			dev_err(&nd_region->dev,
+					"error: conflicting extents for uuid: %pUb\n",
+					nd_label->uuid);
+			return -ENXIO;
+		}
+		break;
+	}
+
+	return i;
+}
+
+struct device *create_namespace_blk(struct nd_region *nd_region,
+		struct nd_namespace_label *nd_label, int count)
+{
+
+	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
 	struct nd_namespace_blk *nsblk;
-	struct nvdimm_drvdata *ndd;
-	int i, l, count = 0;
+	char *name[NSLABEL_NAME_LEN];
+	struct device *dev = NULL;
 	struct resource *res;
 
-	if (nd_region->ndr_mappings == 0)
-		return NULL;
+	nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
+	if (!nsblk)
+		return ERR_PTR(-ENOMEM);
+	dev = &nsblk->common.dev;
+	dev->type = &namespace_blk_device_type;
+	dev->parent = &nd_region->dev;
+	nsblk->id = -1;
+	nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
+	nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
+			GFP_KERNEL);
+	if (!nsblk->uuid)
+		goto blk_err;
+	memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
+	if (name[0])
+		nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
+				GFP_KERNEL);
+	res = nsblk_add_resource(nd_region, ndd, nsblk,
+			__le64_to_cpu(nd_label->dpa));
+	if (!res)
+		goto blk_err;
+	nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
+	return dev;
+ blk_err:
+	namespace_blk_release(dev);
+	return ERR_PTR(-ENXIO);
+}
 
-	ndd = to_ndd(nd_mapping);
-	for_each_label(l, nd_label, nd_mapping->labels) {
-		u32 flags = __le32_to_cpu(nd_label->flags);
-		char *name[NSLABEL_NAME_LEN];
+static int cmp_dpa(const void *a, const void *b)
+{
+	const struct device *dev_a = *(const struct device **) a;
+	const struct device *dev_b = *(const struct device **) b;
+	struct nd_namespace_blk *nsblk_a, *nsblk_b;
+	struct nd_namespace_pmem *nspm_a, *nspm_b;
+
+	if (is_namespace_io(dev_a))
+		return 0;
+
+	if (is_namespace_blk(dev_a)) {
+		nsblk_a = to_nd_namespace_blk(dev_a);
+		nsblk_b = to_nd_namespace_blk(dev_b);
+
+		return memcmp(&nsblk_a->res[0]->start, &nsblk_b->res[0]->start,
+				sizeof(resource_size_t));
+	}
+
+	nspm_a = to_nd_namespace_pmem(dev_a);
+	nspm_b = to_nd_namespace_pmem(dev_b);
+
+	return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start,
+			sizeof(resource_size_t));
+}
+
+static struct device **scan_labels(struct nd_region *nd_region)
+{
+	int i, count = 0;
+	struct device *dev, **devs = NULL;
+	struct nd_label_ent *label_ent, *e;
+	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+	resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
+
+	/* "safe" because create_namespace_pmem() might list_move() label_ent */
+	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
+		struct nd_namespace_label *nd_label = label_ent->label;
 		struct device **__devs;
+		u32 flags;
 
-		if (flags & NSLABEL_FLAG_LOCAL)
-			/* pass */;
+		if (!nd_label)
+			continue;
+		flags = __le32_to_cpu(nd_label->flags);
+		if (is_nd_blk(&nd_region->dev)
+				== !!(flags & NSLABEL_FLAG_LOCAL))
+			/* pass, region matches label type */;
 		else
 			continue;
 
-		for (i = 0; i < count; i++) {
-			nsblk = to_nd_namespace_blk(devs[i]);
-			if (memcmp(nsblk->uuid, nd_label->uuid,
-						NSLABEL_UUID_LEN) == 0) {
-				res = nsblk_add_resource(nd_region, ndd, nsblk,
-						__le64_to_cpu(nd_label->dpa));
-				if (!res)
-					goto err;
-				nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
-					dev_name(&nsblk->common.dev));
-				break;
-			}
-		}
+		/* skip labels that describe extents outside of the region */
+		if (nd_label->dpa < nd_mapping->start || nd_label->dpa > map_end)
+			continue;
+
+		i = add_namespace_resource(nd_region, nd_label, devs, count);
+		if (i < 0)
+			goto err;
 		if (i < count)
 			continue;
 		__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
@@ -1866,67 +2092,126 @@
 		kfree(devs);
 		devs = __devs;
 
-		nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
-		if (!nsblk)
-			goto err;
-		dev = &nsblk->common.dev;
-		dev->type = &namespace_blk_device_type;
-		dev->parent = &nd_region->dev;
-		dev_set_name(dev, "namespace%d.%d", nd_region->id, count);
-		devs[count++] = dev;
-		nsblk->id = -1;
-		nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
-		nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
-				GFP_KERNEL);
-		if (!nsblk->uuid)
-			goto err;
-		memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
-		if (name[0])
-			nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
-					GFP_KERNEL);
-		res = nsblk_add_resource(nd_region, ndd, nsblk,
-				__le64_to_cpu(nd_label->dpa));
-		if (!res)
-			goto err;
-		nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
-				dev_name(&nsblk->common.dev));
+		if (is_nd_blk(&nd_region->dev)) {
+			dev = create_namespace_blk(nd_region, nd_label, count);
+			if (IS_ERR(dev))
+				goto err;
+			devs[count++] = dev;
+		} else {
+			dev = create_namespace_pmem(nd_region, nd_label);
+			if (IS_ERR(dev)) {
+				switch (PTR_ERR(dev)) {
+				case -EAGAIN:
+					/* skip invalid labels */
+					continue;
+				case -ENODEV:
+					/* fallthrough to seed creation */
+					break;
+				default:
+					goto err;
+				}
+			} else
+				devs[count++] = dev;
+		}
 	}
 
-	dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n",
-			__func__, count, count == 1 ? "" : "s");
+	dev_dbg(&nd_region->dev, "%s: discovered %d %s namespace%s\n",
+			__func__, count, is_nd_blk(&nd_region->dev)
+			? "blk" : "pmem", count == 1 ? "" : "s");
 
 	if (count == 0) {
 		/* Publish a zero-sized namespace for userspace to configure. */
-		for (i = 0; i < nd_region->ndr_mappings; i++) {
-			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
-
-			kfree(nd_mapping->labels);
-			nd_mapping->labels = NULL;
-		}
+		nd_mapping_free_labels(nd_mapping);
 
 		devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
 		if (!devs)
 			goto err;
-		nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
-		if (!nsblk)
-			goto err;
-		dev = &nsblk->common.dev;
-		dev->type = &namespace_blk_device_type;
+		if (is_nd_blk(&nd_region->dev)) {
+			struct nd_namespace_blk *nsblk;
+
+			nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
+			if (!nsblk)
+				goto err;
+			dev = &nsblk->common.dev;
+			dev->type = &namespace_blk_device_type;
+		} else {
+			struct nd_namespace_pmem *nspm;
+
+			nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
+			if (!nspm)
+				goto err;
+			dev = &nspm->nsio.common.dev;
+			dev->type = &namespace_pmem_device_type;
+			nd_namespace_pmem_set_resource(nd_region, nspm, 0);
+		}
 		dev->parent = &nd_region->dev;
 		devs[count++] = dev;
+	} else if (is_nd_pmem(&nd_region->dev)) {
+		/* clean unselected labels */
+		for (i = 0; i < nd_region->ndr_mappings; i++) {
+			struct list_head *l, *e;
+			LIST_HEAD(list);
+			int j;
+
+			nd_mapping = &nd_region->mapping[i];
+			if (list_empty(&nd_mapping->labels)) {
+				WARN_ON(1);
+				continue;
+			}
+
+			j = count;
+			list_for_each_safe(l, e, &nd_mapping->labels) {
+				if (!j--)
+					break;
+				list_move_tail(l, &list);
+			}
+			nd_mapping_free_labels(nd_mapping);
+			list_splice_init(&list, &nd_mapping->labels);
+		}
 	}
 
+	if (count > 1)
+		sort(devs, count, sizeof(struct device *), cmp_dpa, NULL);
+
 	return devs;
 
-err:
-	for (i = 0; i < count; i++) {
-		nsblk = to_nd_namespace_blk(devs[i]);
-		namespace_blk_release(&nsblk->common.dev);
-	}
+ err:
+	for (i = 0; devs[i]; i++)
+		if (is_nd_blk(&nd_region->dev))
+			namespace_blk_release(devs[i]);
+		else
+			namespace_pmem_release(devs[i]);
 	kfree(devs);
 	return NULL;
 }
 
+static struct device **create_namespaces(struct nd_region *nd_region)
+{
+	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+	struct device **devs;
+	int i;
+
+	if (nd_region->ndr_mappings == 0)
+		return NULL;
+
+	/* lock down all mappings while we scan labels */
+	for (i = 0; i < nd_region->ndr_mappings; i++) {
+		nd_mapping = &nd_region->mapping[i];
+		mutex_lock_nested(&nd_mapping->lock, i);
+	}
+
+	devs = scan_labels(nd_region);
+
+	for (i = 0; i < nd_region->ndr_mappings; i++) {
+		int reverse = nd_region->ndr_mappings - 1 - i;
+
+		nd_mapping = &nd_region->mapping[reverse];
+		mutex_unlock(&nd_mapping->lock);
+	}
+
+	return devs;
+}
+
 static int init_active_labels(struct nd_region *nd_region)
 {
 	int i;
@@ -1935,6 +2220,7 @@
 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
 		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
 		struct nvdimm *nvdimm = nd_mapping->nvdimm;
+		struct nd_label_ent *label_ent;
 		int count, j;
 
 		/*
@@ -1956,16 +2242,27 @@
 		dev_dbg(ndd->dev, "%s: %d\n", __func__, count);
 		if (!count)
 			continue;
-		nd_mapping->labels = kcalloc(count + 1, sizeof(void *),
-				GFP_KERNEL);
-		if (!nd_mapping->labels)
-			return -ENOMEM;
 		for (j = 0; j < count; j++) {
 			struct nd_namespace_label *label;
 
+			label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
+			if (!label_ent)
+				break;
 			label = nd_label_active(ndd, j);
-			nd_mapping->labels[j] = label;
+			label_ent->label = label;
+
+			mutex_lock(&nd_mapping->lock);
+			list_add_tail(&label_ent->list, &nd_mapping->labels);
+			mutex_unlock(&nd_mapping->lock);
 		}
+
+		if (j >= count)
+			continue;
+
+		mutex_lock(&nd_mapping->lock);
+		nd_mapping_free_labels(nd_mapping);
+		mutex_unlock(&nd_mapping->lock);
+		return -ENOMEM;
 	}
 
 	return 0;
@@ -1990,10 +2287,8 @@
 		devs = create_namespace_io(nd_region);
 		break;
 	case ND_DEVICE_NAMESPACE_PMEM:
-		devs = create_namespace_pmem(nd_region);
-		break;
 	case ND_DEVICE_NAMESPACE_BLK:
-		devs = create_namespace_blk(nd_region);
+		devs = create_namespaces(nd_region);
 		break;
 	default:
 		break;
@@ -2014,6 +2309,13 @@
 			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
 					GFP_KERNEL);
 			nsblk->id = id;
+		} else if (type == ND_DEVICE_NAMESPACE_PMEM) {
+			struct nd_namespace_pmem *nspm;
+
+			nspm = to_nd_namespace_pmem(dev);
+			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
+					GFP_KERNEL);
+			nspm->id = id;
 		} else
 			id = i;
 
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 38ce6bb..8623e57 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -44,6 +44,23 @@
 	struct resource *flush_wpq;
 };
 
+/**
+ * struct blk_alloc_info - tracking info for BLK dpa scanning
+ * @nd_mapping: blk region mapping boundaries
+ * @available: decremented in alias_dpa_busy as aliased PMEM is scanned
+ * @busy: decremented in blk_dpa_busy to account for ranges already
+ * 	  handled by alias_dpa_busy
+ * @res: alias_dpa_busy interprets this a free space range that needs to
+ * 	 be truncated to the valid BLK allocation starting DPA, blk_dpa_busy
+ * 	 treats it as a busy range that needs the aliased PMEM ranges
+ * 	 truncated.
+ */
+struct blk_alloc_info {
+	struct nd_mapping *nd_mapping;
+	resource_size_t available, busy;
+	struct resource *res;
+};
+
 bool is_nvdimm(struct device *dev);
 bool is_nd_pmem(struct device *dev);
 bool is_nd_blk(struct device *dev);
@@ -54,7 +71,7 @@
 void nd_region_devs_exit(void);
 void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev);
 struct nd_region;
-void nd_region_create_blk_seed(struct nd_region *nd_region);
+void nd_region_create_ns_seed(struct nd_region *nd_region);
 void nd_region_create_btt_seed(struct nd_region *nd_region);
 void nd_region_create_pfn_seed(struct nd_region *nd_region);
 void nd_region_create_dax_seed(struct nd_region *nd_region);
@@ -73,13 +90,14 @@
 struct nd_region;
 struct nvdimm_drvdata;
 struct nd_mapping;
+void nd_mapping_free_labels(struct nd_mapping *nd_mapping);
 resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
 		struct nd_mapping *nd_mapping, resource_size_t *overlap);
-resource_size_t nd_blk_available_dpa(struct nd_mapping *nd_mapping);
+resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
 resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
 resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
 		struct nd_label_id *label_id);
-struct nd_mapping;
+int alias_dpa_busy(struct device *dev, void *data);
 struct resource *nsblk_add_resource(struct nd_region *nd_region,
 		struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
 		resource_size_t start);
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 0b78a82..d3b2fca 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -101,9 +101,6 @@
 		(unsigned long long) (res ? resource_size(res) : 0), \
 		(unsigned long long) (res ? res->start : 0), ##arg)
 
-#define for_each_label(l, label, labels) \
-	for (l = 0; (label = labels ? labels[l] : NULL); l++)
-
 #define for_each_dpa_resource(ndd, res) \
 	for (res = (ndd)->dpa.child; res; res = res->sibling)
 
@@ -116,6 +113,31 @@
 	spinlock_t lock;
 };
 
+struct nd_label_ent {
+	struct list_head list;
+	struct nd_namespace_label *label;
+};
+
+enum nd_mapping_lock_class {
+	ND_MAPPING_CLASS0,
+	ND_MAPPING_UUID_SCAN,
+};
+
+struct nd_mapping {
+	struct nvdimm *nvdimm;
+	u64 start;
+	u64 size;
+	struct list_head labels;
+	struct mutex lock;
+	/*
+	 * @ndd is for private use at region enable / disable time for
+	 * get_ndd() + put_ndd(), all other nd_mapping to ndd
+	 * conversions use to_ndd() which respects enabled state of the
+	 * nvdimm.
+	 */
+	struct nvdimm_drvdata *ndd;
+};
+
 struct nd_region {
 	struct device dev;
 	struct ida ns_ida;
@@ -209,6 +231,7 @@
 void nd_region_exit(void);
 struct nvdimm;
 struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
+int nvdimm_check_config_data(struct device *dev);
 int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
 int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
 int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 571a6c7..42b3a82 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -66,13 +66,32 @@
 	invalidate_pmem(pmem->virt_addr + offset, len);
 }
 
+static void write_pmem(void *pmem_addr, struct page *page,
+		unsigned int off, unsigned int len)
+{
+	void *mem = kmap_atomic(page);
+
+	memcpy_to_pmem(pmem_addr, mem + off, len);
+	kunmap_atomic(mem);
+}
+
+static int read_pmem(struct page *page, unsigned int off,
+		void *pmem_addr, unsigned int len)
+{
+	int rc;
+	void *mem = kmap_atomic(page);
+
+	rc = memcpy_from_pmem(mem + off, pmem_addr, len);
+	kunmap_atomic(mem);
+	return rc;
+}
+
 static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 			unsigned int len, unsigned int off, bool is_write,
 			sector_t sector)
 {
 	int rc = 0;
 	bool bad_pmem = false;
-	void *mem = kmap_atomic(page);
 	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
 	void *pmem_addr = pmem->virt_addr + pmem_off;
 
@@ -83,7 +102,7 @@
 		if (unlikely(bad_pmem))
 			rc = -EIO;
 		else {
-			rc = memcpy_from_pmem(mem + off, pmem_addr, len);
+			rc = read_pmem(page, off, pmem_addr, len);
 			flush_dcache_page(page);
 		}
 	} else {
@@ -102,14 +121,13 @@
 		 * after clear poison.
 		 */
 		flush_dcache_page(page);
-		memcpy_to_pmem(pmem_addr, mem + off, len);
+		write_pmem(pmem_addr, page, off, len);
 		if (unlikely(bad_pmem)) {
 			pmem_clear_poison(pmem, pmem_off, len);
-			memcpy_to_pmem(pmem_addr, mem + off, len);
+			write_pmem(pmem_addr, page, off, len);
 		}
 	}
 
-	kunmap_atomic(mem);
 	return rc;
 }
 
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 4c0ac4a..6af5e62 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -70,7 +70,7 @@
 
 int nd_region_activate(struct nd_region *nd_region)
 {
-	int i, num_flush = 0;
+	int i, j, num_flush = 0;
 	struct nd_region_data *ndrd;
 	struct device *dev = &nd_region->dev;
 	size_t flush_data_size = sizeof(void *);
@@ -107,6 +107,21 @@
 			return rc;
 	}
 
+	/*
+	 * Clear out entries that are duplicates. This should prevent the
+	 * extra flushings.
+	 */
+	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
+		/* ignore if NULL already */
+		if (!ndrd_get_flush_wpq(ndrd, i, 0))
+			continue;
+
+		for (j = i + 1; j < nd_region->ndr_mappings; j++)
+			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
+			    ndrd_get_flush_wpq(ndrd, j, 0))
+				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
+	}
+
 	return 0;
 }
 
@@ -298,9 +313,8 @@
 				blk_max_overlap = overlap;
 				goto retry;
 			}
-		} else if (is_nd_blk(&nd_region->dev)) {
-			available += nd_blk_available_dpa(nd_mapping);
-		}
+		} else if (is_nd_blk(&nd_region->dev))
+			available += nd_blk_available_dpa(nd_region);
 	}
 
 	return available;
@@ -491,6 +505,17 @@
 	return 0;
 }
 
+void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
+{
+	struct nd_label_ent *label_ent, *e;
+
+	WARN_ON(!mutex_is_locked(&nd_mapping->lock));
+	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
+		list_del(&label_ent->list);
+		kfree(label_ent);
+	}
+}
+
 /*
  * Upon successful probe/remove, take/release a reference on the
  * associated interleave set (if present), and plant new btt + namespace
@@ -511,8 +536,10 @@
 			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
 			struct nvdimm *nvdimm = nd_mapping->nvdimm;
 
-			kfree(nd_mapping->labels);
-			nd_mapping->labels = NULL;
+			mutex_lock(&nd_mapping->lock);
+			nd_mapping_free_labels(nd_mapping);
+			mutex_unlock(&nd_mapping->lock);
+
 			put_ndd(ndd);
 			nd_mapping->ndd = NULL;
 			if (ndd)
@@ -522,11 +549,12 @@
 		if (is_nd_pmem(dev))
 			return;
 	}
-	if (dev->parent && is_nd_blk(dev->parent) && probe) {
+	if (dev->parent && (is_nd_blk(dev->parent) || is_nd_pmem(dev->parent))
+			&& probe) {
 		nd_region = to_nd_region(dev->parent);
 		nvdimm_bus_lock(dev);
 		if (nd_region->ns_seed == dev)
-			nd_region_create_blk_seed(nd_region);
+			nd_region_create_ns_seed(nd_region);
 		nvdimm_bus_unlock(dev);
 	}
 	if (is_nd_btt(dev) && probe) {
@@ -536,23 +564,30 @@
 		nvdimm_bus_lock(dev);
 		if (nd_region->btt_seed == dev)
 			nd_region_create_btt_seed(nd_region);
-		if (nd_region->ns_seed == &nd_btt->ndns->dev &&
-				is_nd_blk(dev->parent))
-			nd_region_create_blk_seed(nd_region);
+		if (nd_region->ns_seed == &nd_btt->ndns->dev)
+			nd_region_create_ns_seed(nd_region);
 		nvdimm_bus_unlock(dev);
 	}
 	if (is_nd_pfn(dev) && probe) {
+		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+
 		nd_region = to_nd_region(dev->parent);
 		nvdimm_bus_lock(dev);
 		if (nd_region->pfn_seed == dev)
 			nd_region_create_pfn_seed(nd_region);
+		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
+			nd_region_create_ns_seed(nd_region);
 		nvdimm_bus_unlock(dev);
 	}
 	if (is_nd_dax(dev) && probe) {
+		struct nd_dax *nd_dax = to_nd_dax(dev);
+
 		nd_region = to_nd_region(dev->parent);
 		nvdimm_bus_lock(dev);
 		if (nd_region->dax_seed == dev)
 			nd_region_create_dax_seed(nd_region);
+		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
+			nd_region_create_ns_seed(nd_region);
 		nvdimm_bus_unlock(dev);
 	}
 }
@@ -759,10 +794,10 @@
 	int ro = 0;
 
 	for (i = 0; i < ndr_desc->num_mappings; i++) {
-		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
-		struct nvdimm *nvdimm = nd_mapping->nvdimm;
+		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
+		struct nvdimm *nvdimm = mapping->nvdimm;
 
-		if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
+		if ((mapping->start | mapping->size) % SZ_4K) {
 			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
 					caller, dev_name(&nvdimm->dev), i);
 
@@ -813,11 +848,15 @@
 		ndl->count = 0;
 	}
 
-	memcpy(nd_region->mapping, ndr_desc->nd_mapping,
-			sizeof(struct nd_mapping) * ndr_desc->num_mappings);
 	for (i = 0; i < ndr_desc->num_mappings; i++) {
-		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
-		struct nvdimm *nvdimm = nd_mapping->nvdimm;
+		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
+		struct nvdimm *nvdimm = mapping->nvdimm;
+
+		nd_region->mapping[i].nvdimm = nvdimm;
+		nd_region->mapping[i].start = mapping->start;
+		nd_region->mapping[i].size = mapping->size;
+		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
+		mutex_init(&nd_region->mapping[i].lock);
 
 		get_device(&nvdimm->dev);
 	}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 68ef187..0fc99f0 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -515,7 +515,8 @@
 		goto out;
 
 	ret = BLK_MQ_RQ_QUEUE_BUSY;
-	if (!dma_map_sg(dev->dev, iod->sg, iod->nents, dma_dir))
+	if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
+				DMA_ATTR_NO_WARN))
 		goto out;
 
 	if (!nvme_setup_prps(dev, req, size))
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index bc07ad3..ba7b034b 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -90,10 +90,6 @@
 	help
 	  OpenFirmware PCI IRQ routing helpers
 
-config OF_MTD
-	depends on MTD
-	def_bool y
-
 config OF_RESERVED_MEM
 	depends on OF_EARLY_FLATTREE
 	bool
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index a2e68f7..393fea8 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -26,6 +26,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
+#include <linux/of_pci.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 
@@ -592,87 +593,16 @@
 			    u32 rid_in)
 {
 	struct device *parent_dev;
-	struct device_node *msi_controller_node;
-	struct device_node *msi_np = *np;
-	u32 map_mask, masked_rid, rid_base, msi_base, rid_len, phandle;
-	int msi_map_len;
-	bool matched;
 	u32 rid_out = rid_in;
-	const __be32 *msi_map = NULL;
 
 	/*
 	 * Walk up the device parent links looking for one with a
 	 * "msi-map" property.
 	 */
-	for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent) {
-		if (!parent_dev->of_node)
-			continue;
-
-		msi_map = of_get_property(parent_dev->of_node,
-					  "msi-map", &msi_map_len);
-		if (!msi_map)
-			continue;
-
-		if (msi_map_len % (4 * sizeof(__be32))) {
-			dev_err(parent_dev, "Error: Bad msi-map length: %d\n",
-				msi_map_len);
-			return rid_out;
-		}
-		/* We have a good parent_dev and msi_map, let's use them. */
-		break;
-	}
-	if (!msi_map)
-		return rid_out;
-
-	/* The default is to select all bits. */
-	map_mask = 0xffffffff;
-
-	/*
-	 * Can be overridden by "msi-map-mask" property.  If
-	 * of_property_read_u32() fails, the default is used.
-	 */
-	of_property_read_u32(parent_dev->of_node, "msi-map-mask", &map_mask);
-
-	masked_rid = map_mask & rid_in;
-	matched = false;
-	while (!matched && msi_map_len >= 4 * sizeof(__be32)) {
-		rid_base = be32_to_cpup(msi_map + 0);
-		phandle = be32_to_cpup(msi_map + 1);
-		msi_base = be32_to_cpup(msi_map + 2);
-		rid_len = be32_to_cpup(msi_map + 3);
-
-		if (rid_base & ~map_mask) {
-			dev_err(parent_dev,
-				"Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n",
-				map_mask, rid_base);
-			return rid_out;
-		}
-
-		msi_controller_node = of_find_node_by_phandle(phandle);
-
-		matched = (masked_rid >= rid_base &&
-			   masked_rid < rid_base + rid_len);
-		if (msi_np)
-			matched &= msi_np == msi_controller_node;
-
-		if (matched && !msi_np) {
-			*np = msi_np = msi_controller_node;
+	for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent)
+		if (!of_pci_map_rid(parent_dev->of_node, rid_in, "msi-map",
+				    "msi-map-mask", np, &rid_out))
 			break;
-		}
-
-		of_node_put(msi_controller_node);
-		msi_map_len -= 4 * sizeof(__be32);
-		msi_map += 4;
-	}
-	if (!matched)
-		return rid_out;
-
-	rid_out = masked_rid - rid_base + msi_base;
-	dev_dbg(dev,
-		"msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
-		dev_name(parent_dev), map_mask, rid_base, msi_base,
-		rid_len, rid_in, rid_out);
-
 	return rid_out;
 }
 
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 589b30c..b58be12 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -308,3 +308,105 @@
 EXPORT_SYMBOL_GPL(of_pci_find_msi_chip_by_node);
 
 #endif /* CONFIG_PCI_MSI */
+
+/**
+ * of_pci_map_rid - Translate a requester ID through a downstream mapping.
+ * @np: root complex device node.
+ * @rid: PCI requester ID to map.
+ * @map_name: property name of the map to use.
+ * @map_mask_name: optional property name of the mask to use.
+ * @target: optional pointer to a target device node.
+ * @id_out: optional pointer to receive the translated ID.
+ *
+ * Given a PCI requester ID, look up the appropriate implementation-defined
+ * platform ID and/or the target device which receives transactions on that
+ * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
+ * @id_out may be NULL if only the other is required. If @target points to
+ * a non-NULL device node pointer, only entries targeting that node will be
+ * matched; if it points to a NULL value, it will receive the device node of
+ * the first matching target phandle, with a reference held.
+ *
+ * Return: 0 on success or a standard error code on failure.
+ */
+int of_pci_map_rid(struct device_node *np, u32 rid,
+		   const char *map_name, const char *map_mask_name,
+		   struct device_node **target, u32 *id_out)
+{
+	u32 map_mask, masked_rid;
+	int map_len;
+	const __be32 *map = NULL;
+
+	if (!np || !map_name || (!target && !id_out))
+		return -EINVAL;
+
+	map = of_get_property(np, map_name, &map_len);
+	if (!map) {
+		if (target)
+			return -ENODEV;
+		/* Otherwise, no map implies no translation */
+		*id_out = rid;
+		return 0;
+	}
+
+	if (!map_len || map_len % (4 * sizeof(*map))) {
+		pr_err("%s: Error: Bad %s length: %d\n", np->full_name,
+			map_name, map_len);
+		return -EINVAL;
+	}
+
+	/* The default is to select all bits. */
+	map_mask = 0xffffffff;
+
+	/*
+	 * Can be overridden by "{iommu,msi}-map-mask" property.
+	 * If of_property_read_u32() fails, the default is used.
+	 */
+	if (map_mask_name)
+		of_property_read_u32(np, map_mask_name, &map_mask);
+
+	masked_rid = map_mask & rid;
+	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
+		struct device_node *phandle_node;
+		u32 rid_base = be32_to_cpup(map + 0);
+		u32 phandle = be32_to_cpup(map + 1);
+		u32 out_base = be32_to_cpup(map + 2);
+		u32 rid_len = be32_to_cpup(map + 3);
+
+		if (rid_base & ~map_mask) {
+			pr_err("%s: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
+				np->full_name, map_name, map_name,
+				map_mask, rid_base);
+			return -EFAULT;
+		}
+
+		if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
+			continue;
+
+		phandle_node = of_find_node_by_phandle(phandle);
+		if (!phandle_node)
+			return -ENODEV;
+
+		if (target) {
+			if (*target)
+				of_node_put(phandle_node);
+			else
+				*target = phandle_node;
+
+			if (*target != phandle_node)
+				continue;
+		}
+
+		if (id_out)
+			*id_out = masked_rid - rid_base + out_base;
+
+		pr_debug("%s: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
+			np->full_name, map_name, map_mask, rid_base, out_base,
+			rid_len, rid, *id_out);
+		return 0;
+	}
+
+	pr_err("%s: Invalid %s translation - no match for rid 0x%x on %s\n",
+		np->full_name, map_name, rid,
+		target && *target ? (*target)->full_name : "any target");
+	return -EFAULT;
+}
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index f811d27..e4bf07d 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -29,6 +29,7 @@
 const struct of_device_id of_default_bus_match_table[] = {
 	{ .compatible = "simple-bus", },
 	{ .compatible = "simple-mfd", },
+	{ .compatible = "isa", },
 #ifdef CONFIG_ARM_AMBA
 	{ .compatible = "arm,amba-bus", },
 #endif /* CONFIG_ARM_AMBA */
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index a0e5260..134398e 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -30,7 +30,7 @@
 	if (inode) {
 		inode->i_ino = get_next_ino();
 		inode->i_mode = mode;
-		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 	}
 	return inode;
 }
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index e4a5b7e..4fce494 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -230,20 +230,20 @@
 
 static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
 {
+	struct device *dev = &pcie->pdev->dev;
 	int retries;
 
 	/* check if the link is up or not */
 	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
 		if (advk_pcie_link_up(pcie)) {
-			dev_info(&pcie->pdev->dev, "link up\n");
+			dev_info(dev, "link up\n");
 			return 0;
 		}
 
 		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
 	}
 
-	dev_err(&pcie->pdev->dev, "link never came up\n");
-
+	dev_err(dev, "link never came up\n");
 	return -ETIMEDOUT;
 }
 
@@ -376,6 +376,7 @@
 
 static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
 {
+	struct device *dev = &pcie->pdev->dev;
 	u32 reg;
 	unsigned int status;
 	char *strcomp_status, *str_posted;
@@ -407,12 +408,13 @@
 	else
 		str_posted = "Posted";
 
-	dev_err(&pcie->pdev->dev, "%s PIO Response Status: %s, %#x @ %#x\n",
+	dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
 		str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
 }
 
 static int advk_pcie_wait_pio(struct advk_pcie *pcie)
 {
+	struct device *dev = &pcie->pdev->dev;
 	unsigned long timeout;
 
 	timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS);
@@ -426,7 +428,7 @@
 			return 0;
 	}
 
-	dev_err(&pcie->pdev->dev, "config read/write timed out\n");
+	dev_err(dev, "config read/write timed out\n");
 	return -ETIMEDOUT;
 }
 
@@ -560,10 +562,11 @@
 
 static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq)
 {
+	struct device *dev = &pcie->pdev->dev;
+
 	mutex_lock(&pcie->msi_used_lock);
 	if (!test_bit(hwirq, pcie->msi_irq_in_use))
-		dev_err(&pcie->pdev->dev, "trying to free unused MSI#%d\n",
-			hwirq);
+		dev_err(dev, "trying to free unused MSI#%d\n", hwirq);
 	else
 		clear_bit(hwirq, pcie->msi_irq_in_use);
 	mutex_unlock(&pcie->msi_used_lock);
@@ -910,6 +913,7 @@
 
 static int advk_pcie_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct advk_pcie *pcie;
 	struct resource *res;
 	struct pci_bus *bus, *child;
@@ -917,31 +921,29 @@
 	struct device_node *msi_node;
 	int ret, irq;
 
-	pcie = devm_kzalloc(&pdev->dev, sizeof(struct advk_pcie),
-			    GFP_KERNEL);
+	pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL);
 	if (!pcie)
 		return -ENOMEM;
 
 	pcie->pdev = pdev;
-	platform_set_drvdata(pdev, pcie);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	pcie->base = devm_ioremap_resource(&pdev->dev, res);
+	pcie->base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(pcie->base))
 		return PTR_ERR(pcie->base);
 
 	irq = platform_get_irq(pdev, 0);
-	ret = devm_request_irq(&pdev->dev, irq, advk_pcie_irq_handler,
+	ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
 			       IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
 			       pcie);
 	if (ret) {
-		dev_err(&pdev->dev, "Failed to register interrupt\n");
+		dev_err(dev, "Failed to register interrupt\n");
 		return ret;
 	}
 
 	ret = advk_pcie_parse_request_of_pci_ranges(pcie);
 	if (ret) {
-		dev_err(&pdev->dev, "Failed to parse resources\n");
+		dev_err(dev, "Failed to parse resources\n");
 		return ret;
 	}
 
@@ -949,24 +951,24 @@
 
 	ret = advk_pcie_init_irq_domain(pcie);
 	if (ret) {
-		dev_err(&pdev->dev, "Failed to initialize irq\n");
+		dev_err(dev, "Failed to initialize irq\n");
 		return ret;
 	}
 
 	ret = advk_pcie_init_msi_irq_domain(pcie);
 	if (ret) {
-		dev_err(&pdev->dev, "Failed to initialize irq\n");
+		dev_err(dev, "Failed to initialize irq\n");
 		advk_pcie_remove_irq_domain(pcie);
 		return ret;
 	}
 
-	msi_node = of_parse_phandle(pdev->dev.of_node, "msi-parent", 0);
+	msi_node = of_parse_phandle(dev->of_node, "msi-parent", 0);
 	if (msi_node)
 		msi = of_pci_find_msi_chip_by_node(msi_node);
 	else
 		msi = NULL;
 
-	bus = pci_scan_root_bus_msi(&pdev->dev, 0, &advk_pcie_ops,
+	bus = pci_scan_root_bus_msi(dev, 0, &advk_pcie_ops,
 				    pcie, &pcie->resources, &pcie->msi);
 	if (!bus) {
 		advk_pcie_remove_msi_irq_domain(pcie);
@@ -980,7 +982,6 @@
 		pcie_bus_configure_settings(child);
 
 	pci_bus_add_devices(bus);
-
 	return 0;
 }
 
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
index 19223ed..9595fad 100644
--- a/drivers/pci/host/pci-dra7xx.c
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -64,11 +64,10 @@
 #define	DRA7XX_CPU_TO_BUS_ADDR				0x0FFFFFFF
 
 struct dra7xx_pcie {
-	void __iomem		*base;
-	struct phy		**phy;
-	int			phy_count;
-	struct device		*dev;
 	struct pcie_port	pp;
+	void __iomem		*base;		/* DT ti_conf */
+	int			phy_count;	/* DT phy-names count */
+	struct phy		**phy;
 };
 
 #define to_dra7xx_pcie(x)	container_of((x), struct dra7xx_pcie, pp)
@@ -84,17 +83,6 @@
 	writel(value, pcie->base + offset);
 }
 
-static inline u32 dra7xx_pcie_readl_rc(struct pcie_port *pp, u32 offset)
-{
-	return readl(pp->dbi_base + offset);
-}
-
-static inline void dra7xx_pcie_writel_rc(struct pcie_port *pp, u32 offset,
-					 u32 value)
-{
-	writel(value, pp->dbi_base + offset);
-}
-
 static int dra7xx_pcie_link_up(struct pcie_port *pp)
 {
 	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
@@ -103,13 +91,14 @@
 	return !!(reg & LINK_UP);
 }
 
-static int dra7xx_pcie_establish_link(struct pcie_port *pp)
+static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx)
 {
-	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+	struct pcie_port *pp = &dra7xx->pp;
+	struct device *dev = pp->dev;
 	u32 reg;
 
 	if (dw_pcie_link_up(pp)) {
-		dev_err(pp->dev, "link is already up\n");
+		dev_err(dev, "link is already up\n");
 		return 0;
 	}
 
@@ -120,10 +109,8 @@
 	return dw_pcie_wait_for_link(pp);
 }
 
-static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
+static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
 {
-	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
-
 	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
 			   ~INTERRUPTS);
 	dra7xx_pcie_writel(dra7xx,
@@ -142,6 +129,8 @@
 
 static void dra7xx_pcie_host_init(struct pcie_port *pp)
 {
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+
 	pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
 	pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
 	pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR;
@@ -149,10 +138,10 @@
 
 	dw_pcie_setup_rc(pp);
 
-	dra7xx_pcie_establish_link(pp);
+	dra7xx_pcie_establish_link(dra7xx);
 	if (IS_ENABLED(CONFIG_PCI_MSI))
 		dw_pcie_msi_init(pp);
-	dra7xx_pcie_enable_interrupts(pp);
+	dra7xx_pcie_enable_interrupts(dra7xx);
 }
 
 static struct pcie_host_ops dra7xx_pcie_host_ops = {
@@ -196,8 +185,8 @@
 
 static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
 {
-	struct pcie_port *pp = arg;
-	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+	struct dra7xx_pcie *dra7xx = arg;
+	struct pcie_port *pp = &dra7xx->pp;
 	u32 reg;
 
 	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
@@ -223,51 +212,51 @@
 static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
 {
 	struct dra7xx_pcie *dra7xx = arg;
+	struct device *dev = dra7xx->pp.dev;
 	u32 reg;
 
 	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
 
 	if (reg & ERR_SYS)
-		dev_dbg(dra7xx->dev, "System Error\n");
+		dev_dbg(dev, "System Error\n");
 
 	if (reg & ERR_FATAL)
-		dev_dbg(dra7xx->dev, "Fatal Error\n");
+		dev_dbg(dev, "Fatal Error\n");
 
 	if (reg & ERR_NONFATAL)
-		dev_dbg(dra7xx->dev, "Non Fatal Error\n");
+		dev_dbg(dev, "Non Fatal Error\n");
 
 	if (reg & ERR_COR)
-		dev_dbg(dra7xx->dev, "Correctable Error\n");
+		dev_dbg(dev, "Correctable Error\n");
 
 	if (reg & ERR_AXI)
-		dev_dbg(dra7xx->dev, "AXI tag lookup fatal Error\n");
+		dev_dbg(dev, "AXI tag lookup fatal Error\n");
 
 	if (reg & ERR_ECRC)
-		dev_dbg(dra7xx->dev, "ECRC Error\n");
+		dev_dbg(dev, "ECRC Error\n");
 
 	if (reg & PME_TURN_OFF)
-		dev_dbg(dra7xx->dev,
+		dev_dbg(dev,
 			"Power Management Event Turn-Off message received\n");
 
 	if (reg & PME_TO_ACK)
-		dev_dbg(dra7xx->dev,
+		dev_dbg(dev,
 			"Power Management Turn-Off Ack message received\n");
 
 	if (reg & PM_PME)
-		dev_dbg(dra7xx->dev,
-			"PM Power Management Event message received\n");
+		dev_dbg(dev, "PM Power Management Event message received\n");
 
 	if (reg & LINK_REQ_RST)
-		dev_dbg(dra7xx->dev, "Link Request Reset\n");
+		dev_dbg(dev, "Link Request Reset\n");
 
 	if (reg & LINK_UP_EVT)
-		dev_dbg(dra7xx->dev, "Link-up state change\n");
+		dev_dbg(dev, "Link-up state change\n");
 
 	if (reg & CFG_BME_EVT)
-		dev_dbg(dra7xx->dev, "CFG 'Bus Master Enable' change\n");
+		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");
 
 	if (reg & CFG_MSE_EVT)
-		dev_dbg(dra7xx->dev, "CFG 'Memory Space Enable' change\n");
+		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");
 
 	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);
 
@@ -278,13 +267,9 @@
 				       struct platform_device *pdev)
 {
 	int ret;
-	struct pcie_port *pp;
+	struct pcie_port *pp = &dra7xx->pp;
+	struct device *dev = pp->dev;
 	struct resource *res;
-	struct device *dev = &pdev->dev;
-
-	pp = &dra7xx->pp;
-	pp->dev = dev;
-	pp->ops = &dra7xx_pcie_host_ops;
 
 	pp->irq = platform_get_irq(pdev, 1);
 	if (pp->irq < 0) {
@@ -292,12 +277,11 @@
 		return -EINVAL;
 	}
 
-	ret = devm_request_irq(&pdev->dev, pp->irq,
-			       dra7xx_pcie_msi_irq_handler,
+	ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
 			       IRQF_SHARED | IRQF_NO_THREAD,
-			       "dra7-pcie-msi",	pp);
+			       "dra7-pcie-msi",	dra7xx);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to request irq\n");
+		dev_err(dev, "failed to request irq\n");
 		return ret;
 	}
 
@@ -314,7 +298,7 @@
 
 	ret = dw_pcie_host_init(pp);
 	if (ret) {
-		dev_err(dra7xx->dev, "failed to initialize host\n");
+		dev_err(dev, "failed to initialize host\n");
 		return ret;
 	}
 
@@ -332,6 +316,7 @@
 	void __iomem *base;
 	struct resource *res;
 	struct dra7xx_pcie *dra7xx;
+	struct pcie_port *pp;
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
 	char name[10];
@@ -343,6 +328,10 @@
 	if (!dra7xx)
 		return -ENOMEM;
 
+	pp = &dra7xx->pp;
+	pp->dev = dev;
+	pp->ops = &dra7xx_pcie_host_ops;
+
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
 		dev_err(dev, "missing IRQ resource\n");
@@ -390,7 +379,6 @@
 
 	dra7xx->base = base;
 	dra7xx->phy = phy;
-	dra7xx->dev = dev;
 	dra7xx->phy_count = phy_count;
 
 	pm_runtime_enable(dev);
@@ -407,7 +395,7 @@
 		ret = devm_gpio_request_one(dev, gpio_sel, gpio_flags,
 					    "pcie_reset");
 		if (ret) {
-			dev_err(&pdev->dev, "gpio%d request failed, ret %d\n",
+			dev_err(dev, "gpio%d request failed, ret %d\n",
 				gpio_sel, ret);
 			goto err_gpio;
 		}
@@ -420,12 +408,11 @@
 	reg &= ~LTSSM_EN;
 	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
 
-	platform_set_drvdata(pdev, dra7xx);
-
 	ret = dra7xx_add_pcie_port(dra7xx, pdev);
 	if (ret < 0)
 		goto err_gpio;
 
+	platform_set_drvdata(pdev, dra7xx);
 	return 0;
 
 err_gpio:
@@ -451,9 +438,9 @@
 	u32 val;
 
 	/* clear MSE */
-	val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND);
+	val = dw_pcie_readl_rc(pp, PCI_COMMAND);
 	val &= ~PCI_COMMAND_MEMORY;
-	dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val);
+	dw_pcie_writel_rc(pp, PCI_COMMAND, val);
 
 	return 0;
 }
@@ -465,9 +452,9 @@
 	u32 val;
 
 	/* set MSE */
-	val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND);
+	val = dw_pcie_readl_rc(pp, PCI_COMMAND);
 	val |= PCI_COMMAND_MEMORY;
-	dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val);
+	dw_pcie_writel_rc(pp, PCI_COMMAND, val);
 
 	return 0;
 }
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index 2e2d7f0..f1c544b 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -29,13 +29,13 @@
 #define to_exynos_pcie(x)	container_of(x, struct exynos_pcie, pp)
 
 struct exynos_pcie {
-	void __iomem		*elbi_base;
-	void __iomem		*phy_base;
-	void __iomem		*block_base;
+	struct pcie_port	pp;
+	void __iomem		*elbi_base;	/* DT 0th resource */
+	void __iomem		*phy_base;	/* DT 1st resource */
+	void __iomem		*block_base;	/* DT 2nd resource */
 	int			reset_gpio;
 	struct clk		*clk;
 	struct clk		*bus_clk;
-	struct pcie_port	pp;
 };
 
 /* PCIe ELBI registers */
@@ -102,40 +102,40 @@
 #define PCIE_PHY_TRSV3_PD_TSV		(0x1 << 7)
 #define PCIE_PHY_TRSV3_LVCC		0x31c
 
-static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+static void exynos_elb_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg)
 {
-	writel(val, pcie->elbi_base + reg);
+	writel(val, exynos_pcie->elbi_base + reg);
 }
 
-static inline u32 exynos_elb_readl(struct exynos_pcie *pcie, u32 reg)
+static u32 exynos_elb_readl(struct exynos_pcie *exynos_pcie, u32 reg)
 {
-	return readl(pcie->elbi_base + reg);
+	return readl(exynos_pcie->elbi_base + reg);
 }
 
-static inline void exynos_phy_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+static void exynos_phy_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg)
 {
-	writel(val, pcie->phy_base + reg);
+	writel(val, exynos_pcie->phy_base + reg);
 }
 
-static inline u32 exynos_phy_readl(struct exynos_pcie *pcie, u32 reg)
+static u32 exynos_phy_readl(struct exynos_pcie *exynos_pcie, u32 reg)
 {
-	return readl(pcie->phy_base + reg);
+	return readl(exynos_pcie->phy_base + reg);
 }
 
-static inline void exynos_blk_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+static void exynos_blk_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg)
 {
-	writel(val, pcie->block_base + reg);
+	writel(val, exynos_pcie->block_base + reg);
 }
 
-static inline u32 exynos_blk_readl(struct exynos_pcie *pcie, u32 reg)
+static u32 exynos_blk_readl(struct exynos_pcie *exynos_pcie, u32 reg)
 {
-	return readl(pcie->block_base + reg);
+	return readl(exynos_pcie->block_base + reg);
 }
 
-static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
+static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *exynos_pcie,
+					    bool on)
 {
 	u32 val;
-	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
 
 	if (on) {
 		val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_AWMISC);
@@ -148,10 +148,10 @@
 	}
 }
 
-static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
+static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *exynos_pcie,
+					    bool on)
 {
 	u32 val;
-	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
 
 	if (on) {
 		val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_ARMISC);
@@ -164,10 +164,9 @@
 	}
 }
 
-static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
+static void exynos_pcie_assert_core_reset(struct exynos_pcie *exynos_pcie)
 {
 	u32 val;
-	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
 
 	val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
 	val &= ~PCIE_CORE_RESET_ENABLE;
@@ -177,10 +176,9 @@
 	exynos_elb_writel(exynos_pcie, 0, PCIE_NONSTICKY_RESET);
 }
 
-static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
+static void exynos_pcie_deassert_core_reset(struct exynos_pcie *exynos_pcie)
 {
 	u32 val;
-	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
 
 	val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
 	val |= PCIE_CORE_RESET_ENABLE;
@@ -193,18 +191,14 @@
 	exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_MAC_RESET);
 }
 
-static void exynos_pcie_assert_phy_reset(struct pcie_port *pp)
+static void exynos_pcie_assert_phy_reset(struct exynos_pcie *exynos_pcie)
 {
-	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
-
 	exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_MAC_RESET);
 	exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_GLOBAL_RESET);
 }
 
-static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
+static void exynos_pcie_deassert_phy_reset(struct exynos_pcie *exynos_pcie)
 {
-	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
-
 	exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_GLOBAL_RESET);
 	exynos_elb_writel(exynos_pcie, 1, PCIE_PWR_RESET);
 	exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
@@ -213,10 +207,9 @@
 	exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET);
 }
 
-static void exynos_pcie_power_on_phy(struct pcie_port *pp)
+static void exynos_pcie_power_on_phy(struct exynos_pcie *exynos_pcie)
 {
 	u32 val;
-	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
 
 	val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
 	val &= ~PCIE_PHY_COMMON_PD_CMN;
@@ -239,10 +232,9 @@
 	exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
 }
 
-static void exynos_pcie_power_off_phy(struct pcie_port *pp)
+static void exynos_pcie_power_off_phy(struct exynos_pcie *exynos_pcie)
 {
 	u32 val;
-	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
 
 	val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
 	val |= PCIE_PHY_COMMON_PD_CMN;
@@ -265,10 +257,8 @@
 	exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
 }
 
-static void exynos_pcie_init_phy(struct pcie_port *pp)
+static void exynos_pcie_init_phy(struct exynos_pcie *exynos_pcie)
 {
-	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
-
 	/* DCC feedback control off */
 	exynos_phy_writel(exynos_pcie, 0x29, PCIE_PHY_DCC_FEEDBACK);
 
@@ -305,51 +295,41 @@
 	exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV3_LVCC);
 }
 
-static void exynos_pcie_assert_reset(struct pcie_port *pp)
+static void exynos_pcie_assert_reset(struct exynos_pcie *exynos_pcie)
 {
-	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+	struct pcie_port *pp = &exynos_pcie->pp;
+	struct device *dev = pp->dev;
 
 	if (exynos_pcie->reset_gpio >= 0)
-		devm_gpio_request_one(pp->dev, exynos_pcie->reset_gpio,
+		devm_gpio_request_one(dev, exynos_pcie->reset_gpio,
 				GPIOF_OUT_INIT_HIGH, "RESET");
 }
 
-static int exynos_pcie_establish_link(struct pcie_port *pp)
+static int exynos_pcie_establish_link(struct exynos_pcie *exynos_pcie)
 {
-	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+	struct pcie_port *pp = &exynos_pcie->pp;
+	struct device *dev = pp->dev;
 	u32 val;
 
 	if (dw_pcie_link_up(pp)) {
-		dev_err(pp->dev, "Link already up\n");
+		dev_err(dev, "Link already up\n");
 		return 0;
 	}
 
-	/* assert reset signals */
-	exynos_pcie_assert_core_reset(pp);
-	exynos_pcie_assert_phy_reset(pp);
-
-	/* de-assert phy reset */
-	exynos_pcie_deassert_phy_reset(pp);
-
-	/* power on phy */
-	exynos_pcie_power_on_phy(pp);
-
-	/* initialize phy */
-	exynos_pcie_init_phy(pp);
+	exynos_pcie_assert_core_reset(exynos_pcie);
+	exynos_pcie_assert_phy_reset(exynos_pcie);
+	exynos_pcie_deassert_phy_reset(exynos_pcie);
+	exynos_pcie_power_on_phy(exynos_pcie);
+	exynos_pcie_init_phy(exynos_pcie);
 
 	/* pulse for common reset */
 	exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_COMMON_RESET);
 	udelay(500);
 	exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
 
-	/* de-assert core reset */
-	exynos_pcie_deassert_core_reset(pp);
-
-	/* setup root complex */
+	exynos_pcie_deassert_core_reset(exynos_pcie);
 	dw_pcie_setup_rc(pp);
-
-	/* assert reset signal */
-	exynos_pcie_assert_reset(pp);
+	exynos_pcie_assert_reset(exynos_pcie);
 
 	/* assert LTSSM enable */
 	exynos_elb_writel(exynos_pcie, PCIE_ELBI_LTSSM_ENABLE,
@@ -361,27 +341,23 @@
 
 	while (exynos_phy_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED) == 0) {
 		val = exynos_blk_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED);
-		dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
+		dev_info(dev, "PLL Locked: 0x%x\n", val);
 	}
-	/* power off phy */
-	exynos_pcie_power_off_phy(pp);
-
+	exynos_pcie_power_off_phy(exynos_pcie);
 	return -ETIMEDOUT;
 }
 
-static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
+static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *exynos_pcie)
 {
 	u32 val;
-	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
 
 	val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_PULSE);
 	exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_PULSE);
 }
 
-static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
+static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *exynos_pcie)
 {
 	u32 val;
-	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
 
 	/* enable INTX interrupt */
 	val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
@@ -391,23 +367,24 @@
 
 static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
 {
-	struct pcie_port *pp = arg;
+	struct exynos_pcie *exynos_pcie = arg;
 
-	exynos_pcie_clear_irq_pulse(pp);
+	exynos_pcie_clear_irq_pulse(exynos_pcie);
 	return IRQ_HANDLED;
 }
 
 static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
 {
-	struct pcie_port *pp = arg;
+	struct exynos_pcie *exynos_pcie = arg;
+	struct pcie_port *pp = &exynos_pcie->pp;
 
 	return dw_handle_msi_irq(pp);
 }
 
-static void exynos_pcie_msi_init(struct pcie_port *pp)
+static void exynos_pcie_msi_init(struct exynos_pcie *exynos_pcie)
 {
+	struct pcie_port *pp = &exynos_pcie->pp;
 	u32 val;
-	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
 
 	dw_pcie_msi_init(pp);
 
@@ -417,60 +394,64 @@
 	exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL);
 }
 
-static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
+static void exynos_pcie_enable_interrupts(struct exynos_pcie *exynos_pcie)
 {
-	exynos_pcie_enable_irq_pulse(pp);
+	exynos_pcie_enable_irq_pulse(exynos_pcie);
 
 	if (IS_ENABLED(CONFIG_PCI_MSI))
-		exynos_pcie_msi_init(pp);
+		exynos_pcie_msi_init(exynos_pcie);
 }
 
-static inline u32 exynos_pcie_readl_rc(struct pcie_port *pp,
-				       void __iomem *dbi_base)
+static u32 exynos_pcie_readl_rc(struct pcie_port *pp, u32 reg)
 {
+	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
 	u32 val;
 
-	exynos_pcie_sideband_dbi_r_mode(pp, true);
-	val = readl(dbi_base);
-	exynos_pcie_sideband_dbi_r_mode(pp, false);
+	exynos_pcie_sideband_dbi_r_mode(exynos_pcie, true);
+	val = readl(pp->dbi_base + reg);
+	exynos_pcie_sideband_dbi_r_mode(exynos_pcie, false);
 	return val;
 }
 
-static inline void exynos_pcie_writel_rc(struct pcie_port *pp,
-					u32 val, void __iomem *dbi_base)
+static void exynos_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
 {
-	exynos_pcie_sideband_dbi_w_mode(pp, true);
-	writel(val, dbi_base);
-	exynos_pcie_sideband_dbi_w_mode(pp, false);
+	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+	exynos_pcie_sideband_dbi_w_mode(exynos_pcie, true);
+	writel(val, pp->dbi_base + reg);
+	exynos_pcie_sideband_dbi_w_mode(exynos_pcie, false);
 }
 
 static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
 				u32 *val)
 {
+	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
 	int ret;
 
-	exynos_pcie_sideband_dbi_r_mode(pp, true);
+	exynos_pcie_sideband_dbi_r_mode(exynos_pcie, true);
 	ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val);
-	exynos_pcie_sideband_dbi_r_mode(pp, false);
+	exynos_pcie_sideband_dbi_r_mode(exynos_pcie, false);
 	return ret;
 }
 
 static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
 				u32 val)
 {
+	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
 	int ret;
 
-	exynos_pcie_sideband_dbi_w_mode(pp, true);
+	exynos_pcie_sideband_dbi_w_mode(exynos_pcie, true);
 	ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val);
-	exynos_pcie_sideband_dbi_w_mode(pp, false);
+	exynos_pcie_sideband_dbi_w_mode(exynos_pcie, false);
 	return ret;
 }
 
 static int exynos_pcie_link_up(struct pcie_port *pp)
 {
 	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
-	u32 val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_RDLH_LINKUP);
+	u32 val;
 
+	val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_RDLH_LINKUP);
 	if (val == PCIE_ELBI_LTSSM_ENABLE)
 		return 1;
 
@@ -479,8 +460,10 @@
 
 static void exynos_pcie_host_init(struct pcie_port *pp)
 {
-	exynos_pcie_establish_link(pp);
-	exynos_pcie_enable_interrupts(pp);
+	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+	exynos_pcie_establish_link(exynos_pcie);
+	exynos_pcie_enable_interrupts(exynos_pcie);
 }
 
 static struct pcie_host_ops exynos_pcie_host_ops = {
@@ -492,36 +475,38 @@
 	.host_init = exynos_pcie_host_init,
 };
 
-static int __init exynos_add_pcie_port(struct pcie_port *pp,
+static int __init exynos_add_pcie_port(struct exynos_pcie *exynos_pcie,
 				       struct platform_device *pdev)
 {
+	struct pcie_port *pp = &exynos_pcie->pp;
+	struct device *dev = pp->dev;
 	int ret;
 
 	pp->irq = platform_get_irq(pdev, 1);
 	if (!pp->irq) {
-		dev_err(&pdev->dev, "failed to get irq\n");
+		dev_err(dev, "failed to get irq\n");
 		return -ENODEV;
 	}
-	ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler,
-				IRQF_SHARED, "exynos-pcie", pp);
+	ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
+				IRQF_SHARED, "exynos-pcie", exynos_pcie);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to request irq\n");
+		dev_err(dev, "failed to request irq\n");
 		return ret;
 	}
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 		pp->msi_irq = platform_get_irq(pdev, 0);
 		if (!pp->msi_irq) {
-			dev_err(&pdev->dev, "failed to get msi irq\n");
+			dev_err(dev, "failed to get msi irq\n");
 			return -ENODEV;
 		}
 
-		ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+		ret = devm_request_irq(dev, pp->msi_irq,
 					exynos_pcie_msi_irq_handler,
 					IRQF_SHARED | IRQF_NO_THREAD,
-					"exynos-pcie", pp);
+					"exynos-pcie", exynos_pcie);
 		if (ret) {
-			dev_err(&pdev->dev, "failed to request msi irq\n");
+			dev_err(dev, "failed to request msi irq\n");
 			return ret;
 		}
 	}
@@ -531,7 +516,7 @@
 
 	ret = dw_pcie_host_init(pp);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to initialize host\n");
+		dev_err(dev, "failed to initialize host\n");
 		return ret;
 	}
 
@@ -540,37 +525,36 @@
 
 static int __init exynos_pcie_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct exynos_pcie *exynos_pcie;
 	struct pcie_port *pp;
-	struct device_node *np = pdev->dev.of_node;
+	struct device_node *np = dev->of_node;
 	struct resource *elbi_base;
 	struct resource *phy_base;
 	struct resource *block_base;
 	int ret;
 
-	exynos_pcie = devm_kzalloc(&pdev->dev, sizeof(*exynos_pcie),
-				GFP_KERNEL);
+	exynos_pcie = devm_kzalloc(dev, sizeof(*exynos_pcie), GFP_KERNEL);
 	if (!exynos_pcie)
 		return -ENOMEM;
 
 	pp = &exynos_pcie->pp;
-
-	pp->dev = &pdev->dev;
+	pp->dev = dev;
 
 	exynos_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
 
-	exynos_pcie->clk = devm_clk_get(&pdev->dev, "pcie");
+	exynos_pcie->clk = devm_clk_get(dev, "pcie");
 	if (IS_ERR(exynos_pcie->clk)) {
-		dev_err(&pdev->dev, "Failed to get pcie rc clock\n");
+		dev_err(dev, "Failed to get pcie rc clock\n");
 		return PTR_ERR(exynos_pcie->clk);
 	}
 	ret = clk_prepare_enable(exynos_pcie->clk);
 	if (ret)
 		return ret;
 
-	exynos_pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
+	exynos_pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
 	if (IS_ERR(exynos_pcie->bus_clk)) {
-		dev_err(&pdev->dev, "Failed to get pcie bus clock\n");
+		dev_err(dev, "Failed to get pcie bus clock\n");
 		ret = PTR_ERR(exynos_pcie->bus_clk);
 		goto fail_clk;
 	}
@@ -579,27 +563,27 @@
 		goto fail_clk;
 
 	elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
+	exynos_pcie->elbi_base = devm_ioremap_resource(dev, elbi_base);
 	if (IS_ERR(exynos_pcie->elbi_base)) {
 		ret = PTR_ERR(exynos_pcie->elbi_base);
 		goto fail_bus_clk;
 	}
 
 	phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
+	exynos_pcie->phy_base = devm_ioremap_resource(dev, phy_base);
 	if (IS_ERR(exynos_pcie->phy_base)) {
 		ret = PTR_ERR(exynos_pcie->phy_base);
 		goto fail_bus_clk;
 	}
 
 	block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-	exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base);
+	exynos_pcie->block_base = devm_ioremap_resource(dev, block_base);
 	if (IS_ERR(exynos_pcie->block_base)) {
 		ret = PTR_ERR(exynos_pcie->block_base);
 		goto fail_bus_clk;
 	}
 
-	ret = exynos_add_pcie_port(pp, pdev);
+	ret = exynos_add_pcie_port(exynos_pcie, pdev);
 	if (ret < 0)
 		goto fail_bus_clk;
 
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index ead4a5c..c8cefb0 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -39,16 +39,15 @@
 };
 
 struct imx6_pcie {
+	struct pcie_port	pp;	/* pp.dbi_base is DT 0th resource */
 	int			reset_gpio;
 	bool			gpio_active_high;
 	struct clk		*pcie_bus;
 	struct clk		*pcie_phy;
 	struct clk		*pcie_inbound_axi;
 	struct clk		*pcie;
-	struct pcie_port	pp;
 	struct regmap		*iomuxc_gpr;
 	enum imx6_pcie_variants variant;
-	void __iomem		*mem_base;
 	u32			tx_deemph_gen1;
 	u32			tx_deemph_gen2_3p5db;
 	u32			tx_deemph_gen2_6db;
@@ -96,14 +95,15 @@
 #define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
 #define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
 
-static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
+static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
 {
+	struct pcie_port *pp = &imx6_pcie->pp;
 	u32 val;
 	u32 max_iterations = 10;
 	u32 wait_counter = 0;
 
 	do {
-		val = readl(dbi_base + PCIE_PHY_STAT);
+		val = dw_pcie_readl_rc(pp, PCIE_PHY_STAT);
 		val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
 		wait_counter++;
 
@@ -116,123 +116,126 @@
 	return -ETIMEDOUT;
 }
 
-static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
+static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
 {
+	struct pcie_port *pp = &imx6_pcie->pp;
 	u32 val;
 	int ret;
 
 	val = addr << PCIE_PHY_CTRL_DATA_LOC;
-	writel(val, dbi_base + PCIE_PHY_CTRL);
+	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val);
 
 	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
-	writel(val, dbi_base + PCIE_PHY_CTRL);
+	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val);
 
-	ret = pcie_phy_poll_ack(dbi_base, 1);
+	ret = pcie_phy_poll_ack(imx6_pcie, 1);
 	if (ret)
 		return ret;
 
 	val = addr << PCIE_PHY_CTRL_DATA_LOC;
-	writel(val, dbi_base + PCIE_PHY_CTRL);
+	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val);
 
-	return pcie_phy_poll_ack(dbi_base, 0);
+	return pcie_phy_poll_ack(imx6_pcie, 0);
 }
 
 /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
-static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
+static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
 {
+	struct pcie_port *pp = &imx6_pcie->pp;
 	u32 val, phy_ctl;
 	int ret;
 
-	ret = pcie_phy_wait_ack(dbi_base, addr);
+	ret = pcie_phy_wait_ack(imx6_pcie, addr);
 	if (ret)
 		return ret;
 
 	/* assert Read signal */
 	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
-	writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);
+	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, phy_ctl);
 
-	ret = pcie_phy_poll_ack(dbi_base, 1);
+	ret = pcie_phy_poll_ack(imx6_pcie, 1);
 	if (ret)
 		return ret;
 
-	val = readl(dbi_base + PCIE_PHY_STAT);
+	val = dw_pcie_readl_rc(pp, PCIE_PHY_STAT);
 	*data = val & 0xffff;
 
 	/* deassert Read signal */
-	writel(0x00, dbi_base + PCIE_PHY_CTRL);
+	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, 0x00);
 
-	return pcie_phy_poll_ack(dbi_base, 0);
+	return pcie_phy_poll_ack(imx6_pcie, 0);
 }
 
-static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
+static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
 {
+	struct pcie_port *pp = &imx6_pcie->pp;
 	u32 var;
 	int ret;
 
 	/* write addr */
 	/* cap addr */
-	ret = pcie_phy_wait_ack(dbi_base, addr);
+	ret = pcie_phy_wait_ack(imx6_pcie, addr);
 	if (ret)
 		return ret;
 
 	var = data << PCIE_PHY_CTRL_DATA_LOC;
-	writel(var, dbi_base + PCIE_PHY_CTRL);
+	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
 
 	/* capture data */
 	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
-	writel(var, dbi_base + PCIE_PHY_CTRL);
+	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
 
-	ret = pcie_phy_poll_ack(dbi_base, 1);
+	ret = pcie_phy_poll_ack(imx6_pcie, 1);
 	if (ret)
 		return ret;
 
 	/* deassert cap data */
 	var = data << PCIE_PHY_CTRL_DATA_LOC;
-	writel(var, dbi_base + PCIE_PHY_CTRL);
+	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
 
 	/* wait for ack de-assertion */
-	ret = pcie_phy_poll_ack(dbi_base, 0);
+	ret = pcie_phy_poll_ack(imx6_pcie, 0);
 	if (ret)
 		return ret;
 
 	/* assert wr signal */
 	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
-	writel(var, dbi_base + PCIE_PHY_CTRL);
+	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
 
 	/* wait for ack */
-	ret = pcie_phy_poll_ack(dbi_base, 1);
+	ret = pcie_phy_poll_ack(imx6_pcie, 1);
 	if (ret)
 		return ret;
 
 	/* deassert wr signal */
 	var = data << PCIE_PHY_CTRL_DATA_LOC;
-	writel(var, dbi_base + PCIE_PHY_CTRL);
+	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
 
 	/* wait for ack de-assertion */
-	ret = pcie_phy_poll_ack(dbi_base, 0);
+	ret = pcie_phy_poll_ack(imx6_pcie, 0);
 	if (ret)
 		return ret;
 
-	writel(0x0, dbi_base + PCIE_PHY_CTRL);
+	dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, 0x0);
 
 	return 0;
 }
 
-static void imx6_pcie_reset_phy(struct pcie_port *pp)
+static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
 {
 	u32 tmp;
 
-	pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
+	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
 	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
 		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
-	pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
+	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
 
 	usleep_range(2000, 3000);
 
-	pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
+	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
 	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
 		  PHY_RX_OVRD_IN_LO_RX_PLL_EN);
-	pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
+	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
 }
 
 /*  Added for PCI abort handling */
@@ -242,9 +245,9 @@
 	return 0;
 }
 
-static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
+static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
 {
-	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+	struct pcie_port *pp = &imx6_pcie->pp;
 	u32 val, gpr1, gpr12;
 
 	switch (imx6_pcie->variant) {
@@ -281,10 +284,10 @@
 
 		if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
 		    (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
-			val = readl(pp->dbi_base + PCIE_PL_PFLR);
+			val = dw_pcie_readl_rc(pp, PCIE_PL_PFLR);
 			val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
 			val |= PCIE_PL_PFLR_FORCE_LINK;
-			writel(val, pp->dbi_base + PCIE_PL_PFLR);
+			dw_pcie_writel_rc(pp, PCIE_PL_PFLR, val);
 
 			regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 					   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
@@ -296,20 +299,19 @@
 				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
 		break;
 	}
-
-	return 0;
 }
 
 static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
 {
 	struct pcie_port *pp = &imx6_pcie->pp;
+	struct device *dev = pp->dev;
 	int ret = 0;
 
 	switch (imx6_pcie->variant) {
 	case IMX6SX:
 		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
 		if (ret) {
-			dev_err(pp->dev, "unable to enable pcie_axi clock\n");
+			dev_err(dev, "unable to enable pcie_axi clock\n");
 			break;
 		}
 
@@ -336,32 +338,33 @@
 	return ret;
 }
 
-static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
+static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
 {
-	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+	struct pcie_port *pp = &imx6_pcie->pp;
+	struct device *dev = pp->dev;
 	int ret;
 
 	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
 	if (ret) {
-		dev_err(pp->dev, "unable to enable pcie_phy clock\n");
-		goto err_pcie_phy;
+		dev_err(dev, "unable to enable pcie_phy clock\n");
+		return;
 	}
 
 	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
 	if (ret) {
-		dev_err(pp->dev, "unable to enable pcie_bus clock\n");
+		dev_err(dev, "unable to enable pcie_bus clock\n");
 		goto err_pcie_bus;
 	}
 
 	ret = clk_prepare_enable(imx6_pcie->pcie);
 	if (ret) {
-		dev_err(pp->dev, "unable to enable pcie clock\n");
+		dev_err(dev, "unable to enable pcie clock\n");
 		goto err_pcie;
 	}
 
 	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
 	if (ret) {
-		dev_err(pp->dev, "unable to enable pcie ref clock\n");
+		dev_err(dev, "unable to enable pcie ref clock\n");
 		goto err_ref_clk;
 	}
 
@@ -392,7 +395,7 @@
 		break;
 	}
 
-	return 0;
+	return;
 
 err_ref_clk:
 	clk_disable_unprepare(imx6_pcie->pcie);
@@ -400,14 +403,10 @@
 	clk_disable_unprepare(imx6_pcie->pcie_bus);
 err_pcie_bus:
 	clk_disable_unprepare(imx6_pcie->pcie_phy);
-err_pcie_phy:
-	return ret;
 }
 
-static void imx6_pcie_init_phy(struct pcie_port *pp)
+static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
 {
-	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
-
 	if (imx6_pcie->variant == IMX6SX)
 		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
@@ -439,45 +438,52 @@
 			   imx6_pcie->tx_swing_low << 25);
 }
 
-static int imx6_pcie_wait_for_link(struct pcie_port *pp)
+static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
 {
+	struct pcie_port *pp = &imx6_pcie->pp;
+	struct device *dev = pp->dev;
+
 	/* check if the link is up or not */
 	if (!dw_pcie_wait_for_link(pp))
 		return 0;
 
-	dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
-		readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
-		readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+	dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
+		dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0),
+		dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1));
 	return -ETIMEDOUT;
 }
 
-static int imx6_pcie_wait_for_speed_change(struct pcie_port *pp)
+static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
 {
+	struct pcie_port *pp = &imx6_pcie->pp;
+	struct device *dev = pp->dev;
 	u32 tmp;
 	unsigned int retries;
 
 	for (retries = 0; retries < 200; retries++) {
-		tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+		tmp = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
 		/* Test if the speed change finished. */
 		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
 			return 0;
 		usleep_range(100, 1000);
 	}
 
-	dev_err(pp->dev, "Speed change timeout\n");
+	dev_err(dev, "Speed change timeout\n");
 	return -EINVAL;
 }
 
 static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
 {
-	struct pcie_port *pp = arg;
+	struct imx6_pcie *imx6_pcie = arg;
+	struct pcie_port *pp = &imx6_pcie->pp;
 
 	return dw_handle_msi_irq(pp);
 }
 
-static int imx6_pcie_establish_link(struct pcie_port *pp)
+static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
 {
-	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+	struct pcie_port *pp = &imx6_pcie->pp;
+	struct device *dev = pp->dev;
 	u32 tmp;
 	int ret;
 
@@ -486,76 +492,73 @@
 	 * started in Gen2 mode, there is a possibility the devices on the
 	 * bus will not be detected at all.  This happens with PCIe switches.
 	 */
-	tmp = readl(pp->dbi_base + PCIE_RC_LCR);
+	tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCR);
 	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
 	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
-	writel(tmp, pp->dbi_base + PCIE_RC_LCR);
+	dw_pcie_writel_rc(pp, PCIE_RC_LCR, tmp);
 
 	/* Start LTSSM. */
 	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 			IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
 
-	ret = imx6_pcie_wait_for_link(pp);
+	ret = imx6_pcie_wait_for_link(imx6_pcie);
 	if (ret) {
-		dev_info(pp->dev, "Link never came up\n");
+		dev_info(dev, "Link never came up\n");
 		goto err_reset_phy;
 	}
 
 	if (imx6_pcie->link_gen == 2) {
 		/* Allow Gen2 mode after the link is up. */
-		tmp = readl(pp->dbi_base + PCIE_RC_LCR);
+		tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCR);
 		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
 		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
-		writel(tmp, pp->dbi_base + PCIE_RC_LCR);
+		dw_pcie_writel_rc(pp, PCIE_RC_LCR, tmp);
 	} else {
-		dev_info(pp->dev, "Link: Gen2 disabled\n");
+		dev_info(dev, "Link: Gen2 disabled\n");
 	}
 
 	/*
 	 * Start Directed Speed Change so the best possible speed both link
 	 * partners support can be negotiated.
 	 */
-	tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+	tmp = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
 	tmp |= PORT_LOGIC_SPEED_CHANGE;
-	writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+	dw_pcie_writel_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
 
-	ret = imx6_pcie_wait_for_speed_change(pp);
+	ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
 	if (ret) {
-		dev_err(pp->dev, "Failed to bring link up!\n");
+		dev_err(dev, "Failed to bring link up!\n");
 		goto err_reset_phy;
 	}
 
 	/* Make sure link training is finished as well! */
-	ret = imx6_pcie_wait_for_link(pp);
+	ret = imx6_pcie_wait_for_link(imx6_pcie);
 	if (ret) {
-		dev_err(pp->dev, "Failed to bring link up!\n");
+		dev_err(dev, "Failed to bring link up!\n");
 		goto err_reset_phy;
 	}
 
-	tmp = readl(pp->dbi_base + PCIE_RC_LCSR);
-	dev_info(pp->dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
+	tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCSR);
+	dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
 	return 0;
 
 err_reset_phy:
-	dev_dbg(pp->dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
-		readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
-		readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
-	imx6_pcie_reset_phy(pp);
-
+	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
+		dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0),
+		dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1));
+	imx6_pcie_reset_phy(imx6_pcie);
 	return ret;
 }
 
 static void imx6_pcie_host_init(struct pcie_port *pp)
 {
-	imx6_pcie_assert_core_reset(pp);
+	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
 
-	imx6_pcie_init_phy(pp);
-
-	imx6_pcie_deassert_core_reset(pp);
-
+	imx6_pcie_assert_core_reset(imx6_pcie);
+	imx6_pcie_init_phy(imx6_pcie);
+	imx6_pcie_deassert_core_reset(imx6_pcie);
 	dw_pcie_setup_rc(pp);
-
-	imx6_pcie_establish_link(pp);
+	imx6_pcie_establish_link(imx6_pcie);
 
 	if (IS_ENABLED(CONFIG_PCI_MSI))
 		dw_pcie_msi_init(pp);
@@ -563,7 +566,7 @@
 
 static int imx6_pcie_link_up(struct pcie_port *pp)
 {
-	return readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) &
+	return dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1) &
 			PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
 }
 
@@ -572,24 +575,26 @@
 	.host_init = imx6_pcie_host_init,
 };
 
-static int __init imx6_add_pcie_port(struct pcie_port *pp,
-			struct platform_device *pdev)
+static int __init imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
+				     struct platform_device *pdev)
 {
+	struct pcie_port *pp = &imx6_pcie->pp;
+	struct device *dev = pp->dev;
 	int ret;
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
 		if (pp->msi_irq <= 0) {
-			dev_err(&pdev->dev, "failed to get MSI irq\n");
+			dev_err(dev, "failed to get MSI irq\n");
 			return -ENODEV;
 		}
 
-		ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+		ret = devm_request_irq(dev, pp->msi_irq,
 				       imx6_pcie_msi_handler,
 				       IRQF_SHARED | IRQF_NO_THREAD,
-				       "mx6-pcie-msi", pp);
+				       "mx6-pcie-msi", imx6_pcie);
 		if (ret) {
-			dev_err(&pdev->dev, "failed to request MSI irq\n");
+			dev_err(dev, "failed to request MSI irq\n");
 			return ret;
 		}
 	}
@@ -599,7 +604,7 @@
 
 	ret = dw_pcie_host_init(pp);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to initialize host\n");
+		dev_err(dev, "failed to initialize host\n");
 		return ret;
 	}
 
@@ -608,75 +613,72 @@
 
 static int __init imx6_pcie_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct imx6_pcie *imx6_pcie;
 	struct pcie_port *pp;
-	struct device_node *np = pdev->dev.of_node;
 	struct resource *dbi_base;
-	struct device_node *node = pdev->dev.of_node;
+	struct device_node *node = dev->of_node;
 	int ret;
 
-	imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
+	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
 	if (!imx6_pcie)
 		return -ENOMEM;
 
 	pp = &imx6_pcie->pp;
-	pp->dev = &pdev->dev;
+	pp->dev = dev;
 
 	imx6_pcie->variant =
-		(enum imx6_pcie_variants)of_device_get_match_data(&pdev->dev);
+		(enum imx6_pcie_variants)of_device_get_match_data(dev);
 
 	/* Added for PCI abort handling */
 	hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
 		"imprecise external abort");
 
 	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
+	pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
 	if (IS_ERR(pp->dbi_base))
 		return PTR_ERR(pp->dbi_base);
 
 	/* Fetch GPIOs */
-	imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
-	imx6_pcie->gpio_active_high = of_property_read_bool(np,
+	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
+	imx6_pcie->gpio_active_high = of_property_read_bool(node,
 						"reset-gpio-active-high");
 	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
-		ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
+		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
 				imx6_pcie->gpio_active_high ?
 					GPIOF_OUT_INIT_HIGH :
 					GPIOF_OUT_INIT_LOW,
 				"PCIe reset");
 		if (ret) {
-			dev_err(&pdev->dev, "unable to get reset gpio\n");
+			dev_err(dev, "unable to get reset gpio\n");
 			return ret;
 		}
 	}
 
 	/* Fetch clocks */
-	imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
+	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
 	if (IS_ERR(imx6_pcie->pcie_phy)) {
-		dev_err(&pdev->dev,
-			"pcie_phy clock source missing or invalid\n");
+		dev_err(dev, "pcie_phy clock source missing or invalid\n");
 		return PTR_ERR(imx6_pcie->pcie_phy);
 	}
 
-	imx6_pcie->pcie_bus = devm_clk_get(&pdev->dev, "pcie_bus");
+	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
 	if (IS_ERR(imx6_pcie->pcie_bus)) {
-		dev_err(&pdev->dev,
-			"pcie_bus clock source missing or invalid\n");
+		dev_err(dev, "pcie_bus clock source missing or invalid\n");
 		return PTR_ERR(imx6_pcie->pcie_bus);
 	}
 
-	imx6_pcie->pcie = devm_clk_get(&pdev->dev, "pcie");
+	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
 	if (IS_ERR(imx6_pcie->pcie)) {
-		dev_err(&pdev->dev,
-			"pcie clock source missing or invalid\n");
+		dev_err(dev, "pcie clock source missing or invalid\n");
 		return PTR_ERR(imx6_pcie->pcie);
 	}
 
 	if (imx6_pcie->variant == IMX6SX) {
-		imx6_pcie->pcie_inbound_axi = devm_clk_get(&pdev->dev,
+		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
 							   "pcie_inbound_axi");
 		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
-			dev_err(&pdev->dev,
+			dev_err(dev,
 				"pcie_incbound_axi clock missing or invalid\n");
 			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
 		}
@@ -686,7 +688,7 @@
 	imx6_pcie->iomuxc_gpr =
 		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
 	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
-		dev_err(&pdev->dev, "unable to find iomuxc registers\n");
+		dev_err(dev, "unable to find iomuxc registers\n");
 		return PTR_ERR(imx6_pcie->iomuxc_gpr);
 	}
 
@@ -712,12 +714,12 @@
 		imx6_pcie->tx_swing_low = 127;
 
 	/* Limit link speed */
-	ret = of_property_read_u32(pp->dev->of_node, "fsl,max-link-speed",
+	ret = of_property_read_u32(node, "fsl,max-link-speed",
 				   &imx6_pcie->link_gen);
 	if (ret)
 		imx6_pcie->link_gen = 1;
 
-	ret = imx6_add_pcie_port(pp, pdev);
+	ret = imx6_add_pcie_port(imx6_pcie, pdev);
 	if (ret < 0)
 		return ret;
 
@@ -730,7 +732,7 @@
 	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
 
 	/* bring down link, so bootloader gets clean state in case of reboot */
-	imx6_pcie_assert_core_reset(&imx6_pcie->pp);
+	imx6_pcie_assert_core_reset(imx6_pcie);
 }
 
 static const struct of_device_id imx6_pcie_of_match[] = {
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
index 4151509..9397c46 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -88,13 +88,24 @@
 	return ks_pcie->app.start + MSI_IRQ;
 }
 
+static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
+{
+	return readl(ks_pcie->va_app_base + offset);
+}
+
+static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val)
+{
+	writel(val, ks_pcie->va_app_base + offset);
+}
+
 void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
 {
 	struct pcie_port *pp = &ks_pcie->pp;
+	struct device *dev = pp->dev;
 	u32 pending, vector;
 	int src, virq;
 
-	pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4));
+	pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
 
 	/*
 	 * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
@@ -104,7 +115,7 @@
 		if (BIT(src) & pending) {
 			vector = offset + (src << 3);
 			virq = irq_linear_revmap(pp->irq_domain, vector);
-			dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n",
+			dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
 				src, vector, virq);
 			generic_handle_irq(virq);
 		}
@@ -124,9 +135,9 @@
 	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
 	update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
 
-	writel(BIT(bit_pos),
-	       ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4));
-	writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI);
+	ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
+			 BIT(bit_pos));
+	ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
 }
 
 void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
@@ -135,8 +146,8 @@
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
 
 	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
-	writel(BIT(bit_pos),
-	       ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4));
+	ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
+			 BIT(bit_pos));
 }
 
 void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
@@ -145,8 +156,8 @@
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
 
 	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
-	writel(BIT(bit_pos),
-	       ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4));
+	ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
+			 BIT(bit_pos));
 }
 
 static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
@@ -215,6 +226,7 @@
 int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
 {
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+	struct device *dev = pp->dev;
 	int i;
 
 	pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
@@ -222,7 +234,7 @@
 					&ks_dw_pcie_msi_domain_ops,
 					chip);
 	if (!pp->irq_domain) {
-		dev_err(pp->dev, "irq domain init failed\n");
+		dev_err(dev, "irq domain init failed\n");
 		return -ENXIO;
 	}
 
@@ -237,47 +249,47 @@
 	int i;
 
 	for (i = 0; i < MAX_LEGACY_IRQS; i++)
-		writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4));
+		ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
 }
 
 void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
 {
 	struct pcie_port *pp = &ks_pcie->pp;
+	struct device *dev = pp->dev;
 	u32 pending;
 	int virq;
 
-	pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4));
+	pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
 
 	if (BIT(0) & pending) {
 		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
-		dev_dbg(pp->dev, ": irq: irq_offset %d, virq %d\n", offset,
-			virq);
+		dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
 		generic_handle_irq(virq);
 	}
 
 	/* EOI the INTx interrupt */
-	writel(offset, ks_pcie->va_app_base + IRQ_EOI);
+	ks_dw_app_writel(ks_pcie, IRQ_EOI, offset);
 }
 
-void ks_dw_pcie_enable_error_irq(void __iomem *reg_base)
+void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
 {
-	writel(ERR_IRQ_ALL, reg_base + ERR_IRQ_ENABLE_SET);
+	ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
 }
 
-irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
-					void __iomem *reg_base)
+irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
 {
 	u32 status;
 
-	status = readl(reg_base + ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
+	status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
 	if (!status)
 		return IRQ_NONE;
 
 	if (status & ERR_FATAL_IRQ)
-		dev_err(dev, "fatal error (status %#010x)\n", status);
+		dev_err(ks_pcie->pp.dev, "fatal error (status %#010x)\n",
+			status);
 
 	/* Ack the IRQ; status bits are RW1C */
-	writel(status, reg_base + ERR_IRQ_STATUS);
+	ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status);
 	return IRQ_HANDLED;
 }
 
@@ -322,15 +334,15 @@
  * Since modification of dbi_cs2 involves different clock domain, read the
  * status back to ensure the transition is complete.
  */
-static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
+static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
 {
 	u32 val;
 
-	writel(DBI_CS2_EN_VAL | readl(reg_virt + CMD_STATUS),
-	       reg_virt + CMD_STATUS);
+	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+	ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val);
 
 	do {
-		val = readl(reg_virt + CMD_STATUS);
+		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
 	} while (!(val & DBI_CS2_EN_VAL));
 }
 
@@ -340,15 +352,15 @@
  * Since modification of dbi_cs2 involves different clock domain, read the
  * status back to ensure the transition is complete.
  */
-static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt)
+static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
 {
 	u32 val;
 
-	writel(~DBI_CS2_EN_VAL & readl(reg_virt + CMD_STATUS),
-		     reg_virt + CMD_STATUS);
+	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+	ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val);
 
 	do {
-		val = readl(reg_virt + CMD_STATUS);
+		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
 	} while (val & DBI_CS2_EN_VAL);
 }
 
@@ -357,28 +369,29 @@
 	struct pcie_port *pp = &ks_pcie->pp;
 	u32 start = pp->mem->start, end = pp->mem->end;
 	int i, tr_size;
+	u32 val;
 
 	/* Disable BARs for inbound access */
-	ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
-	writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0);
-	writel(0, pp->dbi_base + PCI_BASE_ADDRESS_1);
-	ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
+	ks_dw_pcie_set_dbi_mode(ks_pcie);
+	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 0);
+	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_1, 0);
+	ks_dw_pcie_clear_dbi_mode(ks_pcie);
 
 	/* Set outbound translation size per window division */
-	writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE);
+	ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7);
 
 	tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
 
 	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
 	for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
-		writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i));
-		writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i));
+		ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1);
+		ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0);
 		start += tr_size;
 	}
 
 	/* Enable OB translation */
-	writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS),
-	       ks_pcie->va_app_base + CMD_STATUS);
+	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+	ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val);
 }
 
 /**
@@ -418,7 +431,7 @@
 	if (bus != 1)
 		regval |= BIT(24);
 
-	writel(regval, ks_pcie->va_app_base + CFG_SETUP);
+	ks_dw_app_writel(ks_pcie, CFG_SETUP, regval);
 	return pp->va_cfg0_base;
 }
 
@@ -456,19 +469,19 @@
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
 
 	/* Configure and set up BAR0 */
-	ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
+	ks_dw_pcie_set_dbi_mode(ks_pcie);
 
 	/* Enable BAR0 */
-	writel(1, pp->dbi_base + PCI_BASE_ADDRESS_0);
-	writel(SZ_4K - 1, pp->dbi_base + PCI_BASE_ADDRESS_0);
+	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 1);
+	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, SZ_4K - 1);
 
-	ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
+	ks_dw_pcie_clear_dbi_mode(ks_pcie);
 
 	 /*
 	  * For BAR0, just setting bus address for inbound writes (MSI) should
 	  * be sufficient.  Use physical address to avoid any conflicts.
 	  */
-	writel(ks_pcie->app.start, pp->dbi_base + PCI_BASE_ADDRESS_0);
+	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
 }
 
 /**
@@ -476,8 +489,9 @@
  */
 int ks_dw_pcie_link_up(struct pcie_port *pp)
 {
-	u32 val = readl(pp->dbi_base + DEBUG0);
+	u32 val;
 
+	val = dw_pcie_readl_rc(pp, DEBUG0);
 	return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
 }
 
@@ -486,13 +500,13 @@
 	u32 val;
 
 	/* Disable Link training */
-	val = readl(ks_pcie->va_app_base + CMD_STATUS);
+	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
 	val &= ~LTSSM_EN_VAL;
-	writel(LTSSM_EN_VAL | val,  ks_pcie->va_app_base + CMD_STATUS);
+	ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
 
 	/* Initiate Link Training */
-	val = readl(ks_pcie->va_app_base + CMD_STATUS);
-	writel(LTSSM_EN_VAL | val,  ks_pcie->va_app_base + CMD_STATUS);
+	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+	ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
 }
 
 /**
@@ -506,12 +520,13 @@
 				struct device_node *msi_intc_np)
 {
 	struct pcie_port *pp = &ks_pcie->pp;
-	struct platform_device *pdev = to_platform_device(pp->dev);
+	struct device *dev = pp->dev;
+	struct platform_device *pdev = to_platform_device(dev);
 	struct resource *res;
 
 	/* Index 0 is the config reg. space address */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	pp->dbi_base = devm_ioremap_resource(pp->dev, res);
+	pp->dbi_base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(pp->dbi_base))
 		return PTR_ERR(pp->dbi_base);
 
@@ -524,7 +539,7 @@
 
 	/* Index 1 is the application reg. space address */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	ks_pcie->va_app_base = devm_ioremap_resource(pp->dev, res);
+	ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(ks_pcie->va_app_base))
 		return PTR_ERR(ks_pcie->va_app_base);
 
@@ -537,7 +552,7 @@
 					&ks_dw_pcie_legacy_irq_domain_ops,
 					NULL);
 	if (!ks_pcie->legacy_irq_domain) {
-		dev_err(pp->dev, "Failed to add irq domain for legacy irqs\n");
+		dev_err(dev, "Failed to add irq domain for legacy irqs\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 82b461b..043c19a 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -89,12 +89,13 @@
 static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
 {
 	struct pcie_port *pp = &ks_pcie->pp;
+	struct device *dev = pp->dev;
 	unsigned int retries;
 
 	dw_pcie_setup_rc(pp);
 
 	if (dw_pcie_link_up(pp)) {
-		dev_err(pp->dev, "Link already up\n");
+		dev_err(dev, "Link already up\n");
 		return 0;
 	}
 
@@ -105,7 +106,7 @@
 			return 0;
 	}
 
-	dev_err(pp->dev, "phy link never came up\n");
+	dev_err(dev, "phy link never came up\n");
 	return -ETIMEDOUT;
 }
 
@@ -115,9 +116,10 @@
 	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
 	u32 offset = irq - ks_pcie->msi_host_irqs[0];
 	struct pcie_port *pp = &ks_pcie->pp;
+	struct device *dev = pp->dev;
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 
-	dev_dbg(pp->dev, "%s, irq %d\n", __func__, irq);
+	dev_dbg(dev, "%s, irq %d\n", __func__, irq);
 
 	/*
 	 * The chained irq handler installation would have replaced normal
@@ -142,10 +144,11 @@
 	unsigned int irq = irq_desc_get_irq(desc);
 	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
 	struct pcie_port *pp = &ks_pcie->pp;
+	struct device *dev = pp->dev;
 	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 
-	dev_dbg(pp->dev, ": Handling legacy irq %d\n", irq);
+	dev_dbg(dev, ": Handling legacy irq %d\n", irq);
 
 	/*
 	 * The chained irq handler installation would have replaced normal
@@ -234,7 +237,7 @@
 	}
 
 	if (ks_pcie->error_irq > 0)
-		ks_dw_pcie_enable_error_irq(ks_pcie->va_app_base);
+		ks_dw_pcie_enable_error_irq(ks_pcie);
 }
 
 /*
@@ -302,14 +305,14 @@
 {
 	struct keystone_pcie *ks_pcie = priv;
 
-	return ks_dw_pcie_handle_error_irq(ks_pcie->pp.dev,
-					   ks_pcie->va_app_base);
+	return ks_dw_pcie_handle_error_irq(ks_pcie);
 }
 
 static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
 			 struct platform_device *pdev)
 {
 	struct pcie_port *pp = &ks_pcie->pp;
+	struct device *dev = pp->dev;
 	int ret;
 
 	ret = ks_pcie_get_irq_controller_info(ks_pcie,
@@ -332,12 +335,12 @@
 	 */
 	ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
 	if (ks_pcie->error_irq <= 0)
-		dev_info(&pdev->dev, "no error IRQ defined\n");
+		dev_info(dev, "no error IRQ defined\n");
 	else {
 		ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
 				  IRQF_SHARED, "pcie-error-irq", ks_pcie);
 		if (ret < 0) {
-			dev_err(&pdev->dev, "failed to request error IRQ %d\n",
+			dev_err(dev, "failed to request error IRQ %d\n",
 				ks_pcie->error_irq);
 			return ret;
 		}
@@ -347,7 +350,7 @@
 	pp->ops = &keystone_pcie_host_ops;
 	ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to initialize host\n");
+		dev_err(dev, "failed to initialize host\n");
 		return ret;
 	}
 
@@ -381,12 +384,12 @@
 	struct phy *phy;
 	int ret;
 
-	ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
-				GFP_KERNEL);
+	ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
 	if (!ks_pcie)
 		return -ENOMEM;
 
 	pp = &ks_pcie->pp;
+	pp->dev = dev;
 
 	/* initialize SerDes Phy if present */
 	phy = devm_phy_get(dev, "pcie-phy");
@@ -408,7 +411,6 @@
 	devm_iounmap(dev, reg_p);
 	devm_release_mem_region(dev, res->start, resource_size(res));
 
-	pp->dev = dev;
 	ks_pcie->np = dev->of_node;
 	platform_set_drvdata(pdev, ks_pcie);
 	ks_pcie->clk = devm_clk_get(dev, "pcie");
diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h
index a5b0cb2..bc54baf 100644
--- a/drivers/pci/host/pci-keystone.h
+++ b/drivers/pci/host/pci-keystone.h
@@ -17,8 +17,8 @@
 #define MAX_LEGACY_HOST_IRQS		4
 
 struct keystone_pcie {
+	struct	pcie_port	pp;		/* pp.dbi_base is DT 0th res */
 	struct	clk		*clk;
-	struct	pcie_port	pp;
 	/* PCI Device ID */
 	u32			device_id;
 	int			num_legacy_host_irqs;
@@ -34,7 +34,7 @@
 	int error_irq;
 
 	/* Application register space */
-	void __iomem		*va_app_base;
+	void __iomem		*va_app_base;	/* DT 1st resource */
 	struct resource		app;
 };
 
@@ -45,9 +45,8 @@
 /* Keystone specific PCI controller APIs */
 void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
 void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
-void ks_dw_pcie_enable_error_irq(void __iomem *reg_base);
-irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
-					void __iomem *reg_base);
+void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie);
+irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie);
 int  ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
 			struct device_node *msi_intc_np);
 int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 114ba81..2cb7315 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -45,10 +45,9 @@
 };
 
 struct ls_pcie {
-	void __iomem *dbi;
+	struct pcie_port pp;		/* pp.dbi_base is DT regs */
 	void __iomem *lut;
 	struct regmap *scfg;
-	struct pcie_port pp;
 	const struct ls_pcie_drvdata *drvdata;
 	int index;
 };
@@ -59,7 +58,7 @@
 {
 	u32 header_type;
 
-	header_type = ioread8(pcie->dbi + PCI_HEADER_TYPE);
+	header_type = ioread8(pcie->pp.dbi_base + PCI_HEADER_TYPE);
 	header_type &= 0x7f;
 
 	return header_type == PCI_HEADER_TYPE_BRIDGE;
@@ -68,13 +67,13 @@
 /* Clear multi-function bit */
 static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
 {
-	iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE);
+	iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->pp.dbi_base + PCI_HEADER_TYPE);
 }
 
 /* Fix class value */
 static void ls_pcie_fix_class(struct ls_pcie *pcie)
 {
-	iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
+	iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->pp.dbi_base + PCI_CLASS_DEVICE);
 }
 
 /* Drop MSG TLP except for Vendor MSG */
@@ -82,9 +81,9 @@
 {
 	u32 val;
 
-	val = ioread32(pcie->dbi + PCIE_STRFMR1);
+	val = ioread32(pcie->pp.dbi_base + PCIE_STRFMR1);
 	val &= 0xDFFFFFFF;
-	iowrite32(val, pcie->dbi + PCIE_STRFMR1);
+	iowrite32(val, pcie->pp.dbi_base + PCIE_STRFMR1);
 }
 
 static int ls1021_pcie_link_up(struct pcie_port *pp)
@@ -106,18 +105,19 @@
 
 static void ls1021_pcie_host_init(struct pcie_port *pp)
 {
+	struct device *dev = pp->dev;
 	struct ls_pcie *pcie = to_ls_pcie(pp);
 	u32 index[2];
 
-	pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node,
+	pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node,
 						     "fsl,pcie-scfg");
 	if (IS_ERR(pcie->scfg)) {
-		dev_err(pp->dev, "No syscfg phandle specified\n");
+		dev_err(dev, "No syscfg phandle specified\n");
 		pcie->scfg = NULL;
 		return;
 	}
 
-	if (of_property_read_u32_array(pp->dev->of_node,
+	if (of_property_read_u32_array(dev->of_node,
 				       "fsl,pcie-scfg", index, 2)) {
 		pcie->scfg = NULL;
 		return;
@@ -148,18 +148,19 @@
 {
 	struct ls_pcie *pcie = to_ls_pcie(pp);
 
-	iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
+	iowrite32(1, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN);
 	ls_pcie_fix_class(pcie);
 	ls_pcie_clear_multifunction(pcie);
 	ls_pcie_drop_msg_tlp(pcie);
-	iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
+	iowrite32(0, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN);
 }
 
 static int ls_pcie_msi_host_init(struct pcie_port *pp,
 				 struct msi_controller *chip)
 {
+	struct device *dev = pp->dev;
+	struct device_node *np = dev->of_node;
 	struct device_node *msi_node;
-	struct device_node *np = pp->dev->of_node;
 
 	/*
 	 * The MSI domain is set by the generic of_msi_configure().  This
@@ -169,7 +170,7 @@
 	 */
 	msi_node = of_parse_phandle(np, "msi-parent", 0);
 	if (!msi_node) {
-		dev_err(pp->dev, "failed to find msi-parent\n");
+		dev_err(dev, "failed to find msi-parent\n");
 		return -EINVAL;
 	}
 
@@ -212,19 +213,15 @@
 	{ },
 };
 
-static int __init ls_add_pcie_port(struct pcie_port *pp,
-				   struct platform_device *pdev)
+static int __init ls_add_pcie_port(struct ls_pcie *pcie)
 {
+	struct pcie_port *pp = &pcie->pp;
+	struct device *dev = pp->dev;
 	int ret;
-	struct ls_pcie *pcie = to_ls_pcie(pp);
-
-	pp->dev = &pdev->dev;
-	pp->dbi_base = pcie->dbi;
-	pp->ops = pcie->drvdata->ops;
 
 	ret = dw_pcie_host_init(pp);
 	if (ret) {
-		dev_err(pp->dev, "failed to initialize host\n");
+		dev_err(dev, "failed to initialize host\n");
 		return ret;
 	}
 
@@ -233,38 +230,42 @@
 
 static int __init ls_pcie_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	const struct of_device_id *match;
 	struct ls_pcie *pcie;
+	struct pcie_port *pp;
 	struct resource *dbi_base;
 	int ret;
 
-	match = of_match_device(ls_pcie_of_match, &pdev->dev);
+	match = of_match_device(ls_pcie_of_match, dev);
 	if (!match)
 		return -ENODEV;
 
-	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
 	if (!pcie)
 		return -ENOMEM;
 
+	pp = &pcie->pp;
+	pp->dev = dev;
+	pp->ops = pcie->drvdata->ops;
+
 	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
-	pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base);
-	if (IS_ERR(pcie->dbi)) {
-		dev_err(&pdev->dev, "missing *regs* space\n");
-		return PTR_ERR(pcie->dbi);
+	pcie->pp.dbi_base = devm_ioremap_resource(dev, dbi_base);
+	if (IS_ERR(pcie->pp.dbi_base)) {
+		dev_err(dev, "missing *regs* space\n");
+		return PTR_ERR(pcie->pp.dbi_base);
 	}
 
 	pcie->drvdata = match->data;
-	pcie->lut = pcie->dbi + pcie->drvdata->lut_offset;
+	pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset;
 
 	if (!ls_pcie_is_bridge(pcie))
 		return -ENODEV;
 
-	ret = ls_add_pcie_port(&pcie->pp, pdev);
+	ret = ls_add_pcie_port(pcie);
 	if (ret < 0)
 		return ret;
 
-	platform_set_drvdata(pdev, pcie);
-
 	return 0;
 }
 
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 307f81d..45a89d9 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -1190,13 +1190,13 @@
 
 static int mvebu_pcie_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct mvebu_pcie *pcie;
-	struct device_node *np = pdev->dev.of_node;
+	struct device_node *np = dev->of_node;
 	struct device_node *child;
 	int num, i, ret;
 
-	pcie = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pcie),
-			    GFP_KERNEL);
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
 	if (!pcie)
 		return -ENOMEM;
 
@@ -1206,7 +1206,7 @@
 	/* Get the PCIe memory and I/O aperture */
 	mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
 	if (resource_size(&pcie->mem) == 0) {
-		dev_err(&pdev->dev, "invalid memory aperture size\n");
+		dev_err(dev, "invalid memory aperture size\n");
 		return -EINVAL;
 	}
 
@@ -1224,20 +1224,18 @@
 	/* Get the bus range */
 	ret = of_pci_parse_bus_range(np, &pcie->busn);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to parse bus-range property: %d\n",
-			ret);
+		dev_err(dev, "failed to parse bus-range property: %d\n", ret);
 		return ret;
 	}
 
-	num = of_get_available_child_count(pdev->dev.of_node);
+	num = of_get_available_child_count(np);
 
-	pcie->ports = devm_kcalloc(&pdev->dev, num, sizeof(*pcie->ports),
-				   GFP_KERNEL);
+	pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
 	if (!pcie->ports)
 		return -ENOMEM;
 
 	i = 0;
-	for_each_available_child_of_node(pdev->dev.of_node, child) {
+	for_each_available_child_of_node(np, child) {
 		struct mvebu_pcie_port *port = &pcie->ports[i];
 
 		ret = mvebu_pcie_parse_port(pcie, port, child);
@@ -1266,8 +1264,7 @@
 
 		port->base = mvebu_pcie_map_registers(pdev, child, port);
 		if (IS_ERR(port->base)) {
-			dev_err(&pdev->dev, "%s: cannot map registers\n",
-				port->name);
+			dev_err(dev, "%s: cannot map registers\n", port->name);
 			port->base = NULL;
 			mvebu_pcie_powerdown(port);
 			continue;
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 597566f..1eeefa4 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -154,10 +154,11 @@
 static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
 {
 	struct rcar_pci_priv *priv = pw;
+	struct device *dev = priv->dev;
 	u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG);
 
 	if (status & RCAR_PCI_INT_ALLERRORS) {
-		dev_err(priv->dev, "error irq: status %08x\n", status);
+		dev_err(dev, "error irq: status %08x\n", status);
 
 		/* clear the error(s) */
 		iowrite32(status & RCAR_PCI_INT_ALLERRORS,
@@ -170,13 +171,14 @@
 
 static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv)
 {
+	struct device *dev = priv->dev;
 	int ret;
 	u32 val;
 
-	ret = devm_request_irq(priv->dev, priv->irq, rcar_pci_err_irq,
+	ret = devm_request_irq(dev, priv->irq, rcar_pci_err_irq,
 			       IRQF_SHARED, "error irq", priv);
 	if (ret) {
-		dev_err(priv->dev, "cannot claim IRQ for error handling\n");
+		dev_err(dev, "cannot claim IRQ for error handling\n");
 		return;
 	}
 
@@ -192,15 +194,16 @@
 static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
 {
 	struct rcar_pci_priv *priv = sys->private_data;
+	struct device *dev = priv->dev;
 	void __iomem *reg = priv->reg;
 	u32 val;
 	int ret;
 
-	pm_runtime_enable(priv->dev);
-	pm_runtime_get_sync(priv->dev);
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
 
 	val = ioread32(reg + RCAR_PCI_UNIT_REV_REG);
-	dev_info(priv->dev, "PCI: bus%u revision %x\n", sys->busnr, val);
+	dev_info(dev, "PCI: bus%u revision %x\n", sys->busnr, val);
 
 	/* Disable Direct Power Down State and assert reset */
 	val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD;
@@ -275,7 +278,7 @@
 
 	/* Add PCI resources */
 	pci_add_resource(&sys->resources, &priv->mem_res);
-	ret = devm_request_pci_bus_resources(priv->dev, &sys->resources);
+	ret = devm_request_pci_bus_resources(dev, &sys->resources);
 	if (ret < 0)
 		return ret;
 
@@ -311,6 +314,7 @@
 static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
 					 struct device_node *np)
 {
+	struct device *dev = pci->dev;
 	struct of_pci_range range;
 	struct of_pci_range_parser parser;
 	int index = 0;
@@ -331,14 +335,14 @@
 
 		/* Catch HW limitations */
 		if (!(range.flags & IORESOURCE_PREFETCH)) {
-			dev_err(pci->dev, "window must be prefetchable\n");
+			dev_err(dev, "window must be prefetchable\n");
 			return -EINVAL;
 		}
 		if (pci->window_addr) {
 			u32 lowaddr = 1 << (ffs(pci->window_addr) - 1);
 
 			if (lowaddr < pci->window_size) {
-				dev_err(pci->dev, "invalid window size/addr\n");
+				dev_err(dev, "invalid window size/addr\n");
 				return -EINVAL;
 			}
 		}
@@ -350,6 +354,7 @@
 
 static int rcar_pci_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct resource *cfg_res, *mem_res;
 	struct rcar_pci_priv *priv;
 	void __iomem *reg;
@@ -357,7 +362,7 @@
 	void *hw_private[1];
 
 	cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	reg = devm_ioremap_resource(&pdev->dev, cfg_res);
+	reg = devm_ioremap_resource(dev, cfg_res);
 	if (IS_ERR(reg))
 		return PTR_ERR(reg);
 
@@ -368,8 +373,7 @@
 	if (mem_res->start & 0xFFFF)
 		return -EINVAL;
 
-	priv = devm_kzalloc(&pdev->dev,
-			    sizeof(struct rcar_pci_priv), GFP_KERNEL);
+	priv = devm_kzalloc(dev, sizeof(struct rcar_pci_priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
@@ -378,10 +382,10 @@
 
 	priv->irq = platform_get_irq(pdev, 0);
 	priv->reg = reg;
-	priv->dev = &pdev->dev;
+	priv->dev = dev;
 
 	if (priv->irq < 0) {
-		dev_err(&pdev->dev, "no valid irq found\n");
+		dev_err(dev, "no valid irq found\n");
 		return priv->irq;
 	}
 
@@ -390,23 +394,23 @@
 	priv->window_pci = 0x40000000;
 	priv->window_size = SZ_1G;
 
-	if (pdev->dev.of_node) {
+	if (dev->of_node) {
 		struct resource busnr;
 		int ret;
 
-		ret = of_pci_parse_bus_range(pdev->dev.of_node, &busnr);
+		ret = of_pci_parse_bus_range(dev->of_node, &busnr);
 		if (ret < 0) {
-			dev_err(&pdev->dev, "failed to parse bus-range\n");
+			dev_err(dev, "failed to parse bus-range\n");
 			return ret;
 		}
 
 		priv->busnr = busnr.start;
 		if (busnr.end != busnr.start)
-			dev_warn(&pdev->dev, "only one bus number supported\n");
+			dev_warn(dev, "only one bus number supported\n");
 
-		ret = rcar_pci_parse_map_dma_ranges(priv, pdev->dev.of_node);
+		ret = rcar_pci_parse_map_dma_ranges(priv, dev->of_node);
 		if (ret < 0) {
-			dev_err(&pdev->dev, "failed to parse dma-range\n");
+			dev_err(dev, "failed to parse dma-range\n");
 			return ret;
 		}
 	} else {
@@ -421,7 +425,7 @@
 	hw.map_irq = rcar_pci_map_irq;
 	hw.ops = &rcar_pci_ops;
 	hw.setup = rcar_pci_setup;
-	pci_common_init_dev(&pdev->dev, &hw);
+	pci_common_init_dev(dev, &hw);
 	return 0;
 }
 
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index e2a8e4c..8dfccf7 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -384,6 +384,7 @@
 static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
 						   unsigned int busnr)
 {
+	struct device *dev = pcie->dev;
 	pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				 L_PTE_XN | L_PTE_MT_DEV_SHARED | L_PTE_SHARED);
 	phys_addr_t cs = pcie->cs->start;
@@ -413,8 +414,7 @@
 
 		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
 		if (err < 0) {
-			dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
-				err);
+			dev_err(dev, "ioremap_page_range() failed: %d\n", err);
 			goto unmap;
 		}
 	}
@@ -462,6 +462,7 @@
 					int where)
 {
 	struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
+	struct device *dev = pcie->dev;
 	void __iomem *addr = NULL;
 
 	if (bus->number == 0) {
@@ -482,8 +483,7 @@
 				addr = (void __iomem *)b->area->addr;
 
 		if (!addr) {
-			dev_err(pcie->dev,
-				"failed to map cfg. space for bus %u\n",
+			dev_err(dev, "failed to map cfg. space for bus %u\n",
 				bus->number);
 			return NULL;
 		}
@@ -584,12 +584,13 @@
 static void tegra_pcie_port_free(struct tegra_pcie_port *port)
 {
 	struct tegra_pcie *pcie = port->pcie;
+	struct device *dev = pcie->dev;
 
-	devm_iounmap(pcie->dev, port->base);
-	devm_release_mem_region(pcie->dev, port->regs.start,
+	devm_iounmap(dev, port->base);
+	devm_release_mem_region(dev, port->regs.start,
 				resource_size(&port->regs));
 	list_del(&port->list);
-	devm_kfree(pcie->dev, port);
+	devm_kfree(dev, port);
 }
 
 /* Tegra PCIE root complex wrongly reports device class */
@@ -612,12 +613,13 @@
 static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
 {
 	struct tegra_pcie *pcie = sys_to_pcie(sys);
+	struct device *dev = pcie->dev;
 	int err;
 
 	sys->mem_offset = pcie->offset.mem;
 	sys->io_offset = pcie->offset.io;
 
-	err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->io);
+	err = devm_request_resource(dev, &iomem_resource, &pcie->io);
 	if (err < 0)
 		return err;
 
@@ -631,7 +633,7 @@
 				sys->mem_offset);
 	pci_add_resource(&sys->resources, &pcie->busn);
 
-	err = devm_request_pci_bus_resources(pcie->dev, &sys->resources);
+	err = devm_request_pci_bus_resources(dev, &sys->resources);
 	if (err < 0)
 		return err;
 
@@ -672,6 +674,7 @@
 		"Peer2Peer error",
 	};
 	struct tegra_pcie *pcie = arg;
+	struct device *dev = pcie->dev;
 	u32 code, signature;
 
 	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
@@ -689,11 +692,9 @@
 	 * happen a lot during enumeration
 	 */
 	if (code == AFI_INTR_MASTER_ABORT)
-		dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
-			signature);
+		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
 	else
-		dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
-			signature);
+		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
 
 	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
 	    code == AFI_INTR_FPCI_DECODE_ERROR) {
@@ -701,9 +702,9 @@
 		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
 
 		if (code == AFI_INTR_MASTER_ABORT)
-			dev_dbg(pcie->dev, "  FPCI address: %10llx\n", address);
+			dev_dbg(dev, "  FPCI address: %10llx\n", address);
 		else
-			dev_err(pcie->dev, "  FPCI address: %10llx\n", address);
+			dev_err(dev, "  FPCI address: %10llx\n", address);
 	}
 
 	return IRQ_HANDLED;
@@ -793,6 +794,7 @@
 
 static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	const struct tegra_pcie_soc *soc = pcie->soc;
 	u32 value;
 	int err;
@@ -829,7 +831,7 @@
 	/* wait for the PLL to lock */
 	err = tegra_pcie_pll_wait(pcie, 500);
 	if (err < 0) {
-		dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
+		dev_err(dev, "PLL failed to lock: %d\n", err);
 		return err;
 	}
 
@@ -859,7 +861,7 @@
 	/* override IDDQ */
 	value = pads_readl(pcie, PADS_CTL);
 	value |= PADS_CTL_IDDQ_1L;
-	pads_writel(pcie, PADS_CTL, value);
+	pads_writel(pcie, value, PADS_CTL);
 
 	/* reset PLL */
 	value = pads_readl(pcie, soc->pads_pll_ctl);
@@ -880,8 +882,7 @@
 	for (i = 0; i < port->lanes; i++) {
 		err = phy_power_on(port->phys[i]);
 		if (err < 0) {
-			dev_err(dev, "failed to power on PHY#%u: %d\n", i,
-				err);
+			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
 			return err;
 		}
 	}
@@ -909,6 +910,7 @@
 
 static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	const struct tegra_pcie_soc *soc = pcie->soc;
 	struct tegra_pcie_port *port;
 	int err;
@@ -920,7 +922,7 @@
 			err = tegra_pcie_phy_enable(pcie);
 
 		if (err < 0)
-			dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
+			dev_err(dev, "failed to power on PHY: %d\n", err);
 
 		return err;
 	}
@@ -928,7 +930,7 @@
 	list_for_each_entry(port, &pcie->ports, list) {
 		err = tegra_pcie_port_phy_power_on(port);
 		if (err < 0) {
-			dev_err(pcie->dev,
+			dev_err(dev,
 				"failed to power on PCIe port %u PHY: %d\n",
 				port->index, err);
 			return err;
@@ -946,6 +948,7 @@
 
 static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	struct tegra_pcie_port *port;
 	int err;
 
@@ -956,8 +959,7 @@
 			err = tegra_pcie_phy_disable(pcie);
 
 		if (err < 0)
-			dev_err(pcie->dev, "failed to power off PHY: %d\n",
-				err);
+			dev_err(dev, "failed to power off PHY: %d\n", err);
 
 		return err;
 	}
@@ -965,7 +967,7 @@
 	list_for_each_entry(port, &pcie->ports, list) {
 		err = tegra_pcie_port_phy_power_off(port);
 		if (err < 0) {
-			dev_err(pcie->dev,
+			dev_err(dev,
 				"failed to power off PCIe port %u PHY: %d\n",
 				port->index, err);
 			return err;
@@ -977,6 +979,7 @@
 
 static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	const struct tegra_pcie_soc *soc = pcie->soc;
 	struct tegra_pcie_port *port;
 	unsigned long value;
@@ -1016,7 +1019,7 @@
 
 	err = tegra_pcie_phy_power_on(pcie);
 	if (err < 0) {
-		dev_err(pcie->dev, "failed to power on PHY(s): %d\n", err);
+		dev_err(dev, "failed to power on PHY(s): %d\n", err);
 		return err;
 	}
 
@@ -1049,13 +1052,14 @@
 
 static void tegra_pcie_power_off(struct tegra_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	int err;
 
 	/* TODO: disable and unprepare clocks? */
 
 	err = tegra_pcie_phy_power_off(pcie);
 	if (err < 0)
-		dev_err(pcie->dev, "failed to power off PHY(s): %d\n", err);
+		dev_err(dev, "failed to power off PHY(s): %d\n", err);
 
 	reset_control_assert(pcie->pcie_xrst);
 	reset_control_assert(pcie->afi_rst);
@@ -1065,11 +1069,12 @@
 
 	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
 	if (err < 0)
-		dev_warn(pcie->dev, "failed to disable regulators: %d\n", err);
+		dev_warn(dev, "failed to disable regulators: %d\n", err);
 }
 
 static int tegra_pcie_power_on(struct tegra_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	const struct tegra_pcie_soc *soc = pcie->soc;
 	int err;
 
@@ -1082,13 +1087,13 @@
 	/* enable regulators */
 	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
 	if (err < 0)
-		dev_err(pcie->dev, "failed to enable regulators: %d\n", err);
+		dev_err(dev, "failed to enable regulators: %d\n", err);
 
 	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
 						pcie->pex_clk,
 						pcie->pex_rst);
 	if (err) {
-		dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
+		dev_err(dev, "powerup sequence failed: %d\n", err);
 		return err;
 	}
 
@@ -1096,22 +1101,21 @@
 
 	err = clk_prepare_enable(pcie->afi_clk);
 	if (err < 0) {
-		dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
+		dev_err(dev, "failed to enable AFI clock: %d\n", err);
 		return err;
 	}
 
 	if (soc->has_cml_clk) {
 		err = clk_prepare_enable(pcie->cml_clk);
 		if (err < 0) {
-			dev_err(pcie->dev, "failed to enable CML clock: %d\n",
-				err);
+			dev_err(dev, "failed to enable CML clock: %d\n", err);
 			return err;
 		}
 	}
 
 	err = clk_prepare_enable(pcie->pll_e);
 	if (err < 0) {
-		dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
+		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
 		return err;
 	}
 
@@ -1120,22 +1124,23 @@
 
 static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	const struct tegra_pcie_soc *soc = pcie->soc;
 
-	pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
+	pcie->pex_clk = devm_clk_get(dev, "pex");
 	if (IS_ERR(pcie->pex_clk))
 		return PTR_ERR(pcie->pex_clk);
 
-	pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
+	pcie->afi_clk = devm_clk_get(dev, "afi");
 	if (IS_ERR(pcie->afi_clk))
 		return PTR_ERR(pcie->afi_clk);
 
-	pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
+	pcie->pll_e = devm_clk_get(dev, "pll_e");
 	if (IS_ERR(pcie->pll_e))
 		return PTR_ERR(pcie->pll_e);
 
 	if (soc->has_cml_clk) {
-		pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
+		pcie->cml_clk = devm_clk_get(dev, "cml");
 		if (IS_ERR(pcie->cml_clk))
 			return PTR_ERR(pcie->cml_clk);
 	}
@@ -1145,15 +1150,17 @@
 
 static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
 {
-	pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
+	struct device *dev = pcie->dev;
+
+	pcie->pex_rst = devm_reset_control_get(dev, "pex");
 	if (IS_ERR(pcie->pex_rst))
 		return PTR_ERR(pcie->pex_rst);
 
-	pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
+	pcie->afi_rst = devm_reset_control_get(dev, "afi");
 	if (IS_ERR(pcie->afi_rst))
 		return PTR_ERR(pcie->afi_rst);
 
-	pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
+	pcie->pcie_xrst = devm_reset_control_get(dev, "pcie_x");
 	if (IS_ERR(pcie->pcie_xrst))
 		return PTR_ERR(pcie->pcie_xrst);
 
@@ -1162,18 +1169,19 @@
 
 static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	int err;
 
-	pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
+	pcie->phy = devm_phy_optional_get(dev, "pcie");
 	if (IS_ERR(pcie->phy)) {
 		err = PTR_ERR(pcie->phy);
-		dev_err(pcie->dev, "failed to get PHY: %d\n", err);
+		dev_err(dev, "failed to get PHY: %d\n", err);
 		return err;
 	}
 
 	err = phy_init(pcie->phy);
 	if (err < 0) {
-		dev_err(pcie->dev, "failed to initialize PHY: %d\n", err);
+		dev_err(dev, "failed to initialize PHY: %d\n", err);
 		return err;
 	}
 
@@ -1256,43 +1264,44 @@
 
 static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
 {
-	struct platform_device *pdev = to_platform_device(pcie->dev);
+	struct device *dev = pcie->dev;
+	struct platform_device *pdev = to_platform_device(dev);
 	struct resource *pads, *afi, *res;
 	int err;
 
 	err = tegra_pcie_clocks_get(pcie);
 	if (err) {
-		dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
+		dev_err(dev, "failed to get clocks: %d\n", err);
 		return err;
 	}
 
 	err = tegra_pcie_resets_get(pcie);
 	if (err) {
-		dev_err(&pdev->dev, "failed to get resets: %d\n", err);
+		dev_err(dev, "failed to get resets: %d\n", err);
 		return err;
 	}
 
 	err = tegra_pcie_phys_get(pcie);
 	if (err < 0) {
-		dev_err(&pdev->dev, "failed to get PHYs: %d\n", err);
+		dev_err(dev, "failed to get PHYs: %d\n", err);
 		return err;
 	}
 
 	err = tegra_pcie_power_on(pcie);
 	if (err) {
-		dev_err(&pdev->dev, "failed to power up: %d\n", err);
+		dev_err(dev, "failed to power up: %d\n", err);
 		return err;
 	}
 
 	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
-	pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
+	pcie->pads = devm_ioremap_resource(dev, pads);
 	if (IS_ERR(pcie->pads)) {
 		err = PTR_ERR(pcie->pads);
 		goto poweroff;
 	}
 
 	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
-	pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
+	pcie->afi = devm_ioremap_resource(dev, afi);
 	if (IS_ERR(pcie->afi)) {
 		err = PTR_ERR(pcie->afi);
 		goto poweroff;
@@ -1305,7 +1314,7 @@
 		goto poweroff;
 	}
 
-	pcie->cs = devm_request_mem_region(pcie->dev, res->start,
+	pcie->cs = devm_request_mem_region(dev, res->start,
 					   resource_size(res), res->name);
 	if (!pcie->cs) {
 		err = -EADDRNOTAVAIL;
@@ -1315,7 +1324,7 @@
 	/* request interrupt */
 	err = platform_get_irq_byname(pdev, "intr");
 	if (err < 0) {
-		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
+		dev_err(dev, "failed to get IRQ: %d\n", err);
 		goto poweroff;
 	}
 
@@ -1323,7 +1332,7 @@
 
 	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
 	if (err) {
-		dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
+		dev_err(dev, "failed to register IRQ: %d\n", err);
 		goto poweroff;
 	}
 
@@ -1336,6 +1345,7 @@
 
 static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	int err;
 
 	if (pcie->irq > 0)
@@ -1345,7 +1355,7 @@
 
 	err = phy_exit(pcie->phy);
 	if (err < 0)
-		dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
+		dev_err(dev, "failed to teardown PHY: %d\n", err);
 
 	return 0;
 }
@@ -1384,6 +1394,7 @@
 static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
 {
 	struct tegra_pcie *pcie = data;
+	struct device *dev = pcie->dev;
 	struct tegra_msi *msi = &pcie->msi;
 	unsigned int i, processed = 0;
 
@@ -1403,13 +1414,13 @@
 				if (test_bit(index, msi->used))
 					generic_handle_irq(irq);
 				else
-					dev_info(pcie->dev, "unhandled MSI\n");
+					dev_info(dev, "unhandled MSI\n");
 			} else {
 				/*
 				 * that's weird who triggered this?
 				 * just clear it
 				 */
-				dev_info(pcie->dev, "unexpected MSI\n");
+				dev_info(dev, "unexpected MSI\n");
 			}
 
 			/* see if there's any more pending in this vector */
@@ -1488,7 +1499,8 @@
 
 static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
 {
-	struct platform_device *pdev = to_platform_device(pcie->dev);
+	struct device *dev = pcie->dev;
+	struct platform_device *pdev = to_platform_device(dev);
 	const struct tegra_pcie_soc *soc = pcie->soc;
 	struct tegra_msi *msi = &pcie->msi;
 	unsigned long base;
@@ -1497,20 +1509,20 @@
 
 	mutex_init(&msi->lock);
 
-	msi->chip.dev = pcie->dev;
+	msi->chip.dev = dev;
 	msi->chip.setup_irq = tegra_msi_setup_irq;
 	msi->chip.teardown_irq = tegra_msi_teardown_irq;
 
-	msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
+	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
 					    &msi_domain_ops, &msi->chip);
 	if (!msi->domain) {
-		dev_err(&pdev->dev, "failed to create IRQ domain\n");
+		dev_err(dev, "failed to create IRQ domain\n");
 		return -ENOMEM;
 	}
 
 	err = platform_get_irq_byname(pdev, "msi");
 	if (err < 0) {
-		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
+		dev_err(dev, "failed to get IRQ: %d\n", err);
 		goto err;
 	}
 
@@ -1519,7 +1531,7 @@
 	err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
 			  tegra_msi_irq_chip.name, pcie);
 	if (err < 0) {
-		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+		dev_err(dev, "failed to request IRQ: %d\n", err);
 		goto err;
 	}
 
@@ -1594,46 +1606,47 @@
 static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
 				      u32 *xbar)
 {
-	struct device_node *np = pcie->dev->of_node;
+	struct device *dev = pcie->dev;
+	struct device_node *np = dev->of_node;
 
 	if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
 		switch (lanes) {
 		case 0x0000104:
-			dev_info(pcie->dev, "4x1, 1x1 configuration\n");
+			dev_info(dev, "4x1, 1x1 configuration\n");
 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
 			return 0;
 
 		case 0x0000102:
-			dev_info(pcie->dev, "2x1, 1x1 configuration\n");
+			dev_info(dev, "2x1, 1x1 configuration\n");
 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
 			return 0;
 		}
 	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
 		switch (lanes) {
 		case 0x00000204:
-			dev_info(pcie->dev, "4x1, 2x1 configuration\n");
+			dev_info(dev, "4x1, 2x1 configuration\n");
 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
 			return 0;
 
 		case 0x00020202:
-			dev_info(pcie->dev, "2x3 configuration\n");
+			dev_info(dev, "2x3 configuration\n");
 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
 			return 0;
 
 		case 0x00010104:
-			dev_info(pcie->dev, "4x1, 1x2 configuration\n");
+			dev_info(dev, "4x1, 1x2 configuration\n");
 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
 			return 0;
 		}
 	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
 		switch (lanes) {
 		case 0x00000004:
-			dev_info(pcie->dev, "single-mode configuration\n");
+			dev_info(dev, "single-mode configuration\n");
 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
 			return 0;
 
 		case 0x00000202:
-			dev_info(pcie->dev, "dual-mode configuration\n");
+			dev_info(dev, "dual-mode configuration\n");
 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
 			return 0;
 		}
@@ -1673,7 +1686,8 @@
  */
 static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
 {
-	struct device_node *np = pcie->dev->of_node;
+	struct device *dev = pcie->dev;
+	struct device_node *np = dev->of_node;
 
 	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
 		pcie->num_supplies = 3;
@@ -1681,12 +1695,12 @@
 		pcie->num_supplies = 2;
 
 	if (pcie->num_supplies == 0) {
-		dev_err(pcie->dev, "device %s not supported in legacy mode\n",
+		dev_err(dev, "device %s not supported in legacy mode\n",
 			np->full_name);
 		return -ENODEV;
 	}
 
-	pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
 				      sizeof(*pcie->supplies),
 				      GFP_KERNEL);
 	if (!pcie->supplies)
@@ -1698,8 +1712,7 @@
 	if (pcie->num_supplies > 2)
 		pcie->supplies[2].supply = "avdd";
 
-	return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
-				       pcie->supplies);
+	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
 }
 
 /*
@@ -1713,13 +1726,14 @@
  */
 static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
 {
-	struct device_node *np = pcie->dev->of_node;
+	struct device *dev = pcie->dev;
+	struct device_node *np = dev->of_node;
 	unsigned int i = 0;
 
 	if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
 		pcie->num_supplies = 7;
 
-		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
 					      sizeof(*pcie->supplies),
 					      GFP_KERNEL);
 		if (!pcie->supplies)
@@ -1746,7 +1760,7 @@
 		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
 					 (need_pexb ? 2 : 0);
 
-		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
 					      sizeof(*pcie->supplies),
 					      GFP_KERNEL);
 		if (!pcie->supplies)
@@ -1769,7 +1783,7 @@
 	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
 		pcie->num_supplies = 5;
 
-		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
 					      sizeof(*pcie->supplies),
 					      GFP_KERNEL);
 		if (!pcie->supplies)
@@ -1782,9 +1796,9 @@
 		pcie->supplies[4].supply = "vddio-pex-clk";
 	}
 
-	if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies,
+	if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
 					pcie->num_supplies))
-		return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
+		return devm_regulator_bulk_get(dev, pcie->num_supplies,
 					       pcie->supplies);
 
 	/*
@@ -1792,9 +1806,9 @@
 	 * that the device tree complies with an older version of the device
 	 * tree binding.
 	 */
-	dev_info(pcie->dev, "using legacy DT binding for power supplies\n");
+	dev_info(dev, "using legacy DT binding for power supplies\n");
 
-	devm_kfree(pcie->dev, pcie->supplies);
+	devm_kfree(dev, pcie->supplies);
 	pcie->num_supplies = 0;
 
 	return tegra_pcie_get_legacy_regulators(pcie);
@@ -1802,7 +1816,8 @@
 
 static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
 {
-	struct device_node *np = pcie->dev->of_node, *port;
+	struct device *dev = pcie->dev;
+	struct device_node *np = dev->of_node, *port;
 	const struct tegra_pcie_soc *soc = pcie->soc;
 	struct of_pci_range_parser parser;
 	struct of_pci_range range;
@@ -1812,7 +1827,7 @@
 	int err;
 
 	if (of_pci_range_parser_init(&parser, np)) {
-		dev_err(pcie->dev, "missing \"ranges\" property\n");
+		dev_err(dev, "missing \"ranges\" property\n");
 		return -EINVAL;
 	}
 
@@ -1867,8 +1882,7 @@
 
 	err = of_pci_parse_bus_range(np, &pcie->busn);
 	if (err < 0) {
-		dev_err(pcie->dev, "failed to parse ranges property: %d\n",
-			err);
+		dev_err(dev, "failed to parse ranges property: %d\n", err);
 		pcie->busn.name = np->name;
 		pcie->busn.start = 0;
 		pcie->busn.end = 0xff;
@@ -1883,15 +1897,14 @@
 
 		err = of_pci_get_devfn(port);
 		if (err < 0) {
-			dev_err(pcie->dev, "failed to parse address: %d\n",
-				err);
+			dev_err(dev, "failed to parse address: %d\n", err);
 			return err;
 		}
 
 		index = PCI_SLOT(err);
 
 		if (index < 1 || index > soc->num_ports) {
-			dev_err(pcie->dev, "invalid port number: %d\n", index);
+			dev_err(dev, "invalid port number: %d\n", index);
 			return -EINVAL;
 		}
 
@@ -1899,13 +1912,13 @@
 
 		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
 		if (err < 0) {
-			dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
+			dev_err(dev, "failed to parse # of lanes: %d\n",
 				err);
 			return err;
 		}
 
 		if (value > 16) {
-			dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
+			dev_err(dev, "invalid # of lanes: %u\n", value);
 			return -EINVAL;
 		}
 
@@ -1919,14 +1932,13 @@
 		mask |= ((1 << value) - 1) << lane;
 		lane += value;
 
-		rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
+		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
 		if (!rp)
 			return -ENOMEM;
 
 		err = of_address_to_resource(port, 0, &rp->regs);
 		if (err < 0) {
-			dev_err(pcie->dev, "failed to parse address: %d\n",
-				err);
+			dev_err(dev, "failed to parse address: %d\n", err);
 			return err;
 		}
 
@@ -1936,7 +1948,7 @@
 		rp->pcie = pcie;
 		rp->np = port;
 
-		rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
+		rp->base = devm_ioremap_resource(dev, &rp->regs);
 		if (IS_ERR(rp->base))
 			return PTR_ERR(rp->base);
 
@@ -1945,7 +1957,7 @@
 
 	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
 	if (err < 0) {
-		dev_err(pcie->dev, "invalid lane configuration\n");
+		dev_err(dev, "invalid lane configuration\n");
 		return err;
 	}
 
@@ -1964,6 +1976,7 @@
 #define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
 static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
 {
+	struct device *dev = port->pcie->dev;
 	unsigned int retries = 3;
 	unsigned long value;
 
@@ -1986,8 +1999,7 @@
 		} while (--timeout);
 
 		if (!timeout) {
-			dev_err(port->pcie->dev, "link %u down, retrying\n",
-				port->index);
+			dev_err(dev, "link %u down, retrying\n", port->index);
 			goto retry;
 		}
 
@@ -2011,11 +2023,12 @@
 
 static int tegra_pcie_enable(struct tegra_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	struct tegra_pcie_port *port, *tmp;
 	struct hw_pci hw;
 
 	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
-		dev_info(pcie->dev, "probing port %u, using %u lanes\n",
+		dev_info(dev, "probing port %u, using %u lanes\n",
 			 port->index, port->lanes);
 
 		tegra_pcie_port_enable(port);
@@ -2023,7 +2036,7 @@
 		if (tegra_pcie_port_check_link(port))
 			continue;
 
-		dev_info(pcie->dev, "link %u down, ignoring\n", port->index);
+		dev_info(dev, "link %u down, ignoring\n", port->index);
 
 		tegra_pcie_port_disable(port);
 		tegra_pcie_port_free(port);
@@ -2041,8 +2054,7 @@
 	hw.map_irq = tegra_pcie_map_irq;
 	hw.ops = &tegra_pcie_ops;
 
-	pci_common_init_dev(pcie->dev, &hw);
-
+	pci_common_init_dev(dev, &hw);
 	return 0;
 }
 
@@ -2204,17 +2216,18 @@
 
 static int tegra_pcie_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct tegra_pcie *pcie;
 	int err;
 
-	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
 	if (!pcie)
 		return -ENOMEM;
 
-	pcie->soc = of_device_get_match_data(&pdev->dev);
+	pcie->soc = of_device_get_match_data(dev);
 	INIT_LIST_HEAD(&pcie->buses);
 	INIT_LIST_HEAD(&pcie->ports);
-	pcie->dev = &pdev->dev;
+	pcie->dev = dev;
 
 	err = tegra_pcie_parse_dt(pcie);
 	if (err < 0)
@@ -2222,7 +2235,7 @@
 
 	err = tegra_pcie_get_resources(pcie);
 	if (err < 0) {
-		dev_err(&pdev->dev, "failed to request resources: %d\n", err);
+		dev_err(dev, "failed to request resources: %d\n", err);
 		return err;
 	}
 
@@ -2236,27 +2249,23 @@
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 		err = tegra_pcie_enable_msi(pcie);
 		if (err < 0) {
-			dev_err(&pdev->dev,
-				"failed to enable MSI support: %d\n",
-				err);
+			dev_err(dev, "failed to enable MSI support: %d\n", err);
 			goto put_resources;
 		}
 	}
 
 	err = tegra_pcie_enable(pcie);
 	if (err < 0) {
-		dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
+		dev_err(dev, "failed to enable PCIe ports: %d\n", err);
 		goto disable_msi;
 	}
 
 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
 		err = tegra_pcie_debugfs_init(pcie);
 		if (err < 0)
-			dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
-				err);
+			dev_err(dev, "failed to setup debugfs: %d\n", err);
 	}
 
-	platform_set_drvdata(pdev, pcie);
 	return 0;
 
 disable_msi:
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index a81273c..1de23d7 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -76,6 +76,16 @@
 	u32			version;
 };
 
+static u32 xgene_pcie_readl(struct xgene_pcie_port *port, u32 reg)
+{
+	return readl(port->csr_base + reg);
+}
+
+static void xgene_pcie_writel(struct xgene_pcie_port *port, u32 reg, u32 val)
+{
+	writel(val, port->csr_base + reg);
+}
+
 static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
 {
 	return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
@@ -112,9 +122,9 @@
 	if (!pci_is_root_bus(bus))
 		rtdid_val = (b << 8) | (d << 3) | f;
 
-	writel(rtdid_val, port->csr_base + RTDID);
+	xgene_pcie_writel(port, RTDID, rtdid_val);
 	/* read the register back to ensure flush */
-	readl(port->csr_base + RTDID);
+	xgene_pcie_readl(port, RTDID);
 }
 
 /*
@@ -179,28 +189,28 @@
 	.write = pci_generic_config_write32,
 };
 
-static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
+static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
 				  u32 flags, u64 size)
 {
 	u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags;
 	u32 val32 = 0;
 	u32 val;
 
-	val32 = readl(csr_base + addr);
+	val32 = xgene_pcie_readl(port, addr);
 	val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16);
-	writel(val, csr_base + addr);
+	xgene_pcie_writel(port, addr, val);
 
-	val32 = readl(csr_base + addr + 0x04);
+	val32 = xgene_pcie_readl(port, addr + 0x04);
 	val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16);
-	writel(val, csr_base + addr + 0x04);
+	xgene_pcie_writel(port, addr + 0x04, val);
 
-	val32 = readl(csr_base + addr + 0x04);
+	val32 = xgene_pcie_readl(port, addr + 0x04);
 	val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16);
-	writel(val, csr_base + addr + 0x04);
+	xgene_pcie_writel(port, addr + 0x04, val);
 
-	val32 = readl(csr_base + addr + 0x08);
+	val32 = xgene_pcie_readl(port, addr + 0x08);
 	val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16);
-	writel(val, csr_base + addr + 0x08);
+	xgene_pcie_writel(port, addr + 0x08, val);
 
 	return mask;
 }
@@ -208,32 +218,32 @@
 static void xgene_pcie_linkup(struct xgene_pcie_port *port,
 				   u32 *lanes, u32 *speed)
 {
-	void __iomem *csr_base = port->csr_base;
 	u32 val32;
 
 	port->link_up = false;
-	val32 = readl(csr_base + PCIECORE_CTLANDSTATUS);
+	val32 = xgene_pcie_readl(port, PCIECORE_CTLANDSTATUS);
 	if (val32 & LINK_UP_MASK) {
 		port->link_up = true;
 		*speed = PIPE_PHY_RATE_RD(val32);
-		val32 = readl(csr_base + BRIDGE_STATUS_0);
+		val32 = xgene_pcie_readl(port, BRIDGE_STATUS_0);
 		*lanes = val32 >> 26;
 	}
 }
 
 static int xgene_pcie_init_port(struct xgene_pcie_port *port)
 {
+	struct device *dev = port->dev;
 	int rc;
 
-	port->clk = clk_get(port->dev, NULL);
+	port->clk = clk_get(dev, NULL);
 	if (IS_ERR(port->clk)) {
-		dev_err(port->dev, "clock not available\n");
+		dev_err(dev, "clock not available\n");
 		return -ENODEV;
 	}
 
 	rc = clk_prepare_enable(port->clk);
 	if (rc) {
-		dev_err(port->dev, "clock enable failed\n");
+		dev_err(dev, "clock enable failed\n");
 		return rc;
 	}
 
@@ -243,15 +253,16 @@
 static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
 			      struct platform_device *pdev)
 {
+	struct device *dev = port->dev;
 	struct resource *res;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
-	port->csr_base = devm_ioremap_resource(port->dev, res);
+	port->csr_base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(port->csr_base))
 		return PTR_ERR(port->csr_base);
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
-	port->cfg_base = devm_ioremap_resource(port->dev, res);
+	port->cfg_base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(port->cfg_base))
 		return PTR_ERR(port->cfg_base);
 	port->cfg_addr = res->start;
@@ -263,7 +274,7 @@
 				    struct resource *res, u32 offset,
 				    u64 cpu_addr, u64 pci_addr)
 {
-	void __iomem *base = port->csr_base + offset;
+	struct device *dev = port->dev;
 	resource_size_t size = resource_size(res);
 	u64 restype = resource_type(res);
 	u64 mask = 0;
@@ -280,22 +291,24 @@
 	if (size >= min_size)
 		mask = ~(size - 1) | flag;
 	else
-		dev_warn(port->dev, "res size 0x%llx less than minimum 0x%x\n",
+		dev_warn(dev, "res size 0x%llx less than minimum 0x%x\n",
 			 (u64)size, min_size);
 
-	writel(lower_32_bits(cpu_addr), base);
-	writel(upper_32_bits(cpu_addr), base + 0x04);
-	writel(lower_32_bits(mask), base + 0x08);
-	writel(upper_32_bits(mask), base + 0x0c);
-	writel(lower_32_bits(pci_addr), base + 0x10);
-	writel(upper_32_bits(pci_addr), base + 0x14);
+	xgene_pcie_writel(port, offset, lower_32_bits(cpu_addr));
+	xgene_pcie_writel(port, offset + 0x04, upper_32_bits(cpu_addr));
+	xgene_pcie_writel(port, offset + 0x08, lower_32_bits(mask));
+	xgene_pcie_writel(port, offset + 0x0c, upper_32_bits(mask));
+	xgene_pcie_writel(port, offset + 0x10, lower_32_bits(pci_addr));
+	xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr));
 }
 
-static void xgene_pcie_setup_cfg_reg(void __iomem *csr_base, u64 addr)
+static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port)
 {
-	writel(lower_32_bits(addr), csr_base + CFGBARL);
-	writel(upper_32_bits(addr), csr_base + CFGBARH);
-	writel(EN_REG, csr_base + CFGCTL);
+	u64 addr = port->cfg_addr;
+
+	xgene_pcie_writel(port, CFGBARL, lower_32_bits(addr));
+	xgene_pcie_writel(port, CFGBARH, upper_32_bits(addr));
+	xgene_pcie_writel(port, CFGCTL, EN_REG);
 }
 
 static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
@@ -310,7 +323,7 @@
 		struct resource *res = window->res;
 		u64 restype = resource_type(res);
 
-		dev_dbg(port->dev, "%pR\n", res);
+		dev_dbg(dev, "%pR\n", res);
 
 		switch (restype) {
 		case IORESOURCE_IO:
@@ -339,17 +352,18 @@
 			return -EINVAL;
 		}
 	}
-	xgene_pcie_setup_cfg_reg(port->csr_base, port->cfg_addr);
-
+	xgene_pcie_setup_cfg_reg(port);
 	return 0;
 }
 
-static void xgene_pcie_setup_pims(void *addr, u64 pim, u64 size)
+static void xgene_pcie_setup_pims(struct xgene_pcie_port *port, u32 pim_reg,
+				  u64 pim, u64 size)
 {
-	writel(lower_32_bits(pim), addr);
-	writel(upper_32_bits(pim) | EN_COHERENCY, addr + 0x04);
-	writel(lower_32_bits(size), addr + 0x10);
-	writel(upper_32_bits(size), addr + 0x14);
+	xgene_pcie_writel(port, pim_reg, lower_32_bits(pim));
+	xgene_pcie_writel(port, pim_reg + 0x04,
+			  upper_32_bits(pim) | EN_COHERENCY);
+	xgene_pcie_writel(port, pim_reg + 0x10, lower_32_bits(size));
+	xgene_pcie_writel(port, pim_reg + 0x14, upper_32_bits(size));
 }
 
 /*
@@ -379,10 +393,10 @@
 static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
 				    struct of_pci_range *range, u8 *ib_reg_mask)
 {
-	void __iomem *csr_base = port->csr_base;
 	void __iomem *cfg_base = port->cfg_base;
+	struct device *dev = port->dev;
 	void *bar_addr;
-	void *pim_addr;
+	u32 pim_reg;
 	u64 cpu_addr = range->cpu_addr;
 	u64 pci_addr = range->pci_addr;
 	u64 size = range->size;
@@ -393,7 +407,7 @@
 
 	region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
 	if (region < 0) {
-		dev_warn(port->dev, "invalid pcie dma-range config\n");
+		dev_warn(dev, "invalid pcie dma-range config\n");
 		return;
 	}
 
@@ -403,29 +417,27 @@
 	bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
 	switch (region) {
 	case 0:
-		xgene_pcie_set_ib_mask(csr_base, BRIDGE_CFG_4, flags, size);
+		xgene_pcie_set_ib_mask(port, BRIDGE_CFG_4, flags, size);
 		bar_addr = cfg_base + PCI_BASE_ADDRESS_0;
 		writel(bar_low, bar_addr);
 		writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
-		pim_addr = csr_base + PIM1_1L;
+		pim_reg = PIM1_1L;
 		break;
 	case 1:
-		bar_addr = csr_base + IBAR2;
-		writel(bar_low, bar_addr);
-		writel(lower_32_bits(mask), csr_base + IR2MSK);
-		pim_addr = csr_base + PIM2_1L;
+		xgene_pcie_writel(port, IBAR2, bar_low);
+		xgene_pcie_writel(port, IR2MSK, lower_32_bits(mask));
+		pim_reg = PIM2_1L;
 		break;
 	case 2:
-		bar_addr = csr_base + IBAR3L;
-		writel(bar_low, bar_addr);
-		writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
-		writel(lower_32_bits(mask), csr_base + IR3MSKL);
-		writel(upper_32_bits(mask), csr_base + IR3MSKL + 0x4);
-		pim_addr = csr_base + PIM3_1L;
+		xgene_pcie_writel(port, IBAR3L, bar_low);
+		xgene_pcie_writel(port, IBAR3L + 0x4, upper_32_bits(cpu_addr));
+		xgene_pcie_writel(port, IR3MSKL, lower_32_bits(mask));
+		xgene_pcie_writel(port, IR3MSKL + 0x4, upper_32_bits(mask));
+		pim_reg = PIM3_1L;
 		break;
 	}
 
-	xgene_pcie_setup_pims(pim_addr, pci_addr, ~(size - 1));
+	xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1));
 }
 
 static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
@@ -463,7 +475,7 @@
 	for_each_of_pci_range(&parser, &range) {
 		u64 end = range.cpu_addr + range.size - 1;
 
-		dev_dbg(port->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
+		dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
 			range.flags, range.cpu_addr, end, range.pci_addr);
 		xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
 	}
@@ -476,13 +488,14 @@
 	int i;
 
 	for (i = PIM1_1L; i <= CFGCTL; i += 4)
-		writel(0x0, port->csr_base + i);
+		xgene_pcie_writel(port, i, 0);
 }
 
 static int xgene_pcie_setup(struct xgene_pcie_port *port,
 			    struct list_head *res,
 			    resource_size_t io_base)
 {
+	struct device *dev = port->dev;
 	u32 val, lanes = 0, speed = 0;
 	int ret;
 
@@ -490,7 +503,7 @@
 
 	/* setup the vendor and device IDs correctly */
 	val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
-	writel(val, port->csr_base + BRIDGE_CFG_0);
+	xgene_pcie_writel(port, BRIDGE_CFG_0, val);
 
 	ret = xgene_pcie_map_ranges(port, res, io_base);
 	if (ret)
@@ -502,27 +515,28 @@
 
 	xgene_pcie_linkup(port, &lanes, &speed);
 	if (!port->link_up)
-		dev_info(port->dev, "(rc) link down\n");
+		dev_info(dev, "(rc) link down\n");
 	else
-		dev_info(port->dev, "(rc) x%d gen-%d link up\n",
-				lanes, speed + 1);
+		dev_info(dev, "(rc) x%d gen-%d link up\n", lanes, speed + 1);
 	return 0;
 }
 
 static int xgene_pcie_probe_bridge(struct platform_device *pdev)
 {
-	struct device_node *dn = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
+	struct device_node *dn = dev->of_node;
 	struct xgene_pcie_port *port;
 	resource_size_t iobase = 0;
 	struct pci_bus *bus;
 	int ret;
 	LIST_HEAD(res);
 
-	port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
+	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
 	if (!port)
 		return -ENOMEM;
-	port->node = of_node_get(pdev->dev.of_node);
-	port->dev = &pdev->dev;
+
+	port->node = of_node_get(dn);
+	port->dev = dev;
 
 	port->version = XGENE_PCIE_IP_VER_UNKN;
 	if (of_device_is_compatible(port->node, "apm,xgene-pcie"))
@@ -540,7 +554,7 @@
 	if (ret)
 		return ret;
 
-	ret = devm_request_pci_bus_resources(&pdev->dev, &res);
+	ret = devm_request_pci_bus_resources(dev, &res);
 	if (ret)
 		goto error;
 
@@ -548,8 +562,7 @@
 	if (ret)
 		goto error;
 
-	bus = pci_create_root_bus(&pdev->dev, 0,
-					&xgene_pcie_ops, port, &res);
+	bus = pci_create_root_bus(dev, 0, &xgene_pcie_ops, port, &res);
 	if (!bus) {
 		ret = -ENOMEM;
 		goto error;
@@ -558,8 +571,6 @@
 	pci_scan_child_bus(bus);
 	pci_assign_unassigned_bus_resources(bus);
 	pci_bus_add_devices(bus);
-
-	platform_set_drvdata(pdev, port);
 	return 0;
 
 error:
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index c24e965..b0ac4df 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -55,15 +55,19 @@
 #define TLP_PAYLOAD_SIZE		0x01
 #define TLP_READ_TAG			0x1d
 #define TLP_WRITE_TAG			0x10
-#define TLP_CFG_DW0(fmttype)		(((fmttype) << 24) | TLP_PAYLOAD_SIZE)
-#define TLP_CFG_DW1(reqid, tag, be)	(((reqid) << 16) | (tag << 8) | (be))
+#define RP_DEVFN			0
+#define TLP_REQ_ID(bus, devfn)		(((bus) << 8) | (devfn))
+#define TLP_CFG_DW0(pcie, bus)						\
+    ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGRD0			\
+				    : TLP_FMTTYPE_CFGRD1) << 24) |	\
+     TLP_PAYLOAD_SIZE)
+#define TLP_CFG_DW1(pcie, tag, be)	\
+    (((TLP_REQ_ID(pcie->root_bus_nr,  RP_DEVFN)) << 16) | (tag << 8) | (be))
 #define TLP_CFG_DW2(bus, devfn, offset)	\
 				(((bus) << 24) | ((devfn) << 16) | (offset))
-#define TLP_REQ_ID(bus, devfn)		(((bus) << 8) | (devfn))
 #define TLP_COMP_STATUS(s)		(((s) >> 12) & 7)
 #define TLP_HDR_SIZE			3
 #define TLP_LOOP			500
-#define RP_DEVFN			0
 
 #define LINK_UP_TIMEOUT			HZ
 #define LINK_RETRAIN_TIMEOUT		HZ
@@ -74,7 +78,7 @@
 
 struct altera_pcie {
 	struct platform_device	*pdev;
-	void __iomem		*cra_base;
+	void __iomem		*cra_base;	/* DT Cra */
 	int			irq;
 	u8			root_bus_nr;
 	struct irq_domain	*irq_domain;
@@ -131,7 +135,7 @@
 	cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
 }
 
-static bool altera_pcie_valid_config(struct altera_pcie *pcie,
+static bool altera_pcie_valid_device(struct altera_pcie *pcie,
 				     struct pci_bus *bus, int dev)
 {
 	/* If there is no link, then there is no device */
@@ -218,13 +222,8 @@
 {
 	u32 headers[TLP_HDR_SIZE];
 
-	if (bus == pcie->root_bus_nr)
-		headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD0);
-	else
-		headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1);
-
-	headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
-					TLP_READ_TAG, byte_en);
+	headers[0] = TLP_CFG_DW0(pcie, bus);
+	headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en);
 	headers[2] = TLP_CFG_DW2(bus, devfn, where);
 
 	tlp_write_packet(pcie, headers, 0, false);
@@ -238,13 +237,8 @@
 	u32 headers[TLP_HDR_SIZE];
 	int ret;
 
-	if (bus == pcie->root_bus_nr)
-		headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR0);
-	else
-		headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1);
-
-	headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
-					TLP_WRITE_TAG, byte_en);
+	headers[0] = TLP_CFG_DW0(pcie, bus);
+	headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en);
 	headers[2] = TLP_CFG_DW2(bus, devfn, where);
 
 	/* check alignment to Qword */
@@ -342,7 +336,7 @@
 	if (altera_pcie_hide_rc_bar(bus, devfn, where))
 		return PCIBIOS_BAD_REGISTER_NUMBER;
 
-	if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) {
+	if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) {
 		*value = 0xffffffff;
 		return PCIBIOS_DEVICE_NOT_FOUND;
 	}
@@ -359,7 +353,7 @@
 	if (altera_pcie_hide_rc_bar(bus, devfn, where))
 		return PCIBIOS_BAD_REGISTER_NUMBER;
 
-	if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn)))
+	if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn)))
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
 	return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size,
@@ -394,6 +388,7 @@
 
 static void altera_wait_link_retrain(struct altera_pcie *pcie)
 {
+	struct device *dev = &pcie->pdev->dev;
 	u16 reg16;
 	unsigned long start_jiffies;
 
@@ -406,7 +401,7 @@
 			break;
 
 		if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) {
-			dev_err(&pcie->pdev->dev, "link retrain timeout\n");
+			dev_err(dev, "link retrain timeout\n");
 			break;
 		}
 		udelay(100);
@@ -419,7 +414,7 @@
 			break;
 
 		if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
-			dev_err(&pcie->pdev->dev, "link up timeout\n");
+			dev_err(dev, "link up timeout\n");
 			break;
 		}
 		udelay(100);
@@ -460,7 +455,6 @@
 {
 	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
 	irq_set_chip_data(irq, domain->host_data);
-
 	return 0;
 }
 
@@ -472,12 +466,14 @@
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct altera_pcie *pcie;
+	struct device *dev;
 	unsigned long status;
 	u32 bit;
 	u32 virq;
 
 	chained_irq_enter(chip, desc);
 	pcie = irq_desc_get_handler_data(desc);
+	dev = &pcie->pdev->dev;
 
 	while ((status = cra_readl(pcie, P2A_INT_STATUS)
 		& P2A_INT_STS_ALL) != 0) {
@@ -489,8 +485,7 @@
 			if (virq)
 				generic_handle_irq(virq);
 			else
-				dev_err(&pcie->pdev->dev,
-					"unexpected IRQ, INT%d\n", bit);
+				dev_err(dev, "unexpected IRQ, INT%d\n", bit);
 		}
 	}
 
@@ -549,30 +544,25 @@
 
 static int altera_pcie_parse_dt(struct altera_pcie *pcie)
 {
-	struct resource *cra;
+	struct device *dev = &pcie->pdev->dev;
 	struct platform_device *pdev = pcie->pdev;
+	struct resource *cra;
 
 	cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra");
-	if (!cra) {
-		dev_err(&pdev->dev, "no Cra memory resource defined\n");
-		return -ENODEV;
-	}
-
-	pcie->cra_base = devm_ioremap_resource(&pdev->dev, cra);
+	pcie->cra_base = devm_ioremap_resource(dev, cra);
 	if (IS_ERR(pcie->cra_base)) {
-		dev_err(&pdev->dev, "failed to map cra memory\n");
+		dev_err(dev, "failed to map cra memory\n");
 		return PTR_ERR(pcie->cra_base);
 	}
 
 	/* setup IRQ */
 	pcie->irq = platform_get_irq(pdev, 0);
 	if (pcie->irq <= 0) {
-		dev_err(&pdev->dev, "failed to get IRQ: %d\n", pcie->irq);
+		dev_err(dev, "failed to get IRQ: %d\n", pcie->irq);
 		return -EINVAL;
 	}
 
 	irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
-
 	return 0;
 }
 
@@ -583,12 +573,13 @@
 
 static int altera_pcie_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct altera_pcie *pcie;
 	struct pci_bus *bus;
 	struct pci_bus *child;
 	int ret;
 
-	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
 	if (!pcie)
 		return -ENOMEM;
 
@@ -596,7 +587,7 @@
 
 	ret = altera_pcie_parse_dt(pcie);
 	if (ret) {
-		dev_err(&pdev->dev, "Parsing DT failed\n");
+		dev_err(dev, "Parsing DT failed\n");
 		return ret;
 	}
 
@@ -604,13 +595,13 @@
 
 	ret = altera_pcie_parse_request_of_pci_ranges(pcie);
 	if (ret) {
-		dev_err(&pdev->dev, "Failed add resources\n");
+		dev_err(dev, "Failed add resources\n");
 		return ret;
 	}
 
 	ret = altera_pcie_init_irq_domain(pcie);
 	if (ret) {
-		dev_err(&pdev->dev, "Failed creating IRQ Domain\n");
+		dev_err(dev, "Failed creating IRQ Domain\n");
 		return ret;
 	}
 
@@ -620,7 +611,7 @@
 	cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
 	altera_pcie_host_init(pcie);
 
-	bus = pci_scan_root_bus(&pdev->dev, pcie->root_bus_nr, &altera_pcie_ops,
+	bus = pci_scan_root_bus(dev, pcie->root_bus_nr, &altera_pcie_ops,
 				pcie, &pcie->resources);
 	if (!bus)
 		return -ENOMEM;
@@ -633,8 +624,6 @@
 		pcie_bus_configure_settings(child);
 
 	pci_bus_add_devices(bus);
-
-	platform_set_drvdata(pdev, pcie);
 	return ret;
 }
 
diff --git a/drivers/pci/host/pcie-armada8k.c b/drivers/pci/host/pcie-armada8k.c
index 0f4f570..0ac0f18 100644
--- a/drivers/pci/host/pcie-armada8k.c
+++ b/drivers/pci/host/pcie-armada8k.c
@@ -29,34 +29,33 @@
 #include "pcie-designware.h"
 
 struct armada8k_pcie {
-	void __iomem *base;
+	struct pcie_port pp;		/* pp.dbi_base is DT ctrl */
 	struct clk *clk;
-	struct pcie_port pp;
 };
 
 #define PCIE_VENDOR_REGS_OFFSET		0x8000
 
-#define PCIE_GLOBAL_CONTROL_REG		0x0
+#define PCIE_GLOBAL_CONTROL_REG		(PCIE_VENDOR_REGS_OFFSET + 0x0)
 #define PCIE_APP_LTSSM_EN		BIT(2)
 #define PCIE_DEVICE_TYPE_SHIFT		4
 #define PCIE_DEVICE_TYPE_MASK		0xF
 #define PCIE_DEVICE_TYPE_RC		0x4 /* Root complex */
 
-#define PCIE_GLOBAL_STATUS_REG		0x8
+#define PCIE_GLOBAL_STATUS_REG		(PCIE_VENDOR_REGS_OFFSET + 0x8)
 #define PCIE_GLB_STS_RDLH_LINK_UP	BIT(1)
 #define PCIE_GLB_STS_PHY_LINK_UP	BIT(9)
 
-#define PCIE_GLOBAL_INT_CAUSE1_REG	0x1C
-#define PCIE_GLOBAL_INT_MASK1_REG	0x20
+#define PCIE_GLOBAL_INT_CAUSE1_REG	(PCIE_VENDOR_REGS_OFFSET + 0x1C)
+#define PCIE_GLOBAL_INT_MASK1_REG	(PCIE_VENDOR_REGS_OFFSET + 0x20)
 #define PCIE_INT_A_ASSERT_MASK		BIT(9)
 #define PCIE_INT_B_ASSERT_MASK		BIT(10)
 #define PCIE_INT_C_ASSERT_MASK		BIT(11)
 #define PCIE_INT_D_ASSERT_MASK		BIT(12)
 
-#define PCIE_ARCACHE_TRC_REG		0x50
-#define PCIE_AWCACHE_TRC_REG		0x54
-#define PCIE_ARUSER_REG			0x5C
-#define PCIE_AWUSER_REG			0x60
+#define PCIE_ARCACHE_TRC_REG		(PCIE_VENDOR_REGS_OFFSET + 0x50)
+#define PCIE_AWCACHE_TRC_REG		(PCIE_VENDOR_REGS_OFFSET + 0x54)
+#define PCIE_ARUSER_REG			(PCIE_VENDOR_REGS_OFFSET + 0x5C)
+#define PCIE_AWUSER_REG			(PCIE_VENDOR_REGS_OFFSET + 0x60)
 /*
  * AR/AW Cache defauls: Normal memory, Write-Back, Read / Write
  * allocate
@@ -72,11 +71,10 @@
 
 static int armada8k_pcie_link_up(struct pcie_port *pp)
 {
-	struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
 	u32 reg;
 	u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
 
-	reg = readl(pcie->base + PCIE_GLOBAL_STATUS_REG);
+	reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_STATUS_REG);
 
 	if ((reg & mask) == mask)
 		return 1;
@@ -85,51 +83,50 @@
 	return 0;
 }
 
-static void armada8k_pcie_establish_link(struct pcie_port *pp)
+static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
 {
-	struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
-	void __iomem *base = pcie->base;
+	struct pcie_port *pp = &pcie->pp;
 	u32 reg;
 
 	if (!dw_pcie_link_up(pp)) {
 		/* Disable LTSSM state machine to enable configuration */
-		reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
+		reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG);
 		reg &= ~(PCIE_APP_LTSSM_EN);
-		writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
+		dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg);
 	}
 
 	/* Set the device to root complex mode */
-	reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
+	reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG);
 	reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT);
 	reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT;
-	writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
+	dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg);
 
 	/* Set the PCIe master AxCache attributes */
-	writel(ARCACHE_DEFAULT_VALUE, base + PCIE_ARCACHE_TRC_REG);
-	writel(AWCACHE_DEFAULT_VALUE, base + PCIE_AWCACHE_TRC_REG);
+	dw_pcie_writel_rc(pp, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE);
+	dw_pcie_writel_rc(pp, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE);
 
 	/* Set the PCIe master AxDomain attributes */
-	reg = readl(base + PCIE_ARUSER_REG);
+	reg = dw_pcie_readl_rc(pp, PCIE_ARUSER_REG);
 	reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
 	reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
-	writel(reg, base + PCIE_ARUSER_REG);
+	dw_pcie_writel_rc(pp, PCIE_ARUSER_REG, reg);
 
-	reg = readl(base + PCIE_AWUSER_REG);
+	reg = dw_pcie_readl_rc(pp, PCIE_AWUSER_REG);
 	reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
 	reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
-	writel(reg, base + PCIE_AWUSER_REG);
+	dw_pcie_writel_rc(pp, PCIE_AWUSER_REG, reg);
 
 	/* Enable INT A-D interrupts */
-	reg = readl(base + PCIE_GLOBAL_INT_MASK1_REG);
+	reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_INT_MASK1_REG);
 	reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK |
 	       PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
-	writel(reg, base + PCIE_GLOBAL_INT_MASK1_REG);
+	dw_pcie_writel_rc(pp, PCIE_GLOBAL_INT_MASK1_REG, reg);
 
 	if (!dw_pcie_link_up(pp)) {
 		/* Configuration done. Start LTSSM */
-		reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
+		reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG);
 		reg |= PCIE_APP_LTSSM_EN;
-		writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
+		dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg);
 	}
 
 	/* Wait until the link becomes active again */
@@ -139,15 +136,16 @@
 
 static void armada8k_pcie_host_init(struct pcie_port *pp)
 {
+	struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
+
 	dw_pcie_setup_rc(pp);
-	armada8k_pcie_establish_link(pp);
+	armada8k_pcie_establish_link(pcie);
 }
 
 static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
 {
-	struct pcie_port *pp = arg;
-	struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
-	void __iomem *base = pcie->base;
+	struct armada8k_pcie *pcie = arg;
+	struct pcie_port *pp = &pcie->pp;
 	u32 val;
 
 	/*
@@ -155,8 +153,8 @@
 	 * PCI device. However, they are also latched into the PCIe
 	 * controller, so we simply discard them.
 	 */
-	val = readl(base + PCIE_GLOBAL_INT_CAUSE1_REG);
-	writel(val, base + PCIE_GLOBAL_INT_CAUSE1_REG);
+	val = dw_pcie_readl_rc(pp, PCIE_GLOBAL_INT_CAUSE1_REG);
+	dw_pcie_writel_rc(pp, PCIE_GLOBAL_INT_CAUSE1_REG, val);
 
 	return IRQ_HANDLED;
 }
@@ -166,9 +164,10 @@
 	.host_init = armada8k_pcie_host_init,
 };
 
-static int armada8k_add_pcie_port(struct pcie_port *pp,
+static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
 				  struct platform_device *pdev)
 {
+	struct pcie_port *pp = &pcie->pp;
 	struct device *dev = &pdev->dev;
 	int ret;
 
@@ -182,7 +181,7 @@
 	}
 
 	ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
-			       IRQF_SHARED, "armada8k-pcie", pp);
+			       IRQF_SHARED, "armada8k-pcie", pcie);
 	if (ret) {
 		dev_err(dev, "failed to request irq %d\n", pp->irq);
 		return ret;
@@ -217,7 +216,6 @@
 
 	pp = &pcie->pp;
 	pp->dev = dev;
-	platform_set_drvdata(pdev, pcie);
 
 	/* Get the dw-pcie unit configuration/control registers base. */
 	base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
@@ -228,9 +226,7 @@
 		goto fail;
 	}
 
-	pcie->base = pp->dbi_base + PCIE_VENDOR_REGS_OFFSET;
-
-	ret = armada8k_add_pcie_port(pp, pdev);
+	ret = armada8k_add_pcie_port(pcie, pdev);
 	if (ret)
 		goto fail;
 
diff --git a/drivers/pci/host/pcie-artpec6.c b/drivers/pci/host/pcie-artpec6.c
index 39bf1a6..212786b 100644
--- a/drivers/pci/host/pcie-artpec6.c
+++ b/drivers/pci/host/pcie-artpec6.c
@@ -27,9 +27,9 @@
 #define to_artpec6_pcie(x)	container_of(x, struct artpec6_pcie, pp)
 
 struct artpec6_pcie {
-	struct pcie_port	pp;
-	struct regmap		*regmap;
-	void __iomem		*phy_base;
+	struct pcie_port	pp;		/* pp.dbi_base is DT dbi */
+	struct regmap		*regmap;	/* DT axis,syscon-pcie */
+	void __iomem		*phy_base;	/* DT phy */
 };
 
 /* PCIe Port Logic registers (memory-mapped) */
@@ -65,18 +65,31 @@
 
 #define ARTPEC6_CPU_TO_BUS_ADDR		0x0fffffff
 
-static int artpec6_pcie_establish_link(struct pcie_port *pp)
+static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset)
 {
-	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pp);
+	u32 val;
+
+	regmap_read(artpec6_pcie->regmap, offset, &val);
+	return val;
+}
+
+static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u32 val)
+{
+	regmap_write(artpec6_pcie->regmap, offset, val);
+}
+
+static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
+{
+	struct pcie_port *pp = &artpec6_pcie->pp;
 	u32 val;
 	unsigned int retries;
 
 	/* Hold DW core in reset */
-	regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
 	val |= PCIECFG_CORE_RESET_REQ;
-	regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
 
-	regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
 	val |=  PCIECFG_RISRCREN |	/* Receiver term. 50 Ohm */
 		PCIECFG_MODE_TX_DRV_EN |
 		PCIECFG_CISRREN |	/* Reference clock term. 100 Ohm */
@@ -84,27 +97,27 @@
 	val |= PCIECFG_REFCLK_ENABLE;
 	val &= ~PCIECFG_DBG_OEN;
 	val &= ~PCIECFG_CLKREQ_B;
-	regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
 	usleep_range(5000, 6000);
 
-	regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
+	val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
 	val |= NOCCFG_ENABLE_CLK_PCIE;
-	regmap_write(artpec6_pcie->regmap, NOCCFG, val);
+	artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
 	usleep_range(20, 30);
 
-	regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
 	val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE;
-	regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
 	usleep_range(6000, 7000);
 
-	regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
+	val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
 	val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
-	regmap_write(artpec6_pcie->regmap, NOCCFG, val);
+	artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
 
 	retries = 50;
 	do {
 		usleep_range(1000, 2000);
-		regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
+		val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
 		retries--;
 	} while (retries &&
 		(val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
@@ -117,16 +130,16 @@
 	} while (retries && !(val & PHY_COSPLLLOCK));
 
 	/* Take DW core out of reset */
-	regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
 	val &= ~PCIECFG_CORE_RESET_REQ;
-	regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
 	usleep_range(100, 200);
 
 	/*
 	 * Enable writing to config regs. This is required as the Synopsys
 	 * driver changes the class code. That register needs DBI write enable.
 	 */
-	writel(DBI_RO_WR_EN, pp->dbi_base + MISC_CONTROL_1_OFF);
+	dw_pcie_writel_rc(pp, MISC_CONTROL_1_OFF, DBI_RO_WR_EN);
 
 	pp->io_base &= ARTPEC6_CPU_TO_BUS_ADDR;
 	pp->mem_base &= ARTPEC6_CPU_TO_BUS_ADDR;
@@ -137,78 +150,69 @@
 	dw_pcie_setup_rc(pp);
 
 	/* assert LTSSM enable */
-	regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
 	val |= PCIECFG_LTSSM_ENABLE;
-	regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
 
 	/* check if the link is up or not */
 	if (!dw_pcie_wait_for_link(pp))
 		return 0;
 
 	dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
-		readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
-		readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+		dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0),
+		dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1));
 
 	return -ETIMEDOUT;
 }
 
-static void artpec6_pcie_enable_interrupts(struct pcie_port *pp)
+static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
 {
+	struct pcie_port *pp = &artpec6_pcie->pp;
+
 	if (IS_ENABLED(CONFIG_PCI_MSI))
 		dw_pcie_msi_init(pp);
 }
 
 static void artpec6_pcie_host_init(struct pcie_port *pp)
 {
-	artpec6_pcie_establish_link(pp);
-	artpec6_pcie_enable_interrupts(pp);
-}
+	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pp);
 
-static int artpec6_pcie_link_up(struct pcie_port *pp)
-{
-	u32 rc;
-
-	/*
-	 * Get status from Synopsys IP
-	 * link is debug bit 36, debug register 1 starts at bit 32
-	 */
-	rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32));
-	if (rc)
-		return 1;
-
-	return 0;
+	artpec6_pcie_establish_link(artpec6_pcie);
+	artpec6_pcie_enable_interrupts(artpec6_pcie);
 }
 
 static struct pcie_host_ops artpec6_pcie_host_ops = {
-	.link_up = artpec6_pcie_link_up,
 	.host_init = artpec6_pcie_host_init,
 };
 
 static irqreturn_t artpec6_pcie_msi_handler(int irq, void *arg)
 {
-	struct pcie_port *pp = arg;
+	struct artpec6_pcie *artpec6_pcie = arg;
+	struct pcie_port *pp = &artpec6_pcie->pp;
 
 	return dw_handle_msi_irq(pp);
 }
 
-static int artpec6_add_pcie_port(struct pcie_port *pp,
+static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
 				 struct platform_device *pdev)
 {
+	struct pcie_port *pp = &artpec6_pcie->pp;
+	struct device *dev = pp->dev;
 	int ret;
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
 		if (pp->msi_irq <= 0) {
-			dev_err(&pdev->dev, "failed to get MSI irq\n");
+			dev_err(dev, "failed to get MSI irq\n");
 			return -ENODEV;
 		}
 
-		ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+		ret = devm_request_irq(dev, pp->msi_irq,
 				       artpec6_pcie_msi_handler,
 				       IRQF_SHARED | IRQF_NO_THREAD,
-				       "artpec6-pcie-msi", pp);
+				       "artpec6-pcie-msi", artpec6_pcie);
 		if (ret) {
-			dev_err(&pdev->dev, "failed to request MSI irq\n");
+			dev_err(dev, "failed to request MSI irq\n");
 			return ret;
 		}
 	}
@@ -218,7 +222,7 @@
 
 	ret = dw_pcie_host_init(pp);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to initialize host\n");
+		dev_err(dev, "failed to initialize host\n");
 		return ret;
 	}
 
@@ -227,41 +231,40 @@
 
 static int artpec6_pcie_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct artpec6_pcie *artpec6_pcie;
 	struct pcie_port *pp;
 	struct resource *dbi_base;
 	struct resource *phy_base;
 	int ret;
 
-	artpec6_pcie = devm_kzalloc(&pdev->dev, sizeof(*artpec6_pcie),
-				    GFP_KERNEL);
+	artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL);
 	if (!artpec6_pcie)
 		return -ENOMEM;
 
 	pp = &artpec6_pcie->pp;
-	pp->dev = &pdev->dev;
+	pp->dev = dev;
 
 	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
-	pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
+	pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
 	if (IS_ERR(pp->dbi_base))
 		return PTR_ERR(pp->dbi_base);
 
 	phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
-	artpec6_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
+	artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base);
 	if (IS_ERR(artpec6_pcie->phy_base))
 		return PTR_ERR(artpec6_pcie->phy_base);
 
 	artpec6_pcie->regmap =
-		syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+		syscon_regmap_lookup_by_phandle(dev->of_node,
 						"axis,syscon-pcie");
 	if (IS_ERR(artpec6_pcie->regmap))
 		return PTR_ERR(artpec6_pcie->regmap);
 
-	ret = artpec6_add_pcie_port(pp, pdev);
+	ret = artpec6_add_pcie_port(artpec6_pcie, pdev);
 	if (ret < 0)
 		return ret;
 
-	platform_set_drvdata(pdev, artpec6_pcie);
 	return 0;
 }
 
diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c
index 17da005..537f58a 100644
--- a/drivers/pci/host/pcie-designware-plat.c
+++ b/drivers/pci/host/pcie-designware-plat.c
@@ -25,8 +25,7 @@
 #include "pcie-designware.h"
 
 struct dw_plat_pcie {
-	void __iomem		*mem_base;
-	struct pcie_port	pp;
+	struct pcie_port	pp;	/* pp.dbi_base is DT 0th resource */
 };
 
 static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg)
@@ -52,6 +51,7 @@
 static int dw_plat_add_pcie_port(struct pcie_port *pp,
 				 struct platform_device *pdev)
 {
+	struct device *dev = pp->dev;
 	int ret;
 
 	pp->irq = platform_get_irq(pdev, 1);
@@ -63,11 +63,11 @@
 		if (pp->msi_irq < 0)
 			return pp->msi_irq;
 
-		ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+		ret = devm_request_irq(dev, pp->msi_irq,
 					dw_plat_pcie_msi_irq_handler,
 					IRQF_SHARED, "dw-plat-pcie-msi", pp);
 		if (ret) {
-			dev_err(&pdev->dev, "failed to request MSI IRQ\n");
+			dev_err(dev, "failed to request MSI IRQ\n");
 			return ret;
 		}
 	}
@@ -77,7 +77,7 @@
 
 	ret = dw_pcie_host_init(pp);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to initialize host\n");
+		dev_err(dev, "failed to initialize host\n");
 		return ret;
 	}
 
@@ -86,31 +86,28 @@
 
 static int dw_plat_pcie_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct dw_plat_pcie *dw_plat_pcie;
 	struct pcie_port *pp;
 	struct resource *res;  /* Resource from DT */
 	int ret;
 
-	dw_plat_pcie = devm_kzalloc(&pdev->dev, sizeof(*dw_plat_pcie),
-					GFP_KERNEL);
+	dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
 	if (!dw_plat_pcie)
 		return -ENOMEM;
 
 	pp = &dw_plat_pcie->pp;
-	pp->dev = &pdev->dev;
+	pp->dev = dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	dw_plat_pcie->mem_base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(dw_plat_pcie->mem_base))
-		return PTR_ERR(dw_plat_pcie->mem_base);
-
-	pp->dbi_base = dw_plat_pcie->mem_base;
+	pp->dbi_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pp->dbi_base))
+		return PTR_ERR(pp->dbi_base);
 
 	ret = dw_plat_add_pcie_port(pp, pdev);
 	if (ret < 0)
 		return ret;
 
-	platform_set_drvdata(pdev, dw_plat_pcie);
 	return 0;
 }
 
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 74da71e..035f50c 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -141,41 +141,35 @@
 	return PCIBIOS_SUCCESSFUL;
 }
 
-static inline u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
+u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
 {
 	if (pp->ops->readl_rc)
-		return pp->ops->readl_rc(pp, pp->dbi_base + reg);
+		return pp->ops->readl_rc(pp, reg);
 
 	return readl(pp->dbi_base + reg);
 }
 
-static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
+void dw_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
 {
 	if (pp->ops->writel_rc)
-		pp->ops->writel_rc(pp, val, pp->dbi_base + reg);
+		pp->ops->writel_rc(pp, reg, val);
 	else
 		writel(val, pp->dbi_base + reg);
 }
 
-static inline u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
+static u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
 {
 	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
 
-	if (pp->ops->readl_rc)
-		return pp->ops->readl_rc(pp, pp->dbi_base + offset + reg);
-
-	return readl(pp->dbi_base + offset + reg);
+	return dw_pcie_readl_rc(pp, offset + reg);
 }
 
-static inline void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index,
-					 u32 val, u32 reg)
+static void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index, u32 reg,
+				  u32 val)
 {
 	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
 
-	if (pp->ops->writel_rc)
-		pp->ops->writel_rc(pp, val, pp->dbi_base + offset + reg);
-	else
-		writel(val, pp->dbi_base + offset + reg);
+	dw_pcie_writel_rc(pp, offset + reg, val);
 }
 
 static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
@@ -202,35 +196,35 @@
 	u32 retries, val;
 
 	if (pp->iatu_unroll_enabled) {
-		dw_pcie_writel_unroll(pp, index,
-			lower_32_bits(cpu_addr), PCIE_ATU_UNR_LOWER_BASE);
-		dw_pcie_writel_unroll(pp, index,
-			upper_32_bits(cpu_addr), PCIE_ATU_UNR_UPPER_BASE);
-		dw_pcie_writel_unroll(pp, index,
-			lower_32_bits(cpu_addr + size - 1), PCIE_ATU_UNR_LIMIT);
-		dw_pcie_writel_unroll(pp, index,
-			lower_32_bits(pci_addr), PCIE_ATU_UNR_LOWER_TARGET);
-		dw_pcie_writel_unroll(pp, index,
-			upper_32_bits(pci_addr), PCIE_ATU_UNR_UPPER_TARGET);
-		dw_pcie_writel_unroll(pp, index,
-			type, PCIE_ATU_UNR_REGION_CTRL1);
-		dw_pcie_writel_unroll(pp, index,
-			PCIE_ATU_ENABLE, PCIE_ATU_UNR_REGION_CTRL2);
+		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LOWER_BASE,
+			lower_32_bits(cpu_addr));
+		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_UPPER_BASE,
+			upper_32_bits(cpu_addr));
+		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LIMIT,
+			lower_32_bits(cpu_addr + size - 1));
+		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LOWER_TARGET,
+			lower_32_bits(pci_addr));
+		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_UPPER_TARGET,
+			upper_32_bits(pci_addr));
+		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_REGION_CTRL1,
+			type);
+		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_REGION_CTRL2,
+			PCIE_ATU_ENABLE);
 	} else {
-		dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
-						PCIE_ATU_VIEWPORT);
-		dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr),
-						PCIE_ATU_LOWER_BASE);
-		dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr),
-						PCIE_ATU_UPPER_BASE);
-		dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
-						PCIE_ATU_LIMIT);
-		dw_pcie_writel_rc(pp, lower_32_bits(pci_addr),
-						PCIE_ATU_LOWER_TARGET);
-		dw_pcie_writel_rc(pp, upper_32_bits(pci_addr),
-						PCIE_ATU_UPPER_TARGET);
-		dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
-		dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+		dw_pcie_writel_rc(pp, PCIE_ATU_VIEWPORT,
+				  PCIE_ATU_REGION_OUTBOUND | index);
+		dw_pcie_writel_rc(pp, PCIE_ATU_LOWER_BASE,
+				  lower_32_bits(cpu_addr));
+		dw_pcie_writel_rc(pp, PCIE_ATU_UPPER_BASE,
+				  upper_32_bits(cpu_addr));
+		dw_pcie_writel_rc(pp, PCIE_ATU_LIMIT,
+				  lower_32_bits(cpu_addr + size - 1));
+		dw_pcie_writel_rc(pp, PCIE_ATU_LOWER_TARGET,
+				  lower_32_bits(pci_addr));
+		dw_pcie_writel_rc(pp, PCIE_ATU_UPPER_TARGET,
+				  upper_32_bits(pci_addr));
+		dw_pcie_writel_rc(pp, PCIE_ATU_CR1, type);
+		dw_pcie_writel_rc(pp, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
 	}
 
 	/*
@@ -760,8 +754,8 @@
 	return ret;
 }
 
-static int dw_pcie_valid_config(struct pcie_port *pp,
-				struct pci_bus *bus, int dev)
+static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
+				int dev)
 {
 	/* If there is no link, then there is no device */
 	if (bus->number != pp->root_bus_nr) {
@@ -781,7 +775,7 @@
 {
 	struct pcie_port *pp = bus->sysdata;
 
-	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
+	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
 		*val = 0xffffffff;
 		return PCIBIOS_DEVICE_NOT_FOUND;
 	}
@@ -797,7 +791,7 @@
 {
 	struct pcie_port *pp = bus->sysdata;
 
-	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
+	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
 	if (bus->number == pp->root_bus_nr)
@@ -835,7 +829,7 @@
 		dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes);
 		return;
 	}
-	dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);
+	dw_pcie_writel_rc(pp, PCIE_PORT_LINK_CONTROL, val);
 
 	/* set link width speed control register */
 	val = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
@@ -854,30 +848,30 @@
 		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
 		break;
 	}
-	dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);
+	dw_pcie_writel_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
 
 	/* setup RC BARs */
-	dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
-	dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);
+	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 0x00000004);
+	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_1, 0x00000000);
 
 	/* setup interrupt pins */
 	val = dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE);
 	val &= 0xffff00ff;
 	val |= 0x00000100;
-	dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);
+	dw_pcie_writel_rc(pp, PCI_INTERRUPT_LINE, val);
 
 	/* setup bus numbers */
 	val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS);
 	val &= 0xff000000;
 	val |= 0x00010100;
-	dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);
+	dw_pcie_writel_rc(pp, PCI_PRIMARY_BUS, val);
 
 	/* setup command register */
 	val = dw_pcie_readl_rc(pp, PCI_COMMAND);
 	val &= 0xffff0000;
 	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
 		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
-	dw_pcie_writel_rc(pp, val, PCI_COMMAND);
+	dw_pcie_writel_rc(pp, PCI_COMMAND, val);
 
 	/*
 	 * If the platform provides ->rd_other_conf, it means the platform
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index c8e5bc6..a567ea2 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -54,9 +54,8 @@
 };
 
 struct pcie_host_ops {
-	u32 (*readl_rc)(struct pcie_port *pp, void __iomem *dbi_base);
-	void (*writel_rc)(struct pcie_port *pp,
-			u32 val, void __iomem *dbi_base);
+	u32 (*readl_rc)(struct pcie_port *pp, u32 reg);
+	void (*writel_rc)(struct pcie_port *pp, u32 reg, u32 val);
 	int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
 	int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
 	int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
@@ -73,6 +72,8 @@
 	int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip);
 };
 
+u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg);
+void dw_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val);
 int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val);
 int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val);
 irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c
index 7ee9dfc..56154c2 100644
--- a/drivers/pci/host/pcie-hisi.c
+++ b/drivers/pci/host/pcie-hisi.c
@@ -22,51 +22,38 @@
 
 #include "pcie-designware.h"
 
-#define PCIE_LTSSM_LINKUP_STATE				0x11
-#define PCIE_LTSSM_STATE_MASK				0x3F
-#define PCIE_SUBCTRL_SYS_STATE4_REG			0x6818
-#define PCIE_SYS_STATE4						0x31c
-#define PCIE_HIP06_CTRL_OFF					0x1000
+#define PCIE_SUBCTRL_SYS_STATE4_REG		0x6818
+#define PCIE_HIP06_CTRL_OFF			0x1000
+#define PCIE_SYS_STATE4				(PCIE_HIP06_CTRL_OFF + 0x31c)
+#define PCIE_LTSSM_LINKUP_STATE			0x11
+#define PCIE_LTSSM_STATE_MASK			0x3F
 
 #define to_hisi_pcie(x)	container_of(x, struct hisi_pcie, pp)
 
 struct hisi_pcie;
 
 struct pcie_soc_ops {
-	int (*hisi_pcie_link_up)(struct hisi_pcie *pcie);
+	int (*hisi_pcie_link_up)(struct hisi_pcie *hisi_pcie);
 };
 
 struct hisi_pcie {
+	struct pcie_port pp;		/* pp.dbi_base is DT rc_dbi */
 	struct regmap *subctrl;
-	void __iomem *reg_base;
 	u32 port_id;
-	struct pcie_port pp;
 	struct pcie_soc_ops *soc_ops;
 };
 
-static inline void hisi_pcie_apb_writel(struct hisi_pcie *pcie,
-					u32 val, u32 reg)
-{
-	writel(val, pcie->reg_base + reg);
-}
-
-static inline u32 hisi_pcie_apb_readl(struct hisi_pcie *pcie, u32 reg)
-{
-	return readl(pcie->reg_base + reg);
-}
-
 /* HipXX PCIe host only supports 32-bit config access */
 static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size,
 			      u32 *val)
 {
 	u32 reg;
 	u32 reg_val;
-	struct hisi_pcie *pcie = to_hisi_pcie(pp);
 	void *walker = &reg_val;
 
 	walker += (where & 0x3);
 	reg = where & ~0x3;
-	reg_val = hisi_pcie_apb_readl(pcie, reg);
+	reg_val = dw_pcie_readl_rc(pp, reg);
 
 	if (size == 1)
 		*val = *(u8 __force *) walker;
@@ -86,21 +73,20 @@
 {
 	u32 reg_val;
 	u32 reg;
-	struct hisi_pcie *pcie = to_hisi_pcie(pp);
 	void *walker = &reg_val;
 
 	walker += (where & 0x3);
 	reg = where & ~0x3;
 	if (size == 4)
-		hisi_pcie_apb_writel(pcie, val, reg);
+		dw_pcie_writel_rc(pp, reg, val);
 	else if (size == 2) {
-		reg_val = hisi_pcie_apb_readl(pcie, reg);
+		reg_val = dw_pcie_readl_rc(pp, reg);
 		*(u16 __force *) walker = val;
-		hisi_pcie_apb_writel(pcie, reg_val, reg);
+		dw_pcie_writel_rc(pp, reg, reg_val);
 	} else if (size == 1) {
-		reg_val = hisi_pcie_apb_readl(pcie, reg);
+		reg_val = dw_pcie_readl_rc(pp, reg);
 		*(u8 __force *) walker = val;
-		hisi_pcie_apb_writel(pcie, reg_val, reg);
+		dw_pcie_writel_rc(pp, reg, reg_val);
 	} else
 		return PCIBIOS_BAD_REGISTER_NUMBER;
 
@@ -119,10 +105,10 @@
 
 static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie)
 {
+	struct pcie_port *pp = &hisi_pcie->pp;
 	u32 val;
 
-	val = hisi_pcie_apb_readl(hisi_pcie, PCIE_HIP06_CTRL_OFF +
-			PCIE_SYS_STATE4);
+	val = dw_pcie_readl_rc(pp, PCIE_SYS_STATE4);
 
 	return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE);
 }
@@ -140,19 +126,20 @@
 	.link_up = hisi_pcie_link_up,
 };
 
-static int hisi_add_pcie_port(struct pcie_port *pp,
-				     struct platform_device *pdev)
+static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie,
+			      struct platform_device *pdev)
 {
+	struct pcie_port *pp = &hisi_pcie->pp;
+	struct device *dev = pp->dev;
 	int ret;
 	u32 port_id;
-	struct hisi_pcie *hisi_pcie = to_hisi_pcie(pp);
 
-	if (of_property_read_u32(pdev->dev.of_node, "port-id", &port_id)) {
-		dev_err(&pdev->dev, "failed to read port-id\n");
+	if (of_property_read_u32(dev->of_node, "port-id", &port_id)) {
+		dev_err(dev, "failed to read port-id\n");
 		return -EINVAL;
 	}
 	if (port_id > 3) {
-		dev_err(&pdev->dev, "Invalid port-id: %d\n", port_id);
+		dev_err(dev, "Invalid port-id: %d\n", port_id);
 		return -EINVAL;
 	}
 	hisi_pcie->port_id = port_id;
@@ -161,7 +148,7 @@
 
 	ret = dw_pcie_host_init(pp);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to initialize host\n");
+		dev_err(dev, "failed to initialize host\n");
 		return ret;
 	}
 
@@ -170,6 +157,7 @@
 
 static int hisi_pcie_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct hisi_pcie *hisi_pcie;
 	struct pcie_port *pp;
 	const struct of_device_id *match;
@@ -177,40 +165,36 @@
 	struct device_driver *driver;
 	int ret;
 
-	hisi_pcie = devm_kzalloc(&pdev->dev, sizeof(*hisi_pcie), GFP_KERNEL);
+	hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL);
 	if (!hisi_pcie)
 		return -ENOMEM;
 
 	pp = &hisi_pcie->pp;
-	pp->dev = &pdev->dev;
-	driver = (pdev->dev).driver;
+	pp->dev = dev;
+	driver = dev->driver;
 
-	match = of_match_device(driver->of_match_table, &pdev->dev);
+	match = of_match_device(driver->of_match_table, dev);
 	hisi_pcie->soc_ops = (struct pcie_soc_ops *) match->data;
 
 	hisi_pcie->subctrl =
 	syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl");
 	if (IS_ERR(hisi_pcie->subctrl)) {
-		dev_err(pp->dev, "cannot get subctrl base\n");
+		dev_err(dev, "cannot get subctrl base\n");
 		return PTR_ERR(hisi_pcie->subctrl);
 	}
 
 	reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
-	hisi_pcie->reg_base = devm_ioremap_resource(&pdev->dev, reg);
-	if (IS_ERR(hisi_pcie->reg_base)) {
-		dev_err(pp->dev, "cannot get rc_dbi base\n");
-		return PTR_ERR(hisi_pcie->reg_base);
+	pp->dbi_base = devm_ioremap_resource(dev, reg);
+	if (IS_ERR(pp->dbi_base)) {
+		dev_err(dev, "cannot get rc_dbi base\n");
+		return PTR_ERR(pp->dbi_base);
 	}
 
-	hisi_pcie->pp.dbi_base = hisi_pcie->reg_base;
-
-	ret = hisi_add_pcie_port(pp, pdev);
+	ret = hisi_add_pcie_port(hisi_pcie, pdev);
 	if (ret)
 		return ret;
 
-	platform_set_drvdata(pdev, hisi_pcie);
-
-	dev_warn(pp->dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
+	dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
 
 	return 0;
 }
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c
index 0d7bee4..8ce0890 100644
--- a/drivers/pci/host/pcie-iproc-bcma.c
+++ b/drivers/pci/host/pcie-iproc-bcma.c
@@ -42,19 +42,24 @@
 
 static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
 {
+	struct device *dev = &bdev->dev;
 	struct iproc_pcie *pcie;
 	LIST_HEAD(res);
 	struct resource res_mem;
 	int ret;
 
-	pcie = devm_kzalloc(&bdev->dev, sizeof(*pcie), GFP_KERNEL);
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
 	if (!pcie)
 		return -ENOMEM;
 
-	pcie->dev = &bdev->dev;
-	bcma_set_drvdata(bdev, pcie);
+	pcie->dev = dev;
 
 	pcie->base = bdev->io_addr;
+	if (!pcie->base) {
+		dev_err(dev, "no controller registers\n");
+		return -ENOMEM;
+	}
+
 	pcie->base_addr = bdev->addr;
 
 	res_mem.start = bdev->addr_s[0];
@@ -67,10 +72,11 @@
 
 	ret = iproc_pcie_setup(pcie, &res);
 	if (ret)
-		dev_err(pcie->dev, "PCIe controller setup failed\n");
+		dev_err(dev, "PCIe controller setup failed\n");
 
 	pci_free_resource_list(&res);
 
+	bcma_set_drvdata(bdev, pcie);
 	return ret;
 }
 
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index 1738c52..a3de087 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -40,35 +40,35 @@
 
 static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	const struct of_device_id *of_id;
 	struct iproc_pcie *pcie;
-	struct device_node *np = pdev->dev.of_node;
+	struct device_node *np = dev->of_node;
 	struct resource reg;
 	resource_size_t iobase = 0;
 	LIST_HEAD(res);
 	int ret;
 
-	of_id = of_match_device(iproc_pcie_of_match_table, &pdev->dev);
+	of_id = of_match_device(iproc_pcie_of_match_table, dev);
 	if (!of_id)
 		return -EINVAL;
 
-	pcie = devm_kzalloc(&pdev->dev, sizeof(struct iproc_pcie), GFP_KERNEL);
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
 	if (!pcie)
 		return -ENOMEM;
 
-	pcie->dev = &pdev->dev;
+	pcie->dev = dev;
 	pcie->type = (enum iproc_pcie_type)of_id->data;
-	platform_set_drvdata(pdev, pcie);
 
 	ret = of_address_to_resource(np, 0, &reg);
 	if (ret < 0) {
-		dev_err(pcie->dev, "unable to obtain controller resources\n");
+		dev_err(dev, "unable to obtain controller resources\n");
 		return ret;
 	}
 
-	pcie->base = devm_ioremap(pcie->dev, reg.start, resource_size(&reg));
+	pcie->base = devm_ioremap(dev, reg.start, resource_size(&reg));
 	if (!pcie->base) {
-		dev_err(pcie->dev, "unable to map controller registers\n");
+		dev_err(dev, "unable to map controller registers\n");
 		return -ENOMEM;
 	}
 	pcie->base_addr = reg.start;
@@ -79,7 +79,7 @@
 		ret = of_property_read_u32(np, "brcm,pcie-ob-axi-offset",
 					   &val);
 		if (ret) {
-			dev_err(pcie->dev,
+			dev_err(dev,
 				"missing brcm,pcie-ob-axi-offset property\n");
 			return ret;
 		}
@@ -88,7 +88,7 @@
 		ret = of_property_read_u32(np, "brcm,pcie-ob-window-size",
 					   &val);
 		if (ret) {
-			dev_err(pcie->dev,
+			dev_err(dev,
 				"missing brcm,pcie-ob-window-size property\n");
 			return ret;
 		}
@@ -101,7 +101,7 @@
 	}
 
 	/* PHY use is optional */
-	pcie->phy = devm_phy_get(&pdev->dev, "pcie-phy");
+	pcie->phy = devm_phy_get(dev, "pcie-phy");
 	if (IS_ERR(pcie->phy)) {
 		if (PTR_ERR(pcie->phy) == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
@@ -110,7 +110,7 @@
 
 	ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase);
 	if (ret) {
-		dev_err(pcie->dev,
+		dev_err(dev,
 			"unable to get PCI host bridge resources\n");
 		return ret;
 	}
@@ -119,10 +119,11 @@
 
 	ret = iproc_pcie_setup(pcie, &res);
 	if (ret)
-		dev_err(pcie->dev, "PCIe controller setup failed\n");
+		dev_err(dev, "PCIe controller setup failed\n");
 
 	pci_free_resource_list(&res);
 
+	platform_set_drvdata(pdev, pcie);
 	return ret;
 }
 
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index e167b2f..0b999a9 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -63,6 +63,8 @@
 #define OARR_SIZE_CFG_SHIFT          1
 #define OARR_SIZE_CFG                BIT(OARR_SIZE_CFG_SHIFT)
 
+#define PCI_EXP_CAP			0xac
+
 #define MAX_NUM_OB_WINDOWS           2
 
 #define IPROC_PCIE_REG_INVALID 0xffff
@@ -258,9 +260,10 @@
 
 static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
 {
+	struct device *dev = pcie->dev;
 	u8 hdr_type;
 	u32 link_ctrl, class, val;
-	u16 pos, link_status;
+	u16 pos = PCI_EXP_CAP, link_status;
 	bool link_is_active = false;
 
 	/*
@@ -272,14 +275,14 @@
 
 	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
 	if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
-		dev_err(pcie->dev, "PHY or data link is INACTIVE!\n");
+		dev_err(dev, "PHY or data link is INACTIVE!\n");
 		return -ENODEV;
 	}
 
 	/* make sure we are not in EP mode */
 	pci_bus_read_config_byte(bus, 0, PCI_HEADER_TYPE, &hdr_type);
 	if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
-		dev_err(pcie->dev, "in EP mode, hdr=%#02x\n", hdr_type);
+		dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
 		return -EFAULT;
 	}
 
@@ -293,30 +296,27 @@
 	pci_bus_write_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, class);
 
 	/* check link status to see if link is active */
-	pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP);
 	pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status);
 	if (link_status & PCI_EXP_LNKSTA_NLW)
 		link_is_active = true;
 
 	if (!link_is_active) {
 		/* try GEN 1 link speed */
-#define PCI_LINK_STATUS_CTRL_2_OFFSET 0x0dc
 #define PCI_TARGET_LINK_SPEED_MASK    0xf
 #define PCI_TARGET_LINK_SPEED_GEN2    0x2
 #define PCI_TARGET_LINK_SPEED_GEN1    0x1
 		pci_bus_read_config_dword(bus, 0,
-					  PCI_LINK_STATUS_CTRL_2_OFFSET,
+					  pos + PCI_EXP_LNKCTL2,
 					  &link_ctrl);
 		if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
 		    PCI_TARGET_LINK_SPEED_GEN2) {
 			link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
 			link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
 			pci_bus_write_config_dword(bus, 0,
-					   PCI_LINK_STATUS_CTRL_2_OFFSET,
+					   pos + PCI_EXP_LNKCTL2,
 					   link_ctrl);
 			msleep(100);
 
-			pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP);
 			pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA,
 						 &link_status);
 			if (link_status & PCI_EXP_LNKSTA_NLW)
@@ -324,7 +324,7 @@
 		}
 	}
 
-	dev_info(pcie->dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
+	dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
 
 	return link_is_active ? 0 : -ENODEV;
 }
@@ -349,12 +349,13 @@
 			       u64 pci_addr, resource_size_t size)
 {
 	struct iproc_pcie_ob *ob = &pcie->ob;
+	struct device *dev = pcie->dev;
 	unsigned i;
 	u64 max_size = (u64)ob->window_size * MAX_NUM_OB_WINDOWS;
 	u64 remainder;
 
 	if (size > max_size) {
-		dev_err(pcie->dev,
+		dev_err(dev,
 			"res size %pap exceeds max supported size 0x%llx\n",
 			&size, max_size);
 		return -EINVAL;
@@ -362,15 +363,14 @@
 
 	div64_u64_rem(size, ob->window_size, &remainder);
 	if (remainder) {
-		dev_err(pcie->dev,
+		dev_err(dev,
 			"res size %pap needs to be multiple of window size %pap\n",
 			&size, &ob->window_size);
 		return -EINVAL;
 	}
 
 	if (axi_addr < ob->axi_offset) {
-		dev_err(pcie->dev,
-			"axi address %pap less than offset %pap\n",
+		dev_err(dev, "axi address %pap less than offset %pap\n",
 			&axi_addr, &ob->axi_offset);
 		return -EINVAL;
 	}
@@ -406,6 +406,7 @@
 static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
 				 struct list_head *resources)
 {
+	struct device *dev = pcie->dev;
 	struct resource_entry *window;
 	int ret;
 
@@ -425,7 +426,7 @@
 				return ret;
 			break;
 		default:
-			dev_err(pcie->dev, "invalid resource %pR\n", res);
+			dev_err(dev, "invalid resource %pR\n", res);
 			return -EINVAL;
 		}
 	}
@@ -455,26 +456,25 @@
 
 int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
 {
+	struct device *dev;
 	int ret;
 	void *sysdata;
 	struct pci_bus *bus;
 
-	if (!pcie || !pcie->dev || !pcie->base)
-		return -EINVAL;
-
-	ret = devm_request_pci_bus_resources(pcie->dev, res);
+	dev = pcie->dev;
+	ret = devm_request_pci_bus_resources(dev, res);
 	if (ret)
 		return ret;
 
 	ret = phy_init(pcie->phy);
 	if (ret) {
-		dev_err(pcie->dev, "unable to initialize PCIe PHY\n");
+		dev_err(dev, "unable to initialize PCIe PHY\n");
 		return ret;
 	}
 
 	ret = phy_power_on(pcie->phy);
 	if (ret) {
-		dev_err(pcie->dev, "unable to power on PCIe PHY\n");
+		dev_err(dev, "unable to power on PCIe PHY\n");
 		goto err_exit_phy;
 	}
 
@@ -486,7 +486,7 @@
 		pcie->reg_offsets = iproc_pcie_reg_paxc;
 		break;
 	default:
-		dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
+		dev_err(dev, "incompatible iProc PCIe interface\n");
 		ret = -EINVAL;
 		goto err_power_off_phy;
 	}
@@ -496,7 +496,7 @@
 	if (pcie->need_ob_cfg) {
 		ret = iproc_pcie_map_ranges(pcie, res);
 		if (ret) {
-			dev_err(pcie->dev, "map failed\n");
+			dev_err(dev, "map failed\n");
 			goto err_power_off_phy;
 		}
 	}
@@ -508,9 +508,9 @@
 	sysdata = pcie;
 #endif
 
-	bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops, sysdata, res);
+	bus = pci_create_root_bus(dev, 0, &iproc_pcie_ops, sysdata, res);
 	if (!bus) {
-		dev_err(pcie->dev, "unable to create PCI root bus\n");
+		dev_err(dev, "unable to create PCI root bus\n");
 		ret = -ENOMEM;
 		goto err_power_off_phy;
 	}
@@ -518,7 +518,7 @@
 
 	ret = iproc_pcie_check_link(pcie, bus);
 	if (ret) {
-		dev_err(pcie->dev, "no PCIe EP device detected\n");
+		dev_err(dev, "no PCIe EP device detected\n");
 		goto err_rm_root_bus;
 	}
 
@@ -526,7 +526,7 @@
 
 	if (IS_ENABLED(CONFIG_PCI_MSI))
 		if (iproc_pcie_msi_enable(pcie))
-			dev_info(pcie->dev, "not using iProc MSI\n");
+			dev_info(dev, "not using iProc MSI\n");
 
 	pci_scan_child_bus(bus);
 	pci_assign_unassigned_bus_resources(bus);
diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c
index 5ec2d44..ef0a84c 100644
--- a/drivers/pci/host/pcie-qcom.c
+++ b/drivers/pci/host/pcie-qcom.c
@@ -86,12 +86,10 @@
 };
 
 struct qcom_pcie {
-	struct pcie_port pp;
-	struct device *dev;
+	struct pcie_port pp;			/* pp.dbi_base is DT dbi */
+	void __iomem *parf;			/* DT parf */
+	void __iomem *elbi;			/* DT elbi */
 	union qcom_pcie_resources res;
-	void __iomem *parf;
-	void __iomem *dbi;
-	void __iomem *elbi;
 	struct phy *phy;
 	struct gpio_desc *reset;
 	struct qcom_pcie_ops *ops;
@@ -136,7 +134,7 @@
 static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
-	struct device *dev = pcie->dev;
+	struct device *dev = pcie->pp.dev;
 
 	res->vdda = devm_regulator_get(dev, "vdda");
 	if (IS_ERR(res->vdda))
@@ -188,7 +186,7 @@
 static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
-	struct device *dev = pcie->dev;
+	struct device *dev = pcie->pp.dev;
 
 	res->vdda = devm_regulator_get(dev, "vdda");
 	if (IS_ERR(res->vdda))
@@ -237,7 +235,7 @@
 static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
-	struct device *dev = pcie->dev;
+	struct device *dev = pcie->pp.dev;
 	u32 val;
 	int ret;
 
@@ -359,7 +357,7 @@
 static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
-	struct device *dev = pcie->dev;
+	struct device *dev = pcie->pp.dev;
 	int ret;
 
 	ret = reset_control_deassert(res->core);
@@ -426,7 +424,7 @@
 static int qcom_pcie_link_up(struct pcie_port *pp)
 {
 	struct qcom_pcie *pcie = to_qcom_pcie(pp);
-	u16 val = readw(pcie->dbi + PCIE20_CAP + PCI_EXP_LNKSTA);
+	u16 val = readw(pcie->pp.dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
 
 	return !!(val & PCI_EXP_LNKSTA_DLLLA);
 }
@@ -509,8 +507,8 @@
 	if (!pcie)
 		return -ENOMEM;
 
+	pp = &pcie->pp;
 	pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);
-	pcie->dev = dev;
 
 	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
 	if (IS_ERR(pcie->reset))
@@ -522,9 +520,9 @@
 		return PTR_ERR(pcie->parf);
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
-	pcie->dbi = devm_ioremap_resource(dev, res);
-	if (IS_ERR(pcie->dbi))
-		return PTR_ERR(pcie->dbi);
+	pp->dbi_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pp->dbi_base))
+		return PTR_ERR(pp->dbi_base);
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
 	pcie->elbi = devm_ioremap_resource(dev, res);
@@ -539,9 +537,7 @@
 	if (ret)
 		return ret;
 
-	pp = &pcie->pp;
 	pp->dev = dev;
-	pp->dbi_base = pcie->dbi;
 	pp->root_bus_nr = -1;
 	pp->ops = &qcom_pcie_dw_ops;
 
@@ -569,8 +565,6 @@
 		return ret;
 	}
 
-	platform_set_drvdata(pdev, pcie);
-
 	return 0;
 }
 
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index e06b1d3..62700d1 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -31,8 +31,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
-#define DRV_NAME "rcar-pcie"
-
 #define PCIECAR			0x000010
 #define PCIECCTLR		0x000018
 #define  CONFIG_SEND_ENABLE	(1 << 31)
@@ -397,6 +395,7 @@
 
 static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	unsigned int timeout = 1000;
 	u32 macsr;
 
@@ -404,7 +403,7 @@
 		return;
 
 	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
-		dev_err(pcie->dev, "Speed change already in progress\n");
+		dev_err(dev, "Speed change already in progress\n");
 		return;
 	}
 
@@ -433,7 +432,7 @@
 			rcar_pci_write_reg(pcie, macsr, MACSR);
 
 			if (macsr & SPCHGFAIL)
-				dev_err(pcie->dev, "Speed change failed\n");
+				dev_err(dev, "Speed change failed\n");
 
 			goto done;
 		}
@@ -441,15 +440,16 @@
 		msleep(1);
 	};
 
-	dev_err(pcie->dev, "Speed change timed out\n");
+	dev_err(dev, "Speed change timed out\n");
 
 done:
-	dev_info(pcie->dev, "Current link speed is %s GT/s\n",
+	dev_info(dev, "Current link speed is %s GT/s\n",
 		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
 }
 
 static int rcar_pcie_enable(struct rcar_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	struct pci_bus *bus, *child;
 	LIST_HEAD(res);
 
@@ -461,14 +461,14 @@
 	pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
 
 	if (IS_ENABLED(CONFIG_PCI_MSI))
-		bus = pci_scan_root_bus_msi(pcie->dev, pcie->root_bus_nr,
+		bus = pci_scan_root_bus_msi(dev, pcie->root_bus_nr,
 				&rcar_pcie_ops, pcie, &res, &pcie->msi.chip);
 	else
-		bus = pci_scan_root_bus(pcie->dev, pcie->root_bus_nr,
+		bus = pci_scan_root_bus(dev, pcie->root_bus_nr,
 				&rcar_pcie_ops, pcie, &res);
 
 	if (!bus) {
-		dev_err(pcie->dev, "Scanning rootbus failed");
+		dev_err(dev, "Scanning rootbus failed");
 		return -ENODEV;
 	}
 
@@ -487,6 +487,7 @@
 
 static int phy_wait_for_ack(struct rcar_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	unsigned int timeout = 100;
 
 	while (timeout--) {
@@ -496,7 +497,7 @@
 		udelay(100);
 	}
 
-	dev_err(pcie->dev, "Access to PCIe phy timed out\n");
+	dev_err(dev, "Access to PCIe phy timed out\n");
 
 	return -ETIMEDOUT;
 }
@@ -697,6 +698,7 @@
 {
 	struct rcar_pcie *pcie = data;
 	struct rcar_msi *msi = &pcie->msi;
+	struct device *dev = pcie->dev;
 	unsigned long reg;
 
 	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
@@ -717,10 +719,10 @@
 			if (test_bit(index, msi->used))
 				generic_handle_irq(irq);
 			else
-				dev_info(pcie->dev, "unhandled MSI\n");
+				dev_info(dev, "unhandled MSI\n");
 		} else {
 			/* Unknown MSI, just clear it */
-			dev_dbg(pcie->dev, "unexpected MSI\n");
+			dev_dbg(dev, "unexpected MSI\n");
 		}
 
 		/* see if there's any more pending in this vector */
@@ -843,22 +845,22 @@
 
 static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
 {
-	struct platform_device *pdev = to_platform_device(pcie->dev);
+	struct device *dev = pcie->dev;
 	struct rcar_msi *msi = &pcie->msi;
 	unsigned long base;
 	int err, i;
 
 	mutex_init(&msi->lock);
 
-	msi->chip.dev = pcie->dev;
+	msi->chip.dev = dev;
 	msi->chip.setup_irq = rcar_msi_setup_irq;
 	msi->chip.setup_irqs = rcar_msi_setup_irqs;
 	msi->chip.teardown_irq = rcar_msi_teardown_irq;
 
-	msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
+	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
 					    &msi_domain_ops, &msi->chip);
 	if (!msi->domain) {
-		dev_err(&pdev->dev, "failed to create IRQ domain\n");
+		dev_err(dev, "failed to create IRQ domain\n");
 		return -ENOMEM;
 	}
 
@@ -866,19 +868,19 @@
 		irq_create_mapping(msi->domain, i);
 
 	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
-	err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
+	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
 			       IRQF_SHARED | IRQF_NO_THREAD,
 			       rcar_msi_irq_chip.name, pcie);
 	if (err < 0) {
-		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+		dev_err(dev, "failed to request IRQ: %d\n", err);
 		goto err;
 	}
 
-	err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
+	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
 			       IRQF_SHARED | IRQF_NO_THREAD,
 			       rcar_msi_irq_chip.name, pcie);
 	if (err < 0) {
-		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+		dev_err(dev, "failed to request IRQ: %d\n", err);
 		goto err;
 	}
 
@@ -899,32 +901,32 @@
 	return err;
 }
 
-static int rcar_pcie_get_resources(struct platform_device *pdev,
-				   struct rcar_pcie *pcie)
+static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	struct resource res;
 	int err, i;
 
-	err = of_address_to_resource(pdev->dev.of_node, 0, &res);
+	err = of_address_to_resource(dev->of_node, 0, &res);
 	if (err)
 		return err;
 
-	pcie->base = devm_ioremap_resource(&pdev->dev, &res);
+	pcie->base = devm_ioremap_resource(dev, &res);
 	if (IS_ERR(pcie->base))
 		return PTR_ERR(pcie->base);
 
-	pcie->clk = devm_clk_get(&pdev->dev, "pcie");
+	pcie->clk = devm_clk_get(dev, "pcie");
 	if (IS_ERR(pcie->clk)) {
-		dev_err(pcie->dev, "cannot get platform clock\n");
+		dev_err(dev, "cannot get platform clock\n");
 		return PTR_ERR(pcie->clk);
 	}
 	err = clk_prepare_enable(pcie->clk);
 	if (err)
 		return err;
 
-	pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
+	pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
 	if (IS_ERR(pcie->bus_clk)) {
-		dev_err(pcie->dev, "cannot get pcie bus clock\n");
+		dev_err(dev, "cannot get pcie bus clock\n");
 		err = PTR_ERR(pcie->bus_clk);
 		goto fail_clk;
 	}
@@ -932,17 +934,17 @@
 	if (err)
 		goto fail_clk;
 
-	i = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	i = irq_of_parse_and_map(dev->of_node, 0);
 	if (!i) {
-		dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
+		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 		err = -ENOENT;
 		goto err_map_reg;
 	}
 	pcie->msi.irq1 = i;
 
-	i = irq_of_parse_and_map(pdev->dev.of_node, 1);
+	i = irq_of_parse_and_map(dev->of_node, 1);
 	if (!i) {
-		dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
+		dev_err(dev, "cannot get platform resources for msi interrupt\n");
 		err = -ENOENT;
 		goto err_map_reg;
 	}
@@ -1119,60 +1121,60 @@
 
 static int rcar_pcie_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct rcar_pcie *pcie;
 	unsigned int data;
 	const struct of_device_id *of_id;
 	int err;
 	int (*hw_init_fn)(struct rcar_pcie *);
 
-	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
 	if (!pcie)
 		return -ENOMEM;
 
-	pcie->dev = &pdev->dev;
-	platform_set_drvdata(pdev, pcie);
+	pcie->dev = dev;
 
 	INIT_LIST_HEAD(&pcie->resources);
 
 	rcar_pcie_parse_request_of_pci_ranges(pcie);
 
-	err = rcar_pcie_get_resources(pdev, pcie);
+	err = rcar_pcie_get_resources(pcie);
 	if (err < 0) {
-		dev_err(&pdev->dev, "failed to request resources: %d\n", err);
+		dev_err(dev, "failed to request resources: %d\n", err);
 		return err;
 	}
 
-	err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node);
+	err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
 	if (err)
 		return err;
 
-	of_id = of_match_device(rcar_pcie_of_match, pcie->dev);
+	of_id = of_match_device(rcar_pcie_of_match, dev);
 	if (!of_id || !of_id->data)
 		return -EINVAL;
 	hw_init_fn = of_id->data;
 
-	pm_runtime_enable(pcie->dev);
-	err = pm_runtime_get_sync(pcie->dev);
+	pm_runtime_enable(dev);
+	err = pm_runtime_get_sync(dev);
 	if (err < 0) {
-		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
+		dev_err(dev, "pm_runtime_get_sync failed\n");
 		goto err_pm_disable;
 	}
 
 	/* Failure to get a link might just be that no cards are inserted */
 	err = hw_init_fn(pcie);
 	if (err) {
-		dev_info(&pdev->dev, "PCIe link down\n");
+		dev_info(dev, "PCIe link down\n");
 		err = 0;
 		goto err_pm_put;
 	}
 
 	data = rcar_pci_read_reg(pcie, MACSR);
-	dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
+	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 		err = rcar_pcie_enable_msi(pcie);
 		if (err < 0) {
-			dev_err(&pdev->dev,
+			dev_err(dev,
 				"failed to enable MSI support: %d\n",
 				err);
 			goto err_pm_put;
@@ -1186,16 +1188,16 @@
 	return 0;
 
 err_pm_put:
-	pm_runtime_put(pcie->dev);
+	pm_runtime_put(dev);
 
 err_pm_disable:
-	pm_runtime_disable(pcie->dev);
+	pm_runtime_disable(dev);
 	return err;
 }
 
 static struct platform_driver rcar_pcie_driver = {
 	.driver = {
-		.name = DRV_NAME,
+		.name = "rcar-pcie",
 		.of_match_table = rcar_pcie_of_match,
 		.suppress_bind_attrs = true,
 	},
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index b8c82fc..e0b22da 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -972,7 +972,7 @@
 		return -EINVAL;
 	if (region_no == 0) {
 		if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
-		return -EINVAL;
+			return -EINVAL;
 	}
 	if (region_no != 0) {
 		if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
@@ -1091,8 +1091,6 @@
 	if (err)
 		goto err_vpcie;
 
-	platform_set_drvdata(pdev, rockchip);
-
 	rockchip_pcie_enable_interrupts(rockchip);
 
 	err = rockchip_pcie_init_irq_domain(rockchip);
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index 09aed85..3cf197b 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -25,10 +25,10 @@
 #include "pcie-designware.h"
 
 struct spear13xx_pcie {
+	struct pcie_port	pp;		/* DT dbi is pp.dbi_base */
 	void __iomem		*app_base;
 	struct phy		*phy;
 	struct clk		*clk;
-	struct pcie_port	pp;
 	bool			is_gen1;
 };
 
@@ -57,96 +57,26 @@
 };
 
 /* CR0 ID */
-#define RX_LANE_FLIP_EN_ID			0
-#define TX_LANE_FLIP_EN_ID			1
-#define SYS_AUX_PWR_DET_ID			2
 #define APP_LTSSM_ENABLE_ID			3
-#define SYS_ATTEN_BUTTON_PRESSED_ID		4
-#define SYS_MRL_SENSOR_STATE_ID			5
-#define SYS_PWR_FAULT_DET_ID			6
-#define SYS_MRL_SENSOR_CHGED_ID			7
-#define SYS_PRE_DET_CHGED_ID			8
-#define SYS_CMD_CPLED_INT_ID			9
-#define APP_INIT_RST_0_ID			11
-#define APP_REQ_ENTR_L1_ID			12
-#define APP_READY_ENTR_L23_ID			13
-#define APP_REQ_EXIT_L1_ID			14
-#define DEVICE_TYPE_EP				(0 << 25)
-#define DEVICE_TYPE_LEP				(1 << 25)
 #define DEVICE_TYPE_RC				(4 << 25)
-#define SYS_INT_ID				29
 #define MISCTRL_EN_ID				30
 #define REG_TRANSLATION_ENABLE			31
 
-/* CR1 ID */
-#define APPS_PM_XMT_TURNOFF_ID			2
-#define APPS_PM_XMT_PME_ID			5
-
 /* CR3 ID */
-#define XMLH_LTSSM_STATE_DETECT_QUIET		0x00
-#define XMLH_LTSSM_STATE_DETECT_ACT		0x01
-#define XMLH_LTSSM_STATE_POLL_ACTIVE		0x02
-#define XMLH_LTSSM_STATE_POLL_COMPLIANCE	0x03
-#define XMLH_LTSSM_STATE_POLL_CONFIG		0x04
-#define XMLH_LTSSM_STATE_PRE_DETECT_QUIET	0x05
-#define XMLH_LTSSM_STATE_DETECT_WAIT		0x06
-#define XMLH_LTSSM_STATE_CFG_LINKWD_START	0x07
-#define XMLH_LTSSM_STATE_CFG_LINKWD_ACEPT	0x08
-#define XMLH_LTSSM_STATE_CFG_LANENUM_WAIT	0x09
-#define XMLH_LTSSM_STATE_CFG_LANENUM_ACEPT	0x0A
-#define XMLH_LTSSM_STATE_CFG_COMPLETE		0x0B
-#define XMLH_LTSSM_STATE_CFG_IDLE		0x0C
-#define XMLH_LTSSM_STATE_RCVRY_LOCK		0x0D
-#define XMLH_LTSSM_STATE_RCVRY_SPEED		0x0E
-#define XMLH_LTSSM_STATE_RCVRY_RCVRCFG		0x0F
-#define XMLH_LTSSM_STATE_RCVRY_IDLE		0x10
-#define XMLH_LTSSM_STATE_L0			0x11
-#define XMLH_LTSSM_STATE_L0S			0x12
-#define XMLH_LTSSM_STATE_L123_SEND_EIDLE	0x13
-#define XMLH_LTSSM_STATE_L1_IDLE		0x14
-#define XMLH_LTSSM_STATE_L2_IDLE		0x15
-#define XMLH_LTSSM_STATE_L2_WAKE		0x16
-#define XMLH_LTSSM_STATE_DISABLED_ENTRY		0x17
-#define XMLH_LTSSM_STATE_DISABLED_IDLE		0x18
-#define XMLH_LTSSM_STATE_DISABLED		0x19
-#define XMLH_LTSSM_STATE_LPBK_ENTRY		0x1A
-#define XMLH_LTSSM_STATE_LPBK_ACTIVE		0x1B
-#define XMLH_LTSSM_STATE_LPBK_EXIT		0x1C
-#define XMLH_LTSSM_STATE_LPBK_EXIT_TIMEOUT	0x1D
-#define XMLH_LTSSM_STATE_HOT_RESET_ENTRY	0x1E
-#define XMLH_LTSSM_STATE_HOT_RESET		0x1F
-#define XMLH_LTSSM_STATE_MASK			0x3F
 #define XMLH_LINK_UP				(1 << 6)
 
-/* CR4 ID */
-#define CFG_MSI_EN_ID				18
-
 /* CR6 */
-#define INTA_CTRL_INT				(1 << 7)
-#define INTB_CTRL_INT				(1 << 8)
-#define INTC_CTRL_INT				(1 << 9)
-#define INTD_CTRL_INT				(1 << 10)
 #define MSI_CTRL_INT				(1 << 26)
 
-/* CR19 ID */
-#define VEN_MSI_REQ_ID				11
-#define VEN_MSI_FUN_NUM_ID			8
-#define VEN_MSI_TC_ID				5
-#define VEN_MSI_VECTOR_ID			0
-#define VEN_MSI_REQ_EN		((u32)0x1 << VEN_MSI_REQ_ID)
-#define VEN_MSI_FUN_NUM_MASK	((u32)0x7 << VEN_MSI_FUN_NUM_ID)
-#define VEN_MSI_TC_MASK		((u32)0x7 << VEN_MSI_TC_ID)
-#define VEN_MSI_VECTOR_MASK	((u32)0x1F << VEN_MSI_VECTOR_ID)
-
 #define EXP_CAP_ID_OFFSET			0x70
 
 #define to_spear13xx_pcie(x)	container_of(x, struct spear13xx_pcie, pp)
 
-static int spear13xx_pcie_establish_link(struct pcie_port *pp)
+static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
 {
-	u32 val;
-	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+	struct pcie_port *pp = &spear13xx_pcie->pp;
 	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+	u32 val;
 	u32 exp_cap_off = EXP_CAP_ID_OFFSET;
 
 	if (dw_pcie_link_up(pp)) {
@@ -203,9 +133,9 @@
 
 static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
 {
-	struct pcie_port *pp = arg;
-	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+	struct spear13xx_pcie *spear13xx_pcie = arg;
 	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+	struct pcie_port *pp = &spear13xx_pcie->pp;
 	unsigned int status;
 
 	status = readl(&app_reg->int_sts);
@@ -220,9 +150,9 @@
 	return IRQ_HANDLED;
 }
 
-static void spear13xx_pcie_enable_interrupts(struct pcie_port *pp)
+static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
 {
-	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+	struct pcie_port *pp = &spear13xx_pcie->pp;
 	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
 
 	/* Enable MSI interrupt */
@@ -246,8 +176,10 @@
 
 static void spear13xx_pcie_host_init(struct pcie_port *pp)
 {
-	spear13xx_pcie_establish_link(pp);
-	spear13xx_pcie_enable_interrupts(pp);
+	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+
+	spear13xx_pcie_establish_link(spear13xx_pcie);
+	spear13xx_pcie_enable_interrupts(spear13xx_pcie);
 }
 
 static struct pcie_host_ops spear13xx_pcie_host_ops = {
@@ -255,10 +187,11 @@
 	.host_init = spear13xx_pcie_host_init,
 };
 
-static int spear13xx_add_pcie_port(struct pcie_port *pp,
-					 struct platform_device *pdev)
+static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
+				   struct platform_device *pdev)
 {
-	struct device *dev = &pdev->dev;
+	struct pcie_port *pp = &spear13xx_pcie->pp;
+	struct device *dev = pp->dev;
 	int ret;
 
 	pp->irq = platform_get_irq(pdev, 0);
@@ -268,7 +201,7 @@
 	}
 	ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
 			       IRQF_SHARED | IRQF_NO_THREAD,
-			       "spear1340-pcie", pp);
+			       "spear1340-pcie", spear13xx_pcie);
 	if (ret) {
 		dev_err(dev, "failed to request irq %d\n", pp->irq);
 		return ret;
@@ -288,10 +221,10 @@
 
 static int spear13xx_pcie_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	struct spear13xx_pcie *spear13xx_pcie;
 	struct pcie_port *pp;
-	struct device *dev = &pdev->dev;
-	struct device_node *np = pdev->dev.of_node;
+	struct device_node *np = dev->of_node;
 	struct resource *dbi_base;
 	int ret;
 
@@ -323,7 +256,6 @@
 	}
 
 	pp = &spear13xx_pcie->pp;
-
 	pp->dev = dev;
 
 	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
@@ -338,7 +270,7 @@
 	if (of_property_read_bool(np, "st,pcie-is-gen1"))
 		spear13xx_pcie->is_gen1 = true;
 
-	ret = spear13xx_add_pcie_port(pp, pdev);
+	ret = spear13xx_add_pcie_port(spear13xx_pcie, pdev);
 	if (ret < 0)
 		goto fail_clk;
 
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 67eae41..43eaa4a 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -212,6 +212,7 @@
 
 static int nwl_wait_for_link(struct nwl_pcie *pcie)
 {
+	struct device *dev = pcie->dev;
 	int retries;
 
 	/* check if the link is up or not */
@@ -221,7 +222,7 @@
 		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
 	}
 
-	dev_err(pcie->dev, "PHY link never came up\n");
+	dev_err(dev, "PHY link never came up\n");
 	return -ETIMEDOUT;
 }
 
@@ -277,6 +278,7 @@
 static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
 {
 	struct nwl_pcie *pcie = data;
+	struct device *dev = pcie->dev;
 	u32 misc_stat;
 
 	/* Checking for misc interrupts */
@@ -286,45 +288,43 @@
 		return IRQ_NONE;
 
 	if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
-		dev_err(pcie->dev, "Received Message FIFO Overflow\n");
+		dev_err(dev, "Received Message FIFO Overflow\n");
 
 	if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
-		dev_err(pcie->dev, "Slave error\n");
+		dev_err(dev, "Slave error\n");
 
 	if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
-		dev_err(pcie->dev, "Master error\n");
+		dev_err(dev, "Master error\n");
 
 	if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
-		dev_err(pcie->dev,
-			"In Misc Ingress address translation error\n");
+		dev_err(dev, "In Misc Ingress address translation error\n");
 
 	if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
-		dev_err(pcie->dev,
-			"In Misc Egress address translation error\n");
+		dev_err(dev, "In Misc Egress address translation error\n");
 
 	if (misc_stat & MSGF_MISC_SR_FATAL_AER)
-		dev_err(pcie->dev, "Fatal Error in AER Capability\n");
+		dev_err(dev, "Fatal Error in AER Capability\n");
 
 	if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
-		dev_err(pcie->dev, "Non-Fatal Error in AER Capability\n");
+		dev_err(dev, "Non-Fatal Error in AER Capability\n");
 
 	if (misc_stat & MSGF_MISC_SR_CORR_AER)
-		dev_err(pcie->dev, "Correctable Error in AER Capability\n");
+		dev_err(dev, "Correctable Error in AER Capability\n");
 
 	if (misc_stat & MSGF_MISC_SR_UR_DETECT)
-		dev_err(pcie->dev, "Unsupported request Detected\n");
+		dev_err(dev, "Unsupported request Detected\n");
 
 	if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
-		dev_err(pcie->dev, "Non-Fatal Error Detected\n");
+		dev_err(dev, "Non-Fatal Error Detected\n");
 
 	if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
-		dev_err(pcie->dev, "Fatal Error Detected\n");
+		dev_err(dev, "Fatal Error Detected\n");
 
 	if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
-		dev_info(pcie->dev, "Link Autonomous Bandwidth Management Status bit set\n");
+		dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
 
 	if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
-		dev_info(pcie->dev, "Link Bandwidth Management Status bit set\n");
+		dev_info(dev, "Link Bandwidth Management Status bit set\n");
 
 	/* Clear misc interrupt status */
 	nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS);
@@ -494,20 +494,21 @@
 static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
 {
 #ifdef CONFIG_PCI_MSI
-	struct fwnode_handle *fwnode = of_node_to_fwnode(pcie->dev->of_node);
+	struct device *dev = pcie->dev;
+	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
 	struct nwl_msi *msi = &pcie->msi;
 
 	msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
 						&dev_msi_domain_ops, pcie);
 	if (!msi->dev_domain) {
-		dev_err(pcie->dev, "failed to create dev IRQ domain\n");
+		dev_err(dev, "failed to create dev IRQ domain\n");
 		return -ENOMEM;
 	}
 	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
 						    &nwl_msi_domain_info,
 						    msi->dev_domain);
 	if (!msi->msi_domain) {
-		dev_err(pcie->dev, "failed to create msi IRQ domain\n");
+		dev_err(dev, "failed to create msi IRQ domain\n");
 		irq_domain_remove(msi->dev_domain);
 		return -ENOMEM;
 	}
@@ -517,12 +518,13 @@
 
 static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
 {
-	struct device_node *node = pcie->dev->of_node;
+	struct device *dev = pcie->dev;
+	struct device_node *node = dev->of_node;
 	struct device_node *legacy_intc_node;
 
 	legacy_intc_node = of_get_next_child(node, NULL);
 	if (!legacy_intc_node) {
-		dev_err(pcie->dev, "No legacy intc node found\n");
+		dev_err(dev, "No legacy intc node found\n");
 		return -EINVAL;
 	}
 
@@ -532,7 +534,7 @@
 							pcie);
 
 	if (!pcie->legacy_irq_domain) {
-		dev_err(pcie->dev, "failed to create IRQ domain\n");
+		dev_err(dev, "failed to create IRQ domain\n");
 		return -ENOMEM;
 	}
 
@@ -542,7 +544,8 @@
 
 static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
 {
-	struct platform_device *pdev = to_platform_device(pcie->dev);
+	struct device *dev = pcie->dev;
+	struct platform_device *pdev = to_platform_device(dev);
 	struct nwl_msi *msi = &pcie->msi;
 	unsigned long base;
 	int ret;
@@ -557,7 +560,7 @@
 	/* Get msi_1 IRQ number */
 	msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1");
 	if (msi->irq_msi1 < 0) {
-		dev_err(&pdev->dev, "failed to get IRQ#%d\n", msi->irq_msi1);
+		dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi1);
 		ret = -EINVAL;
 		goto err;
 	}
@@ -568,7 +571,7 @@
 	/* Get msi_0 IRQ number */
 	msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0");
 	if (msi->irq_msi0 < 0) {
-		dev_err(&pdev->dev, "failed to get IRQ#%d\n", msi->irq_msi0);
+		dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi0);
 		ret = -EINVAL;
 		goto err;
 	}
@@ -579,7 +582,7 @@
 	/* Check for msii_present bit */
 	ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT;
 	if (!ret) {
-		dev_err(pcie->dev, "MSI not present\n");
+		dev_err(dev, "MSI not present\n");
 		ret = -EIO;
 		goto err;
 	}
@@ -628,13 +631,14 @@
 
 static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
 {
-	struct platform_device *pdev = to_platform_device(pcie->dev);
+	struct device *dev = pcie->dev;
+	struct platform_device *pdev = to_platform_device(dev);
 	u32 breg_val, ecam_val, first_busno = 0;
 	int err;
 
 	breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT;
 	if (!breg_val) {
-		dev_err(pcie->dev, "BREG is not present\n");
+		dev_err(dev, "BREG is not present\n");
 		return breg_val;
 	}
 
@@ -665,7 +669,7 @@
 
 	ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT;
 	if (!ecam_val) {
-		dev_err(pcie->dev, "ECAM is not present\n");
+		dev_err(dev, "ECAM is not present\n");
 		return ecam_val;
 	}
 
@@ -692,23 +696,23 @@
 	writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS));
 
 	if (nwl_pcie_link_up(pcie))
-		dev_info(pcie->dev, "Link is UP\n");
+		dev_info(dev, "Link is UP\n");
 	else
-		dev_info(pcie->dev, "Link is DOWN\n");
+		dev_info(dev, "Link is DOWN\n");
 
 	/* Get misc IRQ number */
 	pcie->irq_misc = platform_get_irq_byname(pdev, "misc");
 	if (pcie->irq_misc < 0) {
-		dev_err(&pdev->dev, "failed to get misc IRQ %d\n",
+		dev_err(dev, "failed to get misc IRQ %d\n",
 			pcie->irq_misc);
 		return -EINVAL;
 	}
 
-	err = devm_request_irq(pcie->dev, pcie->irq_misc,
+	err = devm_request_irq(dev, pcie->irq_misc,
 			       nwl_pcie_misc_handler, IRQF_SHARED,
 			       "nwl_pcie:misc", pcie);
 	if (err) {
-		dev_err(pcie->dev, "fail to register misc IRQ#%d\n",
+		dev_err(dev, "fail to register misc IRQ#%d\n",
 			pcie->irq_misc);
 		return err;
 	}
@@ -744,31 +748,32 @@
 static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
 			     struct platform_device *pdev)
 {
-	struct device_node *node = pcie->dev->of_node;
+	struct device *dev = pcie->dev;
+	struct device_node *node = dev->of_node;
 	struct resource *res;
 	const char *type;
 
 	/* Check for device type */
 	type = of_get_property(node, "device_type", NULL);
 	if (!type || strcmp(type, "pci")) {
-		dev_err(pcie->dev, "invalid \"device_type\" %s\n", type);
+		dev_err(dev, "invalid \"device_type\" %s\n", type);
 		return -EINVAL;
 	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
-	pcie->breg_base = devm_ioremap_resource(pcie->dev, res);
+	pcie->breg_base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(pcie->breg_base))
 		return PTR_ERR(pcie->breg_base);
 	pcie->phys_breg_base = res->start;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg");
-	pcie->pcireg_base = devm_ioremap_resource(pcie->dev, res);
+	pcie->pcireg_base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(pcie->pcireg_base))
 		return PTR_ERR(pcie->pcireg_base);
 	pcie->phys_pcie_reg_base = res->start;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
-	pcie->ecam_base = devm_ioremap_resource(pcie->dev, res);
+	pcie->ecam_base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(pcie->ecam_base))
 		return PTR_ERR(pcie->ecam_base);
 	pcie->phys_ecam_base = res->start;
@@ -776,8 +781,7 @@
 	/* Get intx IRQ number */
 	pcie->irq_intx = platform_get_irq_byname(pdev, "intx");
 	if (pcie->irq_intx < 0) {
-		dev_err(&pdev->dev, "failed to get intx IRQ %d\n",
-			pcie->irq_intx);
+		dev_err(dev, "failed to get intx IRQ %d\n", pcie->irq_intx);
 		return -EINVAL;
 	}
 
@@ -794,7 +798,8 @@
 
 static int nwl_pcie_probe(struct platform_device *pdev)
 {
-	struct device_node *node = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
 	struct nwl_pcie *pcie;
 	struct pci_bus *bus;
 	struct pci_bus *child;
@@ -802,42 +807,42 @@
 	resource_size_t iobase = 0;
 	LIST_HEAD(res);
 
-	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
 	if (!pcie)
 		return -ENOMEM;
 
-	pcie->dev = &pdev->dev;
+	pcie->dev = dev;
 	pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;
 
 	err = nwl_pcie_parse_dt(pcie, pdev);
 	if (err) {
-		dev_err(pcie->dev, "Parsing DT failed\n");
+		dev_err(dev, "Parsing DT failed\n");
 		return err;
 	}
 
 	err = nwl_pcie_bridge_init(pcie);
 	if (err) {
-		dev_err(pcie->dev, "HW Initialization failed\n");
+		dev_err(dev, "HW Initialization failed\n");
 		return err;
 	}
 
 	err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase);
 	if (err) {
-		dev_err(pcie->dev, "Getting bridge resources failed\n");
+		dev_err(dev, "Getting bridge resources failed\n");
 		return err;
 	}
 
-	err = devm_request_pci_bus_resources(pcie->dev, &res);
+	err = devm_request_pci_bus_resources(dev, &res);
 	if (err)
 		goto error;
 
 	err = nwl_pcie_init_irq_domain(pcie);
 	if (err) {
-		dev_err(pcie->dev, "Failed creating IRQ Domain\n");
+		dev_err(dev, "Failed creating IRQ Domain\n");
 		goto error;
 	}
 
-	bus = pci_create_root_bus(&pdev->dev, pcie->root_busno,
+	bus = pci_create_root_bus(dev, pcie->root_busno,
 				  &nwl_pcie_ops, pcie, &res);
 	if (!bus) {
 		err = -ENOMEM;
@@ -847,8 +852,7 @@
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 		err = nwl_pcie_enable_msi(pcie, bus);
 		if (err < 0) {
-			dev_err(&pdev->dev,
-				"failed to enable MSI support: %d\n", err);
+			dev_err(dev, "failed to enable MSI support: %d\n", err);
 			goto error;
 		}
 	}
@@ -857,7 +861,6 @@
 	list_for_each_entry(child, &bus->children, node)
 		pcie_bus_configure_settings(child);
 	pci_bus_add_devices(bus);
-	platform_set_drvdata(pdev, pcie);
 	return 0;
 
 error:
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index be56803..c8616fa 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -140,10 +140,11 @@
  */
 static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
 {
+	struct device *dev = port->dev;
 	unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
 
 	if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
-		dev_dbg(port->dev, "Requester ID %lu\n",
+		dev_dbg(dev, "Requester ID %lu\n",
 			val & XILINX_PCIE_RPEFR_REQ_ID);
 		pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
 			   XILINX_PCIE_REG_RPEFR);
@@ -228,11 +229,10 @@
 
 /**
  * xilinx_pcie_assign_msi - Allocate MSI number
- * @port: PCIe port structure
  *
  * Return: A valid IRQ on success and error value on failure.
  */
-static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port)
+static int xilinx_pcie_assign_msi(void)
 {
 	int pos;
 
@@ -275,7 +275,7 @@
 	struct msi_msg msg;
 	phys_addr_t msg_addr;
 
-	hwirq = xilinx_pcie_assign_msi(port);
+	hwirq = xilinx_pcie_assign_msi();
 	if (hwirq < 0)
 		return hwirq;
 
@@ -383,6 +383,7 @@
 static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
 {
 	struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
+	struct device *dev = port->dev;
 	u32 val, mask, status, msi_data;
 
 	/* Read interrupt decode and mask registers */
@@ -394,32 +395,32 @@
 		return IRQ_NONE;
 
 	if (status & XILINX_PCIE_INTR_LINK_DOWN)
-		dev_warn(port->dev, "Link Down\n");
+		dev_warn(dev, "Link Down\n");
 
 	if (status & XILINX_PCIE_INTR_ECRC_ERR)
-		dev_warn(port->dev, "ECRC failed\n");
+		dev_warn(dev, "ECRC failed\n");
 
 	if (status & XILINX_PCIE_INTR_STR_ERR)
-		dev_warn(port->dev, "Streaming error\n");
+		dev_warn(dev, "Streaming error\n");
 
 	if (status & XILINX_PCIE_INTR_HOT_RESET)
-		dev_info(port->dev, "Hot reset\n");
+		dev_info(dev, "Hot reset\n");
 
 	if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
-		dev_warn(port->dev, "ECAM access timeout\n");
+		dev_warn(dev, "ECAM access timeout\n");
 
 	if (status & XILINX_PCIE_INTR_CORRECTABLE) {
-		dev_warn(port->dev, "Correctable error message\n");
+		dev_warn(dev, "Correctable error message\n");
 		xilinx_pcie_clear_err_interrupts(port);
 	}
 
 	if (status & XILINX_PCIE_INTR_NONFATAL) {
-		dev_warn(port->dev, "Non fatal error message\n");
+		dev_warn(dev, "Non fatal error message\n");
 		xilinx_pcie_clear_err_interrupts(port);
 	}
 
 	if (status & XILINX_PCIE_INTR_FATAL) {
-		dev_warn(port->dev, "Fatal error message\n");
+		dev_warn(dev, "Fatal error message\n");
 		xilinx_pcie_clear_err_interrupts(port);
 	}
 
@@ -429,7 +430,7 @@
 
 		/* Check whether interrupt valid */
 		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
-			dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+			dev_warn(dev, "RP Intr FIFO1 read error\n");
 			goto error;
 		}
 
@@ -451,7 +452,7 @@
 		val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
 
 		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
-			dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+			dev_warn(dev, "RP Intr FIFO1 read error\n");
 			goto error;
 		}
 
@@ -471,31 +472,31 @@
 	}
 
 	if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
-		dev_warn(port->dev, "Slave unsupported request\n");
+		dev_warn(dev, "Slave unsupported request\n");
 
 	if (status & XILINX_PCIE_INTR_SLV_UNEXP)
-		dev_warn(port->dev, "Slave unexpected completion\n");
+		dev_warn(dev, "Slave unexpected completion\n");
 
 	if (status & XILINX_PCIE_INTR_SLV_COMPL)
-		dev_warn(port->dev, "Slave completion timeout\n");
+		dev_warn(dev, "Slave completion timeout\n");
 
 	if (status & XILINX_PCIE_INTR_SLV_ERRP)
-		dev_warn(port->dev, "Slave Error Poison\n");
+		dev_warn(dev, "Slave Error Poison\n");
 
 	if (status & XILINX_PCIE_INTR_SLV_CMPABT)
-		dev_warn(port->dev, "Slave Completer Abort\n");
+		dev_warn(dev, "Slave Completer Abort\n");
 
 	if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
-		dev_warn(port->dev, "Slave Illegal Burst\n");
+		dev_warn(dev, "Slave Illegal Burst\n");
 
 	if (status & XILINX_PCIE_INTR_MST_DECERR)
-		dev_warn(port->dev, "Master decode error\n");
+		dev_warn(dev, "Master decode error\n");
 
 	if (status & XILINX_PCIE_INTR_MST_SLVERR)
-		dev_warn(port->dev, "Master slave error\n");
+		dev_warn(dev, "Master slave error\n");
 
 	if (status & XILINX_PCIE_INTR_MST_ERRP)
-		dev_warn(port->dev, "Master error poison\n");
+		dev_warn(dev, "Master error poison\n");
 
 error:
 	/* Clear the Interrupt Decode register */
@@ -554,10 +555,12 @@
  */
 static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
 {
+	struct device *dev = port->dev;
+
 	if (xilinx_pcie_link_is_up(port))
-		dev_info(port->dev, "PCIe Link is UP\n");
+		dev_info(dev, "PCIe Link is UP\n");
 	else
-		dev_info(port->dev, "PCIe Link is DOWN\n");
+		dev_info(dev, "PCIe Link is DOWN\n");
 
 	/* Disable all interrupts */
 	pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
@@ -627,8 +630,8 @@
  */
 static int xilinx_pcie_probe(struct platform_device *pdev)
 {
-	struct xilinx_pcie_port *port;
 	struct device *dev = &pdev->dev;
+	struct xilinx_pcie_port *port;
 	struct pci_bus *bus;
 	int err;
 	resource_size_t iobase = 0;
@@ -668,15 +671,14 @@
 	if (err)
 		goto error;
 
-	bus = pci_create_root_bus(&pdev->dev, 0,
-				  &xilinx_pcie_ops, port, &res);
+	bus = pci_create_root_bus(dev, 0, &xilinx_pcie_ops, port, &res);
 	if (!bus) {
 		err = -ENOMEM;
 		goto error;
 	}
 
 #ifdef CONFIG_PCI_MSI
-	xilinx_pcie_msi_chip.dev = port->dev;
+	xilinx_pcie_msi_chip.dev = dev;
 	bus->msi = &xilinx_pcie_msi_chip;
 #endif
 	pci_scan_child_bus(bus);
@@ -685,8 +687,6 @@
 	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
 #endif
 	pci_bus_add_devices(bus);
-	platform_set_drvdata(pdev, port);
-
 	return 0;
 
 error:
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index cffc1c0..c232729 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -834,6 +834,17 @@
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_VIPER_7410,	quirk_amd_ioapic);
 #endif /* CONFIG_X86_IO_APIC */
 
+#if defined(CONFIG_ARM64) && defined(CONFIG_PCI_ATS)
+
+static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev)
+{
+	/* Fix for improper SRIOV configuration on Cavium cn88xx  RNM device */
+	if (dev->subsystem_device == 0xa118)
+		dev->sriov->link = dev->devfn;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa018, quirk_cavium_sriov_rnm_link);
+#endif
+
 /*
  * Some settings of MMRBC can lead to data corruption so block changes.
  * See AMD 8131 HyperTransport PCI-X Tunnel Revision Guide
diff --git a/drivers/pcmcia/sa1100_assabet.c b/drivers/pcmcia/sa1100_assabet.c
index 44cfc44..71ace69 100644
--- a/drivers/pcmcia/sa1100_assabet.c
+++ b/drivers/pcmcia/sa1100_assabet.c
@@ -31,13 +31,6 @@
 	return 0;
 }
 
-static void
-assabet_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state)
-{
-	state->vs_3v  = 1; /* Can only apply 3.3V on Assabet. */
-	state->vs_Xv  = 0;
-}
-
 static int
 assabet_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state)
 {
@@ -90,7 +83,7 @@
 static struct pcmcia_low_level assabet_pcmcia_ops = { 
 	.owner			= THIS_MODULE,
 	.hw_init		= assabet_pcmcia_hw_init,
-	.socket_state		= assabet_pcmcia_socket_state,
+	.socket_state		= soc_common_cf_socket_state,
 	.configure_socket	= assabet_pcmcia_configure_socket,
 	.socket_suspend		= assabet_pcmcia_socket_suspend,
 };
diff --git a/drivers/pcmcia/sa1100_cerf.c b/drivers/pcmcia/sa1100_cerf.c
index b3774e5..c3f6736 100644
--- a/drivers/pcmcia/sa1100_cerf.c
+++ b/drivers/pcmcia/sa1100_cerf.c
@@ -45,13 +45,6 @@
 	gpio_free(CERF_GPIO_CF_RESET);
 }
 
-static void
-cerf_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state)
-{
-	state->vs_3v	= 1;
-	state->vs_Xv	= 0;
-}
-
 static int
 cerf_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
 			     const socket_state_t *state)
@@ -77,7 +70,7 @@
 	.owner			= THIS_MODULE,
 	.hw_init		= cerf_pcmcia_hw_init,
 	.hw_shutdown		= cerf_pcmcia_hw_shutdown,
-	.socket_state		= cerf_pcmcia_socket_state,
+	.socket_state		= soc_common_cf_socket_state,
 	.configure_socket	= cerf_pcmcia_configure_socket,
 };
 
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index d5ca760..153f312 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -33,6 +33,7 @@
 
 #include <linux/cpufreq.h>
 #include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -42,6 +43,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
 #include <linux/spinlock.h>
 #include <linux/timer.h>
 
@@ -79,6 +81,41 @@
 #define to_soc_pcmcia_socket(x)	\
 	container_of(x, struct soc_pcmcia_socket, socket)
 
+int soc_pcmcia_regulator_set(struct soc_pcmcia_socket *skt,
+	struct soc_pcmcia_regulator *r, int v)
+{
+	bool on;
+	int ret;
+
+	if (!r->reg)
+		return 0;
+
+	on = v != 0;
+	if (r->on == on)
+		return 0;
+
+	if (on) {
+		ret = regulator_set_voltage(r->reg, v * 100000, v * 100000);
+		if (ret) {
+			int vout = regulator_get_voltage(r->reg) / 100000;
+
+			dev_warn(&skt->socket.dev,
+				 "CS requested %s=%u.%uV, applying %u.%uV\n",
+				 r == &skt->vcc ? "Vcc" : "Vpp",
+				 v / 10, v % 10, vout / 10, vout % 10);
+		}
+
+		ret = regulator_enable(r->reg);
+	} else {
+		regulator_disable(r->reg);
+	}
+	if (ret == 0)
+		r->on = on;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(soc_pcmcia_regulator_set);
+
 static unsigned short
 calc_speed(unsigned short *spds, int num, unsigned short dflt)
 {
@@ -111,12 +148,9 @@
 {
 	unsigned int i;
 
-	for (i = 0; i < nr; i++) {
+	for (i = 0; i < nr; i++)
 		if (skt->stat[i].irq)
 			free_irq(skt->stat[i].irq, skt);
-		if (gpio_is_valid(skt->stat[i].gpio))
-			gpio_free(skt->stat[i].gpio);
-	}
 
 	if (skt->ops->hw_shutdown)
 		skt->ops->hw_shutdown(skt);
@@ -129,6 +163,30 @@
 	__soc_pcmcia_hw_shutdown(skt, ARRAY_SIZE(skt->stat));
 }
 
+int soc_pcmcia_request_gpiods(struct soc_pcmcia_socket *skt)
+{
+	struct device *dev = skt->socket.dev.parent;
+	struct gpio_desc *desc;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(skt->stat); i++) {
+		if (!skt->stat[i].name)
+			continue;
+
+		desc = devm_gpiod_get(dev, skt->stat[i].name, GPIOD_IN);
+		if (IS_ERR(desc)) {
+			dev_err(dev, "Failed to get GPIO for %s: %ld\n",
+				skt->stat[i].name, PTR_ERR(desc));
+			return PTR_ERR(desc);
+		}
+
+		skt->stat[i].desc = desc;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(soc_pcmcia_request_gpiods);
+
 static int soc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
 {
 	int ret = 0, i;
@@ -143,21 +201,32 @@
 
 	for (i = 0; i < ARRAY_SIZE(skt->stat); i++) {
 		if (gpio_is_valid(skt->stat[i].gpio)) {
-			int irq;
+			unsigned long flags = GPIOF_IN;
 
-			ret = gpio_request_one(skt->stat[i].gpio, GPIOF_IN,
-					       skt->stat[i].name);
+			/* CD is active low by default */
+			if (i == SOC_STAT_CD)
+				flags |= GPIOF_ACTIVE_LOW;
+
+			ret = devm_gpio_request_one(skt->socket.dev.parent,
+						    skt->stat[i].gpio, flags,
+						    skt->stat[i].name);
 			if (ret) {
 				__soc_pcmcia_hw_shutdown(skt, i);
 				return ret;
 			}
 
-			irq = gpio_to_irq(skt->stat[i].gpio);
+			skt->stat[i].desc = gpio_to_desc(skt->stat[i].gpio);
+		}
 
-			if (i == SOC_STAT_RDY)
-				skt->socket.pci_irq = irq;
-			else
-				skt->stat[i].irq = irq;
+		if (i < SOC_STAT_VS1 && skt->stat[i].desc) {
+			int irq = gpiod_to_irq(skt->stat[i].desc);
+
+			if (irq > 0) {
+				if (i == SOC_STAT_RDY)
+					skt->socket.pci_irq = irq;
+				else
+					skt->stat[i].irq = irq;
+			}
 		}
 
 		if (skt->stat[i].irq) {
@@ -166,8 +235,6 @@
 					  IRQF_TRIGGER_NONE,
 					  skt->stat[i].name, skt);
 			if (ret) {
-				if (gpio_is_valid(skt->stat[i].gpio))
-					gpio_free(skt->stat[i].gpio);
 				__soc_pcmcia_hw_shutdown(skt, i);
 				return ret;
 			}
@@ -197,6 +264,18 @@
 			irq_set_irq_type(skt->stat[i].irq, IRQ_TYPE_NONE);
 }
 
+/*
+ * The CF 3.0 specification says that cards tie VS1 to ground and leave
+ * VS2 open.  Many implementations do not wire up the VS signals, so we
+ * provide hard-coded values as per the CF 3.0 spec.
+ */
+void soc_common_cf_socket_state(struct soc_pcmcia_socket *skt,
+	struct pcmcia_state *state)
+{
+	state->vs_3v = 1;
+}
+EXPORT_SYMBOL_GPL(soc_common_cf_socket_state);
+
 static unsigned int soc_common_pcmcia_skt_state(struct soc_pcmcia_socket *skt)
 {
 	struct pcmcia_state state;
@@ -208,17 +287,18 @@
 	state.bvd1 = 1;
 	state.bvd2 = 1;
 
-	/* CD is active low by default */
-	if (gpio_is_valid(skt->stat[SOC_STAT_CD].gpio))
-		state.detect = !gpio_get_value(skt->stat[SOC_STAT_CD].gpio);
-
-	/* RDY and BVD are active high by default */
-	if (gpio_is_valid(skt->stat[SOC_STAT_RDY].gpio))
-		state.ready = !!gpio_get_value(skt->stat[SOC_STAT_RDY].gpio);
-	if (gpio_is_valid(skt->stat[SOC_STAT_BVD1].gpio))
-		state.bvd1 = !!gpio_get_value(skt->stat[SOC_STAT_BVD1].gpio);
-	if (gpio_is_valid(skt->stat[SOC_STAT_BVD2].gpio))
-		state.bvd2 = !!gpio_get_value(skt->stat[SOC_STAT_BVD2].gpio);
+	if (skt->stat[SOC_STAT_CD].desc)
+		state.detect = !!gpiod_get_value(skt->stat[SOC_STAT_CD].desc);
+	if (skt->stat[SOC_STAT_RDY].desc)
+		state.ready = !!gpiod_get_value(skt->stat[SOC_STAT_RDY].desc);
+	if (skt->stat[SOC_STAT_BVD1].desc)
+		state.bvd1 = !!gpiod_get_value(skt->stat[SOC_STAT_BVD1].desc);
+	if (skt->stat[SOC_STAT_BVD2].desc)
+		state.bvd2 = !!gpiod_get_value(skt->stat[SOC_STAT_BVD2].desc);
+	if (skt->stat[SOC_STAT_VS1].desc)
+		state.vs_3v = !!gpiod_get_value(skt->stat[SOC_STAT_VS1].desc);
+	if (skt->stat[SOC_STAT_VS2].desc)
+		state.vs_Xv = !!gpiod_get_value(skt->stat[SOC_STAT_VS2].desc);
 
 	skt->ops->socket_state(skt, &state);
 
@@ -257,7 +337,30 @@
 	int ret;
 
 	ret = skt->ops->configure_socket(skt, state);
+	if (ret < 0) {
+		pr_err("soc_common_pcmcia: unable to configure socket %d\n",
+		       skt->nr);
+		/* restore the previous state */
+		WARN_ON(skt->ops->configure_socket(skt, &skt->cs_state));
+		return ret;
+	}
+
 	if (ret == 0) {
+		struct gpio_desc *descs[2];
+		int values[2], n = 0;
+
+		if (skt->gpio_reset) {
+			descs[n] = skt->gpio_reset;
+			values[n++] = !!(state->flags & SS_RESET);
+		}
+		if (skt->gpio_bus_enable) {
+			descs[n] = skt->gpio_bus_enable;
+			values[n++] = !!(state->flags & SS_OUTPUT_ENA);
+		}
+
+		if (n)
+			gpiod_set_array_value_cansleep(n, descs, values);
+
 		/*
 		 * This really needs a better solution.  The IRQ
 		 * may or may not be claimed by the driver.
@@ -274,10 +377,6 @@
 		skt->cs_state = *state;
 	}
 
-	if (ret < 0)
-		printk(KERN_ERR "soc_common_pcmcia: unable to configure "
-		       "socket %d\n", skt->nr);
-
 	return ret;
 }
 
@@ -637,54 +736,19 @@
 };
 
 
-static LIST_HEAD(soc_pcmcia_sockets);
-static DEFINE_MUTEX(soc_pcmcia_sockets_lock);
-
 #ifdef CONFIG_CPU_FREQ
-static int
-soc_pcmcia_notifier(struct notifier_block *nb, unsigned long val, void *data)
+static int soc_common_pcmcia_cpufreq_nb(struct notifier_block *nb,
+	unsigned long val, void *data)
 {
-	struct soc_pcmcia_socket *skt;
+	struct soc_pcmcia_socket *skt = container_of(nb, struct soc_pcmcia_socket, cpufreq_nb);
 	struct cpufreq_freqs *freqs = data;
-	int ret = 0;
 
-	mutex_lock(&soc_pcmcia_sockets_lock);
-	list_for_each_entry(skt, &soc_pcmcia_sockets, node)
-		if (skt->ops->frequency_change)
-			ret += skt->ops->frequency_change(skt, val, freqs);
-	mutex_unlock(&soc_pcmcia_sockets_lock);
-
-	return ret;
+	return skt->ops->frequency_change(skt, val, freqs);
 }
-
-static struct notifier_block soc_pcmcia_notifier_block = {
-	.notifier_call	= soc_pcmcia_notifier
-};
-
-static int soc_pcmcia_cpufreq_register(void)
-{
-	int ret;
-
-	ret = cpufreq_register_notifier(&soc_pcmcia_notifier_block,
-					CPUFREQ_TRANSITION_NOTIFIER);
-	if (ret < 0)
-		printk(KERN_ERR "Unable to register CPU frequency change "
-				"notifier for PCMCIA (%d)\n", ret);
-	return ret;
-}
-fs_initcall(soc_pcmcia_cpufreq_register);
-
-static void soc_pcmcia_cpufreq_unregister(void)
-{
-	cpufreq_unregister_notifier(&soc_pcmcia_notifier_block,
-		CPUFREQ_TRANSITION_NOTIFIER);
-}
-module_exit(soc_pcmcia_cpufreq_unregister);
-
 #endif
 
 void soc_pcmcia_init_one(struct soc_pcmcia_socket *skt,
-	struct pcmcia_low_level *ops, struct device *dev)
+	const struct pcmcia_low_level *ops, struct device *dev)
 {
 	int i;
 
@@ -700,19 +764,21 @@
 
 void soc_pcmcia_remove_one(struct soc_pcmcia_socket *skt)
 {
-	mutex_lock(&soc_pcmcia_sockets_lock);
 	del_timer_sync(&skt->poll_timer);
 
 	pcmcia_unregister_socket(&skt->socket);
 
+#ifdef CONFIG_CPU_FREQ
+	if (skt->ops->frequency_change)
+		cpufreq_unregister_notifier(&skt->cpufreq_nb,
+					    CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+
 	soc_pcmcia_hw_shutdown(skt);
 
 	/* should not be required; violates some lowlevel drivers */
 	soc_common_pcmcia_config_skt(skt, &dead_socket);
 
-	list_del(&skt->node);
-	mutex_unlock(&soc_pcmcia_sockets_lock);
-
 	iounmap(skt->virt_io);
 	skt->virt_io = NULL;
 	release_resource(&skt->res_attr);
@@ -726,6 +792,8 @@
 {
 	int ret;
 
+	skt->cs_state = dead_socket;
+
 	setup_timer(&skt->poll_timer, soc_common_pcmcia_poll_event,
 		    (unsigned long)skt);
 	skt->poll_timer.expires = jiffies + SOC_PCMCIA_POLL_PERIOD;
@@ -752,10 +820,6 @@
 		goto out_err_5;
 	}
 
-	mutex_lock(&soc_pcmcia_sockets_lock);
-
-	list_add(&skt->node, &soc_pcmcia_sockets);
-
 	/*
 	 * We initialize default socket timing here, because
 	 * we are not guaranteed to see a SetIOMap operation at
@@ -776,14 +840,23 @@
 
 	skt->status = soc_common_pcmcia_skt_state(skt);
 
+#ifdef CONFIG_CPU_FREQ
+	if (skt->ops->frequency_change) {
+		skt->cpufreq_nb.notifier_call = soc_common_pcmcia_cpufreq_nb;
+
+		ret = cpufreq_register_notifier(&skt->cpufreq_nb,
+						CPUFREQ_TRANSITION_NOTIFIER);
+		if (ret < 0)
+			dev_err(skt->socket.dev.parent,
+				"unable to register CPU frequency change notifier for PCMCIA (%d)\n",
+				ret);
+	}
+#endif
+
 	ret = pcmcia_register_socket(&skt->socket);
 	if (ret)
 		goto out_err_7;
 
-	add_timer(&skt->poll_timer);
-
-	mutex_unlock(&soc_pcmcia_sockets_lock);
-
 	ret = device_create_file(&skt->socket.dev, &dev_attr_status);
 	if (ret)
 		goto out_err_8;
@@ -791,15 +864,12 @@
 	return ret;
 
  out_err_8:
-	mutex_lock(&soc_pcmcia_sockets_lock);
 	del_timer_sync(&skt->poll_timer);
 	pcmcia_unregister_socket(&skt->socket);
 
  out_err_7:
 	soc_pcmcia_hw_shutdown(skt);
  out_err_6:
-	list_del(&skt->node);
-	mutex_unlock(&soc_pcmcia_sockets_lock);
 	iounmap(skt->virt_io);
  out_err_5:
 	release_resource(&skt->res_attr);
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h
index 94762a5..3f36258 100644
--- a/drivers/pcmcia/soc_common.h
+++ b/drivers/pcmcia/soc_common.h
@@ -17,7 +17,14 @@
 
 
 struct device;
+struct gpio_desc;
 struct pcmcia_low_level;
+struct regulator;
+
+struct soc_pcmcia_regulator {
+	struct regulator	*reg;
+	bool			on;
+};
 
 /*
  * This structure encapsulates per-socket state which we might need to
@@ -52,18 +59,30 @@
 
 	struct {
 		int		gpio;
+		struct gpio_desc *desc;
 		unsigned int	irq;
 		const char	*name;
-	} stat[4];
+	} stat[6];
 #define SOC_STAT_CD		0	/* Card detect */
 #define SOC_STAT_BVD1		1	/* BATDEAD / IOSTSCHG */
 #define SOC_STAT_BVD2		2	/* BATWARN / IOSPKR */
 #define SOC_STAT_RDY		3	/* Ready / Interrupt */
+#define SOC_STAT_VS1		4	/* Voltage sense 1 */
+#define SOC_STAT_VS2		5	/* Voltage sense 2 */
+
+	struct gpio_desc	*gpio_reset;
+	struct gpio_desc	*gpio_bus_enable;
+	struct soc_pcmcia_regulator vcc;
+	struct soc_pcmcia_regulator vpp;
 
 	unsigned int		irq_state;
 
+#ifdef CONFIG_CPU_FREQ
+	struct notifier_block	cpufreq_nb;
+#endif
 	struct timer_list	poll_timer;
 	struct list_head	node;
+	void *driver_data;
 };
 
 struct skt_dev_info {
@@ -133,10 +152,16 @@
 extern void soc_common_pcmcia_get_timing(struct soc_pcmcia_socket *, struct soc_pcmcia_timing *);
 
 void soc_pcmcia_init_one(struct soc_pcmcia_socket *skt,
-	struct pcmcia_low_level *ops, struct device *dev);
+	const struct pcmcia_low_level *ops, struct device *dev);
 void soc_pcmcia_remove_one(struct soc_pcmcia_socket *skt);
 int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt);
+int soc_pcmcia_request_gpiods(struct soc_pcmcia_socket *skt);
 
+void soc_common_cf_socket_state(struct soc_pcmcia_socket *skt,
+	struct pcmcia_state *state);
+
+int soc_pcmcia_regulator_set(struct soc_pcmcia_socket *skt,
+	struct soc_pcmcia_regulator *r, int v);
 
 #ifdef CONFIG_PCMCIA_DEBUG
 
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index e1ab864..c8c72e8 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -151,21 +151,21 @@
 
 #define GPID2_DESC      SIG_DESC_SET(SCU8C, 9)
 
-#define D20 26
+#define F20 26
 SIG_EXPR_LIST_DECL_SINGLE(SD2DAT0, SD2, SD2_DESC);
 SIG_EXPR_DECL(GPID2IN, GPID2, GPID2_DESC);
 SIG_EXPR_DECL(GPID2IN, GPID, GPID_DESC);
 SIG_EXPR_LIST_DECL_DUAL(GPID2IN, GPID2, GPID);
-MS_PIN_DECL(D20, GPIOD2, SD2DAT0, GPID2IN);
+MS_PIN_DECL(F20, GPIOD2, SD2DAT0, GPID2IN);
 
-#define D21 27
+#define D20 27
 SIG_EXPR_LIST_DECL_SINGLE(SD2DAT1, SD2, SD2_DESC);
 SIG_EXPR_DECL(GPID2OUT, GPID2, GPID2_DESC);
 SIG_EXPR_DECL(GPID2OUT, GPID, GPID_DESC);
 SIG_EXPR_LIST_DECL_DUAL(GPID2OUT, GPID2, GPID);
-MS_PIN_DECL(D21, GPIOD3, SD2DAT1, GPID2OUT);
+MS_PIN_DECL(D20, GPIOD3, SD2DAT1, GPID2OUT);
 
-FUNC_GROUP_DECL(GPID2, D20, D21);
+FUNC_GROUP_DECL(GPID2, F20, D20);
 
 #define GPIE_DESC	SIG_DESC_SET(HW_STRAP1, 21)
 #define GPIE0_DESC	SIG_DESC_SET(SCU8C, 12)
@@ -182,28 +182,88 @@
 SIG_EXPR_DECL(GPIE0OUT, GPIE0, GPIE0_DESC);
 SIG_EXPR_DECL(GPIE0OUT, GPIE, GPIE_DESC);
 SIG_EXPR_LIST_DECL_DUAL(GPIE0OUT, GPIE0, GPIE);
-MS_PIN_DECL(C20, GPIE0, NDCD3, GPIE0OUT);
+MS_PIN_DECL(C20, GPIOE1, NDCD3, GPIE0OUT);
 
 FUNC_GROUP_DECL(GPIE0, B20, C20);
 
-#define SPI1_DESC	SIG_DESC_SET(HW_STRAP1, 13)
+#define SPI1_DESC		{ HW_STRAP1, GENMASK(13, 12), 1, 0 }
+#define SPI1DEBUG_DESC		{ HW_STRAP1, GENMASK(13, 12), 2, 0 }
+#define SPI1PASSTHRU_DESC	{ HW_STRAP1, GENMASK(13, 12), 3, 0 }
+
 #define C18 64
-SIG_EXPR_LIST_DECL_SINGLE(SYSCS, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SYSCS, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSCS, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSCS, SPI1DEBUG, SPI1PASSTHRU);
 SS_PIN_DECL(C18, GPIOI0, SYSCS);
 
 #define E15 65
-SIG_EXPR_LIST_DECL_SINGLE(SYSCK, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SYSCK, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSCK, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSCK, SPI1DEBUG, SPI1PASSTHRU);
 SS_PIN_DECL(E15, GPIOI1, SYSCK);
 
-#define A14 66
-SIG_EXPR_LIST_DECL_SINGLE(SYSMOSI, SPI1, COND1, SPI1_DESC);
-SS_PIN_DECL(A14, GPIOI2, SYSMOSI);
+#define B16 66
+SIG_EXPR_DECL(SYSMOSI, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSMOSI, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSMOSI, SPI1DEBUG, SPI1PASSTHRU);
+SS_PIN_DECL(B16, GPIOI2, SYSMOSI);
 
 #define C16 67
-SIG_EXPR_LIST_DECL_SINGLE(SYSMISO, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SYSMISO, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSMISO, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSMISO, SPI1DEBUG, SPI1PASSTHRU);
 SS_PIN_DECL(C16, GPIOI3, SYSMISO);
 
-FUNC_GROUP_DECL(SPI1, C18, E15, A14, C16);
+#define VB_DESC	SIG_DESC_SET(HW_STRAP1, 5)
+
+#define B15 68
+SIG_EXPR_DECL(SPI1CS0, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1CS0, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1CS0, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1CS0, SIG_EXPR_PTR(SPI1CS0, SPI1),
+			    SIG_EXPR_PTR(SPI1CS0, SPI1DEBUG),
+			    SIG_EXPR_PTR(SPI1CS0, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBCS, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(B15, GPIOI4, SPI1CS0, VBCS);
+
+#define C15 69
+SIG_EXPR_DECL(SPI1CK, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1CK, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1CK, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1CK, SIG_EXPR_PTR(SPI1CK, SPI1),
+			    SIG_EXPR_PTR(SPI1CK, SPI1DEBUG),
+			    SIG_EXPR_PTR(SPI1CK, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBCK, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(C15, GPIOI5, SPI1CK, VBCK);
+
+#define A14 70
+SIG_EXPR_DECL(SPI1MOSI, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1MOSI, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1MOSI, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1MOSI, SIG_EXPR_PTR(SPI1MOSI, SPI1),
+			    SIG_EXPR_PTR(SPI1MOSI, SPI1DEBUG),
+			    SIG_EXPR_PTR(SPI1MOSI, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBMOSI, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(A14, GPIOI6, SPI1MOSI, VBMOSI);
+
+#define A15 71
+SIG_EXPR_DECL(SPI1MISO, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1MISO, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1MISO, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1MISO, SIG_EXPR_PTR(SPI1MISO, SPI1),
+			    SIG_EXPR_PTR(SPI1MISO, SPI1DEBUG),
+			    SIG_EXPR_PTR(SPI1MISO, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBMISO, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(A15, GPIOI7, SPI1MISO, VBMISO);
+
+FUNC_GROUP_DECL(SPI1, B15, C15, A14, A15);
+FUNC_GROUP_DECL(SPI1DEBUG, C18, E15, B16, C16, B15, C15, A14, A15);
+FUNC_GROUP_DECL(SPI1PASSTHRU, C18, E15, B16, C16, B15, C15, A14, A15);
+FUNC_GROUP_DECL(VGABIOSROM, B15, C15, A14, A15);
+
+#define R2 72
+SIG_EXPR_LIST_DECL_SINGLE(SGPMCK, SGPM, SIG_DESC_SET(SCU84, 8));
+SS_PIN_DECL(R2, GPIOJ0, SGPMCK);
 
 #define L2 73
 SIG_EXPR_LIST_DECL_SINGLE(SGPMLD, SGPM, SIG_DESC_SET(SCU84, 9));
@@ -580,6 +640,7 @@
 	ASPEED_PINCTRL_PIN(A12),
 	ASPEED_PINCTRL_PIN(A13),
 	ASPEED_PINCTRL_PIN(A14),
+	ASPEED_PINCTRL_PIN(A15),
 	ASPEED_PINCTRL_PIN(A2),
 	ASPEED_PINCTRL_PIN(A3),
 	ASPEED_PINCTRL_PIN(A4),
@@ -592,6 +653,8 @@
 	ASPEED_PINCTRL_PIN(B12),
 	ASPEED_PINCTRL_PIN(B13),
 	ASPEED_PINCTRL_PIN(B14),
+	ASPEED_PINCTRL_PIN(B15),
+	ASPEED_PINCTRL_PIN(B16),
 	ASPEED_PINCTRL_PIN(B2),
 	ASPEED_PINCTRL_PIN(B20),
 	ASPEED_PINCTRL_PIN(B3),
@@ -603,6 +666,7 @@
 	ASPEED_PINCTRL_PIN(C12),
 	ASPEED_PINCTRL_PIN(C13),
 	ASPEED_PINCTRL_PIN(C14),
+	ASPEED_PINCTRL_PIN(C15),
 	ASPEED_PINCTRL_PIN(C16),
 	ASPEED_PINCTRL_PIN(C18),
 	ASPEED_PINCTRL_PIN(C2),
@@ -614,7 +678,6 @@
 	ASPEED_PINCTRL_PIN(D10),
 	ASPEED_PINCTRL_PIN(D2),
 	ASPEED_PINCTRL_PIN(D20),
-	ASPEED_PINCTRL_PIN(D21),
 	ASPEED_PINCTRL_PIN(D4),
 	ASPEED_PINCTRL_PIN(D5),
 	ASPEED_PINCTRL_PIN(D6),
@@ -630,6 +693,7 @@
 	ASPEED_PINCTRL_PIN(E7),
 	ASPEED_PINCTRL_PIN(E9),
 	ASPEED_PINCTRL_PIN(F19),
+	ASPEED_PINCTRL_PIN(F20),
 	ASPEED_PINCTRL_PIN(F9),
 	ASPEED_PINCTRL_PIN(H20),
 	ASPEED_PINCTRL_PIN(L1),
@@ -691,11 +755,14 @@
 	ASPEED_PINCTRL_GROUP(RMII2),
 	ASPEED_PINCTRL_GROUP(SD1),
 	ASPEED_PINCTRL_GROUP(SPI1),
+	ASPEED_PINCTRL_GROUP(SPI1DEBUG),
+	ASPEED_PINCTRL_GROUP(SPI1PASSTHRU),
 	ASPEED_PINCTRL_GROUP(TIMER4),
 	ASPEED_PINCTRL_GROUP(TIMER5),
 	ASPEED_PINCTRL_GROUP(TIMER6),
 	ASPEED_PINCTRL_GROUP(TIMER7),
 	ASPEED_PINCTRL_GROUP(TIMER8),
+	ASPEED_PINCTRL_GROUP(VGABIOSROM),
 };
 
 static const struct aspeed_pin_function aspeed_g5_functions[] = {
@@ -733,11 +800,14 @@
 	ASPEED_PINCTRL_FUNC(RMII2),
 	ASPEED_PINCTRL_FUNC(SD1),
 	ASPEED_PINCTRL_FUNC(SPI1),
+	ASPEED_PINCTRL_FUNC(SPI1DEBUG),
+	ASPEED_PINCTRL_FUNC(SPI1PASSTHRU),
 	ASPEED_PINCTRL_FUNC(TIMER4),
 	ASPEED_PINCTRL_FUNC(TIMER5),
 	ASPEED_PINCTRL_FUNC(TIMER6),
 	ASPEED_PINCTRL_FUNC(TIMER7),
 	ASPEED_PINCTRL_FUNC(TIMER8),
+	ASPEED_PINCTRL_FUNC(VGABIOSROM),
 };
 
 static struct aspeed_pinctrl_data aspeed_g5_pinctrl_data = {
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 0391f9f..49aeba9 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -166,13 +166,9 @@
 				bool enable, struct regmap *map)
 {
 	int i;
-	bool ret;
-
-	ret = aspeed_sig_expr_eval(expr, enable, map);
-	if (ret)
-		return ret;
 
 	for (i = 0; i < expr->ndescs; i++) {
+		bool ret;
 		const struct aspeed_sig_desc *desc = &expr->descs[i];
 		u32 pattern = enable ? desc->enable : desc->disable;
 
@@ -199,12 +195,18 @@
 static bool aspeed_sig_expr_enable(const struct aspeed_sig_expr *expr,
 				   struct regmap *map)
 {
+	if (aspeed_sig_expr_eval(expr, true, map))
+		return true;
+
 	return aspeed_sig_expr_set(expr, true, map);
 }
 
 static bool aspeed_sig_expr_disable(const struct aspeed_sig_expr *expr,
 				    struct regmap *map)
 {
+	if (!aspeed_sig_expr_eval(expr, true, map))
+		return true;
+
 	return aspeed_sig_expr_set(expr, false, map);
 }
 
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index d22a9fe..71bbeb9 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1808,6 +1808,8 @@
 		return PTR_ERR(vg->pctl_dev);
 	}
 
+	raw_spin_lock_init(&vg->lock);
+
 	ret = byt_gpio_probe(vg);
 	if (ret) {
 		pinctrl_unregister(vg->pctl_dev);
@@ -1815,7 +1817,6 @@
 	}
 
 	platform_set_drvdata(pdev, vg);
-	raw_spin_lock_init(&vg->lock);
 	pm_runtime_enable(&pdev->dev);
 
 	return 0;
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 63387a4..0144376 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -19,6 +19,7 @@
 #include <linux/pinctrl/pinconf.h>
 #include <linux/pinctrl/pinconf-generic.h>
 
+#include "../core.h"
 #include "pinctrl-intel.h"
 
 /* Offset from regs */
@@ -1056,6 +1057,26 @@
 EXPORT_SYMBOL_GPL(intel_pinctrl_remove);
 
 #ifdef CONFIG_PM_SLEEP
+static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned pin)
+{
+	const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
+
+	if (!pd || !intel_pad_usable(pctrl, pin))
+		return false;
+
+	/*
+	 * Only restore the pin if it is actually in use by the kernel (or
+	 * by userspace). It is possible that some pins are used by the
+	 * BIOS during resume and those are not always locked down so leave
+	 * them alone.
+	 */
+	if (pd->mux_owner || pd->gpio_owner ||
+	    gpiochip_line_is_irq(&pctrl->chip, pin))
+		return true;
+
+	return false;
+}
+
 int intel_pinctrl_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
@@ -1069,7 +1090,7 @@
 		const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
 		u32 val;
 
-		if (!intel_pad_usable(pctrl, desc->number))
+		if (!intel_pinctrl_should_save(pctrl, desc->number))
 			continue;
 
 		val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG0));
@@ -1130,7 +1151,7 @@
 		void __iomem *padcfg;
 		u32 val;
 
-		if (!intel_pad_usable(pctrl, desc->number))
+		if (!intel_pinctrl_should_save(pctrl, desc->number))
 			continue;
 
 		padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0);
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 07462d7..1aba2c7 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -309,7 +309,8 @@
 		 * much memory to the process.
 		 */
 		down_read(&current->mm->mmap_sem);
-		ret = get_user_pages(address, 1, !is_write, 0, &page, NULL);
+		ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE,
+				&page, NULL);
 		up_read(&current->mm->mmap_sem);
 		if (ret < 0)
 			break;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 81b8dcc..b8a21d7 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -576,6 +576,7 @@
 config ASUS_NB_WMI
 	tristate "Asus Notebook WMI Driver"
 	depends on ASUS_WMI
+	depends on SERIO_I8042 || SERIO_I8042 = n
 	---help---
 	  This is a driver for newer Asus notebooks. It adds extra features
 	  like wireless radio and bluetooth control, leds, hotkeys, backlight...
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 460fa67..2acdb0d 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -405,7 +405,7 @@
 	kernelmode = 1;
 
 	thz_dev->polling_delay = interval*1000;
-	thermal_zone_device_update(thz_dev);
+	thermal_zone_device_update(thz_dev, THERMAL_EVENT_UNSPECIFIED);
 	pr_notice("kernel mode fan control ON\n");
 }
 
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 15f1311..28551f5 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -932,30 +932,19 @@
 }
 static DEVICE_ATTR_RO(infos);
 
-static int parse_arg(const char *buf, unsigned long count, int *val)
-{
-	if (!count)
-		return 0;
-	if (count > 31)
-		return -EINVAL;
-	if (sscanf(buf, "%i", val) != 1)
-		return -EINVAL;
-	return count;
-}
-
 static ssize_t sysfs_acpi_set(struct asus_laptop *asus,
 			      const char *buf, size_t count,
 			      const char *method)
 {
 	int rv, value;
 
-	rv = parse_arg(buf, count, &value);
-	if (rv <= 0)
+	rv = kstrtoint(buf, 0, &value);
+	if (rv < 0)
 		return rv;
 
 	if (write_acpi_int(asus->handle, method, value))
 		return -ENODEV;
-	return rv;
+	return count;
 }
 
 /*
@@ -975,15 +964,17 @@
 	struct asus_laptop *asus = dev_get_drvdata(dev);
 	int rv, value;
 
-	rv = parse_arg(buf, count, &value);
-	if (rv > 0) {
-		if (write_acpi_int(asus->handle, METHOD_LEDD, value)) {
-			pr_warn("LED display write failed\n");
-			return -ENODEV;
-		}
-		asus->ledd_status = (u32) value;
+	rv = kstrtoint(buf, 0, &value);
+	if (rv < 0)
+		return rv;
+
+	if (write_acpi_int(asus->handle, METHOD_LEDD, value)) {
+		pr_warn("LED display write failed\n");
+		return -ENODEV;
 	}
-	return rv;
+
+	asus->ledd_status = (u32) value;
+	return count;
 }
 static DEVICE_ATTR_RW(ledd);
 
@@ -1148,10 +1139,12 @@
 	struct asus_laptop *asus = dev_get_drvdata(dev);
 	int rv, value;
 
-	rv = parse_arg(buf, count, &value);
-	if (rv > 0)
-		asus_set_display(asus, value);
-	return rv;
+	rv = kstrtoint(buf, 0, &value);
+	if (rv < 0)
+		return rv;
+
+	asus_set_display(asus, value);
+	return count;
 }
 static DEVICE_ATTR_WO(display);
 
@@ -1190,11 +1183,12 @@
 	struct asus_laptop *asus = dev_get_drvdata(dev);
 	int rv, value;
 
-	rv = parse_arg(buf, count, &value);
-	if (rv > 0)
-		asus_als_switch(asus, value ? 1 : 0);
+	rv = kstrtoint(buf, 0, &value);
+	if (rv < 0)
+		return rv;
 
-	return rv;
+	asus_als_switch(asus, value ? 1 : 0);
+	return count;
 }
 static DEVICE_ATTR_RW(ls_switch);
 
@@ -1219,14 +1213,15 @@
 	struct asus_laptop *asus = dev_get_drvdata(dev);
 	int rv, value;
 
-	rv = parse_arg(buf, count, &value);
-	if (rv > 0) {
-		value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
-		/* 0 <= value <= 15 */
-		asus_als_level(asus, value);
-	}
+	rv = kstrtoint(buf, 0, &value);
+	if (rv < 0)
+		return rv;
 
-	return rv;
+	value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
+	/* 0 <= value <= 15 */
+	asus_als_level(asus, value);
+
+	return count;
 }
 static DEVICE_ATTR_RW(ls_level);
 
@@ -1301,14 +1296,14 @@
 	int rv, value;
 	int ret;
 
-	rv = parse_arg(buf, count, &value);
-	if (rv <= 0)
-		return -EINVAL;
+	rv = kstrtoint(buf, 0, &value);
+	if (rv < 0)
+		return rv;
 	ret = asus_gps_switch(asus, !!value);
 	if (ret)
 		return ret;
 	rfkill_set_sw_state(asus->gps.rfkill, !value);
-	return rv;
+	return count;
 }
 static DEVICE_ATTR_RW(gps);
 
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index adecc1c..26e4cbc 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -27,6 +27,7 @@
 #include <linux/input/sparse-keymap.h>
 #include <linux/fb.h>
 #include <linux/dmi.h>
+#include <linux/i8042.h>
 
 #include "asus-wmi.h"
 
@@ -55,10 +56,34 @@
 
 static struct quirk_entry *quirks;
 
+static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
+			      struct serio *port)
+{
+	static bool extended;
+	bool ret = false;
+
+	if (str & I8042_STR_AUXDATA)
+		return false;
+
+	if (unlikely(data == 0xe1)) {
+		extended = true;
+		ret = true;
+	} else if (unlikely(extended)) {
+		extended = false;
+		ret = true;
+	}
+
+	return ret;
+}
+
 static struct quirk_entry quirk_asus_unknown = {
 	.wapf = 0,
 };
 
+static struct quirk_entry quirk_asus_q500a = {
+	.i8042_filter = asus_q500a_i8042_filter,
+};
+
 /*
  * For those machines that need software to control bt/wifi status
  * and can't adjust brightness through ACPI interface
@@ -87,6 +112,10 @@
 	.no_rfkill = true,
 };
 
+static struct quirk_entry quirk_asus_ux303ub = {
+	.wmi_backlight_native = true,
+};
+
 static int dmi_matched(const struct dmi_system_id *dmi)
 {
 	quirks = dmi->driver_data;
@@ -96,6 +125,15 @@
 static const struct dmi_system_id asus_quirks[] = {
 	{
 		.callback = dmi_matched,
+		.ident = "ASUSTeK COMPUTER INC. Q500A",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Q500A"),
+		},
+		.driver_data = &quirk_asus_q500a,
+	},
+	{
+		.callback = dmi_matched,
 		.ident = "ASUSTeK COMPUTER INC. U32U",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
@@ -351,11 +389,22 @@
 		},
 		.driver_data = &quirk_no_rfkill,
 	},
+	{
+		.callback = dmi_matched,
+		.ident = "ASUSTeK COMPUTER INC. UX303UB",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "UX303UB"),
+		},
+		.driver_data = &quirk_asus_ux303ub,
+	},
 	{},
 };
 
 static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
 {
+	int ret;
+
 	quirks = &quirk_asus_unknown;
 	dmi_check_system(asus_quirks);
 
@@ -367,6 +416,15 @@
 		quirks->wapf = wapf;
 	else
 		wapf = quirks->wapf;
+
+	if (quirks->i8042_filter) {
+		ret = i8042_install_filter(quirks->i8042_filter);
+		if (ret) {
+			pr_warn("Unable to install key filter\n");
+			return;
+		}
+		pr_info("Using i8042 filter function for receiving events\n");
+	}
 }
 
 static const struct key_entry asus_nb_wmi_keymap[] = {
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 7c093a0..ce6ca31 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -2084,6 +2084,9 @@
 	if (asus->driver->quirks->wmi_backlight_power)
 		acpi_video_set_dmi_backlight_type(acpi_backlight_vendor);
 
+	if (asus->driver->quirks->wmi_backlight_native)
+		acpi_video_set_dmi_backlight_type(acpi_backlight_native);
+
 	if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
 		err = asus_wmi_backlight_init(asus);
 		if (err && err != -ENODEV)
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index 5de1df5..0e19014 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -28,6 +28,7 @@
 #define _ASUS_WMI_H_
 
 #include <linux/platform_device.h>
+#include <linux/i8042.h>
 
 #define ASUS_WMI_KEY_IGNORE (-1)
 #define ASUS_WMI_BRN_DOWN	0x20
@@ -43,6 +44,7 @@
 	bool scalar_panel_brightness;
 	bool store_backlight_power;
 	bool wmi_backlight_power;
+	bool wmi_backlight_native;
 	int wapf;
 	/*
 	 * For machines with AMD graphic chips, it will send out WMI event
@@ -51,6 +53,9 @@
 	 * and let the ACPI interrupt to send out the key event.
 	 */
 	int no_display_toggle;
+
+	bool (*i8042_filter)(unsigned char data, unsigned char str,
+			     struct serio *serio);
 };
 
 struct asus_wmi_driver {
diff --git a/drivers/platform/x86/dell-smo8800.c b/drivers/platform/x86/dell-smo8800.c
index 0aec4fd..37e6460 100644
--- a/drivers/platform/x86/dell-smo8800.c
+++ b/drivers/platform/x86/dell-smo8800.c
@@ -24,6 +24,7 @@
 #include <linux/acpi.h>
 #include <linux/interrupt.h>
 #include <linux/miscdevice.h>
+#include <linux/uaccess.h>
 
 struct smo8800_device {
 	u32 irq;                     /* acpi device irq */
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index d1a091b..a232394 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -933,6 +933,13 @@
 			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"),
 		},
 	},
+	{
+		.ident = "Lenovo YOGA 910-13IKB",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 910-13IKB"),
+		},
+	},
 	{}
 };
 
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 520b58a..e8b1b83 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -100,7 +100,7 @@
 	struct dentry *dir, *file;
 
 	dir = debugfs_create_dir("pmc_core", NULL);
-	if (IS_ERR_OR_NULL(dir))
+	if (!dir)
 		return -ENOMEM;
 
 	pmcdev->dbgfs_dir = dir;
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index a511d51..0bf51d5 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -522,48 +522,36 @@
 static int ipc_create_punit_device(void)
 {
 	struct platform_device *pdev;
-	int ret;
+	const struct platform_device_info pdevinfo = {
+		.parent = ipcdev.dev,
+		.name = PUNIT_DEVICE_NAME,
+		.id = -1,
+		.res = punit_res_array,
+		.num_res = ARRAY_SIZE(punit_res_array),
+		};
 
-	pdev = platform_device_alloc(PUNIT_DEVICE_NAME, -1);
-	if (!pdev) {
-		dev_err(ipcdev.dev, "Failed to alloc punit platform device\n");
-		return -ENOMEM;
-	}
+	pdev = platform_device_register_full(&pdevinfo);
+	if (IS_ERR(pdev))
+		return PTR_ERR(pdev);
 
-	pdev->dev.parent = ipcdev.dev;
-	ret = platform_device_add_resources(pdev, punit_res_array,
-					    ARRAY_SIZE(punit_res_array));
-	if (ret) {
-		dev_err(ipcdev.dev, "Failed to add platform punit resources\n");
-		goto err;
-	}
-
-	ret = platform_device_add(pdev);
-	if (ret) {
-		dev_err(ipcdev.dev, "Failed to add punit platform device\n");
-		goto err;
-	}
 	ipcdev.punit_dev = pdev;
 
 	return 0;
-err:
-	platform_device_put(pdev);
-	return ret;
 }
 
 static int ipc_create_tco_device(void)
 {
 	struct platform_device *pdev;
 	struct resource *res;
-	int ret;
-
-	pdev = platform_device_alloc(TCO_DEVICE_NAME, -1);
-	if (!pdev) {
-		dev_err(ipcdev.dev, "Failed to alloc tco platform device\n");
-		return -ENOMEM;
-	}
-
-	pdev->dev.parent = ipcdev.dev;
+	const struct platform_device_info pdevinfo = {
+		.parent = ipcdev.dev,
+		.name = TCO_DEVICE_NAME,
+		.id = -1,
+		.res = tco_res,
+		.num_res = ARRAY_SIZE(tco_res),
+		.data = &tco_info,
+		.size_data = sizeof(tco_info),
+		};
 
 	res = tco_res + TCO_RESOURCE_ACPI_IO;
 	res->start = ipcdev.acpi_io_base + TCO_BASE_OFFSET;
@@ -577,45 +565,26 @@
 	res->start = ipcdev.gcr_base + TCO_PMC_OFFSET;
 	res->end = res->start + TCO_PMC_SIZE - 1;
 
-	ret = platform_device_add_resources(pdev, tco_res, ARRAY_SIZE(tco_res));
-	if (ret) {
-		dev_err(ipcdev.dev, "Failed to add tco platform resources\n");
-		goto err;
-	}
+	pdev = platform_device_register_full(&pdevinfo);
+	if (IS_ERR(pdev))
+		return PTR_ERR(pdev);
 
-	ret = platform_device_add_data(pdev, &tco_info, sizeof(tco_info));
-	if (ret) {
-		dev_err(ipcdev.dev, "Failed to add tco platform data\n");
-		goto err;
-	}
-
-	ret = platform_device_add(pdev);
-	if (ret) {
-		dev_err(ipcdev.dev, "Failed to add tco platform device\n");
-		goto err;
-	}
 	ipcdev.tco_dev = pdev;
 
 	return 0;
-err:
-	platform_device_put(pdev);
-	return ret;
 }
 
 static int ipc_create_telemetry_device(void)
 {
 	struct platform_device *pdev;
 	struct resource *res;
-	int ret;
-
-	pdev = platform_device_alloc(TELEMETRY_DEVICE_NAME, -1);
-	if (!pdev) {
-		dev_err(ipcdev.dev,
-			"Failed to allocate telemetry platform device\n");
-		return -ENOMEM;
-	}
-
-	pdev->dev.parent = ipcdev.dev;
+	const struct platform_device_info pdevinfo = {
+		.parent = ipcdev.dev,
+		.name = TELEMETRY_DEVICE_NAME,
+		.id = -1,
+		.res = telemetry_res,
+		.num_res = ARRAY_SIZE(telemetry_res),
+		};
 
 	res = telemetry_res + TELEMETRY_RESOURCE_PUNIT_SSRAM;
 	res->start = ipcdev.telem_punit_ssram_base;
@@ -625,26 +594,13 @@
 	res->start = ipcdev.telem_pmc_ssram_base;
 	res->end = res->start + ipcdev.telem_pmc_ssram_size - 1;
 
-	ret = platform_device_add_resources(pdev, telemetry_res,
-					    ARRAY_SIZE(telemetry_res));
-	if (ret) {
-		dev_err(ipcdev.dev,
-			"Failed to add telemetry platform resources\n");
-		goto err;
-	}
+	pdev = platform_device_register_full(&pdevinfo);
+	if (IS_ERR(pdev))
+		return PTR_ERR(pdev);
 
-	ret = platform_device_add(pdev);
-	if (ret) {
-		dev_err(ipcdev.dev,
-			"Failed to add telemetry platform device\n");
-		goto err;
-	}
 	ipcdev.telemetry_dev = pdev;
 
 	return 0;
-err:
-	platform_device_put(pdev);
-	return ret;
 }
 
 static int ipc_create_pmc_devices(void)
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 1dba359..c890a49 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4116,7 +4116,7 @@
 
 	if (ret > 0) {
 		struct inode *inode = file_inode(file);
-		inode->i_atime = current_fs_time(inode->i_sb);
+		inode->i_atime = current_time(inode);
 	}
 
 	return ret;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 9d60a40..074bf2f 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -321,10 +321,9 @@
 static acpi_status tci_raw(struct toshiba_acpi_dev *dev,
 			   const u32 in[TCI_WORDS], u32 out[TCI_WORDS])
 {
+	union acpi_object in_objs[TCI_WORDS], out_objs[TCI_WORDS + 1];
 	struct acpi_object_list params;
-	union acpi_object in_objs[TCI_WORDS];
 	struct acpi_buffer results;
-	union acpi_object out_objs[TCI_WORDS + 1];
 	acpi_status status;
 	int i;
 
@@ -387,9 +386,8 @@
 {
 	u32 in[TCI_WORDS] = { SCI_OPEN, 0, 0, 0, 0, 0 };
 	u32 out[TCI_WORDS];
-	acpi_status status;
+	acpi_status status = tci_raw(dev, in, out);
 
-	status = tci_raw(dev, in, out);
 	if  (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to open SCI failed\n");
 		return 0;
@@ -425,9 +423,8 @@
 {
 	u32 in[TCI_WORDS] = { SCI_CLOSE, 0, 0, 0, 0, 0 };
 	u32 out[TCI_WORDS];
-	acpi_status status;
+	acpi_status status = tci_raw(dev, in, out);
 
-	status = tci_raw(dev, in, out);
 	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to close SCI failed\n");
 		return;
@@ -479,10 +476,15 @@
 
 	status = tci_raw(dev, in, out);
 	sci_close(dev);
-	if (ACPI_FAILURE(status))
+	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to query Illumination support failed\n");
-	else if (out[0] == TOS_SUCCESS)
-		dev->illumination_supported = 1;
+		return;
+	}
+
+	if (out[0] != TOS_SUCCESS)
+		return;
+
+	dev->illumination_supported = 1;
 }
 
 static void toshiba_illumination_set(struct led_classdev *cdev,
@@ -509,7 +511,8 @@
 {
 	struct toshiba_acpi_dev *dev = container_of(cdev,
 			struct toshiba_acpi_dev, led_dev);
-	u32 state, result;
+	u32 result;
+	u32 state;
 
 	/* First request : initialize communication. */
 	if (!sci_open(dev))
@@ -546,24 +549,28 @@
 	sci_close(dev);
 	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to query kbd illumination support failed\n");
-	} else if (out[0] == TOS_SUCCESS) {
-		/*
-		 * Check for keyboard backlight timeout max value,
-		 * previous kbd backlight implementation set this to
-		 * 0x3c0003, and now the new implementation set this
-		 * to 0x3c001a, use this to distinguish between them.
-		 */
-		if (out[3] == SCI_KBD_TIME_MAX)
-			dev->kbd_type = 2;
-		else
-			dev->kbd_type = 1;
-		/* Get the current keyboard backlight mode */
-		dev->kbd_mode = out[2] & SCI_KBD_MODE_MASK;
-		/* Get the current time (1-60 seconds) */
-		dev->kbd_time = out[2] >> HCI_MISC_SHIFT;
-		/* Flag as supported */
-		dev->kbd_illum_supported = 1;
+		return;
 	}
+
+	if (out[0] != TOS_SUCCESS)
+		return;
+
+	/*
+	 * Check for keyboard backlight timeout max value,
+	 * previous kbd backlight implementation set this to
+	 * 0x3c0003, and now the new implementation set this
+	 * to 0x3c001a, use this to distinguish between them.
+	 */
+	if (out[3] == SCI_KBD_TIME_MAX)
+		dev->kbd_type = 2;
+	else
+		dev->kbd_type = 1;
+	/* Get the current keyboard backlight mode */
+	dev->kbd_mode = out[2] & SCI_KBD_MODE_MASK;
+	/* Get the current time (1-60 seconds) */
+	dev->kbd_time = out[2] >> HCI_MISC_SHIFT;
+	/* Flag as supported */
+	dev->kbd_illum_supported = 1;
 }
 
 static int toshiba_kbd_illum_status_set(struct toshiba_acpi_dev *dev, u32 time)
@@ -672,9 +679,9 @@
 /* Eco Mode support */
 static void toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
 {
-	acpi_status status;
 	u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 0, 0, 0 };
 	u32 out[TCI_WORDS];
+	acpi_status status;
 
 	dev->eco_supported = 0;
 	dev->eco_led_registered = false;
@@ -682,7 +689,10 @@
 	status = tci_raw(dev, in, out);
 	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to get ECO led failed\n");
-	} else if (out[0] == TOS_INPUT_DATA_ERROR) {
+		return;
+	}
+
+	if (out[0] == TOS_INPUT_DATA_ERROR) {
 		/*
 		 * If we receive 0x8300 (Input Data Error), it means that the
 		 * LED device is present, but that we just screwed the input
@@ -694,10 +704,15 @@
 		 */
 		in[3] = 1;
 		status = tci_raw(dev, in, out);
-		if (ACPI_FAILURE(status))
+		if (ACPI_FAILURE(status)) {
 			pr_err("ACPI call to get ECO led failed\n");
-		else if (out[0] == TOS_SUCCESS)
-			dev->eco_supported = 1;
+			return;
+		}
+
+		if (out[0] != TOS_SUCCESS)
+			return;
+
+		dev->eco_supported = 1;
 	}
 }
 
@@ -714,10 +729,11 @@
 	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to get ECO led failed\n");
 		return LED_OFF;
-	} else if (out[0] != TOS_SUCCESS) {
-		return LED_OFF;
 	}
 
+	if (out[0] != TOS_SUCCESS)
+		return LED_OFF;
+
 	return out[2] ? LED_FULL : LED_OFF;
 }
 
@@ -751,10 +767,15 @@
 	 * this call also serves as initialization
 	 */
 	status = tci_raw(dev, in, out);
-	if (ACPI_FAILURE(status))
+	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to query the accelerometer failed\n");
-	else if (out[0] == TOS_SUCCESS)
-		dev->accelerometer_supported = 1;
+		return;
+	}
+
+	if (out[0] != TOS_SUCCESS)
+		return;
+
+	dev->accelerometer_supported = 1;
 }
 
 static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev,
@@ -769,15 +790,18 @@
 	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to query the accelerometer failed\n");
 		return -EIO;
-	} else if (out[0] == TOS_NOT_SUPPORTED) {
-		return -ENODEV;
-	} else if (out[0] == TOS_SUCCESS) {
-		*xy = out[2];
-		*z = out[4];
-		return 0;
 	}
 
-	return -EIO;
+	if (out[0] == TOS_NOT_SUPPORTED)
+		return -ENODEV;
+
+	if (out[0] != TOS_SUCCESS)
+		return -EIO;
+
+	*xy = out[2];
+	*z = out[4];
+
+	return 0;
 }
 
 /* Sleep (Charge and Music) utilities support */
@@ -797,24 +821,29 @@
 		pr_err("ACPI call to get USB Sleep and Charge mode failed\n");
 		sci_close(dev);
 		return;
-	} else if (out[0] == TOS_NOT_SUPPORTED) {
+	}
+
+	if (out[0] != TOS_SUCCESS) {
 		sci_close(dev);
 		return;
-	} else if (out[0] == TOS_SUCCESS) {
-		dev->usbsc_mode_base = out[4];
 	}
 
+	dev->usbsc_mode_base = out[4];
+
 	in[5] = SCI_USB_CHARGE_BAT_LVL;
 	status = tci_raw(dev, in, out);
 	sci_close(dev);
 	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to get USB Sleep and Charge mode failed\n");
-	} else if (out[0] == TOS_SUCCESS) {
-		dev->usbsc_bat_level = out[2];
-		/* Flag as supported */
-		dev->usb_sleep_charge_supported = 1;
+		return;
 	}
 
+	if (out[0] != TOS_SUCCESS)
+		return;
+
+	dev->usbsc_bat_level = out[2];
+	/* Flag as supported */
+	dev->usb_sleep_charge_supported = 1;
 }
 
 static int toshiba_usb_sleep_charge_get(struct toshiba_acpi_dev *dev,
@@ -868,14 +897,19 @@
 	sci_close(dev);
 	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to get USB S&C battery level failed\n");
-	} else if (out[0] == TOS_NOT_SUPPORTED) {
-		return -ENODEV;
-	} else if (out[0] == TOS_SUCCESS) {
-		*mode = out[2];
-		return 0;
+		return -EIO;
 	}
 
-	return -EIO;
+	if (out[0] == TOS_NOT_SUPPORTED)
+		return -ENODEV;
+
+	if (out[0] != TOS_SUCCESS)
+		return -EIO;
+
+	*mode = out[2];
+
+	return 0;
+
 }
 
 static int toshiba_sleep_functions_status_set(struct toshiba_acpi_dev *dev,
@@ -892,9 +926,12 @@
 	in[5] = SCI_USB_CHARGE_BAT_LVL;
 	status = tci_raw(dev, in, out);
 	sci_close(dev);
-	if (ACPI_FAILURE(status))
+	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to set USB S&C battery level failed\n");
-	else if (out[0] == TOS_NOT_SUPPORTED)
+		return -EIO;
+	}
+
+	if (out[0] == TOS_NOT_SUPPORTED)
 		return -ENODEV;
 
 	return out[0] == TOS_SUCCESS ? 0 : -EIO;
@@ -915,14 +952,18 @@
 	sci_close(dev);
 	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to get USB Rapid Charge failed\n");
-	} else if (out[0] == TOS_NOT_SUPPORTED) {
-		return -ENODEV;
-	} else if (out[0] == TOS_SUCCESS || out[0] == TOS_SUCCESS2) {
-		*state = out[2];
-		return 0;
+		return -EIO;
 	}
 
-	return -EIO;
+	if (out[0] == TOS_NOT_SUPPORTED)
+		return -ENODEV;
+
+	if (out[0] != TOS_SUCCESS && out[0] != TOS_SUCCESS2)
+		return -EIO;
+
+	*state = out[2];
+
+	return 0;
 }
 
 static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev,
@@ -939,9 +980,12 @@
 	in[5] = SCI_USB_CHARGE_RAPID_DSP;
 	status = tci_raw(dev, in, out);
 	sci_close(dev);
-	if (ACPI_FAILURE(status))
+	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to set USB Rapid Charge failed\n");
-	else if (out[0] == TOS_NOT_SUPPORTED)
+		return -EIO;
+	}
+
+	if (out[0] == TOS_NOT_SUPPORTED)
 		return -ENODEV;
 
 	return (out[0] == TOS_SUCCESS || out[0] == TOS_SUCCESS2) ? 0 : -EIO;
@@ -1097,14 +1141,18 @@
 	status = tci_raw(dev, in, out);
 	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to get System type failed\n");
-	} else if (out[0] == TOS_NOT_SUPPORTED) {
-		return -ENODEV;
-	} else if (out[0] == TOS_SUCCESS) {
-		*type = out[3];
-		return 0;
+		return -EIO;
 	}
 
-	return -EIO;
+	if (out[0] == TOS_NOT_SUPPORTED)
+		return -ENODEV;
+
+	if (out[0] != TOS_SUCCESS)
+		return -EIO;
+
+	*type = out[3];
+
+	return 0;
 }
 
 /* Wireless status (RFKill, WLAN, BT, WWAN) */
@@ -1154,7 +1202,6 @@
 	 */
 	in[3] = HCI_WIRELESS_WWAN;
 	status = tci_raw(dev, in, out);
-
 	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to get WWAN status failed\n");
 		return;
@@ -1174,7 +1221,6 @@
 
 	in[3] = HCI_WIRELESS_WWAN_STATUS;
 	status = tci_raw(dev, in, out);
-
 	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to set WWAN status failed\n");
 		return -EIO;
@@ -1193,7 +1239,6 @@
 	 */
 	in[3] = HCI_WIRELESS_WWAN_POWER;
 	status = tci_raw(dev, in, out);
-
 	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to set WWAN power failed\n");
 		return -EIO;
@@ -1216,8 +1261,10 @@
 	dev->max_cooling_method = 0;
 
 	status = tci_raw(dev, in, out);
-	if (ACPI_FAILURE(status))
+	if (ACPI_FAILURE(status)) {
 		pr_err("ACPI call to get Cooling Method failed\n");
+		return;
+	}
 
 	if (out[0] != TOS_SUCCESS && out[0] != TOS_SUCCESS2)
 		return;
@@ -1244,7 +1291,7 @@
 	u32 result = hci_write(dev, HCI_COOLING_METHOD, state);
 
 	if (result == TOS_FAILURE)
-		pr_err("ACPI call to get Cooling Method failed\n");
+		pr_err("ACPI call to set Cooling Method failed\n");
 
 	if (result == TOS_NOT_SUPPORTED)
 		return -ENODEV;
@@ -1282,9 +1329,9 @@
 /* LCD Brightness */
 static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
 {
+	int brightness = 0;
 	u32 result;
 	u32 value;
-	int brightness = 0;
 
 	if (dev->tr_backlight_supported) {
 		int ret = get_tr_backlight_status(dev, &value);
@@ -1301,10 +1348,10 @@
 		pr_err("ACPI call to get LCD Brightness failed\n");
 	else if (result == TOS_NOT_SUPPORTED)
 		return -ENODEV;
-	if (result == TOS_SUCCESS)
-		return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT);
 
-	return -EIO;
+	return result == TOS_SUCCESS ?
+			brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT) :
+			-EIO;
 }
 
 static int get_lcd_brightness(struct backlight_device *bd)
@@ -1325,15 +1372,15 @@
 
 	levels = dev->backlight_dev->props.max_brightness + 1;
 	value = get_lcd_brightness(dev->backlight_dev);
-	if (value >= 0) {
-		seq_printf(m, "brightness:              %d\n", value);
-		seq_printf(m, "brightness_levels:       %d\n", levels);
-		return 0;
+	if (value < 0) {
+		pr_err("Error reading LCD brightness\n");
+		return value;
 	}
 
-	pr_err("Error reading LCD brightness\n");
+	seq_printf(m, "brightness:              %d\n", value);
+	seq_printf(m, "brightness_levels:       %d\n", levels);
 
-	return -EIO;
+	return 0;
 }
 
 static int lcd_proc_open(struct inode *inode, struct file *file)
@@ -1377,7 +1424,7 @@
 	struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
 	char cmd[42];
 	size_t len;
-	int levels = dev->backlight_dev->props.max_brightness + 1;
+	int levels;
 	int value;
 
 	len = min(count, sizeof(cmd) - 1);
@@ -1385,6 +1432,7 @@
 		return -EFAULT;
 	cmd[len] = '\0';
 
+	levels = dev->backlight_dev->props.max_brightness + 1;
 	if (sscanf(cmd, " brightness : %i", &value) != 1 &&
 	    value < 0 && value > levels)
 		return -EINVAL;
@@ -1420,20 +1468,21 @@
 static int video_proc_show(struct seq_file *m, void *v)
 {
 	struct toshiba_acpi_dev *dev = m->private;
+	int is_lcd, is_crt, is_tv;
 	u32 value;
 
-	if (!get_video_status(dev, &value)) {
-		int is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0;
-		int is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0;
-		int is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0;
+	if (get_video_status(dev, &value))
+		return -EIO;
 
-		seq_printf(m, "lcd_out:                 %d\n", is_lcd);
-		seq_printf(m, "crt_out:                 %d\n", is_crt);
-		seq_printf(m, "tv_out:                  %d\n", is_tv);
-		return 0;
-	}
+	is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0;
+	is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0;
+	is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0;
 
-	return -EIO;
+	seq_printf(m, "lcd_out:                 %d\n", is_lcd);
+	seq_printf(m, "crt_out:                 %d\n", is_crt);
+	seq_printf(m, "tv_out:                  %d\n", is_tv);
+
+	return 0;
 }
 
 static int video_proc_open(struct inode *inode, struct file *file)
@@ -1447,10 +1496,8 @@
 	struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
 	char *buffer;
 	char *cmd;
+	int lcd_out, crt_out, tv_out;
 	int remain = count;
-	int lcd_out = -1;
-	int crt_out = -1;
-	int tv_out = -1;
 	int value;
 	int ret;
 	u32 video_out;
@@ -1486,6 +1533,7 @@
 
 	kfree(cmd);
 
+	lcd_out = crt_out = tv_out = -1;
 	ret = get_video_status(dev, &video_out);
 	if (!ret) {
 		unsigned int new_video_out = video_out;
@@ -1980,8 +2028,8 @@
 				      const char *buf, size_t count)
 {
 	struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
-	u32 mode;
 	int state;
+	u32 mode;
 	int ret;
 
 	ret = kstrtoint(buf, 0, &state);
@@ -2021,9 +2069,8 @@
 					       char *buf)
 {
 	struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+	int bat_lvl, status;
 	u32 state;
-	int bat_lvl;
-	int status;
 	int ret;
 	int tmp;
 
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 5db495dd..be1d137 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -80,7 +80,9 @@
 	if (ACPI_FAILURE(result)) {
 		pr_err("ACPI call to query Bluetooth presence failed\n");
 		return -ENXIO;
-	} else if (!bt_present) {
+	}
+
+	if (!bt_present) {
 		pr_info("Bluetooth device not present\n");
 		return -ENODEV;
 	}
diff --git a/drivers/platform/x86/toshiba_haps.c b/drivers/platform/x86/toshiba_haps.c
index 7f2afc6..b3dec52 100644
--- a/drivers/platform/x86/toshiba_haps.c
+++ b/drivers/platform/x86/toshiba_haps.c
@@ -59,7 +59,7 @@
 		return -EIO;
 	}
 
-	pr_info("HDD protection level set to: %d\n", level);
+	pr_debug("HDD protection level set to: %d\n", level);
 
 	return 0;
 }
@@ -141,7 +141,7 @@
  */
 static void toshiba_haps_notify(struct acpi_device *device, u32 event)
 {
-	pr_info("Received event: 0x%x", event);
+	pr_debug("Received event: 0x%x", event);
 
 	acpi_bus_generate_netlink_event(device->pnp.device_class,
 					dev_name(&device->dev),
@@ -168,9 +168,13 @@
 	 * A non existent device as well as having (only)
 	 * Solid State Drives can cause the call to fail.
 	 */
-	status = acpi_evaluate_integer(handle, "_STA", NULL,
-				       &hdd_present);
-	if (ACPI_FAILURE(status) || !hdd_present) {
+	status = acpi_evaluate_integer(handle, "_STA", NULL, &hdd_present);
+	if (ACPI_FAILURE(status)) {
+		pr_err("ACPI call to query HDD protection failed\n");
+		return 0;
+	}
+
+	if (!hdd_present) {
 		pr_info("HDD protection not available or using SSD\n");
 		return 0;
 	}
diff --git a/drivers/pps/Kconfig b/drivers/pps/Kconfig
index 7512e98..564a51a 100644
--- a/drivers/pps/Kconfig
+++ b/drivers/pps/Kconfig
@@ -31,7 +31,7 @@
 
 config NTP_PPS
 	bool "PPS kernel consumer support"
-	depends on !NO_HZ
+	depends on !NO_HZ_COMMON
 	help
 	  This option adds support for direct in-kernel time
 	  synchronization using an external PPS signal.
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index d637c93..58a97d4 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -193,6 +193,7 @@
 		if (err)
 			break;
 
+		memset(&precise_offset, 0, sizeof(precise_offset));
 		ts = ktime_to_timespec64(xtstamp.device);
 		precise_offset.device.sec = ts.tv_sec;
 		precise_offset.device.nsec = ts.tv_nsec;
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 80a566a..bf01288 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -262,6 +262,15 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called pwm-lpss-platform.
 
+config PWM_MESON
+	tristate "Amlogic Meson PWM driver"
+	depends on ARCH_MESON
+	help
+	  The platform driver for Amlogic Meson PWM controller.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called pwm-meson.
+
 config PWM_MTK_DISP
 	tristate "MediaTek display PWM driver"
 	depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index feef1dd..1194c54 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -24,6 +24,7 @@
 obj-$(CONFIG_PWM_LPSS)		+= pwm-lpss.o
 obj-$(CONFIG_PWM_LPSS_PCI)	+= pwm-lpss-pci.o
 obj-$(CONFIG_PWM_LPSS_PLATFORM)	+= pwm-lpss-platform.o
+obj-$(CONFIG_PWM_MESON)		+= pwm-meson.o
 obj-$(CONFIG_PWM_MTK_DISP)	+= pwm-mtk-disp.o
 obj-$(CONFIG_PWM_MXS)		+= pwm-mxs.o
 obj-$(CONFIG_PWM_OMAP_DMTIMER)	+= pwm-omap-dmtimer.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 0dbd29e..172ef82 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -339,6 +339,8 @@
 	unsigned int i;
 	int ret = 0;
 
+	pwmchip_sysfs_unexport_children(chip);
+
 	mutex_lock(&pwm_lock);
 
 	for (i = 0; i < chip->npwm; i++) {
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index 6510812..01339c1 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -16,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
+#include <linux/slab.h>
 
 #define BERLIN_PWM_EN			0x0
 #define  BERLIN_PWM_ENABLE		BIT(0)
@@ -27,6 +28,13 @@
 #define BERLIN_PWM_TCNT			0xc
 #define  BERLIN_PWM_MAX_TCNT		65535
 
+struct berlin_pwm_channel {
+	u32 enable;
+	u32 ctrl;
+	u32 duty;
+	u32 tcnt;
+};
+
 struct berlin_pwm_chip {
 	struct pwm_chip chip;
 	struct clk *clk;
@@ -55,6 +63,25 @@
 	writel_relaxed(value, chip->base + channel * 0x10 + offset);
 }
 
+static int berlin_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	struct berlin_pwm_channel *channel;
+
+	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+	if (!channel)
+		return -ENOMEM;
+
+	return pwm_set_chip_data(pwm, channel);
+}
+
+static void berlin_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	struct berlin_pwm_channel *channel = pwm_get_chip_data(pwm);
+
+	pwm_set_chip_data(pwm, NULL);
+	kfree(channel);
+}
+
 static int berlin_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm_dev,
 			     int duty_ns, int period_ns)
 {
@@ -137,6 +164,8 @@
 }
 
 static const struct pwm_ops berlin_pwm_ops = {
+	.request = berlin_pwm_request,
+	.free = berlin_pwm_free,
 	.config = berlin_pwm_config,
 	.set_polarity = berlin_pwm_set_polarity,
 	.enable = berlin_pwm_enable,
@@ -204,12 +233,67 @@
 	return ret;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int berlin_pwm_suspend(struct device *dev)
+{
+	struct berlin_pwm_chip *pwm = dev_get_drvdata(dev);
+	unsigned int i;
+
+	for (i = 0; i < pwm->chip.npwm; i++) {
+		struct berlin_pwm_channel *channel;
+
+		channel = pwm_get_chip_data(&pwm->chip.pwms[i]);
+		if (!channel)
+			continue;
+
+		channel->enable = berlin_pwm_readl(pwm, i, BERLIN_PWM_ENABLE);
+		channel->ctrl = berlin_pwm_readl(pwm, i, BERLIN_PWM_CONTROL);
+		channel->duty = berlin_pwm_readl(pwm, i, BERLIN_PWM_DUTY);
+		channel->tcnt = berlin_pwm_readl(pwm, i, BERLIN_PWM_TCNT);
+	}
+
+	clk_disable_unprepare(pwm->clk);
+
+	return 0;
+}
+
+static int berlin_pwm_resume(struct device *dev)
+{
+	struct berlin_pwm_chip *pwm = dev_get_drvdata(dev);
+	unsigned int i;
+	int ret;
+
+	ret = clk_prepare_enable(pwm->clk);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < pwm->chip.npwm; i++) {
+		struct berlin_pwm_channel *channel;
+
+		channel = pwm_get_chip_data(&pwm->chip.pwms[i]);
+		if (!channel)
+			continue;
+
+		berlin_pwm_writel(pwm, i, channel->ctrl, BERLIN_PWM_CONTROL);
+		berlin_pwm_writel(pwm, i, channel->duty, BERLIN_PWM_DUTY);
+		berlin_pwm_writel(pwm, i, channel->tcnt, BERLIN_PWM_TCNT);
+		berlin_pwm_writel(pwm, i, channel->enable, BERLIN_PWM_ENABLE);
+	}
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(berlin_pwm_pm_ops, berlin_pwm_suspend,
+			 berlin_pwm_resume);
+
 static struct platform_driver berlin_pwm_driver = {
 	.probe = berlin_pwm_probe,
 	.remove = berlin_pwm_remove,
 	.driver = {
 		.name = "berlin-pwm",
 		.of_match_table = berlin_pwm_match,
+		.pm = &berlin_pwm_pm_ops,
 	},
 };
 module_platform_driver(berlin_pwm_driver);
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 99b9acc..f6ca4e8 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -38,7 +38,7 @@
 	struct {
 		struct cros_ec_command msg;
 		struct ec_params_pwm_set_duty params;
-	} buf;
+	} __packed buf;
 	struct ec_params_pwm_set_duty *params = &buf.params;
 	struct cros_ec_command *msg = &buf.msg;
 
@@ -65,7 +65,7 @@
 			struct ec_params_pwm_get_duty params;
 			struct ec_response_pwm_get_duty resp;
 		};
-	} buf;
+	} __packed buf;
 	struct ec_params_pwm_get_duty *params = &buf.params;
 	struct ec_response_pwm_get_duty *resp = &buf.resp;
 	struct cros_ec_command *msg = &buf.msg;
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index 19dc64c..d7f5f7d 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -413,14 +413,18 @@
 	}
 
 	for (i = 0; i < lpc18xx_pwm->chip.npwm; i++) {
+		struct lpc18xx_pwm_data *data;
+
 		pwm = &lpc18xx_pwm->chip.pwms[i];
-		pwm->chip_data = devm_kzalloc(lpc18xx_pwm->dev,
-					      sizeof(struct lpc18xx_pwm_data),
-					      GFP_KERNEL);
-		if (!pwm->chip_data) {
+
+		data = devm_kzalloc(lpc18xx_pwm->dev, sizeof(*data),
+				    GFP_KERNEL);
+		if (!data) {
 			ret = -ENOMEM;
 			goto remove_pwmchip;
 		}
+
+		pwm_set_chip_data(pwm, data);
 	}
 
 	platform_set_drvdata(pdev, lpc18xx_pwm);
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
new file mode 100644
index 0000000..381871b
--- /dev/null
+++ b/drivers/pwm/pwm-meson.c
@@ -0,0 +1,529 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Amlogic, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Amlogic, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define REG_PWM_A		0x0
+#define REG_PWM_B		0x4
+#define PWM_HIGH_SHIFT		16
+
+#define REG_MISC_AB		0x8
+#define MISC_B_CLK_EN		BIT(23)
+#define MISC_A_CLK_EN		BIT(15)
+#define MISC_CLK_DIV_MASK	0x7f
+#define MISC_B_CLK_DIV_SHIFT	16
+#define MISC_A_CLK_DIV_SHIFT	8
+#define MISC_B_CLK_SEL_SHIFT	6
+#define MISC_A_CLK_SEL_SHIFT	4
+#define MISC_CLK_SEL_WIDTH	2
+#define MISC_B_EN		BIT(1)
+#define MISC_A_EN		BIT(0)
+
+static const unsigned int mux_reg_shifts[] = {
+	MISC_A_CLK_SEL_SHIFT,
+	MISC_B_CLK_SEL_SHIFT
+};
+
+struct meson_pwm_channel {
+	unsigned int hi;
+	unsigned int lo;
+	u8 pre_div;
+
+	struct pwm_state state;
+
+	struct clk *clk_parent;
+	struct clk_mux mux;
+	struct clk *clk;
+};
+
+struct meson_pwm_data {
+	const char * const *parent_names;
+};
+
+struct meson_pwm {
+	struct pwm_chip chip;
+	const struct meson_pwm_data *data;
+	void __iomem *base;
+	u8 inverter_mask;
+	spinlock_t lock;
+};
+
+static inline struct meson_pwm *to_meson_pwm(struct pwm_chip *chip)
+{
+	return container_of(chip, struct meson_pwm, chip);
+}
+
+static int meson_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
+	struct device *dev = chip->dev;
+	int err;
+
+	if (!channel)
+		return -ENODEV;
+
+	if (channel->clk_parent) {
+		err = clk_set_parent(channel->clk, channel->clk_parent);
+		if (err < 0) {
+			dev_err(dev, "failed to set parent %s for %s: %d\n",
+				__clk_get_name(channel->clk_parent),
+				__clk_get_name(channel->clk), err);
+				return err;
+		}
+	}
+
+	err = clk_prepare_enable(channel->clk);
+	if (err < 0) {
+		dev_err(dev, "failed to enable clock %s: %d\n",
+			__clk_get_name(channel->clk), err);
+		return err;
+	}
+
+	chip->ops->get_state(chip, pwm, &channel->state);
+
+	return 0;
+}
+
+static void meson_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
+
+	if (channel)
+		clk_disable_unprepare(channel->clk);
+}
+
+static int meson_pwm_calc(struct meson_pwm *meson,
+			  struct meson_pwm_channel *channel, unsigned int id,
+			  unsigned int duty, unsigned int period)
+{
+	unsigned int pre_div, cnt, duty_cnt;
+	unsigned long fin_freq = -1, fin_ns;
+
+	if (~(meson->inverter_mask >> id) & 0x1)
+		duty = period - duty;
+
+	if (period == channel->state.period &&
+	    duty == channel->state.duty_cycle)
+		return 0;
+
+	fin_freq = clk_get_rate(channel->clk);
+	if (fin_freq == 0) {
+		dev_err(meson->chip.dev, "invalid source clock frequency\n");
+		return -EINVAL;
+	}
+
+	dev_dbg(meson->chip.dev, "fin_freq: %lu Hz\n", fin_freq);
+	fin_ns = NSEC_PER_SEC / fin_freq;
+
+	/* Calc pre_div with the period */
+	for (pre_div = 0; pre_div < MISC_CLK_DIV_MASK; pre_div++) {
+		cnt = DIV_ROUND_CLOSEST(period, fin_ns * (pre_div + 1));
+		dev_dbg(meson->chip.dev, "fin_ns=%lu pre_div=%u cnt=%u\n",
+			fin_ns, pre_div, cnt);
+		if (cnt <= 0xffff)
+			break;
+	}
+
+	if (pre_div == MISC_CLK_DIV_MASK) {
+		dev_err(meson->chip.dev, "unable to get period pre_div\n");
+		return -EINVAL;
+	}
+
+	dev_dbg(meson->chip.dev, "period=%u pre_div=%u cnt=%u\n", period,
+		pre_div, cnt);
+
+	if (duty == period) {
+		channel->pre_div = pre_div;
+		channel->hi = cnt;
+		channel->lo = 0;
+	} else if (duty == 0) {
+		channel->pre_div = pre_div;
+		channel->hi = 0;
+		channel->lo = cnt;
+	} else {
+		/* Then check is we can have the duty with the same pre_div */
+		duty_cnt = DIV_ROUND_CLOSEST(duty, fin_ns * (pre_div + 1));
+		if (duty_cnt > 0xffff) {
+			dev_err(meson->chip.dev, "unable to get duty cycle\n");
+			return -EINVAL;
+		}
+
+		dev_dbg(meson->chip.dev, "duty=%u pre_div=%u duty_cnt=%u\n",
+			duty, pre_div, duty_cnt);
+
+		channel->pre_div = pre_div;
+		channel->hi = duty_cnt;
+		channel->lo = cnt - duty_cnt;
+	}
+
+	return 0;
+}
+
+static void meson_pwm_enable(struct meson_pwm *meson,
+			     struct meson_pwm_channel *channel,
+			     unsigned int id)
+{
+	u32 value, clk_shift, clk_enable, enable;
+	unsigned int offset;
+
+	switch (id) {
+	case 0:
+		clk_shift = MISC_A_CLK_DIV_SHIFT;
+		clk_enable = MISC_A_CLK_EN;
+		enable = MISC_A_EN;
+		offset = REG_PWM_A;
+		break;
+
+	case 1:
+		clk_shift = MISC_B_CLK_DIV_SHIFT;
+		clk_enable = MISC_B_CLK_EN;
+		enable = MISC_B_EN;
+		offset = REG_PWM_B;
+		break;
+
+	default:
+		return;
+	}
+
+	value = readl(meson->base + REG_MISC_AB);
+	value &= ~(MISC_CLK_DIV_MASK << clk_shift);
+	value |= channel->pre_div << clk_shift;
+	value |= clk_enable;
+	writel(value, meson->base + REG_MISC_AB);
+
+	value = (channel->hi << PWM_HIGH_SHIFT) | channel->lo;
+	writel(value, meson->base + offset);
+
+	value = readl(meson->base + REG_MISC_AB);
+	value |= enable;
+	writel(value, meson->base + REG_MISC_AB);
+}
+
+static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id)
+{
+	u32 value, enable;
+
+	switch (id) {
+	case 0:
+		enable = MISC_A_EN;
+		break;
+
+	case 1:
+		enable = MISC_B_EN;
+		break;
+
+	default:
+		return;
+	}
+
+	value = readl(meson->base + REG_MISC_AB);
+	value &= ~enable;
+	writel(value, meson->base + REG_MISC_AB);
+}
+
+static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+			   struct pwm_state *state)
+{
+	struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
+	struct meson_pwm *meson = to_meson_pwm(chip);
+	unsigned long flags;
+	int err = 0;
+
+	if (!state)
+		return -EINVAL;
+
+	spin_lock_irqsave(&meson->lock, flags);
+
+	if (!state->enabled) {
+		meson_pwm_disable(meson, pwm->hwpwm);
+		channel->state.enabled = false;
+
+		goto unlock;
+	}
+
+	if (state->period != channel->state.period ||
+	    state->duty_cycle != channel->state.duty_cycle ||
+	    state->polarity != channel->state.polarity) {
+		if (channel->state.enabled) {
+			meson_pwm_disable(meson, pwm->hwpwm);
+			channel->state.enabled = false;
+		}
+
+		if (state->polarity != channel->state.polarity) {
+			if (state->polarity == PWM_POLARITY_NORMAL)
+				meson->inverter_mask |= BIT(pwm->hwpwm);
+			else
+				meson->inverter_mask &= ~BIT(pwm->hwpwm);
+		}
+
+		err = meson_pwm_calc(meson, channel, pwm->hwpwm,
+				     state->duty_cycle, state->period);
+		if (err < 0)
+			goto unlock;
+
+		channel->state.polarity = state->polarity;
+		channel->state.period = state->period;
+		channel->state.duty_cycle = state->duty_cycle;
+	}
+
+	if (state->enabled && !channel->state.enabled) {
+		meson_pwm_enable(meson, channel, pwm->hwpwm);
+		channel->state.enabled = true;
+	}
+
+unlock:
+	spin_unlock_irqrestore(&meson->lock, flags);
+	return err;
+}
+
+static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+				struct pwm_state *state)
+{
+	struct meson_pwm *meson = to_meson_pwm(chip);
+	u32 value, mask;
+
+	if (!state)
+		return;
+
+	switch (pwm->hwpwm) {
+	case 0:
+		mask = MISC_A_EN;
+		break;
+
+	case 1:
+		mask = MISC_B_EN;
+		break;
+
+	default:
+		return;
+	}
+
+	value = readl(meson->base + REG_MISC_AB);
+	state->enabled = (value & mask) != 0;
+}
+
+static const struct pwm_ops meson_pwm_ops = {
+	.request = meson_pwm_request,
+	.free = meson_pwm_free,
+	.apply = meson_pwm_apply,
+	.get_state = meson_pwm_get_state,
+	.owner = THIS_MODULE,
+};
+
+static const char * const pwm_meson8b_parent_names[] = {
+	"xtal", "vid_pll", "fclk_div4", "fclk_div3"
+};
+
+static const struct meson_pwm_data pwm_meson8b_data = {
+	.parent_names = pwm_meson8b_parent_names,
+};
+
+static const char * const pwm_gxbb_parent_names[] = {
+	"xtal", "hdmi_pll", "fclk_div4", "fclk_div3"
+};
+
+static const struct meson_pwm_data pwm_gxbb_data = {
+	.parent_names = pwm_gxbb_parent_names,
+};
+
+static const struct of_device_id meson_pwm_matches[] = {
+	{ .compatible = "amlogic,meson8b-pwm", .data = &pwm_meson8b_data },
+	{ .compatible = "amlogic,meson-gxbb-pwm", .data = &pwm_gxbb_data },
+	{},
+};
+MODULE_DEVICE_TABLE(of, meson_pwm_matches);
+
+static int meson_pwm_init_channels(struct meson_pwm *meson,
+				   struct meson_pwm_channel *channels)
+{
+	struct device *dev = meson->chip.dev;
+	struct device_node *np = dev->of_node;
+	struct clk_init_data init;
+	unsigned int i;
+	char name[255];
+	int err;
+
+	for (i = 0; i < meson->chip.npwm; i++) {
+		struct meson_pwm_channel *channel = &channels[i];
+
+		snprintf(name, sizeof(name), "%s#mux%u", np->full_name, i);
+
+		init.name = name;
+		init.ops = &clk_mux_ops;
+		init.flags = CLK_IS_BASIC;
+		init.parent_names = meson->data->parent_names;
+		init.num_parents = 1 << MISC_CLK_SEL_WIDTH;
+
+		channel->mux.reg = meson->base + REG_MISC_AB;
+		channel->mux.shift = mux_reg_shifts[i];
+		channel->mux.mask = BIT(MISC_CLK_SEL_WIDTH) - 1;
+		channel->mux.flags = 0;
+		channel->mux.lock = &meson->lock;
+		channel->mux.table = NULL;
+		channel->mux.hw.init = &init;
+
+		channel->clk = devm_clk_register(dev, &channel->mux.hw);
+		if (IS_ERR(channel->clk)) {
+			err = PTR_ERR(channel->clk);
+			dev_err(dev, "failed to register %s: %d\n", name, err);
+			return err;
+		}
+
+		snprintf(name, sizeof(name), "clkin%u", i);
+
+		channel->clk_parent = devm_clk_get(dev, name);
+		if (IS_ERR(channel->clk_parent)) {
+			err = PTR_ERR(channel->clk_parent);
+			if (err == -EPROBE_DEFER)
+				return err;
+
+			channel->clk_parent = NULL;
+		}
+	}
+
+	return 0;
+}
+
+static void meson_pwm_add_channels(struct meson_pwm *meson,
+				   struct meson_pwm_channel *channels)
+{
+	unsigned int i;
+
+	for (i = 0; i < meson->chip.npwm; i++)
+		pwm_set_chip_data(&meson->chip.pwms[i], &channels[i]);
+}
+
+static int meson_pwm_probe(struct platform_device *pdev)
+{
+	struct meson_pwm_channel *channels;
+	struct meson_pwm *meson;
+	struct resource *regs;
+	int err;
+
+	meson = devm_kzalloc(&pdev->dev, sizeof(*meson), GFP_KERNEL);
+	if (!meson)
+		return -ENOMEM;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	meson->base = devm_ioremap_resource(&pdev->dev, regs);
+	if (IS_ERR(meson->base))
+		return PTR_ERR(meson->base);
+
+	meson->chip.dev = &pdev->dev;
+	meson->chip.ops = &meson_pwm_ops;
+	meson->chip.base = -1;
+	meson->chip.npwm = 2;
+	meson->chip.of_xlate = of_pwm_xlate_with_flags;
+	meson->chip.of_pwm_n_cells = 3;
+
+	meson->data = of_device_get_match_data(&pdev->dev);
+	meson->inverter_mask = BIT(meson->chip.npwm) - 1;
+
+	channels = devm_kcalloc(&pdev->dev, meson->chip.npwm, sizeof(*meson),
+				GFP_KERNEL);
+	if (!channels)
+		return -ENOMEM;
+
+	err = meson_pwm_init_channels(meson, channels);
+	if (err < 0)
+		return err;
+
+	err = pwmchip_add(&meson->chip);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to register PWM chip: %d\n", err);
+		return err;
+	}
+
+	meson_pwm_add_channels(meson, channels);
+
+	platform_set_drvdata(pdev, meson);
+
+	return 0;
+}
+
+static int meson_pwm_remove(struct platform_device *pdev)
+{
+	struct meson_pwm *meson = platform_get_drvdata(pdev);
+
+	return pwmchip_remove(&meson->chip);
+}
+
+static struct platform_driver meson_pwm_driver = {
+	.driver = {
+		.name = "meson-pwm",
+		.of_match_table = meson_pwm_matches,
+	},
+	.probe = meson_pwm_probe,
+	.remove = meson_pwm_remove,
+};
+module_platform_driver(meson_pwm_driver);
+
+MODULE_ALIAS("platform:meson-pwm");
+MODULE_DESCRIPTION("Amlogic Meson PWM Generator driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index 0ad3385..893940d 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -18,30 +18,40 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
 #include <linux/slab.h>
 
 #define DISP_PWM_EN		0x00
-#define PWM_ENABLE_MASK		BIT(0)
 
-#define DISP_PWM_COMMIT		0x08
-#define PWM_COMMIT_MASK		BIT(0)
-
-#define DISP_PWM_CON_0		0x10
 #define PWM_CLKDIV_SHIFT	16
 #define PWM_CLKDIV_MAX		0x3ff
 #define PWM_CLKDIV_MASK		(PWM_CLKDIV_MAX << PWM_CLKDIV_SHIFT)
 
-#define DISP_PWM_CON_1		0x14
 #define PWM_PERIOD_BIT_WIDTH	12
 #define PWM_PERIOD_MASK		((1 << PWM_PERIOD_BIT_WIDTH) - 1)
 
 #define PWM_HIGH_WIDTH_SHIFT	16
 #define PWM_HIGH_WIDTH_MASK	(0x1fff << PWM_HIGH_WIDTH_SHIFT)
 
+struct mtk_pwm_data {
+	u32 enable_mask;
+	unsigned int con0;
+	u32 con0_sel;
+	unsigned int con1;
+
+	bool has_commit;
+	unsigned int commit;
+	unsigned int commit_mask;
+
+	unsigned int bls_debug;
+	u32 bls_debug_mask;
+};
+
 struct mtk_disp_pwm {
 	struct pwm_chip chip;
+	const struct mtk_pwm_data *data;
 	struct clk *clk_main;
 	struct clk *clk_mm;
 	void __iomem *base;
@@ -106,12 +116,21 @@
 		return err;
 	}
 
-	mtk_disp_pwm_update_bits(mdp, DISP_PWM_CON_0, PWM_CLKDIV_MASK,
+	mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
+				 PWM_CLKDIV_MASK,
 				 clk_div << PWM_CLKDIV_SHIFT);
-	mtk_disp_pwm_update_bits(mdp, DISP_PWM_CON_1,
-				 PWM_PERIOD_MASK | PWM_HIGH_WIDTH_MASK, value);
-	mtk_disp_pwm_update_bits(mdp, DISP_PWM_COMMIT, PWM_COMMIT_MASK, 1);
-	mtk_disp_pwm_update_bits(mdp, DISP_PWM_COMMIT, PWM_COMMIT_MASK, 0);
+	mtk_disp_pwm_update_bits(mdp, mdp->data->con1,
+				 PWM_PERIOD_MASK | PWM_HIGH_WIDTH_MASK,
+				 value);
+
+	if (mdp->data->has_commit) {
+		mtk_disp_pwm_update_bits(mdp, mdp->data->commit,
+					 mdp->data->commit_mask,
+					 mdp->data->commit_mask);
+		mtk_disp_pwm_update_bits(mdp, mdp->data->commit,
+					 mdp->data->commit_mask,
+					 0x0);
+	}
 
 	clk_disable(mdp->clk_mm);
 	clk_disable(mdp->clk_main);
@@ -134,7 +153,8 @@
 		return err;
 	}
 
-	mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, PWM_ENABLE_MASK, 1);
+	mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
+				 mdp->data->enable_mask);
 
 	return 0;
 }
@@ -143,7 +163,8 @@
 {
 	struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
 
-	mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, PWM_ENABLE_MASK, 0);
+	mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
+				 0x0);
 
 	clk_disable(mdp->clk_mm);
 	clk_disable(mdp->clk_main);
@@ -166,6 +187,8 @@
 	if (!mdp)
 		return -ENOMEM;
 
+	mdp->data = of_device_get_match_data(&pdev->dev);
+
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	mdp->base = devm_ioremap_resource(&pdev->dev, r);
 	if (IS_ERR(mdp->base))
@@ -200,6 +223,19 @@
 
 	platform_set_drvdata(pdev, mdp);
 
+	/*
+	 * For MT2701, disable double buffer before writing register
+	 * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
+	 */
+	if (!mdp->data->has_commit) {
+		mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
+					 mdp->data->bls_debug_mask,
+					 mdp->data->bls_debug_mask);
+		mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
+					 mdp->data->con0_sel,
+					 mdp->data->con0_sel);
+	}
+
 	return 0;
 
 disable_clk_mm:
@@ -221,9 +257,30 @@
 	return ret;
 }
 
+static const struct mtk_pwm_data mt2701_pwm_data = {
+	.enable_mask = BIT(16),
+	.con0 = 0xa8,
+	.con0_sel = 0x2,
+	.con1 = 0xac,
+	.has_commit = false,
+	.bls_debug = 0xb0,
+	.bls_debug_mask = 0x3,
+};
+
+static const struct mtk_pwm_data mt8173_pwm_data = {
+	.enable_mask = BIT(0),
+	.con0 = 0x10,
+	.con0_sel = 0x0,
+	.con1 = 0x14,
+	.has_commit = true,
+	.commit = 0x8,
+	.commit_mask = 0x1,
+};
+
 static const struct of_device_id mtk_disp_pwm_of_match[] = {
-	{ .compatible = "mediatek,mt8173-disp-pwm" },
-	{ .compatible = "mediatek,mt6595-disp-pwm" },
+	{ .compatible = "mediatek,mt2701-disp-pwm", .data = &mt2701_pwm_data},
+	{ .compatible = "mediatek,mt6595-disp-pwm", .data = &mt8173_pwm_data},
+	{ .compatible = "mediatek,mt8173-disp-pwm", .data = &mt8173_pwm_data},
 	{ }
 };
 MODULE_DEVICE_TABLE(of, mtk_disp_pwm_of_match);
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index ada2d32..f113cda 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -193,9 +193,18 @@
 	 * divider settings and choose the lowest divisor that can generate
 	 * frequencies lower than requested.
 	 */
-	for (div = variant->div_base; div < 4; ++div)
-		if ((rate >> (variant->bits + div)) < freq)
-			break;
+	if (variant->bits < 32) {
+		/* Only for s3c24xx */
+		for (div = variant->div_base; div < 4; ++div)
+			if ((rate >> (variant->bits + div)) < freq)
+				break;
+	} else {
+		/*
+		 * Other variants have enough counter bits to generate any
+		 * requested rate, so no need to check higher divisors.
+		 */
+		div = variant->div_base;
+	}
 
 	pwm_samsung_set_divisor(chip, chan, BIT(div));
 
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
index 92abbd5..dd82dc8 100644
--- a/drivers/pwm/pwm-sti.c
+++ b/drivers/pwm/pwm-sti.c
@@ -1,8 +1,10 @@
 /*
- * PWM device driver for ST SoCs.
- * Author: Ajit Pal Singh <ajitpal.singh@st.com>
+ * PWM device driver for ST SoCs
  *
- * Copyright (C) 2013-2014 STMicroelectronics (R&D) Limited
+ * Copyright (C) 2013-2016 STMicroelectronics (R&D) Limited
+ *
+ * Author: Ajit Pal Singh <ajitpal.singh@st.com>
+ *         Lee Jones <lee.jones@linaro.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -11,6 +13,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/interrupt.h>
 #include <linux/math64.h>
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
@@ -18,43 +21,82 @@
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
 #include <linux/regmap.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/time.h>
+#include <linux/wait.h>
 
-#define STI_DS_REG(ch)	(4 * (ch))	/* Channel's Duty Cycle register */
-#define STI_PWMCR	0x50		/* Control/Config register */
-#define STI_INTEN	0x54		/* Interrupt Enable/Disable register */
-#define PWM_PRESCALE_LOW_MASK		0x0f
-#define PWM_PRESCALE_HIGH_MASK		0xf0
+#define PWM_OUT_VAL(x)	(0x00 + (4 * (x))) /* Device's Duty Cycle register */
+#define PWM_CPT_VAL(x)	(0x10 + (4 * (x))) /* Capture value */
+#define PWM_CPT_EDGE(x) (0x30 + (4 * (x))) /* Edge to capture on */
+
+#define STI_PWM_CTRL		0x50	/* Control/Config register */
+#define STI_INT_EN		0x54	/* Interrupt Enable/Disable register */
+#define STI_INT_STA		0x58	/* Interrupt Status register */
+#define PWM_INT_ACK		0x5c
+#define PWM_PRESCALE_LOW_MASK	0x0f
+#define PWM_PRESCALE_HIGH_MASK	0xf0
+#define PWM_CPT_EDGE_MASK	0x03
+#define PWM_INT_ACK_MASK	0x1ff
+
+#define STI_MAX_CPT_DEVS	4
+#define CPT_DC_MAX		0xff
 
 /* Regfield IDs */
 enum {
+	/* Bits in PWM_CTRL*/
 	PWMCLK_PRESCALE_LOW,
 	PWMCLK_PRESCALE_HIGH,
-	PWM_EN,
-	PWM_INT_EN,
+	CPTCLK_PRESCALE,
+
+	PWM_OUT_EN,
+	PWM_CPT_EN,
+
+	PWM_CPT_INT_EN,
+	PWM_CPT_INT_STAT,
 
 	/* Keep last */
 	MAX_REGFIELDS
 };
 
+/*
+ * Each capture input can be programmed to detect rising-edge, falling-edge,
+ * either edge or neither egde.
+ */
+enum sti_cpt_edge {
+	CPT_EDGE_DISABLED,
+	CPT_EDGE_RISING,
+	CPT_EDGE_FALLING,
+	CPT_EDGE_BOTH,
+};
+
+struct sti_cpt_ddata {
+	u32 snapshot[3];
+	unsigned int index;
+	struct mutex lock;
+	wait_queue_head_t wait;
+};
+
 struct sti_pwm_compat_data {
 	const struct reg_field *reg_fields;
-	unsigned int num_chan;
+	unsigned int pwm_num_devs;
+	unsigned int cpt_num_devs;
 	unsigned int max_pwm_cnt;
 	unsigned int max_prescale;
 };
 
 struct sti_pwm_chip {
 	struct device *dev;
-	struct clk *clk;
-	unsigned long clk_rate;
+	struct clk *pwm_clk;
+	struct clk *cpt_clk;
 	struct regmap *regmap;
 	struct sti_pwm_compat_data *cdata;
 	struct regmap_field *prescale_low;
 	struct regmap_field *prescale_high;
-	struct regmap_field *pwm_en;
-	struct regmap_field *pwm_int_en;
+	struct regmap_field *pwm_out_en;
+	struct regmap_field *pwm_cpt_en;
+	struct regmap_field *pwm_cpt_int_en;
+	struct regmap_field *pwm_cpt_int_stat;
 	struct pwm_chip chip;
 	struct pwm_device *cur;
 	unsigned long configured;
@@ -64,10 +106,13 @@
 };
 
 static const struct reg_field sti_pwm_regfields[MAX_REGFIELDS] = {
-	[PWMCLK_PRESCALE_LOW]	= REG_FIELD(STI_PWMCR, 0, 3),
-	[PWMCLK_PRESCALE_HIGH]	= REG_FIELD(STI_PWMCR, 11, 14),
-	[PWM_EN]		= REG_FIELD(STI_PWMCR, 9, 9),
-	[PWM_INT_EN]		= REG_FIELD(STI_INTEN, 0, 0),
+	[PWMCLK_PRESCALE_LOW] = REG_FIELD(STI_PWM_CTRL, 0, 3),
+	[PWMCLK_PRESCALE_HIGH] = REG_FIELD(STI_PWM_CTRL, 11, 14),
+	[CPTCLK_PRESCALE] = REG_FIELD(STI_PWM_CTRL, 4, 8),
+	[PWM_OUT_EN] = REG_FIELD(STI_PWM_CTRL, 9, 9),
+	[PWM_CPT_EN] = REG_FIELD(STI_PWM_CTRL, 10, 10),
+	[PWM_CPT_INT_EN] = REG_FIELD(STI_INT_EN, 1, 4),
+	[PWM_CPT_INT_STAT] = REG_FIELD(STI_INT_STA, 1, 4),
 };
 
 static inline struct sti_pwm_chip *to_sti_pwmchip(struct pwm_chip *chip)
@@ -82,61 +127,68 @@
 				unsigned int *prescale)
 {
 	struct sti_pwm_compat_data *cdata = pc->cdata;
-	unsigned long val;
+	unsigned long clk_rate;
+	unsigned long value;
 	unsigned int ps;
 
-	/*
-	 * prescale = ((period_ns * clk_rate) / (10^9 * (max_pwm_count + 1)) - 1
-	 */
-	val = NSEC_PER_SEC / pc->clk_rate;
-	val *= cdata->max_pwm_cnt + 1;
-
-	if (period % val) {
+	clk_rate = clk_get_rate(pc->pwm_clk);
+	if (!clk_rate) {
+		dev_err(pc->dev, "failed to get clock rate\n");
 		return -EINVAL;
-	} else {
-		ps  = period / val - 1;
-		if (ps > cdata->max_prescale)
-			return -EINVAL;
 	}
+
+	/*
+	 * prescale = ((period_ns * clk_rate) / (10^9 * (max_pwm_cnt + 1)) - 1
+	 */
+	value = NSEC_PER_SEC / clk_rate;
+	value *= cdata->max_pwm_cnt + 1;
+
+	if (period % value)
+		return -EINVAL;
+
+	ps  = period / value - 1;
+	if (ps > cdata->max_prescale)
+		return -EINVAL;
+
 	*prescale = ps;
 
 	return 0;
 }
 
 /*
- * For STiH4xx PWM IP, the PWM period is fixed to 256 local clock cycles.
- * The only way to change the period (apart from changing the PWM input clock)
- * is to change the PWM clock prescaler.
- * The prescaler is of 8 bits, so 256 prescaler values and hence
- * 256 possible period values are supported (for a particular clock rate).
- * The requested period will be applied only if it matches one of these
- * 256 values.
+ * For STiH4xx PWM IP, the PWM period is fixed to 256 local clock cycles. The
+ * only way to change the period (apart from changing the PWM input clock) is
+ * to change the PWM clock prescaler.
+ *
+ * The prescaler is of 8 bits, so 256 prescaler values and hence 256 possible
+ * period values are supported (for a particular clock rate). The requested
+ * period will be applied only if it matches one of these 256 values.
  */
 static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
-			 int duty_ns, int period_ns)
+			  int duty_ns, int period_ns)
 {
 	struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
 	struct sti_pwm_compat_data *cdata = pc->cdata;
+	unsigned int ncfg, value, prescale = 0;
 	struct pwm_device *cur = pc->cur;
 	struct device *dev = pc->dev;
-	unsigned int prescale = 0, pwmvalx;
-	int ret;
-	unsigned int ncfg;
 	bool period_same = false;
+	int ret;
 
 	ncfg = hweight_long(pc->configured);
 	if (ncfg)
 		period_same = (period_ns == pwm_get_period(cur));
 
-	/* Allow configuration changes if one of the
-	 * following conditions satisfy.
-	 * 1. No channels have been configured.
-	 * 2. Only one channel has been configured and the new request
-	 *    is for the same channel.
-	 * 3. Only one channel has been configured and the new request is
-	 *    for a new channel and period of the new channel is same as
-	 *    the current configured period.
-	 * 4. More than one channels are configured and period of the new
+	/*
+	 * Allow configuration changes if one of the following conditions
+	 * satisfy.
+	 * 1. No devices have been configured.
+	 * 2. Only one device has been configured and the new request is for
+	 *    the same device.
+	 * 3. Only one device has been configured and the new request is for
+	 *    a new device and period of the new device is same as the current
+	 *    configured period.
+	 * 4. More than one devices are configured and period of the new
 	 *    requestis the same as the current period.
 	 */
 	if (!ncfg ||
@@ -144,7 +196,11 @@
 	    ((ncfg == 1) && (pwm->hwpwm != cur->hwpwm) && period_same) ||
 	    ((ncfg > 1) && period_same)) {
 		/* Enable clock before writing to PWM registers. */
-		ret = clk_enable(pc->clk);
+		ret = clk_enable(pc->pwm_clk);
+		if (ret)
+			return ret;
+
+		ret = clk_enable(pc->cpt_clk);
 		if (ret)
 			return ret;
 
@@ -153,15 +209,15 @@
 			if (ret)
 				goto clk_dis;
 
-			ret =
-			regmap_field_write(pc->prescale_low,
-					   prescale & PWM_PRESCALE_LOW_MASK);
+			value = prescale & PWM_PRESCALE_LOW_MASK;
+
+			ret = regmap_field_write(pc->prescale_low, value);
 			if (ret)
 				goto clk_dis;
 
-			ret =
-			regmap_field_write(pc->prescale_high,
-				(prescale & PWM_PRESCALE_HIGH_MASK) >> 4);
+			value = (prescale & PWM_PRESCALE_HIGH_MASK) >> 4;
+
+			ret = regmap_field_write(pc->prescale_high, value);
 			if (ret)
 				goto clk_dis;
 		}
@@ -172,25 +228,26 @@
 		 * PWM pulse = (max_pwm_count + 1) local cycles,
 		 * that is continuous pulse: signal never goes low.
 		 */
-		pwmvalx = cdata->max_pwm_cnt * duty_ns / period_ns;
+		value = cdata->max_pwm_cnt * duty_ns / period_ns;
 
-		ret = regmap_write(pc->regmap, STI_DS_REG(pwm->hwpwm), pwmvalx);
+		ret = regmap_write(pc->regmap, PWM_OUT_VAL(pwm->hwpwm), value);
 		if (ret)
 			goto clk_dis;
 
-		ret = regmap_field_write(pc->pwm_int_en, 0);
+		ret = regmap_field_write(pc->pwm_cpt_int_en, 0);
 
 		set_bit(pwm->hwpwm, &pc->configured);
 		pc->cur = pwm;
 
-		dev_dbg(dev, "prescale:%u, period:%i, duty:%i, pwmvalx:%u\n",
-			prescale, period_ns, duty_ns, pwmvalx);
+		dev_dbg(dev, "prescale:%u, period:%i, duty:%i, value:%u\n",
+			prescale, period_ns, duty_ns, value);
 	} else {
 		return -EINVAL;
 	}
 
 clk_dis:
-	clk_disable(pc->clk);
+	clk_disable(pc->pwm_clk);
+	clk_disable(pc->cpt_clk);
 	return ret;
 }
 
@@ -201,23 +258,30 @@
 	int ret = 0;
 
 	/*
-	 * Since we have a common enable for all PWM channels,
-	 * do not enable if already enabled.
+	 * Since we have a common enable for all PWM devices, do not enable if
+	 * already enabled.
 	 */
 	mutex_lock(&pc->sti_pwm_lock);
+
 	if (!pc->en_count) {
-		ret = clk_enable(pc->clk);
+		ret = clk_enable(pc->pwm_clk);
 		if (ret)
 			goto out;
 
-		ret = regmap_field_write(pc->pwm_en, 1);
+		ret = clk_enable(pc->cpt_clk);
+		if (ret)
+			goto out;
+
+		ret = regmap_field_write(pc->pwm_out_en, 1);
 		if (ret) {
-			dev_err(dev, "failed to enable PWM device:%d\n",
-				pwm->hwpwm);
+			dev_err(dev, "failed to enable PWM device %u: %d\n",
+				pwm->hwpwm, ret);
 			goto out;
 		}
 	}
+
 	pc->en_count++;
+
 out:
 	mutex_unlock(&pc->sti_pwm_lock);
 	return ret;
@@ -228,13 +292,17 @@
 	struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
 
 	mutex_lock(&pc->sti_pwm_lock);
+
 	if (--pc->en_count) {
 		mutex_unlock(&pc->sti_pwm_lock);
 		return;
 	}
-	regmap_field_write(pc->pwm_en, 0);
 
-	clk_disable(pc->clk);
+	regmap_field_write(pc->pwm_out_en, 0);
+
+	clk_disable(pc->pwm_clk);
+	clk_disable(pc->cpt_clk);
+
 	mutex_unlock(&pc->sti_pwm_lock);
 }
 
@@ -245,7 +313,90 @@
 	clear_bit(pwm->hwpwm, &pc->configured);
 }
 
+static int sti_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
+			   struct pwm_capture *result, unsigned long timeout)
+{
+	struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
+	struct sti_pwm_compat_data *cdata = pc->cdata;
+	struct sti_cpt_ddata *ddata = pwm_get_chip_data(pwm);
+	struct device *dev = pc->dev;
+	unsigned int effective_ticks;
+	unsigned long long high, low;
+	int ret;
+
+	if (pwm->hwpwm >= cdata->cpt_num_devs) {
+		dev_err(dev, "device %u is not valid\n", pwm->hwpwm);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ddata->lock);
+	ddata->index = 0;
+
+	/* Prepare capture measurement */
+	regmap_write(pc->regmap, PWM_CPT_EDGE(pwm->hwpwm), CPT_EDGE_RISING);
+	regmap_field_write(pc->pwm_cpt_int_en, BIT(pwm->hwpwm));
+
+	/* Enable capture */
+	ret = regmap_field_write(pc->pwm_cpt_en, 1);
+	if (ret) {
+		dev_err(dev, "failed to enable PWM capture %u: %d\n",
+			pwm->hwpwm, ret);
+		goto out;
+	}
+
+	ret = wait_event_interruptible_timeout(ddata->wait, ddata->index > 1,
+					       msecs_to_jiffies(timeout));
+
+	regmap_write(pc->regmap, PWM_CPT_EDGE(pwm->hwpwm), CPT_EDGE_DISABLED);
+
+	if (ret == -ERESTARTSYS)
+		goto out;
+
+	switch (ddata->index) {
+	case 0:
+	case 1:
+		/*
+		 * Getting here could mean:
+		 *  - input signal is constant of less than 1 Hz
+		 *  - there is no input signal at all
+		 *
+		 * In such case the frequency is rounded down to 0
+		 */
+		result->period = 0;
+		result->duty_cycle = 0;
+
+		break;
+
+	case 2:
+		/* We have everying we need */
+		high = ddata->snapshot[1] - ddata->snapshot[0];
+		low = ddata->snapshot[2] - ddata->snapshot[1];
+
+		effective_ticks = clk_get_rate(pc->cpt_clk);
+
+		result->period = (high + low) * NSEC_PER_SEC;
+		result->period /= effective_ticks;
+
+		result->duty_cycle = high * NSEC_PER_SEC;
+		result->duty_cycle /= effective_ticks;
+
+		break;
+
+	default:
+		dev_err(dev, "internal error\n");
+		break;
+	}
+
+out:
+	/* Disable capture */
+	regmap_field_write(pc->pwm_cpt_en, 0);
+
+	mutex_unlock(&ddata->lock);
+	return ret;
+}
+
 static const struct pwm_ops sti_pwm_ops = {
+	.capture = sti_pwm_capture,
 	.config = sti_pwm_config,
 	.enable = sti_pwm_enable,
 	.disable = sti_pwm_disable,
@@ -253,17 +404,98 @@
 	.owner = THIS_MODULE,
 };
 
+static irqreturn_t sti_pwm_interrupt(int irq, void *data)
+{
+	struct sti_pwm_chip *pc = data;
+	struct device *dev = pc->dev;
+	struct sti_cpt_ddata *ddata;
+	int devicenum;
+	unsigned int cpt_int_stat;
+	unsigned int reg;
+	int ret = IRQ_NONE;
+
+	ret = regmap_field_read(pc->pwm_cpt_int_stat, &cpt_int_stat);
+	if (ret)
+		return ret;
+
+	while (cpt_int_stat) {
+		devicenum = ffs(cpt_int_stat) - 1;
+
+		ddata = pwm_get_chip_data(&pc->chip.pwms[devicenum]);
+
+		/*
+		 * Capture input:
+		 *    _______                   _______
+		 *   |       |                 |       |
+		 * __|       |_________________|       |________
+		 *   ^0      ^1                ^2
+		 *
+		 * Capture start by the first available rising edge. When a
+		 * capture event occurs, capture value (CPT_VALx) is stored,
+		 * index incremented, capture edge changed.
+		 *
+		 * After the capture, if the index > 1, we have collected the
+		 * necessary data so we signal the thread waiting for it and
+		 * disable the capture by setting capture edge to none
+		 */
+
+		regmap_read(pc->regmap,
+			    PWM_CPT_VAL(devicenum),
+			    &ddata->snapshot[ddata->index]);
+
+		switch (ddata->index) {
+		case 0:
+		case 1:
+			regmap_read(pc->regmap, PWM_CPT_EDGE(devicenum), &reg);
+			reg ^= PWM_CPT_EDGE_MASK;
+			regmap_write(pc->regmap, PWM_CPT_EDGE(devicenum), reg);
+
+			ddata->index++;
+			break;
+
+		case 2:
+			regmap_write(pc->regmap,
+				     PWM_CPT_EDGE(devicenum),
+				     CPT_EDGE_DISABLED);
+			wake_up(&ddata->wait);
+			break;
+
+		default:
+			dev_err(dev, "Internal error\n");
+		}
+
+		cpt_int_stat &= ~BIT_MASK(devicenum);
+
+		ret = IRQ_HANDLED;
+	}
+
+	/* Just ACK everything */
+	regmap_write(pc->regmap, PWM_INT_ACK, PWM_INT_ACK_MASK);
+
+	return ret;
+}
+
 static int sti_pwm_probe_dt(struct sti_pwm_chip *pc)
 {
 	struct device *dev = pc->dev;
 	const struct reg_field *reg_fields;
 	struct device_node *np = dev->of_node;
 	struct sti_pwm_compat_data *cdata = pc->cdata;
-	u32 num_chan;
+	u32 num_devs;
+	int ret;
 
-	of_property_read_u32(np, "st,pwm-num-chan", &num_chan);
-	if (num_chan)
-		cdata->num_chan = num_chan;
+	ret = of_property_read_u32(np, "st,pwm-num-chan", &num_devs);
+	if (!ret)
+		cdata->pwm_num_devs = num_devs;
+
+	ret = of_property_read_u32(np, "st,capture-num-chan", &num_devs);
+	if (!ret)
+		cdata->cpt_num_devs = num_devs;
+
+	if (!cdata->pwm_num_devs && !cdata->cpt_num_devs) {
+		dev_err(dev, "No channels configured\n");
+		return -EINVAL;
+	}
 
 	reg_fields = cdata->reg_fields;
 
@@ -277,15 +509,26 @@
 	if (IS_ERR(pc->prescale_high))
 		return PTR_ERR(pc->prescale_high);
 
-	pc->pwm_en = devm_regmap_field_alloc(dev, pc->regmap,
-					     reg_fields[PWM_EN]);
-	if (IS_ERR(pc->pwm_en))
-		return PTR_ERR(pc->pwm_en);
 
-	pc->pwm_int_en = devm_regmap_field_alloc(dev, pc->regmap,
-						 reg_fields[PWM_INT_EN]);
-	if (IS_ERR(pc->pwm_int_en))
-		return PTR_ERR(pc->pwm_int_en);
+	pc->pwm_out_en = devm_regmap_field_alloc(dev, pc->regmap,
+						 reg_fields[PWM_OUT_EN]);
+	if (IS_ERR(pc->pwm_out_en))
+		return PTR_ERR(pc->pwm_out_en);
+
+	pc->pwm_cpt_en = devm_regmap_field_alloc(dev, pc->regmap,
+						 reg_fields[PWM_CPT_EN]);
+	if (IS_ERR(pc->pwm_cpt_en))
+		return PTR_ERR(pc->pwm_cpt_en);
+
+	pc->pwm_cpt_int_en = devm_regmap_field_alloc(dev, pc->regmap,
+						reg_fields[PWM_CPT_INT_EN]);
+	if (IS_ERR(pc->pwm_cpt_int_en))
+		return PTR_ERR(pc->pwm_cpt_int_en);
+
+	pc->pwm_cpt_int_stat = devm_regmap_field_alloc(dev, pc->regmap,
+						reg_fields[PWM_CPT_INT_STAT]);
+	if (PTR_ERR_OR_ZERO(pc->pwm_cpt_int_stat))
+		return PTR_ERR(pc->pwm_cpt_int_stat);
 
 	return 0;
 }
@@ -302,7 +545,8 @@
 	struct sti_pwm_compat_data *cdata;
 	struct sti_pwm_chip *pc;
 	struct resource *res;
-	int ret;
+	unsigned int i;
+	int irq, ret;
 
 	pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
 	if (!pc)
@@ -323,14 +567,28 @@
 	if (IS_ERR(pc->regmap))
 		return PTR_ERR(pc->regmap);
 
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "Failed to obtain IRQ\n");
+		return irq;
+	}
+
+	ret = devm_request_irq(&pdev->dev, irq, sti_pwm_interrupt, 0,
+			       pdev->name, pc);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to request IRQ\n");
+		return ret;
+	}
+
 	/*
 	 * Setup PWM data with default values: some values could be replaced
 	 * with specific ones provided from Device Tree.
 	 */
-	cdata->reg_fields   = &sti_pwm_regfields[0];
+	cdata->reg_fields = sti_pwm_regfields;
 	cdata->max_prescale = 0xff;
-	cdata->max_pwm_cnt  = 255;
-	cdata->num_chan     = 1;
+	cdata->max_pwm_cnt = 255;
+	cdata->pwm_num_devs = 0;
+	cdata->cpt_num_devs = 0;
 
 	pc->cdata = cdata;
 	pc->dev = dev;
@@ -341,36 +599,64 @@
 	if (ret)
 		return ret;
 
-	pc->clk = of_clk_get_by_name(dev->of_node, "pwm");
-	if (IS_ERR(pc->clk)) {
+	if (!cdata->pwm_num_devs)
+		goto skip_pwm;
+
+	pc->pwm_clk = of_clk_get_by_name(dev->of_node, "pwm");
+	if (IS_ERR(pc->pwm_clk)) {
 		dev_err(dev, "failed to get PWM clock\n");
-		return PTR_ERR(pc->clk);
+		return PTR_ERR(pc->pwm_clk);
 	}
 
-	pc->clk_rate = clk_get_rate(pc->clk);
-	if (!pc->clk_rate) {
-		dev_err(dev, "failed to get clock rate\n");
-		return -EINVAL;
-	}
-
-	ret = clk_prepare(pc->clk);
+	ret = clk_prepare(pc->pwm_clk);
 	if (ret) {
 		dev_err(dev, "failed to prepare clock\n");
 		return ret;
 	}
 
+skip_pwm:
+	if (!cdata->cpt_num_devs)
+		goto skip_cpt;
+
+	pc->cpt_clk = of_clk_get_by_name(dev->of_node, "capture");
+	if (IS_ERR(pc->cpt_clk)) {
+		dev_err(dev, "failed to get PWM capture clock\n");
+		return PTR_ERR(pc->cpt_clk);
+	}
+
+	ret = clk_prepare(pc->cpt_clk);
+	if (ret) {
+		dev_err(dev, "failed to prepare clock\n");
+		return ret;
+	}
+
+skip_cpt:
 	pc->chip.dev = dev;
 	pc->chip.ops = &sti_pwm_ops;
 	pc->chip.base = -1;
-	pc->chip.npwm = pc->cdata->num_chan;
+	pc->chip.npwm = pc->cdata->pwm_num_devs;
 	pc->chip.can_sleep = true;
 
 	ret = pwmchip_add(&pc->chip);
 	if (ret < 0) {
-		clk_unprepare(pc->clk);
+		clk_unprepare(pc->pwm_clk);
+		clk_unprepare(pc->cpt_clk);
 		return ret;
 	}
 
+	for (i = 0; i < cdata->cpt_num_devs; i++) {
+		struct sti_cpt_ddata *ddata;
+
+		ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+		if (!ddata)
+			return -ENOMEM;
+
+		init_waitqueue_head(&ddata->wait);
+		mutex_init(&ddata->lock);
+
+		pwm_set_chip_data(&pc->chip.pwms[i], ddata);
+	}
+
 	platform_set_drvdata(pdev, pc);
 
 	return 0;
@@ -381,10 +667,11 @@
 	struct sti_pwm_chip *pc = platform_get_drvdata(pdev);
 	unsigned int i;
 
-	for (i = 0; i < pc->cdata->num_chan; i++)
+	for (i = 0; i < pc->cdata->pwm_num_devs; i++)
 		pwm_disable(&pc->chip.pwms[i]);
 
-	clk_unprepare(pc->clk);
+	clk_unprepare(pc->pwm_clk);
+	clk_unprepare(pc->cpt_clk);
 
 	return pwmchip_remove(&pc->chip);
 }
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 03a99a5..b0803f6 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -284,6 +284,12 @@
 	.npwm = 2,
 };
 
+static const struct sun4i_pwm_data sun4i_pwm_data_h3 = {
+	.has_prescaler_bypass = true,
+	.has_rdy = true,
+	.npwm = 1,
+};
+
 static const struct of_device_id sun4i_pwm_dt_ids[] = {
 	{
 		.compatible = "allwinner,sun4i-a10-pwm",
@@ -298,6 +304,9 @@
 		.compatible = "allwinner,sun7i-a20-pwm",
 		.data = &sun4i_pwm_data_a20,
 	}, {
+		.compatible = "allwinner,sun8i-h3-pwm",
+		.data = &sun4i_pwm_data_h3,
+	}, {
 		/* sentinel */
 	},
 };
diff --git a/drivers/pwm/pwm-tipwmss.c b/drivers/pwm/pwm-tipwmss.c
index 829f499..7fa85a1 100644
--- a/drivers/pwm/pwm-tipwmss.c
+++ b/drivers/pwm/pwm-tipwmss.c
@@ -34,7 +34,6 @@
 	struct device_node *node = pdev->dev.of_node;
 
 	pm_runtime_enable(&pdev->dev);
-	pm_runtime_get_sync(&pdev->dev);
 
 	/* Populate all the child nodes here... */
 	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
@@ -46,31 +45,13 @@
 
 static int pwmss_remove(struct platform_device *pdev)
 {
-	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int pwmss_suspend(struct device *dev)
-{
-	pm_runtime_put_sync(dev);
-	return 0;
-}
-
-static int pwmss_resume(struct device *dev)
-{
-	pm_runtime_get_sync(dev);
-	return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(pwmss_pm_ops, pwmss_suspend, pwmss_resume);
-
 static struct platform_driver pwmss_driver = {
 	.driver	= {
 		.name	= "pwmss",
-		.pm	= &pwmss_pm_ops,
 		.of_match_table	= pwmss_of_match,
 	},
 	.probe	= pwmss_probe,
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index 04f7672..7a993b0 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -269,6 +269,22 @@
 		goto out;
 	}
 
+	val |= TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXEN);
+
+	ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
+	if (ret < 0) {
+		dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+		goto out;
+	}
+
+	val &= ~TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXEN);
+
+	ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
+	if (ret < 0) {
+		dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+		goto out;
+	}
+
 	twl->twl6030_toggle3 = val;
 out:
 	mutex_unlock(&twl->mutex);
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 18ed725..0296d81 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -409,6 +409,24 @@
 	}
 }
 
+void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+{
+	struct device *parent;
+	unsigned int i;
+
+	parent = class_find_device(&pwm_class, NULL, chip,
+				   pwmchip_sysfs_match);
+	if (!parent)
+		return;
+
+	for (i = 0; i < chip->npwm; i++) {
+		struct pwm_device *pwm = &chip->pwms[i];
+
+		if (test_bit(PWMF_EXPORTED, &pwm->flags))
+			pwm_unexport_child(parent, pwm);
+	}
+}
+
 static int __init pwm_sysfs_init(void)
 {
 	return class_register(&pwm_class);
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 436dfe8..9013a58 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -892,7 +892,8 @@
 		down_read(&current->mm->mmap_sem);
 		pinned = get_user_pages(
 				(unsigned long)xfer->loc_addr & PAGE_MASK,
-				nr_pages, dir == DMA_FROM_DEVICE, 0,
+				nr_pages,
+				dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
 				page_list, NULL);
 		up_read(&current->mm->mmap_sem);
 
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
index cebc296..bad0e0e 100644
--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -1841,24 +1841,19 @@
 {
 	struct rio_cm_msg msg;
 	void *buf;
-	int ret = 0;
+	int ret;
 
 	if (copy_from_user(&msg, arg, sizeof(msg)))
 		return -EFAULT;
 	if (msg.size > RIO_MAX_MSG_SIZE)
 		return -EINVAL;
 
-	buf = kmalloc(msg.size, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	if (copy_from_user(buf, (void __user *)(uintptr_t)msg.msg, msg.size)) {
-		ret = -EFAULT;
-		goto out;
-	}
+	buf = memdup_user((void __user *)(uintptr_t)msg.msg, msg.size);
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
 
 	ret = riocm_ch_send(msg.ch_num, buf, msg.size);
-out:
+
 	kfree(buf);
 	return ret;
 }
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 3958f50..e0c747a 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -495,7 +495,8 @@
 {
 	struct max8973_chip *mchip = data;
 
-	thermal_zone_device_update(mchip->tz_device);
+	thermal_zone_device_update(mchip->tz_device,
+				   THERMAL_EVENT_UNSPECIFIED);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index d1e0807..e859d14 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -208,14 +208,14 @@
 	  will be called rtc-as3722.
 
 config RTC_DRV_DS1307
-	tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025"
+	tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025, ISL12057"
 	help
 	  If you say yes here you get support for various compatible RTC
 	  chips (often with battery backup) connected with I2C. This driver
 	  should handle DS1307, DS1337, DS1338, DS1339, DS1340, ST M41T00,
-	  EPSON RX-8025 and probably other chips. In some cases the RTC
-	  must already have been initialized (by manufacturing or a
-	  bootloader).
+	  EPSON RX-8025, Intersil ISL12057 and probably other chips. In some
+	  cases the RTC must already have been initialized (by manufacturing or
+	  a bootloader).
 
 	  The first seven registers on these chips hold an RTC, and other
 	  registers may add features such as NVRAM, a trickle charger for
@@ -234,6 +234,20 @@
 	  Say Y here if you want to expose temperature sensor data on
 	  rtc-ds1307 (only DS3231)
 
+config RTC_DRV_DS1307_CENTURY
+	bool "Century bit support for rtc-ds1307"
+	depends on RTC_DRV_DS1307
+	default n
+	help
+	  The DS1307 driver suffered from a bug where it was enabling the
+	  century bit inconditionnally but never used it when reading the time.
+	  It made the driver unable to support dates beyond 2099.
+	  Setting this option will add proper support for the century bit but if
+	  the time was previously set using a kernel predating this option,
+	  reading the date will return a date in the next century.
+	  To solve that, you could boot a kernel without this option set, set
+	  the RTC date and then boot a kernel with this option set.
+
 config RTC_DRV_DS1374
 	tristate "Dallas/Maxim DS1374"
 	help
@@ -374,16 +388,6 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-isl12022.
 
-config RTC_DRV_ISL12057
-	select REGMAP_I2C
-	tristate "Intersil ISL12057"
-	help
-	  If you say yes here you get support for the Intersil ISL12057
-	  I2C RTC chip.
-
-	  This driver can also be built as a module. If so, the module
-	  will be called rtc-isl12057.
-
 config RTC_DRV_X1205
 	tristate "Xicor/Intersil X1205"
 	help
@@ -661,6 +665,7 @@
 	  will be called rtc-ds1343.
 
 config RTC_DRV_DS1347
+	select REGMAP_SPI
 	tristate "Dallas/Maxim DS1347"
 	help
 	  If you say yes here you get support for the
@@ -1201,7 +1206,7 @@
 
 config RTC_DRV_ASM9260
 	tristate "Alphascale asm9260 RTC"
-	depends on MACH_ASM9260
+	depends on MACH_ASM9260 || COMPILE_TEST
 	help
 	  If you say yes here you get support for the RTC on the
 	  Alphascale asm9260 SoC.
@@ -1241,6 +1246,9 @@
 config RTC_DRV_OMAP
 	tristate "TI OMAP Real Time Clock"
 	depends on ARCH_OMAP || ARCH_DAVINCI || COMPILE_TEST
+	depends on OF
+	depends on PINCTRL
+	select GENERIC_PINCONF
 	help
 	  Say "yes" here to support the on chip real time clock
 	  present on TI OMAP1, AM33xx, DA8xx/OMAP-L13x, AM43xx and DRA7xx.
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 8fb994b..1ac694a 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -72,7 +72,6 @@
 obj-$(CONFIG_RTC_DRV_HYM8563)	+= rtc-hym8563.o
 obj-$(CONFIG_RTC_DRV_IMXDI)	+= rtc-imxdi.o
 obj-$(CONFIG_RTC_DRV_ISL12022)	+= rtc-isl12022.o
-obj-$(CONFIG_RTC_DRV_ISL12057)	+= rtc-isl12057.o
 obj-$(CONFIG_RTC_DRV_ISL1208)	+= rtc-isl1208.o
 obj-$(CONFIG_RTC_DRV_JZ4740)	+= rtc-jz4740.o
 obj-$(CONFIG_RTC_DRV_LP8788)	+= rtc-lp8788.o
diff --git a/drivers/rtc/rtc-ac100.c b/drivers/rtc/rtc-ac100.c
index 70b4fd0..9e33618 100644
--- a/drivers/rtc/rtc-ac100.c
+++ b/drivers/rtc/rtc-ac100.c
@@ -327,6 +327,8 @@
 			.flags = 0,
 		};
 
+		of_property_read_string_index(np, "clock-output-names",
+					      i, &init.name);
 		clk->regmap = chip->regmap;
 		clk->offset = AC100_CLKOUT_CTRL1 + i;
 		clk->hw.init = &init;
@@ -552,6 +554,9 @@
 	int ret;
 
 	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
 	platform_set_drvdata(pdev, chip);
 	chip->dev = &pdev->dev;
 	chip->regmap = ac100->regmap;
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c
index 5219916..18a93d3 100644
--- a/drivers/rtc/rtc-asm9260.c
+++ b/drivers/rtc/rtc-asm9260.c
@@ -112,8 +112,6 @@
 	void __iomem		*iobase;
 	struct rtc_device	*rtc;
 	struct clk		*clk;
-	/* io lock */
-	spinlock_t		lock;
 };
 
 static irqreturn_t asm9260_rtc_irq(int irq, void *dev_id)
@@ -122,11 +120,15 @@
 	u32 isr;
 	unsigned long events = 0;
 
+	mutex_lock(&priv->rtc->ops_lock);
 	isr = ioread32(priv->iobase + HW_CIIR);
-	if (!isr)
+	if (!isr) {
+		mutex_unlock(&priv->rtc->ops_lock);
 		return IRQ_NONE;
+	}
 
 	iowrite32(0, priv->iobase + HW_CIIR);
+	mutex_unlock(&priv->rtc->ops_lock);
 
 	events |= RTC_AF | RTC_IRQF;
 
@@ -139,9 +141,7 @@
 {
 	struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
 	u32 ctime0, ctime1, ctime2;
-	unsigned long irq_flags;
 
-	spin_lock_irqsave(&priv->lock, irq_flags);
 	ctime0 = ioread32(priv->iobase + HW_CTIME0);
 	ctime1 = ioread32(priv->iobase + HW_CTIME1);
 	ctime2 = ioread32(priv->iobase + HW_CTIME2);
@@ -155,7 +155,6 @@
 		ctime1 = ioread32(priv->iobase + HW_CTIME1);
 		ctime2 = ioread32(priv->iobase + HW_CTIME2);
 	}
-	spin_unlock_irqrestore(&priv->lock, irq_flags);
 
 	tm->tm_sec  = (ctime0 >> BM_CTIME0_SEC_S)  & BM_CTIME0_SEC_M;
 	tm->tm_min  = (ctime0 >> BM_CTIME0_MIN_S)  & BM_CTIME0_MIN_M;
@@ -174,9 +173,7 @@
 static int asm9260_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
 	struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
-	unsigned long irq_flags;
 
-	spin_lock_irqsave(&priv->lock, irq_flags);
 	/*
 	 * make sure SEC counter will not flip other counter on write time,
 	 * real value will be written at the enf of sequence.
@@ -191,7 +188,6 @@
 	iowrite32(tm->tm_hour, priv->iobase + HW_HOUR);
 	iowrite32(tm->tm_min,  priv->iobase + HW_MIN);
 	iowrite32(tm->tm_sec,  priv->iobase + HW_SEC);
-	spin_unlock_irqrestore(&priv->lock, irq_flags);
 
 	return 0;
 }
@@ -199,9 +195,7 @@
 static int asm9260_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 {
 	struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
-	unsigned long irq_flags;
 
-	spin_lock_irqsave(&priv->lock, irq_flags);
 	alrm->time.tm_year = ioread32(priv->iobase + HW_ALYEAR);
 	alrm->time.tm_mon  = ioread32(priv->iobase + HW_ALMON);
 	alrm->time.tm_mday = ioread32(priv->iobase + HW_ALDOM);
@@ -213,7 +207,6 @@
 
 	alrm->enabled = ioread32(priv->iobase + HW_AMR) ? 1 : 0;
 	alrm->pending = ioread32(priv->iobase + HW_CIIR) ? 1 : 0;
-	spin_unlock_irqrestore(&priv->lock, irq_flags);
 
 	return rtc_valid_tm(&alrm->time);
 }
@@ -221,9 +214,7 @@
 static int asm9260_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 {
 	struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
-	unsigned long irq_flags;
 
-	spin_lock_irqsave(&priv->lock, irq_flags);
 	iowrite32(alrm->time.tm_year, priv->iobase + HW_ALYEAR);
 	iowrite32(alrm->time.tm_mon,  priv->iobase + HW_ALMON);
 	iowrite32(alrm->time.tm_mday, priv->iobase + HW_ALDOM);
@@ -234,7 +225,6 @@
 	iowrite32(alrm->time.tm_sec,  priv->iobase + HW_ALSEC);
 
 	iowrite32(alrm->enabled ? 0 : BM_AMR_OFF, priv->iobase + HW_AMR);
-	spin_unlock_irqrestore(&priv->lock, irq_flags);
 
 	return 0;
 }
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
index 83ac233..de8bf56 100644
--- a/drivers/rtc/rtc-at32ap700x.c
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -187,7 +187,7 @@
 	return ret;
 }
 
-static struct rtc_class_ops at32_rtc_ops = {
+static const struct rtc_class_ops at32_rtc_ops = {
 	.read_time	= at32_rtc_readtime,
 	.set_time	= at32_rtc_settime,
 	.read_alarm	= at32_rtc_readalarm,
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index 0299988..3977424 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -93,8 +93,15 @@
 	if (error)
 		return error;
 
+	/*
+	 * In case of oscillator failure, the register contents should be
+	 * considered invalid. The flag is cleared the next time the RTC is set.
+	 */
+	if (regs.minutes & BQ32K_OF)
+		return -EINVAL;
+
 	tm->tm_sec = bcd2bin(regs.seconds & BQ32K_SECONDS_MASK);
-	tm->tm_min = bcd2bin(regs.minutes & BQ32K_SECONDS_MASK);
+	tm->tm_min = bcd2bin(regs.minutes & BQ32K_MINUTES_MASK);
 	tm->tm_hour = bcd2bin(regs.cent_hours & BQ32K_HOURS_MASK);
 	tm->tm_mday = bcd2bin(regs.date);
 	tm->tm_wday = bcd2bin(regs.day) - 1;
@@ -204,13 +211,10 @@
 
 	/* Check Oscillator Failure flag */
 	error = bq32k_read(dev, &reg, BQ32K_MINUTES, 1);
-	if (!error && (reg & BQ32K_OF)) {
-		dev_warn(dev, "Oscillator Failure. Check RTC battery.\n");
-		reg &= ~BQ32K_OF;
-		error = bq32k_write(dev, &reg, BQ32K_MINUTES, 1);
-	}
 	if (error)
 		return error;
+	if (reg & BQ32K_OF)
+		dev_warn(dev, "Oscillator Failure. Check RTC battery.\n");
 
 	if (client->dev.of_node)
 		trickle_charger_of_init(dev, client->dev.of_node);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 43745ca..dd3d598 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -62,6 +62,8 @@
 	u8			day_alrm;
 	u8			mon_alrm;
 	u8			century;
+
+	struct rtc_wkalrm	saved_wkalrm;
 };
 
 /* both platform and pnp busses use negative numbers for invalid irqs */
@@ -707,6 +709,8 @@
 		goto cleanup1;
 	}
 
+	hpet_rtc_timer_init();
+
 	if (is_valid_irq(rtc_irq)) {
 		irq_handler_t rtc_cmos_int_handler;
 
@@ -714,6 +718,7 @@
 			rtc_cmos_int_handler = hpet_rtc_interrupt;
 			retval = hpet_register_irq_handler(cmos_interrupt);
 			if (retval) {
+				hpet_mask_rtc_irq_bit(RTC_IRQMASK);
 				dev_warn(dev, "hpet_register_irq_handler "
 						" failed in rtc_init().");
 				goto cleanup1;
@@ -729,7 +734,6 @@
 			goto cleanup1;
 		}
 	}
-	hpet_rtc_timer_init();
 
 	/* export at least the first block of NVRAM */
 	nvram.size = address_space - NVRAM_OFFSET;
@@ -844,8 +848,6 @@
 	return retval;
 }
 
-#ifdef CONFIG_PM
-
 static int cmos_suspend(struct device *dev)
 {
 	struct cmos_rtc	*cmos = dev_get_drvdata(dev);
@@ -877,6 +879,8 @@
 			enable_irq_wake(cmos->irq);
 	}
 
+	cmos_read_alarm(dev, &cmos->saved_wkalrm);
+
 	dev_dbg(dev, "suspend%s, ctrl %02x\n",
 			(tmp & RTC_AIE) ? ", alarm may wake" : "",
 			tmp);
@@ -892,12 +896,32 @@
  */
 static inline int cmos_poweroff(struct device *dev)
 {
+	if (!IS_ENABLED(CONFIG_PM))
+		return -ENOSYS;
+
 	return cmos_suspend(dev);
 }
 
-#ifdef	CONFIG_PM_SLEEP
+static void cmos_check_wkalrm(struct device *dev)
+{
+	struct cmos_rtc *cmos = dev_get_drvdata(dev);
+	struct rtc_wkalrm current_alarm;
+	time64_t t_current_expires;
+	time64_t t_saved_expires;
 
-static int cmos_resume(struct device *dev)
+	cmos_read_alarm(dev, &current_alarm);
+	t_current_expires = rtc_tm_to_time64(&current_alarm.time);
+	t_saved_expires = rtc_tm_to_time64(&cmos->saved_wkalrm.time);
+	if (t_current_expires != t_saved_expires ||
+	    cmos->saved_wkalrm.enabled != current_alarm.enabled) {
+		cmos_set_alarm(dev, &cmos->saved_wkalrm);
+	}
+}
+
+static void cmos_check_acpi_rtc_status(struct device *dev,
+				       unsigned char *rtc_control);
+
+static int __maybe_unused cmos_resume(struct device *dev)
 {
 	struct cmos_rtc	*cmos = dev_get_drvdata(dev);
 	unsigned char tmp;
@@ -910,6 +934,9 @@
 		cmos->enabled_wake = 0;
 	}
 
+	/* The BIOS might have changed the alarm, restore it */
+	cmos_check_wkalrm(dev);
+
 	spin_lock_irq(&rtc_lock);
 	tmp = cmos->suspend_ctrl;
 	cmos->suspend_ctrl = 0;
@@ -936,6 +963,9 @@
 			tmp &= ~RTC_AIE;
 			hpet_mask_rtc_irq_bit(RTC_AIE);
 		} while (mask & RTC_AIE);
+
+		if (tmp & RTC_AIE)
+			cmos_check_acpi_rtc_status(dev, &tmp);
 	}
 	spin_unlock_irq(&rtc_lock);
 
@@ -944,16 +974,6 @@
 	return 0;
 }
 
-#endif
-#else
-
-static inline int cmos_poweroff(struct device *dev)
-{
-	return -ENOSYS;
-}
-
-#endif
-
 static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
 
 /*----------------------------------------------------------------*/
@@ -973,6 +993,20 @@
 static u32 rtc_handler(void *context)
 {
 	struct device *dev = context;
+	struct cmos_rtc *cmos = dev_get_drvdata(dev);
+	unsigned char rtc_control = 0;
+	unsigned char rtc_intr;
+
+	spin_lock_irq(&rtc_lock);
+	if (cmos_rtc.suspend_ctrl)
+		rtc_control = CMOS_READ(RTC_CONTROL);
+	if (rtc_control & RTC_AIE) {
+		cmos_rtc.suspend_ctrl &= ~RTC_AIE;
+		CMOS_WRITE(rtc_control, RTC_CONTROL);
+		rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
+		rtc_update_irq(cmos->rtc, 1, rtc_intr);
+	}
+	spin_unlock_irq(&rtc_lock);
 
 	pm_wakeup_event(dev, 0);
 	acpi_clear_event(ACPI_EVENT_RTC);
@@ -1039,12 +1073,39 @@
 	device_init_wakeup(dev, 1);
 }
 
+static void cmos_check_acpi_rtc_status(struct device *dev,
+				       unsigned char *rtc_control)
+{
+	struct cmos_rtc *cmos = dev_get_drvdata(dev);
+	acpi_event_status rtc_status;
+	acpi_status status;
+
+	if (acpi_gbl_FADT.flags & ACPI_FADT_FIXED_RTC)
+		return;
+
+	status = acpi_get_event_status(ACPI_EVENT_RTC, &rtc_status);
+	if (ACPI_FAILURE(status)) {
+		dev_err(dev, "Could not get RTC status\n");
+	} else if (rtc_status & ACPI_EVENT_FLAG_SET) {
+		unsigned char mask;
+		*rtc_control &= ~RTC_AIE;
+		CMOS_WRITE(*rtc_control, RTC_CONTROL);
+		mask = CMOS_READ(RTC_INTR_FLAGS);
+		rtc_update_irq(cmos->rtc, 1, mask);
+	}
+}
+
 #else
 
 static void cmos_wake_setup(struct device *dev)
 {
 }
 
+static void cmos_check_acpi_rtc_status(struct device *dev,
+				       unsigned char *rtc_control)
+{
+}
+
 #endif
 
 #ifdef	CONFIG_PNP
@@ -1206,9 +1267,7 @@
 	.shutdown	= cmos_platform_shutdown,
 	.driver = {
 		.name		= driver_name,
-#ifdef CONFIG_PM
 		.pm		= &cmos_pm_ops,
-#endif
 		.of_match_table = of_match_ptr(of_cmos_match),
 	}
 };
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 101b7a2..cfc4141 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -140,7 +140,7 @@
 	return 0;
 }
 
-static struct rtc_class_ops coh901331_ops = {
+static const struct rtc_class_ops coh901331_ops = {
 	.read_time = coh901331_read_time,
 	.set_mmss = coh901331_set_mmss,
 	.read_alarm = coh901331_read_alarm,
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index dba60c1..caf3556 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -469,7 +469,7 @@
 	return 0;
 }
 
-static struct rtc_class_ops davinci_rtc_ops = {
+static const struct rtc_class_ops davinci_rtc_ops = {
 	.ioctl			= davinci_rtc_ioctl,
 	.read_time		= davinci_rtc_read_time,
 	.set_time		= davinci_rtc_set_time,
diff --git a/drivers/rtc/rtc-digicolor.c b/drivers/rtc/rtc-digicolor.c
index 8d05596..b253bf1 100644
--- a/drivers/rtc/rtc-digicolor.c
+++ b/drivers/rtc/rtc-digicolor.c
@@ -159,7 +159,7 @@
 	return 0;
 }
 
-static struct rtc_class_ops dc_rtc_ops = {
+static const struct rtc_class_ops dc_rtc_ops = {
 	.read_time		= dc_rtc_read_time,
 	.set_mmss		= dc_rtc_set_mmss,
 	.read_alarm		= dc_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index f5dd09f..0ec4be6 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -102,7 +102,7 @@
 	return rtc_valid_tm(time);
 }
 
-static struct rtc_class_ops ds1302_rtc_ops = {
+static const struct rtc_class_ops ds1302_rtc_ops = {
 	.read_time	= ds1302_rtc_get_time,
 	.set_time	= ds1302_rtc_set_time,
 };
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 8e1c5cb..4e31036 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -186,6 +186,7 @@
 	{ "mcp7941x", mcp794xx },
 	{ "pt7c4338", ds_1307 },
 	{ "rx8025", rx_8025 },
+	{ "isl12057", ds_1337 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, ds1307_id);
@@ -382,10 +383,25 @@
 	t->tm_mday = bcd2bin(ds1307->regs[DS1307_REG_MDAY] & 0x3f);
 	tmp = ds1307->regs[DS1307_REG_MONTH] & 0x1f;
 	t->tm_mon = bcd2bin(tmp) - 1;
-
-	/* assume 20YY not 19YY, and ignore DS1337_BIT_CENTURY */
 	t->tm_year = bcd2bin(ds1307->regs[DS1307_REG_YEAR]) + 100;
 
+#ifdef CONFIG_RTC_DRV_DS1307_CENTURY
+	switch (ds1307->type) {
+	case ds_1337:
+	case ds_1339:
+	case ds_3231:
+		if (ds1307->regs[DS1307_REG_MONTH] & DS1337_BIT_CENTURY)
+			t->tm_year += 100;
+		break;
+	case ds_1340:
+		if (ds1307->regs[DS1307_REG_HOUR] & DS1340_BIT_CENTURY)
+			t->tm_year += 100;
+		break;
+	default:
+		break;
+	}
+#endif
+
 	dev_dbg(dev, "%s secs=%d, mins=%d, "
 		"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
 		"read", t->tm_sec, t->tm_min,
@@ -409,6 +425,27 @@
 		t->tm_hour, t->tm_mday,
 		t->tm_mon, t->tm_year, t->tm_wday);
 
+#ifdef CONFIG_RTC_DRV_DS1307_CENTURY
+	if (t->tm_year < 100)
+		return -EINVAL;
+
+	switch (ds1307->type) {
+	case ds_1337:
+	case ds_1339:
+	case ds_3231:
+	case ds_1340:
+		if (t->tm_year > 299)
+			return -EINVAL;
+	default:
+		if (t->tm_year > 199)
+			return -EINVAL;
+		break;
+	}
+#else
+	if (t->tm_year < 100 || t->tm_year > 199)
+		return -EINVAL;
+#endif
+
 	buf[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
 	buf[DS1307_REG_MIN] = bin2bcd(t->tm_min);
 	buf[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
@@ -424,11 +461,13 @@
 	case ds_1337:
 	case ds_1339:
 	case ds_3231:
-		buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;
+		if (t->tm_year > 199)
+			buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;
 		break;
 	case ds_1340:
-		buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN
-				| DS1340_BIT_CENTURY;
+		buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN;
+		if (t->tm_year > 199)
+			buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY;
 		break;
 	case mcp794xx:
 		/*
@@ -1295,6 +1334,11 @@
 	if (of_property_read_bool(client->dev.of_node, "wakeup-source")) {
 		ds1307_can_wakeup_device = true;
 	}
+	/* Intersil ISL12057 DT backward compatibility */
+	if (of_property_read_bool(client->dev.of_node,
+				  "isil,irq2-can-wakeup-machine")) {
+		ds1307_can_wakeup_device = true;
+	}
 #endif
 
 	switch (ds1307->type) {
diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c
index 641e8e8..ccfc9d4 100644
--- a/drivers/rtc/rtc-ds1347.c
+++ b/drivers/rtc/rtc-ds1347.c
@@ -18,6 +18,7 @@
 #include <linux/rtc.h>
 #include <linux/spi/spi.h>
 #include <linux/bcd.h>
+#include <linux/regmap.h>
 
 /* Registers in ds1347 rtc */
 
@@ -32,37 +33,28 @@
 #define DS1347_STATUS_REG	0x17
 #define DS1347_CLOCK_BURST	0x3F
 
-static int ds1347_read_reg(struct device *dev, unsigned char address,
-				unsigned char *data)
-{
-	struct spi_device *spi = to_spi_device(dev);
+static const struct regmap_range ds1347_ranges[] = {
+	{
+		.range_min = DS1347_SECONDS_REG,
+		.range_max = DS1347_STATUS_REG,
+	},
+};
 
-	*data = address | 0x80;
-
-	return spi_write_then_read(spi, data, 1, data, 1);
-}
-
-static int ds1347_write_reg(struct device *dev, unsigned char address,
-				unsigned char data)
-{
-	struct spi_device *spi = to_spi_device(dev);
-	unsigned char buf[2];
-
-	buf[0] = address & 0x7F;
-	buf[1] = data;
-
-	return spi_write_then_read(spi, buf, 2, NULL, 0);
-}
+static const struct regmap_access_table ds1347_access_table = {
+	.yes_ranges = ds1347_ranges,
+	.n_yes_ranges = ARRAY_SIZE(ds1347_ranges),
+};
 
 static int ds1347_read_time(struct device *dev, struct rtc_time *dt)
 {
 	struct spi_device *spi = to_spi_device(dev);
+	struct regmap *map;
 	int err;
 	unsigned char buf[8];
 
-	buf[0] = DS1347_CLOCK_BURST | 0x80;
+	map = spi_get_drvdata(spi);
 
-	err = spi_write_then_read(spi, buf, 1, buf, 8);
+	err = regmap_bulk_read(map, DS1347_CLOCK_BURST, buf, 8);
 	if (err)
 		return err;
 
@@ -80,25 +72,27 @@
 static int ds1347_set_time(struct device *dev, struct rtc_time *dt)
 {
 	struct spi_device *spi = to_spi_device(dev);
-	unsigned char buf[9];
+	struct regmap *map;
+	unsigned char buf[8];
 
-	buf[0] = DS1347_CLOCK_BURST & 0x7F;
-	buf[1] = bin2bcd(dt->tm_sec);
-	buf[2] = bin2bcd(dt->tm_min);
-	buf[3] = (bin2bcd(dt->tm_hour) & 0x3F);
-	buf[4] = bin2bcd(dt->tm_mday);
-	buf[5] = bin2bcd(dt->tm_mon + 1);
-	buf[6] = bin2bcd(dt->tm_wday + 1);
+	map = spi_get_drvdata(spi);
+
+	buf[0] = bin2bcd(dt->tm_sec);
+	buf[1] = bin2bcd(dt->tm_min);
+	buf[2] = (bin2bcd(dt->tm_hour) & 0x3F);
+	buf[3] = bin2bcd(dt->tm_mday);
+	buf[4] = bin2bcd(dt->tm_mon + 1);
+	buf[5] = bin2bcd(dt->tm_wday + 1);
 
 	/* year in linux is from 1900 i.e in range of 100
 	in rtc it is from 00 to 99 */
 	dt->tm_year = dt->tm_year % 100;
 
-	buf[7] = bin2bcd(dt->tm_year);
-	buf[8] = bin2bcd(0x00);
+	buf[6] = bin2bcd(dt->tm_year);
+	buf[7] = bin2bcd(0x00);
 
 	/* write the rtc settings */
-	return spi_write_then_read(spi, buf, 9, NULL, 0);
+	return regmap_bulk_write(map, DS1347_CLOCK_BURST, buf, 8);
 }
 
 static const struct rtc_class_ops ds1347_rtc_ops = {
@@ -109,35 +103,53 @@
 static int ds1347_probe(struct spi_device *spi)
 {
 	struct rtc_device *rtc;
-	unsigned char data;
+	struct regmap_config config;
+	struct regmap *map;
+	unsigned int data;
 	int res;
 
+	memset(&config, 0, sizeof(config));
+	config.reg_bits = 8;
+	config.val_bits = 8;
+	config.read_flag_mask = 0x80;
+	config.max_register = 0x3F;
+	config.wr_table = &ds1347_access_table;
+
 	/* spi setup with ds1347 in mode 3 and bits per word as 8 */
 	spi->mode = SPI_MODE_3;
 	spi->bits_per_word = 8;
 	spi_setup(spi);
 
+	map = devm_regmap_init_spi(spi, &config);
+
+	if (IS_ERR(map)) {
+		dev_err(&spi->dev, "ds1347 regmap init spi failed\n");
+		return PTR_ERR(map);
+	}
+
+	spi_set_drvdata(spi, map);
+
 	/* RTC Settings */
-	res = ds1347_read_reg(&spi->dev, DS1347_SECONDS_REG, &data);
+	res = regmap_read(map, DS1347_SECONDS_REG, &data);
 	if (res)
 		return res;
 
 	/* Disable the write protect of rtc */
-	ds1347_read_reg(&spi->dev, DS1347_CONTROL_REG, &data);
+	regmap_read(map, DS1347_CONTROL_REG, &data);
 	data = data & ~(1<<7);
-	ds1347_write_reg(&spi->dev, DS1347_CONTROL_REG, data);
+	regmap_write(map, DS1347_CONTROL_REG, data);
 
 	/* Enable the oscillator , disable the oscillator stop flag,
 	 and glitch filter to reduce current consumption */
-	ds1347_read_reg(&spi->dev, DS1347_STATUS_REG, &data);
+	regmap_read(map, DS1347_STATUS_REG, &data);
 	data = data & 0x1B;
-	ds1347_write_reg(&spi->dev, DS1347_STATUS_REG, data);
+	regmap_write(map, DS1347_STATUS_REG, data);
 
 	/* display the settings */
-	ds1347_read_reg(&spi->dev, DS1347_CONTROL_REG, &data);
+	regmap_read(map, DS1347_CONTROL_REG, &data);
 	dev_info(&spi->dev, "DS1347 RTC CTRL Reg = 0x%02x\n", data);
 
-	ds1347_read_reg(&spi->dev, DS1347_STATUS_REG, &data);
+	regmap_read(map, DS1347_STATUS_REG, &data);
 	dev_info(&spi->dev, "DS1347 RTC Status Reg = 0x%02x\n", data);
 
 	rtc = devm_rtc_device_register(&spi->dev, "ds1347",
@@ -146,8 +158,6 @@
 	if (IS_ERR(rtc))
 		return PTR_ERR(rtc);
 
-	spi_set_drvdata(spi, rtc);
-
 	return 0;
 }
 
diff --git a/drivers/rtc/rtc-gemini.c b/drivers/rtc/rtc-gemini.c
index b57505e..688debc 100644
--- a/drivers/rtc/rtc-gemini.c
+++ b/drivers/rtc/rtc-gemini.c
@@ -110,7 +110,7 @@
 	return 0;
 }
 
-static struct rtc_class_ops gemini_rtc_ops = {
+static const struct rtc_class_ops gemini_rtc_ops = {
 	.read_time     = gemini_rtc_read_time,
 	.set_time      = gemini_rtc_set_time,
 };
diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c
deleted file mode 100644
index 0e7f0f5..0000000
--- a/drivers/rtc/rtc-isl12057.c
+++ /dev/null
@@ -1,643 +0,0 @@
-/*
- * rtc-isl12057 - Driver for Intersil ISL12057 I2C Real Time Clock
- *
- * Copyright (C) 2013, Arnaud EBALARD <arno@natisbad.org>
- *
- * This work is largely based on Intersil ISL1208 driver developed by
- * Hebert Valerio Riedel <hvr@gnu.org>.
- *
- * Detailed datasheet on which this development is based is available here:
- *
- *  http://natisbad.org/NAS2/refs/ISL12057.pdf
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/rtc.h>
-#include <linux/i2c.h>
-#include <linux/bcd.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/regmap.h>
-
-#define DRV_NAME "rtc-isl12057"
-
-/* RTC section */
-#define ISL12057_REG_RTC_SC	0x00	/* Seconds */
-#define ISL12057_REG_RTC_MN	0x01	/* Minutes */
-#define ISL12057_REG_RTC_HR	0x02	/* Hours */
-#define ISL12057_REG_RTC_HR_PM	BIT(5)	/* AM/PM bit in 12h format */
-#define ISL12057_REG_RTC_HR_MIL BIT(6)	/* 24h/12h format */
-#define ISL12057_REG_RTC_DW	0x03	/* Day of the Week */
-#define ISL12057_REG_RTC_DT	0x04	/* Date */
-#define ISL12057_REG_RTC_MO	0x05	/* Month */
-#define ISL12057_REG_RTC_MO_CEN	BIT(7)	/* Century bit */
-#define ISL12057_REG_RTC_YR	0x06	/* Year */
-#define ISL12057_RTC_SEC_LEN	7
-
-/* Alarm 1 section */
-#define ISL12057_REG_A1_SC	0x07	/* Alarm 1 Seconds */
-#define ISL12057_REG_A1_MN	0x08	/* Alarm 1 Minutes */
-#define ISL12057_REG_A1_HR	0x09	/* Alarm 1 Hours */
-#define ISL12057_REG_A1_HR_PM	BIT(5)	/* AM/PM bit in 12h format */
-#define ISL12057_REG_A1_HR_MIL	BIT(6)	/* 24h/12h format */
-#define ISL12057_REG_A1_DWDT	0x0A	/* Alarm 1 Date / Day of the week */
-#define ISL12057_REG_A1_DWDT_B	BIT(6)	/* DW / DT selection bit */
-#define ISL12057_A1_SEC_LEN	4
-
-/* Alarm 2 section */
-#define ISL12057_REG_A2_MN	0x0B	/* Alarm 2 Minutes */
-#define ISL12057_REG_A2_HR	0x0C	/* Alarm 2 Hours */
-#define ISL12057_REG_A2_DWDT	0x0D	/* Alarm 2 Date / Day of the week */
-#define ISL12057_A2_SEC_LEN	3
-
-/* Control/Status registers */
-#define ISL12057_REG_INT	0x0E
-#define ISL12057_REG_INT_A1IE	BIT(0)	/* Alarm 1 interrupt enable bit */
-#define ISL12057_REG_INT_A2IE	BIT(1)	/* Alarm 2 interrupt enable bit */
-#define ISL12057_REG_INT_INTCN	BIT(2)	/* Interrupt control enable bit */
-#define ISL12057_REG_INT_RS1	BIT(3)	/* Freq out control bit 1 */
-#define ISL12057_REG_INT_RS2	BIT(4)	/* Freq out control bit 2 */
-#define ISL12057_REG_INT_EOSC	BIT(7)	/* Oscillator enable bit */
-
-#define ISL12057_REG_SR		0x0F
-#define ISL12057_REG_SR_A1F	BIT(0)	/* Alarm 1 interrupt bit */
-#define ISL12057_REG_SR_A2F	BIT(1)	/* Alarm 2 interrupt bit */
-#define ISL12057_REG_SR_OSF	BIT(7)	/* Oscillator failure bit */
-
-/* Register memory map length */
-#define ISL12057_MEM_MAP_LEN	0x10
-
-struct isl12057_rtc_data {
-	struct rtc_device *rtc;
-	struct regmap *regmap;
-	struct mutex lock;
-	int irq;
-};
-
-static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs)
-{
-	tm->tm_sec = bcd2bin(regs[ISL12057_REG_RTC_SC]);
-	tm->tm_min = bcd2bin(regs[ISL12057_REG_RTC_MN]);
-
-	if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_MIL) { /* AM/PM */
-		tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x1f);
-		if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_PM)
-			tm->tm_hour += 12;
-	} else {					    /* 24 hour mode */
-		tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x3f);
-	}
-
-	tm->tm_mday = bcd2bin(regs[ISL12057_REG_RTC_DT]);
-	tm->tm_wday = bcd2bin(regs[ISL12057_REG_RTC_DW]) - 1; /* starts at 1 */
-	tm->tm_mon  = bcd2bin(regs[ISL12057_REG_RTC_MO] & 0x1f) - 1; /* ditto */
-	tm->tm_year = bcd2bin(regs[ISL12057_REG_RTC_YR]) + 100;
-
-	/* Check if years register has overflown from 99 to 00 */
-	if (regs[ISL12057_REG_RTC_MO] & ISL12057_REG_RTC_MO_CEN)
-		tm->tm_year += 100;
-}
-
-static int isl12057_rtc_tm_to_regs(u8 *regs, struct rtc_time *tm)
-{
-	u8 century_bit;
-
-	/*
-	 * The clock has an 8 bit wide bcd-coded register for the year.
-	 * It also has a century bit encoded in MO flag which provides
-	 * information about overflow of year register from 99 to 00.
-	 * tm_year is an offset from 1900 and we are interested in the
-	 * 2000-2199 range, so any value less than 100 or larger than
-	 * 299 is invalid.
-	 */
-	if (tm->tm_year < 100 || tm->tm_year > 299)
-		return -EINVAL;
-
-	century_bit = (tm->tm_year > 199) ? ISL12057_REG_RTC_MO_CEN : 0;
-
-	regs[ISL12057_REG_RTC_SC] = bin2bcd(tm->tm_sec);
-	regs[ISL12057_REG_RTC_MN] = bin2bcd(tm->tm_min);
-	regs[ISL12057_REG_RTC_HR] = bin2bcd(tm->tm_hour); /* 24-hour format */
-	regs[ISL12057_REG_RTC_DT] = bin2bcd(tm->tm_mday);
-	regs[ISL12057_REG_RTC_MO] = bin2bcd(tm->tm_mon + 1) | century_bit;
-	regs[ISL12057_REG_RTC_YR] = bin2bcd(tm->tm_year % 100);
-	regs[ISL12057_REG_RTC_DW] = bin2bcd(tm->tm_wday + 1);
-
-	return 0;
-}
-
-/*
- * Try and match register bits w/ fixed null values to see whether we
- * are dealing with an ISL12057. Note: this function is called early
- * during init and hence does need mutex protection.
- */
-static int isl12057_i2c_validate_chip(struct regmap *regmap)
-{
-	u8 regs[ISL12057_MEM_MAP_LEN];
-	static const u8 mask[ISL12057_MEM_MAP_LEN] = { 0x80, 0x80, 0x80, 0xf8,
-						       0xc0, 0x60, 0x00, 0x00,
-						       0x00, 0x00, 0x00, 0x00,
-						       0x00, 0x00, 0x60, 0x7c };
-	int ret, i;
-
-	ret = regmap_bulk_read(regmap, 0, regs, ISL12057_MEM_MAP_LEN);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < ISL12057_MEM_MAP_LEN; ++i) {
-		if (regs[i] & mask[i])	/* check if bits are cleared */
-			return -ENODEV;
-	}
-
-	return 0;
-}
-
-static int _isl12057_rtc_clear_alarm(struct device *dev)
-{
-	struct isl12057_rtc_data *data = dev_get_drvdata(dev);
-	int ret;
-
-	ret = regmap_update_bits(data->regmap, ISL12057_REG_SR,
-				 ISL12057_REG_SR_A1F, 0);
-	if (ret)
-		dev_err(dev, "%s: clearing alarm failed (%d)\n", __func__, ret);
-
-	return ret;
-}
-
-static int _isl12057_rtc_update_alarm(struct device *dev, int enable)
-{
-	struct isl12057_rtc_data *data = dev_get_drvdata(dev);
-	int ret;
-
-	ret = regmap_update_bits(data->regmap, ISL12057_REG_INT,
-				 ISL12057_REG_INT_A1IE,
-				 enable ? ISL12057_REG_INT_A1IE : 0);
-	if (ret)
-		dev_err(dev, "%s: changing alarm interrupt flag failed (%d)\n",
-			__func__, ret);
-
-	return ret;
-}
-
-/*
- * Note: as we only read from device and do not perform any update, there is
- * no need for an equivalent function which would try and get driver's main
- * lock. Here, it is safe for everyone if we just use regmap internal lock
- * on the device when reading.
- */
-static int _isl12057_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
-	struct isl12057_rtc_data *data = dev_get_drvdata(dev);
-	u8 regs[ISL12057_RTC_SEC_LEN];
-	unsigned int sr;
-	int ret;
-
-	ret = regmap_read(data->regmap, ISL12057_REG_SR, &sr);
-	if (ret) {
-		dev_err(dev, "%s: unable to read oscillator status flag (%d)\n",
-			__func__, ret);
-		goto out;
-	} else {
-		if (sr & ISL12057_REG_SR_OSF) {
-			ret = -ENODATA;
-			goto out;
-		}
-	}
-
-	ret = regmap_bulk_read(data->regmap, ISL12057_REG_RTC_SC, regs,
-			       ISL12057_RTC_SEC_LEN);
-	if (ret)
-		dev_err(dev, "%s: unable to read RTC time section (%d)\n",
-			__func__, ret);
-
-out:
-	if (ret)
-		return ret;
-
-	isl12057_rtc_regs_to_tm(tm, regs);
-
-	return rtc_valid_tm(tm);
-}
-
-static int isl12057_rtc_update_alarm(struct device *dev, int enable)
-{
-	struct isl12057_rtc_data *data = dev_get_drvdata(dev);
-	int ret;
-
-	mutex_lock(&data->lock);
-	ret = _isl12057_rtc_update_alarm(dev, enable);
-	mutex_unlock(&data->lock);
-
-	return ret;
-}
-
-static int isl12057_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
-	struct isl12057_rtc_data *data = dev_get_drvdata(dev);
-	struct rtc_time *alarm_tm = &alarm->time;
-	u8 regs[ISL12057_A1_SEC_LEN];
-	unsigned int ir;
-	int ret;
-
-	mutex_lock(&data->lock);
-	ret = regmap_bulk_read(data->regmap, ISL12057_REG_A1_SC, regs,
-			       ISL12057_A1_SEC_LEN);
-	if (ret) {
-		dev_err(dev, "%s: reading alarm section failed (%d)\n",
-			__func__, ret);
-		goto err_unlock;
-	}
-
-	alarm_tm->tm_sec  = bcd2bin(regs[0] & 0x7f);
-	alarm_tm->tm_min  = bcd2bin(regs[1] & 0x7f);
-	alarm_tm->tm_hour = bcd2bin(regs[2] & 0x3f);
-	alarm_tm->tm_mday = bcd2bin(regs[3] & 0x3f);
-
-	ret = regmap_read(data->regmap, ISL12057_REG_INT, &ir);
-	if (ret) {
-		dev_err(dev, "%s: reading alarm interrupt flag failed (%d)\n",
-			__func__, ret);
-		goto err_unlock;
-	}
-
-	alarm->enabled = !!(ir & ISL12057_REG_INT_A1IE);
-
-err_unlock:
-	mutex_unlock(&data->lock);
-
-	return ret;
-}
-
-static int isl12057_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
-	struct isl12057_rtc_data *data = dev_get_drvdata(dev);
-	struct rtc_time *alarm_tm = &alarm->time;
-	unsigned long rtc_secs, alarm_secs;
-	u8 regs[ISL12057_A1_SEC_LEN];
-	struct rtc_time rtc_tm;
-	int ret, enable = 1;
-
-	mutex_lock(&data->lock);
-	ret = _isl12057_rtc_read_time(dev, &rtc_tm);
-	if (ret)
-		goto err_unlock;
-
-	ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
-	if (ret)
-		goto err_unlock;
-
-	ret = rtc_tm_to_time(alarm_tm, &alarm_secs);
-	if (ret)
-		goto err_unlock;
-
-	/* If alarm time is before current time, disable the alarm */
-	if (!alarm->enabled || alarm_secs <= rtc_secs) {
-		enable = 0;
-	} else {
-		/*
-		 * Chip only support alarms up to one month in the future. Let's
-		 * return an error if we get something after that limit.
-		 * Comparison is done by incrementing rtc_tm month field by one
-		 * and checking alarm value is still below.
-		 */
-		if (rtc_tm.tm_mon == 11) { /* handle year wrapping */
-			rtc_tm.tm_mon = 0;
-			rtc_tm.tm_year += 1;
-		} else {
-			rtc_tm.tm_mon += 1;
-		}
-
-		ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
-		if (ret)
-			goto err_unlock;
-
-		if (alarm_secs > rtc_secs) {
-			dev_err(dev, "%s: max for alarm is one month (%d)\n",
-				__func__, ret);
-			ret = -EINVAL;
-			goto err_unlock;
-		}
-	}
-
-	/* Disable the alarm before modifying it */
-	ret = _isl12057_rtc_update_alarm(dev, 0);
-	if (ret < 0) {
-		dev_err(dev, "%s: unable to disable the alarm (%d)\n",
-			__func__, ret);
-		goto err_unlock;
-	}
-
-	/* Program alarm registers */
-	regs[0] = bin2bcd(alarm_tm->tm_sec) & 0x7f;
-	regs[1] = bin2bcd(alarm_tm->tm_min) & 0x7f;
-	regs[2] = bin2bcd(alarm_tm->tm_hour) & 0x3f;
-	regs[3] = bin2bcd(alarm_tm->tm_mday) & 0x3f;
-
-	ret = regmap_bulk_write(data->regmap, ISL12057_REG_A1_SC, regs,
-				ISL12057_A1_SEC_LEN);
-	if (ret < 0) {
-		dev_err(dev, "%s: writing alarm section failed (%d)\n",
-			__func__, ret);
-		goto err_unlock;
-	}
-
-	/* Enable or disable alarm */
-	ret = _isl12057_rtc_update_alarm(dev, enable);
-
-err_unlock:
-	mutex_unlock(&data->lock);
-
-	return ret;
-}
-
-static int isl12057_rtc_set_time(struct device *dev, struct rtc_time *tm)
-{
-	struct isl12057_rtc_data *data = dev_get_drvdata(dev);
-	u8 regs[ISL12057_RTC_SEC_LEN];
-	int ret;
-
-	ret = isl12057_rtc_tm_to_regs(regs, tm);
-	if (ret)
-		return ret;
-
-	mutex_lock(&data->lock);
-	ret = regmap_bulk_write(data->regmap, ISL12057_REG_RTC_SC, regs,
-				ISL12057_RTC_SEC_LEN);
-	if (ret) {
-		dev_err(dev, "%s: unable to write RTC time section (%d)\n",
-			__func__, ret);
-		goto out;
-	}
-
-	/*
-	 * Now that RTC time has been updated, let's clear oscillator
-	 * failure flag, if needed.
-	 */
-	ret = regmap_update_bits(data->regmap, ISL12057_REG_SR,
-				 ISL12057_REG_SR_OSF, 0);
-	if (ret < 0)
-		dev_err(dev, "%s: unable to clear osc. failure bit (%d)\n",
-			__func__, ret);
-
-out:
-	mutex_unlock(&data->lock);
-
-	return ret;
-}
-
-/*
- * Check current RTC status and enable/disable what needs to be. Return 0 if
- * everything went ok and a negative value upon error. Note: this function
- * is called early during init and hence does need mutex protection.
- */
-static int isl12057_check_rtc_status(struct device *dev, struct regmap *regmap)
-{
-	int ret;
-
-	/* Enable oscillator if not already running */
-	ret = regmap_update_bits(regmap, ISL12057_REG_INT,
-				 ISL12057_REG_INT_EOSC, 0);
-	if (ret < 0) {
-		dev_err(dev, "%s: unable to enable oscillator (%d)\n",
-			__func__, ret);
-		return ret;
-	}
-
-	/* Clear alarm bit if needed */
-	ret = regmap_update_bits(regmap, ISL12057_REG_SR,
-				 ISL12057_REG_SR_A1F, 0);
-	if (ret < 0) {
-		dev_err(dev, "%s: unable to clear alarm bit (%d)\n",
-			__func__, ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-#ifdef CONFIG_OF
-/*
- * One would expect the device to be marked as a wakeup source only
- * when an IRQ pin of the RTC is routed to an interrupt line of the
- * CPU. In practice, such an IRQ pin can be connected to a PMIC and
- * this allows the device to be powered up when RTC alarm rings. This
- * is for instance the case on ReadyNAS 102, 104 and 2120. On those
- * devices with no IRQ driectly connected to the SoC, the RTC chip
- * can be forced as a wakeup source by stating that explicitly in
- * the device's .dts file using the "wakeup-source" boolean property.
- * This will guarantee 'wakealarm' sysfs entry is available on the device.
- *
- * The function below returns 1, i.e. the capability of the chip to
- * wakeup the device, based on IRQ availability or if the boolean
- * property has been set in the .dts file. Otherwise, it returns 0.
- */
-
-static bool isl12057_can_wakeup_machine(struct device *dev)
-{
-	struct isl12057_rtc_data *data = dev_get_drvdata(dev);
-
-	return data->irq || of_property_read_bool(dev->of_node, "wakeup-source")
-		|| of_property_read_bool(dev->of_node, /* legacy */
-					 "isil,irq2-can-wakeup-machine");
-}
-#else
-static bool isl12057_can_wakeup_machine(struct device *dev)
-{
-	struct isl12057_rtc_data *data = dev_get_drvdata(dev);
-
-	return !!data->irq;
-}
-#endif
-
-static int isl12057_rtc_alarm_irq_enable(struct device *dev,
-					 unsigned int enable)
-{
-	struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev);
-	int ret = -ENOTTY;
-
-	if (rtc_data->irq)
-		ret = isl12057_rtc_update_alarm(dev, enable);
-
-	return ret;
-}
-
-static irqreturn_t isl12057_rtc_interrupt(int irq, void *data)
-{
-	struct i2c_client *client = data;
-	struct isl12057_rtc_data *rtc_data = dev_get_drvdata(&client->dev);
-	struct rtc_device *rtc = rtc_data->rtc;
-	int ret, handled = IRQ_NONE;
-	unsigned int sr;
-
-	ret = regmap_read(rtc_data->regmap, ISL12057_REG_SR, &sr);
-	if (!ret && (sr & ISL12057_REG_SR_A1F)) {
-		dev_dbg(&client->dev, "RTC alarm!\n");
-
-		rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
-
-		/* Acknowledge and disable the alarm */
-		_isl12057_rtc_clear_alarm(&client->dev);
-		_isl12057_rtc_update_alarm(&client->dev, 0);
-
-		handled = IRQ_HANDLED;
-	}
-
-	return handled;
-}
-
-static const struct rtc_class_ops rtc_ops = {
-	.read_time = _isl12057_rtc_read_time,
-	.set_time = isl12057_rtc_set_time,
-	.read_alarm = isl12057_rtc_read_alarm,
-	.set_alarm = isl12057_rtc_set_alarm,
-	.alarm_irq_enable = isl12057_rtc_alarm_irq_enable,
-};
-
-static const struct regmap_config isl12057_rtc_regmap_config = {
-	.reg_bits = 8,
-	.val_bits = 8,
-};
-
-static int isl12057_probe(struct i2c_client *client,
-			  const struct i2c_device_id *id)
-{
-	struct device *dev = &client->dev;
-	struct isl12057_rtc_data *data;
-	struct regmap *regmap;
-	int ret;
-
-	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
-				     I2C_FUNC_SMBUS_BYTE_DATA |
-				     I2C_FUNC_SMBUS_I2C_BLOCK))
-		return -ENODEV;
-
-	regmap = devm_regmap_init_i2c(client, &isl12057_rtc_regmap_config);
-	if (IS_ERR(regmap)) {
-		ret = PTR_ERR(regmap);
-		dev_err(dev, "%s: regmap allocation failed (%d)\n",
-			__func__, ret);
-		return ret;
-	}
-
-	ret = isl12057_i2c_validate_chip(regmap);
-	if (ret)
-		return ret;
-
-	ret = isl12057_check_rtc_status(dev, regmap);
-	if (ret)
-		return ret;
-
-	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
-
-	mutex_init(&data->lock);
-	data->regmap = regmap;
-	dev_set_drvdata(dev, data);
-
-	if (client->irq > 0) {
-		ret = devm_request_threaded_irq(dev, client->irq, NULL,
-						isl12057_rtc_interrupt,
-						IRQF_SHARED|IRQF_ONESHOT,
-						DRV_NAME, client);
-		if (!ret)
-			data->irq = client->irq;
-		else
-			dev_err(dev, "%s: irq %d unavailable (%d)\n", __func__,
-				client->irq, ret);
-	}
-
-	if (isl12057_can_wakeup_machine(dev))
-		device_init_wakeup(dev, true);
-
-	data->rtc = devm_rtc_device_register(dev, DRV_NAME, &rtc_ops,
-					     THIS_MODULE);
-	ret = PTR_ERR_OR_ZERO(data->rtc);
-	if (ret) {
-		dev_err(dev, "%s: unable to register RTC device (%d)\n",
-			__func__, ret);
-		goto err;
-	}
-
-	/* We cannot support UIE mode if we do not have an IRQ line */
-	if (!data->irq)
-		data->rtc->uie_unsupported = 1;
-
-err:
-	return ret;
-}
-
-static int isl12057_remove(struct i2c_client *client)
-{
-	if (isl12057_can_wakeup_machine(&client->dev))
-		device_init_wakeup(&client->dev, false);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int isl12057_rtc_suspend(struct device *dev)
-{
-	struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev);
-
-	if (rtc_data->irq && device_may_wakeup(dev))
-		return enable_irq_wake(rtc_data->irq);
-
-	return 0;
-}
-
-static int isl12057_rtc_resume(struct device *dev)
-{
-	struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev);
-
-	if (rtc_data->irq && device_may_wakeup(dev))
-		return disable_irq_wake(rtc_data->irq);
-
-	return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(isl12057_rtc_pm_ops, isl12057_rtc_suspend,
-			 isl12057_rtc_resume);
-
-#ifdef CONFIG_OF
-static const struct of_device_id isl12057_dt_match[] = {
-	{ .compatible = "isl,isl12057" }, /* for backward compat., don't use */
-	{ .compatible = "isil,isl12057" },
-	{ },
-};
-MODULE_DEVICE_TABLE(of, isl12057_dt_match);
-#endif
-
-static const struct i2c_device_id isl12057_id[] = {
-	{ "isl12057", 0 },
-	{ }
-};
-MODULE_DEVICE_TABLE(i2c, isl12057_id);
-
-static struct i2c_driver isl12057_driver = {
-	.driver = {
-		.name = DRV_NAME,
-		.pm = &isl12057_rtc_pm_ops,
-		.of_match_table = of_match_ptr(isl12057_dt_match),
-	},
-	.probe	  = isl12057_probe,
-	.remove	  = isl12057_remove,
-	.id_table = isl12057_id,
-};
-module_i2c_driver(isl12057_driver);
-
-MODULE_AUTHOR("Arnaud EBALARD <arno@natisbad.org>");
-MODULE_DESCRIPTION("Intersil ISL12057 RTC driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index b2bcfc0..5e14651 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -174,7 +174,7 @@
 	return jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_AF_IRQ, enable);
 }
 
-static struct rtc_class_ops jz4740_rtc_ops = {
+static const struct rtc_class_ops jz4740_rtc_ops = {
 	.read_time	= jz4740_rtc_read_time,
 	.set_mmss	= jz4740_rtc_set_mmss,
 	.read_alarm	= jz4740_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c
index 025bb33..4021fd0 100644
--- a/drivers/rtc/rtc-mcp795.c
+++ b/drivers/rtc/rtc-mcp795.c
@@ -151,7 +151,7 @@
 	return rtc_valid_tm(tim);
 }
 
-static struct rtc_class_ops mcp795_rtc_ops = {
+static const struct rtc_class_ops mcp795_rtc_ops = {
 		.read_time = mcp795_read_time,
 		.set_time = mcp795_set_time
 };
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 44f622c..1a61fa5 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -301,7 +301,7 @@
 	return ret;
 }
 
-static struct rtc_class_ops mtk_rtc_ops = {
+static const struct rtc_class_ops mtk_rtc_ops = {
 	.read_time  = mtk_rtc_read_time,
 	.set_time   = mtk_rtc_set_time,
 	.read_alarm = mtk_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
index 09fc1c1..b1b6b30 100644
--- a/drivers/rtc/rtc-nuc900.c
+++ b/drivers/rtc/rtc-nuc900.c
@@ -214,7 +214,7 @@
 	return 0;
 }
 
-static struct rtc_class_ops nuc900_rtc_ops = {
+static const struct rtc_class_ops nuc900_rtc_ops = {
 	.read_time = nuc900_rtc_read_time,
 	.set_time = nuc900_rtc_set_time,
 	.read_alarm = nuc900_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index ec2e9c5..b04ea9b 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -13,19 +13,23 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <linux/rtc.h>
+#include <dt-bindings/gpio/gpio.h>
 #include <linux/bcd.h>
-#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
-#include <linux/io.h>
-#include <linux/clk.h>
+#include <linux/rtc.h>
 
 /*
  * The OMAP RTC is a year/month/day/hours/minutes/seconds BCD clock
@@ -115,6 +119,8 @@
 
 /* OMAP_RTC_PMIC bit fields: */
 #define OMAP_RTC_PMIC_POWER_EN_EN	BIT(16)
+#define OMAP_RTC_PMIC_EXT_WKUP_EN(x)	BIT(x)
+#define OMAP_RTC_PMIC_EXT_WKUP_POL(x)	BIT(4 + x)
 
 /* OMAP_RTC_KICKER values */
 #define	KICK0_VALUE			0x83e70b13
@@ -141,6 +147,7 @@
 	bool is_pmic_controller;
 	bool has_ext_clk;
 	const struct omap_rtc_device_type *type;
+	struct pinctrl_dev *pctldev;
 };
 
 static inline u8 rtc_read(struct omap_rtc *rtc, unsigned int reg)
@@ -469,7 +476,7 @@
 	mdelay(2500);
 }
 
-static struct rtc_class_ops omap_rtc_ops = {
+static const struct rtc_class_ops omap_rtc_ops = {
 	.read_time	= omap_rtc_read_time,
 	.set_time	= omap_rtc_set_time,
 	.read_alarm	= omap_rtc_read_alarm,
@@ -525,6 +532,139 @@
 };
 MODULE_DEVICE_TABLE(of, omap_rtc_of_match);
 
+static const struct pinctrl_pin_desc rtc_pins_desc[] = {
+	PINCTRL_PIN(0, "ext_wakeup0"),
+	PINCTRL_PIN(1, "ext_wakeup1"),
+	PINCTRL_PIN(2, "ext_wakeup2"),
+	PINCTRL_PIN(3, "ext_wakeup3"),
+};
+
+static int rtc_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	return 0;
+}
+
+static const char *rtc_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+					unsigned int group)
+{
+	return NULL;
+}
+
+static const struct pinctrl_ops rtc_pinctrl_ops = {
+	.get_groups_count = rtc_pinctrl_get_groups_count,
+	.get_group_name = rtc_pinctrl_get_group_name,
+	.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+	.dt_free_map = pinconf_generic_dt_free_map,
+};
+
+enum rtc_pin_config_param {
+	PIN_CONFIG_ACTIVE_HIGH = PIN_CONFIG_END + 1,
+};
+
+static const struct pinconf_generic_params rtc_params[] = {
+	{"ti,active-high", PIN_CONFIG_ACTIVE_HIGH, 0},
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item rtc_conf_items[ARRAY_SIZE(rtc_params)] = {
+	PCONFDUMP(PIN_CONFIG_ACTIVE_HIGH, "input active high", NULL, false),
+};
+#endif
+
+static int rtc_pinconf_get(struct pinctrl_dev *pctldev,
+			unsigned int pin, unsigned long *config)
+{
+	struct omap_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
+	unsigned int param = pinconf_to_config_param(*config);
+	u32 val;
+	u16 arg = 0;
+
+	rtc->type->unlock(rtc);
+	val = rtc_readl(rtc, OMAP_RTC_PMIC_REG);
+	rtc->type->lock(rtc);
+
+	switch (param) {
+	case PIN_CONFIG_INPUT_ENABLE:
+		if (!(val & OMAP_RTC_PMIC_EXT_WKUP_EN(pin)))
+			return -EINVAL;
+		break;
+	case PIN_CONFIG_ACTIVE_HIGH:
+		if (val & OMAP_RTC_PMIC_EXT_WKUP_POL(pin))
+			return -EINVAL;
+		break;
+	default:
+		return -ENOTSUPP;
+	};
+
+	*config = pinconf_to_config_packed(param, arg);
+
+	return 0;
+}
+
+static int rtc_pinconf_set(struct pinctrl_dev *pctldev,
+			unsigned int pin, unsigned long *configs,
+			unsigned int num_configs)
+{
+	struct omap_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
+	u32 val;
+	unsigned int param;
+	u16 param_val;
+	int i;
+
+	rtc->type->unlock(rtc);
+	val = rtc_readl(rtc, OMAP_RTC_PMIC_REG);
+	rtc->type->lock(rtc);
+
+	/* active low by default */
+	val |= OMAP_RTC_PMIC_EXT_WKUP_POL(pin);
+
+	for (i = 0; i < num_configs; i++) {
+		param = pinconf_to_config_param(configs[i]);
+		param_val = pinconf_to_config_argument(configs[i]);
+
+		switch (param) {
+		case PIN_CONFIG_INPUT_ENABLE:
+			if (param_val)
+				val |= OMAP_RTC_PMIC_EXT_WKUP_EN(pin);
+			else
+				val &= ~OMAP_RTC_PMIC_EXT_WKUP_EN(pin);
+			break;
+		case PIN_CONFIG_ACTIVE_HIGH:
+			val &= ~OMAP_RTC_PMIC_EXT_WKUP_POL(pin);
+			break;
+		default:
+			dev_err(&rtc->rtc->dev, "Property %u not supported\n",
+				param);
+			return -ENOTSUPP;
+		}
+	}
+
+	rtc->type->unlock(rtc);
+	rtc_writel(rtc, OMAP_RTC_PMIC_REG, val);
+	rtc->type->lock(rtc);
+
+	return 0;
+}
+
+static const struct pinconf_ops rtc_pinconf_ops = {
+	.is_generic = true,
+	.pin_config_get = rtc_pinconf_get,
+	.pin_config_set = rtc_pinconf_set,
+};
+
+static struct pinctrl_desc rtc_pinctrl_desc = {
+	.pins = rtc_pins_desc,
+	.npins = ARRAY_SIZE(rtc_pins_desc),
+	.pctlops = &rtc_pinctrl_ops,
+	.confops = &rtc_pinconf_ops,
+	.custom_params = rtc_params,
+	.num_custom_params = ARRAY_SIZE(rtc_params),
+#ifdef CONFIG_DEBUG_FS
+	.custom_conf_items = rtc_conf_items,
+#endif
+	.owner = THIS_MODULE,
+};
+
 static int omap_rtc_probe(struct platform_device *pdev)
 {
 	struct omap_rtc	*rtc;
@@ -681,6 +821,15 @@
 		}
 	}
 
+	/* Support ext_wakeup pinconf */
+	rtc_pinctrl_desc.name = dev_name(&pdev->dev);
+
+	rtc->pctldev = pinctrl_register(&rtc_pinctrl_desc, &pdev->dev, rtc);
+	if (IS_ERR(rtc->pctldev)) {
+		dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
+		return PTR_ERR(rtc->pctldev);
+	}
+
 	return 0;
 
 err:
@@ -724,6 +873,9 @@
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
+	/* Remove ext_wakeup pinconf */
+	pinctrl_unregister(rtc->pctldev);
+
 	return 0;
 }
 
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c
index 6080e0e..4bcfb88 100644
--- a/drivers/rtc/rtc-palmas.c
+++ b/drivers/rtc/rtc-palmas.c
@@ -225,7 +225,7 @@
 	return IRQ_HANDLED;
 }
 
-static struct rtc_class_ops palmas_rtc_ops = {
+static const struct rtc_class_ops palmas_rtc_ops = {
 	.read_time	= palmas_rtc_read_time,
 	.set_time	= palmas_rtc_set_time,
 	.read_alarm	= palmas_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index b4478cc..8895f77 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -182,7 +182,8 @@
 }
 
 static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr,
-			     const char *buffer, size_t count) {
+			     const char *buffer, size_t count)
+{
 	struct pcf2123_sysfs_reg *r;
 	unsigned long reg;
 	unsigned long val;
@@ -199,7 +200,7 @@
 	if (ret)
 		return ret;
 
-	pcf2123_write_reg(dev, reg, val);
+	ret = pcf2123_write_reg(dev, reg, val);
 	if (ret < 0)
 		return -EIO;
 	return count;
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index e6b6911..00c31c9 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -232,7 +232,7 @@
 	return ret;
 }
 
-static struct rtc_class_ops pcf50633_rtc_ops = {
+static const struct rtc_class_ops pcf50633_rtc_ops = {
 	.read_time		= pcf50633_rtc_read_time,
 	.set_time		= pcf50633_rtc_set_time,
 	.read_alarm		= pcf50633_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c
index 64e1e45..5cfb6df 100644
--- a/drivers/rtc/rtc-pic32.c
+++ b/drivers/rtc/rtc-pic32.c
@@ -400,7 +400,6 @@
 	.remove		= pic32_rtc_remove,
 	.driver		= {
 		.name	= "pic32-rtc",
-		.owner	= THIS_MODULE,
 		.of_match_table	= of_match_ptr(pic32_rtc_dt_ids),
 	},
 };
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index 9a2f6a9..f9277e5 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -52,11 +52,21 @@
 #define RV8803_CTRL_TIE			BIT(4)
 #define RV8803_CTRL_UIE			BIT(5)
 
+#define RX8900_BACKUP_CTRL		0x18
+#define RX8900_FLAG_SWOFF		BIT(2)
+#define RX8900_FLAG_VDETOFF		BIT(3)
+
+enum rv8803_type {
+	rv_8803,
+	rx_8900
+};
+
 struct rv8803_data {
 	struct i2c_client *client;
 	struct rtc_device *rtc;
 	struct mutex flags_lock;
 	u8 ctrl;
+	enum rv8803_type type;
 };
 
 static int rv8803_read_reg(const struct i2c_client *client, u8 reg)
@@ -497,6 +507,35 @@
 	.ioctl = rv8803_ioctl,
 };
 
+static int rx8900_trickle_charger_init(struct rv8803_data *rv8803)
+{
+	struct i2c_client *client = rv8803->client;
+	struct device_node *node = client->dev.of_node;
+	int err;
+	u8 flags;
+
+	if (!node)
+		return 0;
+
+	if (rv8803->type != rx_8900)
+		return 0;
+
+	err = i2c_smbus_read_byte_data(rv8803->client, RX8900_BACKUP_CTRL);
+	if (err < 0)
+		return err;
+
+	flags = ~(RX8900_FLAG_VDETOFF | RX8900_FLAG_SWOFF) & (u8)err;
+
+	if (of_property_read_bool(node, "epson,vdet-disable"))
+		flags |= RX8900_FLAG_VDETOFF;
+
+	if (of_property_read_bool(node, "trickle-diode-disable"))
+		flags |= RX8900_FLAG_SWOFF;
+
+	return i2c_smbus_write_byte_data(rv8803->client, RX8900_BACKUP_CTRL,
+					 flags);
+}
+
 static int rv8803_probe(struct i2c_client *client,
 			const struct i2c_device_id *id)
 {
@@ -517,6 +556,7 @@
 
 	mutex_init(&rv8803->flags_lock);
 	rv8803->client = client;
+	rv8803->type = id->driver_data;
 	i2c_set_clientdata(client, rv8803);
 
 	flags = rv8803_read_reg(client, RV8803_FLAG);
@@ -558,6 +598,12 @@
 	if (err)
 		return err;
 
+	err = rx8900_trickle_charger_init(rv8803);
+	if (err) {
+		dev_err(&client->dev, "failed to init charger\n");
+		return err;
+	}
+
 	err = device_create_bin_file(&client->dev, &rv8803_nvram_attr);
 	if (err)
 		return err;
@@ -575,8 +621,8 @@
 }
 
 static const struct i2c_device_id rv8803_id[] = {
-	{ "rv8803", 0 },
-	{ "rx8900", 0 },
+	{ "rv8803", rv_8803 },
+	{ "rx8900", rx_8900 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, rv8803_id);
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index bbad00b..7c9c08e 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -317,7 +317,7 @@
 	return ret;
 }
 
-static struct rtc_class_ops rx6110_rtc_ops = {
+static const struct rtc_class_ops rx6110_rtc_ops = {
 	.read_time = rx6110_get_time,
 	.set_time = rx6110_set_time,
 };
@@ -388,7 +388,6 @@
 static struct spi_driver rx6110_driver = {
 	.driver = {
 		.name = RX6110_DRIVER_NAME,
-		.owner = THIS_MODULE,
 	},
 	.probe		= rx6110_probe,
 	.remove		= rx6110_remove,
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index 2b85cc7..91857d8 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -403,7 +403,7 @@
 	return 0;
 }
 
-static struct rtc_class_ops rx8025_rtc_ops = {
+static const struct rtc_class_ops rx8025_rtc_ops = {
 	.read_time = rx8025_get_time,
 	.set_time = rx8025_set_time,
 	.read_alarm = rx8025_read_alarm,
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index f05ef85..e377f42 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -343,7 +343,7 @@
 	return ret;
 }
 
-static struct rtc_class_ops spear_rtc_ops = {
+static const struct rtc_class_ops spear_rtc_ops = {
 	.read_time = spear_rtc_read_time,
 	.set_time = spear_rtc_set_time,
 	.read_alarm = spear_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index e6aaaa5..d578e40 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -231,7 +231,7 @@
 	return 0;
 }
 
-static struct rtc_class_ops stmp3xxx_rtc_ops = {
+static const struct rtc_class_ops stmp3xxx_rtc_ops = {
 	.alarm_irq_enable =
 			  stmp3xxx_alarm_irq_enable,
 	.read_time	= stmp3xxx_rtc_gettime,
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index 63b9fb1..1218d5d 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -160,7 +160,7 @@
 	unsigned long push = 0;
 	struct rtc_wkalrm alm;
 	struct rtc_device *rtc = to_rtc_device(dev);
-	char *buf_ptr;
+	const char *buf_ptr;
 	int adjust = 0;
 
 	/* Only request alarms that trigger in the future.  Disable them
@@ -171,7 +171,7 @@
 		return retval;
 	rtc_tm_to_time(&alm.time, &now);
 
-	buf_ptr = (char *)buf;
+	buf_ptr = buf;
 	if (*buf_ptr == '+') {
 		buf_ptr++;
 		if (*buf_ptr == '=') {
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 15ac597..3853ba9 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -291,7 +291,7 @@
 	return IRQ_HANDLED;
 }
 
-static struct rtc_class_ops tegra_rtc_ops = {
+static const struct rtc_class_ops tegra_rtc_ops = {
 	.read_time	= tegra_rtc_read_time,
 	.set_time	= tegra_rtc_set_time,
 	.read_alarm	= tegra_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 2dc787d..176720b 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -462,7 +462,7 @@
 	return ret;
 }
 
-static struct rtc_class_ops twl_rtc_ops = {
+static const struct rtc_class_ops twl_rtc_ops = {
 	.read_time	= twl_rtc_read_time,
 	.set_time	= twl_rtc_set_time,
 	.read_alarm	= twl_rtc_read_alarm,
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 6a6906f..68138a6 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -61,7 +61,7 @@
 		"memory that can be allocated. Range is 16 - 128");
 
 #define beiscsi_disp_param(_name)\
-ssize_t	\
+static ssize_t	\
 beiscsi_##_name##_disp(struct device *dev,\
 			struct device_attribute *attrib, char *buf)	\
 {	\
@@ -74,7 +74,7 @@
 }
 
 #define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
-int \
+static int \
 beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
 {\
 	if (val >= _minval && val <= _maxval) {\
@@ -93,7 +93,7 @@
 }
 
 #define beiscsi_store_param(_name)  \
-ssize_t \
+static ssize_t \
 beiscsi_##_name##_store(struct device *dev,\
 			 struct device_attribute *attr, const char *buf,\
 			 size_t count) \
@@ -112,7 +112,7 @@
 }
 
 #define beiscsi_init_param(_name, _minval, _maxval, _defval) \
-int \
+static int \
 beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
 { \
 	if (val >= _minval && val <= _maxval) {\
@@ -4584,7 +4584,7 @@
 	io_task->cmd_bhs = NULL;
 	return -ENOMEM;
 }
-int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
+static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
 		       unsigned int num_sg, unsigned int xferlen,
 		       unsigned int writedir)
 {
@@ -4973,7 +4973,7 @@
 	return rc;
 }
 
-void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
+static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
 {
 	/* Set the logging parameter */
 	beiscsi_log_enable_init(phba, beiscsi_log_enable);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 7c0d7af..0039beb 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -685,6 +685,11 @@
 					req_completion);
 			csk->snd_nxt += len;
 			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
+		} else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) &&
+			   (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
+			struct cpl_close_con_req *req =
+				(struct cpl_close_con_req *)skb->data;
+			req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
 		}
 		total_size += skb->truesize;
 		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 516bd6c..cbf0103 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -30,161 +30,41 @@
 #include "NCR5380.h"
 #include <linux/init.h>
 #include <linux/ioport.h>
-#include <linux/isapnp.h>
+#include <linux/isa.h>
+#include <linux/pnp.h>
 #include <linux/interrupt.h>
 
+#define MAX_CARDS 8
+
+/* old-style parameters for compatibility */
 static int ncr_irq;
-static int ncr_dma;
 static int ncr_addr;
 static int ncr_5380;
 static int ncr_53c400;
 static int ncr_53c400a;
 static int dtc_3181e;
 static int hp_c2502;
+module_param(ncr_irq, int, 0);
+module_param(ncr_addr, int, 0);
+module_param(ncr_5380, int, 0);
+module_param(ncr_53c400, int, 0);
+module_param(ncr_53c400a, int, 0);
+module_param(dtc_3181e, int, 0);
+module_param(hp_c2502, int, 0);
 
-static struct override {
-	NCR5380_map_type NCR5380_map_name;
-	int irq;
-	int dma;
-	int board;		/* Use NCR53c400, Ricoh, etc. extensions ? */
-} overrides
-#ifdef GENERIC_NCR5380_OVERRIDE
-[] __initdata = GENERIC_NCR5380_OVERRIDE;
-#else
-[1] __initdata = { { 0,},};
-#endif
+static int irq[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(irq, "IRQ number(s)");
 
-#define NO_OVERRIDES ARRAY_SIZE(overrides)
+static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+module_param_array(base, int, NULL, 0);
+MODULE_PARM_DESC(base, "base address(es)");
 
-#ifndef MODULE
+static int card[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
+module_param_array(card, int, NULL, 0);
+MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)");
 
-/**
- *	internal_setup		-	handle lilo command string override
- *	@board:	BOARD_* identifier for the board
- *	@str: unused
- *	@ints: numeric parameters
- *
- * 	Do LILO command line initialization of the overrides array. Display
- *	errors when needed
- *
- *	Locks: none
- */
-
-static void __init internal_setup(int board, char *str, int *ints)
-{
-	static int commandline_current;
-	switch (board) {
-	case BOARD_NCR5380:
-		if (ints[0] != 2 && ints[0] != 3) {
-			printk(KERN_ERR "generic_NCR5380_setup : usage ncr5380=" STRVAL(NCR5380_map_name) ",irq,dma\n");
-			return;
-		}
-		break;
-	case BOARD_NCR53C400:
-		if (ints[0] != 2) {
-			printk(KERN_ERR "generic_NCR53C400_setup : usage ncr53c400=" STRVAL(NCR5380_map_name) ",irq\n");
-			return;
-		}
-		break;
-	case BOARD_NCR53C400A:
-		if (ints[0] != 2) {
-			printk(KERN_ERR "generic_NCR53C400A_setup : usage ncr53c400a=" STRVAL(NCR5380_map_name) ",irq\n");
-			return;
-		}
-		break;
-	case BOARD_DTC3181E:
-		if (ints[0] != 2) {
-			printk("generic_DTC3181E_setup : usage dtc3181e=" STRVAL(NCR5380_map_name) ",irq\n");
-			return;
-		}
-		break;
-	}
-
-	if (commandline_current < NO_OVERRIDES) {
-		overrides[commandline_current].NCR5380_map_name = (NCR5380_map_type) ints[1];
-		overrides[commandline_current].irq = ints[2];
-		if (ints[0] == 3)
-			overrides[commandline_current].dma = ints[3];
-		else
-			overrides[commandline_current].dma = DMA_NONE;
-		overrides[commandline_current].board = board;
-		++commandline_current;
-	}
-}
-
-
-/**
- * 	do_NCR53C80_setup		-	set up entry point
- *	@str: unused
- *
- *	Setup function invoked at boot to parse the ncr5380= command
- *	line.
- */
-
-static int __init do_NCR5380_setup(char *str)
-{
-	int ints[10];
-
-	get_options(str, ARRAY_SIZE(ints), ints);
-	internal_setup(BOARD_NCR5380, str, ints);
-	return 1;
-}
-
-/**
- * 	do_NCR53C400_setup		-	set up entry point
- *	@str: unused
- *	@ints: integer parameters from kernel setup code
- *
- *	Setup function invoked at boot to parse the ncr53c400= command
- *	line.
- */
-
-static int __init do_NCR53C400_setup(char *str)
-{
-	int ints[10];
-
-	get_options(str, ARRAY_SIZE(ints), ints);
-	internal_setup(BOARD_NCR53C400, str, ints);
-	return 1;
-}
-
-/**
- * 	do_NCR53C400A_setup	-	set up entry point
- *	@str: unused
- *	@ints: integer parameters from kernel setup code
- *
- *	Setup function invoked at boot to parse the ncr53c400a= command
- *	line.
- */
-
-static int __init do_NCR53C400A_setup(char *str)
-{
-	int ints[10];
-
-	get_options(str, ARRAY_SIZE(ints), ints);
-	internal_setup(BOARD_NCR53C400A, str, ints);
-	return 1;
-}
-
-/**
- * 	do_DTC3181E_setup	-	set up entry point
- *	@str: unused
- *	@ints: integer parameters from kernel setup code
- *
- *	Setup function invoked at boot to parse the dtc3181e= command
- *	line.
- */
-
-static int __init do_DTC3181E_setup(char *str)
-{
-	int ints[10];
-
-	get_options(str, ARRAY_SIZE(ints), ints);
-	internal_setup(BOARD_DTC3181E, str, ints);
-	return 1;
-}
-
-#endif
+MODULE_LICENSE("GPL");
 
 #ifndef SCSI_G_NCR5380_MEM
 /*
@@ -210,21 +90,9 @@
 }
 #endif
 
-/**
- * 	generic_NCR5380_detect	-	look for NCR5380 controllers
- *	@tpnt: the scsi template
- *
- *	Scan for the present of NCR5380, NCR53C400, NCR53C400A, DTC3181E
- *	and DTC436(ISAPnP) controllers. If overrides have been set we use
- *	them.
- *
- *	Locks: none
- */
-
-static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt)
+static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
+			struct device *pdev, int base, int irq, int board)
 {
-	static int current_override;
-	int count;
 	unsigned int *ports;
 	u8 *magic = NULL;
 #ifndef SCSI_G_NCR5380_MEM
@@ -232,272 +100,222 @@
 	int port_idx = -1;
 	unsigned long region_size;
 #endif
-	static unsigned int __initdata ncr_53c400a_ports[] = {
+	static unsigned int ncr_53c400a_ports[] = {
 		0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
 	};
-	static unsigned int __initdata dtc_3181e_ports[] = {
+	static unsigned int dtc_3181e_ports[] = {
 		0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0
 	};
-	static u8 ncr_53c400a_magic[] __initdata = {	/* 53C400A & DTC436 */
+	static u8 ncr_53c400a_magic[] = {	/* 53C400A & DTC436 */
 		0x59, 0xb9, 0xc5, 0xae, 0xa6
 	};
-	static u8 hp_c2502_magic[] __initdata = {	/* HP C2502 */
+	static u8 hp_c2502_magic[] = {	/* HP C2502 */
 		0x0f, 0x22, 0xf0, 0x20, 0x80
 	};
-	int flags;
+	int flags, ret;
 	struct Scsi_Host *instance;
 	struct NCR5380_hostdata *hostdata;
 #ifdef SCSI_G_NCR5380_MEM
-	unsigned long base;
 	void __iomem *iomem;
 	resource_size_t iomem_size;
 #endif
 
-	if (ncr_irq)
-		overrides[0].irq = ncr_irq;
-	if (ncr_dma)
-		overrides[0].dma = ncr_dma;
-	if (ncr_addr)
-		overrides[0].NCR5380_map_name = (NCR5380_map_type) ncr_addr;
-	if (ncr_5380)
-		overrides[0].board = BOARD_NCR5380;
-	else if (ncr_53c400)
-		overrides[0].board = BOARD_NCR53C400;
-	else if (ncr_53c400a)
-		overrides[0].board = BOARD_NCR53C400A;
-	else if (dtc_3181e)
-		overrides[0].board = BOARD_DTC3181E;
-	else if (hp_c2502)
-		overrides[0].board = BOARD_HP_C2502;
-#ifndef SCSI_G_NCR5380_MEM
-	if (!current_override && isapnp_present()) {
-		struct pnp_dev *dev = NULL;
-		count = 0;
-		while ((dev = pnp_find_dev(NULL, ISAPNP_VENDOR('D', 'T', 'C'), ISAPNP_FUNCTION(0x436e), dev))) {
-			if (count >= NO_OVERRIDES)
-				break;
-			if (pnp_device_attach(dev) < 0)
-				continue;
-			if (pnp_activate_dev(dev) < 0) {
-				printk(KERN_ERR "dtc436e probe: activate failed\n");
-				pnp_device_detach(dev);
-				continue;
-			}
-			if (!pnp_port_valid(dev, 0)) {
-				printk(KERN_ERR "dtc436e probe: no valid port\n");
-				pnp_device_detach(dev);
-				continue;
-			}
-			if (pnp_irq_valid(dev, 0))
-				overrides[count].irq = pnp_irq(dev, 0);
-			else
-				overrides[count].irq = NO_IRQ;
-			if (pnp_dma_valid(dev, 0))
-				overrides[count].dma = pnp_dma(dev, 0);
-			else
-				overrides[count].dma = DMA_NONE;
-			overrides[count].NCR5380_map_name = (NCR5380_map_type) pnp_port_start(dev, 0);
-			overrides[count].board = BOARD_DTC3181E;
-			count++;
-		}
+	ports = NULL;
+	flags = 0;
+	switch (board) {
+	case BOARD_NCR5380:
+		flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP;
+		break;
+	case BOARD_NCR53C400A:
+		ports = ncr_53c400a_ports;
+		magic = ncr_53c400a_magic;
+		break;
+	case BOARD_HP_C2502:
+		ports = ncr_53c400a_ports;
+		magic = hp_c2502_magic;
+		break;
+	case BOARD_DTC3181E:
+		ports = dtc_3181e_ports;
+		magic = ncr_53c400a_magic;
+		break;
 	}
-#endif
-
-	for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
-		if (!(overrides[current_override].NCR5380_map_name))
-			continue;
-
-		ports = NULL;
-		flags = 0;
-		switch (overrides[current_override].board) {
-		case BOARD_NCR5380:
-			flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP;
-			break;
-		case BOARD_NCR53C400A:
-			ports = ncr_53c400a_ports;
-			magic = ncr_53c400a_magic;
-			break;
-		case BOARD_HP_C2502:
-			ports = ncr_53c400a_ports;
-			magic = hp_c2502_magic;
-			break;
-		case BOARD_DTC3181E:
-			ports = dtc_3181e_ports;
-			magic = ncr_53c400a_magic;
-			break;
-		}
 
 #ifndef SCSI_G_NCR5380_MEM
-		if (ports && magic) {
-			/* wakeup sequence for the NCR53C400A and DTC3181E */
+	if (ports && magic) {
+		/* wakeup sequence for the NCR53C400A and DTC3181E */
 
-			/* Disable the adapter and look for a free io port */
-			magic_configure(-1, 0, magic);
+		/* Disable the adapter and look for a free io port */
+		magic_configure(-1, 0, magic);
 
-			region_size = 16;
-
-			if (overrides[current_override].NCR5380_map_name != PORT_AUTO)
-				for (i = 0; ports[i]; i++) {
-					if (!request_region(ports[i], region_size, "ncr53c80"))
-						continue;
-					if (overrides[current_override].NCR5380_map_name == ports[i])
-						break;
-					release_region(ports[i], region_size);
-			} else
-				for (i = 0; ports[i]; i++) {
-					if (!request_region(ports[i], region_size, "ncr53c80"))
-						continue;
-					if (inb(ports[i]) == 0xff)
-						break;
-					release_region(ports[i], region_size);
+		region_size = 16;
+		if (base)
+			for (i = 0; ports[i]; i++) {
+				if (base == ports[i]) {	/* index found */
+					if (!request_region(ports[i],
+							    region_size,
+							    "ncr53c80"))
+						return -EBUSY;
+					break;
 				}
-			if (ports[i]) {
-				/* At this point we have our region reserved */
-				magic_configure(i, 0, magic); /* no IRQ yet */
-				outb(0xc0, ports[i] + 9);
-				if (inb(ports[i] + 9) != 0x80)
-					continue;
-				overrides[current_override].NCR5380_map_name = ports[i];
-				port_idx = i;
-			} else
-				continue;
-		}
-		else
-		{
-			/* Not a 53C400A style setup - just grab */
-			region_size = 8;
-			if (!request_region(overrides[current_override].NCR5380_map_name,
-			                    region_size, "ncr5380"))
-				continue;
-		}
-#else
-		base = overrides[current_override].NCR5380_map_name;
-		iomem_size = NCR53C400_region_size;
-		if (!request_mem_region(base, iomem_size, "ncr5380"))
-			continue;
-		iomem = ioremap(base, iomem_size);
-		if (!iomem) {
-			release_mem_region(base, iomem_size);
-			continue;
-		}
-#endif
-		instance = scsi_register(tpnt, sizeof(struct NCR5380_hostdata));
-		if (instance == NULL)
-			goto out_release;
-		hostdata = shost_priv(instance);
-
-#ifndef SCSI_G_NCR5380_MEM
-		instance->io_port = overrides[current_override].NCR5380_map_name;
-		instance->n_io_port = region_size;
-		hostdata->io_width = 1; /* 8-bit PDMA by default */
-
-		/*
-		 * On NCR53C400 boards, NCR5380 registers are mapped 8 past
-		 * the base address.
-		 */
-		switch (overrides[current_override].board) {
-		case BOARD_NCR53C400:
-			instance->io_port += 8;
-			hostdata->c400_ctl_status = 0;
-			hostdata->c400_blk_cnt = 1;
-			hostdata->c400_host_buf = 4;
-			break;
-		case BOARD_DTC3181E:
-			hostdata->io_width = 2;	/* 16-bit PDMA */
-			/* fall through */
-		case BOARD_NCR53C400A:
-		case BOARD_HP_C2502:
-			hostdata->c400_ctl_status = 9;
-			hostdata->c400_blk_cnt = 10;
-			hostdata->c400_host_buf = 8;
-			break;
-		}
-#else
-		instance->base = overrides[current_override].NCR5380_map_name;
-		hostdata->iomem = iomem;
-		hostdata->iomem_size = iomem_size;
-		switch (overrides[current_override].board) {
-		case BOARD_NCR53C400:
-			hostdata->c400_ctl_status = 0x100;
-			hostdata->c400_blk_cnt = 0x101;
-			hostdata->c400_host_buf = 0x104;
-			break;
-		case BOARD_DTC3181E:
-		case BOARD_NCR53C400A:
-		case BOARD_HP_C2502:
-			pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
-			goto out_unregister;
-		}
-#endif
-
-		if (NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP))
-			goto out_unregister;
-
-		switch (overrides[current_override].board) {
-		case BOARD_NCR53C400:
-		case BOARD_DTC3181E:
-		case BOARD_NCR53C400A:
-		case BOARD_HP_C2502:
-			NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
-		}
-
-		NCR5380_maybe_reset_bus(instance);
-
-		if (overrides[current_override].irq != IRQ_AUTO)
-			instance->irq = overrides[current_override].irq;
-		else
-			instance->irq = NCR5380_probe_irq(instance, 0xffff);
-
-		/* Compatibility with documented NCR5380 kernel parameters */
-		if (instance->irq == 255)
-			instance->irq = NO_IRQ;
-
-		if (instance->irq != NO_IRQ) {
-#ifndef SCSI_G_NCR5380_MEM
-			/* set IRQ for HP C2502 */
-			if (overrides[current_override].board == BOARD_HP_C2502)
-				magic_configure(port_idx, instance->irq, magic);
-#endif
-			if (request_irq(instance->irq, generic_NCR5380_intr,
-					0, "NCR5380", instance)) {
-				printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
-				instance->irq = NO_IRQ;
 			}
-		}
-
-		if (instance->irq == NO_IRQ) {
-			printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
-			printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
-		}
-
-		++current_override;
-		++count;
+		else
+			for (i = 0; ports[i]; i++) {
+				if (!request_region(ports[i], region_size,
+						    "ncr53c80"))
+					continue;
+				if (inb(ports[i]) == 0xff)
+					break;
+				release_region(ports[i], region_size);
+			}
+		if (ports[i]) {
+			/* At this point we have our region reserved */
+			magic_configure(i, 0, magic); /* no IRQ yet */
+			outb(0xc0, ports[i] + 9);
+			if (inb(ports[i] + 9) != 0x80) {
+				ret = -ENODEV;
+				goto out_release;
+			}
+			base = ports[i];
+			port_idx = i;
+		} else
+			return -EINVAL;
 	}
-	return count;
+	else
+	{
+		/* NCR5380 - no configuration, just grab */
+		region_size = 8;
+		if (!base || !request_region(base, region_size, "ncr5380"))
+			return -EBUSY;
+	}
+#else
+	iomem_size = NCR53C400_region_size;
+	if (!request_mem_region(base, iomem_size, "ncr5380"))
+		return -EBUSY;
+	iomem = ioremap(base, iomem_size);
+	if (!iomem) {
+		release_mem_region(base, iomem_size);
+		return -ENOMEM;
+	}
+#endif
+	instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata));
+	if (instance == NULL) {
+		ret = -ENOMEM;
+		goto out_release;
+	}
+	hostdata = shost_priv(instance);
 
+#ifndef SCSI_G_NCR5380_MEM
+	instance->io_port = base;
+	instance->n_io_port = region_size;
+	hostdata->io_width = 1; /* 8-bit PDMA by default */
+
+	/*
+	 * On NCR53C400 boards, NCR5380 registers are mapped 8 past
+	 * the base address.
+	 */
+	switch (board) {
+	case BOARD_NCR53C400:
+		instance->io_port += 8;
+		hostdata->c400_ctl_status = 0;
+		hostdata->c400_blk_cnt = 1;
+		hostdata->c400_host_buf = 4;
+		break;
+	case BOARD_DTC3181E:
+		hostdata->io_width = 2;	/* 16-bit PDMA */
+		/* fall through */
+	case BOARD_NCR53C400A:
+	case BOARD_HP_C2502:
+		hostdata->c400_ctl_status = 9;
+		hostdata->c400_blk_cnt = 10;
+		hostdata->c400_host_buf = 8;
+		break;
+	}
+#else
+	instance->base = base;
+	hostdata->iomem = iomem;
+	hostdata->iomem_size = iomem_size;
+	switch (board) {
+	case BOARD_NCR53C400:
+		hostdata->c400_ctl_status = 0x100;
+		hostdata->c400_blk_cnt = 0x101;
+		hostdata->c400_host_buf = 0x104;
+		break;
+	case BOARD_DTC3181E:
+	case BOARD_NCR53C400A:
+	case BOARD_HP_C2502:
+		pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
+		ret = -EINVAL;
+		goto out_unregister;
+	}
+#endif
+
+	ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP);
+	if (ret)
+		goto out_unregister;
+
+	switch (board) {
+	case BOARD_NCR53C400:
+	case BOARD_DTC3181E:
+	case BOARD_NCR53C400A:
+	case BOARD_HP_C2502:
+		NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
+	}
+
+	NCR5380_maybe_reset_bus(instance);
+
+	if (irq != IRQ_AUTO)
+		instance->irq = irq;
+	else
+		instance->irq = NCR5380_probe_irq(instance, 0xffff);
+
+	/* Compatibility with documented NCR5380 kernel parameters */
+	if (instance->irq == 255)
+		instance->irq = NO_IRQ;
+
+	if (instance->irq != NO_IRQ) {
+#ifndef SCSI_G_NCR5380_MEM
+		/* set IRQ for HP C2502 */
+		if (board == BOARD_HP_C2502)
+			magic_configure(port_idx, instance->irq, magic);
+#endif
+		if (request_irq(instance->irq, generic_NCR5380_intr,
+				0, "NCR5380", instance)) {
+			printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
+			instance->irq = NO_IRQ;
+		}
+	}
+
+	if (instance->irq == NO_IRQ) {
+		printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+		printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+	}
+
+	ret = scsi_add_host(instance, pdev);
+	if (ret)
+		goto out_free_irq;
+	scsi_scan_host(instance);
+	dev_set_drvdata(pdev, instance);
+	return 0;
+
+out_free_irq:
+	if (instance->irq != NO_IRQ)
+		free_irq(instance->irq, instance);
+	NCR5380_exit(instance);
 out_unregister:
-	scsi_unregister(instance);
+	scsi_host_put(instance);
 out_release:
 #ifndef SCSI_G_NCR5380_MEM
-	release_region(overrides[current_override].NCR5380_map_name, region_size);
+	release_region(base, region_size);
 #else
 	iounmap(iomem);
 	release_mem_region(base, iomem_size);
 #endif
-	return count;
+	return ret;
 }
 
-/**
- *	generic_NCR5380_release_resources	-	free resources
- *	@instance: host adapter to clean up 
- *
- *	Free the generic interface resources from this adapter.
- *
- *	Locks: none
- */
- 
-static int generic_NCR5380_release_resources(struct Scsi_Host *instance)
+static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
 {
+	scsi_remove_host(instance);
 	if (instance->irq != NO_IRQ)
 		free_irq(instance->irq, instance);
 	NCR5380_exit(instance);
@@ -511,7 +329,7 @@
 		release_mem_region(instance->base, hostdata->iomem_size);
 	}
 #endif
-	return 0;
+	scsi_host_put(instance);
 }
 
 /**
@@ -701,10 +519,9 @@
 #include "NCR5380.c"
 
 static struct scsi_host_template driver_template = {
+	.module			= THIS_MODULE,
 	.proc_name		= DRV_MODULE_NAME,
 	.name			= "Generic NCR5380/NCR53C400 SCSI",
-	.detect			= generic_NCR5380_detect,
-	.release		= generic_NCR5380_release_resources,
 	.info			= generic_NCR5380_info,
 	.queuecommand		= generic_NCR5380_queue_command,
 	.eh_abort_handler	= generic_NCR5380_abort,
@@ -718,31 +535,115 @@
 	.max_sectors		= 128,
 };
 
-#include "scsi_module.c"
 
-module_param(ncr_irq, int, 0);
-module_param(ncr_dma, int, 0);
-module_param(ncr_addr, int, 0);
-module_param(ncr_5380, int, 0);
-module_param(ncr_53c400, int, 0);
-module_param(ncr_53c400a, int, 0);
-module_param(dtc_3181e, int, 0);
-module_param(hp_c2502, int, 0);
-MODULE_LICENSE("GPL");
+static int generic_NCR5380_isa_match(struct device *pdev, unsigned int ndev)
+{
+	int ret = generic_NCR5380_init_one(&driver_template, pdev, base[ndev],
+					  irq[ndev], card[ndev]);
+	if (ret) {
+		if (base[ndev])
+			printk(KERN_WARNING "Card not found at address 0x%03x\n",
+			       base[ndev]);
+		return 0;
+	}
 
-#if !defined(SCSI_G_NCR5380_MEM) && defined(MODULE)
-static struct isapnp_device_id id_table[] = {
-	{
-	 ISAPNP_ANY_ID, ISAPNP_ANY_ID,
-	 ISAPNP_VENDOR('D', 'T', 'C'), ISAPNP_FUNCTION(0x436e),
-	 0},
-	{0}
+	return 1;
+}
+
+static int generic_NCR5380_isa_remove(struct device *pdev,
+				   unsigned int ndev)
+{
+	generic_NCR5380_release_resources(dev_get_drvdata(pdev));
+	dev_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static struct isa_driver generic_NCR5380_isa_driver = {
+	.match		= generic_NCR5380_isa_match,
+	.remove		= generic_NCR5380_isa_remove,
+	.driver		= {
+		.name	= DRV_MODULE_NAME
+	},
 };
 
-MODULE_DEVICE_TABLE(isapnp, id_table);
-#endif
+#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+static struct pnp_device_id generic_NCR5380_pnp_ids[] = {
+	{ .id = "DTC436e", .driver_data = BOARD_DTC3181E },
+	{ .id = "" }
+};
+MODULE_DEVICE_TABLE(pnp, generic_NCR5380_pnp_ids);
 
-__setup("ncr5380=", do_NCR5380_setup);
-__setup("ncr53c400=", do_NCR53C400_setup);
-__setup("ncr53c400a=", do_NCR53C400A_setup);
-__setup("dtc3181e=", do_DTC3181E_setup);
+static int generic_NCR5380_pnp_probe(struct pnp_dev *pdev,
+			       const struct pnp_device_id *id)
+{
+	int base, irq;
+
+	if (pnp_activate_dev(pdev) < 0)
+		return -EBUSY;
+
+	base = pnp_port_start(pdev, 0);
+	irq = pnp_irq(pdev, 0);
+
+	return generic_NCR5380_init_one(&driver_template, &pdev->dev, base, irq,
+				       id->driver_data);
+}
+
+static void generic_NCR5380_pnp_remove(struct pnp_dev *pdev)
+{
+	generic_NCR5380_release_resources(pnp_get_drvdata(pdev));
+	pnp_set_drvdata(pdev, NULL);
+}
+
+static struct pnp_driver generic_NCR5380_pnp_driver = {
+	.name		= DRV_MODULE_NAME,
+	.id_table	= generic_NCR5380_pnp_ids,
+	.probe		= generic_NCR5380_pnp_probe,
+	.remove		= generic_NCR5380_pnp_remove,
+};
+#endif /* !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) */
+
+static int pnp_registered, isa_registered;
+
+static int __init generic_NCR5380_init(void)
+{
+	int ret = 0;
+
+	/* compatibility with old-style parameters */
+	if (irq[0] == 0 && base[0] == 0 && card[0] == -1) {
+		irq[0] = ncr_irq;
+		base[0] = ncr_addr;
+		if (ncr_5380)
+			card[0] = BOARD_NCR5380;
+		if (ncr_53c400)
+			card[0] = BOARD_NCR53C400;
+		if (ncr_53c400a)
+			card[0] = BOARD_NCR53C400A;
+		if (dtc_3181e)
+			card[0] = BOARD_DTC3181E;
+		if (hp_c2502)
+			card[0] = BOARD_HP_C2502;
+	}
+
+#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+	if (!pnp_register_driver(&generic_NCR5380_pnp_driver))
+		pnp_registered = 1;
+#endif
+	ret = isa_register_driver(&generic_NCR5380_isa_driver, MAX_CARDS);
+	if (!ret)
+		isa_registered = 1;
+
+	return (pnp_registered || isa_registered) ? 0 : ret;
+}
+
+static void __exit generic_NCR5380_exit(void)
+{
+#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+	if (pnp_registered)
+		pnp_unregister_driver(&generic_NCR5380_pnp_driver);
+#endif
+	if (isa_registered)
+		isa_unregister_driver(&generic_NCR5380_isa_driver);
+}
+
+module_init(generic_NCR5380_init);
+module_exit(generic_NCR5380_exit);
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index 5951774..b175b92 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -14,15 +14,9 @@
 #ifndef GENERIC_NCR5380_H
 #define GENERIC_NCR5380_H
 
-#define __STRVAL(x) #x
-#define STRVAL(x) __STRVAL(x)
-
 #ifndef SCSI_G_NCR5380_MEM
 #define DRV_MODULE_NAME "g_NCR5380"
 
-#define NCR5380_map_type int
-#define NCR5380_map_name port
-
 #define NCR5380_read(reg) \
 	inb(instance->io_port + (reg))
 #define NCR5380_write(reg, value) \
@@ -38,8 +32,6 @@
 /* therefore SCSI_G_NCR5380_MEM */
 #define DRV_MODULE_NAME "g_NCR5380_mmio"
 
-#define NCR5380_map_type unsigned long
-#define NCR5380_map_name base
 #define NCR53C400_mem_base 0x3880
 #define NCR53C400_host_buffer 0x3900
 #define NCR53C400_region_size 0x3a00
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 7af5226..618422e 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4922,9 +4922,8 @@
 	res = get_user_pages_unlocked(
 		uaddr,
 		nr_pages,
-		rw == READ,
-		0, /* don't force */
-		pages);
+		pages,
+		rw == READ ? FOLL_WRITE : 0); /* don't force */
 
 	/* Errors and no page mapped should return here */
 	if (res < nr_pages)
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 4796690..e27b4d4 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -63,7 +63,7 @@
 
 config SCSI_UFS_DWC_TC_PCI
 	tristate "DesignWare pci support using a G210 Test Chip"
-	depends on SCSI_UFSHCD && PCI
+	depends on SCSI_UFSHCD_PCI
 	---help---
 	  Synopsys Test Chip is a PHY for prototyping purposes.
 
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index ee4ab85..22f881e 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -25,6 +25,7 @@
 
 #define UFS_VENDOR_TOSHIBA     0x198
 #define UFS_VENDOR_SAMSUNG     0x1CE
+#define UFS_VENDOR_SKHYNIX     0x1AD
 
 /**
  * ufs_device_info - ufs device details
@@ -145,6 +146,7 @@
 		UFS_DEVICE_QUIRK_PA_TACTIVATE),
 	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
 		UFS_DEVICE_QUIRK_PA_TACTIVATE),
+	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
 
 	END_FIX
 };
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 37f3c51..05c7456 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1266,9 +1266,12 @@
 	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
 			0, query->request.query_func, 0, 0);
 
-	/* Data segment length */
-	ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
-			0, 0, len >> 8, (u8)len);
+	/* Data segment length only need for WRITE_DESC */
+	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+		ucd_req_ptr->header.dword_2 =
+			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
+	else
+		ucd_req_ptr->header.dword_2 = 0;
 
 	/* Copy the Query Request buffer as is */
 	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
@@ -6500,6 +6503,7 @@
 		if (IS_ERR(hba->devfreq)) {
 			dev_err(hba->dev, "Unable to register with devfreq %ld\n",
 					PTR_ERR(hba->devfreq));
+			err = PTR_ERR(hba->devfreq);
 			goto out_remove_scsi_host;
 		}
 		/* Suspend devfreq until the UFS device is detected */
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index fe42a2f..e6e90e8 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -1,6 +1,7 @@
 menu "SOC (System On Chip) specific Drivers"
 
 source "drivers/soc/bcm/Kconfig"
+source "drivers/soc/fsl/qbman/Kconfig"
 source "drivers/soc/fsl/qe/Kconfig"
 source "drivers/soc/mediatek/Kconfig"
 source "drivers/soc/qcom/Kconfig"
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
index 203307f..75e1f53 100644
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
@@ -2,5 +2,6 @@
 # Makefile for the Linux Kernel SOC fsl specific device drivers
 #
 
+obj-$(CONFIG_FSL_DPAA)                 += qbman/
 obj-$(CONFIG_QUICC_ENGINE)		+= qe/
 obj-$(CONFIG_CPM)			+= qe/
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
new file mode 100644
index 0000000..757033c
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -0,0 +1,67 @@
+menuconfig FSL_DPAA
+	bool "Freescale DPAA 1.x support"
+	depends on FSL_SOC_BOOKE
+	select GENERIC_ALLOCATOR
+	help
+	  The Freescale Data Path Acceleration Architecture (DPAA) is a set of
+	  hardware components on specific QorIQ multicore processors.
+	  This architecture provides the infrastructure to support simplified
+	  sharing of networking interfaces and accelerators by multiple CPUs.
+	  The major h/w blocks composing DPAA are BMan and QMan.
+
+	  The Buffer Manager (BMan) is a hardware buffer pool management block
+	  that allows software and accelerators on the datapath to acquire and
+	  release buffers in order to build frames.
+
+	  The Queue Manager (QMan) is a hardware queue management block
+	  that allows software and accelerators on the datapath to enqueue and
+	  dequeue frames in order to communicate.
+
+if FSL_DPAA
+
+config FSL_DPAA_CHECKING
+	bool "Additional driver checking"
+	help
+	  Compiles in additional checks, to sanity-check the drivers and
+	  any use of the exported API. Not recommended for performance.
+
+config FSL_BMAN_TEST
+	tristate "BMan self-tests"
+	help
+	  Compile the BMan self-test code. These tests will
+	  exercise the BMan APIs to confirm functionality
+	  of both the software drivers and hardware device.
+
+config FSL_BMAN_TEST_API
+	bool "High-level API self-test"
+	depends on FSL_BMAN_TEST
+	default y
+	help
+	  This requires the presence of cpu-affine portals, and performs
+	  high-level API testing with them (whichever portal(s) are affine
+	  to the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST
+	tristate "QMan self-tests"
+	help
+	  Compile self-test code for QMan.
+
+config FSL_QMAN_TEST_API
+	bool "QMan high-level self-test"
+	depends on FSL_QMAN_TEST
+	default y
+	help
+	  This requires the presence of cpu-affine portals, and performs
+	  high-level API testing with them (whichever portal(s) are affine to
+	  the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST_STASH
+	bool "QMan 'hot potato' data-stashing self-test"
+	depends on FSL_QMAN_TEST
+	default y
+	help
+	  This performs a "hot potato" style test enqueuing/dequeuing a frame
+	  across a series of FQs scheduled to different portals (and cpus), with
+	  DQRR, data and context stashing always on.
+
+endif # FSL_DPAA
diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile
new file mode 100644
index 0000000..7ae199f
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Makefile
@@ -0,0 +1,12 @@
+obj-$(CONFIG_FSL_DPAA)                          += bman_ccsr.o qman_ccsr.o \
+						   bman_portal.o qman_portal.o \
+						   bman.o qman.o
+
+obj-$(CONFIG_FSL_BMAN_TEST)                     += bman-test.o
+bman-test-y                                      = bman_test.o
+bman-test-$(CONFIG_FSL_BMAN_TEST_API)           += bman_test_api.o
+
+obj-$(CONFIG_FSL_QMAN_TEST)			+= qman-test.o
+qman-test-y					 = qman_test.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_API)		+= qman_test_api.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_STASH)		+= qman_test_stash.o
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
new file mode 100644
index 0000000..ffa48fd
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -0,0 +1,797 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define IRQNAME		"BMan portal %d"
+#define MAX_IRQNAME	16	/* big enough for "BMan portal %d" */
+
+/* Portal register assists */
+
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH	0x0000
+#define BM_REG_RCR_CI_CINH	0x0004
+#define BM_REG_RCR_ITR		0x0008
+#define BM_REG_CFG		0x0100
+#define BM_REG_SCN(n)		(0x0200 + ((n) << 2))
+#define BM_REG_ISR		0x0e00
+#define BM_REG_IER		0x0e04
+#define BM_REG_ISDR		0x0e08
+#define BM_REG_IIR		0x0e0c
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR		0x0000
+#define BM_CL_RR0		0x0100
+#define BM_CL_RR1		0x0140
+#define BM_CL_RCR		0x1000
+#define BM_CL_RCR_PI_CENA	0x3000
+#define BM_CL_RCR_CI_CENA	0x3100
+
+/*
+ * Portal modes.
+ *   Enum types;
+ *     pmode == production mode
+ *     cmode == consumption mode,
+ *   Enum values use 3 letter codes. First letter matches the portal mode,
+ *   remaining two letters indicate;
+ *     ci == cache-inhibited portal register
+ *     ce == cache-enabled portal register
+ *     vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode {		/* matches BCSP_CFG::RPM */
+	bm_rcr_pci = 0,		/* PI index, cache-inhibited */
+	bm_rcr_pce = 1,		/* PI index, cache-enabled */
+	bm_rcr_pvb = 2		/* valid-bit */
+};
+enum bm_rcr_cmode {		/* s/w-only */
+	bm_rcr_cci,		/* CI index, cache-inhibited */
+	bm_rcr_cce		/* CI index, cache-enabled */
+};
+
+
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE		8
+
+/* Release Command */
+struct bm_rcr_entry {
+	union {
+		struct {
+			u8 _ncw_verb; /* writes to this are non-coherent */
+			u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+			u8 __reserved1[62];
+		};
+		struct bm_buffer bufs[8];
+	};
+};
+#define BM_RCR_VERB_VBIT		0x80
+#define BM_RCR_VERB_CMD_MASK		0x70	/* one of two values; */
+#define BM_RCR_VERB_CMD_BPID_SINGLE	0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI	0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK	0x0f	/* values 1..8 */
+
+struct bm_rcr {
+	struct bm_rcr_entry *ring, *cursor;
+	u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	u32 busy;
+	enum bm_rcr_pmode pmode;
+	enum bm_rcr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+struct bm_mc_command {
+	u8 _ncw_verb; /* writes to this are non-coherent */
+	u8 bpid; /* used by acquire command */
+	u8 __reserved[62];
+};
+#define BM_MCC_VERB_VBIT		0x80
+#define BM_MCC_VERB_CMD_MASK		0x70	/* where the verb contains; */
+#define BM_MCC_VERB_CMD_ACQUIRE		0x10
+#define BM_MCC_VERB_CMD_QUERY		0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT	0x0f	/* values 1..8 go here */
+
+/* MC result, Acquire and Query Response */
+union bm_mc_result {
+	struct {
+		u8 verb;
+		u8 bpid;
+		u8 __reserved[62];
+	};
+	struct bm_buffer bufs[8];
+};
+#define BM_MCR_VERB_VBIT		0x80
+#define BM_MCR_VERB_CMD_MASK		BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE		BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY		BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID	0x60
+#define BM_MCR_VERB_CMD_ERR_ECC		0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT	BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+#define BM_MCR_TIMEOUT			10000 /* us */
+
+struct bm_mc {
+	struct bm_mc_command *cr;
+	union bm_mc_result *rr;
+	u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	enum {
+		/* Can only be _mc_start()ed */
+		mc_idle,
+		/* Can only be _mc_commit()ed or _mc_abort()ed */
+		mc_user,
+		/* Can only be _mc_retry()ed */
+		mc_hw
+	} state;
+#endif
+};
+
+struct bm_addr {
+	void __iomem *ce;	/* cache-enabled */
+	void __iomem *ci;	/* cache-inhibited */
+};
+
+struct bm_portal {
+	struct bm_addr addr;
+	struct bm_rcr rcr;
+	struct bm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 bm_in(struct bm_portal *p, u32 offset)
+{
+	return __raw_readl(p->addr.ci + offset);
+}
+
+static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
+{
+	__raw_writel(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
+{
+	dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
+{
+	dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
+{
+	return __raw_readl(p->addr.ce + offset);
+}
+
+struct bman_portal {
+	struct bm_portal p;
+	/* interrupt sources processed by portal_isr(), configurable */
+	unsigned long irq_sources;
+	/* probing time config params for cpu-affine portals */
+	const struct bm_portal_config *config;
+	char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
+
+static inline struct bman_portal *get_affine_portal(void)
+{
+	return &get_cpu_var(bman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+	put_cpu_var(bman_affine_portal);
+}
+
+/*
+ * This object type refers to a pool, it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, eg. if different users of the
+ * pool are operating via different portals.
+ */
+struct bman_pool {
+	/* index of the buffer pool to encapsulate (0-63) */
+	u32 bpid;
+	/* Used for hash-table admin when using depletion notifications. */
+	struct bman_portal *portal;
+	struct bman_pool *next;
+};
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+	struct bman_portal *p = ptr;
+	struct bm_portal *portal = &p->p;
+	u32 clear = p->irq_sources;
+	u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;
+
+	if (unlikely(!is))
+		return IRQ_NONE;
+
+	clear |= poll_portal_slow(p, is);
+	bm_out(portal, BM_REG_ISR, clear);
+	return IRQ_HANDLED;
+}
+
+/* --- RCR API --- */
+
+#define RCR_SHIFT	ilog2(sizeof(struct bm_rcr_entry))
+#define RCR_CARRY	(uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
+{
+	uintptr_t addr = (uintptr_t)p;
+
+	addr &= ~RCR_CARRY;
+
+	return (struct bm_rcr_entry *)addr;
+}
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int rcr_ptr2idx(struct bm_rcr_entry *e)
+{
+	return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
+}
+#endif
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void rcr_inc(struct bm_rcr *rcr)
+{
+	/* increment to the next RCR pointer and handle overflow and 'vbit' */
+	struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+	rcr->cursor = rcr_carryclear(partial);
+	if (partial != rcr->cursor)
+		rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
+
+static int bm_rcr_get_avail(struct bm_portal *portal)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+
+	return rcr->available;
+}
+
+static int bm_rcr_get_fill(struct bm_portal *portal)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+
+	return BM_RCR_SIZE - 1 - rcr->available;
+}
+
+static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+
+	rcr->ithresh = ithresh;
+	bm_out(portal, BM_REG_RCR_ITR, ithresh);
+}
+
+static void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+	__maybe_unused struct bm_rcr *rcr = &portal->rcr;
+
+	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+	bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
+}
+
+static u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+	u8 diff, old_ci = rcr->ci;
+
+	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+	rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
+	bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
+	diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+	rcr->available += diff;
+	return diff;
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+
+	DPAA_ASSERT(!rcr->busy);
+	if (!rcr->available)
+		return NULL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	rcr->busy = 1;
+#endif
+	dpaa_zero(rcr->cursor);
+	return rcr->cursor;
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+	struct bm_rcr_entry *rcursor;
+
+	DPAA_ASSERT(rcr->busy);
+	DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
+	DPAA_ASSERT(rcr->available >= 1);
+	dma_wmb();
+	rcursor = rcr->cursor;
+	rcursor->_ncw_verb = myverb | rcr->vbit;
+	dpaa_flush(rcursor);
+	rcr_inc(rcr);
+	rcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	rcr->busy = 0;
+#endif
+}
+
+static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+		       enum bm_rcr_cmode cmode)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+	u32 cfg;
+	u8 pi;
+
+	rcr->ring = portal->addr.ce + BM_CL_RCR;
+	rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+	pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+	rcr->cursor = rcr->ring + pi;
+	rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
+		BM_RCR_VERB_VBIT : 0;
+	rcr->available = BM_RCR_SIZE - 1
+		- dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+	rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	rcr->busy = 0;
+	rcr->pmode = pmode;
+	rcr->cmode = cmode;
+#endif
+	cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
+		| (pmode & 0x3); /* BCSP_CFG::RPM */
+	bm_out(portal, BM_REG_CFG, cfg);
+	return 0;
+}
+
+static void bm_rcr_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	struct bm_rcr *rcr = &portal->rcr;
+	int i;
+
+	DPAA_ASSERT(!rcr->busy);
+
+	i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+	if (i != rcr_ptr2idx(rcr->cursor))
+		pr_crit("losing uncommited RCR entries\n");
+
+	i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+	if (i != rcr->ci)
+		pr_crit("missing existing RCR completions\n");
+	if (rcr->ci != rcr_ptr2idx(rcr->cursor))
+		pr_crit("RCR destroyed unquiesced\n");
+#endif
+}
+
+/* --- Management command API --- */
+static int bm_mc_init(struct bm_portal *portal)
+{
+	struct bm_mc *mc = &portal->mc;
+
+	mc->cr = portal->addr.ce + BM_CL_CR;
+	mc->rr = portal->addr.ce + BM_CL_RR0;
+	mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ?
+		    0 : 1;
+	mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = mc_idle;
+#endif
+	return 0;
+}
+
+static void bm_mc_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	struct bm_mc *mc = &portal->mc;
+
+	DPAA_ASSERT(mc->state == mc_idle);
+	if (mc->state != mc_idle)
+		pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+	struct bm_mc *mc = &portal->mc;
+
+	DPAA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = mc_user;
+#endif
+	dpaa_zero(mc->cr);
+	return mc->cr;
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+	struct bm_mc *mc = &portal->mc;
+	union bm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPAA_ASSERT(mc->state == mc_user);
+	dma_wmb();
+	mc->cr->_ncw_verb = myverb | mc->vbit;
+	dpaa_flush(mc->cr);
+	dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = mc_hw;
+#endif
+}
+
+static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+	struct bm_mc *mc = &portal->mc;
+	union bm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPAA_ASSERT(mc->state == mc_hw);
+	/*
+	 * The inactive response register's verb byte always returns zero until
+	 * its command is submitted and completed. This includes the valid-bit,
+	 * in case you were wondering...
+	 */
+	if (!__raw_readb(&rr->verb)) {
+		dpaa_invalidate_touch_ro(rr);
+		return NULL;
+	}
+	mc->rridx ^= 1;
+	mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = mc_idle;
+#endif
+	return rr;
+}
+
+static inline int bm_mc_result_timeout(struct bm_portal *portal,
+				       union bm_mc_result **mcr)
+{
+	int timeout = BM_MCR_TIMEOUT;
+
+	do {
+		*mcr = bm_mc_result(portal);
+		if (*mcr)
+			break;
+		udelay(1);
+	} while (--timeout);
+
+	return timeout;
+}
+
+/* Disable all BSCN interrupts for the portal */
+static void bm_isr_bscn_disable(struct bm_portal *portal)
+{
+	bm_out(portal, BM_REG_SCN(0), 0);
+	bm_out(portal, BM_REG_SCN(1), 0);
+}
+
+static int bman_create_portal(struct bman_portal *portal,
+			      const struct bm_portal_config *c)
+{
+	struct bm_portal *p;
+	int ret;
+
+	p = &portal->p;
+	/*
+	 * prep the low-level portal struct with the mapped addresses from the
+	 * config, everything that follows depends on it and "config" is more
+	 * for (de)reference...
+	 */
+	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+	if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
+		dev_err(c->dev, "RCR initialisation failed\n");
+		goto fail_rcr;
+	}
+	if (bm_mc_init(p)) {
+		dev_err(c->dev, "MC initialisation failed\n");
+		goto fail_mc;
+	}
+	/*
+	 * Default to all BPIDs disabled, we enable as required at
+	 * run-time.
+	 */
+	bm_isr_bscn_disable(p);
+
+	/* Write-to-clear any stale interrupt status bits */
+	bm_out(p, BM_REG_ISDR, 0xffffffff);
+	portal->irq_sources = 0;
+	bm_out(p, BM_REG_IER, 0);
+	bm_out(p, BM_REG_ISR, 0xffffffff);
+	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+	if (request_irq(c->irq, portal_isr, 0, portal->irqname,	portal)) {
+		dev_err(c->dev, "request_irq() failed\n");
+		goto fail_irq;
+	}
+	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+		dev_err(c->dev, "irq_set_affinity() failed\n");
+		goto fail_affinity;
+	}
+
+	/* Need RCR to be empty before continuing */
+	ret = bm_rcr_get_fill(p);
+	if (ret) {
+		dev_err(c->dev, "RCR unclean\n");
+		goto fail_rcr_empty;
+	}
+	/* Success */
+	portal->config = c;
+
+	bm_out(p, BM_REG_ISDR, 0);
+	bm_out(p, BM_REG_IIR, 0);
+
+	return 0;
+
+fail_rcr_empty:
+fail_affinity:
+	free_irq(c->irq, portal);
+fail_irq:
+	bm_mc_finish(p);
+fail_mc:
+	bm_rcr_finish(p);
+fail_rcr:
+	return -EIO;
+}
+
+struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
+{
+	struct bman_portal *portal;
+	int err;
+
+	portal = &per_cpu(bman_affine_portal, c->cpu);
+	err = bman_create_portal(portal, c);
+	if (err)
+		return NULL;
+
+	spin_lock(&affine_mask_lock);
+	cpumask_set_cpu(c->cpu, &affine_mask);
+	spin_unlock(&affine_mask_lock);
+
+	return portal;
+}
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is)
+{
+	u32 ret = is;
+
+	if (is & BM_PIRQ_RCRI) {
+		bm_rcr_cce_update(&p->p);
+		bm_rcr_set_ithresh(&p->p, 0);
+		bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
+		is &= ~BM_PIRQ_RCRI;
+	}
+
+	/* There should be no status register bits left undefined */
+	DPAA_ASSERT(!is);
+	return ret;
+}
+
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
+{
+	unsigned long irqflags;
+
+	local_irq_save(irqflags);
+	set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
+	bm_out(&p->p, BM_REG_IER, p->irq_sources);
+	local_irq_restore(irqflags);
+	return 0;
+}
+
+static int bm_shutdown_pool(u32 bpid)
+{
+	struct bm_mc_command *bm_cmd;
+	union bm_mc_result *bm_res;
+
+	while (1) {
+		struct bman_portal *p = get_affine_portal();
+		/* Acquire buffers until empty */
+		bm_cmd = bm_mc_start(&p->p);
+		bm_cmd->bpid = bpid;
+		bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+		if (!bm_mc_result_timeout(&p->p, &bm_res)) {
+			put_affine_portal();
+			pr_crit("BMan Acquire Command timedout\n");
+			return -ETIMEDOUT;
+		}
+		if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+			put_affine_portal();
+			/* Pool is empty */
+			return 0;
+		}
+		put_affine_portal();
+	}
+
+	return 0;
+}
+
+struct gen_pool *bm_bpalloc;
+
+static int bm_alloc_bpid_range(u32 *result, u32 count)
+{
+	unsigned long addr;
+
+	addr = gen_pool_alloc(bm_bpalloc, count);
+	if (!addr)
+		return -ENOMEM;
+
+	*result = addr & ~DPAA_GENALLOC_OFF;
+
+	return 0;
+}
+
+static int bm_release_bpid(u32 bpid)
+{
+	int ret;
+
+	ret = bm_shutdown_pool(bpid);
+	if (ret) {
+		pr_debug("BPID %d leaked\n", bpid);
+		return ret;
+	}
+
+	gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
+	return 0;
+}
+
+struct bman_pool *bman_new_pool(void)
+{
+	struct bman_pool *pool = NULL;
+	u32 bpid;
+
+	if (bm_alloc_bpid_range(&bpid, 1))
+		return NULL;
+
+	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		goto err;
+
+	pool->bpid = bpid;
+
+	return pool;
+err:
+	bm_release_bpid(bpid);
+	kfree(pool);
+	return NULL;
+}
+EXPORT_SYMBOL(bman_new_pool);
+
+void bman_free_pool(struct bman_pool *pool)
+{
+	bm_release_bpid(pool->bpid);
+
+	kfree(pool);
+}
+EXPORT_SYMBOL(bman_free_pool);
+
+int bman_get_bpid(const struct bman_pool *pool)
+{
+	return pool->bpid;
+}
+EXPORT_SYMBOL(bman_get_bpid);
+
+static void update_rcr_ci(struct bman_portal *p, int avail)
+{
+	if (avail)
+		bm_rcr_cce_prefetch(&p->p);
+	else
+		bm_rcr_cce_update(&p->p);
+}
+
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
+{
+	struct bman_portal *p;
+	struct bm_rcr_entry *r;
+	unsigned long irqflags;
+	int avail, timeout = 1000; /* 1ms */
+	int i = num - 1;
+
+	DPAA_ASSERT(num > 0 && num <= 8);
+
+	do {
+		p = get_affine_portal();
+		local_irq_save(irqflags);
+		avail = bm_rcr_get_avail(&p->p);
+		if (avail < 2)
+			update_rcr_ci(p, avail);
+		r = bm_rcr_start(&p->p);
+		local_irq_restore(irqflags);
+		put_affine_portal();
+		if (likely(r))
+			break;
+
+		udelay(1);
+	} while (--timeout);
+
+	if (unlikely(!timeout))
+		return -ETIMEDOUT;
+
+	p = get_affine_portal();
+	local_irq_save(irqflags);
+	/*
+	 * we can copy all but the first entry, as this can trigger badness
+	 * with the valid-bit
+	 */
+	bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
+	bm_buffer_set_bpid(r->bufs, pool->bpid);
+	if (i)
+		memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
+
+	bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+			  (num & BM_RCR_VERB_BUFCOUNT_MASK));
+
+	local_irq_restore(irqflags);
+	put_affine_portal();
+	return 0;
+}
+EXPORT_SYMBOL(bman_release);
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
+{
+	struct bman_portal *p = get_affine_portal();
+	struct bm_mc_command *mcc;
+	union bm_mc_result *mcr;
+	int ret;
+
+	DPAA_ASSERT(num > 0 && num <= 8);
+
+	mcc = bm_mc_start(&p->p);
+	mcc->bpid = pool->bpid;
+	bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+		     (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+	if (!bm_mc_result_timeout(&p->p, &mcr)) {
+		put_affine_portal();
+		pr_crit("BMan Acquire Timeout\n");
+		return -ETIMEDOUT;
+	}
+	ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+	if (bufs)
+		memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));
+
+	put_affine_portal();
+	if (ret != num)
+		ret = -ENOMEM;
+	return ret;
+}
+EXPORT_SYMBOL(bman_acquire);
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal)
+{
+	return portal->config;
+}
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
new file mode 100644
index 0000000..9deb052
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -0,0 +1,263 @@
+/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+u16 bman_ip_rev;
+EXPORT_SYMBOL(bman_ip_rev);
+
+/* Register offsets */
+#define REG_FBPR_FPC		0x0800
+#define REG_ECSR		0x0a00
+#define REG_ECIR		0x0a04
+#define REG_EADR		0x0a08
+#define REG_EDATA(n)		(0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n)		(0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1		0x0bf8
+#define REG_IP_REV_2		0x0bfc
+#define REG_FBPR_BARE		0x0c00
+#define REG_FBPR_BAR		0x0c04
+#define REG_FBPR_AR		0x0c10
+#define REG_SRCIDR		0x0d04
+#define REG_LIODNR		0x0d08
+#define REG_ERR_ISR		0x0e00
+#define REG_ERR_IER		0x0e04
+#define REG_ERR_ISDR		0x0e08
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI	0x00000010	/* Invalid Command Verb */
+#define BM_EIRQ_FLWI	0x00000008	/* FBPR Low Watermark */
+#define BM_EIRQ_MBEI	0x00000004	/* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI	0x00000002	/* Single-bit ECC Error */
+#define BM_EIRQ_BSCN	0x00000001	/* pool State Change Notification */
+
+struct bman_hwerr_txt {
+	u32 mask;
+	const char *txt;
+};
+
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+	{ BM_EIRQ_IVCI, "Invalid Command Verb" },
+	{ BM_EIRQ_FLWI, "FBPR Low Watermark" },
+	{ BM_EIRQ_MBEI, "Multi-bit ECC Error" },
+	{ BM_EIRQ_SBEI, "Single-bit ECC Error" },
+	{ BM_EIRQ_BSCN, "Pool State Change Notification" },
+};
+
+/* Only trigger low water mark interrupt once only */
+#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI
+
+/* Pointer to the start of the BMan's CCSR space */
+static u32 __iomem *bm_ccsr_start;
+
+static inline u32 bm_ccsr_in(u32 offset)
+{
+	return ioread32be(bm_ccsr_start + offset/4);
+}
+static inline void bm_ccsr_out(u32 offset, u32 val)
+{
+	iowrite32be(val, bm_ccsr_start + offset/4);
+}
+
+static void bm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+	u32 v = bm_ccsr_in(REG_IP_REV_1);
+	*id = (v >> 16);
+	*major = (v >> 8) & 0xff;
+	*minor = v & 0xff;
+}
+
+/* signal transactions for FBPRs with higher priority */
+#define FBPR_AR_RPRIO_HI BIT(30)
+
+static void bm_set_memory(u64 ba, u32 size)
+{
+	u32 exp = ilog2(size);
+	/* choke if size isn't within range */
+	DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
+		   is_power_of_2(size));
+	/* choke if '[e]ba' has lower-alignment than 'size' */
+	DPAA_ASSERT(!(ba & (size - 1)));
+	bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
+	bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
+	bm_ccsr_out(REG_FBPR_AR, exp - 1);
+}
+
+/*
+ * Location and size of BMan private memory
+ *
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved).  Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+static dma_addr_t fbpr_a;
+static size_t fbpr_sz;
+
+static int bman_fbpr(struct reserved_mem *rmem)
+{
+	fbpr_a = rmem->base;
+	fbpr_sz = rmem->size;
+
+	WARN_ON(!(fbpr_a && fbpr_sz));
+
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
+
+static irqreturn_t bman_isr(int irq, void *ptr)
+{
+	u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+	struct device *dev = ptr;
+
+	ier_val = bm_ccsr_in(REG_ERR_IER);
+	isr_val = bm_ccsr_in(REG_ERR_ISR);
+	ecsr_val = bm_ccsr_in(REG_ECSR);
+	isr_mask = isr_val & ier_val;
+
+	if (!isr_mask)
+		return IRQ_NONE;
+
+	for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) {
+		if (bman_hwerr_txts[i].mask & isr_mask) {
+			dev_err_ratelimited(dev, "ErrInt: %s\n",
+					    bman_hwerr_txts[i].txt);
+			if (bman_hwerr_txts[i].mask & ecsr_val) {
+				/* Re-arm error capture registers */
+				bm_ccsr_out(REG_ECSR, ecsr_val);
+			}
+			if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) {
+				dev_dbg(dev, "Disabling error 0x%x\n",
+					bman_hwerr_txts[i].mask);
+				ier_val &= ~bman_hwerr_txts[i].mask;
+				bm_ccsr_out(REG_ERR_IER, ier_val);
+			}
+		}
+	}
+	bm_ccsr_out(REG_ERR_ISR, isr_val);
+
+	return IRQ_HANDLED;
+}
+
+static int fsl_bman_probe(struct platform_device *pdev)
+{
+	int ret, err_irq;
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	u16 id, bm_pool_cnt;
+	u8 major, minor;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
+			node->full_name);
+		return -ENXIO;
+	}
+	bm_ccsr_start = devm_ioremap(dev, res->start,
+				     res->end - res->start + 1);
+	if (!bm_ccsr_start)
+		return -ENXIO;
+
+	bm_get_version(&id, &major, &minor);
+	if (major == 1 && minor == 0) {
+		bman_ip_rev = BMAN_REV10;
+		bm_pool_cnt = BM_POOL_MAX;
+	} else if (major == 2 && minor == 0) {
+		bman_ip_rev = BMAN_REV20;
+		bm_pool_cnt = 8;
+	} else if (major == 2 && minor == 1) {
+		bman_ip_rev = BMAN_REV21;
+		bm_pool_cnt = BM_POOL_MAX;
+	} else {
+		dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n",
+			id, major, minor);
+		return -ENODEV;
+	}
+
+	bm_set_memory(fbpr_a, fbpr_sz);
+
+	err_irq = platform_get_irq(pdev, 0);
+	if (err_irq <= 0) {
+		dev_info(dev, "Can't get %s IRQ\n", node->full_name);
+		return -ENODEV;
+	}
+	ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
+			       dev);
+	if (ret)  {
+		dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+			ret, node->full_name);
+		return ret;
+	}
+	/* Disable Buffer Pool State Change */
+	bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN);
+	/*
+	 * Write-to-clear any stale bits, (eg. starvation being asserted prior
+	 * to resource allocation during driver init).
+	 */
+	bm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+	/* Enable Error Interrupts */
+	bm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+	bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
+	if (IS_ERR(bm_bpalloc)) {
+		ret = PTR_ERR(bm_bpalloc);
+		dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret);
+		return ret;
+	}
+
+	/* seed BMan resource pool */
+	ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1);
+	if (ret) {
+		dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n",
+			0, bm_pool_cnt - 1, ret);
+		return ret;
+	}
+
+	return 0;
+};
+
+static const struct of_device_id fsl_bman_ids[] = {
+	{
+		.compatible = "fsl,bman",
+	},
+	{}
+};
+
+static struct platform_driver fsl_bman_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = fsl_bman_ids,
+		.suppress_bind_attrs = true,
+	},
+	.probe = fsl_bman_probe,
+};
+
+builtin_platform_driver(fsl_bman_driver);
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
new file mode 100644
index 0000000..6579cc1
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -0,0 +1,219 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+static struct bman_portal *affine_bportals[NR_CPUS];
+static struct cpumask portal_cpus;
+/* protect bman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(bman_lock);
+
+static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
+{
+	struct bman_portal *p = bman_create_affine_portal(pcfg);
+
+	if (!p) {
+		dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+			 __func__, pcfg->cpu);
+		return NULL;
+	}
+
+	bman_p_irqsource_add(p, BM_PIRQ_RCRI);
+	affine_bportals[pcfg->cpu] = p;
+
+	dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+	return p;
+}
+
+static void bman_offline_cpu(unsigned int cpu)
+{
+	struct bman_portal *p = affine_bportals[cpu];
+	const struct bm_portal_config *pcfg;
+
+	if (!p)
+		return;
+
+	pcfg = bman_get_bm_portal_config(p);
+	if (!pcfg)
+		return;
+
+	irq_set_affinity(pcfg->irq, cpumask_of(0));
+}
+
+static void bman_online_cpu(unsigned int cpu)
+{
+	struct bman_portal *p = affine_bportals[cpu];
+	const struct bm_portal_config *pcfg;
+
+	if (!p)
+		return;
+
+	pcfg = bman_get_bm_portal_config(p);
+	if (!pcfg)
+		return;
+
+	irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+}
+
+static int bman_hotplug_cpu_callback(struct notifier_block *nfb,
+				     unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		bman_online_cpu(cpu);
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		bman_offline_cpu(cpu);
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block bman_hotplug_cpu_notifier = {
+	.notifier_call = bman_hotplug_cpu_callback,
+};
+
+static int bman_portal_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct bm_portal_config *pcfg;
+	struct resource *addr_phys[2];
+	void __iomem *va;
+	int irq, cpu;
+
+	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+	if (!pcfg)
+		return -ENOMEM;
+
+	pcfg->dev = dev;
+
+	addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+					     DPAA_PORTAL_CE);
+	if (!addr_phys[0]) {
+		dev_err(dev, "Can't get %s property 'reg::CE'\n",
+			node->full_name);
+		return -ENXIO;
+	}
+
+	addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+					     DPAA_PORTAL_CI);
+	if (!addr_phys[1]) {
+		dev_err(dev, "Can't get %s property 'reg::CI'\n",
+			node->full_name);
+		return -ENXIO;
+	}
+
+	pcfg->cpu = -1;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(dev, "Can't get %s IRQ'\n", node->full_name);
+		return -ENXIO;
+	}
+	pcfg->irq = irq;
+
+	va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
+	if (!va)
+		goto err_ioremap1;
+
+	pcfg->addr_virt[DPAA_PORTAL_CE] = va;
+
+	va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
+			  _PAGE_GUARDED | _PAGE_NO_CACHE);
+	if (!va)
+		goto err_ioremap2;
+
+	pcfg->addr_virt[DPAA_PORTAL_CI] = va;
+
+	spin_lock(&bman_lock);
+	cpu = cpumask_next_zero(-1, &portal_cpus);
+	if (cpu >= nr_cpu_ids) {
+		/* unassigned portal, skip init */
+		spin_unlock(&bman_lock);
+		return 0;
+	}
+
+	cpumask_set_cpu(cpu, &portal_cpus);
+	spin_unlock(&bman_lock);
+	pcfg->cpu = cpu;
+
+	if (!init_pcfg(pcfg))
+		goto err_ioremap2;
+
+	/* clear irq affinity if assigned cpu is offline */
+	if (!cpu_online(cpu))
+		bman_offline_cpu(cpu);
+
+	return 0;
+
+err_ioremap2:
+	iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+err_ioremap1:
+	dev_err(dev, "ioremap failed\n");
+	return -ENXIO;
+}
+
+static const struct of_device_id bman_portal_ids[] = {
+	{
+		.compatible = "fsl,bman-portal",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, bman_portal_ids);
+
+static struct platform_driver bman_portal_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = bman_portal_ids,
+	},
+	.probe = bman_portal_probe,
+};
+
+static int __init bman_portal_driver_register(struct platform_driver *drv)
+{
+	int ret;
+
+	ret = platform_driver_register(drv);
+	if (ret < 0)
+		return ret;
+
+	register_hotcpu_notifier(&bman_hotplug_cpu_notifier);
+
+	return 0;
+}
+
+module_driver(bman_portal_driver,
+	      bman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h
new file mode 100644
index 0000000..f6896a2
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_priv.h
@@ -0,0 +1,80 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/bman.h>
+
+/* Portal processing (interrupt) sources */
+#define BM_PIRQ_RCRI	0x00000002	/* RCR Ring (below threshold) */
+
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+extern u16 bman_ip_rev;	/* 0 if uninitialised, otherwise BMAN_REVx */
+
+extern struct gen_pool *bm_bpalloc;
+
+struct bm_portal_config {
+	/*
+	 * Corenet portal addresses;
+	 * [0]==cache-enabled, [1]==cache-inhibited.
+	 */
+	void __iomem *addr_virt[2];
+	/* Allow these to be joined in lists */
+	struct list_head list;
+	struct device *dev;
+	/* User-visible portal configuration settings */
+	/* portal is affined to this cpu */
+	int cpu;
+	/* portal interrupt line */
+	int irq;
+};
+
+struct bman_portal *bman_create_affine_portal(
+			const struct bm_portal_config *config);
+/*
+ * The below bman_p_***() variant might be called in a situation that the cpu
+ * which the portal affine to is not online yet.
+ * @bman_portal specifies which portal the API will use.
+ */
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * This mask contains all the "irqsource" bits visible to API users
+ */
+#define BM_PIRQ_VISIBLE	BM_PIRQ_RCRI
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/bman_test.c b/drivers/soc/fsl/qbman/bman_test.c
new file mode 100644
index 0000000..09b1c96
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.c
@@ -0,0 +1,53 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("BMan testing");
+
+static int test_init(void)
+{
+#ifdef CONFIG_FSL_BMAN_TEST_API
+	int loop = 1;
+
+	while (loop--)
+		bman_test_api();
+#endif
+	return 0;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/bman_test.h b/drivers/soc/fsl/qbman/bman_test.h
new file mode 100644
index 0000000..037ed34
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.h
@@ -0,0 +1,35 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+void bman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/bman_test_api.c b/drivers/soc/fsl/qbman/bman_test_api.c
new file mode 100644
index 0000000..6f6bdd1
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test_api.c
@@ -0,0 +1,151 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+#define NUM_BUFS	93
+#define LOOPS		3
+#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
+
+static struct bman_pool *pool;
+static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
+static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
+static int bufs_received;
+
+static void bufs_init(void)
+{
+	int i;
+
+	for (i = 0; i < NUM_BUFS; i++)
+		bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
+	bufs_received = 0;
+}
+
+static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
+{
+	if (bman_ip_rev == BMAN_REV20 || bman_ip_rev == BMAN_REV21) {
+
+		/*
+		 * On SoCs with BMan revison 2.0, BMan only respects the 40
+		 * LS-bits of buffer addresses, masking off the upper 8-bits on
+		 * release commands. The API provides for 48-bit addresses
+		 * because some SoCs support all 48-bits. When generating
+		 * garbage addresses for testing, we either need to zero the
+		 * upper 8-bits when releasing to BMan (otherwise we'll be
+		 * disappointed when the buffers we acquire back from BMan
+		 * don't match), or we need to mask the upper 8-bits off when
+		 * comparing. We do the latter.
+		 */
+		if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) <
+		    (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+			return -1;
+		if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) >
+		    (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+			return 1;
+	} else {
+		if (bm_buffer_get64(a) < bm_buffer_get64(b))
+			return -1;
+		if (bm_buffer_get64(a) > bm_buffer_get64(b))
+			return 1;
+	}
+
+	return 0;
+}
+
+static void bufs_confirm(void)
+{
+	int i, j;
+
+	for (i = 0; i < NUM_BUFS; i++) {
+		int matches = 0;
+
+		for (j = 0; j < NUM_BUFS; j++)
+			if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
+				matches++;
+		WARN_ON(matches != 1);
+	}
+}
+
+/* test */
+void bman_test_api(void)
+{
+	int i, loops = LOOPS;
+
+	bufs_init();
+
+	pr_info("%s(): Starting\n", __func__);
+
+	pool = bman_new_pool();
+	if (!pool) {
+		pr_crit("bman_new_pool() failed\n");
+		goto failed;
+	}
+
+	/* Release buffers */
+do_loop:
+	i = 0;
+	while (i < NUM_BUFS) {
+		int num = 8;
+
+		if (i + num > NUM_BUFS)
+			num = NUM_BUFS - i;
+		if (bman_release(pool, bufs_in + i, num)) {
+			pr_crit("bman_release() failed\n");
+			goto failed;
+		}
+		i += num;
+	}
+
+	/* Acquire buffers */
+	while (i > 0) {
+		int tmp, num = 8;
+
+		if (num > i)
+			num = i;
+		tmp = bman_acquire(pool, bufs_out + i - num, num);
+		WARN_ON(tmp != num);
+		i -= num;
+	}
+	i = bman_acquire(pool, NULL, 1);
+	WARN_ON(i > 0);
+
+	bufs_confirm();
+
+	if (--loops)
+		goto do_loop;
+
+	/* Clean up */
+	bman_free_pool(pool);
+	pr_info("%s(): Finished\n", __func__);
+	return;
+
+failed:
+	WARN_ON(1);
+}
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
new file mode 100644
index 0000000..b63fd72
--- /dev/null
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -0,0 +1,103 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_SYS_H
+#define __DPAA_SYS_H
+
+#include <linux/cpu.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/prefetch.h>
+#include <linux/genalloc.h>
+#include <asm/cacheflush.h>
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPAA_PORTAL_CE 0
+#define DPAA_PORTAL_CI 1
+
+#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64)
+#error "Unsupported Cacheline Size"
+#endif
+
+static inline void dpaa_flush(void *p)
+{
+#ifdef CONFIG_PPC
+	flush_dcache_range((unsigned long)p, (unsigned long)p+64);
+#elif defined(CONFIG_ARM32)
+	__cpuc_flush_dcache_area(p, 64);
+#elif defined(CONFIG_ARM64)
+	__flush_dcache_area(p, 64);
+#endif
+}
+
+#define dpaa_invalidate(p) dpaa_flush(p)
+
+#define dpaa_zero(p) memset(p, 0, 64)
+
+static inline void dpaa_touch_ro(void *p)
+{
+#if (L1_CACHE_BYTES == 32)
+	prefetch(p+32);
+#endif
+	prefetch(p);
+}
+
+/* Commonly used combo */
+static inline void dpaa_invalidate_touch_ro(void *p)
+{
+	dpaa_invalidate(p);
+	dpaa_touch_ro(p);
+}
+
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+#define DPAA_ASSERT(x) WARN_ON(!(x))
+#else
+#define DPAA_ASSERT(x)
+#endif
+
+/* cyclic helper for rings */
+static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+	/* 'first' is included, 'last' is excluded */
+	if (first <= last)
+		return last - first;
+	return ringsize + last - first;
+}
+
+/* Offset applied to genalloc pools due to zero being an error return */
+#define DPAA_GENALLOC_OFF	0x80000000
+
+#endif	/* __DPAA_SYS_H */
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
new file mode 100644
index 0000000..119054b
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -0,0 +1,2881 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#define DQRR_MAXFILL	15
+#define EQCR_ITHRESH	4	/* if EQCR congests, interrupt threshold */
+#define IRQNAME		"QMan portal %d"
+#define MAX_IRQNAME	16	/* big enough for "QMan portal %d" */
+#define QMAN_POLL_LIMIT 32
+#define QMAN_PIRQ_DQRR_ITHRESH 12
+#define QMAN_PIRQ_MR_ITHRESH 4
+#define QMAN_PIRQ_IPERIOD 100
+
+/* Portal register assists */
+
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH	0x0000
+#define QM_REG_EQCR_CI_CINH	0x0004
+#define QM_REG_EQCR_ITR		0x0008
+#define QM_REG_DQRR_PI_CINH	0x0040
+#define QM_REG_DQRR_CI_CINH	0x0044
+#define QM_REG_DQRR_ITR		0x0048
+#define QM_REG_DQRR_DCAP	0x0050
+#define QM_REG_DQRR_SDQCR	0x0054
+#define QM_REG_DQRR_VDQCR	0x0058
+#define QM_REG_DQRR_PDQCR	0x005c
+#define QM_REG_MR_PI_CINH	0x0080
+#define QM_REG_MR_CI_CINH	0x0084
+#define QM_REG_MR_ITR		0x0088
+#define QM_REG_CFG		0x0100
+#define QM_REG_ISR		0x0e00
+#define QM_REG_IER		0x0e04
+#define QM_REG_ISDR		0x0e08
+#define QM_REG_IIR		0x0e0c
+#define QM_REG_ITPR		0x0e14
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR		0x0000
+#define QM_CL_DQRR		0x1000
+#define QM_CL_MR		0x2000
+#define QM_CL_EQCR_PI_CENA	0x3000
+#define QM_CL_EQCR_CI_CENA	0x3100
+#define QM_CL_DQRR_PI_CENA	0x3200
+#define QM_CL_DQRR_CI_CENA	0x3300
+#define QM_CL_MR_PI_CENA	0x3400
+#define QM_CL_MR_CI_CENA	0x3500
+#define QM_CL_CR		0x3800
+#define QM_CL_RR0		0x3900
+#define QM_CL_RR1		0x3940
+
+/*
+ * BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses and data-dependencies. Use of barrier()s
+ * or other order-preserving primitives simply degrade performance. Hence the
+ * use of the __raw_*() interfaces, which simply ensure that the compiler treats
+ * the portal registers as volatile
+ */
+
+/* Cache-enabled ring access */
+#define qm_cl(base, idx)	((void *)base + ((idx) << 6))
+
+/*
+ * Portal modes.
+ *   Enum types;
+ *     pmode == production mode
+ *     cmode == consumption mode,
+ *     dmode == h/w dequeue mode.
+ *   Enum values use 3 letter codes. First letter matches the portal mode,
+ *   remaining two letters indicate;
+ *     ci == cache-inhibited portal register
+ *     ce == cache-enabled portal register
+ *     vb == in-band valid-bit (cache-enabled)
+ *     dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
+ *   As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode {		/* matches QCSP_CFG::EPM */
+	qm_eqcr_pci = 0,	/* PI index, cache-inhibited */
+	qm_eqcr_pce = 1,	/* PI index, cache-enabled */
+	qm_eqcr_pvb = 2		/* valid-bit */
+};
+enum qm_dqrr_dmode {		/* matches QCSP_CFG::DP */
+	qm_dqrr_dpush = 0,	/* SDQCR  + VDQCR */
+	qm_dqrr_dpull = 1	/* PDQCR */
+};
+enum qm_dqrr_pmode {		/* s/w-only */
+	qm_dqrr_pci,		/* reads DQRR_PI_CINH */
+	qm_dqrr_pce,		/* reads DQRR_PI_CENA */
+	qm_dqrr_pvb		/* reads valid-bit */
+};
+enum qm_dqrr_cmode {		/* matches QCSP_CFG::DCM */
+	qm_dqrr_cci = 0,	/* CI index, cache-inhibited */
+	qm_dqrr_cce = 1,	/* CI index, cache-enabled */
+	qm_dqrr_cdc = 2		/* Discrete Consumption Acknowledgment */
+};
+enum qm_mr_pmode {		/* s/w-only */
+	qm_mr_pci,		/* reads MR_PI_CINH */
+	qm_mr_pce,		/* reads MR_PI_CENA */
+	qm_mr_pvb		/* reads valid-bit */
+};
+enum qm_mr_cmode {		/* matches QCSP_CFG::MM */
+	qm_mr_cci = 0,		/* CI index, cache-inhibited */
+	qm_mr_cce = 1		/* CI index, cache-enabled */
+};
+
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE		8
+#define QM_DQRR_SIZE		16
+#define QM_MR_SIZE		8
+
+/* "Enqueue Command" */
+struct qm_eqcr_entry {
+	u8 _ncw_verb; /* writes to this are non-coherent */
+	u8 dca;
+	u16 seqnum;
+	u32 orp;	/* 24-bit */
+	u32 fqid;	/* 24-bit */
+	u32 tag;
+	struct qm_fd fd;
+	u8 __reserved3[32];
+} __packed;
+#define QM_EQCR_VERB_VBIT		0x80
+#define QM_EQCR_VERB_CMD_MASK		0x61	/* but only one value; */
+#define QM_EQCR_VERB_CMD_ENQUEUE	0x01
+#define QM_EQCR_SEQNUM_NESN		0x8000	/* Advance NESN */
+#define QM_EQCR_SEQNUM_NLIS		0x4000	/* More fragments to come */
+#define QM_EQCR_SEQNUM_SEQMASK		0x3fff	/* sequence number goes here */
+
+struct qm_eqcr {
+	struct qm_eqcr_entry *ring, *cursor;
+	u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	u32 busy;
+	enum qm_eqcr_pmode pmode;
+#endif
+};
+
+struct qm_dqrr {
+	const struct qm_dqrr_entry *ring, *cursor;
+	u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	enum qm_dqrr_dmode dmode;
+	enum qm_dqrr_pmode pmode;
+	enum qm_dqrr_cmode cmode;
+#endif
+};
+
+struct qm_mr {
+	union qm_mr_entry *ring, *cursor;
+	u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	enum qm_mr_pmode pmode;
+	enum qm_mr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+/* "Query FQ" */
+struct qm_mcc_queryfq {
+	u8 _ncw_verb;
+	u8 __reserved1[3];
+	u32 fqid;	/* 24-bit */
+	u8 __reserved2[56];
+} __packed;
+/* "Alter FQ State Commands " */
+struct qm_mcc_alterfq {
+	u8 _ncw_verb;
+	u8 __reserved1[3];
+	u32 fqid;	/* 24-bit */
+	u8 __reserved2;
+	u8 count;	/* number of consecutive FQID */
+	u8 __reserved3[10];
+	u32 context_b;	/* frame queue context b */
+	u8 __reserved4[40];
+} __packed;
+
+/* "Query CGR" */
+struct qm_mcc_querycgr {
+	u8 _ncw_verb;
+	u8 __reserved1[30];
+	u8 cgid;
+	u8 __reserved2[32];
+};
+
+struct qm_mcc_querywq {
+	u8 _ncw_verb;
+	u8 __reserved;
+	/* select channel if verb != QUERYWQ_DEDICATED */
+	u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
+	u8 __reserved2[60];
+} __packed;
+
+#define QM_MCC_VERB_VBIT		0x80
+#define QM_MCC_VERB_MASK		0x7f	/* where the verb contains; */
+#define QM_MCC_VERB_INITFQ_PARKED	0x40
+#define QM_MCC_VERB_INITFQ_SCHED	0x41
+#define QM_MCC_VERB_QUERYFQ		0x44
+#define QM_MCC_VERB_QUERYFQ_NP		0x45	/* "non-programmable" fields */
+#define QM_MCC_VERB_QUERYWQ		0x46
+#define QM_MCC_VERB_QUERYWQ_DEDICATED	0x47
+#define QM_MCC_VERB_ALTER_SCHED		0x48	/* Schedule FQ */
+#define QM_MCC_VERB_ALTER_FE		0x49	/* Force Eligible FQ */
+#define QM_MCC_VERB_ALTER_RETIRE	0x4a	/* Retire FQ */
+#define QM_MCC_VERB_ALTER_OOS		0x4b	/* Take FQ out of service */
+#define QM_MCC_VERB_ALTER_FQXON		0x4d	/* FQ XON */
+#define QM_MCC_VERB_ALTER_FQXOFF	0x4e	/* FQ XOFF */
+#define QM_MCC_VERB_INITCGR		0x50
+#define QM_MCC_VERB_MODIFYCGR		0x51
+#define QM_MCC_VERB_CGRTESTWRITE	0x52
+#define QM_MCC_VERB_QUERYCGR		0x58
+#define QM_MCC_VERB_QUERYCONGESTION	0x59
+union qm_mc_command {
+	struct {
+		u8 _ncw_verb; /* writes to this are non-coherent */
+		u8 __reserved[63];
+	};
+	struct qm_mcc_initfq initfq;
+	struct qm_mcc_queryfq queryfq;
+	struct qm_mcc_alterfq alterfq;
+	struct qm_mcc_initcgr initcgr;
+	struct qm_mcc_querycgr querycgr;
+	struct qm_mcc_querywq querywq;
+	struct qm_mcc_queryfq_np queryfq_np;
+};
+
+/* MC (Management Command) result */
+/* "Query FQ" */
+struct qm_mcr_queryfq {
+	u8 verb;
+	u8 result;
+	u8 __reserved1[8];
+	struct qm_fqd fqd;	/* the FQD fields are here */
+	u8 __reserved2[30];
+} __packed;
+
+/* "Alter FQ State Commands" */
+struct qm_mcr_alterfq {
+	u8 verb;
+	u8 result;
+	u8 fqs;		/* Frame Queue Status */
+	u8 __reserved1[61];
+};
+#define QM_MCR_VERB_RRID		0x80
+#define QM_MCR_VERB_MASK		QM_MCC_VERB_MASK
+#define QM_MCR_VERB_INITFQ_PARKED	QM_MCC_VERB_INITFQ_PARKED
+#define QM_MCR_VERB_INITFQ_SCHED	QM_MCC_VERB_INITFQ_SCHED
+#define QM_MCR_VERB_QUERYFQ		QM_MCC_VERB_QUERYFQ
+#define QM_MCR_VERB_QUERYFQ_NP		QM_MCC_VERB_QUERYFQ_NP
+#define QM_MCR_VERB_QUERYWQ		QM_MCC_VERB_QUERYWQ
+#define QM_MCR_VERB_QUERYWQ_DEDICATED	QM_MCC_VERB_QUERYWQ_DEDICATED
+#define QM_MCR_VERB_ALTER_SCHED		QM_MCC_VERB_ALTER_SCHED
+#define QM_MCR_VERB_ALTER_FE		QM_MCC_VERB_ALTER_FE
+#define QM_MCR_VERB_ALTER_RETIRE	QM_MCC_VERB_ALTER_RETIRE
+#define QM_MCR_VERB_ALTER_OOS		QM_MCC_VERB_ALTER_OOS
+#define QM_MCR_RESULT_NULL		0x00
+#define QM_MCR_RESULT_OK		0xf0
+#define QM_MCR_RESULT_ERR_FQID		0xf1
+#define QM_MCR_RESULT_ERR_FQSTATE	0xf2
+#define QM_MCR_RESULT_ERR_NOTEMPTY	0xf3	/* OOS fails if FQ is !empty */
+#define QM_MCR_RESULT_ERR_BADCHANNEL	0xf4
+#define QM_MCR_RESULT_PENDING		0xf8
+#define QM_MCR_RESULT_ERR_BADCOMMAND	0xff
+#define QM_MCR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
+#define QM_MCR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
+#define QM_MCR_TIMEOUT			10000	/* us */
+union qm_mc_result {
+	struct {
+		u8 verb;
+		u8 result;
+		u8 __reserved1[62];
+	};
+	struct qm_mcr_queryfq queryfq;
+	struct qm_mcr_alterfq alterfq;
+	struct qm_mcr_querycgr querycgr;
+	struct qm_mcr_querycongestion querycongestion;
+	struct qm_mcr_querywq querywq;
+	struct qm_mcr_queryfq_np queryfq_np;
+};
+
+struct qm_mc {
+	union qm_mc_command *cr;
+	union qm_mc_result *rr;
+	u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	enum {
+		/* Can be _mc_start()ed */
+		qman_mc_idle,
+		/* Can be _mc_commit()ed or _mc_abort()ed */
+		qman_mc_user,
+		/* Can only be _mc_retry()ed */
+		qman_mc_hw
+	} state;
+#endif
+};
+
+struct qm_addr {
+	void __iomem *ce;	/* cache-enabled */
+	void __iomem *ci;	/* cache-inhibited */
+};
+
+struct qm_portal {
+	/*
+	 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
+	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
+	 * is setup-only, so isn't a cause for a concern. In other words, don't
+	 * rearrange this structure on a whim, there be dragons ...
+	 */
+	struct qm_addr addr;
+	struct qm_eqcr eqcr;
+	struct qm_dqrr dqrr;
+	struct qm_mr mr;
+	struct qm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 qm_in(struct qm_portal *p, u32 offset)
+{
+	return __raw_readl(p->addr.ci + offset);
+}
+
+static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
+{
+	__raw_writel(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
+{
+	dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
+{
+	dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
+{
+	return __raw_readl(p->addr.ce + offset);
+}
+
+/* --- EQCR API --- */
+
+#define EQCR_SHIFT	ilog2(sizeof(struct qm_eqcr_entry))
+#define EQCR_CARRY	(uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
+{
+	uintptr_t addr = (uintptr_t)p;
+
+	addr &= ~EQCR_CARRY;
+
+	return (struct qm_eqcr_entry *)addr;
+}
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
+{
+	return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void eqcr_inc(struct qm_eqcr *eqcr)
+{
+	/* increment to the next EQCR pointer and handle overflow and 'vbit' */
+	struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+	eqcr->cursor = eqcr_carryclear(partial);
+	if (partial != eqcr->cursor)
+		eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
+
+static inline int qm_eqcr_init(struct qm_portal *portal,
+				enum qm_eqcr_pmode pmode,
+				unsigned int eq_stash_thresh,
+				int eq_stash_prio)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	u32 cfg;
+	u8 pi;
+
+	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
+	eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+	pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+	eqcr->cursor = eqcr->ring + pi;
+	eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+		     QM_EQCR_VERB_VBIT : 0;
+	eqcr->available = QM_EQCR_SIZE - 1 -
+			  dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+	eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	eqcr->busy = 0;
+	eqcr->pmode = pmode;
+#endif
+	cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
+	      (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+	      (eq_stash_prio << 26) | /* QCSP_CFG: EP */
+	      ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
+	qm_out(portal, QM_REG_CFG, cfg);
+	return 0;
+}
+
+static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
+{
+	return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+	u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+	DPAA_ASSERT(!eqcr->busy);
+	if (pi != eqcr_ptr2idx(eqcr->cursor))
+		pr_crit("losing uncommited EQCR entries\n");
+	if (ci != eqcr->ci)
+		pr_crit("missing existing EQCR completions\n");
+	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
+		pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+								 *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+
+	DPAA_ASSERT(!eqcr->busy);
+	if (!eqcr->available)
+		return NULL;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	eqcr->busy = 1;
+#endif
+	dpaa_zero(eqcr->cursor);
+	return eqcr->cursor;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+								*portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 diff, old_ci;
+
+	DPAA_ASSERT(!eqcr->busy);
+	if (!eqcr->available) {
+		old_ci = eqcr->ci;
+		eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
+			   (QM_EQCR_SIZE - 1);
+		diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+		eqcr->available += diff;
+		if (!diff)
+			return NULL;
+	}
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	eqcr->busy = 1;
+#endif
+	dpaa_zero(eqcr->cursor);
+	return eqcr->cursor;
+}
+
+static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
+{
+	DPAA_ASSERT(eqcr->busy);
+	DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff));
+	DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
+	DPAA_ASSERT(eqcr->available >= 1);
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	struct qm_eqcr_entry *eqcursor;
+
+	eqcr_commit_checks(eqcr);
+	DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+	dma_wmb();
+	eqcursor = eqcr->cursor;
+	eqcursor->_ncw_verb = myverb | eqcr->vbit;
+	dpaa_flush(eqcursor);
+	eqcr_inc(eqcr);
+	eqcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+	qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 diff, old_ci = eqcr->ci;
+
+	eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
+	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+	diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+	eqcr->available += diff;
+	return diff;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+
+	eqcr->ithresh = ithresh;
+	qm_out(portal, QM_REG_EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+
+	return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+
+	return QM_EQCR_SIZE - 1 - eqcr->available;
+}
+
+/* --- DQRR API --- */
+
+#define DQRR_SHIFT	ilog2(sizeof(struct qm_dqrr_entry))
+#define DQRR_CARRY	(uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
+
+static const struct qm_dqrr_entry *dqrr_carryclear(
+					const struct qm_dqrr_entry *p)
+{
+	uintptr_t addr = (uintptr_t)p;
+
+	addr &= ~DQRR_CARRY;
+
+	return (const struct qm_dqrr_entry *)addr;
+}
+
+static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
+{
+	return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
+}
+
+static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
+{
+	return dqrr_carryclear(e + 1);
+}
+
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+	qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
+				   ((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+
+static inline int qm_dqrr_init(struct qm_portal *portal,
+			       const struct qm_portal_config *config,
+			       enum qm_dqrr_dmode dmode,
+			       enum qm_dqrr_pmode pmode,
+			       enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+	struct qm_dqrr *dqrr = &portal->dqrr;
+	u32 cfg;
+
+	/* Make sure the DQRR will be idle when we enable */
+	qm_out(portal, QM_REG_DQRR_SDQCR, 0);
+	qm_out(portal, QM_REG_DQRR_VDQCR, 0);
+	qm_out(portal, QM_REG_DQRR_PDQCR, 0);
+	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
+	dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+	dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+	dqrr->cursor = dqrr->ring + dqrr->ci;
+	dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+	dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+			QM_DQRR_VERB_VBIT : 0;
+	dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	dqrr->dmode = dmode;
+	dqrr->pmode = pmode;
+	dqrr->cmode = cmode;
+#endif
+	/* Invalidate every ring entry before beginning */
+	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+		dpaa_invalidate(qm_cl(dqrr->ring, cfg));
+	cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
+		((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+		((dmode & 1) << 18) |			/* DP */
+		((cmode & 3) << 16) |			/* DCM */
+		0xa0 |					/* RE+SE */
+		(0 ? 0x40 : 0) |			/* Ignore RP */
+		(0 ? 0x10 : 0);				/* Ignore SP */
+	qm_out(portal, QM_REG_CFG, cfg);
+	qm_dqrr_set_maxfill(portal, max_fill);
+	return 0;
+}
+
+static inline void qm_dqrr_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	struct qm_dqrr *dqrr = &portal->dqrr;
+
+	if (dqrr->cmode != qm_dqrr_cdc &&
+	    dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
+		pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+						struct qm_portal *portal)
+{
+	struct qm_dqrr *dqrr = &portal->dqrr;
+
+	if (!dqrr->fill)
+		return NULL;
+	return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+	struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPAA_ASSERT(dqrr->fill);
+	dqrr->cursor = dqrr_inc(dqrr->cursor);
+	return --dqrr->fill;
+}
+
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+	struct qm_dqrr *dqrr = &portal->dqrr;
+	struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+
+	DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+#ifndef CONFIG_FSL_PAMU
+	/*
+	 * If PAMU is not available we need to invalidate the cache.
+	 * When PAMU is available the cache is updated by stash
+	 */
+	dpaa_invalidate_touch_ro(res);
+#endif
+	/*
+	 *  when accessing 'verb', use __raw_readb() to ensure that compiler
+	 * inlining doesn't try to optimise out "excess reads".
+	 */
+	if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+		if (!dqrr->pi)
+			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+		dqrr->fill++;
+	}
+}
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+					const struct qm_dqrr_entry *dq,
+					int park)
+{
+	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+	int idx = dqrr_ptr2idx(dq);
+
+	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+	DPAA_ASSERT((dqrr->ring + idx) == dq);
+	DPAA_ASSERT(idx < QM_DQRR_SIZE);
+	qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
+	       ((park ? 1 : 0) << 6) |		    /* DQRR_DCAP::PK */
+	       idx);				    /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
+{
+	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+	qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
+	       (bitmask << 16));		    /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+	qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+	qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
+}
+
+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+	qm_out(portal, QM_REG_DQRR_ITR, ithresh);
+}
+
+/* --- MR API --- */
+
+#define MR_SHIFT	ilog2(sizeof(union qm_mr_entry))
+#define MR_CARRY	(uintptr_t)(QM_MR_SIZE << MR_SHIFT)
+
+static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
+{
+	uintptr_t addr = (uintptr_t)p;
+
+	addr &= ~MR_CARRY;
+
+	return (union qm_mr_entry *)addr;
+}
+
+static inline int mr_ptr2idx(const union qm_mr_entry *e)
+{
+	return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
+}
+
+static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
+{
+	return mr_carryclear(e + 1);
+}
+
+static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
+			     enum qm_mr_cmode cmode)
+{
+	struct qm_mr *mr = &portal->mr;
+	u32 cfg;
+
+	mr->ring = portal->addr.ce + QM_CL_MR;
+	mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
+	mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
+	mr->cursor = mr->ring + mr->ci;
+	mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+	mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
+		? QM_MR_VERB_VBIT : 0;
+	mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mr->pmode = pmode;
+	mr->cmode = cmode;
+#endif
+	cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
+	      ((cmode & 1) << 8);	/* QCSP_CFG:MM */
+	qm_out(portal, QM_REG_CFG, cfg);
+	return 0;
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+	struct qm_mr *mr = &portal->mr;
+
+	if (mr->ci != mr_ptr2idx(mr->cursor))
+		pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+	struct qm_mr *mr = &portal->mr;
+
+	if (!mr->fill)
+		return NULL;
+	return mr->cursor;
+}
+
+static inline int qm_mr_next(struct qm_portal *portal)
+{
+	struct qm_mr *mr = &portal->mr;
+
+	DPAA_ASSERT(mr->fill);
+	mr->cursor = mr_inc(mr->cursor);
+	return --mr->fill;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+	struct qm_mr *mr = &portal->mr;
+	union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+
+	DPAA_ASSERT(mr->pmode == qm_mr_pvb);
+	/*
+	 *  when accessing 'verb', use __raw_readb() to ensure that compiler
+	 * inlining doesn't try to optimise out "excess reads".
+	 */
+	if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+		if (!mr->pi)
+			mr->vbit ^= QM_MR_VERB_VBIT;
+		mr->fill++;
+		res = mr_inc(res);
+	}
+	dpaa_invalidate_touch_ro(res);
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+	struct qm_mr *mr = &portal->mr;
+
+	DPAA_ASSERT(mr->cmode == qm_mr_cci);
+	mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+	struct qm_mr *mr = &portal->mr;
+
+	DPAA_ASSERT(mr->cmode == qm_mr_cci);
+	mr->ci = mr_ptr2idx(mr->cursor);
+	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+	qm_out(portal, QM_REG_MR_ITR, ithresh);
+}
+
+/* --- Management command API --- */
+
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+	struct qm_mc *mc = &portal->mc;
+
+	mc->cr = portal->addr.ce + QM_CL_CR;
+	mc->rr = portal->addr.ce + QM_CL_RR0;
+	mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
+		    ? 0 : 1;
+	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = qman_mc_idle;
+#endif
+	return 0;
+}
+
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	struct qm_mc *mc = &portal->mc;
+
+	DPAA_ASSERT(mc->state == qman_mc_idle);
+	if (mc->state != qman_mc_idle)
+		pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+	struct qm_mc *mc = &portal->mc;
+
+	DPAA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = qman_mc_user;
+#endif
+	dpaa_zero(mc->cr);
+	return mc->cr;
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+	struct qm_mc *mc = &portal->mc;
+	union qm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPAA_ASSERT(mc->state == qman_mc_user);
+	dma_wmb();
+	mc->cr->_ncw_verb = myverb | mc->vbit;
+	dpaa_flush(mc->cr);
+	dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = qman_mc_hw;
+#endif
+}
+
+static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+	struct qm_mc *mc = &portal->mc;
+	union qm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPAA_ASSERT(mc->state == qman_mc_hw);
+	/*
+	 *  The inactive response register's verb byte always returns zero until
+	 * its command is submitted and completed. This includes the valid-bit,
+	 * in case you were wondering...
+	 */
+	if (!__raw_readb(&rr->verb)) {
+		dpaa_invalidate_touch_ro(rr);
+		return NULL;
+	}
+	mc->rridx ^= 1;
+	mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = qman_mc_idle;
+#endif
+	return rr;
+}
+
+static inline int qm_mc_result_timeout(struct qm_portal *portal,
+				       union qm_mc_result **mcr)
+{
+	int timeout = QM_MCR_TIMEOUT;
+
+	do {
+		*mcr = qm_mc_result(portal);
+		if (*mcr)
+			break;
+		udelay(1);
+	} while (--timeout);
+
+	return timeout;
+}
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+	set_bits(mask, &fq->flags);
+}
+
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
+{
+	clear_bits(mask, &fq->flags);
+}
+
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+	return fq->flags & mask;
+}
+
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+	return !(fq->flags & mask);
+}
+
+struct qman_portal {
+	struct qm_portal p;
+	/* PORTAL_BITS_*** - dynamic, strictly internal */
+	unsigned long bits;
+	/* interrupt sources processed by portal_isr(), configurable */
+	unsigned long irq_sources;
+	u32 use_eqcr_ci_stashing;
+	/* only 1 volatile dequeue at a time */
+	struct qman_fq *vdqcr_owned;
+	u32 sdqcr;
+	/* probing time config params for cpu-affine portals */
+	const struct qm_portal_config *config;
+	/* needed for providing a non-NULL device to dma_map_***() */
+	struct platform_device *pdev;
+	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+	struct qman_cgrs *cgrs;
+	/* linked-list of CSCN handlers. */
+	struct list_head cgr_cbs;
+	/* list lock */
+	spinlock_t cgr_lock;
+	struct work_struct congestion_work;
+	struct work_struct mr_work;
+	char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
+static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
+struct qman_portal *affine_portals[NR_CPUS];
+
+static inline struct qman_portal *get_affine_portal(void)
+{
+	return &get_cpu_var(qman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+	put_cpu_var(qman_affine_portal);
+}
+
+static struct workqueue_struct *qm_portal_wq;
+
+int qman_wq_alloc(void)
+{
+	qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
+	if (!qm_portal_wq)
+		return -ENOMEM;
+	return 0;
+}
+
+/*
+ * This is what everything can wait on, even if it migrates to a different cpu
+ * to the one whose affine portal it is waiting on.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static struct qman_fq **fq_table;
+static u32 num_fqids;
+
+int qman_alloc_fq_table(u32 _num_fqids)
+{
+	num_fqids = _num_fqids;
+
+	fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *));
+	if (!fq_table)
+		return -ENOMEM;
+
+	pr_debug("Allocated fq lookup table at %p, entry count %u\n",
+		 fq_table, num_fqids * 2);
+	return 0;
+}
+
+static struct qman_fq *idx_to_fq(u32 idx)
+{
+	struct qman_fq *fq;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	if (WARN_ON(idx >= num_fqids * 2))
+		return NULL;
+#endif
+	fq = fq_table[idx];
+	DPAA_ASSERT(!fq || idx == fq->idx);
+
+	return fq;
+}
+
+/*
+ * Only returns full-service fq objects, not enqueue-only
+ * references (QMAN_FQ_FLAG_NO_MODIFY).
+ */
+static struct qman_fq *fqid_to_fq(u32 fqid)
+{
+	return idx_to_fq(fqid * 2);
+}
+
+static struct qman_fq *tag_to_fq(u32 tag)
+{
+#if BITS_PER_LONG == 64
+	return idx_to_fq(tag);
+#else
+	return (struct qman_fq *)tag;
+#endif
+}
+
+static u32 fq_to_tag(struct qman_fq *fq)
+{
+#if BITS_PER_LONG == 64
+	return fq->idx;
+#else
+	return (u32)fq;
+#endif
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+					unsigned int poll_limit);
+static void qm_congestion_task(struct work_struct *work);
+static void qm_mr_process_task(struct work_struct *work);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+	struct qman_portal *p = ptr;
+
+	u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
+	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
+
+	if (unlikely(!is))
+		return IRQ_NONE;
+
+	/* DQRR-handling if it's interrupt-driven */
+	if (is & QM_PIRQ_DQRI)
+		__poll_portal_fast(p, QMAN_POLL_LIMIT);
+	/* Handling of anything else that's interrupt-driven */
+	clear |= __poll_portal_slow(p, is);
+	qm_out(&p->p, QM_REG_ISR, clear);
+	return IRQ_HANDLED;
+}
+
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+	const union qm_mr_entry *msg;
+loop:
+	msg = qm_mr_current(p);
+	if (!msg) {
+		/*
+		 * if MR was full and h/w had other FQRNI entries to produce, we
+		 * need to allow it time to produce those entries once the
+		 * existing entries are consumed. A worst-case situation
+		 * (fully-loaded system) means h/w sequencers may have to do 3-4
+		 * other things before servicing the portal's MR pump, each of
+		 * which (if slow) may take ~50 qman cycles (which is ~200
+		 * processor cycles). So rounding up and then multiplying this
+		 * worst-case estimate by a factor of 10, just to be
+		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
+		 * one entry at a time, so h/w has an opportunity to produce new
+		 * entries well before the ring has been fully consumed, so
+		 * we're being *really* paranoid here.
+		 */
+		u64 now, then = jiffies;
+
+		do {
+			now = jiffies;
+		} while ((then + 10000) > now);
+		msg = qm_mr_current(p);
+		if (!msg)
+			return 0;
+	}
+	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+		/* We aren't draining anything but FQRNIs */
+		pr_err("Found verb 0x%x in MR\n", msg->verb);
+		return -1;
+	}
+	qm_mr_next(p);
+	qm_mr_cci_consume(p, 1);
+	goto loop;
+}
+
+static int qman_create_portal(struct qman_portal *portal,
+			      const struct qm_portal_config *c,
+			      const struct qman_cgrs *cgrs)
+{
+	struct qm_portal *p;
+	char buf[16];
+	int ret;
+	u32 isdr;
+
+	p = &portal->p;
+
+#ifdef CONFIG_FSL_PAMU
+	/* PAMU is required for stashing */
+	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
+#else
+	portal->use_eqcr_ci_stashing = 0;
+#endif
+	/*
+	 * prep the low-level portal struct with the mapped addresses from the
+	 * config, everything that follows depends on it and "config" is more
+	 * for (de)reference
+	 */
+	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+	/*
+	 * If CI-stashing is used, the current defaults use a threshold of 3,
+	 * and stash with high-than-DQRR priority.
+	 */
+	if (qm_eqcr_init(p, qm_eqcr_pvb,
+			portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
+		dev_err(c->dev, "EQCR initialisation failed\n");
+		goto fail_eqcr;
+	}
+	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
+			qm_dqrr_cdc, DQRR_MAXFILL)) {
+		dev_err(c->dev, "DQRR initialisation failed\n");
+		goto fail_dqrr;
+	}
+	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
+		dev_err(c->dev, "MR initialisation failed\n");
+		goto fail_mr;
+	}
+	if (qm_mc_init(p)) {
+		dev_err(c->dev, "MC initialisation failed\n");
+		goto fail_mc;
+	}
+	/* static interrupt-gating controls */
+	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
+	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
+	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
+	portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
+	if (!portal->cgrs)
+		goto fail_cgrs;
+	/* initial snapshot is no-depletion */
+	qman_cgrs_init(&portal->cgrs[1]);
+	if (cgrs)
+		portal->cgrs[0] = *cgrs;
+	else
+		/* if the given mask is NULL, assume all CGRs can be seen */
+		qman_cgrs_fill(&portal->cgrs[0]);
+	INIT_LIST_HEAD(&portal->cgr_cbs);
+	spin_lock_init(&portal->cgr_lock);
+	INIT_WORK(&portal->congestion_work, qm_congestion_task);
+	INIT_WORK(&portal->mr_work, qm_mr_process_task);
+	portal->bits = 0;
+	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+	sprintf(buf, "qportal-%d", c->channel);
+	portal->pdev = platform_device_alloc(buf, -1);
+	if (!portal->pdev)
+		goto fail_devalloc;
+	if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
+		goto fail_devadd;
+	ret = platform_device_add(portal->pdev);
+	if (ret)
+		goto fail_devadd;
+	isdr = 0xffffffff;
+	qm_out(p, QM_REG_ISDR, isdr);
+	portal->irq_sources = 0;
+	qm_out(p, QM_REG_IER, 0);
+	qm_out(p, QM_REG_ISR, 0xffffffff);
+	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+	if (request_irq(c->irq, portal_isr, 0, portal->irqname,	portal)) {
+		dev_err(c->dev, "request_irq() failed\n");
+		goto fail_irq;
+	}
+	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+		dev_err(c->dev, "irq_set_affinity() failed\n");
+		goto fail_affinity;
+	}
+
+	/* Need EQCR to be empty before continuing */
+	isdr &= ~QM_PIRQ_EQCI;
+	qm_out(p, QM_REG_ISDR, isdr);
+	ret = qm_eqcr_get_fill(p);
+	if (ret) {
+		dev_err(c->dev, "EQCR unclean\n");
+		goto fail_eqcr_empty;
+	}
+	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
+	qm_out(p, QM_REG_ISDR, isdr);
+	if (qm_dqrr_current(p)) {
+		dev_err(c->dev, "DQRR unclean\n");
+		qm_dqrr_cdc_consume_n(p, 0xffff);
+	}
+	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
+		/* special handling, drain just in case it's a few FQRNIs */
+		const union qm_mr_entry *e = qm_mr_current(p);
+
+		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x\n, addr 0x%x",
+			e->verb, e->ern.rc, e->ern.fd.addr_lo);
+		goto fail_dqrr_mr_empty;
+	}
+	/* Success */
+	portal->config = c;
+	qm_out(p, QM_REG_ISDR, 0);
+	qm_out(p, QM_REG_IIR, 0);
+	/* Write a sane SDQCR */
+	qm_dqrr_sdqcr_set(p, portal->sdqcr);
+	return 0;
+
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+fail_affinity:
+	free_irq(c->irq, portal);
+fail_irq:
+	platform_device_del(portal->pdev);
+fail_devadd:
+	platform_device_put(portal->pdev);
+fail_devalloc:
+	kfree(portal->cgrs);
+fail_cgrs:
+	qm_mc_finish(p);
+fail_mc:
+	qm_mr_finish(p);
+fail_mr:
+	qm_dqrr_finish(p);
+fail_dqrr:
+	qm_eqcr_finish(p);
+fail_eqcr:
+	return -EIO;
+}
+
+struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
+					      const struct qman_cgrs *cgrs)
+{
+	struct qman_portal *portal;
+	int err;
+
+	portal = &per_cpu(qman_affine_portal, c->cpu);
+	err = qman_create_portal(portal, c, cgrs);
+	if (err)
+		return NULL;
+
+	spin_lock(&affine_mask_lock);
+	cpumask_set_cpu(c->cpu, &affine_mask);
+	affine_channels[c->cpu] = c->channel;
+	affine_portals[c->cpu] = portal;
+	spin_unlock(&affine_mask_lock);
+
+	return portal;
+}
+
+static void qman_destroy_portal(struct qman_portal *qm)
+{
+	const struct qm_portal_config *pcfg;
+
+	/* Stop dequeues on the portal */
+	qm_dqrr_sdqcr_set(&qm->p, 0);
+
+	/*
+	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+	 * something related to QM_PIRQ_EQCI, this may need fixing.
+	 * Also, due to the prefetching model used for CI updates in the enqueue
+	 * path, this update will only invalidate the CI cacheline *after*
+	 * working on it, so we need to call this twice to ensure a full update
+	 * irrespective of where the enqueue processing was at when the teardown
+	 * began.
+	 */
+	qm_eqcr_cce_update(&qm->p);
+	qm_eqcr_cce_update(&qm->p);
+	pcfg = qm->config;
+
+	free_irq(pcfg->irq, qm);
+
+	kfree(qm->cgrs);
+	qm_mc_finish(&qm->p);
+	qm_mr_finish(&qm->p);
+	qm_dqrr_finish(&qm->p);
+	qm_eqcr_finish(&qm->p);
+
+	platform_device_del(qm->pdev);
+	platform_device_put(qm->pdev);
+
+	qm->config = NULL;
+}
+
+const struct qm_portal_config *qman_destroy_affine_portal(void)
+{
+	struct qman_portal *qm = get_affine_portal();
+	const struct qm_portal_config *pcfg;
+	int cpu;
+
+	pcfg = qm->config;
+	cpu = pcfg->cpu;
+
+	qman_destroy_portal(qm);
+
+	spin_lock(&affine_mask_lock);
+	cpumask_clear_cpu(cpu, &affine_mask);
+	spin_unlock(&affine_mask_lock);
+	put_affine_portal();
+	return pcfg;
+}
+
+/* Inline helper to reduce nesting in __poll_portal_slow() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+				   const union qm_mr_entry *msg, u8 verb)
+{
+	switch (verb) {
+	case QM_MR_VERB_FQRL:
+		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+		fq_clear(fq, QMAN_FQ_STATE_ORL);
+		break;
+	case QM_MR_VERB_FQRN:
+		DPAA_ASSERT(fq->state == qman_fq_state_parked ||
+			    fq->state == qman_fq_state_sched);
+		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+			fq_set(fq, QMAN_FQ_STATE_NE);
+		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+			fq_set(fq, QMAN_FQ_STATE_ORL);
+		fq->state = qman_fq_state_retired;
+		break;
+	case QM_MR_VERB_FQPN:
+		DPAA_ASSERT(fq->state == qman_fq_state_sched);
+		DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+		fq->state = qman_fq_state_parked;
+	}
+}
+
+static void qm_congestion_task(struct work_struct *work)
+{
+	struct qman_portal *p = container_of(work, struct qman_portal,
+					     congestion_work);
+	struct qman_cgrs rr, c;
+	union qm_mc_result *mcr;
+	struct qman_cgr *cgr;
+
+	spin_lock(&p->cgr_lock);
+	qm_mc_start(&p->p);
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		spin_unlock(&p->cgr_lock);
+		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
+		return;
+	}
+	/* mask out the ones I'm not interested in */
+	qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
+		      &p->cgrs[0]);
+	/* check previous snapshot for delta, enter/exit congestion */
+	qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+	/* update snapshot */
+	qman_cgrs_cp(&p->cgrs[1], &rr);
+	/* Invoke callback */
+	list_for_each_entry(cgr, &p->cgr_cbs, node)
+		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+	spin_unlock(&p->cgr_lock);
+}
+
+static void qm_mr_process_task(struct work_struct *work)
+{
+	struct qman_portal *p = container_of(work, struct qman_portal,
+					     mr_work);
+	const union qm_mr_entry *msg;
+	struct qman_fq *fq;
+	u8 verb, num = 0;
+
+	preempt_disable();
+
+	while (1) {
+		qm_mr_pvb_update(&p->p);
+		msg = qm_mr_current(&p->p);
+		if (!msg)
+			break;
+
+		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+		/* The message is a software ERN iff the 0x20 bit is clear */
+		if (verb & 0x20) {
+			switch (verb) {
+			case QM_MR_VERB_FQRNI:
+				/* nada, we drop FQRNIs on the floor */
+				break;
+			case QM_MR_VERB_FQRN:
+			case QM_MR_VERB_FQRL:
+				/* Lookup in the retirement table */
+				fq = fqid_to_fq(msg->fq.fqid);
+				if (WARN_ON(!fq))
+					break;
+				fq_state_change(p, fq, msg, verb);
+				if (fq->cb.fqs)
+					fq->cb.fqs(p, fq, msg);
+				break;
+			case QM_MR_VERB_FQPN:
+				/* Parked */
+				fq = tag_to_fq(msg->fq.contextB);
+				fq_state_change(p, fq, msg, verb);
+				if (fq->cb.fqs)
+					fq->cb.fqs(p, fq, msg);
+				break;
+			case QM_MR_VERB_DC_ERN:
+				/* DCP ERN */
+				pr_crit_once("Leaking DCP ERNs!\n");
+				break;
+			default:
+				pr_crit("Invalid MR verb 0x%02x\n", verb);
+			}
+		} else {
+			/* Its a software ERN */
+			fq = tag_to_fq(msg->ern.tag);
+			fq->cb.ern(p, fq, msg);
+		}
+		num++;
+		qm_mr_next(&p->p);
+	}
+
+	qm_mr_cci_consume(&p->p, num);
+	preempt_enable();
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+	if (is & QM_PIRQ_CSCI) {
+		queue_work_on(smp_processor_id(), qm_portal_wq,
+			      &p->congestion_work);
+	}
+
+	if (is & QM_PIRQ_EQRI) {
+		qm_eqcr_cce_update(&p->p);
+		qm_eqcr_set_ithresh(&p->p, 0);
+		wake_up(&affine_queue);
+	}
+
+	if (is & QM_PIRQ_MRI) {
+		queue_work_on(smp_processor_id(), qm_portal_wq,
+			      &p->mr_work);
+	}
+
+	return is;
+}
+
+/*
+ * remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined.
+ */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+	p->vdqcr_owned = NULL;
+	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+	wake_up(&affine_queue);
+}
+
+/*
+ * The only states that would conflict with other things if they ran at the
+ * same time on the same cpu are:
+ *
+ *   (i) setting/clearing vdqcr_owned, and
+ *  (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe. Because;
+ *
+ *   (i) this clearing can only occur after qman_volatile_dequeue() has set the
+ *	 vdqcr_owned field (which it does before setting VDQCR), and
+ *	 qman_volatile_dequeue() blocks interrupts and preemption while this is
+ *	 done so that we can't interfere.
+ *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ *	 with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any QMan API.
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+					unsigned int poll_limit)
+{
+	const struct qm_dqrr_entry *dq;
+	struct qman_fq *fq;
+	enum qman_cb_dqrr_result res;
+	unsigned int limit = 0;
+
+	do {
+		qm_dqrr_pvb_update(&p->p);
+		dq = qm_dqrr_current(&p->p);
+		if (!dq)
+			break;
+
+		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+			/*
+			 * VDQCR: don't trust contextB as the FQ may have
+			 * been configured for h/w consumption and we're
+			 * draining it post-retirement.
+			 */
+			fq = p->vdqcr_owned;
+			/*
+			 * We only set QMAN_FQ_STATE_NE when retiring, so we
+			 * only need to check for clearing it when doing
+			 * volatile dequeues.  It's one less thing to check
+			 * in the critical path (SDQCR).
+			 */
+			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+				fq_clear(fq, QMAN_FQ_STATE_NE);
+			/*
+			 * This is duplicated from the SDQCR code, but we
+			 * have stuff to do before *and* after this callback,
+			 * and we don't want multiple if()s in the critical
+			 * path (SDQCR).
+			 */
+			res = fq->cb.dqrr(p, fq, dq);
+			if (res == qman_cb_dqrr_stop)
+				break;
+			/* Check for VDQCR completion */
+			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+				clear_vdqcr(p, fq);
+		} else {
+			/* SDQCR: contextB points to the FQ */
+			fq = tag_to_fq(dq->contextB);
+			/* Now let the callback do its stuff */
+			res = fq->cb.dqrr(p, fq, dq);
+			/*
+			 * The callback can request that we exit without
+			 * consuming this entry nor advancing;
+			 */
+			if (res == qman_cb_dqrr_stop)
+				break;
+		}
+		/* Interpret 'dq' from a driver perspective. */
+		/*
+		 * Parking isn't possible unless HELDACTIVE was set. NB,
+		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+		 * check for HELDACTIVE to cover both.
+		 */
+		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+			    (res != qman_cb_dqrr_park));
+		/* just means "skip it, I'll consume it myself later on" */
+		if (res != qman_cb_dqrr_defer)
+			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+						 res == qman_cb_dqrr_park);
+		/* Move forward */
+		qm_dqrr_next(&p->p);
+		/*
+		 * Entry processed and consumed, increment our counter.  The
+		 * callback can request that we exit after consuming the
+		 * entry, and we also exit if we reach our processing limit,
+		 * so loop back only if neither of these conditions is met.
+		 */
+	} while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
+
+	return limit;
+}
+
+void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
+{
+	unsigned long irqflags;
+
+	local_irq_save(irqflags);
+	set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
+	qm_out(&p->p, QM_REG_IER, p->irq_sources);
+	local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_add);
+
+void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
+{
+	unsigned long irqflags;
+	u32 ier;
+
+	/*
+	 * Our interrupt handler only processes+clears status register bits that
+	 * are in p->irq_sources. As we're trimming that mask, if one of them
+	 * were to assert in the status register just before we remove it from
+	 * the enable register, there would be an interrupt-storm when we
+	 * release the IRQ lock. So we wait for the enable register update to
+	 * take effect in h/w (by reading it back) and then clear all other bits
+	 * in the status register. Ie. we clear them from ISR once it's certain
+	 * IER won't allow them to reassert.
+	 */
+	local_irq_save(irqflags);
+	bits &= QM_PIRQ_VISIBLE;
+	clear_bits(bits, &p->irq_sources);
+	qm_out(&p->p, QM_REG_IER, p->irq_sources);
+	ier = qm_in(&p->p, QM_REG_IER);
+	/*
+	 * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+	 * data-dependency, ie. to protect against re-ordering.
+	 */
+	qm_out(&p->p, QM_REG_ISR, ~ier);
+	local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_remove);
+
+const cpumask_t *qman_affine_cpus(void)
+{
+	return &affine_mask;
+}
+EXPORT_SYMBOL(qman_affine_cpus);
+
+u16 qman_affine_channel(int cpu)
+{
+	if (cpu < 0) {
+		struct qman_portal *portal = get_affine_portal();
+
+		cpu = portal->config->cpu;
+		put_affine_portal();
+	}
+	WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
+	return affine_channels[cpu];
+}
+EXPORT_SYMBOL(qman_affine_channel);
+
+struct qman_portal *qman_get_affine_portal(int cpu)
+{
+	return affine_portals[cpu];
+}
+EXPORT_SYMBOL(qman_get_affine_portal);
+
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
+{
+	return __poll_portal_fast(p, limit);
+}
+EXPORT_SYMBOL(qman_p_poll_dqrr);
+
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
+{
+	unsigned long irqflags;
+
+	local_irq_save(irqflags);
+	pools &= p->config->pools;
+	p->sdqcr |= pools;
+	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+	local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_add);
+
+/* Frame queue API */
+
+static const char *mcr_result_str(u8 result)
+{
+	switch (result) {
+	case QM_MCR_RESULT_NULL:
+		return "QM_MCR_RESULT_NULL";
+	case QM_MCR_RESULT_OK:
+		return "QM_MCR_RESULT_OK";
+	case QM_MCR_RESULT_ERR_FQID:
+		return "QM_MCR_RESULT_ERR_FQID";
+	case QM_MCR_RESULT_ERR_FQSTATE:
+		return "QM_MCR_RESULT_ERR_FQSTATE";
+	case QM_MCR_RESULT_ERR_NOTEMPTY:
+		return "QM_MCR_RESULT_ERR_NOTEMPTY";
+	case QM_MCR_RESULT_PENDING:
+		return "QM_MCR_RESULT_PENDING";
+	case QM_MCR_RESULT_ERR_BADCOMMAND:
+		return "QM_MCR_RESULT_ERR_BADCOMMAND";
+	}
+	return "<unknown MCR result>";
+}
+
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
+{
+	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+		int ret = qman_alloc_fqid(&fqid);
+
+		if (ret)
+			return ret;
+	}
+	fq->fqid = fqid;
+	fq->flags = flags;
+	fq->state = qman_fq_state_oos;
+	fq->cgr_groupid = 0;
+
+	/* A context_b of 0 is allegedly special, so don't use that fqid */
+	if (fqid == 0 || fqid >= num_fqids) {
+		WARN(1, "bad fqid %d\n", fqid);
+		return -EINVAL;
+	}
+
+	fq->idx = fqid * 2;
+	if (flags & QMAN_FQ_FLAG_NO_MODIFY)
+		fq->idx++;
+
+	WARN_ON(fq_table[fq->idx]);
+	fq_table[fq->idx] = fq;
+
+	return 0;
+}
+EXPORT_SYMBOL(qman_create_fq);
+
+void qman_destroy_fq(struct qman_fq *fq)
+{
+	/*
+	 * We don't need to lock the FQ as it is a pre-condition that the FQ be
+	 * quiesced. Instead, run some checks.
+	 */
+	switch (fq->state) {
+	case qman_fq_state_parked:
+	case qman_fq_state_oos:
+		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+			qman_release_fqid(fq->fqid);
+
+		DPAA_ASSERT(fq_table[fq->idx]);
+		fq_table[fq->idx] = NULL;
+		return;
+	default:
+		break;
+	}
+	DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
+}
+EXPORT_SYMBOL(qman_destroy_fq);
+
+u32 qman_fq_fqid(struct qman_fq *fq)
+{
+	return fq->fqid;
+}
+EXPORT_SYMBOL(qman_fq_fqid);
+
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p;
+	u8 res, myverb;
+	int ret = 0;
+
+	myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
+		? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+	if (fq->state != qman_fq_state_oos &&
+	    fq->state != qman_fq_state_parked)
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+		return -EINVAL;
+#endif
+	if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+		/* And can't be set at the same time as TDTHRESH */
+		if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+			return -EINVAL;
+	}
+	/* Issue an INITFQ_[PARKED|SCHED] management command */
+	p = get_affine_portal();
+	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+	    (fq->state != qman_fq_state_oos &&
+	     fq->state != qman_fq_state_parked)) {
+		ret = -EBUSY;
+		goto out;
+	}
+	mcc = qm_mc_start(&p->p);
+	if (opts)
+		mcc->initfq = *opts;
+	mcc->initfq.fqid = fq->fqid;
+	mcc->initfq.count = 0;
+	/*
+	 * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
+	 * demux pointer. Otherwise, the caller-provided value is allowed to
+	 * stand, don't overwrite it.
+	 */
+	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+		dma_addr_t phys_fq;
+
+		mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
+		mcc->initfq.fqd.context_b = fq_to_tag(fq);
+		/*
+		 *  and the physical address - NB, if the user wasn't trying to
+		 * set CONTEXTA, clear the stashing settings.
+		 */
+		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
+			mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+			memset(&mcc->initfq.fqd.context_a, 0,
+				sizeof(mcc->initfq.fqd.context_a));
+		} else {
+			phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
+						 DMA_TO_DEVICE);
+			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+		}
+	}
+	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+		int wq = 0;
+
+		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
+			mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+			wq = 4;
+		}
+		qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
+	}
+	qm_mc_commit(&p->p, myverb);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		dev_err(p->config->dev, "MCR timeout\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+	res = mcr->result;
+	if (res != QM_MCR_RESULT_OK) {
+		ret = -EIO;
+		goto out;
+	}
+	if (opts) {
+		if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
+			if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+			else
+				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+		}
+		if (opts->we_mask & QM_INITFQ_WE_CGID)
+			fq->cgr_groupid = opts->fqd.cgid;
+	}
+	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+		qman_fq_state_sched : qman_fq_state_parked;
+
+out:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_init_fq);
+
+int qman_schedule_fq(struct qman_fq *fq)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p;
+	int ret = 0;
+
+	if (fq->state != qman_fq_state_parked)
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+		return -EINVAL;
+#endif
+	/* Issue a ALTERFQ_SCHED management command */
+	p = get_affine_portal();
+	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+	    fq->state != qman_fq_state_parked) {
+		ret = -EBUSY;
+		goto out;
+	}
+	mcc = qm_mc_start(&p->p);
+	mcc->alterfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		dev_err(p->config->dev, "ALTER_SCHED timeout\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+	if (mcr->result != QM_MCR_RESULT_OK) {
+		ret = -EIO;
+		goto out;
+	}
+	fq->state = qman_fq_state_sched;
+out:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_schedule_fq);
+
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p;
+	int ret;
+	u8 res;
+
+	if (fq->state != qman_fq_state_parked &&
+	    fq->state != qman_fq_state_sched)
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+		return -EINVAL;
+#endif
+	p = get_affine_portal();
+	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+	    fq->state == qman_fq_state_retired ||
+	    fq->state == qman_fq_state_oos) {
+		ret = -EBUSY;
+		goto out;
+	}
+	mcc = qm_mc_start(&p->p);
+	mcc->alterfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+	res = mcr->result;
+	/*
+	 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+	 * and defer the flags until FQRNI or FQRN (respectively) show up. But
+	 * "Friendly" is to process OK immediately, and not set CHANGING. We do
+	 * friendly, otherwise the caller doesn't necessarily have a fully
+	 * "retired" FQ on return even if the retirement was immediate. However
+	 * this does mean some code duplication between here and
+	 * fq_state_change().
+	 */
+	if (res == QM_MCR_RESULT_OK) {
+		ret = 0;
+		/* Process 'fq' right away, we'll ignore FQRNI */
+		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+			fq_set(fq, QMAN_FQ_STATE_NE);
+		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+			fq_set(fq, QMAN_FQ_STATE_ORL);
+		if (flags)
+			*flags = fq->flags;
+		fq->state = qman_fq_state_retired;
+		if (fq->cb.fqs) {
+			/*
+			 * Another issue with supporting "immediate" retirement
+			 * is that we're forced to drop FQRNIs, because by the
+			 * time they're seen it may already be "too late" (the
+			 * fq may have been OOS'd and free()'d already). But if
+			 * the upper layer wants a callback whether it's
+			 * immediate or not, we have to fake a "MR" entry to
+			 * look like an FQRNI...
+			 */
+			union qm_mr_entry msg;
+
+			msg.verb = QM_MR_VERB_FQRNI;
+			msg.fq.fqs = mcr->alterfq.fqs;
+			msg.fq.fqid = fq->fqid;
+			msg.fq.contextB = fq_to_tag(fq);
+			fq->cb.fqs(p, fq, &msg);
+		}
+	} else if (res == QM_MCR_RESULT_PENDING) {
+		ret = 1;
+		fq_set(fq, QMAN_FQ_STATE_CHANGING);
+	} else {
+		ret = -EIO;
+	}
+out:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_retire_fq);
+
+int qman_oos_fq(struct qman_fq *fq)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p;
+	int ret = 0;
+
+	if (fq->state != qman_fq_state_retired)
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+		return -EINVAL;
+#endif
+	p = get_affine_portal();
+	if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
+	    fq->state != qman_fq_state_retired) {
+		ret = -EBUSY;
+		goto out;
+	}
+	mcc = qm_mc_start(&p->p);
+	mcc->alterfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+	if (mcr->result != QM_MCR_RESULT_OK) {
+		ret = -EIO;
+		goto out;
+	}
+	fq->state = qman_fq_state_oos;
+out:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_oos_fq);
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	int ret = 0;
+
+	mcc = qm_mc_start(&p->p);
+	mcc->queryfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+	if (mcr->result == QM_MCR_RESULT_OK)
+		*fqd = mcr->queryfq.fqd;
+	else
+		ret = -EIO;
+out:
+	put_affine_portal();
+	return ret;
+}
+
+static int qman_query_fq_np(struct qman_fq *fq,
+			    struct qm_mcr_queryfq_np *np)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	int ret = 0;
+
+	mcc = qm_mc_start(&p->p);
+	mcc->queryfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+	if (mcr->result == QM_MCR_RESULT_OK)
+		*np = mcr->queryfq_np;
+	else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
+		ret = -ERANGE;
+	else
+		ret = -EIO;
+out:
+	put_affine_portal();
+	return ret;
+}
+
+static int qman_query_cgr(struct qman_cgr *cgr,
+			  struct qm_mcr_querycgr *cgrd)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	int ret = 0;
+
+	mcc = qm_mc_start(&p->p);
+	mcc->querycgr.cgid = cgr->cgrid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+	if (mcr->result == QM_MCR_RESULT_OK)
+		*cgrd = mcr->querycgr;
+	else {
+		dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
+			mcr_result_str(mcr->result));
+		ret = -EIO;
+	}
+out:
+	put_affine_portal();
+	return ret;
+}
+
+int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
+{
+	struct qm_mcr_querycgr query_cgr;
+	int err;
+
+	err = qman_query_cgr(cgr, &query_cgr);
+	if (err)
+		return err;
+
+	*result = !!query_cgr.cgr.cs;
+	return 0;
+}
+EXPORT_SYMBOL(qman_query_cgr_congested);
+
+/* internal function used as a wait_event() expression */
+static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
+{
+	unsigned long irqflags;
+	int ret = -EBUSY;
+
+	local_irq_save(irqflags);
+	if (p->vdqcr_owned)
+		goto out;
+	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+		goto out;
+
+	fq_set(fq, QMAN_FQ_STATE_VDQCR);
+	p->vdqcr_owned = fq;
+	qm_dqrr_vdqcr_set(&p->p, vdqcr);
+	ret = 0;
+out:
+	local_irq_restore(irqflags);
+	return ret;
+}
+
+static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
+{
+	int ret;
+
+	*p = get_affine_portal();
+	ret = set_p_vdqcr(*p, fq, vdqcr);
+	put_affine_portal();
+	return ret;
+}
+
+static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
+				u32 vdqcr, u32 flags)
+{
+	int ret = 0;
+
+	if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+		ret = wait_event_interruptible(affine_queue,
+				!set_vdqcr(p, fq, vdqcr));
+	else
+		wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
+	return ret;
+}
+
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
+{
+	struct qman_portal *p;
+	int ret;
+
+	if (fq->state != qman_fq_state_parked &&
+	    fq->state != qman_fq_state_retired)
+		return -EINVAL;
+	if (vdqcr & QM_VDQCR_FQID_MASK)
+		return -EINVAL;
+	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+		return -EBUSY;
+	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+	if (flags & QMAN_VOLATILE_FLAG_WAIT)
+		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
+	else
+		ret = set_vdqcr(&p, fq, vdqcr);
+	if (ret)
+		return ret;
+	/* VDQCR is set */
+	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+			/*
+			 * NB: don't propagate any error - the caller wouldn't
+			 * know whether the VDQCR was issued or not. A signal
+			 * could arrive after returning anyway, so the caller
+			 * can check signal_pending() if that's an issue.
+			 */
+			wait_event_interruptible(affine_queue,
+				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+		else
+			wait_event(affine_queue,
+				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+	}
+	return 0;
+}
+EXPORT_SYMBOL(qman_volatile_dequeue);
+
+static void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+	if (avail)
+		qm_eqcr_cce_prefetch(&p->p);
+	else
+		qm_eqcr_cce_update(&p->p);
+}
+
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
+{
+	struct qman_portal *p;
+	struct qm_eqcr_entry *eq;
+	unsigned long irqflags;
+	u8 avail;
+
+	p = get_affine_portal();
+	local_irq_save(irqflags);
+
+	if (p->use_eqcr_ci_stashing) {
+		/*
+		 * The stashing case is easy, only update if we need to in
+		 * order to try and liberate ring entries.
+		 */
+		eq = qm_eqcr_start_stash(&p->p);
+	} else {
+		/*
+		 * The non-stashing case is harder, need to prefetch ahead of
+		 * time.
+		 */
+		avail = qm_eqcr_get_avail(&p->p);
+		if (avail < 2)
+			update_eqcr_ci(p, avail);
+		eq = qm_eqcr_start_no_stash(&p->p);
+	}
+
+	if (unlikely(!eq))
+		goto out;
+
+	eq->fqid = fq->fqid;
+	eq->tag = fq_to_tag(fq);
+	eq->fd = *fd;
+
+	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
+out:
+	local_irq_restore(irqflags);
+	put_affine_portal();
+	return 0;
+}
+EXPORT_SYMBOL(qman_enqueue);
+
+static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
+			 struct qm_mcc_initcgr *opts)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	u8 verb = QM_MCC_VERB_MODIFYCGR;
+	int ret = 0;
+
+	mcc = qm_mc_start(&p->p);
+	if (opts)
+		mcc->initcgr = *opts;
+	mcc->initcgr.cgid = cgr->cgrid;
+	if (flags & QMAN_CGR_FLAG_USE_INIT)
+		verb = QM_MCC_VERB_INITCGR;
+	qm_mc_commit(&p->p, verb);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+	if (mcr->result != QM_MCR_RESULT_OK)
+		ret = -EIO;
+
+out:
+	put_affine_portal();
+	return ret;
+}
+
+#define PORTAL_IDX(n)	(n->config->channel - QM_CHANNEL_SWPORTAL0)
+#define TARG_MASK(n)	(BIT(31) >> PORTAL_IDX(n))
+
+static u8 qman_cgr_cpus[CGR_NUM];
+
+void qman_init_cgr_all(void)
+{
+	struct qman_cgr cgr;
+	int err_cnt = 0;
+
+	for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
+		if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
+			err_cnt++;
+	}
+
+	if (err_cnt)
+		pr_err("Warning: %d error%s while initialising CGR h/w\n",
+		       err_cnt, (err_cnt > 1) ? "s" : "");
+}
+
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+		    struct qm_mcc_initcgr *opts)
+{
+	struct qm_mcr_querycgr cgr_state;
+	struct qm_mcc_initcgr local_opts = {};
+	int ret;
+	struct qman_portal *p;
+
+	/*
+	 * We have to check that the provided CGRID is within the limits of the
+	 * data-structures, for obvious reasons. However we'll let h/w take
+	 * care of determining whether it's within the limits of what exists on
+	 * the SoC.
+	 */
+	if (cgr->cgrid >= CGR_NUM)
+		return -EINVAL;
+
+	preempt_disable();
+	p = get_affine_portal();
+	qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
+	preempt_enable();
+
+	cgr->chan = p->config->channel;
+	spin_lock(&p->cgr_lock);
+
+	if (opts) {
+		ret = qman_query_cgr(cgr, &cgr_state);
+		if (ret)
+			goto out;
+		if (opts)
+			local_opts = *opts;
+		if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+			local_opts.cgr.cscn_targ_upd_ctrl =
+				QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
+		else
+			/* Overwrite TARG */
+			local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+						   TARG_MASK(p);
+		local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+		/* send init if flags indicate so */
+		if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+			ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+					    &local_opts);
+		else
+			ret = qm_modify_cgr(cgr, 0, &local_opts);
+		if (ret)
+			goto out;
+	}
+
+	list_add(&cgr->node, &p->cgr_cbs);
+
+	/* Determine if newly added object requires its callback to be called */
+	ret = qman_query_cgr(cgr, &cgr_state);
+	if (ret) {
+		/* we can't go back, so proceed and return success */
+		dev_err(p->config->dev, "CGR HW state partially modified\n");
+		ret = 0;
+		goto out;
+	}
+	if (cgr->cb && cgr_state.cgr.cscn_en &&
+	    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
+		cgr->cb(p, cgr, 1);
+out:
+	spin_unlock(&p->cgr_lock);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr);
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+	unsigned long irqflags;
+	struct qm_mcr_querycgr cgr_state;
+	struct qm_mcc_initcgr local_opts;
+	int ret = 0;
+	struct qman_cgr *i;
+	struct qman_portal *p = get_affine_portal();
+
+	if (cgr->chan != p->config->channel) {
+		/* attempt to delete from other portal than creator */
+		dev_err(p->config->dev, "CGR not owned by current portal");
+		dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
+			cgr->chan, p->config->channel);
+
+		ret = -EINVAL;
+		goto put_portal;
+	}
+	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+	spin_lock_irqsave(&p->cgr_lock, irqflags);
+	list_del(&cgr->node);
+	/*
+	 * If there are no other CGR objects for this CGRID in the list,
+	 * update CSCN_TARG accordingly
+	 */
+	list_for_each_entry(i, &p->cgr_cbs, node)
+		if (i->cgrid == cgr->cgrid && i->cb)
+			goto release_lock;
+	ret = qman_query_cgr(cgr, &cgr_state);
+	if (ret)  {
+		/* add back to the list */
+		list_add(&cgr->node, &p->cgr_cbs);
+		goto release_lock;
+	}
+	/* Overwrite TARG */
+	local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
+	else
+		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
+							 ~(TARG_MASK(p));
+	ret = qm_modify_cgr(cgr, 0, &local_opts);
+	if (ret)
+		/* add back to the list */
+		list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+put_portal:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_delete_cgr);
+
+struct cgr_comp {
+	struct qman_cgr *cgr;
+	struct completion completion;
+};
+
+static int qman_delete_cgr_thread(void *p)
+{
+	struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
+	int ret;
+
+	ret = qman_delete_cgr(cgr_comp->cgr);
+	complete(&cgr_comp->completion);
+
+	return ret;
+}
+
+void qman_delete_cgr_safe(struct qman_cgr *cgr)
+{
+	struct task_struct *thread;
+	struct cgr_comp cgr_comp;
+
+	preempt_disable();
+	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
+		init_completion(&cgr_comp.completion);
+		cgr_comp.cgr = cgr;
+		thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
+					"cgr_del");
+
+		if (IS_ERR(thread))
+			goto out;
+
+		kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
+		wake_up_process(thread);
+		wait_for_completion(&cgr_comp.completion);
+		preempt_enable();
+		return;
+	}
+out:
+	qman_delete_cgr(cgr);
+	preempt_enable();
+}
+EXPORT_SYMBOL(qman_delete_cgr_safe);
+
+/* Cleanup FQs */
+
+static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
+{
+	const union qm_mr_entry *msg;
+	int found = 0;
+
+	qm_mr_pvb_update(p);
+	msg = qm_mr_current(p);
+	while (msg) {
+		if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
+			found = 1;
+		qm_mr_next(p);
+		qm_mr_cci_consume_to_current(p);
+		qm_mr_pvb_update(p);
+		msg = qm_mr_current(p);
+	}
+	return found;
+}
+
+static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
+				      bool wait)
+{
+	const struct qm_dqrr_entry *dqrr;
+	int found = 0;
+
+	do {
+		qm_dqrr_pvb_update(p);
+		dqrr = qm_dqrr_current(p);
+		if (!dqrr)
+			cpu_relax();
+	} while (wait && !dqrr);
+
+	while (dqrr) {
+		if (dqrr->fqid == fqid && (dqrr->stat & s))
+			found = 1;
+		qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
+		qm_dqrr_pvb_update(p);
+		qm_dqrr_next(p);
+		dqrr = qm_dqrr_current(p);
+	}
+	return found;
+}
+
+#define qm_mr_drain(p, V) \
+	_qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)
+
+#define qm_dqrr_drain(p, f, S) \
+	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)
+
+#define qm_dqrr_drain_wait(p, f, S) \
+	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)
+
+#define qm_dqrr_drain_nomatch(p) \
+	_qm_dqrr_consume_and_match(p, 0, 0, false)
+
+static int qman_shutdown_fq(u32 fqid)
+{
+	struct qman_portal *p;
+	struct device *dev;
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	int orl_empty, drain = 0, ret = 0;
+	u32 channel, wq, res;
+	u8 state;
+
+	p = get_affine_portal();
+	dev = p->config->dev;
+	/* Determine the state of the FQID */
+	mcc = qm_mc_start(&p->p);
+	mcc->queryfq_np.fqid = fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		dev_err(dev, "QUERYFQ_NP timeout\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+	if (state == QM_MCR_NP_STATE_OOS)
+		goto out; /* Already OOS, no need to do anymore checks */
+
+	/* Query which channel the FQ is using */
+	mcc = qm_mc_start(&p->p);
+	mcc->queryfq.fqid = fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		dev_err(dev, "QUERYFQ timeout\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+	/* Need to store these since the MCR gets reused */
+	channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
+	wq = qm_fqd_get_wq(&mcr->queryfq.fqd);
+
+	switch (state) {
+	case QM_MCR_NP_STATE_TEN_SCHED:
+	case QM_MCR_NP_STATE_TRU_SCHED:
+	case QM_MCR_NP_STATE_ACTIVE:
+	case QM_MCR_NP_STATE_PARKED:
+		orl_empty = 0;
+		mcc = qm_mc_start(&p->p);
+		mcc->alterfq.fqid = fqid;
+		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+		if (!qm_mc_result_timeout(&p->p, &mcr)) {
+			dev_err(dev, "QUERYFQ_NP timeout\n");
+			ret = -ETIMEDOUT;
+			goto out;
+		}
+		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+			    QM_MCR_VERB_ALTER_RETIRE);
+		res = mcr->result; /* Make a copy as we reuse MCR below */
+
+		if (res == QM_MCR_RESULT_PENDING) {
+			/*
+			 * Need to wait for the FQRN in the message ring, which
+			 * will only occur once the FQ has been drained.  In
+			 * order for the FQ to drain the portal needs to be set
+			 * to dequeue from the channel the FQ is scheduled on
+			 */
+			int found_fqrn = 0;
+			u16 dequeue_wq = 0;
+
+			/* Flag that we need to drain FQ */
+			drain = 1;
+
+			if (channel >= qm_channel_pool1 &&
+			    channel < qm_channel_pool1 + 15) {
+				/* Pool channel, enable the bit in the portal */
+				dequeue_wq = (channel -
+					      qm_channel_pool1 + 1)<<4 | wq;
+			} else if (channel < qm_channel_pool1) {
+				/* Dedicated channel */
+				dequeue_wq = wq;
+			} else {
+				dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
+					fqid, channel);
+				ret = -EBUSY;
+				goto out;
+			}
+			/* Set the sdqcr to drain this channel */
+			if (channel < qm_channel_pool1)
+				qm_dqrr_sdqcr_set(&p->p,
+						  QM_SDQCR_TYPE_ACTIVE |
+						  QM_SDQCR_CHANNELS_DEDICATED);
+			else
+				qm_dqrr_sdqcr_set(&p->p,
+						  QM_SDQCR_TYPE_ACTIVE |
+						  QM_SDQCR_CHANNELS_POOL_CONV
+						  (channel));
+			do {
+				/* Keep draining DQRR while checking the MR*/
+				qm_dqrr_drain_nomatch(&p->p);
+				/* Process message ring too */
+				found_fqrn = qm_mr_drain(&p->p, FQRN);
+				cpu_relax();
+			} while (!found_fqrn);
+
+		}
+		if (res != QM_MCR_RESULT_OK &&
+		    res != QM_MCR_RESULT_PENDING) {
+			dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
+				fqid, res);
+			ret = -EIO;
+			goto out;
+		}
+		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+			/*
+			 * ORL had no entries, no need to wait until the
+			 * ERNs come in
+			 */
+			orl_empty = 1;
+		}
+		/*
+		 * Retirement succeeded, check to see if FQ needs
+		 * to be drained
+		 */
+		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+			/* FQ is Not Empty, drain using volatile DQ commands */
+			do {
+				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+
+				qm_dqrr_vdqcr_set(&p->p, vdqcr);
+				/*
+				 * Wait for a dequeue and process the dequeues,
+				 * making sure to empty the ring completely
+				 */
+			} while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
+		}
+		qm_dqrr_sdqcr_set(&p->p, 0);
+
+		while (!orl_empty) {
+			/* Wait for the ORL to have been completely drained */
+			orl_empty = qm_mr_drain(&p->p, FQRL);
+			cpu_relax();
+		}
+		mcc = qm_mc_start(&p->p);
+		mcc->alterfq.fqid = fqid;
+		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+		if (!qm_mc_result_timeout(&p->p, &mcr)) {
+			ret = -ETIMEDOUT;
+			goto out;
+		}
+
+		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+			    QM_MCR_VERB_ALTER_OOS);
+		if (mcr->result != QM_MCR_RESULT_OK) {
+			dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
+				fqid, mcr->result);
+			ret = -EIO;
+			goto out;
+		}
+		break;
+
+	case QM_MCR_NP_STATE_RETIRED:
+		/* Send OOS Command */
+		mcc = qm_mc_start(&p->p);
+		mcc->alterfq.fqid = fqid;
+		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+		if (!qm_mc_result_timeout(&p->p, &mcr)) {
+			ret = -ETIMEDOUT;
+			goto out;
+		}
+
+		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+			    QM_MCR_VERB_ALTER_OOS);
+		if (mcr->result) {
+			dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
+				fqid, mcr->result);
+			ret = -EIO;
+			goto out;
+		}
+		break;
+
+	case QM_MCR_NP_STATE_OOS:
+		/*  Done */
+		break;
+
+	default:
+		ret = -EIO;
+	}
+
+out:
+	put_affine_portal();
+	return ret;
+}
+
+const struct qm_portal_config *qman_get_qm_portal_config(
+						struct qman_portal *portal)
+{
+	return portal->config;
+}
+
+struct gen_pool *qm_fqalloc; /* FQID allocator */
+struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+
+static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
+{
+	unsigned long addr;
+
+	addr = gen_pool_alloc(p, cnt);
+	if (!addr)
+		return -ENOMEM;
+
+	*result = addr & ~DPAA_GENALLOC_OFF;
+
+	return 0;
+}
+
+int qman_alloc_fqid_range(u32 *result, u32 count)
+{
+	return qman_alloc_range(qm_fqalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_fqid_range);
+
+int qman_alloc_pool_range(u32 *result, u32 count)
+{
+	return qman_alloc_range(qm_qpalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_pool_range);
+
+int qman_alloc_cgrid_range(u32 *result, u32 count)
+{
+	return qman_alloc_range(qm_cgralloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_cgrid_range);
+
+int qman_release_fqid(u32 fqid)
+{
+	int ret = qman_shutdown_fq(fqid);
+
+	if (ret) {
+		pr_debug("FQID %d leaked\n", fqid);
+		return ret;
+	}
+
+	gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
+	return 0;
+}
+EXPORT_SYMBOL(qman_release_fqid);
+
+static int qpool_cleanup(u32 qp)
+{
+	/*
+	 * We query all FQDs starting from
+	 * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
+	 * whose destination channel is the pool-channel being released.
+	 * When a non-OOS FQD is found we attempt to clean it up
+	 */
+	struct qman_fq fq = {
+		.fqid = QM_FQID_RANGE_START
+	};
+	int err;
+
+	do {
+		struct qm_mcr_queryfq_np np;
+
+		err = qman_query_fq_np(&fq, &np);
+		if (err)
+			/* FQID range exceeded, found no problems */
+			return 0;
+		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+			struct qm_fqd fqd;
+
+			err = qman_query_fq(&fq, &fqd);
+			if (WARN_ON(err))
+				return 0;
+			if (qm_fqd_get_chan(&fqd) == qp) {
+				/* The channel is the FQ's target, clean it */
+				err = qman_shutdown_fq(fq.fqid);
+				if (err)
+					/*
+					 * Couldn't shut down the FQ
+					 * so the pool must be leaked
+					 */
+					return err;
+			}
+		}
+		/* Move to the next FQID */
+		fq.fqid++;
+	} while (1);
+}
+
+int qman_release_pool(u32 qp)
+{
+	int ret;
+
+	ret = qpool_cleanup(qp);
+	if (ret) {
+		pr_debug("CHID %d leaked\n", qp);
+		return ret;
+	}
+
+	gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
+	return 0;
+}
+EXPORT_SYMBOL(qman_release_pool);
+
+static int cgr_cleanup(u32 cgrid)
+{
+	/*
+	 * query all FQDs starting from FQID 1 until we get an "invalid FQID"
+	 * error, looking for non-OOS FQDs whose CGR is the CGR being released
+	 */
+	struct qman_fq fq = {
+		.fqid = 1
+	};
+	int err;
+
+	do {
+		struct qm_mcr_queryfq_np np;
+
+		err = qman_query_fq_np(&fq, &np);
+		if (err)
+			/* FQID range exceeded, found no problems */
+			return 0;
+		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+			struct qm_fqd fqd;
+
+			err = qman_query_fq(&fq, &fqd);
+			if (WARN_ON(err))
+				return 0;
+			if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+			    fqd.cgid == cgrid) {
+				pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
+				       cgrid, fq.fqid);
+				return -EIO;
+			}
+		}
+		/* Move to the next FQID */
+		fq.fqid++;
+	} while (1);
+}
+
+int qman_release_cgrid(u32 cgrid)
+{
+	int ret;
+
+	ret = cgr_cleanup(cgrid);
+	if (ret) {
+		pr_debug("CGRID %d leaked\n", cgrid);
+		return ret;
+	}
+
+	gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
+	return 0;
+}
+EXPORT_SYMBOL(qman_release_cgrid);
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
new file mode 100644
index 0000000..0cace9e
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -0,0 +1,808 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+u16 qman_ip_rev;
+EXPORT_SYMBOL(qman_ip_rev);
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+EXPORT_SYMBOL(qm_channel_pool1);
+
+/* Register offsets */
+#define REG_QCSP_LIO_CFG(n)	(0x0000 + ((n) * 0x10))
+#define REG_QCSP_IO_CFG(n)	(0x0004 + ((n) * 0x10))
+#define REG_QCSP_DD_CFG(n)	(0x000c + ((n) * 0x10))
+#define REG_DD_CFG		0x0200
+#define REG_DCP_CFG(n)		(0x0300 + ((n) * 0x10))
+#define REG_DCP_DD_CFG(n)	(0x0304 + ((n) * 0x10))
+#define REG_DCP_DLM_AVG(n)	(0x030c + ((n) * 0x10))
+#define REG_PFDR_FPC		0x0400
+#define REG_PFDR_FP_HEAD	0x0404
+#define REG_PFDR_FP_TAIL	0x0408
+#define REG_PFDR_FP_LWIT	0x0410
+#define REG_PFDR_CFG		0x0414
+#define REG_SFDR_CFG		0x0500
+#define REG_SFDR_IN_USE		0x0504
+#define REG_WQ_CS_CFG(n)	(0x0600 + ((n) * 0x04))
+#define REG_WQ_DEF_ENC_WQID	0x0630
+#define REG_WQ_SC_DD_CFG(n)	(0x640 + ((n) * 0x04))
+#define REG_WQ_PC_DD_CFG(n)	(0x680 + ((n) * 0x04))
+#define REG_WQ_DC0_DD_CFG(n)	(0x6c0 + ((n) * 0x04))
+#define REG_WQ_DC1_DD_CFG(n)	(0x700 + ((n) * 0x04))
+#define REG_WQ_DCn_DD_CFG(n)	(0x6c0 + ((n) * 0x40)) /* n=2,3 */
+#define REG_CM_CFG		0x0800
+#define REG_ECSR		0x0a00
+#define REG_ECIR		0x0a04
+#define REG_EADR		0x0a08
+#define REG_ECIR2		0x0a0c
+#define REG_EDATA(n)		(0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n)		(0x0a80 + ((n) * 0x04))
+#define REG_MCR			0x0b00
+#define REG_MCP(n)		(0x0b04 + ((n) * 0x04))
+#define REG_MISC_CFG		0x0be0
+#define REG_HID_CFG		0x0bf0
+#define REG_IDLE_STAT		0x0bf4
+#define REG_IP_REV_1		0x0bf8
+#define REG_IP_REV_2		0x0bfc
+#define REG_FQD_BARE		0x0c00
+#define REG_PFDR_BARE		0x0c20
+#define REG_offset_BAR		0x0004	/* relative to REG_[FQD|PFDR]_BARE */
+#define REG_offset_AR		0x0010	/* relative to REG_[FQD|PFDR]_BARE */
+#define REG_QCSP_BARE		0x0c80
+#define REG_QCSP_BAR		0x0c84
+#define REG_CI_SCHED_CFG	0x0d00
+#define REG_SRCIDR		0x0d04
+#define REG_LIODNR		0x0d08
+#define REG_CI_RLM_AVG		0x0d14
+#define REG_ERR_ISR		0x0e00
+#define REG_ERR_IER		0x0e04
+#define REG_REV3_QCSP_LIO_CFG(n)	(0x1000 + ((n) * 0x10))
+#define REG_REV3_QCSP_IO_CFG(n)	(0x1004 + ((n) * 0x10))
+#define REG_REV3_QCSP_DD_CFG(n)	(0x100c + ((n) * 0x10))
+
+/* Assists for QMAN_MCR */
+#define MCR_INIT_PFDR		0x01000000
+#define MCR_get_rslt(v)		(u8)((v) >> 24)
+#define MCR_rslt_idle(r)	(!(r) || ((r) >= 0xf0))
+#define MCR_rslt_ok(r)		((r) == 0xf0)
+#define MCR_rslt_eaccess(r)	((r) == 0xf8)
+#define MCR_rslt_inval(r)	((r) == 0xff)
+
+/*
+ * Corenet initiator settings. Stash request queues are 4-deep to match cores
+ * ability to snarf. Stash priority is 3, other priorities are 2.
+ */
+#define QM_CI_SCHED_CFG_SRCCIV		4
+#define QM_CI_SCHED_CFG_SRQ_W		3
+#define QM_CI_SCHED_CFG_RW_W		2
+#define QM_CI_SCHED_CFG_BMAN_W		2
+/* write SRCCIV enable */
+#define QM_CI_SCHED_CFG_SRCCIV_EN	BIT(31)
+
+/* Follows WQ_CS_CFG0-5 */
+enum qm_wq_class {
+	qm_wq_portal = 0,
+	qm_wq_pool = 1,
+	qm_wq_fman0 = 2,
+	qm_wq_fman1 = 3,
+	qm_wq_caam = 4,
+	qm_wq_pme = 5,
+	qm_wq_first = qm_wq_portal,
+	qm_wq_last = qm_wq_pme
+};
+
+/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
+enum qm_memory {
+	qm_memory_fqd,
+	qm_memory_pfdr
+};
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define QM_EIRQ_CIDE	0x20000000	/* Corenet Initiator Data Error */
+#define QM_EIRQ_CTDE	0x10000000	/* Corenet Target Data Error */
+#define QM_EIRQ_CITT	0x08000000	/* Corenet Invalid Target Transaction */
+#define QM_EIRQ_PLWI	0x04000000	/* PFDR Low Watermark */
+#define QM_EIRQ_MBEI	0x02000000	/* Multi-bit ECC Error */
+#define QM_EIRQ_SBEI	0x01000000	/* Single-bit ECC Error */
+#define QM_EIRQ_PEBI	0x00800000	/* PFDR Enqueues Blocked Interrupt */
+#define QM_EIRQ_IFSI	0x00020000	/* Invalid FQ Flow Control State */
+#define QM_EIRQ_ICVI	0x00010000	/* Invalid Command Verb */
+#define QM_EIRQ_IDDI	0x00000800	/* Invalid Dequeue (Direct-connect) */
+#define QM_EIRQ_IDFI	0x00000400	/* Invalid Dequeue FQ */
+#define QM_EIRQ_IDSI	0x00000200	/* Invalid Dequeue Source */
+#define QM_EIRQ_IDQI	0x00000100	/* Invalid Dequeue Queue */
+#define QM_EIRQ_IECE	0x00000010	/* Invalid Enqueue Configuration */
+#define QM_EIRQ_IEOI	0x00000008	/* Invalid Enqueue Overflow */
+#define QM_EIRQ_IESI	0x00000004	/* Invalid Enqueue State */
+#define QM_EIRQ_IECI	0x00000002	/* Invalid Enqueue Channel */
+#define QM_EIRQ_IEQI	0x00000001	/* Invalid Enqueue Queue */
+
+/* QMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR	(QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
+			 QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
+			 QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
+#define FQID_ECSR_ERR	(QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
+			 QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
+			 QM_EIRQ_IFSI)
+
+struct qm_ecir {
+	u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */
+};
+
+static bool qm_ecir_is_dcp(const struct qm_ecir *p)
+{
+	return p->info & BIT(29);
+}
+
+static int qm_ecir_get_pnum(const struct qm_ecir *p)
+{
+	return (p->info >> 24) & 0x1f;
+}
+
+static int qm_ecir_get_fqid(const struct qm_ecir *p)
+{
+	return p->info & (BIT(24) - 1);
+}
+
+struct qm_ecir2 {
+	u32 info; /* ptyp[31], res[10-30], pnum[0-9] */
+};
+
+static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
+{
+	return p->info & BIT(31);
+}
+
+static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
+{
+	return p->info & (BIT(10) - 1);
+}
+
+struct qm_eadr {
+	u32 info; /* memid[24-27], eadr[0-11] */
+		  /* v3: memid[24-28], eadr[0-15] */
+};
+
+static int qm_eadr_get_memid(const struct qm_eadr *p)
+{
+	return (p->info >> 24) & 0xf;
+}
+
+static int qm_eadr_get_eadr(const struct qm_eadr *p)
+{
+	return p->info & (BIT(12) - 1);
+}
+
+static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
+{
+	return (p->info >> 24) & 0x1f;
+}
+
+static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
+{
+	return p->info & (BIT(16) - 1);
+}
+
+struct qman_hwerr_txt {
+	u32 mask;
+	const char *txt;
+};
+
+
+static const struct qman_hwerr_txt qman_hwerr_txts[] = {
+	{ QM_EIRQ_CIDE, "Corenet Initiator Data Error" },
+	{ QM_EIRQ_CTDE, "Corenet Target Data Error" },
+	{ QM_EIRQ_CITT, "Corenet Invalid Target Transaction" },
+	{ QM_EIRQ_PLWI, "PFDR Low Watermark" },
+	{ QM_EIRQ_MBEI, "Multi-bit ECC Error" },
+	{ QM_EIRQ_SBEI, "Single-bit ECC Error" },
+	{ QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" },
+	{ QM_EIRQ_ICVI, "Invalid Command Verb" },
+	{ QM_EIRQ_IFSI, "Invalid Flow Control State" },
+	{ QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" },
+	{ QM_EIRQ_IDFI, "Invalid Dequeue FQ" },
+	{ QM_EIRQ_IDSI, "Invalid Dequeue Source" },
+	{ QM_EIRQ_IDQI, "Invalid Dequeue Queue" },
+	{ QM_EIRQ_IECE, "Invalid Enqueue Configuration" },
+	{ QM_EIRQ_IEOI, "Invalid Enqueue Overflow" },
+	{ QM_EIRQ_IESI, "Invalid Enqueue State" },
+	{ QM_EIRQ_IECI, "Invalid Enqueue Channel" },
+	{ QM_EIRQ_IEQI, "Invalid Enqueue Queue" },
+};
+
+struct qman_error_info_mdata {
+	u16 addr_mask;
+	u16 bits;
+	const char *txt;
+};
+
+static const struct qman_error_info_mdata error_mdata[] = {
+	{ 0x01FF, 24, "FQD cache tag memory 0" },
+	{ 0x01FF, 24, "FQD cache tag memory 1" },
+	{ 0x01FF, 24, "FQD cache tag memory 2" },
+	{ 0x01FF, 24, "FQD cache tag memory 3" },
+	{ 0x0FFF, 512, "FQD cache memory" },
+	{ 0x07FF, 128, "SFDR memory" },
+	{ 0x01FF, 72, "WQ context memory" },
+	{ 0x00FF, 240, "CGR memory" },
+	{ 0x00FF, 302, "Internal Order Restoration List memory" },
+	{ 0x01FF, 256, "SW portal ring memory" },
+};
+
+#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
+
+/*
+ * TODO: unimplemented registers
+ *
+ * Keeping a list here of QMan registers I have not yet covered;
+ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
+ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
+ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
+ */
+
+/* Pointer to the start of the QMan's CCSR space */
+static u32 __iomem *qm_ccsr_start;
+/* A SDQCR mask comprising all the available/visible pool channels */
+static u32 qm_pools_sdqcr;
+
+static inline u32 qm_ccsr_in(u32 offset)
+{
+	return ioread32be(qm_ccsr_start + offset/4);
+}
+
+static inline void qm_ccsr_out(u32 offset, u32 val)
+{
+	iowrite32be(val, qm_ccsr_start + offset/4);
+}
+
+u32 qm_get_pools_sdqcr(void)
+{
+	return qm_pools_sdqcr;
+}
+
+enum qm_dc_portal {
+	qm_dc_portal_fman0 = 0,
+	qm_dc_portal_fman1 = 1
+};
+
+static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd)
+{
+	DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 ||
+		    portal == qm_dc_portal_fman1);
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		qm_ccsr_out(REG_DCP_CFG(portal),
+			    (ed ? 0x1000 : 0) | (sernd & 0x3ff));
+	else
+		qm_ccsr_out(REG_DCP_CFG(portal),
+			    (ed ? 0x100 : 0) | (sernd & 0x1f));
+}
+
+static void qm_set_wq_scheduling(enum qm_wq_class wq_class,
+				 u8 cs_elev, u8 csw2, u8 csw3, u8 csw4,
+				 u8 csw5, u8 csw6, u8 csw7)
+{
+	qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
+		    ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
+		    ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
+		    ((csw6 & 0x7) << 4) | (csw7 & 0x7));
+}
+
+static void qm_set_hid(void)
+{
+	qm_ccsr_out(REG_HID_CFG, 0);
+}
+
+static void qm_set_corenet_initiator(void)
+{
+	qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN |
+		    (QM_CI_SCHED_CFG_SRCCIV << 24) |
+		    (QM_CI_SCHED_CFG_SRQ_W << 8) |
+		    (QM_CI_SCHED_CFG_RW_W << 4) |
+		    QM_CI_SCHED_CFG_BMAN_W);
+}
+
+static void qm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+	u32 v = qm_ccsr_in(REG_IP_REV_1);
+	*id = (v >> 16);
+	*major = (v >> 8) & 0xff;
+	*minor = v & 0xff;
+}
+
+#define PFDR_AR_EN		BIT(31)
+static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
+{
+	u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
+	u32 exp = ilog2(size);
+
+	/* choke if size isn't within range */
+	DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
+		    is_power_of_2(size));
+	/* choke if 'ba' has lower-alignment than 'size' */
+	DPAA_ASSERT(!(ba & (size - 1)));
+	qm_ccsr_out(offset, upper_32_bits(ba));
+	qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
+	qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
+}
+
+static void qm_set_pfdr_threshold(u32 th, u8 k)
+{
+	qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff);
+	qm_ccsr_out(REG_PFDR_CFG, k);
+}
+
+static void qm_set_sfdr_threshold(u16 th)
+{
+	qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff);
+}
+
+static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
+{
+	u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+
+	DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
+	/* Make sure the command interface is 'idle' */
+	if (!MCR_rslt_idle(rslt)) {
+		dev_crit(dev, "QMAN_MCR isn't idle");
+		WARN_ON(1);
+	}
+
+	/* Write the MCR command params then the verb */
+	qm_ccsr_out(REG_MCP(0), pfdr_start);
+	/*
+	 * TODO: remove this - it's a workaround for a model bug that is
+	 * corrected in more recent versions. We use the workaround until
+	 * everyone has upgraded.
+	 */
+	qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16);
+	dma_wmb();
+	qm_ccsr_out(REG_MCR, MCR_INIT_PFDR);
+	/* Poll for the result */
+	do {
+		rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+	} while (!MCR_rslt_idle(rslt));
+	if (MCR_rslt_ok(rslt))
+		return 0;
+	if (MCR_rslt_eaccess(rslt))
+		return -EACCES;
+	if (MCR_rslt_inval(rslt))
+		return -EINVAL;
+	dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
+	return -ENODEV;
+}
+
+/*
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved).  Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+static dma_addr_t fqd_a, pfdr_a;
+static size_t fqd_sz, pfdr_sz;
+
+static int qman_fqd(struct reserved_mem *rmem)
+{
+	fqd_a = rmem->base;
+	fqd_sz = rmem->size;
+
+	WARN_ON(!(fqd_a && fqd_sz));
+
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
+
+static int qman_pfdr(struct reserved_mem *rmem)
+{
+	pfdr_a = rmem->base;
+	pfdr_sz = rmem->size;
+
+	WARN_ON(!(pfdr_a && pfdr_sz));
+
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
+
+static unsigned int qm_get_fqid_maxcnt(void)
+{
+	return fqd_sz / 64;
+}
+
+/*
+ * Flush this memory range from data cache so that QMAN originated
+ * transactions for this memory region could be marked non-coherent.
+ */
+static int zero_priv_mem(struct device *dev, struct device_node *node,
+			 phys_addr_t addr, size_t sz)
+{
+	/* map as cacheable, non-guarded */
+	void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+
+	memset_io(tmpp, 0, sz);
+	flush_dcache_range((unsigned long)tmpp,
+			   (unsigned long)tmpp + sz);
+	iounmap(tmpp);
+
+	return 0;
+}
+
+static void log_edata_bits(struct device *dev, u32 bit_count)
+{
+	u32 i, j, mask = 0xffffffff;
+
+	dev_warn(dev, "ErrInt, EDATA:\n");
+	i = bit_count / 32;
+	if (bit_count % 32) {
+		i++;
+		mask = ~(mask << bit_count % 32);
+	}
+	j = 16 - i;
+	dev_warn(dev, "  0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask);
+	j++;
+	for (; j < 16; j++)
+		dev_warn(dev, "  0x%08x\n", qm_ccsr_in(REG_EDATA(j)));
+}
+
+static void log_additional_error_info(struct device *dev, u32 isr_val,
+				      u32 ecsr_val)
+{
+	struct qm_ecir ecir_val;
+	struct qm_eadr eadr_val;
+	int memid;
+
+	ecir_val.info = qm_ccsr_in(REG_ECIR);
+	/* Is portal info valid */
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+		struct qm_ecir2 ecir2_val;
+
+		ecir2_val.info = qm_ccsr_in(REG_ECIR2);
+		if (ecsr_val & PORTAL_ECSR_ERR) {
+			dev_warn(dev, "ErrInt: %s id %d\n",
+				 qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP",
+				 qm_ecir2_get_pnum(&ecir2_val));
+		}
+		if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
+			dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+				 qm_ecir_get_fqid(&ecir_val));
+
+		if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+			eadr_val.info = qm_ccsr_in(REG_EADR);
+			memid = qm_eadr_v3_get_memid(&eadr_val);
+			dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+				 error_mdata[memid].txt,
+				 error_mdata[memid].addr_mask
+					& qm_eadr_v3_get_eadr(&eadr_val));
+			log_edata_bits(dev, error_mdata[memid].bits);
+		}
+	} else {
+		if (ecsr_val & PORTAL_ECSR_ERR) {
+			dev_warn(dev, "ErrInt: %s id %d\n",
+				 qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP",
+				 qm_ecir_get_pnum(&ecir_val));
+		}
+		if (ecsr_val & FQID_ECSR_ERR)
+			dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+				 qm_ecir_get_fqid(&ecir_val));
+
+		if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+			eadr_val.info = qm_ccsr_in(REG_EADR);
+			memid = qm_eadr_get_memid(&eadr_val);
+			dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+				 error_mdata[memid].txt,
+				 error_mdata[memid].addr_mask
+					& qm_eadr_get_eadr(&eadr_val));
+			log_edata_bits(dev, error_mdata[memid].bits);
+		}
+	}
+}
+
+static irqreturn_t qman_isr(int irq, void *ptr)
+{
+	u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+	struct device *dev = ptr;
+
+	ier_val = qm_ccsr_in(REG_ERR_IER);
+	isr_val = qm_ccsr_in(REG_ERR_ISR);
+	ecsr_val = qm_ccsr_in(REG_ECSR);
+	isr_mask = isr_val & ier_val;
+
+	if (!isr_mask)
+		return IRQ_NONE;
+
+	for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) {
+		if (qman_hwerr_txts[i].mask & isr_mask) {
+			dev_err_ratelimited(dev, "ErrInt: %s\n",
+					    qman_hwerr_txts[i].txt);
+			if (qman_hwerr_txts[i].mask & ecsr_val) {
+				log_additional_error_info(dev, isr_mask,
+							  ecsr_val);
+				/* Re-arm error capture registers */
+				qm_ccsr_out(REG_ECSR, ecsr_val);
+			}
+			if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) {
+				dev_dbg(dev, "Disabling error 0x%x\n",
+					qman_hwerr_txts[i].mask);
+				ier_val &= ~qman_hwerr_txts[i].mask;
+				qm_ccsr_out(REG_ERR_IER, ier_val);
+			}
+		}
+	}
+	qm_ccsr_out(REG_ERR_ISR, isr_val);
+
+	return IRQ_HANDLED;
+}
+
+static int qman_init_ccsr(struct device *dev)
+{
+	int i, err;
+
+	/* FQD memory */
+	qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+	/* PFDR memory */
+	qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
+	err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
+	if (err)
+		return err;
+	/* thresholds */
+	qm_set_pfdr_threshold(512, 64);
+	qm_set_sfdr_threshold(128);
+	/* clear stale PEBI bit from interrupt status register */
+	qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI);
+	/* corenet initiator settings */
+	qm_set_corenet_initiator();
+	/* HID settings */
+	qm_set_hid();
+	/* Set scheduling weights to defaults */
+	for (i = qm_wq_first; i <= qm_wq_last; i++)
+		qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0);
+	/* We are not prepared to accept ERNs for hardware enqueues */
+	qm_set_dc(qm_dc_portal_fman0, 1, 0);
+	qm_set_dc(qm_dc_portal_fman1, 1, 0);
+	return 0;
+}
+
+#define LIO_CFG_LIODN_MASK 0x0fff0000
+void qman_liodn_fixup(u16 channel)
+{
+	static int done;
+	static u32 liodn_offset;
+	u32 before, after;
+	int idx = channel - QM_CHANNEL_SWPORTAL0;
+
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx));
+	else
+		before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx));
+	if (!done) {
+		liodn_offset = before & LIO_CFG_LIODN_MASK;
+		done = 1;
+		return;
+	}
+	after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
+	else
+		qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
+}
+
+#define IO_CFG_SDEST_MASK 0x00ff0000
+void qman_set_sdest(u16 channel, unsigned int cpu_idx)
+{
+	int idx = channel - QM_CHANNEL_SWPORTAL0;
+	u32 before, after;
+
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+		before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx));
+		/* Each pair of vcpu share the same SRQ(SDEST) */
+		cpu_idx /= 2;
+		after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+		qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after);
+	} else {
+		before = qm_ccsr_in(REG_QCSP_IO_CFG(idx));
+		after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+		qm_ccsr_out(REG_QCSP_IO_CFG(idx), after);
+	}
+}
+
+static int qman_resource_init(struct device *dev)
+{
+	int pool_chan_num, cgrid_num;
+	int ret, i;
+
+	switch (qman_ip_rev >> 8) {
+	case 1:
+		pool_chan_num = 15;
+		cgrid_num = 256;
+		break;
+	case 2:
+		pool_chan_num = 3;
+		cgrid_num = 64;
+		break;
+	case 3:
+		pool_chan_num = 15;
+		cgrid_num = 256;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF,
+			   pool_chan_num, -1);
+	if (ret) {
+		dev_err(dev, "Failed to seed pool channels (%d)\n", ret);
+		return ret;
+	}
+
+	ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1);
+	if (ret) {
+		dev_err(dev, "Failed to seed CGRID range (%d)\n", ret);
+		return ret;
+	}
+
+	/* parse pool channels into the SDQCR mask */
+	for (i = 0; i < cgrid_num; i++)
+		qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i);
+
+	ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF,
+			   qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1);
+	if (ret) {
+		dev_err(dev, "Failed to seed FQID range (%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int fsl_qman_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	int ret, err_irq;
+	u16 id;
+	u8 major, minor;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
+			node->full_name);
+		return -ENXIO;
+	}
+	qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+	if (!qm_ccsr_start)
+		return -ENXIO;
+
+	qm_get_version(&id, &major, &minor);
+	if (major == 1 && minor == 0) {
+		dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n");
+			return -ENODEV;
+	} else if (major == 1 && minor == 1)
+		qman_ip_rev = QMAN_REV11;
+	else if	(major == 1 && minor == 2)
+		qman_ip_rev = QMAN_REV12;
+	else if (major == 2 && minor == 0)
+		qman_ip_rev = QMAN_REV20;
+	else if (major == 3 && minor == 0)
+		qman_ip_rev = QMAN_REV30;
+	else if (major == 3 && minor == 1)
+		qman_ip_rev = QMAN_REV31;
+	else {
+		dev_err(dev, "Unknown QMan version\n");
+		return -ENODEV;
+	}
+
+	if ((qman_ip_rev & 0xff00) >= QMAN_REV30)
+		qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+
+	ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
+	WARN_ON(ret);
+	if (ret)
+		return -ENODEV;
+
+	ret = qman_init_ccsr(dev);
+	if (ret) {
+		dev_err(dev, "CCSR setup failed\n");
+		return ret;
+	}
+
+	err_irq = platform_get_irq(pdev, 0);
+	if (err_irq <= 0) {
+		dev_info(dev, "Can't get %s property 'interrupts'\n",
+			 node->full_name);
+		return -ENODEV;
+	}
+	ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err",
+			       dev);
+	if (ret)  {
+		dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+			ret, node->full_name);
+		return ret;
+	}
+
+	/*
+	 * Write-to-clear any stale bits, (eg. starvation being asserted prior
+	 * to resource allocation during driver init).
+	 */
+	qm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+	/* Enable Error Interrupts */
+	qm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+	qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc");
+	if (IS_ERR(qm_fqalloc)) {
+		ret = PTR_ERR(qm_fqalloc);
+		dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret);
+		return ret;
+	}
+
+	qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc");
+	if (IS_ERR(qm_qpalloc)) {
+		ret = PTR_ERR(qm_qpalloc);
+		dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret);
+		return ret;
+	}
+
+	qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc");
+	if (IS_ERR(qm_cgralloc)) {
+		ret = PTR_ERR(qm_cgralloc);
+		dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret);
+		return ret;
+	}
+
+	ret = qman_resource_init(dev);
+	if (ret)
+		return ret;
+
+	ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
+	if (ret)
+		return ret;
+
+	ret = qman_wq_alloc();
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static const struct of_device_id fsl_qman_ids[] = {
+	{
+		.compatible = "fsl,qman",
+	},
+	{}
+};
+
+static struct platform_driver fsl_qman_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = fsl_qman_ids,
+		.suppress_bind_attrs = true,
+	},
+	.probe = fsl_qman_probe,
+};
+
+builtin_platform_driver(fsl_qman_driver);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
new file mode 100644
index 0000000..1486143
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -0,0 +1,355 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+/* Enable portal interupts (as opposed to polling mode) */
+#define CONFIG_FSL_DPA_PIRQ_SLOW  1
+#define CONFIG_FSL_DPA_PIRQ_FAST  1
+
+static struct cpumask portal_cpus;
+/* protect qman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(qman_lock);
+
+static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
+{
+#ifdef CONFIG_FSL_PAMU
+	struct device *dev = pcfg->dev;
+	int window_count = 1;
+	struct iommu_domain_geometry geom_attr;
+	struct pamu_stash_attribute stash_attr;
+	int ret;
+
+	pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
+	if (!pcfg->iommu_domain) {
+		dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
+		goto no_iommu;
+	}
+	geom_attr.aperture_start = 0;
+	geom_attr.aperture_end =
+		((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
+	geom_attr.force_aperture = true;
+	ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
+				    &geom_attr);
+	if (ret < 0) {
+		dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+			ret);
+		goto out_domain_free;
+	}
+	ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
+				    &window_count);
+	if (ret < 0) {
+		dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+			ret);
+		goto out_domain_free;
+	}
+	stash_attr.cpu = cpu;
+	stash_attr.cache = PAMU_ATTR_CACHE_L1;
+	ret = iommu_domain_set_attr(pcfg->iommu_domain,
+				    DOMAIN_ATTR_FSL_PAMU_STASH,
+				    &stash_attr);
+	if (ret < 0) {
+		dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
+			__func__, ret);
+		goto out_domain_free;
+	}
+	ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
+					 IOMMU_READ | IOMMU_WRITE);
+	if (ret < 0) {
+		dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
+			__func__, ret);
+		goto out_domain_free;
+	}
+	ret = iommu_attach_device(pcfg->iommu_domain, dev);
+	if (ret < 0) {
+		dev_err(dev, "%s(): iommu_device_attach() = %d", __func__,
+			ret);
+		goto out_domain_free;
+	}
+	ret = iommu_domain_set_attr(pcfg->iommu_domain,
+				    DOMAIN_ATTR_FSL_PAMU_ENABLE,
+				    &window_count);
+	if (ret < 0) {
+		dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+			ret);
+		goto out_detach_device;
+	}
+
+no_iommu:
+#endif
+	qman_set_sdest(pcfg->channel, cpu);
+
+	return;
+
+#ifdef CONFIG_FSL_PAMU
+out_detach_device:
+	iommu_detach_device(pcfg->iommu_domain, NULL);
+out_domain_free:
+	iommu_domain_free(pcfg->iommu_domain);
+	pcfg->iommu_domain = NULL;
+#endif
+}
+
+static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
+{
+	struct qman_portal *p;
+	u32 irq_sources = 0;
+
+	/* We need the same LIODN offset for all portals */
+	qman_liodn_fixup(pcfg->channel);
+
+	pcfg->iommu_domain = NULL;
+	portal_set_cpu(pcfg, pcfg->cpu);
+
+	p = qman_create_affine_portal(pcfg, NULL);
+	if (!p) {
+		dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+			 __func__, pcfg->cpu);
+		return NULL;
+	}
+
+	/* Determine what should be interrupt-vs-poll driven */
+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
+	irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
+		       QM_PIRQ_CSCI;
+#endif
+#ifdef CONFIG_FSL_DPA_PIRQ_FAST
+	irq_sources |= QM_PIRQ_DQRI;
+#endif
+	qman_p_irqsource_add(p, irq_sources);
+
+	spin_lock(&qman_lock);
+	if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
+		/* all assigned portals are initialized now */
+		qman_init_cgr_all();
+	}
+	spin_unlock(&qman_lock);
+
+	dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+	return p;
+}
+
+static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
+							unsigned int cpu)
+{
+#ifdef CONFIG_FSL_PAMU /* TODO */
+	struct pamu_stash_attribute stash_attr;
+	int ret;
+
+	if (pcfg->iommu_domain) {
+		stash_attr.cpu = cpu;
+		stash_attr.cache = PAMU_ATTR_CACHE_L1;
+		ret = iommu_domain_set_attr(pcfg->iommu_domain,
+				DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
+		if (ret < 0) {
+			dev_err(pcfg->dev,
+				"Failed to update pamu stash setting\n");
+			return;
+		}
+	}
+#endif
+	qman_set_sdest(pcfg->channel, cpu);
+}
+
+static void qman_offline_cpu(unsigned int cpu)
+{
+	struct qman_portal *p;
+	const struct qm_portal_config *pcfg;
+
+	p = affine_portals[cpu];
+	if (p) {
+		pcfg = qman_get_qm_portal_config(p);
+		if (pcfg) {
+			irq_set_affinity(pcfg->irq, cpumask_of(0));
+			qman_portal_update_sdest(pcfg, 0);
+		}
+	}
+}
+
+static void qman_online_cpu(unsigned int cpu)
+{
+	struct qman_portal *p;
+	const struct qm_portal_config *pcfg;
+
+	p = affine_portals[cpu];
+	if (p) {
+		pcfg = qman_get_qm_portal_config(p);
+		if (pcfg) {
+			irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+			qman_portal_update_sdest(pcfg, cpu);
+		}
+	}
+}
+
+static int qman_hotplug_cpu_callback(struct notifier_block *nfb,
+				     unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		qman_online_cpu(cpu);
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		qman_offline_cpu(cpu);
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block qman_hotplug_cpu_notifier = {
+	.notifier_call = qman_hotplug_cpu_callback,
+};
+
+static int qman_portal_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct qm_portal_config *pcfg;
+	struct resource *addr_phys[2];
+	const u32 *channel;
+	void __iomem *va;
+	int irq, len, cpu;
+
+	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+	if (!pcfg)
+		return -ENOMEM;
+
+	pcfg->dev = dev;
+
+	addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+					     DPAA_PORTAL_CE);
+	if (!addr_phys[0]) {
+		dev_err(dev, "Can't get %s property 'reg::CE'\n",
+			node->full_name);
+		return -ENXIO;
+	}
+
+	addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+					     DPAA_PORTAL_CI);
+	if (!addr_phys[1]) {
+		dev_err(dev, "Can't get %s property 'reg::CI'\n",
+			node->full_name);
+		return -ENXIO;
+	}
+
+	channel = of_get_property(node, "cell-index", &len);
+	if (!channel || (len != 4)) {
+		dev_err(dev, "Can't get %s property 'cell-index'\n",
+			node->full_name);
+		return -ENXIO;
+	}
+	pcfg->channel = *channel;
+	pcfg->cpu = -1;
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(dev, "Can't get %s IRQ\n", node->full_name);
+		return -ENXIO;
+	}
+	pcfg->irq = irq;
+
+	va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
+	if (!va)
+		goto err_ioremap1;
+
+	pcfg->addr_virt[DPAA_PORTAL_CE] = va;
+
+	va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
+			  _PAGE_GUARDED | _PAGE_NO_CACHE);
+	if (!va)
+		goto err_ioremap2;
+
+	pcfg->addr_virt[DPAA_PORTAL_CI] = va;
+
+	pcfg->pools = qm_get_pools_sdqcr();
+
+	spin_lock(&qman_lock);
+	cpu = cpumask_next_zero(-1, &portal_cpus);
+	if (cpu >= nr_cpu_ids) {
+		/* unassigned portal, skip init */
+		spin_unlock(&qman_lock);
+		return 0;
+	}
+
+	cpumask_set_cpu(cpu, &portal_cpus);
+	spin_unlock(&qman_lock);
+	pcfg->cpu = cpu;
+
+	if (!init_pcfg(pcfg))
+		goto err_ioremap2;
+
+	/* clear irq affinity if assigned cpu is offline */
+	if (!cpu_online(cpu))
+		qman_offline_cpu(cpu);
+
+	return 0;
+
+err_ioremap2:
+	iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+err_ioremap1:
+	dev_err(dev, "ioremap failed\n");
+	return -ENXIO;
+}
+
+static const struct of_device_id qman_portal_ids[] = {
+	{
+		.compatible = "fsl,qman-portal",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, qman_portal_ids);
+
+static struct platform_driver qman_portal_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = qman_portal_ids,
+	},
+	.probe = qman_portal_probe,
+};
+
+static int __init qman_portal_driver_register(struct platform_driver *drv)
+{
+	int ret;
+
+	ret = platform_driver_register(drv);
+	if (ret < 0)
+		return ret;
+
+	register_hotcpu_notifier(&qman_hotplug_cpu_notifier);
+
+	return 0;
+}
+
+module_driver(qman_portal_driver,
+	      qman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
new file mode 100644
index 0000000..5cf821e
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -0,0 +1,371 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/qman.h>
+#include <linux/iommu.h>
+
+#if defined(CONFIG_FSL_PAMU)
+#include <asm/fsl_pamu_stash.h>
+#endif
+
+struct qm_mcr_querywq {
+	u8 verb;
+	u8 result;
+	u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
+	u8 __reserved[28];
+	u32 wq_len[8];
+} __packed;
+
+static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq)
+{
+	return wq->channel_wq >> 3;
+}
+
+struct __qm_mcr_querycongestion {
+	u32 state[8];
+};
+
+/* "Query Congestion Group State" */
+struct qm_mcr_querycongestion {
+	u8 verb;
+	u8 result;
+	u8 __reserved[30];
+	/* Access this struct using qman_cgrs_get() */
+	struct __qm_mcr_querycongestion state;
+} __packed;
+
+/* "Query CGR" */
+struct qm_mcr_querycgr {
+	u8 verb;
+	u8 result;
+	u16 __reserved1;
+	struct __qm_mc_cgr cgr; /* CGR fields */
+	u8 __reserved2[6];
+	u8 i_bcnt_hi;	/* high 8-bits of 40-bit "Instant" */
+	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
+	u8 __reserved3[3];
+	u8 a_bcnt_hi;	/* high 8-bits of 40-bit "Average" */
+	u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
+	u32 cscn_targ_swp[4];
+} __packed;
+
+static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
+{
+	return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
+}
+static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
+{
+	return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
+}
+
+/* "Query FQ Non-Programmable Fields" */
+struct qm_mcc_queryfq_np {
+	u8 _ncw_verb;
+	u8 __reserved1[3];
+	u32 fqid;	/* 24-bit */
+	u8 __reserved2[56];
+} __packed;
+
+struct qm_mcr_queryfq_np {
+	u8 verb;
+	u8 result;
+	u8 __reserved1;
+	u8 state;		/* QM_MCR_NP_STATE_*** */
+	u32 fqd_link;		/* 24-bit, _res2[24-31] */
+	u16 odp_seq;		/* 14-bit, _res3[14-15] */
+	u16 orp_nesn;		/* 14-bit, _res4[14-15] */
+	u16 orp_ea_hseq;	/* 15-bit, _res5[15] */
+	u16 orp_ea_tseq;	/* 15-bit, _res6[15] */
+	u32 orp_ea_hptr;	/* 24-bit, _res7[24-31] */
+	u32 orp_ea_tptr;	/* 24-bit, _res8[24-31] */
+	u32 pfdr_hptr;		/* 24-bit, _res9[24-31] */
+	u32 pfdr_tptr;		/* 24-bit, _res10[24-31] */
+	u8 __reserved2[5];
+	u8 is;			/* 1-bit, _res12[1-7] */
+	u16 ics_surp;
+	u32 byte_cnt;
+	u32 frm_cnt;		/* 24-bit, _res13[24-31] */
+	u32 __reserved3;
+	u16 ra1_sfdr;		/* QM_MCR_NP_RA1_*** */
+	u16 ra2_sfdr;		/* QM_MCR_NP_RA2_*** */
+	u16 __reserved4;
+	u16 od1_sfdr;		/* QM_MCR_NP_OD1_*** */
+	u16 od2_sfdr;		/* QM_MCR_NP_OD2_*** */
+	u16 od3_sfdr;		/* QM_MCR_NP_OD3_*** */
+} __packed;
+
+#define QM_MCR_NP_STATE_FE		0x10
+#define QM_MCR_NP_STATE_R		0x08
+#define QM_MCR_NP_STATE_MASK		0x07	/* Reads FQD::STATE; */
+#define QM_MCR_NP_STATE_OOS		0x00
+#define QM_MCR_NP_STATE_RETIRED		0x01
+#define QM_MCR_NP_STATE_TEN_SCHED	0x02
+#define QM_MCR_NP_STATE_TRU_SCHED	0x03
+#define QM_MCR_NP_STATE_PARKED		0x04
+#define QM_MCR_NP_STATE_ACTIVE		0x05
+#define QM_MCR_NP_PTR_MASK		0x07ff	/* for RA[12] & OD[123] */
+#define QM_MCR_NP_RA1_NRA(v)		(((v) >> 14) & 0x3)	/* FQD::NRA */
+#define QM_MCR_NP_RA2_IT(v)		(((v) >> 14) & 0x1)	/* FQD::IT */
+#define QM_MCR_NP_OD1_NOD(v)		(((v) >> 14) & 0x3)	/* FQD::NOD */
+#define QM_MCR_NP_OD3_NPC(v)		(((v) >> 14) & 0x3)	/* FQD::NPC */
+
+enum qm_mcr_queryfq_np_masks {
+	qm_mcr_fqd_link_mask = BIT(24)-1,
+	qm_mcr_odp_seq_mask = BIT(14)-1,
+	qm_mcr_orp_nesn_mask = BIT(14)-1,
+	qm_mcr_orp_ea_hseq_mask = BIT(15)-1,
+	qm_mcr_orp_ea_tseq_mask = BIT(15)-1,
+	qm_mcr_orp_ea_hptr_mask = BIT(24)-1,
+	qm_mcr_orp_ea_tptr_mask = BIT(24)-1,
+	qm_mcr_pfdr_hptr_mask = BIT(24)-1,
+	qm_mcr_pfdr_tptr_mask = BIT(24)-1,
+	qm_mcr_is_mask = BIT(1)-1,
+	qm_mcr_frm_cnt_mask = BIT(24)-1,
+};
+#define qm_mcr_np_get(np, field) \
+	((np)->field & (qm_mcr_##field##_mask))
+
+/* Congestion Groups */
+
+/*
+ * This wrapper represents a bit-array for the state of the 256 QMan congestion
+ * groups. Is also used as a *mask* for congestion groups, eg. so we ignore
+ * those that don't concern us. We harness the structure and accessor details
+ * already used in the management command to query congestion groups.
+ */
+#define CGR_BITS_PER_WORD 5
+#define CGR_WORD(x)	((x) >> CGR_BITS_PER_WORD)
+#define CGR_BIT(x)	(BIT(31) >> ((x) & 0x1f))
+#define CGR_NUM	(sizeof(struct __qm_mcr_querycongestion) << 3)
+
+struct qman_cgrs {
+	struct __qm_mcr_querycongestion q;
+};
+
+static inline void qman_cgrs_init(struct qman_cgrs *c)
+{
+	memset(c, 0, sizeof(*c));
+}
+
+static inline void qman_cgrs_fill(struct qman_cgrs *c)
+{
+	memset(c, 0xff, sizeof(*c));
+}
+
+static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr)
+{
+	return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr);
+}
+
+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
+				const struct qman_cgrs *src)
+{
+	*dest = *src;
+}
+
+static inline void qman_cgrs_and(struct qman_cgrs *dest,
+			const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+	int ret;
+	u32 *_d = dest->q.state;
+	const u32 *_a = a->q.state;
+	const u32 *_b = b->q.state;
+
+	for (ret = 0; ret < 8; ret++)
+		*_d++ = *_a++ & *_b++;
+}
+
+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
+			const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+	int ret;
+	u32 *_d = dest->q.state;
+	const u32 *_a = a->q.state;
+	const u32 *_b = b->q.state;
+
+	for (ret = 0; ret < 8; ret++)
+		*_d++ = *_a++ ^ *_b++;
+}
+
+void qman_init_cgr_all(void);
+
+struct qm_portal_config {
+	/*
+	 * Corenet portal addresses;
+	 * [0]==cache-enabled, [1]==cache-inhibited.
+	 */
+	void __iomem *addr_virt[2];
+	struct device *dev;
+	struct iommu_domain *iommu_domain;
+	/* Allow these to be joined in lists */
+	struct list_head list;
+	/* User-visible portal configuration settings */
+	/* portal is affined to this cpu */
+	int cpu;
+	/* portal interrupt line */
+	int irq;
+	/*
+	 * the portal's dedicated channel id, used initialising
+	 * frame queues to target this portal when scheduled
+	 */
+	u16 channel;
+	/*
+	 * mask of pool channels this portal has dequeue access to
+	 * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask)
+	 */
+	u32 pools;
+};
+
+/* Revision info (for errata and feature handling) */
+#define QMAN_REV11 0x0101
+#define QMAN_REV12 0x0102
+#define QMAN_REV20 0x0200
+#define QMAN_REV30 0x0300
+#define QMAN_REV31 0x0301
+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
+
+#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
+extern struct gen_pool *qm_fqalloc; /* FQID allocator */
+extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+u32 qm_get_pools_sdqcr(void);
+
+int qman_wq_alloc(void);
+void qman_liodn_fixup(u16 channel);
+void qman_set_sdest(u16 channel, unsigned int cpu_idx);
+
+struct qman_portal *qman_create_affine_portal(
+			const struct qm_portal_config *config,
+			const struct qman_cgrs *cgrs);
+const struct qm_portal_config *qman_destroy_affine_portal(void);
+
+/*
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
+
+/*
+ * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
+ * FQID(n) to fill in the frame queue ID.
+ */
+#define QM_VDQCR_PRECEDENCE_VDQCR	0x0
+#define QM_VDQCR_PRECEDENCE_SDQCR	0x80000000
+#define QM_VDQCR_EXACT			0x40000000
+#define QM_VDQCR_NUMFRAMES_MASK		0x3f000000
+#define QM_VDQCR_NUMFRAMES_SET(n)	(((n) & 0x3f) << 24)
+#define QM_VDQCR_NUMFRAMES_GET(n)	(((n) >> 24) & 0x3f)
+#define QM_VDQCR_NUMFRAMES_TILLEMPTY	QM_VDQCR_NUMFRAMES_SET(0)
+
+#define QMAN_VOLATILE_FLAG_WAIT	     0x00000001 /* wait if VDQCR is in use */
+#define QMAN_VOLATILE_FLAG_WAIT_INT  0x00000002 /* if wait, interruptible? */
+#define QMAN_VOLATILE_FLAG_FINISH    0x00000004 /* wait till VDQCR completes */
+
+/*
+ * qman_volatile_dequeue - Issue a volatile dequeue command
+ * @fq: the frame queue object to dequeue from
+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
+ *
+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
+ * the VDQCR is already in use, otherwise returns non-zero for failure. If
+ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
+ * the VDQCR command has finished executing (ie. once the callback for the last
+ * DQRR entry resulting from the VDQCR command has been called). If not using
+ * the FINISH flag, completion can be determined either by detecting the
+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
+ * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
+ * for the QMAN_FQ_STATE_VDQCR bit to disappear.
+ */
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
+
+int qman_alloc_fq_table(u32 num_fqids);
+
+/*   QMan s/w corenet portal, low-level i/face	 */
+
+/*
+ * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
+ * dequeue TYPE. Choose TOKEN (8-bit).
+ * If SOURCE == CHANNELS,
+ *   Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
+ *   You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ *   priority.
+ * If SOURCE == SPECIFICWQ,
+ *     Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ *     channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ *     work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ *     same value.
+ */
+#define QM_SDQCR_SOURCE_CHANNELS	0x0
+#define QM_SDQCR_SOURCE_SPECIFICWQ	0x40000000
+#define QM_SDQCR_COUNT_EXACT1		0x0
+#define QM_SDQCR_COUNT_UPTO3		0x20000000
+#define QM_SDQCR_DEDICATED_PRECEDENCE	0x10000000
+#define QM_SDQCR_TYPE_MASK		0x03000000
+#define QM_SDQCR_TYPE_NULL		0x0
+#define QM_SDQCR_TYPE_PRIO_QOS		0x01000000
+#define QM_SDQCR_TYPE_ACTIVE_QOS	0x02000000
+#define QM_SDQCR_TYPE_ACTIVE		0x03000000
+#define QM_SDQCR_TOKEN_MASK		0x00ff0000
+#define QM_SDQCR_TOKEN_SET(v)		(((v) & 0xff) << 16)
+#define QM_SDQCR_TOKEN_GET(v)		(((v) >> 16) & 0xff)
+#define QM_SDQCR_CHANNELS_DEDICATED	0x00008000
+#define QM_SDQCR_SPECIFICWQ_MASK	0x000000f7
+#define QM_SDQCR_SPECIFICWQ_DEDICATED	0x00000000
+#define QM_SDQCR_SPECIFICWQ_POOL(n)	((n) << 4)
+#define QM_SDQCR_SPECIFICWQ_WQ(n)	(n)
+
+/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
+#define QM_VDQCR_FQID_MASK		0x00ffffff
+#define QM_VDQCR_FQID(n)		((n) & QM_VDQCR_FQID_MASK)
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * Channels with frame availability
+ */
+#define QM_PIRQ_DQAVAIL	0x0000ffff
+
+/* The DQAVAIL interrupt fields break down into these bits; */
+#define QM_DQAVAIL_PORTAL	0x8000		/* Portal channel */
+#define QM_DQAVAIL_POOL(n)	(0x8000 >> (n))	/* Pool channel, n==[1..15] */
+#define QM_DQAVAIL_MASK		0xffff
+/* This mask contains all the "irqsource" bits visible to API users */
+#define QM_PIRQ_VISIBLE	(QM_PIRQ_SLOW | QM_PIRQ_DQRI)
+
+extern struct qman_portal *affine_portals[NR_CPUS];
+const struct qm_portal_config *qman_get_qm_portal_config(
+						struct qman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/qman_test.c b/drivers/soc/fsl/qbman/qman_test.c
new file mode 100644
index 0000000..18f7f02
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test.c
@@ -0,0 +1,62 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("QMan testing");
+
+static int test_init(void)
+{
+	int loop = 1;
+	int err = 0;
+
+	while (loop--) {
+#ifdef CONFIG_FSL_QMAN_TEST_STASH
+		err = qman_test_stash();
+		if (err)
+			break;
+#endif
+#ifdef CONFIG_FSL_QMAN_TEST_API
+		err = qman_test_api();
+		if (err)
+			break;
+#endif
+	}
+	return err;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/qman_test.h b/drivers/soc/fsl/qbman/qman_test.h
new file mode 100644
index 0000000..d5f8cb2
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test.h
@@ -0,0 +1,36 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+int qman_test_stash(void);
+int qman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
new file mode 100644
index 0000000..6880ff1
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -0,0 +1,252 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#define CGR_ID		27
+#define POOL_ID		2
+#define FQ_FLAGS	QMAN_FQ_FLAG_DYNAMIC_FQID
+#define NUM_ENQUEUES	10
+#define NUM_PARTIAL	4
+#define PORTAL_SDQCR	(QM_SDQCR_SOURCE_CHANNELS | \
+			QM_SDQCR_TYPE_PRIO_QOS | \
+			QM_SDQCR_TOKEN_SET(0x98) | \
+			QM_SDQCR_CHANNELS_DEDICATED | \
+			QM_SDQCR_CHANNELS_POOL(POOL_ID))
+#define PORTAL_OPAQUE	((void *)0xf00dbeef)
+#define VDQCR_FLAGS	(QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
+					struct qman_fq *,
+					const struct qm_dqrr_entry *);
+static void cb_ern(struct qman_portal *, struct qman_fq *,
+		   const union qm_mr_entry *);
+static void cb_fqs(struct qman_portal *, struct qman_fq *,
+		   const union qm_mr_entry *);
+
+static struct qm_fd fd, fd_dq;
+static struct qman_fq fq_base = {
+	.cb.dqrr = cb_dqrr,
+	.cb.ern = cb_ern,
+	.cb.fqs = cb_fqs
+};
+static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
+static int retire_complete, sdqcr_complete;
+
+/* Helpers for initialising and "incrementing" a frame descriptor */
+static void fd_init(struct qm_fd *fd)
+{
+	qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
+	qm_fd_set_contig_big(fd, 0x0000ffff);
+	fd->cmd = 0xfeedf00d;
+}
+
+static void fd_inc(struct qm_fd *fd)
+{
+	u64 t = qm_fd_addr_get64(fd);
+	int z = t >> 40;
+	unsigned int len, off;
+	enum qm_fd_format fmt;
+
+	t <<= 1;
+	if (z)
+		t |= 1;
+	qm_fd_addr_set64(fd, t);
+
+	fmt = qm_fd_get_format(fd);
+	off = qm_fd_get_offset(fd);
+	len = qm_fd_get_length(fd);
+	len--;
+	qm_fd_set_param(fd, fmt, off, len);
+
+	fd->cmd++;
+}
+
+/* The only part of the 'fd' we can't memcmp() is the ppid */
+static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
+{
+	int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
+
+	if (!r) {
+		enum qm_fd_format fmt_a, fmt_b;
+
+		fmt_a = qm_fd_get_format(a);
+		fmt_b = qm_fd_get_format(b);
+		r = fmt_a - fmt_b;
+	}
+	if (!r)
+		r = a->cfg - b->cfg;
+	if (!r)
+		r = a->cmd - b->cmd;
+	return r;
+}
+
+/* test */
+static int do_enqueues(struct qman_fq *fq)
+{
+	unsigned int loop;
+	int err = 0;
+
+	for (loop = 0; loop < NUM_ENQUEUES; loop++) {
+		if (qman_enqueue(fq, &fd)) {
+			pr_crit("qman_enqueue() failed\n");
+			err = -EIO;
+		}
+		fd_inc(&fd);
+	}
+
+	return err;
+}
+
+int qman_test_api(void)
+{
+	unsigned int flags, frmcnt;
+	int err;
+	struct qman_fq *fq = &fq_base;
+
+	pr_info("%s(): Starting\n", __func__);
+	fd_init(&fd);
+	fd_init(&fd_dq);
+
+	/* Initialise (parked) FQ */
+	err = qman_create_fq(0, FQ_FLAGS, fq);
+	if (err) {
+		pr_crit("qman_create_fq() failed\n");
+		goto failed;
+	}
+	err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
+	if (err) {
+		pr_crit("qman_init_fq() failed\n");
+		goto failed;
+	}
+	/* Do enqueues + VDQCR, twice. (Parked FQ) */
+	err = do_enqueues(fq);
+	if (err)
+		goto failed;
+	pr_info("VDQCR (till-empty);\n");
+	frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY;
+	err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+	if (err) {
+		pr_crit("qman_volatile_dequeue() failed\n");
+		goto failed;
+	}
+	err = do_enqueues(fq);
+	if (err)
+		goto failed;
+	pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
+	frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL);
+	err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+	if (err) {
+		pr_crit("qman_volatile_dequeue() failed\n");
+		goto failed;
+	}
+	pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
+		NUM_ENQUEUES);
+	frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL);
+	err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+	if (err) {
+		pr_err("qman_volatile_dequeue() failed\n");
+		goto failed;
+	}
+
+	err = do_enqueues(fq);
+	if (err)
+		goto failed;
+	pr_info("scheduled dequeue (till-empty)\n");
+	err = qman_schedule_fq(fq);
+	if (err) {
+		pr_crit("qman_schedule_fq() failed\n");
+		goto failed;
+	}
+	wait_event(waitqueue, sdqcr_complete);
+
+	/* Retire and OOS the FQ */
+	err = qman_retire_fq(fq, &flags);
+	if (err < 0) {
+		pr_crit("qman_retire_fq() failed\n");
+		goto failed;
+	}
+	wait_event(waitqueue, retire_complete);
+	if (flags & QMAN_FQ_STATE_BLOCKOOS) {
+		err = -EIO;
+		pr_crit("leaking frames\n");
+		goto failed;
+	}
+	err = qman_oos_fq(fq);
+	if (err) {
+		pr_crit("qman_oos_fq() failed\n");
+		goto failed;
+	}
+	qman_destroy_fq(fq);
+	pr_info("%s(): Finished\n", __func__);
+	return 0;
+
+failed:
+	WARN_ON(1);
+	return err;
+}
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
+					struct qman_fq *fq,
+					const struct qm_dqrr_entry *dq)
+{
+	if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) {
+		pr_err("BADNESS: dequeued frame doesn't match;\n");
+		return qman_cb_dqrr_consume;
+	}
+	fd_inc(&fd_dq);
+	if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
+		sdqcr_complete = 1;
+		wake_up(&waitqueue);
+	}
+	return qman_cb_dqrr_consume;
+}
+
+static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
+		   const union qm_mr_entry *msg)
+{
+	pr_crit("cb_ern() unimplemented");
+	WARN_ON(1);
+}
+
+static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
+		   const union qm_mr_entry *msg)
+{
+	u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
+
+	if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) {
+		pr_crit("unexpected FQS message");
+		WARN_ON(1);
+		return;
+	}
+	pr_info("Retirement message received\n");
+	retire_complete = 1;
+	wake_up(&waitqueue);
+}
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
new file mode 100644
index 0000000..43cf66b
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -0,0 +1,617 @@
+/* Copyright 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+/*
+ * Algorithm:
+ *
+ * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
+ * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
+ * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
+ * shuttle a "hot potato" frame around them such that every forwarding action
+ * moves it from one cpu to another. (The use of more than one handler per cpu
+ * is to allow enough handlers/FQs to truly test the significance of caching -
+ * ie. when cache-expiries are occurring.)
+ *
+ * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
+ * first and last words of the frame data will undergo a transformation step on
+ * each forwarding action. To achieve this, each handler will be assigned a
+ * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
+ * received by a handler, the mixer of the expected sender is XOR'd into all
+ * words of the entire frame, which is then validated against the original
+ * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
+ * the current handler. Apart from validating that the frame is taking the
+ * expected path, this also provides some quasi-realistic overheads to each
+ * forwarding action - dereferencing *all* the frame data, computation, and
+ * conditional branching. There is a "special" handler designated to act as the
+ * instigator of the test by creating an enqueuing the "hot potato" frame, and
+ * to determine when the test has completed by counting HP_LOOPS iterations.
+ *
+ * Init phases:
+ *
+ * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
+ *    into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
+ *    handlers and link-list them (but do no other handler setup).
+ *
+ * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ *    hp_cpu's 'iterator' to point to its first handler. With each loop,
+ *    allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
+ *    and advance the iterator for the next loop. This includes a final fixup,
+ *    which connects the last handler to the first (and which is why phase 2
+ *    and 3 are separate).
+ *
+ * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ *    hp_cpu's 'iterator' to point to its first handler. With each loop,
+ *    initialise FQ objects and advance the iterator for the next loop.
+ *    Moreover, do this initialisation on the cpu it applies to so that Rx FQ
+ *    initialisation targets the correct cpu.
+ */
+
+/*
+ * helper to run something on all cpus (can't use on_each_cpu(), as that invokes
+ * the fn from irq context, which is too restrictive).
+ */
+struct bstrap {
+	int (*fn)(void);
+	atomic_t started;
+};
+static int bstrap_fn(void *bs)
+{
+	struct bstrap *bstrap = bs;
+	int err;
+
+	atomic_inc(&bstrap->started);
+	err = bstrap->fn();
+	if (err)
+		return err;
+	while (!kthread_should_stop())
+		msleep(20);
+	return 0;
+}
+static int on_all_cpus(int (*fn)(void))
+{
+	int cpu;
+
+	for_each_cpu(cpu, cpu_online_mask) {
+		struct bstrap bstrap = {
+			.fn = fn,
+			.started = ATOMIC_INIT(0)
+		};
+		struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
+			"hotpotato%d", cpu);
+		int ret;
+
+		if (IS_ERR(k))
+			return -ENOMEM;
+		kthread_bind(k, cpu);
+		wake_up_process(k);
+		/*
+		 * If we call kthread_stop() before the "wake up" has had an
+		 * effect, then the thread may exit with -EINTR without ever
+		 * running the function. So poll until it's started before
+		 * requesting it to stop.
+		 */
+		while (!atomic_read(&bstrap.started))
+			msleep(20);
+		ret = kthread_stop(k);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+struct hp_handler {
+
+	/* The following data is stashed when 'rx' is dequeued; */
+	/* -------------- */
+	/* The Rx FQ, dequeues of which will stash the entire hp_handler */
+	struct qman_fq rx;
+	/* The Tx FQ we should forward to */
+	struct qman_fq tx;
+	/* The value we XOR post-dequeue, prior to validating */
+	u32 rx_mixer;
+	/* The value we XOR pre-enqueue, after validating */
+	u32 tx_mixer;
+	/* what the hotpotato address should be on dequeue */
+	dma_addr_t addr;
+	u32 *frame_ptr;
+
+	/* The following data isn't (necessarily) stashed on dequeue; */
+	/* -------------- */
+	u32 fqid_rx, fqid_tx;
+	/* list node for linking us into 'hp_cpu' */
+	struct list_head node;
+	/* Just to check ... */
+	unsigned int processor_id;
+} ____cacheline_aligned;
+
+struct hp_cpu {
+	/* identify the cpu we run on; */
+	unsigned int processor_id;
+	/* root node for the per-cpu list of handlers */
+	struct list_head handlers;
+	/* list node for linking us into 'hp_cpu_list' */
+	struct list_head node;
+	/*
+	 * when repeatedly scanning 'hp_list', each time linking the n'th
+	 * handlers together, this is used as per-cpu iterator state
+	 */
+	struct hp_handler *iterator;
+};
+
+/* Each cpu has one of these */
+static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
+
+/* links together the hp_cpu structs, in first-come first-serve order. */
+static LIST_HEAD(hp_cpu_list);
+static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
+
+static unsigned int hp_cpu_list_length;
+
+/* the "special" handler, that starts and terminates the test. */
+static struct hp_handler *special_handler;
+static int loop_counter;
+
+/* handlers are allocated out of this, so they're properly aligned. */
+static struct kmem_cache *hp_handler_slab;
+
+/* this is the frame data */
+static void *__frame_ptr;
+static u32 *frame_ptr;
+static dma_addr_t frame_dma;
+
+/* the main function waits on this */
+static DECLARE_WAIT_QUEUE_HEAD(queue);
+
+#define HP_PER_CPU	2
+#define HP_LOOPS	8
+/* 80 bytes, like a small ethernet frame, and bleeds into a second cacheline */
+#define HP_NUM_WORDS	80
+/* First word of the LFSR-based frame data */
+#define HP_FIRST_WORD	0xabbaf00d
+
+static inline u32 do_lfsr(u32 prev)
+{
+	return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
+}
+
+static int allocate_frame_data(void)
+{
+	u32 lfsr = HP_FIRST_WORD;
+	int loop;
+	struct platform_device *pdev = platform_device_alloc("foobar", -1);
+
+	if (!pdev) {
+		pr_crit("platform_device_alloc() failed");
+		return -EIO;
+	}
+	if (platform_device_add(pdev)) {
+		pr_crit("platform_device_add() failed");
+		return -EIO;
+	}
+	__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
+	if (!__frame_ptr)
+		return -ENOMEM;
+
+	frame_ptr = PTR_ALIGN(__frame_ptr, 64);
+	for (loop = 0; loop < HP_NUM_WORDS; loop++) {
+		frame_ptr[loop] = lfsr;
+		lfsr = do_lfsr(lfsr);
+	}
+	frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
+				   DMA_BIDIRECTIONAL);
+	platform_device_del(pdev);
+	platform_device_put(pdev);
+	return 0;
+}
+
+static void deallocate_frame_data(void)
+{
+	kfree(__frame_ptr);
+}
+
+static inline int process_frame_data(struct hp_handler *handler,
+				     const struct qm_fd *fd)
+{
+	u32 *p = handler->frame_ptr;
+	u32 lfsr = HP_FIRST_WORD;
+	int loop;
+
+	if (qm_fd_addr_get64(fd) != handler->addr) {
+		pr_crit("bad frame address");
+		return -EIO;
+	}
+	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+		*p ^= handler->rx_mixer;
+		if (*p != lfsr) {
+			pr_crit("corrupt frame data");
+			return -EIO;
+		}
+		*p ^= handler->tx_mixer;
+		lfsr = do_lfsr(lfsr);
+	}
+	return 0;
+}
+
+static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
+					    struct qman_fq *fq,
+					    const struct qm_dqrr_entry *dqrr)
+{
+	struct hp_handler *handler = (struct hp_handler *)fq;
+
+	if (process_frame_data(handler, &dqrr->fd)) {
+		WARN_ON(1);
+		goto skip;
+	}
+	if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+		pr_crit("qman_enqueue() failed");
+		WARN_ON(1);
+	}
+skip:
+	return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
+					     struct qman_fq *fq,
+					     const struct qm_dqrr_entry *dqrr)
+{
+	struct hp_handler *handler = (struct hp_handler *)fq;
+
+	process_frame_data(handler, &dqrr->fd);
+	if (++loop_counter < HP_LOOPS) {
+		if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+			pr_crit("qman_enqueue() failed");
+			WARN_ON(1);
+			goto skip;
+		}
+	} else {
+		pr_info("Received final (%dth) frame\n", loop_counter);
+		wake_up(&queue);
+	}
+skip:
+	return qman_cb_dqrr_consume;
+}
+
+static int create_per_cpu_handlers(void)
+{
+	struct hp_handler *handler;
+	int loop;
+	struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+	hp_cpu->processor_id = smp_processor_id();
+	spin_lock(&hp_lock);
+	list_add_tail(&hp_cpu->node, &hp_cpu_list);
+	hp_cpu_list_length++;
+	spin_unlock(&hp_lock);
+	INIT_LIST_HEAD(&hp_cpu->handlers);
+	for (loop = 0; loop < HP_PER_CPU; loop++) {
+		handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
+		if (!handler) {
+			pr_crit("kmem_cache_alloc() failed");
+			WARN_ON(1);
+			return -EIO;
+		}
+		handler->processor_id = hp_cpu->processor_id;
+		handler->addr = frame_dma;
+		handler->frame_ptr = frame_ptr;
+		list_add_tail(&handler->node, &hp_cpu->handlers);
+	}
+	return 0;
+}
+
+static int destroy_per_cpu_handlers(void)
+{
+	struct list_head *loop, *tmp;
+	struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+	spin_lock(&hp_lock);
+	list_del(&hp_cpu->node);
+	spin_unlock(&hp_lock);
+	list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
+		u32 flags = 0;
+		struct hp_handler *handler = list_entry(loop, struct hp_handler,
+							node);
+		if (qman_retire_fq(&handler->rx, &flags) ||
+		    (flags & QMAN_FQ_STATE_BLOCKOOS)) {
+			pr_crit("qman_retire_fq(rx) failed, flags: %x", flags);
+			WARN_ON(1);
+			return -EIO;
+		}
+		if (qman_oos_fq(&handler->rx)) {
+			pr_crit("qman_oos_fq(rx) failed");
+			WARN_ON(1);
+			return -EIO;
+		}
+		qman_destroy_fq(&handler->rx);
+		qman_destroy_fq(&handler->tx);
+		qman_release_fqid(handler->fqid_rx);
+		list_del(&handler->node);
+		kmem_cache_free(hp_handler_slab, handler);
+	}
+	return 0;
+}
+
+static inline u8 num_cachelines(u32 offset)
+{
+	u8 res = (offset + (L1_CACHE_BYTES - 1))
+			 / (L1_CACHE_BYTES);
+	if (res > 3)
+		return 3;
+	return res;
+}
+#define STASH_DATA_CL \
+	num_cachelines(HP_NUM_WORDS * 4)
+#define STASH_CTX_CL \
+	num_cachelines(offsetof(struct hp_handler, fqid_rx))
+
+static int init_handler(void *h)
+{
+	struct qm_mcc_initfq opts;
+	struct hp_handler *handler = h;
+	int err;
+
+	if (handler->processor_id != smp_processor_id()) {
+		err = -EIO;
+		goto failed;
+	}
+	/* Set up rx */
+	memset(&handler->rx, 0, sizeof(handler->rx));
+	if (handler == special_handler)
+		handler->rx.cb.dqrr = special_dqrr;
+	else
+		handler->rx.cb.dqrr = normal_dqrr;
+	err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
+	if (err) {
+		pr_crit("qman_create_fq(rx) failed");
+		goto failed;
+	}
+	memset(&opts, 0, sizeof(opts));
+	opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
+	opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
+	qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
+	err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
+			   QMAN_INITFQ_FLAG_LOCAL, &opts);
+	if (err) {
+		pr_crit("qman_init_fq(rx) failed");
+		goto failed;
+	}
+	/* Set up tx */
+	memset(&handler->tx, 0, sizeof(handler->tx));
+	err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
+			     &handler->tx);
+	if (err) {
+		pr_crit("qman_create_fq(tx) failed");
+		goto failed;
+	}
+
+	return 0;
+failed:
+	return err;
+}
+
+static void init_handler_cb(void *h)
+{
+	if (init_handler(h))
+		WARN_ON(1);
+}
+
+static int init_phase2(void)
+{
+	int loop;
+	u32 fqid = 0;
+	u32 lfsr = 0xdeadbeef;
+	struct hp_cpu *hp_cpu;
+	struct hp_handler *handler;
+
+	for (loop = 0; loop < HP_PER_CPU; loop++) {
+		list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+			int err;
+
+			if (!loop)
+				hp_cpu->iterator = list_first_entry(
+						&hp_cpu->handlers,
+						struct hp_handler, node);
+			else
+				hp_cpu->iterator = list_entry(
+						hp_cpu->iterator->node.next,
+						struct hp_handler, node);
+			/* Rx FQID is the previous handler's Tx FQID */
+			hp_cpu->iterator->fqid_rx = fqid;
+			/* Allocate new FQID for Tx */
+			err = qman_alloc_fqid(&fqid);
+			if (err) {
+				pr_crit("qman_alloc_fqid() failed");
+				return err;
+			}
+			hp_cpu->iterator->fqid_tx = fqid;
+			/* Rx mixer is the previous handler's Tx mixer */
+			hp_cpu->iterator->rx_mixer = lfsr;
+			/* Get new mixer for Tx */
+			lfsr = do_lfsr(lfsr);
+			hp_cpu->iterator->tx_mixer = lfsr;
+		}
+	}
+	/* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
+	hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
+	handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
+	if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef)
+		return 1;
+	handler->fqid_rx = fqid;
+	handler->rx_mixer = lfsr;
+	/* and tag it as our "special" handler */
+	special_handler = handler;
+	return 0;
+}
+
+static int init_phase3(void)
+{
+	int loop, err;
+	struct hp_cpu *hp_cpu;
+
+	for (loop = 0; loop < HP_PER_CPU; loop++) {
+		list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+			if (!loop)
+				hp_cpu->iterator = list_first_entry(
+						&hp_cpu->handlers,
+						struct hp_handler, node);
+			else
+				hp_cpu->iterator = list_entry(
+						hp_cpu->iterator->node.next,
+						struct hp_handler, node);
+			preempt_disable();
+			if (hp_cpu->processor_id == smp_processor_id()) {
+				err = init_handler(hp_cpu->iterator);
+				if (err)
+					return err;
+			} else {
+				smp_call_function_single(hp_cpu->processor_id,
+					init_handler_cb, hp_cpu->iterator, 1);
+			}
+			preempt_enable();
+		}
+	}
+	return 0;
+}
+
+static int send_first_frame(void *ignore)
+{
+	u32 *p = special_handler->frame_ptr;
+	u32 lfsr = HP_FIRST_WORD;
+	int loop, err;
+	struct qm_fd fd;
+
+	if (special_handler->processor_id != smp_processor_id()) {
+		err = -EIO;
+		goto failed;
+	}
+	memset(&fd, 0, sizeof(fd));
+	qm_fd_addr_set64(&fd, special_handler->addr);
+	qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4);
+	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+		if (*p != lfsr) {
+			err = -EIO;
+			pr_crit("corrupt frame data");
+			goto failed;
+		}
+		*p ^= special_handler->tx_mixer;
+		lfsr = do_lfsr(lfsr);
+	}
+	pr_info("Sending first frame\n");
+	err = qman_enqueue(&special_handler->tx, &fd);
+	if (err) {
+		pr_crit("qman_enqueue() failed");
+		goto failed;
+	}
+
+	return 0;
+failed:
+	return err;
+}
+
+static void send_first_frame_cb(void *ignore)
+{
+	if (send_first_frame(NULL))
+		WARN_ON(1);
+}
+
+int qman_test_stash(void)
+{
+	int err;
+
+	if (cpumask_weight(cpu_online_mask) < 2) {
+		pr_info("%s(): skip - only 1 CPU\n", __func__);
+		return 0;
+	}
+
+	pr_info("%s(): Starting\n", __func__);
+
+	hp_cpu_list_length = 0;
+	loop_counter = 0;
+	hp_handler_slab = kmem_cache_create("hp_handler_slab",
+			sizeof(struct hp_handler), L1_CACHE_BYTES,
+			SLAB_HWCACHE_ALIGN, NULL);
+	if (!hp_handler_slab) {
+		err = -EIO;
+		pr_crit("kmem_cache_create() failed");
+		goto failed;
+	}
+
+	err = allocate_frame_data();
+	if (err)
+		goto failed;
+
+	/* Init phase 1 */
+	pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
+	if (on_all_cpus(create_per_cpu_handlers)) {
+		err = -EIO;
+		pr_crit("on_each_cpu() failed");
+		goto failed;
+	}
+	pr_info("Number of cpus: %d, total of %d handlers\n",
+		hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
+
+	err = init_phase2();
+	if (err)
+		goto failed;
+
+	err = init_phase3();
+	if (err)
+		goto failed;
+
+	preempt_disable();
+	if (special_handler->processor_id == smp_processor_id()) {
+		err = send_first_frame(NULL);
+		if (err)
+			goto failed;
+	} else {
+		smp_call_function_single(special_handler->processor_id,
+					 send_first_frame_cb, NULL, 1);
+	}
+	preempt_enable();
+
+	wait_event(queue, loop_counter == HP_LOOPS);
+	deallocate_frame_data();
+	if (on_all_cpus(destroy_per_cpu_handlers)) {
+		err = -EIO;
+		pr_crit("on_each_cpu() failed");
+		goto failed;
+	}
+	kmem_cache_destroy(hp_handler_slab);
+	pr_info("%s(): Finished\n", __func__);
+
+	return 0;
+failed:
+	WARN_ON(1);
+	return err;
+}
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index 333eb22..0aaf429 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -41,7 +41,8 @@
 
 static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-	struct qe_gpio_chip *qe_gc = gpiochip_get_data(&mm_gc->gc);
+	struct qe_gpio_chip *qe_gc =
+		container_of(mm_gc, struct qe_gpio_chip, mm_gc);
 	struct qe_pio_regs __iomem *regs = mm_gc->regs;
 
 	qe_gc->cpdata = in_be32(&regs->cpdata);
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 7026507..2707a82 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -69,8 +69,8 @@
 phys_addr_t get_qe_base(void)
 {
 	struct device_node *qe;
-	int size;
-	const u32 *prop;
+	int ret;
+	struct resource res;
 
 	if (qebase != -1)
 		return qebase;
@@ -82,9 +82,9 @@
 			return qebase;
 	}
 
-	prop = of_get_property(qe, "reg", &size);
-	if (prop && size >= sizeof(*prop))
-		qebase = of_translate_address(qe, prop);
+	ret = of_address_to_resource(qe, 0, &res);
+	if (!ret)
+		qebase = res.start;
 	of_node_put(qe);
 
 	return qebase;
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 41eff80..104e68d 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -70,6 +70,11 @@
 	}
 
 	muram_pool = gen_pool_create(0, -1);
+	if (!muram_pool) {
+		pr_err("Cannot allocate memory pool for CPM/QE muram");
+		ret = -ENOMEM;
+		goto out_muram;
+	}
 	muram_pbase = of_translate_address(np, zero);
 	if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
 		pr_err("Cannot translate zero through CPM muram node");
@@ -116,6 +121,9 @@
 	struct muram_block *entry;
 	unsigned long start;
 
+	if (!muram_pool && cpm_muram_init())
+		goto out2;
+
 	start = gen_pool_alloc_algo(muram_pool, size, algo, data);
 	if (!start)
 		goto out2;
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
index 5e48b14..a1048b4 100644
--- a/drivers/soc/fsl/qe/qe_tdm.c
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -99,7 +99,7 @@
 	utdm->tdm_port = val;
 	ut_info->uf_info.tdm_num = utdm->tdm_port;
 
-	if (of_get_property(np, "fsl,tdm-internal-loopback", NULL))
+	if (of_property_read_bool(np, "fsl,tdm-internal-loopback"))
 		utdm->tdm_mode = TDM_INTERNAL_LOOPBACK;
 	else
 		utdm->tdm_mode = TDM_NORMAL;
@@ -167,7 +167,7 @@
 	}
 
 	if (siram_init_flag == 0) {
-		memset_io(utdm->siram, 0,  res->end - res->start + 1);
+		memset_io(utdm->siram, 0,  resource_size(res));
 		siram_init_flag = 1;
 	}
 
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8146ccd..5787b72 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1112,7 +1112,7 @@
 
 	/* If another context is idling the device then defer */
 	if (master->idling) {
-		queue_kthread_work(&master->kworker, &master->pump_messages);
+		kthread_queue_work(&master->kworker, &master->pump_messages);
 		spin_unlock_irqrestore(&master->queue_lock, flags);
 		return;
 	}
@@ -1126,7 +1126,7 @@
 
 		/* Only do teardown in the thread */
 		if (!in_kthread) {
-			queue_kthread_work(&master->kworker,
+			kthread_queue_work(&master->kworker,
 					   &master->pump_messages);
 			spin_unlock_irqrestore(&master->queue_lock, flags);
 			return;
@@ -1250,7 +1250,7 @@
 	master->running = false;
 	master->busy = false;
 
-	init_kthread_worker(&master->kworker);
+	kthread_init_worker(&master->kworker);
 	master->kworker_task = kthread_run(kthread_worker_fn,
 					   &master->kworker, "%s",
 					   dev_name(&master->dev));
@@ -1258,7 +1258,7 @@
 		dev_err(&master->dev, "failed to create message pump task\n");
 		return PTR_ERR(master->kworker_task);
 	}
-	init_kthread_work(&master->pump_messages, spi_pump_messages);
+	kthread_init_work(&master->pump_messages, spi_pump_messages);
 
 	/*
 	 * Master config will indicate if this controller should run the
@@ -1331,7 +1331,7 @@
 	spin_lock_irqsave(&master->queue_lock, flags);
 	master->cur_msg = NULL;
 	master->cur_msg_prepared = false;
-	queue_kthread_work(&master->kworker, &master->pump_messages);
+	kthread_queue_work(&master->kworker, &master->pump_messages);
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 
 	trace_spi_message_done(mesg);
@@ -1357,7 +1357,7 @@
 	master->cur_msg = NULL;
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 
-	queue_kthread_work(&master->kworker, &master->pump_messages);
+	kthread_queue_work(&master->kworker, &master->pump_messages);
 
 	return 0;
 }
@@ -1404,7 +1404,7 @@
 	ret = spi_stop_queue(master);
 
 	/*
-	 * flush_kthread_worker will block until all work is done.
+	 * kthread_flush_worker will block until all work is done.
 	 * If the reason that stop_queue timed out is that the work will never
 	 * finish, then it does no good to call flush/stop thread, so
 	 * return anyway.
@@ -1414,7 +1414,7 @@
 		return ret;
 	}
 
-	flush_kthread_worker(&master->kworker);
+	kthread_flush_worker(&master->kworker);
 	kthread_stop(master->kworker_task);
 
 	return 0;
@@ -1438,7 +1438,7 @@
 
 	list_add_tail(&msg->queue, &master->queue);
 	if (!master->busy && need_pump)
-		queue_kthread_work(&master->kworker, &master->pump_messages);
+		kthread_queue_work(&master->kworker, &master->pump_messages);
 
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 	return 0;
diff --git a/drivers/staging/lustre/lustre/include/lustre_acl.h b/drivers/staging/lustre/lustre/include/lustre_acl.h
index fecabe1..9786f6c 100644
--- a/drivers/staging/lustre/lustre/include/lustre_acl.h
+++ b/drivers/staging/lustre/lustre/include/lustre_acl.h
@@ -38,8 +38,8 @@
 #include <linux/posix_acl_xattr.h>
 
 #define LUSTRE_POSIX_ACL_MAX_ENTRIES	32
-#define LUSTRE_POSIX_ACL_MAX_SIZE					\
-	(sizeof(posix_acl_xattr_header) +				\
-	 LUSTRE_POSIX_ACL_MAX_ENTRIES * sizeof(posix_acl_xattr_entry))
+#define LUSTRE_POSIX_ACL_MAX_SIZE						\
+	(sizeof(struct posix_acl_xattr_header) +				\
+	 LUSTRE_POSIX_ACL_MAX_ENTRIES * sizeof(struct posix_acl_xattr_entry))
 
 #endif
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index d56863f..e1d784b 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -1121,8 +1121,8 @@
 	struct cl_io	 *io;
 	ssize_t	       result;
 
-	CDEBUG(D_VFSTRACE, "file: %s, type: %d ppos: %llu, count: %zu\n",
-	       file->f_path.dentry->d_name.name, iot, *ppos, count);
+	CDEBUG(D_VFSTRACE, "file: %pD, type: %d ppos: %llu, count: %zu\n",
+	       file, iot, *ppos, count);
 
 restart:
 	io = vvp_env_thread_io(env);
@@ -3268,10 +3268,7 @@
 	.setattr	= ll_setattr,
 	.getattr	= ll_getattr,
 	.permission	= ll_inode_permission,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ll_listxattr,
-	.removexattr	= generic_removexattr,
 	.fiemap		= ll_fiemap,
 	.get_acl	= ll_get_acl,
 };
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 6bb41b0..e5c62f4 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -1459,7 +1459,7 @@
 		attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
 	}
 
-	/* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
+	/* POSIX: check before ATTR_*TIME_SET set (from setattr_prepare) */
 	if (attr->ia_valid & TIMES_SET_FLAGS) {
 		if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
 		    !capable(CFS_CAP_FOWNER))
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index dfa36d3..180f35e 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -1097,13 +1097,17 @@
 }
 
 static int ll_rename(struct inode *src, struct dentry *src_dchild,
-		     struct inode *tgt, struct dentry *tgt_dchild)
+		     struct inode *tgt, struct dentry *tgt_dchild,
+		     unsigned int flags)
 {
 	struct ptlrpc_request *request = NULL;
 	struct ll_sb_info *sbi = ll_i2sbi(src);
 	struct md_op_data *op_data;
 	int err;
 
+	if (flags)
+		return -EINVAL;
+
 	CDEBUG(D_VFSTRACE,
 	       "VFS Op:oldname=%pd, src_dir="DFID"(%p), newname=%pd, tgt_dir="DFID"(%p)\n",
 	       src_dchild, PFID(ll_inode2fid(src)), src,
@@ -1148,14 +1152,11 @@
 	.rmdir	      = ll_rmdir,
 	.symlink	    = ll_symlink,
 	.link	       = ll_link,
-	.rename	     = ll_rename,
+	.rename		= ll_rename,
 	.setattr	    = ll_setattr,
 	.getattr	    = ll_getattr,
 	.permission	 = ll_inode_permission,
-	.setxattr	   = generic_setxattr,
-	.getxattr	   = generic_getxattr,
 	.listxattr	  = ll_listxattr,
-	.removexattr	= generic_removexattr,
 	.get_acl	    = ll_get_acl,
 };
 
@@ -1163,9 +1164,6 @@
 	.setattr	= ll_setattr,
 	.getattr	= ll_getattr,
 	.permission     = ll_inode_permission,
-	.setxattr       = generic_setxattr,
-	.getxattr       = generic_getxattr,
 	.listxattr      = ll_listxattr,
-	.removexattr    = generic_removexattr,
 	.get_acl	    = ll_get_acl,
 };
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index f8bc7ed..82c7c48 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -154,8 +154,5 @@
 	.get_link	= ll_get_link,
 	.getattr	= ll_getattr,
 	.permission	= ll_inode_permission,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ll_listxattr,
-	.removexattr	= generic_removexattr,
 };
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 5d79efc..046e84d 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -241,10 +241,7 @@
 		obj->vob_discard_page_warned = 0;
 	} else {
 		SetPageError(vmpage);
-		if (ioret == -ENOSPC)
-			set_bit(AS_ENOSPC, &inode->i_mapping->flags);
-		else
-			set_bit(AS_EIO, &inode->i_mapping->flags);
+		mapping_set_error(inode->i_mapping, ioret);
 
 		if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
 		    obj->vob_discard_page_warned == 0) {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index e5945e2..b05b1f9 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -3742,32 +3742,28 @@
 	CLASSERT(FIEMAP_EXTENT_NET == 0x80000000);
 
 	/* Checks for type posix_acl_xattr_entry */
-	LASSERTF((int)sizeof(posix_acl_xattr_entry) == 8, "found %lld\n",
-		 (long long)(int)sizeof(posix_acl_xattr_entry));
-	LASSERTF((int)offsetof(posix_acl_xattr_entry, e_tag) == 0, "found %lld\n",
-		 (long long)(int)offsetof(posix_acl_xattr_entry, e_tag));
-	LASSERTF((int)sizeof(((posix_acl_xattr_entry *)0)->e_tag) == 2, "found %lld\n",
-		 (long long)(int)sizeof(((posix_acl_xattr_entry *)0)->e_tag));
-	LASSERTF((int)offsetof(posix_acl_xattr_entry, e_perm) == 2, "found %lld\n",
-		 (long long)(int)offsetof(posix_acl_xattr_entry, e_perm));
-	LASSERTF((int)sizeof(((posix_acl_xattr_entry *)0)->e_perm) == 2, "found %lld\n",
-		 (long long)(int)sizeof(((posix_acl_xattr_entry *)0)->e_perm));
-	LASSERTF((int)offsetof(posix_acl_xattr_entry, e_id) == 4, "found %lld\n",
-		 (long long)(int)offsetof(posix_acl_xattr_entry, e_id));
-	LASSERTF((int)sizeof(((posix_acl_xattr_entry *)0)->e_id) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((posix_acl_xattr_entry *)0)->e_id));
+	LASSERTF((int)sizeof(struct posix_acl_xattr_entry) == 8, "found %lld\n",
+		 (long long)(int)sizeof(struct posix_acl_xattr_entry));
+	LASSERTF((int)offsetof(struct posix_acl_xattr_entry, e_tag) == 0, "found %lld\n",
+		 (long long)(int)offsetof(struct posix_acl_xattr_entry, e_tag));
+	LASSERTF((int)sizeof(((struct posix_acl_xattr_entry *)0)->e_tag) == 2, "found %lld\n",
+		 (long long)(int)sizeof(((struct posix_acl_xattr_entry *)0)->e_tag));
+	LASSERTF((int)offsetof(struct posix_acl_xattr_entry, e_perm) == 2, "found %lld\n",
+		 (long long)(int)offsetof(struct posix_acl_xattr_entry, e_perm));
+	LASSERTF((int)sizeof(((struct posix_acl_xattr_entry *)0)->e_perm) == 2, "found %lld\n",
+		 (long long)(int)sizeof(((struct posix_acl_xattr_entry *)0)->e_perm));
+	LASSERTF((int)offsetof(struct posix_acl_xattr_entry, e_id) == 4, "found %lld\n",
+		 (long long)(int)offsetof(struct posix_acl_xattr_entry, e_id));
+	LASSERTF((int)sizeof(((struct posix_acl_xattr_entry *)0)->e_id) == 4, "found %lld\n",
+		 (long long)(int)sizeof(((struct posix_acl_xattr_entry *)0)->e_id));
 
 	/* Checks for type posix_acl_xattr_header */
-	LASSERTF((int)sizeof(posix_acl_xattr_header) == 4, "found %lld\n",
-		 (long long)(int)sizeof(posix_acl_xattr_header));
-	LASSERTF((int)offsetof(posix_acl_xattr_header, a_version) == 0, "found %lld\n",
-		 (long long)(int)offsetof(posix_acl_xattr_header, a_version));
-	LASSERTF((int)sizeof(((posix_acl_xattr_header *)0)->a_version) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((posix_acl_xattr_header *)0)->a_version));
-	LASSERTF((int)offsetof(posix_acl_xattr_header, a_entries) == 4, "found %lld\n",
-		 (long long)(int)offsetof(posix_acl_xattr_header, a_entries));
-	LASSERTF((int)sizeof(((posix_acl_xattr_header *)0)->a_entries) == 0, "found %lld\n",
-		 (long long)(int)sizeof(((posix_acl_xattr_header *)0)->a_entries));
+	LASSERTF((int)sizeof(struct posix_acl_xattr_header) == 4, "found %lld\n",
+		 (long long)(int)sizeof(struct posix_acl_xattr_header));
+	LASSERTF((int)offsetof(struct posix_acl_xattr_header, a_version) == 0, "found %lld\n",
+		 (long long)(int)offsetof(struct posix_acl_xattr_header, a_version));
+	LASSERTF((int)sizeof(((struct posix_acl_xattr_header *)0)->a_version) == 4, "found %lld\n",
+		 (long long)(int)sizeof(((struct posix_acl_xattr_header *)0)->a_version));
 
 	/* Checks for struct link_ea_header */
 	LASSERTF((int)sizeof(struct link_ea_header) == 24, "found %lld\n",
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 7292f23..6620d96 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -31,11 +31,11 @@
 
 source "drivers/staging/media/pulse8-cec/Kconfig"
 
-source "drivers/staging/media/tw686x-kh/Kconfig"
-
 source "drivers/staging/media/s5p-cec/Kconfig"
 
 # Keep LIRC at the end, as it has sub-menus
 source "drivers/staging/media/lirc/Kconfig"
 
+source "drivers/staging/media/st-cec/Kconfig"
+
 endif
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 87ce8ad..906257e 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -6,4 +6,4 @@
 obj-$(CONFIG_VIDEO_DM365_VPFE)	+= davinci_vpfe/
 obj-$(CONFIG_VIDEO_OMAP4)	+= omap4iss/
 obj-$(CONFIG_USB_PULSE8_CEC)    += pulse8-cec/
-obj-$(CONFIG_VIDEO_TW686X_KH)	+= tw686x-kh/
+obj-$(CONFIG_VIDEO_STI_HDMI_CEC) += st-cec/
diff --git a/drivers/staging/media/cec/Kconfig b/drivers/staging/media/cec/Kconfig
index 21457a1..6e12d41 100644
--- a/drivers/staging/media/cec/Kconfig
+++ b/drivers/staging/media/cec/Kconfig
@@ -5,9 +5,6 @@
 	---help---
 	  Enable the CEC API.
 
-	  To compile this driver as a module, choose M here: the
-	  module will be called cec.
-
 config MEDIA_CEC_DEBUG
 	bool "CEC debugfs interface (EXPERIMENTAL)"
 	depends on MEDIA_CEC && DEBUG_FS
diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c
index 946986f..611e07b 100644
--- a/drivers/staging/media/cec/cec-adap.c
+++ b/drivers/staging/media/cec/cec-adap.c
@@ -1164,8 +1164,6 @@
 	if (IS_ERR_OR_NULL(adap))
 		return;
 
-	if (WARN_ON(adap->capabilities & CEC_CAP_PHYS_ADDR))
-		return;
 	mutex_lock(&adap->lock);
 	__cec_s_phys_addr(adap, phys_addr, block);
 	mutex_unlock(&adap->lock);
@@ -1306,8 +1304,6 @@
 {
 	int err;
 
-	if (WARN_ON(adap->capabilities & CEC_CAP_LOG_ADDRS))
-		return -EINVAL;
 	mutex_lock(&adap->lock);
 	err = __cec_s_log_addrs(adap, log_addrs, block);
 	mutex_unlock(&adap->lock);
diff --git a/drivers/staging/media/cec/cec-core.c b/drivers/staging/media/cec/cec-core.c
index 3b1e4d2..b0137e2 100644
--- a/drivers/staging/media/cec/cec-core.c
+++ b/drivers/staging/media/cec/cec-core.c
@@ -357,8 +357,7 @@
 	if (adap->kthread_config)
 		kthread_stop(adap->kthread_config);
 #if IS_REACHABLE(CONFIG_RC_CORE)
-	if (adap->rc)
-		rc_free_device(adap->rc);
+	rc_free_device(adap->rc);
 #endif
 	kfree(adap);
 }
diff --git a/drivers/staging/media/lirc/lirc_parallel.c b/drivers/staging/media/lirc/lirc_parallel.c
index 64d99ec..bfb76a4 100644
--- a/drivers/staging/media/lirc/lirc_parallel.c
+++ b/drivers/staging/media/lirc/lirc_parallel.c
@@ -650,7 +650,7 @@
 	if (!pport) {
 		pr_notice("no port at %x found\n", io);
 		result = -ENXIO;
-		goto exit_device_put;
+		goto exit_device_del;
 	}
 	ppdevice = parport_register_device(pport, LIRC_DRIVER_NAME,
 					   pf, kf, lirc_lirc_irq_handler, 0,
@@ -659,7 +659,7 @@
 	if (!ppdevice) {
 		pr_notice("parport_register_device() failed\n");
 		result = -ENXIO;
-		goto exit_device_put;
+		goto exit_device_del;
 	}
 	if (parport_claim(ppdevice) != 0)
 		goto skip_init;
@@ -678,7 +678,7 @@
 		parport_release(pport);
 		parport_unregister_device(ppdevice);
 		result = -EIO;
-		goto exit_device_put;
+		goto exit_device_del;
 	}
 
 #endif
@@ -695,11 +695,13 @@
 		pr_notice("register_chrdev() failed\n");
 		parport_unregister_device(ppdevice);
 		result = -EIO;
-		goto exit_device_put;
+		goto exit_device_del;
 	}
 	pr_info("installed using port 0x%04x irq %d\n", io, irq);
 	return 0;
 
+exit_device_del:
+	platform_device_del(lirc_parallel_dev);
 exit_device_put:
 	platform_device_put(lirc_parallel_dev);
 exit_driver_unregister:
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index 6ceb4eb..c26c99fd 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -61,7 +61,7 @@
  * See this link for reference:
  *   http://www.mail-archive.com/linux-omap@vger.kernel.org/msg08149.html
  */
-void omap4iss_flush(struct iss_device *iss)
+static void omap4iss_flush(struct iss_device *iss)
 {
 	iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION, 0);
 	iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION);
@@ -362,6 +362,10 @@
 	return IRQ_HANDLED;
 }
 
+static const struct media_device_ops iss_media_ops = {
+	.link_notify = v4l2_pipeline_link_notify,
+};
+
 /* -----------------------------------------------------------------------------
  * Pipeline stream management
  */
@@ -988,7 +992,7 @@
 	strlcpy(iss->media_dev.model, "TI OMAP4 ISS",
 		sizeof(iss->media_dev.model));
 	iss->media_dev.hw_revision = iss->revision;
-	iss->media_dev.link_notify = v4l2_pipeline_link_notify;
+	iss->media_dev.ops = &iss_media_ops;
 	ret = media_device_register(&iss->media_dev);
 	if (ret < 0) {
 		dev_err(iss->dev, "Media device registration failed (%d)\n",
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 90b7ff5..c16927a 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -646,6 +646,103 @@
 }
 
 static int
+iss_video_get_selection(struct file *file, void *fh, struct v4l2_selection *sel)
+{
+	struct iss_video *video = video_drvdata(file);
+	struct v4l2_subdev_format format;
+	struct v4l2_subdev *subdev;
+	struct v4l2_subdev_selection sdsel = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.target = sel->target,
+	};
+	u32 pad;
+	int ret;
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP:
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+			return -EINVAL;
+		break;
+	case V4L2_SEL_TGT_COMPOSE:
+	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+	subdev = iss_video_remote_subdev(video, &pad);
+	if (subdev == NULL)
+		return -EINVAL;
+
+	/* Try the get selection operation first and fallback to get format if not
+	 * implemented.
+	 */
+	sdsel.pad = pad;
+	ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel);
+	if (!ret)
+		sel->r = sdsel.r;
+	if (ret != -ENOIOCTLCMD)
+		return ret;
+
+	format.pad = pad;
+	format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
+	if (ret < 0)
+		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
+
+	sel->r.left = 0;
+	sel->r.top = 0;
+	sel->r.width = format.format.width;
+	sel->r.height = format.format.height;
+
+	return 0;
+}
+
+static int
+iss_video_set_selection(struct file *file, void *fh, struct v4l2_selection *sel)
+{
+	struct iss_video *video = video_drvdata(file);
+	struct v4l2_subdev *subdev;
+	struct v4l2_subdev_selection sdsel = {
+		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+		.target = sel->target,
+		.flags = sel->flags,
+		.r = sel->r,
+	};
+	u32 pad;
+	int ret;
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP:
+		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+			return -EINVAL;
+		break;
+	case V4L2_SEL_TGT_COMPOSE:
+		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+	subdev = iss_video_remote_subdev(video, &pad);
+	if (subdev == NULL)
+		return -EINVAL;
+
+	sdsel.pad = pad;
+	mutex_lock(&video->mutex);
+	ret = v4l2_subdev_call(subdev, pad, set_selection, NULL, &sdsel);
+	mutex_unlock(&video->mutex);
+	if (!ret)
+		sel->r = sdsel.r;
+
+	return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
+}
+
+static int
 iss_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
 {
 	struct iss_video_fh *vfh = to_iss_video_fh(fh);
@@ -971,6 +1068,8 @@
 	.vidioc_g_fmt_vid_out		= iss_video_get_format,
 	.vidioc_s_fmt_vid_out		= iss_video_set_format,
 	.vidioc_try_fmt_vid_out		= iss_video_try_format,
+	.vidioc_g_selection		= iss_video_get_selection,
+	.vidioc_s_selection		= iss_video_set_selection,
 	.vidioc_g_parm			= iss_video_get_param,
 	.vidioc_s_parm			= iss_video_set_param,
 	.vidioc_reqbufs			= iss_video_reqbufs,
diff --git a/drivers/staging/media/pulse8-cec/pulse8-cec.c b/drivers/staging/media/pulse8-cec/pulse8-cec.c
index ed8bd95..1732c38 100644
--- a/drivers/staging/media/pulse8-cec/pulse8-cec.c
+++ b/drivers/staging/media/pulse8-cec/pulse8-cec.c
@@ -10,6 +10,29 @@
  * this archive for more details.
  */
 
+/*
+ * Notes:
+ *
+ * - Devices with firmware version < 2 do not store their configuration in
+ *   EEPROM.
+ *
+ * - In autonomous mode, only messages from a TV will be acknowledged, even
+ *   polling messages. Upon receiving a message from a TV, the dongle will
+ *   respond to messages from any logical address.
+ *
+ * - In autonomous mode, the dongle will by default reply Feature Abort
+ *   [Unrecognized Opcode] when it receives Give Device Vendor ID. It will
+ *   however observe vendor ID's reported by other devices and possibly
+ *   alter this behavior. When TV's (and TV's only) report that their vendor ID
+ *   is LG (0x00e091), the dongle will itself reply that it has the same vendor
+ *   ID, and it will respond to at least one vendor specific command.
+ *
+ * - In autonomous mode, the dongle is known to attempt wakeup if it receives
+ *   <User Control Pressed> ["Power On"], ["Power] or ["Power Toggle"], or if it
+ *   receives <Set Stream Path> with its own physical address. It also does this
+ *   if it receives <Vendor Specific Command> [0x03 0x00] from an LG TV.
+ */
+
 #include <linux/completion.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -28,8 +51,11 @@
 MODULE_LICENSE("GPL");
 
 static int debug;
+static int persistent_config = 1;
 module_param(debug, int, 0644);
+module_param(persistent_config, int, 0644);
 MODULE_PARM_DESC(debug, "debug level (0-1)");
+MODULE_PARM_DESC(persistent_config, "read config from persistent memory (0-1)");
 
 enum pulse8_msgcodes {
 	MSGCODE_NOTHING = 0,
@@ -86,12 +112,16 @@
 
 #define DATA_SIZE 256
 
+#define PING_PERIOD	(15 * HZ)
+
 struct pulse8 {
 	struct device *dev;
 	struct serio *serio;
 	struct cec_adapter *adap;
+	unsigned int vers;
 	struct completion cmd_done;
 	struct work_struct work;
+	struct delayed_work ping_eeprom_work;
 	struct cec_msg rx_msg;
 	u8 data[DATA_SIZE];
 	unsigned int len;
@@ -99,8 +129,15 @@
 	unsigned int idx;
 	bool escape;
 	bool started;
+	struct mutex config_lock;
+	struct mutex write_lock;
+	bool config_pending;
+	bool restoring_config;
+	bool autonomous;
 };
 
+static void pulse8_ping_eeprom_work_handler(struct work_struct *work);
+
 static void pulse8_irq_work_handler(struct work_struct *work)
 {
 	struct pulse8 *pulse8 =
@@ -205,6 +242,7 @@
 	struct pulse8 *pulse8 = serio_get_drvdata(serio);
 
 	cec_unregister_adapter(pulse8->adap);
+	cancel_delayed_work_sync(&pulse8->ping_eeprom_work);
 	dev_info(&serio->dev, "disconnected\n");
 	serio_close(serio);
 	serio_set_drvdata(serio, NULL);
@@ -228,13 +266,14 @@
 		}
 	}
 	if (!err)
-		err = serio_write(serio, 0xfe);
+		err = serio_write(serio, MSGEND);
 
 	return err;
 }
 
-static int pulse8_send_and_wait(struct pulse8 *pulse8,
-				const u8 *cmd, u8 cmd_len, u8 response, u8 size)
+static int pulse8_send_and_wait_once(struct pulse8 *pulse8,
+				     const u8 *cmd, u8 cmd_len,
+				     u8 response, u8 size)
 {
 	int err;
 
@@ -250,24 +289,8 @@
 	if ((pulse8->data[0] & 0x3f) == MSGCODE_COMMAND_REJECTED &&
 	    cmd[0] != MSGCODE_SET_CONTROLLED &&
 	    cmd[0] != MSGCODE_SET_AUTO_ENABLED &&
-	    cmd[0] != MSGCODE_GET_BUILDDATE) {
-		u8 cmd_sc[2];
-
-		cmd_sc[0] = MSGCODE_SET_CONTROLLED;
-		cmd_sc[1] = 1;
-		err = pulse8_send_and_wait(pulse8, cmd_sc, 2,
-					   MSGCODE_COMMAND_ACCEPTED, 1);
-		if (err)
-			return err;
-		init_completion(&pulse8->cmd_done);
-
-		err = pulse8_send(pulse8->serio, cmd, cmd_len);
-		if (err)
-			return err;
-
-		if (!wait_for_completion_timeout(&pulse8->cmd_done, HZ))
-			return -ETIMEDOUT;
-	}
+	    cmd[0] != MSGCODE_GET_BUILDDATE)
+		return -ENOTTY;
 	if (response &&
 	    ((pulse8->data[0] & 0x3f) != response || pulse8->len < size + 1)) {
 		dev_info(pulse8->dev, "transmit: failed %02x\n",
@@ -277,74 +300,155 @@
 	return 0;
 }
 
-static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio)
+static int pulse8_send_and_wait(struct pulse8 *pulse8,
+				const u8 *cmd, u8 cmd_len, u8 response, u8 size)
 {
-	u8 *data = pulse8->data + 1;
-	unsigned int count = 0;
-	unsigned int vers = 0;
-	u8 cmd[2];
+	u8 cmd_sc[2];
 	int err;
 
-	cmd[0] = MSGCODE_PING;
-	err = pulse8_send_and_wait(pulse8, cmd, 1,
-				   MSGCODE_COMMAND_ACCEPTED, 0);
+	mutex_lock(&pulse8->write_lock);
+	err = pulse8_send_and_wait_once(pulse8, cmd, cmd_len, response, size);
+
+	if (err == -ENOTTY) {
+		cmd_sc[0] = MSGCODE_SET_CONTROLLED;
+		cmd_sc[1] = 1;
+		err = pulse8_send_and_wait_once(pulse8, cmd_sc, 2,
+						MSGCODE_COMMAND_ACCEPTED, 1);
+		if (err)
+			goto unlock;
+		err = pulse8_send_and_wait_once(pulse8, cmd, cmd_len,
+						response, size);
+	}
+
+unlock:
+	mutex_unlock(&pulse8->write_lock);
+	return err == -ENOTTY ? -EIO : err;
+}
+
+static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
+			struct cec_log_addrs *log_addrs, u16 *pa)
+{
+	u8 *data = pulse8->data + 1;
+	u8 cmd[2];
+	int err;
+	struct tm tm;
+	time_t date;
+
+	pulse8->vers = 0;
+
 	cmd[0] = MSGCODE_FIRMWARE_VERSION;
-	if (!err)
-		err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 2);
+	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 2);
+	if (err)
+		return err;
+	pulse8->vers = (data[0] << 8) | data[1];
+	dev_info(pulse8->dev, "Firmware version %04x\n", pulse8->vers);
+	if (pulse8->vers < 2) {
+		*pa = CEC_PHYS_ADDR_INVALID;
+		return 0;
+	}
+
+	cmd[0] = MSGCODE_GET_BUILDDATE;
+	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 4);
+	if (err)
+		return err;
+	date = (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3];
+	time_to_tm(date, 0, &tm);
+	dev_info(pulse8->dev, "Firmware build date %04ld.%02d.%02d %02d:%02d:%02d\n",
+		 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+		 tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+	dev_dbg(pulse8->dev, "Persistent config:\n");
+	cmd[0] = MSGCODE_GET_AUTO_ENABLED;
+	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+	if (err)
+		return err;
+	pulse8->autonomous = data[0];
+	dev_dbg(pulse8->dev, "Autonomous mode: %s",
+		data[0] ? "on" : "off");
+
+	cmd[0] = MSGCODE_GET_DEVICE_TYPE;
+	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+	if (err)
+		return err;
+	log_addrs->primary_device_type[0] = data[0];
+	dev_dbg(pulse8->dev, "Primary device type: %d\n", data[0]);
+	switch (log_addrs->primary_device_type[0]) {
+	case CEC_OP_PRIM_DEVTYPE_TV:
+		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_TV;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_RECORD:
+		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_RECORD;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_TUNER:
+		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_TUNER;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_PLAYBACK:
+		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM:
+		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_SWITCH:
+		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_PROCESSOR:
+		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_SPECIFIC;
+		break;
+	default:
+		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
+		dev_info(pulse8->dev, "Unknown Primary Device Type: %d\n",
+			 log_addrs->primary_device_type[0]);
+		break;
+	}
+
+	cmd[0] = MSGCODE_GET_LOGICAL_ADDRESS_MASK;
+	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 2);
+	if (err)
+		return err;
+	log_addrs->log_addr_mask = (data[0] << 8) | data[1];
+	dev_dbg(pulse8->dev, "Logical address ACK mask: %x\n",
+		log_addrs->log_addr_mask);
+	if (log_addrs->log_addr_mask)
+		log_addrs->num_log_addrs = 1;
+
+	cmd[0] = MSGCODE_GET_PHYSICAL_ADDRESS;
+	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+	if (err)
+		return err;
+	*pa = (data[0] << 8) | data[1];
+	dev_dbg(pulse8->dev, "Physical address: %x.%x.%x.%x\n",
+		cec_phys_addr_exp(*pa));
+
+	cmd[0] = MSGCODE_GET_HDMI_VERSION;
+	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+	if (err)
+		return err;
+	log_addrs->cec_version = data[0];
+	dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
+
+	cmd[0] = MSGCODE_GET_OSD_NAME;
+	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 0);
+	if (err)
+		return err;
+	strncpy(log_addrs->osd_name, data, 13);
+	dev_dbg(pulse8->dev, "OSD name: %s\n", log_addrs->osd_name);
+
+	return 0;
+}
+
+static int pulse8_apply_persistent_config(struct pulse8 *pulse8,
+					  struct cec_log_addrs *log_addrs,
+					  u16 pa)
+{
+	int err;
+
+	err = cec_s_log_addrs(pulse8->adap, log_addrs, false);
 	if (err)
 		return err;
 
-	vers = (data[0] << 8) | data[1];
+	cec_s_phys_addr(pulse8->adap, pa, false);
 
-	dev_info(pulse8->dev, "Firmware version %04x\n", vers);
-	if (vers < 2)
-		return 0;
-
-	cmd[0] = MSGCODE_GET_BUILDDATE;
-	if (!err)
-		err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 4);
-	if (!err) {
-		time_t date = (data[0] << 24) | (data[1] << 16) |
-			(data[2] << 8) | data[3];
-		struct tm tm;
-
-		time_to_tm(date, 0, &tm);
-
-		dev_info(pulse8->dev, "Firmware build date %04ld.%02d.%02d %02d:%02d:%02d\n",
-			 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
-			 tm.tm_hour, tm.tm_min, tm.tm_sec);
-	}
-
-	do {
-		if (count)
-			msleep(500);
-		cmd[0] = MSGCODE_SET_AUTO_ENABLED;
-		cmd[1] = 0;
-		err = pulse8_send_and_wait(pulse8, cmd, 2,
-					   MSGCODE_COMMAND_ACCEPTED, 1);
-		if (err && count == 0) {
-			dev_info(pulse8->dev, "No Auto Enabled supported\n");
-			return 0;
-		}
-
-		cmd[0] = MSGCODE_GET_AUTO_ENABLED;
-		if (!err)
-			err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
-		if (!err && !data[0]) {
-			cmd[0] = MSGCODE_WRITE_EEPROM;
-			err = pulse8_send_and_wait(pulse8, cmd, 1,
-						   MSGCODE_COMMAND_ACCEPTED, 1);
-			cmd[0] = MSGCODE_GET_AUTO_ENABLED;
-			if (!err)
-				err = pulse8_send_and_wait(pulse8, cmd, 1,
-							   cmd[0], 1);
-		}
-	} while (!err && data[0] && count++ < 5);
-
-	if (!err && data[0])
-		err = -EIO;
-
-	return err;
+	return 0;
 }
 
 static int pulse8_cec_adap_enable(struct cec_adapter *adap, bool enable)
@@ -364,9 +468,11 @@
 {
 	struct pulse8 *pulse8 = adap->priv;
 	u16 mask = 0;
-	u8 cmd[3];
-	int err;
+	u16 pa = adap->phys_addr;
+	u8 cmd[16];
+	int err = 0;
 
+	mutex_lock(&pulse8->config_lock);
 	if (log_addr != CEC_LOG_ADDR_INVALID)
 		mask = 1 << log_addr;
 	cmd[0] = MSGCODE_SET_ACK_MASK;
@@ -374,8 +480,106 @@
 	cmd[2] = mask & 0xff;
 	err = pulse8_send_and_wait(pulse8, cmd, 3,
 				   MSGCODE_COMMAND_ACCEPTED, 0);
-	if (mask == 0)
-		return 0;
+	if ((err && mask != 0) || pulse8->restoring_config)
+		goto unlock;
+
+	cmd[0] = MSGCODE_SET_AUTO_ENABLED;
+	cmd[1] = log_addr == CEC_LOG_ADDR_INVALID ? 0 : 1;
+	err = pulse8_send_and_wait(pulse8, cmd, 2,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if (err)
+		goto unlock;
+	pulse8->autonomous = cmd[1];
+	if (log_addr == CEC_LOG_ADDR_INVALID)
+		goto unlock;
+
+	cmd[0] = MSGCODE_SET_DEVICE_TYPE;
+	cmd[1] = adap->log_addrs.primary_device_type[0];
+	err = pulse8_send_and_wait(pulse8, cmd, 2,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if (err)
+		goto unlock;
+
+	switch (adap->log_addrs.primary_device_type[0]) {
+	case CEC_OP_PRIM_DEVTYPE_TV:
+		mask = CEC_LOG_ADDR_MASK_TV;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_RECORD:
+		mask = CEC_LOG_ADDR_MASK_RECORD;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_TUNER:
+		mask = CEC_LOG_ADDR_MASK_TUNER;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_PLAYBACK:
+		mask = CEC_LOG_ADDR_MASK_PLAYBACK;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM:
+		mask = CEC_LOG_ADDR_MASK_AUDIOSYSTEM;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_SWITCH:
+		mask = CEC_LOG_ADDR_MASK_UNREGISTERED;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_PROCESSOR:
+		mask = CEC_LOG_ADDR_MASK_SPECIFIC;
+		break;
+	default:
+		mask = 0;
+		break;
+	}
+	cmd[0] = MSGCODE_SET_LOGICAL_ADDRESS_MASK;
+	cmd[1] = mask >> 8;
+	cmd[2] = mask & 0xff;
+	err = pulse8_send_and_wait(pulse8, cmd, 3,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if (err)
+		goto unlock;
+
+	cmd[0] = MSGCODE_SET_DEFAULT_LOGICAL_ADDRESS;
+	cmd[1] = log_addr;
+	err = pulse8_send_and_wait(pulse8, cmd, 2,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if (err)
+		goto unlock;
+
+	cmd[0] = MSGCODE_SET_PHYSICAL_ADDRESS;
+	cmd[1] = pa >> 8;
+	cmd[2] = pa & 0xff;
+	err = pulse8_send_and_wait(pulse8, cmd, 3,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if (err)
+		goto unlock;
+
+	cmd[0] = MSGCODE_SET_HDMI_VERSION;
+	cmd[1] = adap->log_addrs.cec_version;
+	err = pulse8_send_and_wait(pulse8, cmd, 2,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if (err)
+		goto unlock;
+
+	if (adap->log_addrs.osd_name[0]) {
+		size_t osd_len = strlen(adap->log_addrs.osd_name);
+		char *osd_str = cmd + 1;
+
+		cmd[0] = MSGCODE_SET_OSD_NAME;
+		strncpy(cmd + 1, adap->log_addrs.osd_name, 13);
+		if (osd_len < 4) {
+			memset(osd_str + osd_len, ' ', 4 - osd_len);
+			osd_len = 4;
+			osd_str[osd_len] = '\0';
+			strcpy(adap->log_addrs.osd_name, osd_str);
+		}
+		err = pulse8_send_and_wait(pulse8, cmd, 1 + osd_len,
+					   MSGCODE_COMMAND_ACCEPTED, 0);
+		if (err)
+			goto unlock;
+	}
+
+unlock:
+	if (pulse8->restoring_config)
+		pulse8->restoring_config = false;
+	else
+		pulse8->config_pending = true;
+	mutex_unlock(&pulse8->config_lock);
 	return err;
 }
 
@@ -437,6 +641,8 @@
 		CEC_CAP_PASSTHROUGH | CEC_CAP_RC | CEC_CAP_MONITOR_ALL;
 	struct pulse8 *pulse8;
 	int err = -ENOMEM;
+	struct cec_log_addrs log_addrs = {};
+	u16 pa = CEC_PHYS_ADDR_INVALID;
 
 	pulse8 = kzalloc(sizeof(*pulse8), GFP_KERNEL);
 
@@ -453,12 +659,15 @@
 	pulse8->dev = &serio->dev;
 	serio_set_drvdata(serio, pulse8);
 	INIT_WORK(&pulse8->work, pulse8_irq_work_handler);
+	mutex_init(&pulse8->write_lock);
+	mutex_init(&pulse8->config_lock);
+	pulse8->config_pending = false;
 
 	err = serio_open(serio, drv);
 	if (err)
 		goto delete_adap;
 
-	err = pulse8_setup(pulse8, serio);
+	err = pulse8_setup(pulse8, serio, &log_addrs, &pa);
 	if (err)
 		goto close_serio;
 
@@ -467,6 +676,18 @@
 		goto close_serio;
 
 	pulse8->dev = &pulse8->adap->devnode.dev;
+
+	if (persistent_config && pulse8->autonomous) {
+		err = pulse8_apply_persistent_config(pulse8, &log_addrs, pa);
+		if (err)
+			goto close_serio;
+		pulse8->restoring_config = true;
+	}
+
+	INIT_DELAYED_WORK(&pulse8->ping_eeprom_work,
+			  pulse8_ping_eeprom_work_handler);
+	schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD);
+
 	return 0;
 
 close_serio:
@@ -479,6 +700,33 @@
 	return err;
 }
 
+static void pulse8_ping_eeprom_work_handler(struct work_struct *work)
+{
+	struct pulse8 *pulse8 =
+		container_of(work, struct pulse8, ping_eeprom_work.work);
+	u8 cmd;
+
+	schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD);
+	cmd = MSGCODE_PING;
+	pulse8_send_and_wait(pulse8, &cmd, 1,
+			     MSGCODE_COMMAND_ACCEPTED, 0);
+
+	if (pulse8->vers < 2)
+		return;
+
+	mutex_lock(&pulse8->config_lock);
+	if (pulse8->config_pending && persistent_config) {
+		dev_dbg(pulse8->dev, "writing pending config to EEPROM\n");
+		cmd = MSGCODE_WRITE_EEPROM;
+		if (pulse8_send_and_wait(pulse8, &cmd, 1,
+					 MSGCODE_COMMAND_ACCEPTED, 0))
+			dev_info(pulse8->dev, "failed to write pending config to EEPROM\n");
+		else
+			pulse8->config_pending = false;
+	}
+	mutex_unlock(&pulse8->config_lock);
+}
+
 static struct serio_device_id pulse8_serio_ids[] = {
 	{
 		.type	= SERIO_RS232,
diff --git a/drivers/staging/media/s5p-cec/s5p_cec.c b/drivers/staging/media/s5p-cec/s5p_cec.c
index 7833327..1780a08 100644
--- a/drivers/staging/media/s5p-cec/s5p_cec.c
+++ b/drivers/staging/media/s5p-cec/s5p_cec.c
@@ -173,7 +173,7 @@
 	int ret;
 
 	cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL);
-	if (!dev)
+	if (!cec)
 		return -ENOMEM;
 
 	cec->dev = dev;
@@ -250,22 +250,9 @@
 	return 0;
 }
 
-static int __maybe_unused s5p_cec_suspend(struct device *dev)
-{
-	if (pm_runtime_suspended(dev))
-		return 0;
-	return s5p_cec_runtime_suspend(dev);
-}
-
-static int __maybe_unused s5p_cec_resume(struct device *dev)
-{
-	if (pm_runtime_suspended(dev))
-		return 0;
-	return s5p_cec_runtime_resume(dev);
-}
-
 static const struct dev_pm_ops s5p_cec_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(s5p_cec_suspend, s5p_cec_resume)
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
 	SET_RUNTIME_PM_OPS(s5p_cec_runtime_suspend, s5p_cec_runtime_resume,
 			   NULL)
 };
diff --git a/drivers/staging/media/st-cec/Kconfig b/drivers/staging/media/st-cec/Kconfig
new file mode 100644
index 0000000..784d2c6
--- /dev/null
+++ b/drivers/staging/media/st-cec/Kconfig
@@ -0,0 +1,8 @@
+config VIDEO_STI_HDMI_CEC
+       tristate "STMicroelectronics STiH4xx HDMI CEC driver"
+       depends on VIDEO_DEV && MEDIA_CEC && (ARCH_STI || COMPILE_TEST)
+       ---help---
+         This is a driver for STIH4xx HDMI CEC interface. It uses the
+         generic CEC framework interface.
+         CEC bus is present in the HDMI connector and enables communication
+         between compatible devices.
diff --git a/drivers/staging/media/st-cec/Makefile b/drivers/staging/media/st-cec/Makefile
new file mode 100644
index 0000000..f07905e
--- /dev/null
+++ b/drivers/staging/media/st-cec/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_STI_HDMI_CEC) += stih-cec.o
diff --git a/drivers/staging/media/st-cec/stih-cec.c b/drivers/staging/media/st-cec/stih-cec.c
new file mode 100644
index 0000000..2143448
--- /dev/null
+++ b/drivers/staging/media/st-cec/stih-cec.c
@@ -0,0 +1,380 @@
+/*
+ * drivers/staging/media/st-cec/stih-cec.c
+ *
+ * STIH4xx CEC driver
+ * Copyright (C) STMicroelectronic SA 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+
+#include <media/cec.h>
+
+#define CEC_NAME	"stih-cec"
+
+/* CEC registers  */
+#define CEC_CLK_DIV           0x0
+#define CEC_CTRL              0x4
+#define CEC_IRQ_CTRL          0x8
+#define CEC_STATUS            0xC
+#define CEC_EXT_STATUS        0x10
+#define CEC_TX_CTRL           0x14
+#define CEC_FREE_TIME_THRESH  0x18
+#define CEC_BIT_TOUT_THRESH   0x1C
+#define CEC_BIT_PULSE_THRESH  0x20
+#define CEC_DATA              0x24
+#define CEC_TX_ARRAY_CTRL     0x28
+#define CEC_CTRL2             0x2C
+#define CEC_TX_ERROR_STS      0x30
+#define CEC_ADDR_TABLE        0x34
+#define CEC_DATA_ARRAY_CTRL   0x38
+#define CEC_DATA_ARRAY_STATUS 0x3C
+#define CEC_TX_DATA_BASE      0x40
+#define CEC_TX_DATA_TOP       0x50
+#define CEC_TX_DATA_SIZE      0x1
+#define CEC_RX_DATA_BASE      0x54
+#define CEC_RX_DATA_TOP       0x64
+#define CEC_RX_DATA_SIZE      0x1
+
+/* CEC_CTRL2 */
+#define CEC_LINE_INACTIVE_EN   BIT(0)
+#define CEC_AUTO_BUS_ERR_EN    BIT(1)
+#define CEC_STOP_ON_ARB_ERR_EN BIT(2)
+#define CEC_TX_REQ_WAIT_EN     BIT(3)
+
+/* CEC_DATA_ARRAY_CTRL */
+#define CEC_TX_ARRAY_EN          BIT(0)
+#define CEC_RX_ARRAY_EN          BIT(1)
+#define CEC_TX_ARRAY_RESET       BIT(2)
+#define CEC_RX_ARRAY_RESET       BIT(3)
+#define CEC_TX_N_OF_BYTES_IRQ_EN BIT(4)
+#define CEC_TX_STOP_ON_NACK      BIT(7)
+
+/* CEC_TX_ARRAY_CTRL */
+#define CEC_TX_N_OF_BYTES  0x1F
+#define CEC_TX_START       BIT(5)
+#define CEC_TX_AUTO_SOM_EN BIT(6)
+#define CEC_TX_AUTO_EOM_EN BIT(7)
+
+/* CEC_IRQ_CTRL */
+#define CEC_TX_DONE_IRQ_EN   BIT(0)
+#define CEC_ERROR_IRQ_EN     BIT(2)
+#define CEC_RX_DONE_IRQ_EN   BIT(3)
+#define CEC_RX_SOM_IRQ_EN    BIT(4)
+#define CEC_RX_EOM_IRQ_EN    BIT(5)
+#define CEC_FREE_TIME_IRQ_EN BIT(6)
+#define CEC_PIN_STS_IRQ_EN   BIT(7)
+
+/* CEC_CTRL */
+#define CEC_IN_FILTER_EN    BIT(0)
+#define CEC_PWR_SAVE_EN     BIT(1)
+#define CEC_EN              BIT(4)
+#define CEC_ACK_CTRL        BIT(5)
+#define CEC_RX_RESET_EN     BIT(6)
+#define CEC_IGNORE_RX_ERROR BIT(7)
+
+/* CEC_STATUS */
+#define CEC_TX_DONE_STS       BIT(0)
+#define CEC_TX_ACK_GET_STS    BIT(1)
+#define CEC_ERROR_STS         BIT(2)
+#define CEC_RX_DONE_STS       BIT(3)
+#define CEC_RX_SOM_STS        BIT(4)
+#define CEC_RX_EOM_STS        BIT(5)
+#define CEC_FREE_TIME_IRQ_STS BIT(6)
+#define CEC_PIN_STS           BIT(7)
+#define CEC_SBIT_TOUT_STS     BIT(8)
+#define CEC_DBIT_TOUT_STS     BIT(9)
+#define CEC_LPULSE_ERROR_STS  BIT(10)
+#define CEC_HPULSE_ERROR_STS  BIT(11)
+#define CEC_TX_ERROR          BIT(12)
+#define CEC_TX_ARB_ERROR      BIT(13)
+#define CEC_RX_ERROR_MIN      BIT(14)
+#define CEC_RX_ERROR_MAX      BIT(15)
+
+/* Signal free time in bit periods (2.4ms) */
+#define CEC_PRESENT_INIT_SFT 7
+#define CEC_NEW_INIT_SFT     5
+#define CEC_RETRANSMIT_SFT   3
+
+/* Constants for CEC_BIT_TOUT_THRESH register */
+#define CEC_SBIT_TOUT_47MS BIT(1)
+#define CEC_SBIT_TOUT_48MS BIT(0) | BIT(1)
+#define CEC_SBIT_TOUT_50MS BIT(2)
+#define CEC_DBIT_TOUT_27MS BIT(0)
+#define CEC_DBIT_TOUT_28MS BIT(1)
+#define CEC_DBIT_TOUT_29MS BIT(0) | BIT(1)
+
+/* Constants for CEC_BIT_PULSE_THRESH register */
+#define CEC_BIT_LPULSE_03MS BIT(1)
+#define CEC_BIT_HPULSE_03MS BIT(3)
+
+/* Constants for CEC_DATA_ARRAY_STATUS register */
+#define CEC_RX_N_OF_BYTES                     0x1F
+#define CEC_TX_N_OF_BYTES_SENT                BIT(5)
+#define CEC_RX_OVERRUN                        BIT(6)
+
+struct stih_cec {
+	struct cec_adapter	*adap;
+	struct device		*dev;
+	struct clk		*clk;
+	void __iomem		*regs;
+	int			irq;
+	u32			irq_status;
+};
+
+static int stih_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+	struct stih_cec *cec = adap->priv;
+
+	if (enable) {
+		/* The doc says (input TCLK_PERIOD * CEC_CLK_DIV) = 0.1ms */
+		unsigned long clk_freq = clk_get_rate(cec->clk);
+		u32 cec_clk_div = clk_freq / 10000;
+
+		writel(cec_clk_div, cec->regs + CEC_CLK_DIV);
+
+		/* Configuration of the durations activating a timeout */
+		writel(CEC_SBIT_TOUT_47MS | (CEC_DBIT_TOUT_28MS << 4),
+		       cec->regs + CEC_BIT_TOUT_THRESH);
+
+		/* Configuration of the smallest allowed duration for pulses */
+		writel(CEC_BIT_LPULSE_03MS | CEC_BIT_HPULSE_03MS,
+		       cec->regs + CEC_BIT_PULSE_THRESH);
+
+		/* Minimum received bit period threshold */
+		writel(BIT(5) | BIT(7), cec->regs + CEC_TX_CTRL);
+
+		/* Configuration of transceiver data arrays */
+		writel(CEC_TX_ARRAY_EN | CEC_RX_ARRAY_EN | CEC_TX_STOP_ON_NACK,
+		       cec->regs + CEC_DATA_ARRAY_CTRL);
+
+		/* Configuration of the control bits for CEC Transceiver */
+		writel(CEC_IN_FILTER_EN | CEC_EN | CEC_RX_RESET_EN,
+		       cec->regs + CEC_CTRL);
+
+		/* Clear logical addresses */
+		writel(0, cec->regs + CEC_ADDR_TABLE);
+
+		/* Clear the status register */
+		writel(0x0, cec->regs + CEC_STATUS);
+
+		/* Enable the interrupts */
+		writel(CEC_TX_DONE_IRQ_EN | CEC_RX_DONE_IRQ_EN |
+		       CEC_RX_SOM_IRQ_EN | CEC_RX_EOM_IRQ_EN |
+		       CEC_ERROR_IRQ_EN,
+		       cec->regs + CEC_IRQ_CTRL);
+
+	} else {
+		/* Clear logical addresses */
+		writel(0, cec->regs + CEC_ADDR_TABLE);
+
+		/* Clear the status register */
+		writel(0x0, cec->regs + CEC_STATUS);
+
+		/* Disable the interrupts */
+		writel(0, cec->regs + CEC_IRQ_CTRL);
+	}
+
+	return 0;
+}
+
+static int stih_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+	struct stih_cec *cec = adap->priv;
+	u32 reg = readl(cec->regs + CEC_ADDR_TABLE);
+
+	reg |= 1 << logical_addr;
+
+	if (logical_addr == CEC_LOG_ADDR_INVALID)
+		reg = 0;
+
+	writel(reg, cec->regs + CEC_ADDR_TABLE);
+
+	return 0;
+}
+
+static int stih_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+				  u32 signal_free_time, struct cec_msg *msg)
+{
+	struct stih_cec *cec = adap->priv;
+	int i;
+
+	/* Copy message into registers */
+	for (i = 0; i < msg->len; i++)
+		writeb(msg->msg[i], cec->regs + CEC_TX_DATA_BASE + i);
+
+	/* Start transmission, configure hardware to add start and stop bits
+	 * Signal free time is handled by the hardware
+	 */
+	writel(CEC_TX_AUTO_SOM_EN | CEC_TX_AUTO_EOM_EN | CEC_TX_START |
+	       msg->len, cec->regs + CEC_TX_ARRAY_CTRL);
+
+	return 0;
+}
+
+static void stih_tx_done(struct stih_cec *cec, u32 status)
+{
+	if (status & CEC_TX_ERROR) {
+		cec_transmit_done(cec->adap, CEC_TX_STATUS_ERROR, 0, 0, 0, 1);
+		return;
+	}
+
+	if (status & CEC_TX_ARB_ERROR) {
+		cec_transmit_done(cec->adap,
+				  CEC_TX_STATUS_ARB_LOST, 1, 0, 0, 0);
+		return;
+	}
+
+	if (!(status & CEC_TX_ACK_GET_STS)) {
+		cec_transmit_done(cec->adap, CEC_TX_STATUS_NACK, 0, 1, 0, 0);
+		return;
+	}
+
+	cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
+}
+
+static void stih_rx_done(struct stih_cec *cec, u32 status)
+{
+	struct cec_msg msg = {};
+	u8 i;
+
+	if (status & CEC_RX_ERROR_MIN)
+		return;
+
+	if (status & CEC_RX_ERROR_MAX)
+		return;
+
+	msg.len = readl(cec->regs + CEC_DATA_ARRAY_STATUS) & 0x1f;
+
+	if (!msg.len)
+		return;
+
+	if (msg.len > 16)
+		msg.len = 16;
+
+	for (i = 0; i < msg.len; i++)
+		msg.msg[i] = readl(cec->regs + CEC_RX_DATA_BASE + i);
+
+	cec_received_msg(cec->adap, &msg);
+}
+
+static irqreturn_t stih_cec_irq_handler_thread(int irq, void *priv)
+{
+	struct stih_cec *cec = priv;
+
+	if (cec->irq_status & CEC_TX_DONE_STS)
+		stih_tx_done(cec, cec->irq_status);
+
+	if (cec->irq_status & CEC_RX_DONE_STS)
+		stih_rx_done(cec, cec->irq_status);
+
+	cec->irq_status = 0;
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t stih_cec_irq_handler(int irq, void *priv)
+{
+	struct stih_cec *cec = priv;
+
+	cec->irq_status = readl(cec->regs + CEC_STATUS);
+	writel(cec->irq_status, cec->regs + CEC_STATUS);
+
+	return IRQ_WAKE_THREAD;
+}
+
+static const struct cec_adap_ops sti_cec_adap_ops = {
+	.adap_enable = stih_cec_adap_enable,
+	.adap_log_addr = stih_cec_adap_log_addr,
+	.adap_transmit = stih_cec_adap_transmit,
+};
+
+static int stih_cec_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct stih_cec *cec;
+	int ret;
+
+	cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
+	if (!cec)
+		return -ENOMEM;
+
+	cec->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	cec->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(cec->regs))
+		return PTR_ERR(cec->regs);
+
+	cec->irq = platform_get_irq(pdev, 0);
+	if (cec->irq < 0)
+		return cec->irq;
+
+	ret = devm_request_threaded_irq(dev, cec->irq, stih_cec_irq_handler,
+					stih_cec_irq_handler_thread, 0,
+					pdev->name, cec);
+	if (ret)
+		return ret;
+
+	cec->clk = devm_clk_get(dev, "cec-clk");
+	if (IS_ERR(cec->clk)) {
+		dev_err(dev, "Cannot get cec clock\n");
+		return PTR_ERR(cec->clk);
+	}
+
+	cec->adap = cec_allocate_adapter(&sti_cec_adap_ops, cec,
+			CEC_NAME,
+			CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH |
+			CEC_CAP_PHYS_ADDR | CEC_CAP_TRANSMIT,
+			1, &pdev->dev);
+	ret = PTR_ERR_OR_ZERO(cec->adap);
+	if (ret)
+		return ret;
+
+	ret = cec_register_adapter(cec->adap);
+	if (ret) {
+		cec_delete_adapter(cec->adap);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, cec);
+	return 0;
+}
+
+static int stih_cec_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id stih_cec_match[] = {
+	{
+		.compatible	= "st,stih-cec",
+	},
+	{},
+};
+
+static struct platform_driver stih_cec_pdrv = {
+	.probe	= stih_cec_probe,
+	.remove = stih_cec_remove,
+	.driver = {
+		.name		= CEC_NAME,
+		.of_match_table	= stih_cec_match,
+	},
+};
+
+module_platform_driver(stih_cec_pdrv);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@linaro.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("STIH4xx CEC driver");
diff --git a/drivers/staging/media/tw686x-kh/Kconfig b/drivers/staging/media/tw686x-kh/Kconfig
deleted file mode 100644
index 6264d30..0000000
--- a/drivers/staging/media/tw686x-kh/Kconfig
+++ /dev/null
@@ -1,17 +0,0 @@
-config VIDEO_TW686X_KH
-	tristate "Intersil/Techwell TW686x Video For Linux"
-	depends on VIDEO_DEV && PCI && VIDEO_V4L2
-	depends on !(VIDEO_TW686X=y || VIDEO_TW686X=m) || COMPILE_TEST
-	select VIDEOBUF2_DMA_SG
-	help
-	  Support for Intersil/Techwell TW686x-based frame grabber cards.
-
-	  Currently supported chips:
-	  - TW6864 (4 video channels),
-	  - TW6865 (4 video channels, not tested, second generation chip),
-	  - TW6868 (8 video channels but only 4 first channels using
-	    built-in video decoder are supported, not tested),
-	  - TW6869 (8 video channels, second generation chip).
-
-	  To compile this driver as a module, choose M here: the module
-	  will be named tw686x-kh.
diff --git a/drivers/staging/media/tw686x-kh/Makefile b/drivers/staging/media/tw686x-kh/Makefile
deleted file mode 100644
index 2a36a38..0000000
--- a/drivers/staging/media/tw686x-kh/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-tw686x-kh-objs := tw686x-kh-core.o tw686x-kh-video.o
-
-obj-$(CONFIG_VIDEO_TW686X_KH) += tw686x-kh.o
diff --git a/drivers/staging/media/tw686x-kh/TODO b/drivers/staging/media/tw686x-kh/TODO
deleted file mode 100644
index 480a495..0000000
--- a/drivers/staging/media/tw686x-kh/TODO
+++ /dev/null
@@ -1,6 +0,0 @@
-TODO:
-
-- implement V4L2_FIELD_INTERLACED* mode(s).
-- add audio support
-
-Please Cc: patches to Krzysztof Halasa <khalasa@piap.pl>.
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh-core.c b/drivers/staging/media/tw686x-kh/tw686x-kh-core.c
deleted file mode 100644
index 03b3b62..0000000
--- a/drivers/staging/media/tw686x-kh/tw686x-kh-core.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright (C) 2015 Industrial Research Institute for Automation
- * and Measurements PIAP
- *
- * Written by Krzysztof Ha?asa.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "tw686x-kh.h"
-#include "tw686x-kh-regs.h"
-
-static irqreturn_t tw686x_irq(int irq, void *dev_id)
-{
-	struct tw686x_dev *dev = (struct tw686x_dev *)dev_id;
-	u32 int_status = reg_read(dev, INT_STATUS); /* cleared on read */
-	unsigned long flags;
-	unsigned int handled = 0;
-
-	if (int_status) {
-		spin_lock_irqsave(&dev->irq_lock, flags);
-		dev->dma_requests |= int_status;
-		spin_unlock_irqrestore(&dev->irq_lock, flags);
-
-		if (int_status & 0xFF0000FF)
-			handled = tw686x_kh_video_irq(dev);
-	}
-
-	return IRQ_RETVAL(handled);
-}
-
-static int tw686x_probe(struct pci_dev *pci_dev,
-			const struct pci_device_id *pci_id)
-{
-	struct tw686x_dev *dev;
-	int err;
-
-	dev = devm_kzalloc(&pci_dev->dev, sizeof(*dev) +
-			   (pci_id->driver_data & TYPE_MAX_CHANNELS) *
-			   sizeof(dev->video_channels[0]), GFP_KERNEL);
-	if (!dev)
-		return -ENOMEM;
-
-	sprintf(dev->name, "TW%04X", pci_dev->device);
-	dev->type = pci_id->driver_data;
-
-	pr_info("%s: PCI %s, IRQ %d, MMIO 0x%lx\n", dev->name,
-		pci_name(pci_dev), pci_dev->irq,
-		(unsigned long)pci_resource_start(pci_dev, 0));
-
-	dev->pci_dev = pci_dev;
-	if (pcim_enable_device(pci_dev))
-		return -EIO;
-
-	pci_set_master(pci_dev);
-
-	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
-		pr_err("%s: 32-bit PCI DMA not supported\n", dev->name);
-		return -EIO;
-	}
-
-	err = pci_request_regions(pci_dev, dev->name);
-	if (err < 0) {
-		pr_err("%s: Unable to get MMIO region\n", dev->name);
-		return err;
-	}
-
-	dev->mmio = pci_ioremap_bar(pci_dev, 0);
-	if (!dev->mmio) {
-		pr_err("%s: Unable to remap MMIO region\n", dev->name);
-		return -EIO;
-	}
-
-	reg_write(dev, SYS_SOFT_RST, 0x0F); /* Reset all subsystems */
-	mdelay(1);
-
-	reg_write(dev, SRST[0], 0x3F);
-	if (max_channels(dev) > 4)
-		reg_write(dev, SRST[1], 0x3F);
-	reg_write(dev, DMA_CMD, 0);
-	reg_write(dev, DMA_CHANNEL_ENABLE, 0);
-	reg_write(dev, DMA_CHANNEL_TIMEOUT, 0x3EFF0FF0);
-	reg_write(dev, DMA_TIMER_INTERVAL, 0x38000);
-	reg_write(dev, DMA_CONFIG, 0xFFFFFF04);
-
-	spin_lock_init(&dev->irq_lock);
-
-	err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw686x_irq,
-			       IRQF_SHARED, dev->name, dev);
-	if (err < 0) {
-		pr_err("%s: Unable to get IRQ\n", dev->name);
-		return err;
-	}
-
-	err = tw686x_kh_video_init(dev);
-	if (err)
-		return err;
-
-	pci_set_drvdata(pci_dev, dev);
-	return 0;
-}
-
-static void tw686x_remove(struct pci_dev *pci_dev)
-{
-	struct tw686x_dev *dev = pci_get_drvdata(pci_dev);
-
-	tw686x_kh_video_free(dev);
-}
-
-/* driver_data is number of A/V channels */
-static const struct pci_device_id tw686x_pci_tbl[] = {
-	{PCI_DEVICE(0x1797, 0x6864), .driver_data = 4},
-	/* not tested */
-	{PCI_DEVICE(0x1797, 0x6865), .driver_data = 4 | TYPE_SECOND_GEN},
-	/* TW6868 supports 8 A/V channels with an external TW2865 chip -
-	   not supported by the driver */
-	{PCI_DEVICE(0x1797, 0x6868), .driver_data = 4}, /* not tested */
-	{PCI_DEVICE(0x1797, 0x6869), .driver_data = 8 | TYPE_SECOND_GEN},
-	{}
-};
-
-static struct pci_driver tw686x_pci_driver = {
-	.name = "tw686x-kh",
-	.id_table = tw686x_pci_tbl,
-	.probe = tw686x_probe,
-	.remove = tw686x_remove,
-};
-
-MODULE_DESCRIPTION("Driver for video frame grabber cards based on Intersil/Techwell TW686[4589]");
-MODULE_AUTHOR("Krzysztof Halasa");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(pci, tw686x_pci_tbl);
-module_pci_driver(tw686x_pci_driver);
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh-regs.h b/drivers/staging/media/tw686x-kh/tw686x-kh-regs.h
deleted file mode 100644
index 53e1889..0000000
--- a/drivers/staging/media/tw686x-kh/tw686x-kh-regs.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* DMA controller registers */
-#define REG8_1(a0) ((const u16[8]) {a0, a0 + 1, a0 + 2, a0 + 3,	\
-				   a0 + 4, a0 + 5, a0 + 6, a0 + 7})
-#define REG8_2(a0) ((const u16[8]) {a0, a0 + 2, a0 + 4, a0 + 6,	\
-				   a0 + 8, a0 + 0xA, a0 + 0xC, a0 + 0xE})
-#define REG8_8(a0) ((const u16[8]) {a0, a0 + 8, a0 + 0x10, a0 + 0x18,	\
-				   a0 + 0x20, a0 + 0x28, a0 + 0x30, a0 + 0x38})
-#define INT_STATUS		0x00
-#define PB_STATUS		0x01
-#define DMA_CMD			0x02
-#define VIDEO_FIFO_STATUS	0x03
-#define VIDEO_CHANNEL_ID	0x04
-#define VIDEO_PARSER_STATUS	0x05
-#define SYS_SOFT_RST		0x06
-#define DMA_PAGE_TABLE0_ADDR	((const u16[8]) {0x08, 0xD0, 0xD2, 0xD4, \
-						0xD6, 0xD8, 0xDA, 0xDC})
-#define DMA_PAGE_TABLE1_ADDR	((const u16[8]) {0x09, 0xD1, 0xD3, 0xD5, \
-						0xD7, 0xD9, 0xDB, 0xDD})
-#define DMA_CHANNEL_ENABLE	0x0A
-#define DMA_CONFIG		0x0B
-#define DMA_TIMER_INTERVAL	0x0C
-#define DMA_CHANNEL_TIMEOUT	0x0D
-#define VDMA_CHANNEL_CONFIG	REG8_1(0x10)
-#define ADMA_P_ADDR		REG8_2(0x18)
-#define ADMA_B_ADDR		REG8_2(0x19)
-#define DMA10_P_ADDR		0x28 /* ??? */
-#define DMA10_B_ADDR		0x29
-#define VIDEO_CONTROL1		0x2A
-#define VIDEO_CONTROL2		0x2B
-#define AUDIO_CONTROL1		0x2C
-#define AUDIO_CONTROL2		0x2D
-#define PHASE_REF		0x2E
-#define GPIO_REG		0x2F
-#define INTL_HBAR_CTRL		REG8_1(0x30)
-#define AUDIO_CONTROL3		0x38
-#define VIDEO_FIELD_CTRL	REG8_1(0x39)
-#define HSCALER_CTRL		REG8_1(0x42)
-#define VIDEO_SIZE		REG8_1(0x4A)
-#define VIDEO_SIZE_F2		REG8_1(0x52)
-#define MD_CONF			REG8_1(0x60)
-#define MD_INIT			REG8_1(0x68)
-#define MD_MAP0			REG8_1(0x70)
-#define VDMA_P_ADDR		REG8_8(0x80) /* not used in DMA SG mode */
-#define VDMA_WHP		REG8_8(0x81)
-#define VDMA_B_ADDR		REG8_8(0x82)
-#define VDMA_F2_P_ADDR		REG8_8(0x84)
-#define VDMA_F2_WHP		REG8_8(0x85)
-#define VDMA_F2_B_ADDR		REG8_8(0x86)
-#define EP_REG_ADDR		0xFE
-#define EP_REG_DATA		0xFF
-
-/* Video decoder registers */
-#define VDREG8(a0) ((const u16[8]) {			\
-	a0 + 0x000, a0 + 0x010, a0 + 0x020, a0 + 0x030,	\
-	a0 + 0x100, a0 + 0x110, a0 + 0x120, a0 + 0x130})
-#define VIDSTAT			VDREG8(0x100)
-#define BRIGHT			VDREG8(0x101)
-#define CONTRAST		VDREG8(0x102)
-#define SHARPNESS		VDREG8(0x103)
-#define SAT_U			VDREG8(0x104)
-#define SAT_V			VDREG8(0x105)
-#define HUE			VDREG8(0x106)
-#define CROP_HI			VDREG8(0x107)
-#define VDELAY_LO		VDREG8(0x108)
-#define VACTIVE_LO		VDREG8(0x109)
-#define HDELAY_LO		VDREG8(0x10A)
-#define HACTIVE_LO		VDREG8(0x10B)
-#define MVSN			VDREG8(0x10C)
-#define STATUS2			VDREG8(0x10C)
-#define SDT			VDREG8(0x10E)
-#define SDT_EN			VDREG8(0x10F)
-
-#define VSCALE_LO		VDREG8(0x144)
-#define SCALE_HI		VDREG8(0x145)
-#define HSCALE_LO		VDREG8(0x146)
-#define F2CROP_HI		VDREG8(0x147)
-#define F2VDELAY_LO		VDREG8(0x148)
-#define F2VACTIVE_LO		VDREG8(0x149)
-#define F2HDELAY_LO		VDREG8(0x14A)
-#define F2HACTIVE_LO		VDREG8(0x14B)
-#define F2VSCALE_LO		VDREG8(0x14C)
-#define F2SCALE_HI		VDREG8(0x14D)
-#define F2HSCALE_LO		VDREG8(0x14E)
-#define F2CNT			VDREG8(0x14F)
-
-#define VDREG2(a0) ((const u16[2]) {a0, a0 + 0x100})
-#define SRST			VDREG2(0x180)
-#define ACNTL			VDREG2(0x181)
-#define ACNTL2			VDREG2(0x182)
-#define CNTRL1			VDREG2(0x183)
-#define CKHY			VDREG2(0x184)
-#define SHCOR			VDREG2(0x185)
-#define CORING			VDREG2(0x186)
-#define CLMPG			VDREG2(0x187)
-#define IAGC			VDREG2(0x188)
-#define VCTRL1			VDREG2(0x18F)
-#define MISC1			VDREG2(0x194)
-#define LOOP			VDREG2(0x195)
-#define MISC2			VDREG2(0x196)
-
-#define CLMD			VDREG2(0x197)
-#define AIGAIN			((const u16[8]) {0x1D0, 0x1D1, 0x1D2, 0x1D3, \
-						 0x2D0, 0x2D1, 0x2D2, 0x2D3})
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh-video.c b/drivers/staging/media/tw686x-kh/tw686x-kh-video.c
deleted file mode 100644
index 9bf32ae..0000000
--- a/drivers/staging/media/tw686x-kh/tw686x-kh-video.c
+++ /dev/null
@@ -1,813 +0,0 @@
-/*
- * Copyright (C) 2015 Industrial Research Institute for Automation
- * and Measurements PIAP
- *
- * Written by Krzysztof Ha?asa.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-event.h>
-#include "tw686x-kh.h"
-#include "tw686x-kh-regs.h"
-
-#define MAX_SG_ENTRY_SIZE (/* 8192 - 128 */ 4096)
-#define MAX_SG_DESC_COUNT 256 /* PAL 704x576 needs up to 198 4-KB pages */
-
-static const struct tw686x_format formats[] = {
-	{
-		.name = "4:2:2 packed, UYVY", /* aka Y422 */
-		.fourcc = V4L2_PIX_FMT_UYVY,
-		.mode = 0,
-		.depth = 16,
-	}, {
-#if 0
-		.name = "4:2:0 packed, YUV",
-		.mode = 1,	/* non-standard */
-		.depth = 12,
-	}, {
-		.name = "4:1:1 packed, YUV",
-		.mode = 2,	/* non-standard */
-		.depth = 12,
-	}, {
-#endif
-		.name = "4:1:1 packed, YUV",
-		.fourcc = V4L2_PIX_FMT_Y41P,
-		.mode = 3,
-		.depth = 12,
-	}, {
-		.name = "15 bpp RGB",
-		.fourcc = V4L2_PIX_FMT_RGB555,
-		.mode = 4,
-		.depth = 16,
-	}, {
-		.name = "16 bpp RGB",
-		.fourcc = V4L2_PIX_FMT_RGB565,
-		.mode = 5,
-		.depth = 16,
-	}, {
-		.name = "4:2:2 packed, YUYV",
-		.fourcc = V4L2_PIX_FMT_YUYV,
-		.mode = 6,
-		.depth = 16,
-	}
-	/* mode 7 is "reserved" */
-};
-
-static const v4l2_std_id video_standards[7] = {
-	V4L2_STD_NTSC,
-	V4L2_STD_PAL,
-	V4L2_STD_SECAM,
-	V4L2_STD_NTSC_443,
-	V4L2_STD_PAL_M,
-	V4L2_STD_PAL_N,
-	V4L2_STD_PAL_60,
-};
-
-static const struct tw686x_format *format_by_fourcc(unsigned int fourcc)
-{
-	unsigned int cnt;
-
-	for (cnt = 0; cnt < ARRAY_SIZE(formats); cnt++)
-		if (formats[cnt].fourcc == fourcc)
-			return &formats[cnt];
-	return NULL;
-}
-
-static void tw686x_get_format(struct tw686x_video_channel *vc,
-			      struct v4l2_format *f)
-{
-	const struct tw686x_format *format;
-	unsigned int width, height, height_div = 1;
-
-	format = format_by_fourcc(f->fmt.pix.pixelformat);
-	if (!format) {
-		format = &formats[0];
-		f->fmt.pix.pixelformat = format->fourcc;
-	}
-
-	width = 704;
-	if (f->fmt.pix.width < width * 3 / 4 /* halfway */)
-		width /= 2;
-
-	height = (vc->video_standard & V4L2_STD_625_50) ? 576 : 480;
-	if (f->fmt.pix.height < height * 3 / 4 /* halfway */)
-		height_div = 2;
-
-	switch (f->fmt.pix.field) {
-	case V4L2_FIELD_TOP:
-	case V4L2_FIELD_BOTTOM:
-		height_div = 2;
-		break;
-	case V4L2_FIELD_SEQ_BT:
-		if (height_div > 1)
-			f->fmt.pix.field = V4L2_FIELD_BOTTOM;
-		break;
-	default:
-		if (height_div > 1)
-			f->fmt.pix.field = V4L2_FIELD_TOP;
-		else
-			f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
-	}
-	height /= height_div;
-
-	f->fmt.pix.width = width;
-	f->fmt.pix.height = height;
-	f->fmt.pix.bytesperline = f->fmt.pix.width * format->depth / 8;
-	f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
-	f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
-}
-
-/* video queue operations */
-
-static int tw686x_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
-			      unsigned int *nplanes, unsigned int sizes[],
-			      struct device *alloc_devs[])
-{
-	struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
-	unsigned int size = vc->width * vc->height * vc->format->depth / 8;
-
-	if (*nbuffers < 2)
-		*nbuffers = 2;
-
-	if (*nplanes)
-		return sizes[0] < size ? -EINVAL : 0;
-
-	sizes[0] = size;
-	*nplanes = 1;		/* packed formats only */
-	return 0;
-}
-
-static void tw686x_buf_queue(struct vb2_buffer *vb)
-{
-	struct tw686x_video_channel *vc = vb2_get_drv_priv(vb->vb2_queue);
-	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
-	struct tw686x_vb2_buf *buf;
-
-	buf = container_of(vbuf, struct tw686x_vb2_buf, vb);
-
-	spin_lock(&vc->qlock);
-	list_add_tail(&buf->list, &vc->vidq_queued);
-	spin_unlock(&vc->qlock);
-}
-
-static void setup_descs(struct tw686x_video_channel *vc, unsigned int n)
-{
-loop:
-	while (!list_empty(&vc->vidq_queued)) {
-		struct vdma_desc *descs = vc->sg_descs[n];
-		struct tw686x_vb2_buf *buf;
-		struct sg_table *vbuf;
-		struct scatterlist *sg;
-		unsigned int buf_len, count = 0;
-		int i;
-
-		buf = list_first_entry(&vc->vidq_queued, struct tw686x_vb2_buf,
-				       list);
-		list_del(&buf->list);
-
-		buf_len = vc->width * vc->height * vc->format->depth / 8;
-		if (vb2_plane_size(&buf->vb.vb2_buf, 0) < buf_len) {
-			pr_err("Video buffer size too small\n");
-			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
-			goto loop; /* try another */
-		}
-
-		vbuf = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
-		for_each_sg(vbuf->sgl, sg, vbuf->nents, i) {
-			dma_addr_t phys = sg_dma_address(sg);
-			unsigned int len = sg_dma_len(sg);
-
-			while (len && buf_len) {
-				unsigned int entry_len = min_t(unsigned int, len,
-							   MAX_SG_ENTRY_SIZE);
-				entry_len = min(entry_len, buf_len);
-				if (count == MAX_SG_DESC_COUNT) {
-					pr_err("Video buffer size too fragmented\n");
-					vb2_buffer_done(&buf->vb.vb2_buf,
-							VB2_BUF_STATE_ERROR);
-					goto loop;
-				}
-				descs[count].phys = cpu_to_le32(phys);
-				descs[count++].flags_length =
-					cpu_to_le32(0x40000000 /* available */ |
-						    entry_len);
-				phys += entry_len;
-				len -= entry_len;
-				buf_len -= entry_len;
-			}
-			if (!buf_len)
-				break;
-		}
-
-		/* clear the remaining entries */
-		while (count < MAX_SG_DESC_COUNT) {
-			descs[count].phys = 0;
-			descs[count++].flags_length = 0; /* unavailable */
-		}
-
-		buf->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
-		vc->curr_bufs[n] = buf;
-		return;
-	}
-	vc->curr_bufs[n] = NULL;
-}
-
-/* On TW6864 and TW6868, all channels share the pair of video DMA SG tables,
-   with 10-bit start_idx and end_idx determining start and end of frame buffer
-   for particular channel.
-   TW6868 with all its 8 channels would be problematic (only 127 SG entries per
-   channel) but we support only 4 channels on this chip anyway (the first
-   4 channels are driven with internal video decoder, the other 4 would require
-   an external TW286x part).
-
-   On TW6865 and TW6869, each channel has its own DMA SG table, with indexes
-   starting with 0. Both chips have complete sets of internal video decoders
-   (respectively 4 or 8-channel).
-
-   All chips have separate SG tables for two video frames. */
-
-static void setup_dma_cfg(struct tw686x_video_channel *vc)
-{
-	unsigned int field_width = 704;
-	unsigned int field_height = (vc->video_standard & V4L2_STD_625_50) ?
-		288 : 240;
-	unsigned int start_idx = is_second_gen(vc->dev) ? 0 :
-		vc->ch * MAX_SG_DESC_COUNT;
-	unsigned int end_idx = start_idx + MAX_SG_DESC_COUNT - 1;
-	u32 dma_cfg = (0 << 30) /* input selection */ |
-		(1 << 29) /* field2 dropped (if any) */ |
-		((vc->height < 300) << 28) /* field dropping */ |
-		(1 << 27) /* master */ |
-		(0 << 25) /* master channel (for slave only) */ |
-		(0 << 24) /* (no) vertical (line) decimation */ |
-		((vc->width < 400) << 23) /* horizontal decimation */ |
-		(vc->format->mode << 20) /* output video format */ |
-		(end_idx << 10) /* DMA end index */ |
-		start_idx /* DMA start index */;
-	u32 reg;
-
-	reg_write(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch], dma_cfg);
-	reg_write(vc->dev, VIDEO_SIZE[vc->ch], (1 << 31) | (field_height << 16)
-		  | field_width);
-	reg = reg_read(vc->dev, VIDEO_CONTROL1);
-	if (vc->video_standard & V4L2_STD_625_50)
-		reg |= 1 << (vc->ch + 13);
-	else
-		reg &= ~(1 << (vc->ch + 13));
-	reg_write(vc->dev, VIDEO_CONTROL1, reg);
-}
-
-static int tw686x_start_streaming(struct vb2_queue *vq, unsigned int count)
-{
-	struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
-	struct tw686x_dev *dev = vc->dev;
-	u32 dma_ch_mask;
-	unsigned int n;
-
-	setup_dma_cfg(vc);
-
-	/* queue video buffers if available */
-	spin_lock(&vc->qlock);
-	for (n = 0; n < 2; n++)
-		setup_descs(vc, n);
-	spin_unlock(&vc->qlock);
-
-	dev->video_active |= 1 << vc->ch;
-	vc->seq = 0;
-	dma_ch_mask = reg_read(dev, DMA_CHANNEL_ENABLE) | (1 << vc->ch);
-	reg_write(dev, DMA_CHANNEL_ENABLE, dma_ch_mask);
-	reg_write(dev, DMA_CMD, (1 << 31) | dma_ch_mask);
-	return 0;
-}
-
-static void tw686x_stop_streaming(struct vb2_queue *vq)
-{
-	struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
-	struct tw686x_dev *dev = vc->dev;
-	u32 dma_ch_mask = reg_read(dev, DMA_CHANNEL_ENABLE);
-	u32 dma_cmd = reg_read(dev, DMA_CMD);
-	unsigned int n;
-
-	dma_ch_mask &= ~(1 << vc->ch);
-	reg_write(dev, DMA_CHANNEL_ENABLE, dma_ch_mask);
-
-	dev->video_active &= ~(1 << vc->ch);
-
-	dma_cmd &= ~(1 << vc->ch);
-	reg_write(dev, DMA_CMD, dma_cmd);
-
-	if (!dev->video_active) {
-		reg_write(dev, DMA_CMD, 0);
-		reg_write(dev, DMA_CHANNEL_ENABLE, 0);
-	}
-
-	spin_lock(&vc->qlock);
-	while (!list_empty(&vc->vidq_queued)) {
-		struct tw686x_vb2_buf *buf;
-
-		buf = list_entry(vc->vidq_queued.next, struct tw686x_vb2_buf,
-				 list);
-		list_del(&buf->list);
-		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
-	}
-
-	for (n = 0; n < 2; n++)
-		if (vc->curr_bufs[n])
-			vb2_buffer_done(&vc->curr_bufs[n]->vb.vb2_buf,
-					VB2_BUF_STATE_ERROR);
-
-	spin_unlock(&vc->qlock);
-}
-
-static struct vb2_ops tw686x_video_qops = {
-	.queue_setup		= tw686x_queue_setup,
-	.buf_queue		= tw686x_buf_queue,
-	.start_streaming	= tw686x_start_streaming,
-	.stop_streaming		= tw686x_stop_streaming,
-	.wait_prepare		= vb2_ops_wait_prepare,
-	.wait_finish		= vb2_ops_wait_finish,
-};
-
-static int tw686x_s_ctrl(struct v4l2_ctrl *ctrl)
-{
-	struct tw686x_video_channel *vc;
-	struct tw686x_dev *dev;
-	unsigned int ch;
-
-	vc = container_of(ctrl->handler, struct tw686x_video_channel,
-			  ctrl_handler);
-	dev = vc->dev;
-	ch = vc->ch;
-
-	switch (ctrl->id) {
-	case V4L2_CID_BRIGHTNESS:
-		reg_write(dev, BRIGHT[ch], ctrl->val & 0xFF);
-		return 0;
-
-	case V4L2_CID_CONTRAST:
-		reg_write(dev, CONTRAST[ch], ctrl->val);
-		return 0;
-
-	case V4L2_CID_SATURATION:
-		reg_write(dev, SAT_U[ch], ctrl->val);
-		reg_write(dev, SAT_V[ch], ctrl->val);
-		return 0;
-
-	case V4L2_CID_HUE:
-		reg_write(dev, HUE[ch], ctrl->val & 0xFF);
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
-static const struct v4l2_ctrl_ops ctrl_ops = {
-	.s_ctrl = tw686x_s_ctrl,
-};
-
-static int tw686x_g_fmt_vid_cap(struct file *file, void *priv,
-				struct v4l2_format *f)
-{
-	struct tw686x_video_channel *vc = video_drvdata(file);
-
-	f->fmt.pix.width = vc->width;
-	f->fmt.pix.height = vc->height;
-	f->fmt.pix.field = vc->field;
-	f->fmt.pix.pixelformat = vc->format->fourcc;
-	f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
-	f->fmt.pix.bytesperline = f->fmt.pix.width * vc->format->depth / 8;
-	f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
-	return 0;
-}
-
-static int tw686x_try_fmt_vid_cap(struct file *file, void *priv,
-				  struct v4l2_format *f)
-{
-	tw686x_get_format(video_drvdata(file), f);
-	return 0;
-}
-
-static int tw686x_s_fmt_vid_cap(struct file *file, void *priv,
-				struct v4l2_format *f)
-{
-	struct tw686x_video_channel *vc = video_drvdata(file);
-
-	tw686x_get_format(vc, f);
-	vc->format = format_by_fourcc(f->fmt.pix.pixelformat);
-	vc->field = f->fmt.pix.field;
-	vc->width = f->fmt.pix.width;
-	vc->height = f->fmt.pix.height;
-	return 0;
-}
-
-static int tw686x_querycap(struct file *file, void *priv,
-			   struct v4l2_capability *cap)
-{
-	struct tw686x_video_channel *vc = video_drvdata(file);
-	struct tw686x_dev *dev = vc->dev;
-
-	strcpy(cap->driver, "tw686x-kh");
-	strcpy(cap->card, dev->name);
-	sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci_dev));
-	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
-	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-	return 0;
-}
-
-static int tw686x_s_std(struct file *file, void *priv, v4l2_std_id id)
-{
-	struct tw686x_video_channel *vc = video_drvdata(file);
-	unsigned int cnt;
-	u32 sdt = 0; /* default */
-
-	for (cnt = 0; cnt < ARRAY_SIZE(video_standards); cnt++)
-		if (id & video_standards[cnt]) {
-			sdt = cnt;
-			break;
-		}
-
-	reg_write(vc->dev, SDT[vc->ch], sdt);
-	vc->video_standard = video_standards[sdt];
-	return 0;
-}
-
-static int tw686x_g_std(struct file *file, void *priv, v4l2_std_id *id)
-{
-	struct tw686x_video_channel *vc = video_drvdata(file);
-
-	*id = vc->video_standard;
-	return 0;
-}
-
-static int tw686x_enum_fmt_vid_cap(struct file *file, void *priv,
-				   struct v4l2_fmtdesc *f)
-{
-	if (f->index >= ARRAY_SIZE(formats))
-		return -EINVAL;
-
-	strlcpy(f->description, formats[f->index].name, sizeof(f->description));
-	f->pixelformat = formats[f->index].fourcc;
-	return 0;
-}
-
-static int tw686x_g_parm(struct file *file, void *priv,
-			 struct v4l2_streamparm *sp)
-{
-	struct tw686x_video_channel *vc = video_drvdata(file);
-
-	if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
-		return -EINVAL;
-	memset(&sp->parm.capture, 0, sizeof(sp->parm.capture));
-	sp->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
-	v4l2_video_std_frame_period(vc->video_standard,
-				    &sp->parm.capture.timeperframe);
-
-	return 0;
-}
-
-static int tw686x_enum_input(struct file *file, void *priv,
-			     struct v4l2_input *inp)
-{
-	/* the chip has internal multiplexer, support can be added
-	   if the actual hw uses it */
-	if (inp->index)
-		return -EINVAL;
-
-	snprintf(inp->name, sizeof(inp->name), "Composite");
-	inp->type = V4L2_INPUT_TYPE_CAMERA;
-	inp->std = V4L2_STD_ALL;
-	inp->capabilities = V4L2_IN_CAP_STD;
-	return 0;
-}
-
-static int tw686x_g_input(struct file *file, void *priv, unsigned int *v)
-{
-	*v = 0;
-	return 0;
-}
-
-static int tw686x_s_input(struct file *file, void *priv, unsigned int v)
-{
-	if (v)
-		return -EINVAL;
-	return 0;
-}
-
-static const struct v4l2_file_operations tw686x_video_fops = {
-	.owner		= THIS_MODULE,
-	.open		= v4l2_fh_open,
-	.unlocked_ioctl	= video_ioctl2,
-	.release	= vb2_fop_release,
-	.poll		= vb2_fop_poll,
-	.read		= vb2_fop_read,
-	.mmap		= vb2_fop_mmap,
-};
-
-static const struct v4l2_ioctl_ops tw686x_video_ioctl_ops = {
-	.vidioc_querycap		= tw686x_querycap,
-	.vidioc_enum_fmt_vid_cap	= tw686x_enum_fmt_vid_cap,
-	.vidioc_g_fmt_vid_cap		= tw686x_g_fmt_vid_cap,
-	.vidioc_s_fmt_vid_cap		= tw686x_s_fmt_vid_cap,
-	.vidioc_try_fmt_vid_cap		= tw686x_try_fmt_vid_cap,
-	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
-	.vidioc_querybuf		= vb2_ioctl_querybuf,
-	.vidioc_qbuf			= vb2_ioctl_qbuf,
-	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
-	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
-	.vidioc_streamon		= vb2_ioctl_streamon,
-	.vidioc_streamoff		= vb2_ioctl_streamoff,
-	.vidioc_g_std			= tw686x_g_std,
-	.vidioc_s_std			= tw686x_s_std,
-	.vidioc_g_parm			= tw686x_g_parm,
-	.vidioc_enum_input		= tw686x_enum_input,
-	.vidioc_g_input			= tw686x_g_input,
-	.vidioc_s_input			= tw686x_s_input,
-	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
-	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
-};
-
-static int video_thread(void *arg)
-{
-	struct tw686x_dev *dev = arg;
-	DECLARE_WAITQUEUE(wait, current);
-
-	set_freezable();
-	add_wait_queue(&dev->video_thread_wait, &wait);
-
-	while (1) {
-		long timeout = schedule_timeout_interruptible(HZ);
-		unsigned int ch;
-
-		if (timeout == -ERESTARTSYS || kthread_should_stop())
-			break;
-
-		for (ch = 0; ch < max_channels(dev); ch++) {
-			struct tw686x_video_channel *vc;
-			unsigned long flags;
-			u32 request, n, stat = VB2_BUF_STATE_DONE;
-
-			vc = &dev->video_channels[ch];
-			if (!(dev->video_active & (1 << ch)))
-				continue;
-
-			spin_lock_irq(&dev->irq_lock);
-			request = dev->dma_requests & (0x01000001 << ch);
-			if (request)
-				dev->dma_requests &= ~request;
-			spin_unlock_irq(&dev->irq_lock);
-
-			if (!request)
-				continue;
-
-			request >>= ch;
-
-			/* handle channel events */
-			if ((request & 0x01000000) |
-			    (reg_read(dev, VIDEO_FIFO_STATUS) & (0x01010001 << ch)) |
-			    (reg_read(dev, VIDEO_PARSER_STATUS) & (0x00000101 << ch))) {
-				/* DMA Errors - reset channel */
-				u32 reg;
-
-				spin_lock_irqsave(&dev->irq_lock, flags);
-				reg = reg_read(dev, DMA_CMD);
-				/* Reset DMA channel */
-				reg_write(dev, DMA_CMD, reg & ~(1 << ch));
-				reg_write(dev, DMA_CMD, reg);
-				spin_unlock_irqrestore(&dev->irq_lock, flags);
-				stat = VB2_BUF_STATE_ERROR;
-			}
-
-			/* handle video stream */
-			mutex_lock(&vc->vb_mutex);
-			spin_lock(&vc->qlock);
-			n = !!(reg_read(dev, PB_STATUS) & (1 << ch));
-			if (vc->curr_bufs[n]) {
-				struct vb2_v4l2_buffer *vb;
-
-				vb = &vc->curr_bufs[n]->vb;
-				vb->vb2_buf.timestamp = ktime_get_ns();
-				vb->field = vc->field;
-				if (V4L2_FIELD_HAS_BOTH(vc->field))
-					vb->sequence = vc->seq++;
-				else
-					vb->sequence = (vc->seq++) / 2;
-				vb2_set_plane_payload(&vb->vb2_buf, 0,
-				      vc->width * vc->height * vc->format->depth / 8);
-				vb2_buffer_done(&vb->vb2_buf, stat);
-			}
-			setup_descs(vc, n);
-			spin_unlock(&vc->qlock);
-			mutex_unlock(&vc->vb_mutex);
-		}
-		try_to_freeze();
-	}
-
-	remove_wait_queue(&dev->video_thread_wait, &wait);
-	return 0;
-}
-
-int tw686x_kh_video_irq(struct tw686x_dev *dev)
-{
-	unsigned long flags, handled = 0;
-	u32 requests;
-
-	spin_lock_irqsave(&dev->irq_lock, flags);
-	requests = dev->dma_requests;
-	spin_unlock_irqrestore(&dev->irq_lock, flags);
-
-	if (requests & dev->video_active) {
-		wake_up_interruptible_all(&dev->video_thread_wait);
-		handled = 1;
-	}
-	return handled;
-}
-
-void tw686x_kh_video_free(struct tw686x_dev *dev)
-{
-	unsigned int ch, n;
-
-	if (dev->video_thread)
-		kthread_stop(dev->video_thread);
-
-	for (ch = 0; ch < max_channels(dev); ch++) {
-		struct tw686x_video_channel *vc = &dev->video_channels[ch];
-
-		v4l2_ctrl_handler_free(&vc->ctrl_handler);
-		if (vc->device)
-			video_unregister_device(vc->device);
-		for (n = 0; n < 2; n++) {
-			struct dma_desc *descs = &vc->sg_tables[n];
-
-			if (descs->virt)
-				pci_free_consistent(dev->pci_dev, descs->size,
-						    descs->virt, descs->phys);
-		}
-	}
-
-	v4l2_device_unregister(&dev->v4l2_dev);
-}
-
-#define SG_TABLE_SIZE (MAX_SG_DESC_COUNT * sizeof(struct vdma_desc))
-
-int tw686x_kh_video_init(struct tw686x_dev *dev)
-{
-	unsigned int ch, n;
-	int err;
-
-	init_waitqueue_head(&dev->video_thread_wait);
-
-	err = v4l2_device_register(&dev->pci_dev->dev, &dev->v4l2_dev);
-	if (err)
-		return err;
-
-	reg_write(dev, VIDEO_CONTROL1, 0); /* NTSC, disable scaler */
-	reg_write(dev, PHASE_REF, 0x00001518); /* Scatter-gather DMA mode */
-
-	/* setup required SG table sizes */
-	for (n = 0; n < 2; n++)
-		if (is_second_gen(dev)) {
-			/* TW 6865, TW6869 - each channel needs a pair of
-			   descriptor tables */
-			for (ch = 0; ch < max_channels(dev); ch++)
-				dev->video_channels[ch].sg_tables[n].size =
-					SG_TABLE_SIZE;
-
-		} else
-			/* TW 6864, TW6868 - we need to allocate a pair of
-			   descriptor tables, common for all channels.
-			   Each table will be bigger than 4 KB. */
-			dev->video_channels[0].sg_tables[n].size =
-				max_channels(dev) * SG_TABLE_SIZE;
-
-	/* allocate SG tables and initialize video channels */
-	for (ch = 0; ch < max_channels(dev); ch++) {
-		struct tw686x_video_channel *vc = &dev->video_channels[ch];
-		struct video_device *vdev;
-
-		mutex_init(&vc->vb_mutex);
-		spin_lock_init(&vc->qlock);
-		INIT_LIST_HEAD(&vc->vidq_queued);
-
-		vc->dev = dev;
-		vc->ch = ch;
-
-		/* default settings: NTSC */
-		vc->format = &formats[0];
-		vc->video_standard = V4L2_STD_NTSC;
-		reg_write(vc->dev, SDT[vc->ch], 0);
-		vc->field = V4L2_FIELD_SEQ_BT;
-		vc->width = 704;
-		vc->height = 480;
-
-		for (n = 0; n < 2; n++) {
-			void *cpu;
-
-			if (vc->sg_tables[n].size) {
-				unsigned int reg = n ? DMA_PAGE_TABLE1_ADDR[ch] :
-					DMA_PAGE_TABLE0_ADDR[ch];
-
-				cpu = pci_alloc_consistent(dev->pci_dev,
-							   vc->sg_tables[n].size,
-							   &vc->sg_tables[n].phys);
-				if (!cpu) {
-					pr_err("Error allocating video DMA scatter-gather tables\n");
-					err = -ENOMEM;
-					goto error;
-				}
-				vc->sg_tables[n].virt = cpu;
-				reg_write(dev, reg, vc->sg_tables[n].phys);
-			} else
-				cpu = dev->video_channels[0].sg_tables[n].virt +
-					ch * SG_TABLE_SIZE;
-
-			vc->sg_descs[n] = cpu;
-		}
-
-		reg_write(dev, VCTRL1[0], 0x24);
-		reg_write(dev, LOOP[0], 0xA5);
-		if (max_channels(dev) > 4) {
-			reg_write(dev, VCTRL1[1], 0x24);
-			reg_write(dev, LOOP[1], 0xA5);
-		}
-		reg_write(dev, VIDEO_FIELD_CTRL[ch], 0);
-		reg_write(dev, VDELAY_LO[ch], 0x14);
-
-		vdev = video_device_alloc();
-		if (!vdev) {
-			pr_warn("Unable to allocate video device\n");
-			err = -ENOMEM;
-			goto error;
-		}
-
-		vc->vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-		vc->vidq.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
-		vc->vidq.drv_priv = vc;
-		vc->vidq.buf_struct_size = sizeof(struct tw686x_vb2_buf);
-		vc->vidq.ops = &tw686x_video_qops;
-		vc->vidq.mem_ops = &vb2_dma_sg_memops;
-		vc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
-		vc->vidq.min_buffers_needed = 2;
-		vc->vidq.lock = &vc->vb_mutex;
-		vc->vidq.dev = &dev->pci_dev->dev;
-		vc->vidq.gfp_flags = GFP_DMA32;
-
-		err = vb2_queue_init(&vc->vidq);
-		if (err)
-			goto error;
-
-		strcpy(vdev->name, "TW686x-video");
-		snprintf(vdev->name, sizeof(vdev->name), "%s video", dev->name);
-		vdev->fops = &tw686x_video_fops;
-		vdev->ioctl_ops = &tw686x_video_ioctl_ops;
-		vdev->release = video_device_release;
-		vdev->v4l2_dev = &dev->v4l2_dev;
-		vdev->queue = &vc->vidq;
-		vdev->tvnorms = V4L2_STD_ALL;
-		vdev->minor = -1;
-		vdev->lock = &vc->vb_mutex;
-
-		dev->video_channels[ch].device = vdev;
-		video_set_drvdata(vdev, vc);
-		err = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
-		if (err < 0)
-			goto error;
-
-		v4l2_ctrl_handler_init(&vc->ctrl_handler,
-				       4 /* number of controls */);
-		vdev->ctrl_handler = &vc->ctrl_handler;
-		v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
-				  V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
-		v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
-				  V4L2_CID_CONTRAST, 0, 255, 1, 64);
-		v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
-				  V4L2_CID_SATURATION, 0, 255, 1, 128);
-		v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops, V4L2_CID_HUE,
-				  -124, 127, 1, 0);
-		err = vc->ctrl_handler.error;
-		if (err)
-			goto error;
-
-		v4l2_ctrl_handler_setup(&vc->ctrl_handler);
-	}
-
-	dev->video_thread = kthread_run(video_thread, dev, "tw686x_video");
-	if (IS_ERR(dev->video_thread)) {
-		err = PTR_ERR(dev->video_thread);
-		goto error;
-	}
-
-	return 0;
-
-error:
-	tw686x_kh_video_free(dev);
-	return err;
-}
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh.h b/drivers/staging/media/tw686x-kh/tw686x-kh.h
deleted file mode 100644
index 6284a90..0000000
--- a/drivers/staging/media/tw686x-kh/tw686x-kh.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (C) 2015 Industrial Research Institute for Automation
- * and Measurements PIAP
- *
- * Written by Krzysztof Ha?asa.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/freezer.h>
-#include <linux/interrupt.h>
-#include <linux/kthread.h>
-#include <linux/mutex.h>
-#include <linux/pci.h>
-#include <media/videobuf2-dma-sg.h>
-#include <linux/videodev2.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-
-#define TYPE_MAX_CHANNELS 0x0F
-#define TYPE_SECOND_GEN   0x10
-
-struct tw686x_format {
-	char *name;
-	unsigned int fourcc;
-	unsigned int depth;
-	unsigned int mode;
-};
-
-struct dma_desc {
-	dma_addr_t phys;
-	void *virt;
-	unsigned int size;
-};
-
-struct vdma_desc {
-	__le32 flags_length;	/* 3 MSBits for flags, 13 LSBits for length */
-	__le32 phys;
-};
-
-struct tw686x_vb2_buf {
-	struct vb2_v4l2_buffer vb;
-	struct list_head list;
-};
-
-struct tw686x_video_channel {
-	struct tw686x_dev *dev;
-
-	struct vb2_queue vidq;
-	struct list_head vidq_queued;
-	struct video_device *device;
-	struct dma_desc sg_tables[2];
-	struct tw686x_vb2_buf *curr_bufs[2];
-	struct vdma_desc *sg_descs[2];
-
-	struct v4l2_ctrl_handler ctrl_handler;
-	const struct tw686x_format *format;
-	struct mutex vb_mutex;
-	spinlock_t qlock;
-	v4l2_std_id video_standard;
-	unsigned int width, height;
-	enum v4l2_field field; /* supported TOP, BOTTOM, SEQ_TB and SEQ_BT */
-	unsigned int seq;	       /* video field or frame counter */
-	unsigned int ch;
-};
-
-/* global device status */
-struct tw686x_dev {
-	spinlock_t irq_lock;
-
-	struct v4l2_device v4l2_dev;
-	struct snd_card *card;	/* sound card */
-
-	unsigned int video_active;	/* active video channel mask */
-
-	char name[32];
-	unsigned int type;
-	struct pci_dev *pci_dev;
-	__u32 __iomem *mmio;
-
-	struct task_struct *video_thread;
-	wait_queue_head_t video_thread_wait;
-	u32 dma_requests;
-
-	struct tw686x_video_channel video_channels[0];
-};
-
-static inline uint32_t reg_read(struct tw686x_dev *dev, unsigned int reg)
-{
-	return readl(dev->mmio + reg);
-}
-
-static inline void reg_write(struct tw686x_dev *dev, unsigned int reg,
-			     uint32_t value)
-{
-	writel(value, dev->mmio + reg);
-}
-
-static inline unsigned int max_channels(struct tw686x_dev *dev)
-{
-	return dev->type & TYPE_MAX_CHANNELS; /* 4 or 8 channels */
-}
-
-static inline unsigned int is_second_gen(struct tw686x_dev *dev)
-{
-	/* each channel has its own DMA SG table */
-	return dev->type & TYPE_SECOND_GEN;
-}
-
-int tw686x_kh_video_irq(struct tw686x_dev *dev);
-int tw686x_kh_video_init(struct tw686x_dev *dev);
-void tw686x_kh_video_free(struct tw686x_dev *dev);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index c29040f..1091b9f 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -423,8 +423,7 @@
 		actual_pages = get_user_pages(task, task->mm,
 				          (unsigned long)buf & ~(PAGE_SIZE - 1),
 					  num_pages,
-					  (type == PAGELIST_READ) /*Write */ ,
-					  0 /*Force */ ,
+					  (type == PAGELIST_READ) ? FOLL_WRITE : 0,
 					  pages,
 					  NULL /*vmas */);
 		up_read(&task->mm->mmap_sem);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index e11c0e0..7b6cd4d 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -1477,8 +1477,7 @@
 		current->mm,              /* mm */
 		(unsigned long)virt_addr, /* start */
 		num_pages,                /* len */
-		0,                        /* write */
-		0,                        /* force */
+		0,                        /* gup_flags */
 		pages,                    /* pages (array of page pointers) */
 		NULL);                    /* vmas */
 	up_read(&current->mm->mmap_sem);
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 2d702ca..a13541b 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -186,7 +186,7 @@
 
 config IMX_THERMAL
 	tristate "Temperature sensor driver for Freescale i.MX SoCs"
-	depends on CPU_THERMAL
+	depends on (ARCH_MXC && CPU_THERMAL) || COMPILE_TEST
 	depends on MFD_SYSCON
 	depends on OF
 	help
@@ -195,6 +195,26 @@
 	  cpufreq is used as the cooling device to throttle CPUs when the
 	  passive trip is crossed.
 
+config MAX77620_THERMAL
+	tristate "Temperature sensor driver for Maxim MAX77620 PMIC"
+	depends on MFD_MAX77620
+	depends on OF
+	help
+	  Support for die junction temperature warning alarm for Maxim
+	  Semiconductor PMIC MAX77620 device. Device generates two alarm
+	  interrupts when PMIC die temperature cross the threshold of
+	  120 degC and 140 degC.
+
+config QORIQ_THERMAL
+	tristate "QorIQ Thermal Monitoring Unit"
+	depends on THERMAL_OF
+	depends on HAS_IOMEM
+	help
+	  Support for Thermal Monitoring Unit (TMU) found on QorIQ platforms.
+	  It supports one critical trip point and one passive trip point. The
+	  cpufreq is used as the cooling device to throttle CPUs when the
+	  passive trip is crossed.
+
 config SPEAR_THERMAL
 	tristate "SPEAr thermal sensor driver"
 	depends on PLAT_SPEAR || COMPILE_TEST
@@ -332,6 +352,16 @@
 source drivers/thermal/int340x_thermal/Kconfig
 endmenu
 
+config INTEL_BXT_PMIC_THERMAL
+	tristate "Intel Broxton PMIC thermal driver"
+	depends on X86 && INTEL_SOC_PMIC && REGMAP
+	help
+	  Select this driver for Intel Broxton PMIC with ADC channels monitoring
+	  system temperature measurements and alerts.
+	  This driver is used for monitoring the ADC channels of PMIC and handles
+	  the alert trip point interrupts and notifies the thermal framework with
+	  the trip point and temperature details of the zone.
+
 config INTEL_PCH_THERMAL
 	tristate "Intel PCH Thermal Reporting Driver"
 	depends on X86 && PCI
@@ -399,4 +429,9 @@
 	  to this driver. This driver reports the temperature by reading ADC
 	  channel and converts it to temperature based on lookup table.
 
+menu "Qualcomm thermal drivers"
+depends on (ARCH_QCOM && OF) || COMPILE_TEST
+source "drivers/thermal/qcom/Kconfig"
+endmenu
+
 endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 10b07c1..c92eb22 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -37,6 +37,8 @@
 obj-$(CONFIG_ARMADA_THERMAL)	+= armada_thermal.o
 obj-$(CONFIG_TANGO_THERMAL)	+= tango_thermal.o
 obj-$(CONFIG_IMX_THERMAL)	+= imx_thermal.o
+obj-$(CONFIG_MAX77620_THERMAL)	+= max77620_thermal.o
+obj-$(CONFIG_QORIQ_THERMAL)	+= qoriq_thermal.o
 obj-$(CONFIG_DB8500_CPUFREQ_COOLING)	+= db8500_cpufreq_cooling.o
 obj-$(CONFIG_INTEL_POWERCLAMP)	+= intel_powerclamp.o
 obj-$(CONFIG_X86_PKG_TEMP_THERMAL)	+= x86_pkg_temp_thermal.o
@@ -45,8 +47,10 @@
 obj-$(CONFIG_INTEL_QUARK_DTS_THERMAL)	+= intel_quark_dts_thermal.o
 obj-$(CONFIG_TI_SOC_THERMAL)	+= ti-soc-thermal/
 obj-$(CONFIG_INT340X_THERMAL)  += int340x_thermal/
+obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o
 obj-$(CONFIG_INTEL_PCH_THERMAL)	+= intel_pch_thermal.o
 obj-$(CONFIG_ST_THERMAL)	+= st/
+obj-$(CONFIG_QCOM_TSENS)	+= qcom/
 obj-$(CONFIG_TEGRA_SOCTHERM)	+= tegra/
 obj-$(CONFIG_HISI_THERMAL)     += hisi_thermal.o
 obj-$(CONFIG_MTK_THERMAL)	+= mtk_thermal.o
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index a32b417..9ce0e9e 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -74,7 +74,7 @@
  *	cpufreq frequencies.
  * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
  * @node: list_head to link all cpufreq_cooling_device together.
- * @last_load: load measured by the latest call to cpufreq_get_actual_power()
+ * @last_load: load measured by the latest call to cpufreq_get_requested_power()
  * @time_in_idle: previous reading of the absolute time that this cpu was idle
  * @time_in_idle_timestamp: wall time of the last invocation of
  *	get_cpu_idle_time_us()
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index 652acd8..e776cea 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -306,7 +306,7 @@
 	if (cur_mode == THERMAL_DEVICE_DISABLED)
 		return;
 
-	thermal_zone_device_update(pzone->therm_dev);
+	thermal_zone_device_update(pzone->therm_dev, THERMAL_EVENT_UNSPECIFIED);
 	dev_dbg(&pzone->therm_dev->device, "thermal work finished.\n");
 }
 
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 01f0015..81631b1 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -312,7 +312,7 @@
 	unsigned long freq;
 	u32 static_power;
 
-	if (state < 0 || state >= dfc->freq_table_size)
+	if (state >= dfc->freq_table_size)
 		return -EINVAL;
 
 	freq = dfc->freq_table[state];
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index bb118a1..fc5e505 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -65,7 +65,7 @@
 		if (instance->target == 0 && tz->temperature >= trip_temp)
 			instance->target = 1;
 		else if (instance->target == 1 &&
-				tz->temperature < trip_temp - trip_hyst)
+				tz->temperature <= trip_temp - trip_hyst)
 			instance->target = 0;
 
 		dev_dbg(&instance->cdev->device, "target=%d\n",
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 97fad8f..f642966 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -237,7 +237,8 @@
 		if (!data->sensors[i].tzd)
 			continue;
 
-		thermal_zone_device_update(data->sensors[i].tzd);
+		thermal_zone_device_update(data->sensors[i].tzd,
+					   THERMAL_EVENT_UNSPECIFIED);
 	}
 
 	return IRQ_HANDLED;
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index e473548..06912f0 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -246,7 +246,7 @@
 	}
 
 	data->mode = mode;
-	thermal_zone_device_update(tz);
+	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
 
 	return 0;
 }
@@ -457,7 +457,7 @@
 	dev_dbg(&data->tz->device, "THERMAL ALARM: T > %d\n",
 		data->alarm_temp / 1000);
 
-	thermal_zone_device_update(data->tz);
+	thermal_zone_device_update(data->tz, THERMAL_EVENT_UNSPECIFIED);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/thermal/int340x_thermal/int3402_thermal.c b/drivers/thermal/int340x_thermal/int3402_thermal.c
index 69df3d9..8e90b31 100644
--- a/drivers/thermal/int340x_thermal/int3402_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3402_thermal.c
@@ -35,7 +35,8 @@
 	case INT3402_PERF_CHANGED_EVENT:
 		break;
 	case INT3402_THERMAL_EVENT:
-		int340x_thermal_zone_device_update(priv->int340x_zone);
+		int340x_thermal_zone_device_update(priv->int340x_zone,
+						   THERMAL_TRIP_VIOLATED);
 		break;
 	default:
 		break;
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
index 50a7a08..c4890c9 100644
--- a/drivers/thermal/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
@@ -25,6 +25,7 @@
 #define INT3403_TYPE_CHARGER		0x0B
 #define INT3403_TYPE_BATTERY		0x0C
 #define INT3403_PERF_CHANGED_EVENT	0x80
+#define INT3403_PERF_TRIP_POINT_CHANGED	0x81
 #define INT3403_THERMAL_EVENT		0x90
 
 /* Preserved structure for future expandbility */
@@ -72,7 +73,13 @@
 	case INT3403_PERF_CHANGED_EVENT:
 		break;
 	case INT3403_THERMAL_EVENT:
-		int340x_thermal_zone_device_update(obj->int340x_zone);
+		int340x_thermal_zone_device_update(obj->int340x_zone,
+						   THERMAL_TRIP_VIOLATED);
+		break;
+	case INT3403_PERF_TRIP_POINT_CHANGED:
+		int340x_thermal_read_trips(obj->int340x_zone);
+		int340x_thermal_zone_device_update(obj->int340x_zone,
+						   THERMAL_TRIP_CHANGED);
 		break;
 	default:
 		dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event);
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
index b9b2666..145a5c53 100644
--- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
@@ -177,6 +177,42 @@
 	return 0;
 }
 
+int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone)
+{
+	int trip_cnt = int34x_zone->aux_trip_nr;
+	int i;
+
+	int34x_zone->crt_trip_id = -1;
+	if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_CRT",
+					     &int34x_zone->crt_temp))
+		int34x_zone->crt_trip_id = trip_cnt++;
+
+	int34x_zone->hot_trip_id = -1;
+	if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_HOT",
+					     &int34x_zone->hot_temp))
+		int34x_zone->hot_trip_id = trip_cnt++;
+
+	int34x_zone->psv_trip_id = -1;
+	if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_PSV",
+					     &int34x_zone->psv_temp))
+		int34x_zone->psv_trip_id = trip_cnt++;
+
+	for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
+		char name[5] = { '_', 'A', 'C', '0' + i, '\0' };
+
+		if (int340x_thermal_get_trip_config(int34x_zone->adev->handle,
+					name,
+					&int34x_zone->act_trips[i].temp))
+			break;
+
+		int34x_zone->act_trips[i].id = trip_cnt++;
+		int34x_zone->act_trips[i].valid = true;
+	}
+
+	return trip_cnt;
+}
+EXPORT_SYMBOL_GPL(int340x_thermal_read_trips);
+
 static struct thermal_zone_params int340x_thermal_params = {
 	.governor_name = "user_space",
 	.no_hwmon = true,
@@ -188,7 +224,7 @@
 	struct int34x_thermal_zone *int34x_thermal_zone;
 	acpi_status status;
 	unsigned long long trip_cnt;
-	int trip_mask = 0, i;
+	int trip_mask = 0;
 	int ret;
 
 	int34x_thermal_zone = kzalloc(sizeof(*int34x_thermal_zone),
@@ -214,28 +250,8 @@
 		int34x_thermal_zone->aux_trip_nr = trip_cnt;
 	}
 
-	int34x_thermal_zone->crt_trip_id = -1;
-	if (!int340x_thermal_get_trip_config(adev->handle, "_CRT",
-					     &int34x_thermal_zone->crt_temp))
-		int34x_thermal_zone->crt_trip_id = trip_cnt++;
-	int34x_thermal_zone->hot_trip_id = -1;
-	if (!int340x_thermal_get_trip_config(adev->handle, "_HOT",
-					     &int34x_thermal_zone->hot_temp))
-		int34x_thermal_zone->hot_trip_id = trip_cnt++;
-	int34x_thermal_zone->psv_trip_id = -1;
-	if (!int340x_thermal_get_trip_config(adev->handle, "_PSV",
-					     &int34x_thermal_zone->psv_temp))
-		int34x_thermal_zone->psv_trip_id = trip_cnt++;
-	for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
-		char name[5] = { '_', 'A', 'C', '0' + i, '\0' };
+	trip_cnt = int340x_thermal_read_trips(int34x_thermal_zone);
 
-		if (int340x_thermal_get_trip_config(adev->handle, name,
-				&int34x_thermal_zone->act_trips[i].temp))
-			break;
-
-		int34x_thermal_zone->act_trips[i].id = trip_cnt++;
-		int34x_thermal_zone->act_trips[i].valid = true;
-	}
 	int34x_thermal_zone->lpat_table = acpi_lpat_get_conversion_table(
 								adev->handle);
 
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
index aaadf72..5f3ba47 100644
--- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
+++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
@@ -46,6 +46,7 @@
 struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *,
 				struct thermal_zone_device_ops *override_ops);
 void int340x_thermal_zone_remove(struct int34x_thermal_zone *);
+int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone);
 
 static inline void int340x_thermal_zone_set_priv_data(
 			struct int34x_thermal_zone *tzone, void *priv_data)
@@ -60,9 +61,10 @@
 }
 
 static inline void int340x_thermal_zone_device_update(
-			struct int34x_thermal_zone *tzone)
+					struct int34x_thermal_zone *tzone,
+					enum thermal_notify_event event)
 {
-	thermal_zone_device_update(tzone->zone);
+	thermal_zone_device_update(tzone->zone, event);
 }
 
 #endif
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index 42c1ac0..ff3b36f 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -258,7 +258,8 @@
 	switch (event) {
 	case PROC_POWER_CAPABILITY_CHANGED:
 		proc_thermal_read_ppcc(proc_priv);
-		int340x_thermal_zone_device_update(proc_priv->int340x_zone);
+		int340x_thermal_zone_device_update(proc_priv->int340x_zone,
+				THERMAL_DEVICE_POWER_CAPABILITY_CHANGED);
 		break;
 	default:
 		dev_err(proc_priv->dev, "Unsupported event [0x%x]\n", event);
diff --git a/drivers/thermal/intel_bxt_pmic_thermal.c b/drivers/thermal/intel_bxt_pmic_thermal.c
new file mode 100644
index 0000000..0f19a393
--- /dev/null
+++ b/drivers/thermal/intel_bxt_pmic_thermal.c
@@ -0,0 +1,300 @@
+/*
+ * Intel Broxton PMIC thermal driver
+ *
+ * Copyright (C) 2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/thermal.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/mfd/intel_soc_pmic.h>
+
+#define BXTWC_THRM0IRQ		0x4E04
+#define BXTWC_THRM1IRQ		0x4E05
+#define BXTWC_THRM2IRQ		0x4E06
+#define BXTWC_MTHRM0IRQ		0x4E12
+#define BXTWC_MTHRM1IRQ		0x4E13
+#define BXTWC_MTHRM2IRQ		0x4E14
+#define BXTWC_STHRM0IRQ		0x4F19
+#define BXTWC_STHRM1IRQ		0x4F1A
+#define BXTWC_STHRM2IRQ		0x4F1B
+
+struct trip_config_map {
+	u16 irq_reg;
+	u16 irq_en;
+	u16 evt_stat;
+	u8 irq_mask;
+	u8 irq_en_mask;
+	u8 evt_mask;
+	u8 trip_num;
+};
+
+struct thermal_irq_map {
+	char handle[20];
+	int num_trips;
+	const struct trip_config_map *trip_config;
+};
+
+struct pmic_thermal_data {
+	const struct thermal_irq_map *maps;
+	int num_maps;
+};
+
+static const struct trip_config_map bxtwc_str0_trip_config[] = {
+	{
+		.irq_reg = BXTWC_THRM0IRQ,
+		.irq_mask = 0x01,
+		.irq_en = BXTWC_MTHRM0IRQ,
+		.irq_en_mask = 0x01,
+		.evt_stat = BXTWC_STHRM0IRQ,
+		.evt_mask = 0x01,
+		.trip_num = 0
+	},
+	{
+		.irq_reg = BXTWC_THRM0IRQ,
+		.irq_mask = 0x10,
+		.irq_en = BXTWC_MTHRM0IRQ,
+		.irq_en_mask = 0x10,
+		.evt_stat = BXTWC_STHRM0IRQ,
+		.evt_mask = 0x10,
+		.trip_num = 1
+	}
+};
+
+static const struct trip_config_map bxtwc_str1_trip_config[] = {
+	{
+		.irq_reg = BXTWC_THRM0IRQ,
+		.irq_mask = 0x02,
+		.irq_en = BXTWC_MTHRM0IRQ,
+		.irq_en_mask = 0x02,
+		.evt_stat = BXTWC_STHRM0IRQ,
+		.evt_mask = 0x02,
+		.trip_num = 0
+	},
+	{
+		.irq_reg = BXTWC_THRM0IRQ,
+		.irq_mask = 0x20,
+		.irq_en = BXTWC_MTHRM0IRQ,
+		.irq_en_mask = 0x20,
+		.evt_stat = BXTWC_STHRM0IRQ,
+		.evt_mask = 0x20,
+		.trip_num = 1
+	},
+};
+
+static const struct trip_config_map bxtwc_str2_trip_config[] = {
+	{
+		.irq_reg = BXTWC_THRM0IRQ,
+		.irq_mask = 0x04,
+		.irq_en = BXTWC_MTHRM0IRQ,
+		.irq_en_mask = 0x04,
+		.evt_stat = BXTWC_STHRM0IRQ,
+		.evt_mask = 0x04,
+		.trip_num = 0
+	},
+	{
+		.irq_reg = BXTWC_THRM0IRQ,
+		.irq_mask = 0x40,
+		.irq_en = BXTWC_MTHRM0IRQ,
+		.irq_en_mask = 0x40,
+		.evt_stat = BXTWC_STHRM0IRQ,
+		.evt_mask = 0x40,
+		.trip_num = 1
+	},
+};
+
+static const struct trip_config_map bxtwc_str3_trip_config[] = {
+	{
+		.irq_reg = BXTWC_THRM2IRQ,
+		.irq_mask = 0x10,
+		.irq_en = BXTWC_MTHRM2IRQ,
+		.irq_en_mask = 0x10,
+		.evt_stat = BXTWC_STHRM2IRQ,
+		.evt_mask = 0x10,
+		.trip_num = 0
+	},
+};
+
+static const struct thermal_irq_map bxtwc_thermal_irq_map[] = {
+	{
+		.handle = "STR0",
+		.trip_config = bxtwc_str0_trip_config,
+		.num_trips = ARRAY_SIZE(bxtwc_str0_trip_config),
+	},
+	{
+		.handle = "STR1",
+		.trip_config = bxtwc_str1_trip_config,
+		.num_trips = ARRAY_SIZE(bxtwc_str1_trip_config),
+	},
+	{
+		.handle = "STR2",
+		.trip_config = bxtwc_str2_trip_config,
+		.num_trips = ARRAY_SIZE(bxtwc_str2_trip_config),
+	},
+	{
+		.handle = "STR3",
+		.trip_config = bxtwc_str3_trip_config,
+		.num_trips = ARRAY_SIZE(bxtwc_str3_trip_config),
+	},
+};
+
+static const struct pmic_thermal_data bxtwc_thermal_data = {
+	.maps = bxtwc_thermal_irq_map,
+	.num_maps = ARRAY_SIZE(bxtwc_thermal_irq_map),
+};
+
+static irqreturn_t pmic_thermal_irq_handler(int irq, void *data)
+{
+	struct platform_device *pdev = data;
+	struct thermal_zone_device *tzd;
+	struct pmic_thermal_data *td;
+	struct intel_soc_pmic *pmic;
+	struct regmap *regmap;
+	u8 reg_val, mask, irq_stat, trip;
+	u16 reg, evt_stat_reg;
+	int i, j, ret;
+
+	pmic = dev_get_drvdata(pdev->dev.parent);
+	regmap = pmic->regmap;
+	td = (struct pmic_thermal_data *)
+		platform_get_device_id(pdev)->driver_data;
+
+	/* Resolve thermal irqs */
+	for (i = 0; i < td->num_maps; i++) {
+		for (j = 0; j < td->maps[i].num_trips; j++) {
+			reg = td->maps[i].trip_config[j].irq_reg;
+			mask = td->maps[i].trip_config[j].irq_mask;
+			/*
+			 * Read the irq register to resolve whether the
+			 * interrupt was triggered for this sensor
+			 */
+			if (regmap_read(regmap, reg, &ret))
+				return IRQ_HANDLED;
+
+			reg_val = (u8)ret;
+			irq_stat = ((u8)ret & mask);
+
+			if (!irq_stat)
+				continue;
+
+			/*
+			 * Read the status register to find out what
+			 * event occurred i.e a high or a low
+			 */
+			evt_stat_reg = td->maps[i].trip_config[j].evt_stat;
+			if (regmap_read(regmap, evt_stat_reg, &ret))
+				return IRQ_HANDLED;
+
+			trip = td->maps[i].trip_config[j].trip_num;
+			tzd = thermal_zone_get_zone_by_name(td->maps[i].handle);
+			if (!IS_ERR(tzd))
+				thermal_zone_device_update(tzd,
+						THERMAL_EVENT_UNSPECIFIED);
+
+			/* Clear the appropriate irq */
+			regmap_write(regmap, reg, reg_val & mask);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int pmic_thermal_probe(struct platform_device *pdev)
+{
+	struct regmap_irq_chip_data *regmap_irq_chip;
+	struct pmic_thermal_data *thermal_data;
+	int ret, irq, virq, i, j, pmic_irq_count;
+	struct intel_soc_pmic *pmic;
+	struct regmap *regmap;
+	struct device *dev;
+	u16 reg;
+	u8 mask;
+
+	dev = &pdev->dev;
+	pmic = dev_get_drvdata(pdev->dev.parent);
+	if (!pmic) {
+		dev_err(dev, "Failed to get struct intel_soc_pmic pointer\n");
+		return -ENODEV;
+	}
+
+	thermal_data = (struct pmic_thermal_data *)
+				platform_get_device_id(pdev)->driver_data;
+	if (!thermal_data) {
+		dev_err(dev, "No thermal data initialized!!\n");
+		return -ENODEV;
+	}
+
+	regmap = pmic->regmap;
+	regmap_irq_chip = pmic->irq_chip_data_level2;
+
+	pmic_irq_count = 0;
+	while ((irq = platform_get_irq(pdev, pmic_irq_count)) != -ENXIO) {
+		virq = regmap_irq_get_virq(regmap_irq_chip, irq);
+		if (virq < 0) {
+			dev_err(dev, "failed to get virq by irq %d\n", irq);
+			return virq;
+		}
+
+		ret = devm_request_threaded_irq(&pdev->dev, virq,
+				NULL, pmic_thermal_irq_handler,
+				IRQF_ONESHOT, "pmic_thermal", pdev);
+
+		if (ret) {
+			dev_err(dev, "request irq(%d) failed: %d\n", virq, ret);
+			return ret;
+		}
+		pmic_irq_count++;
+	}
+
+	/* Enable thermal interrupts */
+	for (i = 0; i < thermal_data->num_maps; i++) {
+		for (j = 0; j < thermal_data->maps[i].num_trips; j++) {
+			reg = thermal_data->maps[i].trip_config[j].irq_en;
+			mask = thermal_data->maps[i].trip_config[j].irq_en_mask;
+			ret = regmap_update_bits(regmap, reg, mask, 0x00);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+static const struct platform_device_id pmic_thermal_id_table[] = {
+	{
+		.name = "bxt_wcove_thermal",
+		.driver_data = (kernel_ulong_t)&bxtwc_thermal_data,
+	},
+	{},
+};
+
+static struct platform_driver pmic_thermal_driver = {
+	.probe = pmic_thermal_probe,
+	.driver = {
+		.name = "pmic_thermal",
+	},
+	.id_table = pmic_thermal_id_table,
+};
+
+MODULE_DEVICE_TABLE(platform, pmic_thermal_id_table);
+module_platform_driver(pmic_thermal_driver);
+
+MODULE_AUTHOR("Yegnesh S Iyer <yegnesh.s.iyer@intel.com>");
+MODULE_DESCRIPTION("Intel Broxton PMIC Thermal Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel_soc_dts_iosf.c b/drivers/thermal/intel_soc_dts_iosf.c
index f72e1db..e0813df 100644
--- a/drivers/thermal/intel_soc_dts_iosf.c
+++ b/drivers/thermal/intel_soc_dts_iosf.c
@@ -391,7 +391,8 @@
 
 		for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
 			pr_debug("TZD update for zone %d\n", i);
-			thermal_zone_device_update(sensors->soc_dts[i].tzone);
+			thermal_zone_device_update(sensors->soc_dts[i].tzone,
+						   THERMAL_EVENT_UNSPECIFIED);
 		}
 	} else
 		spin_unlock_irqrestore(&sensors->intr_notify_lock, flags);
diff --git a/drivers/thermal/max77620_thermal.c b/drivers/thermal/max77620_thermal.c
new file mode 100644
index 0000000..83905ff
--- /dev/null
+++ b/drivers/thermal/max77620_thermal.c
@@ -0,0 +1,166 @@
+/*
+ * Junction temperature thermal driver for Maxim Max77620.
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *	   Mallikarjun Kasoju <mkasoju@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max77620.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+#define MAX77620_NORMAL_OPERATING_TEMP	100000
+#define MAX77620_TJALARM1_TEMP		120000
+#define MAX77620_TJALARM2_TEMP		140000
+
+struct max77620_therm_info {
+	struct device			*dev;
+	struct regmap			*rmap;
+	struct thermal_zone_device	*tz_device;
+	int				irq_tjalarm1;
+	int				irq_tjalarm2;
+};
+
+/**
+ * max77620_thermal_read_temp: Read PMIC die temperatue.
+ * @data:	Device specific data.
+ * temp:	Temperature in millidegrees Celsius
+ *
+ * The actual temperature of PMIC die is not available from PMIC.
+ * PMIC only tells the status if it has crossed or not the threshold level
+ * of 120degC or 140degC.
+ * If threshold has not been crossed then assume die temperature as 100degC
+ * else 120degC or 140deG based on the PMIC die temp threshold status.
+ *
+ * Return 0 on success otherwise error number to show reason of failure.
+ */
+
+static int max77620_thermal_read_temp(void *data, int *temp)
+{
+	struct max77620_therm_info *mtherm = data;
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read(mtherm->rmap, MAX77620_REG_STATLBT, &val);
+	if (ret < 0) {
+		dev_err(mtherm->dev, "Failed to read STATLBT: %d\n", ret);
+		return ret;
+	}
+
+	if (val & MAX77620_IRQ_TJALRM2_MASK)
+		*temp = MAX77620_TJALARM2_TEMP;
+	else if (val & MAX77620_IRQ_TJALRM1_MASK)
+		*temp = MAX77620_TJALARM1_TEMP;
+	else
+		*temp = MAX77620_NORMAL_OPERATING_TEMP;
+
+	return 0;
+}
+
+static const struct thermal_zone_of_device_ops max77620_thermal_ops = {
+	.get_temp = max77620_thermal_read_temp,
+};
+
+static irqreturn_t max77620_thermal_irq(int irq, void *data)
+{
+	struct max77620_therm_info *mtherm = data;
+
+	if (irq == mtherm->irq_tjalarm1)
+		dev_warn(mtherm->dev, "Junction Temp Alarm1(120C) occurred\n");
+	else if (irq == mtherm->irq_tjalarm2)
+		dev_crit(mtherm->dev, "Junction Temp Alarm2(140C) occurred\n");
+
+	thermal_zone_device_update(mtherm->tz_device,
+				   THERMAL_EVENT_UNSPECIFIED);
+
+	return IRQ_HANDLED;
+}
+
+static int max77620_thermal_probe(struct platform_device *pdev)
+{
+	struct max77620_therm_info *mtherm;
+	int ret;
+
+	mtherm = devm_kzalloc(&pdev->dev, sizeof(*mtherm), GFP_KERNEL);
+	if (!mtherm)
+		return -ENOMEM;
+
+	mtherm->irq_tjalarm1 = platform_get_irq(pdev, 0);
+	mtherm->irq_tjalarm2 = platform_get_irq(pdev, 1);
+	if ((mtherm->irq_tjalarm1 < 0) || (mtherm->irq_tjalarm2 < 0)) {
+		dev_err(&pdev->dev, "Alarm irq number not available\n");
+		return -EINVAL;
+	}
+
+	pdev->dev.of_node = pdev->dev.parent->of_node;
+
+	mtherm->dev = &pdev->dev;
+	mtherm->rmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!mtherm->rmap) {
+		dev_err(&pdev->dev, "Failed to get parent regmap\n");
+		return -ENODEV;
+	}
+
+	mtherm->tz_device = devm_thermal_zone_of_sensor_register(&pdev->dev, 0,
+				mtherm, &max77620_thermal_ops);
+	if (IS_ERR(mtherm->tz_device)) {
+		ret = PTR_ERR(mtherm->tz_device);
+		dev_err(&pdev->dev, "Failed to register thermal zone: %d\n",
+			ret);
+		return ret;
+	}
+
+	ret = devm_request_threaded_irq(&pdev->dev, mtherm->irq_tjalarm1, NULL,
+					max77620_thermal_irq,
+					IRQF_ONESHOT | IRQF_SHARED,
+					dev_name(&pdev->dev), mtherm);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to request irq1: %d\n", ret);
+		return ret;
+	}
+
+	ret = devm_request_threaded_irq(&pdev->dev, mtherm->irq_tjalarm2, NULL,
+					max77620_thermal_irq,
+					IRQF_ONESHOT | IRQF_SHARED,
+					dev_name(&pdev->dev), mtherm);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to request irq2: %d\n", ret);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, mtherm);
+
+	return 0;
+}
+
+static struct platform_device_id max77620_thermal_devtype[] = {
+	{ .name = "max77620-thermal", },
+	{},
+};
+
+static struct platform_driver max77620_thermal_driver = {
+	.driver = {
+		.name = "max77620-thermal",
+	},
+	.probe = max77620_thermal_probe,
+	.id_table = max77620_thermal_devtype,
+};
+
+module_platform_driver(max77620_thermal_driver);
+
+MODULE_DESCRIPTION("Max77620 Junction temperature Thermal driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_AUTHOR("Mallikarjun Kasoju <mkasoju@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 262ab0a..34169c3 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -2,6 +2,7 @@
  * Copyright (c) 2015 MediaTek Inc.
  * Author: Hanyi Wu <hanyi.wu@mediatek.com>
  *         Sascha Hauer <s.hauer@pengutronix.de>
+ *         Dawei Chien <dawei.chien@mediatek.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -21,6 +22,7 @@
 #include <linux/nvmem-consumer.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/io.h>
@@ -88,6 +90,7 @@
 #define TEMP_ADCVALIDMASK_VALID_HIGH		BIT(5)
 #define TEMP_ADCVALIDMASK_VALID_POS(bit)	(bit)
 
+/* MT8173 thermal sensors */
 #define MT8173_TS1	0
 #define MT8173_TS2	1
 #define MT8173_TS3	2
@@ -106,7 +109,12 @@
 /* The number of sensing points per bank */
 #define MT8173_NUM_SENSORS_PER_ZONE	4
 
-/* Layout of the fuses providing the calibration data */
+/*
+ * Layout of the fuses providing the calibration data
+ * These macros could be used for both MT8173 and MT2701.
+ * MT8173 has five sensors and need five VTS calibration data,
+ * and MT2701 has three sensors and need three VTS calibration data.
+ */
 #define MT8173_CALIB_BUF0_VALID		BIT(0)
 #define MT8173_CALIB_BUF1_ADC_GE(x)	(((x) >> 22) & 0x3ff)
 #define MT8173_CALIB_BUF0_VTS_TS1(x)	(((x) >> 17) & 0x1ff)
@@ -117,24 +125,50 @@
 #define MT8173_CALIB_BUF0_DEGC_CALI(x)	(((x) >> 1) & 0x3f)
 #define MT8173_CALIB_BUF0_O_SLOPE(x)	(((x) >> 26) & 0x3f)
 
+/* MT2701 thermal sensors */
+#define MT2701_TS1	0
+#define MT2701_TS2	1
+#define MT2701_TSABB	2
+
+/* AUXADC channel 11 is used for the temperature sensors */
+#define MT2701_TEMP_AUXADC_CHANNEL	11
+
+/* The total number of temperature sensors in the MT2701 */
+#define MT2701_NUM_SENSORS	3
+
 #define THERMAL_NAME    "mtk-thermal"
 
+/* The number of sensing points per bank */
+#define MT2701_NUM_SENSORS_PER_ZONE	3
+
 struct mtk_thermal;
 
+struct thermal_bank_cfg {
+	unsigned int num_sensors;
+	const int *sensors;
+};
+
 struct mtk_thermal_bank {
 	struct mtk_thermal *mt;
 	int id;
 };
 
+struct mtk_thermal_data {
+	s32 num_banks;
+	s32 num_sensors;
+	s32 auxadc_channel;
+	const int *sensor_mux_values;
+	const int *msr;
+	const int *adcpnp;
+	struct thermal_bank_cfg bank_data[];
+};
+
 struct mtk_thermal {
 	struct device *dev;
 	void __iomem *thermal_base;
 
 	struct clk *clk_peri_therm;
 	struct clk *clk_auxadc;
-
-	struct mtk_thermal_bank banks[MT8173_NUM_ZONES];
-
 	/* lock: for getting and putting banks */
 	struct mutex lock;
 
@@ -144,16 +178,44 @@
 	s32 o_slope;
 	s32 vts[MT8173_NUM_SENSORS];
 
+	const struct mtk_thermal_data *conf;
+	struct mtk_thermal_bank banks[];
 };
 
-struct mtk_thermal_bank_cfg {
-	unsigned int num_sensors;
-	unsigned int sensors[MT8173_NUM_SENSORS_PER_ZONE];
+/* MT8173 thermal sensor data */
+const int mt8173_bank_data[MT8173_NUM_ZONES][3] = {
+	{ MT8173_TS2, MT8173_TS3 },
+	{ MT8173_TS2, MT8173_TS4 },
+	{ MT8173_TS1, MT8173_TS2, MT8173_TSABB },
+	{ MT8173_TS2 },
 };
 
-static const int sensor_mux_values[MT8173_NUM_SENSORS] = { 0, 1, 2, 3, 16 };
+const int mt8173_msr[MT8173_NUM_SENSORS_PER_ZONE] = {
+	TEMP_MSR0, TEMP_MSR1, TEMP_MSR2, TEMP_MSR2
+};
 
-/*
+const int mt8173_adcpnp[MT8173_NUM_SENSORS_PER_ZONE] = {
+	TEMP_ADCPNP0, TEMP_ADCPNP1, TEMP_ADCPNP2, TEMP_ADCPNP3
+};
+
+const int mt8173_mux_values[MT8173_NUM_SENSORS] = { 0, 1, 2, 3, 16 };
+
+/* MT2701 thermal sensor data */
+const int mt2701_bank_data[MT2701_NUM_SENSORS] = {
+	MT2701_TS1, MT2701_TS2, MT2701_TSABB
+};
+
+const int mt2701_msr[MT2701_NUM_SENSORS_PER_ZONE] = {
+	TEMP_MSR0, TEMP_MSR1, TEMP_MSR2
+};
+
+const int mt2701_adcpnp[MT2701_NUM_SENSORS_PER_ZONE] = {
+	TEMP_ADCPNP0, TEMP_ADCPNP1, TEMP_ADCPNP2
+};
+
+const int mt2701_mux_values[MT2701_NUM_SENSORS] = { 0, 1, 16 };
+
+/**
  * The MT8173 thermal controller has four banks. Each bank can read up to
  * four temperature sensors simultaneously. The MT8173 has a total of 5
  * temperature sensors. We use each bank to measure a certain area of the
@@ -166,42 +228,53 @@
  * data, and this indeed needs the temperatures of the individual banks
  * for making better decisions.
  */
-static const struct mtk_thermal_bank_cfg bank_data[] = {
-	{
-		.num_sensors = 2,
-		.sensors = { MT8173_TS2, MT8173_TS3 },
-	}, {
-		.num_sensors = 2,
-		.sensors = { MT8173_TS2, MT8173_TS4 },
-	}, {
-		.num_sensors = 3,
-		.sensors = { MT8173_TS1, MT8173_TS2, MT8173_TSABB },
-	}, {
-		.num_sensors = 1,
-		.sensors = { MT8173_TS2 },
+static const struct mtk_thermal_data mt8173_thermal_data = {
+	.auxadc_channel = MT8173_TEMP_AUXADC_CHANNEL,
+	.num_banks = MT8173_NUM_ZONES,
+	.num_sensors = MT8173_NUM_SENSORS,
+	.bank_data = {
+		{
+			.num_sensors = 2,
+			.sensors = mt8173_bank_data[0],
+		}, {
+			.num_sensors = 2,
+			.sensors = mt8173_bank_data[1],
+		}, {
+			.num_sensors = 3,
+			.sensors = mt8173_bank_data[2],
+		}, {
+			.num_sensors = 1,
+			.sensors = mt8173_bank_data[3],
+		},
 	},
+	.msr = mt8173_msr,
+	.adcpnp = mt8173_adcpnp,
+	.sensor_mux_values = mt8173_mux_values,
 };
 
-struct mtk_thermal_sense_point {
-	int msr;
-	int adcpnp;
-};
-
-static const struct mtk_thermal_sense_point
-		sensing_points[MT8173_NUM_SENSORS_PER_ZONE] = {
-	{
-		.msr = TEMP_MSR0,
-		.adcpnp = TEMP_ADCPNP0,
-	}, {
-		.msr = TEMP_MSR1,
-		.adcpnp = TEMP_ADCPNP1,
-	}, {
-		.msr = TEMP_MSR2,
-		.adcpnp = TEMP_ADCPNP2,
-	}, {
-		.msr = TEMP_MSR3,
-		.adcpnp = TEMP_ADCPNP3,
+/**
+ * The MT2701 thermal controller has one bank, which can read up to
+ * three temperature sensors simultaneously. The MT2701 has a total of 3
+ * temperature sensors.
+ *
+ * The thermal core only gets the maximum temperature of this one bank,
+ * so the bank concept wouldn't be necessary here. However, the SVS (Smart
+ * Voltage Scaling) unit makes its decisions based on the same bank
+ * data.
+ */
+static const struct mtk_thermal_data mt2701_thermal_data = {
+	.auxadc_channel = MT2701_TEMP_AUXADC_CHANNEL,
+	.num_banks = 1,
+	.num_sensors = MT2701_NUM_SENSORS,
+	.bank_data = {
+		{
+			.num_sensors = 3,
+			.sensors = mt2701_bank_data,
+		},
 	},
+	.msr = mt2701_msr,
+	.adcpnp = mt2701_adcpnp,
+	.sensor_mux_values = mt2701_mux_values,
 };
 
 /**
@@ -270,13 +343,16 @@
 static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank)
 {
 	struct mtk_thermal *mt = bank->mt;
+	const struct mtk_thermal_data *conf = mt->conf;
 	int i, temp = INT_MIN, max = INT_MIN;
 	u32 raw;
 
-	for (i = 0; i < bank_data[bank->id].num_sensors; i++) {
-		raw = readl(mt->thermal_base + sensing_points[i].msr);
+	for (i = 0; i < conf->bank_data[bank->id].num_sensors; i++) {
+		raw = readl(mt->thermal_base + conf->msr[i]);
 
-		temp = raw_to_mcelsius(mt, bank_data[bank->id].sensors[i], raw);
+		temp = raw_to_mcelsius(mt,
+				       conf->bank_data[bank->id].sensors[i],
+				       raw);
 
 		/*
 		 * The first read of a sensor often contains very high bogus
@@ -299,7 +375,7 @@
 	int i;
 	int tempmax = INT_MIN;
 
-	for (i = 0; i < MT8173_NUM_ZONES; i++) {
+	for (i = 0; i < mt->conf->num_banks; i++) {
 		struct mtk_thermal_bank *bank = &mt->banks[i];
 
 		mtk_thermal_get_bank(bank);
@@ -322,7 +398,7 @@
 				  u32 apmixed_phys_base, u32 auxadc_phys_base)
 {
 	struct mtk_thermal_bank *bank = &mt->banks[num];
-	const struct mtk_thermal_bank_cfg *cfg = &bank_data[num];
+	const struct mtk_thermal_data *conf = mt->conf;
 	int i;
 
 	bank->id = num;
@@ -368,7 +444,7 @@
 	 * this value will be stored to TEMP_PNPMUXADDR (TEMP_SPARE0)
 	 * automatically by hw
 	 */
-	writel(BIT(MT8173_TEMP_AUXADC_CHANNEL), mt->thermal_base + TEMP_ADCMUX);
+	writel(BIT(conf->auxadc_channel), mt->thermal_base + TEMP_ADCMUX);
 
 	/* AHB address for auxadc mux selection */
 	writel(auxadc_phys_base + AUXADC_CON1_CLR_V,
@@ -379,18 +455,18 @@
 	       mt->thermal_base + TEMP_PNPMUXADDR);
 
 	/* AHB value for auxadc enable */
-	writel(BIT(MT8173_TEMP_AUXADC_CHANNEL), mt->thermal_base + TEMP_ADCEN);
+	writel(BIT(conf->auxadc_channel), mt->thermal_base + TEMP_ADCEN);
 
 	/* AHB address for auxadc enable (channel 0 immediate mode selected) */
 	writel(auxadc_phys_base + AUXADC_CON1_SET_V,
 	       mt->thermal_base + TEMP_ADCENADDR);
 
 	/* AHB address for auxadc valid bit */
-	writel(auxadc_phys_base + AUXADC_DATA(MT8173_TEMP_AUXADC_CHANNEL),
+	writel(auxadc_phys_base + AUXADC_DATA(conf->auxadc_channel),
 	       mt->thermal_base + TEMP_ADCVALIDADDR);
 
 	/* AHB address for auxadc voltage output */
-	writel(auxadc_phys_base + AUXADC_DATA(MT8173_TEMP_AUXADC_CHANNEL),
+	writel(auxadc_phys_base + AUXADC_DATA(conf->auxadc_channel),
 	       mt->thermal_base + TEMP_ADCVOLTADDR);
 
 	/* read valid & voltage are at the same register */
@@ -407,11 +483,12 @@
 	writel(TEMP_ADCWRITECTRL_ADC_MUX_WRITE,
 	       mt->thermal_base + TEMP_ADCWRITECTRL);
 
-	for (i = 0; i < cfg->num_sensors; i++)
-		writel(sensor_mux_values[cfg->sensors[i]],
-		       mt->thermal_base + sensing_points[i].adcpnp);
+	for (i = 0; i < conf->bank_data[num].num_sensors; i++)
+		writel(conf->sensor_mux_values[conf->bank_data[num].sensors[i]],
+		       mt->thermal_base + conf->adcpnp[i]);
 
-	writel((1 << cfg->num_sensors) - 1, mt->thermal_base + TEMP_MONCTL0);
+	writel((1 << conf->bank_data[num].num_sensors) - 1,
+	       mt->thermal_base + TEMP_MONCTL0);
 
 	writel(TEMP_ADCWRITECTRL_ADC_PNP_WRITE |
 	       TEMP_ADCWRITECTRL_ADC_MUX_WRITE,
@@ -442,7 +519,7 @@
 
 	/* Start with default values */
 	mt->adc_ge = 512;
-	for (i = 0; i < MT8173_NUM_SENSORS; i++)
+	for (i = 0; i < mt->conf->num_sensors; i++)
 		mt->vts[i] = 260;
 	mt->degc_cali = 40;
 	mt->o_slope = 0;
@@ -486,18 +563,37 @@
 	return ret;
 }
 
+static const struct of_device_id mtk_thermal_of_match[] = {
+	{
+		.compatible = "mediatek,mt8173-thermal",
+		.data = (void *)&mt8173_thermal_data,
+	},
+	{
+		.compatible = "mediatek,mt2701-thermal",
+		.data = (void *)&mt2701_thermal_data,
+	}, {
+	},
+};
+MODULE_DEVICE_TABLE(of, mtk_thermal_of_match);
+
 static int mtk_thermal_probe(struct platform_device *pdev)
 {
 	int ret, i;
 	struct device_node *auxadc, *apmixedsys, *np = pdev->dev.of_node;
 	struct mtk_thermal *mt;
 	struct resource *res;
+	const struct of_device_id *of_id;
 	u64 auxadc_phys_base, apmixed_phys_base;
+	struct thermal_zone_device *tzdev;
 
 	mt = devm_kzalloc(&pdev->dev, sizeof(*mt), GFP_KERNEL);
 	if (!mt)
 		return -ENOMEM;
 
+	of_id = of_match_device(mtk_thermal_of_match, &pdev->dev);
+	if (of_id)
+		mt->conf = (const struct mtk_thermal_data *)of_id->data;
+
 	mt->clk_peri_therm = devm_clk_get(&pdev->dev, "therm");
 	if (IS_ERR(mt->clk_peri_therm))
 		return PTR_ERR(mt->clk_peri_therm);
@@ -565,17 +661,23 @@
 		goto err_disable_clk_auxadc;
 	}
 
-	for (i = 0; i < MT8173_NUM_ZONES; i++)
+	for (i = 0; i < mt->conf->num_banks; i++)
 		mtk_thermal_init_bank(mt, i, apmixed_phys_base,
 				      auxadc_phys_base);
 
 	platform_set_drvdata(pdev, mt);
 
-	devm_thermal_zone_of_sensor_register(&pdev->dev, 0, mt,
-					     &mtk_thermal_ops);
+	tzdev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, mt,
+						     &mtk_thermal_ops);
+	if (IS_ERR(tzdev)) {
+		ret = PTR_ERR(tzdev);
+		goto err_disable_clk_peri_therm;
+	}
 
 	return 0;
 
+err_disable_clk_peri_therm:
+	clk_disable_unprepare(mt->clk_peri_therm);
 err_disable_clk_auxadc:
 	clk_disable_unprepare(mt->clk_auxadc);
 
@@ -592,13 +694,6 @@
 	return 0;
 }
 
-static const struct of_device_id mtk_thermal_of_match[] = {
-	{
-		.compatible = "mediatek,mt8173-thermal",
-	}, {
-	},
-};
-
 static struct platform_driver mtk_thermal_driver = {
 	.probe = mtk_thermal_probe,
 	.remove = mtk_thermal_remove,
@@ -610,6 +705,7 @@
 
 module_platform_driver(mtk_thermal_driver);
 
+MODULE_AUTHOR("Dawei Chien <dawei.chien@mediatek.com>");
 MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
 MODULE_AUTHOR("Hanyi Wu <hanyi.wu@mediatek.com>");
 MODULE_DESCRIPTION("Mediatek thermal driver");
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index b8e509c..d04ec3b 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -101,6 +101,17 @@
 	return data->ops->get_temp(data->sensor_data, temp);
 }
 
+static int of_thermal_set_trips(struct thermal_zone_device *tz,
+				int low, int high)
+{
+	struct __thermal_zone *data = tz->devdata;
+
+	if (!data->ops || !data->ops->set_trips)
+		return -EINVAL;
+
+	return data->ops->set_trips(data->sensor_data, low, high);
+}
+
 /**
  * of_thermal_get_ntrips - function to export number of available trip
  *			   points.
@@ -181,9 +192,6 @@
 {
 	struct __thermal_zone *data = tz->devdata;
 
-	if (!data->ops || !data->ops->set_emul_temp)
-		return -EINVAL;
-
 	return data->ops->set_emul_temp(data->sensor_data, temp);
 }
 
@@ -191,25 +199,11 @@
 				enum thermal_trend *trend)
 {
 	struct __thermal_zone *data = tz->devdata;
-	long dev_trend;
-	int r;
 
 	if (!data->ops->get_trend)
 		return -EINVAL;
 
-	r = data->ops->get_trend(data->sensor_data, &dev_trend);
-	if (r)
-		return r;
-
-	/* TODO: These intervals might have some thresholds, but in core code */
-	if (dev_trend > 0)
-		*trend = THERMAL_TREND_RAISING;
-	else if (dev_trend < 0)
-		*trend = THERMAL_TREND_DROPPING;
-	else
-		*trend = THERMAL_TREND_STABLE;
-
-	return 0;
+	return data->ops->get_trend(data->sensor_data, trip, trend);
 }
 
 static int of_thermal_bind(struct thermal_zone_device *thermal,
@@ -292,7 +286,7 @@
 	mutex_unlock(&tz->lock);
 
 	data->mode = mode;
-	thermal_zone_device_update(tz);
+	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
 
 	return 0;
 }
@@ -427,7 +421,17 @@
 
 	tzd->ops->get_temp = of_thermal_get_temp;
 	tzd->ops->get_trend = of_thermal_get_trend;
-	tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
+
+	/*
+	 * The thermal zone core will calculate the window if they have set the
+	 * optional set_trips pointer.
+	 */
+	if (ops->set_trips)
+		tzd->ops->set_trips = of_thermal_set_trips;
+
+	if (ops->set_emul_temp)
+		tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
+
 	mutex_unlock(&tzd->lock);
 
 	return tzd;
@@ -596,7 +600,7 @@
  * Return: On success returns a valid struct thermal_zone_device,
  * otherwise, it returns a corresponding ERR_PTR(). Caller must
  * check the return value with help of IS_ERR() helper.
- * Registered hermal_zone_device device will automatically be
+ * Registered thermal_zone_device device will automatically be
  * released when device is unbounded.
  */
 struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index f8a3c60..819c6d5 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -150,7 +150,7 @@
 {
 	struct qpnp_tm_chip *chip = data;
 
-	thermal_zone_device_update(chip->tz_dev);
+	thermal_zone_device_update(chip->tz_dev, THERMAL_EVENT_UNSPECIFIED);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
new file mode 100644
index 0000000..be32e5a
--- /dev/null
+++ b/drivers/thermal/qcom/Kconfig
@@ -0,0 +1,11 @@
+config QCOM_TSENS
+	tristate "Qualcomm TSENS Temperature Alarm"
+	depends on THERMAL
+	depends on QCOM_QFPROM
+	depends on ARCH_QCOM || COMPILE_TEST
+	help
+	  This enables the thermal sysfs driver for the TSENS device. It shows
+	  up in Sysfs as a thermal zone with multiple trip points. Disabling the
+	  thermal zone device via the mode file results in disabling the sensor.
+	  Also able to set threshold temperature for both hot and cold and update
+	  when a threshold is reached.
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
new file mode 100644
index 0000000..2cc2193
--- /dev/null
+++ b/drivers/thermal/qcom/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_QCOM_TSENS)	+= qcom_tsens.o
+qcom_tsens-y			+= tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-8996.o
diff --git a/drivers/thermal/qcom/tsens-8916.c b/drivers/thermal/qcom/tsens-8916.c
new file mode 100644
index 0000000..fdf561b
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-8916.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include "tsens.h"
+
+/* eeprom layout data for 8916 */
+#define BASE0_MASK	0x0000007f
+#define BASE1_MASK	0xfe000000
+#define BASE0_SHIFT	0
+#define BASE1_SHIFT	25
+
+#define S0_P1_MASK	0x00000f80
+#define S1_P1_MASK	0x003e0000
+#define S2_P1_MASK	0xf8000000
+#define S3_P1_MASK	0x000003e0
+#define S4_P1_MASK	0x000f8000
+
+#define S0_P2_MASK	0x0001f000
+#define S1_P2_MASK	0x07c00000
+#define S2_P2_MASK	0x0000001f
+#define S3_P2_MASK	0x00007c00
+#define S4_P2_MASK	0x01f00000
+
+#define S0_P1_SHIFT	7
+#define S1_P1_SHIFT	17
+#define S2_P1_SHIFT	27
+#define S3_P1_SHIFT	5
+#define S4_P1_SHIFT	15
+
+#define S0_P2_SHIFT	12
+#define S1_P2_SHIFT	22
+#define S2_P2_SHIFT	0
+#define S3_P2_SHIFT	10
+#define S4_P2_SHIFT	20
+
+#define CAL_SEL_MASK	0xe0000000
+#define CAL_SEL_SHIFT	29
+
+static int calibrate_8916(struct tsens_device *tmdev)
+{
+	int base0 = 0, base1 = 0, i;
+	u32 p1[5], p2[5];
+	int mode = 0;
+	u32 *qfprom_cdata, *qfprom_csel;
+
+	qfprom_cdata = (u32 *)qfprom_read(tmdev->dev, "calib");
+	if (IS_ERR(qfprom_cdata))
+		return PTR_ERR(qfprom_cdata);
+
+	qfprom_csel = (u32 *)qfprom_read(tmdev->dev, "calib_sel");
+	if (IS_ERR(qfprom_csel))
+		return PTR_ERR(qfprom_csel);
+
+	mode = (qfprom_csel[0] & CAL_SEL_MASK) >> CAL_SEL_SHIFT;
+	dev_dbg(tmdev->dev, "calibration mode is %d\n", mode);
+
+	switch (mode) {
+	case TWO_PT_CALIB:
+		base1 = (qfprom_cdata[1] & BASE1_MASK) >> BASE1_SHIFT;
+		p2[0] = (qfprom_cdata[0] & S0_P2_MASK) >> S0_P2_SHIFT;
+		p2[1] = (qfprom_cdata[0] & S1_P2_MASK) >> S1_P2_SHIFT;
+		p2[2] = (qfprom_cdata[1] & S2_P2_MASK) >> S2_P2_SHIFT;
+		p2[3] = (qfprom_cdata[1] & S3_P2_MASK) >> S3_P2_SHIFT;
+		p2[4] = (qfprom_cdata[1] & S4_P2_MASK) >> S4_P2_SHIFT;
+		for (i = 0; i < tmdev->num_sensors; i++)
+			p2[i] = ((base1 + p2[i]) << 3);
+		/* Fall through */
+	case ONE_PT_CALIB2:
+		base0 = (qfprom_cdata[0] & BASE0_MASK);
+		p1[0] = (qfprom_cdata[0] & S0_P1_MASK) >> S0_P1_SHIFT;
+		p1[1] = (qfprom_cdata[0] & S1_P1_MASK) >> S1_P1_SHIFT;
+		p1[2] = (qfprom_cdata[0] & S2_P1_MASK) >> S2_P1_SHIFT;
+		p1[3] = (qfprom_cdata[1] & S3_P1_MASK) >> S3_P1_SHIFT;
+		p1[4] = (qfprom_cdata[1] & S4_P1_MASK) >> S4_P1_SHIFT;
+		for (i = 0; i < tmdev->num_sensors; i++)
+			p1[i] = (((base0) + p1[i]) << 3);
+		break;
+	default:
+		for (i = 0; i < tmdev->num_sensors; i++) {
+			p1[i] = 500;
+			p2[i] = 780;
+		}
+		break;
+	}
+
+	compute_intercept_slope(tmdev, p1, p2, mode);
+
+	return 0;
+}
+
+static const struct tsens_ops ops_8916 = {
+	.init		= init_common,
+	.calibrate	= calibrate_8916,
+	.get_temp	= get_temp_common,
+};
+
+const struct tsens_data data_8916 = {
+	.num_sensors	= 5,
+	.ops		= &ops_8916,
+	.hw_ids		= (unsigned int []){0, 1, 2, 4, 5 },
+};
diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c
new file mode 100644
index 0000000..0451277
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/thermal.h>
+#include "tsens.h"
+
+#define CAL_MDEGC		30000
+
+#define CONFIG_ADDR		0x3640
+#define CONFIG_ADDR_8660	0x3620
+/* CONFIG_ADDR bitmasks */
+#define CONFIG			0x9b
+#define CONFIG_MASK		0xf
+#define CONFIG_8660		1
+#define CONFIG_SHIFT_8660	28
+#define CONFIG_MASK_8660	(3 << CONFIG_SHIFT_8660)
+
+#define STATUS_CNTL_ADDR_8064	0x3660
+#define CNTL_ADDR		0x3620
+/* CNTL_ADDR bitmasks */
+#define EN			BIT(0)
+#define SW_RST			BIT(1)
+#define SENSOR0_EN		BIT(3)
+#define SLP_CLK_ENA		BIT(26)
+#define SLP_CLK_ENA_8660	BIT(24)
+#define MEASURE_PERIOD		1
+#define SENSOR0_SHIFT		3
+
+/* INT_STATUS_ADDR bitmasks */
+#define MIN_STATUS_MASK		BIT(0)
+#define LOWER_STATUS_CLR	BIT(1)
+#define UPPER_STATUS_CLR	BIT(2)
+#define MAX_STATUS_MASK		BIT(3)
+
+#define THRESHOLD_ADDR		0x3624
+/* THRESHOLD_ADDR bitmasks */
+#define THRESHOLD_MAX_LIMIT_SHIFT	24
+#define THRESHOLD_MIN_LIMIT_SHIFT	16
+#define THRESHOLD_UPPER_LIMIT_SHIFT	8
+#define THRESHOLD_LOWER_LIMIT_SHIFT	0
+
+/* Initial temperature threshold values */
+#define LOWER_LIMIT_TH		0x50
+#define UPPER_LIMIT_TH		0xdf
+#define MIN_LIMIT_TH		0x0
+#define MAX_LIMIT_TH		0xff
+
+#define S0_STATUS_ADDR		0x3628
+#define INT_STATUS_ADDR		0x363c
+#define TRDY_MASK		BIT(7)
+#define TIMEOUT_US		100
+
+static int suspend_8960(struct tsens_device *tmdev)
+{
+	int ret;
+	unsigned int mask;
+	struct regmap *map = tmdev->map;
+
+	ret = regmap_read(map, THRESHOLD_ADDR, &tmdev->ctx.threshold);
+	if (ret)
+		return ret;
+
+	ret = regmap_read(map, CNTL_ADDR, &tmdev->ctx.control);
+	if (ret)
+		return ret;
+
+	if (tmdev->num_sensors > 1)
+		mask = SLP_CLK_ENA | EN;
+	else
+		mask = SLP_CLK_ENA_8660 | EN;
+
+	ret = regmap_update_bits(map, CNTL_ADDR, mask, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int resume_8960(struct tsens_device *tmdev)
+{
+	int ret;
+	struct regmap *map = tmdev->map;
+
+	ret = regmap_update_bits(map, CNTL_ADDR, SW_RST, SW_RST);
+	if (ret)
+		return ret;
+
+	/*
+	 * Separate CONFIG restore is not needed only for 8660 as
+	 * config is part of CTRL Addr and its restored as such
+	 */
+	if (tmdev->num_sensors > 1) {
+		ret = regmap_update_bits(map, CONFIG_ADDR, CONFIG_MASK, CONFIG);
+		if (ret)
+			return ret;
+	}
+
+	ret = regmap_write(map, THRESHOLD_ADDR, tmdev->ctx.threshold);
+	if (ret)
+		return ret;
+
+	ret = regmap_write(map, CNTL_ADDR, tmdev->ctx.control);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int enable_8960(struct tsens_device *tmdev, int id)
+{
+	int ret;
+	u32 reg, mask;
+
+	ret = regmap_read(tmdev->map, CNTL_ADDR, &reg);
+	if (ret)
+		return ret;
+
+	mask = BIT(id + SENSOR0_SHIFT);
+	ret = regmap_write(tmdev->map, CNTL_ADDR, reg | SW_RST);
+	if (ret)
+		return ret;
+
+	if (tmdev->num_sensors > 1)
+		reg |= mask | SLP_CLK_ENA | EN;
+	else
+		reg |= mask | SLP_CLK_ENA_8660 | EN;
+
+	ret = regmap_write(tmdev->map, CNTL_ADDR, reg);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void disable_8960(struct tsens_device *tmdev)
+{
+	int ret;
+	u32 reg_cntl;
+	u32 mask;
+
+	mask = GENMASK(tmdev->num_sensors - 1, 0);
+	mask <<= SENSOR0_SHIFT;
+	mask |= EN;
+
+	ret = regmap_read(tmdev->map, CNTL_ADDR, &reg_cntl);
+	if (ret)
+		return;
+
+	reg_cntl &= ~mask;
+
+	if (tmdev->num_sensors > 1)
+		reg_cntl &= ~SLP_CLK_ENA;
+	else
+		reg_cntl &= ~SLP_CLK_ENA_8660;
+
+	regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+}
+
+static int init_8960(struct tsens_device *tmdev)
+{
+	int ret, i;
+	u32 reg_cntl;
+
+	tmdev->map = dev_get_regmap(tmdev->dev, NULL);
+	if (!tmdev->map)
+		return -ENODEV;
+
+	/*
+	 * The status registers for each sensor are discontiguous
+	 * because some SoCs have 5 sensors while others have more
+	 * but the control registers stay in the same place, i.e
+	 * directly after the first 5 status registers.
+	 */
+	for (i = 0; i < tmdev->num_sensors; i++) {
+		if (i >= 5)
+			tmdev->sensor[i].status = S0_STATUS_ADDR + 40;
+		tmdev->sensor[i].status += i * 4;
+	}
+
+	reg_cntl = SW_RST;
+	ret = regmap_update_bits(tmdev->map, CNTL_ADDR, SW_RST, reg_cntl);
+	if (ret)
+		return ret;
+
+	if (tmdev->num_sensors > 1) {
+		reg_cntl |= SLP_CLK_ENA | (MEASURE_PERIOD << 18);
+		reg_cntl &= ~SW_RST;
+		ret = regmap_update_bits(tmdev->map, CONFIG_ADDR,
+					 CONFIG_MASK, CONFIG);
+	} else {
+		reg_cntl |= SLP_CLK_ENA_8660 | (MEASURE_PERIOD << 16);
+		reg_cntl &= ~CONFIG_MASK_8660;
+		reg_cntl |= CONFIG_8660 << CONFIG_SHIFT_8660;
+	}
+
+	reg_cntl |= GENMASK(tmdev->num_sensors - 1, 0) << SENSOR0_SHIFT;
+	ret = regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+	if (ret)
+		return ret;
+
+	reg_cntl |= EN;
+	ret = regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int calibrate_8960(struct tsens_device *tmdev)
+{
+	int i;
+	char *data;
+
+	ssize_t num_read = tmdev->num_sensors;
+	struct tsens_sensor *s = tmdev->sensor;
+
+	data = qfprom_read(tmdev->dev, "calib");
+	if (IS_ERR(data))
+		data = qfprom_read(tmdev->dev, "calib_backup");
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	for (i = 0; i < num_read; i++, s++)
+		s->offset = data[i];
+
+	return 0;
+}
+
+/* Temperature on y axis and ADC-code on x-axis */
+static inline int code_to_mdegC(u32 adc_code, const struct tsens_sensor *s)
+{
+	int slope, offset;
+
+	slope = thermal_zone_get_slope(s->tzd);
+	offset = CAL_MDEGC - slope * s->offset;
+
+	return adc_code * slope + offset;
+}
+
+static int get_temp_8960(struct tsens_device *tmdev, int id, int *temp)
+{
+	int ret;
+	u32 code, trdy;
+	const struct tsens_sensor *s = &tmdev->sensor[id];
+	unsigned long timeout;
+
+	timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
+	do {
+		ret = regmap_read(tmdev->map, INT_STATUS_ADDR, &trdy);
+		if (ret)
+			return ret;
+		if (!(trdy & TRDY_MASK))
+			continue;
+		ret = regmap_read(tmdev->map, s->status, &code);
+		if (ret)
+			return ret;
+		*temp = code_to_mdegC(code, s);
+		return 0;
+	} while (time_before(jiffies, timeout));
+
+	return -ETIMEDOUT;
+}
+
+static const struct tsens_ops ops_8960 = {
+	.init		= init_8960,
+	.calibrate	= calibrate_8960,
+	.get_temp	= get_temp_8960,
+	.enable		= enable_8960,
+	.disable	= disable_8960,
+	.suspend	= suspend_8960,
+	.resume		= resume_8960,
+};
+
+const struct tsens_data data_8960 = {
+	.num_sensors	= 11,
+	.ops		= &ops_8960,
+};
diff --git a/drivers/thermal/qcom/tsens-8974.c b/drivers/thermal/qcom/tsens-8974.c
new file mode 100644
index 0000000..9baf77e
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-8974.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include "tsens.h"
+
+/* eeprom layout data for 8974 */
+#define BASE1_MASK		0xff
+#define S0_P1_MASK		0x3f00
+#define S1_P1_MASK		0xfc000
+#define S2_P1_MASK		0x3f00000
+#define S3_P1_MASK		0xfc000000
+#define S4_P1_MASK		0x3f
+#define S5_P1_MASK		0xfc0
+#define S6_P1_MASK		0x3f000
+#define S7_P1_MASK		0xfc0000
+#define S8_P1_MASK		0x3f000000
+#define S8_P1_MASK_BKP		0x3f
+#define S9_P1_MASK		0x3f
+#define S9_P1_MASK_BKP		0xfc0
+#define S10_P1_MASK		0xfc0
+#define S10_P1_MASK_BKP		0x3f000
+#define CAL_SEL_0_1		0xc0000000
+#define CAL_SEL_2		0x40000000
+#define CAL_SEL_SHIFT		30
+#define CAL_SEL_SHIFT_2		28
+
+#define S0_P1_SHIFT		8
+#define S1_P1_SHIFT		14
+#define S2_P1_SHIFT		20
+#define S3_P1_SHIFT		26
+#define S5_P1_SHIFT		6
+#define S6_P1_SHIFT		12
+#define S7_P1_SHIFT		18
+#define S8_P1_SHIFT		24
+#define S9_P1_BKP_SHIFT		6
+#define S10_P1_SHIFT		6
+#define S10_P1_BKP_SHIFT	12
+
+#define BASE2_SHIFT		12
+#define BASE2_BKP_SHIFT		18
+#define S0_P2_SHIFT		20
+#define S0_P2_BKP_SHIFT		26
+#define S1_P2_SHIFT		26
+#define S2_P2_BKP_SHIFT		6
+#define S3_P2_SHIFT		6
+#define S3_P2_BKP_SHIFT		12
+#define S4_P2_SHIFT		12
+#define S4_P2_BKP_SHIFT		18
+#define S5_P2_SHIFT		18
+#define S5_P2_BKP_SHIFT		24
+#define S6_P2_SHIFT		24
+#define S7_P2_BKP_SHIFT		6
+#define S8_P2_SHIFT		6
+#define S8_P2_BKP_SHIFT		12
+#define S9_P2_SHIFT		12
+#define S9_P2_BKP_SHIFT		18
+#define S10_P2_SHIFT		18
+#define S10_P2_BKP_SHIFT	24
+
+#define BASE2_MASK		0xff000
+#define BASE2_BKP_MASK		0xfc0000
+#define S0_P2_MASK		0x3f00000
+#define S0_P2_BKP_MASK		0xfc000000
+#define S1_P2_MASK		0xfc000000
+#define S1_P2_BKP_MASK		0x3f
+#define S2_P2_MASK		0x3f
+#define S2_P2_BKP_MASK		0xfc0
+#define S3_P2_MASK		0xfc0
+#define S3_P2_BKP_MASK		0x3f000
+#define S4_P2_MASK		0x3f000
+#define S4_P2_BKP_MASK		0xfc0000
+#define S5_P2_MASK		0xfc0000
+#define S5_P2_BKP_MASK		0x3f000000
+#define S6_P2_MASK		0x3f000000
+#define S6_P2_BKP_MASK		0x3f
+#define S7_P2_MASK		0x3f
+#define S7_P2_BKP_MASK		0xfc0
+#define S8_P2_MASK		0xfc0
+#define S8_P2_BKP_MASK		0x3f000
+#define S9_P2_MASK		0x3f000
+#define S9_P2_BKP_MASK		0xfc0000
+#define S10_P2_MASK		0xfc0000
+#define S10_P2_BKP_MASK		0x3f000000
+
+#define BKP_SEL			0x3
+#define BKP_REDUN_SEL		0xe0000000
+#define BKP_REDUN_SHIFT		29
+
+#define BIT_APPEND		0x3
+
+static int calibrate_8974(struct tsens_device *tmdev)
+{
+	int base1 = 0, base2 = 0, i;
+	u32 p1[11], p2[11];
+	int mode = 0;
+	u32 *calib, *bkp;
+	u32 calib_redun_sel;
+
+	calib = (u32 *)qfprom_read(tmdev->dev, "calib");
+	if (IS_ERR(calib))
+		return PTR_ERR(calib);
+
+	bkp = (u32 *)qfprom_read(tmdev->dev, "calib_backup");
+	if (IS_ERR(bkp))
+		return PTR_ERR(bkp);
+
+	calib_redun_sel =  bkp[1] & BKP_REDUN_SEL;
+	calib_redun_sel >>= BKP_REDUN_SHIFT;
+
+	if (calib_redun_sel == BKP_SEL) {
+		mode = (calib[4] & CAL_SEL_0_1) >> CAL_SEL_SHIFT;
+		mode |= (calib[5] & CAL_SEL_2) >> CAL_SEL_SHIFT_2;
+
+		switch (mode) {
+		case TWO_PT_CALIB:
+			base2 = (bkp[2] & BASE2_BKP_MASK) >> BASE2_BKP_SHIFT;
+			p2[0] = (bkp[2] & S0_P2_BKP_MASK) >> S0_P2_BKP_SHIFT;
+			p2[1] = (bkp[3] & S1_P2_BKP_MASK);
+			p2[2] = (bkp[3] & S2_P2_BKP_MASK) >> S2_P2_BKP_SHIFT;
+			p2[3] = (bkp[3] & S3_P2_BKP_MASK) >> S3_P2_BKP_SHIFT;
+			p2[4] = (bkp[3] & S4_P2_BKP_MASK) >> S4_P2_BKP_SHIFT;
+			p2[5] = (calib[4] & S5_P2_BKP_MASK) >> S5_P2_BKP_SHIFT;
+			p2[6] = (calib[5] & S6_P2_BKP_MASK);
+			p2[7] = (calib[5] & S7_P2_BKP_MASK) >> S7_P2_BKP_SHIFT;
+			p2[8] = (calib[5] & S8_P2_BKP_MASK) >> S8_P2_BKP_SHIFT;
+			p2[9] = (calib[5] & S9_P2_BKP_MASK) >> S9_P2_BKP_SHIFT;
+			p2[10] = (calib[5] & S10_P2_BKP_MASK) >> S10_P2_BKP_SHIFT;
+			/* Fall through */
+		case ONE_PT_CALIB:
+		case ONE_PT_CALIB2:
+			base1 = bkp[0] & BASE1_MASK;
+			p1[0] = (bkp[0] & S0_P1_MASK) >> S0_P1_SHIFT;
+			p1[1] = (bkp[0] & S1_P1_MASK) >> S1_P1_SHIFT;
+			p1[2] = (bkp[0] & S2_P1_MASK) >> S2_P1_SHIFT;
+			p1[3] = (bkp[0] & S3_P1_MASK) >> S3_P1_SHIFT;
+			p1[4] = (bkp[1] & S4_P1_MASK);
+			p1[5] = (bkp[1] & S5_P1_MASK) >> S5_P1_SHIFT;
+			p1[6] = (bkp[1] & S6_P1_MASK) >> S6_P1_SHIFT;
+			p1[7] = (bkp[1] & S7_P1_MASK) >> S7_P1_SHIFT;
+			p1[8] = (bkp[2] & S8_P1_MASK_BKP) >> S8_P1_SHIFT;
+			p1[9] = (bkp[2] & S9_P1_MASK_BKP) >> S9_P1_BKP_SHIFT;
+			p1[10] = (bkp[2] & S10_P1_MASK_BKP) >> S10_P1_BKP_SHIFT;
+			break;
+		}
+	} else {
+		mode = (calib[1] & CAL_SEL_0_1) >> CAL_SEL_SHIFT;
+		mode |= (calib[3] & CAL_SEL_2) >> CAL_SEL_SHIFT_2;
+
+		switch (mode) {
+		case TWO_PT_CALIB:
+			base2 = (calib[2] & BASE2_MASK) >> BASE2_SHIFT;
+			p2[0] = (calib[2] & S0_P2_MASK) >> S0_P2_SHIFT;
+			p2[1] = (calib[2] & S1_P2_MASK) >> S1_P2_SHIFT;
+			p2[2] = (calib[3] & S2_P2_MASK);
+			p2[3] = (calib[3] & S3_P2_MASK) >> S3_P2_SHIFT;
+			p2[4] = (calib[3] & S4_P2_MASK) >> S4_P2_SHIFT;
+			p2[5] = (calib[3] & S5_P2_MASK) >> S5_P2_SHIFT;
+			p2[6] = (calib[3] & S6_P2_MASK) >> S6_P2_SHIFT;
+			p2[7] = (calib[4] & S7_P2_MASK);
+			p2[8] = (calib[4] & S8_P2_MASK) >> S8_P2_SHIFT;
+			p2[9] = (calib[4] & S9_P2_MASK) >> S9_P2_SHIFT;
+			p2[10] = (calib[4] & S10_P2_MASK) >> S10_P2_SHIFT;
+			/* Fall through */
+		case ONE_PT_CALIB:
+		case ONE_PT_CALIB2:
+			base1 = calib[0] & BASE1_MASK;
+			p1[0] = (calib[0] & S0_P1_MASK) >> S0_P1_SHIFT;
+			p1[1] = (calib[0] & S1_P1_MASK) >> S1_P1_SHIFT;
+			p1[2] = (calib[0] & S2_P1_MASK) >> S2_P1_SHIFT;
+			p1[3] = (calib[0] & S3_P1_MASK) >> S3_P1_SHIFT;
+			p1[4] = (calib[1] & S4_P1_MASK);
+			p1[5] = (calib[1] & S5_P1_MASK) >> S5_P1_SHIFT;
+			p1[6] = (calib[1] & S6_P1_MASK) >> S6_P1_SHIFT;
+			p1[7] = (calib[1] & S7_P1_MASK) >> S7_P1_SHIFT;
+			p1[8] = (calib[1] & S8_P1_MASK) >> S8_P1_SHIFT;
+			p1[9] = (calib[2] & S9_P1_MASK);
+			p1[10] = (calib[2] & S10_P1_MASK) >> S10_P1_SHIFT;
+			break;
+		}
+	}
+
+	switch (mode) {
+	case ONE_PT_CALIB:
+		for (i = 0; i < tmdev->num_sensors; i++)
+			p1[i] += (base1 << 2) | BIT_APPEND;
+		break;
+	case TWO_PT_CALIB:
+		for (i = 0; i < tmdev->num_sensors; i++) {
+			p2[i] += base2;
+			p2[i] <<= 2;
+			p2[i] |= BIT_APPEND;
+		}
+		/* Fall through */
+	case ONE_PT_CALIB2:
+		for (i = 0; i < tmdev->num_sensors; i++) {
+			p1[i] += base1;
+			p1[i] <<= 2;
+			p1[i] |= BIT_APPEND;
+		}
+		break;
+	default:
+		for (i = 0; i < tmdev->num_sensors; i++)
+			p2[i] = 780;
+		p1[0] = 502;
+		p1[1] = 509;
+		p1[2] = 503;
+		p1[3] = 509;
+		p1[4] = 505;
+		p1[5] = 509;
+		p1[6] = 507;
+		p1[7] = 510;
+		p1[8] = 508;
+		p1[9] = 509;
+		p1[10] = 508;
+		break;
+	}
+
+	compute_intercept_slope(tmdev, p1, p2, mode);
+
+	return 0;
+}
+
+static const struct tsens_ops ops_8974 = {
+	.init		= init_common,
+	.calibrate	= calibrate_8974,
+	.get_temp	= get_temp_common,
+};
+
+const struct tsens_data data_8974 = {
+	.num_sensors	= 11,
+	.ops		= &ops_8974,
+};
diff --git a/drivers/thermal/qcom/tsens-8996.c b/drivers/thermal/qcom/tsens-8996.c
new file mode 100644
index 0000000..e1f7781
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-8996.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include "tsens.h"
+
+#define STATUS_OFFSET	0x10a0
+#define LAST_TEMP_MASK	0xfff
+#define STATUS_VALID_BIT	BIT(21)
+#define CODE_SIGN_BIT		BIT(11)
+
+static int get_temp_8996(struct tsens_device *tmdev, int id, int *temp)
+{
+	struct tsens_sensor *s = &tmdev->sensor[id];
+	u32 code;
+	unsigned int sensor_addr;
+	int last_temp = 0, last_temp2 = 0, last_temp3 = 0, ret;
+
+	sensor_addr = STATUS_OFFSET + s->hw_id * 4;
+	ret = regmap_read(tmdev->map, sensor_addr, &code);
+	if (ret)
+		return ret;
+	last_temp = code & LAST_TEMP_MASK;
+	if (code & STATUS_VALID_BIT)
+		goto done;
+
+	/* Try a second time */
+	ret = regmap_read(tmdev->map, sensor_addr, &code);
+	if (ret)
+		return ret;
+	if (code & STATUS_VALID_BIT) {
+		last_temp = code & LAST_TEMP_MASK;
+		goto done;
+	} else {
+		last_temp2 = code & LAST_TEMP_MASK;
+	}
+
+	/* Try a third/last time */
+	ret = regmap_read(tmdev->map, sensor_addr, &code);
+	if (ret)
+		return ret;
+	if (code & STATUS_VALID_BIT) {
+		last_temp = code & LAST_TEMP_MASK;
+		goto done;
+	} else {
+		last_temp3 = code & LAST_TEMP_MASK;
+	}
+
+	if (last_temp == last_temp2)
+		last_temp = last_temp2;
+	else if (last_temp2 == last_temp3)
+		last_temp = last_temp3;
+done:
+	/* Code sign bit is the sign extension for a negative value */
+	if (last_temp & CODE_SIGN_BIT)
+		last_temp |= ~CODE_SIGN_BIT;
+
+	/* Temperatures are in deciCelicius */
+	*temp = last_temp * 100;
+
+	return 0;
+}
+
+static const struct tsens_ops ops_8996 = {
+	.init		= init_common,
+	.get_temp	= get_temp_8996,
+};
+
+const struct tsens_data data_8996 = {
+	.num_sensors	= 13,
+	.ops		= &ops_8996,
+};
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
new file mode 100644
index 0000000..b1449ad
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-common.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include "tsens.h"
+
+#define S0_ST_ADDR		0x1030
+#define SN_ADDR_OFFSET		0x4
+#define SN_ST_TEMP_MASK		0x3ff
+#define CAL_DEGC_PT1		30
+#define CAL_DEGC_PT2		120
+#define SLOPE_FACTOR		1000
+#define SLOPE_DEFAULT		3200
+
+char *qfprom_read(struct device *dev, const char *cname)
+{
+	struct nvmem_cell *cell;
+	ssize_t data;
+	char *ret;
+
+	cell = nvmem_cell_get(dev, cname);
+	if (IS_ERR(cell))
+		return ERR_CAST(cell);
+
+	ret = nvmem_cell_read(cell, &data);
+	nvmem_cell_put(cell);
+
+	return ret;
+}
+
+/*
+ * Use this function on devices where slope and offset calculations
+ * depend on calibration data read from qfprom. On others the slope
+ * and offset values are derived from tz->tzp->slope and tz->tzp->offset
+ * resp.
+ */
+void compute_intercept_slope(struct tsens_device *tmdev, u32 *p1,
+			     u32 *p2, u32 mode)
+{
+	int i;
+	int num, den;
+
+	for (i = 0; i < tmdev->num_sensors; i++) {
+		dev_dbg(tmdev->dev,
+			"sensor%d - data_point1:%#x data_point2:%#x\n",
+			i, p1[i], p2[i]);
+
+		tmdev->sensor[i].slope = SLOPE_DEFAULT;
+		if (mode == TWO_PT_CALIB) {
+			/*
+			 * slope (m) = adc_code2 - adc_code1 (y2 - y1)/
+			 *	temp_120_degc - temp_30_degc (x2 - x1)
+			 */
+			num = p2[i] - p1[i];
+			num *= SLOPE_FACTOR;
+			den = CAL_DEGC_PT2 - CAL_DEGC_PT1;
+			tmdev->sensor[i].slope = num / den;
+		}
+
+		tmdev->sensor[i].offset = (p1[i] * SLOPE_FACTOR) -
+				(CAL_DEGC_PT1 *
+				tmdev->sensor[i].slope);
+		dev_dbg(tmdev->dev, "offset:%d\n", tmdev->sensor[i].offset);
+	}
+}
+
+static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s)
+{
+	int degc, num, den;
+
+	num = (adc_code * SLOPE_FACTOR) - s->offset;
+	den = s->slope;
+
+	if (num > 0)
+		degc = num + (den / 2);
+	else if (num < 0)
+		degc = num - (den / 2);
+	else
+		degc = num;
+
+	degc /= den;
+
+	return degc;
+}
+
+int get_temp_common(struct tsens_device *tmdev, int id, int *temp)
+{
+	struct tsens_sensor *s = &tmdev->sensor[id];
+	u32 code;
+	unsigned int sensor_addr;
+	int last_temp = 0, ret;
+
+	sensor_addr = S0_ST_ADDR + s->hw_id * SN_ADDR_OFFSET;
+	ret = regmap_read(tmdev->map, sensor_addr, &code);
+	if (ret)
+		return ret;
+	last_temp = code & SN_ST_TEMP_MASK;
+
+	*temp = code_to_degc(last_temp, s) * 1000;
+
+	return 0;
+}
+
+static const struct regmap_config tsens_config = {
+	.reg_bits	= 32,
+	.val_bits	= 32,
+	.reg_stride	= 4,
+};
+
+int __init init_common(struct tsens_device *tmdev)
+{
+	void __iomem *base;
+
+	base = of_iomap(tmdev->dev->of_node, 0);
+	if (!base)
+		return -EINVAL;
+
+	tmdev->map = devm_regmap_init_mmio(tmdev->dev, base, &tsens_config);
+	if (IS_ERR(tmdev->map)) {
+		iounmap(base);
+		return PTR_ERR(tmdev->map);
+	}
+
+	return 0;
+}
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
new file mode 100644
index 0000000..3f9fe6a
--- /dev/null
+++ b/drivers/thermal/qcom/tsens.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include "tsens.h"
+
+static int tsens_get_temp(void *data, int *temp)
+{
+	const struct tsens_sensor *s = data;
+	struct tsens_device *tmdev = s->tmdev;
+
+	return tmdev->ops->get_temp(tmdev, s->id, temp);
+}
+
+static int tsens_get_trend(void *p, int trip, enum thermal_trend *trend)
+{
+	const struct tsens_sensor *s = p;
+	struct tsens_device *tmdev = s->tmdev;
+
+	if (tmdev->ops->get_trend)
+		return  tmdev->ops->get_trend(tmdev, s->id, trend);
+
+	return -ENOTSUPP;
+}
+
+static int  __maybe_unused tsens_suspend(struct device *dev)
+{
+	struct tsens_device *tmdev = dev_get_drvdata(dev);
+
+	if (tmdev->ops && tmdev->ops->suspend)
+		return tmdev->ops->suspend(tmdev);
+
+	return 0;
+}
+
+static int __maybe_unused tsens_resume(struct device *dev)
+{
+	struct tsens_device *tmdev = dev_get_drvdata(dev);
+
+	if (tmdev->ops && tmdev->ops->resume)
+		return tmdev->ops->resume(tmdev);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tsens_pm_ops, tsens_suspend, tsens_resume);
+
+static const struct of_device_id tsens_table[] = {
+	{
+		.compatible = "qcom,msm8916-tsens",
+		.data = &data_8916,
+	}, {
+		.compatible = "qcom,msm8974-tsens",
+		.data = &data_8974,
+	}, {
+		.compatible = "qcom,msm8996-tsens",
+		.data = &data_8996,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, tsens_table);
+
+static const struct thermal_zone_of_device_ops tsens_of_ops = {
+	.get_temp = tsens_get_temp,
+	.get_trend = tsens_get_trend,
+};
+
+static int tsens_register(struct tsens_device *tmdev)
+{
+	int i;
+	struct thermal_zone_device *tzd;
+	u32 *hw_id, n = tmdev->num_sensors;
+
+	hw_id = devm_kcalloc(tmdev->dev, n, sizeof(u32), GFP_KERNEL);
+	if (!hw_id)
+		return -ENOMEM;
+
+	for (i = 0;  i < tmdev->num_sensors; i++) {
+		tmdev->sensor[i].tmdev = tmdev;
+		tmdev->sensor[i].id = i;
+		tzd = devm_thermal_zone_of_sensor_register(tmdev->dev, i,
+							   &tmdev->sensor[i],
+							   &tsens_of_ops);
+		if (IS_ERR(tzd))
+			continue;
+		tmdev->sensor[i].tzd = tzd;
+		if (tmdev->ops->enable)
+			tmdev->ops->enable(tmdev, i);
+	}
+	return 0;
+}
+
+static int tsens_probe(struct platform_device *pdev)
+{
+	int ret, i;
+	struct device *dev;
+	struct device_node *np;
+	struct tsens_sensor *s;
+	struct tsens_device *tmdev;
+	const struct tsens_data *data;
+	const struct of_device_id *id;
+
+	if (pdev->dev.of_node)
+		dev = &pdev->dev;
+	else
+		dev = pdev->dev.parent;
+
+	np = dev->of_node;
+
+	id = of_match_node(tsens_table, np);
+	if (id)
+		data = id->data;
+	else
+		data = &data_8960;
+
+	if (data->num_sensors <= 0) {
+		dev_err(dev, "invalid number of sensors\n");
+		return -EINVAL;
+	}
+
+	tmdev = devm_kzalloc(dev, sizeof(*tmdev) +
+			     data->num_sensors * sizeof(*s), GFP_KERNEL);
+	if (!tmdev)
+		return -ENOMEM;
+
+	tmdev->dev = dev;
+	tmdev->num_sensors = data->num_sensors;
+	tmdev->ops = data->ops;
+	for (i = 0;  i < tmdev->num_sensors; i++) {
+		if (data->hw_ids)
+			tmdev->sensor[i].hw_id = data->hw_ids[i];
+		else
+			tmdev->sensor[i].hw_id = i;
+	}
+
+	if (!tmdev->ops || !tmdev->ops->init || !tmdev->ops->get_temp)
+		return -EINVAL;
+
+	ret = tmdev->ops->init(tmdev);
+	if (ret < 0) {
+		dev_err(dev, "tsens init failed\n");
+		return ret;
+	}
+
+	if (tmdev->ops->calibrate) {
+		ret = tmdev->ops->calibrate(tmdev);
+		if (ret < 0) {
+			dev_err(dev, "tsens calibration failed\n");
+			return ret;
+		}
+	}
+
+	ret = tsens_register(tmdev);
+
+	platform_set_drvdata(pdev, tmdev);
+
+	return ret;
+}
+
+static int tsens_remove(struct platform_device *pdev)
+{
+	struct tsens_device *tmdev = platform_get_drvdata(pdev);
+
+	if (tmdev->ops->disable)
+		tmdev->ops->disable(tmdev);
+
+	return 0;
+}
+
+static struct platform_driver tsens_driver = {
+	.probe = tsens_probe,
+	.remove = tsens_remove,
+	.driver = {
+		.name = "qcom-tsens",
+		.pm	= &tsens_pm_ops,
+		.of_match_table = tsens_table,
+	},
+};
+module_platform_driver(tsens_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QCOM Temperature Sensor driver");
+MODULE_ALIAS("platform:qcom-tsens");
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
new file mode 100644
index 0000000..911c197
--- /dev/null
+++ b/drivers/thermal/qcom/tsens.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCOM_TSENS_H__
+#define __QCOM_TSENS_H__
+
+#define ONE_PT_CALIB		0x1
+#define ONE_PT_CALIB2		0x2
+#define TWO_PT_CALIB		0x3
+
+#include <linux/thermal.h>
+
+struct tsens_device;
+
+struct tsens_sensor {
+	struct tsens_device		*tmdev;
+	struct thermal_zone_device	*tzd;
+	int				offset;
+	int				id;
+	int				hw_id;
+	int				slope;
+	u32				status;
+};
+
+/**
+ * struct tsens_ops - operations as supported by the tsens device
+ * @init: Function to initialize the tsens device
+ * @calibrate: Function to calibrate the tsens device
+ * @get_temp: Function which returns the temp in millidegC
+ * @enable: Function to enable (clocks/power) tsens device
+ * @disable: Function to disable the tsens device
+ * @suspend: Function to suspend the tsens device
+ * @resume: Function to resume the tsens device
+ * @get_trend: Function to get the thermal/temp trend
+ */
+struct tsens_ops {
+	/* mandatory callbacks */
+	int (*init)(struct tsens_device *);
+	int (*calibrate)(struct tsens_device *);
+	int (*get_temp)(struct tsens_device *, int, int *);
+	/* optional callbacks */
+	int (*enable)(struct tsens_device *, int);
+	void (*disable)(struct tsens_device *);
+	int (*suspend)(struct tsens_device *);
+	int (*resume)(struct tsens_device *);
+	int (*get_trend)(struct tsens_device *, int, enum thermal_trend *);
+};
+
+/**
+ * struct tsens_data - tsens instance specific data
+ * @num_sensors: Max number of sensors supported by platform
+ * @ops: operations the tsens instance supports
+ * @hw_ids: Subset of sensors ids supported by platform, if not the first n
+ */
+struct tsens_data {
+	const u32		num_sensors;
+	const struct tsens_ops	*ops;
+	unsigned int		*hw_ids;
+};
+
+/* Registers to be saved/restored across a context loss */
+struct tsens_context {
+	int	threshold;
+	int	control;
+};
+
+struct tsens_device {
+	struct device			*dev;
+	u32				num_sensors;
+	struct regmap			*map;
+	struct regmap_field		*status_field;
+	struct tsens_context		ctx;
+	bool				trdy;
+	const struct tsens_ops		*ops;
+	struct tsens_sensor		sensor[0];
+};
+
+char *qfprom_read(struct device *, const char *);
+void compute_intercept_slope(struct tsens_device *, u32 *, u32 *, u32);
+int init_common(struct tsens_device *);
+int get_temp_common(struct tsens_device *, int, int *);
+
+extern const struct tsens_data data_8916, data_8974, data_8960, data_8996;
+
+#endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
new file mode 100644
index 0000000..644ba52
--- /dev/null
+++ b/drivers/thermal/qoriq_thermal.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/thermal.h>
+
+#include "thermal_core.h"
+
+#define SITES_MAX	16
+
+/*
+ * QorIQ TMU Registers
+ */
+struct qoriq_tmu_site_regs {
+	u32 tritsr;		/* Immediate Temperature Site Register */
+	u32 tratsr;		/* Average Temperature Site Register */
+	u8 res0[0x8];
+};
+
+struct qoriq_tmu_regs {
+	u32 tmr;		/* Mode Register */
+#define TMR_DISABLE	0x0
+#define TMR_ME		0x80000000
+#define TMR_ALPF	0x0c000000
+	u32 tsr;		/* Status Register */
+	u32 tmtmir;		/* Temperature measurement interval Register */
+#define TMTMIR_DEFAULT	0x0000000f
+	u8 res0[0x14];
+	u32 tier;		/* Interrupt Enable Register */
+#define TIER_DISABLE	0x0
+	u32 tidr;		/* Interrupt Detect Register */
+	u32 tiscr;		/* Interrupt Site Capture Register */
+	u32 ticscr;		/* Interrupt Critical Site Capture Register */
+	u8 res1[0x10];
+	u32 tmhtcrh;		/* High Temperature Capture Register */
+	u32 tmhtcrl;		/* Low Temperature Capture Register */
+	u8 res2[0x8];
+	u32 tmhtitr;		/* High Temperature Immediate Threshold */
+	u32 tmhtatr;		/* High Temperature Average Threshold */
+	u32 tmhtactr;	/* High Temperature Average Crit Threshold */
+	u8 res3[0x24];
+	u32 ttcfgr;		/* Temperature Configuration Register */
+	u32 tscfgr;		/* Sensor Configuration Register */
+	u8 res4[0x78];
+	struct qoriq_tmu_site_regs site[SITES_MAX];
+	u8 res5[0x9f8];
+	u32 ipbrr0;		/* IP Block Revision Register 0 */
+	u32 ipbrr1;		/* IP Block Revision Register 1 */
+	u8 res6[0x310];
+	u32 ttr0cr;		/* Temperature Range 0 Control Register */
+	u32 ttr1cr;		/* Temperature Range 1 Control Register */
+	u32 ttr2cr;		/* Temperature Range 2 Control Register */
+	u32 ttr3cr;		/* Temperature Range 3 Control Register */
+};
+
+/*
+ * Thermal zone data
+ */
+struct qoriq_tmu_data {
+	struct thermal_zone_device *tz;
+	struct qoriq_tmu_regs __iomem *regs;
+	int sensor_id;
+	bool little_endian;
+};
+
+static void tmu_write(struct qoriq_tmu_data *p, u32 val, void __iomem *addr)
+{
+	if (p->little_endian)
+		iowrite32(val, addr);
+	else
+		iowrite32be(val, addr);
+}
+
+static u32 tmu_read(struct qoriq_tmu_data *p, void __iomem *addr)
+{
+	if (p->little_endian)
+		return ioread32(addr);
+	else
+		return ioread32be(addr);
+}
+
+static int tmu_get_temp(void *p, int *temp)
+{
+	u32 val;
+	struct qoriq_tmu_data *data = p;
+
+	val = tmu_read(data, &data->regs->site[data->sensor_id].tritsr);
+	*temp = (val & 0xff) * 1000;
+
+	return 0;
+}
+
+static int qoriq_tmu_get_sensor_id(void)
+{
+	int ret, id;
+	struct of_phandle_args sensor_specs;
+	struct device_node *np, *sensor_np;
+
+	np = of_find_node_by_name(NULL, "thermal-zones");
+	if (!np)
+		return -ENODEV;
+
+	sensor_np = of_get_next_child(np, NULL);
+	ret = of_parse_phandle_with_args(sensor_np, "thermal-sensors",
+			"#thermal-sensor-cells",
+			0, &sensor_specs);
+	if (ret) {
+		of_node_put(np);
+		of_node_put(sensor_np);
+		return ret;
+	}
+
+	if (sensor_specs.args_count >= 1) {
+		id = sensor_specs.args[0];
+		WARN(sensor_specs.args_count > 1,
+				"%s: too many cells in sensor specifier %d\n",
+				sensor_specs.np->name, sensor_specs.args_count);
+	} else {
+		id = 0;
+	}
+
+	of_node_put(np);
+	of_node_put(sensor_np);
+
+	return id;
+}
+
+static int qoriq_tmu_calibration(struct platform_device *pdev)
+{
+	int i, val, len;
+	u32 range[4];
+	const u32 *calibration;
+	struct device_node *np = pdev->dev.of_node;
+	struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
+
+	if (of_property_read_u32_array(np, "fsl,tmu-range", range, 4)) {
+		dev_err(&pdev->dev, "missing calibration range.\n");
+		return -ENODEV;
+	}
+
+	/* Init temperature range registers */
+	tmu_write(data, range[0], &data->regs->ttr0cr);
+	tmu_write(data, range[1], &data->regs->ttr1cr);
+	tmu_write(data, range[2], &data->regs->ttr2cr);
+	tmu_write(data, range[3], &data->regs->ttr3cr);
+
+	calibration = of_get_property(np, "fsl,tmu-calibration", &len);
+	if (calibration == NULL || len % 8) {
+		dev_err(&pdev->dev, "invalid calibration data.\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < len; i += 8, calibration += 2) {
+		val = of_read_number(calibration, 1);
+		tmu_write(data, val, &data->regs->ttcfgr);
+		val = of_read_number(calibration + 1, 1);
+		tmu_write(data, val, &data->regs->tscfgr);
+	}
+
+	return 0;
+}
+
+static void qoriq_tmu_init_device(struct qoriq_tmu_data *data)
+{
+	/* Disable interrupt, using polling instead */
+	tmu_write(data, TIER_DISABLE, &data->regs->tier);
+
+	/* Set update_interval */
+	tmu_write(data, TMTMIR_DEFAULT, &data->regs->tmtmir);
+
+	/* Disable monitoring */
+	tmu_write(data, TMR_DISABLE, &data->regs->tmr);
+}
+
+static struct thermal_zone_of_device_ops tmu_tz_ops = {
+	.get_temp = tmu_get_temp,
+};
+
+static int qoriq_tmu_probe(struct platform_device *pdev)
+{
+	int ret;
+	const struct thermal_trip *trip;
+	struct qoriq_tmu_data *data;
+	struct device_node *np = pdev->dev.of_node;
+	u32 site = 0;
+
+	if (!np) {
+		dev_err(&pdev->dev, "Device OF-Node is NULL");
+		return -ENODEV;
+	}
+
+	data = devm_kzalloc(&pdev->dev, sizeof(struct qoriq_tmu_data),
+			    GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, data);
+
+	data->little_endian = of_property_read_bool(np, "little-endian");
+
+	data->sensor_id = qoriq_tmu_get_sensor_id();
+	if (data->sensor_id < 0) {
+		dev_err(&pdev->dev, "Failed to get sensor id\n");
+		ret = -ENODEV;
+		goto err_iomap;
+	}
+
+	data->regs = of_iomap(np, 0);
+	if (!data->regs) {
+		dev_err(&pdev->dev, "Failed to get memory region\n");
+		ret = -ENODEV;
+		goto err_iomap;
+	}
+
+	qoriq_tmu_init_device(data);	/* TMU initialization */
+
+	ret = qoriq_tmu_calibration(pdev);	/* TMU calibration */
+	if (ret < 0)
+		goto err_tmu;
+
+	data->tz = thermal_zone_of_sensor_register(&pdev->dev, data->sensor_id,
+				data, &tmu_tz_ops);
+	if (IS_ERR(data->tz)) {
+		ret = PTR_ERR(data->tz);
+		dev_err(&pdev->dev,
+			"Failed to register thermal zone device %d\n", ret);
+		goto err_tmu;
+	}
+
+	trip = of_thermal_get_trip_points(data->tz);
+
+	/* Enable monitoring */
+	site |= 0x1 << (15 - data->sensor_id);
+	tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr);
+
+	return 0;
+
+err_tmu:
+	iounmap(data->regs);
+
+err_iomap:
+	platform_set_drvdata(pdev, NULL);
+
+	return ret;
+}
+
+static int qoriq_tmu_remove(struct platform_device *pdev)
+{
+	struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
+
+	thermal_zone_of_sensor_unregister(&pdev->dev, data->tz);
+
+	/* Disable monitoring */
+	tmu_write(data, TMR_DISABLE, &data->regs->tmr);
+
+	iounmap(data->regs);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int qoriq_tmu_suspend(struct device *dev)
+{
+	u32 tmr;
+	struct qoriq_tmu_data *data = dev_get_drvdata(dev);
+
+	/* Disable monitoring */
+	tmr = tmu_read(data, &data->regs->tmr);
+	tmr &= ~TMR_ME;
+	tmu_write(data, tmr, &data->regs->tmr);
+
+	return 0;
+}
+
+static int qoriq_tmu_resume(struct device *dev)
+{
+	u32 tmr;
+	struct qoriq_tmu_data *data = dev_get_drvdata(dev);
+
+	/* Enable monitoring */
+	tmr = tmu_read(data, &data->regs->tmr);
+	tmr |= TMR_ME;
+	tmu_write(data, tmr, &data->regs->tmr);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(qoriq_tmu_pm_ops,
+			 qoriq_tmu_suspend, qoriq_tmu_resume);
+
+static const struct of_device_id qoriq_tmu_match[] = {
+	{ .compatible = "fsl,qoriq-tmu", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, qoriq_tmu_match);
+
+static struct platform_driver qoriq_tmu = {
+	.driver	= {
+		.name		= "qoriq_thermal",
+		.pm		= &qoriq_tmu_pm_ops,
+		.of_match_table	= qoriq_tmu_match,
+	},
+	.probe	= qoriq_tmu_probe,
+	.remove	= qoriq_tmu_remove,
+};
+module_platform_driver(qoriq_tmu);
+
+MODULE_AUTHOR("Jia Hongtao <hongtao.jia@nxp.com>");
+MODULE_DESCRIPTION("QorIQ Thermal Monitoring Unit driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 5f81792..73e5fee 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -31,6 +31,8 @@
 #include <linux/spinlock.h>
 #include <linux/thermal.h>
 
+#include "thermal_hwmon.h"
+
 #define IDLE_INTERVAL	5000
 
 #define COMMON_STR	0x00
@@ -75,6 +77,8 @@
 #define rcar_priv_to_dev(priv)		((priv)->common->dev)
 #define rcar_has_irq_support(priv)	((priv)->common->base)
 #define rcar_id_to_shift(priv)		((priv)->id * 8)
+#define rcar_of_data(dev)		((unsigned long)of_device_get_match_data(dev))
+#define rcar_use_of_thermal(dev)	(rcar_of_data(dev) == USE_OF_THERMAL)
 
 #define USE_OF_THERMAL	1
 static const struct of_device_id rcar_thermal_dt_ids[] = {
@@ -354,7 +358,8 @@
 		return;
 
 	if (nctemp != cctemp)
-		thermal_zone_device_update(priv->zone);
+		thermal_zone_device_update(priv->zone,
+					   THERMAL_EVENT_UNSPECIFIED);
 }
 
 static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status)
@@ -415,7 +420,10 @@
 
 	rcar_thermal_for_each_priv(priv, common) {
 		rcar_thermal_irq_disable(priv);
-		thermal_zone_device_unregister(priv->zone);
+		if (rcar_use_of_thermal(dev))
+			thermal_remove_hwmon_sysfs(priv->zone);
+		else
+			thermal_zone_device_unregister(priv->zone);
 	}
 
 	pm_runtime_put(dev);
@@ -430,7 +438,6 @@
 	struct rcar_thermal_priv *priv;
 	struct device *dev = &pdev->dev;
 	struct resource *res, *irq;
-	unsigned long of_data = (unsigned long)of_device_get_match_data(dev);
 	int mres = 0;
 	int i;
 	int ret = -ENODEV;
@@ -491,7 +498,7 @@
 		if (ret < 0)
 			goto error_unregister;
 
-		if (of_data == USE_OF_THERMAL)
+		if (rcar_use_of_thermal(dev))
 			priv->zone = devm_thermal_zone_of_sensor_register(
 						dev, i, priv,
 						&rcar_thermal_zone_of_ops);
@@ -508,6 +515,17 @@
 			goto error_unregister;
 		}
 
+		if (rcar_use_of_thermal(dev)) {
+			/*
+			 * thermal_zone doesn't enable hwmon as default,
+			 * but, enable it here to keep compatible
+			 */
+			priv->zone->tzp->no_hwmon = false;
+			ret = thermal_add_hwmon_sysfs(priv->zone);
+			if (ret)
+				goto error_unregister;
+		}
+
 		rcar_thermal_irq_enable(priv);
 
 		list_move_tail(&priv->list, &common->head);
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 5d491f1..e227a9f 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -96,6 +96,7 @@
  * @initialize: SoC special initialize tsadc controller method
  * @irq_ack: clear the interrupt
  * @get_temp: get the temperature
+ * @set_alarm_temp: set the high temperature interrupt
  * @set_tshut_temp: set the hardware-controlled shutdown temperature
  * @set_tshut_mode: set the hardware-controlled shutdown mode
  * @table: the chip-specific conversion table
@@ -119,6 +120,8 @@
 	/* Per-sensor methods */
 	int (*get_temp)(struct chip_tsadc_table table,
 			int chn, void __iomem *reg, int *temp);
+	void (*set_alarm_temp)(struct chip_tsadc_table table,
+			       int chn, void __iomem *reg, int temp);
 	void (*set_tshut_temp)(struct chip_tsadc_table table,
 			       int chn, void __iomem *reg, int temp);
 	void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
@@ -183,6 +186,7 @@
 #define TSADCV2_INT_EN				0x08
 #define TSADCV2_INT_PD				0x0c
 #define TSADCV2_DATA(chn)			(0x20 + (chn) * 0x04)
+#define TSADCV2_COMP_INT(chn)		        (0x30 + (chn) * 0x04)
 #define TSADCV2_COMP_SHUT(chn)		        (0x40 + (chn) * 0x04)
 #define TSADCV2_HIGHT_INT_DEBOUNCE		0x60
 #define TSADCV2_HIGHT_TSHUT_DEBOUNCE		0x64
@@ -207,18 +211,21 @@
 
 #define TSADCV2_HIGHT_INT_DEBOUNCE_COUNT	4
 #define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT	4
-#define TSADCV2_AUTO_PERIOD_TIME		250 /* msec */
-#define TSADCV2_AUTO_PERIOD_HT_TIME		50  /* msec */
+#define TSADCV2_AUTO_PERIOD_TIME		250 /* 250ms */
+#define TSADCV2_AUTO_PERIOD_HT_TIME		50  /* 50ms */
+#define TSADCV3_AUTO_PERIOD_TIME		1875 /* 2.5ms */
+#define TSADCV3_AUTO_PERIOD_HT_TIME		1875 /* 2.5ms */
+
 #define TSADCV2_USER_INTER_PD_SOC		0x340 /* 13 clocks */
 
 #define GRF_SARADC_TESTBIT			0x0e644
 #define GRF_TSADC_TESTBIT_L			0x0e648
 #define GRF_TSADC_TESTBIT_H			0x0e64c
 
-#define GRF_TSADC_TSEN_PD_ON			(0x30003 << 0)
-#define GRF_TSADC_TSEN_PD_OFF			(0x30000 << 0)
 #define GRF_SARADC_TESTBIT_ON			(0x10001 << 2)
 #define GRF_TSADC_TESTBIT_H_ON			(0x10001 << 2)
+#define GRF_TSADC_VCM_EN_L			(0x10001 << 7)
+#define GRF_TSADC_VCM_EN_H			(0x10001 << 7)
 
 /**
  * struct tsadc_table - code to temperature conversion table
@@ -394,13 +401,17 @@
 				   int temp)
 {
 	int high, low, mid;
+	u32 error = 0;
 
 	low = 0;
 	high = table.length - 1;
 	mid = (high + low) / 2;
 
-	if (temp < table.id[low].temp || temp > table.id[high].temp)
-		return 0;
+	/* Return mask code data when the temp is over table range */
+	if (temp < table.id[low].temp || temp > table.id[high].temp) {
+		error = table.data_mask;
+		goto exit;
+	}
 
 	while (low <= high) {
 		if (temp == table.id[mid].temp)
@@ -412,7 +423,9 @@
 		mid = (low + high) / 2;
 	}
 
-	return 0;
+exit:
+	pr_err("Invalid the conversion, error=%d\n", error);
+	return error;
 }
 
 static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
@@ -543,14 +556,34 @@
 		/* Set interleave value to workround ic time sync issue */
 		writel_relaxed(TSADCV2_USER_INTER_PD_SOC, regs +
 			       TSADCV2_USER_CON);
+
+		writel_relaxed(TSADCV2_AUTO_PERIOD_TIME,
+			       regs + TSADCV2_AUTO_PERIOD);
+		writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
+			       regs + TSADCV2_HIGHT_INT_DEBOUNCE);
+		writel_relaxed(TSADCV2_AUTO_PERIOD_HT_TIME,
+			       regs + TSADCV2_AUTO_PERIOD_HT);
+		writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
+			       regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
+
 	} else {
-		regmap_write(grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_TSEN_PD_ON);
-		mdelay(10);
-		regmap_write(grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_TSEN_PD_OFF);
+		/* Enable the voltage common mode feature */
+		regmap_write(grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_VCM_EN_L);
+		regmap_write(grf, GRF_TSADC_TESTBIT_H, GRF_TSADC_VCM_EN_H);
+
 		usleep_range(15, 100); /* The spec note says at least 15 us */
 		regmap_write(grf, GRF_SARADC_TESTBIT, GRF_SARADC_TESTBIT_ON);
 		regmap_write(grf, GRF_TSADC_TESTBIT_H, GRF_TSADC_TESTBIT_H_ON);
 		usleep_range(90, 200); /* The spec note says at least 90 us */
+
+		writel_relaxed(TSADCV3_AUTO_PERIOD_TIME,
+			       regs + TSADCV2_AUTO_PERIOD);
+		writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
+			       regs + TSADCV2_HIGHT_INT_DEBOUNCE);
+		writel_relaxed(TSADCV3_AUTO_PERIOD_HT_TIME,
+			       regs + TSADCV2_AUTO_PERIOD_HT);
+		writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
+			       regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
 	}
 
 	if (tshut_polarity == TSHUT_HIGH_ACTIVE)
@@ -559,14 +592,6 @@
 	else
 		writel_relaxed(0U & ~TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
 			       regs + TSADCV2_AUTO_CON);
-
-	writel_relaxed(TSADCV2_AUTO_PERIOD_TIME, regs + TSADCV2_AUTO_PERIOD);
-	writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
-		       regs + TSADCV2_HIGHT_INT_DEBOUNCE);
-	writel_relaxed(TSADCV2_AUTO_PERIOD_HT_TIME,
-		       regs + TSADCV2_AUTO_PERIOD_HT);
-	writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
-		       regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
 }
 
 static void rk_tsadcv2_irq_ack(void __iomem *regs)
@@ -628,12 +653,34 @@
 	return rk_tsadcv2_code_to_temp(table, val, temp);
 }
 
+static void rk_tsadcv2_alarm_temp(struct chip_tsadc_table table,
+				  int chn, void __iomem *regs, int temp)
+{
+	u32 alarm_value, int_en;
+
+	/* Make sure the value is valid */
+	alarm_value = rk_tsadcv2_temp_to_code(table, temp);
+	if (alarm_value == table.data_mask)
+		return;
+
+	writel_relaxed(alarm_value & table.data_mask,
+		       regs + TSADCV2_COMP_INT(chn));
+
+	int_en = readl_relaxed(regs + TSADCV2_INT_EN);
+	int_en |= TSADCV2_INT_SRC_EN(chn);
+	writel_relaxed(int_en, regs + TSADCV2_INT_EN);
+}
+
 static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table,
 				  int chn, void __iomem *regs, int temp)
 {
 	u32 tshut_value, val;
 
+	/* Make sure the value is valid */
 	tshut_value = rk_tsadcv2_temp_to_code(table, temp);
+	if (tshut_value == table.data_mask)
+		return;
+
 	writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));
 
 	/* TSHUT will be valid */
@@ -670,6 +717,7 @@
 	.irq_ack = rk_tsadcv3_irq_ack,
 	.control = rk_tsadcv3_control,
 	.get_temp = rk_tsadcv2_get_temp,
+	.set_alarm_temp = rk_tsadcv2_alarm_temp,
 	.set_tshut_temp = rk_tsadcv2_tshut_temp,
 	.set_tshut_mode = rk_tsadcv2_tshut_mode,
 
@@ -694,6 +742,7 @@
 	.irq_ack = rk_tsadcv2_irq_ack,
 	.control = rk_tsadcv2_control,
 	.get_temp = rk_tsadcv2_get_temp,
+	.set_alarm_temp = rk_tsadcv2_alarm_temp,
 	.set_tshut_temp = rk_tsadcv2_tshut_temp,
 	.set_tshut_mode = rk_tsadcv2_tshut_mode,
 
@@ -718,6 +767,7 @@
 	.irq_ack = rk_tsadcv3_irq_ack,
 	.control = rk_tsadcv3_control,
 	.get_temp = rk_tsadcv2_get_temp,
+	.set_alarm_temp = rk_tsadcv2_alarm_temp,
 	.set_tshut_temp = rk_tsadcv2_tshut_temp,
 	.set_tshut_mode = rk_tsadcv2_tshut_mode,
 
@@ -742,6 +792,7 @@
 	.irq_ack = rk_tsadcv2_irq_ack,
 	.control = rk_tsadcv2_control,
 	.get_temp = rk_tsadcv2_get_temp,
+	.set_alarm_temp = rk_tsadcv2_alarm_temp,
 	.set_tshut_temp = rk_tsadcv2_tshut_temp,
 	.set_tshut_mode = rk_tsadcv2_tshut_mode,
 
@@ -766,6 +817,7 @@
 	.irq_ack = rk_tsadcv3_irq_ack,
 	.control = rk_tsadcv3_control,
 	.get_temp = rk_tsadcv2_get_temp,
+	.set_alarm_temp = rk_tsadcv2_alarm_temp,
 	.set_tshut_temp = rk_tsadcv2_tshut_temp,
 	.set_tshut_mode = rk_tsadcv2_tshut_mode,
 
@@ -821,11 +873,27 @@
 	thermal->chip->irq_ack(thermal->regs);
 
 	for (i = 0; i < thermal->chip->chn_num; i++)
-		thermal_zone_device_update(thermal->sensors[i].tzd);
+		thermal_zone_device_update(thermal->sensors[i].tzd,
+					   THERMAL_EVENT_UNSPECIFIED);
 
 	return IRQ_HANDLED;
 }
 
+static int rockchip_thermal_set_trips(void *_sensor, int low, int high)
+{
+	struct rockchip_thermal_sensor *sensor = _sensor;
+	struct rockchip_thermal_data *thermal = sensor->thermal;
+	const struct rockchip_tsadc_chip *tsadc = thermal->chip;
+
+	dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n",
+		__func__, sensor->id, low, high);
+
+	tsadc->set_alarm_temp(tsadc->table,
+			      sensor->id, thermal->regs, high);
+
+	return 0;
+}
+
 static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
 {
 	struct rockchip_thermal_sensor *sensor = _sensor;
@@ -843,6 +911,7 @@
 
 static const struct thermal_zone_of_device_ops rockchip_of_thermal_ops = {
 	.get_temp = rockchip_thermal_get_temp,
+	.set_trips = rockchip_thermal_set_trips,
 };
 
 static int rockchip_configure_from_dt(struct device *dev,
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index f3ce94e..ad1186d 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -225,7 +225,7 @@
 		return;
 	}
 
-	thermal_zone_device_update(tz);
+	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
 
 	mutex_lock(&tz->lock);
 	/* Find the level for which trip happened */
diff --git a/drivers/thermal/st/st_thermal_memmap.c b/drivers/thermal/st/st_thermal_memmap.c
index fc0c9e1..91d4231 100644
--- a/drivers/thermal/st/st_thermal_memmap.c
+++ b/drivers/thermal/st/st_thermal_memmap.c
@@ -42,7 +42,8 @@
 {
 	struct st_thermal_sensor *sensor = sdata;
 
-	thermal_zone_device_update(sensor->thermal_dev);
+	thermal_zone_device_update(sensor->thermal_dev,
+				   THERMAL_EVENT_UNSPECIFIED);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/thermal/tango_thermal.c b/drivers/thermal/tango_thermal.c
index 70e0d9f..201304a 100644
--- a/drivers/thermal/tango_thermal.c
+++ b/drivers/thermal/tango_thermal.c
@@ -64,6 +64,12 @@
 	.get_temp	= tango_get_temp,
 };
 
+static void tango_thermal_init(struct tango_thermal_priv *priv)
+{
+	writel(0, priv->base + TEMPSI_CFG);
+	writel(CMD_ON, priv->base + TEMPSI_CMD);
+}
+
 static int tango_thermal_probe(struct platform_device *pdev)
 {
 	struct resource *res;
@@ -79,14 +85,22 @@
 	if (IS_ERR(priv->base))
 		return PTR_ERR(priv->base);
 
+	platform_set_drvdata(pdev, priv);
 	priv->thresh_idx = IDX_MIN;
-	writel(0, priv->base + TEMPSI_CFG);
-	writel(CMD_ON, priv->base + TEMPSI_CMD);
+	tango_thermal_init(priv);
 
 	tzdev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, priv, &ops);
 	return PTR_ERR_OR_ZERO(tzdev);
 }
 
+static int __maybe_unused tango_thermal_resume(struct device *dev)
+{
+	tango_thermal_init(dev_get_drvdata(dev));
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tango_thermal_pm, NULL, tango_thermal_resume);
+
 static const struct of_device_id tango_sensor_ids[] = {
 	{
 		.compatible = "sigma,smp8758-thermal",
@@ -99,6 +113,7 @@
 	.driver	= {
 		.name		= "tango-thermal",
 		.of_match_table	= tango_sensor_ids,
+		.pm		= &tango_thermal_pm,
 	},
 };
 
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index b865172..7d2db23 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -30,6 +30,7 @@
 
 #include <dt-bindings/thermal/tegra124-soctherm.h>
 
+#include "../thermal_core.h"
 #include "soctherm.h"
 
 #define SENSOR_CONFIG0				0
@@ -67,35 +68,228 @@
 #define READBACK_ADD_HALF			BIT(7)
 #define READBACK_NEGATE				BIT(0)
 
+/*
+ * THERMCTL_LEVEL0_GROUP_CPU is defined in soctherm.h
+ * because it will be used by tegraxxx_soctherm.c
+ */
+#define THERMCTL_LVL0_CPU0_EN_MASK		BIT(8)
+#define THERMCTL_LVL0_CPU0_CPU_THROT_MASK	(0x3 << 5)
+#define THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT	0x1
+#define THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY	0x2
+#define THERMCTL_LVL0_CPU0_GPU_THROT_MASK	(0x3 << 3)
+#define THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT	0x1
+#define THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY	0x2
+#define THERMCTL_LVL0_CPU0_MEM_THROT_MASK	BIT(2)
+#define THERMCTL_LVL0_CPU0_STATUS_MASK		0x3
+
+#define THERMCTL_LVL0_UP_STATS			0x10
+#define THERMCTL_LVL0_DN_STATS			0x14
+
+#define THERMCTL_STATS_CTL			0x94
+#define STATS_CTL_CLR_DN			0x8
+#define STATS_CTL_EN_DN				0x4
+#define STATS_CTL_CLR_UP			0x2
+#define STATS_CTL_EN_UP				0x1
+
+#define THROT_GLOBAL_CFG			0x400
+#define THROT_GLOBAL_ENB_MASK			BIT(0)
+
+#define CPU_PSKIP_STATUS			0x418
+#define XPU_PSKIP_STATUS_M_MASK			(0xff << 12)
+#define XPU_PSKIP_STATUS_N_MASK			(0xff << 4)
+#define XPU_PSKIP_STATUS_SW_OVERRIDE_MASK	BIT(1)
+#define XPU_PSKIP_STATUS_ENABLED_MASK		BIT(0)
+
+#define THROT_PRIORITY_LOCK			0x424
+#define THROT_PRIORITY_LOCK_PRIORITY_MASK	0xff
+
+#define THROT_STATUS				0x428
+#define THROT_STATUS_BREACH_MASK		BIT(12)
+#define THROT_STATUS_STATE_MASK			(0xff << 4)
+#define THROT_STATUS_ENABLED_MASK		BIT(0)
+
+#define THROT_PSKIP_CTRL_LITE_CPU		0x430
+#define THROT_PSKIP_CTRL_ENABLE_MASK            BIT(31)
+#define THROT_PSKIP_CTRL_DIVIDEND_MASK          (0xff << 8)
+#define THROT_PSKIP_CTRL_DIVISOR_MASK           0xff
+#define THROT_PSKIP_CTRL_VECT_GPU_MASK          (0x7 << 16)
+#define THROT_PSKIP_CTRL_VECT_CPU_MASK          (0x7 << 8)
+#define THROT_PSKIP_CTRL_VECT2_CPU_MASK         0x7
+
+#define THROT_VECT_NONE				0x0 /* 3'b000 */
+#define THROT_VECT_LOW				0x1 /* 3'b001 */
+#define THROT_VECT_MED				0x3 /* 3'b011 */
+#define THROT_VECT_HIGH				0x7 /* 3'b111 */
+
+#define THROT_PSKIP_RAMP_LITE_CPU		0x434
+#define THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK	BIT(31)
+#define THROT_PSKIP_RAMP_DURATION_MASK		(0xffff << 8)
+#define THROT_PSKIP_RAMP_STEP_MASK		0xff
+
+#define THROT_PRIORITY_LITE			0x444
+#define THROT_PRIORITY_LITE_PRIO_MASK		0xff
+
+#define THROT_DELAY_LITE			0x448
+#define THROT_DELAY_LITE_DELAY_MASK		0xff
+
+/* car register offsets needed for enabling HW throttling */
+#define CAR_SUPER_CCLKG_DIVIDER			0x36c
+#define CDIVG_USE_THERM_CONTROLS_MASK		BIT(30)
+
+/* ccroc register offsets needed for enabling HW throttling for Tegra132 */
+#define CCROC_SUPER_CCLKG_DIVIDER		0x024
+
+#define CCROC_GLOBAL_CFG			0x148
+
+#define CCROC_THROT_PSKIP_RAMP_CPU		0x150
+#define CCROC_THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK	BIT(31)
+#define CCROC_THROT_PSKIP_RAMP_DURATION_MASK	(0xffff << 8)
+#define CCROC_THROT_PSKIP_RAMP_STEP_MASK	0xff
+
+#define CCROC_THROT_PSKIP_CTRL_CPU		0x154
+#define CCROC_THROT_PSKIP_CTRL_ENB_MASK		BIT(31)
+#define CCROC_THROT_PSKIP_CTRL_DIVIDEND_MASK	(0xff << 8)
+#define CCROC_THROT_PSKIP_CTRL_DIVISOR_MASK	0xff
+
 /* get val from register(r) mask bits(m) */
 #define REG_GET_MASK(r, m)	(((r) & (m)) >> (ffs(m) - 1))
 /* set val(v) to mask bits(m) of register(r) */
 #define REG_SET_MASK(r, m, v)	(((r) & ~(m)) | \
 				 (((v) & (m >> (ffs(m) - 1))) << (ffs(m) - 1)))
 
+/* get dividend from the depth */
+#define THROT_DEPTH_DIVIDEND(depth)	((256 * (100 - (depth)) / 100) - 1)
+
+/* get THROT_PSKIP_xxx offset per LIGHT/HEAVY throt and CPU/GPU dev */
+#define THROT_OFFSET			0x30
+#define THROT_PSKIP_CTRL(throt, dev)	(THROT_PSKIP_CTRL_LITE_CPU + \
+					(THROT_OFFSET * throt) + (8 * dev))
+#define THROT_PSKIP_RAMP(throt, dev)	(THROT_PSKIP_RAMP_LITE_CPU + \
+					(THROT_OFFSET * throt) + (8 * dev))
+
+/* get THROT_xxx_CTRL offset per LIGHT/HEAVY throt */
+#define THROT_PRIORITY_CTRL(throt)	(THROT_PRIORITY_LITE + \
+					(THROT_OFFSET * throt))
+#define THROT_DELAY_CTRL(throt)		(THROT_DELAY_LITE + \
+					(THROT_OFFSET * throt))
+
+/* get CCROC_THROT_PSKIP_xxx offset per HIGH/MED/LOW vect*/
+#define CCROC_THROT_OFFSET			0x0c
+#define CCROC_THROT_PSKIP_CTRL_CPU_REG(vect)    (CCROC_THROT_PSKIP_CTRL_CPU + \
+						(CCROC_THROT_OFFSET * vect))
+#define CCROC_THROT_PSKIP_RAMP_CPU_REG(vect)    (CCROC_THROT_PSKIP_RAMP_CPU + \
+						(CCROC_THROT_OFFSET * vect))
+
+/* get THERMCTL_LEVELx offset per CPU/GPU/MEM/TSENSE rg and LEVEL0~3 lv */
+#define THERMCTL_LVL_REGS_SIZE		0x20
+#define THERMCTL_LVL_REG(rg, lv)	((rg) + ((lv) * THERMCTL_LVL_REGS_SIZE))
+
 static const int min_low_temp = -127000;
 static const int max_high_temp = 127000;
 
+enum soctherm_throttle_id {
+	THROTTLE_LIGHT = 0,
+	THROTTLE_HEAVY,
+	THROTTLE_SIZE,
+};
+
+enum soctherm_throttle_dev_id {
+	THROTTLE_DEV_CPU = 0,
+	THROTTLE_DEV_GPU,
+	THROTTLE_DEV_SIZE,
+};
+
+static const char *const throt_names[] = {
+	[THROTTLE_LIGHT] = "light",
+	[THROTTLE_HEAVY] = "heavy",
+};
+
+struct tegra_soctherm;
 struct tegra_thermctl_zone {
 	void __iomem *reg;
 	struct device *dev;
+	struct tegra_soctherm *ts;
 	struct thermal_zone_device *tz;
 	const struct tegra_tsensor_group *sg;
 };
 
+struct soctherm_throt_cfg {
+	const char *name;
+	unsigned int id;
+	u8 priority;
+	u8 cpu_throt_level;
+	u32 cpu_throt_depth;
+	struct thermal_cooling_device *cdev;
+	bool init;
+};
+
 struct tegra_soctherm {
 	struct reset_control *reset;
 	struct clk *clock_tsensor;
 	struct clk *clock_soctherm;
 	void __iomem *regs;
-	struct thermal_zone_device **thermctl_tzs;
+	void __iomem *clk_regs;
+	void __iomem *ccroc_regs;
 
 	u32 *calib;
+	struct thermal_zone_device **thermctl_tzs;
 	struct tegra_soctherm_soc *soc;
 
+	struct soctherm_throt_cfg throt_cfgs[THROTTLE_SIZE];
+
 	struct dentry *debugfs_dir;
 };
 
+/**
+ * clk_writel() - writes a value to a CAR register
+ * @ts: pointer to a struct tegra_soctherm
+ * @v: the value to write
+ * @reg: the register offset
+ *
+ * Writes @v to @reg.  No return value.
+ */
+static inline void clk_writel(struct tegra_soctherm *ts, u32 value, u32 reg)
+{
+	writel(value, (ts->clk_regs + reg));
+}
+
+/**
+ * clk_readl() - reads specified register from CAR IP block
+ * @ts: pointer to a struct tegra_soctherm
+ * @reg: register address to be read
+ *
+ * Return: the value of the register
+ */
+static inline u32 clk_readl(struct tegra_soctherm *ts, u32 reg)
+{
+	return readl(ts->clk_regs + reg);
+}
+
+/**
+ * ccroc_writel() - writes a value to a CCROC register
+ * @ts: pointer to a struct tegra_soctherm
+ * @v: the value to write
+ * @reg: the register offset
+ *
+ * Writes @v to @reg.  No return value.
+ */
+static inline void ccroc_writel(struct tegra_soctherm *ts, u32 value, u32 reg)
+{
+	writel(value, (ts->ccroc_regs + reg));
+}
+
+/**
+ * ccroc_readl() - reads specified register from CCROC IP block
+ * @ts: pointer to a struct tegra_soctherm
+ * @reg: register address to be read
+ *
+ * Return: the value of the register
+ */
+static inline u32 ccroc_readl(struct tegra_soctherm *ts, u32 reg)
+{
+	return readl(ts->ccroc_regs + reg);
+}
+
 static void enable_tsensor(struct tegra_soctherm *tegra, unsigned int i)
 {
 	const struct tegra_tsensor *sensor = &tegra->soc->tsensors[i];
@@ -150,11 +344,17 @@
 static int
 thermtrip_program(struct device *dev, const struct tegra_tsensor_group *sg,
 		  int trip_temp);
+static int
+throttrip_program(struct device *dev, const struct tegra_tsensor_group *sg,
+		  struct soctherm_throt_cfg *stc, int trip_temp);
+static struct soctherm_throt_cfg *
+find_throttle_cfg_by_name(struct tegra_soctherm *ts, const char *name);
 
 static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp)
 {
 	struct tegra_thermctl_zone *zone = data;
 	struct thermal_zone_device *tz = zone->tz;
+	struct tegra_soctherm *ts = zone->ts;
 	const struct tegra_tsensor_group *sg = zone->sg;
 	struct device *dev = zone->dev;
 	enum thermal_trip_type type;
@@ -167,10 +367,29 @@
 	if (ret)
 		return ret;
 
-	if (type != THERMAL_TRIP_CRITICAL)
-		return 0;
+	if (type == THERMAL_TRIP_CRITICAL) {
+		return thermtrip_program(dev, sg, temp);
+	} else if (type == THERMAL_TRIP_HOT) {
+		int i;
 
-	return thermtrip_program(dev, sg, temp);
+		for (i = 0; i < THROTTLE_SIZE; i++) {
+			struct thermal_cooling_device *cdev;
+			struct soctherm_throt_cfg *stc;
+
+			if (!ts->throt_cfgs[i].init)
+				continue;
+
+			cdev = ts->throt_cfgs[i].cdev;
+			if (get_thermal_instance(tz, cdev, trip))
+				stc = find_throttle_cfg_by_name(ts, cdev->type);
+			else
+				continue;
+
+			return throttrip_program(dev, sg, stc, temp);
+		}
+	}
+
+	return 0;
 }
 
 static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
@@ -238,14 +457,110 @@
 }
 
 /**
+ * throttrip_program() - Configures the hardware to throttle the
+ * pulse if a given sensor group reaches a given temperature
+ * @dev: ptr to the struct device for the SOC_THERM IP block
+ * @sg: pointer to the sensor group to set the thermtrip temperature for
+ * @stc: pointer to the throttle need to be triggered
+ * @trip_temp: the temperature in millicelsius to trigger the thermal trip at
+ *
+ * Sets the thermal trip threshold and throttle event of the given sensor
+ * group. If this threshold is crossed, the hardware will trigger the
+ * throttle.
+ *
+ * Note that, although @trip_temp is specified in millicelsius, the
+ * hardware is programmed in degrees Celsius.
+ *
+ * Return: 0 upon success, or %-EINVAL upon failure.
+ */
+static int throttrip_program(struct device *dev,
+			     const struct tegra_tsensor_group *sg,
+			     struct soctherm_throt_cfg *stc,
+			     int trip_temp)
+{
+	struct tegra_soctherm *ts = dev_get_drvdata(dev);
+	int temp, cpu_throt, gpu_throt;
+	unsigned int throt;
+	u32 r, reg_off;
+
+	if (!dev || !sg || !stc || !stc->init)
+		return -EINVAL;
+
+	temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain;
+
+	/* Hardcode LIGHT on LEVEL1 and HEAVY on LEVEL2 */
+	throt = stc->id;
+	reg_off = THERMCTL_LVL_REG(sg->thermctl_lvl0_offset, throt + 1);
+
+	if (throt == THROTTLE_LIGHT) {
+		cpu_throt = THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT;
+		gpu_throt = THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT;
+	} else {
+		cpu_throt = THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY;
+		gpu_throt = THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY;
+		if (throt != THROTTLE_HEAVY)
+			dev_warn(dev,
+				 "invalid throt id %d - assuming HEAVY",
+				 throt);
+	}
+
+	r = readl(ts->regs + reg_off);
+	r = REG_SET_MASK(r, sg->thermctl_lvl0_up_thresh_mask, temp);
+	r = REG_SET_MASK(r, sg->thermctl_lvl0_dn_thresh_mask, temp);
+	r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_CPU_THROT_MASK, cpu_throt);
+	r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_GPU_THROT_MASK, gpu_throt);
+	r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 1);
+	writel(r, ts->regs + reg_off);
+
+	return 0;
+}
+
+static struct soctherm_throt_cfg *
+find_throttle_cfg_by_name(struct tegra_soctherm *ts, const char *name)
+{
+	unsigned int i;
+
+	for (i = 0; ts->throt_cfgs[i].name; i++)
+		if (!strcmp(ts->throt_cfgs[i].name, name))
+			return &ts->throt_cfgs[i];
+
+	return NULL;
+}
+
+static int get_hot_temp(struct thermal_zone_device *tz, int *trip, int *temp)
+{
+	int ntrips, i, ret;
+	enum thermal_trip_type type;
+
+	ntrips = of_thermal_get_ntrips(tz);
+	if (ntrips <= 0)
+		return -EINVAL;
+
+	for (i = 0; i < ntrips; i++) {
+		ret = tz->ops->get_trip_type(tz, i, &type);
+		if (ret)
+			return -EINVAL;
+		if (type == THERMAL_TRIP_HOT) {
+			ret = tz->ops->get_trip_temp(tz, i, temp);
+			if (!ret)
+				*trip = i;
+
+			return ret;
+		}
+	}
+
+	return -EINVAL;
+}
+
+/**
  * tegra_soctherm_set_hwtrips() - set HW trip point from DT data
  * @dev: struct device * of the SOC_THERM instance
  *
  * Configure the SOC_THERM HW trip points, setting "THERMTRIP"
- * trip points , using "critical" type trip_temp from thermal
- * zone.
- * After they have been configured, THERMTRIP will take action
- * when the configured SoC thermal sensor group reaches a
+ * "THROTTLE" trip points , using "critical" or "hot" type trip_temp
+ * from thermal zone.
+ * After they have been configured, THERMTRIP or THROTTLE will take
+ * action when the configured SoC thermal sensor group reaches a
  * certain temperature.
  *
  * Return: 0 upon success, or a negative error code on failure.
@@ -254,19 +569,24 @@
  * THERMTRIP has been enabled successfully when a message similar to
  * this one appears on the serial console:
  * "thermtrip: will shut down when sensor group XXX reaches YYYYYY mC"
+ * THROTTLE has been enabled successfully when a message similar to
+ * this one appears on the serial console:
+ * ""throttrip: will throttle when sensor group XXX reaches YYYYYY mC"
  */
 static int tegra_soctherm_set_hwtrips(struct device *dev,
 				      const struct tegra_tsensor_group *sg,
 				      struct thermal_zone_device *tz)
 {
-	int temperature;
+	struct tegra_soctherm *ts = dev_get_drvdata(dev);
+	struct soctherm_throt_cfg *stc;
+	int i, trip, temperature;
 	int ret;
 
 	ret = tz->ops->get_crit_temp(tz, &temperature);
 	if (ret) {
 		dev_warn(dev, "thermtrip: %s: missing critical temperature\n",
 			 sg->name);
-		return ret;
+		goto set_throttle;
 	}
 
 	ret = thermtrip_program(dev, sg, temperature);
@@ -280,6 +600,43 @@
 		 "thermtrip: will shut down when %s reaches %d mC\n",
 		 sg->name, temperature);
 
+set_throttle:
+	ret = get_hot_temp(tz, &trip, &temperature);
+	if (ret) {
+		dev_warn(dev, "throttrip: %s: missing hot temperature\n",
+			 sg->name);
+		return 0;
+	}
+
+	for (i = 0; i < THROTTLE_SIZE; i++) {
+		struct thermal_cooling_device *cdev;
+
+		if (!ts->throt_cfgs[i].init)
+			continue;
+
+		cdev = ts->throt_cfgs[i].cdev;
+		if (get_thermal_instance(tz, cdev, trip))
+			stc = find_throttle_cfg_by_name(ts, cdev->type);
+		else
+			continue;
+
+		ret = throttrip_program(dev, sg, stc, temperature);
+		if (ret) {
+			dev_err(dev, "throttrip: %s: error during enable\n",
+				sg->name);
+			return ret;
+		}
+
+		dev_info(dev,
+			 "throttrip: will throttle when %s reaches %d mC\n",
+			 sg->name, temperature);
+		break;
+	}
+
+	if (i == THROTTLE_SIZE)
+		dev_warn(dev, "throttrip: %s: missing throttle cdev\n",
+			 sg->name);
+
 	return 0;
 }
 
@@ -291,7 +648,7 @@
 	const struct tegra_tsensor *tsensors = ts->soc->tsensors;
 	const struct tegra_tsensor_group **ttgs = ts->soc->ttgs;
 	u32 r, state;
-	int i;
+	int i, level;
 
 	seq_puts(s, "-----TSENSE (convert HW)-----\n");
 
@@ -365,6 +722,81 @@
 	state = REG_GET_MASK(r, SENSOR_TEMP2_MEM_TEMP_MASK);
 	seq_printf(s, " MEM(%d)\n", translate_temp(state));
 
+	for (i = 0; i < ts->soc->num_ttgs; i++) {
+		seq_printf(s, "%s:\n", ttgs[i]->name);
+		for (level = 0; level < 4; level++) {
+			s32 v;
+			u32 mask;
+			u16 off = ttgs[i]->thermctl_lvl0_offset;
+
+			r = readl(ts->regs + THERMCTL_LVL_REG(off, level));
+
+			mask = ttgs[i]->thermctl_lvl0_up_thresh_mask;
+			state = REG_GET_MASK(r, mask);
+			v = sign_extend32(state, ts->soc->bptt - 1);
+			v *= ts->soc->thresh_grain;
+			seq_printf(s, "   %d: Up/Dn(%d /", level, v);
+
+			mask = ttgs[i]->thermctl_lvl0_dn_thresh_mask;
+			state = REG_GET_MASK(r, mask);
+			v = sign_extend32(state, ts->soc->bptt - 1);
+			v *= ts->soc->thresh_grain;
+			seq_printf(s, "%d ) ", v);
+
+			mask = THERMCTL_LVL0_CPU0_EN_MASK;
+			state = REG_GET_MASK(r, mask);
+			seq_printf(s, "En(%d) ", state);
+
+			mask = THERMCTL_LVL0_CPU0_CPU_THROT_MASK;
+			state = REG_GET_MASK(r, mask);
+			seq_puts(s, "CPU Throt");
+			if (!state)
+				seq_printf(s, "(%s) ", "none");
+			else if (state == THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT)
+				seq_printf(s, "(%s) ", "L");
+			else if (state == THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY)
+				seq_printf(s, "(%s) ", "H");
+			else
+				seq_printf(s, "(%s) ", "H+L");
+
+			mask = THERMCTL_LVL0_CPU0_GPU_THROT_MASK;
+			state = REG_GET_MASK(r, mask);
+			seq_puts(s, "GPU Throt");
+			if (!state)
+				seq_printf(s, "(%s) ", "none");
+			else if (state == THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT)
+				seq_printf(s, "(%s) ", "L");
+			else if (state == THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY)
+				seq_printf(s, "(%s) ", "H");
+			else
+				seq_printf(s, "(%s) ", "H+L");
+
+			mask = THERMCTL_LVL0_CPU0_STATUS_MASK;
+			state = REG_GET_MASK(r, mask);
+			seq_printf(s, "Status(%s)\n",
+				   state == 0 ? "LO" :
+				   state == 1 ? "In" :
+				   state == 2 ? "Res" : "HI");
+		}
+	}
+
+	r = readl(ts->regs + THERMCTL_STATS_CTL);
+	seq_printf(s, "STATS: Up(%s) Dn(%s)\n",
+		   r & STATS_CTL_EN_UP ? "En" : "--",
+		   r & STATS_CTL_EN_DN ? "En" : "--");
+
+	for (level = 0; level < 4; level++) {
+		u16 off;
+
+		off = THERMCTL_LVL0_UP_STATS;
+		r = readl(ts->regs + THERMCTL_LVL_REG(off, level));
+		seq_printf(s, "  Level_%d Up(%d) ", level, r);
+
+		off = THERMCTL_LVL0_DN_STATS;
+		r = readl(ts->regs + THERMCTL_LVL_REG(off, level));
+		seq_printf(s, "Dn(%d)\n", r);
+	}
+
 	r = readl(ts->regs + THERMCTL_THERMTRIP_CTL);
 	state = REG_GET_MASK(r, ttgs[0]->thermtrip_any_en_mask);
 	seq_printf(s, "Thermtrip Any En(%d)\n", state);
@@ -376,6 +808,32 @@
 		seq_printf(s, "Thresh(%d)\n", state);
 	}
 
+	r = readl(ts->regs + THROT_GLOBAL_CFG);
+	seq_puts(s, "\n");
+	seq_printf(s, "GLOBAL THROTTLE CONFIG: 0x%08x\n", r);
+
+	seq_puts(s, "---------------------------------------------------\n");
+	r = readl(ts->regs + THROT_STATUS);
+	state = REG_GET_MASK(r, THROT_STATUS_BREACH_MASK);
+	seq_printf(s, "THROT STATUS: breach(%d) ", state);
+	state = REG_GET_MASK(r, THROT_STATUS_STATE_MASK);
+	seq_printf(s, "state(%d) ", state);
+	state = REG_GET_MASK(r, THROT_STATUS_ENABLED_MASK);
+	seq_printf(s, "enabled(%d)\n", state);
+
+	r = readl(ts->regs + CPU_PSKIP_STATUS);
+	if (ts->soc->use_ccroc) {
+		state = REG_GET_MASK(r, XPU_PSKIP_STATUS_ENABLED_MASK);
+		seq_printf(s, "CPU PSKIP STATUS: enabled(%d)\n", state);
+	} else {
+		state = REG_GET_MASK(r, XPU_PSKIP_STATUS_M_MASK);
+		seq_printf(s, "CPU PSKIP STATUS: M(%d) ", state);
+		state = REG_GET_MASK(r, XPU_PSKIP_STATUS_N_MASK);
+		seq_printf(s, "N(%d) ", state);
+		state = REG_GET_MASK(r, XPU_PSKIP_STATUS_ENABLED_MASK);
+		seq_printf(s, "enabled(%d)\n", state);
+	}
+
 	return 0;
 }
 
@@ -449,6 +907,326 @@
 	return 0;
 }
 
+static int throt_get_cdev_max_state(struct thermal_cooling_device *cdev,
+				    unsigned long *max_state)
+{
+	*max_state = 1;
+	return 0;
+}
+
+static int throt_get_cdev_cur_state(struct thermal_cooling_device *cdev,
+				    unsigned long *cur_state)
+{
+	struct tegra_soctherm *ts = cdev->devdata;
+	u32 r;
+
+	r = readl(ts->regs + THROT_STATUS);
+	if (REG_GET_MASK(r, THROT_STATUS_STATE_MASK))
+		*cur_state = 1;
+	else
+		*cur_state = 0;
+
+	return 0;
+}
+
+static int throt_set_cdev_state(struct thermal_cooling_device *cdev,
+				unsigned long cur_state)
+{
+	return 0;
+}
+
+static struct thermal_cooling_device_ops throt_cooling_ops = {
+	.get_max_state = throt_get_cdev_max_state,
+	.get_cur_state = throt_get_cdev_cur_state,
+	.set_cur_state = throt_set_cdev_state,
+};
+
+/**
+ * soctherm_init_hw_throt_cdev() - Parse the HW throttle configurations
+ * and register them as cooling devices.
+ */
+static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct tegra_soctherm *ts = dev_get_drvdata(dev);
+	struct device_node *np_stc, *np_stcc;
+	const char *name;
+	u32 val;
+	int i, r;
+
+	for (i = 0; i < THROTTLE_SIZE; i++) {
+		ts->throt_cfgs[i].name = throt_names[i];
+		ts->throt_cfgs[i].id = i;
+		ts->throt_cfgs[i].init = false;
+	}
+
+	np_stc = of_get_child_by_name(dev->of_node, "throttle-cfgs");
+	if (!np_stc) {
+		dev_info(dev,
+			 "throttle-cfg: no throttle-cfgs - not enabling\n");
+		return;
+	}
+
+	for_each_child_of_node(np_stc, np_stcc) {
+		struct soctherm_throt_cfg *stc;
+		struct thermal_cooling_device *tcd;
+
+		name = np_stcc->name;
+		stc = find_throttle_cfg_by_name(ts, name);
+		if (!stc) {
+			dev_err(dev,
+				"throttle-cfg: could not find %s\n", name);
+			continue;
+		}
+
+		r = of_property_read_u32(np_stcc, "nvidia,priority", &val);
+		if (r) {
+			dev_info(dev,
+				 "throttle-cfg: %s: missing priority\n", name);
+			continue;
+		}
+		stc->priority = val;
+
+		if (ts->soc->use_ccroc) {
+			r = of_property_read_u32(np_stcc,
+						 "nvidia,cpu-throt-level",
+						 &val);
+			if (r) {
+				dev_info(dev,
+					 "throttle-cfg: %s: missing cpu-throt-level\n",
+					 name);
+				continue;
+			}
+			stc->cpu_throt_level = val;
+		} else {
+			r = of_property_read_u32(np_stcc,
+						 "nvidia,cpu-throt-percent",
+						 &val);
+			if (r) {
+				dev_info(dev,
+					 "throttle-cfg: %s: missing cpu-throt-percent\n",
+					 name);
+				continue;
+			}
+			stc->cpu_throt_depth = val;
+		}
+
+		tcd = thermal_of_cooling_device_register(np_stcc,
+							 (char *)name, ts,
+							 &throt_cooling_ops);
+		of_node_put(np_stcc);
+		if (IS_ERR_OR_NULL(tcd)) {
+			dev_err(dev,
+				"throttle-cfg: %s: failed to register cooling device\n",
+				name);
+			continue;
+		}
+
+		stc->cdev = tcd;
+		stc->init = true;
+	}
+
+	of_node_put(np_stc);
+}
+
+/**
+ * throttlectl_cpu_level_cfg() - programs CCROC NV_THERM level config
+ * @level: describing the level LOW/MED/HIGH of throttling
+ *
+ * It's necessary to set up the CPU-local CCROC NV_THERM instance with
+ * the M/N values desired for each level. This function does this.
+ *
+ * This function pre-programs the CCROC NV_THERM levels in terms of
+ * pre-configured "Low", "Medium" or "Heavy" throttle levels which are
+ * mapped to THROT_LEVEL_LOW, THROT_LEVEL_MED and THROT_LEVEL_HVY.
+ */
+static void throttlectl_cpu_level_cfg(struct tegra_soctherm *ts, int level)
+{
+	u8 depth, dividend;
+	u32 r;
+
+	switch (level) {
+	case TEGRA_SOCTHERM_THROT_LEVEL_LOW:
+		depth = 50;
+		break;
+	case TEGRA_SOCTHERM_THROT_LEVEL_MED:
+		depth = 75;
+		break;
+	case TEGRA_SOCTHERM_THROT_LEVEL_HIGH:
+		depth = 80;
+		break;
+	case TEGRA_SOCTHERM_THROT_LEVEL_NONE:
+		return;
+	default:
+		return;
+	}
+
+	dividend = THROT_DEPTH_DIVIDEND(depth);
+
+	/* setup PSKIP in ccroc nv_therm registers */
+	r = ccroc_readl(ts, CCROC_THROT_PSKIP_RAMP_CPU_REG(level));
+	r = REG_SET_MASK(r, CCROC_THROT_PSKIP_RAMP_DURATION_MASK, 0xff);
+	r = REG_SET_MASK(r, CCROC_THROT_PSKIP_RAMP_STEP_MASK, 0xf);
+	ccroc_writel(ts, r, CCROC_THROT_PSKIP_RAMP_CPU_REG(level));
+
+	r = ccroc_readl(ts, CCROC_THROT_PSKIP_CTRL_CPU_REG(level));
+	r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_ENB_MASK, 1);
+	r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_DIVIDEND_MASK, dividend);
+	r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_DIVISOR_MASK, 0xff);
+	ccroc_writel(ts, r, CCROC_THROT_PSKIP_CTRL_CPU_REG(level));
+}
+
+/**
+ * throttlectl_cpu_level_select() - program CPU pulse skipper config
+ * @throt: the LIGHT/HEAVY of throttle event id
+ *
+ * Pulse skippers are used to throttle clock frequencies.  This
+ * function programs the pulse skippers based on @throt and platform
+ * data.  This function is used on SoCs which have CPU-local pulse
+ * skipper control, such as T13x. It programs soctherm's interface to
+ * Denver:CCROC NV_THERM in terms of Low, Medium and HIGH throttling
+ * vectors. PSKIP_BYPASS mode is set as required per HW spec.
+ */
+static void throttlectl_cpu_level_select(struct tegra_soctherm *ts,
+					 enum soctherm_throttle_id throt)
+{
+	u32 r, throt_vect;
+
+	/* Denver:CCROC NV_THERM interface N:3 Mapping */
+	switch (ts->throt_cfgs[throt].cpu_throt_level) {
+	case TEGRA_SOCTHERM_THROT_LEVEL_LOW:
+		throt_vect = THROT_VECT_LOW;
+		break;
+	case TEGRA_SOCTHERM_THROT_LEVEL_MED:
+		throt_vect = THROT_VECT_MED;
+		break;
+	case TEGRA_SOCTHERM_THROT_LEVEL_HIGH:
+		throt_vect = THROT_VECT_HIGH;
+		break;
+	default:
+		throt_vect = THROT_VECT_NONE;
+		break;
+	}
+
+	r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
+	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
+	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT_CPU_MASK, throt_vect);
+	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT2_CPU_MASK, throt_vect);
+	writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
+
+	/* bypass sequencer in soc_therm as it is programmed in ccroc */
+	r = REG_SET_MASK(0, THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK, 1);
+	writel(r, ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU));
+}
+
+/**
+ * throttlectl_cpu_mn() - program CPU pulse skipper configuration
+ * @throt: the LIGHT/HEAVY of throttle event id
+ *
+ * Pulse skippers are used to throttle clock frequencies.  This
+ * function programs the pulse skippers based on @throt and platform
+ * data.  This function is used for CPUs that have "remote" pulse
+ * skipper control, e.g., the CPU pulse skipper is controlled by the
+ * SOC_THERM IP block.  (SOC_THERM is located outside the CPU
+ * complex.)
+ */
+static void throttlectl_cpu_mn(struct tegra_soctherm *ts,
+			       enum soctherm_throttle_id throt)
+{
+	u32 r;
+	int depth;
+	u8 dividend;
+
+	depth = ts->throt_cfgs[throt].cpu_throt_depth;
+	dividend = THROT_DEPTH_DIVIDEND(depth);
+
+	r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
+	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
+	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_DIVIDEND_MASK, dividend);
+	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_DIVISOR_MASK, 0xff);
+	writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
+
+	r = readl(ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU));
+	r = REG_SET_MASK(r, THROT_PSKIP_RAMP_DURATION_MASK, 0xff);
+	r = REG_SET_MASK(r, THROT_PSKIP_RAMP_STEP_MASK, 0xf);
+	writel(r, ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU));
+}
+
+/**
+ * soctherm_throttle_program() - programs pulse skippers' configuration
+ * @throt: the LIGHT/HEAVY of the throttle event id.
+ *
+ * Pulse skippers are used to throttle clock frequencies.
+ * This function programs the pulse skippers.
+ */
+static void soctherm_throttle_program(struct tegra_soctherm *ts,
+				      enum soctherm_throttle_id throt)
+{
+	u32 r;
+	struct soctherm_throt_cfg stc = ts->throt_cfgs[throt];
+
+	if (!stc.init)
+		return;
+
+	/* Setup PSKIP parameters */
+	if (ts->soc->use_ccroc)
+		throttlectl_cpu_level_select(ts, throt);
+	else
+		throttlectl_cpu_mn(ts, throt);
+
+	r = REG_SET_MASK(0, THROT_PRIORITY_LITE_PRIO_MASK, stc.priority);
+	writel(r, ts->regs + THROT_PRIORITY_CTRL(throt));
+
+	r = REG_SET_MASK(0, THROT_DELAY_LITE_DELAY_MASK, 0);
+	writel(r, ts->regs + THROT_DELAY_CTRL(throt));
+
+	r = readl(ts->regs + THROT_PRIORITY_LOCK);
+	r = REG_GET_MASK(r, THROT_PRIORITY_LOCK_PRIORITY_MASK);
+	if (r >= stc.priority)
+		return;
+	r = REG_SET_MASK(0, THROT_PRIORITY_LOCK_PRIORITY_MASK,
+			 stc.priority);
+	writel(r, ts->regs + THROT_PRIORITY_LOCK);
+}
+
+static void tegra_soctherm_throttle(struct device *dev)
+{
+	struct tegra_soctherm *ts = dev_get_drvdata(dev);
+	u32 v;
+	int i;
+
+	/* configure LOW, MED and HIGH levels for CCROC NV_THERM */
+	if (ts->soc->use_ccroc) {
+		throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_LOW);
+		throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_MED);
+		throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_HIGH);
+	}
+
+	/* Thermal HW throttle programming */
+	for (i = 0; i < THROTTLE_SIZE; i++)
+		soctherm_throttle_program(ts, i);
+
+	v = REG_SET_MASK(0, THROT_GLOBAL_ENB_MASK, 1);
+	if (ts->soc->use_ccroc) {
+		ccroc_writel(ts, v, CCROC_GLOBAL_CFG);
+
+		v = ccroc_readl(ts, CCROC_SUPER_CCLKG_DIVIDER);
+		v = REG_SET_MASK(v, CDIVG_USE_THERM_CONTROLS_MASK, 1);
+		ccroc_writel(ts, v, CCROC_SUPER_CCLKG_DIVIDER);
+	} else {
+		writel(v, ts->regs + THROT_GLOBAL_CFG);
+
+		v = clk_readl(ts, CAR_SUPER_CCLKG_DIVIDER);
+		v = REG_SET_MASK(v, CDIVG_USE_THERM_CONTROLS_MASK, 1);
+		clk_writel(ts, v, CAR_SUPER_CCLKG_DIVIDER);
+	}
+
+	/* initialize stats collection */
+	v = STATS_CTL_CLR_DN | STATS_CTL_EN_DN |
+	    STATS_CTL_CLR_UP | STATS_CTL_EN_UP;
+	writel(v, ts->regs + THERMCTL_STATS_CTL);
+}
+
 static void soctherm_init(struct platform_device *pdev)
 {
 	struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
@@ -475,6 +1253,9 @@
 	}
 	writel(pdiv, tegra->regs + SENSOR_PDIV);
 	writel(hotspot, tegra->regs + SENSOR_HOTSPOT_OFF);
+
+	/* Configure hw throttle */
+	tegra_soctherm_throttle(&pdev->dev);
 }
 
 static const struct of_device_id tegra_soctherm_of_match[] = {
@@ -527,10 +1308,31 @@
 
 	tegra->soc = soc;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "soctherm-reg");
 	tegra->regs = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(tegra->regs))
+	if (IS_ERR(tegra->regs)) {
+		dev_err(&pdev->dev, "can't get soctherm registers");
 		return PTR_ERR(tegra->regs);
+	}
+
+	if (!tegra->soc->use_ccroc) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   "car-reg");
+		tegra->clk_regs = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(tegra->clk_regs)) {
+			dev_err(&pdev->dev, "can't get car clk registers");
+			return PTR_ERR(tegra->clk_regs);
+		}
+	} else {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   "ccroc-reg");
+		tegra->ccroc_regs = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(tegra->ccroc_regs)) {
+			dev_err(&pdev->dev, "can't get ccroc registers");
+			return PTR_ERR(tegra->ccroc_regs);
+		}
+	}
 
 	tegra->reset = devm_reset_control_get(&pdev->dev, "soctherm");
 	if (IS_ERR(tegra->reset)) {
@@ -580,6 +1382,8 @@
 	if (err)
 		return err;
 
+	soctherm_init_hw_throt_cdev(pdev);
+
 	soctherm_init(pdev);
 
 	for (i = 0; i < soc->num_ttgs; ++i) {
@@ -593,6 +1397,7 @@
 		zone->reg = tegra->regs + soc->ttgs[i]->sensor_temp_offset;
 		zone->dev = &pdev->dev;
 		zone->sg = soc->ttgs[i];
+		zone->ts = tegra;
 
 		z = devm_thermal_zone_of_sensor_register(&pdev->dev,
 							 soc->ttgs[i]->id, zone,
@@ -608,7 +1413,9 @@
 		tegra->thermctl_tzs[soc->ttgs[i]->id] = z;
 
 		/* Configure hw trip points */
-		tegra_soctherm_set_hwtrips(&pdev->dev, soc->ttgs[i], z);
+		err = tegra_soctherm_set_hwtrips(&pdev->dev, soc->ttgs[i], z);
+		if (err)
+			goto disable_clocks;
 	}
 
 	soctherm_debug_init(pdev);
@@ -661,7 +1468,12 @@
 		struct thermal_zone_device *tz;
 
 		tz = tegra->thermctl_tzs[soc->ttgs[i]->id];
-		tegra_soctherm_set_hwtrips(dev, soc->ttgs[i], tz);
+		err = tegra_soctherm_set_hwtrips(dev, soc->ttgs[i], tz);
+		if (err) {
+			dev_err(&pdev->dev,
+				"Resume failed: set hwtrips failed\n");
+			return err;
+		}
 	}
 
 	return 0;
diff --git a/drivers/thermal/tegra/soctherm.h b/drivers/thermal/tegra/soctherm.h
index 28e18ec..e96ca73 100644
--- a/drivers/thermal/tegra/soctherm.h
+++ b/drivers/thermal/tegra/soctherm.h
@@ -15,6 +15,11 @@
 #ifndef __DRIVERS_THERMAL_TEGRA_SOCTHERM_H
 #define __DRIVERS_THERMAL_TEGRA_SOCTHERM_H
 
+#define THERMCTL_LEVEL0_GROUP_CPU               0x0
+#define THERMCTL_LEVEL0_GROUP_GPU		0x4
+#define THERMCTL_LEVEL0_GROUP_MEM		0x8
+#define THERMCTL_LEVEL0_GROUP_TSENSE		0xc
+
 #define SENSOR_CONFIG2                          8
 #define SENSOR_CONFIG2_THERMA_MASK		(0xffff << 16)
 #define SENSOR_CONFIG2_THERMA_SHIFT		16
@@ -65,6 +70,9 @@
 	u32 thermtrip_enable_mask;
 	u32 thermtrip_any_en_mask;
 	u32 thermtrip_threshold_mask;
+	u16 thermctl_lvl0_offset;
+	u32 thermctl_lvl0_up_thresh_mask;
+	u32 thermctl_lvl0_dn_thresh_mask;
 };
 
 struct tegra_tsensor_configuration {
@@ -103,6 +111,8 @@
 	const unsigned int num_ttgs;
 	const struct tegra_soctherm_fuse *tfuse;
 	const int thresh_grain;
+	const unsigned int bptt;
+	const bool use_ccroc;
 };
 
 int tegra_calc_shared_calib(const struct tegra_soctherm_fuse *tfuse,
diff --git a/drivers/thermal/tegra/tegra124-soctherm.c b/drivers/thermal/tegra/tegra124-soctherm.c
index beb9d36..3676863 100644
--- a/drivers/thermal/tegra/tegra124-soctherm.c
+++ b/drivers/thermal/tegra/tegra124-soctherm.c
@@ -28,7 +28,11 @@
 #define TEGRA124_THERMTRIP_CPU_THRESH_MASK	(0xff << 8)
 #define TEGRA124_THERMTRIP_TSENSE_THRESH_MASK	0xff
 
+#define TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK	(0xff << 17)
+#define TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK	(0xff << 9)
+
 #define TEGRA124_THRESH_GRAIN			1000
+#define TEGRA124_BPTT				8
 
 static const struct tegra_tsensor_configuration tegra124_tsensor_config = {
 	.tall = 16300,
@@ -51,6 +55,9 @@
 	.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
 	.thermtrip_enable_mask = TEGRA124_THERMTRIP_CPU_EN_MASK,
 	.thermtrip_threshold_mask = TEGRA124_THERMTRIP_CPU_THRESH_MASK,
+	.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
+	.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
+	.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
 };
 
 static const struct tegra_tsensor_group tegra124_tsensor_group_gpu = {
@@ -66,6 +73,9 @@
 	.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
 	.thermtrip_enable_mask = TEGRA124_THERMTRIP_GPU_EN_MASK,
 	.thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
+	.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
+	.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
+	.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
 };
 
 static const struct tegra_tsensor_group tegra124_tsensor_group_pll = {
@@ -79,6 +89,9 @@
 	.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
 	.thermtrip_enable_mask = TEGRA124_THERMTRIP_TSENSE_EN_MASK,
 	.thermtrip_threshold_mask = TEGRA124_THERMTRIP_TSENSE_THRESH_MASK,
+	.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
+	.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
+	.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
 };
 
 static const struct tegra_tsensor_group tegra124_tsensor_group_mem = {
@@ -94,6 +107,9 @@
 	.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
 	.thermtrip_enable_mask = TEGRA124_THERMTRIP_MEM_EN_MASK,
 	.thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
+	.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
+	.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
+	.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
 };
 
 static const struct tegra_tsensor_group *tegra124_tsensor_groups[] = {
@@ -193,4 +209,6 @@
 	.num_ttgs = ARRAY_SIZE(tegra124_tsensor_groups),
 	.tfuse = &tegra124_soctherm_fuse,
 	.thresh_grain = TEGRA124_THRESH_GRAIN,
+	.bptt = TEGRA124_BPTT,
+	.use_ccroc = false,
 };
diff --git a/drivers/thermal/tegra/tegra132-soctherm.c b/drivers/thermal/tegra/tegra132-soctherm.c
index e2aa84e..97fa305 100644
--- a/drivers/thermal/tegra/tegra132-soctherm.c
+++ b/drivers/thermal/tegra/tegra132-soctherm.c
@@ -28,7 +28,11 @@
 #define TEGRA132_THERMTRIP_CPU_THRESH_MASK	(0xff << 8)
 #define TEGRA132_THERMTRIP_TSENSE_THRESH_MASK	0xff
 
+#define TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK	(0xff << 17)
+#define TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK	(0xff << 9)
+
 #define TEGRA132_THRESH_GRAIN			1000
+#define TEGRA132_BPTT				8
 
 static const struct tegra_tsensor_configuration tegra132_tsensor_config = {
 	.tall = 16300,
@@ -51,6 +55,9 @@
 	.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
 	.thermtrip_enable_mask = TEGRA132_THERMTRIP_CPU_EN_MASK,
 	.thermtrip_threshold_mask = TEGRA132_THERMTRIP_CPU_THRESH_MASK,
+	.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
+	.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
+	.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
 };
 
 static const struct tegra_tsensor_group tegra132_tsensor_group_gpu = {
@@ -66,6 +73,9 @@
 	.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
 	.thermtrip_enable_mask = TEGRA132_THERMTRIP_GPU_EN_MASK,
 	.thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK,
+	.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
+	.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
+	.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
 };
 
 static const struct tegra_tsensor_group tegra132_tsensor_group_pll = {
@@ -79,6 +89,9 @@
 	.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
 	.thermtrip_enable_mask = TEGRA132_THERMTRIP_TSENSE_EN_MASK,
 	.thermtrip_threshold_mask = TEGRA132_THERMTRIP_TSENSE_THRESH_MASK,
+	.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
+	.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
+	.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
 };
 
 static const struct tegra_tsensor_group tegra132_tsensor_group_mem = {
@@ -94,6 +107,9 @@
 	.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
 	.thermtrip_enable_mask = TEGRA132_THERMTRIP_MEM_EN_MASK,
 	.thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK,
+	.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
+	.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
+	.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
 };
 
 static const struct tegra_tsensor_group *tegra132_tsensor_groups[] = {
@@ -193,4 +209,6 @@
 	.num_ttgs = ARRAY_SIZE(tegra132_tsensor_groups),
 	.tfuse = &tegra132_soctherm_fuse,
 	.thresh_grain = TEGRA132_THRESH_GRAIN,
+	.bptt = TEGRA132_BPTT,
+	.use_ccroc = true,
 };
diff --git a/drivers/thermal/tegra/tegra210-soctherm.c b/drivers/thermal/tegra/tegra210-soctherm.c
index 19cc0ab..ad53169 100644
--- a/drivers/thermal/tegra/tegra210-soctherm.c
+++ b/drivers/thermal/tegra/tegra210-soctherm.c
@@ -29,7 +29,11 @@
 #define TEGRA210_THERMTRIP_CPU_THRESH_MASK	(0x1ff << 9)
 #define TEGRA210_THERMTRIP_TSENSE_THRESH_MASK	0x1ff
 
+#define TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK	(0x1ff << 18)
+#define TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK	(0x1ff << 9)
+
 #define TEGRA210_THRESH_GRAIN			500
+#define TEGRA210_BPTT				9
 
 static const struct tegra_tsensor_configuration tegra210_tsensor_config = {
 	.tall = 16300,
@@ -52,6 +56,9 @@
 	.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
 	.thermtrip_enable_mask = TEGRA210_THERMTRIP_CPU_EN_MASK,
 	.thermtrip_threshold_mask = TEGRA210_THERMTRIP_CPU_THRESH_MASK,
+	.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
+	.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
+	.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
 };
 
 static const struct tegra_tsensor_group tegra210_tsensor_group_gpu = {
@@ -67,6 +74,9 @@
 	.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
 	.thermtrip_enable_mask = TEGRA210_THERMTRIP_GPU_EN_MASK,
 	.thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK,
+	.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
+	.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
+	.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
 };
 
 static const struct tegra_tsensor_group tegra210_tsensor_group_pll = {
@@ -80,6 +90,9 @@
 	.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
 	.thermtrip_enable_mask = TEGRA210_THERMTRIP_TSENSE_EN_MASK,
 	.thermtrip_threshold_mask = TEGRA210_THERMTRIP_TSENSE_THRESH_MASK,
+	.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
+	.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
+	.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
 };
 
 static const struct tegra_tsensor_group tegra210_tsensor_group_mem = {
@@ -95,6 +108,9 @@
 	.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
 	.thermtrip_enable_mask = TEGRA210_THERMTRIP_MEM_EN_MASK,
 	.thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK,
+	.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
+	.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
+	.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
 };
 
 static const struct tegra_tsensor_group *tegra210_tsensor_groups[] = {
@@ -194,4 +210,6 @@
 	.num_ttgs = ARRAY_SIZE(tegra210_tsensor_groups),
 	.tfuse = &tegra210_soctherm_fuse,
 	.thresh_grain = TEGRA210_THRESH_GRAIN,
+	.bptt = TEGRA210_BPTT,
+	.use_ccroc = false,
 };
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index e2fc616..226b0b4ac 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -520,6 +520,56 @@
 }
 EXPORT_SYMBOL_GPL(thermal_zone_get_temp);
 
+void thermal_zone_set_trips(struct thermal_zone_device *tz)
+{
+	int low = -INT_MAX;
+	int high = INT_MAX;
+	int trip_temp, hysteresis;
+	int i, ret;
+
+	mutex_lock(&tz->lock);
+
+	if (!tz->ops->set_trips || !tz->ops->get_trip_hyst)
+		goto exit;
+
+	for (i = 0; i < tz->trips; i++) {
+		int trip_low;
+
+		tz->ops->get_trip_temp(tz, i, &trip_temp);
+		tz->ops->get_trip_hyst(tz, i, &hysteresis);
+
+		trip_low = trip_temp - hysteresis;
+
+		if (trip_low < tz->temperature && trip_low > low)
+			low = trip_low;
+
+		if (trip_temp > tz->temperature && trip_temp < high)
+			high = trip_temp;
+	}
+
+	/* No need to change trip points */
+	if (tz->prev_low_trip == low && tz->prev_high_trip == high)
+		goto exit;
+
+	tz->prev_low_trip = low;
+	tz->prev_high_trip = high;
+
+	dev_dbg(&tz->device,
+		"new temperature boundaries: %d < x < %d\n", low, high);
+
+	/*
+	 * Set a temperature window. When this window is left the driver
+	 * must inform the thermal core via thermal_zone_device_update.
+	 */
+	ret = tz->ops->set_trips(tz, low, high);
+	if (ret)
+		dev_err(&tz->device, "Failed to set trips: %d\n", ret);
+
+exit:
+	mutex_unlock(&tz->lock);
+}
+EXPORT_SYMBOL_GPL(thermal_zone_set_trips);
+
 static void update_temperature(struct thermal_zone_device *tz)
 {
 	int temp, ret;
@@ -557,7 +607,8 @@
 		pos->initialized = false;
 }
 
-void thermal_zone_device_update(struct thermal_zone_device *tz)
+void thermal_zone_device_update(struct thermal_zone_device *tz,
+				enum thermal_notify_event event)
 {
 	int count;
 
@@ -569,6 +620,10 @@
 
 	update_temperature(tz);
 
+	thermal_zone_set_trips(tz);
+
+	tz->notify_event = event;
+
 	for (count = 0; count < tz->trips; count++)
 		handle_thermal_trip(tz, count);
 }
@@ -579,7 +634,7 @@
 	struct thermal_zone_device *tz = container_of(work, struct
 						      thermal_zone_device,
 						      poll_queue.work);
-	thermal_zone_device_update(tz);
+	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
 }
 
 /* sys I/F for thermal zone */
@@ -703,7 +758,7 @@
 	if (ret)
 		return ret;
 
-	thermal_zone_device_update(tz);
+	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
 
 	return count;
 }
@@ -754,6 +809,9 @@
 	 */
 	ret = tz->ops->set_trip_hyst(tz, trip, temperature);
 
+	if (!ret)
+		thermal_zone_set_trips(tz);
+
 	return ret ? ret : count;
 }
 
@@ -822,7 +880,7 @@
 
 	tz->forced_passive = state;
 
-	thermal_zone_device_update(tz);
+	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
 
 	return count;
 }
@@ -913,7 +971,7 @@
 	}
 
 	if (!ret)
-		thermal_zone_device_update(tz);
+		thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
 
 	return ret ? ret : count;
 }
@@ -1509,7 +1567,8 @@
 	mutex_lock(&thermal_list_lock);
 	list_for_each_entry(pos, &thermal_tz_list, node)
 		if (atomic_cmpxchg(&pos->need_update, 1, 0))
-			thermal_zone_device_update(pos);
+			thermal_zone_device_update(pos,
+						   THERMAL_EVENT_UNSPECIFIED);
 	mutex_unlock(&thermal_list_lock);
 
 	return cdev;
@@ -1952,7 +2011,7 @@
 	thermal_zone_device_reset(tz);
 	/* Update the new thermal zone and mark it as already updated. */
 	if (atomic_cmpxchg(&tz->need_update, 1, 0))
-		thermal_zone_device_update(tz);
+		thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
 
 	return tz;
 
@@ -2069,6 +2128,36 @@
 }
 EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name);
 
+/**
+ * thermal_zone_get_slope - return the slope attribute of the thermal zone
+ * @tz: thermal zone device with the slope attribute
+ *
+ * Return: If the thermal zone device has a slope attribute, return it, else
+ * return 1.
+ */
+int thermal_zone_get_slope(struct thermal_zone_device *tz)
+{
+	if (tz && tz->tzp)
+		return tz->tzp->slope;
+	return 1;
+}
+EXPORT_SYMBOL_GPL(thermal_zone_get_slope);
+
+/**
+ * thermal_zone_get_offset - return the offset attribute of the thermal zone
+ * @tz: thermal zone device with the offset attribute
+ *
+ * Return: If the thermal zone device has a offset attribute, return it, else
+ * return 0.
+ */
+int thermal_zone_get_offset(struct thermal_zone_device *tz)
+{
+	if (tz && tz->tzp)
+		return tz->tzp->offset;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(thermal_zone_get_offset);
+
 #ifdef CONFIG_NET
 static const struct genl_multicast_group thermal_event_mcgrps[] = {
 	{ .name = THERMAL_GENL_MCAST_GROUP_NAME, },
@@ -2209,7 +2298,8 @@
 		atomic_set(&in_suspend, 0);
 		list_for_each_entry(tz, &thermal_tz_list, node) {
 			thermal_zone_device_reset(tz);
-			thermal_zone_device_update(tz);
+			thermal_zone_device_update(tz,
+						   THERMAL_EVENT_UNSPECIFIED);
 		}
 		break;
 	default:
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 15c0a9a..0586bd0 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -52,7 +52,7 @@
 	struct ti_thermal_data *data = container_of(work,
 					struct ti_thermal_data, thermal_wq);
 
-	thermal_zone_device_update(data->ti_thermal);
+	thermal_zone_device_update(data->ti_thermal, THERMAL_EVENT_UNSPECIFIED);
 
 	dev_dbg(&data->ti_thermal->device, "updated thermal zone %s\n",
 		data->ti_thermal->type);
@@ -205,7 +205,7 @@
 	data->mode = mode;
 	ti_bandgap_write_update_interval(bgp, data->sensor_id,
 					data->ti_thermal->polling_delay);
-	thermal_zone_device_update(data->ti_thermal);
+	thermal_zone_device_update(data->ti_thermal, THERMAL_EVENT_UNSPECIFIED);
 	dev_dbg(&thermal->device, "thermal polling set for duration=%d msec\n",
 		data->ti_thermal->polling_delay);
 
@@ -239,7 +239,7 @@
 	return 0;
 }
 
-static int __ti_thermal_get_trend(void *p, long *trend)
+static int __ti_thermal_get_trend(void *p, int trip, enum thermal_trend *trend)
 {
 	struct ti_thermal_data *data = p;
 	struct ti_bandgap *bgp;
@@ -252,22 +252,6 @@
 	if (ret)
 		return ret;
 
-	*trend = tr;
-
-	return 0;
-}
-
-/* Get the temperature trend callback functions for thermal zone */
-static int ti_thermal_get_trend(struct thermal_zone_device *thermal,
-				int trip, enum thermal_trend *trend)
-{
-	int ret;
-	long tr;
-
-	ret = __ti_thermal_get_trend(thermal->devdata, &tr);
-	if (ret)
-		return ret;
-
 	if (tr > 0)
 		*trend = THERMAL_TREND_RAISING;
 	else if (tr < 0)
@@ -278,6 +262,13 @@
 	return 0;
 }
 
+/* Get the temperature trend callback functions for thermal zone */
+static int ti_thermal_get_trend(struct thermal_zone_device *thermal,
+				int trip, enum thermal_trend *trend)
+{
+	return __ti_thermal_get_trend(thermal->devdata, trip, trend);
+}
+
 /* Get critical temperature callback functions for thermal zone */
 static int ti_thermal_get_crit_temp(struct thermal_zone_device *thermal,
 				    int *temp)
diff --git a/drivers/thermal/user_space.c b/drivers/thermal/user_space.c
index 10adcdd..c908150 100644
--- a/drivers/thermal/user_space.c
+++ b/drivers/thermal/user_space.c
@@ -23,19 +23,30 @@
  */
 
 #include <linux/thermal.h>
-
+#include <linux/slab.h>
 #include "thermal_core.h"
 
 /**
  * notify_user_space - Notifies user space about thermal events
  * @tz - thermal_zone_device
+ * @trip - Trip point index
  *
  * This function notifies the user space through UEvents.
  */
 static int notify_user_space(struct thermal_zone_device *tz, int trip)
 {
+	char *thermal_prop[5];
+	int i;
+
 	mutex_lock(&tz->lock);
-	kobject_uevent(&tz->device.kobj, KOBJ_CHANGE);
+	thermal_prop[0] = kasprintf(GFP_KERNEL, "NAME=%s", tz->type);
+	thermal_prop[1] = kasprintf(GFP_KERNEL, "TEMP=%d", tz->temperature);
+	thermal_prop[2] = kasprintf(GFP_KERNEL, "TRIP=%d", trip);
+	thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", tz->notify_event);
+	thermal_prop[4] = NULL;
+	kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, thermal_prop);
+	for (i = 0; i < 4; ++i)
+		kfree(thermal_prop[i]);
 	mutex_unlock(&tz->lock);
 	return 0;
 }
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 97f0a2b..95f4c1b 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -348,7 +348,8 @@
 	}
 	if (notify) {
 		pr_debug("thermal_zone_device_update\n");
-		thermal_zone_device_update(phdev->tzone);
+		thermal_zone_device_update(phdev->tzone,
+					   THERMAL_EVENT_UNSPECIFIED);
 	}
 }
 
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index a9d94f7..2675792 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -708,7 +708,7 @@
 {
 	struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
 
-	queue_kthread_work(&s->kworker, &s->irq_work);
+	kthread_queue_work(&s->kworker, &s->irq_work);
 
 	return IRQ_HANDLED;
 }
@@ -784,7 +784,7 @@
 
 	one->config.flags |= SC16IS7XX_RECONF_IER;
 	one->config.ier_clear |= bit;
-	queue_kthread_work(&s->kworker, &one->reg_work);
+	kthread_queue_work(&s->kworker, &one->reg_work);
 }
 
 static void sc16is7xx_stop_tx(struct uart_port *port)
@@ -802,7 +802,7 @@
 	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
 	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
 
-	queue_kthread_work(&s->kworker, &one->tx_work);
+	kthread_queue_work(&s->kworker, &one->tx_work);
 }
 
 static unsigned int sc16is7xx_tx_empty(struct uart_port *port)
@@ -828,7 +828,7 @@
 	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
 
 	one->config.flags |= SC16IS7XX_RECONF_MD;
-	queue_kthread_work(&s->kworker, &one->reg_work);
+	kthread_queue_work(&s->kworker, &one->reg_work);
 }
 
 static void sc16is7xx_break_ctl(struct uart_port *port, int break_state)
@@ -957,7 +957,7 @@
 
 	port->rs485 = *rs485;
 	one->config.flags |= SC16IS7XX_RECONF_RS485;
-	queue_kthread_work(&s->kworker, &one->reg_work);
+	kthread_queue_work(&s->kworker, &one->reg_work);
 
 	return 0;
 }
@@ -1030,7 +1030,7 @@
 
 	sc16is7xx_power(port, 0);
 
-	flush_kthread_worker(&s->kworker);
+	kthread_flush_worker(&s->kworker);
 }
 
 static const char *sc16is7xx_type(struct uart_port *port)
@@ -1176,8 +1176,8 @@
 	s->devtype = devtype;
 	dev_set_drvdata(dev, s);
 
-	init_kthread_worker(&s->kworker);
-	init_kthread_work(&s->irq_work, sc16is7xx_ist);
+	kthread_init_worker(&s->kworker);
+	kthread_init_work(&s->irq_work, sc16is7xx_ist);
 	s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker,
 				      "sc16is7xx");
 	if (IS_ERR(s->kworker_task)) {
@@ -1234,8 +1234,8 @@
 				     SC16IS7XX_EFCR_RXDISABLE_BIT |
 				     SC16IS7XX_EFCR_TXDISABLE_BIT);
 		/* Initialize kthread work structs */
-		init_kthread_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
-		init_kthread_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
+		kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
+		kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
 		/* Register port */
 		uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
 
@@ -1301,7 +1301,7 @@
 		sc16is7xx_power(&s->p[i].port, 0);
 	}
 
-	flush_kthread_worker(&s->kworker);
+	kthread_flush_worker(&s->kworker);
 	kthread_stop(s->kworker_task);
 
 	if (!IS_ERR(s->clk))
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index e841a4e..06fb39c 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -3187,11 +3187,11 @@
 
 	pr_info("Console: switching ");
 	if (!deflt)
-		printk("consoles %d-%d ", first+1, last+1);
+		printk(KERN_CONT "consoles %d-%d ", first+1, last+1);
 	if (j >= 0) {
 		struct vc_data *vc = vc_cons[j].d;
 
-		printk("to %s %s %dx%d\n",
+		printk(KERN_CONT "to %s %s %dx%d\n",
 		       vc->vc_can_do_color ? "colour" : "mono",
 		       desc, vc->vc_cols, vc->vc_rows);
 
@@ -3200,7 +3200,7 @@
 			update_screen(vc);
 		}
 	} else
-		printk("to %s\n", desc);
+		printk(KERN_CONT "to %s\n", desc);
 
 	retval = 0;
 err:
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 09c8d9c..4016dae 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -2409,21 +2409,21 @@
 		snoop(&dev->dev, "%s: CONTROL\n", __func__);
 		ret = proc_control(ps, p);
 		if (ret >= 0)
-			inode->i_mtime = CURRENT_TIME;
+			inode->i_mtime = current_time(inode);
 		break;
 
 	case USBDEVFS_BULK:
 		snoop(&dev->dev, "%s: BULK\n", __func__);
 		ret = proc_bulk(ps, p);
 		if (ret >= 0)
-			inode->i_mtime = CURRENT_TIME;
+			inode->i_mtime = current_time(inode);
 		break;
 
 	case USBDEVFS_RESETEP:
 		snoop(&dev->dev, "%s: RESETEP\n", __func__);
 		ret = proc_resetep(ps, p);
 		if (ret >= 0)
-			inode->i_mtime = CURRENT_TIME;
+			inode->i_mtime = current_time(inode);
 		break;
 
 	case USBDEVFS_RESET:
@@ -2435,7 +2435,7 @@
 		snoop(&dev->dev, "%s: CLEAR_HALT\n", __func__);
 		ret = proc_clearhalt(ps, p);
 		if (ret >= 0)
-			inode->i_mtime = CURRENT_TIME;
+			inode->i_mtime = current_time(inode);
 		break;
 
 	case USBDEVFS_GETDRIVER:
@@ -2462,7 +2462,7 @@
 		snoop(&dev->dev, "%s: SUBMITURB\n", __func__);
 		ret = proc_submiturb(ps, p);
 		if (ret >= 0)
-			inode->i_mtime = CURRENT_TIME;
+			inode->i_mtime = current_time(inode);
 		break;
 
 #ifdef CONFIG_COMPAT
@@ -2470,14 +2470,14 @@
 		snoop(&dev->dev, "%s: CONTROL32\n", __func__);
 		ret = proc_control_compat(ps, p);
 		if (ret >= 0)
-			inode->i_mtime = CURRENT_TIME;
+			inode->i_mtime = current_time(inode);
 		break;
 
 	case USBDEVFS_BULK32:
 		snoop(&dev->dev, "%s: BULK32\n", __func__);
 		ret = proc_bulk_compat(ps, p);
 		if (ret >= 0)
-			inode->i_mtime = CURRENT_TIME;
+			inode->i_mtime = current_time(inode);
 		break;
 
 	case USBDEVFS_DISCSIGNAL32:
@@ -2489,7 +2489,7 @@
 		snoop(&dev->dev, "%s: SUBMITURB32\n", __func__);
 		ret = proc_submiturb_compat(ps, p);
 		if (ret >= 0)
-			inode->i_mtime = CURRENT_TIME;
+			inode->i_mtime = current_time(inode);
 		break;
 
 	case USBDEVFS_IOCTL32:
@@ -2552,7 +2552,7 @@
  done:
 	usb_unlock_device(dev);
 	if (ret >= 0)
-		inode->i_atime = CURRENT_TIME;
+		inode->i_atime = current_time(inode);
 	return ret;
 }
 
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 12731e6..ea73afb 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -20,7 +20,6 @@
 #include <linux/usb/ehci_def.h>
 #include <linux/delay.h>
 #include <linux/serial_core.h>
-#include <linux/kconfig.h>
 #include <linux/kgdb.h>
 #include <linux/kthread.h>
 #include <asm/io.h>
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 0aeed85..54ad100 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1196,15 +1196,15 @@
 	inode = new_inode(sb);
 
 	if (likely(inode)) {
-		struct timespec current_time = CURRENT_TIME;
+		struct timespec ts = current_time(inode);
 
 		inode->i_ino	 = get_next_ino();
 		inode->i_mode    = perms->mode;
 		inode->i_uid     = perms->uid;
 		inode->i_gid     = perms->gid;
-		inode->i_atime   = current_time;
-		inode->i_mtime   = current_time;
-		inode->i_ctime   = current_time;
+		inode->i_atime   = ts;
+		inode->i_mtime   = ts;
+		inode->i_ctime   = ts;
 		inode->i_private = data;
 		if (fops)
 			inode->i_fop = fops;
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 16104b5e..bd82dd1 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1913,7 +1913,7 @@
 		inode->i_uid = make_kuid(&init_user_ns, default_uid);
 		inode->i_gid = make_kgid(&init_user_ns, default_gid);
 		inode->i_atime = inode->i_mtime = inode->i_ctime
-				= CURRENT_TIME;
+				= current_time(inode);
 		inode->i_private = data;
 		inode->i_fop = fops;
 	}
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index f5fccb3..f785032 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -21,7 +21,6 @@
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
-#include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/module.h>
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 1e5f529..0630648 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1308,11 +1308,6 @@
 #define        PLATFORM_DRIVER         ehci_mv_driver
 #endif
 
-#ifdef CONFIG_MIPS_SEAD3
-#include "ehci-sead3.c"
-#define	PLATFORM_DRIVER		ehci_hcd_sead3_driver
-#endif
-
 static int __init ehci_hcd_init(void)
 {
 	int retval = 0;
diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c
deleted file mode 100644
index 3d86cc2..0000000
--- a/drivers/usb/host/ehci-sead3.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * MIPS CI13320A EHCI Host Controller driver
- * Based on "ehci-au1xxx.c" by K.Boge <karsten.boge@amd.com>
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/err.h>
-#include <linux/platform_device.h>
-
-static int ehci_sead3_setup(struct usb_hcd *hcd)
-{
-	int ret;
-	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-
-	ehci->caps = hcd->regs + 0x100;
-
-#ifdef __BIG_ENDIAN
-	ehci->big_endian_mmio = 1;
-	ehci->big_endian_desc = 1;
-#endif
-
-	ret = ehci_setup(hcd);
-	if (ret)
-		return ret;
-
-	ehci->need_io_watchdog = 0;
-
-	/* Set burst length to 16 words. */
-	ehci_writel(ehci, 0x1010, &ehci->regs->reserved1[1]);
-
-	return ret;
-}
-
-const struct hc_driver ehci_sead3_hc_driver = {
-	.description		= hcd_name,
-	.product_desc		= "SEAD-3 EHCI",
-	.hcd_priv_size		= sizeof(struct ehci_hcd),
-
-	/*
-	 * generic hardware linkage
-	 */
-	.irq			= ehci_irq,
-	.flags			= HCD_MEMORY | HCD_USB2 | HCD_BH,
-
-	/*
-	 * basic lifecycle operations
-	 *
-	 */
-	.reset			= ehci_sead3_setup,
-	.start			= ehci_run,
-	.stop			= ehci_stop,
-	.shutdown		= ehci_shutdown,
-
-	/*
-	 * managing i/o requests and associated device resources
-	 */
-	.urb_enqueue		= ehci_urb_enqueue,
-	.urb_dequeue		= ehci_urb_dequeue,
-	.endpoint_disable	= ehci_endpoint_disable,
-	.endpoint_reset		= ehci_endpoint_reset,
-
-	/*
-	 * scheduling support
-	 */
-	.get_frame_number	= ehci_get_frame,
-
-	/*
-	 * root hub support
-	 */
-	.hub_status_data	= ehci_hub_status_data,
-	.hub_control		= ehci_hub_control,
-	.bus_suspend		= ehci_bus_suspend,
-	.bus_resume		= ehci_bus_resume,
-	.relinquish_port	= ehci_relinquish_port,
-	.port_handed_over	= ehci_port_handed_over,
-
-	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete,
-};
-
-static int ehci_hcd_sead3_drv_probe(struct platform_device *pdev)
-{
-	struct usb_hcd *hcd;
-	struct resource *res;
-	int ret;
-
-	if (usb_disabled())
-		return -ENODEV;
-
-	if (pdev->resource[1].flags != IORESOURCE_IRQ) {
-		pr_debug("resource[1] is not IORESOURCE_IRQ");
-		return -ENOMEM;
-	}
-	hcd = usb_create_hcd(&ehci_sead3_hc_driver, &pdev->dev, "SEAD-3");
-	if (!hcd)
-		return -ENOMEM;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(hcd->regs)) {
-		ret = PTR_ERR(hcd->regs);
-		goto err1;
-	}
-	hcd->rsrc_start = res->start;
-	hcd->rsrc_len = resource_size(res);
-
-	/* Root hub has integrated TT. */
-	hcd->has_tt = 1;
-
-	ret = usb_add_hcd(hcd, pdev->resource[1].start,
-			  IRQF_SHARED);
-	if (ret == 0) {
-		platform_set_drvdata(pdev, hcd);
-		device_wakeup_enable(hcd->self.controller);
-		return ret;
-	}
-
-err1:
-	usb_put_hcd(hcd);
-	return ret;
-}
-
-static int ehci_hcd_sead3_drv_remove(struct platform_device *pdev)
-{
-	struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
-	usb_remove_hcd(hcd);
-	usb_put_hcd(hcd);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM
-static int ehci_hcd_sead3_drv_suspend(struct device *dev)
-{
-	struct usb_hcd *hcd = dev_get_drvdata(dev);
-	bool do_wakeup = device_may_wakeup(dev);
-
-	return ehci_suspend(hcd, do_wakeup);
-}
-
-static int ehci_hcd_sead3_drv_resume(struct device *dev)
-{
-	struct usb_hcd *hcd = dev_get_drvdata(dev);
-
-	ehci_resume(hcd, false);
-	return 0;
-}
-
-static const struct dev_pm_ops sead3_ehci_pmops = {
-	.suspend	= ehci_hcd_sead3_drv_suspend,
-	.resume		= ehci_hcd_sead3_drv_resume,
-};
-
-#define SEAD3_EHCI_PMOPS (&sead3_ehci_pmops)
-
-#else
-#define SEAD3_EHCI_PMOPS NULL
-#endif
-
-static struct platform_driver ehci_hcd_sead3_driver = {
-	.probe		= ehci_hcd_sead3_drv_probe,
-	.remove		= ehci_hcd_sead3_drv_remove,
-	.shutdown	= usb_hcd_platform_shutdown,
-	.driver = {
-		.name	= "sead3-ehci",
-		.pm	= SEAD3_EHCI_PMOPS,
-	}
-};
-
-MODULE_ALIAS("platform:sead3-ehci");
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 35af362..d793f54 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -9,7 +9,6 @@
  */
 
 #include <linux/types.h>
-#include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 88b008f..5d3b0db 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -284,12 +284,14 @@
 config FB_ARMCLCD
 	tristate "ARM PrimeCell PL110 support"
 	depends on ARM || ARM64 || COMPILE_TEST
-	depends on FB && ARM_AMBA
+	depends on FB && ARM_AMBA && HAS_IOMEM
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
 	select FB_MODE_HELPERS if OF
 	select VIDEOMODE_HELPERS if OF
+	select BACKLIGHT_LCD_SUPPORT if OF
+	select BACKLIGHT_CLASS_DEVICE if OF
 	help
 	  This framebuffer device driver is for the ARM PrimeCell PL110
 	  Colour LCD controller.  ARM PrimeCells provide the building
@@ -305,6 +307,8 @@
 	def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
 	depends on ARM
 	depends on FB_ARMCLCD && FB=y
+	select REGMAP
+	select MFD_SYSCON
 
 config FB_ACORN
 	bool "Acorn VIDC support"
@@ -2183,7 +2187,7 @@
 
 config FB_COBALT
 	tristate "Cobalt server LCD frame buffer support"
-	depends on FB && (MIPS_COBALT || MIPS_SEAD3)
+	depends on FB && MIPS_COBALT
 
 config FB_SH7760
 	bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
@@ -2443,7 +2447,6 @@
 
 source "drivers/video/fbdev/omap/Kconfig"
 source "drivers/video/fbdev/omap2/Kconfig"
-source "drivers/video/fbdev/exynos/Kconfig"
 source "drivers/video/fbdev/mmp/Kconfig"
 
 config FB_SH_MOBILE_MERAM
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index f673186..ee8c814 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -6,8 +6,6 @@
 
 obj-y				+= core/
 
-obj-$(CONFIG_EXYNOS_VIDEO)     += exynos/
-
 obj-$(CONFIG_FB_MACMODES)      += macmodes.o
 obj-$(CONFIG_FB_WMT_GE_ROPS)   += wmt_ge_rops.o
 
@@ -79,6 +77,7 @@
 obj-$(CONFIG_FB_PVR2)             += pvr2fb.o
 obj-$(CONFIG_FB_VOODOO1)          += sstfb.o
 obj-$(CONFIG_FB_ARMCLCD)	  += amba-clcd.o
+obj-$(CONFIG_ARCH_NOMADIK)	  += amba-clcd-nomadik.o
 obj-$(CONFIG_PLAT_VERSATILE_CLCD) += amba-clcd-versatile.o
 obj-$(CONFIG_FB_GOLDFISH)         += goldfishfb.o
 obj-$(CONFIG_FB_68328)            += 68328fb.o
diff --git a/drivers/video/fbdev/amba-clcd-nomadik.c b/drivers/video/fbdev/amba-clcd-nomadik.c
new file mode 100644
index 0000000..0c06fca
--- /dev/null
+++ b/drivers/video/fbdev/amba-clcd-nomadik.c
@@ -0,0 +1,259 @@
+#include <linux/amba/bus.h>
+#include <linux/amba/clcd.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "amba-clcd-nomadik.h"
+
+static struct gpio_desc *grestb;
+static struct gpio_desc *scen;
+static struct gpio_desc *scl;
+static struct gpio_desc *sda;
+
+static u8 tpg110_readwrite_reg(bool write, u8 address, u8 outval)
+{
+	int i;
+	u8 inval = 0;
+
+	/* Assert SCEN */
+	gpiod_set_value_cansleep(scen, 1);
+	ndelay(150);
+	/* Hammer out the address */
+	for (i = 5; i >= 0; i--) {
+		if (address & BIT(i))
+			gpiod_set_value_cansleep(sda, 1);
+		else
+			gpiod_set_value_cansleep(sda, 0);
+		ndelay(150);
+		/* Send an SCL pulse */
+		gpiod_set_value_cansleep(scl, 1);
+		ndelay(160);
+		gpiod_set_value_cansleep(scl, 0);
+		ndelay(160);
+	}
+
+	if (write) {
+		/* WRITE */
+		gpiod_set_value_cansleep(sda, 0);
+	} else {
+		/* READ */
+		gpiod_set_value_cansleep(sda, 1);
+	}
+	ndelay(150);
+	/* Send an SCL pulse */
+	gpiod_set_value_cansleep(scl, 1);
+	ndelay(160);
+	gpiod_set_value_cansleep(scl, 0);
+	ndelay(160);
+
+	if (!write)
+		/* HiZ turn-around cycle */
+		gpiod_direction_input(sda);
+	ndelay(150);
+	/* Send an SCL pulse */
+	gpiod_set_value_cansleep(scl, 1);
+	ndelay(160);
+	gpiod_set_value_cansleep(scl, 0);
+	ndelay(160);
+
+	/* Hammer in/out the data */
+	for (i = 7; i >= 0; i--) {
+		int value;
+
+		if (write) {
+			value = !!(outval & BIT(i));
+			gpiod_set_value_cansleep(sda, value);
+		} else {
+			value = gpiod_get_value(sda);
+			if (value)
+				inval |= BIT(i);
+		}
+		ndelay(150);
+		/* Send an SCL pulse */
+		gpiod_set_value_cansleep(scl, 1);
+		ndelay(160);
+		gpiod_set_value_cansleep(scl, 0);
+		ndelay(160);
+	}
+
+	gpiod_direction_output(sda, 0);
+	/* Deassert SCEN */
+	gpiod_set_value_cansleep(scen, 0);
+	/* Satisfies SCEN pulse width */
+	udelay(1);
+
+	return inval;
+}
+
+static u8 tpg110_read_reg(u8 address)
+{
+	return tpg110_readwrite_reg(false, address, 0);
+}
+
+static void tpg110_write_reg(u8 address, u8 outval)
+{
+	tpg110_readwrite_reg(true, address, outval);
+}
+
+static void tpg110_startup(struct device *dev)
+{
+	u8 val;
+
+	dev_info(dev, "TPG110 display enable\n");
+	/* De-assert the reset signal */
+	gpiod_set_value_cansleep(grestb, 0);
+	mdelay(1);
+	dev_info(dev, "de-asserted GRESTB\n");
+
+	/* Test display communication */
+	tpg110_write_reg(0x00, 0x55);
+	val = tpg110_read_reg(0x00);
+	if (val == 0x55)
+		dev_info(dev, "passed communication test\n");
+	val = tpg110_read_reg(0x01);
+	dev_info(dev, "TPG110 chip ID: %d version: %d\n",
+		val>>4, val&0x0f);
+
+	/* Show display resolution */
+	val = tpg110_read_reg(0x02);
+	val &= 7;
+	switch (val) {
+	case 0x0:
+		dev_info(dev, "IN 400x240 RGB -> OUT 800x480 RGB (dual scan)");
+		break;
+	case 0x1:
+		dev_info(dev, "IN 480x272 RGB -> OUT 800x480 RGB (dual scan)");
+		break;
+	case 0x4:
+		dev_info(dev, "480x640 RGB");
+		break;
+	case 0x5:
+		dev_info(dev, "480x272 RGB");
+		break;
+	case 0x6:
+		dev_info(dev, "640x480 RGB");
+		break;
+	case 0x7:
+		dev_info(dev, "800x480 RGB");
+		break;
+	default:
+		dev_info(dev, "ILLEGAL RESOLUTION");
+		break;
+	}
+
+	val = tpg110_read_reg(0x03);
+	dev_info(dev, "resolution is controlled by %s\n",
+		(val & BIT(7)) ? "software" : "hardware");
+}
+
+static void tpg110_enable(struct clcd_fb *fb)
+{
+	struct device *dev = &fb->dev->dev;
+	static bool startup;
+	u8 val;
+
+	if (!startup) {
+		tpg110_startup(dev);
+		startup = true;
+	}
+
+	/* Take chip out of standby */
+	val = tpg110_read_reg(0x03);
+	val |= BIT(0);
+	tpg110_write_reg(0x03, val);
+}
+
+static void tpg110_disable(struct clcd_fb *fb)
+{
+	u8 val;
+
+	dev_info(&fb->dev->dev, "TPG110 display disable\n");
+	val = tpg110_read_reg(0x03);
+	/* Put into standby */
+	val &= ~BIT(0);
+	tpg110_write_reg(0x03, val);
+}
+
+static void tpg110_init(struct device *dev, struct device_node *np,
+			struct clcd_board *board)
+{
+	dev_info(dev, "TPG110 display init\n");
+
+	grestb = devm_get_gpiod_from_child(dev, "grestb", &np->fwnode);
+	if (IS_ERR(grestb)) {
+		dev_err(dev, "no GRESTB GPIO\n");
+		return;
+	}
+	/* This asserts the GRESTB signal, putting the display into reset */
+	gpiod_direction_output(grestb, 1);
+
+	scen = devm_get_gpiod_from_child(dev, "scen", &np->fwnode);
+	if (IS_ERR(scen)) {
+		dev_err(dev, "no SCEN GPIO\n");
+		return;
+	}
+	gpiod_direction_output(scen, 0);
+	scl = devm_get_gpiod_from_child(dev, "scl", &np->fwnode);
+	if (IS_ERR(scl)) {
+		dev_err(dev, "no SCL GPIO\n");
+		return;
+	}
+	gpiod_direction_output(scl, 0);
+	sda = devm_get_gpiod_from_child(dev, "sda", &np->fwnode);
+	if (IS_ERR(sda)) {
+		dev_err(dev, "no SDA GPIO\n");
+		return;
+	}
+	gpiod_direction_output(sda, 0);
+	board->enable = tpg110_enable;
+	board->disable = tpg110_disable;
+}
+
+int nomadik_clcd_init_panel(struct clcd_fb *fb,
+			    struct device_node *endpoint)
+{
+	struct device_node *panel;
+
+	panel = of_graph_get_remote_port_parent(endpoint);
+	if (!panel)
+		return -ENODEV;
+
+	if (of_device_is_compatible(panel, "tpo,tpg110"))
+		tpg110_init(&fb->dev->dev, panel, fb->board);
+	else
+		dev_info(&fb->dev->dev, "unknown panel\n");
+
+	/* Unknown panel, fall through */
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nomadik_clcd_init_panel);
+
+#define PMU_CTRL_OFFSET 0x0000
+#define PMU_CTRL_LCDNDIF BIT(26)
+
+int nomadik_clcd_init_board(struct amba_device *adev,
+			    struct clcd_board *board)
+{
+	struct regmap *pmu_regmap;
+
+	dev_info(&adev->dev, "Nomadik CLCD board init\n");
+	pmu_regmap =
+		syscon_regmap_lookup_by_compatible("stericsson,nomadik-pmu");
+	if (IS_ERR(pmu_regmap)) {
+		dev_err(&adev->dev, "could not find PMU syscon regmap\n");
+		return PTR_ERR(pmu_regmap);
+	}
+	regmap_update_bits(pmu_regmap,
+			   PMU_CTRL_OFFSET,
+			   PMU_CTRL_LCDNDIF,
+			   0);
+	dev_info(&adev->dev, "set PMU mux to CLCD mode\n");
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nomadik_clcd_init_board);
diff --git a/drivers/video/fbdev/amba-clcd-nomadik.h b/drivers/video/fbdev/amba-clcd-nomadik.h
new file mode 100644
index 0000000..50aa9bd
--- /dev/null
+++ b/drivers/video/fbdev/amba-clcd-nomadik.h
@@ -0,0 +1,24 @@
+#ifndef _AMBA_CLCD_NOMADIK_H
+#define _AMBA_CLCD_NOMADIK_H
+
+#include <linux/amba/bus.h>
+
+#ifdef CONFIG_ARCH_NOMADIK
+int nomadik_clcd_init_board(struct amba_device *adev,
+			     struct clcd_board *board);
+int nomadik_clcd_init_panel(struct clcd_fb *fb,
+			    struct device_node *endpoint);
+#else
+static inline int nomadik_clcd_init_board(struct amba_device *adev,
+					  struct clcd_board *board)
+{
+	return 0;
+}
+static inline int nomadik_clcd_init_panel(struct clcd_fb *fb,
+					  struct device_node *endpoint)
+{
+	return 0;
+}
+#endif
+
+#endif /* inclusion guard */
diff --git a/drivers/video/fbdev/amba-clcd-versatile.c b/drivers/video/fbdev/amba-clcd-versatile.c
index a8a22da..19ad864 100644
--- a/drivers/video/fbdev/amba-clcd-versatile.c
+++ b/drivers/video/fbdev/amba-clcd-versatile.c
@@ -3,6 +3,12 @@
 #include <linux/amba/bus.h>
 #include <linux/amba/clcd.h>
 #include <linux/platform_data/video-clcd-versatile.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/bitops.h>
+#include "amba-clcd-versatile.h"
 
 static struct clcd_panel vga = {
 	.mode		= {
@@ -178,3 +184,392 @@
 	dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base,
 		    fb->fb.fix.smem_start);
 }
+
+#ifdef CONFIG_OF
+
+static struct regmap *versatile_syscon_map;
+static struct regmap *versatile_ib2_map;
+
+/*
+ * We detect the different syscon types from the compatible strings.
+ */
+enum versatile_clcd {
+	INTEGRATOR_CLCD_CM,
+	VERSATILE_CLCD,
+	REALVIEW_CLCD_EB,
+	REALVIEW_CLCD_PB1176,
+	REALVIEW_CLCD_PB11MP,
+	REALVIEW_CLCD_PBA8,
+	REALVIEW_CLCD_PBX,
+};
+
+static const struct of_device_id versatile_clcd_of_match[] = {
+	{
+		.compatible = "arm,core-module-integrator",
+		.data = (void *)INTEGRATOR_CLCD_CM,
+	},
+	{
+		.compatible = "arm,versatile-sysreg",
+		.data = (void *)VERSATILE_CLCD,
+	},
+	{
+		.compatible = "arm,realview-eb-syscon",
+		.data = (void *)REALVIEW_CLCD_EB,
+	},
+	{
+		.compatible = "arm,realview-pb1176-syscon",
+		.data = (void *)REALVIEW_CLCD_PB1176,
+	},
+	{
+		.compatible = "arm,realview-pb11mp-syscon",
+		.data = (void *)REALVIEW_CLCD_PB11MP,
+	},
+	{
+		.compatible = "arm,realview-pba8-syscon",
+		.data = (void *)REALVIEW_CLCD_PBA8,
+	},
+	{
+		.compatible = "arm,realview-pbx-syscon",
+		.data = (void *)REALVIEW_CLCD_PBX,
+	},
+	{},
+};
+
+/*
+ * Core module CLCD control on the Integrator/CP, bits
+ * 8 thru 19 of the CM_CONTROL register controls a bunch
+ * of CLCD settings.
+ */
+#define INTEGRATOR_HDR_CTRL_OFFSET	0x0C
+#define INTEGRATOR_CLCD_LCDBIASEN	BIT(8)
+#define INTEGRATOR_CLCD_LCDBIASUP	BIT(9)
+#define INTEGRATOR_CLCD_LCDBIASDN	BIT(10)
+/* Bits 11,12,13 controls the LCD type */
+#define INTEGRATOR_CLCD_LCDMUX_MASK	(BIT(11)|BIT(12)|BIT(13))
+#define INTEGRATOR_CLCD_LCDMUX_LCD24	BIT(11)
+#define INTEGRATOR_CLCD_LCDMUX_VGA565	BIT(12)
+#define INTEGRATOR_CLCD_LCDMUX_SHARP	(BIT(11)|BIT(12))
+#define INTEGRATOR_CLCD_LCDMUX_VGA555	BIT(13)
+#define INTEGRATOR_CLCD_LCDMUX_VGA24	(BIT(11)|BIT(12)|BIT(13))
+#define INTEGRATOR_CLCD_LCD0_EN		BIT(14)
+#define INTEGRATOR_CLCD_LCD1_EN		BIT(15)
+/* R/L flip on Sharp */
+#define INTEGRATOR_CLCD_LCD_STATIC1	BIT(16)
+/* U/D flip on Sharp */
+#define INTEGRATOR_CLCD_LCD_STATIC2	BIT(17)
+/* No connection on Sharp */
+#define INTEGRATOR_CLCD_LCD_STATIC	BIT(18)
+/* 0 = 24bit VGA, 1 = 18bit VGA */
+#define INTEGRATOR_CLCD_LCD_N24BITEN	BIT(19)
+
+#define INTEGRATOR_CLCD_MASK		(INTEGRATOR_CLCD_LCDBIASEN | \
+					 INTEGRATOR_CLCD_LCDBIASUP | \
+					 INTEGRATOR_CLCD_LCDBIASDN | \
+					 INTEGRATOR_CLCD_LCDMUX_MASK | \
+					 INTEGRATOR_CLCD_LCD0_EN | \
+					 INTEGRATOR_CLCD_LCD1_EN | \
+					 INTEGRATOR_CLCD_LCD_STATIC1 | \
+					 INTEGRATOR_CLCD_LCD_STATIC2 | \
+					 INTEGRATOR_CLCD_LCD_STATIC | \
+					 INTEGRATOR_CLCD_LCD_N24BITEN)
+
+static void integrator_clcd_enable(struct clcd_fb *fb)
+{
+	struct fb_var_screeninfo *var = &fb->fb.var;
+	u32 val;
+
+	dev_info(&fb->dev->dev, "enable Integrator CLCD connectors\n");
+
+	/* FIXME: really needed? */
+	val = INTEGRATOR_CLCD_LCD_STATIC1 | INTEGRATOR_CLCD_LCD_STATIC2 |
+		INTEGRATOR_CLCD_LCD0_EN | INTEGRATOR_CLCD_LCD1_EN;
+	if (var->bits_per_pixel <= 8 ||
+	    (var->bits_per_pixel == 16 && var->green.length == 5))
+		/* Pseudocolor, RGB555, BGR555 */
+		val |= INTEGRATOR_CLCD_LCDMUX_VGA555;
+	else if (fb->fb.var.bits_per_pixel <= 16)
+		/* truecolor RGB565 */
+		val |= INTEGRATOR_CLCD_LCDMUX_VGA565;
+	else
+		val = 0; /* no idea for this, don't trust the docs */
+
+	regmap_update_bits(versatile_syscon_map,
+			   INTEGRATOR_HDR_CTRL_OFFSET,
+			   INTEGRATOR_CLCD_MASK,
+			   val);
+}
+
+/*
+ * This configuration register in the Versatile and RealView
+ * family is uniformly present but appears more and more
+ * unutilized starting with the RealView series.
+ */
+#define SYS_CLCD			0x50
+#define SYS_CLCD_MODE_MASK		(BIT(0)|BIT(1))
+#define SYS_CLCD_MODE_888		0
+#define SYS_CLCD_MODE_5551		BIT(0)
+#define SYS_CLCD_MODE_565_R_LSB		BIT(1)
+#define SYS_CLCD_MODE_565_B_LSB		(BIT(0)|BIT(1))
+#define SYS_CLCD_CONNECTOR_MASK		(BIT(2)|BIT(3)|BIT(4)|BIT(5))
+#define SYS_CLCD_NLCDIOON		BIT(2)
+#define SYS_CLCD_VDDPOSSWITCH		BIT(3)
+#define SYS_CLCD_PWR3V5SWITCH		BIT(4)
+#define SYS_CLCD_VDDNEGSWITCH		BIT(5)
+#define SYS_CLCD_TSNSS			BIT(6) /* touchscreen enable */
+#define SYS_CLCD_SSPEXP			BIT(7) /* SSP expansion enable */
+
+/* The Versatile can detect the connected panel type */
+#define SYS_CLCD_CLCDID_MASK		(BIT(8)|BIT(9)|BIT(10)|BIT(11)|BIT(12))
+#define SYS_CLCD_ID_SANYO_3_8		(0x00 << 8)
+#define SYS_CLCD_ID_SHARP_8_4		(0x01 << 8)
+#define SYS_CLCD_ID_EPSON_2_2		(0x02 << 8)
+#define SYS_CLCD_ID_SANYO_2_5		(0x07 << 8)
+#define SYS_CLCD_ID_VGA			(0x1f << 8)
+
+#define SYS_CLCD_TSNDAV			BIT(13) /* data ready from TS */
+
+/* IB2 control register for the Versatile daughterboard */
+#define IB2_CTRL			0x00
+#define IB2_CTRL_LCD_SD			BIT(1) /* 1 = shut down LCD */
+#define IB2_CTRL_LCD_BL_ON		BIT(0)
+#define IB2_CTRL_LCD_MASK		(BIT(0)|BIT(1))
+
+static void versatile_clcd_disable(struct clcd_fb *fb)
+{
+	dev_info(&fb->dev->dev, "disable Versatile CLCD connectors\n");
+	regmap_update_bits(versatile_syscon_map,
+			   SYS_CLCD,
+			   SYS_CLCD_CONNECTOR_MASK,
+			   0);
+
+	/* If we're on an IB2 daughterboard, turn off display */
+	if (versatile_ib2_map) {
+		dev_info(&fb->dev->dev, "disable IB2 display\n");
+		regmap_update_bits(versatile_ib2_map,
+				   IB2_CTRL,
+				   IB2_CTRL_LCD_MASK,
+				   IB2_CTRL_LCD_SD);
+	}
+}
+
+static void versatile_clcd_enable(struct clcd_fb *fb)
+{
+	struct fb_var_screeninfo *var = &fb->fb.var;
+	u32 val = 0;
+
+	dev_info(&fb->dev->dev, "enable Versatile CLCD connectors\n");
+	switch (var->green.length) {
+	case 5:
+		val |= SYS_CLCD_MODE_5551;
+		break;
+	case 6:
+		if (var->red.offset == 0)
+			val |= SYS_CLCD_MODE_565_R_LSB;
+		else
+			val |= SYS_CLCD_MODE_565_B_LSB;
+		break;
+	case 8:
+		val |= SYS_CLCD_MODE_888;
+		break;
+	}
+
+	/* Set up the MUX */
+	regmap_update_bits(versatile_syscon_map,
+			   SYS_CLCD,
+			   SYS_CLCD_MODE_MASK,
+			   val);
+
+	/* Then enable the display */
+	regmap_update_bits(versatile_syscon_map,
+			   SYS_CLCD,
+			   SYS_CLCD_CONNECTOR_MASK,
+			   SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH);
+
+	/* If we're on an IB2 daughterboard, turn on display */
+	if (versatile_ib2_map) {
+		dev_info(&fb->dev->dev, "enable IB2 display\n");
+		regmap_update_bits(versatile_ib2_map,
+				   IB2_CTRL,
+				   IB2_CTRL_LCD_MASK,
+				   IB2_CTRL_LCD_BL_ON);
+	}
+}
+
+static void versatile_clcd_decode(struct clcd_fb *fb, struct clcd_regs *regs)
+{
+	clcdfb_decode(fb, regs);
+
+	/* Always clear BGR for RGB565: we do the routing externally */
+	if (fb->fb.var.green.length == 6)
+		regs->cntl &= ~CNTL_BGR;
+}
+
+static void realview_clcd_disable(struct clcd_fb *fb)
+{
+	dev_info(&fb->dev->dev, "disable RealView CLCD connectors\n");
+	regmap_update_bits(versatile_syscon_map,
+			   SYS_CLCD,
+			   SYS_CLCD_CONNECTOR_MASK,
+			   0);
+}
+
+static void realview_clcd_enable(struct clcd_fb *fb)
+{
+	dev_info(&fb->dev->dev, "enable RealView CLCD connectors\n");
+	regmap_update_bits(versatile_syscon_map,
+			   SYS_CLCD,
+			   SYS_CLCD_CONNECTOR_MASK,
+			   SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH);
+}
+
+struct versatile_panel {
+	u32 id;
+	char *compatible;
+	bool ib2;
+};
+
+static const struct versatile_panel versatile_panels[] = {
+	{
+		.id = SYS_CLCD_ID_VGA,
+		.compatible = "VGA",
+	},
+	{
+		.id = SYS_CLCD_ID_SANYO_3_8,
+		.compatible = "sanyo,tm38qv67a02a",
+	},
+	{
+		.id = SYS_CLCD_ID_SHARP_8_4,
+		.compatible = "sharp,lq084v1dg21",
+	},
+	{
+		.id = SYS_CLCD_ID_EPSON_2_2,
+		.compatible = "epson,l2f50113t00",
+	},
+	{
+		.id = SYS_CLCD_ID_SANYO_2_5,
+		.compatible = "sanyo,alr252rgt",
+		.ib2 = true,
+	},
+};
+
+static void versatile_panel_probe(struct device *dev,
+				  struct device_node *endpoint)
+{
+	struct versatile_panel const *vpanel = NULL;
+	struct device_node *panel = NULL;
+	u32 val;
+	int ret;
+	int i;
+
+	/*
+	 * The Versatile CLCD has a panel auto-detection mechanism.
+	 * We use this and look for the compatible panel in the
+	 * device tree.
+	 */
+	ret = regmap_read(versatile_syscon_map, SYS_CLCD, &val);
+	if (ret) {
+		dev_err(dev, "cannot read CLCD syscon register\n");
+		return;
+	}
+	val &= SYS_CLCD_CLCDID_MASK;
+
+	/* First find corresponding panel information */
+	for (i = 0; i < ARRAY_SIZE(versatile_panels); i++) {
+		vpanel = &versatile_panels[i];
+
+		if (val == vpanel->id) {
+			dev_err(dev, "autodetected panel \"%s\"\n",
+				vpanel->compatible);
+			break;
+		}
+	}
+	if (i == ARRAY_SIZE(versatile_panels)) {
+		dev_err(dev, "could not auto-detect panel\n");
+		return;
+	}
+
+	panel = of_graph_get_remote_port_parent(endpoint);
+	if (!panel) {
+		dev_err(dev, "could not locate panel in DT\n");
+		return;
+	}
+	if (!of_device_is_compatible(panel, vpanel->compatible))
+		dev_err(dev, "panel in DT is not compatible with the "
+			"auto-detected panel, continuing anyway\n");
+
+	/*
+	 * If we have a Sanyo 2.5" port
+	 * that we're running on an IB2 and proceed to look for the
+	 * IB2 syscon regmap.
+	 */
+	if (!vpanel->ib2)
+		return;
+
+	versatile_ib2_map = syscon_regmap_lookup_by_compatible(
+		"arm,versatile-ib2-syscon");
+	if (IS_ERR(versatile_ib2_map)) {
+		dev_err(dev, "could not locate IB2 control register\n");
+		versatile_ib2_map = NULL;
+		return;
+	}
+}
+
+int versatile_clcd_init_panel(struct clcd_fb *fb,
+			      struct device_node *endpoint)
+{
+	const struct of_device_id *clcd_id;
+	enum versatile_clcd versatile_clcd_type;
+	struct device_node *np;
+	struct regmap *map;
+	struct device *dev = &fb->dev->dev;
+
+	np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match,
+					     &clcd_id);
+	if (!np) {
+		dev_err(dev, "no Versatile syscon node\n");
+		return -ENODEV;
+	}
+	versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
+
+	map = syscon_node_to_regmap(np);
+	if (IS_ERR(map)) {
+		dev_err(dev, "no Versatile syscon regmap\n");
+		return PTR_ERR(map);
+	}
+
+	switch (versatile_clcd_type) {
+	case INTEGRATOR_CLCD_CM:
+		versatile_syscon_map = map;
+		fb->board->enable = integrator_clcd_enable;
+		/* Override the caps, we have only these */
+		fb->board->caps = CLCD_CAP_5551 | CLCD_CAP_RGB565 |
+			CLCD_CAP_888;
+		dev_info(dev, "set up callbacks for Integrator PL110\n");
+		break;
+	case VERSATILE_CLCD:
+		versatile_syscon_map = map;
+		fb->board->enable = versatile_clcd_enable;
+		fb->board->disable = versatile_clcd_disable;
+		fb->board->decode = versatile_clcd_decode;
+		versatile_panel_probe(dev, endpoint);
+		dev_info(dev, "set up callbacks for Versatile\n");
+		break;
+	case REALVIEW_CLCD_EB:
+	case REALVIEW_CLCD_PB1176:
+	case REALVIEW_CLCD_PB11MP:
+	case REALVIEW_CLCD_PBA8:
+	case REALVIEW_CLCD_PBX:
+		versatile_syscon_map = map;
+		fb->board->enable = realview_clcd_enable;
+		fb->board->disable = realview_clcd_disable;
+		dev_info(dev, "set up callbacks for RealView PL111\n");
+		break;
+	default:
+		dev_info(dev, "unknown Versatile system controller\n");
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(versatile_clcd_init_panel);
+#endif
diff --git a/drivers/video/fbdev/amba-clcd-versatile.h b/drivers/video/fbdev/amba-clcd-versatile.h
new file mode 100644
index 0000000..1b14359
--- /dev/null
+++ b/drivers/video/fbdev/amba-clcd-versatile.h
@@ -0,0 +1,17 @@
+/*
+ * Special local versatile callbacks
+ */
+#include <linux/of.h>
+#include <linux/amba/bus.h>
+#include <linux/platform_data/video-clcd-versatile.h>
+
+#if defined(CONFIG_PLAT_VERSATILE_CLCD) && defined(CONFIG_OF)
+int versatile_clcd_init_panel(struct clcd_fb *fb,
+			      struct device_node *endpoint);
+#else
+static inline int versatile_clcd_init_panel(struct clcd_fb *fb,
+				struct device_node *endpoint)
+{
+	return 0;
+}
+#endif
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 9b15886..ec2671d 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -30,10 +30,14 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_graph.h>
+#include <linux/backlight.h>
 #include <video/display_timing.h>
 #include <video/of_display_timing.h>
 #include <video/videomode.h>
 
+#include "amba-clcd-nomadik.h"
+#include "amba-clcd-versatile.h"
+
 #define to_clcd(info)	container_of(info, struct clcd_fb, fb)
 
 /* This is limited to 16 characters when displayed by X startup */
@@ -71,6 +75,11 @@
 	if (fb->board->disable)
 		fb->board->disable(fb);
 
+	if (fb->panel->backlight) {
+		fb->panel->backlight->props.power = FB_BLANK_POWERDOWN;
+		backlight_update_status(fb->panel->backlight);
+	}
+
 	val = readl(fb->regs + fb->off_cntl);
 	if (val & CNTL_LCDPWR) {
 		val &= ~CNTL_LCDPWR;
@@ -117,6 +126,14 @@
 	writel(cntl, fb->regs + fb->off_cntl);
 
 	/*
+	 * Turn on backlight
+	 */
+	if (fb->panel->backlight) {
+		fb->panel->backlight->props.power = FB_BLANK_UNBLANK;
+		backlight_update_status(fb->panel->backlight);
+	}
+
+	/*
 	 * finally, enable the interface.
 	 */
 	if (fb->board->enable)
@@ -211,6 +228,15 @@
 			var->blue.length = 4;
 		}
 		break;
+	case 24:
+		if (fb->vendor->packed_24_bit_pixels) {
+			var->red.length = 8;
+			var->green.length = 8;
+			var->blue.length = 8;
+		} else {
+			ret = -EINVAL;
+		}
+		break;
 	case 32:
 		/* If we can't do 888, reject */
 		caps &= CLCD_CAP_888;
@@ -297,6 +323,12 @@
 
 	clcdfb_disable(fb);
 
+	/* Some variants must be clocked here */
+	if (fb->vendor->clock_timregs && !fb->clk_enabled) {
+		fb->clk_enabled = true;
+		clk_enable(fb->clk);
+	}
+
 	writel(regs.tim0, fb->regs + CLCD_TIM0);
 	writel(regs.tim1, fb->regs + CLCD_TIM1);
 	writel(regs.tim2, fb->regs + CLCD_TIM2);
@@ -551,7 +583,7 @@
 
 #ifdef CONFIG_OF
 static int clcdfb_of_get_dpi_panel_mode(struct device_node *node,
-		struct fb_videomode *mode)
+		struct clcd_panel *clcd_panel)
 {
 	int err;
 	struct display_timing timing;
@@ -563,10 +595,31 @@
 
 	videomode_from_timing(&timing, &video);
 
-	err = fb_videomode_from_videomode(&video, mode);
+	err = fb_videomode_from_videomode(&video, &clcd_panel->mode);
 	if (err)
 		return err;
 
+	/* Set up some inversion flags */
+	if (timing.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+		clcd_panel->tim2 |= TIM2_IPC;
+	else if (!(timing.flags & DISPLAY_FLAGS_PIXDATA_POSEDGE))
+		/*
+		 * To preserve backwards compatibility, the IPC (inverted
+		 * pixel clock) flag needs to be set on any display that
+		 * doesn't explicitly specify that the pixel clock is
+		 * active on the negative or positive edge.
+		 */
+		clcd_panel->tim2 |= TIM2_IPC;
+
+	if (timing.flags & DISPLAY_FLAGS_HSYNC_LOW)
+		clcd_panel->tim2 |= TIM2_IHS;
+
+	if (timing.flags & DISPLAY_FLAGS_VSYNC_LOW)
+		clcd_panel->tim2 |= TIM2_IVS;
+
+	if (timing.flags & DISPLAY_FLAGS_DE_LOW)
+		clcd_panel->tim2 |= TIM2_IOE;
+
 	return 0;
 }
 
@@ -576,11 +629,34 @@
 			mode->refresh);
 }
 
+static int clcdfb_of_get_backlight(struct device_node *endpoint,
+				   struct clcd_panel *clcd_panel)
+{
+	struct device_node *panel;
+	struct device_node *backlight;
+
+	panel = of_graph_get_remote_port_parent(endpoint);
+	if (!panel)
+		return -ENODEV;
+
+	/* Look up the optional backlight phandle */
+	backlight = of_parse_phandle(panel, "backlight", 0);
+	if (backlight) {
+		clcd_panel->backlight = of_find_backlight_by_node(backlight);
+		of_node_put(backlight);
+
+		if (!clcd_panel->backlight)
+			return -EPROBE_DEFER;
+	}
+	return 0;
+}
+
 static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint,
-		struct fb_videomode *mode)
+		struct clcd_panel *clcd_panel)
 {
 	int err;
 	struct device_node *panel;
+	struct fb_videomode *mode;
 	char *name;
 	int len;
 
@@ -590,11 +666,12 @@
 
 	/* Only directly connected DPI panels supported for now */
 	if (of_device_is_compatible(panel, "panel-dpi"))
-		err = clcdfb_of_get_dpi_panel_mode(panel, mode);
+		err = clcdfb_of_get_dpi_panel_mode(panel, clcd_panel);
 	else
 		err = -ENOENT;
 	if (err)
 		return err;
+	mode = &clcd_panel->mode;
 
 	len = clcdfb_snprintf_mode(NULL, 0, mode);
 	name = devm_kzalloc(dev, len + 1, GFP_KERNEL);
@@ -616,6 +693,7 @@
 	} panels[] = {
 		{ 0x110, 1,  7, 13, CLCD_CAP_5551 },
 		{ 0x110, 0,  8, 16, CLCD_CAP_888 },
+		{ 0x110, 16, 8, 0,  CLCD_CAP_888 },
 		{ 0x111, 4, 14, 20, CLCD_CAP_444 },
 		{ 0x111, 3, 11, 19, CLCD_CAP_444 | CLCD_CAP_5551 },
 		{ 0x111, 3, 10, 19, CLCD_CAP_444 | CLCD_CAP_5551 |
@@ -625,8 +703,8 @@
 	};
 	int i;
 
-	/* Bypass pixel clock divider, data output on the falling edge */
-	fb->panel->tim2 = TIM2_BCD | TIM2_IPC;
+	/* Bypass pixel clock divider */
+	fb->panel->tim2 |= TIM2_BCD;
 
 	/* TFT display, vert. comp. interrupt at the start of the back porch */
 	fb->panel->cntl |= CNTL_LCDTFT | CNTL_LCDVCOMP(1);
@@ -643,6 +721,49 @@
 			fb->panel->caps = panels[i].caps;
 	}
 
+	/*
+	 * If we actually physically connected the R lines to B and
+	 * vice versa
+	 */
+	if (r0 != 0 && b0 == 0)
+		fb->panel->bgr_connection = true;
+
+	if (fb->panel->caps && fb->vendor->st_bitmux_control) {
+		/*
+		 * Set up the special bits for the Nomadik control register
+		 * (other platforms tend to do this through an external
+		 * register).
+		 */
+
+		/* Offset of the highest used color */
+		int maxoff = max3(r0, g0, b0);
+		/* Most significant bit out, highest used bit */
+		int msb = 0;
+
+		if (fb->panel->caps & CLCD_CAP_888) {
+			msb = maxoff + 8 - 1;
+		} else if (fb->panel->caps & CLCD_CAP_565) {
+			msb = maxoff + 5 - 1;
+			fb->panel->cntl |= CNTL_ST_1XBPP_565;
+		} else if (fb->panel->caps & CLCD_CAP_5551) {
+			msb = maxoff + 5 - 1;
+			fb->panel->cntl |= CNTL_ST_1XBPP_5551;
+		} else if (fb->panel->caps & CLCD_CAP_444) {
+			msb = maxoff + 4 - 1;
+			fb->panel->cntl |= CNTL_ST_1XBPP_444;
+		}
+
+		/* Send out as many bits as we need */
+		if (msb > 17)
+			fb->panel->cntl |= CNTL_ST_CDWID_24;
+		else if (msb > 15)
+			fb->panel->cntl |= CNTL_ST_CDWID_18;
+		else if (msb > 11)
+			fb->panel->cntl |= CNTL_ST_CDWID_16;
+		else
+			fb->panel->cntl |= CNTL_ST_CDWID_12;
+	}
+
 	return fb->panel->caps ? 0 : -EINVAL;
 }
 
@@ -658,11 +779,24 @@
 	if (!fb->panel)
 		return -ENOMEM;
 
+	/*
+	 * Fetch the panel endpoint.
+	 */
 	endpoint = of_graph_get_next_endpoint(fb->dev->dev.of_node, NULL);
 	if (!endpoint)
 		return -ENODEV;
 
-	err = clcdfb_of_get_mode(&fb->dev->dev, endpoint, &fb->panel->mode);
+	if (fb->vendor->init_panel) {
+		err = fb->vendor->init_panel(fb, endpoint);
+		if (err)
+			return err;
+	}
+
+	err = clcdfb_of_get_backlight(endpoint, fb->panel);
+	if (err)
+		return err;
+
+	err = clcdfb_of_get_mode(&fb->dev->dev, endpoint, fb->panel);
 	if (err)
 		return err;
 
@@ -693,11 +827,11 @@
 
 	if (of_property_read_u32_array(endpoint,
 			"arm,pl11x,tft-r0g0b0-pads",
-			tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) == 0)
-		return clcdfb_of_init_tft_panel(fb, tft_r0b0g0[0],
-				 tft_r0b0g0[1],  tft_r0b0g0[2]);
+			tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) != 0)
+		return -ENOENT;
 
-	return -ENOENT;
+	return clcdfb_of_init_tft_panel(fb, tft_r0b0g0[0],
+					tft_r0b0g0[1],  tft_r0b0g0[2]);
 }
 
 static int clcdfb_of_vram_setup(struct clcd_fb *fb)
@@ -818,6 +952,7 @@
 static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
 {
 	struct clcd_board *board = dev_get_platdata(&dev->dev);
+	struct clcd_vendor_data *vendor = id->data;
 	struct clcd_fb *fb;
 	int ret;
 
@@ -827,6 +962,12 @@
 	if (!board)
 		return -EINVAL;
 
+	if (vendor->init_board) {
+		ret = vendor->init_board(dev, board);
+		if (ret)
+			return ret;
+	}
+
 	ret = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
 	if (ret)
 		goto out;
@@ -845,17 +986,18 @@
 	}
 
 	fb->dev = dev;
+	fb->vendor = vendor;
 	fb->board = board;
 
-	dev_info(&fb->dev->dev, "PL%03x rev%u at 0x%08llx\n",
-		amba_part(dev), amba_rev(dev),
+	dev_info(&fb->dev->dev, "PL%03x designer %02x rev%u at 0x%08llx\n",
+		amba_part(dev), amba_manf(dev), amba_rev(dev),
 		(unsigned long long)dev->res.start);
 
 	ret = fb->board->setup(fb);
 	if (ret)
 		goto free_fb;
 
-	ret = clcdfb_register(fb); 
+	ret = clcdfb_register(fb);
 	if (ret == 0) {
 		amba_set_drvdata(dev, fb);
 		goto out;
@@ -891,10 +1033,30 @@
 	return 0;
 }
 
+static struct clcd_vendor_data vendor_arm = {
+	/* Sets up the versatile board displays */
+	.init_panel = versatile_clcd_init_panel,
+};
+
+static struct clcd_vendor_data vendor_nomadik = {
+	.clock_timregs = true,
+	.packed_24_bit_pixels = true,
+	.st_bitmux_control = true,
+	.init_board = nomadik_clcd_init_board,
+	.init_panel = nomadik_clcd_init_panel,
+};
+
 static struct amba_id clcdfb_id_table[] = {
 	{
 		.id	= 0x00041110,
 		.mask	= 0x000ffffe,
+		.data	= &vendor_arm,
+	},
+	/* ST Electronics Nomadik variant */
+	{
+		.id	= 0x00180110,
+		.mask	= 0x00fffffe,
+		.data	= &vendor_nomadik,
 	},
 	{ 0, 0 },
 };
diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
index 1b0b233..1928cb2 100644
--- a/drivers/video/fbdev/arcfb.c
+++ b/drivers/video/fbdev/arcfb.c
@@ -79,7 +79,7 @@
 	spinlock_t lock;
 };
 
-static struct fb_fix_screeninfo arcfb_fix = {
+static const struct fb_fix_screeninfo arcfb_fix = {
 	.id =		"arcfb",
 	.type =		FB_TYPE_PACKED_PIXELS,
 	.visual =	FB_VISUAL_MONO01,
@@ -89,7 +89,7 @@
 	.accel =	FB_ACCEL_NONE,
 };
 
-static struct fb_var_screeninfo arcfb_var = {
+static const struct fb_var_screeninfo arcfb_var = {
 	.xres		= 128,
 	.yres		= 64,
 	.xres_virtual	= 128,
diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c
index 7e8ddf0..91eea45 100644
--- a/drivers/video/fbdev/asiliantfb.c
+++ b/drivers/video/fbdev/asiliantfb.c
@@ -474,7 +474,7 @@
 		write_fr(chips_init_fr[i].addr, chips_init_fr[i].data);
 }
 
-static struct fb_fix_screeninfo asiliantfb_fix = {
+static const struct fb_fix_screeninfo asiliantfb_fix = {
 	.id =		"Asiliant 69000",
 	.type =		FB_TYPE_PACKED_PIXELS,
 	.visual =	FB_VISUAL_PSEUDOCOLOR,
@@ -483,7 +483,7 @@
 	.smem_len =	0x200000,	/* 2MB */
 };
 
-static struct fb_var_screeninfo asiliantfb_var = {
+static const struct fb_var_screeninfo asiliantfb_var = {
 	.xres 		= 640,
 	.yres 		= 480,
 	.xres_virtual 	= 640,
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index 0a46268..fa07242 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -93,7 +93,7 @@
 
 #ifndef CONFIG_PPC_PMAC
 /* default mode */
-static struct fb_var_screeninfo default_var = {
+static const struct fb_var_screeninfo default_var = {
 	/* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
 	640, 480, 640, 480, 0, 0, 8, 0,
 	{0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0},
@@ -104,7 +104,7 @@
 #else /* CONFIG_PPC_PMAC */
 /* default to 1024x768 at 75Hz on PPC - this will work
  * on the iMac, the usual 640x480 @ 60Hz doesn't. */
-static struct fb_var_screeninfo default_var = {
+static const struct fb_var_screeninfo default_var = {
 	/* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */
 	1024, 768, 1024, 768, 0, 0, 8, 0,
 	{0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0},
@@ -375,7 +375,7 @@
 	.name = "64-bit DDR SGRAM",
 };
 
-static struct fb_fix_screeninfo aty128fb_fix = {
+static const struct fb_fix_screeninfo aty128fb_fix = {
 	.id		= "ATY Rage128",
 	.type		= FB_TYPE_PACKED_PIXELS,
 	.visual		= FB_VISUAL_PSEUDOCOLOR,
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index f34ed47f..11026e7 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -212,7 +212,7 @@
 	unsigned long prot_mask;
 };
 
-static struct fb_fix_screeninfo atyfb_fix = {
+static const struct fb_fix_screeninfo atyfb_fix = {
 	.id		= "ATY Mach64",
 	.type		= FB_TYPE_PACKED_PIXELS,
 	.visual		= FB_VISUAL_PSEUDOCOLOR,
diff --git a/drivers/video/fbdev/aty/radeon_monitor.c b/drivers/video/fbdev/aty/radeon_monitor.c
index f1ce229..278b421 100644
--- a/drivers/video/fbdev/aty/radeon_monitor.c
+++ b/drivers/video/fbdev/aty/radeon_monitor.c
@@ -4,7 +4,7 @@
 
 #include "../edid.h"
 
-static struct fb_var_screeninfo radeonfb_default_var = {
+static const struct fb_var_screeninfo radeonfb_default_var = {
 	.xres		= 640,
 	.yres		= 480,
 	.xres_virtual	= 640,
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index f9507b1..6c2b2ca 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -43,6 +43,7 @@
 #include <linux/ctype.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
+#include <linux/uaccess.h>
 
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-au1x00/au1200fb.h>	/* platform_data */
diff --git a/drivers/video/fbdev/bfin_adv7393fb.c b/drivers/video/fbdev/bfin_adv7393fb.c
index e2d7d03..542ffad 100644
--- a/drivers/video/fbdev/bfin_adv7393fb.c
+++ b/drivers/video/fbdev/bfin_adv7393fb.c
@@ -375,7 +375,6 @@
 {
 	int ret = 0;
 	struct proc_dir_entry *entry;
-	int num_modes = ARRAY_SIZE(known_modes);
 
 	struct adv7393fb_device *fbdev = NULL;
 
@@ -384,7 +383,7 @@
 		return -EINVAL;
 	}
 
-	if (mode > num_modes) {
+	if (mode >= ARRAY_SIZE(known_modes)) {
 		dev_err(&client->dev, "mode %d: not supported", mode);
 		return -EFAULT;
 	}
@@ -797,7 +796,7 @@
 
 static int __init bfin_adv7393_fb_driver_init(void)
 {
-#if  defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
 	request_module("i2c-bfin-twi");
 #else
 	request_module("i2c-gpio");
diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c
index 07675d6..2d3b691 100644
--- a/drivers/video/fbdev/cobalt_lcdfb.c
+++ b/drivers/video/fbdev/cobalt_lcdfb.c
@@ -63,7 +63,6 @@
 #define LCD_CUR_POS(x)		((x) & LCD_CUR_POS_MASK)
 #define LCD_TEXT_POS(x)		((x) | LCD_TEXT_MODE)
 
-#ifdef CONFIG_MIPS_COBALT
 static inline void lcd_write_control(struct fb_info *info, u8 control)
 {
 	writel((u32)control << 24, info->screen_base);
@@ -83,47 +82,6 @@
 {
 	return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24;
 }
-#else
-
-#define LCD_CTL			0x00
-#define LCD_DATA		0x08
-#define CPLD_STATUS		0x10
-#define CPLD_DATA		0x18
-
-static inline void cpld_wait(struct fb_info *info)
-{
-	do {
-	} while (readl(info->screen_base + CPLD_STATUS) & 1);
-}
-
-static inline void lcd_write_control(struct fb_info *info, u8 control)
-{
-	cpld_wait(info);
-	writel(control, info->screen_base + LCD_CTL);
-}
-
-static inline u8 lcd_read_control(struct fb_info *info)
-{
-	cpld_wait(info);
-	readl(info->screen_base + LCD_CTL);
-	cpld_wait(info);
-	return readl(info->screen_base + CPLD_DATA) & 0xff;
-}
-
-static inline void lcd_write_data(struct fb_info *info, u8 data)
-{
-	cpld_wait(info);
-	writel(data, info->screen_base + LCD_DATA);
-}
-
-static inline u8 lcd_read_data(struct fb_info *info)
-{
-	cpld_wait(info);
-	readl(info->screen_base + LCD_DATA);
-	cpld_wait(info);
-	return readl(info->screen_base + CPLD_DATA) & 0xff;
-}
-#endif
 
 static int lcd_busy_wait(struct fb_info *info)
 {
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 924bad4..37a37c4 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -50,9 +50,9 @@
 		return 1;
 
 	if (regno < 16) {
-		red   >>= 8;
-		green >>= 8;
-		blue  >>= 8;
+		red   >>= 16 - info->var.red.length;
+		green >>= 16 - info->var.green.length;
+		blue  >>= 16 - info->var.blue.length;
 		((u32 *)(info->pseudo_palette))[regno] =
 			(red   << info->var.red.offset)   |
 			(green << info->var.green.offset) |
diff --git a/drivers/video/fbdev/exynos/Kconfig b/drivers/video/fbdev/exynos/Kconfig
deleted file mode 100644
index d916bef..0000000
--- a/drivers/video/fbdev/exynos/Kconfig
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# Exynos Video configuration
-#
-
-menuconfig EXYNOS_VIDEO
-	tristate "Exynos Video driver support"
-	depends on ARCH_S5PV210 || ARCH_EXYNOS
-	help
-	  This enables support for EXYNOS Video device.
-
-if EXYNOS_VIDEO
-
-#
-# MIPI DSI driver
-#
-
-config EXYNOS_MIPI_DSI
-	tristate "EXYNOS MIPI DSI driver support."
-	select GENERIC_PHY
-	help
-	  This enables support for MIPI-DSI device.
-
-config EXYNOS_LCD_S6E8AX0
-	tristate "S6E8AX0 MIPI AMOLED LCD Driver"
-	depends on EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE
-	depends on (LCD_CLASS_DEVICE = y)
-	default n
-	help
-	  If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its
-	  LCD control driver.
-
-endif # EXYNOS_VIDEO
diff --git a/drivers/video/fbdev/exynos/Makefile b/drivers/video/fbdev/exynos/Makefile
deleted file mode 100644
index 02d8dc5..0000000
--- a/drivers/video/fbdev/exynos/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for the exynos video drivers.
-#
-
-obj-$(CONFIG_EXYNOS_MIPI_DSI)		+= exynos-mipi-dsi-mod.o
-
-exynos-mipi-dsi-mod-objs		+= exynos_mipi_dsi.o exynos_mipi_dsi_common.o \
-					   exynos_mipi_dsi_lowlevel.o
-obj-$(CONFIG_EXYNOS_LCD_S6E8AX0)	+= s6e8ax0.o
diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi.c
deleted file mode 100644
index 92e4af3..0000000
--- a/drivers/video/fbdev/exynos/exynos_mipi_dsi.c
+++ /dev/null
@@ -1,574 +0,0 @@
-/* linux/drivers/video/exynos/exynos_mipi_dsi.c
- *
- * Samsung SoC MIPI-DSIM driver.
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
- *
- * InKi Dae, <inki.dae@samsung.com>
- * Donghwa Lee, <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/clk.h>
-#include <linux/mutex.h>
-#include <linux/wait.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/fb.h>
-#include <linux/ctype.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/memory.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/kthread.h>
-#include <linux/notifier.h>
-#include <linux/phy/phy.h>
-#include <linux/regulator/consumer.h>
-#include <linux/pm_runtime.h>
-#include <linux/err.h>
-
-#include <video/exynos_mipi_dsim.h>
-
-#include "exynos_mipi_dsi_common.h"
-#include "exynos_mipi_dsi_lowlevel.h"
-
-struct mipi_dsim_ddi {
-	int				bus_id;
-	struct list_head		list;
-	struct mipi_dsim_lcd_device	*dsim_lcd_dev;
-	struct mipi_dsim_lcd_driver	*dsim_lcd_drv;
-};
-
-static LIST_HEAD(dsim_ddi_list);
-
-static DEFINE_MUTEX(mipi_dsim_lock);
-
-static struct mipi_dsim_platform_data *to_dsim_plat(struct platform_device
-							*pdev)
-{
-	return pdev->dev.platform_data;
-}
-
-static struct regulator_bulk_data supplies[] = {
-	{ .supply = "vdd11", },
-	{ .supply = "vdd18", },
-};
-
-static int exynos_mipi_regulator_enable(struct mipi_dsim_device *dsim)
-{
-	int ret;
-
-	mutex_lock(&dsim->lock);
-	ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
-	mutex_unlock(&dsim->lock);
-
-	return ret;
-}
-
-static int exynos_mipi_regulator_disable(struct mipi_dsim_device *dsim)
-{
-	int ret;
-
-	mutex_lock(&dsim->lock);
-	ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
-	mutex_unlock(&dsim->lock);
-
-	return ret;
-}
-
-/* update all register settings to MIPI DSI controller. */
-static void exynos_mipi_update_cfg(struct mipi_dsim_device *dsim)
-{
-	/*
-	 * data from Display controller(FIMD) is not transferred in video mode
-	 * but in case of command mode, all settings is not updated to
-	 * registers.
-	 */
-	exynos_mipi_dsi_stand_by(dsim, 0);
-
-	exynos_mipi_dsi_init_dsim(dsim);
-	exynos_mipi_dsi_init_link(dsim);
-
-	exynos_mipi_dsi_set_hs_enable(dsim);
-
-	/* set display timing. */
-	exynos_mipi_dsi_set_display_mode(dsim, dsim->dsim_config);
-
-	exynos_mipi_dsi_init_interrupt(dsim);
-
-	/*
-	 * data from Display controller(FIMD) is transferred in video mode
-	 * but in case of command mode, all settings are updated to registers.
-	 */
-	exynos_mipi_dsi_stand_by(dsim, 1);
-}
-
-static int exynos_mipi_dsi_early_blank_mode(struct mipi_dsim_device *dsim,
-		int power)
-{
-	struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
-	struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
-
-	switch (power) {
-	case FB_BLANK_POWERDOWN:
-		if (dsim->suspended)
-			return 0;
-
-		if (client_drv && client_drv->suspend)
-			client_drv->suspend(client_dev);
-
-		clk_disable(dsim->clock);
-
-		exynos_mipi_regulator_disable(dsim);
-
-		dsim->suspended = true;
-
-		break;
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-static int exynos_mipi_dsi_blank_mode(struct mipi_dsim_device *dsim, int power)
-{
-	struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
-	struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
-
-	switch (power) {
-	case FB_BLANK_UNBLANK:
-		if (!dsim->suspended)
-			return 0;
-
-		/* lcd panel power on. */
-		if (client_drv && client_drv->power_on)
-			client_drv->power_on(client_dev, 1);
-
-		exynos_mipi_regulator_enable(dsim);
-
-		/* enable MIPI-DSI PHY. */
-		phy_power_on(dsim->phy);
-
-		clk_enable(dsim->clock);
-
-		exynos_mipi_update_cfg(dsim);
-
-		/* set lcd panel sequence commands. */
-		if (client_drv && client_drv->set_sequence)
-			client_drv->set_sequence(client_dev);
-
-		dsim->suspended = false;
-
-		break;
-	case FB_BLANK_NORMAL:
-		/* TODO. */
-		break;
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-int exynos_mipi_dsi_register_lcd_device(struct mipi_dsim_lcd_device *lcd_dev)
-{
-	struct mipi_dsim_ddi *dsim_ddi;
-
-	if (!lcd_dev->name) {
-		pr_err("dsim_lcd_device name is NULL.\n");
-		return -EFAULT;
-	}
-
-	dsim_ddi = kzalloc(sizeof(struct mipi_dsim_ddi), GFP_KERNEL);
-	if (!dsim_ddi) {
-		pr_err("failed to allocate dsim_ddi object.\n");
-		return -ENOMEM;
-	}
-
-	dsim_ddi->dsim_lcd_dev = lcd_dev;
-
-	mutex_lock(&mipi_dsim_lock);
-	list_add_tail(&dsim_ddi->list, &dsim_ddi_list);
-	mutex_unlock(&mipi_dsim_lock);
-
-	return 0;
-}
-
-static struct mipi_dsim_ddi *exynos_mipi_dsi_find_lcd_device(
-					struct mipi_dsim_lcd_driver *lcd_drv)
-{
-	struct mipi_dsim_ddi *dsim_ddi, *next;
-	struct mipi_dsim_lcd_device *lcd_dev;
-
-	mutex_lock(&mipi_dsim_lock);
-
-	list_for_each_entry_safe(dsim_ddi, next, &dsim_ddi_list, list) {
-		if (!dsim_ddi)
-			goto out;
-
-		lcd_dev = dsim_ddi->dsim_lcd_dev;
-		if (!lcd_dev)
-			continue;
-
-		if ((strcmp(lcd_drv->name, lcd_dev->name)) == 0) {
-			/**
-			 * bus_id would be used to identify
-			 * connected bus.
-			 */
-			dsim_ddi->bus_id = lcd_dev->bus_id;
-			mutex_unlock(&mipi_dsim_lock);
-
-			return dsim_ddi;
-		}
-
-		list_del(&dsim_ddi->list);
-		kfree(dsim_ddi);
-	}
-
-out:
-	mutex_unlock(&mipi_dsim_lock);
-
-	return NULL;
-}
-
-int exynos_mipi_dsi_register_lcd_driver(struct mipi_dsim_lcd_driver *lcd_drv)
-{
-	struct mipi_dsim_ddi *dsim_ddi;
-
-	if (!lcd_drv->name) {
-		pr_err("dsim_lcd_driver name is NULL.\n");
-		return -EFAULT;
-	}
-
-	dsim_ddi = exynos_mipi_dsi_find_lcd_device(lcd_drv);
-	if (!dsim_ddi) {
-		pr_err("mipi_dsim_ddi object not found.\n");
-		return -EFAULT;
-	}
-
-	dsim_ddi->dsim_lcd_drv = lcd_drv;
-
-	pr_info("registered panel driver(%s) to mipi-dsi driver.\n",
-		lcd_drv->name);
-
-	return 0;
-
-}
-EXPORT_SYMBOL_GPL(exynos_mipi_dsi_register_lcd_driver);
-
-static struct mipi_dsim_ddi *exynos_mipi_dsi_bind_lcd_ddi(
-						struct mipi_dsim_device *dsim,
-						const char *name)
-{
-	struct mipi_dsim_ddi *dsim_ddi, *next;
-	struct mipi_dsim_lcd_driver *lcd_drv;
-	struct mipi_dsim_lcd_device *lcd_dev;
-	int ret;
-
-	mutex_lock(&dsim->lock);
-
-	list_for_each_entry_safe(dsim_ddi, next, &dsim_ddi_list, list) {
-		lcd_drv = dsim_ddi->dsim_lcd_drv;
-		lcd_dev = dsim_ddi->dsim_lcd_dev;
-		if (!lcd_drv || !lcd_dev ||
-			(dsim->id != dsim_ddi->bus_id))
-				continue;
-
-		dev_dbg(dsim->dev, "lcd_drv->id = %d, lcd_dev->id = %d\n",
-				lcd_drv->id, lcd_dev->id);
-		dev_dbg(dsim->dev, "lcd_dev->bus_id = %d, dsim->id = %d\n",
-				lcd_dev->bus_id, dsim->id);
-
-		if ((strcmp(lcd_drv->name, name) == 0)) {
-			lcd_dev->master = dsim;
-
-			lcd_dev->dev.parent = dsim->dev;
-			dev_set_name(&lcd_dev->dev, "%s", lcd_drv->name);
-
-			ret = device_register(&lcd_dev->dev);
-			if (ret < 0) {
-				dev_err(dsim->dev,
-					"can't register %s, status %d\n",
-					dev_name(&lcd_dev->dev), ret);
-				mutex_unlock(&dsim->lock);
-
-				return NULL;
-			}
-
-			dsim->dsim_lcd_dev = lcd_dev;
-			dsim->dsim_lcd_drv = lcd_drv;
-
-			mutex_unlock(&dsim->lock);
-
-			return dsim_ddi;
-		}
-	}
-
-	mutex_unlock(&dsim->lock);
-
-	return NULL;
-}
-
-/* define MIPI-DSI Master operations. */
-static struct mipi_dsim_master_ops master_ops = {
-	.cmd_read			= exynos_mipi_dsi_rd_data,
-	.cmd_write			= exynos_mipi_dsi_wr_data,
-	.get_dsim_frame_done		= exynos_mipi_dsi_get_frame_done_status,
-	.clear_dsim_frame_done		= exynos_mipi_dsi_clear_frame_done,
-	.set_early_blank_mode		= exynos_mipi_dsi_early_blank_mode,
-	.set_blank_mode			= exynos_mipi_dsi_blank_mode,
-};
-
-static int exynos_mipi_dsi_probe(struct platform_device *pdev)
-{
-	struct resource *res;
-	struct mipi_dsim_device *dsim;
-	struct mipi_dsim_config *dsim_config;
-	struct mipi_dsim_platform_data *dsim_pd;
-	struct mipi_dsim_ddi *dsim_ddi;
-	int ret = -EINVAL;
-
-	dsim = devm_kzalloc(&pdev->dev, sizeof(struct mipi_dsim_device),
-				GFP_KERNEL);
-	if (!dsim) {
-		dev_err(&pdev->dev, "failed to allocate dsim object.\n");
-		return -ENOMEM;
-	}
-
-	dsim->pd = to_dsim_plat(pdev);
-	dsim->dev = &pdev->dev;
-	dsim->id = pdev->id;
-
-	/* get mipi_dsim_platform_data. */
-	dsim_pd = (struct mipi_dsim_platform_data *)dsim->pd;
-	if (dsim_pd == NULL) {
-		dev_err(&pdev->dev, "failed to get platform data for dsim.\n");
-		return -EINVAL;
-	}
-	/* get mipi_dsim_config. */
-	dsim_config = dsim_pd->dsim_config;
-	if (dsim_config == NULL) {
-		dev_err(&pdev->dev, "failed to get dsim config data.\n");
-		return -EINVAL;
-	}
-
-	dsim->dsim_config = dsim_config;
-	dsim->master_ops = &master_ops;
-
-	mutex_init(&dsim->lock);
-
-	ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(supplies),
-					supplies);
-	if (ret) {
-		dev_err(&pdev->dev, "Failed to get regulators: %d\n", ret);
-		return ret;
-	}
-
-	dsim->phy = devm_phy_get(&pdev->dev, "dsim");
-	if (IS_ERR(dsim->phy))
-		return PTR_ERR(dsim->phy);
-
-	dsim->clock = devm_clk_get(&pdev->dev, "dsim0");
-	if (IS_ERR(dsim->clock)) {
-		dev_err(&pdev->dev, "failed to get dsim clock source\n");
-		return -ENODEV;
-	}
-
-	clk_enable(dsim->clock);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-	dsim->reg_base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(dsim->reg_base)) {
-		ret = PTR_ERR(dsim->reg_base);
-		goto error;
-	}
-
-	mutex_init(&dsim->lock);
-
-	/* bind lcd ddi matched with panel name. */
-	dsim_ddi = exynos_mipi_dsi_bind_lcd_ddi(dsim, dsim_pd->lcd_panel_name);
-	if (!dsim_ddi) {
-		dev_err(&pdev->dev, "mipi_dsim_ddi object not found.\n");
-		ret = -EINVAL;
-		goto error;
-	}
-
-	ret = platform_get_irq(pdev, 0);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "failed to request dsim irq resource\n");
-		goto error;
-	}
-	dsim->irq = ret;
-
-	init_completion(&dsim_wr_comp);
-	init_completion(&dsim_rd_comp);
-	platform_set_drvdata(pdev, dsim);
-
-	ret = devm_request_irq(&pdev->dev, dsim->irq,
-			exynos_mipi_dsi_interrupt_handler,
-			IRQF_SHARED, dev_name(&pdev->dev), dsim);
-	if (ret != 0) {
-		dev_err(&pdev->dev, "failed to request dsim irq\n");
-		ret = -EINVAL;
-		goto error;
-	}
-
-	/* enable interrupts */
-	exynos_mipi_dsi_init_interrupt(dsim);
-
-	/* initialize mipi-dsi client(lcd panel). */
-	if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->probe)
-		dsim_ddi->dsim_lcd_drv->probe(dsim_ddi->dsim_lcd_dev);
-
-	/* in case mipi-dsi has been enabled by bootloader */
-	if (dsim_pd->enabled) {
-		exynos_mipi_regulator_enable(dsim);
-		goto done;
-	}
-
-	/* lcd panel power on. */
-	if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->power_on)
-		dsim_ddi->dsim_lcd_drv->power_on(dsim_ddi->dsim_lcd_dev, 1);
-
-	exynos_mipi_regulator_enable(dsim);
-
-	/* enable MIPI-DSI PHY. */
-	phy_power_on(dsim->phy);
-
-	exynos_mipi_update_cfg(dsim);
-
-	/* set lcd panel sequence commands. */
-	if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->set_sequence)
-		dsim_ddi->dsim_lcd_drv->set_sequence(dsim_ddi->dsim_lcd_dev);
-
-	dsim->suspended = false;
-
-done:
-	platform_set_drvdata(pdev, dsim);
-
-	dev_dbg(&pdev->dev, "%s() completed successfully (%s mode)\n", __func__,
-		dsim_config->e_interface == DSIM_COMMAND ? "CPU" : "RGB");
-
-	return 0;
-
-error:
-	clk_disable(dsim->clock);
-	return ret;
-}
-
-static int exynos_mipi_dsi_remove(struct platform_device *pdev)
-{
-	struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
-	struct mipi_dsim_ddi *dsim_ddi, *next;
-	struct mipi_dsim_lcd_driver *dsim_lcd_drv;
-
-	clk_disable(dsim->clock);
-
-	list_for_each_entry_safe(dsim_ddi, next, &dsim_ddi_list, list) {
-		if (dsim_ddi) {
-			if (dsim->id != dsim_ddi->bus_id)
-				continue;
-
-			dsim_lcd_drv = dsim_ddi->dsim_lcd_drv;
-
-			if (dsim_lcd_drv->remove)
-				dsim_lcd_drv->remove(dsim_ddi->dsim_lcd_dev);
-
-			kfree(dsim_ddi);
-		}
-	}
-
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int exynos_mipi_dsi_suspend(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
-	struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
-	struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
-
-	disable_irq(dsim->irq);
-
-	if (dsim->suspended)
-		return 0;
-
-	if (client_drv && client_drv->suspend)
-		client_drv->suspend(client_dev);
-
-	/* disable MIPI-DSI PHY. */
-	phy_power_off(dsim->phy);
-
-	clk_disable(dsim->clock);
-
-	exynos_mipi_regulator_disable(dsim);
-
-	dsim->suspended = true;
-
-	return 0;
-}
-
-static int exynos_mipi_dsi_resume(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
-	struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
-	struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
-
-	enable_irq(dsim->irq);
-
-	if (!dsim->suspended)
-		return 0;
-
-	/* lcd panel power on. */
-	if (client_drv && client_drv->power_on)
-		client_drv->power_on(client_dev, 1);
-
-	exynos_mipi_regulator_enable(dsim);
-
-	/* enable MIPI-DSI PHY. */
-	phy_power_on(dsim->phy);
-
-	clk_enable(dsim->clock);
-
-	exynos_mipi_update_cfg(dsim);
-
-	/* set lcd panel sequence commands. */
-	if (client_drv && client_drv->set_sequence)
-		client_drv->set_sequence(client_dev);
-
-	dsim->suspended = false;
-
-	return 0;
-}
-#endif
-
-static const struct dev_pm_ops exynos_mipi_dsi_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(exynos_mipi_dsi_suspend, exynos_mipi_dsi_resume)
-};
-
-static struct platform_driver exynos_mipi_dsi_driver = {
-	.probe = exynos_mipi_dsi_probe,
-	.remove = exynos_mipi_dsi_remove,
-	.driver = {
-		   .name = "exynos-mipi-dsim",
-		   .pm = &exynos_mipi_dsi_pm_ops,
-	},
-};
-
-module_platform_driver(exynos_mipi_dsi_driver);
-
-MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>");
-MODULE_DESCRIPTION("Samsung SoC MIPI-DSI driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c
deleted file mode 100644
index 2358a2f..0000000
--- a/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c
+++ /dev/null
@@ -1,880 +0,0 @@
-/* linux/drivers/video/exynos/exynos_mipi_dsi_common.c
- *
- * Samsung SoC MIPI-DSI common driver.
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
- *
- * InKi Dae, <inki.dae@samsung.com>
- * Donghwa Lee, <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/mutex.h>
-#include <linux/wait.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/fb.h>
-#include <linux/ctype.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/memory.h>
-#include <linux/delay.h>
-#include <linux/irqreturn.h>
-#include <linux/kthread.h>
-
-#include <video/mipi_display.h>
-#include <video/exynos_mipi_dsim.h>
-
-#include "exynos_mipi_dsi_regs.h"
-#include "exynos_mipi_dsi_lowlevel.h"
-#include "exynos_mipi_dsi_common.h"
-
-#define MIPI_FIFO_TIMEOUT	msecs_to_jiffies(250)
-#define MIPI_RX_FIFO_READ_DONE  0x30800002
-#define MIPI_MAX_RX_FIFO        20
-#define MHZ			(1000 * 1000)
-#define FIN_HZ			(24 * MHZ)
-
-#define DFIN_PLL_MIN_HZ		(6 * MHZ)
-#define DFIN_PLL_MAX_HZ		(12 * MHZ)
-
-#define DFVCO_MIN_HZ		(500 * MHZ)
-#define DFVCO_MAX_HZ		(1000 * MHZ)
-
-#define TRY_GET_FIFO_TIMEOUT	(5000 * 2)
-#define TRY_FIFO_CLEAR		(10)
-
-/* MIPI-DSIM status types. */
-enum {
-	DSIM_STATE_INIT,	/* should be initialized. */
-	DSIM_STATE_STOP,	/* CPU and LCDC are LP mode. */
-	DSIM_STATE_HSCLKEN,	/* HS clock was enabled. */
-	DSIM_STATE_ULPS
-};
-
-/* define DSI lane types. */
-enum {
-	DSIM_LANE_CLOCK = (1 << 0),
-	DSIM_LANE_DATA0 = (1 << 1),
-	DSIM_LANE_DATA1 = (1 << 2),
-	DSIM_LANE_DATA2 = (1 << 3),
-	DSIM_LANE_DATA3 = (1 << 4)
-};
-
-static unsigned int dpll_table[15] = {
-	100, 120, 170, 220, 270,
-	320, 390, 450, 510, 560,
-	640, 690, 770, 870, 950
-};
-
-irqreturn_t exynos_mipi_dsi_interrupt_handler(int irq, void *dev_id)
-{
-	struct mipi_dsim_device *dsim = dev_id;
-	unsigned int intsrc, intmsk;
-
-	intsrc = exynos_mipi_dsi_read_interrupt(dsim);
-	intmsk = exynos_mipi_dsi_read_interrupt_mask(dsim);
-	intmsk = ~intmsk & intsrc;
-
-	if (intsrc & INTMSK_RX_DONE) {
-		complete(&dsim_rd_comp);
-		dev_dbg(dsim->dev, "MIPI INTMSK_RX_DONE\n");
-	}
-	if (intsrc & INTMSK_FIFO_EMPTY) {
-		complete(&dsim_wr_comp);
-		dev_dbg(dsim->dev, "MIPI INTMSK_FIFO_EMPTY\n");
-	}
-
-	exynos_mipi_dsi_clear_interrupt(dsim, intmsk);
-
-	return IRQ_HANDLED;
-}
-
-/*
- * write long packet to mipi dsi slave
- * @dsim: mipi dsim device structure.
- * @data0: packet data to send.
- * @data1: size of packet data
- */
-static void exynos_mipi_dsi_long_data_wr(struct mipi_dsim_device *dsim,
-		const unsigned char *data0, unsigned int data_size)
-{
-	unsigned int data_cnt = 0, payload = 0;
-
-	/* in case that data count is more then 4 */
-	for (data_cnt = 0; data_cnt < data_size; data_cnt += 4) {
-		/*
-		 * after sending 4bytes per one time,
-		 * send remainder data less then 4.
-		 */
-		if ((data_size - data_cnt) < 4) {
-			if ((data_size - data_cnt) == 3) {
-				payload = data0[data_cnt] |
-				    data0[data_cnt + 1] << 8 |
-					data0[data_cnt + 2] << 16;
-			dev_dbg(dsim->dev, "count = 3 payload = %x, %x %x %x\n",
-				payload, data0[data_cnt],
-				data0[data_cnt + 1],
-				data0[data_cnt + 2]);
-			} else if ((data_size - data_cnt) == 2) {
-				payload = data0[data_cnt] |
-					data0[data_cnt + 1] << 8;
-			dev_dbg(dsim->dev,
-				"count = 2 payload = %x, %x %x\n", payload,
-				data0[data_cnt],
-				data0[data_cnt + 1]);
-			} else if ((data_size - data_cnt) == 1) {
-				payload = data0[data_cnt];
-			}
-
-			exynos_mipi_dsi_wr_tx_data(dsim, payload);
-		/* send 4bytes per one time. */
-		} else {
-			payload = data0[data_cnt] |
-				data0[data_cnt + 1] << 8 |
-				data0[data_cnt + 2] << 16 |
-				data0[data_cnt + 3] << 24;
-
-			dev_dbg(dsim->dev,
-				"count = 4 payload = %x, %x %x %x %x\n",
-				payload, *(u8 *)(data0 + data_cnt),
-				data0[data_cnt + 1],
-				data0[data_cnt + 2],
-				data0[data_cnt + 3]);
-
-			exynos_mipi_dsi_wr_tx_data(dsim, payload);
-		}
-	}
-}
-
-int exynos_mipi_dsi_wr_data(struct mipi_dsim_device *dsim, unsigned int data_id,
-	const unsigned char *data0, unsigned int data_size)
-{
-	unsigned int check_rx_ack = 0;
-
-	if (dsim->state == DSIM_STATE_ULPS) {
-		dev_err(dsim->dev, "state is ULPS.\n");
-
-		return -EINVAL;
-	}
-
-	/* FIXME!!! why does it need this delay? */
-	msleep(20);
-
-	mutex_lock(&dsim->lock);
-
-	switch (data_id) {
-	/* short packet types of packet types for command. */
-	case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
-	case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
-	case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
-	case MIPI_DSI_DCS_SHORT_WRITE:
-	case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
-	case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
-		exynos_mipi_dsi_wr_tx_header(dsim, data_id, data0[0], data0[1]);
-		if (check_rx_ack) {
-			/* process response func should be implemented */
-			mutex_unlock(&dsim->lock);
-			return 0;
-		} else {
-			mutex_unlock(&dsim->lock);
-			return -EINVAL;
-		}
-
-	/* general command */
-	case MIPI_DSI_COLOR_MODE_OFF:
-	case MIPI_DSI_COLOR_MODE_ON:
-	case MIPI_DSI_SHUTDOWN_PERIPHERAL:
-	case MIPI_DSI_TURN_ON_PERIPHERAL:
-		exynos_mipi_dsi_wr_tx_header(dsim, data_id, data0[0], data0[1]);
-		if (check_rx_ack) {
-			/* process response func should be implemented. */
-			mutex_unlock(&dsim->lock);
-			return 0;
-		} else {
-			mutex_unlock(&dsim->lock);
-			return -EINVAL;
-		}
-
-	/* packet types for video data */
-	case MIPI_DSI_V_SYNC_START:
-	case MIPI_DSI_V_SYNC_END:
-	case MIPI_DSI_H_SYNC_START:
-	case MIPI_DSI_H_SYNC_END:
-	case MIPI_DSI_END_OF_TRANSMISSION:
-		mutex_unlock(&dsim->lock);
-		return 0;
-
-	/* long packet type and null packet */
-	case MIPI_DSI_NULL_PACKET:
-	case MIPI_DSI_BLANKING_PACKET:
-		mutex_unlock(&dsim->lock);
-		return 0;
-	case MIPI_DSI_GENERIC_LONG_WRITE:
-	case MIPI_DSI_DCS_LONG_WRITE:
-	{
-		unsigned int size, payload = 0;
-		reinit_completion(&dsim_wr_comp);
-
-		size = data_size * 4;
-
-		/* if data count is less then 4, then send 3bytes data.  */
-		if (data_size < 4) {
-			payload = data0[0] |
-				data0[1] << 8 |
-				data0[2] << 16;
-
-			exynos_mipi_dsi_wr_tx_data(dsim, payload);
-
-			dev_dbg(dsim->dev, "count = %d payload = %x,%x %x %x\n",
-				data_size, payload, data0[0],
-				data0[1], data0[2]);
-
-		/* in case that data count is more then 4 */
-		} else
-			exynos_mipi_dsi_long_data_wr(dsim, data0, data_size);
-
-		/* put data into header fifo */
-		exynos_mipi_dsi_wr_tx_header(dsim, data_id, data_size & 0xff,
-			(data_size & 0xff00) >> 8);
-
-		if (!wait_for_completion_interruptible_timeout(&dsim_wr_comp,
-							MIPI_FIFO_TIMEOUT)) {
-			dev_warn(dsim->dev, "command write timeout.\n");
-			mutex_unlock(&dsim->lock);
-			return -EAGAIN;
-		}
-
-		if (check_rx_ack) {
-			/* process response func should be implemented. */
-			mutex_unlock(&dsim->lock);
-			return 0;
-		} else {
-			mutex_unlock(&dsim->lock);
-			return -EINVAL;
-		}
-	}
-
-	/* packet typo for video data */
-	case MIPI_DSI_PACKED_PIXEL_STREAM_16:
-	case MIPI_DSI_PACKED_PIXEL_STREAM_18:
-	case MIPI_DSI_PIXEL_STREAM_3BYTE_18:
-	case MIPI_DSI_PACKED_PIXEL_STREAM_24:
-		if (check_rx_ack) {
-			/* process response func should be implemented. */
-			mutex_unlock(&dsim->lock);
-			return 0;
-		} else {
-			mutex_unlock(&dsim->lock);
-			return -EINVAL;
-		}
-	default:
-		dev_warn(dsim->dev,
-			"data id %x is not supported current DSI spec.\n",
-			data_id);
-
-		mutex_unlock(&dsim->lock);
-		return -EINVAL;
-	}
-}
-
-static unsigned int exynos_mipi_dsi_long_data_rd(struct mipi_dsim_device *dsim,
-		unsigned int req_size, unsigned int rx_data, u8 *rx_buf)
-{
-	unsigned int rcv_pkt, i, j;
-	u16 rxsize;
-
-	/* for long packet */
-	rxsize = (u16)((rx_data & 0x00ffff00) >> 8);
-	dev_dbg(dsim->dev, "mipi dsi rx size : %d\n", rxsize);
-	if (rxsize != req_size) {
-		dev_dbg(dsim->dev,
-			"received size mismatch received: %d, requested: %d\n",
-			rxsize, req_size);
-		goto err;
-	}
-
-	for (i = 0; i < (rxsize >> 2); i++) {
-		rcv_pkt = exynos_mipi_dsi_rd_rx_fifo(dsim);
-		dev_dbg(dsim->dev, "received pkt : %08x\n", rcv_pkt);
-		for (j = 0; j < 4; j++) {
-			rx_buf[(i * 4) + j] =
-					(u8)(rcv_pkt >> (j * 8)) & 0xff;
-			dev_dbg(dsim->dev, "received value : %02x\n",
-					(rcv_pkt >> (j * 8)) & 0xff);
-		}
-	}
-	if (rxsize % 4) {
-		rcv_pkt = exynos_mipi_dsi_rd_rx_fifo(dsim);
-		dev_dbg(dsim->dev, "received pkt : %08x\n", rcv_pkt);
-		for (j = 0; j < (rxsize % 4); j++) {
-			rx_buf[(i * 4) + j] =
-					(u8)(rcv_pkt >> (j * 8)) & 0xff;
-			dev_dbg(dsim->dev, "received value : %02x\n",
-					(rcv_pkt >> (j * 8)) & 0xff);
-		}
-	}
-
-	return rxsize;
-
-err:
-	return -EINVAL;
-}
-
-static unsigned int exynos_mipi_dsi_response_size(unsigned int req_size)
-{
-	switch (req_size) {
-	case 1:
-		return MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE;
-	case 2:
-		return MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE;
-	default:
-		return MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE;
-	}
-}
-
-int exynos_mipi_dsi_rd_data(struct mipi_dsim_device *dsim, unsigned int data_id,
-	unsigned int data0, unsigned int req_size, u8 *rx_buf)
-{
-	unsigned int rx_data, rcv_pkt, i;
-	u8 response = 0;
-	u16 rxsize;
-
-	if (dsim->state == DSIM_STATE_ULPS) {
-		dev_err(dsim->dev, "state is ULPS.\n");
-
-		return -EINVAL;
-	}
-
-	/* FIXME!!! */
-	msleep(20);
-
-	mutex_lock(&dsim->lock);
-	reinit_completion(&dsim_rd_comp);
-	exynos_mipi_dsi_rd_tx_header(dsim,
-		MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, req_size);
-
-	response = exynos_mipi_dsi_response_size(req_size);
-
-	switch (data_id) {
-	case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
-	case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
-	case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
-	case MIPI_DSI_DCS_READ:
-		exynos_mipi_dsi_rd_tx_header(dsim,
-			data_id, data0);
-		/* process response func should be implemented. */
-		break;
-	default:
-		dev_warn(dsim->dev,
-			"data id %x is not supported current DSI spec.\n",
-			data_id);
-
-		mutex_unlock(&dsim->lock);
-		return -EINVAL;
-	}
-
-	if (!wait_for_completion_interruptible_timeout(&dsim_rd_comp,
-				MIPI_FIFO_TIMEOUT)) {
-		pr_err("RX done interrupt timeout\n");
-		mutex_unlock(&dsim->lock);
-		return 0;
-	}
-
-	msleep(20);
-
-	rx_data = exynos_mipi_dsi_rd_rx_fifo(dsim);
-
-	if ((u8)(rx_data & 0xff) != response) {
-		printk(KERN_ERR
-			"mipi dsi wrong response rx_data : %x, response:%x\n",
-			rx_data, response);
-		goto clear_rx_fifo;
-	}
-
-	if (req_size <= 2) {
-		/* for short packet */
-		for (i = 0; i < req_size; i++)
-			rx_buf[i] = (rx_data >> (8 + (i * 8))) & 0xff;
-		rxsize = req_size;
-	} else {
-		/* for long packet */
-		rxsize = exynos_mipi_dsi_long_data_rd(dsim, req_size, rx_data,
-							rx_buf);
-		if (rxsize != req_size)
-			goto clear_rx_fifo;
-	}
-
-	rcv_pkt = exynos_mipi_dsi_rd_rx_fifo(dsim);
-
-	msleep(20);
-
-	if (rcv_pkt != MIPI_RX_FIFO_READ_DONE) {
-		dev_info(dsim->dev,
-			"Can't found RX FIFO READ DONE FLAG : %x\n", rcv_pkt);
-		goto clear_rx_fifo;
-	}
-
-	mutex_unlock(&dsim->lock);
-
-	return rxsize;
-
-clear_rx_fifo:
-	i = 0;
-	while (1) {
-		rcv_pkt = exynos_mipi_dsi_rd_rx_fifo(dsim);
-		if ((rcv_pkt == MIPI_RX_FIFO_READ_DONE)
-				|| (i > MIPI_MAX_RX_FIFO))
-			break;
-		dev_dbg(dsim->dev,
-				"mipi dsi clear rx fifo : %08x\n", rcv_pkt);
-		i++;
-	}
-	dev_info(dsim->dev,
-		"mipi dsi rx done count : %d, rcv_pkt : %08x\n", i, rcv_pkt);
-
-	mutex_unlock(&dsim->lock);
-
-	return 0;
-}
-
-static int exynos_mipi_dsi_pll_on(struct mipi_dsim_device *dsim,
-				unsigned int enable)
-{
-	int sw_timeout;
-
-	if (enable) {
-		sw_timeout = 1000;
-
-		exynos_mipi_dsi_enable_pll(dsim, 1);
-		while (1) {
-			sw_timeout--;
-			if (exynos_mipi_dsi_is_pll_stable(dsim))
-				return 0;
-			if (sw_timeout == 0)
-				return -EINVAL;
-		}
-	} else
-		exynos_mipi_dsi_enable_pll(dsim, 0);
-
-	return 0;
-}
-
-static unsigned long exynos_mipi_dsi_change_pll(struct mipi_dsim_device *dsim,
-	unsigned int pre_divider, unsigned int main_divider,
-	unsigned int scaler)
-{
-	unsigned long dfin_pll, dfvco, dpll_out;
-	unsigned int i, freq_band = 0xf;
-
-	dfin_pll = (FIN_HZ / pre_divider);
-
-	/******************************************************
-	 *	Serial Clock(=ByteClk X 8)	FreqBand[3:0] *
-	 ******************************************************
-	 *	~ 99.99 MHz			0000
-	 *	100 ~ 119.99 MHz		0001
-	 *	120 ~ 159.99 MHz		0010
-	 *	160 ~ 199.99 MHz		0011
-	 *	200 ~ 239.99 MHz		0100
-	 *	140 ~ 319.99 MHz		0101
-	 *	320 ~ 389.99 MHz		0110
-	 *	390 ~ 449.99 MHz		0111
-	 *	450 ~ 509.99 MHz		1000
-	 *	510 ~ 559.99 MHz		1001
-	 *	560 ~ 639.99 MHz		1010
-	 *	640 ~ 689.99 MHz		1011
-	 *	690 ~ 769.99 MHz		1100
-	 *	770 ~ 869.99 MHz		1101
-	 *	870 ~ 949.99 MHz		1110
-	 *	950 ~ 1000 MHz			1111
-	 ******************************************************/
-	if (dfin_pll < DFIN_PLL_MIN_HZ || dfin_pll > DFIN_PLL_MAX_HZ) {
-		dev_warn(dsim->dev, "fin_pll range should be 6MHz ~ 12MHz\n");
-		exynos_mipi_dsi_enable_afc(dsim, 0, 0);
-	} else {
-		if (dfin_pll < 7 * MHZ)
-			exynos_mipi_dsi_enable_afc(dsim, 1, 0x1);
-		else if (dfin_pll < 8 * MHZ)
-			exynos_mipi_dsi_enable_afc(dsim, 1, 0x0);
-		else if (dfin_pll < 9 * MHZ)
-			exynos_mipi_dsi_enable_afc(dsim, 1, 0x3);
-		else if (dfin_pll < 10 * MHZ)
-			exynos_mipi_dsi_enable_afc(dsim, 1, 0x2);
-		else if (dfin_pll < 11 * MHZ)
-			exynos_mipi_dsi_enable_afc(dsim, 1, 0x5);
-		else
-			exynos_mipi_dsi_enable_afc(dsim, 1, 0x4);
-	}
-
-	dfvco = dfin_pll * main_divider;
-	dev_dbg(dsim->dev, "dfvco = %lu, dfin_pll = %lu, main_divider = %d\n",
-				dfvco, dfin_pll, main_divider);
-	if (dfvco < DFVCO_MIN_HZ || dfvco > DFVCO_MAX_HZ)
-		dev_warn(dsim->dev, "fvco range should be 500MHz ~ 1000MHz\n");
-
-	dpll_out = dfvco / (1 << scaler);
-	dev_dbg(dsim->dev, "dpll_out = %lu, dfvco = %lu, scaler = %d\n",
-		dpll_out, dfvco, scaler);
-
-	for (i = 0; i < ARRAY_SIZE(dpll_table); i++) {
-		if (dpll_out < dpll_table[i] * MHZ) {
-			freq_band = i;
-			break;
-		}
-	}
-
-	dev_dbg(dsim->dev, "freq_band = %d\n", freq_band);
-
-	exynos_mipi_dsi_pll_freq(dsim, pre_divider, main_divider, scaler);
-
-	exynos_mipi_dsi_hs_zero_ctrl(dsim, 0);
-	exynos_mipi_dsi_prep_ctrl(dsim, 0);
-
-	/* Freq Band */
-	exynos_mipi_dsi_pll_freq_band(dsim, freq_band);
-
-	/* Stable time */
-	exynos_mipi_dsi_pll_stable_time(dsim, dsim->dsim_config->pll_stable_time);
-
-	/* Enable PLL */
-	dev_dbg(dsim->dev, "FOUT of mipi dphy pll is %luMHz\n",
-		(dpll_out / MHZ));
-
-	return dpll_out;
-}
-
-static int exynos_mipi_dsi_set_clock(struct mipi_dsim_device *dsim,
-	unsigned int byte_clk_sel, unsigned int enable)
-{
-	unsigned int esc_div;
-	unsigned long esc_clk_error_rate;
-	unsigned long hs_clk = 0, byte_clk = 0, escape_clk = 0;
-
-	if (enable) {
-		dsim->e_clk_src = byte_clk_sel;
-
-		/* Escape mode clock and byte clock source */
-		exynos_mipi_dsi_set_byte_clock_src(dsim, byte_clk_sel);
-
-		/* DPHY, DSIM Link : D-PHY clock out */
-		if (byte_clk_sel == DSIM_PLL_OUT_DIV8) {
-			hs_clk = exynos_mipi_dsi_change_pll(dsim,
-				dsim->dsim_config->p, dsim->dsim_config->m,
-				dsim->dsim_config->s);
-			if (hs_clk == 0) {
-				dev_err(dsim->dev,
-					"failed to get hs clock.\n");
-				return -EINVAL;
-			}
-
-			byte_clk = hs_clk / 8;
-			exynos_mipi_dsi_enable_pll_bypass(dsim, 0);
-			exynos_mipi_dsi_pll_on(dsim, 1);
-		/* DPHY : D-PHY clock out, DSIM link : external clock out */
-		} else if (byte_clk_sel == DSIM_EXT_CLK_DIV8) {
-			dev_warn(dsim->dev, "this project is not support\n");
-			dev_warn(dsim->dev,
-				"external clock source for MIPI DSIM.\n");
-		} else if (byte_clk_sel == DSIM_EXT_CLK_BYPASS) {
-			dev_warn(dsim->dev, "this project is not support\n");
-			dev_warn(dsim->dev,
-				"external clock source for MIPI DSIM\n");
-		}
-
-		/* escape clock divider */
-		esc_div = byte_clk / (dsim->dsim_config->esc_clk);
-		dev_dbg(dsim->dev,
-			"esc_div = %d, byte_clk = %lu, esc_clk = %lu\n",
-			esc_div, byte_clk, dsim->dsim_config->esc_clk);
-		if ((byte_clk / esc_div) >= (20 * MHZ) ||
-				(byte_clk / esc_div) >
-					dsim->dsim_config->esc_clk)
-			esc_div += 1;
-
-		escape_clk = byte_clk / esc_div;
-		dev_dbg(dsim->dev,
-			"escape_clk = %lu, byte_clk = %lu, esc_div = %d\n",
-			escape_clk, byte_clk, esc_div);
-
-		/* enable escape clock. */
-		exynos_mipi_dsi_enable_byte_clock(dsim, 1);
-
-		/* enable byte clk and escape clock */
-		exynos_mipi_dsi_set_esc_clk_prs(dsim, 1, esc_div);
-		/* escape clock on lane */
-		exynos_mipi_dsi_enable_esc_clk_on_lane(dsim,
-			(DSIM_LANE_CLOCK | dsim->data_lane), 1);
-
-		dev_dbg(dsim->dev, "byte clock is %luMHz\n",
-			(byte_clk / MHZ));
-		dev_dbg(dsim->dev, "escape clock that user's need is %lu\n",
-			(dsim->dsim_config->esc_clk / MHZ));
-		dev_dbg(dsim->dev, "escape clock divider is %x\n", esc_div);
-		dev_dbg(dsim->dev, "escape clock is %luMHz\n",
-			((byte_clk / esc_div) / MHZ));
-
-		if ((byte_clk / esc_div) > escape_clk) {
-			esc_clk_error_rate = escape_clk /
-				(byte_clk / esc_div);
-			dev_warn(dsim->dev, "error rate is %lu over.\n",
-				(esc_clk_error_rate / 100));
-		} else if ((byte_clk / esc_div) < (escape_clk)) {
-			esc_clk_error_rate = (byte_clk / esc_div) /
-				escape_clk;
-			dev_warn(dsim->dev, "error rate is %lu under.\n",
-				(esc_clk_error_rate / 100));
-		}
-	} else {
-		exynos_mipi_dsi_enable_esc_clk_on_lane(dsim,
-			(DSIM_LANE_CLOCK | dsim->data_lane), 0);
-		exynos_mipi_dsi_set_esc_clk_prs(dsim, 0, 0);
-
-		/* disable escape clock. */
-		exynos_mipi_dsi_enable_byte_clock(dsim, 0);
-
-		if (byte_clk_sel == DSIM_PLL_OUT_DIV8)
-			exynos_mipi_dsi_pll_on(dsim, 0);
-	}
-
-	return 0;
-}
-
-int exynos_mipi_dsi_init_dsim(struct mipi_dsim_device *dsim)
-{
-	dsim->state = DSIM_STATE_INIT;
-
-	switch (dsim->dsim_config->e_no_data_lane) {
-	case DSIM_DATA_LANE_1:
-		dsim->data_lane = DSIM_LANE_DATA0;
-		break;
-	case DSIM_DATA_LANE_2:
-		dsim->data_lane = DSIM_LANE_DATA0 | DSIM_LANE_DATA1;
-		break;
-	case DSIM_DATA_LANE_3:
-		dsim->data_lane = DSIM_LANE_DATA0 | DSIM_LANE_DATA1 |
-			DSIM_LANE_DATA2;
-		break;
-	case DSIM_DATA_LANE_4:
-		dsim->data_lane = DSIM_LANE_DATA0 | DSIM_LANE_DATA1 |
-			DSIM_LANE_DATA2 | DSIM_LANE_DATA3;
-		break;
-	default:
-		dev_info(dsim->dev, "data lane is invalid.\n");
-		return -EINVAL;
-	}
-
-	exynos_mipi_dsi_sw_reset(dsim);
-	exynos_mipi_dsi_func_reset(dsim);
-
-	exynos_mipi_dsi_dp_dn_swap(dsim, 0);
-
-	return 0;
-}
-
-void exynos_mipi_dsi_init_interrupt(struct mipi_dsim_device *dsim)
-{
-	unsigned int src = 0;
-
-	src = (INTSRC_SFR_FIFO_EMPTY | INTSRC_RX_DATA_DONE);
-	exynos_mipi_dsi_set_interrupt(dsim, src, 1);
-
-	src = 0;
-	src = ~(INTMSK_RX_DONE | INTMSK_FIFO_EMPTY);
-	exynos_mipi_dsi_set_interrupt_mask(dsim, src, 1);
-}
-
-int exynos_mipi_dsi_enable_frame_done_int(struct mipi_dsim_device *dsim,
-	unsigned int enable)
-{
-	/* enable only frame done interrupt */
-	exynos_mipi_dsi_set_interrupt_mask(dsim, INTMSK_FRAME_DONE, enable);
-
-	return 0;
-}
-
-void exynos_mipi_dsi_stand_by(struct mipi_dsim_device *dsim,
-		unsigned int enable)
-{
-
-	/* consider Main display and Sub display. */
-
-	exynos_mipi_dsi_set_main_stand_by(dsim, enable);
-}
-
-int exynos_mipi_dsi_set_display_mode(struct mipi_dsim_device *dsim,
-	struct mipi_dsim_config *dsim_config)
-{
-	struct mipi_dsim_platform_data *dsim_pd;
-	struct fb_videomode *timing;
-
-	dsim_pd = (struct mipi_dsim_platform_data *)dsim->pd;
-	timing = (struct fb_videomode *)dsim_pd->lcd_panel_info;
-
-	/* in case of VIDEO MODE (RGB INTERFACE), it sets polarities. */
-	if (dsim_config->e_interface == (u32) DSIM_VIDEO) {
-		if (dsim_config->auto_vertical_cnt == 0) {
-			exynos_mipi_dsi_set_main_disp_vporch(dsim,
-				dsim_config->cmd_allow,
-				timing->lower_margin,
-				timing->upper_margin);
-			exynos_mipi_dsi_set_main_disp_hporch(dsim,
-				timing->right_margin,
-				timing->left_margin);
-			exynos_mipi_dsi_set_main_disp_sync_area(dsim,
-				timing->vsync_len,
-				timing->hsync_len);
-		}
-	}
-
-	exynos_mipi_dsi_set_main_disp_resol(dsim, timing->xres,
-			timing->yres);
-
-	exynos_mipi_dsi_display_config(dsim, dsim_config);
-
-	dev_info(dsim->dev, "lcd panel ==> width = %d, height = %d\n",
-			timing->xres, timing->yres);
-
-	return 0;
-}
-
-int exynos_mipi_dsi_init_link(struct mipi_dsim_device *dsim)
-{
-	unsigned int time_out = 100;
-
-	switch (dsim->state) {
-	case DSIM_STATE_INIT:
-		exynos_mipi_dsi_init_fifo_pointer(dsim, 0x1f);
-
-		/* dsi configuration */
-		exynos_mipi_dsi_init_config(dsim);
-		exynos_mipi_dsi_enable_lane(dsim, DSIM_LANE_CLOCK, 1);
-		exynos_mipi_dsi_enable_lane(dsim, dsim->data_lane, 1);
-
-		/* set clock configuration */
-		exynos_mipi_dsi_set_clock(dsim, dsim->dsim_config->e_byte_clk, 1);
-
-		/* check clock and data lane state are stop state */
-		while (!(exynos_mipi_dsi_is_lane_state(dsim))) {
-			time_out--;
-			if (time_out == 0) {
-				dev_err(dsim->dev,
-					"DSI Master is not stop state.\n");
-				dev_err(dsim->dev,
-					"Check initialization process\n");
-
-				return -EINVAL;
-			}
-		}
-		if (time_out != 0) {
-			dev_info(dsim->dev,
-				"DSI Master driver has been completed.\n");
-			dev_info(dsim->dev, "DSI Master state is stop state\n");
-		}
-
-		dsim->state = DSIM_STATE_STOP;
-
-		/* BTA sequence counters */
-		exynos_mipi_dsi_set_stop_state_counter(dsim,
-			dsim->dsim_config->stop_holding_cnt);
-		exynos_mipi_dsi_set_bta_timeout(dsim,
-			dsim->dsim_config->bta_timeout);
-		exynos_mipi_dsi_set_lpdr_timeout(dsim,
-			dsim->dsim_config->rx_timeout);
-
-		return 0;
-	default:
-		dev_info(dsim->dev, "DSI Master is already init.\n");
-		return 0;
-	}
-
-	return 0;
-}
-
-int exynos_mipi_dsi_set_hs_enable(struct mipi_dsim_device *dsim)
-{
-	if (dsim->state != DSIM_STATE_STOP) {
-		dev_warn(dsim->dev, "DSIM is not in stop state.\n");
-		return 0;
-	}
-
-	if (dsim->e_clk_src == DSIM_EXT_CLK_BYPASS) {
-		dev_warn(dsim->dev, "clock source is external bypass.\n");
-		return 0;
-	}
-
-	dsim->state = DSIM_STATE_HSCLKEN;
-
-	 /* set LCDC and CPU transfer mode to HS. */
-	exynos_mipi_dsi_set_lcdc_transfer_mode(dsim, 0);
-	exynos_mipi_dsi_set_cpu_transfer_mode(dsim, 0);
-	exynos_mipi_dsi_enable_hs_clock(dsim, 1);
-
-	return 0;
-}
-
-int exynos_mipi_dsi_set_data_transfer_mode(struct mipi_dsim_device *dsim,
-		unsigned int mode)
-{
-	if (mode) {
-		if (dsim->state != DSIM_STATE_HSCLKEN) {
-			dev_err(dsim->dev, "HS Clock lane is not enabled.\n");
-			return -EINVAL;
-		}
-
-		exynos_mipi_dsi_set_lcdc_transfer_mode(dsim, 0);
-	} else {
-		if (dsim->state == DSIM_STATE_INIT || dsim->state ==
-			DSIM_STATE_ULPS) {
-			dev_err(dsim->dev,
-				"DSI Master is not STOP or HSDT state.\n");
-			return -EINVAL;
-		}
-
-		exynos_mipi_dsi_set_cpu_transfer_mode(dsim, 0);
-	}
-
-	return 0;
-}
-
-int exynos_mipi_dsi_get_frame_done_status(struct mipi_dsim_device *dsim)
-{
-	return _exynos_mipi_dsi_get_frame_done_status(dsim);
-}
-
-int exynos_mipi_dsi_clear_frame_done(struct mipi_dsim_device *dsim)
-{
-	_exynos_mipi_dsi_clear_frame_done(dsim);
-
-	return 0;
-}
-
-int exynos_mipi_dsi_fifo_clear(struct mipi_dsim_device *dsim,
-				unsigned int val)
-{
-	int try = TRY_FIFO_CLEAR;
-
-	exynos_mipi_dsi_sw_reset_release(dsim);
-	exynos_mipi_dsi_func_reset(dsim);
-
-	do {
-		if (exynos_mipi_dsi_get_sw_reset_release(dsim)) {
-			exynos_mipi_dsi_init_interrupt(dsim);
-			dev_dbg(dsim->dev, "reset release done.\n");
-			return 0;
-		}
-	} while (--try);
-
-	dev_err(dsim->dev, "failed to clear dsim fifo.\n");
-	return -EAGAIN;
-}
-
-MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>");
-MODULE_DESCRIPTION("Samsung SoC MIPI-DSI common driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.h b/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.h
deleted file mode 100644
index 4125522..0000000
--- a/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* linux/drivers/video/exynos_mipi_dsi_common.h
- *
- * Header file for Samsung SoC MIPI-DSI common driver.
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
- *
- * InKi Dae <inki.dae@samsung.com>
- * Donghwa Lee <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef _EXYNOS_MIPI_DSI_COMMON_H
-#define _EXYNOS_MIPI_DSI_COMMON_H
-
-static DECLARE_COMPLETION(dsim_rd_comp);
-static DECLARE_COMPLETION(dsim_wr_comp);
-
-int exynos_mipi_dsi_wr_data(struct mipi_dsim_device *dsim, unsigned int data_id,
-	const unsigned char *data0, unsigned int data_size);
-int exynos_mipi_dsi_rd_data(struct mipi_dsim_device *dsim, unsigned int data_id,
-	unsigned int data0, unsigned int req_size, u8 *rx_buf);
-irqreturn_t exynos_mipi_dsi_interrupt_handler(int irq, void *dev_id);
-void exynos_mipi_dsi_init_interrupt(struct mipi_dsim_device *dsim);
-int exynos_mipi_dsi_init_dsim(struct mipi_dsim_device *dsim);
-void exynos_mipi_dsi_stand_by(struct mipi_dsim_device *dsim,
-		unsigned int enable);
-int exynos_mipi_dsi_set_display_mode(struct mipi_dsim_device *dsim,
-			struct mipi_dsim_config *dsim_info);
-int exynos_mipi_dsi_init_link(struct mipi_dsim_device *dsim);
-int exynos_mipi_dsi_set_hs_enable(struct mipi_dsim_device *dsim);
-int exynos_mipi_dsi_set_data_transfer_mode(struct mipi_dsim_device *dsim,
-		unsigned int mode);
-int exynos_mipi_dsi_enable_frame_done_int(struct mipi_dsim_device *dsim,
-	unsigned int enable);
-int exynos_mipi_dsi_get_frame_done_status(struct mipi_dsim_device *dsim);
-int exynos_mipi_dsi_clear_frame_done(struct mipi_dsim_device *dsim);
-
-extern struct fb_info *registered_fb[FB_MAX] __read_mostly;
-
-int exynos_mipi_dsi_fifo_clear(struct mipi_dsim_device *dsim,
-				unsigned int val);
-
-#endif /* _EXYNOS_MIPI_DSI_COMMON_H */
diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.c
deleted file mode 100644
index c148d06..0000000
--- a/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.c
+++ /dev/null
@@ -1,618 +0,0 @@
-/* linux/drivers/video/exynos/exynos_mipi_dsi_lowlevel.c
- *
- * Samsung SoC MIPI-DSI lowlevel driver.
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
- *
- * InKi Dae, <inki.dae@samsung.com>
- * Donghwa Lee, <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/mutex.h>
-#include <linux/wait.h>
-#include <linux/delay.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/ctype.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include <video/exynos_mipi_dsim.h>
-
-#include "exynos_mipi_dsi_regs.h"
-#include "exynos_mipi_dsi_lowlevel.h"
-
-void exynos_mipi_dsi_func_reset(struct mipi_dsim_device *dsim)
-{
-	unsigned int reg;
-
-	reg = readl(dsim->reg_base + EXYNOS_DSIM_SWRST);
-
-	reg |= DSIM_FUNCRST;
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_SWRST);
-}
-
-void exynos_mipi_dsi_sw_reset(struct mipi_dsim_device *dsim)
-{
-	unsigned int reg;
-
-	reg = readl(dsim->reg_base + EXYNOS_DSIM_SWRST);
-
-	reg |= DSIM_SWRST;
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_SWRST);
-}
-
-void exynos_mipi_dsi_sw_reset_release(struct mipi_dsim_device *dsim)
-{
-	unsigned int reg;
-
-	reg = readl(dsim->reg_base + EXYNOS_DSIM_INTSRC);
-
-	reg |= INTSRC_SW_RST_RELEASE;
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_INTSRC);
-}
-
-int exynos_mipi_dsi_get_sw_reset_release(struct mipi_dsim_device *dsim)
-{
-	return (readl(dsim->reg_base + EXYNOS_DSIM_INTSRC)) &
-			INTSRC_SW_RST_RELEASE;
-}
-
-unsigned int exynos_mipi_dsi_read_interrupt_mask(struct mipi_dsim_device *dsim)
-{
-	unsigned int reg;
-
-	reg = readl(dsim->reg_base + EXYNOS_DSIM_INTMSK);
-
-	return reg;
-}
-
-void exynos_mipi_dsi_set_interrupt_mask(struct mipi_dsim_device *dsim,
-		unsigned int mode, unsigned int mask)
-{
-	unsigned int reg = 0;
-
-	if (mask)
-		reg |= mode;
-	else
-		reg &= ~mode;
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_INTMSK);
-}
-
-void exynos_mipi_dsi_init_fifo_pointer(struct mipi_dsim_device *dsim,
-		unsigned int cfg)
-{
-	unsigned int reg;
-
-	reg = readl(dsim->reg_base + EXYNOS_DSIM_FIFOCTRL);
-
-	writel(reg & ~(cfg), dsim->reg_base + EXYNOS_DSIM_FIFOCTRL);
-	mdelay(10);
-	reg |= cfg;
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_FIFOCTRL);
-}
-
-/*
- * this function set PLL P, M and S value in D-PHY
- */
-void exynos_mipi_dsi_set_phy_tunning(struct mipi_dsim_device *dsim,
-		unsigned int value)
-{
-	writel(DSIM_AFC_CTL(value), dsim->reg_base + EXYNOS_DSIM_PHYACCHR);
-}
-
-void exynos_mipi_dsi_set_main_stand_by(struct mipi_dsim_device *dsim,
-		unsigned int enable)
-{
-	unsigned int reg;
-
-	reg = readl(dsim->reg_base + EXYNOS_DSIM_MDRESOL);
-
-	reg &= ~DSIM_MAIN_STAND_BY;
-
-	if (enable)
-		reg |= DSIM_MAIN_STAND_BY;
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_MDRESOL);
-}
-
-void exynos_mipi_dsi_set_main_disp_resol(struct mipi_dsim_device *dsim,
-	unsigned int width_resol, unsigned int height_resol)
-{
-	unsigned int reg;
-
-	/* standby should be set after configuration so set to not ready*/
-	reg = (readl(dsim->reg_base + EXYNOS_DSIM_MDRESOL)) &
-		~(DSIM_MAIN_STAND_BY);
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_MDRESOL);
-
-	reg &= ~((0x7ff << 16) | (0x7ff << 0));
-	reg |= DSIM_MAIN_VRESOL(height_resol) | DSIM_MAIN_HRESOL(width_resol);
-
-	reg |= DSIM_MAIN_STAND_BY;
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_MDRESOL);
-}
-
-void exynos_mipi_dsi_set_main_disp_vporch(struct mipi_dsim_device *dsim,
-	unsigned int cmd_allow, unsigned int vfront, unsigned int vback)
-{
-	unsigned int reg;
-
-	reg = (readl(dsim->reg_base + EXYNOS_DSIM_MVPORCH)) &
-		~((DSIM_CMD_ALLOW_MASK) | (DSIM_STABLE_VFP_MASK) |
-		(DSIM_MAIN_VBP_MASK));
-
-	reg |= (DSIM_CMD_ALLOW_SHIFT(cmd_allow & 0xf) |
-		DSIM_STABLE_VFP_SHIFT(vfront & 0x7ff) |
-		DSIM_MAIN_VBP_SHIFT(vback & 0x7ff));
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_MVPORCH);
-}
-
-void exynos_mipi_dsi_set_main_disp_hporch(struct mipi_dsim_device *dsim,
-	unsigned int front, unsigned int back)
-{
-	unsigned int reg;
-
-	reg = (readl(dsim->reg_base + EXYNOS_DSIM_MHPORCH)) &
-		~((DSIM_MAIN_HFP_MASK) | (DSIM_MAIN_HBP_MASK));
-
-	reg |= DSIM_MAIN_HFP_SHIFT(front) | DSIM_MAIN_HBP_SHIFT(back);
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_MHPORCH);
-}
-
-void exynos_mipi_dsi_set_main_disp_sync_area(struct mipi_dsim_device *dsim,
-	unsigned int vert, unsigned int hori)
-{
-	unsigned int reg;
-
-	reg = (readl(dsim->reg_base + EXYNOS_DSIM_MSYNC)) &
-		~((DSIM_MAIN_VSA_MASK) | (DSIM_MAIN_HSA_MASK));
-
-	reg |= (DSIM_MAIN_VSA_SHIFT(vert & 0x3ff) |
-		DSIM_MAIN_HSA_SHIFT(hori));
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_MSYNC);
-}
-
-void exynos_mipi_dsi_set_sub_disp_resol(struct mipi_dsim_device *dsim,
-	unsigned int vert, unsigned int hori)
-{
-	unsigned int reg;
-
-	reg = (readl(dsim->reg_base + EXYNOS_DSIM_SDRESOL)) &
-		~(DSIM_SUB_STANDY_MASK);
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_SDRESOL);
-
-	reg &= ~(DSIM_SUB_VRESOL_MASK) | ~(DSIM_SUB_HRESOL_MASK);
-	reg |= (DSIM_SUB_VRESOL_SHIFT(vert & 0x7ff) |
-		DSIM_SUB_HRESOL_SHIFT(hori & 0x7ff));
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_SDRESOL);
-
-	reg |= DSIM_SUB_STANDY_SHIFT(1);
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_SDRESOL);
-}
-
-void exynos_mipi_dsi_init_config(struct mipi_dsim_device *dsim)
-{
-	struct mipi_dsim_config *dsim_config = dsim->dsim_config;
-
-	unsigned int cfg = (readl(dsim->reg_base + EXYNOS_DSIM_CONFIG)) &
-		~((1 << 28) | (0x1f << 20) | (0x3 << 5));
-
-	cfg =	((DSIM_AUTO_FLUSH(dsim_config->auto_flush)) |
-		(DSIM_EOT_DISABLE(dsim_config->eot_disable)) |
-		(DSIM_AUTO_MODE_SHIFT(dsim_config->auto_vertical_cnt)) |
-		(DSIM_HSE_MODE_SHIFT(dsim_config->hse)) |
-		(DSIM_HFP_MODE_SHIFT(dsim_config->hfp)) |
-		(DSIM_HBP_MODE_SHIFT(dsim_config->hbp)) |
-		(DSIM_HSA_MODE_SHIFT(dsim_config->hsa)) |
-		(DSIM_NUM_OF_DATALANE_SHIFT(dsim_config->e_no_data_lane)));
-
-	writel(cfg, dsim->reg_base + EXYNOS_DSIM_CONFIG);
-}
-
-void exynos_mipi_dsi_display_config(struct mipi_dsim_device *dsim,
-				struct mipi_dsim_config *dsim_config)
-{
-	u32 reg = (readl(dsim->reg_base + EXYNOS_DSIM_CONFIG)) &
-		~((0x3 << 26) | (1 << 25) | (0x3 << 18) | (0x7 << 12) |
-		(0x3 << 16) | (0x7 << 8));
-
-	if (dsim_config->e_interface == DSIM_VIDEO)
-		reg |= (1 << 25);
-	else if (dsim_config->e_interface == DSIM_COMMAND)
-		reg &= ~(1 << 25);
-	else {
-		dev_err(dsim->dev, "unknown lcd type.\n");
-		return;
-	}
-
-	/* main lcd */
-	reg |= ((u8) (dsim_config->e_burst_mode) & 0x3) << 26 |
-		((u8) (dsim_config->e_virtual_ch) & 0x3) << 18 |
-		((u8) (dsim_config->e_pixel_format) & 0x7) << 12;
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_CONFIG);
-}
-
-void exynos_mipi_dsi_enable_lane(struct mipi_dsim_device *dsim, unsigned int lane,
-	unsigned int enable)
-{
-	unsigned int reg;
-
-	reg = readl(dsim->reg_base + EXYNOS_DSIM_CONFIG);
-
-	if (enable)
-		reg |= DSIM_LANE_ENx(lane);
-	else
-		reg &= ~DSIM_LANE_ENx(lane);
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_CONFIG);
-}
-
-
-void exynos_mipi_dsi_set_data_lane_number(struct mipi_dsim_device *dsim,
-	unsigned int count)
-{
-	unsigned int cfg;
-
-	/* get the data lane number. */
-	cfg = DSIM_NUM_OF_DATALANE_SHIFT(count);
-
-	writel(cfg, dsim->reg_base + EXYNOS_DSIM_CONFIG);
-}
-
-void exynos_mipi_dsi_enable_afc(struct mipi_dsim_device *dsim, unsigned int enable,
-	unsigned int afc_code)
-{
-	unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_PHYACCHR);
-
-	if (enable) {
-		reg |= (1 << 14);
-		reg &= ~(0x7 << 5);
-		reg |= (afc_code & 0x7) << 5;
-	} else
-		reg &= ~(1 << 14);
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_PHYACCHR);
-}
-
-void exynos_mipi_dsi_enable_pll_bypass(struct mipi_dsim_device *dsim,
-	unsigned int enable)
-{
-	unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) &
-		~(DSIM_PLL_BYPASS_SHIFT(0x1));
-
-	reg |= DSIM_PLL_BYPASS_SHIFT(enable);
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL);
-}
-
-void exynos_mipi_dsi_set_pll_pms(struct mipi_dsim_device *dsim, unsigned int p,
-	unsigned int m, unsigned int s)
-{
-	unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL);
-
-	reg |= ((p & 0x3f) << 13) | ((m & 0x1ff) << 4) | ((s & 0x7) << 1);
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL);
-}
-
-void exynos_mipi_dsi_pll_freq_band(struct mipi_dsim_device *dsim,
-		unsigned int freq_band)
-{
-	unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) &
-		~(DSIM_FREQ_BAND_SHIFT(0x1f));
-
-	reg |= DSIM_FREQ_BAND_SHIFT(freq_band & 0x1f);
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL);
-}
-
-void exynos_mipi_dsi_pll_freq(struct mipi_dsim_device *dsim,
-		unsigned int pre_divider, unsigned int main_divider,
-		unsigned int scaler)
-{
-	unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) &
-		~(0x7ffff << 1);
-
-	reg |= (pre_divider & 0x3f) << 13 | (main_divider & 0x1ff) << 4 |
-		(scaler & 0x7) << 1;
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL);
-}
-
-void exynos_mipi_dsi_pll_stable_time(struct mipi_dsim_device *dsim,
-	unsigned int lock_time)
-{
-	writel(lock_time, dsim->reg_base + EXYNOS_DSIM_PLLTMR);
-}
-
-void exynos_mipi_dsi_enable_pll(struct mipi_dsim_device *dsim, unsigned int enable)
-{
-	unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) &
-		~(DSIM_PLL_EN_SHIFT(0x1));
-
-	reg |= DSIM_PLL_EN_SHIFT(enable & 0x1);
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL);
-}
-
-void exynos_mipi_dsi_set_byte_clock_src(struct mipi_dsim_device *dsim,
-		unsigned int src)
-{
-	unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) &
-		~(DSIM_BYTE_CLK_SRC_SHIFT(0x3));
-
-	reg |= (DSIM_BYTE_CLK_SRC_SHIFT(src));
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL);
-}
-
-void exynos_mipi_dsi_enable_byte_clock(struct mipi_dsim_device *dsim,
-		unsigned int enable)
-{
-	unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) &
-		~(DSIM_BYTE_CLKEN_SHIFT(0x1));
-
-	reg |= DSIM_BYTE_CLKEN_SHIFT(enable);
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL);
-}
-
-void exynos_mipi_dsi_set_esc_clk_prs(struct mipi_dsim_device *dsim,
-		unsigned int enable, unsigned int prs_val)
-{
-	unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) &
-		~(DSIM_ESC_CLKEN_SHIFT(0x1) | 0xffff);
-
-	reg |= DSIM_ESC_CLKEN_SHIFT(enable);
-	if (enable)
-		reg |= prs_val;
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL);
-}
-
-void exynos_mipi_dsi_enable_esc_clk_on_lane(struct mipi_dsim_device *dsim,
-		unsigned int lane_sel, unsigned int enable)
-{
-	unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL);
-
-	if (enable)
-		reg |= DSIM_LANE_ESC_CLKEN(lane_sel);
-	else
-
-		reg &= ~DSIM_LANE_ESC_CLKEN(lane_sel);
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL);
-}
-
-void exynos_mipi_dsi_force_dphy_stop_state(struct mipi_dsim_device *dsim,
-	unsigned int enable)
-{
-	unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_ESCMODE)) &
-		~(DSIM_FORCE_STOP_STATE_SHIFT(0x1));
-
-	reg |= (DSIM_FORCE_STOP_STATE_SHIFT(enable & 0x1));
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_ESCMODE);
-}
-
-unsigned int exynos_mipi_dsi_is_lane_state(struct mipi_dsim_device *dsim)
-{
-	unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_STATUS);
-
-	/**
-	 * check clock and data lane states.
-	 * if MIPI-DSI controller was enabled at bootloader then
-	 * TX_READY_HS_CLK is enabled otherwise STOP_STATE_CLK.
-	 * so it should be checked for two case.
-	 */
-	if ((reg & DSIM_STOP_STATE_DAT(0xf)) &&
-			((reg & DSIM_STOP_STATE_CLK) ||
-			 (reg & DSIM_TX_READY_HS_CLK)))
-		return 1;
-
-	return 0;
-}
-
-void exynos_mipi_dsi_set_stop_state_counter(struct mipi_dsim_device *dsim,
-		unsigned int cnt_val)
-{
-	unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_ESCMODE)) &
-		~(DSIM_STOP_STATE_CNT_SHIFT(0x7ff));
-
-	reg |= (DSIM_STOP_STATE_CNT_SHIFT(cnt_val & 0x7ff));
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_ESCMODE);
-}
-
-void exynos_mipi_dsi_set_bta_timeout(struct mipi_dsim_device *dsim,
-		unsigned int timeout)
-{
-	unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_TIMEOUT)) &
-		~(DSIM_BTA_TOUT_SHIFT(0xff));
-
-	reg |= (DSIM_BTA_TOUT_SHIFT(timeout));
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_TIMEOUT);
-}
-
-void exynos_mipi_dsi_set_lpdr_timeout(struct mipi_dsim_device *dsim,
-		unsigned int timeout)
-{
-	unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_TIMEOUT)) &
-		~(DSIM_LPDR_TOUT_SHIFT(0xffff));
-
-	reg |= (DSIM_LPDR_TOUT_SHIFT(timeout));
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_TIMEOUT);
-}
-
-void exynos_mipi_dsi_set_cpu_transfer_mode(struct mipi_dsim_device *dsim,
-		unsigned int lp)
-{
-	unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_ESCMODE);
-
-	reg &= ~DSIM_CMD_LPDT_LP;
-
-	if (lp)
-		reg |= DSIM_CMD_LPDT_LP;
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_ESCMODE);
-}
-
-void exynos_mipi_dsi_set_lcdc_transfer_mode(struct mipi_dsim_device *dsim,
-		unsigned int lp)
-{
-	unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_ESCMODE);
-
-	reg &= ~DSIM_TX_LPDT_LP;
-
-	if (lp)
-		reg |= DSIM_TX_LPDT_LP;
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_ESCMODE);
-}
-
-void exynos_mipi_dsi_enable_hs_clock(struct mipi_dsim_device *dsim,
-		unsigned int enable)
-{
-	unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) &
-		~(DSIM_TX_REQUEST_HSCLK_SHIFT(0x1));
-
-	reg |= DSIM_TX_REQUEST_HSCLK_SHIFT(enable);
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL);
-}
-
-void exynos_mipi_dsi_dp_dn_swap(struct mipi_dsim_device *dsim,
-		unsigned int swap_en)
-{
-	unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_PHYACCHR1);
-
-	reg &= ~(0x3 << 0);
-	reg |= (swap_en & 0x3) << 0;
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_PHYACCHR1);
-}
-
-void exynos_mipi_dsi_hs_zero_ctrl(struct mipi_dsim_device *dsim,
-		unsigned int hs_zero)
-{
-	unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) &
-		~(0xf << 28);
-
-	reg |= ((hs_zero & 0xf) << 28);
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL);
-}
-
-void exynos_mipi_dsi_prep_ctrl(struct mipi_dsim_device *dsim, unsigned int prep)
-{
-	unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) &
-		~(0x7 << 20);
-
-	reg |= ((prep & 0x7) << 20);
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL);
-}
-
-unsigned int exynos_mipi_dsi_read_interrupt(struct mipi_dsim_device *dsim)
-{
-	return readl(dsim->reg_base + EXYNOS_DSIM_INTSRC);
-}
-
-void exynos_mipi_dsi_clear_interrupt(struct mipi_dsim_device *dsim,
-					unsigned int src)
-{
-	unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_INTSRC);
-
-	reg |= src;
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_INTSRC);
-}
-
-void exynos_mipi_dsi_set_interrupt(struct mipi_dsim_device *dsim,
-					unsigned int src, unsigned int enable)
-{
-	unsigned int reg = 0;
-
-	if (enable)
-		reg |= src;
-	else
-		reg &= ~src;
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_INTSRC);
-}
-
-unsigned int exynos_mipi_dsi_is_pll_stable(struct mipi_dsim_device *dsim)
-{
-	unsigned int reg;
-
-	reg = readl(dsim->reg_base + EXYNOS_DSIM_STATUS);
-
-	return reg & (1 << 31) ? 1 : 0;
-}
-
-unsigned int exynos_mipi_dsi_get_fifo_state(struct mipi_dsim_device *dsim)
-{
-	return readl(dsim->reg_base + EXYNOS_DSIM_FIFOCTRL) & ~(0x1f);
-}
-
-void exynos_mipi_dsi_wr_tx_header(struct mipi_dsim_device *dsim,
-	unsigned int di, unsigned int data0, unsigned int data1)
-{
-	unsigned int reg = (data1 << 16) | (data0 << 8) | ((di & 0x3f) << 0);
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_PKTHDR);
-}
-
-void exynos_mipi_dsi_rd_tx_header(struct mipi_dsim_device *dsim,
-	unsigned int di, unsigned int data0)
-{
-	unsigned int reg = (data0 << 8) | (di << 0);
-
-	writel(reg, dsim->reg_base + EXYNOS_DSIM_PKTHDR);
-}
-
-unsigned int exynos_mipi_dsi_rd_rx_fifo(struct mipi_dsim_device *dsim)
-{
-	return readl(dsim->reg_base + EXYNOS_DSIM_RXFIFO);
-}
-
-unsigned int _exynos_mipi_dsi_get_frame_done_status(struct mipi_dsim_device *dsim)
-{
-	unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_INTSRC);
-
-	return (reg & INTSRC_FRAME_DONE) ? 1 : 0;
-}
-
-void _exynos_mipi_dsi_clear_frame_done(struct mipi_dsim_device *dsim)
-{
-	unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_INTSRC);
-
-	writel(reg | INTSRC_FRAME_DONE, dsim->reg_base +
-		EXYNOS_DSIM_INTSRC);
-}
-
-void exynos_mipi_dsi_wr_tx_data(struct mipi_dsim_device *dsim,
-		unsigned int tx_data)
-{
-	writel(tx_data, dsim->reg_base + EXYNOS_DSIM_PAYLOAD);
-}
diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.h b/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.h
deleted file mode 100644
index 8546070..0000000
--- a/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/* linux/drivers/video/exynos/exynos_mipi_dsi_lowlevel.h
- *
- * Header file for Samsung SoC MIPI-DSI lowlevel driver.
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
- *
- * InKi Dae <inki.dae@samsung.com>
- * Donghwa Lee <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef _EXYNOS_MIPI_DSI_LOWLEVEL_H
-#define _EXYNOS_MIPI_DSI_LOWLEVEL_H
-
-void exynos_mipi_dsi_func_reset(struct mipi_dsim_device *dsim);
-void exynos_mipi_dsi_sw_reset(struct mipi_dsim_device *dsim);
-void exynos_mipi_dsi_sw_reset_release(struct mipi_dsim_device *dsim);
-int exynos_mipi_dsi_get_sw_reset_release(struct mipi_dsim_device *dsim);
-void exynos_mipi_dsi_set_interrupt_mask(struct mipi_dsim_device *dsim,
-	unsigned int mode, unsigned int mask);
-void exynos_mipi_dsi_set_data_lane_number(struct mipi_dsim_device *dsim,
-					unsigned int count);
-void exynos_mipi_dsi_init_fifo_pointer(struct mipi_dsim_device *dsim,
-					unsigned int cfg);
-void exynos_mipi_dsi_set_phy_tunning(struct mipi_dsim_device *dsim,
-				unsigned int value);
-void exynos_mipi_dsi_set_phy_tunning(struct mipi_dsim_device *dsim,
-				unsigned int value);
-void exynos_mipi_dsi_set_main_stand_by(struct mipi_dsim_device *dsim,
-		unsigned int enable);
-void exynos_mipi_dsi_set_main_disp_resol(struct mipi_dsim_device *dsim,
-		unsigned int width_resol, unsigned int height_resol);
-void exynos_mipi_dsi_set_main_disp_vporch(struct mipi_dsim_device *dsim,
-	unsigned int cmd_allow, unsigned int vfront, unsigned int vback);
-void exynos_mipi_dsi_set_main_disp_hporch(struct mipi_dsim_device *dsim,
-			unsigned int front, unsigned int back);
-void exynos_mipi_dsi_set_main_disp_sync_area(struct mipi_dsim_device *dsim,
-				unsigned int vert, unsigned int hori);
-void exynos_mipi_dsi_set_sub_disp_resol(struct mipi_dsim_device *dsim,
-				unsigned int vert, unsigned int hori);
-void exynos_mipi_dsi_init_config(struct mipi_dsim_device *dsim);
-void exynos_mipi_dsi_display_config(struct mipi_dsim_device *dsim,
-				struct mipi_dsim_config *dsim_config);
-void exynos_mipi_dsi_set_data_lane_number(struct mipi_dsim_device *dsim,
-				unsigned int count);
-void exynos_mipi_dsi_enable_lane(struct mipi_dsim_device *dsim, unsigned int lane,
-				unsigned int enable);
-void exynos_mipi_dsi_enable_afc(struct mipi_dsim_device *dsim, unsigned int enable,
-				unsigned int afc_code);
-void exynos_mipi_dsi_enable_pll_bypass(struct mipi_dsim_device *dsim,
-				unsigned int enable);
-void exynos_mipi_dsi_set_pll_pms(struct mipi_dsim_device *dsim, unsigned int p,
-				unsigned int m, unsigned int s);
-void exynos_mipi_dsi_pll_freq_band(struct mipi_dsim_device *dsim,
-				unsigned int freq_band);
-void exynos_mipi_dsi_pll_freq(struct mipi_dsim_device *dsim,
-			unsigned int pre_divider, unsigned int main_divider,
-			unsigned int scaler);
-void exynos_mipi_dsi_pll_stable_time(struct mipi_dsim_device *dsim,
-			unsigned int lock_time);
-void exynos_mipi_dsi_enable_pll(struct mipi_dsim_device *dsim,
-					unsigned int enable);
-void exynos_mipi_dsi_set_byte_clock_src(struct mipi_dsim_device *dsim,
-					unsigned int src);
-void exynos_mipi_dsi_enable_byte_clock(struct mipi_dsim_device *dsim,
-					unsigned int enable);
-void exynos_mipi_dsi_set_esc_clk_prs(struct mipi_dsim_device *dsim,
-				unsigned int enable, unsigned int prs_val);
-void exynos_mipi_dsi_enable_esc_clk_on_lane(struct mipi_dsim_device *dsim,
-				unsigned int lane_sel, unsigned int enable);
-void exynos_mipi_dsi_force_dphy_stop_state(struct mipi_dsim_device *dsim,
-				unsigned int enable);
-unsigned int exynos_mipi_dsi_is_lane_state(struct mipi_dsim_device *dsim);
-void exynos_mipi_dsi_set_stop_state_counter(struct mipi_dsim_device *dsim,
-				unsigned int cnt_val);
-void exynos_mipi_dsi_set_bta_timeout(struct mipi_dsim_device *dsim,
-				unsigned int timeout);
-void exynos_mipi_dsi_set_lpdr_timeout(struct mipi_dsim_device *dsim,
-				unsigned int timeout);
-void exynos_mipi_dsi_set_lcdc_transfer_mode(struct mipi_dsim_device *dsim,
-					unsigned int lp);
-void exynos_mipi_dsi_set_cpu_transfer_mode(struct mipi_dsim_device *dsim,
-					unsigned int lp);
-void exynos_mipi_dsi_enable_hs_clock(struct mipi_dsim_device *dsim,
-				unsigned int enable);
-void exynos_mipi_dsi_dp_dn_swap(struct mipi_dsim_device *dsim,
-				unsigned int swap_en);
-void exynos_mipi_dsi_hs_zero_ctrl(struct mipi_dsim_device *dsim,
-				unsigned int hs_zero);
-void exynos_mipi_dsi_prep_ctrl(struct mipi_dsim_device *dsim, unsigned int prep);
-unsigned int exynos_mipi_dsi_read_interrupt(struct mipi_dsim_device *dsim);
-unsigned int exynos_mipi_dsi_read_interrupt_mask(struct mipi_dsim_device *dsim);
-void exynos_mipi_dsi_clear_interrupt(struct mipi_dsim_device *dsim,
-					unsigned int src);
-void exynos_mipi_dsi_set_interrupt(struct mipi_dsim_device *dsim,
-					unsigned int src, unsigned int enable);
-unsigned int exynos_mipi_dsi_is_pll_stable(struct mipi_dsim_device *dsim);
-unsigned int exynos_mipi_dsi_get_fifo_state(struct mipi_dsim_device *dsim);
-unsigned int _exynos_mipi_dsi_get_frame_done_status(struct mipi_dsim_device *dsim);
-void _exynos_mipi_dsi_clear_frame_done(struct mipi_dsim_device *dsim);
-void exynos_mipi_dsi_wr_tx_header(struct mipi_dsim_device *dsim, unsigned int di,
-				unsigned int data0, unsigned int data1);
-void exynos_mipi_dsi_wr_tx_data(struct mipi_dsim_device *dsim,
-		unsigned int tx_data);
-void exynos_mipi_dsi_rd_tx_header(struct mipi_dsim_device *dsim,
-		unsigned int data0, unsigned int data1);
-unsigned int exynos_mipi_dsi_rd_rx_fifo(struct mipi_dsim_device *dsim);
-
-#endif /* _EXYNOS_MIPI_DSI_LOWLEVEL_H */
diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi_regs.h b/drivers/video/fbdev/exynos/exynos_mipi_dsi_regs.h
deleted file mode 100644
index 4227106..0000000
--- a/drivers/video/fbdev/exynos/exynos_mipi_dsi_regs.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/* linux/driver/video/exynos/exynos_mipi_dsi_regs.h
- *
- * Register definition file for Samsung MIPI-DSIM driver
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
- *
- * InKi Dae <inki.dae@samsung.com>
- * Donghwa Lee <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef _EXYNOS_MIPI_DSI_REGS_H
-#define _EXYNOS_MIPI_DSI_REGS_H
-
-#define EXYNOS_DSIM_STATUS		0x0	/* Status register */
-#define EXYNOS_DSIM_SWRST		0x4	/* Software reset register */
-#define EXYNOS_DSIM_CLKCTRL		0x8	/* Clock control register */
-#define EXYNOS_DSIM_TIMEOUT		0xc	/* Time out register */
-#define EXYNOS_DSIM_CONFIG		0x10	/* Configuration register */
-#define EXYNOS_DSIM_ESCMODE		0x14	/* Escape mode register */
-
-/* Main display image resolution register */
-#define EXYNOS_DSIM_MDRESOL		0x18
-#define EXYNOS_DSIM_MVPORCH		0x1c	/* Main display Vporch register */
-#define EXYNOS_DSIM_MHPORCH		0x20	/* Main display Hporch register */
-#define EXYNOS_DSIM_MSYNC		0x24	/* Main display sync area register */
-
-/* Sub display image resolution register */
-#define EXYNOS_DSIM_SDRESOL		0x28
-#define EXYNOS_DSIM_INTSRC		0x2c	/* Interrupt source register */
-#define EXYNOS_DSIM_INTMSK		0x30	/* Interrupt mask register */
-#define EXYNOS_DSIM_PKTHDR		0x34	/* Packet Header FIFO register */
-#define EXYNOS_DSIM_PAYLOAD		0x38	/* Payload FIFO register */
-#define EXYNOS_DSIM_RXFIFO		0x3c	/* Read FIFO register */
-#define EXYNOS_DSIM_FIFOTHLD		0x40	/* FIFO threshold level register */
-#define EXYNOS_DSIM_FIFOCTRL		0x44	/* FIFO status and control register */
-
-/* FIFO memory AC characteristic register */
-#define EXYNOS_DSIM_PLLCTRL		0x4c	/* PLL control register */
-#define EXYNOS_DSIM_PLLTMR		0x50	/* PLL timer register */
-#define EXYNOS_DSIM_PHYACCHR		0x54	/* D-PHY AC characteristic register */
-#define EXYNOS_DSIM_PHYACCHR1		0x58	/* D-PHY AC characteristic register1 */
-
-/* DSIM_STATUS */
-#define DSIM_STOP_STATE_DAT(x)		(((x) & 0xf) << 0)
-#define DSIM_STOP_STATE_CLK		(1 << 8)
-#define DSIM_TX_READY_HS_CLK		(1 << 10)
-
-/* DSIM_SWRST */
-#define DSIM_FUNCRST			(1 << 16)
-#define DSIM_SWRST			(1 << 0)
-
-/* EXYNOS_DSIM_TIMEOUT */
-#define DSIM_LPDR_TOUT_SHIFT(x)		((x) << 0)
-#define DSIM_BTA_TOUT_SHIFT(x)		((x) << 16)
-
-/* EXYNOS_DSIM_CLKCTRL */
-#define DSIM_LANE_ESC_CLKEN(x)		(((x) & 0x1f) << 19)
-#define DSIM_BYTE_CLKEN_SHIFT(x)	((x) << 24)
-#define DSIM_BYTE_CLK_SRC_SHIFT(x)	((x) <<	25)
-#define DSIM_PLL_BYPASS_SHIFT(x)	((x) <<	27)
-#define DSIM_ESC_CLKEN_SHIFT(x)		((x) << 28)
-#define DSIM_TX_REQUEST_HSCLK_SHIFT(x)	((x) << 31)
-
-/* EXYNOS_DSIM_CONFIG */
-#define DSIM_LANE_ENx(x)		(((x) & 0x1f) << 0)
-#define DSIM_NUM_OF_DATALANE_SHIFT(x)	((x) << 5)
-#define DSIM_HSA_MODE_SHIFT(x)		((x) << 20)
-#define DSIM_HBP_MODE_SHIFT(x)		((x) << 21)
-#define DSIM_HFP_MODE_SHIFT(x)		((x) << 22)
-#define DSIM_HSE_MODE_SHIFT(x)		((x) << 23)
-#define DSIM_AUTO_MODE_SHIFT(x)		((x) << 24)
-#define DSIM_EOT_DISABLE(x)		((x) << 28)
-#define DSIM_AUTO_FLUSH(x)		((x) << 29)
-
-#define DSIM_NUM_OF_DATA_LANE(x)	((x) << DSIM_NUM_OF_DATALANE_SHIFT)
-
-/* EXYNOS_DSIM_ESCMODE */
-#define DSIM_TX_LPDT_LP			(1 << 6)
-#define DSIM_CMD_LPDT_LP		(1 << 7)
-#define DSIM_FORCE_STOP_STATE_SHIFT(x)	((x) << 20)
-#define DSIM_STOP_STATE_CNT_SHIFT(x)	((x) << 21)
-
-/* EXYNOS_DSIM_MDRESOL */
-#define DSIM_MAIN_STAND_BY		(1 << 31)
-#define DSIM_MAIN_VRESOL(x)		(((x) & 0x7ff) << 16)
-#define DSIM_MAIN_HRESOL(x)		(((x) & 0X7ff) << 0)
-
-/* EXYNOS_DSIM_MVPORCH */
-#define DSIM_CMD_ALLOW_SHIFT(x)		((x) << 28)
-#define DSIM_STABLE_VFP_SHIFT(x)	((x) << 16)
-#define DSIM_MAIN_VBP_SHIFT(x)		((x) << 0)
-#define DSIM_CMD_ALLOW_MASK		(0xf << 28)
-#define DSIM_STABLE_VFP_MASK		(0x7ff << 16)
-#define DSIM_MAIN_VBP_MASK		(0x7ff << 0)
-
-/* EXYNOS_DSIM_MHPORCH */
-#define DSIM_MAIN_HFP_SHIFT(x)		((x) << 16)
-#define DSIM_MAIN_HBP_SHIFT(x)		((x) << 0)
-#define DSIM_MAIN_HFP_MASK		((0xffff) << 16)
-#define DSIM_MAIN_HBP_MASK		((0xffff) << 0)
-
-/* EXYNOS_DSIM_MSYNC */
-#define DSIM_MAIN_VSA_SHIFT(x)		((x) << 22)
-#define DSIM_MAIN_HSA_SHIFT(x)		((x) << 0)
-#define DSIM_MAIN_VSA_MASK		((0x3ff) << 22)
-#define DSIM_MAIN_HSA_MASK		((0xffff) << 0)
-
-/* EXYNOS_DSIM_SDRESOL */
-#define DSIM_SUB_STANDY_SHIFT(x)	((x) << 31)
-#define DSIM_SUB_VRESOL_SHIFT(x)	((x) << 16)
-#define DSIM_SUB_HRESOL_SHIFT(x)	((x) << 0)
-#define DSIM_SUB_STANDY_MASK		((0x1) << 31)
-#define DSIM_SUB_VRESOL_MASK		((0x7ff) << 16)
-#define DSIM_SUB_HRESOL_MASK		((0x7ff) << 0)
-
-/* EXYNOS_DSIM_INTSRC */
-#define INTSRC_PLL_STABLE		(1 << 31)
-#define INTSRC_SW_RST_RELEASE		(1 << 30)
-#define INTSRC_SFR_FIFO_EMPTY		(1 << 29)
-#define INTSRC_FRAME_DONE		(1 << 24)
-#define INTSRC_RX_DATA_DONE		(1 << 18)
-
-/* EXYNOS_DSIM_INTMSK */
-#define INTMSK_FIFO_EMPTY		(1 << 29)
-#define INTMSK_BTA			(1 << 25)
-#define INTMSK_FRAME_DONE		(1 << 24)
-#define INTMSK_RX_TIMEOUT		(1 << 21)
-#define INTMSK_BTA_TIMEOUT		(1 << 20)
-#define INTMSK_RX_DONE			(1 << 18)
-#define INTMSK_RX_TE			(1 << 17)
-#define INTMSK_RX_ACK			(1 << 16)
-#define INTMSK_RX_ECC_ERR		(1 << 15)
-#define INTMSK_RX_CRC_ERR		(1 << 14)
-
-/* EXYNOS_DSIM_FIFOCTRL */
-#define SFR_HEADER_EMPTY		(1 << 22)
-
-/* EXYNOS_DSIM_PHYACCHR */
-#define DSIM_AFC_CTL(x)			(((x) & 0x7) << 5)
-
-/* EXYNOS_DSIM_PLLCTRL */
-#define DSIM_PLL_EN_SHIFT(x)		((x) << 23)
-#define DSIM_FREQ_BAND_SHIFT(x)		((x) << 24)
-
-#endif /* _EXYNOS_MIPI_DSI_REGS_H */
diff --git a/drivers/video/fbdev/exynos/s6e8ax0.c b/drivers/video/fbdev/exynos/s6e8ax0.c
deleted file mode 100644
index de2f3e7..0000000
--- a/drivers/video/fbdev/exynos/s6e8ax0.c
+++ /dev/null
@@ -1,887 +0,0 @@
-/* linux/drivers/video/exynos/s6e8ax0.c
- *
- * MIPI-DSI based s6e8ax0 AMOLED lcd 4.65 inch panel driver.
- *
- * Inki Dae, <inki.dae@samsung.com>
- * Donghwa Lee, <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/mutex.h>
-#include <linux/wait.h>
-#include <linux/ctype.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/lcd.h>
-#include <linux/fb.h>
-#include <linux/backlight.h>
-#include <linux/regulator/consumer.h>
-
-#include <video/mipi_display.h>
-#include <video/exynos_mipi_dsim.h>
-
-#define LDI_MTP_LENGTH		24
-#define DSIM_PM_STABLE_TIME	10
-#define MIN_BRIGHTNESS		0
-#define MAX_BRIGHTNESS		24
-#define GAMMA_TABLE_COUNT	26
-
-#define POWER_IS_ON(pwr)	((pwr) == FB_BLANK_UNBLANK)
-#define POWER_IS_OFF(pwr)	((pwr) == FB_BLANK_POWERDOWN)
-#define POWER_IS_NRM(pwr)	((pwr) == FB_BLANK_NORMAL)
-
-#define lcd_to_master(a)	(a->dsim_dev->master)
-#define lcd_to_master_ops(a)	((lcd_to_master(a))->master_ops)
-
-enum {
-	DSIM_NONE_STATE = 0,
-	DSIM_RESUME_COMPLETE = 1,
-	DSIM_FRAME_DONE = 2,
-};
-
-struct s6e8ax0 {
-	struct device	*dev;
-	unsigned int			power;
-	unsigned int			id;
-	unsigned int			gamma;
-	unsigned int			acl_enable;
-	unsigned int			cur_acl;
-
-	struct lcd_device	*ld;
-	struct backlight_device	*bd;
-
-	struct mipi_dsim_lcd_device	*dsim_dev;
-	struct lcd_platform_data	*ddi_pd;
-	struct mutex			lock;
-	bool  enabled;
-};
-
-
-static struct regulator_bulk_data supplies[] = {
-	{ .supply = "vdd3", },
-	{ .supply = "vci", },
-};
-
-static void s6e8ax0_regulator_enable(struct s6e8ax0 *lcd)
-{
-	int ret = 0;
-	struct lcd_platform_data *pd = NULL;
-
-	pd = lcd->ddi_pd;
-	mutex_lock(&lcd->lock);
-	if (!lcd->enabled) {
-		ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
-		if (ret)
-			goto out;
-
-		lcd->enabled = true;
-	}
-	msleep(pd->power_on_delay);
-out:
-	mutex_unlock(&lcd->lock);
-}
-
-static void s6e8ax0_regulator_disable(struct s6e8ax0 *lcd)
-{
-	int ret = 0;
-
-	mutex_lock(&lcd->lock);
-	if (lcd->enabled) {
-		ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
-		if (ret)
-			goto out;
-
-		lcd->enabled = false;
-	}
-out:
-	mutex_unlock(&lcd->lock);
-}
-
-static const unsigned char s6e8ax0_22_gamma_30[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xf5, 0x00, 0xff, 0xad, 0xaf,
-	0xbA, 0xc3, 0xd8, 0xc5, 0x9f, 0xc6, 0x9e, 0xc1, 0xdc, 0xc0,
-	0x00, 0x61, 0x00, 0x5a, 0x00, 0x74,
-};
-
-static const unsigned char s6e8ax0_22_gamma_50[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xe8, 0x1f, 0xf7, 0xad, 0xc0,
-	0xb5, 0xc4, 0xdc, 0xc4, 0x9e, 0xc6, 0x9c, 0xbb, 0xd8, 0xbb,
-	0x00, 0x70, 0x00, 0x68, 0x00, 0x86,
-};
-
-static const unsigned char s6e8ax0_22_gamma_60[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xde, 0x1f, 0xef, 0xad, 0xc4,
-	0xb3, 0xc3, 0xdd, 0xc4, 0x9e, 0xc6, 0x9c, 0xbc, 0xd6, 0xba,
-	0x00, 0x75, 0x00, 0x6e, 0x00, 0x8d,
-};
-
-static const unsigned char s6e8ax0_22_gamma_70[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xd8, 0x1f, 0xe7, 0xaf, 0xc8,
-	0xb4, 0xc4, 0xdd, 0xc3, 0x9d, 0xc6, 0x9c, 0xbb, 0xd6, 0xb9,
-	0x00, 0x7a, 0x00, 0x72, 0x00, 0x93,
-};
-
-static const unsigned char s6e8ax0_22_gamma_80[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xc9, 0x1f, 0xde, 0xae, 0xc9,
-	0xb1, 0xc3, 0xdd, 0xc2, 0x9d, 0xc5, 0x9b, 0xbc, 0xd6, 0xbb,
-	0x00, 0x7f, 0x00, 0x77, 0x00, 0x99,
-};
-
-static const unsigned char s6e8ax0_22_gamma_90[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xc7, 0x1f, 0xd9, 0xb0, 0xcc,
-	0xb2, 0xc3, 0xdc, 0xc1, 0x9c, 0xc6, 0x9c, 0xbc, 0xd4, 0xb9,
-	0x00, 0x83, 0x00, 0x7b, 0x00, 0x9e,
-};
-
-static const unsigned char s6e8ax0_22_gamma_100[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xbd, 0x80, 0xcd, 0xba, 0xce,
-	0xb3, 0xc4, 0xde, 0xc3, 0x9c, 0xc4, 0x9, 0xb8, 0xd3, 0xb6,
-	0x00, 0x88, 0x00, 0x80, 0x00, 0xa5,
-};
-
-static const unsigned char s6e8ax0_22_gamma_120[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xb9, 0x95, 0xc8, 0xb1, 0xcf,
-	0xb2, 0xc6, 0xdf, 0xc5, 0x9b, 0xc3, 0x99, 0xb6, 0xd2, 0xb6,
-	0x00, 0x8f, 0x00, 0x86, 0x00, 0xac,
-};
-
-static const unsigned char s6e8ax0_22_gamma_130[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xb7, 0xa0, 0xc7, 0xb1, 0xd0,
-	0xb2, 0xc4, 0xdd, 0xc3, 0x9a, 0xc3, 0x98, 0xb6, 0xd0, 0xb4,
-	0x00, 0x92, 0x00, 0x8a, 0x00, 0xb1,
-};
-
-static const unsigned char s6e8ax0_22_gamma_140[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xb7, 0xa0, 0xc5, 0xb2, 0xd0,
-	0xb3, 0xc3, 0xde, 0xc3, 0x9b, 0xc2, 0x98, 0xb6, 0xd0, 0xb4,
-	0x00, 0x95, 0x00, 0x8d, 0x00, 0xb5,
-};
-
-static const unsigned char s6e8ax0_22_gamma_150[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xb3, 0xa0, 0xc2, 0xb2, 0xd0,
-	0xb2, 0xc1, 0xdd, 0xc2, 0x9b, 0xc2, 0x98, 0xb4, 0xcf, 0xb1,
-	0x00, 0x99, 0x00, 0x90, 0x00, 0xba,
-};
-
-static const unsigned char s6e8ax0_22_gamma_160[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xaf, 0xa5, 0xbf, 0xb0, 0xd0,
-	0xb1, 0xc3, 0xde, 0xc2, 0x99, 0xc1, 0x97, 0xb4, 0xce, 0xb1,
-	0x00, 0x9c, 0x00, 0x93, 0x00, 0xbe,
-};
-
-static const unsigned char s6e8ax0_22_gamma_170[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xaf, 0xb5, 0xbf, 0xb1, 0xd1,
-	0xb1, 0xc3, 0xde, 0xc3, 0x99, 0xc0, 0x96, 0xb4, 0xce, 0xb1,
-	0x00, 0x9f, 0x00, 0x96, 0x00, 0xc2,
-};
-
-static const unsigned char s6e8ax0_22_gamma_180[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xaf, 0xb7, 0xbe, 0xb3, 0xd2,
-	0xb3, 0xc3, 0xde, 0xc2, 0x97, 0xbf, 0x95, 0xb4, 0xcd, 0xb1,
-	0x00, 0xa2, 0x00, 0x99, 0x00, 0xc5,
-};
-
-static const unsigned char s6e8ax0_22_gamma_190[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xaf, 0xb9, 0xbe, 0xb2, 0xd2,
-	0xb2, 0xc3, 0xdd, 0xc3, 0x98, 0xbf, 0x95, 0xb2, 0xcc, 0xaf,
-	0x00, 0xa5, 0x00, 0x9c, 0x00, 0xc9,
-};
-
-static const unsigned char s6e8ax0_22_gamma_200[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xaf, 0xb9, 0xbc, 0xb2, 0xd2,
-	0xb1, 0xc4, 0xdd, 0xc3, 0x97, 0xbe, 0x95, 0xb1, 0xcb, 0xae,
-	0x00, 0xa8, 0x00, 0x9f, 0x00, 0xcd,
-};
-
-static const unsigned char s6e8ax0_22_gamma_210[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xb1, 0xc1, 0xbd, 0xb1, 0xd1,
-	0xb1, 0xc2, 0xde, 0xc2, 0x97, 0xbe, 0x94, 0xB0, 0xc9, 0xad,
-	0x00, 0xae, 0x00, 0xa4, 0x00, 0xd4,
-};
-
-static const unsigned char s6e8ax0_22_gamma_220[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xb1, 0xc7, 0xbd, 0xb1, 0xd1,
-	0xb1, 0xc2, 0xdd, 0xc2, 0x97, 0xbd, 0x94, 0xb0, 0xc9, 0xad,
-	0x00, 0xad, 0x00, 0xa2, 0x00, 0xd3,
-};
-
-static const unsigned char s6e8ax0_22_gamma_230[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xb1, 0xc3, 0xbd, 0xb2, 0xd1,
-	0xb1, 0xc3, 0xdd, 0xc1, 0x96, 0xbd, 0x94, 0xb0, 0xc9, 0xad,
-	0x00, 0xb0, 0x00, 0xa7, 0x00, 0xd7,
-};
-
-static const unsigned char s6e8ax0_22_gamma_240[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xb1, 0xcb, 0xbd, 0xb1, 0xd2,
-	0xb1, 0xc3, 0xdD, 0xc2, 0x95, 0xbd, 0x93, 0xaf, 0xc8, 0xab,
-	0x00, 0xb3, 0x00, 0xa9, 0x00, 0xdb,
-};
-
-static const unsigned char s6e8ax0_22_gamma_250[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xb3, 0xcc, 0xbe, 0xb0, 0xd2,
-	0xb0, 0xc3, 0xdD, 0xc2, 0x94, 0xbc, 0x92, 0xae, 0xc8, 0xab,
-	0x00, 0xb6, 0x00, 0xab, 0x00, 0xde,
-};
-
-static const unsigned char s6e8ax0_22_gamma_260[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xb3, 0xd0, 0xbe, 0xaf, 0xd1,
-	0xaf, 0xc2, 0xdd, 0xc1, 0x96, 0xbc, 0x93, 0xaf, 0xc8, 0xac,
-	0x00, 0xb7, 0x00, 0xad, 0x00, 0xe0,
-};
-
-static const unsigned char s6e8ax0_22_gamma_270[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xb2, 0xcF, 0xbd, 0xb0, 0xd2,
-	0xaf, 0xc2, 0xdc, 0xc1, 0x95, 0xbd, 0x93, 0xae, 0xc6, 0xaa,
-	0x00, 0xba, 0x00, 0xb0, 0x00, 0xe4,
-};
-
-static const unsigned char s6e8ax0_22_gamma_280[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xb2, 0xd0, 0xbd, 0xaf, 0xd0,
-	0xad, 0xc4, 0xdd, 0xc3, 0x95, 0xbd, 0x93, 0xac, 0xc5, 0xa9,
-	0x00, 0xbd, 0x00, 0xb2, 0x00, 0xe7,
-};
-
-static const unsigned char s6e8ax0_22_gamma_300[] = {
-	0xfa, 0x01, 0x60, 0x10, 0x60, 0xb5, 0xd3, 0xbd, 0xb1, 0xd2,
-	0xb0, 0xc0, 0xdc, 0xc0, 0x94, 0xba, 0x91, 0xac, 0xc5, 0xa9,
-	0x00, 0xc2, 0x00, 0xb7, 0x00, 0xed,
-};
-
-static const unsigned char *s6e8ax0_22_gamma_table[] = {
-	s6e8ax0_22_gamma_30,
-	s6e8ax0_22_gamma_50,
-	s6e8ax0_22_gamma_60,
-	s6e8ax0_22_gamma_70,
-	s6e8ax0_22_gamma_80,
-	s6e8ax0_22_gamma_90,
-	s6e8ax0_22_gamma_100,
-	s6e8ax0_22_gamma_120,
-	s6e8ax0_22_gamma_130,
-	s6e8ax0_22_gamma_140,
-	s6e8ax0_22_gamma_150,
-	s6e8ax0_22_gamma_160,
-	s6e8ax0_22_gamma_170,
-	s6e8ax0_22_gamma_180,
-	s6e8ax0_22_gamma_190,
-	s6e8ax0_22_gamma_200,
-	s6e8ax0_22_gamma_210,
-	s6e8ax0_22_gamma_220,
-	s6e8ax0_22_gamma_230,
-	s6e8ax0_22_gamma_240,
-	s6e8ax0_22_gamma_250,
-	s6e8ax0_22_gamma_260,
-	s6e8ax0_22_gamma_270,
-	s6e8ax0_22_gamma_280,
-	s6e8ax0_22_gamma_300,
-};
-
-static void s6e8ax0_panel_cond(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-
-	static const unsigned char data_to_send[] = {
-		0xf8, 0x3d, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00, 0x3c, 0x7d,
-		0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00, 0x00, 0x20, 0x04, 0x08,
-		0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0,
-		0xc8, 0x08, 0x48, 0xc1, 0x00, 0xc1, 0xff, 0xff, 0xc8
-	};
-	static const unsigned char data_to_send_panel_reverse[] = {
-		0xf8, 0x19, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00, 0x3c, 0x7d,
-		0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00, 0x00, 0x20, 0x04, 0x08,
-		0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0,
-		0xc1, 0x01, 0x41, 0xc1, 0x00, 0xc1, 0xf6, 0xf6, 0xc1
-	};
-
-	if (lcd->dsim_dev->panel_reverse)
-		ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
-				data_to_send_panel_reverse,
-				ARRAY_SIZE(data_to_send_panel_reverse));
-	else
-		ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
-				data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_display_cond(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	static const unsigned char data_to_send[] = {
-		0xf2, 0x80, 0x03, 0x0d
-	};
-
-	ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
-		data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-/* Gamma 2.2 Setting (200cd, 7500K, 10MPCD) */
-static void s6e8ax0_gamma_cond(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	unsigned int gamma = lcd->bd->props.brightness;
-
-	ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
-			s6e8ax0_22_gamma_table[gamma],
-			GAMMA_TABLE_COUNT);
-}
-
-static void s6e8ax0_gamma_update(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	static const unsigned char data_to_send[] = {
-		0xf7, 0x03
-	};
-
-	ops->cmd_write(lcd_to_master(lcd),
-		MIPI_DSI_DCS_SHORT_WRITE_PARAM, data_to_send,
-		ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_etc_cond1(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	static const unsigned char data_to_send[] = {
-		0xd1, 0xfe, 0x80, 0x00, 0x01, 0x0b, 0x00, 0x00, 0x40,
-		0x0d, 0x00, 0x00
-	};
-
-	ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
-		data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_etc_cond2(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	static const unsigned char data_to_send[] = {
-		0xb6, 0x0c, 0x02, 0x03, 0x32, 0xff, 0x44, 0x44, 0xc0,
-		0x00
-	};
-
-	ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
-		data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_etc_cond3(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	static const unsigned char data_to_send[] = {
-		0xe1, 0x10, 0x1c, 0x17, 0x08, 0x1d
-	};
-
-	ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
-		data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_etc_cond4(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	static const unsigned char data_to_send[] = {
-		0xe2, 0xed, 0x07, 0xc3, 0x13, 0x0d, 0x03
-	};
-
-	ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
-		data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_etc_cond5(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	static const unsigned char data_to_send[] = {
-		0xf4, 0xcf, 0x0a, 0x12, 0x10, 0x19, 0x33, 0x02
-	};
-
-	ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
-		data_to_send, ARRAY_SIZE(data_to_send));
-}
-static void s6e8ax0_etc_cond6(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	static const unsigned char data_to_send[] = {
-		0xe3, 0x40
-	};
-
-	ops->cmd_write(lcd_to_master(lcd),
-		MIPI_DSI_DCS_SHORT_WRITE_PARAM,
-		data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_etc_cond7(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	static const unsigned char data_to_send[] = {
-		0xe4, 0x00, 0x00, 0x14, 0x80, 0x00, 0x00, 0x00
-	};
-
-	ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
-		data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_elvss_set(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	static const unsigned char data_to_send[] = {
-		0xb1, 0x04, 0x00
-	};
-
-	ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
-		data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_elvss_nvm_set(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	static const unsigned char data_to_send[] = {
-		0xd9, 0x5c, 0x20, 0x0c, 0x0f, 0x41, 0x00, 0x10, 0x11,
-		0x12, 0xd1, 0x00, 0x00, 0x00, 0x00, 0x80, 0xcb, 0xed,
-		0x64, 0xaf
-	};
-
-	ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
-		data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_sleep_in(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	static const unsigned char data_to_send[] = {
-		0x10, 0x00
-	};
-
-	ops->cmd_write(lcd_to_master(lcd),
-		MIPI_DSI_DCS_SHORT_WRITE,
-		data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_sleep_out(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	static const unsigned char data_to_send[] = {
-		0x11, 0x00
-	};
-
-	ops->cmd_write(lcd_to_master(lcd),
-		MIPI_DSI_DCS_SHORT_WRITE,
-		data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_display_on(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	static const unsigned char data_to_send[] = {
-		0x29, 0x00
-	};
-
-	ops->cmd_write(lcd_to_master(lcd),
-		MIPI_DSI_DCS_SHORT_WRITE,
-		data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_display_off(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	static const unsigned char data_to_send[] = {
-		0x28, 0x00
-	};
-
-	ops->cmd_write(lcd_to_master(lcd),
-		MIPI_DSI_DCS_SHORT_WRITE,
-		data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_apply_level2_key(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	static const unsigned char data_to_send[] = {
-		0xf0, 0x5a, 0x5a
-	};
-
-	ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
-		data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_acl_on(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	static const unsigned char data_to_send[] = {
-		0xc0, 0x01
-	};
-
-	ops->cmd_write(lcd_to_master(lcd),
-		MIPI_DSI_DCS_SHORT_WRITE,
-		data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_acl_off(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	static const unsigned char data_to_send[] = {
-		0xc0, 0x00
-	};
-
-	ops->cmd_write(lcd_to_master(lcd),
-		MIPI_DSI_DCS_SHORT_WRITE,
-		data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-/* Full white 50% reducing setting */
-static void s6e8ax0_acl_ctrl_set(struct s6e8ax0 *lcd)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	/* Full white 50% reducing setting */
-	static const unsigned char cutoff_50[] = {
-		0xc1, 0x47, 0x53, 0x13, 0x53, 0x00, 0x00, 0x02, 0xcf,
-		0x00, 0x00, 0x04, 0xff,	0x00, 0x00, 0x00, 0x00, 0x00,
-		0x01, 0x08, 0x0f, 0x16, 0x1d, 0x24, 0x2a, 0x31, 0x38,
-		0x3f, 0x46
-	};
-	/* Full white 45% reducing setting */
-	static const unsigned char cutoff_45[] = {
-		0xc1, 0x47, 0x53, 0x13, 0x53, 0x00, 0x00, 0x02, 0xcf,
-		0x00, 0x00, 0x04, 0xff,	0x00, 0x00, 0x00, 0x00, 0x00,
-		0x01, 0x07, 0x0d, 0x13, 0x19, 0x1f, 0x25, 0x2b, 0x31,
-		0x37, 0x3d
-	};
-	/* Full white 40% reducing setting */
-	static const unsigned char cutoff_40[] = {
-		0xc1, 0x47, 0x53, 0x13, 0x53, 0x00, 0x00, 0x02, 0xcf,
-		0x00, 0x00, 0x04, 0xff,	0x00, 0x00, 0x00, 0x00, 0x00,
-		0x01, 0x06, 0x0c, 0x11, 0x16, 0x1c, 0x21, 0x26, 0x2b,
-		0x31, 0x36
-	};
-
-	if (lcd->acl_enable) {
-		if (lcd->cur_acl == 0) {
-			if (lcd->gamma == 0 || lcd->gamma == 1) {
-				s6e8ax0_acl_off(lcd);
-				dev_dbg(&lcd->ld->dev,
-					"cur_acl=%d\n", lcd->cur_acl);
-			} else
-				s6e8ax0_acl_on(lcd);
-		}
-		switch (lcd->gamma) {
-		case 0: /* 30cd */
-			s6e8ax0_acl_off(lcd);
-			lcd->cur_acl = 0;
-			break;
-		case 1 ... 3: /* 50cd ~ 90cd */
-			ops->cmd_write(lcd_to_master(lcd),
-				MIPI_DSI_DCS_LONG_WRITE,
-				cutoff_40,
-				ARRAY_SIZE(cutoff_40));
-			lcd->cur_acl = 40;
-			break;
-		case 4 ... 7: /* 120cd ~ 210cd */
-			ops->cmd_write(lcd_to_master(lcd),
-				MIPI_DSI_DCS_LONG_WRITE,
-				cutoff_45,
-				ARRAY_SIZE(cutoff_45));
-			lcd->cur_acl = 45;
-			break;
-		case 8 ... 10: /* 220cd ~ 300cd */
-			ops->cmd_write(lcd_to_master(lcd),
-				MIPI_DSI_DCS_LONG_WRITE,
-				cutoff_50,
-				ARRAY_SIZE(cutoff_50));
-			lcd->cur_acl = 50;
-			break;
-		default:
-			break;
-		}
-	} else {
-		s6e8ax0_acl_off(lcd);
-		lcd->cur_acl = 0;
-		dev_dbg(&lcd->ld->dev, "cur_acl = %d\n", lcd->cur_acl);
-	}
-}
-
-static void s6e8ax0_read_id(struct s6e8ax0 *lcd, u8 *mtp_id)
-{
-	unsigned int ret;
-	unsigned int addr = 0xd1;	/* MTP ID */
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-
-	ret = ops->cmd_read(lcd_to_master(lcd),
-			MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM,
-			addr, 3, mtp_id);
-}
-
-static int s6e8ax0_panel_init(struct s6e8ax0 *lcd)
-{
-	s6e8ax0_apply_level2_key(lcd);
-	s6e8ax0_sleep_out(lcd);
-	msleep(1);
-	s6e8ax0_panel_cond(lcd);
-	s6e8ax0_display_cond(lcd);
-	s6e8ax0_gamma_cond(lcd);
-	s6e8ax0_gamma_update(lcd);
-
-	s6e8ax0_etc_cond1(lcd);
-	s6e8ax0_etc_cond2(lcd);
-	s6e8ax0_etc_cond3(lcd);
-	s6e8ax0_etc_cond4(lcd);
-	s6e8ax0_etc_cond5(lcd);
-	s6e8ax0_etc_cond6(lcd);
-	s6e8ax0_etc_cond7(lcd);
-
-	s6e8ax0_elvss_nvm_set(lcd);
-	s6e8ax0_elvss_set(lcd);
-
-	s6e8ax0_acl_ctrl_set(lcd);
-	s6e8ax0_acl_on(lcd);
-
-	/* if ID3 value is not 33h, branch private elvss mode */
-	msleep(lcd->ddi_pd->power_on_delay);
-
-	return 0;
-}
-
-static int s6e8ax0_update_gamma_ctrl(struct s6e8ax0 *lcd, int brightness)
-{
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-
-	ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
-			s6e8ax0_22_gamma_table[brightness],
-			ARRAY_SIZE(s6e8ax0_22_gamma_table));
-
-	/* update gamma table. */
-	s6e8ax0_gamma_update(lcd);
-	lcd->gamma = brightness;
-
-	return 0;
-}
-
-static int s6e8ax0_gamma_ctrl(struct s6e8ax0 *lcd, int gamma)
-{
-	s6e8ax0_update_gamma_ctrl(lcd, gamma);
-
-	return 0;
-}
-
-static int s6e8ax0_set_power(struct lcd_device *ld, int power)
-{
-	struct s6e8ax0 *lcd = lcd_get_data(ld);
-	struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-	int ret = 0;
-
-	if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
-			power != FB_BLANK_NORMAL) {
-		dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
-		return -EINVAL;
-	}
-
-	if ((power == FB_BLANK_UNBLANK) && ops->set_blank_mode) {
-		/* LCD power on */
-		if ((POWER_IS_ON(power) && POWER_IS_OFF(lcd->power))
-			|| (POWER_IS_ON(power) && POWER_IS_NRM(lcd->power))) {
-			ret = ops->set_blank_mode(lcd_to_master(lcd), power);
-			if (!ret && lcd->power != power)
-				lcd->power = power;
-		}
-	} else if ((power == FB_BLANK_POWERDOWN) && ops->set_early_blank_mode) {
-		/* LCD power off */
-		if ((POWER_IS_OFF(power) && POWER_IS_ON(lcd->power)) ||
-		(POWER_IS_ON(lcd->power) && POWER_IS_NRM(power))) {
-			ret = ops->set_early_blank_mode(lcd_to_master(lcd),
-							power);
-			if (!ret && lcd->power != power)
-				lcd->power = power;
-		}
-	}
-
-	return ret;
-}
-
-static int s6e8ax0_get_power(struct lcd_device *ld)
-{
-	struct s6e8ax0 *lcd = lcd_get_data(ld);
-
-	return lcd->power;
-}
-
-static int s6e8ax0_set_brightness(struct backlight_device *bd)
-{
-	int ret = 0, brightness = bd->props.brightness;
-	struct s6e8ax0 *lcd = bl_get_data(bd);
-
-	if (brightness < MIN_BRIGHTNESS ||
-		brightness > bd->props.max_brightness) {
-		dev_err(lcd->dev, "lcd brightness should be %d to %d.\n",
-			MIN_BRIGHTNESS, MAX_BRIGHTNESS);
-		return -EINVAL;
-	}
-
-	ret = s6e8ax0_gamma_ctrl(lcd, brightness);
-	if (ret) {
-		dev_err(&bd->dev, "lcd brightness setting failed.\n");
-		return -EIO;
-	}
-
-	return ret;
-}
-
-static struct lcd_ops s6e8ax0_lcd_ops = {
-	.set_power = s6e8ax0_set_power,
-	.get_power = s6e8ax0_get_power,
-};
-
-static const struct backlight_ops s6e8ax0_backlight_ops = {
-	.update_status = s6e8ax0_set_brightness,
-};
-
-static void s6e8ax0_power_on(struct mipi_dsim_lcd_device *dsim_dev, int power)
-{
-	struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
-
-	msleep(lcd->ddi_pd->power_on_delay);
-
-	/* lcd power on */
-	if (power)
-		s6e8ax0_regulator_enable(lcd);
-	else
-		s6e8ax0_regulator_disable(lcd);
-
-	msleep(lcd->ddi_pd->reset_delay);
-
-	/* lcd reset */
-	if (lcd->ddi_pd->reset)
-		lcd->ddi_pd->reset(lcd->ld);
-	msleep(5);
-}
-
-static void s6e8ax0_set_sequence(struct mipi_dsim_lcd_device *dsim_dev)
-{
-	struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
-
-	s6e8ax0_panel_init(lcd);
-	s6e8ax0_display_on(lcd);
-
-	lcd->power = FB_BLANK_UNBLANK;
-}
-
-static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev)
-{
-	struct s6e8ax0 *lcd;
-	int ret;
-	u8 mtp_id[3] = {0, };
-
-	lcd = devm_kzalloc(&dsim_dev->dev, sizeof(struct s6e8ax0), GFP_KERNEL);
-	if (!lcd) {
-		dev_err(&dsim_dev->dev, "failed to allocate s6e8ax0 structure.\n");
-		return -ENOMEM;
-	}
-
-	lcd->dsim_dev = dsim_dev;
-	lcd->ddi_pd = (struct lcd_platform_data *)dsim_dev->platform_data;
-	lcd->dev = &dsim_dev->dev;
-
-	mutex_init(&lcd->lock);
-
-	ret = devm_regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
-	if (ret) {
-		dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
-		return ret;
-	}
-
-	lcd->ld = devm_lcd_device_register(lcd->dev, "s6e8ax0", lcd->dev, lcd,
-			&s6e8ax0_lcd_ops);
-	if (IS_ERR(lcd->ld)) {
-		dev_err(lcd->dev, "failed to register lcd ops.\n");
-		return PTR_ERR(lcd->ld);
-	}
-
-	lcd->bd = devm_backlight_device_register(lcd->dev, "s6e8ax0-bl",
-				lcd->dev, lcd, &s6e8ax0_backlight_ops, NULL);
-	if (IS_ERR(lcd->bd)) {
-		dev_err(lcd->dev, "failed to register backlight ops.\n");
-		return PTR_ERR(lcd->bd);
-	}
-
-	lcd->bd->props.max_brightness = MAX_BRIGHTNESS;
-	lcd->bd->props.brightness = MAX_BRIGHTNESS;
-
-	s6e8ax0_read_id(lcd, mtp_id);
-	if (mtp_id[0] == 0x00)
-		dev_err(lcd->dev, "read id failed\n");
-
-	dev_info(lcd->dev, "Read ID : %x, %x, %x\n",
-			mtp_id[0], mtp_id[1], mtp_id[2]);
-
-	if (mtp_id[2] == 0x33)
-		dev_info(lcd->dev,
-			"ID-3 is 0xff does not support dynamic elvss\n");
-	else
-		dev_info(lcd->dev,
-			"ID-3 is 0x%x support dynamic elvss\n", mtp_id[2]);
-
-	lcd->acl_enable = 1;
-	lcd->cur_acl = 0;
-
-	dev_set_drvdata(&dsim_dev->dev, lcd);
-
-	dev_dbg(lcd->dev, "probed s6e8ax0 panel driver.\n");
-
-	return 0;
-}
-
-static int __maybe_unused s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
-{
-	struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
-
-	s6e8ax0_sleep_in(lcd);
-	msleep(lcd->ddi_pd->power_off_delay);
-	s6e8ax0_display_off(lcd);
-
-	s6e8ax0_regulator_disable(lcd);
-
-	return 0;
-}
-
-static int __maybe_unused s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
-{
-	struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
-
-	s6e8ax0_sleep_out(lcd);
-	msleep(lcd->ddi_pd->power_on_delay);
-
-	s6e8ax0_regulator_enable(lcd);
-	s6e8ax0_set_sequence(dsim_dev);
-
-	return 0;
-}
-
-static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = {
-	.name = "s6e8ax0",
-	.id = -1,
-
-	.power_on = s6e8ax0_power_on,
-	.set_sequence = s6e8ax0_set_sequence,
-	.probe = s6e8ax0_probe,
-	.suspend = IS_ENABLED(CONFIG_PM) ? s6e8ax0_suspend : NULL,
-	.resume = IS_ENABLED(CONFIG_PM) ? s6e8ax0_resume : NULL,
-};
-
-static int s6e8ax0_init(void)
-{
-	exynos_mipi_dsi_register_lcd_driver(&s6e8ax0_dsim_ddi_driver);
-
-	return 0;
-}
-
-static void s6e8ax0_exit(void)
-{
-	return;
-}
-
-module_init(s6e8ax0_init);
-module_exit(s6e8ax0_exit);
-
-MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
-MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
-MODULE_DESCRIPTION("MIPI-DSI based s6e8ax0 AMOLED LCD Panel Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/hecubafb.c b/drivers/video/fbdev/hecubafb.c
index e4031ef..8577195 100644
--- a/drivers/video/fbdev/hecubafb.c
+++ b/drivers/video/fbdev/hecubafb.c
@@ -47,7 +47,7 @@
 #define DPY_W 600
 #define DPY_H 800
 
-static struct fb_fix_screeninfo hecubafb_fix = {
+static const struct fb_fix_screeninfo hecubafb_fix = {
 	.id =		"hecubafb",
 	.type =		FB_TYPE_PACKED_PIXELS,
 	.visual =	FB_VISUAL_MONO01,
@@ -58,7 +58,7 @@
 	.accel =	FB_ACCEL_NONE,
 };
 
-static struct fb_var_screeninfo hecubafb_var = {
+static const struct fb_var_screeninfo hecubafb_var = {
 	.xres		= DPY_W,
 	.yres		= DPY_H,
 	.xres_virtual	= DPY_W,
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index 15d3ccf..4630285 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -106,7 +106,7 @@
 
 /* Framebuffer driver structures */
 
-static struct fb_var_screeninfo hga_default_var = {
+static const struct fb_var_screeninfo hga_default_var = {
 	.xres		= 720,
 	.yres 		= 348,
 	.xres_virtual 	= 720,
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index cf5ccd0..7bc5f60 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -82,7 +82,7 @@
 #define DACSPEED24_SD	128
 #define DACSPEED32	86
 
-static struct fb_fix_screeninfo i740fb_fix = {
+static const struct fb_fix_screeninfo i740fb_fix = {
 	.id =		"i740fb",
 	.type =		FB_TYPE_PACKED_PIXELS,
 	.visual =	FB_VISUAL_TRUECOLOR,
diff --git a/drivers/video/fbdev/i810/i810_main.c b/drivers/video/fbdev/i810/i810_main.c
index 025b882..483ab25 100644
--- a/drivers/video/fbdev/i810/i810_main.c
+++ b/drivers/video/fbdev/i810/i810_main.c
@@ -1691,7 +1691,7 @@
 	if (!(par->i810_gtt.i810_cursor_memory = 
 	      agp_allocate_memory(bridge, par->cursor_heap.size >> 12,
 				  AGP_PHYSICAL_MEMORY))) {
-		printk("i810fb_alloc_cursormem:  can't allocate" 
+		printk("i810fb_alloc_cursormem:  can't allocate "
 		       "cursor memory\n");
 		agp_backend_release(bridge);
 		return -ENOMEM;
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index bf20744..ff2a5d2 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -1301,11 +1301,6 @@
 		break;
 	}
 
-	if (v.xoffset < 0)
-		v.xoffset = 0;
-	if (v.yoffset < 0)
-		v.yoffset = 0;
-
 	if (v.xoffset > v.xres_virtual - v.xres)
 		v.xoffset = v.xres_virtual - v.xres;
 	if (v.yoffset > v.yres_virtual - v.yres)
diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
index 5bb0153..f77478f 100644
--- a/drivers/video/fbdev/kyro/fbdev.c
+++ b/drivers/video/fbdev/kyro/fbdev.c
@@ -44,7 +44,7 @@
 	.accel		= FB_ACCEL_NONE,
 };
 
-static struct fb_var_screeninfo kyro_var = {
+static const struct fb_var_screeninfo kyro_var = {
 	/* 640x480, 16bpp @ 60 Hz */
 	.xres		= 640,
 	.yres		= 480,
diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
index 195ad7c..68fa037 100644
--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
@@ -372,7 +372,7 @@
 
 	DBG(__func__)
 
-	memcpy(hw->DACreg, MGADACbpp32, sizeof(hw->DACreg));
+	memcpy(hw->DACreg, MGADACbpp32, sizeof(MGADACbpp32));
 	switch (minfo->fbcon.var.bits_per_pixel) {
 		case 4:	hw->DACreg[POS3026_XLATCHCTRL] = TVP3026_XLATCHCTRL_16_1;	/* or _8_1, they are same */
 			hw->DACreg[POS3026_XTRUECOLORCTRL] = TVP3026_XTRUECOLORCTRL_PSEUDOCOLOR;
diff --git a/drivers/video/fbdev/matrox/matroxfb_g450.c b/drivers/video/fbdev/matrox/matroxfb_g450.c
index cff0546..f108ae6 100644
--- a/drivers/video/fbdev/matrox/matroxfb_g450.c
+++ b/drivers/video/fbdev/matrox/matroxfb_g450.c
@@ -433,7 +433,7 @@
 		0x00,	/* 3E written multiple times */
 		0x00,	/* 3F not written */
 	} };
-	static struct mavenregs ntscregs = { {
+	static const struct mavenregs ntscregs = { {
 		0x21, 0xF0, 0x7C, 0x1F,	/* 00: chroma subcarrier */
 		0x00,
 		0x00,	/* test */
diff --git a/drivers/video/fbdev/mb862xx/mb862xx-i2c.c b/drivers/video/fbdev/mb862xx/mb862xx-i2c.c
index c87e17a..ba96c44 100644
--- a/drivers/video/fbdev/mb862xx/mb862xx-i2c.c
+++ b/drivers/video/fbdev/mb862xx/mb862xx-i2c.c
@@ -157,17 +157,10 @@
 
 int mb862xx_i2c_init(struct mb862xxfb_par *par)
 {
-	int ret;
-
 	mb862xx_i2c_adapter.algo_data = par;
 	par->adap = &mb862xx_i2c_adapter;
 
-	ret = i2c_add_adapter(par->adap);
-	if (ret < 0) {
-		dev_err(par->dev, "failed to add %s\n",
-			mb862xx_i2c_adapter.name);
-	}
-	return ret;
+	return i2c_add_adapter(par->adap);
 }
 
 void mb862xx_i2c_exit(struct mb862xxfb_par *par)
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c
index f91b1db..8778e01 100644
--- a/drivers/video/fbdev/mx3fb.c
+++ b/drivers/video/fbdev/mx3fb.c
@@ -845,7 +845,7 @@
 		if (fbi->var.sync & FB_SYNC_SHARP_MODE)
 			mode = IPU_PANEL_SHARP_TFT;
 
-		dev_dbg(fbi->device, "pixclock = %ul Hz\n",
+		dev_dbg(fbi->device, "pixclock = %u Hz\n",
 			(u32) (PICOS2KHZ(fbi->var.pixclock) * 1000UL));
 
 		if (sdc_init_panel(mx3fb, mode,
diff --git a/drivers/video/fbdev/mxsfb.c b/drivers/video/fbdev/mxsfb.c
index 4e6608c..7846f0e 100644
--- a/drivers/video/fbdev/mxsfb.c
+++ b/drivers/video/fbdev/mxsfb.c
@@ -800,6 +800,7 @@
 			struct fb_videomode *vmode)
 {
 	int ret;
+	struct device *dev = &host->pdev->dev;
 	struct fb_info *fb_info = &host->fb_info;
 	struct fb_var_screeninfo *var = &fb_info->var;
 	dma_addr_t fb_phys;
@@ -825,12 +826,10 @@
 
 	/* Memory allocation for framebuffer */
 	fb_size = SZ_2M;
-	fb_virt = alloc_pages_exact(fb_size, GFP_DMA);
+	fb_virt = dma_alloc_wc(dev, PAGE_ALIGN(fb_size), &fb_phys, GFP_KERNEL);
 	if (!fb_virt)
 		return -ENOMEM;
 
-	fb_phys = virt_to_phys(fb_virt);
-
 	fb_info->fix.smem_start = fb_phys;
 	fb_info->screen_base = fb_virt;
 	fb_info->screen_size = fb_info->fix.smem_len = fb_size;
@@ -843,9 +842,11 @@
 
 static void mxsfb_free_videomem(struct mxsfb_info *host)
 {
+	struct device *dev = &host->pdev->dev;
 	struct fb_info *fb_info = &host->fb_info;
 
-	free_pages_exact(fb_info->screen_base, fb_info->fix.smem_len);
+	dma_free_wc(dev, fb_info->screen_size, fb_info->screen_base,
+		    fb_info->fix.smem_start);
 }
 
 static const struct platform_device_id mxsfb_devtype[] = {
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index fb60a8f..906c6e7 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -625,6 +625,21 @@
 	if (address == OF_BAD_ADDR && addr_prop)
 		address = (u64)addr_prop;
 	if (address != OF_BAD_ADDR) {
+#ifdef CONFIG_PCI
+		const __be32 *vidp, *didp;
+		u32 vid, did;
+		struct pci_dev *pdev;
+
+		vidp = of_get_property(dp, "vendor-id", NULL);
+		didp = of_get_property(dp, "device-id", NULL);
+		if (vidp && didp) {
+			vid = be32_to_cpup(vidp);
+			did = be32_to_cpup(didp);
+			pdev = pci_get_device(vid, did, NULL);
+			if (!pdev || pci_enable_device(pdev))
+				return;
+		}
+#endif
 		/* kludge for valkyrie */
 		if (strcmp(dp->name, "valkyrie") == 0)
 			address += 0x1000;
diff --git a/drivers/video/fbdev/omap/lcd_mipid.c b/drivers/video/fbdev/omap/lcd_mipid.c
index 0e4cee9a..c81f150 100644
--- a/drivers/video/fbdev/omap/lcd_mipid.c
+++ b/drivers/video/fbdev/omap/lcd_mipid.c
@@ -60,7 +60,6 @@
 	struct mutex		mutex;
 	struct lcd_panel	panel;
 
-	struct workqueue_struct	*esd_wq;
 	struct delayed_work	esd_work;
 	void			(*esd_check)(struct mipid_device *m);
 };
@@ -390,7 +389,7 @@
 static void mipid_esd_start_check(struct mipid_device *md)
 {
 	if (md->esd_check != NULL)
-		queue_delayed_work(md->esd_wq, &md->esd_work,
+		schedule_delayed_work(&md->esd_work,
 				   MIPID_ESD_CHECK_PERIOD);
 }
 
@@ -476,11 +475,6 @@
 	struct mipid_device *md = to_mipid_device(panel);
 
 	md->fbdev = fbdev;
-	md->esd_wq = create_singlethread_workqueue("mipid_esd");
-	if (md->esd_wq == NULL) {
-		dev_err(&md->spi->dev, "can't create ESD workqueue\n");
-		return -ENOMEM;
-	}
 	INIT_DELAYED_WORK(&md->esd_work, mipid_esd_work);
 	mutex_init(&md->mutex);
 
@@ -500,7 +494,6 @@
 
 	if (md->enabled)
 		mipid_esd_stop_check(md);
-	destroy_workqueue(md->esd_wq);
 }
 
 static struct lcd_panel mipid_panel = {
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
index b58012b..8b81069 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
@@ -75,8 +75,6 @@
 
 	bool intro_printed;
 
-	struct workqueue_struct *workqueue;
-
 	bool ulps_enabled;
 	unsigned ulps_timeout;
 	struct delayed_work ulps_work;
@@ -232,7 +230,7 @@
 static void dsicm_queue_ulps_work(struct panel_drv_data *ddata)
 {
 	if (ddata->ulps_timeout > 0)
-		queue_delayed_work(ddata->workqueue, &ddata->ulps_work,
+		schedule_delayed_work(&ddata->ulps_work,
 				msecs_to_jiffies(ddata->ulps_timeout));
 }
 
@@ -1244,11 +1242,6 @@
 		dev_dbg(dev, "Using GPIO TE\n");
 	}
 
-	ddata->workqueue = create_singlethread_workqueue("dsicm_wq");
-	if (ddata->workqueue == NULL) {
-		dev_err(dev, "can't create workqueue\n");
-		return -ENOMEM;
-	}
 	INIT_DELAYED_WORK(&ddata->ulps_work, dsicm_ulps_work);
 
 	dsicm_hw_reset(ddata);
@@ -1262,7 +1255,7 @@
 				dev, ddata, &dsicm_bl_ops, &props);
 		if (IS_ERR(bldev)) {
 			r = PTR_ERR(bldev);
-			goto err_bl;
+			goto err_reg;
 		}
 
 		ddata->bldev = bldev;
@@ -1285,8 +1278,6 @@
 err_sysfs_create:
 	if (bldev != NULL)
 		backlight_device_unregister(bldev);
-err_bl:
-	destroy_workqueue(ddata->workqueue);
 err_reg:
 	return r;
 }
@@ -1316,7 +1307,6 @@
 	omap_dss_put_device(ddata->in);
 
 	dsicm_cancel_ulps_work(ddata);
-	destroy_workqueue(ddata->workqueue);
 
 	/* reset, to be sure that the panel is in a valid state */
 	dsicm_hw_reset(ddata);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
index 3691bde..a864608 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
@@ -644,6 +644,7 @@
 {
 
 	int r;
+	long time_left;
 	DECLARE_COMPLETION_ONSTACK(completion);
 
 	r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
@@ -652,15 +653,15 @@
 	if (r)
 		return r;
 
-	timeout = wait_for_completion_interruptible_timeout(&completion,
+	time_left = wait_for_completion_interruptible_timeout(&completion,
 			timeout);
 
 	omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);
 
-	if (timeout == 0)
+	if (time_left == 0)
 		return -ETIMEDOUT;
 
-	if (timeout == -ERESTARTSYS)
+	if (time_left == -ERESTARTSYS)
 		return -ERESTARTSYS;
 
 	return 0;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index 9e4800a..30d49f3 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -1167,7 +1167,6 @@
 {
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 	struct regulator *vdds_dsi;
-	int r;
 
 	if (dsi->vdds_dsi_reg != NULL)
 		return 0;
@@ -1180,13 +1179,6 @@
 		return PTR_ERR(vdds_dsi);
 	}
 
-	r = regulator_set_voltage(vdds_dsi, 1800000, 1800000);
-	if (r) {
-		devm_regulator_put(vdds_dsi);
-		DSSERR("can't set the DSI regulator voltage\n");
-		return r;
-	}
-
 	dsi->vdds_dsi_reg = vdds_dsi;
 
 	return 0;
@@ -5348,7 +5340,7 @@
 
 	dsi->phy_base = devm_ioremap(&dsidev->dev, res->start,
 		resource_size(res));
-	if (!dsi->proto_base) {
+	if (!dsi->phy_base) {
 		DSSERR("can't ioremap DSI PHY\n");
 		return -ENOMEM;
 	}
@@ -5368,7 +5360,7 @@
 
 	dsi->pll_base = devm_ioremap(&dsidev->dev, res->start,
 		resource_size(res));
-	if (!dsi->proto_base) {
+	if (!dsi->pll_base) {
 		DSSERR("can't ioremap DSI PLL\n");
 		return -ENOMEM;
 	}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
index 926a6f2..156a254 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
@@ -100,7 +100,6 @@
 
 static int hdmi_init_regulator(void)
 {
-	int r;
 	struct regulator *reg;
 
 	if (hdmi.vdda_reg != NULL)
@@ -114,13 +113,6 @@
 		return PTR_ERR(reg);
 	}
 
-	r = regulator_set_voltage(reg, 1800000, 1800000);
-	if (r) {
-		devm_regulator_put(reg);
-		DSSWARN("can't set the regulator voltage\n");
-		return r;
-	}
-
 	hdmi.vdda_reg = reg;
 
 	return 0;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
index 0ee829a..4da36bc 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
@@ -119,7 +119,6 @@
 
 static int hdmi_init_regulator(void)
 {
-	int r;
 	struct regulator *reg;
 
 	if (hdmi.vdda_reg != NULL)
@@ -131,13 +130,6 @@
 		return PTR_ERR(reg);
 	}
 
-	r = regulator_set_voltage(reg, 1800000, 1800000);
-	if (r) {
-		devm_regulator_put(reg);
-		DSSWARN("can't set the regulator voltage\n");
-		return r;
-	}
-
 	hdmi.vdda_reg = reg;
 
 	return 0;
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index aa8d288..1a4070f 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -113,7 +113,7 @@
 /*
  * Default video mode. In case the modedb doesn't work.
  */
-static struct fb_var_screeninfo pm2fb_var = {
+static const struct fb_var_screeninfo pm2fb_var = {
 	/* "640x480, 8 bpp @ 60 Hz */
 	.xres =			640,
 	.yres =			480,
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 3b1ca44..a2564ab 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -686,8 +686,8 @@
 	if (!pages)
 		return -ENOMEM;
 
-	ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, WRITE,
-			0, pages);
+	ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, pages,
+			FOLL_WRITE);
 
 	if (ret < nr_pages) {
 		nr_pages = ret;
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 2c0487f..ef73f14 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2125,7 +2125,7 @@
 
 	timings = of_get_display_timings(disp);
 	if (!timings)
-		goto out;
+		return -EINVAL;
 
 	ret = -ENOMEM;
 	info->modes = kmalloc_array(timings->num_timings,
@@ -2186,6 +2186,7 @@
 	ret = of_property_read_u32(np, "bus-width", &bus_width);
 	if (ret) {
 		dev_err(dev, "no bus-width specified: %d\n", ret);
+		of_node_put(np);
 		return ret;
 	}
 
diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
index 96aa46d..5d6179e 100644
--- a/drivers/video/fbdev/s1d13xxxfb.c
+++ b/drivers/video/fbdev/s1d13xxxfb.c
@@ -83,7 +83,7 @@
 /*
  * here we define the default struct fb_fix_screeninfo
  */
-static struct fb_fix_screeninfo s1d13xxxfb_fix = {
+static const struct fb_fix_screeninfo s1d13xxxfb_fix = {
 	.id		= S1D_FBID,
 	.type		= FB_TYPE_PACKED_PIXELS,
 	.visual		= FB_VISUAL_PSEUDOCOLOR,
@@ -929,7 +929,7 @@
 		s1dfb->disp_save = kmalloc(info->fix.smem_len, GFP_KERNEL);
 
 	if (!s1dfb->disp_save) {
-		printk(KERN_ERR PFX "no memory to save screen");
+		printk(KERN_ERR PFX "no memory to save screen\n");
 		return -ENOMEM;
 	}
 
diff --git a/drivers/video/fbdev/s3c2410fb.c b/drivers/video/fbdev/s3c2410fb.c
index 0dd86be..a67e456 100644
--- a/drivers/video/fbdev/s3c2410fb.c
+++ b/drivers/video/fbdev/s3c2410fb.c
@@ -767,7 +767,7 @@
 	return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
 
 static int s3c2410fb_cpufreq_transition(struct notifier_block *nb,
 					unsigned long val, void *data)
diff --git a/drivers/video/fbdev/s3c2410fb.h b/drivers/video/fbdev/s3c2410fb.h
index 47a17bd..cdd11e2 100644
--- a/drivers/video/fbdev/s3c2410fb.h
+++ b/drivers/video/fbdev/s3c2410fb.h
@@ -32,7 +32,7 @@
 	unsigned long		clk_rate;
 	unsigned int		palette_ready;
 
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
 	struct notifier_block	freq_transition;
 #endif
 
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index 6c77ab0..c30a91c 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -1660,7 +1660,7 @@
 
 /* --------------------------------------------------------------------- */
 
-static struct fb_var_screeninfo savagefb_var800x600x8 = {
+static const struct fb_var_screeninfo savagefb_var800x600x8 = {
 	.accel_flags =	FB_ACCELF_TEXT,
 	.xres =		800,
 	.yres =		600,
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index e9cf199..61f799a 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -33,14 +33,14 @@
 #include <linux/parser.h>
 #include <linux/regulator/consumer.h>
 
-static struct fb_fix_screeninfo simplefb_fix = {
+static const struct fb_fix_screeninfo simplefb_fix = {
 	.id		= "simple",
 	.type		= FB_TYPE_PACKED_PIXELS,
 	.visual		= FB_VISUAL_TRUECOLOR,
 	.accel		= FB_ACCEL_NONE,
 };
 
-static struct fb_var_screeninfo simplefb_var = {
+static const struct fb_var_screeninfo simplefb_var = {
 	.height		= -1,
 	.width		= -1,
 	.activate	= FB_ACTIVATE_NOW,
@@ -74,8 +74,14 @@
 	return 0;
 }
 
+struct simplefb_par;
+static void simplefb_clocks_destroy(struct simplefb_par *par);
+static void simplefb_regulators_destroy(struct simplefb_par *par);
+
 static void simplefb_destroy(struct fb_info *info)
 {
+	simplefb_regulators_destroy(info->par);
+	simplefb_clocks_destroy(info->par);
 	if (info->screen_base)
 		iounmap(info->screen_base);
 }
@@ -487,11 +493,8 @@
 static int simplefb_remove(struct platform_device *pdev)
 {
 	struct fb_info *info = platform_get_drvdata(pdev);
-	struct simplefb_par *par = info->par;
 
 	unregister_framebuffer(info);
-	simplefb_regulators_destroy(par);
-	simplefb_clocks_destroy(par);
 	framebuffer_release(info);
 
 	return 0;
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 86ae1d4..73cb4ff 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -56,7 +56,7 @@
 
 void __iomem *smtc_regbaseaddress;	/* Memory Map IO starting address */
 
-static struct fb_var_screeninfo smtcfb_var = {
+static const struct fb_var_screeninfo smtcfb_var = {
 	.xres           = 1024,
 	.yres           = 600,
 	.xres_virtual   = 1024,
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index 9279e5f..ec2e7e3 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -1761,10 +1761,8 @@
 static void ufx_usb_disconnect(struct usb_interface *interface)
 {
 	struct ufx_data *dev;
-	struct fb_info *info;
 
 	dev = usb_get_intfdata(interface);
-	info = dev->info;
 
 	pr_debug("USB disconnect starting\n");
 
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index a9c45c8..2925d5c 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -64,7 +64,7 @@
 	u32 contrast;
 	u32 dclk_div;
 	u32 dclk_frq;
-	struct ssd1307fb_deviceinfo *device_info;
+	const struct ssd1307fb_deviceinfo *device_info;
 	struct i2c_client *client;
 	u32 height;
 	struct fb_info *info;
@@ -84,7 +84,7 @@
 	u8	data[0];
 };
 
-static struct fb_fix_screeninfo ssd1307fb_fix = {
+static const struct fb_fix_screeninfo ssd1307fb_fix = {
 	.id		= "Solomon SSD1307",
 	.type		= FB_TYPE_PACKED_PIXELS,
 	.visual		= FB_VISUAL_MONO10,
@@ -94,7 +94,7 @@
 	.accel		= FB_ACCEL_NONE,
 };
 
-static struct fb_var_screeninfo ssd1307fb_var = {
+static const struct fb_var_screeninfo ssd1307fb_var = {
 	.bits_per_pixel	= 1,
 };
 
@@ -559,8 +559,7 @@
 	par->info = info;
 	par->client = client;
 
-	par->device_info = (struct ssd1307fb_deviceinfo *)of_match_device(
-			ssd1307fb_of_match, &client->dev)->data;
+	par->device_info = of_device_get_match_data(&client->dev);
 
 	par->reset = of_get_named_gpio(client->dev.of_node,
 					 "reset-gpios", 0);
diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index 621fa44..d5fa313 100644
--- a/drivers/video/fbdev/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
@@ -82,7 +82,7 @@
 #define VOODOO3_MAX_PIXCLOCK 300000
 #define VOODOO5_MAX_PIXCLOCK 350000
 
-static struct fb_fix_screeninfo tdfx_fix = {
+static const struct fb_fix_screeninfo tdfx_fix = {
 	.type =		FB_TYPE_PACKED_PIXELS,
 	.visual =	FB_VISUAL_PSEUDOCOLOR,
 	.ypanstep =	1,
@@ -90,7 +90,7 @@
 	.accel =	FB_ACCEL_3DFX_BANSHEE
 };
 
-static struct fb_var_screeninfo tdfx_var = {
+static const struct fb_var_screeninfo tdfx_var = {
 	/* "640x480, 8 bpp @ 60 Hz */
 	.xres =		640,
 	.yres =		480,
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index 178ae93..98af9e0 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -33,7 +33,7 @@
 static char v86d_path[PATH_MAX] = "/sbin/v86d";
 static char v86d_started;	/* has v86d been started by uvesafb? */
 
-static struct fb_fix_screeninfo uvesafb_fix = {
+static const struct fb_fix_screeninfo uvesafb_fix = {
 	.id	= "VESA VGA",
 	.type	= FB_TYPE_PACKED_PIXELS,
 	.accel	= FB_ACCEL_NONE,
diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
index b9c2f81..da653a0 100644
--- a/drivers/video/fbdev/vfb.c
+++ b/drivers/video/fbdev/vfb.c
@@ -35,76 +35,23 @@
 static void *videomemory;
 static u_long videomemorysize = VIDEOMEMSIZE;
 module_param(videomemorysize, ulong, 0);
+MODULE_PARM_DESC(videomemorysize, "RAM available to frame buffer (in bytes)");
 
-/**********************************************************************
- *
- * Memory management
- *
- **********************************************************************/
-static void *rvmalloc(unsigned long size)
-{
-	void *mem;
-	unsigned long adr;
+static char *mode_option = NULL;
+module_param(mode_option, charp, 0);
+MODULE_PARM_DESC(mode_option, "Preferred video mode (e.g. 640x480-8@60)");
 
-	size = PAGE_ALIGN(size);
-	mem = vmalloc_32(size);
-	if (!mem)
-		return NULL;
-
-	/*
-	 * VFB must clear memory to prevent kernel info
-	 * leakage into userspace
-	 * VGA-based drivers MUST NOT clear memory if
-	 * they want to be able to take over vgacon
-	 */
-
-	memset(mem, 0, size);
-	adr = (unsigned long) mem;
-	while (size > 0) {
-		SetPageReserved(vmalloc_to_page((void *)adr));
-		adr += PAGE_SIZE;
-		size -= PAGE_SIZE;
-	}
-
-	return mem;
-}
-
-static void rvfree(void *mem, unsigned long size)
-{
-	unsigned long adr;
-
-	if (!mem)
-		return;
-
-	adr = (unsigned long) mem;
-	while ((long) size > 0) {
-		ClearPageReserved(vmalloc_to_page((void *)adr));
-		adr += PAGE_SIZE;
-		size -= PAGE_SIZE;
-	}
-	vfree(mem);
-}
-
-static struct fb_var_screeninfo vfb_default = {
+static const struct fb_videomode vfb_default = {
 	.xres =		640,
 	.yres =		480,
-	.xres_virtual =	640,
-	.yres_virtual =	480,
-	.bits_per_pixel = 8,
-	.red =		{ 0, 8, 0 },
-      	.green =	{ 0, 8, 0 },
-      	.blue =		{ 0, 8, 0 },
-      	.activate =	FB_ACTIVATE_TEST,
-      	.height =	-1,
-      	.width =	-1,
-      	.pixclock =	20000,
-      	.left_margin =	64,
-      	.right_margin =	64,
-      	.upper_margin =	32,
-      	.lower_margin =	32,
-      	.hsync_len =	64,
-      	.vsync_len =	2,
-      	.vmode =	FB_VMODE_NONINTERLACED,
+	.pixclock =	20000,
+	.left_margin =	64,
+	.right_margin =	64,
+	.upper_margin =	32,
+	.lower_margin =	32,
+	.hsync_len =	64,
+	.vsync_len =	2,
+	.vmode =	FB_VMODE_NONINTERLACED,
 };
 
 static struct fb_fix_screeninfo vfb_fix = {
@@ -119,6 +66,7 @@
 
 static bool vfb_enable __initdata = 0;	/* disabled by default */
 module_param(vfb_enable, bool, 0);
+MODULE_PARM_DESC(vfb_enable, "Enable Virtual FB driver");
 
 static int vfb_check_var(struct fb_var_screeninfo *var,
 			 struct fb_info *info);
@@ -421,35 +369,7 @@
 static int vfb_mmap(struct fb_info *info,
 		    struct vm_area_struct *vma)
 {
-	unsigned long start = vma->vm_start;
-	unsigned long size = vma->vm_end - vma->vm_start;
-	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
-	unsigned long page, pos;
-
-	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
-		return -EINVAL;
-	if (size > info->fix.smem_len)
-		return -EINVAL;
-	if (offset > info->fix.smem_len - size)
-		return -EINVAL;
-
-	pos = (unsigned long)info->fix.smem_start + offset;
-
-	while (size > 0) {
-		page = vmalloc_to_pfn((void *)pos);
-		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) {
-			return -EAGAIN;
-		}
-		start += PAGE_SIZE;
-		pos += PAGE_SIZE;
-		if (size > PAGE_SIZE)
-			size -= PAGE_SIZE;
-		else
-			size = 0;
-	}
-
-	return 0;
-
+	return remap_vmalloc_range(vma, (void *)info->fix.smem_start, vma->vm_pgoff);
 }
 
 #ifndef MODULE
@@ -477,6 +397,8 @@
 		/* Test disable for backwards compatibility */
 		if (!strcmp(this_opt, "disable"))
 			vfb_enable = 0;
+		else
+			mode_option = this_opt;
 	}
 	return 1;
 }
@@ -489,12 +411,13 @@
 static int vfb_probe(struct platform_device *dev)
 {
 	struct fb_info *info;
+	unsigned int size = PAGE_ALIGN(videomemorysize);
 	int retval = -ENOMEM;
 
 	/*
 	 * For real video cards we use ioremap.
 	 */
-	if (!(videomemory = rvmalloc(videomemorysize)))
+	if (!(videomemory = vmalloc_32_user(size)))
 		return retval;
 
 	info = framebuffer_alloc(sizeof(u32) * 256, &dev->dev);
@@ -504,11 +427,13 @@
 	info->screen_base = (char __iomem *)videomemory;
 	info->fbops = &vfb_ops;
 
-	retval = fb_find_mode(&info->var, info, NULL,
-			      NULL, 0, NULL, 8);
+	if (!fb_find_mode(&info->var, info, mode_option,
+			  NULL, 0, &vfb_default, 8)){
+		fb_err(info, "Unable to find usable video mode.\n");
+		retval = -EINVAL;
+		goto err1;
+	}
 
-	if (!retval || (retval == 4))
-		info->var = vfb_default;
 	vfb_fix.smem_start = (unsigned long) videomemory;
 	vfb_fix.smem_len = videomemorysize;
 	info->fix = vfb_fix;
@@ -533,7 +458,7 @@
 err1:
 	framebuffer_release(info);
 err:
-	rvfree(videomemory, videomemorysize);
+	vfree(videomemory);
 	return retval;
 }
 
@@ -543,7 +468,7 @@
 
 	if (info) {
 		unregister_framebuffer(info);
-		rvfree(videomemory, videomemorysize);
+		vfree(videomemory);
 		fb_dealloc_cmap(&info->cmap);
 		framebuffer_release(info);
 	}
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index 283d335..5f0690c 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -85,7 +85,7 @@
 };
 
 /* name should not depend on EGA/VGA */
-static struct fb_fix_screeninfo vga16fb_fix = {
+static const struct fb_fix_screeninfo vga16fb_fix = {
 	.id		= "VGA16 VGA",
 	.smem_start	= VGA_FB_PHYS,
 	.smem_len	= VGA_FB_PHYS_LEN,
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 60bdad3..150ce2a 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -245,8 +245,8 @@
 	/* Get the physical addresses of the source buffer */
 	down_read(&current->mm->mmap_sem);
 	num_pinned = get_user_pages(param.local_vaddr - lb_offset,
-		num_pages, (param.source == -1) ? READ : WRITE,
-		0, pages, NULL);
+		num_pages, (param.source == -1) ? 0 : FOLL_WRITE,
+		pages, NULL);
 	up_read(&current->mm->mmap_sem);
 
 	if (num_pinned != num_pages) {
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 50dbaa8..fdd3228 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1844,4 +1844,53 @@
 
 	  Most people will say N.
 
+comment "Watchdog Pretimeout Governors"
+
+config WATCHDOG_PRETIMEOUT_GOV
+	bool "Enable watchdog pretimeout governors"
+	help
+	  The option allows to select watchdog pretimeout governors.
+
+if WATCHDOG_PRETIMEOUT_GOV
+
+choice
+	prompt "Default Watchdog Pretimeout Governor"
+	default WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC
+	help
+	  This option selects a default watchdog pretimeout governor.
+	  The governor takes its action, if a watchdog is capable
+	  to report a pretimeout event.
+
+config WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP
+	bool "noop"
+	select WATCHDOG_PRETIMEOUT_GOV_NOOP
+	help
+	  Use noop watchdog pretimeout governor by default. If noop
+	  governor is selected by a user, write a short message to
+	  the kernel log buffer and don't do any system changes.
+
+config WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC
+	bool "panic"
+	select WATCHDOG_PRETIMEOUT_GOV_PANIC
+	help
+	  Use panic watchdog pretimeout governor by default, if
+	  a watchdog pretimeout event happens, consider that
+	  a watchdog feeder is dead and reboot is unavoidable.
+
+endchoice
+
+config WATCHDOG_PRETIMEOUT_GOV_NOOP
+	tristate "Noop watchdog pretimeout governor"
+	help
+	  Noop watchdog pretimeout governor, only an informational
+	  message is added to kernel log buffer.
+
+config WATCHDOG_PRETIMEOUT_GOV_PANIC
+	tristate "Panic watchdog pretimeout governor"
+	help
+	  Panic watchdog pretimeout governor, on watchdog pretimeout
+	  event put the kernel into panic.
+
+endif # WATCHDOG_PRETIMEOUT_GOV
+
 endif # WATCHDOG
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index cba0043..caa9f4a 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -3,9 +3,15 @@
 #
 
 # The WatchDog Timer Driver Core.
-watchdog-objs	+= watchdog_core.o watchdog_dev.o
 obj-$(CONFIG_WATCHDOG_CORE)	+= watchdog.o
 
+watchdog-objs	+= watchdog_core.o watchdog_dev.o
+
+watchdog-$(CONFIG_WATCHDOG_PRETIMEOUT_GOV)	+= watchdog_pretimeout.o
+
+obj-$(CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP)	+= pretimeout_noop.o
+obj-$(CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC)	+= pretimeout_panic.o
+
 # Only one watchdog can succeed. We probe the ISA/PCI/USB based
 # watchdog-cards first, then the architecture specific watchdog
 # drivers and then the architecture independent "softdog" driver.
diff --git a/drivers/watchdog/asm9260_wdt.c b/drivers/watchdog/asm9260_wdt.c
index c9686b2..d0b59ba 100644
--- a/drivers/watchdog/asm9260_wdt.c
+++ b/drivers/watchdog/asm9260_wdt.c
@@ -389,7 +389,6 @@
 static struct platform_driver asm9260_wdt_driver = {
 	.driver = {
 		.name = "asm9260-wdt",
-		.owner = THIS_MODULE,
 		.of_match_table	= asm9260_wdt_of_match,
 	},
 	.probe = asm9260_wdt_probe,
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 835d310..e2209bf 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -35,6 +35,7 @@
 #include <linux/err.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/uaccess.h>
 
 #define DRIVER_NAME	"ath79-wdt"
 
diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c
index 4245b65..e238df4 100644
--- a/drivers/watchdog/bcm7038_wdt.c
+++ b/drivers/watchdog/bcm7038_wdt.c
@@ -107,7 +107,7 @@
 				WDIOF_MAGICCLOSE
 };
 
-static struct watchdog_ops bcm7038_wdt_ops = {
+static const struct watchdog_ops bcm7038_wdt_ops = {
 	.owner		= THIS_MODULE,
 	.start		= bcm7038_wdt_start,
 	.stop		= bcm7038_wdt_stop,
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index 4dda902..98acef7 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -269,7 +269,7 @@
 };
 
 /* Watchdog Core Ops */
-static struct watchdog_ops cdns_wdt_ops = {
+static const struct watchdog_ops cdns_wdt_ops = {
 	.owner = THIS_MODULE,
 	.start = cdns_wdt_start,
 	.stop = cdns_wdt_stop,
@@ -424,8 +424,10 @@
 	struct platform_device *pdev = to_platform_device(dev);
 	struct cdns_wdt *wdt = platform_get_drvdata(pdev);
 
-	cdns_wdt_stop(&wdt->cdns_wdt_device);
-	clk_disable_unprepare(wdt->clk);
+	if (watchdog_active(&wdt->cdns_wdt_device)) {
+		cdns_wdt_stop(&wdt->cdns_wdt_device);
+		clk_disable_unprepare(wdt->clk);
+	}
 
 	return 0;
 }
@@ -442,12 +444,14 @@
 	struct platform_device *pdev = to_platform_device(dev);
 	struct cdns_wdt *wdt = platform_get_drvdata(pdev);
 
-	ret = clk_prepare_enable(wdt->clk);
-	if (ret) {
-		dev_err(dev, "unable to enable clock\n");
-		return ret;
+	if (watchdog_active(&wdt->cdns_wdt_device)) {
+		ret = clk_prepare_enable(wdt->clk);
+		if (ret) {
+			dev_err(dev, "unable to enable clock\n");
+			return ret;
+		}
+		cdns_wdt_start(&wdt->cdns_wdt_device);
 	}
-	cdns_wdt_start(&wdt->cdns_wdt_device);
 
 	return 0;
 }
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index 2acb51c..3c6a3de 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -54,6 +54,7 @@
 struct dw_wdt {
 	void __iomem		*regs;
 	struct clk		*clk;
+	unsigned long		rate;
 	struct notifier_block	restart_handler;
 	struct watchdog_device	wdd;
 };
@@ -72,7 +73,7 @@
 	 * There are 16 possible timeout values in 0..15 where the number of
 	 * cycles is 2 ^ (16 + i) and the watchdog counts down.
 	 */
-	return (1U << (16 + top)) / clk_get_rate(dw_wdt->clk);
+	return (1U << (16 + top)) / dw_wdt->rate;
 }
 
 static int dw_wdt_get_top(struct dw_wdt *dw_wdt)
@@ -163,7 +164,7 @@
 	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
 
 	return readl(dw_wdt->regs + WDOG_CURRENT_COUNT_REG_OFFSET) /
-		clk_get_rate(dw_wdt->clk);
+		dw_wdt->rate;
 }
 
 static const struct watchdog_info dw_wdt_ident = {
@@ -231,6 +232,12 @@
 	if (ret)
 		return ret;
 
+	dw_wdt->rate = clk_get_rate(dw_wdt->clk);
+	if (dw_wdt->rate == 0) {
+		ret = -EINVAL;
+		goto out_disable_clk;
+	}
+
 	wdd = &dw_wdt->wdd;
 	wdd->info = &dw_wdt_ident;
 	wdd->ops = &dw_wdt_ops;
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 8f89bd8..70c7194 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -39,7 +39,7 @@
 #include <asm/nmi.h>
 #include <asm/frame.h>
 
-#define HPWDT_VERSION			"1.3.3"
+#define HPWDT_VERSION			"1.4.0"
 #define SECS_TO_TICKS(secs)		((secs) * 1000 / 128)
 #define TICKS_TO_SECS(ticks)		((ticks) * 128 / 1000)
 #define HPWDT_MAX_TIMER			TICKS_TO_SECS(65535)
@@ -814,7 +814,8 @@
 	 * not run on a legacy ASM box.
 	 * So we only support the G5 ProLiant servers and higher.
 	 */
-	if (dev->subsystem_vendor != PCI_VENDOR_ID_HP) {
+	if (dev->subsystem_vendor != PCI_VENDOR_ID_HP &&
+	    dev->subsystem_vendor != PCI_VENDOR_ID_HP_3PAR) {
 		dev_warn(&dev->dev,
 			"This server does not have an iLO2+ ASIC.\n");
 		return -ENODEV;
@@ -823,7 +824,8 @@
 	/*
 	 * Ignore all auxilary iLO devices with the following PCI ID
 	 */
-	if (dev->subsystem_device == 0x1979)
+	if (dev->subsystem_vendor == PCI_VENDOR_ID_HP &&
+	    dev->subsystem_device == 0x1979)
 		return -ENODEV;
 
 	if (pci_enable_device(dev)) {
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 54cab18..06fcb6c 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -629,7 +629,7 @@
 	return 0;
 }
 
-static struct dev_pm_ops iTCO_wdt_pm = {
+static const struct dev_pm_ops iTCO_wdt_pm = {
 	.suspend_noirq = iTCO_wdt_suspend_noirq,
 	.resume_noirq = iTCO_wdt_resume_noirq,
 };
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 62f346b..4874b0f 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -24,6 +24,7 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -37,18 +38,23 @@
 
 #define IMX2_WDT_WCR		0x00		/* Control Register */
 #define IMX2_WDT_WCR_WT		(0xFF << 8)	/* -> Watchdog Timeout Field */
-#define IMX2_WDT_WCR_WDA	(1 << 5)	/* -> External Reset WDOG_B */
-#define IMX2_WDT_WCR_SRS	(1 << 4)	/* -> Software Reset Signal */
-#define IMX2_WDT_WCR_WRE	(1 << 3)	/* -> WDOG Reset Enable */
-#define IMX2_WDT_WCR_WDE	(1 << 2)	/* -> Watchdog Enable */
-#define IMX2_WDT_WCR_WDZST	(1 << 0)	/* -> Watchdog timer Suspend */
+#define IMX2_WDT_WCR_WDA	BIT(5)		/* -> External Reset WDOG_B */
+#define IMX2_WDT_WCR_SRS	BIT(4)		/* -> Software Reset Signal */
+#define IMX2_WDT_WCR_WRE	BIT(3)		/* -> WDOG Reset Enable */
+#define IMX2_WDT_WCR_WDE	BIT(2)		/* -> Watchdog Enable */
+#define IMX2_WDT_WCR_WDZST	BIT(0)		/* -> Watchdog timer Suspend */
 
 #define IMX2_WDT_WSR		0x02		/* Service Register */
 #define IMX2_WDT_SEQ1		0x5555		/* -> service sequence 1 */
 #define IMX2_WDT_SEQ2		0xAAAA		/* -> service sequence 2 */
 
 #define IMX2_WDT_WRSR		0x04		/* Reset Status Register */
-#define IMX2_WDT_WRSR_TOUT	(1 << 1)	/* -> Reset due to Timeout */
+#define IMX2_WDT_WRSR_TOUT	BIT(1)		/* -> Reset due to Timeout */
+
+#define IMX2_WDT_WICR		0x06		/* Interrupt Control Register */
+#define IMX2_WDT_WICR_WIE	BIT(15)		/* -> Interrupt Enable */
+#define IMX2_WDT_WICR_WTIS	BIT(14)		/* -> Interrupt Status */
+#define IMX2_WDT_WICR_WICT	0xFF		/* -> Interrupt Count Timeout */
 
 #define IMX2_WDT_WMCR		0x08		/* Misc Register */
 
@@ -80,6 +86,12 @@
 	.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
 };
 
+static const struct watchdog_info imx2_wdt_pretimeout_info = {
+	.identity = "imx2+ watchdog",
+	.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE |
+		   WDIOF_PRETIMEOUT,
+};
+
 static int imx2_wdt_restart(struct watchdog_device *wdog, unsigned long action,
 			    void *data)
 {
@@ -169,6 +181,35 @@
 	return 0;
 }
 
+static int imx2_wdt_set_pretimeout(struct watchdog_device *wdog,
+				   unsigned int new_pretimeout)
+{
+	struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
+
+	if (new_pretimeout >= IMX2_WDT_MAX_TIME)
+		return -EINVAL;
+
+	wdog->pretimeout = new_pretimeout;
+
+	regmap_update_bits(wdev->regmap, IMX2_WDT_WICR,
+			   IMX2_WDT_WICR_WIE | IMX2_WDT_WICR_WICT,
+			   IMX2_WDT_WICR_WIE | (new_pretimeout << 1));
+	return 0;
+}
+
+static irqreturn_t imx2_wdt_isr(int irq, void *wdog_arg)
+{
+	struct watchdog_device *wdog = wdog_arg;
+	struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
+
+	regmap_write_bits(wdev->regmap, IMX2_WDT_WICR,
+			  IMX2_WDT_WICR_WTIS, IMX2_WDT_WICR_WTIS);
+
+	watchdog_notify_pretimeout(wdog);
+
+	return IRQ_HANDLED;
+}
+
 static int imx2_wdt_start(struct watchdog_device *wdog)
 {
 	struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
@@ -188,6 +229,7 @@
 	.start = imx2_wdt_start,
 	.ping = imx2_wdt_ping,
 	.set_timeout = imx2_wdt_set_timeout,
+	.set_pretimeout = imx2_wdt_set_pretimeout,
 	.restart = imx2_wdt_restart,
 };
 
@@ -236,6 +278,12 @@
 	wdog->max_hw_heartbeat_ms = IMX2_WDT_MAX_TIME * 1000;
 	wdog->parent		= &pdev->dev;
 
+	ret = platform_get_irq(pdev, 0);
+	if (ret > 0)
+		if (!devm_request_irq(&pdev->dev, ret, imx2_wdt_isr, 0,
+				      dev_name(&pdev->dev), wdog))
+			wdog->info = &imx2_wdt_pretimeout_info;
+
 	ret = clk_prepare_enable(wdev->clk);
 	if (ret)
 		return ret;
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index 5bf931c..8e302d0 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -430,7 +430,7 @@
 			WDIOF_PRETIMEOUT
 };
 
-static struct watchdog_ops kempld_wdt_ops = {
+static const struct watchdog_ops kempld_wdt_ops = {
 	.owner		= THIS_MODULE,
 	.start		= kempld_wdt_start,
 	.stop		= kempld_wdt_stop,
diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
index 4a2290f..d5735c1 100644
--- a/drivers/watchdog/mt7621_wdt.c
+++ b/drivers/watchdog/mt7621_wdt.c
@@ -139,7 +139,6 @@
 	if (!IS_ERR(mt7621_wdt_reset))
 		reset_control_deassert(mt7621_wdt_reset);
 
-	mt7621_wdt_dev.dev = &pdev->dev;
 	mt7621_wdt_dev.bootstatus = mt7621_wdt_bootcause();
 
 	watchdog_init_timeout(&mt7621_wdt_dev, mt7621_wdt_dev.max_timeout,
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index b2e1b4c..fae7fe9 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -10,6 +10,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -45,6 +46,7 @@
 	u32 wdt_interval;
 	spinlock_t spinlock;
 	struct watchdog_device xilinx_wdt_wdd;
+	struct clk		*clk;
 };
 
 static int xilinx_wdt_start(struct watchdog_device *wdd)
@@ -195,16 +197,30 @@
 	spin_lock_init(&xdev->spinlock);
 	watchdog_set_drvdata(xilinx_wdt_wdd, xdev);
 
+	xdev->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(xdev->clk)) {
+		if (PTR_ERR(xdev->clk) == -ENOENT)
+			xdev->clk = NULL;
+		else
+			return PTR_ERR(xdev->clk);
+	}
+
+	rc = clk_prepare_enable(xdev->clk);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to enable clock\n");
+		return rc;
+	}
+
 	rc = xwdt_selftest(xdev);
 	if (rc == XWT_TIMER_FAILED) {
 		dev_err(&pdev->dev, "SelfTest routine error\n");
-		return rc;
+		goto err_clk_disable;
 	}
 
 	rc = watchdog_register_device(xilinx_wdt_wdd);
 	if (rc) {
 		dev_err(&pdev->dev, "Cannot register watchdog (err=%d)\n", rc);
-		return rc;
+		goto err_clk_disable;
 	}
 
 	dev_info(&pdev->dev, "Xilinx Watchdog Timer at %p with timeout %ds\n",
@@ -213,6 +229,10 @@
 	platform_set_drvdata(pdev, xdev);
 
 	return 0;
+err_clk_disable:
+	clk_disable_unprepare(xdev->clk);
+
+	return rc;
 }
 
 static int xwdt_remove(struct platform_device *pdev)
@@ -220,6 +240,7 @@
 	struct xwdt_device *xdev = platform_get_drvdata(pdev);
 
 	watchdog_unregister_device(&xdev->xilinx_wdt_wdd);
+	clk_disable_unprepare(xdev->clk);
 
 	return 0;
 }
diff --git a/drivers/watchdog/pretimeout_noop.c b/drivers/watchdog/pretimeout_noop.c
new file mode 100644
index 0000000..85f5299
--- /dev/null
+++ b/drivers/watchdog/pretimeout_noop.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015-2016 Mentor Graphics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/watchdog.h>
+
+#include "watchdog_pretimeout.h"
+
+/**
+ * pretimeout_noop - No operation on watchdog pretimeout event
+ * @wdd - watchdog_device
+ *
+ * This function prints a message about pretimeout to kernel log.
+ */
+static void pretimeout_noop(struct watchdog_device *wdd)
+{
+	pr_alert("watchdog%d: pretimeout event\n", wdd->id);
+}
+
+static struct watchdog_governor watchdog_gov_noop = {
+	.name		= "noop",
+	.pretimeout	= pretimeout_noop,
+};
+
+static int __init watchdog_gov_noop_register(void)
+{
+	return watchdog_register_governor(&watchdog_gov_noop);
+}
+
+static void __exit watchdog_gov_noop_unregister(void)
+{
+	watchdog_unregister_governor(&watchdog_gov_noop);
+}
+module_init(watchdog_gov_noop_register);
+module_exit(watchdog_gov_noop_unregister);
+
+MODULE_AUTHOR("Vladimir Zapolskiy <vladimir_zapolskiy@mentor.com>");
+MODULE_DESCRIPTION("Panic watchdog pretimeout governor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/pretimeout_panic.c b/drivers/watchdog/pretimeout_panic.c
new file mode 100644
index 0000000..0c197a1
--- /dev/null
+++ b/drivers/watchdog/pretimeout_panic.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015-2016 Mentor Graphics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/watchdog.h>
+
+#include "watchdog_pretimeout.h"
+
+/**
+ * pretimeout_panic - Panic on watchdog pretimeout event
+ * @wdd - watchdog_device
+ *
+ * Panic, watchdog has not been fed till pretimeout event.
+ */
+static void pretimeout_panic(struct watchdog_device *wdd)
+{
+	panic("watchdog pretimeout event\n");
+}
+
+static struct watchdog_governor watchdog_gov_panic = {
+	.name		= "panic",
+	.pretimeout	= pretimeout_panic,
+};
+
+static int __init watchdog_gov_panic_register(void)
+{
+	return watchdog_register_governor(&watchdog_gov_panic);
+}
+
+static void __exit watchdog_gov_panic_unregister(void)
+{
+	watchdog_unregister_governor(&watchdog_gov_panic);
+}
+module_init(watchdog_gov_panic_register);
+module_exit(watchdog_gov_panic_unregister);
+
+MODULE_AUTHOR("Vladimir Zapolskiy <vladimir_zapolskiy@mentor.com>");
+MODULE_DESCRIPTION("Panic watchdog pretimeout governor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/rn5t618_wdt.c b/drivers/watchdog/rn5t618_wdt.c
index d1c1227..0805ee2 100644
--- a/drivers/watchdog/rn5t618_wdt.c
+++ b/drivers/watchdog/rn5t618_wdt.c
@@ -136,7 +136,7 @@
 	.identity	= DRIVER_NAME,
 };
 
-static struct watchdog_ops rn5t618_wdt_ops = {
+static const struct watchdog_ops rn5t618_wdt_ops = {
 	.owner          = THIS_MODULE,
 	.start          = rn5t618_wdt_start,
 	.stop           = rn5t618_wdt_stop,
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index 1967919..14b4fd4 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -158,7 +158,6 @@
 
 	rt288x_wdt_freq = clk_get_rate(rt288x_wdt_clk) / RALINK_WDT_PRESCALE;
 
-	rt288x_wdt_dev.dev = &pdev->dev;
 	rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause();
 	rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq);
 	rt288x_wdt_dev.parent = &pdev->dev;
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index b067edf..c7bdc98 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -72,10 +72,27 @@
 static struct timer_list softdog_ticktock =
 		TIMER_INITIALIZER(softdog_fire, 0, 0);
 
+static struct watchdog_device softdog_dev;
+
+static void softdog_pretimeout(unsigned long data)
+{
+	watchdog_notify_pretimeout(&softdog_dev);
+}
+
+static struct timer_list softdog_preticktock =
+		TIMER_INITIALIZER(softdog_pretimeout, 0, 0);
+
 static int softdog_ping(struct watchdog_device *w)
 {
 	if (!mod_timer(&softdog_ticktock, jiffies + (w->timeout * HZ)))
 		__module_get(THIS_MODULE);
+
+	if (w->pretimeout)
+		mod_timer(&softdog_preticktock, jiffies +
+			  (w->timeout - w->pretimeout) * HZ);
+	else
+		del_timer(&softdog_preticktock);
+
 	return 0;
 }
 
@@ -84,15 +101,18 @@
 	if (del_timer(&softdog_ticktock))
 		module_put(THIS_MODULE);
 
+	del_timer(&softdog_preticktock);
+
 	return 0;
 }
 
 static struct watchdog_info softdog_info = {
 	.identity = "Software Watchdog",
-	.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+	.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE |
+		   WDIOF_PRETIMEOUT,
 };
 
-static struct watchdog_ops softdog_ops = {
+static const struct watchdog_ops softdog_ops = {
 	.owner = THIS_MODULE,
 	.start = softdog_ping,
 	.stop = softdog_stop,
diff --git a/drivers/watchdog/st_lpc_wdt.c b/drivers/watchdog/st_lpc_wdt.c
index 14e9bad..e6100e4 100644
--- a/drivers/watchdog/st_lpc_wdt.c
+++ b/drivers/watchdog/st_lpc_wdt.c
@@ -52,27 +52,6 @@
 	bool warm_reset;
 };
 
-static struct st_wdog_syscfg stid127_syscfg = {
-	.reset_type_reg		= 0x004,
-	.reset_type_mask	= BIT(2),
-	.enable_reg		= 0x000,
-	.enable_mask		= BIT(2),
-};
-
-static struct st_wdog_syscfg stih415_syscfg = {
-	.reset_type_reg		= 0x0B8,
-	.reset_type_mask	= BIT(6),
-	.enable_reg		= 0x0B4,
-	.enable_mask		= BIT(7),
-};
-
-static struct st_wdog_syscfg stih416_syscfg = {
-	.reset_type_reg		= 0x88C,
-	.reset_type_mask	= BIT(6),
-	.enable_reg		= 0x888,
-	.enable_mask		= BIT(7),
-};
-
 static struct st_wdog_syscfg stih407_syscfg = {
 	.enable_reg		= 0x204,
 	.enable_mask		= BIT(19),
@@ -83,18 +62,6 @@
 		.compatible = "st,stih407-lpc",
 		.data = &stih407_syscfg,
 	},
-	{
-		.compatible = "st,stih416-lpc",
-		.data = &stih416_syscfg,
-	},
-	{
-		.compatible = "st,stih415-lpc",
-		.data = &stih415_syscfg,
-	},
-	{
-		.compatible = "st,stid127-lpc",
-		.data = &stid127_syscfg,
-	},
 	{},
 };
 MODULE_DEVICE_TABLE(of, st_wdog_match);
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
index 9ec5760..2d53c3f 100644
--- a/drivers/watchdog/tegra_wdt.c
+++ b/drivers/watchdog/tegra_wdt.c
@@ -178,7 +178,7 @@
 	.identity	= "Tegra Watchdog",
 };
 
-static struct watchdog_ops tegra_wdt_ops = {
+static const struct watchdog_ops tegra_wdt_ops = {
 	.owner = THIS_MODULE,
 	.start = tegra_wdt_start,
 	.stop = tegra_wdt_stop,
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index c2da880..6f7a9de 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -112,7 +112,7 @@
 		txx9_imclk = NULL;
 		goto exit;
 	}
-	ret = clk_enable(txx9_imclk);
+	ret = clk_prepare_enable(txx9_imclk);
 	if (ret) {
 		clk_put(txx9_imclk);
 		txx9_imclk = NULL;
@@ -144,7 +144,7 @@
 	return 0;
 exit:
 	if (txx9_imclk) {
-		clk_disable(txx9_imclk);
+		clk_disable_unprepare(txx9_imclk);
 		clk_put(txx9_imclk);
 	}
 	return ret;
@@ -153,7 +153,7 @@
 static int __exit txx9wdt_remove(struct platform_device *dev)
 {
 	watchdog_unregister_device(&txx9wdt);
-	clk_disable(txx9_imclk);
+	clk_disable_unprepare(txx9_imclk);
 	clk_put(txx9_imclk);
 	return 0;
 }
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 09e8003..ef2ecaf 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -302,7 +302,7 @@
 	.identity = "W83627HF Watchdog",
 };
 
-static struct watchdog_ops wdt_ops = {
+static const struct watchdog_ops wdt_ops = {
 	.owner = THIS_MODULE,
 	.start = wdt_start,
 	.stop = wdt_stop,
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 6abb83c..74265b2 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -349,7 +349,7 @@
 	struct watchdog_device **rcwdd;
 	int ret;
 
-	rcwdd = devres_alloc(devm_watchdog_unregister_device, sizeof(*wdd),
+	rcwdd = devres_alloc(devm_watchdog_unregister_device, sizeof(*rcwdd),
 			     GFP_KERNEL);
 	if (!rcwdd)
 		return -ENOMEM;
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 040bf83..32930a0 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -49,6 +49,7 @@
 #include <linux/uaccess.h>	/* For copy_to_user/put_user/... */
 
 #include "watchdog_core.h"
+#include "watchdog_pretimeout.h"
 
 /*
  * struct watchdog_core_data - watchdog core internal data
@@ -335,10 +336,14 @@
 	if (watchdog_timeout_invalid(wdd, timeout))
 		return -EINVAL;
 
-	if (wdd->ops->set_timeout)
+	if (wdd->ops->set_timeout) {
 		err = wdd->ops->set_timeout(wdd, timeout);
-	else
+	} else {
 		wdd->timeout = timeout;
+		/* Disable pretimeout if it doesn't fit the new timeout */
+		if (wdd->pretimeout >= wdd->timeout)
+			wdd->pretimeout = 0;
+	}
 
 	watchdog_update_worker(wdd);
 
@@ -346,6 +351,31 @@
 }
 
 /*
+ *	watchdog_set_pretimeout: set the watchdog timer pretimeout
+ *	@wdd: the watchdog device to set the timeout for
+ *	@timeout: pretimeout to set in seconds
+ */
+
+static int watchdog_set_pretimeout(struct watchdog_device *wdd,
+				   unsigned int timeout)
+{
+	int err = 0;
+
+	if (!(wdd->info->options & WDIOF_PRETIMEOUT))
+		return -EOPNOTSUPP;
+
+	if (watchdog_pretimeout_invalid(wdd, timeout))
+		return -EINVAL;
+
+	if (wdd->ops->set_pretimeout)
+		err = wdd->ops->set_pretimeout(wdd, timeout);
+	else
+		wdd->pretimeout = timeout;
+
+	return err;
+}
+
+/*
  *	watchdog_get_timeleft: wrapper to get the time left before a reboot
  *	@wdd: the watchdog device to get the remaining time from
  *	@timeleft: the time that's left
@@ -429,6 +459,15 @@
 }
 static DEVICE_ATTR_RO(timeout);
 
+static ssize_t pretimeout_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct watchdog_device *wdd = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", wdd->pretimeout);
+}
+static DEVICE_ATTR_RO(pretimeout);
+
 static ssize_t identity_show(struct device *dev, struct device_attribute *attr,
 				char *buf)
 {
@@ -450,6 +489,36 @@
 }
 static DEVICE_ATTR_RO(state);
 
+static ssize_t pretimeout_available_governors_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	return watchdog_pretimeout_available_governors_get(buf);
+}
+static DEVICE_ATTR_RO(pretimeout_available_governors);
+
+static ssize_t pretimeout_governor_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct watchdog_device *wdd = dev_get_drvdata(dev);
+
+	return watchdog_pretimeout_governor_get(wdd, buf);
+}
+
+static ssize_t pretimeout_governor_store(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
+{
+	struct watchdog_device *wdd = dev_get_drvdata(dev);
+	int ret = watchdog_pretimeout_governor_set(wdd, buf);
+
+	if (!ret)
+		ret = count;
+
+	return ret;
+}
+static DEVICE_ATTR_RW(pretimeout_governor);
+
 static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
 				int n)
 {
@@ -459,6 +528,14 @@
 
 	if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
 		mode = 0;
+	else if (attr == &dev_attr_pretimeout.attr &&
+		 !(wdd->info->options & WDIOF_PRETIMEOUT))
+		mode = 0;
+	else if ((attr == &dev_attr_pretimeout_governor.attr ||
+		  attr == &dev_attr_pretimeout_available_governors.attr) &&
+		 (!(wdd->info->options & WDIOF_PRETIMEOUT) ||
+		  !IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV)))
+		mode = 0;
 
 	return mode;
 }
@@ -466,10 +543,13 @@
 	&dev_attr_state.attr,
 	&dev_attr_identity.attr,
 	&dev_attr_timeout.attr,
+	&dev_attr_pretimeout.attr,
 	&dev_attr_timeleft.attr,
 	&dev_attr_bootstatus.attr,
 	&dev_attr_status.attr,
 	&dev_attr_nowayout.attr,
+	&dev_attr_pretimeout_governor.attr,
+	&dev_attr_pretimeout_available_governors.attr,
 	NULL,
 };
 
@@ -646,6 +726,16 @@
 			break;
 		err = put_user(val, p);
 		break;
+	case WDIOC_SETPRETIMEOUT:
+		if (get_user(val, p)) {
+			err = -EFAULT;
+			break;
+		}
+		err = watchdog_set_pretimeout(wdd, val);
+		break;
+	case WDIOC_GETPRETIMEOUT:
+		err = put_user(wdd->pretimeout, p);
+		break;
 	default:
 		err = -ENOTTY;
 		break;
@@ -937,6 +1027,12 @@
 		return PTR_ERR(dev);
 	}
 
+	ret = watchdog_register_pretimeout(wdd);
+	if (ret) {
+		device_destroy(&watchdog_class, devno);
+		watchdog_cdev_unregister(wdd);
+	}
+
 	return ret;
 }
 
@@ -950,6 +1046,7 @@
 
 void watchdog_dev_unregister(struct watchdog_device *wdd)
 {
+	watchdog_unregister_pretimeout(wdd);
 	device_destroy(&watchdog_class, wdd->wd_data->cdev.dev);
 	watchdog_cdev_unregister(wdd);
 }
diff --git a/drivers/watchdog/watchdog_pretimeout.c b/drivers/watchdog/watchdog_pretimeout.c
new file mode 100644
index 0000000..9db07bf
--- /dev/null
+++ b/drivers/watchdog/watchdog_pretimeout.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2015-2016 Mentor Graphics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/watchdog.h>
+
+#include "watchdog_pretimeout.h"
+
+/* Default watchdog pretimeout governor */
+static struct watchdog_governor *default_gov;
+
+/* The spinlock protects default_gov, wdd->gov and pretimeout_list */
+static DEFINE_SPINLOCK(pretimeout_lock);
+
+/* List of watchdog devices, which can generate a pretimeout event */
+static LIST_HEAD(pretimeout_list);
+
+struct watchdog_pretimeout {
+	struct watchdog_device		*wdd;
+	struct list_head		entry;
+};
+
+/* The mutex protects governor list and serializes external interfaces */
+static DEFINE_MUTEX(governor_lock);
+
+/* List of the registered watchdog pretimeout governors */
+static LIST_HEAD(governor_list);
+
+struct governor_priv {
+	struct watchdog_governor	*gov;
+	struct list_head		entry;
+};
+
+static struct governor_priv *find_governor_by_name(const char *gov_name)
+{
+	struct governor_priv *priv;
+
+	list_for_each_entry(priv, &governor_list, entry)
+		if (sysfs_streq(gov_name, priv->gov->name))
+			return priv;
+
+	return NULL;
+}
+
+int watchdog_pretimeout_available_governors_get(char *buf)
+{
+	struct governor_priv *priv;
+	int count = 0;
+
+	mutex_lock(&governor_lock);
+
+	list_for_each_entry(priv, &governor_list, entry)
+		count += sprintf(buf + count, "%s\n", priv->gov->name);
+
+	mutex_unlock(&governor_lock);
+
+	return count;
+}
+
+int watchdog_pretimeout_governor_get(struct watchdog_device *wdd, char *buf)
+{
+	int count = 0;
+
+	spin_lock_irq(&pretimeout_lock);
+	if (wdd->gov)
+		count = sprintf(buf, "%s\n", wdd->gov->name);
+	spin_unlock_irq(&pretimeout_lock);
+
+	return count;
+}
+
+int watchdog_pretimeout_governor_set(struct watchdog_device *wdd,
+				     const char *buf)
+{
+	struct governor_priv *priv;
+
+	mutex_lock(&governor_lock);
+
+	priv = find_governor_by_name(buf);
+	if (!priv) {
+		mutex_unlock(&governor_lock);
+		return -EINVAL;
+	}
+
+	spin_lock_irq(&pretimeout_lock);
+	wdd->gov = priv->gov;
+	spin_unlock_irq(&pretimeout_lock);
+
+	mutex_unlock(&governor_lock);
+
+	return 0;
+}
+
+void watchdog_notify_pretimeout(struct watchdog_device *wdd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pretimeout_lock, flags);
+	if (!wdd->gov) {
+		spin_unlock_irqrestore(&pretimeout_lock, flags);
+		return;
+	}
+
+	wdd->gov->pretimeout(wdd);
+	spin_unlock_irqrestore(&pretimeout_lock, flags);
+}
+EXPORT_SYMBOL_GPL(watchdog_notify_pretimeout);
+
+int watchdog_register_governor(struct watchdog_governor *gov)
+{
+	struct watchdog_pretimeout *p;
+	struct governor_priv *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	mutex_lock(&governor_lock);
+
+	if (find_governor_by_name(gov->name)) {
+		mutex_unlock(&governor_lock);
+		kfree(priv);
+		return -EBUSY;
+	}
+
+	priv->gov = gov;
+	list_add(&priv->entry, &governor_list);
+
+	if (!strncmp(gov->name, WATCHDOG_PRETIMEOUT_DEFAULT_GOV,
+		     WATCHDOG_GOV_NAME_MAXLEN)) {
+		spin_lock_irq(&pretimeout_lock);
+		default_gov = gov;
+
+		list_for_each_entry(p, &pretimeout_list, entry)
+			if (!p->wdd->gov)
+				p->wdd->gov = default_gov;
+		spin_unlock_irq(&pretimeout_lock);
+	}
+
+	mutex_unlock(&governor_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(watchdog_register_governor);
+
+void watchdog_unregister_governor(struct watchdog_governor *gov)
+{
+	struct watchdog_pretimeout *p;
+	struct governor_priv *priv, *t;
+
+	mutex_lock(&governor_lock);
+
+	list_for_each_entry_safe(priv, t, &governor_list, entry) {
+		if (priv->gov == gov) {
+			list_del(&priv->entry);
+			kfree(priv);
+			break;
+		}
+	}
+
+	spin_lock_irq(&pretimeout_lock);
+	list_for_each_entry(p, &pretimeout_list, entry)
+		if (p->wdd->gov == gov)
+			p->wdd->gov = default_gov;
+	spin_unlock_irq(&pretimeout_lock);
+
+	mutex_unlock(&governor_lock);
+}
+EXPORT_SYMBOL(watchdog_unregister_governor);
+
+int watchdog_register_pretimeout(struct watchdog_device *wdd)
+{
+	struct watchdog_pretimeout *p;
+
+	if (!(wdd->info->options & WDIOF_PRETIMEOUT))
+		return 0;
+
+	p = kzalloc(sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	spin_lock_irq(&pretimeout_lock);
+	list_add(&p->entry, &pretimeout_list);
+	p->wdd = wdd;
+	wdd->gov = default_gov;
+	spin_unlock_irq(&pretimeout_lock);
+
+	return 0;
+}
+
+void watchdog_unregister_pretimeout(struct watchdog_device *wdd)
+{
+	struct watchdog_pretimeout *p, *t;
+
+	if (!(wdd->info->options & WDIOF_PRETIMEOUT))
+		return;
+
+	spin_lock_irq(&pretimeout_lock);
+	wdd->gov = NULL;
+
+	list_for_each_entry_safe(p, t, &pretimeout_list, entry) {
+		if (p->wdd == wdd) {
+			list_del(&p->entry);
+			break;
+		}
+	}
+	spin_unlock_irq(&pretimeout_lock);
+
+	kfree(p);
+}
diff --git a/drivers/watchdog/watchdog_pretimeout.h b/drivers/watchdog/watchdog_pretimeout.h
new file mode 100644
index 0000000..a5a32b3
--- /dev/null
+++ b/drivers/watchdog/watchdog_pretimeout.h
@@ -0,0 +1,60 @@
+#ifndef __WATCHDOG_PRETIMEOUT_H
+#define __WATCHDOG_PRETIMEOUT_H
+
+#define WATCHDOG_GOV_NAME_MAXLEN	20
+
+struct watchdog_device;
+
+struct watchdog_governor {
+	const char	name[WATCHDOG_GOV_NAME_MAXLEN];
+	void		(*pretimeout)(struct watchdog_device *wdd);
+};
+
+#if IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV)
+/* Interfaces to watchdog pretimeout governors */
+int watchdog_register_governor(struct watchdog_governor *gov);
+void watchdog_unregister_governor(struct watchdog_governor *gov);
+
+/* Interfaces to watchdog_dev.c */
+int watchdog_register_pretimeout(struct watchdog_device *wdd);
+void watchdog_unregister_pretimeout(struct watchdog_device *wdd);
+int watchdog_pretimeout_available_governors_get(char *buf);
+int watchdog_pretimeout_governor_get(struct watchdog_device *wdd, char *buf);
+int watchdog_pretimeout_governor_set(struct watchdog_device *wdd,
+				     const char *buf);
+
+#if IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP)
+#define WATCHDOG_PRETIMEOUT_DEFAULT_GOV		"noop"
+#elif IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC)
+#define WATCHDOG_PRETIMEOUT_DEFAULT_GOV		"panic"
+#endif
+
+#else
+static inline int watchdog_register_pretimeout(struct watchdog_device *wdd)
+{
+	return 0;
+}
+
+static inline void watchdog_unregister_pretimeout(struct watchdog_device *wdd)
+{
+}
+
+static inline int watchdog_pretimeout_available_governors_get(char *buf)
+{
+	return -EINVAL;
+}
+
+static inline int watchdog_pretimeout_governor_get(struct watchdog_device *wdd,
+						   char *buf)
+{
+	return -EINVAL;
+}
+
+static inline int watchdog_pretimeout_governor_set(struct watchdog_device *wdd,
+						   const char *buf)
+{
+	return -EINVAL;
+}
+#endif
+
+#endif
diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
index fa1efef..b4e0cea 100644
--- a/drivers/watchdog/ziirave_wdt.c
+++ b/drivers/watchdog/ziirave_wdt.c
@@ -18,7 +18,10 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/delay.h>
 #include <linux/i2c.h>
+#include <linux/ihex.h>
+#include <linux/firmware.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -36,6 +39,8 @@
 #define ZIIRAVE_STATE_OFF	0x1
 #define ZIIRAVE_STATE_ON	0x2
 
+#define ZIIRAVE_FW_NAME		"ziirave_wdt.fw"
+
 static char *ziirave_reasons[] = {"power cycle", "hw watchdog", NULL, NULL,
 				  "host request", NULL, "illegal configuration",
 				  "illegal instruction", "illegal trap",
@@ -50,12 +55,35 @@
 #define ZIIRAVE_WDT_PING		0x9
 #define ZIIRAVE_WDT_RESET_DURATION	0xa
 
+#define ZIIRAVE_FIRM_PKT_TOTAL_SIZE	20
+#define ZIIRAVE_FIRM_PKT_DATA_SIZE	16
+#define ZIIRAVE_FIRM_FLASH_MEMORY_START	0x1600
+#define ZIIRAVE_FIRM_FLASH_MEMORY_END	0x2bbf
+
+/* Received and ready for next Download packet. */
+#define ZIIRAVE_FIRM_DOWNLOAD_ACK	1
+/* Currently writing to flash. Retry Download status in a moment! */
+#define ZIIRAVE_FIRM_DOWNLOAD_BUSY	2
+
+/* Wait for ACK timeout in ms */
+#define ZIIRAVE_FIRM_WAIT_FOR_ACK_TIMEOUT	50
+
+/* Firmware commands */
+#define ZIIRAVE_CMD_DOWNLOAD_START		0x10
+#define ZIIRAVE_CMD_DOWNLOAD_END		0x11
+#define ZIIRAVE_CMD_DOWNLOAD_SET_READ_ADDR	0x12
+#define ZIIRAVE_CMD_DOWNLOAD_READ_BYTE		0x13
+#define ZIIRAVE_CMD_RESET_PROCESSOR		0x0b
+#define ZIIRAVE_CMD_JUMP_TO_BOOTLOADER		0x0c
+#define ZIIRAVE_CMD_DOWNLOAD_PACKET		0x0e
+
 struct ziirave_wdt_rev {
 	unsigned char major;
 	unsigned char minor;
 };
 
 struct ziirave_wdt_data {
+	struct mutex sysfs_mutex;
 	struct watchdog_device wdd;
 	struct ziirave_wdt_rev bootloader_rev;
 	struct ziirave_wdt_rev firmware_rev;
@@ -146,6 +174,293 @@
 	return ret;
 }
 
+static int ziirave_firm_wait_for_ack(struct watchdog_device *wdd)
+{
+	struct i2c_client *client = to_i2c_client(wdd->parent);
+	int ret;
+	unsigned long timeout;
+
+	timeout = jiffies + msecs_to_jiffies(ZIIRAVE_FIRM_WAIT_FOR_ACK_TIMEOUT);
+	do {
+		if (time_after(jiffies, timeout))
+			return -ETIMEDOUT;
+
+		usleep_range(5000, 10000);
+
+		ret = i2c_smbus_read_byte(client);
+		if (ret < 0) {
+			dev_err(&client->dev, "Failed to read byte\n");
+			return ret;
+		}
+	} while (ret == ZIIRAVE_FIRM_DOWNLOAD_BUSY);
+
+	return ret == ZIIRAVE_FIRM_DOWNLOAD_ACK ? 0 : -EIO;
+}
+
+static int ziirave_firm_set_read_addr(struct watchdog_device *wdd, u16 addr)
+{
+	struct i2c_client *client = to_i2c_client(wdd->parent);
+	u8 address[2];
+
+	address[0] = addr & 0xff;
+	address[1] = (addr >> 8) & 0xff;
+
+	return i2c_smbus_write_block_data(client,
+					  ZIIRAVE_CMD_DOWNLOAD_SET_READ_ADDR,
+					  ARRAY_SIZE(address), address);
+}
+
+static int ziirave_firm_write_block_data(struct watchdog_device *wdd,
+					 u8 command, u8 length, const u8 *data,
+					 bool wait_for_ack)
+{
+	struct i2c_client *client = to_i2c_client(wdd->parent);
+	int ret;
+
+	ret = i2c_smbus_write_block_data(client, command, length, data);
+	if (ret) {
+		dev_err(&client->dev,
+			"Failed to send command 0x%02x: %d\n", command, ret);
+		return ret;
+	}
+
+	if (wait_for_ack)
+		ret = ziirave_firm_wait_for_ack(wdd);
+
+	return ret;
+}
+
+static int ziirave_firm_write_byte(struct watchdog_device *wdd, u8 command,
+				   u8 byte, bool wait_for_ack)
+{
+	return ziirave_firm_write_block_data(wdd, command, 1, &byte,
+					     wait_for_ack);
+}
+
+/*
+ * ziirave_firm_write_pkt() - Build and write a firmware packet
+ *
+ * A packet to send to the firmware is composed by following bytes:
+ *     Length | Addr0 | Addr1 | Data0 .. Data15 | Checksum |
+ * Where,
+ *     Length: A data byte containing the length of the data.
+ *     Addr0: Low byte of the address.
+ *     Addr1: High byte of the address.
+ *     Data0 .. Data15: Array of 16 bytes of data.
+ *     Checksum: Checksum byte to verify data integrity.
+ */
+static int ziirave_firm_write_pkt(struct watchdog_device *wdd,
+				  const struct ihex_binrec *rec)
+{
+	struct i2c_client *client = to_i2c_client(wdd->parent);
+	u8 i, checksum = 0, packet[ZIIRAVE_FIRM_PKT_TOTAL_SIZE];
+	int ret;
+	u16 addr;
+
+	memset(packet, 0, ARRAY_SIZE(packet));
+
+	/* Packet length */
+	packet[0] = (u8)be16_to_cpu(rec->len);
+	/* Packet address */
+	addr = (be32_to_cpu(rec->addr) & 0xffff) >> 1;
+	packet[1] = addr & 0xff;
+	packet[2] = (addr & 0xff00) >> 8;
+
+	/* Packet data */
+	if (be16_to_cpu(rec->len) > ZIIRAVE_FIRM_PKT_DATA_SIZE)
+		return -EMSGSIZE;
+	memcpy(packet + 3, rec->data, be16_to_cpu(rec->len));
+
+	/* Packet checksum */
+	for (i = 0; i < ZIIRAVE_FIRM_PKT_TOTAL_SIZE - 1; i++)
+		checksum += packet[i];
+	packet[ZIIRAVE_FIRM_PKT_TOTAL_SIZE - 1] = checksum;
+
+	ret = ziirave_firm_write_block_data(wdd, ZIIRAVE_CMD_DOWNLOAD_PACKET,
+					    ARRAY_SIZE(packet), packet, true);
+	if (ret)
+		dev_err(&client->dev,
+		      "Failed to write firmware packet at address 0x%04x: %d\n",
+		      addr, ret);
+
+	return ret;
+}
+
+static int ziirave_firm_verify(struct watchdog_device *wdd,
+			       const struct firmware *fw)
+{
+	struct i2c_client *client = to_i2c_client(wdd->parent);
+	const struct ihex_binrec *rec;
+	int i, ret;
+	u8 data[ZIIRAVE_FIRM_PKT_DATA_SIZE];
+	u16 addr;
+
+	for (rec = (void *)fw->data; rec; rec = ihex_next_binrec(rec)) {
+		/* Zero length marks end of records */
+		if (!be16_to_cpu(rec->len))
+			break;
+
+		addr = (be32_to_cpu(rec->addr) & 0xffff) >> 1;
+		if (addr < ZIIRAVE_FIRM_FLASH_MEMORY_START ||
+		    addr > ZIIRAVE_FIRM_FLASH_MEMORY_END)
+			continue;
+
+		ret = ziirave_firm_set_read_addr(wdd, addr);
+		if (ret) {
+			dev_err(&client->dev,
+				"Failed to send SET_READ_ADDR command: %d\n",
+				ret);
+			return ret;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(data); i++) {
+			ret = i2c_smbus_read_byte_data(client,
+						ZIIRAVE_CMD_DOWNLOAD_READ_BYTE);
+			if (ret < 0) {
+				dev_err(&client->dev,
+					"Failed to READ DATA: %d\n", ret);
+				return ret;
+			}
+			data[i] = ret;
+		}
+
+		if (memcmp(data, rec->data, be16_to_cpu(rec->len))) {
+			dev_err(&client->dev,
+				"Firmware mismatch at address 0x%04x\n", addr);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int ziirave_firm_upload(struct watchdog_device *wdd,
+			       const struct firmware *fw)
+{
+	struct i2c_client *client = to_i2c_client(wdd->parent);
+	int ret, words_till_page_break;
+	const struct ihex_binrec *rec;
+	struct ihex_binrec *rec_new;
+
+	ret = ziirave_firm_write_byte(wdd, ZIIRAVE_CMD_JUMP_TO_BOOTLOADER, 1,
+				      false);
+	if (ret)
+		return ret;
+
+	msleep(500);
+
+	ret = ziirave_firm_write_byte(wdd, ZIIRAVE_CMD_DOWNLOAD_START, 1, true);
+	if (ret)
+		return ret;
+
+	msleep(500);
+
+	for (rec = (void *)fw->data; rec; rec = ihex_next_binrec(rec)) {
+		/* Zero length marks end of records */
+		if (!be16_to_cpu(rec->len))
+			break;
+
+		/* Check max data size */
+		if (be16_to_cpu(rec->len) > ZIIRAVE_FIRM_PKT_DATA_SIZE) {
+			dev_err(&client->dev, "Firmware packet too long (%d)\n",
+				be16_to_cpu(rec->len));
+			return -EMSGSIZE;
+		}
+
+		/* Calculate words till page break */
+		words_till_page_break = (64 - ((be32_to_cpu(rec->addr) >> 1) &
+					 0x3f));
+		if ((be16_to_cpu(rec->len) >> 1) > words_till_page_break) {
+			/*
+			 * Data in passes page boundary, so we need to split in
+			 * two blocks of data. Create a packet with the first
+			 * block of data.
+			 */
+			rec_new = kzalloc(sizeof(struct ihex_binrec) +
+					  (words_till_page_break << 1),
+					  GFP_KERNEL);
+			if (!rec_new)
+				return -ENOMEM;
+
+			rec_new->len = cpu_to_be16(words_till_page_break << 1);
+			rec_new->addr = rec->addr;
+			memcpy(rec_new->data, rec->data,
+			       be16_to_cpu(rec_new->len));
+
+			ret = ziirave_firm_write_pkt(wdd, rec_new);
+			kfree(rec_new);
+			if (ret)
+				return ret;
+
+			/* Create a packet with the second block of data */
+			rec_new = kzalloc(sizeof(struct ihex_binrec) +
+					  be16_to_cpu(rec->len) -
+					  (words_till_page_break << 1),
+					  GFP_KERNEL);
+			if (!rec_new)
+				return -ENOMEM;
+
+			/* Remaining bytes */
+			rec_new->len = rec->len -
+				       cpu_to_be16(words_till_page_break << 1);
+
+			rec_new->addr = cpu_to_be32(be32_to_cpu(rec->addr) +
+					(words_till_page_break << 1));
+
+			memcpy(rec_new->data,
+			       rec->data + (words_till_page_break << 1),
+			       be16_to_cpu(rec_new->len));
+
+			ret = ziirave_firm_write_pkt(wdd, rec_new);
+			kfree(rec_new);
+			if (ret)
+				return ret;
+		} else {
+			ret = ziirave_firm_write_pkt(wdd, rec);
+			if (ret)
+				return ret;
+		}
+	}
+
+	/* For end of download, the length field will be set to 0 */
+	rec_new = kzalloc(sizeof(struct ihex_binrec) + 1, GFP_KERNEL);
+	if (!rec_new)
+		return -ENOMEM;
+
+	ret = ziirave_firm_write_pkt(wdd, rec_new);
+	kfree(rec_new);
+	if (ret) {
+		dev_err(&client->dev, "Failed to send EMPTY packet: %d\n", ret);
+		return ret;
+	}
+
+	/* This sleep seems to be required */
+	msleep(20);
+
+	/* Start firmware verification */
+	ret = ziirave_firm_verify(wdd, fw);
+	if (ret) {
+		dev_err(&client->dev,
+			"Failed to verify firmware: %d\n", ret);
+		return ret;
+	}
+
+	/* End download operation */
+	ret = ziirave_firm_write_byte(wdd, ZIIRAVE_CMD_DOWNLOAD_END, 1, false);
+	if (ret)
+		return ret;
+
+	/* Reset the processor */
+	ret = ziirave_firm_write_byte(wdd, ZIIRAVE_CMD_RESET_PROCESSOR, 1,
+				      false);
+	if (ret)
+		return ret;
+
+	msleep(500);
+
+	return 0;
+}
+
 static const struct watchdog_info ziirave_wdt_info = {
 	.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
 	.identity = "Zodiac RAVE Watchdog",
@@ -166,9 +481,18 @@
 {
 	struct i2c_client *client = to_i2c_client(dev->parent);
 	struct ziirave_wdt_data *w_priv = i2c_get_clientdata(client);
+	int ret;
 
-	return sprintf(buf, "02.%02u.%02u", w_priv->firmware_rev.major,
-		       w_priv->firmware_rev.minor);
+	ret = mutex_lock_interruptible(&w_priv->sysfs_mutex);
+	if (ret)
+		return ret;
+
+	ret = sprintf(buf, "02.%02u.%02u", w_priv->firmware_rev.major,
+		      w_priv->firmware_rev.minor);
+
+	mutex_unlock(&w_priv->sysfs_mutex);
+
+	return ret;
 }
 
 static DEVICE_ATTR(firmware_version, S_IRUGO, ziirave_wdt_sysfs_show_firm,
@@ -180,9 +504,18 @@
 {
 	struct i2c_client *client = to_i2c_client(dev->parent);
 	struct ziirave_wdt_data *w_priv = i2c_get_clientdata(client);
+	int ret;
 
-	return sprintf(buf, "01.%02u.%02u", w_priv->bootloader_rev.major,
-		       w_priv->bootloader_rev.minor);
+	ret = mutex_lock_interruptible(&w_priv->sysfs_mutex);
+	if (ret)
+		return ret;
+
+	ret = sprintf(buf, "01.%02u.%02u", w_priv->bootloader_rev.major,
+		      w_priv->bootloader_rev.minor);
+
+	mutex_unlock(&w_priv->sysfs_mutex);
+
+	return ret;
 }
 
 static DEVICE_ATTR(bootloader_version, S_IRUGO, ziirave_wdt_sysfs_show_boot,
@@ -194,17 +527,81 @@
 {
 	struct i2c_client *client = to_i2c_client(dev->parent);
 	struct ziirave_wdt_data *w_priv = i2c_get_clientdata(client);
+	int ret;
 
-	return sprintf(buf, "%s", ziirave_reasons[w_priv->reset_reason]);
+	ret = mutex_lock_interruptible(&w_priv->sysfs_mutex);
+	if (ret)
+		return ret;
+
+	ret = sprintf(buf, "%s", ziirave_reasons[w_priv->reset_reason]);
+
+	mutex_unlock(&w_priv->sysfs_mutex);
+
+	return ret;
 }
 
 static DEVICE_ATTR(reset_reason, S_IRUGO, ziirave_wdt_sysfs_show_reason,
 		   NULL);
 
+static ssize_t ziirave_wdt_sysfs_store_firm(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev->parent);
+	struct ziirave_wdt_data *w_priv = i2c_get_clientdata(client);
+	const struct firmware *fw;
+	int err;
+
+	err = request_ihex_firmware(&fw, ZIIRAVE_FW_NAME, dev);
+	if (err) {
+		dev_err(&client->dev, "Failed to request ihex firmware\n");
+		return err;
+	}
+
+	err = mutex_lock_interruptible(&w_priv->sysfs_mutex);
+	if (err)
+		goto release_firmware;
+
+	err = ziirave_firm_upload(&w_priv->wdd, fw);
+	if (err) {
+		dev_err(&client->dev, "The firmware update failed: %d\n", err);
+		goto unlock_mutex;
+	}
+
+	/* Update firmware version */
+	err = ziirave_wdt_revision(client, &w_priv->firmware_rev,
+				   ZIIRAVE_WDT_FIRM_VER_MAJOR);
+	if (err) {
+		dev_err(&client->dev, "Failed to read firmware version: %d\n",
+			err);
+		goto unlock_mutex;
+	}
+
+	dev_info(&client->dev, "Firmware updated to version 02.%02u.%02u\n",
+		 w_priv->firmware_rev.major, w_priv->firmware_rev.minor);
+
+	/* Restore the watchdog timeout */
+	err = ziirave_wdt_set_timeout(&w_priv->wdd, w_priv->wdd.timeout);
+	if (err)
+		dev_err(&client->dev, "Failed to set timeout: %d\n", err);
+
+unlock_mutex:
+	mutex_unlock(&w_priv->sysfs_mutex);
+
+release_firmware:
+	release_firmware(fw);
+
+	return err ? err : count;
+}
+
+static DEVICE_ATTR(update_firmware, S_IWUSR, NULL,
+		   ziirave_wdt_sysfs_store_firm);
+
 static struct attribute *ziirave_wdt_attrs[] = {
 	&dev_attr_firmware_version.attr,
 	&dev_attr_bootloader_version.attr,
 	&dev_attr_reset_reason.attr,
+	&dev_attr_update_firmware.attr,
 	NULL
 };
 ATTRIBUTE_GROUPS(ziirave_wdt);
@@ -252,6 +649,8 @@
 	if (!w_priv)
 		return -ENOMEM;
 
+	mutex_init(&w_priv->sysfs_mutex);
+
 	w_priv->wdd.info = &ziirave_wdt_info;
 	w_priv->wdd.ops = &ziirave_wdt_ops;
 	w_priv->wdd.min_timeout = ZIIRAVE_TIMEOUT_MIN;
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 5b6a174..b3c2cc7 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -276,32 +276,26 @@
 	switch (handler->flags) {
 	case ACL_TYPE_ACCESS:
 		if (acl) {
-			umode_t mode = inode->i_mode;
-			retval = posix_acl_equiv_mode(acl, &mode);
-			if (retval < 0)
+			struct iattr iattr;
+
+			retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl);
+			if (retval)
 				goto err_out;
-			else {
-				struct iattr iattr;
-				if (retval == 0) {
-					/*
-					 * ACL can be represented
-					 * by the mode bits. So don't
-					 * update ACL.
-					 */
-					acl = NULL;
-					value = NULL;
-					size = 0;
-				}
-				/* Updte the mode bits */
-				iattr.ia_mode = ((mode & S_IALLUGO) |
-						 (inode->i_mode & ~S_IALLUGO));
-				iattr.ia_valid = ATTR_MODE;
-				/* FIXME should we update ctime ?
-				 * What is the following setxattr update the
-				 * mode ?
+			if (!acl) {
+				/*
+				 * ACL can be represented
+				 * by the mode bits. So don't
+				 * update ACL.
 				 */
-				v9fs_vfs_setattr_dotl(dentry, &iattr);
+				value = NULL;
+				size = 0;
 			}
+			iattr.ia_valid = ATTR_MODE;
+			/* FIXME should we update ctime ?
+			 * What is the following setxattr update the
+			 * mode ?
+			 */
+			v9fs_vfs_setattr_dotl(dentry, &iattr);
 		}
 		break;
 	case ACL_TYPE_DEFAULT:
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 6877050..443d12e 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -148,7 +148,8 @@
 extern int v9fs_vfs_unlink(struct inode *i, struct dentry *d);
 extern int v9fs_vfs_rmdir(struct inode *i, struct dentry *d);
 extern int v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
-			struct inode *new_dir, struct dentry *new_dentry);
+			   struct inode *new_dir, struct dentry *new_dentry,
+			   unsigned int flags);
 extern struct inode *v9fs_inode_from_fid(struct v9fs_session_info *v9ses,
 					 struct p9_fid *fid,
 					 struct super_block *sb, int new);
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 8b1999b..30ca770 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -276,7 +276,7 @@
 	inode_init_owner(inode, NULL, mode);
 	inode->i_blocks = 0;
 	inode->i_rdev = rdev;
-	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 	inode->i_mapping->a_ops = &v9fs_addr_operations;
 
 	switch (mode & S_IFMT) {
@@ -955,7 +955,8 @@
 
 int
 v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
-		struct inode *new_dir, struct dentry *new_dentry)
+		struct inode *new_dir, struct dentry *new_dentry,
+		unsigned int flags)
 {
 	int retval;
 	struct inode *old_inode;
@@ -966,6 +967,9 @@
 	struct p9_fid *newdirfid;
 	struct p9_wstat wstat;
 
+	if (flags)
+		return -EINVAL;
+
 	p9_debug(P9_DEBUG_VFS, "\n");
 	retval = 0;
 	old_inode = d_inode(old_dentry);
@@ -1094,7 +1098,7 @@
 	struct p9_wstat wstat;
 
 	p9_debug(P9_DEBUG_VFS, "\n");
-	retval = inode_change_ok(d_inode(dentry), iattr);
+	retval = setattr_prepare(dentry, iattr);
 	if (retval)
 		return retval;
 
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index eeabcb0..afaa4b6 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -558,7 +558,7 @@
 
 	p9_debug(P9_DEBUG_VFS, "\n");
 
-	retval = inode_change_ok(inode, iattr);
+	retval = setattr_prepare(dentry, iattr);
 	if (retval)
 		return retval;
 
@@ -967,9 +967,6 @@
 	.rename = v9fs_vfs_rename,
 	.getattr = v9fs_vfs_getattr_dotl,
 	.setattr = v9fs_vfs_setattr_dotl,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
-	.removexattr = generic_removexattr,
 	.listxattr = v9fs_listxattr,
 	.get_acl = v9fs_iop_get_acl,
 };
@@ -977,9 +974,6 @@
 const struct inode_operations v9fs_file_inode_operations_dotl = {
 	.getattr = v9fs_vfs_getattr_dotl,
 	.setattr = v9fs_vfs_setattr_dotl,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
-	.removexattr = generic_removexattr,
 	.listxattr = v9fs_listxattr,
 	.get_acl = v9fs_iop_get_acl,
 };
@@ -989,8 +983,5 @@
 	.get_link = v9fs_vfs_get_link_dotl,
 	.getattr = v9fs_vfs_getattr_dotl,
 	.setattr = v9fs_vfs_setattr_dotl,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
-	.removexattr = generic_removexattr,
 	.listxattr = v9fs_listxattr,
 };
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 335055d..8dbd36f 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -199,7 +199,7 @@
 	return;
 
  cur_time:
-	*tv = CURRENT_TIME;
+	*tv = current_time(inode);
 	return;
 
  too_early:
@@ -303,7 +303,7 @@
 	unsigned int ia_valid = attr->ia_valid;
 	int error;
 	
-	error = inode_change_ok(inode, attr);
+	error = setattr_prepare(dentry, attr);
 
 	/*
 	 * we can't change the UID or GID of any file -
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index cc2b2ef..2f08877 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -173,7 +173,8 @@
 extern int	affs_symlink(struct inode *dir, struct dentry *dentry,
 			     const char *symname);
 extern int	affs_rename(struct inode *old_dir, struct dentry *old_dentry,
-			    struct inode *new_dir, struct dentry *new_dentry);
+			    struct inode *new_dir, struct dentry *new_dentry,
+			    unsigned int flags);
 
 /* inode.c */
 
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index d8f217c..0ec65c1 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -58,7 +58,7 @@
 	mark_buffer_dirty_inode(dir_bh, dir);
 	affs_brelse(dir_bh);
 
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	dir->i_version++;
 	mark_inode_dirty(dir);
 
@@ -112,7 +112,7 @@
 
 	affs_brelse(bh);
 
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	dir->i_version++;
 	mark_inode_dirty(dir);
 
@@ -313,7 +313,7 @@
 	else
 		clear_nlink(inode);
 	affs_unlock_link(inode);
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	mark_inode_dirty(inode);
 
 done:
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 0fdb0f5..fe4e129 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -219,7 +219,7 @@
 
 	pr_debug("notify_change(%lu,0x%x)\n", inode->i_ino, attr->ia_valid);
 
-	error = inode_change_ok(inode,attr);
+	error = setattr_prepare(dentry, attr);
 	if (error)
 		goto out;
 
@@ -309,7 +309,7 @@
 	inode->i_gid     = current_fsgid();
 	inode->i_ino     = block;
 	set_nlink(inode, 1);
-	inode->i_mtime   = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_mtime   = inode->i_atime = inode->i_ctime = current_time(inode);
 	atomic_set(&AFFS_I(inode)->i_opencnt, 0);
 	AFFS_I(inode)->i_blkcnt = 0;
 	AFFS_I(inode)->i_lc = NULL;
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index a2d68f8..29186d2 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -414,12 +414,16 @@
 
 int
 affs_rename(struct inode *old_dir, struct dentry *old_dentry,
-	    struct inode *new_dir, struct dentry *new_dentry)
+	    struct inode *new_dir, struct dentry *new_dentry,
+	    unsigned int flags)
 {
 	struct super_block *sb = old_dir->i_sb;
 	struct buffer_head *bh = NULL;
 	int retval;
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	pr_debug("%s(old=%lu,\"%pd\" to new=%lu,\"%pd\")\n", __func__,
 		 old_dir->i_ino, old_dentry, new_dir->i_ino, new_dentry);
 
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index eba5410..51a241e 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -38,7 +38,8 @@
 static int afs_symlink(struct inode *dir, struct dentry *dentry,
 		       const char *content);
 static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
-		      struct inode *new_dir, struct dentry *new_dentry);
+		      struct inode *new_dir, struct dentry *new_dentry,
+		      unsigned int flags);
 
 const struct file_operations afs_dir_file_operations = {
 	.open		= afs_dir_open,
@@ -1083,12 +1084,16 @@
  * rename a file in an AFS filesystem and/or move it between directories
  */
 static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
-		      struct inode *new_dir, struct dentry *new_dentry)
+		      struct inode *new_dir, struct dentry *new_dentry,
+		      unsigned int flags)
 {
 	struct afs_vnode *orig_dvnode, *new_dvnode, *vnode;
 	struct key *key;
 	int ret;
 
+	if (flags)
+		return -EINVAL;
+
 	vnode = AFS_FS_I(d_inode(old_dentry));
 	orig_dvnode = AFS_FS_I(old_dir);
 	new_dvnode = AFS_FS_I(new_dir);
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 59bdaa7..477928b 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -418,7 +418,7 @@
 						     &call->abort_code);
 			if (ret == -EINPROGRESS || ret == -EAGAIN)
 				return;
-			if (ret == 1) {
+			if (ret == 1 || ret < 0) {
 				call->state = AFS_CALL_COMPLETE;
 				goto done;
 			}
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 14d506e..f865c3f 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -398,8 +398,7 @@
 		switch (ret) {
 		case -EDQUOT:
 		case -ENOSPC:
-			set_bit(AS_ENOSPC,
-				&wb->vnode->vfs_inode.i_mapping->flags);
+			mapping_set_error(wb->vnode->vfs_inode.i_mapping, -ENOSPC);
 			break;
 		case -EROFS:
 		case -EIO:
@@ -409,7 +408,7 @@
 		case -ENOMEDIUM:
 		case -ENXIO:
 			afs_kill_pages(wb->vnode, true, first, last);
-			set_bit(AS_EIO, &wb->vnode->vfs_inode.i_mapping->flags);
+			mapping_set_error(wb->vnode->vfs_inode.i_mapping, -EIO);
 			break;
 		case -EACCES:
 		case -EPERM:
diff --git a/fs/aio.c b/fs/aio.c
index 4fe81d1..1157e13 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -274,14 +274,17 @@
 static void put_aio_ring_file(struct kioctx *ctx)
 {
 	struct file *aio_ring_file = ctx->aio_ring_file;
+	struct address_space *i_mapping;
+
 	if (aio_ring_file) {
 		truncate_setsize(aio_ring_file->f_inode, 0);
 
 		/* Prevent further access to the kioctx from migratepages */
-		spin_lock(&aio_ring_file->f_inode->i_mapping->private_lock);
-		aio_ring_file->f_inode->i_mapping->private_data = NULL;
+		i_mapping = aio_ring_file->f_inode->i_mapping;
+		spin_lock(&i_mapping->private_lock);
+		i_mapping->private_data = NULL;
 		ctx->aio_ring_file = NULL;
-		spin_unlock(&aio_ring_file->f_inode->i_mapping->private_lock);
+		spin_unlock(&i_mapping->private_lock);
 
 		fput(aio_ring_file);
 	}
diff --git a/fs/attr.c b/fs/attr.c
index 42bb42b..c902b3d 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -17,19 +17,22 @@
 #include <linux/ima.h>
 
 /**
- * inode_change_ok - check if attribute changes to an inode are allowed
- * @inode:	inode to check
+ * setattr_prepare - check if attribute changes to a dentry are allowed
+ * @dentry:	dentry to check
  * @attr:	attributes to change
  *
  * Check if we are allowed to change the attributes contained in @attr
- * in the given inode.  This includes the normal unix access permission
- * checks, as well as checks for rlimits and others.
+ * in the given dentry.  This includes the normal unix access permission
+ * checks, as well as checks for rlimits and others. The function also clears
+ * SGID bit from mode if user is not allowed to set it. Also file capabilities
+ * and IMA extended attributes are cleared if ATTR_KILL_PRIV is set.
  *
  * Should be called as the first thing in ->setattr implementations,
  * possibly after taking additional locks.
  */
-int inode_change_ok(const struct inode *inode, struct iattr *attr)
+int setattr_prepare(struct dentry *dentry, struct iattr *attr)
 {
+	struct inode *inode = d_inode(dentry);
 	unsigned int ia_valid = attr->ia_valid;
 
 	/*
@@ -44,7 +47,7 @@
 
 	/* If force is set do it anyway. */
 	if (ia_valid & ATTR_FORCE)
-		return 0;
+		goto kill_priv;
 
 	/* Make sure a caller can chown. */
 	if ((ia_valid & ATTR_UID) &&
@@ -77,9 +80,19 @@
 			return -EPERM;
 	}
 
+kill_priv:
+	/* User has permission for the change */
+	if (ia_valid & ATTR_KILL_PRIV) {
+		int error;
+
+		error = security_inode_killpriv(dentry);
+		if (error)
+			return error;
+	}
+
 	return 0;
 }
-EXPORT_SYMBOL(inode_change_ok);
+EXPORT_SYMBOL(setattr_prepare);
 
 /**
  * inode_newsize_ok - may this inode be truncated to a given size
@@ -202,6 +215,21 @@
 			return -EPERM;
 	}
 
+	/*
+	 * If utimes(2) and friends are called with times == NULL (or both
+	 * times are UTIME_NOW), then we need to check for write permission
+	 */
+	if (ia_valid & ATTR_TOUCH) {
+		if (IS_IMMUTABLE(inode))
+			return -EPERM;
+
+		if (!inode_owner_or_capable(inode)) {
+			error = inode_permission(inode, MAY_WRITE);
+			if (error)
+				return error;
+		}
+	}
+
 	if ((ia_valid & ATTR_MODE)) {
 		umode_t amode = attr->ia_mode;
 		/* Flag setting protected by i_mutex */
@@ -209,7 +237,7 @@
 			inode->i_flags &= ~S_NOSEC;
 	}
 
-	now = current_fs_time(inode->i_sb);
+	now = current_time(inode);
 
 	attr->ia_ctime = now;
 	if (!(ia_valid & ATTR_ATIME_SET))
@@ -217,13 +245,11 @@
 	if (!(ia_valid & ATTR_MTIME_SET))
 		attr->ia_mtime = now;
 	if (ia_valid & ATTR_KILL_PRIV) {
-		attr->ia_valid &= ~ATTR_KILL_PRIV;
-		ia_valid &= ~ATTR_KILL_PRIV;
 		error = security_inode_need_killpriv(dentry);
-		if (error > 0)
-			error = security_inode_killpriv(dentry);
-		if (error)
+		if (error < 0)
 			return error;
+		if (error == 0)
+			ia_valid = attr->ia_valid &= ~ATTR_KILL_PRIV;
 	}
 
 	/*
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index a439548..a1fba42 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -20,7 +20,8 @@
 #define AUTOFS_IOC_COUNT     32
 
 #define AUTOFS_DEV_IOCTL_IOC_FIRST	(AUTOFS_DEV_IOCTL_VERSION)
-#define AUTOFS_DEV_IOCTL_IOC_COUNT	(AUTOFS_IOC_COUNT - 11)
+#define AUTOFS_DEV_IOCTL_IOC_COUNT \
+	(AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD - AUTOFS_DEV_IOCTL_VERSION_CMD)
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -33,8 +34,6 @@
 #include <asm/current.h>
 #include <linux/uaccess.h>
 
-/* #define DEBUG */
-
 #ifdef pr_fmt
 #undef pr_fmt
 #endif
@@ -111,8 +110,6 @@
 	int max_proto;
 	unsigned long exp_timeout;
 	unsigned int type;
-	int reghost_enabled;
-	int needs_reghost;
 	struct super_block *sb;
 	struct mutex wq_mutex;
 	struct mutex pipe_mutex;
@@ -271,4 +268,4 @@
 	}
 }
 
-extern void autofs4_kill_sb(struct super_block *);
+void autofs4_kill_sb(struct super_block *);
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index c7fcc74..fc09eb7 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -75,7 +75,7 @@
 	if ((param->ver_major != AUTOFS_DEV_IOCTL_VERSION_MAJOR) ||
 	    (param->ver_minor > AUTOFS_DEV_IOCTL_VERSION_MINOR)) {
 		pr_warn("ioctl control interface version mismatch: "
-			"kernel(%u.%u), user(%u.%u), cmd(%d)\n",
+			"kernel(%u.%u), user(%u.%u), cmd(0x%08x)\n",
 			AUTOFS_DEV_IOCTL_VERSION_MAJOR,
 			AUTOFS_DEV_IOCTL_VERSION_MINOR,
 			param->ver_major, param->ver_minor, cmd);
@@ -172,6 +172,17 @@
 	return sbi;
 }
 
+/* Return autofs dev ioctl version */
+static int autofs_dev_ioctl_version(struct file *fp,
+				    struct autofs_sb_info *sbi,
+				    struct autofs_dev_ioctl *param)
+{
+	/* This should have already been set. */
+	param->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
+	param->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
+	return 0;
+}
+
 /* Return autofs module protocol version */
 static int autofs_dev_ioctl_protover(struct file *fp,
 				     struct autofs_sb_info *sbi,
@@ -586,41 +597,25 @@
 
 static ioctl_fn lookup_dev_ioctl(unsigned int cmd)
 {
-	static struct {
-		int cmd;
-		ioctl_fn fn;
-	} _ioctls[] = {
-		{cmd_idx(AUTOFS_DEV_IOCTL_VERSION_CMD), NULL},
-		{cmd_idx(AUTOFS_DEV_IOCTL_PROTOVER_CMD),
-			 autofs_dev_ioctl_protover},
-		{cmd_idx(AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD),
-			 autofs_dev_ioctl_protosubver},
-		{cmd_idx(AUTOFS_DEV_IOCTL_OPENMOUNT_CMD),
-			 autofs_dev_ioctl_openmount},
-		{cmd_idx(AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD),
-			 autofs_dev_ioctl_closemount},
-		{cmd_idx(AUTOFS_DEV_IOCTL_READY_CMD),
-			 autofs_dev_ioctl_ready},
-		{cmd_idx(AUTOFS_DEV_IOCTL_FAIL_CMD),
-			 autofs_dev_ioctl_fail},
-		{cmd_idx(AUTOFS_DEV_IOCTL_SETPIPEFD_CMD),
-			 autofs_dev_ioctl_setpipefd},
-		{cmd_idx(AUTOFS_DEV_IOCTL_CATATONIC_CMD),
-			 autofs_dev_ioctl_catatonic},
-		{cmd_idx(AUTOFS_DEV_IOCTL_TIMEOUT_CMD),
-			 autofs_dev_ioctl_timeout},
-		{cmd_idx(AUTOFS_DEV_IOCTL_REQUESTER_CMD),
-			 autofs_dev_ioctl_requester},
-		{cmd_idx(AUTOFS_DEV_IOCTL_EXPIRE_CMD),
-			 autofs_dev_ioctl_expire},
-		{cmd_idx(AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD),
-			 autofs_dev_ioctl_askumount},
-		{cmd_idx(AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD),
-			 autofs_dev_ioctl_ismountpoint}
+	static ioctl_fn _ioctls[] = {
+		autofs_dev_ioctl_version,
+		autofs_dev_ioctl_protover,
+		autofs_dev_ioctl_protosubver,
+		autofs_dev_ioctl_openmount,
+		autofs_dev_ioctl_closemount,
+		autofs_dev_ioctl_ready,
+		autofs_dev_ioctl_fail,
+		autofs_dev_ioctl_setpipefd,
+		autofs_dev_ioctl_catatonic,
+		autofs_dev_ioctl_timeout,
+		autofs_dev_ioctl_requester,
+		autofs_dev_ioctl_expire,
+		autofs_dev_ioctl_askumount,
+		autofs_dev_ioctl_ismountpoint,
 	};
 	unsigned int idx = cmd_idx(cmd);
 
-	return (idx >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[idx].fn;
+	return (idx >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[idx];
 }
 
 /* ioctl dispatcher */
@@ -642,7 +637,7 @@
 	cmd = _IOC_NR(command);
 
 	if (_IOC_TYPE(command) != _IOC_TYPE(AUTOFS_DEV_IOCTL_IOC_FIRST) ||
-	    cmd - cmd_first >= AUTOFS_DEV_IOCTL_IOC_COUNT) {
+	    cmd - cmd_first > AUTOFS_DEV_IOCTL_IOC_COUNT) {
 		return -ENOTTY;
 	}
 
@@ -655,14 +650,11 @@
 	if (err)
 		goto out;
 
-	/* The validate routine above always sets the version */
-	if (cmd == AUTOFS_DEV_IOCTL_VERSION_CMD)
-		goto done;
-
 	fn = lookup_dev_ioctl(cmd);
 	if (!fn) {
 		pr_warn("unknown command 0x%08x\n", command);
-		return -ENOTTY;
+		err = -ENOTTY;
+		goto out;
 	}
 
 	fp = NULL;
@@ -671,9 +663,11 @@
 	/*
 	 * For obvious reasons the openmount can't have a file
 	 * descriptor yet. We don't take a reference to the
-	 * file during close to allow for immediate release.
+	 * file during close to allow for immediate release,
+	 * and the same for retrieving ioctl version.
 	 */
-	if (cmd != AUTOFS_DEV_IOCTL_OPENMOUNT_CMD &&
+	if (cmd != AUTOFS_DEV_IOCTL_VERSION_CMD &&
+	    cmd != AUTOFS_DEV_IOCTL_OPENMOUNT_CMD &&
 	    cmd != AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD) {
 		fp = fget(param->ioctlfd);
 		if (!fp) {
@@ -706,7 +700,6 @@
 
 	if (fp)
 		fput(fp);
-done:
 	if (err >= 0 && copy_to_user(user, param, AUTOFS_DEV_IOCTL_SIZE))
 		err = -EFAULT;
 out:
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 61b2105..438b5bf 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -274,6 +274,23 @@
 		goto fail_dput;
 	}
 
+	/* Test versions first */
+	if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION ||
+	    sbi->min_proto > AUTOFS_MAX_PROTO_VERSION) {
+		pr_err("kernel does not match daemon version "
+		       "daemon (%d, %d) kernel (%d, %d)\n",
+		       sbi->min_proto, sbi->max_proto,
+		       AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION);
+		goto fail_dput;
+	}
+
+	/* Establish highest kernel protocol version */
+	if (sbi->max_proto > AUTOFS_MAX_PROTO_VERSION)
+		sbi->version = AUTOFS_MAX_PROTO_VERSION;
+	else
+		sbi->version = sbi->max_proto;
+	sbi->sub_version = AUTOFS_PROTO_SUBVERSION;
+
 	if (pgrp_set) {
 		sbi->oz_pgrp = find_get_pid(pgrp);
 		if (!sbi->oz_pgrp) {
@@ -291,29 +308,12 @@
 	root_inode->i_fop = &autofs4_root_operations;
 	root_inode->i_op = &autofs4_dir_inode_operations;
 
-	/* Couldn't this be tested earlier? */
-	if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION ||
-	    sbi->min_proto > AUTOFS_MAX_PROTO_VERSION) {
-		pr_err("kernel does not match daemon version "
-		       "daemon (%d, %d) kernel (%d, %d)\n",
-		       sbi->min_proto, sbi->max_proto,
-		       AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION);
-		goto fail_dput;
-	}
-
-	/* Establish highest kernel protocol version */
-	if (sbi->max_proto > AUTOFS_MAX_PROTO_VERSION)
-		sbi->version = AUTOFS_MAX_PROTO_VERSION;
-	else
-		sbi->version = sbi->max_proto;
-	sbi->sub_version = AUTOFS_PROTO_SUBVERSION;
-
 	pr_debug("pipe fd = %d, pgrp = %u\n", pipefd, pid_nr(sbi->oz_pgrp));
 	pipe = fget(pipefd);
 
 	if (!pipe) {
 		pr_err("could not open pipe file descriptor\n");
-		goto fail_dput;
+		goto fail_put_pid;
 	}
 	ret = autofs_prepare_pipe(pipe);
 	if (ret < 0)
@@ -334,14 +334,14 @@
 fail_fput:
 	pr_err("pipe file descriptor does not contain proper ops\n");
 	fput(pipe);
-	/* fall through */
+fail_put_pid:
+	put_pid(sbi->oz_pgrp);
 fail_dput:
 	dput(root);
 	goto fail_free;
 fail_ino:
-	kfree(ino);
+	autofs4_free_ino(ino);
 fail_free:
-	put_pid(sbi->oz_pgrp);
 	kfree(sbi);
 	s->s_fs_info = NULL;
 	return ret;
@@ -359,7 +359,7 @@
 		inode->i_uid = d_inode(sb->s_root)->i_uid;
 		inode->i_gid = d_inode(sb->s_root)->i_gid;
 	}
-	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 	inode->i_ino = get_next_ino();
 
 	if (S_ISDIR(mode)) {
@@ -368,7 +368,8 @@
 		inode->i_fop = &autofs4_dir_operations;
 	} else if (S_ISLNK(mode)) {
 		inode->i_op = &autofs4_symlink_inode_operations;
-	}
+	} else
+		WARN_ON(1);
 
 	return inode;
 }
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index fa84bb8..a11f731 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -577,8 +577,6 @@
 	inode = autofs4_get_inode(dir->i_sb, S_IFLNK | 0555);
 	if (!inode) {
 		kfree(cp);
-		if (!dentry->d_fsdata)
-			kfree(ino);
 		return -ENOMEM;
 	}
 	inode->i_private = cp;
@@ -591,7 +589,7 @@
 	if (p_ino && !IS_ROOT(dentry))
 		atomic_inc(&p_ino->count);
 
-	dir->i_mtime = CURRENT_TIME;
+	dir->i_mtime = current_time(dir);
 
 	return 0;
 }
@@ -631,7 +629,7 @@
 	d_inode(dentry)->i_size = 0;
 	clear_nlink(d_inode(dentry));
 
-	dir->i_mtime = CURRENT_TIME;
+	dir->i_mtime = current_time(dir);
 
 	spin_lock(&sbi->lookup_lock);
 	__autofs4_add_expiring(dentry);
@@ -762,7 +760,7 @@
 	if (p_ino && !IS_ROOT(dentry))
 		atomic_inc(&p_ino->count);
 	inc_nlink(dir);
-	dir->i_mtime = CURRENT_TIME;
+	dir->i_mtime = current_time(dir);
 
 	return 0;
 }
@@ -842,7 +840,7 @@
 	if (may_umount(mnt))
 		status = 1;
 
-	pr_debug("returning %d\n", status);
+	pr_debug("may umount %d\n", status);
 
 	status = put_user(status, p);
 
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 3ba385e..8712062 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -100,29 +100,12 @@
 	return -EIO;
 }
 
-static int bad_inode_setxattr(struct dentry *dentry, struct inode *inode,
-		const char *name, const void *value, size_t size, int flags)
-{
-	return -EIO;
-}
-
-static ssize_t bad_inode_getxattr(struct dentry *dentry, struct inode *inode,
-			const char *name, void *buffer, size_t size)
-{
-	return -EIO;
-}
-
 static ssize_t bad_inode_listxattr(struct dentry *dentry, char *buffer,
 			size_t buffer_size)
 {
 	return -EIO;
 }
 
-static int bad_inode_removexattr(struct dentry *dentry, const char *name)
-{
-	return -EIO;
-}
-
 static const struct inode_operations bad_inode_ops =
 {
 	.create		= bad_inode_create,
@@ -133,7 +116,7 @@
 	.mkdir		= bad_inode_mkdir,
 	.rmdir		= bad_inode_rmdir,
 	.mknod		= bad_inode_mknod,
-	.rename2	= bad_inode_rename2,
+	.rename		= bad_inode_rename2,
 	.readlink	= bad_inode_readlink,
 	/* follow_link must be no-op, otherwise unmounting this inode
 	   won't work */
@@ -142,10 +125,7 @@
 	.permission	= bad_inode_permission,
 	.getattr	= bad_inode_getattr,
 	.setattr	= bad_inode_setattr,
-	.setxattr	= bad_inode_setxattr,
-	.getxattr	= bad_inode_getxattr,
 	.listxattr	= bad_inode_listxattr,
-	.removexattr	= bad_inode_removexattr,
 };
 
 
@@ -173,8 +153,9 @@
 
 	inode->i_mode = S_IFREG;
 	inode->i_atime = inode->i_mtime = inode->i_ctime =
-		current_fs_time(inode->i_sb);
+		current_time(inode);
 	inode->i_op = &bad_inode_ops;	
+	inode->i_opflags &= ~IOP_XATTR;
 	inode->i_fop = &bad_file_ops;	
 }
 EXPORT_SYMBOL(make_bad_inode);
diff --git a/fs/befs/befs.h b/fs/befs/befs.h
index e0f59263a..c6bad51 100644
--- a/fs/befs/befs.h
+++ b/fs/befs/befs.h
@@ -43,7 +43,10 @@
 	u32 ag_shift;
 	u32 num_ags;
 
-	/* jornal log entry */
+	/* State of the superblock */
+	u32 flags;
+
+	/* Journal log entry */
 	befs_block_run log_blocks;
 	befs_off_t log_start;
 	befs_off_t log_end;
@@ -79,7 +82,7 @@
 	BEFS_BT_END,
 	BEFS_BT_EMPTY,
 	BEFS_BT_MATCH,
-	BEFS_BT_PARMATCH,
+	BEFS_BT_OVERFLOW,
 	BEFS_BT_NOT_FOUND
 };
 
@@ -140,18 +143,6 @@
 	return BEFS_SB(sb)->block_size / sizeof (befs_disk_inode_addr);
 }
 
-static inline int
-befs_iaddr_is_empty(const befs_inode_addr *iaddr)
-{
-	return (!iaddr->allocation_group) && (!iaddr->start) && (!iaddr->len);
-}
-
-static inline size_t
-befs_brun_size(struct super_block *sb, befs_block_run run)
-{
-	return BEFS_SB(sb)->block_size * run.len;
-}
-
 #include "endian.h"
 
 #endif				/* _LINUX_BEFS_H */
diff --git a/fs/befs/btree.c b/fs/befs/btree.c
index 307645f9..7e135ea 100644
--- a/fs/befs/btree.c
+++ b/fs/befs/btree.c
@@ -85,7 +85,7 @@
 };
 
 /* local constants */
-static const befs_off_t befs_bt_inval = 0xffffffffffffffffULL;
+static const befs_off_t BEFS_BT_INVAL = 0xffffffffffffffffULL;
 
 /* local functions */
 static int befs_btree_seekleaf(struct super_block *sb, const befs_data_stream *ds,
@@ -156,8 +156,6 @@
 	sup->max_depth = fs32_to_cpu(sb, od_sup->max_depth);
 	sup->data_type = fs32_to_cpu(sb, od_sup->data_type);
 	sup->root_node_ptr = fs64_to_cpu(sb, od_sup->root_node_ptr);
-	sup->free_node_ptr = fs64_to_cpu(sb, od_sup->free_node_ptr);
-	sup->max_size = fs64_to_cpu(sb, od_sup->max_size);
 
 	brelse(bh);
 	if (sup->magic != BEFS_BTREE_MAGIC) {
@@ -183,8 +181,8 @@
  * Calls befs_read_datastream to read in the indicated btree node and
  * makes sure its header fields are in cpu byteorder, byteswapping if
  * necessary.
- * Note: node->bh must be NULL when this function called first
- * time. Don't forget brelse(node->bh) after last call.
+ * Note: node->bh must be NULL when this function is called the first time.
+ * Don't forget brelse(node->bh) after last call.
  *
  * On success, returns BEFS_OK and *@node contains the btree node that
  * starts at @node_off, with the node->head fields in cpu byte order.
@@ -244,7 +242,7 @@
  *   Read the superblock and rootnode of the b+tree.
  *   Drill down through the interior nodes using befs_find_key().
  *   Once at the correct leaf node, use befs_find_key() again to get the
- *   actuall value stored with the key.
+ *   actual value stored with the key.
  */
 int
 befs_btree_find(struct super_block *sb, const befs_data_stream *ds,
@@ -283,9 +281,9 @@
 
 	while (!befs_leafnode(this_node)) {
 		res = befs_find_key(sb, this_node, key, &node_off);
-		if (res == BEFS_BT_NOT_FOUND)
+		/* if no key set, try the overflow node */
+		if (res == BEFS_BT_OVERFLOW)
 			node_off = this_node->head.overflow;
-		/* if no match, go to overflow node */
 		if (befs_bt_read_node(sb, ds, this_node, node_off) != BEFS_OK) {
 			befs_error(sb, "befs_btree_find() failed to read "
 				   "node at %llu", node_off);
@@ -293,15 +291,15 @@
 		}
 	}
 
-	/* at the correct leaf node now */
-
+	/* at a leaf node now, check if it is correct */
 	res = befs_find_key(sb, this_node, key, value);
 
 	brelse(this_node->bh);
 	kfree(this_node);
 
 	if (res != BEFS_BT_MATCH) {
-		befs_debug(sb, "<--- %s Key %s not found", __func__, key);
+		befs_error(sb, "<--- %s Key %s not found", __func__, key);
+		befs_debug(sb, "<--- %s ERROR", __func__);
 		*value = 0;
 		return BEFS_BT_NOT_FOUND;
 	}
@@ -324,16 +322,12 @@
  * @findkey: Keystring to search for
  * @value: If key is found, the value stored with the key is put here
  *
- * finds exact match if one exists, and returns BEFS_BT_MATCH
- * If no exact match, finds first key in node that is greater
- * (alphabetically) than the search key and returns BEFS_BT_PARMATCH
- * (for partial match, I guess). Can you think of something better to
- * call it?
+ * Finds exact match if one exists, and returns BEFS_BT_MATCH.
+ * If there is no match and node's value array is too small for key, return
+ * BEFS_BT_OVERFLOW.
+ * If no match and node should countain this key, return BEFS_BT_NOT_FOUND.
  *
- * If no key was a match or greater than the search key, return
- * BEFS_BT_NOT_FOUND.
- *
- * Use binary search instead of a linear.
+ * Uses binary search instead of a linear.
  */
 static int
 befs_find_key(struct super_block *sb, struct befs_btree_node *node,
@@ -348,18 +342,16 @@
 
 	befs_debug(sb, "---> %s %s", __func__, findkey);
 
-	*value = 0;
-
 	findkey_len = strlen(findkey);
 
-	/* if node can not contain key, just skeep this node */
+	/* if node can not contain key, just skip this node */
 	last = node->head.all_key_count - 1;
 	thiskey = befs_bt_get_key(sb, node, last, &keylen);
 
 	eq = befs_compare_strings(thiskey, keylen, findkey, findkey_len);
 	if (eq < 0) {
-		befs_debug(sb, "<--- %s %s not found", __func__, findkey);
-		return BEFS_BT_NOT_FOUND;
+		befs_debug(sb, "<--- node can't contain %s", findkey);
+		return BEFS_BT_OVERFLOW;
 	}
 
 	valarray = befs_bt_valarray(node);
@@ -387,12 +379,15 @@
 		else
 			first = mid + 1;
 	}
+
+	/* return an existing value so caller can arrive to a leaf node */
 	if (eq < 0)
 		*value = fs64_to_cpu(sb, valarray[mid + 1]);
 	else
 		*value = fs64_to_cpu(sb, valarray[mid]);
-	befs_debug(sb, "<--- %s found %s at %d", __func__, thiskey, mid);
-	return BEFS_BT_PARMATCH;
+	befs_error(sb, "<--- %s %s not found", __func__, findkey);
+	befs_debug(sb, "<--- %s ERROR", __func__);
+	return BEFS_BT_NOT_FOUND;
 }
 
 /**
@@ -405,7 +400,7 @@
  * @keysize: Length of the returned key
  * @value: Value stored with the returned key
  *
- * Heres how it works: Key_no is the index of the key/value pair to 
+ * Here's how it works: Key_no is the index of the key/value pair to
  * return in keybuf/value.
  * Bufsize is the size of keybuf (BEFS_NAME_LEN+1 is a good size). Keysize is 
  * the number of characters in the key (just a convenience).
@@ -422,7 +417,7 @@
 {
 	struct befs_btree_node *this_node;
 	befs_btree_super bt_super;
-	befs_off_t node_off = 0;
+	befs_off_t node_off;
 	int cur_key;
 	fs64 *valarray;
 	char *keystart;
@@ -467,7 +462,7 @@
 	while (key_sum + this_node->head.all_key_count <= key_no) {
 
 		/* no more nodes to look in: key_no is too large */
-		if (this_node->head.right == befs_bt_inval) {
+		if (this_node->head.right == BEFS_BT_INVAL) {
 			*keysize = 0;
 			*value = 0;
 			befs_debug(sb,
@@ -541,7 +536,6 @@
  * @node_off: Pointer to offset of current node within datastream. Modified
  * 		by the function.
  *
- *
  * Helper function for btree traverse. Moves the current position to the 
  * start of the first leaf node.
  *
@@ -608,7 +602,7 @@
 befs_leafnode(struct befs_btree_node *node)
 {
 	/* all interior nodes (and only interior nodes) have an overflow node */
-	if (node->head.overflow == befs_bt_inval)
+	if (node->head.overflow == BEFS_BT_INVAL)
 		return 1;
 	else
 		return 0;
@@ -715,7 +709,7 @@
  *
  * Returns 0 if @key1 and @key2 are equal.
  * Returns >0 if @key1 is greater.
- * Returns <0 if @key2 is greater..
+ * Returns <0 if @key2 is greater.
  */
 static int
 befs_compare_strings(const void *key1, int keylen1,
diff --git a/fs/befs/datastream.c b/fs/befs/datastream.c
index af1bc19..b4c7ba0 100644
--- a/fs/befs/datastream.c
+++ b/fs/befs/datastream.c
@@ -22,22 +22,22 @@
 
 static int befs_find_brun_direct(struct super_block *sb,
 				 const befs_data_stream *data,
-				 befs_blocknr_t blockno, befs_block_run * run);
+				 befs_blocknr_t blockno, befs_block_run *run);
 
 static int befs_find_brun_indirect(struct super_block *sb,
 				   const befs_data_stream *data,
 				   befs_blocknr_t blockno,
-				   befs_block_run * run);
+				   befs_block_run *run);
 
 static int befs_find_brun_dblindirect(struct super_block *sb,
 				      const befs_data_stream *data,
 				      befs_blocknr_t blockno,
-				      befs_block_run * run);
+				      befs_block_run *run);
 
 /**
  * befs_read_datastream - get buffer_head containing data, starting from pos.
  * @sb: Filesystem superblock
- * @ds: datastrem to find data with
+ * @ds: datastream to find data with
  * @pos: start of data
  * @off: offset of data in buffer_head->b_data
  *
@@ -46,7 +46,7 @@
  */
 struct buffer_head *
 befs_read_datastream(struct super_block *sb, const befs_data_stream *ds,
-		     befs_off_t pos, uint * off)
+		     befs_off_t pos, uint *off)
 {
 	struct buffer_head *bh;
 	befs_block_run run;
@@ -75,7 +75,13 @@
 	return bh;
 }
 
-/*
+/**
+ * befs_fblock2brun - give back block run for fblock
+ * @sb: the superblock
+ * @data: datastream to read from
+ * @fblock: the blocknumber with the file position to find
+ * @run: The found run is passed back through this pointer
+ *
  * Takes a file position and gives back a brun who's starting block
  * is block number fblock of the file.
  * 
@@ -88,7 +94,7 @@
  */
 int
 befs_fblock2brun(struct super_block *sb, const befs_data_stream *data,
-		 befs_blocknr_t fblock, befs_block_run * run)
+		 befs_blocknr_t fblock, befs_block_run *run)
 {
 	int err;
 	befs_off_t pos = fblock << BEFS_SB(sb)->block_shift;
@@ -115,7 +121,7 @@
 /**
  * befs_read_lsmylink - read long symlink from datastream.
  * @sb: Filesystem superblock 
- * @ds: Datastrem to read from
+ * @ds: Datastream to read from
  * @buff: Buffer in which to place long symlink data
  * @len: Length of the long symlink in bytes
  *
@@ -128,6 +134,7 @@
 	befs_off_t bytes_read = 0;	/* bytes readed */
 	u16 plen;
 	struct buffer_head *bh;
+
 	befs_debug(sb, "---> %s length: %llu", __func__, len);
 
 	while (bytes_read < len) {
@@ -183,13 +190,13 @@
 		metablocks += ds->indirect.len;
 
 	/*
-	   Double indir block, plus all the indirect blocks it mapps
-	   In the double-indirect range, all block runs of data are
-	   BEFS_DBLINDIR_BRUN_LEN blocks long. Therefore, we know 
-	   how many data block runs are in the double-indirect region,
-	   and from that we know how many indirect blocks it takes to
-	   map them. We assume that the indirect blocks are also
-	   BEFS_DBLINDIR_BRUN_LEN blocks long.
+	 * Double indir block, plus all the indirect blocks it maps.
+	 * In the double-indirect range, all block runs of data are
+	 * BEFS_DBLINDIR_BRUN_LEN blocks long. Therefore, we know
+	 * how many data block runs are in the double-indirect region,
+	 * and from that we know how many indirect blocks it takes to
+	 * map them. We assume that the indirect blocks are also
+	 * BEFS_DBLINDIR_BRUN_LEN blocks long.
 	 */
 	if (ds->size > ds->max_indirect_range && ds->max_indirect_range != 0) {
 		uint dbl_bytes;
@@ -212,58 +219,50 @@
 	return blocks;
 }
 
-/*
-	Finds the block run that starts at file block number blockno
-	in the file represented by the datastream data, if that 
-	blockno is in the direct region of the datastream.
-	
-	sb: the superblock
-	data: the datastream
-	blockno: the blocknumber to find
-	run: The found run is passed back through this pointer
-	
-	Return value is BEFS_OK if the blockrun is found, BEFS_ERR
-	otherwise.
-	
-	Algorithm:
-	Linear search. Checks each element of array[] to see if it
-	contains the blockno-th filesystem block. This is necessary
-	because the block runs map variable amounts of data. Simply
-	keeps a count of the number of blocks searched so far (sum),
-	incrementing this by the length of each block run as we come
-	across it. Adds sum to *count before returning (this is so
-	you can search multiple arrays that are logicaly one array,
-	as in the indirect region code).
-	
-	When/if blockno is found, if blockno is inside of a block 
-	run as stored on disk, we offset the start and length members
-	of the block run, so that blockno is the start and len is
-	still valid (the run ends in the same place).
-	
-	2001-11-15 Will Dyson
-*/
+/**
+ * befs_find_brun_direct - find a direct block run in the datastream
+ * @sb: the superblock
+ * @data: the datastream
+ * @blockno: the blocknumber to find
+ * @run: The found run is passed back through this pointer
+ *
+ * Finds the block run that starts at file block number blockno
+ * in the file represented by the datastream data, if that
+ * blockno is in the direct region of the datastream.
+ *
+ * Return value is BEFS_OK if the blockrun is found, BEFS_ERR
+ * otherwise.
+ *
+ * Algorithm:
+ * Linear search. Checks each element of array[] to see if it
+ * contains the blockno-th filesystem block. This is necessary
+ * because the block runs map variable amounts of data. Simply
+ * keeps a count of the number of blocks searched so far (sum),
+ * incrementing this by the length of each block run as we come
+ * across it. Adds sum to *count before returning (this is so
+ * you can search multiple arrays that are logicaly one array,
+ * as in the indirect region code).
+ *
+ * When/if blockno is found, if blockno is inside of a block
+ * run as stored on disk, we offset the start and length members
+ * of the block run, so that blockno is the start and len is
+ * still valid (the run ends in the same place).
+ */
 static int
 befs_find_brun_direct(struct super_block *sb, const befs_data_stream *data,
-		      befs_blocknr_t blockno, befs_block_run * run)
+		      befs_blocknr_t blockno, befs_block_run *run)
 {
 	int i;
 	const befs_block_run *array = data->direct;
 	befs_blocknr_t sum;
-	befs_blocknr_t max_block =
-	    data->max_direct_range >> BEFS_SB(sb)->block_shift;
 
 	befs_debug(sb, "---> %s, find %lu", __func__, (unsigned long)blockno);
 
-	if (blockno > max_block) {
-		befs_error(sb, "%s passed block outside of direct region",
-			   __func__);
-		return BEFS_ERR;
-	}
-
 	for (i = 0, sum = 0; i < BEFS_NUM_DIRECT_BLOCKS;
 	     sum += array[i].len, i++) {
 		if (blockno >= sum && blockno < sum + (array[i].len)) {
 			int offset = blockno - sum;
+
 			run->allocation_group = array[i].allocation_group;
 			run->start = array[i].start + offset;
 			run->len = array[i].len - offset;
@@ -275,38 +274,39 @@
 		}
 	}
 
+	befs_error(sb, "%s failed to find file block %lu", __func__,
+		   (unsigned long)blockno);
 	befs_debug(sb, "---> %s ERROR", __func__);
 	return BEFS_ERR;
 }
 
-/*
-	Finds the block run that starts at file block number blockno
-	in the file represented by the datastream data, if that 
-	blockno is in the indirect region of the datastream.
-	
-	sb: the superblock
-	data: the datastream
-	blockno: the blocknumber to find
-	run: The found run is passed back through this pointer
-	
-	Return value is BEFS_OK if the blockrun is found, BEFS_ERR
-	otherwise.
-	
-	Algorithm:
-	For each block in the indirect run of the datastream, read
-	it in and search through it for	search_blk.
-	
-	XXX:
-	Really should check to make sure blockno is inside indirect
-	region.
-	
-	2001-11-15 Will Dyson
-*/
+/**
+ * befs_find_brun_indirect - find a block run in the datastream
+ * @sb: the superblock
+ * @data: the datastream
+ * @blockno: the blocknumber to find
+ * @run: The found run is passed back through this pointer
+ *
+ * Finds the block run that starts at file block number blockno
+ * in the file represented by the datastream data, if that
+ * blockno is in the indirect region of the datastream.
+ *
+ * Return value is BEFS_OK if the blockrun is found, BEFS_ERR
+ * otherwise.
+ *
+ * Algorithm:
+ * For each block in the indirect run of the datastream, read
+ * it in and search through it for search_blk.
+ *
+ * XXX:
+ * Really should check to make sure blockno is inside indirect
+ * region.
+ */
 static int
 befs_find_brun_indirect(struct super_block *sb,
 			const befs_data_stream *data,
 			befs_blocknr_t blockno,
-			befs_block_run * run)
+			befs_block_run *run)
 {
 	int i, j;
 	befs_blocknr_t sum = 0;
@@ -326,11 +326,12 @@
 
 	/* Examine blocks of the indirect run one at a time */
 	for (i = 0; i < indirect.len; i++) {
-		indirblock = befs_bread(sb, indirblockno + i);
+		indirblock = sb_bread(sb, indirblockno + i);
 		if (indirblock == NULL) {
-			befs_debug(sb, "---> %s failed to read "
+			befs_error(sb, "---> %s failed to read "
 				   "disk block %lu from the indirect brun",
 				   __func__, (unsigned long)indirblockno + i);
+			befs_debug(sb, "<--- %s ERROR", __func__);
 			return BEFS_ERR;
 		}
 
@@ -370,52 +371,51 @@
 	return BEFS_ERR;
 }
 
-/*
-	Finds the block run that starts at file block number blockno
-	in the file represented by the datastream data, if that 
-	blockno is in the double-indirect region of the datastream.
-	
-	sb: the superblock
-	data: the datastream
-	blockno: the blocknumber to find
-	run: The found run is passed back through this pointer
-	
-	Return value is BEFS_OK if the blockrun is found, BEFS_ERR
-	otherwise.
-	
-	Algorithm:
-	The block runs in the double-indirect region are different.
-	They are always allocated 4 fs blocks at a time, so each
-	block run maps a constant amount of file data. This means
-	that we can directly calculate how many block runs into the
-	double-indirect region we need to go to get to the one that
-	maps a particular filesystem block.
-	
-	We do this in two stages. First we calculate which of the
-	inode addresses in the double-indirect block will point us
-	to the indirect block that contains the mapping for the data,
-	then we calculate which of the inode addresses in that 
-	indirect block maps the data block we are after.
-	
-	Oh, and once we've done that, we actually read in the blocks 
-	that contain the inode addresses we calculated above. Even 
-	though the double-indirect run may be several blocks long, 
-	we can calculate which of those blocks will contain the index
-	we are after and only read that one. We then follow it to 
-	the indirect block and perform a  similar process to find
-	the actual block run that maps the data block we are interested
-	in.
-	
-	Then we offset the run as in befs_find_brun_array() and we are 
-	done.
-	
-	2001-11-15 Will Dyson
-*/
+/**
+ * befs_find_brun_dblindirect - find a block run in the datastream
+ * @sb: the superblock
+ * @data: the datastream
+ * @blockno: the blocknumber to find
+ * @run: The found run is passed back through this pointer
+ *
+ * Finds the block run that starts at file block number blockno
+ * in the file represented by the datastream data, if that
+ * blockno is in the double-indirect region of the datastream.
+ *
+ * Return value is BEFS_OK if the blockrun is found, BEFS_ERR
+ * otherwise.
+ *
+ * Algorithm:
+ * The block runs in the double-indirect region are different.
+ * They are always allocated 4 fs blocks at a time, so each
+ * block run maps a constant amount of file data. This means
+ * that we can directly calculate how many block runs into the
+ * double-indirect region we need to go to get to the one that
+ * maps a particular filesystem block.
+ *
+ * We do this in two stages. First we calculate which of the
+ * inode addresses in the double-indirect block will point us
+ * to the indirect block that contains the mapping for the data,
+ * then we calculate which of the inode addresses in that
+ * indirect block maps the data block we are after.
+ *
+ * Oh, and once we've done that, we actually read in the blocks
+ * that contain the inode addresses we calculated above. Even
+ * though the double-indirect run may be several blocks long,
+ * we can calculate which of those blocks will contain the index
+ * we are after and only read that one. We then follow it to
+ * the indirect block and perform a similar process to find
+ * the actual block run that maps the data block we are interested
+ * in.
+ *
+ * Then we offset the run as in befs_find_brun_array() and we are
+ * done.
+ */
 static int
 befs_find_brun_dblindirect(struct super_block *sb,
 			   const befs_data_stream *data,
 			   befs_blocknr_t blockno,
-			   befs_block_run * run)
+			   befs_block_run *run)
 {
 	int dblindir_indx;
 	int indir_indx;
@@ -430,10 +430,9 @@
 	struct buffer_head *indir_block;
 	befs_block_run indir_run;
 	befs_disk_inode_addr *iaddr_array;
-	struct befs_sb_info *befs_sb = BEFS_SB(sb);
 
 	befs_blocknr_t indir_start_blk =
-	    data->max_indirect_range >> befs_sb->block_shift;
+	    data->max_indirect_range >> BEFS_SB(sb)->block_shift;
 
 	off_t dbl_indir_off = blockno - indir_start_blk;
 
@@ -471,7 +470,7 @@
 	}
 
 	dbl_indir_block =
-	    befs_bread(sb, iaddr2blockno(sb, &data->double_indirect) +
+	    sb_bread(sb, iaddr2blockno(sb, &data->double_indirect) +
 					dbl_which_block);
 	if (dbl_indir_block == NULL) {
 		befs_error(sb, "%s couldn't read the "
@@ -479,7 +478,6 @@
 			   (unsigned long)
 			   iaddr2blockno(sb, &data->double_indirect) +
 			   dbl_which_block);
-		brelse(dbl_indir_block);
 		return BEFS_ERR;
 	}
 
@@ -499,12 +497,11 @@
 	}
 
 	indir_block =
-	    befs_bread(sb, iaddr2blockno(sb, &indir_run) + which_block);
+	    sb_bread(sb, iaddr2blockno(sb, &indir_run) + which_block);
 	if (indir_block == NULL) {
 		befs_error(sb, "%s couldn't read the indirect block "
 			   "at blockno %lu", __func__, (unsigned long)
 			   iaddr2blockno(sb, &indir_run) + which_block);
-		brelse(indir_block);
 		return BEFS_ERR;
 	}
 
diff --git a/fs/befs/debug.c b/fs/befs/debug.c
index 4de7cff..85c1339 100644
--- a/fs/befs/debug.c
+++ b/fs/befs/debug.c
@@ -169,6 +169,7 @@
 
 	befs_debug(sb, "  num_blocks %llu", fs64_to_cpu(sb, sup->num_blocks));
 	befs_debug(sb, "  used_blocks %llu", fs64_to_cpu(sb, sup->used_blocks));
+	befs_debug(sb, "  inode_size %u", fs32_to_cpu(sb, sup->inode_size));
 
 	befs_debug(sb, "  magic2 %08x", fs32_to_cpu(sb, sup->magic2));
 	befs_debug(sb, "  blocks_per_ag %u",
diff --git a/fs/befs/io.c b/fs/befs/io.c
index 523c8af..b4a5581 100644
--- a/fs/befs/io.c
+++ b/fs/befs/io.c
@@ -27,7 +27,7 @@
 befs_bread_iaddr(struct super_block *sb, befs_inode_addr iaddr)
 {
 	struct buffer_head *bh;
-	befs_blocknr_t block = 0;
+	befs_blocknr_t block;
 	struct befs_sb_info *befs_sb = BEFS_SB(sb);
 
 	befs_debug(sb, "---> Enter %s "
@@ -59,27 +59,3 @@
 	befs_debug(sb, "<--- %s ERROR", __func__);
 	return NULL;
 }
-
-struct buffer_head *
-befs_bread(struct super_block *sb, befs_blocknr_t block)
-{
-	struct buffer_head *bh;
-
-	befs_debug(sb, "---> Enter %s %lu", __func__, (unsigned long)block);
-
-	bh = sb_bread(sb, block);
-
-	if (bh == NULL) {
-		befs_error(sb, "Failed to read block %lu",
-			   (unsigned long)block);
-		goto error;
-	}
-
-	befs_debug(sb, "<--- %s", __func__);
-
-	return bh;
-
-      error:
-	befs_debug(sb, "<--- %s ERROR", __func__);
-	return NULL;
-}
diff --git a/fs/befs/io.h b/fs/befs/io.h
index 9b78266..78d7bc6 100644
--- a/fs/befs/io.h
+++ b/fs/befs/io.h
@@ -5,5 +5,3 @@
 struct buffer_head *befs_bread_iaddr(struct super_block *sb,
 				     befs_inode_addr iaddr);
 
-struct buffer_head *befs_bread(struct super_block *sb, befs_blocknr_t block);
-
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index bfe9f99..647a276 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -120,7 +120,7 @@
 	struct super_block *sb = inode->i_sb;
 	befs_data_stream *ds = &BEFS_I(inode)->i_data.ds;
 	befs_block_run run = BAD_IADDR;
-	int res = 0;
+	int res;
 	ulong disk_off;
 
 	befs_debug(sb, "---> befs_get_block() for inode %lu, block %ld",
@@ -179,15 +179,16 @@
 		kfree(utfname);
 
 	} else {
-		ret = befs_btree_find(sb, ds, dentry->d_name.name, &offset);
+		ret = befs_btree_find(sb, ds, name, &offset);
 	}
 
 	if (ret == BEFS_BT_NOT_FOUND) {
 		befs_debug(sb, "<--- %s %pd not found", __func__, dentry);
+		d_add(dentry, NULL);
 		return ERR_PTR(-ENOENT);
 
 	} else if (ret != BEFS_OK || offset == 0) {
-		befs_warning(sb, "<--- %s Error", __func__);
+		befs_error(sb, "<--- %s Error", __func__);
 		return ERR_PTR(-ENODATA);
 	}
 
@@ -211,56 +212,55 @@
 	befs_off_t value;
 	int result;
 	size_t keysize;
-	unsigned char d_type;
 	char keybuf[BEFS_NAME_LEN + 1];
 
 	befs_debug(sb, "---> %s name %pD, inode %ld, ctx->pos %lld",
 		  __func__, file, inode->i_ino, ctx->pos);
 
-more:
-	result = befs_btree_read(sb, ds, ctx->pos, BEFS_NAME_LEN + 1,
-				 keybuf, &keysize, &value);
+	while (1) {
+		result = befs_btree_read(sb, ds, ctx->pos, BEFS_NAME_LEN + 1,
+					 keybuf, &keysize, &value);
 
-	if (result == BEFS_ERR) {
-		befs_debug(sb, "<--- %s ERROR", __func__);
-		befs_error(sb, "IO error reading %pD (inode %lu)",
-			   file, inode->i_ino);
-		return -EIO;
-
-	} else if (result == BEFS_BT_END) {
-		befs_debug(sb, "<--- %s END", __func__);
-		return 0;
-
-	} else if (result == BEFS_BT_EMPTY) {
-		befs_debug(sb, "<--- %s Empty directory", __func__);
-		return 0;
-	}
-
-	d_type = DT_UNKNOWN;
-
-	/* Convert to NLS */
-	if (BEFS_SB(sb)->nls) {
-		char *nlsname;
-		int nlsnamelen;
-		result =
-		    befs_utf2nls(sb, keybuf, keysize, &nlsname, &nlsnamelen);
-		if (result < 0) {
+		if (result == BEFS_ERR) {
 			befs_debug(sb, "<--- %s ERROR", __func__);
-			return result;
+			befs_error(sb, "IO error reading %pD (inode %lu)",
+				   file, inode->i_ino);
+			return -EIO;
+
+		} else if (result == BEFS_BT_END) {
+			befs_debug(sb, "<--- %s END", __func__);
+			return 0;
+
+		} else if (result == BEFS_BT_EMPTY) {
+			befs_debug(sb, "<--- %s Empty directory", __func__);
+			return 0;
 		}
-		if (!dir_emit(ctx, nlsname, nlsnamelen,
-				 (ino_t) value, d_type)) {
+
+		/* Convert to NLS */
+		if (BEFS_SB(sb)->nls) {
+			char *nlsname;
+			int nlsnamelen;
+
+			result =
+			    befs_utf2nls(sb, keybuf, keysize, &nlsname,
+					 &nlsnamelen);
+			if (result < 0) {
+				befs_debug(sb, "<--- %s ERROR", __func__);
+				return result;
+			}
+			if (!dir_emit(ctx, nlsname, nlsnamelen,
+				      (ino_t) value, DT_UNKNOWN)) {
+				kfree(nlsname);
+				return 0;
+			}
 			kfree(nlsname);
-			return 0;
+		} else {
+			if (!dir_emit(ctx, keybuf, keysize,
+				      (ino_t) value, DT_UNKNOWN))
+				return 0;
 		}
-		kfree(nlsname);
-	} else {
-		if (!dir_emit(ctx, keybuf, keysize,
-				 (ino_t) value, d_type))
-			return 0;
+		ctx->pos++;
 	}
-	ctx->pos++;
-	goto more;
 }
 
 static struct inode *
@@ -299,7 +299,6 @@
 	struct befs_sb_info *befs_sb = BEFS_SB(sb);
 	struct befs_inode_info *befs_ino;
 	struct inode *inode;
-	long ret = -EIO;
 
 	befs_debug(sb, "---> %s inode = %lu", __func__, ino);
 
@@ -318,7 +317,7 @@
 		   befs_ino->i_inode_num.allocation_group,
 		   befs_ino->i_inode_num.start, befs_ino->i_inode_num.len);
 
-	bh = befs_bread(sb, inode->i_ino);
+	bh = sb_bread(sb, inode->i_ino);
 	if (!bh) {
 		befs_error(sb, "unable to read inode block - "
 			   "inode = %lu", inode->i_ino);
@@ -421,7 +420,7 @@
       unacquire_none:
 	iget_failed(inode);
 	befs_debug(sb, "<--- %s - Bad inode", __func__);
-	return ERR_PTR(ret);
+	return ERR_PTR(-EIO);
 }
 
 /* Initialize the inode cache. Called at fs setup.
@@ -436,10 +435,9 @@
 					      0, (SLAB_RECLAIM_ACCOUNT|
 						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
 					      init_once);
-	if (befs_inode_cachep == NULL) {
-		pr_err("%s: Couldn't initialize inode slabcache\n", __func__);
+	if (befs_inode_cachep == NULL)
 		return -ENOMEM;
-	}
+
 	return 0;
 }
 
@@ -524,8 +522,6 @@
 
 	*out = result = kmalloc(maxlen, GFP_NOFS);
 	if (!*out) {
-		befs_error(sb, "%s cannot allocate memory", __func__);
-		*out_len = 0;
 		return -ENOMEM;
 	}
 
@@ -604,7 +600,6 @@
 
 	*out = result = kmalloc(maxlen, GFP_NOFS);
 	if (!*out) {
-		befs_error(sb, "%s cannot allocate memory", __func__);
 		*out_len = 0;
 		return -ENOMEM;
 	}
@@ -637,10 +632,6 @@
 	return -EILSEQ;
 }
 
-/**
- * Use the
- *
- */
 enum {
 	Opt_uid, Opt_gid, Opt_charset, Opt_debug, Opt_err,
 };
@@ -760,19 +751,19 @@
 	long ret = -EINVAL;
 	const unsigned long sb_block = 0;
 	const off_t x86_sb_off = 512;
+	int blocksize;
 
 	save_mount_options(sb, data);
 
 	sb->s_fs_info = kzalloc(sizeof(*befs_sb), GFP_KERNEL);
-	if (sb->s_fs_info == NULL) {
-		pr_err("(%s): Unable to allocate memory for private "
-		       "portion of superblock. Bailing.\n", sb->s_id);
+	if (sb->s_fs_info == NULL)
 		goto unacquire_none;
-	}
+
 	befs_sb = BEFS_SB(sb);
 
 	if (!parse_options((char *) data, &befs_sb->mount_opts)) {
-		befs_error(sb, "cannot parse mount options");
+		if (!silent)
+			befs_error(sb, "cannot parse mount options");
 		goto unacquire_priv_sbp;
 	}
 
@@ -793,10 +784,16 @@
 	 * least 1k to get the second 512 bytes of the volume.
 	 * -WD 10-26-01
 	 */ 
-	sb_min_blocksize(sb, 1024);
+	blocksize = sb_min_blocksize(sb, 1024);
+	if (!blocksize) {
+		if (!silent)
+			befs_error(sb, "unable to set blocksize");
+		goto unacquire_priv_sbp;
+	}
 
 	if (!(bh = sb_bread(sb, sb_block))) {
-		befs_error(sb, "unable to read superblock");
+		if (!silent)
+			befs_error(sb, "unable to read superblock");
 		goto unacquire_priv_sbp;
 	}
 
@@ -820,9 +817,9 @@
 	brelse(bh);
 
 	if( befs_sb->num_blocks > ~((sector_t)0) ) {
-		befs_error(sb, "blocks count: %llu "
-			"is larger than the host can use",
-			befs_sb->num_blocks);
+		if (!silent)
+			befs_error(sb, "blocks count: %llu is larger than the host can use",
+					befs_sb->num_blocks);
 		goto unacquire_priv_sbp;
 	}
 
@@ -841,7 +838,8 @@
 	}
 	sb->s_root = d_make_root(root);
 	if (!sb->s_root) {
-		befs_error(sb, "get root inode failed");
+		if (!silent)
+			befs_error(sb, "get root inode failed");
 		goto unacquire_priv_sbp;
 	}
 
@@ -870,9 +868,9 @@
       unacquire_priv_sbp:
 	kfree(befs_sb->mount_opts.iocharset);
 	kfree(sb->s_fs_info);
+	sb->s_fs_info = NULL;
 
       unacquire_none:
-	sb->s_fs_info = NULL;
 	return ret;
 }
 
diff --git a/fs/befs/super.c b/fs/befs/super.c
index aeafc4d..7c50025 100644
--- a/fs/befs/super.c
+++ b/fs/befs/super.c
@@ -13,24 +13,20 @@
 #include "befs.h"
 #include "super.h"
 
-/**
- * load_befs_sb -- Read from disk and properly byteswap all the fields
+/*
+ * befs_load_sb -- Read from disk and properly byteswap all the fields
  * of the befs superblock
- *
- *
- *
- *
  */
 int
-befs_load_sb(struct super_block *sb, befs_super_block * disk_sb)
+befs_load_sb(struct super_block *sb, befs_super_block *disk_sb)
 {
 	struct befs_sb_info *befs_sb = BEFS_SB(sb);
 
 	/* Check the byte order of the filesystem */
 	if (disk_sb->fs_byte_order == BEFS_BYTEORDER_NATIVE_LE)
-	    befs_sb->byte_order = BEFS_BYTESEX_LE;
+		befs_sb->byte_order = BEFS_BYTESEX_LE;
 	else if (disk_sb->fs_byte_order == BEFS_BYTEORDER_NATIVE_BE)
-	    befs_sb->byte_order = BEFS_BYTESEX_BE;
+		befs_sb->byte_order = BEFS_BYTESEX_BE;
 
 	befs_sb->magic1 = fs32_to_cpu(sb, disk_sb->magic1);
 	befs_sb->magic2 = fs32_to_cpu(sb, disk_sb->magic2);
@@ -45,6 +41,8 @@
 	befs_sb->ag_shift = fs32_to_cpu(sb, disk_sb->ag_shift);
 	befs_sb->num_ags = fs32_to_cpu(sb, disk_sb->num_ags);
 
+	befs_sb->flags = fs32_to_cpu(sb, disk_sb->flags);
+
 	befs_sb->log_blocks = fsrun_to_cpu(sb, disk_sb->log_blocks);
 	befs_sb->log_start = fs64_to_cpu(sb, disk_sb->log_start);
 	befs_sb->log_end = fs64_to_cpu(sb, disk_sb->log_end);
@@ -84,15 +82,15 @@
 	}
 
 	if (befs_sb->block_size > PAGE_SIZE) {
-		befs_error(sb, "blocksize(%u) cannot be larger"
+		befs_error(sb, "blocksize(%u) cannot be larger "
 			   "than system pagesize(%lu)", befs_sb->block_size,
 			   PAGE_SIZE);
 		return BEFS_ERR;
 	}
 
 	/*
-	   * block_shift and block_size encode the same information
-	   * in different ways as a consistency check.
+	 * block_shift and block_size encode the same information
+	 * in different ways as a consistency check.
 	 */
 
 	if ((1 << befs_sb->block_shift) != befs_sb->block_size) {
@@ -101,10 +99,18 @@
 		return BEFS_ERR;
 	}
 
-	if (befs_sb->log_start != befs_sb->log_end) {
+
+	/* ag_shift also encodes the same information as blocks_per_ag in a
+	 * different way, non-fatal consistency check
+	 */
+	if ((1 << befs_sb->ag_shift) != befs_sb->blocks_per_ag)
+		befs_error(sb, "ag_shift disagrees with blocks_per_ag.");
+
+	if (befs_sb->log_start != befs_sb->log_end ||
+	    befs_sb->flags == BEFS_DIRTY) {
 		befs_error(sb, "Filesystem not clean! There are blocks in the "
-			   "journal. You must boot into BeOS and mount this volume "
-			   "to make it clean.");
+			   "journal. You must boot into BeOS and mount this "
+			   "volume to make it clean.");
 		return BEFS_ERR;
 	}
 
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 34a5bc2..3e5ac30 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -97,7 +97,7 @@
 	set_bit(ino, info->si_imap);
 	info->si_freei--;
 	inode_init_owner(inode, dir, mode);
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	inode->i_blocks = 0;
 	inode->i_op = &bfs_file_inops;
 	inode->i_fop = &bfs_file_operations;
@@ -165,7 +165,7 @@
 		return err;
 	}
 	inc_nlink(inode);
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	mark_inode_dirty(inode);
 	ihold(inode);
 	d_instantiate(new, inode);
@@ -194,7 +194,7 @@
 	}
 	de->ino = 0;
 	mark_buffer_dirty_inode(bh, dir);
-	dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+	dir->i_ctime = dir->i_mtime = current_time(dir);
 	mark_inode_dirty(dir);
 	inode->i_ctime = dir->i_ctime;
 	inode_dec_link_count(inode);
@@ -207,7 +207,8 @@
 }
 
 static int bfs_rename(struct inode *old_dir, struct dentry *old_dentry,
-			struct inode *new_dir, struct dentry *new_dentry)
+		      struct inode *new_dir, struct dentry *new_dentry,
+		      unsigned int flags)
 {
 	struct inode *old_inode, *new_inode;
 	struct buffer_head *old_bh, *new_bh;
@@ -215,6 +216,9 @@
 	struct bfs_sb_info *info;
 	int error = -ENOENT;
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	old_bh = new_bh = NULL;
 	old_inode = d_inode(old_dentry);
 	if (S_ISDIR(old_inode->i_mode))
@@ -249,10 +253,10 @@
 			goto end_rename;
 	}
 	old_de->ino = 0;
-	old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
+	old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
 	mark_inode_dirty(old_dir);
 	if (new_inode) {
-		new_inode->i_ctime = CURRENT_TIME_SEC;
+		new_inode->i_ctime = current_time(new_inode);
 		inode_dec_link_count(new_inode);
 	}
 	mark_buffer_dirty_inode(old_bh, old_dir);
@@ -300,9 +304,9 @@
 				pos = (block - sblock) * BFS_BSIZE + off;
 				if (pos >= dir->i_size) {
 					dir->i_size += BFS_DIRENT_SIZE;
-					dir->i_ctime = CURRENT_TIME_SEC;
+					dir->i_ctime = current_time(dir);
 				}
-				dir->i_mtime = CURRENT_TIME_SEC;
+				dir->i_mtime = current_time(dir);
 				mark_inode_dirty(dir);
 				de->ino = cpu_to_le16((u16)ino);
 				for (i = 0; i < BFS_NAMELEN; i++)
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 6103a63..9b4688a 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -584,7 +584,7 @@
 		inode->i_ino = get_next_ino();
 		inode->i_mode = mode;
 		inode->i_atime = inode->i_mtime = inode->i_ctime =
-			current_fs_time(inode->i_sb);
+			current_time(inode);
 	}
 	return inode;
 }
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 376e4e4..05b5533 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -30,6 +30,7 @@
 #include <linux/cleancache.h>
 #include <linux/dax.h>
 #include <linux/badblocks.h>
+#include <linux/falloc.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
@@ -1775,6 +1776,81 @@
 	.is_dirty_writeback = buffer_check_dirty_writeback,
 };
 
+#define	BLKDEV_FALLOC_FL_SUPPORTED					\
+		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
+		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
+
+static long blkdev_fallocate(struct file *file, int mode, loff_t start,
+			     loff_t len)
+{
+	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
+	struct request_queue *q = bdev_get_queue(bdev);
+	struct address_space *mapping;
+	loff_t end = start + len - 1;
+	loff_t isize;
+	int error;
+
+	/* Fail if we don't recognize the flags. */
+	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
+		return -EOPNOTSUPP;
+
+	/* Don't go off the end of the device. */
+	isize = i_size_read(bdev->bd_inode);
+	if (start >= isize)
+		return -EINVAL;
+	if (end >= isize) {
+		if (mode & FALLOC_FL_KEEP_SIZE) {
+			len = isize - start;
+			end = start + len - 1;
+		} else
+			return -EINVAL;
+	}
+
+	/*
+	 * Don't allow IO that isn't aligned to logical block size.
+	 */
+	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
+		return -EINVAL;
+
+	/* Invalidate the page cache, including dirty pages. */
+	mapping = bdev->bd_inode->i_mapping;
+	truncate_inode_pages_range(mapping, start, end);
+
+	switch (mode) {
+	case FALLOC_FL_ZERO_RANGE:
+	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
+		error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
+					    GFP_KERNEL, false);
+		break;
+	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
+		/* Only punch if the device can do zeroing discard. */
+		if (!blk_queue_discard(q) || !q->limits.discard_zeroes_data)
+			return -EOPNOTSUPP;
+		error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
+					     GFP_KERNEL, 0);
+		break;
+	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
+		if (!blk_queue_discard(q))
+			return -EOPNOTSUPP;
+		error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
+					     GFP_KERNEL, 0);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	if (error)
+		return error;
+
+	/*
+	 * Invalidate again; if someone wandered in and dirtied a page,
+	 * the caller will be given -EBUSY.  The third argument is
+	 * inclusive, so the rounding here is safe.
+	 */
+	return invalidate_inode_pages2_range(mapping,
+					     start >> PAGE_SHIFT,
+					     end >> PAGE_SHIFT);
+}
+
 const struct file_operations def_blk_fops = {
 	.open		= blkdev_open,
 	.release	= blkdev_close,
@@ -1789,6 +1865,7 @@
 #endif
 	.splice_read	= generic_file_splice_read,
 	.splice_write	= iter_file_splice_write,
+	.fallocate	= blkdev_fallocate,
 };
 
 int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 53bb7af..247b8df 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -79,11 +79,9 @@
 	case ACL_TYPE_ACCESS:
 		name = XATTR_NAME_POSIX_ACL_ACCESS;
 		if (acl) {
-			ret = posix_acl_equiv_mode(acl, &inode->i_mode);
-			if (ret < 0)
+			ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+			if (ret)
 				return ret;
-			if (ret == 0)
-				acl = NULL;
 		}
 		ret = 0;
 		break;
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 455a6b2..85dc7ab 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -17,6 +17,7 @@
  */
 
 #include <linux/vmalloc.h>
+#include <linux/rbtree.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "backref.h"
@@ -34,6 +35,265 @@
 	struct extent_inode_elem *next;
 };
 
+/*
+ * ref_root is used as the root of the ref tree that hold a collection
+ * of unique references.
+ */
+struct ref_root {
+	struct rb_root rb_root;
+
+	/*
+	 * The unique_refs represents the number of ref_nodes with a positive
+	 * count stored in the tree. Even if a ref_node (the count is greater
+	 * than one) is added, the unique_refs will only increase by one.
+	 */
+	unsigned int unique_refs;
+};
+
+/* ref_node is used to store a unique reference to the ref tree. */
+struct ref_node {
+	struct rb_node rb_node;
+
+	/* For NORMAL_REF, otherwise all these fields should be set to 0 */
+	u64 root_id;
+	u64 object_id;
+	u64 offset;
+
+	/* For SHARED_REF, otherwise parent field should be set to 0 */
+	u64 parent;
+
+	/* Ref to the ref_mod of btrfs_delayed_ref_node */
+	int ref_mod;
+};
+
+/* Dynamically allocate and initialize a ref_root */
+static struct ref_root *ref_root_alloc(void)
+{
+	struct ref_root *ref_tree;
+
+	ref_tree = kmalloc(sizeof(*ref_tree), GFP_NOFS);
+	if (!ref_tree)
+		return NULL;
+
+	ref_tree->rb_root = RB_ROOT;
+	ref_tree->unique_refs = 0;
+
+	return ref_tree;
+}
+
+/* Free all nodes in the ref tree, and reinit ref_root */
+static void ref_root_fini(struct ref_root *ref_tree)
+{
+	struct ref_node *node;
+	struct rb_node *next;
+
+	while ((next = rb_first(&ref_tree->rb_root)) != NULL) {
+		node = rb_entry(next, struct ref_node, rb_node);
+		rb_erase(next, &ref_tree->rb_root);
+		kfree(node);
+	}
+
+	ref_tree->rb_root = RB_ROOT;
+	ref_tree->unique_refs = 0;
+}
+
+static void ref_root_free(struct ref_root *ref_tree)
+{
+	if (!ref_tree)
+		return;
+
+	ref_root_fini(ref_tree);
+	kfree(ref_tree);
+}
+
+/*
+ * Compare ref_node with (root_id, object_id, offset, parent)
+ *
+ * The function compares two ref_node a and b. It returns an integer less
+ * than, equal to, or greater than zero , respectively, to be less than, to
+ * equal, or be greater than b.
+ */
+static int ref_node_cmp(struct ref_node *a, struct ref_node *b)
+{
+	if (a->root_id < b->root_id)
+		return -1;
+	else if (a->root_id > b->root_id)
+		return 1;
+
+	if (a->object_id < b->object_id)
+		return -1;
+	else if (a->object_id > b->object_id)
+		return 1;
+
+	if (a->offset < b->offset)
+		return -1;
+	else if (a->offset > b->offset)
+		return 1;
+
+	if (a->parent < b->parent)
+		return -1;
+	else if (a->parent > b->parent)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Search ref_node with (root_id, object_id, offset, parent) in the tree
+ *
+ * if found, the pointer of the ref_node will be returned;
+ * if not found, NULL will be returned and pos will point to the rb_node for
+ * insert, pos_parent will point to pos'parent for insert;
+*/
+static struct ref_node *__ref_tree_search(struct ref_root *ref_tree,
+					  struct rb_node ***pos,
+					  struct rb_node **pos_parent,
+					  u64 root_id, u64 object_id,
+					  u64 offset, u64 parent)
+{
+	struct ref_node *cur = NULL;
+	struct ref_node entry;
+	int ret;
+
+	entry.root_id = root_id;
+	entry.object_id = object_id;
+	entry.offset = offset;
+	entry.parent = parent;
+
+	*pos = &ref_tree->rb_root.rb_node;
+
+	while (**pos) {
+		*pos_parent = **pos;
+		cur = rb_entry(*pos_parent, struct ref_node, rb_node);
+
+		ret = ref_node_cmp(cur, &entry);
+		if (ret > 0)
+			*pos = &(**pos)->rb_left;
+		else if (ret < 0)
+			*pos = &(**pos)->rb_right;
+		else
+			return cur;
+	}
+
+	return NULL;
+}
+
+/*
+ * Insert a ref_node to the ref tree
+ * @pos used for specifiy the position to insert
+ * @pos_parent for specifiy pos's parent
+ *
+ * success, return 0;
+ * ref_node already exists, return -EEXIST;
+*/
+static int ref_tree_insert(struct ref_root *ref_tree, struct rb_node **pos,
+			   struct rb_node *pos_parent, struct ref_node *ins)
+{
+	struct rb_node **p = NULL;
+	struct rb_node *parent = NULL;
+	struct ref_node *cur = NULL;
+
+	if (!pos) {
+		cur = __ref_tree_search(ref_tree, &p, &parent, ins->root_id,
+					ins->object_id, ins->offset,
+					ins->parent);
+		if (cur)
+			return -EEXIST;
+	} else {
+		p = pos;
+		parent = pos_parent;
+	}
+
+	rb_link_node(&ins->rb_node, parent, p);
+	rb_insert_color(&ins->rb_node, &ref_tree->rb_root);
+
+	return 0;
+}
+
+/* Erase and free ref_node, caller should update ref_root->unique_refs */
+static void ref_tree_remove(struct ref_root *ref_tree, struct ref_node *node)
+{
+	rb_erase(&node->rb_node, &ref_tree->rb_root);
+	kfree(node);
+}
+
+/*
+ * Update ref_root->unique_refs
+ *
+ * Call __ref_tree_search
+ *	1. if ref_node doesn't exist, ref_tree_insert this node, and update
+ *	ref_root->unique_refs:
+ *		if ref_node->ref_mod > 0, ref_root->unique_refs++;
+ *		if ref_node->ref_mod < 0, do noting;
+ *
+ *	2. if ref_node is found, then get origin ref_node->ref_mod, and update
+ *	ref_node->ref_mod.
+ *		if ref_node->ref_mod is equal to 0,then call ref_tree_remove
+ *
+ *		according to origin_mod and new_mod, update ref_root->items
+ *		+----------------+--------------+-------------+
+ *		|		 |new_count <= 0|new_count > 0|
+ *		+----------------+--------------+-------------+
+ *		|origin_count < 0|       0      |      1      |
+ *		+----------------+--------------+-------------+
+ *		|origin_count > 0|      -1      |      0      |
+ *		+----------------+--------------+-------------+
+ *
+ * In case of allocation failure, -ENOMEM is returned and the ref_tree stays
+ * unaltered.
+ * Success, return 0
+ */
+static int ref_tree_add(struct ref_root *ref_tree, u64 root_id, u64 object_id,
+			u64 offset, u64 parent, int count)
+{
+	struct ref_node *node = NULL;
+	struct rb_node **pos = NULL;
+	struct rb_node *pos_parent = NULL;
+	int origin_count;
+	int ret;
+
+	if (!count)
+		return 0;
+
+	node = __ref_tree_search(ref_tree, &pos, &pos_parent, root_id,
+				 object_id, offset, parent);
+	if (node == NULL) {
+		node = kmalloc(sizeof(*node), GFP_NOFS);
+		if (!node)
+			return -ENOMEM;
+
+		node->root_id = root_id;
+		node->object_id = object_id;
+		node->offset = offset;
+		node->parent = parent;
+		node->ref_mod = count;
+
+		ret = ref_tree_insert(ref_tree, pos, pos_parent, node);
+		ASSERT(!ret);
+		if (ret) {
+			kfree(node);
+			return ret;
+		}
+
+		ref_tree->unique_refs += node->ref_mod > 0 ? 1 : 0;
+
+		return 0;
+	}
+
+	origin_count = node->ref_mod;
+	node->ref_mod += count;
+
+	if (node->ref_mod > 0)
+		ref_tree->unique_refs += origin_count > 0 ? 0 : 1;
+	else if (node->ref_mod <= 0)
+		ref_tree->unique_refs += origin_count > 0 ? -1 : 0;
+
+	if (!node->ref_mod)
+		ref_tree_remove(ref_tree, node);
+
+	return 0;
+}
+
 static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
 				struct btrfs_file_extent_item *fi,
 				u64 extent_item_pos,
@@ -390,8 +650,8 @@
 	/* root node has been locked, we can release @subvol_srcu safely here */
 	srcu_read_unlock(&fs_info->subvol_srcu, index);
 
-	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
-		 "%d for key (%llu %u %llu)\n",
+	btrfs_debug(fs_info,
+		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
 		 ref->root_id, level, ref->count, ret,
 		 ref->key_for_search.objectid, ref->key_for_search.type,
 		 ref->key_for_search.offset);
@@ -700,6 +960,7 @@
 static int __add_inline_refs(struct btrfs_fs_info *fs_info,
 			     struct btrfs_path *path, u64 bytenr,
 			     int *info_level, struct list_head *prefs,
+			     struct ref_root *ref_tree,
 			     u64 *total_refs, u64 inum)
 {
 	int ret = 0;
@@ -767,6 +1028,13 @@
 			count = btrfs_shared_data_ref_count(leaf, sdref);
 			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
 					       bytenr, count, GFP_NOFS);
+			if (ref_tree) {
+				if (!ret)
+					ret = ref_tree_add(ref_tree, 0, 0, 0,
+							   bytenr, count);
+				if (!ret && ref_tree->unique_refs > 1)
+					ret = BACKREF_FOUND_SHARED;
+			}
 			break;
 		}
 		case BTRFS_TREE_BLOCK_REF_KEY:
@@ -794,6 +1062,15 @@
 			root = btrfs_extent_data_ref_root(leaf, dref);
 			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
 					       bytenr, count, GFP_NOFS);
+			if (ref_tree) {
+				if (!ret)
+					ret = ref_tree_add(ref_tree, root,
+							   key.objectid,
+							   key.offset, 0,
+							   count);
+				if (!ret && ref_tree->unique_refs > 1)
+					ret = BACKREF_FOUND_SHARED;
+			}
 			break;
 		}
 		default:
@@ -812,7 +1089,8 @@
  */
 static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
 			    struct btrfs_path *path, u64 bytenr,
-			    int info_level, struct list_head *prefs, u64 inum)
+			    int info_level, struct list_head *prefs,
+			    struct ref_root *ref_tree, u64 inum)
 {
 	struct btrfs_root *extent_root = fs_info->extent_root;
 	int ret;
@@ -855,6 +1133,13 @@
 			count = btrfs_shared_data_ref_count(leaf, sdref);
 			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
 						bytenr, count, GFP_NOFS);
+			if (ref_tree) {
+				if (!ret)
+					ret = ref_tree_add(ref_tree, 0, 0, 0,
+							   bytenr, count);
+				if (!ret && ref_tree->unique_refs > 1)
+					ret = BACKREF_FOUND_SHARED;
+			}
 			break;
 		}
 		case BTRFS_TREE_BLOCK_REF_KEY:
@@ -883,6 +1168,15 @@
 			root = btrfs_extent_data_ref_root(leaf, dref);
 			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
 					       bytenr, count, GFP_NOFS);
+			if (ref_tree) {
+				if (!ret)
+					ret = ref_tree_add(ref_tree, root,
+							   key.objectid,
+							   key.offset, 0,
+							   count);
+				if (!ret && ref_tree->unique_refs > 1)
+					ret = BACKREF_FOUND_SHARED;
+			}
 			break;
 		}
 		default:
@@ -909,13 +1203,16 @@
  * commit root.
  * The special case is for qgroup to search roots in commit_transaction().
  *
+ * If check_shared is set to 1, any extent has more than one ref item, will
+ * be returned BACKREF_FOUND_SHARED immediately.
+ *
  * FIXME some caching might speed things up
  */
 static int find_parent_nodes(struct btrfs_trans_handle *trans,
 			     struct btrfs_fs_info *fs_info, u64 bytenr,
 			     u64 time_seq, struct ulist *refs,
 			     struct ulist *roots, const u64 *extent_item_pos,
-			     u64 root_objectid, u64 inum)
+			     u64 root_objectid, u64 inum, int check_shared)
 {
 	struct btrfs_key key;
 	struct btrfs_path *path;
@@ -927,6 +1224,7 @@
 	struct list_head prefs;
 	struct __prelim_ref *ref;
 	struct extent_inode_elem *eie = NULL;
+	struct ref_root *ref_tree = NULL;
 	u64 total_refs = 0;
 
 	INIT_LIST_HEAD(&prefs);
@@ -958,6 +1256,18 @@
 again:
 	head = NULL;
 
+	if (check_shared) {
+		if (!ref_tree) {
+			ref_tree = ref_root_alloc();
+			if (!ref_tree) {
+				ret = -ENOMEM;
+				goto out;
+			}
+		} else {
+			ref_root_fini(ref_tree);
+		}
+	}
+
 	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
 	if (ret < 0)
 		goto out;
@@ -1002,6 +1312,36 @@
 		} else {
 			spin_unlock(&delayed_refs->lock);
 		}
+
+		if (check_shared && !list_empty(&prefs_delayed)) {
+			/*
+			 * Add all delay_ref to the ref_tree and check if there
+			 * are multiple ref items added.
+			 */
+			list_for_each_entry(ref, &prefs_delayed, list) {
+				if (ref->key_for_search.type) {
+					ret = ref_tree_add(ref_tree,
+						ref->root_id,
+						ref->key_for_search.objectid,
+						ref->key_for_search.offset,
+						0, ref->count);
+					if (ret)
+						goto out;
+				} else {
+					ret = ref_tree_add(ref_tree, 0, 0, 0,
+						     ref->parent, ref->count);
+					if (ret)
+						goto out;
+				}
+
+			}
+
+			if (ref_tree->unique_refs > 1) {
+				ret = BACKREF_FOUND_SHARED;
+				goto out;
+			}
+
+		}
 	}
 
 	if (path->slots[0]) {
@@ -1017,11 +1357,13 @@
 		     key.type == BTRFS_METADATA_ITEM_KEY)) {
 			ret = __add_inline_refs(fs_info, path, bytenr,
 						&info_level, &prefs,
-						&total_refs, inum);
+						ref_tree, &total_refs,
+						inum);
 			if (ret)
 				goto out;
 			ret = __add_keyed_refs(fs_info, path, bytenr,
-					       info_level, &prefs, inum);
+					       info_level, &prefs,
+					       ref_tree, inum);
 			if (ret)
 				goto out;
 		}
@@ -1106,6 +1448,7 @@
 
 out:
 	btrfs_free_path(path);
+	ref_root_free(ref_tree);
 	while (!list_empty(&prefs)) {
 		ref = list_first_entry(&prefs, struct __prelim_ref, list);
 		list_del(&ref->list);
@@ -1159,8 +1502,8 @@
 	if (!*leafs)
 		return -ENOMEM;
 
-	ret = find_parent_nodes(trans, fs_info, bytenr,
-				time_seq, *leafs, NULL, extent_item_pos, 0, 0);
+	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
+				*leafs, NULL, extent_item_pos, 0, 0, 0);
 	if (ret < 0 && ret != -ENOENT) {
 		free_leaf_list(*leafs);
 		return ret;
@@ -1202,8 +1545,8 @@
 
 	ULIST_ITER_INIT(&uiter);
 	while (1) {
-		ret = find_parent_nodes(trans, fs_info, bytenr,
-					time_seq, tmp, *roots, NULL, 0, 0);
+		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
+					tmp, *roots, NULL, 0, 0, 0);
 		if (ret < 0 && ret != -ENOENT) {
 			ulist_free(tmp);
 			ulist_free(*roots);
@@ -1273,7 +1616,7 @@
 	ULIST_ITER_INIT(&uiter);
 	while (1) {
 		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
-					roots, NULL, root_objectid, inum);
+					roots, NULL, root_objectid, inum, 1);
 		if (ret == BACKREF_FOUND_SHARED) {
 			/* this is the only condition under which we return 1 */
 			ret = 1;
@@ -1492,7 +1835,8 @@
 
 	if (found_key->objectid > logical ||
 	    found_key->objectid + size <= logical) {
-		pr_debug("logical %llu is not within any extent\n", logical);
+		btrfs_debug(fs_info,
+			"logical %llu is not within any extent", logical);
 		return -ENOENT;
 	}
 
@@ -1503,8 +1847,8 @@
 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
 	flags = btrfs_extent_flags(eb, ei);
 
-	pr_debug("logical %llu is at position %llu within the extent (%llu "
-		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
+	btrfs_debug(fs_info,
+		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
 		 logical, logical - found_key->objectid, found_key->objectid,
 		 found_key->offset, flags, item_size);
 
@@ -1625,21 +1969,24 @@
 	return 0;
 }
 
-static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
-				u64 root, u64 extent_item_objectid,
-				iterate_extent_inodes_t *iterate, void *ctx)
+static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
+			     struct extent_inode_elem *inode_list,
+			     u64 root, u64 extent_item_objectid,
+			     iterate_extent_inodes_t *iterate, void *ctx)
 {
 	struct extent_inode_elem *eie;
 	int ret = 0;
 
 	for (eie = inode_list; eie; eie = eie->next) {
-		pr_debug("ref for %llu resolved, key (%llu EXTEND_DATA %llu), "
-			 "root %llu\n", extent_item_objectid,
-			 eie->inum, eie->offset, root);
+		btrfs_debug(fs_info,
+			    "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu",
+			    extent_item_objectid, eie->inum,
+			    eie->offset, root);
 		ret = iterate(eie->inum, eie->offset, root, ctx);
 		if (ret) {
-			pr_debug("stopping iteration for %llu due to ret=%d\n",
-				 extent_item_objectid, ret);
+			btrfs_debug(fs_info,
+				    "stopping iteration for %llu due to ret=%d",
+				    extent_item_objectid, ret);
 			break;
 		}
 	}
@@ -1667,7 +2014,7 @@
 	struct ulist_iterator ref_uiter;
 	struct ulist_iterator root_uiter;
 
-	pr_debug("resolving all inodes for extent %llu\n",
+	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
 			extent_item_objectid);
 
 	if (!search_commit_root) {
@@ -1693,10 +2040,12 @@
 			break;
 		ULIST_ITER_INIT(&root_uiter);
 		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
-			pr_debug("root %llu references leaf %llu, data list "
-				 "%#llx\n", root_node->val, ref_node->val,
-				 ref_node->aux);
-			ret = iterate_leaf_refs((struct extent_inode_elem *)
+			btrfs_debug(fs_info,
+				    "root %llu references leaf %llu, data list %#llx",
+				    root_node->val, ref_node->val,
+				    ref_node->aux);
+			ret = iterate_leaf_refs(fs_info,
+						(struct extent_inode_elem *)
 						(uintptr_t)ref_node->aux,
 						root_node->val,
 						extent_item_objectid,
@@ -1792,9 +2141,9 @@
 		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
 			name_len = btrfs_inode_ref_name_len(eb, iref);
 			/* path must be released before calling iterate()! */
-			pr_debug("following ref at offset %u for inode %llu in "
-				 "tree %llu\n", cur, found_key.objectid,
-				 fs_root->objectid);
+			btrfs_debug(fs_root->fs_info,
+				"following ref at offset %u for inode %llu in tree %llu",
+				cur, found_key.objectid, fs_root->objectid);
 			ret = iterate(parent, name_len,
 				      (unsigned long)(iref + 1), eb, ctx);
 			if (ret)
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 4919aed..1a8fa46 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -44,17 +44,6 @@
 #define BTRFS_INODE_IN_DELALLOC_LIST		9
 #define BTRFS_INODE_READDIO_NEED_LOCK		10
 #define BTRFS_INODE_HAS_PROPS		        11
-/*
- * The following 3 bits are meant only for the btree inode.
- * When any of them is set, it means an error happened while writing an
- * extent buffer belonging to:
- * 1) a non-log btree
- * 2) a log btree and first log sub-transaction
- * 3) a log btree and second log sub-transaction
- */
-#define BTRFS_INODE_BTREE_ERR		        12
-#define BTRFS_INODE_BTREE_LOG1_ERR		13
-#define BTRFS_INODE_BTREE_LOG2_ERR		14
 
 /* in memory btrfs inode */
 struct btrfs_inode {
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 6678947..8e99251 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -656,7 +656,7 @@
 	BUG_ON(NULL == state);
 	selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS);
 	if (NULL == selected_super) {
-		printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+		pr_info("btrfsic: error, kmalloc failed!\n");
 		return -ENOMEM;
 	}
 
@@ -681,7 +681,7 @@
 	}
 
 	if (NULL == state->latest_superblock) {
-		printk(KERN_INFO "btrfsic: no superblock found!\n");
+		pr_info("btrfsic: no superblock found!\n");
 		kfree(selected_super);
 		return -1;
 	}
@@ -698,13 +698,13 @@
 			next_bytenr = btrfs_super_root(selected_super);
 			if (state->print_mask &
 			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
-				printk(KERN_INFO "root@%llu\n", next_bytenr);
+				pr_info("root@%llu\n", next_bytenr);
 			break;
 		case 1:
 			next_bytenr = btrfs_super_chunk_root(selected_super);
 			if (state->print_mask &
 			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
-				printk(KERN_INFO "chunk@%llu\n", next_bytenr);
+				pr_info("chunk@%llu\n", next_bytenr);
 			break;
 		case 2:
 			next_bytenr = btrfs_super_log_root(selected_super);
@@ -712,7 +712,7 @@
 				continue;
 			if (state->print_mask &
 			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
-				printk(KERN_INFO "log@%llu\n", next_bytenr);
+				pr_info("log@%llu\n", next_bytenr);
 			break;
 		}
 
@@ -720,7 +720,7 @@
 		    btrfs_num_copies(state->root->fs_info,
 				     next_bytenr, state->metablock_size);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
-			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+			pr_info("num_copies(log_bytenr=%llu) = %d\n",
 			       next_bytenr, num_copies);
 
 		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
@@ -733,9 +733,7 @@
 						&tmp_next_block_ctx,
 						mirror_num);
 			if (ret) {
-				printk(KERN_INFO "btrfsic:"
-				       " btrfsic_map_block(root @%llu,"
-				       " mirror %d) failed!\n",
+				pr_info("btrfsic: btrfsic_map_block(root @%llu, mirror %d) failed!\n",
 				       next_bytenr, mirror_num);
 				kfree(selected_super);
 				return -1;
@@ -758,8 +756,7 @@
 
 			ret = btrfsic_read_block(state, &tmp_next_block_ctx);
 			if (ret < (int)PAGE_SIZE) {
-				printk(KERN_INFO
-				       "btrfsic: read @logical %llu failed!\n",
+				pr_info("btrfsic: read @logical %llu failed!\n",
 				       tmp_next_block_ctx.start);
 				btrfsic_release_block_ctx(&tmp_next_block_ctx);
 				kfree(selected_super);
@@ -820,7 +817,7 @@
 	if (NULL == superblock_tmp) {
 		superblock_tmp = btrfsic_block_alloc();
 		if (NULL == superblock_tmp) {
-			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+			pr_info("btrfsic: error, kmalloc failed!\n");
 			brelse(bh);
 			return -1;
 		}
@@ -894,7 +891,7 @@
 		    btrfs_num_copies(state->root->fs_info,
 				     next_bytenr, state->metablock_size);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
-			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+			pr_info("num_copies(log_bytenr=%llu) = %d\n",
 			       next_bytenr, num_copies);
 		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
 			struct btrfsic_block *next_block;
@@ -905,8 +902,7 @@
 					      state->metablock_size,
 					      &tmp_next_block_ctx,
 					      mirror_num)) {
-				printk(KERN_INFO "btrfsic: btrfsic_map_block("
-				       "bytenr @%llu, mirror %d) failed!\n",
+				pr_info("btrfsic: btrfsic_map_block(bytenr @%llu, mirror %d) failed!\n",
 				       next_bytenr, mirror_num);
 				brelse(bh);
 				return -1;
@@ -948,7 +944,7 @@
 
 	sf = kzalloc(sizeof(*sf), GFP_NOFS);
 	if (NULL == sf)
-		printk(KERN_INFO "btrfsic: alloc memory failed!\n");
+		pr_info("btrfsic: alloc memory failed!\n");
 	else
 		sf->magic = BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER;
 	return sf;
@@ -994,9 +990,7 @@
 			sf->nr = btrfs_stack_header_nritems(&leafhdr->header);
 
 			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-				printk(KERN_INFO
-				       "leaf %llu items %d generation %llu"
-				       " owner %llu\n",
+				pr_info("leaf %llu items %d generation %llu owner %llu\n",
 				       sf->block_ctx->start, sf->nr,
 				       btrfs_stack_header_generation(
 					       &leafhdr->header),
@@ -1023,8 +1017,7 @@
 			if (disk_item_offset + sizeof(struct btrfs_item) >
 			    sf->block_ctx->len) {
 leaf_item_out_of_bounce_error:
-				printk(KERN_INFO
-				       "btrfsic: leaf item out of bounce at logical %llu, dev %s\n",
+				pr_info("btrfsic: leaf item out of bounce at logical %llu, dev %s\n",
 				       sf->block_ctx->start,
 				       sf->block_ctx->dev->name);
 				goto one_stack_frame_backwards;
@@ -1120,8 +1113,7 @@
 			sf->nr = btrfs_stack_header_nritems(&nodehdr->header);
 
 			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-				printk(KERN_INFO "node %llu level %d items %d"
-				       " generation %llu owner %llu\n",
+				pr_info("node %llu level %d items %d generation %llu owner %llu\n",
 				       sf->block_ctx->start,
 				       nodehdr->header.level, sf->nr,
 				       btrfs_stack_header_generation(
@@ -1145,8 +1137,7 @@
 					  (uintptr_t)nodehdr;
 			if (key_ptr_offset + sizeof(struct btrfs_key_ptr) >
 			    sf->block_ctx->len) {
-				printk(KERN_INFO
-				       "btrfsic: node item out of bounce at logical %llu, dev %s\n",
+				pr_info("btrfsic: node item out of bounce at logical %llu, dev %s\n",
 				       sf->block_ctx->start,
 				       sf->block_ctx->dev->name);
 				goto one_stack_frame_backwards;
@@ -1275,7 +1266,7 @@
 		    btrfs_num_copies(state->root->fs_info,
 				     next_bytenr, state->metablock_size);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
-			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+			pr_info("num_copies(log_bytenr=%llu) = %d\n",
 			       next_bytenr, *num_copiesp);
 		*mirror_nump = 1;
 	}
@@ -1284,15 +1275,13 @@
 		return 0;
 
 	if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-		printk(KERN_INFO
-		       "btrfsic_create_link_to_next_block(mirror_num=%d)\n",
+		pr_info("btrfsic_create_link_to_next_block(mirror_num=%d)\n",
 		       *mirror_nump);
 	ret = btrfsic_map_block(state, next_bytenr,
 				state->metablock_size,
 				next_block_ctx, *mirror_nump);
 	if (ret) {
-		printk(KERN_INFO
-		       "btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
+		pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
 		       next_bytenr, *mirror_nump);
 		btrfsic_release_block_ctx(next_block_ctx);
 		*next_blockp = NULL;
@@ -1318,16 +1307,14 @@
 			if (next_block->logical_bytenr != next_bytenr &&
 			    !(!next_block->is_metadata &&
 			      0 == next_block->logical_bytenr))
-				printk(KERN_INFO
-				       "Referenced block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
+				pr_info("Referenced block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
 				       next_bytenr, next_block_ctx->dev->name,
 				       next_block_ctx->dev_bytenr, *mirror_nump,
 				       btrfsic_get_block_type(state,
 							      next_block),
 				       next_block->logical_bytenr);
 			else
-				printk(KERN_INFO
-				       "Referenced block @%llu (%s/%llu/%d) found in hash table, %c.\n",
+				pr_info("Referenced block @%llu (%s/%llu/%d) found in hash table, %c.\n",
 				       next_bytenr, next_block_ctx->dev->name,
 				       next_block_ctx->dev_bytenr, *mirror_nump,
 				       btrfsic_get_block_type(state,
@@ -1348,7 +1335,7 @@
 	if (NULL == l) {
 		l = btrfsic_block_link_alloc();
 		if (NULL == l) {
-			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+			pr_info("btrfsic: error, kmalloc failed!\n");
 			btrfsic_release_block_ctx(next_block_ctx);
 			*next_blockp = NULL;
 			return -1;
@@ -1381,8 +1368,7 @@
 	if (limit_nesting > 0 && did_alloc_block_link) {
 		ret = btrfsic_read_block(state, next_block_ctx);
 		if (ret < (int)next_block_ctx->len) {
-			printk(KERN_INFO
-			       "btrfsic: read block @logical %llu failed!\n",
+			pr_info("btrfsic: read block @logical %llu failed!\n",
 			       next_bytenr);
 			btrfsic_release_block_ctx(next_block_ctx);
 			*next_blockp = NULL;
@@ -1417,8 +1403,7 @@
 	if (file_extent_item_offset +
 	    offsetof(struct btrfs_file_extent_item, disk_num_bytes) >
 	    block_ctx->len) {
-		printk(KERN_INFO
-		       "btrfsic: file item out of bounce at logical %llu, dev %s\n",
+		pr_info("btrfsic: file item out of bounce at logical %llu, dev %s\n",
 		       block_ctx->start, block_ctx->dev->name);
 		return -1;
 	}
@@ -1429,7 +1414,7 @@
 	if (BTRFS_FILE_EXTENT_REG != file_extent_item.type ||
 	    btrfs_stack_file_extent_disk_bytenr(&file_extent_item) == 0) {
 		if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
-			printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu\n",
+			pr_info("extent_data: type %u, disk_bytenr = %llu\n",
 			       file_extent_item.type,
 			       btrfs_stack_file_extent_disk_bytenr(
 			       &file_extent_item));
@@ -1438,8 +1423,7 @@
 
 	if (file_extent_item_offset + sizeof(struct btrfs_file_extent_item) >
 	    block_ctx->len) {
-		printk(KERN_INFO
-		       "btrfsic: file item out of bounce at logical %llu, dev %s\n",
+		pr_info("btrfsic: file item out of bounce at logical %llu, dev %s\n",
 		       block_ctx->start, block_ctx->dev->name);
 		return -1;
 	}
@@ -1457,8 +1441,7 @@
 	generation = btrfs_stack_file_extent_generation(&file_extent_item);
 
 	if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
-		printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu,"
-		       " offset = %llu, num_bytes = %llu\n",
+		pr_info("extent_data: type %u, disk_bytenr = %llu, offset = %llu, num_bytes = %llu\n",
 		       file_extent_item.type,
 		       btrfs_stack_file_extent_disk_bytenr(&file_extent_item),
 		       btrfs_stack_file_extent_offset(&file_extent_item),
@@ -1477,7 +1460,7 @@
 		    btrfs_num_copies(state->root->fs_info,
 				     next_bytenr, state->datablock_size);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
-			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+			pr_info("num_copies(log_bytenr=%llu) = %d\n",
 			       next_bytenr, num_copies);
 		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
 			struct btrfsic_block_data_ctx next_block_ctx;
@@ -1485,19 +1468,16 @@
 			int block_was_created;
 
 			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-				printk(KERN_INFO "btrfsic_handle_extent_data("
-				       "mirror_num=%d)\n", mirror_num);
+				pr_info("btrfsic_handle_extent_data(mirror_num=%d)\n",
+					mirror_num);
 			if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
-				printk(KERN_INFO
-				       "\tdisk_bytenr = %llu, num_bytes %u\n",
+				pr_info("\tdisk_bytenr = %llu, num_bytes %u\n",
 				       next_bytenr, chunk_len);
 			ret = btrfsic_map_block(state, next_bytenr,
 						chunk_len, &next_block_ctx,
 						mirror_num);
 			if (ret) {
-				printk(KERN_INFO
-				       "btrfsic: btrfsic_map_block(@%llu,"
-				       " mirror=%d) failed!\n",
+				pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
 				       next_bytenr, mirror_num);
 				return -1;
 			}
@@ -1512,8 +1492,7 @@
 					mirror_num,
 					&block_was_created);
 			if (NULL == next_block) {
-				printk(KERN_INFO
-				       "btrfsic: error, kmalloc failed!\n");
+				pr_info("btrfsic: error, kmalloc failed!\n");
 				btrfsic_release_block_ctx(&next_block_ctx);
 				return -1;
 			}
@@ -1523,12 +1502,7 @@
 				    next_block->logical_bytenr != next_bytenr &&
 				    !(!next_block->is_metadata &&
 				      0 == next_block->logical_bytenr)) {
-					printk(KERN_INFO
-					       "Referenced block"
-					       " @%llu (%s/%llu/%d)"
-					       " found in hash table, D,"
-					       " bytenr mismatch"
-					       " (!= stored %llu).\n",
+					pr_info("Referenced block @%llu (%s/%llu/%d) found in hash table, D, bytenr mismatch (!= stored %llu).\n",
 					       next_bytenr,
 					       next_block_ctx.dev->name,
 					       next_block_ctx.dev_bytenr,
@@ -1592,7 +1566,7 @@
 	kfree(multi);
 	if (NULL == block_ctx_out->dev) {
 		ret = -ENXIO;
-		printk(KERN_INFO "btrfsic: error, cannot lookup dev (#1)!\n");
+		pr_info("btrfsic: error, cannot lookup dev (#1)!\n");
 	}
 
 	return ret;
@@ -1638,8 +1612,7 @@
 	BUG_ON(block_ctx->pagev);
 	BUG_ON(block_ctx->mem_to_free);
 	if (block_ctx->dev_bytenr & ((u64)PAGE_SIZE - 1)) {
-		printk(KERN_INFO
-		       "btrfsic: read_block() with unaligned bytenr %llu\n",
+		pr_info("btrfsic: read_block() with unaligned bytenr %llu\n",
 		       block_ctx->dev_bytenr);
 		return -1;
 	}
@@ -1666,8 +1639,7 @@
 
 		bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
 		if (!bio) {
-			printk(KERN_INFO
-			       "btrfsic: bio_alloc() for %u pages failed!\n",
+			pr_info("btrfsic: bio_alloc() for %u pages failed!\n",
 			       num_pages - i);
 			return -1;
 		}
@@ -1682,13 +1654,11 @@
 				break;
 		}
 		if (j == i) {
-			printk(KERN_INFO
-			       "btrfsic: error, failed to add a single page!\n");
+			pr_info("btrfsic: error, failed to add a single page!\n");
 			return -1;
 		}
 		if (submit_bio_wait(bio)) {
-			printk(KERN_INFO
-			       "btrfsic: read error at logical %llu dev %s!\n",
+			pr_info("btrfsic: read error at logical %llu dev %s!\n",
 			       block_ctx->start, block_ctx->dev->name);
 			bio_put(bio);
 			return -1;
@@ -1700,7 +1670,7 @@
 	for (i = 0; i < num_pages; i++) {
 		block_ctx->datav[i] = kmap(block_ctx->pagev[i]);
 		if (!block_ctx->datav[i]) {
-			printk(KERN_INFO "btrfsic: kmap() failed (dev %s)!\n",
+			pr_info("btrfsic: kmap() failed (dev %s)!\n",
 			       block_ctx->dev->name);
 			return -1;
 		}
@@ -1715,19 +1685,17 @@
 
 	BUG_ON(NULL == state);
 
-	printk(KERN_INFO "all_blocks_list:\n");
+	pr_info("all_blocks_list:\n");
 	list_for_each_entry(b_all, &state->all_blocks_list, all_blocks_node) {
 		const struct btrfsic_block_link *l;
 
-		printk(KERN_INFO "%c-block @%llu (%s/%llu/%d)\n",
+		pr_info("%c-block @%llu (%s/%llu/%d)\n",
 		       btrfsic_get_block_type(state, b_all),
 		       b_all->logical_bytenr, b_all->dev_state->name,
 		       b_all->dev_bytenr, b_all->mirror_num);
 
 		list_for_each_entry(l, &b_all->ref_to_list, node_ref_to) {
-			printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
-			       " refers %u* to"
-			       " %c @%llu (%s/%llu/%d)\n",
+			pr_info(" %c @%llu (%s/%llu/%d) refers %u* to %c @%llu (%s/%llu/%d)\n",
 			       btrfsic_get_block_type(state, b_all),
 			       b_all->logical_bytenr, b_all->dev_state->name,
 			       b_all->dev_bytenr, b_all->mirror_num,
@@ -1740,9 +1708,7 @@
 		}
 
 		list_for_each_entry(l, &b_all->ref_from_list, node_ref_from) {
-			printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
-			       " is ref %u* from"
-			       " %c @%llu (%s/%llu/%d)\n",
+			pr_info(" %c @%llu (%s/%llu/%d) is ref %u* from %c @%llu (%s/%llu/%d)\n",
 			       btrfsic_get_block_type(state, b_all),
 			       b_all->logical_bytenr, b_all->dev_state->name,
 			       b_all->dev_bytenr, b_all->mirror_num,
@@ -1754,7 +1720,7 @@
 			       l->block_ref_from->mirror_num);
 		}
 
-		printk(KERN_INFO "\n");
+		pr_info("\n");
 	}
 }
 
@@ -1829,8 +1795,7 @@
 						    mapped_datav[0]);
 			if (num_pages * PAGE_SIZE <
 			    BTRFS_SUPER_INFO_SIZE) {
-				printk(KERN_INFO
-				       "btrfsic: cannot work with too short bios!\n");
+				pr_info("btrfsic: cannot work with too short bios!\n");
 				return;
 			}
 			is_metadata = 1;
@@ -1838,8 +1803,7 @@
 			processed_len = BTRFS_SUPER_INFO_SIZE;
 			if (state->print_mask &
 			    BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
-				printk(KERN_INFO
-				       "[before new superblock is written]:\n");
+				pr_info("[before new superblock is written]:\n");
 				btrfsic_dump_tree_sub(state, block, 0);
 			}
 		}
@@ -1847,8 +1811,7 @@
 			if (!block->is_superblock) {
 				if (num_pages * PAGE_SIZE <
 				    state->metablock_size) {
-					printk(KERN_INFO
-					       "btrfsic: cannot work with too short bios!\n");
+					pr_info("btrfsic: cannot work with too short bios!\n");
 					return;
 				}
 				processed_len = state->metablock_size;
@@ -1863,8 +1826,7 @@
 				if (block->logical_bytenr != bytenr &&
 				    !(!block->is_metadata &&
 				      block->logical_bytenr == 0))
-					printk(KERN_INFO
-					       "Written block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
+					pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
 					       bytenr, dev_state->name,
 					       dev_bytenr,
 					       block->mirror_num,
@@ -1872,8 +1834,7 @@
 								      block),
 					       block->logical_bytenr);
 				else
-					printk(KERN_INFO
-					       "Written block @%llu (%s/%llu/%d) found in hash table, %c.\n",
+					pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c.\n",
 					       bytenr, dev_state->name,
 					       dev_bytenr, block->mirror_num,
 					       btrfsic_get_block_type(state,
@@ -1883,33 +1844,24 @@
 		} else {
 			if (num_pages * PAGE_SIZE <
 			    state->datablock_size) {
-				printk(KERN_INFO
-				       "btrfsic: cannot work with too short bios!\n");
+				pr_info("btrfsic: cannot work with too short bios!\n");
 				return;
 			}
 			processed_len = state->datablock_size;
 			bytenr = block->logical_bytenr;
 			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-				printk(KERN_INFO
-				       "Written block @%llu (%s/%llu/%d)"
-				       " found in hash table, %c.\n",
+				pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c.\n",
 				       bytenr, dev_state->name, dev_bytenr,
 				       block->mirror_num,
 				       btrfsic_get_block_type(state, block));
 		}
 
 		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-			printk(KERN_INFO
-			       "ref_to_list: %cE, ref_from_list: %cE\n",
+			pr_info("ref_to_list: %cE, ref_from_list: %cE\n",
 			       list_empty(&block->ref_to_list) ? ' ' : '!',
 			       list_empty(&block->ref_from_list) ? ' ' : '!');
 		if (btrfsic_is_block_ref_by_superblock(state, block, 0)) {
-			printk(KERN_INFO "btrfs: attempt to overwrite %c-block"
-			       " @%llu (%s/%llu/%d), old(gen=%llu,"
-			       " objectid=%llu, type=%d, offset=%llu),"
-			       " new(gen=%llu),"
-			       " which is referenced by most recent superblock"
-			       " (superblockgen=%llu)!\n",
+			pr_info("btrfs: attempt to overwrite %c-block @%llu (%s/%llu/%d), old(gen=%llu, objectid=%llu, type=%d, offset=%llu), new(gen=%llu), which is referenced by most recent superblock (superblockgen=%llu)!\n",
 			       btrfsic_get_block_type(state, block), bytenr,
 			       dev_state->name, dev_bytenr, block->mirror_num,
 			       block->generation,
@@ -1923,9 +1875,7 @@
 		}
 
 		if (!block->is_iodone && !block->never_written) {
-			printk(KERN_INFO "btrfs: attempt to overwrite %c-block"
-			       " @%llu (%s/%llu/%d), oldgen=%llu, newgen=%llu,"
-			       " which is not yet iodone!\n",
+			pr_info("btrfs: attempt to overwrite %c-block @%llu (%s/%llu/%d), oldgen=%llu, newgen=%llu, which is not yet iodone!\n",
 			       btrfsic_get_block_type(state, block), bytenr,
 			       dev_state->name, dev_bytenr, block->mirror_num,
 			       block->generation,
@@ -2023,8 +1973,7 @@
 						mapped_datav[0]);
 				if (state->print_mask &
 				    BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) {
-					printk(KERN_INFO
-					"[after new superblock is written]:\n");
+					pr_info("[after new superblock is written]:\n");
 					btrfsic_dump_tree_sub(state, block, 0);
 				}
 			} else {
@@ -2036,9 +1985,7 @@
 						0, 0);
 			}
 			if (ret)
-				printk(KERN_INFO
-				       "btrfsic: btrfsic_process_metablock"
-				       "(root @%llu) failed!\n",
+				pr_info("btrfsic: btrfsic_process_metablock(root @%llu) failed!\n",
 				       dev_bytenr);
 		} else {
 			block->is_metadata = 0;
@@ -2065,8 +2012,7 @@
 		if (!is_metadata) {
 			processed_len = state->datablock_size;
 			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-				printk(KERN_INFO "Written block (%s/%llu/?)"
-				       " !found in hash table, D.\n",
+				pr_info("Written block (%s/%llu/?) !found in hash table, D.\n",
 				       dev_state->name, dev_bytenr);
 			if (!state->include_extent_data) {
 				/* ignore that written D block */
@@ -2084,9 +2030,7 @@
 			btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state,
 						       dev_bytenr);
 			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-				printk(KERN_INFO
-				       "Written block @%llu (%s/%llu/?)"
-				       " !found in hash table, M.\n",
+				pr_info("Written block @%llu (%s/%llu/?) !found in hash table, M.\n",
 				       bytenr, dev_state->name, dev_bytenr);
 		}
 
@@ -2100,7 +2044,7 @@
 
 		block = btrfsic_block_alloc();
 		if (NULL == block) {
-			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+			pr_info("btrfsic: error, kmalloc failed!\n");
 			btrfsic_release_block_ctx(&block_ctx);
 			goto continue_loop;
 		}
@@ -2150,8 +2094,7 @@
 			block->next_in_same_bio = NULL;
 		}
 		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-			printk(KERN_INFO
-			       "New written %c-block @%llu (%s/%llu/%d)\n",
+			pr_info("New written %c-block @%llu (%s/%llu/%d)\n",
 			       is_metadata ? 'M' : 'D',
 			       block->logical_bytenr, block->dev_state->name,
 			       block->dev_bytenr, block->mirror_num);
@@ -2162,9 +2105,7 @@
 			ret = btrfsic_process_metablock(state, block,
 							&block_ctx, 0, 0);
 			if (ret)
-				printk(KERN_INFO
-				       "btrfsic: process_metablock(root @%llu)"
-				       " failed!\n",
+				pr_info("btrfsic: process_metablock(root @%llu) failed!\n",
 				       dev_bytenr);
 		}
 		btrfsic_release_block_ctx(&block_ctx);
@@ -2199,8 +2140,7 @@
 
 		if ((dev_state->state->print_mask &
 		     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
-			printk(KERN_INFO
-			       "bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
+			pr_info("bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
 			       bp->bi_error,
 			       btrfsic_get_block_type(dev_state->state, block),
 			       block->logical_bytenr, dev_state->name,
@@ -2211,8 +2151,7 @@
 			dev_state->last_flush_gen++;
 			if ((dev_state->state->print_mask &
 			     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
-				printk(KERN_INFO
-				       "bio_end_io() new %s flush_gen=%llu\n",
+				pr_info("bio_end_io() new %s flush_gen=%llu\n",
 				       dev_state->name,
 				       dev_state->last_flush_gen);
 		}
@@ -2235,8 +2174,7 @@
 	BUG_ON(NULL == block);
 	dev_state = block->dev_state;
 	if ((dev_state->state->print_mask & BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
-		printk(KERN_INFO
-		       "bh_end_io(error=%d) for %c @%llu (%s/%llu/%d)\n",
+		pr_info("bh_end_io(error=%d) for %c @%llu (%s/%llu/%d)\n",
 		       iodone_w_error,
 		       btrfsic_get_block_type(dev_state->state, block),
 		       block->logical_bytenr, block->dev_state->name,
@@ -2247,8 +2185,7 @@
 		dev_state->last_flush_gen++;
 		if ((dev_state->state->print_mask &
 		     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
-			printk(KERN_INFO
-			       "bh_end_io() new %s flush_gen=%llu\n",
+			pr_info("bh_end_io() new %s flush_gen=%llu\n",
 			       dev_state->name, dev_state->last_flush_gen);
 	}
 	if (block->submit_bio_bh_rw & REQ_FUA)
@@ -2271,9 +2208,7 @@
 	if (!(superblock->generation > state->max_superblock_generation ||
 	      0 == state->max_superblock_generation)) {
 		if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
-			printk(KERN_INFO
-			       "btrfsic: superblock @%llu (%s/%llu/%d)"
-			       " with old gen %llu <= %llu\n",
+			pr_info("btrfsic: superblock @%llu (%s/%llu/%d) with old gen %llu <= %llu\n",
 			       superblock->logical_bytenr,
 			       superblock->dev_state->name,
 			       superblock->dev_bytenr, superblock->mirror_num,
@@ -2281,9 +2216,7 @@
 			       state->max_superblock_generation);
 	} else {
 		if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
-			printk(KERN_INFO
-			       "btrfsic: got new superblock @%llu (%s/%llu/%d)"
-			       " with new gen %llu > %llu\n",
+			pr_info("btrfsic: got new superblock @%llu (%s/%llu/%d) with new gen %llu > %llu\n",
 			       superblock->logical_bytenr,
 			       superblock->dev_state->name,
 			       superblock->dev_bytenr, superblock->mirror_num,
@@ -2318,7 +2251,7 @@
 			next_bytenr = btrfs_super_root(super_hdr);
 			if (state->print_mask &
 			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
-				printk(KERN_INFO "root@%llu\n", next_bytenr);
+				pr_info("root@%llu\n", next_bytenr);
 			break;
 		case 1:
 			btrfs_set_disk_key_objectid(&tmp_disk_key,
@@ -2327,7 +2260,7 @@
 			next_bytenr = btrfs_super_chunk_root(super_hdr);
 			if (state->print_mask &
 			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
-				printk(KERN_INFO "chunk@%llu\n", next_bytenr);
+				pr_info("chunk@%llu\n", next_bytenr);
 			break;
 		case 2:
 			btrfs_set_disk_key_objectid(&tmp_disk_key,
@@ -2338,7 +2271,7 @@
 				continue;
 			if (state->print_mask &
 			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
-				printk(KERN_INFO "log@%llu\n", next_bytenr);
+				pr_info("log@%llu\n", next_bytenr);
 			break;
 		}
 
@@ -2346,23 +2279,19 @@
 		    btrfs_num_copies(state->root->fs_info,
 				     next_bytenr, BTRFS_SUPER_INFO_SIZE);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
-			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+			pr_info("num_copies(log_bytenr=%llu) = %d\n",
 			       next_bytenr, num_copies);
 		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
 			int was_created;
 
 			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-				printk(KERN_INFO
-				       "btrfsic_process_written_superblock("
-				       "mirror_num=%d)\n", mirror_num);
+				pr_info("btrfsic_process_written_superblock(mirror_num=%d)\n", mirror_num);
 			ret = btrfsic_map_block(state, next_bytenr,
 						BTRFS_SUPER_INFO_SIZE,
 						&tmp_next_block_ctx,
 						mirror_num);
 			if (ret) {
-				printk(KERN_INFO
-				       "btrfsic: btrfsic_map_block(@%llu,"
-				       " mirror=%d) failed!\n",
+				pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
 				       next_bytenr, mirror_num);
 				return -1;
 			}
@@ -2375,8 +2304,7 @@
 					mirror_num,
 					&was_created);
 			if (NULL == next_block) {
-				printk(KERN_INFO
-				       "btrfsic: error, kmalloc failed!\n");
+				pr_info("btrfsic: error, kmalloc failed!\n");
 				btrfsic_release_block_ctx(&tmp_next_block_ctx);
 				return -1;
 			}
@@ -2425,8 +2353,7 @@
 		 * by the most recent super block.
 		 */
 		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-			printk(KERN_INFO
-			       "btrfsic: abort cyclic linkage (case 1).\n");
+			pr_info("btrfsic: abort cyclic linkage (case 1).\n");
 
 		return ret;
 	}
@@ -2437,9 +2364,7 @@
 	 */
 	list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
 		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-			printk(KERN_INFO
-			       "rl=%d, %c @%llu (%s/%llu/%d)"
-			       " %u* refers to %c @%llu (%s/%llu/%d)\n",
+			pr_info("rl=%d, %c @%llu (%s/%llu/%d) %u* refers to %c @%llu (%s/%llu/%d)\n",
 			       recursion_level,
 			       btrfsic_get_block_type(state, block),
 			       block->logical_bytenr, block->dev_state->name,
@@ -2451,9 +2376,7 @@
 			       l->block_ref_to->dev_bytenr,
 			       l->block_ref_to->mirror_num);
 		if (l->block_ref_to->never_written) {
-			printk(KERN_INFO "btrfs: attempt to write superblock"
-			       " which references block %c @%llu (%s/%llu/%d)"
-			       " which is never written!\n",
+			pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which is never written!\n",
 			       btrfsic_get_block_type(state, l->block_ref_to),
 			       l->block_ref_to->logical_bytenr,
 			       l->block_ref_to->dev_state->name,
@@ -2461,9 +2384,7 @@
 			       l->block_ref_to->mirror_num);
 			ret = -1;
 		} else if (!l->block_ref_to->is_iodone) {
-			printk(KERN_INFO "btrfs: attempt to write superblock"
-			       " which references block %c @%llu (%s/%llu/%d)"
-			       " which is not yet iodone!\n",
+			pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which is not yet iodone!\n",
 			       btrfsic_get_block_type(state, l->block_ref_to),
 			       l->block_ref_to->logical_bytenr,
 			       l->block_ref_to->dev_state->name,
@@ -2471,9 +2392,7 @@
 			       l->block_ref_to->mirror_num);
 			ret = -1;
 		} else if (l->block_ref_to->iodone_w_error) {
-			printk(KERN_INFO "btrfs: attempt to write superblock"
-			       " which references block %c @%llu (%s/%llu/%d)"
-			       " which has write error!\n",
+			pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which has write error!\n",
 			       btrfsic_get_block_type(state, l->block_ref_to),
 			       l->block_ref_to->logical_bytenr,
 			       l->block_ref_to->dev_state->name,
@@ -2486,10 +2405,7 @@
 			   l->parent_generation &&
 			   BTRFSIC_GENERATION_UNKNOWN !=
 			   l->block_ref_to->generation) {
-			printk(KERN_INFO "btrfs: attempt to write superblock"
-			       " which references block %c @%llu (%s/%llu/%d)"
-			       " with generation %llu !="
-			       " parent generation %llu!\n",
+			pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) with generation %llu != parent generation %llu!\n",
 			       btrfsic_get_block_type(state, l->block_ref_to),
 			       l->block_ref_to->logical_bytenr,
 			       l->block_ref_to->dev_state->name,
@@ -2500,11 +2416,7 @@
 			ret = -1;
 		} else if (l->block_ref_to->flush_gen >
 			   l->block_ref_to->dev_state->last_flush_gen) {
-			printk(KERN_INFO "btrfs: attempt to write superblock"
-			       " which references block %c @%llu (%s/%llu/%d)"
-			       " which is not flushed out of disk's write cache"
-			       " (block flush_gen=%llu,"
-			       " dev->flush_gen=%llu)!\n",
+			pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which is not flushed out of disk's write cache (block flush_gen=%llu, dev->flush_gen=%llu)!\n",
 			       btrfsic_get_block_type(state, l->block_ref_to),
 			       l->block_ref_to->logical_bytenr,
 			       l->block_ref_to->dev_state->name,
@@ -2533,8 +2445,7 @@
 	if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
 		/* refer to comment at "abort cyclic linkage (case 1)" */
 		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-			printk(KERN_INFO
-			       "btrfsic: abort cyclic linkage (case 2).\n");
+			pr_info("btrfsic: abort cyclic linkage (case 2).\n");
 
 		return 0;
 	}
@@ -2545,9 +2456,7 @@
 	 */
 	list_for_each_entry(l, &block->ref_from_list, node_ref_from) {
 		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-			printk(KERN_INFO
-			       "rl=%d, %c @%llu (%s/%llu/%d)"
-			       " is ref %u* from %c @%llu (%s/%llu/%d)\n",
+			pr_info("rl=%d, %c @%llu (%s/%llu/%d) is ref %u* from %c @%llu (%s/%llu/%d)\n",
 			       recursion_level,
 			       btrfsic_get_block_type(state, block),
 			       block->logical_bytenr, block->dev_state->name,
@@ -2577,9 +2486,7 @@
 static void btrfsic_print_add_link(const struct btrfsic_state *state,
 				   const struct btrfsic_block_link *l)
 {
-	printk(KERN_INFO
-	       "Add %u* link from %c @%llu (%s/%llu/%d)"
-	       " to %c @%llu (%s/%llu/%d).\n",
+	pr_info("Add %u* link from %c @%llu (%s/%llu/%d) to %c @%llu (%s/%llu/%d).\n",
 	       l->ref_cnt,
 	       btrfsic_get_block_type(state, l->block_ref_from),
 	       l->block_ref_from->logical_bytenr,
@@ -2594,9 +2501,7 @@
 static void btrfsic_print_rem_link(const struct btrfsic_state *state,
 				   const struct btrfsic_block_link *l)
 {
-	printk(KERN_INFO
-	       "Rem %u* link from %c @%llu (%s/%llu/%d)"
-	       " to %c @%llu (%s/%llu/%d).\n",
+	pr_info("Rem %u* link from %c @%llu (%s/%llu/%d) to %c @%llu (%s/%llu/%d).\n",
 	       l->ref_cnt,
 	       btrfsic_get_block_type(state, l->block_ref_from),
 	       l->block_ref_from->logical_bytenr,
@@ -2708,8 +2613,7 @@
 	if (NULL == l) {
 		l = btrfsic_block_link_alloc();
 		if (NULL == l) {
-			printk(KERN_INFO
-			       "btrfsic: error, kmalloc" " failed!\n");
+			pr_info("btrfsic: error, kmalloc failed!\n");
 			return NULL;
 		}
 
@@ -2756,13 +2660,12 @@
 
 		block = btrfsic_block_alloc();
 		if (NULL == block) {
-			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+			pr_info("btrfsic: error, kmalloc failed!\n");
 			return NULL;
 		}
 		dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev);
 		if (NULL == dev_state) {
-			printk(KERN_INFO
-			       "btrfsic: error, lookup dev_state failed!\n");
+			pr_info("btrfsic: error, lookup dev_state failed!\n");
 			btrfsic_block_free(block);
 			return NULL;
 		}
@@ -2774,8 +2677,7 @@
 		block->never_written = never_written;
 		block->mirror_num = mirror_num;
 		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-			printk(KERN_INFO
-			       "New %s%c-block @%llu (%s/%llu/%d)\n",
+			pr_info("New %s%c-block @%llu (%s/%llu/%d)\n",
 			       additional_string,
 			       btrfsic_get_block_type(state, block),
 			       block->logical_bytenr, dev_state->name,
@@ -2810,9 +2712,7 @@
 		ret = btrfsic_map_block(state, bytenr, state->metablock_size,
 					&block_ctx, mirror_num);
 		if (ret) {
-			printk(KERN_INFO "btrfsic:"
-			       " btrfsic_map_block(logical @%llu,"
-			       " mirror %d) failed!\n",
+			pr_info("btrfsic: btrfsic_map_block(logical @%llu, mirror %d) failed!\n",
 			       bytenr, mirror_num);
 			continue;
 		}
@@ -2827,9 +2727,7 @@
 	}
 
 	if (WARN_ON(!match)) {
-		printk(KERN_INFO "btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio,"
-		       " buffer->log_bytenr=%llu, submit_bio(bdev=%s,"
-		       " phys_bytenr=%llu)!\n",
+		pr_info("btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio, buffer->log_bytenr=%llu, submit_bio(bdev=%s, phys_bytenr=%llu)!\n",
 		       bytenr, dev_state->name, dev_bytenr);
 		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
 			ret = btrfsic_map_block(state, bytenr,
@@ -2838,8 +2736,7 @@
 			if (ret)
 				continue;
 
-			printk(KERN_INFO "Read logical bytenr @%llu maps to"
-			       " (%s/%llu/%d)\n",
+			pr_info("Read logical bytenr @%llu maps to (%s/%llu/%d)\n",
 			       bytenr, block_ctx.dev->name,
 			       block_ctx.dev_bytenr, mirror_num);
 		}
@@ -2849,11 +2746,8 @@
 static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
 		struct block_device *bdev)
 {
-	struct btrfsic_dev_state *ds;
-
-	ds = btrfsic_dev_state_hashtable_lookup(bdev,
-						&btrfsic_dev_state_hashtable);
-	return ds;
+	return btrfsic_dev_state_hashtable_lookup(bdev,
+						  &btrfsic_dev_state_hashtable);
 }
 
 int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh)
@@ -2876,9 +2770,7 @@
 		dev_bytenr = 4096 * bh->b_blocknr;
 		if (dev_state->state->print_mask &
 		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
-			printk(KERN_INFO
-			       "submit_bh(op=0x%x,0x%x, blocknr=%llu "
-			       "(bytenr %llu), size=%zu, data=%p, bdev=%p)\n",
+			pr_info("submit_bh(op=0x%x,0x%x, blocknr=%llu (bytenr %llu), size=%zu, data=%p, bdev=%p)\n",
 			       op, op_flags, (unsigned long long)bh->b_blocknr,
 			       dev_bytenr, bh->b_size, bh->b_data, bh->b_bdev);
 		btrfsic_process_written_block(dev_state, dev_bytenr,
@@ -2887,17 +2779,13 @@
 	} else if (NULL != dev_state && (op_flags & REQ_PREFLUSH)) {
 		if (dev_state->state->print_mask &
 		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
-			printk(KERN_INFO
-			       "submit_bh(op=0x%x,0x%x FLUSH, bdev=%p)\n",
+			pr_info("submit_bh(op=0x%x,0x%x FLUSH, bdev=%p)\n",
 			       op, op_flags, bh->b_bdev);
 		if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
 			if ((dev_state->state->print_mask &
 			     (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
 			      BTRFSIC_PRINT_MASK_VERBOSE)))
-				printk(KERN_INFO
-				       "btrfsic_submit_bh(%s) with FLUSH"
-				       " but dummy block already in use"
-				       " (ignored)!\n",
+				pr_info("btrfsic_submit_bh(%s) with FLUSH but dummy block already in use (ignored)!\n",
 				       dev_state->name);
 		} else {
 			struct btrfsic_block *const block =
@@ -2942,9 +2830,7 @@
 		bio_is_patched = 0;
 		if (dev_state->state->print_mask &
 		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
-			printk(KERN_INFO
-			       "submit_bio(rw=%d,0x%x, bi_vcnt=%u,"
-			       " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
+			pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
 			       bio_op(bio), bio->bi_opf, bio->bi_vcnt,
 			       (unsigned long long)bio->bi_iter.bi_sector,
 			       dev_bytenr, bio->bi_bdev);
@@ -2967,8 +2853,7 @@
 			}
 			if (dev_state->state->print_mask &
 			    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
-				printk(KERN_INFO
-				       "#%u: bytenr=%llu, len=%u, offset=%u\n",
+				pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n",
 				       i, cur_bytenr, bio->bi_io_vec[i].bv_len,
 				       bio->bi_io_vec[i].bv_offset);
 			cur_bytenr += bio->bi_io_vec[i].bv_len;
@@ -2985,17 +2870,13 @@
 	} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
 		if (dev_state->state->print_mask &
 		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
-			printk(KERN_INFO
-			       "submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
+			pr_info("submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
 			       bio_op(bio), bio->bi_opf, bio->bi_bdev);
 		if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
 			if ((dev_state->state->print_mask &
 			     (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
 			      BTRFSIC_PRINT_MASK_VERBOSE)))
-				printk(KERN_INFO
-				       "btrfsic_submit_bio(%s) with FLUSH"
-				       " but dummy block already in use"
-				       " (ignored)!\n",
+				pr_info("btrfsic_submit_bio(%s) with FLUSH but dummy block already in use (ignored)!\n",
 				       dev_state->name);
 		} else {
 			struct btrfsic_block *const block =
@@ -3039,14 +2920,12 @@
 	struct btrfs_device *device;
 
 	if (root->nodesize & ((u64)PAGE_SIZE - 1)) {
-		printk(KERN_INFO
-		       "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
+		pr_info("btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
 		       root->nodesize, PAGE_SIZE);
 		return -1;
 	}
 	if (root->sectorsize & ((u64)PAGE_SIZE - 1)) {
-		printk(KERN_INFO
-		       "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
+		pr_info("btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
 		       root->sectorsize, PAGE_SIZE);
 		return -1;
 	}
@@ -3054,7 +2933,7 @@
 	if (!state) {
 		state = vzalloc(sizeof(*state));
 		if (!state) {
-			printk(KERN_INFO "btrfs check-integrity: vzalloc() failed!\n");
+			pr_info("btrfs check-integrity: vzalloc() failed!\n");
 			return -1;
 		}
 	}
@@ -3086,8 +2965,7 @@
 
 		ds = btrfsic_dev_state_alloc();
 		if (NULL == ds) {
-			printk(KERN_INFO
-			       "btrfs check-integrity: kmalloc() failed!\n");
+			pr_info("btrfs check-integrity: kmalloc() failed!\n");
 			mutex_unlock(&btrfsic_mutex);
 			return -1;
 		}
@@ -3148,9 +3026,7 @@
 	}
 
 	if (NULL == state) {
-		printk(KERN_INFO
-		       "btrfsic: error, cannot find state information"
-		       " on umount!\n");
+		pr_info("btrfsic: error, cannot find state information on umount!\n");
 		mutex_unlock(&btrfsic_mutex);
 		return;
 	}
@@ -3177,9 +3053,7 @@
 		if (b_all->is_iodone || b_all->never_written)
 			btrfsic_block_free(b_all);
 		else
-			printk(KERN_INFO "btrfs: attempt to free %c-block"
-			       " @%llu (%s/%llu/%d) on umount which is"
-			       " not yet iodone!\n",
+			pr_info("btrfs: attempt to free %c-block @%llu (%s/%llu/%d) on umount which is not yet iodone!\n",
 			       btrfsic_get_block_type(state, b_all),
 			       b_all->logical_bytenr, b_all->dev_state->name,
 			       b_all->dev_bytenr, b_all->mirror_num);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 029db6e..d4d8b7e 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -698,7 +698,7 @@
 
 			ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
 			if (ret) {
-				bio->bi_error = ret;
+				comp_bio->bi_error = ret;
 				bio_endio(comp_bio);
 			}
 
@@ -728,7 +728,7 @@
 
 	ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
 	if (ret) {
-		bio->bi_error = ret;
+		comp_bio->bi_error = ret;
 		bio_endio(comp_bio);
 	}
 
@@ -783,8 +783,7 @@
 		 */
 		workspace = btrfs_compress_op[i]->alloc_workspace();
 		if (IS_ERR(workspace)) {
-			printk(KERN_WARNING
-	"BTRFS: cannot preallocate compression workspace, will try later");
+			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
 		} else {
 			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
 			btrfs_comp_ws[i].free_ws = 1;
@@ -854,8 +853,7 @@
 					/* no burst */ 1);
 
 			if (__ratelimit(&_rs)) {
-				printk(KERN_WARNING
-			    "no compression workspaces, low memory, retrying");
+				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
 			}
 		}
 		goto again;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index d1c56c9..f6ba165 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -45,9 +45,7 @@
 
 struct btrfs_path *btrfs_alloc_path(void)
 {
-	struct btrfs_path *path;
-	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
-	return path;
+	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
 }
 
 /*
@@ -1102,7 +1100,7 @@
 	int level, ret;
 	int last_ref = 0;
 	int unlock_orig = 0;
-	u64 parent_start;
+	u64 parent_start = 0;
 
 	if (*cow_ret == buf)
 		unlock_orig = 1;
@@ -1121,13 +1119,8 @@
 	else
 		btrfs_node_key(buf, &disk_key, 0);
 
-	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
-		if (parent)
-			parent_start = parent->start;
-		else
-			parent_start = 0;
-	} else
-		parent_start = 0;
+	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
+		parent_start = parent->start;
 
 	cow = btrfs_alloc_tree_block(trans, root, parent_start,
 			root->root_key.objectid, &disk_key, level,
@@ -1170,8 +1163,6 @@
 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
 		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
 			parent_start = buf->start;
-		else
-			parent_start = 0;
 
 		extent_buffer_get(cow);
 		tree_mod_log_set_root_pointer(root, cow, 1);
@@ -1182,11 +1173,6 @@
 		free_extent_buffer(buf);
 		add_root_to_dirty_list(root);
 	} else {
-		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
-			parent_start = parent->start;
-		else
-			parent_start = 0;
-
 		WARN_ON(trans->transid != btrfs_header_generation(parent));
 		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
@@ -1729,20 +1715,6 @@
 	return err;
 }
 
-/*
- * The leaf data grows from end-to-front in the node.
- * this returns the address of the start of the last item,
- * which is the stop of the leaf data stack
- */
-static inline unsigned int leaf_data_end(struct btrfs_root *root,
-					 struct extent_buffer *leaf)
-{
-	u32 nr = btrfs_header_nritems(leaf);
-	if (nr == 0)
-		return BTRFS_LEAF_DATA_SIZE(root);
-	return btrfs_item_offset_nr(leaf, nr - 1);
-}
-
 
 /*
  * search for key in the extent_buffer.  The items start at offset p,
@@ -2268,7 +2240,6 @@
 	u64 search;
 	u64 target;
 	u64 nread = 0;
-	u64 gen;
 	struct extent_buffer *eb;
 	u32 nr;
 	u32 blocksize;
@@ -2313,7 +2284,6 @@
 		search = btrfs_node_blockptr(node, nr);
 		if ((search <= target && target - search <= 65536) ||
 		    (search > target && search - target <= 65536)) {
-			gen = btrfs_node_ptr_generation(node, nr);
 			readahead_tree_block(root, search);
 			nread += blocksize;
 		}
@@ -4341,7 +4311,11 @@
 			if (path->slots[1] == 0)
 				fixup_low_keys(fs_info, path, &disk_key, 1);
 		}
-		btrfs_mark_buffer_dirty(right);
+		/*
+		 * We create a new leaf 'right' for the required ins_len and
+		 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
+		 * the content of ins_len to 'right'.
+		 */
 		return ret;
 	}
 
@@ -4772,8 +4746,9 @@
 
 	if (btrfs_leaf_free_space(root, leaf) < total_size) {
 		btrfs_print_leaf(root, leaf);
-		btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
-		       total_size, btrfs_leaf_free_space(root, leaf));
+		btrfs_crit(root->fs_info,
+			   "not enough freespace need %u have %d",
+			   total_size, btrfs_leaf_free_space(root, leaf));
 		BUG();
 	}
 
@@ -4782,8 +4757,9 @@
 
 		if (old_data < data_end) {
 			btrfs_print_leaf(root, leaf);
-			btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
-			       slot, old_data, data_end);
+			btrfs_crit(root->fs_info,
+				   "slot %d old_data %d data_end %d",
+				   slot, old_data, data_end);
 			BUG_ON(1);
 		}
 		/*
@@ -4793,7 +4769,7 @@
 		for (i = slot; i < nritems; i++) {
 			u32 ioff;
 
-			item = btrfs_item_nr( i);
+			item = btrfs_item_nr(i);
 			ioff = btrfs_token_item_offset(leaf, item, &token);
 			btrfs_set_token_item_offset(leaf, item,
 						    ioff - total_data, &token);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 33fe035..0b8ce2b 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -37,6 +37,7 @@
 #include <linux/workqueue.h>
 #include <linux/security.h>
 #include <linux/sizes.h>
+#include <linux/dynamic_debug.h>
 #include "extent_io.h"
 #include "extent_map.h"
 #include "async-thread.h"
@@ -251,7 +252,8 @@
 #define BTRFS_FEATURE_COMPAT_SAFE_CLEAR		0ULL
 
 #define BTRFS_FEATURE_COMPAT_RO_SUPP			\
-	(BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE)
+	(BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE |	\
+	 BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID)
 
 #define BTRFS_FEATURE_COMPAT_RO_SAFE_SET	0ULL
 #define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR	0ULL
@@ -676,9 +678,25 @@
 struct btrfs_fs_devices;
 struct btrfs_balance_control;
 struct btrfs_delayed_root;
+
+#define BTRFS_FS_BARRIER			1
+#define BTRFS_FS_CLOSING_START			2
+#define BTRFS_FS_CLOSING_DONE			3
+#define BTRFS_FS_LOG_RECOVERING			4
+#define BTRFS_FS_OPEN				5
+#define BTRFS_FS_QUOTA_ENABLED			6
+#define BTRFS_FS_QUOTA_ENABLING			7
+#define BTRFS_FS_QUOTA_DISABLING		8
+#define BTRFS_FS_UPDATE_UUID_TREE_GEN		9
+#define BTRFS_FS_CREATING_FREE_SPACE_TREE	10
+#define BTRFS_FS_BTREE_ERR			11
+#define BTRFS_FS_LOG1_ERR			12
+#define BTRFS_FS_LOG2_ERR			13
+
 struct btrfs_fs_info {
 	u8 fsid[BTRFS_FSID_SIZE];
 	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
+	unsigned long flags;
 	struct btrfs_root *extent_root;
 	struct btrfs_root *tree_root;
 	struct btrfs_root *chunk_root;
@@ -907,10 +925,6 @@
 	int thread_pool_size;
 
 	struct kobject *space_info_kobj;
-	int do_barriers;
-	int closing;
-	int log_root_recovering;
-	int open;
 
 	u64 total_pinned;
 
@@ -987,17 +1001,6 @@
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 	u32 check_integrity_print_mask;
 #endif
-	/*
-	 * quota information
-	 */
-	unsigned int quota_enabled:1;
-
-	/*
-	 * quota_enabled only changes state after a commit. This holds the
-	 * next state.
-	 */
-	unsigned int pending_quota_state:1;
-
 	/* is qgroup tracking in a consistent state? */
 	u64 qgroup_flags;
 
@@ -1061,7 +1064,6 @@
 	wait_queue_head_t replace_wait;
 
 	struct semaphore uuid_tree_rescan_sem;
-	unsigned int update_uuid_tree_gen:1;
 
 	/* Used to reclaim the metadata space in the background. */
 	struct work_struct async_reclaim_work;
@@ -1080,7 +1082,6 @@
 	 */
 	struct list_head pinned_chunks;
 
-	int creating_free_space_tree;
 	/* Used to record internally whether fs has been frozen */
 	int fs_frozen;
 };
@@ -1435,13 +1436,13 @@
 #define cpu_to_le8(v) (v)
 #define __le8 u8
 
-#define read_eb_member(eb, ptr, type, member, result) (			\
+#define read_eb_member(eb, ptr, type, member, result) (\
 	read_extent_buffer(eb, (char *)(result),			\
 			   ((unsigned long)(ptr)) +			\
 			    offsetof(type, member),			\
 			   sizeof(((type *)0)->member)))
 
-#define write_eb_member(eb, ptr, type, member, result) (		\
+#define write_eb_member(eb, ptr, type, member, result) (\
 	write_extent_buffer(eb, (char *)(result),			\
 			   ((unsigned long)(ptr)) +			\
 			    offsetof(type, member),			\
@@ -2293,6 +2294,21 @@
 	return offsetof(struct btrfs_leaf, items);
 }
 
+/*
+ * The leaf data grows from end-to-front in the node.
+ * this returns the address of the start of the last item,
+ * which is the stop of the leaf data stack
+ */
+static inline unsigned int leaf_data_end(struct btrfs_root *root,
+					 struct extent_buffer *leaf)
+{
+	u32 nr = btrfs_header_nritems(leaf);
+
+	if (nr == 0)
+		return BTRFS_LEAF_DATA_SIZE(root);
+	return btrfs_item_offset_nr(leaf, nr - 1);
+}
+
 /* struct btrfs_file_extent_item */
 BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8);
 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr,
@@ -2867,10 +2883,14 @@
 static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
 {
 	/*
-	 * Get synced with close_ctree()
+	 * Do it this way so we only ever do one test_bit in the normal case.
 	 */
-	smp_mb();
-	return fs_info->closing;
+	if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) {
+		if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags))
+			return 2;
+		return 1;
+	}
+	return 0;
 }
 
 /*
@@ -3118,7 +3138,7 @@
 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
 			       int nr);
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
-			      struct extent_state **cached_state);
+			      struct extent_state **cached_state, int dedupe);
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *new_root,
 			     struct btrfs_root *parent_root,
@@ -3161,7 +3181,6 @@
 				    struct btrfs_trans_handle *trans, int mode,
 				    u64 start, u64 num_bytes, u64 min_size,
 				    loff_t actual_len, u64 *alloc_hint);
-int btrfs_inode_check_errors(struct inode *inode);
 extern const struct dentry_operations btrfs_dentry_operations;
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 void btrfs_test_inode_set_ops(struct inode *inode);
@@ -3237,14 +3256,17 @@
 			unsigned long new_flags);
 int btrfs_sync_fs(struct super_block *sb, int wait);
 
+static inline __printf(2, 3)
+void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
+{
+}
+
 #ifdef CONFIG_PRINTK
 __printf(2, 3)
 void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...);
 #else
-static inline __printf(2, 3)
-void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
-{
-}
+#define btrfs_printk(fs_info, fmt, args...) \
+	btrfs_no_printk(fs_info, fmt, ##args)
 #endif
 
 #define btrfs_emerg(fs_info, fmt, args...) \
@@ -3315,7 +3337,35 @@
 	btrfs_printk_ratelimited(fs_info, KERN_NOTICE fmt, ##args)
 #define btrfs_info_rl(fs_info, fmt, args...) \
 	btrfs_printk_ratelimited(fs_info, KERN_INFO fmt, ##args)
-#ifdef DEBUG
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define btrfs_debug(fs_info, fmt, args...)				\
+do {									\
+        DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);         	\
+        if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT))  	\
+		btrfs_printk(fs_info, KERN_DEBUG fmt, ##args);		\
+} while (0)
+#define btrfs_debug_in_rcu(fs_info, fmt, args...) 			\
+do {									\
+        DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); 	        \
+        if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) 		\
+		btrfs_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args);	\
+} while (0)
+#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...)			\
+do {									\
+        DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);         	\
+        if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT))  	\
+		btrfs_printk_rl_in_rcu(fs_info, KERN_DEBUG fmt,		\
+				       ##args);\
+} while (0)
+#define btrfs_debug_rl(fs_info, fmt, args...) 				\
+do {									\
+        DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);         	\
+        if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT))  	\
+		btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt,	\
+					 ##args);			\
+} while (0)
+#elif defined(DEBUG)
 #define btrfs_debug(fs_info, fmt, args...) \
 	btrfs_printk(fs_info, KERN_DEBUG fmt, ##args)
 #define btrfs_debug_in_rcu(fs_info, fmt, args...) \
@@ -3326,13 +3376,13 @@
 	btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt, ##args)
 #else
 #define btrfs_debug(fs_info, fmt, args...) \
-    no_printk(KERN_DEBUG fmt, ##args)
+	btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
 #define btrfs_debug_in_rcu(fs_info, fmt, args...) \
-	no_printk(KERN_DEBUG fmt, ##args)
+	btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
 #define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
-	no_printk(KERN_DEBUG fmt, ##args)
+	btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
 #define btrfs_debug_rl(fs_info, fmt, args...) \
-	no_printk(KERN_DEBUG fmt, ##args)
+	btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
 #endif
 
 #define btrfs_printk_in_rcu(fs_info, fmt, args...)	\
@@ -3363,7 +3413,7 @@
 __cold
 static inline void assfail(char *expr, char *file, int line)
 {
-	pr_err("BTRFS: assertion failed: %s, file: %s, line: %d",
+	pr_err("assertion failed: %s, file: %s, line: %d\n",
 	       expr, file, line);
 	BUG();
 }
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 3eeb9cd..0fcf5f2 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -385,11 +385,8 @@
 					struct btrfs_delayed_node *delayed_node,
 					struct btrfs_key *key)
 {
-	struct btrfs_delayed_item *item;
-
-	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
+	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
 					   NULL, NULL);
-	return item;
 }
 
 static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
@@ -1481,11 +1478,10 @@
 	mutex_lock(&delayed_node->mutex);
 	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
 	if (unlikely(ret)) {
-		btrfs_err(root->fs_info, "err add delayed dir index item(name: %.*s) "
-				"into the insertion tree of the delayed node"
-				"(root id: %llu, inode id: %llu, errno: %d)",
-				name_len, name, delayed_node->root->objectid,
-				delayed_node->inode_id, ret);
+		btrfs_err(root->fs_info,
+			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
+			  name_len, name, delayed_node->root->objectid,
+			  delayed_node->inode_id, ret);
 		BUG();
 	}
 	mutex_unlock(&delayed_node->mutex);
@@ -1553,11 +1549,9 @@
 	mutex_lock(&node->mutex);
 	ret = __btrfs_add_delayed_deletion_item(node, item);
 	if (unlikely(ret)) {
-		btrfs_err(root->fs_info, "err add delayed dir index item(index: %llu) "
-				"into the deletion tree of the delayed node"
-				"(root id: %llu, inode id: %llu, errno: %d)",
-				index, node->root->objectid, node->inode_id,
-				ret);
+		btrfs_err(root->fs_info,
+			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
+			  index, node->root->objectid, node->inode_id, ret);
 		BUG();
 	}
 	mutex_unlock(&node->mutex);
@@ -1874,7 +1868,8 @@
 	 * leads to enospc problems.  This means we also can't do
 	 * delayed inode refs
 	 */
-	if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
+	if (test_bit(BTRFS_FS_LOG_RECOVERING,
+		     &BTRFS_I(inode)->root->fs_info->flags))
 		return -EAGAIN;
 
 	delayed_node = btrfs_get_or_create_delayed_node(inode);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index ac02e04..8d93854 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -322,10 +322,11 @@
 		elem = list_first_entry(&fs_info->tree_mod_seq_list,
 					struct seq_list, list);
 		if (seq >= elem->seq) {
-			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
-				 (u32)(seq >> 32), (u32)seq,
-				 (u32)(elem->seq >> 32), (u32)elem->seq,
-				 delayed_refs);
+			btrfs_debug(fs_info,
+				"holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
+				(u32)(seq >> 32), (u32)seq,
+				(u32)(elem->seq >> 32), (u32)elem->seq,
+				delayed_refs);
 			ret = 1;
 		}
 	}
@@ -770,7 +771,8 @@
 	if (!head_ref)
 		goto free_ref;
 
-	if (fs_info->quota_enabled && is_fstree(ref_root)) {
+	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
+	    is_fstree(ref_root)) {
 		record = kmalloc(sizeof(*record), GFP_NOFS);
 		if (!record)
 			goto free_head_ref;
@@ -828,7 +830,8 @@
 		return -ENOMEM;
 	}
 
-	if (fs_info->quota_enabled && is_fstree(ref_root)) {
+	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
+	    is_fstree(ref_root)) {
 		record = kmalloc(sizeof(*record), GFP_NOFS);
 		if (!record) {
 			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index e9bbff3..05169ef 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -218,8 +218,9 @@
 	}
 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
 	if (ret < 0) {
-		btrfs_warn(fs_info, "error %d while searching for dev_replace item!",
-			ret);
+		btrfs_warn(fs_info,
+			   "error %d while searching for dev_replace item!",
+			   ret);
 		goto out;
 	}
 
@@ -238,8 +239,9 @@
 		 */
 		ret = btrfs_del_item(trans, dev_root, path);
 		if (ret != 0) {
-			btrfs_warn(fs_info, "delete too small dev_replace item failed %d!",
-				ret);
+			btrfs_warn(fs_info,
+				   "delete too small dev_replace item failed %d!",
+				   ret);
 			goto out;
 		}
 		ret = 1;
@@ -251,8 +253,8 @@
 		ret = btrfs_insert_empty_item(trans, dev_root, path,
 					      &key, sizeof(*ptr));
 		if (ret < 0) {
-			btrfs_warn(fs_info, "insert dev_replace item failed %d!",
-				ret);
+			btrfs_warn(fs_info,
+				   "insert dev_replace item failed %d!", ret);
 			goto out;
 		}
 	}
@@ -383,7 +385,7 @@
 
 	ret = btrfs_sysfs_add_device_link(tgt_device->fs_devices, tgt_device);
 	if (ret)
-		btrfs_err(fs_info, "kobj add dev failed %d\n", ret);
+		btrfs_err(fs_info, "kobj add dev failed %d", ret);
 
 	btrfs_wait_ordered_roots(root->fs_info, -1, 0, (u64)-1);
 
@@ -772,9 +774,10 @@
 		break;
 	}
 	if (!dev_replace->tgtdev || !dev_replace->tgtdev->bdev) {
-		btrfs_info(fs_info, "cannot continue dev_replace, tgtdev is missing");
 		btrfs_info(fs_info,
-			"you may cancel the operation after 'mount -o degraded'");
+			   "cannot continue dev_replace, tgtdev is missing");
+		btrfs_info(fs_info,
+			   "you may cancel the operation after 'mount -o degraded'");
 		btrfs_dev_replace_unlock(dev_replace, 1);
 		return 0;
 	}
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 1752625..0dc1a03 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -472,9 +472,10 @@
 	/* BTRFS_MAX_XATTR_SIZE is the same for all dir items */
 	if ((btrfs_dir_data_len(leaf, dir_item) +
 	     btrfs_dir_name_len(leaf, dir_item)) > BTRFS_MAX_XATTR_SIZE(root)) {
-		btrfs_crit(root->fs_info, "invalid dir item name + data len: %u + %u",
-		       (unsigned)btrfs_dir_name_len(leaf, dir_item),
-		       (unsigned)btrfs_dir_data_len(leaf, dir_item));
+		btrfs_crit(root->fs_info,
+			   "invalid dir item name + data len: %u + %u",
+			   (unsigned)btrfs_dir_name_len(leaf, dir_item),
+			   (unsigned)btrfs_dir_data_len(leaf, dir_item));
 		return 1;
 	}
 
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 54bc8c7..3a57f99 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -326,8 +326,7 @@
 
 			read_extent_buffer(buf, &val, 0, csum_size);
 			btrfs_warn_rl(fs_info,
-				"%s checksum verify failed on %llu wanted %X found %X "
-				"level %d",
+				"%s checksum verify failed on %llu wanted %X found %X level %d",
 				fs_info->sb->s_id, buf->start,
 				val, found, btrfs_header_level(buf));
 			if (result != (char *)&inline_result)
@@ -402,7 +401,8 @@
  * Return 0 if the superblock checksum type matches the checksum value of that
  * algorithm. Pass the raw disk superblock data.
  */
-static int btrfs_check_super_csum(char *raw_disk_sb)
+static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
+				  char *raw_disk_sb)
 {
 	struct btrfs_super_block *disk_sb =
 		(struct btrfs_super_block *)raw_disk_sb;
@@ -428,7 +428,7 @@
 	}
 
 	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
-		printk(KERN_ERR "BTRFS: unsupported checksum algorithm %u\n",
+		btrfs_err(fs_info, "unsupported checksum algorithm %u",
 				csum_type);
 		ret = 1;
 	}
@@ -442,7 +442,7 @@
  */
 static int btree_read_extent_buffer_pages(struct btrfs_root *root,
 					  struct extent_buffer *eb,
-					  u64 start, u64 parent_transid)
+					  u64 parent_transid)
 {
 	struct extent_io_tree *io_tree;
 	int failed = 0;
@@ -454,8 +454,7 @@
 	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
 	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
 	while (1) {
-		ret = read_extent_buffer_pages(io_tree, eb, start,
-					       WAIT_COMPLETE,
+		ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
 					       btree_get_extent, mirror_num);
 		if (!ret) {
 			if (!verify_parent_transid(io_tree, eb,
@@ -547,9 +546,10 @@
 }
 
 #define CORRUPT(reason, eb, root, slot)				\
-	btrfs_crit(root->fs_info, "corrupt leaf, %s: block=%llu,"	\
-		   "root=%llu, slot=%d", reason,			\
-	       btrfs_header_bytenr(eb),	root->objectid, slot)
+	btrfs_crit(root->fs_info, "corrupt %s, %s: block=%llu,"	\
+		   " root=%llu, slot=%d",			\
+		   btrfs_header_level(eb) == 0 ? "leaf" : "node",\
+		   reason, btrfs_header_bytenr(eb), root->objectid, slot)
 
 static noinline int check_leaf(struct btrfs_root *root,
 			       struct extent_buffer *leaf)
@@ -636,6 +636,10 @@
 static int check_node(struct btrfs_root *root, struct extent_buffer *node)
 {
 	unsigned long nr = btrfs_header_nritems(node);
+	struct btrfs_key key, next_key;
+	int slot;
+	u64 bytenr;
+	int ret = 0;
 
 	if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) {
 		btrfs_crit(root->fs_info,
@@ -643,7 +647,26 @@
 			   node->start, root->objectid, nr);
 		return -EIO;
 	}
-	return 0;
+
+	for (slot = 0; slot < nr - 1; slot++) {
+		bytenr = btrfs_node_blockptr(node, slot);
+		btrfs_node_key_to_cpu(node, &key, slot);
+		btrfs_node_key_to_cpu(node, &next_key, slot + 1);
+
+		if (!bytenr) {
+			CORRUPT("invalid item slot", node, root, slot);
+			ret = -EIO;
+			goto out;
+		}
+
+		if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
+			CORRUPT("bad key order", node, root, slot);
+			ret = -EIO;
+			goto out;
+		}
+	}
+out:
+	return ret;
 }
 
 static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
@@ -1132,7 +1155,7 @@
 	if (IS_ERR(buf))
 		return;
 	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
-				 buf, 0, WAIT_NONE, btree_get_extent, 0);
+				 buf, WAIT_NONE, btree_get_extent, 0);
 	free_extent_buffer(buf);
 }
 
@@ -1150,7 +1173,7 @@
 
 	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
 
-	ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
+	ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
 				       btree_get_extent, mirror_num);
 	if (ret) {
 		free_extent_buffer(buf);
@@ -1206,7 +1229,7 @@
 	if (IS_ERR(buf))
 		return buf;
 
-	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
+	ret = btree_read_extent_buffer_pages(root, buf, parent_transid);
 	if (ret) {
 		free_extent_buffer(buf);
 		return ERR_PTR(ret);
@@ -1839,7 +1862,7 @@
 		 * Do not do anything if we might cause open_ctree() to block
 		 * before we have finished mounting the filesystem.
 		 */
-		if (!root->fs_info->open)
+		if (!test_bit(BTRFS_FS_OPEN, &root->fs_info->flags))
 			goto sleep;
 
 		if (!mutex_trylock(&root->fs_info->cleaner_mutex))
@@ -2332,8 +2355,6 @@
 	fs_info->qgroup_op_tree = RB_ROOT;
 	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
 	fs_info->qgroup_seq = 1;
-	fs_info->quota_enabled = 0;
-	fs_info->pending_quota_state = 0;
 	fs_info->qgroup_ulist = NULL;
 	fs_info->qgroup_rescan_running = false;
 	mutex_init(&fs_info->qgroup_rescan_lock);
@@ -2518,8 +2539,7 @@
 	root = btrfs_read_tree_root(tree_root, &location);
 	if (!IS_ERR(root)) {
 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
-		fs_info->quota_enabled = 1;
-		fs_info->pending_quota_state = 1;
+		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
 		fs_info->quota_root = root;
 	}
 
@@ -2566,6 +2586,7 @@
 	int num_backups_tried = 0;
 	int backup_index = 0;
 	int max_active;
+	int clear_free_space_tree = 0;
 
 	tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
 	chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
@@ -2710,8 +2731,7 @@
 	extent_io_tree_init(&fs_info->freed_extents[1],
 			     fs_info->btree_inode->i_mapping);
 	fs_info->pinned_extents = &fs_info->freed_extents[0];
-	fs_info->do_barriers = 1;
-
+	set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
 
 	mutex_init(&fs_info->ordered_operations_mutex);
 	mutex_init(&fs_info->tree_log_mutex);
@@ -2762,7 +2782,7 @@
 	 * We want to check superblock checksum, the type is stored inside.
 	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
 	 */
-	if (btrfs_check_super_csum(bh->b_data)) {
+	if (btrfs_check_super_csum(fs_info, bh->b_data)) {
 		btrfs_err(fs_info, "superblock checksum mismatch");
 		err = -EINVAL;
 		brelse(bh);
@@ -3129,6 +3149,26 @@
 	if (sb->s_flags & MS_RDONLY)
 		return 0;
 
+	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
+	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
+		clear_free_space_tree = 1;
+	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
+		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
+		btrfs_warn(fs_info, "free space tree is invalid");
+		clear_free_space_tree = 1;
+	}
+
+	if (clear_free_space_tree) {
+		btrfs_info(fs_info, "clearing free space tree");
+		ret = btrfs_clear_free_space_tree(fs_info);
+		if (ret) {
+			btrfs_warn(fs_info,
+				   "failed to clear free space tree: %d", ret);
+			close_ctree(tree_root);
+			return ret;
+		}
+	}
+
 	if (btrfs_test_opt(tree_root->fs_info, FREE_SPACE_TREE) &&
 	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
 		btrfs_info(fs_info, "creating free space tree");
@@ -3166,18 +3206,6 @@
 
 	btrfs_qgroup_rescan_resume(fs_info);
 
-	if (btrfs_test_opt(tree_root->fs_info, CLEAR_CACHE) &&
-	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
-		btrfs_info(fs_info, "clearing free space tree");
-		ret = btrfs_clear_free_space_tree(fs_info);
-		if (ret) {
-			btrfs_warn(fs_info,
-				"failed to clear free space tree: %d", ret);
-			close_ctree(tree_root);
-			return ret;
-		}
-	}
-
 	if (!fs_info->uuid_root) {
 		btrfs_info(fs_info, "creating UUID tree");
 		ret = btrfs_create_uuid_tree(fs_info);
@@ -3199,10 +3227,9 @@
 			return ret;
 		}
 	} else {
-		fs_info->update_uuid_tree_gen = 1;
+		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
 	}
-
-	fs_info->open = 1;
+	set_bit(BTRFS_FS_OPEN, &fs_info->flags);
 
 	/*
 	 * backuproot only affect mount behavior, and if open_ctree succeeded,
@@ -3607,7 +3634,7 @@
 	}
 
 	if (min_tolerated == INT_MAX) {
-		pr_warn("BTRFS: unknown raid flag: %llu\n", flags);
+		pr_warn("BTRFS: unknown raid flag: %llu", flags);
 		min_tolerated = 0;
 	}
 
@@ -3893,8 +3920,7 @@
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 
-	fs_info->closing = 1;
-	smp_mb();
+	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
 
 	/* wait for the qgroup rescan worker to stop */
 	btrfs_qgroup_wait_for_completion(fs_info, false);
@@ -3939,8 +3965,7 @@
 	kthread_stop(fs_info->transaction_kthread);
 	kthread_stop(fs_info->cleaner_kthread);
 
-	fs_info->closing = 2;
-	smp_mb();
+	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
 
 	btrfs_free_qgroup_config(fs_info);
 
@@ -3965,7 +3990,7 @@
 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
 	btrfs_stop_all_workers(fs_info);
 
-	fs_info->open = 0;
+	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
 	free_root_pointers(fs_info, 1);
 
 	iput(fs_info->btree_inode);
@@ -4036,8 +4061,7 @@
 	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
 	btrfs_assert_tree_locked(buf);
 	if (transid != root->fs_info->generation)
-		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
-		       "found %llu running %llu\n",
+		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
 			buf->start, transid, root->fs_info->generation);
 	was_dirty = set_extent_buffer_dirty(buf);
 	if (!was_dirty)
@@ -4088,7 +4112,7 @@
 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
 {
 	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
-	return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
+	return btree_read_extent_buffer_pages(root, buf, parent_transid);
 }
 
 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
@@ -4100,24 +4124,24 @@
 	int ret = 0;
 
 	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
-		printk(KERN_ERR "BTRFS: no valid FS found\n");
+		btrfs_err(fs_info, "no valid FS found");
 		ret = -EINVAL;
 	}
 	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
-		printk(KERN_WARNING "BTRFS: unrecognized super flag: %llu\n",
+		btrfs_warn(fs_info, "unrecognized super flag: %llu",
 				btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
 	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
-		printk(KERN_ERR "BTRFS: tree_root level too big: %d >= %d\n",
+		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
 				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
 		ret = -EINVAL;
 	}
 	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
-		printk(KERN_ERR "BTRFS: chunk_root level too big: %d >= %d\n",
+		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
 				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
 		ret = -EINVAL;
 	}
 	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
-		printk(KERN_ERR "BTRFS: log_root level too big: %d >= %d\n",
+		btrfs_err(fs_info, "log_root level too big: %d >= %d",
 				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
 		ret = -EINVAL;
 	}
@@ -4128,47 +4152,48 @@
 	 */
 	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
 	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
-		printk(KERN_ERR "BTRFS: invalid sectorsize %llu\n", sectorsize);
+		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
 		ret = -EINVAL;
 	}
 	/* Only PAGE SIZE is supported yet */
 	if (sectorsize != PAGE_SIZE) {
-		printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only support %lu\n",
-				sectorsize, PAGE_SIZE);
+		btrfs_err(fs_info,
+			"sectorsize %llu not supported yet, only support %lu",
+			sectorsize, PAGE_SIZE);
 		ret = -EINVAL;
 	}
 	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
 	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
-		printk(KERN_ERR "BTRFS: invalid nodesize %llu\n", nodesize);
+		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
 		ret = -EINVAL;
 	}
 	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
-		printk(KERN_ERR "BTRFS: invalid leafsize %u, should be %llu\n",
-				le32_to_cpu(sb->__unused_leafsize),
-				nodesize);
+		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
+			  le32_to_cpu(sb->__unused_leafsize), nodesize);
 		ret = -EINVAL;
 	}
 
 	/* Root alignment check */
 	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
-		printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
-				btrfs_super_root(sb));
+		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
+			   btrfs_super_root(sb));
 		ret = -EINVAL;
 	}
 	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
-		printk(KERN_WARNING "BTRFS: chunk_root block unaligned: %llu\n",
-				btrfs_super_chunk_root(sb));
+		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
+			   btrfs_super_chunk_root(sb));
 		ret = -EINVAL;
 	}
 	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
-		printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n",
-				btrfs_super_log_root(sb));
+		btrfs_warn(fs_info, "log_root block unaligned: %llu",
+			   btrfs_super_log_root(sb));
 		ret = -EINVAL;
 	}
 
 	if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
-		printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n",
-				fs_info->fsid, sb->dev_item.fsid);
+		btrfs_err(fs_info,
+			   "dev_item UUID does not match fsid: %pU != %pU",
+			   fs_info->fsid, sb->dev_item.fsid);
 		ret = -EINVAL;
 	}
 
@@ -4178,25 +4203,25 @@
 	 */
 	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
 		btrfs_err(fs_info, "bytes_used is too small %llu",
-		       btrfs_super_bytes_used(sb));
+			  btrfs_super_bytes_used(sb));
 		ret = -EINVAL;
 	}
 	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
 		btrfs_err(fs_info, "invalid stripesize %u",
-		       btrfs_super_stripesize(sb));
+			  btrfs_super_stripesize(sb));
 		ret = -EINVAL;
 	}
 	if (btrfs_super_num_devices(sb) > (1UL << 31))
-		printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
-				btrfs_super_num_devices(sb));
+		btrfs_warn(fs_info, "suspicious number of devices: %llu",
+			   btrfs_super_num_devices(sb));
 	if (btrfs_super_num_devices(sb) == 0) {
-		printk(KERN_ERR "BTRFS: number of devices is 0\n");
+		btrfs_err(fs_info, "number of devices is 0");
 		ret = -EINVAL;
 	}
 
 	if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
-		printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n",
-				btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
+		btrfs_err(fs_info, "super offset mismatch %llu != %u",
+			  btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
 		ret = -EINVAL;
 	}
 
@@ -4205,17 +4230,17 @@
 	 * and one chunk
 	 */
 	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
-		printk(KERN_ERR "BTRFS: system chunk array too big %u > %u\n",
-				btrfs_super_sys_array_size(sb),
-				BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
+		btrfs_err(fs_info, "system chunk array too big %u > %u",
+			  btrfs_super_sys_array_size(sb),
+			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
 		ret = -EINVAL;
 	}
 	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
 			+ sizeof(struct btrfs_chunk)) {
-		printk(KERN_ERR "BTRFS: system chunk array too small %u < %zu\n",
-				btrfs_super_sys_array_size(sb),
-				sizeof(struct btrfs_disk_key)
-				+ sizeof(struct btrfs_chunk));
+		btrfs_err(fs_info, "system chunk array too small %u < %zu",
+			  btrfs_super_sys_array_size(sb),
+			  sizeof(struct btrfs_disk_key)
+			  + sizeof(struct btrfs_chunk));
 		ret = -EINVAL;
 	}
 
@@ -4224,14 +4249,16 @@
 	 * but it's still possible that it's the one that's wrong.
 	 */
 	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
-		printk(KERN_WARNING
-			"BTRFS: suspicious: generation < chunk_root_generation: %llu < %llu\n",
-			btrfs_super_generation(sb), btrfs_super_chunk_root_generation(sb));
+		btrfs_warn(fs_info,
+			"suspicious: generation < chunk_root_generation: %llu < %llu",
+			btrfs_super_generation(sb),
+			btrfs_super_chunk_root_generation(sb));
 	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
 	    && btrfs_super_cache_generation(sb) != (u64)-1)
-		printk(KERN_WARNING
-			"BTRFS: suspicious: generation < cache_generation: %llu < %llu\n",
-			btrfs_super_generation(sb), btrfs_super_cache_generation(sb));
+		btrfs_warn(fs_info,
+			"suspicious: generation < cache_generation: %llu < %llu",
+			btrfs_super_generation(sb),
+			btrfs_super_cache_generation(sb));
 
 	return ret;
 }
@@ -4475,9 +4502,80 @@
 	return 0;
 }
 
+static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
+{
+	struct inode *inode;
+
+	inode = cache->io_ctl.inode;
+	if (inode) {
+		invalidate_inode_pages2(inode->i_mapping);
+		BTRFS_I(inode)->generation = 0;
+		cache->io_ctl.inode = NULL;
+		iput(inode);
+	}
+	btrfs_put_block_group(cache);
+}
+
+void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
+			     struct btrfs_root *root)
+{
+	struct btrfs_block_group_cache *cache;
+
+	spin_lock(&cur_trans->dirty_bgs_lock);
+	while (!list_empty(&cur_trans->dirty_bgs)) {
+		cache = list_first_entry(&cur_trans->dirty_bgs,
+					 struct btrfs_block_group_cache,
+					 dirty_list);
+		if (!cache) {
+			btrfs_err(root->fs_info,
+				  "orphan block group dirty_bgs list");
+			spin_unlock(&cur_trans->dirty_bgs_lock);
+			return;
+		}
+
+		if (!list_empty(&cache->io_list)) {
+			spin_unlock(&cur_trans->dirty_bgs_lock);
+			list_del_init(&cache->io_list);
+			btrfs_cleanup_bg_io(cache);
+			spin_lock(&cur_trans->dirty_bgs_lock);
+		}
+
+		list_del_init(&cache->dirty_list);
+		spin_lock(&cache->lock);
+		cache->disk_cache_state = BTRFS_DC_ERROR;
+		spin_unlock(&cache->lock);
+
+		spin_unlock(&cur_trans->dirty_bgs_lock);
+		btrfs_put_block_group(cache);
+		spin_lock(&cur_trans->dirty_bgs_lock);
+	}
+	spin_unlock(&cur_trans->dirty_bgs_lock);
+
+	while (!list_empty(&cur_trans->io_bgs)) {
+		cache = list_first_entry(&cur_trans->io_bgs,
+					 struct btrfs_block_group_cache,
+					 io_list);
+		if (!cache) {
+			btrfs_err(root->fs_info,
+				  "orphan block group on io_bgs list");
+			return;
+		}
+
+		list_del_init(&cache->io_list);
+		spin_lock(&cache->lock);
+		cache->disk_cache_state = BTRFS_DC_ERROR;
+		spin_unlock(&cache->lock);
+		btrfs_cleanup_bg_io(cache);
+	}
+}
+
 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
 				   struct btrfs_root *root)
 {
+	btrfs_cleanup_dirty_bgs(cur_trans, root);
+	ASSERT(list_empty(&cur_trans->dirty_bgs));
+	ASSERT(list_empty(&cur_trans->io_bgs));
+
 	btrfs_destroy_delayed_refs(cur_trans, root);
 
 	cur_trans->state = TRANS_STATE_COMMIT_START;
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index f19a982..1a3237e 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -136,6 +136,8 @@
 			     struct btrfs_fs_info *fs_info);
 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root);
+void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *trans,
+			     struct btrfs_root *root);
 void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
 				  struct btrfs_root *root);
 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 665da8f..210c94a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -87,7 +87,8 @@
 			  int force);
 static int find_next_key(struct btrfs_path *path, int level,
 			 struct btrfs_key *key);
-static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
+static void dump_space_info(struct btrfs_fs_info *fs_info,
+			    struct btrfs_space_info *info, u64 bytes,
 			    int dump_block_groups);
 static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
 				    u64 ram_bytes, u64 num_bytes, int delalloc);
@@ -266,9 +267,8 @@
 
 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
 		bytenr = btrfs_sb_offset(i);
-		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
-				       cache->key.objectid, bytenr,
-				       0, &logical, &nr, &stripe_len);
+		ret = btrfs_rmap_block(root->fs_info, cache->key.objectid,
+				       bytenr, 0, &logical, &nr, &stripe_len);
 		if (ret)
 			return ret;
 
@@ -730,11 +730,7 @@
 static struct btrfs_block_group_cache *
 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
 {
-	struct btrfs_block_group_cache *cache;
-
-	cache = block_group_cache_tree_search(info, bytenr, 0);
-
-	return cache;
+	return block_group_cache_tree_search(info, bytenr, 0);
 }
 
 /*
@@ -744,11 +740,7 @@
 						 struct btrfs_fs_info *info,
 						 u64 bytenr)
 {
-	struct btrfs_block_group_cache *cache;
-
-	cache = block_group_cache_tree_search(info, bytenr, 1);
-
-	return cache;
+	return block_group_cache_tree_search(info, bytenr, 1);
 }
 
 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
@@ -2360,7 +2352,13 @@
 		ins.type = BTRFS_EXTENT_ITEM_KEY;
 	}
 
-	BUG_ON(node->ref_mod != 1);
+	if (node->ref_mod != 1) {
+		btrfs_err(root->fs_info,
+	"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
+			  node->bytenr, node->ref_mod, node->action, ref_root,
+			  parent);
+		return -EIO;
+	}
 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
 		BUG_ON(!extent_op || !extent_op->update_flags);
 		ret = alloc_reserved_tree_block(trans, root,
@@ -2590,7 +2588,9 @@
 					if (must_insert_reserved)
 						locked_ref->must_insert_reserved = 1;
 					locked_ref->processing = 0;
-					btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
+					btrfs_debug(fs_info,
+						    "run_delayed_extent_op returned %d",
+						    ret);
 					btrfs_delayed_ref_unlock(locked_ref);
 					return ret;
 				}
@@ -2650,7 +2650,8 @@
 			locked_ref->processing = 0;
 			btrfs_delayed_ref_unlock(locked_ref);
 			btrfs_put_delayed_ref(ref);
-			btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
+			btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
+				    ret);
 			return ret;
 		}
 
@@ -2940,7 +2941,7 @@
 	if (trans->aborted)
 		return 0;
 
-	if (root->fs_info->creating_free_space_tree)
+	if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &root->fs_info->flags))
 		return 0;
 
 	if (root == root->fs_info->extent_root)
@@ -2971,7 +2972,6 @@
 			spin_unlock(&delayed_refs->lock);
 			goto out;
 		}
-		count = (unsigned long)-1;
 
 		while (node) {
 			head = rb_entry(node, struct btrfs_delayed_ref_head,
@@ -3694,6 +3694,8 @@
 			goto again;
 		}
 		spin_unlock(&cur_trans->dirty_bgs_lock);
+	} else if (ret < 0) {
+		btrfs_cleanup_dirty_bgs(cur_trans, root);
 	}
 
 	btrfs_free_path(path);
@@ -4429,7 +4431,7 @@
 	if (left < thresh && btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
 		btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
 			left, thresh, type);
-		dump_space_info(info, 0, 0);
+		dump_space_info(root->fs_info, info, 0, 0);
 	}
 
 	if (left < thresh) {
@@ -5186,7 +5188,7 @@
 		 * which means we won't have fs_info->fs_root set, so don't do
 		 * the async reclaim as we will panic.
 		 */
-		if (!root->fs_info->log_root_recovering &&
+		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags) &&
 		    need_do_async_reclaim(space_info, root, used) &&
 		    !work_busy(&root->fs_info->async_reclaim_work)) {
 			trace_btrfs_trigger_flush(root->fs_info,
@@ -5792,7 +5794,7 @@
 	int ret;
 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
 
-	if (root->fs_info->quota_enabled) {
+	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) {
 		/* One for parent inode, two for dir entries */
 		num_bytes = 3 * root->nodesize;
 		ret = btrfs_qgroup_reserve_meta(root, num_bytes);
@@ -5970,7 +5972,7 @@
 	csum_bytes = BTRFS_I(inode)->csum_bytes;
 	spin_unlock(&BTRFS_I(inode)->lock);
 
-	if (root->fs_info->quota_enabled) {
+	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) {
 		ret = btrfs_qgroup_reserve_meta(root,
 				nr_extents * root->nodesize);
 		if (ret)
@@ -6110,8 +6112,6 @@
  * @start: start range we are writing to
  * @len: how long the range we are writing to
  *
- * TODO: This function will finally replace old btrfs_delalloc_reserve_space()
- *
  * This will do the following things
  *
  * o reserve space in data space info for num bytes
@@ -6930,8 +6930,9 @@
 			}
 
 			if (ret) {
-				btrfs_err(info, "umm, got %d back from search, was looking for %llu",
-					ret, bytenr);
+				btrfs_err(info,
+					  "umm, got %d back from search, was looking for %llu",
+					  ret, bytenr);
 				if (ret > 0)
 					btrfs_print_leaf(extent_root,
 							 path->nodes[0]);
@@ -6977,7 +6978,8 @@
 		ret = btrfs_search_slot(trans, extent_root, &key, path,
 					-1, 1);
 		if (ret) {
-			btrfs_err(info, "umm, got %d back from search, was looking for %llu",
+			btrfs_err(info,
+				  "umm, got %d back from search, was looking for %llu",
 				ret, bytenr);
 			btrfs_print_leaf(extent_root, path->nodes[0]);
 		}
@@ -7004,8 +7006,9 @@
 
 	refs = btrfs_extent_refs(leaf, ei);
 	if (refs < refs_to_drop) {
-		btrfs_err(info, "trying to drop %d refs but we only have %Lu "
-			  "for bytenr %Lu", refs_to_drop, refs, bytenr);
+		btrfs_err(info,
+			  "trying to drop %d refs but we only have %Lu for bytenr %Lu",
+			  refs_to_drop, refs, bytenr);
 		ret = -EINVAL;
 		btrfs_abort_transaction(trans, ret);
 		goto out;
@@ -7901,23 +7904,24 @@
 	return ret;
 }
 
-static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
+static void dump_space_info(struct btrfs_fs_info *fs_info,
+			    struct btrfs_space_info *info, u64 bytes,
 			    int dump_block_groups)
 {
 	struct btrfs_block_group_cache *cache;
 	int index = 0;
 
 	spin_lock(&info->lock);
-	printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
-	       info->flags,
-	       info->total_bytes - info->bytes_used - info->bytes_pinned -
-	       info->bytes_reserved - info->bytes_readonly -
-	       info->bytes_may_use, (info->full) ? "" : "not ");
-	printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
-	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
-	       info->total_bytes, info->bytes_used, info->bytes_pinned,
-	       info->bytes_reserved, info->bytes_may_use,
-	       info->bytes_readonly);
+	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
+		   info->flags,
+		   info->total_bytes - info->bytes_used - info->bytes_pinned -
+		   info->bytes_reserved - info->bytes_readonly -
+		   info->bytes_may_use, (info->full) ? "" : "not ");
+	btrfs_info(fs_info,
+		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
+		info->total_bytes, info->bytes_used, info->bytes_pinned,
+		info->bytes_reserved, info->bytes_may_use,
+		info->bytes_readonly);
 	spin_unlock(&info->lock);
 
 	if (!dump_block_groups)
@@ -7927,12 +7931,11 @@
 again:
 	list_for_each_entry(cache, &info->block_groups[index], list) {
 		spin_lock(&cache->lock);
-		printk(KERN_INFO "BTRFS: "
-			   "block group %llu has %llu bytes, "
-			   "%llu used %llu pinned %llu reserved %s\n",
-		       cache->key.objectid, cache->key.offset,
-		       btrfs_block_group_used(&cache->item), cache->pinned,
-		       cache->reserved, cache->ro ? "[readonly]" : "");
+		btrfs_info(fs_info,
+			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
+			cache->key.objectid, cache->key.offset,
+			btrfs_block_group_used(&cache->item), cache->pinned,
+			cache->reserved, cache->ro ? "[readonly]" : "");
 		btrfs_dump_free_space(cache, bytes);
 		spin_unlock(&cache->lock);
 	}
@@ -7946,6 +7949,7 @@
 			 u64 empty_size, u64 hint_byte,
 			 struct btrfs_key *ins, int is_data, int delalloc)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	bool final_tried = num_bytes == min_alloc_size;
 	u64 flags;
 	int ret;
@@ -7956,8 +7960,7 @@
 	ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
 			       hint_byte, ins, flags, delalloc);
 	if (!ret && !is_data) {
-		btrfs_dec_block_group_reservations(root->fs_info,
-						   ins->objectid);
+		btrfs_dec_block_group_reservations(fs_info, ins->objectid);
 	} else if (ret == -ENOSPC) {
 		if (!final_tried && ins->offset) {
 			num_bytes = min(num_bytes >> 1, ins->offset);
@@ -7967,14 +7970,15 @@
 			if (num_bytes == min_alloc_size)
 				final_tried = true;
 			goto again;
-		} else if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
+		} else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
 			struct btrfs_space_info *sinfo;
 
-			sinfo = __find_space_info(root->fs_info, flags);
-			btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
-				flags, num_bytes);
+			sinfo = __find_space_info(fs_info, flags);
+			btrfs_err(root->fs_info,
+				  "allocation failed flags %llu, wanted %llu",
+				  flags, num_bytes);
 			if (sinfo)
-				dump_space_info(sinfo, num_bytes, 1);
+				dump_space_info(fs_info, sinfo, num_bytes, 1);
 		}
 	}
 
@@ -8462,7 +8466,6 @@
 	u64 refs;
 	u64 flags;
 	u32 nritems;
-	u32 blocksize;
 	struct btrfs_key key;
 	struct extent_buffer *eb;
 	int ret;
@@ -8480,7 +8483,6 @@
 
 	eb = path->nodes[wc->level];
 	nritems = btrfs_header_nritems(eb);
-	blocksize = root->nodesize;
 
 	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
 		if (nread >= wc->reada_count)
@@ -8544,7 +8546,7 @@
 	u64 bytenr, num_bytes;
 
 	/* We can be called directly from walk_up_proc() */
-	if (!root->fs_info->quota_enabled)
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags))
 		return 0;
 
 	for (i = 0; i < nr; i++) {
@@ -8653,7 +8655,7 @@
 	BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
 	BUG_ON(root_eb == NULL);
 
-	if (!root->fs_info->quota_enabled)
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags))
 		return 0;
 
 	if (!extent_buffer_uptodate(root_eb)) {
@@ -8884,14 +8886,13 @@
 	ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
 				       &wc->refs[level - 1],
 				       &wc->flags[level - 1]);
-	if (ret < 0) {
-		btrfs_tree_unlock(next);
-		return ret;
-	}
+	if (ret < 0)
+		goto out_unlock;
 
 	if (unlikely(wc->refs[level - 1] == 0)) {
 		btrfs_err(root->fs_info, "Missing references.");
-		BUG();
+		ret = -EIO;
+		goto out_unlock;
 	}
 	*lookup_info = 0;
 
@@ -8943,7 +8944,12 @@
 	}
 
 	level--;
-	BUG_ON(level != btrfs_header_level(next));
+	ASSERT(level == btrfs_header_level(next));
+	if (level != btrfs_header_level(next)) {
+		btrfs_err(root->fs_info, "mismatched level");
+		ret = -EIO;
+		goto out_unlock;
+	}
 	path->nodes[level] = next;
 	path->slots[level] = 0;
 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
@@ -8958,8 +8964,15 @@
 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
 			parent = path->nodes[level]->start;
 		} else {
-			BUG_ON(root->root_key.objectid !=
+			ASSERT(root->root_key.objectid ==
 			       btrfs_header_owner(path->nodes[level]));
+			if (root->root_key.objectid !=
+			    btrfs_header_owner(path->nodes[level])) {
+				btrfs_err(root->fs_info,
+						"mismatched block owner");
+				ret = -EIO;
+				goto out_unlock;
+			}
 			parent = 0;
 		}
 
@@ -8968,20 +8981,24 @@
 						     generation, level - 1);
 			if (ret) {
 				btrfs_err_rl(root->fs_info,
-					"Error "
-					"%d accounting shared subtree. Quota "
-					"is out of sync, rescan required.",
-					ret);
+					     "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
+					     ret);
 			}
 		}
 		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
 				root->root_key.objectid, level - 1, 0);
-		BUG_ON(ret); /* -ENOMEM */
+		if (ret)
+			goto out_unlock;
 	}
+
+	*lookup_info = 1;
+	ret = 1;
+
+out_unlock:
 	btrfs_tree_unlock(next);
 	free_extent_buffer(next);
-	*lookup_info = 1;
-	return 1;
+
+	return ret;
 }
 
 /*
@@ -9061,10 +9078,8 @@
 			ret = account_leaf_items(trans, root, eb);
 			if (ret) {
 				btrfs_err_rl(root->fs_info,
-					"error "
-					"%d accounting leaf items. Quota "
-					"is out of sync, rescan required.",
-					ret);
+					     "error %d accounting leaf items. Quota is out of sync, rescan required.",
+					     ret);
 			}
 		}
 		/* make block locked assertion in clean_tree_block happy */
@@ -9180,9 +9195,10 @@
 			 struct btrfs_block_rsv *block_rsv, int update_ref,
 			 int for_reloc)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_path *path;
 	struct btrfs_trans_handle *trans;
-	struct btrfs_root *tree_root = root->fs_info->tree_root;
+	struct btrfs_root *tree_root = fs_info->tree_root;
 	struct btrfs_root_item *root_item = &root->root_item;
 	struct walk_control *wc;
 	struct btrfs_key key;
@@ -9191,7 +9207,7 @@
 	int level;
 	bool root_dropped = false;
 
-	btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
+	btrfs_debug(fs_info, "Drop subvolume %llu", root->objectid);
 
 	path = btrfs_alloc_path();
 	if (!path) {
@@ -9320,7 +9336,8 @@
 
 			btrfs_end_transaction_throttle(trans, tree_root);
 			if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
-				pr_debug("BTRFS: drop snapshot early exit\n");
+				btrfs_debug(fs_info,
+					    "drop snapshot early exit");
 				err = -EAGAIN;
 				goto out_free;
 			}
@@ -9386,7 +9403,7 @@
 	if (!for_reloc && root_dropped == false)
 		btrfs_add_dead_root(root);
 	if (err && err != -EAGAIN)
-		btrfs_handle_fs_error(root->fs_info, err, NULL);
+		btrfs_handle_fs_error(fs_info, err, NULL);
 	return err;
 }
 
@@ -10020,7 +10037,7 @@
 		if (WARN_ON(space_info->bytes_pinned > 0 ||
 			    space_info->bytes_reserved > 0 ||
 			    space_info->bytes_may_use > 0))
-			dump_space_info(space_info, 0, 0);
+			dump_space_info(info, space_info, 0, 0);
 		list_del(&space_info->list);
 		for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
 			struct kobject *kobj;
@@ -10069,7 +10086,8 @@
 
 	return;
 out_err:
-	pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
+	btrfs_warn(cache->fs_info,
+		   "failed to add kobject for block cache, ignoring");
 }
 
 static struct btrfs_block_group_cache *
@@ -10127,6 +10145,11 @@
 	struct extent_buffer *leaf;
 	int need_clear = 0;
 	u64 cache_gen;
+	u64 feature;
+	int mixed;
+
+	feature = btrfs_super_incompat_flags(info->super_copy);
+	mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
 
 	root = info->extent_root;
 	key.objectid = 0;
@@ -10180,6 +10203,15 @@
 				   btrfs_item_ptr_offset(leaf, path->slots[0]),
 				   sizeof(cache->item));
 		cache->flags = btrfs_block_group_flags(&cache->item);
+		if (!mixed &&
+		    ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
+		    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
+			btrfs_err(info,
+"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
+				  cache->key.objectid);
+			ret = -EINVAL;
+			goto error;
+		}
 
 		key.objectid = found_key.objectid + found_key.offset;
 		btrfs_release_path(path);
@@ -10789,7 +10821,7 @@
 	struct btrfs_trans_handle *trans;
 	int ret = 0;
 
-	if (!fs_info->open)
+	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
 		return;
 
 	spin_lock(&fs_info->unused_bgs_lock);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 44fe66b..66a7551 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -20,6 +20,7 @@
 #include "locking.h"
 #include "rcu-string.h"
 #include "backref.h"
+#include "transaction.h"
 
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
@@ -74,8 +75,7 @@
 
 	while (!list_empty(&buffers)) {
 		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
-		printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu "
-		       "refs %d\n",
+		pr_err("BTRFS: buffer leak start %llu len %lu refs %d\n",
 		       eb->start, eb->len, atomic_read(&eb->refs));
 		list_del(&eb->leak_list);
 		kmem_cache_free(extent_buffer_cache, eb);
@@ -460,8 +460,7 @@
 	if (node) {
 		struct extent_state *found;
 		found = rb_entry(node, struct extent_state, rb_node);
-		printk(KERN_ERR "BTRFS: found node %llu %llu on insert of "
-		       "%llu %llu\n",
+		pr_err("BTRFS: found node %llu %llu on insert of %llu %llu\n",
 		       found->start, found->end, start, end);
 		return -EEXIST;
 	}
@@ -572,9 +571,8 @@
 
 static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
 {
-	btrfs_panic(tree_fs_info(tree), err, "Locking error: "
-		    "Extent tree was modified by another "
-		    "thread while locked.");
+	btrfs_panic(tree_fs_info(tree), err,
+		    "Locking error: Extent tree was modified by another thread while locked.");
 }
 
 /*
@@ -1729,7 +1727,7 @@
 }
 
 void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
-				 struct page *locked_page,
+				 u64 delalloc_end, struct page *locked_page,
 				 unsigned clear_bits,
 				 unsigned long page_ops)
 {
@@ -2122,8 +2120,9 @@
 
 	if (failrec->in_validation) {
 		/* there was no real error, just free the record */
-		pr_debug("clean_io_failure: freeing dummy error at %llu\n",
-			 failrec->start);
+		btrfs_debug(fs_info,
+			"clean_io_failure: freeing dummy error at %llu",
+			failrec->start);
 		goto out;
 	}
 	if (fs_info->sb->s_flags & MS_RDONLY)
@@ -2189,6 +2188,7 @@
 int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
 		struct io_failure_record **failrec_ret)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct io_failure_record *failrec;
 	struct extent_map *em;
 	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
@@ -2236,8 +2236,9 @@
 						 em->compress_type);
 		}
 
-		pr_debug("Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu\n",
-			 logical, start, failrec->len);
+		btrfs_debug(fs_info,
+			"Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
+			logical, start, failrec->len);
 
 		failrec->logical = logical;
 		free_extent_map(em);
@@ -2255,9 +2256,10 @@
 			return ret;
 		}
 	} else {
-		pr_debug("Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d\n",
-			 failrec->logical, failrec->start, failrec->len,
-			 failrec->in_validation);
+		btrfs_debug(fs_info,
+			"Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d",
+			failrec->logical, failrec->start, failrec->len,
+			failrec->in_validation);
 		/*
 		 * when data can be on disk more than twice, add to failrec here
 		 * (e.g. with a list for failed_mirror) to make
@@ -2273,18 +2275,19 @@
 int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
 			   struct io_failure_record *failrec, int failed_mirror)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int num_copies;
 
-	num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
-				      failrec->logical, failrec->len);
+	num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
 	if (num_copies == 1) {
 		/*
 		 * we only have a single copy of the data, so don't bother with
 		 * all the retry and error correction code that follows. no
 		 * matter what the error is, it is very likely to persist.
 		 */
-		pr_debug("Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
-			 num_copies, failrec->this_mirror, failed_mirror);
+		btrfs_debug(fs_info,
+			"Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
+			num_copies, failrec->this_mirror, failed_mirror);
 		return 0;
 	}
 
@@ -2323,8 +2326,9 @@
 	}
 
 	if (failrec->this_mirror > num_copies) {
-		pr_debug("Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
-			 num_copies, failrec->this_mirror, failed_mirror);
+		btrfs_debug(fs_info,
+			"Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
+			num_copies, failrec->this_mirror, failed_mirror);
 		return 0;
 	}
 
@@ -2415,8 +2419,9 @@
 	}
 	bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
 
-	pr_debug("Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d\n",
-		 read_mode, failrec->this_mirror, failrec->in_validation);
+	btrfs_debug(btrfs_sb(inode->i_sb),
+		"Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
+		read_mode, failrec->this_mirror, failrec->in_validation);
 
 	ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror,
 					 failrec->bio_flags, 0);
@@ -2484,8 +2489,7 @@
 					bvec->bv_offset, bvec->bv_len);
 			else
 				btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
-				   "incomplete page write in btrfs with offset %u and "
-				   "length %u",
+				   "incomplete page write in btrfs with offset %u and length %u",
 					bvec->bv_offset, bvec->bv_len);
 		}
 
@@ -2541,10 +2545,12 @@
 	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
 		struct inode *inode = page->mapping->host;
+		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 
-		pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
-			 "mirror=%u\n", (u64)bio->bi_iter.bi_sector,
-			 bio->bi_error, io_bio->mirror_num);
+		btrfs_debug(fs_info,
+			"end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
+			(u64)bio->bi_iter.bi_sector, bio->bi_error,
+			io_bio->mirror_num);
 		tree = &BTRFS_I(inode)->io_tree;
 
 		/* We always issue full-page reads, but if some block
@@ -2554,13 +2560,12 @@
 		 * if they don't add up to a full page.  */
 		if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
 			if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
-				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
-				   "partial page read in btrfs with offset %u and length %u",
+				btrfs_err(fs_info,
+					"partial page read in btrfs with offset %u and length %u",
 					bvec->bv_offset, bvec->bv_len);
 			else
-				btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
-				   "incomplete page read in btrfs with offset %u and "
-				   "length %u",
+				btrfs_info(fs_info,
+					"incomplete page read in btrfs with offset %u and length %u",
 					bvec->bv_offset, bvec->bv_len);
 		}
 
@@ -3624,7 +3629,6 @@
 static void set_btree_ioerr(struct page *page)
 {
 	struct extent_buffer *eb = (struct extent_buffer *)page->private;
-	struct btrfs_inode *btree_ino = BTRFS_I(eb->fs_info->btree_inode);
 
 	SetPageError(page);
 	if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
@@ -3670,13 +3674,13 @@
 	 */
 	switch (eb->log_index) {
 	case -1:
-		set_bit(BTRFS_INODE_BTREE_ERR, &btree_ino->runtime_flags);
+		set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags);
 		break;
 	case 0:
-		set_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
+		set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags);
 		break;
 	case 1:
-		set_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);
+		set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags);
 		break;
 	default:
 		BUG(); /* unexpected, logic error */
@@ -3721,8 +3725,10 @@
 	struct block_device *bdev = fs_info->fs_devices->latest_bdev;
 	struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
 	u64 offset = eb->start;
+	u32 nritems;
 	unsigned long i, num_pages;
 	unsigned long bio_flags = 0;
+	unsigned long start, end;
 	int write_flags = (epd->sync_io ? WRITE_SYNC : 0) | REQ_META;
 	int ret = 0;
 
@@ -3732,6 +3738,23 @@
 	if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
 		bio_flags = EXTENT_BIO_TREE_LOG;
 
+	/* set btree blocks beyond nritems with 0 to avoid stale content. */
+	nritems = btrfs_header_nritems(eb);
+	if (btrfs_header_level(eb) > 0) {
+		end = btrfs_node_key_ptr_offset(nritems);
+
+		memset_extent_buffer(eb, 0, end, eb->len - end);
+	} else {
+		/*
+		 * leaf:
+		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
+		 */
+		start = btrfs_item_nr_offset(nritems);
+		end = btrfs_leaf_data(eb) +
+		      leaf_data_end(fs_info->tree_root, eb);
+		memset_extent_buffer(eb, 0, start, end - start);
+	}
+
 	for (i = 0; i < num_pages; i++) {
 		struct page *p = eb->pages[i];
 
@@ -4487,21 +4510,36 @@
 			flags |= (FIEMAP_EXTENT_DELALLOC |
 				  FIEMAP_EXTENT_UNKNOWN);
 		} else if (fieinfo->fi_extents_max) {
+			struct btrfs_trans_handle *trans;
+
 			u64 bytenr = em->block_start -
 				(em->start - em->orig_start);
 
 			disko = em->block_start + offset_in_extent;
 
 			/*
+			 * We need a trans handle to get delayed refs
+			 */
+			trans = btrfs_join_transaction(root);
+			/*
+			 * It's OK if we can't start a trans we can still check
+			 * from commit_root
+			 */
+			if (IS_ERR(trans))
+				trans = NULL;
+
+			/*
 			 * As btrfs supports shared space, this information
 			 * can be exported to userspace tools via
 			 * flag FIEMAP_EXTENT_SHARED.  If fi_extents_max == 0
 			 * then we're just getting a count and we can skip the
 			 * lookup stuff.
 			 */
-			ret = btrfs_check_shared(NULL, root->fs_info,
+			ret = btrfs_check_shared(trans, root->fs_info,
 						 root->objectid,
 						 btrfs_ino(inode), bytenr);
+			if (trans)
+				btrfs_end_transaction(trans, root);
 			if (ret < 0)
 				goto out_free;
 			if (ret)
@@ -5173,11 +5211,10 @@
 }
 
 int read_extent_buffer_pages(struct extent_io_tree *tree,
-			     struct extent_buffer *eb, u64 start, int wait,
+			     struct extent_buffer *eb, int wait,
 			     get_extent_t *get_extent, int mirror_num)
 {
 	unsigned long i;
-	unsigned long start_i;
 	struct page *page;
 	int err;
 	int ret = 0;
@@ -5191,16 +5228,8 @@
 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
 		return 0;
 
-	if (start) {
-		WARN_ON(start < eb->start);
-		start_i = (start >> PAGE_SHIFT) -
-			(eb->start >> PAGE_SHIFT);
-	} else {
-		start_i = 0;
-	}
-
 	num_pages = num_extent_pages(eb->start, eb->len);
-	for (i = start_i; i < num_pages; i++) {
+	for (i = 0; i < num_pages; i++) {
 		page = eb->pages[i];
 		if (wait == WAIT_NONE) {
 			if (!trylock_page(page))
@@ -5209,21 +5238,29 @@
 			lock_page(page);
 		}
 		locked_pages++;
+	}
+	/*
+	 * We need to firstly lock all pages to make sure that
+	 * the uptodate bit of our pages won't be affected by
+	 * clear_extent_buffer_uptodate().
+	 */
+	for (i = 0; i < num_pages; i++) {
+		page = eb->pages[i];
 		if (!PageUptodate(page)) {
 			num_reads++;
 			all_uptodate = 0;
 		}
 	}
+
 	if (all_uptodate) {
-		if (start_i == 0)
-			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
+		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 		goto unlock_exit;
 	}
 
 	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
 	eb->read_mirror = 0;
 	atomic_set(&eb->io_pages, num_reads);
-	for (i = start_i; i < num_pages; i++) {
+	for (i = 0; i < num_pages; i++) {
 		page = eb->pages[i];
 
 		if (!PageUptodate(page)) {
@@ -5264,7 +5301,7 @@
 	if (ret || wait != WAIT_COMPLETE)
 		return ret;
 
-	for (i = start_i; i < num_pages; i++) {
+	for (i = 0; i < num_pages; i++) {
 		page = eb->pages[i];
 		wait_on_page_locked(page);
 		if (!PageUptodate(page))
@@ -5274,12 +5311,10 @@
 	return ret;
 
 unlock_exit:
-	i = start_i;
 	while (locked_pages > 0) {
-		page = eb->pages[i];
-		i++;
-		unlock_page(page);
 		locked_pages--;
+		page = eb->pages[locked_pages];
+		unlock_page(page);
 	}
 	return ret;
 }
@@ -5382,8 +5417,7 @@
 	}
 
 	if (start + min_len > eb->len) {
-		WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
-		       "wanted %lu %lu\n",
+		WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
 		       eb->start, eb->len, start, min_len);
 		return -EINVAL;
 	}
@@ -5524,17 +5558,45 @@
 	}
 }
 
-/*
- * The extent buffer bitmap operations are done with byte granularity because
- * bitmap items are not guaranteed to be aligned to a word and therefore a
- * single word in a bitmap may straddle two pages in the extent buffer.
- */
-#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
-#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
-#define BITMAP_FIRST_BYTE_MASK(start) \
-	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
-#define BITMAP_LAST_BYTE_MASK(nbits) \
-	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
+void le_bitmap_set(u8 *map, unsigned int start, int len)
+{
+	u8 *p = map + BIT_BYTE(start);
+	const unsigned int size = start + len;
+	int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE);
+	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start);
+
+	while (len - bits_to_set >= 0) {
+		*p |= mask_to_set;
+		len -= bits_to_set;
+		bits_to_set = BITS_PER_BYTE;
+		mask_to_set = ~(u8)0;
+		p++;
+	}
+	if (len) {
+		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
+		*p |= mask_to_set;
+	}
+}
+
+void le_bitmap_clear(u8 *map, unsigned int start, int len)
+{
+	u8 *p = map + BIT_BYTE(start);
+	const unsigned int size = start + len;
+	int bits_to_clear = BITS_PER_BYTE - (start % BITS_PER_BYTE);
+	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(start);
+
+	while (len - bits_to_clear >= 0) {
+		*p &= ~mask_to_clear;
+		len -= bits_to_clear;
+		bits_to_clear = BITS_PER_BYTE;
+		mask_to_clear = ~(u8)0;
+		p++;
+	}
+	if (len) {
+		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
+		*p &= ~mask_to_clear;
+	}
+}
 
 /*
  * eb_bitmap_offset() - calculate the page and offset of the byte containing the
@@ -5578,7 +5640,7 @@
 int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
 			   unsigned long nr)
 {
-	char *kaddr;
+	u8 *kaddr;
 	struct page *page;
 	unsigned long i;
 	size_t offset;
@@ -5600,13 +5662,13 @@
 void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
 			      unsigned long pos, unsigned long len)
 {
-	char *kaddr;
+	u8 *kaddr;
 	struct page *page;
 	unsigned long i;
 	size_t offset;
 	const unsigned int size = pos + len;
 	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
-	unsigned int mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
+	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
 
 	eb_bitmap_offset(eb, start, pos, &i, &offset);
 	page = eb->pages[i];
@@ -5617,7 +5679,7 @@
 		kaddr[offset] |= mask_to_set;
 		len -= bits_to_set;
 		bits_to_set = BITS_PER_BYTE;
-		mask_to_set = ~0U;
+		mask_to_set = ~(u8)0;
 		if (++offset >= PAGE_SIZE && len > 0) {
 			offset = 0;
 			page = eb->pages[++i];
@@ -5642,13 +5704,13 @@
 void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
 				unsigned long pos, unsigned long len)
 {
-	char *kaddr;
+	u8 *kaddr;
 	struct page *page;
 	unsigned long i;
 	size_t offset;
 	const unsigned int size = pos + len;
 	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
-	unsigned int mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
+	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
 
 	eb_bitmap_offset(eb, start, pos, &i, &offset);
 	page = eb->pages[i];
@@ -5659,7 +5721,7 @@
 		kaddr[offset] &= ~mask_to_clear;
 		len -= bits_to_clear;
 		bits_to_clear = BITS_PER_BYTE;
-		mask_to_clear = ~0U;
+		mask_to_clear = ~(u8)0;
 		if (++offset >= PAGE_SIZE && len > 0) {
 			offset = 0;
 			page = eb->pages[++i];
@@ -5713,14 +5775,14 @@
 
 	if (src_offset + len > dst->len) {
 		btrfs_err(dst->fs_info,
-			"memmove bogus src_offset %lu move "
-		       "len %lu dst len %lu", src_offset, len, dst->len);
+			"memmove bogus src_offset %lu move len %lu dst len %lu",
+			 src_offset, len, dst->len);
 		BUG_ON(1);
 	}
 	if (dst_offset + len > dst->len) {
 		btrfs_err(dst->fs_info,
-			"memmove bogus dst_offset %lu move "
-		       "len %lu dst len %lu", dst_offset, len, dst->len);
+			"memmove bogus dst_offset %lu move len %lu dst len %lu",
+			 dst_offset, len, dst->len);
 		BUG_ON(1);
 	}
 
@@ -5760,13 +5822,15 @@
 	unsigned long src_i;
 
 	if (src_offset + len > dst->len) {
-		btrfs_err(dst->fs_info, "memmove bogus src_offset %lu move "
-		       "len %lu len %lu", src_offset, len, dst->len);
+		btrfs_err(dst->fs_info,
+			  "memmove bogus src_offset %lu move len %lu len %lu",
+			  src_offset, len, dst->len);
 		BUG_ON(1);
 	}
 	if (dst_offset + len > dst->len) {
-		btrfs_err(dst->fs_info, "memmove bogus dst_offset %lu move "
-		       "len %lu len %lu", dst_offset, len, dst->len);
+		btrfs_err(dst->fs_info,
+			  "memmove bogus dst_offset %lu move len %lu len %lu",
+			  dst_offset, len, dst->len);
 		BUG_ON(1);
 	}
 	if (dst_offset < src_offset) {
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 28cd88f..ab31d14 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -59,6 +59,28 @@
  */
 #define EXTENT_PAGE_PRIVATE 1
 
+/*
+ * The extent buffer bitmap operations are done with byte granularity instead of
+ * word granularity for two reasons:
+ * 1. The bitmaps must be little-endian on disk.
+ * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
+ *    single word in a bitmap may straddle two pages in the extent buffer.
+ */
+#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
+#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
+#define BITMAP_FIRST_BYTE_MASK(start) \
+	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
+#define BITMAP_LAST_BYTE_MASK(nbits) \
+	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
+
+static inline int le_test_bit(int nr, const u8 *addr)
+{
+	return 1U & (addr[BIT_BYTE(nr)] >> (nr & (BITS_PER_BYTE-1)));
+}
+
+extern void le_bitmap_set(u8 *map, unsigned int start, int len);
+extern void le_bitmap_clear(u8 *map, unsigned int start, int len);
+
 struct extent_state;
 struct btrfs_root;
 struct btrfs_io_bio;
@@ -359,7 +381,7 @@
 #define WAIT_COMPLETE	1
 #define WAIT_PAGE_LOCK	2
 int read_extent_buffer_pages(struct extent_io_tree *tree,
-			     struct extent_buffer *eb, u64 start, int wait,
+			     struct extent_buffer *eb, int wait,
 			     get_extent_t *get_extent, int mirror_num);
 void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
 
@@ -413,7 +435,7 @@
 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
 void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
 void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
-				 struct page *locked_page,
+				 u64 delalloc_end, struct page *locked_page,
 				 unsigned bits_to_clear,
 				 unsigned long page_ops);
 struct bio *
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index fea31a4..3a14c87 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -503,7 +503,7 @@
 
 	end_of_last_block = start_pos + num_bytes - 1;
 	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
-					cached);
+					cached, 0);
 	if (err)
 		return err;
 
@@ -1110,13 +1110,25 @@
 
 	leaf = path->nodes[0];
 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
+	if (key.objectid != ino ||
+	    key.type != BTRFS_EXTENT_DATA_KEY) {
+		ret = -EINVAL;
+		btrfs_abort_transaction(trans, ret);
+		goto out;
+	}
 	fi = btrfs_item_ptr(leaf, path->slots[0],
 			    struct btrfs_file_extent_item);
-	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
-	       BTRFS_FILE_EXTENT_PREALLOC);
+	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
+		ret = -EINVAL;
+		btrfs_abort_transaction(trans, ret);
+		goto out;
+	}
 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
-	BUG_ON(key.offset > start || extent_end < end);
+	if (key.offset > start || extent_end < end) {
+		ret = -EINVAL;
+		btrfs_abort_transaction(trans, ret);
+		goto out;
+	}
 
 	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
 	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
@@ -1213,12 +1225,19 @@
 		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
 					   root->root_key.objectid,
 					   ino, orig_offset);
-		BUG_ON(ret); /* -ENOMEM */
+		if (ret) {
+			btrfs_abort_transaction(trans, ret);
+			goto out;
+		}
 
 		if (split == start) {
 			key.offset = start;
 		} else {
-			BUG_ON(start != key.offset);
+			if (start != key.offset) {
+				ret = -EINVAL;
+				btrfs_abort_transaction(trans, ret);
+				goto out;
+			}
 			path->slots[0]--;
 			extent_end = end;
 		}
@@ -1240,7 +1259,10 @@
 		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
 					0, root->root_key.objectid,
 					ino, orig_offset);
-		BUG_ON(ret); /* -ENOMEM */
+		if (ret) {
+			btrfs_abort_transaction(trans, ret);
+			goto out;
+		}
 	}
 	other_start = 0;
 	other_end = start;
@@ -1257,7 +1279,10 @@
 		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
 					0, root->root_key.objectid,
 					ino, orig_offset);
-		BUG_ON(ret); /* -ENOMEM */
+		if (ret) {
+			btrfs_abort_transaction(trans, ret);
+			goto out;
+		}
 	}
 	if (del_nr == 0) {
 		fi = btrfs_item_ptr(leaf, path->slots[0],
@@ -1757,7 +1782,7 @@
 	if (IS_NOCMTIME(inode))
 		return;
 
-	now = current_fs_time(inode->i_sb);
+	now = current_time(inode);
 	if (!timespec_equal(&inode->i_mtime, &now))
 		inode->i_mtime = now;
 
@@ -2040,7 +2065,7 @@
 		 * flags for any errors that might have happened while doing
 		 * writeback of file data.
 		 */
-		ret = btrfs_inode_check_errors(inode);
+		ret = filemap_check_errors(inode->i_mapping);
 		inode_unlock(inode);
 		goto out;
 	}
@@ -2578,7 +2603,7 @@
 		goto out_free;
 
 	inode_inc_iversion(inode);
-	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 
 	trans->block_rsv = &root->fs_info->trans_block_rsv;
 	ret = btrfs_update_inode(trans, root, inode);
@@ -2842,7 +2867,7 @@
 		if (IS_ERR(trans)) {
 			ret = PTR_ERR(trans);
 		} else {
-			inode->i_ctime = current_fs_time(inode->i_sb);
+			inode->i_ctime = current_time(inode);
 			i_size_write(inode, actual_end);
 			btrfs_ordered_update_i_size(inode, actual_end, NULL);
 			ret = btrfs_update_inode(trans, root, inode);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index d571bd2..e4b48f3 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -716,8 +716,7 @@
 
 	if (BTRFS_I(inode)->generation != generation) {
 		btrfs_err(root->fs_info,
-			"free space inode generation (%llu) "
-			"did not match free space cache generation (%llu)",
+			"free space inode generation (%llu) did not match free space cache generation (%llu)",
 			BTRFS_I(inode)->generation, generation);
 		return 0;
 	}
@@ -879,8 +878,9 @@
 
 	if (!matched) {
 		__btrfs_remove_free_space_cache(ctl);
-		btrfs_warn(fs_info, "block group %llu has wrong amount of free space",
-			block_group->key.objectid);
+		btrfs_warn(fs_info,
+			   "block group %llu has wrong amount of free space",
+			   block_group->key.objectid);
 		ret = -1;
 	}
 out:
@@ -891,8 +891,9 @@
 		spin_unlock(&block_group->lock);
 		ret = 0;
 
-		btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now",
-			block_group->key.objectid);
+		btrfs_warn(fs_info,
+			   "failed to load free space cache for block group %llu, rebuilding it now",
+			   block_group->key.objectid);
 	}
 
 	iput(inode);
@@ -2298,7 +2299,8 @@
 	}
 }
 
-int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
+int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
+			   struct btrfs_free_space_ctl *ctl,
 			   u64 offset, u64 bytes)
 {
 	struct btrfs_free_space *info;
@@ -2345,7 +2347,7 @@
 	spin_unlock(&ctl->tree_lock);
 
 	if (ret) {
-		printk(KERN_CRIT "BTRFS: unable to add free space :%d\n", ret);
+		btrfs_crit(fs_info, "unable to add free space :%d", ret);
 		ASSERT(ret != -EEXIST);
 	}
 
@@ -2621,7 +2623,8 @@
 	spin_unlock(&ctl->tree_lock);
 
 	if (align_gap_len)
-		__btrfs_add_free_space(ctl, align_gap, align_gap_len);
+		__btrfs_add_free_space(block_group->fs_info, ctl,
+				       align_gap, align_gap_len);
 	return ret;
 }
 
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 3af651c..363fdd9 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -89,13 +89,15 @@
 			      struct inode *inode);
 
 void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group);
-int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
+int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
+			   struct btrfs_free_space_ctl *ctl,
 			   u64 bytenr, u64 size);
 static inline int
 btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 		     u64 bytenr, u64 size)
 {
-	return __btrfs_add_free_space(block_group->free_space_ctl,
+	return __btrfs_add_free_space(block_group->fs_info,
+				      block_group->free_space_ctl,
 				      bytenr, size);
 }
 int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index 87e7e3d..57401b4 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -107,7 +107,7 @@
 	if (ret < 0)
 		return ERR_PTR(ret);
 	if (ret != 0) {
-		btrfs_warn(fs_info, "missing free space info for %llu\n",
+		btrfs_warn(fs_info, "missing free space info for %llu",
 			   block_group->key.objectid);
 		ASSERT(0);
 		return ERR_PTR(-ENOENT);
@@ -151,7 +151,7 @@
 	return DIV_ROUND_UP((u32)div_u64(size, sectorsize), BITS_PER_BYTE);
 }
 
-static unsigned long *alloc_bitmap(u32 bitmap_size)
+static u8 *alloc_bitmap(u32 bitmap_size)
 {
 	void *mem;
 
@@ -180,8 +180,7 @@
 	struct btrfs_free_space_info *info;
 	struct btrfs_key key, found_key;
 	struct extent_buffer *leaf;
-	unsigned long *bitmap;
-	char *bitmap_cursor;
+	u8 *bitmap, *bitmap_cursor;
 	u64 start, end;
 	u64 bitmap_range, i;
 	u32 bitmap_size, flags, expected_extent_count;
@@ -231,7 +230,7 @@
 						block_group->sectorsize);
 				last = div_u64(found_key.objectid + found_key.offset - start,
 					       block_group->sectorsize);
-				bitmap_set(bitmap, first, last - first);
+				le_bitmap_set(bitmap, first, last - first);
 
 				extent_count++;
 				nr++;
@@ -261,7 +260,8 @@
 	btrfs_release_path(path);
 
 	if (extent_count != expected_extent_count) {
-		btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
+		btrfs_err(fs_info,
+			  "incorrect extent count for %llu; counted %u, expected %u",
 			  block_group->key.objectid, extent_count,
 			  expected_extent_count);
 		ASSERT(0);
@@ -269,7 +269,7 @@
 		goto out;
 	}
 
-	bitmap_cursor = (char *)bitmap;
+	bitmap_cursor = bitmap;
 	bitmap_range = block_group->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
 	i = start;
 	while (i < end) {
@@ -318,7 +318,7 @@
 	struct btrfs_free_space_info *info;
 	struct btrfs_key key, found_key;
 	struct extent_buffer *leaf;
-	unsigned long *bitmap;
+	u8 *bitmap;
 	u64 start, end;
 	/* Initialize to silence GCC. */
 	u64 extent_start = 0;
@@ -362,7 +362,7 @@
 				break;
 			} else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
 				unsigned long ptr;
-				char *bitmap_cursor;
+				u8 *bitmap_cursor;
 				u32 bitmap_pos, data_size;
 
 				ASSERT(found_key.objectid >= start);
@@ -372,7 +372,7 @@
 				bitmap_pos = div_u64(found_key.objectid - start,
 						     block_group->sectorsize *
 						     BITS_PER_BYTE);
-				bitmap_cursor = ((char *)bitmap) + bitmap_pos;
+				bitmap_cursor = bitmap + bitmap_pos;
 				data_size = free_space_bitmap_size(found_key.offset,
 								   block_group->sectorsize);
 
@@ -409,7 +409,7 @@
 	offset = start;
 	bitnr = 0;
 	while (offset < end) {
-		bit = !!test_bit(bitnr, bitmap);
+		bit = !!le_test_bit(bitnr, bitmap);
 		if (prev_bit == 0 && bit == 1) {
 			extent_start = offset;
 		} else if (prev_bit == 1 && bit == 0) {
@@ -442,7 +442,8 @@
 	}
 
 	if (extent_count != expected_extent_count) {
-		btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
+		btrfs_err(fs_info,
+			  "incorrect extent count for %llu; counted %u, expected %u",
 			  block_group->key.objectid, extent_count,
 			  expected_extent_count);
 		ASSERT(0);
@@ -1163,7 +1164,7 @@
 	if (IS_ERR(trans))
 		return PTR_ERR(trans);
 
-	fs_info->creating_free_space_tree = 1;
+	set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
 	free_space_root = btrfs_create_tree(trans, fs_info,
 					    BTRFS_FREE_SPACE_TREE_OBJECTID);
 	if (IS_ERR(free_space_root)) {
@@ -1183,7 +1184,8 @@
 	}
 
 	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
-	fs_info->creating_free_space_tree = 0;
+	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
+	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
 
 	ret = btrfs_commit_transaction(trans, tree_root);
 	if (ret)
@@ -1192,7 +1194,7 @@
 	return 0;
 
 abort:
-	fs_info->creating_free_space_tree = 0;
+	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
 	btrfs_abort_transaction(trans, ret);
 	btrfs_end_transaction(trans, tree_root);
 	return ret;
@@ -1251,6 +1253,7 @@
 		return PTR_ERR(trans);
 
 	btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE);
+	btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
 	fs_info->free_space_root = NULL;
 
 	ret = clear_free_space_tree(trans, free_space_root);
@@ -1480,7 +1483,8 @@
 	}
 
 	if (extent_count != expected_extent_count) {
-		btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
+		btrfs_err(fs_info,
+			  "incorrect extent count for %llu; counted %u, expected %u",
 			  block_group->key.objectid, extent_count,
 			  expected_extent_count);
 		ASSERT(0);
@@ -1542,7 +1546,8 @@
 	}
 
 	if (extent_count != expected_extent_count) {
-		btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
+		btrfs_err(fs_info,
+			  "incorrect extent count for %llu; counted %u, expected %u",
 			  block_group->key.objectid, extent_count,
 			  expected_extent_count);
 		ASSERT(0);
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 359ee86..d27014b 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -104,7 +104,7 @@
 			break;
 
 		if (last != (u64)-1 && last + 1 != key.objectid) {
-			__btrfs_add_free_space(ctl, last + 1,
+			__btrfs_add_free_space(fs_info, ctl, last + 1,
 					       key.objectid - last - 1);
 			wake_up(&root->ino_cache_wait);
 		}
@@ -115,7 +115,7 @@
 	}
 
 	if (last < root->highest_objectid - 1) {
-		__btrfs_add_free_space(ctl, last + 1,
+		__btrfs_add_free_space(fs_info, ctl, last + 1,
 				       root->highest_objectid - last - 1);
 	}
 
@@ -136,12 +136,13 @@
 
 static void start_caching(struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
 	struct task_struct *tsk;
 	int ret;
 	u64 objectid;
 
-	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
+	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
 		return;
 
 	spin_lock(&root->ino_cache_lock);
@@ -153,7 +154,7 @@
 	root->ino_cache_state = BTRFS_CACHE_STARTED;
 	spin_unlock(&root->ino_cache_lock);
 
-	ret = load_free_ino_cache(root->fs_info, root);
+	ret = load_free_ino_cache(fs_info, root);
 	if (ret == 1) {
 		spin_lock(&root->ino_cache_lock);
 		root->ino_cache_state = BTRFS_CACHE_FINISHED;
@@ -170,15 +171,15 @@
 	 */
 	ret = btrfs_find_free_objectid(root, &objectid);
 	if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
-		__btrfs_add_free_space(ctl, objectid,
+		__btrfs_add_free_space(fs_info, ctl, objectid,
 				       BTRFS_LAST_FREE_OBJECTID - objectid + 1);
 	}
 
 	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
 			  root->root_key.objectid);
 	if (IS_ERR(tsk)) {
-		btrfs_warn(root->fs_info, "failed to start inode caching task");
-		btrfs_clear_pending_and_info(root->fs_info, INODE_MAP_CACHE,
+		btrfs_warn(fs_info, "failed to start inode caching task");
+		btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE,
 				"disabling inode map caching");
 	}
 }
@@ -209,28 +210,29 @@
 
 void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
 
-	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
+	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
 		return;
 again:
 	if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
-		__btrfs_add_free_space(pinned, objectid, 1);
+		__btrfs_add_free_space(fs_info, pinned, objectid, 1);
 	} else {
-		down_write(&root->fs_info->commit_root_sem);
+		down_write(&fs_info->commit_root_sem);
 		spin_lock(&root->ino_cache_lock);
 		if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
 			spin_unlock(&root->ino_cache_lock);
-			up_write(&root->fs_info->commit_root_sem);
+			up_write(&fs_info->commit_root_sem);
 			goto again;
 		}
 		spin_unlock(&root->ino_cache_lock);
 
 		start_caching(root);
 
-		__btrfs_add_free_space(pinned, objectid, 1);
+		__btrfs_add_free_space(fs_info, pinned, objectid, 1);
 
-		up_write(&root->fs_info->commit_root_sem);
+		up_write(&fs_info->commit_root_sem);
 	}
 }
 
@@ -277,7 +279,8 @@
 		rb_erase(&info->offset_index, rbroot);
 		spin_unlock(rbroot_lock);
 		if (add_to_ctl)
-			__btrfs_add_free_space(ctl, info->offset, count);
+			__btrfs_add_free_space(root->fs_info, ctl,
+					       info->offset, count);
 		kmem_cache_free(btrfs_free_space_cachep, info);
 	}
 }
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ca01106..2b790bd 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -560,8 +560,9 @@
 			 * we don't need to create any more async work items.
 			 * Unlock and free up our temp pages.
 			 */
-			extent_clear_unlock_delalloc(inode, start, end, NULL,
-						     clear_flags, PAGE_UNLOCK |
+			extent_clear_unlock_delalloc(inode, start, end, end,
+						     NULL, clear_flags,
+						     PAGE_UNLOCK |
 						     PAGE_CLEAR_DIRTY |
 						     PAGE_SET_WRITEBACK |
 						     page_error_op |
@@ -837,6 +838,8 @@
 		extent_clear_unlock_delalloc(inode, async_extent->start,
 				async_extent->start +
 				async_extent->ram_size - 1,
+				async_extent->start +
+				async_extent->ram_size - 1,
 				NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
 				PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
 				PAGE_SET_WRITEBACK);
@@ -856,7 +859,8 @@
 			tree->ops->writepage_end_io_hook(p, start, end,
 							 NULL, 0);
 			p->mapping = NULL;
-			extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
+			extent_clear_unlock_delalloc(inode, start, end, end,
+						     NULL, 0,
 						     PAGE_END_WRITEBACK |
 						     PAGE_SET_ERROR);
 			free_async_extent_pages(async_extent);
@@ -873,6 +877,8 @@
 	extent_clear_unlock_delalloc(inode, async_extent->start,
 				     async_extent->start +
 				     async_extent->ram_size - 1,
+				     async_extent->start +
+				     async_extent->ram_size - 1,
 				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
 				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
 				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
@@ -966,7 +972,8 @@
 		ret = cow_file_range_inline(root, inode, start, end, 0, 0,
 					    NULL);
 		if (ret == 0) {
-			extent_clear_unlock_delalloc(inode, start, end, NULL,
+			extent_clear_unlock_delalloc(inode, start, end,
+				     delalloc_end, NULL,
 				     EXTENT_LOCKED | EXTENT_DELALLOC |
 				     EXTENT_DEFRAG, PAGE_UNLOCK |
 				     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
@@ -1062,7 +1069,8 @@
 		op |= PAGE_SET_PRIVATE2;
 
 		extent_clear_unlock_delalloc(inode, start,
-					     start + ram_size - 1, locked_page,
+					     start + ram_size - 1,
+					     delalloc_end, locked_page,
 					     EXTENT_LOCKED | EXTENT_DELALLOC,
 					     op);
 		disk_num_bytes -= cur_alloc_size;
@@ -1079,7 +1087,8 @@
 	btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
 	btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
 out_unlock:
-	extent_clear_unlock_delalloc(inode, start, end, locked_page,
+	extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
+				     locked_page,
 				     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
 				     EXTENT_DELALLOC | EXTENT_DEFRAG,
 				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
@@ -1258,7 +1267,8 @@
 
 	path = btrfs_alloc_path();
 	if (!path) {
-		extent_clear_unlock_delalloc(inode, start, end, locked_page,
+		extent_clear_unlock_delalloc(inode, start, end, end,
+					     locked_page,
 					     EXTENT_LOCKED | EXTENT_DELALLOC |
 					     EXTENT_DO_ACCOUNTING |
 					     EXTENT_DEFRAG, PAGE_UNLOCK |
@@ -1276,7 +1286,8 @@
 		trans = btrfs_join_transaction(root);
 
 	if (IS_ERR(trans)) {
-		extent_clear_unlock_delalloc(inode, start, end, locked_page,
+		extent_clear_unlock_delalloc(inode, start, end, end,
+					     locked_page,
 					     EXTENT_LOCKED | EXTENT_DELALLOC |
 					     EXTENT_DO_ACCOUNTING |
 					     EXTENT_DEFRAG, PAGE_UNLOCK |
@@ -1490,7 +1501,7 @@
 		}
 
 		extent_clear_unlock_delalloc(inode, cur_offset,
-					     cur_offset + num_bytes - 1,
+					     cur_offset + num_bytes - 1, end,
 					     locked_page, EXTENT_LOCKED |
 					     EXTENT_DELALLOC |
 					     EXTENT_CLEAR_DATA_RESV,
@@ -1522,7 +1533,7 @@
 		ret = err;
 
 	if (ret && cur_offset < end)
-		extent_clear_unlock_delalloc(inode, cur_offset, end,
+		extent_clear_unlock_delalloc(inode, cur_offset, end, end,
 					     locked_page, EXTENT_LOCKED |
 					     EXTENT_DELALLOC | EXTENT_DEFRAG |
 					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
@@ -1988,7 +1999,7 @@
 }
 
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
-			      struct extent_state **cached_state)
+			      struct extent_state **cached_state, int dedupe)
 {
 	WARN_ON((end & (PAGE_SIZE - 1)) == 0);
 	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
@@ -2052,7 +2063,8 @@
 		goto out;
 	 }
 
-	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
+	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state,
+				  0);
 	ClearPageChecked(page);
 	set_page_dirty(page);
 out:
@@ -2309,7 +2321,7 @@
 		if (PTR_ERR(root) == -ENOENT)
 			return 0;
 		WARN_ON(1);
-		pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
+		btrfs_debug(fs_info, "inum=%llu, offset=%llu, root_id=%llu",
 			 inum, offset, root_id);
 		return PTR_ERR(root);
 	}
@@ -3936,7 +3948,7 @@
 	 */
 	if (!btrfs_is_free_space_inode(inode)
 	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
-	    && !root->fs_info->log_root_recovering) {
+	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
 		btrfs_update_root_times(trans, root);
 
 		ret = btrfs_delayed_update_inode(trans, root, inode);
@@ -4059,7 +4071,7 @@
 	inode_inc_iversion(inode);
 	inode_inc_iversion(dir);
 	inode->i_ctime = dir->i_mtime =
-		dir->i_ctime = current_fs_time(inode->i_sb);
+		dir->i_ctime = current_time(inode);
 	ret = btrfs_update_inode(trans, root, dir);
 out:
 	return ret;
@@ -4202,7 +4214,7 @@
 
 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
 	inode_inc_iversion(dir);
-	dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	ret = btrfs_update_inode_fallback(trans, root, dir);
 	if (ret)
 		btrfs_abort_transaction(trans, ret);
@@ -4757,7 +4769,7 @@
 			  0, 0, &cached_state, GFP_NOFS);
 
 	ret = btrfs_set_extent_delalloc(inode, block_start, block_end,
-					&cached_state);
+					&cached_state, 0);
 	if (ret) {
 		unlock_extent_cached(io_tree, block_start, block_end,
 				     &cached_state, GFP_NOFS);
@@ -4965,7 +4977,7 @@
 		inode_inc_iversion(inode);
 		if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
 			inode->i_ctime = inode->i_mtime =
-				current_fs_time(inode->i_sb);
+				current_time(inode);
 	}
 
 	if (newsize > oldsize) {
@@ -5072,7 +5084,7 @@
 	if (btrfs_root_readonly(root))
 		return -EROFS;
 
-	err = inode_change_ok(inode, attr);
+	err = setattr_prepare(dentry, attr);
 	if (err)
 		return err;
 
@@ -5223,7 +5235,7 @@
 
 	btrfs_free_io_failure_record(inode, 0, (u64)-1);
 
-	if (root->fs_info->log_root_recovering) {
+	if (test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
 		BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
 				 &BTRFS_I(inode)->runtime_flags));
 		goto no_delete;
@@ -5672,7 +5684,7 @@
 	inode->i_op = &btrfs_dir_ro_inode_operations;
 	inode->i_fop = &simple_dir_operations;
 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
-	inode->i_mtime = current_fs_time(inode->i_sb);
+	inode->i_mtime = current_time(inode);
 	inode->i_atime = inode->i_mtime;
 	inode->i_ctime = inode->i_mtime;
 	BTRFS_I(inode)->i_otime = inode->i_mtime;
@@ -6258,7 +6270,7 @@
 	inode_init_owner(inode, dir, mode);
 	inode_set_bytes(inode, 0);
 
-	inode->i_mtime = current_fs_time(inode->i_sb);
+	inode->i_mtime = current_time(inode);
 	inode->i_atime = inode->i_mtime;
 	inode->i_ctime = inode->i_mtime;
 	BTRFS_I(inode)->i_otime = inode->i_mtime;
@@ -6372,7 +6384,7 @@
 			   name_len * 2);
 	inode_inc_iversion(parent_inode);
 	parent_inode->i_mtime = parent_inode->i_ctime =
-		current_fs_time(parent_inode->i_sb);
+		current_time(parent_inode);
 	ret = btrfs_update_inode(trans, root, parent_inode);
 	if (ret)
 		btrfs_abort_transaction(trans, ret);
@@ -6590,7 +6602,7 @@
 	BTRFS_I(inode)->dir_index = 0ULL;
 	inc_nlink(inode);
 	inode_inc_iversion(inode);
-	inode->i_ctime = current_fs_time(inode->i_sb);
+	inode->i_ctime = current_time(inode);
 	ihold(inode);
 	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
 
@@ -7012,8 +7024,9 @@
 insert:
 	btrfs_release_path(path);
 	if (em->start > start || extent_map_end(em) <= start) {
-		btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
-			em->start, em->len, start, len);
+		btrfs_err(root->fs_info,
+			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
+			  em->start, em->len, start, len);
 		err = -EIO;
 		goto out;
 	}
@@ -7865,18 +7878,19 @@
 				      struct io_failure_record *failrec,
 				      int failed_mirror)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int num_copies;
 
-	num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
-				      failrec->logical, failrec->len);
+	num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
 	if (num_copies == 1) {
 		/*
 		 * we only have a single copy of the data, so don't bother with
 		 * all the retry and error correction code that follows. no
 		 * matter what the error is, it is very likely to persist.
 		 */
-		pr_debug("Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
-			 num_copies, failrec->this_mirror, failed_mirror);
+		btrfs_debug(fs_info,
+			"Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
+			num_copies, failrec->this_mirror, failed_mirror);
 		return 0;
 	}
 
@@ -7886,8 +7900,9 @@
 		failrec->this_mirror++;
 
 	if (failrec->this_mirror > num_copies) {
-		pr_debug("Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
-			 num_copies, failrec->this_mirror, failed_mirror);
+		btrfs_debug(fs_info,
+			"Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
+			num_copies, failrec->this_mirror, failed_mirror);
 		return 0;
 	}
 
@@ -8619,7 +8634,7 @@
 		goto out;
 
 	/* If this is a write we don't need to check anymore */
-	if (iov_iter_rw(iter) == WRITE)
+	if (iov_iter_rw(iter) != READ || !iter_is_iovec(iter))
 		return 0;
 	/*
 	 * Check to make sure we don't have duplicate iov_base's in this
@@ -9055,7 +9070,7 @@
 			  0, 0, &cached_state, GFP_NOFS);
 
 	ret = btrfs_set_extent_delalloc(inode, page_start, end,
-					&cached_state);
+					&cached_state, 0);
 	if (ret) {
 		unlock_extent_cached(io_tree, page_start, page_end,
 				     &cached_state, GFP_NOFS);
@@ -9377,8 +9392,9 @@
 		if (!ordered)
 			break;
 		else {
-			btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup",
-				ordered->file_offset, ordered->len);
+			btrfs_err(root->fs_info,
+				  "found ordered extent %llu %llu on inode cleanup",
+				  ordered->file_offset, ordered->len);
 			btrfs_remove_ordered_extent(inode, ordered);
 			btrfs_put_ordered_extent(ordered);
 			btrfs_put_ordered_extent(ordered);
@@ -9493,7 +9509,7 @@
 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
 	struct inode *new_inode = new_dentry->d_inode;
 	struct inode *old_inode = old_dentry->d_inode;
-	struct timespec ctime = CURRENT_TIME;
+	struct timespec ctime = current_time(old_inode);
 	struct dentry *parent;
 	u64 old_ino = btrfs_ino(old_inode);
 	u64 new_ino = btrfs_ino(new_inode);
@@ -9861,7 +9877,7 @@
 	inode_inc_iversion(old_inode);
 	old_dir->i_ctime = old_dir->i_mtime =
 	new_dir->i_ctime = new_dir->i_mtime =
-	old_inode->i_ctime = current_fs_time(old_dir->i_sb);
+	old_inode->i_ctime = current_time(old_dir);
 
 	if (old_dentry->d_parent != new_dentry->d_parent)
 		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
@@ -9886,7 +9902,7 @@
 
 	if (new_inode) {
 		inode_inc_iversion(new_inode);
-		new_inode->i_ctime = current_fs_time(new_inode->i_sb);
+		new_inode->i_ctime = current_time(new_inode);
 		if (unlikely(btrfs_ino(new_inode) ==
 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
 			root_objectid = BTRFS_I(new_inode)->location.objectid;
@@ -10404,7 +10420,7 @@
 		*alloc_hint = ins.objectid + ins.offset;
 
 		inode_inc_iversion(inode);
-		inode->i_ctime = current_fs_time(inode->i_sb);
+		inode->i_ctime = current_time(inode);
 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
 		    (actual_len > inode->i_size) &&
@@ -10544,21 +10560,6 @@
 
 }
 
-/* Inspired by filemap_check_errors() */
-int btrfs_inode_check_errors(struct inode *inode)
-{
-	int ret = 0;
-
-	if (test_bit(AS_ENOSPC, &inode->i_mapping->flags) &&
-	    test_and_clear_bit(AS_ENOSPC, &inode->i_mapping->flags))
-		ret = -ENOSPC;
-	if (test_bit(AS_EIO, &inode->i_mapping->flags) &&
-	    test_and_clear_bit(AS_EIO, &inode->i_mapping->flags))
-		ret = -EIO;
-
-	return ret;
-}
-
 static const struct inode_operations btrfs_dir_inode_operations = {
 	.getattr	= btrfs_getattr,
 	.lookup		= btrfs_lookup,
@@ -10567,14 +10568,11 @@
 	.link		= btrfs_link,
 	.mkdir		= btrfs_mkdir,
 	.rmdir		= btrfs_rmdir,
-	.rename2	= btrfs_rename2,
+	.rename		= btrfs_rename2,
 	.symlink	= btrfs_symlink,
 	.setattr	= btrfs_setattr,
 	.mknod		= btrfs_mknod,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= btrfs_listxattr,
-	.removexattr	= generic_removexattr,
 	.permission	= btrfs_permission,
 	.get_acl	= btrfs_get_acl,
 	.set_acl	= btrfs_set_acl,
@@ -10648,10 +10646,7 @@
 static const struct inode_operations btrfs_file_inode_operations = {
 	.getattr	= btrfs_getattr,
 	.setattr	= btrfs_setattr,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr      = btrfs_listxattr,
-	.removexattr	= generic_removexattr,
 	.permission	= btrfs_permission,
 	.fiemap		= btrfs_fiemap,
 	.get_acl	= btrfs_get_acl,
@@ -10662,10 +10657,7 @@
 	.getattr	= btrfs_getattr,
 	.setattr	= btrfs_setattr,
 	.permission	= btrfs_permission,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= btrfs_listxattr,
-	.removexattr	= generic_removexattr,
 	.get_acl	= btrfs_get_acl,
 	.set_acl	= btrfs_set_acl,
 	.update_time	= btrfs_update_time,
@@ -10676,10 +10668,7 @@
 	.getattr	= btrfs_getattr,
 	.setattr	= btrfs_setattr,
 	.permission	= btrfs_permission,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= btrfs_listxattr,
-	.removexattr	= generic_removexattr,
 	.update_time	= btrfs_update_time,
 };
 
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 7fd939b..18e1aa0f 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -349,7 +349,7 @@
 
 	btrfs_update_iflags(inode);
 	inode_inc_iversion(inode);
-	inode->i_ctime = current_fs_time(inode->i_sb);
+	inode->i_ctime = current_time(inode);
 	ret = btrfs_update_inode(trans, root, inode);
 
 	btrfs_end_transaction(trans, root);
@@ -445,7 +445,7 @@
 	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct btrfs_root *new_root;
 	struct btrfs_block_rsv block_rsv;
-	struct timespec cur_time = current_fs_time(dir->i_sb);
+	struct timespec cur_time = current_time(dir);
 	struct inode *inode;
 	int ret;
 	int err;
@@ -1903,8 +1903,9 @@
 		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
 		if (key.objectid == root->root_key.objectid) {
 			ret = -EPERM;
-			btrfs_err(root->fs_info, "deleting default subvolume "
-				  "%llu is not allowed", key.objectid);
+			btrfs_err(root->fs_info,
+				  "deleting default subvolume %llu is not allowed",
+				  key.objectid);
 			goto out;
 		}
 		btrfs_release_path(path);
@@ -3291,7 +3292,7 @@
 
 	inode_inc_iversion(inode);
 	if (!no_time_update)
-		inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
+		inode->i_mtime = inode->i_ctime = current_time(inode);
 	/*
 	 * We round up to the block size at eof when determining which
 	 * extents to clone above, but shouldn't round up the file size.
@@ -4097,8 +4098,8 @@
 	if (IS_ERR_OR_NULL(di)) {
 		btrfs_free_path(path);
 		btrfs_end_transaction(trans, root);
-		btrfs_err(new_root->fs_info, "Umm, you don't have the default dir"
-			   "item, this isn't going to work");
+		btrfs_err(new_root->fs_info,
+			  "Umm, you don't have the default diritem, this isn't going to work");
 		ret = -ENOENT;
 		goto out;
 	}
@@ -5106,7 +5107,7 @@
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_root_item *root_item = &root->root_item;
 	struct btrfs_trans_handle *trans;
-	struct timespec ct = current_fs_time(inode->i_sb);
+	struct timespec ct = current_time(inode);
 	int ret = 0;
 	int received_uuid_changed;
 
@@ -5307,8 +5308,9 @@
 		return -EFAULT;
 
 	if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
-		btrfs_err(root->fs_info, "unable to set label with more than %d bytes",
-		       BTRFS_LABEL_SIZE - 1);
+		btrfs_err(root->fs_info,
+			  "unable to set label with more than %d bytes",
+			  BTRFS_LABEL_SIZE - 1);
 		return -EINVAL;
 	}
 
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 1adfbe7..48655da 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -141,7 +141,7 @@
 		ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
 				       &out_len, workspace->mem);
 		if (ret != LZO_E_OK) {
-			printk(KERN_DEBUG "BTRFS: deflate in loop returned %d\n",
+			pr_debug("BTRFS: deflate in loop returned %d\n",
 			       ret);
 			ret = -EIO;
 			goto out;
@@ -356,7 +356,7 @@
 		if (need_unmap)
 			kunmap(pages_in[page_in_index - 1]);
 		if (ret != LZO_E_OK) {
-			printk(KERN_WARNING "BTRFS: decompress failed\n");
+			pr_warn("BTRFS: decompress failed\n");
 			ret = -EIO;
 			break;
 		}
@@ -402,7 +402,7 @@
 	out_len = PAGE_SIZE;
 	ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
 	if (ret != LZO_E_OK) {
-		printk(KERN_WARNING "BTRFS: decompress failed!\n");
+		pr_warn("BTRFS: decompress failed!\n");
 		ret = -EIO;
 		goto out;
 	}
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 3b78d38..b2d1e95 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -67,8 +67,8 @@
 					       u64 offset)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
-		    "%llu", offset);
+	btrfs_panic(fs_info, errno,
+		    "Inconsistency in ordered tree at offset %llu", offset);
 }
 
 /*
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 147dc6c..438575e 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -24,12 +24,11 @@
 {
 	int num_stripes = btrfs_chunk_num_stripes(eb, chunk);
 	int i;
-	printk(KERN_INFO "\t\tchunk length %llu owner %llu type %llu "
-	       "num_stripes %d\n",
+	pr_info("\t\tchunk length %llu owner %llu type %llu num_stripes %d\n",
 	       btrfs_chunk_length(eb, chunk), btrfs_chunk_owner(eb, chunk),
 	       btrfs_chunk_type(eb, chunk), num_stripes);
 	for (i = 0 ; i < num_stripes ; i++) {
-		printk(KERN_INFO "\t\t\tstripe %d devid %llu offset %llu\n", i,
+		pr_info("\t\t\tstripe %d devid %llu offset %llu\n", i,
 		      btrfs_stripe_devid_nr(eb, chunk, i),
 		      btrfs_stripe_offset_nr(eb, chunk, i));
 	}
@@ -37,8 +36,7 @@
 static void print_dev_item(struct extent_buffer *eb,
 			   struct btrfs_dev_item *dev_item)
 {
-	printk(KERN_INFO "\t\tdev item devid %llu "
-	       "total_bytes %llu bytes used %llu\n",
+	pr_info("\t\tdev item devid %llu total_bytes %llu bytes used %llu\n",
 	       btrfs_device_id(eb, dev_item),
 	       btrfs_device_total_bytes(eb, dev_item),
 	       btrfs_device_bytes_used(eb, dev_item));
@@ -46,8 +44,7 @@
 static void print_extent_data_ref(struct extent_buffer *eb,
 				  struct btrfs_extent_data_ref *ref)
 {
-	printk(KERN_INFO "\t\textent data backref root %llu "
-	       "objectid %llu offset %llu count %u\n",
+	pr_info("\t\textent data backref root %llu objectid %llu offset %llu count %u\n",
 	       btrfs_extent_data_ref_root(eb, ref),
 	       btrfs_extent_data_ref_objectid(eb, ref),
 	       btrfs_extent_data_ref_offset(eb, ref),
@@ -72,7 +69,7 @@
 		struct btrfs_extent_item_v0 *ei0;
 		BUG_ON(item_size != sizeof(*ei0));
 		ei0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_item_v0);
-		printk(KERN_INFO "\t\textent refs %u\n",
+		pr_info("\t\textent refs %u\n",
 		       btrfs_extent_refs_v0(eb, ei0));
 		return;
 #else
@@ -83,7 +80,7 @@
 	ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item);
 	flags = btrfs_extent_flags(eb, ei);
 
-	printk(KERN_INFO "\t\textent refs %llu gen %llu flags %llu\n",
+	pr_info("\t\textent refs %llu gen %llu flags %llu\n",
 	       btrfs_extent_refs(eb, ei), btrfs_extent_generation(eb, ei),
 	       flags);
 
@@ -92,8 +89,7 @@
 		struct btrfs_tree_block_info *info;
 		info = (struct btrfs_tree_block_info *)(ei + 1);
 		btrfs_tree_block_key(eb, info, &key);
-		printk(KERN_INFO "\t\ttree block key (%llu %u %llu) "
-		       "level %d\n",
+		pr_info("\t\ttree block key (%llu %u %llu) level %d\n",
 		       btrfs_disk_key_objectid(&key), key.type,
 		       btrfs_disk_key_offset(&key),
 		       btrfs_tree_block_level(eb, info));
@@ -110,12 +106,10 @@
 		offset = btrfs_extent_inline_ref_offset(eb, iref);
 		switch (type) {
 		case BTRFS_TREE_BLOCK_REF_KEY:
-			printk(KERN_INFO "\t\ttree block backref "
-				"root %llu\n", offset);
+			pr_info("\t\ttree block backref root %llu\n", offset);
 			break;
 		case BTRFS_SHARED_BLOCK_REF_KEY:
-			printk(KERN_INFO "\t\tshared block backref "
-				"parent %llu\n", offset);
+			pr_info("\t\tshared block backref parent %llu\n", offset);
 			break;
 		case BTRFS_EXTENT_DATA_REF_KEY:
 			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
@@ -123,8 +117,7 @@
 			break;
 		case BTRFS_SHARED_DATA_REF_KEY:
 			sref = (struct btrfs_shared_data_ref *)(iref + 1);
-			printk(KERN_INFO "\t\tshared data backref "
-			       "parent %llu count %u\n",
+			pr_info("\t\tshared data backref parent %llu count %u\n",
 			       offset, btrfs_shared_data_ref_count(eb, sref));
 			break;
 		default:
@@ -141,8 +134,7 @@
 	struct btrfs_extent_ref_v0 *ref0;
 
 	ref0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_ref_v0);
-	printk("\t\textent back ref root %llu gen %llu "
-		"owner %llu num_refs %lu\n",
+	printk("\t\textent back ref root %llu gen %llu owner %llu num_refs %lu\n",
 		btrfs_ref_root_v0(eb, ref0),
 		btrfs_ref_generation_v0(eb, ref0),
 		btrfs_ref_objectid_v0(eb, ref0),
@@ -162,7 +154,7 @@
 		__le64 subvol_id;
 
 		read_extent_buffer(l, &subvol_id, offset, sizeof(subvol_id));
-		printk(KERN_INFO "\t\tsubvol_id %llu\n",
+		pr_info("\t\tsubvol_id %llu\n",
 		       (unsigned long long)le64_to_cpu(subvol_id));
 		item_size -= sizeof(u64);
 		offset += sizeof(u64);
@@ -196,15 +188,13 @@
 		item = btrfs_item_nr(i);
 		btrfs_item_key_to_cpu(l, &key, i);
 		type = key.type;
-		printk(KERN_INFO "\titem %d key (%llu %u %llu) itemoff %d "
-		       "itemsize %d\n",
+		pr_info("\titem %d key (%llu %u %llu) itemoff %d itemsize %d\n",
 			i, key.objectid, type, key.offset,
 			btrfs_item_offset(l, item), btrfs_item_size(l, item));
 		switch (type) {
 		case BTRFS_INODE_ITEM_KEY:
 			ii = btrfs_item_ptr(l, i, struct btrfs_inode_item);
-			printk(KERN_INFO "\t\tinode generation %llu size %llu "
-			       "mode %o\n",
+			pr_info("\t\tinode generation %llu size %llu mode %o\n",
 			       btrfs_inode_generation(l, ii),
 			       btrfs_inode_size(l, ii),
 			       btrfs_inode_mode(l, ii));
@@ -212,13 +202,13 @@
 		case BTRFS_DIR_ITEM_KEY:
 			di = btrfs_item_ptr(l, i, struct btrfs_dir_item);
 			btrfs_dir_item_key_to_cpu(l, di, &found_key);
-			printk(KERN_INFO "\t\tdir oid %llu type %u\n",
+			pr_info("\t\tdir oid %llu type %u\n",
 				found_key.objectid,
 				btrfs_dir_type(l, di));
 			break;
 		case BTRFS_ROOT_ITEM_KEY:
 			ri = btrfs_item_ptr(l, i, struct btrfs_root_item);
-			printk(KERN_INFO "\t\troot data bytenr %llu refs %u\n",
+			pr_info("\t\troot data bytenr %llu refs %u\n",
 				btrfs_disk_root_bytenr(l, ri),
 				btrfs_disk_root_refs(l, ri));
 			break;
@@ -227,10 +217,10 @@
 			print_extent_item(l, i, type);
 			break;
 		case BTRFS_TREE_BLOCK_REF_KEY:
-			printk(KERN_INFO "\t\ttree block backref\n");
+			pr_info("\t\ttree block backref\n");
 			break;
 		case BTRFS_SHARED_BLOCK_REF_KEY:
-			printk(KERN_INFO "\t\tshared block backref\n");
+			pr_info("\t\tshared block backref\n");
 			break;
 		case BTRFS_EXTENT_DATA_REF_KEY:
 			dref = btrfs_item_ptr(l, i,
@@ -240,7 +230,7 @@
 		case BTRFS_SHARED_DATA_REF_KEY:
 			sref = btrfs_item_ptr(l, i,
 					      struct btrfs_shared_data_ref);
-			printk(KERN_INFO "\t\tshared data backref count %u\n",
+			pr_info("\t\tshared data backref count %u\n",
 			       btrfs_shared_data_ref_count(l, sref));
 			break;
 		case BTRFS_EXTENT_DATA_KEY:
@@ -248,17 +238,14 @@
 					    struct btrfs_file_extent_item);
 			if (btrfs_file_extent_type(l, fi) ==
 			    BTRFS_FILE_EXTENT_INLINE) {
-				printk(KERN_INFO "\t\tinline extent data "
-				       "size %u\n",
+				pr_info("\t\tinline extent data size %u\n",
 				       btrfs_file_extent_inline_len(l, i, fi));
 				break;
 			}
-			printk(KERN_INFO "\t\textent data disk bytenr %llu "
-			       "nr %llu\n",
+			pr_info("\t\textent data disk bytenr %llu nr %llu\n",
 			       btrfs_file_extent_disk_bytenr(l, fi),
 			       btrfs_file_extent_disk_num_bytes(l, fi));
-			printk(KERN_INFO "\t\textent data offset %llu "
-			       "nr %llu ram %llu\n",
+			pr_info("\t\textent data offset %llu nr %llu ram %llu\n",
 			       btrfs_file_extent_offset(l, fi),
 			       btrfs_file_extent_num_bytes(l, fi),
 			       btrfs_file_extent_ram_bytes(l, fi));
@@ -273,7 +260,7 @@
 		case BTRFS_BLOCK_GROUP_ITEM_KEY:
 			bi = btrfs_item_ptr(l, i,
 					    struct btrfs_block_group_item);
-			printk(KERN_INFO "\t\tblock group used %llu\n",
+			pr_info("\t\tblock group used %llu\n",
 			       btrfs_disk_block_group_used(l, bi));
 			break;
 		case BTRFS_CHUNK_ITEM_KEY:
@@ -287,38 +274,36 @@
 		case BTRFS_DEV_EXTENT_KEY:
 			dev_extent = btrfs_item_ptr(l, i,
 						    struct btrfs_dev_extent);
-			printk(KERN_INFO "\t\tdev extent chunk_tree %llu\n"
-			       "\t\tchunk objectid %llu chunk offset %llu "
-			       "length %llu\n",
+			pr_info("\t\tdev extent chunk_tree %llu\n\t\tchunk objectid %llu chunk offset %llu length %llu\n",
 			       btrfs_dev_extent_chunk_tree(l, dev_extent),
 			       btrfs_dev_extent_chunk_objectid(l, dev_extent),
 			       btrfs_dev_extent_chunk_offset(l, dev_extent),
 			       btrfs_dev_extent_length(l, dev_extent));
 			break;
 		case BTRFS_PERSISTENT_ITEM_KEY:
-			printk(KERN_INFO "\t\tpersistent item objectid %llu offset %llu\n",
+			pr_info("\t\tpersistent item objectid %llu offset %llu\n",
 					key.objectid, key.offset);
 			switch (key.objectid) {
 			case BTRFS_DEV_STATS_OBJECTID:
-				printk(KERN_INFO "\t\tdevice stats\n");
+				pr_info("\t\tdevice stats\n");
 				break;
 			default:
-				printk(KERN_INFO "\t\tunknown persistent item\n");
+				pr_info("\t\tunknown persistent item\n");
 			}
 			break;
 		case BTRFS_TEMPORARY_ITEM_KEY:
-			printk(KERN_INFO "\t\ttemporary item objectid %llu offset %llu\n",
+			pr_info("\t\ttemporary item objectid %llu offset %llu\n",
 					key.objectid, key.offset);
 			switch (key.objectid) {
 			case BTRFS_BALANCE_OBJECTID:
-				printk(KERN_INFO "\t\tbalance status\n");
+				pr_info("\t\tbalance status\n");
 				break;
 			default:
-				printk(KERN_INFO "\t\tunknown temporary item\n");
+				pr_info("\t\tunknown temporary item\n");
 			}
 			break;
 		case BTRFS_DEV_REPLACE_KEY:
-			printk(KERN_INFO "\t\tdev replace\n");
+			pr_info("\t\tdev replace\n");
 			break;
 		case BTRFS_UUID_KEY_SUBVOL:
 		case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
@@ -343,12 +328,13 @@
 		btrfs_print_leaf(root, c);
 		return;
 	}
-	btrfs_info(root->fs_info, "node %llu level %d total ptrs %d free spc %u",
-		btrfs_header_bytenr(c), level, nr,
-		(u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
+	btrfs_info(root->fs_info,
+		   "node %llu level %d total ptrs %d free spc %u",
+		   btrfs_header_bytenr(c), level, nr,
+		   (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
 	for (i = 0; i < nr; i++) {
 		btrfs_node_key_to_cpu(c, &key, i);
-		printk(KERN_INFO "\tkey %d (%llu %u %llu) block %llu\n",
+		pr_info("\tkey %d (%llu %u %llu) block %llu\n",
 		       i, key.objectid, key.type, key.offset,
 		       btrfs_node_blockptr(c, i));
 	}
@@ -356,6 +342,13 @@
 		struct extent_buffer *next = read_tree_block(root,
 					btrfs_node_blockptr(c, i),
 					btrfs_node_ptr_generation(c, i));
+		if (IS_ERR(next)) {
+			continue;
+		} else if (!extent_buffer_uptodate(next)) {
+			free_extent_buffer(next);
+			continue;
+		}
+
 		if (btrfs_is_leaf(next) &&
 		   level != 1)
 			BUG();
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 8db2e29..11f4fff 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -309,7 +309,7 @@
 	u64 flags = 0;
 	u64 rescan_progress = 0;
 
-	if (!fs_info->quota_enabled)
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
 		return 0;
 
 	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
@@ -360,8 +360,7 @@
 			    fs_info->generation) {
 				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
 				btrfs_err(fs_info,
-					"qgroup generation mismatch, "
-					"marked as inconsistent");
+					"qgroup generation mismatch, marked as inconsistent");
 			}
 			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
 									  ptr);
@@ -463,13 +462,11 @@
 	}
 out:
 	fs_info->qgroup_flags |= flags;
-	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
-		fs_info->quota_enabled = 0;
-		fs_info->pending_quota_state = 0;
-	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
-		   ret >= 0) {
+	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
+		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+	else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
+		 ret >= 0)
 		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
-	}
 	btrfs_free_path(path);
 
 	if (ret < 0) {
@@ -847,7 +844,7 @@
 	}
 	ret = 0;
 out:
-	root->fs_info->pending_quota_state = 0;
+	set_bit(BTRFS_FS_QUOTA_DISABLING, &root->fs_info->flags);
 	btrfs_free_path(path);
 	return ret;
 }
@@ -868,7 +865,7 @@
 
 	mutex_lock(&fs_info->qgroup_ioctl_lock);
 	if (fs_info->quota_root) {
-		fs_info->pending_quota_state = 1;
+		set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags);
 		goto out;
 	}
 
@@ -964,7 +961,7 @@
 	}
 	spin_lock(&fs_info->qgroup_lock);
 	fs_info->quota_root = quota_root;
-	fs_info->pending_quota_state = 1;
+	set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags);
 	spin_unlock(&fs_info->qgroup_lock);
 out_free_path:
 	btrfs_free_path(path);
@@ -993,8 +990,8 @@
 	mutex_lock(&fs_info->qgroup_ioctl_lock);
 	if (!fs_info->quota_root)
 		goto out;
-	fs_info->quota_enabled = 0;
-	fs_info->pending_quota_state = 0;
+	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+	set_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags);
 	btrfs_qgroup_wait_for_completion(fs_info, false);
 	spin_lock(&fs_info->qgroup_lock);
 	quota_root = fs_info->quota_root;
@@ -1490,7 +1487,8 @@
 	struct btrfs_delayed_ref_root *delayed_refs;
 	int ret;
 
-	if (!fs_info->quota_enabled || bytenr == 0 || num_bytes == 0)
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
+	    || bytenr == 0 || num_bytes == 0)
 		return 0;
 	if (WARN_ON(trans == NULL))
 		return -EINVAL;
@@ -1713,7 +1711,7 @@
 	if (old_roots)
 		nr_old_roots = old_roots->nnodes;
 
-	if (!fs_info->quota_enabled)
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
 		goto out_free;
 	BUG_ON(!fs_info->quota_root);
 
@@ -1833,10 +1831,14 @@
 	if (!quota_root)
 		goto out;
 
-	if (!fs_info->quota_enabled && fs_info->pending_quota_state)
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
+	    test_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
 		start_rescan_worker = 1;
 
-	fs_info->quota_enabled = fs_info->pending_quota_state;
+	if (test_and_clear_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
+		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+	if (test_and_clear_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags))
+		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
 
 	spin_lock(&fs_info->qgroup_lock);
 	while (!list_empty(&fs_info->dirty_qgroups)) {
@@ -1855,7 +1857,7 @@
 					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
 		spin_lock(&fs_info->qgroup_lock);
 	}
-	if (fs_info->quota_enabled)
+	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
 	else
 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
@@ -1900,7 +1902,7 @@
 	u64 nums;
 
 	mutex_lock(&fs_info->qgroup_ioctl_lock);
-	if (!fs_info->quota_enabled)
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
 		goto out;
 
 	if (!quota_root) {
@@ -1991,8 +1993,9 @@
 		ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
 		if (ret) {
 			fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
-			btrfs_info(fs_info, "unable to update quota limit for %llu",
-			       dstgroup->qgroupid);
+			btrfs_info(fs_info,
+				   "unable to update quota limit for %llu",
+				   dstgroup->qgroupid);
 			goto unlock;
 		}
 	}
@@ -2226,8 +2229,7 @@
 	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
 		return;
 	btrfs_err(trans->fs_info,
-		"qgroups not uptodate in trans handle %p:  list is%s empty, "
-		"seq is %#x.%x",
+		"qgroups not uptodate in trans handle %p:  list is%s empty, seq is %#x.%x",
 		trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
 		(u32)(trans->delayed_ref_elem.seq >> 32),
 		(u32)trans->delayed_ref_elem.seq);
@@ -2255,10 +2257,11 @@
 					 &fs_info->qgroup_rescan_progress,
 					 path, 1, 0);
 
-	pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
-		 fs_info->qgroup_rescan_progress.objectid,
-		 fs_info->qgroup_rescan_progress.type,
-		 fs_info->qgroup_rescan_progress.offset, ret);
+	btrfs_debug(fs_info,
+		"current progress key (%llu %u %llu), search_slot ret %d",
+		fs_info->qgroup_rescan_progress.objectid,
+		fs_info->qgroup_rescan_progress.type,
+		fs_info->qgroup_rescan_progress.offset, ret);
 
 	if (ret) {
 		/*
@@ -2347,7 +2350,7 @@
 			err = PTR_ERR(trans);
 			break;
 		}
-		if (!fs_info->quota_enabled) {
+		if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
 			err = -EINTR;
 		} else {
 			err = qgroup_rescan_leaf(fs_info, path, trans);
@@ -2388,7 +2391,7 @@
 	ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
 	if (ret < 0) {
 		err = ret;
-		btrfs_err(fs_info, "fail to update qgroup status: %d\n", err);
+		btrfs_err(fs_info, "fail to update qgroup status: %d", err);
 	}
 	btrfs_end_transaction(trans, fs_info->quota_root);
 
@@ -2578,8 +2581,8 @@
 	struct ulist_iterator uiter;
 	int ret;
 
-	if (!root->fs_info->quota_enabled || !is_fstree(root->objectid) ||
-	    len == 0)
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+	    !is_fstree(root->objectid) || len == 0)
 		return 0;
 
 	changeset.bytes_changed = 0;
@@ -2676,8 +2679,8 @@
 {
 	int ret;
 
-	if (!root->fs_info->quota_enabled || !is_fstree(root->objectid) ||
-	    num_bytes == 0)
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+	    !is_fstree(root->objectid) || num_bytes == 0)
 		return 0;
 
 	BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
@@ -2692,7 +2695,8 @@
 {
 	int reserved;
 
-	if (!root->fs_info->quota_enabled || !is_fstree(root->objectid))
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+	    !is_fstree(root->objectid))
 		return;
 
 	reserved = atomic_xchg(&root->qgroup_meta_rsv, 0);
@@ -2703,7 +2707,8 @@
 
 void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
 {
-	if (!root->fs_info->quota_enabled || !is_fstree(root->objectid))
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+	    !is_fstree(root->objectid))
 		return;
 
 	BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index cd8d302..d016d4a 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -2143,7 +2143,10 @@
 
 	rbio->faila = find_logical_bio_stripe(rbio, bio);
 	if (rbio->faila == -1) {
-		BUG();
+		btrfs_warn(root->fs_info,
+	"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
+			   __func__, (u64)bio->bi_iter.bi_sector << 9,
+			   (u64)bio->bi_iter.bi_size, bbio->map_type);
 		if (generic_io)
 			btrfs_put_bbio(bbio);
 		kfree(rbio);
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 8428db7..75bab76 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -820,7 +820,7 @@
 
 	spin_lock(&fs_info->reada_lock);
 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
-		printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid,
+		btrfs_debug(fs_info, "dev %lld has %d in flight", device->devid,
 			atomic_read(&device->reada_in_flight));
 		index = 0;
 		while (1) {
@@ -829,17 +829,17 @@
 						     (void **)&zone, index, 1);
 			if (ret == 0)
 				break;
-			printk(KERN_DEBUG "  zone %llu-%llu elems %llu locked "
-				"%d devs", zone->start, zone->end, zone->elems,
-				zone->locked);
+			pr_debug("  zone %llu-%llu elems %llu locked %d devs",
+				    zone->start, zone->end, zone->elems,
+				    zone->locked);
 			for (j = 0; j < zone->ndevs; ++j) {
-				printk(KERN_CONT " %lld",
+				pr_cont(" %lld",
 					zone->devs[j]->devid);
 			}
 			if (device->reada_curr_zone == zone)
-				printk(KERN_CONT " curr off %llu",
+				pr_cont(" curr off %llu",
 					device->reada_next - zone->start);
-			printk(KERN_CONT "\n");
+			pr_cont("\n");
 			index = (zone->end >> PAGE_SHIFT) + 1;
 		}
 		cnt = 0;
@@ -851,21 +851,20 @@
 						     (void **)&re, index, 1);
 			if (ret == 0)
 				break;
-			printk(KERN_DEBUG
-				"  re: logical %llu size %u empty %d scheduled %d",
+			pr_debug("  re: logical %llu size %u empty %d scheduled %d",
 				re->logical, fs_info->tree_root->nodesize,
 				list_empty(&re->extctl), re->scheduled);
 
 			for (i = 0; i < re->nzones; ++i) {
-				printk(KERN_CONT " zone %llu-%llu devs",
+				pr_cont(" zone %llu-%llu devs",
 					re->zones[i]->start,
 					re->zones[i]->end);
 				for (j = 0; j < re->zones[i]->ndevs; ++j) {
-					printk(KERN_CONT " %lld",
+					pr_cont(" %lld",
 						re->zones[i]->devs[j]->devid);
 				}
 			}
-			printk(KERN_CONT "\n");
+			pr_cont("\n");
 			index = (re->logical >> PAGE_SHIFT) + 1;
 			if (++cnt > 15)
 				break;
@@ -885,20 +884,19 @@
 			index = (re->logical >> PAGE_SHIFT) + 1;
 			continue;
 		}
-		printk(KERN_DEBUG
-			"re: logical %llu size %u list empty %d scheduled %d",
+		pr_debug("re: logical %llu size %u list empty %d scheduled %d",
 			re->logical, fs_info->tree_root->nodesize,
 			list_empty(&re->extctl), re->scheduled);
 		for (i = 0; i < re->nzones; ++i) {
-			printk(KERN_CONT " zone %llu-%llu devs",
+			pr_cont(" zone %llu-%llu devs",
 				re->zones[i]->start,
 				re->zones[i]->end);
 			for (j = 0; j < re->zones[i]->ndevs; ++j) {
-				printk(KERN_CONT " %lld",
+				pr_cont(" %lld",
 				       re->zones[i]->devs[j]->devid);
 			}
 		}
-		printk(KERN_CONT "\n");
+		pr_cont("\n");
 		index = (re->logical >> PAGE_SHIFT) + 1;
 	}
 	spin_unlock(&fs_info->reada_lock);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index c0c13dc..0ec8ffa 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -337,8 +337,9 @@
 					      rb_node);
 	if (bnode->root)
 		fs_info = bnode->root->fs_info;
-	btrfs_panic(fs_info, errno, "Inconsistency in backref cache "
-		    "found at offset %llu", bytenr);
+	btrfs_panic(fs_info, errno,
+		    "Inconsistency in backref cache found at offset %llu",
+		    bytenr);
 }
 
 /*
@@ -923,9 +924,16 @@
 			path2->slots[level]--;
 
 		eb = path2->nodes[level];
-		WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
-			cur->bytenr);
-
+		if (btrfs_node_blockptr(eb, path2->slots[level]) !=
+		    cur->bytenr) {
+			btrfs_err(root->fs_info,
+	"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
+				  cur->bytenr, level - 1, root->objectid,
+				  node_key->objectid, node_key->type,
+				  node_key->offset);
+			err = -ENOENT;
+			goto out;
+		}
 		lower = cur;
 		need_check = true;
 		for (; level < BTRFS_MAX_LEVEL; level++) {
@@ -1296,9 +1304,9 @@
 			      node->bytenr, &node->rb_node);
 	spin_unlock(&rc->reloc_root_tree.lock);
 	if (rb_node) {
-		btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found "
-			    "for start=%llu while inserting into relocation "
-			    "tree", node->bytenr);
+		btrfs_panic(root->fs_info, -EEXIST,
+			    "Duplicate root found for start=%llu while inserting into relocation tree",
+			    node->bytenr);
 		kfree(node);
 		return -EEXIST;
 	}
@@ -2350,6 +2358,10 @@
 	while (!list_empty(list)) {
 		reloc_root = list_entry(list->next, struct btrfs_root,
 					root_list);
+		free_extent_buffer(reloc_root->node);
+		free_extent_buffer(reloc_root->commit_root);
+		reloc_root->node = NULL;
+		reloc_root->commit_root = NULL;
 		__del_reloc_root(reloc_root);
 	}
 }
@@ -2686,11 +2698,15 @@
 
 		if (!upper->eb) {
 			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
-			if (ret < 0) {
-				err = ret;
+			if (ret) {
+				if (ret < 0)
+					err = ret;
+				else
+					err = -ENOENT;
+
+				btrfs_release_path(path);
 				break;
 			}
-			BUG_ON(ret > 0);
 
 			if (!upper->eb) {
 				upper->eb = path->nodes[upper->level];
@@ -3203,7 +3219,7 @@
 			nr++;
 		}
 
-		btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
+		btrfs_set_extent_delalloc(inode, page_start, page_end, NULL, 0);
 		set_page_dirty(page);
 
 		unlock_extent(&BTRFS_I(inode)->io_tree,
@@ -3952,7 +3968,7 @@
 	struct btrfs_key key;
 	int ret = 0;
 
-	if (!fs_info->quota_enabled)
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
 		return 0;
 
 	/*
@@ -4365,8 +4381,9 @@
 		goto out;
 	}
 
-	btrfs_info(extent_root->fs_info, "relocating block group %llu flags %llu",
-	       rc->block_group->key.objectid, rc->block_group->flags);
+	btrfs_info(extent_root->fs_info,
+		   "relocating block group %llu flags %llu",
+		   rc->block_group->key.objectid, rc->block_group->flags);
 
 	btrfs_wait_block_group_reservations(rc->block_group);
 	btrfs_wait_nocow_writers(rc->block_group);
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 0912960..edae751 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -46,12 +46,7 @@
 		!= btrfs_root_generation_v2(item)) {
 		if (btrfs_root_generation_v2(item) != 0) {
 			btrfs_warn(eb->fs_info,
-					"mismatching "
-					"generation and generation_v2 "
-					"found in root item. This root "
-					"was probably mounted with an "
-					"older kernel. Resetting all "
-					"new fields.");
+					"mismatching generation and generation_v2 found in root item. This root was probably mounted with an older kernel. Resetting all new fields.");
 		}
 		need_reset = 1;
 	}
@@ -156,8 +151,9 @@
 
 	if (ret != 0) {
 		btrfs_print_leaf(root, path->nodes[0]);
-		btrfs_crit(root->fs_info, "unable to update root key %llu %u %llu",
-		       key->objectid, key->type, key->offset);
+		btrfs_crit(root->fs_info,
+			   "unable to update root key %llu %u %llu",
+			   key->objectid, key->type, key->offset);
 		BUG_ON(1);
 	}
 
@@ -302,8 +298,7 @@
 			if (IS_ERR(trans)) {
 				err = PTR_ERR(trans);
 				btrfs_handle_fs_error(tree_root->fs_info, err,
-					    "Failed to start trans to delete "
-					    "orphan item");
+					    "Failed to start trans to delete orphan item");
 				break;
 			}
 			err = btrfs_del_orphan_item(trans, tree_root,
@@ -311,8 +306,7 @@
 			btrfs_end_transaction(trans, tree_root);
 			if (err) {
 				btrfs_handle_fs_error(tree_root->fs_info, err,
-					    "Failed to delete root orphan "
-					    "item");
+					    "Failed to delete root orphan item");
 				break;
 			}
 			continue;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 1d195d2..fffb9ab 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -575,23 +575,25 @@
 	 * hold all of the paths here
 	 */
 	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
-		btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev "
-			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
-			"length %llu, links %u (path: %s)", swarn->errstr,
-			swarn->logical, rcu_str_deref(swarn->dev->name),
-			(unsigned long long)swarn->sector, root, inum, offset,
-			min(isize - offset, (u64)PAGE_SIZE), nlink,
-			(char *)(unsigned long)ipath->fspath->val[i]);
+		btrfs_warn_in_rcu(fs_info,
+				  "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
+				  swarn->errstr, swarn->logical,
+				  rcu_str_deref(swarn->dev->name),
+				  (unsigned long long)swarn->sector,
+				  root, inum, offset,
+				  min(isize - offset, (u64)PAGE_SIZE), nlink,
+				  (char *)(unsigned long)ipath->fspath->val[i]);
 
 	free_ipath(ipath);
 	return 0;
 
 err:
-	btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev "
-		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
-		"resolving failed with ret=%d", swarn->errstr,
-		swarn->logical, rcu_str_deref(swarn->dev->name),
-		(unsigned long long)swarn->sector, root, inum, offset, ret);
+	btrfs_warn_in_rcu(fs_info,
+			  "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
+			  swarn->errstr, swarn->logical,
+			  rcu_str_deref(swarn->dev->name),
+			  (unsigned long long)swarn->sector,
+			  root, inum, offset, ret);
 
 	free_ipath(ipath);
 	return 0;
@@ -645,9 +647,8 @@
 						      item_size, &ref_root,
 						      &ref_level);
 			btrfs_warn_in_rcu(fs_info,
-				"%s at logical %llu on dev %s, "
-				"sector %llu: metadata %s (level %d) in tree "
-				"%llu", errstr, swarn.logical,
+				"%s at logical %llu on dev %s, sector %llu: metadata %s (level %d) in tree %llu",
+				errstr, swarn.logical,
 				rcu_str_deref(dev->name),
 				(unsigned long long)swarn.sector,
 				ref_level ? "node" : "leaf",
@@ -1574,8 +1575,7 @@
 
 		if (!page_bad->dev->bdev) {
 			btrfs_warn_rl(sblock_bad->sctx->dev_root->fs_info,
-				"scrub_repair_page_from_good_copy(bdev == NULL) "
-				"is unexpected");
+				"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
 			return -EIO;
 		}
 
@@ -2961,7 +2961,8 @@
 			    (key.objectid < logic_start ||
 			     key.objectid + bytes >
 			     logic_start + map->stripe_len)) {
-				btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
+				btrfs_err(fs_info,
+					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
 					  key.objectid, logic_start);
 				spin_lock(&sctx->stat_lock);
 				sctx->stat.uncorrectable_errors++;
@@ -3312,8 +3313,7 @@
 			     key.objectid + bytes >
 			     logical + map->stripe_len)) {
 				btrfs_err(fs_info,
-					   "scrub: tree block %llu spanning "
-					   "stripes, ignored. logical=%llu",
+					   "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
 				       key.objectid, logical);
 				spin_lock(&sctx->stat_lock);
 				sctx->stat.uncorrectable_errors++;
@@ -3640,7 +3640,8 @@
 			 */
 			ro_set = 0;
 		} else {
-			btrfs_warn(fs_info, "failed setting block group ro, ret=%d\n",
+			btrfs_warn(fs_info,
+				   "failed setting block group ro, ret=%d\n",
 				   ret);
 			btrfs_put_block_group(cache);
 			break;
@@ -3861,8 +3862,7 @@
 	if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
 		/* not supported for data w/o checksums */
 		btrfs_err_rl(fs_info,
-			   "scrub: size assumption sectorsize != PAGE_SIZE "
-			   "(%d != %lu) fails",
+			   "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
 		       fs_info->chunk_root->sectorsize, PAGE_SIZE);
 		return -EINVAL;
 	}
@@ -3875,8 +3875,8 @@
 		 * would exhaust the array bounds of pagev member in
 		 * struct scrub_block
 		 */
-		btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
-			   "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
+		btrfs_err(fs_info,
+			  "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
 		       fs_info->chunk_root->nodesize,
 		       SCRUB_MAX_PAGES_PER_BLOCK,
 		       fs_info->chunk_root->sectorsize,
@@ -4202,10 +4202,10 @@
 	ret = iterate_inodes_from_logical(logical, fs_info, path,
 					  record_inode_for_nocow, nocow_ctx);
 	if (ret != 0 && ret != -ENOENT) {
-		btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
-			"phys %llu, len %llu, mir %u, ret %d",
-			logical, physical_for_dev_replace, len, mirror_num,
-			ret);
+		btrfs_warn(fs_info,
+			   "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
+			   logical, physical_for_dev_replace, len, mirror_num,
+			   ret);
 		not_written = 1;
 		goto out;
 	}
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index a87675f..01bc36c 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -36,10 +36,6 @@
 #include "transaction.h"
 #include "compression.h"
 
-static int g_verbose = 0;
-
-#define verbose_printk(...) if (g_verbose) printk(__VA_ARGS__)
-
 /*
  * A fs_path is a helper to dynamically build path names with unknown size.
  * It reallocates the internal buffer on demand.
@@ -727,9 +723,10 @@
 static int send_rename(struct send_ctx *sctx,
 		     struct fs_path *from, struct fs_path *to)
 {
+	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
 	int ret;
 
-verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start);
+	btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);
 
 	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
 	if (ret < 0)
@@ -751,9 +748,10 @@
 static int send_link(struct send_ctx *sctx,
 		     struct fs_path *path, struct fs_path *lnk)
 {
+	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
 	int ret;
 
-verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start);
+	btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);
 
 	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
 	if (ret < 0)
@@ -774,9 +772,10 @@
  */
 static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
 {
+	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
 	int ret;
 
-verbose_printk("btrfs: send_unlink %s\n", path->start);
+	btrfs_debug(fs_info, "send_unlink %s", path->start);
 
 	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
 	if (ret < 0)
@@ -796,9 +795,10 @@
  */
 static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
 {
+	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
 	int ret;
 
-verbose_printk("btrfs: send_rmdir %s\n", path->start);
+	btrfs_debug(fs_info, "send_rmdir %s", path->start);
 
 	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
 	if (ret < 0)
@@ -1313,6 +1313,7 @@
 			     u64 ino_size,
 			     struct clone_root **found)
 {
+	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
 	int ret;
 	int extent_type;
 	u64 logical;
@@ -1371,10 +1372,10 @@
 	}
 	logical = disk_byte + btrfs_file_extent_offset(eb, fi);
 
-	down_read(&sctx->send_root->fs_info->commit_root_sem);
-	ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path,
+	down_read(&fs_info->commit_root_sem);
+	ret = extent_from_logical(fs_info, disk_byte, tmp_path,
 				  &found_key, &flags);
-	up_read(&sctx->send_root->fs_info->commit_root_sem);
+	up_read(&fs_info->commit_root_sem);
 	btrfs_release_path(tmp_path);
 
 	if (ret < 0)
@@ -1429,7 +1430,7 @@
 		extent_item_pos = logical - found_key.objectid;
 	else
 		extent_item_pos = 0;
-	ret = iterate_extent_inodes(sctx->send_root->fs_info,
+	ret = iterate_extent_inodes(fs_info,
 					found_key.objectid, extent_item_pos, 1,
 					__iterate_backrefs, backref_ctx);
 
@@ -1439,20 +1440,18 @@
 	if (!backref_ctx->found_itself) {
 		/* found a bug in backref code? */
 		ret = -EIO;
-		btrfs_err(sctx->send_root->fs_info, "did not find backref in "
-				"send_root. inode=%llu, offset=%llu, "
-				"disk_byte=%llu found extent=%llu",
-				ino, data_offset, disk_byte, found_key.objectid);
+		btrfs_err(fs_info,
+			  "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
+			  ino, data_offset, disk_byte, found_key.objectid);
 		goto out;
 	}
 
-verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
-		"ino=%llu, "
-		"num_bytes=%llu, logical=%llu\n",
-		data_offset, ino, num_bytes, logical);
+	btrfs_debug(fs_info,
+		    "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
+		    data_offset, ino, num_bytes, logical);
 
 	if (!backref_ctx->found)
-		verbose_printk("btrfs:    no clones found\n");
+		btrfs_debug(fs_info, "no clones found");
 
 	cur_clone_root = NULL;
 	for (i = 0; i < sctx->clone_roots_cnt; i++) {
@@ -2423,10 +2422,11 @@
 
 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
 {
+	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
 	int ret = 0;
 	struct fs_path *p;
 
-verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size);
+	btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
 
 	p = fs_path_alloc();
 	if (!p)
@@ -2452,10 +2452,11 @@
 
 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
 {
+	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
 	int ret = 0;
 	struct fs_path *p;
 
-verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode);
+	btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
 
 	p = fs_path_alloc();
 	if (!p)
@@ -2481,10 +2482,12 @@
 
 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
 {
+	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
 	int ret = 0;
 	struct fs_path *p;
 
-verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid);
+	btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
+		    ino, uid, gid);
 
 	p = fs_path_alloc();
 	if (!p)
@@ -2511,6 +2514,7 @@
 
 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
 {
+	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
 	int ret = 0;
 	struct fs_path *p = NULL;
 	struct btrfs_inode_item *ii;
@@ -2519,7 +2523,7 @@
 	struct btrfs_key key;
 	int slot;
 
-verbose_printk("btrfs: send_utimes %llu\n", ino);
+	btrfs_debug(fs_info, "send_utimes %llu", ino);
 
 	p = fs_path_alloc();
 	if (!p)
@@ -2573,6 +2577,7 @@
  */
 static int send_create_inode(struct send_ctx *sctx, u64 ino)
 {
+	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
 	int ret = 0;
 	struct fs_path *p;
 	int cmd;
@@ -2580,7 +2585,7 @@
 	u64 mode;
 	u64 rdev;
 
-verbose_printk("btrfs: send_create_inode %llu\n", ino);
+	btrfs_debug(fs_info, "send_create_inode %llu", ino);
 
 	p = fs_path_alloc();
 	if (!p)
@@ -3638,6 +3643,7 @@
  */
 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
 {
+	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
 	int ret = 0;
 	struct recorded_ref *cur;
 	struct recorded_ref *cur2;
@@ -3650,7 +3656,7 @@
 	u64 last_dir_ino_rm = 0;
 	bool can_rename = true;
 
-verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
+	btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
 
 	/*
 	 * This should never happen as the root dir always has the same ref
@@ -4329,7 +4335,7 @@
 	int ret;
 	struct send_ctx *sctx = ctx;
 	struct fs_path *p;
-	posix_acl_xattr_header dummy_acl;
+	struct posix_acl_xattr_header dummy_acl;
 
 	p = fs_path_alloc();
 	if (!p)
@@ -4398,12 +4404,8 @@
 
 static int process_deleted_xattr(struct send_ctx *sctx)
 {
-	int ret;
-
-	ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
-			       sctx->cmp_key, __process_deleted_xattr, sctx);
-
-	return ret;
+	return iterate_dir_item(sctx->parent_root, sctx->right_path,
+				sctx->cmp_key, __process_deleted_xattr, sctx);
 }
 
 struct find_xattr_ctx {
@@ -4664,6 +4666,7 @@
  */
 static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
 {
+	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
 	int ret = 0;
 	struct fs_path *p;
 	ssize_t num_read = 0;
@@ -4672,7 +4675,7 @@
 	if (!p)
 		return -ENOMEM;
 
-verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
+	btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
 
 	num_read = fill_read_buf(sctx, offset, len);
 	if (num_read <= 0) {
@@ -4714,10 +4717,10 @@
 	struct fs_path *p;
 	u64 gen;
 
-verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
-	       "clone_inode=%llu, clone_offset=%llu\n", offset, len,
-		clone_root->root->objectid, clone_root->ino,
-		clone_root->offset);
+	btrfs_debug(sctx->send_root->fs_info,
+		    "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
+		    offset, len, clone_root->root->objectid, clone_root->ino,
+		    clone_root->offset);
 
 	p = fs_path_alloc();
 	if (!p)
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 4071fe2..74ed5aa 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -151,12 +151,11 @@
 		vaf.fmt = fmt;
 		vaf.va = &args;
 
-		printk(KERN_CRIT
-			"BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n",
+		pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n",
 			sb->s_id, function, line, errno, errstr, &vaf);
 		va_end(args);
 	} else {
-		printk(KERN_CRIT "BTRFS: error (device %s) in %s:%d: errno=%d %s\n",
+		pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s\n",
 			sb->s_id, function, line, errno, errstr);
 	}
 #endif
@@ -462,9 +461,11 @@
 		case Opt_datasum:
 			if (btrfs_test_opt(info, NODATASUM)) {
 				if (btrfs_test_opt(info, NODATACOW))
-					btrfs_info(root->fs_info, "setting datasum, datacow enabled");
+					btrfs_info(root->fs_info,
+						   "setting datasum, datacow enabled");
 				else
-					btrfs_info(root->fs_info, "setting datasum");
+					btrfs_info(root->fs_info,
+						   "setting datasum");
 			}
 			btrfs_clear_opt(info->mount_opt, NODATACOW);
 			btrfs_clear_opt(info->mount_opt, NODATASUM);
@@ -476,7 +477,8 @@
 					btrfs_info(root->fs_info,
 						   "setting nodatacow, compression disabled");
 				} else {
-					btrfs_info(root->fs_info, "setting nodatacow");
+					btrfs_info(root->fs_info,
+						   "setting nodatacow");
 				}
 			}
 			btrfs_clear_opt(info->mount_opt, COMPRESS);
@@ -608,8 +610,9 @@
 				info->alloc_start = memparse(num, NULL);
 				mutex_unlock(&info->chunk_mutex);
 				kfree(num);
-				btrfs_info(root->fs_info, "allocations start at %llu",
-					info->alloc_start);
+				btrfs_info(root->fs_info,
+					   "allocations start at %llu",
+					   info->alloc_start);
 			} else {
 				ret = -ENOMEM;
 				goto out;
@@ -762,8 +765,9 @@
 				goto out;
 			} else if (intarg >= 0) {
 				info->check_integrity_print_mask = intarg;
-				btrfs_info(root->fs_info, "check_integrity_print_mask 0x%x",
-				       info->check_integrity_print_mask);
+				btrfs_info(root->fs_info,
+					   "check_integrity_print_mask 0x%x",
+					   info->check_integrity_print_mask);
 			} else {
 				ret = -EINVAL;
 				goto out;
@@ -794,19 +798,22 @@
 			intarg = 0;
 			ret = match_int(&args[0], &intarg);
 			if (ret < 0) {
-				btrfs_err(root->fs_info, "invalid commit interval");
+				btrfs_err(root->fs_info,
+					  "invalid commit interval");
 				ret = -EINVAL;
 				goto out;
 			}
 			if (intarg > 0) {
 				if (intarg > 300) {
-					btrfs_warn(root->fs_info, "excessive commit interval %d",
-							intarg);
+					btrfs_warn(root->fs_info,
+						"excessive commit interval %d",
+						intarg);
 				}
 				info->commit_interval = intarg;
 			} else {
-				btrfs_info(root->fs_info, "using default commit interval %ds",
-				    BTRFS_DEFAULT_COMMIT_INTERVAL);
+				btrfs_info(root->fs_info,
+					   "using default commit interval %ds",
+					   BTRFS_DEFAULT_COMMIT_INTERVAL);
 				info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
 			}
 			break;
@@ -827,7 +834,8 @@
 			break;
 #endif
 		case Opt_err:
-			btrfs_info(root->fs_info, "unrecognized mount option '%s'", p);
+			btrfs_info(root->fs_info,
+				   "unrecognized mount option '%s'", p);
 			ret = -EINVAL;
 			goto out;
 		default:
@@ -916,9 +924,7 @@
 			}
 			break;
 		case Opt_subvolrootid:
-			printk(KERN_WARNING
-				"BTRFS: 'subvolrootid' mount option is deprecated and has "
-				"no effect\n");
+			pr_warn("BTRFS: 'subvolrootid' mount option is deprecated and has no effect\n");
 			break;
 		case Opt_device:
 			device_name = match_strdup(&args[0]);
@@ -1142,7 +1148,7 @@
 	sb->s_iflags |= SB_I_CGROUPWB;
 	err = open_ctree(sb, fs_devices, (char *)data);
 	if (err) {
-		printk(KERN_ERR "BTRFS: open_ctree failed\n");
+		btrfs_err(fs_info, "open_ctree failed");
 		return err;
 	}
 
@@ -1440,12 +1446,13 @@
 
 	if (!IS_ERR(root)) {
 		struct super_block *s = root->d_sb;
+		struct btrfs_fs_info *fs_info = btrfs_sb(s);
 		struct inode *root_inode = d_inode(root);
 		u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid;
 
 		ret = 0;
 		if (!is_subvolume_inode(root_inode)) {
-			pr_err("BTRFS: '%s' is not a valid subvolume\n",
+			btrfs_err(fs_info, "'%s' is not a valid subvolume",
 			       subvol_name);
 			ret = -EINVAL;
 		}
@@ -1455,8 +1462,9 @@
 			 * subvolume which was passed by ID is renamed and
 			 * another subvolume is renamed over the old location.
 			 */
-			pr_err("BTRFS: subvol '%s' does not match subvolid %llu\n",
-			       subvol_name, subvol_objectid);
+			btrfs_err(fs_info,
+				  "subvol '%s' does not match subvolid %llu",
+				  subvol_name, subvol_objectid);
 			ret = -EINVAL;
 		}
 		if (ret) {
@@ -1830,13 +1838,15 @@
 			btrfs_info(fs_info, "creating UUID tree");
 			ret = btrfs_create_uuid_tree(fs_info);
 			if (ret) {
-				btrfs_warn(fs_info, "failed to create the UUID tree %d", ret);
+				btrfs_warn(fs_info,
+					   "failed to create the UUID tree %d",
+					   ret);
 				goto restore;
 			}
 		}
 		sb->s_flags &= ~MS_RDONLY;
 
-		fs_info->open = 1;
+		set_bit(BTRFS_FS_OPEN, &fs_info->flags);
 	}
 out:
 	wake_up_process(fs_info->transaction_kthread);
@@ -2346,7 +2356,7 @@
 
 static void btrfs_print_mod_info(void)
 {
-	printk(KERN_INFO "Btrfs loaded, crc32c=%s"
+	pr_info("Btrfs loaded, crc32c=%s"
 #ifdef CONFIG_BTRFS_DEBUG
 			", debug=on"
 #endif
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index c656990..1f157fb 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -77,7 +77,7 @@
 		clear = BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR;
 		break;
 	default:
-		printk(KERN_WARNING "btrfs: sysfs: unknown feature set %d\n",
+		pr_warn("btrfs: sysfs: unknown feature set %d\n",
 				fa->feature_set);
 		return 0;
 	}
@@ -430,7 +430,8 @@
 {
 	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
 
-	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->sectorsize);
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			fs_info->super_copy->sectorsize);
 }
 
 BTRFS_ATTR(sectorsize, btrfs_sectorsize_show);
@@ -440,7 +441,8 @@
 {
 	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
 
-	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->sectorsize);
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			fs_info->super_copy->sectorsize);
 }
 
 BTRFS_ATTR(clone_alignment, btrfs_clone_alignment_show);
@@ -836,9 +838,18 @@
 	if (!btrfs_debugfs_root_dentry)
 		return -ENOMEM;
 
-	debugfs_create_u64("test", S_IRUGO | S_IWUGO, btrfs_debugfs_root_dentry,
+	/*
+	 * Example code, how to export data through debugfs.
+	 *
+	 * file:        /sys/kernel/debug/btrfs/test
+	 * contents of: btrfs_debugfs_test
+	 */
+#ifdef CONFIG_BTRFS_DEBUG
+	debugfs_create_u64("test", S_IRUGO | S_IWUSR, btrfs_debugfs_root_dentry,
 			&btrfs_debugfs_test);
 #endif
+
+#endif
 	return 0;
 }
 
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index d19ab03..caad80b 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -273,20 +273,37 @@
 	return ret;
 }
 
-/**
- * test_bit_in_byte - Determine whether a bit is set in a byte
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static inline int test_bit_in_byte(int nr, const u8 *addr)
+static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb,
+			   unsigned long len)
 {
-	return 1UL & (addr[nr / BITS_PER_BYTE] >> (nr & (BITS_PER_BYTE - 1)));
+	unsigned long i;
+
+	for (i = 0; i < len * BITS_PER_BYTE; i++) {
+		int bit, bit1;
+
+		bit = !!test_bit(i, bitmap);
+		bit1 = !!extent_buffer_test_bit(eb, 0, i);
+		if (bit1 != bit) {
+			test_msg("Bits do not match\n");
+			return -EINVAL;
+		}
+
+		bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
+						i % BITS_PER_BYTE);
+		if (bit1 != bit) {
+			test_msg("Offset bits do not match\n");
+			return -EINVAL;
+		}
+	}
+	return 0;
 }
 
 static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
 			     unsigned long len)
 {
-	unsigned long i, x;
+	unsigned long i, j;
+	u32 x;
+	int ret;
 
 	memset(bitmap, 0, len);
 	memset_extent_buffer(eb, 0, 0, len);
@@ -297,16 +314,18 @@
 
 	bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
 	extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
-	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+	ret = check_eb_bitmap(bitmap, eb, len);
+	if (ret) {
 		test_msg("Setting all bits failed\n");
-		return -EINVAL;
+		return ret;
 	}
 
 	bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
 	extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
-	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+	ret = check_eb_bitmap(bitmap, eb, len);
+	if (ret) {
 		test_msg("Clearing all bits failed\n");
-		return -EINVAL;
+		return ret;
 	}
 
 	/* Straddling pages test */
@@ -316,9 +335,10 @@
 			sizeof(long) * BITS_PER_BYTE);
 		extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
 					sizeof(long) * BITS_PER_BYTE);
-		if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+		ret = check_eb_bitmap(bitmap, eb, len);
+		if (ret) {
 			test_msg("Setting straddling pages failed\n");
-			return -EINVAL;
+			return ret;
 		}
 
 		bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
@@ -328,9 +348,10 @@
 		extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
 		extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
 					sizeof(long) * BITS_PER_BYTE);
-		if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+		ret = check_eb_bitmap(bitmap, eb, len);
+		if (ret) {
 			test_msg("Clearing straddling pages failed\n");
-			return -EINVAL;
+			return ret;
 		}
 	}
 
@@ -339,28 +360,22 @@
 	 * something repetitive that could miss some hypothetical off-by-n bug.
 	 */
 	x = 0;
-	for (i = 0; i < len / sizeof(long); i++) {
-		x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffUL;
-		bitmap[i] = x;
+	bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
+	extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
+	for (i = 0; i < len * BITS_PER_BYTE / 32; i++) {
+		x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffU;
+		for (j = 0; j < 32; j++) {
+			if (x & (1U << j)) {
+				bitmap_set(bitmap, i * 32 + j, 1);
+				extent_buffer_bitmap_set(eb, 0, i * 32 + j, 1);
+			}
+		}
 	}
-	write_extent_buffer(eb, bitmap, 0, len);
 
-	for (i = 0; i < len * BITS_PER_BYTE; i++) {
-		int bit, bit1;
-
-		bit = !!test_bit_in_byte(i, (u8 *)bitmap);
-		bit1 = !!extent_buffer_test_bit(eb, 0, i);
-		if (bit1 != bit) {
-			test_msg("Testing bit pattern failed\n");
-			return -EINVAL;
-		}
-
-		bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
-						i % BITS_PER_BYTE);
-		if (bit1 != bit) {
-			test_msg("Testing bit pattern with offset failed\n");
-			return -EINVAL;
-		}
+	ret = check_eb_bitmap(bitmap, eb, len);
+	if (ret) {
+		test_msg("Random bit pattern failed\n");
+		return ret;
 	}
 
 	return 0;
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
index 7508d3b..6e14404 100644
--- a/fs/btrfs/tests/free-space-tree-tests.c
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -24,20 +24,15 @@
 #include "../transaction.h"
 
 struct free_space_extent {
-	u64 start, length;
+	u64 start;
+	u64 length;
 };
 
-/*
- * The test cases align their operations to this in order to hit some of the
- * edge cases in the bitmap code.
- */
-#define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * PAGE_SIZE)
-
 static int __check_free_space_extents(struct btrfs_trans_handle *trans,
 				      struct btrfs_fs_info *fs_info,
 				      struct btrfs_block_group_cache *cache,
 				      struct btrfs_path *path,
-				      struct free_space_extent *extents,
+				      const struct free_space_extent * const extents,
 				      unsigned int num_extents)
 {
 	struct btrfs_free_space_info *info;
@@ -126,7 +121,7 @@
 				    struct btrfs_fs_info *fs_info,
 				    struct btrfs_block_group_cache *cache,
 				    struct btrfs_path *path,
-				    struct free_space_extent *extents,
+				    const struct free_space_extent * const extents,
 				    unsigned int num_extents)
 {
 	struct btrfs_free_space_info *info;
@@ -168,9 +163,10 @@
 static int test_empty_block_group(struct btrfs_trans_handle *trans,
 				  struct btrfs_fs_info *fs_info,
 				  struct btrfs_block_group_cache *cache,
-				  struct btrfs_path *path)
+				  struct btrfs_path *path,
+				  u32 alignment)
 {
-	struct free_space_extent extents[] = {
+	const struct free_space_extent extents[] = {
 		{cache->key.objectid, cache->key.offset},
 	};
 
@@ -181,9 +177,10 @@
 static int test_remove_all(struct btrfs_trans_handle *trans,
 			   struct btrfs_fs_info *fs_info,
 			   struct btrfs_block_group_cache *cache,
-			   struct btrfs_path *path)
+			   struct btrfs_path *path,
+			   u32 alignment)
 {
-	struct free_space_extent extents[] = {};
+	const struct free_space_extent extents[] = {};
 	int ret;
 
 	ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
@@ -201,16 +198,17 @@
 static int test_remove_beginning(struct btrfs_trans_handle *trans,
 				 struct btrfs_fs_info *fs_info,
 				 struct btrfs_block_group_cache *cache,
-				 struct btrfs_path *path)
+				 struct btrfs_path *path,
+				 u32 alignment)
 {
-	struct free_space_extent extents[] = {
-		{cache->key.objectid + BITMAP_RANGE,
-			cache->key.offset - BITMAP_RANGE},
+	const struct free_space_extent extents[] = {
+		{cache->key.objectid + alignment,
+			cache->key.offset - alignment},
 	};
 	int ret;
 
 	ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
-					    cache->key.objectid, BITMAP_RANGE);
+					    cache->key.objectid, alignment);
 	if (ret) {
 		test_msg("Could not remove free space\n");
 		return ret;
@@ -224,17 +222,18 @@
 static int test_remove_end(struct btrfs_trans_handle *trans,
 			   struct btrfs_fs_info *fs_info,
 			   struct btrfs_block_group_cache *cache,
-			   struct btrfs_path *path)
+			   struct btrfs_path *path,
+			   u32 alignment)
 {
-	struct free_space_extent extents[] = {
-		{cache->key.objectid, cache->key.offset - BITMAP_RANGE},
+	const struct free_space_extent extents[] = {
+		{cache->key.objectid, cache->key.offset - alignment},
 	};
 	int ret;
 
 	ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
 					    cache->key.objectid +
-					    cache->key.offset - BITMAP_RANGE,
-					    BITMAP_RANGE);
+					    cache->key.offset - alignment,
+					    alignment);
 	if (ret) {
 		test_msg("Could not remove free space\n");
 		return ret;
@@ -247,18 +246,19 @@
 static int test_remove_middle(struct btrfs_trans_handle *trans,
 			      struct btrfs_fs_info *fs_info,
 			      struct btrfs_block_group_cache *cache,
-			      struct btrfs_path *path)
+			      struct btrfs_path *path,
+			      u32 alignment)
 {
-	struct free_space_extent extents[] = {
-		{cache->key.objectid, BITMAP_RANGE},
-		{cache->key.objectid + 2 * BITMAP_RANGE,
-			cache->key.offset - 2 * BITMAP_RANGE},
+	const struct free_space_extent extents[] = {
+		{cache->key.objectid, alignment},
+		{cache->key.objectid + 2 * alignment,
+			cache->key.offset - 2 * alignment},
 	};
 	int ret;
 
 	ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
-					    cache->key.objectid + BITMAP_RANGE,
-					    BITMAP_RANGE);
+					    cache->key.objectid + alignment,
+					    alignment);
 	if (ret) {
 		test_msg("Could not remove free space\n");
 		return ret;
@@ -271,10 +271,11 @@
 static int test_merge_left(struct btrfs_trans_handle *trans,
 			   struct btrfs_fs_info *fs_info,
 			   struct btrfs_block_group_cache *cache,
-			   struct btrfs_path *path)
+			   struct btrfs_path *path,
+			   u32 alignment)
 {
-	struct free_space_extent extents[] = {
-		{cache->key.objectid, 2 * BITMAP_RANGE},
+	const struct free_space_extent extents[] = {
+		{cache->key.objectid, 2 * alignment},
 	};
 	int ret;
 
@@ -287,15 +288,15 @@
 	}
 
 	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
-				       cache->key.objectid, BITMAP_RANGE);
+				       cache->key.objectid, alignment);
 	if (ret) {
 		test_msg("Could not add free space\n");
 		return ret;
 	}
 
 	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
-				       cache->key.objectid + BITMAP_RANGE,
-				       BITMAP_RANGE);
+				       cache->key.objectid + alignment,
+				       alignment);
 	if (ret) {
 		test_msg("Could not add free space\n");
 		return ret;
@@ -308,10 +309,11 @@
 static int test_merge_right(struct btrfs_trans_handle *trans,
 			   struct btrfs_fs_info *fs_info,
 			   struct btrfs_block_group_cache *cache,
-			   struct btrfs_path *path)
+			   struct btrfs_path *path,
+			   u32 alignment)
 {
-	struct free_space_extent extents[] = {
-		{cache->key.objectid + BITMAP_RANGE, 2 * BITMAP_RANGE},
+	const struct free_space_extent extents[] = {
+		{cache->key.objectid + alignment, 2 * alignment},
 	};
 	int ret;
 
@@ -324,16 +326,16 @@
 	}
 
 	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
-				       cache->key.objectid + 2 * BITMAP_RANGE,
-				       BITMAP_RANGE);
+				       cache->key.objectid + 2 * alignment,
+				       alignment);
 	if (ret) {
 		test_msg("Could not add free space\n");
 		return ret;
 	}
 
 	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
-				       cache->key.objectid + BITMAP_RANGE,
-				       BITMAP_RANGE);
+				       cache->key.objectid + alignment,
+				       alignment);
 	if (ret) {
 		test_msg("Could not add free space\n");
 		return ret;
@@ -346,10 +348,11 @@
 static int test_merge_both(struct btrfs_trans_handle *trans,
 			   struct btrfs_fs_info *fs_info,
 			   struct btrfs_block_group_cache *cache,
-			   struct btrfs_path *path)
+			   struct btrfs_path *path,
+			   u32 alignment)
 {
-	struct free_space_extent extents[] = {
-		{cache->key.objectid, 3 * BITMAP_RANGE},
+	const struct free_space_extent extents[] = {
+		{cache->key.objectid, 3 * alignment},
 	};
 	int ret;
 
@@ -362,23 +365,23 @@
 	}
 
 	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
-				       cache->key.objectid, BITMAP_RANGE);
+				       cache->key.objectid, alignment);
 	if (ret) {
 		test_msg("Could not add free space\n");
 		return ret;
 	}
 
 	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
-				       cache->key.objectid + 2 * BITMAP_RANGE,
-				       BITMAP_RANGE);
+				       cache->key.objectid + 2 * alignment,
+				       alignment);
 	if (ret) {
 		test_msg("Could not add free space\n");
 		return ret;
 	}
 
 	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
-				       cache->key.objectid + BITMAP_RANGE,
-				       BITMAP_RANGE);
+				       cache->key.objectid + alignment,
+				       alignment);
 	if (ret) {
 		test_msg("Could not add free space\n");
 		return ret;
@@ -391,12 +394,13 @@
 static int test_merge_none(struct btrfs_trans_handle *trans,
 			   struct btrfs_fs_info *fs_info,
 			   struct btrfs_block_group_cache *cache,
-			   struct btrfs_path *path)
+			   struct btrfs_path *path,
+			   u32 alignment)
 {
-	struct free_space_extent extents[] = {
-		{cache->key.objectid, BITMAP_RANGE},
-		{cache->key.objectid + 2 * BITMAP_RANGE, BITMAP_RANGE},
-		{cache->key.objectid + 4 * BITMAP_RANGE, BITMAP_RANGE},
+	const struct free_space_extent extents[] = {
+		{cache->key.objectid, alignment},
+		{cache->key.objectid + 2 * alignment, alignment},
+		{cache->key.objectid + 4 * alignment, alignment},
 	};
 	int ret;
 
@@ -409,23 +413,23 @@
 	}
 
 	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
-				       cache->key.objectid, BITMAP_RANGE);
+				       cache->key.objectid, alignment);
 	if (ret) {
 		test_msg("Could not add free space\n");
 		return ret;
 	}
 
 	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
-				       cache->key.objectid + 4 * BITMAP_RANGE,
-				       BITMAP_RANGE);
+				       cache->key.objectid + 4 * alignment,
+				       alignment);
 	if (ret) {
 		test_msg("Could not add free space\n");
 		return ret;
 	}
 
 	ret = __add_to_free_space_tree(trans, fs_info, cache, path,
-				       cache->key.objectid + 2 * BITMAP_RANGE,
-				       BITMAP_RANGE);
+				       cache->key.objectid + 2 * alignment,
+				       alignment);
 	if (ret) {
 		test_msg("Could not add free space\n");
 		return ret;
@@ -438,10 +442,11 @@
 typedef int (*test_func_t)(struct btrfs_trans_handle *,
 			   struct btrfs_fs_info *,
 			   struct btrfs_block_group_cache *,
-			   struct btrfs_path *);
+			   struct btrfs_path *,
+			   u32 alignment);
 
-static int run_test(test_func_t test_func, int bitmaps,
-		u32 sectorsize, u32 nodesize)
+static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
+		    u32 nodesize, u32 alignment)
 {
 	struct btrfs_fs_info *fs_info;
 	struct btrfs_root *root = NULL;
@@ -480,7 +485,7 @@
 	btrfs_set_header_nritems(root->node, 0);
 	root->alloc_bytenr += 2 * nodesize;
 
-	cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE, sectorsize);
+	cache = btrfs_alloc_dummy_block_group(8 * alignment, sectorsize);
 	if (!cache) {
 		test_msg("Couldn't allocate dummy block group cache\n");
 		ret = -ENOMEM;
@@ -514,7 +519,7 @@
 		}
 	}
 
-	ret = test_func(&trans, root->fs_info, cache, path);
+	ret = test_func(&trans, root->fs_info, cache, path, alignment);
 	if (ret)
 		goto out;
 
@@ -539,15 +544,27 @@
 	return ret;
 }
 
-static int run_test_both_formats(test_func_t test_func,
-	u32 sectorsize, u32 nodesize)
+static int run_test_both_formats(test_func_t test_func, u32 sectorsize,
+				 u32 nodesize, u32 alignment)
 {
+	int test_ret = 0;
 	int ret;
 
-	ret = run_test(test_func, 0, sectorsize, nodesize);
-	if (ret)
-		return ret;
-	return run_test(test_func, 1, sectorsize, nodesize);
+	ret = run_test(test_func, 0, sectorsize, nodesize, alignment);
+	if (ret) {
+		test_msg("%pf failed with extents, sectorsize=%u, nodesize=%u, alignment=%u\n",
+			 test_func, sectorsize, nodesize, alignment);
+		test_ret = ret;
+	}
+
+	ret = run_test(test_func, 1, sectorsize, nodesize, alignment);
+	if (ret) {
+		test_msg("%pf failed with bitmaps, sectorsize=%u, nodesize=%u, alignment=%u\n",
+			 test_func, sectorsize, nodesize, alignment);
+		test_ret = ret;
+	}
+
+	return test_ret;
 }
 
 int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
@@ -563,18 +580,30 @@
 		test_merge_both,
 		test_merge_none,
 	};
+	u32 bitmap_alignment;
+	int test_ret = 0;
 	int i;
 
+	/*
+	 * Align some operations to a page to flush out bugs in the extent
+	 * buffer bitmap handling of highmem.
+	 */
+	bitmap_alignment = BTRFS_FREE_SPACE_BITMAP_BITS * PAGE_SIZE;
+
 	test_msg("Running free space tree tests\n");
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
-		int ret = run_test_both_formats(tests[i], sectorsize,
-			nodesize);
-		if (ret) {
-			test_msg("%pf : sectorsize %u failed\n",
-				tests[i], sectorsize);
-			return ret;
-		}
+		int ret;
+
+		ret = run_test_both_formats(tests[i], sectorsize, nodesize,
+					    sectorsize);
+		if (ret)
+			test_ret = ret;
+
+		ret = run_test_both_formats(tests[i], sectorsize, nodesize,
+					    bitmap_alignment);
+		if (ret)
+			test_ret = ret;
 	}
 
-	return 0;
+	return test_ret;
 }
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 9f72aed..0bf4680 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -968,7 +968,7 @@
 	/* [BTRFS_MAX_EXTENT_SIZE] */
 	BTRFS_I(inode)->outstanding_extents++;
 	ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1,
-					NULL);
+					NULL, 0);
 	if (ret) {
 		test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
 		goto out;
@@ -984,7 +984,7 @@
 	BTRFS_I(inode)->outstanding_extents++;
 	ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
 					BTRFS_MAX_EXTENT_SIZE + sectorsize - 1,
-					NULL);
+					NULL, 0);
 	if (ret) {
 		test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
 		goto out;
@@ -1019,7 +1019,7 @@
 	ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
 					(BTRFS_MAX_EXTENT_SIZE >> 1)
 					+ sectorsize - 1,
-					NULL);
+					NULL, 0);
 	if (ret) {
 		test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
 		goto out;
@@ -1042,7 +1042,7 @@
 	ret = btrfs_set_extent_delalloc(inode,
 			BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize,
 			(BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1,
-			NULL);
+			NULL, 0);
 	if (ret) {
 		test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
 		goto out;
@@ -1060,7 +1060,7 @@
 	BTRFS_I(inode)->outstanding_extents++;
 	ret = btrfs_set_extent_delalloc(inode,
 			BTRFS_MAX_EXTENT_SIZE + sectorsize,
-			BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL);
+			BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL, 0);
 	if (ret) {
 		test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
 		goto out;
@@ -1097,7 +1097,7 @@
 	BTRFS_I(inode)->outstanding_extents++;
 	ret = btrfs_set_extent_delalloc(inode,
 			BTRFS_MAX_EXTENT_SIZE + sectorsize,
-			BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL);
+			BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL, 0);
 	if (ret) {
 		test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
 		goto out;
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 4407fef..ca7cb5e 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -480,7 +480,7 @@
 	 */
 	root->fs_info->tree_root = root;
 	root->fs_info->quota_root = root;
-	root->fs_info->quota_enabled = 1;
+	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
 
 	/*
 	 * Can't use bytenr 0, some things freak out
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 95d4191..9517de0 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -65,8 +65,9 @@
 		BUG_ON(!list_empty(&transaction->list));
 		WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
 		if (transaction->delayed_refs.pending_csums)
-			printk(KERN_ERR "pending csums is %llu\n",
-			       transaction->delayed_refs.pending_csums);
+			btrfs_err(transaction->fs_info,
+				  "pending csums is %llu",
+				  transaction->delayed_refs.pending_csums);
 		while (!list_empty(&transaction->pending_chunks)) {
 			struct extent_map *em;
 
@@ -245,6 +246,7 @@
 		return -EROFS;
 	}
 
+	cur_trans->fs_info = fs_info;
 	atomic_set(&cur_trans->num_writers, 1);
 	extwriter_counter_init(cur_trans, type);
 	init_waitqueue_head(&cur_trans->writer_wait);
@@ -272,11 +274,9 @@
 	 */
 	smp_mb();
 	if (!list_empty(&fs_info->tree_mod_seq_list))
-		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when "
-			"creating a fresh transaction\n");
+		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
 	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
-		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
-			"creating a fresh transaction\n");
+		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
 	atomic64_set(&fs_info->tree_mod_seq, 0);
 
 	spin_lock_init(&cur_trans->delayed_refs.lock);
@@ -441,7 +441,7 @@
 
 static int may_wait_transaction(struct btrfs_root *root, int type)
 {
-	if (root->fs_info->log_root_recovering)
+	if (test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))
 		return 0;
 
 	if (type == TRANS_USERSPACE)
@@ -549,11 +549,8 @@
 		}
 	} while (ret == -EBUSY);
 
-	if (ret < 0) {
-		/* We must get the transaction if we are JOIN_NOLOCK. */
-		BUG_ON(type == TRANS_JOIN_NOLOCK);
+	if (ret < 0)
 		goto join_fail;
-	}
 
 	cur_trans = root->fs_info->running_transaction;
 
@@ -993,7 +990,6 @@
 	struct extent_state *cached_state = NULL;
 	u64 start = 0;
 	u64 end;
-	struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
 	bool errors = false;
 
 	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
@@ -1025,17 +1021,17 @@
 
 	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
 		if ((mark & EXTENT_DIRTY) &&
-		    test_and_clear_bit(BTRFS_INODE_BTREE_LOG1_ERR,
-				       &btree_ino->runtime_flags))
+		    test_and_clear_bit(BTRFS_FS_LOG1_ERR,
+				       &root->fs_info->flags))
 			errors = true;
 
 		if ((mark & EXTENT_NEW) &&
-		    test_and_clear_bit(BTRFS_INODE_BTREE_LOG2_ERR,
-				       &btree_ino->runtime_flags))
+		    test_and_clear_bit(BTRFS_FS_LOG2_ERR,
+				       &root->fs_info->flags))
 			errors = true;
 	} else {
-		if (test_and_clear_bit(BTRFS_INODE_BTREE_ERR,
-				       &btree_ino->runtime_flags))
+		if (test_and_clear_bit(BTRFS_FS_BTREE_ERR,
+				       &root->fs_info->flags))
 			errors = true;
 	}
 
@@ -1300,11 +1296,11 @@
 		btrfs_btree_balance_dirty(info->tree_root);
 		cond_resched();
 
-		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
+		if (btrfs_fs_closing(info) || ret != -EAGAIN)
 			break;
 
-		if (btrfs_defrag_cancelled(root->fs_info)) {
-			pr_debug("BTRFS: defrag_root cancelled\n");
+		if (btrfs_defrag_cancelled(info)) {
+			btrfs_debug(info, "defrag_root cancelled");
 			ret = -EAGAIN;
 			break;
 		}
@@ -1335,7 +1331,7 @@
 	 * kick in anyway.
 	 */
 	mutex_lock(&fs_info->qgroup_ioctl_lock);
-	if (!fs_info->quota_enabled) {
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
 		mutex_unlock(&fs_info->qgroup_ioctl_lock);
 		return 0;
 	}
@@ -1474,7 +1470,7 @@
 	parent_root = BTRFS_I(parent_inode)->root;
 	record_root_in_trans(trans, parent_root, 0);
 
-	cur_time = current_fs_time(parent_inode->i_sb);
+	cur_time = current_time(parent_inode);
 
 	/*
 	 * insert the directory item
@@ -1630,7 +1626,7 @@
 	btrfs_i_size_write(parent_inode, parent_inode->i_size +
 					 dentry->d_name.len * 2);
 	parent_inode->i_mtime = parent_inode->i_ctime =
-		current_fs_time(parent_inode->i_sb);
+		current_time(parent_inode);
 	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
@@ -1712,7 +1708,7 @@
 	super->root_level = root_item->level;
 	if (btrfs_test_opt(root->fs_info, SPACE_CACHE))
 		super->cache_generation = root_item->generation;
-	if (root->fs_info->update_uuid_tree_gen)
+	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &root->fs_info->flags))
 		super->uuid_tree_generation = root_item->generation;
 }
 
@@ -1919,7 +1915,6 @@
 {
 	struct btrfs_transaction *cur_trans = trans->transaction;
 	struct btrfs_transaction *prev_trans = NULL;
-	struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
 	int ret;
 
 	/* Stop the commit early if ->aborted is set */
@@ -2213,8 +2208,8 @@
 	btrfs_update_commit_device_size(root->fs_info);
 	btrfs_update_commit_device_bytes_used(root, cur_trans);
 
-	clear_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
-	clear_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);
+	clear_bit(BTRFS_FS_LOG1_ERR, &root->fs_info->flags);
+	clear_bit(BTRFS_FS_LOG2_ERR, &root->fs_info->flags);
 
 	btrfs_trans_release_chunk_metadata(trans);
 
@@ -2328,7 +2323,7 @@
 	list_del_init(&root->root_list);
 	spin_unlock(&fs_info->trans_lock);
 
-	pr_debug("BTRFS: cleaner removing %llu\n", root->objectid);
+	btrfs_debug(fs_info, "cleaner removing %llu", root->objectid);
 
 	btrfs_kill_all_delayed_nodes(root);
 
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index efb1226..6cf0d37 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -82,6 +82,7 @@
 	spinlock_t dropped_roots_lock;
 	struct btrfs_delayed_ref_root delayed_refs;
 	int aborted;
+	struct btrfs_fs_info *fs_info;
 };
 
 #define __TRANS_FREEZABLE	(1U << 0)
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index ef9c55b..528cae1 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3961,7 +3961,7 @@
 			 * i_mapping flags, so that the next fsync won't get
 			 * an outdated io error too.
 			 */
-			btrfs_inode_check_errors(inode);
+			filemap_check_errors(inode->i_mapping);
 			*ordered_io_error = true;
 			break;
 		}
@@ -4198,7 +4198,7 @@
 	 * without writing to the log tree and the fsync must report the
 	 * file data write error and not commit the current transaction.
 	 */
-	ret = btrfs_inode_check_errors(inode);
+	ret = filemap_check_errors(inode->i_mapping);
 	if (ret)
 		ctx->io_err = ret;
 process:
@@ -5579,7 +5579,7 @@
 	if (!path)
 		return -ENOMEM;
 
-	fs_info->log_root_recovering = 1;
+	set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
 
 	trans = btrfs_start_transaction(fs_info->tree_root, 0);
 	if (IS_ERR(trans)) {
@@ -5592,8 +5592,8 @@
 
 	ret = walk_log_tree(trans, log_root_tree, &wc);
 	if (ret) {
-		btrfs_handle_fs_error(fs_info, ret, "Failed to pin buffers while "
-			    "recovering log root tree.");
+		btrfs_handle_fs_error(fs_info, ret,
+			"Failed to pin buffers while recovering log root tree.");
 		goto error;
 	}
 
@@ -5639,8 +5639,8 @@
 			free_extent_buffer(log->node);
 			free_extent_buffer(log->commit_root);
 			kfree(log);
-			btrfs_handle_fs_error(fs_info, ret, "Couldn't read target root "
-				    "for tree log recovery.");
+			btrfs_handle_fs_error(fs_info, ret,
+				"Couldn't read target root for tree log recovery.");
 			goto error;
 		}
 
@@ -5689,7 +5689,7 @@
 
 	free_extent_buffer(log_root_tree->node);
 	log_root_tree->log_root = NULL;
-	fs_info->log_root_recovering = 0;
+	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
 	kfree(log_root_tree);
 
 	return 0;
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index 7782829..7fc89e4 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -69,8 +69,9 @@
 	ret = -ENOENT;
 
 	if (!IS_ALIGNED(item_size, sizeof(u64))) {
-		btrfs_warn(uuid_root->fs_info, "uuid item with illegal size %lu!",
-			(unsigned long)item_size);
+		btrfs_warn(uuid_root->fs_info,
+			   "uuid item with illegal size %lu!",
+			   (unsigned long)item_size);
 		goto out;
 	}
 	while (item_size) {
@@ -137,10 +138,10 @@
 		offset = btrfs_item_ptr_offset(eb, slot);
 		offset += btrfs_item_size_nr(eb, slot) - sizeof(subid_le);
 	} else if (ret < 0) {
-		btrfs_warn(uuid_root->fs_info, "insert uuid item failed %d "
-			"(0x%016llx, 0x%016llx) type %u!",
-			ret, (unsigned long long)key.objectid,
-			(unsigned long long)key.offset, type);
+		btrfs_warn(uuid_root->fs_info,
+			   "insert uuid item failed %d (0x%016llx, 0x%016llx) type %u!",
+			   ret, (unsigned long long)key.objectid,
+			   (unsigned long long)key.offset, type);
 		goto out;
 	}
 
@@ -184,8 +185,8 @@
 
 	ret = btrfs_search_slot(trans, uuid_root, &key, path, -1, 1);
 	if (ret < 0) {
-		btrfs_warn(uuid_root->fs_info, "error %d while searching for uuid item!",
-			ret);
+		btrfs_warn(uuid_root->fs_info,
+			   "error %d while searching for uuid item!", ret);
 		goto out;
 	}
 	if (ret > 0) {
@@ -198,8 +199,9 @@
 	offset = btrfs_item_ptr_offset(eb, slot);
 	item_size = btrfs_item_size_nr(eb, slot);
 	if (!IS_ALIGNED(item_size, sizeof(u64))) {
-		btrfs_warn(uuid_root->fs_info, "uuid item with illegal size %lu!",
-			(unsigned long)item_size);
+		btrfs_warn(uuid_root->fs_info,
+			   "uuid item with illegal size %lu!",
+			   (unsigned long)item_size);
 		ret = -ENOENT;
 		goto out;
 	}
@@ -299,8 +301,9 @@
 		offset = btrfs_item_ptr_offset(leaf, slot);
 		item_size = btrfs_item_size_nr(leaf, slot);
 		if (!IS_ALIGNED(item_size, sizeof(u64))) {
-			btrfs_warn(fs_info, "uuid item with illegal size %lu!",
-				(unsigned long)item_size);
+			btrfs_warn(fs_info,
+				   "uuid item with illegal size %lu!",
+				   (unsigned long)item_size);
 			goto skip;
 		}
 		while (item_size) {
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 035efce..71a60cc 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -859,7 +859,7 @@
 		blkdev_put(device->bdev, device->mode);
 }
 
-static void btrfs_close_one_device(struct btrfs_device *device)
+static void btrfs_prepare_close_one_device(struct btrfs_device *device)
 {
 	struct btrfs_fs_devices *fs_devices = device->fs_devices;
 	struct btrfs_device *new_device;
@@ -877,8 +877,6 @@
 	if (device->missing)
 		fs_devices->missing_devices--;
 
-	btrfs_close_bdev(device);
-
 	new_device = btrfs_alloc_device(NULL, &device->devid,
 					device->uuid);
 	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
@@ -892,23 +890,39 @@
 
 	list_replace_rcu(&device->dev_list, &new_device->dev_list);
 	new_device->fs_devices = device->fs_devices;
-
-	call_rcu(&device->rcu, free_device);
 }
 
 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
 {
 	struct btrfs_device *device, *tmp;
+	struct list_head pending_put;
+
+	INIT_LIST_HEAD(&pending_put);
 
 	if (--fs_devices->opened > 0)
 		return 0;
 
 	mutex_lock(&fs_devices->device_list_mutex);
 	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
-		btrfs_close_one_device(device);
+		btrfs_prepare_close_one_device(device);
+		list_add(&device->dev_list, &pending_put);
 	}
 	mutex_unlock(&fs_devices->device_list_mutex);
 
+	/*
+	 * btrfs_show_devname() is using the device_list_mutex,
+	 * sometimes call to blkdev_put() leads vfs calling
+	 * into this func. So do put outside of device_list_mutex,
+	 * as of now.
+	 */
+	while (!list_empty(&pending_put)) {
+		device = list_first_entry(&pending_put,
+				struct btrfs_device, dev_list);
+		list_del(&device->dev_list);
+		btrfs_close_bdev(device);
+		call_rcu(&device->rcu, free_device);
+	}
+
 	WARN_ON(fs_devices->open_devices);
 	WARN_ON(fs_devices->rw_devices);
 	fs_devices->opened = 0;
@@ -1140,12 +1154,12 @@
 	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
 	if (ret > 0) {
 		if (disk_super->label[0]) {
-			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
+			pr_info("BTRFS: device label %s ", disk_super->label);
 		} else {
-			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
+			pr_info("BTRFS: device fsid %pU ", disk_super->fsid);
 		}
 
-		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
+		pr_cont("devid %llu transid %llu %s\n", devid, transid, path);
 		ret = 0;
 	}
 	if (!ret && fs_devices_ret)
@@ -1846,7 +1860,6 @@
 	u64 num_devices;
 	int ret = 0;
 	bool clear_super = false;
-	char *dev_name = NULL;
 
 	mutex_lock(&uuid_mutex);
 
@@ -1882,11 +1895,6 @@
 		list_del_init(&device->dev_alloc_list);
 		device->fs_devices->rw_devices--;
 		unlock_chunks(root);
-		dev_name = kstrdup(device->name->str, GFP_KERNEL);
-		if (!dev_name) {
-			ret = -ENOMEM;
-			goto error_undo;
-		}
 		clear_super = true;
 	}
 
@@ -1936,14 +1944,21 @@
 		btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
 	}
 
-	btrfs_close_bdev(device);
-
-	call_rcu(&device->rcu, free_device);
-
 	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
 	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
 
+	/*
+	 * at this point, the device is zero sized and detached from
+	 * the devices list.  All that's left is to zero out the old
+	 * supers and free the device.
+	 */
+	if (device->writeable)
+		btrfs_scratch_superblocks(device->bdev, device->name->str);
+
+	btrfs_close_bdev(device);
+	call_rcu(&device->rcu, free_device);
+
 	if (cur_devices->open_devices == 0) {
 		struct btrfs_fs_devices *fs_devices;
 		fs_devices = root->fs_info->fs_devices;
@@ -1962,24 +1977,7 @@
 	root->fs_info->num_tolerated_disk_barrier_failures =
 		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
 
-	/*
-	 * at this point, the device is zero sized.  We want to
-	 * remove it from the devices list and zero out the old super
-	 */
-	if (clear_super) {
-		struct block_device *bdev;
-
-		bdev = blkdev_get_by_path(dev_name, FMODE_READ | FMODE_EXCL,
-						root->fs_info->bdev_holder);
-		if (!IS_ERR(bdev)) {
-			btrfs_scratch_superblocks(bdev, dev_name);
-			blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
-		}
-	}
-
 out:
-	kfree(dev_name);
-
 	mutex_unlock(&uuid_mutex);
 	return ret;
 
@@ -2494,9 +2492,7 @@
 		ret = btrfs_relocate_sys_chunks(root);
 		if (ret < 0)
 			btrfs_handle_fs_error(root->fs_info, ret,
-				    "Failed to relocate sys chunks after "
-				    "device initialization. This can be fixed "
-				    "using the \"btrfs balance\" command.");
+				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
 		trans = btrfs_attach_transaction(root);
 		if (IS_ERR(trans)) {
 			if (PTR_ERR(trans) == -ENOENT)
@@ -2555,7 +2551,8 @@
 	devices = &fs_info->fs_devices->devices;
 	list_for_each_entry(device, devices, dev_list) {
 		if (device->bdev == bdev) {
-			btrfs_err(fs_info, "target device is in the filesystem!");
+			btrfs_err(fs_info,
+				  "target device is in the filesystem!");
 			ret = -EEXIST;
 			goto error;
 		}
@@ -2564,7 +2561,8 @@
 
 	if (i_size_read(bdev->bd_inode) <
 	    btrfs_device_get_total_bytes(srcdev)) {
-		btrfs_err(fs_info, "target device is smaller than source device!");
+		btrfs_err(fs_info,
+			  "target device is smaller than source device!");
 		ret = -EINVAL;
 		goto error;
 	}
@@ -3698,7 +3696,7 @@
 	btrfs_free_path(path);
 	if (enospc_errors) {
 		btrfs_info(fs_info, "%d enospc errors during balance",
-		       enospc_errors);
+			   enospc_errors);
 		if (!ret)
 			ret = -ENOSPC;
 	}
@@ -3792,8 +3790,8 @@
 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
-			btrfs_err(fs_info, "with mixed groups data and "
-				   "metadata balance options must be the same");
+			btrfs_err(fs_info,
+				  "with mixed groups data and metadata balance options must be the same");
 			ret = -EINVAL;
 			goto out;
 		}
@@ -3815,23 +3813,23 @@
 		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
 			    BTRFS_BLOCK_GROUP_RAID6);
 	if (validate_convert_profile(&bctl->data, allowed)) {
-		btrfs_err(fs_info, "unable to start balance with target "
-			   "data profile %llu",
-		       bctl->data.target);
+		btrfs_err(fs_info,
+			  "unable to start balance with target data profile %llu",
+			  bctl->data.target);
 		ret = -EINVAL;
 		goto out;
 	}
 	if (validate_convert_profile(&bctl->meta, allowed)) {
 		btrfs_err(fs_info,
-			   "unable to start balance with target metadata profile %llu",
-		       bctl->meta.target);
+			  "unable to start balance with target metadata profile %llu",
+			  bctl->meta.target);
 		ret = -EINVAL;
 		goto out;
 	}
 	if (validate_convert_profile(&bctl->sys, allowed)) {
 		btrfs_err(fs_info,
-			   "unable to start balance with target system profile %llu",
-		       bctl->sys.target);
+			  "unable to start balance with target system profile %llu",
+			  bctl->sys.target);
 		ret = -EINVAL;
 		goto out;
 	}
@@ -3851,10 +3849,11 @@
 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
 		     !(bctl->meta.target & allowed))) {
 			if (bctl->flags & BTRFS_BALANCE_FORCE) {
-				btrfs_info(fs_info, "force reducing metadata integrity");
+				btrfs_info(fs_info,
+					   "force reducing metadata integrity");
 			} else {
-				btrfs_err(fs_info, "balance will reduce metadata "
-					   "integrity, use force if you want this");
+				btrfs_err(fs_info,
+					  "balance will reduce metadata integrity, use force if you want this");
 				ret = -EINVAL;
 				goto out;
 			}
@@ -3864,8 +3863,8 @@
 	if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) <
 		btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) {
 		btrfs_warn(fs_info,
-	"metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
-			bctl->meta.target, bctl->data.target);
+			   "metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
+			   bctl->meta.target, bctl->data.target);
 	}
 
 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
@@ -4221,7 +4220,7 @@
 	if (ret)
 		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
 	else
-		fs_info->update_uuid_tree_gen = 1;
+		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
 	up(&fs_info->uuid_tree_rescan_sem);
 	return 0;
 }
@@ -4913,15 +4912,16 @@
 	read_unlock(&em_tree->lock);
 
 	if (!em) {
-		btrfs_crit(extent_root->fs_info, "unable to find logical "
-			   "%Lu len %Lu", chunk_offset, chunk_size);
+		btrfs_crit(extent_root->fs_info,
+			   "unable to find logical %Lu len %Lu",
+			   chunk_offset, chunk_size);
 		return -EINVAL;
 	}
 
 	if (em->start != chunk_offset || em->len != chunk_size) {
-		btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted"
-			  " %Lu-%Lu, found %Lu-%Lu", chunk_offset,
-			  chunk_size, em->start, em->len);
+		btrfs_crit(extent_root->fs_info,
+			   "found a bad mapping, wanted %Lu-%Lu, found %Lu-%Lu",
+			    chunk_offset, chunk_size, em->start, em->len);
 		free_extent_map(em);
 		return -EINVAL;
 	}
@@ -5154,9 +5154,9 @@
 	}
 
 	if (em->start > logical || em->start + em->len < logical) {
-		btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
-			    "%Lu-%Lu", logical, logical+len, em->start,
-			    em->start + em->len);
+		btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got %Lu-%Lu",
+			   logical, logical+len, em->start,
+			   em->start + em->len);
 		free_extent_map(em);
 		return 1;
 	}
@@ -5370,9 +5370,9 @@
 	}
 
 	if (em->start > logical || em->start + em->len < logical) {
-		btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
-			   "found %Lu-%Lu", logical, em->start,
-			   em->start + em->len);
+		btrfs_crit(fs_info,
+			   "found a bad mapping, wanted %Lu, found %Lu-%Lu",
+			   logical, em->start, em->start + em->len);
 		free_extent_map(em);
 		return -EINVAL;
 	}
@@ -5390,9 +5390,8 @@
 
 	stripe_offset = stripe_nr * stripe_len;
 	if (offset < stripe_offset) {
-		btrfs_crit(fs_info, "stripe math has gone wrong, "
-			   "stripe_offset=%llu, offset=%llu, start=%llu, "
-			   "logical=%llu, stripe_len=%llu",
+		btrfs_crit(fs_info,
+			   "stripe math has gone wrong, stripe_offset=%llu, offset=%llu, start=%llu, logical=%llu, stripe_len=%llu",
 			   stripe_offset, offset, em->start, logical,
 			   stripe_len);
 		free_extent_map(em);
@@ -5642,8 +5641,8 @@
 		mirror_num = stripe_index + 1;
 	}
 	if (stripe_index >= map->num_stripes) {
-		btrfs_crit(fs_info, "stripe index math went horribly wrong, "
-			   "got stripe_index=%u, num_stripes=%u",
+		btrfs_crit(fs_info,
+			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
 			   stripe_index, map->num_stripes);
 		ret = -EINVAL;
 		goto out;
@@ -5907,10 +5906,11 @@
 				 mirror_num, need_raid_map);
 }
 
-int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
+int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
 		     u64 chunk_start, u64 physical, u64 devid,
 		     u64 **logical, int *naddrs, int *stripe_len)
 {
+	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
 	struct extent_map_tree *em_tree = &map_tree->map_tree;
 	struct extent_map *em;
 	struct map_lookup *map;
@@ -5926,13 +5926,13 @@
 	read_unlock(&em_tree->lock);
 
 	if (!em) {
-		printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
-		       chunk_start);
+		btrfs_err(fs_info, "couldn't find em for chunk %Lu",
+			chunk_start);
 		return -EIO;
 	}
 
 	if (em->start != chunk_start) {
-		printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
+		btrfs_err(fs_info, "bad chunk start, em=%Lu, wanted=%Lu",
 		       em->start, chunk_start);
 		free_extent_map(em);
 		return -EIO;
@@ -6137,10 +6137,12 @@
 
 		rcu_read_lock();
 		name = rcu_dereference(dev->name);
-		pr_debug("btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu "
-			 "(%s id %llu), size=%u\n", bio_op(bio), bio->bi_opf,
-			 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
-			 name->str, dev->devid, bio->bi_iter.bi_size);
+		btrfs_debug(fs_info,
+			"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
+			bio_op(bio), bio->bi_opf,
+			(u64)bio->bi_iter.bi_sector,
+			(u_long)dev->bdev->bd_dev, name->str, dev->devid,
+			bio->bi_iter.bi_size);
 		rcu_read_unlock();
 	}
 #endif
@@ -6215,8 +6217,9 @@
 	}
 
 	if (map_length < length) {
-		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
-			logical, length, map_length);
+		btrfs_crit(root->fs_info,
+			   "mapping failed logical %llu bio len %llu len %llu",
+			   logical, length, map_length);
 		BUG();
 	}
 
@@ -6483,8 +6486,9 @@
 				free_extent_map(em);
 				return -EIO;
 			}
-			btrfs_warn(root->fs_info, "devid %llu uuid %pU is missing",
-						devid, uuid);
+			btrfs_warn(root->fs_info,
+				   "devid %llu uuid %pU is missing",
+				   devid, uuid);
 		}
 		map->stripes[i].dev->in_fs_metadata = 1;
 	}
@@ -6661,7 +6665,8 @@
 
 int btrfs_read_sys_array(struct btrfs_root *root)
 {
-	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_super_block *super_copy = fs_info->super_copy;
 	struct extent_buffer *sb;
 	struct btrfs_disk_key *disk_key;
 	struct btrfs_chunk *chunk;
@@ -6732,8 +6737,8 @@
 
 			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
 			if (!num_stripes) {
-				printk(KERN_ERR
-	    "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
+				btrfs_err(fs_info,
+					"invalid number of stripes %u in sys_array at offset %u",
 					num_stripes, cur_offset);
 				ret = -EIO;
 				break;
@@ -6741,7 +6746,7 @@
 
 			type = btrfs_chunk_type(sb, chunk);
 			if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
-				btrfs_err(root->fs_info,
+				btrfs_err(fs_info,
 			    "invalid chunk type %llu in sys_array at offset %u",
 					type, cur_offset);
 				ret = -EIO;
@@ -6756,9 +6761,9 @@
 			if (ret)
 				break;
 		} else {
-			printk(KERN_ERR
-		"BTRFS: unexpected item type %u in sys_array at offset %u\n",
-				(u32)key.type, cur_offset);
+			btrfs_err(fs_info,
+			    "unexpected item type %u in sys_array at offset %u",
+				  (u32)key.type, cur_offset);
 			ret = -EIO;
 			break;
 		}
@@ -6771,7 +6776,7 @@
 	return ret;
 
 out_short_read:
-	printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
+	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
 			len, cur_offset);
 	clear_extent_buffer_uptodate(sb);
 	free_extent_buffer_stale(sb);
@@ -7095,10 +7100,12 @@
 	mutex_unlock(&fs_devices->device_list_mutex);
 
 	if (!dev) {
-		btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
+		btrfs_warn(root->fs_info,
+			   "get dev_stats failed, device not found");
 		return -ENODEV;
 	} else if (!dev->dev_stats_valid) {
-		btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
+		btrfs_warn(root->fs_info,
+			   "get dev_stats failed, not yet valid");
 		return -ENODEV;
 	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 6613e63..09ed29c 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -382,7 +382,7 @@
 		     u64 logical, u64 *length,
 		     struct btrfs_bio **bbio_ret, int mirror_num,
 		     int need_raid_map);
-int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
+int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
 		     u64 chunk_start, u64 physical, u64 devid,
 		     u64 **logical, int *naddrs, int *stripe_len);
 int btrfs_read_sys_array(struct btrfs_root *root);
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index d1a177a..fccbf55 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -252,7 +252,7 @@
 		goto out;
 
 	inode_inc_iversion(inode);
-	inode->i_ctime = current_fs_time(inode->i_sb);
+	inode->i_ctime = current_time(inode);
 	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
 	ret = btrfs_update_inode(trans, root, inode);
 	BUG_ON(ret);
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 88d274e..441b81a 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -95,7 +95,7 @@
 	*total_in = 0;
 
 	if (Z_OK != zlib_deflateInit(&workspace->strm, 3)) {
-		printk(KERN_WARNING "BTRFS: deflateInit failed\n");
+		pr_warn("BTRFS: deflateInit failed\n");
 		ret = -EIO;
 		goto out;
 	}
@@ -123,7 +123,7 @@
 	while (workspace->strm.total_in < len) {
 		ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
 		if (ret != Z_OK) {
-			printk(KERN_DEBUG "BTRFS: deflate in loop returned %d\n",
+			pr_debug("BTRFS: deflate in loop returned %d\n",
 			       ret);
 			zlib_deflateEnd(&workspace->strm);
 			ret = -EIO;
@@ -249,7 +249,7 @@
 	}
 
 	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
-		printk(KERN_WARNING "BTRFS: inflateInit failed\n");
+		pr_warn("BTRFS: inflateInit failed\n");
 		return -EIO;
 	}
 	while (workspace->strm.total_in < srclen) {
@@ -339,7 +339,7 @@
 	}
 
 	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
-		printk(KERN_WARNING "BTRFS: inflateInit failed\n");
+		pr_warn("BTRFS: inflateInit failed\n");
 		return -EIO;
 	}
 
diff --git a/fs/buffer.c b/fs/buffer.c
index 9c8eb9b..b205a62 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -351,7 +351,7 @@
 		set_buffer_uptodate(bh);
 	} else {
 		buffer_io_error(bh, ", lost async page write");
-		set_bit(AS_EIO, &page->mapping->flags);
+		mapping_set_error(page->mapping, -EIO);
 		set_buffer_write_io_error(bh);
 		clear_buffer_uptodate(bh);
 		SetPageError(page);
@@ -1078,7 +1078,7 @@
 	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
 }
 
-struct buffer_head *
+static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block,
 	     unsigned size, gfp_t gfp)
 {
@@ -1109,7 +1109,6 @@
 			free_more_memory();
 	}
 }
-EXPORT_SYMBOL(__getblk_slow);
 
 /*
  * The relationship between dirty buffers and dirty pages:
@@ -3250,7 +3249,7 @@
 	bh = head;
 	do {
 		if (buffer_write_io_error(bh) && page->mapping)
-			set_bit(AS_EIO, &page->mapping->flags);
+			mapping_set_error(page->mapping, -EIO);
 		if (buffer_busy(bh))
 			goto failed;
 		bh = bh->b_this_page;
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
index 6af790f..3ff867f 100644
--- a/fs/cachefiles/bind.c
+++ b/fs/cachefiles/bind.c
@@ -20,6 +20,7 @@
 #include <linux/mount.h>
 #include <linux/statfs.h>
 #include <linux/ctype.h>
+#include <linux/xattr.h>
 #include "internal.h"
 
 static int cachefiles_daemon_add_cache(struct cachefiles_cache *caches);
@@ -126,8 +127,7 @@
 	if (d_is_negative(root) ||
 	    !d_backing_inode(root)->i_op->lookup ||
 	    !d_backing_inode(root)->i_op->mkdir ||
-	    !d_backing_inode(root)->i_op->setxattr ||
-	    !d_backing_inode(root)->i_op->getxattr ||
+	    !(d_backing_inode(root)->i_opflags & IOP_XATTR) ||
 	    !root->d_sb->s_op->statfs ||
 	    !root->d_sb->s_op->sync_fs)
 		goto error_unsupported;
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index ce5f345..e7f16a7 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -253,6 +253,8 @@
 	struct cachefiles_object *object;
 	struct cachefiles_cache *cache;
 	const struct cred *saved_cred;
+	struct inode *inode;
+	blkcnt_t i_blocks = 0;
 
 	ASSERT(_object);
 
@@ -279,6 +281,10 @@
 		    _object != cache->cache.fsdef
 		    ) {
 			_debug("- retire object OBJ%x", object->fscache.debug_id);
+			inode = d_backing_inode(object->dentry);
+			if (inode)
+				i_blocks = inode->i_blocks;
+
 			cachefiles_begin_secure(cache, &saved_cred);
 			cachefiles_delete_object(cache, object);
 			cachefiles_end_secure(cache, saved_cred);
@@ -292,7 +298,7 @@
 
 	/* note that the object is now inactive */
 	if (test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags))
-		cachefiles_mark_object_inactive(cache, object);
+		cachefiles_mark_object_inactive(cache, object, i_blocks);
 
 	dput(object->dentry);
 	object->dentry = NULL;
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 2fcde1a..cd1effe 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -160,7 +160,8 @@
  * namei.c
  */
 extern void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
-					    struct cachefiles_object *object);
+					    struct cachefiles_object *object,
+					    blkcnt_t i_blocks);
 extern int cachefiles_delete_object(struct cachefiles_cache *cache,
 				    struct cachefiles_object *object);
 extern int cachefiles_walk_to_object(struct cachefiles_object *parent,
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 3f7c2cd..41df8a2 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -20,6 +20,7 @@
 #include <linux/namei.h>
 #include <linux/security.h>
 #include <linux/slab.h>
+#include <linux/xattr.h>
 #include "internal.h"
 
 #define CACHEFILES_KEYBUF_SIZE 512
@@ -261,10 +262,9 @@
  * Mark an object as being inactive.
  */
 void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
-				     struct cachefiles_object *object)
+				     struct cachefiles_object *object,
+				     blkcnt_t i_blocks)
 {
-	blkcnt_t i_blocks = d_backing_inode(object->dentry)->i_blocks;
-
 	write_lock(&cache->active_lock);
 	rb_erase(&object->active_node, &cache->active_nodes);
 	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
@@ -707,7 +707,8 @@
 
 check_error:
 	_debug("check error %d", ret);
-	cachefiles_mark_object_inactive(cache, object);
+	cachefiles_mark_object_inactive(
+		cache, object, d_backing_inode(object->dentry)->i_blocks);
 release_dentry:
 	dput(object->dentry);
 	object->dentry = NULL;
@@ -799,13 +800,11 @@
 	}
 
 	ret = -EPERM;
-	if (!d_backing_inode(subdir)->i_op->setxattr ||
-	    !d_backing_inode(subdir)->i_op->getxattr ||
+	if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
 	    !d_backing_inode(subdir)->i_op->lookup ||
 	    !d_backing_inode(subdir)->i_op->mkdir ||
 	    !d_backing_inode(subdir)->i_op->create ||
-	    (!d_backing_inode(subdir)->i_op->rename &&
-	     !d_backing_inode(subdir)->i_op->rename2) ||
+	    !d_backing_inode(subdir)->i_op->rename ||
 	    !d_backing_inode(subdir)->i_op->rmdir ||
 	    !d_backing_inode(subdir)->i_op->unlink)
 		goto check_error;
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 4f67227..987044b 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -95,11 +95,9 @@
 	case ACL_TYPE_ACCESS:
 		name = XATTR_NAME_POSIX_ACL_ACCESS;
 		if (acl) {
-			ret = posix_acl_equiv_mode(acl, &new_mode);
-			if (ret < 0)
+			ret = posix_acl_update_mode(inode, &new_mode, &acl);
+			if (ret)
 				goto out;
-			if (ret == 0)
-				acl = NULL;
 		}
 		break;
 	case ACL_TYPE_DEFAULT:
@@ -127,6 +125,11 @@
 			goto out_free;
 	}
 
+	if (ceph_snap(inode) != CEPH_NOSNAP) {
+		ret = -EROFS;
+		goto out_free;
+	}
+
 	if (new_mode != old_mode) {
 		newattrs.ia_mode = new_mode;
 		newattrs.ia_valid = ATTR_MODE;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index d5b6f95..ef3ebd7 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -175,9 +175,8 @@
 
 static int ceph_releasepage(struct page *page, gfp_t g)
 {
-	dout("%p releasepage %p idx %lu\n", page->mapping->host,
-	     page, page->index);
-	WARN_ON(PageDirty(page));
+	dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host,
+	     page, page->index, PageDirty(page) ? "" : "not ");
 
 	/* Can we release the page from the cache? */
 	if (!ceph_release_fscache_page(page, g))
@@ -298,14 +297,6 @@
 	kfree(osd_data->pages);
 }
 
-static void ceph_unlock_page_vector(struct page **pages, int num_pages)
-{
-	int i;
-
-	for (i = 0; i < num_pages; i++)
-		unlock_page(pages[i]);
-}
-
 /*
  * start an async read(ahead) operation.  return nr_pages we submitted
  * a read for on success, or negative error code.
@@ -370,6 +361,10 @@
 			dout("start_read %p add_to_page_cache failed %p\n",
 			     inode, page);
 			nr_pages = i;
+			if (nr_pages > 0) {
+				len = nr_pages << PAGE_SHIFT;
+				break;
+			}
 			goto out_pages;
 		}
 		pages[i] = page;
@@ -386,8 +381,11 @@
 	return nr_pages;
 
 out_pages:
-	ceph_unlock_page_vector(pages, nr_pages);
-	ceph_release_page_vector(pages, nr_pages);
+	for (i = 0; i < nr_pages; ++i) {
+		ceph_fscache_readpage_cancel(inode, pages[i]);
+		unlock_page(pages[i]);
+	}
+	ceph_put_page_vector(pages, nr_pages, false);
 out:
 	ceph_osdc_put_request(req);
 	return ret;
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index df4b3e6..78180d1 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1061,7 +1061,8 @@
 }
 
 static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
-		       struct inode *new_dir, struct dentry *new_dentry)
+		       struct inode *new_dir, struct dentry *new_dentry,
+		       unsigned int flags)
 {
 	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
 	struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -1069,6 +1070,9 @@
 	int op = CEPH_MDS_OP_RENAME;
 	int err;
 
+	if (flags)
+		return -EINVAL;
+
 	if (ceph_snap(old_dir) != ceph_snap(new_dir))
 		return -EXDEV;
 	if (ceph_snap(old_dir) != CEPH_NOSNAP) {
@@ -1486,10 +1490,7 @@
 	.permission = ceph_permission,
 	.getattr = ceph_getattr,
 	.setattr = ceph_setattr,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
 	.listxattr = ceph_listxattr,
-	.removexattr = generic_removexattr,
 	.get_acl = ceph_get_acl,
 	.set_acl = ceph_set_acl,
 	.mknod = ceph_mknod,
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 0f5375d..7bf0882 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -886,7 +886,7 @@
 	int num_pages = 0;
 	int flags;
 	int ret;
-	struct timespec mtime = current_fs_time(inode->i_sb);
+	struct timespec mtime = current_time(inode);
 	size_t count = iov_iter_count(iter);
 	loff_t pos = iocb->ki_pos;
 	bool write = iov_iter_rw(iter) == WRITE;
@@ -902,10 +902,10 @@
 		return ret;
 
 	if (write) {
-		ret = invalidate_inode_pages2_range(inode->i_mapping,
+		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
 					pos >> PAGE_SHIFT,
 					(pos + count) >> PAGE_SHIFT);
-		if (ret < 0)
+		if (ret2 < 0)
 			dout("invalidate_inode_pages2_range returned %d\n", ret);
 
 		flags = CEPH_OSD_FLAG_ORDERSNAP |
@@ -1091,7 +1091,7 @@
 	int flags;
 	int check_caps = 0;
 	int ret;
-	struct timespec mtime = current_fs_time(inode->i_sb);
+	struct timespec mtime = current_time(inode);
 	size_t count = iov_iter_count(from);
 
 	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index dd3a6db..bca1b49 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -94,10 +94,7 @@
 	.permission = ceph_permission,
 	.setattr = ceph_setattr,
 	.getattr = ceph_getattr,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
 	.listxattr = ceph_listxattr,
-	.removexattr = generic_removexattr,
 	.get_acl = ceph_get_acl,
 	.set_acl = ceph_set_acl,
 };
@@ -1885,10 +1882,7 @@
 	.get_link = simple_get_link,
 	.setattr = ceph_setattr,
 	.getattr = ceph_getattr,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
 	.listxattr = ceph_listxattr,
-	.removexattr = generic_removexattr,
 };
 
 int __ceph_setattr(struct inode *inode, struct iattr *attr)
@@ -1905,13 +1899,6 @@
 	int inode_dirty_flags = 0;
 	bool lock_snap_rwsem = false;
 
-	if (ceph_snap(inode) != CEPH_NOSNAP)
-		return -EROFS;
-
-	err = inode_change_ok(inode, attr);
-	if (err != 0)
-		return err;
-
 	prealloc_cf = ceph_alloc_cap_flush();
 	if (!prealloc_cf)
 		return -ENOMEM;
@@ -2080,7 +2067,7 @@
 	if (dirtied) {
 		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
 							   &prealloc_cf);
-		inode->i_ctime = current_fs_time(inode->i_sb);
+		inode->i_ctime = current_time(inode);
 	}
 
 	release &= issued;
@@ -2124,7 +2111,17 @@
  */
 int ceph_setattr(struct dentry *dentry, struct iattr *attr)
 {
-	return __ceph_setattr(d_inode(dentry), attr);
+	struct inode *inode = d_inode(dentry);
+	int err;
+
+	if (ceph_snap(inode) != CEPH_NOSNAP)
+		return -EROFS;
+
+	err = setattr_prepare(dentry, attr);
+	if (err != 0)
+		return err;
+
+	return __ceph_setattr(inode, attr);
 }
 
 /*
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index a2cb0c2..6806dbe 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -210,8 +210,8 @@
 	if (!(fl->fl_flags & FL_FLOCK))
 		return -ENOLCK;
 	/* No mandatory locks */
-	if (__mandatory_lock(file->f_mapping->host) && fl->fl_type != F_UNLCK)
-		return -ENOLCK;
+	if (fl->fl_type & LOCK_MAND)
+		return -EOPNOTSUPP;
 
 	dout("ceph_flock, fl_file: %p", fl->fl_file);
 
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index f72d4ae..815acd1 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -370,6 +370,7 @@
 	case CEPH_MDS_SESSION_CLOSING: return "closing";
 	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
 	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
+	case CEPH_MDS_SESSION_REJECTED: return "rejected";
 	default: return "???";
 	}
 }
@@ -1150,8 +1151,7 @@
 		while (!list_empty(&ci->i_cap_flush_list)) {
 			cf = list_first_entry(&ci->i_cap_flush_list,
 					      struct ceph_cap_flush, i_list);
-			list_del(&cf->i_list);
-			list_add(&cf->i_list, &to_remove);
+			list_move(&cf->i_list, &to_remove);
 		}
 
 		spin_lock(&mdsc->cap_dirty_lock);
@@ -1378,7 +1378,7 @@
 	if (!msg)
 		return -ENOMEM;
 	ceph_con_send(&session->s_con, msg);
-	return 0;
+	return 1;
 }
 
 /*
@@ -2131,6 +2131,10 @@
 	     ceph_session_state_name(session->s_state));
 	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
 	    session->s_state != CEPH_MDS_SESSION_HUNG) {
+		if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
+			err = -EACCES;
+			goto out_session;
+		}
 		if (session->s_state == CEPH_MDS_SESSION_NEW ||
 		    session->s_state == CEPH_MDS_SESSION_CLOSING)
 			__open_session(mdsc, session);
@@ -2652,6 +2656,15 @@
 		wake_up_session_caps(session, 0);
 		break;
 
+	case CEPH_SESSION_REJECT:
+		WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
+		pr_info("mds%d rejected session\n", session->s_mds);
+		session->s_state = CEPH_MDS_SESSION_REJECTED;
+		cleanup_session_requests(mdsc, session);
+		remove_session_caps(session);
+		wake = 2; /* for good measure */
+		break;
+
 	default:
 		pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
 		WARN_ON(1);
@@ -3557,11 +3570,11 @@
 /*
  * true if all sessions are closed, or we force unmount
  */
-static bool done_closing_sessions(struct ceph_mds_client *mdsc)
+static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
 {
 	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
 		return true;
-	return atomic_read(&mdsc->num_sessions) == 0;
+	return atomic_read(&mdsc->num_sessions) <= skipped;
 }
 
 /*
@@ -3572,6 +3585,7 @@
 	struct ceph_options *opts = mdsc->fsc->client->options;
 	struct ceph_mds_session *session;
 	int i;
+	int skipped = 0;
 
 	dout("close_sessions\n");
 
@@ -3583,7 +3597,8 @@
 			continue;
 		mutex_unlock(&mdsc->mutex);
 		mutex_lock(&session->s_mutex);
-		__close_session(mdsc, session);
+		if (__close_session(mdsc, session) <= 0)
+			skipped++;
 		mutex_unlock(&session->s_mutex);
 		ceph_put_mds_session(session);
 		mutex_lock(&mdsc->mutex);
@@ -3591,7 +3606,8 @@
 	mutex_unlock(&mdsc->mutex);
 
 	dout("waiting for sessions to close\n");
-	wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
+	wait_event_timeout(mdsc->session_close_wq,
+			   done_closing_sessions(mdsc, skipped),
 			   ceph_timeout_jiffies(opts->mount_timeout));
 
 	/* tear down remaining sessions */
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 6b36797..3c6f77b 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -121,6 +121,7 @@
 	CEPH_MDS_SESSION_CLOSING = 5,
 	CEPH_MDS_SESSION_RESTARTING = 6,
 	CEPH_MDS_SESSION_RECONNECTING = 7,
+	CEPH_MDS_SESSION_REJECTED = 8,
 };
 
 struct ceph_mds_session {
diff --git a/fs/ceph/strings.c b/fs/ceph/strings.c
index 89e6bc3..913dea1 100644
--- a/fs/ceph/strings.c
+++ b/fs/ceph/strings.c
@@ -43,6 +43,8 @@
 	case CEPH_SESSION_RECALL_STATE: return "recall_state";
 	case CEPH_SESSION_FLUSHMSG: return "flushmsg";
 	case CEPH_SESSION_FLUSHMSG_ACK: return "flushmsg_ack";
+	case CEPH_SESSION_FORCE_RO: return "force_ro";
+	case CEPH_SESSION_REJECT: return "reject";
 	}
 	return "???";
 }
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index e247f6f..a29ffce 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -396,10 +396,12 @@
 	 */
 	dev_name_end = strchr(dev_name, '/');
 	if (dev_name_end) {
-		fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
-		if (!fsopt->server_path) {
-			err = -ENOMEM;
-			goto out;
+		if (strlen(dev_name_end) > 1) {
+			fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
+			if (!fsopt->server_path) {
+				err = -ENOMEM;
+				goto out;
+			}
 		}
 	} else {
 		dev_name_end = dev_name + strlen(dev_name);
@@ -788,15 +790,10 @@
 		struct inode *inode = req->r_target_inode;
 		req->r_target_inode = NULL;
 		dout("open_root_inode success\n");
-		if (ceph_ino(inode) == CEPH_INO_ROOT &&
-		    fsc->sb->s_root == NULL) {
-			root = d_make_root(inode);
-			if (!root) {
-				root = ERR_PTR(-ENOMEM);
-				goto out;
-			}
-		} else {
-			root = d_obtain_root(inode);
+		root = d_make_root(inode);
+		if (!root) {
+			root = ERR_PTR(-ENOMEM);
+			goto out;
 		}
 		ceph_init_dentry(root);
 		dout("open_root_inode success, root dentry is %p\n", root);
@@ -825,17 +822,24 @@
 	mutex_lock(&fsc->client->mount_mutex);
 
 	if (!fsc->sb->s_root) {
+		const char *path;
 		err = __ceph_open_session(fsc->client, started);
 		if (err < 0)
 			goto out;
 
-		dout("mount opening root\n");
-		root = open_root_dentry(fsc, "", started);
+		if (!fsc->mount_options->server_path) {
+			path = "";
+			dout("mount opening path \\t\n");
+		} else {
+			path = fsc->mount_options->server_path + 1;
+			dout("mount opening path %s\n", path);
+		}
+		root = open_root_dentry(fsc, path, started);
 		if (IS_ERR(root)) {
 			err = PTR_ERR(root);
 			goto out;
 		}
-		fsc->sb->s_root = root;
+		fsc->sb->s_root = dget(root);
 		first = 1;
 
 		err = ceph_fs_debugfs_init(fsc);
@@ -843,19 +847,6 @@
 			goto fail;
 	}
 
-	if (!fsc->mount_options->server_path) {
-		root = fsc->sb->s_root;
-		dget(root);
-	} else {
-		const char *path = fsc->mount_options->server_path + 1;
-		dout("mount opening path %s\n", path);
-		root = open_root_dentry(fsc, path, started);
-		if (IS_ERR(root)) {
-			err = PTR_ERR(root);
-			goto fail;
-		}
-	}
-
 	fsc->mount_state = CEPH_MOUNT_MOUNTED;
 	dout("mount success\n");
 	mutex_unlock(&fsc->client->mount_mutex);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index adc2318..40b7032 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -1034,7 +1034,7 @@
 		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
 					       &prealloc_cf);
 		ci->i_xattrs.dirty = true;
-		inode->i_ctime = current_fs_time(inode->i_sb);
+		inode->i_ctime = current_time(inode);
 	}
 
 	spin_unlock(&ci->i_ceph_lock);
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 6edd825..44a240c 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -406,6 +406,7 @@
 	spin_lock(&cdev_lock);
 	list_del_init(&inode->i_devices);
 	inode->i_cdev = NULL;
+	inode->i_mapping = &inode->i_data;
 	spin_unlock(&cdev_lock);
 }
 
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 6c58e13..3d03e48 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -152,6 +152,7 @@
 	list_for_each(tmp1, &cifs_tcp_ses_list) {
 		server = list_entry(tmp1, struct TCP_Server_Info,
 				    tcp_ses_list);
+		seq_printf(m, "\nNumber of credits: %d", server->credits);
 		i++;
 		list_for_each(tmp2, &server->smb_ses_list) {
 			ses = list_entry(tmp2, struct cifs_ses,
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 1418daa..07ed81c 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -49,6 +49,7 @@
 #define CIFS_MOUNT_USE_PREFIX_PATH 0x1000000 /* make subpath with unaccessible
 					      * root mountable
 					      */
+#define CIFS_MOUNT_UID_FROM_ACL 0x2000000 /* try to get UID via special SID */
 
 struct cifs_sb_info {
 	struct rb_root tlink_tree;
diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h
index 0065256..57ff075 100644
--- a/fs/cifs/cifs_ioctl.h
+++ b/fs/cifs/cifs_ioctl.h
@@ -36,7 +36,15 @@
 	__u64   cifs_posix_caps;
 } __packed;
 
+struct smb_snapshot_array {
+	__u32	number_of_snapshots;
+	__u32	number_of_snapshots_returned;
+	__u32	snapshot_array_size;
+	/*	snapshots[]; */
+} __packed;
+
 #define CIFS_IOCTL_MAGIC	0xCF
 #define CIFS_IOC_COPYCHUNK_FILE	_IOW(CIFS_IOCTL_MAGIC, 3, int)
 #define CIFS_IOC_SET_INTEGRITY  _IO(CIFS_IOCTL_MAGIC, 4)
 #define CIFS_IOC_GET_MNT_INFO _IOR(CIFS_IOCTL_MAGIC, 5, struct smb_mnt_fs_info)
+#define CIFS_ENUMERATE_SNAPSHOTS _IOR(CIFS_IOCTL_MAGIC, 6, struct smb_snapshot_array)
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 71e8a56..15bac39 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -42,6 +42,35 @@
 /* group users */
 static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
 
+/* S-1-22-1 Unmapped Unix users */
+static const struct cifs_sid sid_unix_users = {1, 1, {0, 0, 0, 0, 0, 22},
+		{cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+/* S-1-22-2 Unmapped Unix groups */
+static const struct cifs_sid sid_unix_groups = { 1, 1, {0, 0, 0, 0, 0, 22},
+		{cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+/*
+ * See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx
+ */
+
+/* S-1-5-88 MS NFS and Apple style UID/GID/mode */
+
+/* S-1-5-88-1 Unix uid */
+static const struct cifs_sid sid_unix_NFS_users = { 1, 2, {0, 0, 0, 0, 0, 5},
+	{cpu_to_le32(88),
+	 cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+/* S-1-5-88-2 Unix gid */
+static const struct cifs_sid sid_unix_NFS_groups = { 1, 2, {0, 0, 0, 0, 0, 5},
+	{cpu_to_le32(88),
+	 cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+/* S-1-5-88-3 Unix mode */
+static const struct cifs_sid sid_unix_NFS_mode = { 1, 2, {0, 0, 0, 0, 0, 5},
+	{cpu_to_le32(88),
+	 cpu_to_le32(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
 static const struct cred *root_cred;
 
 static int
@@ -183,6 +212,62 @@
 	return 0; /* sids compare/match */
 }
 
+static bool
+is_well_known_sid(const struct cifs_sid *psid, uint32_t *puid, bool is_group)
+{
+	int i;
+	int num_subauth;
+	const struct cifs_sid *pwell_known_sid;
+
+	if (!psid || (puid == NULL))
+		return false;
+
+	num_subauth = psid->num_subauth;
+
+	/* check if Mac (or Windows NFS) vs. Samba format for Unix owner SID */
+	if (num_subauth == 2) {
+		if (is_group)
+			pwell_known_sid = &sid_unix_groups;
+		else
+			pwell_known_sid = &sid_unix_users;
+	} else if (num_subauth == 3) {
+		if (is_group)
+			pwell_known_sid = &sid_unix_NFS_groups;
+		else
+			pwell_known_sid = &sid_unix_NFS_users;
+	} else
+		return false;
+
+	/* compare the revision */
+	if (psid->revision != pwell_known_sid->revision)
+		return false;
+
+	/* compare all of the six auth values */
+	for (i = 0; i < NUM_AUTHS; ++i) {
+		if (psid->authority[i] != pwell_known_sid->authority[i]) {
+			cifs_dbg(FYI, "auth %d did not match\n", i);
+			return false;
+		}
+	}
+
+	if (num_subauth == 2) {
+		if (psid->sub_auth[0] != pwell_known_sid->sub_auth[0])
+			return false;
+
+		*puid = le32_to_cpu(psid->sub_auth[1]);
+	} else /* 3 subauths, ie Windows/Mac style */ {
+		*puid = le32_to_cpu(psid->sub_auth[0]);
+		if ((psid->sub_auth[0] != pwell_known_sid->sub_auth[0]) ||
+		    (psid->sub_auth[1] != pwell_known_sid->sub_auth[1]))
+			return false;
+
+		*puid = le32_to_cpu(psid->sub_auth[2]);
+	}
+
+	cifs_dbg(FYI, "Unix UID %d returned from SID\n", *puid);
+	return true; /* well known sid found, uid returned */
+}
+
 static void
 cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
 {
@@ -276,6 +361,43 @@
 		return -EIO;
 	}
 
+	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) {
+		uint32_t unix_id;
+		bool is_group;
+
+		if (sidtype != SIDOWNER)
+			is_group = true;
+		else
+			is_group = false;
+
+		if (is_well_known_sid(psid, &unix_id, is_group) == false)
+			goto try_upcall_to_get_id;
+
+		if (is_group) {
+			kgid_t gid;
+			gid_t id;
+
+			id = (gid_t)unix_id;
+			gid = make_kgid(&init_user_ns, id);
+			if (gid_valid(gid)) {
+				fgid = gid;
+				goto got_valid_id;
+			}
+		} else {
+			kuid_t uid;
+			uid_t id;
+
+			id = (uid_t)unix_id;
+			uid = make_kuid(&init_user_ns, id);
+			if (uid_valid(uid)) {
+				fuid = uid;
+				goto got_valid_id;
+			}
+		}
+		/* If unable to find uid/gid easily from SID try via upcall */
+	}
+
+try_upcall_to_get_id:
 	sidstr = sid_to_key_str(psid, sidtype);
 	if (!sidstr)
 		return -ENOMEM;
@@ -329,6 +451,7 @@
 	 * Note that we return 0 here unconditionally. If the mapping
 	 * fails then we just fall back to using the mnt_uid/mnt_gid.
 	 */
+got_valid_id:
 	if (sidtype == SIDOWNER)
 		fattr->cf_uid = fuid;
 	else
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 14ae4b8..15261ba 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -64,15 +64,15 @@
 unsigned int sign_CIFS_PDUs = 1;
 static const struct super_operations cifs_super_ops;
 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
-module_param(CIFSMaxBufSize, uint, 0);
+module_param(CIFSMaxBufSize, uint, 0444);
 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
 				 "Default: 16384 Range: 8192 to 130048");
 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
-module_param(cifs_min_rcv, uint, 0);
+module_param(cifs_min_rcv, uint, 0444);
 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
 				"1 to 64");
 unsigned int cifs_min_small = 30;
-module_param(cifs_min_small, uint, 0);
+module_param(cifs_min_small, uint, 0444);
 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
 				 "Range: 2 to 256");
 unsigned int cifs_max_pending = CIFS_MAX_REQ;
@@ -271,7 +271,7 @@
 	cifs_inode->createtime = 0;
 	cifs_inode->epoch = 0;
 #ifdef CONFIG_CIFS_SMB2
-	get_random_bytes(cifs_inode->lease_key, SMB2_LEASE_KEY_SIZE);
+	generate_random_uuid(cifs_inode->lease_key);
 #endif
 	/*
 	 * Can not set i_flags here - they get immediately overwritten to zero
@@ -469,6 +469,8 @@
 		seq_puts(s, ",posixpaths");
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
 		seq_puts(s, ",setuids");
+	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
+		seq_puts(s, ",idsfromsid");
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
 		seq_puts(s, ",serverino");
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
@@ -896,35 +898,26 @@
 	.link = cifs_hardlink,
 	.mkdir = cifs_mkdir,
 	.rmdir = cifs_rmdir,
-	.rename2 = cifs_rename2,
+	.rename = cifs_rename2,
 	.permission = cifs_permission,
 	.setattr = cifs_setattr,
 	.symlink = cifs_symlink,
 	.mknod   = cifs_mknod,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
 	.listxattr = cifs_listxattr,
-	.removexattr = generic_removexattr,
 };
 
 const struct inode_operations cifs_file_inode_ops = {
 	.setattr = cifs_setattr,
 	.getattr = cifs_getattr,
 	.permission = cifs_permission,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
 	.listxattr = cifs_listxattr,
-	.removexattr = generic_removexattr,
 };
 
 const struct inode_operations cifs_symlink_inode_ops = {
 	.readlink = generic_readlink,
 	.get_link = cifs_get_link,
 	.permission = cifs_permission,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
 	.listxattr = cifs_listxattr,
-	.removexattr = generic_removexattr,
 };
 
 static int cifs_clone_file_range(struct file *src_file, loff_t off,
@@ -1271,7 +1264,6 @@
 	GlobalTotalActiveXid = 0;
 	GlobalMaxActiveXid = 0;
 	spin_lock_init(&cifs_tcp_ses_lock);
-	spin_lock_init(&cifs_file_list_lock);
 	spin_lock_init(&GlobalMid_Lock);
 
 	get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret));
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 9dcf974..c9c00a8 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -41,6 +41,16 @@
 
 }
 
+static inline void cifs_set_time(struct dentry *dentry, unsigned long time)
+{
+	dentry->d_fsdata = (void *) time;
+}
+
+static inline unsigned long cifs_get_time(struct dentry *dentry)
+{
+	return (unsigned long) dentry->d_fsdata;
+}
+
 extern struct file_system_type cifs_fs_type;
 extern const struct address_space_operations cifs_addr_ops;
 extern const struct address_space_operations cifs_addr_ops_smallbuf;
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 8f1d8c1..1f17f6b 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -75,6 +75,18 @@
 #define SMB_ECHO_INTERVAL_MAX 600
 #define SMB_ECHO_INTERVAL_DEFAULT 60
 
+/*
+ * Default number of credits to keep available for SMB3.
+ * This value is chosen somewhat arbitrarily. The Windows client
+ * defaults to 128 credits, the Windows server allows clients up to
+ * 512 credits (or 8K for later versions), and the NetApp server
+ * does not limit clients at all.  Choose a high enough default value
+ * such that the client shouldn't limit performance, but allow mount
+ * to override (until you approach 64K, where we limit credits to 65000
+ * to reduce possibility of seeing more server credit overflow bugs.
+ */
+#define SMB2_MAX_CREDITS_AVAILABLE 32000
+
 #include "cifspdu.h"
 
 #ifndef XATTR_DOS_ATTRIB
@@ -376,6 +388,8 @@
 	int (*calc_signature)(struct smb_rqst *, struct TCP_Server_Info *);
 	int (*set_integrity)(const unsigned int, struct cifs_tcon *tcon,
 			     struct cifsFileInfo *src_file);
+	int (*enum_snapshots)(const unsigned int xid, struct cifs_tcon *tcon,
+			     struct cifsFileInfo *src_file, void __user *);
 	int (*query_mf_symlink)(unsigned int, struct cifs_tcon *,
 				struct cifs_sb_info *, const unsigned char *,
 				char *, unsigned int *);
@@ -464,6 +478,7 @@
 	bool retry:1;
 	bool intr:1;
 	bool setuids:1;
+	bool setuidfromacl:1;
 	bool override_uid:1;
 	bool override_gid:1;
 	bool dynperm:1;
@@ -510,6 +525,7 @@
 	struct sockaddr_storage srcaddr; /* allow binding to a local IP */
 	struct nls_table *local_nls;
 	unsigned int echo_interval; /* echo interval in secs */
+	unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
 };
 
 #define CIFS_MOUNT_MASK (CIFS_MOUNT_NO_PERM | CIFS_MOUNT_SET_UID | \
@@ -567,7 +583,8 @@
 	bool noblocksnd;		/* use blocking sendmsg */
 	bool noautotune;		/* do not autotune send buf sizes */
 	bool tcp_nodelay;
-	int credits;  /* send no more requests at once */
+	unsigned int credits;  /* send no more requests at once */
+	unsigned int max_credits; /* can override large 32000 default at mnt */
 	unsigned int in_flight;  /* number of requests on the wire to server */
 	spinlock_t req_lock;  /* protect the two values above */
 	struct mutex srv_mutex;
@@ -833,6 +850,7 @@
 	struct list_head tcon_list;
 	int tc_count;
 	struct list_head openFileList;
+	spinlock_t open_file_lock; /* protects list above */
 	struct cifs_ses *ses;	/* pointer to session associated with */
 	char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
 	char *nativeFileSystem;
@@ -889,7 +907,7 @@
 #endif /* CONFIG_CIFS_STATS2 */
 	__u64    bytes_read;
 	__u64    bytes_written;
-	spinlock_t stat_lock;
+	spinlock_t stat_lock;  /* protects the two fields above */
 #endif /* CONFIG_CIFS_STATS */
 	FILE_SYSTEM_DEVICE_INFO fsDevInfo;
 	FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
@@ -1040,20 +1058,24 @@
 };
 
 struct cifsFileInfo {
+	/* following two lists are protected by tcon->open_file_lock */
 	struct list_head tlist;	/* pointer to next fid owned by tcon */
 	struct list_head flist;	/* next fid (file instance) for this inode */
+	/* lock list below protected by cifsi->lock_sem */
 	struct cifs_fid_locks *llist;	/* brlocks held by this fid */
 	kuid_t uid;		/* allows finding which FileInfo structure */
 	__u32 pid;		/* process id who opened file */
 	struct cifs_fid fid;	/* file id from remote */
+	struct list_head rlist; /* reconnect list */
 	/* BB add lock scope info here if needed */ ;
 	/* lock scope id (0 if none) */
 	struct dentry *dentry;
-	unsigned int f_flags;
 	struct tcon_link *tlink;
+	unsigned int f_flags;
 	bool invalidHandle:1;	/* file closed via session abend */
 	bool oplock_break_cancelled:1;
-	int count;		/* refcount protected by cifs_file_list_lock */
+	int count;
+	spinlock_t file_info_lock; /* protects four flag/count fields above */
 	struct mutex fh_mutex; /* prevents reopen race after dead ses*/
 	struct cifs_search_info srch_inf;
 	struct work_struct oplock_break; /* work for oplock breaks */
@@ -1120,7 +1142,7 @@
 
 /*
  * Take a reference on the file private data. Must be called with
- * cifs_file_list_lock held.
+ * cfile->file_info_lock held.
  */
 static inline void
 cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
@@ -1514,8 +1536,10 @@
  *  GlobalMid_Lock protects:
  *	list operations on pending_mid_q and oplockQ
  *      updates to XID counters, multiplex id  and SMB sequence numbers
- *  cifs_file_list_lock protects:
- *	list operations on tcp and SMB session lists and tCon lists
+ *  tcp_ses_lock protects:
+ *	list operations on tcp and SMB session lists
+ *  tcon->open_file_lock protects the list of open files hanging off the tcon
+ *  cfile->file_info_lock protects counters and fields in cifs file struct
  *  f_owner.lock protects certain per file struct operations
  *  mapping->page_lock protects certain per page operations
  *
@@ -1547,18 +1571,12 @@
  * tcp session, and the list of tcon's per smb session. It also protects
  * the reference counters for the server, smb session, and tcon. Finally,
  * changes to the tcon->tidStatus should be done while holding this lock.
+ * generally the locks should be taken in order tcp_ses_lock before
+ * tcon->open_file_lock and that before file->file_info_lock since the
+ * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file
  */
 GLOBAL_EXTERN spinlock_t		cifs_tcp_ses_lock;
 
-/*
- * This lock protects the cifs_file->llist and cifs_file->flist
- * list operations, and updates to some flags (cifs_file->invalidHandle)
- * It will be moved to either use the tcon->stat_lock or equivalent later.
- * If cifs_tcp_ses_lock and the lock below are both needed to be held, then
- * the cifs_tcp_ses_lock must be grabbed first and released last.
- */
-GLOBAL_EXTERN spinlock_t	cifs_file_list_lock;
-
 #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
 /* Outstanding dir notify requests */
 GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 95dab43..ced0e42 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -193,6 +193,8 @@
 extern int cifs_mount(struct cifs_sb_info *, struct smb_vol *);
 extern void cifs_umount(struct cifs_sb_info *);
 extern void cifs_mark_open_files_invalid(struct cifs_tcon *tcon);
+extern void cifs_reopen_persistent_handles(struct cifs_tcon *tcon);
+
 extern bool cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset,
 				    __u64 length, __u8 type,
 				    struct cifsLockInfo **conf_lock,
@@ -392,8 +394,7 @@
 			unsigned int *nbytes, char **buf,
 			int *return_buf_type);
 extern int CIFSSMBWrite(const unsigned int xid, struct cifs_io_parms *io_parms,
-			unsigned int *nbytes, const char *buf,
-			const char __user *ubuf, const int long_op);
+			unsigned int *nbytes, const char *buf);
 extern int CIFSSMBWrite2(const unsigned int xid, struct cifs_io_parms *io_parms,
 			unsigned int *nbytes, struct kvec *iov, const int nvec);
 extern int CIFSGetSrvInodeNumber(const unsigned int xid, struct cifs_tcon *tcon,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index d47197e..3f3185f 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -98,13 +98,13 @@
 	struct list_head *tmp1;
 
 	/* list all files open on tree connection and mark them invalid */
-	spin_lock(&cifs_file_list_lock);
+	spin_lock(&tcon->open_file_lock);
 	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
 		open_file->invalidHandle = true;
 		open_file->oplock_break_cancelled = true;
 	}
-	spin_unlock(&cifs_file_list_lock);
+	spin_unlock(&tcon->open_file_lock);
 	/*
 	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
 	 * to this tcon.
@@ -1228,7 +1228,6 @@
 	inc_rfc1001_len(pSMB, count);
 
 	pSMB->ByteCount = cpu_to_le16(count);
-	/* long_op set to 1 to allow for oplock break timeouts */
 	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
 			(struct smb_hdr *)pSMBr, &bytes_returned, 0);
 	cifs_stats_inc(&tcon->stats.cifs_stats.num_opens);
@@ -1768,8 +1767,7 @@
 
 int
 CIFSSMBWrite(const unsigned int xid, struct cifs_io_parms *io_parms,
-	     unsigned int *nbytes, const char *buf,
-	     const char __user *ubuf, const int long_op)
+	     unsigned int *nbytes, const char *buf)
 {
 	int rc = -EACCES;
 	WRITE_REQ *pSMB = NULL;
@@ -1838,12 +1836,7 @@
 		cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
 	if (buf)
 		memcpy(pSMB->Data, buf, bytes_sent);
-	else if (ubuf) {
-		if (copy_from_user(pSMB->Data, ubuf, bytes_sent)) {
-			cifs_buf_release(pSMB);
-			return -EFAULT;
-		}
-	} else if (count != 0) {
+	else if (count != 0) {
 		/* No buffer */
 		cifs_buf_release(pSMB);
 		return -EINVAL;
@@ -1867,7 +1860,7 @@
 	}
 
 	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
-			 (struct smb_hdr *) pSMBr, &bytes_returned, long_op);
+			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
 	cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
 	if (rc) {
 		cifs_dbg(FYI, "Send error in write = %d\n", rc);
@@ -3334,7 +3327,7 @@
 #ifdef CONFIG_CIFS_POSIX
 
 /*Convert an Access Control Entry from wire format to local POSIX xattr format*/
-static void cifs_convert_ace(posix_acl_xattr_entry *ace,
+static void cifs_convert_ace(struct posix_acl_xattr_entry *ace,
 			     struct cifs_posix_ace *cifs_ace)
 {
 	/* u8 cifs fields do not need le conversion */
@@ -3358,7 +3351,7 @@
 	__u16 count;
 	struct cifs_posix_ace *pACE;
 	struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)src;
-	posix_acl_xattr_header *local_acl = (posix_acl_xattr_header *)trgt;
+	struct posix_acl_xattr_header *local_acl = (void *)trgt;
 
 	if (le16_to_cpu(cifs_acl->version) != CIFS_ACL_VERSION)
 		return -EOPNOTSUPP;
@@ -3396,9 +3389,11 @@
 	} else if (size > buflen) {
 		return -ERANGE;
 	} else /* buffer big enough */ {
+		struct posix_acl_xattr_entry *ace = (void *)(local_acl + 1);
+
 		local_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
 		for (i = 0; i < count ; i++) {
-			cifs_convert_ace(&local_acl->a_entries[i], pACE);
+			cifs_convert_ace(&ace[i], pACE);
 			pACE++;
 		}
 	}
@@ -3406,7 +3401,7 @@
 }
 
 static __u16 convert_ace_to_cifs_ace(struct cifs_posix_ace *cifs_ace,
-				     const posix_acl_xattr_entry *local_ace)
+				     const struct posix_acl_xattr_entry *local_ace)
 {
 	__u16 rc = 0; /* 0 = ACL converted ok */
 
@@ -3431,7 +3426,7 @@
 {
 	__u16 rc = 0;
 	struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)parm_data;
-	posix_acl_xattr_header *local_acl = (posix_acl_xattr_header *)pACL;
+	struct posix_acl_xattr_header *local_acl = (void *)pACL;
 	int count;
 	int i;
 
@@ -3459,7 +3454,7 @@
 	}
 	for (i = 0; i < count; i++) {
 		rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i],
-					&local_acl->a_entries[i]);
+			(struct posix_acl_xattr_entry *)(local_acl + 1));
 		if (rc != 0) {
 			/* ACE not converted */
 			break;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 2e4f4ba..aab5227 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -63,7 +63,6 @@
 #define TLINK_IDLE_EXPIRE	(600 * HZ)
 
 enum {
-
 	/* Mount options that take no arguments */
 	Opt_user_xattr, Opt_nouser_xattr,
 	Opt_forceuid, Opt_noforceuid,
@@ -76,7 +75,7 @@
 	Opt_noposixpaths, Opt_nounix,
 	Opt_nocase,
 	Opt_brl, Opt_nobrl,
-	Opt_forcemandatorylock, Opt_setuids,
+	Opt_forcemandatorylock, Opt_setuidfromacl, Opt_setuids,
 	Opt_nosetuids, Opt_dynperm, Opt_nodynperm,
 	Opt_nohard, Opt_nosoft,
 	Opt_nointr, Opt_intr,
@@ -95,7 +94,7 @@
 	Opt_cruid, Opt_gid, Opt_file_mode,
 	Opt_dirmode, Opt_port,
 	Opt_rsize, Opt_wsize, Opt_actimeo,
-	Opt_echo_interval,
+	Opt_echo_interval, Opt_max_credits,
 
 	/* Mount options which take string value */
 	Opt_user, Opt_pass, Opt_ip,
@@ -148,6 +147,7 @@
 	{ Opt_forcemandatorylock, "forcemand" },
 	{ Opt_setuids, "setuids" },
 	{ Opt_nosetuids, "nosetuids" },
+	{ Opt_setuidfromacl, "idsfromsid" },
 	{ Opt_dynperm, "dynperm" },
 	{ Opt_nodynperm, "nodynperm" },
 	{ Opt_nohard, "nohard" },
@@ -190,6 +190,7 @@
 	{ Opt_wsize, "wsize=%s" },
 	{ Opt_actimeo, "actimeo=%s" },
 	{ Opt_echo_interval, "echo_interval=%s" },
+	{ Opt_max_credits, "max_credits=%s" },
 
 	{ Opt_blank_user, "user=" },
 	{ Opt_blank_user, "username=" },
@@ -1376,6 +1377,9 @@
 		case Opt_nosetuids:
 			vol->setuids = 0;
 			break;
+		case Opt_setuidfromacl:
+			vol->setuidfromacl = 1;
+			break;
 		case Opt_dynperm:
 			vol->dynperm = true;
 			break;
@@ -1586,6 +1590,15 @@
 			}
 			vol->echo_interval = option;
 			break;
+		case Opt_max_credits:
+			if (get_option_ul(args, &option) || (option < 20) ||
+			    (option > 60000)) {
+				cifs_dbg(VFS, "%s: Invalid max_credits value\n",
+					 __func__);
+				goto cifs_parse_mount_err;
+			}
+			vol->max_credits = option;
+			break;
 
 		/* String Arguments */
 
@@ -2163,7 +2176,7 @@
 	memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
 		sizeof(tcp_ses->dstaddr));
 #ifdef CONFIG_CIFS_SMB2
-	get_random_bytes(tcp_ses->client_guid, SMB2_CLIENT_GUID_SIZE);
+	generate_random_uuid(tcp_ses->client_guid);
 #endif
 	/*
 	 * at this point we are the only ones with the pointer
@@ -3270,6 +3283,8 @@
 		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
 	if (pvolume_info->setuids)
 		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID;
+	if (pvolume_info->setuidfromacl)
+		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UID_FROM_ACL;
 	if (pvolume_info->server_ino)
 		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SERVER_INUM;
 	if (pvolume_info->remap)
@@ -3598,7 +3613,11 @@
 		bdi_destroy(&cifs_sb->bdi);
 		goto out;
 	}
-
+	if ((volume_info->max_credits < 20) ||
+	     (volume_info->max_credits > 60000))
+		server->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
+	else
+		server->max_credits = volume_info->max_credits;
 	/* get a reference to a SMB session */
 	ses = cifs_get_smb_ses(server, volume_info);
 	if (IS_ERR(ses)) {
@@ -3688,14 +3707,16 @@
 			goto mount_fail_check;
 		}
 
-		rc = cifs_are_all_path_components_accessible(server,
+		if (rc != -EREMOTE) {
+			rc = cifs_are_all_path_components_accessible(server,
 							     xid, tcon, cifs_sb,
 							     full_path);
-		if (rc != 0) {
-			cifs_dbg(VFS, "cannot query dirs between root and final path, "
-				 "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
-			cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
-			rc = 0;
+			if (rc != 0) {
+				cifs_dbg(VFS, "cannot query dirs between root and final path, "
+					 "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
+				cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+				rc = 0;
+			}
 		}
 		kfree(full_path);
 	}
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 4716c54..789ff1d 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -40,7 +40,7 @@
 	/* BB check if there is a way to get the kernel to do this or if we
 	   really need this */
 	do {
-		direntry->d_time = jiffies;
+		cifs_set_time(direntry, jiffies);
 		direntry = direntry->d_parent;
 	} while (!IS_ROOT(direntry));
 }
@@ -802,7 +802,7 @@
 
 	} else if (rc == -ENOENT) {
 		rc = 0;
-		direntry->d_time = jiffies;
+		cifs_set_time(direntry, jiffies);
 		d_add(direntry, NULL);
 	/*	if it was once a directory (but how can we tell?) we could do
 		shrink_dcache_parent(direntry); */
@@ -862,7 +862,7 @@
 	if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
 		return 0;
 
-	if (time_after(jiffies, direntry->d_time + HZ) || !lookupCacheEnabled)
+	if (time_after(jiffies, cifs_get_time(direntry) + HZ) || !lookupCacheEnabled)
 		return 0;
 
 	return 1;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 579e41b..7f5f617 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -305,6 +305,7 @@
 	cfile->tlink = cifs_get_tlink(tlink);
 	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
 	mutex_init(&cfile->fh_mutex);
+	spin_lock_init(&cfile->file_info_lock);
 
 	cifs_sb_active(inode->i_sb);
 
@@ -317,7 +318,7 @@
 		oplock = 0;
 	}
 
-	spin_lock(&cifs_file_list_lock);
+	spin_lock(&tcon->open_file_lock);
 	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
 		oplock = fid->pending_open->oplock;
 	list_del(&fid->pending_open->olist);
@@ -326,12 +327,13 @@
 	server->ops->set_fid(cfile, fid, oplock);
 
 	list_add(&cfile->tlist, &tcon->openFileList);
+
 	/* if readable file instance put first in list*/
 	if (file->f_mode & FMODE_READ)
 		list_add(&cfile->flist, &cinode->openFileList);
 	else
 		list_add_tail(&cfile->flist, &cinode->openFileList);
-	spin_unlock(&cifs_file_list_lock);
+	spin_unlock(&tcon->open_file_lock);
 
 	if (fid->purge_cache)
 		cifs_zap_mapping(inode);
@@ -343,16 +345,16 @@
 struct cifsFileInfo *
 cifsFileInfo_get(struct cifsFileInfo *cifs_file)
 {
-	spin_lock(&cifs_file_list_lock);
+	spin_lock(&cifs_file->file_info_lock);
 	cifsFileInfo_get_locked(cifs_file);
-	spin_unlock(&cifs_file_list_lock);
+	spin_unlock(&cifs_file->file_info_lock);
 	return cifs_file;
 }
 
 /*
  * Release a reference on the file private data. This may involve closing
  * the filehandle out on the server. Must be called without holding
- * cifs_file_list_lock.
+ * tcon->open_file_lock and cifs_file->file_info_lock.
  */
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 {
@@ -367,11 +369,15 @@
 	struct cifs_pending_open open;
 	bool oplock_break_cancelled;
 
-	spin_lock(&cifs_file_list_lock);
+	spin_lock(&tcon->open_file_lock);
+
+	spin_lock(&cifs_file->file_info_lock);
 	if (--cifs_file->count > 0) {
-		spin_unlock(&cifs_file_list_lock);
+		spin_unlock(&cifs_file->file_info_lock);
+		spin_unlock(&tcon->open_file_lock);
 		return;
 	}
+	spin_unlock(&cifs_file->file_info_lock);
 
 	if (server->ops->get_lease_key)
 		server->ops->get_lease_key(inode, &fid);
@@ -395,7 +401,8 @@
 			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
 		cifs_set_oplock_level(cifsi, 0);
 	}
-	spin_unlock(&cifs_file_list_lock);
+
+	spin_unlock(&tcon->open_file_lock);
 
 	oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
 
@@ -732,6 +739,15 @@
 	 * to the server to get the new inode info.
 	 */
 
+	/*
+	 * If the server returned a read oplock and we have mandatory brlocks,
+	 * set oplock level to None.
+	 */
+	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
+		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
+		oplock = 0;
+	}
+
 	server->ops->set_fid(cfile, &cfile->fid, oplock);
 	if (oparms.reconnect)
 		cifs_relock_file(cfile);
@@ -753,6 +769,36 @@
 	return 0;
 }
 
+void
+cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
+{
+	struct cifsFileInfo *open_file;
+	struct list_head *tmp;
+	struct list_head *tmp1;
+	struct list_head tmp_list;
+
+	cifs_dbg(FYI, "Reopen persistent handles");
+	INIT_LIST_HEAD(&tmp_list);
+
+	/* list all files open on tree connection, reopen resilient handles  */
+	spin_lock(&tcon->open_file_lock);
+	list_for_each(tmp, &tcon->openFileList) {
+		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
+		if (!open_file->invalidHandle)
+			continue;
+		cifsFileInfo_get(open_file);
+		list_add_tail(&open_file->rlist, &tmp_list);
+	}
+	spin_unlock(&tcon->open_file_lock);
+
+	list_for_each_safe(tmp, tmp1, &tmp_list) {
+		open_file = list_entry(tmp, struct cifsFileInfo, rlist);
+		cifs_reopen_file(open_file, false /* do not flush */);
+		list_del_init(&open_file->rlist);
+		cifsFileInfo_put(open_file);
+	}
+}
+
 int cifs_closedir(struct inode *inode, struct file *file)
 {
 	int rc = 0;
@@ -772,10 +818,10 @@
 	server = tcon->ses->server;
 
 	cifs_dbg(FYI, "Freeing private data in close dir\n");
-	spin_lock(&cifs_file_list_lock);
+	spin_lock(&cfile->file_info_lock);
 	if (server->ops->dir_needs_close(cfile)) {
 		cfile->invalidHandle = true;
-		spin_unlock(&cifs_file_list_lock);
+		spin_unlock(&cfile->file_info_lock);
 		if (server->ops->close_dir)
 			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
 		else
@@ -784,7 +830,7 @@
 		/* not much we can do if it fails anyway, ignore rc */
 		rc = 0;
 	} else
-		spin_unlock(&cifs_file_list_lock);
+		spin_unlock(&cfile->file_info_lock);
 
 	buf = cfile->srch_inf.ntwrk_buf_start;
 	if (buf) {
@@ -1728,12 +1774,13 @@
 {
 	struct cifsFileInfo *open_file = NULL;
 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 
 	/* only filter by fsuid on multiuser mounts */
 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
 		fsuid_only = false;
 
-	spin_lock(&cifs_file_list_lock);
+	spin_lock(&tcon->open_file_lock);
 	/* we could simply get the first_list_entry since write-only entries
 	   are always at the end of the list but since the first entry might
 	   have a close pending, we go through the whole list */
@@ -1744,8 +1791,8 @@
 			if (!open_file->invalidHandle) {
 				/* found a good file */
 				/* lock it so it will not be closed on us */
-				cifsFileInfo_get_locked(open_file);
-				spin_unlock(&cifs_file_list_lock);
+				cifsFileInfo_get(open_file);
+				spin_unlock(&tcon->open_file_lock);
 				return open_file;
 			} /* else might as well continue, and look for
 			     another, or simply have the caller reopen it
@@ -1753,7 +1800,7 @@
 		} else /* write only file */
 			break; /* write only files are last so must be done */
 	}
-	spin_unlock(&cifs_file_list_lock);
+	spin_unlock(&tcon->open_file_lock);
 	return NULL;
 }
 
@@ -1762,6 +1809,7 @@
 {
 	struct cifsFileInfo *open_file, *inv_file = NULL;
 	struct cifs_sb_info *cifs_sb;
+	struct cifs_tcon *tcon;
 	bool any_available = false;
 	int rc;
 	unsigned int refind = 0;
@@ -1777,15 +1825,16 @@
 	}
 
 	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+	tcon = cifs_sb_master_tcon(cifs_sb);
 
 	/* only filter by fsuid on multiuser mounts */
 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
 		fsuid_only = false;
 
-	spin_lock(&cifs_file_list_lock);
+	spin_lock(&tcon->open_file_lock);
 refind_writable:
 	if (refind > MAX_REOPEN_ATT) {
-		spin_unlock(&cifs_file_list_lock);
+		spin_unlock(&tcon->open_file_lock);
 		return NULL;
 	}
 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
@@ -1796,8 +1845,8 @@
 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
 			if (!open_file->invalidHandle) {
 				/* found a good writable file */
-				cifsFileInfo_get_locked(open_file);
-				spin_unlock(&cifs_file_list_lock);
+				cifsFileInfo_get(open_file);
+				spin_unlock(&tcon->open_file_lock);
 				return open_file;
 			} else {
 				if (!inv_file)
@@ -1813,24 +1862,24 @@
 
 	if (inv_file) {
 		any_available = false;
-		cifsFileInfo_get_locked(inv_file);
+		cifsFileInfo_get(inv_file);
 	}
 
-	spin_unlock(&cifs_file_list_lock);
+	spin_unlock(&tcon->open_file_lock);
 
 	if (inv_file) {
 		rc = cifs_reopen_file(inv_file, false);
 		if (!rc)
 			return inv_file;
 		else {
-			spin_lock(&cifs_file_list_lock);
+			spin_lock(&tcon->open_file_lock);
 			list_move_tail(&inv_file->flist,
 					&cifs_inode->openFileList);
-			spin_unlock(&cifs_file_list_lock);
+			spin_unlock(&tcon->open_file_lock);
 			cifsFileInfo_put(inv_file);
-			spin_lock(&cifs_file_list_lock);
 			++refind;
 			inv_file = NULL;
+			spin_lock(&tcon->open_file_lock);
 			goto refind_writable;
 		}
 	}
@@ -1878,7 +1927,7 @@
 					   write_data, to - from, &offset);
 		cifsFileInfo_put(open_file);
 		/* Does mm or vfs already set times? */
-		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
+		inode->i_atime = inode->i_mtime = current_time(inode);
 		if ((bytes_written > 0) && (offset))
 			rc = 0;
 		else if (bytes_written < 0)
@@ -2478,7 +2527,7 @@
 	size_t cur_len;
 	unsigned long nr_pages, num_pages, i;
 	struct cifs_writedata *wdata;
-	struct iov_iter saved_from;
+	struct iov_iter saved_from = *from;
 	loff_t saved_offset = offset;
 	pid_t pid;
 	struct TCP_Server_Info *server;
@@ -2489,7 +2538,6 @@
 		pid = current->tgid;
 
 	server = tlink_tcon(open_file->tlink)->ses->server;
-	memcpy(&saved_from, from, sizeof(struct iov_iter));
 
 	do {
 		unsigned int wsize, credits;
@@ -2551,8 +2599,7 @@
 			kref_put(&wdata->refcount,
 				 cifs_uncached_writedata_release);
 			if (rc == -EAGAIN) {
-				memcpy(from, &saved_from,
-				       sizeof(struct iov_iter));
+				*from = saved_from;
 				iov_iter_advance(from, offset - saved_offset);
 				continue;
 			}
@@ -2576,7 +2623,7 @@
 	struct cifs_sb_info *cifs_sb;
 	struct cifs_writedata *wdata, *tmp;
 	struct list_head wdata_list;
-	struct iov_iter saved_from;
+	struct iov_iter saved_from = *from;
 	int rc;
 
 	/*
@@ -2597,8 +2644,6 @@
 	if (!tcon->ses->server->ops->async_writev)
 		return -ENOSYS;
 
-	memcpy(&saved_from, from, sizeof(struct iov_iter));
-
 	rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
 				  open_file, cifs_sb, &wdata_list);
 
@@ -2631,13 +2676,11 @@
 			/* resend call if it's a retryable error */
 			if (rc == -EAGAIN) {
 				struct list_head tmp_list;
-				struct iov_iter tmp_from;
+				struct iov_iter tmp_from = saved_from;
 
 				INIT_LIST_HEAD(&tmp_list);
 				list_del_init(&wdata->list);
 
-				memcpy(&tmp_from, &saved_from,
-				       sizeof(struct iov_iter));
 				iov_iter_advance(&tmp_from,
 						 wdata->offset - iocb->ki_pos);
 
@@ -3571,7 +3614,7 @@
 		cifs_dbg(FYI, "Bytes read %d\n", rc);
 
 	file_inode(file)->i_atime =
-		current_fs_time(file_inode(file)->i_sb);
+		current_time(file_inode(file));
 
 	if (PAGE_SIZE > rc)
 		memset(read_data + rc, 0, PAGE_SIZE - rc);
@@ -3618,15 +3661,17 @@
 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
 {
 	struct cifsFileInfo *open_file;
+	struct cifs_tcon *tcon =
+		cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
 
-	spin_lock(&cifs_file_list_lock);
+	spin_lock(&tcon->open_file_lock);
 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
-			spin_unlock(&cifs_file_list_lock);
+			spin_unlock(&tcon->open_file_lock);
 			return 1;
 		}
 	}
-	spin_unlock(&cifs_file_list_lock);
+	spin_unlock(&tcon->open_file_lock);
 	return 0;
 }
 
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index b87efd0..7ab5be7 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1951,7 +1951,7 @@
 
 	cifs_dbg(FYI, "Update attributes: %s inode 0x%p count %d dentry: 0x%p d_time %ld jiffies %ld\n",
 		 full_path, inode, inode->i_count.counter,
-		 dentry, dentry->d_time, jiffies);
+		 dentry, cifs_get_time(dentry), jiffies);
 
 	if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
 		rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
@@ -2154,7 +2154,7 @@
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
 		attrs->ia_valid |= ATTR_FORCE;
 
-	rc = inode_change_ok(inode, attrs);
+	rc = setattr_prepare(direntry, attrs);
 	if (rc < 0)
 		goto out;
 
@@ -2294,7 +2294,7 @@
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
 		attrs->ia_valid |= ATTR_FORCE;
 
-	rc = inode_change_ok(inode, attrs);
+	rc = setattr_prepare(direntry, attrs);
 	if (rc < 0) {
 		free_xid(xid);
 		return rc;
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 7a3b84e..9f51b81 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -189,7 +189,7 @@
 	xid = get_xid();
 
 	cifs_sb = CIFS_SB(inode->i_sb);
-
+	cifs_dbg(VFS, "cifs ioctl 0x%x\n", command);
 	switch (command) {
 		case FS_IOC_GETFLAGS:
 			if (pSMBFile == NULL)
@@ -267,11 +267,23 @@
 			tcon = tlink_tcon(pSMBFile->tlink);
 			rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
 			break;
+		case CIFS_ENUMERATE_SNAPSHOTS:
+			if (arg == 0) {
+				rc = -EINVAL;
+				goto cifs_ioc_exit;
+			}
+			tcon = tlink_tcon(pSMBFile->tlink);
+			if (tcon->ses->server->ops->enum_snapshots)
+				rc = tcon->ses->server->ops->enum_snapshots(xid, tcon,
+						pSMBFile, (void __user *)arg);
+			else
+				rc = -EOPNOTSUPP;
+			break;
 		default:
 			cifs_dbg(FYI, "unsupported ioctl\n");
 			break;
 	}
-
+cifs_ioc_exit:
 	free_xid(xid);
 	return rc;
 }
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 062c237..d031af8 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -399,7 +399,7 @@
 	io_parms.offset = 0;
 	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
 
-	rc = CIFSSMBWrite(xid, &io_parms, pbytes_written, pbuf, NULL, 0);
+	rc = CIFSSMBWrite(xid, &io_parms, pbytes_written, pbuf);
 	CIFSSMBClose(xid, tcon, fid.netfid);
 	return rc;
 }
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 813fe13..c672915 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -120,6 +120,7 @@
 		++ret_buf->tc_count;
 		INIT_LIST_HEAD(&ret_buf->openFileList);
 		INIT_LIST_HEAD(&ret_buf->tcon_list);
+		spin_lock_init(&ret_buf->open_file_lock);
 #ifdef CONFIG_CIFS_STATS
 		spin_lock_init(&ret_buf->stat_lock);
 #endif
@@ -465,7 +466,7 @@
 				continue;
 
 			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
-			spin_lock(&cifs_file_list_lock);
+			spin_lock(&tcon->open_file_lock);
 			list_for_each(tmp2, &tcon->openFileList) {
 				netfile = list_entry(tmp2, struct cifsFileInfo,
 						     tlist);
@@ -495,11 +496,11 @@
 					   &netfile->oplock_break);
 				netfile->oplock_break_cancelled = false;
 
-				spin_unlock(&cifs_file_list_lock);
+				spin_unlock(&tcon->open_file_lock);
 				spin_unlock(&cifs_tcp_ses_lock);
 				return true;
 			}
-			spin_unlock(&cifs_file_list_lock);
+			spin_unlock(&tcon->open_file_lock);
 			spin_unlock(&cifs_tcp_ses_lock);
 			cifs_dbg(FYI, "No matching file for oplock break\n");
 			return true;
@@ -613,9 +614,9 @@
 void
 cifs_del_pending_open(struct cifs_pending_open *open)
 {
-	spin_lock(&cifs_file_list_lock);
+	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
 	list_del(&open->olist);
-	spin_unlock(&cifs_file_list_lock);
+	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
 }
 
 void
@@ -635,7 +636,7 @@
 cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
 		      struct cifs_pending_open *open)
 {
-	spin_lock(&cifs_file_list_lock);
+	spin_lock(&tlink_tcon(tlink)->open_file_lock);
 	cifs_add_pending_open_locked(fid, tlink, open);
-	spin_unlock(&cifs_file_list_lock);
+	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
 }
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 65cf85d..8f6a2a5 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -597,14 +597,14 @@
 	     is_dir_changed(file)) || (index_to_find < first_entry_in_buffer)) {
 		/* close and restart search */
 		cifs_dbg(FYI, "search backing up - close and restart search\n");
-		spin_lock(&cifs_file_list_lock);
+		spin_lock(&cfile->file_info_lock);
 		if (server->ops->dir_needs_close(cfile)) {
 			cfile->invalidHandle = true;
-			spin_unlock(&cifs_file_list_lock);
+			spin_unlock(&cfile->file_info_lock);
 			if (server->ops->close_dir)
 				server->ops->close_dir(xid, tcon, &cfile->fid);
 		} else
-			spin_unlock(&cifs_file_list_lock);
+			spin_unlock(&cfile->file_info_lock);
 		if (cfile->srch_inf.ntwrk_buf_start) {
 			cifs_dbg(FYI, "freeing SMB ff cache buf on search rewind\n");
 			if (cfile->srch_inf.smallBuf)
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 4f0231e..1238cd3 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -266,9 +266,15 @@
 	struct tcon_link *tlink;
 	int rc;
 
+	if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
+	    (buf->LastWriteTime == 0) && (buf->ChangeTime) &&
+	    (buf->Attributes == 0))
+		return 0; /* would be a no op, no sense sending this */
+
 	tlink = cifs_sb_tlink(cifs_sb);
 	if (IS_ERR(tlink))
 		return PTR_ERR(tlink);
+
 	rc = smb2_open_op_close(xid, tlink_tcon(tlink), cifs_sb, full_path,
 				FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, buf,
 				SMB2_OP_SET_INFO);
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 389fb9f..3d38348 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -549,19 +549,19 @@
 		list_for_each(tmp1, &server->smb_ses_list) {
 			ses = list_entry(tmp1, struct cifs_ses, smb_ses_list);
 
-			spin_lock(&cifs_file_list_lock);
 			list_for_each(tmp2, &ses->tcon_list) {
 				tcon = list_entry(tmp2, struct cifs_tcon,
 						  tcon_list);
+				spin_lock(&tcon->open_file_lock);
 				cifs_stats_inc(
 				    &tcon->stats.cifs_stats.num_oplock_brks);
 				if (smb2_tcon_has_lease(tcon, rsp, lw)) {
-					spin_unlock(&cifs_file_list_lock);
+					spin_unlock(&tcon->open_file_lock);
 					spin_unlock(&cifs_tcp_ses_lock);
 					return true;
 				}
+				spin_unlock(&tcon->open_file_lock);
 			}
-			spin_unlock(&cifs_file_list_lock);
 		}
 	}
 	spin_unlock(&cifs_tcp_ses_lock);
@@ -603,7 +603,7 @@
 			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
 
 			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
-			spin_lock(&cifs_file_list_lock);
+			spin_lock(&tcon->open_file_lock);
 			list_for_each(tmp2, &tcon->openFileList) {
 				cfile = list_entry(tmp2, struct cifsFileInfo,
 						     tlist);
@@ -615,7 +615,7 @@
 
 				cifs_dbg(FYI, "file id match, oplock break\n");
 				cinode = CIFS_I(d_inode(cfile->dentry));
-
+				spin_lock(&cfile->file_info_lock);
 				if (!CIFS_CACHE_WRITE(cinode) &&
 				    rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE)
 					cfile->oplock_break_cancelled = true;
@@ -637,14 +637,14 @@
 					clear_bit(
 					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
 					   &cinode->flags);
-
+				spin_unlock(&cfile->file_info_lock);
 				queue_work(cifsiod_wq, &cfile->oplock_break);
 
-				spin_unlock(&cifs_file_list_lock);
+				spin_unlock(&tcon->open_file_lock);
 				spin_unlock(&cifs_tcp_ses_lock);
 				return true;
 			}
-			spin_unlock(&cifs_file_list_lock);
+			spin_unlock(&tcon->open_file_lock);
 			spin_unlock(&cifs_tcp_ses_lock);
 			cifs_dbg(FYI, "No matching file for oplock break\n");
 			return true;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index d203c03..5d456eb 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -28,6 +28,7 @@
 #include "cifs_unicode.h"
 #include "smb2status.h"
 #include "smb2glob.h"
+#include "cifs_ioctl.h"
 
 static int
 change_conf(struct TCP_Server_Info *server)
@@ -70,6 +71,10 @@
 	spin_lock(&server->req_lock);
 	val = server->ops->get_credits_field(server, optype);
 	*val += add;
+	if (*val > 65000) {
+		*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
+		printk_once(KERN_WARNING "server overflowed SMB3 credits\n");
+	}
 	server->in_flight--;
 	if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
 		rc = change_conf(server);
@@ -287,7 +292,7 @@
 		cifs_dbg(FYI, "Link Speed %lld\n",
 			le64_to_cpu(out_buf->LinkSpeed));
 	}
-
+	kfree(out_buf);
 	return rc;
 }
 #endif /* STATS2 */
@@ -541,6 +546,7 @@
 	server->ops->set_oplock_level(cinode, oplock, fid->epoch,
 				      &fid->purge_cache);
 	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
+	memcpy(cfile->fid.create_guid, fid->create_guid, 16);
 }
 
 static void
@@ -699,6 +705,7 @@
 
 cchunk_out:
 	kfree(pcchunk);
+	kfree(retbuf);
 	return rc;
 }
 
@@ -823,7 +830,6 @@
 {
 	int rc;
 	unsigned int ret_data_len;
-	char *retbuf = NULL;
 	struct duplicate_extents_to_file dup_ext_buf;
 	struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
 
@@ -849,7 +855,7 @@
 			FSCTL_DUPLICATE_EXTENTS_TO_FILE,
 			true /* is_fsctl */, (char *)&dup_ext_buf,
 			sizeof(struct duplicate_extents_to_file),
-			(char **)&retbuf,
+			NULL,
 			&ret_data_len);
 
 	if (ret_data_len > 0)
@@ -872,7 +878,6 @@
 		   struct cifsFileInfo *cfile)
 {
 	struct fsctl_set_integrity_information_req integr_info;
-	char *retbuf = NULL;
 	unsigned int ret_data_len;
 
 	integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
@@ -884,12 +889,56 @@
 			FSCTL_SET_INTEGRITY_INFORMATION,
 			true /* is_fsctl */, (char *)&integr_info,
 			sizeof(struct fsctl_set_integrity_information_req),
-			(char **)&retbuf,
+			NULL,
 			&ret_data_len);
 
 }
 
 static int
+smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
+		   struct cifsFileInfo *cfile, void __user *ioc_buf)
+{
+	char *retbuf = NULL;
+	unsigned int ret_data_len = 0;
+	int rc;
+	struct smb_snapshot_array snapshot_in;
+
+	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+			cfile->fid.volatile_fid,
+			FSCTL_SRV_ENUMERATE_SNAPSHOTS,
+			true /* is_fsctl */, NULL, 0 /* no input data */,
+			(char **)&retbuf,
+			&ret_data_len);
+	cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
+			rc, ret_data_len);
+	if (rc)
+		return rc;
+
+	if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
+		/* Fixup buffer */
+		if (copy_from_user(&snapshot_in, ioc_buf,
+		    sizeof(struct smb_snapshot_array))) {
+			rc = -EFAULT;
+			kfree(retbuf);
+			return rc;
+		}
+		if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) {
+			rc = -ERANGE;
+			return rc;
+		}
+
+		if (ret_data_len > snapshot_in.snapshot_array_size)
+			ret_data_len = snapshot_in.snapshot_array_size;
+
+		if (copy_to_user(ioc_buf, retbuf, ret_data_len))
+			rc = -EFAULT;
+	}
+
+	kfree(retbuf);
+	return rc;
+}
+
+static int
 smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
 		     const char *path, struct cifs_sb_info *cifs_sb,
 		     struct cifs_fid *fid, __u16 search_flags,
@@ -1041,7 +1090,7 @@
 static void
 smb2_new_lease_key(struct cifs_fid *fid)
 {
-	get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE);
+	generate_random_uuid(fid->lease_key);
 }
 
 #define SMB2_SYMLINK_STRUCT_SIZE \
@@ -1654,6 +1703,7 @@
 	.clone_range = smb2_clone_range,
 	.wp_retry_size = smb2_wp_retry_size,
 	.dir_needs_close = smb2_dir_needs_close,
+	.enum_snapshots = smb3_enum_snapshots,
 };
 
 struct smb_version_operations smb30_operations = {
@@ -1740,6 +1790,7 @@
 	.wp_retry_size = smb2_wp_retry_size,
 	.dir_needs_close = smb2_dir_needs_close,
 	.fallocate = smb3_fallocate,
+	.enum_snapshots = smb3_enum_snapshots,
 };
 
 #ifdef CONFIG_CIFS_SMB311
@@ -1827,6 +1878,7 @@
 	.wp_retry_size = smb2_wp_retry_size,
 	.dir_needs_close = smb2_dir_needs_close,
 	.fallocate = smb3_fallocate,
+	.enum_snapshots = smb3_enum_snapshots,
 };
 #endif /* CIFS_SMB311 */
 
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 29e06db..5ca5ea46 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -100,7 +100,21 @@
 	hdr->ProtocolId = SMB2_PROTO_NUMBER;
 	hdr->StructureSize = cpu_to_le16(64);
 	hdr->Command = smb2_cmd;
-	hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
+	if (tcon && tcon->ses && tcon->ses->server) {
+		struct TCP_Server_Info *server = tcon->ses->server;
+
+		spin_lock(&server->req_lock);
+		/* Request up to 2 credits but don't go over the limit. */
+		if (server->credits >= server->max_credits)
+			hdr->CreditRequest = cpu_to_le16(0);
+		else
+			hdr->CreditRequest = cpu_to_le16(
+				min_t(int, server->max_credits -
+						server->credits, 2));
+		spin_unlock(&server->req_lock);
+	} else {
+		hdr->CreditRequest = cpu_to_le16(2);
+	}
 	hdr->ProcessId = cpu_to_le32((__u16)current->tgid);
 
 	if (!tcon)
@@ -236,8 +250,13 @@
 	}
 
 	cifs_mark_open_files_invalid(tcon);
+
 	rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage);
 	mutex_unlock(&tcon->ses->session_mutex);
+
+	if (tcon->use_persistent)
+		cifs_reopen_persistent_handles(tcon);
+
 	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
 	if (rc)
 		goto out;
@@ -574,59 +593,42 @@
 	return -EIO;
 }
 
-int
-SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
-		const struct nls_table *nls_cp)
-{
-	struct smb2_sess_setup_req *req;
-	struct smb2_sess_setup_rsp *rsp = NULL;
+struct SMB2_sess_data {
+	unsigned int xid;
+	struct cifs_ses *ses;
+	struct nls_table *nls_cp;
+	void (*func)(struct SMB2_sess_data *);
+	int result;
+	u64 previous_session;
+
+	/* we will send the SMB in three pieces:
+	 * a fixed length beginning part, an optional
+	 * SPNEGO blob (which can be zero length), and a
+	 * last part which will include the strings
+	 * and rest of bcc area. This allows us to avoid
+	 * a large buffer 17K allocation
+	 */
+	int buf0_type;
 	struct kvec iov[2];
-	int rc = 0;
-	int resp_buftype = CIFS_NO_BUFFER;
-	__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
+};
+
+static int
+SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
+{
+	int rc;
+	struct cifs_ses *ses = sess_data->ses;
+	struct smb2_sess_setup_req *req;
 	struct TCP_Server_Info *server = ses->server;
-	u16 blob_length = 0;
-	struct key *spnego_key = NULL;
-	char *security_blob = NULL;
-	unsigned char *ntlmssp_blob = NULL;
-	bool use_spnego = false; /* else use raw ntlmssp */
-
-	cifs_dbg(FYI, "Session Setup\n");
-
-	if (!server) {
-		WARN(1, "%s: server is NULL!\n", __func__);
-		return -EIO;
-	}
-
-	/*
-	 * If we are here due to reconnect, free per-smb session key
-	 * in case signing was required.
-	 */
-	kfree(ses->auth_key.response);
-	ses->auth_key.response = NULL;
-
-	/*
-	 * If memory allocation is successful, caller of this function
-	 * frees it.
-	 */
-	ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
-	if (!ses->ntlmssp)
-		return -ENOMEM;
-	ses->ntlmssp->sesskey_per_smbsess = true;
-
-	/* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */
-	if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP)
-		ses->sectype = RawNTLMSSP;
-
-ssetup_ntlmssp_authenticate:
-	if (phase == NtLmChallenge)
-		phase = NtLmAuthenticate; /* if ntlmssp, now final phase */
 
 	rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req);
 	if (rc)
 		return rc;
 
 	req->hdr.SessionId = 0; /* First session, not a reauthenticate */
+
+	/* if reconnect, we need to send previous sess id, otherwise it is 0 */
+	req->PreviousSessionId = sess_data->previous_session;
+
 	req->Flags = 0; /* MBZ */
 	/* to enable echos and oplocks */
 	req->hdr.CreditRequest = cpu_to_le16(3);
@@ -642,199 +644,368 @@
 	req->Capabilities = 0;
 	req->Channel = 0; /* MBZ */
 
-	iov[0].iov_base = (char *)req;
+	sess_data->iov[0].iov_base = (char *)req;
 	/* 4 for rfc1002 length field and 1 for pad */
-	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
+	sess_data->iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
+	/*
+	 * This variable will be used to clear the buffer
+	 * allocated above in case of any error in the calling function.
+	 */
+	sess_data->buf0_type = CIFS_SMALL_BUFFER;
 
-	if (ses->sectype == Kerberos) {
-#ifdef CONFIG_CIFS_UPCALL
-		struct cifs_spnego_msg *msg;
+	return 0;
+}
 
-		spnego_key = cifs_get_spnego_key(ses);
-		if (IS_ERR(spnego_key)) {
-			rc = PTR_ERR(spnego_key);
-			spnego_key = NULL;
-			goto ssetup_exit;
-		}
+static void
+SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
+{
+	free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base);
+	sess_data->buf0_type = CIFS_NO_BUFFER;
+}
 
-		msg = spnego_key->payload.data[0];
-		/*
-		 * check version field to make sure that cifs.upcall is
-		 * sending us a response in an expected form
-		 */
-		if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
-			cifs_dbg(VFS,
-				  "bad cifs.upcall version. Expected %d got %d",
-				  CIFS_SPNEGO_UPCALL_VERSION, msg->version);
-			rc = -EKEYREJECTED;
-			goto ssetup_exit;
-		}
-		ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
-						 GFP_KERNEL);
-		if (!ses->auth_key.response) {
-			cifs_dbg(VFS,
-				"Kerberos can't allocate (%u bytes) memory",
-				msg->sesskey_len);
-			rc = -ENOMEM;
-			goto ssetup_exit;
-		}
-		ses->auth_key.len = msg->sesskey_len;
-		blob_length = msg->secblob_len;
-		iov[1].iov_base = msg->data + msg->sesskey_len;
-		iov[1].iov_len = blob_length;
-#else
-		rc = -EOPNOTSUPP;
-		goto ssetup_exit;
-#endif /* CONFIG_CIFS_UPCALL */
-	} else if (phase == NtLmNegotiate) { /* if not krb5 must be ntlmssp */
-		ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
-				       GFP_KERNEL);
-		if (ntlmssp_blob == NULL) {
-			rc = -ENOMEM;
-			goto ssetup_exit;
-		}
-		build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
-		if (use_spnego) {
-			/* blob_length = build_spnego_ntlmssp_blob(
-					&security_blob,
-					sizeof(struct _NEGOTIATE_MESSAGE),
-					ntlmssp_blob); */
-			/* BB eventually need to add this */
-			cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
-			rc = -EOPNOTSUPP;
-			kfree(ntlmssp_blob);
-			goto ssetup_exit;
-		} else {
-			blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
-			/* with raw NTLMSSP we don't encapsulate in SPNEGO */
-			security_blob = ntlmssp_blob;
-		}
-		iov[1].iov_base = security_blob;
-		iov[1].iov_len = blob_length;
-	} else if (phase == NtLmAuthenticate) {
-		req->hdr.SessionId = ses->Suid;
-		rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
-					     nls_cp);
-		if (rc) {
-			cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
-				 rc);
-			goto ssetup_exit; /* BB double check error handling */
-		}
-		if (use_spnego) {
-			/* blob_length = build_spnego_ntlmssp_blob(
-							&security_blob,
-							blob_length,
-							ntlmssp_blob); */
-			cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
-			rc = -EOPNOTSUPP;
-			kfree(ntlmssp_blob);
-			goto ssetup_exit;
-		} else {
-			security_blob = ntlmssp_blob;
-		}
-		iov[1].iov_base = security_blob;
-		iov[1].iov_len = blob_length;
-	} else {
-		cifs_dbg(VFS, "illegal ntlmssp phase\n");
-		rc = -EIO;
-		goto ssetup_exit;
-	}
+static int
+SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
+{
+	int rc;
+	struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
 
 	/* Testing shows that buffer offset must be at location of Buffer[0] */
 	req->SecurityBufferOffset =
-				cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
-					    1 /* pad */ - 4 /* rfc1001 len */);
-	req->SecurityBufferLength = cpu_to_le16(blob_length);
+		cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
+			1 /* pad */ - 4 /* rfc1001 len */);
+	req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
 
-	inc_rfc1001_len(req, blob_length - 1 /* pad */);
+	inc_rfc1001_len(req, sess_data->iov[1].iov_len - 1 /* pad */);
 
 	/* BB add code to build os and lm fields */
 
-	rc = SendReceive2(xid, ses, iov, 2, &resp_buftype,
-			  CIFS_LOG_ERROR | CIFS_NEG_OP);
+	rc = SendReceive2(sess_data->xid, sess_data->ses,
+				sess_data->iov, 2,
+				&sess_data->buf0_type,
+				CIFS_LOG_ERROR | CIFS_NEG_OP);
 
-	kfree(security_blob);
-	rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
-	ses->Suid = rsp->hdr.SessionId;
-	if (resp_buftype != CIFS_NO_BUFFER &&
-	    rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
-		if (phase != NtLmNegotiate) {
-			cifs_dbg(VFS, "Unexpected more processing error\n");
-			goto ssetup_exit;
-		}
-		if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 !=
-				le16_to_cpu(rsp->SecurityBufferOffset)) {
-			cifs_dbg(VFS, "Invalid security buffer offset %d\n",
-				 le16_to_cpu(rsp->SecurityBufferOffset));
-			rc = -EIO;
-			goto ssetup_exit;
-		}
+	return rc;
+}
 
-		/* NTLMSSP Negotiate sent now processing challenge (response) */
-		phase = NtLmChallenge; /* process ntlmssp challenge */
-		rc = 0; /* MORE_PROCESSING is not an error here but expected */
-		rc = decode_ntlmssp_challenge(rsp->Buffer,
-				le16_to_cpu(rsp->SecurityBufferLength), ses);
+static int
+SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
+{
+	int rc = 0;
+	struct cifs_ses *ses = sess_data->ses;
+
+	mutex_lock(&ses->server->srv_mutex);
+	if (ses->server->sign && ses->server->ops->generate_signingkey) {
+		rc = ses->server->ops->generate_signingkey(ses);
+		kfree(ses->auth_key.response);
+		ses->auth_key.response = NULL;
+		if (rc) {
+			cifs_dbg(FYI,
+				"SMB3 session key generation failed\n");
+			mutex_unlock(&ses->server->srv_mutex);
+			goto keygen_exit;
+		}
+	}
+	if (!ses->server->session_estab) {
+		ses->server->sequence_number = 0x2;
+		ses->server->session_estab = true;
+	}
+	mutex_unlock(&ses->server->srv_mutex);
+
+	cifs_dbg(FYI, "SMB2/3 session established successfully\n");
+	spin_lock(&GlobalMid_Lock);
+	ses->status = CifsGood;
+	ses->need_reconnect = false;
+	spin_unlock(&GlobalMid_Lock);
+
+keygen_exit:
+	if (!ses->server->sign) {
+		kfree(ses->auth_key.response);
+		ses->auth_key.response = NULL;
+	}
+	return rc;
+}
+
+#ifdef CONFIG_CIFS_UPCALL
+static void
+SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
+{
+	int rc;
+	struct cifs_ses *ses = sess_data->ses;
+	struct cifs_spnego_msg *msg;
+	struct key *spnego_key = NULL;
+	struct smb2_sess_setup_rsp *rsp = NULL;
+
+	rc = SMB2_sess_alloc_buffer(sess_data);
+	if (rc)
+		goto out;
+
+	spnego_key = cifs_get_spnego_key(ses);
+	if (IS_ERR(spnego_key)) {
+		rc = PTR_ERR(spnego_key);
+		spnego_key = NULL;
+		goto out;
 	}
 
+	msg = spnego_key->payload.data[0];
 	/*
-	 * BB eventually add code for SPNEGO decoding of NtlmChallenge blob,
-	 * but at least the raw NTLMSSP case works.
+	 * check version field to make sure that cifs.upcall is
+	 * sending us a response in an expected form
 	 */
-	/*
-	 * No tcon so can't do
-	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
-	 */
-	if (rc != 0)
-		goto ssetup_exit;
+	if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
+		cifs_dbg(VFS,
+			  "bad cifs.upcall version. Expected %d got %d",
+			  CIFS_SPNEGO_UPCALL_VERSION, msg->version);
+		rc = -EKEYREJECTED;
+		goto out_put_spnego_key;
+	}
+
+	ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
+					 GFP_KERNEL);
+	if (!ses->auth_key.response) {
+		cifs_dbg(VFS,
+			"Kerberos can't allocate (%u bytes) memory",
+			msg->sesskey_len);
+		rc = -ENOMEM;
+		goto out_put_spnego_key;
+	}
+	ses->auth_key.len = msg->sesskey_len;
+
+	sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
+	sess_data->iov[1].iov_len = msg->secblob_len;
+
+	rc = SMB2_sess_sendreceive(sess_data);
+	if (rc)
+		goto out_put_spnego_key;
+
+	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
+	ses->Suid = rsp->hdr.SessionId;
 
 	ses->session_flags = le16_to_cpu(rsp->SessionFlags);
 	if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
 		cifs_dbg(VFS, "SMB3 encryption not supported yet\n");
-ssetup_exit:
-	free_rsp_buf(resp_buftype, rsp);
 
-	/* if ntlmssp, and negotiate succeeded, proceed to authenticate phase */
-	if ((phase == NtLmChallenge) && (rc == 0))
-		goto ssetup_ntlmssp_authenticate;
+	rc = SMB2_sess_establish_session(sess_data);
+out_put_spnego_key:
+	key_invalidate(spnego_key);
+	key_put(spnego_key);
+out:
+	sess_data->result = rc;
+	sess_data->func = NULL;
+	SMB2_sess_free_buffer(sess_data);
+}
+#else
+static void
+SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
+{
+	cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
+	sess_data->result = -EOPNOTSUPP;
+	sess_data->func = NULL;
+}
+#endif
 
+static void
+SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data);
+
+static void
+SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
+{
+	int rc;
+	struct cifs_ses *ses = sess_data->ses;
+	struct smb2_sess_setup_rsp *rsp = NULL;
+	char *ntlmssp_blob = NULL;
+	bool use_spnego = false; /* else use raw ntlmssp */
+	u16 blob_length = 0;
+
+	/*
+	 * If memory allocation is successful, caller of this function
+	 * frees it.
+	 */
+	ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
+	if (!ses->ntlmssp) {
+		rc = -ENOMEM;
+		goto out_err;
+	}
+	ses->ntlmssp->sesskey_per_smbsess = true;
+
+	rc = SMB2_sess_alloc_buffer(sess_data);
+	if (rc)
+		goto out_err;
+
+	ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
+			       GFP_KERNEL);
+	if (ntlmssp_blob == NULL) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
+	if (use_spnego) {
+		/* BB eventually need to add this */
+		cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
+		rc = -EOPNOTSUPP;
+		goto out;
+	} else {
+		blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
+		/* with raw NTLMSSP we don't encapsulate in SPNEGO */
+	}
+	sess_data->iov[1].iov_base = ntlmssp_blob;
+	sess_data->iov[1].iov_len = blob_length;
+
+	rc = SMB2_sess_sendreceive(sess_data);
+	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
+
+	/* If true, rc here is expected and not an error */
+	if (sess_data->buf0_type != CIFS_NO_BUFFER &&
+		rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED)
+		rc = 0;
+
+	if (rc)
+		goto out;
+
+	if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 !=
+			le16_to_cpu(rsp->SecurityBufferOffset)) {
+		cifs_dbg(VFS, "Invalid security buffer offset %d\n",
+			le16_to_cpu(rsp->SecurityBufferOffset));
+		rc = -EIO;
+		goto out;
+	}
+	rc = decode_ntlmssp_challenge(rsp->Buffer,
+			le16_to_cpu(rsp->SecurityBufferLength), ses);
+	if (rc)
+		goto out;
+
+	cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
+
+
+	ses->Suid = rsp->hdr.SessionId;
+	ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+	if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
+		cifs_dbg(VFS, "SMB3 encryption not supported yet\n");
+
+out:
+	kfree(ntlmssp_blob);
+	SMB2_sess_free_buffer(sess_data);
 	if (!rc) {
-		mutex_lock(&server->srv_mutex);
-		if (server->sign && server->ops->generate_signingkey) {
-			rc = server->ops->generate_signingkey(ses);
-			kfree(ses->auth_key.response);
-			ses->auth_key.response = NULL;
-			if (rc) {
-				cifs_dbg(FYI,
-					"SMB3 session key generation failed\n");
-				mutex_unlock(&server->srv_mutex);
-				goto keygen_exit;
-			}
-		}
-		if (!server->session_estab) {
-			server->sequence_number = 0x2;
-			server->session_estab = true;
-		}
-		mutex_unlock(&server->srv_mutex);
-
-		cifs_dbg(FYI, "SMB2/3 session established successfully\n");
-		spin_lock(&GlobalMid_Lock);
-		ses->status = CifsGood;
-		ses->need_reconnect = false;
-		spin_unlock(&GlobalMid_Lock);
+		sess_data->result = 0;
+		sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate;
+		return;
 	}
-
-keygen_exit:
-	if (!server->sign) {
-		kfree(ses->auth_key.response);
-		ses->auth_key.response = NULL;
-	}
-	if (spnego_key) {
-		key_invalidate(spnego_key);
-		key_put(spnego_key);
-	}
+out_err:
 	kfree(ses->ntlmssp);
+	ses->ntlmssp = NULL;
+	sess_data->result = rc;
+	sess_data->func = NULL;
+}
 
+static void
+SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
+{
+	int rc;
+	struct cifs_ses *ses = sess_data->ses;
+	struct smb2_sess_setup_req *req;
+	struct smb2_sess_setup_rsp *rsp = NULL;
+	unsigned char *ntlmssp_blob = NULL;
+	bool use_spnego = false; /* else use raw ntlmssp */
+	u16 blob_length = 0;
+
+	rc = SMB2_sess_alloc_buffer(sess_data);
+	if (rc)
+		goto out;
+
+	req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
+	req->hdr.SessionId = ses->Suid;
+
+	rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
+					sess_data->nls_cp);
+	if (rc) {
+		cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
+		goto out;
+	}
+
+	if (use_spnego) {
+		/* BB eventually need to add this */
+		cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+	sess_data->iov[1].iov_base = ntlmssp_blob;
+	sess_data->iov[1].iov_len = blob_length;
+
+	rc = SMB2_sess_sendreceive(sess_data);
+	if (rc)
+		goto out;
+
+	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
+
+	ses->Suid = rsp->hdr.SessionId;
+	ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+	if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
+		cifs_dbg(VFS, "SMB3 encryption not supported yet\n");
+
+	rc = SMB2_sess_establish_session(sess_data);
+out:
+	kfree(ntlmssp_blob);
+	SMB2_sess_free_buffer(sess_data);
+	kfree(ses->ntlmssp);
+	ses->ntlmssp = NULL;
+	sess_data->result = rc;
+	sess_data->func = NULL;
+}
+
+static int
+SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data)
+{
+	if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP)
+		ses->sectype = RawNTLMSSP;
+
+	switch (ses->sectype) {
+	case Kerberos:
+		sess_data->func = SMB2_auth_kerberos;
+		break;
+	case RawNTLMSSP:
+		sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate;
+		break;
+	default:
+		cifs_dbg(VFS, "secType %d not supported!\n", ses->sectype);
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+int
+SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+		const struct nls_table *nls_cp)
+{
+	int rc = 0;
+	struct TCP_Server_Info *server = ses->server;
+	struct SMB2_sess_data *sess_data;
+
+	cifs_dbg(FYI, "Session Setup\n");
+
+	if (!server) {
+		WARN(1, "%s: server is NULL!\n", __func__);
+		return -EIO;
+	}
+
+	sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL);
+	if (!sess_data)
+		return -ENOMEM;
+
+	rc = SMB2_select_sec(ses, sess_data);
+	if (rc)
+		goto out;
+	sess_data->xid = xid;
+	sess_data->ses = ses;
+	sess_data->buf0_type = CIFS_NO_BUFFER;
+	sess_data->nls_cp = (struct nls_table *) nls_cp;
+
+	while (sess_data->func)
+		sess_data->func(sess_data);
+
+	rc = sess_data->result;
+out:
+	kfree(sess_data);
 	return rc;
 }
 
@@ -1164,7 +1335,7 @@
 
 	buf->dcontext.Timeout = 0; /* Should this be configurable by workload */
 	buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
-	get_random_bytes(buf->dcontext.CreateGuid, 16);
+	generate_random_uuid(buf->dcontext.CreateGuid);
 	memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
 
 	/* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
@@ -2057,6 +2228,7 @@
 	if (rdata->credits) {
 		buf->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
 						SMB2_MAX_BUFFER_SIZE));
+		buf->CreditRequest = buf->CreditCharge;
 		spin_lock(&server->req_lock);
 		server->credits += rdata->credits -
 						le16_to_cpu(buf->CreditCharge);
@@ -2243,6 +2415,7 @@
 	if (wdata->credits) {
 		req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
 						    SMB2_MAX_BUFFER_SIZE));
+		req->hdr.CreditRequest = req->hdr.CreditCharge;
 		spin_lock(&server->req_lock);
 		server->credits += wdata->credits -
 					le16_to_cpu(req->hdr.CreditCharge);
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index ff88d9f..fd3709e 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -276,7 +276,7 @@
 	__le32 Channel;
 	__le16 SecurityBufferOffset;
 	__le16 SecurityBufferLength;
-	__le64 PreviousSessionId;
+	__u64 PreviousSessionId;
 	__u8   Buffer[1];	/* variable length GSS security buffer */
 } __packed;
 
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 5e23f64..20af518 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -33,7 +33,8 @@
 
 #define MAX_EA_VALUE_SIZE 65535
 #define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
-
+#define CIFS_XATTR_ATTRIB "cifs.dosattrib"  /* full name: user.cifs.dosattrib */
+#define CIFS_XATTR_CREATETIME "cifs.creationtime"  /* user.cifs.creationtime */
 /* BB need to add server (Samba e.g) support for security and trusted prefix */
 
 enum { XATTR_USER, XATTR_CIFS_ACL, XATTR_ACL_ACCESS, XATTR_ACL_DEFAULT };
@@ -144,6 +145,54 @@
 	return rc;
 }
 
+static int cifs_attrib_get(struct dentry *dentry,
+			   struct inode *inode, void *value,
+			   size_t size)
+{
+	ssize_t rc;
+	__u32 *pattribute;
+
+	rc = cifs_revalidate_dentry_attr(dentry);
+
+	if (rc)
+		return rc;
+
+	if ((value == NULL) || (size == 0))
+		return sizeof(__u32);
+	else if (size < sizeof(__u32))
+		return -ERANGE;
+
+	/* return dos attributes as pseudo xattr */
+	pattribute = (__u32 *)value;
+	*pattribute = CIFS_I(inode)->cifsAttrs;
+
+	return sizeof(__u32);
+}
+
+static int cifs_creation_time_get(struct dentry *dentry, struct inode *inode,
+				  void *value, size_t size)
+{
+	ssize_t rc;
+	__u64 * pcreatetime;
+
+	rc = cifs_revalidate_dentry_attr(dentry);
+	if (rc)
+		return rc;
+
+	if ((value == NULL) || (size == 0))
+		return sizeof(__u64);
+	else if (size < sizeof(__u64))
+		return -ERANGE;
+
+	/* return dos attributes as pseudo xattr */
+	pcreatetime = (__u64 *)value;
+	*pcreatetime = CIFS_I(inode)->createtime;
+	return sizeof(__u64);
+
+	return rc;
+}
+
+
 static int cifs_xattr_get(const struct xattr_handler *handler,
 			  struct dentry *dentry, struct inode *inode,
 			  const char *name, void *value, size_t size)
@@ -168,10 +217,19 @@
 		rc = -ENOMEM;
 		goto out;
 	}
-	/* return dos attributes as pseudo xattr */
+
 	/* return alt name if available as pseudo attr */
 	switch (handler->flags) {
 	case XATTR_USER:
+		cifs_dbg(FYI, "%s:querying user xattr %s\n", __func__, name);
+		if (strcmp(name, CIFS_XATTR_ATTRIB) == 0) {
+			rc = cifs_attrib_get(dentry, inode, value, size);
+			break;
+		} else if (strcmp(name, CIFS_XATTR_CREATETIME) == 0) {
+			rc = cifs_creation_time_get(dentry, inode, value, size);
+			break;
+		}
+
 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
 			goto out;
 
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 6fb8672..c0474ac 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -109,7 +109,7 @@
 	/* optimistically we can also act as if our nose bleeds. The
 	 * granularity of the mtime is coarse anyways so we might actually be
 	 * right most of the time. Note: we only do this for directories. */
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 #endif
 }
 
@@ -291,7 +291,8 @@
 
 /* rename */
 static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
-		       struct inode *new_dir, struct dentry *new_dentry)
+		       struct inode *new_dir, struct dentry *new_dentry,
+		       unsigned int flags)
 {
 	const char *old_name = old_dentry->d_name.name;
 	const char *new_name = new_dentry->d_name.name;
@@ -299,6 +300,9 @@
 	int new_length = new_dentry->d_name.len;
 	int error;
 
+	if (flags)
+		return -EINVAL;
+
 	error = venus_rename(old_dir->i_sb, coda_i2f(old_dir),
 			     coda_i2f(new_dir), old_length, new_length,
 			     (const char *) old_name, (const char *)new_name);
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 8415d4f..6e0154e 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -54,7 +54,7 @@
 	ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos);
 	coda_inode->i_size = file_inode(host_file)->i_size;
 	coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
-	coda_inode->i_mtime = coda_inode->i_ctime = CURRENT_TIME_SEC;
+	coda_inode->i_mtime = coda_inode->i_ctime = current_time(coda_inode);
 	inode_unlock(coda_inode);
 	file_end_write(host_file);
 	return ret;
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 57e81cb..71dbe7e 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -271,7 +271,7 @@
 
 	memset(&vattr, 0, sizeof(vattr)); 
 
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	coda_iattr_to_vattr(iattr, &vattr);
 	vattr.va_type = C_VNON; /* cannot set type */
 
diff --git a/fs/compat.c b/fs/compat.c
index be6e48b..bd064a2 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -54,20 +54,6 @@
 #include <asm/ioctls.h>
 #include "internal.h"
 
-int compat_log = 1;
-
-int compat_printk(const char *fmt, ...)
-{
-	va_list ap;
-	int ret;
-	if (!compat_log)
-		return 0;
-	va_start(ap, fmt);
-	ret = vprintk(fmt, ap);
-	va_end(ap);
-	return ret;
-}
-
 /*
  * Not all architectures have sys_utime, so implement this in terms
  * of sys_utimes.
@@ -562,7 +548,7 @@
 		goto out;
 
 	ret = -EINVAL;
-	if (nr_segs > UIO_MAXIOV || nr_segs < 0)
+	if (nr_segs > UIO_MAXIOV)
 		goto out;
 	if (nr_segs > fast_segs) {
 		ret = -ENOMEM;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index c1e9f29..f2d7402 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -1209,6 +1209,8 @@
 COMPATIBLE_IOCTL(WDIOC_KEEPALIVE)
 COMPATIBLE_IOCTL(WDIOC_SETTIMEOUT)
 COMPATIBLE_IOCTL(WDIOC_GETTIMEOUT)
+COMPATIBLE_IOCTL(WDIOC_SETPRETIMEOUT)
+COMPATIBLE_IOCTL(WDIOC_GETPRETIMEOUT)
 /* Big R */
 COMPATIBLE_IOCTL(RNDGETENTCNT)
 COMPATIBLE_IOCTL(RNDADDTOENTCNT)
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 0387968..ad718e5 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -76,7 +76,7 @@
 		sd_iattr->ia_uid = GLOBAL_ROOT_UID;
 		sd_iattr->ia_gid = GLOBAL_ROOT_GID;
 		sd_iattr->ia_atime = sd_iattr->ia_mtime =
-			sd_iattr->ia_ctime = current_fs_time(inode->i_sb);
+			sd_iattr->ia_ctime = current_time(inode);
 		sd->s_iattr = sd_iattr;
 	}
 	/* attributes were changed atleast once in past */
@@ -113,7 +113,7 @@
 {
 	inode->i_mode = mode;
 	inode->i_atime = inode->i_mtime =
-		inode->i_ctime = current_fs_time(inode->i_sb);
+		inode->i_ctime = current_time(inode);
 }
 
 static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
@@ -197,7 +197,7 @@
 		return -ENOMEM;
 
 	p_inode = d_inode(dentry->d_parent);
-	p_inode->i_mtime = p_inode->i_ctime = current_fs_time(p_inode->i_sb);
+	p_inode->i_mtime = p_inode->i_ctime = current_time(p_inode);
 	configfs_set_inode_lock_class(sd, inode);
 
 	init(inode);
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 72361ba..f17fcf8 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -45,7 +45,7 @@
 	if (inode) {
 		inode->i_ino = get_next_ino();
 		inode->i_atime = inode->i_mtime =
-			inode->i_ctime = current_fs_time(sb);
+			inode->i_ctime = current_time(inode);
 	}
 	return inode;
 }
@@ -748,7 +748,7 @@
 	old_name = fsnotify_oldname_init(old_dentry->d_name.name);
 
 	error = simple_rename(d_inode(old_dir), old_dentry, d_inode(new_dir),
-		dentry);
+			      dentry, 0);
 	if (error) {
 		fsnotify_oldname_free(old_name);
 		goto exit;
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 442d1a7..108df2e 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -300,7 +300,7 @@
 	}
 
 	inode->i_ino = 2;
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 
 	mode = S_IFCHR|opts->ptmxmode;
 	init_special_inode(inode, mode, MKDEV(TTYAUX_MAJOR, 2));
@@ -412,7 +412,7 @@
 	if (!inode)
 		goto fail;
 	inode->i_ino = 1;
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
 	inode->i_op = &simple_dir_inode_operations;
 	inode->i_fop = &simple_dir_operations;
@@ -540,7 +540,7 @@
 	inode->i_ino = index + 3;
 	inode->i_uid = opts->setuid ? opts->uid : current_fsuid();
 	inode->i_gid = opts->setgid ? opts->gid : current_fsgid();
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	init_special_inode(inode, S_IFCHR|opts->mode, MKDEV(UNIX98_PTY_SLAVE_MAJOR, index));
 
 	sprintf(s, "%d", index);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 963016c..609998d 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -1656,16 +1656,12 @@
 	mutex_lock(&connections_lock);
 	dlm_allow_conn = 0;
 	foreach_conn(stop_conn);
+	clean_writequeues();
+	foreach_conn(free_conn);
 	mutex_unlock(&connections_lock);
 
 	work_stop();
 
-	mutex_lock(&connections_lock);
-	clean_writequeues();
-
-	foreach_conn(free_conn);
-
-	mutex_unlock(&connections_lock);
 	kmem_cache_destroy(con_cache);
 }
 
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 4ba1547..599a292 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -715,4 +715,6 @@
 int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
 		       loff_t offset);
 
+extern const struct xattr_handler *ecryptfs_xattr_handlers[];
+
 #endif /* #ifndef ECRYPTFS_KERNEL_H */
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 9d153b6..cf390dc 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -577,7 +577,8 @@
 
 static int
 ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
-		struct inode *new_dir, struct dentry *new_dentry)
+		struct inode *new_dir, struct dentry *new_dentry,
+		unsigned int flags)
 {
 	int rc;
 	struct dentry *lower_old_dentry;
@@ -587,6 +588,9 @@
 	struct dentry *trap = NULL;
 	struct inode *target_inode;
 
+	if (flags)
+		return -EINVAL;
+
 	lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
 	lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
 	dget(lower_old_dentry);
@@ -927,7 +931,7 @@
 	}
 	mutex_unlock(&crypt_stat->cs_mutex);
 
-	rc = inode_change_ok(inode, ia);
+	rc = setattr_prepare(dentry, ia);
 	if (rc)
 		goto out;
 	if (ia->ia_valid & ATTR_SIZE) {
@@ -1005,15 +1009,14 @@
 		  const char *name, const void *value,
 		  size_t size, int flags)
 {
-	int rc = 0;
+	int rc;
 	struct dentry *lower_dentry;
 
 	lower_dentry = ecryptfs_dentry_to_lower(dentry);
-	if (!d_inode(lower_dentry)->i_op->setxattr) {
+	if (!(d_inode(lower_dentry)->i_opflags & IOP_XATTR)) {
 		rc = -EOPNOTSUPP;
 		goto out;
 	}
-
 	rc = vfs_setxattr(lower_dentry, name, value, size, flags);
 	if (!rc && inode)
 		fsstack_copy_attr_all(inode, d_inode(lower_dentry));
@@ -1025,15 +1028,14 @@
 ecryptfs_getxattr_lower(struct dentry *lower_dentry, struct inode *lower_inode,
 			const char *name, void *value, size_t size)
 {
-	int rc = 0;
+	int rc;
 
-	if (!lower_inode->i_op->getxattr) {
+	if (!(lower_inode->i_opflags & IOP_XATTR)) {
 		rc = -EOPNOTSUPP;
 		goto out;
 	}
 	inode_lock(lower_inode);
-	rc = lower_inode->i_op->getxattr(lower_dentry, lower_inode,
-					 name, value, size);
+	rc = __vfs_getxattr(lower_dentry, lower_inode, name, value, size);
 	inode_unlock(lower_inode);
 out:
 	return rc;
@@ -1066,19 +1068,22 @@
 	return rc;
 }
 
-static int ecryptfs_removexattr(struct dentry *dentry, const char *name)
+static int ecryptfs_removexattr(struct dentry *dentry, struct inode *inode,
+				const char *name)
 {
-	int rc = 0;
+	int rc;
 	struct dentry *lower_dentry;
+	struct inode *lower_inode;
 
 	lower_dentry = ecryptfs_dentry_to_lower(dentry);
-	if (!d_inode(lower_dentry)->i_op->removexattr) {
+	lower_inode = ecryptfs_inode_to_lower(inode);
+	if (!(lower_inode->i_opflags & IOP_XATTR)) {
 		rc = -EOPNOTSUPP;
 		goto out;
 	}
-	inode_lock(d_inode(lower_dentry));
-	rc = d_inode(lower_dentry)->i_op->removexattr(lower_dentry, name);
-	inode_unlock(d_inode(lower_dentry));
+	inode_lock(lower_inode);
+	rc = __vfs_removexattr(lower_dentry, name);
+	inode_unlock(lower_inode);
 out:
 	return rc;
 }
@@ -1089,10 +1094,7 @@
 	.permission = ecryptfs_permission,
 	.setattr = ecryptfs_setattr,
 	.getattr = ecryptfs_getattr_link,
-	.setxattr = ecryptfs_setxattr,
-	.getxattr = ecryptfs_getxattr,
 	.listxattr = ecryptfs_listxattr,
-	.removexattr = ecryptfs_removexattr
 };
 
 const struct inode_operations ecryptfs_dir_iops = {
@@ -1107,18 +1109,43 @@
 	.rename = ecryptfs_rename,
 	.permission = ecryptfs_permission,
 	.setattr = ecryptfs_setattr,
-	.setxattr = ecryptfs_setxattr,
-	.getxattr = ecryptfs_getxattr,
 	.listxattr = ecryptfs_listxattr,
-	.removexattr = ecryptfs_removexattr
 };
 
 const struct inode_operations ecryptfs_main_iops = {
 	.permission = ecryptfs_permission,
 	.setattr = ecryptfs_setattr,
 	.getattr = ecryptfs_getattr,
-	.setxattr = ecryptfs_setxattr,
-	.getxattr = ecryptfs_getxattr,
 	.listxattr = ecryptfs_listxattr,
-	.removexattr = ecryptfs_removexattr
+};
+
+static int ecryptfs_xattr_get(const struct xattr_handler *handler,
+			      struct dentry *dentry, struct inode *inode,
+			      const char *name, void *buffer, size_t size)
+{
+	return ecryptfs_getxattr(dentry, inode, name, buffer, size);
+}
+
+static int ecryptfs_xattr_set(const struct xattr_handler *handler,
+			      struct dentry *dentry, struct inode *inode,
+			      const char *name, const void *value, size_t size,
+			      int flags)
+{
+	if (value)
+		return ecryptfs_setxattr(dentry, inode, name, value, size, flags);
+	else {
+		BUG_ON(flags != XATTR_REPLACE);
+		return ecryptfs_removexattr(dentry, inode, name);
+	}
+}
+
+const struct xattr_handler ecryptfs_xattr_handler = {
+	.prefix = "",  /* match anything */
+	.get = ecryptfs_xattr_get,
+	.set = ecryptfs_xattr_set,
+};
+
+const struct xattr_handler *ecryptfs_xattr_handlers[] = {
+	&ecryptfs_xattr_handler,
+	NULL
 };
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 6120044..151872d 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -529,6 +529,7 @@
 	/* ->kill_sb() will take care of sbi after that point */
 	sbi = NULL;
 	s->s_op = &ecryptfs_sops;
+	s->s_xattr = ecryptfs_xattr_handlers;
 	s->s_d_op = &ecryptfs_dops;
 
 	err = "Reading sb failed";
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 9c3437c..1f0c471 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -32,6 +32,7 @@
 #include <linux/file.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
+#include <linux/xattr.h>
 #include <asm/unaligned.h>
 #include "ecryptfs_kernel.h"
 
@@ -422,7 +423,7 @@
 	struct inode *lower_inode = d_inode(lower_dentry);
 	int rc;
 
-	if (!lower_inode->i_op->getxattr || !lower_inode->i_op->setxattr) {
+	if (!(lower_inode->i_opflags & IOP_XATTR)) {
 		printk(KERN_WARNING
 		       "No support for setting xattr in lower filesystem\n");
 		rc = -ENOSYS;
@@ -436,15 +437,13 @@
 		goto out;
 	}
 	inode_lock(lower_inode);
-	size = lower_inode->i_op->getxattr(lower_dentry, lower_inode,
-					   ECRYPTFS_XATTR_NAME,
-					   xattr_virt, PAGE_SIZE);
+	size = __vfs_getxattr(lower_dentry, lower_inode, ECRYPTFS_XATTR_NAME,
+			      xattr_virt, PAGE_SIZE);
 	if (size < 0)
 		size = 8;
 	put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt);
-	rc = lower_inode->i_op->setxattr(lower_dentry, lower_inode,
-					 ECRYPTFS_XATTR_NAME,
-					 xattr_virt, size, 0);
+	rc = __vfs_setxattr(lower_dentry, lower_inode, ECRYPTFS_XATTR_NAME,
+			    xattr_virt, size, 0);
 	inode_unlock(lower_inode);
 	if (rc)
 		printk(KERN_ERR "Error whilst attempting to write inode size "
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index cbb50ca..71fcccc 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -24,7 +24,7 @@
 	if (inode) {
 		inode->i_ino = get_next_ino();
 		inode->i_mode = mode;
-		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 		inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
 		switch (mode & S_IFMT) {
 		case S_IFREG:
diff --git a/fs/exec.c b/fs/exec.c
index 6fcfb3f..4e497b9 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -191,6 +191,7 @@
 {
 	struct page *page;
 	int ret;
+	unsigned int gup_flags = FOLL_FORCE;
 
 #ifdef CONFIG_STACK_GROWSUP
 	if (write) {
@@ -199,12 +200,16 @@
 			return NULL;
 	}
 #endif
+
+	if (write)
+		gup_flags |= FOLL_WRITE;
+
 	/*
 	 * We are doing an exec().  'current' is the process
 	 * doing the exec and bprm->mm is the new process's mm.
 	 */
-	ret = get_user_pages_remote(current, bprm->mm, pos, 1, write,
-			1, &page, NULL);
+	ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
+			&page, NULL);
 	if (ret <= 0)
 		return NULL;
 
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index f69a1b5..79101651 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -416,7 +416,7 @@
 	if (likely(!err))
 		err = exofs_commit_chunk(page, pos, len);
 	exofs_put_page(page);
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	mark_inode_dirty(dir);
 	return err;
 }
@@ -503,7 +503,7 @@
 	de->inode_no = cpu_to_le64(inode->i_ino);
 	exofs_set_de_type(de, inode);
 	err = exofs_commit_chunk(page, pos, rec_len);
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	mark_inode_dirty(dir);
 	sbi->s_numfiles++;
 
@@ -554,7 +554,7 @@
 	dir->inode_no = 0;
 	if (likely(!err))
 		err = exofs_commit_chunk(page, pos, to - from);
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+	inode->i_ctime = inode->i_mtime = current_time(inode);
 	mark_inode_dirty(inode);
 	sbi->s_numfiles--;
 out:
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 9dc4c6d..d8072bc 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -778,7 +778,7 @@
 fail:
 	EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
 		     inode->i_ino, page->index, ret);
-	set_bit(AS_EIO, &page->mapping->flags);
+	mapping_set_error(page->mapping, -EIO);
 	unlock_page(page);
 	return ret;
 }
@@ -1007,7 +1007,7 @@
 	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
 	int ret;
 
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 
 	ret = ore_truncate(&sbi->layout, &oi->oc, (u64)newsize);
 	if (likely(!ret))
@@ -1034,7 +1034,7 @@
 	if (unlikely(error))
 		return error;
 
-	error = inode_change_ok(inode, iattr);
+	error = setattr_prepare(dentry, iattr);
 	if (unlikely(error))
 		return error;
 
@@ -1313,7 +1313,7 @@
 	inode_init_owner(inode, dir, mode);
 	inode->i_ino = sbi->s_nextid++;
 	inode->i_blkbits = EXOFS_BLKSHIFT;
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	oi->i_commit_size = inode->i_size = 0;
 	spin_lock(&sbi->s_next_gen_lock);
 	inode->i_generation = sbi->s_next_generation++;
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index 622a686..7295cd7 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -142,7 +142,7 @@
 {
 	struct inode *inode = d_inode(old_dentry);
 
-	inode->i_ctime = CURRENT_TIME;
+	inode->i_ctime = current_time(inode);
 	inode_inc_link_count(inode);
 	ihold(inode);
 
@@ -227,7 +227,8 @@
 }
 
 static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
-		struct inode *new_dir, struct dentry *new_dentry)
+			struct inode *new_dir, struct dentry *new_dentry,
+			unsigned int flags)
 {
 	struct inode *old_inode = d_inode(old_dentry);
 	struct inode *new_inode = d_inode(new_dentry);
@@ -237,6 +238,9 @@
 	struct exofs_dir_entry *old_de;
 	int err = -ENOENT;
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	old_de = exofs_find_entry(old_dir, old_dentry, &old_page);
 	if (!old_de)
 		goto out;
@@ -261,7 +265,7 @@
 		if (!new_de)
 			goto out_dir;
 		err = exofs_set_link(new_dir, new_de, new_page, old_inode);
-		new_inode->i_ctime = CURRENT_TIME;
+		new_inode->i_ctime = current_time(new_inode);
 		if (dir_de)
 			drop_nlink(new_inode);
 		inode_dec_link_count(new_inode);
@@ -275,7 +279,7 @@
 			inode_inc_link_count(new_dir);
 	}
 
-	old_inode->i_ctime = CURRENT_TIME;
+	old_inode->i_ctime = current_time(old_inode);
 
 	exofs_delete_entry(old_de, old_page);
 	mark_inode_dirty(old_inode);
@@ -310,7 +314,7 @@
 	.mkdir  	= exofs_mkdir,
 	.rmdir  	= exofs_rmdir,
 	.mknod  	= exofs_mknod,
-	.rename 	= exofs_rename,
+	.rename		= exofs_rename,
 	.setattr	= exofs_setattr,
 };
 
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 207ba8d..a4b531b 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -428,10 +428,10 @@
 	if (!nop || !nop->fh_to_dentry)
 		return ERR_PTR(-ESTALE);
 	result = nop->fh_to_dentry(mnt->mnt_sb, fid, fh_len, fileid_type);
-	if (!result)
-		result = ERR_PTR(-ESTALE);
-	if (IS_ERR(result))
-		return result;
+	if (PTR_ERR(result) == -ENOMEM)
+		return ERR_CAST(result);
+	if (IS_ERR_OR_NULL(result))
+		return ERR_PTR(-ESTALE);
 
 	if (d_is_dir(result)) {
 		/*
@@ -541,6 +541,8 @@
 
  err_result:
 	dput(result);
+	if (err != -ENOMEM)
+		err = -ESTALE;
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(exportfs_decode_fh);
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 42f1d18..79dafa7 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -190,15 +190,11 @@
 		case ACL_TYPE_ACCESS:
 			name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
 			if (acl) {
-				error = posix_acl_equiv_mode(acl, &inode->i_mode);
-				if (error < 0)
+				error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+				if (error)
 					return error;
-				else {
-					inode->i_ctime = CURRENT_TIME_SEC;
-					mark_inode_dirty(inode);
-					if (error == 0)
-						acl = NULL;
-				}
+				inode->i_ctime = current_time(inode);
+				mark_inode_dirty(inode);
 			}
 			break;
 
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 61ad490..d9650c9 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -471,7 +471,7 @@
 	err = ext2_commit_chunk(page, pos, len);
 	ext2_put_page(page);
 	if (update_times)
-		dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+		dir->i_mtime = dir->i_ctime = current_time(dir);
 	EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
 	mark_inode_dirty(dir);
 }
@@ -561,7 +561,7 @@
 	de->inode = cpu_to_le32(inode->i_ino);
 	ext2_set_de_type (de, inode);
 	err = ext2_commit_chunk(page, pos, rec_len);
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
 	mark_inode_dirty(dir);
 	/* OFFSET_CACHE */
@@ -610,7 +610,7 @@
 		pde->rec_len = ext2_rec_len_to_disk(to - from);
 	dir->inode = 0;
 	err = ext2_commit_chunk(page, pos, to - from);
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+	inode->i_ctime = inode->i_mtime = current_time(inode);
 	EXT2_I(inode)->i_flags &= ~EXT2_BTREE_FL;
 	mark_inode_dirty(inode);
 out:
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 0ca363d..a0e1478 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -241,10 +241,7 @@
 
 const struct inode_operations ext2_file_inode_operations = {
 #ifdef CONFIG_EXT2_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ext2_listxattr,
-	.removexattr	= generic_removexattr,
 #endif
 	.setattr	= ext2_setattr,
 	.get_acl	= ext2_get_acl,
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 04e73a9..395fc074c 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -556,7 +556,7 @@
 
 	inode->i_ino = ino;
 	inode->i_blocks = 0;
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	memset(ei->i_data, 0, sizeof(ei->i_data));
 	ei->i_flags =
 		ext2_mask_flags(mode, EXT2_I(dir)->i_flags & EXT2_FL_INHERITED);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 1e72d42..41b8b44 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -595,7 +595,7 @@
 	if (where->bh)
 		mark_buffer_dirty_inode(where->bh, inode);
 
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	mark_inode_dirty(inode);
 }
 
@@ -622,7 +622,7 @@
 			   u32 *bno, bool *new, bool *boundary,
 			   int create)
 {
-	int err = -EIO;
+	int err;
 	int offsets[4];
 	Indirect chain[4];
 	Indirect *partial;
@@ -639,7 +639,7 @@
 	depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
 
 	if (depth == 0)
-		return (err);
+		return -EIO;
 
 	partial = ext2_get_branch(inode, depth, offsets, chain, &err);
 	/* Simplest case - block found, no allocation needed */
@@ -761,7 +761,6 @@
 	ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
 	mutex_unlock(&ei->truncate_mutex);
 got_it:
-	*bno = le32_to_cpu(chain[depth-1].key);
 	if (count > blocks_to_boundary)
 		*boundary = true;
 	err = count;
@@ -772,6 +771,8 @@
 		brelse(partial->bh);
 		partial--;
 	}
+	if (err > 0)
+		*bno = le32_to_cpu(chain[depth-1].key);
 	return err;
 }
 
@@ -1308,7 +1309,7 @@
 	__ext2_truncate_blocks(inode, newsize);
 	dax_sem_up_write(EXT2_I(inode));
 
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 	if (inode_needs_sync(inode)) {
 		sync_mapping_buffers(inode->i_mapping);
 		sync_inode_metadata(inode, 1);
@@ -1652,7 +1653,7 @@
 	struct inode *inode = d_inode(dentry);
 	int error;
 
-	error = inode_change_ok(inode, iattr);
+	error = setattr_prepare(dentry, iattr);
 	if (error)
 		return error;
 
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index b386af2..9d61742 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -79,7 +79,7 @@
 		ei->i_flags = flags;
 
 		ext2_set_inode_flags(inode);
-		inode->i_ctime = CURRENT_TIME_SEC;
+		inode->i_ctime = current_time(inode);
 		inode_unlock(inode);
 
 		mark_inode_dirty(inode);
@@ -103,7 +103,7 @@
 		}
 
 		inode_lock(inode);
-		inode->i_ctime = CURRENT_TIME_SEC;
+		inode->i_ctime = current_time(inode);
 		inode->i_generation = generation;
 		inode_unlock(inode);
 
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index d446203..814e405 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -221,7 +221,7 @@
 	if (err)
 		return err;
 
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	inode_inc_link_count(inode);
 	ihold(inode);
 
@@ -328,7 +328,8 @@
 }
 
 static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
-	struct inode * new_dir,	struct dentry * new_dentry )
+			struct inode * new_dir,	struct dentry * new_dentry,
+			unsigned int flags)
 {
 	struct inode * old_inode = d_inode(old_dentry);
 	struct inode * new_inode = d_inode(new_dentry);
@@ -338,6 +339,9 @@
 	struct ext2_dir_entry_2 * old_de;
 	int err;
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	err = dquot_initialize(old_dir);
 	if (err)
 		goto out;
@@ -372,7 +376,7 @@
 		if (!new_de)
 			goto out_dir;
 		ext2_set_link(new_dir, new_de, new_page, old_inode, 1);
-		new_inode->i_ctime = CURRENT_TIME_SEC;
+		new_inode->i_ctime = current_time(new_inode);
 		if (dir_de)
 			drop_nlink(new_inode);
 		inode_dec_link_count(new_inode);
@@ -388,7 +392,7 @@
 	 * Like most other Unix systems, set the ctime for inodes on a
  	 * rename.
 	 */
-	old_inode->i_ctime = CURRENT_TIME_SEC;
+	old_inode->i_ctime = current_time(old_inode);
 	mark_inode_dirty(old_inode);
 
 	ext2_delete_entry (old_de, old_page);
@@ -428,10 +432,7 @@
 	.mknod		= ext2_mknod,
 	.rename		= ext2_rename,
 #ifdef CONFIG_EXT2_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ext2_listxattr,
-	.removexattr	= generic_removexattr,
 #endif
 	.setattr	= ext2_setattr,
 	.get_acl	= ext2_get_acl,
@@ -441,10 +442,7 @@
 
 const struct inode_operations ext2_special_inode_operations = {
 #ifdef CONFIG_EXT2_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ext2_listxattr,
-	.removexattr	= generic_removexattr,
 #endif
 	.setattr	= ext2_setattr,
 	.get_acl	= ext2_get_acl,
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 1d93795..6cb042b 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1543,7 +1543,7 @@
 	if (inode->i_size < off+len-towrite)
 		i_size_write(inode, off+len-towrite);
 	inode->i_version++;
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 	mark_inode_dirty(inode);
 	return len - towrite;
 }
diff --git a/fs/ext2/symlink.c b/fs/ext2/symlink.c
index 3495d8a..8437b19 100644
--- a/fs/ext2/symlink.c
+++ b/fs/ext2/symlink.c
@@ -25,10 +25,7 @@
 	.get_link	= page_get_link,
 	.setattr	= ext2_setattr,
 #ifdef CONFIG_EXT2_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ext2_listxattr,
-	.removexattr	= generic_removexattr,
 #endif
 };
  
@@ -37,9 +34,6 @@
 	.get_link	= simple_get_link,
 	.setattr	= ext2_setattr,
 #ifdef CONFIG_EXT2_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ext2_listxattr,
-	.removexattr	= generic_removexattr,
 #endif
 };
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index b7f896f..fbdb8f1 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -691,7 +691,7 @@
 
 	/* Update the inode. */
 	EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	if (IS_SYNC(inode)) {
 		error = sync_inode_metadata(inode, 1);
 		/* In case sync failed due to ENOSPC the inode was actually
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index c6601a4..dfa5199 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -193,15 +193,11 @@
 	case ACL_TYPE_ACCESS:
 		name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
 		if (acl) {
-			error = posix_acl_equiv_mode(acl, &inode->i_mode);
-			if (error < 0)
+			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+			if (error)
 				return error;
-			else {
-				inode->i_ctime = ext4_current_time(inode);
-				ext4_mark_inode_dirty(handle, inode);
-				if (error == 0)
-					acl = NULL;
-			}
+			inode->i_ctime = ext4_current_time(inode);
+			ext4_mark_inode_dirty(handle, inode);
 		}
 		break;
 
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 36d49cf..2a822d3 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -706,10 +706,7 @@
 const struct inode_operations ext4_file_inode_operations = {
 	.setattr	= ext4_setattr,
 	.getattr	= ext4_getattr,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ext4_listxattr,
-	.removexattr	= generic_removexattr,
 	.get_acl	= ext4_get_acl,
 	.set_acl	= ext4_set_acl,
 	.fiemap		= ext4_fiemap,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index cd91882..9c06472 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5078,7 +5078,7 @@
 	int orphan = 0;
 	const unsigned int ia_valid = attr->ia_valid;
 
-	error = inode_change_ok(inode, attr);
+	error = setattr_prepare(dentry, attr);
 	if (error)
 		return error;
 
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index c344b81..f92f10d 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -3878,12 +3878,9 @@
 	.rmdir		= ext4_rmdir,
 	.mknod		= ext4_mknod,
 	.tmpfile	= ext4_tmpfile,
-	.rename2	= ext4_rename2,
+	.rename		= ext4_rename2,
 	.setattr	= ext4_setattr,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ext4_listxattr,
-	.removexattr	= generic_removexattr,
 	.get_acl	= ext4_get_acl,
 	.set_acl	= ext4_set_acl,
 	.fiemap         = ext4_fiemap,
@@ -3891,10 +3888,7 @@
 
 const struct inode_operations ext4_special_inode_operations = {
 	.setattr	= ext4_setattr,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ext4_listxattr,
-	.removexattr	= generic_removexattr,
 	.get_acl	= ext4_get_acl,
 	.set_acl	= ext4_set_acl,
 };
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index b4cbee9..0094923 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -88,7 +88,7 @@
 
 		if (bio->bi_error) {
 			SetPageError(page);
-			set_bit(AS_EIO, &page->mapping->flags);
+			mapping_set_error(page->mapping, -EIO);
 		}
 		bh = head = page_buffers(page);
 		/*
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index fdf1c61..557b3b0 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -86,28 +86,19 @@
 	.readlink	= generic_readlink,
 	.get_link	= ext4_encrypted_get_link,
 	.setattr	= ext4_setattr,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ext4_listxattr,
-	.removexattr	= generic_removexattr,
 };
 
 const struct inode_operations ext4_symlink_inode_operations = {
 	.readlink	= generic_readlink,
 	.get_link	= page_get_link,
 	.setattr	= ext4_setattr,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ext4_listxattr,
-	.removexattr	= generic_removexattr,
 };
 
 const struct inode_operations ext4_fast_symlink_inode_operations = {
 	.readlink	= generic_readlink,
 	.get_link	= simple_get_link,
 	.setattr	= ext4_setattr,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ext4_listxattr,
-	.removexattr	= generic_removexattr,
 };
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 1e29630..6fe23af 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -212,12 +212,10 @@
 	case ACL_TYPE_ACCESS:
 		name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
 		if (acl) {
-			error = posix_acl_equiv_mode(acl, &inode->i_mode);
-			if (error < 0)
+			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+			if (error)
 				return error;
 			set_acl_inode(inode, inode->i_mode);
-			if (error == 0)
-				acl = NULL;
 		}
 		break;
 
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 0d0177c..9ae194f 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -75,7 +75,7 @@
 		fscrypt_pullback_bio_page(&page, true);
 
 		if (unlikely(bio->bi_error)) {
-			set_bit(AS_EIO, &page->mapping->flags);
+			mapping_set_error(page->mapping, -EIO);
 			f2fs_stop_checkpoint(sbi, true);
 		}
 		end_page_writeback(page);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 12b5836..369f451 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -312,7 +312,7 @@
 	f2fs_dentry_kunmap(dir, page);
 	set_page_dirty(page);
 
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	f2fs_mark_inode_dirty_sync(dir);
 	f2fs_put_page(page, 1);
 }
@@ -465,7 +465,7 @@
 			f2fs_i_links_write(dir, true);
 		clear_inode_flag(inode, FI_NEW_INODE);
 	}
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	f2fs_mark_inode_dirty_sync(dir);
 
 	if (F2FS_I(dir)->i_current_depth != current_depth)
@@ -683,7 +683,7 @@
 
 	if (S_ISDIR(inode->i_mode))
 		f2fs_i_links_write(dir, false);
-	inode->i_ctime = CURRENT_TIME;
+	inode->i_ctime = current_time(inode);
 
 	f2fs_i_links_write(inode, false);
 	if (S_ISDIR(inode->i_mode)) {
@@ -730,7 +730,7 @@
 	kunmap(page); /* kunmap - pair of f2fs_find_entry */
 	set_page_dirty(page);
 
-	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+	dir->i_ctime = dir->i_mtime = current_time(dir);
 	f2fs_mark_inode_dirty_sync(dir);
 
 	if (inode)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index f8b4fe0..c786507 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -631,7 +631,7 @@
 	if (err)
 		return err;
 
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 	f2fs_mark_inode_dirty_sync(inode);
 	return 0;
 }
@@ -680,7 +680,7 @@
 	struct inode *inode = d_inode(dentry);
 	int err;
 
-	err = inode_change_ok(inode, attr);
+	err = setattr_prepare(dentry, attr);
 	if (err)
 		return err;
 
@@ -708,7 +708,7 @@
 				if (err)
 					return err;
 			}
-			inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+			inode->i_mtime = inode->i_ctime = current_time(inode);
 		}
 	}
 
@@ -732,10 +732,7 @@
 	.get_acl	= f2fs_get_acl,
 	.set_acl	= f2fs_set_acl,
 #ifdef CONFIG_F2FS_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= f2fs_listxattr,
-	.removexattr	= generic_removexattr,
 #endif
 	.fiemap		= f2fs_fiemap,
 };
@@ -1395,7 +1392,7 @@
 	}
 
 	if (!ret) {
-		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		inode->i_mtime = inode->i_ctime = current_time(inode);
 		f2fs_mark_inode_dirty_sync(inode);
 		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
 	}
@@ -1487,7 +1484,7 @@
 	fi->i_flags = flags;
 	inode_unlock(inode);
 
-	inode->i_ctime = CURRENT_TIME;
+	inode->i_ctime = current_time(inode);
 	f2fs_set_inode_flags(inode);
 out:
 	mnt_drop_write_file(filp);
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 93985c6..6f14ee9 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -852,16 +852,16 @@
 
 	for (segno = start_segno; segno < end_segno; segno++) {
 
-		if (get_valid_blocks(sbi, segno, 1) == 0 ||
-					unlikely(f2fs_cp_error(sbi)))
-			goto next;
-
 		/* find segment summary of victim */
 		sum_page = find_get_page(META_MAPPING(sbi),
 					GET_SUM_BLOCK(sbi, segno));
-		f2fs_bug_on(sbi, !PageUptodate(sum_page));
 		f2fs_put_page(sum_page, 0);
 
+		if (get_valid_blocks(sbi, segno, 1) == 0 ||
+				!PageUptodate(sum_page) ||
+				unlikely(f2fs_cp_error(sbi)))
+			goto next;
+
 		sum = page_address(sum_page);
 		f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
 
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 34234d8..5f1a67f 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -572,7 +572,7 @@
 	set_page_dirty(page);
 	f2fs_put_page(page, 1);
 
-	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+	dir->i_ctime = dir->i_mtime = current_time(dir);
 	f2fs_mark_inode_dirty_sync(dir);
 
 	if (inode)
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 5625b87..489fa0d 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -46,7 +46,7 @@
 
 	inode->i_ino = ino;
 	inode->i_blocks = 0;
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	inode->i_generation = sbi->s_next_generation++;
 
 	err = insert_inode_locked(inode);
@@ -182,7 +182,7 @@
 
 	f2fs_balance_fs(sbi, true);
 
-	inode->i_ctime = CURRENT_TIME;
+	inode->i_ctime = current_time(inode);
 	ihold(inode);
 
 	set_inode_flag(inode, FI_INC_LINK);
@@ -723,7 +723,7 @@
 
 		f2fs_set_link(new_dir, new_entry, new_page, old_inode);
 
-		new_inode->i_ctime = CURRENT_TIME;
+		new_inode->i_ctime = current_time(new_inode);
 		down_write(&F2FS_I(new_inode)->i_sem);
 		if (old_dir_entry)
 			f2fs_i_links_write(new_inode, false);
@@ -777,7 +777,7 @@
 		file_set_enc_name(old_inode);
 	up_write(&F2FS_I(old_inode)->i_sem);
 
-	old_inode->i_ctime = CURRENT_TIME;
+	old_inode->i_ctime = current_time(old_inode);
 	f2fs_mark_inode_dirty_sync(old_inode);
 
 	f2fs_delete_entry(old_entry, old_page, old_dir, NULL);
@@ -932,7 +932,7 @@
 	file_lost_pino(old_inode);
 	up_write(&F2FS_I(old_inode)->i_sem);
 
-	old_dir->i_ctime = CURRENT_TIME;
+	old_dir->i_ctime = current_time(old_dir);
 	if (old_nlink) {
 		down_write(&F2FS_I(old_dir)->i_sem);
 		f2fs_i_links_write(old_dir, old_nlink > 0);
@@ -947,7 +947,7 @@
 	file_lost_pino(new_inode);
 	up_write(&F2FS_I(new_inode)->i_sem);
 
-	new_dir->i_ctime = CURRENT_TIME;
+	new_dir->i_ctime = current_time(new_dir);
 	if (new_nlink) {
 		down_write(&F2FS_I(new_dir)->i_sem);
 		f2fs_i_links_write(new_dir, new_nlink > 0);
@@ -1080,10 +1080,7 @@
 	.getattr	= f2fs_getattr,
 	.setattr	= f2fs_setattr,
 #ifdef CONFIG_F2FS_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= f2fs_listxattr,
-	.removexattr	= generic_removexattr,
 #endif
 };
 
@@ -1096,17 +1093,14 @@
 	.mkdir		= f2fs_mkdir,
 	.rmdir		= f2fs_rmdir,
 	.mknod		= f2fs_mknod,
-	.rename2	= f2fs_rename2,
+	.rename		= f2fs_rename2,
 	.tmpfile	= f2fs_tmpfile,
 	.getattr	= f2fs_getattr,
 	.setattr	= f2fs_setattr,
 	.get_acl	= f2fs_get_acl,
 	.set_acl	= f2fs_set_acl,
 #ifdef CONFIG_F2FS_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= f2fs_listxattr,
-	.removexattr	= generic_removexattr,
 #endif
 };
 
@@ -1116,10 +1110,7 @@
 	.getattr	= f2fs_getattr,
 	.setattr	= f2fs_setattr,
 #ifdef CONFIG_F2FS_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= f2fs_listxattr,
-	.removexattr	= generic_removexattr,
 #endif
 };
 
@@ -1129,9 +1120,6 @@
 	.get_acl	= f2fs_get_acl,
 	.set_acl	= f2fs_set_acl,
 #ifdef CONFIG_F2FS_FS_XATTR
-	.setxattr       = generic_setxattr,
-	.getxattr       = generic_getxattr,
 	.listxattr	= f2fs_listxattr,
-	.removexattr    = generic_removexattr,
 #endif
 };
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 8831035..01177ec 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1525,7 +1525,7 @@
 {
 	pgoff_t index = 0, end = ULONG_MAX;
 	struct pagevec pvec;
-	int ret2 = 0, ret = 0;
+	int ret2, ret = 0;
 
 	pagevec_init(&pvec, 0);
 
@@ -1554,10 +1554,7 @@
 		cond_resched();
 	}
 
-	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
-		ret2 = -ENOSPC;
-	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
-		ret2 = -EIO;
+	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
 	if (!ret)
 		ret = ret2;
 	return ret;
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 1f74876..3e1c028 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -548,7 +548,7 @@
 
 	if (is_inode_flag_set(inode, FI_ACL_MODE)) {
 		inode->i_mode = F2FS_I(inode)->i_acl_mode;
-		inode->i_ctime = CURRENT_TIME;
+		inode->i_ctime = current_time(inode);
 		clear_inode_flag(inode, FI_ACL_MODE);
 	}
 	if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 663e428..81cecbe 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -1071,7 +1071,7 @@
 		}
 	}
 
-	dir->i_mtime = dir->i_atime = CURRENT_TIME_SEC;
+	dir->i_mtime = dir->i_atime = current_time(dir);
 	if (IS_DIRSYNC(dir))
 		(void)fat_sync_inode(dir);
 	else
diff --git a/fs/fat/file.c b/fs/fat/file.c
index f701856..3d04b12 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -63,7 +63,7 @@
 
 	/* Equivalent to a chmod() */
 	ia.ia_valid = ATTR_MODE | ATTR_CTIME;
-	ia.ia_ctime = current_fs_time(inode->i_sb);
+	ia.ia_ctime = current_time(inode);
 	if (is_dir)
 		ia.ia_mode = fat_make_mode(sbi, attr, S_IRWXUGO);
 	else {
@@ -194,7 +194,7 @@
 	if (err)
 		goto out;
 
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+	inode->i_ctime = inode->i_mtime = current_time(inode);
 	mark_inode_dirty(inode);
 	if (IS_SYNC(inode)) {
 		int err2;
@@ -297,7 +297,7 @@
 		MSDOS_I(inode)->i_logstart = 0;
 	}
 	MSDOS_I(inode)->i_attrs |= ATTR_ARCH;
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+	inode->i_ctime = inode->i_mtime = current_time(inode);
 	if (wait) {
 		err = fat_sync_inode(inode);
 		if (err) {
@@ -450,7 +450,7 @@
 			attr->ia_valid &= ~TIMES_SET_FLAGS;
 	}
 
-	error = inode_change_ok(inode, attr);
+	error = setattr_prepare(dentry, attr);
 	attr->ia_valid = ia_valid;
 	if (error) {
 		if (sbi->options.quiet)
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index da04c02..338d2f7 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -237,7 +237,7 @@
 	if (err < len)
 		fat_write_failed(mapping, pos + len);
 	if (!(err < 0) && !(MSDOS_I(inode)->i_attrs & ATTR_ARCH)) {
-		inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+		inode->i_mtime = inode->i_ctime = current_time(inode);
 		MSDOS_I(inode)->i_attrs |= ATTR_ARCH;
 		mark_inode_dirty(inode);
 	}
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 664655b..7d6a105 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -283,7 +283,7 @@
 		goto out;
 	}
 
-	ts = CURRENT_TIME_SEC;
+	ts = current_time(dir);
 	err = msdos_add_entry(dir, msdos_name, 0, is_hid, 0, &ts, &sinfo);
 	if (err)
 		goto out;
@@ -330,7 +330,7 @@
 	drop_nlink(dir);
 
 	clear_nlink(inode);
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	fat_detach(inode);
 out:
 	mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -364,7 +364,7 @@
 		goto out;
 	}
 
-	ts = CURRENT_TIME_SEC;
+	ts = current_time(dir);
 	cluster = fat_alloc_new_dir(dir, &ts);
 	if (cluster < 0) {
 		err = cluster;
@@ -416,7 +416,7 @@
 	if (err)
 		goto out;
 	clear_nlink(inode);
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	fat_detach(inode);
 out:
 	mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -481,7 +481,7 @@
 				mark_inode_dirty(old_inode);
 
 			old_dir->i_version++;
-			old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
+			old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
 			if (IS_DIRSYNC(old_dir))
 				(void)fat_sync_inode(old_dir);
 			else
@@ -490,7 +490,7 @@
 		}
 	}
 
-	ts = CURRENT_TIME_SEC;
+	ts = current_time(old_inode);
 	if (new_inode) {
 		if (err)
 			goto out;
@@ -596,12 +596,16 @@
 
 /***** Rename, a wrapper for rename_same_dir & rename_diff_dir */
 static int msdos_rename(struct inode *old_dir, struct dentry *old_dentry,
-			struct inode *new_dir, struct dentry *new_dentry)
+			struct inode *new_dir, struct dentry *new_dentry,
+			unsigned int flags)
 {
 	struct super_block *sb = old_dir->i_sb;
 	unsigned char old_msdos_name[MSDOS_NAME], new_msdos_name[MSDOS_NAME];
 	int err, is_hid;
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	mutex_lock(&MSDOS_SB(sb)->s_lock);
 
 	err = msdos_format_name(old_dentry->d_name.name,
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 92b7363..6a7152d 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -21,6 +21,17 @@
 #include <linux/namei.h>
 #include "fat.h"
 
+static inline unsigned long vfat_d_version(struct dentry *dentry)
+{
+	return (unsigned long) dentry->d_fsdata;
+}
+
+static inline void vfat_d_version_set(struct dentry *dentry,
+				      unsigned long version)
+{
+	dentry->d_fsdata = (void *) version;
+}
+
 /*
  * If new entry was created in the parent, it could create the 8.3
  * alias (the shortname of logname).  So, the parent may have the
@@ -33,7 +44,7 @@
 {
 	int ret = 1;
 	spin_lock(&dentry->d_lock);
-	if (dentry->d_time != d_inode(dentry->d_parent)->i_version)
+	if (vfat_d_version(dentry) != d_inode(dentry->d_parent)->i_version)
 		ret = 0;
 	spin_unlock(&dentry->d_lock);
 	return ret;
@@ -759,7 +770,7 @@
 out:
 	mutex_unlock(&MSDOS_SB(sb)->s_lock);
 	if (!inode)
-		dentry->d_time = dir->i_version;
+		vfat_d_version_set(dentry, dir->i_version);
 	return d_splice_alias(inode, dentry);
 error:
 	mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -777,7 +788,7 @@
 
 	mutex_lock(&MSDOS_SB(sb)->s_lock);
 
-	ts = CURRENT_TIME_SEC;
+	ts = current_time(dir);
 	err = vfat_add_entry(dir, &dentry->d_name, 0, 0, &ts, &sinfo);
 	if (err)
 		goto out;
@@ -821,9 +832,9 @@
 	drop_nlink(dir);
 
 	clear_nlink(inode);
-	inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
+	inode->i_mtime = inode->i_atime = current_time(inode);
 	fat_detach(inode);
-	dentry->d_time = dir->i_version;
+	vfat_d_version_set(dentry, dir->i_version);
 out:
 	mutex_unlock(&MSDOS_SB(sb)->s_lock);
 
@@ -847,9 +858,9 @@
 	if (err)
 		goto out;
 	clear_nlink(inode);
-	inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
+	inode->i_mtime = inode->i_atime = current_time(inode);
 	fat_detach(inode);
-	dentry->d_time = dir->i_version;
+	vfat_d_version_set(dentry, dir->i_version);
 out:
 	mutex_unlock(&MSDOS_SB(sb)->s_lock);
 
@@ -866,7 +877,7 @@
 
 	mutex_lock(&MSDOS_SB(sb)->s_lock);
 
-	ts = CURRENT_TIME_SEC;
+	ts = current_time(dir);
 	cluster = fat_alloc_new_dir(dir, &ts);
 	if (cluster < 0) {
 		err = cluster;
@@ -903,7 +914,8 @@
 }
 
 static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
-		       struct inode *new_dir, struct dentry *new_dentry)
+		       struct inode *new_dir, struct dentry *new_dentry,
+		       unsigned int flags)
 {
 	struct buffer_head *dotdot_bh;
 	struct msdos_dir_entry *dotdot_de;
@@ -914,6 +926,9 @@
 	int err, is_dir, update_dotdot, corrupt = 0;
 	struct super_block *sb = old_dir->i_sb;
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
 	old_inode = d_inode(old_dentry);
 	new_inode = d_inode(new_dentry);
@@ -931,7 +946,7 @@
 		}
 	}
 
-	ts = CURRENT_TIME_SEC;
+	ts = current_time(old_dir);
 	if (new_inode) {
 		if (is_dir) {
 			err = fat_dir_empty(new_inode);
diff --git a/fs/file.c b/fs/file.c
index 6b1acdf..69d6990 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -23,12 +23,12 @@
 #include <linux/rcupdate.h>
 #include <linux/workqueue.h>
 
-int sysctl_nr_open __read_mostly = 1024*1024;
-int sysctl_nr_open_min = BITS_PER_LONG;
+unsigned int sysctl_nr_open __read_mostly = 1024*1024;
+unsigned int sysctl_nr_open_min = BITS_PER_LONG;
 /* our min() is unusable in constant expressions ;-/ */
 #define __const_min(x, y) ((x) < (y) ? (x) : (y))
-int sysctl_nr_open_max = __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) &
-			 -BITS_PER_LONG;
+unsigned int sysctl_nr_open_max =
+	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
 
 static void *alloc_fdmem(size_t size)
 {
@@ -163,7 +163,7 @@
  * Return <0 error code on error; 1 on successful completion.
  * The files->file_lock should be held on entry, and will be held on exit.
  */
-static int expand_fdtable(struct files_struct *files, int nr)
+static int expand_fdtable(struct files_struct *files, unsigned int nr)
 	__releases(files->file_lock)
 	__acquires(files->file_lock)
 {
@@ -208,7 +208,7 @@
  * expanded and execution may have blocked.
  * The files->file_lock should be held on entry, and will be held on exit.
  */
-static int expand_files(struct files_struct *files, int nr)
+static int expand_files(struct files_struct *files, unsigned int nr)
 	__releases(files->file_lock)
 	__acquires(files->file_lock)
 {
@@ -243,12 +243,12 @@
 	return expanded;
 }
 
-static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
+static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
 {
 	__set_bit(fd, fdt->close_on_exec);
 }
 
-static inline void __clear_close_on_exec(int fd, struct fdtable *fdt)
+static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
 {
 	if (test_bit(fd, fdt->close_on_exec))
 		__clear_bit(fd, fdt->close_on_exec);
@@ -268,10 +268,10 @@
 	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
 }
 
-static int count_open_files(struct fdtable *fdt)
+static unsigned int count_open_files(struct fdtable *fdt)
 {
-	int size = fdt->max_fds;
-	int i;
+	unsigned int size = fdt->max_fds;
+	unsigned int i;
 
 	/* Find the last open fd */
 	for (i = size / BITS_PER_LONG; i > 0; ) {
@@ -291,7 +291,7 @@
 {
 	struct files_struct *newf;
 	struct file **old_fds, **new_fds;
-	int open_files, i;
+	unsigned int open_files, i;
 	struct fdtable *old_fdt, *new_fdt;
 
 	*errorp = -ENOMEM;
@@ -391,7 +391,7 @@
 	 * files structure.
 	 */
 	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
-	int i, j = 0;
+	unsigned int i, j = 0;
 
 	for (;;) {
 		unsigned long set;
@@ -477,11 +477,11 @@
 	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
 };
 
-static unsigned long find_next_fd(struct fdtable *fdt, unsigned long start)
+static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
 {
-	unsigned long maxfd = fdt->max_fds;
-	unsigned long maxbit = maxfd / BITS_PER_LONG;
-	unsigned long bitbit = start / BITS_PER_LONG;
+	unsigned int maxfd = fdt->max_fds;
+	unsigned int maxbit = maxfd / BITS_PER_LONG;
+	unsigned int bitbit = start / BITS_PER_LONG;
 
 	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
 	if (bitbit > maxfd)
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index f863ac6..6e22748 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -220,7 +220,7 @@
 	inode->i_mode = mode;
 	inode->i_uid = fc->user_id;
 	inode->i_gid = fc->group_id;
-	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 	/* setting ->i_op to NULL is not allowed */
 	if (iop)
 		inode->i_op = iop;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index f7c84ab..6a4d0e5 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -647,7 +647,7 @@
 void fuse_update_ctime(struct inode *inode)
 {
 	if (!IS_NOCMTIME(inode)) {
-		inode->i_ctime = current_fs_time(inode->i_sb);
+		inode->i_ctime = current_time(inode);
 		mark_inode_dirty_sync(inode);
 	}
 }
@@ -1604,9 +1604,10 @@
  * vmtruncate() doesn't allow for this case, so do the rlimit checking
  * and the actual truncation by hand.
  */
-int fuse_do_setattr(struct inode *inode, struct iattr *attr,
+int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
 		    struct file *file)
 {
+	struct inode *inode = d_inode(dentry);
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	struct fuse_inode *fi = get_fuse_inode(inode);
 	FUSE_ARGS(args);
@@ -1621,7 +1622,7 @@
 	if (!fc->default_permissions)
 		attr->ia_valid |= ATTR_FORCE;
 
-	err = inode_change_ok(inode, attr);
+	err = setattr_prepare(dentry, attr);
 	if (err)
 		return err;
 
@@ -1758,7 +1759,7 @@
 	if (!attr->ia_valid)
 		return 0;
 
-	ret = fuse_do_setattr(inode, attr, file);
+	ret = fuse_do_setattr(entry, attr, file);
 	if (!ret) {
 		/*
 		 * If filesystem supports acls it may have updated acl xattrs in
@@ -1792,7 +1793,7 @@
 	.symlink	= fuse_symlink,
 	.unlink		= fuse_unlink,
 	.rmdir		= fuse_rmdir,
-	.rename2	= fuse_rename2,
+	.rename		= fuse_rename2,
 	.link		= fuse_link,
 	.setattr	= fuse_setattr,
 	.create		= fuse_create,
@@ -1800,10 +1801,7 @@
 	.mknod		= fuse_mknod,
 	.permission	= fuse_permission,
 	.getattr	= fuse_getattr,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= fuse_listxattr,
-	.removexattr	= generic_removexattr,
 	.get_acl	= fuse_get_acl,
 	.set_acl	= fuse_set_acl,
 };
@@ -1823,10 +1821,7 @@
 	.setattr	= fuse_setattr,
 	.permission	= fuse_permission,
 	.getattr	= fuse_getattr,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= fuse_listxattr,
-	.removexattr	= generic_removexattr,
 	.get_acl	= fuse_get_acl,
 	.set_acl	= fuse_set_acl,
 };
@@ -1836,10 +1831,7 @@
 	.get_link	= fuse_get_link,
 	.readlink	= generic_readlink,
 	.getattr	= fuse_getattr,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= fuse_listxattr,
-	.removexattr	= generic_removexattr,
 };
 
 void fuse_init_common(struct inode *inode)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index b7beb67..abc66a6 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2810,7 +2810,7 @@
 	attr.ia_file = file;
 	attr.ia_valid |= ATTR_FILE;
 
-	fuse_do_setattr(inode, &attr, file);
+	fuse_do_setattr(file_dentry(file), &attr, file);
 }
 
 static inline loff_t fuse_round_up(loff_t off)
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 24ada5d..0dfbb13 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -961,7 +961,7 @@
 int fuse_flush_times(struct inode *inode, struct fuse_file *ff);
 int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);
 
-int fuse_do_setattr(struct inode *inode, struct iattr *attr,
+int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
 		    struct file *file);
 
 void fuse_set_initialized(struct fuse_conn *fc);
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 363ba9e..2524807 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -92,17 +92,11 @@
 	if (type == ACL_TYPE_ACCESS) {
 		umode_t mode = inode->i_mode;
 
-		error = posix_acl_equiv_mode(acl, &mode);
-		if (error < 0)
+		error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+		if (error)
 			return error;
-
-		if (error == 0)
-			acl = NULL;
-
-		if (mode != inode->i_mode) {
-			inode->i_mode = mode;
+		if (mode != inode->i_mode)
 			mark_inode_dirty(inode);
-		}
 	}
 
 	if (acl) {
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 645721f..fc5da4c 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -836,7 +836,7 @@
 	gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
 			  ip->i_inode.i_gid);
 
-	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
+	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
 
 	gfs2_dinode_out(ip, dibh->b_data);
 
@@ -1063,7 +1063,7 @@
 	}
 
 	i_size_write(inode, newsize);
-	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
+	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
 	gfs2_dinode_out(ip, dibh->b_data);
 
 	if (journaled)
@@ -1142,7 +1142,7 @@
 		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
 		gfs2_ordered_del_inode(ip);
 	}
-	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
+	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
 	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
 
 	gfs2_trans_add_meta(ip->i_gl, dibh);
@@ -1252,7 +1252,7 @@
 		goto do_end_trans;
 
 	i_size_write(inode, size);
-	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
+	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
 	gfs2_trans_add_meta(ip->i_gl, dibh);
 	gfs2_dinode_out(ip, dibh->b_data);
 	brelse(dibh);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index db8fbeb..3cdde5f 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -135,7 +135,7 @@
 	memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
 	if (ip->i_inode.i_size < offset + size)
 		i_size_write(&ip->i_inode, offset + size);
-	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
+	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
 	gfs2_dinode_out(ip, dibh->b_data);
 
 	brelse(dibh);
@@ -233,7 +233,7 @@
 
 	if (ip->i_inode.i_size < offset + copied)
 		i_size_write(&ip->i_inode, offset + copied);
-	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
+	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
 
 	gfs2_trans_add_meta(ip->i_gl, dibh);
 	gfs2_dinode_out(ip, dibh->b_data);
@@ -872,7 +872,7 @@
 	struct gfs2_leaf *leaf;
 	struct gfs2_dirent *dent;
 	struct qstr name = { .name = "" };
-	struct timespec tv = CURRENT_TIME;
+	struct timespec tv = current_time(inode);
 
 	error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
 	if (error)
@@ -1816,7 +1816,7 @@
 			gfs2_inum_out(nip, dent);
 			dent->de_type = cpu_to_be16(IF2DT(nip->i_inode.i_mode));
 			dent->de_rahead = cpu_to_be16(gfs2_inode_ra_len(nip));
-			tv = CURRENT_TIME;
+			tv = current_time(&ip->i_inode);
 			if (ip->i_diskflags & GFS2_DIF_EXHASH) {
 				leaf = (struct gfs2_leaf *)bh->b_data;
 				be16_add_cpu(&leaf->lf_entries, 1);
@@ -1878,7 +1878,7 @@
 	const struct qstr *name = &dentry->d_name;
 	struct gfs2_dirent *dent, *prev = NULL;
 	struct buffer_head *bh;
-	struct timespec tv = CURRENT_TIME;
+	struct timespec tv = current_time(&dip->i_inode);
 
 	/* Returns _either_ the entry (if its first in block) or the
 	   previous entry otherwise */
@@ -1960,7 +1960,7 @@
 		gfs2_trans_add_meta(dip->i_gl, bh);
 	}
 
-	dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME;
+	dip->i_inode.i_mtime = dip->i_inode.i_ctime = current_time(&dip->i_inode);
 	gfs2_dinode_out(dip, bh->b_data);
 	brelse(bh);
 	return 0;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index fb3a810..fe3f849 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -656,7 +656,7 @@
 	set_nlink(inode, S_ISDIR(mode) ? 2 : 1);
 	inode->i_rdev = dev;
 	inode->i_size = size;
-	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 	gfs2_set_inode_blocks(inode, 1);
 	munge_mode_uid_gid(dip, inode);
 	check_and_update_goal(dip);
@@ -983,7 +983,7 @@
 
 	gfs2_trans_add_meta(ip->i_gl, dibh);
 	inc_nlink(&ip->i_inode);
-	ip->i_inode.i_ctime = CURRENT_TIME;
+	ip->i_inode.i_ctime = current_time(&ip->i_inode);
 	ihold(inode);
 	d_instantiate(dentry, inode);
 	mark_inode_dirty(inode);
@@ -1067,7 +1067,7 @@
 		return error;
 
 	ip->i_entries = 0;
-	inode->i_ctime = CURRENT_TIME;
+	inode->i_ctime = current_time(inode);
 	if (S_ISDIR(inode->i_mode))
 		clear_nlink(inode);
 	else
@@ -1330,7 +1330,7 @@
 	error = gfs2_meta_inode_buffer(ip, &dibh);
 	if (error)
 		return error;
-	ip->i_inode.i_ctime = CURRENT_TIME;
+	ip->i_inode.i_ctime = current_time(&ip->i_inode);
 	gfs2_trans_add_meta(ip->i_gl, dibh);
 	gfs2_dinode_out(ip, dibh->b_data);
 	brelse(dibh);
@@ -1936,7 +1936,7 @@
 	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
 		goto out;
 
-	error = inode_change_ok(inode, attr);
+	error = setattr_prepare(dentry, attr);
 	if (error)
 		goto out;
 
@@ -2040,10 +2040,7 @@
 	.permission = gfs2_permission,
 	.setattr = gfs2_setattr,
 	.getattr = gfs2_getattr,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
 	.listxattr = gfs2_listxattr,
-	.removexattr = generic_removexattr,
 	.fiemap = gfs2_fiemap,
 	.get_acl = gfs2_get_acl,
 	.set_acl = gfs2_set_acl,
@@ -2058,14 +2055,11 @@
 	.mkdir = gfs2_mkdir,
 	.rmdir = gfs2_unlink,
 	.mknod = gfs2_mknod,
-	.rename2 = gfs2_rename2,
+	.rename = gfs2_rename2,
 	.permission = gfs2_permission,
 	.setattr = gfs2_setattr,
 	.getattr = gfs2_getattr,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
 	.listxattr = gfs2_listxattr,
-	.removexattr = generic_removexattr,
 	.fiemap = gfs2_fiemap,
 	.get_acl = gfs2_get_acl,
 	.set_acl = gfs2_set_acl,
@@ -2078,10 +2072,7 @@
 	.permission = gfs2_permission,
 	.setattr = gfs2_setattr,
 	.getattr = gfs2_getattr,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
 	.listxattr = gfs2_listxattr,
-	.removexattr = generic_removexattr,
 	.fiemap = gfs2_fiemap,
 };
 
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 8af2dfa..c2ca956 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -854,7 +854,7 @@
 		size = loc + sizeof(struct gfs2_quota);
 		if (size > inode->i_size)
 			i_size_write(inode, size);
-		inode->i_mtime = inode->i_atime = CURRENT_TIME;
+		inode->i_mtime = inode->i_atime = current_time(inode);
 		mark_inode_dirty(inode);
 		set_bit(QDF_REFRESH, &qd->qd_flags);
 	}
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 3a28535..a4a5770 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -309,7 +309,7 @@
 
 	error = gfs2_meta_inode_buffer(ip, &dibh);
 	if (!error) {
-		ip->i_inode.i_ctime = CURRENT_TIME;
+		ip->i_inode.i_ctime = current_time(&ip->i_inode);
 		gfs2_trans_add_meta(ip->i_gl, dibh);
 		gfs2_dinode_out(ip, dibh->b_data);
 		brelse(dibh);
@@ -775,7 +775,7 @@
 
 	error = gfs2_meta_inode_buffer(ip, &dibh);
 	if (!error) {
-		ip->i_inode.i_ctime = CURRENT_TIME;
+		ip->i_inode.i_ctime = current_time(&ip->i_inode);
 		gfs2_trans_add_meta(ip->i_gl, dibh);
 		gfs2_dinode_out(ip, dibh->b_data);
 		brelse(dibh);
@@ -910,7 +910,7 @@
 	error = gfs2_meta_inode_buffer(ip, &dibh);
 	if (error)
 		goto out;
-	ip->i_inode.i_ctime = CURRENT_TIME;
+	ip->i_inode.i_ctime = current_time(&ip->i_inode);
 	gfs2_trans_add_meta(ip->i_gl, dibh);
 	gfs2_dinode_out(ip, dibh->b_data);
 	brelse(dibh);
@@ -1133,7 +1133,7 @@
 
 	error = gfs2_meta_inode_buffer(ip, &dibh);
 	if (!error) {
-		ip->i_inode.i_ctime = CURRENT_TIME;
+		ip->i_inode.i_ctime = current_time(&ip->i_inode);
 		gfs2_trans_add_meta(ip->i_gl, dibh);
 		gfs2_dinode_out(ip, dibh->b_data);
 		brelse(dibh);
diff --git a/fs/hfs/attr.c b/fs/hfs/attr.c
index d9a8691..0933600 100644
--- a/fs/hfs/attr.c
+++ b/fs/hfs/attr.c
@@ -13,9 +13,13 @@
 #include "hfs_fs.h"
 #include "btree.h"
 
-int hfs_setxattr(struct dentry *unused, struct inode *inode,
-		 const char *name, const void *value,
-		 size_t size, int flags)
+enum hfs_xattr_type {
+	HFS_TYPE,
+	HFS_CREATOR,
+};
+
+static int __hfs_setxattr(struct inode *inode, enum hfs_xattr_type type,
+			  const void *value, size_t size, int flags)
 {
 	struct hfs_find_data fd;
 	hfs_cat_rec rec;
@@ -36,18 +40,22 @@
 			sizeof(struct hfs_cat_file));
 	file = &rec.file;
 
-	if (!strcmp(name, "hfs.type")) {
+	switch (type) {
+	case HFS_TYPE:
 		if (size == 4)
 			memcpy(&file->UsrWds.fdType, value, 4);
 		else
 			res = -ERANGE;
-	} else if (!strcmp(name, "hfs.creator")) {
+		break;
+
+	case HFS_CREATOR:
 		if (size == 4)
 			memcpy(&file->UsrWds.fdCreator, value, 4);
 		else
 			res = -ERANGE;
-	} else
-		res = -EOPNOTSUPP;
+		break;
+	}
+
 	if (!res)
 		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
 				sizeof(struct hfs_cat_file));
@@ -56,8 +64,8 @@
 	return res;
 }
 
-ssize_t hfs_getxattr(struct dentry *unused, struct inode *inode,
-		     const char *name, void *value, size_t size)
+static ssize_t __hfs_getxattr(struct inode *inode, enum hfs_xattr_type type,
+			      void *value, size_t size)
 {
 	struct hfs_find_data fd;
 	hfs_cat_rec rec;
@@ -80,41 +88,64 @@
 	}
 	file = &rec.file;
 
-	if (!strcmp(name, "hfs.type")) {
+	switch (type) {
+	case HFS_TYPE:
 		if (size >= 4) {
 			memcpy(value, &file->UsrWds.fdType, 4);
 			res = 4;
 		} else
 			res = size ? -ERANGE : 4;
-	} else if (!strcmp(name, "hfs.creator")) {
+		break;
+
+	case HFS_CREATOR:
 		if (size >= 4) {
 			memcpy(value, &file->UsrWds.fdCreator, 4);
 			res = 4;
 		} else
 			res = size ? -ERANGE : 4;
-	} else
-		res = -ENODATA;
+		break;
+	}
+
 out:
 	if (size)
 		hfs_find_exit(&fd);
 	return res;
 }
 
-#define HFS_ATTRLIST_SIZE (sizeof("hfs.creator")+sizeof("hfs.type"))
-
-ssize_t hfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
+static int hfs_xattr_get(const struct xattr_handler *handler,
+			 struct dentry *unused, struct inode *inode,
+			 const char *name, void *value, size_t size)
 {
-	struct inode *inode = d_inode(dentry);
+	return __hfs_getxattr(inode, handler->flags, value, size);
+}
 
-	if (!S_ISREG(inode->i_mode) || HFS_IS_RSRC(inode))
+static int hfs_xattr_set(const struct xattr_handler *handler,
+			 struct dentry *unused, struct inode *inode,
+			 const char *name, const void *value, size_t size,
+			 int flags)
+{
+	if (!value)
 		return -EOPNOTSUPP;
 
-	if (!buffer || !size)
-		return HFS_ATTRLIST_SIZE;
-	if (size < HFS_ATTRLIST_SIZE)
-		return -ERANGE;
-	strcpy(buffer, "hfs.type");
-	strcpy(buffer + sizeof("hfs.type"), "hfs.creator");
-
-	return HFS_ATTRLIST_SIZE;
+	return __hfs_setxattr(inode, handler->flags, value, size, flags);
 }
+
+static const struct xattr_handler hfs_creator_handler = {
+	.name = "hfs.creator",
+	.flags = HFS_CREATOR,
+	.get = hfs_xattr_get,
+	.set = hfs_xattr_set,
+};
+
+static const struct xattr_handler hfs_type_handler = {
+	.name = "hfs.type",
+	.flags = HFS_TYPE,
+	.get = hfs_xattr_get,
+	.set = hfs_xattr_set,
+};
+
+const struct xattr_handler *hfs_xattr_handlers[] = {
+	&hfs_creator_handler,
+	&hfs_type_handler,
+	NULL
+};
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index 8f4afd3..8a66405 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -125,7 +125,7 @@
 		goto err1;
 
 	dir->i_size++;
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	mark_inode_dirty(dir);
 	hfs_find_exit(&fd);
 	return 0;
@@ -261,7 +261,7 @@
 	}
 
 	dir->i_size--;
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	mark_inode_dirty(dir);
 	res = 0;
 out:
@@ -321,7 +321,7 @@
 	if (err)
 		goto out;
 	dst_dir->i_size++;
-	dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC;
+	dst_dir->i_mtime = dst_dir->i_ctime = current_time(dst_dir);
 	mark_inode_dirty(dst_dir);
 
 	/* finally remove the old entry */
@@ -333,7 +333,7 @@
 	if (err)
 		goto out;
 	src_dir->i_size--;
-	src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC;
+	src_dir->i_mtime = src_dir->i_ctime = current_time(src_dir);
 	mark_inode_dirty(src_dir);
 
 	type = entry.type;
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 163190e..5de5c48 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -268,7 +268,7 @@
 	if (res)
 		return res;
 	clear_nlink(inode);
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	hfs_delete_inode(inode);
 	mark_inode_dirty(inode);
 	return 0;
@@ -286,10 +286,14 @@
  * XXX: how do you handle must_be dir?
  */
 static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
-		      struct inode *new_dir, struct dentry *new_dentry)
+		      struct inode *new_dir, struct dentry *new_dentry,
+		      unsigned int flags)
 {
 	int res;
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	/* Unlink destination if it already exists */
 	if (d_really_is_positive(new_dentry)) {
 		res = hfs_remove(new_dir, new_dentry);
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 16f5172..4cdec5a 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -212,11 +212,7 @@
 extern void hfs_delete_inode(struct inode *);
 
 /* attr.c */
-extern int hfs_setxattr(struct dentry *dentry, struct inode *inode, const char *name,
-			const void *value, size_t size, int flags);
-extern ssize_t hfs_getxattr(struct dentry *dentry, struct inode *inode,
-			    const char *name, void *value, size_t size);
-extern ssize_t hfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
+extern const struct xattr_handler *hfs_xattr_handlers[];
 
 /* mdb.c */
 extern int hfs_mdb_get(struct super_block *);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index c6a3241..f776acf 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -15,6 +15,7 @@
 #include <linux/mpage.h>
 #include <linux/sched.h>
 #include <linux/uio.h>
+#include <linux/xattr.h>
 
 #include "hfs_fs.h"
 #include "btree.h"
@@ -193,7 +194,7 @@
 	inode->i_uid = current_fsuid();
 	inode->i_gid = current_fsgid();
 	set_nlink(inode, 1);
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	HFS_I(inode)->flags = 0;
 	HFS_I(inode)->rsrc_inode = NULL;
 	HFS_I(inode)->fs_blocks = 0;
@@ -605,7 +606,7 @@
 	struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
 	int error;
 
-	error = inode_change_ok(inode, attr); /* basic permission checks */
+	error = setattr_prepare(dentry, attr); /* basic permission checks */
 	if (error)
 		return error;
 
@@ -687,7 +688,5 @@
 static const struct inode_operations hfs_file_inode_operations = {
 	.lookup		= hfs_file_lookup,
 	.setattr	= hfs_inode_setattr,
-	.setxattr	= hfs_setxattr,
-	.getxattr	= hfs_getxattr,
-	.listxattr	= hfs_listxattr,
+	.listxattr	= generic_listxattr,
 };
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 1ca95c2..bf6304a 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -406,6 +406,7 @@
 	}
 
 	sb->s_op = &hfs_super_operations;
+	sb->s_xattr = hfs_xattr_handlers;
 	sb->s_flags |= MS_NODIRATIME;
 	mutex_init(&sbi->bitmap_lock);
 
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 142534d..a5e00f7 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -303,7 +303,7 @@
 	dir->i_size++;
 	if (S_ISDIR(inode->i_mode))
 		hfsplus_subfolders_inc(dir);
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
 
 	hfs_find_exit(&fd);
@@ -400,7 +400,7 @@
 	dir->i_size--;
 	if (type == HFSPLUS_FOLDER)
 		hfsplus_subfolders_dec(dir);
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
 
 	if (type == HFSPLUS_FILE || type == HFSPLUS_FOLDER) {
@@ -469,7 +469,7 @@
 	dst_dir->i_size++;
 	if (type == HFSPLUS_FOLDER)
 		hfsplus_subfolders_inc(dst_dir);
-	dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC;
+	dst_dir->i_mtime = dst_dir->i_ctime = current_time(dst_dir);
 
 	/* finally remove the old entry */
 	err = hfsplus_cat_build_key(sb, src_fd.search_key,
@@ -486,7 +486,7 @@
 	src_dir->i_size--;
 	if (type == HFSPLUS_FOLDER)
 		hfsplus_subfolders_dec(src_dir);
-	src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC;
+	src_dir->i_mtime = src_dir->i_ctime = current_time(src_dir);
 
 	/* remove old thread entry */
 	hfsplus_cat_build_key_with_cnid(sb, src_fd.search_key, cnid);
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 42e1286..31d5e3f 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -347,7 +347,7 @@
 	inc_nlink(inode);
 	hfsplus_instantiate(dst_dentry, inode, cnid);
 	ihold(inode);
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	mark_inode_dirty(inode);
 	sbi->file_count++;
 	hfsplus_mark_mdb_dirty(dst_dir->i_sb);
@@ -406,7 +406,7 @@
 			hfsplus_delete_inode(inode);
 	} else
 		sbi->file_count--;
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	mark_inode_dirty(inode);
 out:
 	mutex_unlock(&sbi->vh_mutex);
@@ -427,7 +427,7 @@
 	if (res)
 		goto out;
 	clear_nlink(inode);
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	hfsplus_delete_inode(inode);
 	mark_inode_dirty(inode);
 out:
@@ -530,10 +530,14 @@
 }
 
 static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
-			  struct inode *new_dir, struct dentry *new_dentry)
+			  struct inode *new_dir, struct dentry *new_dentry,
+			  unsigned int flags)
 {
 	int res;
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	/* Unlink destination if it already exists */
 	if (d_really_is_positive(new_dentry)) {
 		if (d_is_dir(new_dentry))
@@ -562,10 +566,7 @@
 	.symlink		= hfsplus_symlink,
 	.mknod			= hfsplus_mknod,
 	.rename			= hfsplus_rename,
-	.setxattr		= generic_setxattr,
-	.getxattr		= generic_getxattr,
 	.listxattr		= hfsplus_listxattr,
-	.removexattr		= generic_removexattr,
 #ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
 	.get_acl		= hfsplus_get_posix_acl,
 	.set_acl		= hfsplus_set_posix_acl,
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 19462d7..2e796f8 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -245,7 +245,7 @@
 	struct inode *inode = d_inode(dentry);
 	int error;
 
-	error = inode_change_ok(inode, attr);
+	error = setattr_prepare(dentry, attr);
 	if (error)
 		return error;
 
@@ -333,10 +333,7 @@
 
 static const struct inode_operations hfsplus_file_inode_operations = {
 	.setattr	= hfsplus_setattr,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= hfsplus_listxattr,
-	.removexattr	= generic_removexattr,
 #ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
 	.get_acl	= hfsplus_get_posix_acl,
 	.set_acl	= hfsplus_set_posix_acl,
@@ -369,7 +366,7 @@
 	inode->i_uid = current_fsuid();
 	inode->i_gid = current_fsgid();
 	set_nlink(inode, 1);
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 
 	hip = HFSPLUS_I(inode);
 	INIT_LIST_HEAD(&hip->open_dir_list);
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index 32a49e2..99627f8 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -122,7 +122,7 @@
 	else
 		hip->userflags &= ~HFSPLUS_FLG_NODUMP;
 
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	mark_inode_dirty(inode);
 
 out_unlock_inode:
diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c
index ab7ea25..9b92058 100644
--- a/fs/hfsplus/posix_acl.c
+++ b/fs/hfsplus/posix_acl.c
@@ -65,8 +65,8 @@
 	case ACL_TYPE_ACCESS:
 		xattr_name = XATTR_NAME_POSIX_ACL_ACCESS;
 		if (acl) {
-			err = posix_acl_equiv_mode(acl, &inode->i_mode);
-			if (err < 0)
+			err = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+			if (err)
 				return err;
 		}
 		err = 0;
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 90e46cd..23e15ea 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -812,7 +812,7 @@
 
 	int fd = HOSTFS_I(inode)->fd;
 
-	err = inode_change_ok(inode, attr);
+	err = setattr_prepare(dentry, attr);
 	if (err)
 		return err;
 
@@ -885,7 +885,7 @@
 	.mkdir		= hostfs_mkdir,
 	.rmdir		= hostfs_rmdir,
 	.mknod		= hostfs_mknod,
-	.rename2	= hostfs_rename2,
+	.rename		= hostfs_rename2,
 	.permission	= hostfs_permission,
 	.setattr	= hostfs_setattr,
 };
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index d3bcdd9..b3be1b5 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -189,6 +189,11 @@
 	return generic_block_bmap(mapping, block, hpfs_get_block);
 }
 
+static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len)
+{
+	return generic_block_fiemap(inode, fieinfo, start, len, hpfs_get_block);
+}
+
 const struct address_space_operations hpfs_aops = {
 	.readpage = hpfs_readpage,
 	.writepage = hpfs_writepage,
@@ -214,4 +219,5 @@
 const struct inode_operations hpfs_file_iops =
 {
 	.setattr	= hpfs_setattr,
+	.fiemap		= hpfs_fiemap,
 };
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 1f3c6d7..b9c724e 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -273,7 +273,7 @@
 	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size)
 		goto out_unlock;
 
-	error = inode_change_ok(inode, attr);
+	error = setattr_prepare(dentry, attr);
 	if (error)
 		goto out_unlock;
 
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index bb8d67e..f30c144 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -507,7 +507,8 @@
 };
 	
 static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
-		struct inode *new_dir, struct dentry *new_dentry)
+		       struct inode *new_dir, struct dentry *new_dentry,
+		       unsigned int flags)
 {
 	const unsigned char *old_name = old_dentry->d_name.name;
 	unsigned old_len = old_dentry->d_name.len;
@@ -524,6 +525,9 @@
 	struct fnode *fnode;
 	int err;
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	if ((err = hpfs_chk_name(new_name, &new_len))) return err;
 	err = 0;
 	hpfs_adjust_length(old_name, &old_len);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 7337cac..4fb7b10 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -655,7 +655,7 @@
 
 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
 		i_size_write(inode, offset + len);
-	inode->i_ctime = CURRENT_TIME;
+	inode->i_ctime = current_time(inode);
 out:
 	inode_unlock(inode);
 	return error;
@@ -670,7 +670,7 @@
 
 	BUG_ON(!inode);
 
-	error = inode_change_ok(inode, attr);
+	error = setattr_prepare(dentry, attr);
 	if (error)
 		return error;
 
@@ -700,7 +700,7 @@
 		inode->i_mode = S_IFDIR | config->mode;
 		inode->i_uid = config->uid;
 		inode->i_gid = config->gid;
-		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 		info = HUGETLBFS_I(inode);
 		mpol_shared_policy_init(&info->policy, NULL);
 		inode->i_op = &hugetlbfs_dir_inode_operations;
@@ -739,7 +739,7 @@
 		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
 				&hugetlbfs_i_mmap_rwsem_key);
 		inode->i_mapping->a_ops = &hugetlbfs_aops;
-		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 		inode->i_mapping->private_data = resv_map;
 		info = HUGETLBFS_I(inode);
 		/*
@@ -788,7 +788,7 @@
 
 	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
 	if (inode) {
-		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+		dir->i_ctime = dir->i_mtime = current_time(dir);
 		d_instantiate(dentry, inode);
 		dget(dentry);	/* Extra count - pin the dentry in core */
 		error = 0;
@@ -825,7 +825,7 @@
 		} else
 			iput(inode);
 	}
-	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+	dir->i_ctime = dir->i_mtime = current_time(dir);
 
 	return error;
 }
diff --git a/fs/inode.c b/fs/inode.c
index 7e3ef3a..88110fd 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -140,6 +140,8 @@
 	inode->i_fop = &no_open_fops;
 	inode->__i_nlink = 1;
 	inode->i_opflags = 0;
+	if (sb->s_xattr)
+		inode->i_opflags |= IOP_XATTR;
 	i_uid_write(inode, 0);
 	i_gid_write(inode, 0);
 	atomic_set(&inode->i_writecount, 0);
@@ -1021,13 +1023,17 @@
 {
 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
 	struct inode *inode;
-
+again:
 	spin_lock(&inode_hash_lock);
 	inode = find_inode(sb, head, test, data);
 	spin_unlock(&inode_hash_lock);
 
 	if (inode) {
 		wait_on_inode(inode);
+		if (unlikely(inode_unhashed(inode))) {
+			iput(inode);
+			goto again;
+		}
 		return inode;
 	}
 
@@ -1064,6 +1070,10 @@
 		destroy_inode(inode);
 		inode = old;
 		wait_on_inode(inode);
+		if (unlikely(inode_unhashed(inode))) {
+			iput(inode);
+			goto again;
+		}
 	}
 	return inode;
 
@@ -1091,12 +1101,16 @@
 {
 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
 	struct inode *inode;
-
+again:
 	spin_lock(&inode_hash_lock);
 	inode = find_inode_fast(sb, head, ino);
 	spin_unlock(&inode_hash_lock);
 	if (inode) {
 		wait_on_inode(inode);
+		if (unlikely(inode_unhashed(inode))) {
+			iput(inode);
+			goto again;
+		}
 		return inode;
 	}
 
@@ -1131,6 +1145,10 @@
 		destroy_inode(inode);
 		inode = old;
 		wait_on_inode(inode);
+		if (unlikely(inode_unhashed(inode))) {
+			iput(inode);
+			goto again;
+		}
 	}
 	return inode;
 }
@@ -1266,10 +1284,16 @@
 struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
 		int (*test)(struct inode *, void *), void *data)
 {
-	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);
-
-	if (inode)
+	struct inode *inode;
+again:
+	inode = ilookup5_nowait(sb, hashval, test, data);
+	if (inode) {
 		wait_on_inode(inode);
+		if (unlikely(inode_unhashed(inode))) {
+			iput(inode);
+			goto again;
+		}
+	}
 	return inode;
 }
 EXPORT_SYMBOL(ilookup5);
@@ -1286,13 +1310,18 @@
 {
 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
 	struct inode *inode;
-
+again:
 	spin_lock(&inode_hash_lock);
 	inode = find_inode_fast(sb, head, ino);
 	spin_unlock(&inode_hash_lock);
 
-	if (inode)
+	if (inode) {
 		wait_on_inode(inode);
+		if (unlikely(inode_unhashed(inode))) {
+			iput(inode);
+			goto again;
+		}
+	}
 	return inode;
 }
 EXPORT_SYMBOL(ilookup);
@@ -1536,16 +1565,36 @@
 EXPORT_SYMBOL(bmap);
 
 /*
+ * Update times in overlayed inode from underlying real inode
+ */
+static void update_ovl_inode_times(struct dentry *dentry, struct inode *inode,
+			       bool rcu)
+{
+	if (!rcu) {
+		struct inode *realinode = d_real_inode(dentry);
+
+		if (unlikely(inode != realinode) &&
+		    (!timespec_equal(&inode->i_mtime, &realinode->i_mtime) ||
+		     !timespec_equal(&inode->i_ctime, &realinode->i_ctime))) {
+			inode->i_mtime = realinode->i_mtime;
+			inode->i_ctime = realinode->i_ctime;
+		}
+	}
+}
+
+/*
  * With relative atime, only update atime if the previous atime is
  * earlier than either the ctime or mtime or if at least a day has
  * passed since the last atime update.
  */
-static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
-			     struct timespec now)
+static int relatime_need_update(const struct path *path, struct inode *inode,
+				struct timespec now, bool rcu)
 {
 
-	if (!(mnt->mnt_flags & MNT_RELATIME))
+	if (!(path->mnt->mnt_flags & MNT_RELATIME))
 		return 1;
+
+	update_ovl_inode_times(path->dentry, inode, rcu);
 	/*
 	 * Is mtime younger than atime? If yes, update atime:
 	 */
@@ -1612,7 +1661,8 @@
  *	This function automatically handles read only file systems and media,
  *	as well as the "noatime" flag and inode specific "noatime" markers.
  */
-bool atime_needs_update(const struct path *path, struct inode *inode)
+bool __atime_needs_update(const struct path *path, struct inode *inode,
+			  bool rcu)
 {
 	struct vfsmount *mnt = path->mnt;
 	struct timespec now;
@@ -1636,9 +1686,9 @@
 	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
 		return false;
 
-	now = current_fs_time(inode->i_sb);
+	now = current_time(inode);
 
-	if (!relatime_need_update(mnt, inode, now))
+	if (!relatime_need_update(path, inode, now, rcu))
 		return false;
 
 	if (timespec_equal(&inode->i_atime, &now))
@@ -1653,7 +1703,7 @@
 	struct inode *inode = d_inode(path->dentry);
 	struct timespec now;
 
-	if (!atime_needs_update(path, inode))
+	if (!__atime_needs_update(path, inode, false))
 		return;
 
 	if (!sb_start_write_trylock(inode->i_sb))
@@ -1670,7 +1720,7 @@
 	 * We may also fail on filesystems that have the ability to make parts
 	 * of the fs read only, e.g. subvolumes in Btrfs.
 	 */
-	now = current_fs_time(inode->i_sb);
+	now = current_time(inode);
 	update_time(inode, &now, S_ATIME);
 	__mnt_drop_write(mnt);
 skip_update:
@@ -1793,7 +1843,7 @@
 	if (IS_NOCMTIME(inode))
 		return 0;
 
-	now = current_fs_time(inode->i_sb);
+	now = current_time(inode);
 	if (!timespec_equal(&inode->i_mtime, &now))
 		sync_it = S_MTIME;
 
@@ -2049,3 +2099,26 @@
 	mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
 }
 EXPORT_SYMBOL(inode_nohighmem);
+
+/**
+ * current_time - Return FS time
+ * @inode: inode.
+ *
+ * Return the current time truncated to the time granularity supported by
+ * the fs.
+ *
+ * Note that inode and inode->sb cannot be NULL.
+ * Otherwise, the function warns and returns time without truncation.
+ */
+struct timespec current_time(struct inode *inode)
+{
+	struct timespec now = current_kernel_time();
+
+	if (unlikely(!inode->i_sb)) {
+		WARN(1, "current_time() called with uninitialized super_block in the inode");
+		return now;
+	}
+
+	return timespec_trunc(now, inode->i_sb->s_time_gran);
+}
+EXPORT_SYMBOL(current_time);
diff --git a/fs/internal.h b/fs/internal.h
index 8591786..f4da334 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -121,6 +121,15 @@
 extern void inode_add_lru(struct inode *inode);
 extern int dentry_needs_remove_privs(struct dentry *dentry);
 
+extern bool __atime_needs_update(const struct path *, struct inode *, bool);
+static inline bool atime_needs_update_rcu(const struct path *path,
+					  struct inode *inode)
+{
+	return __atime_needs_update(path, inode, true);
+}
+
+extern bool atime_needs_update_rcu(const struct path *, struct inode *);
+
 /*
  * fs-writeback.c
  */
@@ -157,7 +166,7 @@
 /*
  * fs/nsfs.c
  */
-extern struct dentry_operations ns_dentry_operations;
+extern const struct dentry_operations ns_dentry_operations;
 
 /*
  * fs/ioctl.c
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index ad0c745..871c8b3 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -687,6 +687,11 @@
 	pri_bh = NULL;
 
 root_found:
+	/* We don't support read-write mounts */
+	if (!(s->s_flags & MS_RDONLY)) {
+		error = -EACCES;
+		goto out_freebh;
+	}
 
 	if (joliet_level && (pri == NULL || !opt.rock)) {
 		/* This is the case of Joliet with the norock mount flag.
@@ -1501,9 +1506,6 @@
 static struct dentry *isofs_mount(struct file_system_type *fs_type,
 	int flags, const char *dev_name, void *data)
 {
-	/* We don't support read-write mounts */
-	if (!(flags & MS_RDONLY))
-		return ERR_PTR(-EACCES);
 	return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
 }
 
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 5bb565f..31f8ca0 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -269,8 +269,7 @@
 			 * filemap_fdatawait_range(), set it again so
 			 * that user process can get -EIO from fsync().
 			 */
-			set_bit(AS_EIO,
-				&jinode->i_vfs_inode->i_mapping->flags);
+			mapping_set_error(jinode->i_vfs_inode->i_mapping, -EIO);
 
 			if (!ret)
 				ret = err;
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index bc2693d..7ebacf1 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -233,22 +233,21 @@
 	case ACL_TYPE_ACCESS:
 		xprefix = JFFS2_XPREFIX_ACL_ACCESS;
 		if (acl) {
-			umode_t mode = inode->i_mode;
-			rc = posix_acl_equiv_mode(acl, &mode);
-			if (rc < 0)
+			umode_t mode;
+
+			rc = posix_acl_update_mode(inode, &mode, &acl);
+			if (rc)
 				return rc;
 			if (inode->i_mode != mode) {
 				struct iattr attr;
 
 				attr.ia_valid = ATTR_MODE | ATTR_CTIME;
 				attr.ia_mode = mode;
-				attr.ia_ctime = CURRENT_TIME_SEC;
+				attr.ia_ctime = current_time(inode);
 				rc = jffs2_do_setattr(inode, &attr);
 				if (rc < 0)
 					return rc;
 			}
-			if (rc == 0)
-				acl = NULL;
 		}
 		break;
 	case ACL_TYPE_DEFAULT:
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 30eb33ff..0a754f3 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -35,7 +35,8 @@
 static int jffs2_rmdir (struct inode *,struct dentry *);
 static int jffs2_mknod (struct inode *,struct dentry *,umode_t,dev_t);
 static int jffs2_rename (struct inode *, struct dentry *,
-			 struct inode *, struct dentry *);
+			 struct inode *, struct dentry *,
+			 unsigned int);
 
 const struct file_operations jffs2_dir_operations =
 {
@@ -61,10 +62,7 @@
 	.get_acl =	jffs2_get_acl,
 	.set_acl =	jffs2_set_acl,
 	.setattr =	jffs2_setattr,
-	.setxattr =	jffs2_setxattr,
-	.getxattr =	jffs2_getxattr,
 	.listxattr =	jffs2_listxattr,
-	.removexattr =	jffs2_removexattr
 };
 
 /***********************************************************************/
@@ -759,7 +757,8 @@
 }
 
 static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
-			 struct inode *new_dir_i, struct dentry *new_dentry)
+			 struct inode *new_dir_i, struct dentry *new_dentry,
+			 unsigned int flags)
 {
 	int ret;
 	struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb);
@@ -767,6 +766,9 @@
 	uint8_t type;
 	uint32_t now;
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	/* The VFS will check for us and prevent trying to rename a
 	 * file over a directory and vice versa, but if it's a directory,
 	 * the VFS can't check whether the victim is empty. The filesystem
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 0e62dec..c12476e 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -66,10 +66,7 @@
 	.get_acl =	jffs2_get_acl,
 	.set_acl =	jffs2_set_acl,
 	.setattr =	jffs2_setattr,
-	.setxattr =	jffs2_setxattr,
-	.getxattr =	jffs2_getxattr,
 	.listxattr =	jffs2_listxattr,
-	.removexattr =	jffs2_removexattr
 };
 
 const struct address_space_operations jffs2_file_address_operations =
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index ae2ebb2..567653f 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -193,7 +193,7 @@
 	struct inode *inode = d_inode(dentry);
 	int rc;
 
-	rc = inode_change_ok(inode, iattr);
+	rc = setattr_prepare(dentry, iattr);
 	if (rc)
 		return rc;
 
@@ -472,7 +472,7 @@
 	inode->i_mode = jemode_to_cpu(ri->mode);
 	i_gid_write(inode, je16_to_cpu(ri->gid));
 	i_uid_write(inode, je16_to_cpu(ri->uid));
-	inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+	inode->i_atime = inode->i_ctime = inode->i_mtime = current_time(inode);
 	ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));
 
 	inode->i_blocks = 0;
diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c
index 2cabd64..8f3f085 100644
--- a/fs/jffs2/symlink.c
+++ b/fs/jffs2/symlink.c
@@ -16,8 +16,5 @@
 	.readlink =	generic_readlink,
 	.get_link =	simple_get_link,
 	.setattr =	jffs2_setattr,
-	.setxattr =	jffs2_setxattr,
-	.getxattr =	jffs2_getxattr,
 	.listxattr =	jffs2_listxattr,
-	.removexattr =	jffs2_removexattr
 };
diff --git a/fs/jffs2/xattr.h b/fs/jffs2/xattr.h
index 467ff37..720007b 100644
--- a/fs/jffs2/xattr.h
+++ b/fs/jffs2/xattr.h
@@ -99,9 +99,6 @@
 extern const struct xattr_handler jffs2_trusted_xattr_handler;
 
 extern ssize_t jffs2_listxattr(struct dentry *, char *, size_t);
-#define jffs2_getxattr		generic_getxattr
-#define jffs2_setxattr		generic_setxattr
-#define jffs2_removexattr	generic_removexattr
 
 #else
 
@@ -116,9 +113,6 @@
 
 #define jffs2_xattr_handlers	NULL
 #define jffs2_listxattr		NULL
-#define jffs2_getxattr		NULL
-#define jffs2_setxattr		NULL
-#define jffs2_removexattr	NULL
 
 #endif /* CONFIG_JFFS2_FS_XATTR */
 
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index 21fa92b..7bc186f 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -78,13 +78,11 @@
 	case ACL_TYPE_ACCESS:
 		ea_name = XATTR_NAME_POSIX_ACL_ACCESS;
 		if (acl) {
-			rc = posix_acl_equiv_mode(acl, &inode->i_mode);
-			if (rc < 0)
+			rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+			if (rc)
 				return rc;
-			inode->i_ctime = CURRENT_TIME;
+			inode->i_ctime = current_time(inode);
 			mark_inode_dirty(inode);
-			if (rc == 0)
-				acl = NULL;
 		}
 		break;
 	case ACL_TYPE_DEFAULT:
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index 7f1a585..739492c 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -103,7 +103,7 @@
 	struct inode *inode = d_inode(dentry);
 	int rc;
 
-	rc = inode_change_ok(inode, iattr);
+	rc = setattr_prepare(dentry, iattr);
 	if (rc)
 		return rc;
 
@@ -140,10 +140,7 @@
 }
 
 const struct inode_operations jfs_file_inode_operations = {
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= jfs_listxattr,
-	.removexattr	= generic_removexattr,
 	.setattr	= jfs_setattr,
 #ifdef CONFIG_JFS_POSIX_ACL
 	.get_acl	= jfs_get_acl,
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index ad3e7b1..054cc76 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -403,7 +403,7 @@
 			break;
 		}
 
-		ip->i_mtime = ip->i_ctime = CURRENT_TIME;
+		ip->i_mtime = ip->i_ctime = current_time(ip);
 		mark_inode_dirty(ip);
 
 		txCommit(tid, 1, &ip, 0);
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index 5e33cb9..375dd25 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -131,7 +131,7 @@
 	jfs_inode->mode2 |= inode->i_mode;
 
 	inode->i_blocks = 0;
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	jfs_inode->otime = inode->i_ctime.tv_sec;
 	inode->i_generation = JFS_SBI(sb)->gengen++;
 
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 814b0c5..b41596d 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -162,7 +162,7 @@
 
 	mark_inode_dirty(ip);
 
-	dip->i_ctime = dip->i_mtime = CURRENT_TIME;
+	dip->i_ctime = dip->i_mtime = current_time(dip);
 
 	mark_inode_dirty(dip);
 
@@ -298,7 +298,7 @@
 
 	/* update parent directory inode */
 	inc_nlink(dip);		/* for '..' from child directory */
-	dip->i_ctime = dip->i_mtime = CURRENT_TIME;
+	dip->i_ctime = dip->i_mtime = current_time(dip);
 	mark_inode_dirty(dip);
 
 	rc = txCommit(tid, 2, &iplist[0], 0);
@@ -406,7 +406,7 @@
 	/* update parent directory's link count corresponding
 	 * to ".." entry of the target directory deleted
 	 */
-	dip->i_ctime = dip->i_mtime = CURRENT_TIME;
+	dip->i_ctime = dip->i_mtime = current_time(dip);
 	inode_dec_link_count(dip);
 
 	/*
@@ -528,7 +528,7 @@
 
 	ASSERT(ip->i_nlink);
 
-	ip->i_ctime = dip->i_ctime = dip->i_mtime = CURRENT_TIME;
+	ip->i_ctime = dip->i_ctime = dip->i_mtime = current_time(ip);
 	mark_inode_dirty(dip);
 
 	/* update target's inode */
@@ -838,8 +838,8 @@
 
 	/* update object inode */
 	inc_nlink(ip);		/* for new link */
-	ip->i_ctime = CURRENT_TIME;
-	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+	ip->i_ctime = current_time(ip);
+	dir->i_ctime = dir->i_mtime = current_time(dir);
 	mark_inode_dirty(dir);
 	ihold(ip);
 
@@ -1039,7 +1039,7 @@
 
 	mark_inode_dirty(ip);
 
-	dip->i_ctime = dip->i_mtime = CURRENT_TIME;
+	dip->i_ctime = dip->i_mtime = current_time(dip);
 	mark_inode_dirty(dip);
 	/*
 	 * commit update of parent directory and link object
@@ -1078,7 +1078,8 @@
  * FUNCTION:	rename a file or directory
  */
 static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
-	       struct inode *new_dir, struct dentry *new_dentry)
+		      struct inode *new_dir, struct dentry *new_dentry,
+		      unsigned int flags)
 {
 	struct btstack btstack;
 	ino_t ino;
@@ -1097,6 +1098,8 @@
 	s64 new_size = 0;
 	int commit_flag;
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
 
 	jfs_info("jfs_rename: %pd %pd", old_dentry, new_dentry);
 
@@ -1215,7 +1218,7 @@
 			tblk->xflag |= COMMIT_DELETE;
 			tblk->u.ip = new_ip;
 		} else {
-			new_ip->i_ctime = CURRENT_TIME;
+			new_ip->i_ctime = current_time(new_ip);
 			mark_inode_dirty(new_ip);
 		}
 	} else {
@@ -1278,10 +1281,10 @@
 	/*
 	 * Update ctime on changed/moved inodes & mark dirty
 	 */
-	old_ip->i_ctime = CURRENT_TIME;
+	old_ip->i_ctime = current_time(old_ip);
 	mark_inode_dirty(old_ip);
 
-	new_dir->i_ctime = new_dir->i_mtime = current_fs_time(new_dir->i_sb);
+	new_dir->i_ctime = new_dir->i_mtime = current_time(new_dir);
 	mark_inode_dirty(new_dir);
 
 	/* Build list of inodes modified by this transaction */
@@ -1293,7 +1296,7 @@
 
 	if (old_dir != new_dir) {
 		iplist[ipcount++] = new_dir;
-		old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME;
+		old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
 		mark_inode_dirty(old_dir);
 	}
 
@@ -1426,7 +1429,7 @@
 
 	mark_inode_dirty(ip);
 
-	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+	dir->i_ctime = dir->i_mtime = current_time(dir);
 
 	mark_inode_dirty(dir);
 
@@ -1537,10 +1540,7 @@
 	.rmdir		= jfs_rmdir,
 	.mknod		= jfs_mknod,
 	.rename		= jfs_rename,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= jfs_listxattr,
-	.removexattr	= generic_removexattr,
 	.setattr	= jfs_setattr,
 #ifdef CONFIG_JFS_POSIX_ACL
 	.get_acl	= jfs_get_acl,
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index cec8814..85671f7 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -830,7 +830,7 @@
 	if (inode->i_size < off+len-towrite)
 		i_size_write(inode, off+len-towrite);
 	inode->i_version++;
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 	mark_inode_dirty(inode);
 	inode_unlock(inode);
 	return len - towrite;
diff --git a/fs/jfs/symlink.c b/fs/jfs/symlink.c
index c94c7e4..c82404f 100644
--- a/fs/jfs/symlink.c
+++ b/fs/jfs/symlink.c
@@ -25,19 +25,13 @@
 	.readlink	= generic_readlink,
 	.get_link	= simple_get_link,
 	.setattr	= jfs_setattr,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= jfs_listxattr,
-	.removexattr	= generic_removexattr,
 };
 
 const struct inode_operations jfs_symlink_inode_operations = {
 	.readlink	= generic_readlink,
 	.get_link	= page_get_link,
 	.setattr	= jfs_setattr,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= jfs_listxattr,
-	.removexattr	= generic_removexattr,
 };
 
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 0bf3c33..c60f3d3 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -658,7 +658,7 @@
 	if (old_blocks)
 		dquot_free_block(inode, old_blocks);
 
-	inode->i_ctime = CURRENT_TIME;
+	inode->i_ctime = current_time(inode);
 
 	return 0;
 }
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index e57174d..cf4c636 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -110,8 +110,9 @@
  * kn_to:   /n1/n2/n3         [depth=3]
  * result:  /../..
  *
- * return value: length of the string.  If greater than buflen,
- * then contents of buf are undefined.  On error, -1 is returned.
+ * Returns the length of the full path.  If the full length is equal to or
+ * greater than @buflen, @buf contains the truncated path with the trailing
+ * '\0'.  On error, -errno is returned.
  */
 static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
 					struct kernfs_node *kn_from,
@@ -119,9 +120,8 @@
 {
 	struct kernfs_node *kn, *common;
 	const char parent_str[] = "/..";
-	size_t depth_from, depth_to, len = 0, nlen = 0;
-	char *p;
-	int i;
+	size_t depth_from, depth_to, len = 0;
+	int i, j;
 
 	if (!kn_from)
 		kn_from = kernfs_root(kn_to)->kn;
@@ -131,7 +131,7 @@
 
 	common = kernfs_common_ancestor(kn_from, kn_to);
 	if (WARN_ON(!common))
-		return -1;
+		return -EINVAL;
 
 	depth_to = kernfs_depth(common, kn_to);
 	depth_from = kernfs_depth(common, kn_from);
@@ -144,22 +144,16 @@
 			       len < buflen ? buflen - len : 0);
 
 	/* Calculate how many bytes we need for the rest */
-	for (kn = kn_to; kn != common; kn = kn->parent)
-		nlen += strlen(kn->name) + 1;
-
-	if (len + nlen >= buflen)
-		return len + nlen;
-
-	p = buf + len + nlen;
-	*p = '\0';
-	for (kn = kn_to; kn != common; kn = kn->parent) {
-		size_t tmp = strlen(kn->name);
-		p -= tmp;
-		memcpy(p, kn->name, tmp);
-		*(--p) = '/';
+	for (i = depth_to - 1; i >= 0; i--) {
+		for (kn = kn_to, j = 0; j < i; j++)
+			kn = kn->parent;
+		len += strlcpy(buf + len, "/",
+			       len < buflen ? buflen - len : 0);
+		len += strlcpy(buf + len, kn->name,
+			       len < buflen ? buflen - len : 0);
 	}
 
-	return len + nlen;
+	return len;
 }
 
 /**
@@ -186,29 +180,6 @@
 }
 
 /**
- * kernfs_path_len - determine the length of the full path of a given node
- * @kn: kernfs_node of interest
- *
- * The returned length doesn't include the space for the terminating '\0'.
- */
-size_t kernfs_path_len(struct kernfs_node *kn)
-{
-	size_t len = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&kernfs_rename_lock, flags);
-
-	do {
-		len += strlen(kn->name) + 1;
-		kn = kn->parent;
-	} while (kn && kn->parent);
-
-	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
-
-	return len;
-}
-
-/**
  * kernfs_path_from_node - build path of node @to relative to @from.
  * @from: parent kernfs_node relative to which we need to build the path
  * @to: kernfs_node of interest
@@ -220,8 +191,9 @@
  * path (which includes '..'s) as needed to reach from @from to @to is
  * returned.
  *
- * If @buf isn't long enough, the return value will be greater than @buflen
- * and @buf contents are undefined.
+ * Returns the length of the full path.  If the full length is equal to or
+ * greater than @buflen, @buf contains the truncated path with the trailing
+ * '\0'.  On error, -errno is returned.
  */
 int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
 			  char *buf, size_t buflen)
@@ -237,28 +209,6 @@
 EXPORT_SYMBOL_GPL(kernfs_path_from_node);
 
 /**
- * kernfs_path - build full path of a given node
- * @kn: kernfs_node of interest
- * @buf: buffer to copy @kn's name into
- * @buflen: size of @buf
- *
- * Builds and returns the full path of @kn in @buf of @buflen bytes.  The
- * path is built from the end of @buf so the returned pointer usually
- * doesn't match @buf.  If @buf isn't long enough, @buf is nul terminated
- * and %NULL is returned.
- */
-char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
-{
-	int ret;
-
-	ret = kernfs_path_from_node(kn, NULL, buf, buflen);
-	if (ret < 0 || ret >= buflen)
-		return NULL;
-	return buf;
-}
-EXPORT_SYMBOL_GPL(kernfs_path);
-
-/**
  * pr_cont_kernfs_name - pr_cont name of a kernfs_node
  * @kn: kernfs_node of interest
  *
@@ -1096,13 +1046,17 @@
 }
 
 static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry,
-			     struct inode *new_dir, struct dentry *new_dentry)
+			     struct inode *new_dir, struct dentry *new_dentry,
+			     unsigned int flags)
 {
 	struct kernfs_node *kn  = old_dentry->d_fsdata;
 	struct kernfs_node *new_parent = new_dir->i_private;
 	struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
 	int ret;
 
+	if (flags)
+		return -EINVAL;
+
 	if (!scops || !scops->rename)
 		return -EPERM;
 
@@ -1126,9 +1080,6 @@
 	.permission	= kernfs_iop_permission,
 	.setattr	= kernfs_iop_setattr,
 	.getattr	= kernfs_iop_getattr,
-	.setxattr	= kernfs_iop_setxattr,
-	.removexattr	= kernfs_iop_removexattr,
-	.getxattr	= kernfs_iop_getxattr,
 	.listxattr	= kernfs_iop_listxattr,
 
 	.mkdir		= kernfs_iop_mkdir,
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index 63b925d..a198211 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -28,9 +28,6 @@
 	.permission	= kernfs_iop_permission,
 	.setattr	= kernfs_iop_setattr,
 	.getattr	= kernfs_iop_getattr,
-	.setxattr	= kernfs_iop_setxattr,
-	.removexattr	= kernfs_iop_removexattr,
-	.getxattr	= kernfs_iop_getxattr,
 	.listxattr	= kernfs_iop_listxattr,
 };
 
@@ -122,7 +119,7 @@
 		return -EINVAL;
 
 	mutex_lock(&kernfs_mutex);
-	error = inode_change_ok(inode, iattr);
+	error = setattr_prepare(dentry, iattr);
 	if (error)
 		goto out;
 
@@ -138,17 +135,12 @@
 	return error;
 }
 
-static int kernfs_node_setsecdata(struct kernfs_node *kn, void **secdata,
+static int kernfs_node_setsecdata(struct kernfs_iattrs *attrs, void **secdata,
 				  u32 *secdata_len)
 {
-	struct kernfs_iattrs *attrs;
 	void *old_secdata;
 	size_t old_secdata_len;
 
-	attrs = kernfs_iattrs(kn);
-	if (!attrs)
-		return -ENOMEM;
-
 	old_secdata = attrs->ia_secdata;
 	old_secdata_len = attrs->ia_secdata_len;
 
@@ -160,71 +152,6 @@
 	return 0;
 }
 
-int kernfs_iop_setxattr(struct dentry *unused, struct inode *inode,
-			const char *name, const void *value,
-			size_t size, int flags)
-{
-	struct kernfs_node *kn = inode->i_private;
-	struct kernfs_iattrs *attrs;
-	void *secdata;
-	int error;
-	u32 secdata_len = 0;
-
-	attrs = kernfs_iattrs(kn);
-	if (!attrs)
-		return -ENOMEM;
-
-	if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) {
-		const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
-		error = security_inode_setsecurity(inode, suffix,
-						value, size, flags);
-		if (error)
-			return error;
-		error = security_inode_getsecctx(inode,
-						&secdata, &secdata_len);
-		if (error)
-			return error;
-
-		mutex_lock(&kernfs_mutex);
-		error = kernfs_node_setsecdata(kn, &secdata, &secdata_len);
-		mutex_unlock(&kernfs_mutex);
-
-		if (secdata)
-			security_release_secctx(secdata, secdata_len);
-		return error;
-	} else if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN)) {
-		return simple_xattr_set(&attrs->xattrs, name, value, size,
-					flags);
-	}
-
-	return -EINVAL;
-}
-
-int kernfs_iop_removexattr(struct dentry *dentry, const char *name)
-{
-	struct kernfs_node *kn = dentry->d_fsdata;
-	struct kernfs_iattrs *attrs;
-
-	attrs = kernfs_iattrs(kn);
-	if (!attrs)
-		return -ENOMEM;
-
-	return simple_xattr_set(&attrs->xattrs, name, NULL, 0, XATTR_REPLACE);
-}
-
-ssize_t kernfs_iop_getxattr(struct dentry *unused, struct inode *inode,
-			    const char *name, void *buf, size_t size)
-{
-	struct kernfs_node *kn = inode->i_private;
-	struct kernfs_iattrs *attrs;
-
-	attrs = kernfs_iattrs(kn);
-	if (!attrs)
-		return -ENOMEM;
-
-	return simple_xattr_get(&attrs->xattrs, name, buf, size);
-}
-
 ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size)
 {
 	struct kernfs_node *kn = dentry->d_fsdata;
@@ -241,7 +168,7 @@
 {
 	inode->i_mode = mode;
 	inode->i_atime = inode->i_mtime =
-		inode->i_ctime = current_fs_time(inode->i_sb);
+		inode->i_ctime = current_time(inode);
 }
 
 static inline void set_inode_attr(struct inode *inode, struct iattr *iattr)
@@ -376,3 +303,83 @@
 
 	return generic_permission(inode, mask);
 }
+
+static int kernfs_xattr_get(const struct xattr_handler *handler,
+			    struct dentry *unused, struct inode *inode,
+			    const char *suffix, void *value, size_t size)
+{
+	const char *name = xattr_full_name(handler, suffix);
+	struct kernfs_node *kn = inode->i_private;
+	struct kernfs_iattrs *attrs;
+
+	attrs = kernfs_iattrs(kn);
+	if (!attrs)
+		return -ENOMEM;
+
+	return simple_xattr_get(&attrs->xattrs, name, value, size);
+}
+
+static int kernfs_xattr_set(const struct xattr_handler *handler,
+			    struct dentry *unused, struct inode *inode,
+			    const char *suffix, const void *value,
+			    size_t size, int flags)
+{
+	const char *name = xattr_full_name(handler, suffix);
+	struct kernfs_node *kn = inode->i_private;
+	struct kernfs_iattrs *attrs;
+
+	attrs = kernfs_iattrs(kn);
+	if (!attrs)
+		return -ENOMEM;
+
+	return simple_xattr_set(&attrs->xattrs, name, value, size, flags);
+}
+
+const struct xattr_handler kernfs_trusted_xattr_handler = {
+	.prefix = XATTR_TRUSTED_PREFIX,
+	.get = kernfs_xattr_get,
+	.set = kernfs_xattr_set,
+};
+
+static int kernfs_security_xattr_set(const struct xattr_handler *handler,
+				     struct dentry *unused, struct inode *inode,
+				     const char *suffix, const void *value,
+				     size_t size, int flags)
+{
+	struct kernfs_node *kn = inode->i_private;
+	struct kernfs_iattrs *attrs;
+	void *secdata;
+	u32 secdata_len = 0;
+	int error;
+
+	attrs = kernfs_iattrs(kn);
+	if (!attrs)
+		return -ENOMEM;
+
+	error = security_inode_setsecurity(inode, suffix, value, size, flags);
+	if (error)
+		return error;
+	error = security_inode_getsecctx(inode, &secdata, &secdata_len);
+	if (error)
+		return error;
+
+	mutex_lock(&kernfs_mutex);
+	error = kernfs_node_setsecdata(attrs, &secdata, &secdata_len);
+	mutex_unlock(&kernfs_mutex);
+
+	if (secdata)
+		security_release_secctx(secdata, secdata_len);
+	return error;
+}
+
+const struct xattr_handler kernfs_security_xattr_handler = {
+	.prefix = XATTR_SECURITY_PREFIX,
+	.get = kernfs_xattr_get,
+	.set = kernfs_security_xattr_set,
+};
+
+const struct xattr_handler *kernfs_xattr_handlers[] = {
+	&kernfs_trusted_xattr_handler,
+	&kernfs_security_xattr_handler,
+	NULL
+};
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index 3715923..bfd551b 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -76,17 +76,12 @@
 /*
  * inode.c
  */
+extern const struct xattr_handler *kernfs_xattr_handlers[];
 void kernfs_evict_inode(struct inode *inode);
 int kernfs_iop_permission(struct inode *inode, int mask);
 int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr);
 int kernfs_iop_getattr(struct vfsmount *mnt, struct dentry *dentry,
 		       struct kstat *stat);
-int kernfs_iop_setxattr(struct dentry *dentry, struct inode *inode,
-			const char *name, const void *value,
-			size_t size, int flags);
-int kernfs_iop_removexattr(struct dentry *dentry, const char *name);
-ssize_t kernfs_iop_getxattr(struct dentry *dentry, struct inode *inode,
-			    const char *name, void *buf, size_t size);
 ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size);
 
 /*
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index b3d73ad..d5b149a 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -158,6 +158,7 @@
 	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = magic;
 	sb->s_op = &kernfs_sops;
+	sb->s_xattr = kernfs_xattr_handlers;
 	sb->s_time_gran = 1;
 
 	/* get root inode, initialize and unlock it */
diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
index 117b8b3..9b43ca0 100644
--- a/fs/kernfs/symlink.c
+++ b/fs/kernfs/symlink.c
@@ -134,9 +134,6 @@
 }
 
 const struct inode_operations kernfs_symlink_iops = {
-	.setxattr	= kernfs_iop_setxattr,
-	.removexattr	= kernfs_iop_removexattr,
-	.getxattr	= kernfs_iop_getxattr,
 	.listxattr	= kernfs_iop_listxattr,
 	.readlink	= generic_readlink,
 	.get_link	= kernfs_iop_get_link,
diff --git a/fs/libfs.c b/fs/libfs.c
index 74dc8b9..48826d4 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -236,8 +236,8 @@
  * Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that
  * will never be mountable)
  */
-struct dentry *mount_pseudo(struct file_system_type *fs_type, char *name,
-	const struct super_operations *ops,
+struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
+	const struct super_operations *ops, const struct xattr_handler **xattr,
 	const struct dentry_operations *dops, unsigned long magic)
 {
 	struct super_block *s;
@@ -254,6 +254,7 @@
 	s->s_blocksize_bits = PAGE_SHIFT;
 	s->s_magic = magic;
 	s->s_op = ops ? ops : &simple_super_operations;
+	s->s_xattr = xattr;
 	s->s_time_gran = 1;
 	root = new_inode(s);
 	if (!root)
@@ -265,7 +266,7 @@
 	 */
 	root->i_ino = 1;
 	root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
-	root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME;
+	root->i_atime = root->i_mtime = root->i_ctime = current_time(root);
 	dentry = __d_alloc(s, &d_name);
 	if (!dentry) {
 		iput(root);
@@ -281,7 +282,7 @@
 	deactivate_locked_super(s);
 	return ERR_PTR(-ENOMEM);
 }
-EXPORT_SYMBOL(mount_pseudo);
+EXPORT_SYMBOL(mount_pseudo_xattr);
 
 int simple_open(struct inode *inode, struct file *file)
 {
@@ -295,7 +296,7 @@
 {
 	struct inode *inode = d_inode(old_dentry);
 
-	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
 	inc_nlink(inode);
 	ihold(inode);
 	dget(dentry);
@@ -329,7 +330,7 @@
 {
 	struct inode *inode = d_inode(dentry);
 
-	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
 	drop_nlink(inode);
 	dput(dentry);
 	return 0;
@@ -349,11 +350,15 @@
 EXPORT_SYMBOL(simple_rmdir);
 
 int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
-		struct inode *new_dir, struct dentry *new_dentry)
+		  struct inode *new_dir, struct dentry *new_dentry,
+		  unsigned int flags)
 {
 	struct inode *inode = d_inode(old_dentry);
 	int they_are_dirs = d_is_dir(old_dentry);
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	if (!simple_empty(new_dentry))
 		return -ENOTEMPTY;
 
@@ -369,7 +374,7 @@
 	}
 
 	old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
-		new_dir->i_mtime = inode->i_ctime = CURRENT_TIME;
+		new_dir->i_mtime = inode->i_ctime = current_time(old_dir);
 
 	return 0;
 }
@@ -394,7 +399,7 @@
 	struct inode *inode = d_inode(dentry);
 	int error;
 
-	error = inode_change_ok(inode, iattr);
+	error = setattr_prepare(dentry, iattr);
 	if (error)
 		return error;
 
@@ -520,7 +525,7 @@
 	 */
 	inode->i_ino = 1;
 	inode->i_mode = S_IFDIR | 0755;
-	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 	inode->i_op = &simple_dir_inode_operations;
 	inode->i_fop = &simple_dir_operations;
 	set_nlink(inode, 2);
@@ -546,7 +551,7 @@
 			goto out;
 		}
 		inode->i_mode = S_IFREG | files->mode;
-		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 		inode->i_fop = files->ops;
 		inode->i_ino = i;
 		d_add(dentry, inode);
@@ -1092,7 +1097,7 @@
 	inode->i_uid = current_fsuid();
 	inode->i_gid = current_fsgid();
 	inode->i_flags |= S_PRIVATE;
-	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 	return inode;
 }
 EXPORT_SYMBOL(alloc_anon_inode);
@@ -1149,24 +1154,6 @@
 	return -EPERM;
 }
 
-static int empty_dir_setxattr(struct dentry *dentry, struct inode *inode,
-			      const char *name, const void *value,
-			      size_t size, int flags)
-{
-	return -EOPNOTSUPP;
-}
-
-static ssize_t empty_dir_getxattr(struct dentry *dentry, struct inode *inode,
-				  const char *name, void *value, size_t size)
-{
-	return -EOPNOTSUPP;
-}
-
-static int empty_dir_removexattr(struct dentry *dentry, const char *name)
-{
-	return -EOPNOTSUPP;
-}
-
 static ssize_t empty_dir_listxattr(struct dentry *dentry, char *list, size_t size)
 {
 	return -EOPNOTSUPP;
@@ -1177,9 +1164,6 @@
 	.permission	= generic_permission,
 	.setattr	= empty_dir_setattr,
 	.getattr	= empty_dir_getattr,
-	.setxattr	= empty_dir_setxattr,
-	.getxattr	= empty_dir_getxattr,
-	.removexattr	= empty_dir_removexattr,
 	.listxattr	= empty_dir_listxattr,
 };
 
@@ -1215,6 +1199,7 @@
 	inode->i_blocks = 0;
 
 	inode->i_op = &empty_dir_inode_operations;
+	inode->i_opflags &= ~IOP_XATTR;
 	inode->i_fop = &empty_dir_operations;
 }
 
diff --git a/fs/lockd/procfs.h b/fs/lockd/procfs.h
index 2257a13..184a15e 100644
--- a/fs/lockd/procfs.h
+++ b/fs/lockd/procfs.h
@@ -6,8 +6,6 @@
 #ifndef _LOCKD_PROCFS_H
 #define _LOCKD_PROCFS_H
 
-#include <linux/kconfig.h>
-
 #if IS_ENABLED(CONFIG_PROC_FS)
 int lockd_create_procfs(void);
 void lockd_remove_procfs(void);
diff --git a/fs/locks.c b/fs/locks.c
index 90ec671..22c5b4a 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -138,6 +138,11 @@
 #define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
 #define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
 
+static inline bool is_remote_lock(struct file *filp)
+{
+	return likely(!(filp->f_path.dentry->d_sb->s_flags & MS_NOREMOTELOCK));
+}
+
 static bool lease_breaking(struct file_lock *fl)
 {
 	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
@@ -806,7 +811,7 @@
 {
 	struct file_lock *cfl;
 	struct file_lock_context *ctx;
-	struct inode *inode = file_inode(filp);
+	struct inode *inode = locks_inode(filp);
 
 	ctx = smp_load_acquire(&inode->i_flctx);
 	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
@@ -1211,7 +1216,7 @@
 int posix_lock_file(struct file *filp, struct file_lock *fl,
 			struct file_lock *conflock)
 {
-	return posix_lock_inode(file_inode(filp), fl, conflock);
+	return posix_lock_inode(locks_inode(filp), fl, conflock);
 }
 EXPORT_SYMBOL(posix_lock_file);
 
@@ -1251,7 +1256,7 @@
 int locks_mandatory_locked(struct file *file)
 {
 	int ret;
-	struct inode *inode = file_inode(file);
+	struct inode *inode = locks_inode(file);
 	struct file_lock_context *ctx;
 	struct file_lock *fl;
 
@@ -1564,7 +1569,7 @@
 	}
 
 	if (has_lease)
-		*time = current_fs_time(inode->i_sb);
+		*time = current_time(inode);
 	else
 		*time = inode->i_mtime;
 }
@@ -1597,15 +1602,16 @@
 int fcntl_getlease(struct file *filp)
 {
 	struct file_lock *fl;
-	struct inode *inode = file_inode(filp);
+	struct inode *inode = locks_inode(filp);
 	struct file_lock_context *ctx;
 	int type = F_UNLCK;
 	LIST_HEAD(dispose);
 
 	ctx = smp_load_acquire(&inode->i_flctx);
 	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
+		percpu_down_read_preempt_disable(&file_rwsem);
 		spin_lock(&ctx->flc_lock);
-		time_out_leases(file_inode(filp), &dispose);
+		time_out_leases(inode, &dispose);
 		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
 			if (fl->fl_file != filp)
 				continue;
@@ -1613,6 +1619,8 @@
 			break;
 		}
 		spin_unlock(&ctx->flc_lock);
+		percpu_up_read_preempt_enable(&file_rwsem);
+
 		locks_dispose_list(&dispose);
 	}
 	return type;
@@ -1638,7 +1646,8 @@
 	if (flags & FL_LAYOUT)
 		return 0;
 
-	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
+	if ((arg == F_RDLCK) &&
+	    (atomic_read(&d_real_inode(dentry)->i_writecount) > 0))
 		return -EAGAIN;
 
 	if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
@@ -1653,7 +1662,7 @@
 {
 	struct file_lock *fl, *my_fl = NULL, *lease;
 	struct dentry *dentry = filp->f_path.dentry;
-	struct inode *inode = file_inode(filp);
+	struct inode *inode = dentry->d_inode;
 	struct file_lock_context *ctx;
 	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
 	int error;
@@ -1769,7 +1778,7 @@
 {
 	int error = -EAGAIN;
 	struct file_lock *fl, *victim = NULL;
-	struct inode *inode = file_inode(filp);
+	struct inode *inode = locks_inode(filp);
 	struct file_lock_context *ctx;
 	LIST_HEAD(dispose);
 
@@ -1811,7 +1820,7 @@
 int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
 			void **priv)
 {
-	struct inode *inode = file_inode(filp);
+	struct inode *inode = locks_inode(filp);
 	int error;
 
 	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
@@ -1859,7 +1868,7 @@
 int
 vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
 {
-	if (filp->f_op->setlease)
+	if (filp->f_op->setlease && is_remote_lock(filp))
 		return filp->f_op->setlease(filp, arg, lease, priv);
 	else
 		return generic_setlease(filp, arg, lease, priv);
@@ -2008,7 +2017,7 @@
 	if (error)
 		goto out_free;
 
-	if (f.file->f_op->flock)
+	if (f.file->f_op->flock && is_remote_lock(f.file))
 		error = f.file->f_op->flock(f.file,
 					  (can_sleep) ? F_SETLKW : F_SETLK,
 					  lock);
@@ -2034,7 +2043,7 @@
  */
 int vfs_test_lock(struct file *filp, struct file_lock *fl)
 {
-	if (filp->f_op->lock)
+	if (filp->f_op->lock && is_remote_lock(filp))
 		return filp->f_op->lock(filp, F_GETLK, fl);
 	posix_test_lock(filp, fl);
 	return 0;
@@ -2158,7 +2167,7 @@
  */
 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
 {
-	if (filp->f_op->lock)
+	if (filp->f_op->lock && is_remote_lock(filp))
 		return filp->f_op->lock(filp, cmd, fl);
 	else
 		return posix_lock_file(filp, fl, conf);
@@ -2220,7 +2229,7 @@
 	if (file_lock == NULL)
 		return -ENOLCK;
 
-	inode = file_inode(filp);
+	inode = locks_inode(filp);
 
 	/*
 	 * This might block, so we do it before checking the inode.
@@ -2372,7 +2381,7 @@
 	if (copy_from_user(&flock, l, sizeof(flock)))
 		goto out;
 
-	inode = file_inode(filp);
+	inode = locks_inode(filp);
 
 	/* Don't allow mandatory locks on files that may be memory mapped
 	 * and shared.
@@ -2455,6 +2464,7 @@
 void locks_remove_posix(struct file *filp, fl_owner_t owner)
 {
 	int error;
+	struct inode *inode = locks_inode(filp);
 	struct file_lock lock;
 	struct file_lock_context *ctx;
 
@@ -2463,7 +2473,7 @@
 	 * posix_lock_file().  Another process could be setting a lock on this
 	 * file at the same time, but we wouldn't remove that lock anyway.
 	 */
-	ctx =  smp_load_acquire(&file_inode(filp)->i_flctx);
+	ctx =  smp_load_acquire(&inode->i_flctx);
 	if (!ctx || list_empty(&ctx->flc_posix))
 		return;
 
@@ -2481,7 +2491,7 @@
 
 	if (lock.fl_ops && lock.fl_ops->fl_release_private)
 		lock.fl_ops->fl_release_private(&lock);
-	trace_locks_remove_posix(file_inode(filp), &lock, error);
+	trace_locks_remove_posix(inode, &lock, error);
 }
 
 EXPORT_SYMBOL(locks_remove_posix);
@@ -2498,12 +2508,12 @@
 		.fl_type = F_UNLCK,
 		.fl_end = OFFSET_MAX,
 	};
-	struct inode *inode = file_inode(filp);
+	struct inode *inode = locks_inode(filp);
 
 	if (list_empty(&flctx->flc_flock))
 		return;
 
-	if (filp->f_op->flock)
+	if (filp->f_op->flock && is_remote_lock(filp))
 		filp->f_op->flock(filp, F_SETLKW, &fl);
 	else
 		flock_lock_inode(inode, &fl);
@@ -2522,11 +2532,14 @@
 	if (list_empty(&ctx->flc_lease))
 		return;
 
+	percpu_down_read_preempt_disable(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
 		if (filp == fl->fl_file)
 			lease_modify(fl, F_UNLCK, &dispose);
 	spin_unlock(&ctx->flc_lock);
+	percpu_up_read_preempt_enable(&file_rwsem);
+
 	locks_dispose_list(&dispose);
 }
 
@@ -2537,7 +2550,7 @@
 {
 	struct file_lock_context *ctx;
 
-	ctx = smp_load_acquire(&file_inode(filp)->i_flctx);
+	ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
 	if (!ctx)
 		return;
 
@@ -2581,7 +2594,7 @@
  */
 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
 {
-	if (filp->f_op->lock)
+	if (filp->f_op->lock && is_remote_lock(filp))
 		return filp->f_op->lock(filp, F_CANCELLK, fl);
 	return 0;
 }
@@ -2620,7 +2633,7 @@
 		fl_pid = fl->fl_pid;
 
 	if (fl->fl_file != NULL)
-		inode = file_inode(fl->fl_file);
+		inode = locks_inode(fl->fl_file);
 
 	seq_printf(f, "%lld:%s ", id, pfx);
 	if (IS_POSIX(fl)) {
@@ -2726,7 +2739,7 @@
 void show_fd_locks(struct seq_file *f,
 		  struct file *filp, struct files_struct *files)
 {
-	struct inode *inode = file_inode(filp);
+	struct inode *inode = locks_inode(filp);
 	struct file_lock_context *ctx;
 	int id = 0;
 
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 9568064..c87ea52 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -226,7 +226,7 @@
 	ta->state = UNLINK_1;
 	ta->ino = inode->i_ino;
 
-	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
 
 	page = logfs_get_dd_page(dir, dentry);
 	if (!page) {
@@ -540,7 +540,7 @@
 {
 	struct inode *inode = d_inode(old_dentry);
 
-	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
 	ihold(inode);
 	inc_nlink(inode);
 	mark_inode_dirty_sync(inode);
@@ -573,7 +573,7 @@
 	 * (crc-protected) journal.
 	 */
 	BUG_ON(beyond_eof(dir, pos));
-	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+	dir->i_ctime = dir->i_mtime = current_time(dir);
 	log_dir(" Delete dentry (%lx, %llx)\n", dir->i_ino, pos);
 	return logfs_delete(dir, pos, NULL);
 }
@@ -718,8 +718,12 @@
 }
 
 static int logfs_rename(struct inode *old_dir, struct dentry *old_dentry,
-			struct inode *new_dir, struct dentry *new_dentry)
+			struct inode *new_dir, struct dentry *new_dentry,
+			unsigned int flags)
 {
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	if (d_really_is_positive(new_dentry))
 		return logfs_rename_target(old_dir, old_dentry,
 					   new_dir, new_dentry);
diff --git a/fs/logfs/file.c b/fs/logfs/file.c
index f01ddfb..1db0493 100644
--- a/fs/logfs/file.c
+++ b/fs/logfs/file.c
@@ -211,7 +211,7 @@
 		li->li_flags = flags;
 		inode_unlock(inode);
 
-		inode->i_ctime = CURRENT_TIME;
+		inode->i_ctime = current_time(inode);
 		mark_inode_dirty_sync(inode);
 		return 0;
 
@@ -244,7 +244,7 @@
 	struct inode *inode = d_inode(dentry);
 	int err = 0;
 
-	err = inode_change_ok(inode, attr);
+	err = setattr_prepare(dentry, attr);
 	if (err)
 		return err;
 
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c
index db9cfc5..f440a15 100644
--- a/fs/logfs/inode.c
+++ b/fs/logfs/inode.c
@@ -213,8 +213,8 @@
 	i_gid_write(inode, 0);
 	inode->i_size	= 0;
 	inode->i_blocks	= 0;
-	inode->i_ctime	= CURRENT_TIME;
-	inode->i_mtime	= CURRENT_TIME;
+	inode->i_ctime	= current_time(inode);
+	inode->i_mtime	= current_time(inode);
 	li->li_refcount = 1;
 	INIT_LIST_HEAD(&li->li_freeing_list);
 
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 3fb8c6d..bf19bf4 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -1546,7 +1546,7 @@
 	int err;
 
 	flags |= WF_WRITE | WF_DELETE;
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+	inode->i_ctime = inode->i_mtime = current_time(inode);
 
 	logfs_unpack_index(index, &bix, &level);
 	if (logfs_block(page) && logfs_block(page)->reserved_bytes)
@@ -1578,7 +1578,7 @@
 	long flags = WF_DELETE;
 	int err;
 
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+	inode->i_ctime = inode->i_mtime = current_time(inode);
 
 	if (page->index < I0_BLOCKS)
 		return logfs_write_direct(inode, page, flags);
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
index 742942a..c2c3fd3 100644
--- a/fs/minix/bitmap.c
+++ b/fs/minix/bitmap.c
@@ -253,7 +253,7 @@
 	}
 	inode_init_owner(inode, dir, mode);
 	inode->i_ino = j;
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	inode->i_blocks = 0;
 	memset(&minix_i(inode)->u, 0, sizeof(minix_i(inode)->u));
 	insert_inode_hash(inode);
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index 31dcd51..7edc9b3 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -274,7 +274,7 @@
 		de->inode = inode->i_ino;
 	}
 	err = dir_commit_chunk(page, pos, sbi->s_dirsize);
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	mark_inode_dirty(dir);
 out_put:
 	dir_put_page(page);
@@ -306,7 +306,7 @@
 		unlock_page(page);
 	}
 	dir_put_page(page);
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+	inode->i_ctime = inode->i_mtime = current_time(inode);
 	mark_inode_dirty(inode);
 	return err;
 }
@@ -430,7 +430,7 @@
 		unlock_page(page);
 	}
 	dir_put_page(page);
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	mark_inode_dirty(dir);
 }
 
diff --git a/fs/minix/file.c b/fs/minix/file.c
index 94f0eb9a..a6a4797 100644
--- a/fs/minix/file.c
+++ b/fs/minix/file.c
@@ -26,7 +26,7 @@
 	struct inode *inode = d_inode(dentry);
 	int error;
 
-	error = inode_change_ok(inode, attr);
+	error = setattr_prepare(dentry, attr);
 	if (error)
 		return error;
 
diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c
index a731cab..4c57c9a 100644
--- a/fs/minix/itree_common.c
+++ b/fs/minix/itree_common.c
@@ -124,7 +124,7 @@
 
 	/* We are done with atomic stuff, now do the rest of housekeeping */
 
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 
 	/* had we spliced it onto indirect block? */
 	if (where->bh)
@@ -343,7 +343,7 @@
 		}
 		first_whole++;
 	}
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 	mark_inode_dirty(inode);
 }
 
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index 2887d1d..1e0f11f 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -106,7 +106,7 @@
 {
 	struct inode *inode = d_inode(old_dentry);
 
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	inode_inc_link_count(inode);
 	ihold(inode);
 	return add_nondir(dentry, inode);
@@ -185,7 +185,8 @@
 }
 
 static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
-			   struct inode * new_dir, struct dentry *new_dentry)
+			struct inode * new_dir, struct dentry *new_dentry,
+			unsigned int flags)
 {
 	struct inode * old_inode = d_inode(old_dentry);
 	struct inode * new_inode = d_inode(new_dentry);
@@ -195,6 +196,9 @@
 	struct minix_dir_entry * old_de;
 	int err = -ENOENT;
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	old_de = minix_find_entry(old_dentry, &old_page);
 	if (!old_de)
 		goto out;
@@ -219,7 +223,7 @@
 		if (!new_de)
 			goto out_dir;
 		minix_set_link(new_de, new_page, old_inode);
-		new_inode->i_ctime = CURRENT_TIME_SEC;
+		new_inode->i_ctime = current_time(new_inode);
 		if (dir_de)
 			drop_nlink(new_inode);
 		inode_dec_link_count(new_inode);
diff --git a/fs/namei.c b/fs/namei.c
index adb0414..5b4eed2 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1015,7 +1015,7 @@
 	if (!(nd->flags & LOOKUP_RCU)) {
 		touch_atime(&last->link);
 		cond_resched();
-	} else if (atime_needs_update(&last->link, inode)) {
+	} else if (atime_needs_update_rcu(&last->link, inode)) {
 		if (unlikely(unlazy_walk(nd, NULL, 0)))
 			return ERR_PTR(-ECHILD);
 		touch_atime(&last->link);
@@ -4369,12 +4369,9 @@
 	if (error)
 		return error;
 
-	if (!old_dir->i_op->rename && !old_dir->i_op->rename2)
+	if (!old_dir->i_op->rename)
 		return -EPERM;
 
-	if (flags && !old_dir->i_op->rename2)
-		return -EINVAL;
-
 	/*
 	 * If we are going to change the parent - check write permissions,
 	 * we'll need to flip '..'.
@@ -4428,14 +4425,8 @@
 		if (error)
 			goto out;
 	}
-	if (!old_dir->i_op->rename2) {
-		error = old_dir->i_op->rename(old_dir, old_dentry,
-					      new_dir, new_dentry);
-	} else {
-		WARN_ON(old_dir->i_op->rename != NULL);
-		error = old_dir->i_op->rename2(old_dir, old_dentry,
-					       new_dir, new_dentry, flags);
-	}
+	error = old_dir->i_op->rename(old_dir, old_dentry,
+				       new_dir, new_dentry, flags);
 	if (error)
 		goto out;
 
@@ -4677,6 +4668,31 @@
 }
 EXPORT_SYMBOL(generic_readlink);
 
+/**
+ * vfs_get_link - get symlink body
+ * @dentry: dentry on which to get symbolic link
+ * @done: caller needs to free returned data with this
+ *
+ * Calls security hook and i_op->get_link() on the supplied inode.
+ *
+ * It does not touch atime.  That's up to the caller if necessary.
+ *
+ * Does not work on "special" symlinks like /proc/$$/fd/N
+ */
+const char *vfs_get_link(struct dentry *dentry, struct delayed_call *done)
+{
+	const char *res = ERR_PTR(-EINVAL);
+	struct inode *inode = d_inode(dentry);
+
+	if (d_is_symlink(dentry)) {
+		res = ERR_PTR(security_inode_readlink(dentry));
+		if (!res)
+			res = inode->i_op->get_link(dentry, inode, done);
+	}
+	return res;
+}
+EXPORT_SYMBOL(vfs_get_link);
+
 /* get the link contents into pagecache */
 const char *page_get_link(struct dentry *dentry, struct inode *inode,
 			  struct delayed_call *callback)
diff --git a/fs/namespace.c b/fs/namespace.c
index db1b5a3..e6c234b 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2743,7 +2743,7 @@
 
 	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
 		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
-		   MS_STRICTATIME);
+		   MS_STRICTATIME | MS_NOREMOTELOCK);
 
 	if (flags & MS_REMOUNT)
 		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
@@ -2824,6 +2824,7 @@
 	return new_ns;
 }
 
+__latent_entropy
 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
 		struct user_namespace *user_ns, struct fs_struct *new_fs)
 {
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 17de5c1..6df2a38 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -36,7 +36,7 @@
 static int ncp_mkdir(struct inode *, struct dentry *, umode_t);
 static int ncp_rmdir(struct inode *, struct dentry *);
 static int ncp_rename(struct inode *, struct dentry *,
-	  	      struct inode *, struct dentry *);
+		      struct inode *, struct dentry *, unsigned int);
 static int ncp_mknod(struct inode * dir, struct dentry *dentry,
 		     umode_t mode, dev_t rdev);
 #if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
@@ -133,12 +133,11 @@
 		return 0;
 
 	if (!ncp_case_sensitive(inode)) {
-		struct super_block *sb = dentry->d_sb;
 		struct nls_table *t;
 		unsigned long hash;
 		int i;
 
-		t = NCP_IO_TABLE(sb);
+		t = NCP_IO_TABLE(dentry->d_sb);
 		hash = init_name_hash(dentry);
 		for (i=0; i<this->len ; i++)
 			hash = partial_name_hash(ncp_tolower(t, this->name[i]),
@@ -1106,13 +1105,17 @@
 }
 
 static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
-		      struct inode *new_dir, struct dentry *new_dentry)
+		      struct inode *new_dir, struct dentry *new_dentry,
+		      unsigned int flags)
 {
 	struct ncp_server *server = NCP_SERVER(old_dir);
 	int error;
 	int old_len, new_len;
 	__u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
 
+	if (flags)
+		return -EINVAL;
+
 	ncp_dbg(1, "%pd2 to %pd2\n", old_dentry, new_dentry);
 
 	ncp_age_dentry(server, old_dentry);
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 1af15fc..f6cf4c7e 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -884,7 +884,7 @@
 	/* ageing the dentry to force validation */
 	ncp_age_dentry(server, dentry);
 
-	result = inode_change_ok(inode, attr);
+	result = setattr_prepare(dentry, attr);
 	if (result < 0)
 		goto out;
 
diff --git a/fs/nfs/cache_lib.c b/fs/nfs/cache_lib.c
index 5f7b053..6de1570 100644
--- a/fs/nfs/cache_lib.c
+++ b/fs/nfs/cache_lib.c
@@ -76,7 +76,7 @@
 
 	dreq = container_of(d, struct nfs_cache_defer_req, deferred_req);
 
-	complete_all(&dreq->completion);
+	complete(&dreq->completion);
 	nfs_cache_defer_req_put(dreq);
 }
 
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 52a2831..532d8e2 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -31,8 +31,6 @@
 struct nfs_callback_data {
 	unsigned int users;
 	struct svc_serv *serv;
-	struct svc_rqst *rqst;
-	struct task_struct *task;
 };
 
 static struct nfs_callback_data nfs_callback_info[NFS4_MAX_MINOR_VERSION + 1];
@@ -89,15 +87,6 @@
 	return 0;
 }
 
-/*
- * Prepare to bring up the NFSv4 callback service
- */
-static struct svc_rqst *
-nfs4_callback_up(struct svc_serv *serv)
-{
-	return svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
-}
-
 #if defined(CONFIG_NFS_V4_1)
 /*
  * The callback service for NFSv4.1 callbacks
@@ -139,29 +128,6 @@
 	return 0;
 }
 
-/*
- * Bring up the NFSv4.1 callback service
- */
-static struct svc_rqst *
-nfs41_callback_up(struct svc_serv *serv)
-{
-	struct svc_rqst *rqstp;
-
-	INIT_LIST_HEAD(&serv->sv_cb_list);
-	spin_lock_init(&serv->sv_cb_lock);
-	init_waitqueue_head(&serv->sv_cb_waitq);
-	rqstp = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
-	dprintk("--> %s return %d\n", __func__, PTR_ERR_OR_ZERO(rqstp));
-	return rqstp;
-}
-
-static void nfs_minorversion_callback_svc_setup(struct svc_serv *serv,
-		struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp))
-{
-	*rqstpp = nfs41_callback_up(serv);
-	*callback_svc = nfs41_callback_svc;
-}
-
 static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
 		struct svc_serv *serv)
 {
@@ -173,13 +139,6 @@
 		xprt->bc_serv = serv;
 }
 #else
-static void nfs_minorversion_callback_svc_setup(struct svc_serv *serv,
-		struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp))
-{
-	*rqstpp = ERR_PTR(-ENOTSUPP);
-	*callback_svc = ERR_PTR(-ENOTSUPP);
-}
-
 static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
 		struct svc_serv *serv)
 {
@@ -189,45 +148,22 @@
 static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
 				  struct svc_serv *serv)
 {
-	struct svc_rqst *rqstp;
-	int (*callback_svc)(void *vrqstp);
-	struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
+	int nrservs = nfs_callback_nr_threads;
 	int ret;
 
 	nfs_callback_bc_serv(minorversion, xprt, serv);
 
-	if (cb_info->task)
+	if (nrservs < NFS4_MIN_NR_CALLBACK_THREADS)
+		nrservs = NFS4_MIN_NR_CALLBACK_THREADS;
+
+	if (serv->sv_nrthreads-1 == nrservs)
 		return 0;
 
-	switch (minorversion) {
-	case 0:
-		/* v4.0 callback setup */
-		rqstp = nfs4_callback_up(serv);
-		callback_svc = nfs4_callback_svc;
-		break;
-	default:
-		nfs_minorversion_callback_svc_setup(serv,
-				&rqstp, &callback_svc);
-	}
-
-	if (IS_ERR(rqstp))
-		return PTR_ERR(rqstp);
-
-	svc_sock_update_bufs(serv);
-
-	cb_info->serv = serv;
-	cb_info->rqst = rqstp;
-	cb_info->task = kthread_create(callback_svc, cb_info->rqst,
-				    "nfsv4.%u-svc", minorversion);
-	if (IS_ERR(cb_info->task)) {
-		ret = PTR_ERR(cb_info->task);
-		svc_exit_thread(cb_info->rqst);
-		cb_info->rqst = NULL;
-		cb_info->task = NULL;
+	ret = serv->sv_ops->svo_setup(serv, NULL, nrservs);
+	if (ret) {
+		serv->sv_ops->svo_setup(serv, NULL, 0);
 		return ret;
 	}
-	rqstp->rq_task = cb_info->task;
-	wake_up_process(cb_info->task);
 	dprintk("nfs_callback_up: service started\n");
 	return 0;
 }
@@ -281,19 +217,41 @@
 	return ret;
 }
 
-static struct svc_serv_ops nfs_cb_sv_ops = {
+static struct svc_serv_ops nfs40_cb_sv_ops = {
+	.svo_function		= nfs4_callback_svc,
 	.svo_enqueue_xprt	= svc_xprt_do_enqueue,
+	.svo_setup		= svc_set_num_threads,
+	.svo_module		= THIS_MODULE,
 };
+#if defined(CONFIG_NFS_V4_1)
+static struct svc_serv_ops nfs41_cb_sv_ops = {
+	.svo_function		= nfs41_callback_svc,
+	.svo_enqueue_xprt	= svc_xprt_do_enqueue,
+	.svo_setup		= svc_set_num_threads,
+	.svo_module		= THIS_MODULE,
+};
+
+struct svc_serv_ops *nfs4_cb_sv_ops[] = {
+	[0] = &nfs40_cb_sv_ops,
+	[1] = &nfs41_cb_sv_ops,
+};
+#else
+struct svc_serv_ops *nfs4_cb_sv_ops[] = {
+	[0] = &nfs40_cb_sv_ops,
+	[1] = NULL,
+};
+#endif
 
 static struct svc_serv *nfs_callback_create_svc(int minorversion)
 {
 	struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
 	struct svc_serv *serv;
+	struct svc_serv_ops *sv_ops;
 
 	/*
 	 * Check whether we're already up and running.
 	 */
-	if (cb_info->task) {
+	if (cb_info->serv) {
 		/*
 		 * Note: increase service usage, because later in case of error
 		 * svc_destroy() will be called.
@@ -302,6 +260,17 @@
 		return cb_info->serv;
 	}
 
+	switch (minorversion) {
+	case 0:
+		sv_ops = nfs4_cb_sv_ops[0];
+		break;
+	default:
+		sv_ops = nfs4_cb_sv_ops[1];
+	}
+
+	if (sv_ops == NULL)
+		return ERR_PTR(-ENOTSUPP);
+
 	/*
 	 * Sanity check: if there's no task,
 	 * we should be the first user ...
@@ -310,11 +279,12 @@
 		printk(KERN_WARNING "nfs_callback_create_svc: no kthread, %d users??\n",
 			cb_info->users);
 
-	serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, &nfs_cb_sv_ops);
+	serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, sv_ops);
 	if (!serv) {
 		printk(KERN_ERR "nfs_callback_create_svc: create service failed\n");
 		return ERR_PTR(-ENOMEM);
 	}
+	cb_info->serv = serv;
 	/* As there is only one thread we need to over-ride the
 	 * default maximum of 80 connections
 	 */
@@ -357,6 +327,8 @@
 	 * thread exits.
 	 */
 err_net:
+	if (!cb_info->users)
+		cb_info->serv = NULL;
 	svc_destroy(serv);
 err_create:
 	mutex_unlock(&nfs_callback_mutex);
@@ -374,18 +346,18 @@
 void nfs_callback_down(int minorversion, struct net *net)
 {
 	struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
+	struct svc_serv *serv;
 
 	mutex_lock(&nfs_callback_mutex);
-	nfs_callback_down_net(minorversion, cb_info->serv, net);
+	serv = cb_info->serv;
+	nfs_callback_down_net(minorversion, serv, net);
 	cb_info->users--;
-	if (cb_info->users == 0 && cb_info->task != NULL) {
-		kthread_stop(cb_info->task);
-		dprintk("nfs_callback_down: service stopped\n");
-		svc_exit_thread(cb_info->rqst);
+	if (cb_info->users == 0) {
+		svc_get(serv);
+		serv->sv_ops->svo_setup(serv, NULL, 0);
+		svc_destroy(serv);
 		dprintk("nfs_callback_down: service destroyed\n");
 		cb_info->serv = NULL;
-		cb_info->rqst = NULL;
-		cb_info->task = NULL;
 	}
 	mutex_unlock(&nfs_callback_mutex);
 }
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index 5fe1cec..c701c30 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -179,6 +179,15 @@
 	struct cb_devicenotifyargs *args,
 	void *dummy, struct cb_process_state *cps);
 
+struct cb_notify_lock_args {
+	struct nfs_fh			cbnl_fh;
+	struct nfs_lowner		cbnl_owner;
+	bool				cbnl_valid;
+};
+
+extern __be32 nfs4_callback_notify_lock(struct cb_notify_lock_args *args,
+					 void *dummy,
+					 struct cb_process_state *cps);
 #endif /* CONFIG_NFS_V4_1 */
 extern int check_gss_callback_principal(struct nfs_client *, struct svc_rqst *);
 extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args,
@@ -198,6 +207,9 @@
 #define NFS41_BC_MIN_CALLBACKS 1
 #define NFS41_BC_MAX_CALLBACKS 1
 
+#define NFS4_MIN_NR_CALLBACK_THREADS 1
+
 extern unsigned int nfs_callback_set_tcpport;
+extern unsigned short nfs_callback_nr_threads;
 
 #endif /* __LINUX_FS_NFS_CALLBACK_H */
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index f953ef6..e9aa235e 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -628,4 +628,20 @@
 	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
 	return status;
 }
+
+__be32 nfs4_callback_notify_lock(struct cb_notify_lock_args *args, void *dummy,
+				 struct cb_process_state *cps)
+{
+	if (!cps->clp) /* set in cb_sequence */
+		return htonl(NFS4ERR_OP_NOT_IN_SESSION);
+
+	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
+		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+
+	/* Don't wake anybody if the string looked bogus */
+	if (args->cbnl_valid)
+		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);
+
+	return htonl(NFS4_OK);
+}
 #endif /* CONFIG_NFS_V4_1 */
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 656f68f..eb094c6 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -35,6 +35,7 @@
 					 (1 + 3) * 4) // seqid, 3 slotids
 #define CB_OP_RECALLANY_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
 #define CB_OP_RECALLSLOT_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
+#define CB_OP_NOTIFY_LOCK_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
 #endif /* CONFIG_NFS_V4_1 */
 
 #define NFSDBG_FACILITY NFSDBG_CALLBACK
@@ -72,7 +73,7 @@
 	return xdr_ressize_check(rqstp, p);
 }
 
-static __be32 *read_buf(struct xdr_stream *xdr, int nbytes)
+static __be32 *read_buf(struct xdr_stream *xdr, size_t nbytes)
 {
 	__be32 *p;
 
@@ -534,6 +535,49 @@
 	return 0;
 }
 
+static __be32 decode_lockowner(struct xdr_stream *xdr, struct cb_notify_lock_args *args)
+{
+	__be32		*p;
+	unsigned int	len;
+
+	p = read_buf(xdr, 12);
+	if (unlikely(p == NULL))
+		return htonl(NFS4ERR_BADXDR);
+
+	p = xdr_decode_hyper(p, &args->cbnl_owner.clientid);
+	len = be32_to_cpu(*p);
+
+	p = read_buf(xdr, len);
+	if (unlikely(p == NULL))
+		return htonl(NFS4ERR_BADXDR);
+
+	/* Only try to decode if the length is right */
+	if (len == 20) {
+		p += 2;	/* skip "lock id:" */
+		args->cbnl_owner.s_dev = be32_to_cpu(*p++);
+		xdr_decode_hyper(p, &args->cbnl_owner.id);
+		args->cbnl_valid = true;
+	} else {
+		args->cbnl_owner.s_dev = 0;
+		args->cbnl_owner.id = 0;
+		args->cbnl_valid = false;
+	}
+	return 0;
+}
+
+static __be32 decode_notify_lock_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_notify_lock_args *args)
+{
+	__be32 status;
+
+	status = decode_fh(xdr, &args->cbnl_fh);
+	if (unlikely(status != 0))
+		goto out;
+	status = decode_lockowner(xdr, args);
+out:
+	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+	return status;
+}
+
 #endif /* CONFIG_NFS_V4_1 */
 
 static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
@@ -746,6 +790,7 @@
 	case OP_CB_RECALL_SLOT:
 	case OP_CB_LAYOUTRECALL:
 	case OP_CB_NOTIFY_DEVICEID:
+	case OP_CB_NOTIFY_LOCK:
 		*op = &callback_ops[op_nr];
 		break;
 
@@ -753,7 +798,6 @@
 	case OP_CB_PUSH_DELEG:
 	case OP_CB_RECALLABLE_OBJ_AVAIL:
 	case OP_CB_WANTS_CANCELLED:
-	case OP_CB_NOTIFY_LOCK:
 		return htonl(NFS4ERR_NOTSUPP);
 
 	default:
@@ -1006,6 +1050,11 @@
 		.decode_args = (callback_decode_arg_t)decode_recallslot_args,
 		.res_maxsize = CB_OP_RECALLSLOT_RES_MAXSZ,
 	},
+	[OP_CB_NOTIFY_LOCK] = {
+		.process_op = (callback_process_op_t)nfs4_callback_notify_lock,
+		.decode_args = (callback_decode_arg_t)decode_notify_lock_args,
+		.res_maxsize = CB_OP_NOTIFY_LOCK_RES_MAXSZ,
+	},
 #endif /* CONFIG_NFS_V4_1 */
 };
 
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 1e10678..7555ba8 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -313,7 +313,10 @@
 			continue;
 		/* Match the full socket address */
 		if (!rpc_cmp_addr_port(sap, clap))
-			continue;
+			/* Match all xprt_switch full socket addresses */
+			if (!rpc_clnt_xprt_switch_has_addr(clp->cl_rpcclient,
+							   sap))
+				continue;
 
 		atomic_inc(&clp->cl_count);
 		return clp;
@@ -785,7 +788,8 @@
 	}
 
 	fsinfo.fattr = fattr;
-	fsinfo.layouttype = 0;
+	fsinfo.nlayouttypes = 0;
+	memset(fsinfo.layouttype, 0, sizeof(fsinfo.layouttype));
 	error = clp->rpc_ops->fsinfo(server, mntfh, &fsinfo);
 	if (error < 0)
 		goto out_error;
@@ -1078,7 +1082,7 @@
 	idr_init(&nn->cb_ident_idr);
 #endif
 	spin_lock_init(&nn->nfs_client_lock);
-	nn->boot_time = CURRENT_TIME;
+	nn->boot_time = ktime_get_real();
 }
 
 #ifdef CONFIG_PROC_FS
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 322c258..dff600a 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -41,6 +41,17 @@
 	set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
 }
 
+static bool
+nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
+		fmode_t flags)
+{
+	if (delegation != NULL && (delegation->type & flags) == flags &&
+	    !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&
+	    !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
+		return true;
+	return false;
+}
+
 static int
 nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
 {
@@ -50,8 +61,7 @@
 	flags &= FMODE_READ|FMODE_WRITE;
 	rcu_read_lock();
 	delegation = rcu_dereference(NFS_I(inode)->delegation);
-	if (delegation != NULL && (delegation->type & flags) == flags &&
-	    !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
+	if (nfs4_is_valid_delegation(delegation, flags)) {
 		if (mark)
 			nfs_mark_delegation_referenced(delegation);
 		ret = 1;
@@ -185,15 +195,13 @@
 			rcu_read_unlock();
 			put_rpccred(oldcred);
 			trace_nfs4_reclaim_delegation(inode, res->delegation_type);
-		} else {
-			/* We appear to have raced with a delegation return. */
-			spin_unlock(&delegation->lock);
-			rcu_read_unlock();
-			nfs_inode_set_delegation(inode, cred, res);
+			return;
 		}
-	} else {
-		rcu_read_unlock();
+		/* We appear to have raced with a delegation return. */
+		spin_unlock(&delegation->lock);
 	}
+	rcu_read_unlock();
+	nfs_inode_set_delegation(inode, cred, res);
 }
 
 static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
@@ -642,28 +650,49 @@
 	rcu_read_unlock();
 }
 
-static void nfs_revoke_delegation(struct inode *inode)
+static void nfs_mark_delegation_revoked(struct nfs_server *server,
+		struct nfs_delegation *delegation)
 {
-	struct nfs_delegation *delegation;
-	rcu_read_lock();
-	delegation = rcu_dereference(NFS_I(inode)->delegation);
-	if (delegation != NULL) {
-		set_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
-		nfs_mark_return_delegation(NFS_SERVER(inode), delegation);
-	}
-	rcu_read_unlock();
+	set_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
+	delegation->stateid.type = NFS4_INVALID_STATEID_TYPE;
+	nfs_mark_return_delegation(server, delegation);
 }
 
-void nfs_remove_bad_delegation(struct inode *inode)
+static bool nfs_revoke_delegation(struct inode *inode,
+		const nfs4_stateid *stateid)
+{
+	struct nfs_delegation *delegation;
+	nfs4_stateid tmp;
+	bool ret = false;
+
+	rcu_read_lock();
+	delegation = rcu_dereference(NFS_I(inode)->delegation);
+	if (delegation == NULL)
+		goto out;
+	if (stateid == NULL) {
+		nfs4_stateid_copy(&tmp, &delegation->stateid);
+		stateid = &tmp;
+	} else if (!nfs4_stateid_match(stateid, &delegation->stateid))
+		goto out;
+	nfs_mark_delegation_revoked(NFS_SERVER(inode), delegation);
+	ret = true;
+out:
+	rcu_read_unlock();
+	if (ret)
+		nfs_inode_find_state_and_recover(inode, stateid);
+	return ret;
+}
+
+void nfs_remove_bad_delegation(struct inode *inode,
+		const nfs4_stateid *stateid)
 {
 	struct nfs_delegation *delegation;
 
-	nfs_revoke_delegation(inode);
+	if (!nfs_revoke_delegation(inode, stateid))
+		return;
 	delegation = nfs_inode_detach_delegation(inode);
-	if (delegation) {
-		nfs_inode_find_state_and_recover(inode, &delegation->stateid);
+	if (delegation)
 		nfs_free_delegation(delegation);
-	}
 }
 EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation);
 
@@ -786,8 +815,15 @@
 {
 	struct nfs_delegation *delegation;
 
-	list_for_each_entry_rcu(delegation, &server->delegations, super_list)
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
+		/*
+		 * If the delegation may have been admin revoked, then we
+		 * cannot reclaim it.
+		 */
+		if (test_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags))
+			continue;
 		set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
+	}
 }
 
 /**
@@ -851,6 +887,141 @@
 	rcu_read_unlock();
 }
 
+static inline bool nfs4_server_rebooted(const struct nfs_client *clp)
+{
+	return (clp->cl_state & (BIT(NFS4CLNT_CHECK_LEASE) |
+				BIT(NFS4CLNT_LEASE_EXPIRED) |
+				BIT(NFS4CLNT_SESSION_RESET))) != 0;
+}
+
+static void nfs_mark_test_expired_delegation(struct nfs_server *server,
+	    struct nfs_delegation *delegation)
+{
+	if (delegation->stateid.type == NFS4_INVALID_STATEID_TYPE)
+		return;
+	clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
+	set_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
+	set_bit(NFS4CLNT_DELEGATION_EXPIRED, &server->nfs_client->cl_state);
+}
+
+static void nfs_inode_mark_test_expired_delegation(struct nfs_server *server,
+		struct inode *inode)
+{
+	struct nfs_delegation *delegation;
+
+	rcu_read_lock();
+	delegation = rcu_dereference(NFS_I(inode)->delegation);
+	if (delegation)
+		nfs_mark_test_expired_delegation(server, delegation);
+	rcu_read_unlock();
+
+}
+
+static void nfs_delegation_mark_test_expired_server(struct nfs_server *server)
+{
+	struct nfs_delegation *delegation;
+
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list)
+		nfs_mark_test_expired_delegation(server, delegation);
+}
+
+/**
+ * nfs_mark_test_expired_all_delegations - mark all delegations for testing
+ * @clp: nfs_client to process
+ *
+ * Iterates through all the delegations associated with this server and
+ * marks them as needing to be checked for validity.
+ */
+void nfs_mark_test_expired_all_delegations(struct nfs_client *clp)
+{
+	struct nfs_server *server;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs_delegation_mark_test_expired_server(server);
+	rcu_read_unlock();
+}
+
+/**
+ * nfs_reap_expired_delegations - reap expired delegations
+ * @clp: nfs_client to process
+ *
+ * Iterates through all the delegations associated with this server and
+ * checks if they have may have been revoked. This function is usually
+ * expected to be called in cases where the server may have lost its
+ * lease.
+ */
+void nfs_reap_expired_delegations(struct nfs_client *clp)
+{
+	const struct nfs4_minor_version_ops *ops = clp->cl_mvops;
+	struct nfs_delegation *delegation;
+	struct nfs_server *server;
+	struct inode *inode;
+	struct rpc_cred *cred;
+	nfs4_stateid stateid;
+
+restart:
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		list_for_each_entry_rcu(delegation, &server->delegations,
+								super_list) {
+			if (test_bit(NFS_DELEGATION_RETURNING,
+						&delegation->flags))
+				continue;
+			if (test_bit(NFS_DELEGATION_TEST_EXPIRED,
+						&delegation->flags) == 0)
+				continue;
+			if (!nfs_sb_active(server->super))
+				continue;
+			inode = nfs_delegation_grab_inode(delegation);
+			if (inode == NULL) {
+				rcu_read_unlock();
+				nfs_sb_deactive(server->super);
+				goto restart;
+			}
+			cred = get_rpccred_rcu(delegation->cred);
+			nfs4_stateid_copy(&stateid, &delegation->stateid);
+			clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
+			rcu_read_unlock();
+			if (cred != NULL &&
+			    ops->test_and_free_expired(server, &stateid, cred) < 0) {
+				nfs_revoke_delegation(inode, &stateid);
+				nfs_inode_find_state_and_recover(inode, &stateid);
+			}
+			put_rpccred(cred);
+			if (nfs4_server_rebooted(clp)) {
+				nfs_inode_mark_test_expired_delegation(server,inode);
+				iput(inode);
+				nfs_sb_deactive(server->super);
+				return;
+			}
+			iput(inode);
+			nfs_sb_deactive(server->super);
+			goto restart;
+		}
+	}
+	rcu_read_unlock();
+}
+
+void nfs_inode_find_delegation_state_and_recover(struct inode *inode,
+		const nfs4_stateid *stateid)
+{
+	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+	struct nfs_delegation *delegation;
+	bool found = false;
+
+	rcu_read_lock();
+	delegation = rcu_dereference(NFS_I(inode)->delegation);
+	if (delegation &&
+	    nfs4_stateid_match_other(&delegation->stateid, stateid)) {
+		nfs_mark_test_expired_delegation(NFS_SERVER(inode), delegation);
+		found = true;
+	}
+	rcu_read_unlock();
+	if (found)
+		nfs4_schedule_state_manager(clp);
+}
+
 /**
  * nfs_delegations_present - check for existence of delegations
  * @clp: client state handle
@@ -893,7 +1064,7 @@
 	flags &= FMODE_READ|FMODE_WRITE;
 	rcu_read_lock();
 	delegation = rcu_dereference(nfsi->delegation);
-	ret = (delegation != NULL && (delegation->type & flags) == flags);
+	ret = nfs4_is_valid_delegation(delegation, flags);
 	if (ret) {
 		nfs4_stateid_copy(dst, &delegation->stateid);
 		nfs_mark_delegation_referenced(delegation);
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 64724d2..e9d5557 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -32,6 +32,7 @@
 	NFS_DELEGATION_REFERENCED,
 	NFS_DELEGATION_RETURNING,
 	NFS_DELEGATION_REVOKED,
+	NFS_DELEGATION_TEST_EXPIRED,
 };
 
 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
@@ -47,11 +48,14 @@
 void nfs_expire_unreferenced_delegations(struct nfs_client *clp);
 int nfs_client_return_marked_delegations(struct nfs_client *clp);
 int nfs_delegations_present(struct nfs_client *clp);
-void nfs_remove_bad_delegation(struct inode *inode);
+void nfs_remove_bad_delegation(struct inode *inode, const nfs4_stateid *stateid);
 
 void nfs_delegation_mark_reclaim(struct nfs_client *clp);
 void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
 
+void nfs_mark_test_expired_all_delegations(struct nfs_client *clp);
+void nfs_reap_expired_delegations(struct nfs_client *clp);
+
 /* NFSv4 delegation-related procedures */
 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync);
 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type);
@@ -62,6 +66,8 @@
 int nfs4_have_delegation(struct inode *inode, fmode_t flags);
 int nfs4_check_delegation(struct inode *inode, fmode_t flags);
 bool nfs4_delegation_flush_on_close(const struct inode *inode);
+void nfs_inode_find_delegation_state_and_recover(struct inode *inode,
+		const nfs4_stateid *stateid);
 
 #endif
 
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 177fefb..5f1af4c 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -435,11 +435,11 @@
 		return 0;
 
 	nfsi = NFS_I(inode);
-	if (entry->fattr->fileid == nfsi->fileid)
-		return 1;
-	if (nfs_compare_fh(entry->fh, &nfsi->fh) == 0)
-		return 1;
-	return 0;
+	if (entry->fattr->fileid != nfsi->fileid)
+		return 0;
+	if (entry->fh->size && nfs_compare_fh(entry->fh, &nfsi->fh) != 0)
+		return 0;
+	return 1;
 }
 
 static
@@ -496,6 +496,14 @@
 		return;
 	if (!(entry->fattr->valid & NFS_ATTR_FATTR_FSID))
 		return;
+	if (filename.len == 0)
+		return;
+	/* Validate that the name doesn't contain any illegal '\0' */
+	if (strnlen(filename.name, filename.len) != filename.len)
+		return;
+	/* ...or '/' */
+	if (strnchr(filename.name, filename.len, '/'))
+		return;
 	if (filename.name[0] == '.') {
 		if (filename.len == 1)
 			return;
@@ -517,6 +525,8 @@
 					&entry->fattr->fsid))
 			goto out;
 		if (nfs_same_file(dentry, entry)) {
+			if (!entry->fh->size)
+				goto out;
 			nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
 			status = nfs_refresh_inode(d_inode(dentry), entry->fattr);
 			if (!status)
@@ -529,6 +539,10 @@
 			goto again;
 		}
 	}
+	if (!entry->fh->size) {
+		d_lookup_done(dentry);
+		goto out;
+	}
 
 	inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr, entry->label);
 	alias = d_splice_alias(inode, dentry);
@@ -2013,7 +2027,8 @@
  * the rename.
  */
 int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
-		      struct inode *new_dir, struct dentry *new_dentry)
+	       struct inode *new_dir, struct dentry *new_dentry,
+	       unsigned int flags)
 {
 	struct inode *old_inode = d_inode(old_dentry);
 	struct inode *new_inode = d_inode(new_dentry);
@@ -2021,6 +2036,9 @@
 	struct rpc_task *task;
 	int error = -EBUSY;
 
+	if (flags)
+		return -EINVAL;
+
 	dfprintk(VFS, "NFS: rename(%pd2 -> %pd2, ct=%d)\n",
 		 old_dentry, new_dentry,
 		 d_count(new_dentry));
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 72b7d13..bd81bcf 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -387,7 +387,7 @@
 		dreq->iocb->ki_complete(dreq->iocb, res, 0);
 	}
 
-	complete_all(&dreq->completion);
+	complete(&dreq->completion);
 
 	nfs_direct_req_release(dreq);
 }
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 2efbdde..9ea85ae 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -520,7 +520,9 @@
 	.invalidatepage = nfs_invalidate_page,
 	.releasepage = nfs_release_page,
 	.direct_IO = nfs_direct_IO,
+#ifdef CONFIG_MIGRATION
 	.migratepage = nfs_migrate_page,
+#endif
 	.launder_page = nfs_launder_page,
 	.is_dirty_writeback = nfs_check_dirty_writeback,
 	.error_remove_page = generic_error_remove_page,
@@ -685,11 +687,6 @@
 	goto out;
 }
 
-static int do_vfs_lock(struct file *file, struct file_lock *fl)
-{
-	return locks_lock_file_wait(file, fl);
-}
-
 static int
 do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
 {
@@ -722,7 +719,7 @@
 	if (!is_local)
 		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
 	else
-		status = do_vfs_lock(filp, fl);
+		status = locks_lock_file_wait(filp, fl);
 	return status;
 }
 
@@ -747,7 +744,7 @@
 	if (!is_local)
 		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
 	else
-		status = do_vfs_lock(filp, fl);
+		status = locks_lock_file_wait(filp, fl);
 	if (status < 0)
 		goto out;
 
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 51b5136..98ace12 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1080,7 +1080,7 @@
 	case -NFS4ERR_BAD_STATEID:
 		if (state == NULL)
 			break;
-		nfs_remove_bad_delegation(state->inode);
+		nfs_remove_bad_delegation(state->inode, NULL);
 	case -NFS4ERR_OPENMODE:
 		if (state == NULL)
 			break;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 4b308a1..80bcc0b 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -359,7 +359,8 @@
 int nfs_symlink(struct inode *, struct dentry *, const char *);
 int nfs_link(struct dentry *, struct inode *, struct dentry *);
 int nfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
-int nfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
+int nfs_rename(struct inode *, struct dentry *,
+	       struct inode *, struct dentry *, unsigned int);
 
 /* file.c */
 int nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync);
@@ -533,12 +534,9 @@
 }
 #endif
 
-
 #ifdef CONFIG_MIGRATION
 extern int nfs_migrate_page(struct address_space *,
 		struct page *, struct page *, enum migrate_mode);
-#else
-#define nfs_migrate_page NULL
 #endif
 
 static inline int
@@ -561,7 +559,6 @@
 extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq);
 
 /* nfs4proc.c */
-extern void __nfs4_read_done_cb(struct nfs_pgio_header *);
 extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
 			    const struct nfs_client_initdata *);
 extern int nfs40_walk_client_list(struct nfs_client *clp,
@@ -570,6 +567,9 @@
 extern int nfs41_walk_client_list(struct nfs_client *clp,
 				struct nfs_client **result,
 				struct rpc_cred *cred);
+extern int nfs4_test_session_trunk(struct rpc_clnt *,
+				struct rpc_xprt *,
+				void *);
 
 static inline struct inode *nfs_igrab_and_active(struct inode *inode)
 {
diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h
index f0e06e4..fbce0d8 100644
--- a/fs/nfs/netns.h
+++ b/fs/nfs/netns.h
@@ -29,7 +29,7 @@
 	int cb_users[NFS4_MAX_MINOR_VERSION + 1];
 #endif
 	spinlock_t nfs_client_lock;
-	struct timespec boot_time;
+	ktime_t boot_time;
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry *proc_nfsfs;
 #endif
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 698be93..dc925b5 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -899,9 +899,6 @@
 	.setattr	= nfs_setattr,
 #ifdef CONFIG_NFS_V3_ACL
 	.listxattr	= nfs3_listxattr,
-	.getxattr	= generic_getxattr,
-	.setxattr	= generic_setxattr,
-	.removexattr	= generic_removexattr,
 	.get_acl	= nfs3_get_acl,
 	.set_acl	= nfs3_set_acl,
 #endif
@@ -913,9 +910,6 @@
 	.setattr	= nfs_setattr,
 #ifdef CONFIG_NFS_V3_ACL
 	.listxattr	= nfs3_listxattr,
-	.getxattr	= generic_getxattr,
-	.setxattr	= generic_setxattr,
-	.removexattr	= generic_removexattr,
 	.get_acl	= nfs3_get_acl,
 	.set_acl	= nfs3_set_acl,
 #endif
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 64b43b4..608501971 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -443,6 +443,7 @@
 	task = rpc_run_task(&task_setup);
 	if (IS_ERR(task))
 		return PTR_ERR(task);
+	rpc_put_task(task);
 	return 0;
 }
 
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 9bf64ea..9b3a82a 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -39,6 +39,7 @@
 	NFS4CLNT_BIND_CONN_TO_SESSION,
 	NFS4CLNT_MOVED,
 	NFS4CLNT_LEASE_MOVED,
+	NFS4CLNT_DELEGATION_EXPIRED,
 };
 
 #define NFS4_RENEW_TIMEOUT		0x01
@@ -57,8 +58,11 @@
 			struct nfs_fsinfo *);
 	void	(*free_lock_state)(struct nfs_server *,
 			struct nfs4_lock_state *);
+	int	(*test_and_free_expired)(struct nfs_server *,
+			nfs4_stateid *, struct rpc_cred *);
 	struct nfs_seqid *
 		(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
+	int	(*session_trunk)(struct rpc_clnt *, struct rpc_xprt *, void *);
 	const struct rpc_call_ops *call_sync_ops;
 	const struct nfs4_state_recovery_ops *reboot_recovery_ops;
 	const struct nfs4_state_recovery_ops *nograce_recovery_ops;
@@ -156,6 +160,7 @@
 	NFS_STATE_RECLAIM_NOGRACE,	/* OPEN stateid needs to recover state */
 	NFS_STATE_POSIX_LOCKS,		/* Posix locks are supported */
 	NFS_STATE_RECOVERY_FAILED,	/* OPEN stateid state recovery failed */
+	NFS_STATE_MAY_NOTIFY_LOCK,	/* server may CB_NOTIFY_LOCK */
 };
 
 struct nfs4_state {
@@ -203,6 +208,11 @@
 		struct rpc_cred *);
 };
 
+struct nfs4_add_xprt_data {
+	struct nfs_client	*clp;
+	struct rpc_cred		*cred;
+};
+
 struct nfs4_state_maintenance_ops {
 	int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *, unsigned);
 	struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *);
@@ -278,6 +288,8 @@
 		struct nfs_fsinfo *fsinfo);
 extern int nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data,
 				  bool sync);
+extern int nfs4_detect_session_trunking(struct nfs_client *clp,
+		struct nfs41_exchange_id_res *res, struct rpc_xprt *xprt);
 
 static inline bool
 is_ds_only_client(struct nfs_client *clp)
@@ -439,7 +451,7 @@
 extern int nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
 extern int nfs4_schedule_migration_recovery(const struct nfs_server *);
 extern void nfs4_schedule_lease_moved_recovery(struct nfs_client *);
-extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
+extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags, bool);
 extern void nfs41_handle_server_scope(struct nfs_client *,
 				      struct nfs41_server_scope **);
 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
@@ -471,6 +483,7 @@
 struct dentry *nfs4_try_mount(int, const char *, struct nfs_mount_info *, struct nfs_subversion *);
 extern bool nfs4_disable_idmapping;
 extern unsigned short max_session_slots;
+extern unsigned short max_session_cb_slots;
 extern unsigned short send_implementation_id;
 extern bool recover_lost_locks;
 
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index cd3b7cf..074ac71 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -199,6 +199,9 @@
 	clp->cl_minorversion = cl_init->minorversion;
 	clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
 	clp->cl_mig_gen = 1;
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+	init_waitqueue_head(&clp->cl_lock_waitq);
+#endif
 	return clp;
 
 error:
@@ -562,15 +565,15 @@
 /*
  * Returns true if the client IDs match
  */
-static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b)
+static bool nfs4_match_clientids(u64 a, u64 b)
 {
-	if (a->cl_clientid != b->cl_clientid) {
+	if (a != b) {
 		dprintk("NFS: --> %s client ID %llx does not match %llx\n",
-			__func__, a->cl_clientid, b->cl_clientid);
+			__func__, a, b);
 		return false;
 	}
 	dprintk("NFS: --> %s client ID %llx matches %llx\n",
-		__func__, a->cl_clientid, b->cl_clientid);
+		__func__, a, b);
 	return true;
 }
 
@@ -578,17 +581,15 @@
  * Returns true if the server major ids match
  */
 static bool
-nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b)
+nfs4_check_serverowner_major_id(struct nfs41_server_owner *o1,
+				struct nfs41_server_owner *o2)
 {
-	struct nfs41_server_owner *o1 = a->cl_serverowner;
-	struct nfs41_server_owner *o2 = b->cl_serverowner;
-
 	if (o1->major_id_sz != o2->major_id_sz)
 		goto out_major_mismatch;
 	if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0)
 		goto out_major_mismatch;
 
-	dprintk("NFS: --> %s server owners match\n", __func__);
+	dprintk("NFS: --> %s server owner major IDs match\n", __func__);
 	return true;
 
 out_major_mismatch:
@@ -597,6 +598,100 @@
 	return false;
 }
 
+/*
+ * Returns true if server minor ids match
+ */
+static bool
+nfs4_check_serverowner_minor_id(struct nfs41_server_owner *o1,
+				struct nfs41_server_owner *o2)
+{
+	/* Check eir_server_owner so_minor_id */
+	if (o1->minor_id != o2->minor_id)
+		goto out_minor_mismatch;
+
+	dprintk("NFS: --> %s server owner minor IDs match\n", __func__);
+	return true;
+
+out_minor_mismatch:
+	dprintk("NFS: --> %s server owner minor IDs do not match\n", __func__);
+	return false;
+}
+
+/*
+ * Returns true if the server scopes match
+ */
+static bool
+nfs4_check_server_scope(struct nfs41_server_scope *s1,
+			struct nfs41_server_scope *s2)
+{
+	if (s1->server_scope_sz != s2->server_scope_sz)
+		goto out_scope_mismatch;
+	if (memcmp(s1->server_scope, s2->server_scope,
+		   s1->server_scope_sz) != 0)
+		goto out_scope_mismatch;
+
+	dprintk("NFS: --> %s server scopes match\n", __func__);
+	return true;
+
+out_scope_mismatch:
+	dprintk("NFS: --> %s server scopes do not match\n",
+		__func__);
+	return false;
+}
+
+/**
+ * nfs4_detect_session_trunking - Checks for session trunking.
+ *
+ * Called after a successful EXCHANGE_ID on a multi-addr connection.
+ * Upon success, add the transport.
+ *
+ * @clp:    original mount nfs_client
+ * @res:    result structure from an exchange_id using the original mount
+ *          nfs_client with a new multi_addr transport
+ *
+ * Returns zero on success, otherwise -EINVAL
+ *
+ * Note: since the exchange_id for the new multi_addr transport uses the
+ * same nfs_client from the original mount, the cl_owner_id is reused,
+ * so eir_clientowner is the same.
+ */
+int nfs4_detect_session_trunking(struct nfs_client *clp,
+				 struct nfs41_exchange_id_res *res,
+				 struct rpc_xprt *xprt)
+{
+	/* Check eir_clientid */
+	if (!nfs4_match_clientids(clp->cl_clientid, res->clientid))
+		goto out_err;
+
+	/* Check eir_server_owner so_major_id */
+	if (!nfs4_check_serverowner_major_id(clp->cl_serverowner,
+					     res->server_owner))
+		goto out_err;
+
+	/* Check eir_server_owner so_minor_id */
+	if (!nfs4_check_serverowner_minor_id(clp->cl_serverowner,
+					     res->server_owner))
+		goto out_err;
+
+	/* Check eir_server_scope */
+	if (!nfs4_check_server_scope(clp->cl_serverscope, res->server_scope))
+		goto out_err;
+
+	/* Session trunking passed, add the xprt */
+	rpc_clnt_xprt_switch_add_xprt(clp->cl_rpcclient, xprt);
+
+	pr_info("NFS:  %s: Session trunking succeeded for %s\n",
+		clp->cl_hostname,
+		xprt->address_strings[RPC_DISPLAY_ADDR]);
+
+	return 0;
+out_err:
+	pr_info("NFS:  %s: Session trunking failed for %s\n", clp->cl_hostname,
+		xprt->address_strings[RPC_DISPLAY_ADDR]);
+
+	return -EINVAL;
+}
+
 /**
  * nfs41_walk_client_list - Find nfs_client that matches a client/server owner
  *
@@ -650,7 +745,7 @@
 		if (pos->cl_cons_state != NFS_CS_READY)
 			continue;
 
-		if (!nfs4_match_clientids(pos, new))
+		if (!nfs4_match_clientids(pos->cl_clientid, new->cl_clientid))
 			continue;
 
 		/*
@@ -658,7 +753,8 @@
 		 * client id trunking. In either case, we want to fall back
 		 * to using the existing nfs_client.
 		 */
-		if (!nfs4_check_clientid_trunking(pos, new))
+		if (!nfs4_check_serverowner_major_id(pos->cl_serverowner,
+						     new->cl_serverowner))
 			continue;
 
 		/* Unlike NFSv4.0, we know that NFSv4.1 always uses the
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index a9dec32..ad917bd7 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -99,8 +99,8 @@
 #ifdef CONFIG_NFS_V4_1
 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
 		struct rpc_cred *);
-static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
-		struct rpc_cred *);
+static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
+		struct rpc_cred *, bool);
 #endif
 
 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
@@ -328,6 +328,33 @@
 	kunmap_atomic(start);
 }
 
+static void nfs4_test_and_free_stateid(struct nfs_server *server,
+		nfs4_stateid *stateid,
+		struct rpc_cred *cred)
+{
+	const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
+
+	ops->test_and_free_expired(server, stateid, cred);
+}
+
+static void __nfs4_free_revoked_stateid(struct nfs_server *server,
+		nfs4_stateid *stateid,
+		struct rpc_cred *cred)
+{
+	stateid->type = NFS4_REVOKED_STATEID_TYPE;
+	nfs4_test_and_free_stateid(server, stateid, cred);
+}
+
+static void nfs4_free_revoked_stateid(struct nfs_server *server,
+		const nfs4_stateid *stateid,
+		struct rpc_cred *cred)
+{
+	nfs4_stateid tmp;
+
+	nfs4_stateid_copy(&tmp, stateid);
+	__nfs4_free_revoked_stateid(server, &tmp, cred);
+}
+
 static long nfs4_update_delay(long *timeout)
 {
 	long ret;
@@ -370,13 +397,23 @@
 	exception->delay = 0;
 	exception->recovering = 0;
 	exception->retry = 0;
+
+	if (stateid == NULL && state != NULL)
+		stateid = &state->stateid;
+
 	switch(errorcode) {
 		case 0:
 			return 0;
-		case -NFS4ERR_OPENMODE:
 		case -NFS4ERR_DELEG_REVOKED:
 		case -NFS4ERR_ADMIN_REVOKED:
+		case -NFS4ERR_EXPIRED:
 		case -NFS4ERR_BAD_STATEID:
+			if (inode != NULL && stateid != NULL) {
+				nfs_inode_find_state_and_recover(inode,
+						stateid);
+				goto wait_on_recovery;
+			}
+		case -NFS4ERR_OPENMODE:
 			if (inode) {
 				int err;
 
@@ -395,12 +432,6 @@
 			if (ret < 0)
 				break;
 			goto wait_on_recovery;
-		case -NFS4ERR_EXPIRED:
-			if (state != NULL) {
-				ret = nfs4_schedule_stateid_recovery(server, state);
-				if (ret < 0)
-					break;
-			}
 		case -NFS4ERR_STALE_STATEID:
 		case -NFS4ERR_STALE_CLIENTID:
 			nfs4_schedule_lease_recovery(clp);
@@ -616,6 +647,7 @@
 	}
 	spin_unlock(&tbl->slot_tbl_lock);
 
+	slot->privileged = args->sa_privileged ? 1 : 0;
 	args->sa_slot = slot;
 	res->sr_slot = slot;
 
@@ -723,12 +755,20 @@
 	/* Check the SEQUENCE operation status */
 	switch (res->sr_status) {
 	case 0:
+		/* If previous op on slot was interrupted and we reused
+		 * the seq# and got a reply from the cache, then retry
+		 */
+		if (task->tk_status == -EREMOTEIO && interrupted) {
+			++slot->seq_nr;
+			goto retry_nowait;
+		}
 		/* Update the slot's sequence and clientid lease timer */
 		slot->seq_done = 1;
 		clp = session->clp;
 		do_renew_lease(clp, res->sr_timestamp);
 		/* Check sequence flags */
-		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
+		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
+				!!slot->privileged);
 		nfs41_update_target_slotid(slot->table, slot, res);
 		break;
 	case 1:
@@ -875,6 +915,7 @@
 	}
 	spin_unlock(&tbl->slot_tbl_lock);
 
+	slot->privileged = args->sa_privileged ? 1 : 0;
 	args->sa_slot = slot;
 
 	dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
@@ -1353,6 +1394,19 @@
 	nfs4_state_set_mode_locked(state, state->state | fmode);
 }
 
+#ifdef CONFIG_NFS_V4_1
+static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
+{
+	if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
+		return true;
+	if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
+		return true;
+	if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
+		return true;
+	return false;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
 {
 	struct nfs_client *clp = state->owner->so_server->nfs_client;
@@ -1369,11 +1423,12 @@
 }
 
 static bool nfs_need_update_open_stateid(struct nfs4_state *state,
-		nfs4_stateid *stateid)
+		const nfs4_stateid *stateid, nfs4_stateid *freeme)
 {
 	if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
 		return true;
 	if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
+		nfs4_stateid_copy(freeme, &state->open_stateid);
 		nfs_test_and_clear_all_open_stateid(state);
 		return true;
 	}
@@ -1437,7 +1492,9 @@
 		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
 }
 
-static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
+static void nfs_set_open_stateid_locked(struct nfs4_state *state,
+		const nfs4_stateid *stateid, fmode_t fmode,
+		nfs4_stateid *freeme)
 {
 	switch (fmode) {
 		case FMODE_READ:
@@ -1449,14 +1506,18 @@
 		case FMODE_READ|FMODE_WRITE:
 			set_bit(NFS_O_RDWR_STATE, &state->flags);
 	}
-	if (!nfs_need_update_open_stateid(state, stateid))
+	if (!nfs_need_update_open_stateid(state, stateid, freeme))
 		return;
 	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
 		nfs4_stateid_copy(&state->stateid, stateid);
 	nfs4_stateid_copy(&state->open_stateid, stateid);
 }
 
-static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
+static void __update_open_stateid(struct nfs4_state *state,
+		const nfs4_stateid *open_stateid,
+		const nfs4_stateid *deleg_stateid,
+		fmode_t fmode,
+		nfs4_stateid *freeme)
 {
 	/*
 	 * Protect the call to nfs4_state_set_mode_locked and
@@ -1469,16 +1530,22 @@
 		set_bit(NFS_DELEGATED_STATE, &state->flags);
 	}
 	if (open_stateid != NULL)
-		nfs_set_open_stateid_locked(state, open_stateid, fmode);
+		nfs_set_open_stateid_locked(state, open_stateid, fmode, freeme);
 	write_sequnlock(&state->seqlock);
 	update_open_stateflags(state, fmode);
 	spin_unlock(&state->owner->so_lock);
 }
 
-static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
+static int update_open_stateid(struct nfs4_state *state,
+		const nfs4_stateid *open_stateid,
+		const nfs4_stateid *delegation,
+		fmode_t fmode)
 {
+	struct nfs_server *server = NFS_SERVER(state->inode);
+	struct nfs_client *clp = server->nfs_client;
 	struct nfs_inode *nfsi = NFS_I(state->inode);
 	struct nfs_delegation *deleg_cur;
+	nfs4_stateid freeme = {0};
 	int ret = 0;
 
 	fmode &= (FMODE_READ|FMODE_WRITE);
@@ -1500,7 +1567,8 @@
 		goto no_delegation_unlock;
 
 	nfs_mark_delegation_referenced(deleg_cur);
-	__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
+	__update_open_stateid(state, open_stateid, &deleg_cur->stateid,
+			fmode, &freeme);
 	ret = 1;
 no_delegation_unlock:
 	spin_unlock(&deleg_cur->lock);
@@ -1508,11 +1576,14 @@
 	rcu_read_unlock();
 
 	if (!ret && open_stateid != NULL) {
-		__update_open_stateid(state, open_stateid, NULL, fmode);
+		__update_open_stateid(state, open_stateid, NULL, fmode, &freeme);
 		ret = 1;
 	}
 	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
-		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
+		nfs4_schedule_state_manager(clp);
+	if (freeme.type != 0)
+		nfs4_test_and_free_stateid(server, &freeme,
+				state->owner->so_cred);
 
 	return ret;
 }
@@ -1889,7 +1960,6 @@
 		case -NFS4ERR_STALE_CLIENTID:
 		case -NFS4ERR_STALE_STATEID:
 			set_bit(NFS_DELEGATED_STATE, &state->flags);
-		case -NFS4ERR_EXPIRED:
 			/* Don't recall a delegation if it was lost */
 			nfs4_schedule_lease_recovery(server->nfs_client);
 			return -EAGAIN;
@@ -1901,6 +1971,7 @@
 			return -EAGAIN;
 		case -NFS4ERR_DELEG_REVOKED:
 		case -NFS4ERR_ADMIN_REVOKED:
+		case -NFS4ERR_EXPIRED:
 		case -NFS4ERR_BAD_STATEID:
 		case -NFS4ERR_OPENMODE:
 			nfs_inode_find_state_and_recover(state->inode,
@@ -2382,9 +2453,10 @@
 	return ret;
 }
 
-static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
+static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
+		const nfs4_stateid *stateid)
 {
-	nfs_remove_bad_delegation(state->inode);
+	nfs_remove_bad_delegation(state->inode, stateid);
 	write_seqlock(&state->seqlock);
 	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
 	write_sequnlock(&state->seqlock);
@@ -2394,7 +2466,7 @@
 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
 {
 	if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
-		nfs_finish_clear_delegation_stateid(state);
+		nfs_finish_clear_delegation_stateid(state, NULL);
 }
 
 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
@@ -2404,7 +2476,45 @@
 	return nfs4_open_expired(sp, state);
 }
 
+static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
+		nfs4_stateid *stateid,
+		struct rpc_cred *cred)
+{
+	return -NFS4ERR_BAD_STATEID;
+}
+
 #if defined(CONFIG_NFS_V4_1)
+static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
+		nfs4_stateid *stateid,
+		struct rpc_cred *cred)
+{
+	int status;
+
+	switch (stateid->type) {
+	default:
+		break;
+	case NFS4_INVALID_STATEID_TYPE:
+	case NFS4_SPECIAL_STATEID_TYPE:
+		return -NFS4ERR_BAD_STATEID;
+	case NFS4_REVOKED_STATEID_TYPE:
+		goto out_free;
+	}
+
+	status = nfs41_test_stateid(server, stateid, cred);
+	switch (status) {
+	case -NFS4ERR_EXPIRED:
+	case -NFS4ERR_ADMIN_REVOKED:
+	case -NFS4ERR_DELEG_REVOKED:
+		break;
+	default:
+		return status;
+	}
+out_free:
+	/* Ack the revoked state to the server */
+	nfs41_free_stateid(server, stateid, cred, true);
+	return -NFS4ERR_EXPIRED;
+}
+
 static void nfs41_check_delegation_stateid(struct nfs4_state *state)
 {
 	struct nfs_server *server = NFS_SERVER(state->inode);
@@ -2422,23 +2532,68 @@
 	}
 
 	nfs4_stateid_copy(&stateid, &delegation->stateid);
-	cred = get_rpccred(delegation->cred);
-	rcu_read_unlock();
-	status = nfs41_test_stateid(server, &stateid, cred);
-	trace_nfs4_test_delegation_stateid(state, NULL, status);
-
-	if (status != NFS_OK) {
-		/* Free the stateid unless the server explicitly
-		 * informs us the stateid is unrecognized. */
-		if (status != -NFS4ERR_BAD_STATEID)
-			nfs41_free_stateid(server, &stateid, cred);
-		nfs_finish_clear_delegation_stateid(state);
+	if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
+		rcu_read_unlock();
+		nfs_finish_clear_delegation_stateid(state, &stateid);
+		return;
 	}
 
+	if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags)) {
+		rcu_read_unlock();
+		return;
+	}
+
+	cred = get_rpccred(delegation->cred);
+	rcu_read_unlock();
+	status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
+	trace_nfs4_test_delegation_stateid(state, NULL, status);
+	if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
+		nfs_finish_clear_delegation_stateid(state, &stateid);
+
 	put_rpccred(cred);
 }
 
 /**
+ * nfs41_check_expired_locks - possibly free a lock stateid
+ *
+ * @state: NFSv4 state for an inode
+ *
+ * Returns NFS_OK if recovery for this stateid is now finished.
+ * Otherwise a negative NFS4ERR value is returned.
+ */
+static int nfs41_check_expired_locks(struct nfs4_state *state)
+{
+	int status, ret = NFS_OK;
+	struct nfs4_lock_state *lsp;
+	struct nfs_server *server = NFS_SERVER(state->inode);
+
+	if (!test_bit(LK_STATE_IN_USE, &state->flags))
+		goto out;
+	list_for_each_entry(lsp, &state->lock_states, ls_locks) {
+		if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
+			struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
+
+			status = nfs41_test_and_free_expired_stateid(server,
+					&lsp->ls_stateid,
+					cred);
+			trace_nfs4_test_lock_stateid(state, lsp, status);
+			if (status == -NFS4ERR_EXPIRED ||
+			    status == -NFS4ERR_BAD_STATEID) {
+				clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
+				lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE;
+				if (!recover_lost_locks)
+					set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
+			} else if (status != NFS_OK) {
+				ret = status;
+				break;
+			}
+		}
+	};
+out:
+	return ret;
+}
+
+/**
  * nfs41_check_open_stateid - possibly free an open stateid
  *
  * @state: NFSv4 state for an inode
@@ -2453,26 +2608,28 @@
 	struct rpc_cred *cred = state->owner->so_cred;
 	int status;
 
-	/* If a state reset has been done, test_stateid is unneeded */
-	if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
-	    (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
-	    (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
+	if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) {
+		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)  {
+			if (nfs4_have_delegation(state->inode, state->state))
+				return NFS_OK;
+			return -NFS4ERR_OPENMODE;
+		}
 		return -NFS4ERR_BAD_STATEID;
-
-	status = nfs41_test_stateid(server, stateid, cred);
+	}
+	status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
 	trace_nfs4_test_open_stateid(state, NULL, status);
-	if (status != NFS_OK) {
-		/* Free the stateid unless the server explicitly
-		 * informs us the stateid is unrecognized. */
-		if (status != -NFS4ERR_BAD_STATEID)
-			nfs41_free_stateid(server, stateid, cred);
-
+	if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
 		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
 		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
 		clear_bit(NFS_O_RDWR_STATE, &state->flags);
 		clear_bit(NFS_OPEN_STATE, &state->flags);
+		stateid->type = NFS4_INVALID_STATEID_TYPE;
 	}
-	return status;
+	if (status != NFS_OK)
+		return status;
+	if (nfs_open_stateid_recover_openmode(state))
+		return -NFS4ERR_OPENMODE;
+	return NFS_OK;
 }
 
 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
@@ -2480,6 +2637,9 @@
 	int status;
 
 	nfs41_check_delegation_stateid(state);
+	status = nfs41_check_expired_locks(state);
+	if (status != NFS_OK)
+		return status;
 	status = nfs41_check_open_stateid(state);
 	if (status != NFS_OK)
 		status = nfs4_open_expired(sp, state);
@@ -2537,6 +2697,8 @@
 		goto out;
 	if (server->caps & NFS_CAP_POSIX_LOCK)
 		set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
+	if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
+		set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags);
 
 	dentry = opendata->dentry;
 	if (d_really_is_negative(dentry)) {
@@ -2899,9 +3061,12 @@
 			break;
 		case -NFS4ERR_ADMIN_REVOKED:
 		case -NFS4ERR_STALE_STATEID:
+		case -NFS4ERR_EXPIRED:
+			nfs4_free_revoked_stateid(server,
+					&calldata->arg.stateid,
+					task->tk_msg.rpc_cred);
 		case -NFS4ERR_OLD_STATEID:
 		case -NFS4ERR_BAD_STATEID:
-		case -NFS4ERR_EXPIRED:
 			if (!nfs4_stateid_match(&calldata->arg.stateid,
 						&state->open_stateid)) {
 				rpc_restart_call_prepare(task);
@@ -4312,7 +4477,7 @@
 	if (error == 0) {
 		/* block layout checks this! */
 		server->pnfs_blksize = fsinfo->blksize;
-		set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
+		set_pnfs_layoutdriver(server, fhandle, fsinfo);
 	}
 
 	return error;
@@ -4399,24 +4564,25 @@
 	return false;
 }
 
-void __nfs4_read_done_cb(struct nfs_pgio_header *hdr)
-{
-	nfs_invalidate_atime(hdr->inode);
-}
-
 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
 {
 	struct nfs_server *server = NFS_SERVER(hdr->inode);
 
 	trace_nfs4_read(hdr, task->tk_status);
-	if (nfs4_async_handle_error(task, server,
-				    hdr->args.context->state,
-				    NULL) == -EAGAIN) {
-		rpc_restart_call_prepare(task);
-		return -EAGAIN;
+	if (task->tk_status < 0) {
+		struct nfs4_exception exception = {
+			.inode = hdr->inode,
+			.state = hdr->args.context->state,
+			.stateid = &hdr->args.stateid,
+		};
+		task->tk_status = nfs4_async_handle_exception(task,
+				server, task->tk_status, &exception);
+		if (exception.retry) {
+			rpc_restart_call_prepare(task);
+			return -EAGAIN;
+		}
 	}
 
-	__nfs4_read_done_cb(hdr);
 	if (task->tk_status > 0)
 		renew_lease(server, hdr->timestamp);
 	return 0;
@@ -4445,6 +4611,8 @@
 		return -EAGAIN;
 	if (nfs4_read_stateid_changed(task, &hdr->args))
 		return -EAGAIN;
+	if (task->tk_status > 0)
+		nfs_invalidate_atime(hdr->inode);
 	return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
 				    nfs4_read_done_cb(task, hdr);
 }
@@ -4482,11 +4650,19 @@
 	struct inode *inode = hdr->inode;
 
 	trace_nfs4_write(hdr, task->tk_status);
-	if (nfs4_async_handle_error(task, NFS_SERVER(inode),
-				    hdr->args.context->state,
-				    NULL) == -EAGAIN) {
-		rpc_restart_call_prepare(task);
-		return -EAGAIN;
+	if (task->tk_status < 0) {
+		struct nfs4_exception exception = {
+			.inode = hdr->inode,
+			.state = hdr->args.context->state,
+			.stateid = &hdr->args.stateid,
+		};
+		task->tk_status = nfs4_async_handle_exception(task,
+				NFS_SERVER(inode), task->tk_status,
+				&exception);
+		if (exception.retry) {
+			rpc_restart_call_prepare(task);
+			return -EAGAIN;
+		}
 	}
 	if (task->tk_status >= 0) {
 		renew_lease(NFS_SERVER(inode), hdr->timestamp);
@@ -5123,12 +5299,14 @@
 	if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
 		/* An impossible timestamp guarantees this value
 		 * will never match a generated boot time. */
-		verf[0] = 0;
-		verf[1] = cpu_to_be32(NSEC_PER_SEC + 1);
+		verf[0] = cpu_to_be32(U32_MAX);
+		verf[1] = cpu_to_be32(U32_MAX);
 	} else {
 		struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
-		verf[0] = cpu_to_be32(nn->boot_time.tv_sec);
-		verf[1] = cpu_to_be32(nn->boot_time.tv_nsec);
+		u64 ns = ktime_to_ns(nn->boot_time);
+
+		verf[0] = cpu_to_be32(ns >> 32);
+		verf[1] = cpu_to_be32(ns);
 	}
 	memcpy(bootverf->data, verf, sizeof(bootverf->data));
 }
@@ -5393,10 +5571,13 @@
 		renew_lease(data->res.server, data->timestamp);
 	case -NFS4ERR_ADMIN_REVOKED:
 	case -NFS4ERR_DELEG_REVOKED:
+	case -NFS4ERR_EXPIRED:
+		nfs4_free_revoked_stateid(data->res.server,
+				data->args.stateid,
+				task->tk_msg.rpc_cred);
 	case -NFS4ERR_BAD_STATEID:
 	case -NFS4ERR_OLD_STATEID:
 	case -NFS4ERR_STALE_STATEID:
-	case -NFS4ERR_EXPIRED:
 		task->tk_status = 0;
 		if (data->roc)
 			pnfs_roc_set_barrier(data->inode, data->roc_barrier);
@@ -5528,22 +5709,6 @@
 	return err;
 }
 
-#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
-#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
-
-/* 
- * sleep, with exponential backoff, and retry the LOCK operation. 
- */
-static unsigned long
-nfs4_set_lock_task_retry(unsigned long timeout)
-{
-	freezable_schedule_timeout_killable_unsafe(timeout);
-	timeout <<= 1;
-	if (timeout > NFS4_LOCK_MAXTIMEOUT)
-		return NFS4_LOCK_MAXTIMEOUT;
-	return timeout;
-}
-
 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
 {
 	struct inode *inode = state->inode;
@@ -5600,11 +5765,6 @@
 	return err;
 }
 
-static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
-{
-	return locks_lock_inode_wait(inode, fl);
-}
-
 struct nfs4_unlockdata {
 	struct nfs_locku_args arg;
 	struct nfs_locku_res res;
@@ -5657,14 +5817,18 @@
 	switch (task->tk_status) {
 		case 0:
 			renew_lease(calldata->server, calldata->timestamp);
-			do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
+			locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl);
 			if (nfs4_update_lock_stateid(calldata->lsp,
 					&calldata->res.stateid))
 				break;
+		case -NFS4ERR_ADMIN_REVOKED:
+		case -NFS4ERR_EXPIRED:
+			nfs4_free_revoked_stateid(calldata->server,
+					&calldata->arg.stateid,
+					task->tk_msg.rpc_cred);
 		case -NFS4ERR_BAD_STATEID:
 		case -NFS4ERR_OLD_STATEID:
 		case -NFS4ERR_STALE_STATEID:
-		case -NFS4ERR_EXPIRED:
 			if (!nfs4_stateid_match(&calldata->arg.stateid,
 						&calldata->lsp->ls_stateid))
 				rpc_restart_call_prepare(task);
@@ -5765,7 +5929,7 @@
 	mutex_lock(&sp->so_delegreturn_mutex);
 	/* Exclude nfs4_reclaim_open_stateid() - note nesting! */
 	down_read(&nfsi->rwsem);
-	if (do_vfs_lock(inode, request) == -ENOENT) {
+	if (locks_lock_inode_wait(inode, request) == -ENOENT) {
 		up_read(&nfsi->rwsem);
 		mutex_unlock(&sp->so_delegreturn_mutex);
 		goto out;
@@ -5906,7 +6070,7 @@
 				data->timestamp);
 		if (data->arg.new_lock) {
 			data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
-			if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
+			if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) {
 				rpc_restart_call_prepare(task);
 				break;
 			}
@@ -5965,6 +6129,7 @@
 {
 	switch (error) {
 	case -NFS4ERR_ADMIN_REVOKED:
+	case -NFS4ERR_EXPIRED:
 	case -NFS4ERR_BAD_STATEID:
 		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
 		if (new_lock_owner != 0 ||
@@ -5973,7 +6138,6 @@
 		break;
 	case -NFS4ERR_STALE_STATEID:
 		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
-	case -NFS4ERR_EXPIRED:
 		nfs4_schedule_lease_recovery(server->nfs_client);
 	};
 }
@@ -6083,52 +6247,19 @@
 }
 
 #if defined(CONFIG_NFS_V4_1)
-/**
- * nfs41_check_expired_locks - possibly free a lock stateid
- *
- * @state: NFSv4 state for an inode
- *
- * Returns NFS_OK if recovery for this stateid is now finished.
- * Otherwise a negative NFS4ERR value is returned.
- */
-static int nfs41_check_expired_locks(struct nfs4_state *state)
-{
-	int status, ret = -NFS4ERR_BAD_STATEID;
-	struct nfs4_lock_state *lsp;
-	struct nfs_server *server = NFS_SERVER(state->inode);
-
-	list_for_each_entry(lsp, &state->lock_states, ls_locks) {
-		if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
-			struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
-
-			status = nfs41_test_stateid(server,
-					&lsp->ls_stateid,
-					cred);
-			trace_nfs4_test_lock_stateid(state, lsp, status);
-			if (status != NFS_OK) {
-				/* Free the stateid unless the server
-				 * informs us the stateid is unrecognized. */
-				if (status != -NFS4ERR_BAD_STATEID)
-					nfs41_free_stateid(server,
-							&lsp->ls_stateid,
-							cred);
-				clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
-				ret = status;
-			}
-		}
-	};
-
-	return ret;
-}
-
 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
 {
-	int status = NFS_OK;
+	struct nfs4_lock_state *lsp;
+	int status;
 
-	if (test_bit(LK_STATE_IN_USE, &state->flags))
-		status = nfs41_check_expired_locks(state);
-	if (status != NFS_OK)
-		status = nfs4_lock_expired(state, request);
+	status = nfs4_set_lock_state(state, request);
+	if (status != 0)
+		return status;
+	lsp = request->fl_u.nfs4_fl.owner;
+	if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) ||
+	    test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
+		return 0;
+	status = nfs4_lock_expired(state, request);
 	return status;
 }
 #endif
@@ -6138,17 +6269,10 @@
 	struct nfs_inode *nfsi = NFS_I(state->inode);
 	struct nfs4_state_owner *sp = state->owner;
 	unsigned char fl_flags = request->fl_flags;
-	int status = -ENOLCK;
+	int status;
 
-	if ((fl_flags & FL_POSIX) &&
-			!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
-		goto out;
-	/* Is this a delegated open? */
-	status = nfs4_set_lock_state(state, request);
-	if (status != 0)
-		goto out;
 	request->fl_flags |= FL_ACCESS;
-	status = do_vfs_lock(state->inode, request);
+	status = locks_lock_inode_wait(state->inode, request);
 	if (status < 0)
 		goto out;
 	mutex_lock(&sp->so_delegreturn_mutex);
@@ -6157,7 +6281,7 @@
 		/* Yes: cache locks! */
 		/* ...but avoid races with delegation recall... */
 		request->fl_flags = fl_flags & ~FL_SLEEP;
-		status = do_vfs_lock(state->inode, request);
+		status = locks_lock_inode_wait(state->inode, request);
 		up_read(&nfsi->rwsem);
 		mutex_unlock(&sp->so_delegreturn_mutex);
 		goto out;
@@ -6188,12 +6312,124 @@
 	return err;
 }
 
+#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
+#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
+
+static int
+nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd,
+			struct file_lock *request)
+{
+	int		status = -ERESTARTSYS;
+	unsigned long	timeout = NFS4_LOCK_MINTIMEOUT;
+
+	while(!signalled()) {
+		status = nfs4_proc_setlk(state, cmd, request);
+		if ((status != -EAGAIN) || IS_SETLK(cmd))
+			break;
+		freezable_schedule_timeout_interruptible(timeout);
+		timeout *= 2;
+		timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout);
+		status = -ERESTARTSYS;
+	}
+	return status;
+}
+
+#ifdef CONFIG_NFS_V4_1
+struct nfs4_lock_waiter {
+	struct task_struct	*task;
+	struct inode		*inode;
+	struct nfs_lowner	*owner;
+	bool			notified;
+};
+
+static int
+nfs4_wake_lock_waiter(wait_queue_t *wait, unsigned int mode, int flags, void *key)
+{
+	int ret;
+	struct cb_notify_lock_args *cbnl = key;
+	struct nfs4_lock_waiter	*waiter	= wait->private;
+	struct nfs_lowner	*lowner = &cbnl->cbnl_owner,
+				*wowner = waiter->owner;
+
+	/* Only wake if the callback was for the same owner */
+	if (lowner->clientid != wowner->clientid ||
+	    lowner->id != wowner->id		 ||
+	    lowner->s_dev != wowner->s_dev)
+		return 0;
+
+	/* Make sure it's for the right inode */
+	if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
+		return 0;
+
+	waiter->notified = true;
+
+	/* override "private" so we can use default_wake_function */
+	wait->private = waiter->task;
+	ret = autoremove_wake_function(wait, mode, flags, key);
+	wait->private = waiter;
+	return ret;
+}
+
+static int
+nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+{
+	int status = -ERESTARTSYS;
+	unsigned long flags;
+	struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
+	struct nfs_server *server = NFS_SERVER(state->inode);
+	struct nfs_client *clp = server->nfs_client;
+	wait_queue_head_t *q = &clp->cl_lock_waitq;
+	struct nfs_lowner owner = { .clientid = clp->cl_clientid,
+				    .id = lsp->ls_seqid.owner_id,
+				    .s_dev = server->s_dev };
+	struct nfs4_lock_waiter waiter = { .task  = current,
+					   .inode = state->inode,
+					   .owner = &owner,
+					   .notified = false };
+	wait_queue_t wait;
+
+	/* Don't bother with waitqueue if we don't expect a callback */
+	if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
+		return nfs4_retry_setlk_simple(state, cmd, request);
+
+	init_wait(&wait);
+	wait.private = &waiter;
+	wait.func = nfs4_wake_lock_waiter;
+	add_wait_queue(q, &wait);
+
+	while(!signalled()) {
+		status = nfs4_proc_setlk(state, cmd, request);
+		if ((status != -EAGAIN) || IS_SETLK(cmd))
+			break;
+
+		status = -ERESTARTSYS;
+		spin_lock_irqsave(&q->lock, flags);
+		if (waiter.notified) {
+			spin_unlock_irqrestore(&q->lock, flags);
+			continue;
+		}
+		set_current_state(TASK_INTERRUPTIBLE);
+		spin_unlock_irqrestore(&q->lock, flags);
+
+		freezable_schedule_timeout_interruptible(NFS4_LOCK_MAXTIMEOUT);
+	}
+
+	finish_wait(q, &wait);
+	return status;
+}
+#else /* !CONFIG_NFS_V4_1 */
+static inline int
+nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+{
+	return nfs4_retry_setlk_simple(state, cmd, request);
+}
+#endif
+
 static int
 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
 {
 	struct nfs_open_context *ctx;
 	struct nfs4_state *state;
-	unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
 	int status;
 
 	/* verify open state */
@@ -6220,6 +6456,11 @@
 
 	if (state == NULL)
 		return -ENOLCK;
+
+	if ((request->fl_flags & FL_POSIX) &&
+	    !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
+		return -ENOLCK;
+
 	/*
 	 * Don't rely on the VFS having checked the file open mode,
 	 * since it won't do this for flock() locks.
@@ -6234,16 +6475,11 @@
 			return -EBADF;
 	}
 
-	do {
-		status = nfs4_proc_setlk(state, cmd, request);
-		if ((status != -EAGAIN) || IS_SETLK(cmd))
-			break;
-		timeout = nfs4_set_lock_task_retry(timeout);
-		status = -ERESTARTSYS;
-		if (signalled())
-			break;
-	} while(status < 0);
-	return status;
+	status = nfs4_set_lock_state(state, request);
+	if (status != 0)
+		return status;
+
+	return nfs4_retry_setlk(state, cmd, request);
 }
 
 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
@@ -7104,114 +7340,56 @@
 	return 0;
 }
 
-/*
- * _nfs4_proc_exchange_id()
- *
- * Wrapper for EXCHANGE_ID operation.
- */
-static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
-	u32 sp4_how)
+struct nfs41_exchange_id_data {
+	struct nfs41_exchange_id_res res;
+	struct nfs41_exchange_id_args args;
+	struct rpc_xprt *xprt;
+	int rpc_status;
+};
+
+static void nfs4_exchange_id_done(struct rpc_task *task, void *data)
 {
-	nfs4_verifier verifier;
-	struct nfs41_exchange_id_args args = {
-		.verifier = &verifier,
-		.client = clp,
-#ifdef CONFIG_NFS_V4_1_MIGRATION
-		.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
-			 EXCHGID4_FLAG_BIND_PRINC_STATEID |
-			 EXCHGID4_FLAG_SUPP_MOVED_MIGR,
-#else
-		.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
-			 EXCHGID4_FLAG_BIND_PRINC_STATEID,
-#endif
-	};
-	struct nfs41_exchange_id_res res = {
-		0
-	};
-	int status;
-	struct rpc_message msg = {
-		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
-		.rpc_argp = &args,
-		.rpc_resp = &res,
-		.rpc_cred = cred,
-	};
+	struct nfs41_exchange_id_data *cdata =
+					(struct nfs41_exchange_id_data *)data;
+	struct nfs_client *clp = cdata->args.client;
+	int status = task->tk_status;
 
-	nfs4_init_boot_verifier(clp, &verifier);
-
-	status = nfs4_init_uniform_client_string(clp);
-	if (status)
-		goto out;
-
-	dprintk("NFS call  exchange_id auth=%s, '%s'\n",
-		clp->cl_rpcclient->cl_auth->au_ops->au_name,
-		clp->cl_owner_id);
-
-	res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
-					GFP_NOFS);
-	if (unlikely(res.server_owner == NULL)) {
-		status = -ENOMEM;
-		goto out;
-	}
-
-	res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
-					GFP_NOFS);
-	if (unlikely(res.server_scope == NULL)) {
-		status = -ENOMEM;
-		goto out_server_owner;
-	}
-
-	res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
-	if (unlikely(res.impl_id == NULL)) {
-		status = -ENOMEM;
-		goto out_server_scope;
-	}
-
-	switch (sp4_how) {
-	case SP4_NONE:
-		args.state_protect.how = SP4_NONE;
-		break;
-
-	case SP4_MACH_CRED:
-		args.state_protect = nfs4_sp4_mach_cred_request;
-		break;
-
-	default:
-		/* unsupported! */
-		WARN_ON_ONCE(1);
-		status = -EINVAL;
-		goto out_impl_id;
-	}
-
-	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
 	trace_nfs4_exchange_id(clp, status);
-	if (status == 0)
-		status = nfs4_check_cl_exchange_flags(res.flags);
 
 	if (status == 0)
-		status = nfs4_sp4_select_mode(clp, &res.state_protect);
+		status = nfs4_check_cl_exchange_flags(cdata->res.flags);
+
+	if (cdata->xprt && status == 0) {
+		status = nfs4_detect_session_trunking(clp, &cdata->res,
+						      cdata->xprt);
+		goto out;
+	}
+
+	if (status  == 0)
+		status = nfs4_sp4_select_mode(clp, &cdata->res.state_protect);
 
 	if (status == 0) {
-		clp->cl_clientid = res.clientid;
-		clp->cl_exchange_flags = res.flags;
+		clp->cl_clientid = cdata->res.clientid;
+		clp->cl_exchange_flags = cdata->res.flags;
 		/* Client ID is not confirmed */
-		if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) {
+		if (!(cdata->res.flags & EXCHGID4_FLAG_CONFIRMED_R)) {
 			clear_bit(NFS4_SESSION_ESTABLISHED,
-					&clp->cl_session->session_state);
-			clp->cl_seqid = res.seqid;
+			&clp->cl_session->session_state);
+			clp->cl_seqid = cdata->res.seqid;
 		}
 
 		kfree(clp->cl_serverowner);
-		clp->cl_serverowner = res.server_owner;
-		res.server_owner = NULL;
+		clp->cl_serverowner = cdata->res.server_owner;
+		cdata->res.server_owner = NULL;
 
 		/* use the most recent implementation id */
 		kfree(clp->cl_implid);
-		clp->cl_implid = res.impl_id;
-		res.impl_id = NULL;
+		clp->cl_implid = cdata->res.impl_id;
+		cdata->res.impl_id = NULL;
 
 		if (clp->cl_serverscope != NULL &&
 		    !nfs41_same_server_scope(clp->cl_serverscope,
-					     res.server_scope)) {
+					cdata->res.server_scope)) {
 			dprintk("%s: server_scope mismatch detected\n",
 				__func__);
 			set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
@@ -7220,17 +7398,147 @@
 		}
 
 		if (clp->cl_serverscope == NULL) {
-			clp->cl_serverscope = res.server_scope;
-			res.server_scope = NULL;
+			clp->cl_serverscope = cdata->res.server_scope;
+			cdata->res.server_scope = NULL;
 		}
+		/* Save the EXCHANGE_ID verifier session trunk tests */
+		memcpy(clp->cl_confirm.data, cdata->args.verifier->data,
+		       sizeof(clp->cl_confirm.data));
+	}
+out:
+	cdata->rpc_status = status;
+	return;
+}
+
+static void nfs4_exchange_id_release(void *data)
+{
+	struct nfs41_exchange_id_data *cdata =
+					(struct nfs41_exchange_id_data *)data;
+
+	nfs_put_client(cdata->args.client);
+	if (cdata->xprt) {
+		xprt_put(cdata->xprt);
+		rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient);
+	}
+	kfree(cdata->res.impl_id);
+	kfree(cdata->res.server_scope);
+	kfree(cdata->res.server_owner);
+	kfree(cdata);
+}
+
+static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
+	.rpc_call_done = nfs4_exchange_id_done,
+	.rpc_release = nfs4_exchange_id_release,
+};
+
+/*
+ * _nfs4_proc_exchange_id()
+ *
+ * Wrapper for EXCHANGE_ID operation.
+ */
+static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
+			u32 sp4_how, struct rpc_xprt *xprt)
+{
+	nfs4_verifier verifier;
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
+		.rpc_cred = cred,
+	};
+	struct rpc_task_setup task_setup_data = {
+		.rpc_client = clp->cl_rpcclient,
+		.callback_ops = &nfs4_exchange_id_call_ops,
+		.rpc_message = &msg,
+		.flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
+	};
+	struct nfs41_exchange_id_data *calldata;
+	struct rpc_task *task;
+	int status = -EIO;
+
+	if (!atomic_inc_not_zero(&clp->cl_count))
+		goto out;
+
+	status = -ENOMEM;
+	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
+	if (!calldata)
+		goto out;
+
+	if (!xprt)
+		nfs4_init_boot_verifier(clp, &verifier);
+
+	status = nfs4_init_uniform_client_string(clp);
+	if (status)
+		goto out_calldata;
+
+	dprintk("NFS call  exchange_id auth=%s, '%s'\n",
+		clp->cl_rpcclient->cl_auth->au_ops->au_name,
+		clp->cl_owner_id);
+
+	calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
+						GFP_NOFS);
+	status = -ENOMEM;
+	if (unlikely(calldata->res.server_owner == NULL))
+		goto out_calldata;
+
+	calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
+					GFP_NOFS);
+	if (unlikely(calldata->res.server_scope == NULL))
+		goto out_server_owner;
+
+	calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
+	if (unlikely(calldata->res.impl_id == NULL))
+		goto out_server_scope;
+
+	switch (sp4_how) {
+	case SP4_NONE:
+		calldata->args.state_protect.how = SP4_NONE;
+		break;
+
+	case SP4_MACH_CRED:
+		calldata->args.state_protect = nfs4_sp4_mach_cred_request;
+		break;
+
+	default:
+		/* unsupported! */
+		WARN_ON_ONCE(1);
+		status = -EINVAL;
+		goto out_impl_id;
+	}
+	if (xprt) {
+		calldata->xprt = xprt;
+		task_setup_data.rpc_xprt = xprt;
+		task_setup_data.flags =
+				RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC;
+		calldata->args.verifier = &clp->cl_confirm;
+	} else {
+		calldata->args.verifier = &verifier;
+	}
+	calldata->args.client = clp;
+#ifdef CONFIG_NFS_V4_1_MIGRATION
+	calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
+	EXCHGID4_FLAG_BIND_PRINC_STATEID |
+	EXCHGID4_FLAG_SUPP_MOVED_MIGR,
+#else
+	calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
+	EXCHGID4_FLAG_BIND_PRINC_STATEID,
+#endif
+	msg.rpc_argp = &calldata->args;
+	msg.rpc_resp = &calldata->res;
+	task_setup_data.callback_data = calldata;
+
+	task = rpc_run_task(&task_setup_data);
+	if (IS_ERR(task)) {
+	status = PTR_ERR(task);
+		goto out_impl_id;
 	}
 
-out_impl_id:
-	kfree(res.impl_id);
-out_server_scope:
-	kfree(res.server_scope);
-out_server_owner:
-	kfree(res.server_owner);
+	if (!xprt) {
+		status = rpc_wait_for_completion_task(task);
+		if (!status)
+			status = calldata->rpc_status;
+	} else	/* session trunking test */
+		status = calldata->rpc_status;
+
+	rpc_put_task(task);
 out:
 	if (clp->cl_implid != NULL)
 		dprintk("NFS reply exchange_id: Server Implementation ID: "
@@ -7240,6 +7548,16 @@
 			clp->cl_implid->date.nseconds);
 	dprintk("NFS reply exchange_id: %d\n", status);
 	return status;
+
+out_impl_id:
+	kfree(calldata->res.impl_id);
+out_server_scope:
+	kfree(calldata->res.server_scope);
+out_server_owner:
+	kfree(calldata->res.server_owner);
+out_calldata:
+	kfree(calldata);
+	goto out;
 }
 
 /*
@@ -7262,15 +7580,46 @@
 	/* try SP4_MACH_CRED if krb5i/p	*/
 	if (authflavor == RPC_AUTH_GSS_KRB5I ||
 	    authflavor == RPC_AUTH_GSS_KRB5P) {
-		status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
+		status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED, NULL);
 		if (!status)
 			return 0;
 	}
 
 	/* try SP4_NONE */
-	return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
+	return _nfs4_proc_exchange_id(clp, cred, SP4_NONE, NULL);
 }
 
+/**
+ * nfs4_test_session_trunk
+ *
+ * This is an add_xprt_test() test function called from
+ * rpc_clnt_setup_test_and_add_xprt.
+ *
+ * The rpc_xprt_switch is referrenced by rpc_clnt_setup_test_and_add_xprt
+ * and is dereferrenced in nfs4_exchange_id_release
+ *
+ * Upon success, add the new transport to the rpc_clnt
+ *
+ * @clnt: struct rpc_clnt to get new transport
+ * @xprt: the rpc_xprt to test
+ * @data: call data for _nfs4_proc_exchange_id.
+ */
+int nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+			    void *data)
+{
+	struct nfs4_add_xprt_data *adata = (struct nfs4_add_xprt_data *)data;
+	u32 sp4_how;
+
+	dprintk("--> %s try %s\n", __func__,
+		xprt->address_strings[RPC_DISPLAY_ADDR]);
+
+	sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
+
+	/* Test connection for session trunking. Async exchange_id call */
+	return  _nfs4_proc_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
+}
+EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
+
 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
 		struct rpc_cred *cred)
 {
@@ -7463,7 +7812,7 @@
 	args->bc_attrs.max_resp_sz = max_bc_payload;
 	args->bc_attrs.max_resp_sz_cached = 0;
 	args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
-	args->bc_attrs.max_reqs = NFS41_BC_MAX_CALLBACKS;
+	args->bc_attrs.max_reqs = min_t(unsigned short, max_session_cb_slots, 1);
 
 	dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
 		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
@@ -7510,10 +7859,9 @@
 		return -EINVAL;
 	if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
 		return -EINVAL;
-	/* These would render the backchannel useless: */
-	if (rcvd->max_ops != sent->max_ops)
+	if (rcvd->max_ops > sent->max_ops)
 		return -EINVAL;
-	if (rcvd->max_reqs != sent->max_reqs)
+	if (rcvd->max_reqs > sent->max_reqs)
 		return -EINVAL;
 out:
 	return 0;
@@ -7982,6 +8330,8 @@
 	case -NFS4ERR_RECALLCONFLICT:
 		status = -ERECALLCONFLICT;
 		break;
+	case -NFS4ERR_DELEG_REVOKED:
+	case -NFS4ERR_ADMIN_REVOKED:
 	case -NFS4ERR_EXPIRED:
 	case -NFS4ERR_BAD_STATEID:
 		exception->timeout = 0;
@@ -7993,6 +8343,7 @@
 					&lgp->args.ctx->state->stateid)) {
 			spin_unlock(&inode->i_lock);
 			exception->state = lgp->args.ctx->state;
+			exception->stateid = &lgp->args.stateid;
 			break;
 		}
 
@@ -8591,6 +8942,24 @@
 	return -res.status;
 }
 
+static void nfs4_handle_delay_or_session_error(struct nfs_server *server,
+		int err, struct nfs4_exception *exception)
+{
+	exception->retry = 0;
+	switch(err) {
+	case -NFS4ERR_DELAY:
+	case -NFS4ERR_RETRY_UNCACHED_REP:
+		nfs4_handle_exception(server, err, exception);
+		break;
+	case -NFS4ERR_BADSESSION:
+	case -NFS4ERR_BADSLOT:
+	case -NFS4ERR_BAD_HIGH_SLOT:
+	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+	case -NFS4ERR_DEADSESSION:
+		nfs4_do_handle_exception(server, err, exception);
+	}
+}
+
 /**
  * nfs41_test_stateid - perform a TEST_STATEID operation
  *
@@ -8610,9 +8979,7 @@
 	int err;
 	do {
 		err = _nfs41_test_stateid(server, stateid, cred);
-		if (err != -NFS4ERR_DELAY)
-			break;
-		nfs4_handle_exception(server, err, &exception);
+		nfs4_handle_delay_or_session_error(server, err, &exception);
 	} while (exception.retry);
 	return err;
 }
@@ -8657,7 +9024,7 @@
 };
 
 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
-		nfs4_stateid *stateid,
+		const nfs4_stateid *stateid,
 		struct rpc_cred *cred,
 		bool privileged)
 {
@@ -8687,7 +9054,7 @@
 
 	msg.rpc_argp = &data->args;
 	msg.rpc_resp = &data->res;
-	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
+	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
 	if (privileged)
 		nfs4_set_sequence_privileged(&data->args.seq_args);
 
@@ -8700,38 +9067,31 @@
  * @server: server / transport on which to perform the operation
  * @stateid: state ID to release
  * @cred: credential
+ * @is_recovery: set to true if this call needs to be privileged
  *
- * Returns NFS_OK if the server freed "stateid".  Otherwise a
- * negative NFS4ERR value is returned.
+ * Note: this function is always asynchronous.
  */
 static int nfs41_free_stateid(struct nfs_server *server,
-		nfs4_stateid *stateid,
-		struct rpc_cred *cred)
+		const nfs4_stateid *stateid,
+		struct rpc_cred *cred,
+		bool is_recovery)
 {
 	struct rpc_task *task;
-	int ret;
 
-	task = _nfs41_free_stateid(server, stateid, cred, true);
+	task = _nfs41_free_stateid(server, stateid, cred, is_recovery);
 	if (IS_ERR(task))
 		return PTR_ERR(task);
-	ret = rpc_wait_for_completion_task(task);
-	if (!ret)
-		ret = task->tk_status;
 	rpc_put_task(task);
-	return ret;
+	return 0;
 }
 
 static void
 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
 {
-	struct rpc_task *task;
 	struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
 
-	task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
+	nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
 	nfs4_free_lock_state(server, lsp);
-	if (IS_ERR(task))
-		return;
-	rpc_put_task(task);
 }
 
 static bool nfs41_match_stateid(const nfs4_stateid *s1,
@@ -8835,6 +9195,7 @@
 	.match_stateid = nfs4_match_stateid,
 	.find_root_sec = nfs4_find_root_sec,
 	.free_lock_state = nfs4_release_lockowner,
+	.test_and_free_expired = nfs40_test_and_free_expired_stateid,
 	.alloc_seqid = nfs_alloc_seqid,
 	.call_sync_ops = &nfs40_call_sync_ops,
 	.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
@@ -8862,7 +9223,9 @@
 	.match_stateid = nfs41_match_stateid,
 	.find_root_sec = nfs41_find_root_sec,
 	.free_lock_state = nfs41_free_lock_state,
+	.test_and_free_expired = nfs41_test_and_free_expired_stateid,
 	.alloc_seqid = nfs_alloc_no_seqid,
+	.session_trunk = nfs4_test_session_trunk,
 	.call_sync_ops = &nfs41_call_sync_ops,
 	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
 	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
@@ -8891,7 +9254,9 @@
 	.find_root_sec = nfs41_find_root_sec,
 	.free_lock_state = nfs41_free_lock_state,
 	.call_sync_ops = &nfs41_call_sync_ops,
+	.test_and_free_expired = nfs41_test_and_free_expired_stateid,
 	.alloc_seqid = nfs_alloc_no_seqid,
+	.session_trunk = nfs4_test_session_trunk,
 	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
 	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
 	.state_renewal_ops = &nfs41_state_renewal_ops,
@@ -8941,20 +9306,14 @@
 	.permission	= nfs_permission,
 	.getattr	= nfs_getattr,
 	.setattr	= nfs_setattr,
-	.getxattr	= generic_getxattr,
-	.setxattr	= generic_setxattr,
 	.listxattr	= nfs4_listxattr,
-	.removexattr	= generic_removexattr,
 };
 
 static const struct inode_operations nfs4_file_inode_operations = {
 	.permission	= nfs_permission,
 	.getattr	= nfs_getattr,
 	.setattr	= nfs_setattr,
-	.getxattr	= generic_getxattr,
-	.setxattr	= generic_setxattr,
 	.listxattr	= nfs4_listxattr,
-	.removexattr	= generic_removexattr,
 };
 
 const struct nfs_rpc_ops nfs_v4_clientops = {
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
index f703b75..dae3855 100644
--- a/fs/nfs/nfs4session.h
+++ b/fs/nfs/nfs4session.h
@@ -9,6 +9,7 @@
 
 /* maximum number of slots to use */
 #define NFS4_DEF_SLOT_TABLE_SIZE (64U)
+#define NFS4_DEF_CB_SLOT_TABLE_SIZE (1U)
 #define NFS4_MAX_SLOT_TABLE (1024U)
 #define NFS4_NO_SLOT ((u32)-1)
 
@@ -22,6 +23,7 @@
 	u32			slot_nr;
 	u32		 	seq_nr;
 	unsigned int		interrupted : 1,
+				privileged : 1,
 				seq_done : 1;
 };
 
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index cada00a..5f4281e 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -991,6 +991,8 @@
 {
 	int ret;
 
+	if (!nfs4_valid_open_stateid(state))
+		return -EIO;
 	if (cred != NULL)
 		*cred = NULL;
 	ret = nfs4_copy_lock_stateid(dst, state, lockowner);
@@ -1303,6 +1305,8 @@
 static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
 {
 
+	if (!nfs4_valid_open_stateid(state))
+		return 0;
 	set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
 	/* Don't recover state that expired before the reboot */
 	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) {
@@ -1316,6 +1320,8 @@
 
 int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
 {
+	if (!nfs4_valid_open_stateid(state))
+		return 0;
 	set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
 	clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
 	set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
@@ -1327,9 +1333,8 @@
 {
 	struct nfs_client *clp = server->nfs_client;
 
-	if (!nfs4_valid_open_stateid(state))
+	if (!nfs4_state_mark_reclaim_nograce(clp, state))
 		return -EBADF;
-	nfs4_state_mark_reclaim_nograce(clp, state);
 	dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
 			clp->cl_hostname);
 	nfs4_schedule_state_manager(clp);
@@ -1337,6 +1342,35 @@
 }
 EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);
 
+static struct nfs4_lock_state *
+nfs_state_find_lock_state_by_stateid(struct nfs4_state *state,
+		const nfs4_stateid *stateid)
+{
+	struct nfs4_lock_state *pos;
+
+	list_for_each_entry(pos, &state->lock_states, ls_locks) {
+		if (!test_bit(NFS_LOCK_INITIALIZED, &pos->ls_flags))
+			continue;
+		if (nfs4_stateid_match_other(&pos->ls_stateid, stateid))
+			return pos;
+	}
+	return NULL;
+}
+
+static bool nfs_state_lock_state_matches_stateid(struct nfs4_state *state,
+		const nfs4_stateid *stateid)
+{
+	bool found = false;
+
+	if (test_bit(LK_STATE_IN_USE, &state->flags)) {
+		spin_lock(&state->state_lock);
+		if (nfs_state_find_lock_state_by_stateid(state, stateid))
+			found = true;
+		spin_unlock(&state->state_lock);
+	}
+	return found;
+}
+
 void nfs_inode_find_state_and_recover(struct inode *inode,
 		const nfs4_stateid *stateid)
 {
@@ -1351,14 +1385,18 @@
 		state = ctx->state;
 		if (state == NULL)
 			continue;
-		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
+		if (nfs4_stateid_match_other(&state->stateid, stateid) &&
+		    nfs4_state_mark_reclaim_nograce(clp, state)) {
+			found = true;
 			continue;
-		if (!nfs4_stateid_match(&state->stateid, stateid))
-			continue;
-		nfs4_state_mark_reclaim_nograce(clp, state);
-		found = true;
+		}
+		if (nfs_state_lock_state_matches_stateid(state, stateid) &&
+		    nfs4_state_mark_reclaim_nograce(clp, state))
+			found = true;
 	}
 	spin_unlock(&inode->i_lock);
+
+	nfs_inode_find_delegation_state_and_recover(inode, stateid);
 	if (found)
 		nfs4_schedule_state_manager(clp);
 }
@@ -1498,6 +1536,9 @@
 					__func__, status);
 			case -ENOENT:
 			case -ENOMEM:
+			case -EACCES:
+			case -EROFS:
+			case -EIO:
 			case -ESTALE:
 				/* Open state on this file cannot be recovered */
 				nfs4_state_mark_recovery_failed(state, status);
@@ -1656,15 +1697,9 @@
 	put_rpccred(cred);
 }
 
-static void nfs_delegation_clear_all(struct nfs_client *clp)
-{
-	nfs_delegation_mark_reclaim(clp);
-	nfs_delegation_reap_unclaimed(clp);
-}
-
 static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
 {
-	nfs_delegation_clear_all(clp);
+	nfs_mark_test_expired_all_delegations(clp);
 	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
 }
 
@@ -2195,7 +2230,7 @@
 
 static void nfs41_handle_some_state_revoked(struct nfs_client *clp)
 {
-	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
+	nfs4_state_start_reclaim_nograce(clp);
 	nfs4_schedule_state_manager(clp);
 
 	dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
@@ -2227,13 +2262,22 @@
 		nfs4_schedule_state_manager(clp);
 }
 
-void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
+void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags,
+		bool recovery)
 {
 	if (!flags)
 		return;
 
 	dprintk("%s: \"%s\" (client ID %llx) flags=0x%08x\n",
 		__func__, clp->cl_hostname, clp->cl_clientid, flags);
+	/*
+	 * If we're called from the state manager thread, then assume we're
+	 * already handling the RECLAIM_NEEDED and/or STATE_REVOKED.
+	 * Those flags are expected to remain set until we're done
+	 * recovering (see RFC5661, section 18.46.3).
+	 */
+	if (recovery)
+		goto out_recovery;
 
 	if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
 		nfs41_handle_server_reboot(clp);
@@ -2246,6 +2290,7 @@
 		nfs4_schedule_lease_moved_recovery(clp);
 	if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
 		nfs41_handle_recallable_state_revoked(clp);
+out_recovery:
 	if (flags & SEQ4_STATUS_BACKCHANNEL_FAULT)
 		nfs41_handle_backchannel_fault(clp);
 	else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
@@ -2410,6 +2455,13 @@
 			nfs4_state_end_reclaim_reboot(clp);
 		}
 
+		/* Detect expired delegations... */
+		if (test_and_clear_bit(NFS4CLNT_DELEGATION_EXPIRED, &clp->cl_state)) {
+			section = "detect expired delegations";
+			nfs_reap_expired_delegations(clp);
+			continue;
+		}
+
 		/* Now recover expired state... */
 		if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
 			section = "reclaim nograce";
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 7bd3a5c..fc89e5e 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1850,7 +1850,7 @@
 	*p++ = cpu_to_be32(RPC_AUTH_UNIX);			/* auth_sys */
 
 	/* authsys_parms rfc1831 */
-	*p++ = cpu_to_be32(nn->boot_time.tv_nsec);	/* stamp */
+	*p++ = cpu_to_be32(ktime_to_ns(nn->boot_time));	/* stamp */
 	p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen);
 	*p++ = cpu_to_be32(0);				/* UID */
 	*p++ = cpu_to_be32(0);				/* GID */
@@ -4725,34 +4725,37 @@
 }
 
 /*
- * Decode potentially multiple layout types. Currently we only support
- * one layout driver per file system.
+ * Decode potentially multiple layout types.
  */
-static int decode_first_pnfs_layout_type(struct xdr_stream *xdr,
-					 uint32_t *layouttype)
+static int decode_pnfs_layout_types(struct xdr_stream *xdr,
+				    struct nfs_fsinfo *fsinfo)
 {
 	__be32 *p;
-	int num;
+	uint32_t i;
 
 	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(!p))
 		goto out_overflow;
-	num = be32_to_cpup(p);
+	fsinfo->nlayouttypes = be32_to_cpup(p);
 
 	/* pNFS is not supported by the underlying file system */
-	if (num == 0) {
-		*layouttype = 0;
+	if (fsinfo->nlayouttypes == 0)
 		return 0;
-	}
-	if (num > 1)
-		printk(KERN_INFO "NFS: %s: Warning: Multiple pNFS layout "
-			"drivers per filesystem not supported\n", __func__);
 
 	/* Decode and set first layout type, move xdr->p past unused types */
-	p = xdr_inline_decode(xdr, num * 4);
+	p = xdr_inline_decode(xdr, fsinfo->nlayouttypes * 4);
 	if (unlikely(!p))
 		goto out_overflow;
-	*layouttype = be32_to_cpup(p);
+
+	/* If we get too many, then just cap it at the max */
+	if (fsinfo->nlayouttypes > NFS_MAX_LAYOUT_TYPES) {
+		printk(KERN_INFO "NFS: %s: Warning: Too many (%u) pNFS layout types\n",
+			__func__, fsinfo->nlayouttypes);
+		fsinfo->nlayouttypes = NFS_MAX_LAYOUT_TYPES;
+	}
+
+	for(i = 0; i < fsinfo->nlayouttypes; ++i)
+		fsinfo->layouttype[i] = be32_to_cpup(p++);
 	return 0;
 out_overflow:
 	print_overflow_msg(__func__, xdr);
@@ -4764,7 +4767,7 @@
  * Note we must ensure that layouttype is set in any non-error case.
  */
 static int decode_attr_pnfstype(struct xdr_stream *xdr, uint32_t *bitmap,
-				uint32_t *layouttype)
+				struct nfs_fsinfo *fsinfo)
 {
 	int status = 0;
 
@@ -4772,10 +4775,9 @@
 	if (unlikely(bitmap[1] & (FATTR4_WORD1_FS_LAYOUT_TYPES - 1U)))
 		return -EIO;
 	if (bitmap[1] & FATTR4_WORD1_FS_LAYOUT_TYPES) {
-		status = decode_first_pnfs_layout_type(xdr, layouttype);
+		status = decode_pnfs_layout_types(xdr, fsinfo);
 		bitmap[1] &= ~FATTR4_WORD1_FS_LAYOUT_TYPES;
-	} else
-		*layouttype = 0;
+	}
 	return status;
 }
 
@@ -4856,7 +4858,7 @@
 	status = decode_attr_time_delta(xdr, bitmap, &fsinfo->time_delta);
 	if (status != 0)
 		goto xdr_error;
-	status = decode_attr_pnfstype(xdr, bitmap, &fsinfo->layouttype);
+	status = decode_attr_pnfstype(xdr, bitmap, fsinfo);
 	if (status != 0)
 		goto xdr_error;
 
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 2c93a85..56b2d96 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -30,6 +30,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/nfs_page.h>
 #include <linux/module.h>
+#include <linux/sort.h>
 #include "internal.h"
 #include "pnfs.h"
 #include "iostat.h"
@@ -99,35 +100,79 @@
 }
 
 /*
+ * When the server sends a list of layout types, we choose one in the order
+ * given in the list below.
+ *
+ * FIXME: should this list be configurable in some fashion? module param?
+ * 	  mount option? something else?
+ */
+static const u32 ld_prefs[] = {
+	LAYOUT_SCSI,
+	LAYOUT_BLOCK_VOLUME,
+	LAYOUT_OSD2_OBJECTS,
+	LAYOUT_FLEX_FILES,
+	LAYOUT_NFSV4_1_FILES,
+	0
+};
+
+static int
+ld_cmp(const void *e1, const void *e2)
+{
+	u32 ld1 = *((u32 *)e1);
+	u32 ld2 = *((u32 *)e2);
+	int i;
+
+	for (i = 0; ld_prefs[i] != 0; i++) {
+		if (ld1 == ld_prefs[i])
+			return -1;
+
+		if (ld2 == ld_prefs[i])
+			return 1;
+	}
+	return 0;
+}
+
+/*
  * Try to set the server's pnfs module to the pnfs layout type specified by id.
  * Currently only one pNFS layout driver per filesystem is supported.
  *
- * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
+ * @ids array of layout types supported by MDS.
  */
 void
 set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
-		      u32 id)
+		      struct nfs_fsinfo *fsinfo)
 {
 	struct pnfs_layoutdriver_type *ld_type = NULL;
+	u32 id;
+	int i;
 
-	if (id == 0)
-		goto out_no_driver;
 	if (!(server->nfs_client->cl_exchange_flags &
 		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
-		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
-			__func__, id, server->nfs_client->cl_exchange_flags);
+		printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
+			__func__, server->nfs_client->cl_exchange_flags);
 		goto out_no_driver;
 	}
-	ld_type = find_pnfs_driver(id);
-	if (!ld_type) {
-		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
+
+	sort(fsinfo->layouttype, fsinfo->nlayouttypes,
+		sizeof(*fsinfo->layouttype), ld_cmp, NULL);
+
+	for (i = 0; i < fsinfo->nlayouttypes; i++) {
+		id = fsinfo->layouttype[i];
 		ld_type = find_pnfs_driver(id);
 		if (!ld_type) {
-			dprintk("%s: No pNFS module found for %u.\n",
-				__func__, id);
-			goto out_no_driver;
+			request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX,
+					id);
+			ld_type = find_pnfs_driver(id);
 		}
+		if (ld_type)
+			break;
 	}
+
+	if (!ld_type) {
+		dprintk("%s: No pNFS module found!\n", __func__);
+		goto out_no_driver;
+	}
+
 	server->pnfs_curr_ld = ld_type;
 	if (ld_type->set_layoutdriver
 	    && ld_type->set_layoutdriver(server, mntfh)) {
@@ -2185,10 +2230,8 @@
  */
 void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
 {
-	if (likely(!hdr->pnfs_error)) {
-		__nfs4_read_done_cb(hdr);
+	if (likely(!hdr->pnfs_error))
 		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
-	}
 	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
 	if (unlikely(hdr->pnfs_error))
 		pnfs_ld_handle_read_error(hdr);
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 31d99b2..5c29551 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -236,7 +236,7 @@
 void pnfs_put_lseg(struct pnfs_layout_segment *lseg);
 void pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg);
 
-void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, u32);
+void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, struct nfs_fsinfo *);
 void unset_pnfs_layoutdriver(struct nfs_server *);
 void pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *, struct nfs_page *);
 int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc);
@@ -657,7 +657,8 @@
 }
 
 static inline void set_pnfs_layoutdriver(struct nfs_server *s,
-					 const struct nfs_fh *mntfh, u32 id)
+					 const struct nfs_fh *mntfh,
+					 struct nfs_fsinfo *fsinfo)
 {
 }
 
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index f3468b5..53b4705 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -690,13 +690,50 @@
 		dprintk("%s: DS %s: trying address %s\n",
 			__func__, ds->ds_remotestr, da->da_remotestr);
 
-		clp = nfs4_set_ds_client(mds_srv,
-					(struct sockaddr *)&da->da_addr,
-					da->da_addrlen, IPPROTO_TCP,
-					timeo, retrans, minor_version,
-					au_flavor);
-		if (!IS_ERR(clp))
-			break;
+		if (!IS_ERR(clp) && clp->cl_mvops->session_trunk) {
+			struct xprt_create xprt_args = {
+				.ident = XPRT_TRANSPORT_TCP,
+				.net = clp->cl_net,
+				.dstaddr = (struct sockaddr *)&da->da_addr,
+				.addrlen = da->da_addrlen,
+				.servername = clp->cl_hostname,
+			};
+			struct nfs4_add_xprt_data xprtdata = {
+				.clp = clp,
+				.cred = nfs4_get_clid_cred(clp),
+			};
+			struct rpc_add_xprt_test rpcdata = {
+				.add_xprt_test = clp->cl_mvops->session_trunk,
+				.data = &xprtdata,
+			};
+
+			/**
+			* Test this address for session trunking and
+			* add as an alias
+			*/
+			rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
+					  rpc_clnt_setup_test_and_add_xprt,
+					  &rpcdata);
+			if (xprtdata.cred)
+				put_rpccred(xprtdata.cred);
+		} else {
+			clp = nfs4_set_ds_client(mds_srv,
+						(struct sockaddr *)&da->da_addr,
+						da->da_addrlen, IPPROTO_TCP,
+						timeo, retrans, minor_version,
+						au_flavor);
+			if (IS_ERR(clp))
+				continue;
+
+			status = nfs4_init_ds_session(clp,
+					mds_srv->nfs_client->cl_lease_time);
+			if (status) {
+				nfs_put_client(clp);
+				clp = ERR_PTR(-EIO);
+				continue;
+			}
+
+		}
 	}
 
 	if (IS_ERR(clp)) {
@@ -704,18 +741,11 @@
 		goto out;
 	}
 
-	status = nfs4_init_ds_session(clp, mds_srv->nfs_client->cl_lease_time);
-	if (status)
-		goto out_put;
-
 	smp_wmb();
 	ds->ds_clp = clp;
 	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
 out:
 	return status;
-out_put:
-	nfs_put_client(clp);
-	goto out;
 }
 
 /*
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index d396013..001796b 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2848,19 +2848,23 @@
  * NFS client for backwards compatibility
  */
 unsigned int nfs_callback_set_tcpport;
+unsigned short nfs_callback_nr_threads;
 /* Default cache timeout is 10 minutes */
 unsigned int nfs_idmap_cache_timeout = 600;
 /* Turn off NFSv4 uid/gid mapping when using AUTH_SYS */
 bool nfs4_disable_idmapping = true;
 unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE;
+unsigned short max_session_cb_slots = NFS4_DEF_CB_SLOT_TABLE_SIZE;
 unsigned short send_implementation_id = 1;
 char nfs4_client_id_uniquifier[NFS4_CLIENT_ID_UNIQ_LEN] = "";
 bool recover_lost_locks = false;
 
+EXPORT_SYMBOL_GPL(nfs_callback_nr_threads);
 EXPORT_SYMBOL_GPL(nfs_callback_set_tcpport);
 EXPORT_SYMBOL_GPL(nfs_idmap_cache_timeout);
 EXPORT_SYMBOL_GPL(nfs4_disable_idmapping);
 EXPORT_SYMBOL_GPL(max_session_slots);
+EXPORT_SYMBOL_GPL(max_session_cb_slots);
 EXPORT_SYMBOL_GPL(send_implementation_id);
 EXPORT_SYMBOL_GPL(nfs4_client_id_uniquifier);
 EXPORT_SYMBOL_GPL(recover_lost_locks);
@@ -2887,6 +2891,9 @@
 #define param_check_portnr(name, p) __param_check(name, p, unsigned int);
 
 module_param_named(callback_tcpport, nfs_callback_set_tcpport, portnr, 0644);
+module_param_named(callback_nr_threads, nfs_callback_nr_threads, ushort, 0644);
+MODULE_PARM_DESC(callback_nr_threads, "Number of threads that will be "
+		"assigned to the NFSv4 callback channels.");
 module_param(nfs_idmap_cache_timeout, int, 0644);
 module_param(nfs4_disable_idmapping, bool, 0644);
 module_param_string(nfs4_unique_id, nfs4_client_id_uniquifier,
@@ -2896,6 +2903,9 @@
 module_param(max_session_slots, ushort, 0644);
 MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
 		"requests the client will negotiate");
+module_param(max_session_cb_slots, ushort, 0644);
+MODULE_PARM_DESC(max_session_slots, "Maximum number of parallel NFSv4.1 "
+		"callbacks the client will process for a given server");
 module_param(send_implementation_id, ushort, 0644);
 MODULE_PARM_DESC(send_implementation_id,
 		"Send implementation ID with NFSv4.1 exchange_id");
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index 5a17084..0780ff8 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -123,7 +123,7 @@
 
 	if (lcp->lc_mtime.tv_nsec == UTIME_NOW ||
 	    timespec_compare(&lcp->lc_mtime, &inode->i_mtime) < 0)
-		lcp->lc_mtime = current_fs_time(inode->i_sb);
+		lcp->lc_mtime = current_time(inode);
 	iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
 	iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime;
 
diff --git a/fs/nfsd/flexfilelayout.c b/fs/nfsd/flexfilelayout.c
index df880e9..b672873 100644
--- a/fs/nfsd/flexfilelayout.c
+++ b/fs/nfsd/flexfilelayout.c
@@ -126,6 +126,7 @@
 const struct nfsd4_layout_ops ff_layout_ops = {
 	.notify_types		=
 			NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
+	.disable_recalls	= true,
 	.proc_getdeviceinfo	= nfsd4_ff_proc_getdeviceinfo,
 	.encode_getdeviceinfo	= nfsd4_ff_encode_getdeviceinfo,
 	.proc_layoutget		= nfsd4_ff_proc_layoutget,
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 5fbf3bb..b10d557 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -84,6 +84,7 @@
 	struct list_head client_lru;
 	struct list_head close_lru;
 	struct list_head del_recall_lru;
+	struct list_head blocked_locks_lru;
 
 	struct delayed_work laundromat_work;
 
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 04c68d9..211dc2a 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -448,7 +448,7 @@
 {
 	int status;
 
-	if (cb->cb_minorversion == 0)
+	if (cb->cb_clp->cl_minorversion == 0)
 		return 0;
 
 	status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_seq_status);
@@ -485,7 +485,7 @@
 	const struct nfs4_delegation *dp = cb_to_delegation(cb);
 	struct nfs4_cb_compound_hdr hdr = {
 		.ident = cb->cb_clp->cl_cb_ident,
-		.minorversion = cb->cb_minorversion,
+		.minorversion = cb->cb_clp->cl_minorversion,
 	};
 
 	encode_cb_compound4args(xdr, &hdr);
@@ -594,7 +594,7 @@
 		container_of(cb, struct nfs4_layout_stateid, ls_recall);
 	struct nfs4_cb_compound_hdr hdr = {
 		.ident = 0,
-		.minorversion = cb->cb_minorversion,
+		.minorversion = cb->cb_clp->cl_minorversion,
 	};
 
 	encode_cb_compound4args(xdr, &hdr);
@@ -623,6 +623,62 @@
 }
 #endif /* CONFIG_NFSD_PNFS */
 
+static void encode_stateowner(struct xdr_stream *xdr, struct nfs4_stateowner *so)
+{
+	__be32	*p;
+
+	p = xdr_reserve_space(xdr, 8 + 4 + so->so_owner.len);
+	p = xdr_encode_opaque_fixed(p, &so->so_client->cl_clientid, 8);
+	xdr_encode_opaque(p, so->so_owner.data, so->so_owner.len);
+}
+
+static void nfs4_xdr_enc_cb_notify_lock(struct rpc_rqst *req,
+					struct xdr_stream *xdr,
+					const struct nfsd4_callback *cb)
+{
+	const struct nfsd4_blocked_lock *nbl =
+		container_of(cb, struct nfsd4_blocked_lock, nbl_cb);
+	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)nbl->nbl_lock.fl_owner;
+	struct nfs4_cb_compound_hdr hdr = {
+		.ident = 0,
+		.minorversion = cb->cb_clp->cl_minorversion,
+	};
+
+	__be32 *p;
+
+	BUG_ON(hdr.minorversion == 0);
+
+	encode_cb_compound4args(xdr, &hdr);
+	encode_cb_sequence4args(xdr, cb, &hdr);
+
+	p = xdr_reserve_space(xdr, 4);
+	*p = cpu_to_be32(OP_CB_NOTIFY_LOCK);
+	encode_nfs_fh4(xdr, &nbl->nbl_fh);
+	encode_stateowner(xdr, &lo->lo_owner);
+	hdr.nops++;
+
+	encode_cb_nops(&hdr);
+}
+
+static int nfs4_xdr_dec_cb_notify_lock(struct rpc_rqst *rqstp,
+					struct xdr_stream *xdr,
+					struct nfsd4_callback *cb)
+{
+	struct nfs4_cb_compound_hdr hdr;
+	int status;
+
+	status = decode_cb_compound4res(xdr, &hdr);
+	if (unlikely(status))
+		return status;
+
+	if (cb) {
+		status = decode_cb_sequence4res(xdr, cb);
+		if (unlikely(status || cb->cb_seq_status))
+			return status;
+	}
+	return decode_cb_op_status(xdr, OP_CB_NOTIFY_LOCK, &cb->cb_status);
+}
+
 /*
  * RPC procedure tables
  */
@@ -643,6 +699,7 @@
 #ifdef CONFIG_NFSD_PNFS
 	PROC(CB_LAYOUT,	COMPOUND,	cb_layout,	cb_layout),
 #endif
+	PROC(CB_NOTIFY_LOCK,	COMPOUND,	cb_notify_lock,	cb_notify_lock),
 };
 
 static struct rpc_version nfs_cb_version4 = {
@@ -862,7 +919,6 @@
 	struct nfs4_client *clp = cb->cb_clp;
 	u32 minorversion = clp->cl_minorversion;
 
-	cb->cb_minorversion = minorversion;
 	/*
 	 * cb_seq_status is only set in decode_cb_sequence4res,
 	 * and so will remain 1 if an rpc level failure occurs.
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 2be9602..42aace4 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -174,7 +174,8 @@
 	list_del_init(&ls->ls_perfile);
 	spin_unlock(&fp->fi_lock);
 
-	vfs_setlease(ls->ls_file, F_UNLCK, NULL, (void **)&ls);
+	if (!nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
+		vfs_setlease(ls->ls_file, F_UNLCK, NULL, (void **)&ls);
 	fput(ls->ls_file);
 
 	if (ls->ls_recalled)
@@ -189,6 +190,9 @@
 	struct file_lock *fl;
 	int status;
 
+	if (nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
+		return 0;
+
 	fl = locks_alloc_lock();
 	if (!fl)
 		return -ENOMEM;
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 1fb2227..abb09b5 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1010,47 +1010,97 @@
 }
 
 static __be32
-nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
-		struct nfsd4_clone *clone)
+nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+		  stateid_t *src_stateid, struct file **src,
+		  stateid_t *dst_stateid, struct file **dst)
 {
-	struct file *src, *dst;
 	__be32 status;
 
 	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh,
-					    &clone->cl_src_stateid, RD_STATE,
-					    &src, NULL);
+					    src_stateid, RD_STATE, src, NULL);
 	if (status) {
 		dprintk("NFSD: %s: couldn't process src stateid!\n", __func__);
 		goto out;
 	}
 
 	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
-					    &clone->cl_dst_stateid, WR_STATE,
-					    &dst, NULL);
+					    dst_stateid, WR_STATE, dst, NULL);
 	if (status) {
 		dprintk("NFSD: %s: couldn't process dst stateid!\n", __func__);
 		goto out_put_src;
 	}
 
 	/* fix up for NFS-specific error code */
-	if (!S_ISREG(file_inode(src)->i_mode) ||
-	    !S_ISREG(file_inode(dst)->i_mode)) {
+	if (!S_ISREG(file_inode(*src)->i_mode) ||
+	    !S_ISREG(file_inode(*dst)->i_mode)) {
 		status = nfserr_wrong_type;
 		goto out_put_dst;
 	}
 
+out:
+	return status;
+out_put_dst:
+	fput(*dst);
+out_put_src:
+	fput(*src);
+	goto out;
+}
+
+static __be32
+nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+		struct nfsd4_clone *clone)
+{
+	struct file *src, *dst;
+	__be32 status;
+
+	status = nfsd4_verify_copy(rqstp, cstate, &clone->cl_src_stateid, &src,
+				   &clone->cl_dst_stateid, &dst);
+	if (status)
+		goto out;
+
 	status = nfsd4_clone_file_range(src, clone->cl_src_pos,
 			dst, clone->cl_dst_pos, clone->cl_count);
 
-out_put_dst:
 	fput(dst);
-out_put_src:
 	fput(src);
 out:
 	return status;
 }
 
 static __be32
+nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+		struct nfsd4_copy *copy)
+{
+	struct file *src, *dst;
+	__be32 status;
+	ssize_t bytes;
+
+	status = nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid, &src,
+				   &copy->cp_dst_stateid, &dst);
+	if (status)
+		goto out;
+
+	bytes = nfsd_copy_file_range(src, copy->cp_src_pos,
+			dst, copy->cp_dst_pos, copy->cp_count);
+
+	if (bytes < 0)
+		status = nfserrno(bytes);
+	else {
+		copy->cp_res.wr_bytes_written = bytes;
+		copy->cp_res.wr_stable_how = NFS_UNSTABLE;
+		copy->cp_consecutive = 1;
+		copy->cp_synchronous = 1;
+		gen_boot_verifier(&copy->cp_res.wr_verifier, SVC_NET(rqstp));
+		status = nfs_ok;
+	}
+
+	fput(src);
+	fput(dst);
+out:
+	return status;
+}
+
+static __be32
 nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		struct nfsd4_fallocate *fallocate, int flags)
 {
@@ -1966,6 +2016,18 @@
 		op_encode_channel_attrs_maxsz) * sizeof(__be32);
 }
 
+static inline u32 nfsd4_copy_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+	return (op_encode_hdr_size +
+		1 /* wr_callback */ +
+		op_encode_stateid_maxsz /* wr_callback */ +
+		2 /* wr_count */ +
+		1 /* wr_committed */ +
+		op_encode_verifier_maxsz +
+		1 /* cr_consecutive */ +
+		1 /* cr_synchronous */) * sizeof(__be32);
+}
+
 #ifdef CONFIG_NFSD_PNFS
 /*
  * At this stage we don't really know what layout driver will handle the request,
@@ -2328,6 +2390,12 @@
 		.op_name = "OP_CLONE",
 		.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
 	},
+	[OP_COPY] = {
+		.op_func = (nfsd4op_func)nfsd4_copy,
+		.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
+		.op_name = "OP_COPY",
+		.op_rsize_bop = (nfsd4op_rsize)nfsd4_copy_rsize,
+	},
 	[OP_SEEK] = {
 		.op_func = (nfsd4op_func)nfsd4_seek,
 		.op_name = "OP_SEEK",
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 39bfaba..9752beb 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -99,6 +99,7 @@
 static void free_session(struct nfsd4_session *);
 
 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
+static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
 
 static bool is_session_dead(struct nfsd4_session *ses)
 {
@@ -210,6 +211,85 @@
 	spin_unlock(&nn->client_lock);
 }
 
+static struct nfsd4_blocked_lock *
+find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
+			struct nfsd_net *nn)
+{
+	struct nfsd4_blocked_lock *cur, *found = NULL;
+
+	spin_lock(&nn->client_lock);
+	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
+		if (fh_match(fh, &cur->nbl_fh)) {
+			list_del_init(&cur->nbl_list);
+			list_del_init(&cur->nbl_lru);
+			found = cur;
+			break;
+		}
+	}
+	spin_unlock(&nn->client_lock);
+	if (found)
+		posix_unblock_lock(&found->nbl_lock);
+	return found;
+}
+
+static struct nfsd4_blocked_lock *
+find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
+			struct nfsd_net *nn)
+{
+	struct nfsd4_blocked_lock *nbl;
+
+	nbl = find_blocked_lock(lo, fh, nn);
+	if (!nbl) {
+		nbl= kmalloc(sizeof(*nbl), GFP_KERNEL);
+		if (nbl) {
+			fh_copy_shallow(&nbl->nbl_fh, fh);
+			locks_init_lock(&nbl->nbl_lock);
+			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
+					&nfsd4_cb_notify_lock_ops,
+					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
+		}
+	}
+	return nbl;
+}
+
+static void
+free_blocked_lock(struct nfsd4_blocked_lock *nbl)
+{
+	locks_release_private(&nbl->nbl_lock);
+	kfree(nbl);
+}
+
+static int
+nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
+{
+	/*
+	 * Since this is just an optimization, we don't try very hard if it
+	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
+	 * just quit trying on anything else.
+	 */
+	switch (task->tk_status) {
+	case -NFS4ERR_DELAY:
+		rpc_delay(task, 1 * HZ);
+		return 0;
+	default:
+		return 1;
+	}
+}
+
+static void
+nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
+{
+	struct nfsd4_blocked_lock	*nbl = container_of(cb,
+						struct nfsd4_blocked_lock, nbl_cb);
+
+	free_blocked_lock(nbl);
+}
+
+static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
+	.done		= nfsd4_cb_notify_lock_done,
+	.release	= nfsd4_cb_notify_lock_release,
+};
+
 static inline struct nfs4_stateowner *
 nfs4_get_stateowner(struct nfs4_stateowner *sop)
 {
@@ -3224,9 +3304,10 @@
 		goto out;
 	/* cases below refer to rfc 3530 section 14.2.34: */
 	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
-		if (conf && !unconf) /* case 2: probable retransmit */
+		if (conf && same_verf(&confirm, &conf->cl_confirm)) {
+			/* case 2: probable retransmit */
 			status = nfs_ok;
-		else /* case 4: client hasn't noticed we rebooted yet? */
+		} else /* case 4: client hasn't noticed we rebooted yet? */
 			status = nfserr_stale_clientid;
 		goto out;
 	}
@@ -4410,9 +4491,11 @@
 	* To finish the open response, we just need to set the rflags.
 	*/
 	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
-	if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
-	    !nfsd4_has_session(&resp->cstate))
+	if (nfsd4_has_session(&resp->cstate))
+		open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
+	else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
 		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
+
 	if (dp)
 		nfs4_put_stid(&dp->dl_stid);
 	if (stp)
@@ -4501,6 +4584,7 @@
 	struct nfs4_openowner *oo;
 	struct nfs4_delegation *dp;
 	struct nfs4_ol_stateid *stp;
+	struct nfsd4_blocked_lock *nbl;
 	struct list_head *pos, *next, reaplist;
 	time_t cutoff = get_seconds() - nn->nfsd4_lease;
 	time_t t, new_timeo = nn->nfsd4_lease;
@@ -4569,6 +4653,41 @@
 	}
 	spin_unlock(&nn->client_lock);
 
+	/*
+	 * It's possible for a client to try and acquire an already held lock
+	 * that is being held for a long time, and then lose interest in it.
+	 * So, we clean out any un-revisited request after a lease period
+	 * under the assumption that the client is no longer interested.
+	 *
+	 * RFC5661, sec. 9.6 states that the client must not rely on getting
+	 * notifications and must continue to poll for locks, even when the
+	 * server supports them. Thus this shouldn't lead to clients blocking
+	 * indefinitely once the lock does become free.
+	 */
+	BUG_ON(!list_empty(&reaplist));
+	spin_lock(&nn->client_lock);
+	while (!list_empty(&nn->blocked_locks_lru)) {
+		nbl = list_first_entry(&nn->blocked_locks_lru,
+					struct nfsd4_blocked_lock, nbl_lru);
+		if (time_after((unsigned long)nbl->nbl_time,
+			       (unsigned long)cutoff)) {
+			t = nbl->nbl_time - cutoff;
+			new_timeo = min(new_timeo, t);
+			break;
+		}
+		list_move(&nbl->nbl_lru, &reaplist);
+		list_del_init(&nbl->nbl_list);
+	}
+	spin_unlock(&nn->client_lock);
+
+	while (!list_empty(&reaplist)) {
+		nbl = list_first_entry(&nn->blocked_locks_lru,
+					struct nfsd4_blocked_lock, nbl_lru);
+		list_del_init(&nbl->nbl_lru);
+		posix_unblock_lock(&nbl->nbl_lock);
+		free_blocked_lock(nbl);
+	}
+
 	new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
 	return new_timeo;
 }
@@ -5309,7 +5428,31 @@
 		nfs4_put_stateowner(&lo->lo_owner);
 }
 
+static void
+nfsd4_lm_notify(struct file_lock *fl)
+{
+	struct nfs4_lockowner		*lo = (struct nfs4_lockowner *)fl->fl_owner;
+	struct net			*net = lo->lo_owner.so_client->net;
+	struct nfsd_net			*nn = net_generic(net, nfsd_net_id);
+	struct nfsd4_blocked_lock	*nbl = container_of(fl,
+						struct nfsd4_blocked_lock, nbl_lock);
+	bool queue = false;
+
+	/* An empty list means that something else is going to be using it */
+	spin_lock(&nn->client_lock);
+	if (!list_empty(&nbl->nbl_list)) {
+		list_del_init(&nbl->nbl_list);
+		list_del_init(&nbl->nbl_lru);
+		queue = true;
+	}
+	spin_unlock(&nn->client_lock);
+
+	if (queue)
+		nfsd4_run_cb(&nbl->nbl_cb);
+}
+
 static const struct lock_manager_operations nfsd_posix_mng_ops  = {
+	.lm_notify = nfsd4_lm_notify,
 	.lm_get_owner = nfsd4_fl_get_owner,
 	.lm_put_owner = nfsd4_fl_put_owner,
 };
@@ -5407,6 +5550,7 @@
 	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
 	if (!lo)
 		return NULL;
+	INIT_LIST_HEAD(&lo->lo_blocked);
 	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
 	lo->lo_owner.so_is_open_owner = 0;
 	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
@@ -5588,12 +5732,15 @@
 	struct nfs4_ol_stateid *open_stp = NULL;
 	struct nfs4_file *fp;
 	struct file *filp = NULL;
+	struct nfsd4_blocked_lock *nbl = NULL;
 	struct file_lock *file_lock = NULL;
 	struct file_lock *conflock = NULL;
 	__be32 status = 0;
 	int lkflg;
 	int err;
 	bool new = false;
+	unsigned char fl_type;
+	unsigned int fl_flags = FL_POSIX;
 	struct net *net = SVC_NET(rqstp);
 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
@@ -5658,46 +5805,55 @@
 	if (!locks_in_grace(net) && lock->lk_reclaim)
 		goto out;
 
-	file_lock = locks_alloc_lock();
-	if (!file_lock) {
-		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
-		status = nfserr_jukebox;
-		goto out;
-	}
-
 	fp = lock_stp->st_stid.sc_file;
 	switch (lock->lk_type) {
-		case NFS4_READ_LT:
 		case NFS4_READW_LT:
+			if (nfsd4_has_session(cstate))
+				fl_flags |= FL_SLEEP;
+			/* Fallthrough */
+		case NFS4_READ_LT:
 			spin_lock(&fp->fi_lock);
 			filp = find_readable_file_locked(fp);
 			if (filp)
 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
 			spin_unlock(&fp->fi_lock);
-			file_lock->fl_type = F_RDLCK;
+			fl_type = F_RDLCK;
 			break;
-		case NFS4_WRITE_LT:
 		case NFS4_WRITEW_LT:
+			if (nfsd4_has_session(cstate))
+				fl_flags |= FL_SLEEP;
+			/* Fallthrough */
+		case NFS4_WRITE_LT:
 			spin_lock(&fp->fi_lock);
 			filp = find_writeable_file_locked(fp);
 			if (filp)
 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
 			spin_unlock(&fp->fi_lock);
-			file_lock->fl_type = F_WRLCK;
+			fl_type = F_WRLCK;
 			break;
 		default:
 			status = nfserr_inval;
 		goto out;
 	}
+
 	if (!filp) {
 		status = nfserr_openmode;
 		goto out;
 	}
 
+	nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
+	if (!nbl) {
+		dprintk("NFSD: %s: unable to allocate block!\n", __func__);
+		status = nfserr_jukebox;
+		goto out;
+	}
+
+	file_lock = &nbl->nbl_lock;
+	file_lock->fl_type = fl_type;
 	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
 	file_lock->fl_pid = current->tgid;
 	file_lock->fl_file = filp;
-	file_lock->fl_flags = FL_POSIX;
+	file_lock->fl_flags = fl_flags;
 	file_lock->fl_lmops = &nfsd_posix_mng_ops;
 	file_lock->fl_start = lock->lk_offset;
 	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
@@ -5710,18 +5866,29 @@
 		goto out;
 	}
 
+	if (fl_flags & FL_SLEEP) {
+		nbl->nbl_time = jiffies;
+		spin_lock(&nn->client_lock);
+		list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
+		list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
+		spin_unlock(&nn->client_lock);
+	}
+
 	err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
-	switch (-err) {
+	switch (err) {
 	case 0: /* success! */
 		nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
 		status = 0;
 		break;
-	case (EAGAIN):		/* conflock holds conflicting lock */
+	case FILE_LOCK_DEFERRED:
+		nbl = NULL;
+		/* Fallthrough */
+	case -EAGAIN:		/* conflock holds conflicting lock */
 		status = nfserr_denied;
 		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
 		nfs4_set_lock_denied(conflock, &lock->lk_denied);
 		break;
-	case (EDEADLK):
+	case -EDEADLK:
 		status = nfserr_deadlock;
 		break;
 	default:
@@ -5730,6 +5897,16 @@
 		break;
 	}
 out:
+	if (nbl) {
+		/* dequeue it if we queued it before */
+		if (fl_flags & FL_SLEEP) {
+			spin_lock(&nn->client_lock);
+			list_del_init(&nbl->nbl_list);
+			list_del_init(&nbl->nbl_lru);
+			spin_unlock(&nn->client_lock);
+		}
+		free_blocked_lock(nbl);
+	}
 	if (filp)
 		fput(filp);
 	if (lock_stp) {
@@ -5753,8 +5930,6 @@
 	if (open_stp)
 		nfs4_put_stid(&open_stp->st_stid);
 	nfsd4_bump_seqid(cstate, status);
-	if (file_lock)
-		locks_free_lock(file_lock);
 	if (conflock)
 		locks_free_lock(conflock);
 	return status;
@@ -6768,6 +6943,7 @@
 	INIT_LIST_HEAD(&nn->client_lru);
 	INIT_LIST_HEAD(&nn->close_lru);
 	INIT_LIST_HEAD(&nn->del_recall_lru);
+	INIT_LIST_HEAD(&nn->blocked_locks_lru);
 	spin_lock_init(&nn->client_lock);
 
 	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
@@ -6865,6 +7041,7 @@
 	struct nfs4_delegation *dp = NULL;
 	struct list_head *pos, *next, reaplist;
 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+	struct nfsd4_blocked_lock *nbl;
 
 	cancel_delayed_work_sync(&nn->laundromat_work);
 	locks_end_grace(&nn->nfsd4_manager);
@@ -6885,6 +7062,24 @@
 		nfs4_put_stid(&dp->dl_stid);
 	}
 
+	BUG_ON(!list_empty(&reaplist));
+	spin_lock(&nn->client_lock);
+	while (!list_empty(&nn->blocked_locks_lru)) {
+		nbl = list_first_entry(&nn->blocked_locks_lru,
+					struct nfsd4_blocked_lock, nbl_lru);
+		list_move(&nbl->nbl_lru, &reaplist);
+		list_del_init(&nbl->nbl_list);
+	}
+	spin_unlock(&nn->client_lock);
+
+	while (!list_empty(&reaplist)) {
+		nbl = list_first_entry(&nn->blocked_locks_lru,
+					struct nfsd4_blocked_lock, nbl_lru);
+		list_del_init(&nbl->nbl_lru);
+		posix_unblock_lock(&nbl->nbl_lock);
+		free_blocked_lock(nbl);
+	}
+
 	nfsd4_client_tracking_exit(net);
 	nfs4_state_destroy_net(net);
 }
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 0aa0236..c2d2895 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1694,6 +1694,30 @@
 }
 
 static __be32
+nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
+{
+	DECODE_HEAD;
+	unsigned int tmp;
+
+	status = nfsd4_decode_stateid(argp, &copy->cp_src_stateid);
+	if (status)
+		return status;
+	status = nfsd4_decode_stateid(argp, &copy->cp_dst_stateid);
+	if (status)
+		return status;
+
+	READ_BUF(8 + 8 + 8 + 4 + 4 + 4);
+	p = xdr_decode_hyper(p, &copy->cp_src_pos);
+	p = xdr_decode_hyper(p, &copy->cp_dst_pos);
+	p = xdr_decode_hyper(p, &copy->cp_count);
+	copy->cp_consecutive = be32_to_cpup(p++);
+	copy->cp_synchronous = be32_to_cpup(p++);
+	tmp = be32_to_cpup(p); /* Source server list not supported */
+
+	DECODE_TAIL;
+}
+
+static __be32
 nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek)
 {
 	DECODE_HEAD;
@@ -1793,7 +1817,7 @@
 
 	/* new operations for NFSv4.2 */
 	[OP_ALLOCATE]		= (nfsd4_dec)nfsd4_decode_fallocate,
-	[OP_COPY]		= (nfsd4_dec)nfsd4_decode_notsupp,
+	[OP_COPY]		= (nfsd4_dec)nfsd4_decode_copy,
 	[OP_COPY_NOTIFY]	= (nfsd4_dec)nfsd4_decode_notsupp,
 	[OP_DEALLOCATE]		= (nfsd4_dec)nfsd4_decode_fallocate,
 	[OP_IO_ADVISE]		= (nfsd4_dec)nfsd4_decode_notsupp,
@@ -4062,7 +4086,7 @@
 	u32 starting_len = xdr->buf->len, needed_len;
 	__be32 *p;
 
-	dprintk("%s: err %d\n", __func__, nfserr);
+	dprintk("%s: err %d\n", __func__, be32_to_cpu(nfserr));
 	if (nfserr)
 		goto out;
 
@@ -4202,6 +4226,41 @@
 #endif /* CONFIG_NFSD_PNFS */
 
 static __be32
+nfsd42_encode_write_res(struct nfsd4_compoundres *resp, struct nfsd42_write_res *write)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(&resp->xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE);
+	if (!p)
+		return nfserr_resource;
+
+	*p++ = cpu_to_be32(0);
+	p = xdr_encode_hyper(p, write->wr_bytes_written);
+	*p++ = cpu_to_be32(write->wr_stable_how);
+	p = xdr_encode_opaque_fixed(p, write->wr_verifier.data,
+				    NFS4_VERIFIER_SIZE);
+	return nfs_ok;
+}
+
+static __be32
+nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
+		  struct nfsd4_copy *copy)
+{
+	__be32 *p;
+
+	if (!nfserr) {
+		nfserr = nfsd42_encode_write_res(resp, &copy->cp_res);
+		if (nfserr)
+			return nfserr;
+
+		p = xdr_reserve_space(&resp->xdr, 4 + 4);
+		*p++ = cpu_to_be32(copy->cp_consecutive);
+		*p++ = cpu_to_be32(copy->cp_synchronous);
+	}
+	return nfserr;
+}
+
+static __be32
 nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
 		  struct nfsd4_seek *seek)
 {
@@ -4300,7 +4359,7 @@
 
 	/* NFSv4.2 operations */
 	[OP_ALLOCATE]		= (nfsd4_enc)nfsd4_encode_noop,
-	[OP_COPY]		= (nfsd4_enc)nfsd4_encode_noop,
+	[OP_COPY]		= (nfsd4_enc)nfsd4_encode_copy,
 	[OP_COPY_NOTIFY]	= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_DEALLOCATE]		= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_IO_ADVISE]		= (nfsd4_enc)nfsd4_encode_noop,
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 65ad016..36b2af9 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1216,6 +1216,8 @@
 		goto out_idmap_error;
 	nn->nfsd4_lease = 90;	/* default lease time */
 	nn->nfsd4_grace = 90;
+	nn->clverifier_counter = prandom_u32();
+	nn->clientid_counter = prandom_u32();
 	return 0;
 
 out_idmap_error:
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index e921476..010aff5 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -74,10 +74,10 @@
 	 * which only requires access, and "set-[ac]time-to-X" which
 	 * requires ownership.
 	 * So if it looks like it might be "set both to the same time which
-	 * is close to now", and if inode_change_ok fails, then we
+	 * is close to now", and if setattr_prepare fails, then we
 	 * convert to "set to now" instead of "set to explicit time"
 	 *
-	 * We only call inode_change_ok as the last test as technically
+	 * We only call setattr_prepare as the last test as technically
 	 * it is not an interface that we should be using.
 	 */
 #define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET)
@@ -92,17 +92,15 @@
 		 * request is.  We require it be within 30 minutes of now.
 		 */
 		time_t delta = iap->ia_atime.tv_sec - get_seconds();
-		struct inode *inode;
 
 		nfserr = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP);
 		if (nfserr)
 			goto done;
-		inode = d_inode(fhp->fh_dentry);
 
 		if (delta < 0)
 			delta = -delta;
 		if (delta < MAX_TOUCH_TIME_ERROR &&
-		    inode_change_ok(inode, iap) != 0) {
+		    setattr_prepare(fhp->fh_dentry, iap) != 0) {
 			/*
 			 * Turn off ATTR_[AM]TIME_SET but leave ATTR_[AM]TIME.
 			 * This will cause notify_change to set these times
@@ -791,6 +789,7 @@
 		{ nfserr_toosmall, -ETOOSMALL },
 		{ nfserr_serverfault, -ESERVERFAULT },
 		{ nfserr_serverfault, -ENFILE },
+		{ nfserr_io, -EUCLEAN },
 	};
 	int	i;
 
@@ -798,7 +797,7 @@
 		if (nfs_errtbl[i].syserr == errno)
 			return nfs_errtbl[i].nfserr;
 	}
-	WARN(1, "nfsd: non-standard errno: %d\n", errno);
+	WARN_ONCE(1, "nfsd: non-standard errno: %d\n", errno);
 	return nfserr_io;
 }
 
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 45007ac..a2b65fc 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -366,14 +366,21 @@
 };
 #endif
 
+/* Only used under nfsd_mutex, so this atomic may be overkill: */
+static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0);
+
 static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
 {
 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-	unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
+	/* check if the notifier still has clients */
+	if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
+		unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
 #if IS_ENABLED(CONFIG_IPV6)
-	unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
+		unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
 #endif
+	}
+
 	/*
 	 * write_ports can create the server without actually starting
 	 * any threads--if we get shut down before any threads are
@@ -488,10 +495,13 @@
 	}
 
 	set_max_drc();
-	register_inetaddr_notifier(&nfsd_inetaddr_notifier);
+	/* check if the notifier is already set */
+	if (atomic_inc_return(&nfsd_notifier_refcount) == 1) {
+		register_inetaddr_notifier(&nfsd_inetaddr_notifier);
 #if IS_ENABLED(CONFIG_IPV6)
-	register_inet6addr_notifier(&nfsd_inet6addr_notifier);
+		register_inet6addr_notifier(&nfsd_inet6addr_notifier);
 #endif
+	}
 	do_gettimeofday(&nn->nfssvc_boot);		/* record boot time */
 	return 0;
 }
diff --git a/fs/nfsd/pnfs.h b/fs/nfsd/pnfs.h
index 0c2a716..d27a5aa 100644
--- a/fs/nfsd/pnfs.h
+++ b/fs/nfsd/pnfs.h
@@ -19,6 +19,7 @@
 
 struct nfsd4_layout_ops {
 	u32		notify_types;
+	bool		disable_recalls;
 
 	__be32 (*proc_getdeviceinfo)(struct super_block *sb,
 			struct svc_rqst *rqstp,
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index b95adf9..c939936 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -63,7 +63,6 @@
 
 struct nfsd4_callback {
 	struct nfs4_client *cb_clp;
-	u32 cb_minorversion;
 	struct rpc_message cb_msg;
 	const struct nfsd4_callback_ops *cb_ops;
 	struct work_struct cb_work;
@@ -441,11 +440,11 @@
 /*
  * Represents a generic "lockowner". Similar to an openowner. References to it
  * are held by the lock stateids that are created on its behalf. This object is
- * a superset of the nfs4_stateowner struct (or would be if it needed any extra
- * fields).
+ * a superset of the nfs4_stateowner struct.
  */
 struct nfs4_lockowner {
-	struct nfs4_stateowner	lo_owner; /* must be first element */
+	struct nfs4_stateowner	lo_owner;	/* must be first element */
+	struct list_head	lo_blocked;	/* blocked file_locks */
 };
 
 static inline struct nfs4_openowner * openowner(struct nfs4_stateowner *so)
@@ -572,6 +571,7 @@
 	NFSPROC4_CLNT_CB_RECALL,
 	NFSPROC4_CLNT_CB_LAYOUT,
 	NFSPROC4_CLNT_CB_SEQUENCE,
+	NFSPROC4_CLNT_CB_NOTIFY_LOCK,
 };
 
 /* Returns true iff a is later than b: */
@@ -580,6 +580,20 @@
 	return (s32)(a->si_generation - b->si_generation) > 0;
 }
 
+/*
+ * When a client tries to get a lock on a file, we set one of these objects
+ * on the blocking lock. When the lock becomes free, we can then issue a
+ * CB_NOTIFY_LOCK to the server.
+ */
+struct nfsd4_blocked_lock {
+	struct list_head	nbl_list;
+	struct list_head	nbl_lru;
+	unsigned long		nbl_time;
+	struct file_lock	nbl_lock;
+	struct knfsd_fh		nbl_fh;
+	struct nfsd4_callback	nbl_cb;
+};
+
 struct nfsd4_compound_state;
 struct nfsd_net;
 
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index ff476e6..8ca642f 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -513,6 +513,22 @@
 			count));
 }
 
+ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
+			     u64 dst_pos, u64 count)
+{
+
+	/*
+	 * Limit copy to 4MB to prevent indefinitely blocking an nfsd
+	 * thread and client rpc slot.  The choice of 4MB is somewhat
+	 * arbitrary.  We might instead base this on r/wsize, or make it
+	 * tunable, or use a time instead of a byte limit, or implement
+	 * asynchronous copy.  In theory a client could also recognize a
+	 * limit like this and pipeline multiple COPY requests.
+	 */
+	count = min_t(u64, count, 1 << 22);
+	return vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0);
+}
+
 __be32 nfsd4_vfs_fallocate(struct svc_rqst *rqstp, struct svc_fh *fhp,
 			   struct file *file, loff_t offset, loff_t len,
 			   int flags)
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 3cbb1b3..0bf9e7b 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -96,6 +96,8 @@
 				struct svc_fh *res);
 __be32		nfsd_link(struct svc_rqst *, struct svc_fh *,
 				char *, int, struct svc_fh *);
+ssize_t		nfsd_copy_file_range(struct file *, u64,
+				     struct file *, u64, u64);
 __be32		nfsd_rename(struct svc_rqst *,
 				struct svc_fh *, char *, int,
 				struct svc_fh *, char *, int);
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index beea0c5..8fda4ab 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -503,6 +503,28 @@
 	u64		cl_count;
 };
 
+struct nfsd42_write_res {
+	u64			wr_bytes_written;
+	u32			wr_stable_how;
+	nfs4_verifier		wr_verifier;
+};
+
+struct nfsd4_copy {
+	/* request */
+	stateid_t	cp_src_stateid;
+	stateid_t	cp_dst_stateid;
+	u64		cp_src_pos;
+	u64		cp_dst_pos;
+	u64		cp_count;
+
+	/* both */
+	bool		cp_consecutive;
+	bool		cp_synchronous;
+
+	/* response */
+	struct nfsd42_write_res	cp_res;
+};
+
 struct nfsd4_seek {
 	/* request */
 	stateid_t	seek_stateid;
@@ -568,6 +590,7 @@
 		struct nfsd4_fallocate		allocate;
 		struct nfsd4_fallocate		deallocate;
 		struct nfsd4_clone		clone;
+		struct nfsd4_copy		copy;
 		struct nfsd4_seek		seek;
 	} u;
 	struct nfs4_replay *			replay;
diff --git a/fs/nfsd/xdr4cb.h b/fs/nfsd/xdr4cb.h
index c47f6fd..49b719d 100644
--- a/fs/nfsd/xdr4cb.h
+++ b/fs/nfsd/xdr4cb.h
@@ -28,3 +28,12 @@
 #define NFS4_dec_cb_layout_sz		(cb_compound_dec_hdr_sz  +      \
 					cb_sequence_dec_sz +            \
 					op_dec_sz)
+
+#define NFS4_enc_cb_notify_lock_sz	(cb_compound_enc_hdr_sz +        \
+					cb_sequence_enc_sz +             \
+					2 + 1 +				 \
+					XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + \
+					enc_nfs4_fh_sz)
+#define NFS4_dec_cb_notify_lock_sz	(cb_compound_dec_hdr_sz  +      \
+					cb_sequence_dec_sz +            \
+					op_dec_sz)
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 908ebbf..582831a 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -438,7 +438,7 @@
 	nilfs_set_de_type(de, inode);
 	nilfs_commit_chunk(page, mapping, from, to);
 	nilfs_put_page(page);
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 }
 
 /*
@@ -528,7 +528,7 @@
 	de->inode = cpu_to_le64(inode->i_ino);
 	nilfs_set_de_type(de, inode);
 	nilfs_commit_chunk(page, page->mapping, from, to);
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	nilfs_mark_inode_dirty(dir);
 	/* OFFSET_CACHE */
 out_put:
@@ -576,7 +576,7 @@
 		pde->rec_len = nilfs_rec_len_to_disk(to - from);
 	dir->inode = 0;
 	nilfs_commit_chunk(page, mapping, from, to);
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+	inode->i_ctime = inode->i_mtime = current_time(inode);
 out:
 	nilfs_put_page(page);
 	return err;
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index af04f55..c7f4fef 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -367,7 +367,7 @@
 	atomic64_inc(&root->inodes_count);
 	inode_init_owner(inode, dir, mode);
 	inode->i_ino = ino;
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 
 	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
 		err = nilfs_bmap_read(ii->i_bmap, NULL);
@@ -749,7 +749,7 @@
 
 	nilfs_truncate_bmap(ii, blkoff);
 
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 	if (IS_SYNC(inode))
 		nilfs_set_transaction_flag(NILFS_TI_SYNC);
 
@@ -829,7 +829,7 @@
 	struct super_block *sb = inode->i_sb;
 	int err;
 
-	err = inode_change_ok(inode, iattr);
+	err = setattr_prepare(dentry, iattr);
 	if (err)
 		return err;
 
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index f1d7989..1d2c3d7 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -174,7 +174,7 @@
 		(flags & FS_FL_USER_MODIFIABLE);
 
 	nilfs_set_inode_flags(inode);
-	inode->i_ctime = CURRENT_TIME;
+	inode->i_ctime = current_time(inode);
 	if (IS_SYNC(inode))
 		nilfs_set_transaction_flag(NILFS_TI_SYNC);
 
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index dbcf1dc..2b71c60 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -194,7 +194,7 @@
 	if (err)
 		return err;
 
-	inode->i_ctime = CURRENT_TIME;
+	inode->i_ctime = current_time(inode);
 	inode_inc_link_count(inode);
 	ihold(inode);
 
@@ -350,7 +350,8 @@
 }
 
 static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
-			struct inode *new_dir,	struct dentry *new_dentry)
+			struct inode *new_dir,	struct dentry *new_dentry,
+			unsigned int flags)
 {
 	struct inode *old_inode = d_inode(old_dentry);
 	struct inode *new_inode = d_inode(new_dentry);
@@ -361,6 +362,9 @@
 	struct nilfs_transaction_info ti;
 	int err;
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	err = nilfs_transaction_begin(old_dir->i_sb, &ti, 1);
 	if (unlikely(err))
 		return err;
@@ -391,7 +395,7 @@
 			goto out_dir;
 		nilfs_set_link(new_dir, new_de, new_page, old_inode);
 		nilfs_mark_inode_dirty(new_dir);
-		new_inode->i_ctime = CURRENT_TIME;
+		new_inode->i_ctime = current_time(new_inode);
 		if (dir_de)
 			drop_nlink(new_inode);
 		drop_nlink(new_inode);
@@ -410,7 +414,7 @@
 	 * Like most other Unix systems, set the ctime for inodes on a
 	 * rename.
 	 */
-	old_inode->i_ctime = CURRENT_TIME;
+	old_inode->i_ctime = current_time(old_inode);
 
 	nilfs_delete_entry(old_de, old_page);
 
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 30bb100..8718af8 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -78,7 +78,7 @@
 		return ERR_PTR(-ENOMEM);
 	}
 	inode->i_ino = ns->inum;
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	inode->i_flags |= S_IMMUTABLE;
 	inode->i_mode = S_IFREG | S_IRUGO;
 	inode->i_fop = &ns_file_operations;
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index f548629..bf72a2c 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1850,7 +1850,7 @@
 		 * pages being swapped out between us bringing them into memory
 		 * and doing the actual copying.
 		 */
-		if (unlikely(iov_iter_fault_in_multipages_readable(i, bytes))) {
+		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
 			status = -EFAULT;
 			break;
 		}
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index e01287c..7c410f8 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -2813,7 +2813,7 @@
 	 * for real.
 	 */
 	if (!IS_NOCMTIME(VFS_I(base_ni)) && !IS_RDONLY(VFS_I(base_ni))) {
-		struct timespec now = current_fs_time(VFS_I(base_ni)->i_sb);
+		struct timespec now = current_time(VFS_I(base_ni));
 		int sync_it = 0;
 
 		if (!timespec_equal(&VFS_I(base_ni)->i_mtime, &now) ||
@@ -2893,7 +2893,7 @@
 	int err;
 	unsigned int ia_valid = attr->ia_valid;
 
-	err = inode_change_ok(vi, attr);
+	err = setattr_prepare(dentry, attr);
 	if (err)
 		goto out;
 	/* We do not support NTFS ACLs yet. */
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index d15d492..d3c0096 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -2692,7 +2692,7 @@
 
 		/* Set the inode times to the current time. */
 		vi->i_atime = vi->i_mtime = vi->i_ctime =
-			current_fs_time(vi->i_sb);
+			current_time(vi);
 		/*
 		 * Set the file size to 0, the ntfs inode sizes are set to 0 by
 		 * the call to ntfs_init_big_inode() below.
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 2162434..bed1fcb 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -201,7 +201,7 @@
 	}
 
 	inode->i_mode = new_mode;
-	inode->i_ctime = CURRENT_TIME;
+	inode->i_ctime = current_time(inode);
 	di->i_mode = cpu_to_le16(inode->i_mode);
 	di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
 	di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
@@ -241,13 +241,11 @@
 	case ACL_TYPE_ACCESS:
 		name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS;
 		if (acl) {
-			umode_t mode = inode->i_mode;
-			ret = posix_acl_equiv_mode(acl, &mode);
-			if (ret < 0)
-				return ret;
+			umode_t mode;
 
-			if (ret == 0)
-				acl = NULL;
+			ret = posix_acl_update_mode(inode, &mode, &acl);
+			if (ret)
+				return ret;
 
 			ret = ocfs2_acl_set_mode(inode, di_bh,
 						 handle, mode);
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index f165f86..f72712f 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -7293,7 +7293,7 @@
 	}
 
 	inode->i_blocks = ocfs2_inode_sector_count(inode);
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+	inode->i_ctime = inode->i_mtime = current_time(inode);
 
 	di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
 	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index bbb4b3e..c5c5b97 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2030,7 +2030,7 @@
 		}
 		inode->i_blocks = ocfs2_inode_sector_count(inode);
 		di->i_size = cpu_to_le64((u64)i_size_read(inode));
-		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		inode->i_mtime = inode->i_ctime = current_time(inode);
 		di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
 		di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
 		ocfs2_update_inode_fsync_trans(handle, inode, 1);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index e1adf28..e7054e2 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1677,7 +1677,7 @@
 				offset, ocfs2_dir_trailer_blk_off(dir->i_sb));
 
 		if (ocfs2_dirent_would_fit(de, rec_len)) {
-			dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+			dir->i_mtime = dir->i_ctime = current_time(dir);
 			retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
 			if (retval < 0) {
 				mlog_errno(retval);
@@ -2990,7 +2990,7 @@
 	ocfs2_dinode_new_extent_list(dir, di);
 
 	i_size_write(dir, sb->s_blocksize);
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 
 	di->i_size = cpu_to_le64(sb->s_blocksize);
 	di->i_ctime = di->i_mtime = cpu_to_le64(dir->i_ctime.tv_sec);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 6ea06f8..3f828a1 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -3188,6 +3188,9 @@
 				    migrate->new_master,
 				    migrate->master);
 
+	if (ret < 0)
+		kmem_cache_free(dlm_mle_cache, mle);
+
 	spin_unlock(&dlm->master_lock);
 unlock:
 	spin_unlock(&dlm->spinlock);
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 354cdf9..1079fae 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -211,7 +211,7 @@
 	struct inode *inode = d_inode(dentry);
 
 	attr->ia_valid &= ~ATTR_SIZE;
-	error = inode_change_ok(inode, attr);
+	error = setattr_prepare(dentry, attr);
 	if (error)
 		return error;
 
@@ -398,7 +398,7 @@
 	if (inode) {
 		inode->i_ino = get_next_ino();
 		inode_init_owner(inode, NULL, mode);
-		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 		inc_nlink(inode);
 
 		inode->i_fop = &simple_dir_operations;
@@ -421,7 +421,7 @@
 
 	inode->i_ino = get_next_ino();
 	inode_init_owner(inode, parent, mode);
-	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 
 	ip = DLMFS_I(inode);
 	ip->ip_conn = DLMFS_I(parent)->ip_conn;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 8f91639..000c234 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -253,7 +253,7 @@
 		return 0;
 	}
 
-	now = CURRENT_TIME;
+	now = current_time(inode);
 	if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
 		return 0;
 	else
@@ -287,7 +287,7 @@
 	 * have i_mutex to guard against concurrent changes to other
 	 * inode fields.
 	 */
-	inode->i_atime = CURRENT_TIME;
+	inode->i_atime = current_time(inode);
 	di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
 	di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
 	ocfs2_update_inode_fsync_trans(handle, inode, 0);
@@ -308,7 +308,7 @@
 
 	i_size_write(inode, new_i_size);
 	inode->i_blocks = ocfs2_inode_sector_count(inode);
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+	inode->i_ctime = inode->i_mtime = current_time(inode);
 
 	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
 	if (status < 0) {
@@ -429,7 +429,7 @@
 	}
 
 	i_size_write(inode, new_i_size);
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+	inode->i_ctime = inode->i_mtime = current_time(inode);
 
 	di = (struct ocfs2_dinode *) fe_bh->b_data;
 	di->i_size = cpu_to_le64(new_i_size);
@@ -840,7 +840,7 @@
 	i_size_write(inode, abs_to);
 	inode->i_blocks = ocfs2_inode_sector_count(inode);
 	di->i_size = cpu_to_le64((u64)i_size_read(inode));
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 	di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
 	di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
 	di->i_mtime_nsec = di->i_ctime_nsec;
@@ -1155,7 +1155,7 @@
 	if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
 		return 0;
 
-	status = inode_change_ok(inode, attr);
+	status = setattr_prepare(dentry, attr);
 	if (status)
 		return status;
 
@@ -1950,7 +1950,7 @@
 	if (change_size && i_size_read(inode) < size)
 		i_size_write(inode, size);
 
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+	inode->i_ctime = inode->i_mtime = current_time(inode);
 	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
 	if (ret < 0)
 		mlog_errno(ret);
@@ -2444,10 +2444,7 @@
 	.setattr	= ocfs2_setattr,
 	.getattr	= ocfs2_getattr,
 	.permission	= ocfs2_permission,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ocfs2_listxattr,
-	.removexattr	= generic_removexattr,
 	.fiemap		= ocfs2_fiemap,
 	.get_acl	= ocfs2_iop_get_acl,
 	.set_acl	= ocfs2_iop_set_acl,
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index e3d05d9..4e8f32eb 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -953,7 +953,7 @@
 	}
 
 	di = (struct ocfs2_dinode *)di_bh->b_data;
-	inode->i_ctime = CURRENT_TIME;
+	inode->i_ctime = current_time(inode);
 	di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
 	di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
 	ocfs2_update_inode_fsync_trans(handle, inode, 0);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index a8f1225..8d887c7 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -798,7 +798,7 @@
 	}
 
 	inc_nlink(inode);
-	inode->i_ctime = CURRENT_TIME;
+	inode->i_ctime = current_time(inode);
 	ocfs2_set_links_count(fe, inode->i_nlink);
 	fe->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
 	fe->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
@@ -1000,7 +1000,7 @@
 	ocfs2_set_links_count(fe, inode->i_nlink);
 	ocfs2_journal_dirty(handle, fe_bh);
 
-	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+	dir->i_ctime = dir->i_mtime = current_time(dir);
 	if (S_ISDIR(inode->i_mode))
 		drop_nlink(dir);
 
@@ -1203,7 +1203,8 @@
 static int ocfs2_rename(struct inode *old_dir,
 			struct dentry *old_dentry,
 			struct inode *new_dir,
-			struct dentry *new_dentry)
+			struct dentry *new_dentry,
+			unsigned int flags)
 {
 	int status = 0, rename_lock = 0, parents_locked = 0, target_exists = 0;
 	int old_child_locked = 0, new_child_locked = 0, update_dot_dot = 0;
@@ -1228,6 +1229,9 @@
 	struct ocfs2_dir_lookup_result target_insert = { NULL, };
 	bool should_add_orphan = false;
 
+	if (flags)
+		return -EINVAL;
+
 	/* At some point it might be nice to break this function up a
 	 * bit. */
 
@@ -1537,7 +1541,7 @@
 					 new_dir_bh, &target_insert);
 	}
 
-	old_inode->i_ctime = CURRENT_TIME;
+	old_inode->i_ctime = current_time(old_inode);
 	mark_inode_dirty(old_inode);
 
 	status = ocfs2_journal_access_di(handle, INODE_CACHE(old_inode),
@@ -1586,9 +1590,9 @@
 
 	if (new_inode) {
 		drop_nlink(new_inode);
-		new_inode->i_ctime = CURRENT_TIME;
+		new_inode->i_ctime = current_time(new_inode);
 	}
-	old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME;
+	old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
 
 	if (update_dot_dot) {
 		status = ocfs2_update_entry(old_inode, handle,
@@ -2913,10 +2917,7 @@
 	.setattr	= ocfs2_setattr,
 	.getattr	= ocfs2_getattr,
 	.permission	= ocfs2_permission,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ocfs2_listxattr,
-	.removexattr	= generic_removexattr,
 	.fiemap         = ocfs2_fiemap,
 	.get_acl	= ocfs2_iop_get_acl,
 	.set_acl	= ocfs2_iop_set_acl,
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 92bbe93..1923851 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -3778,7 +3778,7 @@
 		goto out_commit;
 	}
 
-	inode->i_ctime = CURRENT_TIME;
+	inode->i_ctime = current_time(inode);
 	di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
 	di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
 
@@ -4094,7 +4094,7 @@
 		 * we want mtime to appear identical to the source and
 		 * update ctime.
 		 */
-		t_inode->i_ctime = CURRENT_TIME;
+		t_inode->i_ctime = current_time(t_inode);
 
 		di->i_ctime = cpu_to_le64(t_inode->i_ctime.tv_sec);
 		di->i_ctime_nsec = cpu_to_le32(t_inode->i_ctime.tv_nsec);
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index 6c2a3e3..6ad8eec 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -91,9 +91,6 @@
 	.get_link	= page_get_link,
 	.getattr	= ocfs2_getattr,
 	.setattr	= ocfs2_setattr,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ocfs2_listxattr,
-	.removexattr	= generic_removexattr,
 	.fiemap		= ocfs2_fiemap,
 };
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 5bb44f7..cb157a3 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -3431,7 +3431,7 @@
 			goto out;
 		}
 
-		inode->i_ctime = CURRENT_TIME;
+		inode->i_ctime = current_time(inode);
 		di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
 		di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
 		ocfs2_journal_dirty(ctxt->handle, xis->inode_bh);
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index c8cbf3b..b714652 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -143,7 +143,7 @@
 	mark_buffer_dirty(bh);
 	brelse(bh);
 
-	dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_ctime = current_time(dir);
 
 	/* mark affected inodes dirty to rebuild checksums */
 	mark_inode_dirty(dir);
@@ -371,12 +371,16 @@
 }
 
 static int omfs_rename(struct inode *old_dir, struct dentry *old_dentry,
-		struct inode *new_dir, struct dentry *new_dentry)
+		       struct inode *new_dir, struct dentry *new_dentry,
+		       unsigned int flags)
 {
 	struct inode *new_inode = d_inode(new_dentry);
 	struct inode *old_inode = d_inode(old_dentry);
 	int err;
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	if (new_inode) {
 		/* overwriting existing file/dir */
 		err = omfs_remove(new_dir, new_dentry);
@@ -395,7 +399,7 @@
 	if (err)
 		goto out;
 
-	old_inode->i_ctime = CURRENT_TIME_SEC;
+	old_inode->i_ctime = current_time(old_inode);
 	mark_inode_dirty(old_inode);
 out:
 	return err;
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index d9e26cf..bf83e66 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -349,7 +349,7 @@
 	struct inode *inode = d_inode(dentry);
 	int error;
 
-	error = inode_change_ok(inode, attr);
+	error = setattr_prepare(dentry, attr);
 	if (error)
 		return error;
 
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index 3d935c8..df7ea85 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -49,7 +49,7 @@
 	inode_init_owner(inode, NULL, mode);
 	inode->i_mapping->a_ops = &omfs_aops;
 
-	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 	switch (mode & S_IFMT) {
 	case S_IFDIR:
 		inode->i_op = &omfs_dir_inops;
diff --git a/fs/open.c b/fs/open.c
index 4fd6e25..d3ed817 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -68,6 +68,7 @@
 long vfs_truncate(const struct path *path, loff_t length)
 {
 	struct inode *inode;
+	struct dentry *upperdentry;
 	long error;
 
 	inode = path->dentry->d_inode;
@@ -90,7 +91,17 @@
 	if (IS_APPEND(inode))
 		goto mnt_drop_write_and_out;
 
-	error = get_write_access(inode);
+	/*
+	 * If this is an overlayfs then do as if opening the file so we get
+	 * write access on the upper inode, not on the overlay inode.  For
+	 * non-overlay filesystems d_real() is an identity function.
+	 */
+	upperdentry = d_real(path->dentry, NULL, O_WRONLY);
+	error = PTR_ERR(upperdentry);
+	if (IS_ERR(upperdentry))
+		goto mnt_drop_write_and_out;
+
+	error = get_write_access(upperdentry->d_inode);
 	if (error)
 		goto mnt_drop_write_and_out;
 
@@ -109,7 +120,7 @@
 		error = do_truncate(path->dentry, length, 0, NULL);
 
 put_write_and_out:
-	put_write_access(inode);
+	put_write_access(upperdentry->d_inode);
 mnt_drop_write_and_out:
 	mnt_drop_write(path->mnt);
 out:
@@ -256,6 +267,11 @@
 	    (mode & ~FALLOC_FL_INSERT_RANGE))
 		return -EINVAL;
 
+	/* Unshare range should only be used with allocate mode. */
+	if ((mode & FALLOC_FL_UNSHARE_RANGE) &&
+	    (mode & ~(FALLOC_FL_UNSHARE_RANGE | FALLOC_FL_KEEP_SIZE)))
+		return -EINVAL;
+
 	if (!(file->f_mode & FMODE_WRITE))
 		return -EBADF;
 
@@ -289,7 +305,8 @@
 	 * Let individual file system decide if it supports preallocation
 	 * for directories or not.
 	 */
-	if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
+	if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode) &&
+	    !S_ISBLK(inode->i_mode))
 		return -ENODEV;
 
 	/* Check for wrap through zero too */
@@ -726,7 +743,7 @@
 	if (error)
 		goto cleanup_all;
 
-	error = break_lease(inode, f->f_flags);
+	error = break_lease(locks_inode(f), f->f_flags);
 	if (error)
 		goto cleanup_all;
 
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index c7a8699..c003a66 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -355,7 +355,7 @@
 	if (!inode)
 		return ERR_PTR(-ENOMEM);
 	if (inode->i_state & I_NEW) {
-		inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+		inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 		if (inode->i_ino == OPENPROM_ROOT_INO) {
 			inode->i_op = &openprom_inode_operations;
 			inode->i_fop = &openprom_operations;
diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c
index 28f2195..7a37544 100644
--- a/fs/orangefs/acl.c
+++ b/fs/orangefs/acl.c
@@ -73,14 +73,11 @@
 	case ACL_TYPE_ACCESS:
 		name = XATTR_NAME_POSIX_ACL_ACCESS;
 		if (acl) {
-			umode_t mode = inode->i_mode;
-			/*
-			 * can we represent this with the traditional file
-			 * mode permission bits?
-			 */
-			error = posix_acl_equiv_mode(acl, &mode);
-			if (error < 0) {
-				gossip_err("%s: posix_acl_equiv_mode err: %d\n",
+			umode_t mode;
+
+			error = posix_acl_update_mode(inode, &mode, &acl);
+			if (error) {
+				gossip_err("%s: posix_acl_update_mode err: %d\n",
 					   __func__,
 					   error);
 				return error;
@@ -90,8 +87,6 @@
 				SetModeFlag(orangefs_inode);
 			inode->i_mode = mode;
 			mark_inode_dirty_sync(inode);
-			if (error == 0)
-				acl = NULL;
 		}
 		break;
 	case ACL_TYPE_DEFAULT:
diff --git a/fs/orangefs/dir.c b/fs/orangefs/dir.c
index 324f0af..284373a 100644
--- a/fs/orangefs/dir.c
+++ b/fs/orangefs/dir.c
@@ -177,8 +177,8 @@
 	}
 
 	gossip_debug(GOSSIP_DIR_DEBUG,
-		     "orangefs_readdir called on %s (pos=%llu)\n",
-		     dentry->d_name.name, llu(pos));
+		     "orangefs_readdir called on %pd (pos=%llu)\n",
+		     dentry, llu(pos));
 
 	memset(&readdir_response, 0, sizeof(readdir_response));
 
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index 3386886..66ea0cc 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -384,7 +384,7 @@
 			file_accessed(file);
 		} else {
 			SetMtimeFlag(orangefs_inode);
-			inode->i_mtime = CURRENT_TIME;
+			inode->i_mtime = current_time(inode);
 			mark_inode_dirty_sync(inode);
 		}
 	}
@@ -611,8 +611,8 @@
 static int orangefs_file_release(struct inode *inode, struct file *file)
 {
 	gossip_debug(GOSSIP_FILE_DEBUG,
-		     "orangefs_file_release: called on %s\n",
-		     file->f_path.dentry->d_name.name);
+		     "orangefs_file_release: called on %pD\n",
+		     file);
 
 	orangefs_flush_inode(inode);
 
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 28a0557..ef3b4eb 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -129,8 +129,8 @@
 				  struct iov_iter *iter)
 {
 	gossip_debug(GOSSIP_INODE_DEBUG,
-		     "orangefs_direct_IO: %s\n",
-		     iocb->ki_filp->f_path.dentry->d_name.name);
+		     "orangefs_direct_IO: %pD\n",
+		     iocb->ki_filp);
 
 	return -EINVAL;
 }
@@ -216,10 +216,10 @@
 	struct inode *inode = dentry->d_inode;
 
 	gossip_debug(GOSSIP_INODE_DEBUG,
-		     "orangefs_setattr: called on %s\n",
-		     dentry->d_name.name);
+		     "orangefs_setattr: called on %pd\n",
+		     dentry);
 
-	ret = inode_change_ok(inode, iattr);
+	ret = setattr_prepare(dentry, iattr);
 	if (ret)
 		goto out;
 
@@ -259,8 +259,8 @@
 	struct orangefs_inode_s *orangefs_inode = NULL;
 
 	gossip_debug(GOSSIP_INODE_DEBUG,
-		     "orangefs_getattr: called on %s\n",
-		     dentry->d_name.name);
+		     "orangefs_getattr: called on %pd\n",
+		     dentry);
 
 	ret = orangefs_inode_getattr(inode, 0, 0);
 	if (ret == 0) {
@@ -296,10 +296,7 @@
 	.set_acl = orangefs_set_acl,
 	.setattr = orangefs_setattr,
 	.getattr = orangefs_getattr,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
 	.listxattr = orangefs_listxattr,
-	.removexattr = generic_removexattr,
 	.permission = orangefs_permission,
 };
 
@@ -438,7 +435,7 @@
 	inode->i_mode = mode;
 	inode->i_uid = current_fsuid();
 	inode->i_gid = current_fsgid();
-	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 	inode->i_size = PAGE_SIZE;
 	inode->i_rdev = dev;
 
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index 35269e3..d15d3d2 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -24,9 +24,9 @@
 	struct inode *inode;
 	int ret;
 
-	gossip_debug(GOSSIP_NAME_DEBUG, "%s: %s\n",
+	gossip_debug(GOSSIP_NAME_DEBUG, "%s: %pd\n",
 		     __func__,
-		     dentry->d_name.name);
+		     dentry);
 
 	new_op = op_alloc(ORANGEFS_VFS_OP_CREATE);
 	if (!new_op)
@@ -43,9 +43,9 @@
 	ret = service_operation(new_op, __func__, get_interruptible_flag(dir));
 
 	gossip_debug(GOSSIP_NAME_DEBUG,
-		     "%s: %s: handle:%pU: fsid:%d: new_op:%p: ret:%d:\n",
+		     "%s: %pd: handle:%pU: fsid:%d: new_op:%p: ret:%d:\n",
 		     __func__,
-		     dentry->d_name.name,
+		     dentry,
 		     &new_op->downcall.resp.create.refn.khandle,
 		     new_op->downcall.resp.create.refn.fs_id,
 		     new_op,
@@ -57,18 +57,18 @@
 	inode = orangefs_new_inode(dir->i_sb, dir, S_IFREG | mode, 0,
 				&new_op->downcall.resp.create.refn);
 	if (IS_ERR(inode)) {
-		gossip_err("%s: Failed to allocate inode for file :%s:\n",
+		gossip_err("%s: Failed to allocate inode for file :%pd:\n",
 			   __func__,
-			   dentry->d_name.name);
+			   dentry);
 		ret = PTR_ERR(inode);
 		goto out;
 	}
 
 	gossip_debug(GOSSIP_NAME_DEBUG,
-		     "%s: Assigned inode :%pU: for file :%s:\n",
+		     "%s: Assigned inode :%pU: for file :%pd:\n",
 		     __func__,
 		     get_khandle_from_ino(inode),
-		     dentry->d_name.name);
+		     dentry);
 
 	d_instantiate(dentry, inode);
 	unlock_new_inode(inode);
@@ -76,20 +76,20 @@
 	ORANGEFS_I(inode)->getattr_time = jiffies - 1;
 
 	gossip_debug(GOSSIP_NAME_DEBUG,
-		     "%s: dentry instantiated for %s\n",
+		     "%s: dentry instantiated for %pd\n",
 		     __func__,
-		     dentry->d_name.name);
+		     dentry);
 
 	SetMtimeFlag(parent);
-	dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	mark_inode_dirty_sync(dir);
 	ret = 0;
 out:
 	op_release(new_op);
 	gossip_debug(GOSSIP_NAME_DEBUG,
-		     "%s: %s: returning %d\n",
+		     "%s: %pd: returning %d\n",
 		     __func__,
-		     dentry->d_name.name,
+		     dentry,
 		     ret);
 	return ret;
 }
@@ -115,8 +115,8 @@
 	 * -EEXIST on O_EXCL opens, which is broken if we skip this lookup
 	 * in the create path)
 	 */
-	gossip_debug(GOSSIP_NAME_DEBUG, "%s called on %s\n",
-		     __func__, dentry->d_name.name);
+	gossip_debug(GOSSIP_NAME_DEBUG, "%s called on %pd\n",
+		     __func__, dentry);
 
 	if (dentry->d_name.len > (ORANGEFS_NAME_MAX - 1))
 		return ERR_PTR(-ENAMETOOLONG);
@@ -169,9 +169,9 @@
 
 			gossip_debug(GOSSIP_NAME_DEBUG,
 				     "orangefs_lookup: Adding *negative* dentry "
-				     "%p for %s\n",
+				     "%p for %pd\n",
 				     dentry,
-				     dentry->d_name.name);
+				     dentry);
 
 			d_add(dentry, NULL);
 			res = NULL;
@@ -224,10 +224,10 @@
 	int ret;
 
 	gossip_debug(GOSSIP_NAME_DEBUG,
-		     "%s: called on %s\n"
+		     "%s: called on %pd\n"
 		     "  (inode %pU): Parent is %pU | fs_id %d\n",
 		     __func__,
-		     dentry->d_name.name,
+		     dentry,
 		     get_khandle_from_ino(inode),
 		     &parent->refn.khandle,
 		     parent->refn.fs_id);
@@ -254,7 +254,7 @@
 		drop_nlink(inode);
 
 		SetMtimeFlag(parent);
-		dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
+		dir->i_mtime = dir->i_ctime = current_time(dir);
 		mark_inode_dirty_sync(dir);
 	}
 	return ret;
@@ -326,12 +326,12 @@
 	ORANGEFS_I(inode)->getattr_time = jiffies - 1;
 
 	gossip_debug(GOSSIP_NAME_DEBUG,
-		     "Inode (Symlink) %pU -> %s\n",
+		     "Inode (Symlink) %pU -> %pd\n",
 		     get_khandle_from_ino(inode),
-		     dentry->d_name.name);
+		     dentry);
 
 	SetMtimeFlag(parent);
-	dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	mark_inode_dirty_sync(dir);
 	ret = 0;
 out:
@@ -390,16 +390,16 @@
 	ORANGEFS_I(inode)->getattr_time = jiffies - 1;
 
 	gossip_debug(GOSSIP_NAME_DEBUG,
-		     "Inode (Directory) %pU -> %s\n",
+		     "Inode (Directory) %pU -> %pd\n",
 		     get_khandle_from_ino(inode),
-		     dentry->d_name.name);
+		     dentry);
 
 	/*
 	 * NOTE: we have no good way to keep nlink consistent for directories
 	 * across clients; keep constant at 1.
 	 */
 	SetMtimeFlag(parent);
-	dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	mark_inode_dirty_sync(dir);
 out:
 	op_release(new_op);
@@ -409,11 +409,15 @@
 static int orangefs_rename(struct inode *old_dir,
 			struct dentry *old_dentry,
 			struct inode *new_dir,
-			struct dentry *new_dentry)
+			struct dentry *new_dentry,
+			unsigned int flags)
 {
 	struct orangefs_kernel_op_s *new_op;
 	int ret;
 
+	if (flags)
+		return -EINVAL;
+
 	gossip_debug(GOSSIP_NAME_DEBUG,
 		     "orangefs_rename: called (%pd2 => %pd2) ct=%d\n",
 		     old_dentry, new_dentry, d_count(new_dentry));
@@ -443,7 +447,7 @@
 		     ret);
 
 	if (new_dentry->d_inode)
-		new_dentry->d_inode->i_ctime = CURRENT_TIME;
+		new_dentry->d_inode->i_ctime = current_time(new_dentry->d_inode);
 
 	op_release(new_op);
 	return ret;
@@ -462,9 +466,6 @@
 	.rename = orangefs_rename,
 	.setattr = orangefs_setattr,
 	.getattr = orangefs_getattr,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
-	.removexattr = generic_removexattr,
 	.listxattr = orangefs_listxattr,
 	.permission = orangefs_permission,
 };
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index 9b24107..eb09aa0 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -428,8 +428,8 @@
 	struct client_debug_mask c_mask = { NULL, 0, 0 };
 
 	gossip_debug(GOSSIP_DEBUGFS_DEBUG,
-		"orangefs_debug_write: %s\n",
-		file->f_path.dentry->d_name.name);
+		"orangefs_debug_write: %pD\n",
+		file);
 
 	/*
 	 * Thwart users who try to jamb a ridiculous number
diff --git a/fs/orangefs/symlink.c b/fs/orangefs/symlink.c
index 8fecf82..10b0b06 100644
--- a/fs/orangefs/symlink.c
+++ b/fs/orangefs/symlink.c
@@ -14,6 +14,5 @@
 	.setattr = orangefs_setattr,
 	.getattr = orangefs_getattr,
 	.listxattr = orangefs_listxattr,
-	.setxattr = generic_setxattr,
 	.permission = orangefs_permission,
 };
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
index 2a9f07f..74a81b1 100644
--- a/fs/orangefs/xattr.c
+++ b/fs/orangefs/xattr.c
@@ -73,6 +73,9 @@
 		     "%s: name %s, buffer_size %zd\n",
 		     __func__, name, size);
 
+	if (S_ISLNK(inode->i_mode))
+		return -EOPNOTSUPP;
+
 	if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) {
 		gossip_err("Invalid key length (%d)\n",
 			   (int)strlen(name));
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index db37a0e..aeb60f7 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -57,9 +57,10 @@
 	ssize_t list_size, size, value_size = 0;
 	char *buf, *name, *value = NULL;
 	int uninitialized_var(error);
+	size_t slen;
 
-	if (!old->d_inode->i_op->getxattr ||
-	    !new->d_inode->i_op->getxattr)
+	if (!(old->d_inode->i_opflags & IOP_XATTR) ||
+	    !(new->d_inode->i_opflags & IOP_XATTR))
 		return 0;
 
 	list_size = vfs_listxattr(old, NULL, 0);
@@ -79,7 +80,16 @@
 		goto out;
 	}
 
-	for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
+	for (name = buf; list_size; name += slen) {
+		slen = strnlen(name, list_size) + 1;
+
+		/* underlying fs providing us with an broken xattr list? */
+		if (WARN_ON(slen > list_size)) {
+			error = -EIO;
+			break;
+		}
+		list_size -= slen;
+
 		if (ovl_is_private_xattr(name))
 			continue;
 retry:
@@ -174,40 +184,6 @@
 	return error;
 }
 
-static char *ovl_read_symlink(struct dentry *realdentry)
-{
-	int res;
-	char *buf;
-	struct inode *inode = realdentry->d_inode;
-	mm_segment_t old_fs;
-
-	res = -EINVAL;
-	if (!inode->i_op->readlink)
-		goto err;
-
-	res = -ENOMEM;
-	buf = (char *) __get_free_page(GFP_KERNEL);
-	if (!buf)
-		goto err;
-
-	old_fs = get_fs();
-	set_fs(get_ds());
-	/* The cast to a user pointer is valid due to the set_fs() */
-	res = inode->i_op->readlink(realdentry,
-				    (char __user *)buf, PAGE_SIZE - 1);
-	set_fs(old_fs);
-	if (res < 0) {
-		free_page((unsigned long) buf);
-		goto err;
-	}
-	buf[res] = '\0';
-
-	return buf;
-
-err:
-	return ERR_PTR(res);
-}
-
 static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat)
 {
 	struct iattr attr = {
@@ -354,19 +330,20 @@
 int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
 		    struct path *lowerpath, struct kstat *stat)
 {
+	DEFINE_DELAYED_CALL(done);
 	struct dentry *workdir = ovl_workdir(dentry);
 	int err;
 	struct kstat pstat;
 	struct path parentpath;
+	struct dentry *lowerdentry = lowerpath->dentry;
 	struct dentry *upperdir;
 	struct dentry *upperdentry;
-	const struct cred *old_cred;
-	char *link = NULL;
+	const char *link = NULL;
 
 	if (WARN_ON(!workdir))
 		return -EROFS;
 
-	ovl_do_check_copy_up(lowerpath->dentry);
+	ovl_do_check_copy_up(lowerdentry);
 
 	ovl_path_upper(parent, &parentpath);
 	upperdir = parentpath.dentry;
@@ -376,13 +353,11 @@
 		return err;
 
 	if (S_ISLNK(stat->mode)) {
-		link = ovl_read_symlink(lowerpath->dentry);
+		link = vfs_get_link(lowerdentry, &done);
 		if (IS_ERR(link))
 			return PTR_ERR(link);
 	}
 
-	old_cred = ovl_override_creds(dentry->d_sb);
-
 	err = -EIO;
 	if (lock_rename(workdir, upperdir) != NULL) {
 		pr_err("overlayfs: failed to lock workdir+upperdir\n");
@@ -403,19 +378,16 @@
 	}
 out_unlock:
 	unlock_rename(workdir, upperdir);
-	revert_creds(old_cred);
-
-	if (link)
-		free_page((unsigned long) link);
+	do_delayed_call(&done);
 
 	return err;
 }
 
 int ovl_copy_up(struct dentry *dentry)
 {
-	int err;
+	int err = 0;
+	const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
 
-	err = 0;
 	while (!err) {
 		struct dentry *next;
 		struct dentry *parent;
@@ -447,6 +419,7 @@
 		dput(parent);
 		dput(next);
 	}
+	revert_creds(old_cred);
 
 	return err;
 }
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index b0ffa1d..306b6c1 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -14,6 +14,7 @@
 #include <linux/cred.h>
 #include <linux/posix_acl.h>
 #include <linux/posix_acl_xattr.h>
+#include <linux/atomic.h>
 #include "overlayfs.h"
 
 void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
@@ -37,8 +38,10 @@
 {
 	struct dentry *temp;
 	char name[20];
+	static atomic_t temp_id = ATOMIC_INIT(0);
 
-	snprintf(name, sizeof(name), "#%lx", (unsigned long) dentry);
+	/* counter is allowed to wrap, since temp dentries are ephemeral */
+	snprintf(name, sizeof(name), "#%x", atomic_inc_return(&temp_id));
 
 	temp = lookup_one_len(name, workdir, strlen(name));
 	if (!IS_ERR(temp) && temp->d_inode) {
@@ -1006,17 +1009,14 @@
 	.symlink	= ovl_symlink,
 	.unlink		= ovl_unlink,
 	.rmdir		= ovl_rmdir,
-	.rename2	= ovl_rename2,
+	.rename		= ovl_rename2,
 	.link		= ovl_link,
 	.setattr	= ovl_setattr,
 	.create		= ovl_create,
 	.mknod		= ovl_mknod,
 	.permission	= ovl_permission,
 	.getattr	= ovl_dir_getattr,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ovl_listxattr,
-	.removexattr	= generic_removexattr,
 	.get_acl	= ovl_get_acl,
 	.update_time	= ovl_update_time,
 };
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index c75625c..c58f01b 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -19,6 +19,7 @@
 	struct dentry *parent;
 	struct kstat stat;
 	struct path lowerpath;
+	const struct cred *old_cred;
 
 	parent = dget_parent(dentry);
 	err = ovl_copy_up(parent);
@@ -26,12 +27,14 @@
 		goto out_dput_parent;
 
 	ovl_path_lower(dentry, &lowerpath);
-	err = vfs_getattr(&lowerpath, &stat);
-	if (err)
-		goto out_dput_parent;
 
-	stat.size = 0;
-	err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat);
+	old_cred = ovl_override_creds(dentry->d_sb);
+	err = vfs_getattr(&lowerpath, &stat);
+	if (!err) {
+		stat.size = 0;
+		err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat);
+	}
+	revert_creds(old_cred);
 
 out_dput_parent:
 	dput(parent);
@@ -53,7 +56,7 @@
 	 * inode_newsize_ok() will always check against MAX_LFS_FILESIZE and not
 	 * check for a swapfile (which this won't be anyway).
 	 */
-	err = inode_change_ok(dentry->d_inode, attr);
+	err = setattr_prepare(dentry, attr);
 	if (err)
 		return err;
 
@@ -153,45 +156,18 @@
 				struct inode *inode,
 				struct delayed_call *done)
 {
-	struct dentry *realdentry;
-	struct inode *realinode;
 	const struct cred *old_cred;
 	const char *p;
 
 	if (!dentry)
 		return ERR_PTR(-ECHILD);
 
-	realdentry = ovl_dentry_real(dentry);
-	realinode = realdentry->d_inode;
-
-	if (WARN_ON(!realinode->i_op->get_link))
-		return ERR_PTR(-EPERM);
-
 	old_cred = ovl_override_creds(dentry->d_sb);
-	p = realinode->i_op->get_link(realdentry, realinode, done);
+	p = vfs_get_link(ovl_dentry_real(dentry), done);
 	revert_creds(old_cred);
 	return p;
 }
 
-static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
-{
-	struct path realpath;
-	struct inode *realinode;
-	const struct cred *old_cred;
-	int err;
-
-	ovl_path_real(dentry, &realpath);
-	realinode = realpath.dentry->d_inode;
-
-	if (!realinode->i_op->readlink)
-		return -EINVAL;
-
-	old_cred = ovl_override_creds(dentry->d_sb);
-	err = realinode->i_op->readlink(realpath.dentry, buf, bufsiz);
-	revert_creds(old_cred);
-	return err;
-}
-
 bool ovl_is_private_xattr(const char *name)
 {
 	return strncmp(name, OVL_XATTR_PREFIX,
@@ -367,10 +343,7 @@
 	.setattr	= ovl_setattr,
 	.permission	= ovl_permission,
 	.getattr	= ovl_getattr,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ovl_listxattr,
-	.removexattr	= generic_removexattr,
 	.get_acl	= ovl_get_acl,
 	.update_time	= ovl_update_time,
 };
@@ -378,12 +351,9 @@
 static const struct inode_operations ovl_symlink_inode_operations = {
 	.setattr	= ovl_setattr,
 	.get_link	= ovl_get_link,
-	.readlink	= ovl_readlink,
+	.readlink	= generic_readlink,
 	.getattr	= ovl_getattr,
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= ovl_listxattr,
-	.removexattr	= generic_removexattr,
 	.update_time	= ovl_update_time,
 };
 
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 5813ccf..e218e74 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -114,13 +114,13 @@
 {
 	int err;
 
-	pr_debug("rename2(%pd2, %pd2, 0x%x)\n",
+	pr_debug("rename(%pd2, %pd2, 0x%x)\n",
 		 olddentry, newdentry, flags);
 
 	err = vfs_rename(olddir, olddentry, newdir, newdentry, NULL, flags);
 
 	if (err) {
-		pr_debug("...rename2(%pd2, %pd2, ...) = %i\n",
+		pr_debug("...rename(%pd2, %pd2, ...) = %i\n",
 			 olddentry, newdentry, err);
 	}
 	return err;
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index e2a94a2..bcf3965 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -273,12 +273,11 @@
 {
 	int res;
 	char val;
-	struct inode *inode = dentry->d_inode;
 
-	if (!S_ISDIR(inode->i_mode) || !inode->i_op->getxattr)
+	if (!d_is_dir(dentry))
 		return false;
 
-	res = inode->i_op->getxattr(dentry, inode, OVL_XATTR_OPAQUE, &val, 1);
+	res = vfs_getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1);
 	if (res == 1 && val == 'y')
 		return true;
 
@@ -419,16 +418,12 @@
 				  DCACHE_OP_COMPARE);
 }
 
-static inline struct dentry *ovl_lookup_real(struct super_block *ovl_sb,
-					     struct dentry *dir,
+static inline struct dentry *ovl_lookup_real(struct dentry *dir,
 					     const struct qstr *name)
 {
-	const struct cred *old_cred;
 	struct dentry *dentry;
 
-	old_cred = ovl_override_creds(ovl_sb);
 	dentry = lookup_one_len_unlocked(name->name, dir, name->len);
-	revert_creds(old_cred);
 
 	if (IS_ERR(dentry)) {
 		if (PTR_ERR(dentry) == -ENOENT)
@@ -469,6 +464,7 @@
 			  unsigned int flags)
 {
 	struct ovl_entry *oe;
+	const struct cred *old_cred;
 	struct ovl_entry *poe = dentry->d_parent->d_fsdata;
 	struct path *stack = NULL;
 	struct dentry *upperdir, *upperdentry = NULL;
@@ -479,9 +475,10 @@
 	unsigned int i;
 	int err;
 
+	old_cred = ovl_override_creds(dentry->d_sb);
 	upperdir = ovl_upperdentry_dereference(poe);
 	if (upperdir) {
-		this = ovl_lookup_real(dentry->d_sb, upperdir, &dentry->d_name);
+		this = ovl_lookup_real(upperdir, &dentry->d_name);
 		err = PTR_ERR(this);
 		if (IS_ERR(this))
 			goto out;
@@ -514,8 +511,7 @@
 		bool opaque = false;
 		struct path lowerpath = poe->lowerstack[i];
 
-		this = ovl_lookup_real(dentry->d_sb,
-				       lowerpath.dentry, &dentry->d_name);
+		this = ovl_lookup_real(lowerpath.dentry, &dentry->d_name);
 		err = PTR_ERR(this);
 		if (IS_ERR(this)) {
 			/*
@@ -588,6 +584,7 @@
 		ovl_copyattr(realdentry->d_inode, inode);
 	}
 
+	revert_creds(old_cred);
 	oe->opaque = upperopaque;
 	oe->__upperdentry = upperdentry;
 	memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr);
@@ -606,6 +603,7 @@
 out_put_upper:
 	dput(upperdentry);
 out:
+	revert_creds(old_cred);
 	return ERR_PTR(err);
 }
 
@@ -834,6 +832,19 @@
 		if (err)
 			goto out_dput;
 
+		/*
+		 * Try to remove POSIX ACL xattrs from workdir.  We are good if:
+		 *
+		 * a) success (there was a POSIX ACL xattr and was removed)
+		 * b) -ENODATA (there was no POSIX ACL xattr)
+		 * c) -EOPNOTSUPP (POSIX ACL xattrs are not supported)
+		 *
+		 * There are various other error values that could effectively
+		 * mean that the xattr doesn't exist (e.g. -ERANGE is returned
+		 * if the xattr name is too long), but the set of filesystems
+		 * allowed as upper are limited to "normal" ones, where checking
+		 * for the above two errors is sufficient.
+		 */
 		err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_DEFAULT);
 		if (err && err != -ENODATA && err != -EOPNOTSUPP)
 			goto out_dput;
@@ -1292,6 +1303,12 @@
 	if (!oe)
 		goto out_put_cred;
 
+	sb->s_magic = OVERLAYFS_SUPER_MAGIC;
+	sb->s_op = &ovl_super_operations;
+	sb->s_xattr = ovl_xattr_handlers;
+	sb->s_fs_info = ufs;
+	sb->s_flags |= MS_POSIXACL | MS_NOREMOTELOCK;
+
 	root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR));
 	if (!root_dentry)
 		goto out_free_oe;
@@ -1315,12 +1332,7 @@
 	ovl_inode_init(d_inode(root_dentry), realinode, !!upperpath.dentry);
 	ovl_copyattr(realinode, d_inode(root_dentry));
 
-	sb->s_magic = OVERLAYFS_SUPER_MAGIC;
-	sb->s_op = &ovl_super_operations;
-	sb->s_xattr = ovl_xattr_handlers;
 	sb->s_root = root_dentry;
-	sb->s_fs_info = ufs;
-	sb->s_flags |= MS_POSIXACL;
 
 	return 0;
 
diff --git a/fs/pipe.c b/fs/pipe.c
index 4fc422f..8e0d9f2 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -601,54 +601,63 @@
 	return retval;
 }
 
-static void account_pipe_buffers(struct pipe_inode_info *pipe,
+static unsigned long account_pipe_buffers(struct user_struct *user,
                                  unsigned long old, unsigned long new)
 {
-	atomic_long_add(new - old, &pipe->user->pipe_bufs);
+	return atomic_long_add_return(new - old, &user->pipe_bufs);
 }
 
-static bool too_many_pipe_buffers_soft(struct user_struct *user)
+static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
 {
-	return pipe_user_pages_soft &&
-	       atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
+	return pipe_user_pages_soft && user_bufs >= pipe_user_pages_soft;
 }
 
-static bool too_many_pipe_buffers_hard(struct user_struct *user)
+static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
 {
-	return pipe_user_pages_hard &&
-	       atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
+	return pipe_user_pages_hard && user_bufs >= pipe_user_pages_hard;
 }
 
 struct pipe_inode_info *alloc_pipe_info(void)
 {
 	struct pipe_inode_info *pipe;
+	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
+	struct user_struct *user = get_current_user();
+	unsigned long user_bufs;
 
 	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
-	if (pipe) {
-		unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
-		struct user_struct *user = get_current_user();
+	if (pipe == NULL)
+		goto out_free_uid;
 
-		if (!too_many_pipe_buffers_hard(user)) {
-			if (too_many_pipe_buffers_soft(user))
-				pipe_bufs = 1;
-			pipe->bufs = kcalloc(pipe_bufs,
-					     sizeof(struct pipe_buffer),
-					     GFP_KERNEL_ACCOUNT);
-		}
+	if (pipe_bufs * PAGE_SIZE > pipe_max_size && !capable(CAP_SYS_RESOURCE))
+		pipe_bufs = pipe_max_size >> PAGE_SHIFT;
 
-		if (pipe->bufs) {
-			init_waitqueue_head(&pipe->wait);
-			pipe->r_counter = pipe->w_counter = 1;
-			pipe->buffers = pipe_bufs;
-			pipe->user = user;
-			account_pipe_buffers(pipe, 0, pipe_bufs);
-			mutex_init(&pipe->mutex);
-			return pipe;
-		}
-		free_uid(user);
-		kfree(pipe);
+	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
+
+	if (too_many_pipe_buffers_soft(user_bufs)) {
+		user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
+		pipe_bufs = 1;
 	}
 
+	if (too_many_pipe_buffers_hard(user_bufs))
+		goto out_revert_acct;
+
+	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
+			     GFP_KERNEL_ACCOUNT);
+
+	if (pipe->bufs) {
+		init_waitqueue_head(&pipe->wait);
+		pipe->r_counter = pipe->w_counter = 1;
+		pipe->buffers = pipe_bufs;
+		pipe->user = user;
+		mutex_init(&pipe->mutex);
+		return pipe;
+	}
+
+out_revert_acct:
+	(void) account_pipe_buffers(user, pipe_bufs, 0);
+	kfree(pipe);
+out_free_uid:
+	free_uid(user);
 	return NULL;
 }
 
@@ -656,7 +665,7 @@
 {
 	int i;
 
-	account_pipe_buffers(pipe, pipe->buffers, 0);
+	(void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
 	free_uid(pipe->user);
 	for (i = 0; i < pipe->buffers; i++) {
 		struct pipe_buffer *buf = pipe->bufs + i;
@@ -713,7 +722,7 @@
 	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
 	inode->i_uid = current_fsuid();
 	inode->i_gid = current_fsgid();
-	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 
 	return inode;
 
@@ -1008,12 +1017,54 @@
 };
 
 /*
+ * Currently we rely on the pipe array holding a power-of-2 number
+ * of pages.
+ */
+static inline unsigned int round_pipe_size(unsigned int size)
+{
+	unsigned long nr_pages;
+
+	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
+}
+
+/*
  * Allocate a new array of pipe buffers and copy the info over. Returns the
  * pipe size if successful, or return -ERROR on error.
  */
-static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
+static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
 {
 	struct pipe_buffer *bufs;
+	unsigned int size, nr_pages;
+	unsigned long user_bufs;
+	long ret = 0;
+
+	size = round_pipe_size(arg);
+	nr_pages = size >> PAGE_SHIFT;
+
+	if (!nr_pages)
+		return -EINVAL;
+
+	/*
+	 * If trying to increase the pipe capacity, check that an
+	 * unprivileged user is not trying to exceed various limits
+	 * (soft limit check here, hard limit check just below).
+	 * Decreasing the pipe capacity is always permitted, even
+	 * if the user is currently over a limit.
+	 */
+	if (nr_pages > pipe->buffers &&
+			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
+		return -EPERM;
+
+	user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);
+
+	if (nr_pages > pipe->buffers &&
+			(too_many_pipe_buffers_hard(user_bufs) ||
+			 too_many_pipe_buffers_soft(user_bufs)) &&
+			!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
+		ret = -EPERM;
+		goto out_revert_acct;
+	}
 
 	/*
 	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
@@ -1021,13 +1072,17 @@
 	 * again like we would do for growing. If the pipe currently
 	 * contains more buffers than arg, then return busy.
 	 */
-	if (nr_pages < pipe->nrbufs)
-		return -EBUSY;
+	if (nr_pages < pipe->nrbufs) {
+		ret = -EBUSY;
+		goto out_revert_acct;
+	}
 
 	bufs = kcalloc(nr_pages, sizeof(*bufs),
 		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
-	if (unlikely(!bufs))
-		return -ENOMEM;
+	if (unlikely(!bufs)) {
+		ret = -ENOMEM;
+		goto out_revert_acct;
+	}
 
 	/*
 	 * The pipe array wraps around, so just start the new one at zero
@@ -1050,24 +1105,15 @@
 			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
 	}
 
-	account_pipe_buffers(pipe, pipe->buffers, nr_pages);
 	pipe->curbuf = 0;
 	kfree(pipe->bufs);
 	pipe->bufs = bufs;
 	pipe->buffers = nr_pages;
 	return nr_pages * PAGE_SIZE;
-}
 
-/*
- * Currently we rely on the pipe array holding a power-of-2 number
- * of pages.
- */
-static inline unsigned int round_pipe_size(unsigned int size)
-{
-	unsigned long nr_pages;
-
-	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
+out_revert_acct:
+	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
+	return ret;
 }
 
 /*
@@ -1109,28 +1155,9 @@
 	__pipe_lock(pipe);
 
 	switch (cmd) {
-	case F_SETPIPE_SZ: {
-		unsigned int size, nr_pages;
-
-		size = round_pipe_size(arg);
-		nr_pages = size >> PAGE_SHIFT;
-
-		ret = -EINVAL;
-		if (!nr_pages)
-			goto out;
-
-		if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
-			ret = -EPERM;
-			goto out;
-		} else if ((too_many_pipe_buffers_hard(pipe->user) ||
-			    too_many_pipe_buffers_soft(pipe->user)) &&
-		           !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
-			ret = -EPERM;
-			goto out;
-		}
-		ret = pipe_set_size(pipe, nr_pages);
+	case F_SETPIPE_SZ:
+		ret = pipe_set_size(pipe, arg);
 		break;
-		}
 	case F_GETPIPE_SZ:
 		ret = pipe->buffers * PAGE_SIZE;
 		break;
@@ -1139,7 +1166,6 @@
 		break;
 	}
 
-out:
 	__pipe_unlock(pipe);
 	return ret;
 }
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 59d47ab0..5955220 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -598,13 +598,14 @@
 	if (IS_ERR(p))
 		return PTR_ERR(p);
 
+	ret = -ENOMEM;
 	clone = posix_acl_clone(p, GFP_NOFS);
 	if (!clone)
-		goto no_mem;
+		goto err_release;
 
 	ret = posix_acl_create_masq(clone, mode);
 	if (ret < 0)
-		goto no_mem_clone;
+		goto err_release_clone;
 
 	if (ret == 0)
 		posix_acl_release(clone);
@@ -618,14 +619,45 @@
 
 	return 0;
 
-no_mem_clone:
+err_release_clone:
 	posix_acl_release(clone);
-no_mem:
+err_release:
 	posix_acl_release(p);
-	return -ENOMEM;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(posix_acl_create);
 
+/**
+ * posix_acl_update_mode  -  update mode in set_acl
+ *
+ * Update the file mode when setting an ACL: compute the new file permission
+ * bits based on the ACL.  In addition, if the ACL is equivalent to the new
+ * file mode, set *acl to NULL to indicate that no ACL should be set.
+ *
+ * As with chmod, clear the setgit bit if the caller is not in the owning group
+ * or capable of CAP_FSETID (see inode_change_ok).
+ *
+ * Called from set_acl inode operations.
+ */
+int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
+			  struct posix_acl **acl)
+{
+	umode_t mode = inode->i_mode;
+	int error;
+
+	error = posix_acl_equiv_mode(*acl, &mode);
+	if (error < 0)
+		return error;
+	if (error == 0)
+		*acl = NULL;
+	if (!in_group_p(inode->i_gid) &&
+	    !capable_wrt_inode_uidgid(inode, CAP_FSETID))
+		mode &= ~S_ISGID;
+	*mode_p = mode;
+	return 0;
+}
+EXPORT_SYMBOL(posix_acl_update_mode);
+
 /*
  * Fix up the uids and gids in posix acl extended attributes in place.
  */
@@ -633,15 +665,15 @@
 	struct user_namespace *to, struct user_namespace *from,
 	void *value, size_t size)
 {
-	posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
-	posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
+	struct posix_acl_xattr_header *header = value;
+	struct posix_acl_xattr_entry *entry = (void *)(header + 1), *end;
 	int count;
 	kuid_t uid;
 	kgid_t gid;
 
 	if (!value)
 		return;
-	if (size < sizeof(posix_acl_xattr_header))
+	if (size < sizeof(struct posix_acl_xattr_header))
 		return;
 	if (header->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
 		return;
@@ -691,15 +723,15 @@
 posix_acl_from_xattr(struct user_namespace *user_ns,
 		     const void *value, size_t size)
 {
-	posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
-	posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
+	const struct posix_acl_xattr_header *header = value;
+	const struct posix_acl_xattr_entry *entry = (const void *)(header + 1), *end;
 	int count;
 	struct posix_acl *acl;
 	struct posix_acl_entry *acl_e;
 
 	if (!value)
 		return NULL;
-	if (size < sizeof(posix_acl_xattr_header))
+	if (size < sizeof(struct posix_acl_xattr_header))
 		 return ERR_PTR(-EINVAL);
 	if (header->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
 		return ERR_PTR(-EOPNOTSUPP);
@@ -760,8 +792,8 @@
 posix_acl_to_xattr(struct user_namespace *user_ns, const struct posix_acl *acl,
 		   void *buffer, size_t size)
 {
-	posix_acl_xattr_header *ext_acl = (posix_acl_xattr_header *)buffer;
-	posix_acl_xattr_entry *ext_entry;
+	struct posix_acl_xattr_header *ext_acl = buffer;
+	struct posix_acl_xattr_entry *ext_entry;
 	int real_size, n;
 
 	real_size = posix_acl_xattr_size(acl->a_count);
@@ -770,7 +802,7 @@
 	if (real_size > size)
 		return -ERANGE;
 
-	ext_entry = ext_acl->a_entries;
+	ext_entry = (void *)(ext_acl + 1);
 	ext_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
 
 	for (n=0; n < acl->a_count; n++, ext_entry++) {
@@ -897,7 +929,7 @@
 			acl = NULL;
 	}
 
-	inode->i_ctime = CURRENT_TIME;
+	inode->i_ctime = current_time(inode);
 	set_cached_acl(inode, type, acl);
 	return 0;
 }
diff --git a/fs/proc/base.c b/fs/proc/base.c
index dc7fe5f..8e65446 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -252,7 +252,7 @@
 	 * Inherently racy -- command line shares address space
 	 * with code and data.
 	 */
-	rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
+	rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_FORCE);
 	if (rv <= 0)
 		goto out_free_page;
 
@@ -270,7 +270,8 @@
 			int nr_read;
 
 			_count = min3(count, len, PAGE_SIZE);
-			nr_read = access_remote_vm(mm, p, page, _count, 0);
+			nr_read = access_remote_vm(mm, p, page, _count,
+					FOLL_FORCE);
 			if (nr_read < 0)
 				rv = nr_read;
 			if (nr_read <= 0)
@@ -305,7 +306,8 @@
 			bool final;
 
 			_count = min3(count, len, PAGE_SIZE);
-			nr_read = access_remote_vm(mm, p, page, _count, 0);
+			nr_read = access_remote_vm(mm, p, page, _count,
+					FOLL_FORCE);
 			if (nr_read < 0)
 				rv = nr_read;
 			if (nr_read <= 0)
@@ -354,7 +356,8 @@
 			bool final;
 
 			_count = min3(count, len, PAGE_SIZE);
-			nr_read = access_remote_vm(mm, p, page, _count, 0);
+			nr_read = access_remote_vm(mm, p, page, _count,
+					FOLL_FORCE);
 			if (nr_read < 0)
 				rv = nr_read;
 			if (nr_read <= 0)
@@ -400,23 +403,6 @@
 	.llseek	= generic_file_llseek,
 };
 
-static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
-			 struct pid *pid, struct task_struct *task)
-{
-	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
-	if (mm && !IS_ERR(mm)) {
-		unsigned int nwords = 0;
-		do {
-			nwords += 2;
-		} while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
-		seq_write(m, mm->saved_auxv, nwords * sizeof(mm->saved_auxv[0]));
-		mmput(mm);
-		return 0;
-	} else
-		return PTR_ERR(mm);
-}
-
-
 #ifdef CONFIG_KALLSYMS
 /*
  * Provides a wchan file via kallsyms in a proper one-value-per-file format.
@@ -709,7 +695,7 @@
 	if (attr->ia_valid & ATTR_MODE)
 		return -EPERM;
 
-	error = inode_change_ok(inode, attr);
+	error = setattr_prepare(dentry, attr);
 	if (error)
 		return error;
 
@@ -849,6 +835,7 @@
 	unsigned long addr = *ppos;
 	ssize_t copied;
 	char *page;
+	unsigned int flags = FOLL_FORCE;
 
 	if (!mm)
 		return 0;
@@ -861,6 +848,9 @@
 	if (!atomic_inc_not_zero(&mm->mm_users))
 		goto free;
 
+	if (write)
+		flags |= FOLL_WRITE;
+
 	while (count > 0) {
 		int this_len = min_t(int, count, PAGE_SIZE);
 
@@ -869,7 +859,7 @@
 			break;
 		}
 
-		this_len = access_remote_vm(mm, addr, page, this_len, write);
+		this_len = access_remote_vm(mm, addr, page, this_len, flags);
 		if (!this_len) {
 			if (!copied)
 				copied = -EIO;
@@ -982,7 +972,7 @@
 		this_len = min(max_len, this_len);
 
 		retval = access_remote_vm(mm, (env_start + src),
-			page, this_len, 0);
+			page, this_len, FOLL_FORCE);
 
 		if (retval <= 0) {
 			ret = retval;
@@ -1014,6 +1004,30 @@
 	.release	= mem_release,
 };
 
+static int auxv_open(struct inode *inode, struct file *file)
+{
+	return __mem_open(inode, file, PTRACE_MODE_READ_FSCREDS);
+}
+
+static ssize_t auxv_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	struct mm_struct *mm = file->private_data;
+	unsigned int nwords = 0;
+	do {
+		nwords += 2;
+	} while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
+	return simple_read_from_buffer(buf, count, ppos, mm->saved_auxv,
+				       nwords * sizeof(mm->saved_auxv[0]));
+}
+
+static const struct file_operations proc_auxv_operations = {
+	.open		= auxv_open,
+	.read		= auxv_read,
+	.llseek		= generic_file_llseek,
+	.release	= mem_release,
+};
+
 static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
 			    loff_t *ppos)
 {
@@ -1664,7 +1678,7 @@
 	/* Common stuff */
 	ei = PROC_I(inode);
 	inode->i_ino = get_next_ino();
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	inode->i_op = &proc_def_inode_operations;
 
 	/*
@@ -2842,7 +2856,7 @@
 	DIR("net",        S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations),
 #endif
 	REG("environ",    S_IRUSR, proc_environ_operations),
-	ONE("auxv",       S_IRUSR, proc_pid_auxv),
+	REG("auxv",       S_IRUSR, proc_auxv_operations),
 	ONE("status",     S_IRUGO, proc_pid_status),
 	ONE("personality", S_IRUSR, proc_pid_personality),
 	ONE("limits",	  S_IRUGO, proc_pid_limits),
@@ -3230,7 +3244,7 @@
 	DIR("net",        S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations),
 #endif
 	REG("environ",   S_IRUSR, proc_environ_operations),
-	ONE("auxv",      S_IRUSR, proc_pid_auxv),
+	REG("auxv",      S_IRUSR, proc_auxv_operations),
 	ONE("status",    S_IRUGO, proc_pid_status),
 	ONE("personality", S_IRUSR, proc_pid_personality),
 	ONE("limits",	 S_IRUGO, proc_pid_limits),
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 01df23c..d21dafe 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -31,7 +31,7 @@
 	put_task_struct(task);
 
 	if (files) {
-		int fd = proc_fd(m->private);
+		unsigned int fd = proc_fd(m->private);
 
 		spin_lock(&files->file_lock);
 		file = fcheck_files(files, fd);
@@ -86,7 +86,7 @@
 	struct task_struct *task;
 	const struct cred *cred;
 	struct inode *inode;
-	int fd;
+	unsigned int fd;
 
 	if (flags & LOOKUP_RCU)
 		return -ECHILD;
@@ -158,7 +158,7 @@
 	}
 
 	if (files) {
-		int fd = proc_fd(d_inode(dentry));
+		unsigned int fd = proc_fd(d_inode(dentry));
 		struct file *fd_file;
 
 		spin_lock(&files->file_lock);
@@ -253,7 +253,7 @@
 			continue;
 		rcu_read_unlock();
 
-		len = snprintf(name, sizeof(name), "%d", fd);
+		len = snprintf(name, sizeof(name), "%u", fd);
 		if (!proc_fill_cache(file, ctx,
 				     name, len, instantiate, p,
 				     (void *)(unsigned long)fd))
diff --git a/fs/proc/fd.h b/fs/proc/fd.h
index 7c047f2..46dafad 100644
--- a/fs/proc/fd.h
+++ b/fs/proc/fd.h
@@ -11,7 +11,7 @@
 
 extern int proc_fd_permission(struct inode *inode, int mask);
 
-static inline int proc_fd(struct inode *inode)
+static inline unsigned int proc_fd(struct inode *inode)
 {
 	return PROC_I(inode)->fd;
 }
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index bca66d8..5f2dc20 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -105,7 +105,7 @@
 	struct proc_dir_entry *de = PDE(inode);
 	int error;
 
-	error = inode_change_ok(inode, iattr);
+	error = setattr_prepare(dentry, iattr);
 	if (error)
 		return error;
 
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index c1b7238..e69ebe6 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -68,7 +68,6 @@
 	ei->sysctl_entry = NULL;
 	ei->ns_ops = NULL;
 	inode = &ei->vfs_inode;
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
 	return inode;
 }
 
@@ -421,7 +420,7 @@
 
 	if (inode) {
 		inode->i_ino = de->low_ino;
-		inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+		inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 		PROC_I(inode)->pde = de;
 
 		if (is_empty_pde(de)) {
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 7931c55..5378441 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -60,7 +60,7 @@
 
 struct proc_inode {
 	struct pid *pid;
-	int fd;
+	unsigned int fd;
 	union proc_op op;
 	struct proc_dir_entry *pde;
 	struct ctl_table_header *sysctl;
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 71025b9..55313d9 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -445,7 +445,7 @@
 	ei->sysctl = head;
 	ei->sysctl_entry = table;
 
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	inode->i_mode = table->mode;
 	if (!S_ISDIR(table->mode)) {
 		inode->i_mode |= S_IFREG;
@@ -759,7 +759,7 @@
 	if (attr->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
 		return -EPERM;
 
-	error = inode_change_ok(inode, attr);
+	error = setattr_prepare(dentry, attr);
 	if (error)
 		return error;
 
diff --git a/fs/proc/self.c b/fs/proc/self.c
index b6a8d35..4024595 100644
--- a/fs/proc/self.c
+++ b/fs/proc/self.c
@@ -56,7 +56,7 @@
 		struct inode *inode = new_inode_pseudo(s);
 		if (inode) {
 			inode->i_ino = self_inum;
-			inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+			inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 			inode->i_mode = S_IFLNK | S_IRWXUGO;
 			inode->i_uid = GLOBAL_ROOT_UID;
 			inode->i_gid = GLOBAL_ROOT_GID;
diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c
index e58a31e..595b90a97 100644
--- a/fs/proc/thread_self.c
+++ b/fs/proc/thread_self.c
@@ -58,7 +58,7 @@
 		struct inode *inode = new_inode_pseudo(s);
 		if (inode) {
 			inode->i_ino = thread_self_inum;
-			inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+			inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 			inode->i_mode = S_IFLNK | S_IRWXUGO;
 			inode->i_uid = GLOBAL_ROOT_UID;
 			inode->i_gid = GLOBAL_ROOT_GID;
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index ec9ddef..1781dc5 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -230,7 +230,7 @@
 	struct inode *inode = new_inode(sb);
 	if (inode) {
 		inode->i_ino = get_next_ino();
-		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 	}
 	return inode;
 }
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index be3ddd1..2bcbf4e 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -169,7 +169,7 @@
 	int ret = 0;
 
 	/* POSIX UID/GID verification for setting inode attributes */
-	ret = inode_change_ok(inode, ia);
+	ret = setattr_prepare(dentry, ia);
 	if (ret)
 		return ret;
 
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 1ab6e6c..8621c03 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -61,7 +61,7 @@
 		inode->i_mapping->a_ops = &ramfs_aops;
 		mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
 		mapping_set_unevictable(inode->i_mapping);
-		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 		switch (mode & S_IFMT) {
 		default:
 			init_special_inode(inode, mode, dev);
@@ -100,7 +100,7 @@
 		d_instantiate(dentry, inode);
 		dget(dentry);	/* Extra count - pin the dentry in core */
 		error = 0;
-		dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+		dir->i_mtime = dir->i_ctime = current_time(dir);
 	}
 	return error;
 }
@@ -130,7 +130,7 @@
 		if (!error) {
 			d_instantiate(dentry, inode);
 			dget(dentry);
-			dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+			dir->i_mtime = dir->i_ctime = current_time(dir);
 		} else
 			iput(inode);
 	}
diff --git a/fs/read_write.c b/fs/read_write.c
index 66215a7..190e0d36 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -730,6 +730,35 @@
 /* A write operation does a read from user space and vice versa */
 #define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)
 
+/**
+ * rw_copy_check_uvector() - Copy an array of &struct iovec from userspace
+ *     into the kernel and check that it is valid.
+ *
+ * @type: One of %CHECK_IOVEC_ONLY, %READ, or %WRITE.
+ * @uvector: Pointer to the userspace array.
+ * @nr_segs: Number of elements in userspace array.
+ * @fast_segs: Number of elements in @fast_pointer.
+ * @fast_pointer: Pointer to (usually small on-stack) kernel array.
+ * @ret_pointer: (output parameter) Pointer to a variable that will point to
+ *     either @fast_pointer, a newly allocated kernel array, or NULL,
+ *     depending on which array was used.
+ *
+ * This function copies an array of &struct iovec of @nr_segs from
+ * userspace into the kernel and checks that each element is valid (e.g.
+ * it does not point to a kernel address or cause overflow by being too
+ * large, etc.).
+ *
+ * As an optimization, the caller may provide a pointer to a small
+ * on-stack array in @fast_pointer, typically %UIO_FASTIOV elements long
+ * (the size of this array, or 0 if unused, should be given in @fast_segs).
+ *
+ * @ret_pointer will always point to the array that was used, so the
+ * caller must take care not to call kfree() on it e.g. in case the
+ * @fast_pointer array was used and it was allocated on the stack.
+ *
+ * Return: The total number of bytes covered by the iovec array on success
+ *   or a negative error code on error.
+ */
 ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
 			      unsigned long nr_segs, unsigned long fast_segs,
 			      struct iovec *fast_pointer,
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 90f815b..2f8c5c9 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -260,10 +260,7 @@
 
 const struct inode_operations reiserfs_file_inode_operations = {
 	.setattr = reiserfs_setattr,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
 	.listxattr = reiserfs_listxattr,
-	.removexattr = generic_removexattr,
 	.permission = reiserfs_permission,
 	.get_acl = reiserfs_get_acl,
 	.set_acl = reiserfs_set_acl,
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index c2c59f9..58b2ded 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -2005,7 +2005,7 @@
 	if (S_ISLNK(inode->i_mode))
 		inode->i_flags &= ~(S_IMMUTABLE | S_APPEND);
 
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	inode->i_size = i_size;
 	inode->i_blocks = 0;
 	inode->i_bytes = 0;
@@ -3312,7 +3312,7 @@
 	unsigned int ia_valid;
 	int error;
 
-	error = inode_change_ok(inode, attr);
+	error = setattr_prepare(dentry, attr);
 	if (error)
 		return error;
 
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 2f1ddc9..1f4692a 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -94,7 +94,7 @@
 			}
 			sd_attrs_to_i_attrs(flags, inode);
 			REISERFS_I(inode)->i_attrs = flags;
-			inode->i_ctime = CURRENT_TIME_SEC;
+			inode->i_ctime = current_time(inode);
 			mark_inode_dirty(inode);
 setflags_out:
 			mnt_drop_write_file(filp);
@@ -115,7 +115,7 @@
 			err = -EFAULT;
 			goto setversion_out;
 		}
-		inode->i_ctime = CURRENT_TIME_SEC;
+		inode->i_ctime = current_time(inode);
 		mark_inode_dirty(inode);
 setversion_out:
 		mnt_drop_write_file(filp);
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 8a36696..e6a2b40 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -570,7 +570,7 @@
 	}
 
 	dir->i_size += paste_size;
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	if (!S_ISDIR(inode->i_mode) && visible)
 		/* reiserfs_mkdir or reiserfs_rename will do that by itself */
 		reiserfs_update_sd(th, dir);
@@ -963,7 +963,7 @@
 			       inode->i_nlink);
 
 	clear_nlink(inode);
-	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(dir);
 	reiserfs_update_sd(&th, inode);
 
 	DEC_DIR_INODE_NLINK(dir)
@@ -1067,11 +1067,11 @@
 		inc_nlink(inode);
 		goto end_unlink;
 	}
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	reiserfs_update_sd(&th, inode);
 
 	dir->i_size -= (de.de_entrylen + DEH_SIZE);
-	dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+	dir->i_ctime = dir->i_mtime = current_time(dir);
 	reiserfs_update_sd(&th, dir);
 
 	if (!savelink)
@@ -1246,7 +1246,7 @@
 		return err ? err : retval;
 	}
 
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	reiserfs_update_sd(&th, inode);
 
 	ihold(inode);
@@ -1306,7 +1306,8 @@
  * get_empty_nodes or its clones
  */
 static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
-			   struct inode *new_dir, struct dentry *new_dentry)
+			   struct inode *new_dir, struct dentry *new_dentry,
+			   unsigned int flags)
 {
 	int retval;
 	INITIALIZE_PATH(old_entry_path);
@@ -1321,6 +1322,9 @@
 	unsigned long savelink = 1;
 	struct timespec ctime;
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	/*
 	 * three balancings: (1) old name removal, (2) new name insertion
 	 * and (3) maybe "save" link insertion
@@ -1567,7 +1571,7 @@
 
 	mark_de_hidden(old_de.de_deh + old_de.de_entry_num);
 	journal_mark_dirty(&th, old_de.de_bh);
-	ctime = CURRENT_TIME_SEC;
+	ctime = current_time(old_dir);
 	old_dir->i_ctime = old_dir->i_mtime = ctime;
 	new_dir->i_ctime = new_dir->i_mtime = ctime;
 	/*
@@ -1650,10 +1654,7 @@
 	.mknod = reiserfs_mknod,
 	.rename = reiserfs_rename,
 	.setattr = reiserfs_setattr,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
 	.listxattr = reiserfs_listxattr,
-	.removexattr = generic_removexattr,
 	.permission = reiserfs_permission,
 	.get_acl = reiserfs_get_acl,
 	.set_acl = reiserfs_set_acl,
@@ -1667,10 +1668,7 @@
 	.readlink = generic_readlink,
 	.get_link	= page_get_link,
 	.setattr = reiserfs_setattr,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
 	.listxattr = reiserfs_listxattr,
-	.removexattr = generic_removexattr,
 	.permission = reiserfs_permission,
 };
 
@@ -1679,10 +1677,7 @@
  */
 const struct inode_operations reiserfs_special_inode_operations = {
 	.setattr = reiserfs_setattr,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
 	.listxattr = reiserfs_listxattr,
-	.removexattr = generic_removexattr,
 	.permission = reiserfs_permission,
 	.get_acl = reiserfs_get_acl,
 	.set_acl = reiserfs_set_acl,
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 4032d1e..a97e352 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1987,8 +1987,8 @@
 			pathrelse(&s_search_path);
 
 			if (update_timestamps) {
-				inode->i_mtime = CURRENT_TIME_SEC;
-				inode->i_ctime = CURRENT_TIME_SEC;
+				inode->i_mtime = current_time(inode);
+				inode->i_ctime = current_time(inode);
 			}
 			reiserfs_update_sd(th, inode);
 
@@ -2012,8 +2012,8 @@
 update_and_out:
 	if (update_timestamps) {
 		/* this is truncate, not file closing */
-		inode->i_mtime = CURRENT_TIME_SEC;
-		inode->i_ctime = CURRENT_TIME_SEC;
+		inode->i_mtime = current_time(inode);
+		inode->i_ctime = current_time(inode);
 	}
 	reiserfs_update_sd(th, inode);
 
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 74d5ddd..0a6ad4e 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2522,7 +2522,7 @@
 	if (inode->i_size < off + len - towrite)
 		i_size_write(inode, off + len - towrite);
 	inode->i_version++;
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 	mark_inode_dirty(inode);
 	return len - towrite;
 }
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index a33812a..e87aa21 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -450,13 +450,13 @@
 
 static void update_ctime(struct inode *inode)
 {
-	struct timespec now = current_fs_time(inode->i_sb);
+	struct timespec now = current_time(inode);
 
 	if (inode_unhashed(inode) || !inode->i_nlink ||
 	    timespec_equal(&inode->i_ctime, &now))
 		return;
 
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	mark_inode_dirty(inode);
 }
 
@@ -575,7 +575,7 @@
 	new_size = buffer_size + sizeof(struct reiserfs_xattr_header);
 	if (!err && new_size < i_size_read(d_inode(dentry))) {
 		struct iattr newattrs = {
-			.ia_ctime = current_fs_time(inode->i_sb),
+			.ia_ctime = current_time(inode),
 			.ia_size = new_size,
 			.ia_valid = ATTR_SIZE | ATTR_CTIME,
 		};
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index dbed42f..3d2256a 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -242,13 +242,9 @@
 	case ACL_TYPE_ACCESS:
 		name = XATTR_NAME_POSIX_ACL_ACCESS;
 		if (acl) {
-			error = posix_acl_equiv_mode(acl, &inode->i_mode);
-			if (error < 0)
+			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+			if (error)
 				return error;
-			else {
-				if (error == 0)
-					acl = NULL;
-			}
 		}
 		break;
 	case ACL_TYPE_DEFAULT:
@@ -277,7 +273,7 @@
 	if (error == -ENODATA) {
 		error = 0;
 		if (type == ACL_TYPE_ACCESS) {
-			inode->i_ctime = CURRENT_TIME_SEC;
+			inode->i_ctime = current_time(inode);
 			mark_inode_dirty(inode);
 		}
 	}
diff --git a/fs/select.c b/fs/select.c
index 8ed9da5..3d4f85d 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -29,6 +29,7 @@
 #include <linux/sched/rt.h>
 #include <linux/freezer.h>
 #include <net/busy_poll.h>
+#include <linux/vmalloc.h>
 
 #include <asm/uaccess.h>
 
@@ -554,7 +555,7 @@
 	fd_set_bits fds;
 	void *bits;
 	int ret, max_fds;
-	unsigned int size;
+	size_t size, alloc_size;
 	struct fdtable *fdt;
 	/* Allocate small arguments on the stack to save memory and be faster */
 	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
@@ -581,7 +582,14 @@
 	if (size > sizeof(stack_fds) / 6) {
 		/* Not enough space in on-stack array; must use kmalloc */
 		ret = -ENOMEM;
-		bits = kmalloc(6 * size, GFP_KERNEL);
+		if (size > (SIZE_MAX / 6))
+			goto out_nofds;
+
+		alloc_size = 6 * size;
+		bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN);
+		if (!bits && alloc_size > PAGE_SIZE)
+			bits = vmalloc(alloc_size);
+
 		if (!bits)
 			goto out_nofds;
 	}
@@ -618,7 +626,7 @@
 
 out:
 	if (bits != stack_fds)
-		kfree(bits);
+		kvfree(bits);
 out_nofds:
 	return ret;
 }
diff --git a/fs/splice.c b/fs/splice.c
index aa38901..153d4f3 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -315,15 +315,9 @@
 		*ppos = kiocb.ki_pos;
 		file_accessed(in);
 	} else if (ret < 0) {
-		if (WARN_ON(to.idx != idx || to.iov_offset)) {
-			/*
-			 * a bogus ->read_iter() has copied something and still
-			 * returned an error instead of a short read.
-			 */
-			to.idx = idx;
-			to.iov_offset = 0;
-			iov_iter_advance(&to, 0); /* to free what was emitted */
-		}
+		to.idx = idx;
+		to.iov_offset = 0;
+		iov_iter_advance(&to, 0); /* to free what was emitted */
 		/*
 		 * callers of ->splice_read() expect -EAGAIN on
 		 * "can't put anything in there", rather than -EFAULT.
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index 0927b1e..e9793b1 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -425,7 +425,6 @@
 
 
 const struct inode_operations squashfs_inode_ops = {
-	.getxattr = generic_getxattr,
 	.listxattr = squashfs_listxattr
 };
 
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c
index 67cad77..40c10d9 100644
--- a/fs/squashfs/namei.c
+++ b/fs/squashfs/namei.c
@@ -247,6 +247,5 @@
 
 const struct inode_operations squashfs_dir_inode_ops = {
 	.lookup = squashfs_lookup,
-	.getxattr = generic_getxattr,
 	.listxattr = squashfs_listxattr
 };
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index d688ef4..79b9c31 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -120,7 +120,6 @@
 const struct inode_operations squashfs_symlink_inode_ops = {
 	.readlink = generic_readlink,
 	.get_link = page_get_link,
-	.getxattr = generic_getxattr,
 	.listxattr = squashfs_listxattr
 };
 
diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h
index c83f5d9..afe70f8 100644
--- a/fs/squashfs/xattr.h
+++ b/fs/squashfs/xattr.h
@@ -42,6 +42,5 @@
 	return 0;
 }
 #define squashfs_listxattr NULL
-#define generic_getxattr NULL
 #define squashfs_xattr_handlers NULL
 #endif
diff --git a/fs/super.c b/fs/super.c
index c2ff475..c183835 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1269,25 +1269,34 @@
 static void sb_wait_write(struct super_block *sb, int level)
 {
 	percpu_down_write(sb->s_writers.rw_sem + level-1);
-	/*
-	 * We are going to return to userspace and forget about this lock, the
-	 * ownership goes to the caller of thaw_super() which does unlock.
-	 *
-	 * FIXME: we should do this before return from freeze_super() after we
-	 * called sync_filesystem(sb) and s_op->freeze_fs(sb), and thaw_super()
-	 * should re-acquire these locks before s_op->unfreeze_fs(sb). However
-	 * this leads to lockdep false-positives, so currently we do the early
-	 * release right after acquire.
-	 */
-	percpu_rwsem_release(sb->s_writers.rw_sem + level-1, 0, _THIS_IP_);
 }
 
-static void sb_freeze_unlock(struct super_block *sb)
+/*
+ * We are going to return to userspace and forget about these locks, the
+ * ownership goes to the caller of thaw_super() which does unlock().
+ */
+static void lockdep_sb_freeze_release(struct super_block *sb)
+{
+	int level;
+
+	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
+		percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
+}
+
+/*
+ * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
+ */
+static void lockdep_sb_freeze_acquire(struct super_block *sb)
 {
 	int level;
 
 	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
 		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
+}
+
+static void sb_freeze_unlock(struct super_block *sb)
+{
+	int level;
 
 	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
 		percpu_up_write(sb->s_writers.rw_sem + level);
@@ -1379,10 +1388,11 @@
 		}
 	}
 	/*
-	 * This is just for debugging purposes so that fs can warn if it
-	 * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
+	 * For debugging purposes so that fs can warn if it sees write activity
+	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
 	 */
 	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
+	lockdep_sb_freeze_release(sb);
 	up_write(&sb->s_umount);
 	return 0;
 }
@@ -1399,7 +1409,7 @@
 	int error;
 
 	down_write(&sb->s_umount);
-	if (sb->s_writers.frozen == SB_UNFROZEN) {
+	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
 		up_write(&sb->s_umount);
 		return -EINVAL;
 	}
@@ -1409,11 +1419,14 @@
 		goto out;
 	}
 
+	lockdep_sb_freeze_acquire(sb);
+
 	if (sb->s_op->unfreeze_fs) {
 		error = sb->s_op->unfreeze_fs(sb);
 		if (error) {
 			printk(KERN_ERR
 				"VFS:Filesystem thaw failed\n");
+			lockdep_sb_freeze_release(sb);
 			up_write(&sb->s_umount);
 			return error;
 		}
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 94374e4..2b67bda 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -21,14 +21,14 @@
 
 void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
 {
-	char *buf, *path = NULL;
+	char *buf;
 
 	buf = kzalloc(PATH_MAX, GFP_KERNEL);
 	if (buf)
-		path = kernfs_path(parent, buf, PATH_MAX);
+		kernfs_path(parent, buf, PATH_MAX);
 
 	WARN(1, KERN_WARNING "sysfs: cannot create duplicate filename '%s/%s'\n",
-	     path, name);
+	     buf, name);
 
 	kfree(buf);
 }
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index 2661b77..5bdae85 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -215,7 +215,7 @@
 	memset (de->name + namelen, 0, SYSV_DIRSIZE - namelen - 2);
 	de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
 	err = dir_commit_chunk(page, pos, SYSV_DIRSIZE);
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	mark_inode_dirty(dir);
 out_page:
 	dir_put_page(page);
@@ -239,7 +239,7 @@
 	de->inode = 0;
 	err = dir_commit_chunk(page, pos, SYSV_DIRSIZE);
 	dir_put_page(page);
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+	inode->i_ctime = inode->i_mtime = current_time(inode);
 	mark_inode_dirty(inode);
 	return err;
 }
@@ -337,7 +337,7 @@
 	de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
 	err = dir_commit_chunk(page, pos, SYSV_DIRSIZE);
 	dir_put_page(page);
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	mark_inode_dirty(dir);
 }
 
diff --git a/fs/sysv/file.c b/fs/sysv/file.c
index 82ddc09..7ba997e 100644
--- a/fs/sysv/file.c
+++ b/fs/sysv/file.c
@@ -33,7 +33,7 @@
 	struct inode *inode = d_inode(dentry);
 	int error;
 
-	error = inode_change_ok(inode, attr);
+	error = setattr_prepare(dentry, attr);
 	if (error)
 		return error;
 
diff --git a/fs/sysv/ialloc.c b/fs/sysv/ialloc.c
index f9db4eb..53f1b789 100644
--- a/fs/sysv/ialloc.c
+++ b/fs/sysv/ialloc.c
@@ -164,7 +164,7 @@
 	dirty_sb(sb);
 	inode_init_owner(inode, dir, mode);
 	inode->i_ino = fs16_to_cpu(sbi, ino);
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	inode->i_blocks = 0;
 	memset(SYSV_I(inode)->i_data, 0, sizeof(SYSV_I(inode)->i_data));
 	SYSV_I(inode)->i_dir_start_lookup = 0;
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index 2fde40a..08d3e63 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -178,7 +178,7 @@
 	*where->p = where->key;
 	write_unlock(&pointers_lock);
 
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 
 	/* had we spliced it onto indirect block? */
 	if (where->bh)
@@ -418,7 +418,7 @@
 		}
 		n++;
 	}
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 	if (IS_SYNC(inode))
 		sysv_sync_inode (inode);
 	else
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index a42de45c..d8817f1 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -120,7 +120,7 @@
 {
 	struct inode *inode = d_inode(old_dentry);
 
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	inode_inc_link_count(inode);
 	ihold(inode);
 
@@ -206,7 +206,8 @@
  * higher-level routines.
  */
 static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
-		  struct inode * new_dir, struct dentry * new_dentry)
+		       struct inode * new_dir, struct dentry * new_dentry,
+		       unsigned int flags)
 {
 	struct inode * old_inode = d_inode(old_dentry);
 	struct inode * new_inode = d_inode(new_dentry);
@@ -216,6 +217,9 @@
 	struct sysv_dir_entry * old_de;
 	int err = -ENOENT;
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	old_de = sysv_find_entry(old_dentry, &old_page);
 	if (!old_de)
 		goto out;
@@ -240,7 +244,7 @@
 		if (!new_de)
 			goto out_dir;
 		sysv_set_link(new_de, new_page, old_inode);
-		new_inode->i_ctime = CURRENT_TIME_SEC;
+		new_inode->i_ctime = current_time(new_inode);
 		if (dir_de)
 			drop_nlink(new_inode);
 		inode_dec_link_count(new_inode);
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index ad40b64..21d36d2 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -133,7 +133,7 @@
 	struct inode *inode = new_inode(sb);
 	if (inode) {
 		inode->i_ino = get_next_ino();
-		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 	}
 	return inode;
 }
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 4b86d3a..c8f60df 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -301,6 +301,95 @@
 	return err;
 }
 
+static int do_tmpfile(struct inode *dir, struct dentry *dentry,
+		      umode_t mode, struct inode **whiteout)
+{
+	struct inode *inode;
+	struct ubifs_info *c = dir->i_sb->s_fs_info;
+	struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1};
+	struct ubifs_budget_req ino_req = { .dirtied_ino = 1 };
+	struct ubifs_inode *ui, *dir_ui = ubifs_inode(dir);
+	int err, instantiated = 0;
+
+	/*
+	 * Budget request settings: new dirty inode, new direntry,
+	 * budget for dirtied inode will be released via writeback.
+	 */
+
+	dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
+		dentry, mode, dir->i_ino);
+
+	err = ubifs_budget_space(c, &req);
+	if (err)
+		return err;
+
+	err = ubifs_budget_space(c, &ino_req);
+	if (err) {
+		ubifs_release_budget(c, &req);
+		return err;
+	}
+
+	inode = ubifs_new_inode(c, dir, mode);
+	if (IS_ERR(inode)) {
+		err = PTR_ERR(inode);
+		goto out_budg;
+	}
+	ui = ubifs_inode(inode);
+
+	if (whiteout) {
+		init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
+		ubifs_assert(inode->i_op == &ubifs_file_inode_operations);
+	}
+
+	err = ubifs_init_security(dir, inode, &dentry->d_name);
+	if (err)
+		goto out_inode;
+
+	mutex_lock(&ui->ui_mutex);
+	insert_inode_hash(inode);
+
+	if (whiteout) {
+		mark_inode_dirty(inode);
+		drop_nlink(inode);
+		*whiteout = inode;
+	} else {
+		d_tmpfile(dentry, inode);
+	}
+	ubifs_assert(ui->dirty);
+
+	instantiated = 1;
+	mutex_unlock(&ui->ui_mutex);
+
+	mutex_lock(&dir_ui->ui_mutex);
+	err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 1, 0);
+	if (err)
+		goto out_cancel;
+	mutex_unlock(&dir_ui->ui_mutex);
+
+	ubifs_release_budget(c, &req);
+
+	return 0;
+
+out_cancel:
+	mutex_unlock(&dir_ui->ui_mutex);
+out_inode:
+	make_bad_inode(inode);
+	if (!instantiated)
+		iput(inode);
+out_budg:
+	ubifs_release_budget(c, &req);
+	if (!instantiated)
+		ubifs_release_budget(c, &ino_req);
+	ubifs_err(c, "cannot create temporary file, error %d", err);
+	return err;
+}
+
+static int ubifs_tmpfile(struct inode *dir, struct dentry *dentry,
+			 umode_t mode)
+{
+	return do_tmpfile(dir, dentry, mode, NULL);
+}
+
 /**
  * vfs_dent_type - get VFS directory entry type.
  * @type: UBIFS directory entry type
@@ -927,37 +1016,43 @@
 }
 
 /**
- * lock_3_inodes - a wrapper for locking three UBIFS inodes.
+ * lock_4_inodes - a wrapper for locking three UBIFS inodes.
  * @inode1: first inode
  * @inode2: second inode
  * @inode3: third inode
+ * @inode4: fouth inode
  *
  * This function is used for 'ubifs_rename()' and @inode1 may be the same as
- * @inode2 whereas @inode3 may be %NULL.
+ * @inode2 whereas @inode3 and @inode4 may be %NULL.
  *
  * We do not implement any tricks to guarantee strict lock ordering, because
  * VFS has already done it for us on the @i_mutex. So this is just a simple
  * wrapper function.
  */
-static void lock_3_inodes(struct inode *inode1, struct inode *inode2,
-			  struct inode *inode3)
+static void lock_4_inodes(struct inode *inode1, struct inode *inode2,
+			  struct inode *inode3, struct inode *inode4)
 {
 	mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1);
 	if (inode2 != inode1)
 		mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2);
 	if (inode3)
 		mutex_lock_nested(&ubifs_inode(inode3)->ui_mutex, WB_MUTEX_3);
+	if (inode4)
+		mutex_lock_nested(&ubifs_inode(inode4)->ui_mutex, WB_MUTEX_4);
 }
 
 /**
- * unlock_3_inodes - a wrapper for unlocking three UBIFS inodes for rename.
+ * unlock_4_inodes - a wrapper for unlocking three UBIFS inodes for rename.
  * @inode1: first inode
  * @inode2: second inode
  * @inode3: third inode
+ * @inode4: fouth inode
  */
-static void unlock_3_inodes(struct inode *inode1, struct inode *inode2,
-			    struct inode *inode3)
+static void unlock_4_inodes(struct inode *inode1, struct inode *inode2,
+			    struct inode *inode3, struct inode *inode4)
 {
+	if (inode4)
+		mutex_unlock(&ubifs_inode(inode4)->ui_mutex);
 	if (inode3)
 		mutex_unlock(&ubifs_inode(inode3)->ui_mutex);
 	if (inode1 != inode2)
@@ -966,12 +1061,15 @@
 }
 
 static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
-			struct inode *new_dir, struct dentry *new_dentry)
+			struct inode *new_dir, struct dentry *new_dentry,
+			unsigned int flags)
 {
 	struct ubifs_info *c = old_dir->i_sb->s_fs_info;
 	struct inode *old_inode = d_inode(old_dentry);
 	struct inode *new_inode = d_inode(new_dentry);
+	struct inode *whiteout = NULL;
 	struct ubifs_inode *old_inode_ui = ubifs_inode(old_inode);
+	struct ubifs_inode *whiteout_ui = NULL;
 	int err, release, sync = 0, move = (new_dir != old_dir);
 	int is_dir = S_ISDIR(old_inode->i_mode);
 	int unlink = !!new_inode;
@@ -984,6 +1082,9 @@
 	struct timespec time;
 	unsigned int uninitialized_var(saved_nlink);
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	/*
 	 * Budget request settings: deletion direntry, new direntry, removing
 	 * the old inode, and changing old and new parent directory inodes.
@@ -993,15 +1094,13 @@
 	 * separately.
 	 */
 
-	dbg_gen("dent '%pd' ino %lu in dir ino %lu to dent '%pd' in dir ino %lu",
+	dbg_gen("dent '%pd' ino %lu in dir ino %lu to dent '%pd' in dir ino %lu flags 0x%x",
 		old_dentry, old_inode->i_ino, old_dir->i_ino,
-		new_dentry, new_dir->i_ino);
-	ubifs_assert(inode_is_locked(old_dir));
-	ubifs_assert(inode_is_locked(new_dir));
+		new_dentry, new_dir->i_ino, flags);
+
 	if (unlink)
 		ubifs_assert(inode_is_locked(new_inode));
 
-
 	if (unlink && is_dir) {
 		err = check_dir_empty(c, new_inode);
 		if (err)
@@ -1017,7 +1116,32 @@
 		return err;
 	}
 
-	lock_3_inodes(old_dir, new_dir, new_inode);
+	if (flags & RENAME_WHITEOUT) {
+		union ubifs_dev_desc *dev = NULL;
+
+		dev = kmalloc(sizeof(union ubifs_dev_desc), GFP_NOFS);
+		if (!dev) {
+			ubifs_release_budget(c, &req);
+			ubifs_release_budget(c, &ino_req);
+			return -ENOMEM;
+		}
+
+		err = do_tmpfile(old_dir, old_dentry, S_IFCHR | WHITEOUT_MODE, &whiteout);
+		if (err) {
+			ubifs_release_budget(c, &req);
+			ubifs_release_budget(c, &ino_req);
+			kfree(dev);
+			return err;
+		}
+
+		whiteout->i_state |= I_LINKABLE;
+		whiteout_ui = ubifs_inode(whiteout);
+		whiteout_ui->data = dev;
+		whiteout_ui->data_len = ubifs_encode_dev(dev, MKDEV(0, 0));
+		ubifs_assert(!whiteout_ui->dirty);
+	}
+
+	lock_4_inodes(old_dir, new_dir, new_inode, whiteout);
 
 	/*
 	 * Like most other Unix systems, set the @i_ctime for inodes on a
@@ -1087,12 +1211,34 @@
 		if (unlink && IS_SYNC(new_inode))
 			sync = 1;
 	}
-	err = ubifs_jnl_rename(c, old_dir, old_dentry, new_dir, new_dentry,
+
+	if (whiteout) {
+		struct ubifs_budget_req wht_req = { .dirtied_ino = 1,
+				.dirtied_ino_d = \
+				ALIGN(ubifs_inode(whiteout)->data_len, 8) };
+
+		err = ubifs_budget_space(c, &wht_req);
+		if (err) {
+			ubifs_release_budget(c, &req);
+			ubifs_release_budget(c, &ino_req);
+			kfree(whiteout_ui->data);
+			whiteout_ui->data_len = 0;
+			iput(whiteout);
+			return err;
+		}
+
+		inc_nlink(whiteout);
+		mark_inode_dirty(whiteout);
+		whiteout->i_state &= ~I_LINKABLE;
+		iput(whiteout);
+	}
+
+	err = ubifs_jnl_rename(c, old_dir, old_dentry, new_dir, new_dentry, whiteout,
 			       sync);
 	if (err)
 		goto out_cancel;
 
-	unlock_3_inodes(old_dir, new_dir, new_inode);
+	unlock_4_inodes(old_dir, new_dir, new_inode, whiteout);
 	ubifs_release_budget(c, &req);
 
 	mutex_lock(&old_inode_ui->ui_mutex);
@@ -1125,12 +1271,74 @@
 				inc_nlink(old_dir);
 		}
 	}
-	unlock_3_inodes(old_dir, new_dir, new_inode);
+	if (whiteout) {
+		drop_nlink(whiteout);
+		iput(whiteout);
+	}
+	unlock_4_inodes(old_dir, new_dir, new_inode, whiteout);
 	ubifs_release_budget(c, &ino_req);
 	ubifs_release_budget(c, &req);
 	return err;
 }
 
+static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
+			struct inode *new_dir, struct dentry *new_dentry)
+{
+	struct ubifs_info *c = old_dir->i_sb->s_fs_info;
+	struct ubifs_budget_req req = { .new_dent = 1, .mod_dent = 1,
+				.dirtied_ino = 2 };
+	int sync = IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir);
+	struct inode *fst_inode = d_inode(old_dentry);
+	struct inode *snd_inode = d_inode(new_dentry);
+	struct timespec time;
+	int err;
+
+	ubifs_assert(fst_inode && snd_inode);
+
+	lock_4_inodes(old_dir, new_dir, NULL, NULL);
+
+	time = ubifs_current_time(old_dir);
+	fst_inode->i_ctime = time;
+	snd_inode->i_ctime = time;
+	old_dir->i_mtime = old_dir->i_ctime = time;
+	new_dir->i_mtime = new_dir->i_ctime = time;
+
+	if (old_dir != new_dir) {
+		if (S_ISDIR(fst_inode->i_mode) && !S_ISDIR(snd_inode->i_mode)) {
+			inc_nlink(new_dir);
+			drop_nlink(old_dir);
+		}
+		else if (!S_ISDIR(fst_inode->i_mode) && S_ISDIR(snd_inode->i_mode)) {
+			drop_nlink(new_dir);
+			inc_nlink(old_dir);
+		}
+	}
+
+	err = ubifs_jnl_xrename(c, old_dir, old_dentry, new_dir, new_dentry,
+				sync);
+
+	unlock_4_inodes(old_dir, new_dir, NULL, NULL);
+	ubifs_release_budget(c, &req);
+
+	return err;
+}
+
+static int ubifs_rename2(struct inode *old_dir, struct dentry *old_dentry,
+			struct inode *new_dir, struct dentry *new_dentry,
+			unsigned int flags)
+{
+	if (flags & ~(RENAME_NOREPLACE | RENAME_WHITEOUT | RENAME_EXCHANGE))
+		return -EINVAL;
+
+	ubifs_assert(inode_is_locked(old_dir));
+	ubifs_assert(inode_is_locked(new_dir));
+
+	if (flags & RENAME_EXCHANGE)
+		return ubifs_xrename(old_dir, old_dentry, new_dir, new_dentry);
+
+	return ubifs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
+}
+
 int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
 		  struct kstat *stat)
 {
@@ -1179,16 +1387,14 @@
 	.mkdir       = ubifs_mkdir,
 	.rmdir       = ubifs_rmdir,
 	.mknod       = ubifs_mknod,
-	.rename      = ubifs_rename,
+	.rename      = ubifs_rename2,
 	.setattr     = ubifs_setattr,
 	.getattr     = ubifs_getattr,
-	.setxattr    = generic_setxattr,
-	.getxattr    = generic_getxattr,
 	.listxattr   = ubifs_listxattr,
-	.removexattr = generic_removexattr,
 #ifdef CONFIG_UBIFS_ATIME_SUPPORT
 	.update_time = ubifs_update_time,
 #endif
+	.tmpfile     = ubifs_tmpfile,
 };
 
 const struct file_operations ubifs_dir_operations = {
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 7bbf420..b4fbeef 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1262,7 +1262,7 @@
 
 	dbg_gen("ino %lu, mode %#x, ia_valid %#x",
 		inode->i_ino, inode->i_mode, attr->ia_valid);
-	err = inode_change_ok(inode, attr);
+	err = setattr_prepare(dentry, attr);
 	if (err)
 		return err;
 
@@ -1397,7 +1397,7 @@
 #endif
 
 /**
- * update_ctime - update mtime and ctime of an inode.
+ * update_mctime - update mtime and ctime of an inode.
  * @inode: inode to update
  *
  * This function updates mtime and ctime of the inode if it is not equivalent to
@@ -1621,10 +1621,7 @@
 const struct inode_operations ubifs_file_inode_operations = {
 	.setattr     = ubifs_setattr,
 	.getattr     = ubifs_getattr,
-	.setxattr    = generic_setxattr,
-	.getxattr    = generic_getxattr,
 	.listxattr   = ubifs_listxattr,
-	.removexattr = generic_removexattr,
 #ifdef CONFIG_UBIFS_ATIME_SUPPORT
 	.update_time = ubifs_update_time,
 #endif
@@ -1635,10 +1632,7 @@
 	.get_link    = simple_get_link,
 	.setattr     = ubifs_setattr,
 	.getattr     = ubifs_getattr,
-	.setxattr    = generic_setxattr,
-	.getxattr    = generic_getxattr,
 	.listxattr   = ubifs_listxattr,
-	.removexattr = generic_removexattr,
 #ifdef CONFIG_UBIFS_ATIME_SUPPORT
 	.update_time = ubifs_update_time,
 #endif
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c
index 821b348..e845c64 100644
--- a/fs/ubifs/gc.c
+++ b/fs/ubifs/gc.c
@@ -113,7 +113,7 @@
  * data_nodes_cmp - compare 2 data nodes.
  * @priv: UBIFS file-system description object
  * @a: first data node
- * @a: second data node
+ * @b: second data node
  *
  * This function compares data nodes @a and @b. Returns %1 if @a has greater
  * inode or block number, and %-1 otherwise.
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 0b9da5b..91bc76dc 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -908,6 +908,147 @@
 }
 
 /**
+ * ubifs_jnl_xrename - cross rename two directory entries.
+ * @c: UBIFS file-system description object
+ * @fst_dir: parent inode of 1st directory entry to exchange
+ * @fst_dentry: 1st directory entry to exchange
+ * @snd_dir: parent inode of 2nd directory entry to exchange
+ * @snd_dentry: 2nd directory entry to exchange
+ * @sync: non-zero if the write-buffer has to be synchronized
+ *
+ * This function implements the cross rename operation which may involve
+ * writing 2 inodes and 2 directory entries. It marks the written inodes as clean
+ * and returns zero on success. In case of failure, a negative error code is
+ * returned.
+ */
+int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
+		      const struct dentry *fst_dentry,
+		      const struct inode *snd_dir,
+		      const struct dentry *snd_dentry, int sync)
+{
+	union ubifs_key key;
+	struct ubifs_dent_node *dent1, *dent2;
+	int err, dlen1, dlen2, lnum, offs, len, plen = UBIFS_INO_NODE_SZ;
+	int aligned_dlen1, aligned_dlen2;
+	int twoparents = (fst_dir != snd_dir);
+	const struct inode *fst_inode = d_inode(fst_dentry);
+	const struct inode *snd_inode = d_inode(snd_dentry);
+	void *p;
+
+	dbg_jnl("dent '%pd' in dir ino %lu between dent '%pd' in dir ino %lu",
+		fst_dentry, fst_dir->i_ino, snd_dentry, snd_dir->i_ino);
+
+	ubifs_assert(ubifs_inode(fst_dir)->data_len == 0);
+	ubifs_assert(ubifs_inode(snd_dir)->data_len == 0);
+	ubifs_assert(mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex));
+	ubifs_assert(mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex));
+
+	dlen1 = UBIFS_DENT_NODE_SZ + snd_dentry->d_name.len + 1;
+	dlen2 = UBIFS_DENT_NODE_SZ + fst_dentry->d_name.len + 1;
+	aligned_dlen1 = ALIGN(dlen1, 8);
+	aligned_dlen2 = ALIGN(dlen2, 8);
+
+	len = aligned_dlen1 + aligned_dlen2 + ALIGN(plen, 8);
+	if (twoparents)
+		len += plen;
+
+	dent1 = kmalloc(len, GFP_NOFS);
+	if (!dent1)
+		return -ENOMEM;
+
+	/* Make reservation before allocating sequence numbers */
+	err = make_reservation(c, BASEHD, len);
+	if (err)
+		goto out_free;
+
+	/* Make new dent for 1st entry */
+	dent1->ch.node_type = UBIFS_DENT_NODE;
+	dent_key_init_flash(c, &dent1->key, snd_dir->i_ino, &snd_dentry->d_name);
+	dent1->inum = cpu_to_le64(fst_inode->i_ino);
+	dent1->type = get_dent_type(fst_inode->i_mode);
+	dent1->nlen = cpu_to_le16(snd_dentry->d_name.len);
+	memcpy(dent1->name, snd_dentry->d_name.name, snd_dentry->d_name.len);
+	dent1->name[snd_dentry->d_name.len] = '\0';
+	zero_dent_node_unused(dent1);
+	ubifs_prep_grp_node(c, dent1, dlen1, 0);
+
+	/* Make new dent for 2nd entry */
+	dent2 = (void *)dent1 + aligned_dlen1;
+	dent2->ch.node_type = UBIFS_DENT_NODE;
+	dent_key_init_flash(c, &dent2->key, fst_dir->i_ino, &fst_dentry->d_name);
+	dent2->inum = cpu_to_le64(snd_inode->i_ino);
+	dent2->type = get_dent_type(snd_inode->i_mode);
+	dent2->nlen = cpu_to_le16(fst_dentry->d_name.len);
+	memcpy(dent2->name, fst_dentry->d_name.name, fst_dentry->d_name.len);
+	dent2->name[fst_dentry->d_name.len] = '\0';
+	zero_dent_node_unused(dent2);
+	ubifs_prep_grp_node(c, dent2, dlen2, 0);
+
+	p = (void *)dent2 + aligned_dlen2;
+	if (!twoparents)
+		pack_inode(c, p, fst_dir, 1);
+	else {
+		pack_inode(c, p, fst_dir, 0);
+		p += ALIGN(plen, 8);
+		pack_inode(c, p, snd_dir, 1);
+	}
+
+	err = write_head(c, BASEHD, dent1, len, &lnum, &offs, sync);
+	if (err)
+		goto out_release;
+	if (!sync) {
+		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;
+
+		ubifs_wbuf_add_ino_nolock(wbuf, fst_dir->i_ino);
+		ubifs_wbuf_add_ino_nolock(wbuf, snd_dir->i_ino);
+	}
+	release_head(c, BASEHD);
+
+	dent_key_init(c, &key, snd_dir->i_ino, &snd_dentry->d_name);
+	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, &snd_dentry->d_name);
+	if (err)
+		goto out_ro;
+
+	offs += aligned_dlen1;
+	dent_key_init(c, &key, fst_dir->i_ino, &fst_dentry->d_name);
+	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, &fst_dentry->d_name);
+	if (err)
+		goto out_ro;
+
+	offs += aligned_dlen2;
+
+	ino_key_init(c, &key, fst_dir->i_ino);
+	err = ubifs_tnc_add(c, &key, lnum, offs, plen);
+	if (err)
+		goto out_ro;
+
+	if (twoparents) {
+		offs += ALIGN(plen, 8);
+		ino_key_init(c, &key, snd_dir->i_ino);
+		err = ubifs_tnc_add(c, &key, lnum, offs, plen);
+		if (err)
+			goto out_ro;
+	}
+
+	finish_reservation(c);
+
+	mark_inode_clean(c, ubifs_inode(fst_dir));
+	if (twoparents)
+		mark_inode_clean(c, ubifs_inode(snd_dir));
+	kfree(dent1);
+	return 0;
+
+out_release:
+	release_head(c, BASEHD);
+out_ro:
+	ubifs_ro_mode(c, err);
+	finish_reservation(c);
+out_free:
+	kfree(dent1);
+	return err;
+}
+
+/**
  * ubifs_jnl_rename - rename a directory entry.
  * @c: UBIFS file-system description object
  * @old_dir: parent inode of directory entry to rename
@@ -917,14 +1058,15 @@
  * @sync: non-zero if the write-buffer has to be synchronized
  *
  * This function implements the re-name operation which may involve writing up
- * to 3 inodes and 2 directory entries. It marks the written inodes as clean
+ * to 4 inodes and 2 directory entries. It marks the written inodes as clean
  * and returns zero on success. In case of failure, a negative error code is
  * returned.
  */
 int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
 		     const struct dentry *old_dentry,
 		     const struct inode *new_dir,
-		     const struct dentry *new_dentry, int sync)
+		     const struct dentry *new_dentry,
+		     const struct inode *whiteout, int sync)
 {
 	void *p;
 	union ubifs_key key;
@@ -958,7 +1100,7 @@
 	aligned_dlen1 = ALIGN(dlen1, 8);
 	aligned_dlen2 = ALIGN(dlen2, 8);
 	len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
-	if (old_dir != new_dir)
+	if (move)
 		len += plen;
 	dent = kmalloc(len, GFP_NOFS);
 	if (!dent)
@@ -980,13 +1122,19 @@
 	zero_dent_node_unused(dent);
 	ubifs_prep_grp_node(c, dent, dlen1, 0);
 
-	/* Make deletion dent */
 	dent2 = (void *)dent + aligned_dlen1;
 	dent2->ch.node_type = UBIFS_DENT_NODE;
 	dent_key_init_flash(c, &dent2->key, old_dir->i_ino,
 			    &old_dentry->d_name);
-	dent2->inum = 0;
-	dent2->type = DT_UNKNOWN;
+
+	if (whiteout) {
+		dent2->inum = cpu_to_le64(whiteout->i_ino);
+		dent2->type = get_dent_type(whiteout->i_mode);
+	} else {
+		/* Make deletion dent */
+		dent2->inum = 0;
+		dent2->type = DT_UNKNOWN;
+	}
 	dent2->nlen = cpu_to_le16(old_dentry->d_name.len);
 	memcpy(dent2->name, old_dentry->d_name.name, old_dentry->d_name.len);
 	dent2->name[old_dentry->d_name.len] = '\0';
@@ -1035,16 +1183,26 @@
 	if (err)
 		goto out_ro;
 
-	err = ubifs_add_dirt(c, lnum, dlen2);
-	if (err)
-		goto out_ro;
+	offs += aligned_dlen1;
+	if (whiteout) {
+		dent_key_init(c, &key, old_dir->i_ino, &old_dentry->d_name);
+		err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, &old_dentry->d_name);
+		if (err)
+			goto out_ro;
 
-	dent_key_init(c, &key, old_dir->i_ino, &old_dentry->d_name);
-	err = ubifs_tnc_remove_nm(c, &key, &old_dentry->d_name);
-	if (err)
-		goto out_ro;
+		ubifs_delete_orphan(c, whiteout->i_ino);
+	} else {
+		err = ubifs_add_dirt(c, lnum, dlen2);
+		if (err)
+			goto out_ro;
 
-	offs += aligned_dlen1 + aligned_dlen2;
+		dent_key_init(c, &key, old_dir->i_ino, &old_dentry->d_name);
+		err = ubifs_tnc_remove_nm(c, &key, &old_dentry->d_name);
+		if (err)
+			goto out_ro;
+	}
+
+	offs += aligned_dlen2;
 	if (new_inode) {
 		ino_key_init(c, &key, new_inode->i_ino);
 		err = ubifs_tnc_add(c, &key, lnum, offs, ilen);
@@ -1058,7 +1216,7 @@
 	if (err)
 		goto out_ro;
 
-	if (old_dir != new_dir) {
+	if (move) {
 		offs += ALIGN(plen, 8);
 		ino_key_init(c, &key, new_dir->i_ino);
 		err = ubifs_tnc_add(c, &key, lnum, offs, plen);
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
index a0011aa..6c3a1ab 100644
--- a/fs/ubifs/lprops.c
+++ b/fs/ubifs/lprops.c
@@ -636,7 +636,7 @@
 /**
  * ubifs_get_lp_stats - get lprops statistics.
  * @c: UBIFS file-system description object
- * @st: return statistics
+ * @lst: return statistics
  */
 void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *lst)
 {
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index ce89bdc..235654c 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -34,7 +34,6 @@
 
 /**
  * first_dirty_cnode - find first dirty cnode.
- * @c: UBIFS file-system description object
  * @nnode: nnode at which to start
  *
  * This function returns the first dirty cnode or %NULL if there is not one.
@@ -1623,7 +1622,6 @@
  * dbg_check_ltab_lnum - check the ltab for a LPT LEB number.
  * @c: the UBIFS file-system description object
  * @lnum: LEB number where node was written
- * @offs: offset where node was written
  *
  * This function returns %0 on success and a negative error code on failure.
  */
@@ -1870,7 +1868,7 @@
 }
 
 /**
- * ubifs_dump_lpt_leb - dump an LPT LEB.
+ * dump_lpt_leb - dump an LPT LEB.
  * @c: UBIFS file-system description object
  * @lnum: LEB number to dump
  *
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 3ca4540..fb0f44c 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -267,7 +267,7 @@
  * replay_entries_cmp - compare 2 replay entries.
  * @priv: UBIFS file-system description object
  * @a: first replay entry
- * @a: second replay entry
+ * @b: second replay entry
  *
  * This is a comparios function for 'list_sort()' which compares 2 replay
  * entries @a and @b by comparing their sequence numer.  Returns %1 if @a has
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 4617d45..096035e 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -157,6 +157,7 @@
 	WB_MUTEX_1 = 0,
 	WB_MUTEX_2 = 1,
 	WB_MUTEX_3 = 2,
+	WB_MUTEX_4 = 3,
 };
 
 /*
@@ -1520,10 +1521,15 @@
 			 const union ubifs_key *key, const void *buf, int len);
 int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode);
 int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode);
+int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
+		      const struct dentry *fst_dentry,
+		      const struct inode *snd_dir,
+		      const struct dentry *snd_dentry, int sync);
 int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
 		     const struct dentry *old_dentry,
 		     const struct inode *new_dir,
-		     const struct dentry *new_dentry, int sync);
+		     const struct dentry *new_dentry,
+		     const struct inode *whiteout, int sync);
 int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
 		       loff_t old_size, loff_t new_size);
 int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 11a0041..6c2f4d4 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -200,6 +200,7 @@
 	struct ubifs_inode *host_ui = ubifs_inode(host);
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	void *buf = NULL;
+	int old_size;
 	struct ubifs_budget_req req = { .dirtied_ino = 2,
 		.dirtied_ino_d = ALIGN(size, 8) + ALIGN(host_ui->data_len, 8) };
 
@@ -217,12 +218,13 @@
 	kfree(ui->data);
 	ui->data = buf;
 	inode->i_size = ui->ui_size = size;
+	old_size = ui->data_len;
 	ui->data_len = size;
 	mutex_unlock(&ui->ui_mutex);
 
 	mutex_lock(&host_ui->ui_mutex);
 	host->i_ctime = ubifs_current_time(host);
-	host_ui->xattr_size -= CALC_XATTR_BYTES(ui->data_len);
+	host_ui->xattr_size -= CALC_XATTR_BYTES(old_size);
 	host_ui->xattr_size += CALC_XATTR_BYTES(size);
 
 	/*
@@ -241,7 +243,7 @@
 
 out_cancel:
 	host_ui->xattr_size -= CALC_XATTR_BYTES(size);
-	host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
+	host_ui->xattr_size += CALC_XATTR_BYTES(old_size);
 	mutex_unlock(&host_ui->ui_mutex);
 	make_bad_inode(inode);
 out_free:
diff --git a/fs/udf/file.c b/fs/udf/file.c
index e855bf8..dbcb3a4a 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -261,7 +261,7 @@
 	struct inode *inode = d_inode(dentry);
 	int error;
 
-	error = inode_change_ok(inode, attr);
+	error = setattr_prepare(dentry, attr);
 	if (error)
 		return error;
 
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index e77db62..c1ed18a 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -121,7 +121,7 @@
 	else
 		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
 	inode->i_mtime = inode->i_atime = inode->i_ctime =
-		iinfo->i_crtime = current_fs_time(inode->i_sb);
+		iinfo->i_crtime = current_time(inode);
 	if (unlikely(insert_inode_locked(inode) < 0)) {
 		make_bad_inode(inode);
 		iput(inode);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 55aa587..aad4640 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -886,7 +886,7 @@
 	*new = 1;
 	iinfo->i_next_alloc_block = block;
 	iinfo->i_next_alloc_goal = newblocknum;
-	inode->i_ctime = current_fs_time(inode->i_sb);
+	inode->i_ctime = current_time(inode);
 
 	if (IS_SYNC(inode))
 		udf_sync_inode(inode);
@@ -1268,7 +1268,7 @@
 		up_write(&iinfo->i_data_sem);
 	}
 update_time:
-	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 	if (IS_SYNC(inode))
 		udf_sync_inode(inode);
 	else
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index c3e5c96..2d65e28 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -616,7 +616,7 @@
 	*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
 		cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL);
 	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
-	dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb);
+	dir->i_ctime = dir->i_mtime = current_time(dir);
 	mark_inode_dirty(dir);
 	if (fibh.sbh != fibh.ebh)
 		brelse(fibh.ebh);
@@ -730,7 +730,7 @@
 	cfi.fileCharacteristics |= FID_FILE_CHAR_DIRECTORY;
 	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
 	inc_nlink(dir);
-	dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb);
+	dir->i_ctime = dir->i_mtime = current_time(dir);
 	mark_inode_dirty(dir);
 	unlock_new_inode(inode);
 	d_instantiate(dentry, inode);
@@ -845,7 +845,7 @@
 	inode->i_size = 0;
 	inode_dec_link_count(dir);
 	inode->i_ctime = dir->i_ctime = dir->i_mtime =
-						current_fs_time(dir->i_sb);
+						current_time(inode);
 	mark_inode_dirty(dir);
 
 end_rmdir:
@@ -888,7 +888,7 @@
 	retval = udf_delete_entry(dir, fi, &fibh, &cfi);
 	if (retval)
 		goto end_unlink;
-	dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb);
+	dir->i_ctime = dir->i_mtime = current_time(dir);
 	mark_inode_dirty(dir);
 	inode_dec_link_count(inode);
 	inode->i_ctime = dir->i_ctime;
@@ -1079,9 +1079,9 @@
 		brelse(fibh.ebh);
 	brelse(fibh.sbh);
 	inc_nlink(inode);
-	inode->i_ctime = current_fs_time(inode->i_sb);
+	inode->i_ctime = current_time(inode);
 	mark_inode_dirty(inode);
-	dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb);
+	dir->i_ctime = dir->i_mtime = current_time(dir);
 	mark_inode_dirty(dir);
 	ihold(inode);
 	d_instantiate(dentry, inode);
@@ -1093,7 +1093,8 @@
  * higher-level routines.
  */
 static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
-		      struct inode *new_dir, struct dentry *new_dentry)
+		      struct inode *new_dir, struct dentry *new_dentry,
+		      unsigned int flags)
 {
 	struct inode *old_inode = d_inode(old_dentry);
 	struct inode *new_inode = d_inode(new_dentry);
@@ -1105,6 +1106,9 @@
 	struct kernel_lb_addr tloc;
 	struct udf_inode_info *old_iinfo = UDF_I(old_inode);
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
 	if (IS_ERR(ofi)) {
 		retval = PTR_ERR(ofi);
@@ -1172,7 +1176,7 @@
 	 * Like most other Unix systems, set the ctime for inodes on a
 	 * rename.
 	 */
-	old_inode->i_ctime = current_fs_time(old_inode->i_sb);
+	old_inode->i_ctime = current_time(old_inode);
 	mark_inode_dirty(old_inode);
 
 	/*
@@ -1188,11 +1192,11 @@
 	udf_delete_entry(old_dir, ofi, &ofibh, &ocfi);
 
 	if (new_inode) {
-		new_inode->i_ctime = current_fs_time(new_inode->i_sb);
+		new_inode->i_ctime = current_time(new_inode);
 		inode_dec_link_count(new_inode);
 	}
-	old_dir->i_ctime = old_dir->i_mtime = current_fs_time(old_dir->i_sb);
-	new_dir->i_ctime = new_dir->i_mtime = current_fs_time(new_dir->i_sb);
+	old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
+	new_dir->i_ctime = new_dir->i_mtime = current_time(new_dir);
 	mark_inode_dirty(old_dir);
 	mark_inode_dirty(new_dir);
 
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index fa3bda1..de01b8f 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -100,7 +100,7 @@
 	err = ufs_commit_chunk(page, pos, len);
 	ufs_put_page(page);
 	if (update_times)
-		dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+		dir->i_mtime = dir->i_ctime = current_time(dir);
 	mark_inode_dirty(dir);
 }
 
@@ -389,7 +389,7 @@
 	ufs_set_de_type(sb, de, inode->i_mode);
 
 	err = ufs_commit_chunk(page, pos, rec_len);
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 
 	mark_inode_dirty(dir);
 	/* OFFSET_CACHE */
@@ -530,7 +530,7 @@
 		pde->d_reclen = cpu_to_fs16(sb, to - from);
 	dir->d_ino = 0;
 	err = ufs_commit_chunk(page, pos, to - from);
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+	inode->i_ctime = inode->i_mtime = current_time(inode);
 	mark_inode_dirty(inode);
 out:
 	ufs_put_page(page);
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index fd0203c..9774555 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -290,7 +290,7 @@
 	inode_init_owner(inode, dir, mode);
 	inode->i_blocks = 0;
 	inode->i_generation = 0;
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	ufsi->i_flags = UFS_I(dir)->i_flags;
 	ufsi->i_lastfrag = 0;
 	ufsi->i_shadow = 0;
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 9f49431..190d64b 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -293,7 +293,7 @@
 
 	if (new)
 		*new = 1;
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	if (IS_SYNC(inode))
 		ufs_sync_inode (inode);
 	mark_inode_dirty(inode);
@@ -375,7 +375,7 @@
 	mark_buffer_dirty(bh);
 	if (IS_SYNC(inode))
 		sync_dirty_buffer(bh);
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	mark_inode_dirty(inode);
 out:
 	brelse (bh);
@@ -1185,7 +1185,7 @@
 	truncate_setsize(inode, size);
 
 	__ufs_truncate_blocks(inode);
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 	mark_inode_dirty(inode);
 out:
 	UFSD("EXIT: err %d\n", err);
@@ -1208,7 +1208,7 @@
 	unsigned int ia_valid = attr->ia_valid;
 	int error;
 
-	error = inode_change_ok(inode, attr);
+	error = setattr_prepare(dentry, attr);
 	if (error)
 		return error;
 
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index a1559f7..8eca4ed 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -153,7 +153,7 @@
 	struct inode *inode = d_inode(old_dentry);
 	int error;
 
-	inode->i_ctime = CURRENT_TIME_SEC;
+	inode->i_ctime = current_time(inode);
 	inode_inc_link_count(inode);
 	ihold(inode);
 
@@ -245,7 +245,8 @@
 }
 
 static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
-		      struct inode *new_dir, struct dentry *new_dentry)
+		      struct inode *new_dir, struct dentry *new_dentry,
+		      unsigned int flags)
 {
 	struct inode *old_inode = d_inode(old_dentry);
 	struct inode *new_inode = d_inode(new_dentry);
@@ -255,6 +256,9 @@
 	struct ufs_dir_entry *old_de;
 	int err = -ENOENT;
 
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
 	old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page);
 	if (!old_de)
 		goto out;
@@ -279,7 +283,7 @@
 		if (!new_de)
 			goto out_dir;
 		ufs_set_link(new_dir, new_de, new_page, old_inode, 1);
-		new_inode->i_ctime = CURRENT_TIME_SEC;
+		new_inode->i_ctime = current_time(new_inode);
 		if (dir_de)
 			drop_nlink(new_inode);
 		inode_dec_link_count(new_inode);
@@ -295,7 +299,7 @@
 	 * Like most other Unix systems, set the ctime for inodes on a
  	 * rename.
 	 */
-	old_inode->i_ctime = CURRENT_TIME_SEC;
+	old_inode->i_ctime = current_time(old_inode);
 
 	ufs_delete_entry(old_dir, old_de, old_page);
 	mark_inode_dirty(old_inode);
diff --git a/fs/utimes.c b/fs/utimes.c
index 794f5f5..22307cd 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -81,27 +81,13 @@
 			newattrs.ia_valid |= ATTR_MTIME_SET;
 		}
 		/*
-		 * Tell inode_change_ok(), that this is an explicit time
+		 * Tell setattr_prepare(), that this is an explicit time
 		 * update, even if neither ATTR_ATIME_SET nor ATTR_MTIME_SET
 		 * were used.
 		 */
 		newattrs.ia_valid |= ATTR_TIMES_SET;
 	} else {
-		/*
-		 * If times is NULL (or both times are UTIME_NOW),
-		 * then we need to check permissions, because
-		 * inode_change_ok() won't do it.
-		 */
-		error = -EPERM;
-                if (IS_IMMUTABLE(inode))
-			goto mnt_drop_write_and_out;
-
-		error = -EACCES;
-		if (!inode_owner_or_capable(inode)) {
-			error = inode_permission(inode, MAY_WRITE);
-			if (error)
-				goto mnt_drop_write_and_out;
-		}
+		newattrs.ia_valid |= ATTR_TOUCH;
 	}
 retry_deleg:
 	inode_lock(inode);
@@ -113,7 +99,6 @@
 			goto retry_deleg;
 	}
 
-mnt_drop_write_and_out:
 	mnt_drop_write(path->mnt);
 out:
 	return error;
diff --git a/fs/xattr.c b/fs/xattr.c
index c243905..3368659 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -24,6 +24,59 @@
 
 #include <asm/uaccess.h>
 
+static const char *
+strcmp_prefix(const char *a, const char *a_prefix)
+{
+	while (*a_prefix && *a == *a_prefix) {
+		a++;
+		a_prefix++;
+	}
+	return *a_prefix ? NULL : a;
+}
+
+/*
+ * In order to implement different sets of xattr operations for each xattr
+ * prefix, a filesystem should create a null-terminated array of struct
+ * xattr_handler (one for each prefix) and hang a pointer to it off of the
+ * s_xattr field of the superblock.
+ */
+#define for_each_xattr_handler(handlers, handler)		\
+	if (handlers)						\
+		for ((handler) = *(handlers)++;			\
+			(handler) != NULL;			\
+			(handler) = *(handlers)++)
+
+/*
+ * Find the xattr_handler with the matching prefix.
+ */
+static const struct xattr_handler *
+xattr_resolve_name(struct inode *inode, const char **name)
+{
+	const struct xattr_handler **handlers = inode->i_sb->s_xattr;
+	const struct xattr_handler *handler;
+
+	if (!(inode->i_opflags & IOP_XATTR)) {
+		if (unlikely(is_bad_inode(inode)))
+			return ERR_PTR(-EIO);
+		return ERR_PTR(-EOPNOTSUPP);
+	}
+	for_each_xattr_handler(handlers, handler) {
+		const char *n;
+
+		n = strcmp_prefix(*name, xattr_prefix(handler));
+		if (n) {
+			if (!handler->prefix ^ !*n) {
+				if (*n)
+					continue;
+				return ERR_PTR(-EINVAL);
+			}
+			*name = n;
+			return handler;
+		}
+	}
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
 /*
  * Check permissions for extended attribute access.  This is a bit complicated
  * because different namespaces have very different rules.
@@ -80,6 +133,23 @@
 	return inode_permission(inode, mask);
 }
 
+int
+__vfs_setxattr(struct dentry *dentry, struct inode *inode, const char *name,
+	       const void *value, size_t size, int flags)
+{
+	const struct xattr_handler *handler;
+
+	handler = xattr_resolve_name(inode, &name);
+	if (IS_ERR(handler))
+		return PTR_ERR(handler);
+	if (!handler->set)
+		return -EOPNOTSUPP;
+	if (size == 0)
+		value = "";  /* empty EA, do not remove */
+	return handler->set(handler, dentry, inode, name, value, size, flags);
+}
+EXPORT_SYMBOL(__vfs_setxattr);
+
 /**
  *  __vfs_setxattr_noperm - perform setxattr operation without performing
  *  permission checks.
@@ -106,8 +176,8 @@
 
 	if (issec)
 		inode->i_flags &= ~S_NOSEC;
-	if (inode->i_op->setxattr) {
-		error = inode->i_op->setxattr(dentry, inode, name, value, size, flags);
+	if (inode->i_opflags & IOP_XATTR) {
+		error = __vfs_setxattr(dentry, inode, name, value, size, flags);
 		if (!error) {
 			fsnotify_xattr(dentry);
 			security_inode_post_setxattr(dentry, name, value,
@@ -115,6 +185,9 @@
 		}
 	} else if (issec) {
 		const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
+
+		if (unlikely(is_bad_inode(inode)))
+			return -EIO;
 		error = security_inode_setsecurity(inode, suffix, value,
 						   size, flags);
 		if (!error)
@@ -188,6 +261,7 @@
 vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value,
 		   size_t xattr_size, gfp_t flags)
 {
+	const struct xattr_handler *handler;
 	struct inode *inode = dentry->d_inode;
 	char *value = *xattr_value;
 	int error;
@@ -196,10 +270,12 @@
 	if (error)
 		return error;
 
-	if (!inode->i_op->getxattr)
+	handler = xattr_resolve_name(inode, &name);
+	if (IS_ERR(handler))
+		return PTR_ERR(handler);
+	if (!handler->get)
 		return -EOPNOTSUPP;
-
-	error = inode->i_op->getxattr(dentry, inode, name, NULL, 0);
+	error = handler->get(handler, dentry, inode, name, NULL, 0);
 	if (error < 0)
 		return error;
 
@@ -210,12 +286,27 @@
 		memset(value, 0, error + 1);
 	}
 
-	error = inode->i_op->getxattr(dentry, inode, name, value, error);
+	error = handler->get(handler, dentry, inode, name, value, error);
 	*xattr_value = value;
 	return error;
 }
 
 ssize_t
+__vfs_getxattr(struct dentry *dentry, struct inode *inode, const char *name,
+	       void *value, size_t size)
+{
+	const struct xattr_handler *handler;
+
+	handler = xattr_resolve_name(inode, &name);
+	if (IS_ERR(handler))
+		return PTR_ERR(handler);
+	if (!handler->get)
+		return -EOPNOTSUPP;
+	return handler->get(handler, dentry, inode, name, value, size);
+}
+EXPORT_SYMBOL(__vfs_getxattr);
+
+ssize_t
 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
 {
 	struct inode *inode = dentry->d_inode;
@@ -242,28 +333,24 @@
 		return ret;
 	}
 nolsm:
-	if (inode->i_op->getxattr)
-		error = inode->i_op->getxattr(dentry, inode, name, value, size);
-	else
-		error = -EOPNOTSUPP;
-
-	return error;
+	return __vfs_getxattr(dentry, inode, name, value, size);
 }
 EXPORT_SYMBOL_GPL(vfs_getxattr);
 
 ssize_t
-vfs_listxattr(struct dentry *d, char *list, size_t size)
+vfs_listxattr(struct dentry *dentry, char *list, size_t size)
 {
+	struct inode *inode = d_inode(dentry);
 	ssize_t error;
 
-	error = security_inode_listxattr(d);
+	error = security_inode_listxattr(dentry);
 	if (error)
 		return error;
-	error = -EOPNOTSUPP;
-	if (d->d_inode->i_op->listxattr) {
-		error = d->d_inode->i_op->listxattr(d, list, size);
+	if (inode->i_op->listxattr && (inode->i_opflags & IOP_XATTR)) {
+		error = -EOPNOTSUPP;
+		error = inode->i_op->listxattr(dentry, list, size);
 	} else {
-		error = security_inode_listsecurity(d->d_inode, list, size);
+		error = security_inode_listsecurity(inode, list, size);
 		if (size && error > size)
 			error = -ERANGE;
 	}
@@ -272,14 +359,26 @@
 EXPORT_SYMBOL_GPL(vfs_listxattr);
 
 int
+__vfs_removexattr(struct dentry *dentry, const char *name)
+{
+	struct inode *inode = d_inode(dentry);
+	const struct xattr_handler *handler;
+
+	handler = xattr_resolve_name(inode, &name);
+	if (IS_ERR(handler))
+		return PTR_ERR(handler);
+	if (!handler->set)
+		return -EOPNOTSUPP;
+	return handler->set(handler, dentry, inode, name, NULL, 0, XATTR_REPLACE);
+}
+EXPORT_SYMBOL(__vfs_removexattr);
+
+int
 vfs_removexattr(struct dentry *dentry, const char *name)
 {
 	struct inode *inode = dentry->d_inode;
 	int error;
 
-	if (!inode->i_op->removexattr)
-		return -EOPNOTSUPP;
-
 	error = xattr_permission(inode, name, MAY_WRITE);
 	if (error)
 		return error;
@@ -289,7 +388,7 @@
 	if (error)
 		goto out;
 
-	error = inode->i_op->removexattr(dentry, name);
+	error = __vfs_removexattr(dentry, name);
 
 	if (!error) {
 		fsnotify_xattr(dentry);
@@ -641,76 +740,6 @@
 	return error;
 }
 
-
-static const char *
-strcmp_prefix(const char *a, const char *a_prefix)
-{
-	while (*a_prefix && *a == *a_prefix) {
-		a++;
-		a_prefix++;
-	}
-	return *a_prefix ? NULL : a;
-}
-
-/*
- * In order to implement different sets of xattr operations for each xattr
- * prefix with the generic xattr API, a filesystem should create a
- * null-terminated array of struct xattr_handler (one for each prefix) and
- * hang a pointer to it off of the s_xattr field of the superblock.
- *
- * The generic_fooxattr() functions will use this list to dispatch xattr
- * operations to the correct xattr_handler.
- */
-#define for_each_xattr_handler(handlers, handler)		\
-	if (handlers)						\
-		for ((handler) = *(handlers)++;			\
-			(handler) != NULL;			\
-			(handler) = *(handlers)++)
-
-/*
- * Find the xattr_handler with the matching prefix.
- */
-static const struct xattr_handler *
-xattr_resolve_name(const struct xattr_handler **handlers, const char **name)
-{
-	const struct xattr_handler *handler;
-
-	if (!*name)
-		return ERR_PTR(-EINVAL);
-
-	for_each_xattr_handler(handlers, handler) {
-		const char *n;
-
-		n = strcmp_prefix(*name, xattr_prefix(handler));
-		if (n) {
-			if (!handler->prefix ^ !*n) {
-				if (*n)
-					continue;
-				return ERR_PTR(-EINVAL);
-			}
-			*name = n;
-			return handler;
-		}
-	}
-	return ERR_PTR(-EOPNOTSUPP);
-}
-
-/*
- * Find the handler for the prefix and dispatch its get() operation.
- */
-ssize_t
-generic_getxattr(struct dentry *dentry, struct inode *inode,
-		 const char *name, void *buffer, size_t size)
-{
-	const struct xattr_handler *handler;
-
-	handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
-	if (IS_ERR(handler))
-		return PTR_ERR(handler);
-	return handler->get(handler, dentry, inode,
-			    name, buffer, size);
-}
-
 /*
  * Combine the results of the list() operation from every xattr_handler in the
  * list.
@@ -747,44 +776,7 @@
 	}
 	return size;
 }
-
-/*
- * Find the handler for the prefix and dispatch its set() operation.
- */
-int
-generic_setxattr(struct dentry *dentry, struct inode *inode, const char *name,
-		 const void *value, size_t size, int flags)
-{
-	const struct xattr_handler *handler;
-
-	if (size == 0)
-		value = "";  /* empty EA, do not remove */
-	handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
-	if (IS_ERR(handler))
-		return PTR_ERR(handler);
-	return handler->set(handler, dentry, inode, name, value, size, flags);
-}
-
-/*
- * Find the handler for the prefix and dispatch its set() operation to remove
- * any associated extended attribute.
- */
-int
-generic_removexattr(struct dentry *dentry, const char *name)
-{
-	const struct xattr_handler *handler;
-
-	handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
-	if (IS_ERR(handler))
-		return PTR_ERR(handler);
-	return handler->set(handler, dentry, d_inode(dentry), name, NULL,
-			    0, XATTR_REPLACE);
-}
-
-EXPORT_SYMBOL(generic_getxattr);
 EXPORT_SYMBOL(generic_listxattr);
-EXPORT_SYMBOL(generic_setxattr);
-EXPORT_SYMBOL(generic_removexattr);
 
 /**
  * xattr_full_name  -  Compute full attribute name from suffix
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 584e87e..26ef195 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -55,6 +55,8 @@
 				   xfs_ag_resv.o \
 				   xfs_rmap.o \
 				   xfs_rmap_btree.o \
+				   xfs_refcount.o \
+				   xfs_refcount_btree.o \
 				   xfs_sb.o \
 				   xfs_symlink_remote.o \
 				   xfs_trans_resv.o \
@@ -88,6 +90,7 @@
 				   xfs_message.o \
 				   xfs_mount.o \
 				   xfs_mru_cache.o \
+				   xfs_reflink.o \
 				   xfs_stats.o \
 				   xfs_super.o \
 				   xfs_symlink.o \
@@ -100,16 +103,20 @@
 # low-level transaction/log code
 xfs-y				+= xfs_log.o \
 				   xfs_log_cil.o \
+				   xfs_bmap_item.o \
 				   xfs_buf_item.o \
 				   xfs_extfree_item.o \
 				   xfs_icreate_item.o \
 				   xfs_inode_item.o \
+				   xfs_refcount_item.o \
 				   xfs_rmap_item.o \
 				   xfs_log_recover.o \
 				   xfs_trans_ail.o \
+				   xfs_trans_bmap.o \
 				   xfs_trans_buf.o \
 				   xfs_trans_extfree.o \
 				   xfs_trans_inode.o \
+				   xfs_trans_refcount.o \
 				   xfs_trans_rmap.o \
 
 # optional features
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index e3ae0f2..e5ebc37 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -38,6 +38,7 @@
 #include "xfs_trans_space.h"
 #include "xfs_rmap_btree.h"
 #include "xfs_btree.h"
+#include "xfs_refcount_btree.h"
 
 /*
  * Per-AG Block Reservations
@@ -108,7 +109,9 @@
 	trace_xfs_ag_resv_critical(pag, type, avail);
 
 	/* Critically low if less than 10% or max btree height remains. */
-	return avail < orig / 10 || avail < XFS_BTREE_MAXLEVELS;
+	return XFS_TEST_ERROR(avail < orig / 10 || avail < XFS_BTREE_MAXLEVELS,
+			pag->pag_mount, XFS_ERRTAG_AG_RESV_CRITICAL,
+			XFS_RANDOM_AG_RESV_CRITICAL);
 }
 
 /*
@@ -228,6 +231,11 @@
 	if (pag->pag_meta_resv.ar_asked == 0) {
 		ask = used = 0;
 
+		error = xfs_refcountbt_calc_reserves(pag->pag_mount,
+				pag->pag_agno, &ask, &used);
+		if (error)
+			goto out;
+
 		error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
 				ask, used);
 		if (error)
@@ -238,6 +246,11 @@
 	if (pag->pag_agfl_resv.ar_asked == 0) {
 		ask = used = 0;
 
+		error = xfs_rmapbt_calc_reserves(pag->pag_mount, pag->pag_agno,
+				&ask, &used);
+		if (error)
+			goto out;
+
 		error = __xfs_ag_resv_init(pag, XFS_AG_RESV_AGFL, ask, used);
 		if (error)
 			goto out;
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index ca75dc9..effb64c 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -52,10 +52,23 @@
 STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
 		xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
 
+unsigned int
+xfs_refc_block(
+	struct xfs_mount	*mp)
+{
+	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+		return XFS_RMAP_BLOCK(mp) + 1;
+	if (xfs_sb_version_hasfinobt(&mp->m_sb))
+		return XFS_FIBT_BLOCK(mp) + 1;
+	return XFS_IBT_BLOCK(mp) + 1;
+}
+
 xfs_extlen_t
 xfs_prealloc_blocks(
 	struct xfs_mount	*mp)
 {
+	if (xfs_sb_version_hasreflink(&mp->m_sb))
+		return xfs_refc_block(mp) + 1;
 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
 		return XFS_RMAP_BLOCK(mp) + 1;
 	if (xfs_sb_version_hasfinobt(&mp->m_sb))
@@ -115,6 +128,8 @@
 		blocks++;		/* finobt root block */
 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
 		blocks++; 		/* rmap root block */
+	if (xfs_sb_version_hasreflink(&mp->m_sb))
+		blocks++;		/* refcount root block */
 
 	return mp->m_sb.sb_agblocks - blocks;
 }
@@ -2321,6 +2336,9 @@
 		offsetof(xfs_agf_t, agf_btreeblks),
 		offsetof(xfs_agf_t, agf_uuid),
 		offsetof(xfs_agf_t, agf_rmap_blocks),
+		offsetof(xfs_agf_t, agf_refcount_blocks),
+		offsetof(xfs_agf_t, agf_refcount_root),
+		offsetof(xfs_agf_t, agf_refcount_level),
 		/* needed so that we don't log the whole rest of the structure: */
 		offsetof(xfs_agf_t, agf_spare64),
 		sizeof(xfs_agf_t)
@@ -2458,6 +2476,10 @@
 	    be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
 		return false;
 
+	if (xfs_sb_version_hasreflink(&mp->m_sb) &&
+	    be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS)
+		return false;
+
 	return true;;
 
 }
@@ -2578,6 +2600,7 @@
 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
 		pag->pagf_levels[XFS_BTNUM_RMAPi] =
 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
+		pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
 		spin_lock_init(&pag->pagb_lock);
 		pag->pagb_count = 0;
 		pag->pagb_tree = RB_ROOT;
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 9d7f61d..c27344c 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -48,6 +48,7 @@
 #include "xfs_filestream.h"
 #include "xfs_rmap.h"
 #include "xfs_ag_resv.h"
+#include "xfs_refcount.h"
 
 
 kmem_zone_t		*xfs_bmap_free_item_zone;
@@ -140,7 +141,8 @@
  */
 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
 {
-	return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
+	return whichfork != XFS_COW_FORK &&
+		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
 		XFS_IFORK_NEXTENTS(ip, whichfork) >
 			XFS_IFORK_MAXEXT(ip, whichfork);
 }
@@ -150,7 +152,8 @@
  */
 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
 {
-	return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
+	return whichfork != XFS_COW_FORK &&
+		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
 		XFS_IFORK_NEXTENTS(ip, whichfork) <=
 			XFS_IFORK_MAXEXT(ip, whichfork);
 }
@@ -640,6 +643,7 @@
 
 	mp = ip->i_mount;
 	ifp = XFS_IFORK_PTR(ip, whichfork);
+	ASSERT(whichfork != XFS_COW_FORK);
 	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
 	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
 	rblock = ifp->if_broot;
@@ -706,6 +710,7 @@
 	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
 
 	mp = ip->i_mount;
+	ASSERT(whichfork != XFS_COW_FORK);
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
 
@@ -748,6 +753,7 @@
 		args.type = XFS_ALLOCTYPE_START_BNO;
 		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
 	} else if (dfops->dop_low) {
+try_another_ag:
 		args.type = XFS_ALLOCTYPE_START_BNO;
 		args.fsbno = *firstblock;
 	} else {
@@ -762,6 +768,21 @@
 		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
 		return error;
 	}
+
+	/*
+	 * During a CoW operation, the allocation and bmbt updates occur in
+	 * different transactions.  The mapping code tries to put new bmbt
+	 * blocks near extents being mapped, but the only way to guarantee this
+	 * is if the alloc and the mapping happen in a single transaction that
+	 * has a block reservation.  That isn't the case here, so if we run out
+	 * of space we'll try again with another AG.
+	 */
+	if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
+	    args.fsbno == NULLFSBLOCK &&
+	    args.type == XFS_ALLOCTYPE_NEAR_BNO) {
+		dfops->dop_low = true;
+		goto try_another_ag;
+	}
 	/*
 	 * Allocation can't fail, the space was reserved.
 	 */
@@ -837,6 +858,7 @@
 {
 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
 
+	ASSERT(whichfork != XFS_COW_FORK);
 	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
 	ASSERT(ifp->if_bytes == 0);
 	ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
@@ -896,6 +918,7 @@
 	 * file currently fits in an inode.
 	 */
 	if (*firstblock == NULLFSBLOCK) {
+try_another_ag:
 		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
 		args.type = XFS_ALLOCTYPE_START_BNO;
 	} else {
@@ -908,6 +931,19 @@
 	if (error)
 		goto done;
 
+	/*
+	 * During a CoW operation, the allocation and bmbt updates occur in
+	 * different transactions.  The mapping code tries to put new bmbt
+	 * blocks near extents being mapped, but the only way to guarantee this
+	 * is if the alloc and the mapping happen in a single transaction that
+	 * has a block reservation.  That isn't the case here, so if we run out
+	 * of space we'll try again with another AG.
+	 */
+	if (xfs_sb_version_hasreflink(&ip->i_mount->m_sb) &&
+	    args.fsbno == NULLFSBLOCK &&
+	    args.type == XFS_ALLOCTYPE_NEAR_BNO) {
+		goto try_another_ag;
+	}
 	/* Can't fail, the space was reserved. */
 	ASSERT(args.fsbno != NULLFSBLOCK);
 	ASSERT(args.len == 1);
@@ -1670,7 +1706,8 @@
  */
 STATIC int				/* error */
 xfs_bmap_add_extent_delay_real(
-	struct xfs_bmalloca	*bma)
+	struct xfs_bmalloca	*bma,
+	int			whichfork)
 {
 	struct xfs_bmbt_irec	*new = &bma->got;
 	int			diff;	/* temp value */
@@ -1688,11 +1725,14 @@
 	xfs_filblks_t		temp=0;	/* value for da_new calculations */
 	xfs_filblks_t		temp2=0;/* value for da_new calculations */
 	int			tmp_rval;	/* partial logging flags */
-	int			whichfork = XFS_DATA_FORK;
 	struct xfs_mount	*mp;
+	xfs_extnum_t		*nextents;
 
 	mp = bma->ip->i_mount;
 	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
+	ASSERT(whichfork != XFS_ATTR_FORK);
+	nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
+						&bma->ip->i_d.di_nextents);
 
 	ASSERT(bma->idx >= 0);
 	ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
@@ -1706,6 +1746,9 @@
 #define	RIGHT		r[1]
 #define	PREV		r[2]
 
+	if (whichfork == XFS_COW_FORK)
+		state |= BMAP_COWFORK;
+
 	/*
 	 * Set up a bunch of variables to make the tests simpler.
 	 */
@@ -1792,7 +1835,7 @@
 		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 
 		xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
-		bma->ip->i_d.di_nextents--;
+		(*nextents)--;
 		if (bma->cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
@@ -1894,7 +1937,7 @@
 		xfs_bmbt_set_startblock(ep, new->br_startblock);
 		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 
-		bma->ip->i_d.di_nextents++;
+		(*nextents)++;
 		if (bma->cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
@@ -1964,7 +2007,7 @@
 		temp = PREV.br_blockcount - new->br_blockcount;
 		xfs_bmbt_set_blockcount(ep, temp);
 		xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
-		bma->ip->i_d.di_nextents++;
+		(*nextents)++;
 		if (bma->cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
@@ -2048,7 +2091,7 @@
 		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
 		xfs_bmbt_set_blockcount(ep, temp);
 		xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
-		bma->ip->i_d.di_nextents++;
+		(*nextents)++;
 		if (bma->cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
@@ -2117,7 +2160,7 @@
 		RIGHT.br_blockcount = temp2;
 		/* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
 		xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
-		bma->ip->i_d.di_nextents++;
+		(*nextents)++;
 		if (bma->cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
@@ -2215,7 +2258,8 @@
 
 	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
 done:
-	bma->logflags |= rval;
+	if (whichfork != XFS_COW_FORK)
+		bma->logflags |= rval;
 	return error;
 #undef	LEFT
 #undef	RIGHT
@@ -2759,6 +2803,7 @@
 STATIC void
 xfs_bmap_add_extent_hole_delay(
 	xfs_inode_t		*ip,	/* incore inode pointer */
+	int			whichfork,
 	xfs_extnum_t		*idx,	/* extent number to update/insert */
 	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
 {
@@ -2770,8 +2815,10 @@
 	int			state;  /* state bits, accessed thru macros */
 	xfs_filblks_t		temp=0;	/* temp for indirect calculations */
 
-	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+	ifp = XFS_IFORK_PTR(ip, whichfork);
 	state = 0;
+	if (whichfork == XFS_COW_FORK)
+		state |= BMAP_COWFORK;
 	ASSERT(isnullstartblock(new->br_startblock));
 
 	/*
@@ -2789,7 +2836,7 @@
 	 * Check and set flags if the current (right) segment exists.
 	 * If it doesn't exist, we're converting the hole at end-of-file.
 	 */
-	if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
+	if (*idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
 		state |= BMAP_RIGHT_VALID;
 		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
 
@@ -2923,6 +2970,7 @@
 	ASSERT(!isnullstartblock(new->br_startblock));
 	ASSERT(!bma->cur ||
 	       !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
+	ASSERT(whichfork != XFS_COW_FORK);
 
 	XFS_STATS_INC(mp, xs_add_exlist);
 
@@ -3648,7 +3696,9 @@
 	else if (mp->m_dalign)
 		stripe_align = mp->m_dalign;
 
-	if (xfs_alloc_is_userdata(ap->datatype))
+	if (ap->flags & XFS_BMAPI_COWFORK)
+		align = xfs_get_cowextsz_hint(ap->ip);
+	else if (xfs_alloc_is_userdata(ap->datatype))
 		align = xfs_get_extsz_hint(ap->ip);
 	if (unlikely(align)) {
 		error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
@@ -3856,7 +3906,8 @@
 		ASSERT(nullfb || fb_agno == args.agno ||
 		       (ap->dfops->dop_low && fb_agno < args.agno));
 		ap->length = args.len;
-		ap->ip->i_d.di_nblocks += args.len;
+		if (!(ap->flags & XFS_BMAPI_COWFORK))
+			ap->ip->i_d.di_nblocks += args.len;
 		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
 		if (ap->wasdel)
 			ap->ip->i_delayed_blks -= args.len;
@@ -3876,6 +3927,63 @@
 }
 
 /*
+ * For a remap operation, just "allocate" an extent at the address that the
+ * caller passed in, and ensure that the AGFL is the right size.  The caller
+ * will then map the "allocated" extent into the file somewhere.
+ */
+STATIC int
+xfs_bmap_remap_alloc(
+	struct xfs_bmalloca	*ap)
+{
+	struct xfs_trans	*tp = ap->tp;
+	struct xfs_mount	*mp = tp->t_mountp;
+	xfs_agblock_t		bno;
+	struct xfs_alloc_arg	args;
+	int			error;
+
+	/*
+	 * validate that the block number is legal - the enables us to detect
+	 * and handle a silent filesystem corruption rather than crashing.
+	 */
+	memset(&args, 0, sizeof(struct xfs_alloc_arg));
+	args.tp = ap->tp;
+	args.mp = ap->tp->t_mountp;
+	bno = *ap->firstblock;
+	args.agno = XFS_FSB_TO_AGNO(mp, bno);
+	args.agbno = XFS_FSB_TO_AGBNO(mp, bno);
+	if (args.agno >= mp->m_sb.sb_agcount ||
+	    args.agbno >= mp->m_sb.sb_agblocks)
+		return -EFSCORRUPTED;
+
+	/* "Allocate" the extent from the range we passed in. */
+	trace_xfs_bmap_remap_alloc(ap->ip, *ap->firstblock, ap->length);
+	ap->blkno = bno;
+	ap->ip->i_d.di_nblocks += ap->length;
+	xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
+
+	/* Fix the freelist, like a real allocator does. */
+	args.datatype = ap->datatype;
+	args.pag = xfs_perag_get(args.mp, args.agno);
+	ASSERT(args.pag);
+
+	/*
+	 * The freelist fixing code will decline the allocation if
+	 * the size and shape of the free space doesn't allow for
+	 * allocating the extent and updating all the metadata that
+	 * happens during an allocation.  We're remapping, not
+	 * allocating, so skip that check by pretending to be freeing.
+	 */
+	error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
+	if (error)
+		goto error0;
+error0:
+	xfs_perag_put(args.pag);
+	if (error)
+		trace_xfs_bmap_remap_alloc_error(ap->ip, error, _RET_IP_);
+	return error;
+}
+
+/*
  * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
  * It figures out where to ask the underlying allocator to put the new extent.
  */
@@ -3883,6 +3991,8 @@
 xfs_bmap_alloc(
 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
 {
+	if (ap->flags & XFS_BMAPI_REMAP)
+		return xfs_bmap_remap_alloc(ap);
 	if (XFS_IS_REALTIME_INODE(ap->ip) &&
 	    xfs_alloc_is_userdata(ap->datatype))
 		return xfs_bmap_rtalloc(ap);
@@ -4012,12 +4122,11 @@
 	int			error;
 	int			eof;
 	int			n = 0;
-	int			whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
-						XFS_ATTR_FORK : XFS_DATA_FORK;
+	int			whichfork = xfs_bmapi_whichfork(flags);
 
 	ASSERT(*nmap >= 1);
 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
-			   XFS_BMAPI_IGSTATE)));
+			   XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK)));
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
 
 	if (unlikely(XFS_TEST_ERROR(
@@ -4035,6 +4144,16 @@
 
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 
+	/* No CoW fork?  Return a hole. */
+	if (whichfork == XFS_COW_FORK && !ifp) {
+		mval->br_startoff = bno;
+		mval->br_startblock = HOLESTARTBLOCK;
+		mval->br_blockcount = len;
+		mval->br_state = XFS_EXT_NORM;
+		*nmap = 1;
+		return 0;
+	}
+
 	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
 		error = xfs_iread_extents(NULL, ip, whichfork);
 		if (error)
@@ -4084,6 +4203,7 @@
 int
 xfs_bmapi_reserve_delalloc(
 	struct xfs_inode	*ip,
+	int			whichfork,
 	xfs_fileoff_t		aoff,
 	xfs_filblks_t		len,
 	struct xfs_bmbt_irec	*got,
@@ -4092,7 +4212,7 @@
 	int			eof)
 {
 	struct xfs_mount	*mp = ip->i_mount;
-	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
 	xfs_extlen_t		alen;
 	xfs_extlen_t		indlen;
 	char			rt = XFS_IS_REALTIME_INODE(ip);
@@ -4104,7 +4224,10 @@
 		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
 
 	/* Figure out the extent size, adjust alen */
-	extsz = xfs_get_extsz_hint(ip);
+	if (whichfork == XFS_COW_FORK)
+		extsz = xfs_get_cowextsz_hint(ip);
+	else
+		extsz = xfs_get_extsz_hint(ip);
 	if (extsz) {
 		error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof,
 					       1, 0, &aoff, &alen);
@@ -4151,7 +4274,7 @@
 	got->br_startblock = nullstartblock(indlen);
 	got->br_blockcount = alen;
 	got->br_state = XFS_EXT_NORM;
-	xfs_bmap_add_extent_hole_delay(ip, lastx, got);
+	xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
 
 	/*
 	 * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
@@ -4182,8 +4305,7 @@
 	struct xfs_bmalloca	*bma)
 {
 	struct xfs_mount	*mp = bma->ip->i_mount;
-	int			whichfork = (bma->flags & XFS_BMAPI_ATTRFORK) ?
-						XFS_ATTR_FORK : XFS_DATA_FORK;
+	int			whichfork = xfs_bmapi_whichfork(bma->flags);
 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
 	int			tmp_logflags = 0;
 	int			error;
@@ -4278,7 +4400,7 @@
 		bma->got.br_state = XFS_EXT_UNWRITTEN;
 
 	if (bma->wasdel)
-		error = xfs_bmap_add_extent_delay_real(bma);
+		error = xfs_bmap_add_extent_delay_real(bma, whichfork);
 	else
 		error = xfs_bmap_add_extent_hole_real(bma, whichfork);
 
@@ -4308,8 +4430,7 @@
 	xfs_filblks_t		len,
 	int			flags)
 {
-	int			whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
-						XFS_ATTR_FORK : XFS_DATA_FORK;
+	int			whichfork = xfs_bmapi_whichfork(flags);
 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
 	int			tmp_logflags = 0;
 	int			error;
@@ -4325,6 +4446,8 @@
 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
 		return 0;
 
+	ASSERT(whichfork != XFS_COW_FORK);
+
 	/*
 	 * Modify (by adding) the state flag, if writing.
 	 */
@@ -4431,8 +4554,7 @@
 	orig_mval = mval;
 	orig_nmap = *nmap;
 #endif
-	whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
-		XFS_ATTR_FORK : XFS_DATA_FORK;
+	whichfork = xfs_bmapi_whichfork(flags);
 
 	ASSERT(*nmap >= 1);
 	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
@@ -4441,6 +4563,11 @@
 	ASSERT(len > 0);
 	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+	ASSERT(!(flags & XFS_BMAPI_REMAP) || whichfork == XFS_DATA_FORK);
+	ASSERT(!(flags & XFS_BMAPI_PREALLOC) || !(flags & XFS_BMAPI_REMAP));
+	ASSERT(!(flags & XFS_BMAPI_CONVERT) || !(flags & XFS_BMAPI_REMAP));
+	ASSERT(!(flags & XFS_BMAPI_PREALLOC) || whichfork != XFS_COW_FORK);
+	ASSERT(!(flags & XFS_BMAPI_CONVERT) || whichfork != XFS_COW_FORK);
 
 	/* zeroing is for currently only for data extents, not metadata */
 	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
@@ -4502,6 +4629,14 @@
 		wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
 
 		/*
+		 * Make sure we only reflink into a hole.
+		 */
+		if (flags & XFS_BMAPI_REMAP)
+			ASSERT(inhole);
+		if (flags & XFS_BMAPI_COWFORK)
+			ASSERT(!inhole);
+
+		/*
 		 * First, deal with the hole before the allocated space
 		 * that we found, if any.
 		 */
@@ -4531,6 +4666,17 @@
 				goto error0;
 			if (bma.blkno == NULLFSBLOCK)
 				break;
+
+			/*
+			 * If this is a CoW allocation, record the data in
+			 * the refcount btree for orphan recovery.
+			 */
+			if (whichfork == XFS_COW_FORK) {
+				error = xfs_refcount_alloc_cow_extent(mp, dfops,
+						bma.blkno, bma.length);
+				if (error)
+					goto error0;
+			}
 		}
 
 		/* Deal with the allocated space we found.  */
@@ -4696,7 +4842,8 @@
 	xfs_btree_cur_t		*cur,	/* if null, not a btree */
 	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
 	int			*logflagsp, /* inode logging flags */
-	int			whichfork) /* data or attr fork */
+	int			whichfork, /* data or attr fork */
+	int			bflags)	/* bmapi flags */
 {
 	xfs_filblks_t		da_new;	/* new delay-alloc indirect blocks */
 	xfs_filblks_t		da_old;	/* old delay-alloc indirect blocks */
@@ -4725,6 +4872,8 @@
 
 	if (whichfork == XFS_ATTR_FORK)
 		state |= BMAP_ATTRFORK;
+	else if (whichfork == XFS_COW_FORK)
+		state |= BMAP_COWFORK;
 
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	ASSERT((*idx >= 0) && (*idx < ifp->if_bytes /
@@ -4805,6 +4954,7 @@
 		/*
 		 * Matches the whole extent.  Delete the entry.
 		 */
+		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
 		xfs_iext_remove(ip, *idx, 1,
 				whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
 		--*idx;
@@ -4988,9 +5138,16 @@
 	/*
 	 * If we need to, add to list of extents to delete.
 	 */
-	if (do_fx)
-		xfs_bmap_add_free(mp, dfops, del->br_startblock,
-				del->br_blockcount, NULL);
+	if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
+		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
+			error = xfs_refcount_decrease_extent(mp, dfops, del);
+			if (error)
+				goto done;
+		} else
+			xfs_bmap_add_free(mp, dfops, del->br_startblock,
+					del->br_blockcount, NULL);
+	}
+
 	/*
 	 * Adjust inode # blocks in the file.
 	 */
@@ -4999,7 +5156,7 @@
 	/*
 	 * Adjust quota data.
 	 */
-	if (qfield)
+	if (qfield && !(bflags & XFS_BMAPI_REMAP))
 		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
 
 	/*
@@ -5014,6 +5171,175 @@
 	return error;
 }
 
+/* Remove an extent from the CoW fork.  Similar to xfs_bmap_del_extent. */
+int
+xfs_bunmapi_cow(
+	struct xfs_inode		*ip,
+	struct xfs_bmbt_irec		*del)
+{
+	xfs_filblks_t			da_new;
+	xfs_filblks_t			da_old;
+	xfs_fsblock_t			del_endblock = 0;
+	xfs_fileoff_t			del_endoff;
+	int				delay;
+	struct xfs_bmbt_rec_host	*ep;
+	int				error;
+	struct xfs_bmbt_irec		got;
+	xfs_fileoff_t			got_endoff;
+	struct xfs_ifork		*ifp;
+	struct xfs_mount		*mp;
+	xfs_filblks_t			nblks;
+	struct xfs_bmbt_irec		new;
+	/* REFERENCED */
+	uint				qfield;
+	xfs_filblks_t			temp;
+	xfs_filblks_t			temp2;
+	int				state = BMAP_COWFORK;
+	int				eof;
+	xfs_extnum_t			eidx;
+
+	mp = ip->i_mount;
+	XFS_STATS_INC(mp, xs_del_exlist);
+
+	ep = xfs_bmap_search_extents(ip, del->br_startoff, XFS_COW_FORK, &eof,
+			&eidx, &got, &new);
+
+	ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); ifp = ifp;
+	ASSERT((eidx >= 0) && (eidx < ifp->if_bytes /
+		(uint)sizeof(xfs_bmbt_rec_t)));
+	ASSERT(del->br_blockcount > 0);
+	ASSERT(got.br_startoff <= del->br_startoff);
+	del_endoff = del->br_startoff + del->br_blockcount;
+	got_endoff = got.br_startoff + got.br_blockcount;
+	ASSERT(got_endoff >= del_endoff);
+	delay = isnullstartblock(got.br_startblock);
+	ASSERT(isnullstartblock(del->br_startblock) == delay);
+	qfield = 0;
+	error = 0;
+	/*
+	 * If deleting a real allocation, must free up the disk space.
+	 */
+	if (!delay) {
+		nblks = del->br_blockcount;
+		qfield = XFS_TRANS_DQ_BCOUNT;
+		/*
+		 * Set up del_endblock and cur for later.
+		 */
+		del_endblock = del->br_startblock + del->br_blockcount;
+		da_old = da_new = 0;
+	} else {
+		da_old = startblockval(got.br_startblock);
+		da_new = 0;
+		nblks = 0;
+	}
+	qfield = qfield;
+	nblks = nblks;
+
+	/*
+	 * Set flag value to use in switch statement.
+	 * Left-contig is 2, right-contig is 1.
+	 */
+	switch (((got.br_startoff == del->br_startoff) << 1) |
+		(got_endoff == del_endoff)) {
+	case 3:
+		/*
+		 * Matches the whole extent.  Delete the entry.
+		 */
+		xfs_iext_remove(ip, eidx, 1, BMAP_COWFORK);
+		--eidx;
+		break;
+
+	case 2:
+		/*
+		 * Deleting the first part of the extent.
+		 */
+		trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);
+		xfs_bmbt_set_startoff(ep, del_endoff);
+		temp = got.br_blockcount - del->br_blockcount;
+		xfs_bmbt_set_blockcount(ep, temp);
+		if (delay) {
+			temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+				da_old);
+			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
+			trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
+			da_new = temp;
+			break;
+		}
+		xfs_bmbt_set_startblock(ep, del_endblock);
+		trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
+		break;
+
+	case 1:
+		/*
+		 * Deleting the last part of the extent.
+		 */
+		temp = got.br_blockcount - del->br_blockcount;
+		trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);
+		xfs_bmbt_set_blockcount(ep, temp);
+		if (delay) {
+			temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+				da_old);
+			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
+			trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
+			da_new = temp;
+			break;
+		}
+		trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
+		break;
+
+	case 0:
+		/*
+		 * Deleting the middle of the extent.
+		 */
+		temp = del->br_startoff - got.br_startoff;
+		trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);
+		xfs_bmbt_set_blockcount(ep, temp);
+		new.br_startoff = del_endoff;
+		temp2 = got_endoff - del_endoff;
+		new.br_blockcount = temp2;
+		new.br_state = got.br_state;
+		if (!delay) {
+			new.br_startblock = del_endblock;
+		} else {
+			temp = xfs_bmap_worst_indlen(ip, temp);
+			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
+			temp2 = xfs_bmap_worst_indlen(ip, temp2);
+			new.br_startblock = nullstartblock((int)temp2);
+			da_new = temp + temp2;
+			while (da_new > da_old) {
+				if (temp) {
+					temp--;
+					da_new--;
+					xfs_bmbt_set_startblock(ep,
+						nullstartblock((int)temp));
+				}
+				if (da_new == da_old)
+					break;
+				if (temp2) {
+					temp2--;
+					da_new--;
+					new.br_startblock =
+						nullstartblock((int)temp2);
+				}
+			}
+		}
+		trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
+		xfs_iext_insert(ip, eidx + 1, 1, &new, state);
+		++eidx;
+		break;
+	}
+
+	/*
+	 * Account for change in delayed indirect blocks.
+	 * Nothing to do for disk quota accounting here.
+	 */
+	ASSERT(da_old >= da_new);
+	if (da_old > da_new)
+		xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
+
+	return error;
+}
+
 /*
  * Unmap (remove) blocks from a file.
  * If nexts is nonzero then the number of extents to remove is limited to
@@ -5021,17 +5347,16 @@
  * *done is set.
  */
 int						/* error */
-xfs_bunmapi(
+__xfs_bunmapi(
 	xfs_trans_t		*tp,		/* transaction pointer */
 	struct xfs_inode	*ip,		/* incore inode */
 	xfs_fileoff_t		bno,		/* starting offset to unmap */
-	xfs_filblks_t		len,		/* length to unmap in file */
+	xfs_filblks_t		*rlen,		/* i/o: amount remaining */
 	int			flags,		/* misc flags */
 	xfs_extnum_t		nexts,		/* number of extents max */
 	xfs_fsblock_t		*firstblock,	/* first allocated block
 						   controls a.g. for allocs */
-	struct xfs_defer_ops	*dfops,		/* i/o: list extents to free */
-	int			*done)		/* set if not done yet */
+	struct xfs_defer_ops	*dfops)		/* i/o: deferred updates */
 {
 	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
 	xfs_bmbt_irec_t		del;		/* extent being deleted */
@@ -5053,11 +5378,12 @@
 	int			wasdel;		/* was a delayed alloc extent */
 	int			whichfork;	/* data or attribute fork */
 	xfs_fsblock_t		sum;
+	xfs_filblks_t		len = *rlen;	/* length to unmap in file */
 
 	trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
 
-	whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
-		XFS_ATTR_FORK : XFS_DATA_FORK;
+	whichfork = xfs_bmapi_whichfork(flags);
+	ASSERT(whichfork != XFS_COW_FORK);
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	if (unlikely(
 	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
@@ -5079,7 +5405,7 @@
 		return error;
 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
 	if (nextents == 0) {
-		*done = 1;
+		*rlen = 0;
 		return 0;
 	}
 	XFS_STATS_INC(mp, xs_blk_unmap);
@@ -5324,7 +5650,7 @@
 			cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
 
 		error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del,
-				&tmp_logflags, whichfork);
+				&tmp_logflags, whichfork, flags);
 		logflags |= tmp_logflags;
 		if (error)
 			goto error0;
@@ -5350,7 +5676,10 @@
 			extno++;
 		}
 	}
-	*done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
+	if (bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0)
+		*rlen = 0;
+	else
+		*rlen = bno - start + 1;
 
 	/*
 	 * Convert to a btree if necessary.
@@ -5406,6 +5735,27 @@
 	return error;
 }
 
+/* Unmap a range of a file. */
+int
+xfs_bunmapi(
+	xfs_trans_t		*tp,
+	struct xfs_inode	*ip,
+	xfs_fileoff_t		bno,
+	xfs_filblks_t		len,
+	int			flags,
+	xfs_extnum_t		nexts,
+	xfs_fsblock_t		*firstblock,
+	struct xfs_defer_ops	*dfops,
+	int			*done)
+{
+	int			error;
+
+	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock,
+			dfops);
+	*done = (len == 0);
+	return error;
+}
+
 /*
  * Determine whether an extent shift can be accomplished by a merge with the
  * extent that precedes the target hole of the shift.
@@ -5985,3 +6335,146 @@
 	xfs_trans_cancel(tp);
 	return error;
 }
+
+/* Deferred mapping is only for real extents in the data fork. */
+static bool
+xfs_bmap_is_update_needed(
+	struct xfs_bmbt_irec	*bmap)
+{
+	return  bmap->br_startblock != HOLESTARTBLOCK &&
+		bmap->br_startblock != DELAYSTARTBLOCK;
+}
+
+/* Record a bmap intent. */
+static int
+__xfs_bmap_add(
+	struct xfs_mount		*mp,
+	struct xfs_defer_ops		*dfops,
+	enum xfs_bmap_intent_type	type,
+	struct xfs_inode		*ip,
+	int				whichfork,
+	struct xfs_bmbt_irec		*bmap)
+{
+	int				error;
+	struct xfs_bmap_intent		*bi;
+
+	trace_xfs_bmap_defer(mp,
+			XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
+			type,
+			XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
+			ip->i_ino, whichfork,
+			bmap->br_startoff,
+			bmap->br_blockcount,
+			bmap->br_state);
+
+	bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
+	INIT_LIST_HEAD(&bi->bi_list);
+	bi->bi_type = type;
+	bi->bi_owner = ip;
+	bi->bi_whichfork = whichfork;
+	bi->bi_bmap = *bmap;
+
+	error = xfs_defer_join(dfops, bi->bi_owner);
+	if (error) {
+		kmem_free(bi);
+		return error;
+	}
+
+	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
+	return 0;
+}
+
+/* Map an extent into a file. */
+int
+xfs_bmap_map_extent(
+	struct xfs_mount	*mp,
+	struct xfs_defer_ops	*dfops,
+	struct xfs_inode	*ip,
+	struct xfs_bmbt_irec	*PREV)
+{
+	if (!xfs_bmap_is_update_needed(PREV))
+		return 0;
+
+	return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip,
+			XFS_DATA_FORK, PREV);
+}
+
+/* Unmap an extent out of a file. */
+int
+xfs_bmap_unmap_extent(
+	struct xfs_mount	*mp,
+	struct xfs_defer_ops	*dfops,
+	struct xfs_inode	*ip,
+	struct xfs_bmbt_irec	*PREV)
+{
+	if (!xfs_bmap_is_update_needed(PREV))
+		return 0;
+
+	return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip,
+			XFS_DATA_FORK, PREV);
+}
+
+/*
+ * Process one of the deferred bmap operations.  We pass back the
+ * btree cursor to maintain our lock on the bmapbt between calls.
+ */
+int
+xfs_bmap_finish_one(
+	struct xfs_trans		*tp,
+	struct xfs_defer_ops		*dfops,
+	struct xfs_inode		*ip,
+	enum xfs_bmap_intent_type	type,
+	int				whichfork,
+	xfs_fileoff_t			startoff,
+	xfs_fsblock_t			startblock,
+	xfs_filblks_t			blockcount,
+	xfs_exntst_t			state)
+{
+	struct xfs_bmbt_irec		bmap;
+	int				nimaps = 1;
+	xfs_fsblock_t			firstfsb;
+	int				flags = XFS_BMAPI_REMAP;
+	int				done;
+	int				error = 0;
+
+	bmap.br_startblock = startblock;
+	bmap.br_startoff = startoff;
+	bmap.br_blockcount = blockcount;
+	bmap.br_state = state;
+
+	trace_xfs_bmap_deferred(tp->t_mountp,
+			XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
+			XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
+			ip->i_ino, whichfork, startoff, blockcount, state);
+
+	if (whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK)
+		return -EFSCORRUPTED;
+	if (whichfork == XFS_ATTR_FORK)
+		flags |= XFS_BMAPI_ATTRFORK;
+
+	if (XFS_TEST_ERROR(false, tp->t_mountp,
+			XFS_ERRTAG_BMAP_FINISH_ONE,
+			XFS_RANDOM_BMAP_FINISH_ONE))
+		return -EIO;
+
+	switch (type) {
+	case XFS_BMAP_MAP:
+		firstfsb = bmap.br_startblock;
+		error = xfs_bmapi_write(tp, ip, bmap.br_startoff,
+					bmap.br_blockcount, flags, &firstfsb,
+					bmap.br_blockcount, &bmap, &nimaps,
+					dfops);
+		break;
+	case XFS_BMAP_UNMAP:
+		error = xfs_bunmapi(tp, ip, bmap.br_startoff,
+				bmap.br_blockcount, flags, 1, &firstfsb,
+				dfops, &done);
+		ASSERT(done);
+		break;
+	default:
+		ASSERT(0);
+		error = -EFSCORRUPTED;
+	}
+
+	return error;
+}
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 8395f6e..f97db71 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -97,6 +97,19 @@
  */
 #define XFS_BMAPI_ZERO		0x080
 
+/*
+ * Map the inode offset to the block given in ap->firstblock.  Primarily
+ * used for reflink.  The range must be in a hole, and this flag cannot be
+ * turned on with PREALLOC or CONVERT, and cannot be used on the attr fork.
+ *
+ * For bunmapi, this flag unmaps the range without adjusting quota, reducing
+ * refcount, or freeing the blocks.
+ */
+#define XFS_BMAPI_REMAP		0x100
+
+/* Map something in the CoW fork. */
+#define XFS_BMAPI_COWFORK	0x200
+
 #define XFS_BMAPI_FLAGS \
 	{ XFS_BMAPI_ENTIRE,	"ENTIRE" }, \
 	{ XFS_BMAPI_METADATA,	"METADATA" }, \
@@ -105,12 +118,24 @@
 	{ XFS_BMAPI_IGSTATE,	"IGSTATE" }, \
 	{ XFS_BMAPI_CONTIG,	"CONTIG" }, \
 	{ XFS_BMAPI_CONVERT,	"CONVERT" }, \
-	{ XFS_BMAPI_ZERO,	"ZERO" }
+	{ XFS_BMAPI_ZERO,	"ZERO" }, \
+	{ XFS_BMAPI_REMAP,	"REMAP" }, \
+	{ XFS_BMAPI_COWFORK,	"COWFORK" }
 
 
 static inline int xfs_bmapi_aflag(int w)
 {
-	return (w == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0);
+	return (w == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK :
+	       (w == XFS_COW_FORK ? XFS_BMAPI_COWFORK : 0));
+}
+
+static inline int xfs_bmapi_whichfork(int bmapi_flags)
+{
+	if (bmapi_flags & XFS_BMAPI_COWFORK)
+		return XFS_COW_FORK;
+	else if (bmapi_flags & XFS_BMAPI_ATTRFORK)
+		return XFS_ATTR_FORK;
+	return XFS_DATA_FORK;
 }
 
 /*
@@ -131,13 +156,15 @@
 #define BMAP_LEFT_VALID		(1 << 6)
 #define BMAP_RIGHT_VALID	(1 << 7)
 #define BMAP_ATTRFORK		(1 << 8)
+#define BMAP_COWFORK		(1 << 9)
 
 #define XFS_BMAP_EXT_FLAGS \
 	{ BMAP_LEFT_CONTIG,	"LC" }, \
 	{ BMAP_RIGHT_CONTIG,	"RC" }, \
 	{ BMAP_LEFT_FILLING,	"LF" }, \
 	{ BMAP_RIGHT_FILLING,	"RF" }, \
-	{ BMAP_ATTRFORK,	"ATTR" }
+	{ BMAP_ATTRFORK,	"ATTR" }, \
+	{ BMAP_COWFORK,		"COW" }
 
 
 /*
@@ -186,10 +213,15 @@
 		xfs_fsblock_t *firstblock, xfs_extlen_t total,
 		struct xfs_bmbt_irec *mval, int *nmap,
 		struct xfs_defer_ops *dfops);
+int	__xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
+		xfs_fileoff_t bno, xfs_filblks_t *rlen, int flags,
+		xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
+		struct xfs_defer_ops *dfops);
 int	xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
 		xfs_fileoff_t bno, xfs_filblks_t len, int flags,
 		xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
 		struct xfs_defer_ops *dfops, int *done);
+int	xfs_bunmapi_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *del);
 int	xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx,
 		xfs_extnum_t num);
 uint	xfs_default_attroffset(struct xfs_inode *ip);
@@ -203,8 +235,31 @@
 	xfs_bmap_search_extents(struct xfs_inode *ip, xfs_fileoff_t bno,
 		int fork, int *eofp, xfs_extnum_t *lastxp,
 		struct xfs_bmbt_irec *gotp, struct xfs_bmbt_irec *prevp);
-int	xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, xfs_fileoff_t aoff,
-		xfs_filblks_t len, struct xfs_bmbt_irec *got,
-		struct xfs_bmbt_irec *prev, xfs_extnum_t *lastx, int eof);
+int	xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
+		xfs_fileoff_t aoff, xfs_filblks_t len,
+		struct xfs_bmbt_irec *got, struct xfs_bmbt_irec *prev,
+		xfs_extnum_t *lastx, int eof);
+
+enum xfs_bmap_intent_type {
+	XFS_BMAP_MAP = 1,
+	XFS_BMAP_UNMAP,
+};
+
+struct xfs_bmap_intent {
+	struct list_head			bi_list;
+	enum xfs_bmap_intent_type		bi_type;
+	struct xfs_inode			*bi_owner;
+	int					bi_whichfork;
+	struct xfs_bmbt_irec			bi_bmap;
+};
+
+int	xfs_bmap_finish_one(struct xfs_trans *tp, struct xfs_defer_ops *dfops,
+		struct xfs_inode *ip, enum xfs_bmap_intent_type type,
+		int whichfork, xfs_fileoff_t startoff, xfs_fsblock_t startblock,
+		xfs_filblks_t blockcount, xfs_exntst_t state);
+int	xfs_bmap_map_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
+		struct xfs_inode *ip, struct xfs_bmbt_irec *imap);
+int	xfs_bmap_unmap_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
+		struct xfs_inode *ip, struct xfs_bmbt_irec *imap);
 
 #endif	/* __XFS_BMAP_H__ */
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index cd85274..8007d2b 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -453,6 +453,7 @@
 
 	if (args.fsbno == NULLFSBLOCK) {
 		args.fsbno = be64_to_cpu(start->l);
+try_another_ag:
 		args.type = XFS_ALLOCTYPE_START_BNO;
 		/*
 		 * Make sure there is sufficient room left in the AG to
@@ -482,6 +483,22 @@
 	if (error)
 		goto error0;
 
+	/*
+	 * During a CoW operation, the allocation and bmbt updates occur in
+	 * different transactions.  The mapping code tries to put new bmbt
+	 * blocks near extents being mapped, but the only way to guarantee this
+	 * is if the alloc and the mapping happen in a single transaction that
+	 * has a block reservation.  That isn't the case here, so if we run out
+	 * of space we'll try again with another AG.
+	 */
+	if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
+	    args.fsbno == NULLFSBLOCK &&
+	    args.type == XFS_ALLOCTYPE_NEAR_BNO) {
+		cur->bc_private.b.dfops->dop_low = true;
+		args.fsbno = cur->bc_private.b.firstblock;
+		goto try_another_ag;
+	}
+
 	if (args.fsbno == NULLFSBLOCK && args.minleft) {
 		/*
 		 * Could not find an AG with enough free space to satisfy
@@ -777,6 +794,7 @@
 {
 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
 	struct xfs_btree_cur	*cur;
+	ASSERT(whichfork != XFS_COW_FORK);
 
 	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
 
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index aa1752f..5c8e6f2 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -45,9 +45,10 @@
  */
 static const __uint32_t xfs_magics[2][XFS_BTNUM_MAX] = {
 	{ XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, 0, XFS_BMAP_MAGIC, XFS_IBT_MAGIC,
-	  XFS_FIBT_MAGIC },
+	  XFS_FIBT_MAGIC, 0 },
 	{ XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC, XFS_RMAP_CRC_MAGIC,
-	  XFS_BMAP_CRC_MAGIC, XFS_IBT_CRC_MAGIC, XFS_FIBT_CRC_MAGIC }
+	  XFS_BMAP_CRC_MAGIC, XFS_IBT_CRC_MAGIC, XFS_FIBT_CRC_MAGIC,
+	  XFS_REFC_CRC_MAGIC }
 };
 #define xfs_btree_magic(cur) \
 	xfs_magics[!!((cur)->bc_flags & XFS_BTREE_CRC_BLOCKS)][cur->bc_btnum]
@@ -1216,6 +1217,9 @@
 	case XFS_BTNUM_RMAP:
 		xfs_buf_set_ref(bp, XFS_RMAP_BTREE_REF);
 		break;
+	case XFS_BTNUM_REFC:
+		xfs_buf_set_ref(bp, XFS_REFC_BTREE_REF);
+		break;
 	default:
 		ASSERT(0);
 	}
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index 3f8556a..c2b01d1 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -49,6 +49,7 @@
 	struct xfs_inobt_key		inobt;
 	struct xfs_rmap_key		rmap;
 	struct xfs_rmap_key		__rmap_bigkey[2];
+	struct xfs_refcount_key		refc;
 };
 
 union xfs_btree_rec {
@@ -57,6 +58,7 @@
 	struct xfs_alloc_rec		alloc;
 	struct xfs_inobt_rec		inobt;
 	struct xfs_rmap_rec		rmap;
+	struct xfs_refcount_rec		refc;
 };
 
 /*
@@ -72,6 +74,7 @@
 #define	XFS_BTNUM_INO	((xfs_btnum_t)XFS_BTNUM_INOi)
 #define	XFS_BTNUM_FINO	((xfs_btnum_t)XFS_BTNUM_FINOi)
 #define	XFS_BTNUM_RMAP	((xfs_btnum_t)XFS_BTNUM_RMAPi)
+#define	XFS_BTNUM_REFC	((xfs_btnum_t)XFS_BTNUM_REFCi)
 
 /*
  * For logging record fields.
@@ -105,6 +108,7 @@
 	case XFS_BTNUM_INO: __XFS_BTREE_STATS_INC(__mp, ibt, stat); break; \
 	case XFS_BTNUM_FINO: __XFS_BTREE_STATS_INC(__mp, fibt, stat); break; \
 	case XFS_BTNUM_RMAP: __XFS_BTREE_STATS_INC(__mp, rmap, stat); break; \
+	case XFS_BTNUM_REFC: __XFS_BTREE_STATS_INC(__mp, refcbt, stat); break; \
 	case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break;	\
 	}       \
 } while (0)
@@ -127,6 +131,8 @@
 		__XFS_BTREE_STATS_ADD(__mp, fibt, stat, val); break; \
 	case XFS_BTNUM_RMAP:	\
 		__XFS_BTREE_STATS_ADD(__mp, rmap, stat, val); break; \
+	case XFS_BTNUM_REFC:	\
+		__XFS_BTREE_STATS_ADD(__mp, refcbt, stat, val); break; \
 	case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break; \
 	}       \
 } while (0)
@@ -217,6 +223,15 @@
 	struct xfs_bmbt_irec		b;
 	struct xfs_inobt_rec_incore	i;
 	struct xfs_rmap_irec		r;
+	struct xfs_refcount_irec	rc;
+};
+
+/* Per-AG btree private information. */
+union xfs_btree_cur_private {
+	struct {
+		unsigned long	nr_ops;		/* # record updates */
+		int		shape_changes;	/* # of extent splits */
+	} refc;
 };
 
 /*
@@ -243,6 +258,7 @@
 			struct xfs_buf	*agbp;	/* agf/agi buffer pointer */
 			struct xfs_defer_ops *dfops;	/* deferred updates */
 			xfs_agnumber_t	agno;	/* ag number */
+			union xfs_btree_cur_private	priv;
 		} a;
 		struct {			/* needed for BMAP */
 			struct xfs_inode *ip;	/* pointer to our inode */
diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h
index e96533d..f6e93ef 100644
--- a/fs/xfs/libxfs/xfs_defer.h
+++ b/fs/xfs/libxfs/xfs_defer.h
@@ -51,6 +51,8 @@
  * find all the space it needs.
  */
 enum xfs_defer_ops_type {
+	XFS_DEFER_OPS_TYPE_BMAP,
+	XFS_DEFER_OPS_TYPE_REFCOUNT,
 	XFS_DEFER_OPS_TYPE_RMAP,
 	XFS_DEFER_OPS_TYPE_FREE,
 	XFS_DEFER_OPS_TYPE_MAX,
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 270fb5c..f6547fc 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -456,9 +456,11 @@
 
 #define XFS_SB_FEAT_RO_COMPAT_FINOBT   (1 << 0)		/* free inode btree */
 #define XFS_SB_FEAT_RO_COMPAT_RMAPBT   (1 << 1)		/* reverse map btree */
+#define XFS_SB_FEAT_RO_COMPAT_REFLINK  (1 << 2)		/* reflinked files */
 #define XFS_SB_FEAT_RO_COMPAT_ALL \
 		(XFS_SB_FEAT_RO_COMPAT_FINOBT | \
-		 XFS_SB_FEAT_RO_COMPAT_RMAPBT)
+		 XFS_SB_FEAT_RO_COMPAT_RMAPBT | \
+		 XFS_SB_FEAT_RO_COMPAT_REFLINK)
 #define XFS_SB_FEAT_RO_COMPAT_UNKNOWN	~XFS_SB_FEAT_RO_COMPAT_ALL
 static inline bool
 xfs_sb_has_ro_compat_feature(
@@ -546,6 +548,12 @@
 		(sbp->sb_features_ro_compat & XFS_SB_FEAT_RO_COMPAT_RMAPBT);
 }
 
+static inline bool xfs_sb_version_hasreflink(struct xfs_sb *sbp)
+{
+	return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
+		(sbp->sb_features_ro_compat & XFS_SB_FEAT_RO_COMPAT_REFLINK);
+}
+
 /*
  * end of superblock version macros
  */
@@ -641,14 +649,17 @@
 	uuid_t		agf_uuid;	/* uuid of filesystem */
 
 	__be32		agf_rmap_blocks;	/* rmapbt blocks used */
-	__be32		agf_padding;		/* padding */
+	__be32		agf_refcount_blocks;	/* refcountbt blocks used */
+
+	__be32		agf_refcount_root;	/* refcount tree root block */
+	__be32		agf_refcount_level;	/* refcount btree levels */
 
 	/*
 	 * reserve some contiguous space for future logged fields before we add
 	 * the unlogged fields. This makes the range logging via flags and
 	 * structure offsets much simpler.
 	 */
-	__be64		agf_spare64[15];
+	__be64		agf_spare64[14];
 
 	/* unlogged fields, written during buffer writeback. */
 	__be64		agf_lsn;	/* last write sequence */
@@ -674,8 +685,11 @@
 #define	XFS_AGF_BTREEBLKS	0x00000800
 #define	XFS_AGF_UUID		0x00001000
 #define	XFS_AGF_RMAP_BLOCKS	0x00002000
-#define	XFS_AGF_SPARE64		0x00004000
-#define	XFS_AGF_NUM_BITS	15
+#define	XFS_AGF_REFCOUNT_BLOCKS	0x00004000
+#define	XFS_AGF_REFCOUNT_ROOT	0x00008000
+#define	XFS_AGF_REFCOUNT_LEVEL	0x00010000
+#define	XFS_AGF_SPARE64		0x00020000
+#define	XFS_AGF_NUM_BITS	18
 #define	XFS_AGF_ALL_BITS	((1 << XFS_AGF_NUM_BITS) - 1)
 
 #define XFS_AGF_FLAGS \
@@ -693,6 +707,9 @@
 	{ XFS_AGF_BTREEBLKS,	"BTREEBLKS" }, \
 	{ XFS_AGF_UUID,		"UUID" }, \
 	{ XFS_AGF_RMAP_BLOCKS,	"RMAP_BLOCKS" }, \
+	{ XFS_AGF_REFCOUNT_BLOCKS,	"REFCOUNT_BLOCKS" }, \
+	{ XFS_AGF_REFCOUNT_ROOT,	"REFCOUNT_ROOT" }, \
+	{ XFS_AGF_REFCOUNT_LEVEL,	"REFCOUNT_LEVEL" }, \
 	{ XFS_AGF_SPARE64,	"SPARE64" }
 
 /* disk block (xfs_daddr_t) in the AG */
@@ -885,7 +902,8 @@
 	__be64		di_changecount;	/* number of attribute changes */
 	__be64		di_lsn;		/* flush sequence */
 	__be64		di_flags2;	/* more random flags */
-	__u8		di_pad2[16];	/* more padding for future expansion */
+	__be32		di_cowextsize;	/* basic cow extent size for file */
+	__u8		di_pad2[12];	/* more padding for future expansion */
 
 	/* fields only written to during inode creation */
 	xfs_timestamp_t	di_crtime;	/* time created */
@@ -1041,9 +1059,14 @@
  * 16 bits of the XFS_XFLAG_s range.
  */
 #define XFS_DIFLAG2_DAX_BIT	0	/* use DAX for this inode */
+#define XFS_DIFLAG2_REFLINK_BIT	1	/* file's blocks may be shared */
+#define XFS_DIFLAG2_COWEXTSIZE_BIT   2  /* copy on write extent size hint */
 #define XFS_DIFLAG2_DAX		(1 << XFS_DIFLAG2_DAX_BIT)
+#define XFS_DIFLAG2_REFLINK     (1 << XFS_DIFLAG2_REFLINK_BIT)
+#define XFS_DIFLAG2_COWEXTSIZE  (1 << XFS_DIFLAG2_COWEXTSIZE_BIT)
 
-#define XFS_DIFLAG2_ANY		(XFS_DIFLAG2_DAX)
+#define XFS_DIFLAG2_ANY \
+	(XFS_DIFLAG2_DAX | XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)
 
 /*
  * Inode number format:
@@ -1353,7 +1376,9 @@
 #define XFS_RMAP_OWN_AG		(-5ULL)	/* AG freespace btree blocks */
 #define XFS_RMAP_OWN_INOBT	(-6ULL)	/* Inode btree blocks */
 #define XFS_RMAP_OWN_INODES	(-7ULL)	/* Inode chunk */
-#define XFS_RMAP_OWN_MIN	(-8ULL) /* guard */
+#define XFS_RMAP_OWN_REFC	(-8ULL) /* refcount tree */
+#define XFS_RMAP_OWN_COW	(-9ULL) /* cow allocations */
+#define XFS_RMAP_OWN_MIN	(-10ULL) /* guard */
 
 #define XFS_RMAP_NON_INODE_OWNER(owner)	(!!((owner) & (1ULL << 63)))
 
@@ -1434,6 +1459,62 @@
 	 XFS_IBT_BLOCK(mp) + 1)
 
 /*
+ * Reference Count Btree format definitions
+ *
+ */
+#define	XFS_REFC_CRC_MAGIC	0x52334643	/* 'R3FC' */
+
+unsigned int xfs_refc_block(struct xfs_mount *mp);
+
+/*
+ * Data record/key structure
+ *
+ * Each record associates a range of physical blocks (starting at
+ * rc_startblock and ending rc_blockcount blocks later) with a reference
+ * count (rc_refcount).  Extents that are being used to stage a copy on
+ * write (CoW) operation are recorded in the refcount btree with a
+ * refcount of 1.  All other records must have a refcount > 1 and must
+ * track an extent mapped only by file data forks.
+ *
+ * Extents with a single owner (attributes, metadata, non-shared file
+ * data) are not tracked here.  Free space is also not tracked here.
+ * This is consistent with pre-reflink XFS.
+ */
+
+/*
+ * Extents that are being used to stage a copy on write are stored
+ * in the refcount btree with a refcount of 1 and the upper bit set
+ * on the startblock.  This speeds up mount time deletion of stale
+ * staging extents because they're all at the right side of the tree.
+ */
+#define XFS_REFC_COW_START		((xfs_agblock_t)(1U << 31))
+#define REFCNTBT_COWFLAG_BITLEN		1
+#define REFCNTBT_AGBLOCK_BITLEN		31
+
+struct xfs_refcount_rec {
+	__be32		rc_startblock;	/* starting block number */
+	__be32		rc_blockcount;	/* count of blocks */
+	__be32		rc_refcount;	/* number of inodes linked here */
+};
+
+struct xfs_refcount_key {
+	__be32		rc_startblock;	/* starting block number */
+};
+
+struct xfs_refcount_irec {
+	xfs_agblock_t	rc_startblock;	/* starting block number */
+	xfs_extlen_t	rc_blockcount;	/* count of free blocks */
+	xfs_nlink_t	rc_refcount;	/* number of inodes linked here */
+};
+
+#define MAXREFCOUNT	((xfs_nlink_t)~0U)
+#define MAXREFCEXTLEN	((xfs_extlen_t)~0U)
+
+/* btree pointer type */
+typedef __be32 xfs_refcount_ptr_t;
+
+
+/*
  * BMAP Btree format definitions
  *
  * This includes both the root block definition that sits inside an inode fork
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index 7945505..b72dc82 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -81,14 +81,16 @@
 #define BMV_IF_PREALLOC		0x4	/* rtn status BMV_OF_PREALLOC if req */
 #define BMV_IF_DELALLOC		0x8	/* rtn status BMV_OF_DELALLOC if req */
 #define BMV_IF_NO_HOLES		0x10	/* Do not return holes */
+#define BMV_IF_COWFORK		0x20	/* return CoW fork rather than data */
 #define BMV_IF_VALID	\
 	(BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC|	\
-	 BMV_IF_DELALLOC|BMV_IF_NO_HOLES)
+	 BMV_IF_DELALLOC|BMV_IF_NO_HOLES|BMV_IF_COWFORK)
 
 /*	bmv_oflags values - returned for each non-header segment */
 #define BMV_OF_PREALLOC		0x1	/* segment = unwritten pre-allocation */
 #define BMV_OF_DELALLOC		0x2	/* segment = delayed allocation */
 #define BMV_OF_LAST		0x4	/* segment is the last in the file */
+#define BMV_OF_SHARED		0x8	/* segment shared with another file */
 
 /*
  * Structure for XFS_IOC_FSSETDM.
@@ -206,7 +208,8 @@
 #define XFS_FSOP_GEOM_FLAGS_FTYPE	0x10000	/* inode directory types */
 #define XFS_FSOP_GEOM_FLAGS_FINOBT	0x20000	/* free inode btree */
 #define XFS_FSOP_GEOM_FLAGS_SPINODES	0x40000	/* sparse inode chunks	*/
-#define XFS_FSOP_GEOM_FLAGS_RMAPBT	0x80000	/* Reverse mapping btree */
+#define XFS_FSOP_GEOM_FLAGS_RMAPBT	0x80000	/* reverse mapping btree */
+#define XFS_FSOP_GEOM_FLAGS_REFLINK	0x100000 /* files can share blocks */
 
 /*
  * Minimum and maximum sizes need for growth checks.
@@ -275,7 +278,8 @@
 #define	bs_projid	bs_projid_lo	/* (previously just bs_projid)	*/
 	__u16		bs_forkoff;	/* inode fork offset in bytes	*/
 	__u16		bs_projid_hi;	/* higher part of project id	*/
-	unsigned char	bs_pad[10];	/* pad space, unused		*/
+	unsigned char	bs_pad[6];	/* pad space, unused		*/
+	__u32		bs_cowextsize;	/* cow extent size		*/
 	__u32		bs_dmevmask;	/* DMIG event mask		*/
 	__u16		bs_dmstate;	/* DMIG state info		*/
 	__u16		bs_aextents;	/* attribute number of extents	*/
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 4b9769e..8de9a3a 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -256,6 +256,7 @@
 		to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
 		to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
 		to->di_flags2 = be64_to_cpu(from->di_flags2);
+		to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
 	}
 }
 
@@ -305,7 +306,7 @@
 		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
 		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
 		to->di_flags2 = cpu_to_be64(from->di_flags2);
-
+		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
 		to->di_ino = cpu_to_be64(ip->i_ino);
 		to->di_lsn = cpu_to_be64(lsn);
 		memset(to->di_pad2, 0, sizeof(to->di_pad2));
@@ -357,6 +358,7 @@
 		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
 		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
 		to->di_flags2 = cpu_to_be64(from->di_flags2);
+		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
 		to->di_ino = cpu_to_be64(from->di_ino);
 		to->di_lsn = cpu_to_be64(from->di_lsn);
 		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
@@ -373,6 +375,9 @@
 	struct xfs_inode	*ip,
 	struct xfs_dinode	*dip)
 {
+	uint16_t		flags;
+	uint64_t		flags2;
+
 	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
 		return false;
 
@@ -389,6 +394,23 @@
 		return false;
 	if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
 		return false;
+
+	flags = be16_to_cpu(dip->di_flags);
+	flags2 = be64_to_cpu(dip->di_flags2);
+
+	/* don't allow reflink/cowextsize if we don't have reflink */
+	if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
+            !xfs_sb_version_hasreflink(&mp->m_sb))
+		return false;
+
+	/* don't let reflink and realtime mix */
+	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
+		return false;
+
+	/* don't let reflink and dax mix */
+	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
+		return false;
+
 	return true;
 }
 
diff --git a/fs/xfs/libxfs/xfs_inode_buf.h b/fs/xfs/libxfs/xfs_inode_buf.h
index 7c4dd32..62d9d46 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.h
+++ b/fs/xfs/libxfs/xfs_inode_buf.h
@@ -47,6 +47,7 @@
 	__uint16_t	di_flags;	/* random flags, XFS_DIFLAG_... */
 
 	__uint64_t	di_flags2;	/* more random flags */
+	__uint32_t	di_cowextsize;	/* basic cow extent size for file */
 
 	xfs_ictimestamp_t di_crtime;	/* time created */
 };
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index bbcc8c7..5dd56d3 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -121,6 +121,26 @@
 		return -EFSCORRUPTED;
 	}
 
+	if (unlikely(xfs_is_reflink_inode(ip) &&
+	    (VFS_I(ip)->i_mode & S_IFMT) != S_IFREG)) {
+		xfs_warn(ip->i_mount,
+			"corrupt dinode %llu, wrong file type for reflink.",
+			ip->i_ino);
+		XFS_CORRUPTION_ERROR("xfs_iformat(reflink)",
+				     XFS_ERRLEVEL_LOW, ip->i_mount, dip);
+		return -EFSCORRUPTED;
+	}
+
+	if (unlikely(xfs_is_reflink_inode(ip) &&
+	    (ip->i_d.di_flags & XFS_DIFLAG_REALTIME))) {
+		xfs_warn(ip->i_mount,
+			"corrupt dinode %llu, has reflink+realtime flag set.",
+			ip->i_ino);
+		XFS_CORRUPTION_ERROR("xfs_iformat(reflink)",
+				     XFS_ERRLEVEL_LOW, ip->i_mount, dip);
+		return -EFSCORRUPTED;
+	}
+
 	switch (VFS_I(ip)->i_mode & S_IFMT) {
 	case S_IFIFO:
 	case S_IFCHR:
@@ -186,9 +206,14 @@
 		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
 		return -EFSCORRUPTED;
 	}
-	if (error) {
+	if (error)
 		return error;
+
+	if (xfs_is_reflink_inode(ip)) {
+		ASSERT(ip->i_cowfp == NULL);
+		xfs_ifork_init_cow(ip);
 	}
+
 	if (!XFS_DFORK_Q(dip))
 		return 0;
 
@@ -208,7 +233,8 @@
 			XFS_CORRUPTION_ERROR("xfs_iformat(8)",
 					     XFS_ERRLEVEL_LOW,
 					     ip->i_mount, dip);
-			return -EFSCORRUPTED;
+			error = -EFSCORRUPTED;
+			break;
 		}
 
 		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
@@ -226,6 +252,9 @@
 	if (error) {
 		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
 		ip->i_afp = NULL;
+		if (ip->i_cowfp)
+			kmem_zone_free(xfs_ifork_zone, ip->i_cowfp);
+		ip->i_cowfp = NULL;
 		xfs_idestroy_fork(ip, XFS_DATA_FORK);
 	}
 	return error;
@@ -740,6 +769,9 @@
 	if (whichfork == XFS_ATTR_FORK) {
 		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
 		ip->i_afp = NULL;
+	} else if (whichfork == XFS_COW_FORK) {
+		kmem_zone_free(xfs_ifork_zone, ip->i_cowfp);
+		ip->i_cowfp = NULL;
 	}
 }
 
@@ -927,6 +959,19 @@
 	}
 }
 
+/* Convert bmap state flags to an inode fork. */
+struct xfs_ifork *
+xfs_iext_state_to_fork(
+	struct xfs_inode	*ip,
+	int			state)
+{
+	if (state & BMAP_COWFORK)
+		return ip->i_cowfp;
+	else if (state & BMAP_ATTRFORK)
+		return ip->i_afp;
+	return &ip->i_df;
+}
+
 /*
  * Insert new item(s) into the extent records for incore inode
  * fork 'ifp'.  'count' new items are inserted at index 'idx'.
@@ -939,7 +984,7 @@
 	xfs_bmbt_irec_t	*new,		/* items to insert */
 	int		state)		/* type of extent conversion */
 {
-	xfs_ifork_t	*ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
+	xfs_ifork_t	*ifp = xfs_iext_state_to_fork(ip, state);
 	xfs_extnum_t	i;		/* extent record index */
 
 	trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);
@@ -1189,7 +1234,7 @@
 	int		ext_diff,	/* number of extents to remove */
 	int		state)		/* type of extent conversion */
 {
-	xfs_ifork_t	*ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
+	xfs_ifork_t	*ifp = xfs_iext_state_to_fork(ip, state);
 	xfs_extnum_t	nextents;	/* number of extents in file */
 	int		new_size;	/* size of extents after removal */
 
@@ -1934,3 +1979,20 @@
 		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
 	}
 }
+
+/*
+ * Initialize an inode's copy-on-write fork.
+ */
+void
+xfs_ifork_init_cow(
+	struct xfs_inode	*ip)
+{
+	if (ip->i_cowfp)
+		return;
+
+	ip->i_cowfp = kmem_zone_zalloc(xfs_ifork_zone,
+				       KM_SLEEP | KM_NOFS);
+	ip->i_cowfp->if_flags = XFS_IFEXTENTS;
+	ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
+	ip->i_cnextents = 0;
+}
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index f95e072..c9476f5 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -92,7 +92,9 @@
 #define XFS_IFORK_PTR(ip,w)		\
 	((w) == XFS_DATA_FORK ? \
 		&(ip)->i_df : \
-		(ip)->i_afp)
+		((w) == XFS_ATTR_FORK ? \
+			(ip)->i_afp : \
+			(ip)->i_cowfp))
 #define XFS_IFORK_DSIZE(ip) \
 	(XFS_IFORK_Q(ip) ? \
 		XFS_IFORK_BOFF(ip) : \
@@ -105,26 +107,38 @@
 #define XFS_IFORK_SIZE(ip,w) \
 	((w) == XFS_DATA_FORK ? \
 		XFS_IFORK_DSIZE(ip) : \
-		XFS_IFORK_ASIZE(ip))
+		((w) == XFS_ATTR_FORK ? \
+			XFS_IFORK_ASIZE(ip) : \
+			0))
 #define XFS_IFORK_FORMAT(ip,w) \
 	((w) == XFS_DATA_FORK ? \
 		(ip)->i_d.di_format : \
-		(ip)->i_d.di_aformat)
+		((w) == XFS_ATTR_FORK ? \
+			(ip)->i_d.di_aformat : \
+			(ip)->i_cformat))
 #define XFS_IFORK_FMT_SET(ip,w,n) \
 	((w) == XFS_DATA_FORK ? \
 		((ip)->i_d.di_format = (n)) : \
-		((ip)->i_d.di_aformat = (n)))
+		((w) == XFS_ATTR_FORK ? \
+			((ip)->i_d.di_aformat = (n)) : \
+			((ip)->i_cformat = (n))))
 #define XFS_IFORK_NEXTENTS(ip,w) \
 	((w) == XFS_DATA_FORK ? \
 		(ip)->i_d.di_nextents : \
-		(ip)->i_d.di_anextents)
+		((w) == XFS_ATTR_FORK ? \
+			(ip)->i_d.di_anextents : \
+			(ip)->i_cnextents))
 #define XFS_IFORK_NEXT_SET(ip,w,n) \
 	((w) == XFS_DATA_FORK ? \
 		((ip)->i_d.di_nextents = (n)) : \
-		((ip)->i_d.di_anextents = (n)))
+		((w) == XFS_ATTR_FORK ? \
+			((ip)->i_d.di_anextents = (n)) : \
+			((ip)->i_cnextents = (n))))
 #define XFS_IFORK_MAXEXT(ip, w) \
 	(XFS_IFORK_SIZE(ip, w) / sizeof(xfs_bmbt_rec_t))
 
+struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state);
+
 int		xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *);
 void		xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
 				struct xfs_inode_log_item *, int);
@@ -169,4 +183,6 @@
 
 extern struct kmem_zone	*xfs_ifork_zone;
 
+extern void xfs_ifork_init_cow(struct xfs_inode *ip);
+
 #endif	/* __XFS_INODE_FORK_H__ */
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index fc5eef8..083cdd6 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -112,7 +112,11 @@
 #define XLOG_REG_TYPE_ICREATE		20
 #define XLOG_REG_TYPE_RUI_FORMAT	21
 #define XLOG_REG_TYPE_RUD_FORMAT	22
-#define XLOG_REG_TYPE_MAX		22
+#define XLOG_REG_TYPE_CUI_FORMAT	23
+#define XLOG_REG_TYPE_CUD_FORMAT	24
+#define XLOG_REG_TYPE_BUI_FORMAT	25
+#define XLOG_REG_TYPE_BUD_FORMAT	26
+#define XLOG_REG_TYPE_MAX		26
 
 /*
  * Flags to log operation header
@@ -231,6 +235,10 @@
 #define	XFS_LI_ICREATE		0x123f
 #define	XFS_LI_RUI		0x1240	/* rmap update intent */
 #define	XFS_LI_RUD		0x1241
+#define	XFS_LI_CUI		0x1242	/* refcount update intent */
+#define	XFS_LI_CUD		0x1243
+#define	XFS_LI_BUI		0x1244	/* bmbt update intent */
+#define	XFS_LI_BUD		0x1245
 
 #define XFS_LI_TYPE_DESC \
 	{ XFS_LI_EFI,		"XFS_LI_EFI" }, \
@@ -242,7 +250,11 @@
 	{ XFS_LI_QUOTAOFF,	"XFS_LI_QUOTAOFF" }, \
 	{ XFS_LI_ICREATE,	"XFS_LI_ICREATE" }, \
 	{ XFS_LI_RUI,		"XFS_LI_RUI" }, \
-	{ XFS_LI_RUD,		"XFS_LI_RUD" }
+	{ XFS_LI_RUD,		"XFS_LI_RUD" }, \
+	{ XFS_LI_CUI,		"XFS_LI_CUI" }, \
+	{ XFS_LI_CUD,		"XFS_LI_CUD" }, \
+	{ XFS_LI_BUI,		"XFS_LI_BUI" }, \
+	{ XFS_LI_BUD,		"XFS_LI_BUD" }
 
 /*
  * Inode Log Item Format definitions.
@@ -411,7 +423,8 @@
 	__uint64_t	di_changecount;	/* number of attribute changes */
 	xfs_lsn_t	di_lsn;		/* flush sequence */
 	__uint64_t	di_flags2;	/* more random flags */
-	__uint8_t	di_pad2[16];	/* more padding for future expansion */
+	__uint32_t	di_cowextsize;	/* basic cow extent size for file */
+	__uint8_t	di_pad2[12];	/* more padding for future expansion */
 
 	/* fields only written to during inode creation */
 	xfs_ictimestamp_t di_crtime;	/* time created */
@@ -622,8 +635,11 @@
 
 /* rmap me_flags: upper bits are flags, lower byte is type code */
 #define XFS_RMAP_EXTENT_MAP		1
+#define XFS_RMAP_EXTENT_MAP_SHARED	2
 #define XFS_RMAP_EXTENT_UNMAP		3
+#define XFS_RMAP_EXTENT_UNMAP_SHARED	4
 #define XFS_RMAP_EXTENT_CONVERT		5
+#define XFS_RMAP_EXTENT_CONVERT_SHARED	6
 #define XFS_RMAP_EXTENT_ALLOC		7
 #define XFS_RMAP_EXTENT_FREE		8
 #define XFS_RMAP_EXTENT_TYPE_MASK	0xFF
@@ -671,6 +687,102 @@
 };
 
 /*
+ * CUI/CUD (refcount update) log format definitions
+ */
+struct xfs_phys_extent {
+	__uint64_t		pe_startblock;
+	__uint32_t		pe_len;
+	__uint32_t		pe_flags;
+};
+
+/* refcount pe_flags: upper bits are flags, lower byte is type code */
+/* Type codes are taken directly from enum xfs_refcount_intent_type. */
+#define XFS_REFCOUNT_EXTENT_TYPE_MASK	0xFF
+
+#define XFS_REFCOUNT_EXTENT_FLAGS	(XFS_REFCOUNT_EXTENT_TYPE_MASK)
+
+/*
+ * This is the structure used to lay out a cui log item in the
+ * log.  The cui_extents field is a variable size array whose
+ * size is given by cui_nextents.
+ */
+struct xfs_cui_log_format {
+	__uint16_t		cui_type;	/* cui log item type */
+	__uint16_t		cui_size;	/* size of this item */
+	__uint32_t		cui_nextents;	/* # extents to free */
+	__uint64_t		cui_id;		/* cui identifier */
+	struct xfs_phys_extent	cui_extents[];	/* array of extents */
+};
+
+static inline size_t
+xfs_cui_log_format_sizeof(
+	unsigned int		nr)
+{
+	return sizeof(struct xfs_cui_log_format) +
+			nr * sizeof(struct xfs_phys_extent);
+}
+
+/*
+ * This is the structure used to lay out a cud log item in the
+ * log.  The cud_extents array is a variable size array whose
+ * size is given by cud_nextents;
+ */
+struct xfs_cud_log_format {
+	__uint16_t		cud_type;	/* cud log item type */
+	__uint16_t		cud_size;	/* size of this item */
+	__uint32_t		__pad;
+	__uint64_t		cud_cui_id;	/* id of corresponding cui */
+};
+
+/*
+ * BUI/BUD (inode block mapping) log format definitions
+ */
+
+/* bmbt me_flags: upper bits are flags, lower byte is type code */
+/* Type codes are taken directly from enum xfs_bmap_intent_type. */
+#define XFS_BMAP_EXTENT_TYPE_MASK	0xFF
+
+#define XFS_BMAP_EXTENT_ATTR_FORK	(1U << 31)
+#define XFS_BMAP_EXTENT_UNWRITTEN	(1U << 30)
+
+#define XFS_BMAP_EXTENT_FLAGS		(XFS_BMAP_EXTENT_TYPE_MASK | \
+					 XFS_BMAP_EXTENT_ATTR_FORK | \
+					 XFS_BMAP_EXTENT_UNWRITTEN)
+
+/*
+ * This is the structure used to lay out an bui log item in the
+ * log.  The bui_extents field is a variable size array whose
+ * size is given by bui_nextents.
+ */
+struct xfs_bui_log_format {
+	__uint16_t		bui_type;	/* bui log item type */
+	__uint16_t		bui_size;	/* size of this item */
+	__uint32_t		bui_nextents;	/* # extents to free */
+	__uint64_t		bui_id;		/* bui identifier */
+	struct xfs_map_extent	bui_extents[];	/* array of extents to bmap */
+};
+
+static inline size_t
+xfs_bui_log_format_sizeof(
+	unsigned int		nr)
+{
+	return sizeof(struct xfs_bui_log_format) +
+			nr * sizeof(struct xfs_map_extent);
+}
+
+/*
+ * This is the structure used to lay out an bud log item in the
+ * log.  The bud_extents array is a variable size array whose
+ * size is given by bud_nextents;
+ */
+struct xfs_bud_log_format {
+	__uint16_t		bud_type;	/* bud log item type */
+	__uint16_t		bud_size;	/* size of this item */
+	__uint32_t		__pad;
+	__uint64_t		bud_bui_id;	/* id of corresponding bui */
+};
+
+/*
  * Dquot Log format definitions.
  *
  * The first two fields must be the type and size fitting into
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
new file mode 100644
index 0000000..b177ef3
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -0,0 +1,1698 @@
+/*
+ * Copyright (C) 2016 Oracle.  All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bmap.h"
+#include "xfs_refcount_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_error.h"
+#include "xfs_trace.h"
+#include "xfs_cksum.h"
+#include "xfs_trans.h"
+#include "xfs_bit.h"
+#include "xfs_refcount.h"
+#include "xfs_rmap.h"
+
+/* Allowable refcount adjustment amounts. */
+enum xfs_refc_adjust_op {
+	XFS_REFCOUNT_ADJUST_INCREASE	= 1,
+	XFS_REFCOUNT_ADJUST_DECREASE	= -1,
+	XFS_REFCOUNT_ADJUST_COW_ALLOC	= 0,
+	XFS_REFCOUNT_ADJUST_COW_FREE	= -1,
+};
+
+STATIC int __xfs_refcount_cow_alloc(struct xfs_btree_cur *rcur,
+		xfs_agblock_t agbno, xfs_extlen_t aglen,
+		struct xfs_defer_ops *dfops);
+STATIC int __xfs_refcount_cow_free(struct xfs_btree_cur *rcur,
+		xfs_agblock_t agbno, xfs_extlen_t aglen,
+		struct xfs_defer_ops *dfops);
+
+/*
+ * Look up the first record less than or equal to [bno, len] in the btree
+ * given by cur.
+ */
+int
+xfs_refcount_lookup_le(
+	struct xfs_btree_cur	*cur,
+	xfs_agblock_t		bno,
+	int			*stat)
+{
+	trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_private.a.agno, bno,
+			XFS_LOOKUP_LE);
+	cur->bc_rec.rc.rc_startblock = bno;
+	cur->bc_rec.rc.rc_blockcount = 0;
+	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
+}
+
+/*
+ * Look up the first record greater than or equal to [bno, len] in the btree
+ * given by cur.
+ */
+int
+xfs_refcount_lookup_ge(
+	struct xfs_btree_cur	*cur,
+	xfs_agblock_t		bno,
+	int			*stat)
+{
+	trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_private.a.agno, bno,
+			XFS_LOOKUP_GE);
+	cur->bc_rec.rc.rc_startblock = bno;
+	cur->bc_rec.rc.rc_blockcount = 0;
+	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
+}
+
+/* Convert on-disk record to in-core format. */
+static inline void
+xfs_refcount_btrec_to_irec(
+	union xfs_btree_rec		*rec,
+	struct xfs_refcount_irec	*irec)
+{
+	irec->rc_startblock = be32_to_cpu(rec->refc.rc_startblock);
+	irec->rc_blockcount = be32_to_cpu(rec->refc.rc_blockcount);
+	irec->rc_refcount = be32_to_cpu(rec->refc.rc_refcount);
+}
+
+/*
+ * Get the data from the pointed-to record.
+ */
+int
+xfs_refcount_get_rec(
+	struct xfs_btree_cur		*cur,
+	struct xfs_refcount_irec	*irec,
+	int				*stat)
+{
+	union xfs_btree_rec		*rec;
+	int				error;
+
+	error = xfs_btree_get_rec(cur, &rec, stat);
+	if (!error && *stat == 1) {
+		xfs_refcount_btrec_to_irec(rec, irec);
+		trace_xfs_refcount_get(cur->bc_mp, cur->bc_private.a.agno,
+				irec);
+	}
+	return error;
+}
+
+/*
+ * Update the record referred to by cur to the value given
+ * by [bno, len, refcount].
+ * This either works (return 0) or gets an EFSCORRUPTED error.
+ */
+STATIC int
+xfs_refcount_update(
+	struct xfs_btree_cur		*cur,
+	struct xfs_refcount_irec	*irec)
+{
+	union xfs_btree_rec	rec;
+	int			error;
+
+	trace_xfs_refcount_update(cur->bc_mp, cur->bc_private.a.agno, irec);
+	rec.refc.rc_startblock = cpu_to_be32(irec->rc_startblock);
+	rec.refc.rc_blockcount = cpu_to_be32(irec->rc_blockcount);
+	rec.refc.rc_refcount = cpu_to_be32(irec->rc_refcount);
+	error = xfs_btree_update(cur, &rec);
+	if (error)
+		trace_xfs_refcount_update_error(cur->bc_mp,
+				cur->bc_private.a.agno, error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Insert the record referred to by cur to the value given
+ * by [bno, len, refcount].
+ * This either works (return 0) or gets an EFSCORRUPTED error.
+ */
+STATIC int
+xfs_refcount_insert(
+	struct xfs_btree_cur		*cur,
+	struct xfs_refcount_irec	*irec,
+	int				*i)
+{
+	int				error;
+
+	trace_xfs_refcount_insert(cur->bc_mp, cur->bc_private.a.agno, irec);
+	cur->bc_rec.rc.rc_startblock = irec->rc_startblock;
+	cur->bc_rec.rc.rc_blockcount = irec->rc_blockcount;
+	cur->bc_rec.rc.rc_refcount = irec->rc_refcount;
+	error = xfs_btree_insert(cur, i);
+	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, *i == 1, out_error);
+out_error:
+	if (error)
+		trace_xfs_refcount_insert_error(cur->bc_mp,
+				cur->bc_private.a.agno, error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Remove the record referred to by cur, then set the pointer to the spot
+ * where the record could be re-inserted, in case we want to increment or
+ * decrement the cursor.
+ * This either works (return 0) or gets an EFSCORRUPTED error.
+ */
+STATIC int
+xfs_refcount_delete(
+	struct xfs_btree_cur	*cur,
+	int			*i)
+{
+	struct xfs_refcount_irec	irec;
+	int			found_rec;
+	int			error;
+
+	error = xfs_refcount_get_rec(cur, &irec, &found_rec);
+	if (error)
+		goto out_error;
+	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+	trace_xfs_refcount_delete(cur->bc_mp, cur->bc_private.a.agno, &irec);
+	error = xfs_btree_delete(cur, i);
+	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, *i == 1, out_error);
+	if (error)
+		goto out_error;
+	error = xfs_refcount_lookup_ge(cur, irec.rc_startblock, &found_rec);
+out_error:
+	if (error)
+		trace_xfs_refcount_delete_error(cur->bc_mp,
+				cur->bc_private.a.agno, error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Adjusting the Reference Count
+ *
+ * As stated elsewhere, the reference count btree (refcbt) stores
+ * >1 reference counts for extents of physical blocks.  In this
+ * operation, we're either raising or lowering the reference count of
+ * some subrange stored in the tree:
+ *
+ *      <------ adjustment range ------>
+ * ----+   +---+-----+ +--+--------+---------
+ *  2  |   | 3 |  4  | |17|   55   |   10
+ * ----+   +---+-----+ +--+--------+---------
+ * X axis is physical blocks number;
+ * reference counts are the numbers inside the rectangles
+ *
+ * The first thing we need to do is to ensure that there are no
+ * refcount extents crossing either boundary of the range to be
+ * adjusted.  For any extent that does cross a boundary, split it into
+ * two extents so that we can increment the refcount of one of the
+ * pieces later:
+ *
+ *      <------ adjustment range ------>
+ * ----+   +---+-----+ +--+--------+----+----
+ *  2  |   | 3 |  2  | |17|   55   | 10 | 10
+ * ----+   +---+-----+ +--+--------+----+----
+ *
+ * For this next step, let's assume that all the physical blocks in
+ * the adjustment range are mapped to a file and are therefore in use
+ * at least once.  Therefore, we can infer that any gap in the
+ * refcount tree within the adjustment range represents a physical
+ * extent with refcount == 1:
+ *
+ *      <------ adjustment range ------>
+ * ----+---+---+-----+-+--+--------+----+----
+ *  2  |"1"| 3 |  2  |1|17|   55   | 10 | 10
+ * ----+---+---+-----+-+--+--------+----+----
+ *      ^
+ *
+ * For each extent that falls within the interval range, figure out
+ * which extent is to the left or the right of that extent.  Now we
+ * have a left, current, and right extent.  If the new reference count
+ * of the center extent enables us to merge left, center, and right
+ * into one record covering all three, do so.  If the center extent is
+ * at the left end of the range, abuts the left extent, and its new
+ * reference count matches the left extent's record, then merge them.
+ * If the center extent is at the right end of the range, abuts the
+ * right extent, and the reference counts match, merge those.  In the
+ * example, we can left merge (assuming an increment operation):
+ *
+ *      <------ adjustment range ------>
+ * --------+---+-----+-+--+--------+----+----
+ *    2    | 3 |  2  |1|17|   55   | 10 | 10
+ * --------+---+-----+-+--+--------+----+----
+ *          ^
+ *
+ * For all other extents within the range, adjust the reference count
+ * or delete it if the refcount falls below 2.  If we were
+ * incrementing, the end result looks like this:
+ *
+ *      <------ adjustment range ------>
+ * --------+---+-----+-+--+--------+----+----
+ *    2    | 4 |  3  |2|18|   56   | 11 | 10
+ * --------+---+-----+-+--+--------+----+----
+ *
+ * The result of a decrement operation looks as such:
+ *
+ *      <------ adjustment range ------>
+ * ----+   +---+       +--+--------+----+----
+ *  2  |   | 2 |       |16|   54   |  9 | 10
+ * ----+   +---+       +--+--------+----+----
+ *      DDDD    111111DD
+ *
+ * The blocks marked "D" are freed; the blocks marked "1" are only
+ * referenced once and therefore the record is removed from the
+ * refcount btree.
+ */
+
+/* Next block after this extent. */
+static inline xfs_agblock_t
+xfs_refc_next(
+	struct xfs_refcount_irec	*rc)
+{
+	return rc->rc_startblock + rc->rc_blockcount;
+}
+
+/*
+ * Split a refcount extent that crosses agbno.
+ */
+STATIC int
+xfs_refcount_split_extent(
+	struct xfs_btree_cur		*cur,
+	xfs_agblock_t			agbno,
+	bool				*shape_changed)
+{
+	struct xfs_refcount_irec	rcext, tmp;
+	int				found_rec;
+	int				error;
+
+	*shape_changed = false;
+	error = xfs_refcount_lookup_le(cur, agbno, &found_rec);
+	if (error)
+		goto out_error;
+	if (!found_rec)
+		return 0;
+
+	error = xfs_refcount_get_rec(cur, &rcext, &found_rec);
+	if (error)
+		goto out_error;
+	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+	if (rcext.rc_startblock == agbno || xfs_refc_next(&rcext) <= agbno)
+		return 0;
+
+	*shape_changed = true;
+	trace_xfs_refcount_split_extent(cur->bc_mp, cur->bc_private.a.agno,
+			&rcext, agbno);
+
+	/* Establish the right extent. */
+	tmp = rcext;
+	tmp.rc_startblock = agbno;
+	tmp.rc_blockcount -= (agbno - rcext.rc_startblock);
+	error = xfs_refcount_update(cur, &tmp);
+	if (error)
+		goto out_error;
+
+	/* Insert the left extent. */
+	tmp = rcext;
+	tmp.rc_blockcount = agbno - rcext.rc_startblock;
+	error = xfs_refcount_insert(cur, &tmp, &found_rec);
+	if (error)
+		goto out_error;
+	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+	return error;
+
+out_error:
+	trace_xfs_refcount_split_extent_error(cur->bc_mp,
+			cur->bc_private.a.agno, error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Merge the left, center, and right extents.
+ */
+STATIC int
+xfs_refcount_merge_center_extents(
+	struct xfs_btree_cur		*cur,
+	struct xfs_refcount_irec	*left,
+	struct xfs_refcount_irec	*center,
+	struct xfs_refcount_irec	*right,
+	unsigned long long		extlen,
+	xfs_agblock_t			*agbno,
+	xfs_extlen_t			*aglen)
+{
+	int				error;
+	int				found_rec;
+
+	trace_xfs_refcount_merge_center_extents(cur->bc_mp,
+			cur->bc_private.a.agno, left, center, right);
+
+	/*
+	 * Make sure the center and right extents are not in the btree.
+	 * If the center extent was synthesized, the first delete call
+	 * removes the right extent and we skip the second deletion.
+	 * If center and right were in the btree, then the first delete
+	 * call removes the center and the second one removes the right
+	 * extent.
+	 */
+	error = xfs_refcount_lookup_ge(cur, center->rc_startblock,
+			&found_rec);
+	if (error)
+		goto out_error;
+	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+
+	error = xfs_refcount_delete(cur, &found_rec);
+	if (error)
+		goto out_error;
+	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+
+	if (center->rc_refcount > 1) {
+		error = xfs_refcount_delete(cur, &found_rec);
+		if (error)
+			goto out_error;
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
+				out_error);
+	}
+
+	/* Enlarge the left extent. */
+	error = xfs_refcount_lookup_le(cur, left->rc_startblock,
+			&found_rec);
+	if (error)
+		goto out_error;
+	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+
+	left->rc_blockcount = extlen;
+	error = xfs_refcount_update(cur, left);
+	if (error)
+		goto out_error;
+
+	*aglen = 0;
+	return error;
+
+out_error:
+	trace_xfs_refcount_merge_center_extents_error(cur->bc_mp,
+			cur->bc_private.a.agno, error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Merge with the left extent.
+ */
+STATIC int
+xfs_refcount_merge_left_extent(
+	struct xfs_btree_cur		*cur,
+	struct xfs_refcount_irec	*left,
+	struct xfs_refcount_irec	*cleft,
+	xfs_agblock_t			*agbno,
+	xfs_extlen_t			*aglen)
+{
+	int				error;
+	int				found_rec;
+
+	trace_xfs_refcount_merge_left_extent(cur->bc_mp,
+			cur->bc_private.a.agno, left, cleft);
+
+	/* If the extent at agbno (cleft) wasn't synthesized, remove it. */
+	if (cleft->rc_refcount > 1) {
+		error = xfs_refcount_lookup_le(cur, cleft->rc_startblock,
+				&found_rec);
+		if (error)
+			goto out_error;
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
+				out_error);
+
+		error = xfs_refcount_delete(cur, &found_rec);
+		if (error)
+			goto out_error;
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
+				out_error);
+	}
+
+	/* Enlarge the left extent. */
+	error = xfs_refcount_lookup_le(cur, left->rc_startblock,
+			&found_rec);
+	if (error)
+		goto out_error;
+	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+
+	left->rc_blockcount += cleft->rc_blockcount;
+	error = xfs_refcount_update(cur, left);
+	if (error)
+		goto out_error;
+
+	*agbno += cleft->rc_blockcount;
+	*aglen -= cleft->rc_blockcount;
+	return error;
+
+out_error:
+	trace_xfs_refcount_merge_left_extent_error(cur->bc_mp,
+			cur->bc_private.a.agno, error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Merge with the right extent.
+ */
+STATIC int
+xfs_refcount_merge_right_extent(
+	struct xfs_btree_cur		*cur,
+	struct xfs_refcount_irec	*right,
+	struct xfs_refcount_irec	*cright,
+	xfs_agblock_t			*agbno,
+	xfs_extlen_t			*aglen)
+{
+	int				error;
+	int				found_rec;
+
+	trace_xfs_refcount_merge_right_extent(cur->bc_mp,
+			cur->bc_private.a.agno, cright, right);
+
+	/*
+	 * If the extent ending at agbno+aglen (cright) wasn't synthesized,
+	 * remove it.
+	 */
+	if (cright->rc_refcount > 1) {
+		error = xfs_refcount_lookup_le(cur, cright->rc_startblock,
+			&found_rec);
+		if (error)
+			goto out_error;
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
+				out_error);
+
+		error = xfs_refcount_delete(cur, &found_rec);
+		if (error)
+			goto out_error;
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
+				out_error);
+	}
+
+	/* Enlarge the right extent. */
+	error = xfs_refcount_lookup_le(cur, right->rc_startblock,
+			&found_rec);
+	if (error)
+		goto out_error;
+	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+
+	right->rc_startblock -= cright->rc_blockcount;
+	right->rc_blockcount += cright->rc_blockcount;
+	error = xfs_refcount_update(cur, right);
+	if (error)
+		goto out_error;
+
+	*aglen -= cright->rc_blockcount;
+	return error;
+
+out_error:
+	trace_xfs_refcount_merge_right_extent_error(cur->bc_mp,
+			cur->bc_private.a.agno, error, _RET_IP_);
+	return error;
+}
+
+#define XFS_FIND_RCEXT_SHARED	1
+#define XFS_FIND_RCEXT_COW	2
+/*
+ * Find the left extent and the one after it (cleft).  This function assumes
+ * that we've already split any extent crossing agbno.
+ */
+STATIC int
+xfs_refcount_find_left_extents(
+	struct xfs_btree_cur		*cur,
+	struct xfs_refcount_irec	*left,
+	struct xfs_refcount_irec	*cleft,
+	xfs_agblock_t			agbno,
+	xfs_extlen_t			aglen,
+	int				flags)
+{
+	struct xfs_refcount_irec	tmp;
+	int				error;
+	int				found_rec;
+
+	left->rc_startblock = cleft->rc_startblock = NULLAGBLOCK;
+	error = xfs_refcount_lookup_le(cur, agbno - 1, &found_rec);
+	if (error)
+		goto out_error;
+	if (!found_rec)
+		return 0;
+
+	error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
+	if (error)
+		goto out_error;
+	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+
+	if (xfs_refc_next(&tmp) != agbno)
+		return 0;
+	if ((flags & XFS_FIND_RCEXT_SHARED) && tmp.rc_refcount < 2)
+		return 0;
+	if ((flags & XFS_FIND_RCEXT_COW) && tmp.rc_refcount > 1)
+		return 0;
+	/* We have a left extent; retrieve (or invent) the next right one */
+	*left = tmp;
+
+	error = xfs_btree_increment(cur, 0, &found_rec);
+	if (error)
+		goto out_error;
+	if (found_rec) {
+		error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
+		if (error)
+			goto out_error;
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
+				out_error);
+
+		/* if tmp starts at the end of our range, just use that */
+		if (tmp.rc_startblock == agbno)
+			*cleft = tmp;
+		else {
+			/*
+			 * There's a gap in the refcntbt at the start of the
+			 * range we're interested in (refcount == 1) so
+			 * synthesize the implied extent and pass it back.
+			 * We assume here that the agbno/aglen range was
+			 * passed in from a data fork extent mapping and
+			 * therefore is allocated to exactly one owner.
+			 */
+			cleft->rc_startblock = agbno;
+			cleft->rc_blockcount = min(aglen,
+					tmp.rc_startblock - agbno);
+			cleft->rc_refcount = 1;
+		}
+	} else {
+		/*
+		 * No extents, so pretend that there's one covering the whole
+		 * range.
+		 */
+		cleft->rc_startblock = agbno;
+		cleft->rc_blockcount = aglen;
+		cleft->rc_refcount = 1;
+	}
+	trace_xfs_refcount_find_left_extent(cur->bc_mp, cur->bc_private.a.agno,
+			left, cleft, agbno);
+	return error;
+
+out_error:
+	trace_xfs_refcount_find_left_extent_error(cur->bc_mp,
+			cur->bc_private.a.agno, error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Find the right extent and the one before it (cright).  This function
+ * assumes that we've already split any extents crossing agbno + aglen.
+ */
+STATIC int
+xfs_refcount_find_right_extents(
+	struct xfs_btree_cur		*cur,
+	struct xfs_refcount_irec	*right,
+	struct xfs_refcount_irec	*cright,
+	xfs_agblock_t			agbno,
+	xfs_extlen_t			aglen,
+	int				flags)
+{
+	struct xfs_refcount_irec	tmp;
+	int				error;
+	int				found_rec;
+
+	right->rc_startblock = cright->rc_startblock = NULLAGBLOCK;
+	error = xfs_refcount_lookup_ge(cur, agbno + aglen, &found_rec);
+	if (error)
+		goto out_error;
+	if (!found_rec)
+		return 0;
+
+	error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
+	if (error)
+		goto out_error;
+	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+
+	if (tmp.rc_startblock != agbno + aglen)
+		return 0;
+	if ((flags & XFS_FIND_RCEXT_SHARED) && tmp.rc_refcount < 2)
+		return 0;
+	if ((flags & XFS_FIND_RCEXT_COW) && tmp.rc_refcount > 1)
+		return 0;
+	/* We have a right extent; retrieve (or invent) the next left one */
+	*right = tmp;
+
+	error = xfs_btree_decrement(cur, 0, &found_rec);
+	if (error)
+		goto out_error;
+	if (found_rec) {
+		error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
+		if (error)
+			goto out_error;
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
+				out_error);
+
+		/* if tmp ends at the end of our range, just use that */
+		if (xfs_refc_next(&tmp) == agbno + aglen)
+			*cright = tmp;
+		else {
+			/*
+			 * There's a gap in the refcntbt at the end of the
+			 * range we're interested in (refcount == 1) so
+			 * create the implied extent and pass it back.
+			 * We assume here that the agbno/aglen range was
+			 * passed in from a data fork extent mapping and
+			 * therefore is allocated to exactly one owner.
+			 */
+			cright->rc_startblock = max(agbno, xfs_refc_next(&tmp));
+			cright->rc_blockcount = right->rc_startblock -
+					cright->rc_startblock;
+			cright->rc_refcount = 1;
+		}
+	} else {
+		/*
+		 * No extents, so pretend that there's one covering the whole
+		 * range.
+		 */
+		cright->rc_startblock = agbno;
+		cright->rc_blockcount = aglen;
+		cright->rc_refcount = 1;
+	}
+	trace_xfs_refcount_find_right_extent(cur->bc_mp, cur->bc_private.a.agno,
+			cright, right, agbno + aglen);
+	return error;
+
+out_error:
+	trace_xfs_refcount_find_right_extent_error(cur->bc_mp,
+			cur->bc_private.a.agno, error, _RET_IP_);
+	return error;
+}
+
+/* Is this extent valid? */
+static inline bool
+xfs_refc_valid(
+	struct xfs_refcount_irec	*rc)
+{
+	return rc->rc_startblock != NULLAGBLOCK;
+}
+
+/*
+ * Try to merge with any extents on the boundaries of the adjustment range.
+ */
+STATIC int
+xfs_refcount_merge_extents(
+	struct xfs_btree_cur	*cur,
+	xfs_agblock_t		*agbno,
+	xfs_extlen_t		*aglen,
+	enum xfs_refc_adjust_op adjust,
+	int			flags,
+	bool			*shape_changed)
+{
+	struct xfs_refcount_irec	left = {0}, cleft = {0};
+	struct xfs_refcount_irec	cright = {0}, right = {0};
+	int				error;
+	unsigned long long		ulen;
+	bool				cequal;
+
+	*shape_changed = false;
+	/*
+	 * Find the extent just below agbno [left], just above agbno [cleft],
+	 * just below (agbno + aglen) [cright], and just above (agbno + aglen)
+	 * [right].
+	 */
+	error = xfs_refcount_find_left_extents(cur, &left, &cleft, *agbno,
+			*aglen, flags);
+	if (error)
+		return error;
+	error = xfs_refcount_find_right_extents(cur, &right, &cright, *agbno,
+			*aglen, flags);
+	if (error)
+		return error;
+
+	/* No left or right extent to merge; exit. */
+	if (!xfs_refc_valid(&left) && !xfs_refc_valid(&right))
+		return 0;
+
+	cequal = (cleft.rc_startblock == cright.rc_startblock) &&
+		 (cleft.rc_blockcount == cright.rc_blockcount);
+
+	/* Try to merge left, cleft, and right.  cleft must == cright. */
+	ulen = (unsigned long long)left.rc_blockcount + cleft.rc_blockcount +
+			right.rc_blockcount;
+	if (xfs_refc_valid(&left) && xfs_refc_valid(&right) &&
+	    xfs_refc_valid(&cleft) && xfs_refc_valid(&cright) && cequal &&
+	    left.rc_refcount == cleft.rc_refcount + adjust &&
+	    right.rc_refcount == cleft.rc_refcount + adjust &&
+	    ulen < MAXREFCEXTLEN) {
+		*shape_changed = true;
+		return xfs_refcount_merge_center_extents(cur, &left, &cleft,
+				&right, ulen, agbno, aglen);
+	}
+
+	/* Try to merge left and cleft. */
+	ulen = (unsigned long long)left.rc_blockcount + cleft.rc_blockcount;
+	if (xfs_refc_valid(&left) && xfs_refc_valid(&cleft) &&
+	    left.rc_refcount == cleft.rc_refcount + adjust &&
+	    ulen < MAXREFCEXTLEN) {
+		*shape_changed = true;
+		error = xfs_refcount_merge_left_extent(cur, &left, &cleft,
+				agbno, aglen);
+		if (error)
+			return error;
+
+		/*
+		 * If we just merged left + cleft and cleft == cright,
+		 * we no longer have a cright to merge with right.  We're done.
+		 */
+		if (cequal)
+			return 0;
+	}
+
+	/* Try to merge cright and right. */
+	ulen = (unsigned long long)right.rc_blockcount + cright.rc_blockcount;
+	if (xfs_refc_valid(&right) && xfs_refc_valid(&cright) &&
+	    right.rc_refcount == cright.rc_refcount + adjust &&
+	    ulen < MAXREFCEXTLEN) {
+		*shape_changed = true;
+		return xfs_refcount_merge_right_extent(cur, &right, &cright,
+				agbno, aglen);
+	}
+
+	return error;
+}
+
+/*
+ * While we're adjusting the refcounts records of an extent, we have
+ * to keep an eye on the number of extents we're dirtying -- run too
+ * many in a single transaction and we'll exceed the transaction's
+ * reservation and crash the fs.  Each record adds 12 bytes to the
+ * log (plus any key updates) so we'll conservatively assume 24 bytes
+ * per record.  We must also leave space for btree splits on both ends
+ * of the range and space for the CUD and a new CUI.
+ *
+ * XXX: This is a pretty hand-wavy estimate.  The penalty for guessing
+ * true incorrectly is a shutdown FS; the penalty for guessing false
+ * incorrectly is more transaction rolls than might be necessary.
+ * Be conservative here.
+ */
+static bool
+xfs_refcount_still_have_space(
+	struct xfs_btree_cur		*cur)
+{
+	unsigned long			overhead;
+
+	overhead = cur->bc_private.a.priv.refc.shape_changes *
+			xfs_allocfree_log_count(cur->bc_mp, 1);
+	overhead *= cur->bc_mp->m_sb.sb_blocksize;
+
+	/*
+	 * Only allow 2 refcount extent updates per transaction if the
+	 * refcount continue update "error" has been injected.
+	 */
+	if (cur->bc_private.a.priv.refc.nr_ops > 2 &&
+	    XFS_TEST_ERROR(false, cur->bc_mp,
+			XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE,
+			XFS_RANDOM_REFCOUNT_CONTINUE_UPDATE))
+		return false;
+
+	if (cur->bc_private.a.priv.refc.nr_ops == 0)
+		return true;
+	else if (overhead > cur->bc_tp->t_log_res)
+		return false;
+	return  cur->bc_tp->t_log_res - overhead >
+		cur->bc_private.a.priv.refc.nr_ops * 32;
+}
+
+/*
+ * Adjust the refcounts of middle extents.  At this point we should have
+ * split extents that crossed the adjustment range; merged with adjacent
+ * extents; and updated agbno/aglen to reflect the merges.  Therefore,
+ * all we have to do is update the extents inside [agbno, agbno + aglen].
+ */
+STATIC int
+xfs_refcount_adjust_extents(
+	struct xfs_btree_cur	*cur,
+	xfs_agblock_t		*agbno,
+	xfs_extlen_t		*aglen,
+	enum xfs_refc_adjust_op	adj,
+	struct xfs_defer_ops	*dfops,
+	struct xfs_owner_info	*oinfo)
+{
+	struct xfs_refcount_irec	ext, tmp;
+	int				error;
+	int				found_rec, found_tmp;
+	xfs_fsblock_t			fsbno;
+
+	/* Merging did all the work already. */
+	if (*aglen == 0)
+		return 0;
+
+	error = xfs_refcount_lookup_ge(cur, *agbno, &found_rec);
+	if (error)
+		goto out_error;
+
+	while (*aglen > 0 && xfs_refcount_still_have_space(cur)) {
+		error = xfs_refcount_get_rec(cur, &ext, &found_rec);
+		if (error)
+			goto out_error;
+		if (!found_rec) {
+			ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks;
+			ext.rc_blockcount = 0;
+			ext.rc_refcount = 0;
+		}
+
+		/*
+		 * Deal with a hole in the refcount tree; if a file maps to
+		 * these blocks and there's no refcountbt record, pretend that
+		 * there is one with refcount == 1.
+		 */
+		if (ext.rc_startblock != *agbno) {
+			tmp.rc_startblock = *agbno;
+			tmp.rc_blockcount = min(*aglen,
+					ext.rc_startblock - *agbno);
+			tmp.rc_refcount = 1 + adj;
+			trace_xfs_refcount_modify_extent(cur->bc_mp,
+					cur->bc_private.a.agno, &tmp);
+
+			/*
+			 * Either cover the hole (increment) or
+			 * delete the range (decrement).
+			 */
+			if (tmp.rc_refcount) {
+				error = xfs_refcount_insert(cur, &tmp,
+						&found_tmp);
+				if (error)
+					goto out_error;
+				XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
+						found_tmp == 1, out_error);
+				cur->bc_private.a.priv.refc.nr_ops++;
+			} else {
+				fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
+						cur->bc_private.a.agno,
+						tmp.rc_startblock);
+				xfs_bmap_add_free(cur->bc_mp, dfops, fsbno,
+						tmp.rc_blockcount, oinfo);
+			}
+
+			(*agbno) += tmp.rc_blockcount;
+			(*aglen) -= tmp.rc_blockcount;
+
+			error = xfs_refcount_lookup_ge(cur, *agbno,
+					&found_rec);
+			if (error)
+				goto out_error;
+		}
+
+		/* Stop if there's nothing left to modify */
+		if (*aglen == 0 || !xfs_refcount_still_have_space(cur))
+			break;
+
+		/*
+		 * Adjust the reference count and either update the tree
+		 * (incr) or free the blocks (decr).
+		 */
+		if (ext.rc_refcount == MAXREFCOUNT)
+			goto skip;
+		ext.rc_refcount += adj;
+		trace_xfs_refcount_modify_extent(cur->bc_mp,
+				cur->bc_private.a.agno, &ext);
+		if (ext.rc_refcount > 1) {
+			error = xfs_refcount_update(cur, &ext);
+			if (error)
+				goto out_error;
+			cur->bc_private.a.priv.refc.nr_ops++;
+		} else if (ext.rc_refcount == 1) {
+			error = xfs_refcount_delete(cur, &found_rec);
+			if (error)
+				goto out_error;
+			XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
+					found_rec == 1, out_error);
+			cur->bc_private.a.priv.refc.nr_ops++;
+			goto advloop;
+		} else {
+			fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
+					cur->bc_private.a.agno,
+					ext.rc_startblock);
+			xfs_bmap_add_free(cur->bc_mp, dfops, fsbno,
+					ext.rc_blockcount, oinfo);
+		}
+
+skip:
+		error = xfs_btree_increment(cur, 0, &found_rec);
+		if (error)
+			goto out_error;
+
+advloop:
+		(*agbno) += ext.rc_blockcount;
+		(*aglen) -= ext.rc_blockcount;
+	}
+
+	return error;
+out_error:
+	trace_xfs_refcount_modify_extent_error(cur->bc_mp,
+			cur->bc_private.a.agno, error, _RET_IP_);
+	return error;
+}
+
+/* Adjust the reference count of a range of AG blocks. */
+STATIC int
+xfs_refcount_adjust(
+	struct xfs_btree_cur	*cur,
+	xfs_agblock_t		agbno,
+	xfs_extlen_t		aglen,
+	xfs_agblock_t		*new_agbno,
+	xfs_extlen_t		*new_aglen,
+	enum xfs_refc_adjust_op	adj,
+	struct xfs_defer_ops	*dfops,
+	struct xfs_owner_info	*oinfo)
+{
+	bool			shape_changed;
+	int			shape_changes = 0;
+	int			error;
+
+	*new_agbno = agbno;
+	*new_aglen = aglen;
+	if (adj == XFS_REFCOUNT_ADJUST_INCREASE)
+		trace_xfs_refcount_increase(cur->bc_mp, cur->bc_private.a.agno,
+				agbno, aglen);
+	else
+		trace_xfs_refcount_decrease(cur->bc_mp, cur->bc_private.a.agno,
+				agbno, aglen);
+
+	/*
+	 * Ensure that no rcextents cross the boundary of the adjustment range.
+	 */
+	error = xfs_refcount_split_extent(cur, agbno, &shape_changed);
+	if (error)
+		goto out_error;
+	if (shape_changed)
+		shape_changes++;
+
+	error = xfs_refcount_split_extent(cur, agbno + aglen, &shape_changed);
+	if (error)
+		goto out_error;
+	if (shape_changed)
+		shape_changes++;
+
+	/*
+	 * Try to merge with the left or right extents of the range.
+	 */
+	error = xfs_refcount_merge_extents(cur, new_agbno, new_aglen, adj,
+			XFS_FIND_RCEXT_SHARED, &shape_changed);
+	if (error)
+		goto out_error;
+	if (shape_changed)
+		shape_changes++;
+	if (shape_changes)
+		cur->bc_private.a.priv.refc.shape_changes++;
+
+	/* Now that we've taken care of the ends, adjust the middle extents */
+	error = xfs_refcount_adjust_extents(cur, new_agbno, new_aglen,
+			adj, dfops, oinfo);
+	if (error)
+		goto out_error;
+
+	return 0;
+
+out_error:
+	trace_xfs_refcount_adjust_error(cur->bc_mp, cur->bc_private.a.agno,
+			error, _RET_IP_);
+	return error;
+}
+
+/* Clean up after calling xfs_refcount_finish_one. */
+void
+xfs_refcount_finish_one_cleanup(
+	struct xfs_trans	*tp,
+	struct xfs_btree_cur	*rcur,
+	int			error)
+{
+	struct xfs_buf		*agbp;
+
+	if (rcur == NULL)
+		return;
+	agbp = rcur->bc_private.a.agbp;
+	xfs_btree_del_cursor(rcur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+	if (error)
+		xfs_trans_brelse(tp, agbp);
+}
+
+/*
+ * Process one of the deferred refcount operations.  We pass back the
+ * btree cursor to maintain our lock on the btree between calls.
+ * This saves time and eliminates a buffer deadlock between the
+ * superblock and the AGF because we'll always grab them in the same
+ * order.
+ */
+int
+xfs_refcount_finish_one(
+	struct xfs_trans		*tp,
+	struct xfs_defer_ops		*dfops,
+	enum xfs_refcount_intent_type	type,
+	xfs_fsblock_t			startblock,
+	xfs_extlen_t			blockcount,
+	xfs_fsblock_t			*new_fsb,
+	xfs_extlen_t			*new_len,
+	struct xfs_btree_cur		**pcur)
+{
+	struct xfs_mount		*mp = tp->t_mountp;
+	struct xfs_btree_cur		*rcur;
+	struct xfs_buf			*agbp = NULL;
+	int				error = 0;
+	xfs_agnumber_t			agno;
+	xfs_agblock_t			bno;
+	xfs_agblock_t			new_agbno;
+	unsigned long			nr_ops = 0;
+	int				shape_changes = 0;
+
+	agno = XFS_FSB_TO_AGNO(mp, startblock);
+	ASSERT(agno != NULLAGNUMBER);
+	bno = XFS_FSB_TO_AGBNO(mp, startblock);
+
+	trace_xfs_refcount_deferred(mp, XFS_FSB_TO_AGNO(mp, startblock),
+			type, XFS_FSB_TO_AGBNO(mp, startblock),
+			blockcount);
+
+	if (XFS_TEST_ERROR(false, mp,
+			XFS_ERRTAG_REFCOUNT_FINISH_ONE,
+			XFS_RANDOM_REFCOUNT_FINISH_ONE))
+		return -EIO;
+
+	/*
+	 * If we haven't gotten a cursor or the cursor AG doesn't match
+	 * the startblock, get one now.
+	 */
+	rcur = *pcur;
+	if (rcur != NULL && rcur->bc_private.a.agno != agno) {
+		nr_ops = rcur->bc_private.a.priv.refc.nr_ops;
+		shape_changes = rcur->bc_private.a.priv.refc.shape_changes;
+		xfs_refcount_finish_one_cleanup(tp, rcur, 0);
+		rcur = NULL;
+		*pcur = NULL;
+	}
+	if (rcur == NULL) {
+		error = xfs_alloc_read_agf(tp->t_mountp, tp, agno,
+				XFS_ALLOC_FLAG_FREEING, &agbp);
+		if (error)
+			return error;
+		if (!agbp)
+			return -EFSCORRUPTED;
+
+		rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, dfops);
+		if (!rcur) {
+			error = -ENOMEM;
+			goto out_cur;
+		}
+		rcur->bc_private.a.priv.refc.nr_ops = nr_ops;
+		rcur->bc_private.a.priv.refc.shape_changes = shape_changes;
+	}
+	*pcur = rcur;
+
+	switch (type) {
+	case XFS_REFCOUNT_INCREASE:
+		error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
+			new_len, XFS_REFCOUNT_ADJUST_INCREASE, dfops, NULL);
+		*new_fsb = XFS_AGB_TO_FSB(mp, agno, new_agbno);
+		break;
+	case XFS_REFCOUNT_DECREASE:
+		error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
+			new_len, XFS_REFCOUNT_ADJUST_DECREASE, dfops, NULL);
+		*new_fsb = XFS_AGB_TO_FSB(mp, agno, new_agbno);
+		break;
+	case XFS_REFCOUNT_ALLOC_COW:
+		*new_fsb = startblock + blockcount;
+		*new_len = 0;
+		error = __xfs_refcount_cow_alloc(rcur, bno, blockcount, dfops);
+		break;
+	case XFS_REFCOUNT_FREE_COW:
+		*new_fsb = startblock + blockcount;
+		*new_len = 0;
+		error = __xfs_refcount_cow_free(rcur, bno, blockcount, dfops);
+		break;
+	default:
+		ASSERT(0);
+		error = -EFSCORRUPTED;
+	}
+	if (!error && *new_len > 0)
+		trace_xfs_refcount_finish_one_leftover(mp, agno, type,
+				bno, blockcount, new_agbno, *new_len);
+	return error;
+
+out_cur:
+	xfs_trans_brelse(tp, agbp);
+
+	return error;
+}
+
+/*
+ * Record a refcount intent for later processing.
+ */
+static int
+__xfs_refcount_add(
+	struct xfs_mount		*mp,
+	struct xfs_defer_ops		*dfops,
+	enum xfs_refcount_intent_type	type,
+	xfs_fsblock_t			startblock,
+	xfs_extlen_t			blockcount)
+{
+	struct xfs_refcount_intent	*ri;
+
+	trace_xfs_refcount_defer(mp, XFS_FSB_TO_AGNO(mp, startblock),
+			type, XFS_FSB_TO_AGBNO(mp, startblock),
+			blockcount);
+
+	ri = kmem_alloc(sizeof(struct xfs_refcount_intent),
+			KM_SLEEP | KM_NOFS);
+	INIT_LIST_HEAD(&ri->ri_list);
+	ri->ri_type = type;
+	ri->ri_startblock = startblock;
+	ri->ri_blockcount = blockcount;
+
+	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_REFCOUNT, &ri->ri_list);
+	return 0;
+}
+
+/*
+ * Increase the reference count of the blocks backing a file's extent.
+ */
+int
+xfs_refcount_increase_extent(
+	struct xfs_mount		*mp,
+	struct xfs_defer_ops		*dfops,
+	struct xfs_bmbt_irec		*PREV)
+{
+	if (!xfs_sb_version_hasreflink(&mp->m_sb))
+		return 0;
+
+	return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_INCREASE,
+			PREV->br_startblock, PREV->br_blockcount);
+}
+
+/*
+ * Decrease the reference count of the blocks backing a file's extent.
+ */
+int
+xfs_refcount_decrease_extent(
+	struct xfs_mount		*mp,
+	struct xfs_defer_ops		*dfops,
+	struct xfs_bmbt_irec		*PREV)
+{
+	if (!xfs_sb_version_hasreflink(&mp->m_sb))
+		return 0;
+
+	return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_DECREASE,
+			PREV->br_startblock, PREV->br_blockcount);
+}
+
+/*
+ * Given an AG extent, find the lowest-numbered run of shared blocks
+ * within that range and return the range in fbno/flen.  If
+ * find_end_of_shared is set, return the longest contiguous extent of
+ * shared blocks; if not, just return the first extent we find.  If no
+ * shared blocks are found, fbno and flen will be set to NULLAGBLOCK
+ * and 0, respectively.
+ */
+int
+xfs_refcount_find_shared(
+	struct xfs_btree_cur		*cur,
+	xfs_agblock_t			agbno,
+	xfs_extlen_t			aglen,
+	xfs_agblock_t			*fbno,
+	xfs_extlen_t			*flen,
+	bool				find_end_of_shared)
+{
+	struct xfs_refcount_irec	tmp;
+	int				i;
+	int				have;
+	int				error;
+
+	trace_xfs_refcount_find_shared(cur->bc_mp, cur->bc_private.a.agno,
+			agbno, aglen);
+
+	/* By default, skip the whole range */
+	*fbno = NULLAGBLOCK;
+	*flen = 0;
+
+	/* Try to find a refcount extent that crosses the start */
+	error = xfs_refcount_lookup_le(cur, agbno, &have);
+	if (error)
+		goto out_error;
+	if (!have) {
+		/* No left extent, look at the next one */
+		error = xfs_btree_increment(cur, 0, &have);
+		if (error)
+			goto out_error;
+		if (!have)
+			goto done;
+	}
+	error = xfs_refcount_get_rec(cur, &tmp, &i);
+	if (error)
+		goto out_error;
+	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, out_error);
+
+	/* If the extent ends before the start, look at the next one */
+	if (tmp.rc_startblock + tmp.rc_blockcount <= agbno) {
+		error = xfs_btree_increment(cur, 0, &have);
+		if (error)
+			goto out_error;
+		if (!have)
+			goto done;
+		error = xfs_refcount_get_rec(cur, &tmp, &i);
+		if (error)
+			goto out_error;
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, out_error);
+	}
+
+	/* If the extent starts after the range we want, bail out */
+	if (tmp.rc_startblock >= agbno + aglen)
+		goto done;
+
+	/* We found the start of a shared extent! */
+	if (tmp.rc_startblock < agbno) {
+		tmp.rc_blockcount -= (agbno - tmp.rc_startblock);
+		tmp.rc_startblock = agbno;
+	}
+
+	*fbno = tmp.rc_startblock;
+	*flen = min(tmp.rc_blockcount, agbno + aglen - *fbno);
+	if (!find_end_of_shared)
+		goto done;
+
+	/* Otherwise, find the end of this shared extent */
+	while (*fbno + *flen < agbno + aglen) {
+		error = xfs_btree_increment(cur, 0, &have);
+		if (error)
+			goto out_error;
+		if (!have)
+			break;
+		error = xfs_refcount_get_rec(cur, &tmp, &i);
+		if (error)
+			goto out_error;
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, out_error);
+		if (tmp.rc_startblock >= agbno + aglen ||
+		    tmp.rc_startblock != *fbno + *flen)
+			break;
+		*flen = min(*flen + tmp.rc_blockcount, agbno + aglen - *fbno);
+	}
+
+done:
+	trace_xfs_refcount_find_shared_result(cur->bc_mp,
+			cur->bc_private.a.agno, *fbno, *flen);
+
+out_error:
+	if (error)
+		trace_xfs_refcount_find_shared_error(cur->bc_mp,
+				cur->bc_private.a.agno, error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Recovering CoW Blocks After a Crash
+ *
+ * Due to the way that the copy on write mechanism works, there's a window of
+ * opportunity in which we can lose track of allocated blocks during a crash.
+ * Because CoW uses delayed allocation in the in-core CoW fork, writeback
+ * causes blocks to be allocated and stored in the CoW fork.  The blocks are
+ * no longer in the free space btree but are not otherwise recorded anywhere
+ * until the write completes and the blocks are mapped into the file.  A crash
+ * in between allocation and remapping results in the replacement blocks being
+ * lost.  This situation is exacerbated by the CoW extent size hint because
+ * allocations can hang around for long time.
+ *
+ * However, there is a place where we can record these allocations before they
+ * become mappings -- the reference count btree.  The btree does not record
+ * extents with refcount == 1, so we can record allocations with a refcount of
+ * 1.  Blocks being used for CoW writeout cannot be shared, so there should be
+ * no conflict with shared block records.  These mappings should be created
+ * when we allocate blocks to the CoW fork and deleted when they're removed
+ * from the CoW fork.
+ *
+ * Minor nit: records for in-progress CoW allocations and records for shared
+ * extents must never be merged, to preserve the property that (except for CoW
+ * allocations) there are no refcount btree entries with refcount == 1.  The
+ * only time this could potentially happen is when unsharing a block that's
+ * adjacent to CoW allocations, so we must be careful to avoid this.
+ *
+ * At mount time we recover lost CoW allocations by searching the refcount
+ * btree for these refcount == 1 mappings.  These represent CoW allocations
+ * that were in progress at the time the filesystem went down, so we can free
+ * them to get the space back.
+ *
+ * This mechanism is superior to creating EFIs for unmapped CoW extents for
+ * several reasons -- first, EFIs pin the tail of the log and would have to be
+ * periodically relogged to avoid filling up the log.  Second, CoW completions
+ * will have to file an EFD and create new EFIs for whatever remains in the
+ * CoW fork; this partially takes care of (1) but extent-size reservations
+ * will have to periodically relog even if there's no writeout in progress.
+ * This can happen if the CoW extent size hint is set, which you really want.
+ * Third, EFIs cannot currently be automatically relogged into newer
+ * transactions to advance the log tail.  Fourth, stuffing the log full of
+ * EFIs places an upper bound on the number of CoW allocations that can be
+ * held filesystem-wide at any given time.  Recording them in the refcount
+ * btree doesn't require us to maintain any state in memory and doesn't pin
+ * the log.
+ */
+/*
+ * Adjust the refcounts of CoW allocations.  These allocations are "magic"
+ * in that they're not referenced anywhere else in the filesystem, so we
+ * stash them in the refcount btree with a refcount of 1 until either file
+ * remapping (or CoW cancellation) happens.
+ */
+STATIC int
+xfs_refcount_adjust_cow_extents(
+	struct xfs_btree_cur	*cur,
+	xfs_agblock_t		agbno,
+	xfs_extlen_t		aglen,
+	enum xfs_refc_adjust_op	adj,
+	struct xfs_defer_ops	*dfops,
+	struct xfs_owner_info	*oinfo)
+{
+	struct xfs_refcount_irec	ext, tmp;
+	int				error;
+	int				found_rec, found_tmp;
+
+	if (aglen == 0)
+		return 0;
+
+	/* Find any overlapping refcount records */
+	error = xfs_refcount_lookup_ge(cur, agbno, &found_rec);
+	if (error)
+		goto out_error;
+	error = xfs_refcount_get_rec(cur, &ext, &found_rec);
+	if (error)
+		goto out_error;
+	if (!found_rec) {
+		ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks +
+				XFS_REFC_COW_START;
+		ext.rc_blockcount = 0;
+		ext.rc_refcount = 0;
+	}
+
+	switch (adj) {
+	case XFS_REFCOUNT_ADJUST_COW_ALLOC:
+		/* Adding a CoW reservation, there should be nothing here. */
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
+				ext.rc_startblock >= agbno + aglen, out_error);
+
+		tmp.rc_startblock = agbno;
+		tmp.rc_blockcount = aglen;
+		tmp.rc_refcount = 1;
+		trace_xfs_refcount_modify_extent(cur->bc_mp,
+				cur->bc_private.a.agno, &tmp);
+
+		error = xfs_refcount_insert(cur, &tmp,
+				&found_tmp);
+		if (error)
+			goto out_error;
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
+				found_tmp == 1, out_error);
+		break;
+	case XFS_REFCOUNT_ADJUST_COW_FREE:
+		/* Removing a CoW reservation, there should be one extent. */
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
+			ext.rc_startblock == agbno, out_error);
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
+			ext.rc_blockcount == aglen, out_error);
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
+			ext.rc_refcount == 1, out_error);
+
+		ext.rc_refcount = 0;
+		trace_xfs_refcount_modify_extent(cur->bc_mp,
+				cur->bc_private.a.agno, &ext);
+		error = xfs_refcount_delete(cur, &found_rec);
+		if (error)
+			goto out_error;
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
+				found_rec == 1, out_error);
+		break;
+	default:
+		ASSERT(0);
+	}
+
+	return error;
+out_error:
+	trace_xfs_refcount_modify_extent_error(cur->bc_mp,
+			cur->bc_private.a.agno, error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Add or remove refcount btree entries for CoW reservations.
+ */
+STATIC int
+xfs_refcount_adjust_cow(
+	struct xfs_btree_cur	*cur,
+	xfs_agblock_t		agbno,
+	xfs_extlen_t		aglen,
+	enum xfs_refc_adjust_op	adj,
+	struct xfs_defer_ops	*dfops)
+{
+	bool			shape_changed;
+	int			error;
+
+	agbno += XFS_REFC_COW_START;
+
+	/*
+	 * Ensure that no rcextents cross the boundary of the adjustment range.
+	 */
+	error = xfs_refcount_split_extent(cur, agbno, &shape_changed);
+	if (error)
+		goto out_error;
+
+	error = xfs_refcount_split_extent(cur, agbno + aglen, &shape_changed);
+	if (error)
+		goto out_error;
+
+	/*
+	 * Try to merge with the left or right extents of the range.
+	 */
+	error = xfs_refcount_merge_extents(cur, &agbno, &aglen, adj,
+			XFS_FIND_RCEXT_COW, &shape_changed);
+	if (error)
+		goto out_error;
+
+	/* Now that we've taken care of the ends, adjust the middle extents */
+	error = xfs_refcount_adjust_cow_extents(cur, agbno, aglen, adj,
+			dfops, NULL);
+	if (error)
+		goto out_error;
+
+	return 0;
+
+out_error:
+	trace_xfs_refcount_adjust_cow_error(cur->bc_mp, cur->bc_private.a.agno,
+			error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Record a CoW allocation in the refcount btree.
+ */
+STATIC int
+__xfs_refcount_cow_alloc(
+	struct xfs_btree_cur	*rcur,
+	xfs_agblock_t		agbno,
+	xfs_extlen_t		aglen,
+	struct xfs_defer_ops	*dfops)
+{
+	int			error;
+
+	trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_private.a.agno,
+			agbno, aglen);
+
+	/* Add refcount btree reservation */
+	error = xfs_refcount_adjust_cow(rcur, agbno, aglen,
+			XFS_REFCOUNT_ADJUST_COW_ALLOC, dfops);
+	if (error)
+		return error;
+
+	/* Add rmap entry */
+	if (xfs_sb_version_hasrmapbt(&rcur->bc_mp->m_sb)) {
+		error = xfs_rmap_alloc_extent(rcur->bc_mp, dfops,
+				rcur->bc_private.a.agno,
+				agbno, aglen, XFS_RMAP_OWN_COW);
+		if (error)
+			return error;
+	}
+
+	return error;
+}
+
+/*
+ * Remove a CoW allocation from the refcount btree.
+ */
+STATIC int
+__xfs_refcount_cow_free(
+	struct xfs_btree_cur	*rcur,
+	xfs_agblock_t		agbno,
+	xfs_extlen_t		aglen,
+	struct xfs_defer_ops	*dfops)
+{
+	int			error;
+
+	trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_private.a.agno,
+			agbno, aglen);
+
+	/* Remove refcount btree reservation */
+	error = xfs_refcount_adjust_cow(rcur, agbno, aglen,
+			XFS_REFCOUNT_ADJUST_COW_FREE, dfops);
+	if (error)
+		return error;
+
+	/* Remove rmap entry */
+	if (xfs_sb_version_hasrmapbt(&rcur->bc_mp->m_sb)) {
+		error = xfs_rmap_free_extent(rcur->bc_mp, dfops,
+				rcur->bc_private.a.agno,
+				agbno, aglen, XFS_RMAP_OWN_COW);
+		if (error)
+			return error;
+	}
+
+	return error;
+}
+
+/* Record a CoW staging extent in the refcount btree. */
+int
+xfs_refcount_alloc_cow_extent(
+	struct xfs_mount		*mp,
+	struct xfs_defer_ops		*dfops,
+	xfs_fsblock_t			fsb,
+	xfs_extlen_t			len)
+{
+	if (!xfs_sb_version_hasreflink(&mp->m_sb))
+		return 0;
+
+	return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW,
+			fsb, len);
+}
+
+/* Forget a CoW staging event in the refcount btree. */
+int
+xfs_refcount_free_cow_extent(
+	struct xfs_mount		*mp,
+	struct xfs_defer_ops		*dfops,
+	xfs_fsblock_t			fsb,
+	xfs_extlen_t			len)
+{
+	if (!xfs_sb_version_hasreflink(&mp->m_sb))
+		return 0;
+
+	return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_FREE_COW,
+			fsb, len);
+}
+
+struct xfs_refcount_recovery {
+	struct list_head		rr_list;
+	struct xfs_refcount_irec	rr_rrec;
+};
+
+/* Stuff an extent on the recovery list. */
+STATIC int
+xfs_refcount_recover_extent(
+	struct xfs_btree_cur		*cur,
+	union xfs_btree_rec		*rec,
+	void				*priv)
+{
+	struct list_head		*debris = priv;
+	struct xfs_refcount_recovery	*rr;
+
+	if (be32_to_cpu(rec->refc.rc_refcount) != 1)
+		return -EFSCORRUPTED;
+
+	rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), KM_SLEEP);
+	xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec);
+	list_add_tail(&rr->rr_list, debris);
+
+	return 0;
+}
+
+/* Find and remove leftover CoW reservations. */
+int
+xfs_refcount_recover_cow_leftovers(
+	struct xfs_mount		*mp,
+	xfs_agnumber_t			agno)
+{
+	struct xfs_trans		*tp;
+	struct xfs_btree_cur		*cur;
+	struct xfs_buf			*agbp;
+	struct xfs_refcount_recovery	*rr, *n;
+	struct list_head		debris;
+	union xfs_btree_irec		low;
+	union xfs_btree_irec		high;
+	struct xfs_defer_ops		dfops;
+	xfs_fsblock_t			fsb;
+	xfs_agblock_t			agbno;
+	int				error;
+
+	if (mp->m_sb.sb_agblocks >= XFS_REFC_COW_START)
+		return -EOPNOTSUPP;
+
+	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+	if (error)
+		return error;
+	cur = xfs_refcountbt_init_cursor(mp, NULL, agbp, agno, NULL);
+
+	/* Find all the leftover CoW staging extents. */
+	INIT_LIST_HEAD(&debris);
+	memset(&low, 0, sizeof(low));
+	memset(&high, 0, sizeof(high));
+	low.rc.rc_startblock = XFS_REFC_COW_START;
+	high.rc.rc_startblock = -1U;
+	error = xfs_btree_query_range(cur, &low, &high,
+			xfs_refcount_recover_extent, &debris);
+	if (error)
+		goto out_cursor;
+	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+	xfs_buf_relse(agbp);
+
+	/* Now iterate the list to free the leftovers */
+	list_for_each_entry(rr, &debris, rr_list) {
+		/* Set up transaction. */
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
+		if (error)
+			goto out_free;
+
+		trace_xfs_refcount_recover_extent(mp, agno, &rr->rr_rrec);
+
+		/* Free the orphan record */
+		xfs_defer_init(&dfops, &fsb);
+		agbno = rr->rr_rrec.rc_startblock - XFS_REFC_COW_START;
+		fsb = XFS_AGB_TO_FSB(mp, agno, agbno);
+		error = xfs_refcount_free_cow_extent(mp, &dfops, fsb,
+				rr->rr_rrec.rc_blockcount);
+		if (error)
+			goto out_defer;
+
+		/* Free the block. */
+		xfs_bmap_add_free(mp, &dfops, fsb,
+				rr->rr_rrec.rc_blockcount, NULL);
+
+		error = xfs_defer_finish(&tp, &dfops, NULL);
+		if (error)
+			goto out_defer;
+
+		error = xfs_trans_commit(tp);
+		if (error)
+			goto out_free;
+	}
+
+out_free:
+	/* Free the leftover list */
+	list_for_each_entry_safe(rr, n, &debris, rr_list) {
+		list_del(&rr->rr_list);
+		kmem_free(rr);
+	}
+	return error;
+
+out_cursor:
+	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+	xfs_buf_relse(agbp);
+	goto out_free;
+
+out_defer:
+	xfs_defer_cancel(&dfops);
+	xfs_trans_cancel(tp);
+	goto out_free;
+}
diff --git a/fs/xfs/libxfs/xfs_refcount.h b/fs/xfs/libxfs/xfs_refcount.h
new file mode 100644
index 0000000..098dc668
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_refcount.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2016 Oracle.  All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#ifndef __XFS_REFCOUNT_H__
+#define __XFS_REFCOUNT_H__
+
+extern int xfs_refcount_lookup_le(struct xfs_btree_cur *cur,
+		xfs_agblock_t bno, int *stat);
+extern int xfs_refcount_lookup_ge(struct xfs_btree_cur *cur,
+		xfs_agblock_t bno, int *stat);
+extern int xfs_refcount_get_rec(struct xfs_btree_cur *cur,
+		struct xfs_refcount_irec *irec, int *stat);
+
+enum xfs_refcount_intent_type {
+	XFS_REFCOUNT_INCREASE = 1,
+	XFS_REFCOUNT_DECREASE,
+	XFS_REFCOUNT_ALLOC_COW,
+	XFS_REFCOUNT_FREE_COW,
+};
+
+struct xfs_refcount_intent {
+	struct list_head			ri_list;
+	enum xfs_refcount_intent_type		ri_type;
+	xfs_fsblock_t				ri_startblock;
+	xfs_extlen_t				ri_blockcount;
+};
+
+extern int xfs_refcount_increase_extent(struct xfs_mount *mp,
+		struct xfs_defer_ops *dfops, struct xfs_bmbt_irec *irec);
+extern int xfs_refcount_decrease_extent(struct xfs_mount *mp,
+		struct xfs_defer_ops *dfops, struct xfs_bmbt_irec *irec);
+
+extern void xfs_refcount_finish_one_cleanup(struct xfs_trans *tp,
+		struct xfs_btree_cur *rcur, int error);
+extern int xfs_refcount_finish_one(struct xfs_trans *tp,
+		struct xfs_defer_ops *dfops, enum xfs_refcount_intent_type type,
+		xfs_fsblock_t startblock, xfs_extlen_t blockcount,
+		xfs_fsblock_t *new_fsb, xfs_extlen_t *new_len,
+		struct xfs_btree_cur **pcur);
+
+extern int xfs_refcount_find_shared(struct xfs_btree_cur *cur,
+		xfs_agblock_t agbno, xfs_extlen_t aglen, xfs_agblock_t *fbno,
+		xfs_extlen_t *flen, bool find_end_of_shared);
+
+extern int xfs_refcount_alloc_cow_extent(struct xfs_mount *mp,
+		struct xfs_defer_ops *dfops, xfs_fsblock_t fsb,
+		xfs_extlen_t len);
+extern int xfs_refcount_free_cow_extent(struct xfs_mount *mp,
+		struct xfs_defer_ops *dfops, xfs_fsblock_t fsb,
+		xfs_extlen_t len);
+extern int xfs_refcount_recover_cow_leftovers(struct xfs_mount *mp,
+		xfs_agnumber_t agno);
+
+#endif	/* __XFS_REFCOUNT_H__ */
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
new file mode 100644
index 0000000..453bb27
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright (C) 2016 Oracle.  All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_btree.h"
+#include "xfs_bmap.h"
+#include "xfs_refcount_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_error.h"
+#include "xfs_trace.h"
+#include "xfs_cksum.h"
+#include "xfs_trans.h"
+#include "xfs_bit.h"
+#include "xfs_rmap.h"
+
+static struct xfs_btree_cur *
+xfs_refcountbt_dup_cursor(
+	struct xfs_btree_cur	*cur)
+{
+	return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
+			cur->bc_private.a.agbp, cur->bc_private.a.agno,
+			cur->bc_private.a.dfops);
+}
+
+STATIC void
+xfs_refcountbt_set_root(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_ptr	*ptr,
+	int			inc)
+{
+	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
+	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
+	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);
+
+	ASSERT(ptr->s != 0);
+
+	agf->agf_refcount_root = ptr->s;
+	be32_add_cpu(&agf->agf_refcount_level, inc);
+	pag->pagf_refcount_level += inc;
+	xfs_perag_put(pag);
+
+	xfs_alloc_log_agf(cur->bc_tp, agbp,
+			XFS_AGF_REFCOUNT_ROOT | XFS_AGF_REFCOUNT_LEVEL);
+}
+
+STATIC int
+xfs_refcountbt_alloc_block(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_ptr	*start,
+	union xfs_btree_ptr	*new,
+	int			*stat)
+{
+	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
+	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_alloc_arg	args;		/* block allocation args */
+	int			error;		/* error return value */
+
+	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+
+	memset(&args, 0, sizeof(args));
+	args.tp = cur->bc_tp;
+	args.mp = cur->bc_mp;
+	args.type = XFS_ALLOCTYPE_NEAR_BNO;
+	args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
+			xfs_refc_block(args.mp));
+	args.firstblock = args.fsbno;
+	xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_REFC);
+	args.minlen = args.maxlen = args.prod = 1;
+	args.resv = XFS_AG_RESV_METADATA;
+
+	error = xfs_alloc_vextent(&args);
+	if (error)
+		goto out_error;
+	trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
+			args.agbno, 1);
+	if (args.fsbno == NULLFSBLOCK) {
+		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+		*stat = 0;
+		return 0;
+	}
+	ASSERT(args.agno == cur->bc_private.a.agno);
+	ASSERT(args.len == 1);
+
+	new->s = cpu_to_be32(args.agbno);
+	be32_add_cpu(&agf->agf_refcount_blocks, 1);
+	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
+
+	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+	*stat = 1;
+	return 0;
+
+out_error:
+	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+	return error;
+}
+
+STATIC int
+xfs_refcountbt_free_block(
+	struct xfs_btree_cur	*cur,
+	struct xfs_buf		*bp)
+{
+	struct xfs_mount	*mp = cur->bc_mp;
+	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
+	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
+	struct xfs_owner_info	oinfo;
+	int			error;
+
+	trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
+			XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
+	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
+	be32_add_cpu(&agf->agf_refcount_blocks, -1);
+	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
+	error = xfs_free_extent(cur->bc_tp, fsbno, 1, &oinfo,
+			XFS_AG_RESV_METADATA);
+	if (error)
+		return error;
+
+	return error;
+}
+
+STATIC int
+xfs_refcountbt_get_minrecs(
+	struct xfs_btree_cur	*cur,
+	int			level)
+{
+	return cur->bc_mp->m_refc_mnr[level != 0];
+}
+
+STATIC int
+xfs_refcountbt_get_maxrecs(
+	struct xfs_btree_cur	*cur,
+	int			level)
+{
+	return cur->bc_mp->m_refc_mxr[level != 0];
+}
+
+STATIC void
+xfs_refcountbt_init_key_from_rec(
+	union xfs_btree_key	*key,
+	union xfs_btree_rec	*rec)
+{
+	key->refc.rc_startblock = rec->refc.rc_startblock;
+}
+
+STATIC void
+xfs_refcountbt_init_high_key_from_rec(
+	union xfs_btree_key	*key,
+	union xfs_btree_rec	*rec)
+{
+	__u32			x;
+
+	x = be32_to_cpu(rec->refc.rc_startblock);
+	x += be32_to_cpu(rec->refc.rc_blockcount) - 1;
+	key->refc.rc_startblock = cpu_to_be32(x);
+}
+
+STATIC void
+xfs_refcountbt_init_rec_from_cur(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_rec	*rec)
+{
+	rec->refc.rc_startblock = cpu_to_be32(cur->bc_rec.rc.rc_startblock);
+	rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
+	rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
+}
+
+STATIC void
+xfs_refcountbt_init_ptr_from_cur(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_ptr	*ptr)
+{
+	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+
+	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
+	ASSERT(agf->agf_refcount_root != 0);
+
+	ptr->s = agf->agf_refcount_root;
+}
+
+STATIC __int64_t
+xfs_refcountbt_key_diff(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_key	*key)
+{
+	struct xfs_refcount_irec	*rec = &cur->bc_rec.rc;
+	struct xfs_refcount_key		*kp = &key->refc;
+
+	return (__int64_t)be32_to_cpu(kp->rc_startblock) - rec->rc_startblock;
+}
+
+STATIC __int64_t
+xfs_refcountbt_diff_two_keys(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_key	*k1,
+	union xfs_btree_key	*k2)
+{
+	return (__int64_t)be32_to_cpu(k1->refc.rc_startblock) -
+			  be32_to_cpu(k2->refc.rc_startblock);
+}
+
+STATIC bool
+xfs_refcountbt_verify(
+	struct xfs_buf		*bp)
+{
+	struct xfs_mount	*mp = bp->b_target->bt_mount;
+	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
+	struct xfs_perag	*pag = bp->b_pag;
+	unsigned int		level;
+
+	if (block->bb_magic != cpu_to_be32(XFS_REFC_CRC_MAGIC))
+		return false;
+
+	if (!xfs_sb_version_hasreflink(&mp->m_sb))
+		return false;
+	if (!xfs_btree_sblock_v5hdr_verify(bp))
+		return false;
+
+	level = be16_to_cpu(block->bb_level);
+	if (pag && pag->pagf_init) {
+		if (level >= pag->pagf_refcount_level)
+			return false;
+	} else if (level >= mp->m_refc_maxlevels)
+		return false;
+
+	return xfs_btree_sblock_verify(bp, mp->m_refc_mxr[level != 0]);
+}
+
+STATIC void
+xfs_refcountbt_read_verify(
+	struct xfs_buf	*bp)
+{
+	if (!xfs_btree_sblock_verify_crc(bp))
+		xfs_buf_ioerror(bp, -EFSBADCRC);
+	else if (!xfs_refcountbt_verify(bp))
+		xfs_buf_ioerror(bp, -EFSCORRUPTED);
+
+	if (bp->b_error) {
+		trace_xfs_btree_corrupt(bp, _RET_IP_);
+		xfs_verifier_error(bp);
+	}
+}
+
+STATIC void
+xfs_refcountbt_write_verify(
+	struct xfs_buf	*bp)
+{
+	if (!xfs_refcountbt_verify(bp)) {
+		trace_xfs_btree_corrupt(bp, _RET_IP_);
+		xfs_buf_ioerror(bp, -EFSCORRUPTED);
+		xfs_verifier_error(bp);
+		return;
+	}
+	xfs_btree_sblock_calc_crc(bp);
+
+}
+
+const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
+	.name			= "xfs_refcountbt",
+	.verify_read		= xfs_refcountbt_read_verify,
+	.verify_write		= xfs_refcountbt_write_verify,
+};
+
+#if defined(DEBUG) || defined(XFS_WARN)
+STATIC int
+xfs_refcountbt_keys_inorder(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_key	*k1,
+	union xfs_btree_key	*k2)
+{
+	return be32_to_cpu(k1->refc.rc_startblock) <
+	       be32_to_cpu(k2->refc.rc_startblock);
+}
+
+STATIC int
+xfs_refcountbt_recs_inorder(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_rec	*r1,
+	union xfs_btree_rec	*r2)
+{
+	return  be32_to_cpu(r1->refc.rc_startblock) +
+		be32_to_cpu(r1->refc.rc_blockcount) <=
+		be32_to_cpu(r2->refc.rc_startblock);
+}
+#endif
+
+static const struct xfs_btree_ops xfs_refcountbt_ops = {
+	.rec_len		= sizeof(struct xfs_refcount_rec),
+	.key_len		= sizeof(struct xfs_refcount_key),
+
+	.dup_cursor		= xfs_refcountbt_dup_cursor,
+	.set_root		= xfs_refcountbt_set_root,
+	.alloc_block		= xfs_refcountbt_alloc_block,
+	.free_block		= xfs_refcountbt_free_block,
+	.get_minrecs		= xfs_refcountbt_get_minrecs,
+	.get_maxrecs		= xfs_refcountbt_get_maxrecs,
+	.init_key_from_rec	= xfs_refcountbt_init_key_from_rec,
+	.init_high_key_from_rec	= xfs_refcountbt_init_high_key_from_rec,
+	.init_rec_from_cur	= xfs_refcountbt_init_rec_from_cur,
+	.init_ptr_from_cur	= xfs_refcountbt_init_ptr_from_cur,
+	.key_diff		= xfs_refcountbt_key_diff,
+	.buf_ops		= &xfs_refcountbt_buf_ops,
+	.diff_two_keys		= xfs_refcountbt_diff_two_keys,
+#if defined(DEBUG) || defined(XFS_WARN)
+	.keys_inorder		= xfs_refcountbt_keys_inorder,
+	.recs_inorder		= xfs_refcountbt_recs_inorder,
+#endif
+};
+
+/*
+ * Allocate a new refcount btree cursor.
+ */
+struct xfs_btree_cur *
+xfs_refcountbt_init_cursor(
+	struct xfs_mount	*mp,
+	struct xfs_trans	*tp,
+	struct xfs_buf		*agbp,
+	xfs_agnumber_t		agno,
+	struct xfs_defer_ops	*dfops)
+{
+	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_btree_cur	*cur;
+
+	ASSERT(agno != NULLAGNUMBER);
+	ASSERT(agno < mp->m_sb.sb_agcount);
+	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
+
+	cur->bc_tp = tp;
+	cur->bc_mp = mp;
+	cur->bc_btnum = XFS_BTNUM_REFC;
+	cur->bc_blocklog = mp->m_sb.sb_blocklog;
+	cur->bc_ops = &xfs_refcountbt_ops;
+
+	cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
+
+	cur->bc_private.a.agbp = agbp;
+	cur->bc_private.a.agno = agno;
+	cur->bc_private.a.dfops = dfops;
+	cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
+
+	cur->bc_private.a.priv.refc.nr_ops = 0;
+	cur->bc_private.a.priv.refc.shape_changes = 0;
+
+	return cur;
+}
+
+/*
+ * Calculate the number of records in a refcount btree block.
+ */
+int
+xfs_refcountbt_maxrecs(
+	struct xfs_mount	*mp,
+	int			blocklen,
+	bool			leaf)
+{
+	blocklen -= XFS_REFCOUNT_BLOCK_LEN;
+
+	if (leaf)
+		return blocklen / sizeof(struct xfs_refcount_rec);
+	return blocklen / (sizeof(struct xfs_refcount_key) +
+			   sizeof(xfs_refcount_ptr_t));
+}
+
+/* Compute the maximum height of a refcount btree. */
+void
+xfs_refcountbt_compute_maxlevels(
+	struct xfs_mount		*mp)
+{
+	mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(mp,
+			mp->m_refc_mnr, mp->m_sb.sb_agblocks);
+}
+
+/* Calculate the refcount btree size for some records. */
+xfs_extlen_t
+xfs_refcountbt_calc_size(
+	struct xfs_mount	*mp,
+	unsigned long long	len)
+{
+	return xfs_btree_calc_size(mp, mp->m_refc_mnr, len);
+}
+
+/*
+ * Calculate the maximum refcount btree size.
+ */
+xfs_extlen_t
+xfs_refcountbt_max_size(
+	struct xfs_mount	*mp)
+{
+	/* Bail out if we're uninitialized, which can happen in mkfs. */
+	if (mp->m_refc_mxr[0] == 0)
+		return 0;
+
+	return xfs_refcountbt_calc_size(mp, mp->m_sb.sb_agblocks);
+}
+
+/*
+ * Figure out how many blocks to reserve and how many are used by this btree.
+ */
+int
+xfs_refcountbt_calc_reserves(
+	struct xfs_mount	*mp,
+	xfs_agnumber_t		agno,
+	xfs_extlen_t		*ask,
+	xfs_extlen_t		*used)
+{
+	struct xfs_buf		*agbp;
+	struct xfs_agf		*agf;
+	xfs_extlen_t		tree_len;
+	int			error;
+
+	if (!xfs_sb_version_hasreflink(&mp->m_sb))
+		return 0;
+
+	*ask += xfs_refcountbt_max_size(mp);
+
+	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+	if (error)
+		return error;
+
+	agf = XFS_BUF_TO_AGF(agbp);
+	tree_len = be32_to_cpu(agf->agf_refcount_blocks);
+	xfs_buf_relse(agbp);
+
+	*used += tree_len;
+
+	return error;
+}
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.h b/fs/xfs/libxfs/xfs_refcount_btree.h
new file mode 100644
index 0000000..3be7768
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_refcount_btree.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2016 Oracle.  All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#ifndef __XFS_REFCOUNT_BTREE_H__
+#define	__XFS_REFCOUNT_BTREE_H__
+
+/*
+ * Reference Count Btree on-disk structures
+ */
+
+struct xfs_buf;
+struct xfs_btree_cur;
+struct xfs_mount;
+
+/*
+ * Btree block header size
+ */
+#define XFS_REFCOUNT_BLOCK_LEN	XFS_BTREE_SBLOCK_CRC_LEN
+
+/*
+ * Record, key, and pointer address macros for btree blocks.
+ *
+ * (note that some of these may appear unused, but they are used in userspace)
+ */
+#define XFS_REFCOUNT_REC_ADDR(block, index) \
+	((struct xfs_refcount_rec *) \
+		((char *)(block) + \
+		 XFS_REFCOUNT_BLOCK_LEN + \
+		 (((index) - 1) * sizeof(struct xfs_refcount_rec))))
+
+#define XFS_REFCOUNT_KEY_ADDR(block, index) \
+	((struct xfs_refcount_key *) \
+		((char *)(block) + \
+		 XFS_REFCOUNT_BLOCK_LEN + \
+		 ((index) - 1) * sizeof(struct xfs_refcount_key)))
+
+#define XFS_REFCOUNT_PTR_ADDR(block, index, maxrecs) \
+	((xfs_refcount_ptr_t *) \
+		((char *)(block) + \
+		 XFS_REFCOUNT_BLOCK_LEN + \
+		 (maxrecs) * sizeof(struct xfs_refcount_key) + \
+		 ((index) - 1) * sizeof(xfs_refcount_ptr_t)))
+
+extern struct xfs_btree_cur *xfs_refcountbt_init_cursor(struct xfs_mount *mp,
+		struct xfs_trans *tp, struct xfs_buf *agbp, xfs_agnumber_t agno,
+		struct xfs_defer_ops *dfops);
+extern int xfs_refcountbt_maxrecs(struct xfs_mount *mp, int blocklen,
+		bool leaf);
+extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp);
+
+extern xfs_extlen_t xfs_refcountbt_calc_size(struct xfs_mount *mp,
+		unsigned long long len);
+extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp);
+
+extern int xfs_refcountbt_calc_reserves(struct xfs_mount *mp,
+		xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
+
+#endif	/* __XFS_REFCOUNT_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index 73d0540..3a8cc71 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -148,6 +148,37 @@
 	return error;
 }
 
+STATIC int
+xfs_rmap_delete(
+	struct xfs_btree_cur	*rcur,
+	xfs_agblock_t		agbno,
+	xfs_extlen_t		len,
+	uint64_t		owner,
+	uint64_t		offset,
+	unsigned int		flags)
+{
+	int			i;
+	int			error;
+
+	trace_xfs_rmap_delete(rcur->bc_mp, rcur->bc_private.a.agno, agbno,
+			len, owner, offset, flags);
+
+	error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
+	if (error)
+		goto done;
+	XFS_WANT_CORRUPTED_GOTO(rcur->bc_mp, i == 1, done);
+
+	error = xfs_btree_delete(rcur, &i);
+	if (error)
+		goto done;
+	XFS_WANT_CORRUPTED_GOTO(rcur->bc_mp, i == 1, done);
+done:
+	if (error)
+		trace_xfs_rmap_delete_error(rcur->bc_mp,
+				rcur->bc_private.a.agno, error, _RET_IP_);
+	return error;
+}
+
 static int
 xfs_rmap_btrec_to_irec(
 	union xfs_btree_rec	*rec,
@@ -180,6 +211,160 @@
 	return xfs_rmap_btrec_to_irec(rec, irec);
 }
 
+struct xfs_find_left_neighbor_info {
+	struct xfs_rmap_irec	high;
+	struct xfs_rmap_irec	*irec;
+	int			*stat;
+};
+
+/* For each rmap given, figure out if it matches the key we want. */
+STATIC int
+xfs_rmap_find_left_neighbor_helper(
+	struct xfs_btree_cur	*cur,
+	struct xfs_rmap_irec	*rec,
+	void			*priv)
+{
+	struct xfs_find_left_neighbor_info	*info = priv;
+
+	trace_xfs_rmap_find_left_neighbor_candidate(cur->bc_mp,
+			cur->bc_private.a.agno, rec->rm_startblock,
+			rec->rm_blockcount, rec->rm_owner, rec->rm_offset,
+			rec->rm_flags);
+
+	if (rec->rm_owner != info->high.rm_owner)
+		return XFS_BTREE_QUERY_RANGE_CONTINUE;
+	if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) &&
+	    !(rec->rm_flags & XFS_RMAP_BMBT_BLOCK) &&
+	    rec->rm_offset + rec->rm_blockcount - 1 != info->high.rm_offset)
+		return XFS_BTREE_QUERY_RANGE_CONTINUE;
+
+	*info->irec = *rec;
+	*info->stat = 1;
+	return XFS_BTREE_QUERY_RANGE_ABORT;
+}
+
+/*
+ * Find the record to the left of the given extent, being careful only to
+ * return a match with the same owner and adjacent physical and logical
+ * block ranges.
+ */
+int
+xfs_rmap_find_left_neighbor(
+	struct xfs_btree_cur	*cur,
+	xfs_agblock_t		bno,
+	uint64_t		owner,
+	uint64_t		offset,
+	unsigned int		flags,
+	struct xfs_rmap_irec	*irec,
+	int			*stat)
+{
+	struct xfs_find_left_neighbor_info	info;
+	int			error;
+
+	*stat = 0;
+	if (bno == 0)
+		return 0;
+	info.high.rm_startblock = bno - 1;
+	info.high.rm_owner = owner;
+	if (!XFS_RMAP_NON_INODE_OWNER(owner) &&
+	    !(flags & XFS_RMAP_BMBT_BLOCK)) {
+		if (offset == 0)
+			return 0;
+		info.high.rm_offset = offset - 1;
+	} else
+		info.high.rm_offset = 0;
+	info.high.rm_flags = flags;
+	info.high.rm_blockcount = 0;
+	info.irec = irec;
+	info.stat = stat;
+
+	trace_xfs_rmap_find_left_neighbor_query(cur->bc_mp,
+			cur->bc_private.a.agno, bno, 0, owner, offset, flags);
+
+	error = xfs_rmap_query_range(cur, &info.high, &info.high,
+			xfs_rmap_find_left_neighbor_helper, &info);
+	if (error == XFS_BTREE_QUERY_RANGE_ABORT)
+		error = 0;
+	if (*stat)
+		trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
+				cur->bc_private.a.agno, irec->rm_startblock,
+				irec->rm_blockcount, irec->rm_owner,
+				irec->rm_offset, irec->rm_flags);
+	return error;
+}
+
+/* For each rmap given, figure out if it matches the key we want. */
+STATIC int
+xfs_rmap_lookup_le_range_helper(
+	struct xfs_btree_cur	*cur,
+	struct xfs_rmap_irec	*rec,
+	void			*priv)
+{
+	struct xfs_find_left_neighbor_info	*info = priv;
+
+	trace_xfs_rmap_lookup_le_range_candidate(cur->bc_mp,
+			cur->bc_private.a.agno, rec->rm_startblock,
+			rec->rm_blockcount, rec->rm_owner, rec->rm_offset,
+			rec->rm_flags);
+
+	if (rec->rm_owner != info->high.rm_owner)
+		return XFS_BTREE_QUERY_RANGE_CONTINUE;
+	if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) &&
+	    !(rec->rm_flags & XFS_RMAP_BMBT_BLOCK) &&
+	    (rec->rm_offset > info->high.rm_offset ||
+	     rec->rm_offset + rec->rm_blockcount <= info->high.rm_offset))
+		return XFS_BTREE_QUERY_RANGE_CONTINUE;
+
+	*info->irec = *rec;
+	*info->stat = 1;
+	return XFS_BTREE_QUERY_RANGE_ABORT;
+}
+
+/*
+ * Find the record to the left of the given extent, being careful only to
+ * return a match with the same owner and overlapping physical and logical
+ * block ranges.  This is the overlapping-interval version of
+ * xfs_rmap_lookup_le.
+ */
+int
+xfs_rmap_lookup_le_range(
+	struct xfs_btree_cur	*cur,
+	xfs_agblock_t		bno,
+	uint64_t		owner,
+	uint64_t		offset,
+	unsigned int		flags,
+	struct xfs_rmap_irec	*irec,
+	int			*stat)
+{
+	struct xfs_find_left_neighbor_info	info;
+	int			error;
+
+	info.high.rm_startblock = bno;
+	info.high.rm_owner = owner;
+	if (!XFS_RMAP_NON_INODE_OWNER(owner) && !(flags & XFS_RMAP_BMBT_BLOCK))
+		info.high.rm_offset = offset;
+	else
+		info.high.rm_offset = 0;
+	info.high.rm_flags = flags;
+	info.high.rm_blockcount = 0;
+	*stat = 0;
+	info.irec = irec;
+	info.stat = stat;
+
+	trace_xfs_rmap_lookup_le_range(cur->bc_mp,
+			cur->bc_private.a.agno, bno, 0, owner, offset, flags);
+	error = xfs_rmap_query_range(cur, &info.high, &info.high,
+			xfs_rmap_lookup_le_range_helper, &info);
+	if (error == XFS_BTREE_QUERY_RANGE_ABORT)
+		error = 0;
+	if (*stat)
+		trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
+				cur->bc_private.a.agno, irec->rm_startblock,
+				irec->rm_blockcount, irec->rm_owner,
+				irec->rm_offset, irec->rm_flags);
+	return error;
+}
+
 /*
  * Find the extent in the rmap btree and remove it.
  *
@@ -1093,11 +1278,704 @@
 	return error;
 }
 
+/*
+ * Convert an unwritten extent to a real extent or vice versa.  If there is no
+ * possibility of overlapping extents, delegate to the simpler convert
+ * function.
+ */
+STATIC int
+xfs_rmap_convert_shared(
+	struct xfs_btree_cur	*cur,
+	xfs_agblock_t		bno,
+	xfs_extlen_t		len,
+	bool			unwritten,
+	struct xfs_owner_info	*oinfo)
+{
+	struct xfs_mount	*mp = cur->bc_mp;
+	struct xfs_rmap_irec	r[4];	/* neighbor extent entries */
+					/* left is 0, right is 1, prev is 2 */
+					/* new is 3 */
+	uint64_t		owner;
+	uint64_t		offset;
+	uint64_t		new_endoff;
+	unsigned int		oldext;
+	unsigned int		newext;
+	unsigned int		flags = 0;
+	int			i;
+	int			state = 0;
+	int			error;
+
+	xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
+	ASSERT(!(XFS_RMAP_NON_INODE_OWNER(owner) ||
+			(flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
+	oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
+	new_endoff = offset + len;
+	trace_xfs_rmap_convert(mp, cur->bc_private.a.agno, bno, len,
+			unwritten, oinfo);
+
+	/*
+	 * For the initial lookup, look for and exact match or the left-adjacent
+	 * record for our insertion point. This will also give us the record for
+	 * start block contiguity tests.
+	 */
+	error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, flags,
+			&PREV, &i);
+	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+
+	ASSERT(PREV.rm_offset <= offset);
+	ASSERT(PREV.rm_offset + PREV.rm_blockcount >= new_endoff);
+	ASSERT((PREV.rm_flags & XFS_RMAP_UNWRITTEN) == oldext);
+	newext = ~oldext & XFS_RMAP_UNWRITTEN;
+
+	/*
+	 * Set flags determining what part of the previous oldext allocation
+	 * extent is being replaced by a newext allocation.
+	 */
+	if (PREV.rm_offset == offset)
+		state |= RMAP_LEFT_FILLING;
+	if (PREV.rm_offset + PREV.rm_blockcount == new_endoff)
+		state |= RMAP_RIGHT_FILLING;
+
+	/* Is there a left record that abuts our range? */
+	error = xfs_rmap_find_left_neighbor(cur, bno, owner, offset, newext,
+			&LEFT, &i);
+	if (error)
+		goto done;
+	if (i) {
+		state |= RMAP_LEFT_VALID;
+		XFS_WANT_CORRUPTED_GOTO(mp,
+				LEFT.rm_startblock + LEFT.rm_blockcount <= bno,
+				done);
+		if (xfs_rmap_is_mergeable(&LEFT, owner, newext))
+			state |= RMAP_LEFT_CONTIG;
+	}
+
+	/* Is there a right record that abuts our range? */
+	error = xfs_rmap_lookup_eq(cur, bno + len, len, owner, offset + len,
+			newext, &i);
+	if (error)
+		goto done;
+	if (i) {
+		state |= RMAP_RIGHT_VALID;
+		error = xfs_rmap_get_rec(cur, &RIGHT, &i);
+		if (error)
+			goto done;
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+		XFS_WANT_CORRUPTED_GOTO(mp, bno + len <= RIGHT.rm_startblock,
+				done);
+		trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
+				cur->bc_private.a.agno, RIGHT.rm_startblock,
+				RIGHT.rm_blockcount, RIGHT.rm_owner,
+				RIGHT.rm_offset, RIGHT.rm_flags);
+		if (xfs_rmap_is_mergeable(&RIGHT, owner, newext))
+			state |= RMAP_RIGHT_CONTIG;
+	}
+
+	/* check that left + prev + right is not too long */
+	if ((state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
+			 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) ==
+	    (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
+	     RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG) &&
+	    (unsigned long)LEFT.rm_blockcount + len +
+	     RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
+		state &= ~RMAP_RIGHT_CONTIG;
+
+	trace_xfs_rmap_convert_state(mp, cur->bc_private.a.agno, state,
+			_RET_IP_);
+	/*
+	 * Switch out based on the FILLING and CONTIG state bits.
+	 */
+	switch (state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
+			 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) {
+	case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
+	     RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
+		/*
+		 * Setting all of a previous oldext extent to newext.
+		 * The left and right neighbors are both contiguous with new.
+		 */
+		error = xfs_rmap_delete(cur, RIGHT.rm_startblock,
+				RIGHT.rm_blockcount, RIGHT.rm_owner,
+				RIGHT.rm_offset, RIGHT.rm_flags);
+		if (error)
+			goto done;
+		error = xfs_rmap_delete(cur, PREV.rm_startblock,
+				PREV.rm_blockcount, PREV.rm_owner,
+				PREV.rm_offset, PREV.rm_flags);
+		if (error)
+			goto done;
+		NEW = LEFT;
+		error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
+				NEW.rm_blockcount, NEW.rm_owner,
+				NEW.rm_offset, NEW.rm_flags, &i);
+		if (error)
+			goto done;
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+		NEW.rm_blockcount += PREV.rm_blockcount + RIGHT.rm_blockcount;
+		error = xfs_rmap_update(cur, &NEW);
+		if (error)
+			goto done;
+		break;
+
+	case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
+		/*
+		 * Setting all of a previous oldext extent to newext.
+		 * The left neighbor is contiguous, the right is not.
+		 */
+		error = xfs_rmap_delete(cur, PREV.rm_startblock,
+				PREV.rm_blockcount, PREV.rm_owner,
+				PREV.rm_offset, PREV.rm_flags);
+		if (error)
+			goto done;
+		NEW = LEFT;
+		error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
+				NEW.rm_blockcount, NEW.rm_owner,
+				NEW.rm_offset, NEW.rm_flags, &i);
+		if (error)
+			goto done;
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+		NEW.rm_blockcount += PREV.rm_blockcount;
+		error = xfs_rmap_update(cur, &NEW);
+		if (error)
+			goto done;
+		break;
+
+	case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
+		/*
+		 * Setting all of a previous oldext extent to newext.
+		 * The right neighbor is contiguous, the left is not.
+		 */
+		error = xfs_rmap_delete(cur, RIGHT.rm_startblock,
+				RIGHT.rm_blockcount, RIGHT.rm_owner,
+				RIGHT.rm_offset, RIGHT.rm_flags);
+		if (error)
+			goto done;
+		NEW = PREV;
+		error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
+				NEW.rm_blockcount, NEW.rm_owner,
+				NEW.rm_offset, NEW.rm_flags, &i);
+		if (error)
+			goto done;
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+		NEW.rm_blockcount += RIGHT.rm_blockcount;
+		NEW.rm_flags = RIGHT.rm_flags;
+		error = xfs_rmap_update(cur, &NEW);
+		if (error)
+			goto done;
+		break;
+
+	case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING:
+		/*
+		 * Setting all of a previous oldext extent to newext.
+		 * Neither the left nor right neighbors are contiguous with
+		 * the new one.
+		 */
+		NEW = PREV;
+		error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
+				NEW.rm_blockcount, NEW.rm_owner,
+				NEW.rm_offset, NEW.rm_flags, &i);
+		if (error)
+			goto done;
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+		NEW.rm_flags = newext;
+		error = xfs_rmap_update(cur, &NEW);
+		if (error)
+			goto done;
+		break;
+
+	case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG:
+		/*
+		 * Setting the first part of a previous oldext extent to newext.
+		 * The left neighbor is contiguous.
+		 */
+		NEW = PREV;
+		error = xfs_rmap_delete(cur, NEW.rm_startblock,
+				NEW.rm_blockcount, NEW.rm_owner,
+				NEW.rm_offset, NEW.rm_flags);
+		if (error)
+			goto done;
+		NEW.rm_offset += len;
+		NEW.rm_startblock += len;
+		NEW.rm_blockcount -= len;
+		error = xfs_rmap_insert(cur, NEW.rm_startblock,
+				NEW.rm_blockcount, NEW.rm_owner,
+				NEW.rm_offset, NEW.rm_flags);
+		if (error)
+			goto done;
+		NEW = LEFT;
+		error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
+				NEW.rm_blockcount, NEW.rm_owner,
+				NEW.rm_offset, NEW.rm_flags, &i);
+		if (error)
+			goto done;
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+		NEW.rm_blockcount += len;
+		error = xfs_rmap_update(cur, &NEW);
+		if (error)
+			goto done;
+		break;
+
+	case RMAP_LEFT_FILLING:
+		/*
+		 * Setting the first part of a previous oldext extent to newext.
+		 * The left neighbor is not contiguous.
+		 */
+		NEW = PREV;
+		error = xfs_rmap_delete(cur, NEW.rm_startblock,
+				NEW.rm_blockcount, NEW.rm_owner,
+				NEW.rm_offset, NEW.rm_flags);
+		if (error)
+			goto done;
+		NEW.rm_offset += len;
+		NEW.rm_startblock += len;
+		NEW.rm_blockcount -= len;
+		error = xfs_rmap_insert(cur, NEW.rm_startblock,
+				NEW.rm_blockcount, NEW.rm_owner,
+				NEW.rm_offset, NEW.rm_flags);
+		if (error)
+			goto done;
+		error = xfs_rmap_insert(cur, bno, len, owner, offset, newext);
+		if (error)
+			goto done;
+		break;
+
+	case RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
+		/*
+		 * Setting the last part of a previous oldext extent to newext.
+		 * The right neighbor is contiguous with the new allocation.
+		 */
+		NEW = PREV;
+		error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
+				NEW.rm_blockcount, NEW.rm_owner,
+				NEW.rm_offset, NEW.rm_flags, &i);
+		if (error)
+			goto done;
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+		NEW.rm_blockcount = offset - NEW.rm_offset;
+		error = xfs_rmap_update(cur, &NEW);
+		if (error)
+			goto done;
+		NEW = RIGHT;
+		error = xfs_rmap_delete(cur, NEW.rm_startblock,
+				NEW.rm_blockcount, NEW.rm_owner,
+				NEW.rm_offset, NEW.rm_flags);
+		if (error)
+			goto done;
+		NEW.rm_offset = offset;
+		NEW.rm_startblock = bno;
+		NEW.rm_blockcount += len;
+		error = xfs_rmap_insert(cur, NEW.rm_startblock,
+				NEW.rm_blockcount, NEW.rm_owner,
+				NEW.rm_offset, NEW.rm_flags);
+		if (error)
+			goto done;
+		break;
+
+	case RMAP_RIGHT_FILLING:
+		/*
+		 * Setting the last part of a previous oldext extent to newext.
+		 * The right neighbor is not contiguous.
+		 */
+		NEW = PREV;
+		error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
+				NEW.rm_blockcount, NEW.rm_owner,
+				NEW.rm_offset, NEW.rm_flags, &i);
+		if (error)
+			goto done;
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+		NEW.rm_blockcount -= len;
+		error = xfs_rmap_update(cur, &NEW);
+		if (error)
+			goto done;
+		error = xfs_rmap_insert(cur, bno, len, owner, offset, newext);
+		if (error)
+			goto done;
+		break;
+
+	case 0:
+		/*
+		 * Setting the middle part of a previous oldext extent to
+		 * newext.  Contiguity is impossible here.
+		 * One extent becomes three extents.
+		 */
+		/* new right extent - oldext */
+		NEW.rm_startblock = bno + len;
+		NEW.rm_owner = owner;
+		NEW.rm_offset = new_endoff;
+		NEW.rm_blockcount = PREV.rm_offset + PREV.rm_blockcount -
+				new_endoff;
+		NEW.rm_flags = PREV.rm_flags;
+		error = xfs_rmap_insert(cur, NEW.rm_startblock,
+				NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset,
+				NEW.rm_flags);
+		if (error)
+			goto done;
+		/* new left extent - oldext */
+		NEW = PREV;
+		error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
+				NEW.rm_blockcount, NEW.rm_owner,
+				NEW.rm_offset, NEW.rm_flags, &i);
+		if (error)
+			goto done;
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+		NEW.rm_blockcount = offset - NEW.rm_offset;
+		error = xfs_rmap_update(cur, &NEW);
+		if (error)
+			goto done;
+		/* new middle extent - newext */
+		NEW.rm_startblock = bno;
+		NEW.rm_blockcount = len;
+		NEW.rm_owner = owner;
+		NEW.rm_offset = offset;
+		NEW.rm_flags = newext;
+		error = xfs_rmap_insert(cur, NEW.rm_startblock,
+				NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset,
+				NEW.rm_flags);
+		if (error)
+			goto done;
+		break;
+
+	case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
+	case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
+	case RMAP_LEFT_FILLING | RMAP_RIGHT_CONTIG:
+	case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
+	case RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
+	case RMAP_LEFT_CONTIG:
+	case RMAP_RIGHT_CONTIG:
+		/*
+		 * These cases are all impossible.
+		 */
+		ASSERT(0);
+	}
+
+	trace_xfs_rmap_convert_done(mp, cur->bc_private.a.agno, bno, len,
+			unwritten, oinfo);
+done:
+	if (error)
+		trace_xfs_rmap_convert_error(cur->bc_mp,
+				cur->bc_private.a.agno, error, _RET_IP_);
+	return error;
+}
+
 #undef	NEW
 #undef	LEFT
 #undef	RIGHT
 #undef	PREV
 
+/*
+ * Find an extent in the rmap btree and unmap it.  For rmap extent types that
+ * can overlap (data fork rmaps on reflink filesystems) we must be careful
+ * that the prev/next records in the btree might belong to another owner.
+ * Therefore we must use delete+insert to alter any of the key fields.
+ *
+ * For every other situation there can only be one owner for a given extent,
+ * so we can call the regular _free function.
+ */
+STATIC int
+xfs_rmap_unmap_shared(
+	struct xfs_btree_cur	*cur,
+	xfs_agblock_t		bno,
+	xfs_extlen_t		len,
+	bool			unwritten,
+	struct xfs_owner_info	*oinfo)
+{
+	struct xfs_mount	*mp = cur->bc_mp;
+	struct xfs_rmap_irec	ltrec;
+	uint64_t		ltoff;
+	int			error = 0;
+	int			i;
+	uint64_t		owner;
+	uint64_t		offset;
+	unsigned int		flags;
+
+	xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
+	if (unwritten)
+		flags |= XFS_RMAP_UNWRITTEN;
+	trace_xfs_rmap_unmap(mp, cur->bc_private.a.agno, bno, len,
+			unwritten, oinfo);
+
+	/*
+	 * We should always have a left record because there's a static record
+	 * for the AG headers at rm_startblock == 0 created by mkfs/growfs that
+	 * will not ever be removed from the tree.
+	 */
+	error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, flags,
+			&ltrec, &i);
+	if (error)
+		goto out_error;
+	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+	ltoff = ltrec.rm_offset;
+
+	/* Make sure the extent we found covers the entire freeing range. */
+	XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_startblock <= bno &&
+		ltrec.rm_startblock + ltrec.rm_blockcount >=
+		bno + len, out_error);
+
+	/* Make sure the owner matches what we expect to find in the tree. */
+	XFS_WANT_CORRUPTED_GOTO(mp, owner == ltrec.rm_owner, out_error);
+
+	/* Make sure the unwritten flag matches. */
+	XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) ==
+			(ltrec.rm_flags & XFS_RMAP_UNWRITTEN), out_error);
+
+	/* Check the offset. */
+	XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_offset <= offset, out_error);
+	XFS_WANT_CORRUPTED_GOTO(mp, offset <= ltoff + ltrec.rm_blockcount,
+			out_error);
+
+	if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
+		/* Exact match, simply remove the record from rmap tree. */
+		error = xfs_rmap_delete(cur, ltrec.rm_startblock,
+				ltrec.rm_blockcount, ltrec.rm_owner,
+				ltrec.rm_offset, ltrec.rm_flags);
+		if (error)
+			goto out_error;
+	} else if (ltrec.rm_startblock == bno) {
+		/*
+		 * Overlap left hand side of extent: move the start, trim the
+		 * length and update the current record.
+		 *
+		 *       ltbno                ltlen
+		 * Orig:    |oooooooooooooooooooo|
+		 * Freeing: |fffffffff|
+		 * Result:            |rrrrrrrrrr|
+		 *         bno       len
+		 */
+
+		/* Delete prev rmap. */
+		error = xfs_rmap_delete(cur, ltrec.rm_startblock,
+				ltrec.rm_blockcount, ltrec.rm_owner,
+				ltrec.rm_offset, ltrec.rm_flags);
+		if (error)
+			goto out_error;
+
+		/* Add an rmap at the new offset. */
+		ltrec.rm_startblock += len;
+		ltrec.rm_blockcount -= len;
+		ltrec.rm_offset += len;
+		error = xfs_rmap_insert(cur, ltrec.rm_startblock,
+				ltrec.rm_blockcount, ltrec.rm_owner,
+				ltrec.rm_offset, ltrec.rm_flags);
+		if (error)
+			goto out_error;
+	} else if (ltrec.rm_startblock + ltrec.rm_blockcount == bno + len) {
+		/*
+		 * Overlap right hand side of extent: trim the length and
+		 * update the current record.
+		 *
+		 *       ltbno                ltlen
+		 * Orig:    |oooooooooooooooooooo|
+		 * Freeing:            |fffffffff|
+		 * Result:  |rrrrrrrrrr|
+		 *                    bno       len
+		 */
+		error = xfs_rmap_lookup_eq(cur, ltrec.rm_startblock,
+				ltrec.rm_blockcount, ltrec.rm_owner,
+				ltrec.rm_offset, ltrec.rm_flags, &i);
+		if (error)
+			goto out_error;
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+		ltrec.rm_blockcount -= len;
+		error = xfs_rmap_update(cur, &ltrec);
+		if (error)
+			goto out_error;
+	} else {
+		/*
+		 * Overlap middle of extent: trim the length of the existing
+		 * record to the length of the new left-extent size, increment
+		 * the insertion position so we can insert a new record
+		 * containing the remaining right-extent space.
+		 *
+		 *       ltbno                ltlen
+		 * Orig:    |oooooooooooooooooooo|
+		 * Freeing:       |fffffffff|
+		 * Result:  |rrrrr|         |rrrr|
+		 *               bno       len
+		 */
+		xfs_extlen_t	orig_len = ltrec.rm_blockcount;
+
+		/* Shrink the left side of the rmap */
+		error = xfs_rmap_lookup_eq(cur, ltrec.rm_startblock,
+				ltrec.rm_blockcount, ltrec.rm_owner,
+				ltrec.rm_offset, ltrec.rm_flags, &i);
+		if (error)
+			goto out_error;
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+		ltrec.rm_blockcount = bno - ltrec.rm_startblock;
+		error = xfs_rmap_update(cur, &ltrec);
+		if (error)
+			goto out_error;
+
+		/* Add an rmap at the new offset */
+		error = xfs_rmap_insert(cur, bno + len,
+				orig_len - len - ltrec.rm_blockcount,
+				ltrec.rm_owner, offset + len,
+				ltrec.rm_flags);
+		if (error)
+			goto out_error;
+	}
+
+	trace_xfs_rmap_unmap_done(mp, cur->bc_private.a.agno, bno, len,
+			unwritten, oinfo);
+out_error:
+	if (error)
+		trace_xfs_rmap_unmap_error(cur->bc_mp,
+				cur->bc_private.a.agno, error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Find an extent in the rmap btree and map it.  For rmap extent types that
+ * can overlap (data fork rmaps on reflink filesystems) we must be careful
+ * that the prev/next records in the btree might belong to another owner.
+ * Therefore we must use delete+insert to alter any of the key fields.
+ *
+ * For every other situation there can only be one owner for a given extent,
+ * so we can call the regular _alloc function.
+ */
+STATIC int
+xfs_rmap_map_shared(
+	struct xfs_btree_cur	*cur,
+	xfs_agblock_t		bno,
+	xfs_extlen_t		len,
+	bool			unwritten,
+	struct xfs_owner_info	*oinfo)
+{
+	struct xfs_mount	*mp = cur->bc_mp;
+	struct xfs_rmap_irec	ltrec;
+	struct xfs_rmap_irec	gtrec;
+	int			have_gt;
+	int			have_lt;
+	int			error = 0;
+	int			i;
+	uint64_t		owner;
+	uint64_t		offset;
+	unsigned int		flags = 0;
+
+	xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
+	if (unwritten)
+		flags |= XFS_RMAP_UNWRITTEN;
+	trace_xfs_rmap_map(mp, cur->bc_private.a.agno, bno, len,
+			unwritten, oinfo);
+
+	/* Is there a left record that abuts our range? */
+	error = xfs_rmap_find_left_neighbor(cur, bno, owner, offset, flags,
+			&ltrec, &have_lt);
+	if (error)
+		goto out_error;
+	if (have_lt &&
+	    !xfs_rmap_is_mergeable(&ltrec, owner, flags))
+		have_lt = 0;
+
+	/* Is there a right record that abuts our range? */
+	error = xfs_rmap_lookup_eq(cur, bno + len, len, owner, offset + len,
+			flags, &have_gt);
+	if (error)
+		goto out_error;
+	if (have_gt) {
+		error = xfs_rmap_get_rec(cur, &gtrec, &have_gt);
+		if (error)
+			goto out_error;
+		XFS_WANT_CORRUPTED_GOTO(mp, have_gt == 1, out_error);
+		trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
+			cur->bc_private.a.agno, gtrec.rm_startblock,
+			gtrec.rm_blockcount, gtrec.rm_owner,
+			gtrec.rm_offset, gtrec.rm_flags);
+
+		if (!xfs_rmap_is_mergeable(&gtrec, owner, flags))
+			have_gt = 0;
+	}
+
+	if (have_lt &&
+	    ltrec.rm_startblock + ltrec.rm_blockcount == bno &&
+	    ltrec.rm_offset + ltrec.rm_blockcount == offset) {
+		/*
+		 * Left edge contiguous, merge into left record.
+		 *
+		 *       ltbno     ltlen
+		 * orig:   |ooooooooo|
+		 * adding:           |aaaaaaaaa|
+		 * result: |rrrrrrrrrrrrrrrrrrr|
+		 *                  bno       len
+		 */
+		ltrec.rm_blockcount += len;
+		if (have_gt &&
+		    bno + len == gtrec.rm_startblock &&
+		    offset + len == gtrec.rm_offset) {
+			/*
+			 * Right edge also contiguous, delete right record
+			 * and merge into left record.
+			 *
+			 *       ltbno     ltlen    gtbno     gtlen
+			 * orig:   |ooooooooo|         |ooooooooo|
+			 * adding:           |aaaaaaaaa|
+			 * result: |rrrrrrrrrrrrrrrrrrrrrrrrrrrrr|
+			 */
+			ltrec.rm_blockcount += gtrec.rm_blockcount;
+			error = xfs_rmap_delete(cur, gtrec.rm_startblock,
+					gtrec.rm_blockcount, gtrec.rm_owner,
+					gtrec.rm_offset, gtrec.rm_flags);
+			if (error)
+				goto out_error;
+		}
+
+		/* Point the cursor back to the left record and update. */
+		error = xfs_rmap_lookup_eq(cur, ltrec.rm_startblock,
+				ltrec.rm_blockcount, ltrec.rm_owner,
+				ltrec.rm_offset, ltrec.rm_flags, &i);
+		if (error)
+			goto out_error;
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+
+		error = xfs_rmap_update(cur, &ltrec);
+		if (error)
+			goto out_error;
+	} else if (have_gt &&
+		   bno + len == gtrec.rm_startblock &&
+		   offset + len == gtrec.rm_offset) {
+		/*
+		 * Right edge contiguous, merge into right record.
+		 *
+		 *                 gtbno     gtlen
+		 * Orig:             |ooooooooo|
+		 * adding: |aaaaaaaaa|
+		 * Result: |rrrrrrrrrrrrrrrrrrr|
+		 *        bno       len
+		 */
+		/* Delete the old record. */
+		error = xfs_rmap_delete(cur, gtrec.rm_startblock,
+				gtrec.rm_blockcount, gtrec.rm_owner,
+				gtrec.rm_offset, gtrec.rm_flags);
+		if (error)
+			goto out_error;
+
+		/* Move the start and re-add it. */
+		gtrec.rm_startblock = bno;
+		gtrec.rm_blockcount += len;
+		gtrec.rm_offset = offset;
+		error = xfs_rmap_insert(cur, gtrec.rm_startblock,
+				gtrec.rm_blockcount, gtrec.rm_owner,
+				gtrec.rm_offset, gtrec.rm_flags);
+		if (error)
+			goto out_error;
+	} else {
+		/*
+		 * No contiguous edge with identical owner, insert
+		 * new record at current cursor position.
+		 */
+		error = xfs_rmap_insert(cur, bno, len, owner, offset, flags);
+		if (error)
+			goto out_error;
+	}
+
+	trace_xfs_rmap_map_done(mp, cur->bc_private.a.agno, bno, len,
+			unwritten, oinfo);
+out_error:
+	if (error)
+		trace_xfs_rmap_map_error(cur->bc_mp,
+				cur->bc_private.a.agno, error, _RET_IP_);
+	return error;
+}
+
 struct xfs_rmap_query_range_info {
 	xfs_rmap_query_range_fn	fn;
 	void				*priv;
@@ -1237,15 +2115,27 @@
 	case XFS_RMAP_MAP:
 		error = xfs_rmap_map(rcur, bno, blockcount, unwritten, &oinfo);
 		break;
+	case XFS_RMAP_MAP_SHARED:
+		error = xfs_rmap_map_shared(rcur, bno, blockcount, unwritten,
+				&oinfo);
+		break;
 	case XFS_RMAP_FREE:
 	case XFS_RMAP_UNMAP:
 		error = xfs_rmap_unmap(rcur, bno, blockcount, unwritten,
 				&oinfo);
 		break;
+	case XFS_RMAP_UNMAP_SHARED:
+		error = xfs_rmap_unmap_shared(rcur, bno, blockcount, unwritten,
+				&oinfo);
+		break;
 	case XFS_RMAP_CONVERT:
 		error = xfs_rmap_convert(rcur, bno, blockcount, !unwritten,
 				&oinfo);
 		break;
+	case XFS_RMAP_CONVERT_SHARED:
+		error = xfs_rmap_convert_shared(rcur, bno, blockcount,
+				!unwritten, &oinfo);
+		break;
 	default:
 		ASSERT(0);
 		error = -EFSCORRUPTED;
@@ -1263,9 +2153,10 @@
  */
 static bool
 xfs_rmap_update_is_needed(
-	struct xfs_mount	*mp)
+	struct xfs_mount	*mp,
+	int			whichfork)
 {
-	return xfs_sb_version_hasrmapbt(&mp->m_sb);
+	return xfs_sb_version_hasrmapbt(&mp->m_sb) && whichfork != XFS_COW_FORK;
 }
 
 /*
@@ -1311,10 +2202,11 @@
 	int			whichfork,
 	struct xfs_bmbt_irec	*PREV)
 {
-	if (!xfs_rmap_update_is_needed(mp))
+	if (!xfs_rmap_update_is_needed(mp, whichfork))
 		return 0;
 
-	return __xfs_rmap_add(mp, dfops, XFS_RMAP_MAP, ip->i_ino,
+	return __xfs_rmap_add(mp, dfops, xfs_is_reflink_inode(ip) ?
+			XFS_RMAP_MAP_SHARED : XFS_RMAP_MAP, ip->i_ino,
 			whichfork, PREV);
 }
 
@@ -1327,10 +2219,11 @@
 	int			whichfork,
 	struct xfs_bmbt_irec	*PREV)
 {
-	if (!xfs_rmap_update_is_needed(mp))
+	if (!xfs_rmap_update_is_needed(mp, whichfork))
 		return 0;
 
-	return __xfs_rmap_add(mp, dfops, XFS_RMAP_UNMAP, ip->i_ino,
+	return __xfs_rmap_add(mp, dfops, xfs_is_reflink_inode(ip) ?
+			XFS_RMAP_UNMAP_SHARED : XFS_RMAP_UNMAP, ip->i_ino,
 			whichfork, PREV);
 }
 
@@ -1343,10 +2236,11 @@
 	int			whichfork,
 	struct xfs_bmbt_irec	*PREV)
 {
-	if (!xfs_rmap_update_is_needed(mp))
+	if (!xfs_rmap_update_is_needed(mp, whichfork))
 		return 0;
 
-	return __xfs_rmap_add(mp, dfops, XFS_RMAP_CONVERT, ip->i_ino,
+	return __xfs_rmap_add(mp, dfops, xfs_is_reflink_inode(ip) ?
+			XFS_RMAP_CONVERT_SHARED : XFS_RMAP_CONVERT, ip->i_ino,
 			whichfork, PREV);
 }
 
@@ -1362,7 +2256,7 @@
 {
 	struct xfs_bmbt_irec	bmap;
 
-	if (!xfs_rmap_update_is_needed(mp))
+	if (!xfs_rmap_update_is_needed(mp, XFS_DATA_FORK))
 		return 0;
 
 	bmap.br_startblock = XFS_AGB_TO_FSB(mp, agno, bno);
@@ -1386,7 +2280,7 @@
 {
 	struct xfs_bmbt_irec	bmap;
 
-	if (!xfs_rmap_update_is_needed(mp))
+	if (!xfs_rmap_update_is_needed(mp, XFS_DATA_FORK))
 		return 0;
 
 	bmap.br_startblock = XFS_AGB_TO_FSB(mp, agno, bno);
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
index 71cf99a..7899305 100644
--- a/fs/xfs/libxfs/xfs_rmap.h
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -206,4 +206,11 @@
 		xfs_fsblock_t startblock, xfs_filblks_t blockcount,
 		xfs_exntst_t state, struct xfs_btree_cur **pcur);
 
+int xfs_rmap_find_left_neighbor(struct xfs_btree_cur *cur, xfs_agblock_t bno,
+		uint64_t owner, uint64_t offset, unsigned int flags,
+		struct xfs_rmap_irec *irec, int	*stat);
+int xfs_rmap_lookup_le_range(struct xfs_btree_cur *cur, xfs_agblock_t bno,
+		uint64_t owner, uint64_t offset, unsigned int flags,
+		struct xfs_rmap_irec *irec, int	*stat);
+
 #endif	/* __XFS_RMAP_H__ */
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index 17b8eeb..83e672f 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -35,6 +35,7 @@
 #include "xfs_cksum.h"
 #include "xfs_error.h"
 #include "xfs_extent_busy.h"
+#include "xfs_ag_resv.h"
 
 /*
  * Reverse map btree.
@@ -512,6 +513,83 @@
 xfs_rmapbt_compute_maxlevels(
 	struct xfs_mount		*mp)
 {
-	mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(mp,
-			mp->m_rmap_mnr, mp->m_sb.sb_agblocks);
+	/*
+	 * On a non-reflink filesystem, the maximum number of rmap
+	 * records is the number of blocks in the AG, hence the max
+	 * rmapbt height is log_$maxrecs($agblocks).  However, with
+	 * reflink each AG block can have up to 2^32 (per the refcount
+	 * record format) owners, which means that theoretically we
+	 * could face up to 2^64 rmap records.
+	 *
+	 * That effectively means that the max rmapbt height must be
+	 * XFS_BTREE_MAXLEVELS.  "Fortunately" we'll run out of AG
+	 * blocks to feed the rmapbt long before the rmapbt reaches
+	 * maximum height.  The reflink code uses ag_resv_critical to
+	 * disallow reflinking when less than 10% of the per-AG metadata
+	 * block reservation since the fallback is a regular file copy.
+	 */
+	if (xfs_sb_version_hasreflink(&mp->m_sb))
+		mp->m_rmap_maxlevels = XFS_BTREE_MAXLEVELS;
+	else
+		mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(mp,
+				mp->m_rmap_mnr, mp->m_sb.sb_agblocks);
+}
+
+/* Calculate the refcount btree size for some records. */
+xfs_extlen_t
+xfs_rmapbt_calc_size(
+	struct xfs_mount	*mp,
+	unsigned long long	len)
+{
+	return xfs_btree_calc_size(mp, mp->m_rmap_mnr, len);
+}
+
+/*
+ * Calculate the maximum refcount btree size.
+ */
+xfs_extlen_t
+xfs_rmapbt_max_size(
+	struct xfs_mount	*mp)
+{
+	/* Bail out if we're uninitialized, which can happen in mkfs. */
+	if (mp->m_rmap_mxr[0] == 0)
+		return 0;
+
+	return xfs_rmapbt_calc_size(mp, mp->m_sb.sb_agblocks);
+}
+
+/*
+ * Figure out how many blocks to reserve and how many are used by this btree.
+ */
+int
+xfs_rmapbt_calc_reserves(
+	struct xfs_mount	*mp,
+	xfs_agnumber_t		agno,
+	xfs_extlen_t		*ask,
+	xfs_extlen_t		*used)
+{
+	struct xfs_buf		*agbp;
+	struct xfs_agf		*agf;
+	xfs_extlen_t		pool_len;
+	xfs_extlen_t		tree_len;
+	int			error;
+
+	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+		return 0;
+
+	/* Reserve 1% of the AG or enough for 1 block per record. */
+	pool_len = max(mp->m_sb.sb_agblocks / 100, xfs_rmapbt_max_size(mp));
+	*ask += pool_len;
+
+	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+	if (error)
+		return error;
+
+	agf = XFS_BUF_TO_AGF(agbp);
+	tree_len = be32_to_cpu(agf->agf_rmap_blocks);
+	xfs_buf_relse(agbp);
+
+	*used += tree_len;
+
+	return error;
 }
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.h b/fs/xfs/libxfs/xfs_rmap_btree.h
index e73a553..2a9ac47 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.h
+++ b/fs/xfs/libxfs/xfs_rmap_btree.h
@@ -58,4 +58,11 @@
 int xfs_rmapbt_maxrecs(struct xfs_mount *mp, int blocklen, int leaf);
 extern void xfs_rmapbt_compute_maxlevels(struct xfs_mount *mp);
 
+extern xfs_extlen_t xfs_rmapbt_calc_size(struct xfs_mount *mp,
+		unsigned long long len);
+extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp);
+
+extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp,
+		xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
+
 #endif	/* __XFS_RMAP_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 4aecc5f..a70aec9 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -38,6 +38,8 @@
 #include "xfs_ialloc_btree.h"
 #include "xfs_log.h"
 #include "xfs_rmap_btree.h"
+#include "xfs_bmap.h"
+#include "xfs_refcount_btree.h"
 
 /*
  * Physical superblock buffer manipulations. Shared with libxfs in userspace.
@@ -737,6 +739,13 @@
 	mp->m_rmap_mnr[0] = mp->m_rmap_mxr[0] / 2;
 	mp->m_rmap_mnr[1] = mp->m_rmap_mxr[1] / 2;
 
+	mp->m_refc_mxr[0] = xfs_refcountbt_maxrecs(mp, sbp->sb_blocksize,
+			true);
+	mp->m_refc_mxr[1] = xfs_refcountbt_maxrecs(mp, sbp->sb_blocksize,
+			false);
+	mp->m_refc_mnr[0] = mp->m_refc_mxr[0] / 2;
+	mp->m_refc_mnr[1] = mp->m_refc_mxr[1] / 2;
+
 	mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
 	mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
 					sbp->sb_inopblock);
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index 0c5b30b..c6f4eb4 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -39,6 +39,7 @@
 extern const struct xfs_buf_ops xfs_agfl_buf_ops;
 extern const struct xfs_buf_ops xfs_allocbt_buf_ops;
 extern const struct xfs_buf_ops xfs_rmapbt_buf_ops;
+extern const struct xfs_buf_ops xfs_refcountbt_buf_ops;
 extern const struct xfs_buf_ops xfs_attr3_leaf_buf_ops;
 extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
 extern const struct xfs_buf_ops xfs_bmbt_buf_ops;
@@ -122,6 +123,7 @@
 #define	XFS_INO_REF		2
 #define	XFS_ATTR_BTREE_REF	1
 #define	XFS_DQUOT_REF		1
+#define	XFS_REFC_BTREE_REF	1
 
 /*
  * Flags for xfs_trans_ichgtime().
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index 301ef2f..b456cca 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -67,13 +67,14 @@
  * Per-extent log reservation for the btree changes involved in freeing or
  * allocating an extent.  In classic XFS there were two trees that will be
  * modified (bnobt + cntbt).  With rmap enabled, there are three trees
- * (rmapbt).  The number of blocks reserved is based on the formula:
+ * (rmapbt).  With reflink, there are four trees (refcountbt).  The number of
+ * blocks reserved is based on the formula:
  *
  * num trees * ((2 blocks/level * max depth) - 1)
  *
  * Keep in mind that max depth is calculated separately for each type of tree.
  */
-static uint
+uint
 xfs_allocfree_log_count(
 	struct xfs_mount *mp,
 	uint		num_ops)
@@ -83,6 +84,8 @@
 	blocks = num_ops * 2 * (2 * mp->m_ag_maxlevels - 1);
 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
 		blocks += num_ops * (2 * mp->m_rmap_maxlevels - 1);
+	if (xfs_sb_version_hasreflink(&mp->m_sb))
+		blocks += num_ops * (2 * mp->m_refc_maxlevels - 1);
 
 	return blocks;
 }
@@ -809,11 +812,18 @@
 	 * require a permanent reservation on space.
 	 */
 	resp->tr_write.tr_logres = xfs_calc_write_reservation(mp);
-	resp->tr_write.tr_logcount = XFS_WRITE_LOG_COUNT;
+	if (xfs_sb_version_hasreflink(&mp->m_sb))
+		resp->tr_write.tr_logcount = XFS_WRITE_LOG_COUNT_REFLINK;
+	else
+		resp->tr_write.tr_logcount = XFS_WRITE_LOG_COUNT;
 	resp->tr_write.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
 
 	resp->tr_itruncate.tr_logres = xfs_calc_itruncate_reservation(mp);
-	resp->tr_itruncate.tr_logcount = XFS_ITRUNCATE_LOG_COUNT;
+	if (xfs_sb_version_hasreflink(&mp->m_sb))
+		resp->tr_itruncate.tr_logcount =
+				XFS_ITRUNCATE_LOG_COUNT_REFLINK;
+	else
+		resp->tr_itruncate.tr_logcount = XFS_ITRUNCATE_LOG_COUNT;
 	resp->tr_itruncate.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
 
 	resp->tr_rename.tr_logres = xfs_calc_rename_reservation(mp);
@@ -870,7 +880,10 @@
 	resp->tr_growrtalloc.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
 
 	resp->tr_qm_dqalloc.tr_logres = xfs_calc_qm_dqalloc_reservation(mp);
-	resp->tr_qm_dqalloc.tr_logcount = XFS_WRITE_LOG_COUNT;
+	if (xfs_sb_version_hasreflink(&mp->m_sb))
+		resp->tr_qm_dqalloc.tr_logcount = XFS_WRITE_LOG_COUNT_REFLINK;
+	else
+		resp->tr_qm_dqalloc.tr_logcount = XFS_WRITE_LOG_COUNT;
 	resp->tr_qm_dqalloc.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
 
 	/*
diff --git a/fs/xfs/libxfs/xfs_trans_resv.h b/fs/xfs/libxfs/xfs_trans_resv.h
index 0eb46ed..b7e5357 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.h
+++ b/fs/xfs/libxfs/xfs_trans_resv.h
@@ -87,6 +87,7 @@
 #define	XFS_DEFAULT_LOG_COUNT		1
 #define	XFS_DEFAULT_PERM_LOG_COUNT	2
 #define	XFS_ITRUNCATE_LOG_COUNT		2
+#define	XFS_ITRUNCATE_LOG_COUNT_REFLINK	8
 #define XFS_INACTIVE_LOG_COUNT		2
 #define	XFS_CREATE_LOG_COUNT		2
 #define	XFS_CREATE_TMPFILE_LOG_COUNT	2
@@ -96,11 +97,13 @@
 #define	XFS_LINK_LOG_COUNT		2
 #define	XFS_RENAME_LOG_COUNT		2
 #define	XFS_WRITE_LOG_COUNT		2
+#define	XFS_WRITE_LOG_COUNT_REFLINK	8
 #define	XFS_ADDAFORK_LOG_COUNT		2
 #define	XFS_ATTRINVAL_LOG_COUNT		1
 #define	XFS_ATTRSET_LOG_COUNT		3
 #define	XFS_ATTRRM_LOG_COUNT		3
 
 void xfs_trans_resv_calc(struct xfs_mount *mp, struct xfs_trans_resv *resp);
+uint xfs_allocfree_log_count(struct xfs_mount *mp, uint num_ops);
 
 #endif	/* __XFS_TRANS_RESV_H__ */
diff --git a/fs/xfs/libxfs/xfs_trans_space.h b/fs/xfs/libxfs/xfs_trans_space.h
index 41e0428..7917f6e 100644
--- a/fs/xfs/libxfs/xfs_trans_space.h
+++ b/fs/xfs/libxfs/xfs_trans_space.h
@@ -21,6 +21,8 @@
 /*
  * Components of space reservations.
  */
+#define XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp)    \
+		(((mp)->m_rmap_mxr[0]) - ((mp)->m_rmap_mnr[0]))
 #define XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)    \
 		(((mp)->m_alloc_mxr[0]) - ((mp)->m_alloc_mnr[0]))
 #define	XFS_EXTENTADD_SPACE_RES(mp,w)	(XFS_BM_MAXLEVELS(mp,w) - 1)
@@ -28,6 +30,13 @@
 	(((b + XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) - 1) / \
 	  XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)) * \
 	  XFS_EXTENTADD_SPACE_RES(mp,w))
+#define XFS_SWAP_RMAP_SPACE_RES(mp,b,w)\
+	(((b + XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) - 1) / \
+	  XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)) * \
+	  XFS_EXTENTADD_SPACE_RES(mp,w) + \
+	 ((b + XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) - 1) / \
+	  XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp)) * \
+	  (mp)->m_rmap_maxlevels)
 #define	XFS_DAENTER_1B(mp,w)	\
 	((w) == XFS_DATA_FORK ? (mp)->m_dir_geo->fsbcount : 1)
 #define	XFS_DAENTER_DBS(mp,w)	\
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index 3d50364..8d74870 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -90,6 +90,7 @@
  */
 #define	XFS_DATA_FORK	0
 #define	XFS_ATTR_FORK	1
+#define	XFS_COW_FORK	2
 
 /*
  * Min numbers of data/attr fork btree root pointers.
@@ -109,7 +110,7 @@
 
 typedef enum {
 	XFS_BTNUM_BNOi, XFS_BTNUM_CNTi, XFS_BTNUM_RMAPi, XFS_BTNUM_BMAPi,
-	XFS_BTNUM_INOi, XFS_BTNUM_FINOi, XFS_BTNUM_MAX
+	XFS_BTNUM_INOi, XFS_BTNUM_FINOi, XFS_BTNUM_REFCi, XFS_BTNUM_MAX
 } xfs_btnum_t;
 
 struct xfs_name {
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index b6e527b..b468e04 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -236,7 +236,7 @@
 
 		iattr.ia_valid = ATTR_MODE | ATTR_CTIME;
 		iattr.ia_mode = mode;
-		iattr.ia_ctime = current_fs_time(inode->i_sb);
+		iattr.ia_ctime = current_time(inode);
 
 		error = xfs_setattr_nonsize(XFS_I(inode), &iattr, XFS_ATTR_NOACL);
 	}
@@ -257,16 +257,11 @@
 		return error;
 
 	if (type == ACL_TYPE_ACCESS) {
-		umode_t mode = inode->i_mode;
-		error = posix_acl_equiv_mode(acl, &mode);
+		umode_t mode;
 
-		if (error <= 0) {
-			acl = NULL;
-
-			if (error < 0)
-				return error;
-		}
-
+		error = posix_acl_update_mode(inode, &mode, &acl);
+		if (error)
+			return error;
 		error = xfs_set_mode(inode, mode);
 		if (error)
 			return error;
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 4a28fa9..3e57a56 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -31,6 +31,7 @@
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
 #include "xfs_bmap_btree.h"
+#include "xfs_reflink.h"
 #include <linux/gfp.h>
 #include <linux/mpage.h>
 #include <linux/pagevec.h>
@@ -39,6 +40,7 @@
 /* flags for direct write completions */
 #define XFS_DIO_FLAG_UNWRITTEN	(1 << 0)
 #define XFS_DIO_FLAG_APPEND	(1 << 1)
+#define XFS_DIO_FLAG_COW	(1 << 2)
 
 /*
  * structure owned by writepages passed to individual writepage calls
@@ -287,6 +289,25 @@
 		error = -EIO;
 
 	/*
+	 * For a CoW extent, we need to move the mapping from the CoW fork
+	 * to the data fork.  If instead an error happened, just dump the
+	 * new blocks.
+	 */
+	if (ioend->io_type == XFS_IO_COW) {
+		if (error)
+			goto done;
+		if (ioend->io_bio->bi_error) {
+			error = xfs_reflink_cancel_cow_range(ip,
+					ioend->io_offset, ioend->io_size);
+			goto done;
+		}
+		error = xfs_reflink_end_cow(ip, ioend->io_offset,
+				ioend->io_size);
+		if (error)
+			goto done;
+	}
+
+	/*
 	 * For unwritten extents we need to issue transactions to convert a
 	 * range to normal written extens after the data I/O has finished.
 	 * Detecting and handling completion IO errors is done individually
@@ -301,7 +322,8 @@
 	} else if (ioend->io_append_trans) {
 		error = xfs_setfilesize_ioend(ioend, error);
 	} else {
-		ASSERT(!xfs_ioend_is_append(ioend));
+		ASSERT(!xfs_ioend_is_append(ioend) ||
+		       ioend->io_type == XFS_IO_COW);
 	}
 
 done:
@@ -315,7 +337,7 @@
 	struct xfs_ioend	*ioend = bio->bi_private;
 	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
 
-	if (ioend->io_type == XFS_IO_UNWRITTEN)
+	if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
 		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
 	else if (ioend->io_append_trans)
 		queue_work(mp->m_data_workqueue, &ioend->io_work);
@@ -341,6 +363,7 @@
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
 
+	ASSERT(type != XFS_IO_COW);
 	if (type == XFS_IO_UNWRITTEN)
 		bmapi_flags |= XFS_BMAPI_IGSTATE;
 
@@ -355,6 +378,13 @@
 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
 				imap, &nimaps, bmapi_flags);
+	/*
+	 * Truncate an overwrite extent if there's a pending CoW
+	 * reservation before the end of this extent.  This forces us
+	 * to come back to writepage to take care of the CoW.
+	 */
+	if (nimaps && type == XFS_IO_OVERWRITE)
+		xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb, imap);
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 
 	if (error)
@@ -362,7 +392,8 @@
 
 	if (type == XFS_IO_DELALLOC &&
 	    (!nimaps || isnullstartblock(imap->br_startblock))) {
-		error = xfs_iomap_write_allocate(ip, offset, imap);
+		error = xfs_iomap_write_allocate(ip, XFS_DATA_FORK, offset,
+				imap);
 		if (!error)
 			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
 		return error;
@@ -737,6 +768,56 @@
 	return;
 }
 
+static int
+xfs_map_cow(
+	struct xfs_writepage_ctx *wpc,
+	struct inode		*inode,
+	loff_t			offset,
+	unsigned int		*new_type)
+{
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_bmbt_irec	imap;
+	bool			is_cow = false, need_alloc = false;
+	int			error;
+
+	/*
+	 * If we already have a valid COW mapping keep using it.
+	 */
+	if (wpc->io_type == XFS_IO_COW) {
+		wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
+		if (wpc->imap_valid) {
+			*new_type = XFS_IO_COW;
+			return 0;
+		}
+	}
+
+	/*
+	 * Else we need to check if there is a COW mapping at this offset.
+	 */
+	xfs_ilock(ip, XFS_ILOCK_SHARED);
+	is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap, &need_alloc);
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+	if (!is_cow)
+		return 0;
+
+	/*
+	 * And if the COW mapping has a delayed extent here we need to
+	 * allocate real space for it now.
+	 */
+	if (need_alloc) {
+		error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
+				&imap);
+		if (error)
+			return error;
+	}
+
+	wpc->io_type = *new_type = XFS_IO_COW;
+	wpc->imap_valid = true;
+	wpc->imap = imap;
+	return 0;
+}
+
 /*
  * We implement an immediate ioend submission policy here to avoid needing to
  * chain multiple ioends and hence nest mempool allocations which can violate
@@ -769,6 +850,7 @@
 	int			error = 0;
 	int			count = 0;
 	int			uptodate = 1;
+	unsigned int		new_type;
 
 	bh = head = page_buffers(page);
 	offset = page_offset(page);
@@ -789,22 +871,13 @@
 			continue;
 		}
 
-		if (buffer_unwritten(bh)) {
-			if (wpc->io_type != XFS_IO_UNWRITTEN) {
-				wpc->io_type = XFS_IO_UNWRITTEN;
-				wpc->imap_valid = false;
-			}
-		} else if (buffer_delay(bh)) {
-			if (wpc->io_type != XFS_IO_DELALLOC) {
-				wpc->io_type = XFS_IO_DELALLOC;
-				wpc->imap_valid = false;
-			}
-		} else if (buffer_uptodate(bh)) {
-			if (wpc->io_type != XFS_IO_OVERWRITE) {
-				wpc->io_type = XFS_IO_OVERWRITE;
-				wpc->imap_valid = false;
-			}
-		} else {
+		if (buffer_unwritten(bh))
+			new_type = XFS_IO_UNWRITTEN;
+		else if (buffer_delay(bh))
+			new_type = XFS_IO_DELALLOC;
+		else if (buffer_uptodate(bh))
+			new_type = XFS_IO_OVERWRITE;
+		else {
 			if (PageUptodate(page))
 				ASSERT(buffer_mapped(bh));
 			/*
@@ -817,6 +890,17 @@
 			continue;
 		}
 
+		if (xfs_is_reflink_inode(XFS_I(inode))) {
+			error = xfs_map_cow(wpc, inode, offset, &new_type);
+			if (error)
+				goto out;
+		}
+
+		if (wpc->io_type != new_type) {
+			wpc->io_type = new_type;
+			wpc->imap_valid = false;
+		}
+
 		if (wpc->imap_valid)
 			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
 							 offset);
@@ -1107,18 +1191,24 @@
 	struct inode		*inode,
 	struct buffer_head	*bh_result,
 	struct xfs_bmbt_irec	*imap,
-	xfs_off_t		offset)
+	xfs_off_t		offset,
+	bool			is_cow)
 {
 	uintptr_t		*flags = (uintptr_t *)&bh_result->b_private;
 	xfs_off_t		size = bh_result->b_size;
 
 	trace_xfs_get_blocks_map_direct(XFS_I(inode), offset, size,
-		ISUNWRITTEN(imap) ? XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, imap);
+		ISUNWRITTEN(imap) ? XFS_IO_UNWRITTEN : is_cow ? XFS_IO_COW :
+		XFS_IO_OVERWRITE, imap);
 
 	if (ISUNWRITTEN(imap)) {
 		*flags |= XFS_DIO_FLAG_UNWRITTEN;
 		set_buffer_defer_completion(bh_result);
-	} else if (offset + size > i_size_read(inode) || offset + size < 0) {
+	} else if (is_cow) {
+		*flags |= XFS_DIO_FLAG_COW;
+		set_buffer_defer_completion(bh_result);
+	}
+	if (offset + size > i_size_read(inode) || offset + size < 0) {
 		*flags |= XFS_DIO_FLAG_APPEND;
 		set_buffer_defer_completion(bh_result);
 	}
@@ -1164,6 +1254,44 @@
 	bh_result->b_size = mapping_size;
 }
 
+/* Bounce unaligned directio writes to the page cache. */
+static int
+xfs_bounce_unaligned_dio_write(
+	struct xfs_inode	*ip,
+	xfs_fileoff_t		offset_fsb,
+	struct xfs_bmbt_irec	*imap)
+{
+	struct xfs_bmbt_irec	irec;
+	xfs_fileoff_t		delta;
+	bool			shared;
+	bool			x;
+	int			error;
+
+	irec = *imap;
+	if (offset_fsb > irec.br_startoff) {
+		delta = offset_fsb - irec.br_startoff;
+		irec.br_blockcount -= delta;
+		irec.br_startblock += delta;
+		irec.br_startoff = offset_fsb;
+	}
+	error = xfs_reflink_trim_around_shared(ip, &irec, &shared, &x);
+	if (error)
+		return error;
+
+	/*
+	 * We're here because we're trying to do a directio write to a
+	 * region that isn't aligned to a filesystem block.  If any part
+	 * of the extent is shared, fall back to buffered mode to handle
+	 * the RMW.  This is done by returning -EREMCHG ("remote addr
+	 * changed"), which is caught further up the call stack.
+	 */
+	if (shared) {
+		trace_xfs_reflink_bounce_dio_write(ip, imap);
+		return -EREMCHG;
+	}
+	return 0;
+}
+
 STATIC int
 __xfs_get_blocks(
 	struct inode		*inode,
@@ -1183,6 +1311,8 @@
 	xfs_off_t		offset;
 	ssize_t			size;
 	int			new = 0;
+	bool			is_cow = false;
+	bool			need_alloc = false;
 
 	BUG_ON(create && !direct);
 
@@ -1208,8 +1338,26 @@
 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
 
-	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
-				&imap, &nimaps, XFS_BMAPI_ENTIRE);
+	if (create && direct && xfs_is_reflink_inode(ip))
+		is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap,
+					&need_alloc);
+	if (!is_cow) {
+		error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
+					&imap, &nimaps, XFS_BMAPI_ENTIRE);
+		/*
+		 * Truncate an overwrite extent if there's a pending CoW
+		 * reservation before the end of this extent.  This
+		 * forces us to come back to get_blocks to take care of
+		 * the CoW.
+		 */
+		if (create && direct && nimaps &&
+		    imap.br_startblock != HOLESTARTBLOCK &&
+		    imap.br_startblock != DELAYSTARTBLOCK &&
+		    !ISUNWRITTEN(&imap))
+			xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb,
+					&imap);
+	}
+	ASSERT(!need_alloc);
 	if (error)
 		goto out_unlock;
 
@@ -1261,6 +1409,13 @@
 	if (imap.br_startblock != HOLESTARTBLOCK &&
 	    imap.br_startblock != DELAYSTARTBLOCK &&
 	    (create || !ISUNWRITTEN(&imap))) {
+		if (create && direct && !is_cow) {
+			error = xfs_bounce_unaligned_dio_write(ip, offset_fsb,
+					&imap);
+			if (error)
+				return error;
+		}
+
 		xfs_map_buffer(inode, bh_result, &imap, offset);
 		if (ISUNWRITTEN(&imap))
 			set_buffer_unwritten(bh_result);
@@ -1269,7 +1424,8 @@
 			if (dax_fault)
 				ASSERT(!ISUNWRITTEN(&imap));
 			else
-				xfs_map_direct(inode, bh_result, &imap, offset);
+				xfs_map_direct(inode, bh_result, &imap, offset,
+						is_cow);
 		}
 	}
 
@@ -1391,11 +1547,14 @@
 		i_size_write(inode, offset + size);
 	spin_unlock(&ip->i_flags_lock);
 
+	if (flags & XFS_DIO_FLAG_COW)
+		error = xfs_reflink_end_cow(ip, offset, size);
 	if (flags & XFS_DIO_FLAG_UNWRITTEN) {
 		trace_xfs_end_io_direct_write_unwritten(ip, offset, size);
 
 		error = xfs_iomap_write_unwritten(ip, offset, size);
-	} else if (flags & XFS_DIO_FLAG_APPEND) {
+	}
+	if (flags & XFS_DIO_FLAG_APPEND) {
 		trace_xfs_end_io_direct_write_append(ip, offset, size);
 
 		error = xfs_setfilesize(ip, offset, size);
@@ -1425,6 +1584,17 @@
 
 	trace_xfs_vm_bmap(XFS_I(inode));
 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
+
+	/*
+	 * The swap code (ab-)uses ->bmap to get a block mapping and then
+	 * bypasseѕ the file system for actual I/O.  We really can't allow
+	 * that on reflinks inodes, so we have to skip out here.  And yes,
+	 * 0 is the magic code for a bmap error..
+	 */
+	if (xfs_is_reflink_inode(ip)) {
+		xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+		return 0;
+	}
 	filemap_write_and_wait(mapping);
 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 	return generic_block_bmap(mapping, block, xfs_get_blocks);
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index 1950e3b..b3c6634 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -28,13 +28,15 @@
 	XFS_IO_DELALLOC,	/* covers delalloc region */
 	XFS_IO_UNWRITTEN,	/* covers allocated but uninitialized data */
 	XFS_IO_OVERWRITE,	/* covers already allocated extent */
+	XFS_IO_COW,		/* covers copy-on-write extent */
 };
 
 #define XFS_IO_TYPES \
 	{ XFS_IO_INVALID,		"invalid" }, \
 	{ XFS_IO_DELALLOC,		"delalloc" }, \
 	{ XFS_IO_UNWRITTEN,		"unwritten" }, \
-	{ XFS_IO_OVERWRITE,		"overwrite" }
+	{ XFS_IO_OVERWRITE,		"overwrite" }, \
+	{ XFS_IO_COW,			"CoW" }
 
 /*
  * Structure for buffered I/O completions.
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
new file mode 100644
index 0000000..9bf57c7
--- /dev/null
+++ b/fs/xfs/xfs_bmap_item.c
@@ -0,0 +1,508 @@
+/*
+ * Copyright (C) 2016 Oracle.  All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_inode.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_buf_item.h"
+#include "xfs_bmap_item.h"
+#include "xfs_log.h"
+#include "xfs_bmap.h"
+#include "xfs_icache.h"
+#include "xfs_trace.h"
+
+
+kmem_zone_t	*xfs_bui_zone;
+kmem_zone_t	*xfs_bud_zone;
+
+static inline struct xfs_bui_log_item *BUI_ITEM(struct xfs_log_item *lip)
+{
+	return container_of(lip, struct xfs_bui_log_item, bui_item);
+}
+
+void
+xfs_bui_item_free(
+	struct xfs_bui_log_item	*buip)
+{
+	kmem_zone_free(xfs_bui_zone, buip);
+}
+
+STATIC void
+xfs_bui_item_size(
+	struct xfs_log_item	*lip,
+	int			*nvecs,
+	int			*nbytes)
+{
+	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);
+
+	*nvecs += 1;
+	*nbytes += xfs_bui_log_format_sizeof(buip->bui_format.bui_nextents);
+}
+
+/*
+ * This is called to fill in the vector of log iovecs for the
+ * given bui log item. We use only 1 iovec, and we point that
+ * at the bui_log_format structure embedded in the bui item.
+ * It is at this point that we assert that all of the extent
+ * slots in the bui item have been filled.
+ */
+STATIC void
+xfs_bui_item_format(
+	struct xfs_log_item	*lip,
+	struct xfs_log_vec	*lv)
+{
+	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);
+	struct xfs_log_iovec	*vecp = NULL;
+
+	ASSERT(atomic_read(&buip->bui_next_extent) ==
+			buip->bui_format.bui_nextents);
+
+	buip->bui_format.bui_type = XFS_LI_BUI;
+	buip->bui_format.bui_size = 1;
+
+	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_BUI_FORMAT, &buip->bui_format,
+			xfs_bui_log_format_sizeof(buip->bui_format.bui_nextents));
+}
+
+/*
+ * Pinning has no meaning for an bui item, so just return.
+ */
+STATIC void
+xfs_bui_item_pin(
+	struct xfs_log_item	*lip)
+{
+}
+
+/*
+ * The unpin operation is the last place an BUI is manipulated in the log. It is
+ * either inserted in the AIL or aborted in the event of a log I/O error. In
+ * either case, the BUI transaction has been successfully committed to make it
+ * this far. Therefore, we expect whoever committed the BUI to either construct
+ * and commit the BUD or drop the BUD's reference in the event of error. Simply
+ * drop the log's BUI reference now that the log is done with it.
+ */
+STATIC void
+xfs_bui_item_unpin(
+	struct xfs_log_item	*lip,
+	int			remove)
+{
+	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);
+
+	xfs_bui_release(buip);
+}
+
+/*
+ * BUI items have no locking or pushing.  However, since BUIs are pulled from
+ * the AIL when their corresponding BUDs are committed to disk, their situation
+ * is very similar to being pinned.  Return XFS_ITEM_PINNED so that the caller
+ * will eventually flush the log.  This should help in getting the BUI out of
+ * the AIL.
+ */
+STATIC uint
+xfs_bui_item_push(
+	struct xfs_log_item	*lip,
+	struct list_head	*buffer_list)
+{
+	return XFS_ITEM_PINNED;
+}
+
+/*
+ * The BUI has been either committed or aborted if the transaction has been
+ * cancelled. If the transaction was cancelled, an BUD isn't going to be
+ * constructed and thus we free the BUI here directly.
+ */
+STATIC void
+xfs_bui_item_unlock(
+	struct xfs_log_item	*lip)
+{
+	if (lip->li_flags & XFS_LI_ABORTED)
+		xfs_bui_item_free(BUI_ITEM(lip));
+}
+
+/*
+ * The BUI is logged only once and cannot be moved in the log, so simply return
+ * the lsn at which it's been logged.
+ */
+STATIC xfs_lsn_t
+xfs_bui_item_committed(
+	struct xfs_log_item	*lip,
+	xfs_lsn_t		lsn)
+{
+	return lsn;
+}
+
+/*
+ * The BUI dependency tracking op doesn't do squat.  It can't because
+ * it doesn't know where the free extent is coming from.  The dependency
+ * tracking has to be handled by the "enclosing" metadata object.  For
+ * example, for inodes, the inode is locked throughout the extent freeing
+ * so the dependency should be recorded there.
+ */
+STATIC void
+xfs_bui_item_committing(
+	struct xfs_log_item	*lip,
+	xfs_lsn_t		lsn)
+{
+}
+
+/*
+ * This is the ops vector shared by all bui log items.
+ */
+static const struct xfs_item_ops xfs_bui_item_ops = {
+	.iop_size	= xfs_bui_item_size,
+	.iop_format	= xfs_bui_item_format,
+	.iop_pin	= xfs_bui_item_pin,
+	.iop_unpin	= xfs_bui_item_unpin,
+	.iop_unlock	= xfs_bui_item_unlock,
+	.iop_committed	= xfs_bui_item_committed,
+	.iop_push	= xfs_bui_item_push,
+	.iop_committing = xfs_bui_item_committing,
+};
+
+/*
+ * Allocate and initialize an bui item with the given number of extents.
+ */
+struct xfs_bui_log_item *
+xfs_bui_init(
+	struct xfs_mount		*mp)
+
+{
+	struct xfs_bui_log_item		*buip;
+
+	buip = kmem_zone_zalloc(xfs_bui_zone, KM_SLEEP);
+
+	xfs_log_item_init(mp, &buip->bui_item, XFS_LI_BUI, &xfs_bui_item_ops);
+	buip->bui_format.bui_nextents = XFS_BUI_MAX_FAST_EXTENTS;
+	buip->bui_format.bui_id = (uintptr_t)(void *)buip;
+	atomic_set(&buip->bui_next_extent, 0);
+	atomic_set(&buip->bui_refcount, 2);
+
+	return buip;
+}
+
+/*
+ * Freeing the BUI requires that we remove it from the AIL if it has already
+ * been placed there. However, the BUI may not yet have been placed in the AIL
+ * when called by xfs_bui_release() from BUD processing due to the ordering of
+ * committed vs unpin operations in bulk insert operations. Hence the reference
+ * count to ensure only the last caller frees the BUI.
+ */
+void
+xfs_bui_release(
+	struct xfs_bui_log_item	*buip)
+{
+	if (atomic_dec_and_test(&buip->bui_refcount)) {
+		xfs_trans_ail_remove(&buip->bui_item, SHUTDOWN_LOG_IO_ERROR);
+		xfs_bui_item_free(buip);
+	}
+}
+
+static inline struct xfs_bud_log_item *BUD_ITEM(struct xfs_log_item *lip)
+{
+	return container_of(lip, struct xfs_bud_log_item, bud_item);
+}
+
+STATIC void
+xfs_bud_item_size(
+	struct xfs_log_item	*lip,
+	int			*nvecs,
+	int			*nbytes)
+{
+	*nvecs += 1;
+	*nbytes += sizeof(struct xfs_bud_log_format);
+}
+
+/*
+ * This is called to fill in the vector of log iovecs for the
+ * given bud log item. We use only 1 iovec, and we point that
+ * at the bud_log_format structure embedded in the bud item.
+ * It is at this point that we assert that all of the extent
+ * slots in the bud item have been filled.
+ */
+STATIC void
+xfs_bud_item_format(
+	struct xfs_log_item	*lip,
+	struct xfs_log_vec	*lv)
+{
+	struct xfs_bud_log_item	*budp = BUD_ITEM(lip);
+	struct xfs_log_iovec	*vecp = NULL;
+
+	budp->bud_format.bud_type = XFS_LI_BUD;
+	budp->bud_format.bud_size = 1;
+
+	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_BUD_FORMAT, &budp->bud_format,
+			sizeof(struct xfs_bud_log_format));
+}
+
+/*
+ * Pinning has no meaning for an bud item, so just return.
+ */
+STATIC void
+xfs_bud_item_pin(
+	struct xfs_log_item	*lip)
+{
+}
+
+/*
+ * Since pinning has no meaning for an bud item, unpinning does
+ * not either.
+ */
+STATIC void
+xfs_bud_item_unpin(
+	struct xfs_log_item	*lip,
+	int			remove)
+{
+}
+
+/*
+ * There isn't much you can do to push on an bud item.  It is simply stuck
+ * waiting for the log to be flushed to disk.
+ */
+STATIC uint
+xfs_bud_item_push(
+	struct xfs_log_item	*lip,
+	struct list_head	*buffer_list)
+{
+	return XFS_ITEM_PINNED;
+}
+
+/*
+ * The BUD is either committed or aborted if the transaction is cancelled. If
+ * the transaction is cancelled, drop our reference to the BUI and free the
+ * BUD.
+ */
+STATIC void
+xfs_bud_item_unlock(
+	struct xfs_log_item	*lip)
+{
+	struct xfs_bud_log_item	*budp = BUD_ITEM(lip);
+
+	if (lip->li_flags & XFS_LI_ABORTED) {
+		xfs_bui_release(budp->bud_buip);
+		kmem_zone_free(xfs_bud_zone, budp);
+	}
+}
+
+/*
+ * When the bud item is committed to disk, all we need to do is delete our
+ * reference to our partner bui item and then free ourselves. Since we're
+ * freeing ourselves we must return -1 to keep the transaction code from
+ * further referencing this item.
+ */
+STATIC xfs_lsn_t
+xfs_bud_item_committed(
+	struct xfs_log_item	*lip,
+	xfs_lsn_t		lsn)
+{
+	struct xfs_bud_log_item	*budp = BUD_ITEM(lip);
+
+	/*
+	 * Drop the BUI reference regardless of whether the BUD has been
+	 * aborted. Once the BUD transaction is constructed, it is the sole
+	 * responsibility of the BUD to release the BUI (even if the BUI is
+	 * aborted due to log I/O error).
+	 */
+	xfs_bui_release(budp->bud_buip);
+	kmem_zone_free(xfs_bud_zone, budp);
+
+	return (xfs_lsn_t)-1;
+}
+
+/*
+ * The BUD dependency tracking op doesn't do squat.  It can't because
+ * it doesn't know where the free extent is coming from.  The dependency
+ * tracking has to be handled by the "enclosing" metadata object.  For
+ * example, for inodes, the inode is locked throughout the extent freeing
+ * so the dependency should be recorded there.
+ */
+STATIC void
+xfs_bud_item_committing(
+	struct xfs_log_item	*lip,
+	xfs_lsn_t		lsn)
+{
+}
+
+/*
+ * This is the ops vector shared by all bud log items.
+ */
+static const struct xfs_item_ops xfs_bud_item_ops = {
+	.iop_size	= xfs_bud_item_size,
+	.iop_format	= xfs_bud_item_format,
+	.iop_pin	= xfs_bud_item_pin,
+	.iop_unpin	= xfs_bud_item_unpin,
+	.iop_unlock	= xfs_bud_item_unlock,
+	.iop_committed	= xfs_bud_item_committed,
+	.iop_push	= xfs_bud_item_push,
+	.iop_committing = xfs_bud_item_committing,
+};
+
+/*
+ * Allocate and initialize an bud item with the given number of extents.
+ */
+struct xfs_bud_log_item *
+xfs_bud_init(
+	struct xfs_mount		*mp,
+	struct xfs_bui_log_item		*buip)
+
+{
+	struct xfs_bud_log_item	*budp;
+
+	budp = kmem_zone_zalloc(xfs_bud_zone, KM_SLEEP);
+	xfs_log_item_init(mp, &budp->bud_item, XFS_LI_BUD, &xfs_bud_item_ops);
+	budp->bud_buip = buip;
+	budp->bud_format.bud_bui_id = buip->bui_format.bui_id;
+
+	return budp;
+}
+
+/*
+ * Process a bmap update intent item that was recovered from the log.
+ * We need to update some inode's bmbt.
+ */
+int
+xfs_bui_recover(
+	struct xfs_mount		*mp,
+	struct xfs_bui_log_item		*buip)
+{
+	int				error = 0;
+	unsigned int			bui_type;
+	struct xfs_map_extent		*bmap;
+	xfs_fsblock_t			startblock_fsb;
+	xfs_fsblock_t			inode_fsb;
+	bool				op_ok;
+	struct xfs_bud_log_item		*budp;
+	enum xfs_bmap_intent_type	type;
+	int				whichfork;
+	xfs_exntst_t			state;
+	struct xfs_trans		*tp;
+	struct xfs_inode		*ip = NULL;
+	struct xfs_defer_ops		dfops;
+	xfs_fsblock_t			firstfsb;
+
+	ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags));
+
+	/* Only one mapping operation per BUI... */
+	if (buip->bui_format.bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
+		set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
+		xfs_bui_release(buip);
+		return -EIO;
+	}
+
+	/*
+	 * First check the validity of the extent described by the
+	 * BUI.  If anything is bad, then toss the BUI.
+	 */
+	bmap = &buip->bui_format.bui_extents[0];
+	startblock_fsb = XFS_BB_TO_FSB(mp,
+			   XFS_FSB_TO_DADDR(mp, bmap->me_startblock));
+	inode_fsb = XFS_BB_TO_FSB(mp, XFS_FSB_TO_DADDR(mp,
+			XFS_INO_TO_FSB(mp, bmap->me_owner)));
+	switch (bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK) {
+	case XFS_BMAP_MAP:
+	case XFS_BMAP_UNMAP:
+		op_ok = true;
+		break;
+	default:
+		op_ok = false;
+		break;
+	}
+	if (!op_ok || startblock_fsb == 0 ||
+	    bmap->me_len == 0 ||
+	    inode_fsb == 0 ||
+	    startblock_fsb >= mp->m_sb.sb_dblocks ||
+	    bmap->me_len >= mp->m_sb.sb_agblocks ||
+	    inode_fsb >= mp->m_sb.sb_dblocks ||
+	    (bmap->me_flags & ~XFS_BMAP_EXTENT_FLAGS)) {
+		/*
+		 * This will pull the BUI from the AIL and
+		 * free the memory associated with it.
+		 */
+		set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
+		xfs_bui_release(buip);
+		return -EIO;
+	}
+
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+	if (error)
+		return error;
+	budp = xfs_trans_get_bud(tp, buip);
+
+	/* Grab the inode. */
+	error = xfs_iget(mp, tp, bmap->me_owner, 0, XFS_ILOCK_EXCL, &ip);
+	if (error)
+		goto err_inode;
+
+	if (VFS_I(ip)->i_nlink == 0)
+		xfs_iflags_set(ip, XFS_IRECOVERY);
+	xfs_defer_init(&dfops, &firstfsb);
+
+	/* Process deferred bmap item. */
+	state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
+			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
+	whichfork = (bmap->me_flags & XFS_BMAP_EXTENT_ATTR_FORK) ?
+			XFS_ATTR_FORK : XFS_DATA_FORK;
+	bui_type = bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK;
+	switch (bui_type) {
+	case XFS_BMAP_MAP:
+	case XFS_BMAP_UNMAP:
+		type = bui_type;
+		break;
+	default:
+		error = -EFSCORRUPTED;
+		goto err_dfops;
+	}
+	xfs_trans_ijoin(tp, ip, 0);
+
+	error = xfs_trans_log_finish_bmap_update(tp, budp, &dfops, type,
+			ip, whichfork, bmap->me_startoff,
+			bmap->me_startblock, bmap->me_len,
+			state);
+	if (error)
+		goto err_dfops;
+
+	/* Finish transaction, free inodes. */
+	error = xfs_defer_finish(&tp, &dfops, NULL);
+	if (error)
+		goto err_dfops;
+
+	set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
+	error = xfs_trans_commit(tp);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	IRELE(ip);
+
+	return error;
+
+err_dfops:
+	xfs_defer_cancel(&dfops);
+err_inode:
+	xfs_trans_cancel(tp);
+	if (ip) {
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		IRELE(ip);
+	}
+	return error;
+}
diff --git a/fs/xfs/xfs_bmap_item.h b/fs/xfs/xfs_bmap_item.h
new file mode 100644
index 0000000..c867daa
--- /dev/null
+++ b/fs/xfs/xfs_bmap_item.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2016 Oracle.  All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#ifndef	__XFS_BMAP_ITEM_H__
+#define	__XFS_BMAP_ITEM_H__
+
+/*
+ * There are (currently) two pairs of bmap btree redo item types: map & unmap.
+ * The common abbreviations for these are BUI (bmap update intent) and BUD
+ * (bmap update done).  The redo item type is encoded in the flags field of
+ * each xfs_map_extent.
+ *
+ * *I items should be recorded in the *first* of a series of rolled
+ * transactions, and the *D items should be recorded in the same transaction
+ * that records the associated bmbt updates.
+ *
+ * Should the system crash after the commit of the first transaction but
+ * before the commit of the final transaction in a series, log recovery will
+ * use the redo information recorded by the intent items to replay the
+ * bmbt metadata updates in the non-first transaction.
+ */
+
+/* kernel only BUI/BUD definitions */
+
+struct xfs_mount;
+struct kmem_zone;
+
+/*
+ * Max number of extents in fast allocation path.
+ */
+#define	XFS_BUI_MAX_FAST_EXTENTS	1
+
+/*
+ * Define BUI flag bits. Manipulated by set/clear/test_bit operators.
+ */
+#define	XFS_BUI_RECOVERED		1
+
+/*
+ * This is the "bmap update intent" log item.  It is used to log the fact that
+ * some reverse mappings need to change.  It is used in conjunction with the
+ * "bmap update done" log item described below.
+ *
+ * These log items follow the same rules as struct xfs_efi_log_item; see the
+ * comments about that structure (in xfs_extfree_item.h) for more details.
+ */
+struct xfs_bui_log_item {
+	struct xfs_log_item		bui_item;
+	atomic_t			bui_refcount;
+	atomic_t			bui_next_extent;
+	unsigned long			bui_flags;	/* misc flags */
+	struct xfs_bui_log_format	bui_format;
+};
+
+static inline size_t
+xfs_bui_log_item_sizeof(
+	unsigned int		nr)
+{
+	return offsetof(struct xfs_bui_log_item, bui_format) +
+			xfs_bui_log_format_sizeof(nr);
+}
+
+/*
+ * This is the "bmap update done" log item.  It is used to log the fact that
+ * some bmbt updates mentioned in an earlier bui item have been performed.
+ */
+struct xfs_bud_log_item {
+	struct xfs_log_item		bud_item;
+	struct xfs_bui_log_item		*bud_buip;
+	struct xfs_bud_log_format	bud_format;
+};
+
+extern struct kmem_zone	*xfs_bui_zone;
+extern struct kmem_zone	*xfs_bud_zone;
+
+struct xfs_bui_log_item *xfs_bui_init(struct xfs_mount *);
+struct xfs_bud_log_item *xfs_bud_init(struct xfs_mount *,
+		struct xfs_bui_log_item *);
+void xfs_bui_item_free(struct xfs_bui_log_item *);
+void xfs_bui_release(struct xfs_bui_log_item *);
+int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip);
+
+#endif	/* __XFS_BMAP_ITEM_H__ */
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index e827d65..552465e 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -42,6 +42,9 @@
 #include "xfs_icache.h"
 #include "xfs_log.h"
 #include "xfs_rmap_btree.h"
+#include "xfs_iomap.h"
+#include "xfs_reflink.h"
+#include "xfs_refcount.h"
 
 /* Kernel only BMAP related definitions and functions */
 
@@ -389,11 +392,13 @@
 STATIC int
 xfs_getbmapx_fix_eof_hole(
 	xfs_inode_t		*ip,		/* xfs incore inode pointer */
+	int			whichfork,
 	struct getbmapx		*out,		/* output structure */
 	int			prealloced,	/* this is a file with
 						 * preallocated data space */
 	__int64_t		end,		/* last block requested */
-	xfs_fsblock_t		startblock)
+	xfs_fsblock_t		startblock,
+	bool			moretocome)
 {
 	__int64_t		fixlen;
 	xfs_mount_t		*mp;		/* file system mount point */
@@ -418,8 +423,9 @@
 		else
 			out->bmv_block = xfs_fsb_to_db(ip, startblock);
 		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
-		ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
-		if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
+		ifp = XFS_IFORK_PTR(ip, whichfork);
+		if (!moretocome &&
+		    xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
 		   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
 			out->bmv_oflags |= BMV_OF_LAST;
 	}
@@ -427,6 +433,81 @@
 	return 1;
 }
 
+/* Adjust the reported bmap around shared/unshared extent transitions. */
+STATIC int
+xfs_getbmap_adjust_shared(
+	struct xfs_inode		*ip,
+	int				whichfork,
+	struct xfs_bmbt_irec		*map,
+	struct getbmapx			*out,
+	struct xfs_bmbt_irec		*next_map)
+{
+	struct xfs_mount		*mp = ip->i_mount;
+	xfs_agnumber_t			agno;
+	xfs_agblock_t			agbno;
+	xfs_agblock_t			ebno;
+	xfs_extlen_t			elen;
+	xfs_extlen_t			nlen;
+	int				error;
+
+	next_map->br_startblock = NULLFSBLOCK;
+	next_map->br_startoff = NULLFILEOFF;
+	next_map->br_blockcount = 0;
+
+	/* Only written data blocks can be shared. */
+	if (!xfs_is_reflink_inode(ip) || whichfork != XFS_DATA_FORK ||
+	    map->br_startblock == DELAYSTARTBLOCK ||
+	    map->br_startblock == HOLESTARTBLOCK ||
+	    ISUNWRITTEN(map))
+		return 0;
+
+	agno = XFS_FSB_TO_AGNO(mp, map->br_startblock);
+	agbno = XFS_FSB_TO_AGBNO(mp, map->br_startblock);
+	error = xfs_reflink_find_shared(mp, agno, agbno, map->br_blockcount,
+			&ebno, &elen, true);
+	if (error)
+		return error;
+
+	if (ebno == NULLAGBLOCK) {
+		/* No shared blocks at all. */
+		return 0;
+	} else if (agbno == ebno) {
+		/*
+		 * Shared extent at (agbno, elen).  Shrink the reported
+		 * extent length and prepare to move the start of map[i]
+		 * to agbno+elen, with the aim of (re)formatting the new
+		 * map[i] the next time through the inner loop.
+		 */
+		out->bmv_length = XFS_FSB_TO_BB(mp, elen);
+		out->bmv_oflags |= BMV_OF_SHARED;
+		if (elen != map->br_blockcount) {
+			*next_map = *map;
+			next_map->br_startblock += elen;
+			next_map->br_startoff += elen;
+			next_map->br_blockcount -= elen;
+		}
+		map->br_blockcount -= elen;
+	} else {
+		/*
+		 * There's an unshared extent (agbno, ebno - agbno)
+		 * followed by shared extent at (ebno, elen).  Shrink
+		 * the reported extent length to cover only the unshared
+		 * extent and prepare to move up the start of map[i] to
+		 * ebno, with the aim of (re)formatting the new map[i]
+		 * the next time through the inner loop.
+		 */
+		*next_map = *map;
+		nlen = ebno - agbno;
+		out->bmv_length = XFS_FSB_TO_BB(mp, nlen);
+		next_map->br_startblock += nlen;
+		next_map->br_startoff += nlen;
+		next_map->br_blockcount -= nlen;
+		map->br_blockcount -= nlen;
+	}
+
+	return 0;
+}
+
 /*
  * Get inode's extents as described in bmv, and format for output.
  * Calls formatter to fill the user's buffer until all extents
@@ -459,12 +540,28 @@
 	int			iflags;		/* interface flags */
 	int			bmapi_flags;	/* flags for xfs_bmapi */
 	int			cur_ext = 0;
+	struct xfs_bmbt_irec	inject_map;
 
 	mp = ip->i_mount;
 	iflags = bmv->bmv_iflags;
-	whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
 
-	if (whichfork == XFS_ATTR_FORK) {
+#ifndef DEBUG
+	/* Only allow CoW fork queries if we're debugging. */
+	if (iflags & BMV_IF_COWFORK)
+		return -EINVAL;
+#endif
+	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
+		return -EINVAL;
+
+	if (iflags & BMV_IF_ATTRFORK)
+		whichfork = XFS_ATTR_FORK;
+	else if (iflags & BMV_IF_COWFORK)
+		whichfork = XFS_COW_FORK;
+	else
+		whichfork = XFS_DATA_FORK;
+
+	switch (whichfork) {
+	case XFS_ATTR_FORK:
 		if (XFS_IFORK_Q(ip)) {
 			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
 			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
@@ -480,7 +577,20 @@
 
 		prealloced = 0;
 		fixlen = 1LL << 32;
-	} else {
+		break;
+	case XFS_COW_FORK:
+		if (ip->i_cformat != XFS_DINODE_FMT_EXTENTS)
+			return -EINVAL;
+
+		if (xfs_get_cowextsz_hint(ip)) {
+			prealloced = 1;
+			fixlen = mp->m_super->s_maxbytes;
+		} else {
+			prealloced = 0;
+			fixlen = XFS_ISIZE(ip);
+		}
+		break;
+	default:
 		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
 		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
 		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
@@ -494,6 +604,7 @@
 			prealloced = 0;
 			fixlen = XFS_ISIZE(ip);
 		}
+		break;
 	}
 
 	if (bmv->bmv_length == -1) {
@@ -520,7 +631,8 @@
 		return -ENOMEM;
 
 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
-	if (whichfork == XFS_DATA_FORK) {
+	switch (whichfork) {
+	case XFS_DATA_FORK:
 		if (!(iflags & BMV_IF_DELALLOC) &&
 		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
 			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
@@ -538,8 +650,14 @@
 		}
 
 		lock = xfs_ilock_data_map_shared(ip);
-	} else {
+		break;
+	case XFS_COW_FORK:
+		lock = XFS_ILOCK_SHARED;
+		xfs_ilock(ip, lock);
+		break;
+	case XFS_ATTR_FORK:
 		lock = xfs_ilock_attr_map_shared(ip);
+		break;
 	}
 
 	/*
@@ -581,7 +699,8 @@
 			goto out_free_map;
 		ASSERT(nmap <= subnex);
 
-		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
+		for (i = 0; i < nmap && nexleft && bmv->bmv_length &&
+				cur_ext < bmv->bmv_count; i++) {
 			out[cur_ext].bmv_oflags = 0;
 			if (map[i].br_state == XFS_EXT_UNWRITTEN)
 				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
@@ -614,9 +733,16 @@
 				goto out_free_map;
 			}
 
-			if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
-					prealloced, bmvend,
-					map[i].br_startblock))
+			/* Is this a shared block? */
+			error = xfs_getbmap_adjust_shared(ip, whichfork,
+					&map[i], &out[cur_ext], &inject_map);
+			if (error)
+				goto out_free_map;
+
+			if (!xfs_getbmapx_fix_eof_hole(ip, whichfork,
+					&out[cur_ext], prealloced, bmvend,
+					map[i].br_startblock,
+					inject_map.br_startblock != NULLFSBLOCK))
 				goto out_free_map;
 
 			bmv->bmv_offset =
@@ -636,11 +762,16 @@
 				continue;
 			}
 
-			nexleft--;
+			if (inject_map.br_startblock != NULLFSBLOCK) {
+				map[i] = inject_map;
+				i--;
+			} else
+				nexleft--;
 			bmv->bmv_entries++;
 			cur_ext++;
 		}
-	} while (nmap && nexleft && bmv->bmv_length);
+	} while (nmap && nexleft && bmv->bmv_length &&
+		 cur_ext < bmv->bmv_count);
 
  out_free_map:
 	kmem_free(map);
@@ -1433,8 +1564,8 @@
  */
 static int
 xfs_swap_extents_check_format(
-	xfs_inode_t	*ip,	/* target inode */
-	xfs_inode_t	*tip)	/* tmp inode */
+	struct xfs_inode	*ip,	/* target inode */
+	struct xfs_inode	*tip)	/* tmp inode */
 {
 
 	/* Should never get a local format */
@@ -1450,6 +1581,13 @@
 		return -EINVAL;
 
 	/*
+	 * If we have to use the (expensive) rmap swap method, we can
+	 * handle any number of extents and any format.
+	 */
+	if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
+		return 0;
+
+	/*
 	 * if the target inode is in extent form and the temp inode is in btree
 	 * form then we will end up with the target inode in the wrong format
 	 * as we already know there are less extents in the temp inode.
@@ -1518,125 +1656,161 @@
 	return 0;
 }
 
-int
-xfs_swap_extents(
-	xfs_inode_t	*ip,	/* target inode */
-	xfs_inode_t	*tip,	/* tmp inode */
-	xfs_swapext_t	*sxp)
+/*
+ * Move extents from one file to another, when rmap is enabled.
+ */
+STATIC int
+xfs_swap_extent_rmap(
+	struct xfs_trans		**tpp,
+	struct xfs_inode		*ip,
+	struct xfs_inode		*tip)
 {
-	xfs_mount_t	*mp = ip->i_mount;
-	xfs_trans_t	*tp;
-	xfs_bstat_t	*sbp = &sxp->sx_stat;
-	xfs_ifork_t	*tempifp, *ifp, *tifp;
-	int		src_log_flags, target_log_flags;
-	int		error = 0;
-	int		aforkblks = 0;
-	int		taforkblks = 0;
-	__uint64_t	tmp;
-	int		lock_flags;
-
-	/* XXX: we can't do this with rmap, will fix later */
-	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
-		return -EOPNOTSUPP;
-
-	tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
-	if (!tempifp) {
-		error = -ENOMEM;
-		goto out;
-	}
+	struct xfs_bmbt_irec		irec;
+	struct xfs_bmbt_irec		uirec;
+	struct xfs_bmbt_irec		tirec;
+	xfs_fileoff_t			offset_fsb;
+	xfs_fileoff_t			end_fsb;
+	xfs_filblks_t			count_fsb;
+	xfs_fsblock_t			firstfsb;
+	struct xfs_defer_ops		dfops;
+	int				error;
+	xfs_filblks_t			ilen;
+	xfs_filblks_t			rlen;
+	int				nimaps;
+	__uint64_t			tip_flags2;
 
 	/*
-	 * Lock the inodes against other IO, page faults and truncate to
-	 * begin with.  Then we can ensure the inodes are flushed and have no
-	 * page cache safely. Once we have done this we can take the ilocks and
-	 * do the rest of the checks.
+	 * If the source file has shared blocks, we must flag the donor
+	 * file as having shared blocks so that we get the shared-block
+	 * rmap functions when we go to fix up the rmaps.  The flags
+	 * will be switch for reals later.
 	 */
-	lock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
-	xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
-	xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
+	tip_flags2 = tip->i_d.di_flags2;
+	if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
+		tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
 
-	/* Verify that both files have the same format */
-	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
-		error = -EINVAL;
-		goto out_unlock;
+	offset_fsb = 0;
+	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
+	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
+
+	while (count_fsb) {
+		/* Read extent from the donor file */
+		nimaps = 1;
+		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
+				&nimaps, 0);
+		if (error)
+			goto out;
+		ASSERT(nimaps == 1);
+		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
+
+		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
+		ilen = tirec.br_blockcount;
+
+		/* Unmap the old blocks in the source file. */
+		while (tirec.br_blockcount) {
+			xfs_defer_init(&dfops, &firstfsb);
+			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
+
+			/* Read extent from the source file */
+			nimaps = 1;
+			error = xfs_bmapi_read(ip, tirec.br_startoff,
+					tirec.br_blockcount, &irec,
+					&nimaps, 0);
+			if (error)
+				goto out_defer;
+			ASSERT(nimaps == 1);
+			ASSERT(tirec.br_startoff == irec.br_startoff);
+			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
+
+			/* Trim the extent. */
+			uirec = tirec;
+			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
+					tirec.br_blockcount,
+					irec.br_blockcount);
+			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
+
+			/* Remove the mapping from the donor file. */
+			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
+					tip, &uirec);
+			if (error)
+				goto out_defer;
+
+			/* Remove the mapping from the source file. */
+			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
+					ip, &irec);
+			if (error)
+				goto out_defer;
+
+			/* Map the donor file's blocks into the source file. */
+			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
+					ip, &uirec);
+			if (error)
+				goto out_defer;
+
+			/* Map the source file's blocks into the donor file. */
+			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
+					tip, &irec);
+			if (error)
+				goto out_defer;
+
+			error = xfs_defer_finish(tpp, &dfops, ip);
+			if (error)
+				goto out_defer;
+
+			tirec.br_startoff += rlen;
+			if (tirec.br_startblock != HOLESTARTBLOCK &&
+			    tirec.br_startblock != DELAYSTARTBLOCK)
+				tirec.br_startblock += rlen;
+			tirec.br_blockcount -= rlen;
+		}
+
+		/* Roll on... */
+		count_fsb -= ilen;
+		offset_fsb += ilen;
 	}
 
-	/* Verify both files are either real-time or non-realtime */
-	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
-		error = -EINVAL;
-		goto out_unlock;
-	}
+	tip->i_d.di_flags2 = tip_flags2;
+	return 0;
 
-	error = xfs_swap_extent_flush(ip);
-	if (error)
-		goto out_unlock;
-	error = xfs_swap_extent_flush(tip);
-	if (error)
-		goto out_unlock;
+out_defer:
+	xfs_defer_cancel(&dfops);
+out:
+	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
+	tip->i_d.di_flags2 = tip_flags2;
+	return error;
+}
 
-	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
-	if (error)
-		goto out_unlock;
+/* Swap the extents of two files by swapping data forks. */
+STATIC int
+xfs_swap_extent_forks(
+	struct xfs_trans	*tp,
+	struct xfs_inode	*ip,
+	struct xfs_inode	*tip,
+	int			*src_log_flags,
+	int			*target_log_flags)
+{
+	struct xfs_ifork	tempifp, *ifp, *tifp;
+	int			aforkblks = 0;
+	int			taforkblks = 0;
+	__uint64_t		tmp;
+	int			error;
 
 	/*
-	 * Lock and join the inodes to the tansaction so that transaction commit
-	 * or cancel will unlock the inodes from this point onwards.
-	 */
-	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
-	lock_flags |= XFS_ILOCK_EXCL;
-	xfs_trans_ijoin(tp, ip, lock_flags);
-	xfs_trans_ijoin(tp, tip, lock_flags);
-
-
-	/* Verify all data are being swapped */
-	if (sxp->sx_offset != 0 ||
-	    sxp->sx_length != ip->i_d.di_size ||
-	    sxp->sx_length != tip->i_d.di_size) {
-		error = -EFAULT;
-		goto out_trans_cancel;
-	}
-
-	trace_xfs_swap_extent_before(ip, 0);
-	trace_xfs_swap_extent_before(tip, 1);
-
-	/* check inode formats now that data is flushed */
-	error = xfs_swap_extents_check_format(ip, tip);
-	if (error) {
-		xfs_notice(mp,
-		    "%s: inode 0x%llx format is incompatible for exchanging.",
-				__func__, ip->i_ino);
-		goto out_trans_cancel;
-	}
-
-	/*
-	 * Compare the current change & modify times with that
-	 * passed in.  If they differ, we abort this swap.
-	 * This is the mechanism used to ensure the calling
-	 * process that the file was not changed out from
-	 * under it.
-	 */
-	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
-	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
-	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
-	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
-		error = -EBUSY;
-		goto out_trans_cancel;
-	}
-	/*
 	 * Count the number of extended attribute blocks
 	 */
 	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
 	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
-		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
+		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK,
+				&aforkblks);
 		if (error)
-			goto out_trans_cancel;
+			return error;
 	}
 	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
 	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
 		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
-			&taforkblks);
+				&taforkblks);
 		if (error)
-			goto out_trans_cancel;
+			return error;
 	}
 
 	/*
@@ -1645,31 +1819,23 @@
 	 * buffers, and so the validation done on read will expect the owner
 	 * field to be correctly set. Once we change the owners, we can swap the
 	 * inode forks.
-	 *
-	 * Note the trickiness in setting the log flags - we set the owner log
-	 * flag on the opposite inode (i.e. the inode we are setting the new
-	 * owner to be) because once we swap the forks and log that, log
-	 * recovery is going to see the fork as owned by the swapped inode,
-	 * not the pre-swapped inodes.
 	 */
-	src_log_flags = XFS_ILOG_CORE;
-	target_log_flags = XFS_ILOG_CORE;
 	if (ip->i_d.di_version == 3 &&
 	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
-		target_log_flags |= XFS_ILOG_DOWNER;
+		(*target_log_flags) |= XFS_ILOG_DOWNER;
 		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
 					      tip->i_ino, NULL);
 		if (error)
-			goto out_trans_cancel;
+			return error;
 	}
 
 	if (tip->i_d.di_version == 3 &&
 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
-		src_log_flags |= XFS_ILOG_DOWNER;
+		(*src_log_flags) |= XFS_ILOG_DOWNER;
 		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
 					      ip->i_ino, NULL);
 		if (error)
-			goto out_trans_cancel;
+			return error;
 	}
 
 	/*
@@ -1677,9 +1843,9 @@
 	 */
 	ifp = &ip->i_df;
 	tifp = &tip->i_df;
-	*tempifp = *ifp;	/* struct copy */
+	tempifp = *ifp;		/* struct copy */
 	*ifp = *tifp;		/* struct copy */
-	*tifp = *tempifp;	/* struct copy */
+	*tifp = tempifp;	/* struct copy */
 
 	/*
 	 * Fix the on-disk inode values
@@ -1719,12 +1885,12 @@
 			ifp->if_u1.if_extents =
 				ifp->if_u2.if_inline_ext;
 		}
-		src_log_flags |= XFS_ILOG_DEXT;
+		(*src_log_flags) |= XFS_ILOG_DEXT;
 		break;
 	case XFS_DINODE_FMT_BTREE:
 		ASSERT(ip->i_d.di_version < 3 ||
-		       (src_log_flags & XFS_ILOG_DOWNER));
-		src_log_flags |= XFS_ILOG_DBROOT;
+		       (*src_log_flags & XFS_ILOG_DOWNER));
+		(*src_log_flags) |= XFS_ILOG_DBROOT;
 		break;
 	}
 
@@ -1738,15 +1904,166 @@
 			tifp->if_u1.if_extents =
 				tifp->if_u2.if_inline_ext;
 		}
-		target_log_flags |= XFS_ILOG_DEXT;
+		(*target_log_flags) |= XFS_ILOG_DEXT;
 		break;
 	case XFS_DINODE_FMT_BTREE:
-		target_log_flags |= XFS_ILOG_DBROOT;
+		(*target_log_flags) |= XFS_ILOG_DBROOT;
 		ASSERT(tip->i_d.di_version < 3 ||
-		       (target_log_flags & XFS_ILOG_DOWNER));
+		       (*target_log_flags & XFS_ILOG_DOWNER));
 		break;
 	}
 
+	return 0;
+}
+
+int
+xfs_swap_extents(
+	struct xfs_inode	*ip,	/* target inode */
+	struct xfs_inode	*tip,	/* tmp inode */
+	struct xfs_swapext	*sxp)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_trans	*tp;
+	struct xfs_bstat	*sbp = &sxp->sx_stat;
+	int			src_log_flags, target_log_flags;
+	int			error = 0;
+	int			lock_flags;
+	struct xfs_ifork	*cowfp;
+	__uint64_t		f;
+	int			resblks;
+
+	/*
+	 * Lock the inodes against other IO, page faults and truncate to
+	 * begin with.  Then we can ensure the inodes are flushed and have no
+	 * page cache safely. Once we have done this we can take the ilocks and
+	 * do the rest of the checks.
+	 */
+	lock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
+	xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
+	xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
+
+	/* Verify that both files have the same format */
+	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
+		error = -EINVAL;
+		goto out_unlock;
+	}
+
+	/* Verify both files are either real-time or non-realtime */
+	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
+		error = -EINVAL;
+		goto out_unlock;
+	}
+
+	error = xfs_swap_extent_flush(ip);
+	if (error)
+		goto out_unlock;
+	error = xfs_swap_extent_flush(tip);
+	if (error)
+		goto out_unlock;
+
+	/*
+	 * Extent "swapping" with rmap requires a permanent reservation and
+	 * a block reservation because it's really just a remap operation
+	 * performed with log redo items!
+	 */
+	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+		/*
+		 * Conceptually this shouldn't affect the shape of either
+		 * bmbt, but since we atomically move extents one by one,
+		 * we reserve enough space to rebuild both trees.
+		 */
+		resblks = XFS_SWAP_RMAP_SPACE_RES(mp,
+				XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK),
+				XFS_DATA_FORK) +
+			  XFS_SWAP_RMAP_SPACE_RES(mp,
+				XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK),
+				XFS_DATA_FORK);
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
+				0, 0, &tp);
+	} else
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0,
+				0, 0, &tp);
+	if (error)
+		goto out_unlock;
+
+	/*
+	 * Lock and join the inodes to the tansaction so that transaction commit
+	 * or cancel will unlock the inodes from this point onwards.
+	 */
+	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
+	lock_flags |= XFS_ILOCK_EXCL;
+	xfs_trans_ijoin(tp, ip, 0);
+	xfs_trans_ijoin(tp, tip, 0);
+
+
+	/* Verify all data are being swapped */
+	if (sxp->sx_offset != 0 ||
+	    sxp->sx_length != ip->i_d.di_size ||
+	    sxp->sx_length != tip->i_d.di_size) {
+		error = -EFAULT;
+		goto out_trans_cancel;
+	}
+
+	trace_xfs_swap_extent_before(ip, 0);
+	trace_xfs_swap_extent_before(tip, 1);
+
+	/* check inode formats now that data is flushed */
+	error = xfs_swap_extents_check_format(ip, tip);
+	if (error) {
+		xfs_notice(mp,
+		    "%s: inode 0x%llx format is incompatible for exchanging.",
+				__func__, ip->i_ino);
+		goto out_trans_cancel;
+	}
+
+	/*
+	 * Compare the current change & modify times with that
+	 * passed in.  If they differ, we abort this swap.
+	 * This is the mechanism used to ensure the calling
+	 * process that the file was not changed out from
+	 * under it.
+	 */
+	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
+	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
+	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
+	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
+		error = -EBUSY;
+		goto out_trans_cancel;
+	}
+
+	/*
+	 * Note the trickiness in setting the log flags - we set the owner log
+	 * flag on the opposite inode (i.e. the inode we are setting the new
+	 * owner to be) because once we swap the forks and log that, log
+	 * recovery is going to see the fork as owned by the swapped inode,
+	 * not the pre-swapped inodes.
+	 */
+	src_log_flags = XFS_ILOG_CORE;
+	target_log_flags = XFS_ILOG_CORE;
+
+	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+		error = xfs_swap_extent_rmap(&tp, ip, tip);
+	else
+		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
+				&target_log_flags);
+	if (error)
+		goto out_trans_cancel;
+
+	/* Do we have to swap reflink flags? */
+	if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
+	    (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
+		f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
+		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
+		ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
+		tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
+		tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
+		cowfp = ip->i_cowfp;
+		ip->i_cowfp = tip->i_cowfp;
+		tip->i_cowfp = cowfp;
+		xfs_inode_set_cowblocks_tag(ip);
+		xfs_inode_set_cowblocks_tag(tip);
+	}
+
 	xfs_trans_log_inode(tp, ip,  src_log_flags);
 	xfs_trans_log_inode(tp, tip, target_log_flags);
 
@@ -1761,16 +2078,16 @@
 
 	trace_xfs_swap_extent_after(ip, 0);
 	trace_xfs_swap_extent_after(tip, 1);
-out:
-	kmem_free(tempifp);
+
+	xfs_iunlock(ip, lock_flags);
+	xfs_iunlock(tip, lock_flags);
 	return error;
 
+out_trans_cancel:
+	xfs_trans_cancel(tp);
+
 out_unlock:
 	xfs_iunlock(ip, lock_flags);
 	xfs_iunlock(tip, lock_flags);
-	goto out;
-
-out_trans_cancel:
-	xfs_trans_cancel(tp);
-	goto out;
+	return error;
 }
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index f44f799..2981698 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -84,7 +84,8 @@
 
 	sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
 
-	ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count));
+	if (dp->i_d.di_size < xfs_dir2_sf_hdr_size(sfp->i8count))
+		return -EFSCORRUPTED;
 
 	/*
 	 * If the block number in the offset is out of range, we're done.
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index 3d22470..05f8666 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -92,7 +92,11 @@
 #define XFS_ERRTAG_BMAPIFORMAT				21
 #define XFS_ERRTAG_FREE_EXTENT				22
 #define XFS_ERRTAG_RMAP_FINISH_ONE			23
-#define XFS_ERRTAG_MAX					24
+#define XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE		24
+#define XFS_ERRTAG_REFCOUNT_FINISH_ONE			25
+#define XFS_ERRTAG_BMAP_FINISH_ONE			26
+#define XFS_ERRTAG_AG_RESV_CRITICAL			27
+#define XFS_ERRTAG_MAX					28
 
 /*
  * Random factors for above tags, 1 means always, 2 means 1/2 time, etc.
@@ -121,6 +125,10 @@
 #define	XFS_RANDOM_BMAPIFORMAT				XFS_RANDOM_DEFAULT
 #define XFS_RANDOM_FREE_EXTENT				1
 #define XFS_RANDOM_RMAP_FINISH_ONE			1
+#define XFS_RANDOM_REFCOUNT_CONTINUE_UPDATE		1
+#define XFS_RANDOM_REFCOUNT_FINISH_ONE			1
+#define XFS_RANDOM_BMAP_FINISH_ONE			1
+#define XFS_RANDOM_AG_RESV_CRITICAL			4
 
 #ifdef DEBUG
 extern int xfs_error_test_active;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 26acfbb..a314fc7 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -38,6 +38,7 @@
 #include "xfs_icache.h"
 #include "xfs_pnfs.h"
 #include "xfs_iomap.h"
+#include "xfs_reflink.h"
 
 #include <linux/dcache.h>
 #include <linux/falloc.h>
@@ -319,7 +320,7 @@
 	data = *to;
 	ret = __blockdev_direct_IO(iocb, inode, target->bt_bdev, &data,
 			xfs_get_blocks_direct, NULL, NULL, 0);
-	if (ret > 0) {
+	if (ret >= 0) {
 		iocb->ki_pos += ret;
 		iov_iter_advance(to, ret);
 	}
@@ -634,6 +635,13 @@
 
 	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
 
+	/* If this is a block-aligned directio CoW, remap immediately. */
+	if (xfs_is_reflink_inode(ip) && !unaligned_io) {
+		ret = xfs_reflink_allocate_cow_range(ip, iocb->ki_pos, count);
+		if (ret)
+			goto out;
+	}
+
 	data = *from;
 	ret = __blockdev_direct_IO(iocb, inode, target->bt_bdev, &data,
 			xfs_get_blocks_direct, xfs_end_io_direct_write,
@@ -735,6 +743,9 @@
 		enospc = xfs_inode_free_quota_eofblocks(ip);
 		if (enospc)
 			goto write_retry;
+		enospc = xfs_inode_free_quota_cowblocks(ip);
+		if (enospc)
+			goto write_retry;
 	} else if (ret == -ENOSPC && !enospc) {
 		struct xfs_eofblocks eofb = {0};
 
@@ -774,10 +785,20 @@
 
 	if (IS_DAX(inode))
 		ret = xfs_file_dax_write(iocb, from);
-	else if (iocb->ki_flags & IOCB_DIRECT)
+	else if (iocb->ki_flags & IOCB_DIRECT) {
+		/*
+		 * Allow a directio write to fall back to a buffered
+		 * write *only* in the case that we're doing a reflink
+		 * CoW.  In all other directio scenarios we do not
+		 * allow an operation to fall back to buffered mode.
+		 */
 		ret = xfs_file_dio_aio_write(iocb, from);
-	else
+		if (ret == -EREMCHG)
+			goto buffered;
+	} else {
+buffered:
 		ret = xfs_file_buffered_aio_write(iocb, from);
+	}
 
 	if (ret > 0) {
 		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
@@ -791,7 +812,7 @@
 #define	XFS_FALLOC_FL_SUPPORTED						\
 		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
 		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
-		 FALLOC_FL_INSERT_RANGE)
+		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)
 
 STATIC long
 xfs_file_fallocate(
@@ -881,9 +902,15 @@
 
 		if (mode & FALLOC_FL_ZERO_RANGE)
 			error = xfs_zero_file_space(ip, offset, len);
-		else
+		else {
+			if (mode & FALLOC_FL_UNSHARE_RANGE) {
+				error = xfs_reflink_unshare(ip, offset, len);
+				if (error)
+					goto out_unlock;
+			}
 			error = xfs_alloc_file_space(ip, offset, len,
 						     XFS_BMAPI_PREALLOC);
+		}
 		if (error)
 			goto out_unlock;
 	}
@@ -901,7 +928,7 @@
 
 		iattr.ia_valid = ATTR_SIZE;
 		iattr.ia_size = new_size;
-		error = xfs_setattr_size(ip, &iattr);
+		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
 		if (error)
 			goto out_unlock;
 	}
@@ -920,6 +947,189 @@
 	return error;
 }
 
+/*
+ * Flush all file writes out to disk.
+ */
+static int
+xfs_file_wait_for_io(
+	struct inode	*inode,
+	loff_t		offset,
+	size_t		len)
+{
+	loff_t		rounding;
+	loff_t		ioffset;
+	loff_t		iendoffset;
+	loff_t		bs;
+	int		ret;
+
+	bs = inode->i_sb->s_blocksize;
+	inode_dio_wait(inode);
+
+	rounding = max_t(xfs_off_t, bs, PAGE_SIZE);
+	ioffset = round_down(offset, rounding);
+	iendoffset = round_up(offset + len, rounding) - 1;
+	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
+					   iendoffset);
+	return ret;
+}
+
+/* Hook up to the VFS reflink function */
+STATIC int
+xfs_file_share_range(
+	struct file	*file_in,
+	loff_t		pos_in,
+	struct file	*file_out,
+	loff_t		pos_out,
+	u64		len,
+	bool		is_dedupe)
+{
+	struct inode	*inode_in;
+	struct inode	*inode_out;
+	ssize_t		ret;
+	loff_t		bs;
+	loff_t		isize;
+	int		same_inode;
+	loff_t		blen;
+	unsigned int	flags = 0;
+
+	inode_in = file_inode(file_in);
+	inode_out = file_inode(file_out);
+	bs = inode_out->i_sb->s_blocksize;
+
+	/* Don't touch certain kinds of inodes */
+	if (IS_IMMUTABLE(inode_out))
+		return -EPERM;
+	if (IS_SWAPFILE(inode_in) ||
+	    IS_SWAPFILE(inode_out))
+		return -ETXTBSY;
+
+	/* Reflink only works within this filesystem. */
+	if (inode_in->i_sb != inode_out->i_sb)
+		return -EXDEV;
+	same_inode = (inode_in->i_ino == inode_out->i_ino);
+
+	/* Don't reflink dirs, pipes, sockets... */
+	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
+		return -EISDIR;
+	if (S_ISFIFO(inode_in->i_mode) || S_ISFIFO(inode_out->i_mode))
+		return -EINVAL;
+	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
+		return -EINVAL;
+
+	/* Don't share DAX file data for now. */
+	if (IS_DAX(inode_in) || IS_DAX(inode_out))
+		return -EINVAL;
+
+	/* Are we going all the way to the end? */
+	isize = i_size_read(inode_in);
+	if (isize == 0)
+		return 0;
+	if (len == 0)
+		len = isize - pos_in;
+
+	/* Ensure offsets don't wrap and the input is inside i_size */
+	if (pos_in + len < pos_in || pos_out + len < pos_out ||
+	    pos_in + len > isize)
+		return -EINVAL;
+
+	/* Don't allow dedupe past EOF in the dest file */
+	if (is_dedupe) {
+		loff_t	disize;
+
+		disize = i_size_read(inode_out);
+		if (pos_out >= disize || pos_out + len > disize)
+			return -EINVAL;
+	}
+
+	/* If we're linking to EOF, continue to the block boundary. */
+	if (pos_in + len == isize)
+		blen = ALIGN(isize, bs) - pos_in;
+	else
+		blen = len;
+
+	/* Only reflink if we're aligned to block boundaries */
+	if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) ||
+	    !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs))
+		return -EINVAL;
+
+	/* Don't allow overlapped reflink within the same file */
+	if (same_inode && pos_out + blen > pos_in && pos_out < pos_in + blen)
+		return -EINVAL;
+
+	/* Wait for the completion of any pending IOs on srcfile */
+	ret = xfs_file_wait_for_io(inode_in, pos_in, len);
+	if (ret)
+		goto out;
+	ret = xfs_file_wait_for_io(inode_out, pos_out, len);
+	if (ret)
+		goto out;
+
+	if (is_dedupe)
+		flags |= XFS_REFLINK_DEDUPE;
+	ret = xfs_reflink_remap_range(XFS_I(inode_in), pos_in, XFS_I(inode_out),
+			pos_out, len, flags);
+	if (ret < 0)
+		goto out;
+
+out:
+	return ret;
+}
+
+STATIC ssize_t
+xfs_file_copy_range(
+	struct file	*file_in,
+	loff_t		pos_in,
+	struct file	*file_out,
+	loff_t		pos_out,
+	size_t		len,
+	unsigned int	flags)
+{
+	int		error;
+
+	error = xfs_file_share_range(file_in, pos_in, file_out, pos_out,
+				     len, false);
+	if (error)
+		return error;
+	return len;
+}
+
+STATIC int
+xfs_file_clone_range(
+	struct file	*file_in,
+	loff_t		pos_in,
+	struct file	*file_out,
+	loff_t		pos_out,
+	u64		len)
+{
+	return xfs_file_share_range(file_in, pos_in, file_out, pos_out,
+				     len, false);
+}
+
+#define XFS_MAX_DEDUPE_LEN	(16 * 1024 * 1024)
+STATIC ssize_t
+xfs_file_dedupe_range(
+	struct file	*src_file,
+	u64		loff,
+	u64		len,
+	struct file	*dst_file,
+	u64		dst_loff)
+{
+	int		error;
+
+	/*
+	 * Limit the total length we will dedupe for each operation.
+	 * This is intended to bound the total time spent in this
+	 * ioctl to something sane.
+	 */
+	if (len > XFS_MAX_DEDUPE_LEN)
+		len = XFS_MAX_DEDUPE_LEN;
+
+	error = xfs_file_share_range(src_file, loff, dst_file, dst_loff,
+				     len, true);
+	if (error)
+		return error;
+	return len;
+}
 
 STATIC int
 xfs_file_open(
@@ -1581,6 +1791,9 @@
 	.fsync		= xfs_file_fsync,
 	.get_unmapped_area = thp_get_unmapped_area,
 	.fallocate	= xfs_file_fallocate,
+	.copy_file_range = xfs_file_copy_range,
+	.clone_file_range = xfs_file_clone_range,
+	.dedupe_file_range = xfs_file_dedupe_range,
 };
 
 const struct file_operations xfs_dir_file_operations = {
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 94ac06f..93d12fa 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -43,6 +43,7 @@
 #include "xfs_log.h"
 #include "xfs_filestream.h"
 #include "xfs_rmap.h"
+#include "xfs_ag_resv.h"
 
 /*
  * File system operations
@@ -108,7 +109,9 @@
 			(xfs_sb_version_hassparseinodes(&mp->m_sb) ?
 				XFS_FSOP_GEOM_FLAGS_SPINODES : 0) |
 			(xfs_sb_version_hasrmapbt(&mp->m_sb) ?
-				XFS_FSOP_GEOM_FLAGS_RMAPBT : 0);
+				XFS_FSOP_GEOM_FLAGS_RMAPBT : 0) |
+			(xfs_sb_version_hasreflink(&mp->m_sb) ?
+				XFS_FSOP_GEOM_FLAGS_REFLINK : 0);
 		geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
 				mp->m_sb.sb_logsectsize : BBSIZE;
 		geo->rtsectsize = mp->m_sb.sb_blocksize;
@@ -259,6 +262,12 @@
 		agf->agf_longest = cpu_to_be32(tmpsize);
 		if (xfs_sb_version_hascrc(&mp->m_sb))
 			uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
+		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+			agf->agf_refcount_root = cpu_to_be32(
+					xfs_refc_block(mp));
+			agf->agf_refcount_level = cpu_to_be32(1);
+			agf->agf_refcount_blocks = cpu_to_be32(1);
+		}
 
 		error = xfs_bwrite(bp);
 		xfs_buf_relse(bp);
@@ -450,6 +459,17 @@
 			rrec->rm_offset = 0;
 			be16_add_cpu(&block->bb_numrecs, 1);
 
+			/* account for refc btree root */
+			if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+				rrec = XFS_RMAP_REC_ADDR(block, 5);
+				rrec->rm_startblock = cpu_to_be32(
+						xfs_refc_block(mp));
+				rrec->rm_blockcount = cpu_to_be32(1);
+				rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
+				rrec->rm_offset = 0;
+				be16_add_cpu(&block->bb_numrecs, 1);
+			}
+
 			error = xfs_bwrite(bp);
 			xfs_buf_relse(bp);
 			if (error)
@@ -507,6 +527,28 @@
 				goto error0;
 		}
 
+		/*
+		 * refcount btree root block
+		 */
+		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+			bp = xfs_growfs_get_hdr_buf(mp,
+				XFS_AGB_TO_DADDR(mp, agno, xfs_refc_block(mp)),
+				BTOBB(mp->m_sb.sb_blocksize), 0,
+				&xfs_refcountbt_buf_ops);
+			if (!bp) {
+				error = -ENOMEM;
+				goto error0;
+			}
+
+			xfs_btree_init_block(mp, bp, XFS_REFC_CRC_MAGIC,
+					     0, 0, agno,
+					     XFS_BTREE_CRC_BLOCKS);
+
+			error = xfs_bwrite(bp);
+			xfs_buf_relse(bp);
+			if (error)
+				goto error0;
+		}
 	}
 	xfs_trans_agblocks_delta(tp, nfree);
 	/*
@@ -589,6 +631,11 @@
 	xfs_set_low_space_thresholds(mp);
 	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
 
+	/* Reserve AG metadata blocks. */
+	error = xfs_fs_reserve_ag_blocks(mp);
+	if (error && error != -ENOSPC)
+		goto out;
+
 	/* update secondary superblocks. */
 	for (agno = 1; agno < nagcount; agno++) {
 		error = 0;
@@ -639,6 +686,8 @@
 			continue;
 		}
 	}
+
+ out:
 	return saved_error ? saved_error : error;
 
  error0:
@@ -948,3 +997,59 @@
 	"Please umount the filesystem and rectify the problem(s)");
 	}
 }
+
+/*
+ * Reserve free space for per-AG metadata.
+ */
+int
+xfs_fs_reserve_ag_blocks(
+	struct xfs_mount	*mp)
+{
+	xfs_agnumber_t		agno;
+	struct xfs_perag	*pag;
+	int			error = 0;
+	int			err2;
+
+	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
+		pag = xfs_perag_get(mp, agno);
+		err2 = xfs_ag_resv_init(pag);
+		xfs_perag_put(pag);
+		if (err2 && !error)
+			error = err2;
+	}
+
+	if (error && error != -ENOSPC) {
+		xfs_warn(mp,
+	"Error %d reserving per-AG metadata reserve pool.", error);
+		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+	}
+
+	return error;
+}
+
+/*
+ * Free space reserved for per-AG metadata.
+ */
+int
+xfs_fs_unreserve_ag_blocks(
+	struct xfs_mount	*mp)
+{
+	xfs_agnumber_t		agno;
+	struct xfs_perag	*pag;
+	int			error = 0;
+	int			err2;
+
+	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
+		pag = xfs_perag_get(mp, agno);
+		err2 = xfs_ag_resv_free(pag);
+		xfs_perag_put(pag);
+		if (err2 && !error)
+			error = err2;
+	}
+
+	if (error)
+		xfs_warn(mp,
+	"Error %d freeing per-AG metadata reserve pool.", error);
+
+	return error;
+}
diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h
index f32713f..f349158 100644
--- a/fs/xfs/xfs_fsops.h
+++ b/fs/xfs/xfs_fsops.h
@@ -26,4 +26,7 @@
 				xfs_fsop_resblks_t *outval);
 extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags);
 
+extern int xfs_fs_reserve_ag_blocks(struct xfs_mount *mp);
+extern int xfs_fs_unreserve_ag_blocks(struct xfs_mount *mp);
+
 #endif	/* __XFS_FSOPS_H__ */
diff --git a/fs/xfs/xfs_globals.c b/fs/xfs/xfs_globals.c
index 4d41b24..687a4b0 100644
--- a/fs/xfs/xfs_globals.c
+++ b/fs/xfs/xfs_globals.c
@@ -21,8 +21,8 @@
 /*
  * Tunable XFS parameters.  xfs_params is required even when CONFIG_SYSCTL=n,
  * other XFS code uses these values.  Times are measured in centisecs (i.e.
- * 100ths of a second) with the exception of eofb_timer, which is measured in
- * seconds.
+ * 100ths of a second) with the exception of eofb_timer and cowb_timer, which
+ * are measured in seconds.
  */
 xfs_param_t xfs_params = {
 			  /*	MIN		DFLT		MAX	*/
@@ -42,6 +42,7 @@
 	.inherit_nodfrg	= {	0,		1,		1	},
 	.fstrm_timer	= {	1,		30*100,		3600*100},
 	.eofb_timer	= {	1,		300,		3600*24},
+	.cowb_timer	= {	1,		1800,		3600*24},
 };
 
 struct xfs_globals xfs_globals = {
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 65b2e3f..14796b7 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -33,6 +33,7 @@
 #include "xfs_bmap_util.h"
 #include "xfs_dquot_item.h"
 #include "xfs_dquot.h"
+#include "xfs_reflink.h"
 
 #include <linux/kthread.h>
 #include <linux/freezer.h>
@@ -76,6 +77,9 @@
 	ip->i_mount = mp;
 	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
 	ip->i_afp = NULL;
+	ip->i_cowfp = NULL;
+	ip->i_cnextents = 0;
+	ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
 	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
 	ip->i_flags = 0;
 	ip->i_delayed_blks = 0;
@@ -101,6 +105,8 @@
 
 	if (ip->i_afp)
 		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
+	if (ip->i_cowfp)
+		xfs_idestroy_fork(ip, XFS_COW_FORK);
 
 	if (ip->i_itemp) {
 		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
@@ -787,6 +793,33 @@
 	xfs_queue_eofblocks(mp);
 }
 
+/*
+ * Background scanning to trim preallocated CoW space. This is queued
+ * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
+ * (We'll just piggyback on the post-EOF prealloc space workqueue.)
+ */
+STATIC void
+xfs_queue_cowblocks(
+	struct xfs_mount *mp)
+{
+	rcu_read_lock();
+	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
+		queue_delayed_work(mp->m_eofblocks_workqueue,
+				   &mp->m_cowblocks_work,
+				   msecs_to_jiffies(xfs_cowb_secs * 1000));
+	rcu_read_unlock();
+}
+
+void
+xfs_cowblocks_worker(
+	struct work_struct *work)
+{
+	struct xfs_mount *mp = container_of(to_delayed_work(work),
+				struct xfs_mount, m_cowblocks_work);
+	xfs_icache_free_cowblocks(mp, NULL);
+	xfs_queue_cowblocks(mp);
+}
+
 int
 xfs_inode_ag_iterator(
 	struct xfs_mount	*mp,
@@ -1343,18 +1376,30 @@
 	return ret;
 }
 
-int
-xfs_icache_free_eofblocks(
+static int
+__xfs_icache_free_eofblocks(
 	struct xfs_mount	*mp,
-	struct xfs_eofblocks	*eofb)
+	struct xfs_eofblocks	*eofb,
+	int			(*execute)(struct xfs_inode *ip, int flags,
+					   void *args),
+	int			tag)
 {
 	int flags = SYNC_TRYLOCK;
 
 	if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
 		flags = SYNC_WAIT;
 
-	return xfs_inode_ag_iterator_tag(mp, xfs_inode_free_eofblocks, flags,
-					 eofb, XFS_ICI_EOFBLOCKS_TAG);
+	return xfs_inode_ag_iterator_tag(mp, execute, flags,
+					 eofb, tag);
+}
+
+int
+xfs_icache_free_eofblocks(
+	struct xfs_mount	*mp,
+	struct xfs_eofblocks	*eofb)
+{
+	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks,
+			XFS_ICI_EOFBLOCKS_TAG);
 }
 
 /*
@@ -1363,9 +1408,11 @@
  * failure. We make a best effort by including each quota under low free space
  * conditions (less than 1% free space) in the scan.
  */
-int
-xfs_inode_free_quota_eofblocks(
-	struct xfs_inode *ip)
+static int
+__xfs_inode_free_quota_eofblocks(
+	struct xfs_inode	*ip,
+	int			(*execute)(struct xfs_mount *mp,
+					   struct xfs_eofblocks	*eofb))
 {
 	int scan = 0;
 	struct xfs_eofblocks eofb = {0};
@@ -1401,14 +1448,25 @@
 	}
 
 	if (scan)
-		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
+		execute(ip->i_mount, &eofb);
 
 	return scan;
 }
 
-void
-xfs_inode_set_eofblocks_tag(
-	xfs_inode_t	*ip)
+int
+xfs_inode_free_quota_eofblocks(
+	struct xfs_inode *ip)
+{
+	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
+}
+
+static void
+__xfs_inode_set_eofblocks_tag(
+	xfs_inode_t	*ip,
+	void		(*execute)(struct xfs_mount *mp),
+	void		(*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
+				  int error, unsigned long caller_ip),
+	int		tag)
 {
 	struct xfs_mount *mp = ip->i_mount;
 	struct xfs_perag *pag;
@@ -1426,26 +1484,22 @@
 
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
 	spin_lock(&pag->pag_ici_lock);
-	trace_xfs_inode_set_eofblocks_tag(ip);
 
-	tagged = radix_tree_tagged(&pag->pag_ici_root,
-				   XFS_ICI_EOFBLOCKS_TAG);
+	tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
 	radix_tree_tag_set(&pag->pag_ici_root,
-			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
-			   XFS_ICI_EOFBLOCKS_TAG);
+			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
 	if (!tagged) {
 		/* propagate the eofblocks tag up into the perag radix tree */
 		spin_lock(&ip->i_mount->m_perag_lock);
 		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
 				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
-				   XFS_ICI_EOFBLOCKS_TAG);
+				   tag);
 		spin_unlock(&ip->i_mount->m_perag_lock);
 
 		/* kick off background trimming */
-		xfs_queue_eofblocks(ip->i_mount);
+		execute(ip->i_mount);
 
-		trace_xfs_perag_set_eofblocks(ip->i_mount, pag->pag_agno,
-					      -1, _RET_IP_);
+		set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
 	}
 
 	spin_unlock(&pag->pag_ici_lock);
@@ -1453,9 +1507,22 @@
 }
 
 void
-xfs_inode_clear_eofblocks_tag(
+xfs_inode_set_eofblocks_tag(
 	xfs_inode_t	*ip)
 {
+	trace_xfs_inode_set_eofblocks_tag(ip);
+	return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_eofblocks,
+			trace_xfs_perag_set_eofblocks,
+			XFS_ICI_EOFBLOCKS_TAG);
+}
+
+static void
+__xfs_inode_clear_eofblocks_tag(
+	xfs_inode_t	*ip,
+	void		(*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
+				    int error, unsigned long caller_ip),
+	int		tag)
+{
 	struct xfs_mount *mp = ip->i_mount;
 	struct xfs_perag *pag;
 
@@ -1465,23 +1532,141 @@
 
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
 	spin_lock(&pag->pag_ici_lock);
-	trace_xfs_inode_clear_eofblocks_tag(ip);
 
 	radix_tree_tag_clear(&pag->pag_ici_root,
-			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
-			     XFS_ICI_EOFBLOCKS_TAG);
-	if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_EOFBLOCKS_TAG)) {
+			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
+	if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
 		/* clear the eofblocks tag from the perag radix tree */
 		spin_lock(&ip->i_mount->m_perag_lock);
 		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
 				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
-				     XFS_ICI_EOFBLOCKS_TAG);
+				     tag);
 		spin_unlock(&ip->i_mount->m_perag_lock);
-		trace_xfs_perag_clear_eofblocks(ip->i_mount, pag->pag_agno,
-					       -1, _RET_IP_);
+		clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
 	}
 
 	spin_unlock(&pag->pag_ici_lock);
 	xfs_perag_put(pag);
 }
 
+void
+xfs_inode_clear_eofblocks_tag(
+	xfs_inode_t	*ip)
+{
+	trace_xfs_inode_clear_eofblocks_tag(ip);
+	return __xfs_inode_clear_eofblocks_tag(ip,
+			trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
+}
+
+/*
+ * Automatic CoW Reservation Freeing
+ *
+ * These functions automatically garbage collect leftover CoW reservations
+ * that were made on behalf of a cowextsize hint when we start to run out
+ * of quota or when the reservations sit around for too long.  If the file
+ * has dirty pages or is undergoing writeback, its CoW reservations will
+ * be retained.
+ *
+ * The actual garbage collection piggybacks off the same code that runs
+ * the speculative EOF preallocation garbage collector.
+ */
+STATIC int
+xfs_inode_free_cowblocks(
+	struct xfs_inode	*ip,
+	int			flags,
+	void			*args)
+{
+	int ret;
+	struct xfs_eofblocks *eofb = args;
+	bool need_iolock = true;
+	int match;
+
+	ASSERT(!eofb || (eofb && eofb->eof_scan_owner != 0));
+
+	if (!xfs_reflink_has_real_cow_blocks(ip)) {
+		trace_xfs_inode_free_cowblocks_invalid(ip);
+		xfs_inode_clear_cowblocks_tag(ip);
+		return 0;
+	}
+
+	/*
+	 * If the mapping is dirty or under writeback we cannot touch the
+	 * CoW fork.  Leave it alone if we're in the midst of a directio.
+	 */
+	if (mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
+	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
+	    atomic_read(&VFS_I(ip)->i_dio_count))
+		return 0;
+
+	if (eofb) {
+		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
+			match = xfs_inode_match_id_union(ip, eofb);
+		else
+			match = xfs_inode_match_id(ip, eofb);
+		if (!match)
+			return 0;
+
+		/* skip the inode if the file size is too small */
+		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
+		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
+			return 0;
+
+		/*
+		 * A scan owner implies we already hold the iolock. Skip it in
+		 * xfs_free_eofblocks() to avoid deadlock. This also eliminates
+		 * the possibility of EAGAIN being returned.
+		 */
+		if (eofb->eof_scan_owner == ip->i_ino)
+			need_iolock = false;
+	}
+
+	/* Free the CoW blocks */
+	if (need_iolock) {
+		xfs_ilock(ip, XFS_IOLOCK_EXCL);
+		xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
+	}
+
+	ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF);
+
+	if (need_iolock) {
+		xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
+		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+	}
+
+	return ret;
+}
+
+int
+xfs_icache_free_cowblocks(
+	struct xfs_mount	*mp,
+	struct xfs_eofblocks	*eofb)
+{
+	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks,
+			XFS_ICI_COWBLOCKS_TAG);
+}
+
+int
+xfs_inode_free_quota_cowblocks(
+	struct xfs_inode *ip)
+{
+	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
+}
+
+void
+xfs_inode_set_cowblocks_tag(
+	xfs_inode_t	*ip)
+{
+	trace_xfs_inode_set_eofblocks_tag(ip);
+	return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks,
+			trace_xfs_perag_set_eofblocks,
+			XFS_ICI_COWBLOCKS_TAG);
+}
+
+void
+xfs_inode_clear_cowblocks_tag(
+	xfs_inode_t	*ip)
+{
+	trace_xfs_inode_clear_eofblocks_tag(ip);
+	return __xfs_inode_clear_eofblocks_tag(ip,
+			trace_xfs_perag_clear_eofblocks, XFS_ICI_COWBLOCKS_TAG);
+}
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index 05bac99..a1e02f4 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -40,6 +40,7 @@
 					   in xfs_inode_ag_iterator */
 #define XFS_ICI_RECLAIM_TAG	0	/* inode is to be reclaimed */
 #define XFS_ICI_EOFBLOCKS_TAG	1	/* inode has blocks beyond EOF */
+#define XFS_ICI_COWBLOCKS_TAG	2	/* inode can have cow blocks to gc */
 
 /*
  * Flags for xfs_iget()
@@ -70,6 +71,12 @@
 void xfs_eofblocks_worker(struct work_struct *);
 void xfs_queue_eofblocks(struct xfs_mount *);
 
+void xfs_inode_set_cowblocks_tag(struct xfs_inode *ip);
+void xfs_inode_clear_cowblocks_tag(struct xfs_inode *ip);
+int xfs_icache_free_cowblocks(struct xfs_mount *, struct xfs_eofblocks *);
+int xfs_inode_free_quota_cowblocks(struct xfs_inode *ip);
+void xfs_cowblocks_worker(struct work_struct *);
+
 int xfs_inode_ag_iterator(struct xfs_mount *mp,
 	int (*execute)(struct xfs_inode *ip, int flags, void *args),
 	int flags, void *args);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index e08eaea..4e560e6 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -49,6 +49,7 @@
 #include "xfs_trans_priv.h"
 #include "xfs_log.h"
 #include "xfs_bmap_btree.h"
+#include "xfs_reflink.h"
 
 kmem_zone_t *xfs_inode_zone;
 
@@ -77,6 +78,29 @@
 }
 
 /*
+ * Helper function to extract CoW extent size hint from inode.
+ * Between the extent size hint and the CoW extent size hint, we
+ * return the greater of the two.  If the value is zero (automatic),
+ * use the default size.
+ */
+xfs_extlen_t
+xfs_get_cowextsz_hint(
+	struct xfs_inode	*ip)
+{
+	xfs_extlen_t		a, b;
+
+	a = 0;
+	if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
+		a = ip->i_d.di_cowextsize;
+	b = xfs_get_extsz_hint(ip);
+
+	a = max(a, b);
+	if (a == 0)
+		return XFS_DEFAULT_COWEXTSZ_HINT;
+	return a;
+}
+
+/*
  * These two are wrapper routines around the xfs_ilock() routine used to
  * centralize some grungy code.  They are used in places that wish to lock the
  * inode solely for reading the extents.  The reason these places can't just
@@ -651,6 +675,8 @@
 	if (di_flags2 & XFS_DIFLAG2_ANY) {
 		if (di_flags2 & XFS_DIFLAG2_DAX)
 			flags |= FS_XFLAG_DAX;
+		if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
+			flags |= FS_XFLAG_COWEXTSIZE;
 	}
 
 	if (has_attr)
@@ -821,7 +847,7 @@
 	ip->i_d.di_nextents = 0;
 	ASSERT(ip->i_d.di_nblocks == 0);
 
-	tv = current_fs_time(mp->m_super);
+	tv = current_time(inode);
 	inode->i_mtime = tv;
 	inode->i_atime = tv;
 	inode->i_ctime = tv;
@@ -834,6 +860,7 @@
 	if (ip->i_d.di_version == 3) {
 		inode->i_version = 1;
 		ip->i_d.di_flags2 = 0;
+		ip->i_d.di_cowextsize = 0;
 		ip->i_d.di_crtime.t_sec = (__int32_t)tv.tv_sec;
 		ip->i_d.di_crtime.t_nsec = (__int32_t)tv.tv_nsec;
 	}
@@ -896,6 +923,15 @@
 			ip->i_d.di_flags |= di_flags;
 			ip->i_d.di_flags2 |= di_flags2;
 		}
+		if (pip &&
+		    (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
+		    pip->i_d.di_version == 3 &&
+		    ip->i_d.di_version == 3) {
+			if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
+				ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
+				ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
+			}
+		}
 		/* FALLTHROUGH */
 	case S_IFLNK:
 		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
@@ -1586,6 +1622,20 @@
 			goto out;
 	}
 
+	/* Remove all pending CoW reservations. */
+	error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block,
+			last_block);
+	if (error)
+		goto out;
+
+	/*
+	 * Clear the reflink flag if we truncated everything.
+	 */
+	if (ip->i_d.di_nblocks == 0 && xfs_is_reflink_inode(ip)) {
+		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
+		xfs_inode_clear_cowblocks_tag(ip);
+	}
+
 	/*
 	 * Always re-log the inode so that our permanent transaction can keep
 	 * on rolling it forward in the log.
@@ -1710,7 +1760,7 @@
 	/*
 	 * Log the inode size first to prevent stale data exposure in the event
 	 * of a system crash before the truncate completes. See the related
-	 * comment in xfs_setattr_size() for details.
+	 * comment in xfs_vn_setattr_size() for details.
 	 */
 	ip->i_d.di_size = 0;
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
@@ -1850,6 +1900,7 @@
 	}
 
 	mp = ip->i_mount;
+	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
 
 	/* If this is a read-only mount, don't do this (would generate I/O) */
 	if (mp->m_flags & XFS_MOUNT_RDONLY)
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 8f30d25..f14c1de 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -47,6 +47,7 @@
 
 	/* Extent information. */
 	xfs_ifork_t		*i_afp;		/* attribute fork pointer */
+	xfs_ifork_t		*i_cowfp;	/* copy on write extents */
 	xfs_ifork_t		i_df;		/* data fork */
 
 	/* operations vectors */
@@ -65,6 +66,9 @@
 
 	struct xfs_icdinode	i_d;		/* most of ondisk inode */
 
+	xfs_extnum_t		i_cnextents;	/* # of extents in cow fork */
+	unsigned int		i_cformat;	/* format of cow fork */
+
 	/* VFS inode */
 	struct inode		i_vnode;	/* embedded VFS inode */
 } xfs_inode_t;
@@ -202,6 +206,11 @@
 	return XFS_PROJID_DEFAULT;
 }
 
+static inline bool xfs_is_reflink_inode(struct xfs_inode *ip)
+{
+	return ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
+}
+
 /*
  * In-core inode flags.
  */
@@ -217,6 +226,12 @@
 #define XFS_IPINNED		(1 << __XFS_IPINNED_BIT)
 #define XFS_IDONTCACHE		(1 << 9) /* don't cache the inode long term */
 #define XFS_IEOFBLOCKS		(1 << 10)/* has the preallocblocks tag set */
+/*
+ * If this unlinked inode is in the middle of recovery, don't let drop_inode
+ * truncate and free the inode.  This can happen if we iget the inode during
+ * log recovery to replay a bmap operation on the inode.
+ */
+#define XFS_IRECOVERY		(1 << 11)
 
 /*
  * Per-lifetime flags need to be reset when re-using a reclaimable inode during
@@ -411,6 +426,7 @@
 void		xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
 
 xfs_extlen_t	xfs_get_extsz_hint(struct xfs_inode *ip);
+xfs_extlen_t	xfs_get_cowextsz_hint(struct xfs_inode *ip);
 
 int		xfs_dir_ialloc(struct xfs_trans **, struct xfs_inode *, umode_t,
 			       xfs_nlink_t, xfs_dev_t, prid_t, int,
@@ -474,4 +490,7 @@
 
 extern struct kmem_zone	*xfs_inode_zone;
 
+/* The default CoW extent size hint. */
+#define XFS_DEFAULT_COWEXTSZ_HINT 32
+
 #endif	/* __XFS_INODE_H__ */
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 892c2ac..9610e9c 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -368,7 +368,7 @@
 		to->di_crtime.t_sec = from->di_crtime.t_sec;
 		to->di_crtime.t_nsec = from->di_crtime.t_nsec;
 		to->di_flags2 = from->di_flags2;
-
+		to->di_cowextsize = from->di_cowextsize;
 		to->di_ino = ip->i_ino;
 		to->di_lsn = lsn;
 		memset(to->di_pad2, 0, sizeof(to->di_pad2));
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 96a70fd..c245bed 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -720,7 +720,7 @@
 		iattr.ia_valid = ATTR_SIZE;
 		iattr.ia_size = bf->l_start;
 
-		error = xfs_setattr_size(ip, &iattr);
+		error = xfs_vn_setattr_size(file_dentry(filp), &iattr);
 		break;
 	default:
 		ASSERT(0);
@@ -903,6 +903,8 @@
 	xfs_ilock(ip, XFS_ILOCK_SHARED);
 	fa.fsx_xflags = xfs_ip2xflags(ip);
 	fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
+	fa.fsx_cowextsize = ip->i_d.di_cowextsize <<
+			ip->i_mount->m_sb.sb_blocklog;
 	fa.fsx_projid = xfs_get_projid(ip);
 
 	if (attr) {
@@ -973,12 +975,13 @@
 	if (ip->i_d.di_version < 3)
 		return;
 
-	di_flags2 = 0;
+	di_flags2 = (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK);
 	if (xflags & FS_XFLAG_DAX)
 		di_flags2 |= XFS_DIFLAG2_DAX;
+	if (xflags & FS_XFLAG_COWEXTSIZE)
+		di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
 
 	ip->i_d.di_flags2 = di_flags2;
-
 }
 
 STATIC void
@@ -1031,6 +1034,14 @@
 			return -EINVAL;
 	}
 
+	/* Clear reflink if we are actually able to set the rt flag. */
+	if ((fa->fsx_xflags & FS_XFLAG_REALTIME) && xfs_is_reflink_inode(ip))
+		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
+
+	/* Don't allow us to set DAX mode for a reflinked file for now. */
+	if ((fa->fsx_xflags & FS_XFLAG_DAX) && xfs_is_reflink_inode(ip))
+		return -EINVAL;
+
 	/*
 	 * Can't modify an immutable/append-only file unless
 	 * we have appropriate permission.
@@ -1219,6 +1230,56 @@
 	return 0;
 }
 
+/*
+ * CoW extent size hint validation rules are:
+ *
+ * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
+ *    The inode does not have to have any shared blocks, but it must be a v3.
+ * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
+ *    for a directory, the hint is propagated to new files.
+ * 3. Can be changed on files & directories at any time.
+ * 4. CoW extsize hint of 0 turns off hints, clears inode flags.
+ * 5. Extent size must be a multiple of the appropriate block size.
+ * 6. The extent size hint must be limited to half the AG size to avoid
+ *    alignment extending the extent beyond the limits of the AG.
+ */
+static int
+xfs_ioctl_setattr_check_cowextsize(
+	struct xfs_inode	*ip,
+	struct fsxattr		*fa)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+
+	if (!(fa->fsx_xflags & FS_XFLAG_COWEXTSIZE))
+		return 0;
+
+	if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb) ||
+	    ip->i_d.di_version != 3)
+		return -EINVAL;
+
+	if (!S_ISREG(VFS_I(ip)->i_mode) && !S_ISDIR(VFS_I(ip)->i_mode))
+		return -EINVAL;
+
+	if (fa->fsx_cowextsize != 0) {
+		xfs_extlen_t    size;
+		xfs_fsblock_t   cowextsize_fsb;
+
+		cowextsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
+		if (cowextsize_fsb > MAXEXTLEN)
+			return -EINVAL;
+
+		size = mp->m_sb.sb_blocksize;
+		if (cowextsize_fsb > mp->m_sb.sb_agblocks / 2)
+			return -EINVAL;
+
+		if (fa->fsx_cowextsize % size)
+			return -EINVAL;
+	} else
+		fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE;
+
+	return 0;
+}
+
 static int
 xfs_ioctl_setattr_check_projid(
 	struct xfs_inode	*ip,
@@ -1311,6 +1372,10 @@
 	if (code)
 		goto error_trans_cancel;
 
+	code = xfs_ioctl_setattr_check_cowextsize(ip, fa);
+	if (code)
+		goto error_trans_cancel;
+
 	code = xfs_ioctl_setattr_xflags(tp, ip, fa);
 	if (code)
 		goto error_trans_cancel;
@@ -1346,6 +1411,12 @@
 		ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
 	else
 		ip->i_d.di_extsize = 0;
+	if (ip->i_d.di_version == 3 &&
+	    (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
+		ip->i_d.di_cowextsize = fa->fsx_cowextsize >>
+				mp->m_sb.sb_blocklog;
+	else
+		ip->i_d.di_cowextsize = 0;
 
 	code = xfs_trans_commit(tp);
 
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index c08253e..d907eb9 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -39,6 +39,7 @@
 #include "xfs_quota.h"
 #include "xfs_dquot_item.h"
 #include "xfs_dquot.h"
+#include "xfs_reflink.h"
 
 
 #define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
@@ -70,7 +71,7 @@
 	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
 }
 
-static xfs_extlen_t
+xfs_extlen_t
 xfs_eof_alignment(
 	struct xfs_inode	*ip,
 	xfs_extlen_t		extsize)
@@ -609,7 +610,7 @@
 	}
 
 retry:
-	error = xfs_bmapi_reserve_delalloc(ip, offset_fsb,
+	error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb,
 			end_fsb - offset_fsb, &got,
 			&prev, &idx, eof);
 	switch (error) {
@@ -666,6 +667,7 @@
 int
 xfs_iomap_write_allocate(
 	xfs_inode_t	*ip,
+	int		whichfork,
 	xfs_off_t	offset,
 	xfs_bmbt_irec_t *imap)
 {
@@ -678,8 +680,12 @@
 	xfs_trans_t	*tp;
 	int		nimaps;
 	int		error = 0;
+	int		flags = 0;
 	int		nres;
 
+	if (whichfork == XFS_COW_FORK)
+		flags |= XFS_BMAPI_COWFORK;
+
 	/*
 	 * Make sure that the dquots are there.
 	 */
@@ -773,7 +779,7 @@
 			 * pointer that the caller gave to us.
 			 */
 			error = xfs_bmapi_write(tp, ip, map_start_fsb,
-						count_fsb, 0, &first_block,
+						count_fsb, flags, &first_block,
 						nres, imap, &nimaps,
 						&dfops);
 			if (error)
@@ -955,14 +961,22 @@
 	struct xfs_mount	*mp = ip->i_mount;
 	struct xfs_bmbt_irec	imap;
 	xfs_fileoff_t		offset_fsb, end_fsb;
+	bool			shared, trimmed;
 	int			nimaps = 1, error = 0;
 	unsigned		lockmode;
 
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
 
-	if ((flags & IOMAP_WRITE) &&
-	    !IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
+	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
+		error = xfs_reflink_reserve_cow_range(ip, offset, length);
+		if (error < 0)
+			return error;
+	}
+
+	if ((flags & IOMAP_WRITE) && !IS_DAX(inode) &&
+		   !xfs_get_extsz_hint(ip)) {
+		/* Reserve delalloc blocks for regular writeback. */
 		return xfs_file_iomap_begin_delay(inode, offset, length, flags,
 				iomap);
 	}
@@ -976,7 +990,14 @@
 	end_fsb = XFS_B_TO_FSB(mp, offset + length);
 
 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
-			       &nimaps, XFS_BMAPI_ENTIRE);
+			       &nimaps, 0);
+	if (error) {
+		xfs_iunlock(ip, lockmode);
+		return error;
+	}
+
+	/* Trim the mapping to the nearest shared extent boundary. */
+	error = xfs_reflink_trim_around_shared(ip, &imap, &shared, &trimmed);
 	if (error) {
 		xfs_iunlock(ip, lockmode);
 		return error;
@@ -1015,6 +1036,8 @@
 	}
 
 	xfs_bmbt_to_iomap(ip, iomap, &imap);
+	if (shared)
+		iomap->flags |= IOMAP_F_SHARED;
 	return 0;
 }
 
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 6498be4..6d45cf0 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -25,12 +25,13 @@
 
 int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
 			struct xfs_bmbt_irec *, int);
-int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t,
+int xfs_iomap_write_allocate(struct xfs_inode *, int, xfs_off_t,
 			struct xfs_bmbt_irec *);
 int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t);
 
 void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
 		struct xfs_bmbt_irec *);
+xfs_extlen_t xfs_eof_alignment(struct xfs_inode *ip, xfs_extlen_t extsize);
 
 extern struct iomap_ops xfs_iomap_ops;
 extern struct iomap_ops xfs_xattr_iomap_ops;
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index b24c310..405a65c 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -542,6 +542,28 @@
 		inode->i_mtime = iattr->ia_mtime;
 }
 
+static int
+xfs_vn_change_ok(
+	struct dentry	*dentry,
+	struct iattr	*iattr)
+{
+	struct xfs_mount	*mp = XFS_I(d_inode(dentry))->i_mount;
+
+	if (mp->m_flags & XFS_MOUNT_RDONLY)
+		return -EROFS;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -EIO;
+
+	return setattr_prepare(dentry, iattr);
+}
+
+/*
+ * Set non-size attributes of an inode.
+ *
+ * Caution: The caller of this function is responsible for calling
+ * setattr_prepare() or otherwise verifying the change is fine.
+ */
 int
 xfs_setattr_nonsize(
 	struct xfs_inode	*ip,
@@ -558,21 +580,6 @@
 	struct xfs_dquot	*udqp = NULL, *gdqp = NULL;
 	struct xfs_dquot	*olddquot1 = NULL, *olddquot2 = NULL;
 
-	trace_xfs_setattr(ip);
-
-	/* If acls are being inherited, we already have this checked */
-	if (!(flags & XFS_ATTR_NOACL)) {
-		if (mp->m_flags & XFS_MOUNT_RDONLY)
-			return -EROFS;
-
-		if (XFS_FORCED_SHUTDOWN(mp))
-			return -EIO;
-
-		error = inode_change_ok(inode, iattr);
-		if (error)
-			return error;
-	}
-
 	ASSERT((mask & ATTR_SIZE) == 0);
 
 	/*
@@ -743,8 +750,27 @@
 	return error;
 }
 
+int
+xfs_vn_setattr_nonsize(
+	struct dentry		*dentry,
+	struct iattr		*iattr)
+{
+	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
+	int error;
+
+	trace_xfs_setattr(ip);
+
+	error = xfs_vn_change_ok(dentry, iattr);
+	if (error)
+		return error;
+	return xfs_setattr_nonsize(ip, iattr, 0);
+}
+
 /*
  * Truncate file.  Must have write permission and not be a directory.
+ *
+ * Caution: The caller of this function is responsible for calling
+ * setattr_prepare() or otherwise verifying the change is fine.
  */
 int
 xfs_setattr_size(
@@ -759,18 +785,6 @@
 	uint			lock_flags = 0;
 	bool			did_zeroing = false;
 
-	trace_xfs_setattr(ip);
-
-	if (mp->m_flags & XFS_MOUNT_RDONLY)
-		return -EROFS;
-
-	if (XFS_FORCED_SHUTDOWN(mp))
-		return -EIO;
-
-	error = inode_change_ok(inode, iattr);
-	if (error)
-		return error;
-
 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
 	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
 	ASSERT(S_ISREG(inode->i_mode));
@@ -882,7 +896,7 @@
 	if (newsize != oldsize &&
 	    !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
 		iattr->ia_ctime = iattr->ia_mtime =
-			current_fs_time(inode->i_sb);
+			current_time(inode);
 		iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
 	}
 
@@ -942,16 +956,32 @@
 	goto out_unlock;
 }
 
+int
+xfs_vn_setattr_size(
+	struct dentry		*dentry,
+	struct iattr		*iattr)
+{
+	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
+	int error;
+
+	trace_xfs_setattr(ip);
+
+	error = xfs_vn_change_ok(dentry, iattr);
+	if (error)
+		return error;
+	return xfs_setattr_size(ip, iattr);
+}
+
 STATIC int
 xfs_vn_setattr(
 	struct dentry		*dentry,
 	struct iattr		*iattr)
 {
-	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
 	int			error;
 
 	if (iattr->ia_valid & ATTR_SIZE) {
-		uint		iolock = XFS_IOLOCK_EXCL;
+		struct xfs_inode	*ip = XFS_I(d_inode(dentry));
+		uint			iolock = XFS_IOLOCK_EXCL;
 
 		xfs_ilock(ip, iolock);
 		error = xfs_break_layouts(d_inode(dentry), &iolock, true);
@@ -959,11 +989,11 @@
 			xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 			iolock |= XFS_MMAPLOCK_EXCL;
 
-			error = xfs_setattr_size(ip, iattr);
+			error = xfs_vn_setattr_size(dentry, iattr);
 		}
 		xfs_iunlock(ip, iolock);
 	} else {
-		error = xfs_setattr_nonsize(ip, iattr, 0);
+		error = xfs_vn_setattr_nonsize(dentry, iattr);
 	}
 
 	return error;
@@ -1036,9 +1066,6 @@
 	.set_acl		= xfs_set_acl,
 	.getattr		= xfs_vn_getattr,
 	.setattr		= xfs_vn_setattr,
-	.setxattr		= generic_setxattr,
-	.getxattr		= generic_getxattr,
-	.removexattr		= generic_removexattr,
 	.listxattr		= xfs_vn_listxattr,
 	.fiemap			= xfs_vn_fiemap,
 	.update_time		= xfs_vn_update_time,
@@ -1059,14 +1086,11 @@
 	 */
 	.rmdir			= xfs_vn_unlink,
 	.mknod			= xfs_vn_mknod,
-	.rename2		= xfs_vn_rename,
+	.rename			= xfs_vn_rename,
 	.get_acl		= xfs_get_acl,
 	.set_acl		= xfs_set_acl,
 	.getattr		= xfs_vn_getattr,
 	.setattr		= xfs_vn_setattr,
-	.setxattr		= generic_setxattr,
-	.getxattr		= generic_getxattr,
-	.removexattr		= generic_removexattr,
 	.listxattr		= xfs_vn_listxattr,
 	.update_time		= xfs_vn_update_time,
 	.tmpfile		= xfs_vn_tmpfile,
@@ -1087,14 +1111,11 @@
 	 */
 	.rmdir			= xfs_vn_unlink,
 	.mknod			= xfs_vn_mknod,
-	.rename2		= xfs_vn_rename,
+	.rename			= xfs_vn_rename,
 	.get_acl		= xfs_get_acl,
 	.set_acl		= xfs_set_acl,
 	.getattr		= xfs_vn_getattr,
 	.setattr		= xfs_vn_setattr,
-	.setxattr		= generic_setxattr,
-	.getxattr		= generic_getxattr,
-	.removexattr		= generic_removexattr,
 	.listxattr		= xfs_vn_listxattr,
 	.update_time		= xfs_vn_update_time,
 	.tmpfile		= xfs_vn_tmpfile,
@@ -1105,9 +1126,6 @@
 	.get_link		= xfs_vn_get_link,
 	.getattr		= xfs_vn_getattr,
 	.setattr		= xfs_vn_setattr,
-	.setxattr		= generic_setxattr,
-	.getxattr		= generic_getxattr,
-	.removexattr		= generic_removexattr,
 	.listxattr		= xfs_vn_listxattr,
 	.update_time		= xfs_vn_update_time,
 };
@@ -1117,9 +1135,6 @@
 	.get_link		= xfs_vn_get_link_inline,
 	.getattr		= xfs_vn_getattr,
 	.setattr		= xfs_vn_setattr,
-	.setxattr		= generic_setxattr,
-	.getxattr		= generic_getxattr,
-	.removexattr		= generic_removexattr,
 	.listxattr		= xfs_vn_listxattr,
 	.update_time		= xfs_vn_update_time,
 };
@@ -1144,6 +1159,7 @@
 		inode->i_flags |= S_NOATIME;
 	if (S_ISREG(inode->i_mode) &&
 	    ip->i_mount->m_sb.sb_blocksize == PAGE_SIZE &&
+	    !xfs_is_reflink_inode(ip) &&
 	    (ip->i_mount->m_flags & XFS_MOUNT_DAX ||
 	     ip->i_d.di_flags2 & XFS_DIFLAG2_DAX))
 		inode->i_flags |= S_DAX;
diff --git a/fs/xfs/xfs_iops.h b/fs/xfs/xfs_iops.h
index a0f84ab..0259a38 100644
--- a/fs/xfs/xfs_iops.h
+++ b/fs/xfs/xfs_iops.h
@@ -33,6 +33,7 @@
 extern void xfs_setattr_time(struct xfs_inode *ip, struct iattr *iattr);
 extern int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap,
 			       int flags);
-extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap);
+extern int xfs_vn_setattr_nonsize(struct dentry *dentry, struct iattr *vap);
+extern int xfs_vn_setattr_size(struct dentry *dentry, struct iattr *vap);
 
 #endif /* __XFS_IOPS_H__ */
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index ce73eb3..66e8817 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -66,7 +66,7 @@
 	if (!buffer || xfs_internal_inum(mp, ino))
 		return -EINVAL;
 
-	buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
+	buf = kmem_zalloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
 	if (!buf)
 		return -ENOMEM;
 
@@ -111,6 +111,12 @@
 	buf->bs_aextents = dic->di_anextents;
 	buf->bs_forkoff = XFS_IFORK_BOFF(ip);
 
+	if (dic->di_version == 3) {
+		if (dic->di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
+			buf->bs_cowextsize = dic->di_cowextsize <<
+					mp->m_sb.sb_blocklog;
+	}
+
 	switch (dic->di_format) {
 	case XFS_DINODE_FMT_DEV:
 		buf->bs_rdev = ip->i_df.if_u2.if_rdev;
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index b8d64d5..68640fb 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -116,6 +116,7 @@
 #define xfs_inherit_nodefrag	xfs_params.inherit_nodfrg.val
 #define xfs_fstrm_centisecs	xfs_params.fstrm_timer.val
 #define xfs_eofb_secs		xfs_params.eofb_timer.val
+#define xfs_cowb_secs		xfs_params.cowb_timer.val
 
 #define current_cpu()		(raw_smp_processor_id())
 #define current_pid()		(current->pid)
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 846483d..9b3d7c7 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -45,6 +45,8 @@
 #include "xfs_dir2.h"
 #include "xfs_rmap_item.h"
 #include "xfs_buf_item.h"
+#include "xfs_refcount_item.h"
+#include "xfs_bmap_item.h"
 
 #define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)
 
@@ -1924,6 +1926,10 @@
 		case XFS_LI_EFI:
 		case XFS_LI_RUI:
 		case XFS_LI_RUD:
+		case XFS_LI_CUI:
+		case XFS_LI_CUD:
+		case XFS_LI_BUI:
+		case XFS_LI_BUD:
 			trace_xfs_log_recover_item_reorder_tail(log,
 							trans, item, pass);
 			list_move_tail(&item->ri_list, &inode_list);
@@ -2242,6 +2248,7 @@
 	case XFS_ABTB_MAGIC:
 	case XFS_ABTC_MAGIC:
 	case XFS_RMAP_CRC_MAGIC:
+	case XFS_REFC_CRC_MAGIC:
 	case XFS_IBT_CRC_MAGIC:
 	case XFS_IBT_MAGIC: {
 		struct xfs_btree_block *btb = blk;
@@ -2415,6 +2422,9 @@
 		case XFS_RMAP_CRC_MAGIC:
 			bp->b_ops = &xfs_rmapbt_buf_ops;
 			break;
+		case XFS_REFC_CRC_MAGIC:
+			bp->b_ops = &xfs_refcountbt_buf_ops;
+			break;
 		default:
 			warnmsg = "Bad btree block magic!";
 			break;
@@ -3547,6 +3557,242 @@
 }
 
 /*
+ * Copy an CUI format buffer from the given buf, and into the destination
+ * CUI format structure.  The CUI/CUD items were designed not to need any
+ * special alignment handling.
+ */
+static int
+xfs_cui_copy_format(
+	struct xfs_log_iovec		*buf,
+	struct xfs_cui_log_format	*dst_cui_fmt)
+{
+	struct xfs_cui_log_format	*src_cui_fmt;
+	uint				len;
+
+	src_cui_fmt = buf->i_addr;
+	len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);
+
+	if (buf->i_len == len) {
+		memcpy(dst_cui_fmt, src_cui_fmt, len);
+		return 0;
+	}
+	return -EFSCORRUPTED;
+}
+
+/*
+ * This routine is called to create an in-core extent refcount update
+ * item from the cui format structure which was logged on disk.
+ * It allocates an in-core cui, copies the extents from the format
+ * structure into it, and adds the cui to the AIL with the given
+ * LSN.
+ */
+STATIC int
+xlog_recover_cui_pass2(
+	struct xlog			*log,
+	struct xlog_recover_item	*item,
+	xfs_lsn_t			lsn)
+{
+	int				error;
+	struct xfs_mount		*mp = log->l_mp;
+	struct xfs_cui_log_item		*cuip;
+	struct xfs_cui_log_format	*cui_formatp;
+
+	cui_formatp = item->ri_buf[0].i_addr;
+
+	cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
+	error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
+	if (error) {
+		xfs_cui_item_free(cuip);
+		return error;
+	}
+	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
+
+	spin_lock(&log->l_ailp->xa_lock);
+	/*
+	 * The CUI has two references. One for the CUD and one for CUI to ensure
+	 * it makes it into the AIL. Insert the CUI into the AIL directly and
+	 * drop the CUI reference. Note that xfs_trans_ail_update() drops the
+	 * AIL lock.
+	 */
+	xfs_trans_ail_update(log->l_ailp, &cuip->cui_item, lsn);
+	xfs_cui_release(cuip);
+	return 0;
+}
+
+
+/*
+ * This routine is called when an CUD format structure is found in a committed
+ * transaction in the log. Its purpose is to cancel the corresponding CUI if it
+ * was still in the log. To do this it searches the AIL for the CUI with an id
+ * equal to that in the CUD format structure. If we find it we drop the CUD
+ * reference, which removes the CUI from the AIL and frees it.
+ */
+STATIC int
+xlog_recover_cud_pass2(
+	struct xlog			*log,
+	struct xlog_recover_item	*item)
+{
+	struct xfs_cud_log_format	*cud_formatp;
+	struct xfs_cui_log_item		*cuip = NULL;
+	struct xfs_log_item		*lip;
+	__uint64_t			cui_id;
+	struct xfs_ail_cursor		cur;
+	struct xfs_ail			*ailp = log->l_ailp;
+
+	cud_formatp = item->ri_buf[0].i_addr;
+	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format))
+		return -EFSCORRUPTED;
+	cui_id = cud_formatp->cud_cui_id;
+
+	/*
+	 * Search for the CUI with the id in the CUD format structure in the
+	 * AIL.
+	 */
+	spin_lock(&ailp->xa_lock);
+	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
+	while (lip != NULL) {
+		if (lip->li_type == XFS_LI_CUI) {
+			cuip = (struct xfs_cui_log_item *)lip;
+			if (cuip->cui_format.cui_id == cui_id) {
+				/*
+				 * Drop the CUD reference to the CUI. This
+				 * removes the CUI from the AIL and frees it.
+				 */
+				spin_unlock(&ailp->xa_lock);
+				xfs_cui_release(cuip);
+				spin_lock(&ailp->xa_lock);
+				break;
+			}
+		}
+		lip = xfs_trans_ail_cursor_next(ailp, &cur);
+	}
+
+	xfs_trans_ail_cursor_done(&cur);
+	spin_unlock(&ailp->xa_lock);
+
+	return 0;
+}
+
+/*
+ * Copy an BUI format buffer from the given buf, and into the destination
+ * BUI format structure.  The BUI/BUD items were designed not to need any
+ * special alignment handling.
+ */
+static int
+xfs_bui_copy_format(
+	struct xfs_log_iovec		*buf,
+	struct xfs_bui_log_format	*dst_bui_fmt)
+{
+	struct xfs_bui_log_format	*src_bui_fmt;
+	uint				len;
+
+	src_bui_fmt = buf->i_addr;
+	len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents);
+
+	if (buf->i_len == len) {
+		memcpy(dst_bui_fmt, src_bui_fmt, len);
+		return 0;
+	}
+	return -EFSCORRUPTED;
+}
+
+/*
+ * This routine is called to create an in-core extent bmap update
+ * item from the bui format structure which was logged on disk.
+ * It allocates an in-core bui, copies the extents from the format
+ * structure into it, and adds the bui to the AIL with the given
+ * LSN.
+ */
+STATIC int
+xlog_recover_bui_pass2(
+	struct xlog			*log,
+	struct xlog_recover_item	*item,
+	xfs_lsn_t			lsn)
+{
+	int				error;
+	struct xfs_mount		*mp = log->l_mp;
+	struct xfs_bui_log_item		*buip;
+	struct xfs_bui_log_format	*bui_formatp;
+
+	bui_formatp = item->ri_buf[0].i_addr;
+
+	if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
+		return -EFSCORRUPTED;
+	buip = xfs_bui_init(mp);
+	error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
+	if (error) {
+		xfs_bui_item_free(buip);
+		return error;
+	}
+	atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
+
+	spin_lock(&log->l_ailp->xa_lock);
+	/*
+	 * The RUI has two references. One for the RUD and one for RUI to ensure
+	 * it makes it into the AIL. Insert the RUI into the AIL directly and
+	 * drop the RUI reference. Note that xfs_trans_ail_update() drops the
+	 * AIL lock.
+	 */
+	xfs_trans_ail_update(log->l_ailp, &buip->bui_item, lsn);
+	xfs_bui_release(buip);
+	return 0;
+}
+
+
+/*
+ * This routine is called when an BUD format structure is found in a committed
+ * transaction in the log. Its purpose is to cancel the corresponding BUI if it
+ * was still in the log. To do this it searches the AIL for the BUI with an id
+ * equal to that in the BUD format structure. If we find it we drop the BUD
+ * reference, which removes the BUI from the AIL and frees it.
+ */
+STATIC int
+xlog_recover_bud_pass2(
+	struct xlog			*log,
+	struct xlog_recover_item	*item)
+{
+	struct xfs_bud_log_format	*bud_formatp;
+	struct xfs_bui_log_item		*buip = NULL;
+	struct xfs_log_item		*lip;
+	__uint64_t			bui_id;
+	struct xfs_ail_cursor		cur;
+	struct xfs_ail			*ailp = log->l_ailp;
+
+	bud_formatp = item->ri_buf[0].i_addr;
+	if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format))
+		return -EFSCORRUPTED;
+	bui_id = bud_formatp->bud_bui_id;
+
+	/*
+	 * Search for the BUI with the id in the BUD format structure in the
+	 * AIL.
+	 */
+	spin_lock(&ailp->xa_lock);
+	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
+	while (lip != NULL) {
+		if (lip->li_type == XFS_LI_BUI) {
+			buip = (struct xfs_bui_log_item *)lip;
+			if (buip->bui_format.bui_id == bui_id) {
+				/*
+				 * Drop the BUD reference to the BUI. This
+				 * removes the BUI from the AIL and frees it.
+				 */
+				spin_unlock(&ailp->xa_lock);
+				xfs_bui_release(buip);
+				spin_lock(&ailp->xa_lock);
+				break;
+			}
+		}
+		lip = xfs_trans_ail_cursor_next(ailp, &cur);
+	}
+
+	xfs_trans_ail_cursor_done(&cur);
+	spin_unlock(&ailp->xa_lock);
+
+	return 0;
+}
+
+/*
  * This routine is called when an inode create format structure is found in a
  * committed transaction in the log.  It's purpose is to initialise the inodes
  * being allocated on disk. This requires us to get inode cluster buffers that
@@ -3773,6 +4019,10 @@
 	case XFS_LI_QUOTAOFF:
 	case XFS_LI_RUI:
 	case XFS_LI_RUD:
+	case XFS_LI_CUI:
+	case XFS_LI_CUD:
+	case XFS_LI_BUI:
+	case XFS_LI_BUD:
 	default:
 		break;
 	}
@@ -3798,6 +4048,10 @@
 	case XFS_LI_ICREATE:
 	case XFS_LI_RUI:
 	case XFS_LI_RUD:
+	case XFS_LI_CUI:
+	case XFS_LI_CUD:
+	case XFS_LI_BUI:
+	case XFS_LI_BUD:
 		/* nothing to do in pass 1 */
 		return 0;
 	default:
@@ -3832,6 +4086,14 @@
 		return xlog_recover_rui_pass2(log, item, trans->r_lsn);
 	case XFS_LI_RUD:
 		return xlog_recover_rud_pass2(log, item);
+	case XFS_LI_CUI:
+		return xlog_recover_cui_pass2(log, item, trans->r_lsn);
+	case XFS_LI_CUD:
+		return xlog_recover_cud_pass2(log, item);
+	case XFS_LI_BUI:
+		return xlog_recover_bui_pass2(log, item, trans->r_lsn);
+	case XFS_LI_BUD:
+		return xlog_recover_bud_pass2(log, item);
 	case XFS_LI_DQUOT:
 		return xlog_recover_dquot_pass2(log, buffer_list, item,
 						trans->r_lsn);
@@ -4419,12 +4681,94 @@
 	spin_lock(&ailp->xa_lock);
 }
 
+/* Recover the CUI if necessary. */
+STATIC int
+xlog_recover_process_cui(
+	struct xfs_mount		*mp,
+	struct xfs_ail			*ailp,
+	struct xfs_log_item		*lip)
+{
+	struct xfs_cui_log_item		*cuip;
+	int				error;
+
+	/*
+	 * Skip CUIs that we've already processed.
+	 */
+	cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
+	if (test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags))
+		return 0;
+
+	spin_unlock(&ailp->xa_lock);
+	error = xfs_cui_recover(mp, cuip);
+	spin_lock(&ailp->xa_lock);
+
+	return error;
+}
+
+/* Release the CUI since we're cancelling everything. */
+STATIC void
+xlog_recover_cancel_cui(
+	struct xfs_mount		*mp,
+	struct xfs_ail			*ailp,
+	struct xfs_log_item		*lip)
+{
+	struct xfs_cui_log_item		*cuip;
+
+	cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
+
+	spin_unlock(&ailp->xa_lock);
+	xfs_cui_release(cuip);
+	spin_lock(&ailp->xa_lock);
+}
+
+/* Recover the BUI if necessary. */
+STATIC int
+xlog_recover_process_bui(
+	struct xfs_mount		*mp,
+	struct xfs_ail			*ailp,
+	struct xfs_log_item		*lip)
+{
+	struct xfs_bui_log_item		*buip;
+	int				error;
+
+	/*
+	 * Skip BUIs that we've already processed.
+	 */
+	buip = container_of(lip, struct xfs_bui_log_item, bui_item);
+	if (test_bit(XFS_BUI_RECOVERED, &buip->bui_flags))
+		return 0;
+
+	spin_unlock(&ailp->xa_lock);
+	error = xfs_bui_recover(mp, buip);
+	spin_lock(&ailp->xa_lock);
+
+	return error;
+}
+
+/* Release the BUI since we're cancelling everything. */
+STATIC void
+xlog_recover_cancel_bui(
+	struct xfs_mount		*mp,
+	struct xfs_ail			*ailp,
+	struct xfs_log_item		*lip)
+{
+	struct xfs_bui_log_item		*buip;
+
+	buip = container_of(lip, struct xfs_bui_log_item, bui_item);
+
+	spin_unlock(&ailp->xa_lock);
+	xfs_bui_release(buip);
+	spin_lock(&ailp->xa_lock);
+}
+
 /* Is this log item a deferred action intent? */
 static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
 {
 	switch (lip->li_type) {
 	case XFS_LI_EFI:
 	case XFS_LI_RUI:
+	case XFS_LI_CUI:
+	case XFS_LI_BUI:
 		return true;
 	default:
 		return false;
@@ -4488,6 +4832,12 @@
 		case XFS_LI_RUI:
 			error = xlog_recover_process_rui(log->l_mp, ailp, lip);
 			break;
+		case XFS_LI_CUI:
+			error = xlog_recover_process_cui(log->l_mp, ailp, lip);
+			break;
+		case XFS_LI_BUI:
+			error = xlog_recover_process_bui(log->l_mp, ailp, lip);
+			break;
 		}
 		if (error)
 			goto out;
@@ -4535,6 +4885,12 @@
 		case XFS_LI_RUI:
 			xlog_recover_cancel_rui(log->l_mp, ailp, lip);
 			break;
+		case XFS_LI_CUI:
+			xlog_recover_cancel_cui(log->l_mp, ailp, lip);
+			break;
+		case XFS_LI_BUI:
+			xlog_recover_cancel_bui(log->l_mp, ailp, lip);
+			break;
 		}
 
 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
@@ -4613,6 +4969,7 @@
 	if (error)
 		goto fail_iput;
 
+	xfs_iflags_clear(ip, XFS_IRECOVERY);
 	ASSERT(VFS_I(ip)->i_nlink == 0);
 	ASSERT(VFS_I(ip)->i_mode != 0);
 
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 56e85a6..fc78739 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -43,6 +43,8 @@
 #include "xfs_icache.h"
 #include "xfs_sysfs.h"
 #include "xfs_rmap_btree.h"
+#include "xfs_refcount_btree.h"
+#include "xfs_reflink.h"
 
 
 static DEFINE_MUTEX(xfs_uuid_table_mutex);
@@ -684,6 +686,7 @@
 	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
 	xfs_ialloc_compute_maxlevels(mp);
 	xfs_rmapbt_compute_maxlevels(mp);
+	xfs_refcountbt_compute_maxlevels(mp);
 
 	xfs_set_maxicount(mp);
 
@@ -923,6 +926,15 @@
 	}
 
 	/*
+	 * During the second phase of log recovery, we need iget and
+	 * iput to behave like they do for an active filesystem.
+	 * xfs_fs_drop_inode needs to be able to prevent the deletion
+	 * of inodes before we're done replaying log items on those
+	 * inodes.
+	 */
+	mp->m_super->s_flags |= MS_ACTIVE;
+
+	/*
 	 * Finish recovering the file system.  This part needed to be delayed
 	 * until after the root and real-time bitmap inodes were consistently
 	 * read in.
@@ -974,10 +986,28 @@
 		if (error)
 			xfs_warn(mp,
 	"Unable to allocate reserve blocks. Continuing without reserve pool.");
+
+		/* Recover any CoW blocks that never got remapped. */
+		error = xfs_reflink_recover_cow(mp);
+		if (error) {
+			xfs_err(mp,
+	"Error %d recovering leftover CoW allocations.", error);
+			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+			goto out_quota;
+		}
+
+		/* Reserve AG blocks for future btree expansion. */
+		error = xfs_fs_reserve_ag_blocks(mp);
+		if (error && error != -ENOSPC)
+			goto out_agresv;
 	}
 
 	return 0;
 
+ out_agresv:
+	xfs_fs_unreserve_ag_blocks(mp);
+ out_quota:
+	xfs_qm_unmount_quotas(mp);
  out_rtunmount:
 	xfs_rtunmount_inodes(mp);
  out_rele_rip:
@@ -1019,7 +1049,9 @@
 	int			error;
 
 	cancel_delayed_work_sync(&mp->m_eofblocks_work);
+	cancel_delayed_work_sync(&mp->m_cowblocks_work);
 
+	xfs_fs_unreserve_ag_blocks(mp);
 	xfs_qm_unmount_quotas(mp);
 	xfs_rtunmount_inodes(mp);
 	IRELE(mp->m_rootip);
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 041d949..819b80b 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -124,10 +124,13 @@
 	uint			m_inobt_mnr[2];	/* min inobt btree records */
 	uint			m_rmap_mxr[2];	/* max rmap btree records */
 	uint			m_rmap_mnr[2];	/* min rmap btree records */
+	uint			m_refc_mxr[2];	/* max refc btree records */
+	uint			m_refc_mnr[2];	/* min refc btree records */
 	uint			m_ag_maxlevels;	/* XFS_AG_MAXLEVELS */
 	uint			m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */
 	uint			m_in_maxlevels;	/* max inobt btree levels. */
 	uint			m_rmap_maxlevels; /* max rmap btree levels */
+	uint			m_refc_maxlevels; /* max refcount btree level */
 	xfs_extlen_t		m_ag_prealloc_blocks; /* reserved ag blocks */
 	uint			m_alloc_set_aside; /* space we can't use */
 	uint			m_ag_max_usable; /* max space per AG */
@@ -161,6 +164,8 @@
 	struct delayed_work	m_reclaim_work;	/* background inode reclaim */
 	struct delayed_work	m_eofblocks_work; /* background eof blocks
 						     trimming */
+	struct delayed_work	m_cowblocks_work; /* background cow blocks
+						     trimming */
 	bool			m_update_sb;	/* sb needs update in mount */
 	int64_t			m_low_space[XFS_LOWSP_MAX];
 						/* low free space thresholds */
@@ -399,6 +404,9 @@
 	struct xfs_ag_resv	pag_meta_resv;
 	/* Blocks reserved for just AGFL-based metadata. */
 	struct xfs_ag_resv	pag_agfl_resv;
+
+	/* reference count */
+	__uint8_t		pagf_refcount_level;
 } xfs_perag_t;
 
 static inline struct xfs_ag_resv *
diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h
index 69e2986..0c381d7 100644
--- a/fs/xfs/xfs_ondisk.h
+++ b/fs/xfs/xfs_ondisk.h
@@ -49,6 +49,8 @@
 	XFS_CHECK_STRUCT_SIZE(struct xfs_dsymlink_hdr,		56);
 	XFS_CHECK_STRUCT_SIZE(struct xfs_inobt_key,		4);
 	XFS_CHECK_STRUCT_SIZE(struct xfs_inobt_rec,		16);
+	XFS_CHECK_STRUCT_SIZE(struct xfs_refcount_key,		4);
+	XFS_CHECK_STRUCT_SIZE(struct xfs_refcount_rec,		12);
 	XFS_CHECK_STRUCT_SIZE(struct xfs_rmap_key,		20);
 	XFS_CHECK_STRUCT_SIZE(struct xfs_rmap_rec,		24);
 	XFS_CHECK_STRUCT_SIZE(struct xfs_timestamp,		8);
@@ -56,6 +58,7 @@
 	XFS_CHECK_STRUCT_SIZE(xfs_alloc_ptr_t,			4);
 	XFS_CHECK_STRUCT_SIZE(xfs_alloc_rec_t,			8);
 	XFS_CHECK_STRUCT_SIZE(xfs_inobt_ptr_t,			4);
+	XFS_CHECK_STRUCT_SIZE(xfs_refcount_ptr_t,		4);
 	XFS_CHECK_STRUCT_SIZE(xfs_rmap_ptr_t,			4);
 
 	/* dir/attr trees */
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index 0f14b2e..93a7aaf 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -114,6 +114,13 @@
 		return -ENXIO;
 
 	/*
+	 * The pNFS block layout spec actually supports reflink like
+	 * functionality, but the Linux pNFS server doesn't implement it yet.
+	 */
+	if (xfs_is_reflink_inode(ip))
+		return -ENXIO;
+
+	/*
 	 * Lock out any other I/O before we flush and invalidate the pagecache,
 	 * and then hand out a layout to the remote system.  This is very
 	 * similar to direct I/O, except that the synchronization is much more
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
new file mode 100644
index 0000000..fe86a66
--- /dev/null
+++ b/fs/xfs/xfs_refcount_item.c
@@ -0,0 +1,539 @@
+/*
+ * Copyright (C) 2016 Oracle.  All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_buf_item.h"
+#include "xfs_refcount_item.h"
+#include "xfs_log.h"
+#include "xfs_refcount.h"
+
+
+kmem_zone_t	*xfs_cui_zone;
+kmem_zone_t	*xfs_cud_zone;
+
+static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
+{
+	return container_of(lip, struct xfs_cui_log_item, cui_item);
+}
+
+void
+xfs_cui_item_free(
+	struct xfs_cui_log_item	*cuip)
+{
+	if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
+		kmem_free(cuip);
+	else
+		kmem_zone_free(xfs_cui_zone, cuip);
+}
+
+STATIC void
+xfs_cui_item_size(
+	struct xfs_log_item	*lip,
+	int			*nvecs,
+	int			*nbytes)
+{
+	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);
+
+	*nvecs += 1;
+	*nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents);
+}
+
+/*
+ * This is called to fill in the vector of log iovecs for the
+ * given cui log item. We use only 1 iovec, and we point that
+ * at the cui_log_format structure embedded in the cui item.
+ * It is at this point that we assert that all of the extent
+ * slots in the cui item have been filled.
+ */
+STATIC void
+xfs_cui_item_format(
+	struct xfs_log_item	*lip,
+	struct xfs_log_vec	*lv)
+{
+	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);
+	struct xfs_log_iovec	*vecp = NULL;
+
+	ASSERT(atomic_read(&cuip->cui_next_extent) ==
+			cuip->cui_format.cui_nextents);
+
+	cuip->cui_format.cui_type = XFS_LI_CUI;
+	cuip->cui_format.cui_size = 1;
+
+	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
+			xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents));
+}
+
+/*
+ * Pinning has no meaning for an cui item, so just return.
+ */
+STATIC void
+xfs_cui_item_pin(
+	struct xfs_log_item	*lip)
+{
+}
+
+/*
+ * The unpin operation is the last place an CUI is manipulated in the log. It is
+ * either inserted in the AIL or aborted in the event of a log I/O error. In
+ * either case, the CUI transaction has been successfully committed to make it
+ * this far. Therefore, we expect whoever committed the CUI to either construct
+ * and commit the CUD or drop the CUD's reference in the event of error. Simply
+ * drop the log's CUI reference now that the log is done with it.
+ */
+STATIC void
+xfs_cui_item_unpin(
+	struct xfs_log_item	*lip,
+	int			remove)
+{
+	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);
+
+	xfs_cui_release(cuip);
+}
+
+/*
+ * CUI items have no locking or pushing.  However, since CUIs are pulled from
+ * the AIL when their corresponding CUDs are committed to disk, their situation
+ * is very similar to being pinned.  Return XFS_ITEM_PINNED so that the caller
+ * will eventually flush the log.  This should help in getting the CUI out of
+ * the AIL.
+ */
+STATIC uint
+xfs_cui_item_push(
+	struct xfs_log_item	*lip,
+	struct list_head	*buffer_list)
+{
+	return XFS_ITEM_PINNED;
+}
+
+/*
+ * The CUI has been either committed or aborted if the transaction has been
+ * cancelled. If the transaction was cancelled, an CUD isn't going to be
+ * constructed and thus we free the CUI here directly.
+ */
+STATIC void
+xfs_cui_item_unlock(
+	struct xfs_log_item	*lip)
+{
+	if (lip->li_flags & XFS_LI_ABORTED)
+		xfs_cui_item_free(CUI_ITEM(lip));
+}
+
+/*
+ * The CUI is logged only once and cannot be moved in the log, so simply return
+ * the lsn at which it's been logged.
+ */
+STATIC xfs_lsn_t
+xfs_cui_item_committed(
+	struct xfs_log_item	*lip,
+	xfs_lsn_t		lsn)
+{
+	return lsn;
+}
+
+/*
+ * The CUI dependency tracking op doesn't do squat.  It can't because
+ * it doesn't know where the free extent is coming from.  The dependency
+ * tracking has to be handled by the "enclosing" metadata object.  For
+ * example, for inodes, the inode is locked throughout the extent freeing
+ * so the dependency should be recorded there.
+ */
+STATIC void
+xfs_cui_item_committing(
+	struct xfs_log_item	*lip,
+	xfs_lsn_t		lsn)
+{
+}
+
+/*
+ * This is the ops vector shared by all cui log items.
+ */
+static const struct xfs_item_ops xfs_cui_item_ops = {
+	.iop_size	= xfs_cui_item_size,
+	.iop_format	= xfs_cui_item_format,
+	.iop_pin	= xfs_cui_item_pin,
+	.iop_unpin	= xfs_cui_item_unpin,
+	.iop_unlock	= xfs_cui_item_unlock,
+	.iop_committed	= xfs_cui_item_committed,
+	.iop_push	= xfs_cui_item_push,
+	.iop_committing = xfs_cui_item_committing,
+};
+
+/*
+ * Allocate and initialize an cui item with the given number of extents.
+ */
+struct xfs_cui_log_item *
+xfs_cui_init(
+	struct xfs_mount		*mp,
+	uint				nextents)
+
+{
+	struct xfs_cui_log_item		*cuip;
+
+	ASSERT(nextents > 0);
+	if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
+		cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents),
+				KM_SLEEP);
+	else
+		cuip = kmem_zone_zalloc(xfs_cui_zone, KM_SLEEP);
+
+	xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
+	cuip->cui_format.cui_nextents = nextents;
+	cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
+	atomic_set(&cuip->cui_next_extent, 0);
+	atomic_set(&cuip->cui_refcount, 2);
+
+	return cuip;
+}
+
+/*
+ * Freeing the CUI requires that we remove it from the AIL if it has already
+ * been placed there. However, the CUI may not yet have been placed in the AIL
+ * when called by xfs_cui_release() from CUD processing due to the ordering of
+ * committed vs unpin operations in bulk insert operations. Hence the reference
+ * count to ensure only the last caller frees the CUI.
+ */
+void
+xfs_cui_release(
+	struct xfs_cui_log_item	*cuip)
+{
+	if (atomic_dec_and_test(&cuip->cui_refcount)) {
+		xfs_trans_ail_remove(&cuip->cui_item, SHUTDOWN_LOG_IO_ERROR);
+		xfs_cui_item_free(cuip);
+	}
+}
+
+static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
+{
+	return container_of(lip, struct xfs_cud_log_item, cud_item);
+}
+
+STATIC void
+xfs_cud_item_size(
+	struct xfs_log_item	*lip,
+	int			*nvecs,
+	int			*nbytes)
+{
+	*nvecs += 1;
+	*nbytes += sizeof(struct xfs_cud_log_format);
+}
+
+/*
+ * This is called to fill in the vector of log iovecs for the
+ * given cud log item. We use only 1 iovec, and we point that
+ * at the cud_log_format structure embedded in the cud item.
+ * It is at this point that we assert that all of the extent
+ * slots in the cud item have been filled.
+ */
+STATIC void
+xfs_cud_item_format(
+	struct xfs_log_item	*lip,
+	struct xfs_log_vec	*lv)
+{
+	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);
+	struct xfs_log_iovec	*vecp = NULL;
+
+	cudp->cud_format.cud_type = XFS_LI_CUD;
+	cudp->cud_format.cud_size = 1;
+
+	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
+			sizeof(struct xfs_cud_log_format));
+}
+
+/*
+ * Pinning has no meaning for an cud item, so just return.
+ */
+STATIC void
+xfs_cud_item_pin(
+	struct xfs_log_item	*lip)
+{
+}
+
+/*
+ * Since pinning has no meaning for an cud item, unpinning does
+ * not either.
+ */
+STATIC void
+xfs_cud_item_unpin(
+	struct xfs_log_item	*lip,
+	int			remove)
+{
+}
+
+/*
+ * There isn't much you can do to push on an cud item.  It is simply stuck
+ * waiting for the log to be flushed to disk.
+ */
+STATIC uint
+xfs_cud_item_push(
+	struct xfs_log_item	*lip,
+	struct list_head	*buffer_list)
+{
+	return XFS_ITEM_PINNED;
+}
+
+/*
+ * The CUD is either committed or aborted if the transaction is cancelled. If
+ * the transaction is cancelled, drop our reference to the CUI and free the
+ * CUD.
+ */
+STATIC void
+xfs_cud_item_unlock(
+	struct xfs_log_item	*lip)
+{
+	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);
+
+	if (lip->li_flags & XFS_LI_ABORTED) {
+		xfs_cui_release(cudp->cud_cuip);
+		kmem_zone_free(xfs_cud_zone, cudp);
+	}
+}
+
+/*
+ * When the cud item is committed to disk, all we need to do is delete our
+ * reference to our partner cui item and then free ourselves. Since we're
+ * freeing ourselves we must return -1 to keep the transaction code from
+ * further referencing this item.
+ */
+STATIC xfs_lsn_t
+xfs_cud_item_committed(
+	struct xfs_log_item	*lip,
+	xfs_lsn_t		lsn)
+{
+	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);
+
+	/*
+	 * Drop the CUI reference regardless of whether the CUD has been
+	 * aborted. Once the CUD transaction is constructed, it is the sole
+	 * responsibility of the CUD to release the CUI (even if the CUI is
+	 * aborted due to log I/O error).
+	 */
+	xfs_cui_release(cudp->cud_cuip);
+	kmem_zone_free(xfs_cud_zone, cudp);
+
+	return (xfs_lsn_t)-1;
+}
+
+/*
+ * The CUD dependency tracking op doesn't do squat.  It can't because
+ * it doesn't know where the free extent is coming from.  The dependency
+ * tracking has to be handled by the "enclosing" metadata object.  For
+ * example, for inodes, the inode is locked throughout the extent freeing
+ * so the dependency should be recorded there.
+ */
+STATIC void
+xfs_cud_item_committing(
+	struct xfs_log_item	*lip,
+	xfs_lsn_t		lsn)
+{
+}
+
+/*
+ * This is the ops vector shared by all cud log items.
+ */
+static const struct xfs_item_ops xfs_cud_item_ops = {
+	.iop_size	= xfs_cud_item_size,
+	.iop_format	= xfs_cud_item_format,
+	.iop_pin	= xfs_cud_item_pin,
+	.iop_unpin	= xfs_cud_item_unpin,
+	.iop_unlock	= xfs_cud_item_unlock,
+	.iop_committed	= xfs_cud_item_committed,
+	.iop_push	= xfs_cud_item_push,
+	.iop_committing = xfs_cud_item_committing,
+};
+
+/*
+ * Allocate and initialize an cud item with the given number of extents.
+ */
+struct xfs_cud_log_item *
+xfs_cud_init(
+	struct xfs_mount		*mp,
+	struct xfs_cui_log_item		*cuip)
+
+{
+	struct xfs_cud_log_item	*cudp;
+
+	cudp = kmem_zone_zalloc(xfs_cud_zone, KM_SLEEP);
+	xfs_log_item_init(mp, &cudp->cud_item, XFS_LI_CUD, &xfs_cud_item_ops);
+	cudp->cud_cuip = cuip;
+	cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;
+
+	return cudp;
+}
+
+/*
+ * Process a refcount update intent item that was recovered from the log.
+ * We need to update the refcountbt.
+ */
+int
+xfs_cui_recover(
+	struct xfs_mount		*mp,
+	struct xfs_cui_log_item		*cuip)
+{
+	int				i;
+	int				error = 0;
+	unsigned int			refc_type;
+	struct xfs_phys_extent		*refc;
+	xfs_fsblock_t			startblock_fsb;
+	bool				op_ok;
+	struct xfs_cud_log_item		*cudp;
+	struct xfs_trans		*tp;
+	struct xfs_btree_cur		*rcur = NULL;
+	enum xfs_refcount_intent_type	type;
+	xfs_fsblock_t			firstfsb;
+	xfs_fsblock_t			new_fsb;
+	xfs_extlen_t			new_len;
+	struct xfs_bmbt_irec		irec;
+	struct xfs_defer_ops		dfops;
+	bool				requeue_only = false;
+
+	ASSERT(!test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags));
+
+	/*
+	 * First check the validity of the extents described by the
+	 * CUI.  If any are bad, then assume that all are bad and
+	 * just toss the CUI.
+	 */
+	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
+		refc = &cuip->cui_format.cui_extents[i];
+		startblock_fsb = XFS_BB_TO_FSB(mp,
+				   XFS_FSB_TO_DADDR(mp, refc->pe_startblock));
+		switch (refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
+		case XFS_REFCOUNT_INCREASE:
+		case XFS_REFCOUNT_DECREASE:
+		case XFS_REFCOUNT_ALLOC_COW:
+		case XFS_REFCOUNT_FREE_COW:
+			op_ok = true;
+			break;
+		default:
+			op_ok = false;
+			break;
+		}
+		if (!op_ok || startblock_fsb == 0 ||
+		    refc->pe_len == 0 ||
+		    startblock_fsb >= mp->m_sb.sb_dblocks ||
+		    refc->pe_len >= mp->m_sb.sb_agblocks ||
+		    (refc->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)) {
+			/*
+			 * This will pull the CUI from the AIL and
+			 * free the memory associated with it.
+			 */
+			set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
+			xfs_cui_release(cuip);
+			return -EIO;
+		}
+	}
+
+	/*
+	 * Under normal operation, refcount updates are deferred, so we
+	 * wouldn't be adding them directly to a transaction.  All
+	 * refcount updates manage reservation usage internally and
+	 * dynamically by deferring work that won't fit in the
+	 * transaction.  Normally, any work that needs to be deferred
+	 * gets attached to the same defer_ops that scheduled the
+	 * refcount update.  However, we're in log recovery here, so we
+	 * we create our own defer_ops and use that to finish up any
+	 * work that doesn't fit.
+	 */
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+	if (error)
+		return error;
+	cudp = xfs_trans_get_cud(tp, cuip);
+
+	xfs_defer_init(&dfops, &firstfsb);
+	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
+		refc = &cuip->cui_format.cui_extents[i];
+		refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
+		switch (refc_type) {
+		case XFS_REFCOUNT_INCREASE:
+		case XFS_REFCOUNT_DECREASE:
+		case XFS_REFCOUNT_ALLOC_COW:
+		case XFS_REFCOUNT_FREE_COW:
+			type = refc_type;
+			break;
+		default:
+			error = -EFSCORRUPTED;
+			goto abort_error;
+		}
+		if (requeue_only) {
+			new_fsb = refc->pe_startblock;
+			new_len = refc->pe_len;
+		} else
+			error = xfs_trans_log_finish_refcount_update(tp, cudp,
+				&dfops, type, refc->pe_startblock, refc->pe_len,
+				&new_fsb, &new_len, &rcur);
+		if (error)
+			goto abort_error;
+
+		/* Requeue what we didn't finish. */
+		if (new_len > 0) {
+			irec.br_startblock = new_fsb;
+			irec.br_blockcount = new_len;
+			switch (type) {
+			case XFS_REFCOUNT_INCREASE:
+				error = xfs_refcount_increase_extent(
+						tp->t_mountp, &dfops, &irec);
+				break;
+			case XFS_REFCOUNT_DECREASE:
+				error = xfs_refcount_decrease_extent(
+						tp->t_mountp, &dfops, &irec);
+				break;
+			case XFS_REFCOUNT_ALLOC_COW:
+				error = xfs_refcount_alloc_cow_extent(
+						tp->t_mountp, &dfops,
+						irec.br_startblock,
+						irec.br_blockcount);
+				break;
+			case XFS_REFCOUNT_FREE_COW:
+				error = xfs_refcount_free_cow_extent(
+						tp->t_mountp, &dfops,
+						irec.br_startblock,
+						irec.br_blockcount);
+				break;
+			default:
+				ASSERT(0);
+			}
+			if (error)
+				goto abort_error;
+			requeue_only = true;
+		}
+	}
+
+	xfs_refcount_finish_one_cleanup(tp, rcur, error);
+	error = xfs_defer_finish(&tp, &dfops, NULL);
+	if (error)
+		goto abort_error;
+	set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
+	error = xfs_trans_commit(tp);
+	return error;
+
+abort_error:
+	xfs_refcount_finish_one_cleanup(tp, rcur, error);
+	xfs_defer_cancel(&dfops);
+	xfs_trans_cancel(tp);
+	return error;
+}
diff --git a/fs/xfs/xfs_refcount_item.h b/fs/xfs/xfs_refcount_item.h
new file mode 100644
index 0000000..5b74ddd
--- /dev/null
+++ b/fs/xfs/xfs_refcount_item.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2016 Oracle.  All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#ifndef	__XFS_REFCOUNT_ITEM_H__
+#define	__XFS_REFCOUNT_ITEM_H__
+
+/*
+ * There are (currently) two pairs of refcount btree redo item types:
+ * increase and decrease.  The log items for these are CUI (refcount
+ * update intent) and CUD (refcount update done).  The redo item type
+ * is encoded in the flags field of each xfs_map_extent.
+ *
+ * *I items should be recorded in the *first* of a series of rolled
+ * transactions, and the *D items should be recorded in the same
+ * transaction that records the associated refcountbt updates.
+ *
+ * Should the system crash after the commit of the first transaction
+ * but before the commit of the final transaction in a series, log
+ * recovery will use the redo information recorded by the intent items
+ * to replay the refcountbt metadata updates.
+ */
+
+/* kernel only CUI/CUD definitions */
+
+struct xfs_mount;
+struct kmem_zone;
+
+/*
+ * Max number of extents in fast allocation path.
+ */
+#define	XFS_CUI_MAX_FAST_EXTENTS	16
+
+/*
+ * Define CUI flag bits. Manipulated by set/clear/test_bit operators.
+ */
+#define	XFS_CUI_RECOVERED		1
+
+/*
+ * This is the "refcount update intent" log item.  It is used to log
+ * the fact that some reverse mappings need to change.  It is used in
+ * conjunction with the "refcount update done" log item described
+ * below.
+ *
+ * These log items follow the same rules as struct xfs_efi_log_item;
+ * see the comments about that structure (in xfs_extfree_item.h) for
+ * more details.
+ */
+struct xfs_cui_log_item {
+	struct xfs_log_item		cui_item;
+	atomic_t			cui_refcount;
+	atomic_t			cui_next_extent;
+	unsigned long			cui_flags;	/* misc flags */
+	struct xfs_cui_log_format	cui_format;
+};
+
+static inline size_t
+xfs_cui_log_item_sizeof(
+	unsigned int		nr)
+{
+	return offsetof(struct xfs_cui_log_item, cui_format) +
+			xfs_cui_log_format_sizeof(nr);
+}
+
+/*
+ * This is the "refcount update done" log item.  It is used to log the
+ * fact that some refcountbt updates mentioned in an earlier cui item
+ * have been performed.
+ */
+struct xfs_cud_log_item {
+	struct xfs_log_item		cud_item;
+	struct xfs_cui_log_item		*cud_cuip;
+	struct xfs_cud_log_format	cud_format;
+};
+
+extern struct kmem_zone	*xfs_cui_zone;
+extern struct kmem_zone	*xfs_cud_zone;
+
+struct xfs_cui_log_item *xfs_cui_init(struct xfs_mount *, uint);
+struct xfs_cud_log_item *xfs_cud_init(struct xfs_mount *,
+		struct xfs_cui_log_item *);
+void xfs_cui_item_free(struct xfs_cui_log_item *);
+void xfs_cui_release(struct xfs_cui_log_item *);
+int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip);
+
+#endif	/* __XFS_REFCOUNT_ITEM_H__ */
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
new file mode 100644
index 0000000..5965e94
--- /dev/null
+++ b/fs/xfs/xfs_reflink.c
@@ -0,0 +1,1688 @@
+/*
+ * Copyright (C) 2016 Oracle.  All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_inode.h"
+#include "xfs_trans.h"
+#include "xfs_inode_item.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
+#include "xfs_error.h"
+#include "xfs_dir2.h"
+#include "xfs_dir2_priv.h"
+#include "xfs_ioctl.h"
+#include "xfs_trace.h"
+#include "xfs_log.h"
+#include "xfs_icache.h"
+#include "xfs_pnfs.h"
+#include "xfs_btree.h"
+#include "xfs_refcount_btree.h"
+#include "xfs_refcount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_trans_space.h"
+#include "xfs_bit.h"
+#include "xfs_alloc.h"
+#include "xfs_quota_defs.h"
+#include "xfs_quota.h"
+#include "xfs_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_reflink.h"
+#include "xfs_iomap.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_sb.h"
+#include "xfs_ag_resv.h"
+
+/*
+ * Copy on Write of Shared Blocks
+ *
+ * XFS must preserve "the usual" file semantics even when two files share
+ * the same physical blocks.  This means that a write to one file must not
+ * alter the blocks in a different file; the way that we'll do that is
+ * through the use of a copy-on-write mechanism.  At a high level, that
+ * means that when we want to write to a shared block, we allocate a new
+ * block, write the data to the new block, and if that succeeds we map the
+ * new block into the file.
+ *
+ * XFS provides a "delayed allocation" mechanism that defers the allocation
+ * of disk blocks to dirty-but-not-yet-mapped file blocks as long as
+ * possible.  This reduces fragmentation by enabling the filesystem to ask
+ * for bigger chunks less often, which is exactly what we want for CoW.
+ *
+ * The delalloc mechanism begins when the kernel wants to make a block
+ * writable (write_begin or page_mkwrite).  If the offset is not mapped, we
+ * create a delalloc mapping, which is a regular in-core extent, but without
+ * a real startblock.  (For delalloc mappings, the startblock encodes both
+ * a flag that this is a delalloc mapping, and a worst-case estimate of how
+ * many blocks might be required to put the mapping into the BMBT.)  delalloc
+ * mappings are a reservation against the free space in the filesystem;
+ * adjacent mappings can also be combined into fewer larger mappings.
+ *
+ * When dirty pages are being written out (typically in writepage), the
+ * delalloc reservations are converted into real mappings by allocating
+ * blocks and replacing the delalloc mapping with real ones.  A delalloc
+ * mapping can be replaced by several real ones if the free space is
+ * fragmented.
+ *
+ * We want to adapt the delalloc mechanism for copy-on-write, since the
+ * write paths are similar.  The first two steps (creating the reservation
+ * and allocating the blocks) are exactly the same as delalloc except that
+ * the mappings must be stored in a separate CoW fork because we do not want
+ * to disturb the mapping in the data fork until we're sure that the write
+ * succeeded.  IO completion in this case is the process of removing the old
+ * mapping from the data fork and moving the new mapping from the CoW fork to
+ * the data fork.  This will be discussed shortly.
+ *
+ * For now, unaligned directio writes will be bounced back to the page cache.
+ * Block-aligned directio writes will use the same mechanism as buffered
+ * writes.
+ *
+ * CoW remapping must be done after the data block write completes,
+ * because we don't want to destroy the old data fork map until we're sure
+ * the new block has been written.  Since the new mappings are kept in a
+ * separate fork, we can simply iterate these mappings to find the ones
+ * that cover the file blocks that we just CoW'd.  For each extent, simply
+ * unmap the corresponding range in the data fork, map the new range into
+ * the data fork, and remove the extent from the CoW fork.
+ *
+ * Since the remapping operation can be applied to an arbitrary file
+ * range, we record the need for the remap step as a flag in the ioend
+ * instead of declaring a new IO type.  This is required for direct io
+ * because we only have ioend for the whole dio, and we have to be able to
+ * remember the presence of unwritten blocks and CoW blocks with a single
+ * ioend structure.  Better yet, the more ground we can cover with one
+ * ioend, the better.
+ */
+
+/*
+ * Given an AG extent, find the lowest-numbered run of shared blocks
+ * within that range and return the range in fbno/flen.  If
+ * find_end_of_shared is true, return the longest contiguous extent of
+ * shared blocks.  If there are no shared extents, fbno and flen will
+ * be set to NULLAGBLOCK and 0, respectively.
+ */
+int
+xfs_reflink_find_shared(
+	struct xfs_mount	*mp,
+	xfs_agnumber_t		agno,
+	xfs_agblock_t		agbno,
+	xfs_extlen_t		aglen,
+	xfs_agblock_t		*fbno,
+	xfs_extlen_t		*flen,
+	bool			find_end_of_shared)
+{
+	struct xfs_buf		*agbp;
+	struct xfs_btree_cur	*cur;
+	int			error;
+
+	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+	if (error)
+		return error;
+
+	cur = xfs_refcountbt_init_cursor(mp, NULL, agbp, agno, NULL);
+
+	error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen,
+			find_end_of_shared);
+
+	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+
+	xfs_buf_relse(agbp);
+	return error;
+}
+
+/*
+ * Trim the mapping to the next block where there's a change in the
+ * shared/unshared status.  More specifically, this means that we
+ * find the lowest-numbered extent of shared blocks that coincides with
+ * the given block mapping.  If the shared extent overlaps the start of
+ * the mapping, trim the mapping to the end of the shared extent.  If
+ * the shared region intersects the mapping, trim the mapping to the
+ * start of the shared extent.  If there are no shared regions that
+ * overlap, just return the original extent.
+ */
+int
+xfs_reflink_trim_around_shared(
+	struct xfs_inode	*ip,
+	struct xfs_bmbt_irec	*irec,
+	bool			*shared,
+	bool			*trimmed)
+{
+	xfs_agnumber_t		agno;
+	xfs_agblock_t		agbno;
+	xfs_extlen_t		aglen;
+	xfs_agblock_t		fbno;
+	xfs_extlen_t		flen;
+	int			error = 0;
+
+	/* Holes, unwritten, and delalloc extents cannot be shared */
+	if (!xfs_is_reflink_inode(ip) ||
+	    ISUNWRITTEN(irec) ||
+	    irec->br_startblock == HOLESTARTBLOCK ||
+	    irec->br_startblock == DELAYSTARTBLOCK) {
+		*shared = false;
+		return 0;
+	}
+
+	trace_xfs_reflink_trim_around_shared(ip, irec);
+
+	agno = XFS_FSB_TO_AGNO(ip->i_mount, irec->br_startblock);
+	agbno = XFS_FSB_TO_AGBNO(ip->i_mount, irec->br_startblock);
+	aglen = irec->br_blockcount;
+
+	error = xfs_reflink_find_shared(ip->i_mount, agno, agbno,
+			aglen, &fbno, &flen, true);
+	if (error)
+		return error;
+
+	*shared = *trimmed = false;
+	if (fbno == NULLAGBLOCK) {
+		/* No shared blocks at all. */
+		return 0;
+	} else if (fbno == agbno) {
+		/*
+		 * The start of this extent is shared.  Truncate the
+		 * mapping at the end of the shared region so that a
+		 * subsequent iteration starts at the start of the
+		 * unshared region.
+		 */
+		irec->br_blockcount = flen;
+		*shared = true;
+		if (flen != aglen)
+			*trimmed = true;
+		return 0;
+	} else {
+		/*
+		 * There's a shared extent midway through this extent.
+		 * Truncate the mapping at the start of the shared
+		 * extent so that a subsequent iteration starts at the
+		 * start of the shared region.
+		 */
+		irec->br_blockcount = fbno - agbno;
+		*trimmed = true;
+		return 0;
+	}
+}
+
+/* Create a CoW reservation for a range of blocks within a file. */
+static int
+__xfs_reflink_reserve_cow(
+	struct xfs_inode	*ip,
+	xfs_fileoff_t		*offset_fsb,
+	xfs_fileoff_t		end_fsb,
+	bool			*skipped)
+{
+	struct xfs_bmbt_irec	got, prev, imap;
+	xfs_fileoff_t		orig_end_fsb;
+	int			nimaps, eof = 0, error = 0;
+	bool			shared = false, trimmed = false;
+	xfs_extnum_t		idx;
+	xfs_extlen_t		align;
+
+	/* Already reserved?  Skip the refcount btree access. */
+	xfs_bmap_search_extents(ip, *offset_fsb, XFS_COW_FORK, &eof, &idx,
+			&got, &prev);
+	if (!eof && got.br_startoff <= *offset_fsb) {
+		end_fsb = orig_end_fsb = got.br_startoff + got.br_blockcount;
+		trace_xfs_reflink_cow_found(ip, &got);
+		goto done;
+	}
+
+	/* Read extent from the source file. */
+	nimaps = 1;
+	error = xfs_bmapi_read(ip, *offset_fsb, end_fsb - *offset_fsb,
+			&imap, &nimaps, 0);
+	if (error)
+		goto out_unlock;
+	ASSERT(nimaps == 1);
+
+	/* Trim the mapping to the nearest shared extent boundary. */
+	error = xfs_reflink_trim_around_shared(ip, &imap, &shared, &trimmed);
+	if (error)
+		goto out_unlock;
+
+	end_fsb = orig_end_fsb = imap.br_startoff + imap.br_blockcount;
+
+	/* Not shared?  Just report the (potentially capped) extent. */
+	if (!shared) {
+		*skipped = true;
+		goto done;
+	}
+
+	/*
+	 * Fork all the shared blocks from our write offset until the end of
+	 * the extent.
+	 */
+	error = xfs_qm_dqattach_locked(ip, 0);
+	if (error)
+		goto out_unlock;
+
+	align = xfs_eof_alignment(ip, xfs_get_cowextsz_hint(ip));
+	if (align)
+		end_fsb = roundup_64(end_fsb, align);
+
+retry:
+	error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, *offset_fsb,
+			end_fsb - *offset_fsb, &got,
+			&prev, &idx, eof);
+	switch (error) {
+	case 0:
+		break;
+	case -ENOSPC:
+	case -EDQUOT:
+		/* retry without any preallocation */
+		trace_xfs_reflink_cow_enospc(ip, &imap);
+		if (end_fsb != orig_end_fsb) {
+			end_fsb = orig_end_fsb;
+			goto retry;
+		}
+		/*FALLTHRU*/
+	default:
+		goto out_unlock;
+	}
+
+	if (end_fsb != orig_end_fsb)
+		xfs_inode_set_cowblocks_tag(ip);
+
+	trace_xfs_reflink_cow_alloc(ip, &got);
+done:
+	*offset_fsb = end_fsb;
+out_unlock:
+	return error;
+}
+
+/* Create a CoW reservation for part of a file. */
+int
+xfs_reflink_reserve_cow_range(
+	struct xfs_inode	*ip,
+	xfs_off_t		offset,
+	xfs_off_t		count)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	xfs_fileoff_t		offset_fsb, end_fsb;
+	bool			skipped = false;
+	int			error;
+
+	trace_xfs_reflink_reserve_cow_range(ip, offset, count);
+
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	end_fsb = XFS_B_TO_FSB(mp, offset + count);
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	while (offset_fsb < end_fsb) {
+		error = __xfs_reflink_reserve_cow(ip, &offset_fsb, end_fsb,
+				&skipped);
+		if (error) {
+			trace_xfs_reflink_reserve_cow_range_error(ip, error,
+				_RET_IP_);
+			break;
+		}
+	}
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+	return error;
+}
+
+/* Allocate all CoW reservations covering a range of blocks in a file. */
+static int
+__xfs_reflink_allocate_cow(
+	struct xfs_inode	*ip,
+	xfs_fileoff_t		*offset_fsb,
+	xfs_fileoff_t		end_fsb)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_bmbt_irec	imap;
+	struct xfs_defer_ops	dfops;
+	struct xfs_trans	*tp;
+	xfs_fsblock_t		first_block;
+	xfs_fileoff_t		next_fsb;
+	int			nimaps = 1, error;
+	bool			skipped = false;
+
+	xfs_defer_init(&dfops, &first_block);
+
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
+			XFS_TRANS_RESERVE, &tp);
+	if (error)
+		return error;
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+	next_fsb = *offset_fsb;
+	error = __xfs_reflink_reserve_cow(ip, &next_fsb, end_fsb, &skipped);
+	if (error)
+		goto out_trans_cancel;
+
+	if (skipped) {
+		*offset_fsb = next_fsb;
+		goto out_trans_cancel;
+	}
+
+	xfs_trans_ijoin(tp, ip, 0);
+	error = xfs_bmapi_write(tp, ip, *offset_fsb, next_fsb - *offset_fsb,
+			XFS_BMAPI_COWFORK, &first_block,
+			XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK),
+			&imap, &nimaps, &dfops);
+	if (error)
+		goto out_trans_cancel;
+
+	/* We might not have been able to map the whole delalloc extent */
+	*offset_fsb = min(*offset_fsb + imap.br_blockcount, next_fsb);
+
+	error = xfs_defer_finish(&tp, &dfops, NULL);
+	if (error)
+		goto out_trans_cancel;
+
+	error = xfs_trans_commit(tp);
+
+out_unlock:
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	return error;
+out_trans_cancel:
+	xfs_defer_cancel(&dfops);
+	xfs_trans_cancel(tp);
+	goto out_unlock;
+}
+
+/* Allocate all CoW reservations covering a part of a file. */
+int
+xfs_reflink_allocate_cow_range(
+	struct xfs_inode	*ip,
+	xfs_off_t		offset,
+	xfs_off_t		count)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
+	int			error;
+
+	ASSERT(xfs_is_reflink_inode(ip));
+
+	trace_xfs_reflink_allocate_cow_range(ip, offset, count);
+
+	/*
+	 * Make sure that the dquots are there.
+	 */
+	error = xfs_qm_dqattach(ip, 0);
+	if (error)
+		return error;
+
+	while (offset_fsb < end_fsb) {
+		error = __xfs_reflink_allocate_cow(ip, &offset_fsb, end_fsb);
+		if (error) {
+			trace_xfs_reflink_allocate_cow_range_error(ip, error,
+					_RET_IP_);
+			break;
+		}
+	}
+
+	return error;
+}
+
+/*
+ * Find the CoW reservation (and whether or not it needs block allocation)
+ * for a given byte offset of a file.
+ */
+bool
+xfs_reflink_find_cow_mapping(
+	struct xfs_inode		*ip,
+	xfs_off_t			offset,
+	struct xfs_bmbt_irec		*imap,
+	bool				*need_alloc)
+{
+	struct xfs_bmbt_irec		irec;
+	struct xfs_ifork		*ifp;
+	struct xfs_bmbt_rec_host	*gotp;
+	xfs_fileoff_t			bno;
+	xfs_extnum_t			idx;
+
+	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
+	ASSERT(xfs_is_reflink_inode(ip));
+
+	/* Find the extent in the CoW fork. */
+	ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+	bno = XFS_B_TO_FSBT(ip->i_mount, offset);
+	gotp = xfs_iext_bno_to_ext(ifp, bno, &idx);
+	if (!gotp)
+		return false;
+
+	xfs_bmbt_get_all(gotp, &irec);
+	if (bno >= irec.br_startoff + irec.br_blockcount ||
+	    bno < irec.br_startoff)
+		return false;
+
+	trace_xfs_reflink_find_cow_mapping(ip, offset, 1, XFS_IO_OVERWRITE,
+			&irec);
+
+	/* If it's still delalloc, we must allocate later. */
+	*imap = irec;
+	*need_alloc = !!(isnullstartblock(irec.br_startblock));
+
+	return true;
+}
+
+/*
+ * Trim an extent to end at the next CoW reservation past offset_fsb.
+ */
+int
+xfs_reflink_trim_irec_to_next_cow(
+	struct xfs_inode		*ip,
+	xfs_fileoff_t			offset_fsb,
+	struct xfs_bmbt_irec		*imap)
+{
+	struct xfs_bmbt_irec		irec;
+	struct xfs_ifork		*ifp;
+	struct xfs_bmbt_rec_host	*gotp;
+	xfs_extnum_t			idx;
+
+	if (!xfs_is_reflink_inode(ip))
+		return 0;
+
+	/* Find the extent in the CoW fork. */
+	ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+	gotp = xfs_iext_bno_to_ext(ifp, offset_fsb, &idx);
+	if (!gotp)
+		return 0;
+	xfs_bmbt_get_all(gotp, &irec);
+
+	/* This is the extent before; try sliding up one. */
+	if (irec.br_startoff < offset_fsb) {
+		idx++;
+		if (idx >= ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
+			return 0;
+		gotp = xfs_iext_get_ext(ifp, idx);
+		xfs_bmbt_get_all(gotp, &irec);
+	}
+
+	if (irec.br_startoff >= imap->br_startoff + imap->br_blockcount)
+		return 0;
+
+	imap->br_blockcount = irec.br_startoff - imap->br_startoff;
+	trace_xfs_reflink_trim_irec(ip, imap);
+
+	return 0;
+}
+
+/*
+ * Cancel all pending CoW reservations for some block range of an inode.
+ */
+int
+xfs_reflink_cancel_cow_blocks(
+	struct xfs_inode		*ip,
+	struct xfs_trans		**tpp,
+	xfs_fileoff_t			offset_fsb,
+	xfs_fileoff_t			end_fsb)
+{
+	struct xfs_bmbt_irec		irec;
+	xfs_filblks_t			count_fsb;
+	xfs_fsblock_t			firstfsb;
+	struct xfs_defer_ops		dfops;
+	int				error = 0;
+	int				nimaps;
+
+	if (!xfs_is_reflink_inode(ip))
+		return 0;
+
+	/* Go find the old extent in the CoW fork. */
+	while (offset_fsb < end_fsb) {
+		nimaps = 1;
+		count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
+		error = xfs_bmapi_read(ip, offset_fsb, count_fsb, &irec,
+				&nimaps, XFS_BMAPI_COWFORK);
+		if (error)
+			break;
+		ASSERT(nimaps == 1);
+
+		trace_xfs_reflink_cancel_cow(ip, &irec);
+
+		if (irec.br_startblock == DELAYSTARTBLOCK) {
+			/* Free a delayed allocation. */
+			xfs_mod_fdblocks(ip->i_mount, irec.br_blockcount,
+					false);
+			ip->i_delayed_blks -= irec.br_blockcount;
+
+			/* Remove the mapping from the CoW fork. */
+			error = xfs_bunmapi_cow(ip, &irec);
+			if (error)
+				break;
+		} else if (irec.br_startblock == HOLESTARTBLOCK) {
+			/* empty */
+		} else {
+			xfs_trans_ijoin(*tpp, ip, 0);
+			xfs_defer_init(&dfops, &firstfsb);
+
+			/* Free the CoW orphan record. */
+			error = xfs_refcount_free_cow_extent(ip->i_mount,
+					&dfops, irec.br_startblock,
+					irec.br_blockcount);
+			if (error)
+				break;
+
+			xfs_bmap_add_free(ip->i_mount, &dfops,
+					irec.br_startblock, irec.br_blockcount,
+					NULL);
+
+			/* Update quota accounting */
+			xfs_trans_mod_dquot_byino(*tpp, ip, XFS_TRANS_DQ_BCOUNT,
+					-(long)irec.br_blockcount);
+
+			/* Roll the transaction */
+			error = xfs_defer_finish(tpp, &dfops, ip);
+			if (error) {
+				xfs_defer_cancel(&dfops);
+				break;
+			}
+
+			/* Remove the mapping from the CoW fork. */
+			error = xfs_bunmapi_cow(ip, &irec);
+			if (error)
+				break;
+		}
+
+		/* Roll on... */
+		offset_fsb = irec.br_startoff + irec.br_blockcount;
+	}
+
+	return error;
+}
+
+/*
+ * Cancel all pending CoW reservations for some byte range of an inode.
+ */
+int
+xfs_reflink_cancel_cow_range(
+	struct xfs_inode	*ip,
+	xfs_off_t		offset,
+	xfs_off_t		count)
+{
+	struct xfs_trans	*tp;
+	xfs_fileoff_t		offset_fsb;
+	xfs_fileoff_t		end_fsb;
+	int			error;
+
+	trace_xfs_reflink_cancel_cow_range(ip, offset, count);
+	ASSERT(xfs_is_reflink_inode(ip));
+
+	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
+	if (count == NULLFILEOFF)
+		end_fsb = NULLFILEOFF;
+	else
+		end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);
+
+	/* Start a rolling transaction to remove the mappings */
+	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
+			0, 0, 0, &tp);
+	if (error)
+		goto out;
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, 0);
+
+	/* Scrape out the old CoW reservations */
+	error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb);
+	if (error)
+		goto out_cancel;
+
+	error = xfs_trans_commit(tp);
+
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	return error;
+
+out_cancel:
+	xfs_trans_cancel(tp);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+out:
+	trace_xfs_reflink_cancel_cow_range_error(ip, error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Remap parts of a file's data fork after a successful CoW.
+ */
+int
+xfs_reflink_end_cow(
+	struct xfs_inode		*ip,
+	xfs_off_t			offset,
+	xfs_off_t			count)
+{
+	struct xfs_bmbt_irec		irec;
+	struct xfs_bmbt_irec		uirec;
+	struct xfs_trans		*tp;
+	xfs_fileoff_t			offset_fsb;
+	xfs_fileoff_t			end_fsb;
+	xfs_filblks_t			count_fsb;
+	xfs_fsblock_t			firstfsb;
+	struct xfs_defer_ops		dfops;
+	int				error;
+	unsigned int			resblks;
+	xfs_filblks_t			ilen;
+	xfs_filblks_t			rlen;
+	int				nimaps;
+
+	trace_xfs_reflink_end_cow(ip, offset, count);
+
+	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
+	end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);
+	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
+
+	/* Start a rolling transaction to switch the mappings */
+	resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
+	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
+			resblks, 0, 0, &tp);
+	if (error)
+		goto out;
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, 0);
+
+	/* Go find the old extent in the CoW fork. */
+	while (offset_fsb < end_fsb) {
+		/* Read extent from the source file */
+		nimaps = 1;
+		count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
+		error = xfs_bmapi_read(ip, offset_fsb, count_fsb, &irec,
+				&nimaps, XFS_BMAPI_COWFORK);
+		if (error)
+			goto out_cancel;
+		ASSERT(nimaps == 1);
+
+		ASSERT(irec.br_startblock != DELAYSTARTBLOCK);
+		trace_xfs_reflink_cow_remap(ip, &irec);
+
+		/*
+		 * We can have a hole in the CoW fork if part of a directio
+		 * write is CoW but part of it isn't.
+		 */
+		rlen = ilen = irec.br_blockcount;
+		if (irec.br_startblock == HOLESTARTBLOCK)
+			goto next_extent;
+
+		/* Unmap the old blocks in the data fork. */
+		while (rlen) {
+			xfs_defer_init(&dfops, &firstfsb);
+			error = __xfs_bunmapi(tp, ip, irec.br_startoff,
+					&rlen, 0, 1, &firstfsb, &dfops);
+			if (error)
+				goto out_defer;
+
+			/*
+			 * Trim the extent to whatever got unmapped.
+			 * Remember, bunmapi works backwards.
+			 */
+			uirec.br_startblock = irec.br_startblock + rlen;
+			uirec.br_startoff = irec.br_startoff + rlen;
+			uirec.br_blockcount = irec.br_blockcount - rlen;
+			irec.br_blockcount = rlen;
+			trace_xfs_reflink_cow_remap_piece(ip, &uirec);
+
+			/* Free the CoW orphan record. */
+			error = xfs_refcount_free_cow_extent(tp->t_mountp,
+					&dfops, uirec.br_startblock,
+					uirec.br_blockcount);
+			if (error)
+				goto out_defer;
+
+			/* Map the new blocks into the data fork. */
+			error = xfs_bmap_map_extent(tp->t_mountp, &dfops,
+					ip, &uirec);
+			if (error)
+				goto out_defer;
+
+			/* Remove the mapping from the CoW fork. */
+			error = xfs_bunmapi_cow(ip, &uirec);
+			if (error)
+				goto out_defer;
+
+			error = xfs_defer_finish(&tp, &dfops, ip);
+			if (error)
+				goto out_defer;
+		}
+
+next_extent:
+		/* Roll on... */
+		offset_fsb = irec.br_startoff + ilen;
+	}
+
+	error = xfs_trans_commit(tp);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	if (error)
+		goto out;
+	return 0;
+
+out_defer:
+	xfs_defer_cancel(&dfops);
+out_cancel:
+	xfs_trans_cancel(tp);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+out:
+	trace_xfs_reflink_end_cow_error(ip, error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Free leftover CoW reservations that didn't get cleaned out.
+ */
+int
+xfs_reflink_recover_cow(
+	struct xfs_mount	*mp)
+{
+	xfs_agnumber_t		agno;
+	int			error = 0;
+
+	if (!xfs_sb_version_hasreflink(&mp->m_sb))
+		return 0;
+
+	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
+		error = xfs_refcount_recover_cow_leftovers(mp, agno);
+		if (error)
+			break;
+	}
+
+	return error;
+}
+
+/*
+ * Reflinking (Block) Ranges of Two Files Together
+ *
+ * First, ensure that the reflink flag is set on both inodes.  The flag is an
+ * optimization to avoid unnecessary refcount btree lookups in the write path.
+ *
+ * Now we can iteratively remap the range of extents (and holes) in src to the
+ * corresponding ranges in dest.  Let drange and srange denote the ranges of
+ * logical blocks in dest and src touched by the reflink operation.
+ *
+ * While the length of drange is greater than zero,
+ *    - Read src's bmbt at the start of srange ("imap")
+ *    - If imap doesn't exist, make imap appear to start at the end of srange
+ *      with zero length.
+ *    - If imap starts before srange, advance imap to start at srange.
+ *    - If imap goes beyond srange, truncate imap to end at the end of srange.
+ *    - Punch (imap start - srange start + imap len) blocks from dest at
+ *      offset (drange start).
+ *    - If imap points to a real range of pblks,
+ *         > Increase the refcount of the imap's pblks
+ *         > Map imap's pblks into dest at the offset
+ *           (drange start + imap start - srange start)
+ *    - Advance drange and srange by (imap start - srange start + imap len)
+ *
+ * Finally, if the reflink made dest longer, update both the in-core and
+ * on-disk file sizes.
+ *
+ * ASCII Art Demonstration:
+ *
+ * Let's say we want to reflink this source file:
+ *
+ * ----SSSSSSS-SSSSS----SSSSSS (src file)
+ *   <-------------------->
+ *
+ * into this destination file:
+ *
+ * --DDDDDDDDDDDDDDDDDDD--DDD (dest file)
+ *        <-------------------->
+ * '-' means a hole, and 'S' and 'D' are written blocks in the src and dest.
+ * Observe that the range has different logical offsets in either file.
+ *
+ * Consider that the first extent in the source file doesn't line up with our
+ * reflink range.  Unmapping  and remapping are separate operations, so we can
+ * unmap more blocks from the destination file than we remap.
+ *
+ * ----SSSSSSS-SSSSS----SSSSSS
+ *   <------->
+ * --DDDDD---------DDDDD--DDD
+ *        <------->
+ *
+ * Now remap the source extent into the destination file:
+ *
+ * ----SSSSSSS-SSSSS----SSSSSS
+ *   <------->
+ * --DDDDD--SSSSSSSDDDDD--DDD
+ *        <------->
+ *
+ * Do likewise with the second hole and extent in our range.  Holes in the
+ * unmap range don't affect our operation.
+ *
+ * ----SSSSSSS-SSSSS----SSSSSS
+ *            <---->
+ * --DDDDD--SSSSSSS-SSSSS-DDD
+ *                 <---->
+ *
+ * Finally, unmap and remap part of the third extent.  This will increase the
+ * size of the destination file.
+ *
+ * ----SSSSSSS-SSSSS----SSSSSS
+ *                  <----->
+ * --DDDDD--SSSSSSS-SSSSS----SSS
+ *                       <----->
+ *
+ * Once we update the destination file's i_size, we're done.
+ */
+
+/*
+ * Ensure the reflink bit is set in both inodes.
+ */
+STATIC int
+xfs_reflink_set_inode_flag(
+	struct xfs_inode	*src,
+	struct xfs_inode	*dest)
+{
+	struct xfs_mount	*mp = src->i_mount;
+	int			error;
+	struct xfs_trans	*tp;
+
+	if (xfs_is_reflink_inode(src) && xfs_is_reflink_inode(dest))
+		return 0;
+
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
+	if (error)
+		goto out_error;
+
+	/* Lock both files against IO */
+	if (src->i_ino == dest->i_ino)
+		xfs_ilock(src, XFS_ILOCK_EXCL);
+	else
+		xfs_lock_two_inodes(src, dest, XFS_ILOCK_EXCL);
+
+	if (!xfs_is_reflink_inode(src)) {
+		trace_xfs_reflink_set_inode_flag(src);
+		xfs_trans_ijoin(tp, src, XFS_ILOCK_EXCL);
+		src->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
+		xfs_trans_log_inode(tp, src, XFS_ILOG_CORE);
+		xfs_ifork_init_cow(src);
+	} else
+		xfs_iunlock(src, XFS_ILOCK_EXCL);
+
+	if (src->i_ino == dest->i_ino)
+		goto commit_flags;
+
+	if (!xfs_is_reflink_inode(dest)) {
+		trace_xfs_reflink_set_inode_flag(dest);
+		xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL);
+		dest->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
+		xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);
+		xfs_ifork_init_cow(dest);
+	} else
+		xfs_iunlock(dest, XFS_ILOCK_EXCL);
+
+commit_flags:
+	error = xfs_trans_commit(tp);
+	if (error)
+		goto out_error;
+	return error;
+
+out_error:
+	trace_xfs_reflink_set_inode_flag_error(dest, error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Update destination inode size & cowextsize hint, if necessary.
+ */
+STATIC int
+xfs_reflink_update_dest(
+	struct xfs_inode	*dest,
+	xfs_off_t		newlen,
+	xfs_extlen_t		cowextsize)
+{
+	struct xfs_mount	*mp = dest->i_mount;
+	struct xfs_trans	*tp;
+	int			error;
+
+	if (newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0)
+		return 0;
+
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
+	if (error)
+		goto out_error;
+
+	xfs_ilock(dest, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL);
+
+	if (newlen > i_size_read(VFS_I(dest))) {
+		trace_xfs_reflink_update_inode_size(dest, newlen);
+		i_size_write(VFS_I(dest), newlen);
+		dest->i_d.di_size = newlen;
+	}
+
+	if (cowextsize) {
+		dest->i_d.di_cowextsize = cowextsize;
+		dest->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
+	}
+
+	xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);
+
+	error = xfs_trans_commit(tp);
+	if (error)
+		goto out_error;
+	return error;
+
+out_error:
+	trace_xfs_reflink_update_inode_size_error(dest, error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Do we have enough reserve in this AG to handle a reflink?  The refcount
+ * btree already reserved all the space it needs, but the rmap btree can grow
+ * infinitely, so we won't allow more reflinks when the AG is down to the
+ * btree reserves.
+ */
+static int
+xfs_reflink_ag_has_free_space(
+	struct xfs_mount	*mp,
+	xfs_agnumber_t		agno)
+{
+	struct xfs_perag	*pag;
+	int			error = 0;
+
+	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+		return 0;
+
+	pag = xfs_perag_get(mp, agno);
+	if (xfs_ag_resv_critical(pag, XFS_AG_RESV_AGFL) ||
+	    xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA))
+		error = -ENOSPC;
+	xfs_perag_put(pag);
+	return error;
+}
+
+/*
+ * Unmap a range of blocks from a file, then map other blocks into the hole.
+ * The range to unmap is (destoff : destoff + srcioff + irec->br_blockcount).
+ * The extent irec is mapped into dest at irec->br_startoff.
+ */
+STATIC int
+xfs_reflink_remap_extent(
+	struct xfs_inode	*ip,
+	struct xfs_bmbt_irec	*irec,
+	xfs_fileoff_t		destoff,
+	xfs_off_t		new_isize)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_trans	*tp;
+	xfs_fsblock_t		firstfsb;
+	unsigned int		resblks;
+	struct xfs_defer_ops	dfops;
+	struct xfs_bmbt_irec	uirec;
+	bool			real_extent;
+	xfs_filblks_t		rlen;
+	xfs_filblks_t		unmap_len;
+	xfs_off_t		newlen;
+	int			error;
+
+	unmap_len = irec->br_startoff + irec->br_blockcount - destoff;
+	trace_xfs_reflink_punch_range(ip, destoff, unmap_len);
+
+	/* Only remap normal extents. */
+	real_extent =  (irec->br_startblock != HOLESTARTBLOCK &&
+			irec->br_startblock != DELAYSTARTBLOCK &&
+			!ISUNWRITTEN(irec));
+
+	/* No reflinking if we're low on space */
+	if (real_extent) {
+		error = xfs_reflink_ag_has_free_space(mp,
+				XFS_FSB_TO_AGNO(mp, irec->br_startblock));
+		if (error)
+			goto out;
+	}
+
+	/* Start a rolling transaction to switch the mappings */
+	resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
+	if (error)
+		goto out;
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, 0);
+
+	/* If we're not just clearing space, then do we have enough quota? */
+	if (real_extent) {
+		error = xfs_trans_reserve_quota_nblks(tp, ip,
+				irec->br_blockcount, 0, XFS_QMOPT_RES_REGBLKS);
+		if (error)
+			goto out_cancel;
+	}
+
+	trace_xfs_reflink_remap(ip, irec->br_startoff,
+				irec->br_blockcount, irec->br_startblock);
+
+	/* Unmap the old blocks in the data fork. */
+	rlen = unmap_len;
+	while (rlen) {
+		xfs_defer_init(&dfops, &firstfsb);
+		error = __xfs_bunmapi(tp, ip, destoff, &rlen, 0, 1,
+				&firstfsb, &dfops);
+		if (error)
+			goto out_defer;
+
+		/*
+		 * Trim the extent to whatever got unmapped.
+		 * Remember, bunmapi works backwards.
+		 */
+		uirec.br_startblock = irec->br_startblock + rlen;
+		uirec.br_startoff = irec->br_startoff + rlen;
+		uirec.br_blockcount = unmap_len - rlen;
+		unmap_len = rlen;
+
+		/* If this isn't a real mapping, we're done. */
+		if (!real_extent || uirec.br_blockcount == 0)
+			goto next_extent;
+
+		trace_xfs_reflink_remap(ip, uirec.br_startoff,
+				uirec.br_blockcount, uirec.br_startblock);
+
+		/* Update the refcount tree */
+		error = xfs_refcount_increase_extent(mp, &dfops, &uirec);
+		if (error)
+			goto out_defer;
+
+		/* Map the new blocks into the data fork. */
+		error = xfs_bmap_map_extent(mp, &dfops, ip, &uirec);
+		if (error)
+			goto out_defer;
+
+		/* Update quota accounting. */
+		xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT,
+				uirec.br_blockcount);
+
+		/* Update dest isize if needed. */
+		newlen = XFS_FSB_TO_B(mp,
+				uirec.br_startoff + uirec.br_blockcount);
+		newlen = min_t(xfs_off_t, newlen, new_isize);
+		if (newlen > i_size_read(VFS_I(ip))) {
+			trace_xfs_reflink_update_inode_size(ip, newlen);
+			i_size_write(VFS_I(ip), newlen);
+			ip->i_d.di_size = newlen;
+			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+		}
+
+next_extent:
+		/* Process all the deferred stuff. */
+		error = xfs_defer_finish(&tp, &dfops, ip);
+		if (error)
+			goto out_defer;
+	}
+
+	error = xfs_trans_commit(tp);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	if (error)
+		goto out;
+	return 0;
+
+out_defer:
+	xfs_defer_cancel(&dfops);
+out_cancel:
+	xfs_trans_cancel(tp);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+out:
+	trace_xfs_reflink_remap_extent_error(ip, error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Iteratively remap one file's extents (and holes) to another's.
+ */
+STATIC int
+xfs_reflink_remap_blocks(
+	struct xfs_inode	*src,
+	xfs_fileoff_t		srcoff,
+	struct xfs_inode	*dest,
+	xfs_fileoff_t		destoff,
+	xfs_filblks_t		len,
+	xfs_off_t		new_isize)
+{
+	struct xfs_bmbt_irec	imap;
+	int			nimaps;
+	int			error = 0;
+	xfs_filblks_t		range_len;
+
+	/* drange = (destoff, destoff + len); srange = (srcoff, srcoff + len) */
+	while (len) {
+		trace_xfs_reflink_remap_blocks_loop(src, srcoff, len,
+				dest, destoff);
+		/* Read extent from the source file */
+		nimaps = 1;
+		xfs_ilock(src, XFS_ILOCK_EXCL);
+		error = xfs_bmapi_read(src, srcoff, len, &imap, &nimaps, 0);
+		xfs_iunlock(src, XFS_ILOCK_EXCL);
+		if (error)
+			goto err;
+		ASSERT(nimaps == 1);
+
+		trace_xfs_reflink_remap_imap(src, srcoff, len, XFS_IO_OVERWRITE,
+				&imap);
+
+		/* Translate imap into the destination file. */
+		range_len = imap.br_startoff + imap.br_blockcount - srcoff;
+		imap.br_startoff += destoff - srcoff;
+
+		/* Clear dest from destoff to the end of imap and map it in. */
+		error = xfs_reflink_remap_extent(dest, &imap, destoff,
+				new_isize);
+		if (error)
+			goto err;
+
+		if (fatal_signal_pending(current)) {
+			error = -EINTR;
+			goto err;
+		}
+
+		/* Advance drange/srange */
+		srcoff += range_len;
+		destoff += range_len;
+		len -= range_len;
+	}
+
+	return 0;
+
+err:
+	trace_xfs_reflink_remap_blocks_error(dest, error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Read a page's worth of file data into the page cache.  Return the page
+ * locked.
+ */
+static struct page *
+xfs_get_page(
+	struct inode	*inode,
+	xfs_off_t	offset)
+{
+	struct address_space	*mapping;
+	struct page		*page;
+	pgoff_t			n;
+
+	n = offset >> PAGE_SHIFT;
+	mapping = inode->i_mapping;
+	page = read_mapping_page(mapping, n, NULL);
+	if (IS_ERR(page))
+		return page;
+	if (!PageUptodate(page)) {
+		put_page(page);
+		return ERR_PTR(-EIO);
+	}
+	lock_page(page);
+	return page;
+}
+
+/*
+ * Compare extents of two files to see if they are the same.
+ */
+static int
+xfs_compare_extents(
+	struct inode	*src,
+	xfs_off_t	srcoff,
+	struct inode	*dest,
+	xfs_off_t	destoff,
+	xfs_off_t	len,
+	bool		*is_same)
+{
+	xfs_off_t	src_poff;
+	xfs_off_t	dest_poff;
+	void		*src_addr;
+	void		*dest_addr;
+	struct page	*src_page;
+	struct page	*dest_page;
+	xfs_off_t	cmp_len;
+	bool		same;
+	int		error;
+
+	error = -EINVAL;
+	same = true;
+	while (len) {
+		src_poff = srcoff & (PAGE_SIZE - 1);
+		dest_poff = destoff & (PAGE_SIZE - 1);
+		cmp_len = min(PAGE_SIZE - src_poff,
+			      PAGE_SIZE - dest_poff);
+		cmp_len = min(cmp_len, len);
+		ASSERT(cmp_len > 0);
+
+		trace_xfs_reflink_compare_extents(XFS_I(src), srcoff, cmp_len,
+				XFS_I(dest), destoff);
+
+		src_page = xfs_get_page(src, srcoff);
+		if (IS_ERR(src_page)) {
+			error = PTR_ERR(src_page);
+			goto out_error;
+		}
+		dest_page = xfs_get_page(dest, destoff);
+		if (IS_ERR(dest_page)) {
+			error = PTR_ERR(dest_page);
+			unlock_page(src_page);
+			put_page(src_page);
+			goto out_error;
+		}
+		src_addr = kmap_atomic(src_page);
+		dest_addr = kmap_atomic(dest_page);
+
+		flush_dcache_page(src_page);
+		flush_dcache_page(dest_page);
+
+		if (memcmp(src_addr + src_poff, dest_addr + dest_poff, cmp_len))
+			same = false;
+
+		kunmap_atomic(dest_addr);
+		kunmap_atomic(src_addr);
+		unlock_page(dest_page);
+		unlock_page(src_page);
+		put_page(dest_page);
+		put_page(src_page);
+
+		if (!same)
+			break;
+
+		srcoff += cmp_len;
+		destoff += cmp_len;
+		len -= cmp_len;
+	}
+
+	*is_same = same;
+	return 0;
+
+out_error:
+	trace_xfs_reflink_compare_extents_error(XFS_I(dest), error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Link a range of blocks from one file to another.
+ */
+int
+xfs_reflink_remap_range(
+	struct xfs_inode	*src,
+	xfs_off_t		srcoff,
+	struct xfs_inode	*dest,
+	xfs_off_t		destoff,
+	xfs_off_t		len,
+	unsigned int		flags)
+{
+	struct xfs_mount	*mp = src->i_mount;
+	xfs_fileoff_t		sfsbno, dfsbno;
+	xfs_filblks_t		fsblen;
+	int			error;
+	xfs_extlen_t		cowextsize;
+	bool			is_same;
+
+	if (!xfs_sb_version_hasreflink(&mp->m_sb))
+		return -EOPNOTSUPP;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -EIO;
+
+	/* Don't reflink realtime inodes */
+	if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
+		return -EINVAL;
+
+	if (flags & ~XFS_REFLINK_ALL)
+		return -EINVAL;
+
+	trace_xfs_reflink_remap_range(src, srcoff, len, dest, destoff);
+
+	/* Lock both files against IO */
+	if (src->i_ino == dest->i_ino) {
+		xfs_ilock(src, XFS_IOLOCK_EXCL);
+		xfs_ilock(src, XFS_MMAPLOCK_EXCL);
+	} else {
+		xfs_lock_two_inodes(src, dest, XFS_IOLOCK_EXCL);
+		xfs_lock_two_inodes(src, dest, XFS_MMAPLOCK_EXCL);
+	}
+
+	/*
+	 * Check that the extents are the same.
+	 */
+	if (flags & XFS_REFLINK_DEDUPE) {
+		is_same = false;
+		error = xfs_compare_extents(VFS_I(src), srcoff, VFS_I(dest),
+				destoff, len, &is_same);
+		if (error)
+			goto out_error;
+		if (!is_same) {
+			error = -EBADE;
+			goto out_error;
+		}
+	}
+
+	error = xfs_reflink_set_inode_flag(src, dest);
+	if (error)
+		goto out_error;
+
+	/*
+	 * Invalidate the page cache so that we can clear any CoW mappings
+	 * in the destination file.
+	 */
+	truncate_inode_pages_range(&VFS_I(dest)->i_data, destoff,
+				   PAGE_ALIGN(destoff + len) - 1);
+
+	dfsbno = XFS_B_TO_FSBT(mp, destoff);
+	sfsbno = XFS_B_TO_FSBT(mp, srcoff);
+	fsblen = XFS_B_TO_FSB(mp, len);
+	error = xfs_reflink_remap_blocks(src, sfsbno, dest, dfsbno, fsblen,
+			destoff + len);
+	if (error)
+		goto out_error;
+
+	/*
+	 * Carry the cowextsize hint from src to dest if we're sharing the
+	 * entire source file to the entire destination file, the source file
+	 * has a cowextsize hint, and the destination file does not.
+	 */
+	cowextsize = 0;
+	if (srcoff == 0 && len == i_size_read(VFS_I(src)) &&
+	    (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
+	    destoff == 0 && len >= i_size_read(VFS_I(dest)) &&
+	    !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
+		cowextsize = src->i_d.di_cowextsize;
+
+	error = xfs_reflink_update_dest(dest, destoff + len, cowextsize);
+	if (error)
+		goto out_error;
+
+out_error:
+	xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
+	xfs_iunlock(src, XFS_IOLOCK_EXCL);
+	if (src->i_ino != dest->i_ino) {
+		xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
+		xfs_iunlock(dest, XFS_IOLOCK_EXCL);
+	}
+	if (error)
+		trace_xfs_reflink_remap_range_error(dest, error, _RET_IP_);
+	return error;
+}
+
+/*
+ * The user wants to preemptively CoW all shared blocks in this file,
+ * which enables us to turn off the reflink flag.  Iterate all
+ * extents which are not prealloc/delalloc to see which ranges are
+ * mentioned in the refcount tree, then read those blocks into the
+ * pagecache, dirty them, fsync them back out, and then we can update
+ * the inode flag.  What happens if we run out of memory? :)
+ */
+STATIC int
+xfs_reflink_dirty_extents(
+	struct xfs_inode	*ip,
+	xfs_fileoff_t		fbno,
+	xfs_filblks_t		end,
+	xfs_off_t		isize)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	xfs_agnumber_t		agno;
+	xfs_agblock_t		agbno;
+	xfs_extlen_t		aglen;
+	xfs_agblock_t		rbno;
+	xfs_extlen_t		rlen;
+	xfs_off_t		fpos;
+	xfs_off_t		flen;
+	struct xfs_bmbt_irec	map[2];
+	int			nmaps;
+	int			error = 0;
+
+	while (end - fbno > 0) {
+		nmaps = 1;
+		/*
+		 * Look for extents in the file.  Skip holes, delalloc, or
+		 * unwritten extents; they can't be reflinked.
+		 */
+		error = xfs_bmapi_read(ip, fbno, end - fbno, map, &nmaps, 0);
+		if (error)
+			goto out;
+		if (nmaps == 0)
+			break;
+		if (map[0].br_startblock == HOLESTARTBLOCK ||
+		    map[0].br_startblock == DELAYSTARTBLOCK ||
+		    ISUNWRITTEN(&map[0]))
+			goto next;
+
+		map[1] = map[0];
+		while (map[1].br_blockcount) {
+			agno = XFS_FSB_TO_AGNO(mp, map[1].br_startblock);
+			agbno = XFS_FSB_TO_AGBNO(mp, map[1].br_startblock);
+			aglen = map[1].br_blockcount;
+
+			error = xfs_reflink_find_shared(mp, agno, agbno, aglen,
+					&rbno, &rlen, true);
+			if (error)
+				goto out;
+			if (rbno == NULLAGBLOCK)
+				break;
+
+			/* Dirty the pages */
+			xfs_iunlock(ip, XFS_ILOCK_EXCL);
+			fpos = XFS_FSB_TO_B(mp, map[1].br_startoff +
+					(rbno - agbno));
+			flen = XFS_FSB_TO_B(mp, rlen);
+			if (fpos + flen > isize)
+				flen = isize - fpos;
+			error = iomap_file_dirty(VFS_I(ip), fpos, flen,
+					&xfs_iomap_ops);
+			xfs_ilock(ip, XFS_ILOCK_EXCL);
+			if (error)
+				goto out;
+
+			map[1].br_blockcount -= (rbno - agbno + rlen);
+			map[1].br_startoff += (rbno - agbno + rlen);
+			map[1].br_startblock += (rbno - agbno + rlen);
+		}
+
+next:
+		fbno = map[0].br_startoff + map[0].br_blockcount;
+	}
+out:
+	return error;
+}
+
+/* Clear the inode reflink flag if there are no shared extents. */
+int
+xfs_reflink_clear_inode_flag(
+	struct xfs_inode	*ip,
+	struct xfs_trans	**tpp)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	xfs_fileoff_t		fbno;
+	xfs_filblks_t		end;
+	xfs_agnumber_t		agno;
+	xfs_agblock_t		agbno;
+	xfs_extlen_t		aglen;
+	xfs_agblock_t		rbno;
+	xfs_extlen_t		rlen;
+	struct xfs_bmbt_irec	map;
+	int			nmaps;
+	int			error = 0;
+
+	ASSERT(xfs_is_reflink_inode(ip));
+
+	fbno = 0;
+	end = XFS_B_TO_FSB(mp, i_size_read(VFS_I(ip)));
+	while (end - fbno > 0) {
+		nmaps = 1;
+		/*
+		 * Look for extents in the file.  Skip holes, delalloc, or
+		 * unwritten extents; they can't be reflinked.
+		 */
+		error = xfs_bmapi_read(ip, fbno, end - fbno, &map, &nmaps, 0);
+		if (error)
+			return error;
+		if (nmaps == 0)
+			break;
+		if (map.br_startblock == HOLESTARTBLOCK ||
+		    map.br_startblock == DELAYSTARTBLOCK ||
+		    ISUNWRITTEN(&map))
+			goto next;
+
+		agno = XFS_FSB_TO_AGNO(mp, map.br_startblock);
+		agbno = XFS_FSB_TO_AGBNO(mp, map.br_startblock);
+		aglen = map.br_blockcount;
+
+		error = xfs_reflink_find_shared(mp, agno, agbno, aglen,
+				&rbno, &rlen, false);
+		if (error)
+			return error;
+		/* Is there still a shared block here? */
+		if (rbno != NULLAGBLOCK)
+			return 0;
+next:
+		fbno = map.br_startoff + map.br_blockcount;
+	}
+
+	/*
+	 * We didn't find any shared blocks so turn off the reflink flag.
+	 * First, get rid of any leftover CoW mappings.
+	 */
+	error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF);
+	if (error)
+		return error;
+
+	/* Clear the inode flag. */
+	trace_xfs_reflink_unset_inode_flag(ip);
+	ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
+	xfs_inode_clear_cowblocks_tag(ip);
+	xfs_trans_ijoin(*tpp, ip, 0);
+	xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);
+
+	return error;
+}
+
+/*
+ * Clear the inode reflink flag if there are no shared extents and the size
+ * hasn't changed.
+ */
+STATIC int
+xfs_reflink_try_clear_inode_flag(
+	struct xfs_inode	*ip)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_trans	*tp;
+	int			error = 0;
+
+	/* Start a rolling transaction to remove the mappings */
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
+	if (error)
+		return error;
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, 0);
+
+	error = xfs_reflink_clear_inode_flag(ip, &tp);
+	if (error)
+		goto cancel;
+
+	error = xfs_trans_commit(tp);
+	if (error)
+		goto out;
+
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	return 0;
+cancel:
+	xfs_trans_cancel(tp);
+out:
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	return error;
+}
+
+/*
+ * Pre-COW all shared blocks within a given byte range of a file and turn off
+ * the reflink flag if we unshare all of the file's blocks.
+ */
+int
+xfs_reflink_unshare(
+	struct xfs_inode	*ip,
+	xfs_off_t		offset,
+	xfs_off_t		len)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	xfs_fileoff_t		fbno;
+	xfs_filblks_t		end;
+	xfs_off_t		isize;
+	int			error;
+
+	if (!xfs_is_reflink_inode(ip))
+		return 0;
+
+	trace_xfs_reflink_unshare(ip, offset, len);
+
+	inode_dio_wait(VFS_I(ip));
+
+	/* Try to CoW the selected ranges */
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	fbno = XFS_B_TO_FSBT(mp, offset);
+	isize = i_size_read(VFS_I(ip));
+	end = XFS_B_TO_FSB(mp, offset + len);
+	error = xfs_reflink_dirty_extents(ip, fbno, end, isize);
+	if (error)
+		goto out_unlock;
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+	/* Wait for the IO to finish */
+	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
+	if (error)
+		goto out;
+
+	/* Turn off the reflink flag if possible. */
+	error = xfs_reflink_try_clear_inode_flag(ip);
+	if (error)
+		goto out;
+
+	return 0;
+
+out_unlock:
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+out:
+	trace_xfs_reflink_unshare_error(ip, error, _RET_IP_);
+	return error;
+}
+
+/*
+ * Does this inode have any real CoW reservations?
+ */
+bool
+xfs_reflink_has_real_cow_blocks(
+	struct xfs_inode		*ip)
+{
+	struct xfs_bmbt_irec		irec;
+	struct xfs_ifork		*ifp;
+	struct xfs_bmbt_rec_host	*gotp;
+	xfs_extnum_t			idx;
+
+	if (!xfs_is_reflink_inode(ip))
+		return false;
+
+	/* Go find the old extent in the CoW fork. */
+	ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+	gotp = xfs_iext_bno_to_ext(ifp, 0, &idx);
+	while (gotp) {
+		xfs_bmbt_get_all(gotp, &irec);
+
+		if (!isnullstartblock(irec.br_startblock))
+			return true;
+
+		/* Roll on... */
+		idx++;
+		if (idx >= ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
+			break;
+		gotp = xfs_iext_get_ext(ifp, idx);
+	}
+
+	return false;
+}
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
new file mode 100644
index 0000000..5dc3c8a
--- /dev/null
+++ b/fs/xfs/xfs_reflink.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 Oracle.  All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#ifndef __XFS_REFLINK_H
+#define __XFS_REFLINK_H 1
+
+extern int xfs_reflink_find_shared(struct xfs_mount *mp, xfs_agnumber_t agno,
+		xfs_agblock_t agbno, xfs_extlen_t aglen, xfs_agblock_t *fbno,
+		xfs_extlen_t *flen, bool find_maximal);
+extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
+		struct xfs_bmbt_irec *irec, bool *shared, bool *trimmed);
+
+extern int xfs_reflink_reserve_cow_range(struct xfs_inode *ip,
+		xfs_off_t offset, xfs_off_t count);
+extern int xfs_reflink_allocate_cow_range(struct xfs_inode *ip,
+		xfs_off_t offset, xfs_off_t count);
+extern bool xfs_reflink_find_cow_mapping(struct xfs_inode *ip, xfs_off_t offset,
+		struct xfs_bmbt_irec *imap, bool *need_alloc);
+extern int xfs_reflink_trim_irec_to_next_cow(struct xfs_inode *ip,
+		xfs_fileoff_t offset_fsb, struct xfs_bmbt_irec *imap);
+
+extern int xfs_reflink_cancel_cow_blocks(struct xfs_inode *ip,
+		struct xfs_trans **tpp, xfs_fileoff_t offset_fsb,
+		xfs_fileoff_t end_fsb);
+extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset,
+		xfs_off_t count);
+extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset,
+		xfs_off_t count);
+extern int xfs_reflink_recover_cow(struct xfs_mount *mp);
+#define XFS_REFLINK_DEDUPE	1	/* only reflink if contents match */
+#define XFS_REFLINK_ALL		(XFS_REFLINK_DEDUPE)
+extern int xfs_reflink_remap_range(struct xfs_inode *src, xfs_off_t srcoff,
+		struct xfs_inode *dest, xfs_off_t destoff, xfs_off_t len,
+		unsigned int flags);
+extern int xfs_reflink_clear_inode_flag(struct xfs_inode *ip,
+		struct xfs_trans **tpp);
+extern int xfs_reflink_unshare(struct xfs_inode *ip, xfs_off_t offset,
+		xfs_off_t len);
+
+extern bool xfs_reflink_has_real_cow_blocks(struct xfs_inode *ip);
+
+#endif /* __XFS_REFLINK_H */
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
index 0432a45..73c8278 100644
--- a/fs/xfs/xfs_rmap_item.c
+++ b/fs/xfs/xfs_rmap_item.c
@@ -441,8 +441,11 @@
 				   XFS_FSB_TO_DADDR(mp, rmap->me_startblock));
 		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
 		case XFS_RMAP_EXTENT_MAP:
+		case XFS_RMAP_EXTENT_MAP_SHARED:
 		case XFS_RMAP_EXTENT_UNMAP:
+		case XFS_RMAP_EXTENT_UNMAP_SHARED:
 		case XFS_RMAP_EXTENT_CONVERT:
+		case XFS_RMAP_EXTENT_CONVERT_SHARED:
 		case XFS_RMAP_EXTENT_ALLOC:
 		case XFS_RMAP_EXTENT_FREE:
 			op_ok = true;
@@ -481,12 +484,21 @@
 		case XFS_RMAP_EXTENT_MAP:
 			type = XFS_RMAP_MAP;
 			break;
+		case XFS_RMAP_EXTENT_MAP_SHARED:
+			type = XFS_RMAP_MAP_SHARED;
+			break;
 		case XFS_RMAP_EXTENT_UNMAP:
 			type = XFS_RMAP_UNMAP;
 			break;
+		case XFS_RMAP_EXTENT_UNMAP_SHARED:
+			type = XFS_RMAP_UNMAP_SHARED;
+			break;
 		case XFS_RMAP_EXTENT_CONVERT:
 			type = XFS_RMAP_CONVERT;
 			break;
+		case XFS_RMAP_EXTENT_CONVERT_SHARED:
+			type = XFS_RMAP_CONVERT_SHARED;
+			break;
 		case XFS_RMAP_EXTENT_ALLOC:
 			type = XFS_RMAP_ALLOC;
 			break;
diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c
index 6e812fe0..12d48cd 100644
--- a/fs/xfs/xfs_stats.c
+++ b/fs/xfs/xfs_stats.c
@@ -62,6 +62,7 @@
 		{ "ibt2",		XFSSTAT_END_IBT_V2		},
 		{ "fibt2",		XFSSTAT_END_FIBT_V2		},
 		{ "rmapbt",		XFSSTAT_END_RMAP_V2		},
+		{ "refcntbt",		XFSSTAT_END_REFCOUNT		},
 		/* we print both series of quota information together */
 		{ "qm",			XFSSTAT_END_QM			},
 	};
diff --git a/fs/xfs/xfs_stats.h b/fs/xfs/xfs_stats.h
index 657865f..79ad2e6 100644
--- a/fs/xfs/xfs_stats.h
+++ b/fs/xfs/xfs_stats.h
@@ -213,7 +213,23 @@
 	__uint32_t		xs_rmap_2_alloc;
 	__uint32_t		xs_rmap_2_free;
 	__uint32_t		xs_rmap_2_moves;
-#define XFSSTAT_END_XQMSTAT		(XFSSTAT_END_RMAP_V2+6)
+#define XFSSTAT_END_REFCOUNT		(XFSSTAT_END_RMAP_V2 + 15)
+	__uint32_t		xs_refcbt_2_lookup;
+	__uint32_t		xs_refcbt_2_compare;
+	__uint32_t		xs_refcbt_2_insrec;
+	__uint32_t		xs_refcbt_2_delrec;
+	__uint32_t		xs_refcbt_2_newroot;
+	__uint32_t		xs_refcbt_2_killroot;
+	__uint32_t		xs_refcbt_2_increment;
+	__uint32_t		xs_refcbt_2_decrement;
+	__uint32_t		xs_refcbt_2_lshift;
+	__uint32_t		xs_refcbt_2_rshift;
+	__uint32_t		xs_refcbt_2_split;
+	__uint32_t		xs_refcbt_2_join;
+	__uint32_t		xs_refcbt_2_alloc;
+	__uint32_t		xs_refcbt_2_free;
+	__uint32_t		xs_refcbt_2_moves;
+#define XFSSTAT_END_XQMSTAT		(XFSSTAT_END_REFCOUNT + 6)
 	__uint32_t		xs_qm_dqreclaims;
 	__uint32_t		xs_qm_dqreclaim_misses;
 	__uint32_t		xs_qm_dquot_dups;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 2d092f9..ade4691 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -47,6 +47,9 @@
 #include "xfs_sysfs.h"
 #include "xfs_ondisk.h"
 #include "xfs_rmap_item.h"
+#include "xfs_refcount_item.h"
+#include "xfs_bmap_item.h"
+#include "xfs_reflink.h"
 
 #include <linux/namei.h>
 #include <linux/init.h>
@@ -936,6 +939,7 @@
 	struct inode		*inode)
 {
 	struct xfs_inode	*ip = XFS_I(inode);
+	int			error;
 
 	trace_xfs_destroy_inode(ip);
 
@@ -943,6 +947,14 @@
 	XFS_STATS_INC(ip->i_mount, vn_rele);
 	XFS_STATS_INC(ip->i_mount, vn_remove);
 
+	if (xfs_is_reflink_inode(ip)) {
+		error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF);
+		if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount))
+			xfs_warn(ip->i_mount,
+"Error %d while evicting CoW blocks for inode %llu.",
+					error, ip->i_ino);
+	}
+
 	xfs_inactive(ip);
 
 	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
@@ -1006,6 +1018,16 @@
 {
 	struct xfs_inode	*ip = XFS_I(inode);
 
+	/*
+	 * If this unlinked inode is in the middle of recovery, don't
+	 * drop the inode just yet; log recovery will take care of
+	 * that.  See the comment for this inode flag.
+	 */
+	if (ip->i_flags & XFS_IRECOVERY) {
+		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
+		return 0;
+	}
+
 	return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
 }
 
@@ -1296,10 +1318,31 @@
 		xfs_restore_resvblks(mp);
 		xfs_log_work_queue(mp);
 		xfs_queue_eofblocks(mp);
+
+		/* Recover any CoW blocks that never got remapped. */
+		error = xfs_reflink_recover_cow(mp);
+		if (error) {
+			xfs_err(mp,
+	"Error %d recovering leftover CoW allocations.", error);
+			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+			return error;
+		}
+
+		/* Create the per-AG metadata reservation pool .*/
+		error = xfs_fs_reserve_ag_blocks(mp);
+		if (error && error != -ENOSPC)
+			return error;
 	}
 
 	/* rw -> ro */
 	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
+		/* Free the per-AG metadata reservation pool. */
+		error = xfs_fs_unreserve_ag_blocks(mp);
+		if (error) {
+			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+			return error;
+		}
+
 		/*
 		 * Before we sync the metadata, we need to free up the reserve
 		 * block pool so that the used block count in the superblock on
@@ -1490,6 +1533,7 @@
 	atomic_set(&mp->m_active_trans, 0);
 	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
 	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
+	INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
 	mp->m_kobj.kobject.kset = xfs_kset;
 
 	mp->m_super = sb;
@@ -1572,6 +1616,9 @@
 			"DAX unsupported by block device. Turning off DAX.");
 			mp->m_flags &= ~XFS_MOUNT_DAX;
 		}
+		if (xfs_sb_version_hasreflink(&mp->m_sb))
+			xfs_alert(mp,
+		"DAX and reflink have not been tested together!");
 	}
 
 	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
@@ -1585,6 +1632,10 @@
 	"EXPERIMENTAL reverse mapping btree feature enabled. Use at your own risk!");
 	}
 
+	if (xfs_sb_version_hasreflink(&mp->m_sb))
+		xfs_alert(mp,
+	"EXPERIMENTAL reflink feature enabled. Use at your own risk!");
+
 	error = xfs_mountfs(mp);
 	if (error)
 		goto out_filestream_unmount;
@@ -1788,8 +1839,38 @@
 	if (!xfs_rui_zone)
 		goto out_destroy_rud_zone;
 
+	xfs_cud_zone = kmem_zone_init(sizeof(struct xfs_cud_log_item),
+			"xfs_cud_item");
+	if (!xfs_cud_zone)
+		goto out_destroy_rui_zone;
+
+	xfs_cui_zone = kmem_zone_init(
+			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
+			"xfs_cui_item");
+	if (!xfs_cui_zone)
+		goto out_destroy_cud_zone;
+
+	xfs_bud_zone = kmem_zone_init(sizeof(struct xfs_bud_log_item),
+			"xfs_bud_item");
+	if (!xfs_bud_zone)
+		goto out_destroy_cui_zone;
+
+	xfs_bui_zone = kmem_zone_init(
+			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
+			"xfs_bui_item");
+	if (!xfs_bui_zone)
+		goto out_destroy_bud_zone;
+
 	return 0;
 
+ out_destroy_bud_zone:
+	kmem_zone_destroy(xfs_bud_zone);
+ out_destroy_cui_zone:
+	kmem_zone_destroy(xfs_cui_zone);
+ out_destroy_cud_zone:
+	kmem_zone_destroy(xfs_cud_zone);
+ out_destroy_rui_zone:
+	kmem_zone_destroy(xfs_rui_zone);
  out_destroy_rud_zone:
 	kmem_zone_destroy(xfs_rud_zone);
  out_destroy_icreate_zone:
@@ -1832,6 +1913,10 @@
 	 * destroy caches.
 	 */
 	rcu_barrier();
+	kmem_zone_destroy(xfs_bui_zone);
+	kmem_zone_destroy(xfs_bud_zone);
+	kmem_zone_destroy(xfs_cui_zone);
+	kmem_zone_destroy(xfs_cud_zone);
 	kmem_zone_destroy(xfs_rui_zone);
 	kmem_zone_destroy(xfs_rud_zone);
 	kmem_zone_destroy(xfs_icreate_zone);
@@ -1885,6 +1970,8 @@
 
 	xfs_extent_free_init_defer_op();
 	xfs_rmap_update_init_defer_op();
+	xfs_refcount_update_init_defer_op();
+	xfs_bmap_update_init_defer_op();
 
 	xfs_dir_startup();
 
diff --git a/fs/xfs/xfs_sysctl.c b/fs/xfs/xfs_sysctl.c
index aed74d3..afe1f66 100644
--- a/fs/xfs/xfs_sysctl.c
+++ b/fs/xfs/xfs_sysctl.c
@@ -184,6 +184,15 @@
 		.extra1		= &xfs_params.eofb_timer.min,
 		.extra2		= &xfs_params.eofb_timer.max,
 	},
+	{
+		.procname	= "speculative_cow_prealloc_lifetime",
+		.data		= &xfs_params.cowb_timer.val,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &xfs_params.cowb_timer.min,
+		.extra2		= &xfs_params.cowb_timer.max,
+	},
 	/* please keep this the last entry */
 #ifdef CONFIG_PROC_FS
 	{
diff --git a/fs/xfs/xfs_sysctl.h b/fs/xfs/xfs_sysctl.h
index ffef453..984a349 100644
--- a/fs/xfs/xfs_sysctl.h
+++ b/fs/xfs/xfs_sysctl.h
@@ -48,6 +48,7 @@
 	xfs_sysctl_val_t inherit_nodfrg;/* Inherit the "nodefrag" inode flag. */
 	xfs_sysctl_val_t fstrm_timer;	/* Filestream dir-AG assoc'n timeout. */
 	xfs_sysctl_val_t eofb_timer;	/* Interval between eofb scan wakeups */
+	xfs_sysctl_val_t cowb_timer;	/* Interval between cowb scan wakeups */
 } xfs_param_t;
 
 /*
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 16093c7..ad188d3 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -39,6 +39,7 @@
 struct xfs_inode_log_format;
 struct xfs_bmbt_irec;
 struct xfs_btree_cur;
+struct xfs_refcount_irec;
 
 DECLARE_EVENT_CLASS(xfs_attr_list_class,
 	TP_PROTO(struct xfs_attr_list_context *ctx),
@@ -135,6 +136,8 @@
 DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim);
 DEFINE_PERAG_REF_EVENT(xfs_perag_set_eofblocks);
 DEFINE_PERAG_REF_EVENT(xfs_perag_clear_eofblocks);
+DEFINE_PERAG_REF_EVENT(xfs_perag_set_cowblocks);
+DEFINE_PERAG_REF_EVENT(xfs_perag_clear_cowblocks);
 
 DECLARE_EVENT_CLASS(xfs_ag_class,
 	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno),
@@ -268,10 +271,10 @@
 		__field(unsigned long, caller_ip)
 	),
 	TP_fast_assign(
-		struct xfs_ifork	*ifp = (state & BMAP_ATTRFORK) ?
-						ip->i_afp : &ip->i_df;
+		struct xfs_ifork	*ifp;
 		struct xfs_bmbt_irec	r;
 
+		ifp = xfs_iext_state_to_fork(ip, state);
 		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r);
 		__entry->dev = VFS_I(ip)->i_sb->s_dev;
 		__entry->ino = ip->i_ino;
@@ -686,6 +689,9 @@
 DEFINE_INODE_EVENT(xfs_inode_set_eofblocks_tag);
 DEFINE_INODE_EVENT(xfs_inode_clear_eofblocks_tag);
 DEFINE_INODE_EVENT(xfs_inode_free_eofblocks_invalid);
+DEFINE_INODE_EVENT(xfs_inode_set_cowblocks_tag);
+DEFINE_INODE_EVENT(xfs_inode_clear_cowblocks_tag);
+DEFINE_INODE_EVENT(xfs_inode_free_cowblocks_invalid);
 
 DEFINE_INODE_EVENT(xfs_filemap_fault);
 DEFINE_INODE_EVENT(xfs_filemap_pmd_fault);
@@ -2581,10 +2587,20 @@
 DEFINE_AG_ERROR_EVENT(xfs_rmap_insert_error);
 DEFINE_AG_ERROR_EVENT(xfs_rmap_delete_error);
 DEFINE_AG_ERROR_EVENT(xfs_rmap_update_error);
+
+DEFINE_RMAPBT_EVENT(xfs_rmap_find_left_neighbor_candidate);
+DEFINE_RMAPBT_EVENT(xfs_rmap_find_left_neighbor_query);
+DEFINE_RMAPBT_EVENT(xfs_rmap_lookup_le_range_candidate);
+DEFINE_RMAPBT_EVENT(xfs_rmap_lookup_le_range);
 DEFINE_RMAPBT_EVENT(xfs_rmap_lookup_le_range_result);
 DEFINE_RMAPBT_EVENT(xfs_rmap_find_right_neighbor_result);
 DEFINE_RMAPBT_EVENT(xfs_rmap_find_left_neighbor_result);
 
+/* deferred bmbt updates */
+#define DEFINE_BMAP_DEFERRED_EVENT	DEFINE_RMAP_DEFERRED_EVENT
+DEFINE_BMAP_DEFERRED_EVENT(xfs_bmap_defer);
+DEFINE_BMAP_DEFERRED_EVENT(xfs_bmap_deferred);
+
 /* per-AG reservation */
 DECLARE_EVENT_CLASS(xfs_ag_resv_class,
 	TP_PROTO(struct xfs_perag *pag, enum xfs_ag_resv_type resv,
@@ -2639,6 +2655,728 @@
 DEFINE_AG_ERROR_EVENT(xfs_ag_resv_free_error);
 DEFINE_AG_ERROR_EVENT(xfs_ag_resv_init_error);
 
+/* refcount tracepoint classes */
+
+/* reuse the discard trace class for agbno/aglen-based traces */
+#define DEFINE_AG_EXTENT_EVENT(name) DEFINE_DISCARD_EVENT(name)
+
+/* ag btree lookup tracepoint class */
+#define XFS_AG_BTREE_CMP_FORMAT_STR \
+	{ XFS_LOOKUP_EQ,	"eq" }, \
+	{ XFS_LOOKUP_LE,	"le" }, \
+	{ XFS_LOOKUP_GE,	"ge" }
+DECLARE_EVENT_CLASS(xfs_ag_btree_lookup_class,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+		 xfs_agblock_t agbno, xfs_lookup_t dir),
+	TP_ARGS(mp, agno, agbno, dir),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, agbno)
+		__field(xfs_lookup_t, dir)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->agbno = agbno;
+		__entry->dir = dir;
+	),
+	TP_printk("dev %d:%d agno %u agbno %u cmp %s(%d)\n",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno,
+		  __entry->agbno,
+		  __print_symbolic(__entry->dir, XFS_AG_BTREE_CMP_FORMAT_STR),
+		  __entry->dir)
+)
+
+#define DEFINE_AG_BTREE_LOOKUP_EVENT(name) \
+DEFINE_EVENT(xfs_ag_btree_lookup_class, name, \
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+		 xfs_agblock_t agbno, xfs_lookup_t dir), \
+	TP_ARGS(mp, agno, agbno, dir))
+
+/* single-rcext tracepoint class */
+DECLARE_EVENT_CLASS(xfs_refcount_extent_class,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+		 struct xfs_refcount_irec *irec),
+	TP_ARGS(mp, agno, irec),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, startblock)
+		__field(xfs_extlen_t, blockcount)
+		__field(xfs_nlink_t, refcount)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->startblock = irec->rc_startblock;
+		__entry->blockcount = irec->rc_blockcount;
+		__entry->refcount = irec->rc_refcount;
+	),
+	TP_printk("dev %d:%d agno %u agbno %u len %u refcount %u\n",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno,
+		  __entry->startblock,
+		  __entry->blockcount,
+		  __entry->refcount)
+)
+
+#define DEFINE_REFCOUNT_EXTENT_EVENT(name) \
+DEFINE_EVENT(xfs_refcount_extent_class, name, \
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+		 struct xfs_refcount_irec *irec), \
+	TP_ARGS(mp, agno, irec))
+
+/* single-rcext and an agbno tracepoint class */
+DECLARE_EVENT_CLASS(xfs_refcount_extent_at_class,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+		 struct xfs_refcount_irec *irec, xfs_agblock_t agbno),
+	TP_ARGS(mp, agno, irec, agbno),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, startblock)
+		__field(xfs_extlen_t, blockcount)
+		__field(xfs_nlink_t, refcount)
+		__field(xfs_agblock_t, agbno)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->startblock = irec->rc_startblock;
+		__entry->blockcount = irec->rc_blockcount;
+		__entry->refcount = irec->rc_refcount;
+		__entry->agbno = agbno;
+	),
+	TP_printk("dev %d:%d agno %u agbno %u len %u refcount %u @ agbno %u\n",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno,
+		  __entry->startblock,
+		  __entry->blockcount,
+		  __entry->refcount,
+		  __entry->agbno)
+)
+
+#define DEFINE_REFCOUNT_EXTENT_AT_EVENT(name) \
+DEFINE_EVENT(xfs_refcount_extent_at_class, name, \
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+		 struct xfs_refcount_irec *irec, xfs_agblock_t agbno), \
+	TP_ARGS(mp, agno, irec, agbno))
+
+/* double-rcext tracepoint class */
+DECLARE_EVENT_CLASS(xfs_refcount_double_extent_class,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+		 struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2),
+	TP_ARGS(mp, agno, i1, i2),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, i1_startblock)
+		__field(xfs_extlen_t, i1_blockcount)
+		__field(xfs_nlink_t, i1_refcount)
+		__field(xfs_agblock_t, i2_startblock)
+		__field(xfs_extlen_t, i2_blockcount)
+		__field(xfs_nlink_t, i2_refcount)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->i1_startblock = i1->rc_startblock;
+		__entry->i1_blockcount = i1->rc_blockcount;
+		__entry->i1_refcount = i1->rc_refcount;
+		__entry->i2_startblock = i2->rc_startblock;
+		__entry->i2_blockcount = i2->rc_blockcount;
+		__entry->i2_refcount = i2->rc_refcount;
+	),
+	TP_printk("dev %d:%d agno %u agbno %u len %u refcount %u -- "
+		  "agbno %u len %u refcount %u\n",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno,
+		  __entry->i1_startblock,
+		  __entry->i1_blockcount,
+		  __entry->i1_refcount,
+		  __entry->i2_startblock,
+		  __entry->i2_blockcount,
+		  __entry->i2_refcount)
+)
+
+#define DEFINE_REFCOUNT_DOUBLE_EXTENT_EVENT(name) \
+DEFINE_EVENT(xfs_refcount_double_extent_class, name, \
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+		 struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2), \
+	TP_ARGS(mp, agno, i1, i2))
+
+/* double-rcext and an agbno tracepoint class */
+DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+		 struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2,
+		 xfs_agblock_t agbno),
+	TP_ARGS(mp, agno, i1, i2, agbno),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, i1_startblock)
+		__field(xfs_extlen_t, i1_blockcount)
+		__field(xfs_nlink_t, i1_refcount)
+		__field(xfs_agblock_t, i2_startblock)
+		__field(xfs_extlen_t, i2_blockcount)
+		__field(xfs_nlink_t, i2_refcount)
+		__field(xfs_agblock_t, agbno)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->i1_startblock = i1->rc_startblock;
+		__entry->i1_blockcount = i1->rc_blockcount;
+		__entry->i1_refcount = i1->rc_refcount;
+		__entry->i2_startblock = i2->rc_startblock;
+		__entry->i2_blockcount = i2->rc_blockcount;
+		__entry->i2_refcount = i2->rc_refcount;
+		__entry->agbno = agbno;
+	),
+	TP_printk("dev %d:%d agno %u agbno %u len %u refcount %u -- "
+		  "agbno %u len %u refcount %u @ agbno %u\n",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno,
+		  __entry->i1_startblock,
+		  __entry->i1_blockcount,
+		  __entry->i1_refcount,
+		  __entry->i2_startblock,
+		  __entry->i2_blockcount,
+		  __entry->i2_refcount,
+		  __entry->agbno)
+)
+
+#define DEFINE_REFCOUNT_DOUBLE_EXTENT_AT_EVENT(name) \
+DEFINE_EVENT(xfs_refcount_double_extent_at_class, name, \
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+		 struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2, \
+		 xfs_agblock_t agbno), \
+	TP_ARGS(mp, agno, i1, i2, agbno))
+
+/* triple-rcext tracepoint class */
+DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+		 struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2,
+		 struct xfs_refcount_irec *i3),
+	TP_ARGS(mp, agno, i1, i2, i3),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, i1_startblock)
+		__field(xfs_extlen_t, i1_blockcount)
+		__field(xfs_nlink_t, i1_refcount)
+		__field(xfs_agblock_t, i2_startblock)
+		__field(xfs_extlen_t, i2_blockcount)
+		__field(xfs_nlink_t, i2_refcount)
+		__field(xfs_agblock_t, i3_startblock)
+		__field(xfs_extlen_t, i3_blockcount)
+		__field(xfs_nlink_t, i3_refcount)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->i1_startblock = i1->rc_startblock;
+		__entry->i1_blockcount = i1->rc_blockcount;
+		__entry->i1_refcount = i1->rc_refcount;
+		__entry->i2_startblock = i2->rc_startblock;
+		__entry->i2_blockcount = i2->rc_blockcount;
+		__entry->i2_refcount = i2->rc_refcount;
+		__entry->i3_startblock = i3->rc_startblock;
+		__entry->i3_blockcount = i3->rc_blockcount;
+		__entry->i3_refcount = i3->rc_refcount;
+	),
+	TP_printk("dev %d:%d agno %u agbno %u len %u refcount %u -- "
+		  "agbno %u len %u refcount %u -- "
+		  "agbno %u len %u refcount %u\n",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno,
+		  __entry->i1_startblock,
+		  __entry->i1_blockcount,
+		  __entry->i1_refcount,
+		  __entry->i2_startblock,
+		  __entry->i2_blockcount,
+		  __entry->i2_refcount,
+		  __entry->i3_startblock,
+		  __entry->i3_blockcount,
+		  __entry->i3_refcount)
+);
+
+#define DEFINE_REFCOUNT_TRIPLE_EXTENT_EVENT(name) \
+DEFINE_EVENT(xfs_refcount_triple_extent_class, name, \
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+		 struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2, \
+		 struct xfs_refcount_irec *i3), \
+	TP_ARGS(mp, agno, i1, i2, i3))
+
+/* refcount btree tracepoints */
+DEFINE_BUSY_EVENT(xfs_refcountbt_alloc_block);
+DEFINE_BUSY_EVENT(xfs_refcountbt_free_block);
+DEFINE_AG_BTREE_LOOKUP_EVENT(xfs_refcount_lookup);
+DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_get);
+DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_update);
+DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_insert);
+DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_delete);
+DEFINE_AG_ERROR_EVENT(xfs_refcount_insert_error);
+DEFINE_AG_ERROR_EVENT(xfs_refcount_delete_error);
+DEFINE_AG_ERROR_EVENT(xfs_refcount_update_error);
+
+/* refcount adjustment tracepoints */
+DEFINE_AG_EXTENT_EVENT(xfs_refcount_increase);
+DEFINE_AG_EXTENT_EVENT(xfs_refcount_decrease);
+DEFINE_AG_EXTENT_EVENT(xfs_refcount_cow_increase);
+DEFINE_AG_EXTENT_EVENT(xfs_refcount_cow_decrease);
+DEFINE_REFCOUNT_TRIPLE_EXTENT_EVENT(xfs_refcount_merge_center_extents);
+DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_modify_extent);
+DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_recover_extent);
+DEFINE_REFCOUNT_EXTENT_AT_EVENT(xfs_refcount_split_extent);
+DEFINE_REFCOUNT_DOUBLE_EXTENT_EVENT(xfs_refcount_merge_left_extent);
+DEFINE_REFCOUNT_DOUBLE_EXTENT_EVENT(xfs_refcount_merge_right_extent);
+DEFINE_REFCOUNT_DOUBLE_EXTENT_AT_EVENT(xfs_refcount_find_left_extent);
+DEFINE_REFCOUNT_DOUBLE_EXTENT_AT_EVENT(xfs_refcount_find_right_extent);
+DEFINE_AG_ERROR_EVENT(xfs_refcount_adjust_error);
+DEFINE_AG_ERROR_EVENT(xfs_refcount_adjust_cow_error);
+DEFINE_AG_ERROR_EVENT(xfs_refcount_merge_center_extents_error);
+DEFINE_AG_ERROR_EVENT(xfs_refcount_modify_extent_error);
+DEFINE_AG_ERROR_EVENT(xfs_refcount_split_extent_error);
+DEFINE_AG_ERROR_EVENT(xfs_refcount_merge_left_extent_error);
+DEFINE_AG_ERROR_EVENT(xfs_refcount_merge_right_extent_error);
+DEFINE_AG_ERROR_EVENT(xfs_refcount_find_left_extent_error);
+DEFINE_AG_ERROR_EVENT(xfs_refcount_find_right_extent_error);
+
+/* reflink helpers */
+DEFINE_AG_EXTENT_EVENT(xfs_refcount_find_shared);
+DEFINE_AG_EXTENT_EVENT(xfs_refcount_find_shared_result);
+DEFINE_AG_ERROR_EVENT(xfs_refcount_find_shared_error);
+#define DEFINE_REFCOUNT_DEFERRED_EVENT DEFINE_PHYS_EXTENT_DEFERRED_EVENT
+DEFINE_REFCOUNT_DEFERRED_EVENT(xfs_refcount_defer);
+DEFINE_REFCOUNT_DEFERRED_EVENT(xfs_refcount_deferred);
+
+TRACE_EVENT(xfs_refcount_finish_one_leftover,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+		 int type, xfs_agblock_t agbno, xfs_extlen_t len,
+		 xfs_agblock_t new_agbno, xfs_extlen_t new_len),
+	TP_ARGS(mp, agno, type, agbno, len, new_agbno, new_len),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(int, type)
+		__field(xfs_agblock_t, agbno)
+		__field(xfs_extlen_t, len)
+		__field(xfs_agblock_t, new_agbno)
+		__field(xfs_extlen_t, new_len)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->type = type;
+		__entry->agbno = agbno;
+		__entry->len = len;
+		__entry->new_agbno = new_agbno;
+		__entry->new_len = new_len;
+	),
+	TP_printk("dev %d:%d type %d agno %u agbno %u len %u new_agbno %u new_len %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->type,
+		  __entry->agno,
+		  __entry->agbno,
+		  __entry->len,
+		  __entry->new_agbno,
+		  __entry->new_len)
+);
+
+/* simple inode-based error/%ip tracepoint class */
+DECLARE_EVENT_CLASS(xfs_inode_error_class,
+	TP_PROTO(struct xfs_inode *ip, int error, unsigned long caller_ip),
+	TP_ARGS(ip, error, caller_ip),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_ino_t, ino)
+		__field(int, error)
+		__field(unsigned long, caller_ip)
+	),
+	TP_fast_assign(
+		__entry->dev = VFS_I(ip)->i_sb->s_dev;
+		__entry->ino = ip->i_ino;
+		__entry->error = error;
+		__entry->caller_ip = caller_ip;
+	),
+	TP_printk("dev %d:%d ino %llx error %d caller %ps",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->ino,
+		  __entry->error,
+		  (char *)__entry->caller_ip)
+);
+
+#define DEFINE_INODE_ERROR_EVENT(name) \
+DEFINE_EVENT(xfs_inode_error_class, name, \
+	TP_PROTO(struct xfs_inode *ip, int error, \
+		 unsigned long caller_ip), \
+	TP_ARGS(ip, error, caller_ip))
+
+/* reflink allocator */
+TRACE_EVENT(xfs_bmap_remap_alloc,
+	TP_PROTO(struct xfs_inode *ip, xfs_fsblock_t fsbno,
+		 xfs_extlen_t len),
+	TP_ARGS(ip, fsbno, len),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_ino_t, ino)
+		__field(xfs_fsblock_t, fsbno)
+		__field(xfs_extlen_t, len)
+	),
+	TP_fast_assign(
+		__entry->dev = VFS_I(ip)->i_sb->s_dev;
+		__entry->ino = ip->i_ino;
+		__entry->fsbno = fsbno;
+		__entry->len = len;
+	),
+	TP_printk("dev %d:%d ino 0x%llx fsbno 0x%llx len %x",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->ino,
+		  __entry->fsbno,
+		  __entry->len)
+);
+DEFINE_INODE_ERROR_EVENT(xfs_bmap_remap_alloc_error);
+
+/* reflink tracepoint classes */
+
+/* two-file io tracepoint class */
+DECLARE_EVENT_CLASS(xfs_double_io_class,
+	TP_PROTO(struct xfs_inode *src, xfs_off_t soffset, xfs_off_t len,
+		 struct xfs_inode *dest, xfs_off_t doffset),
+	TP_ARGS(src, soffset, len, dest, doffset),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_ino_t, src_ino)
+		__field(loff_t, src_isize)
+		__field(loff_t, src_disize)
+		__field(loff_t, src_offset)
+		__field(size_t, len)
+		__field(xfs_ino_t, dest_ino)
+		__field(loff_t, dest_isize)
+		__field(loff_t, dest_disize)
+		__field(loff_t, dest_offset)
+	),
+	TP_fast_assign(
+		__entry->dev = VFS_I(src)->i_sb->s_dev;
+		__entry->src_ino = src->i_ino;
+		__entry->src_isize = VFS_I(src)->i_size;
+		__entry->src_disize = src->i_d.di_size;
+		__entry->src_offset = soffset;
+		__entry->len = len;
+		__entry->dest_ino = dest->i_ino;
+		__entry->dest_isize = VFS_I(dest)->i_size;
+		__entry->dest_disize = dest->i_d.di_size;
+		__entry->dest_offset = doffset;
+	),
+	TP_printk("dev %d:%d count %zd "
+		  "ino 0x%llx isize 0x%llx disize 0x%llx offset 0x%llx -> "
+		  "ino 0x%llx isize 0x%llx disize 0x%llx offset 0x%llx",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->len,
+		  __entry->src_ino,
+		  __entry->src_isize,
+		  __entry->src_disize,
+		  __entry->src_offset,
+		  __entry->dest_ino,
+		  __entry->dest_isize,
+		  __entry->dest_disize,
+		  __entry->dest_offset)
+)
+
+#define DEFINE_DOUBLE_IO_EVENT(name)	\
+DEFINE_EVENT(xfs_double_io_class, name,	\
+	TP_PROTO(struct xfs_inode *src, xfs_off_t soffset, xfs_off_t len, \
+		 struct xfs_inode *dest, xfs_off_t doffset), \
+	TP_ARGS(src, soffset, len, dest, doffset))
+
+/* two-file vfs io tracepoint class */
+DECLARE_EVENT_CLASS(xfs_double_vfs_io_class,
+	TP_PROTO(struct inode *src, u64 soffset, u64 len,
+		 struct inode *dest, u64 doffset),
+	TP_ARGS(src, soffset, len, dest, doffset),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(unsigned long, src_ino)
+		__field(loff_t, src_isize)
+		__field(loff_t, src_offset)
+		__field(size_t, len)
+		__field(unsigned long, dest_ino)
+		__field(loff_t, dest_isize)
+		__field(loff_t, dest_offset)
+	),
+	TP_fast_assign(
+		__entry->dev = src->i_sb->s_dev;
+		__entry->src_ino = src->i_ino;
+		__entry->src_isize = i_size_read(src);
+		__entry->src_offset = soffset;
+		__entry->len = len;
+		__entry->dest_ino = dest->i_ino;
+		__entry->dest_isize = i_size_read(dest);
+		__entry->dest_offset = doffset;
+	),
+	TP_printk("dev %d:%d count %zd "
+		  "ino 0x%lx isize 0x%llx offset 0x%llx -> "
+		  "ino 0x%lx isize 0x%llx offset 0x%llx",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->len,
+		  __entry->src_ino,
+		  __entry->src_isize,
+		  __entry->src_offset,
+		  __entry->dest_ino,
+		  __entry->dest_isize,
+		  __entry->dest_offset)
+)
+
+#define DEFINE_DOUBLE_VFS_IO_EVENT(name)	\
+DEFINE_EVENT(xfs_double_vfs_io_class, name,	\
+	TP_PROTO(struct inode *src, u64 soffset, u64 len, \
+		 struct inode *dest, u64 doffset), \
+	TP_ARGS(src, soffset, len, dest, doffset))
+
+/* CoW write tracepoint */
+DECLARE_EVENT_CLASS(xfs_copy_on_write_class,
+	TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t lblk, xfs_fsblock_t pblk,
+		 xfs_extlen_t len, xfs_fsblock_t new_pblk),
+	TP_ARGS(ip, lblk, pblk, len, new_pblk),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_ino_t, ino)
+		__field(xfs_fileoff_t, lblk)
+		__field(xfs_fsblock_t, pblk)
+		__field(xfs_extlen_t, len)
+		__field(xfs_fsblock_t, new_pblk)
+	),
+	TP_fast_assign(
+		__entry->dev = VFS_I(ip)->i_sb->s_dev;
+		__entry->ino = ip->i_ino;
+		__entry->lblk = lblk;
+		__entry->pblk = pblk;
+		__entry->len = len;
+		__entry->new_pblk = new_pblk;
+	),
+	TP_printk("dev %d:%d ino 0x%llx lblk 0x%llx pblk 0x%llx "
+		  "len 0x%x new_pblk %llu",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->ino,
+		  __entry->lblk,
+		  __entry->pblk,
+		  __entry->len,
+		  __entry->new_pblk)
+)
+
+#define DEFINE_COW_EVENT(name)	\
+DEFINE_EVENT(xfs_copy_on_write_class, name,	\
+	TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t lblk, xfs_fsblock_t pblk, \
+		 xfs_extlen_t len, xfs_fsblock_t new_pblk), \
+	TP_ARGS(ip, lblk, pblk, len, new_pblk))
+
+/* inode/irec events */
+DECLARE_EVENT_CLASS(xfs_inode_irec_class,
+	TP_PROTO(struct xfs_inode *ip, struct xfs_bmbt_irec *irec),
+	TP_ARGS(ip, irec),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_ino_t, ino)
+		__field(xfs_fileoff_t, lblk)
+		__field(xfs_extlen_t, len)
+		__field(xfs_fsblock_t, pblk)
+	),
+	TP_fast_assign(
+		__entry->dev = VFS_I(ip)->i_sb->s_dev;
+		__entry->ino = ip->i_ino;
+		__entry->lblk = irec->br_startoff;
+		__entry->len = irec->br_blockcount;
+		__entry->pblk = irec->br_startblock;
+	),
+	TP_printk("dev %d:%d ino 0x%llx lblk 0x%llx len 0x%x pblk %llu",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->ino,
+		  __entry->lblk,
+		  __entry->len,
+		  __entry->pblk)
+);
+#define DEFINE_INODE_IREC_EVENT(name) \
+DEFINE_EVENT(xfs_inode_irec_class, name, \
+	TP_PROTO(struct xfs_inode *ip, struct xfs_bmbt_irec *irec), \
+	TP_ARGS(ip, irec))
+
+/* refcount/reflink tracepoint definitions */
+
+/* reflink tracepoints */
+DEFINE_INODE_EVENT(xfs_reflink_set_inode_flag);
+DEFINE_INODE_EVENT(xfs_reflink_unset_inode_flag);
+DEFINE_ITRUNC_EVENT(xfs_reflink_update_inode_size);
+DEFINE_IOMAP_EVENT(xfs_reflink_remap_imap);
+TRACE_EVENT(xfs_reflink_remap_blocks_loop,
+	TP_PROTO(struct xfs_inode *src, xfs_fileoff_t soffset,
+		 xfs_filblks_t len, struct xfs_inode *dest,
+		 xfs_fileoff_t doffset),
+	TP_ARGS(src, soffset, len, dest, doffset),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_ino_t, src_ino)
+		__field(xfs_fileoff_t, src_lblk)
+		__field(xfs_filblks_t, len)
+		__field(xfs_ino_t, dest_ino)
+		__field(xfs_fileoff_t, dest_lblk)
+	),
+	TP_fast_assign(
+		__entry->dev = VFS_I(src)->i_sb->s_dev;
+		__entry->src_ino = src->i_ino;
+		__entry->src_lblk = soffset;
+		__entry->len = len;
+		__entry->dest_ino = dest->i_ino;
+		__entry->dest_lblk = doffset;
+	),
+	TP_printk("dev %d:%d len 0x%llx "
+		  "ino 0x%llx offset 0x%llx blocks -> "
+		  "ino 0x%llx offset 0x%llx blocks",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->len,
+		  __entry->src_ino,
+		  __entry->src_lblk,
+		  __entry->dest_ino,
+		  __entry->dest_lblk)
+);
+TRACE_EVENT(xfs_reflink_punch_range,
+	TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t lblk,
+		 xfs_extlen_t len),
+	TP_ARGS(ip, lblk, len),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_ino_t, ino)
+		__field(xfs_fileoff_t, lblk)
+		__field(xfs_extlen_t, len)
+	),
+	TP_fast_assign(
+		__entry->dev = VFS_I(ip)->i_sb->s_dev;
+		__entry->ino = ip->i_ino;
+		__entry->lblk = lblk;
+		__entry->len = len;
+	),
+	TP_printk("dev %d:%d ino 0x%llx lblk 0x%llx len 0x%x",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->ino,
+		  __entry->lblk,
+		  __entry->len)
+);
+TRACE_EVENT(xfs_reflink_remap,
+	TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t lblk,
+		 xfs_extlen_t len, xfs_fsblock_t new_pblk),
+	TP_ARGS(ip, lblk, len, new_pblk),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_ino_t, ino)
+		__field(xfs_fileoff_t, lblk)
+		__field(xfs_extlen_t, len)
+		__field(xfs_fsblock_t, new_pblk)
+	),
+	TP_fast_assign(
+		__entry->dev = VFS_I(ip)->i_sb->s_dev;
+		__entry->ino = ip->i_ino;
+		__entry->lblk = lblk;
+		__entry->len = len;
+		__entry->new_pblk = new_pblk;
+	),
+	TP_printk("dev %d:%d ino 0x%llx lblk 0x%llx len 0x%x new_pblk %llu",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->ino,
+		  __entry->lblk,
+		  __entry->len,
+		  __entry->new_pblk)
+);
+DEFINE_DOUBLE_IO_EVENT(xfs_reflink_remap_range);
+DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_range_error);
+DEFINE_INODE_ERROR_EVENT(xfs_reflink_set_inode_flag_error);
+DEFINE_INODE_ERROR_EVENT(xfs_reflink_update_inode_size_error);
+DEFINE_INODE_ERROR_EVENT(xfs_reflink_reflink_main_loop_error);
+DEFINE_INODE_ERROR_EVENT(xfs_reflink_read_iomap_error);
+DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_blocks_error);
+DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_extent_error);
+
+/* dedupe tracepoints */
+DEFINE_DOUBLE_IO_EVENT(xfs_reflink_compare_extents);
+DEFINE_INODE_ERROR_EVENT(xfs_reflink_compare_extents_error);
+
+/* ioctl tracepoints */
+DEFINE_DOUBLE_VFS_IO_EVENT(xfs_ioctl_reflink);
+DEFINE_DOUBLE_VFS_IO_EVENT(xfs_ioctl_clone_range);
+DEFINE_DOUBLE_VFS_IO_EVENT(xfs_ioctl_file_extent_same);
+TRACE_EVENT(xfs_ioctl_clone,
+	TP_PROTO(struct inode *src, struct inode *dest),
+	TP_ARGS(src, dest),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(unsigned long, src_ino)
+		__field(loff_t, src_isize)
+		__field(unsigned long, dest_ino)
+		__field(loff_t, dest_isize)
+	),
+	TP_fast_assign(
+		__entry->dev = src->i_sb->s_dev;
+		__entry->src_ino = src->i_ino;
+		__entry->src_isize = i_size_read(src);
+		__entry->dest_ino = dest->i_ino;
+		__entry->dest_isize = i_size_read(dest);
+	),
+	TP_printk("dev %d:%d "
+		  "ino 0x%lx isize 0x%llx -> "
+		  "ino 0x%lx isize 0x%llx\n",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->src_ino,
+		  __entry->src_isize,
+		  __entry->dest_ino,
+		  __entry->dest_isize)
+);
+
+/* unshare tracepoints */
+DEFINE_SIMPLE_IO_EVENT(xfs_reflink_unshare);
+DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cow_eof_block);
+DEFINE_PAGE_EVENT(xfs_reflink_unshare_page);
+DEFINE_INODE_ERROR_EVENT(xfs_reflink_unshare_error);
+DEFINE_INODE_ERROR_EVENT(xfs_reflink_cow_eof_block_error);
+DEFINE_INODE_ERROR_EVENT(xfs_reflink_dirty_page_error);
+
+/* copy on write */
+DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_around_shared);
+DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_alloc);
+DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found);
+DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc);
+
+DEFINE_RW_EVENT(xfs_reflink_reserve_cow_range);
+DEFINE_RW_EVENT(xfs_reflink_allocate_cow_range);
+
+DEFINE_INODE_IREC_EVENT(xfs_reflink_bounce_dio_write);
+DEFINE_IOMAP_EVENT(xfs_reflink_find_cow_mapping);
+DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_irec);
+
+DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range);
+DEFINE_SIMPLE_IO_EVENT(xfs_reflink_end_cow);
+DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap);
+DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap_piece);
+
+DEFINE_INODE_ERROR_EVENT(xfs_reflink_reserve_cow_range_error);
+DEFINE_INODE_ERROR_EVENT(xfs_reflink_allocate_cow_range_error);
+DEFINE_INODE_ERROR_EVENT(xfs_reflink_cancel_cow_range_error);
+DEFINE_INODE_ERROR_EVENT(xfs_reflink_end_cow_error);
+
+DEFINE_COW_EVENT(xfs_reflink_fork_buf);
+DEFINE_COW_EVENT(xfs_reflink_finish_fork_buf);
+DEFINE_INODE_ERROR_EVENT(xfs_reflink_fork_buf_error);
+DEFINE_INODE_ERROR_EVENT(xfs_reflink_finish_fork_buf_error);
+
+DEFINE_INODE_EVENT(xfs_reflink_cancel_pending_cow);
+DEFINE_INODE_IREC_EVENT(xfs_reflink_cancel_cow);
+DEFINE_INODE_ERROR_EVENT(xfs_reflink_cancel_pending_cow_error);
+
+/* rmap swapext tracepoints */
+DEFINE_INODE_IREC_EVENT(xfs_swap_extent_rmap_remap);
+DEFINE_INODE_IREC_EVENT(xfs_swap_extent_rmap_remap_piece);
+DEFINE_INODE_ERROR_EVENT(xfs_swap_extent_rmap_error);
+
 #endif /* _TRACE_XFS_H */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index e2bf86a..61b7fbd 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -36,6 +36,11 @@
 struct xfs_rud_log_item;
 struct xfs_rui_log_item;
 struct xfs_btree_cur;
+struct xfs_cui_log_item;
+struct xfs_cud_log_item;
+struct xfs_defer_ops;
+struct xfs_bui_log_item;
+struct xfs_bud_log_item;
 
 typedef struct xfs_log_item {
 	struct list_head		li_ail;		/* AIL pointers */
@@ -248,4 +253,28 @@
 		xfs_fsblock_t startblock, xfs_filblks_t blockcount,
 		xfs_exntst_t state, struct xfs_btree_cur **pcur);
 
+/* refcount updates */
+enum xfs_refcount_intent_type;
+
+void xfs_refcount_update_init_defer_op(void);
+struct xfs_cud_log_item *xfs_trans_get_cud(struct xfs_trans *tp,
+		struct xfs_cui_log_item *cuip);
+int xfs_trans_log_finish_refcount_update(struct xfs_trans *tp,
+		struct xfs_cud_log_item *cudp, struct xfs_defer_ops *dfops,
+		enum xfs_refcount_intent_type type, xfs_fsblock_t startblock,
+		xfs_extlen_t blockcount, xfs_fsblock_t *new_fsb,
+		xfs_extlen_t *new_len, struct xfs_btree_cur **pcur);
+
+/* mapping updates */
+enum xfs_bmap_intent_type;
+
+void xfs_bmap_update_init_defer_op(void);
+struct xfs_bud_log_item *xfs_trans_get_bud(struct xfs_trans *tp,
+		struct xfs_bui_log_item *buip);
+int xfs_trans_log_finish_bmap_update(struct xfs_trans *tp,
+		struct xfs_bud_log_item *rudp, struct xfs_defer_ops *dfops,
+		enum xfs_bmap_intent_type type, struct xfs_inode *ip,
+		int whichfork, xfs_fileoff_t startoff, xfs_fsblock_t startblock,
+		xfs_filblks_t blockcount, xfs_exntst_t state);
+
 #endif	/* __XFS_TRANS_H__ */
diff --git a/fs/xfs/xfs_trans_bmap.c b/fs/xfs/xfs_trans_bmap.c
new file mode 100644
index 0000000..6408e7d
--- /dev/null
+++ b/fs/xfs/xfs_trans_bmap.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2016 Oracle.  All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_bmap_item.h"
+#include "xfs_alloc.h"
+#include "xfs_bmap.h"
+#include "xfs_inode.h"
+
+/*
+ * This routine is called to allocate a "bmap update done"
+ * log item.
+ */
+struct xfs_bud_log_item *
+xfs_trans_get_bud(
+	struct xfs_trans		*tp,
+	struct xfs_bui_log_item		*buip)
+{
+	struct xfs_bud_log_item		*budp;
+
+	budp = xfs_bud_init(tp->t_mountp, buip);
+	xfs_trans_add_item(tp, &budp->bud_item);
+	return budp;
+}
+
+/*
+ * Finish an bmap update and log it to the BUD. Note that the
+ * transaction is marked dirty regardless of whether the bmap update
+ * succeeds or fails to support the BUI/BUD lifecycle rules.
+ */
+int
+xfs_trans_log_finish_bmap_update(
+	struct xfs_trans		*tp,
+	struct xfs_bud_log_item		*budp,
+	struct xfs_defer_ops		*dop,
+	enum xfs_bmap_intent_type	type,
+	struct xfs_inode		*ip,
+	int				whichfork,
+	xfs_fileoff_t			startoff,
+	xfs_fsblock_t			startblock,
+	xfs_filblks_t			blockcount,
+	xfs_exntst_t			state)
+{
+	int				error;
+
+	error = xfs_bmap_finish_one(tp, dop, ip, type, whichfork, startoff,
+			startblock, blockcount, state);
+
+	/*
+	 * Mark the transaction dirty, even on error. This ensures the
+	 * transaction is aborted, which:
+	 *
+	 * 1.) releases the BUI and frees the BUD
+	 * 2.) shuts down the filesystem
+	 */
+	tp->t_flags |= XFS_TRANS_DIRTY;
+	budp->bud_item.li_desc->lid_flags |= XFS_LID_DIRTY;
+
+	return error;
+}
+
+/* Sort bmap intents by inode. */
+static int
+xfs_bmap_update_diff_items(
+	void				*priv,
+	struct list_head		*a,
+	struct list_head		*b)
+{
+	struct xfs_bmap_intent		*ba;
+	struct xfs_bmap_intent		*bb;
+
+	ba = container_of(a, struct xfs_bmap_intent, bi_list);
+	bb = container_of(b, struct xfs_bmap_intent, bi_list);
+	return ba->bi_owner->i_ino - bb->bi_owner->i_ino;
+}
+
+/* Get an BUI. */
+STATIC void *
+xfs_bmap_update_create_intent(
+	struct xfs_trans		*tp,
+	unsigned int			count)
+{
+	struct xfs_bui_log_item		*buip;
+
+	ASSERT(count == XFS_BUI_MAX_FAST_EXTENTS);
+	ASSERT(tp != NULL);
+
+	buip = xfs_bui_init(tp->t_mountp);
+	ASSERT(buip != NULL);
+
+	/*
+	 * Get a log_item_desc to point at the new item.
+	 */
+	xfs_trans_add_item(tp, &buip->bui_item);
+	return buip;
+}
+
+/* Set the map extent flags for this mapping. */
+static void
+xfs_trans_set_bmap_flags(
+	struct xfs_map_extent		*bmap,
+	enum xfs_bmap_intent_type	type,
+	int				whichfork,
+	xfs_exntst_t			state)
+{
+	bmap->me_flags = 0;
+	switch (type) {
+	case XFS_BMAP_MAP:
+	case XFS_BMAP_UNMAP:
+		bmap->me_flags = type;
+		break;
+	default:
+		ASSERT(0);
+	}
+	if (state == XFS_EXT_UNWRITTEN)
+		bmap->me_flags |= XFS_BMAP_EXTENT_UNWRITTEN;
+	if (whichfork == XFS_ATTR_FORK)
+		bmap->me_flags |= XFS_BMAP_EXTENT_ATTR_FORK;
+}
+
+/* Log bmap updates in the intent item. */
+STATIC void
+xfs_bmap_update_log_item(
+	struct xfs_trans		*tp,
+	void				*intent,
+	struct list_head		*item)
+{
+	struct xfs_bui_log_item		*buip = intent;
+	struct xfs_bmap_intent		*bmap;
+	uint				next_extent;
+	struct xfs_map_extent		*map;
+
+	bmap = container_of(item, struct xfs_bmap_intent, bi_list);
+
+	tp->t_flags |= XFS_TRANS_DIRTY;
+	buip->bui_item.li_desc->lid_flags |= XFS_LID_DIRTY;
+
+	/*
+	 * atomic_inc_return gives us the value after the increment;
+	 * we want to use it as an array index so we need to subtract 1 from
+	 * it.
+	 */
+	next_extent = atomic_inc_return(&buip->bui_next_extent) - 1;
+	ASSERT(next_extent < buip->bui_format.bui_nextents);
+	map = &buip->bui_format.bui_extents[next_extent];
+	map->me_owner = bmap->bi_owner->i_ino;
+	map->me_startblock = bmap->bi_bmap.br_startblock;
+	map->me_startoff = bmap->bi_bmap.br_startoff;
+	map->me_len = bmap->bi_bmap.br_blockcount;
+	xfs_trans_set_bmap_flags(map, bmap->bi_type, bmap->bi_whichfork,
+			bmap->bi_bmap.br_state);
+}
+
+/* Get an BUD so we can process all the deferred rmap updates. */
+STATIC void *
+xfs_bmap_update_create_done(
+	struct xfs_trans		*tp,
+	void				*intent,
+	unsigned int			count)
+{
+	return xfs_trans_get_bud(tp, intent);
+}
+
+/* Process a deferred rmap update. */
+STATIC int
+xfs_bmap_update_finish_item(
+	struct xfs_trans		*tp,
+	struct xfs_defer_ops		*dop,
+	struct list_head		*item,
+	void				*done_item,
+	void				**state)
+{
+	struct xfs_bmap_intent		*bmap;
+	int				error;
+
+	bmap = container_of(item, struct xfs_bmap_intent, bi_list);
+	error = xfs_trans_log_finish_bmap_update(tp, done_item, dop,
+			bmap->bi_type,
+			bmap->bi_owner, bmap->bi_whichfork,
+			bmap->bi_bmap.br_startoff,
+			bmap->bi_bmap.br_startblock,
+			bmap->bi_bmap.br_blockcount,
+			bmap->bi_bmap.br_state);
+	kmem_free(bmap);
+	return error;
+}
+
+/* Abort all pending BUIs. */
+STATIC void
+xfs_bmap_update_abort_intent(
+	void				*intent)
+{
+	xfs_bui_release(intent);
+}
+
+/* Cancel a deferred rmap update. */
+STATIC void
+xfs_bmap_update_cancel_item(
+	struct list_head		*item)
+{
+	struct xfs_bmap_intent		*bmap;
+
+	bmap = container_of(item, struct xfs_bmap_intent, bi_list);
+	kmem_free(bmap);
+}
+
+static const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
+	.type		= XFS_DEFER_OPS_TYPE_BMAP,
+	.max_items	= XFS_BUI_MAX_FAST_EXTENTS,
+	.diff_items	= xfs_bmap_update_diff_items,
+	.create_intent	= xfs_bmap_update_create_intent,
+	.abort_intent	= xfs_bmap_update_abort_intent,
+	.log_item	= xfs_bmap_update_log_item,
+	.create_done	= xfs_bmap_update_create_done,
+	.finish_item	= xfs_bmap_update_finish_item,
+	.cancel_item	= xfs_bmap_update_cancel_item,
+};
+
+/* Register the deferred op type. */
+void
+xfs_bmap_update_init_defer_op(void)
+{
+	xfs_defer_init_op_type(&xfs_bmap_update_defer_type);
+}
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index 11a3af0..dab8daa 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -73,7 +73,7 @@
 	ASSERT(tp);
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
-	tv = current_fs_time(inode->i_sb);
+	tv = current_time(inode);
 
 	if (flags & XFS_ICHGTIME_MOD)
 		inode->i_mtime = tv;
diff --git a/fs/xfs/xfs_trans_refcount.c b/fs/xfs/xfs_trans_refcount.c
new file mode 100644
index 0000000..94c1877
--- /dev/null
+++ b/fs/xfs/xfs_trans_refcount.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2016 Oracle.  All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_refcount_item.h"
+#include "xfs_alloc.h"
+#include "xfs_refcount.h"
+
+/*
+ * This routine is called to allocate a "refcount update done"
+ * log item.
+ */
+struct xfs_cud_log_item *
+xfs_trans_get_cud(
+	struct xfs_trans		*tp,
+	struct xfs_cui_log_item		*cuip)
+{
+	struct xfs_cud_log_item		*cudp;
+
+	cudp = xfs_cud_init(tp->t_mountp, cuip);
+	xfs_trans_add_item(tp, &cudp->cud_item);
+	return cudp;
+}
+
+/*
+ * Finish an refcount update and log it to the CUD. Note that the
+ * transaction is marked dirty regardless of whether the refcount
+ * update succeeds or fails to support the CUI/CUD lifecycle rules.
+ */
+int
+xfs_trans_log_finish_refcount_update(
+	struct xfs_trans		*tp,
+	struct xfs_cud_log_item		*cudp,
+	struct xfs_defer_ops		*dop,
+	enum xfs_refcount_intent_type	type,
+	xfs_fsblock_t			startblock,
+	xfs_extlen_t			blockcount,
+	xfs_fsblock_t			*new_fsb,
+	xfs_extlen_t			*new_len,
+	struct xfs_btree_cur		**pcur)
+{
+	int				error;
+
+	error = xfs_refcount_finish_one(tp, dop, type, startblock,
+			blockcount, new_fsb, new_len, pcur);
+
+	/*
+	 * Mark the transaction dirty, even on error. This ensures the
+	 * transaction is aborted, which:
+	 *
+	 * 1.) releases the CUI and frees the CUD
+	 * 2.) shuts down the filesystem
+	 */
+	tp->t_flags |= XFS_TRANS_DIRTY;
+	cudp->cud_item.li_desc->lid_flags |= XFS_LID_DIRTY;
+
+	return error;
+}
+
+/* Sort refcount intents by AG. */
+static int
+xfs_refcount_update_diff_items(
+	void				*priv,
+	struct list_head		*a,
+	struct list_head		*b)
+{
+	struct xfs_mount		*mp = priv;
+	struct xfs_refcount_intent	*ra;
+	struct xfs_refcount_intent	*rb;
+
+	ra = container_of(a, struct xfs_refcount_intent, ri_list);
+	rb = container_of(b, struct xfs_refcount_intent, ri_list);
+	return  XFS_FSB_TO_AGNO(mp, ra->ri_startblock) -
+		XFS_FSB_TO_AGNO(mp, rb->ri_startblock);
+}
+
+/* Get an CUI. */
+STATIC void *
+xfs_refcount_update_create_intent(
+	struct xfs_trans		*tp,
+	unsigned int			count)
+{
+	struct xfs_cui_log_item		*cuip;
+
+	ASSERT(tp != NULL);
+	ASSERT(count > 0);
+
+	cuip = xfs_cui_init(tp->t_mountp, count);
+	ASSERT(cuip != NULL);
+
+	/*
+	 * Get a log_item_desc to point at the new item.
+	 */
+	xfs_trans_add_item(tp, &cuip->cui_item);
+	return cuip;
+}
+
+/* Set the phys extent flags for this reverse mapping. */
+static void
+xfs_trans_set_refcount_flags(
+	struct xfs_phys_extent		*refc,
+	enum xfs_refcount_intent_type	type)
+{
+	refc->pe_flags = 0;
+	switch (type) {
+	case XFS_REFCOUNT_INCREASE:
+	case XFS_REFCOUNT_DECREASE:
+	case XFS_REFCOUNT_ALLOC_COW:
+	case XFS_REFCOUNT_FREE_COW:
+		refc->pe_flags |= type;
+		break;
+	default:
+		ASSERT(0);
+	}
+}
+
+/* Log refcount updates in the intent item. */
+STATIC void
+xfs_refcount_update_log_item(
+	struct xfs_trans		*tp,
+	void				*intent,
+	struct list_head		*item)
+{
+	struct xfs_cui_log_item		*cuip = intent;
+	struct xfs_refcount_intent	*refc;
+	uint				next_extent;
+	struct xfs_phys_extent		*ext;
+
+	refc = container_of(item, struct xfs_refcount_intent, ri_list);
+
+	tp->t_flags |= XFS_TRANS_DIRTY;
+	cuip->cui_item.li_desc->lid_flags |= XFS_LID_DIRTY;
+
+	/*
+	 * atomic_inc_return gives us the value after the increment;
+	 * we want to use it as an array index so we need to subtract 1 from
+	 * it.
+	 */
+	next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
+	ASSERT(next_extent < cuip->cui_format.cui_nextents);
+	ext = &cuip->cui_format.cui_extents[next_extent];
+	ext->pe_startblock = refc->ri_startblock;
+	ext->pe_len = refc->ri_blockcount;
+	xfs_trans_set_refcount_flags(ext, refc->ri_type);
+}
+
+/* Get an CUD so we can process all the deferred refcount updates. */
+STATIC void *
+xfs_refcount_update_create_done(
+	struct xfs_trans		*tp,
+	void				*intent,
+	unsigned int			count)
+{
+	return xfs_trans_get_cud(tp, intent);
+}
+
+/* Process a deferred refcount update. */
+STATIC int
+xfs_refcount_update_finish_item(
+	struct xfs_trans		*tp,
+	struct xfs_defer_ops		*dop,
+	struct list_head		*item,
+	void				*done_item,
+	void				**state)
+{
+	struct xfs_refcount_intent	*refc;
+	xfs_fsblock_t			new_fsb;
+	xfs_extlen_t			new_aglen;
+	int				error;
+
+	refc = container_of(item, struct xfs_refcount_intent, ri_list);
+	error = xfs_trans_log_finish_refcount_update(tp, done_item, dop,
+			refc->ri_type,
+			refc->ri_startblock,
+			refc->ri_blockcount,
+			&new_fsb, &new_aglen,
+			(struct xfs_btree_cur **)state);
+	/* Did we run out of reservation?  Requeue what we didn't finish. */
+	if (!error && new_aglen > 0) {
+		ASSERT(refc->ri_type == XFS_REFCOUNT_INCREASE ||
+		       refc->ri_type == XFS_REFCOUNT_DECREASE);
+		refc->ri_startblock = new_fsb;
+		refc->ri_blockcount = new_aglen;
+		return -EAGAIN;
+	}
+	kmem_free(refc);
+	return error;
+}
+
+/* Clean up after processing deferred refcounts. */
+STATIC void
+xfs_refcount_update_finish_cleanup(
+	struct xfs_trans	*tp,
+	void			*state,
+	int			error)
+{
+	struct xfs_btree_cur	*rcur = state;
+
+	xfs_refcount_finish_one_cleanup(tp, rcur, error);
+}
+
+/* Abort all pending CUIs. */
+STATIC void
+xfs_refcount_update_abort_intent(
+	void				*intent)
+{
+	xfs_cui_release(intent);
+}
+
+/* Cancel a deferred refcount update. */
+STATIC void
+xfs_refcount_update_cancel_item(
+	struct list_head		*item)
+{
+	struct xfs_refcount_intent	*refc;
+
+	refc = container_of(item, struct xfs_refcount_intent, ri_list);
+	kmem_free(refc);
+}
+
+static const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
+	.type		= XFS_DEFER_OPS_TYPE_REFCOUNT,
+	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
+	.diff_items	= xfs_refcount_update_diff_items,
+	.create_intent	= xfs_refcount_update_create_intent,
+	.abort_intent	= xfs_refcount_update_abort_intent,
+	.log_item	= xfs_refcount_update_log_item,
+	.create_done	= xfs_refcount_update_create_done,
+	.finish_item	= xfs_refcount_update_finish_item,
+	.finish_cleanup = xfs_refcount_update_finish_cleanup,
+	.cancel_item	= xfs_refcount_update_cancel_item,
+};
+
+/* Register the deferred op type. */
+void
+xfs_refcount_update_init_defer_op(void)
+{
+	xfs_defer_init_op_type(&xfs_refcount_update_defer_type);
+}
diff --git a/fs/xfs/xfs_trans_rmap.c b/fs/xfs/xfs_trans_rmap.c
index 5a50ef8..9ead064 100644
--- a/fs/xfs/xfs_trans_rmap.c
+++ b/fs/xfs/xfs_trans_rmap.c
@@ -48,12 +48,21 @@
 	case XFS_RMAP_MAP:
 		rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
 		break;
+	case XFS_RMAP_MAP_SHARED:
+		rmap->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
+		break;
 	case XFS_RMAP_UNMAP:
 		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
 		break;
+	case XFS_RMAP_UNMAP_SHARED:
+		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
+		break;
 	case XFS_RMAP_CONVERT:
 		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
 		break;
+	case XFS_RMAP_CONVERT_SHARED:
+		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
+		break;
 	case XFS_RMAP_ALLOC:
 		rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
 		break;
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
new file mode 100644
index 0000000..43199a0
--- /dev/null
+++ b/include/asm-generic/export.h
@@ -0,0 +1,94 @@
+#ifndef __ASM_GENERIC_EXPORT_H
+#define __ASM_GENERIC_EXPORT_H
+
+#ifndef KSYM_FUNC
+#define KSYM_FUNC(x) x
+#endif
+#ifdef CONFIG_64BIT
+#define __put .quad
+#ifndef KSYM_ALIGN
+#define KSYM_ALIGN 8
+#endif
+#ifndef KCRC_ALIGN
+#define KCRC_ALIGN 8
+#endif
+#else
+#define __put .long
+#ifndef KSYM_ALIGN
+#define KSYM_ALIGN 4
+#endif
+#ifndef KCRC_ALIGN
+#define KCRC_ALIGN 4
+#endif
+#endif
+
+#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
+#define KSYM(name) _##name
+#else
+#define KSYM(name) name
+#endif
+
+/*
+ * note on .section use: @progbits vs %progbits nastiness doesn't matter,
+ * since we immediately emit into those sections anyway.
+ */
+.macro ___EXPORT_SYMBOL name,val,sec
+#ifdef CONFIG_MODULES
+	.globl KSYM(__ksymtab_\name)
+	.section ___ksymtab\sec+\name,"a"
+	.balign KSYM_ALIGN
+KSYM(__ksymtab_\name):
+	__put \val, KSYM(__kstrtab_\name)
+	.previous
+	.section __ksymtab_strings,"a"
+KSYM(__kstrtab_\name):
+#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
+	.asciz "_\name"
+#else
+	.asciz "\name"
+#endif
+	.previous
+#ifdef CONFIG_MODVERSIONS
+	.section ___kcrctab\sec+\name,"a"
+	.balign KCRC_ALIGN
+KSYM(__kcrctab_\name):
+	__put KSYM(__crc_\name)
+	.weak KSYM(__crc_\name)
+	.previous
+#endif
+#endif
+.endm
+#undef __put
+
+#if defined(__KSYM_DEPS__)
+
+#define __EXPORT_SYMBOL(sym, val, sec)	=== __KSYM_##sym ===
+
+#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+
+#include <linux/kconfig.h>
+#include <generated/autoksyms.h>
+
+#define __EXPORT_SYMBOL(sym, val, sec)				\
+	__cond_export_sym(sym, val, sec, config_enabled(__KSYM_##sym))
+#define __cond_export_sym(sym, val, sec, conf)			\
+	___cond_export_sym(sym, val, sec, conf)
+#define ___cond_export_sym(sym, val, sec, enabled)		\
+	__cond_export_sym_##enabled(sym, val, sec)
+#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
+#define __cond_export_sym_0(sym, val, sec) /* nothing */
+
+#else
+#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
+#endif
+
+#define EXPORT_SYMBOL(name)					\
+	__EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),)
+#define EXPORT_SYMBOL_GPL(name) 				\
+	__EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl)
+#define EXPORT_DATA_SYMBOL(name)				\
+	__EXPORT_SYMBOL(name, KSYM(name),)
+#define EXPORT_DATA_SYMBOL_GPL(name)				\
+	__EXPORT_SYMBOL(name, KSYM(name),_gpl)
+
+#endif
diff --git a/include/asm-generic/libata-portmap.h b/include/asm-generic/libata-portmap.h
deleted file mode 100644
index cf14f2f..0000000
--- a/include/asm-generic/libata-portmap.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_GENERIC_LIBATA_PORTMAP_H
-#define __ASM_GENERIC_LIBATA_PORTMAP_H
-
-#define ATA_PRIMARY_IRQ(dev)	14
-#define ATA_SECONDARY_IRQ(dev)	15
-
-#endif
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 4d9f233..40e8870 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -65,6 +65,11 @@
 #define PER_CPU_DEF_ATTRIBUTES
 #endif
 
+#define raw_cpu_generic_read(pcp)					\
+({									\
+	*raw_cpu_ptr(&(pcp));						\
+})
+
 #define raw_cpu_generic_to_op(pcp, val, op)				\
 do {									\
 	*raw_cpu_ptr(&(pcp)) op val;					\
@@ -72,34 +77,39 @@
 
 #define raw_cpu_generic_add_return(pcp, val)				\
 ({									\
-	raw_cpu_add(pcp, val);						\
-	raw_cpu_read(pcp);						\
+	typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp));			\
+									\
+	*__p += val;							\
+	*__p;								\
 })
 
 #define raw_cpu_generic_xchg(pcp, nval)					\
 ({									\
+	typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp));			\
 	typeof(pcp) __ret;						\
-	__ret = raw_cpu_read(pcp);					\
-	raw_cpu_write(pcp, nval);					\
+	__ret = *__p;							\
+	*__p = nval;							\
 	__ret;								\
 })
 
 #define raw_cpu_generic_cmpxchg(pcp, oval, nval)			\
 ({									\
+	typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp));			\
 	typeof(pcp) __ret;						\
-	__ret = raw_cpu_read(pcp);					\
+	__ret = *__p;							\
 	if (__ret == (oval))						\
-		raw_cpu_write(pcp, nval);				\
+		*__p = nval;						\
 	__ret;								\
 })
 
 #define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
 ({									\
+	typeof(&(pcp1)) __p1 = raw_cpu_ptr(&(pcp1));			\
+	typeof(&(pcp2)) __p2 = raw_cpu_ptr(&(pcp2));			\
 	int __ret = 0;							\
-	if (raw_cpu_read(pcp1) == (oval1) &&				\
-			 raw_cpu_read(pcp2)  == (oval2)) {		\
-		raw_cpu_write(pcp1, nval1);				\
-		raw_cpu_write(pcp2, nval2);				\
+	if (*__p1 == (oval1) && *__p2  == (oval2)) {			\
+		*__p1 = nval1;						\
+		*__p2 = nval2;						\
 		__ret = 1;						\
 	}								\
 	(__ret);							\
@@ -109,7 +119,7 @@
 ({									\
 	typeof(pcp) __ret;						\
 	preempt_disable();						\
-	__ret = *this_cpu_ptr(&(pcp));					\
+	__ret = raw_cpu_generic_read(pcp);				\
 	preempt_enable();						\
 	__ret;								\
 })
@@ -118,17 +128,17 @@
 do {									\
 	unsigned long __flags;						\
 	raw_local_irq_save(__flags);					\
-	*raw_cpu_ptr(&(pcp)) op val;					\
+	raw_cpu_generic_to_op(pcp, val, op);				\
 	raw_local_irq_restore(__flags);					\
 } while (0)
 
+
 #define this_cpu_generic_add_return(pcp, val)				\
 ({									\
 	typeof(pcp) __ret;						\
 	unsigned long __flags;						\
 	raw_local_irq_save(__flags);					\
-	raw_cpu_add(pcp, val);						\
-	__ret = raw_cpu_read(pcp);					\
+	__ret = raw_cpu_generic_add_return(pcp, val);			\
 	raw_local_irq_restore(__flags);					\
 	__ret;								\
 })
@@ -138,8 +148,7 @@
 	typeof(pcp) __ret;						\
 	unsigned long __flags;						\
 	raw_local_irq_save(__flags);					\
-	__ret = raw_cpu_read(pcp);					\
-	raw_cpu_write(pcp, nval);					\
+	__ret = raw_cpu_generic_xchg(pcp, nval);			\
 	raw_local_irq_restore(__flags);					\
 	__ret;								\
 })
@@ -149,9 +158,7 @@
 	typeof(pcp) __ret;						\
 	unsigned long __flags;						\
 	raw_local_irq_save(__flags);					\
-	__ret = raw_cpu_read(pcp);					\
-	if (__ret == (oval))						\
-		raw_cpu_write(pcp, nval);				\
+	__ret = raw_cpu_generic_cmpxchg(pcp, oval, nval);		\
 	raw_local_irq_restore(__flags);					\
 	__ret;								\
 })
@@ -168,16 +175,16 @@
 })
 
 #ifndef raw_cpu_read_1
-#define raw_cpu_read_1(pcp)		(*raw_cpu_ptr(&(pcp)))
+#define raw_cpu_read_1(pcp)		raw_cpu_generic_read(pcp)
 #endif
 #ifndef raw_cpu_read_2
-#define raw_cpu_read_2(pcp)		(*raw_cpu_ptr(&(pcp)))
+#define raw_cpu_read_2(pcp)		raw_cpu_generic_read(pcp)
 #endif
 #ifndef raw_cpu_read_4
-#define raw_cpu_read_4(pcp)		(*raw_cpu_ptr(&(pcp)))
+#define raw_cpu_read_4(pcp)		raw_cpu_generic_read(pcp)
 #endif
 #ifndef raw_cpu_read_8
-#define raw_cpu_read_8(pcp)		(*raw_cpu_ptr(&(pcp)))
+#define raw_cpu_read_8(pcp)		raw_cpu_generic_read(pcp)
 #endif
 
 #ifndef raw_cpu_write_1
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 6df9b07..cc6bb31 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -69,10 +69,6 @@
 	unsigned long insn, fixup;
 };
 
-/* Returns 0 if exception not found and fixup otherwise.  */
-extern unsigned long search_exception_table(unsigned long);
-
-
 /*
  * architectures with an MMU should override these two
  */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 3e42bcd..3074796 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -196,9 +196,14 @@
 	*(.dtb.init.rodata)						\
 	VMLINUX_SYMBOL(__dtb_end) = .;
 
-/* .data section */
+/*
+ * .data section
+ * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections generates
+ * .data.identifier which needs to be pulled in with .data, but don't want to
+ * pull in .data..stuff which has its own requirements. Same for bss.
+ */
 #define DATA_DATA							\
-	*(.data)							\
+	*(.data .data.[0-9a-zA-Z_]*)					\
 	*(.ref.data)							\
 	*(.data..shared_aligned) /* percpu related */			\
 	MEM_KEEP(init.data)						\
@@ -320,76 +325,76 @@
 	/* Kernel symbol table: Normal symbols */			\
 	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
-		*(SORT(___ksymtab+*))					\
+		KEEP(*(SORT(___ksymtab+*)))				\
 		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
 	}								\
 									\
 	/* Kernel symbol table: GPL-only symbols */			\
 	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
-		*(SORT(___ksymtab_gpl+*))				\
+		KEEP(*(SORT(___ksymtab_gpl+*)))				\
 		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
 	}								\
 									\
 	/* Kernel symbol table: Normal unused symbols */		\
 	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
-		*(SORT(___ksymtab_unused+*))				\
+		KEEP(*(SORT(___ksymtab_unused+*)))			\
 		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
 	}								\
 									\
 	/* Kernel symbol table: GPL-only unused symbols */		\
 	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
-		*(SORT(___ksymtab_unused_gpl+*))			\
+		KEEP(*(SORT(___ksymtab_unused_gpl+*)))			\
 		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
 	}								\
 									\
 	/* Kernel symbol table: GPL-future-only symbols */		\
 	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
-		*(SORT(___ksymtab_gpl_future+*))			\
+		KEEP(*(SORT(___ksymtab_gpl_future+*)))			\
 		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
 	}								\
 									\
 	/* Kernel symbol table: Normal symbols */			\
 	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
-		*(SORT(___kcrctab+*))					\
+		KEEP(*(SORT(___kcrctab+*)))				\
 		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
 	}								\
 									\
 	/* Kernel symbol table: GPL-only symbols */			\
 	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
-		*(SORT(___kcrctab_gpl+*))				\
+		KEEP(*(SORT(___kcrctab_gpl+*)))				\
 		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
 	}								\
 									\
 	/* Kernel symbol table: Normal unused symbols */		\
 	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
-		*(SORT(___kcrctab_unused+*))				\
+		KEEP(*(SORT(___kcrctab_unused+*)))			\
 		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
 	}								\
 									\
 	/* Kernel symbol table: GPL-only unused symbols */		\
 	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
-		*(SORT(___kcrctab_unused_gpl+*))			\
+		KEEP(*(SORT(___kcrctab_unused_gpl+*)))			\
 		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
 	}								\
 									\
 	/* Kernel symbol table: GPL-future-only symbols */		\
 	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
-		*(SORT(___kcrctab_gpl_future+*))			\
+		KEEP(*(SORT(___kcrctab_gpl_future+*)))			\
 		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
 	}								\
 									\
 	/* Kernel symbol table: strings */				\
         __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
-		*(__ksymtab_strings)					\
+		KEEP(*(__ksymtab_strings))				\
 	}								\
 									\
 	/* __*init sections */						\
@@ -424,12 +429,17 @@
 #define SECURITY_INIT							\
 	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
-		*(.security_initcall.init) 				\
+		KEEP(*(.security_initcall.init))			\
 		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
 	}
 
 /* .text section. Map to function alignment to avoid address changes
- * during second ld run in second ld pass when generating System.map */
+ * during second ld run in second ld pass when generating System.map
+ * LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates
+ * .text.identifier which needs to be pulled in with .text , but some
+ * architectures define .text.foo which is not intended to be pulled in here.
+ * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have
+ * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */
 #define TEXT_TEXT							\
 		ALIGN_FUNCTION();					\
 		*(.text.hot .text .text.fixup .text.unlikely)		\
@@ -533,6 +543,7 @@
 
 /* init and exit section handling */
 #define INIT_DATA							\
+	KEEP(*(SORT(___kentry+*)))					\
 	*(.init.data)							\
 	MEM_DISCARD(init.data)						\
 	KERNEL_CTORS()							\
@@ -599,7 +610,7 @@
 		BSS_FIRST_SECTIONS					\
 		*(.bss..page_aligned)					\
 		*(.dynbss)						\
-		*(.bss)							\
+		*(.bss .bss.[0-9a-zA-Z_]*)				\
 		*(COMMON)						\
 	}
 
@@ -682,12 +693,12 @@
 
 #define INIT_CALLS_LEVEL(level)						\
 		VMLINUX_SYMBOL(__initcall##level##_start) = .;		\
-		*(.initcall##level##.init)				\
-		*(.initcall##level##s.init)				\
+		KEEP(*(.initcall##level##.init))			\
+		KEEP(*(.initcall##level##s.init))			\
 
 #define INIT_CALLS							\
 		VMLINUX_SYMBOL(__initcall_start) = .;			\
-		*(.initcallearly.init)					\
+		KEEP(*(.initcallearly.init))				\
 		INIT_CALLS_LEVEL(0)					\
 		INIT_CALLS_LEVEL(1)					\
 		INIT_CALLS_LEVEL(2)					\
@@ -701,21 +712,21 @@
 
 #define CON_INITCALL							\
 		VMLINUX_SYMBOL(__con_initcall_start) = .;		\
-		*(.con_initcall.init)					\
+		KEEP(*(.con_initcall.init))				\
 		VMLINUX_SYMBOL(__con_initcall_end) = .;
 
 #define SECURITY_INITCALL						\
 		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
-		*(.security_initcall.init)				\
+		KEEP(*(.security_initcall.init))			\
 		VMLINUX_SYMBOL(__security_initcall_end) = .;
 
 #ifdef CONFIG_BLK_DEV_INITRD
 #define INIT_RAM_FS							\
 	. = ALIGN(4);							\
 	VMLINUX_SYMBOL(__initramfs_start) = .;				\
-	*(.init.ramfs)							\
+	KEEP(*(.init.ramfs))						\
 	. = ALIGN(8);							\
-	*(.init.ramfs.info)
+	KEEP(*(.init.ramfs.info))
 #else
 #define INIT_RAM_FS
 #endif
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 8637cdf..404e955 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -15,7 +15,6 @@
 #include <linux/crypto.h>
 #include <linux/list.h>
 #include <linux/kernel.h>
-#include <linux/kthread.h>
 #include <linux/skbuff.h>
 
 struct crypto_aead;
@@ -129,75 +128,6 @@
 	unsigned int		blocksize;
 };
 
-#define ENGINE_NAME_LEN	30
-/*
- * struct crypto_engine - crypto hardware engine
- * @name: the engine name
- * @idling: the engine is entering idle state
- * @busy: request pump is busy
- * @running: the engine is on working
- * @cur_req_prepared: current request is prepared
- * @list: link with the global crypto engine list
- * @queue_lock: spinlock to syncronise access to request queue
- * @queue: the crypto queue of the engine
- * @rt: whether this queue is set to run as a realtime task
- * @prepare_crypt_hardware: a request will soon arrive from the queue
- * so the subsystem requests the driver to prepare the hardware
- * by issuing this call
- * @unprepare_crypt_hardware: there are currently no more requests on the
- * queue so the subsystem notifies the driver that it may relax the
- * hardware by issuing this call
- * @prepare_request: do some prepare if need before handle the current request
- * @unprepare_request: undo any work done by prepare_message()
- * @crypt_one_request: do encryption for current request
- * @kworker: thread struct for request pump
- * @kworker_task: pointer to task for request pump kworker thread
- * @pump_requests: work struct for scheduling work to the request pump
- * @priv_data: the engine private data
- * @cur_req: the current request which is on processing
- */
-struct crypto_engine {
-	char			name[ENGINE_NAME_LEN];
-	bool			idling;
-	bool			busy;
-	bool			running;
-	bool			cur_req_prepared;
-
-	struct list_head	list;
-	spinlock_t		queue_lock;
-	struct crypto_queue	queue;
-
-	bool			rt;
-
-	int (*prepare_crypt_hardware)(struct crypto_engine *engine);
-	int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
-
-	int (*prepare_request)(struct crypto_engine *engine,
-			       struct ablkcipher_request *req);
-	int (*unprepare_request)(struct crypto_engine *engine,
-				 struct ablkcipher_request *req);
-	int (*crypt_one_request)(struct crypto_engine *engine,
-				 struct ablkcipher_request *req);
-
-	struct kthread_worker           kworker;
-	struct task_struct              *kworker_task;
-	struct kthread_work             pump_requests;
-
-	void				*priv_data;
-	struct ablkcipher_request	*cur_req;
-};
-
-int crypto_transfer_request(struct crypto_engine *engine,
-			    struct ablkcipher_request *req, bool need_pump);
-int crypto_transfer_request_to_engine(struct crypto_engine *engine,
-				      struct ablkcipher_request *req);
-void crypto_finalize_request(struct crypto_engine *engine,
-			     struct ablkcipher_request *req, int err);
-int crypto_engine_start(struct crypto_engine *engine);
-int crypto_engine_stop(struct crypto_engine *engine);
-struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
-int crypto_engine_exit(struct crypto_engine *engine);
-
 extern const struct crypto_type crypto_ablkcipher_type;
 extern const struct crypto_type crypto_blkcipher_type;
 
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
new file mode 100644
index 0000000..04eb5c7
--- /dev/null
+++ b/include/crypto/engine.h
@@ -0,0 +1,107 @@
+/*
+ * Crypto engine API
+ *
+ * Copyright (c) 2016 Baolin Wang <baolin.wang@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ENGINE_H
+#define _CRYPTO_ENGINE_H
+
+#include <linux/crypto.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+
+#define ENGINE_NAME_LEN	30
+/*
+ * struct crypto_engine - crypto hardware engine
+ * @name: the engine name
+ * @idling: the engine is entering idle state
+ * @busy: request pump is busy
+ * @running: the engine is on working
+ * @cur_req_prepared: current request is prepared
+ * @list: link with the global crypto engine list
+ * @queue_lock: spinlock to syncronise access to request queue
+ * @queue: the crypto queue of the engine
+ * @rt: whether this queue is set to run as a realtime task
+ * @prepare_crypt_hardware: a request will soon arrive from the queue
+ * so the subsystem requests the driver to prepare the hardware
+ * by issuing this call
+ * @unprepare_crypt_hardware: there are currently no more requests on the
+ * queue so the subsystem notifies the driver that it may relax the
+ * hardware by issuing this call
+ * @prepare_cipher_request: do some prepare if need before handle the current request
+ * @unprepare_cipher_request: undo any work done by prepare_cipher_request()
+ * @cipher_one_request: do encryption for current request
+ * @prepare_hash_request: do some prepare if need before handle the current request
+ * @unprepare_hash_request: undo any work done by prepare_hash_request()
+ * @hash_one_request: do hash for current request
+ * @kworker: thread struct for request pump
+ * @kworker_task: pointer to task for request pump kworker thread
+ * @pump_requests: work struct for scheduling work to the request pump
+ * @priv_data: the engine private data
+ * @cur_req: the current request which is on processing
+ */
+struct crypto_engine {
+	char			name[ENGINE_NAME_LEN];
+	bool			idling;
+	bool			busy;
+	bool			running;
+	bool			cur_req_prepared;
+
+	struct list_head	list;
+	spinlock_t		queue_lock;
+	struct crypto_queue	queue;
+
+	bool			rt;
+
+	int (*prepare_crypt_hardware)(struct crypto_engine *engine);
+	int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
+
+	int (*prepare_cipher_request)(struct crypto_engine *engine,
+				      struct ablkcipher_request *req);
+	int (*unprepare_cipher_request)(struct crypto_engine *engine,
+					struct ablkcipher_request *req);
+	int (*prepare_hash_request)(struct crypto_engine *engine,
+				    struct ahash_request *req);
+	int (*unprepare_hash_request)(struct crypto_engine *engine,
+				      struct ahash_request *req);
+	int (*cipher_one_request)(struct crypto_engine *engine,
+				  struct ablkcipher_request *req);
+	int (*hash_one_request)(struct crypto_engine *engine,
+				struct ahash_request *req);
+
+	struct kthread_worker           kworker;
+	struct task_struct              *kworker_task;
+	struct kthread_work             pump_requests;
+
+	void				*priv_data;
+	struct crypto_async_request	*cur_req;
+};
+
+int crypto_transfer_cipher_request(struct crypto_engine *engine,
+				   struct ablkcipher_request *req,
+				   bool need_pump);
+int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
+					     struct ablkcipher_request *req);
+int crypto_transfer_hash_request(struct crypto_engine *engine,
+				 struct ahash_request *req, bool need_pump);
+int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
+					   struct ahash_request *req);
+void crypto_finalize_cipher_request(struct crypto_engine *engine,
+				    struct ablkcipher_request *req, int err);
+void crypto_finalize_hash_request(struct crypto_engine *engine,
+				  struct ahash_request *req, int err);
+int crypto_engine_start(struct crypto_engine *engine);
+int crypto_engine_stop(struct crypto_engine *engine);
+struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
+int crypto_engine_exit(struct crypto_engine *engine);
+
+#endif /* _CRYPTO_ENGINE_H */
diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h
new file mode 100644
index 0000000..2a61c9b
--- /dev/null
+++ b/include/crypto/ghash.h
@@ -0,0 +1,23 @@
+/*
+ * Common values for GHASH algorithms
+ */
+
+#ifndef __CRYPTO_GHASH_H__
+#define __CRYPTO_GHASH_H__
+
+#include <linux/types.h>
+#include <crypto/gf128mul.h>
+
+#define GHASH_BLOCK_SIZE	16
+#define GHASH_DIGEST_SIZE	16
+
+struct ghash_ctx {
+	struct gf128mul_4k *gf128;
+};
+
+struct ghash_desc_ctx {
+	u8 buffer[GHASH_BLOCK_SIZE];
+	u32 bytes;
+};
+
+#endif
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
index 261b86d..f6f0c06 100644
--- a/include/drm/bridge/analogix_dp.h
+++ b/include/drm/bridge/analogix_dp.h
@@ -38,6 +38,10 @@
 			 struct drm_connector *);
 };
 
+int analogix_dp_psr_supported(struct device *dev);
+int analogix_dp_enable_psr(struct device *dev);
+int analogix_dp_disable_psr(struct device *dev);
+
 int analogix_dp_resume(struct device *dev);
 int analogix_dp_suspend(struct device *dev);
 
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index d377865..6726440 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -51,6 +51,7 @@
 #include <linux/platform_device.h>
 #include <linux/poll.h>
 #include <linux/ratelimit.h>
+#include <linux/rbtree.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -127,6 +128,7 @@
  * run-time by echoing the debug value in its sysfs node:
  *   # echo 0xf > /sys/module/drm/parameters/debug
  */
+#define DRM_UT_NONE		0x00
 #define DRM_UT_CORE 		0x01
 #define DRM_UT_DRIVER		0x02
 #define DRM_UT_KMS		0x04
@@ -134,11 +136,14 @@
 #define DRM_UT_ATOMIC		0x10
 #define DRM_UT_VBL		0x20
 
-extern __printf(2, 3)
-void drm_ut_debug_printk(const char *function_name,
-			 const char *format, ...);
-extern __printf(1, 2)
-void drm_err(const char *format, ...);
+extern __printf(6, 7)
+void drm_dev_printk(const struct device *dev, const char *level,
+		    unsigned int category, const char *function_name,
+		    const char *prefix, const char *format, ...);
+
+extern __printf(3, 4)
+void drm_printk(const char *level, unsigned int category,
+		const char *format, ...);
 
 /***********************************************************************/
 /** \name DRM template customization defaults */
@@ -146,6 +151,7 @@
 
 /* driver capabilities and requirements mask */
 #define DRIVER_USE_AGP			0x1
+#define DRIVER_LEGACY			0x2
 #define DRIVER_PCI_DMA			0x8
 #define DRIVER_SG			0x10
 #define DRIVER_HAVE_DMA			0x20
@@ -162,14 +168,37 @@
 /** \name Macros to make printk easier */
 /*@{*/
 
+#define _DRM_PRINTK(once, level, fmt, ...)				\
+	do {								\
+		printk##once(KERN_##level "[" DRM_NAME "] " fmt,	\
+			     ##__VA_ARGS__);				\
+	} while (0)
+
+#define DRM_INFO(fmt, ...)						\
+	_DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
+#define DRM_NOTE(fmt, ...)						\
+	_DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
+#define DRM_WARN(fmt, ...)						\
+	_DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
+
+#define DRM_INFO_ONCE(fmt, ...)						\
+	_DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
+#define DRM_NOTE_ONCE(fmt, ...)						\
+	_DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
+#define DRM_WARN_ONCE(fmt, ...)						\
+	_DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
+
 /**
  * Error output.
  *
  * \param fmt printf() like format string.
  * \param arg arguments
  */
-#define DRM_ERROR(fmt, ...)				\
-	drm_err(fmt, ##__VA_ARGS__)
+#define DRM_DEV_ERROR(dev, fmt, ...)					\
+	drm_dev_printk(dev, KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*",\
+		       fmt, ##__VA_ARGS__)
+#define DRM_ERROR(fmt, ...)						\
+	drm_printk(KERN_ERR, DRM_UT_NONE, fmt,	##__VA_ARGS__)
 
 /**
  * Rate limited error output.  Like DRM_ERROR() but won't flood the log.
@@ -177,21 +206,30 @@
  * \param fmt printf() like format string.
  * \param arg arguments
  */
-#define DRM_ERROR_RATELIMITED(fmt, ...)				\
+#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...)			\
 ({									\
 	static DEFINE_RATELIMIT_STATE(_rs,				\
 				      DEFAULT_RATELIMIT_INTERVAL,	\
 				      DEFAULT_RATELIMIT_BURST);		\
 									\
 	if (__ratelimit(&_rs))						\
-		drm_err(fmt, ##__VA_ARGS__);				\
+		DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__);			\
 })
+#define DRM_ERROR_RATELIMITED(fmt, ...)					\
+	DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
 
-#define DRM_INFO(fmt, ...)				\
-	printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+#define DRM_DEV_INFO(dev, fmt, ...)					\
+	drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt,	\
+		       ##__VA_ARGS__)
 
-#define DRM_INFO_ONCE(fmt, ...)				\
-	printk_once(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+#define DRM_DEV_INFO_ONCE(dev, fmt, ...)				\
+({									\
+	static bool __print_once __read_mostly;				\
+	if (!__print_once) {						\
+		__print_once = true;					\
+		DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__);			\
+	}								\
+})
 
 /**
  * Debug output.
@@ -199,37 +237,74 @@
  * \param fmt printf() like format string.
  * \param arg arguments
  */
-#define DRM_DEBUG(fmt, args...)						\
-	do {								\
-		if (unlikely(drm_debug & DRM_UT_CORE))			\
-			drm_ut_debug_printk(__func__, fmt, ##args);	\
-	} while (0)
+#define DRM_DEV_DEBUG(dev, fmt, args...)				\
+	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt,	\
+		       ##args)
+#define DRM_DEBUG(fmt, ...)						\
+	drm_printk(KERN_DEBUG, DRM_UT_CORE, fmt, ##__VA_ARGS__)
 
-#define DRM_DEBUG_DRIVER(fmt, args...)					\
-	do {								\
-		if (unlikely(drm_debug & DRM_UT_DRIVER))		\
-			drm_ut_debug_printk(__func__, fmt, ##args);	\
-	} while (0)
-#define DRM_DEBUG_KMS(fmt, args...)					\
-	do {								\
-		if (unlikely(drm_debug & DRM_UT_KMS))			\
-			drm_ut_debug_printk(__func__, fmt, ##args);	\
-	} while (0)
-#define DRM_DEBUG_PRIME(fmt, args...)					\
-	do {								\
-		if (unlikely(drm_debug & DRM_UT_PRIME))			\
-			drm_ut_debug_printk(__func__, fmt, ##args);	\
-	} while (0)
-#define DRM_DEBUG_ATOMIC(fmt, args...)					\
-	do {								\
-		if (unlikely(drm_debug & DRM_UT_ATOMIC))		\
-			drm_ut_debug_printk(__func__, fmt, ##args);	\
-	} while (0)
-#define DRM_DEBUG_VBL(fmt, args...)					\
-	do {								\
-		if (unlikely(drm_debug & DRM_UT_VBL))			\
-			drm_ut_debug_printk(__func__, fmt, ##args);	\
-	} while (0)
+#define DRM_DEV_DEBUG_DRIVER(dev, fmt, args...)				\
+	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_DRIVER, __func__, "",	\
+		       fmt, ##args)
+#define DRM_DEBUG_DRIVER(fmt, ...)					\
+	drm_printk(KERN_DEBUG, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_KMS(dev, fmt, args...)				\
+	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt,	\
+		       ##args)
+#define DRM_DEBUG_KMS(fmt, ...)					\
+	drm_printk(KERN_DEBUG, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_PRIME(dev, fmt, args...)				\
+	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_PRIME, __func__, "",	\
+		       fmt, ##args)
+#define DRM_DEBUG_PRIME(fmt, ...)					\
+	drm_printk(KERN_DEBUG, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, args...)				\
+	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ATOMIC, __func__, "",	\
+		       fmt, ##args)
+#define DRM_DEBUG_ATOMIC(fmt, ...)					\
+	drm_printk(KERN_DEBUG, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_VBL(dev, fmt, args...)				\
+	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt,	\
+		       ##args)
+#define DRM_DEBUG_VBL(fmt, ...)					\
+	drm_printk(KERN_DEBUG, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+
+#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, level, fmt, args...)	\
+({									\
+	static DEFINE_RATELIMIT_STATE(_rs,				\
+				      DEFAULT_RATELIMIT_INTERVAL,	\
+				      DEFAULT_RATELIMIT_BURST);		\
+	if (__ratelimit(&_rs))						\
+		drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ ## level,	\
+			       __func__, "", fmt, ##args);		\
+})
+
+/**
+ * Rate limited debug output. Like DRM_DEBUG() but won't flood the log.
+ *
+ * \param fmt printf() like format string.
+ * \param arg arguments
+ */
+#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, args...)			\
+	DEV__DRM_DEFINE_DEBUG_RATELIMITED(dev, CORE, fmt, ##args)
+#define DRM_DEBUG_RATELIMITED(fmt, args...)				\
+	DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##args)
+#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, args...)		\
+	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRIVER, fmt, ##args)
+#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, args...)			\
+	DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##args)
+#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, args...)		\
+	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, KMS, fmt, ##args)
+#define DRM_DEBUG_KMS_RATELIMITED(fmt, args...)				\
+	DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##args)
+#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, args...)		\
+	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, PRIME, fmt, ##args)
+#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...)			\
+	DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args)
 
 /*@}*/
 
@@ -295,10 +370,10 @@
 		      we deliver the event, for tracing only */
 };
 
-/* initial implementaton using a linked list - todo hashtab */
 struct drm_prime_file_private {
-	struct list_head head;
 	struct mutex lock;
+	struct rb_root dmabufs;
+	struct rb_root handles;
 };
 
 /** File private data */
@@ -320,7 +395,6 @@
 	unsigned is_master:1;
 
 	struct pid *pid;
-	kuid_t uid;
 	drm_magic_t magic;
 	struct list_head lhead;
 	struct drm_minor *minor;
@@ -642,7 +716,7 @@
 };
 
 enum drm_minor_type {
-	DRM_MINOR_LEGACY,
+	DRM_MINOR_PRIMARY,
 	DRM_MINOR_CONTROL,
 	DRM_MINOR_RENDER,
 	DRM_MINOR_CNT,
@@ -856,7 +930,7 @@
 
 static inline bool drm_is_primary_client(const struct drm_file *file_priv)
 {
-	return file_priv->minor->type == DRM_MINOR_LEGACY;
+	return file_priv->minor->type == DRM_MINOR_PRIMARY;
 }
 
 /******************************************************************/
@@ -937,8 +1011,11 @@
 }
 #endif
 
+struct dma_buf_export_info;
+
 extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
-		struct drm_gem_object *obj, int flags);
+					    struct drm_gem_object *obj,
+					    int flags);
 extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
 		int *prime_fd);
@@ -946,6 +1023,8 @@
 		struct dma_buf *dma_buf);
 extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
 		struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
+				      struct dma_buf_export_info *exp_info);
 extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
 
 extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 856a9c8..9701f2d 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -30,6 +30,160 @@
 
 #include <drm/drm_crtc.h>
 
+/**
+ * struct drm_crtc_commit - track modeset commits on a CRTC
+ *
+ * This structure is used to track pending modeset changes and atomic commit on
+ * a per-CRTC basis. Since updating the list should never block this structure
+ * is reference counted to allow waiters to safely wait on an event to complete,
+ * without holding any locks.
+ *
+ * It has 3 different events in total to allow a fine-grained synchronization
+ * between outstanding updates::
+ *
+ *	atomic commit thread			hardware
+ *
+ * 	write new state into hardware	---->	...
+ * 	signal hw_done
+ * 						switch to new state on next
+ * 	...					v/hblank
+ *
+ *	wait for buffers to show up		...
+ *
+ *	...					send completion irq
+ *						irq handler signals flip_done
+ *	cleanup old buffers
+ *
+ * 	signal cleanup_done
+ *
+ * 	wait for flip_done		<----
+ * 	clean up atomic state
+ *
+ * The important bit to know is that cleanup_done is the terminal event, but the
+ * ordering between flip_done and hw_done is entirely up to the specific driver
+ * and modeset state change.
+ *
+ * For an implementation of how to use this look at
+ * drm_atomic_helper_setup_commit() from the atomic helper library.
+ */
+struct drm_crtc_commit {
+	/**
+	 * @crtc:
+	 *
+	 * DRM CRTC for this commit.
+	 */
+	struct drm_crtc *crtc;
+
+	/**
+	 * @ref:
+	 *
+	 * Reference count for this structure. Needed to allow blocking on
+	 * completions without the risk of the completion disappearing
+	 * meanwhile.
+	 */
+	struct kref ref;
+
+	/**
+	 * @flip_done:
+	 *
+	 * Will be signaled when the hardware has flipped to the new set of
+	 * buffers. Signals at the same time as when the drm event for this
+	 * commit is sent to userspace, or when an out-fence is singalled. Note
+	 * that for most hardware, in most cases this happens after @hw_done is
+	 * signalled.
+	 */
+	struct completion flip_done;
+
+	/**
+	 * @hw_done:
+	 *
+	 * Will be signalled when all hw register changes for this commit have
+	 * been written out. Especially when disabling a pipe this can be much
+	 * later than than @flip_done, since that can signal already when the
+	 * screen goes black, whereas to fully shut down a pipe more register
+	 * I/O is required.
+	 *
+	 * Note that this does not need to include separately reference-counted
+	 * resources like backing storage buffer pinning, or runtime pm
+	 * management.
+	 */
+	struct completion hw_done;
+
+	/**
+	 * @cleanup_done:
+	 *
+	 * Will be signalled after old buffers have been cleaned up by calling
+	 * drm_atomic_helper_cleanup_planes(). Since this can only happen after
+	 * a vblank wait completed it might be a bit later. This completion is
+	 * useful to throttle updates and avoid hardware updates getting ahead
+	 * of the buffer cleanup too much.
+	 */
+	struct completion cleanup_done;
+
+	/**
+	 * @commit_entry:
+	 *
+	 * Entry on the per-CRTC commit_list. Protected by crtc->commit_lock.
+	 */
+	struct list_head commit_entry;
+
+	/**
+	 * @event:
+	 *
+	 * &drm_pending_vblank_event pointer to clean up private events.
+	 */
+	struct drm_pending_vblank_event *event;
+};
+
+struct __drm_planes_state {
+	struct drm_plane *ptr;
+	struct drm_plane_state *state;
+};
+
+struct __drm_crtcs_state {
+	struct drm_crtc *ptr;
+	struct drm_crtc_state *state;
+	struct drm_crtc_commit *commit;
+};
+
+struct __drm_connnectors_state {
+	struct drm_connector *ptr;
+	struct drm_connector_state *state;
+};
+
+/**
+ * struct drm_atomic_state - the global state object for atomic updates
+ * @dev: parent DRM device
+ * @allow_modeset: allow full modeset
+ * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
+ * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
+ * @planes: pointer to array of structures with per-plane data
+ * @crtcs: pointer to array of CRTC pointers
+ * @num_connector: size of the @connectors and @connector_states arrays
+ * @connectors: pointer to array of structures with per-connector data
+ * @acquire_ctx: acquire context for this atomic modeset state update
+ */
+struct drm_atomic_state {
+	struct drm_device *dev;
+	bool allow_modeset : 1;
+	bool legacy_cursor_update : 1;
+	bool legacy_set_config : 1;
+	struct __drm_planes_state *planes;
+	struct __drm_crtcs_state *crtcs;
+	int num_connector;
+	struct __drm_connnectors_state *connectors;
+
+	struct drm_modeset_acquire_ctx *acquire_ctx;
+
+	/**
+	 * @commit_work:
+	 *
+	 * Work item which can be used by the driver or helpers to execute the
+	 * commit without blocking.
+	 */
+	struct work_struct commit_work;
+};
+
 void drm_crtc_commit_put(struct drm_crtc_commit *commit);
 static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit)
 {
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index d86ae5d..7ff92b0 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -29,6 +29,8 @@
 #define DRM_ATOMIC_HELPER_H_
 
 #include <drm/drm_crtc.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_modeset_helper.h>
 
 struct drm_atomic_state;
 
@@ -43,8 +45,9 @@
 			     struct drm_atomic_state *state,
 			     bool nonblock);
 
-void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
-					struct drm_atomic_state *state);
+int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
+					struct drm_atomic_state *state,
+					bool pre_swap);
 bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
 					   struct drm_atomic_state *old_state,
 					   struct drm_crtc *crtc);
@@ -63,14 +66,19 @@
 
 int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 				     struct drm_atomic_state *state);
+
+#define DRM_PLANE_COMMIT_ACTIVE_ONLY			BIT(0)
+#define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET	BIT(1)
+
 void drm_atomic_helper_commit_planes(struct drm_device *dev,
 				     struct drm_atomic_state *state,
-				     bool active_only);
+				     uint32_t flags);
 void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
 				      struct drm_atomic_state *old_state);
 void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state);
-void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
-					      bool atomic);
+void
+drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
+					 bool atomic);
 
 void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
 				  bool stall);
diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h
new file mode 100644
index 0000000..36baa17
--- /dev/null
+++ b/include/drm/drm_blend.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_BLEND_H__
+#define __DRM_BLEND_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+
+struct drm_device;
+struct drm_atomic_state;
+
+/*
+ * Rotation property bits. DRM_ROTATE_<degrees> rotates the image by the
+ * specified amount in degrees in counter clockwise direction. DRM_REFLECT_X and
+ * DRM_REFLECT_Y reflects the image along the specified axis prior to rotation
+ *
+ * WARNING: These defines are UABI since they're exposed in the rotation
+ * property.
+ */
+#define DRM_ROTATE_0	BIT(0)
+#define DRM_ROTATE_90	BIT(1)
+#define DRM_ROTATE_180	BIT(2)
+#define DRM_ROTATE_270	BIT(3)
+#define DRM_ROTATE_MASK (DRM_ROTATE_0   | DRM_ROTATE_90 | \
+			 DRM_ROTATE_180 | DRM_ROTATE_270)
+#define DRM_REFLECT_X	BIT(4)
+#define DRM_REFLECT_Y	BIT(5)
+#define DRM_REFLECT_MASK (DRM_REFLECT_X | DRM_REFLECT_Y)
+
+struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
+						       unsigned int supported_rotations);
+unsigned int drm_rotation_simplify(unsigned int rotation,
+				   unsigned int supported_rotations);
+
+int drm_plane_create_zpos_property(struct drm_plane *plane,
+				   unsigned int zpos,
+				   unsigned int min, unsigned int max);
+int drm_plane_create_zpos_immutable_property(struct drm_plane *plane,
+					     unsigned int zpos);
+int drm_atomic_normalize_zpos(struct drm_device *dev,
+			      struct drm_atomic_state *state);
+#endif
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
new file mode 100644
index 0000000..530a1d6
--- /dev/null
+++ b/include/drm/drm_bridge.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_BRIDGE_H__
+#define __DRM_BRIDGE_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+#include <drm/drm_modes.h>
+
+struct drm_bridge;
+
+/**
+ * struct drm_bridge_funcs - drm_bridge control functions
+ */
+struct drm_bridge_funcs {
+	/**
+	 * @attach:
+	 *
+	 * This callback is invoked whenever our bridge is being attached to a
+	 * &drm_encoder.
+	 *
+	 * The attach callback is optional.
+	 *
+	 * RETURNS:
+	 *
+	 * Zero on success, error code on failure.
+	 */
+	int (*attach)(struct drm_bridge *bridge);
+
+	/**
+	 * @detach:
+	 *
+	 * This callback is invoked whenever our bridge is being detached from a
+	 * &drm_encoder.
+	 *
+	 * The detach callback is optional.
+	 */
+	void (*detach)(struct drm_bridge *bridge);
+
+	/**
+	 * @mode_fixup:
+	 *
+	 * This callback is used to validate and adjust a mode. The paramater
+	 * mode is the display mode that should be fed to the next element in
+	 * the display chain, either the final &drm_connector or the next
+	 * &drm_bridge. The parameter adjusted_mode is the input mode the bridge
+	 * requires. It can be modified by this callback and does not need to
+	 * match mode.
+	 *
+	 * This is the only hook that allows a bridge to reject a modeset. If
+	 * this function passes all other callbacks must succeed for this
+	 * configuration.
+	 *
+	 * The mode_fixup callback is optional.
+	 *
+	 * NOTE:
+	 *
+	 * This function is called in the check phase of atomic modesets, which
+	 * can be aborted for any reason (including on userspace's request to
+	 * just check whether a configuration would be possible). Drivers MUST
+	 * NOT touch any persistent state (hardware or software) or data
+	 * structures except the passed in @state parameter.
+	 *
+	 * RETURNS:
+	 *
+	 * True if an acceptable configuration is possible, false if the modeset
+	 * operation should be rejected.
+	 */
+	bool (*mode_fixup)(struct drm_bridge *bridge,
+			   const struct drm_display_mode *mode,
+			   struct drm_display_mode *adjusted_mode);
+	/**
+	 * @disable:
+	 *
+	 * This callback should disable the bridge. It is called right before
+	 * the preceding element in the display pipe is disabled. If the
+	 * preceding element is a bridge this means it's called before that
+	 * bridge's ->disable() function. If the preceding element is a
+	 * &drm_encoder it's called right before the encoder's ->disable(),
+	 * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+	 *
+	 * The bridge can assume that the display pipe (i.e. clocks and timing
+	 * signals) feeding it is still running when this callback is called.
+	 *
+	 * The disable callback is optional.
+	 */
+	void (*disable)(struct drm_bridge *bridge);
+
+	/**
+	 * @post_disable:
+	 *
+	 * This callback should disable the bridge. It is called right after
+	 * the preceding element in the display pipe is disabled. If the
+	 * preceding element is a bridge this means it's called after that
+	 * bridge's ->post_disable() function. If the preceding element is a
+	 * &drm_encoder it's called right after the encoder's ->disable(),
+	 * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+	 *
+	 * The bridge must assume that the display pipe (i.e. clocks and timing
+	 * singals) feeding it is no longer running when this callback is
+	 * called.
+	 *
+	 * The post_disable callback is optional.
+	 */
+	void (*post_disable)(struct drm_bridge *bridge);
+
+	/**
+	 * @mode_set:
+	 *
+	 * This callback should set the given mode on the bridge. It is called
+	 * after the ->mode_set() callback for the preceding element in the
+	 * display pipeline has been called already. The display pipe (i.e.
+	 * clocks and timing signals) is off when this function is called.
+	 */
+	void (*mode_set)(struct drm_bridge *bridge,
+			 struct drm_display_mode *mode,
+			 struct drm_display_mode *adjusted_mode);
+	/**
+	 * @pre_enable:
+	 *
+	 * This callback should enable the bridge. It is called right before
+	 * the preceding element in the display pipe is enabled. If the
+	 * preceding element is a bridge this means it's called before that
+	 * bridge's ->pre_enable() function. If the preceding element is a
+	 * &drm_encoder it's called right before the encoder's ->enable(),
+	 * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+	 *
+	 * The display pipe (i.e. clocks and timing signals) feeding this bridge
+	 * will not yet be running when this callback is called. The bridge must
+	 * not enable the display link feeding the next bridge in the chain (if
+	 * there is one) when this callback is called.
+	 *
+	 * The pre_enable callback is optional.
+	 */
+	void (*pre_enable)(struct drm_bridge *bridge);
+
+	/**
+	 * @enable:
+	 *
+	 * This callback should enable the bridge. It is called right after
+	 * the preceding element in the display pipe is enabled. If the
+	 * preceding element is a bridge this means it's called after that
+	 * bridge's ->enable() function. If the preceding element is a
+	 * &drm_encoder it's called right after the encoder's ->enable(),
+	 * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+	 *
+	 * The bridge can assume that the display pipe (i.e. clocks and timing
+	 * signals) feeding it is running when this callback is called. This
+	 * callback must enable the display link feeding the next bridge in the
+	 * chain if there is one.
+	 *
+	 * The enable callback is optional.
+	 */
+	void (*enable)(struct drm_bridge *bridge);
+};
+
+/**
+ * struct drm_bridge - central DRM bridge control structure
+ * @dev: DRM device this bridge belongs to
+ * @encoder: encoder to which this bridge is connected
+ * @next: the next bridge in the encoder chain
+ * @of_node: device node pointer to the bridge
+ * @list: to keep track of all added bridges
+ * @funcs: control functions
+ * @driver_private: pointer to the bridge driver's internal context
+ */
+struct drm_bridge {
+	struct drm_device *dev;
+	struct drm_encoder *encoder;
+	struct drm_bridge *next;
+#ifdef CONFIG_OF
+	struct device_node *of_node;
+#endif
+	struct list_head list;
+
+	const struct drm_bridge_funcs *funcs;
+	void *driver_private;
+};
+
+int drm_bridge_add(struct drm_bridge *bridge);
+void drm_bridge_remove(struct drm_bridge *bridge);
+struct drm_bridge *of_drm_find_bridge(struct device_node *np);
+int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
+void drm_bridge_detach(struct drm_bridge *bridge);
+
+bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
+			const struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode);
+void drm_bridge_disable(struct drm_bridge *bridge);
+void drm_bridge_post_disable(struct drm_bridge *bridge);
+void drm_bridge_mode_set(struct drm_bridge *bridge,
+			struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode);
+void drm_bridge_pre_enable(struct drm_bridge *bridge);
+void drm_bridge_enable(struct drm_bridge *bridge);
+
+#endif
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
new file mode 100644
index 0000000..c767238
--- /dev/null
+++ b/include/drm/drm_color_mgmt.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_COLOR_MGMT_H__
+#define __DRM_COLOR_MGMT_H__
+
+#include <linux/ctype.h>
+
+void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+				uint degamma_lut_size,
+				bool has_ctm,
+				uint gamma_lut_size);
+
+int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+				 int gamma_size);
+
+/**
+ * drm_color_lut_extract - clamp&round LUT entries
+ * @user_input: input value
+ * @bit_precision: number of bits the hw LUT supports
+ *
+ * Extract a degamma/gamma LUT value provided by user (in the form of
+ * &drm_color_lut entries) and round it to the precision supported by the
+ * hardware.
+ */
+static inline uint32_t drm_color_lut_extract(uint32_t user_input,
+					     uint32_t bit_precision)
+{
+	uint32_t val = user_input;
+	uint32_t max = 0xffff >> (16 - bit_precision);
+
+	/* Round only if we're not using full precision. */
+	if (bit_precision < 16) {
+		val += 1UL << (16 - bit_precision - 1);
+		val >>= 16 - bit_precision;
+	}
+
+	return clamp_val(val, 0, max);
+}
+
+
+#endif
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
new file mode 100644
index 0000000..ac9d7d8
--- /dev/null
+++ b/include/drm/drm_connector.h
@@ -0,0 +1,778 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_CONNECTOR_H__
+#define __DRM_CONNECTOR_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+#include <uapi/drm/drm_mode.h>
+
+struct drm_device;
+
+struct drm_connector_helper_funcs;
+struct drm_device;
+struct drm_crtc;
+struct drm_encoder;
+struct drm_property;
+struct drm_property_blob;
+struct edid;
+
+enum drm_connector_force {
+	DRM_FORCE_UNSPECIFIED,
+	DRM_FORCE_OFF,
+	DRM_FORCE_ON,         /* force on analog part normally */
+	DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
+};
+
+/**
+ * enum drm_connector_status - status for a &drm_connector
+ *
+ * This enum is used to track the connector status. There are no separate
+ * #defines for the uapi!
+ */
+enum drm_connector_status {
+	/**
+	 * @connector_status_connected: The connector is definitely connected to
+	 * a sink device, and can be enabled.
+	 */
+	connector_status_connected = 1,
+	/**
+	 * @connector_status_disconnected: The connector isn't connected to a
+	 * sink device which can be autodetect. For digital outputs like DP or
+	 * HDMI (which can be realiable probed) this means there's really
+	 * nothing there. It is driver-dependent whether a connector with this
+	 * status can be lit up or not.
+	 */
+	connector_status_disconnected = 2,
+	/**
+	 * @connector_status_unknown: The connector's status could not be
+	 * reliably detected. This happens when probing would either cause
+	 * flicker (like load-detection when the connector is in use), or when a
+	 * hardware resource isn't available (like when load-detection needs a
+	 * free CRTC). It should be possible to light up the connector with one
+	 * of the listed fallback modes. For default configuration userspace
+	 * should only try to light up connectors with unknown status when
+	 * there's not connector with @connector_status_connected.
+	 */
+	connector_status_unknown = 3,
+};
+
+enum subpixel_order {
+	SubPixelUnknown = 0,
+	SubPixelHorizontalRGB,
+	SubPixelHorizontalBGR,
+	SubPixelVerticalRGB,
+	SubPixelVerticalBGR,
+	SubPixelNone,
+};
+
+/**
+ * struct drm_display_info - runtime data about the connected sink
+ *
+ * Describes a given display (e.g. CRT or flat panel) and its limitations. For
+ * fixed display sinks like built-in panels there's not much difference between
+ * this and struct &drm_connector. But for sinks with a real cable this
+ * structure is meant to describe all the things at the other end of the cable.
+ *
+ * For sinks which provide an EDID this can be filled out by calling
+ * drm_add_edid_modes().
+ */
+struct drm_display_info {
+	/**
+	 * @name: Name of the display.
+	 */
+	char name[DRM_DISPLAY_INFO_LEN];
+
+	/**
+	 * @width_mm: Physical width in mm.
+	 */
+        unsigned int width_mm;
+	/**
+	 * @height_mm: Physical height in mm.
+	 */
+	unsigned int height_mm;
+
+	/**
+	 * @pixel_clock: Maximum pixel clock supported by the sink, in units of
+	 * 100Hz. This mismatches the clok in &drm_display_mode (which is in
+	 * kHZ), because that's what the EDID uses as base unit.
+	 */
+	unsigned int pixel_clock;
+	/**
+	 * @bpc: Maximum bits per color channel. Used by HDMI and DP outputs.
+	 */
+	unsigned int bpc;
+
+	/**
+	 * @subpixel_order: Subpixel order of LCD panels.
+	 */
+	enum subpixel_order subpixel_order;
+
+#define DRM_COLOR_FORMAT_RGB444		(1<<0)
+#define DRM_COLOR_FORMAT_YCRCB444	(1<<1)
+#define DRM_COLOR_FORMAT_YCRCB422	(1<<2)
+
+	/**
+	 * @color_formats: HDMI Color formats, selects between RGB and YCrCb
+	 * modes. Used DRM_COLOR_FORMAT\_ defines, which are _not_ the same ones
+	 * as used to describe the pixel format in framebuffers, and also don't
+	 * match the formats in @bus_formats which are shared with v4l.
+	 */
+	u32 color_formats;
+
+	/**
+	 * @bus_formats: Pixel data format on the wire, somewhat redundant with
+	 * @color_formats. Array of size @num_bus_formats encoded using
+	 * MEDIA_BUS_FMT\_ defines shared with v4l and media drivers.
+	 */
+	const u32 *bus_formats;
+	/**
+	 * @num_bus_formats: Size of @bus_formats array.
+	 */
+	unsigned int num_bus_formats;
+
+#define DRM_BUS_FLAG_DE_LOW		(1<<0)
+#define DRM_BUS_FLAG_DE_HIGH		(1<<1)
+/* drive data on pos. edge */
+#define DRM_BUS_FLAG_PIXDATA_POSEDGE	(1<<2)
+/* drive data on neg. edge */
+#define DRM_BUS_FLAG_PIXDATA_NEGEDGE	(1<<3)
+
+	/**
+	 * @bus_flags: Additional information (like pixel signal polarity) for
+	 * the pixel data on the bus, using DRM_BUS_FLAGS\_ defines.
+	 */
+	u32 bus_flags;
+
+	/**
+	 * @max_tmds_clock: Maximum TMDS clock rate supported by the
+	 * sink in kHz. 0 means undefined.
+	 */
+	int max_tmds_clock;
+
+	/**
+	 * @dvi_dual: Dual-link DVI sink?
+	 */
+	bool dvi_dual;
+
+	/**
+	 * @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even
+	 * more stuff redundant with @bus_formats.
+	 */
+	u8 edid_hdmi_dc_modes;
+
+	/**
+	 * @cea_rev: CEA revision of the HDMI sink.
+	 */
+	u8 cea_rev;
+};
+
+int drm_display_info_set_bus_formats(struct drm_display_info *info,
+				     const u32 *formats,
+				     unsigned int num_formats);
+
+/**
+ * struct drm_connector_state - mutable connector state
+ * @connector: backpointer to the connector
+ * @best_encoder: can be used by helpers and drivers to select the encoder
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_connector_state {
+	struct drm_connector *connector;
+
+	/**
+	 * @crtc: CRTC to connect connector to, NULL if disabled.
+	 *
+	 * Do not change this directly, use drm_atomic_set_crtc_for_connector()
+	 * instead.
+	 */
+	struct drm_crtc *crtc;
+
+	struct drm_encoder *best_encoder;
+
+	struct drm_atomic_state *state;
+};
+
+/**
+ * struct drm_connector_funcs - control connectors on a given device
+ *
+ * Each CRTC may have one or more connectors attached to it.  The functions
+ * below allow the core DRM code to control connectors, enumerate available modes,
+ * etc.
+ */
+struct drm_connector_funcs {
+	/**
+	 * @dpms:
+	 *
+	 * Legacy entry point to set the per-connector DPMS state. Legacy DPMS
+	 * is exposed as a standard property on the connector, but diverted to
+	 * this callback in the drm core. Note that atomic drivers don't
+	 * implement the 4 level DPMS support on the connector any more, but
+	 * instead only have an on/off "ACTIVE" property on the CRTC object.
+	 *
+	 * Drivers implementing atomic modeset should use
+	 * drm_atomic_helper_connector_dpms() to implement this hook.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 on success or a negative error code on failure.
+	 */
+	int (*dpms)(struct drm_connector *connector, int mode);
+
+	/**
+	 * @reset:
+	 *
+	 * Reset connector hardware and software state to off. This function isn't
+	 * called by the core directly, only through drm_mode_config_reset().
+	 * It's not a helper hook only for historical reasons.
+	 *
+	 * Atomic drivers can use drm_atomic_helper_connector_reset() to reset
+	 * atomic state using this hook.
+	 */
+	void (*reset)(struct drm_connector *connector);
+
+	/**
+	 * @detect:
+	 *
+	 * Check to see if anything is attached to the connector. The parameter
+	 * force is set to false whilst polling, true when checking the
+	 * connector due to a user request. force can be used by the driver to
+	 * avoid expensive, destructive operations during automated probing.
+	 *
+	 * FIXME:
+	 *
+	 * Note that this hook is only called by the probe helper. It's not in
+	 * the helper library vtable purely for historical reasons. The only DRM
+	 * core	entry point to probe connector state is @fill_modes.
+	 *
+	 * RETURNS:
+	 *
+	 * drm_connector_status indicating the connector's status.
+	 */
+	enum drm_connector_status (*detect)(struct drm_connector *connector,
+					    bool force);
+
+	/**
+	 * @force:
+	 *
+	 * This function is called to update internal encoder state when the
+	 * connector is forced to a certain state by userspace, either through
+	 * the sysfs interfaces or on the kernel cmdline. In that case the
+	 * @detect callback isn't called.
+	 *
+	 * FIXME:
+	 *
+	 * Note that this hook is only called by the probe helper. It's not in
+	 * the helper library vtable purely for historical reasons. The only DRM
+	 * core	entry point to probe connector state is @fill_modes.
+	 */
+	void (*force)(struct drm_connector *connector);
+
+	/**
+	 * @fill_modes:
+	 *
+	 * Entry point for output detection and basic mode validation. The
+	 * driver should reprobe the output if needed (e.g. when hotplug
+	 * handling is unreliable), add all detected modes to connector->modes
+	 * and filter out any the device can't support in any configuration. It
+	 * also needs to filter out any modes wider or higher than the
+	 * parameters max_width and max_height indicate.
+	 *
+	 * The drivers must also prune any modes no longer valid from
+	 * connector->modes. Furthermore it must update connector->status and
+	 * connector->edid.  If no EDID has been received for this output
+	 * connector->edid must be NULL.
+	 *
+	 * Drivers using the probe helpers should use
+	 * drm_helper_probe_single_connector_modes() or
+	 * drm_helper_probe_single_connector_modes_nomerge() to implement this
+	 * function.
+	 *
+	 * RETURNS:
+	 *
+	 * The number of modes detected and filled into connector->modes.
+	 */
+	int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
+
+	/**
+	 * @set_property:
+	 *
+	 * This is the legacy entry point to update a property attached to the
+	 * connector.
+	 *
+	 * Drivers implementing atomic modeset should use
+	 * drm_atomic_helper_connector_set_property() to implement this hook.
+	 *
+	 * This callback is optional if the driver does not support any legacy
+	 * driver-private properties.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 on success or a negative error code on failure.
+	 */
+	int (*set_property)(struct drm_connector *connector, struct drm_property *property,
+			     uint64_t val);
+
+	/**
+	 * @late_register:
+	 *
+	 * This optional hook can be used to register additional userspace
+	 * interfaces attached to the connector, light backlight control, i2c,
+	 * DP aux or similar interfaces. It is called late in the driver load
+	 * sequence from drm_connector_register() when registering all the
+	 * core drm connector interfaces. Everything added from this callback
+	 * should be unregistered in the early_unregister callback.
+	 *
+	 * Returns:
+	 *
+	 * 0 on success, or a negative error code on failure.
+	 */
+	int (*late_register)(struct drm_connector *connector);
+
+	/**
+	 * @early_unregister:
+	 *
+	 * This optional hook should be used to unregister the additional
+	 * userspace interfaces attached to the connector from
+	 * late_register(). It is called from drm_connector_unregister(),
+	 * early in the driver unload sequence to disable userspace access
+	 * before data structures are torndown.
+	 */
+	void (*early_unregister)(struct drm_connector *connector);
+
+	/**
+	 * @destroy:
+	 *
+	 * Clean up connector resources. This is called at driver unload time
+	 * through drm_mode_config_cleanup(). It can also be called at runtime
+	 * when a connector is being hot-unplugged for drivers that support
+	 * connector hotplugging (e.g. DisplayPort MST).
+	 */
+	void (*destroy)(struct drm_connector *connector);
+
+	/**
+	 * @atomic_duplicate_state:
+	 *
+	 * Duplicate the current atomic state for this connector and return it.
+	 * The core and helpers guarantee that any atomic state duplicated with
+	 * this hook and still owned by the caller (i.e. not transferred to the
+	 * driver by calling ->atomic_commit() from struct
+	 * &drm_mode_config_funcs) will be cleaned up by calling the
+	 * @atomic_destroy_state hook in this structure.
+	 *
+	 * Atomic drivers which don't subclass struct &drm_connector_state should use
+	 * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the
+	 * state structure to extend it with driver-private state should use
+	 * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is
+	 * duplicated in a consistent fashion across drivers.
+	 *
+	 * It is an error to call this hook before connector->state has been
+	 * initialized correctly.
+	 *
+	 * NOTE:
+	 *
+	 * If the duplicate state references refcounted resources this hook must
+	 * acquire a reference for each of them. The driver must release these
+	 * references again in @atomic_destroy_state.
+	 *
+	 * RETURNS:
+	 *
+	 * Duplicated atomic state or NULL when the allocation failed.
+	 */
+	struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
+
+	/**
+	 * @atomic_destroy_state:
+	 *
+	 * Destroy a state duplicated with @atomic_duplicate_state and release
+	 * or unreference all resources it references
+	 */
+	void (*atomic_destroy_state)(struct drm_connector *connector,
+				     struct drm_connector_state *state);
+
+	/**
+	 * @atomic_set_property:
+	 *
+	 * Decode a driver-private property value and store the decoded value
+	 * into the passed-in state structure. Since the atomic core decodes all
+	 * standardized properties (even for extensions beyond the core set of
+	 * properties which might not be implemented by all drivers) this
+	 * requires drivers to subclass the state structure.
+	 *
+	 * Such driver-private properties should really only be implemented for
+	 * truly hardware/vendor specific state. Instead it is preferred to
+	 * standardize atomic extension and decode the properties used to expose
+	 * such an extension in the core.
+	 *
+	 * Do not call this function directly, use
+	 * drm_atomic_connector_set_property() instead.
+	 *
+	 * This callback is optional if the driver does not support any
+	 * driver-private atomic properties.
+	 *
+	 * NOTE:
+	 *
+	 * This function is called in the state assembly phase of atomic
+	 * modesets, which can be aborted for any reason (including on
+	 * userspace's request to just check whether a configuration would be
+	 * possible). Drivers MUST NOT touch any persistent state (hardware or
+	 * software) or data structures except the passed in @state parameter.
+	 *
+	 * Also since userspace controls in which order properties are set this
+	 * function must not do any input validation (since the state update is
+	 * incomplete and hence likely inconsistent). Instead any such input
+	 * validation must be done in the various atomic_check callbacks.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 if the property has been found, -EINVAL if the property isn't
+	 * implemented by the driver (which shouldn't ever happen, the core only
+	 * asks for properties attached to this connector). No other validation
+	 * is allowed by the driver. The core already checks that the property
+	 * value is within the range (integer, valid enum value, ...) the driver
+	 * set when registering the property.
+	 */
+	int (*atomic_set_property)(struct drm_connector *connector,
+				   struct drm_connector_state *state,
+				   struct drm_property *property,
+				   uint64_t val);
+
+	/**
+	 * @atomic_get_property:
+	 *
+	 * Reads out the decoded driver-private property. This is used to
+	 * implement the GETCONNECTOR IOCTL.
+	 *
+	 * Do not call this function directly, use
+	 * drm_atomic_connector_get_property() instead.
+	 *
+	 * This callback is optional if the driver does not support any
+	 * driver-private atomic properties.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 on success, -EINVAL if the property isn't implemented by the
+	 * driver (which shouldn't ever happen, the core only asks for
+	 * properties attached to this connector).
+	 */
+	int (*atomic_get_property)(struct drm_connector *connector,
+				   const struct drm_connector_state *state,
+				   struct drm_property *property,
+				   uint64_t *val);
+};
+
+/* mode specified on the command line */
+struct drm_cmdline_mode {
+	bool specified;
+	bool refresh_specified;
+	bool bpp_specified;
+	int xres, yres;
+	int bpp;
+	int refresh;
+	bool rb;
+	bool interlace;
+	bool cvt;
+	bool margins;
+	enum drm_connector_force force;
+};
+
+/**
+ * struct drm_connector - central DRM connector control structure
+ * @dev: parent DRM device
+ * @kdev: kernel device for sysfs attributes
+ * @attr: sysfs attributes
+ * @head: list management
+ * @base: base KMS object
+ * @name: human readable name, can be overwritten by the driver
+ * @connector_type: one of the DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
+ * @connector_type_id: index into connector type enum
+ * @interlace_allowed: can this connector handle interlaced modes?
+ * @doublescan_allowed: can this connector handle doublescan?
+ * @stereo_allowed: can this connector handle stereo modes?
+ * @registered: is this connector exposed (registered) with userspace?
+ * @modes: modes available on this connector (from fill_modes() + user)
+ * @status: one of the drm_connector_status enums (connected, not, or unknown)
+ * @probed_modes: list of modes derived directly from the display
+ * @funcs: connector control functions
+ * @edid_blob_ptr: DRM property containing EDID if present
+ * @properties: property tracking for this connector
+ * @dpms: current dpms state
+ * @helper_private: mid-layer private data
+ * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
+ * @force: a DRM_FORCE_<foo> state for forced mode sets
+ * @override_edid: has the EDID been overwritten through debugfs for testing?
+ * @encoder_ids: valid encoders for this connector
+ * @encoder: encoder driving this connector, if any
+ * @eld: EDID-like data, if present
+ * @latency_present: AV delay info from ELD, if found
+ * @video_latency: video latency info from ELD, if found
+ * @audio_latency: audio latency info from ELD, if found
+ * @null_edid_counter: track sinks that give us all zeros for the EDID
+ * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
+ * @edid_corrupt: indicates whether the last read EDID was corrupt
+ * @debugfs_entry: debugfs directory for this connector
+ * @state: current atomic state for this connector
+ * @has_tile: is this connector connected to a tiled monitor
+ * @tile_group: tile group for the connected monitor
+ * @tile_is_single_monitor: whether the tile is one monitor housing
+ * @num_h_tile: number of horizontal tiles in the tile group
+ * @num_v_tile: number of vertical tiles in the tile group
+ * @tile_h_loc: horizontal location of this tile
+ * @tile_v_loc: vertical location of this tile
+ * @tile_h_size: horizontal size of this tile.
+ * @tile_v_size: vertical size of this tile.
+ *
+ * Each connector may be connected to one or more CRTCs, or may be clonable by
+ * another connector if they can share a CRTC.  Each connector also has a specific
+ * position in the broader display (referred to as a 'screen' though it could
+ * span multiple monitors).
+ */
+struct drm_connector {
+	struct drm_device *dev;
+	struct device *kdev;
+	struct device_attribute *attr;
+	struct list_head head;
+
+	struct drm_mode_object base;
+
+	char *name;
+
+	/**
+	 * @index: Compacted connector index, which matches the position inside
+	 * the mode_config.list for drivers not supporting hot-add/removing. Can
+	 * be used as an array index. It is invariant over the lifetime of the
+	 * connector.
+	 */
+	unsigned index;
+
+	int connector_type;
+	int connector_type_id;
+	bool interlace_allowed;
+	bool doublescan_allowed;
+	bool stereo_allowed;
+	bool registered;
+	struct list_head modes; /* list of modes on this connector */
+
+	enum drm_connector_status status;
+
+	/* these are modes added by probing with DDC or the BIOS */
+	struct list_head probed_modes;
+
+	/**
+	 * @display_info: Display information is filled from EDID information
+	 * when a display is detected. For non hot-pluggable displays such as
+	 * flat panels in embedded systems, the driver should initialize the
+	 * display_info.width_mm and display_info.height_mm fields with the
+	 * physical size of the display.
+	 */
+	struct drm_display_info display_info;
+	const struct drm_connector_funcs *funcs;
+
+	struct drm_property_blob *edid_blob_ptr;
+	struct drm_object_properties properties;
+
+	/**
+	 * @path_blob_ptr:
+	 *
+	 * DRM blob property data for the DP MST path property.
+	 */
+	struct drm_property_blob *path_blob_ptr;
+
+	/**
+	 * @tile_blob_ptr:
+	 *
+	 * DRM blob property data for the tile property (used mostly by DP MST).
+	 * This is meant for screens which are driven through separate display
+	 * pipelines represented by &drm_crtc, which might not be running with
+	 * genlocked clocks. For tiled panels which are genlocked, like
+	 * dual-link LVDS or dual-link DSI, the driver should try to not expose
+	 * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
+	 */
+	struct drm_property_blob *tile_blob_ptr;
+
+/* should we poll this connector for connects and disconnects */
+/* hot plug detectable */
+#define DRM_CONNECTOR_POLL_HPD (1 << 0)
+/* poll for connections */
+#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
+/* can cleanly poll for disconnections without flickering the screen */
+/* DACs should rarely do this without a lot of testing */
+#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
+
+	/**
+	 * @polled:
+	 *
+	 * Connector polling mode, a combination of
+	 *
+	 * DRM_CONNECTOR_POLL_HPD
+	 *     The connector generates hotplug events and doesn't need to be
+	 *     periodically polled. The CONNECT and DISCONNECT flags must not
+	 *     be set together with the HPD flag.
+	 *
+	 * DRM_CONNECTOR_POLL_CONNECT
+	 *     Periodically poll the connector for connection.
+	 *
+	 * DRM_CONNECTOR_POLL_DISCONNECT
+	 *     Periodically poll the connector for disconnection.
+	 *
+	 * Set to 0 for connectors that don't support connection status
+	 * discovery.
+	 */
+	uint8_t polled;
+
+	/* requested DPMS state */
+	int dpms;
+
+	const struct drm_connector_helper_funcs *helper_private;
+
+	/* forced on connector */
+	struct drm_cmdline_mode cmdline_mode;
+	enum drm_connector_force force;
+	bool override_edid;
+
+#define DRM_CONNECTOR_MAX_ENCODER 3
+	uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
+	struct drm_encoder *encoder; /* currently active encoder */
+
+#define MAX_ELD_BYTES	128
+	/* EDID bits */
+	uint8_t eld[MAX_ELD_BYTES];
+	bool latency_present[2];
+	int video_latency[2];	/* [0]: progressive, [1]: interlaced */
+	int audio_latency[2];
+	int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
+	unsigned bad_edid_counter;
+
+	/* Flag for raw EDID header corruption - used in Displayport
+	 * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
+	 */
+	bool edid_corrupt;
+
+	struct dentry *debugfs_entry;
+
+	struct drm_connector_state *state;
+
+	/* DisplayID bits */
+	bool has_tile;
+	struct drm_tile_group *tile_group;
+	bool tile_is_single_monitor;
+
+	uint8_t num_h_tile, num_v_tile;
+	uint8_t tile_h_loc, tile_v_loc;
+	uint16_t tile_h_size, tile_v_size;
+};
+
+#define obj_to_connector(x) container_of(x, struct drm_connector, base)
+
+int drm_connector_init(struct drm_device *dev,
+		       struct drm_connector *connector,
+		       const struct drm_connector_funcs *funcs,
+		       int connector_type);
+int drm_connector_register(struct drm_connector *connector);
+void drm_connector_unregister(struct drm_connector *connector);
+int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+				      struct drm_encoder *encoder);
+
+void drm_connector_cleanup(struct drm_connector *connector);
+static inline unsigned drm_connector_index(struct drm_connector *connector)
+{
+	return connector->index;
+}
+
+/**
+ * drm_connector_lookup - lookup connector object
+ * @dev: DRM device
+ * @id: connector object id
+ *
+ * This function looks up the connector object specified by id
+ * add takes a reference to it.
+ */
+static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev,
+		uint32_t id)
+{
+	struct drm_mode_object *mo;
+	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CONNECTOR);
+	return mo ? obj_to_connector(mo) : NULL;
+}
+
+/**
+ * drm_connector_reference - incr the connector refcnt
+ * @connector: connector
+ *
+ * This function increments the connector's refcount.
+ */
+static inline void drm_connector_reference(struct drm_connector *connector)
+{
+	drm_mode_object_reference(&connector->base);
+}
+
+/**
+ * drm_connector_unreference - unref a connector
+ * @connector: connector to unref
+ *
+ * This function decrements the connector's refcount and frees it if it drops to zero.
+ */
+static inline void drm_connector_unreference(struct drm_connector *connector)
+{
+	drm_mode_object_unreference(&connector->base);
+}
+
+const char *drm_get_connector_status_name(enum drm_connector_status status);
+const char *drm_get_subpixel_order_name(enum subpixel_order order);
+const char *drm_get_dpms_name(int val);
+const char *drm_get_dvi_i_subconnector_name(int val);
+const char *drm_get_dvi_i_select_name(int val);
+const char *drm_get_tv_subconnector_name(int val);
+const char *drm_get_tv_select_name(int val);
+
+int drm_mode_create_dvi_i_properties(struct drm_device *dev);
+int drm_mode_create_tv_properties(struct drm_device *dev,
+				  unsigned int num_modes,
+				  const char * const modes[]);
+int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
+int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
+
+int drm_mode_connector_set_path_property(struct drm_connector *connector,
+					 const char *path);
+int drm_mode_connector_set_tile_property(struct drm_connector *connector);
+int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+					    const struct edid *edid);
+
+/**
+ * drm_for_each_connector - iterate over all connectors
+ * @connector: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all connectors of @dev.
+ */
+#define drm_for_each_connector(connector, dev) \
+	for (assert_drm_connector_list_read_locked(&(dev)->mode_config),	\
+	     connector = list_first_entry(&(dev)->mode_config.connector_list,	\
+					  struct drm_connector, head);		\
+	     &connector->head != (&(dev)->mode_config.connector_list);		\
+	     connector = list_next_entry(connector, head))
+
+#endif
diff --git a/include/drm/drm_core.h b/include/drm/drm_core.h
deleted file mode 100644
index 4e75238..0000000
--- a/include/drm/drm_core.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright 2004 Jon Smirl <jonsmirl@gmail.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#define CORE_AUTHOR		"Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"
-
-#define CORE_NAME		"drm"
-#define CORE_DESC		"DRM shared core routines"
-#define CORE_DATE		"20060810"
-
-#define DRM_IF_MAJOR	1
-#define DRM_IF_MINOR	4
-
-#define CORE_MAJOR	1
-#define CORE_MINOR	1
-#define CORE_PATCHLEVEL 0
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 44e0708..0aa2925 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -35,40 +35,27 @@
 #include <uapi/drm/drm_mode.h>
 #include <uapi/drm/drm_fourcc.h>
 #include <drm/drm_modeset_lock.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_mode_object.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_property.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_color_mgmt.h>
 
 struct drm_device;
 struct drm_mode_set;
-struct drm_framebuffer;
-struct drm_object_properties;
 struct drm_file;
 struct drm_clip_rect;
 struct device_node;
 struct fence;
 struct edid;
 
-struct drm_mode_object {
-	uint32_t id;
-	uint32_t type;
-	struct drm_object_properties *properties;
-	struct kref refcount;
-	void (*free_cb)(struct kref *kref);
-};
-
-#define DRM_OBJECT_MAX_PROPERTY 24
-struct drm_object_properties {
-	int count, atomic_count;
-	/* NOTE: if we ever start dynamically destroying properties (ie.
-	 * not at drm_mode_config_cleanup() time), then we'd have to do
-	 * a better job of detaching property from mode objects to avoid
-	 * dangling property pointers:
-	 */
-	struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY];
-	/* do not read/write values directly, but use drm_object_property_get_value()
-	 * and drm_object_property_set_value():
-	 */
-	uint64_t values[DRM_OBJECT_MAX_PROPERTY];
-};
-
 static inline int64_t U642I64(uint64_t val)
 {
 	return (int64_t)*((int64_t *)&val);
@@ -78,84 +65,6 @@
 	return (uint64_t)*((uint64_t *)&val);
 }
 
-/*
- * Rotation property bits. DRM_ROTATE_<degrees> rotates the image by the
- * specified amount in degrees in counter clockwise direction. DRM_REFLECT_X and
- * DRM_REFLECT_Y reflects the image along the specified axis prior to rotation
- */
-#define DRM_ROTATE_MASK 0x0f
-#define DRM_ROTATE_0	0
-#define DRM_ROTATE_90	1
-#define DRM_ROTATE_180	2
-#define DRM_ROTATE_270	3
-#define DRM_REFLECT_MASK (~DRM_ROTATE_MASK)
-#define DRM_REFLECT_X	4
-#define DRM_REFLECT_Y	5
-
-enum drm_connector_force {
-	DRM_FORCE_UNSPECIFIED,
-	DRM_FORCE_OFF,
-	DRM_FORCE_ON,         /* force on analog part normally */
-	DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
-};
-
-#include <drm/drm_modes.h>
-
-enum drm_connector_status {
-	connector_status_connected = 1,
-	connector_status_disconnected = 2,
-	connector_status_unknown = 3,
-};
-
-enum subpixel_order {
-	SubPixelUnknown = 0,
-	SubPixelHorizontalRGB,
-	SubPixelHorizontalBGR,
-	SubPixelVerticalRGB,
-	SubPixelVerticalBGR,
-	SubPixelNone,
-};
-
-#define DRM_COLOR_FORMAT_RGB444		(1<<0)
-#define DRM_COLOR_FORMAT_YCRCB444	(1<<1)
-#define DRM_COLOR_FORMAT_YCRCB422	(1<<2)
-
-#define DRM_BUS_FLAG_DE_LOW		(1<<0)
-#define DRM_BUS_FLAG_DE_HIGH		(1<<1)
-/* drive data on pos. edge */
-#define DRM_BUS_FLAG_PIXDATA_POSEDGE	(1<<2)
-/* drive data on neg. edge */
-#define DRM_BUS_FLAG_PIXDATA_NEGEDGE	(1<<3)
-
-/*
- * Describes a given display (e.g. CRT or flat panel) and its limitations.
- */
-struct drm_display_info {
-	char name[DRM_DISPLAY_INFO_LEN];
-
-	/* Physical size */
-        unsigned int width_mm;
-	unsigned int height_mm;
-
-	/* Clock limits FIXME: storage format */
-	unsigned int min_vfreq, max_vfreq;
-	unsigned int min_hfreq, max_hfreq;
-	unsigned int pixel_clock;
-	unsigned int bpc;
-
-	enum subpixel_order subpixel_order;
-	u32 color_formats;
-
-	const u32 *bus_formats;
-	unsigned int num_bus_formats;
-	u32 bus_flags;
-
-	/* Mask of supported hdmi deep color modes */
-	u8 edid_hdmi_dc_modes;
-
-	u8 cea_rev;
-};
-
 /* data corresponds to displayid vend/prod/serial */
 struct drm_tile_group {
 	struct kref refcount;
@@ -164,130 +73,7 @@
 	u8 group_data[8];
 };
 
-/**
- * struct drm_framebuffer_funcs - framebuffer hooks
- */
-struct drm_framebuffer_funcs {
-	/**
-	 * @destroy:
-	 *
-	 * Clean up framebuffer resources, specifically also unreference the
-	 * backing storage. The core guarantees to call this function for every
-	 * framebuffer successfully created by ->fb_create() in
-	 * &drm_mode_config_funcs. Drivers must also call
-	 * drm_framebuffer_cleanup() to release DRM core resources for this
-	 * framebuffer.
-	 */
-	void (*destroy)(struct drm_framebuffer *framebuffer);
-
-	/**
-	 * @create_handle:
-	 *
-	 * Create a buffer handle in the driver-specific buffer manager (either
-	 * GEM or TTM) valid for the passed-in struct &drm_file. This is used by
-	 * the core to implement the GETFB IOCTL, which returns (for
-	 * sufficiently priviledged user) also a native buffer handle. This can
-	 * be used for seamless transitions between modesetting clients by
-	 * copying the current screen contents to a private buffer and blending
-	 * between that and the new contents.
-	 *
-	 * GEM based drivers should call drm_gem_handle_create() to create the
-	 * handle.
-	 *
-	 * RETURNS:
-	 *
-	 * 0 on success or a negative error code on failure.
-	 */
-	int (*create_handle)(struct drm_framebuffer *fb,
-			     struct drm_file *file_priv,
-			     unsigned int *handle);
-	/**
-	 * @dirty:
-	 *
-	 * Optional callback for the dirty fb IOCTL.
-	 *
-	 * Userspace can notify the driver via this callback that an area of the
-	 * framebuffer has changed and should be flushed to the display
-	 * hardware. This can also be used internally, e.g. by the fbdev
-	 * emulation, though that's not the case currently.
-	 *
-	 * See documentation in drm_mode.h for the struct drm_mode_fb_dirty_cmd
-	 * for more information as all the semantics and arguments have a one to
-	 * one mapping on this function.
-	 *
-	 * RETURNS:
-	 *
-	 * 0 on success or a negative error code on failure.
-	 */
-	int (*dirty)(struct drm_framebuffer *framebuffer,
-		     struct drm_file *file_priv, unsigned flags,
-		     unsigned color, struct drm_clip_rect *clips,
-		     unsigned num_clips);
-};
-
-struct drm_framebuffer {
-	struct drm_device *dev;
-	/*
-	 * Note that the fb is refcounted for the benefit of driver internals,
-	 * for example some hw, disabling a CRTC/plane is asynchronous, and
-	 * scanout does not actually complete until the next vblank.  So some
-	 * cleanup (like releasing the reference(s) on the backing GEM bo(s))
-	 * should be deferred.  In cases like this, the driver would like to
-	 * hold a ref to the fb even though it has already been removed from
-	 * userspace perspective.
-	 * The refcount is stored inside the mode object.
-	 */
-	/*
-	 * Place on the dev->mode_config.fb_list, access protected by
-	 * dev->mode_config.fb_lock.
-	 */
-	struct list_head head;
-	struct drm_mode_object base;
-	const struct drm_framebuffer_funcs *funcs;
-	unsigned int pitches[4];
-	unsigned int offsets[4];
-	uint64_t modifier[4];
-	unsigned int width;
-	unsigned int height;
-	/* depth can be 15 or 16 */
-	unsigned int depth;
-	int bits_per_pixel;
-	int flags;
-	uint32_t pixel_format; /* fourcc format */
-	int hot_x;
-	int hot_y;
-	struct list_head filp_head;
-};
-
-struct drm_property_blob {
-	struct drm_mode_object base;
-	struct drm_device *dev;
-	struct list_head head_global;
-	struct list_head head_file;
-	size_t length;
-	unsigned char data[];
-};
-
-struct drm_property_enum {
-	uint64_t value;
-	struct list_head head;
-	char name[DRM_PROP_NAME_LEN];
-};
-
-struct drm_property {
-	struct list_head head;
-	struct drm_mode_object base;
-	uint32_t flags;
-	char name[DRM_PROP_NAME_LEN];
-	uint32_t num_values;
-	uint64_t *values;
-	struct drm_device *dev;
-
-	struct list_head enum_list;
-};
-
 struct drm_crtc;
-struct drm_connector;
 struct drm_encoder;
 struct drm_pending_vblank_event;
 struct drm_plane;
@@ -296,7 +82,6 @@
 
 struct drm_crtc_helper_funcs;
 struct drm_encoder_helper_funcs;
-struct drm_connector_helper_funcs;
 struct drm_plane_helper_funcs;
 
 /**
@@ -324,8 +109,6 @@
  * @ctm: Transformation matrix
  * @gamma_lut: Lookup table for converting pixel data after the
  *	conversion matrix
- * @event: optional pointer to a DRM event to signal upon completion of the
- * 	state update
  * @state: backpointer to global drm_atomic_state
  *
  * Note that the distinction between @enable and @active is rather subtile:
@@ -374,6 +157,46 @@
 	struct drm_property_blob *ctm;
 	struct drm_property_blob *gamma_lut;
 
+	/**
+	 * @event:
+	 *
+	 * Optional pointer to a DRM event to signal upon completion of the
+	 * state update. The driver must send out the event when the atomic
+	 * commit operation completes. There are two cases:
+	 *
+	 *  - The event is for a CRTC which is being disabled through this
+	 *    atomic commit. In that case the event can be send out any time
+	 *    after the hardware has stopped scanning out the current
+	 *    framebuffers. It should contain the timestamp and counter for the
+	 *    last vblank before the display pipeline was shut off.
+	 *
+	 *  - For a CRTC which is enabled at the end of the commit (even when it
+	 *    undergoes an full modeset) the vblank timestamp and counter must
+	 *    be for the vblank right before the first frame that scans out the
+	 *    new set of buffers. Again the event can only be sent out after the
+	 *    hardware has stopped scanning out the old buffers.
+	 *
+	 *  - Events for disabled CRTCs are not allowed, and drivers can ignore
+	 *    that case.
+	 *
+	 * This can be handled by the drm_crtc_send_vblank_event() function,
+	 * which the driver should call on the provided event upon completion of
+	 * the atomic commit. Note that if the driver supports vblank signalling
+	 * and timestamping the vblank counters and timestamps must agree with
+	 * the ones returned from page flip events. With the current vblank
+	 * helper infrastructure this can be achieved by holding a vblank
+	 * reference while the page flip is pending, acquired through
+	 * drm_crtc_vblank_get() and released with drm_crtc_vblank_put().
+	 * Drivers are free to implement their own vblank counter and timestamp
+	 * tracking though, e.g. if they have accurate timestamp registers in
+	 * hardware.
+	 *
+	 * For hardware which supports some means to synchronize vblank
+	 * interrupt delivery with committing display state there's also
+	 * drm_crtc_arm_vblank_event(). See the documentation of that function
+	 * for a detailed discussion of the constraints it needs to be used
+	 * safely.
+	 */
 	struct drm_pending_vblank_event *event;
 
 	struct drm_atomic_state *state;
@@ -545,16 +368,6 @@
 	 * counter and timestamp tracking though, e.g. if they have accurate
 	 * timestamp registers in hardware.
 	 *
-	 * FIXME:
-	 *
-	 * Up to that point drivers need to manage events themselves and can use
-	 * even->base.list freely for that. Specifically they need to ensure
-	 * that they don't send out page flip (or vblank) events for which the
-	 * corresponding drm file has been closed already. The drm core
-	 * unfortunately does not (yet) take care of that. Therefore drivers
-	 * currently must clean up and release pending events in their
-	 * ->preclose driver function.
-	 *
 	 * This callback is optional.
 	 *
 	 * NOTE:
@@ -581,6 +394,24 @@
 			 uint32_t flags);
 
 	/**
+	 * @page_flip_target:
+	 *
+	 * Same as @page_flip but with an additional parameter specifying the
+	 * absolute target vertical blank period (as reported by
+	 * drm_crtc_vblank_count()) when the flip should take effect.
+	 *
+	 * Note that the core code calls drm_crtc_vblank_get before this entry
+	 * point, and will call drm_crtc_vblank_put if this entry point returns
+	 * any non-0 error code. It's the driver's responsibility to call
+	 * drm_crtc_vblank_put after this entry point returns 0, typically when
+	 * the flip completes.
+	 */
+	int (*page_flip_target)(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				struct drm_pending_vblank_event *event,
+				uint32_t flags, uint32_t target);
+
+	/**
 	 * @set_property:
 	 *
 	 * This is the legacy entry point to update a property attached to the
@@ -852,1201 +683,6 @@
 };
 
 /**
- * struct drm_connector_state - mutable connector state
- * @connector: backpointer to the connector
- * @crtc: CRTC to connect connector to, NULL if disabled
- * @best_encoder: can be used by helpers and drivers to select the encoder
- * @state: backpointer to global drm_atomic_state
- */
-struct drm_connector_state {
-	struct drm_connector *connector;
-
-	struct drm_crtc *crtc;  /* do not write directly, use drm_atomic_set_crtc_for_connector() */
-
-	struct drm_encoder *best_encoder;
-
-	struct drm_atomic_state *state;
-};
-
-/**
- * struct drm_connector_funcs - control connectors on a given device
- *
- * Each CRTC may have one or more connectors attached to it.  The functions
- * below allow the core DRM code to control connectors, enumerate available modes,
- * etc.
- */
-struct drm_connector_funcs {
-	/**
-	 * @dpms:
-	 *
-	 * Legacy entry point to set the per-connector DPMS state. Legacy DPMS
-	 * is exposed as a standard property on the connector, but diverted to
-	 * this callback in the drm core. Note that atomic drivers don't
-	 * implement the 4 level DPMS support on the connector any more, but
-	 * instead only have an on/off "ACTIVE" property on the CRTC object.
-	 *
-	 * Drivers implementing atomic modeset should use
-	 * drm_atomic_helper_connector_dpms() to implement this hook.
-	 *
-	 * RETURNS:
-	 *
-	 * 0 on success or a negative error code on failure.
-	 */
-	int (*dpms)(struct drm_connector *connector, int mode);
-
-	/**
-	 * @reset:
-	 *
-	 * Reset connector hardware and software state to off. This function isn't
-	 * called by the core directly, only through drm_mode_config_reset().
-	 * It's not a helper hook only for historical reasons.
-	 *
-	 * Atomic drivers can use drm_atomic_helper_connector_reset() to reset
-	 * atomic state using this hook.
-	 */
-	void (*reset)(struct drm_connector *connector);
-
-	/**
-	 * @detect:
-	 *
-	 * Check to see if anything is attached to the connector. The parameter
-	 * force is set to false whilst polling, true when checking the
-	 * connector due to a user request. force can be used by the driver to
-	 * avoid expensive, destructive operations during automated probing.
-	 *
-	 * FIXME:
-	 *
-	 * Note that this hook is only called by the probe helper. It's not in
-	 * the helper library vtable purely for historical reasons. The only DRM
-	 * core	entry point to probe connector state is @fill_modes.
-	 *
-	 * RETURNS:
-	 *
-	 * drm_connector_status indicating the connector's status.
-	 */
-	enum drm_connector_status (*detect)(struct drm_connector *connector,
-					    bool force);
-
-	/**
-	 * @force:
-	 *
-	 * This function is called to update internal encoder state when the
-	 * connector is forced to a certain state by userspace, either through
-	 * the sysfs interfaces or on the kernel cmdline. In that case the
-	 * @detect callback isn't called.
-	 *
-	 * FIXME:
-	 *
-	 * Note that this hook is only called by the probe helper. It's not in
-	 * the helper library vtable purely for historical reasons. The only DRM
-	 * core	entry point to probe connector state is @fill_modes.
-	 */
-	void (*force)(struct drm_connector *connector);
-
-	/**
-	 * @fill_modes:
-	 *
-	 * Entry point for output detection and basic mode validation. The
-	 * driver should reprobe the output if needed (e.g. when hotplug
-	 * handling is unreliable), add all detected modes to connector->modes
-	 * and filter out any the device can't support in any configuration. It
-	 * also needs to filter out any modes wider or higher than the
-	 * parameters max_width and max_height indicate.
-	 *
-	 * The drivers must also prune any modes no longer valid from
-	 * connector->modes. Furthermore it must update connector->status and
-	 * connector->edid.  If no EDID has been received for this output
-	 * connector->edid must be NULL.
-	 *
-	 * Drivers using the probe helpers should use
-	 * drm_helper_probe_single_connector_modes() or
-	 * drm_helper_probe_single_connector_modes_nomerge() to implement this
-	 * function.
-	 *
-	 * RETURNS:
-	 *
-	 * The number of modes detected and filled into connector->modes.
-	 */
-	int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
-
-	/**
-	 * @set_property:
-	 *
-	 * This is the legacy entry point to update a property attached to the
-	 * connector.
-	 *
-	 * Drivers implementing atomic modeset should use
-	 * drm_atomic_helper_connector_set_property() to implement this hook.
-	 *
-	 * This callback is optional if the driver does not support any legacy
-	 * driver-private properties.
-	 *
-	 * RETURNS:
-	 *
-	 * 0 on success or a negative error code on failure.
-	 */
-	int (*set_property)(struct drm_connector *connector, struct drm_property *property,
-			     uint64_t val);
-
-	/**
-	 * @late_register:
-	 *
-	 * This optional hook can be used to register additional userspace
-	 * interfaces attached to the connector, light backlight control, i2c,
-	 * DP aux or similar interfaces. It is called late in the driver load
-	 * sequence from drm_connector_register() when registering all the
-	 * core drm connector interfaces. Everything added from this callback
-	 * should be unregistered in the early_unregister callback.
-	 *
-	 * Returns:
-	 *
-	 * 0 on success, or a negative error code on failure.
-	 */
-	int (*late_register)(struct drm_connector *connector);
-
-	/**
-	 * @early_unregister:
-	 *
-	 * This optional hook should be used to unregister the additional
-	 * userspace interfaces attached to the connector from
-	 * late_unregister(). It is called from drm_connector_unregister(),
-	 * early in the driver unload sequence to disable userspace access
-	 * before data structures are torndown.
-	 */
-	void (*early_unregister)(struct drm_connector *connector);
-
-	/**
-	 * @destroy:
-	 *
-	 * Clean up connector resources. This is called at driver unload time
-	 * through drm_mode_config_cleanup(). It can also be called at runtime
-	 * when a connector is being hot-unplugged for drivers that support
-	 * connector hotplugging (e.g. DisplayPort MST).
-	 */
-	void (*destroy)(struct drm_connector *connector);
-
-	/**
-	 * @atomic_duplicate_state:
-	 *
-	 * Duplicate the current atomic state for this connector and return it.
-	 * The core and helpers gurantee that any atomic state duplicated with
-	 * this hook and still owned by the caller (i.e. not transferred to the
-	 * driver by calling ->atomic_commit() from struct
-	 * &drm_mode_config_funcs) will be cleaned up by calling the
-	 * @atomic_destroy_state hook in this structure.
-	 *
-	 * Atomic drivers which don't subclass struct &drm_connector_state should use
-	 * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the
-	 * state structure to extend it with driver-private state should use
-	 * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is
-	 * duplicated in a consistent fashion across drivers.
-	 *
-	 * It is an error to call this hook before connector->state has been
-	 * initialized correctly.
-	 *
-	 * NOTE:
-	 *
-	 * If the duplicate state references refcounted resources this hook must
-	 * acquire a reference for each of them. The driver must release these
-	 * references again in @atomic_destroy_state.
-	 *
-	 * RETURNS:
-	 *
-	 * Duplicated atomic state or NULL when the allocation failed.
-	 */
-	struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
-
-	/**
-	 * @atomic_destroy_state:
-	 *
-	 * Destroy a state duplicated with @atomic_duplicate_state and release
-	 * or unreference all resources it references
-	 */
-	void (*atomic_destroy_state)(struct drm_connector *connector,
-				     struct drm_connector_state *state);
-
-	/**
-	 * @atomic_set_property:
-	 *
-	 * Decode a driver-private property value and store the decoded value
-	 * into the passed-in state structure. Since the atomic core decodes all
-	 * standardized properties (even for extensions beyond the core set of
-	 * properties which might not be implemented by all drivers) this
-	 * requires drivers to subclass the state structure.
-	 *
-	 * Such driver-private properties should really only be implemented for
-	 * truly hardware/vendor specific state. Instead it is preferred to
-	 * standardize atomic extension and decode the properties used to expose
-	 * such an extension in the core.
-	 *
-	 * Do not call this function directly, use
-	 * drm_atomic_connector_set_property() instead.
-	 *
-	 * This callback is optional if the driver does not support any
-	 * driver-private atomic properties.
-	 *
-	 * NOTE:
-	 *
-	 * This function is called in the state assembly phase of atomic
-	 * modesets, which can be aborted for any reason (including on
-	 * userspace's request to just check whether a configuration would be
-	 * possible). Drivers MUST NOT touch any persistent state (hardware or
-	 * software) or data structures except the passed in @state parameter.
-	 *
-	 * Also since userspace controls in which order properties are set this
-	 * function must not do any input validation (since the state update is
-	 * incomplete and hence likely inconsistent). Instead any such input
-	 * validation must be done in the various atomic_check callbacks.
-	 *
-	 * RETURNS:
-	 *
-	 * 0 if the property has been found, -EINVAL if the property isn't
-	 * implemented by the driver (which shouldn't ever happen, the core only
-	 * asks for properties attached to this connector). No other validation
-	 * is allowed by the driver. The core already checks that the property
-	 * value is within the range (integer, valid enum value, ...) the driver
-	 * set when registering the property.
-	 */
-	int (*atomic_set_property)(struct drm_connector *connector,
-				   struct drm_connector_state *state,
-				   struct drm_property *property,
-				   uint64_t val);
-
-	/**
-	 * @atomic_get_property:
-	 *
-	 * Reads out the decoded driver-private property. This is used to
-	 * implement the GETCONNECTOR IOCTL.
-	 *
-	 * Do not call this function directly, use
-	 * drm_atomic_connector_get_property() instead.
-	 *
-	 * This callback is optional if the driver does not support any
-	 * driver-private atomic properties.
-	 *
-	 * RETURNS:
-	 *
-	 * 0 on success, -EINVAL if the property isn't implemented by the
-	 * driver (which shouldn't ever happen, the core only asks for
-	 * properties attached to this connector).
-	 */
-	int (*atomic_get_property)(struct drm_connector *connector,
-				   const struct drm_connector_state *state,
-				   struct drm_property *property,
-				   uint64_t *val);
-};
-
-/**
- * struct drm_encoder_funcs - encoder controls
- *
- * Encoders sit between CRTCs and connectors.
- */
-struct drm_encoder_funcs {
-	/**
-	 * @reset:
-	 *
-	 * Reset encoder hardware and software state to off. This function isn't
-	 * called by the core directly, only through drm_mode_config_reset().
-	 * It's not a helper hook only for historical reasons.
-	 */
-	void (*reset)(struct drm_encoder *encoder);
-
-	/**
-	 * @destroy:
-	 *
-	 * Clean up encoder resources. This is only called at driver unload time
-	 * through drm_mode_config_cleanup() since an encoder cannot be
-	 * hotplugged in DRM.
-	 */
-	void (*destroy)(struct drm_encoder *encoder);
-
-	/**
-	 * @late_register:
-	 *
-	 * This optional hook can be used to register additional userspace
-	 * interfaces attached to the encoder like debugfs interfaces.
-	 * It is called late in the driver load sequence from drm_dev_register().
-	 * Everything added from this callback should be unregistered in
-	 * the early_unregister callback.
-	 *
-	 * Returns:
-	 *
-	 * 0 on success, or a negative error code on failure.
-	 */
-	int (*late_register)(struct drm_encoder *encoder);
-
-	/**
-	 * @early_unregister:
-	 *
-	 * This optional hook should be used to unregister the additional
-	 * userspace interfaces attached to the encoder from
-	 * late_unregister(). It is called from drm_dev_unregister(),
-	 * early in the driver unload sequence to disable userspace access
-	 * before data structures are torndown.
-	 */
-	void (*early_unregister)(struct drm_encoder *encoder);
-};
-
-#define DRM_CONNECTOR_MAX_ENCODER 3
-
-/**
- * struct drm_encoder - central DRM encoder structure
- * @dev: parent DRM device
- * @head: list management
- * @base: base KMS object
- * @name: human readable name, can be overwritten by the driver
- * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
- * @possible_crtcs: bitmask of potential CRTC bindings
- * @possible_clones: bitmask of potential sibling encoders for cloning
- * @crtc: currently bound CRTC
- * @bridge: bridge associated to the encoder
- * @funcs: control functions
- * @helper_private: mid-layer private data
- *
- * CRTCs drive pixels to encoders, which convert them into signals
- * appropriate for a given connector or set of connectors.
- */
-struct drm_encoder {
-	struct drm_device *dev;
-	struct list_head head;
-
-	struct drm_mode_object base;
-	char *name;
-	int encoder_type;
-
-	/**
-	 * @index: Position inside the mode_config.list, can be used as an array
-	 * index. It is invariant over the lifetime of the encoder.
-	 */
-	unsigned index;
-
-	uint32_t possible_crtcs;
-	uint32_t possible_clones;
-
-	struct drm_crtc *crtc;
-	struct drm_bridge *bridge;
-	const struct drm_encoder_funcs *funcs;
-	const struct drm_encoder_helper_funcs *helper_private;
-};
-
-/* should we poll this connector for connects and disconnects */
-/* hot plug detectable */
-#define DRM_CONNECTOR_POLL_HPD (1 << 0)
-/* poll for connections */
-#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
-/* can cleanly poll for disconnections without flickering the screen */
-/* DACs should rarely do this without a lot of testing */
-#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
-
-#define MAX_ELD_BYTES	128
-
-/**
- * struct drm_connector - central DRM connector control structure
- * @dev: parent DRM device
- * @kdev: kernel device for sysfs attributes
- * @attr: sysfs attributes
- * @head: list management
- * @base: base KMS object
- * @name: human readable name, can be overwritten by the driver
- * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
- * @connector_type_id: index into connector type enum
- * @interlace_allowed: can this connector handle interlaced modes?
- * @doublescan_allowed: can this connector handle doublescan?
- * @stereo_allowed: can this connector handle stereo modes?
- * @registered: is this connector exposed (registered) with userspace?
- * @modes: modes available on this connector (from fill_modes() + user)
- * @status: one of the drm_connector_status enums (connected, not, or unknown)
- * @probed_modes: list of modes derived directly from the display
- * @display_info: information about attached display (e.g. from EDID)
- * @funcs: connector control functions
- * @edid_blob_ptr: DRM property containing EDID if present
- * @properties: property tracking for this connector
- * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
- * @dpms: current dpms state
- * @helper_private: mid-layer private data
- * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
- * @force: a %DRM_FORCE_<foo> state for forced mode sets
- * @override_edid: has the EDID been overwritten through debugfs for testing?
- * @encoder_ids: valid encoders for this connector
- * @encoder: encoder driving this connector, if any
- * @eld: EDID-like data, if present
- * @dvi_dual: dual link DVI, if found
- * @max_tmds_clock: max clock rate, if found
- * @latency_present: AV delay info from ELD, if found
- * @video_latency: video latency info from ELD, if found
- * @audio_latency: audio latency info from ELD, if found
- * @null_edid_counter: track sinks that give us all zeros for the EDID
- * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
- * @edid_corrupt: indicates whether the last read EDID was corrupt
- * @debugfs_entry: debugfs directory for this connector
- * @state: current atomic state for this connector
- * @has_tile: is this connector connected to a tiled monitor
- * @tile_group: tile group for the connected monitor
- * @tile_is_single_monitor: whether the tile is one monitor housing
- * @num_h_tile: number of horizontal tiles in the tile group
- * @num_v_tile: number of vertical tiles in the tile group
- * @tile_h_loc: horizontal location of this tile
- * @tile_v_loc: vertical location of this tile
- * @tile_h_size: horizontal size of this tile.
- * @tile_v_size: vertical size of this tile.
- *
- * Each connector may be connected to one or more CRTCs, or may be clonable by
- * another connector if they can share a CRTC.  Each connector also has a specific
- * position in the broader display (referred to as a 'screen' though it could
- * span multiple monitors).
- */
-struct drm_connector {
-	struct drm_device *dev;
-	struct device *kdev;
-	struct device_attribute *attr;
-	struct list_head head;
-
-	struct drm_mode_object base;
-
-	char *name;
-
-	/**
-	 * @index: Compacted connector index, which matches the position inside
-	 * the mode_config.list for drivers not supporting hot-add/removing. Can
-	 * be used as an array index. It is invariant over the lifetime of the
-	 * connector.
-	 */
-	unsigned index;
-
-	int connector_type;
-	int connector_type_id;
-	bool interlace_allowed;
-	bool doublescan_allowed;
-	bool stereo_allowed;
-	bool registered;
-	struct list_head modes; /* list of modes on this connector */
-
-	enum drm_connector_status status;
-
-	/* these are modes added by probing with DDC or the BIOS */
-	struct list_head probed_modes;
-
-	struct drm_display_info display_info;
-	const struct drm_connector_funcs *funcs;
-
-	struct drm_property_blob *edid_blob_ptr;
-	struct drm_object_properties properties;
-
-	/**
-	 * @path_blob_ptr:
-	 *
-	 * DRM blob property data for the DP MST path property.
-	 */
-	struct drm_property_blob *path_blob_ptr;
-
-	/**
-	 * @tile_blob_ptr:
-	 *
-	 * DRM blob property data for the tile property (used mostly by DP MST).
-	 * This is meant for screens which are driven through separate display
-	 * pipelines represented by &drm_crtc, which might not be running with
-	 * genlocked clocks. For tiled panels which are genlocked, like
-	 * dual-link LVDS or dual-link DSI, the driver should try to not expose
-	 * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
-	 */
-	struct drm_property_blob *tile_blob_ptr;
-
-	uint8_t polled; /* DRM_CONNECTOR_POLL_* */
-
-	/* requested DPMS state */
-	int dpms;
-
-	const struct drm_connector_helper_funcs *helper_private;
-
-	/* forced on connector */
-	struct drm_cmdline_mode cmdline_mode;
-	enum drm_connector_force force;
-	bool override_edid;
-	uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
-	struct drm_encoder *encoder; /* currently active encoder */
-
-	/* EDID bits */
-	uint8_t eld[MAX_ELD_BYTES];
-	bool dvi_dual;
-	int max_tmds_clock;	/* in MHz */
-	bool latency_present[2];
-	int video_latency[2];	/* [0]: progressive, [1]: interlaced */
-	int audio_latency[2];
-	int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
-	unsigned bad_edid_counter;
-
-	/* Flag for raw EDID header corruption - used in Displayport
-	 * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
-	 */
-	bool edid_corrupt;
-
-	struct dentry *debugfs_entry;
-
-	struct drm_connector_state *state;
-
-	/* DisplayID bits */
-	bool has_tile;
-	struct drm_tile_group *tile_group;
-	bool tile_is_single_monitor;
-
-	uint8_t num_h_tile, num_v_tile;
-	uint8_t tile_h_loc, tile_v_loc;
-	uint16_t tile_h_size, tile_v_size;
-};
-
-/**
- * struct drm_plane_state - mutable plane state
- * @plane: backpointer to the plane
- * @crtc: currently bound CRTC, NULL if disabled
- * @fb: currently bound framebuffer
- * @fence: optional fence to wait for before scanning out @fb
- * @crtc_x: left position of visible portion of plane on crtc
- * @crtc_y: upper position of visible portion of plane on crtc
- * @crtc_w: width of visible portion of plane on crtc
- * @crtc_h: height of visible portion of plane on crtc
- * @src_x: left position of visible portion of plane within
- *	plane (in 16.16)
- * @src_y: upper position of visible portion of plane within
- *	plane (in 16.16)
- * @src_w: width of visible portion of plane (in 16.16)
- * @src_h: height of visible portion of plane (in 16.16)
- * @rotation: rotation of the plane
- * @zpos: priority of the given plane on crtc (optional)
- * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
- *	where N is the number of active planes for given crtc
- * @state: backpointer to global drm_atomic_state
- */
-struct drm_plane_state {
-	struct drm_plane *plane;
-
-	struct drm_crtc *crtc;   /* do not write directly, use drm_atomic_set_crtc_for_plane() */
-	struct drm_framebuffer *fb;  /* do not write directly, use drm_atomic_set_fb_for_plane() */
-	struct fence *fence;
-
-	/* Signed dest location allows it to be partially off screen */
-	int32_t crtc_x, crtc_y;
-	uint32_t crtc_w, crtc_h;
-
-	/* Source values are 16.16 fixed point */
-	uint32_t src_x, src_y;
-	uint32_t src_h, src_w;
-
-	/* Plane rotation */
-	unsigned int rotation;
-
-	/* Plane zpos */
-	unsigned int zpos;
-	unsigned int normalized_zpos;
-
-	struct drm_atomic_state *state;
-};
-
-
-/**
- * struct drm_plane_funcs - driver plane control functions
- */
-struct drm_plane_funcs {
-	/**
-	 * @update_plane:
-	 *
-	 * This is the legacy entry point to enable and configure the plane for
-	 * the given CRTC and framebuffer. It is never called to disable the
-	 * plane, i.e. the passed-in crtc and fb paramters are never NULL.
-	 *
-	 * The source rectangle in frame buffer memory coordinates is given by
-	 * the src_x, src_y, src_w and src_h parameters (as 16.16 fixed point
-	 * values). Devices that don't support subpixel plane coordinates can
-	 * ignore the fractional part.
-	 *
-	 * The destination rectangle in CRTC coordinates is given by the
-	 * crtc_x, crtc_y, crtc_w and crtc_h parameters (as integer values).
-	 * Devices scale the source rectangle to the destination rectangle. If
-	 * scaling is not supported, and the source rectangle size doesn't match
-	 * the destination rectangle size, the driver must return a
-	 * -<errorname>EINVAL</errorname> error.
-	 *
-	 * Drivers implementing atomic modeset should use
-	 * drm_atomic_helper_update_plane() to implement this hook.
-	 *
-	 * RETURNS:
-	 *
-	 * 0 on success or a negative error code on failure.
-	 */
-	int (*update_plane)(struct drm_plane *plane,
-			    struct drm_crtc *crtc, struct drm_framebuffer *fb,
-			    int crtc_x, int crtc_y,
-			    unsigned int crtc_w, unsigned int crtc_h,
-			    uint32_t src_x, uint32_t src_y,
-			    uint32_t src_w, uint32_t src_h);
-
-	/**
-	 * @disable_plane:
-	 *
-	 * This is the legacy entry point to disable the plane. The DRM core
-	 * calls this method in response to a DRM_IOCTL_MODE_SETPLANE IOCTL call
-	 * with the frame buffer ID set to 0.  Disabled planes must not be
-	 * processed by the CRTC.
-	 *
-	 * Drivers implementing atomic modeset should use
-	 * drm_atomic_helper_disable_plane() to implement this hook.
-	 *
-	 * RETURNS:
-	 *
-	 * 0 on success or a negative error code on failure.
-	 */
-	int (*disable_plane)(struct drm_plane *plane);
-
-	/**
-	 * @destroy:
-	 *
-	 * Clean up plane resources. This is only called at driver unload time
-	 * through drm_mode_config_cleanup() since a plane cannot be hotplugged
-	 * in DRM.
-	 */
-	void (*destroy)(struct drm_plane *plane);
-
-	/**
-	 * @reset:
-	 *
-	 * Reset plane hardware and software state to off. This function isn't
-	 * called by the core directly, only through drm_mode_config_reset().
-	 * It's not a helper hook only for historical reasons.
-	 *
-	 * Atomic drivers can use drm_atomic_helper_plane_reset() to reset
-	 * atomic state using this hook.
-	 */
-	void (*reset)(struct drm_plane *plane);
-
-	/**
-	 * @set_property:
-	 *
-	 * This is the legacy entry point to update a property attached to the
-	 * plane.
-	 *
-	 * Drivers implementing atomic modeset should use
-	 * drm_atomic_helper_plane_set_property() to implement this hook.
-	 *
-	 * This callback is optional if the driver does not support any legacy
-	 * driver-private properties.
-	 *
-	 * RETURNS:
-	 *
-	 * 0 on success or a negative error code on failure.
-	 */
-	int (*set_property)(struct drm_plane *plane,
-			    struct drm_property *property, uint64_t val);
-
-	/**
-	 * @atomic_duplicate_state:
-	 *
-	 * Duplicate the current atomic state for this plane and return it.
-	 * The core and helpers gurantee that any atomic state duplicated with
-	 * this hook and still owned by the caller (i.e. not transferred to the
-	 * driver by calling ->atomic_commit() from struct
-	 * &drm_mode_config_funcs) will be cleaned up by calling the
-	 * @atomic_destroy_state hook in this structure.
-	 *
-	 * Atomic drivers which don't subclass struct &drm_plane_state should use
-	 * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
-	 * state structure to extend it with driver-private state should use
-	 * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is
-	 * duplicated in a consistent fashion across drivers.
-	 *
-	 * It is an error to call this hook before plane->state has been
-	 * initialized correctly.
-	 *
-	 * NOTE:
-	 *
-	 * If the duplicate state references refcounted resources this hook must
-	 * acquire a reference for each of them. The driver must release these
-	 * references again in @atomic_destroy_state.
-	 *
-	 * RETURNS:
-	 *
-	 * Duplicated atomic state or NULL when the allocation failed.
-	 */
-	struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
-
-	/**
-	 * @atomic_destroy_state:
-	 *
-	 * Destroy a state duplicated with @atomic_duplicate_state and release
-	 * or unreference all resources it references
-	 */
-	void (*atomic_destroy_state)(struct drm_plane *plane,
-				     struct drm_plane_state *state);
-
-	/**
-	 * @atomic_set_property:
-	 *
-	 * Decode a driver-private property value and store the decoded value
-	 * into the passed-in state structure. Since the atomic core decodes all
-	 * standardized properties (even for extensions beyond the core set of
-	 * properties which might not be implemented by all drivers) this
-	 * requires drivers to subclass the state structure.
-	 *
-	 * Such driver-private properties should really only be implemented for
-	 * truly hardware/vendor specific state. Instead it is preferred to
-	 * standardize atomic extension and decode the properties used to expose
-	 * such an extension in the core.
-	 *
-	 * Do not call this function directly, use
-	 * drm_atomic_plane_set_property() instead.
-	 *
-	 * This callback is optional if the driver does not support any
-	 * driver-private atomic properties.
-	 *
-	 * NOTE:
-	 *
-	 * This function is called in the state assembly phase of atomic
-	 * modesets, which can be aborted for any reason (including on
-	 * userspace's request to just check whether a configuration would be
-	 * possible). Drivers MUST NOT touch any persistent state (hardware or
-	 * software) or data structures except the passed in @state parameter.
-	 *
-	 * Also since userspace controls in which order properties are set this
-	 * function must not do any input validation (since the state update is
-	 * incomplete and hence likely inconsistent). Instead any such input
-	 * validation must be done in the various atomic_check callbacks.
-	 *
-	 * RETURNS:
-	 *
-	 * 0 if the property has been found, -EINVAL if the property isn't
-	 * implemented by the driver (which shouldn't ever happen, the core only
-	 * asks for properties attached to this plane). No other validation is
-	 * allowed by the driver. The core already checks that the property
-	 * value is within the range (integer, valid enum value, ...) the driver
-	 * set when registering the property.
-	 */
-	int (*atomic_set_property)(struct drm_plane *plane,
-				   struct drm_plane_state *state,
-				   struct drm_property *property,
-				   uint64_t val);
-
-	/**
-	 * @atomic_get_property:
-	 *
-	 * Reads out the decoded driver-private property. This is used to
-	 * implement the GETPLANE IOCTL.
-	 *
-	 * Do not call this function directly, use
-	 * drm_atomic_plane_get_property() instead.
-	 *
-	 * This callback is optional if the driver does not support any
-	 * driver-private atomic properties.
-	 *
-	 * RETURNS:
-	 *
-	 * 0 on success, -EINVAL if the property isn't implemented by the
-	 * driver (which should never happen, the core only asks for
-	 * properties attached to this plane).
-	 */
-	int (*atomic_get_property)(struct drm_plane *plane,
-				   const struct drm_plane_state *state,
-				   struct drm_property *property,
-				   uint64_t *val);
-	/**
-	 * @late_register:
-	 *
-	 * This optional hook can be used to register additional userspace
-	 * interfaces attached to the plane like debugfs interfaces.
-	 * It is called late in the driver load sequence from drm_dev_register().
-	 * Everything added from this callback should be unregistered in
-	 * the early_unregister callback.
-	 *
-	 * Returns:
-	 *
-	 * 0 on success, or a negative error code on failure.
-	 */
-	int (*late_register)(struct drm_plane *plane);
-
-	/**
-	 * @early_unregister:
-	 *
-	 * This optional hook should be used to unregister the additional
-	 * userspace interfaces attached to the plane from
-	 * late_unregister(). It is called from drm_dev_unregister(),
-	 * early in the driver unload sequence to disable userspace access
-	 * before data structures are torndown.
-	 */
-	void (*early_unregister)(struct drm_plane *plane);
-};
-
-enum drm_plane_type {
-	DRM_PLANE_TYPE_OVERLAY,
-	DRM_PLANE_TYPE_PRIMARY,
-	DRM_PLANE_TYPE_CURSOR,
-};
-
-
-/**
- * struct drm_plane - central DRM plane control structure
- * @dev: DRM device this plane belongs to
- * @head: for list management
- * @name: human readable name, can be overwritten by the driver
- * @base: base mode object
- * @possible_crtcs: pipes this plane can be bound to
- * @format_types: array of formats supported by this plane
- * @format_count: number of formats supported
- * @format_default: driver hasn't supplied supported formats for the plane
- * @crtc: currently bound CRTC
- * @fb: currently bound fb
- * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
- * 	drm_mode_set_config_internal() to implement correct refcounting.
- * @funcs: helper functions
- * @properties: property tracking for this plane
- * @type: type of plane (overlay, primary, cursor)
- * @state: current atomic state for this plane
- * @zpos_property: zpos property for this plane
- * @helper_private: mid-layer private data
- */
-struct drm_plane {
-	struct drm_device *dev;
-	struct list_head head;
-
-	char *name;
-
-	/**
-	 * @mutex:
-	 *
-	 * Protects modeset plane state, together with the mutex of &drm_crtc
-	 * this plane is linked to (when active, getting actived or getting
-	 * disabled).
-	 */
-	struct drm_modeset_lock mutex;
-
-	struct drm_mode_object base;
-
-	uint32_t possible_crtcs;
-	uint32_t *format_types;
-	unsigned int format_count;
-	bool format_default;
-
-	struct drm_crtc *crtc;
-	struct drm_framebuffer *fb;
-
-	struct drm_framebuffer *old_fb;
-
-	const struct drm_plane_funcs *funcs;
-
-	struct drm_object_properties properties;
-
-	enum drm_plane_type type;
-
-	/**
-	 * @index: Position inside the mode_config.list, can be used as an array
-	 * index. It is invariant over the lifetime of the plane.
-	 */
-	unsigned index;
-
-	const struct drm_plane_helper_funcs *helper_private;
-
-	struct drm_plane_state *state;
-
-	struct drm_property *zpos_property;
-};
-
-/**
- * struct drm_bridge_funcs - drm_bridge control functions
- * @attach: Called during drm_bridge_attach
- */
-struct drm_bridge_funcs {
-	int (*attach)(struct drm_bridge *bridge);
-
-	/**
-	 * @mode_fixup:
-	 *
-	 * This callback is used to validate and adjust a mode. The paramater
-	 * mode is the display mode that should be fed to the next element in
-	 * the display chain, either the final &drm_connector or the next
-	 * &drm_bridge. The parameter adjusted_mode is the input mode the bridge
-	 * requires. It can be modified by this callback and does not need to
-	 * match mode.
-	 *
-	 * This is the only hook that allows a bridge to reject a modeset. If
-	 * this function passes all other callbacks must succeed for this
-	 * configuration.
-	 *
-	 * NOTE:
-	 *
-	 * This function is called in the check phase of atomic modesets, which
-	 * can be aborted for any reason (including on userspace's request to
-	 * just check whether a configuration would be possible). Drivers MUST
-	 * NOT touch any persistent state (hardware or software) or data
-	 * structures except the passed in @state parameter.
-	 *
-	 * RETURNS:
-	 *
-	 * True if an acceptable configuration is possible, false if the modeset
-	 * operation should be rejected.
-	 */
-	bool (*mode_fixup)(struct drm_bridge *bridge,
-			   const struct drm_display_mode *mode,
-			   struct drm_display_mode *adjusted_mode);
-	/**
-	 * @disable:
-	 *
-	 * This callback should disable the bridge. It is called right before
-	 * the preceding element in the display pipe is disabled. If the
-	 * preceding element is a bridge this means it's called before that
-	 * bridge's ->disable() function. If the preceding element is a
-	 * &drm_encoder it's called right before the encoder's ->disable(),
-	 * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
-	 *
-	 * The bridge can assume that the display pipe (i.e. clocks and timing
-	 * signals) feeding it is still running when this callback is called.
-	 *
-	 * The disable callback is optional.
-	 */
-	void (*disable)(struct drm_bridge *bridge);
-
-	/**
-	 * @post_disable:
-	 *
-	 * This callback should disable the bridge. It is called right after
-	 * the preceding element in the display pipe is disabled. If the
-	 * preceding element is a bridge this means it's called after that
-	 * bridge's ->post_disable() function. If the preceding element is a
-	 * &drm_encoder it's called right after the encoder's ->disable(),
-	 * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
-	 *
-	 * The bridge must assume that the display pipe (i.e. clocks and timing
-	 * singals) feeding it is no longer running when this callback is
-	 * called.
-	 *
-	 * The post_disable callback is optional.
-	 */
-	void (*post_disable)(struct drm_bridge *bridge);
-
-	/**
-	 * @mode_set:
-	 *
-	 * This callback should set the given mode on the bridge. It is called
-	 * after the ->mode_set() callback for the preceding element in the
-	 * display pipeline has been called already. The display pipe (i.e.
-	 * clocks and timing signals) is off when this function is called.
-	 */
-	void (*mode_set)(struct drm_bridge *bridge,
-			 struct drm_display_mode *mode,
-			 struct drm_display_mode *adjusted_mode);
-	/**
-	 * @pre_enable:
-	 *
-	 * This callback should enable the bridge. It is called right before
-	 * the preceding element in the display pipe is enabled. If the
-	 * preceding element is a bridge this means it's called before that
-	 * bridge's ->pre_enable() function. If the preceding element is a
-	 * &drm_encoder it's called right before the encoder's ->enable(),
-	 * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
-	 *
-	 * The display pipe (i.e. clocks and timing signals) feeding this bridge
-	 * will not yet be running when this callback is called. The bridge must
-	 * not enable the display link feeding the next bridge in the chain (if
-	 * there is one) when this callback is called.
-	 *
-	 * The pre_enable callback is optional.
-	 */
-	void (*pre_enable)(struct drm_bridge *bridge);
-
-	/**
-	 * @enable:
-	 *
-	 * This callback should enable the bridge. It is called right after
-	 * the preceding element in the display pipe is enabled. If the
-	 * preceding element is a bridge this means it's called after that
-	 * bridge's ->enable() function. If the preceding element is a
-	 * &drm_encoder it's called right after the encoder's ->enable(),
-	 * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
-	 *
-	 * The bridge can assume that the display pipe (i.e. clocks and timing
-	 * signals) feeding it is running when this callback is called. This
-	 * callback must enable the display link feeding the next bridge in the
-	 * chain if there is one.
-	 *
-	 * The enable callback is optional.
-	 */
-	void (*enable)(struct drm_bridge *bridge);
-};
-
-/**
- * struct drm_bridge - central DRM bridge control structure
- * @dev: DRM device this bridge belongs to
- * @encoder: encoder to which this bridge is connected
- * @next: the next bridge in the encoder chain
- * @of_node: device node pointer to the bridge
- * @list: to keep track of all added bridges
- * @funcs: control functions
- * @driver_private: pointer to the bridge driver's internal context
- */
-struct drm_bridge {
-	struct drm_device *dev;
-	struct drm_encoder *encoder;
-	struct drm_bridge *next;
-#ifdef CONFIG_OF
-	struct device_node *of_node;
-#endif
-	struct list_head list;
-
-	const struct drm_bridge_funcs *funcs;
-	void *driver_private;
-};
-
-/**
- * struct drm_crtc_commit - track modeset commits on a CRTC
- *
- * This structure is used to track pending modeset changes and atomic commit on
- * a per-CRTC basis. Since updating the list should never block this structure
- * is reference counted to allow waiters to safely wait on an event to complete,
- * without holding any locks.
- *
- * It has 3 different events in total to allow a fine-grained synchronization
- * between outstanding updates::
- *
- *	atomic commit thread			hardware
- *
- * 	write new state into hardware	---->	...
- * 	signal hw_done
- * 						switch to new state on next
- * 	...					v/hblank
- *
- *	wait for buffers to show up		...
- *
- *	...					send completion irq
- *						irq handler signals flip_done
- *	cleanup old buffers
- *
- * 	signal cleanup_done
- *
- * 	wait for flip_done		<----
- * 	clean up atomic state
- *
- * The important bit to know is that cleanup_done is the terminal event, but the
- * ordering between flip_done and hw_done is entirely up to the specific driver
- * and modeset state change.
- *
- * For an implementation of how to use this look at
- * drm_atomic_helper_setup_commit() from the atomic helper library.
- */
-struct drm_crtc_commit {
-	/**
-	 * @crtc:
-	 *
-	 * DRM CRTC for this commit.
-	 */
-	struct drm_crtc *crtc;
-
-	/**
-	 * @ref:
-	 *
-	 * Reference count for this structure. Needed to allow blocking on
-	 * completions without the risk of the completion disappearing
-	 * meanwhile.
-	 */
-	struct kref ref;
-
-	/**
-	 * @flip_done:
-	 *
-	 * Will be signaled when the hardware has flipped to the new set of
-	 * buffers. Signals at the same time as when the drm event for this
-	 * commit is sent to userspace, or when an out-fence is singalled. Note
-	 * that for most hardware, in most cases this happens after @hw_done is
-	 * signalled.
-	 */
-	struct completion flip_done;
-
-	/**
-	 * @hw_done:
-	 *
-	 * Will be signalled when all hw register changes for this commit have
-	 * been written out. Especially when disabling a pipe this can be much
-	 * later than than @flip_done, since that can signal already when the
-	 * screen goes black, whereas to fully shut down a pipe more register
-	 * I/O is required.
-	 *
-	 * Note that this does not need to include separately reference-counted
-	 * resources like backing storage buffer pinning, or runtime pm
-	 * management.
-	 */
-	struct completion hw_done;
-
-	/**
-	 * @cleanup_done:
-	 *
-	 * Will be signalled after old buffers have been cleaned up by calling
-	 * drm_atomic_helper_cleanup_planes(). Since this can only happen after
-	 * a vblank wait completed it might be a bit later. This completion is
-	 * useful to throttle updates and avoid hardware updates getting ahead
-	 * of the buffer cleanup too much.
-	 */
-	struct completion cleanup_done;
-
-	/**
-	 * @commit_entry:
-	 *
-	 * Entry on the per-CRTC commit_list. Protected by crtc->commit_lock.
-	 */
-	struct list_head commit_entry;
-
-	/**
-	 * @event:
-	 *
-	 * &drm_pending_vblank_event pointer to clean up private events.
-	 */
-	struct drm_pending_vblank_event *event;
-};
-
-struct __drm_planes_state {
-	struct drm_plane *ptr;
-	struct drm_plane_state *state;
-};
-
-struct __drm_crtcs_state {
-	struct drm_crtc *ptr;
-	struct drm_crtc_state *state;
-	struct drm_crtc_commit *commit;
-};
-
-struct __drm_connnectors_state {
-	struct drm_connector *ptr;
-	struct drm_connector_state *state;
-};
-
-/**
- * struct drm_atomic_state - the global state object for atomic updates
- * @dev: parent DRM device
- * @allow_modeset: allow full modeset
- * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
- * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
- * @planes: pointer to array of structures with per-plane data
- * @crtcs: pointer to array of CRTC pointers
- * @num_connector: size of the @connectors and @connector_states arrays
- * @connectors: pointer to array of structures with per-connector data
- * @acquire_ctx: acquire context for this atomic modeset state update
- */
-struct drm_atomic_state {
-	struct drm_device *dev;
-	bool allow_modeset : 1;
-	bool legacy_cursor_update : 1;
-	bool legacy_set_config : 1;
-	struct __drm_planes_state *planes;
-	struct __drm_crtcs_state *crtcs;
-	int num_connector;
-	struct __drm_connnectors_state *connectors;
-
-	struct drm_modeset_acquire_ctx *acquire_ctx;
-
-	/**
-	 * @commit_work:
-	 *
-	 * Work item which can be used by the driver or helpers to execute the
-	 * commit without blocking.
-	 */
-	struct work_struct commit_work;
-};
-
-
-/**
  * struct drm_mode_set - new values for a CRTC config change
  * @fb: framebuffer to use for new config
  * @crtc: CRTC whose configuration we're about to change
@@ -2237,17 +873,9 @@
 	 * CRTC index supplied in &drm_event to userspace.
 	 *
 	 * The drm core will supply a struct &drm_event in the event
-	 * member of each CRTC's &drm_crtc_state structure. This can be handled by the
-	 * drm_crtc_send_vblank_event() function, which the driver should call on
-	 * the provided event upon completion of the atomic commit. Note that if
-	 * the driver supports vblank signalling and timestamping the vblank
-	 * counters and timestamps must agree with the ones returned from page
-	 * flip events. With the current vblank helper infrastructure this can
-	 * be achieved by holding a vblank reference while the page flip is
-	 * pending, acquired through drm_crtc_vblank_get() and released with
-	 * drm_crtc_vblank_put(). Drivers are free to implement their own vblank
-	 * counter and timestamp tracking though, e.g. if they have accurate
-	 * timestamp registers in hardware.
+	 * member of each CRTC's &drm_crtc_state structure. See the
+	 * documentation for &drm_crtc_state for more details about the precise
+	 * semantics of this event.
 	 *
 	 * NOTE:
 	 *
@@ -2636,12 +1264,6 @@
 	 */
 	struct drm_property *aspect_ratio_property;
 	/**
-	 * @dirty_info_property: Optional connector property to give userspace a
-	 * hint that the DIRTY_FB ioctl should be used.
-	 */
-	struct drm_property *dirty_info_property;
-
-	/**
 	 * @degamma_lut_property: Optional CRTC property to set the LUT used to
 	 * convert the framebuffer's colors to linear gamma.
 	 */
@@ -2702,43 +1324,7 @@
 	struct drm_mode_config_helper_funcs *helper_private;
 };
 
-/**
- * drm_for_each_plane_mask - iterate over planes specified by bitmask
- * @plane: the loop cursor
- * @dev: the DRM device
- * @plane_mask: bitmask of plane indices
- *
- * Iterate over all planes specified by bitmask.
- */
-#define drm_for_each_plane_mask(plane, dev, plane_mask) \
-	list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
-		for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
-
-/**
- * drm_for_each_encoder_mask - iterate over encoders specified by bitmask
- * @encoder: the loop cursor
- * @dev: the DRM device
- * @encoder_mask: bitmask of encoder indices
- *
- * Iterate over all encoders specified by bitmask.
- */
-#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \
-	list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \
-		for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder)))
-
 #define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
-#define obj_to_connector(x) container_of(x, struct drm_connector, base)
-#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
-#define obj_to_mode(x) container_of(x, struct drm_display_mode, base)
-#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
-#define obj_to_property(x) container_of(x, struct drm_property, base)
-#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
-#define obj_to_plane(x) container_of(x, struct drm_plane, base)
-
-struct drm_prop_enum_list {
-	int type;
-	char *name;
-};
 
 extern __printf(6, 7)
 int drm_crtc_init_with_planes(struct drm_device *dev,
@@ -2756,7 +1342,7 @@
  * Given a registered CRTC, return the index of that CRTC within a DRM
  * device's list of CRTCs.
  */
-static inline unsigned int drm_crtc_index(struct drm_crtc *crtc)
+static inline unsigned int drm_crtc_index(const struct drm_crtc *crtc)
 {
 	return crtc->index;
 }
@@ -2773,184 +1359,17 @@
 	return 1 << drm_crtc_index(crtc);
 }
 
-int drm_connector_init(struct drm_device *dev,
-		       struct drm_connector *connector,
-		       const struct drm_connector_funcs *funcs,
-		       int connector_type);
-int drm_connector_register(struct drm_connector *connector);
-void drm_connector_unregister(struct drm_connector *connector);
-
-extern void drm_connector_cleanup(struct drm_connector *connector);
-static inline unsigned drm_connector_index(struct drm_connector *connector)
-{
-	return connector->index;
-}
-
-extern __printf(5, 6)
-int drm_encoder_init(struct drm_device *dev,
-		     struct drm_encoder *encoder,
-		     const struct drm_encoder_funcs *funcs,
-		     int encoder_type, const char *name, ...);
-
-/**
- * drm_encoder_index - find the index of a registered encoder
- * @encoder: encoder to find index for
- *
- * Given a registered encoder, return the index of that encoder within a DRM
- * device's list of encoders.
- */
-static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
-{
-	return encoder->index;
-}
-
-/**
- * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
- * @encoder: encoder to test
- * @crtc: crtc to test
- *
- * Return false if @encoder can't be driven by @crtc, true otherwise.
- */
-static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
-				       struct drm_crtc *crtc)
-{
-	return !!(encoder->possible_crtcs & drm_crtc_mask(crtc));
-}
-
-extern __printf(8, 9)
-int drm_universal_plane_init(struct drm_device *dev,
-			     struct drm_plane *plane,
-			     unsigned long possible_crtcs,
-			     const struct drm_plane_funcs *funcs,
-			     const uint32_t *formats,
-			     unsigned int format_count,
-			     enum drm_plane_type type,
-			     const char *name, ...);
-extern int drm_plane_init(struct drm_device *dev,
-			  struct drm_plane *plane,
-			  unsigned long possible_crtcs,
-			  const struct drm_plane_funcs *funcs,
-			  const uint32_t *formats, unsigned int format_count,
-			  bool is_primary);
-extern void drm_plane_cleanup(struct drm_plane *plane);
-
-/**
- * drm_plane_index - find the index of a registered plane
- * @plane: plane to find index for
- *
- * Given a registered plane, return the index of that plane within a DRM
- * device's list of planes.
- */
-static inline unsigned int drm_plane_index(struct drm_plane *plane)
-{
-	return plane->index;
-}
-extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
-extern void drm_plane_force_disable(struct drm_plane *plane);
 extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
 				   int *hdisplay, int *vdisplay);
 extern int drm_crtc_force_disable(struct drm_crtc *crtc);
 extern int drm_crtc_force_disable_all(struct drm_device *dev);
 
-extern void drm_encoder_cleanup(struct drm_encoder *encoder);
-
-extern const char *drm_get_connector_status_name(enum drm_connector_status status);
-extern const char *drm_get_subpixel_order_name(enum subpixel_order order);
-extern const char *drm_get_dpms_name(int val);
-extern const char *drm_get_dvi_i_subconnector_name(int val);
-extern const char *drm_get_dvi_i_select_name(int val);
-extern const char *drm_get_tv_subconnector_name(int val);
-extern const char *drm_get_tv_select_name(int val);
 extern void drm_mode_config_init(struct drm_device *dev);
 extern void drm_mode_config_reset(struct drm_device *dev);
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 
-extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
-						const char *path);
-int drm_mode_connector_set_tile_property(struct drm_connector *connector);
-extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
-						   const struct edid *edid);
-
-extern int drm_display_info_set_bus_formats(struct drm_display_info *info,
-					    const u32 *formats,
-					    unsigned int num_formats);
-
-static inline bool drm_property_type_is(struct drm_property *property,
-		uint32_t type)
-{
-	/* instanceof for props.. handles extended type vs original types: */
-	if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
-		return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type;
-	return property->flags & type;
-}
-
-extern int drm_object_property_set_value(struct drm_mode_object *obj,
-					 struct drm_property *property,
-					 uint64_t val);
-extern int drm_object_property_get_value(struct drm_mode_object *obj,
-					 struct drm_property *property,
-					 uint64_t *value);
-extern int drm_framebuffer_init(struct drm_device *dev,
-				struct drm_framebuffer *fb,
-				const struct drm_framebuffer_funcs *funcs);
-extern struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
-						      uint32_t id);
-extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
-extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
-extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
-
-extern void drm_object_attach_property(struct drm_mode_object *obj,
-				       struct drm_property *property,
-				       uint64_t init_val);
-extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
-						const char *name, int num_values);
-extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
-					 const char *name,
-					 const struct drm_prop_enum_list *props,
-					 int num_values);
-struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
-					 int flags, const char *name,
-					 const struct drm_prop_enum_list *props,
-					 int num_props,
-					 uint64_t supported_bits);
-struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
-					 const char *name,
-					 uint64_t min, uint64_t max);
-struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
-					 int flags, const char *name,
-					 int64_t min, int64_t max);
-struct drm_property *drm_property_create_object(struct drm_device *dev,
-					 int flags, const char *name, uint32_t type);
-struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
-					 const char *name);
-struct drm_property_blob *drm_property_create_blob(struct drm_device *dev,
-                                                   size_t length,
-                                                   const void *data);
-struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
-                                                   uint32_t id);
-struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob);
-void drm_property_unreference_blob(struct drm_property_blob *blob);
-extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
-extern int drm_property_add_enum(struct drm_property *property, int index,
-				 uint64_t value, const char *name);
-extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
-extern int drm_mode_create_tv_properties(struct drm_device *dev,
-					 unsigned int num_modes,
-					 const char * const modes[]);
-extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
-extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
-extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
-extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
-
-extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
-					     struct drm_encoder *encoder);
-extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
-					 int gamma_size);
-
 extern int drm_mode_set_config_internal(struct drm_mode_set *set);
 
-extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
-
 extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
 							 char topology[8]);
 extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
@@ -2958,40 +1377,7 @@
 extern void drm_mode_put_tile_group(struct drm_device *dev,
 				   struct drm_tile_group *tg);
 
-extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
-				       struct drm_property *property,
-				       uint64_t value);
-
-extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
-							      unsigned int supported_rotations);
-extern unsigned int drm_rotation_simplify(unsigned int rotation,
-					  unsigned int supported_rotations);
-extern void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
-				       uint degamma_lut_size,
-				       bool has_ctm,
-				       uint gamma_lut_size);
-
-int drm_plane_create_zpos_property(struct drm_plane *plane,
-				   unsigned int zpos,
-				   unsigned int min, unsigned int max);
-
-int drm_plane_create_zpos_immutable_property(struct drm_plane *plane,
-					     unsigned int zpos);
-
 /* Helpers */
-struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
-					     uint32_t id, uint32_t type);
-void drm_mode_object_reference(struct drm_mode_object *obj);
-void drm_mode_object_unreference(struct drm_mode_object *obj);
-
-static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
-		uint32_t id)
-{
-	struct drm_mode_object *mo;
-	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE);
-	return mo ? obj_to_plane(mo) : NULL;
-}
-
 static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
 	uint32_t id)
 {
@@ -3000,120 +1386,6 @@
 	return mo ? obj_to_crtc(mo) : NULL;
 }
 
-static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
-	uint32_t id)
-{
-	struct drm_mode_object *mo;
-	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
-	return mo ? obj_to_encoder(mo) : NULL;
-}
-
-/**
- * drm_connector_lookup - lookup connector object
- * @dev: DRM device
- * @id: connector object id
- *
- * This function looks up the connector object specified by id
- * add takes a reference to it.
- */
-static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev,
-		uint32_t id)
-{
-	struct drm_mode_object *mo;
-	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CONNECTOR);
-	return mo ? obj_to_connector(mo) : NULL;
-}
-
-static inline struct drm_property *drm_property_find(struct drm_device *dev,
-		uint32_t id)
-{
-	struct drm_mode_object *mo;
-	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PROPERTY);
-	return mo ? obj_to_property(mo) : NULL;
-}
-
-/*
- * Extract a degamma/gamma LUT value provided by user and round it to the
- * precision supported by the hardware.
- */
-static inline uint32_t drm_color_lut_extract(uint32_t user_input,
-					     uint32_t bit_precision)
-{
-	uint32_t val = user_input;
-	uint32_t max = 0xffff >> (16 - bit_precision);
-
-	/* Round only if we're not using full precision. */
-	if (bit_precision < 16) {
-		val += 1UL << (16 - bit_precision - 1);
-		val >>= 16 - bit_precision;
-	}
-
-	return clamp_val(val, 0, max);
-}
-
-/**
- * drm_framebuffer_reference - incr the fb refcnt
- * @fb: framebuffer
- *
- * This functions increments the fb's refcount.
- */
-static inline void drm_framebuffer_reference(struct drm_framebuffer *fb)
-{
-	drm_mode_object_reference(&fb->base);
-}
-
-/**
- * drm_framebuffer_unreference - unref a framebuffer
- * @fb: framebuffer to unref
- *
- * This functions decrements the fb's refcount and frees it if it drops to zero.
- */
-static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
-{
-	drm_mode_object_unreference(&fb->base);
-}
-
-/**
- * drm_framebuffer_read_refcount - read the framebuffer reference count.
- * @fb: framebuffer
- *
- * This functions returns the framebuffer's reference count.
- */
-static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb)
-{
-	return atomic_read(&fb->base.refcount.refcount);
-}
-
-/**
- * drm_connector_reference - incr the connector refcnt
- * @connector: connector
- *
- * This function increments the connector's refcount.
- */
-static inline void drm_connector_reference(struct drm_connector *connector)
-{
-	drm_mode_object_reference(&connector->base);
-}
-
-/**
- * drm_connector_unreference - unref a connector
- * @connector: connector to unref
- *
- * This function decrements the connector's refcount and frees it if it drops to zero.
- */
-static inline void drm_connector_unreference(struct drm_connector *connector)
-{
-	drm_mode_object_unreference(&connector->base);
-}
-
-/* Plane list iterator for legacy (overlay only) planes. */
-#define drm_for_each_legacy_plane(plane, dev) \
-	list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
-		for_each_if (plane->type == DRM_PLANE_TYPE_OVERLAY)
-
-#define drm_for_each_plane(plane, dev) \
-	list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
-
 #define drm_for_each_crtc(crtc, dev) \
 	list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
 
@@ -3131,67 +1403,4 @@
 		!drm_modeset_is_locked(&mode_config->connection_mutex));
 }
 
-#define drm_for_each_connector(connector, dev) \
-	for (assert_drm_connector_list_read_locked(&(dev)->mode_config),	\
-	     connector = list_first_entry(&(dev)->mode_config.connector_list,	\
-					  struct drm_connector, head);		\
-	     &connector->head != (&(dev)->mode_config.connector_list);		\
-	     connector = list_next_entry(connector, head))
-
-#define drm_for_each_encoder(encoder, dev) \
-	list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
-
-#define drm_for_each_fb(fb, dev) \
-	for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)),		\
-	     fb = list_first_entry(&(dev)->mode_config.fb_list,	\
-					  struct drm_framebuffer, head);	\
-	     &fb->head != (&(dev)->mode_config.fb_list);			\
-	     fb = list_next_entry(fb, head))
-
-/* drm_edid.c */
-bool drm_probe_ddc(struct i2c_adapter *adapter);
-struct edid *drm_get_edid(struct drm_connector *connector,
-			  struct i2c_adapter *adapter);
-struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
-				     struct i2c_adapter *adapter);
-struct edid *drm_edid_duplicate(const struct edid *edid);
-int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
-
-u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
-enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
-bool drm_detect_hdmi_monitor(struct edid *edid);
-bool drm_detect_monitor_audio(struct edid *edid);
-bool drm_rgb_quant_range_selectable(struct edid *edid);
-int drm_add_modes_noedid(struct drm_connector *connector,
-			 int hdisplay, int vdisplay);
-void drm_set_preferred_mode(struct drm_connector *connector,
-			    int hpref, int vpref);
-
-int drm_edid_header_is_valid(const u8 *raw_edid);
-bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
-			  bool *edid_corrupt);
-bool drm_edid_is_valid(struct edid *edid);
-void drm_edid_get_monitor_name(struct edid *edid, char *name,
-			       int buflen);
-struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
-					   int hsize, int vsize, int fresh,
-					   bool rb);
-
-/* drm_bridge.c */
-extern int drm_bridge_add(struct drm_bridge *bridge);
-extern void drm_bridge_remove(struct drm_bridge *bridge);
-extern struct drm_bridge *of_drm_find_bridge(struct device_node *np);
-extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
-
-bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
-			const struct drm_display_mode *mode,
-			struct drm_display_mode *adjusted_mode);
-void drm_bridge_disable(struct drm_bridge *bridge);
-void drm_bridge_post_disable(struct drm_bridge *bridge);
-void drm_bridge_mode_set(struct drm_bridge *bridge,
-			struct drm_display_mode *mode,
-			struct drm_display_mode *adjusted_mode);
-void drm_bridge_pre_enable(struct drm_bridge *bridge);
-void drm_bridge_enable(struct drm_bridge *bridge);
-
 #endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 4b37afa..982c299 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -41,6 +41,7 @@
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_modeset_helper.h>
 
 extern void drm_helper_disable_unused_functions(struct drm_device *dev);
 extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
@@ -53,11 +54,6 @@
 
 extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
 
-extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
-
-extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
-					   const struct drm_mode_fb_cmd2 *mode_cmd);
-
 extern void drm_helper_resume_force_mode(struct drm_device *dev);
 
 int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
diff --git a/include/drm/drm_dp_aux_dev.h b/include/drm/drm_dp_aux_dev.h
deleted file mode 100644
index 1b76d99..0000000
--- a/include/drm/drm_dp_aux_dev.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright © 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- *    Rafael Antognolli <rafael.antognolli@intel.com>
- *
- */
-
-#ifndef DRM_DP_AUX_DEV
-#define DRM_DP_AUX_DEV
-
-#include <drm/drm_dp_helper.h>
-
-#ifdef CONFIG_DRM_DP_AUX_CHARDEV
-
-int drm_dp_aux_dev_init(void);
-void drm_dp_aux_dev_exit(void);
-int drm_dp_aux_register_devnode(struct drm_dp_aux *aux);
-void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux);
-
-#else
-
-static inline int drm_dp_aux_dev_init(void)
-{
-	return 0;
-}
-
-static inline void drm_dp_aux_dev_exit(void)
-{
-}
-
-static inline int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
-{
-	return 0;
-}
-
-static inline void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
-{
-}
-
-#endif
-
-#endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 63b8bd5..2a79882 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -211,14 +211,16 @@
 # define DP_DS_PORT_TYPE_DVI		    2
 # define DP_DS_PORT_TYPE_HDMI		    3
 # define DP_DS_PORT_TYPE_NON_EDID	    4
+# define DP_DS_PORT_TYPE_DP_DUALMODE        5
+# define DP_DS_PORT_TYPE_WIRELESS           6
 # define DP_DS_PORT_HPD			    (1 << 3)
 /* offset 1 for VGA is maximum megapixels per second / 8 */
 /* offset 2 */
-# define DP_DS_VGA_MAX_BPC_MASK		    (3 << 0)
-# define DP_DS_VGA_8BPC			    0
-# define DP_DS_VGA_10BPC		    1
-# define DP_DS_VGA_12BPC		    2
-# define DP_DS_VGA_16BPC		    3
+# define DP_DS_MAX_BPC_MASK	            (3 << 0)
+# define DP_DS_8BPC		            0
+# define DP_DS_10BPC		            1
+# define DP_DS_12BPC		            2
+# define DP_DS_16BPC		            3
 
 /* link configuration */
 #define	DP_LINK_BW_SET		            0x100
@@ -443,6 +445,9 @@
 #define DP_SOURCE_OUI			    0x300
 #define DP_SINK_OUI			    0x400
 #define DP_BRANCH_OUI			    0x500
+#define DP_BRANCH_ID                        0x503
+#define DP_BRANCH_HW_REV                    0x509
+#define DP_BRANCH_SW_REV                    0x50A
 
 #define DP_SET_POWER                        0x600
 # define DP_SET_POWER_D0                    0x1
@@ -813,6 +818,13 @@
 int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
 int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
 int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+				const u8 port_cap[4]);
+int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+			      const u8 port_cap[4]);
+int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
+void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+			     const u8 port_cap[4], struct drm_dp_aux *aux);
 
 void drm_dp_aux_init(struct drm_dp_aux *aux);
 int drm_dp_aux_register(struct drm_dp_aux *aux);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 919933d..c3a7d44 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -25,6 +25,9 @@
 
 #include <linux/types.h>
 
+struct drm_device;
+struct i2c_adapter;
+
 #define EDID_LENGTH 128
 #define DDC_ADDR 0x50
 #define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */
@@ -423,9 +426,36 @@
 	return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK;
 }
 
+bool drm_probe_ddc(struct i2c_adapter *adapter);
 struct edid *drm_do_get_edid(struct drm_connector *connector,
 	int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
 			      size_t len),
 	void *data);
+struct edid *drm_get_edid(struct drm_connector *connector,
+			  struct i2c_adapter *adapter);
+struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
+				     struct i2c_adapter *adapter);
+struct edid *drm_edid_duplicate(const struct edid *edid);
+int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
+
+u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
+enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
+bool drm_detect_hdmi_monitor(struct edid *edid);
+bool drm_detect_monitor_audio(struct edid *edid);
+bool drm_rgb_quant_range_selectable(struct edid *edid);
+int drm_add_modes_noedid(struct drm_connector *connector,
+			 int hdisplay, int vdisplay);
+void drm_set_preferred_mode(struct drm_connector *connector,
+			    int hpref, int vpref);
+
+int drm_edid_header_is_valid(const u8 *raw_edid);
+bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
+			  bool *edid_corrupt);
+bool drm_edid_is_valid(struct edid *edid);
+void drm_edid_get_monitor_name(struct edid *edid, char *name,
+			       int buflen);
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+					   int hsize, int vsize, int fresh,
+					   bool rb);
 
 #endif /* __DRM_EDID_H__ */
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
new file mode 100644
index 0000000..387e33a
--- /dev/null
+++ b/include/drm/drm_encoder.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_ENCODER_H__
+#define __DRM_ENCODER_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+/**
+ * struct drm_encoder_funcs - encoder controls
+ *
+ * Encoders sit between CRTCs and connectors.
+ */
+struct drm_encoder_funcs {
+	/**
+	 * @reset:
+	 *
+	 * Reset encoder hardware and software state to off. This function isn't
+	 * called by the core directly, only through drm_mode_config_reset().
+	 * It's not a helper hook only for historical reasons.
+	 */
+	void (*reset)(struct drm_encoder *encoder);
+
+	/**
+	 * @destroy:
+	 *
+	 * Clean up encoder resources. This is only called at driver unload time
+	 * through drm_mode_config_cleanup() since an encoder cannot be
+	 * hotplugged in DRM.
+	 */
+	void (*destroy)(struct drm_encoder *encoder);
+
+	/**
+	 * @late_register:
+	 *
+	 * This optional hook can be used to register additional userspace
+	 * interfaces attached to the encoder like debugfs interfaces.
+	 * It is called late in the driver load sequence from drm_dev_register().
+	 * Everything added from this callback should be unregistered in
+	 * the early_unregister callback.
+	 *
+	 * Returns:
+	 *
+	 * 0 on success, or a negative error code on failure.
+	 */
+	int (*late_register)(struct drm_encoder *encoder);
+
+	/**
+	 * @early_unregister:
+	 *
+	 * This optional hook should be used to unregister the additional
+	 * userspace interfaces attached to the encoder from
+	 * late_unregister(). It is called from drm_dev_unregister(),
+	 * early in the driver unload sequence to disable userspace access
+	 * before data structures are torndown.
+	 */
+	void (*early_unregister)(struct drm_encoder *encoder);
+};
+
+/**
+ * struct drm_encoder - central DRM encoder structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object
+ * @name: human readable name, can be overwritten by the driver
+ * @crtc: currently bound CRTC
+ * @bridge: bridge associated to the encoder
+ * @funcs: control functions
+ * @helper_private: mid-layer private data
+ *
+ * CRTCs drive pixels to encoders, which convert them into signals
+ * appropriate for a given connector or set of connectors.
+ */
+struct drm_encoder {
+	struct drm_device *dev;
+	struct list_head head;
+
+	struct drm_mode_object base;
+	char *name;
+	/**
+	 * @encoder_type:
+	 *
+	 * One of the DRM_MODE_ENCODER_<foo> types in drm_mode.h. The following
+	 * encoder types are defined thus far:
+	 *
+	 * - DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A.
+	 *
+	 * - DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort.
+	 *
+	 * - DRM_MODE_ENCODER_LVDS for display panels, or in general any panel
+	 *   with a proprietary parallel connector.
+	 *
+	 * - DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video,
+	 *   Component, SCART).
+	 *
+	 * - DRM_MODE_ENCODER_VIRTUAL for virtual machine displays
+	 *
+	 * - DRM_MODE_ENCODER_DSI for panels connected using the DSI serial bus.
+	 *
+	 * - DRM_MODE_ENCODER_DPI for panels connected using the DPI parallel
+	 *   bus.
+	 *
+	 * - DRM_MODE_ENCODER_DPMST for special fake encoders used to allow
+	 *   mutliple DP MST streams to share one physical encoder.
+	 */
+	int encoder_type;
+
+	/**
+	 * @index: Position inside the mode_config.list, can be used as an array
+	 * index. It is invariant over the lifetime of the encoder.
+	 */
+	unsigned index;
+
+	/**
+	 * @possible_crtcs: Bitmask of potential CRTC bindings, using
+	 * drm_crtc_index() as the index into the bitfield. The driver must set
+	 * the bits for all &drm_crtc objects this encoder can be connected to
+	 * before calling drm_encoder_init().
+	 *
+	 * In reality almost every driver gets this wrong.
+	 *
+	 * Note that since CRTC objects can't be hotplugged the assigned indices
+	 * are stable and hence known before registering all objects.
+	 */
+	uint32_t possible_crtcs;
+
+	/**
+	 * @possible_clones: Bitmask of potential sibling encoders for cloning,
+	 * using drm_encoder_index() as the index into the bitfield. The driver
+	 * must set the bits for all &drm_encoder objects which can clone a
+	 * &drm_crtc together with this encoder before calling
+	 * drm_encoder_init(). Drivers should set the bit representing the
+	 * encoder itself, too. Cloning bits should be set such that when two
+	 * encoders can be used in a cloned configuration, they both should have
+	 * each another bits set.
+	 *
+	 * In reality almost every driver gets this wrong.
+	 *
+	 * Note that since encoder objects can't be hotplugged the assigned indices
+	 * are stable and hence known before registering all objects.
+	 */
+	uint32_t possible_clones;
+
+	struct drm_crtc *crtc;
+	struct drm_bridge *bridge;
+	const struct drm_encoder_funcs *funcs;
+	const struct drm_encoder_helper_funcs *helper_private;
+};
+
+#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
+
+__printf(5, 6)
+int drm_encoder_init(struct drm_device *dev,
+		     struct drm_encoder *encoder,
+		     const struct drm_encoder_funcs *funcs,
+		     int encoder_type, const char *name, ...);
+
+/**
+ * drm_encoder_index - find the index of a registered encoder
+ * @encoder: encoder to find index for
+ *
+ * Given a registered encoder, return the index of that encoder within a DRM
+ * device's list of encoders.
+ */
+static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
+{
+	return encoder->index;
+}
+
+/* FIXME: We have an include file mess still, drm_crtc.h needs untangling. */
+static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc);
+
+/**
+ * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
+ * @encoder: encoder to test
+ * @crtc: crtc to test
+ *
+ * Returns false if @encoder can't be driven by @crtc, true otherwise.
+ */
+static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
+				       struct drm_crtc *crtc)
+{
+	return !!(encoder->possible_crtcs & drm_crtc_mask(crtc));
+}
+
+/**
+ * drm_encoder_find - find a &drm_encoder
+ * @dev: DRM device
+ * @id: encoder id
+ *
+ * Returns the encoder with @id, NULL if it doesn't exist. Simple wrapper around
+ * drm_mode_object_find().
+ */
+static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
+						   uint32_t id)
+{
+	struct drm_mode_object *mo;
+
+	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
+
+	return mo ? obj_to_encoder(mo) : NULL;
+}
+
+void drm_encoder_cleanup(struct drm_encoder *encoder);
+
+/**
+ * drm_for_each_encoder_mask - iterate over encoders specified by bitmask
+ * @encoder: the loop cursor
+ * @dev: the DRM device
+ * @encoder_mask: bitmask of encoder indices
+ *
+ * Iterate over all encoders specified by bitmask.
+ */
+#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \
+	list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \
+		for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder)))
+
+/**
+ * drm_for_each_encoder - iterate over all encoders
+ * @encoder: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all encoders of @dev.
+ */
+#define drm_for_each_encoder(encoder, dev) \
+	list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
+
+#endif
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index db8d478..ed8edfe 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -32,6 +32,7 @@
 
 struct drm_fb_helper;
 
+#include <drm/drm_crtc.h>
 #include <linux/kgdb.h>
 
 enum mode_set_atomic {
@@ -176,6 +177,7 @@
  *              the screen buffer
  * @dirty_lock: spinlock protecting @dirty_clip
  * @dirty_work: worker used to flush the framebuffer
+ * @resume_work: worker used during resume if the console lock is already taken
  *
  * This is the main structure used by the fbdev helpers. Drivers supporting
  * fbdev emulation should embedded this into their overall driver structure.
@@ -196,6 +198,7 @@
 	struct drm_clip_rect dirty_clip;
 	spinlock_t dirty_lock;
 	struct work_struct dirty_work;
+	struct work_struct resume_work;
 
 	/**
 	 * @kernel_fb_list:
@@ -214,8 +217,20 @@
 	bool delayed_hotplug;
 };
 
+/**
+ * define DRM_FB_HELPER_DEFAULT_OPS - helper define for drm drivers
+ *
+ * Helper define to register default implementations of drm_fb_helper
+ * functions. To be used in struct fb_ops of drm drivers.
+ */
+#define DRM_FB_HELPER_DEFAULT_OPS \
+	.fb_check_var	= drm_fb_helper_check_var, \
+	.fb_set_par	= drm_fb_helper_set_par, \
+	.fb_setcmap	= drm_fb_helper_setcmap, \
+	.fb_blank	= drm_fb_helper_blank, \
+	.fb_pan_display	= drm_fb_helper_pan_display
+
 #ifdef CONFIG_DRM_FBDEV_EMULATION
-int drm_fb_helper_modinit(void);
 void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
 			   const struct drm_fb_helper_funcs *funcs);
 int drm_fb_helper_init(struct drm_device *dev,
@@ -263,7 +278,9 @@
 void drm_fb_helper_cfb_imageblit(struct fb_info *info,
 				 const struct fb_image *image);
 
-void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state);
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend);
+void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
+					bool suspend);
 
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
 
@@ -283,11 +300,6 @@
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
 				       struct drm_connector *connector);
 #else
-static inline int drm_fb_helper_modinit(void)
-{
-	return 0;
-}
-
 static inline void drm_fb_helper_prepare(struct drm_device *dev,
 					struct drm_fb_helper *helper,
 					const struct drm_fb_helper_funcs *funcs)
@@ -417,7 +429,12 @@
 }
 
 static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
-					     int state)
+					     bool suspend)
+{
+}
+
+static inline void
+drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, bool suspend)
 {
 }
 
@@ -475,5 +492,18 @@
 {
 	return 0;
 }
+
 #endif
+
+static inline int
+drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a,
+					      const char *name, bool primary)
+{
+#if IS_REACHABLE(CONFIG_FB)
+	return remove_conflicting_framebuffers(a, name, primary);
+#else
+	return 0;
+#endif
+}
+
 #endif
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 7f90a39..30c30fa 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -25,6 +25,7 @@
 #include <linux/types.h>
 #include <uapi/drm/drm_fourcc.h>
 
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
 void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp);
 int drm_format_num_planes(uint32_t format);
 int drm_format_plane_cpp(uint32_t format, int plane);
@@ -32,6 +33,6 @@
 int drm_format_vert_chroma_subsampling(uint32_t format);
 int drm_format_plane_width(int width, uint32_t format, int plane);
 int drm_format_plane_height(int height, uint32_t format, int plane);
-const char *drm_get_format_name(uint32_t format);
+char *drm_get_format_name(uint32_t format) __malloc;
 
 #endif /* __DRM_FOURCC_H__ */
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
new file mode 100644
index 0000000..f5ae1f4
--- /dev/null
+++ b/include/drm/drm_framebuffer.h
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_FRAMEBUFFER_H__
+#define __DRM_FRAMEBUFFER_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+struct drm_framebuffer;
+struct drm_file;
+struct drm_device;
+
+/**
+ * struct drm_framebuffer_funcs - framebuffer hooks
+ */
+struct drm_framebuffer_funcs {
+	/**
+	 * @destroy:
+	 *
+	 * Clean up framebuffer resources, specifically also unreference the
+	 * backing storage. The core guarantees to call this function for every
+	 * framebuffer successfully created by ->fb_create() in
+	 * &drm_mode_config_funcs. Drivers must also call
+	 * drm_framebuffer_cleanup() to release DRM core resources for this
+	 * framebuffer.
+	 */
+	void (*destroy)(struct drm_framebuffer *framebuffer);
+
+	/**
+	 * @create_handle:
+	 *
+	 * Create a buffer handle in the driver-specific buffer manager (either
+	 * GEM or TTM) valid for the passed-in struct &drm_file. This is used by
+	 * the core to implement the GETFB IOCTL, which returns (for
+	 * sufficiently priviledged user) also a native buffer handle. This can
+	 * be used for seamless transitions between modesetting clients by
+	 * copying the current screen contents to a private buffer and blending
+	 * between that and the new contents.
+	 *
+	 * GEM based drivers should call drm_gem_handle_create() to create the
+	 * handle.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 on success or a negative error code on failure.
+	 */
+	int (*create_handle)(struct drm_framebuffer *fb,
+			     struct drm_file *file_priv,
+			     unsigned int *handle);
+	/**
+	 * @dirty:
+	 *
+	 * Optional callback for the dirty fb IOCTL.
+	 *
+	 * Userspace can notify the driver via this callback that an area of the
+	 * framebuffer has changed and should be flushed to the display
+	 * hardware. This can also be used internally, e.g. by the fbdev
+	 * emulation, though that's not the case currently.
+	 *
+	 * See documentation in drm_mode.h for the struct drm_mode_fb_dirty_cmd
+	 * for more information as all the semantics and arguments have a one to
+	 * one mapping on this function.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 on success or a negative error code on failure.
+	 */
+	int (*dirty)(struct drm_framebuffer *framebuffer,
+		     struct drm_file *file_priv, unsigned flags,
+		     unsigned color, struct drm_clip_rect *clips,
+		     unsigned num_clips);
+};
+
+/**
+ * struct drm_framebuffer - frame buffer object
+ *
+ * Note that the fb is refcounted for the benefit of driver internals,
+ * for example some hw, disabling a CRTC/plane is asynchronous, and
+ * scanout does not actually complete until the next vblank.  So some
+ * cleanup (like releasing the reference(s) on the backing GEM bo(s))
+ * should be deferred.  In cases like this, the driver would like to
+ * hold a ref to the fb even though it has already been removed from
+ * userspace perspective. See drm_framebuffer_reference() and
+ * drm_framebuffer_unreference().
+ *
+ * The refcount is stored inside the mode object @base.
+ */
+struct drm_framebuffer {
+	/**
+	 * @dev: DRM device this framebuffer belongs to
+	 */
+	struct drm_device *dev;
+	/**
+	 * @head: Place on the dev->mode_config.fb_list, access protected by
+	 * dev->mode_config.fb_lock.
+	 */
+	struct list_head head;
+
+	/**
+	 * @base: base modeset object structure, contains the reference count.
+	 */
+	struct drm_mode_object base;
+	/**
+	 * @funcs: framebuffer vfunc table
+	 */
+	const struct drm_framebuffer_funcs *funcs;
+	/**
+	 * @pitches: Line stride per buffer. For userspace created object this
+	 * is copied from drm_mode_fb_cmd2.
+	 */
+	unsigned int pitches[4];
+	/**
+	 * @offsets: Offset from buffer start to the actual pixel data in bytes,
+	 * per buffer. For userspace created object this is copied from
+	 * drm_mode_fb_cmd2.
+	 *
+	 * Note that this is a linear offset and does not take into account
+	 * tiling or buffer laytou per @modifier. It meant to be used when the
+	 * actual pixel data for this framebuffer plane starts at an offset,
+	 * e.g.  when multiple planes are allocated within the same backing
+	 * storage buffer object. For tiled layouts this generally means it
+	 * @offsets must at least be tile-size aligned, but hardware often has
+	 * stricter requirements.
+	 *
+	 * This should not be used to specifiy x/y pixel offsets into the buffer
+	 * data (even for linear buffers). Specifying an x/y pixel offset is
+	 * instead done through the source rectangle in struct &drm_plane_state.
+	 */
+	unsigned int offsets[4];
+	/**
+	 * @modifier: Data layout modifier, per buffer. This is used to describe
+	 * tiling, or also special layouts (like compression) of auxiliary
+	 * buffers. For userspace created object this is copied from
+	 * drm_mode_fb_cmd2.
+	 */
+	uint64_t modifier[4];
+	/**
+	 * @width: Logical width of the visible area of the framebuffer, in
+	 * pixels.
+	 */
+	unsigned int width;
+	/**
+	 * @height: Logical height of the visible area of the framebuffer, in
+	 * pixels.
+	 */
+	unsigned int height;
+	/**
+	 * @depth: Depth in bits per pixel for RGB formats. 0 for everything
+	 * else. Legacy information derived from @pixel_format, it's suggested to use
+	 * the DRM FOURCC codes and helper functions directly instead.
+	 */
+	unsigned int depth;
+	/**
+	 * @bits_per_pixel: Storage used bits per pixel for RGB formats. 0 for
+	 * everything else. Legacy information derived from @pixel_format, it's
+	 * suggested to use the DRM FOURCC codes and helper functions directly
+	 * instead.
+	 */
+	int bits_per_pixel;
+	/**
+	 * @flags: Framebuffer flags like DRM_MODE_FB_INTERLACED or
+	 * DRM_MODE_FB_MODIFIERS.
+	 */
+	int flags;
+	/**
+	 * @pixel_format: DRM FOURCC code describing the pixel format.
+	 */
+	uint32_t pixel_format; /* fourcc format */
+	/**
+	 * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor
+	 * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
+	 * universal plane.
+	 */
+	int hot_x;
+	/**
+	 * @hot_y: Y coordinate of the cursor hotspot. Used by the legacy cursor
+	 * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
+	 * universal plane.
+	 */
+	int hot_y;
+	/**
+	 * @filp_head: Placed on struct &drm_file fbs list_head, protected by
+	 * fbs_lock in the same structure.
+	 */
+	struct list_head filp_head;
+};
+
+#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
+
+int drm_framebuffer_init(struct drm_device *dev,
+			 struct drm_framebuffer *fb,
+			 const struct drm_framebuffer_funcs *funcs);
+struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+					       uint32_t id);
+void drm_framebuffer_remove(struct drm_framebuffer *fb);
+void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
+void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
+
+/**
+ * drm_framebuffer_reference - incr the fb refcnt
+ * @fb: framebuffer
+ *
+ * This functions increments the fb's refcount.
+ */
+static inline void drm_framebuffer_reference(struct drm_framebuffer *fb)
+{
+	drm_mode_object_reference(&fb->base);
+}
+
+/**
+ * drm_framebuffer_unreference - unref a framebuffer
+ * @fb: framebuffer to unref
+ *
+ * This functions decrements the fb's refcount and frees it if it drops to zero.
+ */
+static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+	drm_mode_object_unreference(&fb->base);
+}
+
+/**
+ * drm_framebuffer_read_refcount - read the framebuffer reference count.
+ * @fb: framebuffer
+ *
+ * This functions returns the framebuffer's reference count.
+ */
+static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb)
+{
+	return atomic_read(&fb->base.refcount.refcount);
+}
+
+/**
+ * drm_for_each_fb - iterate over all framebuffers
+ * @fb: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all framebuffers of @dev. User must hold the fb_lock from
+ * &drm_mode_config.
+ */
+#define drm_for_each_fb(fb, dev) \
+	for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)),		\
+	     fb = list_first_entry(&(dev)->mode_config.fb_list,	\
+					  struct drm_framebuffer, head);	\
+	     &fb->head != (&(dev)->mode_config.fb_list);			\
+	     fb = list_next_entry(fb, head))
+#endif
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index fca1cd1..9f63736 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -210,8 +210,8 @@
  * drm_gem_object_unreference_unlocked().
  *
  * Drivers should never call this directly in their code. Instead they should
- * wrap it up into a driver_gem_object_unreference(struct driver_gem_object
- * *obj) wrapper function, and use that. Shared code should never call this, to
+ * wrap it up into a ``driver_gem_object_unreference(struct driver_gem_object
+ * *obj)`` wrapper function, and use that. Shared code should never call this, to
  * avoid breaking drivers by accident which still depend upon dev->struct_mutex
  * locking.
  */
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 47ac925..4fef190 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -265,11 +265,15 @@
 				    u16 end);
 int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
 				  u16 end);
-int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline);
 int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
 int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
 			     enum mipi_dsi_dcs_tear_mode mode);
 int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format);
+int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline);
+int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi,
+					u16 brightness);
+int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
+					u16 *brightness);
 
 /**
  * struct mipi_dsi_driver - DSI driver
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index fc65118..205ddcf 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -37,6 +37,7 @@
  * Generic range manager structs
  */
 #include <linux/bug.h>
+#include <linux/rbtree.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
@@ -61,6 +62,7 @@
 struct drm_mm_node {
 	struct list_head node_list;
 	struct list_head hole_stack;
+	struct rb_node rb;
 	unsigned hole_follows : 1;
 	unsigned scanned_block : 1;
 	unsigned scanned_prev_free : 1;
@@ -70,6 +72,7 @@
 	unsigned long color;
 	u64 start;
 	u64 size;
+	u64 __subtree_last;
 	struct drm_mm *mm;
 };
 
@@ -79,6 +82,9 @@
 	/* head_node.node_list is the list of all memory nodes, ordered
 	 * according to the (increasing) start address of the memory node. */
 	struct drm_mm_node head_node;
+	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
+	struct rb_root interval_tree;
+
 	unsigned int scan_check_range : 1;
 	unsigned scan_alignment;
 	unsigned long scan_color;
@@ -295,6 +301,12 @@
 void drm_mm_takedown(struct drm_mm *mm);
 bool drm_mm_clean(struct drm_mm *mm);
 
+struct drm_mm_node *
+drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
+
+struct drm_mm_node *
+drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last);
+
 void drm_mm_init_scan(struct drm_mm *mm,
 		      u64 size,
 		      unsigned alignment,
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
new file mode 100644
index 0000000..43460b2
--- /dev/null
+++ b/include/drm/drm_mode_object.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_MODESET_H__
+#define __DRM_MODESET_H__
+
+#include <linux/kref.h>
+struct drm_object_properties;
+struct drm_property;
+struct drm_device;
+
+/**
+ * struct drm_mode_object - base structure for modeset objects
+ * @id: userspace visible identifier
+ * @type: type of the object, one of DRM_MODE_OBJECT\_\*
+ * @properties: properties attached to this object, including values
+ * @refcount: reference count for objects which with dynamic lifetime
+ * @free_cb: free function callback, only set for objects with dynamic lifetime
+ *
+ * Base structure for modeset objects visible to userspace. Objects can be
+ * looked up using drm_mode_object_find(). Besides basic uapi interface
+ * properties like @id and @type it provides two services:
+ *
+ * - It tracks attached properties and their values. This is used by &drm_crtc,
+ *   &drm_plane and &drm_connector. Properties are attached by calling
+ *   drm_object_attach_property() before the object is visible to userspace.
+ *
+ * - For objects with dynamic lifetimes (as indicated by a non-NULL @free_cb) it
+ *   provides reference counting through drm_mode_object_reference() and
+ *   drm_mode_object_unreference(). This is used by &drm_framebuffer,
+ *   &drm_connector and &drm_property_blob. These objects provide specialized
+ *   reference counting wrappers.
+ */
+struct drm_mode_object {
+	uint32_t id;
+	uint32_t type;
+	struct drm_object_properties *properties;
+	struct kref refcount;
+	void (*free_cb)(struct kref *kref);
+};
+
+#define DRM_OBJECT_MAX_PROPERTY 24
+/**
+ * struct drm_object_properties - property tracking for &drm_mode_object
+ */
+struct drm_object_properties {
+	/**
+	 * @count: number of valid properties, must be less than or equal to
+	 * DRM_OBJECT_MAX_PROPERTY.
+	 */
+
+	int count;
+	/**
+	 * @properties: Array of pointers to &drm_property.
+	 *
+	 * NOTE: if we ever start dynamically destroying properties (ie.
+	 * not at drm_mode_config_cleanup() time), then we'd have to do
+	 * a better job of detaching property from mode objects to avoid
+	 * dangling property pointers:
+	 */
+	struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY];
+
+	/**
+	 * @values: Array to store the property values, matching @properties. Do
+	 * not read/write values directly, but use
+	 * drm_object_property_get_value() and drm_object_property_set_value().
+	 *
+	 * Note that atomic drivers do not store mutable properties in this
+	 * array, but only the decoded values in the corresponding state
+	 * structure. The decoding is done using the ->atomic_get_property and
+	 * ->atomic_set_property hooks of the corresponding object. Hence atomic
+	 * drivers should not use drm_object_property_set_value() and
+	 * drm_object_property_get_value() on mutable objects, i.e. those
+	 * without the DRM_MODE_PROP_IMMUTABLE flag set.
+	 */
+	uint64_t values[DRM_OBJECT_MAX_PROPERTY];
+};
+
+/* Avoid boilerplate.  I'm tired of typing. */
+#define DRM_ENUM_NAME_FN(fnname, list)				\
+	const char *fnname(int val)				\
+	{							\
+		int i;						\
+		for (i = 0; i < ARRAY_SIZE(list); i++) {	\
+			if (list[i].type == val)		\
+				return list[i].name;		\
+		}						\
+		return "(unknown)";				\
+	}
+
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+					     uint32_t id, uint32_t type);
+void drm_mode_object_reference(struct drm_mode_object *obj);
+void drm_mode_object_unreference(struct drm_mode_object *obj);
+
+int drm_object_property_set_value(struct drm_mode_object *obj,
+				  struct drm_property *property,
+				  uint64_t val);
+int drm_object_property_get_value(struct drm_mode_object *obj,
+				  struct drm_property *property,
+				  uint64_t *value);
+
+void drm_object_attach_property(struct drm_mode_object *obj,
+				struct drm_property *property,
+				uint64_t init_val);
+#endif
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index ff48177..9934d91 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -27,6 +27,13 @@
 #ifndef __DRM_MODES_H__
 #define __DRM_MODES_H__
 
+#include <linux/hdmi.h>
+
+#include <drm/drm_mode_object.h>
+#include <drm/drm_connector.h>
+
+struct videomode;
+
 /*
  * Note on terminology:  here, for brevity and convenience, we refer to connector
  * control chips as 'CRTCs'.  They can control any type of connector, VGA, LVDS,
@@ -400,20 +407,7 @@
 	enum hdmi_picture_aspect picture_aspect_ratio;
 };
 
-/* mode specified on the command line */
-struct drm_cmdline_mode {
-	bool specified;
-	bool refresh_specified;
-	bool bpp_specified;
-	int xres, yres;
-	int bpp;
-	int refresh;
-	bool rb;
-	bool interlace;
-	bool cvt;
-	bool margins;
-	enum drm_connector_force force;
-};
+#define obj_to_mode(x) container_of(x, struct drm_display_mode, base)
 
 /**
  * drm_mode_is_stereo - check for stereo mode flags
@@ -434,7 +428,7 @@
 struct drm_display_mode *drm_mode_create(struct drm_device *dev);
 void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
 void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
-                               const struct drm_display_mode *in);
+			       const struct drm_display_mode *in);
 int drm_mode_convert_umode(struct drm_display_mode *out,
 			   const struct drm_mode_modeinfo *in);
 void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
@@ -457,8 +451,9 @@
 				     struct drm_display_mode *dmode);
 void drm_display_mode_to_videomode(const struct drm_display_mode *dmode,
 				   struct videomode *vm);
+void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags);
 int of_get_drm_display_mode(struct device_node *np,
-			    struct drm_display_mode *dmode,
+			    struct drm_display_mode *dmode, u32 *bus_flags,
 			    int index);
 
 void drm_mode_set_name(struct drm_display_mode *mode);
diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h
new file mode 100644
index 0000000..b8051d5
--- /dev/null
+++ b/include/drm/drm_modeset_helper.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_KMS_HELPER_H__
+#define __DRM_KMS_HELPER_H__
+
+#include <drm/drmP.h>
+
+void drm_helper_move_panel_connectors_to_head(struct drm_device *);
+
+void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+				    const struct drm_mode_fb_cmd2 *mode_cmd);
+
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+		  const struct drm_crtc_funcs *funcs);
+
+#endif
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index b55f218..10e449c 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -266,6 +266,8 @@
 	 * disable anything at the CRTC level. To ensure that runtime PM
 	 * handling (using either DPMS or the new "ACTIVE" property) works
 	 * @disable must be the inverse of @enable for atomic drivers.
+	 * Atomic drivers should consider to use @atomic_disable instead of
+	 * this one.
 	 *
 	 * NOTE:
 	 *
@@ -391,6 +393,28 @@
 	 */
 	void (*atomic_flush)(struct drm_crtc *crtc,
 			     struct drm_crtc_state *old_crtc_state);
+
+	/**
+	 * @atomic_disable:
+	 *
+	 * This callback should be used to disable the CRTC. With the atomic
+	 * drivers it is called after all encoders connected to this CRTC have
+	 * been shut off already using their own ->disable hook. If that
+	 * sequence is too simple drivers can just add their own hooks and call
+	 * it from this CRTC callback here by looping over all encoders
+	 * connected to it using for_each_encoder_on_crtc().
+	 *
+	 * This hook is used only by atomic helpers. Atomic drivers don't
+	 * need to implement it if there's no need to disable anything at the
+	 * CRTC level.
+	 *
+	 * Comparing to @disable, this one provides the additional input
+	 * parameter @old_crtc_state which could be used to access the old
+	 * state. Atomic drivers should consider to use this one instead
+	 * of @disable.
+	 */
+	void (*atomic_disable)(struct drm_crtc *crtc,
+			       struct drm_crtc_state *old_crtc_state);
 };
 
 /**
@@ -523,12 +547,41 @@
 	 *
 	 * This callback is used both by the legacy CRTC helpers and the atomic
 	 * modeset helpers. It is optional in the atomic helpers.
+	 *
+	 * NOTE:
+	 *
+	 * If the driver uses the atomic modeset helpers and needs to inspect
+	 * the connector state or connector display info during mode setting,
+	 * @atomic_mode_set can be used instead.
 	 */
 	void (*mode_set)(struct drm_encoder *encoder,
 			 struct drm_display_mode *mode,
 			 struct drm_display_mode *adjusted_mode);
 
 	/**
+	 * @atomic_mode_set:
+	 *
+	 * This callback is used to update the display mode of an encoder.
+	 *
+	 * Note that the display pipe is completely off when this function is
+	 * called. Drivers which need hardware to be running before they program
+	 * the new display mode (because they implement runtime PM) should not
+	 * use this hook, because the helper library calls it only once and not
+	 * every time the display pipeline is suspended using either DPMS or the
+	 * new "ACTIVE" property. Such drivers should instead move all their
+	 * encoder setup into the ->enable() callback.
+	 *
+	 * This callback is used by the atomic modeset helpers in place of the
+	 * @mode_set callback, if set by the driver. It is optional and should
+	 * be used instead of @mode_set if the driver needs to inspect the
+	 * connector state or display info, since there is no direct way to
+	 * go from the encoder to the current connector.
+	 */
+	void (*atomic_mode_set)(struct drm_encoder *encoder,
+				struct drm_crtc_state *crtc_state,
+				struct drm_connector_state *conn_state);
+
+	/**
 	 * @get_crtc:
 	 *
 	 * This callback is used by the legacy CRTC helpers to work around
@@ -826,7 +879,7 @@
 	 * everything else must complete successfully.
 	 */
 	int (*prepare_fb)(struct drm_plane *plane,
-			  const struct drm_plane_state *new_state);
+			  struct drm_plane_state *new_state);
 	/**
 	 * @cleanup_fb:
 	 *
@@ -837,7 +890,7 @@
 	 * transitional plane helpers, but it is optional.
 	 */
 	void (*cleanup_fb)(struct drm_plane *plane,
-			   const struct drm_plane_state *old_state);
+			   struct drm_plane_state *old_state);
 
 	/**
 	 * @atomic_check:
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
new file mode 100644
index 0000000..43cf193
--- /dev/null
+++ b/include/drm/drm_plane.h
@@ -0,0 +1,526 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_PLANE_H__
+#define __DRM_PLANE_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+struct drm_crtc;
+
+/**
+ * struct drm_plane_state - mutable plane state
+ * @plane: backpointer to the plane
+ * @crtc: currently bound CRTC, NULL if disabled
+ * @fb: currently bound framebuffer
+ * @fence: optional fence to wait for before scanning out @fb
+ * @crtc_x: left position of visible portion of plane on crtc
+ * @crtc_y: upper position of visible portion of plane on crtc
+ * @crtc_w: width of visible portion of plane on crtc
+ * @crtc_h: height of visible portion of plane on crtc
+ * @src_x: left position of visible portion of plane within
+ *	plane (in 16.16)
+ * @src_y: upper position of visible portion of plane within
+ *	plane (in 16.16)
+ * @src_w: width of visible portion of plane (in 16.16)
+ * @src_h: height of visible portion of plane (in 16.16)
+ * @rotation: rotation of the plane
+ * @zpos: priority of the given plane on crtc (optional)
+ * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
+ *	where N is the number of active planes for given crtc
+ * @src: clipped source coordinates of the plane (in 16.16)
+ * @dst: clipped destination coordinates of the plane
+ * @visible: visibility of the plane
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_plane_state {
+	struct drm_plane *plane;
+
+	struct drm_crtc *crtc;   /* do not write directly, use drm_atomic_set_crtc_for_plane() */
+	struct drm_framebuffer *fb;  /* do not write directly, use drm_atomic_set_fb_for_plane() */
+	struct fence *fence;
+
+	/* Signed dest location allows it to be partially off screen */
+	int32_t crtc_x, crtc_y;
+	uint32_t crtc_w, crtc_h;
+
+	/* Source values are 16.16 fixed point */
+	uint32_t src_x, src_y;
+	uint32_t src_h, src_w;
+
+	/* Plane rotation */
+	unsigned int rotation;
+
+	/* Plane zpos */
+	unsigned int zpos;
+	unsigned int normalized_zpos;
+
+	/* Clipped coordinates */
+	struct drm_rect src, dst;
+
+	/*
+	 * Is the plane actually visible? Can be false even
+	 * if fb!=NULL and crtc!=NULL, due to clipping.
+	 */
+	bool visible;
+
+	struct drm_atomic_state *state;
+};
+
+
+/**
+ * struct drm_plane_funcs - driver plane control functions
+ */
+struct drm_plane_funcs {
+	/**
+	 * @update_plane:
+	 *
+	 * This is the legacy entry point to enable and configure the plane for
+	 * the given CRTC and framebuffer. It is never called to disable the
+	 * plane, i.e. the passed-in crtc and fb paramters are never NULL.
+	 *
+	 * The source rectangle in frame buffer memory coordinates is given by
+	 * the src_x, src_y, src_w and src_h parameters (as 16.16 fixed point
+	 * values). Devices that don't support subpixel plane coordinates can
+	 * ignore the fractional part.
+	 *
+	 * The destination rectangle in CRTC coordinates is given by the
+	 * crtc_x, crtc_y, crtc_w and crtc_h parameters (as integer values).
+	 * Devices scale the source rectangle to the destination rectangle. If
+	 * scaling is not supported, and the source rectangle size doesn't match
+	 * the destination rectangle size, the driver must return a
+	 * -<errorname>EINVAL</errorname> error.
+	 *
+	 * Drivers implementing atomic modeset should use
+	 * drm_atomic_helper_update_plane() to implement this hook.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 on success or a negative error code on failure.
+	 */
+	int (*update_plane)(struct drm_plane *plane,
+			    struct drm_crtc *crtc, struct drm_framebuffer *fb,
+			    int crtc_x, int crtc_y,
+			    unsigned int crtc_w, unsigned int crtc_h,
+			    uint32_t src_x, uint32_t src_y,
+			    uint32_t src_w, uint32_t src_h);
+
+	/**
+	 * @disable_plane:
+	 *
+	 * This is the legacy entry point to disable the plane. The DRM core
+	 * calls this method in response to a DRM_IOCTL_MODE_SETPLANE IOCTL call
+	 * with the frame buffer ID set to 0.  Disabled planes must not be
+	 * processed by the CRTC.
+	 *
+	 * Drivers implementing atomic modeset should use
+	 * drm_atomic_helper_disable_plane() to implement this hook.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 on success or a negative error code on failure.
+	 */
+	int (*disable_plane)(struct drm_plane *plane);
+
+	/**
+	 * @destroy:
+	 *
+	 * Clean up plane resources. This is only called at driver unload time
+	 * through drm_mode_config_cleanup() since a plane cannot be hotplugged
+	 * in DRM.
+	 */
+	void (*destroy)(struct drm_plane *plane);
+
+	/**
+	 * @reset:
+	 *
+	 * Reset plane hardware and software state to off. This function isn't
+	 * called by the core directly, only through drm_mode_config_reset().
+	 * It's not a helper hook only for historical reasons.
+	 *
+	 * Atomic drivers can use drm_atomic_helper_plane_reset() to reset
+	 * atomic state using this hook.
+	 */
+	void (*reset)(struct drm_plane *plane);
+
+	/**
+	 * @set_property:
+	 *
+	 * This is the legacy entry point to update a property attached to the
+	 * plane.
+	 *
+	 * Drivers implementing atomic modeset should use
+	 * drm_atomic_helper_plane_set_property() to implement this hook.
+	 *
+	 * This callback is optional if the driver does not support any legacy
+	 * driver-private properties.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 on success or a negative error code on failure.
+	 */
+	int (*set_property)(struct drm_plane *plane,
+			    struct drm_property *property, uint64_t val);
+
+	/**
+	 * @atomic_duplicate_state:
+	 *
+	 * Duplicate the current atomic state for this plane and return it.
+	 * The core and helpers gurantee that any atomic state duplicated with
+	 * this hook and still owned by the caller (i.e. not transferred to the
+	 * driver by calling ->atomic_commit() from struct
+	 * &drm_mode_config_funcs) will be cleaned up by calling the
+	 * @atomic_destroy_state hook in this structure.
+	 *
+	 * Atomic drivers which don't subclass struct &drm_plane_state should use
+	 * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
+	 * state structure to extend it with driver-private state should use
+	 * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is
+	 * duplicated in a consistent fashion across drivers.
+	 *
+	 * It is an error to call this hook before plane->state has been
+	 * initialized correctly.
+	 *
+	 * NOTE:
+	 *
+	 * If the duplicate state references refcounted resources this hook must
+	 * acquire a reference for each of them. The driver must release these
+	 * references again in @atomic_destroy_state.
+	 *
+	 * RETURNS:
+	 *
+	 * Duplicated atomic state or NULL when the allocation failed.
+	 */
+	struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
+
+	/**
+	 * @atomic_destroy_state:
+	 *
+	 * Destroy a state duplicated with @atomic_duplicate_state and release
+	 * or unreference all resources it references
+	 */
+	void (*atomic_destroy_state)(struct drm_plane *plane,
+				     struct drm_plane_state *state);
+
+	/**
+	 * @atomic_set_property:
+	 *
+	 * Decode a driver-private property value and store the decoded value
+	 * into the passed-in state structure. Since the atomic core decodes all
+	 * standardized properties (even for extensions beyond the core set of
+	 * properties which might not be implemented by all drivers) this
+	 * requires drivers to subclass the state structure.
+	 *
+	 * Such driver-private properties should really only be implemented for
+	 * truly hardware/vendor specific state. Instead it is preferred to
+	 * standardize atomic extension and decode the properties used to expose
+	 * such an extension in the core.
+	 *
+	 * Do not call this function directly, use
+	 * drm_atomic_plane_set_property() instead.
+	 *
+	 * This callback is optional if the driver does not support any
+	 * driver-private atomic properties.
+	 *
+	 * NOTE:
+	 *
+	 * This function is called in the state assembly phase of atomic
+	 * modesets, which can be aborted for any reason (including on
+	 * userspace's request to just check whether a configuration would be
+	 * possible). Drivers MUST NOT touch any persistent state (hardware or
+	 * software) or data structures except the passed in @state parameter.
+	 *
+	 * Also since userspace controls in which order properties are set this
+	 * function must not do any input validation (since the state update is
+	 * incomplete and hence likely inconsistent). Instead any such input
+	 * validation must be done in the various atomic_check callbacks.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 if the property has been found, -EINVAL if the property isn't
+	 * implemented by the driver (which shouldn't ever happen, the core only
+	 * asks for properties attached to this plane). No other validation is
+	 * allowed by the driver. The core already checks that the property
+	 * value is within the range (integer, valid enum value, ...) the driver
+	 * set when registering the property.
+	 */
+	int (*atomic_set_property)(struct drm_plane *plane,
+				   struct drm_plane_state *state,
+				   struct drm_property *property,
+				   uint64_t val);
+
+	/**
+	 * @atomic_get_property:
+	 *
+	 * Reads out the decoded driver-private property. This is used to
+	 * implement the GETPLANE IOCTL.
+	 *
+	 * Do not call this function directly, use
+	 * drm_atomic_plane_get_property() instead.
+	 *
+	 * This callback is optional if the driver does not support any
+	 * driver-private atomic properties.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 on success, -EINVAL if the property isn't implemented by the
+	 * driver (which should never happen, the core only asks for
+	 * properties attached to this plane).
+	 */
+	int (*atomic_get_property)(struct drm_plane *plane,
+				   const struct drm_plane_state *state,
+				   struct drm_property *property,
+				   uint64_t *val);
+	/**
+	 * @late_register:
+	 *
+	 * This optional hook can be used to register additional userspace
+	 * interfaces attached to the plane like debugfs interfaces.
+	 * It is called late in the driver load sequence from drm_dev_register().
+	 * Everything added from this callback should be unregistered in
+	 * the early_unregister callback.
+	 *
+	 * Returns:
+	 *
+	 * 0 on success, or a negative error code on failure.
+	 */
+	int (*late_register)(struct drm_plane *plane);
+
+	/**
+	 * @early_unregister:
+	 *
+	 * This optional hook should be used to unregister the additional
+	 * userspace interfaces attached to the plane from
+	 * late_unregister(). It is called from drm_dev_unregister(),
+	 * early in the driver unload sequence to disable userspace access
+	 * before data structures are torndown.
+	 */
+	void (*early_unregister)(struct drm_plane *plane);
+};
+
+/**
+ * enum drm_plane_type - uapi plane type enumeration
+ *
+ * For historical reasons not all planes are made the same. This enumeration is
+ * used to tell the different types of planes apart to implement the different
+ * uapi semantics for them. For userspace which is universal plane aware and
+ * which is using that atomic IOCTL there's no difference between these planes
+ * (beyong what the driver and hardware can support of course).
+ *
+ * For compatibility with legacy userspace, only overlay planes are made
+ * available to userspace by default. Userspace clients may set the
+ * DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they
+ * wish to receive a universal plane list containing all plane types. See also
+ * drm_for_each_legacy_plane().
+ *
+ * WARNING: The values of this enum is UABI since they're exposed in the "type"
+ * property.
+ */
+enum drm_plane_type {
+	/**
+	 * @DRM_PLANE_TYPE_OVERLAY:
+	 *
+	 * Overlay planes represent all non-primary, non-cursor planes. Some
+	 * drivers refer to these types of planes as "sprites" internally.
+	 */
+	DRM_PLANE_TYPE_OVERLAY,
+
+	/**
+	 * @DRM_PLANE_TYPE_PRIMARY:
+	 *
+	 * Primary planes represent a "main" plane for a CRTC.  Primary planes
+	 * are the planes operated upon by CRTC modesetting and flipping
+	 * operations described in the page_flip and set_config hooks in struct
+	 * &drm_crtc_funcs.
+	 */
+	DRM_PLANE_TYPE_PRIMARY,
+
+	/**
+	 * @DRM_PLANE_TYPE_CURSOR:
+	 *
+	 * Cursor planes represent a "cursor" plane for a CRTC.  Cursor planes
+	 * are the planes operated upon by the DRM_IOCTL_MODE_CURSOR and
+	 * DRM_IOCTL_MODE_CURSOR2 IOCTLs.
+	 */
+	DRM_PLANE_TYPE_CURSOR,
+};
+
+
+/**
+ * struct drm_plane - central DRM plane control structure
+ * @dev: DRM device this plane belongs to
+ * @head: for list management
+ * @name: human readable name, can be overwritten by the driver
+ * @base: base mode object
+ * @possible_crtcs: pipes this plane can be bound to
+ * @format_types: array of formats supported by this plane
+ * @format_count: number of formats supported
+ * @format_default: driver hasn't supplied supported formats for the plane
+ * @crtc: currently bound CRTC
+ * @fb: currently bound fb
+ * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
+ * 	drm_mode_set_config_internal() to implement correct refcounting.
+ * @funcs: helper functions
+ * @properties: property tracking for this plane
+ * @type: type of plane (overlay, primary, cursor)
+ * @state: current atomic state for this plane
+ * @zpos_property: zpos property for this plane
+ * @helper_private: mid-layer private data
+ */
+struct drm_plane {
+	struct drm_device *dev;
+	struct list_head head;
+
+	char *name;
+
+	/**
+	 * @mutex:
+	 *
+	 * Protects modeset plane state, together with the mutex of &drm_crtc
+	 * this plane is linked to (when active, getting actived or getting
+	 * disabled).
+	 */
+	struct drm_modeset_lock mutex;
+
+	struct drm_mode_object base;
+
+	uint32_t possible_crtcs;
+	uint32_t *format_types;
+	unsigned int format_count;
+	bool format_default;
+
+	struct drm_crtc *crtc;
+	struct drm_framebuffer *fb;
+
+	struct drm_framebuffer *old_fb;
+
+	const struct drm_plane_funcs *funcs;
+
+	struct drm_object_properties properties;
+
+	enum drm_plane_type type;
+
+	/**
+	 * @index: Position inside the mode_config.list, can be used as an array
+	 * index. It is invariant over the lifetime of the plane.
+	 */
+	unsigned index;
+
+	const struct drm_plane_helper_funcs *helper_private;
+
+	struct drm_plane_state *state;
+
+	struct drm_property *zpos_property;
+};
+
+#define obj_to_plane(x) container_of(x, struct drm_plane, base)
+
+extern __printf(8, 9)
+int drm_universal_plane_init(struct drm_device *dev,
+			     struct drm_plane *plane,
+			     unsigned long possible_crtcs,
+			     const struct drm_plane_funcs *funcs,
+			     const uint32_t *formats,
+			     unsigned int format_count,
+			     enum drm_plane_type type,
+			     const char *name, ...);
+extern int drm_plane_init(struct drm_device *dev,
+			  struct drm_plane *plane,
+			  unsigned long possible_crtcs,
+			  const struct drm_plane_funcs *funcs,
+			  const uint32_t *formats, unsigned int format_count,
+			  bool is_primary);
+extern void drm_plane_cleanup(struct drm_plane *plane);
+
+/**
+ * drm_plane_index - find the index of a registered plane
+ * @plane: plane to find index for
+ *
+ * Given a registered plane, return the index of that plane within a DRM
+ * device's list of planes.
+ */
+static inline unsigned int drm_plane_index(struct drm_plane *plane)
+{
+	return plane->index;
+}
+extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
+extern void drm_plane_force_disable(struct drm_plane *plane);
+
+int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
+				       struct drm_property *property,
+				       uint64_t value);
+
+/**
+ * drm_plane_find - find a &drm_plane
+ * @dev: DRM device
+ * @id: plane id
+ *
+ * Returns the plane with @id, NULL if it doesn't exist. Simple wrapper around
+ * drm_mode_object_find().
+ */
+static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
+		uint32_t id)
+{
+	struct drm_mode_object *mo;
+	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE);
+	return mo ? obj_to_plane(mo) : NULL;
+}
+
+/**
+ * drm_for_each_plane_mask - iterate over planes specified by bitmask
+ * @plane: the loop cursor
+ * @dev: the DRM device
+ * @plane_mask: bitmask of plane indices
+ *
+ * Iterate over all planes specified by bitmask.
+ */
+#define drm_for_each_plane_mask(plane, dev, plane_mask) \
+	list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
+		for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
+
+/**
+ * drm_for_each_legacy_plane - iterate over all planes for legacy userspace
+ * @plane: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all legacy planes of @dev, excluding primary and cursor planes.
+ * This is useful for implementing userspace apis when userspace is not
+ * universal plane aware. See also enum &drm_plane_type.
+ */
+#define drm_for_each_legacy_plane(plane, dev) \
+	list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
+		for_each_if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+
+/**
+ * drm_for_each_plane - iterate over all planes
+ * @plane: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all planes of @dev, include primary and cursor planes.
+ */
+#define drm_for_each_plane(plane, dev) \
+	list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
+
+
+#endif
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 0e0c357..c189596 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -27,6 +27,7 @@
 #include <drm/drm_rect.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_modeset_helper.h>
 
 /*
  * Drivers that don't allow primary plane scaling may pass this macro in place
@@ -37,9 +38,11 @@
  */
 #define DRM_PLANE_HELPER_NO_SCALING (1<<16)
 
-int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
-		  const struct drm_crtc_funcs *funcs);
-
+int drm_plane_helper_check_state(struct drm_plane_state *state,
+				 const struct drm_rect *clip,
+				 int min_scale, int max_scale,
+				 bool can_position,
+				 bool can_update_disabled);
 int drm_plane_helper_check_update(struct drm_plane *plane,
 				  struct drm_crtc *crtc,
 				  struct drm_framebuffer *fb,
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
new file mode 100644
index 0000000..43c4b6a
--- /dev/null
+++ b/include/drm/drm_property.h
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_PROPERTY_H__
+#define __DRM_PROPERTY_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+/**
+ * struct drm_property_enum - symbolic values for enumerations
+ * @value: numeric property value for this enum entry
+ * @head: list of enum values, linked to enum_list in &drm_property
+ * @name: symbolic name for the enum
+ *
+ * For enumeration and bitmask properties this structure stores the symbolic
+ * decoding for each value. This is used for example for the rotation property.
+ */
+struct drm_property_enum {
+	uint64_t value;
+	struct list_head head;
+	char name[DRM_PROP_NAME_LEN];
+};
+
+/**
+ * struct drm_property - modeset object property
+ *
+ * This structure represent a modeset object property. It combines both the name
+ * of the property with the set of permissible values. This means that when a
+ * driver wants to use a property with the same name on different objects, but
+ * with different value ranges, then it must create property for each one. An
+ * example would be rotation of &drm_plane, when e.g. the primary plane cannot
+ * be rotated. But if both the name and the value range match, then the same
+ * property structure can be instantiated multiple times for the same object.
+ * Userspace must be able to cope with this and cannot assume that the same
+ * symbolic property will have the same modeset object ID on all modeset
+ * objects.
+ *
+ * Properties are created by one of the special functions, as explained in
+ * detail in the @flags structure member.
+ *
+ * To actually expose a property it must be attached to each object using
+ * drm_object_attach_property(). Currently properties can only be attached to
+ * &drm_connector, &drm_crtc and &drm_plane.
+ *
+ * Properties are also used as the generic metadatatransport for the atomic
+ * IOCTL. Everything that was set directly in structures in the legacy modeset
+ * IOCTLs (like the plane source or destination windows, or e.g. the links to
+ * the CRTC) is exposed as a property with the DRM_MODE_PROP_ATOMIC flag set.
+ */
+struct drm_property {
+	/**
+	 * @head: per-device list of properties, for cleanup.
+	 */
+	struct list_head head;
+
+	/**
+	 * @base: base KMS object
+	 */
+	struct drm_mode_object base;
+
+	/**
+	 * @flags:
+	 *
+	 * Property flags and type. A property needs to be one of the following
+	 * types:
+	 *
+	 * DRM_MODE_PROP_RANGE
+	 *     Range properties report their minimum and maximum admissible unsigned values.
+	 *     The KMS core verifies that values set by application fit in that
+	 *     range. The range is unsigned. Range properties are created using
+	 *     drm_property_create_range().
+	 *
+	 * DRM_MODE_PROP_SIGNED_RANGE
+	 *     Range properties report their minimum and maximum admissible unsigned values.
+	 *     The KMS core verifies that values set by application fit in that
+	 *     range. The range is signed. Range properties are created using
+	 *     drm_property_create_signed_range().
+	 *
+	 * DRM_MODE_PROP_ENUM
+	 *     Enumerated properties take a numerical value that ranges from 0 to
+	 *     the number of enumerated values defined by the property minus one,
+	 *     and associate a free-formed string name to each value. Applications
+	 *     can retrieve the list of defined value-name pairs and use the
+	 *     numerical value to get and set property instance values. Enum
+	 *     properties are created using drm_property_create_enum().
+	 *
+	 * DRM_MODE_PROP_BITMASK
+	 *     Bitmask properties are enumeration properties that additionally
+	 *     restrict all enumerated values to the 0..63 range. Bitmask property
+	 *     instance values combine one or more of the enumerated bits defined
+	 *     by the property. Bitmask properties are created using
+	 *     drm_property_create_bitmask().
+	 *
+	 * DRM_MODE_PROB_OBJECT
+	 *     Object properties are used to link modeset objects. This is used
+	 *     extensively in the atomic support to create the display pipeline,
+	 *     by linking &drm_framebuffer to &drm_plane, &drm_plane to
+	 *     &drm_crtc and &drm_connector to &drm_crtc. An object property can
+	 *     only link to a specific type of &drm_mode_object, this limit is
+	 *     enforced by the core. Object properties are created using
+	 *     drm_property_create_object().
+	 *
+	 *     Object properties work like blob properties, but in a more
+	 *     general fashion. They are limited to atomic drivers and must have
+	 *     the DRM_MODE_PROP_ATOMIC flag set.
+	 *
+	 * DRM_MODE_PROP_BLOB
+	 *     Blob properties store a binary blob without any format restriction.
+	 *     The binary blobs are created as KMS standalone objects, and blob
+	 *     property instance values store the ID of their associated blob
+	 *     object. Blob properties are created by calling
+	 *     drm_property_create() with DRM_MODE_PROP_BLOB as the type.
+	 *
+	 *     Actual blob objects to contain blob data are created using
+	 *     drm_property_create_blob(), or through the corresponding IOCTL.
+	 *
+	 *     Besides the built-in limit to only accept blob objects blob
+	 *     properties work exactly like object properties. The only reasons
+	 *     blob properties exist is backwards compatibility with existing
+	 *     userspace.
+	 *
+	 * In addition a property can have any combination of the below flags:
+	 *
+	 * DRM_MODE_PROP_ATOMIC
+	 *     Set for properties which encode atomic modeset state. Such
+	 *     properties are not exposed to legacy userspace.
+	 *
+	 * DRM_MODE_PROP_IMMUTABLE
+	 *     Set for properties where userspace cannot be changed by
+	 *     userspace. The kernel is allowed to update the value of these
+	 *     properties. This is generally used to expose probe state to
+	 *     usersapce, e.g. the EDID, or the connector path property on DP
+	 *     MST sinks.
+	 */
+	uint32_t flags;
+
+	/**
+	 * @name: symbolic name of the properties
+	 */
+	char name[DRM_PROP_NAME_LEN];
+
+	/**
+	 * @num_values: size of the @values array.
+	 */
+	uint32_t num_values;
+
+	/**
+	 * @values:
+	 *
+	 * Array with limits and values for the property. The
+	 * interpretation of these limits is dependent upon the type per @flags.
+	 */
+	uint64_t *values;
+
+	/**
+	 * @dev: DRM device
+	 */
+	struct drm_device *dev;
+
+	/**
+	 * @enum_list:
+	 *
+	 * List of &drm_prop_enum_list structures with the symbolic names for
+	 * enum and bitmask values.
+	 */
+	struct list_head enum_list;
+};
+
+/**
+ * struct drm_property_blob - Blob data for &drm_property
+ * @base: base KMS object
+ * @dev: DRM device
+ * @head_global: entry on the global blob list in &drm_mode_config
+ *	property_blob_list.
+ * @head_file: entry on the per-file blob list in &drm_file blobs list.
+ * @length: size of the blob in bytes, invariant over the lifetime of the object
+ * @data: actual data, embedded at the end of this structure
+ *
+ * Blobs are used to store bigger values than what fits directly into the 64
+ * bits available for a &drm_property.
+ *
+ * Blobs are reference counted using drm_property_reference_blob() and
+ * drm_property_unreference_blob(). They are created using
+ * drm_property_create_blob().
+ */
+struct drm_property_blob {
+	struct drm_mode_object base;
+	struct drm_device *dev;
+	struct list_head head_global;
+	struct list_head head_file;
+	size_t length;
+	unsigned char data[];
+};
+
+struct drm_prop_enum_list {
+	int type;
+	char *name;
+};
+
+#define obj_to_property(x) container_of(x, struct drm_property, base)
+#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
+
+/**
+ * drm_property_type_is - check the type of a property
+ * @property: property to check
+ * @type: property type to compare with
+ *
+ * This is a helper function becauase the uapi encoding of property types is
+ * a bit special for historical reasons.
+ */
+static inline bool drm_property_type_is(struct drm_property *property,
+					uint32_t type)
+{
+	/* instanceof for props.. handles extended type vs original types: */
+	if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
+		return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type;
+	return property->flags & type;
+}
+
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+					 const char *name, int num_values);
+struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+					      const char *name,
+					      const struct drm_prop_enum_list *props,
+					      int num_values);
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+						 int flags, const char *name,
+						 const struct drm_prop_enum_list *props,
+						 int num_props,
+						 uint64_t supported_bits);
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+					       const char *name,
+					       uint64_t min, uint64_t max);
+struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
+						      int flags, const char *name,
+						      int64_t min, int64_t max);
+struct drm_property *drm_property_create_object(struct drm_device *dev,
+						int flags, const char *name, uint32_t type);
+struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
+					      const char *name);
+int drm_property_add_enum(struct drm_property *property, int index,
+			  uint64_t value, const char *name);
+void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
+
+struct drm_property_blob *drm_property_create_blob(struct drm_device *dev,
+						   size_t length,
+						   const void *data);
+struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
+						   uint32_t id);
+int drm_property_replace_global_blob(struct drm_device *dev,
+				     struct drm_property_blob **replace,
+				     size_t length,
+				     const void *data,
+				     struct drm_mode_object *obj_holds_id,
+				     struct drm_property *prop_holds_id);
+struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob);
+void drm_property_unreference_blob(struct drm_property_blob *blob);
+
+/**
+ * drm_connector_find - find property object
+ * @dev: DRM device
+ * @id: property object id
+ *
+ * This function looks up the property object specified by id and returns it.
+ */
+static inline struct drm_property *drm_property_find(struct drm_device *dev,
+						     uint32_t id)
+{
+	struct drm_mode_object *mo;
+	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PROPERTY);
+	return mo ? obj_to_property(mo) : NULL;
+}
+
+#endif
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 2690397..01a8436 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -60,9 +60,35 @@
 	 *
 	 * This function is called when the underlying plane state is updated.
 	 * This hook is optional.
+	 *
+	 * This is the function drivers should submit the
+	 * &drm_pending_vblank_event from. Using either
+	 * drm_crtc_arm_vblank_event(), when the driver supports vblank
+	 * interrupt handling, or drm_crtc_send_vblank_event() directly in case
+	 * the hardware lacks vblank support entirely.
 	 */
 	void (*update)(struct drm_simple_display_pipe *pipe,
 		       struct drm_plane_state *plane_state);
+
+	/**
+	 * @prepare_fb:
+	 *
+	 * Optional, called by struct &drm_plane_helper_funcs ->prepare_fb .
+	 * Please read the documentation for the ->prepare_fb hook in
+	 * struct &drm_plane_helper_funcs for more details.
+	 */
+	int (*prepare_fb)(struct drm_simple_display_pipe *pipe,
+			  struct drm_plane_state *plane_state);
+
+	/**
+	 * @cleanup_fb:
+	 *
+	 * Optional, called by struct &drm_plane_helper_funcs ->cleanup_fb .
+	 * Please read the documentation for the ->cleanup_fb hook in
+	 * struct &drm_plane_helper_funcs for more details.
+	 */
+	void (*cleanup_fb)(struct drm_simple_display_pipe *pipe,
+			   struct drm_plane_state *plane_state);
 };
 
 /**
@@ -85,6 +111,11 @@
 	const struct drm_simple_display_pipe_funcs *funcs;
 };
 
+int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe,
+					  struct drm_bridge *bridge);
+
+void drm_simple_display_pipe_detach_bridge(struct drm_simple_display_pipe *pipe);
+
 int drm_simple_display_pipe_init(struct drm_device *dev,
 			struct drm_simple_display_pipe *pipe,
 			const struct drm_simple_display_pipe_funcs *funcs,
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 06ea8e07..9c03895 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -24,29 +24,28 @@
  */
 
 #include <drm/drm_mm.h>
-#include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
 
+struct drm_file;
+
 struct drm_vma_offset_file {
 	struct rb_node vm_rb;
-	struct file *vm_filp;
+	struct drm_file *vm_tag;
 	unsigned long vm_count;
 };
 
 struct drm_vma_offset_node {
 	rwlock_t vm_lock;
 	struct drm_mm_node vm_node;
-	struct rb_node vm_rb;
 	struct rb_root vm_files;
 };
 
 struct drm_vma_offset_manager {
 	rwlock_t vm_lock;
-	struct rb_root vm_addr_space_rb;
 	struct drm_mm vm_addr_space_mm;
 };
 
@@ -62,10 +61,11 @@
 void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
 			   struct drm_vma_offset_node *node);
 
-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp);
-void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp);
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
+void drm_vma_node_revoke(struct drm_vma_offset_node *node,
+			 struct drm_file *tag);
 bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
-			     struct file *filp);
+			     struct drm_file *tag);
 
 /**
  * drm_vma_offset_exact_lookup_locked() - Look up node by exact address
@@ -216,9 +216,9 @@
 /**
  * drm_vma_node_verify_access() - Access verification helper for TTM
  * @node: Offset node
- * @filp: Open-file
+ * @tag: Tag of file to check
  *
- * This checks whether @filp is granted access to @node. It is the same as
+ * This checks whether @tag is granted access to @node. It is the same as
  * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM
  * verify_access() callbacks.
  *
@@ -226,9 +226,9 @@
  * 0 if access is granted, -EACCES otherwise.
  */
 static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
-					     struct file *filp)
+					     struct drm_file *tag)
 {
-	return drm_vma_node_is_allowed(node, filp) ? 0 : -EACCES;
+	return drm_vma_node_is_allowed(node, tag) ? 0 : -EACCES;
 }
 
 #endif /* __DRM_VMA_MANAGER_H__ */
diff --git a/include/drm/i2c/tda998x.h b/include/drm/i2c/tda998x.h
index 3e419d9..a254830 100644
--- a/include/drm/i2c/tda998x.h
+++ b/include/drm/i2c/tda998x.h
@@ -1,6 +1,24 @@
 #ifndef __DRM_I2C_TDA998X_H__
 #define __DRM_I2C_TDA998X_H__
 
+#include <linux/hdmi.h>
+#include <dt-bindings/display/tda998x.h>
+
+enum {
+	AFMT_UNUSED =	0,
+	AFMT_SPDIF =	TDA998x_SPDIF,
+	AFMT_I2S =	TDA998x_I2S,
+};
+
+struct tda998x_audio_params {
+	u8 config;
+	u8 format;
+	unsigned sample_width;
+	unsigned sample_rate;
+	struct hdmi_audio_infoframe cea;
+	u8 status[5];
+};
+
 struct tda998x_encoder_params {
 	u8 swap_b:3;
 	u8 mirr_b:1;
@@ -15,16 +33,7 @@
 	u8 swap_e:3;
 	u8 mirr_e:1;
 
-	u8 audio_cfg;
-	u8 audio_clk_cfg;
-	u8 audio_frame[6];
-
-	enum {
-		AFMT_SPDIF,
-		AFMT_I2S
-	} audio_format;
-
-	unsigned audio_sample_rate;
+	struct tda998x_audio_params audio_params;
 };
 
 #endif
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index b1755f8..4e1b274 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -93,6 +93,6 @@
 #define    I845_TSEG_SIZE_1M	(3 << 1)
 
 #define INTEL_BSM 0x5c
-#define   INTEL_BSM_MASK (0xFFFF << 20)
+#define   INTEL_BSM_MASK	(-(1u << 20))
 
 #endif				/* _I915_DRM_H_ */
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 33466bf..0d5f426 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -134,7 +134,7 @@
 #define INTEL_IVB_Q_IDS(info) \
 	INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
 
-#define INTEL_HSW_D_IDS(info) \
+#define INTEL_HSW_IDS(info) \
 	INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
 	INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
 	INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
@@ -179,9 +179,7 @@
 	INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
 	INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
 	INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
-	INTEL_VGA_DEVICE(0x0D2E, info)  /* CRW GT3 reserved */ \
-
-#define INTEL_HSW_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0D2E, info),  /* CRW GT3 reserved */ \
 	INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
 	INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
 	INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
@@ -198,17 +196,15 @@
 	INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
 	INTEL_VGA_DEVICE(0x0D26, info)  /* CRW GT3 mobile */
 
-#define INTEL_VLV_M_IDS(info) \
+#define INTEL_VLV_IDS(info) \
 	INTEL_VGA_DEVICE(0x0f30, info), \
 	INTEL_VGA_DEVICE(0x0f31, info), \
 	INTEL_VGA_DEVICE(0x0f32, info), \
 	INTEL_VGA_DEVICE(0x0f33, info), \
-	INTEL_VGA_DEVICE(0x0157, info)
-
-#define INTEL_VLV_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0157, info), \
 	INTEL_VGA_DEVICE(0x0155, info)
 
-#define INTEL_BDW_GT12M_IDS(info)  \
+#define INTEL_BDW_GT12_IDS(info)  \
 	INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
 	INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
 	INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \
@@ -216,21 +212,17 @@
 	INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
 	INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
 	INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \
-	INTEL_VGA_DEVICE(0x161E, info)  /* GT2 ULX */
-
-#define INTEL_BDW_GT12D_IDS(info) \
+	INTEL_VGA_DEVICE(0x161E, info),  /* GT2 ULX */ \
 	INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
 	INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \
 	INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
 	INTEL_VGA_DEVICE(0x161D, info)  /* GT2 Workstation */
 
-#define INTEL_BDW_GT3M_IDS(info) \
+#define INTEL_BDW_GT3_IDS(info) \
 	INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \
 	INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \
 	INTEL_VGA_DEVICE(0x162B, info), /* Iris */ \
-	INTEL_VGA_DEVICE(0x162E, info)  /* ULX */
-
-#define INTEL_BDW_GT3D_IDS(info) \
+	INTEL_VGA_DEVICE(0x162E, info),  /* ULX */\
 	INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
 	INTEL_VGA_DEVICE(0x162D, info)  /* Workstation */
 
@@ -244,14 +236,12 @@
 	INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
 	INTEL_VGA_DEVICE(0x163D, info)  /* Workstation */
 
-#define INTEL_BDW_M_IDS(info) \
-	INTEL_BDW_GT12M_IDS(info), \
-	INTEL_BDW_GT3M_IDS(info), \
-	INTEL_BDW_RSVDM_IDS(info)
-
-#define INTEL_BDW_D_IDS(info) \
-	INTEL_BDW_GT12D_IDS(info), \
-	INTEL_BDW_GT3D_IDS(info), \
+#define INTEL_BDW_IDS(info) \
+	INTEL_BDW_GT12_IDS(info), \
+	INTEL_BDW_GT3_IDS(info), \
+	INTEL_BDW_RSVDM_IDS(info), \
+	INTEL_BDW_GT12_IDS(info), \
+	INTEL_BDW_GT3_IDS(info), \
 	INTEL_BDW_RSVDD_IDS(info)
 
 #define INTEL_CHV_IDS(info) \
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 6f2c598..9eb940d 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -45,37 +45,7 @@
 
 struct drm_mm_node;
 
-/**
- * struct ttm_place
- *
- * @fpfn:	first valid page frame number to put the object
- * @lpfn:	last valid page frame number to put the object
- * @flags:	memory domain and caching flags for the object
- *
- * Structure indicating a possible place to put an object.
- */
-struct ttm_place {
-	unsigned	fpfn;
-	unsigned	lpfn;
-	uint32_t	flags;
-};
-
-/**
- * struct ttm_placement
- *
- * @num_placement:	number of preferred placements
- * @placement:		preferred placements
- * @num_busy_placement:	number of preferred placements when need to evict buffer
- * @busy_placement:	preferred placements when need to evict buffer
- *
- * Structure indicating the placement you request for an object.
- */
-struct ttm_placement {
-	unsigned		num_placement;
-	const struct ttm_place	*placement;
-	unsigned		num_busy_placement;
-	const struct ttm_place	*busy_placement;
-};
+struct ttm_placement;
 
 /**
  * struct ttm_bus_placement
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 99c6d01..4f0a921 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -133,7 +133,6 @@
  * struct ttm_dma_tt
  *
  * @ttm: Base ttm_tt struct.
- * @cpu_address: The CPU address of the pages
  * @dma_address: The DMA (bus) addresses of the pages
  * @pages_list: used by some page allocation backend
  *
@@ -143,7 +142,6 @@
  */
 struct ttm_dma_tt {
 	struct ttm_tt ttm;
-	void **cpu_address;
 	dma_addr_t *dma_address;
 	struct list_head pages_list;
 };
@@ -961,7 +959,6 @@
  * ttm_bo_move_ttm
  *
  * @bo: A pointer to a struct ttm_buffer_object.
- * @evict: 1: This is an eviction. Don't try to pipeline.
  * @interruptible: Sleep interruptible if waiting.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
@@ -977,14 +974,13 @@
  */
 
 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-			   bool evict, bool interruptible, bool no_wait_gpu,
+			   bool interruptible, bool no_wait_gpu,
 			   struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_move_memcpy
  *
  * @bo: A pointer to a struct ttm_buffer_object.
- * @evict: 1: This is an eviction. Don't try to pipeline.
  * @interruptible: Sleep interruptible if waiting.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
@@ -1000,8 +996,7 @@
  */
 
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-			      bool evict, bool interruptible,
-			      bool no_wait_gpu,
+			      bool interruptible, bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem);
 
 /**
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 72dcbe8..c452089 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -155,4 +155,5 @@
 extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
 				     struct page *page);
 extern size_t ttm_round_pot(size_t size);
+extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob);
 #endif
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
index 8ed44f9..932be0c 100644
--- a/include/drm/ttm/ttm_placement.h
+++ b/include/drm/ttm/ttm_placement.h
@@ -30,6 +30,9 @@
 
 #ifndef _TTM_PLACEMENT_H_
 #define _TTM_PLACEMENT_H_
+
+#include <linux/types.h>
+
 /*
  * Memory regions for data placement.
  */
@@ -37,24 +40,12 @@
 #define TTM_PL_SYSTEM           0
 #define TTM_PL_TT               1
 #define TTM_PL_VRAM             2
-#define TTM_PL_PRIV0            3
-#define TTM_PL_PRIV1            4
-#define TTM_PL_PRIV2            5
-#define TTM_PL_PRIV3            6
-#define TTM_PL_PRIV4            7
-#define TTM_PL_PRIV5            8
-#define TTM_PL_SWAPPED          15
+#define TTM_PL_PRIV             3
 
 #define TTM_PL_FLAG_SYSTEM      (1 << TTM_PL_SYSTEM)
 #define TTM_PL_FLAG_TT          (1 << TTM_PL_TT)
 #define TTM_PL_FLAG_VRAM        (1 << TTM_PL_VRAM)
-#define TTM_PL_FLAG_PRIV0       (1 << TTM_PL_PRIV0)
-#define TTM_PL_FLAG_PRIV1       (1 << TTM_PL_PRIV1)
-#define TTM_PL_FLAG_PRIV2       (1 << TTM_PL_PRIV2)
-#define TTM_PL_FLAG_PRIV3       (1 << TTM_PL_PRIV3)
-#define TTM_PL_FLAG_PRIV4       (1 << TTM_PL_PRIV4)
-#define TTM_PL_FLAG_PRIV5       (1 << TTM_PL_PRIV5)
-#define TTM_PL_FLAG_SWAPPED     (1 << TTM_PL_SWAPPED)
+#define TTM_PL_FLAG_PRIV        (1 << TTM_PL_PRIV)
 #define TTM_PL_MASK_MEM         0x0000FFFF
 
 /*
@@ -72,7 +63,6 @@
 #define TTM_PL_FLAG_CACHED      (1 << 16)
 #define TTM_PL_FLAG_UNCACHED    (1 << 17)
 #define TTM_PL_FLAG_WC          (1 << 18)
-#define TTM_PL_FLAG_SHARED      (1 << 20)
 #define TTM_PL_FLAG_NO_EVICT    (1 << 21)
 #define TTM_PL_FLAG_TOPDOWN     (1 << 22)
 
@@ -82,14 +72,36 @@
 
 #define TTM_PL_MASK_MEMTYPE     (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
 
-/*
- * Access flags to be used for CPU- and GPU- mappings.
- * The idea is that the TTM synchronization mechanism will
- * allow concurrent READ access and exclusive write access.
- * Currently GPU- and CPU accesses are exclusive.
+/**
+ * struct ttm_place
+ *
+ * @fpfn:	first valid page frame number to put the object
+ * @lpfn:	last valid page frame number to put the object
+ * @flags:	memory domain and caching flags for the object
+ *
+ * Structure indicating a possible place to put an object.
  */
+struct ttm_place {
+	unsigned	fpfn;
+	unsigned	lpfn;
+	uint32_t	flags;
+};
 
-#define TTM_ACCESS_READ         (1 << 0)
-#define TTM_ACCESS_WRITE        (1 << 1)
+/**
+ * struct ttm_placement
+ *
+ * @num_placement:	number of preferred placements
+ * @placement:		preferred placements
+ * @num_busy_placement:	number of preferred placements when need to evict buffer
+ * @busy_placement:	preferred placements when need to evict buffer
+ *
+ * Structure indicating the placement you request for an object.
+ */
+struct ttm_placement {
+	unsigned		num_placement;
+	const struct ttm_place	*placement;
+	unsigned		num_busy_placement;
+	const struct ttm_place	*busy_placement;
+};
 
 #endif
diff --git a/include/dt-bindings/display/tda998x.h b/include/dt-bindings/display/tda998x.h
new file mode 100644
index 0000000..34757a3
--- /dev/null
+++ b/include/dt-bindings/display/tda998x.h
@@ -0,0 +1,7 @@
+#ifndef _DT_BINDINGS_TDA998X_H
+#define _DT_BINDINGS_TDA998X_H
+
+#define TDA998x_SPDIF	1
+#define TDA998x_I2S	2
+
+#endif /*_DT_BINDINGS_TDA998X_H */
diff --git a/include/dt-bindings/memory/mt2701-larb-port.h b/include/dt-bindings/memory/mt2701-larb-port.h
index 78f6678..6764d74 100644
--- a/include/dt-bindings/memory/mt2701-larb-port.h
+++ b/include/dt-bindings/memory/mt2701-larb-port.h
@@ -26,7 +26,7 @@
 #define LARB0_PORT_OFFSET		0
 #define LARB1_PORT_OFFSET		11
 #define LARB2_PORT_OFFSET		21
-#define LARB3_PORT_OFFSET		43
+#define LARB3_PORT_OFFSET		44
 
 #define MT2701_M4U_ID_LARB0(port)	((port) + LARB0_PORT_OFFSET)
 #define MT2701_M4U_ID_LARB1(port)	((port) + LARB1_PORT_OFFSET)
diff --git a/include/dt-bindings/thermal/tegra124-soctherm.h b/include/dt-bindings/thermal/tegra124-soctherm.h
index 729ab9f..2a99f1d 100644
--- a/include/dt-bindings/thermal/tegra124-soctherm.h
+++ b/include/dt-bindings/thermal/tegra124-soctherm.h
@@ -11,4 +11,9 @@
 #define TEGRA124_SOCTHERM_SENSOR_PLLX 3
 #define TEGRA124_SOCTHERM_SENSOR_NUM 4
 
+#define TEGRA_SOCTHERM_THROT_LEVEL_LOW  0
+#define TEGRA_SOCTHERM_THROT_LEVEL_MED  1
+#define TEGRA_SOCTHERM_THROT_LEVEL_HIGH 2
+#define TEGRA_SOCTHERM_THROT_LEVEL_NONE -1
+
 #endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 94afcb2c..ddbeda6 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -946,9 +946,17 @@
 #ifdef CONFIG_ACPI
 int acpi_dev_get_property(struct acpi_device *adev, const char *name,
 			  acpi_object_type type, const union acpi_object **obj);
-int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
-				     const char *name, size_t index,
-				     struct acpi_reference_args *args);
+int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+				const char *name, size_t index, size_t num_args,
+				struct acpi_reference_args *args);
+
+static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+				const char *name, size_t index,
+				struct acpi_reference_args *args)
+{
+	return __acpi_node_get_property_reference(fwnode, name, index,
+		MAX_ACPI_REFERENCE_ARGS, args);
+}
 
 int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname,
 		       void **valptr);
@@ -1024,6 +1032,14 @@
 	return -ENXIO;
 }
 
+static inline int
+__acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+				const char *name, size_t index, size_t num_args,
+				struct acpi_reference_args *args)
+{
+	return -ENXIO;
+}
+
 static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
 				const char *name, size_t index,
 				struct acpi_reference_args *args)
diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h
index e82e3ee..1035879 100644
--- a/include/linux/amba/clcd.h
+++ b/include/linux/amba/clcd.h
@@ -67,6 +67,17 @@
 #define CNTL_LDMAFIFOTIME	(1 << 15)
 #define CNTL_WATERMARK		(1 << 16)
 
+/* ST Microelectronics variant bits */
+#define CNTL_ST_1XBPP_444	0x0
+#define CNTL_ST_1XBPP_5551	(1 << 17)
+#define CNTL_ST_1XBPP_565	(1 << 18)
+#define CNTL_ST_CDWID_12	0x0
+#define CNTL_ST_CDWID_16	(1 << 19)
+#define CNTL_ST_CDWID_18	(1 << 20)
+#define CNTL_ST_CDWID_24	((1 << 19)|(1 << 20))
+#define CNTL_ST_CEAEN		(1 << 21)
+#define CNTL_ST_LCDBPP24_PACKED	(6 << 1)
+
 enum {
 	/* individual formats */
 	CLCD_CAP_RGB444		= (1 << 0),
@@ -93,6 +104,8 @@
 	CLCD_CAP_ALL		= CLCD_CAP_BGR | CLCD_CAP_RGB,
 };
 
+struct backlight_device;
+
 struct clcd_panel {
 	struct fb_videomode	mode;
 	signed short		width;	/* width in mm */
@@ -105,6 +118,13 @@
 				fixedtimings:1,
 				grayscale:1;
 	unsigned int		connector;
+	struct backlight_device	*backlight;
+	/*
+	 * If the B/R lines are switched between the CLCD
+	 * and the panel we need to know this and not try to
+	 * compensate with the BGR bit in the control register.
+	 */
+	bool			bgr_connection;
 };
 
 struct clcd_regs {
@@ -170,11 +190,38 @@
 struct amba_device;
 struct clk;
 
+/**
+ * struct clcd_vendor_data - holds hardware (IP-block) vendor-specific
+ * variant information
+ *
+ * @clock_timregs: the CLCD needs to be clocked when accessing the
+ * timer registers, or the hardware will hang.
+ * @packed_24_bit_pixels: this variant supports 24bit packed pixel data,
+ * so that RGB accesses 3 bytes at a time, not just on even 32bit
+ * boundaries, packing the pixel data in memory. ST Microelectronics
+ * have this.
+ * @st_bitmux_control: ST Microelectronics have implemented output
+ * bit line multiplexing into the CLCD control register. This indicates
+ * that we need to use this.
+ * @init_board: custom board init function for this variant
+ * @init_panel: custom panel init function for this variant
+ */
+struct clcd_vendor_data {
+	bool	clock_timregs;
+	bool	packed_24_bit_pixels;
+	bool	st_bitmux_control;
+	int	(*init_board)(struct amba_device *adev,
+			      struct clcd_board *board);
+	int	(*init_panel)(struct clcd_fb *fb,
+			      struct device_node *panel);
+};
+
 /* this data structure describes each frame buffer device we find */
 struct clcd_fb {
 	struct fb_info		fb;
 	struct amba_device	*dev;
 	struct clk		*clk;
+	struct clcd_vendor_data	*vendor;
 	struct clcd_panel	*panel;
 	struct clcd_board	*board;
 	void			*board_data;
@@ -231,16 +278,22 @@
 	if (var->grayscale)
 		val |= CNTL_LCDBW;
 
-	if (fb->panel->caps && fb->board->caps &&
-	    var->bits_per_pixel >= 16) {
+	if (fb->panel->caps && fb->board->caps && var->bits_per_pixel >= 16) {
 		/*
 		 * if board and panel supply capabilities, we can support
-		 * changing BGR/RGB depending on supplied parameters
+		 * changing BGR/RGB depending on supplied parameters. Here
+		 * we switch to what the framebuffer is providing if need
+		 * be, so if the framebuffer is BGR but the display connection
+		 * is RGB (first case) we switch it around. Vice versa mutatis
+		 * mutandis if the framebuffer is RGB but the display connection
+		 * is BGR, we flip it around.
 		 */
 		if (var->red.offset == 0)
 			val &= ~CNTL_BGR;
 		else
 			val |= CNTL_BGR;
+		if (fb->panel->bgr_connection)
+			val ^= CNTL_BGR;
 	}
 
 	switch (var->bits_per_pixel) {
@@ -270,6 +323,10 @@
 		else
 			val |= CNTL_LCDBPP16_444;
 		break;
+	case 24:
+		/* Modified variant supporting 24 bit packed pixels */
+		val |= CNTL_ST_LCDBPP24_PACKED;
+		break;
 	case 32:
 		val |= CNTL_LCDBPP24;
 		break;
diff --git a/include/linux/ata.h b/include/linux/ata.h
index adbc812..fdb1803 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -105,6 +105,7 @@
 	ATA_ID_CFA_KEY_MGMT	= 162,
 	ATA_ID_CFA_MODES	= 163,
 	ATA_ID_DATA_SET_MGMT	= 169,
+	ATA_ID_SCT_CMD_XPORT	= 206,
 	ATA_ID_ROT_SPEED	= 217,
 	ATA_ID_PIO4		= (1 << 1),
 
@@ -789,6 +790,48 @@
 }
 
 /**
+ *
+ * Word: 206 - SCT Command Transport
+ *    15:12 - Vendor Specific
+ *     11:6 - Reserved
+ *        5 - SCT Command Transport Data Tables supported
+ *        4 - SCT Command Transport Features Control supported
+ *        3 - SCT Command Transport Error Recovery Control supported
+ *        2 - SCT Command Transport Write Same supported
+ *        1 - SCT Command Transport Long Sector Access supported
+ *        0 - SCT Command Transport supported
+ */
+static inline bool ata_id_sct_data_tables(const u16 *id)
+{
+	return id[ATA_ID_SCT_CMD_XPORT] & (1 << 5) ? true : false;
+}
+
+static inline bool ata_id_sct_features_ctrl(const u16 *id)
+{
+	return id[ATA_ID_SCT_CMD_XPORT] & (1 << 4) ? true : false;
+}
+
+static inline bool ata_id_sct_error_recovery_ctrl(const u16 *id)
+{
+	return id[ATA_ID_SCT_CMD_XPORT] & (1 << 3) ? true : false;
+}
+
+static inline bool ata_id_sct_write_same(const u16 *id)
+{
+	return id[ATA_ID_SCT_CMD_XPORT] & (1 << 2) ? true : false;
+}
+
+static inline bool ata_id_sct_long_sector_access(const u16 *id)
+{
+	return id[ATA_ID_SCT_CMD_XPORT] & (1 << 1) ? true : false;
+}
+
+static inline bool ata_id_sct_supported(const u16 *id)
+{
+	return id[ATA_ID_SCT_CMD_XPORT] & (1 << 0) ? true : false;
+}
+
+/**
  *	ata_id_major_version	-	get ATA level of drive
  *	@id: Identify data
  *
@@ -1071,32 +1114,6 @@
 #endif
 }
 
-/*
- * Write LBA Range Entries to the buffer that will cover the extent from
- * sector to sector + count.  This is used for TRIM and for ADD LBA(S)
- * TO NV CACHE PINNED SET.
- */
-static inline unsigned ata_set_lba_range_entries(void *_buffer,
-		unsigned num, u64 sector, unsigned long count)
-{
-	__le64 *buffer = _buffer;
-	unsigned i = 0, used_bytes;
-
-	while (i < num) {
-		u64 entry = sector |
-			((u64)(count > 0xffff ? 0xffff : count) << 48);
-		buffer[i++] = __cpu_to_le64(entry);
-		if (count <= 0xffff)
-			break;
-		count -= 0xffff;
-		sector += 0xffff;
-	}
-
-	used_bytes = ALIGN(i * 8, 512);
-	memset(buffer + i, 0, used_bytes - i * 8);
-	return used_bytes;
-}
-
 static inline bool ata_ok(u8 status)
 {
 	return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h
index 7caaf29..28c1505 100644
--- a/include/linux/auto_dev-ioctl.h
+++ b/include/linux/auto_dev-ioctl.h
@@ -10,214 +10,5 @@
 #ifndef _LINUX_AUTO_DEV_IOCTL_H
 #define _LINUX_AUTO_DEV_IOCTL_H
 
-#include <linux/auto_fs.h>
-#include <linux/string.h>
-
-#define AUTOFS_DEVICE_NAME		"autofs"
-
-#define AUTOFS_DEV_IOCTL_VERSION_MAJOR	1
-#define AUTOFS_DEV_IOCTL_VERSION_MINOR	0
-
-#define AUTOFS_DEVID_LEN		16
-
-#define AUTOFS_DEV_IOCTL_SIZE		sizeof(struct autofs_dev_ioctl)
-
-/*
- * An ioctl interface for autofs mount point control.
- */
-
-struct args_protover {
-	__u32	version;
-};
-
-struct args_protosubver {
-	__u32	sub_version;
-};
-
-struct args_openmount {
-	__u32	devid;
-};
-
-struct args_ready {
-	__u32	token;
-};
-
-struct args_fail {
-	__u32	token;
-	__s32	status;
-};
-
-struct args_setpipefd {
-	__s32	pipefd;
-};
-
-struct args_timeout {
-	__u64	timeout;
-};
-
-struct args_requester {
-	__u32	uid;
-	__u32	gid;
-};
-
-struct args_expire {
-	__u32	how;
-};
-
-struct args_askumount {
-	__u32	may_umount;
-};
-
-struct args_ismountpoint {
-	union {
-		struct args_in {
-			__u32	type;
-		} in;
-		struct args_out {
-			__u32	devid;
-			__u32	magic;
-		} out;
-	};
-};
-
-/*
- * All the ioctls use this structure.
- * When sending a path size must account for the total length
- * of the chunk of memory otherwise is is the size of the
- * structure.
- */
-
-struct autofs_dev_ioctl {
-	__u32 ver_major;
-	__u32 ver_minor;
-	__u32 size;		/* total size of data passed in
-				 * including this struct */
-	__s32 ioctlfd;		/* automount command fd */
-
-	/* Command parameters */
-
-	union {
-		struct args_protover		protover;
-		struct args_protosubver		protosubver;
-		struct args_openmount		openmount;
-		struct args_ready		ready;
-		struct args_fail		fail;
-		struct args_setpipefd		setpipefd;
-		struct args_timeout		timeout;
-		struct args_requester		requester;
-		struct args_expire		expire;
-		struct args_askumount		askumount;
-		struct args_ismountpoint	ismountpoint;
-	};
-
-	char path[0];
-};
-
-static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
-{
-	memset(in, 0, sizeof(struct autofs_dev_ioctl));
-	in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
-	in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
-	in->size = sizeof(struct autofs_dev_ioctl);
-	in->ioctlfd = -1;
-}
-
-/*
- * If you change this make sure you make the corresponding change
- * to autofs-dev-ioctl.c:lookup_ioctl()
- */
-enum {
-	/* Get various version info */
-	AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71,
-	AUTOFS_DEV_IOCTL_PROTOVER_CMD,
-	AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD,
-
-	/* Open mount ioctl fd */
-	AUTOFS_DEV_IOCTL_OPENMOUNT_CMD,
-
-	/* Close mount ioctl fd */
-	AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD,
-
-	/* Mount/expire status returns */
-	AUTOFS_DEV_IOCTL_READY_CMD,
-	AUTOFS_DEV_IOCTL_FAIL_CMD,
-
-	/* Activate/deactivate autofs mount */
-	AUTOFS_DEV_IOCTL_SETPIPEFD_CMD,
-	AUTOFS_DEV_IOCTL_CATATONIC_CMD,
-
-	/* Expiry timeout */
-	AUTOFS_DEV_IOCTL_TIMEOUT_CMD,
-
-	/* Get mount last requesting uid and gid */
-	AUTOFS_DEV_IOCTL_REQUESTER_CMD,
-
-	/* Check for eligible expire candidates */
-	AUTOFS_DEV_IOCTL_EXPIRE_CMD,
-
-	/* Request busy status */
-	AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD,
-
-	/* Check if path is a mountpoint */
-	AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD,
-};
-
-#define AUTOFS_IOCTL 0x93
-
-#define AUTOFS_DEV_IOCTL_VERSION \
-	_IOWR(AUTOFS_IOCTL, \
-	      AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_PROTOVER \
-	_IOWR(AUTOFS_IOCTL, \
-	      AUTOFS_DEV_IOCTL_PROTOVER_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_PROTOSUBVER \
-	_IOWR(AUTOFS_IOCTL, \
-	      AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_OPENMOUNT \
-	_IOWR(AUTOFS_IOCTL, \
-	      AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_CLOSEMOUNT \
-	_IOWR(AUTOFS_IOCTL, \
-	      AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_READY \
-	_IOWR(AUTOFS_IOCTL, \
-	      AUTOFS_DEV_IOCTL_READY_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_FAIL \
-	_IOWR(AUTOFS_IOCTL, \
-	      AUTOFS_DEV_IOCTL_FAIL_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_SETPIPEFD \
-	_IOWR(AUTOFS_IOCTL, \
-	      AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_CATATONIC \
-	_IOWR(AUTOFS_IOCTL, \
-	      AUTOFS_DEV_IOCTL_CATATONIC_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_TIMEOUT \
-	_IOWR(AUTOFS_IOCTL, \
-	      AUTOFS_DEV_IOCTL_TIMEOUT_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_REQUESTER \
-	_IOWR(AUTOFS_IOCTL, \
-	      AUTOFS_DEV_IOCTL_REQUESTER_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_EXPIRE \
-	_IOWR(AUTOFS_IOCTL, \
-	      AUTOFS_DEV_IOCTL_EXPIRE_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_ASKUMOUNT \
-	_IOWR(AUTOFS_IOCTL, \
-	      AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_ISMOUNTPOINT \
-	_IOWR(AUTOFS_IOCTL, \
-	      AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, struct autofs_dev_ioctl)
-
+#include <uapi/linux/auto_dev-ioctl.h>
 #endif	/* _LINUX_AUTO_DEV_IOCTL_H */
diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h
index b4066bb..b8f814c 100644
--- a/include/linux/auto_fs.h
+++ b/include/linux/auto_fs.h
@@ -10,7 +10,6 @@
 #define _LINUX_AUTO_FS_H
 
 #include <linux/fs.h>
-#include <linux/limits.h>
 #include <linux/ioctl.h>
 #include <uapi/linux/auto_fs.h>
 #endif /* _LINUX_AUTO_FS_H */
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index cbdbf34..3bf5d33 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -343,16 +343,7 @@
  */
 static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
 {
-	char *p;
-
-	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
-	if (!p) {
-		strncpy(buf, "<unavailable>", buflen);
-		return -ENAMETOOLONG;
-	}
-
-	memmove(buf, p, buf + buflen - p);
-	return 0;
+	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
 }
 
 /**
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 7c2bb27..a765333 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -238,9 +238,6 @@
 };
 
 /***** SHA engine *****/
-#define CCP_SHA_BLOCKSIZE               SHA256_BLOCK_SIZE
-#define CCP_SHA_CTXSIZE                 SHA256_DIGEST_SIZE
-
 /**
  * ccp_sha_type - type of SHA operation
  *
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 1563265..374bb1c 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -104,7 +104,7 @@
 extern int ceph_handle_auth_reply(struct ceph_auth_client *ac,
 				  void *buf, size_t len,
 				  void *reply_buf, size_t reply_len);
-extern int ceph_entity_name_encode(const char *name, void **p, void *end);
+int ceph_auth_entity_name_encode(const char *name, void **p, void *end);
 
 extern int ceph_build_auth(struct ceph_auth_client *ac,
 		    void *msg_buf, size_t msg_len);
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 7868d602..f96de8d 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -138,6 +138,9 @@
 #define CEPH_MSG_POOLOP_REPLY           48
 #define CEPH_MSG_POOLOP                 49
 
+/* mon commands */
+#define CEPH_MSG_MON_COMMAND            50
+#define CEPH_MSG_MON_COMMAND_ACK        51
 
 /* osd */
 #define CEPH_MSG_OSD_MAP                41
@@ -176,6 +179,14 @@
 	struct ceph_statfs st;
 } __attribute__ ((packed));
 
+struct ceph_mon_command {
+	struct ceph_mon_request_header monhdr;
+	struct ceph_fsid fsid;
+	__le32 num_strs;         /* always 1 */
+	__le32 str_len;
+	char str[];
+} __attribute__ ((packed));
+
 struct ceph_osd_getmap {
 	struct ceph_mon_request_header monhdr;
 	struct ceph_fsid fsid;
@@ -270,6 +281,7 @@
 	CEPH_SESSION_FLUSHMSG,
 	CEPH_SESSION_FLUSHMSG_ACK,
 	CEPH_SESSION_FORCE_RO,
+	CEPH_SESSION_REJECT,
 };
 
 extern const char *ceph_session_op_name(int op);
diff --git a/include/linux/ceph/cls_lock_client.h b/include/linux/ceph/cls_lock_client.h
new file mode 100644
index 0000000..84884d8
--- /dev/null
+++ b/include/linux/ceph/cls_lock_client.h
@@ -0,0 +1,49 @@
+#ifndef _LINUX_CEPH_CLS_LOCK_CLIENT_H
+#define _LINUX_CEPH_CLS_LOCK_CLIENT_H
+
+#include <linux/ceph/osd_client.h>
+
+enum ceph_cls_lock_type {
+	CEPH_CLS_LOCK_NONE = 0,
+	CEPH_CLS_LOCK_EXCLUSIVE = 1,
+	CEPH_CLS_LOCK_SHARED = 2,
+};
+
+struct ceph_locker_id {
+	struct ceph_entity_name name;	/* locker's client name */
+	char *cookie;			/* locker's cookie */
+};
+
+struct ceph_locker_info {
+	struct ceph_entity_addr addr;	/* locker's address */
+};
+
+struct ceph_locker {
+	struct ceph_locker_id id;
+	struct ceph_locker_info info;
+};
+
+int ceph_cls_lock(struct ceph_osd_client *osdc,
+		  struct ceph_object_id *oid,
+		  struct ceph_object_locator *oloc,
+		  char *lock_name, u8 type, char *cookie,
+		  char *tag, char *desc, u8 flags);
+int ceph_cls_unlock(struct ceph_osd_client *osdc,
+		    struct ceph_object_id *oid,
+		    struct ceph_object_locator *oloc,
+		    char *lock_name, char *cookie);
+int ceph_cls_break_lock(struct ceph_osd_client *osdc,
+			struct ceph_object_id *oid,
+			struct ceph_object_locator *oloc,
+			char *lock_name, char *cookie,
+			struct ceph_entity_name *locker);
+
+void ceph_free_lockers(struct ceph_locker *lockers, u32 num_lockers);
+
+int ceph_cls_lock_info(struct ceph_osd_client *osdc,
+		       struct ceph_object_id *oid,
+		       struct ceph_object_locator *oloc,
+		       char *lock_name, u8 *type, char **tag,
+		       struct ceph_locker **lockers, u32 *num_lockers);
+
+#endif
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 83fc1ff..1816c5e 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -264,7 +264,8 @@
 					      void *private,
 					      u64 supported_features,
 					      u64 required_features);
-extern u64 ceph_client_id(struct ceph_client *client);
+struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client);
+u64 ceph_client_gid(struct ceph_client *client);
 extern void ceph_destroy_client(struct ceph_client *client);
 extern int __ceph_open_session(struct ceph_client *client,
 			       unsigned long started);
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index 24d704d..d5a3ece 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -141,6 +141,9 @@
 int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what,
 				ceph_monc_callback_t cb, u64 private_data);
 
+int ceph_monc_blacklist_add(struct ceph_mon_client *monc,
+			    struct ceph_entity_addr *client_addr);
+
 extern int ceph_monc_open_session(struct ceph_mon_client *monc);
 
 extern int ceph_monc_validate_auth(struct ceph_mon_client *monc);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 8589323..96337b1 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -121,6 +121,9 @@
 			struct ceph_osd_data response_data;
 		} notify;
 		struct {
+			struct ceph_osd_data response_data;
+		} list_watchers;
+		struct {
 			u64 expected_object_size;
 			u64 expected_write_size;
 		} alloc_hint;
@@ -249,6 +252,12 @@
 	size_t *preply_len;
 };
 
+struct ceph_watch_item {
+	struct ceph_entity_name name;
+	u64 cookie;
+	struct ceph_entity_addr addr;
+};
+
 struct ceph_osd_client {
 	struct ceph_client     *client;
 
@@ -346,7 +355,6 @@
 					struct page **pages, u64 length,
 					u32 alignment, bool pages_from_pool,
 					bool own_pages);
-
 extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req,
 					unsigned int which, u16 opcode,
 					const char *class, const char *method);
@@ -389,6 +397,14 @@
 extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc);
 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc);
 
+int ceph_osdc_call(struct ceph_osd_client *osdc,
+		   struct ceph_object_id *oid,
+		   struct ceph_object_locator *oloc,
+		   const char *class, const char *method,
+		   unsigned int flags,
+		   struct page *req_page, size_t req_len,
+		   struct page *resp_page, size_t *resp_len);
+
 extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
 			       struct ceph_vino vino,
 			       struct ceph_file_layout *layout,
@@ -434,5 +450,10 @@
 		     size_t *preply_len);
 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
 			  struct ceph_osd_linger_request *lreq);
+int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
+			    struct ceph_object_id *oid,
+			    struct ceph_object_locator *oloc,
+			    struct ceph_watch_item **watchers,
+			    u32 *num_watchers);
 #endif
 
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 440a721..c83c23f 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -97,7 +97,7 @@
 int cgroup_rm_cftypes(struct cftype *cfts);
 void cgroup_file_notify(struct cgroup_file *cfile);
 
-char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
+int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
 int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
 		     struct pid *pid, struct task_struct *tsk);
@@ -555,8 +555,7 @@
 	return kernfs_name(cgrp->kn, buf, buflen);
 }
 
-static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf,
-					      size_t buflen)
+static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
 {
 	return kernfs_path(cgrp->kn, buf, buflen);
 }
@@ -658,8 +657,8 @@
 					struct user_namespace *user_ns,
 					struct cgroup_namespace *old_ns);
 
-char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
-		     struct cgroup_namespace *ns);
+int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
+		   struct cgroup_namespace *ns);
 
 #else /* !CONFIG_CGROUPS */
 
diff --git a/include/linux/compat.h b/include/linux/compat.h
index f964ef7..6360939 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -432,7 +432,6 @@
 
 asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
 
-extern __printf(1, 2) int compat_printk(const char *fmt, ...);
 extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat);
 extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set);
 
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 573c5a1..432f5c9 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -188,6 +188,13 @@
 #endif /* GCC_VERSION >= 40300 */
 
 #if GCC_VERSION >= 40500
+
+#ifndef __CHECKER__
+#ifdef LATENT_ENTROPY_PLUGIN
+#define __latent_entropy __attribute__((latent_entropy))
+#endif
+#endif
+
 /*
  * Mark a position in code as unreachable.  This can be used to
  * suppress control flow warnings after asm blocks that transfer
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 6685698..cf0fa5d 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -182,6 +182,29 @@
 # define unreachable() do { } while (1)
 #endif
 
+/*
+ * KENTRY - kernel entry point
+ * This can be used to annotate symbols (functions or data) that are used
+ * without their linker symbol being referenced explicitly. For example,
+ * interrupt vector handlers, or functions in the kernel image that are found
+ * programatically.
+ *
+ * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
+ * are handled in their own way (with KEEP() in linker scripts).
+ *
+ * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
+ * linker script. For example an architecture could KEEP() its entire
+ * boot/exception vector code rather than annotate each function and data.
+ */
+#ifndef KENTRY
+# define KENTRY(sym)						\
+	extern typeof(sym) sym;					\
+	static const unsigned long __kentry_##sym		\
+	__used							\
+	__attribute__((section("___kentry" "+" #sym ), used))	\
+	= (unsigned long)&sym;
+#endif
+
 #ifndef RELOC_HIDE
 # define RELOC_HIDE(ptr, off)					\
   ({ unsigned long __ptr;					\
@@ -406,6 +429,10 @@
 # define __attribute_const__	/* unimplemented */
 #endif
 
+#ifndef __latent_entropy
+# define __latent_entropy
+#endif
+
 /*
  * Tell gcc if a function is cold. The compiler will assume any path
  * directly leading to the call is unlikely.
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 631ba33b..5fa55fc 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -639,19 +639,19 @@
 					      unsigned int target_freq)
 {
 	struct cpufreq_frequency_table *table = policy->freq_table;
+	struct cpufreq_frequency_table *pos, *best = table - 1;
 	unsigned int freq;
-	int i, best = -1;
 
-	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		freq = table[i].frequency;
+	cpufreq_for_each_valid_entry(pos, table) {
+		freq = pos->frequency;
 
 		if (freq >= target_freq)
-			return i;
+			return pos - table;
 
-		best = i;
+		best = pos;
 	}
 
-	return best;
+	return best - table;
 }
 
 /* Find lowest freq at or above target in a table in descending order */
@@ -659,28 +659,28 @@
 					      unsigned int target_freq)
 {
 	struct cpufreq_frequency_table *table = policy->freq_table;
+	struct cpufreq_frequency_table *pos, *best = table - 1;
 	unsigned int freq;
-	int i, best = -1;
 
-	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		freq = table[i].frequency;
+	cpufreq_for_each_valid_entry(pos, table) {
+		freq = pos->frequency;
 
 		if (freq == target_freq)
-			return i;
+			return pos - table;
 
 		if (freq > target_freq) {
-			best = i;
+			best = pos;
 			continue;
 		}
 
 		/* No freq found above target_freq */
-		if (best == -1)
-			return i;
+		if (best == table - 1)
+			return pos - table;
 
-		return best;
+		return best - pos;
 	}
 
-	return best;
+	return best - pos;
 }
 
 /* Works only on sorted freq-tables */
@@ -700,28 +700,28 @@
 					      unsigned int target_freq)
 {
 	struct cpufreq_frequency_table *table = policy->freq_table;
+	struct cpufreq_frequency_table *pos, *best = table - 1;
 	unsigned int freq;
-	int i, best = -1;
 
-	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		freq = table[i].frequency;
+	cpufreq_for_each_valid_entry(pos, table) {
+		freq = pos->frequency;
 
 		if (freq == target_freq)
-			return i;
+			return pos - table;
 
 		if (freq < target_freq) {
-			best = i;
+			best = pos;
 			continue;
 		}
 
 		/* No freq found below target_freq */
-		if (best == -1)
-			return i;
+		if (best == table - 1)
+			return pos - table;
 
-		return best;
+		return best - table;
 	}
 
-	return best;
+	return best - table;
 }
 
 /* Find highest freq at or below target in a table in descending order */
@@ -729,19 +729,19 @@
 					      unsigned int target_freq)
 {
 	struct cpufreq_frequency_table *table = policy->freq_table;
+	struct cpufreq_frequency_table *pos, *best = table - 1;
 	unsigned int freq;
-	int i, best = -1;
 
-	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		freq = table[i].frequency;
+	cpufreq_for_each_valid_entry(pos, table) {
+		freq = pos->frequency;
 
 		if (freq <= target_freq)
-			return i;
+			return pos - table;
 
-		best = i;
+		best = pos;
 	}
 
-	return best;
+	return best - table;
 }
 
 /* Works only on sorted freq-tables */
@@ -761,32 +761,32 @@
 					      unsigned int target_freq)
 {
 	struct cpufreq_frequency_table *table = policy->freq_table;
+	struct cpufreq_frequency_table *pos, *best = table - 1;
 	unsigned int freq;
-	int i, best = -1;
 
-	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		freq = table[i].frequency;
+	cpufreq_for_each_valid_entry(pos, table) {
+		freq = pos->frequency;
 
 		if (freq == target_freq)
-			return i;
+			return pos - table;
 
 		if (freq < target_freq) {
-			best = i;
+			best = pos;
 			continue;
 		}
 
 		/* No freq found below target_freq */
-		if (best == -1)
-			return i;
+		if (best == table - 1)
+			return pos - table;
 
 		/* Choose the closest freq */
-		if (target_freq - table[best].frequency > freq - target_freq)
-			return i;
+		if (target_freq - best->frequency > freq - target_freq)
+			return pos - table;
 
-		return best;
+		return best - table;
 	}
 
-	return best;
+	return best - table;
 }
 
 /* Find closest freq to target in a table in descending order */
@@ -794,32 +794,32 @@
 					      unsigned int target_freq)
 {
 	struct cpufreq_frequency_table *table = policy->freq_table;
+	struct cpufreq_frequency_table *pos, *best = table - 1;
 	unsigned int freq;
-	int i, best = -1;
 
-	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		freq = table[i].frequency;
+	cpufreq_for_each_valid_entry(pos, table) {
+		freq = pos->frequency;
 
 		if (freq == target_freq)
-			return i;
+			return pos - table;
 
 		if (freq > target_freq) {
-			best = i;
+			best = pos;
 			continue;
 		}
 
 		/* No freq found above target_freq */
-		if (best == -1)
-			return i;
+		if (best == table - 1)
+			return pos - table;
 
 		/* Choose the closest freq */
-		if (table[best].frequency - target_freq > target_freq - freq)
-			return i;
+		if (best->frequency - target_freq > target_freq - freq)
+			return pos - table;
 
-		return best;
+		return best - table;
 	}
 
-	return best;
+	return best - table;
 }
 
 /* Works only on sorted freq-tables */
diff --git a/include/linux/ctype.h b/include/linux/ctype.h
index 653589e..f13e4ff 100644
--- a/include/linux/ctype.h
+++ b/include/linux/ctype.h
@@ -22,7 +22,10 @@
 #define isalnum(c)	((__ismask(c)&(_U|_L|_D)) != 0)
 #define isalpha(c)	((__ismask(c)&(_U|_L)) != 0)
 #define iscntrl(c)	((__ismask(c)&(_C)) != 0)
-#define isdigit(c)	((__ismask(c)&(_D)) != 0)
+static inline int isdigit(int c)
+{
+	return '0' <= c && c <= '9';
+}
 #define isgraph(c)	((__ismask(c)&(_P|_U|_L|_D)) != 0)
 #define islower(c)	((__ismask(c)&(_L)) != 0)
 #define isprint(c)	((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 5ff3e9a..5beed7b 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -584,9 +584,10 @@
  * If dentry is on an union/overlay, then return the underlying, real inode.
  * Otherwise return d_inode().
  */
-static inline struct inode *d_real_inode(struct dentry *dentry)
+static inline struct inode *d_real_inode(const struct dentry *dentry)
 {
-	return d_backing_inode(d_real(dentry, NULL, 0));
+	/* This usage of d_real() results in const dentry */
+	return d_backing_inode(d_real((struct dentry *) dentry, NULL, 0));
 }
 
 
diff --git a/include/linux/device.h b/include/linux/device.h
index 38f0281..bc41e87 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -41,6 +41,7 @@
 struct fwnode_handle;
 struct iommu_ops;
 struct iommu_group;
+struct iommu_fwspec;
 
 struct bus_attribute {
 	struct attribute	attr;
@@ -765,6 +766,7 @@
  * 		gone away. This should be set by the allocator of the
  * 		device (i.e. the bus driver that discovered the device).
  * @iommu_group: IOMMU group the device belongs to.
+ * @iommu_fwspec: IOMMU-specific properties supplied by firmware.
  *
  * @offline_disabled: If set, the device is permanently online.
  * @offline:	Set after successful invocation of bus type's .offline().
@@ -849,6 +851,7 @@
 
 	void	(*release)(struct device *dev);
 	struct iommu_group	*iommu_group;
+	struct iommu_fwspec	*iommu_fwspec;
 
 	bool			offline_disabled:1;
 	bool			offline:1;
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 81c5c8d..32c5890 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -21,6 +21,7 @@
 
 #ifdef CONFIG_IOMMU_DMA
 #include <linux/iommu.h>
+#include <linux/msi.h>
 
 int iommu_dma_init(void);
 
@@ -29,7 +30,8 @@
 void iommu_put_dma_cookie(struct iommu_domain *domain);
 
 /* Setup call for arch DMA mapping code */
-int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size);
+int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
+		u64 size, struct device *dev);
 
 /* General helpers for DMA-API <-> IOMMU-API interaction */
 int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
@@ -62,9 +64,13 @@
 int iommu_dma_supported(struct device *dev, u64 mask);
 int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
+/* The DMA API isn't _quite_ the whole story, though... */
+void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
+
 #else
 
 struct iommu_domain;
+struct msi_msg;
 
 static inline int iommu_dma_init(void)
 {
@@ -80,6 +86,10 @@
 {
 }
 
+static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
+{
+}
+
 #endif	/* CONFIG_IOMMU_DMA */
 #endif	/* __KERNEL__ */
 #endif	/* __DMA_IOMMU_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 0f90eb5..08528af 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -56,6 +56,11 @@
  * that gives better TLB efficiency.
  */
 #define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
+/*
+ * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
+ * allocation failure reports (similarly to __GFP_NOWARN).
+ */
+#define DMA_ATTR_NO_WARN	(1UL << 8)
 
 /*
  * A dma_addr_t can hold any valid DMA or bus address for the platform.
diff --git a/include/linux/export.h b/include/linux/export.h
index c565f87..2a0f61f 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -1,5 +1,6 @@
 #ifndef _LINUX_EXPORT_H
 #define _LINUX_EXPORT_H
+
 /*
  * Export symbols from the kernel to modules.  Forked from module.h
  * to reduce the amount of pointless cruft we feed to gcc when only
@@ -42,27 +43,26 @@
 #ifdef CONFIG_MODVERSIONS
 /* Mark the CRC weak since genksyms apparently decides not to
  * generate a checksums for some symbols */
-#define __CRC_SYMBOL(sym, sec)					\
-	extern __visible void *__crc_##sym __attribute__((weak));		\
-	static const unsigned long __kcrctab_##sym		\
-	__used							\
-	__attribute__((section("___kcrctab" sec "+" #sym), unused))	\
+#define __CRC_SYMBOL(sym, sec)						\
+	extern __visible void *__crc_##sym __attribute__((weak));	\
+	static const unsigned long __kcrctab_##sym			\
+	__used								\
+	__attribute__((section("___kcrctab" sec "+" #sym), used))	\
 	= (unsigned long) &__crc_##sym;
 #else
 #define __CRC_SYMBOL(sym, sec)
 #endif
 
 /* For every exported symbol, place a struct in the __ksymtab section */
-#define ___EXPORT_SYMBOL(sym, sec)				\
-	extern typeof(sym) sym;					\
-	__CRC_SYMBOL(sym, sec)					\
-	static const char __kstrtab_##sym[]			\
-	__attribute__((section("__ksymtab_strings"), aligned(1))) \
-	= VMLINUX_SYMBOL_STR(sym);				\
-	extern const struct kernel_symbol __ksymtab_##sym;	\
-	__visible const struct kernel_symbol __ksymtab_##sym	\
-	__used							\
-	__attribute__((section("___ksymtab" sec "+" #sym), unused))	\
+#define ___EXPORT_SYMBOL(sym, sec)					\
+	extern typeof(sym) sym;						\
+	__CRC_SYMBOL(sym, sec)						\
+	static const char __kstrtab_##sym[]				\
+	__attribute__((section("__ksymtab_strings"), aligned(1)))	\
+	= VMLINUX_SYMBOL_STR(sym);					\
+	static const struct kernel_symbol __ksymtab_##sym		\
+	__used								\
+	__attribute__((section("___ksymtab" sec "+" #sym), used))	\
 	= { (unsigned long)&sym, __kstrtab_##sym }
 
 #if defined(__KSYM_DEPS__)
@@ -78,7 +78,6 @@
 
 #elif defined(CONFIG_TRIM_UNUSED_KSYMS)
 
-#include <linux/kconfig.h>
 #include <generated/autoksyms.h>
 
 #define __EXPORT_SYMBOL(sym, sec)				\
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index b03c062..5ab958c 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -157,12 +157,13 @@
  *    @fh_to_dentry is given a &struct super_block (@sb) and a file handle
  *    fragment (@fh, @fh_len). It should return a &struct dentry which refers
  *    to the same file that the file handle fragment refers to.  If it cannot,
- *    it should return a %NULL pointer if the file was found but no acceptable
- *    &dentries were available, or an %ERR_PTR error code indicating why it
- *    couldn't be found (e.g. %ENOENT or %ENOMEM).  Any suitable dentry can be
- *    returned including, if necessary, a new dentry created with d_alloc_root.
- *    The caller can then find any other extant dentries by following the
- *    d_alias links.
+ *    it should return a %NULL pointer if the file cannot be found, or an
+ *    %ERR_PTR error code of %ENOMEM if a memory allocation failure occurred.
+ *    Any other error code is treated like %NULL, and will cause an %ESTALE error
+ *    for callers of exportfs_decode_fh().
+ *    Any suitable dentry can be returned including, if necessary, a new dentry
+ *    created with d_alloc_root.  The caller can then find any other extant
+ *    dentries by following the d_alias links.
  *
  * fh_to_parent:
  *    Same as @fh_to_dentry, except that it returns a pointer to the parent
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index 9961110..7494dc6 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -25,6 +25,7 @@
 					 FALLOC_FL_PUNCH_HOLE |		\
 					 FALLOC_FL_COLLAPSE_RANGE |	\
 					 FALLOC_FL_ZERO_RANGE |		\
-					 FALLOC_FL_INSERT_RANGE)
+					 FALLOC_FL_INSERT_RANGE |	\
+					 FALLOC_FL_UNSHARE_RANGE)
 
 #endif /* _FALLOC_H_ */
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 5295535..6e84b2cae 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -30,12 +30,12 @@
 	struct rcu_head rcu;
 };
 
-static inline bool close_on_exec(int fd, const struct fdtable *fdt)
+static inline bool close_on_exec(unsigned int fd, const struct fdtable *fdt)
 {
 	return test_bit(fd, fdt->close_on_exec);
 }
 
-static inline bool fd_is_open(int fd, const struct fdtable *fdt)
+static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt)
 {
 	return test_bit(fd, fdt->open_fds);
 }
@@ -57,7 +57,7 @@
    * written part on a separate cache line in SMP
    */
 	spinlock_t file_lock ____cacheline_aligned_in_smp;
-	int next_fd;
+	unsigned int next_fd;
 	unsigned long close_on_exec_init[1];
 	unsigned long open_fds_init[1];
 	unsigned long full_fds_bits_init[1];
@@ -105,7 +105,7 @@
 void put_files_struct(struct files_struct *fs);
 void reset_files_struct(struct files_struct *);
 int unshare_files(struct files_struct **);
-struct files_struct *dup_fd(struct files_struct *, int *);
+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
 void do_close_on_exec(struct files_struct *);
 int iterate_fd(struct files_struct *, unsigned,
 		int (*)(const void *, struct file *, unsigned),
diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h
index 86baaa4..a44794e 100644
--- a/include/linux/fence-array.h
+++ b/include/linux/fence-array.h
@@ -52,6 +52,16 @@
 extern const struct fence_ops fence_array_ops;
 
 /**
+ * fence_is_array - check if a fence is from the array subsclass
+ *
+ * Return true if it is a fence_array and false otherwise.
+ */
+static inline bool fence_is_array(struct fence *fence)
+{
+	return fence->ops == &fence_array_ops;
+}
+
+/**
  * to_fence_array - cast a fence to a fence_array
  * @fence: fence to cast to a fence_array
  *
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 2ac6fa5..0d76305 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -60,7 +60,7 @@
  * implementer of the fence for its own purposes. Can be used in different
  * ways by different fence implementers, so do not rely on this.
  *
- * *) Since atomic bitops are used, this is not guaranteed to be the case.
+ * Since atomic bitops are used, this is not guaranteed to be the case.
  * Particularly, if the bit was set, but fence_signal was called right
  * before this bit was set, it would have been able to set the
  * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b04883e..16d2b6e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -63,7 +63,7 @@
 
 extern struct files_stat_struct files_stat;
 extern unsigned long get_max_files(void);
-extern int sysctl_nr_open;
+extern unsigned int sysctl_nr_open;
 extern struct inodes_stat_t inodes_stat;
 extern int leases_enable, lease_break_time;
 extern int sysctl_protected_symlinks;
@@ -224,6 +224,7 @@
 #define ATTR_KILL_PRIV	(1 << 14)
 #define ATTR_OPEN	(1 << 15) /* Truncating from open(O_TRUNC) */
 #define ATTR_TIMES_SET	(1 << 16)
+#define ATTR_TOUCH	(1 << 17)
 
 /*
  * Whiteout is represented by a char device.  The following constants define the
@@ -439,8 +440,9 @@
 	unsigned long		nrexceptional;
 	pgoff_t			writeback_index;/* writeback starts here */
 	const struct address_space_operations *a_ops;	/* methods */
-	unsigned long		flags;		/* error bits/gfp mask */
+	unsigned long		flags;		/* error bits */
 	spinlock_t		private_lock;	/* for use by the address_space */
+	gfp_t			gfp_mask;	/* implicit gfp mask for allocations */
 	struct list_head	private_list;	/* ditto */
 	void			*private_data;	/* ditto */
 } __attribute__((aligned(sizeof(long))));
@@ -591,6 +593,7 @@
 #define IOP_FASTPERM	0x0001
 #define IOP_LOOKUP	0x0002
 #define IOP_NOFOLLOW	0x0004
+#define IOP_XATTR	0x0008
 
 /*
  * Keep mostly read-only and often accessed (especially for
@@ -1064,6 +1067,18 @@
 
 extern void send_sigio(struct fown_struct *fown, int fd, int band);
 
+/*
+ * Return the inode to use for locking
+ *
+ * For overlayfs this should be the overlay inode, not the real inode returned
+ * by file_inode().  For any other fs file_inode(filp) and locks_inode(filp) are
+ * equal.
+ */
+static inline struct inode *locks_inode(const struct file *f)
+{
+	return f->f_path.dentry->d_inode;
+}
+
 #ifdef CONFIG_FILE_LOCKING
 extern int fcntl_getlk(struct file *, unsigned int, struct flock __user *);
 extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
@@ -1251,7 +1266,7 @@
 
 static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
 {
-	return locks_lock_inode_wait(file_inode(filp), fl);
+	return locks_lock_inode_wait(locks_inode(filp), fl);
 }
 
 struct fasync_struct {
@@ -1459,6 +1474,7 @@
 }
 
 extern struct timespec current_fs_time(struct super_block *sb);
+extern struct timespec current_time(struct inode *inode);
 
 /*
  * Snapshotting support.
@@ -1733,17 +1749,10 @@
 	int (*rmdir) (struct inode *,struct dentry *);
 	int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
 	int (*rename) (struct inode *, struct dentry *,
-			struct inode *, struct dentry *);
-	int (*rename2) (struct inode *, struct dentry *,
 			struct inode *, struct dentry *, unsigned int);
 	int (*setattr) (struct dentry *, struct iattr *);
 	int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
-	int (*setxattr) (struct dentry *, struct inode *,
-			 const char *, const void *, size_t, int);
-	ssize_t (*getxattr) (struct dentry *, struct inode *,
-			     const char *, void *, size_t);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
-	int (*removexattr) (struct dentry *, const char *);
 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
 		      u64 len);
 	int (*update_time)(struct inode *, struct timespec *, int);
@@ -2006,7 +2015,6 @@
 	S_VERSION = 8,
 };
 
-extern bool atime_needs_update(const struct path *, struct inode *);
 extern void touch_atime(const struct path *);
 static inline void file_accessed(struct file *file)
 {
@@ -2075,10 +2083,19 @@
 			int (*test)(struct super_block *,void *),
 			int (*set)(struct super_block *,void *),
 			int flags, void *data);
-extern struct dentry *mount_pseudo(struct file_system_type *, char *,
-	const struct super_operations *ops,
-	const struct dentry_operations *dops,
-	unsigned long);
+extern struct dentry *mount_pseudo_xattr(struct file_system_type *, char *,
+					 const struct super_operations *ops,
+					 const struct xattr_handler **xattr,
+					 const struct dentry_operations *dops,
+					 unsigned long);
+
+static inline struct dentry *
+mount_pseudo(struct file_system_type *fs_type, char *name,
+	     const struct super_operations *ops,
+	     const struct dentry_operations *dops, unsigned long magic)
+{
+	return mount_pseudo_xattr(fs_type, name, ops, NULL, dops, magic);
+}
 
 /* Alas, no aliases. Too much hassle with bringing module.h everywhere */
 #define fops_get(fops) \
@@ -2155,7 +2172,7 @@
 
 static inline int locks_verify_locked(struct file *file)
 {
-	if (mandatory_lock(file_inode(file)))
+	if (mandatory_lock(locks_inode(file)))
 		return locks_mandatory_locked(file);
 	return 0;
 }
@@ -2917,6 +2934,7 @@
 extern int vfs_lstat(const char __user *, struct kstat *);
 extern int vfs_fstat(unsigned int, struct kstat *);
 extern int vfs_fstatat(int , const char __user *, struct kstat *, int);
+extern const char *vfs_get_link(struct dentry *, struct delayed_call *);
 
 extern int __generic_block_fiemap(struct inode *inode,
 				  struct fiemap_extent_info *fieinfo,
@@ -2948,7 +2966,8 @@
 extern int simple_link(struct dentry *, struct inode *, struct dentry *);
 extern int simple_unlink(struct inode *, struct dentry *);
 extern int simple_rmdir(struct inode *, struct dentry *);
-extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
+extern int simple_rename(struct inode *, struct dentry *,
+			 struct inode *, struct dentry *, unsigned int);
 extern int noop_fsync(struct file *, loff_t, loff_t, int);
 extern int simple_empty(struct dentry *);
 extern int simple_readpage(struct file *file, struct page *page);
@@ -2993,7 +3012,7 @@
 #define buffer_migrate_page NULL
 #endif
 
-extern int inode_change_ok(const struct inode *, struct iattr *);
+extern int setattr_prepare(struct dentry *, struct iattr *);
 extern int inode_newsize_ok(const struct inode *, loff_t offset);
 extern void setattr_copy(struct inode *inode, const struct iattr *attr);
 
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index eed9e85..b8bcc05 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -29,7 +29,11 @@
 static inline int fsnotify_perm(struct file *file, int mask)
 {
 	struct path *path = &file->f_path;
-	struct inode *inode = file_inode(file);
+	/*
+	 * Do not use file_inode() here or anywhere in this file to get the
+	 * inode.  That would break *notity on overlayfs.
+	 */
+	struct inode *inode = path->dentry->d_inode;
 	__u32 fsnotify_mask = 0;
 	int ret;
 
@@ -173,7 +177,7 @@
 static inline void fsnotify_access(struct file *file)
 {
 	struct path *path = &file->f_path;
-	struct inode *inode = file_inode(file);
+	struct inode *inode = path->dentry->d_inode;
 	__u32 mask = FS_ACCESS;
 
 	if (S_ISDIR(inode->i_mode))
@@ -191,7 +195,7 @@
 static inline void fsnotify_modify(struct file *file)
 {
 	struct path *path = &file->f_path;
-	struct inode *inode = file_inode(file);
+	struct inode *inode = path->dentry->d_inode;
 	__u32 mask = FS_MODIFY;
 
 	if (S_ISDIR(inode->i_mode))
@@ -209,7 +213,7 @@
 static inline void fsnotify_open(struct file *file)
 {
 	struct path *path = &file->f_path;
-	struct inode *inode = file_inode(file);
+	struct inode *inode = path->dentry->d_inode;
 	__u32 mask = FS_OPEN;
 
 	if (S_ISDIR(inode->i_mode))
@@ -225,7 +229,7 @@
 static inline void fsnotify_close(struct file *file)
 {
 	struct path *path = &file->f_path;
-	struct inode *inode = file_inode(file);
+	struct inode *inode = path->dentry->d_inode;
 	fmode_t mode = file->f_mode;
 	__u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
 
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 1dbf52f..e0341af 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -437,7 +437,7 @@
 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
 
 /* drivers/char/random.c */
-extern void add_disk_randomness(struct gendisk *disk);
+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
 extern void rand_initialize_disk(struct gendisk *disk);
 
 static inline sector_t get_start_sect(struct block_device *bdev)
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 1f0be72..24e2cc5 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -8,7 +8,6 @@
 #include <linux/irqdomain.h>
 #include <linux/lockdep.h>
 #include <linux/pinctrl/pinctrl.h>
-#include <linux/kconfig.h>
 
 struct gpio_desc;
 struct of_phandle_args;
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index 4f7d8f4..34a0dc1 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -29,7 +29,9 @@
  *			Returns the number of lower random bytes in "data".
  *			Must not be NULL.    *OBSOLETE*
  * @read:		New API. drivers can fill up to max bytes of data
- *			into the buffer. The buffer is aligned for any type.
+ *			into the buffer. The buffer is aligned for any type
+ *			and max is guaranteed to be >= to that alignment
+ *			(either 4 or 8 depending on architecture).
  * @priv:		Private data, for use by the RNG driver.
  * @quality:		Estimation of true entropy in RNG's bitstream
  *			(per mill).
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 174f43f..c05216a 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -245,7 +245,7 @@
 	return NULL;
 }
 
-static inline int team_num_to_port_index(struct team *team, int num)
+static inline int team_num_to_port_index(struct team *team, unsigned int num)
 {
 	int en_port_count = ACCESS_ONCE(team->en_port_count);
 
diff --git a/include/linux/init.h b/include/linux/init.h
index 5a3321a..e30104c 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -39,7 +39,7 @@
 
 /* These are for everybody (although not all archs will actually
    discard it in modules) */
-#define __init		__section(.init.text) __cold notrace
+#define __init		__section(.init.text) __cold notrace __latent_entropy
 #define __initdata	__section(.init.data)
 #define __initconst	__section(.init.rodata)
 #define __exitdata	__section(.exit.data)
@@ -75,7 +75,8 @@
 #define __exit          __section(.exit.text) __exitused __cold notrace
 
 /* Used for MEMORY_HOTPLUG */
-#define __meminit        __section(.meminit.text) __cold notrace
+#define __meminit        __section(.meminit.text) __cold notrace \
+						  __latent_entropy
 #define __meminitdata    __section(.meminit.data)
 #define __meminitconst   __section(.meminit.rodata)
 #define __memexit        __section(.memexit.text) __exitused __cold notrace
@@ -139,24 +140,8 @@
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_LTO
-/* Work around a LTO gcc problem: when there is no reference to a variable
- * in a module it will be moved to the end of the program. This causes
- * reordering of initcalls which the kernel does not like.
- * Add a dummy reference function to avoid this. The function is
- * deleted by the linker.
- */
-#define LTO_REFERENCE_INITCALL(x) \
-	; /* yes this is needed */			\
-	static __used __exit void *reference_##x(void)	\
-	{						\
-		return &x;				\
-	}
-#else
-#define LTO_REFERENCE_INITCALL(x)
-#endif
-
-/* initcalls are now grouped by functionality into separate 
+/*
+ * initcalls are now grouped by functionality into separate
  * subsections. Ordering inside the subsections is determined
  * by link order. 
  * For backwards compatibility, initcall() puts the call in 
@@ -164,12 +149,16 @@
  *
  * The `id' arg to __define_initcall() is needed so that multiple initcalls
  * can point at the same handler without causing duplicate-symbol build errors.
+ *
+ * Initcalls are run by placing pointers in initcall sections that the
+ * kernel iterates at runtime. The linker can do dead code / data elimination
+ * and remove that completely, so the initcall sections have to be marked
+ * as KEEP() in the linker script.
  */
 
 #define __define_initcall(fn, id) \
 	static initcall_t __initcall_##fn##id __used \
-	__attribute__((__section__(".initcall" #id ".init"))) = fn; \
-	LTO_REFERENCE_INITCALL(__initcall_##fn##id)
+	__attribute__((__section__(".initcall" #id ".init"))) = fn;
 
 /*
  * Early initcalls run before initializing SMP.
@@ -205,15 +194,15 @@
 
 #define __initcall(fn) device_initcall(fn)
 
-#define __exitcall(fn) \
+#define __exitcall(fn)						\
 	static exitcall_t __exitcall_##fn __exit_call = fn
 
-#define console_initcall(fn) \
-	static initcall_t __initcall_##fn \
+#define console_initcall(fn)					\
+	static initcall_t __initcall_##fn			\
 	__used __section(.con_initcall.init) = fn
 
-#define security_initcall(fn) \
-	static initcall_t __initcall_##fn \
+#define security_initcall(fn)					\
+	static initcall_t __initcall_##fn			\
 	__used __section(.security_initcall.init) = fn
 
 struct obs_kernel_param {
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 645ad06..58df02b 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -31,16 +31,16 @@
  * See Documentation/io-mapping.txt
  */
 
-#ifdef CONFIG_HAVE_ATOMIC_IOMAP
-
-#include <asm/iomap.h>
-
 struct io_mapping {
 	resource_size_t base;
 	unsigned long size;
 	pgprot_t prot;
+	void __iomem *iomem;
 };
 
+#ifdef CONFIG_HAVE_ATOMIC_IOMAP
+
+#include <asm/iomap.h>
 /*
  * For small address space machines, mapping large objects
  * into the kernel virtual space isn't practical. Where
@@ -49,34 +49,25 @@
  */
 
 static inline struct io_mapping *
-io_mapping_create_wc(resource_size_t base, unsigned long size)
+io_mapping_init_wc(struct io_mapping *iomap,
+		   resource_size_t base,
+		   unsigned long size)
 {
-	struct io_mapping *iomap;
 	pgprot_t prot;
 
-	iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
-	if (!iomap)
-		goto out_err;
-
 	if (iomap_create_wc(base, size, &prot))
-		goto out_free;
+		return NULL;
 
 	iomap->base = base;
 	iomap->size = size;
 	iomap->prot = prot;
 	return iomap;
-
-out_free:
-	kfree(iomap);
-out_err:
-	return NULL;
 }
 
 static inline void
-io_mapping_free(struct io_mapping *mapping)
+io_mapping_fini(struct io_mapping *mapping)
 {
 	iomap_free(mapping->base, mapping->size);
-	kfree(mapping);
 }
 
 /* Atomic map/unmap */
@@ -121,21 +112,46 @@
 #else
 
 #include <linux/uaccess.h>
-
-/* this struct isn't actually defined anywhere */
-struct io_mapping;
+#include <asm/pgtable.h>
 
 /* Create the io_mapping object*/
 static inline struct io_mapping *
-io_mapping_create_wc(resource_size_t base, unsigned long size)
+io_mapping_init_wc(struct io_mapping *iomap,
+		   resource_size_t base,
+		   unsigned long size)
 {
-	return (struct io_mapping __force *) ioremap_wc(base, size);
+	iomap->base = base;
+	iomap->size = size;
+	iomap->iomem = ioremap_wc(base, size);
+#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
+	iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
+#elif defined(pgprot_writecombine)
+	iomap->prot = pgprot_writecombine(PAGE_KERNEL);
+#else
+	iomap->prot = pgprot_noncached(PAGE_KERNEL);
+#endif
+
+	return iomap;
 }
 
 static inline void
-io_mapping_free(struct io_mapping *mapping)
+io_mapping_fini(struct io_mapping *mapping)
 {
-	iounmap((void __force __iomem *) mapping);
+	iounmap(mapping->iomem);
+}
+
+/* Non-atomic map/unmap */
+static inline void __iomem *
+io_mapping_map_wc(struct io_mapping *mapping,
+		  unsigned long offset,
+		  unsigned long size)
+{
+	return mapping->iomem + offset;
+}
+
+static inline void
+io_mapping_unmap(void __iomem *vaddr)
+{
 }
 
 /* Atomic map/unmap */
@@ -145,30 +161,42 @@
 {
 	preempt_disable();
 	pagefault_disable();
-	return ((char __force __iomem *) mapping) + offset;
+	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
 }
 
 static inline void
 io_mapping_unmap_atomic(void __iomem *vaddr)
 {
+	io_mapping_unmap(vaddr);
 	pagefault_enable();
 	preempt_enable();
 }
 
-/* Non-atomic map/unmap */
-static inline void __iomem *
-io_mapping_map_wc(struct io_mapping *mapping,
-		  unsigned long offset,
-		  unsigned long size)
+#endif /* HAVE_ATOMIC_IOMAP */
+
+static inline struct io_mapping *
+io_mapping_create_wc(resource_size_t base,
+		     unsigned long size)
 {
-	return ((char __force __iomem *) mapping) + offset;
+	struct io_mapping *iomap;
+
+	iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
+	if (!iomap)
+		return NULL;
+
+	if (!io_mapping_init_wc(iomap, base, size)) {
+		kfree(iomap);
+		return NULL;
+	}
+
+	return iomap;
 }
 
 static inline void
-io_mapping_unmap(void __iomem *vaddr)
+io_mapping_free(struct io_mapping *iomap)
 {
+	io_mapping_fini(iomap);
+	kfree(iomap);
 }
 
-#endif /* HAVE_ATOMIC_IOMAP */
-
 #endif /* _LINUX_IO_MAPPING_H */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index a35fb8b..436dc21 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -331,10 +331,32 @@
 /* Generic device grouping function */
 extern struct iommu_group *generic_device_group(struct device *dev);
 
+/**
+ * struct iommu_fwspec - per-device IOMMU instance data
+ * @ops: ops for this device's IOMMU
+ * @iommu_fwnode: firmware handle for this device's IOMMU
+ * @iommu_priv: IOMMU driver private data for this device
+ * @num_ids: number of associated device IDs
+ * @ids: IDs which this device may present to the IOMMU
+ */
+struct iommu_fwspec {
+	const struct iommu_ops	*ops;
+	struct fwnode_handle	*iommu_fwnode;
+	void			*iommu_priv;
+	unsigned int		num_ids;
+	u32			ids[1];
+};
+
+int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
+		      const struct iommu_ops *ops);
+void iommu_fwspec_free(struct device *dev);
+int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
+
 #else /* CONFIG_IOMMU_API */
 
 struct iommu_ops {};
 struct iommu_group {};
+struct iommu_fwspec {};
 
 static inline bool iommu_present(struct bus_type *bus)
 {
@@ -541,6 +563,23 @@
 {
 }
 
+static inline int iommu_fwspec_init(struct device *dev,
+				    struct fwnode_handle *iommu_fwnode,
+				    const struct iommu_ops *ops)
+{
+	return -ENODEV;
+}
+
+static inline void iommu_fwspec_free(struct device *dev)
+{
+}
+
+static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
+				       int num_ids)
+{
+	return -ENODEV;
+}
+
 #endif /* CONFIG_IOMMU_API */
 
 #endif /* __LINUX_IOMMU_H */
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d600303..820c0ad 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -44,6 +44,7 @@
 void kasan_unpoison_shadow(const void *address, size_t size);
 
 void kasan_unpoison_task_stack(struct task_struct *task);
+void kasan_unpoison_stack_above_sp_to(const void *watermark);
 
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
@@ -85,6 +86,7 @@
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
 
 static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {}
 
 static inline void kasan_enable_current(void) {}
 static inline void kasan_disable_current(void) {}
diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h
index c2ce155..f282d4e 100644
--- a/include/linux/kern_levels.h
+++ b/include/linux/kern_levels.h
@@ -20,7 +20,7 @@
  * line that had no enclosing \n). Only to be used by core/arch code
  * during early bootup (a continued line is not SMP-safe otherwise).
  */
-#define KERN_CONT	""
+#define KERN_CONT	KERN_SOH "c"
 
 /* integer equivalents of KERN_<LEVEL> */
 #define LOGLEVEL_SCHED		-2	/* Deferred messages from sched code
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 96356ef..7056238 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -269,10 +269,8 @@
 }
 
 int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
-size_t kernfs_path_len(struct kernfs_node *kn);
 int kernfs_path_from_node(struct kernfs_node *root_kn, struct kernfs_node *kn,
 			  char *buf, size_t buflen);
-char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen);
 void pr_cont_kernfs_name(struct kernfs_node *kn);
 void pr_cont_kernfs_path(struct kernfs_node *kn);
 struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn);
@@ -341,12 +339,10 @@
 static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
 { return -ENOSYS; }
 
-static inline size_t kernfs_path_len(struct kernfs_node *kn)
-{ return 0; }
-
-static inline char *kernfs_path(struct kernfs_node *kn, char *buf,
-				size_t buflen)
-{ return NULL; }
+static inline int kernfs_path_from_node(struct kernfs_node *root_kn,
+					struct kernfs_node *kn,
+					char *buf, size_t buflen)
+{ return -ENOSYS; }
 
 static inline void pr_cont_kernfs_name(struct kernfs_node *kn) { }
 static inline void pr_cont_kernfs_path(struct kernfs_node *kn) { }
@@ -436,6 +432,22 @@
 
 #endif	/* CONFIG_KERNFS */
 
+/**
+ * kernfs_path - build full path of a given node
+ * @kn: kernfs_node of interest
+ * @buf: buffer to copy @kn's name into
+ * @buflen: size of @buf
+ *
+ * Builds and returns the full path of @kn in @buf of @buflen bytes.  The
+ * path is built from the end of @buf so the returned pointer usually
+ * doesn't match @buf.  If @buf isn't long enough, @buf is nul terminated
+ * and %NULL is returned.
+ */
+static inline int kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
+{
+	return kernfs_path_from_node(kn, NULL, buf, buflen);
+}
+
 static inline struct kernfs_node *
 kernfs_find_and_get(struct kernfs_node *kn, const char *name)
 {
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index d743777..406c33d 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -259,6 +259,12 @@
 	vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
 #define VMCOREINFO_CONFIG(name) \
 	vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
+#define VMCOREINFO_PAGE_OFFSET(value) \
+	vmcoreinfo_append_str("PAGE_OFFSET=%lx\n", (unsigned long)value)
+#define VMCOREINFO_VMALLOC_START(value) \
+	vmcoreinfo_append_str("VMALLOC_START=%lx\n", (unsigned long)value)
+#define VMCOREINFO_VMEMMAP_START(value) \
+	vmcoreinfo_append_str("VMEMMAP_START=%lx\n", (unsigned long)value)
 
 extern struct kimage *kexec_image;
 extern struct kimage *kexec_crash_image;
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 4894c68..1c2a328 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -38,6 +38,11 @@
 extern void kmemleak_ignore(const void *ptr) __ref;
 extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
 extern void kmemleak_no_scan(const void *ptr) __ref;
+extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+				gfp_t gfp) __ref;
+extern void kmemleak_free_part_phys(phys_addr_t phys, size_t size) __ref;
+extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref;
+extern void kmemleak_ignore_phys(phys_addr_t phys) __ref;
 
 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
 					    int min_count, unsigned long flags,
@@ -106,6 +111,19 @@
 static inline void kmemleak_no_scan(const void *ptr)
 {
 }
+static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
+				       int min_count, gfp_t gfp)
+{
+}
+static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size)
+{
+}
+static inline void kmemleak_not_leak_phys(phys_addr_t phys)
+{
+}
+static inline void kmemleak_ignore_phys(phys_addr_t phys)
+{
+}
 
 #endif	/* CONFIG_DEBUG_KMEMLEAK */
 
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index e691b6a..a6e82a6 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -10,6 +10,17 @@
 					   int node,
 					   const char namefmt[], ...);
 
+/**
+ * kthread_create - create a kthread on the current node
+ * @threadfn: the function to run in the thread
+ * @data: data pointer for @threadfn()
+ * @namefmt: printf-style format string for the thread name
+ * @...: arguments for @namefmt.
+ *
+ * This macro will create a kthread on the current node, leaving it in
+ * the stopped state.  This is just a helper for kthread_create_on_node();
+ * see the documentation there for more details.
+ */
 #define kthread_create(threadfn, data, namefmt, arg...) \
 	kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)
 
@@ -44,7 +55,7 @@
 bool kthread_should_park(void);
 bool kthread_freezable_should_stop(bool *was_frozen);
 void *kthread_data(struct task_struct *k);
-void *probe_kthread_data(struct task_struct *k);
+void *kthread_probe_data(struct task_struct *k);
 int kthread_park(struct task_struct *k);
 void kthread_unpark(struct task_struct *k);
 void kthread_parkme(void);
@@ -57,16 +68,23 @@
  * Simple work processor based on kthread.
  *
  * This provides easier way to make use of kthreads.  A kthread_work
- * can be queued and flushed using queue/flush_kthread_work()
+ * can be queued and flushed using queue/kthread_flush_work()
  * respectively.  Queued kthread_works are processed by a kthread
  * running kthread_worker_fn().
  */
 struct kthread_work;
 typedef void (*kthread_work_func_t)(struct kthread_work *work);
+void kthread_delayed_work_timer_fn(unsigned long __data);
+
+enum {
+	KTW_FREEZABLE		= 1 << 0,	/* freeze during suspend */
+};
 
 struct kthread_worker {
+	unsigned int		flags;
 	spinlock_t		lock;
 	struct list_head	work_list;
+	struct list_head	delayed_work_list;
 	struct task_struct	*task;
 	struct kthread_work	*current_work;
 };
@@ -75,11 +93,19 @@
 	struct list_head	node;
 	kthread_work_func_t	func;
 	struct kthread_worker	*worker;
+	/* Number of canceling calls that are running at the moment. */
+	int			canceling;
+};
+
+struct kthread_delayed_work {
+	struct kthread_work work;
+	struct timer_list timer;
 };
 
 #define KTHREAD_WORKER_INIT(worker)	{				\
 	.lock = __SPIN_LOCK_UNLOCKED((worker).lock),			\
 	.work_list = LIST_HEAD_INIT((worker).work_list),		\
+	.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
 	}
 
 #define KTHREAD_WORK_INIT(work, fn)	{				\
@@ -87,46 +113,88 @@
 	.func = (fn),							\
 	}
 
+#define KTHREAD_DELAYED_WORK_INIT(dwork, fn) {				\
+	.work = KTHREAD_WORK_INIT((dwork).work, (fn)),			\
+	.timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,	\
+				     0, (unsigned long)&(dwork),	\
+				     TIMER_IRQSAFE),			\
+	}
+
 #define DEFINE_KTHREAD_WORKER(worker)					\
 	struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)
 
 #define DEFINE_KTHREAD_WORK(work, fn)					\
 	struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
 
+#define DEFINE_KTHREAD_DELAYED_WORK(dwork, fn)				\
+	struct kthread_delayed_work dwork =				\
+		KTHREAD_DELAYED_WORK_INIT(dwork, fn)
+
 /*
  * kthread_worker.lock needs its own lockdep class key when defined on
  * stack with lockdep enabled.  Use the following macros in such cases.
  */
 #ifdef CONFIG_LOCKDEP
 # define KTHREAD_WORKER_INIT_ONSTACK(worker)				\
-	({ init_kthread_worker(&worker); worker; })
+	({ kthread_init_worker(&worker); worker; })
 # define DEFINE_KTHREAD_WORKER_ONSTACK(worker)				\
 	struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
 #else
 # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
 #endif
 
-extern void __init_kthread_worker(struct kthread_worker *worker,
+extern void __kthread_init_worker(struct kthread_worker *worker,
 			const char *name, struct lock_class_key *key);
 
-#define init_kthread_worker(worker)					\
+#define kthread_init_worker(worker)					\
 	do {								\
 		static struct lock_class_key __key;			\
-		__init_kthread_worker((worker), "("#worker")->lock", &__key); \
+		__kthread_init_worker((worker), "("#worker")->lock", &__key); \
 	} while (0)
 
-#define init_kthread_work(work, fn)					\
+#define kthread_init_work(work, fn)					\
 	do {								\
 		memset((work), 0, sizeof(struct kthread_work));		\
 		INIT_LIST_HEAD(&(work)->node);				\
 		(work)->func = (fn);					\
 	} while (0)
 
+#define kthread_init_delayed_work(dwork, fn)				\
+	do {								\
+		kthread_init_work(&(dwork)->work, (fn));		\
+		__setup_timer(&(dwork)->timer,				\
+			      kthread_delayed_work_timer_fn,		\
+			      (unsigned long)(dwork),			\
+			      TIMER_IRQSAFE);				\
+	} while (0)
+
 int kthread_worker_fn(void *worker_ptr);
 
-bool queue_kthread_work(struct kthread_worker *worker,
+__printf(2, 3)
+struct kthread_worker *
+kthread_create_worker(unsigned int flags, const char namefmt[], ...);
+
+struct kthread_worker *
+kthread_create_worker_on_cpu(int cpu, unsigned int flags,
+			     const char namefmt[], ...);
+
+bool kthread_queue_work(struct kthread_worker *worker,
 			struct kthread_work *work);
-void flush_kthread_work(struct kthread_work *work);
-void flush_kthread_worker(struct kthread_worker *worker);
+
+bool kthread_queue_delayed_work(struct kthread_worker *worker,
+				struct kthread_delayed_work *dwork,
+				unsigned long delay);
+
+bool kthread_mod_delayed_work(struct kthread_worker *worker,
+			      struct kthread_delayed_work *dwork,
+			      unsigned long delay);
+
+void kthread_flush_work(struct kthread_work *work);
+void kthread_flush_worker(struct kthread_worker *worker);
+
+bool kthread_cancel_work_sync(struct kthread_work *work);
+bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
+
+void kthread_destroy_worker(struct kthread_worker *worker);
 
 #endif /* _LINUX_KTHREAD_H */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index e37d4f9..616eef4 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -46,7 +46,8 @@
 #ifdef CONFIG_ATA_NONSTANDARD
 #include <asm/libata-portmap.h>
 #else
-#include <asm-generic/libata-portmap.h>
+#define ATA_PRIMARY_IRQ(dev)	14
+#define ATA_SECONDARY_IRQ(dev)	15
 #endif
 
 /*
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index b519e13..f4947fd 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -50,23 +50,6 @@
 		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
 		unsigned int buf_len, int *cmd_rc);
 
-struct nd_namespace_label;
-struct nvdimm_drvdata;
-
-struct nd_mapping {
-	struct nvdimm *nvdimm;
-	struct nd_namespace_label **labels;
-	u64 start;
-	u64 size;
-	/*
-	 * @ndd is for private use at region enable / disable time for
-	 * get_ndd() + put_ndd(), all other nd_mapping to ndd
-	 * conversions use to_ndd() which respects enabled state of the
-	 * nvdimm.
-	 */
-	struct nvdimm_drvdata *ndd;
-};
-
 struct nvdimm_bus_descriptor {
 	const struct attribute_group **attr_groups;
 	unsigned long cmd_mask;
@@ -89,9 +72,15 @@
 	u64 cookie;
 };
 
+struct nd_mapping_desc {
+	struct nvdimm *nvdimm;
+	u64 start;
+	u64 size;
+};
+
 struct nd_region_desc {
 	struct resource *res;
-	struct nd_mapping *nd_mapping;
+	struct nd_mapping_desc *mapping;
 	u16 num_mappings;
 	const struct attribute_group **attr_groups;
 	struct nd_interleave_set *nd_set;
@@ -129,6 +118,8 @@
 }
 
 int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length);
+void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
+		phys_addr_t start, unsigned int len);
 struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
 		struct nvdimm_bus_descriptor *nfit_desc);
 void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus);
@@ -139,6 +130,7 @@
 struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
 struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus);
 const char *nvdimm_name(struct nvdimm *nvdimm);
+struct kobject *nvdimm_kobj(struct nvdimm *nvdimm);
 unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm);
 void *nvdimm_provider_data(struct nvdimm *nvdimm);
 struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 77c1417..5827614 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -92,12 +92,21 @@
 	___t; \
 })
 
-#define MLX5_SET64(typ, p, fld, v) do { \
+#define __MLX5_SET64(typ, p, fld, v) do { \
 	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
-	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
 	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
 } while (0)
 
+#define MLX5_SET64(typ, p, fld, v) do { \
+	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
+	__MLX5_SET64(typ, p, fld, v); \
+} while (0)
+
+#define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
+	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
+	__MLX5_SET64(typ, p, fld[idx], v); \
+} while (0)
+
 #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
 
 #define MLX5_GET64_PR(typ, p, fld) ({ \
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e9caec6..ffbd729 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1266,9 +1266,10 @@
 }
 #endif
 
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
+		unsigned int gup_flags);
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-		void *buf, int len, int write);
+		void *buf, int len, unsigned int gup_flags);
 
 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		      unsigned long start, unsigned long nr_pages,
@@ -1276,19 +1277,18 @@
 		      struct vm_area_struct **vmas, int *nonblocking);
 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 			    unsigned long start, unsigned long nr_pages,
-			    int write, int force, struct page **pages,
+			    unsigned int gup_flags, struct page **pages,
 			    struct vm_area_struct **vmas);
 long get_user_pages(unsigned long start, unsigned long nr_pages,
-			    int write, int force, struct page **pages,
+			    unsigned int gup_flags, struct page **pages,
 			    struct vm_area_struct **vmas);
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
-		    int write, int force, struct page **pages, int *locked);
+		    unsigned int gup_flags, struct page **pages, int *locked);
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 			       unsigned long start, unsigned long nr_pages,
-			       int write, int force, struct page **pages,
-			       unsigned int gup_flags);
+			       struct page **pages, unsigned int gup_flags);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
-		    int write, int force, struct page **pages);
+		    struct page **pages, unsigned int gup_flags);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
 
@@ -1306,7 +1306,7 @@
 struct frame_vector *frame_vector_create(unsigned int nr_frames);
 void frame_vector_destroy(struct frame_vector *vec);
 int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
-		     bool write, bool force, struct frame_vector *vec);
+		     unsigned int gup_flags, struct frame_vector *vec);
 void put_vaddr_frames(struct frame_vector *vec);
 int frame_vector_to_pages(struct frame_vector *vec);
 void frame_vector_to_pfns(struct frame_vector *vec);
@@ -2232,6 +2232,7 @@
 #define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
 #define FOLL_MLOCK	0x1000	/* lock present pages */
 #define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
+#define FOLL_COW	0x4000	/* internal GUP flag */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 29a1706..13f8052 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -127,6 +127,82 @@
 		    struct mtd_oob_region *oobfree);
 };
 
+/**
+ * struct mtd_pairing_info - page pairing information
+ *
+ * @pair: pair id
+ * @group: group id
+ *
+ * The term "pair" is used here, even though TLC NANDs might group pages by 3
+ * (3 bits in a single cell). A pair should regroup all pages that are sharing
+ * the same cell. Pairs are then indexed in ascending order.
+ *
+ * @group is defining the position of a page in a given pair. It can also be
+ * seen as the bit position in the cell: page attached to bit 0 belongs to
+ * group 0, page attached to bit 1 belongs to group 1, etc.
+ *
+ * Example:
+ * The H27UCG8T2BTR-BC datasheet describes the following pairing scheme:
+ *
+ *		group-0		group-1
+ *
+ *  pair-0	page-0		page-4
+ *  pair-1	page-1		page-5
+ *  pair-2	page-2		page-8
+ *  ...
+ *  pair-127	page-251	page-255
+ *
+ *
+ * Note that the "group" and "pair" terms were extracted from Samsung and
+ * Hynix datasheets, and might be referenced under other names in other
+ * datasheets (Micron is describing this concept as "shared pages").
+ */
+struct mtd_pairing_info {
+	int pair;
+	int group;
+};
+
+/**
+ * struct mtd_pairing_scheme - page pairing scheme description
+ *
+ * @ngroups: number of groups. Should be related to the number of bits
+ *	     per cell.
+ * @get_info: converts a write-unit (page number within an erase block) into
+ *	      mtd_pairing information (pair + group). This function should
+ *	      fill the info parameter based on the wunit index or return
+ *	      -EINVAL if the wunit parameter is invalid.
+ * @get_wunit: converts pairing information into a write-unit (page) number.
+ *	       This function should return the wunit index pointed by the
+ *	       pairing information described in the info argument. It should
+ *	       return -EINVAL, if there's no wunit corresponding to the
+ *	       passed pairing information.
+ *
+ * See mtd_pairing_info documentation for a detailed explanation of the
+ * pair and group concepts.
+ *
+ * The mtd_pairing_scheme structure provides a generic solution to represent
+ * NAND page pairing scheme. Instead of exposing two big tables to do the
+ * write-unit <-> (pair + group) conversions, we ask the MTD drivers to
+ * implement the ->get_info() and ->get_wunit() functions.
+ *
+ * MTD users will then be able to query these information by using the
+ * mtd_pairing_info_to_wunit() and mtd_wunit_to_pairing_info() helpers.
+ *
+ * @ngroups is here to help MTD users iterating over all the pages in a
+ * given pair. This value can be retrieved by MTD users using the
+ * mtd_pairing_groups() helper.
+ *
+ * Examples are given in the mtd_pairing_info_to_wunit() and
+ * mtd_wunit_to_pairing_info() documentation.
+ */
+struct mtd_pairing_scheme {
+	int ngroups;
+	int (*get_info)(struct mtd_info *mtd, int wunit,
+			struct mtd_pairing_info *info);
+	int (*get_wunit)(struct mtd_info *mtd,
+			 const struct mtd_pairing_info *info);
+};
+
 struct module;	/* only needed for owner field in mtd_info */
 
 struct mtd_info {
@@ -188,6 +264,9 @@
 	/* OOB layout description */
 	const struct mtd_ooblayout_ops *ooblayout;
 
+	/* NAND pairing scheme, only provided for MLC/TLC NANDs */
+	const struct mtd_pairing_scheme *pairing;
+
 	/* the ecc step size. */
 	unsigned int ecc_step_size;
 
@@ -296,6 +375,12 @@
 	mtd->ooblayout = ooblayout;
 }
 
+static inline void mtd_set_pairing_scheme(struct mtd_info *mtd,
+				const struct mtd_pairing_scheme *pairing)
+{
+	mtd->pairing = pairing;
+}
+
 static inline void mtd_set_of_node(struct mtd_info *mtd,
 				   struct device_node *np)
 {
@@ -312,6 +397,11 @@
 	return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
 }
 
+int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
+			      struct mtd_pairing_info *info);
+int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
+			      const struct mtd_pairing_info *info);
+int mtd_pairing_groups(struct mtd_info *mtd);
 int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
 int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
 	      void **virt, resource_size_t *phys);
@@ -397,6 +487,23 @@
 	return do_div(sz, mtd->writesize);
 }
 
+static inline int mtd_wunit_per_eb(struct mtd_info *mtd)
+{
+	return mtd->erasesize / mtd->writesize;
+}
+
+static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs)
+{
+	return mtd_div_by_ws(mtd_mod_by_eb(offs, mtd), mtd);
+}
+
+static inline loff_t mtd_wunit_to_offset(struct mtd_info *mtd, loff_t base,
+					 int wunit)
+{
+	return base + (wunit * mtd->writesize);
+}
+
+
 static inline int mtd_has_oob(const struct mtd_info *mtd)
 {
 	return mtd->_read_oob && mtd->_write_oob;
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 8dd6e01..c5d3d502 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -29,26 +29,26 @@
 struct device_node;
 
 /* Scan and identify a NAND device */
-extern int nand_scan(struct mtd_info *mtd, int max_chips);
+int nand_scan(struct mtd_info *mtd, int max_chips);
 /*
  * Separate phases of nand_scan(), allowing board driver to intervene
  * and override command or ECC setup according to flash type.
  */
-extern int nand_scan_ident(struct mtd_info *mtd, int max_chips,
+int nand_scan_ident(struct mtd_info *mtd, int max_chips,
 			   struct nand_flash_dev *table);
-extern int nand_scan_tail(struct mtd_info *mtd);
+int nand_scan_tail(struct mtd_info *mtd);
 
-/* Free resources held by the NAND device */
-extern void nand_release(struct mtd_info *mtd);
+/* Unregister the MTD device and free resources held by the NAND device */
+void nand_release(struct mtd_info *mtd);
 
 /* Internal helper for board drivers which need to override command function */
-extern void nand_wait_ready(struct mtd_info *mtd);
+void nand_wait_ready(struct mtd_info *mtd);
 
 /* locks all blocks present in the device */
-extern int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 
 /* unlocks specified locked blocks */
-extern int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 
 /* The maximum number of NAND chips in an array */
 #define NAND_MAX_CHIPS		8
@@ -141,6 +141,7 @@
  * pages and you want to rely on the default implementation.
  */
 #define NAND_ECC_GENERIC_ERASED_CHECK	BIT(0)
+#define NAND_ECC_MAXIMIZE		BIT(1)
 
 /* Bit mask for flags passed to do_nand_read_ecc */
 #define NAND_GET_DEVICE		0x80
@@ -460,6 +461,13 @@
 	wait_queue_head_t wq;
 };
 
+static inline void nand_hw_control_init(struct nand_hw_control *nfc)
+{
+	nfc->active = NULL;
+	spin_lock_init(&nfc->lock);
+	init_waitqueue_head(&nfc->wq);
+}
+
 /**
  * struct nand_ecc_ctrl - Control structure for ECC
  * @mode:	ECC mode
@@ -566,6 +574,123 @@
 };
 
 /**
+ * struct nand_sdr_timings - SDR NAND chip timings
+ *
+ * This struct defines the timing requirements of a SDR NAND chip.
+ * These information can be found in every NAND datasheets and the timings
+ * meaning are described in the ONFI specifications:
+ * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing
+ * Parameters)
+ *
+ * All these timings are expressed in picoseconds.
+ *
+ * @tALH_min: ALE hold time
+ * @tADL_min: ALE to data loading time
+ * @tALS_min: ALE setup time
+ * @tAR_min: ALE to RE# delay
+ * @tCEA_max: CE# access time
+ * @tCEH_min:
+ * @tCH_min:  CE# hold time
+ * @tCHZ_max: CE# high to output hi-Z
+ * @tCLH_min: CLE hold time
+ * @tCLR_min: CLE to RE# delay
+ * @tCLS_min: CLE setup time
+ * @tCOH_min: CE# high to output hold
+ * @tCS_min: CE# setup time
+ * @tDH_min: Data hold time
+ * @tDS_min: Data setup time
+ * @tFEAT_max: Busy time for Set Features and Get Features
+ * @tIR_min: Output hi-Z to RE# low
+ * @tITC_max: Interface and Timing Mode Change time
+ * @tRC_min: RE# cycle time
+ * @tREA_max: RE# access time
+ * @tREH_min: RE# high hold time
+ * @tRHOH_min: RE# high to output hold
+ * @tRHW_min: RE# high to WE# low
+ * @tRHZ_max: RE# high to output hi-Z
+ * @tRLOH_min: RE# low to output hold
+ * @tRP_min: RE# pulse width
+ * @tRR_min: Ready to RE# low (data only)
+ * @tRST_max: Device reset time, measured from the falling edge of R/B# to the
+ *	      rising edge of R/B#.
+ * @tWB_max: WE# high to SR[6] low
+ * @tWC_min: WE# cycle time
+ * @tWH_min: WE# high hold time
+ * @tWHR_min: WE# high to RE# low
+ * @tWP_min: WE# pulse width
+ * @tWW_min: WP# transition to WE# low
+ */
+struct nand_sdr_timings {
+	u32 tALH_min;
+	u32 tADL_min;
+	u32 tALS_min;
+	u32 tAR_min;
+	u32 tCEA_max;
+	u32 tCEH_min;
+	u32 tCH_min;
+	u32 tCHZ_max;
+	u32 tCLH_min;
+	u32 tCLR_min;
+	u32 tCLS_min;
+	u32 tCOH_min;
+	u32 tCS_min;
+	u32 tDH_min;
+	u32 tDS_min;
+	u32 tFEAT_max;
+	u32 tIR_min;
+	u32 tITC_max;
+	u32 tRC_min;
+	u32 tREA_max;
+	u32 tREH_min;
+	u32 tRHOH_min;
+	u32 tRHW_min;
+	u32 tRHZ_max;
+	u32 tRLOH_min;
+	u32 tRP_min;
+	u32 tRR_min;
+	u64 tRST_max;
+	u32 tWB_max;
+	u32 tWC_min;
+	u32 tWH_min;
+	u32 tWHR_min;
+	u32 tWP_min;
+	u32 tWW_min;
+};
+
+/**
+ * enum nand_data_interface_type - NAND interface timing type
+ * @NAND_SDR_IFACE:	Single Data Rate interface
+ */
+enum nand_data_interface_type {
+	NAND_SDR_IFACE,
+};
+
+/**
+ * struct nand_data_interface - NAND interface timing
+ * @type:	type of the timing
+ * @timings:	The timing, type according to @type
+ */
+struct nand_data_interface {
+	enum nand_data_interface_type type;
+	union {
+		struct nand_sdr_timings sdr;
+	} timings;
+};
+
+/**
+ * nand_get_sdr_timings - get SDR timing from data interface
+ * @conf:	The data interface
+ */
+static inline const struct nand_sdr_timings *
+nand_get_sdr_timings(const struct nand_data_interface *conf)
+{
+	if (conf->type != NAND_SDR_IFACE)
+		return ERR_PTR(-EINVAL);
+
+	return &conf->timings.sdr;
+}
+
+/**
  * struct nand_chip - NAND Private Flash Chip Data
  * @mtd:		MTD device registered to the MTD framework
  * @IO_ADDR_R:		[BOARDSPECIFIC] address to read the 8 I/O lines of the
@@ -627,10 +752,9 @@
  *                      also from the datasheet. It is the recommended ECC step
  *			size, if known; if unknown, set to zero.
  * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is
- *			      either deduced from the datasheet if the NAND
- *			      chip is not ONFI compliant or set to 0 if it is
- *			      (an ONFI chip is always configured in mode 0
- *			      after a NAND reset)
+ *			      set to the actually used ONFI mode if the chip is
+ *			      ONFI compliant or deduced from the datasheet if
+ *			      the NAND chip is not ONFI compliant.
  * @numchips:		[INTERN] number of physical chips
  * @chipsize:		[INTERN] the size of one chip for multichip arrays
  * @pagemask:		[INTERN] page number mask = number of (pages / chip) - 1
@@ -650,6 +774,7 @@
  * @read_retries:	[INTERN] the number of read retry modes supported
  * @onfi_set_features:	[REPLACEABLE] set the features for ONFI nand
  * @onfi_get_features:	[REPLACEABLE] get the features for ONFI nand
+ * @setup_data_interface: [OPTIONAL] setup the data interface and timing
  * @bbt:		[INTERN] bad block table pointer
  * @bbt_td:		[REPLACEABLE] bad block table descriptor for flash
  *			lookup.
@@ -696,6 +821,10 @@
 	int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip,
 			int feature_addr, uint8_t *subfeature_para);
 	int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode);
+	int (*setup_data_interface)(struct mtd_info *mtd,
+				    const struct nand_data_interface *conf,
+				    bool check_only);
+
 
 	int chip_delay;
 	unsigned int options;
@@ -725,6 +854,8 @@
 		struct nand_jedec_params jedec_params;
 	};
 
+	struct nand_data_interface *data_interface;
+
 	int read_retries;
 
 	flstate_t state;
@@ -893,14 +1024,14 @@
 extern struct nand_flash_dev nand_flash_ids[];
 extern struct nand_manufacturers nand_manuf_ids[];
 
-extern int nand_default_bbt(struct mtd_info *mtd);
-extern int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
-extern int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
-extern int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
-extern int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
-			   int allowbbt);
-extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
-			size_t *retlen, uint8_t *buf);
+int nand_default_bbt(struct mtd_info *mtd);
+int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
+int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
+int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
+int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
+		    int allowbbt);
+int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
+		 size_t *retlen, uint8_t *buf);
 
 /**
  * struct platform_nand_chip - chip level device structure
@@ -988,6 +1119,11 @@
 	return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
 }
 
+int onfi_init_data_interface(struct nand_chip *chip,
+			     struct nand_data_interface *iface,
+			     enum nand_data_interface_type type,
+			     int timing_mode);
+
 /*
  * Check if it is a SLC nand.
  * The !nand_is_slc() can be used to check the MLC/TLC nand chips.
@@ -1023,57 +1159,10 @@
 		: 0;
 }
 
-/*
- * struct nand_sdr_timings - SDR NAND chip timings
- *
- * This struct defines the timing requirements of a SDR NAND chip.
- * These informations can be found in every NAND datasheets and the timings
- * meaning are described in the ONFI specifications:
- * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing
- * Parameters)
- *
- * All these timings are expressed in picoseconds.
- */
-
-struct nand_sdr_timings {
-	u32 tALH_min;
-	u32 tADL_min;
-	u32 tALS_min;
-	u32 tAR_min;
-	u32 tCEA_max;
-	u32 tCEH_min;
-	u32 tCH_min;
-	u32 tCHZ_max;
-	u32 tCLH_min;
-	u32 tCLR_min;
-	u32 tCLS_min;
-	u32 tCOH_min;
-	u32 tCS_min;
-	u32 tDH_min;
-	u32 tDS_min;
-	u32 tFEAT_max;
-	u32 tIR_min;
-	u32 tITC_max;
-	u32 tRC_min;
-	u32 tREA_max;
-	u32 tREH_min;
-	u32 tRHOH_min;
-	u32 tRHW_min;
-	u32 tRHZ_max;
-	u32 tRLOH_min;
-	u32 tRP_min;
-	u32 tRR_min;
-	u64 tRST_max;
-	u32 tWB_max;
-	u32 tWC_min;
-	u32 tWH_min;
-	u32 tWHR_min;
-	u32 tWP_min;
-	u32 tWW_min;
-};
-
 /* get timing characteristics from ONFI timing mode. */
 const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
+/* get data interface from ONFI timing mode 0, used after reset. */
+const struct nand_data_interface *nand_get_default_data_interface(void);
 
 int nand_check_erased_ecc_chunk(void *data, int datalen,
 				void *ecc, int ecclen,
@@ -1093,4 +1182,11 @@
 /* Default read_oob syndrome implementation */
 int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
 			   int page);
+
+/* Reset and initialize a NAND device */
+int nand_reset(struct nand_chip *chip);
+
+/* Free resources held by the NAND device */
+void nand_cleanup(struct nand_chip *chip);
+
 #endif /* __LINUX_MTD_NAND_H */
diff --git a/include/linux/nd.h b/include/linux/nd.h
index f1ea426..fa66aee 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -77,11 +77,13 @@
  * @nsio: device and system physical address range to drive
  * @alt_name: namespace name supplied in the dimm label
  * @uuid: namespace name supplied in the dimm label
+ * @id: ida allocated id
  */
 struct nd_namespace_pmem {
 	struct nd_namespace_io nsio;
 	char *alt_name;
 	u8 *uuid;
+	int id;
 };
 
 /**
@@ -105,19 +107,19 @@
 	struct resource **res;
 };
 
-static inline struct nd_namespace_io *to_nd_namespace_io(struct device *dev)
+static inline struct nd_namespace_io *to_nd_namespace_io(const struct device *dev)
 {
 	return container_of(dev, struct nd_namespace_io, common.dev);
 }
 
-static inline struct nd_namespace_pmem *to_nd_namespace_pmem(struct device *dev)
+static inline struct nd_namespace_pmem *to_nd_namespace_pmem(const struct device *dev)
 {
 	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
 
 	return container_of(nsio, struct nd_namespace_pmem, nsio);
 }
 
-static inline struct nd_namespace_blk *to_nd_namespace_blk(struct device *dev)
+static inline struct nd_namespace_blk *to_nd_namespace_blk(const struct device *dev)
 {
 	return container_of(dev, struct nd_namespace_blk, common.dev);
 }
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index c6564ad..9094faf 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -67,6 +67,7 @@
 		NFS4_DELEGATION_STATEID_TYPE,
 		NFS4_LAYOUT_STATEID_TYPE,
 		NFS4_PNFS_DS_STATEID_TYPE,
+		NFS4_REVOKED_STATEID_TYPE,
 	} type;
 };
 
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 14a762d..b34097c 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -103,6 +103,9 @@
 #define NFS_SP4_MACH_CRED_WRITE    5	/* WRITE */
 #define NFS_SP4_MACH_CRED_COMMIT   6	/* COMMIT */
 #define NFS_SP4_MACH_CRED_PNFS_CLEANUP  7 /* LAYOUTRETURN */
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+	wait_queue_head_t	cl_lock_waitq;
+#endif /* CONFIG_NFS_V4_1 */
 #endif /* CONFIG_NFS_V4 */
 
 	/* Our own IP address, as a null-terminated string.
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 7cc0dee..beb1e10 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -125,6 +125,11 @@
 		| NFS_ATTR_FATTR_V4_SECURITY_LABEL)
 
 /*
+ * Maximal number of supported layout drivers.
+ */
+#define NFS_MAX_LAYOUT_TYPES 8
+
+/*
  * Info on the file system
  */
 struct nfs_fsinfo {
@@ -139,7 +144,8 @@
 	__u64			maxfilesize;
 	struct timespec		time_delta; /* server time granularity */
 	__u32			lease_time; /* in seconds */
-	__u32			layouttype; /* supported pnfs layout driver */
+	__u32			nlayouttypes; /* number of layouttypes */
+	__u32			layouttype[NFS_MAX_LAYOUT_TYPES]; /* supported pnfs layout driver */
 	__u32			blksize; /* preferred pnfs io block size */
 	__u32			clone_blksize; /* granularity of a CLONE operation */
 };
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index b969e94..7fd5cfc 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -17,6 +17,9 @@
 int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
 int of_get_pci_domain_nr(struct device_node *node);
 void of_pci_check_probe_only(void);
+int of_pci_map_rid(struct device_node *np, u32 rid,
+		   const char *map_name, const char *map_mask_name,
+		   struct device_node **target, u32 *id_out);
 #else
 static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
 {
@@ -52,6 +55,13 @@
 	return -1;
 }
 
+static inline int of_pci_map_rid(struct device_node *np, u32 rid,
+			const char *map_name, const char *map_mask_name,
+			struct device_node **target, u32 *id_out)
+{
+	return -EINVAL;
+}
+
 static inline void of_pci_check_probe_only(void) { }
 #endif
 
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 794dbcb..dd15d39 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -16,17 +16,16 @@
 #include <linux/hugetlb_inline.h>
 
 /*
- * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
- * allocation mode flags.
+ * Bits in mapping->flags.
  */
 enum mapping_flags {
-	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
-	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
-	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
-	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
-	AS_EXITING	= __GFP_BITS_SHIFT + 4, /* final truncate in progress */
+	AS_EIO		= 0,	/* IO error on async write */
+	AS_ENOSPC	= 1,	/* ENOSPC on async write */
+	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
+	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
+	AS_EXITING	= 4, 	/* final truncate in progress */
 	/* writeback related tags are not used */
-	AS_NO_WRITEBACK_TAGS = __GFP_BITS_SHIFT + 5,
+	AS_NO_WRITEBACK_TAGS = 5,
 };
 
 static inline void mapping_set_error(struct address_space *mapping, int error)
@@ -78,7 +77,7 @@
 
 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
 {
-	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
+	return mapping->gfp_mask;
 }
 
 /* Restricts the given gfp_mask to what the mapping allows. */
@@ -94,8 +93,7 @@
  */
 static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 {
-	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
-				(__force unsigned long)mask;
+	m->gfp_mask = mask;
 }
 
 void release_pages(struct page **pages, int nr, bool cold);
@@ -530,59 +528,10 @@
 extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
 
 /*
- * Fault one or two userspace pages into pagetables.
- * Return -EINVAL if more than two pages would be needed.
- * Return non-zero on a fault.
+ * Fault everything in given userspace address range in.
  */
 static inline int fault_in_pages_writeable(char __user *uaddr, int size)
 {
-	int span, ret;
-
-	if (unlikely(size == 0))
-		return 0;
-
-	span = offset_in_page(uaddr) + size;
-	if (span > 2 * PAGE_SIZE)
-		return -EINVAL;
-	/*
-	 * Writing zeroes into userspace here is OK, because we know that if
-	 * the zero gets there, we'll be overwriting it.
-	 */
-	ret = __put_user(0, uaddr);
-	if (ret == 0 && span > PAGE_SIZE)
-		ret = __put_user(0, uaddr + size - 1);
-	return ret;
-}
-
-static inline int fault_in_pages_readable(const char __user *uaddr, int size)
-{
-	volatile char c;
-	int ret;
-
-	if (unlikely(size == 0))
-		return 0;
-
-	ret = __get_user(c, uaddr);
-	if (ret == 0) {
-		const char __user *end = uaddr + size - 1;
-
-		if (((unsigned long)uaddr & PAGE_MASK) !=
-				((unsigned long)end & PAGE_MASK)) {
-			ret = __get_user(c, end);
-			(void)c;
-		}
-	}
-	return ret;
-}
-
-/*
- * Multipage variants of the above prefault helpers, useful if more than
- * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
- * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
- * filemap.c hotpaths.
- */
-static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
-{
 	char __user *end = uaddr + size - 1;
 
 	if (unlikely(size == 0))
@@ -608,8 +557,7 @@
 	return 0;
 }
 
-static inline int fault_in_multipages_readable(const char __user *uaddr,
-					       int size)
+static inline int fault_in_pages_readable(const char __user *uaddr, int size)
 {
 	volatile char c;
 	const char __user *end = uaddr + size - 1;
diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h
index 1d405a2..a1bacf1 100644
--- a/include/linux/pkeys.h
+++ b/include/linux/pkeys.h
@@ -4,11 +4,6 @@
 #include <linux/mm_types.h>
 #include <asm/mmu_context.h>
 
-#define PKEY_DISABLE_ACCESS	0x1
-#define PKEY_DISABLE_WRITE	0x2
-#define PKEY_ACCESS_MASK	(PKEY_DISABLE_ACCESS |\
-				 PKEY_DISABLE_WRITE)
-
 #ifdef CONFIG_ARCH_HAS_PKEYS
 #include <asm/pkeys.h>
 #else /* ! CONFIG_ARCH_HAS_PKEYS */
@@ -16,18 +11,33 @@
 #define execute_only_pkey(mm) (0)
 #define arch_override_mprotect_pkey(vma, prot, pkey) (0)
 #define PKEY_DEDICATED_EXECUTE_ONLY 0
-#endif /* ! CONFIG_ARCH_HAS_PKEYS */
+#define ARCH_VM_PKEY_FLAGS 0
 
-/*
- * This is called from mprotect_pkey().
- *
- * Returns true if the protection keys is valid.
- */
-static inline bool validate_pkey(int pkey)
+static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
 {
-	if (pkey < 0)
-		return false;
-	return (pkey < arch_max_pkey());
+	return (pkey == 0);
 }
 
+static inline int mm_pkey_alloc(struct mm_struct *mm)
+{
+	return -1;
+}
+
+static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
+{
+	return -EINVAL;
+}
+
+static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+			unsigned long init_val)
+{
+	return 0;
+}
+
+static inline void copy_init_pkru_to_fpregs(void)
+{
+}
+
+#endif /* ! CONFIG_ARCH_HAS_PKEYS */
+
 #endif /* _LINUX_PKEYS_H */
diff --git a/include/linux/platform_data/media/camera-pxa.h b/include/linux/platform_data/media/camera-pxa.h
index 6709b1c..ce5d90e 100644
--- a/include/linux/platform_data/media/camera-pxa.h
+++ b/include/linux/platform_data/media/camera-pxa.h
@@ -37,6 +37,8 @@
 struct pxacamera_platform_data {
 	unsigned long flags;
 	unsigned long mclk_10khz;
+	int sensor_i2c_adapter_id;
+	int sensor_i2c_address;
 };
 
 extern void pxa_set_camera_info(struct pxacamera_platform_data *);
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index d5d3d74..5a9a739 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -11,27 +11,7 @@
 #include <linux/bug.h>
 #include <linux/slab.h>
 #include <linux/rcupdate.h>
-
-#define ACL_UNDEFINED_ID	(-1)
-
-/* a_type field in acl_user_posix_entry_t */
-#define ACL_TYPE_ACCESS		(0x8000)
-#define ACL_TYPE_DEFAULT	(0x4000)
-
-/* e_tag entry in struct posix_acl_entry */
-#define ACL_USER_OBJ		(0x01)
-#define ACL_USER		(0x02)
-#define ACL_GROUP_OBJ		(0x04)
-#define ACL_GROUP		(0x08)
-#define ACL_MASK		(0x10)
-#define ACL_OTHER		(0x20)
-
-/* permissions in the e_perm field */
-#define ACL_READ		(0x04)
-#define ACL_WRITE		(0x02)
-#define ACL_EXECUTE		(0x01)
-//#define ACL_ADD		(0x08)
-//#define ACL_DELETE		(0x10)
+#include <uapi/linux/posix_acl.h>
 
 struct posix_acl_entry {
 	short			e_tag;
@@ -93,6 +73,7 @@
 extern int posix_acl_chmod(struct inode *, umode_t);
 extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **,
 		struct posix_acl **);
+extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **);
 
 extern int simple_set_acl(struct inode *, struct posix_acl *, int);
 extern int simple_acl_create(struct inode *, struct inode *);
diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h
index e5e8ec4..8b867e3 100644
--- a/include/linux/posix_acl_xattr.h
+++ b/include/linux/posix_acl_xattr.h
@@ -10,42 +10,25 @@
 #define _POSIX_ACL_XATTR_H
 
 #include <uapi/linux/xattr.h>
+#include <uapi/linux/posix_acl_xattr.h>
 #include <linux/posix_acl.h>
 
-/* Supported ACL a_version fields */
-#define POSIX_ACL_XATTR_VERSION	0x0002
-
-/* An undefined entry e_id value */
-#define ACL_UNDEFINED_ID	(-1)
-
-typedef struct {
-	__le16			e_tag;
-	__le16			e_perm;
-	__le32			e_id;
-} posix_acl_xattr_entry;
-
-typedef struct {
-	__le32			a_version;
-	posix_acl_xattr_entry	a_entries[0];
-} posix_acl_xattr_header;
-
-
 static inline size_t
 posix_acl_xattr_size(int count)
 {
-	return (sizeof(posix_acl_xattr_header) +
-		(count * sizeof(posix_acl_xattr_entry)));
+	return (sizeof(struct posix_acl_xattr_header) +
+		(count * sizeof(struct posix_acl_xattr_entry)));
 }
 
 static inline int
 posix_acl_xattr_count(size_t size)
 {
-	if (size < sizeof(posix_acl_xattr_header))
+	if (size < sizeof(struct posix_acl_xattr_header))
 		return -1;
-	size -= sizeof(posix_acl_xattr_header);
-	if (size % sizeof(posix_acl_xattr_entry))
+	size -= sizeof(struct posix_acl_xattr_header);
+	if (size % sizeof(struct posix_acl_xattr_entry))
 		return -1;
-	return size / sizeof(posix_acl_xattr_entry);
+	return size / sizeof(struct posix_acl_xattr_entry);
 }
 
 #ifdef CONFIG_FS_POSIX_ACL
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 696a56b..eac1af8 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -16,6 +16,7 @@
 		switch (buffer[1]) {
 		case '0' ... '7':
 		case 'd':	/* KERN_DEFAULT */
+		case 'c':	/* KERN_CONT */
 			return buffer[1];
 		}
 	}
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index f1bbae0..2c6c511 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -641,6 +641,7 @@
 #ifdef CONFIG_PWM_SYSFS
 void pwmchip_sysfs_export(struct pwm_chip *chip);
 void pwmchip_sysfs_unexport(struct pwm_chip *chip);
+void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
 #else
 static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
 {
@@ -649,6 +650,10 @@
 static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
 {
 }
+
+static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+{
+}
 #endif /* CONFIG_PWM_SYSFS */
 
 #endif /* __LINUX_PWM_H */
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 52b97db..af3581b 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -461,6 +461,14 @@
  *
  * This function updates @iter->index in the case of a successful lookup.
  * For tagged lookup it also eats @iter->tags.
+ *
+ * There are several cases where 'slot' can be passed in as NULL to this
+ * function.  These cases result from the use of radix_tree_iter_next() or
+ * radix_tree_iter_retry().  In these cases we don't end up dereferencing
+ * 'slot' because either:
+ * a) we are doing tagged iteration and iter->tags has been set to 0, or
+ * b) we are doing non-tagged iteration, and iter->index and iter->next_index
+ *    have been set up so that radix_tree_chunk_size() returns 1 or 0.
  */
 static __always_inline void **
 radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
diff --git a/include/linux/random.h b/include/linux/random.h
index 3d6e981..7bd2403 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -18,9 +18,20 @@
 };
 
 extern void add_device_randomness(const void *, unsigned int);
+
+#if defined(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) && !defined(__CHECKER__)
+static inline void add_latent_entropy(void)
+{
+	add_device_randomness((const void *)&latent_entropy,
+			      sizeof(latent_entropy));
+}
+#else
+static inline void add_latent_entropy(void) {}
+#endif
+
 extern void add_input_randomness(unsigned int type, unsigned int code,
-				 unsigned int value);
-extern void add_interrupt_randomness(int irq, int irq_flags);
+				 unsigned int value) __latent_entropy;
+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
 
 extern void get_random_bytes(void *buf, int nbytes);
 extern int add_random_ready_callback(struct random_ready_callback *rdy);
@@ -34,7 +45,7 @@
 
 unsigned int get_random_int(void);
 unsigned long get_random_long(void);
-unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
+unsigned long randomize_page(unsigned long start, unsigned long range);
 
 u32 prandom_u32(void);
 void prandom_bytes(void *buf, size_t nbytes);
diff --git a/include/linux/relay.h b/include/linux/relay.h
index ecbb34a..68c1448 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -15,6 +15,7 @@
 #include <linux/timer.h>
 #include <linux/wait.h>
 #include <linux/list.h>
+#include <linux/irq_work.h>
 #include <linux/bug.h>
 #include <linux/fs.h>
 #include <linux/poll.h>
@@ -38,7 +39,7 @@
 	size_t subbufs_consumed;	/* count of sub-buffers consumed */
 	struct rchan *chan;		/* associated channel */
 	wait_queue_head_t read_wait;	/* reader wait queue */
-	struct timer_list timer; 	/* reader wake-up timer */
+	struct irq_work wakeup_work;	/* reader wakeup */
 	struct dentry *dentry;		/* channel file dentry */
 	struct kref kref;		/* channel buffer refcount */
 	struct page **page_array;	/* array of current buffer pages */
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 976ce3a..d0efd6e 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -21,6 +21,7 @@
 	struct list_head	list_id;	/* undo requests on this array */
 	int			sem_nsems;	/* no. of semaphores in array */
 	int			complex_count;	/* pending complex operations */
+	bool			complex_mode;	/* no parallel simple ops */
 };
 
 #ifdef CONFIG_SYSVIPC
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 4ccf184..b1bc62b 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -131,6 +131,7 @@
 	struct rpc_auth *	(*create)(struct rpc_auth_create_args *, struct rpc_clnt *);
 	void			(*destroy)(struct rpc_auth *);
 
+	int			(*hash_cred)(struct auth_cred *, unsigned int);
 	struct rpc_cred *	(*lookup_cred)(struct rpc_auth *, struct auth_cred *, int);
 	struct rpc_cred *	(*crcreate)(struct rpc_auth*, struct auth_cred *, int, gfp_t);
 	int			(*list_pseudoflavors)(rpc_authflavor_t *, int);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 5c02b06..85cc819 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -125,6 +125,13 @@
 	struct svc_xprt		*bc_xprt;	/* NFSv4.1 backchannel */
 };
 
+struct rpc_add_xprt_test {
+	int (*add_xprt_test)(struct rpc_clnt *,
+		struct rpc_xprt *,
+		void *calldata);
+	void *data;
+};
+
 /* Values for "flags" field */
 #define RPC_CLNT_CREATE_HARDRTRY	(1UL << 0)
 #define RPC_CLNT_CREATE_AUTOBIND	(1UL << 2)
@@ -198,6 +205,16 @@
 void		rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt,
 			unsigned long timeo);
 
+int		rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *,
+			struct rpc_xprt_switch *,
+			struct rpc_xprt *,
+			void *);
+
 const char *rpc_proc_name(const struct rpc_task *task);
+
+void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
+void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
+bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
+			const struct sockaddr *sap);
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SUNRPC_CLNT_H */
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index 3b1ff38..cfda6adc 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -41,10 +41,15 @@
 #define _LINUX_SUNRPC_RPC_RDMA_H
 
 #include <linux/types.h>
+#include <linux/bitops.h>
 
 #define RPCRDMA_VERSION		1
 #define rpcrdma_version		cpu_to_be32(RPCRDMA_VERSION)
 
+enum {
+	RPCRDMA_V1_DEF_INLINE_SIZE	= 1024,
+};
+
 struct rpcrdma_segment {
 	__be32 rs_handle;	/* Registered memory handle */
 	__be32 rs_length;	/* Length of the chunk in bytes */
@@ -129,4 +134,38 @@
 #define rdma_done	cpu_to_be32(RDMA_DONE)
 #define rdma_error	cpu_to_be32(RDMA_ERROR)
 
+/*
+ * Private extension to RPC-over-RDMA Version One.
+ * Message passed during RDMA-CM connection set-up.
+ *
+ * Add new fields at the end, and don't permute existing
+ * fields.
+ */
+struct rpcrdma_connect_private {
+	__be32			cp_magic;
+	u8			cp_version;
+	u8			cp_flags;
+	u8			cp_send_size;
+	u8			cp_recv_size;
+} __packed;
+
+#define rpcrdma_cmp_magic	__cpu_to_be32(0xf6ab0e18)
+
+enum {
+	RPCRDMA_CMP_VERSION		= 1,
+	RPCRDMA_CMP_F_SND_W_INV_OK	= BIT(0),
+};
+
+static inline u8
+rpcrdma_encode_buffer_size(unsigned int size)
+{
+	return (size >> 10) - 1;
+}
+
+static inline unsigned int
+rpcrdma_decode_buffer_size(u8 val)
+{
+	return ((unsigned int)val + 1) << 10;
+}
+
 #endif				/* _LINUX_SUNRPC_RPC_RDMA_H */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 817af0b..7ba040c 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -239,8 +239,8 @@
 					void *);
 void		rpc_wake_up_status(struct rpc_wait_queue *, int);
 void		rpc_delay(struct rpc_task *, unsigned long);
-void *		rpc_malloc(struct rpc_task *, size_t);
-void		rpc_free(void *);
+int		rpc_malloc(struct rpc_task *);
+void		rpc_free(struct rpc_task *);
 int		rpciod_up(void);
 void		rpciod_down(void);
 int		__rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *);
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index d6917b8..cc3ae16 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -86,6 +86,7 @@
 	unsigned long flags;
 	enum dma_data_direction direction;
 	int count;
+	unsigned int mapped_sges;
 	struct ib_sge sge[RPCSVC_MAXPAGES];
 	struct page *pages[RPCSVC_MAXPAGES];
 };
@@ -136,6 +137,7 @@
 	int		     sc_ord;		/* RDMA read limit */
 	int                  sc_max_sge;
 	int                  sc_max_sge_rd;	/* max sge for read target */
+	bool		     sc_snd_w_inv;	/* OK to use Send With Invalidate */
 
 	atomic_t             sc_sq_count;	/* Number of SQ WR on queue */
 	unsigned int	     sc_sq_depth;	/* Depth of SQ */
@@ -193,6 +195,14 @@
 
 #define RPCSVC_MAXPAYLOAD_RDMA	RPCSVC_MAXPAYLOAD
 
+/* Track DMA maps for this transport and context */
+static inline void svc_rdma_count_mappings(struct svcxprt_rdma *rdma,
+					   struct svc_rdma_op_ctxt *ctxt)
+{
+	ctxt->mapped_sges++;
+	atomic_inc(&rdma->sc_dma_used);
+}
+
 /* svc_rdma_backchannel.c */
 extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
 				    struct rpcrdma_msg *rmsgp,
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 70c6b92..56c48c8 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -67,6 +67,18 @@
 			len;		/* Length of XDR encoded message */
 };
 
+static inline void
+xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
+{
+	buf->head[0].iov_base = start;
+	buf->head[0].iov_len = len;
+	buf->tail[0].iov_len = 0;
+	buf->page_len = 0;
+	buf->flags = 0;
+	buf->len = 0;
+	buf->buflen = len;
+}
+
 /*
  * pre-xdr'ed macros.
  */
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index a16070d..a5da60b 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -83,9 +83,11 @@
 	void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
 	struct list_head	rq_list;
 
-	__u32 *			rq_buffer;	/* XDR encode buffer */
-	size_t			rq_callsize,
-				rq_rcvsize;
+	void			*rq_xprtdata;	/* Per-xprt private data */
+	void			*rq_buffer;	/* Call XDR encode buffer */
+	size_t			rq_callsize;
+	void			*rq_rbuffer;	/* Reply XDR decode buffer */
+	size_t			rq_rcvsize;
 	size_t			rq_xmit_bytes_sent;	/* total bytes sent */
 	size_t			rq_reply_bytes_recvd;	/* total reply bytes */
 							/* received */
@@ -127,8 +129,8 @@
 	void		(*rpcbind)(struct rpc_task *task);
 	void		(*set_port)(struct rpc_xprt *xprt, unsigned short port);
 	void		(*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
-	void *		(*buf_alloc)(struct rpc_task *task, size_t size);
-	void		(*buf_free)(void *buffer);
+	int		(*buf_alloc)(struct rpc_task *task);
+	void		(*buf_free)(struct rpc_task *task);
 	int		(*send_request)(struct rpc_task *task);
 	void		(*set_retrans_timeout)(struct rpc_task *task);
 	void		(*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h
index 5a9acff..507418c 100644
--- a/include/linux/sunrpc/xprtmultipath.h
+++ b/include/linux/sunrpc/xprtmultipath.h
@@ -66,4 +66,6 @@
 extern struct rpc_xprt *xprt_iter_get_xprt(struct rpc_xprt_iter *xpi);
 extern struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi);
 
+extern bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
+		const struct sockaddr *sap);
 #endif
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 39267dc..221b7a2 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -53,8 +53,8 @@
 #define RPCRDMA_MAX_SLOT_TABLE	(256U)
 
 #define RPCRDMA_MIN_INLINE  (1024)	/* min inline thresh */
-#define RPCRDMA_DEF_INLINE  (1024)	/* default inline thresh */
-#define RPCRDMA_MAX_INLINE  (3068)	/* max inline thresh */
+#define RPCRDMA_DEF_INLINE  (4096)	/* default inline thresh */
+#define RPCRDMA_MAX_INLINE  (65536)	/* max inline thresh */
 
 /* Memory registration strategies, by number.
  * This is part of a kernel / user space API. Do not remove. */
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
index c6ffe8b..aa17ccf 100644
--- a/include/linux/sync_file.h
+++ b/include/linux/sync_file.h
@@ -19,12 +19,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/fence.h>
-
-struct sync_file_cb {
-	struct fence_cb cb;
-	struct fence *fence;
-	struct sync_file *sync_file;
-};
+#include <linux/fence-array.h>
 
 /**
  * struct sync_file - sync file to export to the userspace
@@ -32,10 +27,9 @@
  * @kref:		reference count on fence.
  * @name:		name of sync_file.  Useful for debugging
  * @sync_file_list:	membership in global file list
- * @num_fences:		number of sync_pts in the fence
  * @wq:			wait queue for fence signaling
- * @status:		0: signaled, >0:active, <0: error
- * @cbs:		sync_pts callback information
+ * @fence:		fence with the fences in the sync_file
+ * @cb:			fence callback information
  */
 struct sync_file {
 	struct file		*file;
@@ -44,14 +38,16 @@
 #ifdef CONFIG_DEBUG_FS
 	struct list_head	sync_file_list;
 #endif
-	int num_fences;
 
 	wait_queue_head_t	wq;
-	atomic_t		status;
 
-	struct sync_file_cb	cbs[];
+	struct fence		*fence;
+	struct fence_cb cb;
 };
 
+#define POLL_ENABLED FENCE_FLAG_USER_BITS
+
 struct sync_file *sync_file_create(struct fence *fence);
+struct fence *sync_file_get_fence(int fd);
 
 #endif /* _LINUX_SYNC_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index d022390..91a740f 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -898,4 +898,9 @@
 
 asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags);
 
+asmlinkage long sys_pkey_mprotect(unsigned long start, size_t len,
+				  unsigned long prot, int pkey);
+asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val);
+asmlinkage long sys_pkey_free(int pkey);
+
 #endif
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index ee517be..511182a 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -92,12 +92,24 @@
 	THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */
 };
 
+/* Thermal notification reason */
+enum thermal_notify_event {
+	THERMAL_EVENT_UNSPECIFIED, /* Unspecified event */
+	THERMAL_EVENT_TEMP_SAMPLE, /* New Temperature sample */
+	THERMAL_TRIP_VIOLATED, /* TRIP Point violation */
+	THERMAL_TRIP_CHANGED, /* TRIP Point temperature changed */
+	THERMAL_DEVICE_DOWN, /* Thermal device is down */
+	THERMAL_DEVICE_UP, /* Thermal device is up after a down event */
+	THERMAL_DEVICE_POWER_CAPABILITY_CHANGED, /* power capability changed */
+};
+
 struct thermal_zone_device_ops {
 	int (*bind) (struct thermal_zone_device *,
 		     struct thermal_cooling_device *);
 	int (*unbind) (struct thermal_zone_device *,
 		       struct thermal_cooling_device *);
 	int (*get_temp) (struct thermal_zone_device *, int *);
+	int (*set_trips) (struct thermal_zone_device *, int, int);
 	int (*get_mode) (struct thermal_zone_device *,
 			 enum thermal_device_mode *);
 	int (*set_mode) (struct thermal_zone_device *,
@@ -168,6 +180,10 @@
  * @last_temperature:	previous temperature read
  * @emul_temperature:	emulated temperature when using CONFIG_THERMAL_EMULATION
  * @passive:		1 if you've crossed a passive trip point, 0 otherwise.
+ * @prev_low_trip:	the low current temperature if you've crossed a passive
+			trip point.
+ * @prev_high_trip:	the above current temperature if you've crossed a
+			passive trip point.
  * @forced_passive:	If > 0, temperature at which to switch on all ACPI
  *			processor cooling devices.  Currently only used by the
  *			step-wise governor.
@@ -182,6 +198,7 @@
  * @lock:	lock to protect thermal_instances list
  * @node:	node in thermal_tz_list (in thermal_core.c)
  * @poll_queue:	delayed work for polling
+ * @notify_event: Last notification event
  */
 struct thermal_zone_device {
 	int id;
@@ -199,6 +216,8 @@
 	int last_temperature;
 	int emul_temperature;
 	int passive;
+	int prev_low_trip;
+	int prev_high_trip;
 	unsigned int forced_passive;
 	atomic_t need_update;
 	struct thermal_zone_device_ops *ops;
@@ -210,6 +229,7 @@
 	struct mutex lock;
 	struct list_head node;
 	struct delayed_work poll_queue;
+	enum thermal_notify_event notify_event;
 };
 
 /**
@@ -333,6 +353,9 @@
  *
  * Optional:
  * @get_trend: a pointer to a function that reads the sensor temperature trend.
+ * @set_trips: a pointer to a function that sets a temperature window. When
+ *	       this window is left the driver must inform the thermal core via
+ *	       thermal_zone_device_update.
  * @set_emul_temp: a pointer to a function that sets sensor emulated
  *		   temperature.
  * @set_trip_temp: a pointer to a function that sets the trip temperature on
@@ -340,7 +363,8 @@
  */
 struct thermal_zone_of_device_ops {
 	int (*get_temp)(void *, int *);
-	int (*get_trend)(void *, long *);
+	int (*get_trend)(void *, int, enum thermal_trend *);
+	int (*set_trips)(void *, int, int);
 	int (*set_emul_temp)(void *, int);
 	int (*set_trip_temp)(void *, int, int);
 };
@@ -425,7 +449,9 @@
 				     unsigned int);
 int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
 				       struct thermal_cooling_device *);
-void thermal_zone_device_update(struct thermal_zone_device *);
+void thermal_zone_device_update(struct thermal_zone_device *,
+				enum thermal_notify_event);
+void thermal_zone_set_trips(struct thermal_zone_device *);
 
 struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
 		const struct thermal_cooling_device_ops *);
@@ -435,6 +461,8 @@
 void thermal_cooling_device_unregister(struct thermal_cooling_device *);
 struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name);
 int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
+int thermal_zone_get_slope(struct thermal_zone_device *tz);
+int thermal_zone_get_offset(struct thermal_zone_device *tz);
 
 int get_tz_trend(struct thermal_zone_device *, int);
 struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
@@ -473,7 +501,10 @@
 	struct thermal_zone_device *tz, int trip,
 	struct thermal_cooling_device *cdev)
 { return -ENODEV; }
-static inline void thermal_zone_device_update(struct thermal_zone_device *tz)
+static inline void thermal_zone_device_update(struct thermal_zone_device *tz,
+					      enum thermal_notify_event event)
+{ }
+static inline void thermal_zone_set_trips(struct thermal_zone_device *tz)
 { }
 static inline struct thermal_cooling_device *
 thermal_cooling_device_register(char *type, void *devdata,
@@ -492,6 +523,12 @@
 static inline int thermal_zone_get_temp(
 		struct thermal_zone_device *tz, int *temp)
 { return -ENODEV; }
+static inline int thermal_zone_get_slope(
+		struct thermal_zone_device *tz)
+{ return -ENODEV; }
+static inline int thermal_zone_get_offset(
+		struct thermal_zone_device *tz)
+{ return -ENODEV; }
 static inline int get_tz_trend(struct thermal_zone_device *tz, int trip)
 { return -ENODEV; }
 static inline struct thermal_instance *
diff --git a/include/linux/uio.h b/include/linux/uio.h
index b5ebe6d..6e22b54 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -82,7 +82,6 @@
 		struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-#define iov_iter_fault_in_multipages_readable iov_iter_fault_in_readable
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i);
@@ -110,12 +109,12 @@
 
 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
 
-static inline size_t iov_iter_count(struct iov_iter *i)
+static inline size_t iov_iter_count(const struct iov_iter *i)
 {
 	return i->count;
 }
 
-static inline bool iter_is_iovec(struct iov_iter *i)
+static inline bool iter_is_iovec(const struct iov_iter *i)
 {
 	return !(i->type & (ITER_BVEC | ITER_KVEC | ITER_PIPE));
 }
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 8c3b412..ee162e3 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -73,34 +73,6 @@
 					   unsigned int decodes) { };
 #endif
 
-/**
- *     vga_get         - acquire & locks VGA resources
- *
- *     @pdev: pci device of the VGA card or NULL for the system default
- *     @rsrc: bit mask of resources to acquire and lock
- *     @interruptible: blocking should be interruptible by signals ?
- *
- *     This function acquires VGA resources for the given
- *     card and mark those resources locked. If the resource requested
- *     are "normal" (and not legacy) resources, the arbiter will first check
- *     whether the card is doing legacy decoding for that type of resource. If
- *     yes, the lock is "converted" into a legacy resource lock.
- *     The arbiter will first look for all VGA cards that might conflict
- *     and disable their IOs and/or Memory access, including VGA forwarding
- *     on P2P bridges if necessary, so that the requested resources can
- *     be used. Then, the card is marked as locking these resources and
- *     the IO and/or Memory accesse are enabled on the card (including
- *     VGA forwarding on parent P2P bridges if any).
- *     This function will block if some conflicting card is already locking
- *     one of the required resources (or any resource on a different bus
- *     segment, since P2P bridges don't differenciate VGA memory and IO
- *     afaik). You can indicate whether this blocking should be interruptible
- *     by a signal (for userland interface) or not.
- *     Must not be called at interrupt time or in atomic context.
- *     If the card already owns the resources, the function succeeds.
- *     Nested calls are supported (a per-resource counter is maintained)
- */
-
 #if defined(CONFIG_VGA_ARB)
 extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible);
 #else
@@ -108,11 +80,14 @@
 #endif
 
 /**
- *     vga_get_interruptible
+ * vga_get_interruptible
+ * @pdev: pci device of the VGA card or NULL for the system default
+ * @rsrc: bit mask of resources to acquire and lock
  *
- *     Shortcut to vga_get
+ * Shortcut to vga_get with interruptible set to true.
+ *
+ * On success, release the VGA resource again with vga_put().
  */
-
 static inline int vga_get_interruptible(struct pci_dev *pdev,
 					unsigned int rsrc)
 {
@@ -120,47 +95,26 @@
 }
 
 /**
- *     vga_get_uninterruptible
+ * vga_get_uninterruptible - shortcut to vga_get()
+ * @pdev: pci device of the VGA card or NULL for the system default
+ * @rsrc: bit mask of resources to acquire and lock
  *
- *     Shortcut to vga_get
+ * Shortcut to vga_get with interruptible set to false.
+ *
+ * On success, release the VGA resource again with vga_put().
  */
-
 static inline int vga_get_uninterruptible(struct pci_dev *pdev,
 					  unsigned int rsrc)
 {
        return vga_get(pdev, rsrc, 0);
 }
 
-/**
- *     vga_tryget      - try to acquire & lock legacy VGA resources
- *
- *     @pdev: pci devivce of VGA card or NULL for system default
- *     @rsrc: bit mask of resources to acquire and lock
- *
- *     This function performs the same operation as vga_get(), but
- *     will return an error (-EBUSY) instead of blocking if the resources
- *     are already locked by another card. It can be called in any context
- */
-
 #if defined(CONFIG_VGA_ARB)
 extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
 #else
 static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; }
 #endif
 
-/**
- *     vga_put         - release lock on legacy VGA resources
- *
- *     @pdev: pci device of VGA card or NULL for system default
- *     @rsrc: but mask of resource to release
- *
- *     This function releases resources previously locked by vga_get()
- *     or vga_tryget(). The resources aren't disabled right away, so
- *     that a subsequence vga_get() on the same card will succeed
- *     immediately. Resources have a counter, so locks are only
- *     released if the counter reaches 0.
- */
-
 #if defined(CONFIG_VGA_ARB)
 extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
 #else
@@ -168,25 +122,6 @@
 #endif
 
 
-/**
- *     vga_default_device
- *
- *     This can be defined by the platform. The default implementation
- *     is rather dumb and will probably only work properly on single
- *     vga card setups and/or x86 platforms.
- *
- *     If your VGA default device is not PCI, you'll have to return
- *     NULL here. In this case, I assume it will not conflict with
- *     any PCI card. If this is not true, I'll have to define two archs
- *     hooks for enabling/disabling the VGA default device if that is
- *     possible. This may be a problem with real _ISA_ VGA cards, in
- *     addition to a PCI one. I don't know at this point how to deal
- *     with that card. Can theirs IOs be disabled at all ? If not, then
- *     I suppose it's a matter of having the proper arch hook telling
- *     us about it, so we basically never allow anybody to succeed a
- *     vga_get()...
- */
-
 #ifdef CONFIG_VGA_ARB
 extern struct pci_dev *vga_default_device(void);
 extern void vga_set_default_device(struct pci_dev *pdev);
@@ -195,14 +130,11 @@
 static inline void vga_set_default_device(struct pci_dev *pdev) { };
 #endif
 
-/**
- *     vga_conflicts
- *
- *     Architectures should define this if they have several
- *     independent PCI domains that can afford concurrent VGA
- *     decoding
+/*
+ * Architectures should define this if they have several
+ * independent PCI domains that can afford concurrent VGA
+ * decoding
  */
-
 #ifndef __ARCH_HAS_VGA_CONFLICT
 static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2)
 {
@@ -210,34 +142,6 @@
 }
 #endif
 
-/**
- *	vga_client_register
- *
- *	@pdev: pci device of the VGA client
- *	@cookie: client cookie to be used in callbacks
- *	@irq_set_state: irq state change callback
- *	@set_vga_decode: vga decode change callback
- *
- * 	return value: 0 on success, -1 on failure
- * 	Register a client with the VGA arbitration logic
- *
- *	Clients have two callback mechanisms they can use.
- *	irq enable/disable callback -
- *		If a client can't disable its GPUs VGA resources, then we
- *		need to be able to ask it to turn off its irqs when we
- *		turn off its mem and io decoding.
- *	set_vga_decode
- *		If a client can disable its GPU VGA resource, it will
- *		get a callback from this to set the encode/decode state
- *
- * Rationale: we cannot disable VGA decode resources unconditionally
- * some single GPU laptops seem to require ACPI or BIOS access to the
- * VGA registers to control things like backlights etc.
- * Hopefully newer multi-GPU laptops do something saner, and desktops
- * won't have any special ACPI for this.
- * They driver will get a callback when VGA arbitration is first used
- * by userspace since we some older X servers have issues.
- */
 #if defined(CONFIG_VGA_ARB)
 int vga_client_register(struct pci_dev *pdev, void *cookie,
 			void (*irq_set_state)(void *cookie, bool state),
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 7047bc7..35a4d81 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -19,6 +19,7 @@
 struct watchdog_ops;
 struct watchdog_device;
 struct watchdog_core_data;
+struct watchdog_governor;
 
 /** struct watchdog_ops - The watchdog-devices operations
  *
@@ -28,6 +29,7 @@
  * @ping:	The routine that sends a keepalive ping to the watchdog device.
  * @status:	The routine that shows the status of the watchdog device.
  * @set_timeout:The routine for setting the watchdog devices timeout value (in seconds).
+ * @set_pretimeout:The routine for setting the watchdog devices pretimeout.
  * @get_timeleft:The routine that gets the time left before a reset (in seconds).
  * @restart:	The routine for restarting the machine.
  * @ioctl:	The routines that handles extra ioctl calls.
@@ -46,6 +48,7 @@
 	int (*ping)(struct watchdog_device *);
 	unsigned int (*status)(struct watchdog_device *);
 	int (*set_timeout)(struct watchdog_device *, unsigned int);
+	int (*set_pretimeout)(struct watchdog_device *, unsigned int);
 	unsigned int (*get_timeleft)(struct watchdog_device *);
 	int (*restart)(struct watchdog_device *, unsigned long, void *);
 	long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long);
@@ -59,8 +62,10 @@
  *		watchdog device.
  * @info:	Pointer to a watchdog_info structure.
  * @ops:	Pointer to the list of watchdog operations.
+ * @gov:	Pointer to watchdog pretimeout governor.
  * @bootstatus:	Status of the watchdog device at boot.
  * @timeout:	The watchdog devices timeout value (in seconds).
+ * @pretimeout: The watchdog devices pre_timeout value.
  * @min_timeout:The watchdog devices minimum timeout value (in seconds).
  * @max_timeout:The watchdog devices maximum timeout value (in seconds)
  *		as configurable from user space. Only relevant if
@@ -94,8 +99,10 @@
 	const struct attribute_group **groups;
 	const struct watchdog_info *info;
 	const struct watchdog_ops *ops;
+	const struct watchdog_governor *gov;
 	unsigned int bootstatus;
 	unsigned int timeout;
+	unsigned int pretimeout;
 	unsigned int min_timeout;
 	unsigned int max_timeout;
 	unsigned int min_hw_heartbeat_ms;
@@ -163,6 +170,13 @@
 		 t > wdd->max_timeout);
 }
 
+/* Use the following function to check if a pretimeout value is invalid */
+static inline bool watchdog_pretimeout_invalid(struct watchdog_device *wdd,
+					       unsigned int t)
+{
+	return t && wdd->timeout && t >= wdd->timeout;
+}
+
 /* Use the following functions to manipulate watchdog driver specific data */
 static inline void watchdog_set_drvdata(struct watchdog_device *wdd, void *data)
 {
@@ -174,6 +188,16 @@
 	return wdd->driver_data;
 }
 
+/* Use the following functions to report watchdog pretimeout event */
+#if IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV)
+void watchdog_notify_pretimeout(struct watchdog_device *wdd);
+#else
+static inline void watchdog_notify_pretimeout(struct watchdog_device *wdd)
+{
+	pr_alert("watchdog%d: pretimeout event\n", wdd->id);
+}
+#endif
+
 /* drivers/watchdog/watchdog_core.c */
 void watchdog_set_restart_priority(struct watchdog_device *wdd, int priority);
 extern int watchdog_init_timeout(struct watchdog_device *wdd,
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 94079ba..e77605a 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -46,17 +46,16 @@
 };
 
 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
+ssize_t __vfs_getxattr(struct dentry *, struct inode *, const char *, void *, size_t);
 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
+int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int);
 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
 int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
+int __vfs_removexattr(struct dentry *, const char *);
 int vfs_removexattr(struct dentry *, const char *);
 
-ssize_t generic_getxattr(struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size);
 ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
-int generic_setxattr(struct dentry *dentry, struct inode *inode,
-		     const char *name, const void *value, size_t size, int flags);
-int generic_removexattr(struct dentry *dentry, const char *name);
 ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name,
 			   char **xattr_value, size_t size, gfp_t flags);
 
diff --git a/include/media/drv-intf/sh_mobile_ceu.h b/include/media/drv-intf/sh_mobile_ceu.h
index 7f57056..2f43f7d 100644
--- a/include/media/drv-intf/sh_mobile_ceu.h
+++ b/include/media/drv-intf/sh_mobile_ceu.h
@@ -21,7 +21,6 @@
 	unsigned long flags;
 	int max_width;
 	int max_height;
-	struct sh_mobile_ceu_companion *csi2;
 	struct v4l2_async_subdev **asd;	/* Flat array, arranged in groups */
 	unsigned int *asd_sizes;	/* 0-terminated array pf asd group sizes */
 };
diff --git a/include/media/drv-intf/sh_mobile_csi2.h b/include/media/drv-intf/sh_mobile_csi2.h
deleted file mode 100644
index 14030db..0000000
--- a/include/media/drv-intf/sh_mobile_csi2.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Driver header for the SH-Mobile MIPI CSI-2 unit
- *
- * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef SH_MIPI_CSI
-#define SH_MIPI_CSI
-
-#include <linux/list.h>
-
-enum sh_csi2_phy {
-	SH_CSI2_PHY_MAIN,
-	SH_CSI2_PHY_SUB,
-};
-
-enum sh_csi2_type {
-	SH_CSI2C,
-	SH_CSI2I,
-};
-
-#define SH_CSI2_CRC	(1 << 0)
-#define SH_CSI2_ECC	(1 << 1)
-
-struct platform_device;
-
-struct sh_csi2_client_config {
-	enum sh_csi2_phy phy;
-	unsigned char lanes;		/* bitmask[3:0] */
-	unsigned char channel;		/* 0..3 */
-	struct platform_device *pdev;	/* client platform device */
-	const char *name;		/* async matching: client name */
-};
-
-struct v4l2_device;
-
-struct sh_csi2_pdata {
-	enum sh_csi2_type type;
-	unsigned int flags;
-	struct sh_csi2_client_config *clients;
-	int num_clients;
-};
-
-#endif
diff --git a/include/media/i2c/smiapp.h b/include/media/i2c/smiapp.h
index 029142d..635007e 100644
--- a/include/media/i2c/smiapp.h
+++ b/include/media/i2c/smiapp.h
@@ -36,8 +36,6 @@
 #define SMIAPP_CSI_SIGNALLING_MODE_CCP2_DATA_STROBE	1
 #define SMIAPP_CSI_SIGNALLING_MODE_CSI2			2
 
-#define SMIAPP_NO_XSHUTDOWN	-1
-
 /*
  * Sometimes due to board layout considerations the camera module can be
  * mounted rotated. The typical rotation used is 180 degrees which can be
@@ -57,7 +55,7 @@
 	u8 trigger;
 };
 
-struct smiapp_platform_data {
+struct smiapp_hwconfig {
 	/*
 	 * Change the cci address if i2c_addr_alt is set.
 	 * Both default and alternate cci addr need to be present
@@ -75,9 +73,6 @@
 	enum smiapp_module_board_orient module_board_orient;
 
 	struct smiapp_flash_strobe_parms *strobe_setup;
-
-	int (*set_xclk)(struct v4l2_subdev *sd, int hz);
-	int32_t xshutdown;		/* gpio or SMIAPP_NO_XSHUTDOWN */
 };
 
 #endif /* __SMIAPP_H_  */
diff --git a/include/media/media-device.h b/include/media/media-device.h
index 2819524..ef93e21 100644
--- a/include/media/media-device.h
+++ b/include/media/media-device.h
@@ -49,11 +49,21 @@
 };
 
 /**
+ * struct media_device_ops - Media device operations
+ * @link_notify: Link state change notification callback. This callback is
+ *		 called with the graph_mutex held.
+ */
+struct media_device_ops {
+	int (*link_notify)(struct media_link *link, u32 flags,
+			   unsigned int notification);
+};
+
+/**
  * struct media_device - Media device
  * @dev:	Parent device
  * @devnode:	Media device node
  * @driver_name: Optional device driver name. If not set, calls to
- *		%MEDIA_IOC_DEVICE_INFO will return dev->driver->name.
+ *		%MEDIA_IOC_DEVICE_INFO will return ``dev->driver->name``.
  *		This is needed for USB drivers for example, as otherwise
  *		they'll all appear as if the driver name was "usb".
  * @model:	Device model name
@@ -80,8 +90,7 @@
  * @enable_source: Enable Source Handler function pointer
  * @disable_source: Disable Source Handler function pointer
  *
- * @link_notify: Link state change notification callback. This callback is
- *		 called with the graph_mutex held.
+ * @ops:	Operation handler callbacks
  *
  * This structure represents an abstract high-level media device. It allows easy
  * access to entities and provides basic media device-level support. The
@@ -102,16 +111,18 @@
  * sink entity  and deactivate the link between them. Drivers
  * should call this handler to release the source.
  *
- * Note: Bridge driver is expected to implement and set the
- * handler when media_device is registered or when
- * bridge driver finds the media_device during probe.
- * Bridge driver sets source_priv with information
- * necessary to run enable/disable source handlers.
- *
  * Use-case: find tuner entity connected to the decoder
  * entity and check if it is available, and activate the
- * the link between them from enable_source and deactivate
- * from disable_source.
+ * the link between them from @enable_source and deactivate
+ * from @disable_source.
+ *
+ * .. note::
+ *
+ *    Bridge driver is expected to implement and set the
+ *    handler when &media_device is registered or when
+ *    bridge driver finds the media_device during probe.
+ *    Bridge driver sets source_priv with information
+ *    necessary to run @enable_source and @disable_source handlers.
  */
 struct media_device {
 	/* dev->driver_data points to this struct. */
@@ -148,8 +159,7 @@
 			     struct media_pipeline *pipe);
 	void (*disable_source)(struct media_entity *entity);
 
-	int (*link_notify)(struct media_link *link, u32 flags,
-			   unsigned int notification);
+	const struct media_device_ops *ops;
 };
 
 /* We don't need to include pci.h or usb.h here */
@@ -168,7 +178,7 @@
  * @ent_enum: Entity enumeration to be initialised
  * @mdev: The related media device
  *
- * Returns zero on success or a negative error code.
+ * Return: zero on success or a negative error code.
  */
 static inline __must_check int media_entity_enum_init(
 	struct media_entity_enum *ent_enum, struct media_device *mdev)
@@ -211,36 +221,38 @@
  *
  * Users, should, instead, call the media_device_register() macro.
  *
- * The caller is responsible for initializing the media_device structure before
- * registration. The following fields must be set:
+ * The caller is responsible for initializing the &media_device structure
+ * before registration. The following fields of &media_device must be set:
  *
- *  - dev must point to the parent device (usually a &pci_dev, &usb_interface or
- *    &platform_device instance).
+ *  - &media_entity.dev must point to the parent device (usually a &pci_dev,
+ *    &usb_interface or &platform_device instance).
  *
- *  - model must be filled with the device model name as a NUL-terminated UTF-8
- *    string. The device/model revision must not be stored in this field.
+ *  - &media_entity.model must be filled with the device model name as a
+ *    NUL-terminated UTF-8 string. The device/model revision must not be
+ *    stored in this field.
  *
  * The following fields are optional:
  *
- *  - serial is a unique serial number stored as a NUL-terminated ASCII string.
- *    The field is big enough to store a GUID in text form. If the hardware
- *    doesn't provide a unique serial number this field must be left empty.
+ *  - &media_entity.serial is a unique serial number stored as a
+ *    NUL-terminated ASCII string. The field is big enough to store a GUID
+ *    in text form. If the hardware doesn't provide a unique serial number
+ *    this field must be left empty.
  *
- *  - bus_info represents the location of the device in the system as a
- *    NUL-terminated ASCII string. For PCI/PCIe devices bus_info must be set to
- *    "PCI:" (or "PCIe:") followed by the value of pci_name(). For USB devices,
- *    the usb_make_path() function must be used. This field is used by
- *    applications to distinguish between otherwise identical devices that don't
- *    provide a serial number.
+ *  - &media_entity.bus_info represents the location of the device in the
+ *    system as a NUL-terminated ASCII string. For PCI/PCIe devices
+ *    &media_entity.bus_info must be set to "PCI:" (or "PCIe:") followed by
+ *    the value of pci_name(). For USB devices,the usb_make_path() function
+ *    must be used. This field is used by applications to distinguish between
+ *    otherwise identical devices that don't provide a serial number.
  *
- *  - hw_revision is the hardware device revision in a driver-specific format.
- *    When possible the revision should be formatted with the KERNEL_VERSION
- *    macro.
+ *  - &media_entity.hw_revision is the hardware device revision in a
+ *    driver-specific format. When possible the revision should be formatted
+ *    with the KERNEL_VERSION() macro.
  *
- *  - driver_version is formatted with the KERNEL_VERSION macro. The version
- *    minor must be incremented when new features are added to the userspace API
- *    without breaking binary compatibility. The version major must be
- *    incremented when binary compatibility is broken.
+ *  - &media_entity.driver_version is formatted with the KERNEL_VERSION()
+ *    macro. The version minor must be incremented when new features are added
+ *    to the userspace API without breaking binary compatibility. The version
+ *    major must be incremented when binary compatibility is broken.
  *
  * .. note::
  *
@@ -252,6 +264,16 @@
  */
 int __must_check __media_device_register(struct media_device *mdev,
 					 struct module *owner);
+
+
+/**
+ * media_device_register() - Registers a media device element
+ *
+ * @mdev:	pointer to struct &media_device
+ *
+ * This macro calls __media_device_register() passing %THIS_MODULE as
+ * the __media_device_register() second argument (**owner**).
+ */
 #define media_device_register(mdev) __media_device_register(mdev, THIS_MODULE)
 
 /**
@@ -259,7 +281,6 @@
  *
  * @mdev:	pointer to struct &media_device
  *
- *
  * It is safe to call this function on an unregistered (but initialised)
  * media device.
  */
@@ -285,14 +306,15 @@
  * framework.
  *
  * If the device has pads, media_entity_pads_init() should be called before
- * this function. Otherwise, the &media_entity.@pad and &media_entity.@num_pads
+ * this function. Otherwise, the &media_entity.pad and &media_entity.num_pads
  * should be zeroed before calling this function.
  *
  * Entities have flags that describe the entity capabilities and state:
  *
- * %MEDIA_ENT_FL_DEFAULT indicates the default entity for a given type.
- *	This can be used to report the default audio and video devices or the
- *	default camera sensor.
+ * %MEDIA_ENT_FL_DEFAULT
+ *    indicates the default entity for a given type.
+ *    This can be used to report the default audio and video devices or the
+ *    default camera sensor.
  *
  * .. note::
  *
@@ -331,8 +353,10 @@
  * @mdev:      The media device
  * @nptr:      The media_entity_notify
  *
- * Note: When a new entity is registered, all the registered
- * media_entity_notify callbacks are invoked.
+ * .. note::
+ *
+ *    When a new entity is registered, all the registered
+ *    media_entity_notify callbacks are invoked.
  */
 
 int __must_check media_device_register_entity_notify(struct media_device *mdev,
@@ -410,11 +434,13 @@
  * @board_name:	media device name. If %NULL, the routine will use the usb
  *		product name, if available.
  * @driver_name: name of the driver. if %NULL, the routine will use the name
- *		given by udev->dev->driver->name, with is usually the wrong
+ *		given by ``udev->dev->driver->name``, with is usually the wrong
  *		thing to do.
  *
- * NOTE: It is better to call media_device_usb_init() instead, as
- * such macro fills driver_name with %KBUILD_MODNAME.
+ * .. note::
+ *
+ *    It is better to call media_device_usb_init() instead, as
+ *    such macro fills driver_name with %KBUILD_MODNAME.
  */
 void __media_device_usb_init(struct media_device *mdev,
 			     struct usb_device *udev,
@@ -472,6 +498,19 @@
 
 #endif /* CONFIG_MEDIA_CONTROLLER */
 
+/**
+ * media_device_usb_init() - create and initialize a
+ *	struct &media_device from a PCI device.
+ *
+ * @mdev:	pointer to struct &media_device
+ * @udev:	pointer to struct usb_device
+ * @name:	media device name. If %NULL, the routine will use the usb
+ *		product name, if available.
+ *
+ * This macro calls media_device_usb_init() passing the
+ * media_device_usb_init() **driver_name** parameter filled with
+ * %KBUILD_MODNAME.
+ */
 #define media_device_usb_init(mdev, udev, name) \
 	__media_device_usb_init(mdev, udev, name, KBUILD_MODNAME)
 
diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h
index 37d4948..cd23e91 100644
--- a/include/media/media-devnode.h
+++ b/include/media/media-devnode.h
@@ -75,8 +75,9 @@
  * @cdev:	struct cdev pointer character device
  * @parent:	parent device
  * @minor:	device node minor number
- * @flags:	flags, combination of the MEDIA_FLAG_* constants
- * @release:	release callback called at the end of media_devnode_release()
+ * @flags:	flags, combination of the ``MEDIA_FLAG_*`` constants
+ * @release:	release callback called at the end of ``media_devnode_release()``
+ *		routine at media-device.c.
  *
  * This structure represents a media-related device node.
  *
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 09b03c1..b2203ee 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -56,7 +56,7 @@
 /**
  * struct media_gobj - Define a graph object.
  *
- * @mdev:	Pointer to the struct media_device that owns the object
+ * @mdev:	Pointer to the struct &media_device that owns the object
  * @id:		Non-zero object ID identifier. The ID should be unique
  *		inside a media_device, as it is composed by
  *		%MEDIA_BITS_PER_TYPE to store the type plus
@@ -129,7 +129,7 @@
  *		an interface.
  * @gobj1:	Part of a union. Used to get the pointer for the second
  *		graph_object of the link.
- * @source:	Part of a union. Used only if the second object (gobj1) is
+ * @sink:	Part of a union. Used only if the second object (gobj1) is
  *		a pad. In that case, it represents the sink pad.
  * @entity:	Part of a union. Used only if the second object (gobj1) is
  *		an entity.
@@ -162,7 +162,9 @@
  * @graph_obj:	Embedded structure containing the media object common data
  * @entity:	Entity this pad belongs to
  * @index:	Pad index in the entity pads array, numbered from 0 to n
- * @flags:	Pad flags, as defined in uapi/media.h (MEDIA_PAD_FL_*)
+ * @flags:	Pad flags, as defined in
+ *		:ref:`include/uapi/linux/media.h <media_header>`
+ *		(seek for ``MEDIA_PAD_FL_*``)
  */
 struct media_pad {
 	struct media_gobj graph_obj;	/* must be first field in struct */
@@ -182,7 +184,7 @@
  *
  * .. note::
  *
- *    Those these callbacks are called with struct media_device.@graph_mutex
+ *    Those these callbacks are called with struct &media_device.graph_mutex
  *    mutex held.
  */
 struct media_entity_operations {
@@ -210,7 +212,7 @@
  * This allows runtime type identification of media entities and safe casting to
  * the correct object type. For instance, a media entity structure instance
  * embedded in a v4l2_subdev structure instance will have the type
- * MEDIA_ENTITY_TYPE_V4L2_SUBDEV and can safely be cast to a v4l2_subdev
+ * %MEDIA_ENTITY_TYPE_V4L2_SUBDEV and can safely be cast to a &v4l2_subdev
  * structure using the container_of() macro.
  */
 enum media_entity_type {
@@ -225,9 +227,12 @@
  * @graph_obj:	Embedded structure containing the media object common data.
  * @name:	Entity name.
  * @obj_type:	Type of the object that implements the media_entity.
- * @function:	Entity main function, as defined in uapi/media.h
- *		(MEDIA_ENT_F_*)
- * @flags:	Entity flags, as defined in uapi/media.h (MEDIA_ENT_FL_*)
+ * @function:	Entity main function, as defined in
+ *		:ref:`include/uapi/linux/media.h <media_header>`
+ *		(seek for ``MEDIA_ENT_F_*``)
+ * @flags:	Entity flags, as defined in
+ *		:ref:`include/uapi/linux/media.h <media_header>`
+ *		(seek for ``MEDIA_ENT_FL_*``)
  * @num_pads:	Number of sink and source pads.
  * @num_links:	Total number of links, forward and back, enabled and disabled.
  * @num_backlinks: Number of backlinks
@@ -246,9 +251,12 @@
  * @minor:	Devnode minor number (zero if not applicable). Kept just
  *		for backward compatibility.
  *
- * NOTE: @stream_count and @use_count reference counts must never be
- * negative, but are signed integers on purpose: a simple WARN_ON(<0) check
- * can be used to detect reference count bugs that would make them negative.
+ * .. note::
+ *
+ *    @stream_count and @use_count reference counts must never be
+ *    negative, but are signed integers on purpose: a simple ``WARN_ON(<0)``
+ *    check can be used to detect reference count bugs that would make them
+ *    negative.
  */
 struct media_entity {
 	struct media_gobj graph_obj;	/* must be first field in struct */
@@ -267,10 +275,6 @@
 
 	const struct media_entity_operations *ops;
 
-	/* Reference counts must never be negative, but are signed integers on
-	 * purpose: a simple WARN_ON(<0) check can be used to detect reference
-	 * count bugs that would make them negative.
-	 */
 	int stream_count;
 	int use_count;
 
@@ -289,10 +293,16 @@
  *
  * @graph_obj:		embedded graph object
  * @links:		List of links pointing to graph entities
- * @type:		Type of the interface as defined in the
- *			uapi/media/media.h header, e. g.
- *			MEDIA_INTF_T_*
- * @flags:		Interface flags as defined in uapi/media/media.h
+ * @type:		Type of the interface as defined in
+ *			:ref:`include/uapi/linux/media.h <media_header>`
+ *			(seek for ``MEDIA_INTF_T_*``)
+ * @flags:		Interface flags as defined in
+ *			:ref:`include/uapi/linux/media.h <media_header>`
+ *			(seek for ``MEDIA_INTF_FL_*``)
+ *
+ * .. note::
+ *
+ *    Currently, no flags for &media_interface is defined.
  */
 struct media_interface {
 	struct media_gobj		graph_obj;
@@ -319,7 +329,7 @@
 /**
  * media_entity_id() - return the media entity graph object id
  *
- * @entity:	pointer to entity
+ * @entity:	pointer to &media_entity
  */
 static inline u32 media_entity_id(struct media_entity *entity)
 {
@@ -329,7 +339,7 @@
 /**
  * media_type() - return the media object type
  *
- * @gobj:	pointer to the media graph object
+ * @gobj:	Pointer to the struct &media_gobj graph object
  */
 static inline enum media_gobj_type media_type(struct media_gobj *gobj)
 {
@@ -339,7 +349,7 @@
 /**
  * media_id() - return the media object ID
  *
- * @gobj:	pointer to the media graph object
+ * @gobj:	Pointer to the struct &media_gobj graph object
  */
 static inline u32 media_id(struct media_gobj *gobj)
 {
@@ -350,7 +360,7 @@
  * media_gobj_gen_id() - encapsulates type and ID on at the object ID
  *
  * @type:	object type as define at enum &media_gobj_type.
- * @local_id:	next ID, from struct &media_device.@id.
+ * @local_id:	next ID, from struct &media_device.id.
  */
 static inline u32 media_gobj_gen_id(enum media_gobj_type type, u64 local_id)
 {
@@ -366,9 +376,9 @@
  * is_media_entity_v4l2_video_device() - Check if the entity is a video_device
  * @entity:	pointer to entity
  *
- * Return: true if the entity is an instance of a video_device object and can
+ * Return: %true if the entity is an instance of a video_device object and can
  * safely be cast to a struct video_device using the container_of() macro, or
- * false otherwise.
+ * %false otherwise.
  */
 static inline bool is_media_entity_v4l2_video_device(struct media_entity *entity)
 {
@@ -379,9 +389,9 @@
  * is_media_entity_v4l2_subdev() - Check if the entity is a v4l2_subdev
  * @entity:	pointer to entity
  *
- * Return: true if the entity is an instance of a v4l2_subdev object and can
- * safely be cast to a struct v4l2_subdev using the container_of() macro, or
- * false otherwise.
+ * Return: %true if the entity is an instance of a &v4l2_subdev object and can
+ * safely be cast to a struct &v4l2_subdev using the container_of() macro, or
+ * %false otherwise.
  */
 static inline bool is_media_entity_v4l2_subdev(struct media_entity *entity)
 {
@@ -452,7 +462,7 @@
  * @ent_enum: Entity enumeration
  * @entity: Entity to be tested
  *
- * Returns true if the entity was marked.
+ * Returns %true if the entity was marked.
  */
 static inline bool media_entity_enum_test(struct media_entity_enum *ent_enum,
 					  struct media_entity *entity)
@@ -464,12 +474,13 @@
 }
 
 /**
- * media_entity_enum_test - Test whether the entity is marked, and mark it
+ * media_entity_enum_test_and_set - Test whether the entity is marked,
+ *	and mark it
  *
  * @ent_enum: Entity enumeration
  * @entity: Entity to be tested
  *
- * Returns true if the entity was marked, and mark it before doing so.
+ * Returns %true if the entity was marked, and mark it before doing so.
  */
 static inline bool
 media_entity_enum_test_and_set(struct media_entity_enum *ent_enum,
@@ -486,7 +497,7 @@
  *
  * @ent_enum: Entity enumeration
  *
- * Returns true if the entity was marked.
+ * Return: %true if the entity was empty.
  */
 static inline bool media_entity_enum_empty(struct media_entity_enum *ent_enum)
 {
@@ -499,7 +510,8 @@
  * @ent_enum1: First entity enumeration
  * @ent_enum2: Second entity enumeration
  *
- * Returns true if entity enumerations e and f intersect, otherwise false.
+ * Return: %true if entity enumerations @ent_enum1 and @ent_enum2 intersect,
+ * otherwise %false.
  */
 static inline bool media_entity_enum_intersects(
 	struct media_entity_enum *ent_enum1,
@@ -511,39 +523,63 @@
 				 min(ent_enum1->idx_max, ent_enum2->idx_max));
 }
 
+/**
+ * gobj_to_entity - returns the struct &media_entity pointer from the
+ *	@gobj contained on it.
+ *
+ * @gobj: Pointer to the struct &media_gobj graph object
+ */
 #define gobj_to_entity(gobj) \
 		container_of(gobj, struct media_entity, graph_obj)
 
+/**
+ * gobj_to_pad - returns the struct &media_pad pointer from the
+ *	@gobj contained on it.
+ *
+ * @gobj: Pointer to the struct &media_gobj graph object
+ */
 #define gobj_to_pad(gobj) \
 		container_of(gobj, struct media_pad, graph_obj)
 
+/**
+ * gobj_to_link - returns the struct &media_link pointer from the
+ *	@gobj contained on it.
+ *
+ * @gobj: Pointer to the struct &media_gobj graph object
+ */
 #define gobj_to_link(gobj) \
 		container_of(gobj, struct media_link, graph_obj)
 
-#define gobj_to_link(gobj) \
-		container_of(gobj, struct media_link, graph_obj)
-
-#define gobj_to_pad(gobj) \
-		container_of(gobj, struct media_pad, graph_obj)
-
+/**
+ * gobj_to_intf - returns the struct &media_interface pointer from the
+ *	@gobj contained on it.
+ *
+ * @gobj: Pointer to the struct &media_gobj graph object
+ */
 #define gobj_to_intf(gobj) \
 		container_of(gobj, struct media_interface, graph_obj)
 
+/**
+ * intf_to_devnode - returns the struct media_intf_devnode pointer from the
+ *	@intf contained on it.
+ *
+ * @intf: Pointer to struct &media_intf_devnode
+ */
 #define intf_to_devnode(intf) \
 		container_of(intf, struct media_intf_devnode, intf)
 
 /**
  *  media_gobj_create - Initialize a graph object
  *
- * @mdev:	Pointer to the media_device that contains the object
+ * @mdev:	Pointer to the &media_device that contains the object
  * @type:	Type of the object
- * @gobj:	Pointer to the graph object
+ * @gobj:	Pointer to the struct &media_gobj graph object
  *
- * This routine initializes the embedded struct media_gobj inside a
- * media graph object. It is called automatically if media_*_create\(\)
- * calls are used. However, if the object (entity, link, pad, interface)
- * is embedded on some other object, this function should be called before
- * registering the object at the media controller.
+ * This routine initializes the embedded struct &media_gobj inside a
+ * media graph object. It is called automatically if ``media_*_create``
+ * function calls are used. However, if the object (entity, link, pad,
+ * interface) is embedded on some other object, this function should be
+ * called before registering the object at the media controller.
  */
 void media_gobj_create(struct media_device *mdev,
 		    enum media_gobj_type type,
@@ -552,7 +588,7 @@
 /**
  *  media_gobj_destroy - Stop using a graph object on a media device
  *
- * @gobj:	Pointer to the graph object
+ * @gobj:	Pointer to the struct &media_gobj graph object
  *
  * This should be called by all routines like media_device_unregister()
  * that remove/destroy media graph objects.
@@ -567,11 +603,11 @@
  * @pads:	Array of @num_pads pads.
  *
  * The pads array is managed by the entity driver and passed to
- * media_entity_pads_init() where its pointer will be stored in the entity
- * structure.
+ * media_entity_pads_init() where its pointer will be stored in the
+ * &media_entity structure.
  *
  * If no pads are needed, drivers could either directly fill
- * &media_entity->@num_pads with 0 and &media_entity->@pads with NULL or call
+ * &media_entity->num_pads with 0 and &media_entity->pads with %NULL or call
  * this function that will do the same.
  *
  * As the number of pads is known in advance, the pads array is not allocated
@@ -601,18 +637,21 @@
  * @source_pad:	number of the source pad in the pads array
  * @sink:	pointer to &media_entity of the sink pad.
  * @sink_pad:	number of the sink pad in the pads array.
- * @flags:	Link flags, as defined in include/uapi/linux/media.h.
+ * @flags:	Link flags, as defined in
+ *		:ref:`include/uapi/linux/media.h <media_header>`
+ *		( seek for ``MEDIA_LNK_FL_*``)
  *
  * Valid values for flags:
  *
- * - A %MEDIA_LNK_FL_ENABLED flag indicates that the link is enabled and can
- *   be used to transfer media data. When two or more links target a sink pad,
- *   only one of them can be enabled at a time.
+ * %MEDIA_LNK_FL_ENABLED
+ *   Indicates that the link is enabled and can be used to transfer media data.
+ *   When two or more links target a sink pad, only one of them can be
+ *   enabled at a time.
  *
- * - A %MEDIA_LNK_FL_IMMUTABLE flag indicates that the link enabled state can't
- *   be modified at runtime. If %MEDIA_LNK_FL_IMMUTABLE is set, then
- *   %MEDIA_LNK_FL_ENABLED must also be set since an immutable link is
- *   always enabled.
+ * %MEDIA_LNK_FL_IMMUTABLE
+ *   Indicates that the link enabled state can't be modified at runtime. If
+ *   %MEDIA_LNK_FL_IMMUTABLE is set, then %MEDIA_LNK_FL_ENABLED must also be
+ *   set, since an immutable link is always enabled.
  *
  * .. note::
  *
@@ -630,17 +669,17 @@
  * @source_function: Function of the source entities. Used only if @source is
  *	NULL.
  * @source: pointer to &media_entity of the source pad. If NULL, it will use
- * 	all entities that matches the @sink_function.
+ *	all entities that matches the @sink_function.
  * @source_pad: number of the source pad in the pads array
  * @sink_function: Function of the sink entities. Used only if @sink is NULL.
  * @sink: pointer to &media_entity of the sink pad. If NULL, it will use
- * 	all entities that matches the @sink_function.
+ *	all entities that matches the @sink_function.
  * @sink_pad: number of the sink pad in the pads array.
  * @flags: Link flags, as defined in include/uapi/linux/media.h.
- * @allow_both_undefined: if true, then both @source and @sink can be NULL.
+ * @allow_both_undefined: if %true, then both @source and @sink can be NULL.
  *	In such case, it will create a crossbar between all entities that
  *	matches @source_function to all entities that matches @sink_function.
- *	If false, it will return 0 and won't create any link if both @source
+ *	If %false, it will return 0 and won't create any link if both @source
  *	and @sink are NULL.
  *
  * Valid values for flags:
@@ -660,9 +699,11 @@
  * creates link by link, this function is meant to allow 1:n, n:1 and even
  * cross-bar (n:n) links.
  *
- * NOTE: Before calling this function, media_entity_pads_init() and
- * media_device_register_entity() should be called previously for the entities
- * to be linked.
+ * .. note::
+ *
+ *    Before calling this function, media_entity_pads_init() and
+ *    media_device_register_entity() should be called previously for the
+ *    entities to be linked.
  */
 int media_create_pad_links(const struct media_device *mdev,
 			   const u32 source_function,
@@ -721,7 +762,7 @@
  * flags.
  *
  * Media device drivers can be notified of link setup operations by setting the
- * media_device::link_notify pointer to a callback function. If provided, the
+ * &media_device.link_notify pointer to a callback function. If provided, the
  * notification callback will be called before enabling and after disabling
  * links.
  *
@@ -731,7 +772,7 @@
  *
  * Link configuration must not have any side effect on other links. If an
  * enabled link at a sink pad prevents another link at the same pad from
- * being enabled, the link_setup operation must return -EBUSY and can't
+ * being enabled, the link_setup operation must return %-EBUSY and can't
  * implicitly disable the first enabled link.
  *
  * .. note::
@@ -747,8 +788,8 @@
  * @source: Source pad
  * @sink: Sink pad
  *
- * Return a pointer to the link between the two entities. If no such link
- * exists, return NULL.
+ * Return: returns a pointer to the link between the two entities. If no
+ * such link exists, return %NULL.
  */
 struct media_link *media_entity_find_link(struct media_pad *source,
 		struct media_pad *sink);
@@ -760,8 +801,8 @@
  * Search for a remote pad connected to the given pad by iterating over all
  * links originating or terminating at that pad until an enabled link is found.
  *
- * Return a pointer to the pad at the remote end of the first found enabled
- * link, or NULL if no enabled link has been found.
+ * Return: returns a pointer to the pad at the remote end of the first found
+ * enabled link, or %NULL if no enabled link has been found.
  */
 struct media_pad *media_entity_remote_pad(struct media_pad *pad);
 
@@ -772,12 +813,18 @@
  *
  * Get a reference to the parent media device module.
  *
- * The function will return immediately if @entity is NULL.
+ * The function will return immediately if @entity is %NULL.
  *
- * Return a pointer to the entity on success or NULL on failure.
+ * Return: returns a pointer to the entity on success or %NULL on failure.
  */
 struct media_entity *media_entity_get(struct media_entity *entity);
 
+/**
+ * media_entity_graph_walk_init - Allocate resources used by graph walk.
+ *
+ * @graph: Media graph structure that will be used to walk the graph
+ * @mdev: Pointer to the &media_device that contains the object
+ */
 __must_check int media_entity_graph_walk_init(
 	struct media_entity_graph *graph, struct media_device *mdev);
 
@@ -795,12 +842,14 @@
  *
  * Release the reference count acquired by media_entity_get().
  *
- * The function will return immediately if @entity is NULL.
+ * The function will return immediately if @entity is %NULL.
  */
 void media_entity_put(struct media_entity *entity);
 
 /**
- * media_entity_graph_walk_start - Start walking the media graph at a given entity
+ * media_entity_graph_walk_start - Start walking the media graph at a
+ *	given entity
+ *
  * @graph: Media graph structure that will be used to walk the graph
  * @entity: Starting entity
  *
@@ -824,8 +873,8 @@
  * The graph structure must have been previously initialized with a call to
  * media_entity_graph_walk_start().
  *
- * Return the next entity in the graph or NULL if the whole graph have been
- * traversed.
+ * Return: returns the next entity in the graph or %NULL if the whole graph
+ * have been traversed.
  */
 struct media_entity *
 media_entity_graph_walk_next(struct media_entity_graph *graph);
@@ -836,8 +885,8 @@
  * @pipe: Media pipeline to be assigned to all entities in the pipeline.
  *
  * Mark all entities connected to a given entity through enabled links, either
- * directly or indirectly, as streaming. The given pipeline object is assigned to
- * every entity in the pipeline and stored in the media_entity pipe field.
+ * directly or indirectly, as streaming. The given pipeline object is assigned
+ * to every entity in the pipeline and stored in the media_entity pipe field.
  *
  * Calls to this function can be nested, in which case the same number of
  * media_entity_pipeline_stop() calls will be required to stop streaming. The
@@ -863,7 +912,7 @@
  *
  * Mark all entities connected to a given entity through enabled links, either
  * directly or indirectly, as not streaming. The media_entity pipe field is
- * reset to NULL.
+ * reset to %NULL.
  *
  * If multiple calls to media_entity_pipeline_start() have been made, the same
  * number of calls to this function are required to mark the pipeline as not
@@ -884,14 +933,21 @@
  * media_devnode_create() - creates and initializes a device node interface
  *
  * @mdev:	pointer to struct &media_device
- * @type:	type of the interface, as given by MEDIA_INTF_T_* macros
- *		as defined in the uapi/media/media.h header.
- * @flags:	Interface flags as defined in uapi/media/media.h.
+ * @type:	type of the interface, as given by
+ *		:ref:`include/uapi/linux/media.h <media_header>`
+ *		( seek for ``MEDIA_INTF_T_*``) macros.
+ * @flags:	Interface flags, as defined in
+ *		:ref:`include/uapi/linux/media.h <media_header>`
+ *		( seek for ``MEDIA_INTF_FL_*``)
  * @major:	Device node major number.
  * @minor:	Device node minor number.
  *
  * Return: if succeeded, returns a pointer to the newly allocated
  *	&media_intf_devnode pointer.
+ *
+ * .. note::
+ *
+ *    Currently, no flags for &media_interface is defined.
  */
 struct media_intf_devnode *
 __must_check media_devnode_create(struct media_device *mdev,
@@ -913,15 +969,19 @@
  *
  * @entity:	pointer to %media_entity
  * @intf:	pointer to %media_interface
- * @flags:	Link flags, as defined in include/uapi/linux/media.h.
+ * @flags:	Link flags, as defined in
+ *		:ref:`include/uapi/linux/media.h <media_header>`
+ *		( seek for ``MEDIA_LNK_FL_*``)
  *
  *
  * Valid values for flags:
  *
- * - The %MEDIA_LNK_FL_ENABLED flag indicates that the interface is connected to
- *   the entity hardware. That's the default value for interfaces. An
- *   interface may be disabled if the hardware is busy due to the usage
- *   of some other interface that it is currently controlling the hardware.
+ * %MEDIA_LNK_FL_ENABLED
+ *   Indicates that the interface is connected to the entity hardware.
+ *   That's the default value for interfaces. An interface may be disabled if
+ *   the hardware is busy due to the usage of some other interface that it is
+ *   currently controlling the hardware.
+ *
  *   A typical example is an hybrid TV device that handle only one type of
  *   stream on a given time. So, when the digital TV is streaming,
  *   the V4L2 interfaces won't be enabled, as such device is not able to
@@ -977,6 +1037,18 @@
  */
 void media_remove_intf_links(struct media_interface *intf);
 
+/**
+ * media_entity_call - Calls a struct media_entity_operations operation on
+ *	an entity
+ *
+ * @entity: entity where the @operation will be called
+ * @operation: type of the operation. Should be the name of a member of
+ *	struct &media_entity_operations.
+ *
+ * This helper function will check if @operation is not %NULL. On such case,
+ * it will issue a call to @operation\(@entity, @args\).
+ */
+
 #define media_entity_call(entity, operation, args...)			\
 	(((entity)->ops && (entity)->ops->operation) ?			\
 	 (entity)->ops->operation((entity) , ##args) : -ENOIOCTLCMD)
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 10908e3..40188d3 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -231,7 +231,7 @@
 int rc_open(struct rc_dev *rdev);
 
 /**
- * rc_open - Closes a RC device
+ * rc_close - Closes a RC device
  *
  * @rdev: pointer to struct rc_dev.
  */
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index daa75fc..e1cc14c 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -11,27 +11,55 @@
 
 #include <linux/input.h>
 
+/**
+ * enum rc_type - type of the Remote Controller protocol
+ *
+ * @RC_TYPE_UNKNOWN: Protocol not known
+ * @RC_TYPE_OTHER: Protocol known but proprietary
+ * @RC_TYPE_RC5: Philips RC5 protocol
+ * @RC_TYPE_RC5X: Philips RC5x protocol
+ * @RC_TYPE_RC5_SZ: StreamZap variant of RC5
+ * @RC_TYPE_JVC: JVC protocol
+ * @RC_TYPE_SONY12: Sony 12 bit protocol
+ * @RC_TYPE_SONY15: Sony 15 bit protocol
+ * @RC_TYPE_SONY20: Sony 20 bit protocol
+ * @RC_TYPE_NEC: NEC protocol
+ * @RC_TYPE_NECX: Extended NEC protocol
+ * @RC_TYPE_NEC32: NEC 32 bit protocol
+ * @RC_TYPE_SANYO: Sanyo protocol
+ * @RC_TYPE_MCE_KBD: RC6-ish MCE keyboard/mouse
+ * @RC_TYPE_RC6_0: Philips RC6-0-16 protocol
+ * @RC_TYPE_RC6_6A_20: Philips RC6-6A-20 protocol
+ * @RC_TYPE_RC6_6A_24: Philips RC6-6A-24 protocol
+ * @RC_TYPE_RC6_6A_32: Philips RC6-6A-32 protocol
+ * @RC_TYPE_RC6_MCE: MCE (Philips RC6-6A-32 subtype) protocol
+ * @RC_TYPE_SHARP: Sharp protocol
+ * @RC_TYPE_XMP: XMP protocol
+ * @RC_TYPE_CEC: CEC protocol
+ */
 enum rc_type {
-	RC_TYPE_UNKNOWN		= 0,	/* Protocol not known */
-	RC_TYPE_OTHER		= 1,	/* Protocol known but proprietary */
-	RC_TYPE_RC5		= 2,	/* Philips RC5 protocol */
-	RC_TYPE_RC5X		= 3,	/* Philips RC5x protocol */
-	RC_TYPE_RC5_SZ		= 4,	/* StreamZap variant of RC5 */
-	RC_TYPE_JVC		= 5,	/* JVC protocol */
-	RC_TYPE_SONY12		= 6,	/* Sony 12 bit protocol */
-	RC_TYPE_SONY15		= 7,	/* Sony 15 bit protocol */
-	RC_TYPE_SONY20		= 8,	/* Sony 20 bit protocol */
-	RC_TYPE_NEC		= 9,	/* NEC protocol */
-	RC_TYPE_SANYO		= 10,	/* Sanyo protocol */
-	RC_TYPE_MCE_KBD		= 11,	/* RC6-ish MCE keyboard/mouse */
-	RC_TYPE_RC6_0		= 12,	/* Philips RC6-0-16 protocol */
-	RC_TYPE_RC6_6A_20	= 13,	/* Philips RC6-6A-20 protocol */
-	RC_TYPE_RC6_6A_24	= 14,	/* Philips RC6-6A-24 protocol */
-	RC_TYPE_RC6_6A_32	= 15,	/* Philips RC6-6A-32 protocol */
-	RC_TYPE_RC6_MCE		= 16,	/* MCE (Philips RC6-6A-32 subtype) protocol */
-	RC_TYPE_SHARP		= 17,	/* Sharp protocol */
-	RC_TYPE_XMP		= 18,	/* XMP protocol */
-	RC_TYPE_CEC		= 19,	/* CEC protocol */
+	RC_TYPE_UNKNOWN		= 0,
+	RC_TYPE_OTHER		= 1,
+	RC_TYPE_RC5		= 2,
+	RC_TYPE_RC5X		= 3,
+	RC_TYPE_RC5_SZ		= 4,
+	RC_TYPE_JVC		= 5,
+	RC_TYPE_SONY12		= 6,
+	RC_TYPE_SONY15		= 7,
+	RC_TYPE_SONY20		= 8,
+	RC_TYPE_NEC		= 9,
+	RC_TYPE_NECX		= 10,
+	RC_TYPE_NEC32		= 11,
+	RC_TYPE_SANYO		= 12,
+	RC_TYPE_MCE_KBD		= 13,
+	RC_TYPE_RC6_0		= 14,
+	RC_TYPE_RC6_6A_20	= 15,
+	RC_TYPE_RC6_6A_24	= 16,
+	RC_TYPE_RC6_6A_32	= 17,
+	RC_TYPE_RC6_MCE		= 18,
+	RC_TYPE_SHARP		= 19,
+	RC_TYPE_XMP		= 20,
+	RC_TYPE_CEC		= 21,
 };
 
 #define RC_BIT_NONE		0ULL
@@ -45,6 +73,8 @@
 #define RC_BIT_SONY15		(1ULL << RC_TYPE_SONY15)
 #define RC_BIT_SONY20		(1ULL << RC_TYPE_SONY20)
 #define RC_BIT_NEC		(1ULL << RC_TYPE_NEC)
+#define RC_BIT_NECX		(1ULL << RC_TYPE_NECX)
+#define RC_BIT_NEC32		(1ULL << RC_TYPE_NEC32)
 #define RC_BIT_SANYO		(1ULL << RC_TYPE_SANYO)
 #define RC_BIT_MCE_KBD		(1ULL << RC_TYPE_MCE_KBD)
 #define RC_BIT_RC6_0		(1ULL << RC_TYPE_RC6_0)
@@ -60,8 +90,9 @@
 			 RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ | \
 			 RC_BIT_JVC | \
 			 RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20 | \
-			 RC_BIT_NEC | RC_BIT_SANYO | RC_BIT_MCE_KBD | \
-			 RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | \
+			 RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32 | \
+			 RC_BIT_SANYO | RC_BIT_MCE_KBD | RC_BIT_RC6_0 | \
+			 RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | \
 			 RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE | RC_BIT_SHARP | \
 			 RC_BIT_XMP | RC_BIT_CEC)
 
@@ -76,21 +107,45 @@
 #define RC_SCANCODE_RC6_0(sys, cmd)		(((sys) << 8) | (cmd))
 #define RC_SCANCODE_RC6_6A(vendor, sys, cmd)	(((vendor) << 16) | ((sys) << 8) | (cmd))
 
+/**
+ * struct rc_map_table - represents a scancode/keycode pair
+ *
+ * @scancode: scan code (u32)
+ * @keycode: Linux input keycode
+ */
 struct rc_map_table {
 	u32	scancode;
 	u32	keycode;
 };
 
+/**
+ * struct rc_map - represents a keycode map table
+ *
+ * @scan: pointer to struct &rc_map_table
+ * @size: Max number of entries
+ * @len: Number of entries that are in use
+ * @alloc: size of \*scan, in bytes
+ * @rc_type: type of the remote controller protocol, as defined at
+ *	     enum &rc_type
+ * @name: name of the key map table
+ * @lock: lock to protect access to this structure
+ */
 struct rc_map {
 	struct rc_map_table	*scan;
-	unsigned int		size;	/* Max number of entries */
-	unsigned int		len;	/* Used number of entries */
-	unsigned int		alloc;	/* Size of *scan in bytes */
+	unsigned int		size;
+	unsigned int		len;
+	unsigned int		alloc;
 	enum rc_type		rc_type;
 	const char		*name;
 	spinlock_t		lock;
 };
 
+/**
+ * struct rc_map_list - list of the registered &rc_map maps
+ *
+ * @list: pointer to struct &list_head
+ * @map: pointer to struct &rc_map
+ */
 struct rc_map_list {
 	struct list_head	 list;
 	struct rc_map map;
diff --git a/include/media/rcar-fcp.h b/include/media/rcar-fcp.h
index 4c7fc77..8723f05 100644
--- a/include/media/rcar-fcp.h
+++ b/include/media/rcar-fcp.h
@@ -29,7 +29,7 @@
 static inline void rcar_fcp_put(struct rcar_fcp_device *fcp) { }
 static inline int rcar_fcp_enable(struct rcar_fcp_device *fcp)
 {
-	return -ENOSYS;
+	return 0;
 }
 static inline void rcar_fcp_disable(struct rcar_fcp_device *fcp) { }
 #endif
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 97aa133..1a15c3e 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -105,16 +105,13 @@
 	int (*get_formats)(struct soc_camera_device *, unsigned int,
 			   struct soc_camera_format_xlate *);
 	void (*put_formats)(struct soc_camera_device *);
-	int (*cropcap)(struct soc_camera_device *, struct v4l2_cropcap *);
-	int (*get_crop)(struct soc_camera_device *, struct v4l2_crop *);
-	int (*set_crop)(struct soc_camera_device *, const struct v4l2_crop *);
 	int (*get_selection)(struct soc_camera_device *, struct v4l2_selection *);
 	int (*set_selection)(struct soc_camera_device *, struct v4l2_selection *);
 	/*
-	 * The difference to .set_crop() is, that .set_livecrop is not allowed
+	 * The difference to .set_selection() is, that .set_liveselection is not allowed
 	 * to change the output sizes
 	 */
-	int (*set_livecrop)(struct soc_camera_device *, const struct v4l2_crop *);
+	int (*set_liveselection)(struct soc_camera_device *, struct v4l2_selection *);
 	int (*set_fmt)(struct soc_camera_device *, struct v4l2_format *);
 	int (*try_fmt)(struct soc_camera_device *, struct v4l2_format *);
 	void (*init_videobuf)(struct videobuf_queue *,
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 178a88d..e1006b3 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -93,6 +93,16 @@
 			union v4l2_ctrl_ptr ptr);
 };
 
+/**
+ * typedef v4l2_ctrl_notify_fnc - typedef for a notify argument with a function
+ *	that should be called when a control value has changed.
+ *
+ * @ctrl: pointer to struct &v4l2_ctrl
+ * @priv: control private data
+ *
+ * This typedef definition is used as an argument to v4l2_ctrl_notify()
+ * and as an argument at struct &v4l2_ctrl_handler.
+ */
 typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
 
 /**
@@ -229,7 +239,7 @@
  * @next:	Single-link list node for the hash.
  * @ctrl:	The actual control information.
  * @helper:	Pointer to helper struct. Used internally in
- *		prepare_ext_ctrls().
+ *		``prepare_ext_ctrls`` function at ``v4l2-ctrl.c``.
  *
  * Each control handler has a list of these refs. The list_head is used to
  * keep a sorted-by-control-ID list of all controls, while the next pointer
@@ -369,17 +379,39 @@
  * @key:	Used by the lock validator if CONFIG_LOCKDEP is set.
  * @name:	Used by the lock validator if CONFIG_LOCKDEP is set.
  *
- * Returns an error if the buckets could not be allocated. This error will
- * also be stored in @hdl->error.
+ * .. attention::
  *
- * Never use this call directly, always use the v4l2_ctrl_handler_init
- * macro that hides the @key and @name arguments.
+ *    Never use this call directly, always use the v4l2_ctrl_handler_init()
+ *    macro that hides the @key and @name arguments.
+ *
+ * Return: returns an error if the buckets could not be allocated. This
+ * error will also be stored in @hdl->error.
  */
 int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
 				 unsigned int nr_of_controls_hint,
 				 struct lock_class_key *key, const char *name);
 
 #ifdef CONFIG_LOCKDEP
+
+/**
+ * v4l2_ctrl_handler_init - helper function to create a static struct
+ *	 &lock_class_key and calls v4l2_ctrl_handler_init_class()
+ *
+ * @hdl:	The control handler.
+ * @nr_of_controls_hint: A hint of how many controls this handler is
+ *		expected to refer to. This is the total number, so including
+ *		any inherited controls. It doesn't have to be precise, but if
+ *		it is way off, then you either waste memory (too many buckets
+ *		are allocated) or the control lookup becomes slower (not enough
+ *		buckets are allocated, so there are more slow list lookups).
+ *		It will always work, though.
+ *
+ * This helper function creates a static struct &lock_class_key and
+ * calls v4l2_ctrl_handler_init_class(), providing a proper name for the lock
+ * validador.
+ *
+ * Use this helper function to initialize a control handler.
+ */
 #define v4l2_ctrl_handler_init(hdl, nr_of_controls_hint)		\
 (									\
 	({								\
@@ -564,6 +596,13 @@
 					 u32 id, u8 max, u8 def,
 					 const s64 *qmenu_int);
 
+/**
+ * typedef v4l2_ctrl_filter - Typedef to define the filter function to be
+ *	used when adding a control handler.
+ *
+ * @ctrl: pointer to struct &v4l2_ctrl.
+ */
+
 typedef bool (*v4l2_ctrl_filter)(const struct v4l2_ctrl *ctrl);
 
 /**
@@ -635,8 +674,8 @@
  * be marked active, and any reads will just return the current value without
  * going through g_volatile_ctrl.
  *
- * In addition, this function will set the V4L2_CTRL_FLAG_UPDATE flag
- * on the autofoo control and V4L2_CTRL_FLAG_INACTIVE on the foo control(s)
+ * In addition, this function will set the %V4L2_CTRL_FLAG_UPDATE flag
+ * on the autofoo control and %V4L2_CTRL_FLAG_INACTIVE on the foo control(s)
  * if autofoo is in auto mode.
  */
 void v4l2_ctrl_auto_cluster(unsigned int ncontrols,
@@ -686,7 +725,6 @@
  */
 void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed);
 
-
 /**
  *__v4l2_ctrl_modify_range() - Unlocked variant of v4l2_ctrl_modify_range()
  *
@@ -936,9 +974,9 @@
  * v4l2_ctrl_replace - Function to be used as a callback to
  *	&struct v4l2_subscribed_event_ops replace\(\)
  *
- * @old: pointer to :ref:`struct v4l2_event <v4l2-event>` with the reported
+ * @old: pointer to struct &v4l2_event with the reported
  *	 event;
- * @new: pointer to :ref:`struct v4l2_event <v4l2-event>` with the modified
+ * @new: pointer to struct &v4l2_event with the modified
  *	 event;
  */
 void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new);
@@ -947,9 +985,9 @@
  * v4l2_ctrl_merge - Function to be used as a callback to
  *	&struct v4l2_subscribed_event_ops merge(\)
  *
- * @old: pointer to :ref:`struct v4l2_event <v4l2-event>` with the reported
+ * @old: pointer to struct &v4l2_event with the reported
  *	 event;
- * @new: pointer to :ref:`struct v4l2_event <v4l2-event>` with the merged
+ * @new: pointer to struct &v4l2_event with the merged
  *	 event;
  */
 void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new);
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index a122b1b..e657614 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -25,7 +25,8 @@
 #define VFL_TYPE_RADIO		2
 #define VFL_TYPE_SUBDEV		3
 #define VFL_TYPE_SDR		4
-#define VFL_TYPE_MAX		5
+#define VFL_TYPE_TOUCH		5
+#define VFL_TYPE_MAX		6
 
 /* Is this a receiver, transmitter or mem-to-mem? */
 /* Ignored for VFL_TYPE_SUBDEV. */
@@ -55,7 +56,7 @@
  *
  * .. note::
  *    The size of @prios array matches the number of priority types defined
- *    by :ref:`enum v4l2_priority <v4l2-priority>`.
+ *    by enum &v4l2_priority.
  */
 struct v4l2_prio_state {
 	atomic_t prios[4];
@@ -72,8 +73,8 @@
  * v4l2_prio_change - changes the v4l2 file handler priority
  *
  * @global: pointer to the &struct v4l2_prio_state of the device node.
- * @local: pointer to the desired priority, as defined by :ref:`enum v4l2_priority <v4l2-priority>`
- * @new: Priority type requested, as defined by :ref:`enum v4l2_priority <v4l2-priority>`.
+ * @local: pointer to the desired priority, as defined by enum &v4l2_priority
+ * @new: Priority type requested, as defined by enum &v4l2_priority.
  *
  * .. note::
  *	This function should be used only by the V4L2 core.
@@ -85,7 +86,7 @@
  * v4l2_prio_open - Implements the priority logic for a file handler open
  *
  * @global: pointer to the &struct v4l2_prio_state of the device node.
- * @local: pointer to the desired priority, as defined by :ref:`enum v4l2_priority <v4l2-priority>`
+ * @local: pointer to the desired priority, as defined by enum &v4l2_priority
  *
  * .. note::
  *	This function should be used only by the V4L2 core.
@@ -96,7 +97,7 @@
  * v4l2_prio_close - Implements the priority logic for a file handler close
  *
  * @global: pointer to the &struct v4l2_prio_state of the device node.
- * @local: priority to be released, as defined by :ref:`enum v4l2_priority <v4l2-priority>`
+ * @local: priority to be released, as defined by enum &v4l2_priority
  *
  * .. note::
  *	This function should be used only by the V4L2 core.
@@ -114,10 +115,10 @@
 enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global);
 
 /**
- * v4l2_prio_close - Implements the priority logic for a file handler close
+ * v4l2_prio_check - Implements the priority logic for a file handler close
  *
  * @global: pointer to the &struct v4l2_prio_state of the device node.
- * @local: desired priority, as defined by :ref:`enum v4l2_priority <v4l2-priority>` local
+ * @local: desired priority, as defined by enum &v4l2_priority local
  *
  * .. note::
  *	This function should be used only by the V4L2 core.
@@ -294,6 +295,7 @@
  *	- %VFL_TYPE_RADIO - A radio card
  *	- %VFL_TYPE_SUBDEV - A subdevice
  *	- %VFL_TYPE_SDR - Software Defined Radio
+ *	- %VFL_TYPE_TOUCH - A touch sensor
  *
  * .. note::
  *
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index a9d6aa4..8ffa940 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -39,7 +39,7 @@
  *	if this struct is embedded into a larger struct.
  * @name: unique device name, by default the driver name + bus ID
  * @notify: notify callback called by some sub-devices.
- * @ctrl_handler: The control handler. May be NULL.
+ * @ctrl_handler: The control handler. May be %NULL.
  * @prio: Device's priority state
  * @ref: Keep track of the references to this struct.
  * @release: Release function that is called when the ref count
@@ -53,8 +53,8 @@
  *
  * .. note::
  *
- *    #) dev->driver_data points to this struct.
- *    #) dev might be NULL if there is no parent device
+ *    #) @dev->driver_data points to this struct.
+ *    #) @dev might be %NULL if there is no parent device
  */
 
 struct v4l2_device {
@@ -76,10 +76,10 @@
 /**
  * v4l2_device_get - gets a V4L2 device reference
  *
- * @v4l2_dev: pointer to struct v4l2_device
+ * @v4l2_dev: pointer to struct &v4l2_device
  *
  * This is an ancillary routine meant to increment the usage for the
- * struct v4l2_device pointed by @v4l2_dev.
+ * struct &v4l2_device pointed by @v4l2_dev.
  */
 static inline void v4l2_device_get(struct v4l2_device *v4l2_dev)
 {
@@ -89,23 +89,23 @@
 /**
  * v4l2_device_put - putss a V4L2 device reference
  *
- * @v4l2_dev: pointer to struct v4l2_device
+ * @v4l2_dev: pointer to struct &v4l2_device
  *
  * This is an ancillary routine meant to decrement the usage for the
- * struct v4l2_device pointed by @v4l2_dev.
+ * struct &v4l2_device pointed by @v4l2_dev.
  */
 int v4l2_device_put(struct v4l2_device *v4l2_dev);
 
 /**
- * v4l2_device_register -Initialize v4l2_dev and make dev->driver_data
- * 	point to v4l2_dev.
+ * v4l2_device_register - Initialize v4l2_dev and make @dev->driver_data
+ *	point to @v4l2_dev.
  *
- * @dev: pointer to struct device
- * @v4l2_dev: pointer to struct v4l2_device
+ * @dev: pointer to struct &device
+ * @v4l2_dev: pointer to struct &v4l2_device
  *
  * .. note::
- *	dev may be NULL in rare cases (ISA devices).
- *	In such case the caller must fill in the v4l2_dev->name field
+ *	@dev may be %NULL in rare cases (ISA devices).
+ *	In such case the caller must fill in the @v4l2_dev->name field
  *	before calling this function.
  */
 int __must_check v4l2_device_register(struct device *dev,
@@ -113,14 +113,14 @@
 
 /**
  * v4l2_device_set_name - Optional function to initialize the
- * 	name field of struct v4l2_device
+ *	name field of struct &v4l2_device
  *
- * @v4l2_dev: pointer to struct v4l2_device
+ * @v4l2_dev: pointer to struct &v4l2_device
  * @basename: base name for the device name
  * @instance: pointer to a static atomic_t var with the instance usage for
- * 	the device driver.
+ *	the device driver.
  *
- * v4l2_device_set_name() initializes the name field of struct v4l2_device
+ * v4l2_device_set_name() initializes the name field of struct &v4l2_device
  * using the driver name and a driver-global atomic_t instance.
  *
  * This function will increment the instance counter and returns the
@@ -132,7 +132,7 @@
  *
  *   ...
  *
- *   instance = v4l2_device_set_name(&v4l2_dev, "foo", &drv_instance);
+ *   instance = v4l2_device_set_name(&\ v4l2_dev, "foo", &\ drv_instance);
  *
  * The first time this is called the name field will be set to foo0 and
  * this function returns 0. If the name ends with a digit (e.g. cx18),
@@ -147,16 +147,16 @@
  * @v4l2_dev: pointer to struct v4l2_device
  *
  * Should be called when the USB parent disconnects.
- * Since the parent disappears, this ensures that v4l2_dev doesn't have
+ * Since the parent disappears, this ensures that @v4l2_dev doesn't have
  * an invalid parent pointer.
  *
- * .. note:: This function sets v4l2_dev->dev to NULL.
+ * .. note:: This function sets @v4l2_dev->dev to NULL.
  */
 void v4l2_device_disconnect(struct v4l2_device *v4l2_dev);
 
 /**
  *  v4l2_device_unregister - Unregister all sub-devices and any other
- *	 resources related to v4l2_dev.
+ *	 resources related to @v4l2_dev.
  *
  * @v4l2_dev: pointer to struct v4l2_device
  */
@@ -165,8 +165,8 @@
 /**
  * v4l2_device_register_subdev - Registers a subdev with a v4l2 device.
  *
- * @v4l2_dev: pointer to struct v4l2_device
- * @sd: pointer to struct v4l2_subdev
+ * @v4l2_dev: pointer to struct &v4l2_device
+ * @sd: pointer to struct &v4l2_subdev
  *
  * While registered, the subdev module is marked as in-use.
  *
@@ -179,7 +179,7 @@
 /**
  * v4l2_device_unregister_subdev - Unregisters a subdev with a v4l2 device.
  *
- * @sd: pointer to struct v4l2_subdev
+ * @sd: pointer to struct &v4l2_subdev
  *
  * .. note ::
  *
@@ -191,7 +191,7 @@
 /**
  * v4l2_device_register_subdev_nodes - Registers device nodes for all subdevs
  *	of the v4l2 device that are marked with
- * 	the V4L2_SUBDEV_FL_HAS_DEVNODE flag.
+ *	the %V4L2_SUBDEV_FL_HAS_DEVNODE flag.
  *
  * @v4l2_dev: pointer to struct v4l2_device
  */
@@ -201,9 +201,9 @@
 /**
  * v4l2_subdev_notify - Sends a notification to v4l2_device.
  *
- * @sd: pointer to struct v4l2_subdev
+ * @sd: pointer to struct &v4l2_subdev
  * @notification: type of notification. Please notice that the notification
- * 	type is driver-specific.
+ *	type is driver-specific.
  * @arg: arguments for the notification. Those are specific to each
  *	notification type.
  */
@@ -222,7 +222,7 @@
    Ignore any errors. Note that you cannot add or delete a subdev
    while walking the subdevs list. */
 #define __v4l2_device_call_subdevs_p(v4l2_dev, sd, cond, o, f, args...)	\
-	do { 								\
+	do {								\
 		list_for_each_entry((sd), &(v4l2_dev)->subdevs, list)	\
 			if ((cond) && (sd)->ops->o && (sd)->ops->o->f)	\
 				(sd)->ops->o->f((sd) , ##args);		\
@@ -241,15 +241,15 @@
    return with that error code. Note that you cannot add or delete a
    subdev while walking the subdevs list. */
 #define __v4l2_device_call_subdevs_until_err_p(v4l2_dev, sd, cond, o, f, args...) \
-({ 									\
+({									\
 	long __err = 0;							\
 									\
 	list_for_each_entry((sd), &(v4l2_dev)->subdevs, list) {		\
 		if ((cond) && (sd)->ops->o && (sd)->ops->o->f)		\
 			__err = (sd)->ops->o->f((sd) , ##args);		\
 		if (__err && __err != -ENOIOCTLCMD)			\
-			break; 						\
-	} 								\
+			break;						\
+	}								\
 	(__err == -ENOIOCTLCMD) ? 0 : __err;				\
 })
 
@@ -276,7 +276,7 @@
    match them all). If the callback returns an error other than 0 or
    -ENOIOCTLCMD, then return with that error code. Note that you cannot
    add or delete a subdev while walking the subdevs list. */
-#define v4l2_device_call_until_err(v4l2_dev, grpid, o, f, args...) 	\
+#define v4l2_device_call_until_err(v4l2_dev, grpid, o, f, args...)	\
 ({									\
 	struct v4l2_subdev *__sd;					\
 	__v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd,		\
@@ -300,8 +300,8 @@
 
 /*
  * Call the specified callback for all subdevs where grp_id & grpmsk != 0
- * (if grpmsk == `0, then match them all). If the callback returns an error
- * other than 0 or -ENOIOCTLCMD, then return with that error code. Note that
+ * (if grpmsk == 0, then match them all). If the callback returns an error
+ * other than 0 or %-ENOIOCTLCMD, then return with that error code. Note that
  * you cannot add or delete a subdev while walking the subdevs list.
  */
 #define v4l2_device_mask_call_until_err(v4l2_dev, grpmsk, o, f, args...) \
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index 65caadf..0a7d9e1 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -28,8 +28,8 @@
  */
 extern const struct v4l2_dv_timings v4l2_dv_timings_presets[];
 
-/*
- * v4l2_check_dv_timings_fnc - timings check callback
+/**
+ * typedef v4l2_check_dv_timings_fnc - timings check callback
  *
  * @t: the v4l2_dv_timings struct.
  * @handle: a handle from the driver.
diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h
index ca85420..a700285 100644
--- a/include/media/v4l2-event.h
+++ b/include/media/v4l2-event.h
@@ -222,7 +222,8 @@
 				  struct v4l2_fh *fh,
 				  struct v4l2_event_subscription *sub);
 /**
- * v4l2_src_change_event_subscribe -
+ * v4l2_src_change_event_subscribe - helper function that calls
+ * 	v4l2_event_subscribe() if the event is %V4L2_EVENT_SOURCE_CHANGE.
  *
  * @fh: pointer to struct v4l2_fh
  * @sub: pointer to &struct v4l2_event_subscription
diff --git a/include/media/v4l2-flash-led-class.h b/include/media/v4l2-flash-led-class.h
index 3d184ab..b0fe4d6 100644
--- a/include/media/v4l2-flash-led-class.h
+++ b/include/media/v4l2-flash-led-class.h
@@ -20,7 +20,7 @@
 struct v4l2_flash;
 enum led_brightness;
 
-/*
+/**
  * struct v4l2_flash_ctrl_data - flash control initialization data, filled
  *				basing on the features declared by the LED flash
  *				class driver in the v4l2_flash_config
@@ -33,14 +33,21 @@
 	u32 cid;
 };
 
+/**
+ * struct v4l2_flash_ops - V4L2 flash operations
+ *
+ * @external_strobe_set: Setup strobing the flash by hardware pin state
+ *	assertion.
+ * @intensity_to_led_brightness: Convert intensity to brightness in a device
+ *	specific manner
+ * @led_brightness_to_intensity: convert brightness to intensity in a device
+ *	specific manner.
+ */
 struct v4l2_flash_ops {
-	/* setup strobing the flash by hardware pin state assertion */
 	int (*external_strobe_set)(struct v4l2_flash *v4l2_flash,
 					bool enable);
-	/* convert intensity to brightness in a device specific manner */
 	enum led_brightness (*intensity_to_led_brightness)
 		(struct v4l2_flash *v4l2_flash, s32 intensity);
-	/* convert brightness to intensity in a device specific manner */
 	s32 (*led_brightness_to_intensity)
 		(struct v4l2_flash *v4l2_flash, enum led_brightness);
 };
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 8b1d19b..574ff2a 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -287,273 +287,286 @@
 	/* ioctl callbacks */
 
 	/* VIDIOC_QUERYCAP handler */
-	int (*vidioc_querycap)(struct file *file, void *fh, struct v4l2_capability *cap);
+	int (*vidioc_querycap)(struct file *file, void *fh,
+			       struct v4l2_capability *cap);
 
 	/* VIDIOC_ENUM_FMT handlers */
-	int (*vidioc_enum_fmt_vid_cap)     (struct file *file, void *fh,
-					    struct v4l2_fmtdesc *f);
-	int (*vidioc_enum_fmt_vid_overlay) (struct file *file, void *fh,
-					    struct v4l2_fmtdesc *f);
-	int (*vidioc_enum_fmt_vid_out)     (struct file *file, void *fh,
-					    struct v4l2_fmtdesc *f);
+	int (*vidioc_enum_fmt_vid_cap)(struct file *file, void *fh,
+				       struct v4l2_fmtdesc *f);
+	int (*vidioc_enum_fmt_vid_overlay)(struct file *file, void *fh,
+					   struct v4l2_fmtdesc *f);
+	int (*vidioc_enum_fmt_vid_out)(struct file *file, void *fh,
+				       struct v4l2_fmtdesc *f);
 	int (*vidioc_enum_fmt_vid_cap_mplane)(struct file *file, void *fh,
 					      struct v4l2_fmtdesc *f);
 	int (*vidioc_enum_fmt_vid_out_mplane)(struct file *file, void *fh,
 					      struct v4l2_fmtdesc *f);
-	int (*vidioc_enum_fmt_sdr_cap)     (struct file *file, void *fh,
-					    struct v4l2_fmtdesc *f);
-	int (*vidioc_enum_fmt_sdr_out)     (struct file *file, void *fh,
-					    struct v4l2_fmtdesc *f);
+	int (*vidioc_enum_fmt_sdr_cap)(struct file *file, void *fh,
+				       struct v4l2_fmtdesc *f);
+	int (*vidioc_enum_fmt_sdr_out)(struct file *file, void *fh,
+				       struct v4l2_fmtdesc *f);
 
 	/* VIDIOC_G_FMT handlers */
-	int (*vidioc_g_fmt_vid_cap)    (struct file *file, void *fh,
-					struct v4l2_format *f);
+	int (*vidioc_g_fmt_vid_cap)(struct file *file, void *fh,
+				    struct v4l2_format *f);
 	int (*vidioc_g_fmt_vid_overlay)(struct file *file, void *fh,
 					struct v4l2_format *f);
-	int (*vidioc_g_fmt_vid_out)    (struct file *file, void *fh,
-					struct v4l2_format *f);
+	int (*vidioc_g_fmt_vid_out)(struct file *file, void *fh,
+				    struct v4l2_format *f);
 	int (*vidioc_g_fmt_vid_out_overlay)(struct file *file, void *fh,
-					struct v4l2_format *f);
-	int (*vidioc_g_fmt_vbi_cap)    (struct file *file, void *fh,
-					struct v4l2_format *f);
-	int (*vidioc_g_fmt_vbi_out)    (struct file *file, void *fh,
-					struct v4l2_format *f);
+					    struct v4l2_format *f);
+	int (*vidioc_g_fmt_vbi_cap)(struct file *file, void *fh,
+				    struct v4l2_format *f);
+	int (*vidioc_g_fmt_vbi_out)(struct file *file, void *fh,
+				    struct v4l2_format *f);
 	int (*vidioc_g_fmt_sliced_vbi_cap)(struct file *file, void *fh,
-					struct v4l2_format *f);
+					   struct v4l2_format *f);
 	int (*vidioc_g_fmt_sliced_vbi_out)(struct file *file, void *fh,
-					struct v4l2_format *f);
+					   struct v4l2_format *f);
 	int (*vidioc_g_fmt_vid_cap_mplane)(struct file *file, void *fh,
 					   struct v4l2_format *f);
 	int (*vidioc_g_fmt_vid_out_mplane)(struct file *file, void *fh,
 					   struct v4l2_format *f);
-	int (*vidioc_g_fmt_sdr_cap)    (struct file *file, void *fh,
-					struct v4l2_format *f);
-	int (*vidioc_g_fmt_sdr_out)    (struct file *file, void *fh,
-					struct v4l2_format *f);
+	int (*vidioc_g_fmt_sdr_cap)(struct file *file, void *fh,
+				    struct v4l2_format *f);
+	int (*vidioc_g_fmt_sdr_out)(struct file *file, void *fh,
+				    struct v4l2_format *f);
 
 	/* VIDIOC_S_FMT handlers */
-	int (*vidioc_s_fmt_vid_cap)    (struct file *file, void *fh,
-					struct v4l2_format *f);
+	int (*vidioc_s_fmt_vid_cap)(struct file *file, void *fh,
+				    struct v4l2_format *f);
 	int (*vidioc_s_fmt_vid_overlay)(struct file *file, void *fh,
 					struct v4l2_format *f);
-	int (*vidioc_s_fmt_vid_out)    (struct file *file, void *fh,
-					struct v4l2_format *f);
+	int (*vidioc_s_fmt_vid_out)(struct file *file, void *fh,
+				    struct v4l2_format *f);
 	int (*vidioc_s_fmt_vid_out_overlay)(struct file *file, void *fh,
-					struct v4l2_format *f);
-	int (*vidioc_s_fmt_vbi_cap)    (struct file *file, void *fh,
-					struct v4l2_format *f);
-	int (*vidioc_s_fmt_vbi_out)    (struct file *file, void *fh,
-					struct v4l2_format *f);
+					    struct v4l2_format *f);
+	int (*vidioc_s_fmt_vbi_cap)(struct file *file, void *fh,
+				    struct v4l2_format *f);
+	int (*vidioc_s_fmt_vbi_out)(struct file *file, void *fh,
+				    struct v4l2_format *f);
 	int (*vidioc_s_fmt_sliced_vbi_cap)(struct file *file, void *fh,
-					struct v4l2_format *f);
+					   struct v4l2_format *f);
 	int (*vidioc_s_fmt_sliced_vbi_out)(struct file *file, void *fh,
-					struct v4l2_format *f);
+					   struct v4l2_format *f);
 	int (*vidioc_s_fmt_vid_cap_mplane)(struct file *file, void *fh,
 					   struct v4l2_format *f);
 	int (*vidioc_s_fmt_vid_out_mplane)(struct file *file, void *fh,
 					   struct v4l2_format *f);
-	int (*vidioc_s_fmt_sdr_cap)    (struct file *file, void *fh,
-					struct v4l2_format *f);
-	int (*vidioc_s_fmt_sdr_out)    (struct file *file, void *fh,
-					struct v4l2_format *f);
+	int (*vidioc_s_fmt_sdr_cap)(struct file *file, void *fh,
+				    struct v4l2_format *f);
+	int (*vidioc_s_fmt_sdr_out)(struct file *file, void *fh,
+				    struct v4l2_format *f);
 
 	/* VIDIOC_TRY_FMT handlers */
-	int (*vidioc_try_fmt_vid_cap)    (struct file *file, void *fh,
-					  struct v4l2_format *f);
+	int (*vidioc_try_fmt_vid_cap)(struct file *file, void *fh,
+				      struct v4l2_format *f);
 	int (*vidioc_try_fmt_vid_overlay)(struct file *file, void *fh,
 					  struct v4l2_format *f);
-	int (*vidioc_try_fmt_vid_out)    (struct file *file, void *fh,
-					  struct v4l2_format *f);
+	int (*vidioc_try_fmt_vid_out)(struct file *file, void *fh,
+				      struct v4l2_format *f);
 	int (*vidioc_try_fmt_vid_out_overlay)(struct file *file, void *fh,
-					  struct v4l2_format *f);
-	int (*vidioc_try_fmt_vbi_cap)    (struct file *file, void *fh,
-					  struct v4l2_format *f);
-	int (*vidioc_try_fmt_vbi_out)    (struct file *file, void *fh,
-					  struct v4l2_format *f);
+					     struct v4l2_format *f);
+	int (*vidioc_try_fmt_vbi_cap)(struct file *file, void *fh,
+				      struct v4l2_format *f);
+	int (*vidioc_try_fmt_vbi_out)(struct file *file, void *fh,
+				      struct v4l2_format *f);
 	int (*vidioc_try_fmt_sliced_vbi_cap)(struct file *file, void *fh,
-					  struct v4l2_format *f);
+					     struct v4l2_format *f);
 	int (*vidioc_try_fmt_sliced_vbi_out)(struct file *file, void *fh,
-					  struct v4l2_format *f);
+					     struct v4l2_format *f);
 	int (*vidioc_try_fmt_vid_cap_mplane)(struct file *file, void *fh,
 					     struct v4l2_format *f);
 	int (*vidioc_try_fmt_vid_out_mplane)(struct file *file, void *fh,
 					     struct v4l2_format *f);
-	int (*vidioc_try_fmt_sdr_cap)    (struct file *file, void *fh,
-					  struct v4l2_format *f);
-	int (*vidioc_try_fmt_sdr_out)    (struct file *file, void *fh,
-					  struct v4l2_format *f);
+	int (*vidioc_try_fmt_sdr_cap)(struct file *file, void *fh,
+				      struct v4l2_format *f);
+	int (*vidioc_try_fmt_sdr_out)(struct file *file, void *fh,
+				      struct v4l2_format *f);
 
 	/* Buffer handlers */
-	int (*vidioc_reqbufs) (struct file *file, void *fh, struct v4l2_requestbuffers *b);
-	int (*vidioc_querybuf)(struct file *file, void *fh, struct v4l2_buffer *b);
-	int (*vidioc_qbuf)    (struct file *file, void *fh, struct v4l2_buffer *b);
-	int (*vidioc_expbuf)  (struct file *file, void *fh,
-				struct v4l2_exportbuffer *e);
-	int (*vidioc_dqbuf)   (struct file *file, void *fh, struct v4l2_buffer *b);
+	int (*vidioc_reqbufs)(struct file *file, void *fh,
+			      struct v4l2_requestbuffers *b);
+	int (*vidioc_querybuf)(struct file *file, void *fh,
+			       struct v4l2_buffer *b);
+	int (*vidioc_qbuf)(struct file *file, void *fh,
+			   struct v4l2_buffer *b);
+	int (*vidioc_expbuf)(struct file *file, void *fh,
+			     struct v4l2_exportbuffer *e);
+	int (*vidioc_dqbuf)(struct file *file, void *fh,
+			    struct v4l2_buffer *b);
 
-	int (*vidioc_create_bufs)(struct file *file, void *fh, struct v4l2_create_buffers *b);
-	int (*vidioc_prepare_buf)(struct file *file, void *fh, struct v4l2_buffer *b);
+	int (*vidioc_create_bufs)(struct file *file, void *fh,
+				  struct v4l2_create_buffers *b);
+	int (*vidioc_prepare_buf)(struct file *file, void *fh,
+				  struct v4l2_buffer *b);
 
-	int (*vidioc_overlay) (struct file *file, void *fh, unsigned int i);
-	int (*vidioc_g_fbuf)   (struct file *file, void *fh,
-				struct v4l2_framebuffer *a);
-	int (*vidioc_s_fbuf)   (struct file *file, void *fh,
-				const struct v4l2_framebuffer *a);
+	int (*vidioc_overlay)(struct file *file, void *fh, unsigned int i);
+	int (*vidioc_g_fbuf)(struct file *file, void *fh,
+			     struct v4l2_framebuffer *a);
+	int (*vidioc_s_fbuf)(struct file *file, void *fh,
+			     const struct v4l2_framebuffer *a);
 
 		/* Stream on/off */
-	int (*vidioc_streamon) (struct file *file, void *fh, enum v4l2_buf_type i);
-	int (*vidioc_streamoff)(struct file *file, void *fh, enum v4l2_buf_type i);
+	int (*vidioc_streamon)(struct file *file, void *fh,
+			       enum v4l2_buf_type i);
+	int (*vidioc_streamoff)(struct file *file, void *fh,
+				enum v4l2_buf_type i);
 
-		/* Standard handling
-			ENUMSTD is handled by videodev.c
+		/*
+		 * Standard handling
+		 *
+		 * Note: ENUMSTD is handled by videodev.c
 		 */
-	int (*vidioc_g_std) (struct file *file, void *fh, v4l2_std_id *norm);
-	int (*vidioc_s_std) (struct file *file, void *fh, v4l2_std_id norm);
-	int (*vidioc_querystd) (struct file *file, void *fh, v4l2_std_id *a);
+	int (*vidioc_g_std)(struct file *file, void *fh, v4l2_std_id *norm);
+	int (*vidioc_s_std)(struct file *file, void *fh, v4l2_std_id norm);
+	int (*vidioc_querystd)(struct file *file, void *fh, v4l2_std_id *a);
 
 		/* Input handling */
 	int (*vidioc_enum_input)(struct file *file, void *fh,
 				 struct v4l2_input *inp);
-	int (*vidioc_g_input)   (struct file *file, void *fh, unsigned int *i);
-	int (*vidioc_s_input)   (struct file *file, void *fh, unsigned int i);
+	int (*vidioc_g_input)(struct file *file, void *fh, unsigned int *i);
+	int (*vidioc_s_input)(struct file *file, void *fh, unsigned int i);
 
 		/* Output handling */
-	int (*vidioc_enum_output) (struct file *file, void *fh,
+	int (*vidioc_enum_output)(struct file *file, void *fh,
 				  struct v4l2_output *a);
-	int (*vidioc_g_output)   (struct file *file, void *fh, unsigned int *i);
-	int (*vidioc_s_output)   (struct file *file, void *fh, unsigned int i);
+	int (*vidioc_g_output)(struct file *file, void *fh, unsigned int *i);
+	int (*vidioc_s_output)(struct file *file, void *fh, unsigned int i);
 
 		/* Control handling */
-	int (*vidioc_queryctrl)        (struct file *file, void *fh,
-					struct v4l2_queryctrl *a);
-	int (*vidioc_query_ext_ctrl)   (struct file *file, void *fh,
-					struct v4l2_query_ext_ctrl *a);
-	int (*vidioc_g_ctrl)           (struct file *file, void *fh,
-					struct v4l2_control *a);
-	int (*vidioc_s_ctrl)           (struct file *file, void *fh,
-					struct v4l2_control *a);
-	int (*vidioc_g_ext_ctrls)      (struct file *file, void *fh,
-					struct v4l2_ext_controls *a);
-	int (*vidioc_s_ext_ctrls)      (struct file *file, void *fh,
-					struct v4l2_ext_controls *a);
-	int (*vidioc_try_ext_ctrls)    (struct file *file, void *fh,
-					struct v4l2_ext_controls *a);
-	int (*vidioc_querymenu)        (struct file *file, void *fh,
-					struct v4l2_querymenu *a);
+	int (*vidioc_queryctrl)(struct file *file, void *fh,
+				struct v4l2_queryctrl *a);
+	int (*vidioc_query_ext_ctrl)(struct file *file, void *fh,
+				     struct v4l2_query_ext_ctrl *a);
+	int (*vidioc_g_ctrl)(struct file *file, void *fh,
+			     struct v4l2_control *a);
+	int (*vidioc_s_ctrl)(struct file *file, void *fh,
+			     struct v4l2_control *a);
+	int (*vidioc_g_ext_ctrls)(struct file *file, void *fh,
+				  struct v4l2_ext_controls *a);
+	int (*vidioc_s_ext_ctrls)(struct file *file, void *fh,
+				  struct v4l2_ext_controls *a);
+	int (*vidioc_try_ext_ctrls)(struct file *file, void *fh,
+				    struct v4l2_ext_controls *a);
+	int (*vidioc_querymenu)(struct file *file, void *fh,
+				struct v4l2_querymenu *a);
 
 	/* Audio ioctls */
-	int (*vidioc_enumaudio)        (struct file *file, void *fh,
-					struct v4l2_audio *a);
-	int (*vidioc_g_audio)          (struct file *file, void *fh,
-					struct v4l2_audio *a);
-	int (*vidioc_s_audio)          (struct file *file, void *fh,
-					const struct v4l2_audio *a);
+	int (*vidioc_enumaudio)(struct file *file, void *fh,
+				struct v4l2_audio *a);
+	int (*vidioc_g_audio)(struct file *file, void *fh,
+			      struct v4l2_audio *a);
+	int (*vidioc_s_audio)(struct file *file, void *fh,
+			      const struct v4l2_audio *a);
 
 	/* Audio out ioctls */
-	int (*vidioc_enumaudout)       (struct file *file, void *fh,
-					struct v4l2_audioout *a);
-	int (*vidioc_g_audout)         (struct file *file, void *fh,
-					struct v4l2_audioout *a);
-	int (*vidioc_s_audout)         (struct file *file, void *fh,
-					const struct v4l2_audioout *a);
-	int (*vidioc_g_modulator)      (struct file *file, void *fh,
-					struct v4l2_modulator *a);
-	int (*vidioc_s_modulator)      (struct file *file, void *fh,
-					const struct v4l2_modulator *a);
+	int (*vidioc_enumaudout)(struct file *file, void *fh,
+				 struct v4l2_audioout *a);
+	int (*vidioc_g_audout)(struct file *file, void *fh,
+			       struct v4l2_audioout *a);
+	int (*vidioc_s_audout)(struct file *file, void *fh,
+			       const struct v4l2_audioout *a);
+	int (*vidioc_g_modulator)(struct file *file, void *fh,
+				  struct v4l2_modulator *a);
+	int (*vidioc_s_modulator)(struct file *file, void *fh,
+				  const struct v4l2_modulator *a);
 	/* Crop ioctls */
-	int (*vidioc_cropcap)          (struct file *file, void *fh,
-					struct v4l2_cropcap *a);
-	int (*vidioc_g_crop)           (struct file *file, void *fh,
-					struct v4l2_crop *a);
-	int (*vidioc_s_crop)           (struct file *file, void *fh,
-					const struct v4l2_crop *a);
-	int (*vidioc_g_selection)      (struct file *file, void *fh,
-					struct v4l2_selection *s);
-	int (*vidioc_s_selection)      (struct file *file, void *fh,
-					struct v4l2_selection *s);
+	int (*vidioc_cropcap)(struct file *file, void *fh,
+			      struct v4l2_cropcap *a);
+	int (*vidioc_g_crop)(struct file *file, void *fh,
+			     struct v4l2_crop *a);
+	int (*vidioc_s_crop)(struct file *file, void *fh,
+			     const struct v4l2_crop *a);
+	int (*vidioc_g_selection)(struct file *file, void *fh,
+				  struct v4l2_selection *s);
+	int (*vidioc_s_selection)(struct file *file, void *fh,
+				  struct v4l2_selection *s);
 	/* Compression ioctls */
-	int (*vidioc_g_jpegcomp)       (struct file *file, void *fh,
-					struct v4l2_jpegcompression *a);
-	int (*vidioc_s_jpegcomp)       (struct file *file, void *fh,
-					const struct v4l2_jpegcompression *a);
-	int (*vidioc_g_enc_index)      (struct file *file, void *fh,
-					struct v4l2_enc_idx *a);
-	int (*vidioc_encoder_cmd)      (struct file *file, void *fh,
-					struct v4l2_encoder_cmd *a);
-	int (*vidioc_try_encoder_cmd)  (struct file *file, void *fh,
-					struct v4l2_encoder_cmd *a);
-	int (*vidioc_decoder_cmd)      (struct file *file, void *fh,
-					struct v4l2_decoder_cmd *a);
-	int (*vidioc_try_decoder_cmd)  (struct file *file, void *fh,
-					struct v4l2_decoder_cmd *a);
+	int (*vidioc_g_jpegcomp)(struct file *file, void *fh,
+				 struct v4l2_jpegcompression *a);
+	int (*vidioc_s_jpegcomp)(struct file *file, void *fh,
+				 const struct v4l2_jpegcompression *a);
+	int (*vidioc_g_enc_index)(struct file *file, void *fh,
+				  struct v4l2_enc_idx *a);
+	int (*vidioc_encoder_cmd)(struct file *file, void *fh,
+				  struct v4l2_encoder_cmd *a);
+	int (*vidioc_try_encoder_cmd)(struct file *file, void *fh,
+				      struct v4l2_encoder_cmd *a);
+	int (*vidioc_decoder_cmd)(struct file *file, void *fh,
+				  struct v4l2_decoder_cmd *a);
+	int (*vidioc_try_decoder_cmd)(struct file *file, void *fh,
+				      struct v4l2_decoder_cmd *a);
 
 	/* Stream type-dependent parameter ioctls */
-	int (*vidioc_g_parm)           (struct file *file, void *fh,
-					struct v4l2_streamparm *a);
-	int (*vidioc_s_parm)           (struct file *file, void *fh,
-					struct v4l2_streamparm *a);
+	int (*vidioc_g_parm)(struct file *file, void *fh,
+			     struct v4l2_streamparm *a);
+	int (*vidioc_s_parm)(struct file *file, void *fh,
+			     struct v4l2_streamparm *a);
 
 	/* Tuner ioctls */
-	int (*vidioc_g_tuner)          (struct file *file, void *fh,
-					struct v4l2_tuner *a);
-	int (*vidioc_s_tuner)          (struct file *file, void *fh,
-					const struct v4l2_tuner *a);
-	int (*vidioc_g_frequency)      (struct file *file, void *fh,
-					struct v4l2_frequency *a);
-	int (*vidioc_s_frequency)      (struct file *file, void *fh,
-					const struct v4l2_frequency *a);
-	int (*vidioc_enum_freq_bands) (struct file *file, void *fh,
-				    struct v4l2_frequency_band *band);
+	int (*vidioc_g_tuner)(struct file *file, void *fh,
+			      struct v4l2_tuner *a);
+	int (*vidioc_s_tuner)(struct file *file, void *fh,
+			      const struct v4l2_tuner *a);
+	int (*vidioc_g_frequency)(struct file *file, void *fh,
+				  struct v4l2_frequency *a);
+	int (*vidioc_s_frequency)(struct file *file, void *fh,
+				  const struct v4l2_frequency *a);
+	int (*vidioc_enum_freq_bands)(struct file *file, void *fh,
+				      struct v4l2_frequency_band *band);
 
 	/* Sliced VBI cap */
-	int (*vidioc_g_sliced_vbi_cap) (struct file *file, void *fh,
-					struct v4l2_sliced_vbi_cap *a);
+	int (*vidioc_g_sliced_vbi_cap)(struct file *file, void *fh,
+				       struct v4l2_sliced_vbi_cap *a);
 
 	/* Log status ioctl */
-	int (*vidioc_log_status)       (struct file *file, void *fh);
+	int (*vidioc_log_status)(struct file *file, void *fh);
 
-	int (*vidioc_s_hw_freq_seek)   (struct file *file, void *fh,
-					const struct v4l2_hw_freq_seek *a);
+	int (*vidioc_s_hw_freq_seek)(struct file *file, void *fh,
+				     const struct v4l2_hw_freq_seek *a);
 
 	/* Debugging ioctls */
 #ifdef CONFIG_VIDEO_ADV_DEBUG
-	int (*vidioc_g_register)       (struct file *file, void *fh,
-					struct v4l2_dbg_register *reg);
-	int (*vidioc_s_register)       (struct file *file, void *fh,
-					const struct v4l2_dbg_register *reg);
+	int (*vidioc_g_register)(struct file *file, void *fh,
+				 struct v4l2_dbg_register *reg);
+	int (*vidioc_s_register)(struct file *file, void *fh,
+				 const struct v4l2_dbg_register *reg);
 
-	int (*vidioc_g_chip_info)      (struct file *file, void *fh,
-					struct v4l2_dbg_chip_info *chip);
+	int (*vidioc_g_chip_info)(struct file *file, void *fh,
+				  struct v4l2_dbg_chip_info *chip);
 #endif
 
-	int (*vidioc_enum_framesizes)   (struct file *file, void *fh,
-					 struct v4l2_frmsizeenum *fsize);
+	int (*vidioc_enum_framesizes)(struct file *file, void *fh,
+				      struct v4l2_frmsizeenum *fsize);
 
-	int (*vidioc_enum_frameintervals) (struct file *file, void *fh,
-					   struct v4l2_frmivalenum *fival);
+	int (*vidioc_enum_frameintervals)(struct file *file, void *fh,
+					  struct v4l2_frmivalenum *fival);
 
 	/* DV Timings IOCTLs */
-	int (*vidioc_s_dv_timings) (struct file *file, void *fh,
-				    struct v4l2_dv_timings *timings);
-	int (*vidioc_g_dv_timings) (struct file *file, void *fh,
-				    struct v4l2_dv_timings *timings);
-	int (*vidioc_query_dv_timings) (struct file *file, void *fh,
-				    struct v4l2_dv_timings *timings);
-	int (*vidioc_enum_dv_timings) (struct file *file, void *fh,
-				    struct v4l2_enum_dv_timings *timings);
-	int (*vidioc_dv_timings_cap) (struct file *file, void *fh,
-				    struct v4l2_dv_timings_cap *cap);
-	int (*vidioc_g_edid) (struct file *file, void *fh, struct v4l2_edid *edid);
-	int (*vidioc_s_edid) (struct file *file, void *fh, struct v4l2_edid *edid);
+	int (*vidioc_s_dv_timings)(struct file *file, void *fh,
+				   struct v4l2_dv_timings *timings);
+	int (*vidioc_g_dv_timings)(struct file *file, void *fh,
+				   struct v4l2_dv_timings *timings);
+	int (*vidioc_query_dv_timings)(struct file *file, void *fh,
+				       struct v4l2_dv_timings *timings);
+	int (*vidioc_enum_dv_timings)(struct file *file, void *fh,
+				      struct v4l2_enum_dv_timings *timings);
+	int (*vidioc_dv_timings_cap)(struct file *file, void *fh,
+				     struct v4l2_dv_timings_cap *cap);
+	int (*vidioc_g_edid)(struct file *file, void *fh,
+			     struct v4l2_edid *edid);
+	int (*vidioc_s_edid)(struct file *file, void *fh,
+			     struct v4l2_edid *edid);
 
-	int (*vidioc_subscribe_event)  (struct v4l2_fh *fh,
-					const struct v4l2_event_subscription *sub);
+	int (*vidioc_subscribe_event)(struct v4l2_fh *fh,
+				      const struct v4l2_event_subscription *sub);
 	int (*vidioc_unsubscribe_event)(struct v4l2_fh *fh,
 					const struct v4l2_event_subscription *sub);
 
 	/* For other private ioctls */
-	long (*vidioc_default)	       (struct file *file, void *fh,
-					bool valid_prio, unsigned int cmd, void *arg);
+	long (*vidioc_default)(struct file *file, void *fh,
+			       bool valid_prio, unsigned int cmd, void *arg);
 };
 
 
@@ -573,38 +586,123 @@
 #define V4L2_DEV_DEBUG_POLL		0x10
 
 /*  Video standard functions  */
-extern const char *v4l2_norm_to_name(v4l2_std_id id);
-extern void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod);
-extern int v4l2_video_std_construct(struct v4l2_standard *vs,
-				    int id, const char *name);
-/* Prints the ioctl in a human-readable format. If prefix != NULL,
-   then do printk(KERN_DEBUG "%s: ", prefix) first. */
-extern void v4l_printk_ioctl(const char *prefix, unsigned int cmd);
 
-/* Internal use only: get the mutex (if any) that we need to lock for the
-   given command. */
+/**
+ * v4l2_norm_to_name - Ancillary routine to analog TV standard name from its ID.
+ *
+ * @id:	analog TV standard ID.
+ *
+ * Return: returns a string with the name of the analog TV standard.
+ * If the standard is not found or if @id points to multiple standard,
+ * it returns "Unknown".
+ */
+const char *v4l2_norm_to_name(v4l2_std_id id);
+
+/**
+ * v4l2_video_std_frame_period - Ancillary routine that fills a
+ *	struct &v4l2_fract pointer with the default framerate fraction.
+ *
+ * @id: analog TV sdandard ID.
+ * @frameperiod: struct &v4l2_fract pointer to be filled
+ *
+ */
+void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod);
+
+/**
+ * v4l2_video_std_construct - Ancillary routine that fills in the fields of
+ *	a &v4l2_standard structure according to the @id parameter.
+ *
+ * @vs: struct &v4l2_standard pointer to be filled
+ * @id: analog TV sdandard ID.
+ * @name: name of the standard to be used
+ *
+ * .. note::
+ *
+ *    This ancillary routine is obsolete. Shouldn't be used on newer drivers.
+ */
+int v4l2_video_std_construct(struct v4l2_standard *vs,
+				    int id, const char *name);
+
+/**
+ * v4l_printk_ioctl - Ancillary routine that prints the ioctl in a
+ *	human-readable format.
+ *
+ * @prefix: prefix to be added at the ioctl prints.
+ * @cmd: ioctl name
+ *
+ * .. note::
+ *
+ *    If prefix != %NULL, then it will issue a
+ *    ``printk(KERN_DEBUG "%s: ", prefix)`` first.
+ */
+void v4l_printk_ioctl(const char *prefix, unsigned int cmd);
+
 struct video_device;
-extern struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev, unsigned cmd);
+
+
+/**
+ * v4l2_ioctl_get_lock - get the mutex (if any) that it is need to lock for
+ *	a given command.
+ *
+ * @vdev: Pointer to struct &video_device.
+ * @cmd: Ioctl name.
+ *
+ * .. note:: Internal use only. Should not be used outside V4L2 core.
+ */
+struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev, unsigned int cmd);
 
 /* names for fancy debug output */
 extern const char *v4l2_field_names[];
 extern const char *v4l2_type_names[];
 
 #ifdef CONFIG_COMPAT
-/* 32 Bits compatibility layer for 64 bits processors */
-extern long v4l2_compat_ioctl32(struct file *file, unsigned int cmd,
-				unsigned long arg);
+/**
+ * v4l2_compat_ioctl32 -32 Bits compatibility layer for 64 bits processors
+ *
+ * @file: Pointer to struct &file.
+ * @cmd: Ioctl name.
+ * @arg: Ioctl argument.
+ */
+long int v4l2_compat_ioctl32(struct file *file, unsigned int cmd,
+			     unsigned long arg);
 #endif
 
-typedef long (*v4l2_kioctl)(struct file *file,
-		unsigned int cmd, void *arg);
+/**
+ * typedef v4l2_kioctl - Typedef used to pass an ioctl handler.
+ *
+ * @file: Pointer to struct &file.
+ * @cmd: Ioctl name.
+ * @arg: Ioctl argument.
+ */
+typedef long (*v4l2_kioctl)(struct file *file, unsigned int cmd, void *arg);
 
-/* Include support for obsoleted stuff */
-extern long video_usercopy(struct file *file, unsigned int cmd,
-				unsigned long arg, v4l2_kioctl func);
+/**
+ * video_usercopy - copies data from/to userspace memory when an ioctl is
+ *	issued.
+ *
+ * @file: Pointer to struct &file.
+ * @cmd: Ioctl name.
+ * @arg: Ioctl argument.
+ * @func: function that will handle the ioctl
+ *
+ * .. note::
+ *
+ *    This routine should be used only inside the V4L2 core.
+ */
+long int video_usercopy(struct file *file, unsigned int cmd,
+			unsigned long int arg, v4l2_kioctl func);
 
-/* Standard handlers for V4L ioctl's */
-extern long video_ioctl2(struct file *file,
-			unsigned int cmd, unsigned long arg);
+/**
+ * video_ioctl2 - Handles a V4L2 ioctl.
+ *
+ * @file: Pointer to struct &file.
+ * @cmd: Ioctl name.
+ * @arg: Ioctl argument.
+ *
+ * Method used to hancle an ioctl. Should be used to fill the
+ * &v4l2_ioctl_ops.unlocked_ioctl on all V4L2 drivers.
+ */
+long int video_ioctl2(struct file *file,
+		      unsigned int cmd, unsigned long int arg);
 
 #endif /* _V4L2_IOCTL_H */
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h
index 28c3f9d..2634d9d 100644
--- a/include/media/v4l2-mc.h
+++ b/include/media/v4l2-mc.h
@@ -53,7 +53,7 @@
 };
 
 /**
- * enum if_vid_dec_index - video IF-PLL pad index for
+ * enum if_vid_dec_pad_index - video IF-PLL pad index for
  *			   MEDIA_ENT_F_IF_VID_DECODER
  *
  * @IF_VID_DEC_PAD_IF_INPUT:	video Intermediate Frequency (IF) sink pad
@@ -68,7 +68,7 @@
 };
 
 /**
- * enum if_aud_dec_index - audio/sound IF-PLL pad index for
+ * enum if_aud_dec_pad_index - audio/sound IF-PLL pad index for
  *			   MEDIA_ENT_F_IF_AUD_DECODER
  *
  * @IF_AUD_DEC_PAD_IF_INPUT:	audio Intermediate Frequency (IF) sink pad
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 5a9597d..1b35534 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -41,9 +41,9 @@
  *		This function does not have to (and will usually not) wait
  *		until the device enters a state when it can be stopped.
  * @lock:	optional. Define a driver's own lock callback, instead of using
- *		m2m_ctx->q_lock.
+ *		&v4l2_m2m_ctx->q_lock.
  * @unlock:	optional. Define a driver's own unlock callback, instead of
- *		using m2m_ctx->q_lock.
+ *		using &v4l2_m2m_ctx->q_lock.
  */
 struct v4l2_m2m_ops {
 	void (*device_run)(void *priv);
@@ -55,29 +55,51 @@
 
 struct v4l2_m2m_dev;
 
+/**
+ * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be
+ *	processed
+ *
+ * @q:		pointer to struct &vb2_queue
+ * @rdy_queue:	List of V4L2 mem-to-mem queues
+ * @rdy_spinlock: spin lock to protect the struct usage
+ * @num_rdy:	number of buffers ready to be processed
+ * @buffered:	is the queue buffered?
+ *
+ * Queue for buffers ready to be processed as soon as this
+ * instance receives access to the device.
+ */
+
 struct v4l2_m2m_queue_ctx {
-/* private: internal use only */
 	struct vb2_queue	q;
 
-	/* Queue for buffers ready to be processed as soon as this
-	 * instance receives access to the device */
 	struct list_head	rdy_queue;
 	spinlock_t		rdy_spinlock;
 	u8			num_rdy;
 	bool			buffered;
 };
 
+/**
+ * struct v4l2_m2m_ctx - Memory to memory context structure
+ *
+ * @q_lock: struct &mutex lock
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ * @cap_q_ctx: Capture (output to memory) queue context
+ * @out_q_ctx: Output (input from memory) queue context
+ * @queue: List of memory to memory contexts
+ * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c:
+ *		%TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
+ * @finished: Wait queue used to signalize when a job queue finished.
+ * @priv: Instance private data
+ */
 struct v4l2_m2m_ctx {
 	/* optional cap/out vb2 queues lock */
 	struct mutex			*q_lock;
 
-/* private: internal use only */
+	/* internal use only */
 	struct v4l2_m2m_dev		*m2m_dev;
 
-	/* Capture (output to memory) queue context */
 	struct v4l2_m2m_queue_ctx	cap_q_ctx;
 
-	/* Output (input from memory) queue context */
 	struct v4l2_m2m_queue_ctx	out_q_ctx;
 
 	/* For device job queue */
@@ -85,22 +107,75 @@
 	unsigned long			job_flags;
 	wait_queue_head_t		finished;
 
-	/* Instance private data */
 	void				*priv;
 };
 
+/**
+ * struct v4l2_m2m_buffer - Memory to memory buffer
+ *
+ * @vb: pointer to struct &vb2_v4l2_buffer
+ * @list: list of m2m buffers
+ */
 struct v4l2_m2m_buffer {
 	struct vb2_v4l2_buffer	vb;
 	struct list_head	list;
 };
 
+/**
+ * v4l2_m2m_get_curr_priv() - return driver private data for the currently
+ * running instance or NULL if no instance is running
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ */
 void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);
 
+/**
+ * v4l2_m2m_get_vq() - return vb2_queue for the given type
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
+ */
 struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
 				       enum v4l2_buf_type type);
 
+/**
+ * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
+ * the pending job queue and add it if so.
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ *
+ * There are three basic requirements an instance has to meet to be able to run:
+ * 1) at least one source buffer has to be queued,
+ * 2) at least one destination buffer has to be queued,
+ * 3) streaming has to be on.
+ *
+ * If a queue is buffered (for example a decoder hardware ringbuffer that has
+ * to be drained before doing streamoff), allow scheduling without v4l2 buffers
+ * on that queue.
+ *
+ * There may also be additional, custom requirements. In such case the driver
+ * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
+ * return 1 if the instance is ready.
+ * An example of the above could be an instance that requires more than one
+ * src/dst buffer per transaction.
+ */
 void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);
 
+/**
+ * v4l2_m2m_job_finish() - inform the framework that a job has been finished
+ * and have it clean up
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ *
+ * Called by a driver to yield back the device after it has finished with it.
+ * Should be called as soon as possible after reaching a state which allows
+ * other instances to take control of the device.
+ *
+ * This function has to be called only after &v4l2_m2m_ops->device_run
+ * callback has been called on the driver. To prevent recursion, it should
+ * not be called directly from the &v4l2_m2m_ops->device_run callback though.
+ */
 void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
 			 struct v4l2_m2m_ctx *m2m_ctx);
 
@@ -110,38 +185,165 @@
 	vb2_buffer_done(&buf->vb2_buf, state);
 }
 
+/**
+ * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @reqbufs: pointer to struct &v4l2_requestbuffers
+ */
 int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		     struct v4l2_requestbuffers *reqbufs);
 
+/**
+ * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @buf: pointer to struct &v4l2_buffer
+ *
+ * See v4l2_m2m_mmap() documentation for details.
+ */
 int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		      struct v4l2_buffer *buf);
 
+/**
+ * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
+ * the type
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @buf: pointer to struct &v4l2_buffer
+ */
 int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		  struct v4l2_buffer *buf);
+
+/**
+ * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
+ * the type
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @buf: pointer to struct &v4l2_buffer
+ */
 int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		   struct v4l2_buffer *buf);
+
+/**
+ * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
+ * the type
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @buf: pointer to struct &v4l2_buffer
+ */
 int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 			 struct v4l2_buffer *buf);
+
+/**
+ * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
+ * on the type
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @create: pointer to struct &v4l2_create_buffers
+ */
 int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 			 struct v4l2_create_buffers *create);
 
+/**
+ * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
+ * the type
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @eb: pointer to struct &v4l2_exportbuffer
+ */
 int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		   struct v4l2_exportbuffer *eb);
 
+/**
+ * v4l2_m2m_streamon() - turn on streaming for a video queue
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
+ */
 int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		      enum v4l2_buf_type type);
+
+/**
+ * v4l2_m2m_streamoff() - turn off streaming for a video queue
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
+ */
 int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		       enum v4l2_buf_type type);
 
+/**
+ * v4l2_m2m_poll() - poll replacement, for destination buffers only
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @wait: pointer to struct &poll_table_struct
+ *
+ * Call from the driver's poll() function. Will poll both queues. If a buffer
+ * is available to dequeue (with dqbuf) from the source queue, this will
+ * indicate that a non-blocking write can be performed, while read will be
+ * returned in case of the destination queue.
+ */
 unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 			   struct poll_table_struct *wait);
 
+/**
+ * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @vma: pointer to struct &vm_area_struct
+ *
+ * Call from driver's mmap() function. Will handle mmap() for both queues
+ * seamlessly for videobuffer, which will receive normal per-queue offsets and
+ * proper videobuf queue pointers. The differentiation is made outside videobuf
+ * by adding a predefined offset to buffers from one of the queues and
+ * subtracting it before passing it back to videobuf. Only drivers (and
+ * thus applications) receive modified offsets.
+ */
 int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		  struct vm_area_struct *vma);
 
+/**
+ * v4l2_m2m_init() - initialize per-driver m2m data
+ *
+ * @m2m_ops: pointer to struct v4l2_m2m_ops
+ *
+ * Usually called from driver's ``probe()`` function.
+ *
+ * Return: returns an opaque pointer to the internal data to handle M2M context
+ */
 struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);
+
+/**
+ * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ *
+ * Usually called from driver's ``remove()`` function.
+ */
 void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
 
+/**
+ * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ * @drv_priv: driver's instance private data
+ * @queue_init: a callback for queue type-specific initialization function
+ *	to be used for initializing videobuf_queues
+ *
+ * Usually called from driver's ``open()`` function.
+ */
 struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
 		void *drv_priv,
 		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));
@@ -158,8 +360,23 @@
 	m2m_ctx->cap_q_ctx.buffered = buffered;
 }
 
+/**
+ * v4l2_m2m_ctx_release() - release m2m context
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ *
+ * Usually called from driver's release() function.
+ */
 void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
 
+/**
+ * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @vbuf: pointer to struct &vb2_v4l2_buffer
+ *
+ * Call from videobuf_queue_ops->ops->buf_queue, videobuf_queue_ops callback.
+ */
 void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
 			struct vb2_v4l2_buffer *vbuf);
 
@@ -167,7 +384,7 @@
  * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
  * use
  *
- * @m2m_ctx: pointer to struct v4l2_m2m_ctx
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  */
 static inline
 unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
@@ -176,10 +393,10 @@
 }
 
 /**
- * v4l2_m2m_num_src_bufs_ready() - return the number of destination buffers
+ * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
  * ready for use
  *
- * @m2m_ctx: pointer to struct v4l2_m2m_ctx
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  */
 static inline
 unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
@@ -187,13 +404,18 @@
 	return m2m_ctx->cap_q_ctx.num_rdy;
 }
 
+/**
+ * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
+ *
+ * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx
+ */
 void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);
 
 /**
  * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
  * buffers
  *
- * @m2m_ctx: pointer to struct v4l2_m2m_ctx
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  */
 static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
 {
@@ -204,7 +426,7 @@
  * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
  * ready buffers
  *
- * @m2m_ctx: pointer to struct v4l2_m2m_ctx
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  */
 static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
 {
@@ -214,7 +436,7 @@
 /**
  * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
  *
- * @m2m_ctx: pointer to struct v4l2_m2m_ctx
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  */
 static inline
 struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
@@ -225,7 +447,7 @@
 /**
  * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
  *
- * @m2m_ctx: pointer to struct v4l2_m2m_ctx
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  */
 static inline
 struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
@@ -233,13 +455,19 @@
 	return &m2m_ctx->cap_q_ctx.q;
 }
 
+/**
+ * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
+ * return it
+ *
+ * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx
+ */
 void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);
 
 /**
  * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
  * buffers and return it
  *
- * @m2m_ctx: pointer to struct v4l2_m2m_ctx
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  */
 static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
 {
@@ -250,7 +478,7 @@
  * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
  * ready buffers and return it
  *
- * @m2m_ctx: pointer to struct v4l2_m2m_ctx
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
  */
 static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
 {
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 2a2240c..cf778c5 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -184,8 +184,6 @@
  *		     for it to be warned when the value of a control changes.
  *
  * @unsubscribe_event: remove event subscription from the control framework.
- *
- * @registered_async: the subdevice has been registered async.
  */
 struct v4l2_subdev_core_ops {
 	int (*log_status)(struct v4l2_subdev *sd);
@@ -211,11 +209,11 @@
 			       struct v4l2_event_subscription *sub);
 	int (*unsubscribe_event)(struct v4l2_subdev *sd, struct v4l2_fh *fh,
 				 struct v4l2_event_subscription *sub);
-	int (*registered_async)(struct v4l2_subdev *sd);
 };
 
 /**
- * struct s_radio - Callbacks used when v4l device was opened in radio mode.
+ * struct v4l2_subdev_tuner_ops - Callbacks used when v4l device was opened
+ *	in radio mode.
  *
  * @s_radio: callback for %VIDIOC_S_RADIO ioctl handler code.
  *
@@ -229,7 +227,7 @@
  *
  * @g_tuner: callback for %VIDIOC_G_TUNER ioctl handler code.
  *
- * @s_tuner: callback for %VIDIOC_S_TUNER ioctl handler code. &vt->type must be
+ * @s_tuner: callback for %VIDIOC_S_TUNER ioctl handler code. @vt->type must be
  *	     filled in. Normally done by video_ioctl2 or the
  *	     bridge driver.
  *
@@ -358,11 +356,7 @@
  * @s_stream: used to notify the driver that a video stream will start or has
  *	stopped.
  *
- * @cropcap: callback for %VIDIOC_CROPCAP ioctl handler code.
- *
- * @g_crop: callback for %VIDIOC_G_CROP ioctl handler code.
- *
- * @s_crop: callback for %VIDIOC_S_CROP ioctl handler code.
+ * @g_pixelaspect: callback to return the pixelaspect ratio.
  *
  * @g_parm: callback for %VIDIOC_G_PARM ioctl handler code.
  *
@@ -402,9 +396,7 @@
 	int (*g_tvnorms_output)(struct v4l2_subdev *sd, v4l2_std_id *std);
 	int (*g_input_status)(struct v4l2_subdev *sd, u32 *status);
 	int (*s_stream)(struct v4l2_subdev *sd, int enable);
-	int (*cropcap)(struct v4l2_subdev *sd, struct v4l2_cropcap *cc);
-	int (*g_crop)(struct v4l2_subdev *sd, struct v4l2_crop *crop);
-	int (*s_crop)(struct v4l2_subdev *sd, const struct v4l2_crop *crop);
+	int (*g_pixelaspect)(struct v4l2_subdev *sd, struct v4l2_fract *aspect);
 	int (*g_parm)(struct v4l2_subdev *sd, struct v4l2_streamparm *param);
 	int (*s_parm)(struct v4l2_subdev *sd, struct v4l2_streamparm *param);
 	int (*g_frame_interval)(struct v4l2_subdev *sd,
@@ -430,7 +422,7 @@
  *				  in video mode via the vbi device node.
  *
  *  @decode_vbi_line: video decoders that support sliced VBI need to implement
- *	this ioctl. Field p of the &struct v4l2_sliced_vbi_line is set to the
+ *	this ioctl. Field p of the &struct v4l2_decode_vbi_line is set to the
  *	start of the VBI data that was generated by the decoder. The driver
  *	then parses the sliced VBI data and sets the other fields in the
  *	struct accordingly. The pointer p is updated to point to the start of
@@ -773,7 +765,7 @@
  * @entity: pointer to &struct media_entity
  * @list: List of sub-devices
  * @owner: The owner is the same as the driver's &struct device owner.
- * @owner_v4l2_dev: true if the &sd->owner matches the owner of &v4l2_dev->dev
+ * @owner_v4l2_dev: true if the &sd->owner matches the owner of @v4l2_dev->dev
  *	ownner. Initialized by v4l2_device_register_subdev().
  * @flags: subdev flags. Can be:
  *   %V4L2_SUBDEV_FL_IS_I2C - Set this flag if this subdev is a i2c device;
@@ -783,9 +775,9 @@
  *   %V4L2_SUBDEV_FL_HAS_EVENTS -  Set this flag if this subdev generates
  *   events.
  *
- * @v4l2_dev: pointer to &struct v4l2_device
- * @ops: pointer to &struct v4l2_subdev_ops
- * @internal_ops: pointer to &struct v4l2_subdev_internal_ops.
+ * @v4l2_dev: pointer to struct &v4l2_device
+ * @ops: pointer to struct &v4l2_subdev_ops
+ * @internal_ops: pointer to struct &v4l2_subdev_internal_ops.
  *	Never call these internal ops from within a driver!
  * @ctrl_handler: The control handler of this subdev. May be NULL.
  * @name: Name of the sub-device. Please notice that the name must be unique.
@@ -896,7 +888,7 @@
 }
 
 /**
- * v4l2_set_subdevdata - Sets V4L2 dev private host data
+ * v4l2_set_subdev_hostdata - Sets V4L2 dev private host data
  *
  * @sd: pointer to &struct v4l2_subdev
  * @p: pointer to the private data to be stored.
@@ -907,7 +899,7 @@
 }
 
 /**
- * v4l2_get_subdevdata - Gets V4L2 dev private data
+ * v4l2_get_subdev_hostdata - Gets V4L2 dev private data
  *
  * @sd: pointer to &struct v4l2_subdev
  *
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index a4a9a55..ac5898a 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -20,6 +20,20 @@
 #define VB2_MAX_FRAME	(32)
 #define VB2_MAX_PLANES	(8)
 
+/**
+ * enum vb2_memory - type of memory model used to make the buffers visible
+ *	on userspace.
+ *
+ * @VB2_MEMORY_UNKNOWN:	Buffer status is unknown or it is not used yet on
+ *			userspace.
+ * @VB2_MEMORY_MMAP:	The buffers are allocated by the Kernel and it is
+ *			memory mapped via mmap() ioctl. This model is
+ *			also used when the user is using the buffers via
+ *			read() or write() system calls.
+ * @VB2_MEMORY_USERPTR:	The buffers was allocated in userspace and it is
+ *			memory mapped via mmap() ioctl.
+ * @VB2_MEMORY_DMABUF:	The buffers are passed to userspace via DMA buffer.
+ */
 enum vb2_memory {
 	VB2_MEMORY_UNKNOWN	= 0,
 	VB2_MEMORY_MMAP		= 1,
@@ -33,15 +47,15 @@
 /**
  * struct vb2_mem_ops - memory handling/memory allocator operations
  * @alloc:	allocate video memory and, optionally, allocator private data,
- *		return NULL on failure or a pointer to allocator private,
+ *		return ERR_PTR() on failure or a pointer to allocator private,
  *		per-buffer data on success; the returned private structure
- *		will then be passed as buf_priv argument to other ops in this
+ *		will then be passed as @buf_priv argument to other ops in this
  *		structure. Additional gfp_flags to use when allocating the
  *		are also passed to this operation. These flags are from the
  *		gfp_flags field of vb2_queue.
  * @put:	inform the allocator that the buffer will no longer be used;
  *		usually will result in the allocator freeing the buffer (if
- *		no other users of this buffer are present); the buf_priv
+ *		no other users of this buffer are present); the @buf_priv
  *		argument is the allocator private per-buffer structure
  *		previously returned from the alloc callback.
  * @get_dmabuf: acquire userspace memory for a hardware operation; used for
@@ -50,18 +64,18 @@
  *		 USERPTR memory types; vaddr is the address passed to the
  *		 videobuf layer when queuing a video buffer of USERPTR type;
  *		 should return an allocator private per-buffer structure
- *		 associated with the buffer on success, NULL on failure;
- *		 the returned private structure will then be passed as buf_priv
+ *		 associated with the buffer on success, ERR_PTR() on failure;
+ *		 the returned private structure will then be passed as @buf_priv
  *		 argument to other ops in this structure.
  * @put_userptr: inform the allocator that a USERPTR buffer will no longer
  *		 be used.
  * @attach_dmabuf: attach a shared struct dma_buf for a hardware operation;
  *		   used for DMABUF memory types; dev is the alloc device
- *		   dbuf is the shared dma_buf; returns NULL on failure;
+ *		   dbuf is the shared dma_buf; returns ERR_PTR() on failure;
  *		   allocator private per-buffer structure on success;
  *		   this needs to be used for further accesses to the buffer.
  * @detach_dmabuf: inform the exporter of the buffer that the current DMABUF
- *		   buffer is no longer used; the buf_priv argument is the
+ *		   buffer is no longer used; the @buf_priv argument is the
  *		   allocator private per-buffer structure previously returned
  *		   from the attach_dmabuf callback.
  * @map_dmabuf: request for access to the dmabuf from allocator; the allocator
@@ -95,11 +109,13 @@
  *
  *    #) Required ops for read/write access types: alloc, put, num_users, vaddr.
  *
- *    #) Required ops for DMABUF types: attach_dmabuf, detach_dmabuf, map_dmabuf, unmap_dmabuf.
+ *    #) Required ops for DMABUF types: attach_dmabuf, detach_dmabuf,
+ *       map_dmabuf, unmap_dmabuf.
  */
 struct vb2_mem_ops {
 	void		*(*alloc)(struct device *dev, unsigned long attrs,
-				  unsigned long size, enum dma_data_direction dma_dir,
+				  unsigned long size,
+				  enum dma_data_direction dma_dir,
 				  gfp_t gfp_flags);
 	void		(*put)(void *buf_priv);
 	struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags);
@@ -112,7 +128,8 @@
 	void		(*prepare)(void *buf_priv);
 	void		(*finish)(void *buf_priv);
 
-	void		*(*attach_dmabuf)(struct device *dev, struct dma_buf *dbuf,
+	void		*(*attach_dmabuf)(struct device *dev,
+					  struct dma_buf *dbuf,
 					  unsigned long size,
 					  enum dma_data_direction dma_dir);
 	void		(*detach_dmabuf)(void *buf_priv);
@@ -277,7 +294,7 @@
 /**
  * struct vb2_ops - driver-specific callbacks
  *
- * @queue_setup:	called from %VIDIOC_REQBUFS and %VIDIOC_CREATE_BUFS
+ * @queue_setup:	called from VIDIOC_REQBUFS() and VIDIOC_CREATE_BUFS()
  *			handlers before memory allocation. It can be called
  *			twice: if the original number of requested buffers
  *			could not be allocated, then it will be called a
@@ -288,11 +305,11 @@
  *			buffer in \*num_planes, the size of each plane should be
  *			set in the sizes\[\] array and optional per-plane
  *			allocator specific device in the alloc_devs\[\] array.
- *			When called from %VIDIOC_REQBUFS, \*num_planes == 0, the
- *			driver has to use the currently configured format to
+ *			When called from VIDIOC_REQBUFS,() \*num_planes == 0,
+ *			the driver has to use the currently configured format to
  *			determine the plane sizes and \*num_buffers is the total
  *			number of buffers that are being allocated. When called
- *			from %VIDIOC_CREATE_BUFS, \*num_planes != 0 and it
+ *			from VIDIOC_CREATE_BUFS,() \*num_planes != 0 and it
  *			describes the requested number of planes and sizes\[\]
  *			contains the requested plane sizes. If either
  *			\*num_planes or the requested sizes are invalid callback
@@ -311,11 +328,11 @@
  *			initialization failure (return != 0) will prevent
  *			queue setup from completing successfully; optional.
  * @buf_prepare:	called every time the buffer is queued from userspace
- *			and from the %VIDIOC_PREPARE_BUF ioctl; drivers may
+ *			and from the VIDIOC_PREPARE_BUF() ioctl; drivers may
  *			perform any initialization required before each
  *			hardware operation in this callback; drivers can
  *			access/modify the buffer here as it is still synced for
- *			the CPU; drivers that support %VIDIOC_CREATE_BUFS must
+ *			the CPU; drivers that support VIDIOC_CREATE_BUFS() must
  *			also validate the buffer size; if an error is returned,
  *			the buffer will not be queued in driver; optional.
  * @buf_finish:		called before every dequeue of the buffer back to
@@ -339,24 +356,25 @@
  *			driver can return an error if hardware fails, in that
  *			case all buffers that have been already given by
  *			the @buf_queue callback are to be returned by the driver
- *			by calling @vb2_buffer_done\(%VB2_BUF_STATE_QUEUED\).
+ *			by calling vb2_buffer_done() with %VB2_BUF_STATE_QUEUED.
  *			If you need a minimum number of buffers before you can
  *			start streaming, then set @min_buffers_needed in the
  *			vb2_queue structure. If that is non-zero then
- *			start_streaming won't be called until at least that
+ *			@start_streaming won't be called until at least that
  *			many buffers have been queued up by userspace.
  * @stop_streaming:	called when 'streaming' state must be disabled; driver
  *			should stop any DMA transactions or wait until they
  *			finish and give back all buffers it got from &buf_queue
- *			callback by calling @vb2_buffer_done\(\) with either
+ *			callback by calling vb2_buffer_done() with either
  *			%VB2_BUF_STATE_DONE or %VB2_BUF_STATE_ERROR; may use
  *			vb2_wait_for_all_buffers() function
  * @buf_queue:		passes buffer vb to the driver; driver may start
  *			hardware operation on this buffer; driver should give
  *			the buffer back by calling vb2_buffer_done() function;
- *			it is allways called after calling %VIDIOC_STREAMON ioctl;
- *			might be called before start_streaming callback if user
- *			pre-queued buffers before calling %VIDIOC_STREAMON.
+ *			it is allways called after calling VIDIOC_STREAMON()
+ *			ioctl; might be called before @start_streaming callback
+ *			if user pre-queued buffers before calling
+ *			VIDIOC_STREAMON().
  */
 struct vb2_ops {
 	int (*queue_setup)(struct vb2_queue *q,
@@ -378,7 +396,7 @@
 };
 
 /**
- * struct vb2_ops - driver-specific callbacks
+ * struct vb2_buf_ops - driver-specific callbacks
  *
  * @verify_planes_array: Verify that a given user space structure contains
  *			enough planes for the buffer. This is called
@@ -404,7 +422,7 @@
  *
  * @type:	private buffer type whose content is defined by the vb2-core
  *		caller. For example, for V4L2, it should match
- *		the V4L2_BUF_TYPE_* in include/uapi/linux/videodev2.h
+ *		the types defined on enum &v4l2_buf_type
  * @io_modes:	supported io methods (see vb2_io_modes enum)
  * @dev:	device to use for the default allocation context if the driver
  *		doesn't fill in the @alloc_devs array.
@@ -439,12 +457,12 @@
  *		Typically this is 0, but it may be e.g. GFP_DMA or __GFP_DMA32
  *		to force the buffer allocation to a specific memory zone.
  * @min_buffers_needed: the minimum number of buffers needed before
- *		start_streaming() can be called. Used when a DMA engine
+ *		@start_streaming can be called. Used when a DMA engine
  *		cannot be started unless at least this number of buffers
  *		have been queued into the driver.
  */
 /*
- * Private elements (won't appear at the DocBook):
+ * Private elements (won't appear at the uAPI book):
  * @mmap_lock:	private mutex used when buffers are allocated/freed/mmapped
  * @memory:	current memory type used
  * @bufs:	videobuf buffer structures
@@ -457,7 +475,7 @@
  * @done_wq:	waitqueue for processes waiting for buffers ready to be dequeued
  * @alloc_devs:	memory type/allocator-specific per-plane device
  * @streaming:	current streaming state
- * @start_streaming_called: start_streaming() was called successfully and we
+ * @start_streaming_called: @start_streaming was called successfully and we
  *		started streaming.
  * @error:	a fatal error occurred on the queue
  * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
@@ -536,36 +554,286 @@
 #endif
 };
 
+/**
+ * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
+ * @vb:		vb2_buffer to which the plane in question belongs to
+ * @plane_no:	plane number for which the address is to be returned
+ *
+ * This function returns a kernel virtual address of a given plane if
+ * such a mapping exist, NULL otherwise.
+ */
 void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no);
+
+/**
+ * vb2_plane_cookie() - Return allocator specific cookie for the given plane
+ * @vb:		vb2_buffer to which the plane in question belongs to
+ * @plane_no:	plane number for which the cookie is to be returned
+ *
+ * This function returns an allocator specific cookie for a given plane if
+ * available, NULL otherwise. The allocator should provide some simple static
+ * inline function, which would convert this cookie to the allocator specific
+ * type that can be used directly by the driver to access the buffer. This can
+ * be for example physical address, pointer to scatter list or IOMMU mapping.
+ */
 void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no);
 
+/**
+ * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
+ * @vb:		vb2_buffer returned from the driver
+ * @state:	either %VB2_BUF_STATE_DONE if the operation finished
+ *		successfully, %VB2_BUF_STATE_ERROR if the operation finished
+ *		with an error or %VB2_BUF_STATE_QUEUED if the driver wants to
+ *		requeue buffers. If start_streaming fails then it should return
+ *		buffers with state %VB2_BUF_STATE_QUEUED to put them back into
+ *		the queue.
+ *
+ * This function should be called by the driver after a hardware operation on
+ * a buffer is finished and the buffer may be returned to userspace. The driver
+ * cannot use this buffer anymore until it is queued back to it by videobuf
+ * by the means of &vb2_ops->buf_queue callback. Only buffers previously queued
+ * to the driver by &vb2_ops->buf_queue can be passed to this function.
+ *
+ * While streaming a buffer can only be returned in state DONE or ERROR.
+ * The start_streaming op can also return them in case the DMA engine cannot
+ * be started for some reason. In that case the buffers should be returned with
+ * state QUEUED.
+ */
 void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state);
+
+/**
+ * vb2_discard_done() - discard all buffers marked as DONE
+ * @q:		videobuf2 queue
+ *
+ * This function is intended to be used with suspend/resume operations. It
+ * discards all 'done' buffers as they would be too old to be requested after
+ * resume.
+ *
+ * Drivers must stop the hardware and synchronize with interrupt handlers and/or
+ * delayed works before calling this function to make sure no buffer will be
+ * touched by the driver and/or hardware.
+ */
 void vb2_discard_done(struct vb2_queue *q);
+
+/**
+ * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
+ * @q:		videobuf2 queue
+ *
+ * This function will wait until all buffers that have been given to the driver
+ * by &vb2_ops->buf_queue are given back to vb2 with vb2_buffer_done(). It
+ * doesn't call wait_prepare()/wait_finish() pair. It is intended to be called
+ * with all locks taken, for example from &vb2_ops->stop_streaming callback.
+ */
 int vb2_wait_for_all_buffers(struct vb2_queue *q);
 
+/**
+ * vb2_core_querybuf() - query video buffer information
+ * @q:		videobuf queue
+ * @index:	id number of the buffer
+ * @pb:		buffer struct passed from userspace
+ *
+ * Should be called from vidioc_querybuf ioctl handler in driver.
+ * The passed buffer should have been verified.
+ * This function fills the relevant information for the userspace.
+ */
 void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb);
+
+/**
+ * vb2_core_reqbufs() - Initiate streaming
+ * @q:		videobuf2 queue
+ * @memory: memory type
+ * @count: requested buffer count
+ *
+ * Should be called from vidioc_reqbufs ioctl handler of a driver.
+ *
+ * This function:
+ *
+ * #) verifies streaming parameters passed from the userspace,
+ * #) sets up the queue,
+ * #) negotiates number of buffers and planes per buffer with the driver
+ *    to be used during streaming,
+ * #) allocates internal buffer structures (struct vb2_buffer), according to
+ *    the agreed parameters,
+ * #) for MMAP memory type, allocates actual video memory, using the
+ *    memory handling/allocation routines provided during queue initialization
+ *
+ * If req->count is 0, all the memory will be freed instead.
+ * If the queue has been allocated previously (by a previous vb2_reqbufs) call
+ * and the queue is not busy, memory will be reallocated.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_reqbufs handler in driver.
+ */
 int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
 		unsigned int *count);
+
+/**
+ * vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs
+ * @q:		videobuf2 queue
+ * @memory: memory type
+ * @count: requested buffer count
+ * @requested_planes: number of planes requested
+ * @requested_sizes: array with the size of the planes
+ *
+ * Should be called from VIDIOC_CREATE_BUFS() ioctl handler of a driver.
+ * This function:
+ *
+ * #) verifies parameter sanity
+ * #) calls the .queue_setup() queue operation
+ * #) performs any necessary memory allocations
+ *
+ * Return: the return values from this function are intended to be directly
+ * returned from VIDIOC_CREATE_BUFS() handler in driver.
+ */
 int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
-		unsigned int *count, unsigned requested_planes,
-		const unsigned int requested_sizes[]);
+			 unsigned int *count, unsigned int requested_planes,
+			 const unsigned int requested_sizes[]);
+
+/**
+ * vb2_core_prepare_buf() - Pass ownership of a buffer from userspace
+ *			to the kernel
+ * @q:		videobuf2 queue
+ * @index:	id number of the buffer
+ * @pb:		buffer structure passed from userspace to vidioc_prepare_buf
+ *		handler in driver
+ *
+ * Should be called from vidioc_prepare_buf ioctl handler of a driver.
+ * The passed buffer should have been verified.
+ * This function calls buf_prepare callback in the driver (if provided),
+ * in which driver-specific buffer initialization can be performed,
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_prepare_buf handler in driver.
+ */
 int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
+
+/**
+ * vb2_core_qbuf() - Queue a buffer from userspace
+ *
+ * @q:		videobuf2 queue
+ * @index:	id number of the buffer
+ * @pb:		buffer structure passed from userspace to vidioc_qbuf handler
+ *		in driver
+ *
+ * Should be called from vidioc_qbuf ioctl handler of a driver.
+ * The passed buffer should have been verified.
+ *
+ * This function:
+ *
+ * #) if necessary, calls buf_prepare callback in the driver (if provided), in
+ *    which driver-specific buffer initialization can be performed,
+ * #) if streaming is on, queues the buffer in driver by the means of
+ *    &vb2_ops->buf_queue callback for processing.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_qbuf handler in driver.
+ */
 int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb);
+
+/**
+ * vb2_core_dqbuf() - Dequeue a buffer to the userspace
+ * @q:		videobuf2 queue
+ * @pindex:	pointer to the buffer index. May be NULL
+ * @pb:		buffer structure passed from userspace to vidioc_dqbuf handler
+ *		in driver
+ * @nonblocking: if true, this call will not sleep waiting for a buffer if no
+ *		 buffers ready for dequeuing are present. Normally the driver
+ *		 would be passing (file->f_flags & O_NONBLOCK) here
+ *
+ * Should be called from vidioc_dqbuf ioctl handler of a driver.
+ * The passed buffer should have been verified.
+ *
+ * This function:
+ *
+ * #) calls buf_finish callback in the driver (if provided), in which
+ *    driver can perform any additional operations that may be required before
+ *    returning the buffer to userspace, such as cache sync,
+ * #) the buffer struct members are filled with relevant information for
+ *    the userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_dqbuf handler in driver.
+ */
 int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
 		   bool nonblocking);
 
 int vb2_core_streamon(struct vb2_queue *q, unsigned int type);
 int vb2_core_streamoff(struct vb2_queue *q, unsigned int type);
 
+/**
+ * vb2_core_expbuf() - Export a buffer as a file descriptor
+ * @q:		videobuf2 queue
+ * @fd:		file descriptor associated with DMABUF (set by driver) *
+ * @type:	buffer type
+ * @index:	id number of the buffer
+ * @plane:	index of the plane to be exported, 0 for single plane queues
+ * @flags:	flags for newly created file, currently only O_CLOEXEC is
+ *		supported, refer to manual of open syscall for more details
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_expbuf handler in driver.
+ */
 int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
 		unsigned int index, unsigned int plane, unsigned int flags);
 
+/**
+ * vb2_core_queue_init() - initialize a videobuf2 queue
+ * @q:		videobuf2 queue; this structure should be allocated in driver
+ *
+ * The vb2_queue structure should be allocated by the driver. The driver is
+ * responsible of clearing it's content and setting initial values for some
+ * required entries before calling this function.
+ * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
+ * to the struct vb2_queue description in include/media/videobuf2-core.h
+ * for more information.
+ */
 int vb2_core_queue_init(struct vb2_queue *q);
+
+/**
+ * vb2_core_queue_release() - stop streaming, release the queue and free memory
+ * @q:		videobuf2 queue
+ *
+ * This function stops streaming and performs necessary clean ups, including
+ * freeing video buffer memory. The driver is responsible for freeing
+ * the vb2_queue structure itself.
+ */
 void vb2_core_queue_release(struct vb2_queue *q);
 
+/**
+ * vb2_queue_error() - signal a fatal error on the queue
+ * @q:		videobuf2 queue
+ *
+ * Flag that a fatal unrecoverable error has occurred and wake up all processes
+ * waiting on the queue. Polling will now set POLLERR and queuing and dequeuing
+ * buffers will return -EIO.
+ *
+ * The error flag will be cleared when cancelling the queue, either from
+ * vb2_streamoff or vb2_queue_release. Drivers should thus not call this
+ * function before starting the stream, otherwise the error flag will remain set
+ * until the queue is released when closing the device node.
+ */
 void vb2_queue_error(struct vb2_queue *q);
 
+/**
+ * vb2_mmap() - map video buffers into application address space
+ * @q:		videobuf2 queue
+ * @vma:	vma passed to the mmap file operation handler in the driver
+ *
+ * Should be called from mmap file operation handler of a driver.
+ * This function maps one plane of one of the available video buffers to
+ * userspace. To map whole video memory allocated on reqbufs, this function
+ * has to be called once per each plane per each buffer previously allocated.
+ *
+ * When the userspace application calls mmap, it passes to it an offset returned
+ * to it earlier by the means of vidioc_querybuf handler. That offset acts as
+ * a "cookie", which is then used to identify the plane to be mapped.
+ * This function finds a plane with a matching offset and a mapping is performed
+ * by the means of a provided memory operation.
+ *
+ * The return values from this function are intended to be directly returned
+ * from the mmap handler in driver.
+ */
 int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma);
+
 #ifndef CONFIG_MMU
 unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
 				    unsigned long addr,
@@ -573,15 +841,36 @@
 				    unsigned long pgoff,
 				    unsigned long flags);
 #endif
+
+/**
+ * vb2_core_poll() - implements poll userspace operation
+ * @q:		videobuf2 queue
+ * @file:	file argument passed to the poll file operation handler
+ * @wait:	wait argument passed to the poll file operation handler
+ *
+ * This function implements poll file operation handler for a driver.
+ * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
+ * be informed that the file descriptor of a video device is available for
+ * reading.
+ * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
+ * will be reported as available for writing.
+ *
+ * The return values from this function are intended to be directly returned
+ * from poll handler in driver.
+ */
 unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
-		poll_table *wait);
+			   poll_table *wait);
+
 size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
 		loff_t *ppos, int nonblock);
 size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
 		loff_t *ppos, int nonblock);
 
-/*
- * vb2_thread_fnc - callback function for use with vb2_thread
+/**
+ * typedef vb2_thread_fnc - callback function for use with vb2_thread
+ *
+ * @vb: pointer to struct &vb2_buffer
+ * @priv: pointer to a private pointer
  *
  * This is called whenever a buffer is dequeued in the thread.
  */
@@ -597,9 +886,11 @@
  * This starts a thread that will queue and dequeue until an error occurs
  * or @vb2_thread_stop is called.
  *
- * This function should not be used for anything else but the videobuf2-dvb
- * support. If you think you have another good use-case for this, then please
- * contact the linux-media mailinglist first.
+ * .. attention::
+ *
+ *   This function should not be used for anything else but the videobuf2-dvb
+ *   support. If you think you have another good use-case for this, then please
+ *   contact the linux-media mailing list first.
  */
 int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
 		     const char *thread_name);
@@ -717,7 +1008,26 @@
  * The following functions are not part of the vb2 core API, but are useful
  * functions for videobuf2-*.
  */
+
+/**
+ * vb2_buffer_in_use() - return true if the buffer is in use and
+ * the queue cannot be freed (by the means of REQBUFS(0)) call
+ *
+ * @vb:		buffer for which plane size should be returned
+ * @q:		videobuf queue
+ */
 bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb);
+
+/**
+ * vb2_verify_memory_type() - Check whether the memory type and buffer type
+ * passed to a buffer operation are compatible with the queue.
+ *
+ * @q:		videobuf queue
+ * @memory:	memory model, as defined by enum &vb2_memory.
+ * @type:	private buffer type whose content is defined by the vb2-core
+ *		caller. For example, for V4L2, it should match
+ *		the types defined on enum &v4l2_buf_type
+ */
 int vb2_verify_memory_type(struct vb2_queue *q,
 		enum vb2_memory memory, unsigned int type);
 #endif /* _MEDIA_VIDEOBUF2_CORE_H */
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index 3cc836f..036127c 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -25,11 +25,13 @@
 
 /**
  * struct vb2_v4l2_buffer - video buffer information for v4l2
+ *
  * @vb2_buf:	video buffer 2
  * @flags:	buffer informational flags
  * @field:	enum v4l2_field; field order of the image in the buffer
  * @timecode:	frame timecode
  * @sequence:	sequence count of this frame
+ *
  * Should contain enough information to be able to cover all the fields
  * of struct v4l2_buffer at videodev2.h
  */
@@ -49,22 +51,183 @@
 	container_of(vb, struct vb2_v4l2_buffer, vb2_buf)
 
 int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b);
+
+/**
+ * vb2_reqbufs() - Wrapper for vb2_core_reqbufs() that also verifies
+ * the memory and type values.
+ *
+ * @q:		videobuf2 queue
+ * @req:	struct passed from userspace to vidioc_reqbufs handler
+ *		in driver
+ */
 int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req);
 
+/**
+ * vb2_create_bufs() - Wrapper for vb2_core_create_bufs() that also verifies
+ * the memory and type values.
+ *
+ * @q:		videobuf2 queue
+ * @create:	creation parameters, passed from userspace to vidioc_create_bufs
+ *		handler in driver
+ */
 int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create);
+
+/**
+ * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
+ *
+ * @q:		videobuf2 queue
+ * @b:		buffer structure passed from userspace to vidioc_prepare_buf
+ *		handler in driver
+ *
+ * Should be called from vidioc_prepare_buf ioctl handler of a driver.
+ * This function:
+ *
+ * #) verifies the passed buffer,
+ * #) calls buf_prepare callback in the driver (if provided), in which
+ *    driver-specific buffer initialization can be performed.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_prepare_buf handler in driver.
+ */
 int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b);
 
+/**
+ * vb2_qbuf() - Queue a buffer from userspace
+ * @q:		videobuf2 queue
+ * @b:		buffer structure passed from userspace to VIDIOC_QBUF() handler
+ *		in driver
+ *
+ * Should be called from VIDIOC_QBUF() ioctl handler of a driver.
+ *
+ * This function:
+ *
+ * #) verifies the passed buffer,
+ * #) if necessary, calls buf_prepare callback in the driver (if provided), in
+ *    which driver-specific buffer initialization can be performed,
+ * #) if streaming is on, queues the buffer in driver by the means of buf_queue
+ *    callback for processing.
+ *
+ * The return values from this function are intended to be directly returned
+ * from VIDIOC_QBUF() handler in driver.
+ */
 int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b);
+
+/**
+ * vb2_expbuf() - Export a buffer as a file descriptor
+ * @q:		videobuf2 queue
+ * @eb:		export buffer structure passed from userspace to VIDIOC_EXPBUF()
+ *		handler in driver
+ *
+ * The return values from this function are intended to be directly returned
+ * from VIDIOC_EXPBUF() handler in driver.
+ */
 int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb);
+
+/**
+ * vb2_dqbuf() - Dequeue a buffer to the userspace
+ * @q:		videobuf2 queue
+ * @b:		buffer structure passed from userspace to VIDIOC_DQBUF() handler
+ *		in driver
+ * @nonblocking: if true, this call will not sleep waiting for a buffer if no
+ *		 buffers ready for dequeuing are present. Normally the driver
+ *		 would be passing (file->f_flags & O_NONBLOCK) here
+ *
+ * Should be called from VIDIOC_DQBUF() ioctl handler of a driver.
+ *
+ * This function:
+ *
+ * #) verifies the passed buffer,
+ * #) calls buf_finish callback in the driver (if provided), in which
+ *    driver can perform any additional operations that may be required before
+ *    returning the buffer to userspace, such as cache sync,
+ * #) the buffer struct members are filled with relevant information for
+ *    the userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from VIDIOC_DQBUF() handler in driver.
+ */
 int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking);
 
+/**
+ * vb2_streamon - start streaming
+ * @q:		videobuf2 queue
+ * @type:	type argument passed from userspace to vidioc_streamon handler
+ *
+ * Should be called from vidioc_streamon handler of a driver.
+ *
+ * This function:
+ *
+ * 1) verifies current state
+ * 2) passes any previously queued buffers to the driver and starts streaming
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_streamon handler in the driver.
+ */
 int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type);
+
+/**
+ * vb2_streamoff - stop streaming
+ * @q:		videobuf2 queue
+ * @type:	type argument passed from userspace to vidioc_streamoff handler
+ *
+ * Should be called from vidioc_streamoff handler of a driver.
+ *
+ * This function:
+ *
+ * #) verifies current state,
+ * #) stop streaming and dequeues any queued buffers, including those previously
+ *    passed to the driver (after waiting for the driver to finish).
+ *
+ * This call can be used for pausing playback.
+ * The return values from this function are intended to be directly returned
+ * from vidioc_streamoff handler in the driver
+ */
 int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type);
 
+/**
+ * vb2_queue_init() - initialize a videobuf2 queue
+ * @q:		videobuf2 queue; this structure should be allocated in driver
+ *
+ * The vb2_queue structure should be allocated by the driver. The driver is
+ * responsible of clearing it's content and setting initial values for some
+ * required entries before calling this function.
+ * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
+ * to the struct vb2_queue description in include/media/videobuf2-core.h
+ * for more information.
+ */
 int __must_check vb2_queue_init(struct vb2_queue *q);
+
+/**
+ * vb2_queue_release() - stop streaming, release the queue and free memory
+ * @q:		videobuf2 queue
+ *
+ * This function stops streaming and performs necessary clean ups, including
+ * freeing video buffer memory. The driver is responsible for freeing
+ * the vb2_queue structure itself.
+ */
 void vb2_queue_release(struct vb2_queue *q);
+
+/**
+ * vb2_poll() - implements poll userspace operation
+ * @q:		videobuf2 queue
+ * @file:	file argument passed to the poll file operation handler
+ * @wait:	wait argument passed to the poll file operation handler
+ *
+ * This function implements poll file operation handler for a driver.
+ * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
+ * be informed that the file descriptor of a video device is available for
+ * reading.
+ * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
+ * will be reported as available for writing.
+ *
+ * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
+ * pending events.
+ *
+ * The return values from this function are intended to be directly returned
+ * from poll handler in driver.
+ */
 unsigned int vb2_poll(struct vb2_queue *q, struct file *file,
-		poll_table *wait);
+		      poll_table *wait);
 
 /*
  * The following functions are not part of the vb2 core API, but are simple
@@ -105,9 +268,22 @@
 		unsigned long len, unsigned long pgoff, unsigned long flags);
 #endif
 
-/* struct vb2_ops helpers, only use if vq->lock is non-NULL. */
-
+/**
+ * vb2_ops_wait_prepare - helper function to lock a struct &vb2_queue
+ *
+ * @vq: pointer to struct vb2_queue
+ *
+ * ..note:: only use if vq->lock is non-NULL.
+ */
 void vb2_ops_wait_prepare(struct vb2_queue *vq);
+
+/**
+ * vb2_ops_wait_finish - helper function to unlock a struct &vb2_queue
+ *
+ * @vq: pointer to struct vb2_queue
+ *
+ * ..note:: only use if vq->lock is non-NULL.
+ */
 void vb2_ops_wait_finish(struct vb2_queue *vq);
 
 #endif /* _MEDIA_VIDEOBUF2_V4L2_H */
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index 9322d977..458b400 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -26,7 +26,7 @@
 struct vsp1_du_atomic_config {
 	u32 pixelformat;
 	unsigned int pitch;
-	dma_addr_t mem[2];
+	dma_addr_t mem[3];
 	struct v4l2_rect src;
 	struct v4l2_rect dst;
 	unsigned int alpha;
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 6360c25..f32f7ef 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -37,18 +37,6 @@
 #ifndef __long_aligned
 #define __long_aligned __attribute__((aligned((sizeof(long)))))
 #endif
-/*
- * Less bad way to call ioctl from within the kernel; this needs to be
- * done some other way to get the call out of interrupt context.
- * Needs "ioctl" variable to be supplied by calling context.
- */
-#define IOCTL(dev, arg, cmd) ({		\
-	int res = 0;			\
-	mm_segment_t fs = get_fs();	\
-	set_fs(get_ds());		\
-	res = ioctl(dev, arg, cmd);	\
-	set_fs(fs);			\
-	res; })
 
 #define BOND_MODE(bond) ((bond)->params.mode)
 
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index fe78f02..bd19faa 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -796,9 +796,9 @@
  *	(or NULL for no change)
  * @supported_rates_len: number of supported rates
  * @sta_flags_mask: station flags that changed
- *	(bitmask of BIT(NL80211_STA_FLAG_...))
+ *	(bitmask of BIT(%NL80211_STA_FLAG_...))
  * @sta_flags_set: station flags values
- *	(bitmask of BIT(NL80211_STA_FLAG_...))
+ *	(bitmask of BIT(%NL80211_STA_FLAG_...))
  * @listen_interval: listen interval or -1 for no change
  * @aid: AID or zero for no change
  * @peer_aid: mesh peer AID or zero for no change
@@ -3088,47 +3088,54 @@
  *
  * 1. Allow #STA <= 1, #AP <= 1, matching BI, channels = 1, 2 total:
  *
- *  struct ieee80211_iface_limit limits1[] = {
- *	{ .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
- *	{ .max = 1, .types = BIT(NL80211_IFTYPE_AP}, },
- *  };
- *  struct ieee80211_iface_combination combination1 = {
- *	.limits = limits1,
- *	.n_limits = ARRAY_SIZE(limits1),
- *	.max_interfaces = 2,
- *	.beacon_int_infra_match = true,
- *  };
+ *    .. code-block:: c
+ *
+ *	struct ieee80211_iface_limit limits1[] = {
+ *		{ .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
+ *		{ .max = 1, .types = BIT(NL80211_IFTYPE_AP}, },
+ *	};
+ *	struct ieee80211_iface_combination combination1 = {
+ *		.limits = limits1,
+ *		.n_limits = ARRAY_SIZE(limits1),
+ *		.max_interfaces = 2,
+ *		.beacon_int_infra_match = true,
+ *	};
  *
  *
  * 2. Allow #{AP, P2P-GO} <= 8, channels = 1, 8 total:
  *
- *  struct ieee80211_iface_limit limits2[] = {
- *	{ .max = 8, .types = BIT(NL80211_IFTYPE_AP) |
- *			     BIT(NL80211_IFTYPE_P2P_GO), },
- *  };
- *  struct ieee80211_iface_combination combination2 = {
- *	.limits = limits2,
- *	.n_limits = ARRAY_SIZE(limits2),
- *	.max_interfaces = 8,
- *	.num_different_channels = 1,
- *  };
+ *    .. code-block:: c
+ *
+ *	struct ieee80211_iface_limit limits2[] = {
+ *		{ .max = 8, .types = BIT(NL80211_IFTYPE_AP) |
+ *				     BIT(NL80211_IFTYPE_P2P_GO), },
+ *	};
+ *	struct ieee80211_iface_combination combination2 = {
+ *		.limits = limits2,
+ *		.n_limits = ARRAY_SIZE(limits2),
+ *		.max_interfaces = 8,
+ *		.num_different_channels = 1,
+ *	};
  *
  *
  * 3. Allow #STA <= 1, #{P2P-client,P2P-GO} <= 3 on two channels, 4 total.
  *
- * This allows for an infrastructure connection and three P2P connections.
+ *    This allows for an infrastructure connection and three P2P connections.
  *
- *  struct ieee80211_iface_limit limits3[] = {
- *	{ .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
- *	{ .max = 3, .types = BIT(NL80211_IFTYPE_P2P_GO) |
- *			     BIT(NL80211_IFTYPE_P2P_CLIENT), },
- *  };
- *  struct ieee80211_iface_combination combination3 = {
- *	.limits = limits3,
- *	.n_limits = ARRAY_SIZE(limits3),
- *	.max_interfaces = 4,
- *	.num_different_channels = 2,
- *  };
+ *    .. code-block:: c
+ *
+ *	struct ieee80211_iface_limit limits3[] = {
+ *		{ .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
+ *		{ .max = 3, .types = BIT(NL80211_IFTYPE_P2P_GO) |
+ *				     BIT(NL80211_IFTYPE_P2P_CLIENT), },
+ *	};
+ *	struct ieee80211_iface_combination combination3 = {
+ *		.limits = limits3,
+ *		.n_limits = ARRAY_SIZE(limits3),
+ *		.max_interfaces = 4,
+ *		.num_different_channels = 2,
+ *	};
+ *
  */
 struct ieee80211_iface_combination {
 	const struct ieee80211_iface_limit *limits;
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index b220dab..3832099 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -114,6 +114,25 @@
 	return tb_id;
 }
 
+static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
+{
+	struct net_device *dev;
+	bool rc = false;
+
+	if (ifindex == 0)
+		return false;
+
+	rcu_read_lock();
+
+	dev = dev_get_by_index_rcu(net, ifindex);
+	if (dev)
+		rc = netif_is_l3_master(dev);
+
+	rcu_read_unlock();
+
+	return rc;
+}
+
 struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6);
 
 static inline
@@ -207,6 +226,11 @@
 	return 0;
 }
 
+static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
+{
+	return false;
+}
+
 static inline
 struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6)
 {
diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h
new file mode 100644
index 0000000..eaaf56d
--- /dev/null
+++ b/include/soc/fsl/bman.h
@@ -0,0 +1,129 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_BMAN_H
+#define __FSL_BMAN_H
+
+/* wrapper for 48-bit buffers */
+struct bm_buffer {
+	union {
+		struct {
+			__be16 bpid; /* hi 8-bits reserved */
+			__be16 hi; /* High 16-bits of 48-bit address */
+			__be32 lo; /* Low 32-bits of 48-bit address */
+		};
+		__be64 data;
+	};
+} __aligned(8);
+/*
+ * Restore the 48 bit address previously stored in BMan
+ * hardware pools as a dma_addr_t
+ */
+static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
+{
+	return be64_to_cpu(buf->data) & 0xffffffffffffLLU;
+}
+
+static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
+{
+	return be64_to_cpu(buf->data) & 0xffffffffffffLLU;
+}
+
+static inline void bm_buffer_set64(struct bm_buffer *buf, u64 addr)
+{
+	buf->hi = cpu_to_be16(upper_32_bits(addr));
+	buf->lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+static inline u8 bm_buffer_get_bpid(const struct bm_buffer *buf)
+{
+	return be16_to_cpu(buf->bpid) & 0xff;
+}
+
+static inline void bm_buffer_set_bpid(struct bm_buffer *buf, int bpid)
+{
+	buf->bpid = cpu_to_be16(bpid & 0xff);
+}
+
+/* Managed portal, high-level i/face */
+
+/* Portal and Buffer Pools */
+struct bman_portal;
+struct bman_pool;
+
+#define BM_POOL_MAX		64 /* max # of buffer pools */
+
+/**
+ * bman_new_pool - Allocates a Buffer Pool object
+ *
+ * Creates a pool object, and returns a reference to it or NULL on error.
+ */
+struct bman_pool *bman_new_pool(void);
+
+/**
+ * bman_free_pool - Deallocates a Buffer Pool object
+ * @pool: the pool object to release
+ */
+void bman_free_pool(struct bman_pool *pool);
+
+/**
+ * bman_get_bpid - Returns a pool object's BPID.
+ * @pool: the pool object
+ *
+ * The returned value is the index of the encapsulated buffer pool,
+ * in the range of [0, @BM_POOL_MAX-1].
+ */
+int bman_get_bpid(const struct bman_pool *pool);
+
+/**
+ * bman_release - Release buffer(s) to the buffer pool
+ * @pool: the buffer pool object to release to
+ * @bufs: an array of buffers to release
+ * @num: the number of buffers in @bufs (1-8)
+ *
+ * Adds the given buffers to RCR entries. If the RCR ring is unresponsive,
+ * the function will return -ETIMEDOUT. Otherwise, it returns zero.
+ */
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num);
+
+/**
+ * bman_acquire - Acquire buffer(s) from a buffer pool
+ * @pool: the buffer pool object to acquire from
+ * @bufs: array for storing the acquired buffers
+ * @num: the number of buffers desired (@bufs is at least this big)
+ *
+ * Issues an "Acquire" command via the portal's management command interface.
+ * The return value will be the number of buffers obtained from the pool, or a
+ * negative error code if a h/w error or pool starvation was encountered. In
+ * the latter case, the content of @bufs is undefined.
+ */
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
+
+#endif	/* __FSL_BMAN_H */
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
new file mode 100644
index 0000000..37f3eb0
--- /dev/null
+++ b/include/soc/fsl/qman.h
@@ -0,0 +1,1074 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_QMAN_H
+#define __FSL_QMAN_H
+
+#include <linux/bitops.h>
+
+/* Hardware constants */
+#define QM_CHANNEL_SWPORTAL0 0
+#define QMAN_CHANNEL_POOL1 0x21
+#define QMAN_CHANNEL_POOL1_REV3 0x401
+extern u16 qm_channel_pool1;
+
+/* Portal processing (interrupt) sources */
+#define QM_PIRQ_CSCI	0x00100000	/* Congestion State Change */
+#define QM_PIRQ_EQCI	0x00080000	/* Enqueue Command Committed */
+#define QM_PIRQ_EQRI	0x00040000	/* EQCR Ring (below threshold) */
+#define QM_PIRQ_DQRI	0x00020000	/* DQRR Ring (non-empty) */
+#define QM_PIRQ_MRI	0x00010000	/* MR Ring (non-empty) */
+/*
+ * This mask contains all the interrupt sources that need handling except DQRI,
+ * ie. that if present should trigger slow-path processing.
+ */
+#define QM_PIRQ_SLOW	(QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
+			 QM_PIRQ_MRI)
+
+/* For qman_static_dequeue_*** APIs */
+#define QM_SDQCR_CHANNELS_POOL_MASK	0x00007fff
+/* for n in [1,15] */
+#define QM_SDQCR_CHANNELS_POOL(n)	(0x00008000 >> (n))
+/* for conversion from n of qm_channel */
+static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
+{
+	return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
+}
+
+/* --- QMan data structures (and associated constants) --- */
+
+/* "Frame Descriptor (FD)" */
+struct qm_fd {
+	union {
+		struct {
+			u8 cfg8b_w1;
+			u8 bpid;	/* Buffer Pool ID */
+			u8 cfg8b_w3;
+			u8 addr_hi;	/* high 8-bits of 40-bit address */
+			__be32 addr_lo;	/* low 32-bits of 40-bit address */
+		} __packed;
+		__be64 data;
+	};
+	__be32 cfg;	/* format, offset, length / congestion */
+	union {
+		__be32 cmd;
+		__be32 status;
+	};
+} __aligned(8);
+
+#define QM_FD_FORMAT_SG		BIT(31)
+#define QM_FD_FORMAT_LONG	BIT(30)
+#define QM_FD_FORMAT_COMPOUND	BIT(29)
+#define QM_FD_FORMAT_MASK	GENMASK(31, 29)
+#define QM_FD_OFF_SHIFT		20
+#define QM_FD_OFF_MASK		GENMASK(28, 20)
+#define QM_FD_LEN_MASK		GENMASK(19, 0)
+#define QM_FD_LEN_BIG_MASK	GENMASK(28, 0)
+
+enum qm_fd_format {
+	/*
+	 * 'contig' implies a contiguous buffer, whereas 'sg' implies a
+	 * scatter-gather table. 'big' implies a 29-bit length with no offset
+	 * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
+	 * implies a s/g-like table, where each entry itself represents a frame
+	 * (contiguous or scatter-gather) and the 29-bit "length" is
+	 * interpreted purely for congestion calculations, ie. a "congestion
+	 * weight".
+	 */
+	qm_fd_contig = 0,
+	qm_fd_contig_big = QM_FD_FORMAT_LONG,
+	qm_fd_sg = QM_FD_FORMAT_SG,
+	qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
+	qm_fd_compound = QM_FD_FORMAT_COMPOUND
+};
+
+static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
+{
+	return be64_to_cpu(fd->data) & 0xffffffffffLLU;
+}
+
+static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
+{
+	return be64_to_cpu(fd->data) & 0xffffffffffLLU;
+}
+
+static inline void qm_fd_addr_set64(struct qm_fd *fd, u64 addr)
+{
+	fd->addr_hi = upper_32_bits(addr);
+	fd->addr_lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+/*
+ * The 'format' field indicates the interpretation of the remaining
+ * 29 bits of the 32-bit word.
+ * If 'format' is _contig or _sg, 20b length and 9b offset.
+ * If 'format' is _contig_big or _sg_big, 29b length.
+ * If 'format' is _compound, 29b "congestion weight".
+ */
+static inline enum qm_fd_format qm_fd_get_format(const struct qm_fd *fd)
+{
+	return be32_to_cpu(fd->cfg) & QM_FD_FORMAT_MASK;
+}
+
+static inline int qm_fd_get_offset(const struct qm_fd *fd)
+{
+	return (be32_to_cpu(fd->cfg) & QM_FD_OFF_MASK) >> QM_FD_OFF_SHIFT;
+}
+
+static inline int qm_fd_get_length(const struct qm_fd *fd)
+{
+	return be32_to_cpu(fd->cfg) & QM_FD_LEN_MASK;
+}
+
+static inline int qm_fd_get_len_big(const struct qm_fd *fd)
+{
+	return be32_to_cpu(fd->cfg) & QM_FD_LEN_BIG_MASK;
+}
+
+static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt,
+				   int off, int len)
+{
+	fd->cfg = cpu_to_be32(fmt | (len & QM_FD_LEN_BIG_MASK) |
+			      ((off << QM_FD_OFF_SHIFT) & QM_FD_OFF_MASK));
+}
+
+#define qm_fd_set_contig(fd, off, len) \
+	qm_fd_set_param(fd, qm_fd_contig, off, len)
+#define qm_fd_set_sg(fd, off, len) qm_fd_set_param(fd, qm_fd_sg, off, len)
+#define qm_fd_set_contig_big(fd, len) \
+	qm_fd_set_param(fd, qm_fd_contig_big, 0, len)
+#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len)
+
+static inline void qm_fd_clear_fd(struct qm_fd *fd)
+{
+	fd->data = 0;
+	fd->cfg = 0;
+	fd->cmd = 0;
+}
+
+/* Scatter/Gather table entry */
+struct qm_sg_entry {
+	union {
+		struct {
+			u8 __reserved1[3];
+			u8 addr_hi;	/* high 8-bits of 40-bit address */
+			__be32 addr_lo;	/* low 32-bits of 40-bit address */
+		};
+		__be64 data;
+	};
+	__be32 cfg;	/* E bit, F bit, length */
+	u8 __reserved2;
+	u8 bpid;
+	__be16 offset; /* 13-bit, _res[13-15]*/
+} __packed;
+
+#define QM_SG_LEN_MASK	GENMASK(29, 0)
+#define QM_SG_OFF_MASK	GENMASK(12, 0)
+#define QM_SG_FIN	BIT(30)
+#define QM_SG_EXT	BIT(31)
+
+static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
+{
+	return be64_to_cpu(sg->data) & 0xffffffffffLLU;
+}
+
+static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
+{
+	return be64_to_cpu(sg->data) & 0xffffffffffLLU;
+}
+
+static inline void qm_sg_entry_set64(struct qm_sg_entry *sg, u64 addr)
+{
+	sg->addr_hi = upper_32_bits(addr);
+	sg->addr_lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+static inline bool qm_sg_entry_is_final(const struct qm_sg_entry *sg)
+{
+	return be32_to_cpu(sg->cfg) & QM_SG_FIN;
+}
+
+static inline bool qm_sg_entry_is_ext(const struct qm_sg_entry *sg)
+{
+	return be32_to_cpu(sg->cfg) & QM_SG_EXT;
+}
+
+static inline int qm_sg_entry_get_len(const struct qm_sg_entry *sg)
+{
+	return be32_to_cpu(sg->cfg) & QM_SG_LEN_MASK;
+}
+
+static inline void qm_sg_entry_set_len(struct qm_sg_entry *sg, int len)
+{
+	sg->cfg = cpu_to_be32(len & QM_SG_LEN_MASK);
+}
+
+static inline void qm_sg_entry_set_f(struct qm_sg_entry *sg, int len)
+{
+	sg->cfg = cpu_to_be32(QM_SG_FIN | (len & QM_SG_LEN_MASK));
+}
+
+static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
+{
+	return be32_to_cpu(sg->offset) & QM_SG_OFF_MASK;
+}
+
+/* "Frame Dequeue Response" */
+struct qm_dqrr_entry {
+	u8 verb;
+	u8 stat;
+	u16 seqnum;	/* 15-bit */
+	u8 tok;
+	u8 __reserved2[3];
+	u32 fqid;	/* 24-bit */
+	u32 contextB;
+	struct qm_fd fd;
+	u8 __reserved4[32];
+} __packed;
+#define QM_DQRR_VERB_VBIT		0x80
+#define QM_DQRR_VERB_MASK		0x7f	/* where the verb contains; */
+#define QM_DQRR_VERB_FRAME_DEQUEUE	0x60	/* "this format" */
+#define QM_DQRR_STAT_FQ_EMPTY		0x80	/* FQ empty */
+#define QM_DQRR_STAT_FQ_HELDACTIVE	0x40	/* FQ held active */
+#define QM_DQRR_STAT_FQ_FORCEELIGIBLE	0x20	/* FQ was force-eligible'd */
+#define QM_DQRR_STAT_FD_VALID		0x10	/* has a non-NULL FD */
+#define QM_DQRR_STAT_UNSCHEDULED	0x02	/* Unscheduled dequeue */
+#define QM_DQRR_STAT_DQCR_EXPIRED	0x01	/* VDQCR or PDQCR expired*/
+
+/* "ERN Message Response" */
+/* "FQ State Change Notification" */
+union qm_mr_entry {
+	struct {
+		u8 verb;
+		u8 __reserved[63];
+	};
+	struct {
+		u8 verb;
+		u8 dca;
+		u16 seqnum;
+		u8 rc;		/* Rej Code: 8-bit */
+		u8 orp_hi;	/* ORP: 24-bit */
+		u16 orp_lo;
+		u32 fqid;	/* 24-bit */
+		u32 tag;
+		struct qm_fd fd;
+		u8 __reserved1[32];
+	} __packed ern;
+	struct {
+		u8 verb;
+		u8 fqs;		/* Frame Queue Status */
+		u8 __reserved1[6];
+		u32 fqid;	/* 24-bit */
+		u32 contextB;
+		u8 __reserved2[48];
+	} __packed fq;		/* FQRN/FQRNI/FQRL/FQPN */
+};
+#define QM_MR_VERB_VBIT			0x80
+/*
+ * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
+ * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
+ * from the other MR types by noting if the 0x20 bit is unset.
+ */
+#define QM_MR_VERB_TYPE_MASK		0x27
+#define QM_MR_VERB_DC_ERN		0x20
+#define QM_MR_VERB_FQRN			0x21
+#define QM_MR_VERB_FQRNI		0x22
+#define QM_MR_VERB_FQRL			0x23
+#define QM_MR_VERB_FQPN			0x24
+#define QM_MR_RC_MASK			0xf0	/* contains one of; */
+#define QM_MR_RC_CGR_TAILDROP		0x00
+#define QM_MR_RC_WRED			0x10
+#define QM_MR_RC_ERROR			0x20
+#define QM_MR_RC_ORPWINDOW_EARLY	0x30
+#define QM_MR_RC_ORPWINDOW_LATE		0x40
+#define QM_MR_RC_FQ_TAILDROP		0x50
+#define QM_MR_RC_ORPWINDOW_RETIRED	0x60
+#define QM_MR_RC_ORP_ZERO		0x70
+#define QM_MR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
+#define QM_MR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
+
+/*
+ * An identical structure of FQD fields is present in the "Init FQ" command and
+ * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
+ * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
+ * latter has two inlines to assist with converting to/from the mant+exp
+ * representation.
+ */
+struct qm_fqd_stashing {
+	/* See QM_STASHING_EXCL_<...> */
+	u8 exclusive;
+	/* Numbers of cachelines */
+	u8 cl; /* _res[6-7], as[4-5], ds[2-3], cs[0-1] */
+};
+
+struct qm_fqd_oac {
+	/* "Overhead Accounting Control", see QM_OAC_<...> */
+	u8 oac; /* oac[6-7], _res[0-5] */
+	/* Two's-complement value (-128 to +127) */
+	s8 oal; /* "Overhead Accounting Length" */
+};
+
+struct qm_fqd {
+	/* _res[6-7], orprws[3-5], oa[2], olws[0-1] */
+	u8 orpc;
+	u8 cgid;
+	__be16 fq_ctrl;	/* See QM_FQCTRL_<...> */
+	__be16 dest_wq;	/* channel[3-15], wq[0-2] */
+	__be16 ics_cred; /* 15-bit */
+	/*
+	 * For "Initialize Frame Queue" commands, the write-enable mask
+	 * determines whether 'td' or 'oac_init' is observed. For query
+	 * commands, this field is always 'td', and 'oac_query' (below) reflects
+	 * the Overhead ACcounting values.
+	 */
+	union {
+		__be16 td; /* "Taildrop": _res[13-15], mant[5-12], exp[0-4] */
+		struct qm_fqd_oac oac_init;
+	};
+	__be32 context_b;
+	union {
+		/* Treat it as 64-bit opaque */
+		__be64 opaque;
+		struct {
+			__be32 hi;
+			__be32 lo;
+		};
+		/* Treat it as s/w portal stashing config */
+		/* see "FQD Context_A field used for [...]" */
+		struct {
+			struct qm_fqd_stashing stashing;
+			/*
+			 * 48-bit address of FQ context to
+			 * stash, must be cacheline-aligned
+			 */
+			__be16 context_hi;
+			__be32 context_lo;
+		} __packed;
+	} context_a;
+	struct qm_fqd_oac oac_query;
+} __packed;
+
+#define QM_FQD_CHAN_OFF		3
+#define QM_FQD_WQ_MASK		GENMASK(2, 0)
+#define QM_FQD_TD_EXP_MASK	GENMASK(4, 0)
+#define QM_FQD_TD_MANT_OFF	5
+#define QM_FQD_TD_MANT_MASK	GENMASK(12, 5)
+#define QM_FQD_TD_MAX		0xe0000000
+#define QM_FQD_TD_MANT_MAX	0xff
+#define QM_FQD_OAC_OFF		6
+#define QM_FQD_AS_OFF		4
+#define QM_FQD_DS_OFF		2
+#define QM_FQD_XS_MASK		0x3
+
+/* 64-bit converters for context_hi/lo */
+static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
+{
+	return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
+}
+
+static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
+{
+	return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
+}
+
+static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
+{
+	return qm_fqd_stashing_get64(fqd);
+}
+
+static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
+{
+	fqd->context_a.context_hi = upper_32_bits(addr);
+	fqd->context_a.context_lo = lower_32_bits(addr);
+}
+
+static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
+{
+	fqd->context_a.hi = cpu_to_be16(upper_32_bits(addr));
+	fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+/* convert a threshold value into mant+exp representation */
+static inline int qm_fqd_set_taildrop(struct qm_fqd *fqd, u32 val,
+				      int roundup)
+{
+	u32 e = 0;
+	int td, oddbit = 0;
+
+	if (val > QM_FQD_TD_MAX)
+		return -ERANGE;
+
+	while (val > QM_FQD_TD_MANT_MAX) {
+		oddbit = val & 1;
+		val >>= 1;
+		e++;
+		if (roundup && oddbit)
+			val++;
+	}
+
+	td = (val << QM_FQD_TD_MANT_OFF) & QM_FQD_TD_MANT_MASK;
+	td |= (e & QM_FQD_TD_EXP_MASK);
+	fqd->td = cpu_to_be16(td);
+	return 0;
+}
+/* and the other direction */
+static inline int qm_fqd_get_taildrop(const struct qm_fqd *fqd)
+{
+	int td = be16_to_cpu(fqd->td);
+
+	return ((td & QM_FQD_TD_MANT_MASK) >> QM_FQD_TD_MANT_OFF)
+		<< (td & QM_FQD_TD_EXP_MASK);
+}
+
+static inline void qm_fqd_set_stashing(struct qm_fqd *fqd, u8 as, u8 ds, u8 cs)
+{
+	struct qm_fqd_stashing *st = &fqd->context_a.stashing;
+
+	st->cl = ((as & QM_FQD_XS_MASK) << QM_FQD_AS_OFF) |
+		 ((ds & QM_FQD_XS_MASK) << QM_FQD_DS_OFF) |
+		 (cs & QM_FQD_XS_MASK);
+}
+
+static inline u8 qm_fqd_get_stashing(const struct qm_fqd *fqd)
+{
+	return fqd->context_a.stashing.cl;
+}
+
+static inline void qm_fqd_set_oac(struct qm_fqd *fqd, u8 val)
+{
+	fqd->oac_init.oac = val << QM_FQD_OAC_OFF;
+}
+
+static inline void qm_fqd_set_oal(struct qm_fqd *fqd, s8 val)
+{
+	fqd->oac_init.oal = val;
+}
+
+static inline void qm_fqd_set_destwq(struct qm_fqd *fqd, int ch, int wq)
+{
+	fqd->dest_wq = cpu_to_be16((ch << QM_FQD_CHAN_OFF) |
+				   (wq & QM_FQD_WQ_MASK));
+}
+
+static inline int qm_fqd_get_chan(const struct qm_fqd *fqd)
+{
+	return be16_to_cpu(fqd->dest_wq) >> QM_FQD_CHAN_OFF;
+}
+
+static inline int qm_fqd_get_wq(const struct qm_fqd *fqd)
+{
+	return be16_to_cpu(fqd->dest_wq) & QM_FQD_WQ_MASK;
+}
+
+/* See "Frame Queue Descriptor (FQD)" */
+/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
+#define QM_FQCTRL_MASK		0x07ff	/* 'fq_ctrl' flags; */
+#define QM_FQCTRL_CGE		0x0400	/* Congestion Group Enable */
+#define QM_FQCTRL_TDE		0x0200	/* Tail-Drop Enable */
+#define QM_FQCTRL_CTXASTASHING	0x0080	/* Context-A stashing */
+#define QM_FQCTRL_CPCSTASH	0x0040	/* CPC Stash Enable */
+#define QM_FQCTRL_FORCESFDR	0x0008	/* High-priority SFDRs */
+#define QM_FQCTRL_AVOIDBLOCK	0x0004	/* Don't block active */
+#define QM_FQCTRL_HOLDACTIVE	0x0002	/* Hold active in portal */
+#define QM_FQCTRL_PREFERINCACHE	0x0001	/* Aggressively cache FQD */
+#define QM_FQCTRL_LOCKINCACHE	QM_FQCTRL_PREFERINCACHE /* older naming */
+
+/* See "FQD Context_A field used for [...] */
+/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
+#define QM_STASHING_EXCL_ANNOTATION	0x04
+#define QM_STASHING_EXCL_DATA		0x02
+#define QM_STASHING_EXCL_CTX		0x01
+
+/* See "Intra Class Scheduling" */
+/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
+#define QM_OAC_ICS		0x2 /* Accounting for Intra-Class Scheduling */
+#define QM_OAC_CG		0x1 /* Accounting for Congestion Groups */
+
+/*
+ * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
+ * and associated commands/responses. The WRED parameters are calculated from
+ * these fields as follows;
+ *   MaxTH = MA * (2 ^ Mn)
+ *   Slope = SA / (2 ^ Sn)
+ *    MaxP = 4 * (Pn + 1)
+ */
+struct qm_cgr_wr_parm {
+	/* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */
+	u32 word;
+};
+/*
+ * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
+ * management commands, this is padded to a 16-bit structure field, so that's
+ * how we represent it here. The congestion state threshold is calculated from
+ * these fields as follows;
+ *   CS threshold = TA * (2 ^ Tn)
+ */
+struct qm_cgr_cs_thres {
+	/* _res[13-15], TA[5-12], Tn[0-4] */
+	u16 word;
+};
+/*
+ * This identical structure of CGR fields is present in the "Init/Modify CGR"
+ * commands and the "Query CGR" result. It's suctioned out here into its own
+ * struct.
+ */
+struct __qm_mc_cgr {
+	struct qm_cgr_wr_parm wr_parm_g;
+	struct qm_cgr_wr_parm wr_parm_y;
+	struct qm_cgr_wr_parm wr_parm_r;
+	u8 wr_en_g;	/* boolean, use QM_CGR_EN */
+	u8 wr_en_y;	/* boolean, use QM_CGR_EN */
+	u8 wr_en_r;	/* boolean, use QM_CGR_EN */
+	u8 cscn_en;	/* boolean, use QM_CGR_EN */
+	union {
+		struct {
+			u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
+			u16 cscn_targ_dcp_low;	/* CSCN_TARG_DCP low-16bits */
+		};
+		u32 cscn_targ;	/* use QM_CGR_TARG_* */
+	};
+	u8 cstd_en;	/* boolean, use QM_CGR_EN */
+	u8 cs;		/* boolean, only used in query response */
+	struct qm_cgr_cs_thres cs_thres; /* use qm_cgr_cs_thres_set64() */
+	u8 mode;	/* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
+} __packed;
+#define QM_CGR_EN		0x01 /* For wr_en_*, cscn_en, cstd_en */
+#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT	0x8000 /* value written to portal bit*/
+#define QM_CGR_TARG_UDP_CTRL_DCP	0x4000 /* 0: SWP, 1: DCP */
+#define QM_CGR_TARG_PORTAL(n)	(0x80000000 >> (n)) /* s/w portal, 0-9 */
+#define QM_CGR_TARG_FMAN0	0x00200000 /* direct-connect portal: fman0 */
+#define QM_CGR_TARG_FMAN1	0x00100000 /*			   : fman1 */
+/* Convert CGR thresholds to/from "cs_thres" format */
+static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
+{
+	return ((th->word >> 5) & 0xff) << (th->word & 0x1f);
+}
+
+static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
+					int roundup)
+{
+	u32 e = 0;
+	int oddbit = 0;
+
+	while (val > 0xff) {
+		oddbit = val & 1;
+		val >>= 1;
+		e++;
+		if (roundup && oddbit)
+			val++;
+	}
+	th->word = ((val & 0xff) << 5) | (e & 0x1f);
+	return 0;
+}
+
+/* "Initialize FQ" */
+struct qm_mcc_initfq {
+	u8 __reserved1[2];
+	u16 we_mask;	/* Write Enable Mask */
+	u32 fqid;	/* 24-bit */
+	u16 count;	/* Initialises 'count+1' FQDs */
+	struct qm_fqd fqd; /* the FQD fields go here */
+	u8 __reserved2[30];
+} __packed;
+/* "Initialize/Modify CGR" */
+struct qm_mcc_initcgr {
+	u8 __reserve1[2];
+	u16 we_mask;	/* Write Enable Mask */
+	struct __qm_mc_cgr cgr;	/* CGR fields */
+	u8 __reserved2[2];
+	u8 cgid;
+	u8 __reserved3[32];
+} __packed;
+
+/* INITFQ-specific flags */
+#define QM_INITFQ_WE_MASK		0x01ff	/* 'Write Enable' flags; */
+#define QM_INITFQ_WE_OAC		0x0100
+#define QM_INITFQ_WE_ORPC		0x0080
+#define QM_INITFQ_WE_CGID		0x0040
+#define QM_INITFQ_WE_FQCTRL		0x0020
+#define QM_INITFQ_WE_DESTWQ		0x0010
+#define QM_INITFQ_WE_ICSCRED		0x0008
+#define QM_INITFQ_WE_TDTHRESH		0x0004
+#define QM_INITFQ_WE_CONTEXTB		0x0002
+#define QM_INITFQ_WE_CONTEXTA		0x0001
+/* INITCGR/MODIFYCGR-specific flags */
+#define QM_CGR_WE_MASK			0x07ff	/* 'Write Enable Mask'; */
+#define QM_CGR_WE_WR_PARM_G		0x0400
+#define QM_CGR_WE_WR_PARM_Y		0x0200
+#define QM_CGR_WE_WR_PARM_R		0x0100
+#define QM_CGR_WE_WR_EN_G		0x0080
+#define QM_CGR_WE_WR_EN_Y		0x0040
+#define QM_CGR_WE_WR_EN_R		0x0020
+#define QM_CGR_WE_CSCN_EN		0x0010
+#define QM_CGR_WE_CSCN_TARG		0x0008
+#define QM_CGR_WE_CSTD_EN		0x0004
+#define QM_CGR_WE_CS_THRES		0x0002
+#define QM_CGR_WE_MODE			0x0001
+
+#define QMAN_CGR_FLAG_USE_INIT	     0x00000001
+
+	/* Portal and Frame Queues */
+/* Represents a managed portal */
+struct qman_portal;
+
+/*
+ * This object type represents QMan frame queue descriptors (FQD), it is
+ * cacheline-aligned, and initialised by qman_create_fq(). The structure is
+ * defined further down.
+ */
+struct qman_fq;
+
+/*
+ * This object type represents a QMan congestion group, it is defined further
+ * down.
+ */
+struct qman_cgr;
+
+/*
+ * This enum, and the callback type that returns it, are used when handling
+ * dequeued frames via DQRR. Note that for "null" callbacks registered with the
+ * portal object (for handling dequeues that do not demux because contextB is
+ * NULL), the return value *MUST* be qman_cb_dqrr_consume.
+ */
+enum qman_cb_dqrr_result {
+	/* DQRR entry can be consumed */
+	qman_cb_dqrr_consume,
+	/* Like _consume, but requests parking - FQ must be held-active */
+	qman_cb_dqrr_park,
+	/* Does not consume, for DCA mode only. */
+	qman_cb_dqrr_defer,
+	/*
+	 * Stop processing without consuming this ring entry. Exits the current
+	 * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
+	 * an interrupt handler, the callback would typically call
+	 * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
+	 * otherwise the interrupt will reassert immediately.
+	 */
+	qman_cb_dqrr_stop,
+	/* Like qman_cb_dqrr_stop, but consumes the current entry. */
+	qman_cb_dqrr_consume_stop
+};
+typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
+					struct qman_fq *fq,
+					const struct qm_dqrr_entry *dqrr);
+
+/*
+ * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
+ * are always consumed after the callback returns.
+ */
+typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
+			   const union qm_mr_entry *msg);
+
+/*
+ * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
+ * held-active + held-suspended are just "sched". Things like "retired" will not
+ * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
+ * then, to indicate it's completing and to gate attempts to retry the retire
+ * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
+ * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
+ * index rather than the FQ that ring entry corresponds to), so repeated park
+ * commands are allowed (if you're silly enough to try) but won't change FQ
+ * state, and the resulting park notifications move FQs from "sched" to
+ * "parked".
+ */
+enum qman_fq_state {
+	qman_fq_state_oos,
+	qman_fq_state_parked,
+	qman_fq_state_sched,
+	qman_fq_state_retired
+};
+
+#define QMAN_FQ_STATE_CHANGING	     0x80000000 /* 'state' is changing */
+#define QMAN_FQ_STATE_NE	     0x40000000 /* retired FQ isn't empty */
+#define QMAN_FQ_STATE_ORL	     0x20000000 /* retired FQ has ORL */
+#define QMAN_FQ_STATE_BLOCKOOS	     0xe0000000 /* if any are set, no OOS */
+#define QMAN_FQ_STATE_CGR_EN	     0x10000000 /* CGR enabled */
+#define QMAN_FQ_STATE_VDQCR	     0x08000000 /* being volatile dequeued */
+
+/*
+ * Frame queue objects (struct qman_fq) are stored within memory passed to
+ * qman_create_fq(), as this allows stashing of caller-provided demux callback
+ * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
+ * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
+ * they should;
+ *
+ * (a) extend the qman_fq structure with their state; eg.
+ *
+ *     // myfq is allocated and driver_fq callbacks filled in;
+ *     struct my_fq {
+ *	   struct qman_fq base;
+ *	   int an_extra_field;
+ *	   [ ... add other fields to be associated with each FQ ...]
+ *     } *myfq = some_my_fq_allocator();
+ *     struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
+ *
+ *     // in a dequeue callback, access extra fields from 'fq' via a cast;
+ *     struct my_fq *myfq = (struct my_fq *)fq;
+ *     do_something_with(myfq->an_extra_field);
+ *     [...]
+ *
+ * (b) when and if configuring the FQ for context stashing, specify how ever
+ *     many cachelines are required to stash 'struct my_fq', to accelerate not
+ *     only the QMan driver but the callback as well.
+ */
+
+struct qman_fq_cb {
+	qman_cb_dqrr dqrr;	/* for dequeued frames */
+	qman_cb_mr ern;		/* for s/w ERNs */
+	qman_cb_mr fqs;		/* frame-queue state changes*/
+};
+
+struct qman_fq {
+	/* Caller of qman_create_fq() provides these demux callbacks */
+	struct qman_fq_cb cb;
+	/*
+	 * These are internal to the driver, don't touch. In particular, they
+	 * may change, be removed, or extended (so you shouldn't rely on
+	 * sizeof(qman_fq) being a constant).
+	 */
+	u32 fqid, idx;
+	unsigned long flags;
+	enum qman_fq_state state;
+	int cgr_groupid;
+};
+
+/*
+ * This callback type is used when handling congestion group entry/exit.
+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
+ */
+typedef void (*qman_cb_cgr)(struct qman_portal *qm,
+			    struct qman_cgr *cgr, int congested);
+
+struct qman_cgr {
+	/* Set these prior to qman_create_cgr() */
+	u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
+	qman_cb_cgr cb;
+	/* These are private to the driver */
+	u16 chan; /* portal channel this object is created on */
+	struct list_head node;
+};
+
+/* Flags to qman_create_fq() */
+#define QMAN_FQ_FLAG_NO_ENQUEUE	     0x00000001 /* can't enqueue */
+#define QMAN_FQ_FLAG_NO_MODIFY	     0x00000002 /* can only enqueue */
+#define QMAN_FQ_FLAG_TO_DCPORTAL     0x00000004 /* consumed by CAAM/PME/Fman */
+#define QMAN_FQ_FLAG_DYNAMIC_FQID    0x00000020 /* (de)allocate fqid */
+
+/* Flags to qman_init_fq() */
+#define QMAN_INITFQ_FLAG_SCHED	     0x00000001 /* schedule rather than park */
+#define QMAN_INITFQ_FLAG_LOCAL	     0x00000004 /* set dest portal */
+
+	/* Portal Management */
+/**
+ * qman_p_irqsource_add - add processing sources to be interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Adds processing sources that should be interrupt-driven (rather than
+ * processed via qman_poll_***() functions).
+ */
+void qman_p_irqsource_add(struct qman_portal *p, u32 bits);
+
+/**
+ * qman_p_irqsource_remove - remove processing sources from being int-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Removes processing sources from being interrupt-driven, so that they will
+ * instead be processed via qman_poll_***() functions.
+ */
+void qman_p_irqsource_remove(struct qman_portal *p, u32 bits);
+
+/**
+ * qman_affine_cpus - return a mask of cpus that have affine portals
+ */
+const cpumask_t *qman_affine_cpus(void);
+
+/**
+ * qman_affine_channel - return the channel ID of an portal
+ * @cpu: the cpu whose affine portal is the subject of the query
+ *
+ * If @cpu is -1, the affine portal for the current CPU will be used. It is a
+ * bug to call this function for any value of @cpu (other than -1) that is not a
+ * member of the mask returned from qman_affine_cpus().
+ */
+u16 qman_affine_channel(int cpu);
+
+/**
+ * qman_get_affine_portal - return the portal pointer affine to cpu
+ * @cpu: the cpu whose affine portal is the subject of the query
+ */
+struct qman_portal *qman_get_affine_portal(int cpu);
+
+/**
+ * qman_p_poll_dqrr - process DQRR (fast-path) entries
+ * @limit: the maximum number of DQRR entries to process
+ *
+ * Use of this function requires that DQRR processing not be interrupt-driven.
+ * The return value represents the number of DQRR entries processed.
+ */
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);
+
+/**
+ * qman_p_static_dequeue_add - Add pool channels to the portal SDQCR
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Adds a set of pool channels to the portal's static dequeue command register
+ * (SDQCR). The requested pools are limited to those the portal has dequeue
+ * access to.
+ */
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
+
+	/* FQ management */
+/**
+ * qman_create_fq - Allocates a FQ
+ * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
+ * @flags: bit-mask of QMAN_FQ_FLAG_*** options
+ * @fq: memory for storing the 'fq', with callbacks filled in
+ *
+ * Creates a frame queue object for the given @fqid, unless the
+ * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
+ * dynamically allocated (or the function fails if none are available). Once
+ * created, the caller should not touch the memory at 'fq' except as extended to
+ * adjacent memory for user-defined fields (see the definition of "struct
+ * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
+ * pre-existing frame-queues that aren't to be otherwise interfered with, it
+ * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
+ * causes the driver to honour any contextB modifications requested in the
+ * qm_init_fq() API, as this indicates the frame queue will be consumed by a
+ * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
+ * software portals, the contextB field is controlled by the driver and can't be
+ * modified by the caller.
+ */
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
+
+/**
+ * qman_destroy_fq - Deallocates a FQ
+ * @fq: the frame queue object to release
+ *
+ * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
+ * not deallocated but the caller regains ownership, to do with as desired. The
+ * FQ must be in the 'out-of-service' or in the 'parked' state.
+ */
+void qman_destroy_fq(struct qman_fq *fq);
+
+/**
+ * qman_fq_fqid - Queries the frame queue ID of a FQ object
+ * @fq: the frame queue object to query
+ */
+u32 qman_fq_fqid(struct qman_fq *fq);
+
+/**
+ * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
+ * @fq: the frame queue object to modify, must be 'parked' or new.
+ * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
+ * @opts: the FQ-modification settings, as defined in the low-level API
+ *
+ * The @opts parameter comes from the low-level portal API. Select
+ * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
+ * rather than parked. NB, @opts can be NULL.
+ *
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver;
+ * 1. the 'count' and 'fqid' fields are always ignored (this operation only
+ * affects one frame queue: @fq).
+ * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
+ * 'fqd' structure's 'context_b' field are sometimes overwritten;
+ *   - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
+ *     initialised to a value used by the driver for demux.
+ *   - if context_b is initialised for demux, so is context_a in case stashing
+ *     is requested (see item 4).
+ * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
+ * objects.)
+ * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
+ * 'dest::channel' field will be overwritten to match the portal used to issue
+ * the command. If the WE_DESTWQ write-enable bit had already been set by the
+ * caller, the channel workqueue will be left as-is, otherwise the write-enable
+ * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
+ * isn't set, the destination channel/workqueue fields and the write-enable bit
+ * are left as-is.
+ * 4. if the driver overwrites context_a/b for demux, then if
+ * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
+ * context_a.address fields and will leave the stashing fields provided by the
+ * user alone, otherwise it will zero out the context_a.stashing fields.
+ */
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
+
+/**
+ * qman_schedule_fq - Schedules a FQ
+ * @fq: the frame queue object to schedule, must be 'parked'
+ *
+ * Schedules the frame queue, which must be Parked, which takes it to
+ * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
+ */
+int qman_schedule_fq(struct qman_fq *fq);
+
+/**
+ * qman_retire_fq - Retires a FQ
+ * @fq: the frame queue object to retire
+ * @flags: FQ flags (QMAN_FQ_STATE*) if retirement completes immediately
+ *
+ * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
+ * the retirement was started asynchronously, otherwise it returns negative for
+ * failure. When this function returns zero, @flags is set to indicate whether
+ * the retired FQ is empty and/or whether it has any ORL fragments (to show up
+ * as ERNs). Otherwise the corresponding flags will be known when a subsequent
+ * FQRN message shows up on the portal's message ring.
+ *
+ * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
+ * Active state), the completion will be via the message ring as a FQRN - but
+ * the corresponding callback may occur before this function returns!! Ie. the
+ * caller should be prepared to accept the callback as the function is called,
+ * not only once it has returned.
+ */
+int qman_retire_fq(struct qman_fq *fq, u32 *flags);
+
+/**
+ * qman_oos_fq - Puts a FQ "out of service"
+ * @fq: the frame queue object to be put out-of-service, must be 'retired'
+ *
+ * The frame queue must be retired and empty, and if any order restoration list
+ * was released as ERNs at the time of retirement, they must all be consumed.
+ */
+int qman_oos_fq(struct qman_fq *fq);
+
+/**
+ * qman_enqueue - Enqueue a frame to a frame queue
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ *
+ * Fills an entry in the EQCR of portal @qm to enqueue the frame described by
+ * @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid'
+ * field is ignored. The return value is non-zero on error, such as ring full.
+ */
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd);
+
+/**
+ * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
+ * @result: is set by the API to the base FQID of the allocated range
+ * @count: the number of FQIDs required
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_alloc_fqid_range(u32 *result, u32 count);
+#define qman_alloc_fqid(result) qman_alloc_fqid_range(result, 1)
+
+/**
+ * qman_release_fqid - Release the specified frame queue ID
+ * @fqid: the FQID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * FQID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_release_fqid(u32 fqid);
+
+	/* Pool-channel management */
+/**
+ * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
+ * @result: is set by the API to the base pool-channel ID of the allocated range
+ * @count: the number of pool-channel IDs required
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_alloc_pool_range(u32 *result, u32 count);
+#define qman_alloc_pool(result) qman_alloc_pool_range(result, 1)
+
+/**
+ * qman_release_pool - Release the specified pool-channel ID
+ * @id: the pool-chan ID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * pool-channel ID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_release_pool(u32 id);
+
+	/* CGR management */
+/**
+ * qman_create_cgr - Register a congestion group object
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: optional state of CGR settings
+ *
+ * Registers this object to receiving congestion entry/exit callbacks on the
+ * portal affine to the cpu portal on which this API is executed. If opts is
+ * NULL then only the callback (cgr->cb) function is registered. If @flags
+ * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
+ * any unspecified parameters) will be used rather than a modify hw hardware
+ * (which only modifies the specified parameters).
+ */
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+		    struct qm_mcc_initcgr *opts);
+
+/**
+ * qman_delete_cgr - Deregisters a congestion group object
+ * @cgr: the 'cgr' object to deregister
+ *
+ * "Unplugs" this CGR object from the portal affine to the cpu on which this API
+ * is executed. This must be excuted on the same affine portal on which it was
+ * created.
+ */
+int qman_delete_cgr(struct qman_cgr *cgr);
+
+/**
+ * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
+ * @cgr: the 'cgr' object to deregister
+ *
+ * This will select the proper CPU and run there qman_delete_cgr().
+ */
+void qman_delete_cgr_safe(struct qman_cgr *cgr);
+
+/**
+ * qman_query_cgr_congested - Queries CGR's congestion status
+ * @cgr: the 'cgr' object to query
+ * @result: returns 'cgr's congestion status, 1 (true) if congested
+ */
+int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result);
+
+/**
+ * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
+ * @result: is set by the API to the base CGR ID of the allocated range
+ * @count: the number of CGR IDs required
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_alloc_cgrid_range(u32 *result, u32 count);
+#define qman_alloc_cgrid(result) qman_alloc_cgrid_range(result, 1)
+
+/**
+ * qman_release_cgrid - Release the specified CGR ID
+ * @id: the CGR ID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * CGR ID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_release_cgrid(u32 id);
+
+#endif	/* __FSL_QMAN_H */
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h
new file mode 100644
index 0000000..ab68640
--- /dev/null
+++ b/include/trace/events/cgroup.h
@@ -0,0 +1,163 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cgroup
+
+#if !defined(_TRACE_CGROUP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CGROUP_H
+
+#include <linux/cgroup.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(cgroup_root,
+
+	TP_PROTO(struct cgroup_root *root),
+
+	TP_ARGS(root),
+
+	TP_STRUCT__entry(
+		__field(	int,		root			)
+		__field(	u16,		ss_mask			)
+		__string(	name,		root->name		)
+	),
+
+	TP_fast_assign(
+		__entry->root = root->hierarchy_id;
+		__entry->ss_mask = root->subsys_mask;
+		__assign_str(name, root->name);
+	),
+
+	TP_printk("root=%d ss_mask=%#x name=%s",
+		  __entry->root, __entry->ss_mask, __get_str(name))
+);
+
+DEFINE_EVENT(cgroup_root, cgroup_setup_root,
+
+	TP_PROTO(struct cgroup_root *root),
+
+	TP_ARGS(root)
+);
+
+DEFINE_EVENT(cgroup_root, cgroup_destroy_root,
+
+	TP_PROTO(struct cgroup_root *root),
+
+	TP_ARGS(root)
+);
+
+DEFINE_EVENT(cgroup_root, cgroup_remount,
+
+	TP_PROTO(struct cgroup_root *root),
+
+	TP_ARGS(root)
+);
+
+DECLARE_EVENT_CLASS(cgroup,
+
+	TP_PROTO(struct cgroup *cgrp),
+
+	TP_ARGS(cgrp),
+
+	TP_STRUCT__entry(
+		__field(	int,		root			)
+		__field(	int,		id			)
+		__field(	int,		level			)
+		__dynamic_array(char,		path,
+				cgrp->kn ? cgroup_path(cgrp, NULL, 0) + 1
+					 : strlen("(null)"))
+	),
+
+	TP_fast_assign(
+		__entry->root = cgrp->root->hierarchy_id;
+		__entry->id = cgrp->id;
+		__entry->level = cgrp->level;
+		if (cgrp->kn)
+			cgroup_path(cgrp, __get_dynamic_array(path),
+				    __get_dynamic_array_len(path));
+		else
+			__assign_str(path, "(null)");
+	),
+
+	TP_printk("root=%d id=%d level=%d path=%s",
+		  __entry->root, __entry->id, __entry->level, __get_str(path))
+);
+
+DEFINE_EVENT(cgroup, cgroup_mkdir,
+
+	TP_PROTO(struct cgroup *cgroup),
+
+	TP_ARGS(cgroup)
+);
+
+DEFINE_EVENT(cgroup, cgroup_rmdir,
+
+	TP_PROTO(struct cgroup *cgroup),
+
+	TP_ARGS(cgroup)
+);
+
+DEFINE_EVENT(cgroup, cgroup_release,
+
+	TP_PROTO(struct cgroup *cgroup),
+
+	TP_ARGS(cgroup)
+);
+
+DEFINE_EVENT(cgroup, cgroup_rename,
+
+	TP_PROTO(struct cgroup *cgroup),
+
+	TP_ARGS(cgroup)
+);
+
+DECLARE_EVENT_CLASS(cgroup_migrate,
+
+	TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+
+	TP_ARGS(dst_cgrp, task, threadgroup),
+
+	TP_STRUCT__entry(
+		__field(	int,		dst_root		)
+		__field(	int,		dst_id			)
+		__field(	int,		dst_level		)
+		__dynamic_array(char,		dst_path,
+				dst_cgrp->kn ? cgroup_path(dst_cgrp, NULL, 0) + 1
+					     : strlen("(null)"))
+		__field(	int,		pid			)
+		__string(	comm,		task->comm		)
+	),
+
+	TP_fast_assign(
+		__entry->dst_root = dst_cgrp->root->hierarchy_id;
+		__entry->dst_id = dst_cgrp->id;
+		__entry->dst_level = dst_cgrp->level;
+		if (dst_cgrp->kn)
+			cgroup_path(dst_cgrp, __get_dynamic_array(dst_path),
+				    __get_dynamic_array_len(dst_path));
+		else
+			__assign_str(dst_path, "(null)");
+		__entry->pid = task->pid;
+		__assign_str(comm, task->comm);
+	),
+
+	TP_printk("dst_root=%d dst_id=%d dst_level=%d dst_path=%s pid=%d comm=%s",
+		  __entry->dst_root, __entry->dst_id, __entry->dst_level,
+		  __get_str(dst_path), __entry->pid, __get_str(comm))
+);
+
+DEFINE_EVENT(cgroup_migrate, cgroup_attach_task,
+
+	TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+
+	TP_ARGS(dst_cgrp, task, threadgroup)
+);
+
+DEFINE_EVENT(cgroup_migrate, cgroup_transfer_tasks,
+
+	TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+
+	TP_ARGS(dst_cgrp, task, threadgroup)
+);
+
+#endif /* _TRACE_CGROUP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index 5827438..8c27db0 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -72,4 +72,9 @@
 #define MAP_HUGE_SHIFT	26
 #define MAP_HUGE_MASK	0x3f
 
+#define PKEY_DISABLE_ACCESS	0x1
+#define PKEY_DISABLE_WRITE	0x2
+#define PKEY_ACCESS_MASK	(PKEY_DISABLE_ACCESS |\
+				 PKEY_DISABLE_WRITE)
+
 #endif /* __ASM_GENERIC_MMAN_COMMON_H */
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index a26415b..9b1462e 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -724,9 +724,15 @@
 __SC_COMP(__NR_preadv2, sys_preadv2, compat_sys_preadv2)
 #define __NR_pwritev2 287
 __SC_COMP(__NR_pwritev2, sys_pwritev2, compat_sys_pwritev2)
+#define __NR_pkey_mprotect 288
+__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
+#define __NR_pkey_alloc 289
+__SYSCALL(__NR_pkey_alloc,    sys_pkey_alloc)
+#define __NR_pkey_free 290
+__SYSCALL(__NR_pkey_free,     sys_pkey_free)
 
 #undef __NR_syscalls
-#define __NR_syscalls 288
+#define __NR_syscalls 291
 
 /*
  * All syscalls below here should go away really,
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 462246a..d6b5a21 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -77,6 +77,10 @@
 #define AMDGPU_GEM_CREATE_NO_CPU_ACCESS		(1 << 1)
 /* Flag that USWC attributes should be used for GTT */
 #define AMDGPU_GEM_CREATE_CPU_GTT_USWC		(1 << 2)
+/* Flag that the memory should be in VRAM and cleared */
+#define AMDGPU_GEM_CREATE_VRAM_CLEARED		(1 << 3)
+/* Flag that create shadow bo(GTT) while allocating vram bo */
+#define AMDGPU_GEM_CREATE_SHADOW		(1 << 4)
 
 struct drm_amdgpu_gem_create_in  {
 	/** the requested memory size */
@@ -481,6 +485,8 @@
 #define AMDGPU_INFO_DEV_INFO			0x16
 /* visible vram usage */
 #define AMDGPU_INFO_VIS_VRAM_USAGE		0x17
+/* number of TTM buffer evictions */
+#define AMDGPU_INFO_NUM_EVICTIONS		0x18
 
 #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT	0
 #define AMDGPU_INFO_MMR_SE_INDEX_MASK	0xff
@@ -643,6 +649,7 @@
  * Supported GPU families
  */
 #define AMDGPU_FAMILY_UNKNOWN			0
+#define AMDGPU_FAMILY_SI			110 /* Hainan, Oland, Verde, Pitcairn, Tahiti */
 #define AMDGPU_FAMILY_CI			120 /* Bonaire, Hawaii */
 #define AMDGPU_FAMILY_KV			125 /* Kaveri, Kabini, Mullins */
 #define AMDGPU_FAMILY_VI			130 /* Iceland, Tonga */
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 452675f..b2c5284 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -646,6 +646,7 @@
 #define DRM_CAP_CURSOR_WIDTH		0x8
 #define DRM_CAP_CURSOR_HEIGHT		0x9
 #define DRM_CAP_ADDFB2_MODIFIERS	0x10
+#define DRM_CAP_PAGE_FLIP_TARGET	0x11
 
 /** DRM_IOCTL_GET_CAP ioctl argument type */
 struct drm_get_cap {
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 49a7265..df0e350 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -520,7 +520,13 @@
 
 #define DRM_MODE_PAGE_FLIP_EVENT 0x01
 #define DRM_MODE_PAGE_FLIP_ASYNC 0x02
-#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC)
+#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
+#define DRM_MODE_PAGE_FLIP_TARGET_RELATIVE 0x8
+#define DRM_MODE_PAGE_FLIP_TARGET (DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE | \
+				   DRM_MODE_PAGE_FLIP_TARGET_RELATIVE)
+#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT | \
+				  DRM_MODE_PAGE_FLIP_ASYNC | \
+				  DRM_MODE_PAGE_FLIP_TARGET)
 
 /*
  * Request a page flip on the specified crtc.
@@ -543,8 +549,7 @@
  * 'as soon as possible', meaning that it not delay waiting for vblank.
  * This may cause tearing on the screen.
  *
- * The reserved field must be zero until we figure out something
- * clever to use it for.
+ * The reserved field must be zero.
  */
 
 struct drm_mode_crtc_page_flip {
@@ -555,6 +560,34 @@
 	__u64 user_data;
 };
 
+/*
+ * Request a page flip on the specified crtc.
+ *
+ * Same as struct drm_mode_crtc_page_flip, but supports new flags and
+ * re-purposes the reserved field:
+ *
+ * The sequence field must be zero unless either of the
+ * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is specified. When
+ * the ABSOLUTE flag is specified, the sequence field denotes the absolute
+ * vblank sequence when the flip should take effect. When the RELATIVE
+ * flag is specified, the sequence field denotes the relative (to the
+ * current one when the ioctl is called) vblank sequence when the flip
+ * should take effect. NOTE: DRM_IOCTL_WAIT_VBLANK must still be used to
+ * make sure the vblank sequence before the target one has passed before
+ * calling this ioctl. The purpose of the
+ * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is merely to clarify
+ * the target for when code dealing with a page flip runs during a
+ * vertical blank period.
+ */
+
+struct drm_mode_crtc_page_flip_target {
+	__u32 crtc_id;
+	__u32 fb_id;
+	__u32 flags;
+	__u32 sequence;
+	__u64 user_data;
+};
+
 /* create a dumb scanout buffer */
 struct drm_mode_create_dumb {
 	__u32 height;
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index d7e81a3..03725fe 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -62,6 +62,30 @@
 #define I915_ERROR_UEVENT		"ERROR"
 #define I915_RESET_UEVENT		"RESET"
 
+/*
+ * MOCS indexes used for GPU surfaces, defining the cacheability of the
+ * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
+ */
+enum i915_mocs_table_index {
+	/*
+	 * Not cached anywhere, coherency between CPU and GPU accesses is
+	 * guaranteed.
+	 */
+	I915_MOCS_UNCACHED,
+	/*
+	 * Cacheability and coherency controlled by the kernel automatically
+	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
+	 * usage of the surface (used for display scanout or not).
+	 */
+	I915_MOCS_PTE,
+	/*
+	 * Cached in all GPU caches available on the platform.
+	 * Coherency between CPU and GPU accesses to the surface is not
+	 * guaranteed without extra synchronization.
+	 */
+	I915_MOCS_CACHED,
+};
+
 /* Each region is a minimum of 16k, and there are at most 255 of them.
  */
 #define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
@@ -363,6 +387,7 @@
 #define I915_PARAM_HAS_EXEC_SOFTPIN	 37
 #define I915_PARAM_HAS_POOLED_EU	 38
 #define I915_PARAM_MIN_EU_IN_POOL	 39
+#define I915_PARAM_MMAP_GTT_VERSION	 40
 
 typedef struct drm_i915_getparam {
 	__s32 param;
@@ -698,15 +723,20 @@
 	 */
 	__u64 offset;
 
-#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
-#define EXEC_OBJECT_NEEDS_GTT	(1<<1)
-#define EXEC_OBJECT_WRITE	(1<<2)
+#define EXEC_OBJECT_NEEDS_FENCE		 (1<<0)
+#define EXEC_OBJECT_NEEDS_GTT		 (1<<1)
+#define EXEC_OBJECT_WRITE		 (1<<2)
 #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
-#define EXEC_OBJECT_PINNED	(1<<4)
-#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PINNED<<1)
+#define EXEC_OBJECT_PINNED		 (1<<4)
+#define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
+/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PAD_TO_SIZE<<1)
 	__u64 flags;
 
-	__u64 rsvd1;
+	union {
+		__u64 rsvd1;
+		__u64 pad_to_size;
+	};
 	__u64 rsvd2;
 };
 
@@ -826,7 +856,16 @@
 	 * having flushed any pending activity), and a non-zero return that
 	 * the object is still in-flight on the GPU. (The GPU has not yet
 	 * signaled completion for all pending requests that reference the
-	 * object.)
+	 * object.) An object is guaranteed to become idle eventually (so
+	 * long as no new GPU commands are executed upon it). Due to the
+	 * asynchronous nature of the hardware, an object reported
+	 * as busy may become idle before the ioctl is completed.
+	 *
+	 * Furthermore, if the object is busy, which engine is busy is only
+	 * provided as a guide. There are race conditions which prevent the
+	 * report of which engines are busy from being always accurate.
+	 * However, the converse is not true. If the object is idle, the
+	 * result of the ioctl, that all engines are idle, is accurate.
 	 *
 	 * The returned dword is split into two fields to indicate both
 	 * the engines on which the object is being read, and the
@@ -849,6 +888,11 @@
 	 * execution engines, e.g. multiple media engines, which are
 	 * mapped to the same identifier in the EXECBUFFER2 ioctl and
 	 * so are not separately reported for busyness.
+	 *
+	 * Caveat emptor:
+	 * Only the boolean result of this query is reliable; that is whether
+	 * the object is idle or busy. The report of which engines are busy
+	 * should be only used as a heuristic.
 	 */
 	__u32 busy;
 };
@@ -897,6 +941,7 @@
 #define I915_TILING_NONE	0
 #define I915_TILING_X		1
 #define I915_TILING_Y		2
+#define I915_TILING_LAST	I915_TILING_Y
 
 #define I915_BIT_6_SWIZZLE_NONE		0
 #define I915_BIT_6_SWIZZLE_9		1
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 49f778d..8c51e8a 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -42,6 +42,15 @@
 #define MSM_PIPE_2D1         0x02
 #define MSM_PIPE_3D0         0x10
 
+/* The pipe-id just uses the lower bits, so can be OR'd with flags in
+ * the upper 16 bits (which could be extended further, if needed, maybe
+ * we extend/overload the pipe-id some day to deal with multiple rings,
+ * but even then I don't think we need the full lower 16 bits).
+ */
+#define MSM_PIPE_ID_MASK     0xffff
+#define MSM_PIPE_ID(x)       ((x) & MSM_PIPE_ID_MASK)
+#define MSM_PIPE_FLAGS(x)    ((x) & ~MSM_PIPE_ID_MASK)
+
 /* timeouts are specified in clock-monotonic absolute times (to simplify
  * restarting interrupted ioctls).  The following struct is logically the
  * same as 'struct timespec' but 32/64b ABI safe.
@@ -175,17 +184,28 @@
 	__u64 presumed;       /* in/out, presumed buffer address */
 };
 
+/* Valid submit ioctl flags: */
+#define MSM_SUBMIT_NO_IMPLICIT   0x80000000 /* disable implicit sync */
+#define MSM_SUBMIT_FENCE_FD_IN   0x40000000 /* enable input fence_fd */
+#define MSM_SUBMIT_FENCE_FD_OUT  0x20000000 /* enable output fence_fd */
+#define MSM_SUBMIT_FLAGS                ( \
+		MSM_SUBMIT_NO_IMPLICIT   | \
+		MSM_SUBMIT_FENCE_FD_IN   | \
+		MSM_SUBMIT_FENCE_FD_OUT  | \
+		0)
+
 /* Each cmdstream submit consists of a table of buffers involved, and
  * one or more cmdstream buffers.  This allows for conditional execution
  * (context-restore), and IB buffers needed for per tile/bin draw cmds.
  */
 struct drm_msm_gem_submit {
-	__u32 pipe;           /* in, MSM_PIPE_x */
+	__u32 flags;          /* MSM_PIPE_x | MSM_SUBMIT_x */
 	__u32 fence;          /* out */
 	__u32 nr_bos;         /* in, number of submit_bo's */
 	__u32 nr_cmds;        /* in, number of submit_cmd's */
 	__u64 __user bos;     /* in, ptr to array of submit_bo's */
 	__u64 __user cmds;    /* in, ptr to array of submit_cmd's */
+	__s32 fence_fd;       /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
 };
 
 /* The normal way to synchronize with the GPU is just to CPU_PREP on
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index d0352a9..6965d09 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -336,6 +336,8 @@
 header-y += pkt_sched.h
 header-y += pmu.h
 header-y += poll.h
+header-y += posix_acl.h
+header-y += posix_acl_xattr.h
 header-y += posix_types.h
 header-y += ppdev.h
 header-y += ppp-comp.h
@@ -397,6 +399,7 @@
 header-y += suspend_ioctls.h
 header-y += swab.h
 header-y += synclink.h
+header-y += sync_file.h
 header-y += sysctl.h
 header-y += sysinfo.h
 header-y += target_core_user.h
diff --git a/include/uapi/linux/auto_dev-ioctl.h b/include/uapi/linux/auto_dev-ioctl.h
new file mode 100644
index 0000000..021ed33
--- /dev/null
+++ b/include/uapi/linux/auto_dev-ioctl.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2008 Red Hat, Inc. All rights reserved.
+ * Copyright 2008 Ian Kent <raven@themaw.net>
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ */
+
+#ifndef _UAPI_LINUX_AUTO_DEV_IOCTL_H
+#define _UAPI_LINUX_AUTO_DEV_IOCTL_H
+
+#include <linux/auto_fs.h>
+#include <linux/string.h>
+
+#define AUTOFS_DEVICE_NAME		"autofs"
+
+#define AUTOFS_DEV_IOCTL_VERSION_MAJOR	1
+#define AUTOFS_DEV_IOCTL_VERSION_MINOR	0
+
+#define AUTOFS_DEV_IOCTL_SIZE		sizeof(struct autofs_dev_ioctl)
+
+/*
+ * An ioctl interface for autofs mount point control.
+ */
+
+struct args_protover {
+	__u32	version;
+};
+
+struct args_protosubver {
+	__u32	sub_version;
+};
+
+struct args_openmount {
+	__u32	devid;
+};
+
+struct args_ready {
+	__u32	token;
+};
+
+struct args_fail {
+	__u32	token;
+	__s32	status;
+};
+
+struct args_setpipefd {
+	__s32	pipefd;
+};
+
+struct args_timeout {
+	__u64	timeout;
+};
+
+struct args_requester {
+	__u32	uid;
+	__u32	gid;
+};
+
+struct args_expire {
+	__u32	how;
+};
+
+struct args_askumount {
+	__u32	may_umount;
+};
+
+struct args_ismountpoint {
+	union {
+		struct args_in {
+			__u32	type;
+		} in;
+		struct args_out {
+			__u32	devid;
+			__u32	magic;
+		} out;
+	};
+};
+
+/*
+ * All the ioctls use this structure.
+ * When sending a path size must account for the total length
+ * of the chunk of memory otherwise is is the size of the
+ * structure.
+ */
+
+struct autofs_dev_ioctl {
+	__u32 ver_major;
+	__u32 ver_minor;
+	__u32 size;		/* total size of data passed in
+				 * including this struct */
+	__s32 ioctlfd;		/* automount command fd */
+
+	/* Command parameters */
+
+	union {
+		struct args_protover		protover;
+		struct args_protosubver		protosubver;
+		struct args_openmount		openmount;
+		struct args_ready		ready;
+		struct args_fail		fail;
+		struct args_setpipefd		setpipefd;
+		struct args_timeout		timeout;
+		struct args_requester		requester;
+		struct args_expire		expire;
+		struct args_askumount		askumount;
+		struct args_ismountpoint	ismountpoint;
+	};
+
+	char path[0];
+};
+
+static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
+{
+	memset(in, 0, sizeof(struct autofs_dev_ioctl));
+	in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
+	in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
+	in->size = sizeof(struct autofs_dev_ioctl);
+	in->ioctlfd = -1;
+}
+
+/*
+ * If you change this make sure you make the corresponding change
+ * to autofs-dev-ioctl.c:lookup_ioctl()
+ */
+enum {
+	/* Get various version info */
+	AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71,
+	AUTOFS_DEV_IOCTL_PROTOVER_CMD,
+	AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD,
+
+	/* Open mount ioctl fd */
+	AUTOFS_DEV_IOCTL_OPENMOUNT_CMD,
+
+	/* Close mount ioctl fd */
+	AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD,
+
+	/* Mount/expire status returns */
+	AUTOFS_DEV_IOCTL_READY_CMD,
+	AUTOFS_DEV_IOCTL_FAIL_CMD,
+
+	/* Activate/deactivate autofs mount */
+	AUTOFS_DEV_IOCTL_SETPIPEFD_CMD,
+	AUTOFS_DEV_IOCTL_CATATONIC_CMD,
+
+	/* Expiry timeout */
+	AUTOFS_DEV_IOCTL_TIMEOUT_CMD,
+
+	/* Get mount last requesting uid and gid */
+	AUTOFS_DEV_IOCTL_REQUESTER_CMD,
+
+	/* Check for eligible expire candidates */
+	AUTOFS_DEV_IOCTL_EXPIRE_CMD,
+
+	/* Request busy status */
+	AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD,
+
+	/* Check if path is a mountpoint */
+	AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD,
+};
+
+#define AUTOFS_IOCTL 0x93
+
+#define AUTOFS_DEV_IOCTL_VERSION \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_PROTOVER \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_PROTOVER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_PROTOSUBVER \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_OPENMOUNT \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_CLOSEMOUNT \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_READY \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_READY_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_FAIL \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_FAIL_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_SETPIPEFD \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_CATATONIC \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_CATATONIC_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_TIMEOUT \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_TIMEOUT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_REQUESTER \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_REQUESTER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_EXPIRE \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_EXPIRE_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_ASKUMOUNT \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_ISMOUNTPOINT \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, struct autofs_dev_ioctl)
+
+#endif	/* _UAPI_LINUX_AUTO_DEV_IOCTL_H */
diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h
index 9175a1b..1bfc3ed 100644
--- a/include/uapi/linux/auto_fs.h
+++ b/include/uapi/linux/auto_fs.h
@@ -12,6 +12,7 @@
 #define _UAPI_LINUX_AUTO_FS_H
 
 #include <linux/types.h>
+#include <linux/limits.h>
 #ifndef __KERNEL__
 #include <sys/ioctl.h>
 #endif /* __KERNEL__ */
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index ac5eacd..db4c253 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -239,7 +239,17 @@
  * Used by:
  * struct btrfs_ioctl_feature_flags
  */
-#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE	(1ULL << 0)
+#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE		(1ULL << 0)
+/*
+ * Older kernels (< 4.9) on big-endian systems produced broken free space tree
+ * bitmaps, and btrfs-progs also used to corrupt the free space tree (versions
+ * < 4.7.3).  If this bit is clear, then the free space tree cannot be trusted.
+ * btrfs-progs can also intentionally clear this bit to ask the kernel to
+ * rebuild the free space tree, however this might not work on older kernels
+ * that do not know about this bit. If not sure, clear the cache manually on
+ * first mount when booting older kernel versions.
+ */
+#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID	(1ULL << 1)
 
 #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF	(1ULL << 0)
 #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL	(1ULL << 1)
diff --git a/include/uapi/linux/dvb/video.h b/include/uapi/linux/dvb/video.h
index 4939256..260f033 100644
--- a/include/uapi/linux/dvb/video.h
+++ b/include/uapi/linux/dvb/video.h
@@ -206,7 +206,8 @@
 /*    6    line 21-2 data present in GOP (1=yes, 0=no) */
 /*    5- 3 source resolution (0=720x480/576, 1=704x480/576, 2=352x480/57 */
 /*    2    source letterboxed (1=yes, 0=no) */
-/*    0    film/camera mode (0=camera, 1=film (625/50 only)) */
+/*    0    film/camera mode (0=
+ *camera, 1=film (625/50 only)) */
 
 
 /* bit definitions for capabilities: */
diff --git a/include/uapi/linux/falloc.h b/include/uapi/linux/falloc.h
index 3e445a7..b075f60 100644
--- a/include/uapi/linux/falloc.h
+++ b/include/uapi/linux/falloc.h
@@ -58,4 +58,22 @@
  */
 #define FALLOC_FL_INSERT_RANGE		0x20
 
+/*
+ * FALLOC_FL_UNSHARE_RANGE is used to unshare shared blocks within the
+ * file size without overwriting any existing data. The purpose of this
+ * call is to preemptively reallocate any blocks that are subject to
+ * copy-on-write.
+ *
+ * Different filesystems may implement different limitations on the
+ * granularity of the operation. Most will limit operations to filesystem
+ * block size boundaries, but this boundary may be larger or smaller
+ * depending on the filesystem and/or the configuration of the filesystem
+ * or file.
+ *
+ * This flag can only be used with allocate-mode fallocate, which is
+ * to say that it cannot be used with the punch, zero, collapse, or
+ * insert range modes.
+ */
+#define FALLOC_FL_UNSHARE_RANGE		0x40
+
 #endif /* _UAPI_FALLOC_H_ */
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 3b00f7c..acb2b61 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -132,6 +132,7 @@
 #define MS_LAZYTIME	(1<<25) /* Update the on-disk [acm]times lazily */
 
 /* These sb flags are internal to the kernel */
+#define MS_NOREMOTELOCK	(1<<27)
 #define MS_NOSEC	(1<<28)
 #define MS_BORN		(1<<29)
 #define MS_ACTIVE	(1<<30)
@@ -157,7 +158,8 @@
 	__u32		fsx_extsize;	/* extsize field value (get/set)*/
 	__u32		fsx_nextents;	/* nextents field value (get)	*/
 	__u32		fsx_projid;	/* project identifier (get/set) */
-	unsigned char	fsx_pad[12];
+	__u32		fsx_cowextsize;	/* CoW extsize field value (get/set)*/
+	unsigned char	fsx_pad[8];
 };
 
 /*
@@ -178,6 +180,7 @@
 #define FS_XFLAG_NODEFRAG	0x00002000	/* do not defragment */
 #define FS_XFLAG_FILESTREAM	0x00004000	/* use filestream allocator */
 #define FS_XFLAG_DAX		0x00008000	/* use DAX for IO */
+#define FS_XFLAG_COWEXTSIZE	0x00010000	/* CoW extent size allocator hint */
 #define FS_XFLAG_HASATTR	0x80000000	/* no DIFLAG for this	*/
 
 /* the read-only stuff doesn't really belong here, but any other place is
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index e398bea..9bd5594 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -65,6 +65,7 @@
 #define V9FS_MAGIC		0x01021997
 
 #define BDEVFS_MAGIC            0x62646576
+#define DAXFS_MAGIC             0x64646178
 #define BINFMTFS_MAGIC          0x42494e4d
 #define DEVPTS_SUPER_MAGIC	0x1cd1
 #define FUTEXFS_SUPER_MAGIC	0xBAD1DEA
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 190d491..2168759 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -97,7 +97,7 @@
 #define MEDIA_BUS_FMT_YUV10_1X30		0x2016
 #define MEDIA_BUS_FMT_AYUV8_1X32		0x2017
 
-/* Bayer - next is	0x3019 */
+/* Bayer - next is	0x3021 */
 #define MEDIA_BUS_FMT_SBGGR8_1X8		0x3001
 #define MEDIA_BUS_FMT_SGBRG8_1X8		0x3013
 #define MEDIA_BUS_FMT_SGRBG8_1X8		0x3002
@@ -122,6 +122,14 @@
 #define MEDIA_BUS_FMT_SGBRG12_1X12		0x3010
 #define MEDIA_BUS_FMT_SGRBG12_1X12		0x3011
 #define MEDIA_BUS_FMT_SRGGB12_1X12		0x3012
+#define MEDIA_BUS_FMT_SBGGR14_1X14		0x3019
+#define MEDIA_BUS_FMT_SGBRG14_1X14		0x301a
+#define MEDIA_BUS_FMT_SGRBG14_1X14		0x301b
+#define MEDIA_BUS_FMT_SRGGB14_1X14		0x301c
+#define MEDIA_BUS_FMT_SBGGR16_1X16		0x301d
+#define MEDIA_BUS_FMT_SGBRG16_1X16		0x301e
+#define MEDIA_BUS_FMT_SGRBG16_1X16		0x301f
+#define MEDIA_BUS_FMT_SRGGB16_1X16		0x3020
 
 /* JPEG compressed formats - next is	0x4002 */
 #define MEDIA_BUS_FMT_JPEG_1X8			0x4001
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index 7acf0f6..4890787 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -307,6 +307,7 @@
 #define MEDIA_INTF_T_V4L_RADIO  (MEDIA_INTF_T_V4L_BASE + 2)
 #define MEDIA_INTF_T_V4L_SUBDEV (MEDIA_INTF_T_V4L_BASE + 3)
 #define MEDIA_INTF_T_V4L_SWRADIO (MEDIA_INTF_T_V4L_BASE + 4)
+#define MEDIA_INTF_T_V4L_TOUCH	(MEDIA_INTF_T_V4L_BASE + 5)
 
 #define MEDIA_INTF_T_ALSA_PCM_CAPTURE   (MEDIA_INTF_T_ALSA_BASE)
 #define MEDIA_INTF_T_ALSA_PCM_PLAYBACK  (MEDIA_INTF_T_ALSA_BASE + 1)
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index ba5a8c7..ede5c6a 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -21,14 +21,16 @@
 } __packed;
 
 #define ND_SMART_HEALTH_VALID	(1 << 0)
-#define ND_SMART_TEMP_VALID 	(1 << 1)
-#define ND_SMART_SPARES_VALID	(1 << 2)
-#define ND_SMART_ALARM_VALID	(1 << 3)
-#define ND_SMART_USED_VALID	(1 << 4)
-#define ND_SMART_SHUTDOWN_VALID	(1 << 5)
-#define ND_SMART_VENDOR_VALID	(1 << 6)
-#define ND_SMART_TEMP_TRIP	(1 << 0)
-#define ND_SMART_SPARE_TRIP	(1 << 1)
+#define ND_SMART_SPARES_VALID	(1 << 1)
+#define ND_SMART_USED_VALID	(1 << 2)
+#define ND_SMART_TEMP_VALID 	(1 << 3)
+#define ND_SMART_CTEMP_VALID 	(1 << 4)
+#define ND_SMART_ALARM_VALID	(1 << 9)
+#define ND_SMART_SHUTDOWN_VALID	(1 << 10)
+#define ND_SMART_VENDOR_VALID	(1 << 11)
+#define ND_SMART_SPARE_TRIP	(1 << 0)
+#define ND_SMART_TEMP_TRIP	(1 << 1)
+#define ND_SMART_CTEMP_TRIP	(1 << 2)
 #define ND_SMART_NON_CRITICAL_HEALTH	(1 << 0)
 #define ND_SMART_CRITICAL_HEALTH	(1 << 1)
 #define ND_SMART_FATAL_HEALTH		(1 << 2)
@@ -37,14 +39,15 @@
 	__u32 flags;
 	__u8 reserved0[4];
 	__u8 health;
-	__u16 temperature;
 	__u8 spares;
-	__u8 alarm_flags;
 	__u8 life_used;
+	__u8 alarm_flags;
+	__u16 temperature;
+	__u16 ctrl_temperature;
+	__u8 reserved1[15];
 	__u8 shutdown_state;
-	__u8 reserved1;
 	__u32 vendor_size;
-	__u8 vendor_data[108];
+	__u8 vendor_data[92];
 } __packed;
 
 struct nd_cmd_smart_threshold {
@@ -53,7 +56,8 @@
 } __packed;
 
 struct nd_smart_threshold_payload {
-	__u16 alarm_control;
+	__u8 alarm_control;
+	__u8 reserved0;
 	__u16 temperature;
 	__u8 spares;
 	__u8 reserved[3];
diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h
index 2b871e0..4ae6279 100644
--- a/include/uapi/linux/nfs4.h
+++ b/include/uapi/linux/nfs4.h
@@ -39,8 +39,9 @@
 #define NFS4_FH_VOL_MIGRATION		0x0004
 #define NFS4_FH_VOL_RENAME		0x0008
 
-#define NFS4_OPEN_RESULT_CONFIRM 0x0002
-#define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004
+#define NFS4_OPEN_RESULT_CONFIRM		0x0002
+#define NFS4_OPEN_RESULT_LOCKTYPE_POSIX		0x0004
+#define NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK	0x0020
 
 #define NFS4_SHARE_ACCESS_MASK	0x000F
 #define NFS4_SHARE_ACCESS_READ	0x0001
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index d812172..e5a2e68 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -612,6 +612,8 @@
  */
 #define PCI_EXP_DEVCAP2		36	/* Device Capabilities 2 */
 #define  PCI_EXP_DEVCAP2_ARI		0x00000020 /* Alternative Routing-ID */
+#define  PCI_EXP_DEVCAP2_ATOMIC_ROUTE	0x00000040 /* Atomic Op routing */
+#define PCI_EXP_DEVCAP2_ATOMIC_COMP64	0x00000100 /* Atomic 64-bit compare */
 #define  PCI_EXP_DEVCAP2_LTR		0x00000800 /* Latency tolerance reporting */
 #define  PCI_EXP_DEVCAP2_OBFF_MASK	0x000c0000 /* OBFF support mechanism */
 #define  PCI_EXP_DEVCAP2_OBFF_MSG	0x00040000 /* New message signaling */
@@ -619,6 +621,7 @@
 #define PCI_EXP_DEVCTL2		40	/* Device Control 2 */
 #define  PCI_EXP_DEVCTL2_COMP_TIMEOUT	0x000f	/* Completion Timeout Value */
 #define  PCI_EXP_DEVCTL2_ARI		0x0020	/* Alternative Routing-ID */
+#define PCI_EXP_DEVCTL2_ATOMIC_REQ	0x0040	/* Set Atomic requests */
 #define  PCI_EXP_DEVCTL2_IDO_REQ_EN	0x0100	/* Allow IDO for requests */
 #define  PCI_EXP_DEVCTL2_IDO_CMP_EN	0x0200	/* Allow IDO for completions */
 #define  PCI_EXP_DEVCTL2_LTR_EN		0x0400	/* Enable LTR mechanism */
diff --git a/include/uapi/linux/posix_acl.h b/include/uapi/linux/posix_acl.h
new file mode 100644
index 0000000..1037cb1
--- /dev/null
+++ b/include/uapi/linux/posix_acl.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2002 Andreas Gruenbacher <a.gruenbacher@computer.org>
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ */
+
+#ifndef __UAPI_POSIX_ACL_H
+#define __UAPI_POSIX_ACL_H
+
+#define ACL_UNDEFINED_ID	(-1)
+
+/* a_type field in acl_user_posix_entry_t */
+#define ACL_TYPE_ACCESS		(0x8000)
+#define ACL_TYPE_DEFAULT	(0x4000)
+
+/* e_tag entry in struct posix_acl_entry */
+#define ACL_USER_OBJ		(0x01)
+#define ACL_USER		(0x02)
+#define ACL_GROUP_OBJ		(0x04)
+#define ACL_GROUP		(0x08)
+#define ACL_MASK		(0x10)
+#define ACL_OTHER		(0x20)
+
+/* permissions in the e_perm field */
+#define ACL_READ		(0x04)
+#define ACL_WRITE		(0x02)
+#define ACL_EXECUTE		(0x01)
+
+#endif  /* __UAPI_POSIX_ACL_H */
diff --git a/include/uapi/linux/posix_acl_xattr.h b/include/uapi/linux/posix_acl_xattr.h
new file mode 100644
index 0000000..8b57984
--- /dev/null
+++ b/include/uapi/linux/posix_acl_xattr.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2002 Andreas Gruenbacher <a.gruenbacher@computer.org>
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ */
+
+#ifndef __UAPI_POSIX_ACL_XATTR_H
+#define __UAPI_POSIX_ACL_XATTR_H
+
+#include <linux/types.h>
+
+/* Supported ACL a_version fields */
+#define POSIX_ACL_XATTR_VERSION	0x0002
+
+/* An undefined entry e_id value */
+#define ACL_UNDEFINED_ID	(-1)
+
+struct posix_acl_xattr_entry {
+	__le16			e_tag;
+	__le16			e_perm;
+	__le32			e_id;
+};
+
+struct posix_acl_xattr_header {
+	__le32			a_version;
+};
+
+#endif	/* __UAPI_POSIX_ACL_XATTR_H */
diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h
index 413303d..5b287d6 100644
--- a/include/uapi/linux/sync_file.h
+++ b/include/uapi/linux/sync_file.h
@@ -85,15 +85,12 @@
 #define SYNC_IOC_MERGE		_IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
 
 /**
- * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ * DOC: SYNC_IOC_FILE_INFO - get detailed information on a sync_file
  *
- * Takes a struct sync_file_info_data with extra space allocated for pt_info.
- * Caller should write the size of the buffer into len.  On return, len is
- * updated to reflect the total size of the sync_file_info_data including
- * pt_info.
- *
- * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
- * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
+ * Takes a struct sync_file_info. If num_fences is 0, the field is updated
+ * with the actual number of fences. If num_fences is > 0, the system will
+ * use the pointer provided on sync_fence_info to return up to num_fences of
+ * struct sync_fence_info, with detailed fence information.
  */
 #define SYNC_IOC_FILE_INFO	_IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info)
 
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index 086168e..f319571 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -934,4 +934,16 @@
 		V4L2_DV_FL_REDUCED_BLANKING) \
 }
 
+/* SDI timings definitions */
+
+/* SMPTE-125M */
+#define V4L2_DV_BT_SDI_720X487I60 { \
+	.type = V4L2_DV_BT_656_1120, \
+	V4L2_INIT_BT_TIMINGS(720, 487, 1, \
+		V4L2_DV_HSYNC_POS_POL, \
+		13500000, 16, 121, 0, 0, 19, 0, 0, 19, 0, \
+		V4L2_DV_BT_STD_SDI, \
+		V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE) \
+}
+
 #endif
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 724f43e..94f123f 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -292,13 +292,11 @@
 	 * various colorspaces:
 	 *
 	 * V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M,
-	 * V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_ADOBERGB and
-	 * V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601
+	 * V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_SRGB,
+	 * V4L2_COLORSPACE_ADOBERGB and V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601
 	 *
 	 * V4L2_COLORSPACE_REC709 and V4L2_COLORSPACE_DCI_P3: V4L2_YCBCR_ENC_709
 	 *
-	 * V4L2_COLORSPACE_SRGB: V4L2_YCBCR_ENC_SYCC
-	 *
 	 * V4L2_COLORSPACE_BT2020: V4L2_YCBCR_ENC_BT2020
 	 *
 	 * V4L2_COLORSPACE_SMPTE240M: V4L2_YCBCR_ENC_SMPTE240M
@@ -317,8 +315,14 @@
 	/* Rec. 709/EN 61966-2-4 Extended Gamut -- HDTV */
 	V4L2_YCBCR_ENC_XV709          = 4,
 
-	/* sYCC (Y'CbCr encoding of sRGB) */
+#ifndef __KERNEL__
+	/*
+	 * sYCC (Y'CbCr encoding of sRGB), identical to ENC_601. It was added
+	 * originally due to a misunderstanding of the sYCC standard. It should
+	 * not be used, instead use V4L2_YCBCR_ENC_601.
+	 */
 	V4L2_YCBCR_ENC_SYCC           = 5,
+#endif
 
 	/* BT.2020 Non-constant Luminance Y'CbCr */
 	V4L2_YCBCR_ENC_BT2020         = 6,
@@ -345,8 +349,8 @@
 	/*
 	 * The default for R'G'B' quantization is always full range, except
 	 * for the BT2020 colorspace. For Y'CbCr the quantization is always
-	 * limited range, except for COLORSPACE_JPEG, SYCC, XV601 or XV709:
-	 * those are full range.
+	 * limited range, except for COLORSPACE_JPEG, SRGB, ADOBERGB,
+	 * XV601 or XV709: those are full range.
 	 */
 	V4L2_QUANTIZATION_DEFAULT     = 0,
 	V4L2_QUANTIZATION_FULL_RANGE  = 1,
@@ -361,7 +365,8 @@
 #define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb, colsp, ycbcr_enc) \
 	(((is_rgb) && (colsp) == V4L2_COLORSPACE_BT2020) ? V4L2_QUANTIZATION_LIM_RANGE : \
 	 (((is_rgb) || (ycbcr_enc) == V4L2_YCBCR_ENC_XV601 || \
-	  (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) ? \
+	  (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) || \
+	  (colsp) == V4L2_COLORSPACE_ADOBERGB || (colsp) == V4L2_COLORSPACE_SRGB ? \
 	 V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
 
 enum v4l2_priority {
@@ -440,6 +445,8 @@
 #define V4L2_CAP_ASYNCIO                0x02000000  /* async I/O */
 #define V4L2_CAP_STREAMING              0x04000000  /* streaming I/O ioctls */
 
+#define V4L2_CAP_TOUCH                  0x10000000  /* Is a touch device */
+
 #define V4L2_CAP_DEVICE_CAPS            0x80000000  /* sets device capabilities field */
 
 /*
@@ -635,6 +642,12 @@
 #define V4L2_SDR_FMT_CS14LE       v4l2_fourcc('C', 'S', '1', '4') /* complex s14le */
 #define V4L2_SDR_FMT_RU12LE       v4l2_fourcc('R', 'U', '1', '2') /* real u12le */
 
+/* Touch formats - used for Touch devices */
+#define V4L2_TCH_FMT_DELTA_TD16	v4l2_fourcc('T', 'D', '1', '6') /* 16-bit signed deltas */
+#define V4L2_TCH_FMT_DELTA_TD08	v4l2_fourcc('T', 'D', '0', '8') /* 8-bit signed deltas */
+#define V4L2_TCH_FMT_TU16	v4l2_fourcc('T', 'U', '1', '6') /* 16-bit unsigned touch data */
+#define V4L2_TCH_FMT_TU08	v4l2_fourcc('T', 'U', '0', '8') /* 8-bit unsigned touch data */
+
 /* priv field value to indicates that subsequent fields are valid. */
 #define V4L2_PIX_FMT_PRIV_MAGIC		0xfeedcafe
 
@@ -1261,6 +1274,7 @@
 #define V4L2_DV_BT_STD_DMT	(1 << 1)  /* VESA Discrete Monitor Timings */
 #define V4L2_DV_BT_STD_CVT	(1 << 2)  /* VESA Coordinated Video Timings */
 #define V4L2_DV_BT_STD_GTF	(1 << 3)  /* VESA Generalized Timings Formula */
+#define V4L2_DV_BT_STD_SDI	(1 << 4)  /* SDI Timings */
 
 /* Flags */
 
@@ -1292,6 +1306,11 @@
  * use the range 16-235) as opposed to 0-255. All formats defined in CEA-861
  * except for the 640x480 format are CE formats. */
 #define V4L2_DV_FL_IS_CE_VIDEO			(1 << 4)
+/* Some formats like SMPTE-125M have an interlaced signal with a odd
+ * total height. For these formats, if this flag is set, the first
+ * field has the extra line. If not, it is the second field.
+ */
+#define V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE		(1 << 5)
 
 /* A few useful defines to calculate the total blanking and frame sizes */
 #define V4L2_DV_BT_BLANKING_WIDTH(bt) \
@@ -1401,6 +1420,7 @@
 /*  Values for the 'type' field */
 #define V4L2_INPUT_TYPE_TUNER		1
 #define V4L2_INPUT_TYPE_CAMERA		2
+#define V4L2_INPUT_TYPE_TOUCH		3
 
 /* field 'status' - general */
 #define V4L2_IN_ST_NO_POWER    0x00000001  /* Attached device is off */
@@ -1415,6 +1435,8 @@
 /* field 'status' - analog */
 #define V4L2_IN_ST_NO_H_LOCK   0x00000100  /* No horizontal sync lock */
 #define V4L2_IN_ST_COLOR_KILL  0x00000200  /* Color killer is active */
+#define V4L2_IN_ST_NO_V_LOCK   0x00000400  /* No vertical sync lock */
+#define V4L2_IN_ST_NO_STD_LOCK 0x00000800  /* No standard format lock */
 
 /* field 'status' - digital */
 #define V4L2_IN_ST_NO_SYNC     0x00010000  /* No synchronization lock */
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
new file mode 100644
index 0000000..75c270d
--- /dev/null
+++ b/include/uapi/rdma/qedr-abi.h
@@ -0,0 +1,106 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QEDR_USER_H__
+#define __QEDR_USER_H__
+
+#include <linux/types.h>
+
+#define QEDR_ABI_VERSION		(8)
+
+/* user kernel communication data structures. */
+
+struct qedr_alloc_ucontext_resp {
+	__u64 db_pa;
+	__u32 db_size;
+
+	__u32 max_send_wr;
+	__u32 max_recv_wr;
+	__u32 max_srq_wr;
+	__u32 sges_per_send_wr;
+	__u32 sges_per_recv_wr;
+	__u32 sges_per_srq_wr;
+	__u32 max_cqes;
+};
+
+struct qedr_alloc_pd_ureq {
+	__u64 rsvd1;
+};
+
+struct qedr_alloc_pd_uresp {
+	__u32 pd_id;
+};
+
+struct qedr_create_cq_ureq {
+	__u64 addr;
+	__u64 len;
+};
+
+struct qedr_create_cq_uresp {
+	__u32 db_offset;
+	__u16 icid;
+};
+
+struct qedr_create_qp_ureq {
+	__u32 qp_handle_hi;
+	__u32 qp_handle_lo;
+
+	/* SQ */
+	/* user space virtual address of SQ buffer */
+	__u64 sq_addr;
+
+	/* length of SQ buffer */
+	__u64 sq_len;
+
+	/* RQ */
+	/* user space virtual address of RQ buffer */
+	__u64 rq_addr;
+
+	/* length of RQ buffer */
+	__u64 rq_len;
+};
+
+struct qedr_create_qp_uresp {
+	__u32 qp_id;
+	__u32 atomic_supported;
+
+	/* SQ */
+	__u32 sq_db_offset;
+	__u16 sq_icid;
+
+	/* RQ */
+	__u32 rq_db_offset;
+	__u16 rq_icid;
+
+	__u32 rq_db2_offset;
+};
+
+#endif /* __QEDR_USER_H__ */
diff --git a/include/video/exynos_mipi_dsim.h b/include/video/exynos_mipi_dsim.h
deleted file mode 100644
index 6a578f8..0000000
--- a/include/video/exynos_mipi_dsim.h
+++ /dev/null
@@ -1,358 +0,0 @@
-/* include/video/exynos_mipi_dsim.h
- *
- * Platform data header for Samsung SoC MIPI-DSIM.
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
- *
- * InKi Dae <inki.dae@samsung.com>
- * Donghwa Lee <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef _EXYNOS_MIPI_DSIM_H
-#define _EXYNOS_MIPI_DSIM_H
-
-#include <linux/device.h>
-#include <linux/fb.h>
-
-#define PANEL_NAME_SIZE		(32)
-
-/*
- * Enumerate display interface type.
- *
- * DSIM_COMMAND means cpu interface and rgb interface for DSIM_VIDEO.
- *
- * P.S. MIPI DSI Master has two display controller intefaces, RGB Interface
- *	for main display and CPU Interface(same as I80 Interface) for main
- *	and sub display.
- */
-enum mipi_dsim_interface_type {
-	DSIM_COMMAND,
-	DSIM_VIDEO
-};
-
-enum mipi_dsim_virtual_ch_no {
-	DSIM_VIRTUAL_CH_0,
-	DSIM_VIRTUAL_CH_1,
-	DSIM_VIRTUAL_CH_2,
-	DSIM_VIRTUAL_CH_3
-};
-
-enum mipi_dsim_burst_mode_type {
-	DSIM_NON_BURST_SYNC_EVENT,
-	DSIM_BURST_SYNC_EVENT,
-	DSIM_NON_BURST_SYNC_PULSE,
-	DSIM_BURST,
-	DSIM_NON_VIDEO_MODE
-};
-
-enum mipi_dsim_no_of_data_lane {
-	DSIM_DATA_LANE_1,
-	DSIM_DATA_LANE_2,
-	DSIM_DATA_LANE_3,
-	DSIM_DATA_LANE_4
-};
-
-enum mipi_dsim_byte_clk_src {
-	DSIM_PLL_OUT_DIV8,
-	DSIM_EXT_CLK_DIV8,
-	DSIM_EXT_CLK_BYPASS
-};
-
-enum mipi_dsim_pixel_format {
-	DSIM_CMD_3BPP,
-	DSIM_CMD_8BPP,
-	DSIM_CMD_12BPP,
-	DSIM_CMD_16BPP,
-	DSIM_VID_16BPP_565,
-	DSIM_VID_18BPP_666PACKED,
-	DSIM_18BPP_666LOOSELYPACKED,
-	DSIM_24BPP_888
-};
-
-/*
- * struct mipi_dsim_config - interface for configuring mipi-dsi controller.
- *
- * @auto_flush: enable or disable Auto flush of MD FIFO using VSYNC pulse.
- * @eot_disable: enable or disable EoT packet in HS mode.
- * @auto_vertical_cnt: specifies auto vertical count mode.
- *	in Video mode, the vertical line transition uses line counter
- *	configured by VSA, VBP, and Vertical resolution.
- *	If this bit is set to '1', the line counter does not use VSA and VBP
- *	registers.(in command mode, this variable is ignored)
- * @hse: set horizontal sync event mode.
- *	In VSYNC pulse and Vporch area, MIPI DSI master transfers only HSYNC
- *	start packet to MIPI DSI slave at MIPI DSI spec1.1r02.
- *	this bit transfers HSYNC end packet in VSYNC pulse and Vporch area
- *	(in mommand mode, this variable is ignored)
- * @hfp: specifies HFP disable mode.
- *	if this variable is set, DSI master ignores HFP area in VIDEO mode.
- *	(in command mode, this variable is ignored)
- * @hbp: specifies HBP disable mode.
- *	if this variable is set, DSI master ignores HBP area in VIDEO mode.
- *	(in command mode, this variable is ignored)
- * @hsa: specifies HSA disable mode.
- *	if this variable is set, DSI master ignores HSA area in VIDEO mode.
- *	(in command mode, this variable is ignored)
- * @cma_allow: specifies the number of horizontal lines, where command packet
- *	transmission is allowed after Stable VFP period.
- * @e_interface: specifies interface to be used.(CPU or RGB interface)
- * @e_virtual_ch: specifies virtual channel number that main or
- *	sub diaplsy uses.
- * @e_pixel_format: specifies pixel stream format for main or sub display.
- * @e_burst_mode: selects Burst mode in Video mode.
- *	in Non-burst mode, RGB data area is filled with RGB data and NULL
- *	packets, according to input bandwidth of RGB interface.
- *	In Burst mode, RGB data area is filled with RGB data only.
- * @e_no_data_lane: specifies data lane count to be used by Master.
- * @e_byte_clk: select byte clock source. (it must be DSIM_PLL_OUT_DIV8)
- *	DSIM_EXT_CLK_DIV8 and DSIM_EXT_CLK_BYPASSS are not supported.
- * @pll_stable_time: specifies the PLL Timer for stability of the ganerated
- *	clock(System clock cycle base)
- *	if the timer value goes to 0x00000000, the clock stable bit of status
- *	and interrupt register is set.
- * @esc_clk: specifies escape clock frequency for getting the escape clock
- *	prescaler value.
- * @stop_holding_cnt: specifies the interval value between transmitting
- *	read packet(or write "set_tear_on" command) and BTA request.
- *	after transmitting read packet or write "set_tear_on" command,
- *	BTA requests to D-PHY automatically. this counter value specifies
- *	the interval between them.
- * @bta_timeout: specifies the timer for BTA.
- *	this register specifies time out from BTA request to change
- *	the direction with respect to Tx escape clock.
- * @rx_timeout: specifies the timer for LP Rx mode timeout.
- *	this register specifies time out on how long RxValid deasserts,
- *	after RxLpdt asserts with respect to Tx escape clock.
- *	- RxValid specifies Rx data valid indicator.
- *	- RxLpdt specifies an indicator that D-PHY is under RxLpdt mode.
- *	- RxValid and RxLpdt specifies signal from D-PHY.
- */
-struct mipi_dsim_config {
-	unsigned char			auto_flush;
-	unsigned char			eot_disable;
-
-	unsigned char			auto_vertical_cnt;
-	unsigned char			hse;
-	unsigned char			hfp;
-	unsigned char			hbp;
-	unsigned char			hsa;
-	unsigned char			cmd_allow;
-
-	enum mipi_dsim_interface_type	e_interface;
-	enum mipi_dsim_virtual_ch_no	e_virtual_ch;
-	enum mipi_dsim_pixel_format	e_pixel_format;
-	enum mipi_dsim_burst_mode_type	e_burst_mode;
-	enum mipi_dsim_no_of_data_lane	e_no_data_lane;
-	enum mipi_dsim_byte_clk_src	e_byte_clk;
-
-	/*
-	 * ===========================================
-	 * |    P    |    M    |    S    |    MHz    |
-	 * -------------------------------------------
-	 * |    3    |   100   |    3    |    100    |
-	 * |    3    |   100   |    2    |    200    |
-	 * |    3    |    63   |    1    |    252    |
-	 * |    4    |   100   |    1    |    300    |
-	 * |    4    |   110   |    1    |    330    |
-	 * |   12    |   350   |    1    |    350    |
-	 * |    3    |   100   |    1    |    400    |
-	 * |    4    |   150   |    1    |    450    |
-	 * |    6    |   118   |    1    |    472    |
-	 * |	3    |   120   |    1    |    480    |
-	 * |   12    |   250   |    0    |    500    |
-	 * |    4    |   100   |    0    |    600    |
-	 * |    3    |    81   |    0    |    648    |
-	 * |    3    |    88   |    0    |    704    |
-	 * |    3    |    90   |    0    |    720    |
-	 * |    3    |   100   |    0    |    800    |
-	 * |   12    |   425   |    0    |    850    |
-	 * |    4    |   150   |    0    |    900    |
-	 * |   12    |   475   |    0    |    950    |
-	 * |    6    |   250   |    0    |   1000    |
-	 * -------------------------------------------
-	 */
-
-	/*
-	 * pms could be calculated as the following.
-	 * M * 24 / P * 2 ^ S = MHz
-	 */
-	unsigned char			p;
-	unsigned short			m;
-	unsigned char			s;
-
-	unsigned int			pll_stable_time;
-	unsigned long			esc_clk;
-
-	unsigned short			stop_holding_cnt;
-	unsigned char			bta_timeout;
-	unsigned short			rx_timeout;
-};
-
-/*
- * struct mipi_dsim_device - global interface for mipi-dsi driver.
- *
- * @dev: driver model representation of the device.
- * @id: unique device id.
- * @clock: pointer to MIPI-DSI clock of clock framework.
- * @irq: interrupt number to MIPI-DSI controller.
- * @reg_base: base address to memory mapped SRF of MIPI-DSI controller.
- *	(virtual address)
- * @lock: the mutex protecting this data structure.
- * @dsim_info: infomation for configuring mipi-dsi controller.
- * @master_ops: callbacks to mipi-dsi operations.
- * @dsim_lcd_dev: pointer to activated ddi device.
- *	(it would be registered by mipi-dsi driver.)
- * @dsim_lcd_drv: pointer to activated_ddi driver.
- *	(it would be registered by mipi-dsi driver.)
- * @lcd_info: pointer to mipi_lcd_info structure.
- * @state: specifies status of MIPI-DSI controller.
- *	the status could be RESET, INIT, STOP, HSCLKEN and ULPS.
- * @data_lane: specifiec enabled data lane number.
- *	this variable would be set by driver according to e_no_data_lane
- *	automatically.
- * @e_clk_src: select byte clock source.
- * @pd: pointer to MIPI-DSI driver platform data.
- * @phy: pointer to the MIPI-DSI PHY
- */
-struct mipi_dsim_device {
-	struct device			*dev;
-	int				id;
-	struct clk			*clock;
-	unsigned int			irq;
-	void __iomem			*reg_base;
-	struct mutex			lock;
-
-	struct mipi_dsim_config		*dsim_config;
-	struct mipi_dsim_master_ops	*master_ops;
-	struct mipi_dsim_lcd_device	*dsim_lcd_dev;
-	struct mipi_dsim_lcd_driver	*dsim_lcd_drv;
-
-	unsigned int			state;
-	unsigned int			data_lane;
-	unsigned int			e_clk_src;
-	bool				suspended;
-
-	struct mipi_dsim_platform_data	*pd;
-	struct phy			*phy;
-};
-
-/*
- * struct mipi_dsim_platform_data - interface to platform data
- *	for mipi-dsi driver.
- *
- * @lcd_panel_name: specifies lcd panel name registered to mipi-dsi driver.
- *	lcd panel driver searched would be actived.
- * @dsim_config: pointer of structure for configuring mipi-dsi controller.
- * @enabled: indicate whether mipi controller got enabled or not.
- * @lcd_panel_info: pointer for lcd panel specific structure.
- *	this structure specifies width, height, timing and polarity and so on.
- */
-struct mipi_dsim_platform_data {
-	char				lcd_panel_name[PANEL_NAME_SIZE];
-
-	struct mipi_dsim_config		*dsim_config;
-	unsigned int			enabled;
-	void				*lcd_panel_info;
-};
-
-/*
- * struct mipi_dsim_master_ops - callbacks to mipi-dsi operations.
- *
- * @cmd_write: transfer command to lcd panel at LP mode.
- * @cmd_read: read command from rx register.
- * @get_dsim_frame_done: get the status that all screen data have been
- *	transferred to mipi-dsi.
- * @clear_dsim_frame_done: clear frame done status.
- * @get_fb_frame_done: get frame done status of display controller.
- * @trigger: trigger display controller.
- *	- this one would be used only in case of CPU mode.
- *  @set_early_blank_mode: set framebuffer blank mode.
- *	- this callback should be called prior to fb_blank() by a client driver
- *	only if needing.
- *  @set_blank_mode: set framebuffer blank mode.
- *	- this callback should be called after fb_blank() by a client driver
- *	only if needing.
- */
-
-struct mipi_dsim_master_ops {
-	int (*cmd_write)(struct mipi_dsim_device *dsim, unsigned int data_id,
-		const unsigned char *data0, unsigned int data1);
-	int (*cmd_read)(struct mipi_dsim_device *dsim, unsigned int data_id,
-		unsigned int data0, unsigned int req_size, u8 *rx_buf);
-	int (*get_dsim_frame_done)(struct mipi_dsim_device *dsim);
-	int (*clear_dsim_frame_done)(struct mipi_dsim_device *dsim);
-
-	int (*get_fb_frame_done)(struct fb_info *info);
-	void (*trigger)(struct fb_info *info);
-	int (*set_early_blank_mode)(struct mipi_dsim_device *dsim, int power);
-	int (*set_blank_mode)(struct mipi_dsim_device *dsim, int power);
-};
-
-/*
- * device structure for mipi-dsi based lcd panel.
- *
- * @name: name of the device to use with this device, or an
- *	alias for that name.
- * @dev: driver model representation of the device.
- * @id: id of device to be registered.
- * @bus_id: bus id for identifing connected bus
- *	and this bus id should be same as id of mipi_dsim_device.
- * @irq: irq number for signaling when framebuffer transfer of
- *	lcd panel module is completed.
- *	this irq would be used only for MIPI-DSI based CPU mode lcd panel.
- * @master: pointer to mipi-dsi master device object.
- * @platform_data: lcd panel specific platform data.
- */
-struct mipi_dsim_lcd_device {
-	char			*name;
-	struct device		dev;
-	int			id;
-	int			bus_id;
-	int			irq;
-	int			panel_reverse;
-
-	struct mipi_dsim_device *master;
-	void			*platform_data;
-};
-
-/*
- * driver structure for mipi-dsi based lcd panel.
- *
- * this structure should be registered by lcd panel driver.
- * mipi-dsi driver seeks lcd panel registered through name field
- * and calls these callback functions in appropriate time.
- *
- * @name: name of the driver to use with this device, or an
- *	alias for that name.
- * @id: id of driver to be registered.
- *	this id would be used for finding device object registered.
- */
-struct mipi_dsim_lcd_driver {
-	char			*name;
-	int			id;
-
-	void	(*power_on)(struct mipi_dsim_lcd_device *dsim_dev, int enable);
-	void	(*set_sequence)(struct mipi_dsim_lcd_device *dsim_dev);
-	int	(*probe)(struct mipi_dsim_lcd_device *dsim_dev);
-	int	(*remove)(struct mipi_dsim_lcd_device *dsim_dev);
-	void	(*shutdown)(struct mipi_dsim_lcd_device *dsim_dev);
-	int	(*suspend)(struct mipi_dsim_lcd_device *dsim_dev);
-	int	(*resume)(struct mipi_dsim_lcd_device *dsim_dev);
-};
-
-/*
- * register mipi_dsim_lcd_device to mipi-dsi master.
- */
-int exynos_mipi_dsi_register_lcd_device(struct mipi_dsim_lcd_device
-						*lcd_dev);
-/**
- * register mipi_dsim_lcd_driver object defined by lcd panel driver
- * to mipi-dsi driver.
- */
-int exynos_mipi_dsi_register_lcd_driver(struct mipi_dsim_lcd_driver
-						*lcd_drv);
-#endif /* _EXYNOS_MIPI_DSIM_H */
diff --git a/include/video/imx-ipu-image-convert.h b/include/video/imx-ipu-image-convert.h
new file mode 100644
index 0000000..7b87efc
--- /dev/null
+++ b/include/video/imx-ipu-image-convert.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2012-2016 Mentor Graphics Inc.
+ *
+ * i.MX Queued image conversion support, with tiling and rotation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+#ifndef __IMX_IPU_IMAGE_CONVERT_H__
+#define __IMX_IPU_IMAGE_CONVERT_H__
+
+#include <video/imx-ipu-v3.h>
+
+struct ipu_image_convert_ctx;
+
+/**
+ * struct ipu_image_convert_run - image conversion run request struct
+ *
+ * @ctx:	the conversion context
+ * @in_phys:	dma addr of input image buffer for this run
+ * @out_phys:	dma addr of output image buffer for this run
+ * @status:	completion status of this run
+ */
+struct ipu_image_convert_run {
+	struct ipu_image_convert_ctx *ctx;
+
+	dma_addr_t in_phys;
+	dma_addr_t out_phys;
+
+	int status;
+
+	/* internal to image converter, callers don't touch */
+	struct list_head list;
+};
+
+/**
+ * ipu_image_convert_cb_t - conversion callback function prototype
+ *
+ * @run:	the completed conversion run pointer
+ * @ctx:	a private context pointer for the callback
+ */
+typedef void (*ipu_image_convert_cb_t)(struct ipu_image_convert_run *run,
+				       void *ctx);
+
+/**
+ * ipu_image_convert_enum_format() - enumerate the image converter's
+ *	supported input and output pixel formats.
+ *
+ * @index:	pixel format index
+ * @fourcc:	v4l2 fourcc for this index
+ *
+ * Returns 0 with a valid index and fills in v4l2 fourcc, -EINVAL otherwise.
+ *
+ * In V4L2, drivers can call ipu_image_enum_format() in .enum_fmt.
+ */
+int ipu_image_convert_enum_format(int index, u32 *fourcc);
+
+/**
+ * ipu_image_convert_adjust() - adjust input/output images to IPU restrictions.
+ *
+ * @in:		input image format, adjusted on return
+ * @out:	output image format, adjusted on return
+ * @rot_mode:	rotation mode
+ *
+ * In V4L2, drivers can call ipu_image_convert_adjust() in .try_fmt.
+ */
+void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
+			      enum ipu_rotate_mode rot_mode);
+
+/**
+ * ipu_image_convert_verify() - verify that input/output image formats
+ *         and rotation mode meet IPU restrictions.
+ *
+ * @in:		input image format
+ * @out:	output image format
+ * @rot_mode:	rotation mode
+ *
+ * Returns 0 if the formats and rotation mode meet IPU restrictions,
+ * -EINVAL otherwise.
+ */
+int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
+			     enum ipu_rotate_mode rot_mode);
+
+/**
+ * ipu_image_convert_prepare() - prepare a conversion context.
+ *
+ * @ipu:	the IPU handle to use for the conversions
+ * @ic_task:	the IC task to use for the conversions
+ * @in:		input image format
+ * @out:	output image format
+ * @rot_mode:	rotation mode
+ * @complete:	run completion callback
+ * @complete_context:	a context pointer for the completion callback
+ *
+ * Returns an opaque conversion context pointer on success, error pointer
+ * on failure. The input/output formats and rotation mode must already meet
+ * IPU retrictions.
+ *
+ * In V4L2, drivers should call ipu_image_convert_prepare() at streamon.
+ */
+struct ipu_image_convert_ctx *
+ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+			  struct ipu_image *in, struct ipu_image *out,
+			  enum ipu_rotate_mode rot_mode,
+			  ipu_image_convert_cb_t complete,
+			  void *complete_context);
+
+/**
+ * ipu_image_convert_unprepare() - unprepare a conversion context.
+ *
+ * @ctx: the conversion context pointer to unprepare
+ *
+ * Aborts any active or pending conversions for this context and
+ * frees the context. Any currently active or pending runs belonging
+ * to this context are returned via the completion callback with an
+ * error run status.
+ *
+ * In V4L2, drivers should call ipu_image_convert_unprepare() at
+ * streamoff.
+ */
+void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx);
+
+/**
+ * ipu_image_convert_queue() - queue a conversion run
+ *
+ * @run: the run request pointer
+ *
+ * ipu_image_convert_run must be dynamically allocated (_not_ as a local
+ * var) by callers and filled in with a previously prepared conversion
+ * context handle and the dma addr's of the input and output image buffers
+ * for this conversion run.
+ *
+ * When this conversion completes, the run pointer is returned via the
+ * completion callback. The caller is responsible for freeing the run
+ * object after it completes.
+ *
+ * In V4L2, drivers should call ipu_image_convert_queue() while
+ * streaming to queue the conversion of a received input buffer.
+ * For example mem2mem devices this would be called in .device_run.
+ */
+int ipu_image_convert_queue(struct ipu_image_convert_run *run);
+
+/**
+ * ipu_image_convert_abort() - abort conversions
+ *
+ * @ctx: the conversion context pointer
+ *
+ * This will abort any active or pending conversions for this context.
+ * Any currently active or pending runs belonging to this context are
+ * returned via the completion callback with an error run status.
+ */
+void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx);
+
+/**
+ * ipu_image_convert() - asynchronous image conversion request
+ *
+ * @ipu:	the IPU handle to use for the conversion
+ * @ic_task:	the IC task to use for the conversion
+ * @in:		input image format
+ * @out:	output image format
+ * @rot_mode:	rotation mode
+ * @complete:	run completion callback
+ * @complete_context:	a context pointer for the completion callback
+ *
+ * Request a single image conversion. Returns the run that has been queued.
+ * A conversion context is automatically created and is available in run->ctx.
+ * As with ipu_image_convert_prepare(), the input/output formats and rotation
+ * mode must already meet IPU retrictions.
+ *
+ * On successful return the caller can queue more run requests if needed, using
+ * the prepared context in run->ctx. The caller is responsible for unpreparing
+ * the context when no more conversion requests are needed.
+ */
+struct ipu_image_convert_run *
+ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+		  struct ipu_image *in, struct ipu_image *out,
+		  enum ipu_rotate_mode rot_mode,
+		  ipu_image_convert_cb_t complete,
+		  void *complete_context);
+
+/**
+ * ipu_image_convert_sync() - synchronous single image conversion request
+ *
+ * @ipu:	the IPU handle to use for the conversion
+ * @ic_task:	the IC task to use for the conversion
+ * @in:		input image format
+ * @out:	output image format
+ * @rot_mode:	rotation mode
+ *
+ * Carry out a single image conversion. Returns when the conversion
+ * completes. The input/output formats and rotation mode must already
+ * meet IPU retrictions. The created context is automatically unprepared
+ * and the run freed on return.
+ */
+int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+			   struct ipu_image *in, struct ipu_image *out,
+			   enum ipu_rotate_mode rot_mode);
+
+
+#endif /* __IMX_IPU_IMAGE_CONVERT_H__ */
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index 7adeaae0..173073e 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -63,23 +63,41 @@
 /*
  * Enumeration of IPU rotation modes
  */
+#define IPU_ROT_BIT_VFLIP (1 << 0)
+#define IPU_ROT_BIT_HFLIP (1 << 1)
+#define IPU_ROT_BIT_90    (1 << 2)
+
 enum ipu_rotate_mode {
 	IPU_ROTATE_NONE = 0,
-	IPU_ROTATE_VERT_FLIP,
-	IPU_ROTATE_HORIZ_FLIP,
-	IPU_ROTATE_180,
-	IPU_ROTATE_90_RIGHT,
-	IPU_ROTATE_90_RIGHT_VFLIP,
-	IPU_ROTATE_90_RIGHT_HFLIP,
-	IPU_ROTATE_90_LEFT,
+	IPU_ROTATE_VERT_FLIP = IPU_ROT_BIT_VFLIP,
+	IPU_ROTATE_HORIZ_FLIP = IPU_ROT_BIT_HFLIP,
+	IPU_ROTATE_180 = (IPU_ROT_BIT_VFLIP | IPU_ROT_BIT_HFLIP),
+	IPU_ROTATE_90_RIGHT = IPU_ROT_BIT_90,
+	IPU_ROTATE_90_RIGHT_VFLIP = (IPU_ROT_BIT_90 | IPU_ROT_BIT_VFLIP),
+	IPU_ROTATE_90_RIGHT_HFLIP = (IPU_ROT_BIT_90 | IPU_ROT_BIT_HFLIP),
+	IPU_ROTATE_90_LEFT = (IPU_ROT_BIT_90 |
+			      IPU_ROT_BIT_VFLIP | IPU_ROT_BIT_HFLIP),
 };
 
+/* 90-degree rotations require the IRT unit */
+#define ipu_rot_mode_is_irt(m) (((m) & IPU_ROT_BIT_90) != 0)
+
 enum ipu_color_space {
 	IPUV3_COLORSPACE_RGB,
 	IPUV3_COLORSPACE_YUV,
 	IPUV3_COLORSPACE_UNKNOWN,
 };
 
+/*
+ * Enumeration of VDI MOTION select
+ */
+enum ipu_motion_sel {
+	MOTION_NONE = 0,
+	LOW_MOTION,
+	MED_MOTION,
+	HIGH_MOTION,
+};
+
 struct ipuv3_channel;
 
 enum ipu_channel_irq {
@@ -97,20 +115,42 @@
 #define IPUV3_CHANNEL_CSI2			 2
 #define IPUV3_CHANNEL_CSI3			 3
 #define IPUV3_CHANNEL_VDI_MEM_IC_VF		 5
+/*
+ * NOTE: channels 6,7 are unused in the IPU and are not IDMAC channels,
+ * but the direct CSI->VDI linking is handled the same way as IDMAC
+ * channel linking in the FSU via the IPU_FS_PROC_FLOW registers, so
+ * these channel names are used to support the direct CSI->VDI link.
+ */
+#define IPUV3_CHANNEL_CSI_DIRECT		 6
+#define IPUV3_CHANNEL_CSI_VDI_PREV		 7
+#define IPUV3_CHANNEL_MEM_VDI_PREV		 8
+#define IPUV3_CHANNEL_MEM_VDI_CUR		 9
+#define IPUV3_CHANNEL_MEM_VDI_NEXT		10
 #define IPUV3_CHANNEL_MEM_IC_PP			11
 #define IPUV3_CHANNEL_MEM_IC_PRP_VF		12
+#define IPUV3_CHANNEL_VDI_MEM_RECENT		13
 #define IPUV3_CHANNEL_G_MEM_IC_PRP_VF		14
 #define IPUV3_CHANNEL_G_MEM_IC_PP		15
+#define IPUV3_CHANNEL_G_MEM_IC_PRP_VF_ALPHA	17
+#define IPUV3_CHANNEL_G_MEM_IC_PP_ALPHA		18
+#define IPUV3_CHANNEL_MEM_VDI_PLANE1_COMB_ALPHA	19
 #define IPUV3_CHANNEL_IC_PRP_ENC_MEM		20
 #define IPUV3_CHANNEL_IC_PRP_VF_MEM		21
 #define IPUV3_CHANNEL_IC_PP_MEM			22
 #define IPUV3_CHANNEL_MEM_BG_SYNC		23
 #define IPUV3_CHANNEL_MEM_BG_ASYNC		24
+#define IPUV3_CHANNEL_MEM_VDI_PLANE1_COMB	25
+#define IPUV3_CHANNEL_MEM_VDI_PLANE3_COMB	26
 #define IPUV3_CHANNEL_MEM_FG_SYNC		27
 #define IPUV3_CHANNEL_MEM_DC_SYNC		28
 #define IPUV3_CHANNEL_MEM_FG_ASYNC		29
 #define IPUV3_CHANNEL_MEM_FG_SYNC_ALPHA		31
+#define IPUV3_CHANNEL_MEM_FG_ASYNC_ALPHA	33
+#define IPUV3_CHANNEL_DC_MEM_READ		40
 #define IPUV3_CHANNEL_MEM_DC_ASYNC		41
+#define IPUV3_CHANNEL_MEM_DC_COMMAND		42
+#define IPUV3_CHANNEL_MEM_DC_COMMAND2		43
+#define IPUV3_CHANNEL_MEM_DC_OUTPUT_MASK	44
 #define IPUV3_CHANNEL_MEM_ROT_ENC		45
 #define IPUV3_CHANNEL_MEM_ROT_VF		46
 #define IPUV3_CHANNEL_MEM_ROT_PP		47
@@ -118,6 +158,8 @@
 #define IPUV3_CHANNEL_ROT_VF_MEM		49
 #define IPUV3_CHANNEL_ROT_PP_MEM		50
 #define IPUV3_CHANNEL_MEM_BG_SYNC_ALPHA		51
+#define IPUV3_CHANNEL_MEM_BG_ASYNC_ALPHA	52
+#define IPUV3_NUM_CHANNELS			64
 
 int ipu_map_irq(struct ipu_soc *ipu, int irq);
 int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
@@ -138,6 +180,7 @@
 /*
  * IPU Common functions
  */
+int ipu_get_num(struct ipu_soc *ipu);
 void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2);
 void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi);
 void ipu_dump(struct ipu_soc *ipu);
@@ -160,6 +203,10 @@
 bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num);
 void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num);
 void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num);
+int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch);
+int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch);
+int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink);
+int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink);
 
 /*
  * IPU Channel Parameter Memory (cpmem) functions
@@ -184,8 +231,10 @@
 void ipu_cpmem_set_stride(struct ipuv3_channel *ch, int stride);
 void ipu_cpmem_set_high_priority(struct ipuv3_channel *ch);
 void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf);
+void ipu_cpmem_set_uv_offset(struct ipuv3_channel *ch, u32 u_off, u32 v_off);
 void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride);
 void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id);
+int ipu_cpmem_get_burstsize(struct ipuv3_channel *ch);
 void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize);
 void ipu_cpmem_set_block_mode(struct ipuv3_channel *ch);
 void ipu_cpmem_set_rotation(struct ipuv3_channel *ch,
@@ -317,6 +366,19 @@
 void ipu_ic_dump(struct ipu_ic *ic);
 
 /*
+ * IPU Video De-Interlacer (vdi) functions
+ */
+struct ipu_vdi;
+void ipu_vdi_set_field_order(struct ipu_vdi *vdi, v4l2_std_id std, u32 field);
+void ipu_vdi_set_motion(struct ipu_vdi *vdi, enum ipu_motion_sel motion_sel);
+void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres);
+void ipu_vdi_unsetup(struct ipu_vdi *vdi);
+int ipu_vdi_enable(struct ipu_vdi *vdi);
+int ipu_vdi_disable(struct ipu_vdi *vdi);
+struct ipu_vdi *ipu_vdi_get(struct ipu_soc *ipu);
+void ipu_vdi_put(struct ipu_vdi *vdi);
+
+/*
  * IPU Sensor Multiple FIFO Controller (SMFC) functions
  */
 struct ipu_smfc *ipu_smfc_get(struct ipu_soc *ipu, unsigned int chno);
diff --git a/init/Kconfig b/init/Kconfig
index d7fc226..34407f1 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1288,6 +1288,7 @@
 
 config RELAY
 	bool "Kernel->user space relay support (formerly relayfs)"
+	select IRQ_WORK
 	help
 	  This option enables support for relay interface support in
 	  certain file systems (such as debugfs).
diff --git a/init/Makefile b/init/Makefile
index 7bc47ee..c4fb455 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -2,6 +2,8 @@
 # Makefile for the linux kernel.
 #
 
+ccflags-y := -fno-function-sections -fno-data-sections
+
 obj-y                          := main.o version.o mounts.o
 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
 obj-y                          += noinitramfs.o
diff --git a/init/main.c b/init/main.c
index a8a58e2..2858be7 100644
--- a/init/main.c
+++ b/init/main.c
@@ -789,6 +789,7 @@
 	}
 	WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
 
+	add_latent_entropy();
 	return ret;
 }
 
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 0b13ace..8cbd6e6 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -225,7 +225,7 @@
 	inode->i_mode = mode;
 	inode->i_uid = current_fsuid();
 	inode->i_gid = current_fsgid();
-	inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME;
+	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);
 
 	if (S_ISREG(mode)) {
 		struct mqueue_inode_info *info;
@@ -446,7 +446,7 @@
 
 	put_ipc_ns(ipc_ns);
 	dir->i_size += DIRENT_SIZE;
-	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
+	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
 
 	d_instantiate(dentry, inode);
 	dget(dentry);
@@ -462,7 +462,7 @@
 {
 	struct inode *inode = d_inode(dentry);
 
-	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
+	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
 	dir->i_size -= DIRENT_SIZE;
 	drop_nlink(inode);
 	dput(dentry);
@@ -500,7 +500,7 @@
 	if (ret <= 0)
 		return ret;
 
-	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = CURRENT_TIME;
+	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
 	return ret;
 }
 
@@ -1060,7 +1060,7 @@
 			__do_notify(info);
 		}
 		inode->i_atime = inode->i_mtime = inode->i_ctime =
-				CURRENT_TIME;
+				current_time(inode);
 	}
 out_unlock:
 	spin_unlock(&info->lock);
@@ -1156,7 +1156,7 @@
 		msg_ptr = msg_get(info);
 
 		inode->i_atime = inode->i_mtime = inode->i_ctime =
-				CURRENT_TIME;
+				current_time(inode);
 
 		/* There is now free space in queue. */
 		pipelined_receive(&wake_q, info);
@@ -1277,7 +1277,7 @@
 	if (u_notification == NULL) {
 		if (info->notify_owner == task_tgid(current)) {
 			remove_notification(info);
-			inode->i_atime = inode->i_ctime = CURRENT_TIME;
+			inode->i_atime = inode->i_ctime = current_time(inode);
 		}
 	} else if (info->notify_owner != NULL) {
 		ret = -EBUSY;
@@ -1302,7 +1302,7 @@
 
 		info->notify_owner = get_pid(task_tgid(current));
 		info->notify_user_ns = get_user_ns(current_user_ns());
-		inode->i_atime = inode->i_ctime = CURRENT_TIME;
+		inode->i_atime = inode->i_ctime = current_time(inode);
 	}
 	spin_unlock(&info->lock);
 out_fput:
@@ -1359,7 +1359,7 @@
 			f.file->f_flags &= ~O_NONBLOCK;
 		spin_unlock(&f.file->f_lock);
 
-		inode->i_atime = inode->i_ctime = CURRENT_TIME;
+		inode->i_atime = inode->i_ctime = current_time(inode);
 	}
 
 	spin_unlock(&info->lock);
diff --git a/ipc/msg.c b/ipc/msg.c
index c6521c2..e12307d 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -51,19 +51,14 @@
 	long			r_msgtype;
 	long			r_maxsize;
 
-	/*
-	 * Mark r_msg volatile so that the compiler
-	 * does not try to get smart and optimize
-	 * it. We rely on this for the lockless
-	 * receive algorithm.
-	 */
-	struct msg_msg		*volatile r_msg;
+	struct msg_msg		*r_msg;
 };
 
 /* one msg_sender for each sleeping sender */
 struct msg_sender {
 	struct list_head	list;
 	struct task_struct	*tsk;
+	size_t                  msgsz;
 };
 
 #define SEARCH_ANY		1
@@ -159,45 +154,72 @@
 	return msq->q_perm.id;
 }
 
-static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
+static inline bool msg_fits_inqueue(struct msg_queue *msq, size_t msgsz)
+{
+	return msgsz + msq->q_cbytes <= msq->q_qbytes &&
+		1 + msq->q_qnum <= msq->q_qbytes;
+}
+
+static inline void ss_add(struct msg_queue *msq,
+			  struct msg_sender *mss, size_t msgsz)
 {
 	mss->tsk = current;
+	mss->msgsz = msgsz;
 	__set_current_state(TASK_INTERRUPTIBLE);
 	list_add_tail(&mss->list, &msq->q_senders);
 }
 
 static inline void ss_del(struct msg_sender *mss)
 {
-	if (mss->list.next != NULL)
+	if (mss->list.next)
 		list_del(&mss->list);
 }
 
-static void ss_wakeup(struct list_head *h, int kill)
+static void ss_wakeup(struct msg_queue *msq,
+		      struct wake_q_head *wake_q, bool kill)
 {
 	struct msg_sender *mss, *t;
+	struct task_struct *stop_tsk = NULL;
+	struct list_head *h = &msq->q_senders;
 
 	list_for_each_entry_safe(mss, t, h, list) {
 		if (kill)
 			mss->list.next = NULL;
-		wake_up_process(mss->tsk);
+
+		/*
+		 * Stop at the first task we don't wakeup,
+		 * we've already iterated the original
+		 * sender queue.
+		 */
+		else if (stop_tsk == mss->tsk)
+			break;
+		/*
+		 * We are not in an EIDRM scenario here, therefore
+		 * verify that we really need to wakeup the task.
+		 * To maintain current semantics and wakeup order,
+		 * move the sender to the tail on behalf of the
+		 * blocked task.
+		 */
+		else if (!msg_fits_inqueue(msq, mss->msgsz)) {
+			if (!stop_tsk)
+				stop_tsk = mss->tsk;
+
+			list_move_tail(&mss->list, &msq->q_senders);
+			continue;
+		}
+
+		wake_q_add(wake_q, mss->tsk);
 	}
 }
 
-static void expunge_all(struct msg_queue *msq, int res)
+static void expunge_all(struct msg_queue *msq, int res,
+			struct wake_q_head *wake_q)
 {
 	struct msg_receiver *msr, *t;
 
 	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
-		msr->r_msg = NULL; /* initialize expunge ordering */
-		wake_up_process(msr->r_tsk);
-		/*
-		 * Ensure that the wakeup is visible before setting r_msg as
-		 * the receiving end depends on it: either spinning on a nil,
-		 * or dealing with -EAGAIN cases. See lockless receive part 1
-		 * and 2 in do_msgrcv().
-		 */
-		smp_wmb(); /* barrier (B) */
-		msr->r_msg = ERR_PTR(res);
+		wake_q_add(wake_q, msr->r_tsk);
+		WRITE_ONCE(msr->r_msg, ERR_PTR(res));
 	}
 }
 
@@ -213,11 +235,13 @@
 {
 	struct msg_msg *msg, *t;
 	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
+	WAKE_Q(wake_q);
 
-	expunge_all(msq, -EIDRM);
-	ss_wakeup(&msq->q_senders, 1);
+	expunge_all(msq, -EIDRM, &wake_q);
+	ss_wakeup(msq, &wake_q, true);
 	msg_rmid(ns, msq);
 	ipc_unlock_object(&msq->q_perm);
+	wake_up_q(&wake_q);
 	rcu_read_unlock();
 
 	list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
@@ -372,6 +396,9 @@
 		freeque(ns, ipcp);
 		goto out_up;
 	case IPC_SET:
+	{
+		WAKE_Q(wake_q);
+
 		if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
 		    !capable(CAP_SYS_RESOURCE)) {
 			err = -EPERM;
@@ -386,15 +413,21 @@
 		msq->q_qbytes = msqid64.msg_qbytes;
 
 		msq->q_ctime = get_seconds();
-		/* sleeping receivers might be excluded by
+		/*
+		 * Sleeping receivers might be excluded by
 		 * stricter permissions.
 		 */
-		expunge_all(msq, -EAGAIN);
-		/* sleeping senders might be able to send
+		expunge_all(msq, -EAGAIN, &wake_q);
+		/*
+		 * Sleeping senders might be able to send
 		 * due to a larger queue size.
 		 */
-		ss_wakeup(&msq->q_senders, 0);
-		break;
+		ss_wakeup(msq, &wake_q, false);
+		ipc_unlock_object(&msq->q_perm);
+		wake_up_q(&wake_q);
+
+		goto out_unlock1;
+	}
 	default:
 		err = -EINVAL;
 		goto out_unlock1;
@@ -566,7 +599,8 @@
 	return 0;
 }
 
-static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
+static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg,
+				 struct wake_q_head *wake_q)
 {
 	struct msg_receiver *msr, *t;
 
@@ -577,27 +611,14 @@
 
 			list_del(&msr->r_list);
 			if (msr->r_maxsize < msg->m_ts) {
-				/* initialize pipelined send ordering */
-				msr->r_msg = NULL;
-				wake_up_process(msr->r_tsk);
-				/* barrier (B) see barrier comment below */
-				smp_wmb();
-				msr->r_msg = ERR_PTR(-E2BIG);
+				wake_q_add(wake_q, msr->r_tsk);
+				WRITE_ONCE(msr->r_msg, ERR_PTR(-E2BIG));
 			} else {
-				msr->r_msg = NULL;
 				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
 				msq->q_rtime = get_seconds();
-				wake_up_process(msr->r_tsk);
-				/*
-				 * Ensure that the wakeup is visible before
-				 * setting r_msg, as the receiving can otherwise
-				 * exit - once r_msg is set, the receiver can
-				 * continue. See lockless receive part 1 and 2
-				 * in do_msgrcv(). Barrier (B).
-				 */
-				smp_wmb();
-				msr->r_msg = msg;
 
+				wake_q_add(wake_q, msr->r_tsk);
+				WRITE_ONCE(msr->r_msg, msg);
 				return 1;
 			}
 		}
@@ -613,6 +634,7 @@
 	struct msg_msg *msg;
 	int err;
 	struct ipc_namespace *ns;
+	WAKE_Q(wake_q);
 
 	ns = current->nsproxy->ipc_ns;
 
@@ -654,10 +676,8 @@
 		if (err)
 			goto out_unlock0;
 
-		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
-				1 + msq->q_qnum <= msq->q_qbytes) {
+		if (msg_fits_inqueue(msq, msgsz))
 			break;
-		}
 
 		/* queue full, wait: */
 		if (msgflg & IPC_NOWAIT) {
@@ -666,7 +686,7 @@
 		}
 
 		/* enqueue the sender and prepare to block */
-		ss_add(msq, &s);
+		ss_add(msq, &s, msgsz);
 
 		if (!ipc_rcu_getref(msq)) {
 			err = -EIDRM;
@@ -686,7 +706,6 @@
 			err = -EIDRM;
 			goto out_unlock0;
 		}
-
 		ss_del(&s);
 
 		if (signal_pending(current)) {
@@ -695,10 +714,11 @@
 		}
 
 	}
+
 	msq->q_lspid = task_tgid_vnr(current);
 	msq->q_stime = get_seconds();
 
-	if (!pipelined_send(msq, msg)) {
+	if (!pipelined_send(msq, msg, &wake_q)) {
 		/* no one is waiting for this message, enqueue it */
 		list_add_tail(&msg->m_list, &msq->q_messages);
 		msq->q_cbytes += msgsz;
@@ -712,6 +732,7 @@
 
 out_unlock0:
 	ipc_unlock_object(&msq->q_perm);
+	wake_up_q(&wake_q);
 out_unlock1:
 	rcu_read_unlock();
 	if (msg != NULL)
@@ -829,6 +850,7 @@
 	struct msg_queue *msq;
 	struct ipc_namespace *ns;
 	struct msg_msg *msg, *copy = NULL;
+	WAKE_Q(wake_q);
 
 	ns = current->nsproxy->ipc_ns;
 
@@ -893,7 +915,7 @@
 			msq->q_cbytes -= msg->m_ts;
 			atomic_sub(msg->m_ts, &ns->msg_bytes);
 			atomic_dec(&ns->msg_hdrs);
-			ss_wakeup(&msq->q_senders, 0);
+			ss_wakeup(msq, &wake_q, false);
 
 			goto out_unlock0;
 		}
@@ -919,71 +941,38 @@
 		rcu_read_unlock();
 		schedule();
 
-		/* Lockless receive, part 1:
-		 * Disable preemption.  We don't hold a reference to the queue
-		 * and getting a reference would defeat the idea of a lockless
-		 * operation, thus the code relies on rcu to guarantee the
-		 * existence of msq:
+		/*
+		 * Lockless receive, part 1:
+		 * We don't hold a reference to the queue and getting a
+		 * reference would defeat the idea of a lockless operation,
+		 * thus the code relies on rcu to guarantee the existence of
+		 * msq:
 		 * Prior to destruction, expunge_all(-EIRDM) changes r_msg.
 		 * Thus if r_msg is -EAGAIN, then the queue not yet destroyed.
-		 * rcu_read_lock() prevents preemption between reading r_msg
-		 * and acquiring the q_perm.lock in ipc_lock_object().
 		 */
 		rcu_read_lock();
 
-		/* Lockless receive, part 2:
-		 * Wait until pipelined_send or expunge_all are outside of
-		 * wake_up_process(). There is a race with exit(), see
-		 * ipc/mqueue.c for the details. The correct serialization
-		 * ensures that a receiver cannot continue without the wakeup
-		 * being visibible _before_ setting r_msg:
+		/*
+		 * Lockless receive, part 2:
+		 * The work in pipelined_send() and expunge_all():
+		 * - Set pointer to message
+		 * - Queue the receiver task for later wakeup
+		 * - Wake up the process after the lock is dropped.
 		 *
-		 * CPU 0                             CPU 1
-		 * <loop receiver>
-		 *   smp_rmb(); (A) <-- pair -.      <waker thread>
-		 *   <load ->r_msg>           |        msr->r_msg = NULL;
-		 *                            |        wake_up_process();
-		 * <continue>                 `------> smp_wmb(); (B)
-		 *                                     msr->r_msg = msg;
-		 *
-		 * Where (A) orders the message value read and where (B) orders
-		 * the write to the r_msg -- done in both pipelined_send and
-		 * expunge_all.
+		 * Should the process wake up before this wakeup (due to a
+		 * signal) it will either see the message and continue ...
 		 */
-		for (;;) {
-			/*
-			 * Pairs with writer barrier in pipelined_send
-			 * or expunge_all.
-			 */
-			smp_rmb(); /* barrier (A) */
-			msg = (struct msg_msg *)msr_d.r_msg;
-			if (msg)
-				break;
-
-			/*
-			 * The cpu_relax() call is a compiler barrier
-			 * which forces everything in this loop to be
-			 * re-loaded.
-			 */
-			cpu_relax();
-		}
-
-		/* Lockless receive, part 3:
-		 * If there is a message or an error then accept it without
-		 * locking.
-		 */
+		msg = READ_ONCE(msr_d.r_msg);
 		if (msg != ERR_PTR(-EAGAIN))
 			goto out_unlock1;
 
-		/* Lockless receive, part 3:
-		 * Acquire the queue spinlock.
-		 */
+		 /*
+		  * ... or see -EAGAIN, acquire the lock to check the message
+		  * again.
+		  */
 		ipc_lock_object(&msq->q_perm);
 
-		/* Lockless receive, part 4:
-		 * Repeat test after acquiring the spinlock.
-		 */
-		msg = (struct msg_msg *)msr_d.r_msg;
+		msg = msr_d.r_msg;
 		if (msg != ERR_PTR(-EAGAIN))
 			goto out_unlock0;
 
@@ -998,6 +987,7 @@
 
 out_unlock0:
 	ipc_unlock_object(&msq->q_perm);
+	wake_up_q(&wake_q);
 out_unlock1:
 	rcu_read_unlock();
 	if (IS_ERR(msg)) {
diff --git a/ipc/sem.c b/ipc/sem.c
index 7c9d4f7..10b94bc 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -162,14 +162,21 @@
 
 /*
  * Locking:
+ * a) global sem_lock() for read/write
  *	sem_undo.id_next,
  *	sem_array.complex_count,
- *	sem_array.pending{_alter,_cont},
- *	sem_array.sem_undo: global sem_lock() for read/write
- *	sem_undo.proc_next: only "current" is allowed to read/write that field.
+ *	sem_array.complex_mode
+ *	sem_array.pending{_alter,_const},
+ *	sem_array.sem_undo
  *
+ * b) global or semaphore sem_lock() for read/write:
  *	sem_array.sem_base[i].pending_{const,alter}:
- *		global or semaphore sem_lock() for read/write
+ *	sem_array.complex_mode (for read)
+ *
+ * c) special:
+ *	sem_undo_list.list_proc:
+ *	* undo_list->lock for write
+ *	* rcu for read
  */
 
 #define sc_semmsl	sem_ctls[0]
@@ -260,31 +267,62 @@
 }
 
 /*
- * Wait until all currently ongoing simple ops have completed.
+ * Enter the mode suitable for non-simple operations:
  * Caller must own sem_perm.lock.
- * New simple ops cannot start, because simple ops first check
- * that sem_perm.lock is free.
- * that a) sem_perm.lock is free and b) complex_count is 0.
  */
-static void sem_wait_array(struct sem_array *sma)
+static void complexmode_enter(struct sem_array *sma)
 {
 	int i;
 	struct sem *sem;
 
-	if (sma->complex_count)  {
-		/* The thread that increased sma->complex_count waited on
-		 * all sem->lock locks. Thus we don't need to wait again.
-		 */
+	if (sma->complex_mode)  {
+		/* We are already in complex_mode. Nothing to do */
 		return;
 	}
 
+	/* We need a full barrier after seting complex_mode:
+	 * The write to complex_mode must be visible
+	 * before we read the first sem->lock spinlock state.
+	 */
+	smp_store_mb(sma->complex_mode, true);
+
 	for (i = 0; i < sma->sem_nsems; i++) {
 		sem = sma->sem_base + i;
 		spin_unlock_wait(&sem->lock);
 	}
+	/*
+	 * spin_unlock_wait() is not a memory barriers, it is only a
+	 * control barrier. The code must pair with spin_unlock(&sem->lock),
+	 * thus just the control barrier is insufficient.
+	 *
+	 * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+	 */
+	smp_rmb();
 }
 
 /*
+ * Try to leave the mode that disallows simple operations:
+ * Caller must own sem_perm.lock.
+ */
+static void complexmode_tryleave(struct sem_array *sma)
+{
+	if (sma->complex_count)  {
+		/* Complex ops are sleeping.
+		 * We must stay in complex mode
+		 */
+		return;
+	}
+	/*
+	 * Immediately after setting complex_mode to false,
+	 * a simple op can start. Thus: all memory writes
+	 * performed by the current operation must be visible
+	 * before we set complex_mode to false.
+	 */
+	smp_store_release(&sma->complex_mode, false);
+}
+
+#define SEM_GLOBAL_LOCK	(-1)
+/*
  * If the request contains only one semaphore operation, and there are
  * no complex transactions pending, lock only the semaphore involved.
  * Otherwise, lock the entire semaphore array, since we either have
@@ -300,56 +338,42 @@
 		/* Complex operation - acquire a full lock */
 		ipc_lock_object(&sma->sem_perm);
 
-		/* And wait until all simple ops that are processed
-		 * right now have dropped their locks.
-		 */
-		sem_wait_array(sma);
-		return -1;
+		/* Prevent parallel simple ops */
+		complexmode_enter(sma);
+		return SEM_GLOBAL_LOCK;
 	}
 
 	/*
 	 * Only one semaphore affected - try to optimize locking.
-	 * The rules are:
-	 * - optimized locking is possible if no complex operation
-	 *   is either enqueued or processed right now.
-	 * - The test for enqueued complex ops is simple:
-	 *      sma->complex_count != 0
-	 * - Testing for complex ops that are processed right now is
-	 *   a bit more difficult. Complex ops acquire the full lock
-	 *   and first wait that the running simple ops have completed.
-	 *   (see above)
-	 *   Thus: If we own a simple lock and the global lock is free
-	 *	and complex_count is now 0, then it will stay 0 and
-	 *	thus just locking sem->lock is sufficient.
+	 * Optimized locking is possible if no complex operation
+	 * is either enqueued or processed right now.
+	 *
+	 * Both facts are tracked by complex_mode.
 	 */
 	sem = sma->sem_base + sops->sem_num;
 
-	if (sma->complex_count == 0) {
+	/*
+	 * Initial check for complex_mode. Just an optimization,
+	 * no locking, no memory barrier.
+	 */
+	if (!sma->complex_mode) {
 		/*
 		 * It appears that no complex operation is around.
 		 * Acquire the per-semaphore lock.
 		 */
 		spin_lock(&sem->lock);
 
-		/* Then check that the global lock is free */
-		if (!spin_is_locked(&sma->sem_perm.lock)) {
-			/*
-			 * We need a memory barrier with acquire semantics,
-			 * otherwise we can race with another thread that does:
-			 *	complex_count++;
-			 *	spin_unlock(sem_perm.lock);
-			 */
-			smp_acquire__after_ctrl_dep();
+		/*
+		 * See 51d7d5205d33
+		 * ("powerpc: Add smp_mb() to arch_spin_is_locked()"):
+		 * A full barrier is required: the write of sem->lock
+		 * must be visible before the read is executed
+		 */
+		smp_mb();
 
-			/*
-			 * Now repeat the test of complex_count:
-			 * It can't change anymore until we drop sem->lock.
-			 * Thus: if is now 0, then it will stay 0.
-			 */
-			if (sma->complex_count == 0) {
-				/* fast path successful! */
-				return sops->sem_num;
-			}
+		if (!smp_load_acquire(&sma->complex_mode)) {
+			/* fast path successful! */
+			return sops->sem_num;
 		}
 		spin_unlock(&sem->lock);
 	}
@@ -369,15 +393,16 @@
 		/* Not a false alarm, thus complete the sequence for a
 		 * full lock.
 		 */
-		sem_wait_array(sma);
-		return -1;
+		complexmode_enter(sma);
+		return SEM_GLOBAL_LOCK;
 	}
 }
 
 static inline void sem_unlock(struct sem_array *sma, int locknum)
 {
-	if (locknum == -1) {
+	if (locknum == SEM_GLOBAL_LOCK) {
 		unmerge_queues(sma);
+		complexmode_tryleave(sma);
 		ipc_unlock_object(&sma->sem_perm);
 	} else {
 		struct sem *sem = sma->sem_base + locknum;
@@ -529,6 +554,7 @@
 	}
 
 	sma->complex_count = 0;
+	sma->complex_mode = true; /* dropped by sem_unlock below */
 	INIT_LIST_HEAD(&sma->pending_alter);
 	INIT_LIST_HEAD(&sma->pending_const);
 	INIT_LIST_HEAD(&sma->list_id);
@@ -2079,6 +2105,8 @@
 		struct list_head tasks;
 		int semid, i;
 
+		cond_resched();
+
 		rcu_read_lock();
 		un = list_entry_rcu(ulp->list_proc.next,
 				    struct sem_undo, list_proc);
@@ -2184,10 +2212,10 @@
 	/*
 	 * The proc interface isn't aware of sem_lock(), it calls
 	 * ipc_lock_object() directly (in sysvipc_find_ipc).
-	 * In order to stay compatible with sem_lock(), we must wait until
-	 * all simple semop() calls have left their critical regions.
+	 * In order to stay compatible with sem_lock(), we must
+	 * enter / leave complex_mode.
 	 */
-	sem_wait_array(sma);
+	complexmode_enter(sma);
 
 	sem_otime = get_semotime(sma);
 
@@ -2204,6 +2232,8 @@
 		   sem_otime,
 		   sma->sem_ctime);
 
+	complexmode_tryleave(sma);
+
 	return 0;
 }
 #endif
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 5967b87..1ed8473 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -97,7 +97,7 @@
 		return ERR_PTR(-ENOSPC);
 
 	inode->i_ino = get_next_ino();
-	inode->i_atime = CURRENT_TIME;
+	inode->i_atime = current_time(inode);
 	inode->i_mtime = inode->i_atime;
 	inode->i_ctime = inode->i_atime;
 
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 4406615..85bc9be 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -64,6 +64,9 @@
 #include <linux/file.h>
 #include <net/sock.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/cgroup.h>
+
 /*
  * pidlists linger the following amount before being destroyed.  The goal
  * is avoiding frequent destruction in the middle of consecutive read calls
@@ -1176,6 +1179,8 @@
 	struct cgroup *cgrp = &root->cgrp;
 	struct cgrp_cset_link *link, *tmp_link;
 
+	trace_cgroup_destroy_root(root);
+
 	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
 
 	BUG_ON(atomic_read(&root->nr_cgrps));
@@ -1874,6 +1879,9 @@
 		strcpy(root->release_agent_path, opts.release_agent);
 		spin_unlock(&release_agent_path_lock);
 	}
+
+	trace_cgroup_remount(root);
+
  out_unlock:
 	kfree(opts.release_agent);
 	kfree(opts.name);
@@ -2031,6 +2039,8 @@
 	if (ret)
 		goto destroy_root;
 
+	trace_cgroup_setup_root(root);
+
 	/*
 	 * There must be no failure case after here, since rebinding takes
 	 * care of subsystems' refcounts, which are explicitly dropped in
@@ -2315,22 +2325,18 @@
 	.fs_flags = FS_USERNS_MOUNT,
 };
 
-static char *cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
-				   struct cgroup_namespace *ns)
+static int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
+				 struct cgroup_namespace *ns)
 {
 	struct cgroup *root = cset_cgroup_from_root(ns->root_cset, cgrp->root);
-	int ret;
 
-	ret = kernfs_path_from_node(cgrp->kn, root->kn, buf, buflen);
-	if (ret < 0 || ret >= buflen)
-		return NULL;
-	return buf;
+	return kernfs_path_from_node(cgrp->kn, root->kn, buf, buflen);
 }
 
-char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
-		     struct cgroup_namespace *ns)
+int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
+		   struct cgroup_namespace *ns)
 {
-	char *ret;
+	int ret;
 
 	mutex_lock(&cgroup_mutex);
 	spin_lock_irq(&css_set_lock);
@@ -2357,12 +2363,12 @@
  *
  * Return value is the same as kernfs_path().
  */
-char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
+int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
 {
 	struct cgroup_root *root;
 	struct cgroup *cgrp;
 	int hierarchy_id = 1;
-	char *path = NULL;
+	int ret;
 
 	mutex_lock(&cgroup_mutex);
 	spin_lock_irq(&css_set_lock);
@@ -2371,16 +2377,15 @@
 
 	if (root) {
 		cgrp = task_cgroup_from_root(task, root);
-		path = cgroup_path_ns_locked(cgrp, buf, buflen, &init_cgroup_ns);
+		ret = cgroup_path_ns_locked(cgrp, buf, buflen, &init_cgroup_ns);
 	} else {
 		/* if no hierarchy exists, everyone is in "/" */
-		if (strlcpy(buf, "/", buflen) < buflen)
-			path = buf;
+		ret = strlcpy(buf, "/", buflen);
 	}
 
 	spin_unlock_irq(&css_set_lock);
 	mutex_unlock(&cgroup_mutex);
-	return path;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(task_cgroup_path);
 
@@ -2830,6 +2835,10 @@
 		ret = cgroup_migrate(leader, threadgroup, dst_cgrp->root);
 
 	cgroup_migrate_finish(&preloaded_csets);
+
+	if (!ret)
+		trace_cgroup_attach_task(dst_cgrp, leader, threadgroup);
+
 	return ret;
 }
 
@@ -3611,6 +3620,8 @@
 	mutex_lock(&cgroup_mutex);
 
 	ret = kernfs_rename(kn, new_parent, new_name_str);
+	if (!ret)
+		trace_cgroup_rename(cgrp);
 
 	mutex_unlock(&cgroup_mutex);
 
@@ -4381,6 +4392,8 @@
 
 		if (task) {
 			ret = cgroup_migrate(task, false, to->root);
+			if (!ret)
+				trace_cgroup_transfer_tasks(to, task, false);
 			put_task_struct(task);
 		}
 	} while (task && !ret);
@@ -5046,6 +5059,8 @@
 			ss->css_released(css);
 	} else {
 		/* cgroup release path */
+		trace_cgroup_release(cgrp);
+
 		cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
 		cgrp->id = -1;
 
@@ -5332,6 +5347,8 @@
 	if (ret)
 		goto out_destroy;
 
+	trace_cgroup_mkdir(cgrp);
+
 	/* let's create and online css's */
 	kernfs_activate(kn);
 
@@ -5507,6 +5524,9 @@
 
 	ret = cgroup_destroy_locked(cgrp);
 
+	if (!ret)
+		trace_cgroup_rmdir(cgrp);
+
 	cgroup_kn_unlock(kn);
 	return ret;
 }
@@ -5743,7 +5763,7 @@
 int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
 		     struct pid *pid, struct task_struct *tsk)
 {
-	char *buf, *path;
+	char *buf;
 	int retval;
 	struct cgroup_root *root;
 
@@ -5786,17 +5806,17 @@
 		 * " (deleted)" is appended to the cgroup path.
 		 */
 		if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
-			path = cgroup_path_ns_locked(cgrp, buf, PATH_MAX,
+			retval = cgroup_path_ns_locked(cgrp, buf, PATH_MAX,
 						current->nsproxy->cgroup_ns);
-			if (!path) {
+			if (retval >= PATH_MAX)
 				retval = -ENAMETOOLONG;
+			if (retval < 0)
 				goto out_unlock;
-			}
-		} else {
-			path = "/";
-		}
 
-		seq_puts(m, path);
+			seq_puts(m, buf);
+		} else {
+			seq_puts(m, "/");
+		}
 
 		if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp))
 			seq_puts(m, " (deleted)\n");
@@ -6062,8 +6082,9 @@
 {
 	struct cgroup *cgrp =
 		container_of(work, struct cgroup, release_agent_work);
-	char *pathbuf = NULL, *agentbuf = NULL, *path;
+	char *pathbuf = NULL, *agentbuf = NULL;
 	char *argv[3], *envp[3];
+	int ret;
 
 	mutex_lock(&cgroup_mutex);
 
@@ -6073,13 +6094,13 @@
 		goto out;
 
 	spin_lock_irq(&css_set_lock);
-	path = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
+	ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
 	spin_unlock_irq(&css_set_lock);
-	if (!path)
+	if (ret < 0 || ret >= PATH_MAX)
 		goto out;
 
 	argv[0] = agentbuf;
-	argv[1] = path;
+	argv[1] = pathbuf;
 	argv[2] = NULL;
 
 	/* minimal command environment */
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index 9f748ed..1a8f34f 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -11,7 +11,6 @@
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_ASHMEM=y
 CONFIG_AUDIT=y
-CONFIG_BLK_DEV_DM=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CGROUPS=y
 CONFIG_CGROUP_CPUACCT=y
@@ -19,9 +18,7 @@
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_CP15_BARRIER_EMULATION=y
-CONFIG_DM_CRYPT=y
-CONFIG_DM_VERITY=y
-CONFIG_DM_VERITY_FEC=y
+CONFIG_DEFAULT_SECURITY_SELINUX=y
 CONFIG_EMBEDDED=y
 CONFIG_FB=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -41,7 +38,6 @@
 CONFIG_IPV6_MIP6=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_OPTIMISTIC_DAD=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -135,6 +131,7 @@
 CONFIG_QUOTA=y
 CONFIG_RTC_CLASS=y
 CONFIG_RT_GROUP_SCHED=y
+CONFIG_SECCOMP=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
 CONFIG_SECURITY_SELINUX=y
diff --git a/kernel/configs/android-recommended.config b/kernel/configs/android-recommended.config
index e3b953e..297756b 100644
--- a/kernel/configs/android-recommended.config
+++ b/kernel/configs/android-recommended.config
@@ -6,12 +6,16 @@
 # CONFIG_PM_WAKELOCKS_GC is not set
 # CONFIG_VT is not set
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BLK_DEV_DM=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_COMPACTION=y
 CONFIG_DEBUG_RODATA=y
+CONFIG_DM_CRYPT=y
 CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
 CONFIG_DRAGONRISE_FF=y
 CONFIG_ENABLE_DEFAULT_TRACERS=y
 CONFIG_EXT4_FS=y
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5df20d6..29de1a9 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -228,7 +228,7 @@
 	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
 	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-	.dep_map = {.name = "cpu_hotplug.lock" },
+	.dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
 #endif
 };
 
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 2b4c20a..29f815d 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2715,7 +2715,7 @@
 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
 		     struct pid *pid, struct task_struct *tsk)
 {
-	char *buf, *p;
+	char *buf;
 	struct cgroup_subsys_state *css;
 	int retval;
 
@@ -2724,14 +2724,15 @@
 	if (!buf)
 		goto out;
 
-	retval = -ENAMETOOLONG;
 	css = task_get_css(tsk, cpuset_cgrp_id);
-	p = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
-			   current->nsproxy->cgroup_ns);
+	retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
+				current->nsproxy->cgroup_ns);
 	css_put(css);
-	if (!p)
+	if (retval >= PATH_MAX)
+		retval = -ENAMETOOLONG;
+	if (retval < 0)
 		goto out_free;
-	seq_puts(m, p);
+	seq_puts(m, buf);
 	seq_putc(m, '\n');
 	retval = 0;
 out_free:
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index d4129bb..f9ec9ad 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -300,7 +300,8 @@
 
 retry:
 	/* Read the page with vaddr into memory */
-	ret = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
+	ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page,
+			&vma);
 	if (ret <= 0)
 		return ret;
 
@@ -1710,7 +1711,8 @@
 	 * but we treat this as a 'remote' access since it is
 	 * essentially a kernel access to the memory.
 	 */
-	result = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
+	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
+			NULL);
 	if (result < 0)
 		return result;
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 6d42242..623259f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -547,7 +547,8 @@
 }
 
 #ifdef CONFIG_MMU
-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+static __latent_entropy int dup_mmap(struct mm_struct *mm,
+					struct mm_struct *oldmm)
 {
 	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
 	struct rb_node **rb_link, *rb_parent;
@@ -1441,7 +1442,8 @@
  * parts of the process environment (as per the clone
  * flags). The actual kick-off is left to the caller.
  */
-static struct task_struct *copy_process(unsigned long clone_flags,
+static __latent_entropy struct task_struct *copy_process(
+					unsigned long clone_flags,
 					unsigned long stack_start,
 					unsigned long stack_size,
 					int __user *child_tidptr,
@@ -1926,6 +1928,7 @@
 
 	p = copy_process(clone_flags, stack_start, stack_size,
 			 child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
+	add_latent_entropy();
 	/*
 	 * Do this prior waking up the new thread - the thread pointer
 	 * might get invalid after that point, if the thread exits quickly.
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 432c3d7..2b59c82 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -98,26 +98,26 @@
 
 	trace_sched_process_hang(t);
 
-	if (!sysctl_hung_task_warnings)
+	if (!sysctl_hung_task_warnings && !sysctl_hung_task_panic)
 		return;
 
-	if (sysctl_hung_task_warnings > 0)
-		sysctl_hung_task_warnings--;
-
 	/*
 	 * Ok, the task did not get scheduled for more than 2 minutes,
 	 * complain:
 	 */
-	pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
-		t->comm, t->pid, timeout);
-	pr_err("      %s %s %.*s\n",
-		print_tainted(), init_utsname()->release,
-		(int)strcspn(init_utsname()->version, " "),
-		init_utsname()->version);
-	pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
-		" disables this message.\n");
-	sched_show_task(t);
-	debug_show_all_locks();
+	if (sysctl_hung_task_warnings) {
+		sysctl_hung_task_warnings--;
+		pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
+			t->comm, t->pid, timeout);
+		pr_err("      %s %s %.*s\n",
+			print_tainted(), init_utsname()->release,
+			(int)strcspn(init_utsname()->version, " "),
+			init_utsname()->version);
+		pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
+			" disables this message.\n");
+		sched_show_task(t);
+		debug_show_all_locks();
+	}
 
 	touch_nmi_watchdog();
 
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d10ab6b..d630954 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -49,7 +49,7 @@
 #include <linux/cpu.h>
 #include <linux/jump_label.h>
 
-#include <asm-generic/sections.h>
+#include <asm/sections.h>
 #include <asm/cacheflush.h>
 #include <asm/errno.h>
 #include <asm/uaccess.h>
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4ab4c37..be2cc1f 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -138,7 +138,7 @@
 }
 
 /**
- * probe_kthread_data - speculative version of kthread_data()
+ * kthread_probe_data - speculative version of kthread_data()
  * @task: possible kthread task in question
  *
  * @task could be a kthread task.  Return the data value specified when it
@@ -146,7 +146,7 @@
  * inaccessible for any reason, %NULL is returned.  This function requires
  * that @task itself is safe to dereference.
  */
-void *probe_kthread_data(struct task_struct *task)
+void *kthread_probe_data(struct task_struct *task)
 {
 	struct kthread *kthread = to_kthread(task);
 	void *data = NULL;
@@ -244,33 +244,10 @@
 	}
 }
 
-/**
- * kthread_create_on_node - create a kthread.
- * @threadfn: the function to run until signal_pending(current).
- * @data: data ptr for @threadfn.
- * @node: task and thread structures for the thread are allocated on this node
- * @namefmt: printf-style name for the thread.
- *
- * Description: This helper function creates and names a kernel
- * thread.  The thread will be stopped: use wake_up_process() to start
- * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
- * is affine to all CPUs.
- *
- * If thread is going to be bound on a particular cpu, give its node
- * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
- * When woken, the thread will run @threadfn() with @data as its
- * argument. @threadfn() can either call do_exit() directly if it is a
- * standalone thread for which no one will call kthread_stop(), or
- * return when 'kthread_should_stop()' is true (which means
- * kthread_stop() has been called).  The return value should be zero
- * or a negative error number; it will be passed to kthread_stop().
- *
- * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
- */
-struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
-					   void *data, int node,
-					   const char namefmt[],
-					   ...)
+static struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
+						    void *data, int node,
+						    const char namefmt[],
+						    va_list args)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct task_struct *task;
@@ -311,11 +288,8 @@
 	task = create->result;
 	if (!IS_ERR(task)) {
 		static const struct sched_param param = { .sched_priority = 0 };
-		va_list args;
 
-		va_start(args, namefmt);
 		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
-		va_end(args);
 		/*
 		 * root may have changed our (kthreadd's) priority or CPU mask.
 		 * The kernel thread should not inherit these properties.
@@ -326,6 +300,44 @@
 	kfree(create);
 	return task;
 }
+
+/**
+ * kthread_create_on_node - create a kthread.
+ * @threadfn: the function to run until signal_pending(current).
+ * @data: data ptr for @threadfn.
+ * @node: task and thread structures for the thread are allocated on this node
+ * @namefmt: printf-style name for the thread.
+ *
+ * Description: This helper function creates and names a kernel
+ * thread.  The thread will be stopped: use wake_up_process() to start
+ * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
+ * is affine to all CPUs.
+ *
+ * If thread is going to be bound on a particular cpu, give its node
+ * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
+ * When woken, the thread will run @threadfn() with @data as its
+ * argument. @threadfn() can either call do_exit() directly if it is a
+ * standalone thread for which no one will call kthread_stop(), or
+ * return when 'kthread_should_stop()' is true (which means
+ * kthread_stop() has been called).  The return value should be zero
+ * or a negative error number; it will be passed to kthread_stop().
+ *
+ * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
+ */
+struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
+					   void *data, int node,
+					   const char namefmt[],
+					   ...)
+{
+	struct task_struct *task;
+	va_list args;
+
+	va_start(args, namefmt);
+	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
+	va_end(args);
+
+	return task;
+}
 EXPORT_SYMBOL(kthread_create_on_node);
 
 static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
@@ -390,10 +402,10 @@
 				   cpu);
 	if (IS_ERR(p))
 		return p;
+	kthread_bind(p, cpu);
+	/* CPU hotplug need to bind once again when unparking the thread. */
 	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
 	to_kthread(p)->cpu = cpu;
-	/* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
-	kthread_park(p);
 	return p;
 }
 
@@ -407,6 +419,10 @@
 	 * which might be about to be cleared.
 	 */
 	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+		/*
+		 * Newly created kthread was parked when the CPU was offline.
+		 * The binding was lost and we need to set it again.
+		 */
 		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
 			__kthread_bind(k, kthread->cpu, TASK_PARKED);
 		wake_up_state(k, TASK_PARKED);
@@ -540,39 +556,48 @@
 	return 0;
 }
 
-void __init_kthread_worker(struct kthread_worker *worker,
+void __kthread_init_worker(struct kthread_worker *worker,
 				const char *name,
 				struct lock_class_key *key)
 {
+	memset(worker, 0, sizeof(struct kthread_worker));
 	spin_lock_init(&worker->lock);
 	lockdep_set_class_and_name(&worker->lock, key, name);
 	INIT_LIST_HEAD(&worker->work_list);
-	worker->task = NULL;
+	INIT_LIST_HEAD(&worker->delayed_work_list);
 }
-EXPORT_SYMBOL_GPL(__init_kthread_worker);
+EXPORT_SYMBOL_GPL(__kthread_init_worker);
 
 /**
  * kthread_worker_fn - kthread function to process kthread_worker
  * @worker_ptr: pointer to initialized kthread_worker
  *
- * This function can be used as @threadfn to kthread_create() or
- * kthread_run() with @worker_ptr argument pointing to an initialized
- * kthread_worker.  The started kthread will process work_list until
- * the it is stopped with kthread_stop().  A kthread can also call
- * this function directly after extra initialization.
+ * This function implements the main cycle of kthread worker. It processes
+ * work_list until it is stopped with kthread_stop(). It sleeps when the queue
+ * is empty.
  *
- * Different kthreads can be used for the same kthread_worker as long
- * as there's only one kthread attached to it at any given time.  A
- * kthread_worker without an attached kthread simply collects queued
- * kthread_works.
+ * The works are not allowed to keep any locks, disable preemption or interrupts
+ * when they finish. There is defined a safe point for freezing when one work
+ * finishes and before a new one is started.
+ *
+ * Also the works must not be handled by more than one worker at the same time,
+ * see also kthread_queue_work().
  */
 int kthread_worker_fn(void *worker_ptr)
 {
 	struct kthread_worker *worker = worker_ptr;
 	struct kthread_work *work;
 
-	WARN_ON(worker->task);
+	/*
+	 * FIXME: Update the check and remove the assignment when all kthread
+	 * worker users are created using kthread_create_worker*() functions.
+	 */
+	WARN_ON(worker->task && worker->task != current);
 	worker->task = current;
+
+	if (worker->flags & KTW_FREEZABLE)
+		set_freezable();
+
 repeat:
 	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
 
@@ -605,13 +630,132 @@
 }
 EXPORT_SYMBOL_GPL(kthread_worker_fn);
 
-/* insert @work before @pos in @worker */
-static void insert_kthread_work(struct kthread_worker *worker,
-			       struct kthread_work *work,
-			       struct list_head *pos)
+static struct kthread_worker *
+__kthread_create_worker(int cpu, unsigned int flags,
+			const char namefmt[], va_list args)
+{
+	struct kthread_worker *worker;
+	struct task_struct *task;
+
+	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
+	if (!worker)
+		return ERR_PTR(-ENOMEM);
+
+	kthread_init_worker(worker);
+
+	if (cpu >= 0) {
+		char name[TASK_COMM_LEN];
+
+		/*
+		 * kthread_create_worker_on_cpu() allows to pass a generic
+		 * namefmt in compare with kthread_create_on_cpu. We need
+		 * to format it here.
+		 */
+		vsnprintf(name, sizeof(name), namefmt, args);
+		task = kthread_create_on_cpu(kthread_worker_fn, worker,
+					     cpu, name);
+	} else {
+		task = __kthread_create_on_node(kthread_worker_fn, worker,
+						-1, namefmt, args);
+	}
+
+	if (IS_ERR(task))
+		goto fail_task;
+
+	worker->flags = flags;
+	worker->task = task;
+	wake_up_process(task);
+	return worker;
+
+fail_task:
+	kfree(worker);
+	return ERR_CAST(task);
+}
+
+/**
+ * kthread_create_worker - create a kthread worker
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the kthread worker (task).
+ *
+ * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
+ * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
+ * when the worker was SIGKILLed.
+ */
+struct kthread_worker *
+kthread_create_worker(unsigned int flags, const char namefmt[], ...)
+{
+	struct kthread_worker *worker;
+	va_list args;
+
+	va_start(args, namefmt);
+	worker = __kthread_create_worker(-1, flags, namefmt, args);
+	va_end(args);
+
+	return worker;
+}
+EXPORT_SYMBOL(kthread_create_worker);
+
+/**
+ * kthread_create_worker_on_cpu - create a kthread worker and bind it
+ *	it to a given CPU and the associated NUMA node.
+ * @cpu: CPU number
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the kthread worker (task).
+ *
+ * Use a valid CPU number if you want to bind the kthread worker
+ * to the given CPU and the associated NUMA node.
+ *
+ * A good practice is to add the cpu number also into the worker name.
+ * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
+ *
+ * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
+ * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
+ * when the worker was SIGKILLed.
+ */
+struct kthread_worker *
+kthread_create_worker_on_cpu(int cpu, unsigned int flags,
+			     const char namefmt[], ...)
+{
+	struct kthread_worker *worker;
+	va_list args;
+
+	va_start(args, namefmt);
+	worker = __kthread_create_worker(cpu, flags, namefmt, args);
+	va_end(args);
+
+	return worker;
+}
+EXPORT_SYMBOL(kthread_create_worker_on_cpu);
+
+/*
+ * Returns true when the work could not be queued at the moment.
+ * It happens when it is already pending in a worker list
+ * or when it is being cancelled.
+ */
+static inline bool queuing_blocked(struct kthread_worker *worker,
+				   struct kthread_work *work)
 {
 	lockdep_assert_held(&worker->lock);
 
+	return !list_empty(&work->node) || work->canceling;
+}
+
+static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
+					     struct kthread_work *work)
+{
+	lockdep_assert_held(&worker->lock);
+	WARN_ON_ONCE(!list_empty(&work->node));
+	/* Do not use a work with >1 worker, see kthread_queue_work() */
+	WARN_ON_ONCE(work->worker && work->worker != worker);
+}
+
+/* insert @work before @pos in @worker */
+static void kthread_insert_work(struct kthread_worker *worker,
+				struct kthread_work *work,
+				struct list_head *pos)
+{
+	kthread_insert_work_sanity_check(worker, work);
+
 	list_add_tail(&work->node, pos);
 	work->worker = worker;
 	if (!worker->current_work && likely(worker->task))
@@ -619,29 +763,133 @@
 }
 
 /**
- * queue_kthread_work - queue a kthread_work
+ * kthread_queue_work - queue a kthread_work
  * @worker: target kthread_worker
  * @work: kthread_work to queue
  *
  * Queue @work to work processor @task for async execution.  @task
  * must have been created with kthread_worker_create().  Returns %true
  * if @work was successfully queued, %false if it was already pending.
+ *
+ * Reinitialize the work if it needs to be used by another worker.
+ * For example, when the worker was stopped and started again.
  */
-bool queue_kthread_work(struct kthread_worker *worker,
+bool kthread_queue_work(struct kthread_worker *worker,
 			struct kthread_work *work)
 {
 	bool ret = false;
 	unsigned long flags;
 
 	spin_lock_irqsave(&worker->lock, flags);
-	if (list_empty(&work->node)) {
-		insert_kthread_work(worker, work, &worker->work_list);
+	if (!queuing_blocked(worker, work)) {
+		kthread_insert_work(worker, work, &worker->work_list);
 		ret = true;
 	}
 	spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(queue_kthread_work);
+EXPORT_SYMBOL_GPL(kthread_queue_work);
+
+/**
+ * kthread_delayed_work_timer_fn - callback that queues the associated kthread
+ *	delayed work when the timer expires.
+ * @__data: pointer to the data associated with the timer
+ *
+ * The format of the function is defined by struct timer_list.
+ * It should have been called from irqsafe timer with irq already off.
+ */
+void kthread_delayed_work_timer_fn(unsigned long __data)
+{
+	struct kthread_delayed_work *dwork =
+		(struct kthread_delayed_work *)__data;
+	struct kthread_work *work = &dwork->work;
+	struct kthread_worker *worker = work->worker;
+
+	/*
+	 * This might happen when a pending work is reinitialized.
+	 * It means that it is used a wrong way.
+	 */
+	if (WARN_ON_ONCE(!worker))
+		return;
+
+	spin_lock(&worker->lock);
+	/* Work must not be used with >1 worker, see kthread_queue_work(). */
+	WARN_ON_ONCE(work->worker != worker);
+
+	/* Move the work from worker->delayed_work_list. */
+	WARN_ON_ONCE(list_empty(&work->node));
+	list_del_init(&work->node);
+	kthread_insert_work(worker, work, &worker->work_list);
+
+	spin_unlock(&worker->lock);
+}
+EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
+
+void __kthread_queue_delayed_work(struct kthread_worker *worker,
+				  struct kthread_delayed_work *dwork,
+				  unsigned long delay)
+{
+	struct timer_list *timer = &dwork->timer;
+	struct kthread_work *work = &dwork->work;
+
+	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn ||
+		     timer->data != (unsigned long)dwork);
+
+	/*
+	 * If @delay is 0, queue @dwork->work immediately.  This is for
+	 * both optimization and correctness.  The earliest @timer can
+	 * expire is on the closest next tick and delayed_work users depend
+	 * on that there's no such delay when @delay is 0.
+	 */
+	if (!delay) {
+		kthread_insert_work(worker, work, &worker->work_list);
+		return;
+	}
+
+	/* Be paranoid and try to detect possible races already now. */
+	kthread_insert_work_sanity_check(worker, work);
+
+	list_add(&work->node, &worker->delayed_work_list);
+	work->worker = worker;
+	timer_stats_timer_set_start_info(&dwork->timer);
+	timer->expires = jiffies + delay;
+	add_timer(timer);
+}
+
+/**
+ * kthread_queue_delayed_work - queue the associated kthread work
+ *	after a delay.
+ * @worker: target kthread_worker
+ * @dwork: kthread_delayed_work to queue
+ * @delay: number of jiffies to wait before queuing
+ *
+ * If the work has not been pending it starts a timer that will queue
+ * the work after the given @delay. If @delay is zero, it queues the
+ * work immediately.
+ *
+ * Return: %false if the @work has already been pending. It means that
+ * either the timer was running or the work was queued. It returns %true
+ * otherwise.
+ */
+bool kthread_queue_delayed_work(struct kthread_worker *worker,
+				struct kthread_delayed_work *dwork,
+				unsigned long delay)
+{
+	struct kthread_work *work = &dwork->work;
+	unsigned long flags;
+	bool ret = false;
+
+	spin_lock_irqsave(&worker->lock, flags);
+
+	if (!queuing_blocked(worker, work)) {
+		__kthread_queue_delayed_work(worker, dwork, delay);
+		ret = true;
+	}
+
+	spin_unlock_irqrestore(&worker->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
 
 struct kthread_flush_work {
 	struct kthread_work	work;
@@ -656,12 +904,12 @@
 }
 
 /**
- * flush_kthread_work - flush a kthread_work
+ * kthread_flush_work - flush a kthread_work
  * @work: work to flush
  *
  * If @work is queued or executing, wait for it to finish execution.
  */
-void flush_kthread_work(struct kthread_work *work)
+void kthread_flush_work(struct kthread_work *work)
 {
 	struct kthread_flush_work fwork = {
 		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
@@ -670,21 +918,19 @@
 	struct kthread_worker *worker;
 	bool noop = false;
 
-retry:
 	worker = work->worker;
 	if (!worker)
 		return;
 
 	spin_lock_irq(&worker->lock);
-	if (work->worker != worker) {
-		spin_unlock_irq(&worker->lock);
-		goto retry;
-	}
+	/* Work must not be used with >1 worker, see kthread_queue_work(). */
+	WARN_ON_ONCE(work->worker != worker);
 
 	if (!list_empty(&work->node))
-		insert_kthread_work(worker, &fwork.work, work->node.next);
+		kthread_insert_work(worker, &fwork.work, work->node.next);
 	else if (worker->current_work == work)
-		insert_kthread_work(worker, &fwork.work, worker->work_list.next);
+		kthread_insert_work(worker, &fwork.work,
+				    worker->work_list.next);
 	else
 		noop = true;
 
@@ -693,23 +939,214 @@
 	if (!noop)
 		wait_for_completion(&fwork.done);
 }
-EXPORT_SYMBOL_GPL(flush_kthread_work);
+EXPORT_SYMBOL_GPL(kthread_flush_work);
+
+/*
+ * This function removes the work from the worker queue. Also it makes sure
+ * that it won't get queued later via the delayed work's timer.
+ *
+ * The work might still be in use when this function finishes. See the
+ * current_work proceed by the worker.
+ *
+ * Return: %true if @work was pending and successfully canceled,
+ *	%false if @work was not pending
+ */
+static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
+				  unsigned long *flags)
+{
+	/* Try to cancel the timer if exists. */
+	if (is_dwork) {
+		struct kthread_delayed_work *dwork =
+			container_of(work, struct kthread_delayed_work, work);
+		struct kthread_worker *worker = work->worker;
+
+		/*
+		 * del_timer_sync() must be called to make sure that the timer
+		 * callback is not running. The lock must be temporary released
+		 * to avoid a deadlock with the callback. In the meantime,
+		 * any queuing is blocked by setting the canceling counter.
+		 */
+		work->canceling++;
+		spin_unlock_irqrestore(&worker->lock, *flags);
+		del_timer_sync(&dwork->timer);
+		spin_lock_irqsave(&worker->lock, *flags);
+		work->canceling--;
+	}
+
+	/*
+	 * Try to remove the work from a worker list. It might either
+	 * be from worker->work_list or from worker->delayed_work_list.
+	 */
+	if (!list_empty(&work->node)) {
+		list_del_init(&work->node);
+		return true;
+	}
+
+	return false;
+}
 
 /**
- * flush_kthread_worker - flush all current works on a kthread_worker
+ * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
+ * @worker: kthread worker to use
+ * @dwork: kthread delayed work to queue
+ * @delay: number of jiffies to wait before queuing
+ *
+ * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
+ * modify @dwork's timer so that it expires after @delay. If @delay is zero,
+ * @work is guaranteed to be queued immediately.
+ *
+ * Return: %true if @dwork was pending and its timer was modified,
+ * %false otherwise.
+ *
+ * A special case is when the work is being canceled in parallel.
+ * It might be caused either by the real kthread_cancel_delayed_work_sync()
+ * or yet another kthread_mod_delayed_work() call. We let the other command
+ * win and return %false here. The caller is supposed to synchronize these
+ * operations a reasonable way.
+ *
+ * This function is safe to call from any context including IRQ handler.
+ * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
+ * for details.
+ */
+bool kthread_mod_delayed_work(struct kthread_worker *worker,
+			      struct kthread_delayed_work *dwork,
+			      unsigned long delay)
+{
+	struct kthread_work *work = &dwork->work;
+	unsigned long flags;
+	int ret = false;
+
+	spin_lock_irqsave(&worker->lock, flags);
+
+	/* Do not bother with canceling when never queued. */
+	if (!work->worker)
+		goto fast_queue;
+
+	/* Work must not be used with >1 worker, see kthread_queue_work() */
+	WARN_ON_ONCE(work->worker != worker);
+
+	/* Do not fight with another command that is canceling this work. */
+	if (work->canceling)
+		goto out;
+
+	ret = __kthread_cancel_work(work, true, &flags);
+fast_queue:
+	__kthread_queue_delayed_work(worker, dwork, delay);
+out:
+	spin_unlock_irqrestore(&worker->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
+
+static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
+{
+	struct kthread_worker *worker = work->worker;
+	unsigned long flags;
+	int ret = false;
+
+	if (!worker)
+		goto out;
+
+	spin_lock_irqsave(&worker->lock, flags);
+	/* Work must not be used with >1 worker, see kthread_queue_work(). */
+	WARN_ON_ONCE(work->worker != worker);
+
+	ret = __kthread_cancel_work(work, is_dwork, &flags);
+
+	if (worker->current_work != work)
+		goto out_fast;
+
+	/*
+	 * The work is in progress and we need to wait with the lock released.
+	 * In the meantime, block any queuing by setting the canceling counter.
+	 */
+	work->canceling++;
+	spin_unlock_irqrestore(&worker->lock, flags);
+	kthread_flush_work(work);
+	spin_lock_irqsave(&worker->lock, flags);
+	work->canceling--;
+
+out_fast:
+	spin_unlock_irqrestore(&worker->lock, flags);
+out:
+	return ret;
+}
+
+/**
+ * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
+ * @work: the kthread work to cancel
+ *
+ * Cancel @work and wait for its execution to finish.  This function
+ * can be used even if the work re-queues itself. On return from this
+ * function, @work is guaranteed to be not pending or executing on any CPU.
+ *
+ * kthread_cancel_work_sync(&delayed_work->work) must not be used for
+ * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
+ *
+ * The caller must ensure that the worker on which @work was last
+ * queued can't be destroyed before this function returns.
+ *
+ * Return: %true if @work was pending, %false otherwise.
+ */
+bool kthread_cancel_work_sync(struct kthread_work *work)
+{
+	return __kthread_cancel_work_sync(work, false);
+}
+EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
+
+/**
+ * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
+ *	wait for it to finish.
+ * @dwork: the kthread delayed work to cancel
+ *
+ * This is kthread_cancel_work_sync() for delayed works.
+ *
+ * Return: %true if @dwork was pending, %false otherwise.
+ */
+bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
+{
+	return __kthread_cancel_work_sync(&dwork->work, true);
+}
+EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
+
+/**
+ * kthread_flush_worker - flush all current works on a kthread_worker
  * @worker: worker to flush
  *
  * Wait until all currently executing or pending works on @worker are
  * finished.
  */
-void flush_kthread_worker(struct kthread_worker *worker)
+void kthread_flush_worker(struct kthread_worker *worker)
 {
 	struct kthread_flush_work fwork = {
 		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
 		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
 	};
 
-	queue_kthread_work(worker, &fwork.work);
+	kthread_queue_work(worker, &fwork.work);
 	wait_for_completion(&fwork.done);
 }
-EXPORT_SYMBOL_GPL(flush_kthread_worker);
+EXPORT_SYMBOL_GPL(kthread_flush_worker);
+
+/**
+ * kthread_destroy_worker - destroy a kthread worker
+ * @worker: worker to be destroyed
+ *
+ * Flush and destroy @worker.  The simple flush is enough because the kthread
+ * worker API is used only in trivial scenarios.  There are no multi-step state
+ * machines needed.
+ */
+void kthread_destroy_worker(struct kthread_worker *worker)
+{
+	struct task_struct *task;
+
+	task = worker->task;
+	if (WARN_ON(!task))
+		return;
+
+	kthread_flush_worker(worker);
+	kthread_stop(task);
+	WARN_ON(!list_empty(&worker->work_list));
+	kfree(worker);
+}
+EXPORT_SYMBOL(kthread_destroy_worker);
diff --git a/kernel/panic.c b/kernel/panic.c
index ca8cea1..e6480e2 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -71,6 +71,32 @@
 	panic_smp_self_stop();
 }
 
+/*
+ * Stop other CPUs in panic.  Architecture dependent code may override this
+ * with more suitable version.  For example, if the architecture supports
+ * crash dump, it should save registers of each stopped CPU and disable
+ * per-CPU features such as virtualization extensions.
+ */
+void __weak crash_smp_send_stop(void)
+{
+	static int cpus_stopped;
+
+	/*
+	 * This function can be called twice in panic path, but obviously
+	 * we execute this only once.
+	 */
+	if (cpus_stopped)
+		return;
+
+	/*
+	 * Note smp_send_stop is the usual smp shutdown function, which
+	 * unfortunately means it may not be hardened to work in a panic
+	 * situation.
+	 */
+	smp_send_stop();
+	cpus_stopped = 1;
+}
+
 atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);
 
 /*
@@ -164,14 +190,21 @@
 	if (!_crash_kexec_post_notifiers) {
 		printk_nmi_flush_on_panic();
 		__crash_kexec(NULL);
-	}
 
-	/*
-	 * Note smp_send_stop is the usual smp shutdown function, which
-	 * unfortunately means it may not be hardened to work in a panic
-	 * situation.
-	 */
-	smp_send_stop();
+		/*
+		 * Note smp_send_stop is the usual smp shutdown function, which
+		 * unfortunately means it may not be hardened to work in a
+		 * panic situation.
+		 */
+		smp_send_stop();
+	} else {
+		/*
+		 * If we want to do crash dump after notifier calls and
+		 * kmsg_dump, we will need architecture dependent extra
+		 * works in addition to stopping other CPUs.
+		 */
+		crash_smp_send_stop();
+	}
 
 	/*
 	 * Run any panic handlers, including those that might need to
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 8019cc0..de08fc9 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -666,11 +666,8 @@
 	 * better readable output. 'c' in the record flags mark the first
 	 * fragment of a line, '+' the following.
 	 */
-	if (msg->flags & LOG_CONT && !(prev_flags & LOG_CONT))
-		cont = 'c';
-	else if ((msg->flags & LOG_CONT) ||
-		 ((prev_flags & LOG_CONT) && !(msg->flags & LOG_PREFIX)))
-		cont = '+';
+	if (msg->flags & LOG_CONT)
+		cont = (prev_flags & LOG_CONT) ? '+' : 'c';
 
 	return scnprintf(buf, size, "%u,%llu,%llu,%c;",
 		       (msg->facility << 3) | msg->level, seq, ts_usec, cont);
@@ -797,6 +794,8 @@
 	return ret;
 }
 
+static void cont_flush(void);
+
 static ssize_t devkmsg_read(struct file *file, char __user *buf,
 			    size_t count, loff_t *ppos)
 {
@@ -812,6 +811,7 @@
 	if (ret)
 		return ret;
 	raw_spin_lock_irq(&logbuf_lock);
+	cont_flush();
 	while (user->seq == log_next_seq) {
 		if (file->f_flags & O_NONBLOCK) {
 			ret = -EAGAIN;
@@ -874,6 +874,7 @@
 		return -ESPIPE;
 
 	raw_spin_lock_irq(&logbuf_lock);
+	cont_flush();
 	switch (whence) {
 	case SEEK_SET:
 		/* the first record */
@@ -912,6 +913,7 @@
 	poll_wait(file, &log_wait, wait);
 
 	raw_spin_lock_irq(&logbuf_lock);
+	cont_flush();
 	if (user->seq < log_next_seq) {
 		/* return error when data has vanished underneath us */
 		if (user->seq < log_first_seq)
@@ -1298,6 +1300,7 @@
 		size_t skip;
 
 		raw_spin_lock_irq(&logbuf_lock);
+		cont_flush();
 		if (syslog_seq < log_first_seq) {
 			/* messages are gone, move to first one */
 			syslog_seq = log_first_seq;
@@ -1357,6 +1360,7 @@
 		return -ENOMEM;
 
 	raw_spin_lock_irq(&logbuf_lock);
+	cont_flush();
 	if (buf) {
 		u64 next_seq;
 		u64 seq;
@@ -1518,6 +1522,7 @@
 	/* Number of chars in the log buffer */
 	case SYSLOG_ACTION_SIZE_UNREAD:
 		raw_spin_lock_irq(&logbuf_lock);
+		cont_flush();
 		if (syslog_seq < log_first_seq) {
 			/* messages are gone, move to first one */
 			syslog_seq = log_first_seq;
@@ -1654,35 +1659,33 @@
 	bool flushed:1;			/* buffer sealed and committed */
 } cont;
 
-static void cont_flush(enum log_flags flags)
+static void cont_flush(void)
 {
 	if (cont.flushed)
 		return;
 	if (cont.len == 0)
 		return;
-
 	if (cont.cons) {
 		/*
 		 * If a fragment of this line was directly flushed to the
 		 * console; wait for the console to pick up the rest of the
 		 * line. LOG_NOCONS suppresses a duplicated output.
 		 */
-		log_store(cont.facility, cont.level, flags | LOG_NOCONS,
+		log_store(cont.facility, cont.level, cont.flags | LOG_NOCONS,
 			  cont.ts_nsec, NULL, 0, cont.buf, cont.len);
-		cont.flags = flags;
 		cont.flushed = true;
 	} else {
 		/*
 		 * If no fragment of this line ever reached the console,
 		 * just submit it to the store and free the buffer.
 		 */
-		log_store(cont.facility, cont.level, flags, 0,
+		log_store(cont.facility, cont.level, cont.flags, 0,
 			  NULL, 0, cont.buf, cont.len);
 		cont.len = 0;
 	}
 }
 
-static bool cont_add(int facility, int level, const char *text, size_t len)
+static bool cont_add(int facility, int level, enum log_flags flags, const char *text, size_t len)
 {
 	if (cont.len && cont.flushed)
 		return false;
@@ -1693,7 +1696,7 @@
 	 * the line gets too long, split it up in separate records.
 	 */
 	if (nr_ext_console_drivers || cont.len + len > sizeof(cont.buf)) {
-		cont_flush(LOG_CONT);
+		cont_flush();
 		return false;
 	}
 
@@ -1702,7 +1705,7 @@
 		cont.level = level;
 		cont.owner = current;
 		cont.ts_nsec = local_clock();
-		cont.flags = 0;
+		cont.flags = flags;
 		cont.cons = 0;
 		cont.flushed = false;
 	}
@@ -1710,8 +1713,15 @@
 	memcpy(cont.buf + cont.len, text, len);
 	cont.len += len;
 
+	// The original flags come from the first line,
+	// but later continuations can add a newline.
+	if (flags & LOG_NEWLINE) {
+		cont.flags |= LOG_NEWLINE;
+		cont_flush();
+	}
+
 	if (cont.len > (sizeof(cont.buf) * 80) / 100)
-		cont_flush(LOG_CONT);
+		cont_flush();
 
 	return true;
 }
@@ -1744,6 +1754,35 @@
 	return textlen;
 }
 
+static size_t log_output(int facility, int level, enum log_flags lflags, const char *dict, size_t dictlen, char *text, size_t text_len)
+{
+	/*
+	 * If an earlier line was buffered, and we're a continuation
+	 * write from the same process, try to add it to the buffer.
+	 */
+	if (cont.len) {
+		if (cont.owner == current && (lflags & LOG_CONT)) {
+			if (cont_add(facility, level, lflags, text, text_len))
+				return text_len;
+		}
+		/* Otherwise, make sure it's flushed */
+		cont_flush();
+	}
+
+	/* Skip empty continuation lines that couldn't be added - they just flush */
+	if (!text_len && (lflags & LOG_CONT))
+		return 0;
+
+	/* If it doesn't end in a newline, try to buffer the current line */
+	if (!(lflags & LOG_NEWLINE)) {
+		if (cont_add(facility, level, lflags, text, text_len))
+			return text_len;
+	}
+
+	/* Store it in the record log */
+	return log_store(facility, level, lflags, 0, dict, dictlen, text, text_len);
+}
+
 asmlinkage int vprintk_emit(int facility, int level,
 			    const char *dict, size_t dictlen,
 			    const char *fmt, va_list args)
@@ -1830,10 +1869,9 @@
 
 	/* strip kernel syslog prefix and extract log level or control flags */
 	if (facility == 0) {
-		int kern_level = printk_get_level(text);
+		int kern_level;
 
-		if (kern_level) {
-			const char *end_of_header = printk_skip_level(text);
+		while ((kern_level = printk_get_level(text)) != 0) {
 			switch (kern_level) {
 			case '0' ... '7':
 				if (level == LOGLEVEL_DEFAULT)
@@ -1841,14 +1879,13 @@
 				/* fallthrough */
 			case 'd':	/* KERN_DEFAULT */
 				lflags |= LOG_PREFIX;
+				break;
+			case 'c':	/* KERN_CONT */
+				lflags |= LOG_CONT;
 			}
-			/*
-			 * No need to check length here because vscnprintf
-			 * put '\0' at the end of the string. Only valid and
-			 * newly printed level is detected.
-			 */
-			text_len -= end_of_header - text;
-			text = (char *)end_of_header;
+
+			text_len -= 2;
+			text += 2;
 		}
 	}
 
@@ -1858,45 +1895,7 @@
 	if (dict)
 		lflags |= LOG_PREFIX|LOG_NEWLINE;
 
-	if (!(lflags & LOG_NEWLINE)) {
-		/*
-		 * Flush the conflicting buffer. An earlier newline was missing,
-		 * or another task also prints continuation lines.
-		 */
-		if (cont.len && (lflags & LOG_PREFIX || cont.owner != current))
-			cont_flush(LOG_NEWLINE);
-
-		/* buffer line if possible, otherwise store it right away */
-		if (cont_add(facility, level, text, text_len))
-			printed_len += text_len;
-		else
-			printed_len += log_store(facility, level,
-						 lflags | LOG_CONT, 0,
-						 dict, dictlen, text, text_len);
-	} else {
-		bool stored = false;
-
-		/*
-		 * If an earlier newline was missing and it was the same task,
-		 * either merge it with the current buffer and flush, or if
-		 * there was a race with interrupts (prefix == true) then just
-		 * flush it out and store this line separately.
-		 * If the preceding printk was from a different task and missed
-		 * a newline, flush and append the newline.
-		 */
-		if (cont.len) {
-			if (cont.owner == current && !(lflags & LOG_PREFIX))
-				stored = cont_add(facility, level, text,
-						  text_len);
-			cont_flush(LOG_NEWLINE);
-		}
-
-		if (stored)
-			printed_len += text_len;
-		else
-			printed_len += log_store(facility, level, lflags, 0,
-						 dict, dictlen, text, text_len);
-	}
+	printed_len += log_output(facility, level, lflags, dict, dictlen, text, text_len);
 
 	logbuf_cpu = UINT_MAX;
 	raw_spin_unlock(&logbuf_lock);
@@ -3040,6 +3039,7 @@
 		dumper->active = true;
 
 		raw_spin_lock_irqsave(&logbuf_lock, flags);
+		cont_flush();
 		dumper->cur_seq = clear_seq;
 		dumper->cur_idx = clear_idx;
 		dumper->next_seq = log_next_seq;
@@ -3130,6 +3130,7 @@
 	bool ret;
 
 	raw_spin_lock_irqsave(&logbuf_lock, flags);
+	cont_flush();
 	ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
 	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
 
@@ -3172,6 +3173,7 @@
 		goto out;
 
 	raw_spin_lock_irqsave(&logbuf_lock, flags);
+	cont_flush();
 	if (dumper->cur_seq < log_first_seq) {
 		/* messages are gone, move to first available one */
 		dumper->cur_seq = log_first_seq;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 1d3b766..e6474f7 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -73,6 +73,8 @@
 {
 	BUG_ON(!child->ptrace);
 
+	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+
 	child->parent = child->real_parent;
 	list_del_init(&child->ptrace_entry);
 
@@ -489,7 +491,6 @@
 
 	/* Architecture-specific hardware disable .. */
 	ptrace_disable(child);
-	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 
 	write_lock_irq(&tasklist_lock);
 	/*
@@ -536,7 +537,7 @@
 		int this_len, retval;
 
 		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
-		retval = access_process_vm(tsk, src, buf, this_len, 0);
+		retval = access_process_vm(tsk, src, buf, this_len, FOLL_FORCE);
 		if (!retval) {
 			if (copied)
 				break;
@@ -563,7 +564,8 @@
 		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 		if (copy_from_user(buf, src, this_len))
 			return -EFAULT;
-		retval = access_process_vm(tsk, dst, buf, this_len, 1);
+		retval = access_process_vm(tsk, dst, buf, this_len,
+				FOLL_FORCE | FOLL_WRITE);
 		if (!retval) {
 			if (copied)
 				break;
@@ -1126,7 +1128,7 @@
 	unsigned long tmp;
 	int copied;
 
-	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
+	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
 	if (copied != sizeof(tmp))
 		return -EIO;
 	return put_user(tmp, (unsigned long __user *)data);
@@ -1137,7 +1139,8 @@
 {
 	int copied;
 
-	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
+	copied = access_process_vm(tsk, addr, &data, sizeof(data),
+			FOLL_FORCE | FOLL_WRITE);
 	return (copied == sizeof(data)) ? 0 : -EIO;
 }
 
@@ -1154,7 +1157,8 @@
 	switch (request) {
 	case PTRACE_PEEKTEXT:
 	case PTRACE_PEEKDATA:
-		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
+		ret = access_process_vm(child, addr, &word, sizeof(word),
+				FOLL_FORCE);
 		if (ret != sizeof(word))
 			ret = -EIO;
 		else
@@ -1163,7 +1167,8 @@
 
 	case PTRACE_POKETEXT:
 	case PTRACE_POKEDATA:
-		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
+		ret = access_process_vm(child, addr, &data, sizeof(data),
+				FOLL_FORCE | FOLL_WRITE);
 		ret = (ret != sizeof(data) ? -EIO : 0);
 		break;
 
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 944b1b4..1898559 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -170,7 +170,7 @@
 				      false));
 }
 
-static void rcu_process_callbacks(struct softirq_action *unused)
+static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
 {
 	__rcu_process_callbacks(&rcu_sched_ctrlblk);
 	__rcu_process_callbacks(&rcu_bh_ctrlblk);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 7e2e038..69a5611 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3013,7 +3013,7 @@
 /*
  * Do RCU core processing for the current CPU.
  */
-static void rcu_process_callbacks(struct softirq_action *unused)
+static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
 {
 	struct rcu_state *rsp;
 
diff --git a/kernel/relay.c b/kernel/relay.c
index 9988f5c..da79a10 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -328,13 +328,15 @@
 
 /**
  *	wakeup_readers - wake up readers waiting on a channel
- *	@data: contains the channel buffer
+ *	@work: contains the channel buffer
  *
- *	This is the timer function used to defer reader waking.
+ *	This is the function used to defer reader waking
  */
-static void wakeup_readers(unsigned long data)
+static void wakeup_readers(struct irq_work *work)
 {
-	struct rchan_buf *buf = (struct rchan_buf *)data;
+	struct rchan_buf *buf;
+
+	buf = container_of(work, struct rchan_buf, wakeup_work);
 	wake_up_interruptible(&buf->read_wait);
 }
 
@@ -352,9 +354,10 @@
 	if (init) {
 		init_waitqueue_head(&buf->read_wait);
 		kref_init(&buf->kref);
-		setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
-	} else
-		del_timer_sync(&buf->timer);
+		init_irq_work(&buf->wakeup_work, wakeup_readers);
+	} else {
+		irq_work_sync(&buf->wakeup_work);
+	}
 
 	buf->subbufs_produced = 0;
 	buf->subbufs_consumed = 0;
@@ -487,7 +490,7 @@
 static void relay_close_buf(struct rchan_buf *buf)
 {
 	buf->finalized = 1;
-	del_timer_sync(&buf->timer);
+	irq_work_sync(&buf->wakeup_work);
 	buf->chan->cb->remove_buf_file(buf->dentry);
 	kref_put(&buf->kref, relay_remove_buf);
 }
@@ -754,14 +757,15 @@
 			buf->early_bytes += buf->chan->subbuf_size -
 					    buf->padding[old_subbuf];
 		smp_mb();
-		if (waitqueue_active(&buf->read_wait))
+		if (waitqueue_active(&buf->read_wait)) {
 			/*
 			 * Calling wake_up_interruptible() from here
 			 * will deadlock if we happen to be logging
 			 * from the scheduler (trying to re-grab
 			 * rq->lock), so defer it.
 			 */
-			mod_timer(&buf->timer, jiffies + 1);
+			irq_work_queue(&buf->wakeup_work);
+		}
 	}
 
 	old = buf->data;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 1393588..fa178b6 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -415,7 +415,8 @@
 	if (autogroup_path(tg, group_path, PATH_MAX))
 		return group_path;
 
-	return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
+	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
+	return group_path;
 }
 #endif
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 502e95a..d941c97 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -690,7 +690,14 @@
 	 * will definitely be update (after enqueue).
 	 */
 	sa->period_contrib = 1023;
-	sa->load_avg = scale_load_down(se->load.weight);
+	/*
+	 * Tasks are intialized with full load to be seen as heavy tasks until
+	 * they get a chance to stabilize to their real load level.
+	 * Group entities are intialized with zero load to reflect the fact that
+	 * nothing has been attached to the task group yet.
+	 */
+	if (entity_is_task(se))
+		sa->load_avg = scale_load_down(se->load.weight);
 	sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
 	/*
 	 * At this point, util_avg won't be used in select_task_rq_fair anyway
@@ -5471,13 +5478,18 @@
  */
 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
 {
-	struct sched_domain *this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
-	u64 avg_idle = this_rq()->avg_idle;
-	u64 avg_cost = this_sd->avg_scan_cost;
+	struct sched_domain *this_sd;
+	u64 avg_cost, avg_idle = this_rq()->avg_idle;
 	u64 time, cost;
 	s64 delta;
 	int cpu, wrap;
 
+	this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
+	if (!this_sd)
+		return -1;
+
+	avg_cost = this_sd->avg_scan_cost;
+
 	/*
 	 * Due to large variance we need a large fuzz factor; hackbench in
 	 * particularly is sensitive here.
@@ -8522,7 +8534,7 @@
  * run_rebalance_domains is triggered when needed from the scheduler tick.
  * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
  */
-static void run_rebalance_domains(struct softirq_action *h)
+static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
 {
 	struct rq *this_rq = this_rq();
 	enum cpu_idle_type idle = this_rq->idle_balance ?
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index fc0d8270..4a5c6e7 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -122,12 +122,12 @@
 
 		if (kthread_should_park()) {
 			__set_current_state(TASK_RUNNING);
+			preempt_enable();
 			if (ht->park && td->status == HP_THREAD_ACTIVE) {
 				BUG_ON(td->cpu != smp_processor_id());
 				ht->park(td->cpu);
 				td->status = HP_THREAD_PARKED;
 			}
-			preempt_enable();
 			kthread_parkme();
 			/* We might have been woken for stop */
 			continue;
@@ -186,6 +186,11 @@
 		kfree(td);
 		return PTR_ERR(tsk);
 	}
+	/*
+	 * Park the thread so that it could start right on the CPU
+	 * when it is available.
+	 */
+	kthread_park(tsk);
 	get_task_struct(tsk);
 	*per_cpu_ptr(ht->store, cpu) = tsk;
 	if (ht->create) {
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 6676264..1bf81ef 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -496,7 +496,7 @@
 }
 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
 
-static void tasklet_action(struct softirq_action *a)
+static __latent_entropy void tasklet_action(struct softirq_action *a)
 {
 	struct tasklet_struct *list;
 
@@ -532,7 +532,7 @@
 	}
 }
 
-static void tasklet_hi_action(struct softirq_action *a)
+static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
 {
 	struct tasklet_struct *list;
 
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 2c5e3a8..635482e 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -250,3 +250,8 @@
 
 /* membarrier */
 cond_syscall(sys_membarrier);
+
+/* memory protection keys */
+cond_syscall(sys_pkey_mprotect);
+cond_syscall(sys_pkey_alloc);
+cond_syscall(sys_pkey_free);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index a43775c..706309f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -107,9 +107,8 @@
 extern int pid_max;
 extern int pid_max_min, pid_max_max;
 extern int percpu_pagelist_fraction;
-extern int compat_log;
 extern int latencytop_enabled;
-extern int sysctl_nr_open_min, sysctl_nr_open_max;
+extern unsigned int sysctl_nr_open_min, sysctl_nr_open_max;
 #ifndef CONFIG_MMU
 extern int sysctl_nr_trim_pages;
 #endif
@@ -1085,15 +1084,6 @@
 		.extra1		= &neg_one,
 	},
 #endif
-#ifdef CONFIG_COMPAT
-	{
-		.procname	= "compat-log",
-		.data		= &compat_log,
-		.maxlen		= sizeof (int),
-	 	.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-#endif
 #ifdef CONFIG_RT_MUTEXES
 	{
 		.procname	= "max_lock_depth",
@@ -1693,7 +1683,7 @@
 	{
 		.procname	= "nr_open",
 		.data		= &sysctl_nr_open,
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &sysctl_nr_open_min,
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index c3aad68..12dd190 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -542,7 +542,6 @@
 static int alarm_timer_create(struct k_itimer *new_timer)
 {
 	enum  alarmtimer_type type;
-	struct alarm_base *base;
 
 	if (!alarmtimer_get_rtcdev())
 		return -ENOTSUPP;
@@ -551,7 +550,6 @@
 		return -EPERM;
 
 	type = clock2alarm(new_timer->it_clock);
-	base = &alarm_bases[type];
 	alarm_init(&new_timer->it.alarm.alarmtimer, type, alarm_handle_timer);
 	return 0;
 }
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e07fb09..37dec7e 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -403,8 +403,11 @@
 		tkr = tkf->base + (seq & 0x01);
 		now = ktime_to_ns(tkr->base);
 
-		now += clocksource_delta(tkr->read(tkr->clock),
-					 tkr->cycle_last, tkr->mask);
+		now += timekeeping_delta_to_ns(tkr,
+				clocksource_delta(
+					tkr->read(tkr->clock),
+					tkr->cycle_last,
+					tkr->mask));
 	} while (read_seqcount_retry(&tkf->seq, seq));
 
 	return now;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 32bf6f7..2d47980 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1633,7 +1633,7 @@
 /*
  * This function runs timers and the timer-tq in bottom half context.
  */
-static void run_timer_softirq(struct softirq_action *h)
+static __latent_entropy void run_timer_softirq(struct softirq_action *h)
 {
 	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 992ab9d..e579808 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -1,8 +1,4 @@
 
-# We are fully aware of the dangers of __builtin_return_address()
-FRAME_CFLAGS := $(call cc-disable-warning,frame-address)
-KBUILD_CFLAGS += $(FRAME_CFLAGS)
-
 # Do not instrument the tracer itself:
 
 ifdef CONFIG_FUNCTION_TRACER
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index bd81f03..479d840 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4261,7 +4261,7 @@
 	 * This function is called without any synchronization and @task
 	 * could be in any state.  Be careful with dereferences.
 	 */
-	worker = probe_kthread_data(task);
+	worker = kthread_probe_data(task);
 
 	/*
 	 * Carefully copy the associated workqueue's workfn and name.  Keep
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 39d07e7..33bc56c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1857,15 +1857,6 @@
 
 	  See Documentation/debugging-via-ohci1394.txt for more information.
 
-config BUILD_DOCSRC
-	bool "Build targets in Documentation/ tree"
-	depends on HEADERS_CHECK
-	help
-	  This option attempts to build objects from the source files in the
-	  kernel Documentation/ tree.
-
-	  Say N if you are unsure.
-
 config DMA_API_DEBUG
 	bool "Enable debugging of DMA-API usage"
 	depends on HAVE_DMA_API_DEBUG
diff --git a/lib/Makefile b/lib/Makefile
index f3ca8c0..50144a3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -180,6 +180,7 @@
 
 obj-$(CONFIG_STACKDEPOT) += stackdepot.o
 KASAN_SANITIZE_stackdepot.o := n
+KCOV_INSTRUMENT_stackdepot.o := n
 
 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
 	       fdt_empty_tree.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index eca8808..0b66f0e 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -496,6 +496,11 @@
  * ranges.  Consecutively set bits are shown as two hyphen-separated
  * decimal numbers, the smallest and largest bit numbers set in
  * the range.
+ * Optionally each range can be postfixed to denote that only parts of it
+ * should be set. The range will divided to groups of specific size.
+ * From each group will be used only defined amount of bits.
+ * Syntax: range:used_size/group_size
+ * Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769
  *
  * Returns 0 on success, -errno on invalid input strings.
  * Error values:
@@ -507,16 +512,20 @@
 		int is_user, unsigned long *maskp,
 		int nmaskbits)
 {
-	unsigned a, b;
+	unsigned int a, b, old_a, old_b;
+	unsigned int group_size, used_size;
 	int c, old_c, totaldigits, ndigits;
 	const char __user __force *ubuf = (const char __user __force *)buf;
-	int at_start, in_range;
+	int at_start, in_range, in_partial_range;
 
 	totaldigits = c = 0;
+	old_a = old_b = 0;
+	group_size = used_size = 0;
 	bitmap_zero(maskp, nmaskbits);
 	do {
 		at_start = 1;
 		in_range = 0;
+		in_partial_range = 0;
 		a = b = 0;
 		ndigits = totaldigits;
 
@@ -547,6 +556,24 @@
 			if ((totaldigits != ndigits) && isspace(old_c))
 				return -EINVAL;
 
+			if (c == '/') {
+				used_size = a;
+				at_start = 1;
+				in_range = 0;
+				a = b = 0;
+				continue;
+			}
+
+			if (c == ':') {
+				old_a = a;
+				old_b = b;
+				at_start = 1;
+				in_range = 0;
+				in_partial_range = 1;
+				a = b = 0;
+				continue;
+			}
+
 			if (c == '-') {
 				if (at_start || in_range)
 					return -EINVAL;
@@ -567,15 +594,30 @@
 		}
 		if (ndigits == totaldigits)
 			continue;
+		if (in_partial_range) {
+			group_size = a;
+			a = old_a;
+			b = old_b;
+			old_a = old_b = 0;
+		}
 		/* if no digit is after '-', it's wrong*/
 		if (at_start && in_range)
 			return -EINVAL;
-		if (!(a <= b))
+		if (!(a <= b) || !(used_size <= group_size))
 			return -EINVAL;
 		if (b >= nmaskbits)
 			return -ERANGE;
 		while (a <= b) {
-			set_bit(a, maskp);
+			if (in_partial_range) {
+				static int pos_in_group = 1;
+
+				if (pos_in_group <= used_size)
+					set_bit(a, maskp);
+
+				if (a == b || ++pos_in_group > group_size)
+					pos_in_group = 1;
+			} else
+				set_bit(a, maskp);
 			a++;
 		}
 	} while (buflen && c == ',');
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 48b8c27..f0c7f14 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -396,8 +396,7 @@
 
 	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
 		iterate_iovec(i, bytes, v, iov, skip, ({
-			err = fault_in_multipages_readable(v.iov_base,
-					v.iov_len);
+			err = fault_in_pages_readable(v.iov_base, v.iov_len);
 			if (unlikely(err))
 			return err;
 		0;}))
@@ -834,13 +833,13 @@
 				size_t *start)
 {
 	struct pipe_inode_info *pipe = i->pipe;
-	size_t n = push_pipe(i, maxsize, &idx, start);
+	ssize_t n = push_pipe(i, maxsize, &idx, start);
 	if (!n)
 		return -EFAULT;
 
 	maxsize = n;
 	n += *start;
-	while (n >= PAGE_SIZE) {
+	while (n > 0) {
 		get_page(*pages++ = pipe->bufs[idx].page);
 		idx = next_idx(idx, pipe);
 		n -= PAGE_SIZE;
@@ -1140,6 +1139,28 @@
 }
 EXPORT_SYMBOL(dup_iter);
 
+/**
+ * import_iovec() - Copy an array of &struct iovec from userspace
+ *     into the kernel, check that it is valid, and initialize a new
+ *     &struct iov_iter iterator to access it.
+ *
+ * @type: One of %READ or %WRITE.
+ * @uvector: Pointer to the userspace array.
+ * @nr_segs: Number of elements in userspace array.
+ * @fast_segs: Number of elements in @iov.
+ * @iov: (input and output parameter) Pointer to pointer to (usually small
+ *     on-stack) kernel array.
+ * @i: Pointer to iterator that will be initialized on success.
+ *
+ * If the array pointed to by *@iov is large enough to hold all @nr_segs,
+ * then this function places %NULL in *@iov on return. Otherwise, a new
+ * array will be allocated and the result placed in *@iov. This means that
+ * the caller may call kfree() on *@iov regardless of whether the small
+ * on-stack array was used or not (and regardless of whether this function
+ * returns an error or not).
+ *
+ * Return: 0 on success or negative error code on error.
+ */
 int import_iovec(int type, const struct iovec __user * uvector,
 		 unsigned nr_segs, unsigned fast_segs,
 		 struct iovec **iov, struct iov_iter *i)
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 2be5569..1d6565e8 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -74,7 +74,7 @@
 }
 EXPORT_SYMBOL(irq_poll_complete);
 
-static void irq_poll_softirq(struct softirq_action *h)
+static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
 {
 	struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
 	int rearm = 0, budget = irq_poll_budget;
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index d8a5cf6..b8e2080 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -48,11 +48,9 @@
 {
 	unsigned long long res;
 	unsigned int rv;
-	int overflow;
 
 	res = 0;
 	rv = 0;
-	overflow = 0;
 	while (*s) {
 		unsigned int val;
 
@@ -71,15 +69,13 @@
 		 */
 		if (unlikely(res & (~0ull << 60))) {
 			if (res > div_u64(ULLONG_MAX - val, base))
-				overflow = 1;
+				rv |= KSTRTOX_OVERFLOW;
 		}
 		res = res * base + val;
 		rv++;
 		s++;
 	}
 	*p = res;
-	if (overflow)
-		rv |= KSTRTOX_OVERFLOW;
 	return rv;
 }
 
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 27fe749..9ac959e 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -33,6 +33,7 @@
 
 #define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))
 
+static DEFINE_SPINLOCK(percpu_ref_switch_lock);
 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
 
 static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
@@ -82,6 +83,7 @@
 	atomic_long_set(&ref->count, start_count);
 
 	ref->release = release;
+	ref->confirm_switch = NULL;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(percpu_ref_init);
@@ -101,6 +103,8 @@
 	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
 
 	if (percpu_count) {
+		/* non-NULL confirm_switch indicates switching in progress */
+		WARN_ON_ONCE(ref->confirm_switch);
 		free_percpu(percpu_count);
 		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
 	}
@@ -161,34 +165,67 @@
 static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
 					  percpu_ref_func_t *confirm_switch)
 {
-	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) {
-		/* switching from percpu to atomic */
-		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
-
-		/*
-		 * Non-NULL ->confirm_switch is used to indicate that
-		 * switching is in progress.  Use noop one if unspecified.
-		 */
-		WARN_ON_ONCE(ref->confirm_switch);
-		ref->confirm_switch =
-			confirm_switch ?: percpu_ref_noop_confirm_switch;
-
-		percpu_ref_get(ref);	/* put after confirmation */
-		call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
-	} else if (confirm_switch) {
-		/*
-		 * Somebody already set ATOMIC.  Switching may still be in
-		 * progress.  @confirm_switch must be invoked after the
-		 * switching is complete and a full sched RCU grace period
-		 * has passed.  Wait synchronously for the previous
-		 * switching and schedule @confirm_switch invocation.
-		 */
-		wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
-		ref->confirm_switch = confirm_switch;
-
-		percpu_ref_get(ref);	/* put after confirmation */
-		call_rcu_sched(&ref->rcu, percpu_ref_call_confirm_rcu);
+	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
+		if (confirm_switch)
+			confirm_switch(ref);
+		return;
 	}
+
+	/* switching from percpu to atomic */
+	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+
+	/*
+	 * Non-NULL ->confirm_switch is used to indicate that switching is
+	 * in progress.  Use noop one if unspecified.
+	 */
+	ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
+
+	percpu_ref_get(ref);	/* put after confirmation */
+	call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
+}
+
+static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
+{
+	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+	int cpu;
+
+	BUG_ON(!percpu_count);
+
+	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
+		return;
+
+	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
+
+	/*
+	 * Restore per-cpu operation.  smp_store_release() is paired with
+	 * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
+	 * that the zeroing is visible to all percpu accesses which can see
+	 * the following __PERCPU_REF_ATOMIC clearing.
+	 */
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(percpu_count, cpu) = 0;
+
+	smp_store_release(&ref->percpu_count_ptr,
+			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
+}
+
+static void __percpu_ref_switch_mode(struct percpu_ref *ref,
+				     percpu_ref_func_t *confirm_switch)
+{
+	lockdep_assert_held(&percpu_ref_switch_lock);
+
+	/*
+	 * If the previous ATOMIC switching hasn't finished yet, wait for
+	 * its completion.  If the caller ensures that ATOMIC switching
+	 * isn't in progress, this function can be called from any context.
+	 */
+	wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
+			    percpu_ref_switch_lock);
+
+	if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+		__percpu_ref_switch_to_atomic(ref, confirm_switch);
+	else
+		__percpu_ref_switch_to_percpu(ref);
 }
 
 /**
@@ -207,47 +244,21 @@
  * operations.  Note that @ref will stay in atomic mode across kill/reinit
  * cycles until percpu_ref_switch_to_percpu() is called.
  *
- * This function normally doesn't block and can be called from any context
- * but it may block if @confirm_kill is specified and @ref is already in
- * the process of switching to atomic mode.  In such cases, @confirm_switch
- * will be invoked after the switching is complete.
- *
- * Due to the way percpu_ref is implemented, @confirm_switch will be called
- * after at least one full sched RCU grace period has passed but this is an
- * implementation detail and must not be depended upon.
+ * This function may block if @ref is in the process of switching to atomic
+ * mode.  If the caller ensures that @ref is not in the process of
+ * switching to atomic mode, this function can be called from any context.
  */
 void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_switch)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
 	ref->force_atomic = true;
-	__percpu_ref_switch_to_atomic(ref, confirm_switch);
-}
+	__percpu_ref_switch_mode(ref, confirm_switch);
 
-static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
-{
-	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
-	int cpu;
-
-	BUG_ON(!percpu_count);
-
-	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
-		return;
-
-	wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
-
-	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
-
-	/*
-	 * Restore per-cpu operation.  smp_store_release() is paired with
-	 * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
-	 * that the zeroing is visible to all percpu accesses which can see
-	 * the following __PERCPU_REF_ATOMIC clearing.
-	 */
-	for_each_possible_cpu(cpu)
-		*per_cpu_ptr(percpu_count, cpu) = 0;
-
-	smp_store_release(&ref->percpu_count_ptr,
-			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
+	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
 }
 
 /**
@@ -264,17 +275,20 @@
  * dying or dead, the actual switching takes place on the following
  * percpu_ref_reinit().
  *
- * This function normally doesn't block and can be called from any context
- * but it may block if @ref is in the process of switching to atomic mode
- * by percpu_ref_switch_atomic().
+ * This function may block if @ref is in the process of switching to atomic
+ * mode.  If the caller ensures that @ref is not in the process of
+ * switching to atomic mode, this function can be called from any context.
  */
 void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 {
-	ref->force_atomic = false;
+	unsigned long flags;
 
-	/* a dying or dead ref can't be switched to percpu mode w/o reinit */
-	if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD))
-		__percpu_ref_switch_to_percpu(ref);
+	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
+	ref->force_atomic = false;
+	__percpu_ref_switch_mode(ref, NULL);
+
+	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
 }
 
 /**
@@ -290,21 +304,23 @@
  *
  * This function normally doesn't block and can be called from any context
  * but it may block if @confirm_kill is specified and @ref is in the
- * process of switching to atomic mode by percpu_ref_switch_atomic().
- *
- * Due to the way percpu_ref is implemented, @confirm_switch will be called
- * after at least one full sched RCU grace period has passed but this is an
- * implementation detail and must not be depended upon.
+ * process of switching to atomic mode by percpu_ref_switch_to_atomic().
  */
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
 	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
 		  "%s called more than once on %pf!", __func__, ref->release);
 
 	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
-	__percpu_ref_switch_to_atomic(ref, confirm_kill);
+	__percpu_ref_switch_mode(ref, confirm_kill);
 	percpu_ref_put(ref);
+
+	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
 
@@ -321,11 +337,16 @@
  */
 void percpu_ref_reinit(struct percpu_ref *ref)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
 	WARN_ON_ONCE(!percpu_ref_is_zero(ref));
 
 	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
 	percpu_ref_get(ref);
-	if (!ref->force_atomic)
-		__percpu_ref_switch_to_percpu(ref);
+	__percpu_ref_switch_mode(ref, NULL);
+
+	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_reinit);
diff --git a/lib/random32.c b/lib/random32.c
index 915982b..fa594b1 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -47,7 +47,7 @@
 }
 #endif
 
-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
 
 /**
  *	prandom_u32_state - seeded pseudo-random number generator.
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 9c5fe81..7e35fc4 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -1,6 +1,7 @@
 #include <linux/compiler.h>
 #include <linux/export.h>
 #include <linux/kasan-checks.h>
+#include <linux/thread_info.h>
 #include <linux/uaccess.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -111,6 +112,7 @@
 		long retval;
 
 		kasan_check_write(dst, count);
+		check_object_size(dst, count, false);
 		user_access_begin();
 		retval = do_strncpy_from_user(dst, src, count, max);
 		user_access_end();
diff --git a/mm/Makefile b/mm/Makefile
index 2ca1faf..295bd7a 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -21,9 +21,6 @@
 KCOV_INSTRUMENT_mmzone.o := n
 KCOV_INSTRUMENT_vmstat.o := n
 
-# Since __builtin_frame_address does work as used, disable the warning.
-CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address)
-
 mmu-y			:= nommu.o
 mmu-$(CONFIG_MMU)	:= gup.o highmem.o memory.o mincore.o \
 			   mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
diff --git a/mm/bootmem.c b/mm/bootmem.c
index a869f84..e8a55a3 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -155,7 +155,7 @@
 {
 	unsigned long cursor, end;
 
-	kmemleak_free_part(__va(physaddr), size);
+	kmemleak_free_part_phys(physaddr, size);
 
 	cursor = PFN_UP(physaddr);
 	end = PFN_DOWN(physaddr + size);
@@ -399,7 +399,7 @@
 {
 	unsigned long start, end;
 
-	kmemleak_free_part(__va(physaddr), size);
+	kmemleak_free_part_phys(physaddr, size);
 
 	start = PFN_UP(physaddr);
 	end = PFN_DOWN(physaddr + size);
@@ -420,7 +420,7 @@
 {
 	unsigned long start, end;
 
-	kmemleak_free_part(__va(physaddr), size);
+	kmemleak_free_part_phys(physaddr, size);
 
 	start = PFN_UP(physaddr);
 	end = PFN_DOWN(physaddr + size);
diff --git a/mm/cma.c b/mm/cma.c
index bd0e141..384c2cb 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -336,7 +336,7 @@
 		 * kmemleak scans/reads tracked objects for pointers to other
 		 * objects but this address isn't mapped and accessible
 		 */
-		kmemleak_ignore(phys_to_virt(addr));
+		kmemleak_ignore_phys(addr);
 		base = addr;
 	}
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 2f7b778..849f459 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1941,7 +1941,7 @@
 		file_accessed(file);
 
 		retval = mapping->a_ops->direct_IO(iocb, &data);
-		if (retval > 0) {
+		if (retval >= 0) {
 			iocb->ki_pos += retval;
 			iov_iter_advance(iter, retval);
 		}
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index 381bb07..db77dcb 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -11,10 +11,7 @@
  * get_vaddr_frames() - map virtual addresses to pfns
  * @start:	starting user address
  * @nr_frames:	number of pages / pfns from start to map
- * @write:	whether pages will be written to by the caller
- * @force:	whether to force write access even if user mapping is
- *		readonly. See description of the same argument of
-		get_user_pages().
+ * @gup_flags:	flags modifying lookup behaviour
  * @vec:	structure which receives pages / pfns of the addresses mapped.
  *		It should have space for at least nr_frames entries.
  *
@@ -34,7 +31,7 @@
  * This function takes care of grabbing mmap_sem as necessary.
  */
 int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
-		     bool write, bool force, struct frame_vector *vec)
+		     unsigned int gup_flags, struct frame_vector *vec)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
@@ -59,7 +56,7 @@
 		vec->got_ref = true;
 		vec->is_pfns = false;
 		ret = get_user_pages_locked(start, nr_frames,
-			write, force, (struct page **)(vec->ptrs), &locked);
+			gup_flags, (struct page **)(vec->ptrs), &locked);
 		goto out;
 	}
 
diff --git a/mm/gup.c b/mm/gup.c
index 96b2b2f..7aa113c 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -60,6 +60,16 @@
 	return -EEXIST;
 }
 
+/*
+ * FOLL_FORCE can write to even unwritable pte's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+{
+	return pte_write(pte) ||
+		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+}
+
 static struct page *follow_page_pte(struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmd, unsigned int flags)
 {
@@ -95,7 +105,7 @@
 	}
 	if ((flags & FOLL_NUMA) && pte_protnone(pte))
 		goto no_page;
-	if ((flags & FOLL_WRITE) && !pte_write(pte)) {
+	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
 		pte_unmap_unlock(ptep, ptl);
 		return NULL;
 	}
@@ -412,7 +422,7 @@
 	 * reCOWed by userspace write).
 	 */
 	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
-		*flags &= ~FOLL_WRITE;
+	        *flags |= FOLL_COW;
 	return 0;
 }
 
@@ -729,7 +739,6 @@
 						struct mm_struct *mm,
 						unsigned long start,
 						unsigned long nr_pages,
-						int write, int force,
 						struct page **pages,
 						struct vm_area_struct **vmas,
 						int *locked, bool notify_drop,
@@ -747,10 +756,6 @@
 
 	if (pages)
 		flags |= FOLL_GET;
-	if (write)
-		flags |= FOLL_WRITE;
-	if (force)
-		flags |= FOLL_FORCE;
 
 	pages_done = 0;
 	lock_dropped = false;
@@ -843,12 +848,12 @@
  *          up_read(&mm->mmap_sem);
  */
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
-			   int write, int force, struct page **pages,
+			   unsigned int gup_flags, struct page **pages,
 			   int *locked)
 {
 	return __get_user_pages_locked(current, current->mm, start, nr_pages,
-				       write, force, pages, NULL, locked, true,
-				       FOLL_TOUCH);
+				       pages, NULL, locked, true,
+				       gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages_locked);
 
@@ -864,14 +869,14 @@
  */
 __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 					       unsigned long start, unsigned long nr_pages,
-					       int write, int force, struct page **pages,
-					       unsigned int gup_flags)
+					       struct page **pages, unsigned int gup_flags)
 {
 	long ret;
 	int locked = 1;
+
 	down_read(&mm->mmap_sem);
-	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
-				      pages, NULL, &locked, false, gup_flags);
+	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
+				      &locked, false, gup_flags);
 	if (locked)
 		up_read(&mm->mmap_sem);
 	return ret;
@@ -896,10 +901,10 @@
  * "force" parameter).
  */
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
-			     int write, int force, struct page **pages)
+			     struct page **pages, unsigned int gup_flags)
 {
 	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
-					 write, force, pages, FOLL_TOUCH);
+					 pages, gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
 
@@ -910,9 +915,7 @@
  * @mm:		mm_struct of target mm
  * @start:	starting user address
  * @nr_pages:	number of pages from start to pin
- * @write:	whether pages will be written to by the caller
- * @force:	whether to force access even when user mapping is currently
- *		protected (but never forces write access to shared mapping).
+ * @gup_flags:	flags modifying lookup behaviour
  * @pages:	array that receives pointers to the pages pinned.
  *		Should be at least nr_pages long. Or NULL, if caller
  *		only intends to ensure the pages are faulted in.
@@ -941,9 +944,9 @@
  * or similar operation cannot guarantee anything stronger anyway because
  * locks can't be held over the syscall boundary.
  *
- * If write=0, the page must not be written to. If the page is written to,
- * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
- * after the page is finished with, and before put_page is called.
+ * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
+ * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
+ * be called after the page is finished with, and before put_page is called.
  *
  * get_user_pages is typically used for fewer-copy IO operations, to get a
  * handle on the memory by some means other than accesses via the user virtual
@@ -960,12 +963,12 @@
  */
 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long start, unsigned long nr_pages,
-		int write, int force, struct page **pages,
+		unsigned int gup_flags, struct page **pages,
 		struct vm_area_struct **vmas)
 {
-	return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
-				       pages, vmas, NULL, false,
-				       FOLL_TOUCH | FOLL_REMOTE);
+	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
+				       NULL, false,
+				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
 }
 EXPORT_SYMBOL(get_user_pages_remote);
 
@@ -976,12 +979,12 @@
  * obviously don't pass FOLL_REMOTE in here.
  */
 long get_user_pages(unsigned long start, unsigned long nr_pages,
-		int write, int force, struct page **pages,
+		unsigned int gup_flags, struct page **pages,
 		struct vm_area_struct **vmas)
 {
 	return __get_user_pages_locked(current, current->mm, start, nr_pages,
-				       write, force, pages, vmas, NULL, false,
-				       FOLL_TOUCH);
+				       pages, vmas, NULL, false,
+				       gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages);
 
@@ -1505,7 +1508,8 @@
 		start += nr << PAGE_SHIFT;
 		pages += nr;
 
-		ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
+		ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
+				write ? FOLL_WRITE : 0);
 
 		/* Have to be a bit careful with return values */
 		if (nr > 0) {
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 88af13c..70c0097 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -34,6 +34,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
+#include <linux/bug.h>
 
 #include "kasan.h"
 #include "../slab.h"
@@ -62,7 +63,7 @@
 	}
 }
 
-static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
+static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
 {
 	void *base = task_stack_page(task);
 	size_t size = sp - base;
@@ -77,9 +78,24 @@
 }
 
 /* Unpoison the stack for the current task beyond a watermark sp value. */
-asmlinkage void kasan_unpoison_remaining_stack(void *sp)
+asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
 {
-	__kasan_unpoison_stack(current, sp);
+	__kasan_unpoison_stack(current, watermark);
+}
+
+/*
+ * Clear all poison for the region between the current SP and a provided
+ * watermark value, as is sometimes required prior to hand-crafted asm function
+ * returns in the middle of functions.
+ */
+void kasan_unpoison_stack_above_sp_to(const void *watermark)
+{
+	const void *sp = __builtin_frame_address(0);
+	size_t size = watermark - sp;
+
+	if (WARN_ON(sp > watermark))
+		return;
+	kasan_unpoison_shadow(sp, size);
 }
 
 /*
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 086292f..a5e453c 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -90,6 +90,8 @@
 #include <linux/cache.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/bootmem.h>
+#include <linux/pfn.h>
 #include <linux/mmzone.h>
 #include <linux/slab.h>
 #include <linux/thread_info.h>
@@ -1121,6 +1123,51 @@
 }
 EXPORT_SYMBOL(kmemleak_no_scan);
 
+/**
+ * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
+ *			 address argument
+ */
+void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+			       gfp_t gfp)
+{
+	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+		kmemleak_alloc(__va(phys), size, min_count, gfp);
+}
+EXPORT_SYMBOL(kmemleak_alloc_phys);
+
+/**
+ * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
+ *			     physical address argument
+ */
+void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
+{
+	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+		kmemleak_free_part(__va(phys), size);
+}
+EXPORT_SYMBOL(kmemleak_free_part_phys);
+
+/**
+ * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
+ *			    address argument
+ */
+void __ref kmemleak_not_leak_phys(phys_addr_t phys)
+{
+	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+		kmemleak_not_leak(__va(phys));
+}
+EXPORT_SYMBOL(kmemleak_not_leak_phys);
+
+/**
+ * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
+ *			  address argument
+ */
+void __ref kmemleak_ignore_phys(phys_addr_t phys)
+{
+	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+		kmemleak_ignore(__va(phys));
+}
+EXPORT_SYMBOL(kmemleak_ignore_phys);
+
 /*
  * Update an object's checksum and return true if it was modified.
  */
diff --git a/mm/memblock.c b/mm/memblock.c
index c8dfa43..7608bc3 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -723,7 +723,7 @@
 		     (unsigned long long)base + size - 1,
 		     (void *)_RET_IP_);
 
-	kmemleak_free_part(__va(base), size);
+	kmemleak_free_part_phys(base, size);
 	return memblock_remove_range(&memblock.reserved, base, size);
 }
 
@@ -1152,7 +1152,7 @@
 		 * The min_count is set to 0 so that memblock allocations are
 		 * never reported as leaks.
 		 */
-		kmemleak_alloc(__va(found), size, 0, 0);
+		kmemleak_alloc_phys(found, size, 0, 0);
 		return found;
 	}
 	return 0;
@@ -1399,7 +1399,7 @@
 	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
 		     __func__, (u64)base, (u64)base + size - 1,
 		     (void *)_RET_IP_);
-	kmemleak_free_part(__va(base), size);
+	kmemleak_free_part_phys(base, size);
 	memblock_remove_range(&memblock.reserved, base, size);
 }
 
@@ -1419,7 +1419,7 @@
 	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
 		     __func__, (u64)base, (u64)base + size - 1,
 		     (void *)_RET_IP_);
-	kmemleak_free_part(__va(base), size);
+	kmemleak_free_part_phys(base, size);
 	cursor = PFN_UP(base);
 	end = PFN_DOWN(base + size);
 
diff --git a/mm/memory.c b/mm/memory.c
index fc1987d..e18c57b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3869,10 +3869,11 @@
  * given task for page fault accounting.
  */
 static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long addr, void *buf, int len, int write)
+		unsigned long addr, void *buf, int len, unsigned int gup_flags)
 {
 	struct vm_area_struct *vma;
 	void *old_buf = buf;
+	int write = gup_flags & FOLL_WRITE;
 
 	down_read(&mm->mmap_sem);
 	/* ignore errors, just check how much was successfully transferred */
@@ -3882,7 +3883,7 @@
 		struct page *page = NULL;
 
 		ret = get_user_pages_remote(tsk, mm, addr, 1,
-				write, 1, &page, &vma);
+				gup_flags, &page, &vma);
 		if (ret <= 0) {
 #ifndef CONFIG_HAVE_IOREMAP_PROT
 			break;
@@ -3934,14 +3935,14 @@
  * @addr:	start address to access
  * @buf:	source or destination buffer
  * @len:	number of bytes to transfer
- * @write:	whether the access is a write
+ * @gup_flags:	flags modifying lookup behaviour
  *
  * The caller must hold a reference on @mm.
  */
 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-		void *buf, int len, int write)
+		void *buf, int len, unsigned int gup_flags)
 {
-	return __access_remote_vm(NULL, mm, addr, buf, len, write);
+	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
 }
 
 /*
@@ -3950,7 +3951,7 @@
  * Do not walk the page table directly, use get_user_pages
  */
 int access_process_vm(struct task_struct *tsk, unsigned long addr,
-		void *buf, int len, int write)
+		void *buf, int len, unsigned int gup_flags)
 {
 	struct mm_struct *mm;
 	int ret;
@@ -3959,7 +3960,8 @@
 	if (!mm)
 		return 0;
 
-	ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
+	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
+
 	mmput(mm);
 
 	return ret;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ad1c96a..0b859af 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -850,7 +850,7 @@
 	struct page *p;
 	int err;
 
-	err = get_user_pages(addr & PAGE_MASK, 1, 0, 0, &p, NULL);
+	err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
 	if (err >= 0) {
 		err = page_to_nid(p);
 		put_page(p);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index ec91dfd..bcdbe62 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -23,11 +23,13 @@
 #include <linux/mmu_notifier.h>
 #include <linux/migrate.h>
 #include <linux/perf_event.h>
+#include <linux/pkeys.h>
 #include <linux/ksm.h>
 #include <linux/pkeys.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
 #include "internal.h"
@@ -353,8 +355,11 @@
 	return error;
 }
 
-SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
-		unsigned long, prot)
+/*
+ * pkey==-1 when doing a legacy mprotect()
+ */
+static int do_mprotect_pkey(unsigned long start, size_t len,
+		unsigned long prot, int pkey)
 {
 	unsigned long nstart, end, tmp, reqprot;
 	struct vm_area_struct *vma, *prev;
@@ -383,6 +388,14 @@
 	if (down_write_killable(&current->mm->mmap_sem))
 		return -EINTR;
 
+	/*
+	 * If userspace did not allocate the pkey, do not let
+	 * them use it here.
+	 */
+	error = -EINVAL;
+	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
+		goto out;
+
 	vma = find_vma(current->mm, start);
 	error = -ENOMEM;
 	if (!vma)
@@ -409,8 +422,9 @@
 		prev = vma;
 
 	for (nstart = start ; ; ) {
+		unsigned long mask_off_old_flags;
 		unsigned long newflags;
-		int pkey = arch_override_mprotect_pkey(vma, prot, -1);
+		int new_vma_pkey;
 
 		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
 
@@ -418,8 +432,17 @@
 		if (rier && (vma->vm_flags & VM_MAYEXEC))
 			prot |= PROT_EXEC;
 
-		newflags = calc_vm_prot_bits(prot, pkey);
-		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
+		/*
+		 * Each mprotect() call explicitly passes r/w/x permissions.
+		 * If a permission is not passed to mprotect(), it must be
+		 * cleared from the VMA.
+		 */
+		mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
+					ARCH_VM_PKEY_FLAGS;
+
+		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
+		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
+		newflags |= (vma->vm_flags & ~mask_off_old_flags);
 
 		/* newflags >> 4 shift VM_MAY% in place of VM_% */
 		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
@@ -455,3 +478,60 @@
 	up_write(&current->mm->mmap_sem);
 	return error;
 }
+
+SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+		unsigned long, prot)
+{
+	return do_mprotect_pkey(start, len, prot, -1);
+}
+
+SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
+		unsigned long, prot, int, pkey)
+{
+	return do_mprotect_pkey(start, len, prot, pkey);
+}
+
+SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
+{
+	int pkey;
+	int ret;
+
+	/* No flags supported yet. */
+	if (flags)
+		return -EINVAL;
+	/* check for unsupported init values */
+	if (init_val & ~PKEY_ACCESS_MASK)
+		return -EINVAL;
+
+	down_write(&current->mm->mmap_sem);
+	pkey = mm_pkey_alloc(current->mm);
+
+	ret = -ENOSPC;
+	if (pkey == -1)
+		goto out;
+
+	ret = arch_set_user_pkey_access(current, pkey, init_val);
+	if (ret) {
+		mm_pkey_free(current->mm, pkey);
+		goto out;
+	}
+	ret = pkey;
+out:
+	up_write(&current->mm->mmap_sem);
+	return ret;
+}
+
+SYSCALL_DEFINE1(pkey_free, int, pkey)
+{
+	int ret;
+
+	down_write(&current->mm->mmap_sem);
+	ret = mm_pkey_free(current->mm, pkey);
+	up_write(&current->mm->mmap_sem);
+
+	/*
+	 * We could provie warnings or errors if any VMA still
+	 * has the pkey set here.
+	 */
+	return ret;
+}
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index ba609b6..487dad6 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -84,7 +84,7 @@
 {
 	unsigned long cursor, end;
 
-	kmemleak_free_part(__va(addr), size);
+	kmemleak_free_part_phys(addr, size);
 
 	cursor = PFN_UP(addr);
 	end = PFN_DOWN(addr + size);
diff --git a/mm/nommu.c b/mm/nommu.c
index 95daf81..db5fd17 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -160,33 +160,25 @@
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
 long get_user_pages(unsigned long start, unsigned long nr_pages,
-		    int write, int force, struct page **pages,
+		    unsigned int gup_flags, struct page **pages,
 		    struct vm_area_struct **vmas)
 {
-	int flags = 0;
-
-	if (write)
-		flags |= FOLL_WRITE;
-	if (force)
-		flags |= FOLL_FORCE;
-
-	return __get_user_pages(current, current->mm, start, nr_pages, flags,
-				pages, vmas, NULL);
+	return __get_user_pages(current, current->mm, start, nr_pages,
+				gup_flags, pages, vmas, NULL);
 }
 EXPORT_SYMBOL(get_user_pages);
 
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
-			    int write, int force, struct page **pages,
+			    unsigned int gup_flags, struct page **pages,
 			    int *locked)
 {
-	return get_user_pages(start, nr_pages, write, force, pages, NULL);
+	return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
 }
 EXPORT_SYMBOL(get_user_pages_locked);
 
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 			       unsigned long start, unsigned long nr_pages,
-			       int write, int force, struct page **pages,
-			       unsigned int gup_flags)
+			       struct page **pages, unsigned int gup_flags)
 {
 	long ret;
 	down_read(&mm->mmap_sem);
@@ -198,10 +190,10 @@
 EXPORT_SYMBOL(__get_user_pages_unlocked);
 
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
-			     int write, int force, struct page **pages)
+			     struct page **pages, unsigned int gup_flags)
 {
 	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
-					 write, force, pages, 0);
+					 pages, gup_flags);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
 
@@ -1817,9 +1809,10 @@
 EXPORT_SYMBOL(filemap_map_pages);
 
 static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long addr, void *buf, int len, int write)
+		unsigned long addr, void *buf, int len, unsigned int gup_flags)
 {
 	struct vm_area_struct *vma;
+	int write = gup_flags & FOLL_WRITE;
 
 	down_read(&mm->mmap_sem);
 
@@ -1854,21 +1847,22 @@
  * @addr:	start address to access
  * @buf:	source or destination buffer
  * @len:	number of bytes to transfer
- * @write:	whether the access is a write
+ * @gup_flags:	flags modifying lookup behaviour
  *
  * The caller must hold a reference on @mm.
  */
 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-		void *buf, int len, int write)
+		void *buf, int len, unsigned int gup_flags)
 {
-	return __access_remote_vm(NULL, mm, addr, buf, len, write);
+	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
 }
 
 /*
  * Access another process' address space.
  * - source/target buffer must be kernel space
  */
-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
+		unsigned int gup_flags)
 {
 	struct mm_struct *mm;
 
@@ -1879,7 +1873,7 @@
 	if (!mm)
 		return 0;
 
-	len = __access_remote_vm(tsk, mm, addr, buf, len, write);
+	len = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
 
 	mmput(mm);
 	return len;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ca423cc..2b3bf67 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -91,6 +91,11 @@
 int _node_numa_mem_[MAX_NUMNODES];
 #endif
 
+#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
+volatile u64 latent_entropy __latent_entropy;
+EXPORT_SYMBOL(latent_entropy);
+#endif
+
 /*
  * Array of node states.
  */
diff --git a/mm/percpu.c b/mm/percpu.c
index 9903830..2557143 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1961,8 +1961,9 @@
 	void *base = (void *)ULONG_MAX;
 	void **areas = NULL;
 	struct pcpu_alloc_info *ai;
-	size_t size_sum, areas_size, max_distance;
-	int group, i, rc;
+	size_t size_sum, areas_size;
+	unsigned long max_distance;
+	int group, i, highest_group, rc;
 
 	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
 				   cpu_distance_fn);
@@ -1978,7 +1979,8 @@
 		goto out_free;
 	}
 
-	/* allocate, copy and determine base address */
+	/* allocate, copy and determine base address & max_distance */
+	highest_group = 0;
 	for (group = 0; group < ai->nr_groups; group++) {
 		struct pcpu_group_info *gi = &ai->groups[group];
 		unsigned int cpu = NR_CPUS;
@@ -1999,6 +2001,21 @@
 		areas[group] = ptr;
 
 		base = min(ptr, base);
+		if (ptr > areas[highest_group])
+			highest_group = group;
+	}
+	max_distance = areas[highest_group] - base;
+	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
+
+	/* warn if maximum distance is further than 75% of vmalloc space */
+	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
+		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
+				max_distance, VMALLOC_TOTAL);
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+		/* and fail if we have fallback */
+		rc = -EINVAL;
+		goto out_free_areas;
+#endif
 	}
 
 	/*
@@ -2023,23 +2040,8 @@
 	}
 
 	/* base address is now known, determine group base offsets */
-	max_distance = 0;
 	for (group = 0; group < ai->nr_groups; group++) {
 		ai->groups[group].base_offset = areas[group] - base;
-		max_distance = max_t(size_t, max_distance,
-				     ai->groups[group].base_offset);
-	}
-	max_distance += ai->unit_size;
-
-	/* warn if maximum distance is further than 75% of vmalloc space */
-	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
-		pr_warn("max_distance=0x%zx too large for vmalloc space 0x%lx\n",
-			max_distance, VMALLOC_TOTAL);
-#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
-		/* and fail if we have fallback */
-		rc = -EINVAL;
-		goto out_free;
-#endif
 	}
 
 	pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 07514d4..be8dc8d 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -88,12 +88,16 @@
 	ssize_t rc = 0;
 	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
 		/ sizeof(struct pages *);
+	unsigned int flags = FOLL_REMOTE;
 
 	/* Work out address and page range required */
 	if (len == 0)
 		return 0;
 	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
 
+	if (vm_write)
+		flags |= FOLL_WRITE;
+
 	while (!rc && nr_pages && iov_iter_count(iter)) {
 		int pages = min(nr_pages, max_pages_per_loop);
 		size_t bytes;
@@ -104,8 +108,7 @@
 		 * current/current->mm
 		 */
 		pages = __get_user_pages_unlocked(task, mm, pa, pages,
-						  vm_write, 0, process_pages,
-						  FOLL_REMOTE);
+						  process_pages, flags);
 		if (pages <= 0)
 			return -EFAULT;
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 0e9901e..ad7813d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -934,7 +934,7 @@
 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 {
 	shmem_undo_range(inode, lstart, lend, false);
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+	inode->i_ctime = inode->i_mtime = current_time(inode);
 }
 EXPORT_SYMBOL_GPL(shmem_truncate_range);
 
@@ -960,7 +960,7 @@
 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 	int error;
 
-	error = inode_change_ok(inode, attr);
+	error = setattr_prepare(dentry, attr);
 	if (error)
 		return error;
 
@@ -979,7 +979,7 @@
 			if (error)
 				return error;
 			i_size_write(inode, newsize);
-			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+			inode->i_ctime = inode->i_mtime = current_time(inode);
 		}
 		if (newsize <= oldsize) {
 			loff_t holebegin = round_up(newsize, PAGE_SIZE);
@@ -2083,7 +2083,7 @@
 		inode->i_ino = get_next_ino();
 		inode_init_owner(inode, dir, mode);
 		inode->i_blocks = 0;
-		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 		inode->i_generation = get_seconds();
 		info = SHMEM_I(inode);
 		memset(info, 0, (char *)inode - (char *)info);
@@ -2741,7 +2741,7 @@
 
 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
 		i_size_write(inode, offset + len);
-	inode->i_ctime = CURRENT_TIME;
+	inode->i_ctime = current_time(inode);
 undone:
 	spin_lock(&inode->i_lock);
 	inode->i_private = NULL;
@@ -2794,7 +2794,7 @@
 
 		error = 0;
 		dir->i_size += BOGO_DIRENT_SIZE;
-		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+		dir->i_ctime = dir->i_mtime = current_time(dir);
 		d_instantiate(dentry, inode);
 		dget(dentry); /* Extra count - pin the dentry in core */
 	}
@@ -2862,7 +2862,7 @@
 		goto out;
 
 	dir->i_size += BOGO_DIRENT_SIZE;
-	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
 	inc_nlink(inode);
 	ihold(inode);	/* New dentry reference */
 	dget(dentry);		/* Extra pinning count for the created dentry */
@@ -2879,7 +2879,7 @@
 		shmem_free_inode(inode->i_sb);
 
 	dir->i_size -= BOGO_DIRENT_SIZE;
-	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
 	drop_nlink(inode);
 	dput(dentry);	/* Undo the count from "create" - this does all the work */
 	return 0;
@@ -2912,7 +2912,7 @@
 	old_dir->i_ctime = old_dir->i_mtime =
 	new_dir->i_ctime = new_dir->i_mtime =
 	d_inode(old_dentry)->i_ctime =
-	d_inode(new_dentry)->i_ctime = CURRENT_TIME;
+	d_inode(new_dentry)->i_ctime = current_time(old_dir);
 
 	return 0;
 }
@@ -2986,7 +2986,7 @@
 	new_dir->i_size += BOGO_DIRENT_SIZE;
 	old_dir->i_ctime = old_dir->i_mtime =
 	new_dir->i_ctime = new_dir->i_mtime =
-	inode->i_ctime = CURRENT_TIME;
+	inode->i_ctime = current_time(old_dir);
 	return 0;
 }
 
@@ -3041,7 +3041,7 @@
 		put_page(page);
 	}
 	dir->i_size += BOGO_DIRENT_SIZE;
-	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+	dir->i_ctime = dir->i_mtime = current_time(dir);
 	d_instantiate(dentry, inode);
 	dget(dentry);
 	return 0;
@@ -3175,10 +3175,7 @@
 	.readlink	= generic_readlink,
 	.get_link	= simple_get_link,
 #ifdef CONFIG_TMPFS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= shmem_listxattr,
-	.removexattr	= generic_removexattr,
 #endif
 };
 
@@ -3186,10 +3183,7 @@
 	.readlink	= generic_readlink,
 	.get_link	= shmem_get_link,
 #ifdef CONFIG_TMPFS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= shmem_listxattr,
-	.removexattr	= generic_removexattr,
 #endif
 };
 
@@ -3683,10 +3677,7 @@
 	.getattr	= shmem_getattr,
 	.setattr	= shmem_setattr,
 #ifdef CONFIG_TMPFS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= shmem_listxattr,
-	.removexattr	= generic_removexattr,
 	.set_acl	= simple_set_acl,
 #endif
 };
@@ -3701,14 +3692,11 @@
 	.mkdir		= shmem_mkdir,
 	.rmdir		= shmem_rmdir,
 	.mknod		= shmem_mknod,
-	.rename2	= shmem_rename2,
+	.rename		= shmem_rename2,
 	.tmpfile	= shmem_tmpfile,
 #endif
 #ifdef CONFIG_TMPFS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= shmem_listxattr,
-	.removexattr	= generic_removexattr,
 #endif
 #ifdef CONFIG_TMPFS_POSIX_ACL
 	.setattr	= shmem_setattr,
@@ -3718,10 +3706,7 @@
 
 static const struct inode_operations shmem_special_inode_operations = {
 #ifdef CONFIG_TMPFS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
 	.listxattr	= shmem_listxattr,
-	.removexattr	= generic_removexattr,
 #endif
 #ifdef CONFIG_TMPFS_POSIX_ACL
 	.setattr	= shmem_setattr,
diff --git a/mm/util.c b/mm/util.c
index 662cddf..952cbe7 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -283,7 +283,8 @@
 int __weak get_user_pages_fast(unsigned long start,
 				int nr_pages, int write, struct page **pages)
 {
-	return get_user_pages_unlocked(start, nr_pages, write, 0, pages);
+	return get_user_pages_unlocked(start, nr_pages, pages,
+				       write ? FOLL_WRITE : 0);
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
 
@@ -623,7 +624,7 @@
 	if (len > buflen)
 		len = buflen;
 
-	res = access_process_vm(task, arg_start, buffer, len, 0);
+	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
 
 	/*
 	 * If the nul at the end of args has been overwritten, then
@@ -638,7 +639,8 @@
 			if (len > buflen - res)
 				len = buflen - res;
 			res += access_process_vm(task, env_start,
-						 buffer+res, len, 0);
+						 buffer+res, len,
+						 FOLL_FORCE);
 			res = strnlen(buffer, res);
 		}
 	}
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
index c68ff3d..e49121e 100644
--- a/net/batman-adv/debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -20,8 +20,6 @@
 
 #include "main.h"
 
-#include <linux/kconfig.h>
-
 struct net_device;
 
 #define BATADV_DEBUGFS_SUBDIR "batman_adv"
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index c813568..e228842 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -21,8 +21,6 @@
    SOFTWARE IS DISCLAIMED.
 */
 
-#include <asm/unaligned.h>
-
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/mgmt.h>
@@ -973,33 +971,58 @@
 
 static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
 {
-	size_t name_len;
+	size_t complete_len;
+	size_t short_len;
 	int max_len;
 
 	max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
-	name_len = strlen(hdev->dev_name);
-	if (name_len > 0 && max_len > 0) {
+	complete_len = strlen(hdev->dev_name);
+	short_len = strlen(hdev->short_name);
 
-		if (name_len > max_len) {
-			name_len = max_len;
-			ptr[1] = EIR_NAME_SHORT;
-		} else
-			ptr[1] = EIR_NAME_COMPLETE;
+	/* no space left for name */
+	if (max_len < 1)
+		return ad_len;
 
-		ptr[0] = name_len + 1;
+	/* no name set */
+	if (!complete_len)
+		return ad_len;
 
-		memcpy(ptr + 2, hdev->dev_name, name_len);
+	/* complete name fits and is eq to max short name len or smaller */
+	if (complete_len <= max_len &&
+	    complete_len <= HCI_MAX_SHORT_NAME_LENGTH) {
+		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
+				       hdev->dev_name, complete_len);
+	}
 
-		ad_len += (name_len + 2);
-		ptr += (name_len + 2);
+	/* short name set and fits */
+	if (short_len && short_len <= max_len) {
+		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
+				       hdev->short_name, short_len);
+	}
+
+	/* no short name set so shorten complete name */
+	if (!short_len) {
+		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
+				       hdev->dev_name, max_len);
 	}
 
 	return ad_len;
 }
 
+static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
+{
+	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
+}
+
 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
 {
-	return append_local_name(hdev, ptr, 0);
+	u8 scan_rsp_len = 0;
+
+	if (hdev->appearance) {
+		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
+	}
+
+	return append_local_name(hdev, ptr, scan_rsp_len);
 }
 
 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
@@ -1016,18 +1039,13 @@
 	instance_flags = adv_instance->flags;
 
 	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
-		ptr[0] = 3;
-		ptr[1] = EIR_APPEARANCE;
-		put_unaligned_le16(hdev->appearance, ptr + 2);
-		scan_rsp_len += 4;
-		ptr += 4;
+		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
 	}
 
-	memcpy(ptr, adv_instance->scan_rsp_data,
+	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
 	       adv_instance->scan_rsp_len);
 
 	scan_rsp_len += adv_instance->scan_rsp_len;
-	ptr += adv_instance->scan_rsp_len;
 
 	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
 		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index ac1e110..6b06629 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -20,6 +20,8 @@
    SOFTWARE IS DISCLAIMED.
 */
 
+#include <asm/unaligned.h>
+
 #define hci_req_sync_lock(hdev)   mutex_lock(&hdev->req_lock)
 #define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)
 
@@ -103,3 +105,24 @@
 
 void hci_request_setup(struct hci_dev *hdev);
 void hci_request_cancel_all(struct hci_dev *hdev);
+
+static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type,
+				  u8 *data, u8 data_len)
+{
+	eir[eir_len++] = sizeof(type) + data_len;
+	eir[eir_len++] = type;
+	memcpy(&eir[eir_len], data, data_len);
+	eir_len += data_len;
+
+	return eir_len;
+}
+
+static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data)
+{
+	eir[eir_len++] = sizeof(type) + sizeof(data);
+	eir[eir_len++] = type;
+	put_unaligned_le16(data, &eir[eir_len]);
+	eir_len += sizeof(data);
+
+	return eir_len;
+}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 19b8a5e..7360380 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -867,27 +867,6 @@
 				 sizeof(rp));
 }
 
-static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
-				  u8 data_len)
-{
-	eir[eir_len++] = sizeof(type) + data_len;
-	eir[eir_len++] = type;
-	memcpy(&eir[eir_len], data, data_len);
-	eir_len += data_len;
-
-	return eir_len;
-}
-
-static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data)
-{
-	eir[eir_len++] = sizeof(type) + sizeof(data);
-	eir[eir_len++] = type;
-	put_unaligned_le16(data, &eir[eir_len]);
-	eir_len += sizeof(data);
-
-	return eir_len;
-}
-
 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
 {
 	u16 eir_len = 0;
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index e657258..8bd5696 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -217,6 +217,7 @@
 #endif
 	&brport_attr_proxyarp,
 	&brport_attr_proxyarp_wifi,
+	&brport_attr_multicast_flood,
 	NULL
 };
 
diff --git a/net/ceph/Makefile b/net/ceph/Makefile
index 84cbed6..6a51809 100644
--- a/net/ceph/Makefile
+++ b/net/ceph/Makefile
@@ -5,6 +5,7 @@
 
 libceph-y := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
 	mon_client.o \
+	cls_lock_client.o \
 	osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \
 	debugfs.o \
 	auth.o auth_none.o \
diff --git a/net/ceph/auth.c b/net/ceph/auth.c
index 2bc5965..c822b3a 100644
--- a/net/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -82,7 +82,10 @@
 	mutex_unlock(&ac->mutex);
 }
 
-int ceph_entity_name_encode(const char *name, void **p, void *end)
+/*
+ * EntityName, not to be confused with entity_name_t
+ */
+int ceph_auth_entity_name_encode(const char *name, void **p, void *end)
 {
 	int len = strlen(name);
 
@@ -111,7 +114,7 @@
 	monhdr->session_mon = cpu_to_le16(-1);
 	monhdr->session_mon_tid = 0;
 
-	ceph_encode_32(&p, 0);  /* no protocol, yet */
+	ceph_encode_32(&p, CEPH_AUTH_UNKNOWN);  /* no protocol, yet */
 
 	lenp = p;
 	p += sizeof(u32);
@@ -124,7 +127,7 @@
 	for (i = 0; i < num; i++)
 		ceph_encode_32(&p, supported_protocols[i]);
 
-	ret = ceph_entity_name_encode(ac->name, &p, end);
+	ret = ceph_auth_entity_name_encode(ac->name, &p, end);
 	if (ret < 0)
 		goto out;
 	ceph_decode_need(&p, end, sizeof(u64), bad);
@@ -259,9 +262,7 @@
 	int ret = 0;
 
 	mutex_lock(&ac->mutex);
-	if (!ac->protocol)
-		ret = ceph_auth_build_hello(ac, msg_buf, msg_len);
-	else if (ac->ops->should_authenticate(ac))
+	if (ac->ops->should_authenticate(ac))
 		ret = ceph_build_auth_request(ac, msg_buf, msg_len);
 	mutex_unlock(&ac->mutex);
 	return ret;
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
index 5f836f0..df45e46 100644
--- a/net/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -46,7 +46,7 @@
 	int ret;
 
 	ceph_encode_8_safe(&p, end, 1, e_range);
-	ret = ceph_entity_name_encode(ac->name, &p, end);
+	ret = ceph_auth_entity_name_encode(ac->name, &p, end);
 	if (ret < 0)
 		return ret;
 
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index bddfcf6..464e885 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -566,11 +566,17 @@
 }
 EXPORT_SYMBOL(ceph_print_client_options);
 
-u64 ceph_client_id(struct ceph_client *client)
+struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client)
+{
+	return &client->msgr.inst.addr;
+}
+EXPORT_SYMBOL(ceph_client_addr);
+
+u64 ceph_client_gid(struct ceph_client *client)
 {
 	return client->monc.auth->global_id;
 }
-EXPORT_SYMBOL(ceph_client_id);
+EXPORT_SYMBOL(ceph_client_gid);
 
 /*
  * create a fresh client instance
@@ -685,7 +691,8 @@
 			return client->auth_err;
 	}
 
-	pr_info("client%llu fsid %pU\n", ceph_client_id(client), &client->fsid);
+	pr_info("client%llu fsid %pU\n", ceph_client_gid(client),
+		&client->fsid);
 	ceph_debugfs_client_init(client);
 
 	return 0;
diff --git a/net/ceph/ceph_strings.c b/net/ceph/ceph_strings.c
index 3773a4f..19b7d8a 100644
--- a/net/ceph/ceph_strings.c
+++ b/net/ceph/ceph_strings.c
@@ -15,6 +15,7 @@
 	default: return "unknown";
 	}
 }
+EXPORT_SYMBOL(ceph_entity_type_name);
 
 const char *ceph_osd_op_name(int op)
 {
diff --git a/net/ceph/cls_lock_client.c b/net/ceph/cls_lock_client.c
new file mode 100644
index 0000000..50f040f
--- /dev/null
+++ b/net/ceph/cls_lock_client.c
@@ -0,0 +1,325 @@
+#include <linux/ceph/ceph_debug.h>
+
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include <linux/ceph/cls_lock_client.h>
+#include <linux/ceph/decode.h>
+
+/**
+ * ceph_cls_lock - grab rados lock for object
+ * @oid, @oloc: object to lock
+ * @lock_name: the name of the lock
+ * @type: lock type (CEPH_CLS_LOCK_EXCLUSIVE or CEPH_CLS_LOCK_SHARED)
+ * @cookie: user-defined identifier for this instance of the lock
+ * @tag: user-defined tag
+ * @desc: user-defined lock description
+ * @flags: lock flags
+ *
+ * All operations on the same lock should use the same tag.
+ */
+int ceph_cls_lock(struct ceph_osd_client *osdc,
+		  struct ceph_object_id *oid,
+		  struct ceph_object_locator *oloc,
+		  char *lock_name, u8 type, char *cookie,
+		  char *tag, char *desc, u8 flags)
+{
+	int lock_op_buf_size;
+	int name_len = strlen(lock_name);
+	int cookie_len = strlen(cookie);
+	int tag_len = strlen(tag);
+	int desc_len = strlen(desc);
+	void *p, *end;
+	struct page *lock_op_page;
+	struct timespec mtime;
+	int ret;
+
+	lock_op_buf_size = name_len + sizeof(__le32) +
+			   cookie_len + sizeof(__le32) +
+			   tag_len + sizeof(__le32) +
+			   desc_len + sizeof(__le32) +
+			   sizeof(struct ceph_timespec) +
+			   /* flag and type */
+			   sizeof(u8) + sizeof(u8) +
+			   CEPH_ENCODING_START_BLK_LEN;
+	if (lock_op_buf_size > PAGE_SIZE)
+		return -E2BIG;
+
+	lock_op_page = alloc_page(GFP_NOIO);
+	if (!lock_op_page)
+		return -ENOMEM;
+
+	p = page_address(lock_op_page);
+	end = p + lock_op_buf_size;
+
+	/* encode cls_lock_lock_op struct */
+	ceph_start_encoding(&p, 1, 1,
+			    lock_op_buf_size - CEPH_ENCODING_START_BLK_LEN);
+	ceph_encode_string(&p, end, lock_name, name_len);
+	ceph_encode_8(&p, type);
+	ceph_encode_string(&p, end, cookie, cookie_len);
+	ceph_encode_string(&p, end, tag, tag_len);
+	ceph_encode_string(&p, end, desc, desc_len);
+	/* only support infinite duration */
+	memset(&mtime, 0, sizeof(mtime));
+	ceph_encode_timespec(p, &mtime);
+	p += sizeof(struct ceph_timespec);
+	ceph_encode_8(&p, flags);
+
+	dout("%s lock_name %s type %d cookie %s tag %s desc %s flags 0x%x\n",
+	     __func__, lock_name, type, cookie, tag, desc, flags);
+	ret = ceph_osdc_call(osdc, oid, oloc, "lock", "lock",
+			     CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
+			     lock_op_page, lock_op_buf_size, NULL, NULL);
+
+	dout("%s: status %d\n", __func__, ret);
+	__free_page(lock_op_page);
+	return ret;
+}
+EXPORT_SYMBOL(ceph_cls_lock);
+
+/**
+ * ceph_cls_unlock - release rados lock for object
+ * @oid, @oloc: object to lock
+ * @lock_name: the name of the lock
+ * @cookie: user-defined identifier for this instance of the lock
+ */
+int ceph_cls_unlock(struct ceph_osd_client *osdc,
+		    struct ceph_object_id *oid,
+		    struct ceph_object_locator *oloc,
+		    char *lock_name, char *cookie)
+{
+	int unlock_op_buf_size;
+	int name_len = strlen(lock_name);
+	int cookie_len = strlen(cookie);
+	void *p, *end;
+	struct page *unlock_op_page;
+	int ret;
+
+	unlock_op_buf_size = name_len + sizeof(__le32) +
+			     cookie_len + sizeof(__le32) +
+			     CEPH_ENCODING_START_BLK_LEN;
+	if (unlock_op_buf_size > PAGE_SIZE)
+		return -E2BIG;
+
+	unlock_op_page = alloc_page(GFP_NOIO);
+	if (!unlock_op_page)
+		return -ENOMEM;
+
+	p = page_address(unlock_op_page);
+	end = p + unlock_op_buf_size;
+
+	/* encode cls_lock_unlock_op struct */
+	ceph_start_encoding(&p, 1, 1,
+			    unlock_op_buf_size - CEPH_ENCODING_START_BLK_LEN);
+	ceph_encode_string(&p, end, lock_name, name_len);
+	ceph_encode_string(&p, end, cookie, cookie_len);
+
+	dout("%s lock_name %s cookie %s\n", __func__, lock_name, cookie);
+	ret = ceph_osdc_call(osdc, oid, oloc, "lock", "unlock",
+			     CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
+			     unlock_op_page, unlock_op_buf_size, NULL, NULL);
+
+	dout("%s: status %d\n", __func__, ret);
+	__free_page(unlock_op_page);
+	return ret;
+}
+EXPORT_SYMBOL(ceph_cls_unlock);
+
+/**
+ * ceph_cls_break_lock - release rados lock for object for specified client
+ * @oid, @oloc: object to lock
+ * @lock_name: the name of the lock
+ * @cookie: user-defined identifier for this instance of the lock
+ * @locker: current lock owner
+ */
+int ceph_cls_break_lock(struct ceph_osd_client *osdc,
+			struct ceph_object_id *oid,
+			struct ceph_object_locator *oloc,
+			char *lock_name, char *cookie,
+			struct ceph_entity_name *locker)
+{
+	int break_op_buf_size;
+	int name_len = strlen(lock_name);
+	int cookie_len = strlen(cookie);
+	struct page *break_op_page;
+	void *p, *end;
+	int ret;
+
+	break_op_buf_size = name_len + sizeof(__le32) +
+			    cookie_len + sizeof(__le32) +
+			    sizeof(u8) + sizeof(__le64) +
+			    CEPH_ENCODING_START_BLK_LEN;
+	if (break_op_buf_size > PAGE_SIZE)
+		return -E2BIG;
+
+	break_op_page = alloc_page(GFP_NOIO);
+	if (!break_op_page)
+		return -ENOMEM;
+
+	p = page_address(break_op_page);
+	end = p + break_op_buf_size;
+
+	/* encode cls_lock_break_op struct */
+	ceph_start_encoding(&p, 1, 1,
+			    break_op_buf_size - CEPH_ENCODING_START_BLK_LEN);
+	ceph_encode_string(&p, end, lock_name, name_len);
+	ceph_encode_copy(&p, locker, sizeof(*locker));
+	ceph_encode_string(&p, end, cookie, cookie_len);
+
+	dout("%s lock_name %s cookie %s locker %s%llu\n", __func__, lock_name,
+	     cookie, ENTITY_NAME(*locker));
+	ret = ceph_osdc_call(osdc, oid, oloc, "lock", "break_lock",
+			     CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
+			     break_op_page, break_op_buf_size, NULL, NULL);
+
+	dout("%s: status %d\n", __func__, ret);
+	__free_page(break_op_page);
+	return ret;
+}
+EXPORT_SYMBOL(ceph_cls_break_lock);
+
+void ceph_free_lockers(struct ceph_locker *lockers, u32 num_lockers)
+{
+	int i;
+
+	for (i = 0; i < num_lockers; i++)
+		kfree(lockers[i].id.cookie);
+	kfree(lockers);
+}
+EXPORT_SYMBOL(ceph_free_lockers);
+
+static int decode_locker(void **p, void *end, struct ceph_locker *locker)
+{
+	u8 struct_v;
+	u32 len;
+	char *s;
+	int ret;
+
+	ret = ceph_start_decoding(p, end, 1, "locker_id_t", &struct_v, &len);
+	if (ret)
+		return ret;
+
+	ceph_decode_copy(p, &locker->id.name, sizeof(locker->id.name));
+	s = ceph_extract_encoded_string(p, end, NULL, GFP_NOIO);
+	if (IS_ERR(s))
+		return PTR_ERR(s);
+
+	locker->id.cookie = s;
+
+	ret = ceph_start_decoding(p, end, 1, "locker_info_t", &struct_v, &len);
+	if (ret)
+		return ret;
+
+	*p += sizeof(struct ceph_timespec); /* skip expiration */
+	ceph_decode_copy(p, &locker->info.addr, sizeof(locker->info.addr));
+	ceph_decode_addr(&locker->info.addr);
+	len = ceph_decode_32(p);
+	*p += len; /* skip description */
+
+	dout("%s %s%llu cookie %s addr %s\n", __func__,
+	     ENTITY_NAME(locker->id.name), locker->id.cookie,
+	     ceph_pr_addr(&locker->info.addr.in_addr));
+	return 0;
+}
+
+static int decode_lockers(void **p, void *end, u8 *type, char **tag,
+			  struct ceph_locker **lockers, u32 *num_lockers)
+{
+	u8 struct_v;
+	u32 struct_len;
+	char *s;
+	int i;
+	int ret;
+
+	ret = ceph_start_decoding(p, end, 1, "cls_lock_get_info_reply",
+				  &struct_v, &struct_len);
+	if (ret)
+		return ret;
+
+	*num_lockers = ceph_decode_32(p);
+	*lockers = kcalloc(*num_lockers, sizeof(**lockers), GFP_NOIO);
+	if (!*lockers)
+		return -ENOMEM;
+
+	for (i = 0; i < *num_lockers; i++) {
+		ret = decode_locker(p, end, *lockers + i);
+		if (ret)
+			goto err_free_lockers;
+	}
+
+	*type = ceph_decode_8(p);
+	s = ceph_extract_encoded_string(p, end, NULL, GFP_NOIO);
+	if (IS_ERR(s)) {
+		ret = PTR_ERR(s);
+		goto err_free_lockers;
+	}
+
+	*tag = s;
+	return 0;
+
+err_free_lockers:
+	ceph_free_lockers(*lockers, *num_lockers);
+	return ret;
+}
+
+/*
+ * On success, the caller is responsible for:
+ *
+ *     kfree(tag);
+ *     ceph_free_lockers(lockers, num_lockers);
+ */
+int ceph_cls_lock_info(struct ceph_osd_client *osdc,
+		       struct ceph_object_id *oid,
+		       struct ceph_object_locator *oloc,
+		       char *lock_name, u8 *type, char **tag,
+		       struct ceph_locker **lockers, u32 *num_lockers)
+{
+	int get_info_op_buf_size;
+	int name_len = strlen(lock_name);
+	struct page *get_info_op_page, *reply_page;
+	size_t reply_len;
+	void *p, *end;
+	int ret;
+
+	get_info_op_buf_size = name_len + sizeof(__le32) +
+			       CEPH_ENCODING_START_BLK_LEN;
+	if (get_info_op_buf_size > PAGE_SIZE)
+		return -E2BIG;
+
+	get_info_op_page = alloc_page(GFP_NOIO);
+	if (!get_info_op_page)
+		return -ENOMEM;
+
+	reply_page = alloc_page(GFP_NOIO);
+	if (!reply_page) {
+		__free_page(get_info_op_page);
+		return -ENOMEM;
+	}
+
+	p = page_address(get_info_op_page);
+	end = p + get_info_op_buf_size;
+
+	/* encode cls_lock_get_info_op struct */
+	ceph_start_encoding(&p, 1, 1,
+			    get_info_op_buf_size - CEPH_ENCODING_START_BLK_LEN);
+	ceph_encode_string(&p, end, lock_name, name_len);
+
+	dout("%s lock_name %s\n", __func__, lock_name);
+	ret = ceph_osdc_call(osdc, oid, oloc, "lock", "get_info",
+			     CEPH_OSD_FLAG_READ, get_info_op_page,
+			     get_info_op_buf_size, reply_page, &reply_len);
+
+	dout("%s: status %d\n", __func__, ret);
+	if (ret >= 0) {
+		p = page_address(reply_page);
+		end = p + reply_len;
+
+		ret = decode_lockers(&p, end, type, tag, lockers, num_lockers);
+	}
+
+	__free_page(get_info_op_page);
+	__free_page(reply_page);
+	return ret;
+}
+EXPORT_SYMBOL(ceph_cls_lock_info);
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 5fcfb98..a421e90 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -245,7 +245,7 @@
 /* compute 2^44*log2(input+1) */
 static __u64 crush_ln(unsigned int xin)
 {
-	unsigned int x = xin, x1;
+	unsigned int x = xin;
 	int iexpon, index1, index2;
 	__u64 RH, LH, LL, xl64, result;
 
@@ -253,9 +253,15 @@
 
 	/* normalize input */
 	iexpon = 15;
-	while (!(x & 0x18000)) {
-		x <<= 1;
-		iexpon--;
+
+	/*
+	 * figure out number of bits we need to shift and
+	 * do it in one step instead of iteratively
+	 */
+	if (!(x & 0x18000)) {
+		int bits = __builtin_clz(x & 0x1FFFF) - 16;
+		x <<= bits;
+		iexpon = 15 - bits;
 	}
 
 	index1 = (x >> 8) << 1;
@@ -267,12 +273,11 @@
 	/* RH*x ~ 2^48 * (2^15 + xf), xf<2^8 */
 	xl64 = (__s64)x * RH;
 	xl64 >>= 48;
-	x1 = xl64;
 
 	result = iexpon;
 	result <<= (12 + 32);
 
-	index2 = x1 & 0xff;
+	index2 = xl64 & 0xff;
 	/* LL ~ 2^48*log2(1.0+index2/2^15) */
 	LL = __LL_tbl[index2];
 
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index ef34a02..a8effc8 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -835,6 +835,83 @@
 }
 EXPORT_SYMBOL(ceph_monc_get_version_async);
 
+static void handle_command_ack(struct ceph_mon_client *monc,
+			       struct ceph_msg *msg)
+{
+	struct ceph_mon_generic_request *req;
+	void *p = msg->front.iov_base;
+	void *const end = p + msg->front_alloc_len;
+	u64 tid = le64_to_cpu(msg->hdr.tid);
+
+	dout("%s msg %p tid %llu\n", __func__, msg, tid);
+
+	ceph_decode_need(&p, end, sizeof(struct ceph_mon_request_header) +
+							    sizeof(u32), bad);
+	p += sizeof(struct ceph_mon_request_header);
+
+	mutex_lock(&monc->mutex);
+	req = lookup_generic_request(&monc->generic_request_tree, tid);
+	if (!req) {
+		mutex_unlock(&monc->mutex);
+		return;
+	}
+
+	req->result = ceph_decode_32(&p);
+	__finish_generic_request(req);
+	mutex_unlock(&monc->mutex);
+
+	complete_generic_request(req);
+	return;
+
+bad:
+	pr_err("corrupt mon_command ack, tid %llu\n", tid);
+	ceph_msg_dump(msg);
+}
+
+int ceph_monc_blacklist_add(struct ceph_mon_client *monc,
+			    struct ceph_entity_addr *client_addr)
+{
+	struct ceph_mon_generic_request *req;
+	struct ceph_mon_command *h;
+	int ret = -ENOMEM;
+	int len;
+
+	req = alloc_generic_request(monc, GFP_NOIO);
+	if (!req)
+		goto out;
+
+	req->request = ceph_msg_new(CEPH_MSG_MON_COMMAND, 256, GFP_NOIO, true);
+	if (!req->request)
+		goto out;
+
+	req->reply = ceph_msg_new(CEPH_MSG_MON_COMMAND_ACK, 512, GFP_NOIO,
+				  true);
+	if (!req->reply)
+		goto out;
+
+	mutex_lock(&monc->mutex);
+	register_generic_request(req);
+	h = req->request->front.iov_base;
+	h->monhdr.have_version = 0;
+	h->monhdr.session_mon = cpu_to_le16(-1);
+	h->monhdr.session_mon_tid = 0;
+	h->fsid = monc->monmap->fsid;
+	h->num_strs = cpu_to_le32(1);
+	len = sprintf(h->str, "{ \"prefix\": \"osd blacklist\", \
+		                 \"blacklistop\": \"add\", \
+				 \"addr\": \"%pISpc/%u\" }",
+		      &client_addr->in_addr, le32_to_cpu(client_addr->nonce));
+	h->str_len = cpu_to_le32(len);
+	send_generic_request(monc, req);
+	mutex_unlock(&monc->mutex);
+
+	ret = wait_generic_request(req);
+out:
+	put_generic_request(req);
+	return ret;
+}
+EXPORT_SYMBOL(ceph_monc_blacklist_add);
+
 /*
  * Resend pending generic requests.
  */
@@ -1139,6 +1216,10 @@
 		handle_get_version_reply(monc, msg);
 		break;
 
+	case CEPH_MSG_MON_COMMAND_ACK:
+		handle_command_ack(monc, msg);
+		break;
+
 	case CEPH_MSG_MON_MAP:
 		ceph_monc_handle_map(monc, msg);
 		break;
@@ -1178,6 +1259,7 @@
 		m = ceph_msg_get(monc->m_subscribe_ack);
 		break;
 	case CEPH_MSG_STATFS_REPLY:
+	case CEPH_MSG_MON_COMMAND_ACK:
 		return get_generic_reply(con, hdr, skip);
 	case CEPH_MSG_AUTH_REPLY:
 		m = ceph_msg_get(monc->m_auth_reply);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index a97e7b5..d9bf7a1 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -338,6 +338,9 @@
 		ceph_osd_data_release(&op->notify.request_data);
 		ceph_osd_data_release(&op->notify.response_data);
 		break;
+	case CEPH_OSD_OP_LIST_WATCHERS:
+		ceph_osd_data_release(&op->list_watchers.response_data);
+		break;
 	default:
 		break;
 	}
@@ -863,6 +866,8 @@
 	case CEPH_OSD_OP_NOTIFY:
 		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
 		break;
+	case CEPH_OSD_OP_LIST_WATCHERS:
+		break;
 	case CEPH_OSD_OP_SETALLOCHINT:
 		dst->alloc_hint.expected_object_size =
 		    cpu_to_le64(src->alloc_hint.expected_object_size);
@@ -1445,6 +1450,10 @@
 			ceph_osdc_msg_data_add(req->r_reply,
 					       &op->extent.osd_data);
 			break;
+		case CEPH_OSD_OP_LIST_WATCHERS:
+			ceph_osdc_msg_data_add(req->r_reply,
+					       &op->list_watchers.response_data);
+			break;
 
 		/* both */
 		case CEPH_OSD_OP_CALL:
@@ -3891,12 +3900,121 @@
 	return ret;
 }
 
+static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
+{
+	u8 struct_v;
+	u32 struct_len;
+	int ret;
+
+	ret = ceph_start_decoding(p, end, 2, "watch_item_t",
+				  &struct_v, &struct_len);
+	if (ret)
+		return ret;
+
+	ceph_decode_copy(p, &item->name, sizeof(item->name));
+	item->cookie = ceph_decode_64(p);
+	*p += 4; /* skip timeout_seconds */
+	if (struct_v >= 2) {
+		ceph_decode_copy(p, &item->addr, sizeof(item->addr));
+		ceph_decode_addr(&item->addr);
+	}
+
+	dout("%s %s%llu cookie %llu addr %s\n", __func__,
+	     ENTITY_NAME(item->name), item->cookie,
+	     ceph_pr_addr(&item->addr.in_addr));
+	return 0;
+}
+
+static int decode_watchers(void **p, void *end,
+			   struct ceph_watch_item **watchers,
+			   u32 *num_watchers)
+{
+	u8 struct_v;
+	u32 struct_len;
+	int i;
+	int ret;
+
+	ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
+				  &struct_v, &struct_len);
+	if (ret)
+		return ret;
+
+	*num_watchers = ceph_decode_32(p);
+	*watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
+	if (!*watchers)
+		return -ENOMEM;
+
+	for (i = 0; i < *num_watchers; i++) {
+		ret = decode_watcher(p, end, *watchers + i);
+		if (ret) {
+			kfree(*watchers);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * On success, the caller is responsible for:
+ *
+ *     kfree(watchers);
+ */
+int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
+			    struct ceph_object_id *oid,
+			    struct ceph_object_locator *oloc,
+			    struct ceph_watch_item **watchers,
+			    u32 *num_watchers)
+{
+	struct ceph_osd_request *req;
+	struct page **pages;
+	int ret;
+
+	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
+	if (!req)
+		return -ENOMEM;
+
+	ceph_oid_copy(&req->r_base_oid, oid);
+	ceph_oloc_copy(&req->r_base_oloc, oloc);
+	req->r_flags = CEPH_OSD_FLAG_READ;
+
+	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
+	if (ret)
+		goto out_put_req;
+
+	pages = ceph_alloc_page_vector(1, GFP_NOIO);
+	if (IS_ERR(pages)) {
+		ret = PTR_ERR(pages);
+		goto out_put_req;
+	}
+
+	osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
+	ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
+						 response_data),
+				 pages, PAGE_SIZE, 0, false, true);
+
+	ceph_osdc_start_request(osdc, req, false);
+	ret = ceph_osdc_wait_request(osdc, req);
+	if (ret >= 0) {
+		void *p = page_address(pages[0]);
+		void *const end = p + req->r_ops[0].outdata_len;
+
+		ret = decode_watchers(&p, end, watchers, num_watchers);
+	}
+
+out_put_req:
+	ceph_osdc_put_request(req);
+	return ret;
+}
+EXPORT_SYMBOL(ceph_osdc_list_watchers);
+
 /*
  * Call all pending notify callbacks - for use after a watch is
  * unregistered, to make sure no more callbacks for it will be invoked
  */
 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
 {
+	dout("%s osdc %p\n", __func__, osdc);
 	flush_workqueue(osdc->notify_wq);
 }
 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
@@ -3910,6 +4028,57 @@
 EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
 
 /*
+ * Execute an OSD class method on an object.
+ *
+ * @flags: CEPH_OSD_FLAG_*
+ * @resp_len: out param for reply length
+ */
+int ceph_osdc_call(struct ceph_osd_client *osdc,
+		   struct ceph_object_id *oid,
+		   struct ceph_object_locator *oloc,
+		   const char *class, const char *method,
+		   unsigned int flags,
+		   struct page *req_page, size_t req_len,
+		   struct page *resp_page, size_t *resp_len)
+{
+	struct ceph_osd_request *req;
+	int ret;
+
+	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
+	if (!req)
+		return -ENOMEM;
+
+	ceph_oid_copy(&req->r_base_oid, oid);
+	ceph_oloc_copy(&req->r_base_oloc, oloc);
+	req->r_flags = flags;
+
+	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
+	if (ret)
+		goto out_put_req;
+
+	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
+	if (req_page)
+		osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
+						  0, false, false);
+	if (resp_page)
+		osd_req_op_cls_response_data_pages(req, 0, &resp_page,
+						   PAGE_SIZE, 0, false, false);
+
+	ceph_osdc_start_request(osdc, req, false);
+	ret = ceph_osdc_wait_request(osdc, req);
+	if (ret >= 0) {
+		ret = req->r_ops[0].rval;
+		if (resp_page)
+			*resp_len = req->r_ops[0].outdata_len;
+	}
+
+out_put_req:
+	ceph_osdc_put_request(req);
+	return ret;
+}
+EXPORT_SYMBOL(ceph_osdc_call);
+
+/*
  * init, shutdown
  */
 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 00d2601..1a7c9a7 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -26,7 +26,7 @@
 	while (got < num_pages) {
 		rc = get_user_pages_unlocked(
 		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
-		    num_pages - got, write_page, 0, pages + got);
+		    num_pages - got, pages + got, write_page ? FOLL_WRITE : 0);
 		if (rc < 0)
 			break;
 		BUG_ON(rc == 0);
diff --git a/net/core/dev.c b/net/core/dev.c
index f1fe26f..4bc19a1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3845,7 +3845,7 @@
 }
 EXPORT_SYMBOL(netif_rx_ni);
 
-static void net_tx_action(struct softirq_action *h)
+static __latent_entropy void net_tx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 
@@ -5198,7 +5198,7 @@
 	return work;
 }
 
-static void net_rx_action(struct softirq_action *h)
+static __latent_entropy void net_rx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 	unsigned long time_limit = jiffies + 2;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b06d2f4..fb7348f 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1144,6 +1144,8 @@
 	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
 		return 0;
 
+	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
+
 	vf_mac.vf =
 		vf_vlan.vf =
 		vf_vlan_info.vf =
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f2be689..62d4d90 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2265,7 +2265,8 @@
 	if (err) {
 		res.fi = NULL;
 		res.table = NULL;
-		if (fl4->flowi4_oif) {
+		if (fl4->flowi4_oif &&
+		    !netif_index_is_l3_master(net, fl4->flowi4_oif)) {
 			/* Apparently, routing tables are wrong. Assume,
 			   that the destination is on link.
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index cbd9343..d8983e1 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5729,6 +5729,7 @@
 	return ret;
 }
 
+static int minus_one = -1;
 static const int one = 1;
 static const int two_five_five = 255;
 
@@ -5789,7 +5790,8 @@
 		.data		= &ipv6_devconf.rtr_solicits,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &minus_one,
 	},
 	{
 		.procname	= "router_solicitation_interval",
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 54cf719..5a27ab4 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1190,6 +1190,16 @@
 	return NULL;
 }
 
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+	/* We need to move header back to the beginning if xfrm6_policy_check()
+	 * and tcp_v6_fill_cb() are going to be called again.
+	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+	 */
+	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+		sizeof(struct inet6_skb_parm));
+}
+
 /* The socket must have it's spinlock held when we get
  * here, unless it is a TCP_LISTEN socket.
  *
@@ -1319,6 +1329,7 @@
 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
 			skb_set_owner_r(opt_skb, sk);
+			tcp_v6_restore_cb(opt_skb);
 			opt_skb = xchg(&np->pktoptions, opt_skb);
 		} else {
 			__kfree_skb(opt_skb);
@@ -1352,15 +1363,6 @@
 	TCP_SKB_CB(skb)->sacked = 0;
 }
 
-static void tcp_v6_restore_cb(struct sk_buff *skb)
-{
-	/* We need to move header back to the beginning if xfrm6_policy_check()
-	 * and tcp_v6_fill_cb() are going to be called again.
-	 */
-	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
-		sizeof(struct inet6_skb_parm));
-}
-
 static int tcp_v6_rcv(struct sk_buff *skb)
 {
 	const struct tcphdr *th;
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index c9d90eb..fcb5d1d 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -65,49 +65,24 @@
 #define nf_entry_dereference(e) \
 	rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))
 
-static struct nf_hook_entry *nf_hook_entry_head(struct net *net,
-						const struct nf_hook_ops *reg)
+static struct nf_hook_entry __rcu **nf_hook_entry_head(struct net *net, const struct nf_hook_ops *reg)
 {
-	struct nf_hook_entry *hook_head = NULL;
-
 	if (reg->pf != NFPROTO_NETDEV)
-		hook_head = nf_entry_dereference(net->nf.hooks[reg->pf]
-						 [reg->hooknum]);
-	else if (reg->hooknum == NF_NETDEV_INGRESS) {
-#ifdef CONFIG_NETFILTER_INGRESS
-		if (reg->dev && dev_net(reg->dev) == net)
-			hook_head =
-				nf_entry_dereference(
-					reg->dev->nf_hooks_ingress);
-#endif
-	}
-	return hook_head;
-}
+		return net->nf.hooks[reg->pf]+reg->hooknum;
 
-/* must hold nf_hook_mutex */
-static void nf_set_hooks_head(struct net *net, const struct nf_hook_ops *reg,
-			      struct nf_hook_entry *entry)
-{
-	switch (reg->pf) {
-	case NFPROTO_NETDEV:
 #ifdef CONFIG_NETFILTER_INGRESS
-		/* We already checked in nf_register_net_hook() that this is
-		 * used from ingress.
-		 */
-		rcu_assign_pointer(reg->dev->nf_hooks_ingress, entry);
-#endif
-		break;
-	default:
-		rcu_assign_pointer(net->nf.hooks[reg->pf][reg->hooknum],
-				   entry);
-		break;
+	if (reg->hooknum == NF_NETDEV_INGRESS) {
+		if (reg->dev && dev_net(reg->dev) == net)
+			return &reg->dev->nf_hooks_ingress;
 	}
+#endif
+	return NULL;
 }
 
 int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
 {
-	struct nf_hook_entry *hooks_entry;
-	struct nf_hook_entry *entry;
+	struct nf_hook_entry __rcu **pp;
+	struct nf_hook_entry *entry, *p;
 
 	if (reg->pf == NFPROTO_NETDEV) {
 #ifndef CONFIG_NETFILTER_INGRESS
@@ -119,6 +94,10 @@
 			return -EINVAL;
 	}
 
+	pp = nf_hook_entry_head(net, reg);
+	if (!pp)
+		return -EINVAL;
+
 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
 		return -ENOMEM;
@@ -128,26 +107,15 @@
 	entry->next	= NULL;
 
 	mutex_lock(&nf_hook_mutex);
-	hooks_entry = nf_hook_entry_head(net, reg);
 
-	if (hooks_entry && hooks_entry->orig_ops->priority > reg->priority) {
-		/* This is the case where we need to insert at the head */
-		entry->next = hooks_entry;
-		hooks_entry = NULL;
+	/* Find the spot in the list */
+	while ((p = nf_entry_dereference(*pp)) != NULL) {
+		if (reg->priority < p->orig_ops->priority)
+			break;
+		pp = &p->next;
 	}
-
-	while (hooks_entry &&
-		reg->priority >= hooks_entry->orig_ops->priority &&
-		nf_entry_dereference(hooks_entry->next)) {
-		hooks_entry = nf_entry_dereference(hooks_entry->next);
-	}
-
-	if (hooks_entry) {
-		entry->next = nf_entry_dereference(hooks_entry->next);
-		rcu_assign_pointer(hooks_entry->next, entry);
-	} else {
-		nf_set_hooks_head(net, reg, entry);
-	}
+	rcu_assign_pointer(entry->next, p);
+	rcu_assign_pointer(*pp, entry);
 
 	mutex_unlock(&nf_hook_mutex);
 #ifdef CONFIG_NETFILTER_INGRESS
@@ -163,33 +131,23 @@
 
 void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
 {
-	struct nf_hook_entry *hooks_entry;
+	struct nf_hook_entry __rcu **pp;
+	struct nf_hook_entry *p;
+
+	pp = nf_hook_entry_head(net, reg);
+	if (WARN_ON_ONCE(!pp))
+		return;
 
 	mutex_lock(&nf_hook_mutex);
-	hooks_entry = nf_hook_entry_head(net, reg);
-	if (hooks_entry && hooks_entry->orig_ops == reg) {
-		nf_set_hooks_head(net, reg,
-				  nf_entry_dereference(hooks_entry->next));
-		goto unlock;
-	}
-	while (hooks_entry && nf_entry_dereference(hooks_entry->next)) {
-		struct nf_hook_entry *next =
-			nf_entry_dereference(hooks_entry->next);
-		struct nf_hook_entry *nnext;
-
-		if (next->orig_ops != reg) {
-			hooks_entry = next;
-			continue;
+	while ((p = nf_entry_dereference(*pp)) != NULL) {
+		if (p->orig_ops == reg) {
+			rcu_assign_pointer(*pp, p->next);
+			break;
 		}
-		nnext = nf_entry_dereference(next->next);
-		rcu_assign_pointer(hooks_entry->next, nnext);
-		hooks_entry = next;
-		break;
+		pp = &p->next;
 	}
-
-unlock:
 	mutex_unlock(&nf_hook_mutex);
-	if (!hooks_entry) {
+	if (!p) {
 		WARN(1, "nf_unregister_net_hook: hook not found!\n");
 		return;
 	}
@@ -201,10 +159,10 @@
 	static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
 	synchronize_net();
-	nf_queue_nf_hook_drop(net, hooks_entry);
+	nf_queue_nf_hook_drop(net, p);
 	/* other cpu might still process nfqueue verdict that used reg */
 	synchronize_net();
-	kfree(hooks_entry);
+	kfree(p);
 }
 EXPORT_SYMBOL(nf_unregister_net_hook);
 
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 627f898..62bea45 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1832,7 +1832,7 @@
 	/* Record the max length of recvmsg() calls for future allocations */
 	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
 	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
-				     16384);
+				     SKB_WITH_OVERHEAD(32768));
 
 	copied = data_skb->len;
 	if (len < copied) {
@@ -2083,8 +2083,9 @@
 
 	if (alloc_min_size < nlk->max_recvmsg_len) {
 		alloc_size = nlk->max_recvmsg_len;
-		skb = alloc_skb(alloc_size, GFP_KERNEL |
-					    __GFP_NOWARN | __GFP_NORETRY);
+		skb = alloc_skb(alloc_size,
+				(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
+				__GFP_NOWARN | __GFP_NORETRY);
 	}
 	if (!skb) {
 		alloc_size = alloc_min_size;
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index c8c82e1..2208706 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -343,7 +343,7 @@
 	key->eth.cvlan.tci = 0;
 	key->eth.cvlan.tpid = 0;
 
-	if (likely(skb_vlan_tag_present(skb))) {
+	if (skb_vlan_tag_present(skb)) {
 		key->eth.vlan.tci = htons(skb->vlan_tci);
 		key->eth.vlan.tpid = skb->vlan_proto;
 	} else {
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 95c3614..e7da290 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -176,7 +176,7 @@
 
 	netdev->vlan_features = netdev->features;
 	netdev->hw_enc_features = netdev->features;
-	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
 	netdev->hw_features = netdev->features & ~NETIF_F_LLTX;
 
 	eth_hw_addr_random(netdev);
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 8f19843..7387418 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -485,7 +485,8 @@
 {
 	unsigned int length = skb->len - ETH_HLEN;
 
-	if (skb_vlan_tagged(skb))
+	if (!skb_vlan_tag_present(skb) &&
+	    eth_type_vlan(skb->protocol))
 		length -= VLAN_HLEN;
 
 	/* Don't subtract for multiple VLAN tags. Most (all?) drivers allow
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 33a4697..11db0d6 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3952,6 +3952,7 @@
 				}
 				if (msg == NETDEV_UNREGISTER) {
 					packet_cached_dev_reset(po);
+					fanout_release(sk);
 					po->ifindex = -1;
 					if (po->prot_hook.dev)
 						dev_put(po->prot_hook.dev);
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 44c9c2b..2d59c9b 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -678,9 +678,9 @@
 	sk->sk_state = RXRPC_CLOSE;
 	spin_unlock_bh(&sk->sk_receive_queue.lock);
 
-	if (rx->local && rx->local->service == rx) {
+	if (rx->local && rcu_access_pointer(rx->local->service) == rx) {
 		write_lock(&rx->local->services_lock);
-		rx->local->service = NULL;
+		rcu_assign_pointer(rx->local->service, NULL);
 		write_unlock(&rx->local->services_lock);
 	}
 
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index d38dffd..f60e355 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -398,6 +398,7 @@
 	RXRPC_CALL_EXPOSED,		/* The call was exposed to the world */
 	RXRPC_CALL_RX_LAST,		/* Received the last packet (at rxtx_top) */
 	RXRPC_CALL_TX_LAST,		/* Last packet in Tx buffer (at rxtx_top) */
+	RXRPC_CALL_SEND_PING,		/* A ping will need to be sent */
 	RXRPC_CALL_PINGING,		/* Ping in process */
 	RXRPC_CALL_RETRANS_TIMEOUT,	/* Retransmission due to timeout occurred */
 };
@@ -410,6 +411,7 @@
 	RXRPC_CALL_EV_ABORT,		/* need to generate abort */
 	RXRPC_CALL_EV_TIMER,		/* Timer expired */
 	RXRPC_CALL_EV_RESEND,		/* Tx resend required */
+	RXRPC_CALL_EV_PING,		/* Ping send required */
 };
 
 /*
@@ -466,6 +468,7 @@
 	struct rxrpc_sock __rcu	*socket;	/* socket responsible */
 	ktime_t			ack_at;		/* When deferred ACK needs to happen */
 	ktime_t			resend_at;	/* When next resend needs to happen */
+	ktime_t			ping_at;	/* When next to send a ping */
 	ktime_t			expire_at;	/* When the call times out */
 	struct timer_list	timer;		/* Combined event timer */
 	struct work_struct	processor;	/* Event processor */
@@ -558,8 +561,10 @@
 	rxrpc_seq_t		ackr_prev_seq;	/* previous sequence number received */
 	rxrpc_seq_t		ackr_consumed;	/* Highest packet shown consumed */
 	rxrpc_seq_t		ackr_seen;	/* Highest packet shown seen */
-	rxrpc_serial_t		ackr_ping;	/* Last ping sent */
-	ktime_t			ackr_ping_time;	/* Time last ping sent */
+
+	/* ping management */
+	rxrpc_serial_t		ping_serial;	/* Last ping sent */
+	ktime_t			ping_time;	/* Time last ping sent */
 
 	/* transmission-phase ACK management */
 	ktime_t			acks_latest_ts;	/* Timestamp of latest ACK received */
@@ -728,8 +733,10 @@
 enum rxrpc_timer_trace {
 	rxrpc_timer_begin,
 	rxrpc_timer_init_for_reply,
+	rxrpc_timer_init_for_send_reply,
 	rxrpc_timer_expired,
 	rxrpc_timer_set_for_ack,
+	rxrpc_timer_set_for_ping,
 	rxrpc_timer_set_for_resend,
 	rxrpc_timer_set_for_send,
 	rxrpc_timer__nr_trace
@@ -743,6 +750,7 @@
 	rxrpc_propose_ack_ping_for_lost_ack,
 	rxrpc_propose_ack_ping_for_lost_reply,
 	rxrpc_propose_ack_ping_for_params,
+	rxrpc_propose_ack_processing_op,
 	rxrpc_propose_ack_respond_to_ack,
 	rxrpc_propose_ack_respond_to_ping,
 	rxrpc_propose_ack_retry_tx,
@@ -777,7 +785,7 @@
 extern const char rxrpc_congest_changes[rxrpc_congest__nr_change][9];
 
 extern const char *const rxrpc_pkts[];
-extern const char const rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4];
+extern const char rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4];
 
 #include <trace/events/rxrpc.h>
 
@@ -805,6 +813,7 @@
 /*
  * call_event.c
  */
+void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
 void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
 void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
 		       enum rxrpc_propose_ack_trace);
@@ -1068,7 +1077,8 @@
 /*
  * output.c
  */
-int rxrpc_send_call_packet(struct rxrpc_call *, u8);
+int rxrpc_send_ack_packet(struct rxrpc_call *, bool);
+int rxrpc_send_abort_packet(struct rxrpc_call *);
 int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
 void rxrpc_reject_packets(struct rxrpc_local *);
 
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 3cac231..832d854 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -337,7 +337,7 @@
 
 	/* Get the socket providing the service */
 	rx = rcu_dereference(local->service);
-	if (service_id == rx->srx.srx_service)
+	if (rx && service_id == rx->srx.srx_service)
 		goto found_service;
 
 	trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
@@ -565,7 +565,7 @@
 	write_unlock_bh(&call->state_lock);
 	write_unlock(&rx->call_lock);
 	if (abort) {
-		rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
+		rxrpc_send_abort_packet(call);
 		rxrpc_release_call(rx, call);
 		rxrpc_put_call(call, rxrpc_call_put);
 	}
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 4f00476..97a17ad 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -24,19 +24,20 @@
 /*
  * Set the timer
  */
-void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
-		     ktime_t now)
+void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
+		       ktime_t now)
 {
 	unsigned long t_j, now_j = jiffies;
 	ktime_t t;
 	bool queue = false;
 
-	read_lock_bh(&call->state_lock);
-
 	if (call->state < RXRPC_CALL_COMPLETE) {
 		t = call->expire_at;
-		if (!ktime_after(t, now))
+		if (!ktime_after(t, now)) {
+			trace_rxrpc_timer(call, why, now, now_j);
+			queue = true;
 			goto out;
+		}
 
 		if (!ktime_after(call->resend_at, now)) {
 			call->resend_at = call->expire_at;
@@ -54,6 +55,14 @@
 			t = call->ack_at;
 		}
 
+		if (!ktime_after(call->ping_at, now)) {
+			call->ping_at = call->expire_at;
+			if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
+				queue = true;
+		} else if (ktime_before(call->ping_at, t)) {
+			t = call->ping_at;
+		}
+
 		t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
 		t_j += jiffies;
 
@@ -68,16 +77,46 @@
 			mod_timer(&call->timer, t_j);
 			trace_rxrpc_timer(call, why, now, now_j);
 		}
-
-		if (queue)
-			rxrpc_queue_call(call);
 	}
 
 out:
+	if (queue)
+		rxrpc_queue_call(call);
+}
+
+/*
+ * Set the timer
+ */
+void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
+		     ktime_t now)
+{
+	read_lock_bh(&call->state_lock);
+	__rxrpc_set_timer(call, why, now);
 	read_unlock_bh(&call->state_lock);
 }
 
 /*
+ * Propose a PING ACK be sent.
+ */
+static void rxrpc_propose_ping(struct rxrpc_call *call,
+			       bool immediate, bool background)
+{
+	if (immediate) {
+		if (background &&
+		    !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
+			rxrpc_queue_call(call);
+	} else {
+		ktime_t now = ktime_get_real();
+		ktime_t ping_at = ktime_add_ms(now, rxrpc_idle_ack_delay);
+
+		if (ktime_before(ping_at, call->ping_at)) {
+			call->ping_at = ping_at;
+			rxrpc_set_timer(call, rxrpc_timer_set_for_ping, now);
+		}
+	}
+}
+
+/*
  * propose an ACK be sent
  */
 static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
@@ -90,6 +129,14 @@
 	ktime_t now, ack_at;
 	s8 prior = rxrpc_ack_priority[ack_reason];
 
+	/* Pings are handled specially because we don't want to accidentally
+	 * lose a ping response by subsuming it into a ping.
+	 */
+	if (ack_reason == RXRPC_ACK_PING) {
+		rxrpc_propose_ping(call, immediate, background);
+		goto trace;
+	}
+
 	/* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
 	 * numbers, but we don't alter the timeout.
 	 */
@@ -125,7 +172,6 @@
 			expiry = rxrpc_soft_ack_delay;
 		break;
 
-	case RXRPC_ACK_PING:
 	case RXRPC_ACK_IDLE:
 		if (rxrpc_idle_ack_delay < expiry)
 			expiry = rxrpc_idle_ack_delay;
@@ -253,7 +299,7 @@
 			goto out;
 		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
 				  rxrpc_propose_ack_ping_for_lost_ack);
-		rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
+		rxrpc_send_ack_packet(call, true);
 		goto out;
 	}
 
@@ -328,12 +374,13 @@
 
 recheck_state:
 	if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
-		rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
+		rxrpc_send_abort_packet(call);
 		goto recheck_state;
 	}
 
 	if (call->state == RXRPC_CALL_COMPLETE) {
 		del_timer_sync(&call->timer);
+		rxrpc_notify_socket(call);
 		goto out_put;
 	}
 
@@ -345,13 +392,17 @@
 	}
 
 	if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) {
-		call->ack_at = call->expire_at;
 		if (call->ackr_reason) {
-			rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
+			rxrpc_send_ack_packet(call, false);
 			goto recheck_state;
 		}
 	}
 
+	if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
+		rxrpc_send_ack_packet(call, true);
+		goto recheck_state;
+	}
+
 	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) {
 		rxrpc_resend(call, now);
 		goto recheck_state;
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 364b42d..4353a29 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -205,6 +205,7 @@
 	expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
 	call->expire_at = expire_at;
 	call->ack_at = expire_at;
+	call->ping_at = expire_at;
 	call->resend_at = expire_at;
 	call->timer.expires = jiffies + LONG_MAX / 2;
 	rxrpc_set_timer(call, rxrpc_timer_begin, now);
@@ -498,7 +499,7 @@
 				  struct rxrpc_call, sock_link);
 		rxrpc_get_call(call, rxrpc_call_got);
 		rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, ECONNRESET);
-		rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
+		rxrpc_send_abort_packet(call);
 		rxrpc_release_call(rx, call);
 		rxrpc_put_call(call, rxrpc_call_put);
 	}
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 3ad9f75..44fb8d8 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -625,9 +625,9 @@
 	rxrpc_serial_t ping_serial;
 	ktime_t ping_time;
 
-	ping_time = call->ackr_ping_time;
+	ping_time = call->ping_time;
 	smp_rmb();
-	ping_serial = call->ackr_ping;
+	ping_serial = call->ping_serial;
 
 	if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
 	    before(orig_serial, ping_serial))
@@ -847,7 +847,8 @@
 
 	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
 	    RXRPC_TX_ANNO_LAST &&
-	    summary.nr_acks == call->tx_top - hard_ack)
+	    summary.nr_acks == call->tx_top - hard_ack &&
+	    rxrpc_is_client_call(call))
 		rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
 				  false, true,
 				  rxrpc_propose_ack_ping_for_lost_reply);
@@ -938,6 +939,33 @@
 }
 
 /*
+ * Handle a new call on a channel implicitly completing the preceding call on
+ * that channel.
+ *
+ * TODO: If callNumber > call_id + 1, renegotiate security.
+ */
+static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
+					  struct rxrpc_call *call)
+{
+	switch (call->state) {
+	case RXRPC_CALL_SERVER_AWAIT_ACK:
+		rxrpc_call_completed(call);
+		break;
+	case RXRPC_CALL_COMPLETE:
+		break;
+	default:
+		if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, ESHUTDOWN)) {
+			set_bit(RXRPC_CALL_EV_ABORT, &call->events);
+			rxrpc_queue_call(call);
+		}
+		break;
+	}
+
+	__rxrpc_disconnect_call(conn, call);
+	rxrpc_notify_socket(call);
+}
+
+/*
  * post connection-level events to the connection
  * - this includes challenges, responses, some aborts and call terminal packet
  *   retransmission.
@@ -1145,6 +1173,16 @@
 		}
 
 		call = rcu_dereference(chan->call);
+
+		if (sp->hdr.callNumber > chan->call_id) {
+			if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED)) {
+				rcu_read_unlock();
+				goto reject_packet;
+			}
+			if (call)
+				rxrpc_input_implicit_end_call(conn, call);
+			call = NULL;
+		}
 	} else {
 		skew = 0;
 		call = NULL;
diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c
index 9d1c721..6dee55f 100644
--- a/net/rxrpc/misc.c
+++ b/net/rxrpc/misc.c
@@ -93,10 +93,9 @@
 	[RXRPC_ACK_EXCEEDS_WINDOW]	= 6,
 	[RXRPC_ACK_NOSPACE]		= 7,
 	[RXRPC_ACK_PING_RESPONSE]	= 8,
-	[RXRPC_ACK_PING]		= 9,
 };
 
-const char const rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4] = {
+const char rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4] = {
 	"---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY",
 	"IDL", "-?-"
 };
@@ -196,7 +195,9 @@
 	[rxrpc_timer_begin]			= "Begin ",
 	[rxrpc_timer_expired]			= "*EXPR*",
 	[rxrpc_timer_init_for_reply]		= "IniRpl",
+	[rxrpc_timer_init_for_send_reply]	= "SndRpl",
 	[rxrpc_timer_set_for_ack]		= "SetAck",
+	[rxrpc_timer_set_for_ping]		= "SetPng",
 	[rxrpc_timer_set_for_send]		= "SetTx ",
 	[rxrpc_timer_set_for_resend]		= "SetRTx",
 };
@@ -207,6 +208,7 @@
 	[rxrpc_propose_ack_ping_for_lost_ack]	= "LostAck",
 	[rxrpc_propose_ack_ping_for_lost_reply]	= "LostRpl",
 	[rxrpc_propose_ack_ping_for_params]	= "Params ",
+	[rxrpc_propose_ack_processing_op]	= "ProcOp ",
 	[rxrpc_propose_ack_respond_to_ack]	= "Rsp2Ack",
 	[rxrpc_propose_ack_respond_to_ping]	= "Rsp2Png",
 	[rxrpc_propose_ack_retry_tx]		= "RetryTx",
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 0d47db8..5dab1ff 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -19,26 +19,27 @@
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
-struct rxrpc_pkt_buffer {
+struct rxrpc_ack_buffer {
 	struct rxrpc_wire_header whdr;
-	union {
-		struct {
-			struct rxrpc_ackpacket ack;
-			u8 acks[255];
-			u8 pad[3];
-		};
-		__be32 abort_code;
-	};
+	struct rxrpc_ackpacket ack;
+	u8 acks[255];
+	u8 pad[3];
 	struct rxrpc_ackinfo ackinfo;
 };
 
+struct rxrpc_abort_buffer {
+	struct rxrpc_wire_header whdr;
+	__be32 abort_code;
+};
+
 /*
  * Fill out an ACK packet.
  */
 static size_t rxrpc_fill_out_ack(struct rxrpc_call *call,
-				 struct rxrpc_pkt_buffer *pkt,
+				 struct rxrpc_ack_buffer *pkt,
 				 rxrpc_seq_t *_hard_ack,
-				 rxrpc_seq_t *_top)
+				 rxrpc_seq_t *_top,
+				 u8 reason)
 {
 	rxrpc_serial_t serial;
 	rxrpc_seq_t hard_ack, top, seq;
@@ -58,10 +59,10 @@
 	pkt->ack.firstPacket	= htonl(hard_ack + 1);
 	pkt->ack.previousPacket	= htonl(call->ackr_prev_seq);
 	pkt->ack.serial		= htonl(serial);
-	pkt->ack.reason		= call->ackr_reason;
+	pkt->ack.reason		= reason;
 	pkt->ack.nAcks		= top - hard_ack;
 
-	if (pkt->ack.reason == RXRPC_ACK_PING)
+	if (reason == RXRPC_ACK_PING)
 		pkt->whdr.flags |= RXRPC_REQUEST_ACK;
 
 	if (after(top, hard_ack)) {
@@ -91,22 +92,19 @@
 }
 
 /*
- * Send an ACK or ABORT call packet.
+ * Send an ACK call packet.
  */
-int rxrpc_send_call_packet(struct rxrpc_call *call, u8 type)
+int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
 {
 	struct rxrpc_connection *conn = NULL;
-	struct rxrpc_pkt_buffer *pkt;
+	struct rxrpc_ack_buffer *pkt;
 	struct msghdr msg;
 	struct kvec iov[2];
 	rxrpc_serial_t serial;
 	rxrpc_seq_t hard_ack, top;
 	size_t len, n;
-	bool ping = false;
-	int ioc, ret;
-	u32 abort_code;
-
-	_enter("%u,%s", call->debug_id, rxrpc_pkts[type]);
+	int ret;
+	u8 reason;
 
 	spin_lock_bh(&call->lock);
 	if (call->conn)
@@ -131,68 +129,44 @@
 	pkt->whdr.cid		= htonl(call->cid);
 	pkt->whdr.callNumber	= htonl(call->call_id);
 	pkt->whdr.seq		= 0;
-	pkt->whdr.type		= type;
-	pkt->whdr.flags		= conn->out_clientflag;
+	pkt->whdr.type		= RXRPC_PACKET_TYPE_ACK;
+	pkt->whdr.flags		= RXRPC_SLOW_START_OK | conn->out_clientflag;
 	pkt->whdr.userStatus	= 0;
 	pkt->whdr.securityIndex	= call->security_ix;
 	pkt->whdr._rsvd		= 0;
 	pkt->whdr.serviceId	= htons(call->service_id);
 
-	iov[0].iov_base	= pkt;
-	iov[0].iov_len	= sizeof(pkt->whdr);
-	len = sizeof(pkt->whdr);
-
-	switch (type) {
-	case RXRPC_PACKET_TYPE_ACK:
-		spin_lock_bh(&call->lock);
+	spin_lock_bh(&call->lock);
+	if (ping) {
+		reason = RXRPC_ACK_PING;
+	} else {
+		reason = call->ackr_reason;
 		if (!call->ackr_reason) {
 			spin_unlock_bh(&call->lock);
 			ret = 0;
 			goto out;
 		}
-		ping = (call->ackr_reason == RXRPC_ACK_PING);
-		n = rxrpc_fill_out_ack(call, pkt, &hard_ack, &top);
 		call->ackr_reason = 0;
-
-		spin_unlock_bh(&call->lock);
-
-
-		pkt->whdr.flags |= RXRPC_SLOW_START_OK;
-
-		iov[0].iov_len += sizeof(pkt->ack) + n;
-		iov[1].iov_base = &pkt->ackinfo;
-		iov[1].iov_len	= sizeof(pkt->ackinfo);
-		len += sizeof(pkt->ack) + n + sizeof(pkt->ackinfo);
-		ioc = 2;
-		break;
-
-	case RXRPC_PACKET_TYPE_ABORT:
-		abort_code = call->abort_code;
-		pkt->abort_code = htonl(abort_code);
-		iov[0].iov_len += sizeof(pkt->abort_code);
-		len += sizeof(pkt->abort_code);
-		ioc = 1;
-		break;
-
-	default:
-		BUG();
-		ret = -ENOANO;
-		goto out;
 	}
+	n = rxrpc_fill_out_ack(call, pkt, &hard_ack, &top, reason);
+
+	spin_unlock_bh(&call->lock);
+
+	iov[0].iov_base	= pkt;
+	iov[0].iov_len	= sizeof(pkt->whdr) + sizeof(pkt->ack) + n;
+	iov[1].iov_base = &pkt->ackinfo;
+	iov[1].iov_len	= sizeof(pkt->ackinfo);
+	len = iov[0].iov_len + iov[1].iov_len;
 
 	serial = atomic_inc_return(&conn->serial);
 	pkt->whdr.serial = htonl(serial);
-	switch (type) {
-	case RXRPC_PACKET_TYPE_ACK:
-		trace_rxrpc_tx_ack(call, serial,
-				   ntohl(pkt->ack.firstPacket),
-				   ntohl(pkt->ack.serial),
-				   pkt->ack.reason, pkt->ack.nAcks);
-		break;
-	}
+	trace_rxrpc_tx_ack(call, serial,
+			   ntohl(pkt->ack.firstPacket),
+			   ntohl(pkt->ack.serial),
+			   pkt->ack.reason, pkt->ack.nAcks);
 
 	if (ping) {
-		call->ackr_ping = serial;
+		call->ping_serial = serial;
 		smp_wmb();
 		/* We need to stick a time in before we send the packet in case
 		 * the reply gets back before kernel_sendmsg() completes - but
@@ -201,19 +175,19 @@
 		 * the packet transmission is more likely to happen towards the
 		 * end of the kernel_sendmsg() call.
 		 */
-		call->ackr_ping_time = ktime_get_real();
+		call->ping_time = ktime_get_real();
 		set_bit(RXRPC_CALL_PINGING, &call->flags);
 		trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial);
 	}
-	ret = kernel_sendmsg(conn->params.local->socket,
-			     &msg, iov, ioc, len);
-	if (ping)
-		call->ackr_ping_time = ktime_get_real();
 
-	if (type == RXRPC_PACKET_TYPE_ACK &&
-	    call->state < RXRPC_CALL_COMPLETE) {
+	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
+	if (ping)
+		call->ping_time = ktime_get_real();
+
+	if (call->state < RXRPC_CALL_COMPLETE) {
 		if (ret < 0) {
-			clear_bit(RXRPC_CALL_PINGING, &call->flags);
+			if (ping)
+				clear_bit(RXRPC_CALL_PINGING, &call->flags);
 			rxrpc_propose_ACK(call, pkt->ack.reason,
 					  ntohs(pkt->ack.maxSkew),
 					  ntohl(pkt->ack.serial),
@@ -236,6 +210,56 @@
 }
 
 /*
+ * Send an ABORT call packet.
+ */
+int rxrpc_send_abort_packet(struct rxrpc_call *call)
+{
+	struct rxrpc_connection *conn = NULL;
+	struct rxrpc_abort_buffer pkt;
+	struct msghdr msg;
+	struct kvec iov[1];
+	rxrpc_serial_t serial;
+	int ret;
+
+	spin_lock_bh(&call->lock);
+	if (call->conn)
+		conn = rxrpc_get_connection_maybe(call->conn);
+	spin_unlock_bh(&call->lock);
+	if (!conn)
+		return -ECONNRESET;
+
+	msg.msg_name	= &call->peer->srx.transport;
+	msg.msg_namelen	= call->peer->srx.transport_len;
+	msg.msg_control	= NULL;
+	msg.msg_controllen = 0;
+	msg.msg_flags	= 0;
+
+	pkt.whdr.epoch		= htonl(conn->proto.epoch);
+	pkt.whdr.cid		= htonl(call->cid);
+	pkt.whdr.callNumber	= htonl(call->call_id);
+	pkt.whdr.seq		= 0;
+	pkt.whdr.type		= RXRPC_PACKET_TYPE_ABORT;
+	pkt.whdr.flags		= conn->out_clientflag;
+	pkt.whdr.userStatus	= 0;
+	pkt.whdr.securityIndex	= call->security_ix;
+	pkt.whdr._rsvd		= 0;
+	pkt.whdr.serviceId	= htons(call->service_id);
+	pkt.abort_code		= htonl(call->abort_code);
+
+	iov[0].iov_base	= &pkt;
+	iov[0].iov_len	= sizeof(pkt);
+
+	serial = atomic_inc_return(&conn->serial);
+	pkt.whdr.serial = htonl(serial);
+
+	ret = kernel_sendmsg(conn->params.local->socket,
+			     &msg, iov, 1, sizeof(pkt));
+
+	rxrpc_put_connection(conn);
+	return ret;
+}
+
+/*
  * send a packet through the transport endpoint
  */
 int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
@@ -283,11 +307,12 @@
 	/* If our RTT cache needs working on, request an ACK.  Also request
 	 * ACKs if a DATA packet appears to have been lost.
 	 */
-	if (retrans ||
-	    call->cong_mode == RXRPC_CALL_SLOW_START ||
-	    (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
-	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
-			 ktime_get_real()))
+	if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
+	    (retrans ||
+	     call->cong_mode == RXRPC_CALL_SLOW_START ||
+	     (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
+	     ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
+			  ktime_get_real())))
 		whdr.flags |= RXRPC_REQUEST_ACK;
 
 	if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index f05ea0a..c29362d 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -143,7 +143,7 @@
 	if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
 		rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false,
 				  rxrpc_propose_ack_terminal_ack);
-		rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
+		rxrpc_send_ack_packet(call, false);
 	}
 
 	write_lock_bh(&call->state_lock);
@@ -151,17 +151,21 @@
 	switch (call->state) {
 	case RXRPC_CALL_CLIENT_RECV_REPLY:
 		__rxrpc_call_completed(call);
+		write_unlock_bh(&call->state_lock);
 		break;
 
 	case RXRPC_CALL_SERVER_RECV_REQUEST:
 		call->tx_phase = true;
 		call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
+		call->ack_at = call->expire_at;
+		write_unlock_bh(&call->state_lock);
+		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true,
+				  rxrpc_propose_ack_processing_op);
 		break;
 	default:
+		write_unlock_bh(&call->state_lock);
 		break;
 	}
-
-	write_unlock_bh(&call->state_lock);
 }
 
 /*
@@ -212,7 +216,7 @@
 					  true, false,
 					  rxrpc_propose_ack_rotate_rx);
 		if (call->ackr_reason)
-			rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
+			rxrpc_send_ack_packet(call, false);
 	}
 }
 
@@ -652,7 +656,7 @@
 	goto out;
 call_complete:
 	*_abort = call->abort_code;
-	ret = call->error;
+	ret = -call->error;
 	if (call->completion == RXRPC_CALL_SUCCEEDED) {
 		ret = 1;
 		if (size > 0)
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 627abed..4374e7b 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -381,7 +381,7 @@
 	return 0;
 
 protocol_error:
-	rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
+	rxrpc_send_abort_packet(call);
 	_leave(" = -EPROTO");
 	return -EPROTO;
 
@@ -471,7 +471,7 @@
 	return 0;
 
 protocol_error:
-	rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
+	rxrpc_send_abort_packet(call);
 	_leave(" = -EPROTO");
 	return -EPROTO;
 
@@ -523,7 +523,7 @@
 
 	if (cksum != expected_cksum) {
 		rxrpc_abort_call("VCK", call, seq, RXKADSEALEDINCON, EPROTO);
-		rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
+		rxrpc_send_abort_packet(call);
 		_leave(" = -EPROTO [csum failed]");
 		return -EPROTO;
 	}
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 3322543..b214a4d 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -130,6 +130,11 @@
 			break;
 		case RXRPC_CALL_SERVER_ACK_REQUEST:
 			call->state = RXRPC_CALL_SERVER_SEND_REPLY;
+			call->ack_at = call->expire_at;
+			if (call->ackr_reason == RXRPC_ACK_DELAY)
+				call->ackr_reason = 0;
+			__rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply,
+					  ktime_get_real());
 			if (!last)
 				break;
 		case RXRPC_CALL_SERVER_SEND_REPLY:
@@ -197,7 +202,7 @@
 	do {
 		/* Check to see if there's a ping ACK to reply to. */
 		if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
-			rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
+			rxrpc_send_ack_packet(call, false);
 
 		if (!skb) {
 			size_t size, chunk, max, space;
@@ -514,8 +519,7 @@
 	} else if (cmd == RXRPC_CMD_SEND_ABORT) {
 		ret = 0;
 		if (rxrpc_abort_call("CMD", call, 0, abort_code, ECONNABORTED))
-			ret = rxrpc_send_call_packet(call,
-						     RXRPC_PACKET_TYPE_ABORT);
+			ret = rxrpc_send_abort_packet(call);
 	} else if (cmd != RXRPC_CMD_SEND_DATA) {
 		ret = -EINVAL;
 	} else if (rxrpc_is_client_call(call) &&
@@ -597,7 +601,7 @@
 	lock_sock(sock->sk);
 
 	if (rxrpc_abort_call(why, call, 0, abort_code, error))
-		rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
+		rxrpc_send_abort_packet(call);
 
 	release_sock(sock->sk);
 	_leave("");
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index c910217..a512b18 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -341,22 +341,25 @@
 	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
 		return -EINVAL;
 
+	/* We have to register pernet ops before making the action ops visible,
+	 * otherwise tcf_action_init_1() could get a partially initialized
+	 * netns.
+	 */
+	ret = register_pernet_subsys(ops);
+	if (ret)
+		return ret;
+
 	write_lock(&act_mod_lock);
 	list_for_each_entry(a, &act_base, head) {
 		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
 			write_unlock(&act_mod_lock);
+			unregister_pernet_subsys(ops);
 			return -EEXIST;
 		}
 	}
 	list_add_tail(&act->head, &act_base);
 	write_unlock(&act_mod_lock);
 
-	ret = register_pernet_subsys(ops);
-	if (ret) {
-		tcf_unregister_action(act, ops);
-		return ret;
-	}
-
 	return 0;
 }
 EXPORT_SYMBOL(tcf_register_action);
@@ -367,8 +370,6 @@
 	struct tc_action_ops *a;
 	int err = -ENOENT;
 
-	unregister_pernet_subsys(ops);
-
 	write_lock(&act_mod_lock);
 	list_for_each_entry(a, &act_base, head) {
 		if (a == act) {
@@ -378,6 +379,8 @@
 		}
 	}
 	write_unlock(&act_mod_lock);
+	if (!err)
+		unregister_pernet_subsys(ops);
 	return err;
 }
 EXPORT_SYMBOL(tcf_unregister_action);
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 11da7da..2ee29a3 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -101,7 +101,7 @@
 
 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
 			  struct nlmsghdr *n, struct tcf_proto *tp,
-			  unsigned long fh, int event);
+			  unsigned long fh, int event, bool unicast);
 
 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
 				 struct nlmsghdr *n,
@@ -112,7 +112,7 @@
 
 	for (it_chain = chain; (tp = rtnl_dereference(*it_chain)) != NULL;
 	     it_chain = &tp->next)
-		tfilter_notify(net, oskb, n, tp, 0, event);
+		tfilter_notify(net, oskb, n, tp, 0, event, false);
 }
 
 /* Select new prio value from the range, managed by kernel. */
@@ -319,7 +319,8 @@
 
 			RCU_INIT_POINTER(*back, next);
 
-			tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
+			tfilter_notify(net, skb, n, tp, fh,
+				       RTM_DELTFILTER, false);
 			tcf_destroy(tp, true);
 			err = 0;
 			goto errout;
@@ -345,14 +346,14 @@
 				struct tcf_proto *next = rtnl_dereference(tp->next);
 
 				tfilter_notify(net, skb, n, tp, fh,
-					       RTM_DELTFILTER);
+					       RTM_DELTFILTER, false);
 				if (tcf_destroy(tp, false))
 					RCU_INIT_POINTER(*back, next);
 			}
 			goto errout;
 		case RTM_GETTFILTER:
 			err = tfilter_notify(net, skb, n, tp, fh,
-					     RTM_NEWTFILTER);
+					     RTM_NEWTFILTER, true);
 			goto errout;
 		default:
 			err = -EINVAL;
@@ -367,7 +368,7 @@
 			RCU_INIT_POINTER(tp->next, rtnl_dereference(*back));
 			rcu_assign_pointer(*back, tp);
 		}
-		tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
+		tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, false);
 	} else {
 		if (tp_created)
 			tcf_destroy(tp, true);
@@ -419,7 +420,7 @@
 
 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
 			  struct nlmsghdr *n, struct tcf_proto *tp,
-			  unsigned long fh, int event)
+			  unsigned long fh, int event, bool unicast)
 {
 	struct sk_buff *skb;
 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
@@ -433,6 +434,9 @@
 		return -EINVAL;
 	}
 
+	if (unicast)
+		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
+
 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
 			      n->nlmsg_flags & NLM_F_ECHO);
 }
diff --git a/net/socket.c b/net/socket.c
index a1bd161..5a9bf5e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -320,11 +320,38 @@
 	.d_dname  = sockfs_dname,
 };
 
+static int sockfs_xattr_get(const struct xattr_handler *handler,
+			    struct dentry *dentry, struct inode *inode,
+			    const char *suffix, void *value, size_t size)
+{
+	if (value) {
+		if (dentry->d_name.len + 1 > size)
+			return -ERANGE;
+		memcpy(value, dentry->d_name.name, dentry->d_name.len + 1);
+	}
+	return dentry->d_name.len + 1;
+}
+
+#define XATTR_SOCKPROTONAME_SUFFIX "sockprotoname"
+#define XATTR_NAME_SOCKPROTONAME (XATTR_SYSTEM_PREFIX XATTR_SOCKPROTONAME_SUFFIX)
+#define XATTR_NAME_SOCKPROTONAME_LEN (sizeof(XATTR_NAME_SOCKPROTONAME)-1)
+
+static const struct xattr_handler sockfs_xattr_handler = {
+	.name = XATTR_NAME_SOCKPROTONAME,
+	.get = sockfs_xattr_get,
+};
+
+static const struct xattr_handler *sockfs_xattr_handlers[] = {
+	&sockfs_xattr_handler,
+	NULL
+};
+
 static struct dentry *sockfs_mount(struct file_system_type *fs_type,
 			 int flags, const char *dev_name, void *data)
 {
-	return mount_pseudo(fs_type, "socket:", &sockfs_ops,
-		&sockfs_dentry_operations, SOCKFS_MAGIC);
+	return mount_pseudo_xattr(fs_type, "socket:", &sockfs_ops,
+				  sockfs_xattr_handlers,
+				  &sockfs_dentry_operations, SOCKFS_MAGIC);
 }
 
 static struct vfsmount *sock_mnt __read_mostly;
@@ -463,35 +490,6 @@
 	return NULL;
 }
 
-#define XATTR_SOCKPROTONAME_SUFFIX "sockprotoname"
-#define XATTR_NAME_SOCKPROTONAME (XATTR_SYSTEM_PREFIX XATTR_SOCKPROTONAME_SUFFIX)
-#define XATTR_NAME_SOCKPROTONAME_LEN (sizeof(XATTR_NAME_SOCKPROTONAME)-1)
-static ssize_t sockfs_getxattr(struct dentry *dentry, struct inode *inode,
-			       const char *name, void *value, size_t size)
-{
-	const char *proto_name;
-	size_t proto_size;
-	int error;
-
-	error = -ENODATA;
-	if (!strncmp(name, XATTR_NAME_SOCKPROTONAME, XATTR_NAME_SOCKPROTONAME_LEN)) {
-		proto_name = dentry->d_name.name;
-		proto_size = strlen(proto_name);
-
-		if (value) {
-			error = -ERANGE;
-			if (proto_size + 1 > size)
-				goto out;
-
-			strncpy(value, proto_name, proto_size + 1);
-		}
-		error = proto_size + 1;
-	}
-
-out:
-	return error;
-}
-
 static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
 				size_t size)
 {
@@ -521,7 +519,6 @@
 }
 
 static const struct inode_operations sockfs_inode_ops = {
-	.getxattr = sockfs_getxattr,
 	.listxattr = sockfs_listxattr,
 };
 
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index 5c7549b..41adf36 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -246,7 +246,7 @@
 				} else {
 					strp->rx_interrupted = 1;
 				}
-				strp_parser_err(strp, err, desc);
+				strp_parser_err(strp, len, desc);
 				break;
 			} else if (len > strp->sk->sk_rcvbuf) {
 				/* Message length exceeds maximum allowed */
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index a7e42f9..2bff63a 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -551,7 +551,7 @@
 			*entry, *new;
 	unsigned int nr;
 
-	nr = hash_long(from_kuid(&init_user_ns, acred->uid), cache->hashbits);
+	nr = auth->au_ops->hash_cred(acred, cache->hashbits);
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(entry, &cache->hashtable[nr], cr_hash) {
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 83dffea..f1df983 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -78,6 +78,14 @@
 	return auth->au_ops->lookup_cred(auth, acred, lookupflags);
 }
 
+static int
+generic_hash_cred(struct auth_cred *acred, unsigned int hashbits)
+{
+	return hash_64(from_kgid(&init_user_ns, acred->gid) |
+		((u64)from_kuid(&init_user_ns, acred->uid) <<
+			(sizeof(gid_t) * 8)), hashbits);
+}
+
 /*
  * Lookup generic creds for current process
  */
@@ -258,6 +266,7 @@
 static const struct rpc_authops generic_auth_ops = {
 	.owner = THIS_MODULE,
 	.au_name = "Generic",
+	.hash_cred = generic_hash_cred,
 	.lookup_cred = generic_lookup_cred,
 	.crcreate = generic_create_cred,
 	.key_timeout = generic_key_timeout,
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 976c781..d8bd97a 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1298,6 +1298,12 @@
 	gss_destroy_nullcred(cred);
 }
 
+static int
+gss_hash_cred(struct auth_cred *acred, unsigned int hashbits)
+{
+	return hash_64(from_kuid(&init_user_ns, acred->uid), hashbits);
+}
+
 /*
  * Lookup RPCSEC_GSS cred for the current process
  */
@@ -1982,6 +1988,7 @@
 	.au_name	= "RPCSEC_GSS",
 	.create		= gss_create,
 	.destroy	= gss_destroy,
+	.hash_cred	= gss_hash_cred,
 	.lookup_cred	= gss_lookup_cred,
 	.crcreate	= gss_create_cred,
 	.list_pseudoflavors = gss_mech_list_pseudoflavors,
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index a1d768a9..306fc0f 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -46,6 +46,14 @@
 	rpcauth_clear_credcache(auth->au_credcache);
 }
 
+static int
+unx_hash_cred(struct auth_cred *acred, unsigned int hashbits)
+{
+	return hash_64(from_kgid(&init_user_ns, acred->gid) |
+		((u64)from_kuid(&init_user_ns, acred->uid) <<
+			(sizeof(gid_t) * 8)), hashbits);
+}
+
 /*
  * Lookup AUTH_UNIX creds for current process
  */
@@ -220,6 +228,7 @@
 	.au_name	= "UNIX",
 	.create		= unx_create,
 	.destroy	= unx_destroy,
+	.hash_cred	= unx_hash_cred,
 	.lookup_cred	= unx_lookup_cred,
 	.crcreate	= unx_create_cred,
 };
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 229956b..ac701c2 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -76,13 +76,7 @@
 	page = alloc_page(gfp_flags);
 	if (page == NULL)
 		return -ENOMEM;
-	buf->head[0].iov_base = page_address(page);
-	buf->head[0].iov_len = PAGE_SIZE;
-	buf->tail[0].iov_base = NULL;
-	buf->tail[0].iov_len = 0;
-	buf->page_len = 0;
-	buf->len = 0;
-	buf->buflen = PAGE_SIZE;
+	xdr_buf_init(buf, page_address(page), PAGE_SIZE);
 	return 0;
 }
 
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 4d8e11f..8aabe12 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -353,7 +353,7 @@
 	spin_unlock(&cache_list_lock);
 
 	/* start the cleaning process */
-	schedule_delayed_work(&cache_cleaner, 0);
+	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
 }
 EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
 
@@ -476,7 +476,8 @@
 		delay = 0;
 
 	if (delay)
-		schedule_delayed_work(&cache_cleaner, delay);
+		queue_delayed_work(system_power_efficient_wq,
+				   &cache_cleaner, delay);
 }
 
 
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 66f23b3..34dd7b2 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -184,7 +184,6 @@
 				   struct super_block *sb)
 {
 	struct dentry *dentry;
-	int err = 0;
 
 	switch (event) {
 	case RPC_PIPEFS_MOUNT:
@@ -201,7 +200,7 @@
 		printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
 		return -ENOTSUPP;
 	}
-	return err;
+	return 0;
 }
 
 static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
@@ -988,7 +987,6 @@
 {
 
 	if (clnt != NULL) {
-		rpc_task_release_client(task);
 		if (task->tk_xprt == NULL)
 			task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi);
 		task->tk_client = clnt;
@@ -1693,6 +1691,7 @@
 	struct rpc_rqst *req = task->tk_rqstp;
 	struct rpc_xprt *xprt = req->rq_xprt;
 	struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
+	int status;
 
 	dprint_status(task);
 
@@ -1718,11 +1717,14 @@
 	req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
 	req->rq_rcvsize <<= 2;
 
-	req->rq_buffer = xprt->ops->buf_alloc(task,
-					req->rq_callsize + req->rq_rcvsize);
-	if (req->rq_buffer != NULL)
-		return;
+	status = xprt->ops->buf_alloc(task);
 	xprt_inject_disconnect(xprt);
+	if (status == 0)
+		return;
+	if (status != -ENOMEM) {
+		rpc_exit(task, status);
+		return;
+	}
 
 	dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
 
@@ -1748,18 +1750,6 @@
 	task->tk_rqstp->rq_bytes_sent = 0;
 }
 
-static inline void
-rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
-{
-	buf->head[0].iov_base = start;
-	buf->head[0].iov_len = len;
-	buf->tail[0].iov_len = 0;
-	buf->page_len = 0;
-	buf->flags = 0;
-	buf->len = 0;
-	buf->buflen = len;
-}
-
 /*
  * 3.	Encode arguments of an RPC call
  */
@@ -1772,12 +1762,12 @@
 
 	dprint_status(task);
 
-	rpc_xdr_buf_init(&req->rq_snd_buf,
-			 req->rq_buffer,
-			 req->rq_callsize);
-	rpc_xdr_buf_init(&req->rq_rcv_buf,
-			 (char *)req->rq_buffer + req->rq_callsize,
-			 req->rq_rcvsize);
+	xdr_buf_init(&req->rq_snd_buf,
+		     req->rq_buffer,
+		     req->rq_callsize);
+	xdr_buf_init(&req->rq_rcv_buf,
+		     req->rq_rbuffer,
+		     req->rq_rcvsize);
 
 	p = rpc_encode_header(task);
 	if (p == NULL) {
@@ -2616,6 +2606,70 @@
 EXPORT_SYMBOL_GPL(rpc_clnt_test_and_add_xprt);
 
 /**
+ * rpc_clnt_setup_test_and_add_xprt()
+ *
+ * This is an rpc_clnt_add_xprt setup() function which returns 1 so:
+ *   1) caller of the test function must dereference the rpc_xprt_switch
+ *   and the rpc_xprt.
+ *   2) test function must call rpc_xprt_switch_add_xprt, usually in
+ *   the rpc_call_done routine.
+ *
+ * Upon success (return of 1), the test function adds the new
+ * transport to the rpc_clnt xprt switch
+ *
+ * @clnt: struct rpc_clnt to get the new transport
+ * @xps:  the rpc_xprt_switch to hold the new transport
+ * @xprt: the rpc_xprt to test
+ * @data: a struct rpc_add_xprt_test pointer that holds the test function
+ *        and test function call data
+ */
+int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt,
+				     struct rpc_xprt_switch *xps,
+				     struct rpc_xprt *xprt,
+				     void *data)
+{
+	struct rpc_cred *cred;
+	struct rpc_task *task;
+	struct rpc_add_xprt_test *xtest = (struct rpc_add_xprt_test *)data;
+	int status = -EADDRINUSE;
+
+	xprt = xprt_get(xprt);
+	xprt_switch_get(xps);
+
+	if (rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr))
+		goto out_err;
+
+	/* Test the connection */
+	cred = authnull_ops.lookup_cred(NULL, NULL, 0);
+	task = rpc_call_null_helper(clnt, xprt, cred,
+				    RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
+				    NULL, NULL);
+	put_rpccred(cred);
+	if (IS_ERR(task)) {
+		status = PTR_ERR(task);
+		goto out_err;
+	}
+	status = task->tk_status;
+	rpc_put_task(task);
+
+	if (status < 0)
+		goto out_err;
+
+	/* rpc_xprt_switch and rpc_xprt are deferrenced by add_xprt_test() */
+	xtest->add_xprt_test(clnt, xprt, xtest->data);
+
+	/* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */
+	return 1;
+out_err:
+	xprt_put(xprt);
+	xprt_switch_put(xps);
+	pr_info("RPC:   rpc_clnt_test_xprt failed: %d addr %s not added\n",
+		status, xprt->address_strings[RPC_DISPLAY_ADDR]);
+	return status;
+}
+EXPORT_SYMBOL_GPL(rpc_clnt_setup_test_and_add_xprt);
+
+/**
  * rpc_clnt_add_xprt - Add a new transport to a rpc_clnt
  * @clnt: pointer to struct rpc_clnt
  * @xprtargs: pointer to struct xprt_create
@@ -2697,6 +2751,34 @@
 }
 EXPORT_SYMBOL_GPL(rpc_cap_max_reconnect_timeout);
 
+void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt)
+{
+	xprt_switch_put(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
+}
+EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put);
+
+void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
+{
+	rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch),
+				 xprt);
+}
+EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt);
+
+bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
+				   const struct sockaddr *sap)
+{
+	struct rpc_xprt_switch *xps;
+	bool ret;
+
+	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
+
+	rcu_read_lock();
+	ret = rpc_xprt_switch_has_addr(xps, sap);
+	rcu_read_unlock();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr);
+
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 static void rpc_show_header(void)
 {
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 84f98cb..61a504f 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -477,7 +477,7 @@
 		return NULL;
 	inode->i_ino = get_next_ino();
 	inode->i_mode = mode;
-	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 	switch (mode & S_IFMT) {
 	case S_IFDIR:
 		inode->i_fop = &simple_dir_operations;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 9ae5885..5db68b3 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -849,14 +849,17 @@
 }
 
 /**
- * rpc_malloc - allocate an RPC buffer
- * @task: RPC task that will use this buffer
- * @size: requested byte size
+ * rpc_malloc - allocate RPC buffer resources
+ * @task: RPC task
+ *
+ * A single memory region is allocated, which is split between the
+ * RPC call and RPC reply that this task is being used for. When
+ * this RPC is retired, the memory is released by calling rpc_free.
  *
  * To prevent rpciod from hanging, this allocator never sleeps,
- * returning NULL and suppressing warning if the request cannot be serviced
- * immediately.
- * The caller can arrange to sleep in a way that is safe for rpciod.
+ * returning -ENOMEM and suppressing warning if the request cannot
+ * be serviced immediately. The caller can arrange to sleep in a
+ * way that is safe for rpciod.
  *
  * Most requests are 'small' (under 2KiB) and can be serviced from a
  * mempool, ensuring that NFS reads and writes can always proceed,
@@ -865,8 +868,10 @@
  * In order to avoid memory starvation triggering more writebacks of
  * NFS requests, we avoid using GFP_KERNEL.
  */
-void *rpc_malloc(struct rpc_task *task, size_t size)
+int rpc_malloc(struct rpc_task *task)
 {
+	struct rpc_rqst *rqst = task->tk_rqstp;
+	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
 	struct rpc_buffer *buf;
 	gfp_t gfp = GFP_NOIO | __GFP_NOWARN;
 
@@ -880,28 +885,28 @@
 		buf = kmalloc(size, gfp);
 
 	if (!buf)
-		return NULL;
+		return -ENOMEM;
 
 	buf->len = size;
 	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
 			task->tk_pid, size, buf);
-	return &buf->data;
+	rqst->rq_buffer = buf->data;
+	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(rpc_malloc);
 
 /**
- * rpc_free - free buffer allocated via rpc_malloc
- * @buffer: buffer to free
+ * rpc_free - free RPC buffer resources allocated via rpc_malloc
+ * @task: RPC task
  *
  */
-void rpc_free(void *buffer)
+void rpc_free(struct rpc_task *task)
 {
+	void *buffer = task->tk_rqstp->rq_buffer;
 	size_t size;
 	struct rpc_buffer *buf;
 
-	if (!buffer)
-		return;
-
 	buf = container_of(buffer, struct rpc_buffer, data);
 	size = buf->len;
 
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index c5b0cb4..7c8070e 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -401,6 +401,21 @@
 }
 EXPORT_SYMBOL_GPL(svc_bind);
 
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+static void
+__svc_init_bc(struct svc_serv *serv)
+{
+	INIT_LIST_HEAD(&serv->sv_cb_list);
+	spin_lock_init(&serv->sv_cb_lock);
+	init_waitqueue_head(&serv->sv_cb_waitq);
+}
+#else
+static void
+__svc_init_bc(struct svc_serv *serv)
+{
+}
+#endif
+
 /*
  * Create an RPC service
  */
@@ -443,6 +458,8 @@
 	init_timer(&serv->sv_temptimer);
 	spin_lock_init(&serv->sv_lock);
 
+	__svc_init_bc(serv);
+
 	serv->sv_nrpools = npools;
 	serv->sv_pools =
 		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index c4f3cc0..7f1071e 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -767,7 +767,7 @@
 	newbase -= xdr->buf->page_base;
 
 	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
-		xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
+		xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
 }
 
 static bool xdr_set_next_buffer(struct xdr_stream *xdr)
@@ -776,7 +776,7 @@
 		xdr_set_next_page(xdr);
 	else if (xdr->iov == xdr->buf->head) {
 		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
-			xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
+			xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
 	}
 	return xdr->p != xdr->end;
 }
@@ -859,12 +859,15 @@
 static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
 {
 	__be32 *p;
-	void *cpdest = xdr->scratch.iov_base;
+	char *cpdest = xdr->scratch.iov_base;
 	size_t cplen = (char *)xdr->end - (char *)xdr->p;
 
 	if (nbytes > xdr->scratch.iov_len)
 		return NULL;
-	memcpy(cpdest, xdr->p, cplen);
+	p = __xdr_inline_decode(xdr, cplen);
+	if (p == NULL)
+		return NULL;
+	memcpy(cpdest, p, cplen);
 	cpdest += cplen;
 	nbytes -= cplen;
 	if (!xdr_set_next_buffer(xdr))
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index ea244b2..685e6d2 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1295,7 +1295,7 @@
 	xprt_schedule_autodisconnect(xprt);
 	spin_unlock_bh(&xprt->transport_lock);
 	if (req->rq_buffer)
-		xprt->ops->buf_free(req->rq_buffer);
+		xprt->ops->buf_free(task);
 	xprt_inject_disconnect(xprt);
 	if (req->rq_cred != NULL)
 		put_rpccred(req->rq_cred);
diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
index 66c9d63..ae92a9e 100644
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -15,6 +15,7 @@
 #include <asm/cmpxchg.h>
 #include <linux/spinlock.h>
 #include <linux/sunrpc/xprt.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/sunrpc/xprtmultipath.h>
 
 typedef struct rpc_xprt *(*xprt_switch_find_xprt_t)(struct list_head *head,
@@ -49,7 +50,8 @@
 	if (xprt == NULL)
 		return;
 	spin_lock(&xps->xps_lock);
-	if (xps->xps_net == xprt->xprt_net || xps->xps_net == NULL)
+	if ((xps->xps_net == xprt->xprt_net || xps->xps_net == NULL) &&
+	    !rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr))
 		xprt_switch_add_xprt_locked(xps, xprt);
 	spin_unlock(&xps->xps_lock);
 }
@@ -232,6 +234,26 @@
 	return xprt_switch_find_current_entry(head, xpi->xpi_cursor);
 }
 
+bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
+			      const struct sockaddr *sap)
+{
+	struct list_head *head;
+	struct rpc_xprt *pos;
+
+	if (xps == NULL || sap == NULL)
+		return false;
+
+	head = &xps->xps_xprt_list;
+	list_for_each_entry_rcu(pos, head, xprt_switch) {
+		if (rpc_cmp_addr_port(sap, (struct sockaddr *)&pos->addr)) {
+			pr_info("RPC:   addr %s already in xprt switch\n",
+				pos->address_strings[RPC_DISPLAY_ADDR]);
+			return true;
+		}
+	}
+	return false;
+}
+
 static
 struct rpc_xprt *xprt_switch_find_next_entry(struct list_head *head,
 		const struct rpc_xprt *cur)
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 87762d9..2c472e1 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -27,7 +27,7 @@
 	list_del(&req->rl_all);
 	spin_unlock(&buf->rb_reqslock);
 
-	rpcrdma_destroy_req(&r_xprt->rx_ia, req);
+	rpcrdma_destroy_req(req);
 
 	kfree(rqst);
 }
@@ -35,10 +35,8 @@
 static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
 				 struct rpc_rqst *rqst)
 {
-	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 	struct rpcrdma_regbuf *rb;
 	struct rpcrdma_req *req;
-	struct xdr_buf *buf;
 	size_t size;
 
 	req = rpcrdma_create_req(r_xprt);
@@ -46,30 +44,19 @@
 		return PTR_ERR(req);
 	req->rl_backchannel = true;
 
-	size = RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
-	rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
+	rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
+				  DMA_TO_DEVICE, GFP_KERNEL);
 	if (IS_ERR(rb))
 		goto out_fail;
 	req->rl_rdmabuf = rb;
 
-	size += RPCRDMA_INLINE_READ_THRESHOLD(rqst);
-	rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
+	size = r_xprt->rx_data.inline_rsize;
+	rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
 	if (IS_ERR(rb))
 		goto out_fail;
-	rb->rg_owner = req;
 	req->rl_sendbuf = rb;
-	/* so that rpcr_to_rdmar works when receiving a request */
-	rqst->rq_buffer = (void *)req->rl_sendbuf->rg_base;
-
-	buf = &rqst->rq_snd_buf;
-	buf->head[0].iov_base = rqst->rq_buffer;
-	buf->head[0].iov_len = 0;
-	buf->tail[0].iov_base = NULL;
-	buf->tail[0].iov_len = 0;
-	buf->page_len = 0;
-	buf->len = 0;
-	buf->buflen = size;
-
+	xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base, size);
+	rpcrdma_set_xprtdata(rqst, req);
 	return 0;
 
 out_fail:
@@ -219,7 +206,6 @@
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
 	struct rpcrdma_msg *headerp;
-	size_t rpclen;
 
 	headerp = rdmab_to_msg(req->rl_rdmabuf);
 	headerp->rm_xid = rqst->rq_xid;
@@ -231,26 +217,9 @@
 	headerp->rm_body.rm_chunks[1] = xdr_zero;
 	headerp->rm_body.rm_chunks[2] = xdr_zero;
 
-	rpclen = rqst->rq_svec[0].iov_len;
-
-#ifdef RPCRDMA_BACKCHANNEL_DEBUG
-	pr_info("RPC:       %s: rpclen %zd headerp 0x%p lkey 0x%x\n",
-		__func__, rpclen, headerp, rdmab_lkey(req->rl_rdmabuf));
-	pr_info("RPC:       %s: RPC/RDMA: %*ph\n",
-		__func__, (int)RPCRDMA_HDRLEN_MIN, headerp);
-	pr_info("RPC:       %s:      RPC: %*ph\n",
-		__func__, (int)rpclen, rqst->rq_svec[0].iov_base);
-#endif
-
-	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
-	req->rl_send_iov[0].length = RPCRDMA_HDRLEN_MIN;
-	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
-
-	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
-	req->rl_send_iov[1].length = rpclen;
-	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);
-
-	req->rl_niovs = 2;
+	if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, RPCRDMA_HDRLEN_MIN,
+				       &rqst->rq_snd_buf, rpcrdma_noch))
+		return -EIO;
 	return 0;
 }
 
@@ -402,7 +371,7 @@
 out_short:
 	pr_warn("RPC/RDMA short backward direction call\n");
 
-	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
+	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
 		xprt_disconnect_done(xprt);
 	else
 		pr_warn("RPC:       %s: reposting rep %p\n",
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index 21cb3b1..1ebb09e 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -160,9 +160,8 @@
 fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
 	    struct rpcrdma_create_data_internal *cdata)
 {
-	rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
-						      RPCRDMA_MAX_DATA_SEGS /
-						      RPCRDMA_MAX_FMR_SGES));
+	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
+				RPCRDMA_MAX_FMR_SGES);
 	return 0;
 }
 
@@ -274,6 +273,7 @@
 	 */
 	list_for_each_entry(mw, &req->rl_registered, mw_list)
 		list_add_tail(&mw->fmr.fm_mr->list, &unmap_list);
+	r_xprt->rx_stats.local_inv_needed++;
 	rc = ib_unmap_fmr(&unmap_list);
 	if (rc)
 		goto out_reset;
@@ -331,4 +331,5 @@
 	.ro_init_mr			= fmr_op_init_mr,
 	.ro_release_mr			= fmr_op_release_mr,
 	.ro_displayname			= "fmr",
+	.ro_send_w_inv_ok		= 0,
 };
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 892b5e1..2109495 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -67,6 +67,8 @@
  * pending send queue WRs before the transport is reconnected.
  */
 
+#include <linux/sunrpc/rpc_rdma.h>
+
 #include "xprt_rdma.h"
 
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
@@ -161,7 +163,7 @@
 		return PTR_ERR(f->fr_mr);
 	}
 
-	dprintk("RPC:       %s: recovered FRMR %p\n", __func__, r);
+	dprintk("RPC:       %s: recovered FRMR %p\n", __func__, f);
 	f->fr_state = FRMR_IS_INVALID;
 	return 0;
 }
@@ -242,9 +244,8 @@
 					       depth;
 	}
 
-	rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
-						      RPCRDMA_MAX_DATA_SEGS /
-						      ia->ri_max_frmr_depth));
+	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
+				ia->ri_max_frmr_depth);
 	return 0;
 }
 
@@ -329,7 +330,7 @@
 	frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
 	if (wc->status != IB_WC_SUCCESS)
 		__frwr_sendcompletion_flush(wc, frmr, "localinv");
-	complete_all(&frmr->fr_linv_done);
+	complete(&frmr->fr_linv_done);
 }
 
 /* Post a REG_MR Work Request to register a memory region
@@ -396,7 +397,7 @@
 		goto out_mapmr_err;
 
 	dprintk("RPC:       %s: Using frmr %p to map %u segments (%u bytes)\n",
-		__func__, mw, mw->mw_nents, mr->length);
+		__func__, frmr, mw->mw_nents, mr->length);
 
 	key = (u8)(mr->rkey & 0x000000FF);
 	ib_update_fast_reg_key(mr, ++key);
@@ -449,6 +450,8 @@
 	struct rpcrdma_frmr *f = &mw->frmr;
 	struct ib_send_wr *invalidate_wr;
 
+	dprintk("RPC:       %s: invalidating frmr %p\n", __func__, f);
+
 	f->fr_state = FRMR_IS_INVALID;
 	invalidate_wr = &f->fr_invwr;
 
@@ -472,6 +475,7 @@
 frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 {
 	struct ib_send_wr *invalidate_wrs, *pos, *prev, *bad_wr;
+	struct rpcrdma_rep *rep = req->rl_reply;
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 	struct rpcrdma_mw *mw, *tmp;
 	struct rpcrdma_frmr *f;
@@ -487,6 +491,12 @@
 	f = NULL;
 	invalidate_wrs = pos = prev = NULL;
 	list_for_each_entry(mw, &req->rl_registered, mw_list) {
+		if ((rep->rr_wc_flags & IB_WC_WITH_INVALIDATE) &&
+		    (mw->mw_handle == rep->rr_inv_rkey)) {
+			mw->frmr.fr_state = FRMR_IS_INVALID;
+			continue;
+		}
+
 		pos = __frwr_prepare_linv_wr(mw);
 
 		if (!invalidate_wrs)
@@ -496,6 +506,8 @@
 		prev = pos;
 		f = &mw->frmr;
 	}
+	if (!f)
+		goto unmap;
 
 	/* Strong send queue ordering guarantees that when the
 	 * last WR in the chain completes, all WRs in the chain
@@ -510,6 +522,7 @@
 	 * replaces the QP. The RPC reply handler won't call us
 	 * unless ri_id->qp is a valid pointer.
 	 */
+	r_xprt->rx_stats.local_inv_needed++;
 	rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr);
 	if (rc)
 		goto reset_mrs;
@@ -521,6 +534,8 @@
 	 */
 unmap:
 	list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) {
+		dprintk("RPC:       %s: unmapping frmr %p\n",
+			__func__, &mw->frmr);
 		list_del_init(&mw->mw_list);
 		ib_dma_unmap_sg(ia->ri_device,
 				mw->mw_sg, mw->mw_nents, mw->mw_dir);
@@ -576,4 +591,5 @@
 	.ro_init_mr			= frwr_op_init_mr,
 	.ro_release_mr			= frwr_op_release_mr,
 	.ro_displayname			= "frwr",
+	.ro_send_w_inv_ok		= RPCRDMA_CMP_F_SND_W_INV_OK,
 };
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index a47f170..d987c2d 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -53,14 +53,6 @@
 # define RPCDBG_FACILITY	RPCDBG_TRANS
 #endif
 
-enum rpcrdma_chunktype {
-	rpcrdma_noch = 0,
-	rpcrdma_readch,
-	rpcrdma_areadch,
-	rpcrdma_writech,
-	rpcrdma_replych
-};
-
 static const char transfertypes[][12] = {
 	"inline",	/* no chunks */
 	"read list",	/* some argument via rdma read */
@@ -118,10 +110,12 @@
 	return size;
 }
 
-void rpcrdma_set_max_header_sizes(struct rpcrdma_ia *ia,
-				  struct rpcrdma_create_data_internal *cdata,
-				  unsigned int maxsegs)
+void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
 {
+	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
+	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+	unsigned int maxsegs = ia->ri_max_segs;
+
 	ia->ri_max_inline_write = cdata->inline_wsize -
 				  rpcrdma_max_call_header_size(maxsegs);
 	ia->ri_max_inline_read = cdata->inline_rsize -
@@ -155,42 +149,6 @@
 	return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
 }
 
-static int
-rpcrdma_tail_pullup(struct xdr_buf *buf)
-{
-	size_t tlen = buf->tail[0].iov_len;
-	size_t skip = tlen & 3;
-
-	/* Do not include the tail if it is only an XDR pad */
-	if (tlen < 4)
-		return 0;
-
-	/* xdr_write_pages() adds a pad at the beginning of the tail
-	 * if the content in "buf->pages" is unaligned. Force the
-	 * tail's actual content to land at the next XDR position
-	 * after the head instead.
-	 */
-	if (skip) {
-		unsigned char *src, *dst;
-		unsigned int count;
-
-		src = buf->tail[0].iov_base;
-		dst = buf->head[0].iov_base;
-		dst += buf->head[0].iov_len;
-
-		src += skip;
-		tlen -= skip;
-
-		dprintk("RPC:       %s: skip=%zu, memmove(%p, %p, %zu)\n",
-			__func__, skip, dst, src, tlen);
-
-		for (count = tlen; count; count--)
-			*dst++ = *src++;
-	}
-
-	return tlen;
-}
-
 /* Split "vec" on page boundaries into segments. FMR registers pages,
  * not a byte range. Other modes coalesce these segments into a single
  * MR when they can.
@@ -229,7 +187,8 @@
 
 static int
 rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
-	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg)
+	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg,
+	bool reminv_expected)
 {
 	int len, n, p, page_base;
 	struct page **ppages;
@@ -271,6 +230,13 @@
 	if (type == rpcrdma_readch)
 		return n;
 
+	/* When encoding the Write list, some servers need to see an extra
+	 * segment for odd-length Write chunks. The upper layer provides
+	 * space in the tail iovec for this purpose.
+	 */
+	if (type == rpcrdma_writech && reminv_expected)
+		return n;
+
 	if (xdrbuf->tail[0].iov_len) {
 		/* the rpcrdma protocol allows us to omit any trailing
 		 * xdr pad bytes, saving the server an RDMA operation. */
@@ -327,7 +293,7 @@
 	if (rtype == rpcrdma_areadch)
 		pos = 0;
 	seg = req->rl_segments;
-	nsegs = rpcrdma_convert_iovs(&rqst->rq_snd_buf, pos, rtype, seg);
+	nsegs = rpcrdma_convert_iovs(&rqst->rq_snd_buf, pos, rtype, seg, false);
 	if (nsegs < 0)
 		return ERR_PTR(nsegs);
 
@@ -391,7 +357,8 @@
 	seg = req->rl_segments;
 	nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf,
 				     rqst->rq_rcv_buf.head[0].iov_len,
-				     wtype, seg);
+				     wtype, seg,
+				     r_xprt->rx_ia.ri_reminv_expected);
 	if (nsegs < 0)
 		return ERR_PTR(nsegs);
 
@@ -456,7 +423,8 @@
 	}
 
 	seg = req->rl_segments;
-	nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, 0, wtype, seg);
+	nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, 0, wtype, seg,
+				     r_xprt->rx_ia.ri_reminv_expected);
 	if (nsegs < 0)
 		return ERR_PTR(nsegs);
 
@@ -491,74 +459,184 @@
 	return iptr;
 }
 
-/*
- * Copy write data inline.
- * This function is used for "small" requests. Data which is passed
- * to RPC via iovecs (or page list) is copied directly into the
- * pre-registered memory buffer for this request. For small amounts
- * of data, this is efficient. The cutoff value is tunable.
+/* Prepare the RPC-over-RDMA header SGE.
  */
-static void rpcrdma_inline_pullup(struct rpc_rqst *rqst)
+static bool
+rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
+			u32 len)
 {
-	int i, npages, curlen;
-	int copy_len;
-	unsigned char *srcp, *destp;
-	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
-	int page_base;
-	struct page **ppages;
+	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
+	struct ib_sge *sge = &req->rl_send_sge[0];
 
-	destp = rqst->rq_svec[0].iov_base;
-	curlen = rqst->rq_svec[0].iov_len;
-	destp += curlen;
+	if (unlikely(!rpcrdma_regbuf_is_mapped(rb))) {
+		if (!__rpcrdma_dma_map_regbuf(ia, rb))
+			return false;
+		sge->addr = rdmab_addr(rb);
+		sge->lkey = rdmab_lkey(rb);
+	}
+	sge->length = len;
 
-	dprintk("RPC:       %s: destp 0x%p len %d hdrlen %d\n",
-		__func__, destp, rqst->rq_slen, curlen);
+	ib_dma_sync_single_for_device(ia->ri_device, sge->addr,
+				      sge->length, DMA_TO_DEVICE);
+	req->rl_send_wr.num_sge++;
+	return true;
+}
 
-	copy_len = rqst->rq_snd_buf.page_len;
+/* Prepare the Send SGEs. The head and tail iovec, and each entry
+ * in the page list, gets its own SGE.
+ */
+static bool
+rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
+			 struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
+{
+	unsigned int sge_no, page_base, len, remaining;
+	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
+	struct ib_device *device = ia->ri_device;
+	struct ib_sge *sge = req->rl_send_sge;
+	u32 lkey = ia->ri_pd->local_dma_lkey;
+	struct page *page, **ppages;
 
-	if (rqst->rq_snd_buf.tail[0].iov_len) {
-		curlen = rqst->rq_snd_buf.tail[0].iov_len;
-		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
-			memmove(destp + copy_len,
-				rqst->rq_snd_buf.tail[0].iov_base, curlen);
-			r_xprt->rx_stats.pullup_copy_count += curlen;
+	/* The head iovec is straightforward, as it is already
+	 * DMA-mapped. Sync the content that has changed.
+	 */
+	if (!rpcrdma_dma_map_regbuf(ia, rb))
+		return false;
+	sge_no = 1;
+	sge[sge_no].addr = rdmab_addr(rb);
+	sge[sge_no].length = xdr->head[0].iov_len;
+	sge[sge_no].lkey = rdmab_lkey(rb);
+	ib_dma_sync_single_for_device(device, sge[sge_no].addr,
+				      sge[sge_no].length, DMA_TO_DEVICE);
+
+	/* If there is a Read chunk, the page list is being handled
+	 * via explicit RDMA, and thus is skipped here. However, the
+	 * tail iovec may include an XDR pad for the page list, as
+	 * well as additional content, and may not reside in the
+	 * same page as the head iovec.
+	 */
+	if (rtype == rpcrdma_readch) {
+		len = xdr->tail[0].iov_len;
+
+		/* Do not include the tail if it is only an XDR pad */
+		if (len < 4)
+			goto out;
+
+		page = virt_to_page(xdr->tail[0].iov_base);
+		page_base = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
+
+		/* If the content in the page list is an odd length,
+		 * xdr_write_pages() has added a pad at the beginning
+		 * of the tail iovec. Force the tail's non-pad content
+		 * to land at the next XDR position in the Send message.
+		 */
+		page_base += len & 3;
+		len -= len & 3;
+		goto map_tail;
+	}
+
+	/* If there is a page list present, temporarily DMA map
+	 * and prepare an SGE for each page to be sent.
+	 */
+	if (xdr->page_len) {
+		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
+		page_base = xdr->page_base & ~PAGE_MASK;
+		remaining = xdr->page_len;
+		while (remaining) {
+			sge_no++;
+			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
+				goto out_mapping_overflow;
+
+			len = min_t(u32, PAGE_SIZE - page_base, remaining);
+			sge[sge_no].addr = ib_dma_map_page(device, *ppages,
+							   page_base, len,
+							   DMA_TO_DEVICE);
+			if (ib_dma_mapping_error(device, sge[sge_no].addr))
+				goto out_mapping_err;
+			sge[sge_no].length = len;
+			sge[sge_no].lkey = lkey;
+
+			req->rl_mapped_sges++;
+			ppages++;
+			remaining -= len;
+			page_base = 0;
 		}
-		dprintk("RPC:       %s: tail destp 0x%p len %d\n",
-			__func__, destp + copy_len, curlen);
-		rqst->rq_svec[0].iov_len += curlen;
 	}
-	r_xprt->rx_stats.pullup_copy_count += copy_len;
 
-	page_base = rqst->rq_snd_buf.page_base;
-	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
-	page_base &= ~PAGE_MASK;
-	npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT;
-	for (i = 0; copy_len && i < npages; i++) {
-		curlen = PAGE_SIZE - page_base;
-		if (curlen > copy_len)
-			curlen = copy_len;
-		dprintk("RPC:       %s: page %d destp 0x%p len %d curlen %d\n",
-			__func__, i, destp, copy_len, curlen);
-		srcp = kmap_atomic(ppages[i]);
-		memcpy(destp, srcp+page_base, curlen);
-		kunmap_atomic(srcp);
-		rqst->rq_svec[0].iov_len += curlen;
-		destp += curlen;
-		copy_len -= curlen;
-		page_base = 0;
+	/* The tail iovec is not always constructed in the same
+	 * page where the head iovec resides (see, for example,
+	 * gss_wrap_req_priv). To neatly accommodate that case,
+	 * DMA map it separately.
+	 */
+	if (xdr->tail[0].iov_len) {
+		page = virt_to_page(xdr->tail[0].iov_base);
+		page_base = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
+		len = xdr->tail[0].iov_len;
+
+map_tail:
+		sge_no++;
+		sge[sge_no].addr = ib_dma_map_page(device, page,
+						   page_base, len,
+						   DMA_TO_DEVICE);
+		if (ib_dma_mapping_error(device, sge[sge_no].addr))
+			goto out_mapping_err;
+		sge[sge_no].length = len;
+		sge[sge_no].lkey = lkey;
+		req->rl_mapped_sges++;
 	}
-	/* header now contains entire send message */
+
+out:
+	req->rl_send_wr.num_sge = sge_no + 1;
+	return true;
+
+out_mapping_overflow:
+	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
+	return false;
+
+out_mapping_err:
+	pr_err("rpcrdma: Send mapping error\n");
+	return false;
+}
+
+bool
+rpcrdma_prepare_send_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
+			  u32 hdrlen, struct xdr_buf *xdr,
+			  enum rpcrdma_chunktype rtype)
+{
+	req->rl_send_wr.num_sge = 0;
+	req->rl_mapped_sges = 0;
+
+	if (!rpcrdma_prepare_hdr_sge(ia, req, hdrlen))
+		goto out_map;
+
+	if (rtype != rpcrdma_areadch)
+		if (!rpcrdma_prepare_msg_sges(ia, req, xdr, rtype))
+			goto out_map;
+
+	return true;
+
+out_map:
+	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
+	return false;
+}
+
+void
+rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
+{
+	struct ib_device *device = ia->ri_device;
+	struct ib_sge *sge;
+	int count;
+
+	sge = &req->rl_send_sge[2];
+	for (count = req->rl_mapped_sges; count--; sge++)
+		ib_dma_unmap_page(device, sge->addr, sge->length,
+				  DMA_TO_DEVICE);
+	req->rl_mapped_sges = 0;
 }
 
 /*
  * Marshal a request: the primary job of this routine is to choose
  * the transfer modes. See comments below.
  *
- * Prepares up to two IOVs per Call message:
- *
- *  [0] -- RPC RDMA header
- *  [1] -- the RPC header/data
- *
  * Returns zero on success, otherwise a negative errno.
  */
 
@@ -626,12 +704,11 @@
 	 */
 	if (rpcrdma_args_inline(r_xprt, rqst)) {
 		rtype = rpcrdma_noch;
-		rpcrdma_inline_pullup(rqst);
-		rpclen = rqst->rq_svec[0].iov_len;
+		rpclen = rqst->rq_snd_buf.len;
 	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
 		rtype = rpcrdma_readch;
-		rpclen = rqst->rq_svec[0].iov_len;
-		rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
+		rpclen = rqst->rq_snd_buf.head[0].iov_len +
+			 rqst->rq_snd_buf.tail[0].iov_len;
 	} else {
 		r_xprt->rx_stats.nomsg_call_count++;
 		headerp->rm_type = htonl(RDMA_NOMSG);
@@ -673,34 +750,18 @@
 		goto out_unmap;
 	hdrlen = (unsigned char *)iptr - (unsigned char *)headerp;
 
-	if (hdrlen + rpclen > RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
-		goto out_overflow;
-
 	dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n",
 		rqst->rq_task->tk_pid, __func__,
 		transfertypes[rtype], transfertypes[wtype],
 		hdrlen, rpclen);
 
-	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
-	req->rl_send_iov[0].length = hdrlen;
-	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
-
-	req->rl_niovs = 1;
-	if (rtype == rpcrdma_areadch)
-		return 0;
-
-	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
-	req->rl_send_iov[1].length = rpclen;
-	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);
-
-	req->rl_niovs = 2;
+	if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, hdrlen,
+				       &rqst->rq_snd_buf, rtype)) {
+		iptr = ERR_PTR(-EIO);
+		goto out_unmap;
+	}
 	return 0;
 
-out_overflow:
-	pr_err("rpcrdma: send overflow: hdrlen %zd rpclen %zu %s/%s\n",
-		hdrlen, rpclen, transfertypes[rtype], transfertypes[wtype]);
-	iptr = ERR_PTR(-EIO);
-
 out_unmap:
 	r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false);
 	return PTR_ERR(iptr);
@@ -916,8 +977,10 @@
  * allowed to timeout, to discover the errors at that time.
  */
 void
-rpcrdma_reply_handler(struct rpcrdma_rep *rep)
+rpcrdma_reply_handler(struct work_struct *work)
 {
+	struct rpcrdma_rep *rep =
+			container_of(work, struct rpcrdma_rep, rr_work);
 	struct rpcrdma_msg *headerp;
 	struct rpcrdma_req *req;
 	struct rpc_rqst *rqst;
@@ -1132,6 +1195,6 @@
 
 repost:
 	r_xprt->rx_stats.bad_reply_count++;
-	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
+	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
 		rpcrdma_recv_buffer_put(rep);
 }
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index a2a7519..2d8545c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -129,7 +129,7 @@
 		ret = -EIO;
 		goto out_unmap;
 	}
-	atomic_inc(&rdma->sc_dma_used);
+	svc_rdma_count_mappings(rdma, ctxt);
 
 	memset(&send_wr, 0, sizeof(send_wr));
 	ctxt->cqe.done = svc_rdma_wc_send;
@@ -159,33 +159,34 @@
 /* Server-side transport endpoint wants a whole page for its send
  * buffer. The client RPC code constructs the RPC header in this
  * buffer before it invokes ->send_request.
- *
- * Returns NULL if there was a temporary allocation failure.
  */
-static void *
-xprt_rdma_bc_allocate(struct rpc_task *task, size_t size)
+static int
+xprt_rdma_bc_allocate(struct rpc_task *task)
 {
 	struct rpc_rqst *rqst = task->tk_rqstp;
 	struct svc_xprt *sxprt = rqst->rq_xprt->bc_xprt;
+	size_t size = rqst->rq_callsize;
 	struct svcxprt_rdma *rdma;
 	struct page *page;
 
 	rdma = container_of(sxprt, struct svcxprt_rdma, sc_xprt);
 
-	/* Prevent an infinite loop: try to make this case work */
-	if (size > PAGE_SIZE)
+	if (size > PAGE_SIZE) {
 		WARN_ONCE(1, "svcrdma: large bc buffer request (size %zu)\n",
 			  size);
+		return -EINVAL;
+	}
 
 	page = alloc_page(RPCRDMA_DEF_GFP);
 	if (!page)
-		return NULL;
+		return -ENOMEM;
 
-	return page_address(page);
+	rqst->rq_buffer = page_address(page);
+	return 0;
 }
 
 static void
-xprt_rdma_bc_free(void *buffer)
+xprt_rdma_bc_free(struct rpc_task *task)
 {
 	/* No-op: ctxt and page have already been freed. */
 }
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 2c25606..ad1df97 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -159,7 +159,7 @@
 					   ctxt->sge[pno].addr);
 		if (ret)
 			goto err;
-		atomic_inc(&xprt->sc_dma_used);
+		svc_rdma_count_mappings(xprt, ctxt);
 
 		ctxt->sge[pno].lkey = xprt->sc_pd->local_dma_lkey;
 		ctxt->sge[pno].length = len;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 54d53330..f5a91ed 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -225,6 +225,48 @@
 	return rp_ary;
 }
 
+/* RPC-over-RDMA Version One private extension: Remote Invalidation.
+ * Responder's choice: requester signals it can handle Send With
+ * Invalidate, and responder chooses one rkey to invalidate.
+ *
+ * Find a candidate rkey to invalidate when sending a reply.  Picks the
+ * first rkey it finds in the chunks lists.
+ *
+ * Returns zero if RPC's chunk lists are empty.
+ */
+static u32 svc_rdma_get_inv_rkey(struct rpcrdma_msg *rdma_argp,
+				 struct rpcrdma_write_array *wr_ary,
+				 struct rpcrdma_write_array *rp_ary)
+{
+	struct rpcrdma_read_chunk *rd_ary;
+	struct rpcrdma_segment *arg_ch;
+	u32 inv_rkey;
+
+	inv_rkey = 0;
+
+	rd_ary = svc_rdma_get_read_chunk(rdma_argp);
+	if (rd_ary) {
+		inv_rkey = be32_to_cpu(rd_ary->rc_target.rs_handle);
+		goto out;
+	}
+
+	if (wr_ary && be32_to_cpu(wr_ary->wc_nchunks)) {
+		arg_ch = &wr_ary->wc_array[0].wc_target;
+		inv_rkey = be32_to_cpu(arg_ch->rs_handle);
+		goto out;
+	}
+
+	if (rp_ary && be32_to_cpu(rp_ary->wc_nchunks)) {
+		arg_ch = &rp_ary->wc_array[0].wc_target;
+		inv_rkey = be32_to_cpu(arg_ch->rs_handle);
+		goto out;
+	}
+
+out:
+	dprintk("svcrdma: Send With Invalidate rkey=%08x\n", inv_rkey);
+	return inv_rkey;
+}
+
 /* Assumptions:
  * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
  */
@@ -280,7 +322,7 @@
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
 					 sge[sge_no].addr))
 			goto err;
-		atomic_inc(&xprt->sc_dma_used);
+		svc_rdma_count_mappings(xprt, ctxt);
 		sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
 		ctxt->count++;
 		sge_off = 0;
@@ -464,7 +506,8 @@
 		      struct page *page,
 		      struct rpcrdma_msg *rdma_resp,
 		      struct svc_rdma_req_map *vec,
-		      int byte_count)
+		      int byte_count,
+		      u32 inv_rkey)
 {
 	struct svc_rdma_op_ctxt *ctxt;
 	struct ib_send_wr send_wr;
@@ -489,7 +532,7 @@
 			    ctxt->sge[0].length, DMA_TO_DEVICE);
 	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
 		goto err;
-	atomic_inc(&rdma->sc_dma_used);
+	svc_rdma_count_mappings(rdma, ctxt);
 
 	ctxt->direction = DMA_TO_DEVICE;
 
@@ -505,7 +548,7 @@
 		if (ib_dma_mapping_error(rdma->sc_cm_id->device,
 					 ctxt->sge[sge_no].addr))
 			goto err;
-		atomic_inc(&rdma->sc_dma_used);
+		svc_rdma_count_mappings(rdma, ctxt);
 		ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
 		ctxt->sge[sge_no].length = sge_bytes;
 	}
@@ -523,23 +566,9 @@
 		ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
 		ctxt->count++;
 		rqstp->rq_respages[page_no] = NULL;
-		/*
-		 * If there are more pages than SGE, terminate SGE
-		 * list so that svc_rdma_unmap_dma doesn't attempt to
-		 * unmap garbage.
-		 */
-		if (page_no+1 >= sge_no)
-			ctxt->sge[page_no+1].length = 0;
 	}
 	rqstp->rq_next_page = rqstp->rq_respages + 1;
 
-	/* The loop above bumps sc_dma_used for each sge. The
-	 * xdr_buf.tail gets a separate sge, but resides in the
-	 * same page as xdr_buf.head. Don't count it twice.
-	 */
-	if (sge_no > ctxt->count)
-		atomic_dec(&rdma->sc_dma_used);
-
 	if (sge_no > rdma->sc_max_sge) {
 		pr_err("svcrdma: Too many sges (%d)\n", sge_no);
 		goto err;
@@ -549,7 +578,11 @@
 	send_wr.wr_cqe = &ctxt->cqe;
 	send_wr.sg_list = ctxt->sge;
 	send_wr.num_sge = sge_no;
-	send_wr.opcode = IB_WR_SEND;
+	if (inv_rkey) {
+		send_wr.opcode = IB_WR_SEND_WITH_INV;
+		send_wr.ex.invalidate_rkey = inv_rkey;
+	} else
+		send_wr.opcode = IB_WR_SEND;
 	send_wr.send_flags =  IB_SEND_SIGNALED;
 
 	ret = svc_rdma_send(rdma, &send_wr);
@@ -581,6 +614,7 @@
 	int inline_bytes;
 	struct page *res_page;
 	struct svc_rdma_req_map *vec;
+	u32 inv_rkey;
 
 	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);
 
@@ -591,6 +625,10 @@
 	wr_ary = svc_rdma_get_write_array(rdma_argp);
 	rp_ary = svc_rdma_get_reply_array(rdma_argp, wr_ary);
 
+	inv_rkey = 0;
+	if (rdma->sc_snd_w_inv)
+		inv_rkey = svc_rdma_get_inv_rkey(rdma_argp, wr_ary, rp_ary);
+
 	/* Build an req vec for the XDR */
 	vec = svc_rdma_get_req_map(rdma);
 	ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec, wr_ary != NULL);
@@ -633,9 +671,9 @@
 		goto err1;
 
 	ret = send_reply(rdma, rqstp, res_page, rdma_resp, vec,
-			 inline_bytes);
+			 inline_bytes, inv_rkey);
 	if (ret < 0)
-		goto err1;
+		goto err0;
 
 	svc_rdma_put_req_map(rdma, vec);
 	dprintk("svcrdma: send_reply returns %d\n", ret);
@@ -692,7 +730,7 @@
 		svc_rdma_put_context(ctxt, 1);
 		return;
 	}
-	atomic_inc(&xprt->sc_dma_used);
+	svc_rdma_count_mappings(xprt, ctxt);
 
 	/* Prepare SEND WR */
 	memset(&err_wr, 0, sizeof(err_wr));
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index eb2857f..6864fb9 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -198,6 +198,7 @@
 
 out:
 	ctxt->count = 0;
+	ctxt->mapped_sges = 0;
 	ctxt->frmr = NULL;
 	return ctxt;
 
@@ -221,22 +222,27 @@
 void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
 {
 	struct svcxprt_rdma *xprt = ctxt->xprt;
-	int i;
-	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
+	struct ib_device *device = xprt->sc_cm_id->device;
+	u32 lkey = xprt->sc_pd->local_dma_lkey;
+	unsigned int i, count;
+
+	for (count = 0, i = 0; i < ctxt->mapped_sges; i++) {
 		/*
 		 * Unmap the DMA addr in the SGE if the lkey matches
 		 * the local_dma_lkey, otherwise, ignore it since it is
 		 * an FRMR lkey and will be unmapped later when the
 		 * last WR that uses it completes.
 		 */
-		if (ctxt->sge[i].lkey == xprt->sc_pd->local_dma_lkey) {
-			atomic_dec(&xprt->sc_dma_used);
-			ib_dma_unmap_page(xprt->sc_cm_id->device,
+		if (ctxt->sge[i].lkey == lkey) {
+			count++;
+			ib_dma_unmap_page(device,
 					    ctxt->sge[i].addr,
 					    ctxt->sge[i].length,
 					    ctxt->direction);
 		}
 	}
+	ctxt->mapped_sges = 0;
+	atomic_sub(count, &xprt->sc_dma_used);
 }
 
 void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
@@ -600,7 +606,7 @@
 				     DMA_FROM_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
 			goto err_put_ctxt;
-		atomic_inc(&xprt->sc_dma_used);
+		svc_rdma_count_mappings(xprt, ctxt);
 		ctxt->sge[sge_no].addr = pa;
 		ctxt->sge[sge_no].length = PAGE_SIZE;
 		ctxt->sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
@@ -642,6 +648,26 @@
 	return ret;
 }
 
+static void
+svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt,
+			       struct rdma_conn_param *param)
+{
+	const struct rpcrdma_connect_private *pmsg = param->private_data;
+
+	if (pmsg &&
+	    pmsg->cp_magic == rpcrdma_cmp_magic &&
+	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
+		newxprt->sc_snd_w_inv = pmsg->cp_flags &
+					RPCRDMA_CMP_F_SND_W_INV_OK;
+
+		dprintk("svcrdma: client send_size %u, recv_size %u "
+			"remote inv %ssupported\n",
+			rpcrdma_decode_buffer_size(pmsg->cp_send_size),
+			rpcrdma_decode_buffer_size(pmsg->cp_recv_size),
+			newxprt->sc_snd_w_inv ? "" : "un");
+	}
+}
+
 /*
  * This function handles the CONNECT_REQUEST event on a listening
  * endpoint. It is passed the cma_id for the _new_ connection. The context in
@@ -653,7 +679,8 @@
  * will call the recvfrom method on the listen xprt which will accept the new
  * connection.
  */
-static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
+static void handle_connect_req(struct rdma_cm_id *new_cma_id,
+			       struct rdma_conn_param *param)
 {
 	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
 	struct svcxprt_rdma *newxprt;
@@ -669,9 +696,10 @@
 	new_cma_id->context = newxprt;
 	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
 		newxprt, newxprt->sc_cm_id, listen_xprt);
+	svc_rdma_parse_connect_private(newxprt, param);
 
 	/* Save client advertised inbound read limit for use later in accept. */
-	newxprt->sc_ord = client_ird;
+	newxprt->sc_ord = param->initiator_depth;
 
 	/* Set the local and remote addresses in the transport */
 	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
@@ -706,8 +734,7 @@
 		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
 			"event = %s (%d)\n", cma_id, cma_id->context,
 			rdma_event_msg(event->event), event->event);
-		handle_connect_req(cma_id,
-				   event->param.conn.initiator_depth);
+		handle_connect_req(cma_id, &event->param.conn);
 		break;
 
 	case RDMA_CM_EVENT_ESTABLISHED:
@@ -941,6 +968,7 @@
 	struct svcxprt_rdma *listen_rdma;
 	struct svcxprt_rdma *newxprt = NULL;
 	struct rdma_conn_param conn_param;
+	struct rpcrdma_connect_private pmsg;
 	struct ib_qp_init_attr qp_attr;
 	struct ib_device *dev;
 	unsigned int i;
@@ -1070,7 +1098,8 @@
 			dev->attrs.max_fast_reg_page_list_len;
 		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
 		newxprt->sc_reader = rdma_read_chunk_frmr;
-	}
+	} else
+		newxprt->sc_snd_w_inv = false;
 
 	/*
 	 * Determine if a DMA MR is required and if so, what privs are required
@@ -1094,11 +1123,20 @@
 	/* Swap out the handler */
 	newxprt->sc_cm_id->event_handler = rdma_cma_handler;
 
+	/* Construct RDMA-CM private message */
+	pmsg.cp_magic = rpcrdma_cmp_magic;
+	pmsg.cp_version = RPCRDMA_CMP_VERSION;
+	pmsg.cp_flags = 0;
+	pmsg.cp_send_size = pmsg.cp_recv_size =
+		rpcrdma_encode_buffer_size(newxprt->sc_max_req_size);
+
 	/* Accept Connection */
 	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
 	memset(&conn_param, 0, sizeof conn_param);
 	conn_param.responder_resources = 0;
 	conn_param.initiator_depth = newxprt->sc_ord;
+	conn_param.private_data = &pmsg;
+	conn_param.private_data_len = sizeof(pmsg);
 	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
 	if (ret) {
 		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 81f0e87..ed5e285 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -97,7 +97,7 @@
 		.data		= &xprt_rdma_max_inline_read,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &min_inline_size,
 		.extra2		= &max_inline_size,
 	},
@@ -106,7 +106,7 @@
 		.data		= &xprt_rdma_max_inline_write,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &min_inline_size,
 		.extra2		= &max_inline_size,
 	},
@@ -477,115 +477,152 @@
 	}
 }
 
-/*
- * The RDMA allocate/free functions need the task structure as a place
- * to hide the struct rpcrdma_req, which is necessary for the actual send/recv
- * sequence.
- *
- * The RPC layer allocates both send and receive buffers in the same call
- * (rq_send_buf and rq_rcv_buf are both part of a single contiguous buffer).
- * We may register rq_rcv_buf when using reply chunks.
+/* Allocate a fixed-size buffer in which to construct and send the
+ * RPC-over-RDMA header for this request.
  */
-static void *
-xprt_rdma_allocate(struct rpc_task *task, size_t size)
+static bool
+rpcrdma_get_rdmabuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
+		    gfp_t flags)
 {
-	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
-	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
+	size_t size = RPCRDMA_HDRBUF_SIZE;
 	struct rpcrdma_regbuf *rb;
+
+	if (req->rl_rdmabuf)
+		return true;
+
+	rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, flags);
+	if (IS_ERR(rb))
+		return false;
+
+	r_xprt->rx_stats.hardway_register_count += size;
+	req->rl_rdmabuf = rb;
+	return true;
+}
+
+static bool
+rpcrdma_get_sendbuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
+		    size_t size, gfp_t flags)
+{
+	struct rpcrdma_regbuf *rb;
+
+	if (req->rl_sendbuf && rdmab_length(req->rl_sendbuf) >= size)
+		return true;
+
+	rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, flags);
+	if (IS_ERR(rb))
+		return false;
+
+	rpcrdma_free_regbuf(req->rl_sendbuf);
+	r_xprt->rx_stats.hardway_register_count += size;
+	req->rl_sendbuf = rb;
+	return true;
+}
+
+/* The rq_rcv_buf is used only if a Reply chunk is necessary.
+ * The decision to use a Reply chunk is made later in
+ * rpcrdma_marshal_req. This buffer is registered at that time.
+ *
+ * Otherwise, the associated RPC Reply arrives in a separate
+ * Receive buffer, arbitrarily chosen by the HCA. The buffer
+ * allocated here for the RPC Reply is not utilized in that
+ * case. See rpcrdma_inline_fixup.
+ *
+ * A regbuf is used here to remember the buffer size.
+ */
+static bool
+rpcrdma_get_recvbuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
+		    size_t size, gfp_t flags)
+{
+	struct rpcrdma_regbuf *rb;
+
+	if (req->rl_recvbuf && rdmab_length(req->rl_recvbuf) >= size)
+		return true;
+
+	rb = rpcrdma_alloc_regbuf(size, DMA_NONE, flags);
+	if (IS_ERR(rb))
+		return false;
+
+	rpcrdma_free_regbuf(req->rl_recvbuf);
+	r_xprt->rx_stats.hardway_register_count += size;
+	req->rl_recvbuf = rb;
+	return true;
+}
+
+/**
+ * xprt_rdma_allocate - allocate transport resources for an RPC
+ * @task: RPC task
+ *
+ * Return values:
+ *        0:	Success; rq_buffer points to RPC buffer to use
+ *   ENOMEM:	Out of memory, call again later
+ *      EIO:	A permanent error occurred, do not retry
+ *
+ * The RDMA allocate/free functions need the task structure as a place
+ * to hide the struct rpcrdma_req, which is necessary for the actual
+ * send/recv sequence.
+ *
+ * xprt_rdma_allocate provides buffers that are already mapped for
+ * DMA, and a local DMA lkey is provided for each.
+ */
+static int
+xprt_rdma_allocate(struct rpc_task *task)
+{
+	struct rpc_rqst *rqst = task->tk_rqstp;
+	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
 	struct rpcrdma_req *req;
-	size_t min_size;
 	gfp_t flags;
 
 	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
 	if (req == NULL)
-		return NULL;
+		return -ENOMEM;
 
 	flags = RPCRDMA_DEF_GFP;
 	if (RPC_IS_SWAPPER(task))
 		flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
 
-	if (req->rl_rdmabuf == NULL)
-		goto out_rdmabuf;
-	if (req->rl_sendbuf == NULL)
-		goto out_sendbuf;
-	if (size > req->rl_sendbuf->rg_size)
-		goto out_sendbuf;
+	if (!rpcrdma_get_rdmabuf(r_xprt, req, flags))
+		goto out_fail;
+	if (!rpcrdma_get_sendbuf(r_xprt, req, rqst->rq_callsize, flags))
+		goto out_fail;
+	if (!rpcrdma_get_recvbuf(r_xprt, req, rqst->rq_rcvsize, flags))
+		goto out_fail;
 
-out:
-	dprintk("RPC:       %s: size %zd, request 0x%p\n", __func__, size, req);
+	dprintk("RPC: %5u %s: send size = %zd, recv size = %zd, req = %p\n",
+		task->tk_pid, __func__, rqst->rq_callsize,
+		rqst->rq_rcvsize, req);
+
 	req->rl_connect_cookie = 0;	/* our reserved value */
-	req->rl_task = task;
-	return req->rl_sendbuf->rg_base;
-
-out_rdmabuf:
-	min_size = RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
-	rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, min_size, flags);
-	if (IS_ERR(rb))
-		goto out_fail;
-	req->rl_rdmabuf = rb;
-
-out_sendbuf:
-	/* XDR encoding and RPC/RDMA marshaling of this request has not
-	 * yet occurred. Thus a lower bound is needed to prevent buffer
-	 * overrun during marshaling.
-	 *
-	 * RPC/RDMA marshaling may choose to send payload bearing ops
-	 * inline, if the result is smaller than the inline threshold.
-	 * The value of the "size" argument accounts for header
-	 * requirements but not for the payload in these cases.
-	 *
-	 * Likewise, allocate enough space to receive a reply up to the
-	 * size of the inline threshold.
-	 *
-	 * It's unlikely that both the send header and the received
-	 * reply will be large, but slush is provided here to allow
-	 * flexibility when marshaling.
-	 */
-	min_size = RPCRDMA_INLINE_READ_THRESHOLD(task->tk_rqstp);
-	min_size += RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
-	if (size < min_size)
-		size = min_size;
-
-	rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, size, flags);
-	if (IS_ERR(rb))
-		goto out_fail;
-	rb->rg_owner = req;
-
-	r_xprt->rx_stats.hardway_register_count += size;
-	rpcrdma_free_regbuf(&r_xprt->rx_ia, req->rl_sendbuf);
-	req->rl_sendbuf = rb;
-	goto out;
+	rpcrdma_set_xprtdata(rqst, req);
+	rqst->rq_buffer = req->rl_sendbuf->rg_base;
+	rqst->rq_rbuffer = req->rl_recvbuf->rg_base;
+	return 0;
 
 out_fail:
 	rpcrdma_buffer_put(req);
-	return NULL;
+	return -ENOMEM;
 }
 
-/*
- * This function returns all RDMA resources to the pool.
+/**
+ * xprt_rdma_free - release resources allocated by xprt_rdma_allocate
+ * @task: RPC task
+ *
+ * Caller guarantees rqst->rq_buffer is non-NULL.
  */
 static void
-xprt_rdma_free(void *buffer)
+xprt_rdma_free(struct rpc_task *task)
 {
-	struct rpcrdma_req *req;
-	struct rpcrdma_xprt *r_xprt;
-	struct rpcrdma_regbuf *rb;
+	struct rpc_rqst *rqst = task->tk_rqstp;
+	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
+	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
+	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 
-	if (buffer == NULL)
-		return;
-
-	rb = container_of(buffer, struct rpcrdma_regbuf, rg_base[0]);
-	req = rb->rg_owner;
 	if (req->rl_backchannel)
 		return;
 
-	r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);
-
 	dprintk("RPC:       %s: called on 0x%p\n", __func__, req->rl_reply);
 
-	r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req,
-					    !RPC_IS_ASYNC(req->rl_task));
-
+	ia->ri_ops->ro_unmap_safe(r_xprt, req, !RPC_IS_ASYNC(task));
+	rpcrdma_unmap_sges(ia, req);
 	rpcrdma_buffer_put(req);
 }
 
@@ -685,10 +722,11 @@
 		   r_xprt->rx_stats.failed_marshal_count,
 		   r_xprt->rx_stats.bad_reply_count,
 		   r_xprt->rx_stats.nomsg_call_count);
-	seq_printf(seq, "%lu %lu %lu\n",
+	seq_printf(seq, "%lu %lu %lu %lu\n",
 		   r_xprt->rx_stats.mrs_recovered,
 		   r_xprt->rx_stats.mrs_orphaned,
-		   r_xprt->rx_stats.mrs_allocated);
+		   r_xprt->rx_stats.mrs_allocated,
+		   r_xprt->rx_stats.local_inv_needed);
 }
 
 static int
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index be3178e..ec74289 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -129,15 +129,6 @@
 		       wc->status, wc->vendor_err);
 }
 
-static void
-rpcrdma_receive_worker(struct work_struct *work)
-{
-	struct rpcrdma_rep *rep =
-			container_of(work, struct rpcrdma_rep, rr_work);
-
-	rpcrdma_reply_handler(rep);
-}
-
 /* Perform basic sanity checking to avoid using garbage
  * to update the credit grant value.
  */
@@ -161,13 +152,13 @@
 }
 
 /**
- * rpcrdma_receive_wc - Invoked by RDMA provider for each polled Receive WC
+ * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
  * @cq:	completion queue (ignored)
  * @wc:	completed WR
  *
  */
 static void
-rpcrdma_receive_wc(struct ib_cq *cq, struct ib_wc *wc)
+rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct ib_cqe *cqe = wc->wr_cqe;
 	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
@@ -185,6 +176,9 @@
 		__func__, rep, wc->byte_len);
 
 	rep->rr_len = wc->byte_len;
+	rep->rr_wc_flags = wc->wc_flags;
+	rep->rr_inv_rkey = wc->ex.invalidate_rkey;
+
 	ib_dma_sync_single_for_cpu(rep->rr_device,
 				   rdmab_addr(rep->rr_rdmabuf),
 				   rep->rr_len, DMA_FROM_DEVICE);
@@ -204,6 +198,36 @@
 	goto out_schedule;
 }
 
+static void
+rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
+			       struct rdma_conn_param *param)
+{
+	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
+	const struct rpcrdma_connect_private *pmsg = param->private_data;
+	unsigned int rsize, wsize;
+
+	/* Default settings for RPC-over-RDMA Version One */
+	r_xprt->rx_ia.ri_reminv_expected = false;
+	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
+	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;
+
+	if (pmsg &&
+	    pmsg->cp_magic == rpcrdma_cmp_magic &&
+	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
+		r_xprt->rx_ia.ri_reminv_expected = true;
+		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
+		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
+	}
+
+	if (rsize < cdata->inline_rsize)
+		cdata->inline_rsize = rsize;
+	if (wsize < cdata->inline_wsize)
+		cdata->inline_wsize = wsize;
+	pr_info("rpcrdma: max send %u, max recv %u\n",
+		cdata->inline_wsize, cdata->inline_rsize);
+	rpcrdma_set_max_header_sizes(r_xprt);
+}
+
 static int
 rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
 {
@@ -244,6 +268,7 @@
 			" (%d initiator)\n",
 			__func__, attr->max_dest_rd_atomic,
 			attr->max_rd_atomic);
+		rpcrdma_update_connect_private(xprt, &event->param.conn);
 		goto connected;
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 		connstate = -ENOTCONN;
@@ -454,11 +479,12 @@
 rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 				struct rpcrdma_create_data_internal *cdata)
 {
+	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
 	struct ib_cq *sendcq, *recvcq;
 	unsigned int max_qp_wr;
 	int rc;
 
-	if (ia->ri_device->attrs.max_sge < RPCRDMA_MAX_IOVS) {
+	if (ia->ri_device->attrs.max_sge < RPCRDMA_MAX_SEND_SGES) {
 		dprintk("RPC:       %s: insufficient sge's available\n",
 			__func__);
 		return -ENOMEM;
@@ -487,7 +513,7 @@
 	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
 	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
 	ep->rep_attr.cap.max_recv_wr += 1;	/* drain cqe */
-	ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_IOVS;
+	ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_SEND_SGES;
 	ep->rep_attr.cap.max_recv_sge = 1;
 	ep->rep_attr.cap.max_inline_data = 0;
 	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
@@ -536,9 +562,14 @@
 	/* Initialize cma parameters */
 	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));
 
-	/* RPC/RDMA does not use private data */
-	ep->rep_remote_cma.private_data = NULL;
-	ep->rep_remote_cma.private_data_len = 0;
+	/* Prepare RDMA-CM private message */
+	pmsg->cp_magic = rpcrdma_cmp_magic;
+	pmsg->cp_version = RPCRDMA_CMP_VERSION;
+	pmsg->cp_flags |= ia->ri_ops->ro_send_w_inv_ok;
+	pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize);
+	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize);
+	ep->rep_remote_cma.private_data = pmsg;
+	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);
 
 	/* Client offers RDMA Read but does not initiate */
 	ep->rep_remote_cma.initiator_depth = 0;
@@ -849,6 +880,10 @@
 	req->rl_cqe.done = rpcrdma_wc_send;
 	req->rl_buffer = &r_xprt->rx_buf;
 	INIT_LIST_HEAD(&req->rl_registered);
+	req->rl_send_wr.next = NULL;
+	req->rl_send_wr.wr_cqe = &req->rl_cqe;
+	req->rl_send_wr.sg_list = req->rl_send_sge;
+	req->rl_send_wr.opcode = IB_WR_SEND;
 	return req;
 }
 
@@ -865,17 +900,21 @@
 	if (rep == NULL)
 		goto out;
 
-	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(ia, cdata->inline_rsize,
-					       GFP_KERNEL);
+	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(cdata->inline_rsize,
+					       DMA_FROM_DEVICE, GFP_KERNEL);
 	if (IS_ERR(rep->rr_rdmabuf)) {
 		rc = PTR_ERR(rep->rr_rdmabuf);
 		goto out_free;
 	}
 
 	rep->rr_device = ia->ri_device;
-	rep->rr_cqe.done = rpcrdma_receive_wc;
+	rep->rr_cqe.done = rpcrdma_wc_receive;
 	rep->rr_rxprt = r_xprt;
-	INIT_WORK(&rep->rr_work, rpcrdma_receive_worker);
+	INIT_WORK(&rep->rr_work, rpcrdma_reply_handler);
+	rep->rr_recv_wr.next = NULL;
+	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
+	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
+	rep->rr_recv_wr.num_sge = 1;
 	return rep;
 
 out_free:
@@ -966,17 +1005,18 @@
 }
 
 static void
-rpcrdma_destroy_rep(struct rpcrdma_ia *ia, struct rpcrdma_rep *rep)
+rpcrdma_destroy_rep(struct rpcrdma_rep *rep)
 {
-	rpcrdma_free_regbuf(ia, rep->rr_rdmabuf);
+	rpcrdma_free_regbuf(rep->rr_rdmabuf);
 	kfree(rep);
 }
 
 void
-rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
+rpcrdma_destroy_req(struct rpcrdma_req *req)
 {
-	rpcrdma_free_regbuf(ia, req->rl_sendbuf);
-	rpcrdma_free_regbuf(ia, req->rl_rdmabuf);
+	rpcrdma_free_regbuf(req->rl_recvbuf);
+	rpcrdma_free_regbuf(req->rl_sendbuf);
+	rpcrdma_free_regbuf(req->rl_rdmabuf);
 	kfree(req);
 }
 
@@ -1009,15 +1049,13 @@
 void
 rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
 {
-	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
-
 	cancel_delayed_work_sync(&buf->rb_recovery_worker);
 
 	while (!list_empty(&buf->rb_recv_bufs)) {
 		struct rpcrdma_rep *rep;
 
 		rep = rpcrdma_buffer_get_rep_locked(buf);
-		rpcrdma_destroy_rep(ia, rep);
+		rpcrdma_destroy_rep(rep);
 	}
 	buf->rb_send_count = 0;
 
@@ -1030,7 +1068,7 @@
 		list_del(&req->rl_all);
 
 		spin_unlock(&buf->rb_reqslock);
-		rpcrdma_destroy_req(ia, req);
+		rpcrdma_destroy_req(req);
 		spin_lock(&buf->rb_reqslock);
 	}
 	spin_unlock(&buf->rb_reqslock);
@@ -1129,7 +1167,7 @@
 	struct rpcrdma_buffer *buffers = req->rl_buffer;
 	struct rpcrdma_rep *rep = req->rl_reply;
 
-	req->rl_niovs = 0;
+	req->rl_send_wr.num_sge = 0;
 	req->rl_reply = NULL;
 
 	spin_lock(&buffers->rb_lock);
@@ -1171,70 +1209,81 @@
 	spin_unlock(&buffers->rb_lock);
 }
 
-/*
- * Wrappers for internal-use kmalloc memory registration, used by buffer code.
- */
-
 /**
- * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers
- * @ia: controlling rpcrdma_ia
+ * rpcrdma_alloc_regbuf - allocate and DMA-map memory for SEND/RECV buffers
  * @size: size of buffer to be allocated, in bytes
+ * @direction: direction of data movement
  * @flags: GFP flags
  *
- * Returns pointer to private header of an area of internally
- * registered memory, or an ERR_PTR. The registered buffer follows
- * the end of the private header.
+ * Returns an ERR_PTR, or a pointer to a regbuf, a buffer that
+ * can be persistently DMA-mapped for I/O.
  *
  * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
- * receiving the payload of RDMA RECV operations. regbufs are not
- * used for RDMA READ/WRITE operations, thus are registered only for
- * LOCAL access.
+ * receiving the payload of RDMA RECV operations. During Long Calls
+ * or Replies they may be registered externally via ro_map.
  */
 struct rpcrdma_regbuf *
-rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
+rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction,
+		     gfp_t flags)
 {
 	struct rpcrdma_regbuf *rb;
-	struct ib_sge *iov;
 
 	rb = kmalloc(sizeof(*rb) + size, flags);
 	if (rb == NULL)
-		goto out;
+		return ERR_PTR(-ENOMEM);
 
-	iov = &rb->rg_iov;
-	iov->addr = ib_dma_map_single(ia->ri_device,
-				      (void *)rb->rg_base, size,
-				      DMA_BIDIRECTIONAL);
-	if (ib_dma_mapping_error(ia->ri_device, iov->addr))
-		goto out_free;
+	rb->rg_device = NULL;
+	rb->rg_direction = direction;
+	rb->rg_iov.length = size;
 
-	iov->length = size;
-	iov->lkey = ia->ri_pd->local_dma_lkey;
-	rb->rg_size = size;
-	rb->rg_owner = NULL;
 	return rb;
+}
 
-out_free:
-	kfree(rb);
-out:
-	return ERR_PTR(-ENOMEM);
+/**
+ * __rpcrdma_map_regbuf - DMA-map a regbuf
+ * @ia: controlling rpcrdma_ia
+ * @rb: regbuf to be mapped
+ */
+bool
+__rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
+{
+	if (rb->rg_direction == DMA_NONE)
+		return false;
+
+	rb->rg_iov.addr = ib_dma_map_single(ia->ri_device,
+					    (void *)rb->rg_base,
+					    rdmab_length(rb),
+					    rb->rg_direction);
+	if (ib_dma_mapping_error(ia->ri_device, rdmab_addr(rb)))
+		return false;
+
+	rb->rg_device = ia->ri_device;
+	rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
+	return true;
+}
+
+static void
+rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
+{
+	if (!rpcrdma_regbuf_is_mapped(rb))
+		return;
+
+	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb),
+			    rdmab_length(rb), rb->rg_direction);
+	rb->rg_device = NULL;
 }
 
 /**
  * rpcrdma_free_regbuf - deregister and free registered buffer
- * @ia: controlling rpcrdma_ia
  * @rb: regbuf to be deregistered and freed
  */
 void
-rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
+rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb)
 {
-	struct ib_sge *iov;
-
 	if (!rb)
 		return;
 
-	iov = &rb->rg_iov;
-	ib_dma_unmap_single(ia->ri_device,
-			    iov->addr, iov->length, DMA_BIDIRECTIONAL);
+	rpcrdma_dma_unmap_regbuf(rb);
 	kfree(rb);
 }
 
@@ -1248,39 +1297,28 @@
 		struct rpcrdma_ep *ep,
 		struct rpcrdma_req *req)
 {
-	struct ib_device *device = ia->ri_device;
-	struct ib_send_wr send_wr, *send_wr_fail;
-	struct rpcrdma_rep *rep = req->rl_reply;
-	struct ib_sge *iov = req->rl_send_iov;
-	int i, rc;
+	struct ib_send_wr *send_wr = &req->rl_send_wr;
+	struct ib_send_wr *send_wr_fail;
+	int rc;
 
-	if (rep) {
-		rc = rpcrdma_ep_post_recv(ia, ep, rep);
+	if (req->rl_reply) {
+		rc = rpcrdma_ep_post_recv(ia, req->rl_reply);
 		if (rc)
 			return rc;
 		req->rl_reply = NULL;
 	}
 
-	send_wr.next = NULL;
-	send_wr.wr_cqe = &req->rl_cqe;
-	send_wr.sg_list = iov;
-	send_wr.num_sge = req->rl_niovs;
-	send_wr.opcode = IB_WR_SEND;
-
-	for (i = 0; i < send_wr.num_sge; i++)
-		ib_dma_sync_single_for_device(device, iov[i].addr,
-					      iov[i].length, DMA_TO_DEVICE);
 	dprintk("RPC:       %s: posting %d s/g entries\n",
-		__func__, send_wr.num_sge);
+		__func__, send_wr->num_sge);
 
 	if (DECR_CQCOUNT(ep) > 0)
-		send_wr.send_flags = 0;
+		send_wr->send_flags = 0;
 	else { /* Provider must take a send completion every now and then */
 		INIT_CQCOUNT(ep);
-		send_wr.send_flags = IB_SEND_SIGNALED;
+		send_wr->send_flags = IB_SEND_SIGNALED;
 	}
 
-	rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
+	rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
 	if (rc)
 		goto out_postsend_err;
 	return 0;
@@ -1290,32 +1328,24 @@
 	return -ENOTCONN;
 }
 
-/*
- * (Re)post a receive buffer.
- */
 int
 rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
-		     struct rpcrdma_ep *ep,
 		     struct rpcrdma_rep *rep)
 {
-	struct ib_recv_wr recv_wr, *recv_wr_fail;
+	struct ib_recv_wr *recv_wr_fail;
 	int rc;
 
-	recv_wr.next = NULL;
-	recv_wr.wr_cqe = &rep->rr_cqe;
-	recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
-	recv_wr.num_sge = 1;
-
-	ib_dma_sync_single_for_cpu(ia->ri_device,
-				   rdmab_addr(rep->rr_rdmabuf),
-				   rdmab_length(rep->rr_rdmabuf),
-				   DMA_BIDIRECTIONAL);
-
-	rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);
+	if (!rpcrdma_dma_map_regbuf(ia, rep->rr_rdmabuf))
+		goto out_map;
+	rc = ib_post_recv(ia->ri_id->qp, &rep->rr_recv_wr, &recv_wr_fail);
 	if (rc)
 		goto out_postrecv;
 	return 0;
 
+out_map:
+	pr_err("rpcrdma: failed to DMA map the Receive buffer\n");
+	return -EIO;
+
 out_postrecv:
 	pr_err("rpcrdma: ib_post_recv returned %i\n", rc);
 	return -ENOTCONN;
@@ -1333,7 +1363,6 @@
 {
 	struct rpcrdma_buffer *buffers = &r_xprt->rx_buf;
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
 	struct rpcrdma_rep *rep;
 	int rc;
 
@@ -1344,7 +1373,7 @@
 		rep = rpcrdma_buffer_get_rep_locked(buffers);
 		spin_unlock(&buffers->rb_lock);
 
-		rc = rpcrdma_ep_post_recv(ia, ep, rep);
+		rc = rpcrdma_ep_post_recv(ia, rep);
 		if (rc)
 			goto out_rc;
 	}
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index a71b0f5..0d35b76 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -70,9 +70,11 @@
 	struct ib_pd		*ri_pd;
 	struct completion	ri_done;
 	int			ri_async_rc;
+	unsigned int		ri_max_segs;
 	unsigned int		ri_max_frmr_depth;
 	unsigned int		ri_max_inline_write;
 	unsigned int		ri_max_inline_read;
+	bool			ri_reminv_expected;
 	struct ib_qp_attr	ri_qp_attr;
 	struct ib_qp_init_attr	ri_qp_init_attr;
 };
@@ -87,6 +89,7 @@
 	int			rep_connected;
 	struct ib_qp_init_attr	rep_attr;
 	wait_queue_head_t 	rep_connect_wait;
+	struct rpcrdma_connect_private	rep_cm_private;
 	struct rdma_conn_param	rep_remote_cma;
 	struct sockaddr_storage	rep_remote_addr;
 	struct delayed_work	rep_connect_worker;
@@ -112,9 +115,9 @@
  */
 
 struct rpcrdma_regbuf {
-	size_t			rg_size;
-	struct rpcrdma_req	*rg_owner;
 	struct ib_sge		rg_iov;
+	struct ib_device	*rg_device;
+	enum dma_data_direction	rg_direction;
 	__be32			rg_base[0] __attribute__ ((aligned(256)));
 };
 
@@ -162,7 +165,10 @@
  * The smallest inline threshold is 1024 bytes, ensuring that
  * at least 750 bytes are available for RPC messages.
  */
-#define RPCRDMA_MAX_HDR_SEGS	(8)
+enum {
+	RPCRDMA_MAX_HDR_SEGS = 8,
+	RPCRDMA_HDRBUF_SIZE = 256,
+};
 
 /*
  * struct rpcrdma_rep -- this structure encapsulates state required to recv
@@ -182,10 +188,13 @@
 struct rpcrdma_rep {
 	struct ib_cqe		rr_cqe;
 	unsigned int		rr_len;
+	int			rr_wc_flags;
+	u32			rr_inv_rkey;
 	struct ib_device	*rr_device;
 	struct rpcrdma_xprt	*rr_rxprt;
 	struct work_struct	rr_work;
 	struct list_head	rr_list;
+	struct ib_recv_wr	rr_recv_wr;
 	struct rpcrdma_regbuf	*rr_rdmabuf;
 };
 
@@ -276,19 +285,30 @@
 	char		*mr_offset;	/* kva if no page, else offset */
 };
 
-#define RPCRDMA_MAX_IOVS	(2)
+/* Reserve enough Send SGEs to send a maximum size inline request:
+ * - RPC-over-RDMA header
+ * - xdr_buf head iovec
+ * - RPCRDMA_MAX_INLINE bytes, possibly unaligned, in pages
+ * - xdr_buf tail iovec
+ */
+enum {
+	RPCRDMA_MAX_SEND_PAGES = PAGE_SIZE + RPCRDMA_MAX_INLINE - 1,
+	RPCRDMA_MAX_PAGE_SGES = (RPCRDMA_MAX_SEND_PAGES >> PAGE_SHIFT) + 1,
+	RPCRDMA_MAX_SEND_SGES = 1 + 1 + RPCRDMA_MAX_PAGE_SGES + 1,
+};
 
 struct rpcrdma_buffer;
 struct rpcrdma_req {
 	struct list_head	rl_free;
-	unsigned int		rl_niovs;
+	unsigned int		rl_mapped_sges;
 	unsigned int		rl_connect_cookie;
-	struct rpc_task		*rl_task;
 	struct rpcrdma_buffer	*rl_buffer;
-	struct rpcrdma_rep	*rl_reply;/* holder for reply buffer */
-	struct ib_sge		rl_send_iov[RPCRDMA_MAX_IOVS];
-	struct rpcrdma_regbuf	*rl_rdmabuf;
-	struct rpcrdma_regbuf	*rl_sendbuf;
+	struct rpcrdma_rep	*rl_reply;
+	struct ib_send_wr	rl_send_wr;
+	struct ib_sge		rl_send_sge[RPCRDMA_MAX_SEND_SGES];
+	struct rpcrdma_regbuf	*rl_rdmabuf;	/* xprt header */
+	struct rpcrdma_regbuf	*rl_sendbuf;	/* rq_snd_buf */
+	struct rpcrdma_regbuf	*rl_recvbuf;	/* rq_rcv_buf */
 
 	struct ib_cqe		rl_cqe;
 	struct list_head	rl_all;
@@ -298,14 +318,16 @@
 	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];
 };
 
+static inline void
+rpcrdma_set_xprtdata(struct rpc_rqst *rqst, struct rpcrdma_req *req)
+{
+	rqst->rq_xprtdata = req;
+}
+
 static inline struct rpcrdma_req *
 rpcr_to_rdmar(struct rpc_rqst *rqst)
 {
-	void *buffer = rqst->rq_buffer;
-	struct rpcrdma_regbuf *rb;
-
-	rb = container_of(buffer, struct rpcrdma_regbuf, rg_base);
-	return rb->rg_owner;
+	return rqst->rq_xprtdata;
 }
 
 /*
@@ -356,15 +378,6 @@
 	unsigned int	padding;	/* non-rdma write header padding */
 };
 
-#define RPCRDMA_INLINE_READ_THRESHOLD(rq) \
-	(rpcx_to_rdmad(rq->rq_xprt).inline_rsize)
-
-#define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\
-	(rpcx_to_rdmad(rq->rq_xprt).inline_wsize)
-
-#define RPCRDMA_INLINE_PAD_VALUE(rq)\
-	rpcx_to_rdmad(rq->rq_xprt).padding
-
 /*
  * Statistics for RPCRDMA
  */
@@ -386,6 +399,7 @@
 	unsigned long		mrs_recovered;
 	unsigned long		mrs_orphaned;
 	unsigned long		mrs_allocated;
+	unsigned long		local_inv_needed;
 };
 
 /*
@@ -409,6 +423,7 @@
 				      struct rpcrdma_mw *);
 	void		(*ro_release_mr)(struct rpcrdma_mw *);
 	const char	*ro_displayname;
+	const int	ro_send_w_inv_ok;
 };
 
 extern const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops;
@@ -461,15 +476,14 @@
 
 int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
 				struct rpcrdma_req *);
-int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_ep *,
-				struct rpcrdma_rep *);
+int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_rep *);
 
 /*
  * Buffer calls - xprtrdma/verbs.c
  */
 struct rpcrdma_req *rpcrdma_create_req(struct rpcrdma_xprt *);
 struct rpcrdma_rep *rpcrdma_create_rep(struct rpcrdma_xprt *);
-void rpcrdma_destroy_req(struct rpcrdma_ia *, struct rpcrdma_req *);
+void rpcrdma_destroy_req(struct rpcrdma_req *);
 int rpcrdma_buffer_create(struct rpcrdma_xprt *);
 void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
 
@@ -482,10 +496,24 @@
 
 void rpcrdma_defer_mr_recovery(struct rpcrdma_mw *);
 
-struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *,
-					    size_t, gfp_t);
-void rpcrdma_free_regbuf(struct rpcrdma_ia *,
-			 struct rpcrdma_regbuf *);
+struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(size_t, enum dma_data_direction,
+					    gfp_t);
+bool __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *, struct rpcrdma_regbuf *);
+void rpcrdma_free_regbuf(struct rpcrdma_regbuf *);
+
+static inline bool
+rpcrdma_regbuf_is_mapped(struct rpcrdma_regbuf *rb)
+{
+	return rb->rg_device != NULL;
+}
+
+static inline bool
+rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
+{
+	if (likely(rpcrdma_regbuf_is_mapped(rb)))
+		return true;
+	return __rpcrdma_dma_map_regbuf(ia, rb);
+}
 
 int rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *, unsigned int);
 
@@ -507,15 +535,25 @@
  */
 void rpcrdma_connect_worker(struct work_struct *);
 void rpcrdma_conn_func(struct rpcrdma_ep *);
-void rpcrdma_reply_handler(struct rpcrdma_rep *);
+void rpcrdma_reply_handler(struct work_struct *);
 
 /*
  * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
  */
+
+enum rpcrdma_chunktype {
+	rpcrdma_noch = 0,
+	rpcrdma_readch,
+	rpcrdma_areadch,
+	rpcrdma_writech,
+	rpcrdma_replych
+};
+
+bool rpcrdma_prepare_send_sges(struct rpcrdma_ia *, struct rpcrdma_req *,
+			       u32, struct xdr_buf *, enum rpcrdma_chunktype);
+void rpcrdma_unmap_sges(struct rpcrdma_ia *, struct rpcrdma_req *);
 int rpcrdma_marshal_req(struct rpc_rqst *);
-void rpcrdma_set_max_header_sizes(struct rpcrdma_ia *,
-				  struct rpcrdma_create_data_internal *,
-				  unsigned int);
+void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
 
 /* RPC/RDMA module init - xprtrdma/transport.c
  */
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index bf16883..0137af1 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -473,7 +473,16 @@
 	spin_unlock_bh(&xprt->transport_lock);
 
 	/* Race breaker in case memory is freed before above code is called */
-	sk->sk_write_space(sk);
+	if (ret == -EAGAIN) {
+		struct socket_wq *wq;
+
+		rcu_read_lock();
+		wq = rcu_dereference(sk->sk_wq);
+		set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
+		rcu_read_unlock();
+
+		sk->sk_write_space(sk);
+	}
 	return ret;
 }
 
@@ -2533,35 +2542,38 @@
  * we allocate pages instead doing a kmalloc like rpc_malloc is because we want
  * to use the server side send routines.
  */
-static void *bc_malloc(struct rpc_task *task, size_t size)
+static int bc_malloc(struct rpc_task *task)
 {
+	struct rpc_rqst *rqst = task->tk_rqstp;
+	size_t size = rqst->rq_callsize;
 	struct page *page;
 	struct rpc_buffer *buf;
 
-	WARN_ON_ONCE(size > PAGE_SIZE - sizeof(struct rpc_buffer));
-	if (size > PAGE_SIZE - sizeof(struct rpc_buffer))
-		return NULL;
+	if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) {
+		WARN_ONCE(1, "xprtsock: large bc buffer request (size %zu)\n",
+			  size);
+		return -EINVAL;
+	}
 
 	page = alloc_page(GFP_KERNEL);
 	if (!page)
-		return NULL;
+		return -ENOMEM;
 
 	buf = page_address(page);
 	buf->len = PAGE_SIZE;
 
-	return buf->data;
+	rqst->rq_buffer = buf->data;
+	return 0;
 }
 
 /*
  * Free the space allocated in the bc_alloc routine
  */
-static void bc_free(void *buffer)
+static void bc_free(struct rpc_task *task)
 {
+	void *buffer = task->tk_rqstp->rq_buffer;
 	struct rpc_buffer *buf;
 
-	if (!buffer)
-		return;
-
 	buf = container_of(buffer, struct rpc_buffer, data);
 	free_page((unsigned long)buf);
 }
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index d80cd3f..78cab9c 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -407,6 +407,7 @@
 	if (ntohs(addr->proto) == ETH_P_IP) {
 		struct sockaddr_in ip4;
 
+		memset(&ip4, 0, sizeof(ip4));
 		ip4.sin_family = AF_INET;
 		ip4.sin_port = addr->port;
 		ip4.sin_addr.s_addr = addr->ipv4.s_addr;
@@ -417,6 +418,7 @@
 	} else if (ntohs(addr->proto) == ETH_P_IPV6) {
 		struct sockaddr_in6 ip6;
 
+		memset(&ip6, 0, sizeof(ip6));
 		ip6.sin6_family = AF_INET6;
 		ip6.sin6_port  = addr->port;
 		memcpy(&ip6.sin6_addr, &addr->ipv6, sizeof(struct in6_addr));
diff --git a/samples/Kconfig b/samples/Kconfig
index 85c405f..a6d2a43 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -99,4 +99,10 @@
 	  Build samples of seccomp filters using various methods of
 	  BPF filter construction.
 
+config SAMPLE_BLACKFIN_GPTIMERS
+	tristate "Build blackfin gptimers sample code -- loadable modules only"
+	depends on BLACKFIN && BFIN_GPTIMERS && m
+	help
+	  Build samples of blackfin gptimers sample module.
+
 endif # SAMPLES
diff --git a/samples/Makefile b/samples/Makefile
index 1a20169..e17d66d 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -2,4 +2,4 @@
 
 obj-$(CONFIG_SAMPLES)	+= kobject/ kprobes/ trace_events/ livepatch/ \
 			   hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/ \
-			   configfs/ connector/ v4l/ trace_printk/
+			   configfs/ connector/ v4l/ trace_printk/ blackfin/
diff --git a/Documentation/auxdisplay/.gitignore b/samples/auxdisplay/.gitignore
similarity index 100%
rename from Documentation/auxdisplay/.gitignore
rename to samples/auxdisplay/.gitignore
diff --git a/samples/auxdisplay/Makefile b/samples/auxdisplay/Makefile
new file mode 100644
index 0000000..05e471f
--- /dev/null
+++ b/samples/auxdisplay/Makefile
@@ -0,0 +1,9 @@
+CC := $(CROSS_COMPILE)gcc
+CFLAGS := -I../../usr/include
+
+PROGS := cfag12864b-example
+
+all: $(PROGS)
+
+clean:
+	rm -fr $(PROGS)
diff --git a/Documentation/auxdisplay/cfag12864b-example.c b/samples/auxdisplay/cfag12864b-example.c
similarity index 100%
rename from Documentation/auxdisplay/cfag12864b-example.c
rename to samples/auxdisplay/cfag12864b-example.c
diff --git a/samples/blackfin/Makefile b/samples/blackfin/Makefile
new file mode 100644
index 0000000..89b86cf
--- /dev/null
+++ b/samples/blackfin/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SAMPLE_BLACKFIN_GPTIMERS) += gptimers-example.o
diff --git a/Documentation/blackfin/gptimers-example.c b/samples/blackfin/gptimers-example.c
similarity index 100%
rename from Documentation/blackfin/gptimers-example.c
rename to samples/blackfin/gptimers-example.c
diff --git a/Documentation/misc-devices/mei/.gitignore b/samples/mei/.gitignore
similarity index 100%
rename from Documentation/misc-devices/mei/.gitignore
rename to samples/mei/.gitignore
diff --git a/samples/mei/Makefile b/samples/mei/Makefile
new file mode 100644
index 0000000..7aac216
--- /dev/null
+++ b/samples/mei/Makefile
@@ -0,0 +1,9 @@
+CC := $(CROSS_COMPILE)gcc
+CFLAGS := -I../../usr/include
+
+PROGS := mei-amt-version
+
+all: $(PROGS)
+
+clean:
+	rm -fr $(PROGS)
diff --git a/Documentation/misc-devices/mei/TODO b/samples/mei/TODO
similarity index 100%
rename from Documentation/misc-devices/mei/TODO
rename to samples/mei/TODO
diff --git a/Documentation/misc-devices/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
similarity index 100%
rename from Documentation/misc-devices/mei/mei-amt-version.c
rename to samples/mei/mei-amt-version.c
diff --git a/Documentation/mic/mpssd/.gitignore b/samples/mic/mpssd/.gitignore
similarity index 100%
rename from Documentation/mic/mpssd/.gitignore
rename to samples/mic/mpssd/.gitignore
diff --git a/samples/mic/mpssd/Makefile b/samples/mic/mpssd/Makefile
new file mode 100644
index 0000000..3e3ef91
--- /dev/null
+++ b/samples/mic/mpssd/Makefile
@@ -0,0 +1,27 @@
+ifndef CROSS_COMPILE
+uname_M := $(shell uname -m 2>/dev/null || echo not)
+ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
+
+ifeq ($(ARCH),x86)
+
+PROGS := mpssd
+CC = $(CROSS_COMPILE)gcc
+CFLAGS := -I../../../usr/include -I../../../tools/include
+
+ifdef DEBUG
+CFLAGS += -DDEBUG=$(DEBUG)
+endif
+
+all: $(PROGS)
+mpssd: mpssd.c sysfs.c
+	$(CC) $(CFLAGS) mpssd.c sysfs.c -o mpssd -lpthread
+
+install:
+	install mpssd /usr/sbin/mpssd
+	install micctrl /usr/sbin/micctrl
+
+clean:
+	rm -fr $(PROGS)
+
+endif
+endif
diff --git a/Documentation/mic/mpssd/micctrl b/samples/mic/mpssd/micctrl
similarity index 100%
rename from Documentation/mic/mpssd/micctrl
rename to samples/mic/mpssd/micctrl
diff --git a/Documentation/mic/mpssd/mpss b/samples/mic/mpssd/mpss
similarity index 100%
rename from Documentation/mic/mpssd/mpss
rename to samples/mic/mpssd/mpss
diff --git a/Documentation/mic/mpssd/mpssd.c b/samples/mic/mpssd/mpssd.c
similarity index 100%
rename from Documentation/mic/mpssd/mpssd.c
rename to samples/mic/mpssd/mpssd.c
diff --git a/Documentation/mic/mpssd/mpssd.h b/samples/mic/mpssd/mpssd.h
similarity index 100%
rename from Documentation/mic/mpssd/mpssd.h
rename to samples/mic/mpssd/mpssd.h
diff --git a/Documentation/mic/mpssd/sysfs.c b/samples/mic/mpssd/sysfs.c
similarity index 100%
rename from Documentation/mic/mpssd/sysfs.c
rename to samples/mic/mpssd/sysfs.c
diff --git a/Documentation/timers/.gitignore b/samples/timers/.gitignore
similarity index 100%
rename from Documentation/timers/.gitignore
rename to samples/timers/.gitignore
diff --git a/samples/timers/Makefile b/samples/timers/Makefile
new file mode 100644
index 0000000..a5c3c4a
--- /dev/null
+++ b/samples/timers/Makefile
@@ -0,0 +1,15 @@
+ifndef CROSS_COMPILE
+uname_M := $(shell uname -m 2>/dev/null || echo not)
+ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
+
+ifeq ($(ARCH),x86)
+CC := $(CROSS_COMPILE)gcc
+PROGS := hpet_example
+
+all: $(PROGS)
+
+clean:
+	rm -fr $(PROGS)
+
+endif
+endif
diff --git a/Documentation/timers/hpet_example.c b/samples/timers/hpet_example.c
similarity index 100%
rename from Documentation/timers/hpet_example.c
rename to samples/timers/hpet_example.c
diff --git a/samples/watchdog/.gitignore b/samples/watchdog/.gitignore
new file mode 100644
index 0000000..ff0ebb5
--- /dev/null
+++ b/samples/watchdog/.gitignore
@@ -0,0 +1 @@
+watchdog-simple
diff --git a/samples/watchdog/Makefile b/samples/watchdog/Makefile
new file mode 100644
index 0000000..9b53d89
--- /dev/null
+++ b/samples/watchdog/Makefile
@@ -0,0 +1,8 @@
+CC := $(CROSS_COMPILE)gcc
+PROGS := watchdog-simple
+
+all: $(PROGS)
+
+clean:
+	rm -fr $(PROGS)
+
diff --git a/Documentation/watchdog/src/watchdog-simple.c b/samples/watchdog/watchdog-simple.c
similarity index 100%
rename from Documentation/watchdog/src/watchdog-simple.c
rename to samples/watchdog/watchdog-simple.c
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 11602e5..de46ab0 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -81,6 +81,7 @@
 
 ifneq ($(strip $(lib-y) $(lib-m) $(lib-)),)
 lib-target := $(obj)/lib.a
+obj-y += $(obj)/lib-ksyms.o
 endif
 
 ifneq ($(strip $(obj-y) $(obj-m) $(obj-) $(subdir-m) $(lib-target)),)
@@ -358,12 +359,22 @@
 # Rule to compile a set of .o files into one .o file
 #
 ifdef builtin-target
-quiet_cmd_link_o_target = LD      $@
+
+ifdef CONFIG_THIN_ARCHIVES
+  cmd_make_builtin = rm -f $@; $(AR) rcST$(KBUILD_ARFLAGS)
+  cmd_make_empty_builtin = rm -f $@; $(AR) rcST$(KBUILD_ARFLAGS)
+  quiet_cmd_link_o_target = AR      $@
+else
+  cmd_make_builtin = $(LD) $(ld_flags) -r -o
+  cmd_make_empty_builtin = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS)
+  quiet_cmd_link_o_target = LD      $@
+endif
+
 # If the list of objects to link is empty, just create an empty built-in.o
 cmd_link_o_target = $(if $(strip $(obj-y)),\
-		      $(LD) $(ld_flags) -r -o $@ $(filter $(obj-y), $^) \
+		      $(cmd_make_builtin) $@ $(filter $(obj-y), $^) \
 		      $(cmd_secanalysis),\
-		      rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@)
+		      $(cmd_make_empty_builtin) $@)
 
 $(builtin-target): $(obj-y) FORCE
 	$(call if_changed,link_o_target)
@@ -389,12 +400,36 @@
 #
 ifdef lib-target
 quiet_cmd_link_l_target = AR      $@
-cmd_link_l_target = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@ $(lib-y)
+
+ifdef CONFIG_THIN_ARCHIVES
+  cmd_link_l_target = rm -f $@; $(AR) rcsT$(KBUILD_ARFLAGS) $@ $(lib-y)
+else
+  cmd_link_l_target = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@ $(lib-y)
+endif
 
 $(lib-target): $(lib-y) FORCE
 	$(call if_changed,link_l_target)
 
 targets += $(lib-target)
+
+dummy-object = $(obj)/.lib_exports.o
+ksyms-lds = $(dot-target).lds
+ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
+ref_prefix = EXTERN(_
+else
+ref_prefix = EXTERN(
+endif
+
+quiet_cmd_export_list = EXPORTS $@
+cmd_export_list = $(OBJDUMP) -h $< | \
+	sed -ne '/___ksymtab/{s/.*+/$(ref_prefix)/;s/ .*/)/;p}' >$(ksyms-lds);\
+	rm -f $(dummy-object);\
+	$(AR) rcs$(KBUILD_ARFLAGS) $(dummy-object);\
+	$(LD) $(ld_flags) -r -o $@ -T $(ksyms-lds) $(dummy-object);\
+	rm $(dummy-object) $(ksyms-lds)
+
+$(obj)/lib-ksyms.o: $(lib-target) FORCE
+	$(call if_changed,export_list)
 endif
 
 #
diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
index 61f0e6d..060d2cb 100644
--- a/scripts/Makefile.gcc-plugins
+++ b/scripts/Makefile.gcc-plugins
@@ -6,6 +6,12 @@
 
   gcc-plugin-$(CONFIG_GCC_PLUGIN_CYC_COMPLEXITY)	+= cyc_complexity_plugin.so
 
+  gcc-plugin-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY)	+= latent_entropy_plugin.so
+  gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY)	+= -DLATENT_ENTROPY_PLUGIN
+  ifdef CONFIG_PAX_LATENT_ENTROPY
+    DISABLE_LATENT_ENTROPY_PLUGIN			+= -fplugin-arg-latent_entropy_plugin-disable
+  endif
+
   ifdef CONFIG_GCC_PLUGIN_SANCOV
     ifeq ($(CFLAGS_KCOV),)
       # It is needed because of the gcc-plugin.sh and gcc version checks.
@@ -21,7 +27,8 @@
 
   GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y))
 
-  export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN GCC_PLUGIN_SUBDIR SANCOV_PLUGIN
+  export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN GCC_PLUGIN_SUBDIR
+  export SANCOV_PLUGIN DISABLE_LATENT_ENTROPY_PLUGIN
 
   ifneq ($(PLUGINCC),)
     # SANCOV_PLUGIN can be only in CFLAGS_KCOV because avoid duplication.
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 1366a94..16923ba 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -115,14 +115,18 @@
 
 targets += $(modules:.ko=.mod.o)
 
-# Step 6), final link of the modules
+ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink)
+
+# Step 6), final link of the modules with optional arch pass after final link
 quiet_cmd_ld_ko_o = LD [M]  $@
-      cmd_ld_ko_o = $(LD) -r $(LDFLAGS)                                 \
-                             $(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE) \
-                             -o $@ $(filter-out FORCE,$^)
+      cmd_ld_ko_o =                                                     \
+	$(LD) -r $(LDFLAGS)                                             \
+                 $(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE)             \
+                 -o $@ $(filter-out FORCE,$^) ;                         \
+	$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
 
 $(modules): %.ko :%.o %.mod.o FORCE
-	$(call if_changed,ld_ko_o)
+	+$(call if_changed,ld_ko_o)
 
 targets += $(modules)
 
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index 746ec1e..fff818b 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -82,8 +82,7 @@
  * to date before even starting the recursive build, so it's too late
  * at this point anyway.
  *
- * The algorithm to grep for "CONFIG_..." is bit unusual, but should
- * be fast ;-) We don't even try to really parse the header files, but
+ * We don't even try to really parse the header files, but
  * merely grep, i.e. if CONFIG_FOO is mentioned in a comment, it will
  * be picked up as well. It's not a problem with respect to
  * correctness, since that can only give too many dependencies, thus
@@ -115,11 +114,6 @@
 #include <ctype.h>
 #include <arpa/inet.h>
 
-#define INT_CONF ntohl(0x434f4e46)
-#define INT_ONFI ntohl(0x4f4e4649)
-#define INT_NFIG ntohl(0x4e464947)
-#define INT_FIG_ ntohl(0x4649475f)
-
 int insert_extra_deps;
 char *target;
 char *depfile;
@@ -241,37 +235,22 @@
 	print_config(m, slen);
 }
 
-static void parse_config_file(const char *map, size_t len)
+static void parse_config_file(const char *p)
 {
-	const int *end = (const int *) (map + len);
-	/* start at +1, so that p can never be < map */
-	const int *m   = (const int *) map + 1;
-	const char *p, *q;
+	const char *q, *r;
 
-	for (; m < end; m++) {
-		if (*m == INT_CONF) { p = (char *) m  ; goto conf; }
-		if (*m == INT_ONFI) { p = (char *) m-1; goto conf; }
-		if (*m == INT_NFIG) { p = (char *) m-2; goto conf; }
-		if (*m == INT_FIG_) { p = (char *) m-3; goto conf; }
-		continue;
-	conf:
-		if (p > map + len - 7)
-			continue;
-		if (memcmp(p, "CONFIG_", 7))
-			continue;
+	while ((p = strstr(p, "CONFIG_"))) {
 		p += 7;
-		for (q = p; q < map + len; q++) {
-			if (!(isalnum(*q) || *q == '_'))
-				goto found;
-		}
-		continue;
-
-	found:
-		if (!memcmp(q - 7, "_MODULE", 7))
-			q -= 7;
-		if (q - p < 0)
-			continue;
-		use_config(p, q - p);
+		q = p;
+		while (*q && (isalnum(*q) || *q == '_'))
+			q++;
+		if (memcmp(q - 7, "_MODULE", 7) == 0)
+			r = q - 7;
+		else
+			r = q;
+		if (r > p)
+			use_config(p, r - p);
+		p = q;
 	}
 }
 
@@ -291,7 +270,7 @@
 {
 	struct stat st;
 	int fd;
-	void *map;
+	char *map;
 
 	fd = open(filename, O_RDONLY);
 	if (fd < 0) {
@@ -308,18 +287,23 @@
 		close(fd);
 		return;
 	}
-	map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
-	if ((long) map == -1) {
-		perror("fixdep: mmap");
+	map = malloc(st.st_size + 1);
+	if (!map) {
+		perror("fixdep: malloc");
 		close(fd);
 		return;
 	}
-
-	parse_config_file(map, st.st_size);
-
-	munmap(map, st.st_size);
-
+	if (read(fd, map, st.st_size) != st.st_size) {
+		perror("fixdep: read");
+		close(fd);
+		return;
+	}
+	map[st.st_size] = '\0';
 	close(fd);
+
+	parse_config_file(map);
+
+	free(map);
 }
 
 /*
@@ -446,22 +430,8 @@
 	close(fd);
 }
 
-static void traps(void)
-{
-	static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
-	int *p = (int *)test;
-
-	if (*p != INT_CONF) {
-		fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianness? %#x\n",
-			*p);
-		exit(2);
-	}
-}
-
 int main(int argc, char *argv[])
 {
-	traps();
-
 	if (argc == 5 && !strcmp(argv[1], "-e")) {
 		insert_extra_deps = 1;
 		argv++;
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 206a6b3..a8368d1 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -54,6 +54,7 @@
 my $spelling_file = "$D/spelling.txt";
 my $codespell = 0;
 my $codespellfile = "/usr/share/codespell/dictionary.txt";
+my $conststructsfile = "$D/const_structs.checkpatch";
 my $color = 1;
 my $allow_c99_comments = 1;
 
@@ -523,7 +524,11 @@
 	["module_param_array_named", 5],
 	["debugfs_create_(?:file|u8|u16|u32|u64|x8|x16|x32|x64|size_t|atomic_t|bool|blob|regset32|u32_array)", 2],
 	["proc_create(?:_data|)", 2],
-	["(?:CLASS|DEVICE|SENSOR)_ATTR", 2],
+	["(?:CLASS|DEVICE|SENSOR|SENSOR_DEVICE|IIO_DEVICE)_ATTR", 2],
+	["IIO_DEV_ATTR_[A-Z_]+", 1],
+	["SENSOR_(?:DEVICE_|)ATTR_2", 2],
+	["SENSOR_TEMPLATE(?:_2|)", 3],
+	["__ATTR", 2],
 );
 
 #Create a search pattern for all these functions to speed up a loop below
@@ -541,6 +546,32 @@
 	0[0-7][0-7][2367]
 }x;
 
+our %mode_permission_string_types = (
+	"S_IRWXU" => 0700,
+	"S_IRUSR" => 0400,
+	"S_IWUSR" => 0200,
+	"S_IXUSR" => 0100,
+	"S_IRWXG" => 0070,
+	"S_IRGRP" => 0040,
+	"S_IWGRP" => 0020,
+	"S_IXGRP" => 0010,
+	"S_IRWXO" => 0007,
+	"S_IROTH" => 0004,
+	"S_IWOTH" => 0002,
+	"S_IXOTH" => 0001,
+	"S_IRWXUGO" => 0777,
+	"S_IRUGO" => 0444,
+	"S_IWUGO" => 0222,
+	"S_IXUGO" => 0111,
+);
+
+#Create a search pattern for all these strings to speed up a loop below
+our $mode_perms_string_search = "";
+foreach my $entry (keys %mode_permission_string_types) {
+	$mode_perms_string_search .= '|' if ($mode_perms_string_search ne "");
+	$mode_perms_string_search .= $entry;
+}
+
 our $allowed_asm_includes = qr{(?x:
 	irq|
 	memory|
@@ -598,6 +629,29 @@
 
 $misspellings = join("|", sort keys %spelling_fix) if keys %spelling_fix;
 
+my $const_structs = "";
+if (open(my $conststructs, '<', $conststructsfile)) {
+	while (<$conststructs>) {
+		my $line = $_;
+
+		$line =~ s/\s*\n?$//g;
+		$line =~ s/^\s*//g;
+
+		next if ($line =~ m/^\s*#/);
+		next if ($line =~ m/^\s*$/);
+		if ($line =~ /\s/) {
+			print("$conststructsfile: '$line' invalid - ignored\n");
+			next;
+		}
+
+		$const_structs .= '|' if ($const_structs ne "");
+		$const_structs .= $line;
+	}
+	close($conststructsfile);
+} else {
+	warn "No structs that should be const will be found - file '$conststructsfile': $!\n";
+}
+
 sub build_types {
 	my $mods = "(?x:  \n" . join("|\n  ", (@modifierList, @modifierListFile)) . "\n)";
 	my $all = "(?x:  \n" . join("|\n  ", (@typeList, @typeListFile)) . "\n)";
@@ -704,6 +758,16 @@
 	}
 }
 
+sub is_maintained_obsolete {
+	my ($filename) = @_;
+
+	return 0 if (!(-e "$root/scripts/get_maintainer.pl"));
+
+	my $status = `perl $root/scripts/get_maintainer.pl --status --nom --nol --nogit --nogit-fallback -f $filename 2>&1`;
+
+	return $status =~ /obsolete/i;
+}
+
 my $camelcase_seeded = 0;
 sub seed_camelcase_includes {
 	return if ($camelcase_seeded);
@@ -2289,6 +2353,10 @@
 		}
 
 		if ($found_file) {
+			if (is_maintained_obsolete($realfile)) {
+				WARN("OBSOLETE",
+				     "$realfile is marked as 'obsolete' in the MAINTAINERS hierarchy.  No unnecessary modifications please.\n");
+			}
 			if ($realfile =~ m@^(?:drivers/net/|net/|drivers/staging/)@) {
 				$check = 1;
 			} else {
@@ -2939,6 +3007,30 @@
 			     "Block comments use a trailing */ on a separate line\n" . $herecurr);
 		}
 
+# Block comment * alignment
+		if ($prevline =~ /$;[ \t]*$/ &&			#ends in comment
+		    $line =~ /^\+[ \t]*$;/ &&			#leading comment
+		    $rawline =~ /^\+[ \t]*\*/ &&		#leading *
+		    (($prevrawline =~ /^\+.*?\/\*/ &&		#leading /*
+		      $prevrawline !~ /\*\/[ \t]*$/) ||		#no trailing */
+		     $prevrawline =~ /^\+[ \t]*\*/)) {		#leading *
+			my $oldindent;
+			$prevrawline =~ m@^\+([ \t]*/?)\*@;
+			if (defined($1)) {
+				$oldindent = expand_tabs($1);
+			} else {
+				$prevrawline =~ m@^\+(.*/?)\*@;
+				$oldindent = expand_tabs($1);
+			}
+			$rawline =~ m@^\+([ \t]*)\*@;
+			my $newindent = $1;
+			$newindent = expand_tabs($newindent);
+			if (length($oldindent) ne length($newindent)) {
+				WARN("BLOCK_COMMENT_STYLE",
+				     "Block comments should align the * on each line\n" . $hereprev);
+			}
+		}
+
 # check for missing blank lines after struct/union declarations
 # with exceptions for various attributes and macros
 		if ($prevline =~ /^[\+ ]};?\s*$/ &&
@@ -4665,7 +4757,17 @@
 			$has_flow_statement = 1 if ($ctx =~ /\b(goto|return)\b/);
 			$has_arg_concat = 1 if ($ctx =~ /\#\#/ && $ctx !~ /\#\#\s*(?:__VA_ARGS__|args)\b/);
 
-			$dstat =~ s/^.\s*\#\s*define\s+$Ident(?:\([^\)]*\))?\s*//;
+			$dstat =~ s/^.\s*\#\s*define\s+$Ident(\([^\)]*\))?\s*//;
+			my $define_args = $1;
+			my $define_stmt = $dstat;
+			my @def_args = ();
+
+			if (defined $define_args && $define_args ne "") {
+				$define_args = substr($define_args, 1, length($define_args) - 2);
+				$define_args =~ s/\s*//g;
+				@def_args = split(",", $define_args);
+			}
+
 			$dstat =~ s/$;//g;
 			$dstat =~ s/\\\n.//g;
 			$dstat =~ s/^\s*//s;
@@ -4701,6 +4803,15 @@
 				^\[
 			}x;
 			#print "REST<$rest> dstat<$dstat> ctx<$ctx>\n";
+
+			$ctx =~ s/\n*$//;
+			my $herectx = $here . "\n";
+			my $stmt_cnt = statement_rawlines($ctx);
+
+			for (my $n = 0; $n < $stmt_cnt; $n++) {
+				$herectx .= raw_line($linenr, $n) . "\n";
+			}
+
 			if ($dstat ne '' &&
 			    $dstat !~ /^(?:$Ident|-?$Constant),$/ &&			# 10, // foo(),
 			    $dstat !~ /^(?:$Ident|-?$Constant);$/ &&			# foo();
@@ -4716,13 +4827,6 @@
 			    $dstat !~ /^\(\{/ &&						# ({...
 			    $ctx !~ /^.\s*#\s*define\s+TRACE_(?:SYSTEM|INCLUDE_FILE|INCLUDE_PATH)\b/)
 			{
-				$ctx =~ s/\n*$//;
-				my $herectx = $here . "\n";
-				my $cnt = statement_rawlines($ctx);
-
-				for (my $n = 0; $n < $cnt; $n++) {
-					$herectx .= raw_line($linenr, $n) . "\n";
-				}
 
 				if ($dstat =~ /;/) {
 					ERROR("MULTISTATEMENT_MACRO_USE_DO_WHILE",
@@ -4731,6 +4835,46 @@
 					ERROR("COMPLEX_MACRO",
 					      "Macros with complex values should be enclosed in parentheses\n" . "$herectx");
 				}
+
+			}
+
+			# Make $define_stmt single line, comment-free, etc
+			my @stmt_array = split('\n', $define_stmt);
+			my $first = 1;
+			$define_stmt = "";
+			foreach my $l (@stmt_array) {
+				$l =~ s/\\$//;
+				if ($first) {
+					$define_stmt = $l;
+					$first = 0;
+				} elsif ($l =~ /^[\+ ]/) {
+					$define_stmt .= substr($l, 1);
+				}
+			}
+			$define_stmt =~ s/$;//g;
+			$define_stmt =~ s/\s+/ /g;
+			$define_stmt = trim($define_stmt);
+
+# check if any macro arguments are reused (ignore '...' and 'type')
+			foreach my $arg (@def_args) {
+			        next if ($arg =~ /\.\.\./);
+			        next if ($arg =~ /^type$/i);
+				my $tmp = $define_stmt;
+				$tmp =~ s/\b(typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g;
+				$tmp =~ s/\#+\s*$arg\b//g;
+				$tmp =~ s/\b$arg\s*\#\#//g;
+				my $use_cnt = $tmp =~ s/\b$arg\b//g;
+				if ($use_cnt > 1) {
+					CHK("MACRO_ARG_REUSE",
+					    "Macro argument reuse '$arg' - possible side-effects?\n" . "$herectx");
+				    }
+# check if any macro arguments may have other precedence issues
+				if ($define_stmt =~ m/($Operators)?\s*\b$arg\b\s*($Operators)?/m &&
+				    ((defined($1) && $1 ne ',') ||
+				     (defined($2) && $2 ne ','))) {
+					CHK("MACRO_ARG_PRECEDENCE",
+					    "Macro argument '$arg' may be better as '($arg)' to avoid precedence issues\n" . "$herectx");
+				}
 			}
 
 # check for macros with flow control, but without ## concatenation
@@ -5495,46 +5639,46 @@
 		}
 
 # Check for memcpy(foo, bar, ETH_ALEN) that could be ether_addr_copy(foo, bar)
-		if ($^V && $^V ge 5.10.0 &&
-		    defined $stat &&
-		    $stat =~ /^\+(?:.*?)\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
-			if (WARN("PREFER_ETHER_ADDR_COPY",
-				 "Prefer ether_addr_copy() over memcpy() if the Ethernet addresses are __aligned(2)\n" . "$here\n$stat\n") &&
-			    $fix) {
-				$fixed[$fixlinenr] =~ s/\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/ether_addr_copy($2, $7)/;
-			}
-		}
+#		if ($^V && $^V ge 5.10.0 &&
+#		    defined $stat &&
+#		    $stat =~ /^\+(?:.*?)\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
+#			if (WARN("PREFER_ETHER_ADDR_COPY",
+#				 "Prefer ether_addr_copy() over memcpy() if the Ethernet addresses are __aligned(2)\n" . "$here\n$stat\n") &&
+#			    $fix) {
+#				$fixed[$fixlinenr] =~ s/\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/ether_addr_copy($2, $7)/;
+#			}
+#		}
 
 # Check for memcmp(foo, bar, ETH_ALEN) that could be ether_addr_equal*(foo, bar)
-		if ($^V && $^V ge 5.10.0 &&
-		    defined $stat &&
-		    $stat =~ /^\+(?:.*?)\bmemcmp\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
-			WARN("PREFER_ETHER_ADDR_EQUAL",
-			     "Prefer ether_addr_equal() or ether_addr_equal_unaligned() over memcmp()\n" . "$here\n$stat\n")
-		}
+#		if ($^V && $^V ge 5.10.0 &&
+#		    defined $stat &&
+#		    $stat =~ /^\+(?:.*?)\bmemcmp\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
+#			WARN("PREFER_ETHER_ADDR_EQUAL",
+#			     "Prefer ether_addr_equal() or ether_addr_equal_unaligned() over memcmp()\n" . "$here\n$stat\n")
+#		}
 
 # check for memset(foo, 0x0, ETH_ALEN) that could be eth_zero_addr
 # check for memset(foo, 0xFF, ETH_ALEN) that could be eth_broadcast_addr
-		if ($^V && $^V ge 5.10.0 &&
-		    defined $stat &&
-		    $stat =~ /^\+(?:.*?)\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
-
-			my $ms_val = $7;
-
-			if ($ms_val =~ /^(?:0x|)0+$/i) {
-				if (WARN("PREFER_ETH_ZERO_ADDR",
-					 "Prefer eth_zero_addr over memset()\n" . "$here\n$stat\n") &&
-				    $fix) {
-					$fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_zero_addr($2)/;
-				}
-			} elsif ($ms_val =~ /^(?:0xff|255)$/i) {
-				if (WARN("PREFER_ETH_BROADCAST_ADDR",
-					 "Prefer eth_broadcast_addr() over memset()\n" . "$here\n$stat\n") &&
-				    $fix) {
-					$fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_broadcast_addr($2)/;
-				}
-			}
-		}
+#		if ($^V && $^V ge 5.10.0 &&
+#		    defined $stat &&
+#		    $stat =~ /^\+(?:.*?)\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
+#
+#			my $ms_val = $7;
+#
+#			if ($ms_val =~ /^(?:0x|)0+$/i) {
+#				if (WARN("PREFER_ETH_ZERO_ADDR",
+#					 "Prefer eth_zero_addr over memset()\n" . "$here\n$stat\n") &&
+#				    $fix) {
+#					$fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_zero_addr($2)/;
+#				}
+#			} elsif ($ms_val =~ /^(?:0xff|255)$/i) {
+#				if (WARN("PREFER_ETH_BROADCAST_ADDR",
+#					 "Prefer eth_broadcast_addr() over memset()\n" . "$here\n$stat\n") &&
+#				    $fix) {
+#					$fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_broadcast_addr($2)/;
+#				}
+#			}
+#		}
 
 # typecasts on min/max could be min_t/max_t
 		if ($^V && $^V ge 5.10.0 &&
@@ -5654,6 +5798,19 @@
 			     "externs should be avoided in .c files\n" .  $herecurr);
 		}
 
+		if ($realfile =~ /\.[ch]$/ && defined $stat &&
+		    $stat =~ /^.\s*(?:extern\s+)?$Type\s*$Ident\s*\(\s*([^{]+)\s*\)\s*;/s &&
+		    $1 ne "void") {
+			my $args = trim($1);
+			while ($args =~ m/\s*($Type\s*(?:$Ident|\(\s*\*\s*$Ident?\s*\)\s*$balanced_parens)?)/g) {
+				my $arg = trim($1);
+				if ($arg =~ /^$Type$/ && $arg !~ /enum\s+$Ident$/) {
+					WARN("FUNCTION_ARGUMENTS",
+					     "function definition argument '$arg' should also have an identifier name\n" . $herecurr);
+				}
+			}
+		}
+
 # checks for new __setup's
 		if ($rawline =~ /\b__setup\("([^"]*)"/) {
 			my $name = $1;
@@ -5853,46 +6010,6 @@
 		}
 
 # check for various structs that are normally const (ops, kgdb, device_tree)
-		my $const_structs = qr{
-				acpi_dock_ops|
-				address_space_operations|
-				backlight_ops|
-				block_device_operations|
-				dentry_operations|
-				dev_pm_ops|
-				dma_map_ops|
-				extent_io_ops|
-				file_lock_operations|
-				file_operations|
-				hv_ops|
-				ide_dma_ops|
-				intel_dvo_dev_ops|
-				item_operations|
-				iwl_ops|
-				kgdb_arch|
-				kgdb_io|
-				kset_uevent_ops|
-				lock_manager_operations|
-				microcode_ops|
-				mtrr_ops|
-				neigh_ops|
-				nlmsvc_binding|
-				of_device_id|
-				pci_raw_ops|
-				pipe_buf_operations|
-				platform_hibernation_ops|
-				platform_suspend_ops|
-				proto_ops|
-				rpc_pipe_ops|
-				seq_operations|
-				snd_ac97_build_ops|
-				soc_pcmcia_socket_ops|
-				stacktrace_ops|
-				sysfs_ops|
-				tty_operations|
-				uart_ops|
-				usb_mon_operations|
-				wd_ops}x;
 		if ($line !~ /\bconst\b/ &&
 		    $line =~ /\bstruct\s+($const_structs)\b/) {
 			WARN("CONST_STRUCT",
@@ -5979,34 +6096,69 @@
 # Mode permission misuses where it seems decimal should be octal
 # This uses a shortcut match to avoid unnecessary uses of a slow foreach loop
 		if ($^V && $^V ge 5.10.0 &&
+		    defined $stat &&
 		    $line =~ /$mode_perms_search/) {
 			foreach my $entry (@mode_permission_funcs) {
 				my $func = $entry->[0];
 				my $arg_pos = $entry->[1];
 
+				my $lc = $stat =~ tr@\n@@;
+				$lc = $lc + $linenr;
+				my $stat_real = raw_line($linenr, 0);
+				for (my $count = $linenr + 1; $count <= $lc; $count++) {
+					$stat_real = $stat_real . "\n" . raw_line($count, 0);
+				}
+
 				my $skip_args = "";
 				if ($arg_pos > 1) {
 					$arg_pos--;
 					$skip_args = "(?:\\s*$FuncArg\\s*,\\s*){$arg_pos,$arg_pos}";
 				}
-				my $test = "\\b$func\\s*\\(${skip_args}([\\d]+)\\s*[,\\)]";
-				if ($line =~ /$test/) {
+				my $test = "\\b$func\\s*\\(${skip_args}($FuncArg(?:\\|\\s*$FuncArg)*)\\s*[,\\)]";
+				if ($stat =~ /$test/) {
 					my $val = $1;
 					$val = $6 if ($skip_args ne "");
-
-					if ($val !~ /^0$/ &&
-					    (($val =~ /^$Int$/ && $val !~ /^$Octal$/) ||
-					     length($val) ne 4)) {
+					if (($val =~ /^$Int$/ && $val !~ /^$Octal$/) ||
+					    ($val =~ /^$Octal$/ && length($val) ne 4)) {
 						ERROR("NON_OCTAL_PERMISSIONS",
-						      "Use 4 digit octal (0777) not decimal permissions\n" . $herecurr);
-					} elsif ($val =~ /^$Octal$/ && (oct($val) & 02)) {
+						      "Use 4 digit octal (0777) not decimal permissions\n" . "$here\n" . $stat_real);
+					}
+					if ($val =~ /^$Octal$/ && (oct($val) & 02)) {
 						ERROR("EXPORTED_WORLD_WRITABLE",
-						      "Exporting writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr);
+						      "Exporting writable files is usually an error. Consider more restrictive permissions.\n" . "$here\n" . $stat_real);
 					}
 				}
 			}
 		}
 
+# check for uses of S_<PERMS> that could be octal for readability
+		if ($line =~ /\b$mode_perms_string_search\b/) {
+			my $val = "";
+			my $oval = "";
+			my $to = 0;
+			my $curpos = 0;
+			my $lastpos = 0;
+			while ($line =~ /\b(($mode_perms_string_search)\b(?:\s*\|\s*)?\s*)/g) {
+				$curpos = pos($line);
+				my $match = $2;
+				my $omatch = $1;
+				last if ($lastpos > 0 && ($curpos - length($omatch) != $lastpos));
+				$lastpos = $curpos;
+				$to |= $mode_permission_string_types{$match};
+				$val .= '\s*\|\s*' if ($val ne "");
+				$val .= $match;
+				$oval .= $omatch;
+			}
+			$oval =~ s/^\s*\|\s*//;
+			$oval =~ s/\s*\|\s*$//;
+			my $octal = sprintf("%04o", $to);
+			if (WARN("SYMBOLIC_PERMS",
+				 "Symbolic permissions '$oval' are not preferred. Consider using octal permissions '$octal'.\n" . $herecurr) &&
+			    $fix) {
+				$fixed[$fixlinenr] =~ s/$val/$octal/;
+			}
+		}
+
 # validate content of MODULE_LICENSE against list from include/linux/module.h
 		if ($line =~ /\bMODULE_LICENSE\s*\(\s*($String)\s*\)/) {
 			my $extracted_string = get_quoted_string($line, $rawline);
diff --git a/scripts/coccicheck b/scripts/coccicheck
index c92c1528..ec487b8 100755
--- a/scripts/coccicheck
+++ b/scripts/coccicheck
@@ -1,7 +1,7 @@
 #!/bin/bash
 # Linux kernel coccicheck
 #
-# Read Documentation/coccinelle.txt
+# Read Documentation/dev-tools/coccinelle.rst
 #
 # This script requires at least spatch
 # version 1.0.0-rc11.
diff --git a/scripts/coccinelle/api/memdup_user.cocci b/scripts/coccinelle/api/memdup_user.cocci
index c606231..2a5aea8 100644
--- a/scripts/coccinelle/api/memdup_user.cocci
+++ b/scripts/coccinelle/api/memdup_user.cocci
@@ -15,11 +15,11 @@
 virtual report
 
 @depends on patch@
-expression from,to,size,flag;
+expression from,to,size;
 identifier l1,l2;
 @@
 
--  to = \(kmalloc\|kzalloc\)(size,flag);
+-  to = \(kmalloc\|kzalloc\)(size,GFP_KERNEL);
 +  to = memdup_user(from,size);
    if (
 -      to==NULL
@@ -37,12 +37,12 @@
 -  }
 
 @r depends on !patch@
-expression from,to,size,flag;
+expression from,to,size;
 position p;
 statement S1,S2;
 @@
 
-*  to = \(kmalloc@p\|kzalloc@p\)(size,flag);
+*  to = \(kmalloc@p\|kzalloc@p\)(size,GFP_KERNEL);
    if (to==NULL || ...) S1
    if (copy_from_user(to, from, size) != 0)
    S2
diff --git a/scripts/coccinelle/api/pm_runtime.cocci b/scripts/coccinelle/api/pm_runtime.cocci
index 89b98a2..d67ccf5 100644
--- a/scripts/coccinelle/api/pm_runtime.cocci
+++ b/scripts/coccinelle/api/pm_runtime.cocci
@@ -17,9 +17,10 @@
 
 @runtime_bad_err_handle exists@
 expression ret;
+position p;
 @@
 (
-ret = \(pm_runtime_idle\|
+ret@p = \(pm_runtime_idle\|
 	pm_runtime_suspend\|
 	pm_runtime_autosuspend\|
 	pm_runtime_resume\|
@@ -47,12 +48,13 @@
 //  For context mode
 //----------------------------------------------------------
 
-@depends on runtime_bad_err_handle && context@
+@depends on context@
 identifier pm_runtime_api;
 expression ret;
+position runtime_bad_err_handle.p;
 @@
 (
-ret = pm_runtime_api(...);
+ret@p = pm_runtime_api(...);
 ...
 * IS_ERR_VALUE(ret)
 ...
@@ -62,12 +64,13 @@
 //  For patch mode
 //----------------------------------------------------------
 
-@depends on runtime_bad_err_handle && patch@
+@depends on patch@
 identifier pm_runtime_api;
 expression ret;
+position runtime_bad_err_handle.p;
 @@
 (
-ret = pm_runtime_api(...);
+ret@p = pm_runtime_api(...);
 ...
 - IS_ERR_VALUE(ret)
 + ret < 0
@@ -78,13 +81,14 @@
 //  For org and report mode
 //----------------------------------------------------------
 
-@r depends on runtime_bad_err_handle && (org || report) exists@
+@r depends on (org || report) exists@
 position p1, p2;
 identifier pm_runtime_api;
 expression ret;
+position runtime_bad_err_handle.p;
 @@
 (
-ret = pm_runtime_api@p1(...);
+ret@p = pm_runtime_api@p1(...);
 ...
 IS_ERR_VALUE@p2(ret)
 ...
diff --git a/scripts/coccinelle/misc/cond_no_effect.cocci b/scripts/coccinelle/misc/cond_no_effect.cocci
new file mode 100644
index 0000000..8467dbd
--- /dev/null
+++ b/scripts/coccinelle/misc/cond_no_effect.cocci
@@ -0,0 +1,64 @@
+///Find conditions where if and else branch are functionally
+// identical.
+//
+// There can be false positives in cases where the positional
+// information is used (as with lockdep) or where the identity
+// is a placeholder for not yet handled cases.
+// Unfortunately there also seems to be a tendency to use
+// the last if else/else as a "default behavior" - which some
+// might consider a legitimate coding pattern. From discussion
+// on kernelnewbies though it seems that this is not really an
+// accepted pattern and if at all it would need to be commented
+//
+// In the Linux kernel it does not seem to actually report
+// false positives except for those that were documented as
+// being intentional.
+// the two known cases are:
+//   arch/sh/kernel/traps_64.c:read_opcode()
+//        } else if ((pc & 1) == 0) {
+//              /* SHcompact */
+//              /* TODO : provide handling for this.  We don't really support
+//                 user-mode SHcompact yet, and for a kernel fault, this would
+//                 have to come from a module built for SHcompact.  */
+//              return -EFAULT;
+//      } else {
+//              /* misaligned */
+//              return -EFAULT;
+//      }
+//   fs/kernfs/file.c:kernfs_fop_open()
+//       * Both paths of the branch look the same.  They're supposed to
+//       * look that way and give @of->mutex different static lockdep keys.
+//       */
+//      if (has_mmap)
+//              mutex_init(&of->mutex);
+//      else
+//              mutex_init(&of->mutex);
+//
+// All other cases look like bugs or at least lack of documentation
+//
+// Confidence: Moderate
+// Copyright: (C) 2016 Nicholas Mc Guire, OSADL.  GPLv2.
+// Comments:
+// Options: --no-includes --include-headers
+
+virtual org
+virtual report
+
+@cond@
+statement S1;
+position p;
+@@
+
+* if@p (...) S1 else S1
+
+@script:python depends on org@
+p << cond.p;
+@@
+
+cocci.print_main("WARNING: possible condition with no effect (if == else)",p)
+
+@script:python depends on report@
+p << cond.p;
+@@
+
+coccilib.report.print_report(p[0],"WARNING: possible condition with no effect (if == else)")
diff --git a/scripts/const_structs.checkpatch b/scripts/const_structs.checkpatch
new file mode 100644
index 0000000..ac5f1267
--- /dev/null
+++ b/scripts/const_structs.checkpatch
@@ -0,0 +1,64 @@
+acpi_dock_ops
+address_space_operations
+backlight_ops
+block_device_operations
+clk_ops
+comedi_lrange
+component_ops
+dentry_operations
+dev_pm_ops
+dma_map_ops
+driver_info
+drm_connector_funcs
+drm_encoder_funcs
+drm_encoder_helper_funcs
+ethtool_ops
+extent_io_ops
+file_lock_operations
+file_operations
+hv_ops
+ide_dma_ops
+ide_port_ops
+inode_operations
+intel_dvo_dev_ops
+irq_domain_ops
+item_operations
+iwl_cfg
+iwl_ops
+kgdb_arch
+kgdb_io
+kset_uevent_ops
+lock_manager_operations
+machine_desc
+microcode_ops
+mlxsw_reg_info
+mtrr_ops
+neigh_ops
+net_device_ops
+nlmsvc_binding
+nvkm_device_chip
+of_device_id
+pci_raw_ops
+pipe_buf_operations
+platform_hibernation_ops
+platform_suspend_ops
+proto_ops
+regmap_access_table
+rpc_pipe_ops
+rtc_class_ops
+sd_desc
+seq_operations
+sirfsoc_padmux
+snd_ac97_build_ops
+snd_soc_component_driver
+soc_pcmcia_socket_ops
+stacktrace_ops
+sysfs_ops
+tty_operations
+uart_ops
+usb_mon_operations
+v4l2_ctrl_ops
+v4l2_ioctl_ops
+vm_operations_struct
+wacom_features
+wd_ops
diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
new file mode 100644
index 0000000..ff1939b
--- /dev/null
+++ b/scripts/gcc-plugins/latent_entropy_plugin.c
@@ -0,0 +1,640 @@
+/*
+ * Copyright 2012-2016 by the PaX Team <pageexec@freemail.hu>
+ * Copyright 2016 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2
+ *
+ * Note: the choice of the license means that the compilation process is
+ *       NOT 'eligible' as defined by gcc's library exception to the GPL v3,
+ *       but for the kernel it doesn't matter since it doesn't link against
+ *       any of the gcc libraries
+ *
+ * This gcc plugin helps generate a little bit of entropy from program state,
+ * used throughout the uptime of the kernel. Here is an instrumentation example:
+ *
+ * before:
+ * void __latent_entropy test(int argc, char *argv[])
+ * {
+ *	if (argc <= 1)
+ *		printf("%s: no command arguments :(\n", *argv);
+ *	else
+ *		printf("%s: %d command arguments!\n", *argv, args - 1);
+ * }
+ *
+ * after:
+ * void __latent_entropy test(int argc, char *argv[])
+ * {
+ *	// latent_entropy_execute() 1.
+ *	unsigned long local_entropy;
+ *	// init_local_entropy() 1.
+ *	void *local_entropy_frameaddr;
+ *	// init_local_entropy() 3.
+ *	unsigned long tmp_latent_entropy;
+ *
+ *	// init_local_entropy() 2.
+ *	local_entropy_frameaddr = __builtin_frame_address(0);
+ *	local_entropy = (unsigned long) local_entropy_frameaddr;
+ *
+ *	// init_local_entropy() 4.
+ *	tmp_latent_entropy = latent_entropy;
+ *	// init_local_entropy() 5.
+ *	local_entropy ^= tmp_latent_entropy;
+ *
+ *	// latent_entropy_execute() 3.
+ *	if (argc <= 1) {
+ *		// perturb_local_entropy()
+ *		local_entropy += 4623067384293424948;
+ *		printf("%s: no command arguments :(\n", *argv);
+ *		// perturb_local_entropy()
+ *	} else {
+ *		local_entropy ^= 3896280633962944730;
+ *		printf("%s: %d command arguments!\n", *argv, args - 1);
+ *	}
+ *
+ *	// latent_entropy_execute() 4.
+ *	tmp_latent_entropy = rol(tmp_latent_entropy, local_entropy);
+ *	latent_entropy = tmp_latent_entropy;
+ * }
+ *
+ * TODO:
+ * - add ipa pass to identify not explicitly marked candidate functions
+ * - mix in more program state (function arguments/return values,
+ *   loop variables, etc)
+ * - more instrumentation control via attribute parameters
+ *
+ * BUGS:
+ * - none known
+ *
+ * Options:
+ * -fplugin-arg-latent_entropy_plugin-disable
+ *
+ * Attribute: __attribute__((latent_entropy))
+ *  The latent_entropy gcc attribute can be only on functions and variables.
+ *  If it is on a function then the plugin will instrument it. If the attribute
+ *  is on a variable then the plugin will initialize it with a random value.
+ *  The variable must be an integer, an integer array type or a structure
+ *  with integer fields.
+ */
+
+#include "gcc-common.h"
+
+int plugin_is_GPL_compatible;
+
+static GTY(()) tree latent_entropy_decl;
+
+static struct plugin_info latent_entropy_plugin_info = {
+	.version	= "201606141920vanilla",
+	.help		= "disable\tturn off latent entropy instrumentation\n",
+};
+
+static unsigned HOST_WIDE_INT seed;
+/*
+ * get_random_seed() (this is a GCC function) generates the seed.
+ * This is a simple random generator without any cryptographic security because
+ * the entropy doesn't come from here.
+ */
+static unsigned HOST_WIDE_INT get_random_const(void)
+{
+	unsigned int i;
+	unsigned HOST_WIDE_INT ret = 0;
+
+	for (i = 0; i < 8 * sizeof(ret); i++) {
+		ret = (ret << 1) | (seed & 1);
+		seed >>= 1;
+		if (ret & 1)
+			seed ^= 0xD800000000000000ULL;
+	}
+
+	return ret;
+}
+
+static tree tree_get_random_const(tree type)
+{
+	unsigned long long mask;
+
+	mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(type)) - 1);
+	mask = 2 * (mask - 1) + 1;
+
+	if (TYPE_UNSIGNED(type))
+		return build_int_cstu(type, mask & get_random_const());
+	return build_int_cst(type, mask & get_random_const());
+}
+
+static tree handle_latent_entropy_attribute(tree *node, tree name,
+						tree args __unused,
+						int flags __unused,
+						bool *no_add_attrs)
+{
+	tree type;
+#if BUILDING_GCC_VERSION <= 4007
+	VEC(constructor_elt, gc) *vals;
+#else
+	vec<constructor_elt, va_gc> *vals;
+#endif
+
+	switch (TREE_CODE(*node)) {
+	default:
+		*no_add_attrs = true;
+		error("%qE attribute only applies to functions and variables",
+			name);
+		break;
+
+	case VAR_DECL:
+		if (DECL_INITIAL(*node)) {
+			*no_add_attrs = true;
+			error("variable %qD with %qE attribute must not be initialized",
+				*node, name);
+			break;
+		}
+
+		if (!TREE_STATIC(*node)) {
+			*no_add_attrs = true;
+			error("variable %qD with %qE attribute must not be local",
+				*node, name);
+			break;
+		}
+
+		type = TREE_TYPE(*node);
+		switch (TREE_CODE(type)) {
+		default:
+			*no_add_attrs = true;
+			error("variable %qD with %qE attribute must be an integer or a fixed length integer array type or a fixed sized structure with integer fields",
+				*node, name);
+			break;
+
+		case RECORD_TYPE: {
+			tree fld, lst = TYPE_FIELDS(type);
+			unsigned int nelt = 0;
+
+			for (fld = lst; fld; nelt++, fld = TREE_CHAIN(fld)) {
+				tree fieldtype;
+
+				fieldtype = TREE_TYPE(fld);
+				if (TREE_CODE(fieldtype) == INTEGER_TYPE)
+					continue;
+
+				*no_add_attrs = true;
+				error("structure variable %qD with %qE attribute has a non-integer field %qE",
+					*node, name, fld);
+				break;
+			}
+
+			if (fld)
+				break;
+
+#if BUILDING_GCC_VERSION <= 4007
+			vals = VEC_alloc(constructor_elt, gc, nelt);
+#else
+			vec_alloc(vals, nelt);
+#endif
+
+			for (fld = lst; fld; fld = TREE_CHAIN(fld)) {
+				tree random_const, fld_t = TREE_TYPE(fld);
+
+				random_const = tree_get_random_const(fld_t);
+				CONSTRUCTOR_APPEND_ELT(vals, fld, random_const);
+			}
+
+			/* Initialize the fields with random constants */
+			DECL_INITIAL(*node) = build_constructor(type, vals);
+			break;
+		}
+
+		/* Initialize the variable with a random constant */
+		case INTEGER_TYPE:
+			DECL_INITIAL(*node) = tree_get_random_const(type);
+			break;
+
+		case ARRAY_TYPE: {
+			tree elt_type, array_size, elt_size;
+			unsigned int i, nelt;
+			HOST_WIDE_INT array_size_int, elt_size_int;
+
+			elt_type = TREE_TYPE(type);
+			elt_size = TYPE_SIZE_UNIT(TREE_TYPE(type));
+			array_size = TYPE_SIZE_UNIT(type);
+
+			if (TREE_CODE(elt_type) != INTEGER_TYPE || !array_size
+				|| TREE_CODE(array_size) != INTEGER_CST) {
+				*no_add_attrs = true;
+				error("array variable %qD with %qE attribute must be a fixed length integer array type",
+					*node, name);
+				break;
+			}
+
+			array_size_int = TREE_INT_CST_LOW(array_size);
+			elt_size_int = TREE_INT_CST_LOW(elt_size);
+			nelt = array_size_int / elt_size_int;
+
+#if BUILDING_GCC_VERSION <= 4007
+			vals = VEC_alloc(constructor_elt, gc, nelt);
+#else
+			vec_alloc(vals, nelt);
+#endif
+
+			for (i = 0; i < nelt; i++) {
+				tree cst = size_int(i);
+				tree rand_cst = tree_get_random_const(elt_type);
+
+				CONSTRUCTOR_APPEND_ELT(vals, cst, rand_cst);
+			}
+
+			/*
+			 * Initialize the elements of the array with random
+			 * constants
+			 */
+			DECL_INITIAL(*node) = build_constructor(type, vals);
+			break;
+		}
+		}
+		break;
+
+	case FUNCTION_DECL:
+		break;
+	}
+
+	return NULL_TREE;
+}
+
+static struct attribute_spec latent_entropy_attr = {
+	.name				= "latent_entropy",
+	.min_length			= 0,
+	.max_length			= 0,
+	.decl_required			= true,
+	.type_required			= false,
+	.function_type_required		= false,
+	.handler			= handle_latent_entropy_attribute,
+#if BUILDING_GCC_VERSION >= 4007
+	.affects_type_identity		= false
+#endif
+};
+
+static void register_attributes(void *event_data __unused, void *data __unused)
+{
+	register_attribute(&latent_entropy_attr);
+}
+
+static bool latent_entropy_gate(void)
+{
+	tree list;
+
+	/* don't bother with noreturn functions for now */
+	if (TREE_THIS_VOLATILE(current_function_decl))
+		return false;
+
+	/* gcc-4.5 doesn't discover some trivial noreturn functions */
+	if (EDGE_COUNT(EXIT_BLOCK_PTR_FOR_FN(cfun)->preds) == 0)
+		return false;
+
+	list = DECL_ATTRIBUTES(current_function_decl);
+	return lookup_attribute("latent_entropy", list) != NULL_TREE;
+}
+
+static tree create_var(tree type, const char *name)
+{
+	tree var;
+
+	var = create_tmp_var(type, name);
+	add_referenced_var(var);
+	mark_sym_for_renaming(var);
+	return var;
+}
+
+/*
+ * Set up the next operation and its constant operand to use in the latent
+ * entropy PRNG. When RHS is specified, the request is for perturbing the
+ * local latent entropy variable, otherwise it is for perturbing the global
+ * latent entropy variable where the two operands are already given by the
+ * local and global latent entropy variables themselves.
+ *
+ * The operation is one of add/xor/rol when instrumenting the local entropy
+ * variable and one of add/xor when perturbing the global entropy variable.
+ * Rotation is not used for the latter case because it would transmit less
+ * entropy to the global variable than the other two operations.
+ */
+static enum tree_code get_op(tree *rhs)
+{
+	static enum tree_code op;
+	unsigned HOST_WIDE_INT random_const;
+
+	random_const = get_random_const();
+
+	switch (op) {
+	case BIT_XOR_EXPR:
+		op = PLUS_EXPR;
+		break;
+
+	case PLUS_EXPR:
+		if (rhs) {
+			op = LROTATE_EXPR;
+			/*
+			 * This code limits the value of random_const to
+			 * the size of a wide int for the rotation
+			 */
+			random_const &= HOST_BITS_PER_WIDE_INT - 1;
+			break;
+		}
+
+	case LROTATE_EXPR:
+	default:
+		op = BIT_XOR_EXPR;
+		break;
+	}
+	if (rhs)
+		*rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
+	return op;
+}
+
+static gimple create_assign(enum tree_code code, tree lhs, tree op1,
+				tree op2)
+{
+	return gimple_build_assign_with_ops(code, lhs, op1, op2);
+}
+
+static void perturb_local_entropy(basic_block bb, tree local_entropy)
+{
+	gimple_stmt_iterator gsi;
+	gimple assign;
+	tree rhs;
+	enum tree_code op;
+
+	op = get_op(&rhs);
+	assign = create_assign(op, local_entropy, local_entropy, rhs);
+	gsi = gsi_after_labels(bb);
+	gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
+	update_stmt(assign);
+}
+
+static void __perturb_latent_entropy(gimple_stmt_iterator *gsi,
+					tree local_entropy)
+{
+	gimple assign;
+	tree temp;
+	enum tree_code op;
+
+	/* 1. create temporary copy of latent_entropy */
+	temp = create_var(unsigned_intDI_type_node, "tmp_latent_entropy");
+
+	/* 2. read... */
+	add_referenced_var(latent_entropy_decl);
+	mark_sym_for_renaming(latent_entropy_decl);
+	assign = gimple_build_assign(temp, latent_entropy_decl);
+	gsi_insert_before(gsi, assign, GSI_NEW_STMT);
+	update_stmt(assign);
+
+	/* 3. ...modify... */
+	op = get_op(NULL);
+	assign = create_assign(op, temp, temp, local_entropy);
+	gsi_insert_after(gsi, assign, GSI_NEW_STMT);
+	update_stmt(assign);
+
+	/* 4. ...write latent_entropy */
+	assign = gimple_build_assign(latent_entropy_decl, temp);
+	gsi_insert_after(gsi, assign, GSI_NEW_STMT);
+	update_stmt(assign);
+}
+
+static bool handle_tail_calls(basic_block bb, tree local_entropy)
+{
+	gimple_stmt_iterator gsi;
+
+	for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+		gcall *call;
+		gimple stmt = gsi_stmt(gsi);
+
+		if (!is_gimple_call(stmt))
+			continue;
+
+		call = as_a_gcall(stmt);
+		if (!gimple_call_tail_p(call))
+			continue;
+
+		__perturb_latent_entropy(&gsi, local_entropy);
+		return true;
+	}
+
+	return false;
+}
+
+static void perturb_latent_entropy(tree local_entropy)
+{
+	edge_iterator ei;
+	edge e, last_bb_e;
+	basic_block last_bb;
+
+	gcc_assert(single_pred_p(EXIT_BLOCK_PTR_FOR_FN(cfun)));
+	last_bb_e = single_pred_edge(EXIT_BLOCK_PTR_FOR_FN(cfun));
+
+	FOR_EACH_EDGE(e, ei, last_bb_e->src->preds) {
+		if (ENTRY_BLOCK_PTR_FOR_FN(cfun) == e->src)
+			continue;
+		if (EXIT_BLOCK_PTR_FOR_FN(cfun) == e->src)
+			continue;
+
+		handle_tail_calls(e->src, local_entropy);
+	}
+
+	last_bb = single_pred(EXIT_BLOCK_PTR_FOR_FN(cfun));
+	if (!handle_tail_calls(last_bb, local_entropy)) {
+		gimple_stmt_iterator gsi = gsi_last_bb(last_bb);
+
+		__perturb_latent_entropy(&gsi, local_entropy);
+	}
+}
+
+static void init_local_entropy(basic_block bb, tree local_entropy)
+{
+	gimple assign, call;
+	tree frame_addr, rand_const, tmp, fndecl, udi_frame_addr;
+	enum tree_code op;
+	unsigned HOST_WIDE_INT rand_cst;
+	gimple_stmt_iterator gsi = gsi_after_labels(bb);
+
+	/* 1. create local_entropy_frameaddr */
+	frame_addr = create_var(ptr_type_node, "local_entropy_frameaddr");
+
+	/* 2. local_entropy_frameaddr = __builtin_frame_address() */
+	fndecl = builtin_decl_implicit(BUILT_IN_FRAME_ADDRESS);
+	call = gimple_build_call(fndecl, 1, integer_zero_node);
+	gimple_call_set_lhs(call, frame_addr);
+	gsi_insert_before(&gsi, call, GSI_NEW_STMT);
+	update_stmt(call);
+
+	udi_frame_addr = fold_convert(unsigned_intDI_type_node, frame_addr);
+	assign = gimple_build_assign(local_entropy, udi_frame_addr);
+	gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+	update_stmt(assign);
+
+	/* 3. create temporary copy of latent_entropy */
+	tmp = create_var(unsigned_intDI_type_node, "tmp_latent_entropy");
+
+	/* 4. read the global entropy variable into local entropy */
+	add_referenced_var(latent_entropy_decl);
+	mark_sym_for_renaming(latent_entropy_decl);
+	assign = gimple_build_assign(tmp, latent_entropy_decl);
+	gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+	update_stmt(assign);
+
+	/* 5. mix local_entropy_frameaddr into local entropy */
+	assign = create_assign(BIT_XOR_EXPR, local_entropy, local_entropy, tmp);
+	gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+	update_stmt(assign);
+
+	rand_cst = get_random_const();
+	rand_const = build_int_cstu(unsigned_intDI_type_node, rand_cst);
+	op = get_op(NULL);
+	assign = create_assign(op, local_entropy, local_entropy, rand_const);
+	gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+	update_stmt(assign);
+}
+
+static bool create_latent_entropy_decl(void)
+{
+	varpool_node_ptr node;
+
+	if (latent_entropy_decl != NULL_TREE)
+		return true;
+
+	FOR_EACH_VARIABLE(node) {
+		tree name, var = NODE_DECL(node);
+
+		if (DECL_NAME_LENGTH(var) < sizeof("latent_entropy") - 1)
+			continue;
+
+		name = DECL_NAME(var);
+		if (strcmp(IDENTIFIER_POINTER(name), "latent_entropy"))
+			continue;
+
+		latent_entropy_decl = var;
+		break;
+	}
+
+	return latent_entropy_decl != NULL_TREE;
+}
+
+static unsigned int latent_entropy_execute(void)
+{
+	basic_block bb;
+	tree local_entropy;
+
+	if (!create_latent_entropy_decl())
+		return 0;
+
+	/* prepare for step 2 below */
+	gcc_assert(single_succ_p(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
+	bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun));
+	if (!single_pred_p(bb)) {
+		split_edge(single_succ_edge(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
+		gcc_assert(single_succ_p(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
+		bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun));
+	}
+
+	/* 1. create the local entropy variable */
+	local_entropy = create_var(unsigned_intDI_type_node, "local_entropy");
+
+	/* 2. initialize the local entropy variable */
+	init_local_entropy(bb, local_entropy);
+
+	bb = bb->next_bb;
+
+	/*
+	 * 3. instrument each BB with an operation on the
+	 *    local entropy variable
+	 */
+	while (bb != EXIT_BLOCK_PTR_FOR_FN(cfun)) {
+		perturb_local_entropy(bb, local_entropy);
+		bb = bb->next_bb;
+	};
+
+	/* 4. mix local entropy into the global entropy variable */
+	perturb_latent_entropy(local_entropy);
+	return 0;
+}
+
+static void latent_entropy_start_unit(void *gcc_data __unused,
+					void *user_data __unused)
+{
+	tree type, id;
+	int quals;
+
+	seed = get_random_seed(false);
+
+	if (in_lto_p)
+		return;
+
+	/* extern volatile u64 latent_entropy */
+	gcc_assert(TYPE_PRECISION(long_long_unsigned_type_node) == 64);
+	quals = TYPE_QUALS(long_long_unsigned_type_node) | TYPE_QUAL_VOLATILE;
+	type = build_qualified_type(long_long_unsigned_type_node, quals);
+	id = get_identifier("latent_entropy");
+	latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, id, type);
+
+	TREE_STATIC(latent_entropy_decl) = 1;
+	TREE_PUBLIC(latent_entropy_decl) = 1;
+	TREE_USED(latent_entropy_decl) = 1;
+	DECL_PRESERVE_P(latent_entropy_decl) = 1;
+	TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
+	DECL_EXTERNAL(latent_entropy_decl) = 1;
+	DECL_ARTIFICIAL(latent_entropy_decl) = 1;
+	lang_hooks.decls.pushdecl(latent_entropy_decl);
+}
+
+#define PASS_NAME latent_entropy
+#define PROPERTIES_REQUIRED PROP_gimple_leh | PROP_cfg
+#define TODO_FLAGS_FINISH TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func \
+	| TODO_update_ssa
+#include "gcc-generate-gimple-pass.h"
+
+int plugin_init(struct plugin_name_args *plugin_info,
+		struct plugin_gcc_version *version)
+{
+	bool enabled = true;
+	const char * const plugin_name = plugin_info->base_name;
+	const int argc = plugin_info->argc;
+	const struct plugin_argument * const argv = plugin_info->argv;
+	int i;
+
+	struct register_pass_info latent_entropy_pass_info;
+
+	latent_entropy_pass_info.pass		= make_latent_entropy_pass();
+	latent_entropy_pass_info.reference_pass_name		= "optimized";
+	latent_entropy_pass_info.ref_pass_instance_number	= 1;
+	latent_entropy_pass_info.pos_op		= PASS_POS_INSERT_BEFORE;
+	static const struct ggc_root_tab gt_ggc_r_gt_latent_entropy[] = {
+		{
+			.base = &latent_entropy_decl,
+			.nelt = 1,
+			.stride = sizeof(latent_entropy_decl),
+			.cb = &gt_ggc_mx_tree_node,
+			.pchw = &gt_pch_nx_tree_node
+		},
+		LAST_GGC_ROOT_TAB
+	};
+
+	if (!plugin_default_version_check(version, &gcc_version)) {
+		error(G_("incompatible gcc/plugin versions"));
+		return 1;
+	}
+
+	for (i = 0; i < argc; ++i) {
+		if (!(strcmp(argv[i].key, "disable"))) {
+			enabled = false;
+			continue;
+		}
+		error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+	}
+
+	register_callback(plugin_name, PLUGIN_INFO, NULL,
+				&latent_entropy_plugin_info);
+	if (enabled) {
+		register_callback(plugin_name, PLUGIN_START_UNIT,
+					&latent_entropy_start_unit, NULL);
+		register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS,
+				  NULL, (void *)&gt_ggc_r_gt_latent_entropy);
+		register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL,
+					&latent_entropy_pass_info);
+	}
+	register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes,
+				NULL);
+
+	return 0;
+}
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index 17fa901..0055b07 100755
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -97,7 +97,10 @@
 }
 
 list_parse() {
-	[ ! -L "$1" ] && echo "$1 \\" || :
+	if [ -L "$1" ]; then
+		return
+	fi
+	echo "$1" | sed 's/:/\\:/g; s/$/ \\/'
 }
 
 # for each file print a line in following format
diff --git a/scripts/genksyms/lex.l b/scripts/genksyms/lex.l
index e583565..5235aa5 100644
--- a/scripts/genksyms/lex.l
+++ b/scripts/genksyms/lex.l
@@ -289,6 +289,23 @@
 	}
       break;
 
+    case ST_TYPEOF_1:
+      if (token == IDENT)
+	{
+	  if (is_reserved_word(yytext, yyleng)
+	      || find_symbol(yytext, SYM_TYPEDEF, 1))
+	    {
+	      yyless(0);
+	      unput('(');
+	      lexstate = ST_NORMAL;
+	      token = TYPEOF_KEYW;
+	      break;
+	    }
+	  _APP("(", 1);
+	}
+	lexstate = ST_TYPEOF;
+	/* FALLTHRU */
+
     case ST_TYPEOF:
       switch (token)
 	{
@@ -313,24 +330,6 @@
 	}
       break;
 
-    case ST_TYPEOF_1:
-      if (token == IDENT)
-	{
-	  if (is_reserved_word(yytext, yyleng)
-	      || find_symbol(yytext, SYM_TYPEDEF, 1))
-	    {
-	      yyless(0);
-	      unput('(');
-	      lexstate = ST_NORMAL;
-	      token = TYPEOF_KEYW;
-	      break;
-	    }
-	  _APP("(", 1);
-	}
-	APP;
-	lexstate = ST_TYPEOF;
-	goto repeat;
-
     case ST_BRACKET:
       APP;
       switch (token)
diff --git a/scripts/genksyms/lex.lex.c_shipped b/scripts/genksyms/lex.lex.c_shipped
index f82740a..985c554 100644
--- a/scripts/genksyms/lex.lex.c_shipped
+++ b/scripts/genksyms/lex.lex.c_shipped
@@ -2098,6 +2098,23 @@
 	}
       break;
 
+    case ST_TYPEOF_1:
+      if (token == IDENT)
+	{
+	  if (is_reserved_word(yytext, yyleng)
+	      || find_symbol(yytext, SYM_TYPEDEF, 1))
+	    {
+	      yyless(0);
+	      unput('(');
+	      lexstate = ST_NORMAL;
+	      token = TYPEOF_KEYW;
+	      break;
+	    }
+	  _APP("(", 1);
+	}
+	lexstate = ST_TYPEOF;
+	/* FALLTHRU */
+
     case ST_TYPEOF:
       switch (token)
 	{
@@ -2122,24 +2139,6 @@
 	}
       break;
 
-    case ST_TYPEOF_1:
-      if (token == IDENT)
-	{
-	  if (is_reserved_word(yytext, yyleng)
-	      || find_symbol(yytext, SYM_TYPEDEF, 1))
-	    {
-	      yyless(0);
-	      unput('(');
-	      lexstate = ST_NORMAL;
-	      token = TYPEOF_KEYW;
-	      break;
-	    }
-	  _APP("(", 1);
-	}
-	APP;
-	lexstate = ST_TYPEOF;
-	goto repeat;
-
     case ST_BRACKET:
       APP;
       switch (token)
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 4f727eb..f742c65 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -37,12 +37,40 @@
 	fi
 }
 
+# Thin archive build here makes a final archive with
+# symbol table and indexes from vmlinux objects, which can be
+# used as input to linker.
+#
+# Traditional incremental style of link does not require this step
+#
+# built-in.o output file
+#
+archive_builtin()
+{
+	if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
+		info AR built-in.o
+		rm -f built-in.o;
+		${AR} rcsT${KBUILD_ARFLAGS} built-in.o			\
+					${KBUILD_VMLINUX_INIT}		\
+					${KBUILD_VMLINUX_MAIN}
+	fi
+}
+
 # Link of vmlinux.o used for section mismatch analysis
 # ${1} output file
 modpost_link()
 {
-	${LD} ${LDFLAGS} -r -o ${1} ${KBUILD_VMLINUX_INIT}                   \
-		--start-group ${KBUILD_VMLINUX_MAIN} --end-group
+	local objects
+
+	if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
+		objects="--whole-archive built-in.o"
+	else
+		objects="${KBUILD_VMLINUX_INIT}				\
+			--start-group					\
+			${KBUILD_VMLINUX_MAIN}				\
+			--end-group"
+	fi
+	${LD} ${LDFLAGS} -r -o ${1} ${objects}
 }
 
 # Link of vmlinux
@@ -51,18 +79,36 @@
 vmlinux_link()
 {
 	local lds="${objtree}/${KBUILD_LDS}"
+	local objects
 
 	if [ "${SRCARCH}" != "um" ]; then
-		${LD} ${LDFLAGS} ${LDFLAGS_vmlinux} -o ${2}                  \
-			-T ${lds} ${KBUILD_VMLINUX_INIT}                     \
-			--start-group ${KBUILD_VMLINUX_MAIN} --end-group ${1}
+		if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
+			objects="--whole-archive built-in.o ${1}"
+		else
+			objects="${KBUILD_VMLINUX_INIT}			\
+				--start-group				\
+				${KBUILD_VMLINUX_MAIN}			\
+				--end-group				\
+				${1}"
+		fi
+
+		${LD} ${LDFLAGS} ${LDFLAGS_vmlinux} -o ${2}		\
+			-T ${lds} ${objects}
 	else
-		${CC} ${CFLAGS_vmlinux} -o ${2}                              \
-			-Wl,-T,${lds} ${KBUILD_VMLINUX_INIT}                 \
-			-Wl,--start-group                                    \
-				 ${KBUILD_VMLINUX_MAIN}                      \
-			-Wl,--end-group                                      \
-			-lutil -lrt -lpthread ${1}
+		if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
+			objects="-Wl,--whole-archive built-in.o ${1}"
+		else
+			objects="${KBUILD_VMLINUX_INIT}			\
+				-Wl,--start-group			\
+				${KBUILD_VMLINUX_MAIN}			\
+				-Wl,--end-group				\
+				${1}"
+		fi
+
+		${CC} ${CFLAGS_vmlinux} -o ${2}				\
+			-Wl,-T,${lds}					\
+			${objects}					\
+			-lutil -lrt -lpthread
 		rm -f linux
 	fi
 }
@@ -119,6 +165,7 @@
 	rm -f .tmp_kallsyms*
 	rm -f .tmp_version
 	rm -f .tmp_vmlinux*
+	rm -f built-in.o
 	rm -f System.map
 	rm -f vmlinux
 	rm -f vmlinux.o
@@ -162,6 +209,8 @@
 	. "./${KCONFIG_CONFIG}"
 esac
 
+archive_builtin
+
 #link vmlinux.o
 info LD vmlinux.o
 modpost_link vmlinux.o
diff --git a/scripts/tags.sh b/scripts/tags.sh
index b3775a9..a2ff338 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -263,7 +263,8 @@
 	-I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL,ACPI_EXPORT_SYMBOL   \
 	-I DEFINE_TRACE,EXPORT_TRACEPOINT_SYMBOL,EXPORT_TRACEPOINT_SYMBOL_GPL \
 	-I static,const						\
-	--extra=+f --c-kinds=+px --langmap=c:+.h "${regex[@]}"
+	--extra=+fq --c-kinds=+px --fields=+iaS --langmap=c:+.h \
+	"${regex[@]}"
 
 	setup_regex exuberant kconfig
 	all_kconfigs | xargs $1 -a                              \
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 729e595..5923d56 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -381,7 +381,7 @@
 	for (i = 0; i < AAFS_PROF_SIZEOF; i++) {
 		new->dents[i] = old->dents[i];
 		if (new->dents[i])
-			new->dents[i]->d_inode->i_mtime = CURRENT_TIME;
+			new->dents[i]->d_inode->i_mtime = current_time(new->dents[i]->d_inode);
 		old->dents[i] = NULL;
 	}
 }
diff --git a/security/commoncap.c b/security/commoncap.c
index 14540bd..8df676f 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -310,13 +310,8 @@
 	struct inode *inode = d_backing_inode(dentry);
 	int error;
 
-	if (!inode->i_op->getxattr)
-	       return 0;
-
-	error = inode->i_op->getxattr(dentry, inode, XATTR_NAME_CAPS, NULL, 0);
-	if (error <= 0)
-		return 0;
-	return 1;
+	error = __vfs_getxattr(dentry, inode, XATTR_NAME_CAPS, NULL, 0);
+	return error > 0;
 }
 
 /**
@@ -329,12 +324,12 @@
  */
 int cap_inode_killpriv(struct dentry *dentry)
 {
-	struct inode *inode = d_backing_inode(dentry);
+	int error;
 
-	if (!inode->i_op->removexattr)
-	       return 0;
-
-	return inode->i_op->removexattr(dentry, XATTR_NAME_CAPS);
+	error = __vfs_removexattr(dentry, XATTR_NAME_CAPS);
+	if (error == -EOPNOTSUPP)
+		error = 0;
+	return error;
 }
 
 /*
@@ -394,11 +389,11 @@
 
 	memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data));
 
-	if (!inode || !inode->i_op->getxattr)
+	if (!inode)
 		return -ENODATA;
 
-	size = inode->i_op->getxattr((struct dentry *)dentry, inode,
-				     XATTR_NAME_CAPS, &caps, XATTR_CAPS_SZ);
+	size = __vfs_getxattr((struct dentry *)dentry, inode,
+			      XATTR_NAME_CAPS, &caps, XATTR_CAPS_SZ);
 	if (size == -ENODATA || size == -EOPNOTSUPP)
 		/* no data, that's ok */
 		return -ENODATA;
diff --git a/security/inode.c b/security/inode.c
index acc3e9c..c83db05 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -117,7 +117,7 @@
 
 	inode->i_ino = get_next_ino();
 	inode->i_mode = mode;
-	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 	inode->i_private = data;
 	if (is_dir) {
 		inode->i_op = &simple_dir_inode_operations;
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 11c1d30..bf66391 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -182,8 +182,9 @@
 	int error;
 	int size;
 
-	if (!inode->i_op->getxattr)
+	if (!(inode->i_opflags & IOP_XATTR))
 		return -EOPNOTSUPP;
+
 	desc = init_desc(type);
 	if (IS_ERR(desc))
 		return PTR_ERR(desc);
@@ -253,8 +254,8 @@
 		rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM,
 					   &xattr_data,
 					   sizeof(xattr_data), 0);
-	} else if (rc == -ENODATA && inode->i_op->removexattr) {
-		rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM);
+	} else if (rc == -ENODATA && (inode->i_opflags & IOP_XATTR)) {
+		rc = __vfs_removexattr(dentry, XATTR_NAME_EVM);
 	}
 	return rc;
 }
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index b9e2628..ba86155 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -78,11 +78,11 @@
 	int error;
 	int count = 0;
 
-	if (!inode->i_op->getxattr)
+	if (!(inode->i_opflags & IOP_XATTR))
 		return -EOPNOTSUPP;
 
 	for (xattr = evm_config_xattrnames; *xattr != NULL; xattr++) {
-		error = inode->i_op->getxattr(dentry, inode, *xattr, NULL, 0);
+		error = __vfs_getxattr(dentry, inode, *xattr, NULL, 0);
 		if (error < 0) {
 			if (error == -ENODATA)
 				continue;
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 4b9b4a4..389325a 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -165,13 +165,13 @@
 int ima_read_xattr(struct dentry *dentry,
 		   struct evm_ima_xattr_data **xattr_value)
 {
-	struct inode *inode = d_backing_inode(dentry);
+	ssize_t ret;
 
-	if (!inode->i_op->getxattr)
-		return 0;
-
-	return vfs_getxattr_alloc(dentry, XATTR_NAME_IMA, (char **)xattr_value,
-				  0, GFP_NOFS);
+	ret = vfs_getxattr_alloc(dentry, XATTR_NAME_IMA, (char **)xattr_value,
+				 0, GFP_NOFS);
+	if (ret == -EOPNOTSUPP)
+		ret = 0;
+	return ret;
 }
 
 /*
@@ -190,12 +190,12 @@
 {
 	static const char op[] = "appraise_data";
 	char *cause = "unknown";
-	struct dentry *dentry = file->f_path.dentry;
+	struct dentry *dentry = file_dentry(file);
 	struct inode *inode = d_backing_inode(dentry);
 	enum integrity_status status = INTEGRITY_UNKNOWN;
 	int rc = xattr_len, hash_start = 0;
 
-	if (!inode->i_op->getxattr)
+	if (!(inode->i_opflags & IOP_XATTR))
 		return INTEGRITY_UNKNOWN;
 
 	if (rc <= 0) {
@@ -295,7 +295,7 @@
  */
 void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
 {
-	struct dentry *dentry = file->f_path.dentry;
+	struct dentry *dentry = file_dentry(file);
 	int rc = 0;
 
 	/* do not collect and update hash for digital signatures */
@@ -322,10 +322,10 @@
 {
 	struct inode *inode = d_backing_inode(dentry);
 	struct integrity_iint_cache *iint;
-	int must_appraise, rc;
+	int must_appraise;
 
 	if (!(ima_policy_flag & IMA_APPRAISE) || !S_ISREG(inode->i_mode)
-	    || !inode->i_op->removexattr)
+	    || !(inode->i_opflags & IOP_XATTR))
 		return;
 
 	must_appraise = ima_must_appraise(inode, MAY_ACCESS, POST_SETATTR);
@@ -338,8 +338,7 @@
 			iint->flags |= IMA_APPRAISE;
 	}
 	if (!must_appraise)
-		rc = inode->i_op->removexattr(dentry, XATTR_NAME_IMA);
-	return;
+		__vfs_removexattr(dentry, XATTR_NAME_IMA);
 }
 
 /*
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 596ef61..423d111 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -228,7 +228,7 @@
 	if ((action & IMA_APPRAISE_SUBMASK) ||
 		    strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) != 0)
 		/* read 'security.ima' */
-		xattr_len = ima_read_xattr(file->f_path.dentry, &xattr_value);
+		xattr_len = ima_read_xattr(file_dentry(file), &xattr_value);
 
 	hash_algo = ima_get_hash_algo(xattr_value, xattr_len);
 
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 2205ea2..0850579 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -507,14 +507,14 @@
 		   the root directory.  -ENODATA is ok, as this may be
 		   the first boot of the SELinux kernel before we have
 		   assigned xattr values to the filesystem. */
-		if (!root_inode->i_op->getxattr) {
+		if (!(root_inode->i_opflags & IOP_XATTR)) {
 			printk(KERN_WARNING "SELinux: (dev %s, type %s) has no "
 			       "xattr support\n", sb->s_id, sb->s_type->name);
 			rc = -EOPNOTSUPP;
 			goto out;
 		}
-		rc = root_inode->i_op->getxattr(root, root_inode,
-						XATTR_NAME_SELINUX, NULL, 0);
+
+		rc = __vfs_getxattr(root, root_inode, XATTR_NAME_SELINUX, NULL, 0);
 		if (rc < 0 && rc != -ENODATA) {
 			if (rc == -EOPNOTSUPP)
 				printk(KERN_WARNING "SELinux: (dev %s, type "
@@ -1410,11 +1410,10 @@
 	case SECURITY_FS_USE_NATIVE:
 		break;
 	case SECURITY_FS_USE_XATTR:
-		if (!inode->i_op->getxattr) {
+		if (!(inode->i_opflags & IOP_XATTR)) {
 			isec->sid = sbsec->def_sid;
 			break;
 		}
-
 		/* Need a dentry, since the xattr API requires one.
 		   Life would be simpler if we could just pass the inode. */
 		if (opt_dentry) {
@@ -1445,14 +1444,12 @@
 			goto out_unlock;
 		}
 		context[len] = '\0';
-		rc = inode->i_op->getxattr(dentry, inode, XATTR_NAME_SELINUX,
-					   context, len);
+		rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX, context, len);
 		if (rc == -ERANGE) {
 			kfree(context);
 
 			/* Need a larger buffer.  Query for the right size. */
-			rc = inode->i_op->getxattr(dentry, inode, XATTR_NAME_SELINUX,
-						   NULL, 0);
+			rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX, NULL, 0);
 			if (rc < 0) {
 				dput(dentry);
 				goto out_unlock;
@@ -1465,9 +1462,7 @@
 				goto out_unlock;
 			}
 			context[len] = '\0';
-			rc = inode->i_op->getxattr(dentry, inode,
-						   XATTR_NAME_SELINUX,
-						   context, len);
+			rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX, context, len);
 		}
 		dput(dentry);
 		if (rc < 0) {
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 0765c5b..72c145d 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -1089,7 +1089,7 @@
 
 	if (ret) {
 		ret->i_mode = mode;
-		ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
+		ret->i_atime = ret->i_mtime = ret->i_ctime = current_time(ret);
 	}
 	return ret;
 }
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index ace6838..d719db4 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -527,9 +527,9 @@
 	printk(KERN_DEBUG "SELinux:  %d users, %d roles, %d types, %d bools",
 	       p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim, p->p_bools.nprim);
 	if (p->mls_enabled)
-		printk(", %d sens, %d cats", p->p_levels.nprim,
+		printk(KERN_CONT ", %d sens, %d cats", p->p_levels.nprim,
 		       p->p_cats.nprim);
-	printk("\n");
+	printk(KERN_CONT "\n");
 
 	printk(KERN_DEBUG "SELinux:  %d classes, %d rules\n",
 	       p->p_classes.nprim, p->te_avtab.nel);
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index caec225..1cb0602 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -265,14 +265,14 @@
 	char *buffer;
 	struct smack_known *skp = NULL;
 
-	if (ip->i_op->getxattr == NULL)
+	if (!(ip->i_opflags & IOP_XATTR))
 		return ERR_PTR(-EOPNOTSUPP);
 
 	buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL);
 	if (buffer == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	rc = ip->i_op->getxattr(dp, ip, name, buffer, SMK_LONGLABEL);
+	rc = __vfs_getxattr(dp, ip, name, buffer, SMK_LONGLABEL);
 	if (rc < 0)
 		skp = ERR_PTR(rc);
 	else if (rc == 0)
@@ -3520,8 +3520,8 @@
 		 * It would be curious if the label of the task
 		 * does not match that assigned.
 		 */
-		if (inode->i_op->getxattr == NULL)
-			break;
+		if (!(inode->i_opflags & IOP_XATTR))
+		        break;
 		/*
 		 * Get the dentry for xattr.
 		 */
@@ -3545,12 +3545,12 @@
 			 */
 			if (isp->smk_flags & SMK_INODE_CHANGED) {
 				isp->smk_flags &= ~SMK_INODE_CHANGED;
-				rc = inode->i_op->setxattr(dp, inode,
+				rc = __vfs_setxattr(dp, inode,
 					XATTR_NAME_SMACKTRANSMUTE,
 					TRANS_TRUE, TRANS_TRUE_SIZE,
 					0);
 			} else {
-				rc = inode->i_op->getxattr(dp, inode,
+				rc = __vfs_getxattr(dp, inode,
 					XATTR_NAME_SMACKTRANSMUTE, trattr,
 					TRANS_TRUE_SIZE);
 				if (rc >= 0 && strncmp(trattr, TRANS_TRUE,
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index ade7c6c..682b73a 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -881,7 +881,7 @@
 	 * the execve().
 	 */
 	if (get_user_pages_remote(current, bprm->mm, pos, 1,
-				0, 1, &page, NULL) <= 0)
+				FOLL_FORCE, &page, NULL) <= 0)
 		return false;
 #else
 	page = bprm->page[pos / PAGE_SIZE];
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 5077f19..a97b275 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -173,7 +173,7 @@
 		 * Use filesystem name if filesystem does not support rename()
 		 * operation.
 		 */
-		if (!inode->i_op->rename && !inode->i_op->rename2)
+		if (!inode->i_op->rename)
 			goto prepend_filesystem_name;
 	}
 	/* Prepend device name. */
@@ -283,7 +283,7 @@
 		 * or dentry without vfsmount.
 		 */
 		if (!path->mnt ||
-		    (!inode->i_op->rename && !inode->i_op->rename2))
+		    (!inode->i_op->rename))
 			pos = tomoyo_get_local_path(path->dentry, buf,
 						    buf_len - 1);
 		/* Get absolute name for the rest. */
diff --git a/sound/core/seq/seq_compat.c b/sound/core/seq/seq_compat.c
index fce5697..8c35072 100644
--- a/sound/core/seq/seq_compat.c
+++ b/sound/core/seq/seq_compat.c
@@ -58,7 +58,7 @@
 		goto error;
 	data->kernel = NULL;
 
-	err = snd_seq_kernel_client_ctl(client->number, cmd, &data);
+	err = snd_seq_kernel_client_ctl(client->number, cmd, data);
 	if (err < 0)
 		goto error;
 
diff --git a/sound/pci/hda/dell_wmi_helper.c b/sound/pci/hda/dell_wmi_helper.c
index 9c22f95..19d41da 100644
--- a/sound/pci/hda/dell_wmi_helper.c
+++ b/sound/pci/hda/dell_wmi_helper.c
@@ -49,7 +49,7 @@
 		removefunc = true;
 		if (dell_led_set_func(DELL_LED_MICMUTE, false) >= 0) {
 			dell_led_value = 0;
-			if (spec->gen.num_adc_nids > 1)
+			if (spec->gen.num_adc_nids > 1 && !spec->gen.dyn_adc_switch)
 				codec_dbg(codec, "Skipping micmute LED control due to several ADCs");
 			else {
 				dell_old_cap_hook = spec->gen.cap_sync_hook;
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index f0955fd..6a23302 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -62,7 +62,7 @@
 			removefunc = false;
 		}
 		if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
-			if (spec->num_adc_nids > 1)
+			if (spec->num_adc_nids > 1 && !spec->dyn_adc_switch)
 				codec_dbg(codec,
 					  "Skipping micmute LED control due to several ADCs");
 			else {
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
index c8455b4..7ab14ce 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
@@ -338,7 +338,7 @@
 	spin_unlock_irqrestore(&sst->spinlock, flags);
 
 	/* continue to send any remaining messages... */
-	queue_kthread_work(&ipc->kworker, &ipc->kwork);
+	kthread_queue_work(&ipc->kworker, &ipc->kwork);
 
 	return IRQ_HANDLED;
 }
diff --git a/sound/soc/intel/common/sst-acpi.h b/sound/soc/intel/common/sst-acpi.h
index 5d29493..0127422 100644
--- a/sound/soc/intel/common/sst-acpi.h
+++ b/sound/soc/intel/common/sst-acpi.h
@@ -12,7 +12,6 @@
  *
  */
 
-#include <linux/kconfig.h>
 #include <linux/stddef.h>
 #include <linux/acpi.h>
 
diff --git a/sound/soc/intel/common/sst-ipc.c b/sound/soc/intel/common/sst-ipc.c
index a12c7bb..6c672ac 100644
--- a/sound/soc/intel/common/sst-ipc.c
+++ b/sound/soc/intel/common/sst-ipc.c
@@ -111,7 +111,7 @@
 	list_add_tail(&msg->list, &ipc->tx_list);
 	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
 
-	queue_kthread_work(&ipc->kworker, &ipc->kwork);
+	kthread_queue_work(&ipc->kworker, &ipc->kwork);
 
 	if (wait)
 		return tx_wait_done(ipc, msg, rx_data);
@@ -281,7 +281,7 @@
 		return -ENOMEM;
 
 	/* start the IPC message thread */
-	init_kthread_worker(&ipc->kworker);
+	kthread_init_worker(&ipc->kworker);
 	ipc->tx_thread = kthread_run(kthread_worker_fn,
 					&ipc->kworker, "%s",
 					dev_name(ipc->dev));
@@ -292,7 +292,7 @@
 		return ret;
 	}
 
-	init_kthread_work(&ipc->kwork, ipc_tx_msgs);
+	kthread_init_work(&ipc->kwork, ipc_tx_msgs);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(sst_ipc_init);
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index 9156522..e432a31 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -818,7 +818,7 @@
 	spin_unlock_irqrestore(&sst->spinlock, flags);
 
 	/* continue to send any remaining messages... */
-	queue_kthread_work(&ipc->kworker, &ipc->kwork);
+	kthread_queue_work(&ipc->kworker, &ipc->kwork);
 
 	return IRQ_HANDLED;
 }
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
index 0bd01e6..797cf40 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.c
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -464,7 +464,7 @@
 	skl_ipc_int_enable(dsp);
 
 	/* continue to send any remaining messages... */
-	queue_kthread_work(&ipc->kworker, &ipc->kwork);
+	kthread_queue_work(&ipc->kworker, &ipc->kwork);
 
 	return IRQ_HANDLED;
 }
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 14e587e..90009c0 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -604,8 +604,8 @@
 	}
 
 	data_copy = memdup_user(data, count);
-	if (IS_ERR(ERR_PTR))
-		return -ENOMEM;
+	if (IS_ERR(data_copy))
+		return PTR_ERR(data_copy);
 
 	rv = line6_send_raw_message(line6, data_copy, count);
 
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index 9352a44..49cd4a6 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -317,7 +317,8 @@
 	if (pod->line6.properties->capabilities & LINE6_CAP_PCM) {
 		/* initialize PCM subsystem: */
 		err = line6_init_pcm(line6,
-			(id->driver_info == LINE6_PODX3) ? &podx3_pcm_properties :
+			(id->driver_info == LINE6_PODX3 ||
+			id->driver_info == LINE6_PODX3LIVE) ? &podx3_pcm_properties :
 			&podhd_pcm_properties);
 		if (err < 0)
 			return err;
diff --git a/Documentation/accounting/.gitignore b/tools/accounting/.gitignore
similarity index 100%
rename from Documentation/accounting/.gitignore
rename to tools/accounting/.gitignore
diff --git a/tools/accounting/Makefile b/tools/accounting/Makefile
new file mode 100644
index 0000000..647c94a
--- /dev/null
+++ b/tools/accounting/Makefile
@@ -0,0 +1,9 @@
+CC := $(CROSS_COMPILE)gcc
+CFLAGS := -I../../usr/include
+
+PROGS := getdelays
+
+all: $(PROGS)
+
+clean:
+	rm -fr $(PROGS)
diff --git a/Documentation/accounting/getdelays.c b/tools/accounting/getdelays.c
similarity index 100%
rename from Documentation/accounting/getdelays.c
rename to tools/accounting/getdelays.c
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 92a8308..1188bc8 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -106,7 +106,6 @@
 #define X86_FEATURE_APERFMPERF	( 3*32+28) /* APERFMPERF */
 #define X86_FEATURE_EAGER_FPU	( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
 #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
-#define X86_FEATURE_MCE_RECOVERY ( 3*32+31) /* cpu has recoverable machine checks */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	( 4*32+ 0) /* "pni" SSE-3 */
diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S
index 2ec0b0abb..49e6eba 100644
--- a/tools/arch/x86/lib/memcpy_64.S
+++ b/tools/arch/x86/lib/memcpy_64.S
@@ -181,11 +181,11 @@
 
 #ifndef CONFIG_UML
 /*
- * memcpy_mcsafe - memory copy with machine check exception handling
+ * memcpy_mcsafe_unrolled - memory copy with machine check exception handling
  * Note that we only catch machine checks when reading the source addresses.
  * Writes to target are posted and don't generate machine checks.
  */
-ENTRY(memcpy_mcsafe)
+ENTRY(memcpy_mcsafe_unrolled)
 	cmpl $8, %edx
 	/* Less than 8 bytes? Go to byte copy loop */
 	jb .L_no_whole_words
@@ -273,7 +273,7 @@
 .L_done_memcpy_trap:
 	xorq %rax, %rax
 	ret
-ENDPROC(memcpy_mcsafe)
+ENDPROC(memcpy_mcsafe_unrolled)
 
 	.section .fixup, "ax"
 	/* Return -EFAULT for any failure */
diff --git a/tools/build/Build b/tools/build/Build
index 63a6c34..76d1a49 100644
--- a/tools/build/Build
+++ b/tools/build/Build
@@ -1 +1,3 @@
+hostprogs := fixdep
+
 fixdep-y := fixdep.o
diff --git a/tools/build/Build.include b/tools/build/Build.include
index 4d000bc..1dcb95e 100644
--- a/tools/build/Build.include
+++ b/tools/build/Build.include
@@ -90,3 +90,9 @@
 # - per object C flags
 # - BUILD_STR macro to allow '-D"$(variable)"' constructs
 c_flags = -Wp,-MD,$(depfile),-MT,$@ $(CFLAGS) -D"BUILD_STR(s)=\#s" $(CFLAGS_$(basetarget).o) $(CFLAGS_$(obj))
+cxx_flags = -Wp,-MD,$(depfile),-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXXFLAGS_$(basetarget).o) $(CXXFLAGS_$(obj))
+
+###
+## HOSTCC C flags
+
+host_c_flags = -Wp,-MD,$(depfile),-MT,$@ $(CHOSTFLAGS) -D"BUILD_STR(s)=\#s" $(CHOSTFLAGS_$(basetarget).o) $(CHOSTFLAGS_$(obj))
diff --git a/tools/build/Makefile b/tools/build/Makefile
index 0d5a0e3..8332959 100644
--- a/tools/build/Makefile
+++ b/tools/build/Makefile
@@ -14,6 +14,12 @@
 $(call allow-override,CC,$(CROSS_COMPILE)gcc)
 $(call allow-override,LD,$(CROSS_COMPILE)ld)
 
+HOSTCC ?= gcc
+HOSTLD ?= ld
+HOSTAR ?= ar
+
+export HOSTCC HOSTLD HOSTAR
+
 ifeq ($(V),1)
   Q =
 else
@@ -36,7 +42,7 @@
 	$(Q)$(MAKE) $(build)=fixdep
 
 $(OUTPUT)fixdep: $(OUTPUT)fixdep-in.o
-	$(QUIET_LINK)$(CC) $(LDFLAGS) -o $@ $<
+	$(QUIET_LINK)$(HOSTCC) $(LDFLAGS) -o $@ $<
 
 FORCE:
 
diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
index 27f3583..99c0ccd 100644
--- a/tools/build/Makefile.build
+++ b/tools/build/Makefile.build
@@ -58,6 +58,12 @@
 quiet_cmd_cc_o_c = CC       $@
       cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
 
+quiet_cmd_host_cc_o_c = HOSTCC   $@
+      cmd_host_cc_o_c = $(HOSTCC) $(host_c_flags) -c -o $@ $<
+
+quiet_cmd_cxx_o_c = CXX      $@
+      cmd_cxx_o_c = $(CXX) $(cxx_flags) -c -o $@ $<
+
 quiet_cmd_cpp_i_c = CPP      $@
       cmd_cpp_i_c = $(CC) $(c_flags) -E -o $@ $<
 
@@ -70,16 +76,28 @@
 # If there's nothing to link, create empty $@ object.
 quiet_cmd_ld_multi = LD       $@
       cmd_ld_multi = $(if $(strip $(obj-y)),\
-		       $(LD) -r -o $@  $(filter $(obj-y),$^),rm -f $@; $(AR) rcs $@)
+                     $(LD) -r -o $@  $(filter $(obj-y),$^),rm -f $@; $(AR) rcs $@)
+
+quiet_cmd_host_ld_multi = HOSTLD   $@
+      cmd_host_ld_multi = $(if $(strip $(obj-y)),\
+                          $(HOSTLD) -r -o $@  $(filter $(obj-y),$^),rm -f $@; $(HOSTAR) rcs $@)
+
+ifneq ($(filter $(obj),$(hostprogs)),)
+  host = host_
+endif
 
 # Build rules
 $(OUTPUT)%.o: %.c FORCE
 	$(call rule_mkdir)
-	$(call if_changed_dep,cc_o_c)
+	$(call if_changed_dep,$(host)cc_o_c)
+
+$(OUTPUT)%.o: %.cpp FORCE
+	$(call rule_mkdir)
+	$(call if_changed_dep,cxx_o_c)
 
 $(OUTPUT)%.o: %.S FORCE
 	$(call rule_mkdir)
-	$(call if_changed_dep,cc_o_c)
+	$(call if_changed_dep,$(host)cc_o_c)
 
 $(OUTPUT)%.i: %.c FORCE
 	$(call rule_mkdir)
@@ -119,7 +137,7 @@
 
 $(in-target): $(obj-y) FORCE
 	$(call rule_mkdir)
-	$(call if_changed,ld_multi)
+	$(call if_changed,$(host)ld_multi)
 
 __build: $(in-target)
 	@:
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index a120c6b..ae52e02 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -7,7 +7,7 @@
 
 feature_check = $(eval $(feature_check_code))
 define feature_check_code
-  feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
+  feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
 endef
 
 feature_set = $(eval $(feature_set_code))
diff --git a/tools/build/Makefile.include b/tools/build/Makefile.include
index be630be..ad22e4e 100644
--- a/tools/build/Makefile.include
+++ b/tools/build/Makefile.include
@@ -1,10 +1,6 @@
 build := -f $(srctree)/tools/build/Makefile.build dir=. obj
 
-ifdef CROSS_COMPILE
-fixdep:
-else
 fixdep:
 	$(Q)$(MAKE) -C $(srctree)/tools/build CFLAGS= LDFLAGS= $(OUTPUT)fixdep
-endif
 
 .PHONY: fixdep
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index a0b29a3..ac9c477 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -46,11 +46,13 @@
 	test-lzma.bin			\
 	test-bpf.bin			\
 	test-get_cpuid.bin		\
-	test-sdt.bin
+	test-sdt.bin			\
+	test-cxx.bin
 
 FILES := $(addprefix $(OUTPUT),$(FILES))
 
 CC := $(CROSS_COMPILE)gcc -MD
+CXX := $(CROSS_COMPILE)g++ -MD
 PKG_CONFIG := $(CROSS_COMPILE)pkg-config
 
 all: $(FILES)
@@ -58,6 +60,9 @@
 __BUILD = $(CC) $(CFLAGS) -Wall -Werror -o $@ $(patsubst %.bin,%.c,$(@F)) $(LDFLAGS)
   BUILD = $(__BUILD) > $(@:.bin=.make.output) 2>&1
 
+__BUILDXX = $(CXX) $(CXXFLAGS) -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(@F)) $(LDFLAGS)
+  BUILDXX = $(__BUILDXX) > $(@:.bin=.make.output) 2>&1
+
 ###############################
 
 $(OUTPUT)test-all.bin:
@@ -217,6 +222,9 @@
 $(OUTPUT)test-sdt.bin:
 	$(BUILD)
 
+$(OUTPUT)test-cxx.bin:
+	$(BUILDXX) -std=gnu++11
+
 -include $(OUTPUT)*.d
 
 ###############################
diff --git a/tools/build/feature/test-cxx.cpp b/tools/build/feature/test-cxx.cpp
new file mode 100644
index 0000000..b1dee9a
--- /dev/null
+++ b/tools/build/feature/test-cxx.cpp
@@ -0,0 +1,15 @@
+#include <iostream>
+#include <memory>
+
+static void print_str(std::string s)
+{
+	std::cout << s << std::endl;
+}
+
+int main()
+{
+	std::string s("Hello World!");
+	print_str(std::move(s));
+	std::cout << "|" << s << "|" << std::endl;
+	return 0;
+}
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index da218fe..9e5fc16 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -339,7 +339,7 @@
 	BPF_FUNC_skb_change_type,
 
 	/**
-	 * bpf_skb_in_cgroup(skb, map, index) - Check cgroup2 membership of skb
+	 * bpf_skb_under_cgroup(skb, map, index) - Check cgroup2 membership of skb
 	 * @skb: pointer to skb
 	 * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
 	 * @index: index of the cgroup in the bpf_map
@@ -348,7 +348,7 @@
 	 *   == 1 skb succeeded the cgroup2 descendant test
 	 *    < 0 error
 	 */
-	BPF_FUNC_skb_in_cgroup,
+	BPF_FUNC_skb_under_cgroup,
 
 	/**
 	 * bpf_get_hash_recalc(skb)
diff --git a/Documentation/laptops/.gitignore b/tools/laptop/dslm/.gitignore
similarity index 100%
rename from Documentation/laptops/.gitignore
rename to tools/laptop/dslm/.gitignore
diff --git a/tools/laptop/dslm/Makefile b/tools/laptop/dslm/Makefile
new file mode 100644
index 0000000..ff613b3
--- /dev/null
+++ b/tools/laptop/dslm/Makefile
@@ -0,0 +1,9 @@
+CC := $(CROSS_COMPILE)gcc
+CFLAGS := -I../../usr/include
+
+PROGS := dslm
+
+all: $(PROGS)
+
+clean:
+	rm -fr $(PROGS)
diff --git a/Documentation/laptops/dslm.c b/tools/laptop/dslm/dslm.c
similarity index 100%
rename from Documentation/laptops/dslm.c
rename to tools/laptop/dslm/dslm.c
diff --git a/tools/lib/subcmd/pager.c b/tools/lib/subcmd/pager.c
index d50f3b58..6518bea 100644
--- a/tools/lib/subcmd/pager.c
+++ b/tools/lib/subcmd/pager.c
@@ -3,6 +3,7 @@
 #include <stdio.h>
 #include <string.h>
 #include <signal.h>
+#include <sys/ioctl.h>
 #include "pager.h"
 #include "run-command.h"
 #include "sigchain.h"
@@ -14,6 +15,7 @@
  */
 
 static int spawned_pager;
+static int pager_columns;
 
 void pager_init(const char *pager_env)
 {
@@ -58,9 +60,12 @@
 void setup_pager(void)
 {
 	const char *pager = getenv(subcmd_config.pager_env);
+	struct winsize sz;
 
 	if (!isatty(1))
 		return;
+	if (ioctl(1, TIOCGWINSZ, &sz) == 0)
+		pager_columns = sz.ws_col;
 	if (!pager)
 		pager = getenv("PAGER");
 	if (!(pager || access("/usr/bin/pager", X_OK)))
@@ -98,3 +103,14 @@
 {
 	return spawned_pager;
 }
+
+int pager_get_columns(void)
+{
+	char *s;
+
+	s = getenv("COLUMNS");
+	if (s)
+		return atoi(s);
+
+	return (pager_columns ? pager_columns : 80) - 2;
+}
diff --git a/tools/lib/subcmd/pager.h b/tools/lib/subcmd/pager.h
index 8b83714..623f554 100644
--- a/tools/lib/subcmd/pager.h
+++ b/tools/lib/subcmd/pager.h
@@ -5,5 +5,6 @@
 
 extern void setup_pager(void);
 extern int pager_in_use(void);
+extern int pager_get_columns(void);
 
 #endif /* __SUBCMD_PAGER_H */
diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
index 3bcada3..65984f1 100644
--- a/tools/lib/traceevent/kbuffer-parse.c
+++ b/tools/lib/traceevent/kbuffer-parse.c
@@ -622,6 +622,7 @@
 
 	/* Reset the buffer */
 	kbuffer_load_subbuffer(kbuf, kbuf->subbuffer);
+	data = kbuffer_read_event(kbuf, ts);
 
 	while (kbuf->curr < offset) {
 		data = kbuffer_next_event(kbuf, ts);
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index c0c0b26..b63a31b 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -98,6 +98,15 @@
 			*type = INSN_FP_SETUP;
 		break;
 
+	case 0x8d:
+		if (insn.rex_prefix.bytes &&
+		    insn.rex_prefix.bytes[0] == 0x48 &&
+		    insn.modrm.nbytes && insn.modrm.bytes[0] == 0x2c &&
+		    insn.sib.nbytes && insn.sib.bytes[0] == 0x24)
+			/* lea %(rsp), %rbp */
+			*type = INSN_FP_SETUP;
+		break;
+
 	case 0x90:
 		*type = INSN_NOP;
 		break;
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 143b6cd..4490601 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -97,6 +97,19 @@
 	return next;
 }
 
+static bool gcov_enabled(struct objtool_file *file)
+{
+	struct section *sec;
+	struct symbol *sym;
+
+	list_for_each_entry(sec, &file->elf->sections, list)
+		list_for_each_entry(sym, &sec->symbol_list, list)
+			if (!strncmp(sym->name, "__gcov_.", 8))
+				return true;
+
+	return false;
+}
+
 #define for_each_insn(file, insn)					\
 	list_for_each_entry(insn, &file->insn_list, list)
 
@@ -713,6 +726,7 @@
 				      struct instruction *insn)
 {
 	struct rela *text_rela, *rodata_rela;
+	struct instruction *orig_insn = insn;
 
 	text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
 	if (text_rela && text_rela->sym == file->rodata->sym) {
@@ -733,10 +747,16 @@
 
 	/* case 3 */
 	func_for_each_insn_continue_reverse(file, func, insn) {
-		if (insn->type == INSN_JUMP_UNCONDITIONAL ||
-		    insn->type == INSN_JUMP_DYNAMIC)
+		if (insn->type == INSN_JUMP_DYNAMIC)
 			break;
 
+		/* allow small jumps within the range */
+		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
+		    insn->jump_dest &&
+		    (insn->jump_dest->offset <= insn->offset ||
+		     insn->jump_dest->offset >= orig_insn->offset))
+		    break;
+
 		text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
 						    insn->len);
 		if (text_rela && text_rela->sym == file->rodata->sym)
@@ -1034,34 +1054,6 @@
 	return 0;
 }
 
-static bool is_gcov_insn(struct instruction *insn)
-{
-	struct rela *rela;
-	struct section *sec;
-	struct symbol *sym;
-	unsigned long offset;
-
-	rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
-	if (!rela)
-		return false;
-
-	if (rela->sym->type != STT_SECTION)
-		return false;
-
-	sec = rela->sym->sec;
-	offset = rela->addend + insn->offset + insn->len - rela->offset;
-
-	list_for_each_entry(sym, &sec->symbol_list, list) {
-		if (sym->type != STT_OBJECT)
-			continue;
-
-		if (offset >= sym->offset && offset < sym->offset + sym->len)
-			return (!memcmp(sym->name, "__gcov0.", 8));
-	}
-
-	return false;
-}
-
 static bool is_kasan_insn(struct instruction *insn)
 {
 	return (insn->type == INSN_CALL &&
@@ -1083,9 +1075,6 @@
 	if (insn->type == INSN_NOP)
 		return true;
 
-	if (is_gcov_insn(insn))
-		return true;
-
 	/*
 	 * Check if this (or a subsequent) instruction is related to
 	 * CONFIG_UBSAN or CONFIG_KASAN.
@@ -1146,6 +1135,19 @@
 				    ignore_unreachable_insn(func, insn))
 					continue;
 
+				/*
+				 * gcov produces a lot of unreachable
+				 * instructions.  If we get an unreachable
+				 * warning and the file has gcov enabled, just
+				 * ignore it, and all other such warnings for
+				 * the file.
+				 */
+				if (!file->ignore_unreachables &&
+				    gcov_enabled(file)) {
+					file->ignore_unreachables = true;
+					continue;
+				}
+
 				WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
 				warnings++;
 			}
diff --git a/Documentation/pcmcia/.gitignore b/tools/pcmcia/.gitignore
similarity index 100%
rename from Documentation/pcmcia/.gitignore
rename to tools/pcmcia/.gitignore
diff --git a/tools/pcmcia/Makefile b/tools/pcmcia/Makefile
new file mode 100644
index 0000000..81a7498
--- /dev/null
+++ b/tools/pcmcia/Makefile
@@ -0,0 +1,9 @@
+CC := $(CROSS_COMPILE)gcc
+CFLAGS := -I../../usr/include
+
+PROGS := crc32hash
+
+all: $(PROGS)
+
+clean:
+	rm -fr $(PROGS)
diff --git a/Documentation/pcmcia/crc32hash.c b/tools/pcmcia/crc32hash.c
similarity index 100%
rename from Documentation/pcmcia/crc32hash.c
rename to tools/pcmcia/crc32hash.c
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index a126e97..41857cc 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -8,13 +8,23 @@
 SYNOPSIS
 --------
 [verse]
-'perf list' [hw|sw|cache|tracepoint|pmu|event_glob]
+'perf list' [--no-desc] [--long-desc] [hw|sw|cache|tracepoint|pmu|event_glob]
 
 DESCRIPTION
 -----------
 This command displays the symbolic event types which can be selected in the
 various perf commands with the -e option.
 
+OPTIONS
+-------
+--no-desc::
+Don't print descriptions.
+
+-v::
+--long-desc::
+Print longer event descriptions.
+
+
 [[EVENT_MODIFIERS]]
 EVENT MODIFIERS
 ---------------
diff --git a/tools/perf/Documentation/tips.txt b/tools/perf/Documentation/tips.txt
index 5950b5a..8a6479c 100644
--- a/tools/perf/Documentation/tips.txt
+++ b/tools/perf/Documentation/tips.txt
@@ -28,3 +28,7 @@
 See assembly instructions with percentage: perf annotate <symbol>
 If you prefer Intel style assembly, try: perf annotate -M intel
 For hierarchical output, try: perf report --hierarchy
+Order by the overhead of source file name and line number: perf report -s srcline
+System-wide collection from all CPUs: perf record -a
+Show current config key-value pairs: perf config --list
+Show user configuration overrides: perf config --user --list
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index d710db1..982d643 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -144,6 +144,10 @@
 
 LD += $(EXTRA_LDFLAGS)
 
+HOSTCC  ?= gcc
+HOSTLD  ?= ld
+HOSTAR  ?= ar
+
 PKG_CONFIG = $(CROSS_COMPILE)pkg-config
 
 RM      = rm -f
@@ -345,8 +349,18 @@
 PERF_IN := $(OUTPUT)perf-in.o
 
 export srctree OUTPUT RM CC LD AR CFLAGS V BISON FLEX AWK
+export HOSTCC HOSTLD HOSTAR
 include $(srctree)/tools/build/Makefile.include
 
+JEVENTS       := $(OUTPUT)pmu-events/jevents
+JEVENTS_IN    := $(OUTPUT)pmu-events/jevents-in.o
+
+PMU_EVENTS_IN := $(OUTPUT)pmu-events/pmu-events-in.o
+
+export JEVENTS
+
+build := -f $(srctree)/tools/build/Makefile.build dir=. obj
+
 $(PERF_IN): prepare FORCE
 	@(test -f ../../include/uapi/linux/perf_event.h && ( \
         (diff -B ../include/uapi/linux/perf_event.h ../../include/uapi/linux/perf_event.h >/dev/null) \
@@ -443,9 +457,18 @@
 	|| echo "Warning: tools/include/uapi/linux/mman.h differs from kernel" >&2 )) || true
 	$(Q)$(MAKE) $(build)=perf
 
-$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
+$(JEVENTS_IN): FORCE
+	$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=pmu-events obj=jevents
+
+$(JEVENTS): $(JEVENTS_IN)
+	$(QUIET_LINK)$(HOSTCC) $(JEVENTS_IN) -o $@
+
+$(PMU_EVENTS_IN): $(JEVENTS) FORCE
+	$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=pmu-events obj=pmu-events
+
+$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
 	$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS) \
-		$(PERF_IN) $(LIBS) -o $@
+		$(PERF_IN) $(PMU_EVENTS_IN) $(LIBS) -o $@
 
 $(GTK_IN): fixdep FORCE
 	$(Q)$(MAKE) $(build)=gtk
@@ -474,6 +497,8 @@
 ifneq ($(OUTPUT),)
 %.o: $(OUTPUT)%.o
 	@echo "    # Redirected target $@ => $(OUTPUT)$@"
+pmu-events/%.o: $(OUTPUT)pmu-events/%.o
+	@echo "    # Redirected target $@ => $(OUTPUT)$@"
 util/%.o: $(OUTPUT)util/%.o
 	@echo "    # Redirected target $@ => $(OUTPUT)$@"
 bench/%.o: $(OUTPUT)bench/%.o
@@ -729,10 +754,11 @@
 	$(call QUIET_CLEAN, core-objs)  $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
 	$(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
 	$(Q)$(RM) $(OUTPUT).config-detected
-	$(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32
+	$(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)pmu-events/jevents
 	$(call QUIET_CLEAN, core-gen)   $(RM)  *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \
 		$(OUTPUT)util/intel-pt-decoder/inat-tables.c $(OUTPUT)fixdep \
-		$(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c
+		$(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c \
+		$(OUTPUT)pmu-events/pmu-events.c
 	$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
 	$(python-clean)
 
diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c
index f8ccee1..9aaa6f5 100644
--- a/tools/perf/arch/powerpc/util/header.c
+++ b/tools/perf/arch/powerpc/util/header.c
@@ -32,3 +32,14 @@
 	}
 	return -1;
 }
+
+char *
+get_cpuid_str(void)
+{
+	char *bufp;
+
+	if (asprintf(&bufp, "%.8lx", mfspr(SPRN_PVR)) < 0)
+		bufp = NULL;
+
+	return bufp;
+}
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
index ed9d5d1..1030a6e 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -82,7 +82,8 @@
 	 *
 	 * In addition, we shouldn't specify an offset for kretprobes.
 	 */
-	if (pev->point.offset || pev->point.retprobe || !map || !sym)
+	if (pev->point.offset || (!pev->uprobes && pev->point.retprobe) ||
+	    !map || !sym)
 		return;
 
 	lep_offset = PPC64_LOCAL_ENTRY_OFFSET(sym->arch_sym);
diff --git a/tools/perf/arch/x86/util/header.c b/tools/perf/arch/x86/util/header.c
index 146d12a..a74a48d 100644
--- a/tools/perf/arch/x86/util/header.c
+++ b/tools/perf/arch/x86/util/header.c
@@ -19,8 +19,8 @@
 			: "a" (op));
 }
 
-int
-get_cpuid(char *buffer, size_t sz)
+static int
+__get_cpuid(char *buffer, size_t sz, const char *fmt)
 {
 	unsigned int a, b, c, d, lvl;
 	int family = -1, model = -1, step = -1;
@@ -48,7 +48,7 @@
 		if (family >= 0x6)
 			model += ((a >> 16) & 0xf) << 4;
 	}
-	nb = scnprintf(buffer, sz, "%s,%u,%u,%u$", vendor, family, model, step);
+	nb = scnprintf(buffer, sz, fmt, vendor, family, model, step);
 
 	/* look for end marker to ensure the entire data fit */
 	if (strchr(buffer, '$')) {
@@ -57,3 +57,21 @@
 	}
 	return -1;
 }
+
+int
+get_cpuid(char *buffer, size_t sz)
+{
+	return __get_cpuid(buffer, sz, "%s,%u,%u,%u$");
+}
+
+char *
+get_cpuid_str(void)
+{
+	char *buf = malloc(128);
+
+	if (__get_cpuid(buf, 128, "%s-%u-%X$") < 0) {
+		free(buf);
+		return NULL;
+	}
+	return buf;
+}
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 88ee419..ba9322f 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -16,16 +16,23 @@
 #include "util/pmu.h"
 #include <subcmd/parse-options.h>
 
+static bool desc_flag = true;
+
 int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
 {
 	int i;
 	bool raw_dump = false;
+	bool long_desc_flag = false;
 	struct option list_options[] = {
 		OPT_BOOLEAN(0, "raw-dump", &raw_dump, "Dump raw events"),
+		OPT_BOOLEAN('d', "desc", &desc_flag,
+			    "Print extra event descriptions. --no-desc to not print."),
+		OPT_BOOLEAN('v', "long-desc", &long_desc_flag,
+			    "Print longer event descriptions."),
 		OPT_END()
 	};
 	const char * const list_usage[] = {
-		"perf list [hw|sw|cache|tracepoint|pmu|sdt|event_glob]",
+		"perf list [<options>] [hw|sw|cache|tracepoint|pmu|sdt|event_glob]",
 		NULL
 	};
 
@@ -40,7 +47,7 @@
 		printf("\nList of pre-defined events (to be used in -e):\n\n");
 
 	if (argc == 0) {
-		print_events(NULL, raw_dump);
+		print_events(NULL, raw_dump, !desc_flag, long_desc_flag);
 		return 0;
 	}
 
@@ -61,14 +68,16 @@
 			 strcmp(argv[i], "hwcache") == 0)
 			print_hwcache_events(NULL, raw_dump);
 		else if (strcmp(argv[i], "pmu") == 0)
-			print_pmu_events(NULL, raw_dump);
+			print_pmu_events(NULL, raw_dump, !desc_flag,
+						long_desc_flag);
 		else if (strcmp(argv[i], "sdt") == 0)
 			print_sdt_events(NULL, NULL, raw_dump);
 		else if ((sep = strchr(argv[i], ':')) != NULL) {
 			int sep_idx;
 
 			if (sep == NULL) {
-				print_events(argv[i], raw_dump);
+				print_events(argv[i], raw_dump, !desc_flag,
+							long_desc_flag);
 				continue;
 			}
 			sep_idx = sep - argv[i];
@@ -90,7 +99,8 @@
 			print_symbol_events(s, PERF_TYPE_SOFTWARE,
 					    event_symbols_sw, PERF_COUNT_SW_MAX, raw_dump);
 			print_hwcache_events(s, raw_dump);
-			print_pmu_events(s, raw_dump);
+			print_pmu_events(s, raw_dump, !desc_flag,
+						long_desc_flag);
 			print_tracepoint_events(NULL, s, raw_dump);
 			print_sdt_events(NULL, s, raw_dump);
 			free(s);
diff --git a/tools/perf/jvmti/Makefile b/tools/perf/jvmti/Makefile
index 5ce61a1..df14e6b 100644
--- a/tools/perf/jvmti/Makefile
+++ b/tools/perf/jvmti/Makefile
@@ -36,7 +36,7 @@
 # The following works at least on fedora 23, you may need the next
 # line for other distros.
 ifneq (,$(wildcard /usr/sbin/update-java-alternatives))
-JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | cut -d ' ' -f 3)
+JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
 else
   ifneq (,$(wildcard /usr/sbin/alternatives))
     JDIR=$(shell alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build
new file mode 100644
index 0000000..9213a12
--- /dev/null
+++ b/tools/perf/pmu-events/Build
@@ -0,0 +1,13 @@
+hostprogs := jevents
+
+jevents-y	+= json.o jsmn.o jevents.o
+pmu-events-y	+= pmu-events.o
+JDIR		=  pmu-events/arch/$(ARCH)
+JSON		=  $(shell [ -d $(JDIR) ] &&				\
+			find $(JDIR) -name '*.json' -o -name 'mapfile.csv')
+#
+# Locate/process JSON files in pmu-events/arch/
+# directory and create tables in pmu-events.c.
+#
+$(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS)
+	$(Q)$(call echo-cmd,gen)$(JEVENTS) $(ARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
diff --git a/tools/perf/pmu-events/README b/tools/perf/pmu-events/README
new file mode 100644
index 0000000..1408ade
--- /dev/null
+++ b/tools/perf/pmu-events/README
@@ -0,0 +1,147 @@
+
+The contents of this directory allow users to specify PMU events in their
+CPUs by their symbolic names rather than raw event codes (see example below).
+
+The main program in this directory, is the 'jevents', which is built and
+executed _BEFORE_ the perf binary itself is built.
+
+The 'jevents' program tries to locate and process JSON files in the directory
+tree tools/perf/pmu-events/arch/foo.
+
+	- Regular files with '.json' extension in the name are assumed to be
+	  JSON files, each of which describes a set of PMU events.
+
+	- Regular files with basename starting with 'mapfile.csv' are assumed
+	  to be a CSV file that maps a specific CPU to its set of PMU events.
+	  (see below for mapfile format)
+
+	- Directories are traversed, but all other files are ignored.
+
+The PMU events supported by a CPU model are expected to grouped into topics
+such as Pipelining, Cache, Memory, Floating-point etc. All events for a topic
+should be placed in a separate JSON file - where the file name identifies
+the topic. Eg: "Floating-point.json".
+
+All the topic JSON files for a CPU model/family should be in a separate
+sub directory. Thus for the Silvermont X86 CPU:
+
+	$ ls tools/perf/pmu-events/arch/x86/Silvermont_core
+	Cache.json 	Memory.json 	Virtual-Memory.json
+	Frontend.json 	Pipeline.json
+
+Using the JSON files and the mapfile, 'jevents' generates the C source file,
+'pmu-events.c', which encodes the two sets of tables:
+
+	- Set of 'PMU events tables' for all known CPUs in the architecture,
+	  (one table like the following, per JSON file; table name 'pme_power8'
+	  is derived from JSON file name, 'power8.json').
+
+		struct pmu_event pme_power8[] = {
+
+			...
+
+			{
+				.name = "pm_1plus_ppc_cmpl",
+				.event = "event=0x100f2",
+				.desc = "1 or more ppc insts finished,",
+			},
+
+			...
+		}
+
+	- A 'mapping table' that maps each CPU of the architecture, to its
+	  'PMU events table'
+
+		struct pmu_events_map pmu_events_map[] = {
+		{
+			.cpuid = "004b0000",
+			.version = "1",
+			.type = "core",
+			.table = pme_power8
+		},
+			...
+
+		};
+
+After the 'pmu-events.c' is generated, it is compiled and the resulting
+'pmu-events.o' is added to 'libperf.a' which is then used to build perf.
+
+NOTES:
+	1. Several CPUs can support same set of events and hence use a common
+	   JSON file. Hence several entries in the pmu_events_map[] could map
+	   to a single 'PMU events table'.
+
+	2. The 'pmu-events.h' has an extern declaration for the mapping table
+	   and the generated 'pmu-events.c' defines this table.
+
+	3. _All_ known CPU tables for architecture are included in the perf
+	   binary.
+
+At run time, perf determines the actual CPU it is running on, finds the
+matching events table and builds aliases for those events. This allows
+users to specify events by their name:
+
+	$ perf stat -e pm_1plus_ppc_cmpl sleep 1
+
+where 'pm_1plus_ppc_cmpl' is a Power8 PMU event.
+
+In case of errors when processing files in the tools/perf/pmu-events/arch
+directory, 'jevents' tries to create an empty mapping file to allow the perf
+build to succeed even if the PMU event aliases cannot be used.
+
+However some errors in processing may cause the perf build to fail.
+
+Mapfile format
+===============
+
+The mapfile enables multiple CPU models to share a single set of PMU events.
+It is required even if such mapping is 1:1.
+
+The mapfile.csv format is expected to be:
+
+	Header line
+	CPUID,Version,Dir/path/name,Type
+
+where:
+
+	Comma:
+		is the required field delimiter (i.e other fields cannot
+		have commas within them).
+
+	Comments:
+		Lines in which the first character is either '\n' or '#'
+		are ignored.
+
+	Header line
+		The header line is the first line in the file, which is
+		always _IGNORED_. It can empty.
+
+	CPUID:
+		CPUID is an arch-specific char string, that can be used
+		to identify CPU (and associate it with a set of PMU events
+		it supports). Multiple CPUIDS can point to the same
+		File/path/name.json.
+
+		Example:
+			CPUID == 'GenuineIntel-6-2E' (on x86).
+			CPUID == '004b0100' (PVR value in Powerpc)
+	Version:
+		is the Version of the mapfile.
+
+	Dir/path/name:
+		is the pathname to the directory containing the CPU's JSON
+		files, relative to the directory containing the mapfile.csv
+
+	Type:
+		indicates whether the events or "core" or "uncore" events.
+
+
+	Eg:
+
+	$ grep Silvermont tools/perf/pmu-events/arch/x86/mapfile.csv
+	GenuineIntel-6-37,V13,Silvermont_core,core
+	GenuineIntel-6-4D,V13,Silvermont_core,core
+	GenuineIntel-6-4C,V13,Silvermont_core,core
+
+	i.e the three CPU models use the JSON files (i.e PMU events) listed
+	in the directory 'tools/perf/pmu-events/arch/x86/Silvermont_core'.
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
new file mode 100644
index 0000000..41611d7
--- /dev/null
+++ b/tools/perf/pmu-events/jevents.c
@@ -0,0 +1,814 @@
+#define  _XOPEN_SOURCE 500	/* needed for nftw() */
+#define  _GNU_SOURCE		/* needed for asprintf() */
+
+/* Parse event JSON files */
+
+/*
+ * Copyright (c) 2014, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <ctype.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <libgen.h>
+#include <dirent.h>
+#include <sys/time.h>			/* getrlimit */
+#include <sys/resource.h>		/* getrlimit */
+#include <ftw.h>
+#include <sys/stat.h>
+#include "jsmn.h"
+#include "json.h"
+#include "jevents.h"
+
+#ifndef __maybe_unused
+#define __maybe_unused                  __attribute__((unused))
+#endif
+
+int verbose;
+char *prog;
+
+int eprintf(int level, int var, const char *fmt, ...)
+{
+
+	int ret;
+	va_list args;
+
+	if (var < level)
+		return 0;
+
+	va_start(args, fmt);
+
+	ret = vfprintf(stderr, fmt, args);
+
+	va_end(args);
+
+	return ret;
+}
+
+__attribute__((weak)) char *get_cpu_str(void)
+{
+	return NULL;
+}
+
+static void addfield(char *map, char **dst, const char *sep,
+		     const char *a, jsmntok_t *bt)
+{
+	unsigned int len = strlen(a) + 1 + strlen(sep);
+	int olen = *dst ? strlen(*dst) : 0;
+	int blen = bt ? json_len(bt) : 0;
+	char *out;
+
+	out = realloc(*dst, len + olen + blen);
+	if (!out) {
+		/* Don't add field in this case */
+		return;
+	}
+	*dst = out;
+
+	if (!olen)
+		*(*dst) = 0;
+	else
+		strcat(*dst, sep);
+	strcat(*dst, a);
+	if (bt)
+		strncat(*dst, map + bt->start, blen);
+}
+
+static void fixname(char *s)
+{
+	for (; *s; s++)
+		*s = tolower(*s);
+}
+
+static void fixdesc(char *s)
+{
+	char *e = s + strlen(s);
+
+	/* Remove trailing dots that look ugly in perf list */
+	--e;
+	while (e >= s && isspace(*e))
+		--e;
+	if (*e == '.')
+		*e = 0;
+}
+
+static struct msrmap {
+	const char *num;
+	const char *pname;
+} msrmap[] = {
+	{ "0x3F6", "ldlat=" },
+	{ "0x1A6", "offcore_rsp=" },
+	{ "0x1A7", "offcore_rsp=" },
+	{ "0x3F7", "frontend=" },
+	{ NULL, NULL }
+};
+
+static struct field {
+	const char *field;
+	const char *kernel;
+} fields[] = {
+	{ "EventCode",	"event=" },
+	{ "UMask",	"umask=" },
+	{ "CounterMask", "cmask=" },
+	{ "Invert",	"inv=" },
+	{ "AnyThread",	"any=" },
+	{ "EdgeDetect",	"edge=" },
+	{ "SampleAfterValue", "period=" },
+	{ NULL, NULL }
+};
+
+static void cut_comma(char *map, jsmntok_t *newval)
+{
+	int i;
+
+	/* Cut off everything after comma */
+	for (i = newval->start; i < newval->end; i++) {
+		if (map[i] == ',')
+			newval->end = i;
+	}
+}
+
+static int match_field(char *map, jsmntok_t *field, int nz,
+		       char **event, jsmntok_t *val)
+{
+	struct field *f;
+	jsmntok_t newval = *val;
+
+	for (f = fields; f->field; f++)
+		if (json_streq(map, field, f->field) && nz) {
+			cut_comma(map, &newval);
+			addfield(map, event, ",", f->kernel, &newval);
+			return 1;
+		}
+	return 0;
+}
+
+static struct msrmap *lookup_msr(char *map, jsmntok_t *val)
+{
+	jsmntok_t newval = *val;
+	static bool warned;
+	int i;
+
+	cut_comma(map, &newval);
+	for (i = 0; msrmap[i].num; i++)
+		if (json_streq(map, &newval, msrmap[i].num))
+			return &msrmap[i];
+	if (!warned) {
+		warned = true;
+		pr_err("%s: Unknown MSR in event file %.*s\n", prog,
+			json_len(val), map + val->start);
+	}
+	return NULL;
+}
+
+#define EXPECT(e, t, m) do { if (!(e)) {			\
+	jsmntok_t *loc = (t);					\
+	if (!(t)->start && (t) > tokens)			\
+		loc = (t) - 1;					\
+		pr_err("%s:%d: " m ", got %s\n", fn,		\
+			json_line(map, loc),			\
+			json_name(t));				\
+	goto out_free;						\
+} } while (0)
+
+#define TOPIC_DEPTH 256
+static char *topic_array[TOPIC_DEPTH];
+static int   topic_level;
+
+static char *get_topic(void)
+{
+	char *tp_old, *tp = NULL;
+	int i;
+
+	for (i = 0; i < topic_level + 1; i++) {
+		int n;
+
+		tp_old = tp;
+		n = asprintf(&tp, "%s%s", tp ?: "", topic_array[i]);
+		if (n < 0) {
+			pr_info("%s: asprintf() error %s\n", prog);
+			return NULL;
+		}
+		free(tp_old);
+	}
+
+	for (i = 0; i < (int) strlen(tp); i++) {
+		char c = tp[i];
+
+		if (c == '-')
+			tp[i] = ' ';
+		else if (c == '.') {
+			tp[i] = '\0';
+			break;
+		}
+	}
+
+	return tp;
+}
+
+static int add_topic(int level, char *bname)
+{
+	char *topic;
+
+	level -= 2;
+
+	if (level >= TOPIC_DEPTH)
+		return -EINVAL;
+
+	topic = strdup(bname);
+	if (!topic) {
+		pr_info("%s: strdup() error %s for file %s\n", prog,
+				strerror(errno), bname);
+		return -ENOMEM;
+	}
+
+	free(topic_array[topic_level]);
+	topic_array[topic_level] = topic;
+	topic_level              = level;
+	return 0;
+}
+
+struct perf_entry_data {
+	FILE *outfp;
+	char *topic;
+};
+
+static int close_table;
+
+static void print_events_table_prefix(FILE *fp, const char *tblname)
+{
+	fprintf(fp, "struct pmu_event %s[] = {\n", tblname);
+	close_table = 1;
+}
+
+static int print_events_table_entry(void *data, char *name, char *event,
+				    char *desc, char *long_desc)
+{
+	struct perf_entry_data *pd = data;
+	FILE *outfp = pd->outfp;
+	char *topic = pd->topic;
+
+	/*
+	 * TODO: Remove formatting chars after debugging to reduce
+	 *	 string lengths.
+	 */
+	fprintf(outfp, "{\n");
+
+	fprintf(outfp, "\t.name = \"%s\",\n", name);
+	fprintf(outfp, "\t.event = \"%s\",\n", event);
+	fprintf(outfp, "\t.desc = \"%s\",\n", desc);
+	fprintf(outfp, "\t.topic = \"%s\",\n", topic);
+	if (long_desc && long_desc[0])
+		fprintf(outfp, "\t.long_desc = \"%s\",\n", long_desc);
+
+	fprintf(outfp, "},\n");
+
+	return 0;
+}
+
+static void print_events_table_suffix(FILE *outfp)
+{
+	fprintf(outfp, "{\n");
+
+	fprintf(outfp, "\t.name = 0,\n");
+	fprintf(outfp, "\t.event = 0,\n");
+	fprintf(outfp, "\t.desc = 0,\n");
+
+	fprintf(outfp, "},\n");
+	fprintf(outfp, "};\n");
+	close_table = 0;
+}
+
+static struct fixed {
+	const char *name;
+	const char *event;
+} fixed[] = {
+	{ "inst_retired.any", "event=0xc0" },
+	{ "inst_retired.any_p", "event=0xc0" },
+	{ "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
+	{ "cpu_clk_unhalted.thread", "event=0x3c" },
+	{ "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
+	{ NULL, NULL},
+};
+
+/*
+ * Handle different fixed counter encodings between JSON and perf.
+ */
+static char *real_event(const char *name, char *event)
+{
+	int i;
+
+	for (i = 0; fixed[i].name; i++)
+		if (!strcasecmp(name, fixed[i].name))
+			return (char *)fixed[i].event;
+	return event;
+}
+
+/* Call func with each event in the json file */
+int json_events(const char *fn,
+	  int (*func)(void *data, char *name, char *event, char *desc,
+		      char *long_desc),
+	  void *data)
+{
+	int err = -EIO;
+	size_t size;
+	jsmntok_t *tokens, *tok;
+	int i, j, len;
+	char *map;
+
+	if (!fn)
+		return -ENOENT;
+
+	tokens = parse_json(fn, &map, &size, &len);
+	if (!tokens)
+		return -EIO;
+	EXPECT(tokens->type == JSMN_ARRAY, tokens, "expected top level array");
+	tok = tokens + 1;
+	for (i = 0; i < tokens->size; i++) {
+		char *event = NULL, *desc = NULL, *name = NULL;
+		char *long_desc = NULL;
+		char *extra_desc = NULL;
+		struct msrmap *msr = NULL;
+		jsmntok_t *msrval = NULL;
+		jsmntok_t *precise = NULL;
+		jsmntok_t *obj = tok++;
+
+		EXPECT(obj->type == JSMN_OBJECT, obj, "expected object");
+		for (j = 0; j < obj->size; j += 2) {
+			jsmntok_t *field, *val;
+			int nz;
+
+			field = tok + j;
+			EXPECT(field->type == JSMN_STRING, tok + j,
+			       "Expected field name");
+			val = tok + j + 1;
+			EXPECT(val->type == JSMN_STRING, tok + j + 1,
+			       "Expected string value");
+
+			nz = !json_streq(map, val, "0");
+			if (match_field(map, field, nz, &event, val)) {
+				/* ok */
+			} else if (json_streq(map, field, "EventName")) {
+				addfield(map, &name, "", "", val);
+			} else if (json_streq(map, field, "BriefDescription")) {
+				addfield(map, &desc, "", "", val);
+				fixdesc(desc);
+			} else if (json_streq(map, field,
+					     "PublicDescription")) {
+				addfield(map, &long_desc, "", "", val);
+				fixdesc(long_desc);
+			} else if (json_streq(map, field, "PEBS") && nz) {
+				precise = val;
+			} else if (json_streq(map, field, "MSRIndex") && nz) {
+				msr = lookup_msr(map, val);
+			} else if (json_streq(map, field, "MSRValue")) {
+				msrval = val;
+			} else if (json_streq(map, field, "Errata") &&
+				   !json_streq(map, val, "null")) {
+				addfield(map, &extra_desc, ". ",
+					" Spec update: ", val);
+			} else if (json_streq(map, field, "Data_LA") && nz) {
+				addfield(map, &extra_desc, ". ",
+					" Supports address when precise",
+					NULL);
+			}
+			/* ignore unknown fields */
+		}
+		if (precise && desc && !strstr(desc, "(Precise Event)")) {
+			if (json_streq(map, precise, "2"))
+				addfield(map, &extra_desc, " ",
+						"(Must be precise)", NULL);
+			else
+				addfield(map, &extra_desc, " ",
+						"(Precise event)", NULL);
+		}
+		if (desc && extra_desc)
+			addfield(map, &desc, " ", extra_desc, NULL);
+		if (long_desc && extra_desc)
+			addfield(map, &long_desc, " ", extra_desc, NULL);
+		if (msr != NULL)
+			addfield(map, &event, ",", msr->pname, msrval);
+		fixname(name);
+
+		err = func(data, name, real_event(name, event), desc, long_desc);
+		free(event);
+		free(desc);
+		free(name);
+		free(long_desc);
+		free(extra_desc);
+		if (err)
+			break;
+		tok += j;
+	}
+	EXPECT(tok - tokens == len, tok, "unexpected objects at end");
+	err = 0;
+out_free:
+	free_json(map, size, tokens);
+	return err;
+}
+
+static char *file_name_to_table_name(char *fname)
+{
+	unsigned int i;
+	int n;
+	int c;
+	char *tblname;
+
+	/*
+	 * Ensure tablename starts with alphabetic character.
+	 * Derive rest of table name from basename of the JSON file,
+	 * replacing hyphens and stripping out .json suffix.
+	 */
+	n = asprintf(&tblname, "pme_%s", basename(fname));
+	if (n < 0) {
+		pr_info("%s: asprintf() error %s for file %s\n", prog,
+				strerror(errno), fname);
+		return NULL;
+	}
+
+	for (i = 0; i < strlen(tblname); i++) {
+		c = tblname[i];
+
+		if (c == '-')
+			tblname[i] = '_';
+		else if (c == '.') {
+			tblname[i] = '\0';
+			break;
+		} else if (!isalnum(c) && c != '_') {
+			pr_err("%s: Invalid character '%c' in file name %s\n",
+					prog, c, basename(fname));
+			free(tblname);
+			tblname = NULL;
+			break;
+		}
+	}
+
+	return tblname;
+}
+
+static void print_mapping_table_prefix(FILE *outfp)
+{
+	fprintf(outfp, "struct pmu_events_map pmu_events_map[] = {\n");
+}
+
+static void print_mapping_table_suffix(FILE *outfp)
+{
+	/*
+	 * Print the terminating, NULL entry.
+	 */
+	fprintf(outfp, "{\n");
+	fprintf(outfp, "\t.cpuid = 0,\n");
+	fprintf(outfp, "\t.version = 0,\n");
+	fprintf(outfp, "\t.type = 0,\n");
+	fprintf(outfp, "\t.table = 0,\n");
+	fprintf(outfp, "},\n");
+
+	/* and finally, the closing curly bracket for the struct */
+	fprintf(outfp, "};\n");
+}
+
+static int process_mapfile(FILE *outfp, char *fpath)
+{
+	int n = 16384;
+	FILE *mapfp;
+	char *save = NULL;
+	char *line, *p;
+	int line_num;
+	char *tblname;
+
+	pr_info("%s: Processing mapfile %s\n", prog, fpath);
+
+	line = malloc(n);
+	if (!line)
+		return -1;
+
+	mapfp = fopen(fpath, "r");
+	if (!mapfp) {
+		pr_info("%s: Error %s opening %s\n", prog, strerror(errno),
+				fpath);
+		return -1;
+	}
+
+	print_mapping_table_prefix(outfp);
+
+	/* Skip first line (header) */
+	p = fgets(line, n, mapfp);
+	if (!p)
+		goto out;
+
+	line_num = 1;
+	while (1) {
+		char *cpuid, *version, *type, *fname;
+
+		line_num++;
+		p = fgets(line, n, mapfp);
+		if (!p)
+			break;
+
+		if (line[0] == '#' || line[0] == '\n')
+			continue;
+
+		if (line[strlen(line)-1] != '\n') {
+			/* TODO Deal with lines longer than 16K */
+			pr_info("%s: Mapfile %s: line %d too long, aborting\n",
+					prog, fpath, line_num);
+			return -1;
+		}
+		line[strlen(line)-1] = '\0';
+
+		cpuid = strtok_r(p, ",", &save);
+		version = strtok_r(NULL, ",", &save);
+		fname = strtok_r(NULL, ",", &save);
+		type = strtok_r(NULL, ",", &save);
+
+		tblname = file_name_to_table_name(fname);
+		fprintf(outfp, "{\n");
+		fprintf(outfp, "\t.cpuid = \"%s\",\n", cpuid);
+		fprintf(outfp, "\t.version = \"%s\",\n", version);
+		fprintf(outfp, "\t.type = \"%s\",\n", type);
+
+		/*
+		 * CHECK: We can't use the type (eg "core") field in the
+		 * table name. For us to do that, we need to somehow tweak
+		 * the other caller of file_name_to_table(), process_json()
+		 * to determine the type. process_json() file has no way
+		 * of knowing these are "core" events unless file name has
+		 * core in it. If filename has core in it, we can safely
+		 * ignore the type field here also.
+		 */
+		fprintf(outfp, "\t.table = %s\n", tblname);
+		fprintf(outfp, "},\n");
+	}
+
+out:
+	print_mapping_table_suffix(outfp);
+	return 0;
+}
+
+/*
+ * If we fail to locate/process JSON and map files, create a NULL mapping
+ * table. This would at least allow perf to build even if we can't find/use
+ * the aliases.
+ */
+static void create_empty_mapping(const char *output_file)
+{
+	FILE *outfp;
+
+	pr_info("%s: Creating empty pmu_events_map[] table\n", prog);
+
+	/* Truncate file to clear any partial writes to it */
+	outfp = fopen(output_file, "w");
+	if (!outfp) {
+		perror("fopen()");
+		_Exit(1);
+	}
+
+	fprintf(outfp, "#include \"../../pmu-events/pmu-events.h\"\n");
+	print_mapping_table_prefix(outfp);
+	print_mapping_table_suffix(outfp);
+	fclose(outfp);
+}
+
+static int get_maxfds(void)
+{
+	struct rlimit rlim;
+
+	if (getrlimit(RLIMIT_NOFILE, &rlim) == 0)
+		return min((int)rlim.rlim_max / 2, 512);
+
+	return 512;
+}
+
+/*
+ * nftw() doesn't let us pass an argument to the processing function,
+ * so use a global variables.
+ */
+static FILE *eventsfp;
+static char *mapfile;
+
+static int process_one_file(const char *fpath, const struct stat *sb,
+			    int typeflag, struct FTW *ftwbuf)
+{
+	char *tblname, *bname  = (char *) fpath + ftwbuf->base;
+	int is_dir  = typeflag == FTW_D;
+	int is_file = typeflag == FTW_F;
+	int level   = ftwbuf->level;
+	int err = 0;
+
+	pr_debug("%s %d %7jd %-20s %s\n",
+		 is_file ? "f" : is_dir ? "d" : "x",
+		 level, sb->st_size, bname, fpath);
+
+	/* base dir */
+	if (level == 0)
+		return 0;
+
+	/* model directory, reset topic */
+	if (level == 1 && is_dir) {
+		if (close_table)
+			print_events_table_suffix(eventsfp);
+
+		/*
+		 * Drop file name suffix. Replace hyphens with underscores.
+		 * Fail if file name contains any alphanum characters besides
+		 * underscores.
+		 */
+		tblname = file_name_to_table_name(bname);
+		if (!tblname) {
+			pr_info("%s: Error determining table name for %s\n", prog,
+				bname);
+			return -1;
+		}
+
+		print_events_table_prefix(eventsfp, tblname);
+		return 0;
+	}
+
+	/*
+	 * Save the mapfile name for now. We will process mapfile
+	 * after processing all JSON files (so we can write out the
+	 * mapping table after all PMU events tables).
+	 *
+	 * TODO: Allow for multiple mapfiles? Punt for now.
+	 */
+	if (level == 1 && is_file) {
+		if (!strncmp(bname, "mapfile.csv", 11)) {
+			if (mapfile) {
+				pr_info("%s: Many mapfiles? Using %s, ignoring %s\n",
+						prog, mapfile, fpath);
+			} else {
+				mapfile = strdup(fpath);
+			}
+			return 0;
+		}
+
+		pr_info("%s: Ignoring file %s\n", prog, fpath);
+		return 0;
+	}
+
+	/*
+	 * If the file name does not have a .json extension,
+	 * ignore it. It could be a readme.txt for instance.
+	 */
+	if (is_file) {
+		char *suffix = bname + strlen(bname) - 5;
+
+		if (strncmp(suffix, ".json", 5)) {
+			pr_info("%s: Ignoring file without .json suffix %s\n", prog,
+				fpath);
+			return 0;
+		}
+	}
+
+	if (level > 1 && add_topic(level, bname))
+		return -ENOMEM;
+
+	/*
+	 * Assume all other files are JSON files.
+	 *
+	 * If mapfile refers to 'power7_core.json', we create a table
+	 * named 'power7_core'. Any inconsistencies between the mapfile
+	 * and directory tree could result in build failure due to table
+	 * names not being found.
+	 *
+	 * Atleast for now, be strict with processing JSON file names.
+	 * i.e. if JSON file name cannot be mapped to C-style table name,
+	 * fail.
+	 */
+	if (is_file) {
+		struct perf_entry_data data = {
+			.topic = get_topic(),
+			.outfp = eventsfp,
+		};
+
+		err = json_events(fpath, print_events_table_entry, &data);
+
+		free(data.topic);
+	}
+
+	return err;
+}
+
+#ifndef PATH_MAX
+#define PATH_MAX	4096
+#endif
+
+/*
+ * Starting in directory 'start_dirname', find the "mapfile.csv" and
+ * the set of JSON files for the architecture 'arch'.
+ *
+ * From each JSON file, create a C-style "PMU events table" from the
+ * JSON file (see struct pmu_event).
+ *
+ * From the mapfile, create a mapping between the CPU revisions and
+ * PMU event tables (see struct pmu_events_map).
+ *
+ * Write out the PMU events tables and the mapping table to pmu-event.c.
+ *
+ * If unable to process the JSON or arch files, create an empty mapping
+ * table so we can continue to build/use  perf even if we cannot use the
+ * PMU event aliases.
+ */
+int main(int argc, char *argv[])
+{
+	int rc;
+	int maxfds;
+	char ldirname[PATH_MAX];
+
+	const char *arch;
+	const char *output_file;
+	const char *start_dirname;
+
+	prog = basename(argv[0]);
+	if (argc < 4) {
+		pr_err("Usage: %s <arch> <starting_dir> <output_file>\n", prog);
+		return 1;
+	}
+
+	arch = argv[1];
+	start_dirname = argv[2];
+	output_file = argv[3];
+
+	if (argc > 4)
+		verbose = atoi(argv[4]);
+
+	eventsfp = fopen(output_file, "w");
+	if (!eventsfp) {
+		pr_err("%s Unable to create required file %s (%s)\n",
+				prog, output_file, strerror(errno));
+		return 2;
+	}
+
+	/* Include pmu-events.h first */
+	fprintf(eventsfp, "#include \"../../pmu-events/pmu-events.h\"\n");
+
+	sprintf(ldirname, "%s/%s", start_dirname, arch);
+
+	/*
+	 * The mapfile allows multiple CPUids to point to the same JSON file,
+	 * so, not sure if there is a need for symlinks within the pmu-events
+	 * directory.
+	 *
+	 * For now, treat symlinks of JSON files as regular files and create
+	 * separate tables for each symlink (presumably, each symlink refers
+	 * to specific version of the CPU).
+	 */
+
+	maxfds = get_maxfds();
+	mapfile = NULL;
+	rc = nftw(ldirname, process_one_file, maxfds, 0);
+	if (rc && verbose) {
+		pr_info("%s: Error walking file tree %s\n", prog, ldirname);
+		goto empty_map;
+	} else if (rc) {
+		goto empty_map;
+	}
+
+	if (close_table)
+		print_events_table_suffix(eventsfp);
+
+	if (!mapfile) {
+		pr_info("%s: No CPU->JSON mapping?\n", prog);
+		goto empty_map;
+	}
+
+	if (process_mapfile(eventsfp, mapfile)) {
+		pr_info("%s: Error processing mapfile %s\n", prog, mapfile);
+		goto empty_map;
+	}
+
+	return 0;
+
+empty_map:
+	fclose(eventsfp);
+	create_empty_mapping(output_file);
+	return 0;
+}
diff --git a/tools/perf/pmu-events/jevents.h b/tools/perf/pmu-events/jevents.h
new file mode 100644
index 0000000..b0eb274
--- /dev/null
+++ b/tools/perf/pmu-events/jevents.h
@@ -0,0 +1,18 @@
+#ifndef JEVENTS_H
+#define JEVENTS_H 1
+
+int json_events(const char *fn,
+		int (*func)(void *data, char *name, char *event, char *desc,
+				char *long_desc),
+		void *data);
+char *get_cpu_str(void);
+
+#ifndef min
+#define min(x, y) ({                            \
+	typeof(x) _min1 = (x);                  \
+	typeof(y) _min2 = (y);                  \
+	(void) (&_min1 == &_min2);              \
+	_min1 < _min2 ? _min1 : _min2; })
+#endif
+
+#endif
diff --git a/tools/perf/pmu-events/jsmn.c b/tools/perf/pmu-events/jsmn.c
new file mode 100644
index 0000000..11d1fa1
--- /dev/null
+++ b/tools/perf/pmu-events/jsmn.c
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2010 Serge A. Zaitsev
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Slightly modified by AK to not assume 0 terminated input.
+ */
+
+#include <stdlib.h>
+#include "jsmn.h"
+
+/*
+ * Allocates a fresh unused token from the token pool.
+ */
+static jsmntok_t *jsmn_alloc_token(jsmn_parser *parser,
+				   jsmntok_t *tokens, size_t num_tokens)
+{
+	jsmntok_t *tok;
+
+	if ((unsigned)parser->toknext >= num_tokens)
+		return NULL;
+	tok = &tokens[parser->toknext++];
+	tok->start = tok->end = -1;
+	tok->size = 0;
+	return tok;
+}
+
+/*
+ * Fills token type and boundaries.
+ */
+static void jsmn_fill_token(jsmntok_t *token, jsmntype_t type,
+			    int start, int end)
+{
+	token->type = type;
+	token->start = start;
+	token->end = end;
+	token->size = 0;
+}
+
+/*
+ * Fills next available token with JSON primitive.
+ */
+static jsmnerr_t jsmn_parse_primitive(jsmn_parser *parser, const char *js,
+				      size_t len,
+				      jsmntok_t *tokens, size_t num_tokens)
+{
+	jsmntok_t *token;
+	int start;
+
+	start = parser->pos;
+
+	for (; parser->pos < len; parser->pos++) {
+		switch (js[parser->pos]) {
+#ifndef JSMN_STRICT
+		/*
+		 * In strict mode primitive must be followed by ","
+		 * or "}" or "]"
+		 */
+		case ':':
+#endif
+		case '\t':
+		case '\r':
+		case '\n':
+		case ' ':
+		case ',':
+		case ']':
+		case '}':
+			goto found;
+		default:
+			break;
+		}
+		if (js[parser->pos] < 32 || js[parser->pos] >= 127) {
+			parser->pos = start;
+			return JSMN_ERROR_INVAL;
+		}
+	}
+#ifdef JSMN_STRICT
+	/*
+	 * In strict mode primitive must be followed by a
+	 * comma/object/array.
+	 */
+	parser->pos = start;
+	return JSMN_ERROR_PART;
+#endif
+
+found:
+	token = jsmn_alloc_token(parser, tokens, num_tokens);
+	if (token == NULL) {
+		parser->pos = start;
+		return JSMN_ERROR_NOMEM;
+	}
+	jsmn_fill_token(token, JSMN_PRIMITIVE, start, parser->pos);
+	parser->pos--; /* parent sees closing brackets */
+	return JSMN_SUCCESS;
+}
+
+/*
+ * Fills next token with JSON string.
+ */
+static jsmnerr_t jsmn_parse_string(jsmn_parser *parser, const char *js,
+				   size_t len,
+				   jsmntok_t *tokens, size_t num_tokens)
+{
+	jsmntok_t *token;
+	int start = parser->pos;
+
+	/* Skip starting quote */
+	parser->pos++;
+
+	for (; parser->pos < len; parser->pos++) {
+		char c = js[parser->pos];
+
+		/* Quote: end of string */
+		if (c == '\"') {
+			token = jsmn_alloc_token(parser, tokens, num_tokens);
+			if (token == NULL) {
+				parser->pos = start;
+				return JSMN_ERROR_NOMEM;
+			}
+			jsmn_fill_token(token, JSMN_STRING, start+1,
+					parser->pos);
+			return JSMN_SUCCESS;
+		}
+
+		/* Backslash: Quoted symbol expected */
+		if (c == '\\') {
+			parser->pos++;
+			switch (js[parser->pos]) {
+				/* Allowed escaped symbols */
+			case '\"':
+			case '/':
+			case '\\':
+			case 'b':
+			case 'f':
+			case 'r':
+			case 'n':
+			case 't':
+				break;
+				/* Allows escaped symbol \uXXXX */
+			case 'u':
+				/* TODO */
+				break;
+				/* Unexpected symbol */
+			default:
+				parser->pos = start;
+				return JSMN_ERROR_INVAL;
+			}
+		}
+	}
+	parser->pos = start;
+	return JSMN_ERROR_PART;
+}
+
+/*
+ * Parse JSON string and fill tokens.
+ */
+jsmnerr_t jsmn_parse(jsmn_parser *parser, const char *js, size_t len,
+		     jsmntok_t *tokens, unsigned int num_tokens)
+{
+	jsmnerr_t r;
+	int i;
+	jsmntok_t *token;
+
+	for (; parser->pos < len; parser->pos++) {
+		char c;
+		jsmntype_t type;
+
+		c = js[parser->pos];
+		switch (c) {
+		case '{':
+		case '[':
+			token = jsmn_alloc_token(parser, tokens, num_tokens);
+			if (token == NULL)
+				return JSMN_ERROR_NOMEM;
+			if (parser->toksuper != -1)
+				tokens[parser->toksuper].size++;
+			token->type = (c == '{' ? JSMN_OBJECT : JSMN_ARRAY);
+			token->start = parser->pos;
+			parser->toksuper = parser->toknext - 1;
+			break;
+		case '}':
+		case ']':
+			type = (c == '}' ? JSMN_OBJECT : JSMN_ARRAY);
+			for (i = parser->toknext - 1; i >= 0; i--) {
+				token = &tokens[i];
+				if (token->start != -1 && token->end == -1) {
+					if (token->type != type)
+						return JSMN_ERROR_INVAL;
+					parser->toksuper = -1;
+					token->end = parser->pos + 1;
+					break;
+				}
+			}
+			/* Error if unmatched closing bracket */
+			if (i == -1)
+				return JSMN_ERROR_INVAL;
+			for (; i >= 0; i--) {
+				token = &tokens[i];
+				if (token->start != -1 && token->end == -1) {
+					parser->toksuper = i;
+					break;
+				}
+			}
+			break;
+		case '\"':
+			r = jsmn_parse_string(parser, js, len, tokens,
+					      num_tokens);
+			if (r < 0)
+				return r;
+			if (parser->toksuper != -1)
+				tokens[parser->toksuper].size++;
+			break;
+		case '\t':
+		case '\r':
+		case '\n':
+		case ':':
+		case ',':
+		case ' ':
+			break;
+#ifdef JSMN_STRICT
+			/*
+			 * In strict mode primitives are:
+			 * numbers and booleans.
+			 */
+		case '-':
+		case '0':
+		case '1':
+		case '2':
+		case '3':
+		case '4':
+		case '5':
+		case '6':
+		case '7':
+		case '8':
+		case '9':
+		case 't':
+		case 'f':
+		case 'n':
+#else
+			/*
+			 * In non-strict mode every unquoted value
+			 * is a primitive.
+			 */
+			/*FALL THROUGH */
+		default:
+#endif
+			r = jsmn_parse_primitive(parser, js, len, tokens,
+						 num_tokens);
+			if (r < 0)
+				return r;
+			if (parser->toksuper != -1)
+				tokens[parser->toksuper].size++;
+			break;
+
+#ifdef JSMN_STRICT
+			/* Unexpected char in strict mode */
+		default:
+			return JSMN_ERROR_INVAL;
+#endif
+		}
+	}
+
+	for (i = parser->toknext - 1; i >= 0; i--) {
+		/* Unmatched opened object or array */
+		if (tokens[i].start != -1 && tokens[i].end == -1)
+			return JSMN_ERROR_PART;
+	}
+
+	return JSMN_SUCCESS;
+}
+
+/*
+ * Creates a new parser based over a given  buffer with an array of tokens
+ * available.
+ */
+void jsmn_init(jsmn_parser *parser)
+{
+	parser->pos = 0;
+	parser->toknext = 0;
+	parser->toksuper = -1;
+}
+
+const char *jsmn_strerror(jsmnerr_t err)
+{
+	switch (err) {
+	case JSMN_ERROR_NOMEM:
+		return "No enough tokens";
+	case JSMN_ERROR_INVAL:
+		return "Invalid character inside JSON string";
+	case JSMN_ERROR_PART:
+		return "The string is not a full JSON packet, more bytes expected";
+	case JSMN_SUCCESS:
+		return "Success";
+	default:
+		return "Unknown json error";
+	}
+}
diff --git a/tools/perf/pmu-events/jsmn.h b/tools/perf/pmu-events/jsmn.h
new file mode 100644
index 0000000..d666b10
--- /dev/null
+++ b/tools/perf/pmu-events/jsmn.h
@@ -0,0 +1,67 @@
+#ifndef __JSMN_H_
+#define __JSMN_H_
+
+/*
+ * JSON type identifier. Basic types are:
+ *	o Object
+ *	o Array
+ *	o String
+ *	o Other primitive: number, boolean (true/false) or null
+ */
+typedef enum {
+	JSMN_PRIMITIVE = 0,
+	JSMN_OBJECT = 1,
+	JSMN_ARRAY = 2,
+	JSMN_STRING = 3
+} jsmntype_t;
+
+typedef enum {
+	/* Not enough tokens were provided */
+	JSMN_ERROR_NOMEM = -1,
+	/* Invalid character inside JSON string */
+	JSMN_ERROR_INVAL = -2,
+	/* The string is not a full JSON packet, more bytes expected */
+	JSMN_ERROR_PART = -3,
+	/* Everything was fine */
+	JSMN_SUCCESS = 0
+} jsmnerr_t;
+
+/*
+ * JSON token description.
+ * @param		type	type (object, array, string etc.)
+ * @param		start	start position in JSON data string
+ * @param		end		end position in JSON data string
+ */
+typedef struct {
+	jsmntype_t type;
+	int start;
+	int end;
+	int size;
+} jsmntok_t;
+
+/*
+ * JSON parser. Contains an array of token blocks available. Also stores
+ * the string being parsed now and current position in that string
+ */
+typedef struct {
+	unsigned int pos; /* offset in the JSON string */
+	int toknext; /* next token to allocate */
+	int toksuper; /* superior token node, e.g parent object or array */
+} jsmn_parser;
+
+/*
+ * Create JSON parser over an array of tokens
+ */
+void jsmn_init(jsmn_parser *parser);
+
+/*
+ * Run JSON parser. It parses a JSON data string into and array of tokens,
+ * each describing a single JSON object.
+ */
+jsmnerr_t jsmn_parse(jsmn_parser *parser, const char *js,
+		     size_t len,
+		     jsmntok_t *tokens, unsigned int num_tokens);
+
+const char *jsmn_strerror(jsmnerr_t err);
+
+#endif /* __JSMN_H_ */
diff --git a/tools/perf/pmu-events/json.c b/tools/perf/pmu-events/json.c
new file mode 100644
index 0000000..f67bbb0
--- /dev/null
+++ b/tools/perf/pmu-events/json.c
@@ -0,0 +1,162 @@
+/* Parse JSON files using the JSMN parser. */
+
+/*
+ * Copyright (c) 2014, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include "jsmn.h"
+#include "json.h"
+#include <linux/kernel.h>
+
+
+static char *mapfile(const char *fn, size_t *size)
+{
+	unsigned ps = sysconf(_SC_PAGESIZE);
+	struct stat st;
+	char *map = NULL;
+	int err;
+	int fd = open(fn, O_RDONLY);
+
+	if (fd < 0 && verbose && fn) {
+		pr_err("Error opening events file '%s': %s\n", fn,
+				strerror(errno));
+	}
+
+	if (fd < 0)
+		return NULL;
+	err = fstat(fd, &st);
+	if (err < 0)
+		goto out;
+	*size = st.st_size;
+	map = mmap(NULL,
+		   (st.st_size + ps - 1) & ~(ps - 1),
+		   PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
+	if (map == MAP_FAILED)
+		map = NULL;
+out:
+	close(fd);
+	return map;
+}
+
+static void unmapfile(char *map, size_t size)
+{
+	unsigned ps = sysconf(_SC_PAGESIZE);
+	munmap(map, roundup(size, ps));
+}
+
+/*
+ * Parse json file using jsmn. Return array of tokens,
+ * and mapped file. Caller needs to free array.
+ */
+jsmntok_t *parse_json(const char *fn, char **map, size_t *size, int *len)
+{
+	jsmn_parser parser;
+	jsmntok_t *tokens;
+	jsmnerr_t res;
+	unsigned sz;
+
+	*map = mapfile(fn, size);
+	if (!*map)
+		return NULL;
+	/* Heuristic */
+	sz = *size * 16;
+	tokens = malloc(sz);
+	if (!tokens)
+		goto error;
+	jsmn_init(&parser);
+	res = jsmn_parse(&parser, *map, *size, tokens,
+			 sz / sizeof(jsmntok_t));
+	if (res != JSMN_SUCCESS) {
+		pr_err("%s: json error %s\n", fn, jsmn_strerror(res));
+		goto error_free;
+	}
+	if (len)
+		*len = parser.toknext;
+	return tokens;
+error_free:
+	free(tokens);
+error:
+	unmapfile(*map, *size);
+	return NULL;
+}
+
+void free_json(char *map, size_t size, jsmntok_t *tokens)
+{
+	free(tokens);
+	unmapfile(map, size);
+}
+
+static int countchar(char *map, char c, int end)
+{
+	int i;
+	int count = 0;
+	for (i = 0; i < end; i++)
+		if (map[i] == c)
+			count++;
+	return count;
+}
+
+/* Return line number of a jsmn token */
+int json_line(char *map, jsmntok_t *t)
+{
+	return countchar(map, '\n', t->start) + 1;
+}
+
+static const char * const jsmn_types[] = {
+	[JSMN_PRIMITIVE] = "primitive",
+	[JSMN_ARRAY] = "array",
+	[JSMN_OBJECT] = "object",
+	[JSMN_STRING] = "string"
+};
+
+#define LOOKUP(a, i) ((i) < (sizeof(a)/sizeof(*(a))) ? ((a)[i]) : "?")
+
+/* Return type name of a jsmn token */
+const char *json_name(jsmntok_t *t)
+{
+	return LOOKUP(jsmn_types, t->type);
+}
+
+int json_len(jsmntok_t *t)
+{
+	return t->end - t->start;
+}
+
+/* Is string t equal to s? */
+int json_streq(char *map, jsmntok_t *t, const char *s)
+{
+	unsigned len = json_len(t);
+	return len == strlen(s) && !strncasecmp(map + t->start, s, len);
+}
diff --git a/tools/perf/pmu-events/json.h b/tools/perf/pmu-events/json.h
new file mode 100644
index 0000000..278ebd3
--- /dev/null
+++ b/tools/perf/pmu-events/json.h
@@ -0,0 +1,38 @@
+#ifndef JSON_H
+#define JSON_H 1
+
+#include "jsmn.h"
+
+jsmntok_t *parse_json(const char *fn, char **map, size_t *size, int *len);
+void free_json(char *map, size_t size, jsmntok_t *tokens);
+int json_line(char *map, jsmntok_t *t);
+const char *json_name(jsmntok_t *t);
+int json_streq(char *map, jsmntok_t *t, const char *s);
+int json_len(jsmntok_t *t);
+
+extern int verbose;
+
+#include <stdbool.h>
+
+extern int eprintf(int level, int var, const char *fmt, ...);
+#define pr_fmt(fmt)	fmt
+
+#define pr_err(fmt, ...) \
+	eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+
+#define pr_info(fmt, ...) \
+	eprintf(1, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+
+#define pr_debug(fmt, ...) \
+	eprintf(2, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+
+#ifndef roundup
+#define roundup(x, y) (                                \
+{                                                      \
+        const typeof(y) __y = y;                       \
+        (((x) + (__y - 1)) / __y) * __y;               \
+}                                                      \
+)
+#endif
+
+#endif
diff --git a/tools/perf/pmu-events/pmu-events.h b/tools/perf/pmu-events/pmu-events.h
new file mode 100644
index 0000000..2eaef59
--- /dev/null
+++ b/tools/perf/pmu-events/pmu-events.h
@@ -0,0 +1,37 @@
+#ifndef PMU_EVENTS_H
+#define PMU_EVENTS_H
+
+/*
+ * Describe each PMU event. Each CPU has a table of PMU events.
+ */
+struct pmu_event {
+	const char *name;
+	const char *event;
+	const char *desc;
+	const char *topic;
+	const char *long_desc;
+};
+
+/*
+ *
+ * Map a CPU to its table of PMU events. The CPU is identified by the
+ * cpuid field, which is an arch-specific identifier for the CPU.
+ * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
+ * must match the get_cpustr() in tools/perf/arch/xxx/util/header.c)
+ *
+ * The  cpuid can contain any character other than the comma.
+ */
+struct pmu_events_map {
+	const char *cpuid;
+	const char *version;
+	const char *type;		/* core, uncore etc */
+	struct pmu_event *table;
+};
+
+/*
+ * Global table mapping each known CPU for the architecture to its
+ * table of PMU events.
+ */
+extern struct pmu_events_map pmu_events_map[];
+
+#endif
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index fb8e42c..4ffff7b 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -601,7 +601,8 @@
 			u64 nr_entries;
 			hbt->timer(hbt->arg);
 
-			if (hist_browser__has_filter(browser))
+			if (hist_browser__has_filter(browser) ||
+			    symbol_conf.report_hierarchy)
 				hist_browser__update_nr_entries(browser);
 
 			nr_entries = hist_browser__nr_entries(browser);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index ea34c5a..d92e020 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -384,15 +384,14 @@
 static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
 					 struct perf_evsel *evsel, int cpu)
 {
-	int thread, err;
+	int thread;
 	int nr_threads = perf_evlist__nr_threads(evlist, evsel);
 
 	if (!evsel->fd)
 		return -EINVAL;
 
 	for (thread = 0; thread < nr_threads; thread++) {
-		err = ioctl(FD(evsel, cpu, thread),
-			    PERF_EVENT_IOC_ENABLE, 0);
+		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
 		if (err)
 			return err;
 	}
@@ -403,14 +402,14 @@
 					    struct perf_evsel *evsel,
 					    int thread)
 {
-	int cpu, err;
+	int cpu;
 	int nr_cpus = cpu_map__nr(evlist->cpus);
 
 	if (!evsel->fd)
 		return -EINVAL;
 
 	for (cpu = 0; cpu < nr_cpus; cpu++) {
-		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
+		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
 		if (err)
 			return err;
 	}
@@ -1606,10 +1605,9 @@
 	struct perf_evsel *evsel;
 	int ncpus = cpu_map__nr(evlist->cpus);
 	int nthreads = thread_map__nr(evlist->threads);
-	int n;
 
 	evlist__for_each_entry_reverse(evlist, evsel) {
-		n = evsel->cpus ? evsel->cpus->nr : ncpus;
+		int n = evsel->cpus ? evsel->cpus->nr : ncpus;
 		perf_evsel__close(evsel, n, nthreads);
 	}
 }
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 380e84c..8bc2711 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -985,14 +985,13 @@
 
 static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
-	int cpu, thread;
-
 	if (evsel->system_wide)
 		nthreads = 1;
 
 	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
 
 	if (evsel->fd) {
+		int cpu, thread;
 		for (cpu = 0; cpu < ncpus; cpu++) {
 			for (thread = 0; thread < nthreads; thread++) {
 				FD(evsel, cpu, thread) = -1;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 85dd0db..2f3eded 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1895,7 +1895,6 @@
 	if (ph->needs_swap)
 		nr = bswap_32(nr);
 
-	ph->env.nr_numa_nodes = nr;
 	nodes = zalloc(sizeof(*nodes) * nr);
 	if (!nodes)
 		return -ENOMEM;
@@ -1932,6 +1931,7 @@
 
 		free(str);
 	}
+	ph->env.nr_numa_nodes = nr;
 	ph->env.numa_nodes = nodes;
 	return 0;
 
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index d306ca1..d30109b 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -151,4 +151,5 @@
  */
 int get_cpuid(char *buffer, size_t sz);
 
+char *get_cpuid_str(void);
 #endif /* __PERF_HEADER_H */
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 7591a0c..16c06d3 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -90,6 +90,7 @@
 	bool pge;
 	bool have_tma;
 	bool have_cyc;
+	bool fixup_last_mtc;
 	uint64_t pos;
 	uint64_t last_ip;
 	uint64_t ip;
@@ -586,10 +587,31 @@
 	uint64_t        tsc_timestamp;
 	uint64_t        timestamp;
 	bool            have_tma;
+	bool            fixup_last_mtc;
 	bool            from_mtc;
 	double          cbr_cyc_to_tsc;
 };
 
+/*
+ * MTC provides a 8-bit slice of CTC but the TMA packet only provides the lower
+ * 16 bits of CTC. If mtc_shift > 8 then some of the MTC bits are not in the CTC
+ * provided by the TMA packet. Fix-up the last_mtc calculated from the TMA
+ * packet by copying the missing bits from the current MTC assuming the least
+ * difference between the two, and that the current MTC comes after last_mtc.
+ */
+static void intel_pt_fixup_last_mtc(uint32_t mtc, int mtc_shift,
+				    uint32_t *last_mtc)
+{
+	uint32_t first_missing_bit = 1U << (16 - mtc_shift);
+	uint32_t mask = ~(first_missing_bit - 1);
+
+	*last_mtc |= mtc & mask;
+	if (*last_mtc >= mtc) {
+		*last_mtc -= first_missing_bit;
+		*last_mtc &= 0xff;
+	}
+}
+
 static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
 {
 	struct intel_pt_decoder *decoder = pkt_info->decoder;
@@ -619,6 +641,11 @@
 			return 0;
 
 		mtc = pkt_info->packet.payload;
+		if (decoder->mtc_shift > 8 && data->fixup_last_mtc) {
+			data->fixup_last_mtc = false;
+			intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
+						&data->last_mtc);
+		}
 		if (mtc > data->last_mtc)
 			mtc_delta = mtc - data->last_mtc;
 		else
@@ -687,6 +714,7 @@
 
 		data->ctc_delta = 0;
 		data->have_tma = true;
+		data->fixup_last_mtc = true;
 
 		return 0;
 
@@ -753,6 +781,7 @@
 		.tsc_timestamp  = decoder->tsc_timestamp,
 		.timestamp      = decoder->timestamp,
 		.have_tma       = decoder->have_tma,
+		.fixup_last_mtc = decoder->fixup_last_mtc,
 		.from_mtc       = from_mtc,
 		.cbr_cyc_to_tsc = 0,
 	};
@@ -1271,6 +1300,7 @@
 	}
 	decoder->ctc_delta = 0;
 	decoder->have_tma = true;
+	decoder->fixup_last_mtc = true;
 	intel_pt_log("CTC timestamp " x64_fmt " last MTC %#x  CTC rem %#x\n",
 		     decoder->ctc_timestamp, decoder->last_mtc, ctc_rem);
 }
@@ -1285,6 +1315,12 @@
 
 	mtc = decoder->packet.payload;
 
+	if (decoder->mtc_shift > 8 && decoder->fixup_last_mtc) {
+		decoder->fixup_last_mtc = false;
+		intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
+					&decoder->last_mtc);
+	}
+
 	if (mtc > decoder->last_mtc)
 		mtc_delta = mtc - decoder->last_mtc;
 	else
@@ -1353,6 +1389,8 @@
 			     timestamp, decoder->timestamp);
 	else
 		decoder->timestamp = timestamp;
+
+	decoder->timestamp_insn_cnt = 0;
 }
 
 /* Walk PSB+ packets when already in sync. */
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 18e4519..df85b9e 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1745,9 +1745,8 @@
 					int max_stack)
 {
 	struct ip_callchain *chain = sample->callchain;
-	int chain_nr = min(max_stack, (int)chain->nr);
+	int chain_nr = min(max_stack, (int)chain->nr), i;
 	u8 cpumode = PERF_RECORD_MISC_USER;
-	int i, j, err;
 	u64 ip;
 
 	for (i = 0; i < chain_nr; i++) {
@@ -1758,7 +1757,7 @@
 	/* LBR only affects the user callchain */
 	if (i != chain_nr) {
 		struct branch_stack *lbr_stack = sample->branch_stack;
-		int lbr_nr = lbr_stack->nr;
+		int lbr_nr = lbr_stack->nr, j;
 		/*
 		 * LBR callstack can only get user call chain.
 		 * The mix_chain_nr is kernel call chain
@@ -1772,6 +1771,7 @@
 		int mix_chain_nr = i + 1 + lbr_nr + 1;
 
 		for (j = 0; j < mix_chain_nr; j++) {
+			int err;
 			if (callchain_param.order == ORDER_CALLEE) {
 				if (j < i + 1)
 					ip = chain->ips[j];
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 33546c3..4e778ea 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -924,6 +924,7 @@
 	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
 	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
 	case PARSE_EVENTS__TERM_TYPE_NAME:
+	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
 		return true;
 	default:
 		if (!err)
@@ -1458,7 +1459,7 @@
 	struct perf_pmu_event_symbol *pmu1 = (struct perf_pmu_event_symbol *) p1;
 	struct perf_pmu_event_symbol *pmu2 = (struct perf_pmu_event_symbol *) p2;
 
-	return strcmp(pmu1->symbol, pmu2->symbol);
+	return strcasecmp(pmu1->symbol, pmu2->symbol);
 }
 
 static void perf_pmu__parse_cleanup(void)
@@ -2263,7 +2264,8 @@
 /*
  * Print the help text for the event symbols:
  */
-void print_events(const char *event_glob, bool name_only)
+void print_events(const char *event_glob, bool name_only, bool quiet_flag,
+			bool long_desc)
 {
 	print_symbol_events(event_glob, PERF_TYPE_HARDWARE,
 			    event_symbols_hw, PERF_COUNT_HW_MAX, name_only);
@@ -2273,7 +2275,7 @@
 
 	print_hwcache_events(event_glob, name_only);
 
-	print_pmu_events(event_glob, name_only);
+	print_pmu_events(event_glob, name_only, quiet_flag, long_desc);
 
 	if (event_glob != NULL)
 		return;
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 8d09a97..da246a3 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -172,7 +172,8 @@
 void parse_events_evlist_error(struct parse_events_evlist *data,
 			       int idx, const char *str);
 
-void print_events(const char *event_glob, bool name_only);
+void print_events(const char *event_glob, bool name_only, bool quiet,
+		  bool long_desc);
 
 struct event_symbol {
 	const char	*symbol;
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 9f43fda..660fca0 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -136,8 +136,8 @@
 group		[^,{}/]*[{][^}]*[}][^,{}/]*
 event_pmu	[^,{}/]+[/][^/]*[/][^,{}/]*
 event		[^,{}/]+
-bpf_object	.*\.(o|bpf)
-bpf_source	.*\.c
+bpf_object	[^,{}]+\.(o|bpf)
+bpf_source	[^,{}]+\.c
 
 num_dec		[0-9]+
 num_hex		0x[a-fA-F0-9]+
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 2babcdf..b1474dc 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -12,6 +12,9 @@
 #include "pmu.h"
 #include "parse-events.h"
 #include "cpumap.h"
+#include "header.h"
+#include "pmu-events/pmu-events.h"
+#include "cache.h"
 
 struct perf_pmu_format {
 	char *name;
@@ -220,7 +223,8 @@
 }
 
 static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
-				 char *desc __maybe_unused, char *val)
+				 char *desc, char *val, char *long_desc,
+				 char *topic)
 {
 	struct perf_pmu_alias *alias;
 	int ret;
@@ -253,6 +257,11 @@
 		perf_pmu__parse_snapshot(alias, dir, name);
 	}
 
+	alias->desc = desc ? strdup(desc) : NULL;
+	alias->long_desc = long_desc ? strdup(long_desc) :
+				desc ? strdup(desc) : NULL;
+	alias->topic = topic ? strdup(topic) : NULL;
+
 	list_add_tail(&alias->list, list);
 
 	return 0;
@@ -269,7 +278,7 @@
 
 	buf[ret] = 0;
 
-	return __perf_pmu__new_alias(list, dir, name, NULL, buf);
+	return __perf_pmu__new_alias(list, dir, name, NULL, buf, NULL, NULL);
 }
 
 static inline bool pmu_alias_info_file(char *name)
@@ -473,6 +482,68 @@
 	return cpus;
 }
 
+/*
+ * Return the CPU id as a raw string.
+ *
+ * Each architecture should provide a more precise id string that
+ * can be use to match the architecture's "mapfile".
+ */
+char * __weak get_cpuid_str(void)
+{
+	return NULL;
+}
+
+/*
+ * From the pmu_events_map, find the table of PMU events that corresponds
+ * to the current running CPU. Then, add all PMU events from that table
+ * as aliases.
+ */
+static void pmu_add_cpu_aliases(struct list_head *head)
+{
+	int i;
+	struct pmu_events_map *map;
+	struct pmu_event *pe;
+	char *cpuid;
+
+	cpuid = getenv("PERF_CPUID");
+	if (cpuid)
+		cpuid = strdup(cpuid);
+	if (!cpuid)
+		cpuid = get_cpuid_str();
+	if (!cpuid)
+		return;
+
+	pr_debug("Using CPUID %s\n", cpuid);
+
+	i = 0;
+	while (1) {
+		map = &pmu_events_map[i++];
+		if (!map->table)
+			goto out;
+
+		if (!strcmp(map->cpuid, cpuid))
+			break;
+	}
+
+	/*
+	 * Found a matching PMU events table. Create aliases
+	 */
+	i = 0;
+	while (1) {
+		pe = &map->table[i++];
+		if (!pe->name)
+			break;
+
+		/* need type casts to override 'const' */
+		__perf_pmu__new_alias(head, NULL, (char *)pe->name,
+				(char *)pe->desc, (char *)pe->event,
+				(char *)pe->long_desc, (char *)pe->topic);
+	}
+
+out:
+	free(cpuid);
+}
+
 struct perf_event_attr * __weak
 perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
 {
@@ -497,6 +568,9 @@
 	if (pmu_aliases(name, &aliases))
 		return NULL;
 
+	if (!strcmp(name, "cpu"))
+		pmu_add_cpu_aliases(&aliases);
+
 	if (pmu_type(name, &type))
 		return NULL;
 
@@ -983,21 +1057,63 @@
 	return buf;
 }
 
-static int cmp_string(const void *a, const void *b)
+struct sevent {
+	char *name;
+	char *desc;
+	char *topic;
+};
+
+static int cmp_sevent(const void *a, const void *b)
 {
-	const char * const *as = a;
-	const char * const *bs = b;
-	return strcmp(*as, *bs);
+	const struct sevent *as = a;
+	const struct sevent *bs = b;
+
+	/* Put extra events last */
+	if (!!as->desc != !!bs->desc)
+		return !!as->desc - !!bs->desc;
+	if (as->topic && bs->topic) {
+		int n = strcmp(as->topic, bs->topic);
+
+		if (n)
+			return n;
+	}
+	return strcmp(as->name, bs->name);
 }
 
-void print_pmu_events(const char *event_glob, bool name_only)
+static void wordwrap(char *s, int start, int max, int corr)
+{
+	int column = start;
+	int n;
+
+	while (*s) {
+		int wlen = strcspn(s, " \t");
+
+		if (column + wlen >= max && column > start) {
+			printf("\n%*s", start, "");
+			column = start + corr;
+		}
+		n = printf("%s%.*s", column > start ? " " : "", wlen, s);
+		if (n <= 0)
+			break;
+		s += wlen;
+		column += n;
+		while (isspace(*s))
+			s++;
+	}
+}
+
+void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
+			bool long_desc)
 {
 	struct perf_pmu *pmu;
 	struct perf_pmu_alias *alias;
 	char buf[1024];
 	int printed = 0;
 	int len, j;
-	char **aliases;
+	struct sevent *aliases;
+	int numdesc = 0;
+	int columns = pager_get_columns();
+	char *topic = NULL;
 
 	pmu = NULL;
 	len = 0;
@@ -1007,14 +1123,15 @@
 		if (pmu->selectable)
 			len++;
 	}
-	aliases = zalloc(sizeof(char *) * len);
+	aliases = zalloc(sizeof(struct sevent) * len);
 	if (!aliases)
 		goto out_enomem;
 	pmu = NULL;
 	j = 0;
 	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
 		list_for_each_entry(alias, &pmu->aliases, list) {
-			char *name = format_alias(buf, sizeof(buf), pmu, alias);
+			char *name = alias->desc ? alias->name :
+				format_alias(buf, sizeof(buf), pmu, alias);
 			bool is_cpu = !strcmp(pmu->name, "cpu");
 
 			if (event_glob != NULL &&
@@ -1023,12 +1140,21 @@
 						       event_glob))))
 				continue;
 
-			if (is_cpu && !name_only)
+			if (is_cpu && !name_only && !alias->desc)
 				name = format_alias_or(buf, sizeof(buf), pmu, alias);
 
-			aliases[j] = strdup(name);
-			if (aliases[j] == NULL)
+			aliases[j].name = name;
+			if (is_cpu && !name_only && !alias->desc)
+				aliases[j].name = format_alias_or(buf,
+								  sizeof(buf),
+								  pmu, alias);
+			aliases[j].name = strdup(aliases[j].name);
+			if (!aliases[j].name)
 				goto out_enomem;
+
+			aliases[j].desc = long_desc ? alias->long_desc :
+						alias->desc;
+			aliases[j].topic = alias->topic;
 			j++;
 		}
 		if (pmu->selectable &&
@@ -1036,25 +1162,39 @@
 			char *s;
 			if (asprintf(&s, "%s//", pmu->name) < 0)
 				goto out_enomem;
-			aliases[j] = s;
+			aliases[j].name = s;
 			j++;
 		}
 	}
 	len = j;
-	qsort(aliases, len, sizeof(char *), cmp_string);
+	qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
 	for (j = 0; j < len; j++) {
 		if (name_only) {
-			printf("%s ", aliases[j]);
+			printf("%s ", aliases[j].name);
 			continue;
 		}
-		printf("  %-50s [Kernel PMU event]\n", aliases[j]);
+		if (aliases[j].desc && !quiet_flag) {
+			if (numdesc++ == 0)
+				printf("\n");
+			if (aliases[j].topic && (!topic ||
+					strcmp(topic, aliases[j].topic))) {
+				printf("%s%s:\n", topic ? "\n" : "",
+						aliases[j].topic);
+				topic = aliases[j].topic;
+			}
+			printf("  %-50s\n", aliases[j].name);
+			printf("%*s", 8, "[");
+			wordwrap(aliases[j].desc, 8, columns, 0);
+			printf("]\n");
+		} else
+			printf("  %-50s [Kernel PMU event]\n", aliases[j].name);
 		printed++;
 	}
 	if (printed && pager_in_use())
 		printf("\n");
 out_free:
 	for (j = 0; j < len; j++)
-		zfree(&aliases[j]);
+		zfree(&aliases[j].name);
 	zfree(&aliases);
 	return;
 
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 743422a..2571203 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -40,6 +40,9 @@
 
 struct perf_pmu_alias {
 	char *name;
+	char *desc;
+	char *long_desc;
+	char *topic;
 	struct list_head terms; /* HEAD struct parse_events_term -> list */
 	struct list_head list;  /* ELEM */
 	char unit[UNIT_MAX_LEN+1];
@@ -71,7 +74,8 @@
 
 struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu);
 
-void print_pmu_events(const char *event_glob, bool name_only);
+void print_pmu_events(const char *event_glob, bool name_only, bool quiet,
+		      bool long_desc);
 bool pmu_have_event(const char *pname, const char *name);
 
 int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt,
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index fcfbef0..d281ae2 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -213,7 +213,7 @@
 		goto out;
 	}
 
-	for (ptr2 = ptr1; ptr2 != '\0'; ptr2++) {
+	for (ptr2 = ptr1; *ptr2 != '\0'; ptr2++) {
 		if (!isalnum(*ptr2) && *ptr2 != '_') {
 			*ptr2 = '\0';
 			break;
diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h
index b268a66..318424e 100644
--- a/tools/perf/util/strbuf.h
+++ b/tools/perf/util/strbuf.h
@@ -66,9 +66,8 @@
 int strbuf_grow(struct strbuf *buf, size_t);
 
 static inline int strbuf_setlen(struct strbuf *sb, size_t len) {
-	int ret;
 	if (!sb->alloc) {
-		ret = strbuf_grow(sb, 0);
+		int ret = strbuf_grow(sb, 0);
 		if (ret)
 			return ret;
 	}
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 8b10a55..f5af87f 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -14,13 +14,12 @@
 
 int thread__init_map_groups(struct thread *thread, struct machine *machine)
 {
-	struct thread *leader;
 	pid_t pid = thread->pid_;
 
 	if (pid == thread->tid || pid == -1) {
 		thread->mg = map_groups__new(machine);
 	} else {
-		leader = __machine__findnew_thread(machine, pid, pid);
+		struct thread *leader = __machine__findnew_thread(machine, pid, pid);
 		if (leader) {
 			thread->mg = map_groups__get(leader->mg);
 			thread__put(leader);
@@ -130,11 +129,10 @@
 		       bool exec)
 {
 	struct comm *new, *curr = thread__comm(thread);
-	int err;
 
 	/* Override the default :tid entry */
 	if (!thread->comm_set) {
-		err = comm__override(curr, str, timestamp, exec);
+		int err = comm__override(curr, str, timestamp, exec);
 		if (err)
 			return err;
 	} else {
@@ -270,10 +268,9 @@
 
 int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
 {
-	int err;
-
 	if (parent->comm_set) {
 		const char *comm = thread__comm_str(parent);
+		int err;
 		if (!comm)
 			return -ENOMEM;
 		err = thread__set_comm(thread, comm, timestamp);
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index ad6dd05..582db951 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -13,6 +13,7 @@
 ldflags-y += --wrap=devm_memremap_pages
 ldflags-y += --wrap=insert_resource
 ldflags-y += --wrap=remove_resource
+ldflags-y += --wrap=acpi_evaluate_object
 
 DRIVERS := ../../../drivers
 NVDIMM_SRC := $(DRIVERS)/nvdimm
diff --git a/tools/testing/nvdimm/config_check.c b/tools/testing/nvdimm/config_check.c
index 878daf3..7dc5a0a 100644
--- a/tools/testing/nvdimm/config_check.c
+++ b/tools/testing/nvdimm/config_check.c
@@ -1,4 +1,3 @@
-#include <linux/kconfig.h>
 #include <linux/bug.h>
 
 void check(void)
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index c29f8dc..3ccef73 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/pfn_t.h>
+#include <linux/acpi.h>
 #include <linux/io.h>
 #include <linux/mm.h>
 #include "nfit_test.h"
@@ -73,7 +74,7 @@
 
 	if (nfit_res)
 		return (void __iomem *) nfit_res->buf + offset
-			- nfit_res->res->start;
+			- nfit_res->res.start;
 	return fallback_fn(offset, size);
 }
 
@@ -84,7 +85,7 @@
 
 	if (nfit_res)
 		return (void __iomem *) nfit_res->buf + offset
-			- nfit_res->res->start;
+			- nfit_res->res.start;
 	return devm_ioremap_nocache(dev, offset, size);
 }
 EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);
@@ -95,7 +96,7 @@
 	struct nfit_test_resource *nfit_res = get_nfit_res(offset);
 
 	if (nfit_res)
-		return nfit_res->buf + offset - nfit_res->res->start;
+		return nfit_res->buf + offset - nfit_res->res.start;
 	return devm_memremap(dev, offset, size, flags);
 }
 EXPORT_SYMBOL(__wrap_devm_memremap);
@@ -107,7 +108,7 @@
 	struct nfit_test_resource *nfit_res = get_nfit_res(offset);
 
 	if (nfit_res)
-		return nfit_res->buf + offset - nfit_res->res->start;
+		return nfit_res->buf + offset - nfit_res->res.start;
 	return devm_memremap_pages(dev, res, ref, altmap);
 }
 EXPORT_SYMBOL(__wrap_devm_memremap_pages);
@@ -128,7 +129,7 @@
 	struct nfit_test_resource *nfit_res = get_nfit_res(offset);
 
 	if (nfit_res)
-		return nfit_res->buf + offset - nfit_res->res->start;
+		return nfit_res->buf + offset - nfit_res->res.start;
 	return memremap(offset, size, flags);
 }
 EXPORT_SYMBOL(__wrap_memremap);
@@ -174,6 +175,63 @@
 }
 EXPORT_SYMBOL(__wrap_memunmap);
 
+static bool nfit_test_release_region(struct device *dev,
+		struct resource *parent, resource_size_t start,
+		resource_size_t n);
+
+static void nfit_devres_release(struct device *dev, void *data)
+{
+	struct resource *res = *((struct resource **) data);
+
+	WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start,
+			resource_size(res)));
+}
+
+static int match(struct device *dev, void *__res, void *match_data)
+{
+	struct resource *res = *((struct resource **) __res);
+	resource_size_t start = *((resource_size_t *) match_data);
+
+	return res->start == start;
+}
+
+static bool nfit_test_release_region(struct device *dev,
+		struct resource *parent, resource_size_t start,
+		resource_size_t n)
+{
+	if (parent == &iomem_resource) {
+		struct nfit_test_resource *nfit_res = get_nfit_res(start);
+
+		if (nfit_res) {
+			struct nfit_test_request *req;
+			struct resource *res = NULL;
+
+			if (dev) {
+				devres_release(dev, nfit_devres_release, match,
+						&start);
+				return true;
+			}
+
+			spin_lock(&nfit_res->lock);
+			list_for_each_entry(req, &nfit_res->requests, list)
+				if (req->res.start == start) {
+					res = &req->res;
+					list_del(&req->list);
+					break;
+				}
+			spin_unlock(&nfit_res->lock);
+
+			WARN(!res || resource_size(res) != n,
+					"%s: start: %llx n: %llx mismatch: %pr\n",
+						__func__, start, n, res);
+			if (res)
+				kfree(req);
+			return true;
+		}
+	}
+	return false;
+}
+
 static struct resource *nfit_test_request_region(struct device *dev,
 		struct resource *parent, resource_size_t start,
 		resource_size_t n, const char *name, int flags)
@@ -183,21 +241,57 @@
 	if (parent == &iomem_resource) {
 		nfit_res = get_nfit_res(start);
 		if (nfit_res) {
-			struct resource *res = nfit_res->res + 1;
+			struct nfit_test_request *req;
+			struct resource *res = NULL;
 
-			if (start + n > nfit_res->res->start
-					+ resource_size(nfit_res->res)) {
+			if (start + n > nfit_res->res.start
+					+ resource_size(&nfit_res->res)) {
 				pr_debug("%s: start: %llx n: %llx overflow: %pr\n",
 						__func__, start, n,
-						nfit_res->res);
+						&nfit_res->res);
 				return NULL;
 			}
 
+			spin_lock(&nfit_res->lock);
+			list_for_each_entry(req, &nfit_res->requests, list)
+				if (start == req->res.start) {
+					res = &req->res;
+					break;
+				}
+			spin_unlock(&nfit_res->lock);
+
+			if (res) {
+				WARN(1, "%pr already busy\n", res);
+				return NULL;
+			}
+
+			req = kzalloc(sizeof(*req), GFP_KERNEL);
+			if (!req)
+				return NULL;
+			INIT_LIST_HEAD(&req->list);
+			res = &req->res;
+
 			res->start = start;
 			res->end = start + n - 1;
 			res->name = name;
 			res->flags = resource_type(parent);
 			res->flags |= IORESOURCE_BUSY | flags;
+			spin_lock(&nfit_res->lock);
+			list_add(&req->list, &nfit_res->requests);
+			spin_unlock(&nfit_res->lock);
+
+			if (dev) {
+				struct resource **d;
+
+				d = devres_alloc(nfit_devres_release,
+						sizeof(struct resource *),
+						GFP_KERNEL);
+				if (!d)
+					return NULL;
+				*d = res;
+				devres_add(dev, d);
+			}
+
 			pr_debug("%s: %pr\n", __func__, res);
 			return res;
 		}
@@ -241,29 +335,10 @@
 }
 EXPORT_SYMBOL(__wrap___devm_request_region);
 
-static bool nfit_test_release_region(struct resource *parent,
-		resource_size_t start, resource_size_t n)
-{
-	if (parent == &iomem_resource) {
-		struct nfit_test_resource *nfit_res = get_nfit_res(start);
-		if (nfit_res) {
-			struct resource *res = nfit_res->res + 1;
-
-			if (start != res->start || resource_size(res) != n)
-				pr_info("%s: start: %llx n: %llx mismatch: %pr\n",
-						__func__, start, n, res);
-			else
-				memset(res, 0, sizeof(*res));
-			return true;
-		}
-	}
-	return false;
-}
-
 void __wrap___release_region(struct resource *parent, resource_size_t start,
 		resource_size_t n)
 {
-	if (!nfit_test_release_region(parent, start, n))
+	if (!nfit_test_release_region(NULL, parent, start, n))
 		__release_region(parent, start, n);
 }
 EXPORT_SYMBOL(__wrap___release_region);
@@ -271,9 +346,25 @@
 void __wrap___devm_release_region(struct device *dev, struct resource *parent,
 		resource_size_t start, resource_size_t n)
 {
-	if (!nfit_test_release_region(parent, start, n))
+	if (!nfit_test_release_region(dev, parent, start, n))
 		__devm_release_region(dev, parent, start, n);
 }
 EXPORT_SYMBOL(__wrap___devm_release_region);
 
+acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
+		struct acpi_object_list *p, struct acpi_buffer *buf)
+{
+	struct nfit_test_resource *nfit_res = get_nfit_res((long) handle);
+	union acpi_object **obj;
+
+	if (!nfit_res || strcmp(path, "_FIT") || !buf)
+		return acpi_evaluate_object(handle, path, p, buf);
+
+	obj = nfit_res->buf;
+	buf->length = sizeof(union acpi_object);
+	buf->pointer = *obj;
+	return AE_OK;
+}
+EXPORT_SYMBOL(__wrap_acpi_evaluate_object);
+
 MODULE_LICENSE("GPL v2");
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index f64c57b..c9a6458 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -132,6 +132,8 @@
 	[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
 };
 
+static unsigned long dimm_fail_cmd_flags[NUM_DCR];
+
 struct nfit_test {
 	struct acpi_nfit_desc acpi_desc;
 	struct platform_device pdev;
@@ -154,11 +156,14 @@
 	int (*alloc)(struct nfit_test *t);
 	void (*setup)(struct nfit_test *t);
 	int setup_hotplug;
+	union acpi_object **_fit;
+	dma_addr_t _fit_dma;
 	struct ars_state {
 		struct nd_cmd_ars_status *ars_status;
 		unsigned long deadline;
 		spinlock_t lock;
 	} ars_state;
+	struct device *dimm_dev[NUM_DCR];
 };
 
 static struct nfit_test *to_nfit_test(struct device *dev)
@@ -411,6 +416,9 @@
 		if (i >= ARRAY_SIZE(handle))
 			return -ENXIO;
 
+		if ((1 << func) & dimm_fail_cmd_flags[i])
+			return -EIO;
+
 		switch (func) {
 		case ND_CMD_GET_CONFIG_SIZE:
 			rc = nfit_test_cmd_get_config_size(buf, buf_len);
@@ -428,6 +436,9 @@
 			break;
 		case ND_CMD_SMART_THRESHOLD:
 			rc = nfit_test_cmd_smart_threshold(buf, buf_len);
+			device_lock(&t->pdev.dev);
+			__acpi_nvdimm_notify(t->dimm_dev[i], 0x81);
+			device_unlock(&t->pdev.dev);
 			break;
 		default:
 			return -ENOTTY;
@@ -467,14 +478,12 @@
 static void release_nfit_res(void *data)
 {
 	struct nfit_test_resource *nfit_res = data;
-	struct resource *res = nfit_res->res;
 
 	spin_lock(&nfit_test_lock);
 	list_del(&nfit_res->list);
 	spin_unlock(&nfit_test_lock);
 
 	vfree(nfit_res->buf);
-	kfree(res);
 	kfree(nfit_res);
 }
 
@@ -482,12 +491,11 @@
 		void *buf)
 {
 	struct device *dev = &t->pdev.dev;
-	struct resource *res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
 	struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
 			GFP_KERNEL);
 	int rc;
 
-	if (!res || !buf || !nfit_res)
+	if (!buf || !nfit_res)
 		goto err;
 	rc = devm_add_action(dev, release_nfit_res, nfit_res);
 	if (rc)
@@ -496,10 +504,11 @@
 	memset(buf, 0, size);
 	nfit_res->dev = dev;
 	nfit_res->buf = buf;
-	nfit_res->res = res;
-	res->start = *dma;
-	res->end = *dma + size - 1;
-	res->name = "NFIT";
+	nfit_res->res.start = *dma;
+	nfit_res->res.end = *dma + size - 1;
+	nfit_res->res.name = "NFIT";
+	spin_lock_init(&nfit_res->lock);
+	INIT_LIST_HEAD(&nfit_res->requests);
 	spin_lock(&nfit_test_lock);
 	list_add(&nfit_res->list, &t->resources);
 	spin_unlock(&nfit_test_lock);
@@ -508,7 +517,6 @@
  err:
 	if (buf)
 		vfree(buf);
-	kfree(res);
 	kfree(nfit_res);
 	return NULL;
 }
@@ -533,13 +541,13 @@
 			continue;
 		spin_lock(&nfit_test_lock);
 		list_for_each_entry(n, &t->resources, list) {
-			if (addr >= n->res->start && (addr < n->res->start
-						+ resource_size(n->res))) {
+			if (addr >= n->res.start && (addr < n->res.start
+						+ resource_size(&n->res))) {
 				nfit_res = n;
 				break;
 			} else if (addr >= (unsigned long) n->buf
 					&& (addr < (unsigned long) n->buf
-						+ resource_size(n->res))) {
+						+ resource_size(&n->res))) {
 				nfit_res = n;
 				break;
 			}
@@ -564,6 +572,86 @@
 	return 0;
 }
 
+static void put_dimms(void *data)
+{
+	struct device **dimm_dev = data;
+	int i;
+
+	for (i = 0; i < NUM_DCR; i++)
+		if (dimm_dev[i])
+			device_unregister(dimm_dev[i]);
+}
+
+static struct class *nfit_test_dimm;
+
+static int dimm_name_to_id(struct device *dev)
+{
+	int dimm;
+
+	if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1
+			|| dimm >= NUM_DCR || dimm < 0)
+		return -ENXIO;
+	return dimm;
+}
+
+
+static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	int dimm = dimm_name_to_id(dev);
+
+	if (dimm < 0)
+		return dimm;
+
+	return sprintf(buf, "%#x", handle[dimm]);
+}
+DEVICE_ATTR_RO(handle);
+
+static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	int dimm = dimm_name_to_id(dev);
+
+	if (dimm < 0)
+		return dimm;
+
+	return sprintf(buf, "%#lx\n", dimm_fail_cmd_flags[dimm]);
+}
+
+static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t size)
+{
+	int dimm = dimm_name_to_id(dev);
+	unsigned long val;
+	ssize_t rc;
+
+	if (dimm < 0)
+		return dimm;
+
+	rc = kstrtol(buf, 0, &val);
+	if (rc)
+		return rc;
+
+	dimm_fail_cmd_flags[dimm] = val;
+	return size;
+}
+static DEVICE_ATTR_RW(fail_cmd);
+
+static struct attribute *nfit_test_dimm_attributes[] = {
+	&dev_attr_fail_cmd.attr,
+	&dev_attr_handle.attr,
+	NULL,
+};
+
+static struct attribute_group nfit_test_dimm_attribute_group = {
+	.attrs = nfit_test_dimm_attributes,
+};
+
+static const struct attribute_group *nfit_test_dimm_attribute_groups[] = {
+	&nfit_test_dimm_attribute_group,
+	NULL,
+};
+
 static int nfit_test0_alloc(struct nfit_test *t)
 {
 	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
@@ -616,6 +704,21 @@
 			return -ENOMEM;
 	}
 
+	t->_fit = test_alloc(t, sizeof(union acpi_object **), &t->_fit_dma);
+	if (!t->_fit)
+		return -ENOMEM;
+
+	if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t->dimm_dev))
+		return -ENOMEM;
+	for (i = 0; i < NUM_DCR; i++) {
+		t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm,
+				&t->pdev.dev, 0, NULL,
+				nfit_test_dimm_attribute_groups,
+				"test_dimm%d", i);
+		if (!t->dimm_dev[i])
+			return -ENOMEM;
+	}
+
 	return ars_state_init(&t->pdev.dev, &t->ars_state);
 }
 
@@ -1409,6 +1512,8 @@
 	struct acpi_nfit_desc *acpi_desc;
 	struct device *dev = &pdev->dev;
 	struct nfit_test *nfit_test;
+	struct nfit_mem *nfit_mem;
+	union acpi_object *obj;
 	int rc;
 
 	nfit_test = to_nfit_test(&pdev->dev);
@@ -1476,14 +1581,30 @@
 	if (nfit_test->setup != nfit_test0_setup)
 		return 0;
 
-	flush_work(&acpi_desc->work);
 	nfit_test->setup_hotplug = 1;
 	nfit_test->setup(nfit_test);
 
-	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
-			nfit_test->nfit_size);
-	if (rc)
-		return rc;
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		return -ENOMEM;
+	obj->type = ACPI_TYPE_BUFFER;
+	obj->buffer.length = nfit_test->nfit_size;
+	obj->buffer.pointer = nfit_test->nfit_buf;
+	*(nfit_test->_fit) = obj;
+	__acpi_nfit_notify(&pdev->dev, nfit_test, 0x80);
+
+	/* associate dimm devices with nfit_mem data for notification testing */
+	mutex_lock(&acpi_desc->init_mutex);
+	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
+		u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle;
+		int i;
+
+		for (i = 0; i < NUM_DCR; i++)
+			if (nfit_handle == handle[i])
+				dev_set_drvdata(nfit_test->dimm_dev[i],
+						nfit_mem);
+	}
+	mutex_unlock(&acpi_desc->init_mutex);
 
 	return 0;
 }
@@ -1518,6 +1639,10 @@
 {
 	int rc, i;
 
+	nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm");
+	if (IS_ERR(nfit_test_dimm))
+		return PTR_ERR(nfit_test_dimm);
+
 	nfit_test_setup(nfit_test_lookup);
 
 	for (i = 0; i < NUM_NFITS; i++) {
@@ -1584,6 +1709,7 @@
 	for (i = 0; i < NUM_NFITS; i++)
 		platform_device_unregister(&instances[i]->pdev);
 	nfit_test_teardown();
+	class_destroy(nfit_test_dimm);
 }
 
 module_init(nfit_test_init);
diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
index 9f18e2a..c281dd2 100644
--- a/tools/testing/nvdimm/test/nfit_test.h
+++ b/tools/testing/nvdimm/test/nfit_test.h
@@ -13,11 +13,21 @@
 #ifndef __NFIT_TEST_H__
 #define __NFIT_TEST_H__
 #include <linux/list.h>
+#include <linux/ioport.h>
+#include <linux/spinlock_types.h>
+
+struct nfit_test_request {
+	struct list_head list;
+	struct resource res;
+};
 
 struct nfit_test_resource {
+	struct list_head requests;
 	struct list_head list;
-	struct resource *res;
+	struct resource res;
 	struct device *dev;
+	spinlock_t lock;
+	int req_count;
 	void *buf;
 };
 
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index 9d0919ed..f2e07f2 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -3,7 +3,8 @@
 LDFLAGS += -lpthread -lurcu
 TARGETS = main
 OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \
-	 regression1.o regression2.o regression3.o multiorder.o
+	 regression1.o regression2.o regression3.o multiorder.o \
+	 iteration_check.o
 
 targets: $(TARGETS)
 
diff --git a/tools/testing/radix-tree/iteration_check.c b/tools/testing/radix-tree/iteration_check.c
new file mode 100644
index 0000000..9adb8e7
--- /dev/null
+++ b/tools/testing/radix-tree/iteration_check.c
@@ -0,0 +1,180 @@
+/*
+ * iteration_check.c: test races having to do with radix tree iteration
+ * Copyright (c) 2016 Intel Corporation
+ * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#include <linux/radix-tree.h>
+#include <pthread.h>
+#include "test.h"
+
+#define NUM_THREADS 4
+#define TAG 0
+static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_t threads[NUM_THREADS];
+RADIX_TREE(tree, GFP_KERNEL);
+bool test_complete;
+
+/* relentlessly fill the tree with tagged entries */
+static void *add_entries_fn(void *arg)
+{
+	int pgoff;
+
+	while (!test_complete) {
+		for (pgoff = 0; pgoff < 100; pgoff++) {
+			pthread_mutex_lock(&tree_lock);
+			if (item_insert(&tree, pgoff) == 0)
+				item_tag_set(&tree, pgoff, TAG);
+			pthread_mutex_unlock(&tree_lock);
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Iterate over the tagged entries, doing a radix_tree_iter_retry() as we find
+ * things that have been removed and randomly resetting our iteration to the
+ * next chunk with radix_tree_iter_next().  Both radix_tree_iter_retry() and
+ * radix_tree_iter_next() cause radix_tree_next_slot() to be called with a
+ * NULL 'slot' variable.
+ */
+static void *tagged_iteration_fn(void *arg)
+{
+	struct radix_tree_iter iter;
+	void **slot;
+
+	while (!test_complete) {
+		rcu_read_lock();
+		radix_tree_for_each_tagged(slot, &tree, &iter, 0, TAG) {
+			void *entry;
+			int i;
+
+			/* busy wait to let removals happen */
+			for (i = 0; i < 1000000; i++)
+				;
+
+			entry = radix_tree_deref_slot(slot);
+			if (unlikely(!entry))
+				continue;
+
+			if (radix_tree_deref_retry(entry)) {
+				slot = radix_tree_iter_retry(&iter);
+				continue;
+			}
+
+			if (rand() % 50 == 0)
+				slot = radix_tree_iter_next(&iter);
+		}
+		rcu_read_unlock();
+	}
+
+	return NULL;
+}
+
+/*
+ * Iterate over the entries, doing a radix_tree_iter_retry() as we find things
+ * that have been removed and randomly resetting our iteration to the next
+ * chunk with radix_tree_iter_next().  Both radix_tree_iter_retry() and
+ * radix_tree_iter_next() cause radix_tree_next_slot() to be called with a
+ * NULL 'slot' variable.
+ */
+static void *untagged_iteration_fn(void *arg)
+{
+	struct radix_tree_iter iter;
+	void **slot;
+
+	while (!test_complete) {
+		rcu_read_lock();
+		radix_tree_for_each_slot(slot, &tree, &iter, 0) {
+			void *entry;
+			int i;
+
+			/* busy wait to let removals happen */
+			for (i = 0; i < 1000000; i++)
+				;
+
+			entry = radix_tree_deref_slot(slot);
+			if (unlikely(!entry))
+				continue;
+
+			if (radix_tree_deref_retry(entry)) {
+				slot = radix_tree_iter_retry(&iter);
+				continue;
+			}
+
+			if (rand() % 50 == 0)
+				slot = radix_tree_iter_next(&iter);
+		}
+		rcu_read_unlock();
+	}
+
+	return NULL;
+}
+
+/*
+ * Randomly remove entries to help induce radix_tree_iter_retry() calls in the
+ * two iteration functions.
+ */
+static void *remove_entries_fn(void *arg)
+{
+	while (!test_complete) {
+		int pgoff;
+
+		pgoff = rand() % 100;
+
+		pthread_mutex_lock(&tree_lock);
+		item_delete(&tree, pgoff);
+		pthread_mutex_unlock(&tree_lock);
+	}
+
+	return NULL;
+}
+
+/* This is a unit test for a bug found by the syzkaller tester */
+void iteration_test(void)
+{
+	int i;
+
+	printf("Running iteration tests for 10 seconds\n");
+
+	srand(time(0));
+	test_complete = false;
+
+	if (pthread_create(&threads[0], NULL, tagged_iteration_fn, NULL)) {
+		perror("pthread_create");
+		exit(1);
+	}
+	if (pthread_create(&threads[1], NULL, untagged_iteration_fn, NULL)) {
+		perror("pthread_create");
+		exit(1);
+	}
+	if (pthread_create(&threads[2], NULL, add_entries_fn, NULL)) {
+		perror("pthread_create");
+		exit(1);
+	}
+	if (pthread_create(&threads[3], NULL, remove_entries_fn, NULL)) {
+		perror("pthread_create");
+		exit(1);
+	}
+
+	sleep(10);
+	test_complete = true;
+
+	for (i = 0; i < NUM_THREADS; i++) {
+		if (pthread_join(threads[i], NULL)) {
+			perror("pthread_join");
+			exit(1);
+		}
+	}
+
+	item_kill_tree(&tree);
+}
diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c
index b7619ff..daa9010 100644
--- a/tools/testing/radix-tree/main.c
+++ b/tools/testing/radix-tree/main.c
@@ -332,6 +332,7 @@
 	regression1_test();
 	regression2_test();
 	regression3_test();
+	iteration_test();
 	single_thread_tests(long_run);
 
 	sleep(1);
diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c
index 2d03a63..0d6813a 100644
--- a/tools/testing/radix-tree/regression1.c
+++ b/tools/testing/radix-tree/regression1.c
@@ -43,7 +43,7 @@
 #include "regression.h"
 
 static RADIX_TREE(mt_tree, GFP_KERNEL);
-static pthread_mutex_t mt_lock;
+static pthread_mutex_t mt_lock = PTHREAD_MUTEX_INITIALIZER;
 
 struct page {
 	pthread_mutex_t lock;
diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h
index e851313..217fb24 100644
--- a/tools/testing/radix-tree/test.h
+++ b/tools/testing/radix-tree/test.h
@@ -27,6 +27,7 @@
 
 void tag_check(void);
 void multiorder_checks(void);
+void iteration_test(void);
 
 struct item *
 item_tag_set(struct radix_tree_root *root, unsigned long index, int tag);
diff --git a/Documentation/filesystems/.gitignore b/tools/testing/selftests/filesystems/.gitignore
similarity index 100%
rename from Documentation/filesystems/.gitignore
rename to tools/testing/selftests/filesystems/.gitignore
diff --git a/tools/testing/selftests/filesystems/Makefile b/tools/testing/selftests/filesystems/Makefile
new file mode 100644
index 0000000..0ab1130
--- /dev/null
+++ b/tools/testing/selftests/filesystems/Makefile
@@ -0,0 +1,7 @@
+TEST_PROGS := dnotify_test
+all: $(TEST_PROGS)
+
+include ../lib.mk
+
+clean:
+	rm -fr $(TEST_PROGS)
diff --git a/Documentation/filesystems/dnotify_test.c b/tools/testing/selftests/filesystems/dnotify_test.c
similarity index 100%
rename from Documentation/filesystems/dnotify_test.c
rename to tools/testing/selftests/filesystems/dnotify_test.c
diff --git a/tools/testing/selftests/futex/functional/run.sh b/tools/testing/selftests/futex/functional/run.sh
index e87dbe2..7ff002e 100755
--- a/tools/testing/selftests/futex/functional/run.sh
+++ b/tools/testing/selftests/futex/functional/run.sh
@@ -24,7 +24,7 @@
 
 # Test for a color capable console
 if [ -z "$USE_COLOR" ]; then
-    tput setf 7
+    tput setf 7 || tput setaf 7
     if [ $? -eq 0 ]; then
         USE_COLOR=1
         tput sgr0
diff --git a/tools/testing/selftests/futex/run.sh b/tools/testing/selftests/futex/run.sh
index 4126312..88bcb17 100755
--- a/tools/testing/selftests/futex/run.sh
+++ b/tools/testing/selftests/futex/run.sh
@@ -23,7 +23,7 @@
 
 # Test for a color capable shell and pass the result to the subdir scripts
 USE_COLOR=0
-tput setf 7
+tput setf 7 || tput setaf 7
 if [ $? -eq 0 ]; then
     USE_COLOR=1
     tput sgr0
diff --git a/Documentation/ia64/.gitignore b/tools/testing/selftests/ia64/.gitignore
similarity index 100%
rename from Documentation/ia64/.gitignore
rename to tools/testing/selftests/ia64/.gitignore
diff --git a/tools/testing/selftests/ia64/Makefile b/tools/testing/selftests/ia64/Makefile
new file mode 100644
index 0000000..2b3de2d
--- /dev/null
+++ b/tools/testing/selftests/ia64/Makefile
@@ -0,0 +1,8 @@
+TEST_PROGS := aliasing-test
+
+all: $(TEST_PROGS)
+
+include ../lib.mk
+
+clean:
+	rm -fr $(TEST_PROGS)
diff --git a/Documentation/ia64/aliasing-test.c b/tools/testing/selftests/ia64/aliasing-test.c
similarity index 100%
rename from Documentation/ia64/aliasing-test.c
rename to tools/testing/selftests/ia64/aliasing-test.c
diff --git a/Documentation/networking/timestamping/.gitignore b/tools/testing/selftests/networking/timestamping/.gitignore
similarity index 100%
rename from Documentation/networking/timestamping/.gitignore
rename to tools/testing/selftests/networking/timestamping/.gitignore
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
new file mode 100644
index 0000000..ccbb9ed
--- /dev/null
+++ b/tools/testing/selftests/networking/timestamping/Makefile
@@ -0,0 +1,8 @@
+TEST_PROGS := hwtstamp_config timestamping txtimestamp
+
+all: $(TEST_PROGS)
+
+include ../../lib.mk
+
+clean:
+	rm -fr $(TEST_PROGS)
diff --git a/Documentation/networking/timestamping/hwtstamp_config.c b/tools/testing/selftests/networking/timestamping/hwtstamp_config.c
similarity index 100%
rename from Documentation/networking/timestamping/hwtstamp_config.c
rename to tools/testing/selftests/networking/timestamping/hwtstamp_config.c
diff --git a/Documentation/networking/timestamping/timestamping.c b/tools/testing/selftests/networking/timestamping/timestamping.c
similarity index 100%
rename from Documentation/networking/timestamping/timestamping.c
rename to tools/testing/selftests/networking/timestamping/timestamping.c
diff --git a/Documentation/networking/timestamping/txtimestamp.c b/tools/testing/selftests/networking/timestamping/txtimestamp.c
similarity index 100%
rename from Documentation/networking/timestamping/txtimestamp.c
rename to tools/testing/selftests/networking/timestamping/txtimestamp.c
diff --git a/tools/testing/selftests/powerpc/copyloops/asm/export.h b/tools/testing/selftests/powerpc/copyloops/asm/export.h
new file mode 100644
index 0000000..2d14a9b
--- /dev/null
+++ b/tools/testing/selftests/powerpc/copyloops/asm/export.h
@@ -0,0 +1 @@
+#define EXPORT_SYMBOL(x)
diff --git a/tools/testing/selftests/powerpc/math/.gitignore b/tools/testing/selftests/powerpc/math/.gitignore
index 4fe13a4..50ded63 100644
--- a/tools/testing/selftests/powerpc/math/.gitignore
+++ b/tools/testing/selftests/powerpc/math/.gitignore
@@ -4,3 +4,4 @@
 vmx_preempt
 fpu_signal
 vmx_signal
+vsx_preempt
diff --git a/tools/testing/selftests/powerpc/signal/.gitignore b/tools/testing/selftests/powerpc/signal/.gitignore
new file mode 100644
index 0000000..1b89224
--- /dev/null
+++ b/tools/testing/selftests/powerpc/signal/.gitignore
@@ -0,0 +1,2 @@
+signal
+signal_tm
diff --git a/tools/testing/selftests/powerpc/stringloops/asm/export.h b/tools/testing/selftests/powerpc/stringloops/asm/export.h
new file mode 100644
index 0000000..2d14a9b
--- /dev/null
+++ b/tools/testing/selftests/powerpc/stringloops/asm/export.h
@@ -0,0 +1 @@
+#define EXPORT_SYMBOL(x)
diff --git a/tools/testing/selftests/powerpc/tm/.gitignore b/tools/testing/selftests/powerpc/tm/.gitignore
index 82c0a9c..4276217 100644
--- a/tools/testing/selftests/powerpc/tm/.gitignore
+++ b/tools/testing/selftests/powerpc/tm/.gitignore
@@ -7,3 +7,7 @@
 tm-tar
 tm-tmspr
 tm-exec
+tm-signal-context-chk-fpu
+tm-signal-context-chk-gpr
+tm-signal-context-chk-vmx
+tm-signal-context-chk-vsx
diff --git a/Documentation/prctl/.gitignore b/tools/testing/selftests/prctl/.gitignore
similarity index 100%
rename from Documentation/prctl/.gitignore
rename to tools/testing/selftests/prctl/.gitignore
diff --git a/tools/testing/selftests/prctl/Makefile b/tools/testing/selftests/prctl/Makefile
new file mode 100644
index 0000000..35aa1c8
--- /dev/null
+++ b/tools/testing/selftests/prctl/Makefile
@@ -0,0 +1,15 @@
+ifndef CROSS_COMPILE
+uname_M := $(shell uname -m 2>/dev/null || echo not)
+ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
+
+ifeq ($(ARCH),x86)
+TEST_PROGS := disable-tsc-ctxt-sw-stress-test disable-tsc-on-off-stress-test \
+		disable-tsc-test
+all: $(TEST_PROGS)
+
+include ../lib.mk
+
+clean:
+	rm -fr $(TEST_PROGS)
+endif
+endif
diff --git a/Documentation/prctl/disable-tsc-ctxt-sw-stress-test.c b/tools/testing/selftests/prctl/disable-tsc-ctxt-sw-stress-test.c
similarity index 100%
rename from Documentation/prctl/disable-tsc-ctxt-sw-stress-test.c
rename to tools/testing/selftests/prctl/disable-tsc-ctxt-sw-stress-test.c
diff --git a/Documentation/prctl/disable-tsc-on-off-stress-test.c b/tools/testing/selftests/prctl/disable-tsc-on-off-stress-test.c
similarity index 100%
rename from Documentation/prctl/disable-tsc-on-off-stress-test.c
rename to tools/testing/selftests/prctl/disable-tsc-on-off-stress-test.c
diff --git a/Documentation/prctl/disable-tsc-test.c b/tools/testing/selftests/prctl/disable-tsc-test.c
similarity index 100%
rename from Documentation/prctl/disable-tsc-test.c
rename to tools/testing/selftests/prctl/disable-tsc-test.c
diff --git a/Documentation/ptp/.gitignore b/tools/testing/selftests/ptp/.gitignore
similarity index 100%
rename from Documentation/ptp/.gitignore
rename to tools/testing/selftests/ptp/.gitignore
diff --git a/tools/testing/selftests/ptp/Makefile b/tools/testing/selftests/ptp/Makefile
new file mode 100644
index 0000000..83dd42b
--- /dev/null
+++ b/tools/testing/selftests/ptp/Makefile
@@ -0,0 +1,8 @@
+TEST_PROGS := testptp
+LDLIBS += -lrt
+all: $(TEST_PROGS)
+
+include ../lib.mk
+
+clean:
+	rm -fr $(TEST_PROGS)
diff --git a/Documentation/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c
similarity index 100%
rename from Documentation/ptp/testptp.c
rename to tools/testing/selftests/ptp/testptp.c
diff --git a/Documentation/ptp/testptp.mk b/tools/testing/selftests/ptp/testptp.mk
similarity index 100%
rename from Documentation/ptp/testptp.mk
rename to tools/testing/selftests/ptp/testptp.mk
diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
index 5a246a0..15cf56d 100644
--- a/tools/testing/selftests/timers/posix_timers.c
+++ b/tools/testing/selftests/timers/posix_timers.c
@@ -122,7 +122,7 @@
 	else if (which == ITIMER_REAL)
 		idle_loop();
 
-	gettimeofday(&end, NULL);
+	err = gettimeofday(&end, NULL);
 	if (err < 0) {
 		perror("Can't call gettimeofday()\n");
 		return -1;
@@ -175,7 +175,7 @@
 
 	user_loop();
 
-	gettimeofday(&end, NULL);
+	err = gettimeofday(&end, NULL);
 	if (err < 0) {
 		perror("Can't call gettimeofday()\n");
 		return -1;
diff --git a/Documentation/vDSO/.gitignore b/tools/testing/selftests/vDSO/.gitignore
similarity index 100%
rename from Documentation/vDSO/.gitignore
rename to tools/testing/selftests/vDSO/.gitignore
diff --git a/tools/testing/selftests/vDSO/Makefile b/tools/testing/selftests/vDSO/Makefile
new file mode 100644
index 0000000..706b68b
--- /dev/null
+++ b/tools/testing/selftests/vDSO/Makefile
@@ -0,0 +1,20 @@
+ifndef CROSS_COMPILE
+CFLAGS := -std=gnu99
+CFLAGS_vdso_standalone_test_x86 := -nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector
+ifeq ($(CONFIG_X86_32),y)
+LDLIBS += -lgcc_s
+endif
+
+TEST_PROGS := vdso_test vdso_standalone_test_x86
+
+all: $(TEST_PROGS)
+vdso_test: parse_vdso.c vdso_test.c
+vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c
+	$(CC) $(CFLAGS) $(CFLAGS_vdso_standalone_test_x86) \
+		vdso_standalone_test_x86.c parse_vdso.c \
+		-o vdso_standalone_test_x86
+
+include ../lib.mk
+clean:
+	rm -fr $(TEST_PROGS)
+endif
diff --git a/Documentation/vDSO/parse_vdso.c b/tools/testing/selftests/vDSO/parse_vdso.c
similarity index 100%
rename from Documentation/vDSO/parse_vdso.c
rename to tools/testing/selftests/vDSO/parse_vdso.c
diff --git a/Documentation/vDSO/vdso_standalone_test_x86.c b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
similarity index 100%
rename from Documentation/vDSO/vdso_standalone_test_x86.c
rename to tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
diff --git a/Documentation/vDSO/vdso_test.c b/tools/testing/selftests/vDSO/vdso_test.c
similarity index 100%
rename from Documentation/vDSO/vdso_test.c
rename to tools/testing/selftests/vDSO/vdso_test.c
diff --git a/tools/testing/selftests/watchdog/.gitignore b/tools/testing/selftests/watchdog/.gitignore
new file mode 100644
index 0000000..5aac5157
--- /dev/null
+++ b/tools/testing/selftests/watchdog/.gitignore
@@ -0,0 +1 @@
+watchdog-test
diff --git a/tools/testing/selftests/watchdog/Makefile b/tools/testing/selftests/watchdog/Makefile
new file mode 100644
index 0000000..f863c66
--- /dev/null
+++ b/tools/testing/selftests/watchdog/Makefile
@@ -0,0 +1,8 @@
+TEST_PROGS := watchdog-test
+
+all: $(TEST_PROGS)
+
+include ../lib.mk
+
+clean:
+	rm -fr $(TEST_PROGS)
diff --git a/Documentation/watchdog/src/watchdog-test.c b/tools/testing/selftests/watchdog/watchdog-test.c
similarity index 100%
rename from Documentation/watchdog/src/watchdog-test.c
rename to tools/testing/selftests/watchdog/watchdog-test.c
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 4f747ee..a89f80a 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -5,7 +5,8 @@
 .PHONY: all all_32 all_64 warn_32bit_failure clean
 
 TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \
-			check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test
+			check_initial_reg_state sigreturn ldt_gdt iopl \
+			protection_keys
 TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
 			test_FCMOV test_FCOMI test_FISTTP \
 			vdso_restorer
diff --git a/tools/testing/selftests/x86/pkey-helpers.h b/tools/testing/selftests/x86/pkey-helpers.h
new file mode 100644
index 0000000..b202939
--- /dev/null
+++ b/tools/testing/selftests/x86/pkey-helpers.h
@@ -0,0 +1,219 @@
+#ifndef _PKEYS_HELPER_H
+#define _PKEYS_HELPER_H
+#define _GNU_SOURCE
+#include <string.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <signal.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <ucontext.h>
+#include <sys/mman.h>
+
+#define NR_PKEYS 16
+#define PKRU_BITS_PER_PKEY 2
+
+#ifndef DEBUG_LEVEL
+#define DEBUG_LEVEL 0
+#endif
+#define DPRINT_IN_SIGNAL_BUF_SIZE 4096
+extern int dprint_in_signal;
+extern char dprint_in_signal_buffer[DPRINT_IN_SIGNAL_BUF_SIZE];
+static inline void sigsafe_printf(const char *format, ...)
+{
+	va_list ap;
+
+	va_start(ap, format);
+	if (!dprint_in_signal) {
+		vprintf(format, ap);
+	} else {
+		int len = vsnprintf(dprint_in_signal_buffer,
+				    DPRINT_IN_SIGNAL_BUF_SIZE,
+				    format, ap);
+		/*
+		 * len is amount that would have been printed,
+		 * but actual write is truncated at BUF_SIZE.
+		 */
+		if (len > DPRINT_IN_SIGNAL_BUF_SIZE)
+			len = DPRINT_IN_SIGNAL_BUF_SIZE;
+		write(1, dprint_in_signal_buffer, len);
+	}
+	va_end(ap);
+}
+#define dprintf_level(level, args...) do {	\
+	if (level <= DEBUG_LEVEL)		\
+		sigsafe_printf(args);		\
+	fflush(NULL);				\
+} while (0)
+#define dprintf0(args...) dprintf_level(0, args)
+#define dprintf1(args...) dprintf_level(1, args)
+#define dprintf2(args...) dprintf_level(2, args)
+#define dprintf3(args...) dprintf_level(3, args)
+#define dprintf4(args...) dprintf_level(4, args)
+
+extern unsigned int shadow_pkru;
+static inline unsigned int __rdpkru(void)
+{
+	unsigned int eax, edx;
+	unsigned int ecx = 0;
+	unsigned int pkru;
+
+	asm volatile(".byte 0x0f,0x01,0xee\n\t"
+		     : "=a" (eax), "=d" (edx)
+		     : "c" (ecx));
+	pkru = eax;
+	return pkru;
+}
+
+static inline unsigned int _rdpkru(int line)
+{
+	unsigned int pkru = __rdpkru();
+
+	dprintf4("rdpkru(line=%d) pkru: %x shadow: %x\n",
+			line, pkru, shadow_pkru);
+	assert(pkru == shadow_pkru);
+
+	return pkru;
+}
+
+#define rdpkru() _rdpkru(__LINE__)
+
+static inline void __wrpkru(unsigned int pkru)
+{
+	unsigned int eax = pkru;
+	unsigned int ecx = 0;
+	unsigned int edx = 0;
+
+	dprintf4("%s() changing %08x to %08x\n", __func__, __rdpkru(), pkru);
+	asm volatile(".byte 0x0f,0x01,0xef\n\t"
+		     : : "a" (eax), "c" (ecx), "d" (edx));
+	assert(pkru == __rdpkru());
+}
+
+static inline void wrpkru(unsigned int pkru)
+{
+	dprintf4("%s() changing %08x to %08x\n", __func__, __rdpkru(), pkru);
+	/* will do the shadow check for us: */
+	rdpkru();
+	__wrpkru(pkru);
+	shadow_pkru = pkru;
+	dprintf4("%s(%08x) pkru: %08x\n", __func__, pkru, __rdpkru());
+}
+
+/*
+ * These are technically racy. since something could
+ * change PKRU between the read and the write.
+ */
+static inline void __pkey_access_allow(int pkey, int do_allow)
+{
+	unsigned int pkru = rdpkru();
+	int bit = pkey * 2;
+
+	if (do_allow)
+		pkru &= (1<<bit);
+	else
+		pkru |= (1<<bit);
+
+	dprintf4("pkru now: %08x\n", rdpkru());
+	wrpkru(pkru);
+}
+
+static inline void __pkey_write_allow(int pkey, int do_allow_write)
+{
+	long pkru = rdpkru();
+	int bit = pkey * 2 + 1;
+
+	if (do_allow_write)
+		pkru &= (1<<bit);
+	else
+		pkru |= (1<<bit);
+
+	wrpkru(pkru);
+	dprintf4("pkru now: %08x\n", rdpkru());
+}
+
+#define PROT_PKEY0     0x10            /* protection key value (bit 0) */
+#define PROT_PKEY1     0x20            /* protection key value (bit 1) */
+#define PROT_PKEY2     0x40            /* protection key value (bit 2) */
+#define PROT_PKEY3     0x80            /* protection key value (bit 3) */
+
+#define PAGE_SIZE 4096
+#define MB	(1<<20)
+
+static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
+		unsigned int *ecx, unsigned int *edx)
+{
+	/* ecx is often an input as well as an output. */
+	asm volatile(
+		"cpuid;"
+		: "=a" (*eax),
+		  "=b" (*ebx),
+		  "=c" (*ecx),
+		  "=d" (*edx)
+		: "0" (*eax), "2" (*ecx));
+}
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx) */
+#define X86_FEATURE_PKU        (1<<3) /* Protection Keys for Userspace */
+#define X86_FEATURE_OSPKE      (1<<4) /* OS Protection Keys Enable */
+
+static inline int cpu_has_pku(void)
+{
+	unsigned int eax;
+	unsigned int ebx;
+	unsigned int ecx;
+	unsigned int edx;
+
+	eax = 0x7;
+	ecx = 0x0;
+	__cpuid(&eax, &ebx, &ecx, &edx);
+
+	if (!(ecx & X86_FEATURE_PKU)) {
+		dprintf2("cpu does not have PKU\n");
+		return 0;
+	}
+	if (!(ecx & X86_FEATURE_OSPKE)) {
+		dprintf2("cpu does not have OSPKE\n");
+		return 0;
+	}
+	return 1;
+}
+
+#define XSTATE_PKRU_BIT	(9)
+#define XSTATE_PKRU	0x200
+
+int pkru_xstate_offset(void)
+{
+	unsigned int eax;
+	unsigned int ebx;
+	unsigned int ecx;
+	unsigned int edx;
+	int xstate_offset;
+	int xstate_size;
+	unsigned long XSTATE_CPUID = 0xd;
+	int leaf;
+
+	/* assume that XSTATE_PKRU is set in XCR0 */
+	leaf = XSTATE_PKRU_BIT;
+	{
+		eax = XSTATE_CPUID;
+		ecx = leaf;
+		__cpuid(&eax, &ebx, &ecx, &edx);
+
+		if (leaf == XSTATE_PKRU_BIT) {
+			xstate_offset = ebx;
+			xstate_size = eax;
+		}
+	}
+
+	if (xstate_size == 0) {
+		printf("could not find size/offset of PKRU in xsave state\n");
+		return 0;
+	}
+
+	return xstate_offset;
+}
+
+#endif /* _PKEYS_HELPER_H */
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
new file mode 100644
index 0000000..bdd58c7
--- /dev/null
+++ b/tools/testing/selftests/x86/protection_keys.c
@@ -0,0 +1,1410 @@
+/*
+ * Tests x86 Memory Protection Keys (see Documentation/x86/protection-keys.txt)
+ *
+ * There are examples in here of:
+ *  * how to set protection keys on memory
+ *  * how to set/clear bits in PKRU (the rights register)
+ *  * how to handle SEGV_PKRU signals and extract pkey-relevant
+ *    information from the siginfo
+ *
+ * Things to add:
+ *	make sure KSM and KSM COW breaking works
+ *	prefault pages in at malloc, or not
+ *	protect MPX bounds tables with protection keys?
+ *	make sure VMA splitting/merging is working correctly
+ *	OOMs can destroy mm->mmap (see exit_mmap()), so make sure it is immune to pkeys
+ *	look for pkey "leaks" where it is still set on a VMA but "freed" back to the kernel
+ *	do a plain mprotect() to a mprotect_pkey() area and make sure the pkey sticks
+ *
+ * Compile like this:
+ *	gcc      -o protection_keys    -O2 -g -std=gnu99 -pthread -Wall protection_keys.c -lrt -ldl -lm
+ *	gcc -m32 -o protection_keys_32 -O2 -g -std=gnu99 -pthread -Wall protection_keys.c -lrt -ldl -lm
+ */
+#define _GNU_SOURCE
+#include <errno.h>
+#include <linux/futex.h>
+#include <sys/time.h>
+#include <sys/syscall.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <signal.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <ucontext.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/ptrace.h>
+#include <setjmp.h>
+
+#include "pkey-helpers.h"
+
+int iteration_nr = 1;
+int test_nr;
+
+unsigned int shadow_pkru;
+
+#define HPAGE_SIZE	(1UL<<21)
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
+#define ALIGN_UP(x, align_to)	(((x) + ((align_to)-1)) & ~((align_to)-1))
+#define ALIGN_DOWN(x, align_to) ((x) & ~((align_to)-1))
+#define ALIGN_PTR_UP(p, ptr_align_to)	((typeof(p))ALIGN_UP((unsigned long)(p),	ptr_align_to))
+#define ALIGN_PTR_DOWN(p, ptr_align_to)	((typeof(p))ALIGN_DOWN((unsigned long)(p),	ptr_align_to))
+#define __stringify_1(x...)     #x
+#define __stringify(x...)       __stringify_1(x)
+
+#define PTR_ERR_ENOTSUP ((void *)-ENOTSUP)
+
+int dprint_in_signal;
+char dprint_in_signal_buffer[DPRINT_IN_SIGNAL_BUF_SIZE];
+
+extern void abort_hooks(void);
+#define pkey_assert(condition) do {		\
+	if (!(condition)) {			\
+		dprintf0("assert() at %s::%d test_nr: %d iteration: %d\n", \
+				__FILE__, __LINE__,	\
+				test_nr, iteration_nr);	\
+		dprintf0("errno at assert: %d", errno);	\
+		abort_hooks();			\
+		assert(condition);		\
+	}					\
+} while (0)
+#define raw_assert(cond) assert(cond)
+
+void cat_into_file(char *str, char *file)
+{
+	int fd = open(file, O_RDWR);
+	int ret;
+
+	dprintf2("%s(): writing '%s' to '%s'\n", __func__, str, file);
+	/*
+	 * these need to be raw because they are called under
+	 * pkey_assert()
+	 */
+	raw_assert(fd >= 0);
+	ret = write(fd, str, strlen(str));
+	if (ret != strlen(str)) {
+		perror("write to file failed");
+		fprintf(stderr, "filename: '%s' str: '%s'\n", file, str);
+		raw_assert(0);
+	}
+	close(fd);
+}
+
+#if CONTROL_TRACING > 0
+static int warned_tracing;
+int tracing_root_ok(void)
+{
+	if (geteuid() != 0) {
+		if (!warned_tracing)
+			fprintf(stderr, "WARNING: not run as root, "
+					"can not do tracing control\n");
+		warned_tracing = 1;
+		return 0;
+	}
+	return 1;
+}
+#endif
+
+void tracing_on(void)
+{
+#if CONTROL_TRACING > 0
+#define TRACEDIR "/sys/kernel/debug/tracing"
+	char pidstr[32];
+
+	if (!tracing_root_ok())
+		return;
+
+	sprintf(pidstr, "%d", getpid());
+	cat_into_file("0", TRACEDIR "/tracing_on");
+	cat_into_file("\n", TRACEDIR "/trace");
+	if (1) {
+		cat_into_file("function_graph", TRACEDIR "/current_tracer");
+		cat_into_file("1", TRACEDIR "/options/funcgraph-proc");
+	} else {
+		cat_into_file("nop", TRACEDIR "/current_tracer");
+	}
+	cat_into_file(pidstr, TRACEDIR "/set_ftrace_pid");
+	cat_into_file("1", TRACEDIR "/tracing_on");
+	dprintf1("enabled tracing\n");
+#endif
+}
+
+void tracing_off(void)
+{
+#if CONTROL_TRACING > 0
+	if (!tracing_root_ok())
+		return;
+	cat_into_file("0", "/sys/kernel/debug/tracing/tracing_on");
+#endif
+}
+
+void abort_hooks(void)
+{
+	fprintf(stderr, "running %s()...\n", __func__);
+	tracing_off();
+#ifdef SLEEP_ON_ABORT
+	sleep(SLEEP_ON_ABORT);
+#endif
+}
+
+static inline void __page_o_noops(void)
+{
+	/* 8-bytes of instruction * 512 bytes = 1 page */
+	asm(".rept 512 ; nopl 0x7eeeeeee(%eax) ; .endr");
+}
+
+/*
+ * This attempts to have roughly a page of instructions followed by a few
+ * instructions that do a write, and another page of instructions.  That
+ * way, we are pretty sure that the write is in the second page of
+ * instructions and has at least a page of padding behind it.
+ *
+ * *That* lets us be sure to madvise() away the write instruction, which
+ * will then fault, which makes sure that the fault code handles
+ * execute-only memory properly.
+ */
+__attribute__((__aligned__(PAGE_SIZE)))
+void lots_o_noops_around_write(int *write_to_me)
+{
+	dprintf3("running %s()\n", __func__);
+	__page_o_noops();
+	/* Assume this happens in the second page of instructions: */
+	*write_to_me = __LINE__;
+	/* pad out by another page: */
+	__page_o_noops();
+	dprintf3("%s() done\n", __func__);
+}
+
+/* Define some kernel-like types */
+#define  u8 uint8_t
+#define u16 uint16_t
+#define u32 uint32_t
+#define u64 uint64_t
+
+#ifdef __i386__
+#define SYS_mprotect_key 380
+#define SYS_pkey_alloc	 381
+#define SYS_pkey_free	 382
+#define REG_IP_IDX REG_EIP
+#define si_pkey_offset 0x18
+#else
+#define SYS_mprotect_key 329
+#define SYS_pkey_alloc	 330
+#define SYS_pkey_free	 331
+#define REG_IP_IDX REG_RIP
+#define si_pkey_offset 0x20
+#endif
+
+void dump_mem(void *dumpme, int len_bytes)
+{
+	char *c = (void *)dumpme;
+	int i;
+
+	for (i = 0; i < len_bytes; i += sizeof(u64)) {
+		u64 *ptr = (u64 *)(c + i);
+		dprintf1("dump[%03d][@%p]: %016jx\n", i, ptr, *ptr);
+	}
+}
+
+#define __SI_FAULT      (3 << 16)
+#define SEGV_BNDERR     (__SI_FAULT|3)  /* failed address bound checks */
+#define SEGV_PKUERR     (__SI_FAULT|4)
+
+static char *si_code_str(int si_code)
+{
+	if (si_code & SEGV_MAPERR)
+		return "SEGV_MAPERR";
+	if (si_code & SEGV_ACCERR)
+		return "SEGV_ACCERR";
+	if (si_code & SEGV_BNDERR)
+		return "SEGV_BNDERR";
+	if (si_code & SEGV_PKUERR)
+		return "SEGV_PKUERR";
+	return "UNKNOWN";
+}
+
+int pkru_faults;
+int last_si_pkey = -1;
+void signal_handler(int signum, siginfo_t *si, void *vucontext)
+{
+	ucontext_t *uctxt = vucontext;
+	int trapno;
+	unsigned long ip;
+	char *fpregs;
+	u32 *pkru_ptr;
+	u64 si_pkey;
+	u32 *si_pkey_ptr;
+	int pkru_offset;
+	fpregset_t fpregset;
+
+	dprint_in_signal = 1;
+	dprintf1(">>>>===============SIGSEGV============================\n");
+	dprintf1("%s()::%d, pkru: 0x%x shadow: %x\n", __func__, __LINE__,
+			__rdpkru(), shadow_pkru);
+
+	trapno = uctxt->uc_mcontext.gregs[REG_TRAPNO];
+	ip = uctxt->uc_mcontext.gregs[REG_IP_IDX];
+	fpregset = uctxt->uc_mcontext.fpregs;
+	fpregs = (void *)fpregset;
+
+	dprintf2("%s() trapno: %d ip: 0x%lx info->si_code: %s/%d\n", __func__,
+			trapno, ip, si_code_str(si->si_code), si->si_code);
+#ifdef __i386__
+	/*
+	 * 32-bit has some extra padding so that userspace can tell whether
+	 * the XSTATE header is present in addition to the "legacy" FPU
+	 * state.  We just assume that it is here.
+	 */
+	fpregs += 0x70;
+#endif
+	pkru_offset = pkru_xstate_offset();
+	pkru_ptr = (void *)(&fpregs[pkru_offset]);
+
+	dprintf1("siginfo: %p\n", si);
+	dprintf1(" fpregs: %p\n", fpregs);
+	/*
+	 * If we got a PKRU fault, we *HAVE* to have at least one bit set in
+	 * here.
+	 */
+	dprintf1("pkru_xstate_offset: %d\n", pkru_xstate_offset());
+	if (DEBUG_LEVEL > 4)
+		dump_mem(pkru_ptr - 128, 256);
+	pkey_assert(*pkru_ptr);
+
+	si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset);
+	dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr);
+	dump_mem(si_pkey_ptr - 8, 24);
+	si_pkey = *si_pkey_ptr;
+	pkey_assert(si_pkey < NR_PKEYS);
+	last_si_pkey = si_pkey;
+
+	if ((si->si_code == SEGV_MAPERR) ||
+	    (si->si_code == SEGV_ACCERR) ||
+	    (si->si_code == SEGV_BNDERR)) {
+		printf("non-PK si_code, exiting...\n");
+		exit(4);
+	}
+
+	dprintf1("signal pkru from xsave: %08x\n", *pkru_ptr);
+	/* need __rdpkru() version so we do not do shadow_pkru checking */
+	dprintf1("signal pkru from  pkru: %08x\n", __rdpkru());
+	dprintf1("si_pkey from siginfo: %jx\n", si_pkey);
+	*(u64 *)pkru_ptr = 0x00000000;
+	dprintf1("WARNING: set PRKU=0 to allow faulting instruction to continue\n");
+	pkru_faults++;
+	dprintf1("<<<<==================================================\n");
+	return;
+	if (trapno == 14) {
+		fprintf(stderr,
+			"ERROR: In signal handler, page fault, trapno = %d, ip = %016lx\n",
+			trapno, ip);
+		fprintf(stderr, "si_addr %p\n", si->si_addr);
+		fprintf(stderr, "REG_ERR: %lx\n",
+				(unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]);
+		exit(1);
+	} else {
+		fprintf(stderr, "unexpected trap %d! at 0x%lx\n", trapno, ip);
+		fprintf(stderr, "si_addr %p\n", si->si_addr);
+		fprintf(stderr, "REG_ERR: %lx\n",
+				(unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]);
+		exit(2);
+	}
+	dprint_in_signal = 0;
+}
+
+int wait_all_children(void)
+{
+	int status;
+	return waitpid(-1, &status, 0);
+}
+
+void sig_chld(int x)
+{
+	dprint_in_signal = 1;
+	dprintf2("[%d] SIGCHLD: %d\n", getpid(), x);
+	dprint_in_signal = 0;
+}
+
+void setup_sigsegv_handler(void)
+{
+	int r, rs;
+	struct sigaction newact;
+	struct sigaction oldact;
+
+	/* #PF is mapped to sigsegv */
+	int signum  = SIGSEGV;
+
+	newact.sa_handler = 0;
+	newact.sa_sigaction = signal_handler;
+
+	/*sigset_t - signals to block while in the handler */
+	/* get the old signal mask. */
+	rs = sigprocmask(SIG_SETMASK, 0, &newact.sa_mask);
+	pkey_assert(rs == 0);
+
+	/* call sa_sigaction, not sa_handler*/
+	newact.sa_flags = SA_SIGINFO;
+
+	newact.sa_restorer = 0;  /* void(*)(), obsolete */
+	r = sigaction(signum, &newact, &oldact);
+	r = sigaction(SIGALRM, &newact, &oldact);
+	pkey_assert(r == 0);
+}
+
+void setup_handlers(void)
+{
+	signal(SIGCHLD, &sig_chld);
+	setup_sigsegv_handler();
+}
+
+pid_t fork_lazy_child(void)
+{
+	pid_t forkret;
+
+	forkret = fork();
+	pkey_assert(forkret >= 0);
+	dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
+
+	if (!forkret) {
+		/* in the child */
+		while (1) {
+			dprintf1("child sleeping...\n");
+			sleep(30);
+		}
+	}
+	return forkret;
+}
+
+void davecmp(void *_a, void *_b, int len)
+{
+	int i;
+	unsigned long *a = _a;
+	unsigned long *b = _b;
+
+	for (i = 0; i < len / sizeof(*a); i++) {
+		if (a[i] == b[i])
+			continue;
+
+		dprintf3("[%3d]: a: %016lx b: %016lx\n", i, a[i], b[i]);
+	}
+}
+
+void dumpit(char *f)
+{
+	int fd = open(f, O_RDONLY);
+	char buf[100];
+	int nr_read;
+
+	dprintf2("maps fd: %d\n", fd);
+	do {
+		nr_read = read(fd, &buf[0], sizeof(buf));
+		write(1, buf, nr_read);
+	} while (nr_read > 0);
+	close(fd);
+}
+
+#define PKEY_DISABLE_ACCESS    0x1
+#define PKEY_DISABLE_WRITE     0x2
+
+u32 pkey_get(int pkey, unsigned long flags)
+{
+	u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE);
+	u32 pkru = __rdpkru();
+	u32 shifted_pkru;
+	u32 masked_pkru;
+
+	dprintf1("%s(pkey=%d, flags=%lx) = %x / %d\n",
+			__func__, pkey, flags, 0, 0);
+	dprintf2("%s() raw pkru: %x\n", __func__, pkru);
+
+	shifted_pkru = (pkru >> (pkey * PKRU_BITS_PER_PKEY));
+	dprintf2("%s() shifted_pkru: %x\n", __func__, shifted_pkru);
+	masked_pkru = shifted_pkru & mask;
+	dprintf2("%s() masked  pkru: %x\n", __func__, masked_pkru);
+	/*
+	 * shift down the relevant bits to the lowest two, then
+	 * mask off all the other high bits.
+	 */
+	return masked_pkru;
+}
+
+int pkey_set(int pkey, unsigned long rights, unsigned long flags)
+{
+	u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE);
+	u32 old_pkru = __rdpkru();
+	u32 new_pkru;
+
+	/* make sure that 'rights' only contains the bits we expect: */
+	assert(!(rights & ~mask));
+
+	/* copy old pkru */
+	new_pkru = old_pkru;
+	/* mask out bits from pkey in old value: */
+	new_pkru &= ~(mask << (pkey * PKRU_BITS_PER_PKEY));
+	/* OR in new bits for pkey: */
+	new_pkru |= (rights << (pkey * PKRU_BITS_PER_PKEY));
+
+	__wrpkru(new_pkru);
+
+	dprintf3("%s(pkey=%d, rights=%lx, flags=%lx) = %x pkru now: %x old_pkru: %x\n",
+			__func__, pkey, rights, flags, 0, __rdpkru(), old_pkru);
+	return 0;
+}
+
+void pkey_disable_set(int pkey, int flags)
+{
+	unsigned long syscall_flags = 0;
+	int ret;
+	int pkey_rights;
+	u32 orig_pkru;
+
+	dprintf1("START->%s(%d, 0x%x)\n", __func__,
+		pkey, flags);
+	pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));
+
+	pkey_rights = pkey_get(pkey, syscall_flags);
+
+	dprintf1("%s(%d) pkey_get(%d): %x\n", __func__,
+			pkey, pkey, pkey_rights);
+	pkey_assert(pkey_rights >= 0);
+
+	pkey_rights |= flags;
+
+	ret = pkey_set(pkey, pkey_rights, syscall_flags);
+	assert(!ret);
+	/*pkru and flags have the same format */
+	shadow_pkru |= flags << (pkey * 2);
+	dprintf1("%s(%d) shadow: 0x%x\n", __func__, pkey, shadow_pkru);
+
+	pkey_assert(ret >= 0);
+
+	pkey_rights = pkey_get(pkey, syscall_flags);
+	dprintf1("%s(%d) pkey_get(%d): %x\n", __func__,
+			pkey, pkey, pkey_rights);
+
+	dprintf1("%s(%d) pkru: 0x%x\n", __func__, pkey, rdpkru());
+	if (flags)
+		pkey_assert(rdpkru() > orig_pkru);
+	dprintf1("END<---%s(%d, 0x%x)\n", __func__,
+		pkey, flags);
+}
+
+void pkey_disable_clear(int pkey, int flags)
+{
+	unsigned long syscall_flags = 0;
+	int ret;
+	int pkey_rights = pkey_get(pkey, syscall_flags);
+	u32 orig_pkru = rdpkru();
+
+	pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));
+
+	dprintf1("%s(%d) pkey_get(%d): %x\n", __func__,
+			pkey, pkey, pkey_rights);
+	pkey_assert(pkey_rights >= 0);
+
+	pkey_rights |= flags;
+
+	ret = pkey_set(pkey, pkey_rights, 0);
+	/* pkru and flags have the same format */
+	shadow_pkru &= ~(flags << (pkey * 2));
+	pkey_assert(ret >= 0);
+
+	pkey_rights = pkey_get(pkey, syscall_flags);
+	dprintf1("%s(%d) pkey_get(%d): %x\n", __func__,
+			pkey, pkey, pkey_rights);
+
+	dprintf1("%s(%d) pkru: 0x%x\n", __func__, pkey, rdpkru());
+	if (flags)
+		assert(rdpkru() > orig_pkru);
+}
+
+void pkey_write_allow(int pkey)
+{
+	pkey_disable_clear(pkey, PKEY_DISABLE_WRITE);
+}
+void pkey_write_deny(int pkey)
+{
+	pkey_disable_set(pkey, PKEY_DISABLE_WRITE);
+}
+void pkey_access_allow(int pkey)
+{
+	pkey_disable_clear(pkey, PKEY_DISABLE_ACCESS);
+}
+void pkey_access_deny(int pkey)
+{
+	pkey_disable_set(pkey, PKEY_DISABLE_ACCESS);
+}
+
+int sys_mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot,
+		unsigned long pkey)
+{
+	int sret;
+
+	dprintf2("%s(0x%p, %zx, prot=%lx, pkey=%lx)\n", __func__,
+			ptr, size, orig_prot, pkey);
+
+	errno = 0;
+	sret = syscall(SYS_mprotect_key, ptr, size, orig_prot, pkey);
+	if (errno) {
+		dprintf2("SYS_mprotect_key sret: %d\n", sret);
+		dprintf2("SYS_mprotect_key prot: 0x%lx\n", orig_prot);
+		dprintf2("SYS_mprotect_key failed, errno: %d\n", errno);
+		if (DEBUG_LEVEL >= 2)
+			perror("SYS_mprotect_pkey");
+	}
+	return sret;
+}
+
+int sys_pkey_alloc(unsigned long flags, unsigned long init_val)
+{
+	int ret = syscall(SYS_pkey_alloc, flags, init_val);
+	dprintf1("%s(flags=%lx, init_val=%lx) syscall ret: %d errno: %d\n",
+			__func__, flags, init_val, ret, errno);
+	return ret;
+}
+
+int alloc_pkey(void)
+{
+	int ret;
+	unsigned long init_val = 0x0;
+
+	dprintf1("alloc_pkey()::%d, pkru: 0x%x shadow: %x\n",
+			__LINE__, __rdpkru(), shadow_pkru);
+	ret = sys_pkey_alloc(0, init_val);
+	/*
+	 * pkey_alloc() sets PKRU, so we need to reflect it in
+	 * shadow_pkru:
+	 */
+	dprintf4("alloc_pkey()::%d, ret: %d pkru: 0x%x shadow: 0x%x\n",
+			__LINE__, ret, __rdpkru(), shadow_pkru);
+	if (ret) {
+		/* clear both the bits: */
+		shadow_pkru &= ~(0x3      << (ret * 2));
+		dprintf4("alloc_pkey()::%d, ret: %d pkru: 0x%x shadow: 0x%x\n",
+				__LINE__, ret, __rdpkru(), shadow_pkru);
+		/*
+		 * move the new state in from init_val
+		 * (remember, we cheated and init_val == pkru format)
+		 */
+		shadow_pkru |=  (init_val << (ret * 2));
+	}
+	dprintf4("alloc_pkey()::%d, ret: %d pkru: 0x%x shadow: 0x%x\n",
+			__LINE__, ret, __rdpkru(), shadow_pkru);
+	dprintf1("alloc_pkey()::%d errno: %d\n", __LINE__, errno);
+	/* for shadow checking: */
+	rdpkru();
+	dprintf4("alloc_pkey()::%d, ret: %d pkru: 0x%x shadow: 0x%x\n",
+			__LINE__, ret, __rdpkru(), shadow_pkru);
+	return ret;
+}
+
+int sys_pkey_free(unsigned long pkey)
+{
+	int ret = syscall(SYS_pkey_free, pkey);
+	dprintf1("%s(pkey=%ld) syscall ret: %d\n", __func__, pkey, ret);
+	return ret;
+}
+
+/*
+ * I had a bug where pkey bits could be set by mprotect() but
+ * not cleared.  This ensures we get lots of random bit sets
+ * and clears on the vma and pte pkey bits.
+ */
+int alloc_random_pkey(void)
+{
+	int max_nr_pkey_allocs;
+	int ret;
+	int i;
+	int alloced_pkeys[NR_PKEYS];
+	int nr_alloced = 0;
+	int random_index;
+	memset(alloced_pkeys, 0, sizeof(alloced_pkeys));
+
+	/* allocate every possible key and make a note of which ones we got */
+	max_nr_pkey_allocs = NR_PKEYS;
+	max_nr_pkey_allocs = 1;
+	for (i = 0; i < max_nr_pkey_allocs; i++) {
+		int new_pkey = alloc_pkey();
+		if (new_pkey < 0)
+			break;
+		alloced_pkeys[nr_alloced++] = new_pkey;
+	}
+
+	pkey_assert(nr_alloced > 0);
+	/* select a random one out of the allocated ones */
+	random_index = rand() % nr_alloced;
+	ret = alloced_pkeys[random_index];
+	/* now zero it out so we don't free it next */
+	alloced_pkeys[random_index] = 0;
+
+	/* go through the allocated ones that we did not want and free them */
+	for (i = 0; i < nr_alloced; i++) {
+		int free_ret;
+		if (!alloced_pkeys[i])
+			continue;
+		free_ret = sys_pkey_free(alloced_pkeys[i]);
+		pkey_assert(!free_ret);
+	}
+	dprintf1("%s()::%d, ret: %d pkru: 0x%x shadow: 0x%x\n", __func__,
+			__LINE__, ret, __rdpkru(), shadow_pkru);
+	return ret;
+}
+
+int mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot,
+		unsigned long pkey)
+{
+	int nr_iterations = random() % 100;
+	int ret;
+
+	while (0) {
+		int rpkey = alloc_random_pkey();
+		ret = sys_mprotect_pkey(ptr, size, orig_prot, pkey);
+		dprintf1("sys_mprotect_pkey(%p, %zx, prot=0x%lx, pkey=%ld) ret: %d\n",
+				ptr, size, orig_prot, pkey, ret);
+		if (nr_iterations-- < 0)
+			break;
+
+		dprintf1("%s()::%d, ret: %d pkru: 0x%x shadow: 0x%x\n", __func__,
+			__LINE__, ret, __rdpkru(), shadow_pkru);
+		sys_pkey_free(rpkey);
+		dprintf1("%s()::%d, ret: %d pkru: 0x%x shadow: 0x%x\n", __func__,
+			__LINE__, ret, __rdpkru(), shadow_pkru);
+	}
+	pkey_assert(pkey < NR_PKEYS);
+
+	ret = sys_mprotect_pkey(ptr, size, orig_prot, pkey);
+	dprintf1("mprotect_pkey(%p, %zx, prot=0x%lx, pkey=%ld) ret: %d\n",
+			ptr, size, orig_prot, pkey, ret);
+	pkey_assert(!ret);
+	dprintf1("%s()::%d, ret: %d pkru: 0x%x shadow: 0x%x\n", __func__,
+			__LINE__, ret, __rdpkru(), shadow_pkru);
+	return ret;
+}
+
+struct pkey_malloc_record {
+	void *ptr;
+	long size;
+};
+struct pkey_malloc_record *pkey_malloc_records;
+long nr_pkey_malloc_records;
+void record_pkey_malloc(void *ptr, long size)
+{
+	long i;
+	struct pkey_malloc_record *rec = NULL;
+
+	for (i = 0; i < nr_pkey_malloc_records; i++) {
+		rec = &pkey_malloc_records[i];
+		/* find a free record */
+		if (rec)
+			break;
+	}
+	if (!rec) {
+		/* every record is full */
+		size_t old_nr_records = nr_pkey_malloc_records;
+		size_t new_nr_records = (nr_pkey_malloc_records * 2 + 1);
+		size_t new_size = new_nr_records * sizeof(struct pkey_malloc_record);
+		dprintf2("new_nr_records: %zd\n", new_nr_records);
+		dprintf2("new_size: %zd\n", new_size);
+		pkey_malloc_records = realloc(pkey_malloc_records, new_size);
+		pkey_assert(pkey_malloc_records != NULL);
+		rec = &pkey_malloc_records[nr_pkey_malloc_records];
+		/*
+		 * realloc() does not initialize memory, so zero it from
+		 * the first new record all the way to the end.
+		 */
+		for (i = 0; i < new_nr_records - old_nr_records; i++)
+			memset(rec + i, 0, sizeof(*rec));
+	}
+	dprintf3("filling malloc record[%d/%p]: {%p, %ld}\n",
+		(int)(rec - pkey_malloc_records), rec, ptr, size);
+	rec->ptr = ptr;
+	rec->size = size;
+	nr_pkey_malloc_records++;
+}
+
+void free_pkey_malloc(void *ptr)
+{
+	long i;
+	int ret;
+	dprintf3("%s(%p)\n", __func__, ptr);
+	for (i = 0; i < nr_pkey_malloc_records; i++) {
+		struct pkey_malloc_record *rec = &pkey_malloc_records[i];
+		dprintf4("looking for ptr %p at record[%ld/%p]: {%p, %ld}\n",
+				ptr, i, rec, rec->ptr, rec->size);
+		if ((ptr <  rec->ptr) ||
+		    (ptr >= rec->ptr + rec->size))
+			continue;
+
+		dprintf3("found ptr %p at record[%ld/%p]: {%p, %ld}\n",
+				ptr, i, rec, rec->ptr, rec->size);
+		nr_pkey_malloc_records--;
+		ret = munmap(rec->ptr, rec->size);
+		dprintf3("munmap ret: %d\n", ret);
+		pkey_assert(!ret);
+		dprintf3("clearing rec->ptr, rec: %p\n", rec);
+		rec->ptr = NULL;
+		dprintf3("done clearing rec->ptr, rec: %p\n", rec);
+		return;
+	}
+	pkey_assert(false);
+}
+
+
+void *malloc_pkey_with_mprotect(long size, int prot, u16 pkey)
+{
+	void *ptr;
+	int ret;
+
+	rdpkru();
+	dprintf1("doing %s(size=%ld, prot=0x%x, pkey=%d)\n", __func__,
+			size, prot, pkey);
+	pkey_assert(pkey < NR_PKEYS);
+	ptr = mmap(NULL, size, prot, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+	pkey_assert(ptr != (void *)-1);
+	ret = mprotect_pkey((void *)ptr, PAGE_SIZE, prot, pkey);
+	pkey_assert(!ret);
+	record_pkey_malloc(ptr, size);
+	rdpkru();
+
+	dprintf1("%s() for pkey %d @ %p\n", __func__, pkey, ptr);
+	return ptr;
+}
+
+void *malloc_pkey_anon_huge(long size, int prot, u16 pkey)
+{
+	int ret;
+	void *ptr;
+
+	dprintf1("doing %s(size=%ld, prot=0x%x, pkey=%d)\n", __func__,
+			size, prot, pkey);
+	/*
+	 * Guarantee we can fit at least one huge page in the resulting
+	 * allocation by allocating space for 2:
+	 */
+	size = ALIGN_UP(size, HPAGE_SIZE * 2);
+	ptr = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+	pkey_assert(ptr != (void *)-1);
+	record_pkey_malloc(ptr, size);
+	mprotect_pkey(ptr, size, prot, pkey);
+
+	dprintf1("unaligned ptr: %p\n", ptr);
+	ptr = ALIGN_PTR_UP(ptr, HPAGE_SIZE);
+	dprintf1("  aligned ptr: %p\n", ptr);
+	ret = madvise(ptr, HPAGE_SIZE, MADV_HUGEPAGE);
+	dprintf1("MADV_HUGEPAGE ret: %d\n", ret);
+	ret = madvise(ptr, HPAGE_SIZE, MADV_WILLNEED);
+	dprintf1("MADV_WILLNEED ret: %d\n", ret);
+	memset(ptr, 0, HPAGE_SIZE);
+
+	dprintf1("mmap()'d thp for pkey %d @ %p\n", pkey, ptr);
+	return ptr;
+}
+
+int hugetlb_setup_ok;
+#define GET_NR_HUGE_PAGES 10
+void setup_hugetlbfs(void)
+{
+	int err;
+	int fd;
+	int validated_nr_pages;
+	int i;
+	char buf[] = "123";
+
+	if (geteuid() != 0) {
+		fprintf(stderr, "WARNING: not run as root, can not do hugetlb test\n");
+		return;
+	}
+
+	cat_into_file(__stringify(GET_NR_HUGE_PAGES), "/proc/sys/vm/nr_hugepages");
+
+	/*
+	 * Now go make sure that we got the pages and that they
+	 * are 2M pages.  Someone might have made 1G the default.
+	 */
+	fd = open("/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages", O_RDONLY);
+	if (fd < 0) {
+		perror("opening sysfs 2M hugetlb config");
+		return;
+	}
+
+	/* -1 to guarantee leaving the trailing \0 */
+	err = read(fd, buf, sizeof(buf)-1);
+	close(fd);
+	if (err <= 0) {
+		perror("reading sysfs 2M hugetlb config");
+		return;
+	}
+
+	if (atoi(buf) != GET_NR_HUGE_PAGES) {
+		fprintf(stderr, "could not confirm 2M pages, got: '%s' expected %d\n",
+			buf, GET_NR_HUGE_PAGES);
+		return;
+	}
+
+	hugetlb_setup_ok = 1;
+}
+
+void *malloc_pkey_hugetlb(long size, int prot, u16 pkey)
+{
+	void *ptr;
+	int flags = MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB;
+
+	if (!hugetlb_setup_ok)
+		return PTR_ERR_ENOTSUP;
+
+	dprintf1("doing %s(%ld, %x, %x)\n", __func__, size, prot, pkey);
+	size = ALIGN_UP(size, HPAGE_SIZE * 2);
+	pkey_assert(pkey < NR_PKEYS);
+	ptr = mmap(NULL, size, PROT_NONE, flags, -1, 0);
+	pkey_assert(ptr != (void *)-1);
+	mprotect_pkey(ptr, size, prot, pkey);
+
+	record_pkey_malloc(ptr, size);
+
+	dprintf1("mmap()'d hugetlbfs for pkey %d @ %p\n", pkey, ptr);
+	return ptr;
+}
+
+void *malloc_pkey_mmap_dax(long size, int prot, u16 pkey)
+{
+	void *ptr;
+	int fd;
+
+	dprintf1("doing %s(size=%ld, prot=0x%x, pkey=%d)\n", __func__,
+			size, prot, pkey);
+	pkey_assert(pkey < NR_PKEYS);
+	fd = open("/dax/foo", O_RDWR);
+	pkey_assert(fd >= 0);
+
+	ptr = mmap(0, size, prot, MAP_SHARED, fd, 0);
+	pkey_assert(ptr != (void *)-1);
+
+	mprotect_pkey(ptr, size, prot, pkey);
+
+	record_pkey_malloc(ptr, size);
+
+	dprintf1("mmap()'d for pkey %d @ %p\n", pkey, ptr);
+	close(fd);
+	return ptr;
+}
+
+void *(*pkey_malloc[])(long size, int prot, u16 pkey) = {
+
+	malloc_pkey_with_mprotect,
+	malloc_pkey_anon_huge,
+	malloc_pkey_hugetlb
+/* can not do direct with the pkey_mprotect() API:
+	malloc_pkey_mmap_direct,
+	malloc_pkey_mmap_dax,
+*/
+};
+
+void *malloc_pkey(long size, int prot, u16 pkey)
+{
+	void *ret;
+	static int malloc_type;
+	int nr_malloc_types = ARRAY_SIZE(pkey_malloc);
+
+	pkey_assert(pkey < NR_PKEYS);
+
+	while (1) {
+		pkey_assert(malloc_type < nr_malloc_types);
+
+		ret = pkey_malloc[malloc_type](size, prot, pkey);
+		pkey_assert(ret != (void *)-1);
+
+		malloc_type++;
+		if (malloc_type >= nr_malloc_types)
+			malloc_type = (random()%nr_malloc_types);
+
+		/* try again if the malloc_type we tried is unsupported */
+		if (ret == PTR_ERR_ENOTSUP)
+			continue;
+
+		break;
+	}
+
+	dprintf3("%s(%ld, prot=%x, pkey=%x) returning: %p\n", __func__,
+			size, prot, pkey, ret);
+	return ret;
+}
+
+int last_pkru_faults;
+void expected_pk_fault(int pkey)
+{
+	dprintf2("%s(): last_pkru_faults: %d pkru_faults: %d\n",
+			__func__, last_pkru_faults, pkru_faults);
+	dprintf2("%s(%d): last_si_pkey: %d\n", __func__, pkey, last_si_pkey);
+	pkey_assert(last_pkru_faults + 1 == pkru_faults);
+	pkey_assert(last_si_pkey == pkey);
+	/*
+	 * The signal handler shold have cleared out PKRU to let the
+	 * test program continue.  We now have to restore it.
+	 */
+	if (__rdpkru() != 0)
+		pkey_assert(0);
+
+	__wrpkru(shadow_pkru);
+	dprintf1("%s() set PKRU=%x to restore state after signal nuked it\n",
+			__func__, shadow_pkru);
+	last_pkru_faults = pkru_faults;
+	last_si_pkey = -1;
+}
+
+void do_not_expect_pk_fault(void)
+{
+	pkey_assert(last_pkru_faults == pkru_faults);
+}
+
+int test_fds[10] = { -1 };
+int nr_test_fds;
+void __save_test_fd(int fd)
+{
+	pkey_assert(fd >= 0);
+	pkey_assert(nr_test_fds < ARRAY_SIZE(test_fds));
+	test_fds[nr_test_fds] = fd;
+	nr_test_fds++;
+}
+
+int get_test_read_fd(void)
+{
+	int test_fd = open("/etc/passwd", O_RDONLY);
+	__save_test_fd(test_fd);
+	return test_fd;
+}
+
+void close_test_fds(void)
+{
+	int i;
+
+	for (i = 0; i < nr_test_fds; i++) {
+		if (test_fds[i] < 0)
+			continue;
+		close(test_fds[i]);
+		test_fds[i] = -1;
+	}
+	nr_test_fds = 0;
+}
+
+#define barrier() __asm__ __volatile__("": : :"memory")
+__attribute__((noinline)) int read_ptr(int *ptr)
+{
+	/*
+	 * Keep GCC from optimizing this away somehow
+	 */
+	barrier();
+	return *ptr;
+}
+
+void test_read_of_write_disabled_region(int *ptr, u16 pkey)
+{
+	int ptr_contents;
+
+	dprintf1("disabling write access to PKEY[1], doing read\n");
+	pkey_write_deny(pkey);
+	ptr_contents = read_ptr(ptr);
+	dprintf1("*ptr: %d\n", ptr_contents);
+	dprintf1("\n");
+}
+void test_read_of_access_disabled_region(int *ptr, u16 pkey)
+{
+	int ptr_contents;
+
+	dprintf1("disabling access to PKEY[%02d], doing read @ %p\n", pkey, ptr);
+	rdpkru();
+	pkey_access_deny(pkey);
+	ptr_contents = read_ptr(ptr);
+	dprintf1("*ptr: %d\n", ptr_contents);
+	expected_pk_fault(pkey);
+}
+void test_write_of_write_disabled_region(int *ptr, u16 pkey)
+{
+	dprintf1("disabling write access to PKEY[%02d], doing write\n", pkey);
+	pkey_write_deny(pkey);
+	*ptr = __LINE__;
+	expected_pk_fault(pkey);
+}
+void test_write_of_access_disabled_region(int *ptr, u16 pkey)
+{
+	dprintf1("disabling access to PKEY[%02d], doing write\n", pkey);
+	pkey_access_deny(pkey);
+	*ptr = __LINE__;
+	expected_pk_fault(pkey);
+}
+void test_kernel_write_of_access_disabled_region(int *ptr, u16 pkey)
+{
+	int ret;
+	int test_fd = get_test_read_fd();
+
+	dprintf1("disabling access to PKEY[%02d], "
+		 "having kernel read() to buffer\n", pkey);
+	pkey_access_deny(pkey);
+	ret = read(test_fd, ptr, 1);
+	dprintf1("read ret: %d\n", ret);
+	pkey_assert(ret);
+}
+void test_kernel_write_of_write_disabled_region(int *ptr, u16 pkey)
+{
+	int ret;
+	int test_fd = get_test_read_fd();
+
+	pkey_write_deny(pkey);
+	ret = read(test_fd, ptr, 100);
+	dprintf1("read ret: %d\n", ret);
+	if (ret < 0 && (DEBUG_LEVEL > 0))
+		perror("verbose read result (OK for this to be bad)");
+	pkey_assert(ret);
+}
+
+void test_kernel_gup_of_access_disabled_region(int *ptr, u16 pkey)
+{
+	int pipe_ret, vmsplice_ret;
+	struct iovec iov;
+	int pipe_fds[2];
+
+	pipe_ret = pipe(pipe_fds);
+
+	pkey_assert(pipe_ret == 0);
+	dprintf1("disabling access to PKEY[%02d], "
+		 "having kernel vmsplice from buffer\n", pkey);
+	pkey_access_deny(pkey);
+	iov.iov_base = ptr;
+	iov.iov_len = PAGE_SIZE;
+	vmsplice_ret = vmsplice(pipe_fds[1], &iov, 1, SPLICE_F_GIFT);
+	dprintf1("vmsplice() ret: %d\n", vmsplice_ret);
+	pkey_assert(vmsplice_ret == -1);
+
+	close(pipe_fds[0]);
+	close(pipe_fds[1]);
+}
+
+void test_kernel_gup_write_to_write_disabled_region(int *ptr, u16 pkey)
+{
+	int ignored = 0xdada;
+	int futex_ret;
+	int some_int = __LINE__;
+
+	dprintf1("disabling write to PKEY[%02d], "
+		 "doing futex gunk in buffer\n", pkey);
+	*ptr = some_int;
+	pkey_write_deny(pkey);
+	futex_ret = syscall(SYS_futex, ptr, FUTEX_WAIT, some_int-1, NULL,
+			&ignored, ignored);
+	if (DEBUG_LEVEL > 0)
+		perror("futex");
+	dprintf1("futex() ret: %d\n", futex_ret);
+}
+
+/* Assumes that all pkeys other than 'pkey' are unallocated */
+void test_pkey_syscalls_on_non_allocated_pkey(int *ptr, u16 pkey)
+{
+	int err;
+	int i;
+
+	/* Note: 0 is the default pkey, so don't mess with it */
+	for (i = 1; i < NR_PKEYS; i++) {
+		if (pkey == i)
+			continue;
+
+		dprintf1("trying get/set/free to non-allocated pkey: %2d\n", i);
+		err = sys_pkey_free(i);
+		pkey_assert(err);
+
+		/* not enforced when pkey_get() is not a syscall
+		err = pkey_get(i, 0);
+		pkey_assert(err < 0);
+		*/
+
+		err = sys_pkey_free(i);
+		pkey_assert(err);
+
+		err = sys_mprotect_pkey(ptr, PAGE_SIZE, PROT_READ, i);
+		pkey_assert(err);
+	}
+}
+
+/* Assumes that all pkeys other than 'pkey' are unallocated */
+void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
+{
+	int err;
+	int bad_flag = (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE) + 1;
+	int bad_pkey = NR_PKEYS+99;
+
+	/* not enforced when pkey_get() is not a syscall
+	err = pkey_get(bad_pkey, bad_flag);
+	pkey_assert(err < 0);
+	*/
+
+	/* pass a known-invalid pkey in: */
+	err = sys_mprotect_pkey(ptr, PAGE_SIZE, PROT_READ, bad_pkey);
+	pkey_assert(err);
+}
+
+/* Assumes that all pkeys other than 'pkey' are unallocated */
+void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
+{
+	unsigned long flags;
+	unsigned long init_val;
+	int err;
+	int allocated_pkeys[NR_PKEYS] = {0};
+	int nr_allocated_pkeys = 0;
+	int i;
+
+	for (i = 0; i < NR_PKEYS*2; i++) {
+		int new_pkey;
+		dprintf1("%s() alloc loop: %d\n", __func__, i);
+		new_pkey = alloc_pkey();
+		dprintf4("%s()::%d, err: %d pkru: 0x%x shadow: 0x%x\n", __func__,
+				__LINE__, err, __rdpkru(), shadow_pkru);
+		rdpkru(); /* for shadow checking */
+		dprintf2("%s() errno: %d ENOSPC: %d\n", __func__, errno, ENOSPC);
+		if ((new_pkey == -1) && (errno == ENOSPC)) {
+			dprintf2("%s() failed to allocate pkey after %d tries\n",
+				__func__, nr_allocated_pkeys);
+			break;
+		}
+		pkey_assert(nr_allocated_pkeys < NR_PKEYS);
+		allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
+	}
+
+	dprintf3("%s()::%d\n", __func__, __LINE__);
+
+	/*
+	 * ensure it did not reach the end of the loop without
+	 * failure:
+	 */
+	pkey_assert(i < NR_PKEYS*2);
+
+	/*
+	 * There are 16 pkeys supported in hardware.  One is taken
+	 * up for the default (0) and another can be taken up by
+	 * an execute-only mapping.  Ensure that we can allocate
+	 * at least 14 (16-2).
+	 */
+	pkey_assert(i >= NR_PKEYS-2);
+
+	for (i = 0; i < nr_allocated_pkeys; i++) {
+		err = sys_pkey_free(allocated_pkeys[i]);
+		pkey_assert(!err);
+		rdpkru(); /* for shadow checking */
+	}
+}
+
+void test_ptrace_of_child(int *ptr, u16 pkey)
+{
+	__attribute__((__unused__)) int peek_result;
+	pid_t child_pid;
+	void *ignored = 0;
+	long ret;
+	int status;
+	/*
+	 * This is the "control" for our little expermient.  Make sure
+	 * we can always access it when ptracing.
+	 */
+	int *plain_ptr_unaligned = malloc(HPAGE_SIZE);
+	int *plain_ptr = ALIGN_PTR_UP(plain_ptr_unaligned, PAGE_SIZE);
+
+	/*
+	 * Fork a child which is an exact copy of this process, of course.
+	 * That means we can do all of our tests via ptrace() and then plain
+	 * memory access and ensure they work differently.
+	 */
+	child_pid = fork_lazy_child();
+	dprintf1("[%d] child pid: %d\n", getpid(), child_pid);
+
+	ret = ptrace(PTRACE_ATTACH, child_pid, ignored, ignored);
+	if (ret)
+		perror("attach");
+	dprintf1("[%d] attach ret: %ld %d\n", getpid(), ret, __LINE__);
+	pkey_assert(ret != -1);
+	ret = waitpid(child_pid, &status, WUNTRACED);
+	if ((ret != child_pid) || !(WIFSTOPPED(status))) {
+		fprintf(stderr, "weird waitpid result %ld stat %x\n",
+				ret, status);
+		pkey_assert(0);
+	}
+	dprintf2("waitpid ret: %ld\n", ret);
+	dprintf2("waitpid status: %d\n", status);
+
+	pkey_access_deny(pkey);
+	pkey_write_deny(pkey);
+
+	/* Write access, untested for now:
+	ret = ptrace(PTRACE_POKEDATA, child_pid, peek_at, data);
+	pkey_assert(ret != -1);
+	dprintf1("poke at %p: %ld\n", peek_at, ret);
+	*/
+
+	/*
+	 * Try to access the pkey-protected "ptr" via ptrace:
+	 */
+	ret = ptrace(PTRACE_PEEKDATA, child_pid, ptr, ignored);
+	/* expect it to work, without an error: */
+	pkey_assert(ret != -1);
+	/* Now access from the current task, and expect an exception: */
+	peek_result = read_ptr(ptr);
+	expected_pk_fault(pkey);
+
+	/*
+	 * Try to access the NON-pkey-protected "plain_ptr" via ptrace:
+	 */
+	ret = ptrace(PTRACE_PEEKDATA, child_pid, plain_ptr, ignored);
+	/* expect it to work, without an error: */
+	pkey_assert(ret != -1);
+	/* Now access from the current task, and expect NO exception: */
+	peek_result = read_ptr(plain_ptr);
+	do_not_expect_pk_fault();
+
+	ret = ptrace(PTRACE_DETACH, child_pid, ignored, 0);
+	pkey_assert(ret != -1);
+
+	ret = kill(child_pid, SIGKILL);
+	pkey_assert(ret != -1);
+
+	wait(&status);
+
+	free(plain_ptr_unaligned);
+}
+
+void test_executing_on_unreadable_memory(int *ptr, u16 pkey)
+{
+	void *p1;
+	int scratch;
+	int ptr_contents;
+	int ret;
+
+	p1 = ALIGN_PTR_UP(&lots_o_noops_around_write, PAGE_SIZE);
+	dprintf3("&lots_o_noops: %p\n", &lots_o_noops_around_write);
+	/* lots_o_noops_around_write should be page-aligned already */
+	assert(p1 == &lots_o_noops_around_write);
+
+	/* Point 'p1' at the *second* page of the function: */
+	p1 += PAGE_SIZE;
+
+	madvise(p1, PAGE_SIZE, MADV_DONTNEED);
+	lots_o_noops_around_write(&scratch);
+	ptr_contents = read_ptr(p1);
+	dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
+
+	ret = mprotect_pkey(p1, PAGE_SIZE, PROT_EXEC, (u64)pkey);
+	pkey_assert(!ret);
+	pkey_access_deny(pkey);
+
+	dprintf2("pkru: %x\n", rdpkru());
+
+	/*
+	 * Make sure this is an *instruction* fault
+	 */
+	madvise(p1, PAGE_SIZE, MADV_DONTNEED);
+	lots_o_noops_around_write(&scratch);
+	do_not_expect_pk_fault();
+	ptr_contents = read_ptr(p1);
+	dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
+	expected_pk_fault(pkey);
+}
+
+void test_mprotect_pkey_on_unsupported_cpu(int *ptr, u16 pkey)
+{
+	int size = PAGE_SIZE;
+	int sret;
+
+	if (cpu_has_pku()) {
+		dprintf1("SKIP: %s: no CPU support\n", __func__);
+		return;
+	}
+
+	sret = syscall(SYS_mprotect_key, ptr, size, PROT_READ, pkey);
+	pkey_assert(sret < 0);
+}
+
+void (*pkey_tests[])(int *ptr, u16 pkey) = {
+	test_read_of_write_disabled_region,
+	test_read_of_access_disabled_region,
+	test_write_of_write_disabled_region,
+	test_write_of_access_disabled_region,
+	test_kernel_write_of_access_disabled_region,
+	test_kernel_write_of_write_disabled_region,
+	test_kernel_gup_of_access_disabled_region,
+	test_kernel_gup_write_to_write_disabled_region,
+	test_executing_on_unreadable_memory,
+	test_ptrace_of_child,
+	test_pkey_syscalls_on_non_allocated_pkey,
+	test_pkey_syscalls_bad_args,
+	test_pkey_alloc_exhaust,
+};
+
+void run_tests_once(void)
+{
+	int *ptr;
+	int prot = PROT_READ|PROT_WRITE;
+
+	for (test_nr = 0; test_nr < ARRAY_SIZE(pkey_tests); test_nr++) {
+		int pkey;
+		int orig_pkru_faults = pkru_faults;
+
+		dprintf1("======================\n");
+		dprintf1("test %d preparing...\n", test_nr);
+
+		tracing_on();
+		pkey = alloc_random_pkey();
+		dprintf1("test %d starting with pkey: %d\n", test_nr, pkey);
+		ptr = malloc_pkey(PAGE_SIZE, prot, pkey);
+		dprintf1("test %d starting...\n", test_nr);
+		pkey_tests[test_nr](ptr, pkey);
+		dprintf1("freeing test memory: %p\n", ptr);
+		free_pkey_malloc(ptr);
+		sys_pkey_free(pkey);
+
+		dprintf1("pkru_faults: %d\n", pkru_faults);
+		dprintf1("orig_pkru_faults: %d\n", orig_pkru_faults);
+
+		tracing_off();
+		close_test_fds();
+
+		printf("test %2d PASSED (itertation %d)\n", test_nr, iteration_nr);
+		dprintf1("======================\n\n");
+	}
+	iteration_nr++;
+}
+
+void pkey_setup_shadow(void)
+{
+	shadow_pkru = __rdpkru();
+}
+
+int main(void)
+{
+	int nr_iterations = 22;
+
+	setup_handlers();
+
+	printf("has pku: %d\n", cpu_has_pku());
+
+	if (!cpu_has_pku()) {
+		int size = PAGE_SIZE;
+		int *ptr;
+
+		printf("running PKEY tests for unsupported CPU/OS\n");
+
+		ptr  = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+		assert(ptr != (void *)-1);
+		test_mprotect_pkey_on_unsupported_cpu(ptr, 1);
+		exit(0);
+	}
+
+	pkey_setup_shadow();
+	printf("startup pkru: %x\n", rdpkru());
+	setup_hugetlbfs();
+
+	while (nr_iterations-- > 0)
+		run_tests_once();
+
+	printf("done (all tests OK)\n");
+	return 0;
+}
diff --git a/tools/testing/selftests/zram/README b/tools/testing/selftests/zram/README
index eb17917..7972cc5 100644
--- a/tools/testing/selftests/zram/README
+++ b/tools/testing/selftests/zram/README
@@ -13,7 +13,7 @@
 
 Kconfig required:
 CONFIG_ZRAM=y
-CONFIG_ZRAM_LZ4_COMPRESS=y
+CONFIG_CRYPTO_LZ4=y
 CONFIG_ZPOOL=y
 CONFIG_ZSMALLOC=y
 
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index db96688..8035cc1 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -84,7 +84,8 @@
 	 * mm and might be done in another context, so we must
 	 * use FOLL_REMOTE.
 	 */
-	__get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL, FOLL_REMOTE);
+	__get_user_pages_unlocked(NULL, mm, addr, 1, NULL,
+			FOLL_WRITE | FOLL_REMOTE);
 
 	kvm_async_page_present_sync(vcpu, apf);
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 81dfc73..28510e7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1416,10 +1416,15 @@
 		down_read(&current->mm->mmap_sem);
 		npages = get_user_page_nowait(addr, write_fault, page);
 		up_read(&current->mm->mmap_sem);
-	} else
+	} else {
+		unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;
+
+		if (write_fault)
+			flags |= FOLL_WRITE;
+
 		npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
-						   write_fault, 0, page,
-						   FOLL_TOUCH|FOLL_HWPOISON);
+						   page, flags);
+	}
 	if (npages != 1)
 		return npages;